repo_name (string, lengths 6–130) | hexsha (list) | file_path (list) | code (list) | apis (list)
---|---|---|---|---
nickderobertis/sensitivity | [
"73ccaa7cca4d266d6ab56951a9034589bc329a93"
] | [
"sensitivity/df.py"
] | [
"import operator\nfrom functools import reduce\nfrom typing import Dict, Any, Callable, Sequence, Optional\nimport itertools\nfrom copy import deepcopy\n\nimport pandas as pd\nfrom pandas.io.formats.style import Styler\nimport numpy as np\nfrom tqdm import tqdm\n\nfrom sensitivity.colors import _get_color_map\n\n\ndef sensitivity_df(sensitivity_values: Dict[str, Any], func: Callable,\n result_name: str = 'Result',\n labels: Optional[Dict[str, str]] = None,\n **func_kwargs) -> pd.DataFrame:\n \"\"\"\n Creates a DataFrame containing the results of sensitivity analysis.\n\n Runs func with the cartesian product of the possible values for each argument, passed\n in sensitivity_values.\n\n :param sensitivity_values: Dictionary where keys are func's argument names and values are lists of possible\n values to use for that argument.\n :param func: Function that accepts arguments with names matching the keys of sensitivity_values, and outputs a\n scalar value.\n :param result_name: Name for result shown in graph color bar label\n :param labels: Optional dictionary where keys are arguments of the function and values are the displayed names\n for these arguments in the styled DataFrames and plots\n :param func_kwargs: Additional arguments to pass to func, regardless of the sensitivity values picked\n :return: a DataFrame containing the results from sensitivity analysis on func\n \"\"\"\n sensitivity_cols = list(sensitivity_values.keys())\n df = pd.DataFrame(columns=sensitivity_cols + [result_name])\n num_cases = reduce(operator.mul, [len(values) for values in sensitivity_values.values()], 1)\n for i in tqdm(itertools.product(*sensitivity_values.values()), total=num_cases):\n base_param_dict = dict(zip(sensitivity_cols, i))\n param_dict = deepcopy(base_param_dict)\n param_dict.update(func_kwargs)\n result = func(**param_dict)\n base_param_dict.update({result_name: result})\n df = df.append(pd.DataFrame(pd.Series(base_param_dict)).T)\n df.reset_index(drop=True, inplace=True)\n df = df.convert_dtypes()\n if labels:\n df.rename(columns=labels, inplace=True)\n\n return df\n\n\ndef _two_variable_sensitivity_display_df(df: pd.DataFrame, col1: str, col2: str,\n result_col: str = 'Result', agg_func: Callable = np.mean) -> pd.DataFrame:\n df_or_series = df[[col1, col2, result_col]].groupby([col1, col2]).apply(agg_func)\n if isinstance(df_or_series, pd.DataFrame):\n series = df_or_series[result_col]\n elif isinstance(df_or_series, pd.Series):\n series = df_or_series\n else:\n raise ValueError(f'expected Series or DataFrame, got {df_or_series} of type {type(df_or_series)}')\n selected_df = series.reset_index()\n\n wide_df = selected_df.pivot(index=col1, columns=col2, values=result_col)\n wide_df.columns.name = None\n return wide_df\n\n\ndef _style_sensitivity_df(df: pd.DataFrame, col1: str, col2: Optional[str] = None, result_col: str = 'Result',\n reverse_colors: bool = False,\n col_subset: Optional[Sequence[str]] = None,\n num_fmt: Optional[str] = None, color_map: str = 'RdYlGn') -> Styler:\n if col2 is not None:\n caption = f'{result_col} - {col1} vs. {col2}'\n else:\n caption = f'{result_col} vs. {col1}'\n\n if num_fmt is not None:\n fmt_dict = {col: num_fmt for col in df.columns}\n styler = df.style.format(fmt_dict)\n else:\n styler = df.style\n\n color_str = _get_color_map(reverse_colors=reverse_colors, color_map=color_map)\n return styler.background_gradient(\n cmap=color_str, subset=col_subset, axis=None\n ).set_caption(caption)\n\n"
] | [
[
"pandas.DataFrame",
"pandas.Series"
]
] |
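A minimal usage sketch for `sensitivity_df` from the row above; the `add` function, its value lists, and the import path (taken from the listed file `sensitivity/df.py`) are illustrative assumptions, not part of the repository.

```python
from sensitivity.df import sensitivity_df  # path assumed from the row's file_path

def add(x, y):
    return x + y

# Runs add() over the cartesian product {1, 2, 3} x {10, 20} -> six result rows.
df = sensitivity_df({'x': [1, 2, 3], 'y': [10, 20]}, add, result_name='Sum')
print(df)
```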
Algue-Rythme/anomaly | [
"e6dcfa6f2c92c74e1519f285d0309cce70d0aa5e"
] | [
"sandbox.py"
] | [
"import numpy as np\nimport tensorflow as tf\n\n# from deel.lip.layers import ScaledL2NormPooling2D\nfrom models import ScaledL2NormPooling2D\n\nwith tf.device('/cpu:0'):\n pooler = ScaledL2NormPooling2D(pool_size=(3,3))\n ones = tf.ones(shape=(1,3,3,1))\n other = 3*tf.ones(shape=(1,3,3,1))\n print(ones, other, tf.linalg.norm(other-ones))\n po = pooler(ones)\n pt = pooler(other)\n print(po, pt, tf.linalg.norm(pt-po))\n"
] | [
[
"tensorflow.linalg.norm",
"tensorflow.ones",
"tensorflow.device"
]
] |
eklitzke/schemaless | [
"eb2ca453a69e8af36980c53fcc66725116ae7971"
] | [
"examples/mysqlbench/plot.py"
] | [
"import csv\nimport sys\nimport optparse\n\nimport matplotlib\nfrom matplotlib import pyplot\n\npyplot.rcParams.update({\n 'backend': 'cairo',\n 'axes.labelsize': 10,\n 'legend.fontsize': 10,\n 'xtick.labelsize': 8,\n 'ytick.labelsize': 8,\n 'font.sans-serif': ['Droid Sans']})\n\ndef main(csv_name, opts):\n\treader = iter(csv.reader(open(csv_name)))\n\tnames = reader.next()\n\tdata = dict((n, []) for n in names)\n\tfor row in reader:\n\t\tfor name, val in zip(names, row):\n\t\t\tdata[name].append(float(val))\n\n\tfor name in names[1:]:\n\t\txs, ys = [], []\n\t\tfor x in xrange(len(data[name])):\n\t\t\txs.append(data['cumulative'][x])\n\t\t\tys.append(data[name][x])\n\t\tpyplot.plot(xs, ys, label=name)\n\t\t#pyplot.scatter(xs, ys, label=name)\n\tpyplot.xlabel('cumulative # of records inserted')\n\tpyplot.ylabel('seconds per 10k inserts')\n\tpyplot.legend(loc=2)\n\tif opts.title:\n\t\tpyplot.title(opts.title)\n\n\tpyplot.savefig(opts.output, format='png', dpi=120)\n\nif __name__ == '__main__':\n\tparser = optparse.OptionParser()\n\tparser.add_option('-t', '--title', default=None, help='the title to use')\n\tparser.add_option('-o', '--output', default='graph.png', help='what file to output to')\n\topts, args = parser.parse_args()\n\tif len(args) != 1:\n\t\tparser.error('must specify an input file')\n\tmain(args[0], opts)\n"
] | [
[
"matplotlib.pyplot.rcParams.update",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.savefig",
"matplotlib.pyplot.plot",
"matplotlib.pyplot.legend",
"matplotlib.pyplot.title",
"matplotlib.pyplot.ylabel"
]
] |
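A short Python 3 sketch of the CSV-to-pyplot flow in `plot.py` above (the original relies on Python 2's `reader.next()` and `xrange`); the file name `bench.csv` and its column layout are hypothetical.

```python
import csv
from matplotlib import pyplot

with open('bench.csv') as f:            # hypothetical input file
    reader = csv.reader(f)
    names = next(reader)                # header row, as in the original script
    data = {n: [] for n in names}
    for row in reader:
        for name, val in zip(names, row):
            data[name].append(float(val))

for name in names[1:]:
    pyplot.plot(data['cumulative'], data[name], label=name)
pyplot.xlabel('cumulative # of records inserted')
pyplot.ylabel('seconds per 10k inserts')
pyplot.legend(loc=2)
pyplot.savefig('graph.png', format='png', dpi=120)
```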
Manjunathsai7/Chest_XRay_Classification- | [
"23545c29f3a3a8029b67d1b8f0900827058e53be"
] | [
"V3Net/Analyser.py"
] | [
"import os\nimport numpy as np\nfrom tqdm import tqdm\nimport scipy\nimport matplotlib\nfrom matplotlib import pyplot as plt\nimport skimage\nfrom skimage.transform import resize\nfrom sklearn.metrics import confusion_matrix\nimport cv2\nfrom mlxtend.plotting import plot_confusion_matrix\nimport keras\nfrom keras.utils.np_utils import to_categorical\nfrom keras.callbacks import ReduceLROnPlateau, ModelCheckpoint\nfrom keras.models import Sequential\nfrom keras.layers import Dense, Activation , Dropout, Flatten , Conv2D , BatchNormalization, MaxPool2D\nfrom keras.utils import np_utils\nfrom keras.optimizers import SGD, RMSprop\nfrom keras.constraints import maxnorm\nfrom keras import backend as k\nk.set_image_data_format('channels_first')\n\n# As the data is already sorted and split into test , train and validation folders (using zshell) now it's just to\n# feed in those directories directly\n\ntrain_dir = \"~/chest_xray/train\"\ntest_dir = \"~/chest_xray/test\"\nval_dir = \"~/chest_xray/val\"\n\n# Now labels are to be extracted and images are to be preprocessed\n\ndef data_preprocess(path):\n\tX = []\n\tY = []\n\tfor Dir in os.listdir(path):\n\t\tif not Dir.startswith('.'):\n\t\t\tif Dir in ['NORMAL']:\n\t\t\t\tlabel = 0\n\t\t\telif Dir in ['PNEUMONIA']:\n\t\t\t\tlabel = 1\n\t\t\telse:\n\t\t\t\tlabel = 2\n\n\t\t\ttmp = path +'/'+ Dir\n\n\t\t\tfor file in tqdm(os.listdir(tmp)):\n\t\t\t\timg = cv2.imread(tmp + '/' + file)\n\t\t\t\tif img is not None:\n\t\t\t\t\timg = skimage.transform.resize(img, (150, 150, 3))\n\t\t\t\t\timg = np.asarray(img)\n\t\t\t\t\tX.append(img)\n\t\t\t\t\tY.append(label)\n\n\tX = np.asarray(X)\n\tY = np.asarray(Y)\n\treturn X, Y\n\n# images and labels are loaded in respective variables\n\nX_train, Y_train = data_preprocess(train_dir)\n\nX_test , Y_test = data_preprocess(test_dir)\n\nX_val , Y_val = data_preprocess(val_dir)\n\nprint (X_train.shape, '/n', X_test.shape, '/n', X_val.shape)\nprint (Y_train.shape, '/n', Y_test.shape, '/n', Y_val.shape)\nprint('Encoding labels...')\n\n# onehot encoding labels\n\nY_train = to_categorical(Y_train,2)\nY_test = to_categorical(Y_test,2)\nY_val = to_categorical(Y_val,2)\n\nprint (Y_train.shape, '/n', Y_test.shape, '/n', Y_val.shape)\n\n\n# as the class data is imbalenced , we are measuring precision , recall and confusion matrix plot\n\n#callbacks used to reduce learning rate by monitoring 'val_acc'\n\nreduce_lr = ReduceLROnPlateau(monitor='val_accuracy',factor=0.1,patience=1,verbose=1,min_delta=0.0001)\n\n#using InceptionV3 weights\n# Checkpoints are used to monitor and save best model and avoid val_acc drop due to overfitting\n\nweights_path = '~/inception_v3_weights.h5'\ncheck_point = ModelCheckpoint(weights_path,monitor='val_acuracy',verbose=1,save_best_only=True,mode='max')\n\n#reshape data according to weights\n\nX_train = X_train.reshape(5216,3,150,150)\nX_test = X_test.reshape(624,3,150,150)\nX_val = X_val.reshape(16,3,150,150)\n\n\ndef swish_activation(x):\n\treturn (k.sigmoid(x)*x)\n\nmodel = 
Sequential()\n\nmodel.add(Conv2D(16,(3,3),activation='relu',padding='same',input_shape=(3,150,150)))\nmodel.add(Conv2D(16,(3,3),padding='same',activation='relu'))\nmodel.add(MaxPool2D(pool_size=(2,2)))\n\nmodel.add(Conv2D(32,(3,3),activation='relu',padding='same',input_shape=(3,150,150)))\nmodel.add(Conv2D(32,(3,3),padding='same',activation='relu'))\nmodel.add(MaxPool2D(pool_size=(2,2)))\n\nmodel.add(Conv2D(64,(3,3),activation='relu',padding='same'))\nmodel.add(Conv2D(64,(3,3),padding='same',activation='relu'))\nmodel.add(MaxPool2D(pool_size=(2,2)))\n\nmodel.add(Conv2D(96,(3,3),dilation_rate=(2,2),activation='relu',padding='same'))\nmodel.add(Conv2D(96,(3,3),padding='valid',activation='relu'))\nmodel.add(MaxPool2D(pool_size=(2,2)))\n\nmodel.add(Conv2D(128,(3,3),dilation_rate=(2,2),activation='relu',padding='same'))\nmodel.add(Conv2D(128,(3,3),padding='valid',activation='relu'))\nmodel.add(MaxPool2D(pool_size=(2,2)))\n\nmodel.add(Flatten())\n\nmodel.add(Dense(64,activation=swish_activation))\nmodel.add(Dropout(0.4))\nmodel.add(Dense(2,activation='sigmoid'))\n\nmodel.compile(loss='binary_crossentropy',optimizer=RMSprop(lr=0.00005),metrics=['accuracy'])\n\nprint(model.summary())\n\nbatch_size = 256\nepochs = 6\n\nHistory = model.fit(X_train,Y_train,validation_data=(X_val,Y_val),callbacks=[reduce_lr,check_point],epochs=epochs)\n\nmodel.save('New_Inception.h5')\n\n# history of model accuracy\nplt.plot(History.history['accuracy'])\nplt.plot(History.history['val_accuracy'])\nplt.title('model accuracy')\nplt.ylabel('accuracy')\nplt.xlabel('epoch')\nplt.legend(['train','val'],loc='upper left')\nplt.savefig('model accuracy',format='png')\nplt.show()\n\n# history of model loss\nplt.plot(History.history['loss'])\nplt.plot(History.history['val_loss'])\nplt.title('model loss')\nplt.ylabel('loss')\nplt.xlabel('epoch')\nplt.legend(['train','val'],loc='upper right')\nplt.savefig('model loss',format='png')\nplt.show()\n\n\nprediction = model.predict(X_test)\nprediction = np.argmax(prediction,axis = 1)\nY_True = np.argmax(Y_test,axis=1)\n\nConfusionMatrix = confusion_matrix(Y_True,prediction)\nfig , ax = plot_confusion_matrix(ConfusionMatrix,figsize=(5,5))\nplt.savefig('confusion matrix',format='png')\nplt.show()\n\n\n\n\n\n\n\n\n"
] | [
[
"sklearn.metrics.confusion_matrix",
"numpy.asarray",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.savefig",
"matplotlib.pyplot.plot",
"matplotlib.pyplot.legend",
"matplotlib.pyplot.title",
"numpy.argmax",
"matplotlib.pyplot.ylabel",
"matplotlib.pyplot.show"
]
] |
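A toy sketch of the label-encoding and evaluation pattern used in `Analyser.py` above: one-hot labels via `to_categorical`, class ids recovered with `argmax`, then a confusion matrix. The arrays are placeholders; the `keras.utils.np_utils` import path matches the older standalone Keras the script imports.

```python
import numpy as np
from keras.utils.np_utils import to_categorical
from sklearn.metrics import confusion_matrix

y = np.array([0, 1, 1, 0, 1])
y_onehot = to_categorical(y, 2)                      # shape (5, 2)
probs = np.array([[0.9, 0.1], [0.2, 0.8], [0.4, 0.6],
                  [0.7, 0.3], [0.1, 0.9]])           # stand-in for model.predict()
preds = np.argmax(probs, axis=1)
print(confusion_matrix(np.argmax(y_onehot, axis=1), preds))
```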
ThunderBurster/MDAM | [
"4dfad758d47c5b7cd8f2636becd69dea6bdb1f70"
] | [
"run.py"
] | [
"#!/usr/bin/env python\n\nimport os\nimport json\nimport pprint as pp\nimport time\n\nimport torch\nimport torch.optim as optim\nfrom tensorboard_logger import Logger as TbLogger\n\n\nfrom options import get_options\nfrom train import train_epoch, validate, get_inner_model\nfrom reinforce_baselines import NoBaseline, ExponentialBaseline, CriticBaseline, RolloutBaseline, WarmupBaseline\nfrom nets.attention_model import AttentionModel\n\nfrom utils import torch_load_cpu, load_problem\n\n\ndef run(opts):\n\n # Pretty print the run args\n pp.pprint(vars(opts))\n\n # Set the random seed\n torch.manual_seed(opts.seed)\n\n # Optionally configure tensorboard\n tb_logger = None\n if not opts.no_tensorboard:\n tb_logger = TbLogger(os.path.join(opts.log_dir, \"{}_{}\".format(opts.problem, opts.graph_size), opts.run_name))\n\n os.makedirs(opts.save_dir)\n # Save arguments so exact configuration can always be found\n with open(os.path.join(opts.save_dir, \"args.json\"), 'w') as f:\n json.dump(vars(opts), f, indent=True)\n\n # Set the device\n opts.device = torch.device(\"cuda:0\" if opts.use_cuda else \"cpu\")\n\n # Figure out what's the problem\n problem = load_problem(opts.problem)\n\n # Load data from load_path\n load_data = {}\n assert opts.load_path is None or opts.resume is None, \"Only one of load path and resume can be given\"\n load_path = opts.load_path if opts.load_path is not None else opts.resume\n if load_path is not None:\n print(' [*] Loading data from {}'.format(load_path))\n load_data = torch_load_cpu(load_path)\n\n # Initialize model\n model_class = {\n 'attention': AttentionModel\n }.get(opts.model, None)\n assert model_class is not None, \"Unknown model: {}\".format(model_class)\n model = model_class(\n opts.embedding_dim,\n opts.hidden_dim,\n problem,\n n_encode_layers=opts.n_encode_layers,\n mask_inner=True,\n mask_logits=True,\n normalization=opts.normalization,\n tanh_clipping=opts.tanh_clipping,\n checkpoint_encoder=opts.checkpoint_encoder,\n shrink_size=opts.shrink_size,\n n_paths=opts.n_paths,\n n_EG=opts.n_EG\n ).to(opts.device)\n\n if opts.use_cuda and torch.cuda.device_count() > 1:\n model = torch.nn.DataParallel(model)\n\n # Overwrite model parameters by parameters to load\n model_ = get_inner_model(model)\n model_.load_state_dict({**model_.state_dict(), **load_data.get('model', {})})\n\n # Initialize baseline\n if opts.baseline == 'exponential':\n baseline = ExponentialBaseline(opts.exp_beta)\n elif opts.baseline == 'rollout':\n baseline = RolloutBaseline(model, problem, opts)\n else:\n assert opts.baseline is None, \"Unknown baseline: {}\".format(opts.baseline)\n baseline = NoBaseline()\n\n if opts.bl_warmup_epochs > 0:\n baseline = WarmupBaseline(baseline, opts.bl_warmup_epochs, warmup_exp_beta=opts.exp_beta)\n\n # Load baseline from data, make sure script is called with same type of baseline\n if 'baseline' in load_data:\n baseline.load_state_dict(load_data['baseline'])\n\n # Initialize optimizer\n optimizer = optim.Adam(\n [{'params': model.parameters(), 'lr': opts.lr_model}]\n + (\n [{'params': baseline.get_learnable_parameters(), 'lr': opts.lr_critic}]\n if len(baseline.get_learnable_parameters()) > 0\n else []\n )\n )\n\n # Load optimizer state\n if 'optimizer' in load_data:\n optimizer.load_state_dict(load_data['optimizer'])\n for state in optimizer.state.values():\n for k, v in state.items():\n # if isinstance(v, torch.Tensor):\n if torch.is_tensor(v):\n state[k] = v.to(opts.device)\n\n # Initialize learning rate scheduler, decay by lr_decay once per epoch!\n 
lr_scheduler = optim.lr_scheduler.LambdaLR(optimizer, lambda epoch: opts.lr_decay ** epoch)\n\n # Start the actual training loop\n val_dataset = problem.make_dataset(\n size=opts.graph_size, num_samples=opts.val_size, filename=opts.val_dataset, distribution=opts.data_distribution)\n\n if opts.resume:\n epoch_resume = int(os.path.splitext(os.path.split(opts.resume)[-1])[0].split(\"-\")[1])\n\n torch.set_rng_state(load_data['rng_state'])\n if opts.use_cuda:\n torch.cuda.set_rng_state_all(load_data['cuda_rng_state'])\n # Set the random states\n # Dumping of state was done before epoch callback, so do that now (model is loaded)\n baseline.epoch_callback(model, epoch_resume)\n print(\"Resuming after {}\".format(epoch_resume))\n opts.epoch_start = epoch_resume + 1\n\n if opts.eval_only:\n start = time.perf_counter()\n validate(model, val_dataset, opts)\n end = time.perf_counter()\n print('eval toke total {} s'.format(end - start))\n\n else:\n for epoch in range(opts.epoch_start, opts.epoch_start + opts.n_epochs):\n train_epoch(\n model,\n optimizer,\n baseline,\n lr_scheduler,\n epoch,\n val_dataset,\n problem,\n tb_logger,\n opts\n )\n\n\nif __name__ == \"__main__\":\n run(get_options())\n"
] | [
[
"torch.device",
"torch.cuda.set_rng_state_all",
"torch.is_tensor",
"torch.cuda.device_count",
"torch.manual_seed",
"torch.set_rng_state",
"torch.optim.lr_scheduler.LambdaLR",
"torch.nn.DataParallel"
]
] |
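A minimal sketch of the per-epoch exponential learning-rate decay that `run.py` above sets up with `LambdaLR` (`opts.lr_decay ** epoch`); the model, data, and decay value here are placeholders.

```python
import torch
import torch.optim as optim

model = torch.nn.Linear(4, 1)
lr_decay = 0.99                                       # stand-in for opts.lr_decay
optimizer = optim.Adam(model.parameters(), lr=1e-3)
lr_scheduler = optim.lr_scheduler.LambdaLR(optimizer, lambda epoch: lr_decay ** epoch)

for epoch in range(3):
    loss = model(torch.randn(2, 4)).sum()             # placeholder training step
    loss.backward()
    optimizer.step()
    optimizer.zero_grad()
    lr_scheduler.step()                               # decay once per epoch
    print(epoch, lr_scheduler.get_last_lr())
```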
kawaremu/H4ckT0b3rF3st-2k20 | [
"a5f253822806fe9d31a77659cea6b5f216258fd1"
] | [
"projects/AutoBadMeme/ABM.py"
] | [
"# -*- coding: utf-8 -*-\n\n'''\nWellcome to AutoBadMeme, the first and only bad meme generator\n'''\n\n#Task one : put the bare minimum to load an image and write text on it. DONE\n\n#Task two : make the image getter read random images from one directory.Done\n\n#Task three : find the position where you write the meme stuff. Semi-Done\n\n'''partie import'''\nimport os\nimport cv2\nimport numpy as np\nimport csv\n\n'''Partie initialisation de variables'''\ndirMemeFolder = 'memeFolder/'\ndirText = ''\ncsvfile = ''\nfontSize = 0\ntext1 = \"a\"\ntext2 = \"a\"\nmemeGen=0\n\n\n'''Partie fonctions'''\ndef imgGetter(directory,csvfile):\n '''\nWe use a CSV file as a way to store information related to the meme folder, like\nthe number of meme templates, the positions of the text in the meme folder as well\nas other informations we might use in the future.\nThe relevant data is the directory of the meme folder as well as the name of the\ncsv file\n '''\n taille = os.listdir(directory)\n taille = len(taille)\n\n pc = np.random.randint(0,taille-1)\n gtFile = open(directory + csvfile + '.csv') # annotations file\n gtReader = csv.reader(gtFile, delimiter=';') # csv parser for annotations file\n gtReader.__next__() # skip header\n liste = []\n for row in gtReader:\n liste.append(row)\n \n meme = liste[pc]\n #Get the position of the text fields\n #textPos2 only gets relevant data when using two-panel memes\n textPos1 = [int(meme[1]),int(meme[2])]\n textPos2 = [int(meme[3]),int(meme[4])]\n #Get the image\n img = cv2.imread(directory+meme[0]+'.jpg',1)\n return img,textPos1,textPos2\n\ndef putText(img,text1 = \"\", text2 = \"\",textPos1=\"\",textPos2=\"\"):\n size = img.shape\n font = cv2.FONT_HERSHEY_SIMPLEX\n #This function serves to inject text fields into a still image\n #and as a result produces a meme\n for i in range(len(text1)):\n img2 = cv2.putText(img,text1[i].strip(),(textPos1[0],textPos1[1]+(i*20)),font,fontSize,(0,0,0),2,cv2.LINE_AA)\n \n for i in range(len(text2)):\n img2 = cv2.putText(img,text2[i].strip(),(textPos2[0],textPos2[1]+(i*20)),font,fontSize,(0,0,0),2,cv2.LINE_AA)\n \n img2 = cv2.putText(img2,'made by autoBadMeme',(int(size[0]*0.8),int(size[1]*0.85))\n ,font,0.5,(0,0,0),2,cv2.LINE_AA)\n return img2\n\ndef imgSaver(img):\n #Placeholder code for saving an image\n fileName = 'yourMeme/yourMeme.jpg'\n cv2.imwrite(fileName, img)\n return 0\n\ndef getMemeTxt(directory):\n #This function serves as a text getter, it gets lines from a meme folder.\n f = open(directory+\"whenmemes.txt\",\"r\")\n txt = f.readlines()\n f.close()\n pc = np.random.randint(0,len(txt))\n return txt[pc]\n\n'''Partie code'''\n\nwhile(int(memeGen) !=1 and int(memeGen) !=2):\n memeGen = input('''Please enter which meme you wanna generate?:\n1-a one panel meme (the most common meme format)\n2-a two-panel meme(Example : the drake meme) : ''')\n if int(memeGen) ==1:\n dirText = \"textFolder/onepanel/\"\n dirMemeFolder+='onepanel/'\n text1 = getMemeTxt(dirText)\n text2 = \"\"\n csvfile=\"onePanel\"\n fontSize = 0.9\n\n elif int(memeGen) == 2:\n dirText = \"textFolder/twopanel/\"\n dirMemeFolder+='twopanel/'\n while text1==text2:\n text1 = getMemeTxt(dirText)\n text2 = getMemeTxt(dirText)\n if text1 == \"Linux\":\n text1,text2 = text2,text1\n csvfile=\"twoPanels\"\n fontSize = 0.6\n else:\n print(\"Incorrect\")\n\nfunline = text1.split('#',maxsplit=-1)\nfunline2 = text2.split('#',maxsplit=-1)\n\nimg,textPos1,textPos2 = imgGetter(dirMemeFolder,csvfile)\n\nimg2 = 
putText(img,text1=funline,text2=funline2,textPos1=textPos1,textPos2=textPos2)\n\n\nimgSaver(img2)\n"
] | [
[
"numpy.random.randint"
]
] |
RaMdsC/evalys | [
"d2693a25209b0f547276259b10e42c24bf04c142"
] | [
"evalys/metrics.py"
] | [
"import pandas as pd\nfrom math import sqrt\n\n\ndef cumulative_waiting_time(dataframe):\n '''\n Compute the cumulative waiting time on the given dataframe\n\n :dataframe: a DataFrame that contains a \"starting_time\" and a\n \"waiting_time\" column.\n '''\n # Avoid side effect\n df = pd.DataFrame.copy(dataframe)\n df['starting_time'] = df['submission_time'] + df['waiting_time']\n df_sorted_by_starting_time = df.sort_values(by='starting_time')\n\n wt_cumsum = df_sorted_by_starting_time.waiting_time.cumsum()\n wt_cumsum.name = \"cumulative waiting time\"\n # Sort by starting time\n wt_cumsum.index = df_sorted_by_starting_time['starting_time']\n\n return wt_cumsum\n\n\ndef compute_load(dataframe, col_begin, col_end, col_cumsum,\n begin_time=0, end_time=None):\n \"\"\"\n Compute the load of the `col_cumsum` columns between events from\n `col_begin` to `col_end`. In practice it is used to compute the queue\n load and the cluster load (utilisation).\n\n :returns: a load dataframe of all events indexed by time with a `load`\n and an `area` column.\n \"\"\"\n # Avoid side effect\n df = pd.DataFrame.copy(dataframe)\n df['starting_time'] = df['submission_time'] + df['waiting_time']\n df['finish_time'] = df['starting_time'] + df['execution_time']\n\n df = df.sort_values(by=col_begin)\n\n # Cleaning:\n # - still running jobs (runtime = -1)\n # - not scheduled jobs (wait = -1)\n # - no procs allocated (proc_alloc = -1)\n max_time = df['finish_time'].max() + 1000\n df.ix[df['execution_time'] == -1, 'finish_time'] = max_time\n df.ix[df['execution_time'] == -1, 'starting_time'] = max_time\n if 'proc_alloc' in df:\n df = df[df['proc_alloc'] > 0]\n\n # Create a list of start and stop event associated to the number of\n # proc allocation changes: starts add procs, stop remove procs\n event_columns = ['time', col_cumsum, 'jobID']\n start_event_df = pd.concat([df[col_begin],\n df[col_cumsum],\n df['jobID']],\n axis=1)\n start_event_df.columns = event_columns\n # Stop event give negative proc_alloc value\n stop_event_df = pd.concat([df[col_end],\n - df[col_cumsum],\n df['jobID']],\n axis=1)\n stop_event_df.columns = event_columns\n\n # merge events and sort them\n event_df = start_event_df.append(\n stop_event_df,\n ignore_index=True).sort_values(by='time').reset_index(drop=True)\n\n # sum the event that happend at the same time and cummulate events\n load_df = pd.DataFrame(\n event_df.groupby(event_df['time'])[col_cumsum].sum().cumsum(),\n columns=[col_cumsum])\n load_df[\"time\"] = load_df.index\n\n # compute area\n load_df[\"area\"] = - load_df[\"time\"].diff(-1) * load_df[col_cumsum]\n del load_df[\"time\"]\n\n load_df.columns = [\"load\", \"area\"]\n\n return load_df\n\n\ndef _load_insert_element_if_necessary(load_df, at):\n \"\"\"\n Insert an event at the specified point that conserve data consistency\n for \"area\" and \"load\" values\n \"\"\"\n if len(load_df[load_df.time == at]) == 0:\n prev_el = load_df[load_df.time <= at].tail(1)\n new_el = prev_el.copy()\n next_el = load_df[load_df.time >= at].head(1)\n new_el.time = at\n new_el.area = float(new_el.load) * float(next_el.time - at)\n load_df.loc[prev_el.index, \"area\"] = \\\n float(prev_el.load) * float(at - prev_el.time)\n load_df.loc[len(load_df)] = [\n float(new_el.time),\n float(new_el.load),\n float(new_el.area)]\n load_df = load_df.sort_values(by=[\"time\"])\n return load_df\n\n\ndef load_mean(df, begin=None, end=None):\n \"\"\" Compute the mean load area from begin to end. 
\"\"\"\n load_df = df.reset_index()\n max_to = max(load_df.time)\n if end is None:\n end = max_to\n elif end > max_to:\n raise ValueError(\"computing mean load after the \"\n \"last event ({}) is NOT IMPLEMENTED\".format(max_to))\n min_to = load_df.time.iloc[0]\n if begin is None:\n begin = min_to\n elif begin < min_to:\n raise ValueError(\"computing mean load befor the \"\n \"first event ({}) is NOT IMPLEMENTED\".format(min_to))\n\n load_df = _load_insert_element_if_necessary(load_df, begin)\n load_df = _load_insert_element_if_necessary(load_df, end)\n\n u = load_df[(load_df.time < end) & (begin <= load_df.time)]\n\n return u.area.sum()/(end - begin)\n\n\ndef fragmentation(free_resources_gaps, p=2):\n \"\"\"\n Input is a resource indexed list where each element is a numpy\n array of free slots.\n\n This metrics definition comes from Gher and Shneider CCGRID 2009.\n \"\"\"\n f = free_resources_gaps\n frag = pd.Series()\n for i, fi in enumerate(f):\n if fi.size == 0:\n frag_i = 0\n else:\n frag_i = 1 - (sum(fi**p) / sum(fi)**p)\n frag.set_value(i, frag_i)\n return frag\n\n\ndef fragmentation_reis(free_resources_gaps, time, p=2):\n f = free_resources_gaps\n frag = pd.Series()\n for i, fi in enumerate(f):\n if fi.size == 0:\n frag_i = 0\n else:\n frag_i = 1 - (sqrt(sum(fi**p)) / time * len(f))\n frag.set_value(i, frag_i)\n return frag\n"
] | [
[
"pandas.DataFrame.copy",
"pandas.Series",
"pandas.concat"
]
] |
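A toy call of `cumulative_waiting_time` from `evalys/metrics.py` above, assuming it is importable from that path; the jobs DataFrame only needs the `submission_time` and `waiting_time` columns named in its docstring.

```python
import pandas as pd
from evalys.metrics import cumulative_waiting_time   # path per the row's file_path

jobs = pd.DataFrame({
    'submission_time': [0, 1, 2],
    'waiting_time':    [5, 2, 4],
})
# Returns a Series of cumulative waiting time indexed by each job's starting time.
print(cumulative_waiting_time(jobs))
```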
kanwatchara-k/r_lamol | [
"8fd90027989a853e36c268cb1b87368e9078b00b"
] | [
"train.py"
] | [
"import torch\nfrom torch.utils.data import DataLoader\nfrom torch import nn\nfrom pytorch_transformers import AdamW, WEIGHTS_NAME, WarmupLinearSchedule\nimport csv\nimport numpy as np\nimport os\nimport logging\nfrom fp16 import FP16_Module, FP16_Optimizer\nfrom parallel import DataParallelModel, DataParallelCriterion\nfrom collections import OrderedDict\nfrom utils import *\nfrom settings import args, TASK_DICT, init_logging, MODEL_CONFIG, MODEL_CLASS, SPECIAL_TOKENS, CONFIG_CLASS\nfrom settings import TOKENIZER, SPECIAL_TOKEN_IDS, FILL_VAL, SAVE_NAME, FINAL_SAVE_NAME, TOKENS_WEIGHT, CONFIG_NAME\nfrom scheduler import AnnealingLR\nfrom regularizers import REG_TYPES, REG_TYPE_KEYS, Weight_Regularized_AdamW, Weight_Regularized_SGD\nfrom torch.nn import CrossEntropyLoss\nimport random\nlogger = logging.getLogger(__name__)\n\n\ndef train(task_ids, model):\n# def train(task_ids, model, first_task):\n\n tasks = [args.tasks[task_id] for task_id in task_ids]\n\n logger.info(\"start to train { task: %s, seq train type: %s }\" % (tasks, args.seq_train_type))\n model_dir = get_model_dir(tasks)\n make_dir(model_dir)\n\n train_dataset = [TASK_DICT[t][\"train\"] for t in tasks]\n train_extra_data = []\n if \"lll\" in args.seq_train_type and task_ids[0] > 0 and not args.skip_tasks:\n prev_task = args.tasks[task_ids[0]-1]\n with torch.no_grad():\n create_extra_data2(tasks[0], prev_task, model, train_extra_data)\n elif \"gem\" in args.seq_train_type and task_ids[0] > 0: \n get_real_data(tasks[0], train_extra_data, accum=False, encode=True)\n args.memory_data.append(train_extra_data)\n train_extra_data = []\n logger.info('extra training data size: {}'.format(len(train_extra_data)))\n\n if not model:\n # which_model_to_load = model_dir if os.path.isfile(os.path.join(model_dir, FINAL_SAVE_NAME)) else args.model_name\n model = MODEL_CLASS.from_pretrained(args.model_name).cuda()\n model.resize_token_embeddings(len(TOKENIZER))\n if not args.fp32:\n model = FP16_Module(model)\n\n gen_token = get_gen_token(tasks[0])\n TOKENIZER.add_tokens([gen_token])\n TOKENIZER.save_pretrained(model_dir)\n SPECIAL_TOKENS[tasks[0]] = gen_token\n SPECIAL_TOKEN_IDS[tasks[0]] = TOKENIZER.convert_tokens_to_ids(gen_token)\n logger.info('gen token = {} , gen token id = {}'.format(gen_token, SPECIAL_TOKEN_IDS[tasks[0]]))\n MODEL_CONFIG.vocab_size = len(TOKENIZER)\n MODEL_CONFIG.to_json_file(os.path.join(model_dir,CONFIG_NAME))\n global TOKENS_WEIGHT\n if len(TOKENIZER) != TOKENS_WEIGHT.shape[0]:\n TOKENS_WEIGHT = torch.cat((TOKENS_WEIGHT, torch.ones([1]).cuda()))\n\n if args.skip_tasks and len(tasks) == 1:\n logger.info(\"*********** skip task: {} ***********\".format(tasks[0]))\n if tasks[0] in args.skip_tasks:\n if len(args.skip_tasks) == 1:\n model_dir = get_model_dir(tasks)\n model_path = os.path.join(model_dir, FINAL_SAVE_NAME)\n config_path = os.path.join(model_dir,CONFIG_NAME)\n model_config = CONFIG_CLASS.from_json_file(config_path)\n model = MODEL_CLASS(model_config).cuda()\n state_dict = torch.load(model_path)\n model.load_state_dict(state_dict)\n if not args.fp32:\n model = FP16_Module(model)\n if args.seq_train_type in REG_TYPE_KEYS:\n logger.info(\"calulating reg_params ...\")\n train_qadata = QADataset(train_dataset, \"train\", SPECIAL_TOKEN_IDS[tasks[0]], train_extra_data)\n max_train_batch_size = max(len(train_qadata) // args.min_n_steps, args.min_batch_size)\n train_dataloader = create_dataloader(train_qadata, \"train\", max_train_batch_size)\n parallel_model = DataParallelModel(WrapModel(model), args.device_ids)\n 
regularizer = REG_TYPES[args.seq_train_type](model, parallel_model, [train_dataloader], tasks[0])\n regularizer.task_start_do()\n regularizer.task_end_do()\n torch.save(model.state_dict(), os.path.join(model_dir, FINAL_SAVE_NAME))\n logger.info(\"done reg_params!\")\n args.skip_tasks.remove(tasks[0])\n return model\n\n model.resize_token_embeddings(len(TOKENIZER))\n\n if not args.fp32: # again because resize_token_embeddings makes embedding layer fp32\n model = FP16_Module(model)\n\n parallel_model = DataParallelModel(WrapModel(model), args.device_ids)\n\n train_qadata = QADataset(train_dataset, \"train\", SPECIAL_TOKEN_IDS[tasks[0]], train_extra_data)\n max_train_batch_size = max(len(train_qadata) // args.min_n_steps, args.min_batch_size)\n# max_train_batch_size = 1\n train_dataloader = create_dataloader(train_qadata, \"train\", max_train_batch_size)\n if not args.unbound and args.seq_train_type != \"multitask\":\n #n_train_epochs = TASK_DICT[tasks[0]][\"n_train_epochs\"]\n n_train_epochs = args.n_train_epochs[tasks[0]]\n else:\n n_train_epochs = args.n_train_epochs['_'.join(tasks)]\n n_train_optimization_steps = len(train_qadata) * n_train_epochs\n logger.info('len of train dataset: {} , max train batch size {} , num of opt steps: {}'.format(\n len(train_qadata), max_train_batch_size, n_train_optimization_steps))\n\n param_optimizer = list(model.named_parameters())\n no_decay = ['bias', 'LayerNorm.bias', 'LayerNorm.weight']\n\n optimizer_grouped_parameters = [\n {'params': [p for n, p in param_optimizer if not any(nd in n for nd in no_decay)], 'weight_decay': args.weight_decay},\n {'params': [p for n, p in param_optimizer if any(nd in n for nd in no_decay)], 'weight_decay': 0.0}\n ]\n \n if \"gem\" in args.seq_train_type:\n model.task_id = task_ids[0]\n if not hasattr(model, \"grad_dims\"):\n model.grad_dims = []\n for param in model.parameters():\n model.grad_dims.append(param.data.numel())\n if not hasattr(model, \"grads\"):\n model.grads = torch.zeros(sum(model.grad_dims),len(args.tasks))\n model.grads = model.grads.cuda()\n\n if args.seq_train_type in REG_TYPE_KEYS:\n optimizer = Weight_Regularized_AdamW(optimizer_grouped_parameters, lr=args.learning_rate, eps=args.adam_epsilon)\n else:\n optimizer = AdamW(optimizer_grouped_parameters, lr=args.learning_rate, eps=args.adam_epsilon)\n if not args.fp32:\n optimizer = FP16_Optimizer(optimizer, static_loss_scale=None, dynamic_loss_scale=True,\n dynamic_loss_args={'scale_window': 100, 'min_scale': 1, 'delayed_shift': 2})\n\n scheduler = AnnealingLR(optimizer, start_lr=args.learning_rate, warmup_iter=int(args.n_warmup_ratio*len(train_qadata)),\n num_iters=int(n_train_optimization_steps), decay_style=args.decay_style)\n train_loss_fct = DataParallelCriterion(CrossEntropyLoss(ignore_index=FILL_VAL, weight=TOKENS_WEIGHT), args.device_ids)\n\n if args.seq_train_type in REG_TYPE_KEYS:\n copy_train_dataloader = create_dataloader(train_qadata, \"train\", max_train_batch_size)\n prev_task = args.tasks[task_ids[0]-1]\n regularizer = REG_TYPES[args.seq_train_type](model, parallel_model, [copy_train_dataloader], tasks[0], prev_task)\n regularizer.task_start_do()\n\n tot_n_steps = 0\n train_once = TrainStep(model, optimizer, scheduler)\n if \"gem\" in args.seq_train_type and task_ids[0] != 0:\n gem_step = GEMStep(model, parallel_model, train_loss_fct, optimizer)\n model.train()\n for ep in range(n_train_epochs):\n cum_loss, cum_qa_loss, cum_lm_loss, cur_n_inputs = 0, 0, 0, 0\n for n_steps, (_, _, cqa, _, Y, gen_X, gen_Y) in 
enumerate(train_dataloader):\n\n n_inputs = sum(_cqa.shape[0] for _cqa in cqa)\n\n for i in range(len(cqa)):\n cqa[i] = (cqa[i].to(args.device_ids[i]),)\n Y[i] = Y[i].to(args.device_ids[i])\n gen_X[i] = (gen_X[i].to(args.device_ids[i]),)\n gen_Y[i] = gen_Y[i].to(args.device_ids[i])\n\n losses = get_losses(parallel_model, cqa, Y, gen_X, gen_Y, train_loss_fct)\n loss = sum(losses)\n if \"gem\" in args.seq_train_type and task_ids[0] != 0:\n gem_step(task_ids[0])\n train_once(loss, n_inputs)\n qa_loss = losses[0].item() * n_inputs\n lm_loss = losses[1].item() * n_inputs\n cum_loss += (qa_loss + lm_loss)\n cum_qa_loss += qa_loss\n cum_lm_loss += lm_loss\n cur_n_inputs += n_inputs\n\n if (n_steps + 1 ) % args.logging_steps == 0:\n logger.info('progress {:.3f} , lr {:.1E} , loss {:.3f} , qa loss {:.3f} , lm loss {:.3f} , avg batch size {:.1f}'.format(\n ep + cur_n_inputs/len(train_qadata), scheduler.get_lr(), cum_loss/cur_n_inputs, cum_qa_loss/cur_n_inputs, cum_lm_loss/cur_n_inputs,\n cur_n_inputs/(n_steps + 1)\n ))\n\n torch.save(model.state_dict(), os.path.join(model_dir, SAVE_NAME+str(ep+1)))\n tot_n_steps += (n_steps + 1)\n logger.info('epoch {}/{} done , tot steps {} , lr {:.1E} , loss {:.2f} , qa loss {:.2f} , lm loss {:.2f} , avg batch size {:.1f}'.format(\n ep+1, n_train_epochs, tot_n_steps, scheduler.get_lr(), cum_loss/cur_n_inputs, cum_qa_loss/cur_n_inputs, cum_lm_loss/cur_n_inputs, cur_n_inputs/(n_steps+1)\n ))\n\n # task end do for reg\n if args.seq_train_type in REG_TYPE_KEYS:\n regularizer.task_end_do()\n torch.save(model.state_dict(), os.path.join(model_dir, FINAL_SAVE_NAME))\n\n return model\n\n\nif __name__ == '__main__':\n\n if not args.debug:\n logging.getLogger(\"pytorch_transformers\").setLevel(logging.WARNING)\n logging.getLogger(\"pytorch_transformers.tokenization_utils\").setLevel(logging.CRITICAL)\n\n make_dir(args.model_dir_root)\n\n init_logging(os.path.join(args.model_dir_root, 'log_train.txt'))\n logger.info('args = {}'.format(str(args)))\n\n model = None\n if args.seq_train_type == \"multitask\":\n model = train(list(range(len(args.tasks))), model)\n else:\n if args.unbound:\n TASK_DICT = lll_unbound_setting(split_size=args.unbound)\n first_task = True\n\n for task_id in range(len(args.tasks)):\n model = train([task_id], model)\n# model = train([task_id], model,first_task)\n\n first_task = False\n"
] | [
[
"torch.no_grad",
"torch.nn.CrossEntropyLoss",
"torch.load",
"torch.ones"
]
] |
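A sketch of the bias/LayerNorm weight-decay exclusion that `train()` above applies when grouping parameters; a toy module and `torch.optim.AdamW` stand in for the script's model and `pytorch_transformers.AdamW`, but the grouping logic is the same.

```python
import torch

class Tiny(torch.nn.Module):
    def __init__(self):
        super().__init__()
        self.linear = torch.nn.Linear(8, 8)
        self.LayerNorm = torch.nn.LayerNorm(8)

model = Tiny()
no_decay = ['bias', 'LayerNorm.bias', 'LayerNorm.weight']
params = list(model.named_parameters())
grouped = [
    {'params': [p for n, p in params if not any(nd in n for nd in no_decay)],
     'weight_decay': 0.01},    # decayed: linear.weight
    {'params': [p for n, p in params if any(nd in n for nd in no_decay)],
     'weight_decay': 0.0},     # exempt: biases and LayerNorm.weight
]
optimizer = torch.optim.AdamW(grouped, lr=1e-4)
```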
Enigma-li/Sketch2CAD | [
"fb863cad17343b0729bcab0177d125d110c56fa2"
] | [
"networkTraining/utils/util_funcs.py"
] | [
"#\n# Project Sketch2CAD\n#\n# Author: Changjian Li ([email protected]),\n# Copyright (c) 2019. All Rights Reserved.\n#\n# ==============================================================================\n\"\"\"Network training utils.\n\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\nfrom __future__ import unicode_literals\n\nimport tensorflow as tf\nimport logging\n\n# util logger initialization\nutil_logger = logging.getLogger('main.utils')\n\n\ndef slice_tensor(tensor1, tensor2):\n \"\"\"Slice a new tensor from tensor1 with same H*W shape of tensor2.\n :param tensor1: bigger tensor.\n :param tensor2: smaller tensor.\n :return: sliced tensor.\n \"\"\"\n with tf.name_scope(\"slice_tenosr\") as _:\n t1_shape = tf.shape(tensor1)\n t2_shape = tf.shape(tensor2)\n\n offsets = [0, (t1_shape[1] - t2_shape[1]) // 2, (t1_shape[2] - t2_shape[2]) // 2, 0]\n size = [-1, t2_shape[1], t2_shape[2], -1]\n return tf.slice(tensor1, offsets, size)\n\n\ndef make_dir(folder_fn):\n \"\"\"Create new folder.\n :param folder_fn: folder name.\n :return:\n \"\"\"\n if tf.gfile.Exists(folder_fn):\n tf.gfile.DeleteRecursively(folder_fn)\n tf.gfile.MakeDirs(folder_fn)\n\n\ndef dump_params(path, params):\n \"\"\"Output all parameters.\n :param path: writen file.\n :param params: parameter dictionary.\n :return:\n \"\"\"\n util_logger.info('Training settings:')\n with open(path + r'/params.txt', 'w') as f:\n for param in params:\n f.write('{}: {}\\n'.format(param, params[param]))\n util_logger.info('{}: {}'.format(param, params[param]))\n\n\ndef cropconcat_layer(tensor1, tensor2, concat_dim=1, name=None):\n \"\"\"crop tensor1 to have same H,W size with tensor2 and concat them together, used in network building.\n :param tensor1: input tensor bigger one.\n :param tensor2: input smaller one.\n :param concat_dim: concatenate dimension.\n :param name: layer name.\n :return: concatenated tensor.\n \"\"\"\n with tf.name_scope(name) as _:\n t1_shape = tensor1.get_shape().as_list()\n t2_shape = tensor2.get_shape().as_list()\n\n if t1_shape[1] != t2_shape[1] and t1_shape[2] != t2_shape[2]:\n offsets = [0, (t1_shape[1] - t2_shape[1]) // 2, (t1_shape[2] - t2_shape[2]) // 2, 0]\n size = [-1, t2_shape[1], t2_shape[2], -1]\n t1_crop = tf.slice(tensor1, offsets, size)\n output = tf.concat([t1_crop, tensor2], concat_dim)\n else:\n output = tf.concat([tensor1, tensor2], concat_dim)\n\n return output\n\n\ndef concate_layers(tensor1, tensor2, tensor3, concat_dim=1, name=None):\n \"\"\" Concatenate tensors\n :param tensor1: main tensor\n :param tensor2: concat1\n :param tensor3: concat2\n :param concat_dim: concatenate dimension\n :param name: ops name\n :return: concated layer\n \"\"\"\n with tf.name_scope(name) as _:\n output = tf.concat([tensor1, tensor2, tensor3], concat_dim)\n\n return output\n"
] | [
[
"tensorflow.shape",
"tensorflow.concat",
"tensorflow.gfile.Exists",
"tensorflow.gfile.MakeDirs",
"tensorflow.gfile.DeleteRecursively",
"tensorflow.name_scope",
"tensorflow.slice"
]
] |
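An illustration of the center crop that `slice_tensor` above computes, with toy data and the function's `(t1 - t2) // 2` offset arithmetic inlined; this runs under eager TensorFlow 2 as well as the TF1-style graph code in the file.

```python
import tensorflow as tf

# Crop a 1x5x5x1 tensor down to 1x3x3x1, centered, the way slice_tensor does.
big = tf.reshape(tf.range(25, dtype=tf.float32), (1, 5, 5, 1))
offsets = [0, (5 - 3) // 2, (5 - 3) // 2, 0]          # center the crop spatially
cropped = tf.slice(big, offsets, [-1, 3, 3, -1])      # -1 keeps batch and channels
print(cropped.shape)                                  # (1, 3, 3, 1)
```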
erekgit/parlai | [
"5d11848316b0b4fe7bf28ca19b215a37004ec668"
] | [
"parlai/core/torch_generator_agent.py"
] | [
"#!/usr/bin/env python3\n\n# Copyright (c) Facebook, Inc. and its affiliates.\n# This source code is licensed under the MIT license found in the\n# LICENSE file in the root directory of this source tree.\n\n\n\"\"\"\nGeneric PyTorch-based Generator agent.\n\nImplements quite a bit of boilerplate, including forced-decoding loss and a tree search.\n\nContains the following utilities:\n\n* `ref:TorchGeneratorAgent` class, which serves as a useful parent for generative torch\n agents.\n* Beam class which provides some generic beam functionality for classes to use\n\"\"\"\n\nfrom abc import ABC, abstractmethod\nfrom typing import TypeVar, List, Dict, Optional, Tuple, Set, Iterable\nimport math\nfrom operator import attrgetter\n\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\n\nfrom parlai.core.opt import Opt\nfrom parlai.utils.distributed import is_distributed, sync_parameters\nfrom parlai.core.torch_agent import TorchAgent, Batch, Output, DictionaryAgent\nfrom parlai.utils.misc import warn_once\nimport parlai.utils.logging as logging\nfrom parlai.core.metrics import SumMetric, AverageMetric, BleuMetric, FairseqBleuMetric\nfrom parlai.utils.fp16 import FP16SafeCrossEntropy\nfrom parlai.utils.torch import (\n neginf,\n total_parameters,\n trainable_parameters,\n PipelineHelper,\n)\n\n\ntry:\n from nltk.translate import bleu_score as nltkbleu\n\nexcept ImportError:\n nltkbleu = None\n\ntry:\n from fairseq import bleu as fairseq_bleu\n\nexcept ImportError:\n fairseq_bleu = None\n\n\nclass SearchBlacklist(object):\n \"\"\"\n Search blacklist facilitates blocking ngrams from being generated.\n \"\"\"\n\n def __init__(self, dict_agent: DictionaryAgent) -> None:\n self.dict = dict_agent\n self._phrases: Set[str] = set()\n self._phrase_ngrams: Dict[int, List[List[int]]] = {}\n\n def __bool__(self):\n return bool(self._phrases)\n\n def clear(self) -> None:\n self._phrases = set()\n self._phrase_ngrams = {}\n\n def _add_literal(self, phrase_literal: str):\n if phrase_literal in self._phrases:\n return\n ngram = self.dict.txt2vec(phrase_literal)\n self._phrases.add(phrase_literal)\n logging.debug(f\"Adding '{phrase_literal}' to the beam blacklist {ngram}\")\n l = len(ngram)\n if l not in self._phrase_ngrams:\n self._phrase_ngrams[l] = []\n self._phrase_ngrams[l].append(ngram)\n\n def add(self, phrase: str):\n phrase = phrase.strip()\n if not phrase:\n return\n self._add_literal(phrase)\n self._add_literal(phrase + \"s\")\n self._add_literal(phrase.lower())\n self._add_literal(phrase.lower() + \"s\")\n self._add_literal(phrase.upper())\n self._add_literal(phrase.upper() + \"S\")\n self._add_literal(phrase.title())\n self._add_literal(phrase.title() + \"S\")\n self._add_literal(phrase[0].upper() + phrase[1:])\n self._add_literal(phrase[0].upper() + phrase[1:] + \"s\")\n self._add_literal(phrase[0].upper() + phrase[1:].lower())\n self._add_literal(phrase[0].upper() + phrase[1:].lower() + \"s\")\n\n def items(self) -> Iterable[Tuple[int, List[List[int]]]]:\n return self._phrase_ngrams.items()\n\n\nTSType = TypeVar('TSType', bound='TreeSearch')\n\n\nclass TorchGeneratorModel(nn.Module, ABC):\n \"\"\"\n Abstract TorchGeneratorModel.\n\n This interface expects you to implement model with the following reqs:\n\n :attribute model.encoder:\n takes input returns tuple (enc_out, enc_hidden, attn_mask)\n\n :attribute model.decoder:\n takes decoder params and returns decoder outputs after attn\n\n :attribute model.output:\n takes decoder outputs and returns distr over dictionary\n \"\"\"\n\n def 
__init__(\n self,\n padding_idx=0,\n start_idx=1,\n end_idx=2,\n unknown_idx=3,\n input_dropout=0,\n longest_label=1,\n ):\n super().__init__()\n self.NULL_IDX = padding_idx\n self.END_IDX = end_idx\n self.register_buffer('START', torch.LongTensor([start_idx]))\n self.longest_label = longest_label\n\n def decode_forced(self, encoder_states, ys):\n \"\"\"\n Decode with a fixed, true sequence, computing loss.\n\n Useful for training, or ranking fixed candidates.\n\n :param ys:\n the prediction targets. Contains both the start and end tokens.\n\n :type ys:\n LongTensor[bsz, time]\n\n :param encoder_states:\n Output of the encoder. Model specific types.\n\n :type encoder_states:\n model specific\n\n :return:\n pair (logits, choices) containing the logits and MLE predictions\n\n :rtype:\n (FloatTensor[bsz, ys, vocab], LongTensor[bsz, ys])\n \"\"\"\n bsz = ys.size(0)\n seqlen = ys.size(1)\n inputs = ys.narrow(1, 0, seqlen - 1)\n inputs = torch.cat([self.START.detach().expand(bsz, 1), inputs], 1)\n latent, _ = self.decoder(inputs, encoder_states)\n logits = self.output(latent)\n _, preds = logits.max(dim=2)\n return logits, preds\n\n @abstractmethod\n def reorder_encoder_states(self, encoder_states, indices):\n \"\"\"\n Reorder encoder states according to a new set of indices.\n\n This is an abstract method, and *must* be implemented by the user.\n\n Its purpose is to provide beam search with a model-agnostic interface for\n beam search. For example, this method is used to sort hypotheses,\n expand beams, etc.\n\n For example, assume that encoder_states is an bsz x 1 tensor of values\n\n .. code-block:: python\n\n indices = [0, 2, 2]\n encoder_states = [[0.1]\n [0.2]\n [0.3]]\n\n then the output will be\n\n .. code-block:: python\n\n output = [[0.1]\n [0.3]\n [0.3]]\n\n :param encoder_states:\n output from encoder. type is model specific.\n\n :type encoder_states:\n model specific\n\n :param indices:\n the indices to select over. The user must support non-tensor\n inputs.\n\n :type indices: list[int]\n\n :return:\n The re-ordered encoder states. It should be of the same type as\n encoder states, and it must be a valid input to the decoder.\n\n :rtype:\n model specific\n \"\"\"\n pass\n\n @abstractmethod\n def reorder_decoder_incremental_state(self, incremental_state, inds):\n \"\"\"\n Reorder incremental state for the decoder.\n\n Used to expand selected beams in beam search. Unlike reorder_encoder_states,\n implementing this method is optional. However, without incremental decoding,\n decoding a single beam becomes O(n^2) instead of O(n), which can make\n beam search impractically slow.\n\n In order to fall back to non-incremental decoding, just return None from this\n method.\n\n :param incremental_state:\n second output of model.decoder\n :type incremental_state:\n model specific\n :param inds:\n indices to select and reorder over.\n :type inds:\n LongTensor[n]\n\n :return:\n The re-ordered decoder incremental states. It should be the same\n type as incremental_state, and usable as an input to the decoder.\n This method should return None if the model does not support\n incremental decoding.\n\n :rtype:\n model specific\n \"\"\"\n pass\n\n def forward(self, *xs, ys=None, prev_enc=None, maxlen=None, bsz=None):\n \"\"\"\n Get output predictions from the model.\n\n :param xs:\n input to the encoder\n :type xs:\n LongTensor[bsz, seqlen]\n :param ys:\n Expected output from the decoder. 
Used\n for teacher forcing to calculate loss.\n :type ys:\n LongTensor[bsz, outlen]\n :param prev_enc:\n if you know you'll pass in the same xs multiple times, you can pass\n in the encoder output from the last forward pass to skip\n recalcuating the same encoder output.\n :param maxlen:\n max number of tokens to decode. if not set, will use the length of\n the longest label this model has seen. ignored when ys is not None.\n :param bsz:\n if ys is not provided, then you must specify the bsz for greedy\n decoding.\n\n :return:\n (scores, candidate_scores, encoder_states) tuple\n\n - scores contains the model's predicted token scores.\n (FloatTensor[bsz, seqlen, num_features])\n - candidate_scores are the score the model assigned to each candidate.\n (FloatTensor[bsz, num_cands])\n - encoder_states are the output of model.encoder. Model specific types.\n Feed this back in to skip encoding on the next call.\n \"\"\"\n assert ys is not None, \"Greedy decoding in TGModel.forward no longer supported.\"\n # TODO: get rid of longest_label\n # keep track of longest label we've ever seen\n # we'll never produce longer ones than that during prediction\n self.longest_label = max(self.longest_label, ys.size(1))\n\n # use cached encoding if available\n encoder_states = prev_enc if prev_enc is not None else self.encoder(*xs)\n\n # use teacher forcing\n scores, preds = self.decode_forced(encoder_states, ys)\n return scores, preds, encoder_states\n\n\nclass PPLMetric(AverageMetric):\n def value(self):\n return math.exp(super().value())\n\n\nclass TorchGeneratorAgent(TorchAgent, ABC):\n \"\"\"\n Abstract Generator agent; only meant to be extended.\n\n TorchGeneratorAgent aims to handle much of the bookkeeping and infrastructure work\n for any generative models, like seq2seq or transformer. It implements the train_step\n and eval_step. The only requirement is that your model *must* implemented the\n interface TorchGeneratorModel interface.\n \"\"\"\n\n @classmethod\n def upgrade_opt(cls, opt_from_disk: Opt):\n # call the parent upgrades\n opt_from_disk = super(TorchGeneratorAgent, cls).upgrade_opt(opt_from_disk)\n\n # 2019-08-18: Adding support for generation other than beam search\n # Previously, selecting --beam-size > 1 enabled beam search and == 1 was\n # greedy. New behavior is --inference greedy or --inference beam.\n if 'inference' not in opt_from_disk:\n assert 'beam_size' in opt_from_disk\n if opt_from_disk['beam_size'] == 1:\n method = 'greedy'\n else:\n method = 'beam'\n opt_from_disk['inference'] = method\n warn_once(f'Old model inference method inferred as {method}')\n return opt_from_disk\n\n @classmethod\n def add_cmdline_args(cls, argparser):\n \"\"\"\n Add command line arguments.\n \"\"\"\n agent = argparser.add_argument_group('Torch Generator Agent')\n agent.add_argument(\n '--beam-size',\n type=int,\n default=1,\n help='Beam size, if 1 then greedy search',\n )\n agent.add_argument(\n '--beam-min-length',\n type=int,\n default=1,\n help='Minimum length of prediction to be generated by the beam search',\n )\n agent.add_argument(\n '--beam-context-block-ngram',\n type=int,\n default=-1,\n help=(\n 'Size n-grams to block in beam search from the context. val <= 0 '\n 'implies no blocking'\n ),\n )\n agent.add_argument(\n '--beam-block-ngram',\n type=int,\n default=-1,\n help='Size n-grams to block in beam search. val <= 0 implies no blocking',\n )\n agent.add_argument(\n '--beam-length-penalty',\n type=float,\n default=0.65,\n help='Applies a length penalty. 
Set to 0 for no penalty.',\n )\n agent.add_argument(\n '--skip-generation',\n type='bool',\n default=False,\n hidden=True,\n help='Skip beam search. Useful for speeding up training, '\n 'if perplexity is the validation metric.',\n )\n agent.add_argument(\n '--inference',\n choices={'beam', 'greedy', 'topk', 'nucleus', 'delayedbeam'},\n default='greedy',\n help='Generation algorithm',\n )\n agent.add_argument(\n '--topk', type=int, default=10, help='K used in Top K sampling'\n )\n agent.add_argument(\n '--topp', type=float, default=0.9, help='p used in nucleus sampling'\n )\n agent.add_argument(\n '--beam-delay', type=int, default=30, help='used in delayedbeam search'\n )\n agent.add_argument(\n '--beam-blacklist-filename',\n type=str,\n default=None,\n help='Load a text file of hard blocks for beam search to never say.',\n )\n agent.add_argument(\n '--temperature',\n type=float,\n default=1.0,\n help='temperature to add during decoding',\n )\n agent.add_argument(\n '--compute-tokenized-bleu',\n type='bool',\n default=False,\n help='if true, compute tokenized bleu scores',\n )\n\n super(TorchGeneratorAgent, cls).add_cmdline_args(argparser)\n return agent\n\n def __init__(self, opt: Opt, shared=None):\n init_model, is_finetune = self._get_init_model(opt, shared)\n super().__init__(opt, shared)\n\n self.beam_size = opt.get('beam_size', 1)\n self.beam_min_length = opt.get('beam_min_length', 1)\n self.beam_block_ngram = opt.get('beam_block_ngram', -1)\n self.beam_context_block_ngram = opt.get('beam_context_block_ngram', -1)\n self.temperature = opt.get('temperature', 1.0)\n assert self.temperature > 0, '--temperature must be greater than 0'\n self.output_token_losses = opt.get('verbose', False)\n self.compute_tokenized_bleu = opt.get('compute_tokenized_bleu', False)\n self.beam_blacklist: Optional[SearchBlacklist] = None\n\n if shared:\n # set up shared properties\n states = shared.get('states', {})\n self.beam_blacklist = shared.get('blacklist')\n else:\n # this is not a shared instance of this class, so do full init\n self.criterion = self.build_criterion()\n # ensure all distributed copies will always be in sync\n self.model = self.build_model()\n\n # load the blacklist for beam search\n self.beam_blacklist = self._load_beam_blacklist()\n\n if self.model is None or self.criterion is None:\n raise AttributeError(\n 'build_model() and build_criterion() need to return the model or criterion'\n )\n if self.use_cuda:\n if self.model_parallel:\n self.model = PipelineHelper().make_parallel(self.model)\n else:\n self.model.cuda()\n self.criterion.cuda()\n\n sync_parameters(self.model)\n train_params = trainable_parameters(self.model)\n total_params = total_parameters(self.model)\n print(f\"Total parameters: {total_params:,d} ({train_params:,d} trainable)\")\n\n if self.fp16:\n self.model = self.model.half()\n\n if init_model is not None:\n # load model parameters if available\n print('[ Loading existing model params from {} ]' ''.format(init_model))\n states = self.load(init_model)\n else:\n states = {}\n\n if shared:\n if 'optimizer' in shared:\n self.optimizer = shared['optimizer']\n elif self._should_initialize_optimizer():\n # do this regardless of share state, but don't\n self.init_optim(\n [p for p in self.model.parameters() if p.requires_grad],\n optim_states=states.get('optimizer'),\n saved_optim_type=states.get('optimizer_type'),\n )\n self.build_lr_scheduler(states, hard_reset=is_finetune)\n\n if shared is None and is_distributed():\n device_ids = None if self.model_parallel else 
[self.opt['gpu']]\n self.model = torch.nn.parallel.DistributedDataParallel(\n self.model, device_ids=device_ids, broadcast_buffers=False\n )\n\n self.reset()\n\n def build_criterion(self):\n \"\"\"\n Construct and return the loss function.\n\n By default torch.nn.CrossEntropyLoss.\n\n If overridden, this model should produce a sum that can be used for a per-token loss.\n \"\"\"\n if not self.fp16:\n return torch.nn.CrossEntropyLoss(\n ignore_index=self.NULL_IDX, reduction='none'\n )\n else:\n # FP16 safe cross entropy (softmax done in FP32)\n return FP16SafeCrossEntropy(ignore_index=self.NULL_IDX, reduction='none')\n\n def _v2t(self, vec):\n \"\"\"\n Convert token indices to string of tokens.\n \"\"\"\n new_vec = []\n if hasattr(vec, 'cpu'):\n vec = vec.cpu()\n for i in vec:\n if i == self.END_IDX:\n break\n elif i != self.START_IDX:\n new_vec.append(i)\n return self.dict.vec2txt(new_vec)\n\n def set_interactive_mode(self, mode, shared=False):\n \"\"\"\n Turn on interactive mode.\n \"\"\"\n super().set_interactive_mode(mode, shared)\n if mode:\n self.skip_generation = False\n else:\n self.skip_generation = self.opt.get('skip_generation', False)\n\n def _dummy_batch(self, batchsize, maxlen):\n \"\"\"\n Create a dummy batch.\n\n This is used to preinitialize the cuda buffer, or otherwise force a\n null backward pass after an OOM.\n\n If your model uses additional inputs beyond text_vec and label_vec,\n you will need to override it to add additional fields.\n \"\"\"\n return Batch(\n text_vec=torch.ones(batchsize, maxlen).long().cuda(),\n label_vec=torch.ones(batchsize, 2).long().cuda(),\n text_lengths=[maxlen] * batchsize,\n )\n\n def _init_cuda_buffer(self, batchsize, maxlen, force=False):\n \"\"\"\n Pre-initialize CUDA buffer by doing fake forward pass.\n\n This is also used in distributed mode to force a worker to sync with others.\n \"\"\"\n if self.use_cuda and (force or not hasattr(self, 'buffer_initialized')):\n try:\n self._control_local_metrics(disabled=True)\n loss = self.compute_loss(self._dummy_batch(batchsize, maxlen))\n self._control_local_metrics(enabled=True)\n self._temporarily_disable_local_metrics = False\n self.backward(loss)\n self.buffer_initialized = True\n except RuntimeError as e:\n if 'out of memory' in str(e):\n m = (\n 'CUDA OOM: Lower batch size (-bs) from {} or lower '\n ' max sequence length (-tr) from {}'\n ''.format(batchsize, maxlen)\n )\n raise RuntimeError(m)\n else:\n raise e\n\n def reset_metrics(self):\n \"\"\"\n Reset metrics for reporting loss and perplexity.\n \"\"\"\n super().reset_metrics()\n\n def share(self):\n \"\"\"\n Share internal states between parent and child instances.\n \"\"\"\n shared = super().share()\n shared['beam_blacklist'] = self.beam_blacklist\n if hasattr(self, 'optimizer'):\n shared['optimizer'] = self.optimizer\n if self.opt.get('numthreads', 1) > 1:\n shared['states'] = { # don't share optimizer states\n 'optimizer_type': self.opt['optimizer']\n }\n return shared\n\n def vectorize(self, *args, **kwargs):\n \"\"\"\n Override vectorize for generative models.\n \"\"\"\n kwargs['add_start'] = False # model does this in module code\n kwargs['add_end'] = True # we do want this\n return super().vectorize(*args, **kwargs)\n\n def _model_input(self, batch):\n \"\"\"\n Create the input (x) value for the model.\n\n Must return a tuple. 
This will be passed directly into the model via\n `*args`, i.e.,\n\n >>> model(*_model_input(batch))\n\n This is intentionally overridable so that richer models can pass the\n additional inputs.\n \"\"\"\n return (batch.text_vec,)\n\n def _encoder_input(self, batch):\n \"\"\"\n Create the input (x) value for the encoder.\n\n Must return a tuple. This will be passed directly into the encoder via\n `*args`, i.e.,\n\n >>> model.encoder(*_encoder_input(batch))\n\n This is intentionally overridable so that richer models can pass the\n additional inputs directly to the encoder.\n \"\"\"\n return self._model_input(batch)\n\n def compute_loss(self, batch, return_output=False):\n \"\"\"\n Compute and return the loss for the given batch.\n\n Easily overridable for customized loss functions.\n\n If return_output is True, the full output from the call to self.model()\n is also returned, via a (loss, model_output) pair.\n \"\"\"\n if batch.label_vec is None:\n raise ValueError('Cannot compute loss without a label.')\n model_output = self.model(*self._model_input(batch), ys=batch.label_vec)\n scores, preds, *_ = model_output\n score_view = scores.view(-1, scores.size(-1))\n loss = self.criterion(score_view, batch.label_vec.view(-1))\n loss = loss.view(scores.shape[:-1]).sum(dim=1)\n # save loss to metrics\n notnull = batch.label_vec.ne(self.NULL_IDX)\n target_tokens = notnull.long().sum(dim=-1)\n correct = ((batch.label_vec == preds) * notnull).sum(dim=-1)\n\n self.record_local_metric('loss', AverageMetric.many(loss, target_tokens))\n self.record_local_metric('ppl', PPLMetric.many(loss, target_tokens))\n self.record_local_metric(\n 'token_acc', AverageMetric.many(correct, target_tokens)\n )\n # actually do backwards loss\n loss = loss.sum()\n loss /= target_tokens.sum() # average loss per token\n if return_output:\n return (loss, model_output)\n else:\n return loss\n\n def train_step(self, batch):\n \"\"\"\n Train on a single batch of examples.\n \"\"\"\n # helps with memory usage\n # note we want to use the opt's batchsize instead of the observed batch size\n # in case dynamic batching is in use\n self._init_cuda_buffer(self.opt['batchsize'], self.label_truncate or 256)\n self.model.train()\n self.zero_grad()\n\n try:\n loss = self.compute_loss(batch)\n self.backward(loss)\n self.update_params()\n except RuntimeError as e:\n # catch out of memory exceptions during fwd/bck (skip batch)\n if 'out of memory' in str(e):\n print(\n '| WARNING: ran out of memory, skipping batch. '\n 'if this happens frequently, decrease batchsize or '\n 'truncate the inputs to the model.'\n )\n self.global_metrics.add('skipped_batches', SumMetric(1))\n # gradients are synced on backward, now this model is going to be\n # out of sync! 
catch up with the other workers\n self._init_cuda_buffer(8, 8, True)\n else:\n raise e\n\n def _construct_token_losses(self, labels, model_output):\n # Get non-aggregated losses\n scores, _, _ = model_output\n score_view = scores.view(-1, scores.size(-1))\n losses = self.criterion(score_view, labels.view(-1)).view(len(labels), -1)\n\n # Zip decoded tokens with losses\n token_losses = []\n for i, label in enumerate(labels):\n token_losses.append(\n list(\n zip(\n [self.dict[token] for token in label.tolist()],\n losses[i].tolist(),\n )\n )\n )\n return token_losses\n\n def _compute_fairseq_bleu(self, batch: Batch, preds):\n \"\"\"\n Compute BLEU score between text and label, using the FAIRSeq BLEU Scorer.\n\n :param batch:\n Batch of observations\n :param texts:\n list of string predictions\n \"\"\"\n all_results = []\n for i, t in enumerate(preds):\n result = FairseqBleuMetric.compute_many(\n t[1:],\n batch.label_vec[i].unsqueeze(0),\n pad_idx=self.NULL_IDX,\n end_idx=self.END_IDX,\n unk_idx=self.dict[self.dict.unk_token],\n )\n if result is None:\n return\n all_results.append(result)\n\n bleu_scores = list(zip(*all_results))\n for k in range(4):\n self.record_local_metric(f'fairseq_bleu{k + 1}', bleu_scores[k])\n\n def _compute_nltk_bleu(self, batch: Batch, texts: List[str]):\n \"\"\"\n Compute BLEU score between text and label(s), using the NLTK BLEU Scorer.\n\n Note this differs from BLEU in ParlAI metrics in that the answers\n are unnormalized (no removal of stop words, etc.)\n\n :param batch:\n Batch of observations\n :param texts:\n list of string predictions\n \"\"\"\n\n results = {}\n for i, p in enumerate(texts):\n obs = batch.observations[i]\n references = []\n for lbl in obs['eval_labels']:\n references.append(\n self._v2t(\n self._vectorize_text(\n lbl, True, True, self.label_truncate, False\n )\n )\n )\n for k in range(1, 5):\n b = BleuMetric.compute(p, references, k)\n if b is None:\n b = 0\n if k not in results:\n results[k] = []\n results[k].append(b)\n\n for k in range(1, 5):\n self.record_local_metric(f'nltk_bleu{k}', results[k])\n\n def eval_step(self, batch):\n \"\"\"\n Evaluate a single batch of examples.\n \"\"\"\n if batch.text_vec is None and batch.image is None:\n return\n if batch.text_vec is not None:\n bsz = batch.text_vec.size(0)\n else:\n bsz = len(batch.image)\n self.model.eval()\n cand_scores = None\n token_losses = None\n\n if batch.label_vec is not None:\n # calculate loss on targets with teacher forcing\n loss, model_output = self.compute_loss(batch, return_output=True)\n if self.output_token_losses:\n token_losses = self._construct_token_losses(\n batch.label_vec, model_output\n )\n\n preds = None\n if self.skip_generation:\n warn_once(\n \"--skip-generation does not produce accurate metrics beyond ppl\",\n RuntimeWarning,\n )\n else:\n maxlen = self.label_truncate or 256\n beam_preds_scores, _ = self._generate(batch, self.beam_size, maxlen)\n preds, scores = zip(*beam_preds_scores)\n\n cand_choices = None\n # TODO: abstract out the scoring here\n if self.rank_candidates:\n # compute roughly ppl to rank candidates\n cand_choices = []\n encoder_states = self.model.encoder(*self._encoder_input(batch))\n for i in range(bsz):\n num_cands = len(batch.candidate_vecs[i])\n enc = self.model.reorder_encoder_states(encoder_states, [i] * num_cands)\n cands, _ = self._pad_tensor(batch.candidate_vecs[i])\n scores, _ = self.model.decode_forced(enc, cands)\n cand_losses = F.cross_entropy(\n scores.view(num_cands * cands.size(1), -1),\n cands.view(-1),\n 
reduction='none',\n ).view(num_cands, cands.size(1))\n # now cand_losses is cands x seqlen size, but we still need to\n # check padding and such\n mask = (cands != self.NULL_IDX).float()\n cand_scores = (cand_losses * mask).sum(dim=1) / (mask.sum(dim=1) + 1e-9)\n _, ordering = cand_scores.sort()\n cand_choices.append([batch.candidates[i][o] for o in ordering])\n\n text = [self._v2t(p) for p in preds] if preds is not None else None\n if text and self.compute_tokenized_bleu:\n # compute additional bleu scores\n self._compute_fairseq_bleu(batch, preds)\n self._compute_nltk_bleu(batch, text)\n return Output(text, cand_choices, token_losses=token_losses)\n\n def _treesearch_factory(self, device):\n method = self.opt.get('inference', 'greedy')\n beam_size = self.opt.get('beam_size', 1)\n if method == 'greedy':\n return GreedySearch(\n beam_size,\n min_length=0,\n block_ngram=self.beam_block_ngram,\n context_block_ngram=self.beam_context_block_ngram,\n length_penalty=self.opt.get('beam_length_penalty', 0.65),\n padding_token=self.NULL_IDX,\n bos_token=self.START_IDX,\n eos_token=self.END_IDX,\n device=device,\n )\n elif method == 'beam':\n return BeamSearch(\n beam_size,\n min_length=self.beam_min_length,\n block_ngram=self.beam_block_ngram,\n context_block_ngram=self.beam_context_block_ngram,\n length_penalty=self.opt.get('beam_length_penalty', 0.65),\n padding_token=self.NULL_IDX,\n bos_token=self.START_IDX,\n eos_token=self.END_IDX,\n device=device,\n )\n elif method == 'delayedbeam':\n return DelayedBeamSearch(\n self.opt['topk'],\n self.opt['beam_delay'],\n beam_size,\n min_length=self.beam_min_length,\n block_ngram=self.beam_block_ngram,\n context_block_ngram=self.beam_context_block_ngram,\n length_penalty=self.opt.get('beam_length_penalty', 0.65),\n padding_token=self.NULL_IDX,\n bos_token=self.START_IDX,\n eos_token=self.END_IDX,\n device=device,\n )\n elif method == 'topk':\n return TopKSampling(\n self.opt['topk'],\n beam_size,\n min_length=self.beam_min_length,\n block_ngram=self.beam_block_ngram,\n context_block_ngram=self.beam_context_block_ngram,\n length_penalty=self.opt.get('beam_length_penalty', 0.65),\n padding_token=self.NULL_IDX,\n bos_token=self.START_IDX,\n eos_token=self.END_IDX,\n device=device,\n )\n elif method == 'nucleus':\n return NucleusSampling(\n self.opt['topp'],\n beam_size,\n min_length=self.beam_min_length,\n block_ngram=self.beam_block_ngram,\n context_block_ngram=self.beam_context_block_ngram,\n length_penalty=self.opt.get('beam_length_penalty', 0.65),\n padding_token=self.NULL_IDX,\n bos_token=self.START_IDX,\n eos_token=self.END_IDX,\n device=device,\n )\n else:\n raise ValueError(f\"Can't use inference method {method}\")\n\n def _get_context(self, batch, batch_idx):\n \"\"\"\n Set the beam context for n-gram context blocking.\n\n Intentionally overridable for more complex model histories.\n \"\"\"\n return batch.text_vec[batch_idx]\n\n def _generate(self, batch, beam_size, max_ts):\n \"\"\"\n Generate an output with beam search.\n\n Depending on the options, this may perform greedy/topk/nucleus generation.\n\n :param Batch batch:\n Batch structure with input and labels\n :param int beam_size:\n Size of each beam during the search\n :param int max_ts:\n the maximum length of the decoded sequence\n\n :return:\n tuple (beam_pred_scores, beams)\n\n - beam_preds_scores: list of (prediction, score) pairs for each sample in\n Batch\n - beams :list of Beam instances defined in Beam class, can be used for any\n following postprocessing, e.g. 
dot logging.\n \"\"\"\n model = self.model\n if isinstance(model, torch.nn.parallel.DistributedDataParallel):\n model = self.model.module\n encoder_states = model.encoder(*self._encoder_input(batch))\n if batch.text_vec is not None:\n dev = batch.text_vec.device\n else:\n dev = batch.label_vec.device\n\n bsz = (\n len(batch.text_lengths)\n if batch.text_lengths is not None\n else len(batch.image)\n )\n if batch.text_vec is not None:\n batchsize = batch.text_vec.size(0)\n beams = [\n self._treesearch_factory(dev)\n .set_context(self._get_context(batch, batch_idx))\n .set_blacklist(self.beam_blacklist)\n for batch_idx in range(batchsize)\n ]\n else:\n beams = [self._treesearch_factory(dev) for _ in range(bsz)]\n\n # repeat encoder outputs and decoder inputs\n decoder_input = (\n torch.LongTensor([self.START_IDX]).expand(bsz * beam_size, 1).to(dev)\n )\n\n inds = torch.arange(bsz).to(dev).unsqueeze(1).repeat(1, beam_size).view(-1)\n encoder_states = model.reorder_encoder_states(encoder_states, inds)\n incr_state = None\n\n for _ts in range(max_ts):\n if all((b.is_done() for b in beams)):\n # exit early if possible\n break\n\n score, incr_state = model.decoder(decoder_input, encoder_states, incr_state)\n # only need the final hidden state to make the word prediction\n score = score[:, -1:, :]\n score = model.output(score)\n # score contains softmax scores for bsz * beam_size samples\n score = score.view(bsz, beam_size, -1)\n if self.temperature != 1.0:\n score.div_(self.temperature)\n # force to fp32 to avoid overflow issues during search calculations\n score = F.log_softmax(score, dim=-1, dtype=torch.float32)\n for i, b in enumerate(beams):\n if not b.is_done():\n b.advance(score[i])\n incr_state_inds = torch.cat(\n [\n beam_size * i + b.get_backtrack_from_current_step()\n for i, b in enumerate(beams)\n ]\n )\n incr_state = model.reorder_decoder_incremental_state(\n incr_state, incr_state_inds\n )\n decoder_input = torch.index_select(decoder_input, 0, incr_state_inds)\n selection = torch.cat(\n [b.get_output_from_current_step() for b in beams]\n ).unsqueeze(-1)\n decoder_input = torch.cat([decoder_input, selection], dim=-1)\n\n # get all finalized candidates for each sample (and validate them)\n n_best_beam_preds_scores = [b.get_rescored_finished() for b in beams]\n\n if hasattr(self, '_rerank_beams'):\n n_best_beam_preds_scores = self._rerank_beams(\n batch, n_best_beam_preds_scores\n )\n\n # get the top prediction for each beam (i.e. 
minibatch sample)\n beam_preds_scores = [n_best_list[0] for n_best_list in n_best_beam_preds_scores]\n if self.opt.get('verbose'):\n for i, beams in enumerate(n_best_beam_preds_scores):\n for b, (tokens, score) in enumerate(beams):\n gen = self._v2t(tokens)\n logging.debug(f\"Batch[{i:3d}] Beam[{b:3d}]: ({score:4.2f}): {gen}\")\n logging.debug('-')\n\n return beam_preds_scores, beams\n\n def _load_beam_blacklist(self) -> SearchBlacklist:\n \"\"\"\n Load the beam blacklist.\n\n :return: a dict mapping ngram length to different ngrams\n \"\"\"\n blacklist = SearchBlacklist(self.dict)\n if not self.opt.get('beam_blacklist_filename'):\n return blacklist\n\n blacklist_fn = self.opt['beam_blacklist_filename']\n try:\n with open(blacklist_fn) as f:\n for line in f:\n blacklist.add(line.strip())\n except IOError:\n logging.error(\n f\"Could not load beam blacklist {blacklist_fn}, using empty blacklist.\"\n )\n return blacklist\n\n\nclass _HypothesisTail(object):\n \"\"\"\n Hold some bookkeeping about a hypothesis.\n \"\"\"\n\n # use slots because we don't want dynamic attributes here\n __slots__ = ['timestep', 'hypid', 'score', 'tokenid']\n\n def __init__(self, timestep, hypid, score, tokenid):\n self.timestep = timestep\n self.hypid = hypid\n self.score = score\n self.tokenid = tokenid\n\n\nclass TreeSearch(object):\n \"\"\"\n Abstract Tree Search class.\n\n It keeps information about beam_size concurrent, developing hypotheses. Concrete\n implementations make choices about which token to explore next at each point in the\n tree. Different choices result in different generation algorithms.\n \"\"\"\n\n def __init__(\n self,\n beam_size,\n block_ngram=-1,\n context_block_ngram=-1,\n padding_token=0,\n bos_token=1,\n eos_token=2,\n min_length=3,\n device='cpu',\n length_penalty=0.65,\n ):\n \"\"\"\n Instantiate Beam object.\n\n :param beam_size:\n number of hypothesis in the beam\n :param block_ngram:\n size of ngrams to block.\n :param context_block_ngram:\n size of context ngrams to block\n :param padding_token:\n padding token ID\n :param bos_token:\n beginning of sentence token ID\n :param eos_token:\n end of sentence token ID\n :param min_length:\n minimum length of the predicted sequence\n :param device:\n What device to use for computations\n \"\"\"\n self.beam_size = beam_size\n self.length_penalty = length_penalty\n self.block_ngram = block_ngram\n self.min_length = min_length\n self.eos = eos_token\n self.bos = bos_token\n self.pad = padding_token\n self.context = None\n self.context_block_ngram = context_block_ngram\n self.blacklist: Optional[SearchBlacklist] = None\n self.device = device\n # recent score for each hypo in the beam\n self.scores = None\n # self.scores values per each time step\n self.all_scores = [torch.Tensor([0.0] * beam_size).to(self.device)]\n # backtracking id to hypothesis at previous time step\n self.bookkeep = []\n # output tokens at each time step\n self.outputs = [\n torch.Tensor(self.beam_size).long().fill_(self.bos).to(self.device)\n ]\n # keeps tuples (score, time_step, hyp_id)\n self.finished = []\n self.eos_top = False\n self.eos_top_ts = None\n self.n_best_counter = 0\n self.partial_hyps = [[self.bos] for i in range(beam_size)]\n\n def set_context(self: TSType, context: torch.LongTensor) -> TSType:\n \"\"\"\n Set the internal context representation and return self.\n\n :param context:\n a LongTensor representing the input context; used for context\n ngram blocking, if supplied\n \"\"\"\n self.context = context.tolist()\n return self\n\n def 
set_blacklist(self: TSType, blacklist: Optional[SearchBlacklist]) -> TSType:\n self.blacklist = blacklist\n return self\n\n def get_output_from_current_step(self):\n \"\"\"\n Get the outputput at the current step.\n \"\"\"\n return self.outputs[-1]\n\n def get_backtrack_from_current_step(self):\n \"\"\"\n Get the backtrack at the current step.\n \"\"\"\n return self.bookkeep[-1]\n\n @abstractmethod\n def select_paths(self, logprobs, prior_scores, current_length):\n \"\"\"\n Select the next vocabulary item in these beams.\n\n :param logprobs:\n a (beamsize x vocab) tensor of log probabilities. If this is the first\n turn in the dialogue, it will be a (1 x vocab) tensor.\n :param prior_scores:\n a (beamsize) tensor of weights with the cumulative running\n log-probability of each beam. If the first turn, it will be a (1) tensor.\n :param current_length:\n the current length in tokens\n :return:\n a (hypothesis_ids, token_id, scores) tuple, where:\n\n - hypothesis_ids is a LongTensor of hypotheses we're extending. May have\n repeats, but should always be (beamsize) long.\n - token_ids is a (beamsize) LongTensor of next-token choices for\n each of the hypotheses.\n - scores is a (beamsize) Tensor with the updated cumulative log-probs\n of each beam.\n \"\"\"\n pass\n\n def _block_ngrams(\n self, ngram_size: int, logprobs: torch.Tensor, source: torch.LongTensor = None\n ):\n \"\"\"\n Hard block ngrams from the logprobs, based on the source.\n\n :param ngram_size:\n The length of ngrams to block. Must be > 0.\n :param logprobs:\n Float or HalfTensor, representing the log-probabilities. This is\n modified in place.\n :param source:\n Source text to grab ngrams from. If None, it uses the current\n hypothesis (i.e. self-blocking).\n \"\"\"\n for beam_id, hyp in enumerate(self.partial_hyps):\n if len(hyp) < ngram_size - 1:\n continue\n source_ = hyp if source is None else source\n ngrams = self._find_ngrams(source_, ngram_size)\n prefix = hyp[-(ngram_size - 1) :]\n for ngram in ngrams:\n if ngram_size == 1 or prefix == list(ngram[:-1]):\n logprobs[beam_id][ngram[-1]] = neginf(logprobs.dtype)\n return logprobs\n\n def _block_blacklist(self, logprobs: torch.Tensor) -> torch.Tensor:\n if self.blacklist is None:\n return logprobs\n\n for beam_id, hyp in enumerate(self.partial_hyps):\n for ngram_size, bad_ngrams in self.blacklist.items():\n prefix = hyp[-(ngram_size - 1) :]\n for ngram in bad_ngrams:\n if (ngram_size == 1) or prefix == list(ngram[:-1]):\n logprobs[beam_id][ngram[-1]] = neginf(logprobs.dtype)\n return logprobs\n\n def advance(self, logprobs):\n \"\"\"\n Advance the beam one step.\n \"\"\"\n current_length = len(self.all_scores) - 1\n if current_length < self.min_length:\n # penalize all eos probs to make it decode longer\n for hyp_id in range(logprobs.size(0)):\n logprobs[hyp_id][self.eos] = neginf(logprobs.dtype)\n\n if self.scores is None:\n self.scores = torch.zeros(1).type_as(logprobs).to(logprobs.device)\n\n # penalize hypotheses ending in EOS on the prior scores (self.scores) level\n # this is related to search which uses prior scores (self.scores) (e.g. 
beam)\n for hyp_id, token in enumerate(self.outputs[-1]):\n if token == self.eos:\n self.scores[hyp_id] = neginf(self.scores.dtype)\n\n # beam blocking\n if self.block_ngram > 0:\n logprobs = self._block_ngrams(self.block_ngram, logprobs, None)\n\n logprobs = self._block_blacklist(logprobs)\n\n if self.context_block_ngram > 0:\n if self.context is None:\n raise ValueError(\n \"Must use TreeSearch.set_context to use context blocking.\"\n )\n logprobs = self._block_ngrams(\n self.context_block_ngram, logprobs, self.context\n )\n\n hyp_ids, tok_ids, self.scores = self.select_paths(\n logprobs, self.scores, current_length\n )\n # use clone() here to ensure that self.all_scores will not be changed\n # later due to any penalties to self.scores\n self.all_scores.append(self.scores.clone())\n\n self.outputs.append(tok_ids)\n self.bookkeep.append(hyp_ids)\n self.partial_hyps = [\n self.partial_hyps[hyp_ids[i]] + [tok_ids[i].item()]\n for i in range(self.beam_size)\n ]\n\n # check new hypos for eos label, if we have some, add to finished\n for hypid in range(self.beam_size):\n if self.outputs[-1][hypid] == self.eos:\n if self.scores[hypid] <= neginf(self.scores.dtype):\n continue\n # this is finished hypo, adding to finished\n eostail = _HypothesisTail(\n timestep=len(self.outputs) - 1,\n hypid=hypid,\n score=self.all_scores[-1][hypid],\n tokenid=self.eos,\n )\n self.finished.append(eostail)\n self.n_best_counter += 1\n\n if self.outputs[-1][0] == self.eos:\n self.eos_top = True\n if self.eos_top_ts is None:\n self.eos_top_ts = len(self.outputs) - 1\n\n def is_done(self):\n \"\"\"\n Return whether beam search is complete.\n \"\"\"\n return self.eos_top and self.n_best_counter >= self.beam_size\n\n def _find_ngrams(self, input_list, n):\n \"\"\"\n Find ngrams of size n in input list.\n \"\"\"\n return list(zip(*[input_list[i:] for i in range(n)]))\n\n def _get_hyp_from_finished(self, hypothesis_tail):\n \"\"\"\n Extract hypothesis ending with EOS at timestep with hyp_id.\n\n :param timestep:\n timestep with range up to len(self.outputs) - 1\n\n :param hyp_id:\n id with range up to beam_size - 1\n\n :return:\n hypothesis sequence\n \"\"\"\n hyp_idx = []\n endback = hypothesis_tail.hypid\n for i in range(hypothesis_tail.timestep, -1, -1):\n hyp_idx.append(\n _HypothesisTail(\n timestep=i,\n hypid=endback,\n score=self.all_scores[i][endback],\n tokenid=self.outputs[i][endback],\n )\n )\n endback = self.bookkeep[i - 1][endback]\n\n return hyp_idx\n\n def _get_pretty_hypothesis(self, list_of_hypotails):\n \"\"\"\n Return hypothesis as a tensor of token ids.\n \"\"\"\n return torch.stack([ht.tokenid for ht in reversed(list_of_hypotails)])\n\n def get_rescored_finished(self, n_best=None):\n \"\"\"\n Return finished hypotheses according to adjusted scores.\n\n Score adjustment is done according to the Google NMT paper, which\n penalizes long utterances.\n\n :param n_best:\n number of finalized hypotheses to return\n\n :return:\n list of (tokens, score) pairs, in sorted order, where:\n - tokens is a tensor of token ids\n - score is the adjusted log probability of the entire utterance\n \"\"\"\n # if we never actually finished, force one\n if not self.finished:\n self.outputs[-1][0] = self.eos\n self.finished.append(\n _HypothesisTail(\n timestep=len(self.outputs) - 1,\n hypid=0,\n score=self.all_scores[-1][0],\n tokenid=self.outputs[-1][0],\n )\n )\n\n rescored_finished = []\n for finished_item in self.finished:\n current_length = finished_item.timestep + 1\n # these weights are from Google NMT paper\n 
length_penalty = math.pow((1 + current_length) / 6, self.length_penalty)\n rescored_finished.append(\n _HypothesisTail(\n timestep=finished_item.timestep,\n hypid=finished_item.hypid,\n score=finished_item.score / length_penalty,\n tokenid=finished_item.tokenid,\n )\n )\n\n # Note: beam size is almost always pretty small, so sorting is cheap enough\n srted = sorted(rescored_finished, key=attrgetter('score'), reverse=True)\n\n if n_best is not None:\n srted = srted[:n_best]\n\n n_best_list = [\n (self._get_pretty_hypothesis(self._get_hyp_from_finished(hyp)), hyp.score)\n for hyp in srted\n ]\n\n # check that there is at least one finished candidate\n # and assert that each of them contains only one EOS\n assert (\n len(n_best_list) >= 1\n ), f'TreeSearch returned {len(n_best_list)} candidates, must be >= 1'\n for (pred, score) in n_best_list:\n assert (\n pred == self.eos\n ).sum() == 1, f'TreeSearch returned a finalized hypo with multiple end tokens \\\n with score {score.item():.2f}'\n\n return n_best_list\n\n\nclass GreedySearch(TreeSearch):\n \"\"\"\n Greedy search.\n\n Picks the highest probability utterance at each step. Only works with\n --beam-size 1.\n \"\"\"\n\n def __init__(self, *args, **kwargs):\n super().__init__(*args, **kwargs)\n if self.beam_size != 1:\n raise ValueError('Greedy search can only be run with beam size 1.')\n\n def select_paths(self, logprobs, prior_scores, current_length):\n tok_scores, tok_ids = logprobs.max(1)\n best_scores = tok_scores + prior_scores\n hyp_ids = torch.arange(logprobs.size(0)).to(logprobs.device)\n return (hyp_ids, tok_ids, best_scores)\n\n\nclass BeamSearch(TreeSearch):\n \"\"\"\n Beam search.\n \"\"\"\n\n def select_paths(self, logprobs, prior_scores, current_length):\n \"\"\"\n Select the next vocabulary item in these beams.\n \"\"\"\n # if numel is 1, then this is the first time step, only one hyp is expanded\n if prior_scores.numel() == 1:\n logprobs = logprobs[0:1]\n\n # beam search actually looks over all hypotheses together so we flatten\n beam_scores = logprobs + prior_scores.unsqueeze(1).expand_as(logprobs)\n flat_beam_scores = beam_scores.view(-1)\n best_scores, best_idxs = torch.topk(flat_beam_scores, self.beam_size, dim=-1)\n voc_size = logprobs.size(-1)\n\n # get the backtracking hypothesis id as a multiple of full voc_sizes\n hyp_ids = best_idxs / voc_size\n # get the actual word id from residual of the same division\n tok_ids = best_idxs % voc_size\n\n return (hyp_ids, tok_ids, best_scores)\n\n\nclass DelayedBeamSearch(TreeSearch):\n \"\"\"\n DelayedBeam: Top-K sampling followed by beam search (Massarelli et al., 2019).\n\n Samples from a truncated distribution where only the most probable K words\n are considered at each time for the first N tokens, then switches to beam\n after N steps.\n\n See https://arxiv.org/abs/1911.03587 for details.\n \"\"\"\n\n def __init__(self, k, delay, *args, **kwargs):\n super().__init__(*args, **kwargs)\n self.k = k\n self.delay = delay\n\n def select_paths(self, logprobs, prior_scores, current_length):\n if current_length < self.delay:\n return TopKSampling.select_paths(\n self, logprobs, prior_scores, current_length\n )\n else:\n return BeamSearch.select_paths(self, logprobs, prior_scores, current_length)\n\n\nclass TopKSampling(TreeSearch):\n \"\"\"\n Top-K sampling (Fan et al., 2018).\n\n Samples from a truncated distribution where only the most probable K words\n are considered at each time.\n\n Typical values of k are 2, 10, 50.\n\n See https://arxiv.org/abs/1805.04833 for details.\n 
\"\"\"\n\n def __init__(self, k, *args, **kwargs):\n super().__init__(*args, **kwargs)\n self.k = k\n\n def select_paths(self, logprobs, prior_scores, current_length):\n values, indices = logprobs.topk(self.k, dim=-1)\n probs = torch.softmax(values, dim=-1)\n choices = torch.multinomial(probs, 1)[:, 0]\n hyp_ids = torch.arange(logprobs.size(0)).to(logprobs.device)\n tok_ids = indices[hyp_ids, choices]\n scores = values[hyp_ids, choices]\n best_scores = prior_scores.expand_as(scores) + scores\n return (hyp_ids, tok_ids, best_scores)\n\n\nclass NucleusSampling(TreeSearch):\n \"\"\"\n Nucelus, aka top-p sampling (Holtzman et al., 2019).\n\n Samples from a truncated distribution which covers a fixed CDF proportion\n of the original distribution.\n\n Typical values of p are 0.3 and 0.9.\n\n See https://arxiv.org/abs/1904.09751 for details.\n \"\"\"\n\n def __init__(self, p, *args, **kwargs):\n super().__init__(*args, **kwargs)\n self.p = p\n\n def select_paths(self, logprobs, prior_scores, current_length):\n # Unlike the other treesearch methods, we have to switch to linspace\n # for the probabilities in order to compute the CDF.\n probs = torch.softmax(logprobs, dim=-1)\n sprobs, sinds = probs.sort(dim=-1, descending=True)\n # The subtraction here is so that we always include the first word to\n # go over p. For example, if the most probable token has a prob of 0.5, and\n # p = 0.3, then we need still need to include that first token.\n mask = (sprobs.cumsum(dim=-1) - sprobs[:, :1]) >= self.p\n sprobs[mask] = 0\n sprobs.div_(sprobs.sum(dim=-1).unsqueeze(1))\n choices = torch.multinomial(sprobs, 1)[:, 0]\n hyp_ids = torch.arange(logprobs.size(0)).to(logprobs.device)\n tok_ids = sinds[hyp_ids, choices]\n # Convert back to logspace.\n scores = sprobs[hyp_ids, choices].log()\n best_scores = prior_scores.expand_as(scores) + scores\n return (hyp_ids, tok_ids, best_scores)\n"
] | [
[
"torch.zeros",
"torch.cat",
"torch.arange",
"torch.nn.parallel.DistributedDataParallel",
"torch.softmax",
"torch.nn.functional.log_softmax",
"torch.ones",
"torch.multinomial",
"torch.LongTensor",
"torch.index_select",
"torch.Tensor",
"torch.nn.CrossEntropyLoss",
"torch.topk"
]
] |
looselycoupled/partisan-discourse | [
"8579924094c92e25e21ce59a26232269cf6b34bc"
] | [
"corpus/learn.py"
] | [
"# corpus.learn\n# Machine learning for the corpus with Scikit-Learn.\n#\n# Author: Benjamin Bengfort <[email protected]>\n# Created: Mon Jul 25 17:23:50 2016 -0400\n#\n# Copyright (C) 2016 District Data Labs\n# For license information, see LICENSE.txt\n#\n# ID: learn.py [3100e46] [email protected] $\n\n\"\"\"\nMachine learning for the corpus with Scikit-Learn.\n\"\"\"\n\n##########################################################################\n## Imports\n##########################################################################\n\nimport nltk\nimport unicodedata\n\nfrom nltk.corpus import wordnet as wn\nfrom sklearn.pipeline import Pipeline\nfrom sklearn.pipeline import FeatureUnion\nfrom sklearn.model_selection import KFold\nfrom sklearn.decomposition import TruncatedSVD\nfrom sklearn.feature_extraction import DictVectorizer\nfrom sklearn.feature_extraction.text import TfidfVectorizer\nfrom sklearn.base import BaseEstimator, TransformerMixin\nfrom sklearn.metrics import precision_recall_fscore_support\n\nfrom partisan.utils import identity, timeit\nfrom collections import Counter, defaultdict\n\n\n##########################################################################\n## Corpus Loader (Not a transformer)\n##########################################################################\n\nclass CorpusLoader(object):\n \"\"\"\n The corpus loader knows how to deal with an NLTK corpus at the top of a\n pipeline by simply taking as input a corpus to read from. It exposes both\n the data and the labels and can be set up to do cross-validation.\n\n If a number of folds is passed in for cross-validation, then the loader\n is smart about how to access data for train/test splits. Otherwise it will\n simply yield all documents in the corpus.\n \"\"\"\n\n def __init__(self, corpus, folds=None, shuffle=True):\n self.n_docs = len(corpus.fileids())\n self.corpus = corpus\n self.folds = folds\n\n if folds is not None:\n # Generate the KFold cross validation for the loader.\n self.folds = KFold(self.n_docs, folds, shuffle)\n\n @property\n def n_folds(self):\n \"\"\"\n Returns the number of folds if it exists; 0 otherwise.\n \"\"\"\n if self.folds is None: return 0\n return self.folds.n_folds\n\n def fileids(self, fold=None, train=False, test=False):\n \"\"\"\n Returns a listing of the documents filtering to retreive specific\n data from the folds/splits. If no fold, train, or test is specified\n then the method will return all fileids.\n\n If a fold is specified (should be an integer between 0 and folds),\n then the loader will return documents from that fold. 
Further, train\n or test must be specified to split the fold correctly.\n \"\"\"\n if fold is None:\n # If no fold is specified, return all the fileids.\n return self.corpus.fileids()\n\n # Otherwise, identify the fold specifically and get the train/test idx\n for fold_idx, (train_idx, test_idx) in enumerate(self.folds):\n if fold_idx == fold: break\n else:\n # We have discovered no correct fold.\n raise ValueError(\n \"{} is not a fold, specify an integer less than {}\".format(\n fold, self.folds.n_folds\n )\n )\n\n # Now determine if we're in train or test mode.\n if not (test or train) or (test and train):\n raise ValueError(\n \"Please specify either train or test flag\"\n )\n\n # Select only the indices to filter upon.\n indices = train_idx if train else test_idx\n return [\n fileid for doc_idx, fileid in enumerate(self.corpus.fileids())\n if doc_idx in indices\n ]\n\n def labels(self, fold=None, train=False, test=False):\n \"\"\"\n Fit will load a list of the labels from the corpus categories.\n\n If a fold is specified (should be an integer between 0 and folds),\n then the loader will return documents from that fold. Further, train\n or test must be specified to split the fold correctly.\n \"\"\"\n return [\n self.corpus.categories(fileids=fileid)[0]\n for fileid in self.fileids(fold, train, test)\n ]\n\n def documents(self, fold=None, train=False, test=False):\n \"\"\"\n A generator of documents being streamed from disk. Each document is\n a list of paragraphs, which are a list of sentences, which in turn is\n a list of tuples of (token, tag) pairs. All preprocessing is done by\n NLTK and the CorpusReader object this object wraps.\n\n If a fold is specified (should be an integer between 0 and folds),\n then the loader will return documents from that fold. Further, train\n or test must be specified to split the fold correctly. 
This method\n allows us to maintain the generator properties of document reads.\n \"\"\"\n for fileid in self.fileids(fold, train, test):\n yield list(self.corpus.tagged(fileids=fileid))\n\n\n##########################################################################\n## Normalize Transformer\n##########################################################################\n\nclass TextNormalizer(BaseEstimator, TransformerMixin):\n \"\"\"\n Takes a list of tokens, removes stopwords and punctuation and lowercases\n as well as lemmatizes the words for the first step in feature extraction.\n\n Note that this transformer expects as input to transform a list of tuples,\n (token, tag) pairs, that represent a single document.\n \"\"\"\n\n def __init__(self, stopwords=None):\n self.stopwords = set(stopwords or nltk.corpus.stopwords.words('english'))\n self.lemmatizer = nltk.WordNetLemmatizer()\n\n def is_punct(self, token):\n \"\"\"\n Determines if the entire token is punctuation.\n \"\"\"\n return all(\n unicodedata.category(char).startswith('P') for char in token\n )\n\n def is_stopword(self, token):\n \"\"\"\n Determines if the token is a stopword or not.\n \"\"\"\n return token.lower() in self.stopwords\n\n def tagwn(self, tag):\n \"\"\"\n Returns the WordNet tag from the Penn Treebank tag.\n \"\"\"\n return {\n 'N': wn.NOUN,\n 'V': wn.VERB,\n 'R': wn.ADV,\n 'J': wn.ADJ\n }.get(tag[0], wn.NOUN)\n\n def lemmatize(self, token, tag):\n \"\"\"\n Lemmatizes the token according to the part of speech tag.\n \"\"\"\n return self.lemmatizer.lemmatize(token, self.tagwn(tag))\n\n def normalize(self, document):\n \"\"\"\n Normalize each (token, tag) pair in the words data set.\n \"\"\"\n return [\n self.lemmatize(token, tag).lower()\n for paragraph in document\n for sentence in paragraph\n for (token, tag) in sentence\n if not self.is_punct(token) and not self.is_stopword(token)\n ]\n\n def fit(self, X, y=None):\n \"\"\"\n At the moment, fitting doesn't require any analysis.\n \"\"\"\n return self\n\n def transform(self, documents):\n \"\"\"\n Transform a corpus of documents into normalized features.\n \"\"\"\n for document in documents:\n yield self.normalize(document)\n\n\n##########################################################################\n## Statitics Transformer\n##########################################################################\n\nclass TextStats(BaseEstimator, TransformerMixin):\n \"\"\"\n Computes the document statistics like length and number of sentences.\n \"\"\"\n\n def fit(self, X, y=None):\n return self\n\n def transform(self, documents):\n \"\"\"\n Returns a dictionary of text features in advance of a DictVectorizer.\n \"\"\"\n for document in documents:\n # Collect token and vocabulary counts\n counts = Counter(\n item[0] for para in document for sent in para for item in sent\n )\n\n # Yield structured information about the document\n yield {\n 'paragraphs': len(document),\n 'sentences': sum(len(para) for para in document),\n 'words': sum(counts.values()),\n 'vocab': len(counts),\n }\n\n\n##########################################################################\n## Model Building Functions\n##########################################################################\n\ndef construct_pipeline(classifier):\n \"\"\"\n This function creates a feature extraction pipeline that accepts data\n from a CorpusLoader and appends the classification model to the end of\n the pipeline, returning a newly constructed Pipeline object that is\n ready to be fit and trained!\n \"\"\"\n\n return 
Pipeline([\n # Create a Feature Union of Text Stats and Bag of Words\n ('union', FeatureUnion(\n transformer_list = [\n\n # Pipeline for pulling document structure features\n ('stats', Pipeline([\n ('stats', TextStats()),\n ('vect', DictVectorizer()),\n ])),\n\n # Pipeline for creating a bag of words TF-IDF vector\n ('bow', Pipeline([\n ('tokens', TextNormalizer()),\n ('tfidf', TfidfVectorizer(\n tokenizer=identity, preprocessor=None, lowercase=False\n )),\n ('best', TruncatedSVD(n_components=1000)),\n ])),\n\n ],\n\n # weight components in feature union\n transformer_weights = {\n 'stats': 0.15,\n 'bow': 0.85,\n },\n )),\n\n # Append the estimator to the end of the pipeline\n ('classifier', classifier),\n ])\n\n\n@timeit\ndef build_model(loader, model, **kwargs):\n \"\"\"\n This function creates a pipeline from the feature extraction method in\n construct_pipeline and the passed in model and model keyword arguments,\n then trains the model with the given loader using all folds, then the\n complete dataset given by the loader object. It returns the fitted\n pipeline object along with scores and timing information.\n \"\"\"\n\n # TODO: Add multiprocessing to parallelize build_inner_fold\n # TODO: Add verbosity to inform user on command line what is happening\n # TODO: Equip this method to be used by Celery workers\n\n @timeit\n def build_inner_fold(loader, classifier, fold=None):\n \"\"\"\n A timed inner function that will return a set of evaluation scores\n if a fold is passed in, otherwise will build the model on the entire\n dataset and return the fitted model.\n \"\"\"\n\n # Get the training data from the loader\n X_train = list(loader.documents(fold, train=True))\n y_train = list(loader.labels(fold, train=True))\n\n # Construct the pipeline from the instantiated classifier\n model = construct_pipeline(classifier)\n model.fit(X_train, y_train)\n\n # If folds is None, then return the fitted model.\n if fold is None: return model\n\n # Otherwise get the test data from the fold to perform an evaluation.\n X_test = list(loader.documents(fold, test=True))\n y_test = list(loader.labels(fold, test=True))\n y_pred = model.predict(X_test)\n\n # Get the per-class scores as a well-structured object\n keys = ('precision', 'recall', 'f1', 'support')\n scores = precision_recall_fscore_support(y_test, y_pred, labels=model.classes_)\n scores = map(lambda s: dict(zip(model.classes_, s)), scores)\n scores = dict(zip(keys, scores))\n\n # Get the weighted scores and add to the scores object\n weighted = precision_recall_fscore_support(y_test, y_pred, average='weighted', pos_label=None)\n for key, wscore in zip(keys, weighted):\n scores[key]['average'] = float(wscore) if wscore is not None else None\n\n return scores\n\n\n # Now that the inner function works, let's run the model build process on\n # each fold for cross-validation and a final time to complete the model.\n scores = defaultdict(lambda: defaultdict(list))\n for fold in range(loader.n_folds):\n\n classifier = model(**kwargs) # Instantiate the classifier\n score, time = build_inner_fold(loader, classifier, fold) # Fit the model for this fold\n\n # Update the scores as a list of scores for each run\n for name, values in score.items():\n for label, value in values.items():\n scores[name][label].append(value)\n\n # Add the time to the scores listing\n scores['times']['folds'].append(time)\n\n # Build the final model\n classifier = model(**kwargs)\n classifier, build_time = build_inner_fold(loader, classifier)\n scores['times']['final'] = 
build_time\n\n # Return everything we've constructed (*whew)\n return classifier, scores\n\n\nif __name__ == '__main__':\n import os\n import pickle\n\n from corpus.reader import TranscriptCorpusReader\n from sklearn.linear_model import LogisticRegression\n\n path = os.path.join(os.path.dirname(__file__), \"fixtures\", \"debates\")\n saveto = os.path.join(os.path.dirname(__file__), \"fixtures\", \"maxent-debates.pickle\")\n corpus = TranscriptCorpusReader(path)\n loader = CorpusLoader(corpus, 12)\n\n model = LogisticRegression\n (model, scores), total_time = build_model(loader, model)\n\n with open(saveto, 'wb') as f:\n pickle.dump(model, f)\n\n with open('scores.pickle', 'wb') as f:\n pickle.dump(scores, f)\n\n print(\"Finished build process in {}\".format(total_time))\n"
] | [
[
"sklearn.metrics.precision_recall_fscore_support",
"sklearn.feature_extraction.text.TfidfVectorizer",
"sklearn.model_selection.KFold",
"sklearn.decomposition.TruncatedSVD",
"sklearn.feature_extraction.DictVectorizer"
]
] |
ZZWENG/SimCLR | [
"d2cee6e0c6d7ed4d12d5c6e3c05e75d0a18ff97b"
] | [
"clustering/hkmeans_utils.py"
] | [
"import numpy as np\n\ndef norm(x, axis=None):\n return np.linalg.norm(x, axis=axis)\n\n#-------------------------\n#----- Poincaré Disk -----\n#-------------------------\n\n# NOTE: POSSIBLE ISSUE WITH DIFFERENT WAYS TO SPECIFY MINKOWSKI DOT PRODUCT\n# arbritray sign gives different signatures (+, +, +, -), (+, -, -, -)\n \n# distance in poincare disk\ndef poincare_dist(u, v, eps=1e-5):\n d = 1 + 2 * norm(u-v)**2 / ((1 - norm(u)**2) * (1 - norm(v)**2) + eps)\n return np.arccosh(d)\n\n# compute symmetric poincare distance matrix\ndef poincare_distances(embedding):\n n = embedding.shape[0]\n dist_matrix = np.zeros((n, n))\n for i in range(n):\n for j in range(i+1, n):\n dist_matrix[i][j] = poincare_dist(embedding[i], embedding[j])\n return dist_matrix\n\n# convert array from poincare disk to hyperboloid\ndef poincare_pts_to_hyperboloid(Y, eps=1e-6, metric='lorentz'):\n mink_pts = np.zeros((Y.shape[0], Y.shape[1]+1))\n # print('Minkowski pts shape: {}'.format(mink_pts.shape))\n r = norm(Y, axis=1)\n if metric == 'minkowski':\n mink_pts[:, 0] = 2/(1 - r**2 + eps) * (1 + r**2)/2\n for i in range(1, mink_pts.shape[1]):\n mink_pts[:, i] = 2/(1 - r**2 + eps) * Y[:, i - 1]\n else:\n mink_pts[:, Y.shape[1]] = 2/(1 - r**2 + eps) * (1 + r**2)/2\n for i in range(0, Y.shape[1]):\n mink_pts[:, i] = 2/(1 - r**2 + eps) * Y[:, i]\n \"\"\"\n if metric == 'minkowski':\n mink_pts[:, 0] = 2/(1 - r**2 + eps) * (1 + r**2)/2\n mink_pts[:, 1] = 2/(1 - r**2 + eps) * Y[:, 0]\n mink_pts[:, 2] = 2/(1 - r**2 + eps) * Y[:, 1]\n else:\n mink_pts[:, 0] = 2/(1 - r**2 + eps) * Y[:, 0]\n mink_pts[:, 1] = 2/(1 - r**2 + eps) * Y[:, 1]\n mink_pts[:, 2] = 2/(1 - r**2 + eps) * (1 + r**2)/2\n \"\"\"\n return mink_pts\n\n# convert single point to hyperboloid\ndef poincare_pt_to_hyperboloid(y, eps=1e-6, metric='lorentz'):\n mink_pt = np.zeros((y.shape[0] + 1, ))\n # print('mink_pt.shape: {}'.format(mink_pt.shape))\n r = norm(y)\n\n if metric == 'minkowski':\n mink_pt[0] = 2/(1 - r**2 + eps) * (1 + r**2)/2\n for i in range(1, mink_pt.shape[0]):\n mink_pt[i] = 2/(1 - r**2 + eps) * y[i - 1]\n else:\n mink_pt[y.shape[0]] = 2/(1 - r**2 + eps) * (1 + r**2)/2\n for i in range(0, y.shape[0]):\n mink_pt[i] = 2/(1 - r**2 + eps) * y[i]\n \"\"\"\n mink_pt = np.zeros((3, ))\n r = norm(y)\n if metric == 'minkowski':\n mink_pt[0] = 2/(1 - r**2 + eps) * (1 + r**2)/2\n mink_pt[1] = 2/(1 - r**2 + eps) * y[0]\n mink_pt[2] = 2/(1 - r**2 + eps) * y[1]\n else:\n mink_pt[0] = 2/(1 - r**2 + eps) * y[0]\n mink_pt[1] = 2/(1 - r**2 + eps) * y[1]\n mink_pt[2] = 2/(1 - r**2 + eps) * (1 + r**2)/2\n \"\"\"\n return mink_pt\n\n#------------------------------\n#----- Hyperboloid Model ------\n#------------------------------\n\n# NOTE: POSSIBLE ISSUE WITH DIFFERENT WAYS TO SPECIFY MINKOWSKI DOT PRODUCT\n# arbritray sign gives different signatures (+, +, +, -), (+, -, -, -)\n\n# define hyperboloid bilinear form\ndef hyperboloid_dot(u, v):\n # print('U dim: {}'.format(u.shape))\n # print('V dim: {}'.format(v.shape))\n return np.dot(u[:-1], v[:-1]) - u[-1]*v[-1]\n\n# define alternate minkowski/hyperboloid bilinear form\ndef minkowski_dot(u, v):\n return u[0]*v[0] - np.dot(u[1:], v[1:]) \n\n# hyperboloid distance function\ndef hyperboloid_dist(u, v, eps=1e-6, metric='lorentz'):\n if metric == 'minkowski':\n dist = np.arccosh(-1*minkowski_dot(u, v))\n else:\n dist = np.arccosh(-1*hyperboloid_dot(u, v))\n if np.isnan(dist):\n #print('Hyperboloid dist returned nan value')\n return eps\n else:\n return dist\n\n# compute symmetric hyperboloid distance matrix\ndef 
hyperboloid_distances(embedding):\n n = embedding.shape[0]\n dist_matrix = np.zeros((n, n))\n for i in range(n):\n for j in range(i+1, n):\n dist_matrix[i][j] = hyperboloid_dist(embedding[i], embedding[j])\n return dist_matrix\n\n# convert array to poincare disk\ndef hyperboloid_pts_to_poincare(X, eps=1e-6, metric='lorentz'):\n poincare_pts = np.zeros((X.shape[0], X.shape[1]-1))\n if metric == 'minkowski':\n for i in range(0, X.shape[1]):\n poincare_pts[:, i] = X[:, i + 1] / ((X[:, 0] + 1) + eps)\n else:\n for i in range(0, X.shape[1]):\n poincare_pts[:, i] = X[:, i] / ((X[:, -1] + 1) + eps)\n \"\"\" \n if metric == 'minkowski':\n poincare_pts[:, 0] = X[:, 1] / ((X[:, 0]+1) + eps)\n poincare_pts[:, 1] = X[:, 2] / ((X[:, 0]+1) + eps)\n else:\n poincare_pts[:, 0] = X[:, 0] / ((X[:, 2]+1) + eps)\n poincare_pts[:, 1] = X[:, 1] / ((X[:, 2]+1) + eps)\n \"\"\"\n return poincare_pts\n\n# project within disk\ndef proj(theta,eps=1e-3):\n if norm(theta) >= 1:\n theta = theta/norm(theta) - eps\n return theta\n\n# convert single point to poincare\ndef hyperboloid_pt_to_poincare(x, eps=1e-6, metric='lorentz'):\n poincare_pt = np.zeros((x.shape[0] - 1, ))\n if metric == 'minkowski':\n for i in range(0, poincare_pt.shape[0]):\n poincare_pt[i] = x[i + 1] / ((x[0] + 1) + eps)\n else:\n for i in range(0, poincare_pt.shape[0]):\n poincare_pt[i] = x[i] / ((x[-1] + 1) + eps)\n \"\"\"\n poincare_pt = np.zeros((2, ))\n if metric == 'minkowski':\n poincare_pt[0] = x[1] / ((x[0]+1) + eps)\n poincare_pt[1] = x[2] / ((x[0]+1) + eps)\n else:\n poincare_pt[0] = x[0] / ((x[2]+1) + eps)\n poincare_pt[1] = x[1] / ((x[2]+1) + eps)\n \"\"\"\n return proj(poincare_pt)\n \n# helper function to generate samples\ndef generate_data(n, radius=0.7, hyperboloid=False):\n theta = np.random.uniform(0, 2*np.pi, n)\n u = np.random.uniform(0, radius, n)\n r = np.sqrt(u)\n x = r * np.cos(theta)\n y = r * np.sin(theta)\n init_data = np.hstack((x.reshape(-1,1), y.reshape(-1,1)))\n if hyperboloid:\n return poincare_pts_to_hyperboloid(init_data)\n else:\n return init_data\n\n"
] | [
[
"numpy.sin",
"numpy.linalg.norm",
"numpy.isnan",
"numpy.dot",
"numpy.zeros",
"numpy.arccosh",
"numpy.random.uniform",
"numpy.sqrt",
"numpy.cos"
]
] |
parkermac/LiveOcean | [
"bef3e1e729ada1069853dd4f57f79f452b54f4fa"
] | [
"x_tef2/flux_get_vol.py"
] | [
"\"\"\"\nFind the volume of each flux segment\n\"\"\"\n\n# imports\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport pickle\nimport pandas as pd\nimport netCDF4 as nc\n\nimport os; import sys\nsys.path.append(os.path.abspath('../alpha'))\nimport Lfun\nLdir = Lfun.Lstart(gridname='cas6', tag='v3')\nimport zrfun\n\nsys.path.append(os.path.abspath(Ldir['LO'] + 'plotting'))\nimport pfun\n\nimport tef_fun\nfrom importlib import reload\nreload(tef_fun)\n\nimport flux_fun\nreload(flux_fun)\n\n# select output location\noutdir0 = Ldir['LOo'] + 'tef2/'\nLfun.make_dir(outdir0)\noutdir = outdir0 + 'volumes_' + Ldir['gridname'] + '/'\nLfun.make_dir(outdir)\n\nfng = Ldir['grid'] + 'grid.nc'\nG = zrfun.get_basic_info(fng, only_G=True)\nh = np.ma.masked_where(G['mask_rho']==False, G['h'])\nx = G['lon_rho'].data\ny = G['lat_rho'].data\nxp = G['lon_psi'].data\nyp = G['lat_psi'].data\nm = G['mask_rho']\n\nDA = G['DX'] * G['DY']\n\n# get the DataFrame of all sections\nsect_df = tef_fun.get_sect_df()\n\ntesting = False\n\n# segment definitions, assembled by looking at the figure\n# created by flux_seg_map.py\nsegs = flux_fun.segs\n\nif testing == True:\n seg_name_list = ['J4']\n plt.close('all')\n # start a useful plot\n fig = plt.figure(figsize=(10,10))\n ax = fig.add_subplot(111)\n ax.pcolormesh(xp, yp, h[1:-1,1:-1], cmap='terrain_r', vmin=-100, vmax = 400, alpha=.1)\n pfun.dar(ax)\n pfun.add_coast(ax)\nelse:\n seg_name_list = segs.keys()\n\n# initialize a DataFrame to hold all volumes:\nvol_df = pd.DataFrame(index=seg_name_list, columns=['volume m3', 'area m2', 'lon', 'lat'])\n\n# create a dict to hold the bathy file\nbathy_dict = {'h':h, 'xp':xp, 'yp':yp}\n\n# initialize a dict to store all the ji lists that define each segment area\nji_dict = {}\n\nfor seg_name in seg_name_list:\n\n print('Segment: ' + seg_name)\n\n # get the names of the sections around this segment\n seg = segs[seg_name]\n\n # initialize a DataFrame to hold segment info\n seg_df = pd.DataFrame(columns=['ii0','ii1','jj0','jj1',\n 'sdir','side','Lon0','Lon1','Lat0','Lat1',])\n\n # fill the DataFrame for this segment\n for side in list('SNWE'):\n sect_list = seg[side]\n for sect_name in sect_list:\n # get section lat, lon, and other info\n x0, x1, y0, y1 = sect_df.loc[sect_name,:]\n # get indices for this section\n ii0, ii1, jj0, jj1, sdir, Lon, Lat, Mask = tef_fun.get_inds(x0, x1, y0, y1, G)\n Lon0 = Lon.min(); Lon1 = Lon.max()\n Lat0 = Lat.min(); Lat1 = Lat.max()\n seg_df.loc[sect_name,'ii0'] = ii0\n seg_df.loc[sect_name,'ii1'] = ii1\n seg_df.loc[sect_name,'jj0'] = jj0\n seg_df.loc[sect_name,'jj1'] = jj1\n seg_df.loc[sect_name,'sdir'] = sdir\n seg_df.loc[sect_name,'side'] = side\n seg_df.loc[sect_name,'Lon0'] = Lon0\n seg_df.loc[sect_name,'Lon1'] = Lon1\n seg_df.loc[sect_name,'Lat0'] = Lat0\n seg_df.loc[sect_name,'Lat1'] = Lat1\n\n\n if testing:\n # focus the plot axes around this segment\n pad = .5\n ax.axis([seg_df['Lon0'].min()-pad,seg_df['Lon1'].max()+pad,\n seg_df['Lat0'].min()-pad,seg_df['Lat1'].max()+pad])\n\n # initialize a mask\n mm = m.copy().data # boolean array, True over water\n\n # initialize some lists\n full_ji_list = [] # full list of indices of good rho points inside the volume\n this_ji_list = [] # current list of indices of good rho points inside the volume\n next_ji_list = [] # next list of indices of good rho points inside the volume\n\n for sn in seg_df.index:\n s = seg_df.loc[sn,:]\n # mask the rho grid points on the outside of the TEF sections\n if s['sdir'] == 'NS' and s['side'] == 'W':\n 
mm[s['jj0']:s['jj1']+1, s['ii0']] = False\n elif s['sdir'] == 'NS' and s['side'] == 'E':\n mm[s['jj0']:s['jj1']+1, s['ii1']] = False\n if s['sdir'] == 'EW' and s['side'] == 'S':\n mm[s['jj0'], s['ii0']:s['ii1']+1] = False\n if s['sdir'] == 'EW' and s['side'] == 'N':\n mm[s['jj1'], s['ii0']:s['ii1']+1] = False\n # doing this will form a natural barrier for the \"search robot\"\n#\n if testing and True:\n # same as the loop above, but for plottting\n for sn in seg_df.index:\n s = seg_df.loc[sn,:]\n # the black dots show rho gridpoints OUTSIDE of the segment volume, while\n # the magenta ones are those just inside\n if s['sdir'] == 'NS' and s['side'] == 'W':\n ax.plot(x[s['jj0']:s['jj1']+1, s['ii0']], y[s['jj0']:s['jj1']+1, s['ii0']], 'ok')\n ax.plot(x[s['jj0']:s['jj1']+1, s['ii1']], y[s['jj0']:s['jj1']+1, s['ii1']], 'om')\n elif s['sdir'] == 'NS' and s['side'] == 'E':\n ax.plot(x[s['jj0']:s['jj1']+1, s['ii0']], y[s['jj0']:s['jj1']+1, s['ii0']], 'om')\n ax.plot(x[s['jj0']:s['jj1']+1, s['ii1']], y[s['jj0']:s['jj1']+1, s['ii1']], 'ok')\n if s['sdir'] == 'EW' and s['side'] == 'S':\n ax.plot(x[s['jj0'], s['ii0']:s['ii1']+1], y[s['jj0'], s['ii0']:s['ii1']+1], 'ok')\n ax.plot(x[s['jj1'], s['ii0']:s['ii1']+1], y[s['jj1'], s['ii0']:s['ii1']+1], 'om')\n if s['sdir'] == 'EW' and s['side'] == 'N':\n ax.plot(x[s['jj0'], s['ii0']:s['ii1']+1], y[s['jj0'], s['ii0']:s['ii1']+1], 'om')\n ax.plot(x[s['jj1'], s['ii0']:s['ii1']+1], y[s['jj1'], s['ii0']:s['ii1']+1], 'ok')\n#\n # deploy the \"search robot\" flux_fun.update_mm()\n # the algorithm is that we start the search at a good rho point at either end of each TEF\n # section, and allow it to fill in as much of the remaining points as it can\n for sn in seg_df.index:\n #print(sn)\n s = seg_df.loc[sn,:]\n if s['sdir'] == 'NS' and s['side'] == 'W':\n ji = (s['jj0'],s['ii1'])\n mm, this_ji_list, full_ji_list, next_ji_list = flux_fun.update_mm(ji, mm,\n this_ji_list, full_ji_list, next_ji_list)\n ji = (s['jj1'],s['ii1'])\n mm, this_ji_list, full_ji_list, next_ji_list = flux_fun.update_mm(ji, mm,\n this_ji_list, full_ji_list, next_ji_list)\n elif s['sdir'] == 'NS' and s['side'] == 'E':\n ji = (s['jj0'],s['ii0'])\n mm, this_ji_list, full_ji_list, next_ji_list = flux_fun.update_mm(ji, mm,\n this_ji_list, full_ji_list, next_ji_list)\n ji = (s['jj1'],s['ii0'])\n mm, this_ji_list, full_ji_list, next_ji_list = flux_fun.update_mm(ji, mm,\n this_ji_list, full_ji_list, next_ji_list)\n elif s['sdir'] == 'EW' and s['side'] == 'S':\n ji = (s['jj1'],s['ii0'])\n mm, this_ji_list, full_ji_list, next_ji_list = flux_fun.update_mm(ji, mm,\n this_ji_list, full_ji_list, next_ji_list)\n ji = (s['jj1'],s['ii1'])\n mm, this_ji_list, full_ji_list, next_ji_list = flux_fun.update_mm(ji, mm,\n this_ji_list, full_ji_list, next_ji_list)\n elif s['sdir'] == 'EW' and s['side'] == 'N':\n ji = (s['jj0'],s['ii0'])\n mm, this_ji_list, full_ji_list, next_ji_list = flux_fun.update_mm(ji, mm,\n this_ji_list, full_ji_list, next_ji_list)\n ji = (s['jj0'],s['ii1'])\n mm, this_ji_list, full_ji_list, next_ji_list = flux_fun.update_mm(ji, mm,\n this_ji_list, full_ji_list, next_ji_list)\n # check results for extras\n if len(set(full_ji_list)) != len(full_ji_list):\n print(' -- Warning: had to remove duplicates from list')\n full_ji_list = set(full_ji_list)\n \n ji_dict[seg_name] = full_ji_list\n\n if testing:\n hh = np.zeros(h.shape)\n for ji in full_ji_list: \n hh[ji]=1\n H = hh==0\n hm = np.ma.masked_where(H,h)\n ax.pcolormesh(xp, yp, hm[1:-1,1:-1], cmap='terrain_r', vmin=-100, vmax = 400)\n \n#\n # find 
the volume and surface area\n volume = 0\n area = 0\n lon = 0\n lat = 0\n for ji in full_ji_list:\n area += DA[ji]\n volume += h[ji] * DA[ji]\n lon += x[ji]\n lat += y[ji]\n lon = lon / len(full_ji_list)\n lat = lat / len(full_ji_list)\n print(' -- Area = %0.1f km2' % (area/1e6))\n print(' -- Volume = %0.1f km3' % (volume/1e9))\n\n vol_df.loc[seg_name,'volume m3'] = volume\n vol_df.loc[seg_name,'area m2'] = area\n vol_df.loc[seg_name,'lon'] = lon\n vol_df.loc[seg_name,'lat'] = lat\n\nif not testing:\n vol_df.to_pickle(outdir + 'volumes.p')\n pickle.dump(bathy_dict, open(outdir + 'bathy_dict.p', 'wb'))\n pickle.dump(ji_dict, open(outdir + 'ji_dict.p', 'wb'))\n#\nif testing:\n plt.show()\n\n\n"
] | [
[
"numpy.zeros",
"pandas.DataFrame",
"matplotlib.pyplot.close",
"matplotlib.pyplot.figure",
"matplotlib.pyplot.show",
"numpy.ma.masked_where"
]
] |
adanyaev/medical-app | [
"f59b0171f98180364f1b95dc96600c3e7f16f6a5"
] | [
"med/management/commands/parseExcel.py"
] | [
"\n# settings.configure()\n# import os\n# import django\n# os.environ.setdefault(\"DJANGO_SETTINGS_MODULE\", \"mysite.settings\")\n# django.setup()\n\n\n# from . import models\nimport email\nimport pandas\nfrom datetime import time\nimport random\n\nfrom django.core.management.base import BaseCommand\nfrom django.conf import settings\nfrom med.models import *\n\nclass Command(BaseCommand):\n\n def returnTimes(self, medics=None, dayOnRussian=None, index=0):\n if dayOnRussian and not medics.empty:\n if not pandas.isna(medics.iloc[index][dayOnRussian]):\n _ = medics.iloc[index][dayOnRussian].split('-')\n tmp = [time(int(_[0].split(':')[0]), int(_[0].split(':')[1]), 0), time(int(_[1].split(':')[0]), int(_[1].split(':')[1]), 0)]\n return tmp\n else:\n tmp = [None, None]\n return tmp\n\n\n def parseScheduleAndDoctors (self, path=None):\n #medics = pandas.read_excel(path)\n medics = pandas.read_csv(path, encoding='windows-1251')\n medics['Срок действия'] = pandas.to_datetime(medics['Срок действия'])\n print(medics)\n for i in range(len(medics)):\n schedule = Schedule()\n\n schedule.monday = self.returnTimes(medics, 'Пн', i)\n schedule.tuesday = self.returnTimes(medics, 'Вт', i)\n schedule.wednesday = self.returnTimes(medics, 'Ср', i)\n schedule.thursday = self.returnTimes(medics, 'Чт', i)\n schedule.friday = self.returnTimes(medics, 'Пт', i)\n schedule.saturday = self.returnTimes(medics, 'Сб', i)\n schedule.sunday = self.returnTimes(medics, 'Вс', i)\n\n medic = Doctor()\n\n us = User()\n us.first_name = medics.iloc[i]['Имя'].strip()\n us.last_name = medics.iloc[i]['Фамилия'].strip()\n us.patronymic = medics.iloc[i]['Отчество'].strip()\n email_field = medics.iloc[i]['Email'].strip()\n while len(User.objects.filter(email=email_field)) > 0:\n email_field = '1' + email_field\n us.email = email_field\n us.phone = medics.iloc[i]['Номер телефона'].strip()\n us.is_doctor = True\n #us.set_password(\"\".join([chr(random.randint(65, 65+25) + int(random.choice([0, 32]))) for _ in range(12)]))\n us.set_password(\"123\")\n us.save()\n medic.user = us\n\n medic.specialization = medics.iloc[i]['Должность']\n\n schedule.doctor = medic\n\n medic.schedule = schedule\n medic.save()\n\n medic.license = medics.iloc[i][\"Номер медицинской лицензии\"]\n medic.license_date = medics.iloc[i][\"Срок действия\"]\n medic.experience = medics.iloc[i][\"Стаж\"]\n\n _ = list(map(int, medics.iloc[i][\"Клиники\"].strip().split(',')))\n # _ = [random.choice(list(Clinic.objects.all())) for i in range(random.randint(2, 5))]\n for k in range(len(_)):\n medic.clinics.add(_[k])\n medic.save()\n\n schedule.save()\n\n\n def parseClinics(self, path=None):\n clinics = pandas.read_excel(path)\n print(clinics)\n for i in range(len(clinics)):\n clinic = Clinic()\n us = User()\n us.first_name = clinics.iloc[i]['Название клиники']\n us.is_clinic = True\n\n us.email = clinics.iloc[i]['Email'].strip()\n us.phone = clinics.iloc[i]['Телефон']\n us.set_password(str(clinics.iloc[i]['Пароль']))\n\n us.save()\n clinic.user = us\n clinic.specialization = clinics.iloc[i]['Специализация']\n clinic.address = clinics.iloc[i]['Адрес']\n clinic.save()\n\n def parseProcedures(self, path=None):\n procedures = pandas.read_excel(path)\n procedures = procedures[[\"Название процедуры\", \"Описание\", \"Шаги для выполнения перед процедурой\", \"Ответственный за процедуру врач\"]]\n print(procedures)\n for i in range(len(procedures)):\n procedure = Procedure()\n procedure.name = procedures.iloc[i]['Название процедуры']\n procedure.description = 
procedures.iloc[i]['Описание']\n procedure.steps = procedures.iloc[i]['Шаги для выполнения перед процедурой']\n procedure.doctor_spec = procedures.iloc[i]['Ответственный за процедуру врач']\n procedure.save()\n \n return None\n\n def handle(self, *args, **options):\n self.parseClinics(r\"med/excel_files/Kliniki.xls\")\n self.parseScheduleAndDoctors(r\"med/excel_files/Vrachi.csv\")\n self.parseProcedures(r\"med/excel_files/Protsedury.xls\")\n\n"
] | [
[
"pandas.to_datetime",
"pandas.read_csv",
"pandas.isna",
"pandas.read_excel"
]
] |
ajtritt/nwb-api-python | [
"927ac74e1f84b694bd034774bb21aa1ff16b303f"
] | [
"examples/create_scripts/abstract_feature.py"
] | [
"#!/usr/bin/python\nimport sys\nimport numpy as np\nfrom nwb import nwb_file\nfrom nwb import nwb_utils as ut\n\n\"\"\" \nStore the Salient Features of a Drifting Gratings Visual Stimulus\n\nThis example demonstrates how to use an AbstractFeatureSeries to store\ndata that can be summarized by certain high-level features, such as the \nsalient features of a visual stimulus. The standard example of this is \nfor drifting gratings -- the spatial frequency, orientation, phase, \ncontrast and temporal frequency are the most important characteristics \nfor analysis using drifting gratings, not necessarily the stack of all \nframes displayed by the graphics card.\n\"\"\"\n\nOUTPUT_DIR = \"../created_nwb_files/\"\nfile_name = __file__[0:-3] + \".nwb\"\n########################################################################\n# create a new NWB file\n# several settings are specified when doing so. these can be supplied within\n# the NWB constructor or defined in a dict, as in in this example\nsettings = {}\nsettings[\"file_name\"] = OUTPUT_DIR + file_name\n\n# each file should have a descriptive globally unique identifier \n# that specifies the lab and this experiment session\n# the function nwb_utils.create_identifier() is recommended to use as it takes\n# the string and appends the present date and time\nsettings[\"identifier\"] = ut.create_identifier(\"abstract-feature example\")\n\n# indicate that it's OK to overwrite exting file. The default mode\n# (\"w-\") does not overwrite an existing file.\nsettings[\"mode\"] = \"w\"\n\n# specify the start time of the experiment. all times in the NWB file\n# are relative to experiment start time\n# if the start time is not specified the present time will be used\nsettings[\"start_time\"] = \"Sat Jul 04 2015 3:14:16\"\n\n# provide one or two sentences that describe the experiment and what\n# data is in the file\nsettings[\"description\"] = \"Test file demonstrating use of the AbstractFeatureSeries\"\n\n# create the NWB File object. this manages the file\nprint(\"Creating \" + settings[\"file_name\"])\nf = nwb_file.open(**settings)\n\n########################################################################\n# create an AbstractFeatureSeries\n# this will be stored as a 'stimulus' in this example for this example. that\n# means that it will be stored in the following location in the hdf5\n# file: stimulus/presentation/\n\nabs = f.make_group(\"<AbstractFeatureSeries>\", \"my_drifting_grating_features\", path=\"/stimulus/presentation\")\nabs.set_attr(\"description\", \"This is a simulated visual stimulus that presents a moving grating\")\n\n# an AbstractFeatureSeries is an instance of a TimeSeries, with addition\n# of the following fields:\n# features -- describes the abstract features\n# feature_units -- the units that these features are measured in\n# define the abstract features that we're storing, as well as the units\n# of those features (any number of features can be specified)\nfeatures = [ \"orientation\", \"spatial frequency\", \"phase\", \"temporal frequency\"]\nunits = [ \"degrees\", \"Hz\", \"radians\", \"degrees\"]\n# store them\nabs.set_dataset(\"features\", features)\nabs.set_dataset(\"feature_units\", units)\n\n# specify the source of the abstract features. All TimeSeries types should have a\n# source, description and comments specified; otherwise a warning is generated.\nabs.set_attr(\"source\", \"Simulated data. 
Normally this would be the device presenting stimulus\")\n\n# create some pretend data\ndata = np.arange(4000).reshape(1000, 4)\n\n# add data to the time series. for now, ignore the last 3 parameters\nt = np.arange(1000) * 0.001\nabs.set_dataset(\"data\", data)\nabs.set_dataset(\"timestamps\", t)\n\n\n########################################################################\n# it can sometimes be useful to import documenting data from a file\n# in this case, we'll store this script in the metadata section of the\n# file, for a record of how the file was created\nscript_name = sys.argv[0]\nf.set_dataset(\"source_script\", ut.load_file(script_name), attrs= {\n \"file_name\": script_name})\n\n# when all data is entered, close the file\nf.close()\n\n\n"
] | [
[
"numpy.arange"
]
] |
DWesl/basemap | [
"8e9a37e09a65b16429b699f7c12fcab754e1a85a"
] | [
"packages/basemap/doc/users/figures/vandg.py"
] | [
"from mpl_toolkits.basemap import Basemap\nimport numpy as np\nimport matplotlib.pyplot as plt\n# lon_0 is central longitude of projection.\n# resolution = 'c' means use crude resolution coastlines.\nm = Basemap(projection='vandg',lon_0=0,resolution='c')\nm.drawcoastlines()\nm.fillcontinents(color='coral',lake_color='aqua')\n# draw parallels and meridians.\nm.drawparallels(np.arange(-80.,81.,20.))\nm.drawmeridians(np.arange(0.,360.,60.))\nm.drawmapboundary(fill_color='aqua') \nplt.title(\"van der Grinten Projection\")\nplt.show()\n"
] | [
[
"matplotlib.pyplot.show",
"matplotlib.pyplot.title",
"numpy.arange"
]
] |
LaerkeDIKU/lbscTomo | [
"a23b2b23243196f7b150a2b842e5adbe1d25aee5"
] | [
"lib/dataread3d_lib.py"
] | [
"import math\r\nfrom skimage.transform import rotate\r\nfrom skimage.draw import random_shapes\r\nimport numpy as np\r\nimport scipy.io\r\nimport scipy.misc\r\nimport re\r\n\r\ndef clean_str(str):\r\n str = re.sub('f32', '', str)\r\n str = re.sub('i32', '', str)\r\n str = re.sub('\\[', '', str)\r\n str = re.sub('\\]', '', str)\r\n return str\r\n\r\ndef get_sin_slice(f):\r\n sinoslice = clean_str(f.readline())\r\n sino = np.fromstring( sinoslice, dtype=np.float32, sep=',' )\r\n return sino\r\n\r\ndef data_generator(f):\r\n content = clean_str(f.readline())\r\n angles, rhozero, deltarho, initialimg, iterations = [str for str in content.split(\" \")]\r\n angles = np.fromstring( angles, dtype=np.float32, sep=',' )\r\n rhozero = float(rhozero)\r\n deltarho = float(deltarho)\r\n initialimg = np.fromstring( initialimg, dtype=np.float32, sep=',' )\r\n # initialimg = int(initialimg)\r\n iterations = int(iterations)\r\n return angles, rhozero, deltarho, initialimg, iterations\r\n"
] | [
[
"numpy.fromstring"
]
] |
Lukaslong/pytorch-ssd | [
"e35c12f1de795f5cd8c2a5198cab8f66d771d282"
] | [
"vision/ssd/config/mobilenetv2_ssd_config.py"
] | [
"import numpy as np\n\nfrom vision.utils.box_utils import SSDSpec, SSDBoxSizes, generate_ssd_priors\n\n\nimage_size = 300\nimage_mean = np.array([127, 127, 127]) # RGB layout\nimage_std = 128.0\niou_threshold = 0.45\ncenter_variance = 0.1\nsize_variance = 0.2\n\nspecs = [\n SSDSpec(38, 8, SSDBoxSizes(30, 60), [2, 3]),\n SSDSpec(19, 16, SSDBoxSizes(60, 111), [2, 3]),\n SSDSpec(10, 32, SSDBoxSizes(111, 162), [2, 3]),\n SSDSpec(5, 64, SSDBoxSizes(162, 213), [2, 3]),\n SSDSpec(3, 100, SSDBoxSizes(213, 264), [2, 3]),\n SSDSpec(1, 300, SSDBoxSizes(264, 315), [2, 3])\n]\n\n\npriors = generate_ssd_priors(specs, image_size)"
] | [
[
"numpy.array"
]
] |
WilliamAshbee/gan | [
"ab75e87681b9113d3af8df8a3cd97bf3bd69c6ac"
] | [
"catalyst_gan/nn/criterion/bce_gan.py"
] | [
"import torch\nfrom torch import nn\n\n\n# BCE losses\n\n\nclass BCELossGenerator(nn.BCEWithLogitsLoss):\n\n def __init__(self, target=1.0, **kwargs):\n assert 0 <= target <= 1\n self.target = target\n super().__init__(**kwargs)\n\n def forward(self, fake_logits):\n target = self.target * torch.ones_like(fake_logits)\n return super().forward(fake_logits, target)\n\n\nclass BCELossDiscriminator(nn.BCEWithLogitsLoss):\n\n def __init__(self, target_fake=0, target_real=1, **kwargs):\n assert 0 <= target_real <= 1\n assert 0 <= target_fake <= 1\n super().__init__(**kwargs)\n self.target_real = target_real\n self.target_fake = target_fake\n\n def forward(self, fake_logits, real_logits):\n fake_target = torch.ones_like(fake_logits) * self.target_fake\n real_target = torch.ones_like(real_logits) * self.target_real\n\n fake_loss = super().forward(fake_logits, fake_target)\n real_loss = super().forward(real_logits, real_target)\n return fake_loss + real_loss\n\n\nclass BCELossDiscriminatorReal(nn.BCEWithLogitsLoss):\n\n def __init__(self, target_real=1, **kwargs):\n assert 0 <= target_real <= 1\n super().__init__(**kwargs)\n self.target_real = target_real\n\n def forward(self, real_logits):\n real_target = torch.ones_like(real_logits) * self.target_real\n\n fake_loss = super().forward(real_logits, real_target)\n return fake_loss\n\n\nclass BCELossDiscriminatorFake(nn.BCEWithLogitsLoss):\n\n def __init__(self, target_fake=0, **kwargs):\n assert 0 <= target_fake <= 1\n super().__init__(**kwargs)\n self.target_fake = target_fake\n\n def forward(self, fake_logits):\n fake_target = torch.ones_like(fake_logits) * self.target_fake\n\n fake_loss = super().forward(fake_logits, fake_target)\n return fake_loss\n"
] | [
[
"torch.ones_like"
]
] |
prashjha/BayesForSEIRD | [
"b2faa46f73bbca1bbce8705f4a9b91a223ec6d12"
] | [
"Model/seird/seird_problem.py"
] | [
"from __future__ import absolute_import, division, print_function\n\nimport dolfin as dl\nimport numpy as np\nfrom picard_solver import picard_solver\nfrom seird_forms import seird_forms\n\nSTATE = 0\nPARAMETER = 1\n\nclass seird_fwd_problem:\n def __init__(self, Vh, simulation_time, dt, init_vectors, subdmn_path = './subdomains/', mesh_tag = 'mesh_5h', qoi_type='state', reset_exposed = True, save = False, save_days = True, out_path = './fwd_result/'):\n \n self.Vh = Vh\n self.init_vectors = init_vectors\n self.dt = dt\n self._save = save\n self._reset_exposed = reset_exposed\n self._qoi_type = qoi_type\n\n self._save_days = save_days\n \n if not simulation_time.is_integer():\n raise ValueError(\"The total simulation time is should be a whole number.\")\n else:\n self.T = round(simulation_time)\n \n self.nt = round(self.T/self.dt)\n if not self.nt*self.dt == self.T:\n raise ValueError(\"t = 1, 2, ... cannot be reached with the given time step.\")\n \n self.out_freq = round(1./dt)\n \n _linear_solver = dl.PETScKrylovSolver(\"gmres\", \"ilu\")\n _linear_solver.parameters[\"nonzero_initial_guess\"] = True\n \n self._solver = picard_solver(_linear_solver)\n self.problem = seird_forms(Vh, dt, save, out_path, subdmn_path, mesh_tag, qoi_type)\n self.u_0 = self.generate_pde_state()\n self.u = self.generate_pde_state()\n self.u_ic = self.generate_pde_state()\n \n self._set_initial_conditions(self.u_ic)\n \n def generate_state(self):\n \"\"\" Return a vector in the shape of the fwd model output. \"\"\"\n return np.empty((self.T,2))\n \n def generate_state_district(self):\n \"\"\" Return a vector in the shape of the fwd model output. \"\"\"\n return np.empty((self.T, 26, 2))\n\n def generate_pde_state(self):\n \"\"\" Return a list of vectors that correspons to the SEIRD state vectors. \"\"\"\n return [dl.Function(self.Vh[STATE]).vector() for i in range(5)]\n\n def generate_parameter(self):\n \"\"\" Return a vector in the shape of the parameter. \"\"\"\n return dl.Function(self.Vh[PARAMETER]).vector()\n\n def init_parameter(self, m):\n \"\"\" Initialize the parameter. \"\"\"\n dummy = self.generate_parameter()\n m.init( dummy.mpi_comm(), dummy.local_range() )\n \n def _assign_vectors(self, u1, u2):\n for i in range(5):\n u1[i].zero()\n u1[i].axpy(1, u2[i])\n\n def _set_initial_conditions(self,u):\n for i in range(5):\n if not (self._reset_exposed and i == 1):\n u[i].zero()\n u[i].set_local(self.init_vectors[i])\n \n def solveFwd(self, out, x):\n \n# print(\"Solve with parameters \", np.exp(x[PARAMETER].get_local()))\n \n self._assign_vectors(self.u, self.u_ic)\n if self._reset_exposed:\n self.u[1].zero()\n self.u[1].axpy(np.exp(x[PARAMETER].get_local())[-1], self.u[2])\n \n self.problem.set_parameters(x[PARAMETER])\n store_index = 0\n time = 0.0\n for time_index in range (self.nt):\n self._assign_vectors(self.u_0, self.u)\n self._solver.solve(self.problem, self.u, self.u_0)\n if time_index % self.out_freq == 0:\n if self._qoi_type == 'state':\n out[store_index, :] = self.problem.evaluate(self.u)*10000.\n else:\n out[store_index] = self.problem.evaluate_district(self.u)*10000.\n store_index +=1\n time += self.dt\n if self._save:\n if self._save_days and time_index % self.out_freq == 0:\n self.problem.save(self.u, time)\n\n if self._save_days == False:\n self.problem.save(self.u, time)\n if not store_index == self.T:\n raise Exception(\"The forwad solve output does not match the data.\")\n\n"
] | [
[
"numpy.empty"
]
] |
mtcrawshaw/NMT-Hyperparameters | [
"b9f71c9f5e1ab8de2dfea6496a39138fcdf24bb9"
] | [
"onmt/models/model_saver.py"
] | [
"import os\nimport torch\nimport torch.nn as nn\n\nfrom collections import deque\nfrom onmt.utils.logging import logger\n\nfrom copy import deepcopy\n\n\ndef build_model_saver(model_opt, opt, model, fields, optim):\n model_saver = ModelSaver(opt.save_model,\n model,\n model_opt,\n fields,\n optim,\n opt.keep_checkpoint)\n return model_saver\n\n\nclass ModelSaverBase(object):\n \"\"\"Base class for model saving operations\n\n Inherited classes must implement private methods:\n * `_save`\n * `_rm_checkpoint\n \"\"\"\n\n def __init__(self, base_path, model, model_opt, fields, optim,\n keep_checkpoint=-1):\n self.base_path = base_path\n self.model = model\n self.model_opt = model_opt\n self.fields = fields\n self.optim = optim\n self.last_saved_step = None\n self.keep_checkpoint = keep_checkpoint\n if keep_checkpoint > 0:\n self.checkpoint_queue = deque([], maxlen=keep_checkpoint)\n\n def save(self, step, moving_average=None):\n \"\"\"Main entry point for model saver\n\n It wraps the `_save` method with checks and apply `keep_checkpoint`\n related logic\n \"\"\"\n\n if self.keep_checkpoint == 0 or step == self.last_saved_step:\n return\n\n if moving_average:\n save_model = deepcopy(self.model)\n for avg, param in zip(moving_average, save_model.parameters()):\n param.data.copy_(avg.data)\n else:\n save_model = self.model\n\n chkpt, chkpt_name = self._save(step, save_model)\n self.last_saved_step = step\n\n if moving_average:\n del save_model\n\n if self.keep_checkpoint > 0:\n if len(self.checkpoint_queue) == self.checkpoint_queue.maxlen:\n todel = self.checkpoint_queue.popleft()\n self._rm_checkpoint(todel)\n self.checkpoint_queue.append(chkpt_name)\n\n def _save(self, step):\n \"\"\"Save a resumable checkpoint.\n\n Args:\n step (int): step number\n\n Returns:\n (object, str):\n\n * checkpoint: the saved object\n * checkpoint_name: name (or path) of the saved checkpoint\n \"\"\"\n\n raise NotImplementedError()\n\n def _rm_checkpoint(self, name):\n \"\"\"Remove a checkpoint\n\n Args:\n name(str): name that indentifies the checkpoint\n (it may be a filepath)\n \"\"\"\n\n raise NotImplementedError()\n\n\nclass ModelSaver(ModelSaverBase):\n \"\"\"Simple model saver to filesystem\"\"\"\n\n def _save(self, step, model):\n real_model = (model.module\n if isinstance(model, nn.DataParallel)\n else model)\n real_generator = (real_model.generator.module\n if isinstance(real_model.generator, nn.DataParallel)\n else real_model.generator)\n\n model_state_dict = real_model.state_dict()\n model_state_dict = {k: v for k, v in model_state_dict.items()\n if 'generator' not in k}\n generator_state_dict = real_generator.state_dict()\n checkpoint = {\n 'model': model_state_dict,\n 'generator': generator_state_dict,\n 'vocab': self.fields,\n 'opt': self.model_opt,\n 'optim': self.optim.state_dict(),\n }\n\n logger.info(\"Saving checkpoint %s.pt\" % self.base_path)\n checkpoint_path = '%s.pt' % self.base_path\n torch.save(checkpoint, checkpoint_path)\n return checkpoint, checkpoint_path\n\n def _rm_checkpoint(self, name):\n os.remove(name)\n"
] | [
[
"torch.save"
]
] |
iclavera/cassie | [
"f2e253bf29fa0f872974188aed1fdfbe06efc37e"
] | [
"softlearning/environments/cassie/new_cassie_env.py"
] | [
"from softlearning.environments.cassie.assets.cassiemujoco import CassieSim, CassieVis, cassie_user_in_t\nimport numpy as np\nfrom gym import utils\nfrom gym import spaces\nimport gym\n\n\n# CASSIE_TORQUE_LIMITS = np.array([4.5*25, 4.5*25, 12.2*16, 12.2*16, 0.9*50]) # ctrl_limit * gear_ratio\n# CASSIE_MOTOR_VEL_LIMIT = np.array([2900, 2900, 1300, 1300, 5500]) / 60 / (2*np.pi) # max_rpm / 60 / 2*pi\n# P_GAIN_RANGE = [10, 10000]\n# D_GAIN_RANGE = [1, 100]\n# MODEL_TIMESTEP = 0.001\n#\n# DEFAULT_P_GAIN = 200\n# DEFAULT_D_GAIN = 20\n#\n# NUM_QPOS = 34\n# NUM_QVEL = 32\n#\n# CTRL_COST_COEF = 0.001\n# STABILISTY_COST_COEF = 0.01\n\n\nclass CassieEnv(gym.Env, utils.EzPickle):\n _JOINT_NAMES = ['hipRollDrive', 'hipYawDrive', 'hipPitchDrive', 'kneeDrive', 'shinJoint', 'tarsusJoint', 'footDrive']\n\n def __init__(self, render=False, fix_pelvis=False, frame_skip=20,\n stability_cost_coef=1e-2, ctrl_cost_coef=1e-3, alive_bonus=0.2, impact_cost_coef=1e-5,\n rotation_cost_coef=1e-2, policytask='running', ctrl_type='T', apply_forces=False):\n\n print('fr_skip:', frame_skip, 'task', policytask)\n\n assert ctrl_type in ['T', 'P', 'V', 'TP', 'TV', 'PV', 'TPV']\n assert ctrl_type == 'T'\n # T: Torque ctrl # TP: Torque + Position ctrl # None or all: Torque + Position + Velocity\n # P: Positon ctrl # TV: Torque + Velocity ctrl\n # V: Velocity ctrl # PV: Position + Velocity ctr\n\n self.sim = CassieSim()\n if render:\n self.vis = CassieVis()\n else:\n self.vis = None\n\n self.fix_pelvis = fix_pelvis\n self.model_timestep = 0.0005 * frame_skip\n self.act_dim = 10\n self.obs_dim = 43\n self.frame_skip = frame_skip\n self.task = policytask\n self.ctrl_type = ctrl_type\n self.apply_forces = apply_forces\n\n # Reward function coeffs\n self.stability_cost_coef = stability_cost_coef\n self.ctrl_cost_coef = ctrl_cost_coef\n self.impact_cost_coef = impact_cost_coef\n self.alive_bonus = alive_bonus\n self.rotation_cost_coef = rotation_cost_coef\n\n self._time_step = 0\n self._torque_limits = np.array([4.5, 4.5, 12.2, 12.2, 0.9] * 2)\n\n if fix_pelvis:\n self.sim.hold()\n\n utils.EzPickle.__init__(self, locals())\n\n def _get_obs(self):\n # internal_state = self.sim.recv_wait()\n # foot_state = self.sim.recv_wait_state()\n # _, pelvis_ang_vel, _, pelvis_magnetic_field = self.sim.pelvis_measurements\n #\n # pelvis_pos_rel_to_r_foot = -np.array(foot_state.rightFoot.position)\n # pelvis_vel_rel_to_r_foot = -np.array(foot_state.rightFoot.footTranslationalVelocity)\n #\n # qpos_left = [np.array(getattr(internal_state.leftLeg, joint).position) for joint in self._JOINT_NAMES]\n # qpos_right = [np.array(getattr(internal_state.rightLeg, joint).position) for joint in self._JOINT_NAMES]\n #\n # qvel_left = [np.array(getattr(internal_state.leftLeg, joint).velocity) for joint in self._JOINT_NAMES]\n # qvel_right = [np.array(getattr(internal_state.rightLeg, joint).velocity) for joint in self._JOINT_NAMES]\n #\n # obs = np.concatenate([pelvis_pos_rel_to_r_foot,\n # pelvis_magnetic_field,\n # qpos_left,\n # qpos_right,\n # pelvis_vel_rel_to_r_foot,\n # pelvis_ang_vel,\n # qvel_left,\n # qvel_right])\n\n state = self.sim.get_state()\n qpos_idx = [2, 3, 4, 5, 6,\n 7, 8, 9, 14, 20, 21, 22, 23, 28, 34,\n 15, 16, 20, 29, 30, 34\n ]\n # qpos_idx = [1, 2, 3, 4, 5, 6]\n qpos = np.asarray(state.qpos())[qpos_idx]\n # qvel_idx = [0, 1, 2, 3, 4, 5, 6, 7, 8, 12, 13, 14, 18, 19, 20, 21, 25, 26, 27, 31]\n qvel_idx = [0, 1, 2, 3, 4, 5,\n 6, 7, 8, 12, 18, 19, 20, 21, 25, 31,\n 13, 14, 18, 26, 27, 31]\n # qvel_idx = [0, 1, 2, 3, 4, 5]\n qvel = 
np.asarray(state.qvel())[qvel_idx]\n\n # obs = np.concatenate(\n # [qpos, qvel, motor_pos, joint_pos, motor_vel, joint_vel], axis=0)\n\n obs = np.concatenate([qpos, qvel])\n\n return obs\n\n def step(self, action):\n assert action.ndim == 1 and action.shape == (self.act_dim,)\n if self.apply_forces and self._time_step % 10 == 0:\n self.apply_random_force()\n _ = self.do_simulation(action, self.frame_skip)\n obs = self._get_obs()\n\n reward, forward_vel = self.reward()\n\n done = self.done()\n info = {'forward_vel': forward_vel}\n\n return obs, reward, done, info\n\n def reset(self):\n self.sim = CassieSim()\n if self.fix_pelvis: self.sim.hold()\n _ = self.do_simulation(np.zeros(self.act_dim), 1)\n self._time_step = 0\n return self._get_obs()\n\n def do_simulation(self, action, n_frames):\n assert n_frames >= 1\n u = self._action_to_user_in_t(action)\n for _ in range(n_frames):\n internal_state = self.sim.step(u)\n return internal_state\n\n def done(self):\n state = self.sim.get_state()\n pelvis_pos = np.array(state.qpos())\n return pelvis_pos[2] < 0.65\n\n def reward(self):\n internal_state = self.sim.recv_wait_state()\n state = self.sim.get_state()\n # reward fct\n qvel = np.array(state.qvel())\n pelvis_rot_vel = qvel[3:6]\n pelvis_transl_vel = qvel[:3]\n\n foot_forces = self.get_foot_forces(internal_state)\n motor_torques = _to_np(internal_state.motor.torque)\n forward_vel = pelvis_transl_vel[0]\n\n ctrl_cost = self.ctrl_cost_coef * 0.5 * np.mean(np.square(motor_torques/self._torque_limits))\n stability_cost = self.stability_cost_coef * 0.5 * np.mean(np.square(pelvis_transl_vel[1:])) # quadratic velocity of pelvis in y and z direction ->\n rotation_cost = self.rotation_cost_coef * 0.5 * np.mean(np.square(pelvis_rot_vel))# enforces to hold the pelvis in same position while walking\n impact_cost = self.impact_cost_coef * 0.5 * np.sum(np.square(np.clip(foot_forces, -1, 1)))\n\n if self.task == 'balancing':\n vel_cost = self.stability_cost_coef * forward_vel ** 2\n reward = - vel_cost - ctrl_cost - stability_cost - impact_cost + self.alive_bonus\n elif self.task == 'fixed-vel':\n vel_reward = np.exp(- (2.3 - forward_vel) ** 2)\n reward = vel_reward - ctrl_cost - stability_cost - rotation_cost - impact_cost + self.alive_bonus\n else:\n reward = forward_vel - ctrl_cost - stability_cost - rotation_cost - impact_cost + self.alive_bonus\n return reward, forward_vel\n\n def render(self, *args, **kwargs):\n if self.vis is None:\n print('Setting up cassie visualizer')\n self.setup_cassie_vis()\n self.vis.draw(self.sim)\n\n def get_foot_forces(self, internal_state):\n left_toe = _to_np(internal_state.leftFoot.toeForce)\n left_heel = _to_np(internal_state.leftFoot.heelForce)\n right_toe = _to_np(internal_state.rightFoot.toeForce)\n right_heel = _to_np(internal_state.rightFoot.heelForce)\n return np.concatenate([left_toe, left_heel, right_toe, right_heel])\n\n def apply_random_force(self):\n force = np.zeros((6,))\n y_force = np.random.choice([0, 10, 25, 50]) * np.random.choice([-1, 1])\n force[1] = y_force\n self.sim.apply_force(force)\n\n def _action_to_user_in_t(self, action):\n u = cassie_user_in_t()\n for i in range(self.act_dim):\n u.torque[i] = action[i]\n return u\n\n @property\n def torque_limits(self):\n return np.concatenate([self.parameters['cassie_torque_limits']] * 2)\n\n @property\n def dt(self):\n return self.model_timestep\n\n @property\n def action_space(self):\n return spaces.Box(low=-self._torque_limits, high=self._torque_limits, dtype=np.float32)\n\n @property\n def 
observation_space(self):\n obs_limit = np.inf * np.ones(self.obs_dim)\n return spaces.Box(-obs_limit, obs_limit, dtype=np.float32)\n\n def setup_cassie_vis(self):\n self.vis = CassieVis()\n\n def log_diagnostics(self, paths):\n pass\n # forward_vel = [np.mean(path['env_infos']['forward_vel']) for path in paths]\n # ctrl_cost = [np.mean(path['env_infos']['ctrl_cost']) for path in paths]\n # stability_cost = [np.mean(path['env_infos']['stability_cost']) for path in paths]\n # path_length = [path[\"observations\"].shape[0] for path in paths]\n #\n # logger.record_tabular('AvgForwardVel', np.mean(forward_vel))\n # logger.record_tabular('StdForwardVel', np.std(forward_vel))\n # logger.record_tabular('AvgCtrlCost', np.mean(ctrl_cost))\n # logger.record_tabular('AvgStabilityCost', np.mean(stability_cost))\n # logger.record_tabular('AvgPathLength', np.mean(path_length))\n\n\ndef _to_np(o, dtype=np.float32):\n return np.array([o[i] for i in range(len(o))], dtype=dtype)\n\n\nif __name__ == '__main__':\n render = True\n env = CassieEnv(render=render, fix_pelvis=False, frame_skip=200)\n import time\n\n for i in range(5):\n obs = env.reset()\n for j in range(50000):\n cum_forward_vel = 0\n act = env.action_space.sample()\n env.apply_random_force()\n obs, reward, done, info = env.step(act)\n if render:\n env.render()\n time.sleep(1)\n # if done:\n # break\n"
] | [
[
"numpy.concatenate",
"numpy.square",
"numpy.array",
"numpy.random.choice",
"numpy.zeros",
"numpy.ones",
"numpy.exp",
"numpy.clip"
]
] |
renjunxiang/enjoy_myself | [
"6a4a48afd2ab234a71d1794a808f5f29bb040f94"
] | [
"chatbot/net/seq2seq.py"
] | [
"from keras.models import Model\nfrom keras.layers import Input, Embedding, GRU, Bidirectional, Dense, \\\n RepeatVector, Masking, concatenate, Reshape, TimeDistributed\nfrom keras.optimizers import SGD, Adagrad, Adam\n\n\ndef seq2seq(input_dic_len=10,\n input_len=10,\n vector_len=20,\n hidden_dim=32,\n output_dim=10,\n output_len=10):\n '''\n [X1,X2,...,Xm]->cell+hide+output->[Y1+cell1+hide1=RNN(cell+h,o),Y2=RNN(cell1+hide1,Y1),...,Yn=RNN(cell n-1 + hide n-1,Yn-1)] \n :param input_dic_len: 输入的字典长度\n :param input_len: 输入的文本长度\n :param vector_len: 词向量维度\n :param hidden_dim: encoding结尾的全连接节点数和rnn核数量\n :param output_dim: 输出的字典长度\n :param output_len: 输出的文本长度\n :return: \n '''\n\n # input_dic_len=10\n # input_len=10\n # vector_len=20\n # hidden_dim=32\n # output_dim = 10\n # output_len=10\n\n data_input = Input(shape=[input_len])\n # 创建词向量\n word_vec = Embedding(input_dim=input_dic_len + 1,\n input_length=input_len,\n output_dim=vector_len,\n mask_zero=0,\n name='Embedding')(data_input)\n # encoding过程\n rnn_encoding, state_h = GRU(units=hidden_dim,\n activation='tanh',\n recurrent_activation='hard_sigmoid',\n return_sequences=False,\n return_state=True)(word_vec)\n data_encoding = Dense(units=hidden_dim,\n activation='relu',\n name='encoding')(rnn_encoding)\n # decoding过程\n # encoding的状态作为decoding的初始,后续输出作为下一个输入\n initial_state = state_h\n decoding_input = RepeatVector(n=1)(data_encoding)\n data_decoding = []\n rnn_decoding = GRU(units=hidden_dim,\n return_sequences=False,\n return_state=True,\n activation=\"relu\",\n name='decoding')\n for i in range(output_len):\n decoding_output, state_h = rnn_decoding(decoding_input, initial_state=initial_state)\n data_decoding.append(decoding_output)\n initial_state = state_h\n decoding_input = RepeatVector(n=1)(decoding_output)\n rnn_decoding = concatenate(inputs=data_decoding, axis=-1)\n rnn_decoding = Reshape(target_shape=[output_len, hidden_dim])(rnn_decoding)\n data_decoding = TimeDistributed(Dense(units=output_dim, activation=\"relu\"),\n name='TimeDistributed')(rnn_decoding)\n optimizer = Adam(lr=0.01)\n model = Model(inputs=data_input, outputs=data_decoding)\n model.compile(optimizer=optimizer, loss='mse', metrics=['accuracy'])\n return model\n\n\nif __name__ == '__main__':\n from keras.preprocessing.text import Tokenizer\n from keras.preprocessing.sequence import pad_sequences\n from keras.utils import to_categorical\n import numpy as np\n import random\n\n input_len = 8\n ask_transform = [[random.choice('abcdefghijklmnopqrstuvwxyz') for j in range(random.randint(3, input_len))]\n for i in range(5000)]\n answer_transform = [[j.upper() for j in i] for i in ask_transform]\n '''\n ask_transform = [[random.choice('0123456789') for j in range(random.randint(3, input_len))]\n for i in range(5000)]\n v = {'0': '零', '1': '一', '2': '二', '3': '三', '4': '四',\n '5': '五', '6': '六', '7': '七', '8': '八', '9': '九'}\n answer_transform = [[v[j] for j in i] for i in ask_transform]\n\n '''\n\n tokenizer_ask = Tokenizer()\n tokenizer_ask.fit_on_texts(texts=ask_transform)\n ask_seq = tokenizer_ask.texts_to_sequences(texts=ask_transform)\n ask_new = pad_sequences(ask_seq, maxlen=input_len, padding='post', value=0, dtype='int')\n\n output_len = 8\n tokenizer_answer = Tokenizer()\n tokenizer_answer.fit_on_texts(texts=answer_transform)\n answer_seq = tokenizer_answer.texts_to_sequences(texts=answer_transform)\n answer_new = pad_sequences(answer_seq, maxlen=output_len, padding='post', value=0, dtype='int')\n answer_categorical = to_categorical(answer_new)\n\n model_seq2seq = 
seq2seq(input_dic_len=len(tokenizer_ask.word_index),\n input_len=input_len,\n vector_len=20,\n hidden_dim=50,\n output_dim=answer_categorical.shape[2],\n output_len=output_len)\n\n model_seq2seq.fit(x=ask_new, y=answer_categorical, batch_size=50, epochs=20, validation_split=0.2, verbose=2)\n\n answer_key = list(tokenizer_answer.word_index.keys())\n answer_values = list(tokenizer_answer.word_index.values())\n\n\n def chatbot(text=None):\n text = tokenizer_ask.texts_to_sequences(texts=[text])\n text_new = pad_sequences(text, maxlen=input_len, padding='post', value=0, dtype='float32')\n result = model_seq2seq.predict(text_new)[0]\n result = [np.argmax(i) for i in result]\n result = [answer_key[answer_values.index(i)] for i in result if i in answer_values]\n return result\n\n\n for i in ask_transform[0:20]:\n print('ask:', i, 'answer:', chatbot(text=i))\n"
] | [
[
"numpy.argmax"
]
] |
mrcsfltchr/MaskFastRCNN4GUVs | [
"0ac10832c71d481b9390b9b86f7304af82d05f52"
] | [
"frcnn/visualize.py"
] | [
"\"\"\"\nMask R-CNN\nDisplay and Visualization Functions.\n\nCopyright (c) 2017 Matterport, Inc.\nLicensed under the MIT License (see LICENSE for details)\nWritten by Waleed Abdulla\n\"\"\"\n\nimport os\nimport sys\nimport random\nimport itertools\nimport colorsys\n\nimport numpy as np\nfrom skimage.measure import find_contours\nimport matplotlib.pyplot as plt\nfrom matplotlib import patches, lines\nfrom matplotlib.patches import Polygon\nimport IPython.display\n\n# Root directory of the project\nROOT_DIR = os.path.abspath(\"../\")\n\n# Import Mask RCNN\nsys.path.append(ROOT_DIR) # To find local version of the library\nfrom frcnn import utils\n\n\n############################################################\n# Visualization\n############################################################\n\ndef display_images(images, titles=None, cols=4, cmap=None, norm=None,\n interpolation=None):\n \"\"\"Display the given set of images, optionally with titles.\n images: list or array of image tensors in HWC format.\n titles: optional. A list of titles to display with each image.\n cols: number of images per row\n cmap: Optional. Color map to use. For example, \"Blues\".\n norm: Optional. A Normalize instance to map values to colors.\n interpolation: Optional. Image interpolation to use for display.\n \"\"\"\n titles = titles if titles is not None else [\"\"] * len(images)\n rows = len(images) // cols + 1\n plt.figure(figsize=(14, 14 * rows // cols))\n i = 1\n for image, title in zip(images, titles):\n plt.subplot(rows, cols, i)\n plt.title(title, fontsize=9)\n plt.axis('off')\n plt.imshow(image.astype(np.uint8), cmap=cmap,\n norm=norm, interpolation=interpolation)\n i += 1\n plt.show()\n\n\ndef random_colors(N, bright=True):\n \"\"\"\n Generate random colors.\n To get visually distinct colors, generate them in HSV space then\n convert to RGB.\n \"\"\"\n brightness = 1.0 if bright else 0.7\n hsv = [(i / N, 1, brightness) for i in range(N)]\n colors = list(map(lambda c: colorsys.hsv_to_rgb(*c), hsv))\n random.shuffle(colors)\n return colors\n\n\ndef apply_mask(image, mask, color, alpha=0.5):\n \"\"\"Apply the given mask to the image.\n \"\"\"\n for c in range(3):\n image[:, :, c] = np.where(mask == 1,\n image[:, :, c] *\n (1 - alpha) + alpha * color[c] * 255,\n image[:, :, c])\n return image\n\n\ndef display_instances(image, boxes, class_ids, class_names, masks= None,\n scores=None, title=\"\",\n figsize=(16, 16), ax=None,\n show_mask=False, show_bbox=True,\n colors=None, captions=None):\n \"\"\"\n boxes: [num_instance, (y1, x1, y2, x2, class_id)] in image coordinates.\n masks: [height, width, num_instances]\n class_ids: [num_instances]\n class_names: list of class names of the dataset\n scores: (optional) confidence scores for each box\n title: (optional) Figure title\n show_mask, show_bbox: To show masks and bounding boxes or not\n figsize: (optional) the size of the image\n colors: (optional) An array or colors to use with each object\n captions: (optional) A list of strings to use as captions for each object\n \"\"\"\n # Number of instances\n N = boxes.shape[0]\n if not N:\n print(\"\\n*** No instances to display *** \\n\")\n else:\n if show_bbox and show_mask:\n assert boxes.shape[0] == masks.shape[-1] == class_ids.shape[0]\n elif show_bbox and not show_mask:\n assert boxes.shape[0] == class_ids.shape[0]\n elif not show_bbox and show_mask:\n assert masks.shape[-1] == class_ids.shape[0]\n\n # If no axis is passed, create one and automatically call show()\n auto_show = False\n if not ax:\n _, ax = 
plt.subplots(1, figsize=figsize)\n auto_show = True\n\n # Generate random colors\n colors = colors or random_colors(N)\n\n # Show area outside image boundaries.\n height, width = image.shape[:2]\n ax.set_ylim(height + 10, -10)\n ax.set_xlim(-10, width + 10)\n ax.axis('off')\n ax.set_title(title)\n\n masked_image = image.astype(np.uint32).copy()\n for i in range(N):\n color = colors[i]\n\n # Bounding box\n if not np.any(boxes[i]):\n # Skip this instance. Has no bbox. Likely lost in image cropping.\n continue\n y1, x1, y2, x2 = boxes[i]\n print(boxes[i])\n if show_bbox:\n p = patches.Rectangle((x1, y1), x2 - x1, y2 - y1, linewidth=2,\n alpha=0.7, linestyle=\"dashed\",\n edgecolor=color, facecolor='none')\n ax.add_patch(p)\n\n # Label\n if not captions:\n class_id = class_ids[i]\n score = scores[i] if scores is not None else None\n label = class_names[class_id]\n caption = \"{} {:.3f}\".format(label, score) if score else label\n else:\n caption = captions[i]\n ax.text(x1, y1 + 8, caption,\n color='w', size=11, backgroundcolor=\"none\")\n\n # Mask\n if show_mask:\n mask = masks[:, :, i]\n masked_image = apply_mask(masked_image, mask, color)\n\n # Mask Polygon\n # Pad to ensure proper polygons for masks that touch image edges.\n padded_mask = np.zeros(\n (mask.shape[0] + 2, mask.shape[1] + 2), dtype=np.uint8)\n padded_mask[1:-1, 1:-1] = mask\n contours = find_contours(padded_mask, 0.5)\n for verts in contours:\n # Subtract the padding and flip (y, x) to (x, y)\n verts = np.fliplr(verts) - 1\n p = Polygon(verts, facecolor=\"none\", edgecolor=color)\n ax.add_patch(p)\n ax.imshow(masked_image.astype(np.uint8))\n if auto_show:\n plt.show()\n\n\ndef display_differences(image,\n gt_box, gt_class_id, \n pred_box, pred_class_id, pred_score,\n class_names, gt_mask= None, pred_mask = None,title=\"\", ax=None,\n show_mask=True, show_box=True,\n iou_threshold=0.5, score_threshold=0.5):\n \"\"\"Display ground truth and prediction instances on the same image.\"\"\"\n # Match predictions to ground truth\n gt_match, pred_match, overlaps = utils.compute_matches(\n gt_box, gt_class_id,\n pred_box, pred_class_id, pred_score, gt_mask,pred_mask,\n iou_threshold=iou_threshold, score_threshold=score_threshold)\n # Ground truth = green. 
Predictions = red\n colors = [(0, 1, 0, .8)] * len(gt_match)\\\n + [(1, 0, 0, 1)] * len(pred_match)\n # Concatenate GT and predictions\n class_ids = np.concatenate([gt_class_id, pred_class_id])\n scores = np.concatenate([np.zeros([len(gt_match)]), pred_score])\n boxes = np.concatenate([gt_box, pred_box])\n if gt_mask is not None:\n masks = np.concatenate([gt_mask, pred_mask], axis=-1)\n else:\n masks = None\n # Captions per instance show score/IoU\n captions = [\"\" for m in gt_match] + [\"{:.2f} / {:.2f}\".format(\n pred_score[i],\n (overlaps[i, int(pred_match[i])]\n if pred_match[i] > -1 else overlaps[i].max()))\n for i in range(len(pred_match))]\n # Set title if not provided\n title = title or \"Ground Truth and Detections\\n GT=green, pred=red, captions: score/IoU\"\n # Display\n display_instances(\n image,\n boxes, class_ids,\n class_names,masks, scores, ax=ax,\n show_bbox=show_box, show_mask=show_mask,\n colors=colors, captions=captions,\n title=title)\n\n\ndef draw_rois(image, rois, refined_rois, mask, class_ids, class_names, limit=10):\n \"\"\"\n anchors: [n, (y1, x1, y2, x2)] list of anchors in image coordinates.\n proposals: [n, 4] the same anchors but refined to fit objects better.\n \"\"\"\n masked_image = image.copy()\n\n # Pick random anchors in case there are too many.\n ids = np.arange(rois.shape[0], dtype=np.int32)\n ids = np.random.choice(\n ids, limit, replace=False) if ids.shape[0] > limit else ids\n\n fig, ax = plt.subplots(1, figsize=(12, 12))\n if rois.shape[0] > limit:\n plt.title(\"Showing {} random ROIs out of {}\".format(\n len(ids), rois.shape[0]))\n else:\n plt.title(\"{} ROIs\".format(len(ids)))\n\n # Show area outside image boundaries.\n ax.set_ylim(image.shape[0] + 20, -20)\n ax.set_xlim(-50, image.shape[1] + 20)\n ax.axis('off')\n\n for i, id in enumerate(ids):\n color = np.random.rand(3)\n class_id = class_ids[id]\n # ROI\n y1, x1, y2, x2 = rois[id]\n p = patches.Rectangle((x1, y1), x2 - x1, y2 - y1, linewidth=2,\n edgecolor=color if class_id else \"gray\",\n facecolor='none', linestyle=\"dashed\")\n ax.add_patch(p)\n # Refined ROI\n if class_id:\n ry1, rx1, ry2, rx2 = refined_rois[id]\n p = patches.Rectangle((rx1, ry1), rx2 - rx1, ry2 - ry1, linewidth=2,\n edgecolor=color, facecolor='none')\n ax.add_patch(p)\n # Connect the top-left corners of the anchor and proposal for easy visualization\n ax.add_line(lines.Line2D([x1, rx1], [y1, ry1], color=color))\n\n # Label\n label = class_names[class_id]\n ax.text(rx1, ry1 + 8, \"{}\".format(label),\n color='w', size=11, backgroundcolor=\"none\")\n\n # Mask\n m = utils.unmold_mask(mask[id], rois[id]\n [:4].astype(np.int32), image.shape)\n masked_image = apply_mask(masked_image, m, color)\n\n ax.imshow(masked_image)\n\n # Print stats\n print(\"Positive ROIs: \", class_ids[class_ids > 0].shape[0])\n print(\"Negative ROIs: \", class_ids[class_ids == 0].shape[0])\n print(\"Positive Ratio: {:.2f}\".format(\n class_ids[class_ids > 0].shape[0] / class_ids.shape[0]))\n\n\n# TODO: Replace with matplotlib equivalent?\ndef draw_box(image, box, color):\n \"\"\"Draw 3-pixel width bounding boxes on the given image array.\n color: list of 3 int values for RGB.\n \"\"\"\n y1, x1, y2, x2 = box\n image[y1:y1 + 2, x1:x2] = color\n image[y2:y2 + 2, x1:x2] = color\n image[y1:y2, x1:x1 + 2] = color\n image[y1:y2, x2:x2 + 2] = color\n return image\n\n\ndef display_top_masks(image, mask, class_ids, class_names, limit=4):\n \"\"\"Display the given image and the top few class masks.\"\"\"\n to_display = []\n titles = []\n 
to_display.append(image)\n titles.append(\"H x W={}x{}\".format(image.shape[0], image.shape[1]))\n # Pick top prominent classes in this image\n unique_class_ids = np.unique(class_ids)\n mask_area = [np.sum(mask[:, :, np.where(class_ids == i)[0]])\n for i in unique_class_ids]\n top_ids = [v[0] for v in sorted(zip(unique_class_ids, mask_area),\n key=lambda r: r[1], reverse=True) if v[1] > 0]\n # Generate images and titles\n for i in range(limit):\n class_id = top_ids[i] if i < len(top_ids) else -1\n # Pull masks of instances belonging to the same class.\n m = mask[:, :, np.where(class_ids == class_id)[0]]\n m = np.sum(m * np.arange(1, m.shape[-1] + 1), -1)\n to_display.append(m)\n titles.append(class_names[class_id] if class_id != -1 else \"-\")\n display_images(to_display, titles=titles, cols=limit + 1, cmap=\"Blues_r\")\n\n\ndef plot_precision_recall(AP, precisions, recalls):\n \"\"\"Draw the precision-recall curve.\n\n AP: Average precision at IoU >= 0.5\n precisions: list of precision values\n recalls: list of recall values\n \"\"\"\n # Plot the Precision-Recall curve\n _, ax = plt.subplots(1)\n ax.set_title(\"Precision-Recall Curve. AP@50 = {:.3f}\".format(AP))\n ax.set_ylim(0, 1.1)\n ax.set_xlim(0, 1.1)\n _ = ax.plot(recalls, precisions)\n\n\ndef plot_overlaps(gt_class_ids, pred_class_ids, pred_scores,\n overlaps, class_names, threshold=0.5):\n \"\"\"Draw a grid showing how ground truth objects are classified.\n gt_class_ids: [N] int. Ground truth class IDs\n pred_class_id: [N] int. Predicted class IDs\n pred_scores: [N] float. The probability scores of predicted classes\n overlaps: [pred_boxes, gt_boxes] IoU overlaps of predictions and GT boxes.\n class_names: list of all class names in the dataset\n threshold: Float. The prediction probability required to predict a class\n \"\"\"\n gt_class_ids = gt_class_ids[gt_class_ids != 0]\n pred_class_ids = pred_class_ids[pred_class_ids != 0]\n\n plt.figure(figsize=(12, 10))\n plt.imshow(overlaps, interpolation='nearest', cmap=plt.cm.Blues)\n plt.yticks(np.arange(len(pred_class_ids)),\n [\"{} ({:.2f})\".format(class_names[int(id)], pred_scores[i])\n for i, id in enumerate(pred_class_ids)])\n plt.xticks(np.arange(len(gt_class_ids)),\n [class_names[int(id)] for id in gt_class_ids], rotation=90)\n\n thresh = overlaps.max() / 2.\n for i, j in itertools.product(range(overlaps.shape[0]),\n range(overlaps.shape[1])):\n text = \"\"\n if overlaps[i, j] > threshold:\n text = \"match\" if gt_class_ids[j] == pred_class_ids[i] else \"wrong\"\n color = (\"white\" if overlaps[i, j] > thresh\n else \"black\" if overlaps[i, j] > 0\n else \"grey\")\n plt.text(j, i, \"{:.3f}\\n{}\".format(overlaps[i, j], text),\n horizontalalignment=\"center\", verticalalignment=\"center\",\n fontsize=9, color=color)\n\n plt.tight_layout()\n plt.xlabel(\"Ground Truth\")\n plt.ylabel(\"Predictions\")\n\n\ndef draw_boxes(image, boxes=None, refined_boxes=None,\n masks=None, captions=None, visibilities=None,\n title=\"\", ax=None):\n \"\"\"Draw bounding boxes and segmentation masks with different\n customizations.\n\n boxes: [N, (y1, x1, y2, x2, class_id)] in image coordinates.\n refined_boxes: Like boxes, but draw with solid lines to show\n that they're the result of refining 'boxes'.\n masks: [N, height, width]\n captions: List of N titles to display on each box\n visibilities: (optional) List of values of 0, 1, or 2. 
Determine how\n prominent each bounding box should be.\n title: An optional title to show over the image\n ax: (optional) Matplotlib axis to draw on.\n \"\"\"\n # Number of boxes\n assert boxes is not None or refined_boxes is not None\n N = boxes.shape[0] if boxes is not None else refined_boxes.shape[0]\n\n # Matplotlib Axis\n if not ax:\n _, ax = plt.subplots(1, figsize=(12, 12))\n\n # Generate random colors\n colors = random_colors(N)\n\n # Show area outside image boundaries.\n margin = image.shape[0] // 10\n ax.set_ylim(image.shape[0] + margin, -margin)\n ax.set_xlim(-margin, image.shape[1] + margin)\n ax.axis('off')\n\n ax.set_title(title)\n\n masked_image = image.astype(np.uint32).copy()\n for i in range(N):\n # Box visibility\n visibility = visibilities[i] if visibilities is not None else 1\n if visibility == 0:\n color = \"gray\"\n style = \"dotted\"\n alpha = 0.5\n elif visibility == 1:\n color = colors[i]\n style = \"dotted\"\n alpha = 1\n elif visibility == 2:\n color = colors[i]\n style = \"solid\"\n alpha = 1\n\n # Boxes\n if boxes is not None:\n if not np.any(boxes[i]):\n # Skip this instance. Has no bbox. Likely lost in cropping.\n continue\n y1, x1, y2, x2 = boxes[i]\n p = patches.Rectangle((x1, y1), x2 - x1, y2 - y1, linewidth=2,\n alpha=alpha, linestyle=style,\n edgecolor=color, facecolor='none')\n ax.add_patch(p)\n\n # Refined boxes\n if refined_boxes is not None and visibility > 0:\n ry1, rx1, ry2, rx2 = refined_boxes[i].astype(np.int32)\n p = patches.Rectangle((rx1, ry1), rx2 - rx1, ry2 - ry1, linewidth=2,\n edgecolor=color, facecolor='none')\n ax.add_patch(p)\n # Connect the top-left corners of the anchor and proposal\n if boxes is not None:\n ax.add_line(lines.Line2D([x1, rx1], [y1, ry1], color=color))\n\n # Captions\n if captions is not None:\n caption = captions[i]\n # If there are refined boxes, display captions on them\n if refined_boxes is not None:\n y1, x1, y2, x2 = ry1, rx1, ry2, rx2\n ax.text(x1, y1, caption, size=11, verticalalignment='top',\n color='w', backgroundcolor=\"none\",\n bbox={'facecolor': color, 'alpha': 0.5,\n 'pad': 2, 'edgecolor': 'none'})\n\n # Masks\n if masks is not None:\n mask = masks[:, :, i]\n masked_image = apply_mask(masked_image, mask, color)\n # Mask Polygon\n # Pad to ensure proper polygons for masks that touch image edges.\n padded_mask = np.zeros(\n (mask.shape[0] + 2, mask.shape[1] + 2), dtype=np.uint8)\n padded_mask[1:-1, 1:-1] = mask\n contours = find_contours(padded_mask, 0.5)\n for verts in contours:\n # Subtract the padding and flip (y, x) to (x, y)\n verts = np.fliplr(verts) - 1\n p = Polygon(verts, facecolor=\"none\", edgecolor=color)\n ax.add_patch(p)\n ax.imshow(masked_image.astype(np.uint8))\n\n\ndef display_table(table):\n \"\"\"Display values in a table format.\n table: an iterable of rows, and each row is an iterable of values.\n \"\"\"\n html = \"\"\n for row in table:\n row_html = \"\"\n for col in row:\n row_html += \"<td>{:40}</td>\".format(str(col))\n html += \"<tr>\" + row_html + \"</tr>\"\n html = \"<table>\" + html + \"</table>\"\n IPython.display.display(IPython.display.HTML(html))\n\n\ndef display_weight_stats(model):\n \"\"\"Scans all the weights in the model and returns a list of tuples\n that contain stats about each weight.\n \"\"\"\n layers = model.get_trainable_layers()\n table = [[\"WEIGHT NAME\", \"SHAPE\", \"MIN\", \"MAX\", \"STD\"]]\n for l in layers:\n weight_values = l.get_weights() # list of Numpy arrays\n weight_tensors = l.weights # list of TF tensors\n for i, w in 
enumerate(weight_values):\n weight_name = weight_tensors[i].name\n # Detect problematic layers. Exclude biases of conv layers.\n alert = \"\"\n if w.min() == w.max() and not (l.__class__.__name__ == \"Conv2D\" and i == 1):\n alert += \"<span style='color:red'>*** dead?</span>\"\n if np.abs(w.min()) > 1000 or np.abs(w.max()) > 1000:\n alert += \"<span style='color:red'>*** Overflow?</span>\"\n # Add row\n table.append([\n weight_name + alert,\n str(w.shape),\n \"{:+9.4f}\".format(w.min()),\n \"{:+10.4f}\".format(w.max()),\n \"{:+9.4f}\".format(w.std()),\n ])\n display_table(table)\n"
] | [
[
"numpy.random.choice",
"numpy.random.rand",
"numpy.where",
"matplotlib.patches.Rectangle",
"numpy.concatenate",
"matplotlib.pyplot.subplots",
"numpy.arange",
"matplotlib.pyplot.tight_layout",
"matplotlib.pyplot.axis",
"matplotlib.pyplot.subplot",
"matplotlib.lines.Line2D",
"numpy.zeros",
"matplotlib.pyplot.title",
"matplotlib.patches.Polygon",
"matplotlib.pyplot.figure",
"matplotlib.pyplot.show",
"numpy.fliplr",
"matplotlib.pyplot.xlabel",
"numpy.any",
"matplotlib.pyplot.ylabel",
"numpy.unique",
"matplotlib.pyplot.imshow"
]
] |
kentwait/bioseq | [
"4f0649639ea1f301d2947cc5aff4d1a4fd55529c"
] | [
"bioseq/arrays.py"
] | [
"from .basetypes import *\nimport os\nimport numpy as np\nimport re\nfrom collections.abc import MutableMapping\nfrom collections import OrderedDict, Counter\nimport collections\nfrom copy import deepcopy\nfrom scipy.sparse import lil_matrix\nimport pandas as pd\n\n__all__ = ['SEQTYPES', 'validate_sequence_chars',\n 'SequenceArray', 'NucleotideArray', 'ProteinArray', 'CodonArray',\n 'SequenceAlignment', 'NucleotideAlignment', 'ProteinAlignment', 'CodonAlignment']\n\nSEQTYPES = ('nucl', 'prot', 'cod')\n\n\ndef validate_sequence_chars(seq, seqtype='nucl'):\n assert isinstance(seq, str), TypeError('seq should be str or string-like.')\n assert seqtype in SEQTYPES, ValueError('seqtype must be \"nucl\" for nucleotide, \"prot\" for protein, '\n 'or \"cod\" for codon.')\n pattern = '[^{}]'.format(AMINO_ACIDS if seqtype == 'prot' else BASES)\n invalid_chars = set(re.findall(pattern, seq, re.IGNORECASE))\n if len(invalid_chars) > 0:\n raise ValueError('Sequence contains invalid characters: {}. Check sequence type or individual sequences.'\n .format(repr(invalid_chars)))\n return seq\n\n\nclass SequenceArray(MutableMapping):\n \"\"\"\n Multiple sequence array object constructor\n\n Stores one or more biological sequences as a set of id (key) - sequence string (value) pair based on its original\n input order by using an OrderedDict container.\n\n SequenceArray can be instantiated by passing a dictionary-like object whose keys are names or descriptions of their\n corresponding sequence string value. The string value can be a nucleotide sequence (nucl), codon sequence (cod),\n or protein sequence (prot). Note that the string value type, whether 'nucl', 'prot', or 'cod, must be the same\n for all items in the SequenceArray.\n\n \"\"\"\n\n def __init__(self, input_obj, seqtype='nucl', name='', description='', validate=False):\n \"\"\"Create a new SequenceArray object from a dictionary, file or FASTA-formatted string.\n\n Parameters\n ----------\n input_obj : dict or str\n Object used to populate a SequenceArray object. This may be one of the following:\n - Dictionary-like object whose id are sequence record names and values are the corresponding sequences\n - Path to a FASTA file\n - FASTA-formatted string\n seqtype : str\n 'nucl' (Nucleotide), 'prot' (Protein), 'cod' (Codon-based)\n name : str\n Name of the set of sequence records\n description : str\n Short description\n\n \"\"\"\n assert seqtype in SEQTYPES, ValueError('seqtype must be \"nucl\" for nucleotide, \"prot\" for protein, '\n 'or \"cod\" for codon.')\n self.seqtype = seqtype\n self.name = name\n self.description = description\n\n # Key-value pairs where id is the sequence record name and the value is the sequence\n if isinstance(input_obj, dict):\n records = input_obj\n self._ids = list(records.keys())\n self._sequences = list(records.values())\n # String, assumed to be path to FASTA file or contents of FASTA file as a long string\n elif isinstance(input_obj, str):\n # Test if file path\n if os.path.exists(input_obj):\n records = SequenceArray.parse_fasta(input_obj, seqtype=seqtype)\n # self._ids = [key.split(' ')[0] for key in records.keys()]\n self._ids = list(records.keys()) # preprocessing already done by parse_fasta method\n self._sequences = list(records.values())\n else:\n raise NotImplementedError('Passing FASTA-formatted strings are not yet supported. 
'\n 'Instantiate using an OrderedDict or passing a valid filepath instead.')\n if validate:\n # Check if sequences contain invalid characters\n if seqtype in ('nucl', 'prot'):\n self._sequences = [validate_sequence_chars(_, seqtype=seqtype) for _ in self._sequences]\n else: # codon seqtype\n pass\n\n @property\n def ids(self):\n return self._ids\n\n @ids.setter\n def ids(self, value):\n raise AttributeError('Setting ids using this method is not permitted.')\n\n @ids.deleter\n def ids(self):\n raise AttributeError('Deleting ids using this method is not permitted.')\n\n @property\n def sequences(self):\n return self._sequences\n\n @sequences.setter\n def sequences(self, value):\n raise AttributeError('Setting sequences using this method is not permitted.')\n\n @sequences.deleter\n def sequences(self):\n raise AttributeError('Deleting sequences using this method is not permitted.')\n\n def __setitem__(self, key, sequence):\n self.ids.append(key)\n self.sequences.append(sequence)\n\n def __getitem__(self, keys):\n if isinstance(keys, collections.Iterable) and not isinstance(keys, str):\n return_list = []\n for key in keys:\n if key in self.ids:\n return_list.append(self.sequences[self.ids.index(key)])\n else:\n raise KeyError('Key \"{0}\" does not exist'.format(key))\n return return_list\n else:\n key = keys\n if key in self.ids:\n return self.sequences[self.ids.index(key)]\n else:\n raise KeyError('Key \"{0}\" does not exist'.format(key))\n\n def __delitem__(self, key):\n if key in self.ids:\n index = self.ids.index(key)\n self.ids.remove(key)\n self.sequences.pop(index)\n else:\n raise KeyError('Key \"{0}\" does not exist'.format(key))\n\n def __iter__(self):\n for key, sequence in zip(self.ids, self.sequences):\n yield (key, sequence)\n\n def __len__(self):\n return len(self.ids)\n\n def __repr__(self):\n return 'SequenceArray({0})'.format([(k, v) for k, v in zip(self.ids, self.sequences)])\n\n def __contains__(self, key):\n return True if key in self.ids else False\n\n def keys(self):\n for _ in self.ids:\n yield _\n\n def values(self):\n for _ in self.sequences:\n yield _\n\n def items(self):\n for x in range(len(self.ids)):\n yield (self.ids[x], self.sequences[x])\n\n def to_fasta(self, path, linewidth=60):\n \"\"\"Save sequence array as a FASTA file\n\n Parameters\n ----------\n path : str\n Filename/path of FASTA file\n linewidth : int\n Line width of FASTA file\n\n \"\"\"\n with open(path, 'w') as f:\n print(SequenceArray.array_to_fasta(self.ids, self.sequences, linewidth=linewidth), file=f)\n\n def align(self, aln_file='out.aln', program='muscle', program_args=None):\n \"\"\"Calls an external alignment program to align the sequences in the sequence array.\n\n Parameters\n ----------\n aln_file : str\n Filename/path of resulting multiple sequence alignment\n program : str\n External program to be called for multiple sequence alignment. Currently supported programs are\n 'muscle' (Muscle), 'mafft' (MAFFT), 'clustalw' (ClustalW), 'clustalo' (Clustal Omega), 'prank' (PRANK).\n To ensure that this method works properly, make sure that these programs are installed and\n accessible from the system's PATH.\n program_args : str\n Additional user-specified arguments\n\n Returns\n -------\n SequenceAlignment\n\n \"\"\"\n # check if program is in choices or not. if not return an error\n choices = ['muscle', 'mafft', 'clustalo']\n assert program in choices, Exception('Program not supported. 
Choose from the following: \\\n \"muscle\", \"mafft\", \"clustalw\", \"clustalo\", \"prank\"')\n\n # Write the SequenceArray object to file\n seqfile = '{0}.{1}'.format(self.name, 'fna' if self.seqtype == 'nucl' else 'faa')\n self.to_fasta(seqfile)\n\n # TODO : extract program paths into variables so that users can alter at runtime\n # Default to MUSCLE\n if program == 'mafft':\n cmd_str = 'mafft {args} {i} > {o}'.format(args='--auto' if not program_args else program_args,\n i=seqfile, o=aln_file)\n elif program == 'clustalw': # TODO : ClustalW hook\n raise Exception('ClustalW support is in development.')\n elif program == 'clustalo':\n cmd_str = 'clustalo -i {i} -o {o} '.format(i=seqfile, o=aln_file)\n elif program == 'prank': # TODO : PRANK hook\n raise Exception('PRANK support is in development.')\n else:\n # Default to MUSCLE\n cmd_str = 'muscle {args}-in {i} -out {o}'.format(args='' if not program_args else program_args+' ',\n i=seqfile, o=aln_file)\n # TODO : change to subprocess\n os.system(cmd_str)\n if self.seqtype == 'nucl':\n return NucleotideAlignment(aln_file)\n elif self.seqtype == 'prot':\n return ProteinAlignment(aln_file)\n\n @staticmethod\n def parse_fasta(path, seqtype='nucl', upper=True):\n \"\"\"Read FASTA format entirely using only built-ins.\n\n Parameters\n ----------\n path : str\n File path (absolute or relative) where the FASTA file is located.\n seqtype : str\n 'nucl' (Nucleotide), 'prot' (Protein), 'cod' (Codon-based)\n\n Returns\n -------\n OrderedDict\n FASTA headers are stored as dictionary keys and its corresponding sequence is stored as its value.\n\n \"\"\"\n keys = []\n sequence = ''\n sequences = []\n line_id_re = re.compile('^>(.+)[\\s\\n]')\n with open(path, 'r') as f:\n for i, line in enumerate(f.readlines()):\n if line.startswith('>'):\n # TODO : remove when validated to work\n # line_id = line[1:-1] # uses the entire description line\n _match = line_id_re.search(line) # uses only the string before space\n try:\n line_id = _match.group(1)\n except:\n raise ValueError('Malformed description line <line {} of {}>'.format(i, path))\n keys.append(line_id)\n if sequence:\n if upper:\n sequence = sequence.upper()\n sequences.append(sequence)\n sequence = ''\n else:\n sequence += re.sub('\\s', '', line.upper())\n if sequence:\n if upper:\n sequence = sequence.upper()\n sequences.append(sequence)\n return SequenceArray(OrderedDict(zip(keys, sequences)), seqtype=seqtype)\n\n @staticmethod\n def array_to_fasta(keys, sequences, linewidth=60):\n \"\"\"Converts a sequence array to a FASTA-formatted string\n\n Parameters\n ----------\n keys : list\n List of record names\n sequences : list\n List of sequences (list of lists or 2d ndarray)\n linewidth : int\n Number of characters per line\n\n Returns\n -------\n str\n FASTA-formatted string\n\n \"\"\"\n fasta_str = ''\n for key, sequence in zip(keys, sequences):\n sequence = ''.join(sequence)\n header = '>{0}'.format(key)\n fasta_str += header + '\\n'\n for i in range(0, len(sequence), linewidth):\n curr_line = sequence[i:i+linewidth]\n fasta_str += curr_line + '\\n'\n return fasta_str\n\n @staticmethod\n def composition(sequence_obj, seqtype='nucl'):\n \"\"\"Return the per sequence composition of a sequence array\n\n Parameters\n ----------\n sequence_obj : SequenceArray\n seqtype : str\n\n\n Returns\n -------\n OrderedDict\n Keys are sequence ids and values are OrderedDict of the corresponding percent makeup for each character\n except gaps\n\n Notes\n -----\n The return value is an OrderedDict of OrderedDicts. 
First level is indexed by sequence while second level is\n indexed by character.\n >>> seq_array = OrderedDict([('key1', 'AAA'), ('key2', 'TTT')])\n >>> composition_of_seq_array = OrderedDict([ \\\n ('key1', OrderedDict([('T', 0), ('C', 0), ('A', 3/float(3)), ('G', 0)])), \\\n ('key2', OrderedDict([('T', 3/float(3)), ('C', 0), ('A', 0), ('G', 0)])), ])\n >>> seq_array['key1']\n 'AAA'\n >>> composition_of_seq_array['key1']\n OrderedDict([('T', 0), ('C', 0), ('A', 1.0), ('G', 0)])\n >>> composition_of_seq_array['key1']['A']\n 1.0\n\n \"\"\"\n # assert re.search('^[ATCG\\-]+$', sequence), 'Input sequence contains characters other than A,T,C,G,-'\n composition_of = OrderedDict()\n characters = BASES if seqtype in ['nucl', 'cod'] else AMINO_ACIDS\n for seqid, sequence in zip(sequence_obj.ids, sequence_obj.sequences):\n char_counts = Counter(sequence.upper())\n total = sum([v for k, v in char_counts.items() if k != '-'])\n composition_of[seqid] = OrderedDict([(k, char_counts[k]/float(total)) for k in characters])\n # return pd.DataFrame(composition_of)\n return composition_of\n\n\nclass NucleotideArray(SequenceArray):\n \"\"\"\n Nucleotide sequence array object constructor\n\n This is a special type of SequenceArray for nucleotide sequences containing additional methods specific for\n handling nucleotide sequence data. On instantiation, it constructs a SequenceArray object whose seqtype is set to\n 'nucl'.\n\n NucleotideArray is suitable for both protein-coding and non-protein coding nucleotide sequences. However,\n if sequences are protein-coding, it is better to use the CodonArray object as this contains methods useful\n for protein-coding sequences such as the ability count by nucleotide triplets and to translate to amino acid\n sequences.\n\n If the array contains in-frame protein-coding sequence, NucleotideArray can construct a CodonArray using the method\n `to_codonarray`. However, NucleotideArray cannot differentiate by itself whether a sequence is coding or\n non-coding, and if coding, whether it is in-frame or not, therefore it is up to the user to judge\n whether it is appropriate to represent the sequences as plain nucleotides through NucleotideArray or as\n protein-coding sequences through CodonArray.\n\n \"\"\"\n def __init__(self, input_obj, name='', description='', validate=False):\n \"\"\"Create a new NucleotideArray from a dictionary, file or FASTA-formatted string.\n\n Parameters\n ----------\n input_obj : dict, str\n Object used to populate a SequenceArray object. This may be one of the following:\n - Dictionary-like object whose id are sequence record names and values are the corresponding sequences\n - Path to a FASTA file\n - FASTA-formatted string\n name : str\n Name of the set of sequence records\n description : str\n Short description\n\n \"\"\"\n super().__init__(input_obj, name=name, seqtype='nucl', description=description, validate=validate)\n\n def to_codonarray(self):\n \"\"\"Create a CodonArray from the current NucleotideArray\n\n Returns\n -------\n CodonArray\n\n \"\"\"\n return CodonArray(deepcopy(self), name=self.name, description=self.description)\n\n def basecomp(self):\n \"\"\"Return base composition of each sequence\n\n Returns\n -------\n OrderedDict\n Keys are sequence ids and values are OrderedDict of the corresponding percent makeup for each character\n except gaps. 
For example, 'T', 'C', 'A', 'G' for nucleotides.\n\n Sea also\n --------\n SequenceArray.composition : Character composition of a sequence\n \"\"\"\n basecomp_of = super().composition(self, seqtype=self.seqtype)\n for key, comp in basecomp_of.items():\n basecomp_of[key]['AT'] = comp['A'] + comp['T']\n basecomp_of[key]['GC'] = comp['G'] + comp['C']\n return basecomp_of\n\n @staticmethod\n def nucleotide_to_codon(nucleotide_str):\n \"\"\"Converts a nucleotide triplet into its corresponding codon\n\n Parameters\n ----------\n nucleotide_str : str or sequence\n Nucleotide sequence (str or list)\n\n Yields\n ------\n str\n 3-character string codon\n\n \"\"\"\n if len(nucleotide_str) % 3 != 0:\n raise ValueError('SequenceArray length is not a multiple of three ({0}).'.format(len(nucleotide_str)))\n for j in range(0, len(nucleotide_str), 3):\n if j+3 <= len(nucleotide_str):\n yield nucleotide_str[j:j+3]\n\n\nclass CodonArray(SequenceArray):\n \"\"\"\n Protein-coding nucleotide sequence array object constructor\n\n This is a special type of SequenceArray for protein-coding sequences. If the array contains\n in-frame protein-coding sequence, NucleotideArray contains methods to represent data as codons (nculeotide triplets)\n and translate to protein sequence. Note that NucleotideArray cannot differentiate by itself whether a sequence\n is coding or non-coding, therefore it is up to the user to judge whether it is appropriate to use these methods on\n the data.\n\n \"\"\"\n def __init__(self, input_obj, name='', description='', validate=False):\n \"\"\"Create a new CodonArray from a dictionary, file or FASTA-formatted string.\n\n Parameters\n ----------\n input_obj : dict or str\n Object used to populate a SequenceArray object. This may be one of the following:\n - Dictionary-like object whose id are sequence record names and values are the corresponding sequences\n - Path to a FASTA file\n - FASTA-formatted string\n name : str\n Name of the set of sequence records\n description : str\n Short description\n\n \"\"\"\n super().__init__(input_obj, name=name, seqtype='cod', description=description, validate=validate)\n self.pos = OrderedDict()\n self.pos[1] = NucleotideArray(\n OrderedDict([(seqid, ''.join(sequence_as_list))\n for seqid, sequence_as_list in zip(self.ids,\n map(lambda x: x[0::3], self.sequences))]))\n self.pos[2] = NucleotideArray(\n OrderedDict([(seqid, ''.join(sequence_as_list))\n for seqid, sequence_as_list in zip(self.ids,\n map(lambda x: x[1::3], self.sequences))]))\n self.pos[3] = NucleotideArray(\n OrderedDict([(seqid, ''.join(sequence_as_list))\n for seqid, sequence_as_list in zip(self.ids,\n map(lambda x: x[2::3], self.sequences))]))\n\n @property\n def translated(self):\n \"\"\"Translates nucleotide sequences into amino acid sequences\n\n Assumes that the nucleotide sequence is protein-coding, in-frame, and the start of the ORF corresponds\n to the beginning of the nucleotide sequence.\n\n Returns\n -------\n ProteinArray\n\n \"\"\"\n translated_seqarray = OrderedDict()\n for key, nt_seq in zip(self.ids, self.sequences):\n translated_seqarray[key] = ''.join([GENETIC_CODE[cod.upper()]\n for cod in NucleotideArray.nucleotide_to_codon(nt_seq)])\n return ProteinArray(translated_seqarray)\n\n def codonalign(self, codon_aln_file='out.ffn.aln', program='muscle'):\n \"\"\"Aligns by codons using a protein alignment generated by an external program\n\n Assumes the sequence is in-frame and is a coding sequences. 
First, the sequences are translated into proteins,\n which are aligned by an external program. The resulting protein alignment is used as an anchor to align\n nucleotide sequences.\n\n Parameters\n ----------\n codon_aln_file : str\n File path of resulting codon-aligned multiple sequence alignment\n program : str or path\n External program to be called to align translated protein sequences. Currently supported\n programs are 'muscle' (Muscle), 'mafft' (MAFFT), 'clustalw' (ClustalW), 'clustalo' (Clustal Omega),\n 'prank' (PRANK)\n\n Returns\n -------\n CodonAlignment\n codon-based multiple sequence alignment.\n\n \"\"\"\n if self.seqtype != 'cod':\n raise Exception('Seqtype must be \"cod\" (codon) to perform codon alignment.')\n for i, sequence in enumerate(self.sequences):\n if len(sequence) % 3 != 0:\n raise ValueError('\"{0}\" sequence length is not a multiple of three ({1}).'\n .format(self.ids[i], len(sequence)))\n\n # check if program is in choices or not. if not return an error\n choices = ['muscle', 'mafft', 'clustalo']\n assert program in choices, 'Program not supported. Choose from the following: \\\n \"muscle\", \"mafft\", \"clustalw\", \"clustalo\", \"prank\"'\n\n # Write translated SequenceArray object to file\n transl_seqfile = '{0}.transl.{1}'.format(self.name, 'faa')\n self.translated.to_fasta(transl_seqfile)\n\n # Align protein sequences\n aa_aln = self.translated.align(aln_file=codon_aln_file, program=program)\n\n # Adjust codons based on amino acid alignment\n codon_aln = OrderedDict()\n i = 0\n for nt_seq, aa_aln_seq in zip(self.sequences, aa_aln.sequences):\n codon = NucleotideArray.nucleotide_to_codon(nt_seq)\n codon_aln[self.ids[i]] = ''.join([next(codon) if aa != '-' else '---' for aa in list(aa_aln_seq)])\n i += 1\n codon_aln = CodonAlignment(codon_aln)\n codon_aln.to_fasta(codon_aln_file)\n return codon_aln\n\n def basecomp(self):\n \"\"\"Return base composition of each sequence\n\n Returns\n -------\n OrderedDict\n Keys are sequence ids and values are OrderedDict of the corresponding percent makeup for each character\n except gaps. 
For example, 'T', 'C', 'A', 'G' for nucleotides.\n\n Sea also\n --------\n SequenceArray.composition : Character composition of a sequence\n \"\"\"\n all_basecomp_of = super().composition(self, seqtype=self.seqtype)\n for key, comp in all_basecomp_of.items():\n all_basecomp_of[key]['AT'] = comp['A'] + comp['T']\n all_basecomp_of[key]['GC'] = comp['G'] + comp['C']\n\n pos_basecomp_of = {1: super().composition(self.pos[1], seqtype=self.seqtype),\n 2: super().composition(self.pos[2], seqtype=self.seqtype),\n 3: super().composition(self.pos[3], seqtype=self.seqtype),\n }\n for pos, basecomp_of in pos_basecomp_of.items():\n for key, comp in basecomp_of.items():\n for base in BASES:\n all_basecomp_of[key][base + str(pos)] = basecomp_of[key][base]\n all_basecomp_of[key]['AT' + str(pos)] = comp['A'] + comp['T']\n all_basecomp_of[key]['GC' + str(pos)] = comp['G'] + comp['C']\n return all_basecomp_of\n\n # TODO : Subclass string and tuple to create nucl, prot, cod datatypes\n @staticmethod\n def icod(nucl_seq, pos):\n \"\"\"Retrieves the codon at the specified codon position from a nucleotide sequence\n\n Parameters\n ----------\n nucl_seq : str\n pos : int\n\n Returns\n -------\n str\n codon string of length 3\n\n \"\"\"\n assert len(nucl_seq) % 3 == 0, ValueError('Sequence is not in multiples of three.')\n assert isinstance(pos, int), ValueError('Position should be an integer value.')\n return nucl_seq[3*pos:(3*pos)+3]\n\n\nclass ProteinArray(SequenceArray):\n def __init__(self, input_obj, name='', description='', validate=False):\n \"\"\"Create a new ProteinArray from a dictionary, file or FASTA-formatted string.\n\n Parameters\n ----------\n input_obj : tuple or dict or str\n Object used to populate a SequenceArray object. This may be one of the following:\n - Dictionary-like object whose id are sequence record names and values are the corresponding sequences\n - Path to a FASTA file\n - FASTA-formatted string\n name : str\n Name of the set of sequence records\n description : str\n Short description\n\n \"\"\"\n super().__init__(input_obj, name=name, seqtype='prot', description=description, validate=validate)\n\n def aacomp(self):\n return super().composition(self, seqtype=self.seqtype)\n\n\nclass SequenceAlignment(MutableMapping):\n \"\"\"\n Multiple sequence alignment base class\n\n The object specified by this class is a combination of a list of sequence names and a 2d numpy ndarray that\n represents the alignment. Thus, each sequence record is a key-value pairing of a sequence name and its corresponding\n sequence in the ndarray.\n\n Records can be accessed by its key like a dictionary. In addition, multiple records can be accessed simultaneously\n by passing a list. Record values are returned as a numpy ndarray based on the order of keys passed.\n\n Methods that permutate the alignment other than adding or deleting records will return a new instance of the\n alignment. 
No in-place changes take place when using these methods.\n\n This is the base class for NucleotideAlignment, ProteinAlignment and CodonAlignment.\n\n \"\"\"\n def __init__(self, input_obj, seqtype, charsize=1, name='', description=''):\n # TODO : accept FASTA-formatted string\n \"\"\"Create a new SequenceAlignment from a tuple, dictionary, file or FASTA-formatted string.\n\n Parameters\n ----------\n input_obj : dict or str\n Alignment objects can be instantiated by passing one of the following:\n - Tuple of list of record names and ndarray of sequences\n - Dictionary-like object\n - File path (absolute or relative)\n seqtype : str\n 'nucl' (Nucleotide), 'prot' (Protein), 'cod' (Codon-based)\n charsize : int\n Number of characters that define a column of the alignment.\n name : str\n Name of the set of sequence records\n description : str\n Short description\n\n \"\"\"\n # Description\n self.seqtype = seqtype\n self.name = name\n self.description = description\n self.charsize = charsize\n\n # Tuple containing an ordered list of ids and a numpy array of the sequence\n if isinstance(input_obj, tuple):\n if len(input_obj) == 2:\n if isinstance(input_obj[0], list):\n self._ids = input_obj[0]\n else:\n raise TypeError('First item of tuple is not a list.')\n # TODO : obviate the need for importing numpy, possibly by forking tinynumpy to enable character arrays\n if isinstance(input_obj[1], np.ndarray):\n self._sequences = input_obj[1]\n else:\n raise TypeError('Second item in tuple is not a numpy array.')\n else:\n list_of_sequences = []\n\n # dictionary of id as index and sequence stored as str as the value\n if isinstance(input_obj, dict) or isinstance(input_obj, MutableMapping):\n self._ids = []\n for k, v in input_obj.items():\n if not isinstance(v, str):\n raise TypeError('SequenceArray \"{0}\" is not a string.'.format(k))\n self._ids.append(k)\n list_of_sequences.append(v)\n # Check if sequences are of equal length\n list_of_sequence_lengths = set([len(s) for s in list_of_sequences])\n if len(list_of_sequence_lengths) > 1:\n raise ValueError('Unequal sequence lengths.')\n # String path to FASTA file\n elif isinstance(input_obj, str):\n # Check if it is a valid path, and the file exists\n if os.path.exists(input_obj):\n # Parse FASTA file\n # fasta_dct = self.parse_fasta(input_obj)\n fasta_dct = SequenceArray.parse_fasta(input_obj, seqtype=seqtype)\n # Store record ID as list\n self._ids = list(fasta_dct.keys())\n list_of_sequences = list(fasta_dct.values())\n # TODO : Test if the string is in FASTA format\n else:\n raise Exception('Passing FASTA-formatted strings are not yet supported. \\\n Instantiate using an OrderedDict or passing a valid filepath instead.')\n # Store sequences as a numpy array. 
Order of array rows should correspond to the order of IDs\n # in the record list\n\n # Check if length is divisible by charsize\n assert len(list_of_sequences[0]) % self.charsize == 0, \\\n ValueError('Alignment length is not divisible by 3 ({} nt)'.format(len(list_of_sequences[0])))\n self._sequences = np.array(\n [[seq[j:j+self.charsize] for j in range(0, len(seq), self.charsize) if j+self.charsize <= len(seq)]\n for seq in list_of_sequences], dtype='U1' if charsize == 1 else 'U3')\n\n # Size of alignment\n self.count = len(self)\n self.shape = self.sequences.shape # number of sample, number of units (bases/aa/codons), char per unit\n self.length = self.sequences.shape[1]\n\n # TODO : Make a \"restricted\" descriptor for any type of attribute that should not be changed outside of __init__\n # Restrict setting \"ids\" attribute outside of __init__\n @property\n def ids(self):\n return self._ids\n\n @ids.setter\n def ids(self, value):\n raise AttributeError('Setting ids using this method is not permitted.')\n\n @ids.deleter\n def ids(self):\n raise AttributeError('Deleting ids using this method is not permitted.')\n\n # Restrict setting \"sequences\" attribute outside of __init__\n @property\n def sequences(self):\n return self._sequences\n\n @sequences.setter\n def sequences(self, value):\n raise AttributeError('Setting sequences using this method is not permitted.')\n\n @sequences.deleter\n def sequences(self):\n raise AttributeError('Deleting sequences using this method is not permitted.')\n\n def __setitem__(self, key, value):\n \"\"\"Add or update a sequence record\n\n Parameters\n ----------\n key: str\n Record name\n value: np.ndarray\n Numpy array of the sequence. The length of the added array must be the same as the length of the current\n array.\n\n \"\"\"\n if key in self.ids:\n raise KeyError('Key name {0} already in use.'.format(key))\n else:\n # Check if has the same length as the number of cols of the current array\n if len(value) == self.length:\n self.ids.append(key)\n self.sequences = np.vstack(\n [self.sequences, [value[j:j+self.charsize] for j in range(0, len(value), self.charsize)\n if j+self.charsize <= len(value)]])\n else:\n raise ValueError('New sequence length {0} does not match alignment length {1}.'\n .format(len(value), self.length))\n\n def __getitem__(self, keys):\n \"\"\"Given a key or list of keys, retrieve a record or multiple records\n\n Parameters\n keys : str or list\n Record name or list of record names\n\n Returns\n -------\n np.ndarray\n Sequence ndarray or multiple sequence alignment ndarray if a list of keys was passed\n\n \"\"\"\n if isinstance(keys, collections.Iterable) and not isinstance(keys, str):\n index_list = []\n for key in keys:\n if key in self.ids:\n index_list.append(self.ids.index(key))\n else:\n raise Exception('Key \"{0}\" does not exist'.format(key))\n return self.sequences[index_list]\n else:\n key = keys\n if key in self.ids:\n index = self.ids.index(key)\n else:\n raise KeyError('Key \"{0}\" does not exist'.format(key))\n return self.sequences[index]\n\n def __delitem__(self, key):\n if key in self.ids:\n index = self.ids.index(key)\n self.ids.remove(key)\n self.sequences = np.delete(self.sequences, index, axis=0) # 0 means by row\n else:\n raise KeyError('Key \"{0}\" does not exist'.format(key))\n\n def __iter__(self):\n for key, sequence in zip(self.ids, self.sequences):\n yield key, sequence\n\n def __len__(self):\n # Return the number of samples in the alignment\n return len(self.ids)\n\n def __repr__(self):\n return 
'keys({0})\\n{1}'.format(repr(self.ids), repr(self.sequences))\n\n def __add__(self, other):\n # Check if self.ids and other.ids match\n if set(self.ids) != set(other.ids):\n raise KeyError('Keys do not match.')\n if self.seqtype != other.seqtype:\n raise ValueError('Seqtypes do not match.')\n other_order = [other.ids.index(key) for key in self.ids]\n return type(self)(\n MSA(ids=self.ids, alignment=np.concatenate((self.sequences, other.sequences[other_order]), axis=1)),\n self.seqtype)\n\n def __iadd__(self, other):\n return self + other\n\n def keys(self):\n for _ in self.ids:\n yield _\n\n def values(self):\n for _ in self.sequences:\n yield _\n\n def items(self):\n for x in range(len(self.ids)):\n yield (self.ids[x], self.sequences[x])\n\n def head(self):\n \"\"\"Retrieves the first 5 entries of the sequence alignment\n\n Returns\n -------\n SequenceAlignment\n Creates a subset of the current SequenceAlignment containing only the first five entries.\n\n \"\"\"\n return type(self)(MSA(ids=self.ids[:5], alignment=self.sequences[:5]), self.seqtype)\n\n def tail(self):\n \"\"\"Retrieves the last 5 entries of the sequence alignment\n\n Returns\n -------\n SequenceAlignment\n Creates a subset of the current SequenceAlignment containing only the last five entries.\n\n \"\"\"\n return type(self)(MSA(ids=self.ids[-5:], alignment=self.sequences[-5:]), self.seqtype)\n\n # noinspection PyTypeChecker\n def colx(self, *args):\n \"\"\"Returns a length-wise (column range) subset of the alignment\n\n Parameters\n ----------\n args: int\n Inclusive start and exclusive end position of the subset. Follows Python slice conventions.\n\n Returns\n -------\n SequenceAlignment\n Create a subset of the current SequenceAlignment containing only the specified column range.\n\n \"\"\"\n if len(args) == 1:\n return type(self)(MSA(ids=self.ids, alignment=self.sequences[:, args[0]]), self.seqtype)\n elif len(args) == 2:\n return type(self)(MSA(ids=self.ids, alignment=self.sequences[:, args[0]:args[1]]), self.seqtype)\n elif len(args) == 3:\n return type(self)(MSA(ids=self.ids, alignment=self.sequences[:, args[0]:args[1]:args[2]]),\n self.seqtype)\n else:\n raise Exception('Method uses 3 integer arguments at most.')\n\n def subset(self, keys):\n \"\"\"Returns a row-wise (by sample) subset of the current alignment\n\n Parameters\n ----------\n keys : str or list\n Record name or list of record names\n\n Returns\n -------\n SequenceAlignment\n Create a subset of the current SequenceAlignment containing only the specified records.\n\n \"\"\"\n return type(self)(MSA(ids=keys, alignment=self[keys]), self.seqtype)\n\n def labelpartition(self, label, start, end, coding=True):\n NotImplementedError()\n\n def xgap(self, all=False):\n \"\"\"Remove columns containing gap character from the current alignment\n\n Parameters\n ----------\n all : bool\n If True, removes a column only when whole column is gapped.\n If False, removes a column even when only one gap character is present\n\n Returns\n -------\n SequenceAlignment\n Creates a new SequenceAlignment free of gap characters.\n \"\"\"\n gapchar = '-'*self.charsize\n xgap_cols = []\n for i in range(self.length):\n if all:\n # noinspection PyTypeChecker\n if not np.all(gapchar == self.sequences[:, i]):\n xgap_cols.append(i)\n else:\n # noinspection PyTypeChecker\n if not np.any(gapchar == self.sequences[:, i]):\n xgap_cols.append(i)\n return type(self)(MSA(ids=self.ids, alignment=self.sequences[:, xgap_cols]), self.seqtype)\n\n def resample_cols(self):\n \"\"\"Creates a new 
SequenceAlignment using resampled alignment columns with replacement from the current data\n\n Returns\n -------\n SequenceAlignment\n\n \"\"\"\n randlist = np.random.choice(self.length, self.length, replace=True)\n return type(self)(MSA(ids=self.ids, alignment=self.sequences[:, randlist]), self.seqtype)\n\n def reorder(self, ordered_key_list):\n \"\"\"Reorder alignment based on the order of a a given list of keys\n\n Parameters\n ----------\n ordered_key_list : list\n List of record names arranged based on how samples should be reordered\n\n Returns\n -------\n SequenceAlignment\n Reordered alignment\n\n \"\"\"\n sequences = []\n for key in ordered_key_list:\n index = self.ids.index(key)\n sequences.append(self.sequences[index])\n return type(self)(MSA(ids=ordered_key_list, alignment=np.array(sequences)), self.seqtype)\n\n def to_fasta(self, path, linewidth=60):\n \"\"\"Save the alignment as a FASTA-formatted file\n\n Parameters\n ----------\n path : str\n Filename/path of FASTA file\n linewidth : int\n Number of characters per line\n\n \"\"\"\n # TODO : Check if basedir of path exists\n with open(path, 'w') as f:\n print(self.__class__.alignment_to_fasta(self, linewidth=linewidth), file=f)\n\n def to_phylip(self, path): # TODO\n NotImplementedError()\n\n def pssm(self):\n \"\"\"\n Position-specific scoring matrix of the alignment\n\n Returns\n -------\n np.array\n\n \"\"\"\n # TODO : update for codon\n if self.seqtype == 'nucl':\n characters = list(BASES)\n characters.append('-')\n elif self.seqtype == 'prot':\n characters = list(AMINO_ACIDS)\n characters.append('X')\n characters.append('-')\n elif self.seqtype == 'cod':\n characters = list(CODONS)\n characters.append('---')\n else:\n raise ValueError()\n\n pssm_sparse = lil_matrix((self.length, len(characters)))\n for i in range(self.length):\n seq = np.array(list(map(str.upper, self.sequences[:, i])))\n unique_cnts = np.unique(seq, return_counts=True)\n for j, char in enumerate(unique_cnts[0]):\n char_cnt = unique_cnts[1][j]\n if char in characters:\n pssm_sparse[i, characters.index(char)] = char_cnt\n else:\n if self.seqtype == 'nucl':\n for part_base in DEGENERATE_BASES[char]:\n if pssm_sparse[i, characters.index(part_base)]:\n pssm_sparse[i, characters.index(part_base)] += char_cnt / \\\n float(len(DEGENERATE_BASES[char]))\n else:\n pssm_sparse[i, characters.index(part_base)] = char_cnt / \\\n float(len(DEGENERATE_BASES[char]))\n elif self.seqtype == 'cod':\n char_val = [dict(), dict(), dict()]\n for k, cod_base in enumerate(char):\n print(cod_base)\n if cod_base not in BASES:\n for part_base in DEGENERATE_BASES[cod_base]:\n char_val[k][part_base] = 1 / float(len(DEGENERATE_BASES[cod_base]))\n else:\n char_val[k][cod_base] = 1\n\n for a, a_val in char_val[0].items():\n for b, b_val in char_val[1].items():\n for c, c_val in char_val[2].items():\n if pssm_sparse[i, characters.index(a+b+c)]:\n pssm_sparse[i, characters.index(a+b+c)] += char_cnt * a_val * b_val * c_val\n else:\n pssm_sparse[i, characters.index(a + b + c)] = char_cnt * a_val * b_val * c_val\n else:\n raise ValueError(char)\n\n return pd.DataFrame(pssm_sparse.toarray(), columns=list(characters))\n\n def consensus_matrix(self):\n pssm_df = self.pssm()\n consensus_idx = pssm_df.idxmax(axis=1)\n consensus_cnt = pssm_df.max(axis=1)\n consensus_df = pd.concat([consensus_idx, consensus_cnt], axis=1)\n consensus_df.columns = ['char', 'count']\n return consensus_df\n\n def consensus_sequence(self):\n pssm_df = self.pssm()\n consensus_idx = pssm_df.idxmax(axis=1)\n return 
list(consensus_idx.to_dict().values())\n\n @staticmethod\n def parse_fasta(path, seqtype='nucl', upper=True, output_type='array'):\n \"\"\"Read FASTA format entirely using only built-ins.\n\n Parameters\n ----------\n path : str\n File path (absolute or relative) where the FASTA file is located.\n seqtype : str\n 'nucl' (Nucleotide), 'prot' (Protein), 'cod' (Codon-based)\n output_type : 'array', 'aln', optional (default = 'array')\n Choose between output as array (array) or an alingment (aln)\n\n Returns\n -------\n OrderedDict\n FASTA headers are stored as dictionary keys and its corresponding sequence is stored as its value.\n\n \"\"\"\n lengths = set()\n seq_array = SequenceArray.parse_fasta(path, seqtype=seqtype, upper=upper)\n for key, seq in seq_array.items():\n lengths.add(len(seq))\n if output_type == 'ndarray':\n if len(lengths) == 1:\n return SequenceAlignment(seq_array, seqtype)\n elif output_type == 'dict':\n return seq_array\n\n @staticmethod\n def concat(*alignments):\n \"\"\"Concatenate multiple sequence alignments together\n\n Parameters\n ----------\n alignments : SequenceAlignment\n\n Returns\n -------\n SequenceAlignment\n New concatenated SequenceAlignment\n\n \"\"\"\n concaternated_alignment = alignments[0]\n for alignment in alignments[1:]:\n concaternated_alignment += alignment\n return concaternated_alignment\n\n @staticmethod\n def alignment_to_fasta(alignment, linewidth=60):\n \"\"\"Save the alignment as a FASTA-formatted file\n\n Parameters\n ----------\n alignment : SequenceAlignment\n linewidth : int\n Number of characters per line\n\n Returns\n -------\n str\n FASTA-formatted string\n\n \"\"\"\n return SequenceArray.array_to_fasta(alignment.ids, alignment.sequences, linewidth=linewidth)\n\n @staticmethod\n def composition(alignment_obj, seqtype='nucl'):\n \"\"\"Returns the character composition of the sequence alignment\n\n Parameters\n ----------\n alignment_obj : SequenceAlignment\n seqtype : str\n 'nucl' (Nucleotide), 'prot' (Protein), 'cod' (Codon-based)\n\n Returns\n -------\n OrderedDict\n\n \"\"\"\n sequence_obj = SequenceArray(\n OrderedDict([(seqid, ''.join(sequence_as_list))\n for seqid, sequence_as_list in zip(alignment_obj.ids, alignment_obj.sequences)]))\n return SequenceArray.composition(sequence_obj, seqtype=seqtype)\n\n\nclass NucleotideAlignment(SequenceAlignment):\n\n def __init__(self, input_obj, name='', description=''):\n \"\"\"Create a new NucleotideAlignment from a tuple, dictionary, file or FASTA-formatted string.\n\n Parameters\n ----------\n input_obj : tuple or dict or str\n Alignment objects can be instantiated by passing one of the following:\n - Tuple of list of record names and ndarray of sequences\n - Dictionary-like object\n - File path (absolute or relative)\n name : str\n Name of the set of sequence records\n description : str\n Short description\n\n \"\"\"\n super().__init__(input_obj, 'nucl', charsize=1, name=name, description=description)\n\n def basecomp(self):\n \"\"\"Returns the base composition of the current nucleotide alignment\n\n Returns\n -------\n OrderedDict\n\n \"\"\"\n basecomp_of = super().composition(self, seqtype=self.seqtype)\n for key, comp in basecomp_of.items():\n basecomp_of[key]['AT'] = comp['A'] + comp['T']\n basecomp_of[key]['GC'] = comp['G'] + comp['C']\n return basecomp_of\n\n\nclass ProteinAlignment(SequenceAlignment):\n\n def __init__(self, input_obj, name='', description=''):\n \"\"\"Create a new ProteinAlignment from a tuple, dictionary, file or FASTA-formatted string.\n\n Parameters\n 
----------\n input_obj : tuple or dict or str\n Alignment objects can be instantiated by passing one of the following:\n - Tuple of list of record names and ndarray of sequences\n - Dictionary-like object\n - File path (absolute or relative)\n name : str\n Name of the set of sequence records\n description : str\n Short description\n\n \"\"\"\n super().__init__(input_obj, 'prot', charsize=1, name=name, description=description)\n\n def aacomp(self):\n return super().composition(self, seqtype=self.seqtype)\n\n\nclass CodonAlignment(NucleotideAlignment):\n\n def __init__(self, input_obj, name='', description=''):\n \"\"\"Create a new CodonAlignment from a tuple, dictionary, file or FASTA-formatted string.\n\n Parameters\n ----------\n input_obj : tuple or dict or str\n Alignment objects can be instantiated by passing one of the following:\n - Tuple of list of record names and ndarray of sequences\n - Dictionary-like object\n - File path (absolute or relative)\n name : str\n Name of the set of sequence records\n description : str\n Short description\n\n \"\"\"\n SequenceAlignment.__init__(self, input_obj, seqtype='cod', charsize=3, name=name, description=description)\n # Create nucleotide alignment\n ntaln_lst = list()\n for seq in self.sequences:\n ntaln_concat = list()\n for seq_seq in seq:\n ntaln_concat.append(''.join(seq_seq))\n ntaln_lst.append(''.join(ntaln_concat))\n self.nucl_aln = NucleotideAlignment(\n MSA(ids=self.ids, alignment=np.array(np.array([list(seq) for seq in ntaln_lst], dtype='U1'), dtype='U1')))\n\n self.pos = OrderedDict()\n self.pos[1] = self.nucl_aln.colx(0, None, 3)\n self.pos[2] = self.nucl_aln.colx(1, None, 3)\n self.pos[3] = self.nucl_aln.colx(2, None, 3)\n\n def make_raxml_codon_partition_file(self, save_path):\n \"\"\"Make RAxML partition file for a codon alignment\n\n Parameters\n ----------\n save_path: str\n Partition file save path\n\n \"\"\"\n # TODO : check if basedir of save_path exists\n ordinal_suffix = {1: 'st', 2: 'nd', 3: 'rd', 4: 'th', 5: 'th', 6: 'th', 7: 'th', 8: 'th', 9: 'th', 0: 'th'}\n # TODO : Warn if file already exists\n with open(save_path, 'w') as f:\n for i in range(1, 4):\n print('DNA, {0}{1}pos={0}-{2}\\\\3'.format(i, ordinal_suffix[i], self.length*3), file=f)\n\n @staticmethod\n def composition(codon_aln_obj, fold_counts=(), pos=3):\n \"\"\"Return the character composition of a CodonAlignment depending on codon position and fold count\n\n Parameters\n ----------\n codon_aln_obj : CodonAlignment\n fold_counts : int\n 1,2,3,4,6 fold codon degeneracy\n pos : int\n 1 (1st position), 2 (2nd position), 3 (3rd position)\n\n Returns\n -------\n OrderedDict\n\n \"\"\"\n pos -= 1 # input is 1-indexed but Python is 0-indexed\n codon_filter_set = set([codon for codon, fold in CODON_FOLD.items() if fold in fold_counts])\n filtered_sequences = OrderedDict()\n for seqid, seq_ndarray in codon_aln_obj.items():\n if pos == -1:\n filtered_sequences[seqid] = ''.join(\n [codon for codon in seq_ndarray if codon in codon_filter_set]\n )\n else:\n filtered_sequences[seqid] = ''.join(\n [codon[pos] for codon in seq_ndarray if codon in codon_filter_set]\n )\n sequence_obj = SequenceArray(filtered_sequences)\n return SequenceArray.composition(sequence_obj, seqtype='cod')\n"
] | [
[
"numpy.concatenate",
"numpy.delete",
"numpy.array",
"numpy.random.choice",
"numpy.any",
"pandas.concat",
"numpy.all",
"numpy.unique"
]
] |
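The SequenceArray/CodonArray code in the entry above counts character composition while skipping gap characters, splits in-frame coding sequences into nucleotide triplets, and (in codonalign) threads the gaps of a protein alignment back onto those triplets. A minimal standard-library sketch of those ideas, with made-up toy sequences; base_composition, codons, and codon_align are illustrative names, not functions from the module:

```python
from collections import Counter, OrderedDict

BASES = ('T', 'C', 'A', 'G')  # same ordering convention as the module above

def base_composition(sequence):
    """Fraction of each base, ignoring gap characters, as SequenceArray.composition does."""
    counts = Counter(sequence.upper())
    total = sum(v for k, v in counts.items() if k != '-')
    return OrderedDict((b, counts[b] / float(total)) for b in BASES)

def codons(nucl):
    """Yield successive nucleotide triplets from an in-frame coding sequence."""
    if len(nucl) % 3 != 0:
        raise ValueError('sequence length is not a multiple of three')
    for j in range(0, len(nucl), 3):
        yield nucl[j:j + 3]

def codon_align(nucl, aligned_protein):
    """Thread protein-alignment gaps back onto the codons (the codonalign idea)."""
    cod = codons(nucl)
    return ''.join(next(cod) if aa != '-' else '---' for aa in aligned_protein)

print(base_composition('AAT-GC'))        # OrderedDict([('T', 0.2), ('C', 0.2), ('A', 0.4), ('G', 0.2)])
print(codon_align('ATGGCCAAA', 'MA-K'))  # ATGGCC---AAA
```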
ArthurBook/TDLearning | [
"f9f6187eb8ebe043b31fcc6439ff90bc2eb81196"
] | [
"tests/call_options_varying_num_steps3.py"
] | [
"import numpy as np\r\nimport pandas as pd\r\n\r\nimport tensorflow as tf\r\nfrom tensorflow import keras\r\nimport matplotlib.pyplot as plt\r\n\r\nfrom StochasticProcesses import Stochastic_Processes\r\nfrom Payoffs import Payoffs\r\n\r\nfrom TD_net_v4 import TD_net, td_metrics\r\n\r\n#%%\r\n\r\ndef input_creation(strike, n_steps, specs, sample_size, td_input_count, central_weight ):\r\n \r\n walks = Stochastic_Processes.generate_walks( **specs, N_walks = sample_size, verbose =False )\r\n \r\n walks_remove_dimension = walks[:,:,0]\r\n df_walks = pd.DataFrame(walks_remove_dimension)\r\n \r\n df_walks_sorted = df_walks.sort_values(by =n_steps,ignore_index=True)\r\n np_sorted_walks = df_walks_sorted.to_numpy()\r\n walks = np_sorted_walks[..., np.newaxis]\r\n\r\n s = walks.shape\r\n timedim = np.tile( range( s[1] ), s[0] ).reshape(*s[0:-1],1)\r\n walks_with_time = np.concatenate( [walks,timedim], axis = -1 )\r\n payoffs = np.maximum( walks[:,-1,0] - 100, 0 ) \r\n \r\n central_td_inputs_count = int(central_weight * td_input_count)\r\n central_indexs = df_walks_sorted.index[abs(df_walks_sorted[n_steps] - strike) < 0.0001].tolist()\r\n central_index = central_indexs[int(len(central_indexs)/2)]\r\n lowest_central_index = central_index - int(central_td_inputs_count/2)\r\n highest_central_index = central_index + int(central_td_inputs_count/2)\r\n central_walks_td = walks_with_time[lowest_central_index : highest_central_index]\r\n central_payoffs_td = payoffs[lowest_central_index : highest_central_index]\r\n\r\n sample_size = len(walks)\r\n smallest = df_walks_sorted[n_steps][0]\r\n largest = df_walks_sorted[n_steps][sample_size-1]\r\n expected_range = largest - smallest\r\n \r\n walks_remaining = td_input_count - len(central_walks_td)\r\n \r\n numb_walks_left_sides = int(walks_remaining / 2) # number of walks we can generate on the left / right\r\n numb_walks_right_sides = int(walks_remaining / 2)\r\n \r\n \r\n left_buckets = int(lowest_central_index / numb_walks_left_sides)\r\n right_buckets = int(((sample_size-1) - highest_central_index) / numb_walks_right_sides)\r\n \r\n indexs_left = [i for i in range(0,lowest_central_index,left_buckets)]\r\n indexs_right = [i for i in range(highest_central_index,sample_size-1,right_buckets)]\r\n \r\n side_indexs = indexs_left + indexs_right\r\n \r\n side_walks_td = (walks_with_time[side_indexs])\r\n side_payoffs_td = (payoffs[side_indexs])\r\n \r\n walks_td = np.vstack((central_walks_td, side_walks_td))\r\n payoffs_td = np.hstack((central_payoffs_td, side_payoffs_td))\r\n \r\n return walks_td,payoffs_td\r\n\r\n#%%\r\n\r\nnum_steps = [i for i in range(5,55,5)]\r\nrmse_list = []\r\n\r\n#sample_size = 1000\r\nsample_size = 5000000\r\ndiscretized_space = np.linspace(80,120,21) \r\n \r\nlower_bounds = discretized_space[:-1]\r\nupper_bounds = discretized_space[1:]\r\nbin_centers = ( lower_bounds + upper_bounds ) / 2 \r\n\r\nfor step in num_steps:\r\n \r\n process_specs = {\r\n \"processdicts\" : [\r\n {\"process\" : \"GBM\", \"s0\" : 100, 'mu': 0, 'v': 0.01, \r\n \"randomize_spot\" : [-20,20] \r\n },\r\n ],\r\n \"d_t\" : (1/8) / 360, \r\n \"N_steps\" : step,\r\n \"batch_size\" : 1000,\r\n }\r\n \r\n #sample_size = 5000000\r\n\r\n discretized_time = range( 0, process_specs[\"N_steps\"] + 1, 1 )\r\n \r\n walks = Stochastic_Processes.generate_walks( **process_specs, N_walks = sample_size, verbose =False )\r\n \r\n payoffs = np.maximum( walks[:,-1,0] - 100, 0 ) \r\n\r\n conditional_expectation = pd.DataFrame() \r\n\r\n for t in discretized_time:\r\n \r\n walks_t = 
walks[:,t,0] \r\n \r\n for lb,ub,mid in zip(lower_bounds, upper_bounds, bin_centers):\r\n \r\n in_discrete_bin = ( lb <= walks_t ) * ( walks_t <= ub ) \r\n \r\n subset_of_payoffs_with_condition = payoffs[in_discrete_bin] \r\n \r\n conditional_expectation.loc[t,mid] = np.mean( subset_of_payoffs_with_condition )\r\n \r\n \r\n output_shape = payoffs[0].shape\r\n\r\n nodes_per_layer = 64\r\n N_hidden_layers = 5\r\n hidden_activation = keras.activations.relu\r\n output_activation = keras.activations.linear\r\n \r\n # TD MODEL\r\n td_model = TD_net(\r\n \r\n #hidden layers\r\n nodes_per_layer = nodes_per_layer,\r\n N_hidden_layers = N_hidden_layers,\r\n hidden_activation = hidden_activation,\r\n \r\n #output\r\n output_shape = output_shape, \r\n output_activation = output_activation,\r\n row_wise_output = False\r\n )\r\n \r\n td_model.compile(\r\n optimizer = tf.keras.optimizers.SGD(),\r\n metrics = [td_metrics.Temporal_MAE, td_metrics.Prediction_MAE]\r\n )\r\n \r\n walks_td,payoffs_td = input_creation(100, step, process_specs, 20000000, 500, 0.333)\r\n \r\n td_hist = td_model.fit( \r\n\r\n walks_td,\r\n payoffs_td,\r\n lr = 0.0001 ,\r\n lamb = 0.275,\r\n batch_size=2,\r\n epochs = 10,\r\n verbose = True,\r\n )\r\n \r\n error_list = []\r\n \r\n \r\n for time in range(0,step + 1):\r\n \r\n for space in range(81,121,2):\r\n \r\n td = float(td_model.predict([[[space,time]]]))\r\n \r\n mc = conditional_expectation[space][time]\r\n \r\n error = np.sqrt((td - mc)**2)\r\n \r\n error_list.append(error)\r\n \r\n rmse = sum(error_list)/len(error_list)\r\n rmse_list.append(rmse)\r\n\r\n#%%\r\n\r\n\r\nfig, ax = plt.subplots(1,1)\r\n\r\nax.set_xlabel(\"Number of Time Steps\")\r\nax.set_ylabel(\"RMSE\")\r\nax.title.set_text(\"RMSE against number of time steps\") \r\n\r\nax.plot(num_steps, rmse_list,linestyle='--', marker='o') \r\n \r\n"
] | [
[
"numpy.concatenate",
"tensorflow.keras.optimizers.SGD",
"pandas.DataFrame",
"matplotlib.pyplot.subplots",
"numpy.mean",
"numpy.sqrt",
"numpy.hstack",
"numpy.linspace",
"numpy.vstack",
"numpy.maximum"
]
] |
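The call-option script above benchmarks its TD network against a Monte Carlo conditional-expectation table built by bucketing simulated prices and averaging the terminal payoff within each bucket. A numpy-only sketch of that bucketing step, using an illustrative two-step driftless GBM in place of the script's Stochastic_Processes generator (all parameters below are assumptions, not the script's exact settings):

```python
import numpy as np

rng = np.random.default_rng(0)

# Two-step driftless GBM: price at an intermediate time t and at maturity T.
s0, sigma, t, T, n_paths = 100.0, 0.2, 0.125, 0.25, 200_000
z1, z2 = rng.standard_normal(n_paths), rng.standard_normal(n_paths)
s_t = s0 * np.exp(-0.5 * sigma**2 * t + sigma * np.sqrt(t) * z1)
s_T = s_t * np.exp(-0.5 * sigma**2 * (T - t) + sigma * np.sqrt(T - t) * z2)

payoffs = np.maximum(s_T - 100.0, 0.0)  # European call payoff, strike 100

# Discretize the intermediate price and average the payoff inside each bucket,
# which is how the script builds its conditional-expectation benchmark.
edges = np.linspace(80, 120, 21)
centers = (edges[:-1] + edges[1:]) / 2
for lb, ub, mid in zip(edges[:-1], edges[1:], centers):
    mask = (lb <= s_t) & (s_t <= ub)
    if mask.any():
        print(f'E[payoff | S_t near {mid:5.1f}] = {payoffs[mask].mean():6.3f}')
```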
toluwajosh/Pointnet_Pointnet2_pytorch | [
"82a37be460a004fb7e300d2f11a395916d539391"
] | [
"train_semseg.py"
] | [
"\"\"\"\nAuthor: Benny\nDate: Nov 2019\n\"\"\"\nimport argparse\nimport os\nfrom data_utils.S3DISDataLoader import S3DISDataset, S3DISDatasetWholeScene\nimport torch\nimport datetime\nimport logging\nfrom pathlib import Path\nimport sys\nimport importlib\nimport shutil\nfrom tqdm import tqdm\nimport provider\nimport numpy as np\n\nBASE_DIR = os.path.dirname(os.path.abspath(__file__))\nROOT_DIR = BASE_DIR\nsys.path.append(os.path.join(ROOT_DIR, 'models'))\n\n\nclasses = ['ceiling','floor','wall','beam','column','window','door','table','chair','sofa','bookcase','board','clutter']\nclass2label = {cls: i for i,cls in enumerate(classes)}\nseg_classes = class2label\nseg_label_to_cat = {}\nfor i,cat in enumerate(seg_classes.keys()):\n seg_label_to_cat[i] = cat\n\n\ndef parse_args():\n parser = argparse.ArgumentParser('Model')\n parser.add_argument('--model', type=str, default='pointnet_sem_seg', help='model name [default: pointnet_sem_seg]')\n parser.add_argument('--batch_size', type=int, default=12, help='Batch Size during training [default: 12]')\n parser.add_argument('--epoch', default=1024, type=int, help='Epoch to run [default: 1024]')\n parser.add_argument('--learning_rate', default=0.001, type=float, help='Initial learning rate [default: 0.001]')\n parser.add_argument('--gpu', type=str, default='0', help='GPU to use [default: GPU 0]')\n parser.add_argument('--optimizer', type=str, default='Adam', help='Adam or SGD [default: Adam]')\n parser.add_argument('--log_dir', type=str, default=None, help='Log path [default: None]')\n parser.add_argument('--decay_rate', type=float, default=1e-4, help='weight decay [default: 1e-4]')\n parser.add_argument('--npoint', type=int, default=8192, help='Point Number [default: 2048]')\n parser.add_argument('--with_rgb', action='store_true', default=False, help='Whether to use RGB information [default: False]')\n parser.add_argument('--step_size', type=int, default=200, help='Decay step for lr decay [default: every 200 epochs]')\n parser.add_argument('--lr_decay', type=float, default=0.7, help='Decay rate for lr decay [default: 0.7]')\n parser.add_argument('--test_area', type=int, default=5, help='Which area to use for test, option: 1-6 [default: 5]')\n\n return parser.parse_args()\n\ndef main(args):\n def log_string(str):\n logger.info(str)\n print(str)\n\n '''HYPER PARAMETER'''\n os.environ[\"CUDA_VISIBLE_DEVICES\"] = args.gpu\n\n '''CREATE DIR'''\n timestr = str(datetime.datetime.now().strftime('%Y-%m-%d_%H-%M'))\n experiment_dir = Path('./log/')\n experiment_dir.mkdir(exist_ok=True)\n experiment_dir = experiment_dir.joinpath('sem_seg')\n experiment_dir.mkdir(exist_ok=True)\n if args.log_dir is None:\n experiment_dir = experiment_dir.joinpath(timestr)\n else:\n experiment_dir = experiment_dir.joinpath(args.log_dir)\n experiment_dir.mkdir(exist_ok=True)\n checkpoints_dir = experiment_dir.joinpath('checkpoints/')\n checkpoints_dir.mkdir(exist_ok=True)\n log_dir = experiment_dir.joinpath('logs/')\n log_dir.mkdir(exist_ok=True)\n\n '''LOG'''\n args = parse_args()\n logger = logging.getLogger(\"Model\")\n logger.setLevel(logging.INFO)\n formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')\n file_handler = logging.FileHandler('%s/%s.txt' % (log_dir, args.model))\n file_handler.setLevel(logging.INFO)\n file_handler.setFormatter(formatter)\n logger.addHandler(file_handler)\n log_string('PARAMETER ...')\n log_string(args)\n\n root = 'data/stanford_indoor3d/'\n\n NUM_CLASSES = 13\n NUM_POINT = args.npoint\n BATCH_SIZE = 
args.batch_size\n FEATURE_CHANNEL = 3 if args.with_rgb else 0\n\n print(\"start loading training data ...\")\n TRAIN_DATASET = S3DISDataset(root, split='train', with_rgb=args.with_rgb, test_area=args.test_area, block_points=NUM_POINT)\n print(\"start loading test data ...\")\n TEST_DATASET = S3DISDataset(root, split='test', with_rgb=args.with_rgb, test_area=args.test_area, block_points=NUM_POINT)\n print(\"start loading whole scene validation data ...\")\n TEST_DATASET_WHOLE_SCENE = S3DISDatasetWholeScene(root, split='test', with_rgb=args.with_rgb, test_area=args.test_area, block_points=NUM_POINT)\n trainDataLoader = torch.utils.data.DataLoader(TRAIN_DATASET, batch_size=BATCH_SIZE, shuffle=True, num_workers=4)\n testDataLoader = torch.utils.data.DataLoader(TEST_DATASET, batch_size=BATCH_SIZE, shuffle=False, num_workers=4)\n weights = TRAIN_DATASET.labelweights\n weights = torch.Tensor(weights).cuda()\n\n log_string(\"The number of training data is: %d\" % len(TRAIN_DATASET))\n log_string(\"The number of test data is: %d\" % len(TEST_DATASET_WHOLE_SCENE))\n\n '''MODEL LOADING'''\n MODEL = importlib.import_module(args.model)\n shutil.copy('models/%s.py' % args.model, str(experiment_dir))\n shutil.copy('models/pointnet_util.py', str(experiment_dir))\n\n classifier = MODEL.get_model(NUM_CLASSES, with_rgb=args.with_rgb).cuda()\n criterion = MODEL.get_loss().cuda()\n\n def weights_init(m):\n classname = m.__class__.__name__\n if classname.find('Conv2d') != -1:\n torch.nn.init.xavier_normal_(m.weight.data)\n torch.nn.init.constant_(m.bias.data, 0.0)\n elif classname.find('Linear') != -1:\n torch.nn.init.xavier_normal_(m.weight.data)\n torch.nn.init.constant_(m.bias.data, 0.0)\n\n try:\n checkpoint = torch.load(str(experiment_dir) + '/checkpoints/best_model.pth')\n start_epoch = checkpoint['epoch']\n classifier.load_state_dict(checkpoint['model_state_dict'])\n log_string('Use pretrain model')\n except:\n log_string('No existing model, starting training from scratch...')\n start_epoch = 0\n classifier = classifier.apply(weights_init)\n\n if args.optimizer == 'Adam':\n optimizer = torch.optim.Adam(\n classifier.parameters(),\n lr=args.learning_rate,\n betas=(0.9, 0.999),\n eps=1e-08,\n weight_decay=args.decay_rate\n )\n else:\n optimizer = torch.optim.SGD(classifier.parameters(), lr=args.learning_rate, momentum=0.9)\n\n def bn_momentum_adjust(m, momentum):\n if isinstance(m, torch.nn.BatchNorm2d) or isinstance(m, torch.nn.BatchNorm1d):\n m.momentum = momentum\n\n LEARNING_RATE_CLIP = 1e-5\n MOMENTUM_ORIGINAL = 0.1\n MOMENTUM_DECCAY = 0.5\n MOMENTUM_DECCAY_STEP = args.step_size\n\n global_epoch = 0\n best_iou = 0\n\n for epoch in range(start_epoch,args.epoch):\n '''Train on chopped scenes'''\n log_string('**** Epoch %d (%d/%s) ****' % (global_epoch + 1, epoch + 1, args.epoch))\n lr = max(args.learning_rate * (args.lr_decay ** (epoch // args.step_size)), LEARNING_RATE_CLIP)\n log_string('Learning rate:%f' % lr)\n for param_group in optimizer.param_groups:\n param_group['lr'] = lr\n momentum = MOMENTUM_ORIGINAL * (MOMENTUM_DECCAY ** (epoch // MOMENTUM_DECCAY_STEP))\n if momentum < 0.01:\n momentum = 0.01\n print('BN momentum updated to: %f' % momentum)\n classifier = classifier.apply(lambda x: bn_momentum_adjust(x,momentum))\n num_batches = len(trainDataLoader)\n total_correct = 0\n total_seen = 0\n loss_sum = 0\n for i, data in tqdm(enumerate(trainDataLoader), total=len(trainDataLoader), smoothing=0.9):\n points, target, _ = data\n points = points.data.numpy()\n points[:, :, :3] = 
provider.normalize_data(points[:, :, :3])\n points[:,:, :3] = provider.random_scale_point_cloud(points[:,:, :3])\n points[:,:, :3] = provider.rotate_point_cloud_z(points[:,:, :3])\n points = torch.Tensor(points)\n points, target = points.float().cuda(),target.long().cuda()\n points = points.transpose(2, 1)\n optimizer.zero_grad()\n classifier = classifier.train()\n seg_pred, trans_feat = classifier(points)\n seg_pred = seg_pred.contiguous().view(-1, NUM_CLASSES)\n batch_label = target.view(-1, 1)[:, 0].cpu().data.numpy()\n target = target.view(-1, 1)[:, 0]\n loss = criterion(seg_pred, target, trans_feat, weights)\n loss.backward()\n optimizer.step()\n pred_choice = seg_pred.cpu().data.max(1)[1].numpy()\n correct = np.sum(pred_choice == batch_label)\n total_correct += correct\n total_seen += (BATCH_SIZE * NUM_POINT)\n loss_sum += loss\n log_string('Training mean loss: %f' % (loss_sum / num_batches))\n log_string('Training accuracy: %f' % (total_correct / float(total_seen)))\n\n if epoch % 10 == 0 and epoch < 800:\n logger.info('Save model...')\n savepath = str(checkpoints_dir) + '/best_model.pth'\n log_string('Saving at %s' % savepath)\n state = {\n 'epoch': epoch,\n 'model_state_dict': classifier.state_dict(),\n 'optimizer_state_dict': optimizer.state_dict(),\n }\n torch.save(state, savepath)\n log_string('Saving model....')\n\n '''Evaluate on chopped scenes'''\n with torch.no_grad():\n num_batches = len(testDataLoader)\n total_correct = 0\n total_seen = 0\n loss_sum = 0\n log_string('---- EPOCH %03d EVALUATION ----' % (global_epoch + 1))\n for i, data in tqdm(enumerate(testDataLoader), total=len(testDataLoader), smoothing=0.9):\n points, target, _ = data\n points = points.data.numpy()\n points[:, :, :3] = provider.normalize_data(points[:, :, :3])\n points = torch.Tensor(points)\n points, target = points.float().cuda(), target.long().cuda()\n points = points.transpose(2, 1)\n classifier = classifier.eval()\n seg_pred, trans_feat = classifier(points)\n seg_pred = seg_pred.contiguous().view(-1, NUM_CLASSES)\n target = target.view(-1, 1)[:, 0]\n loss = criterion(seg_pred, target, trans_feat, weights)\n loss_sum += loss\n batch_label = target.cpu().data.numpy()\n pred_choice = seg_pred.cpu().data.max(1)[1].numpy()\n correct = np.sum(pred_choice == batch_label)\n total_correct += correct\n total_seen += (BATCH_SIZE * NUM_POINT)\n log_string('Eval mean loss: %f' % (loss_sum / num_batches))\n log_string('Eval accuracy: %f' % (total_correct / float(total_seen)))\n\n '''Evaluate on whole scenes'''\n if epoch % 5 ==0 and epoch > 800:\n with torch.no_grad():\n num_batches = len(TEST_DATASET_WHOLE_SCENE)\n log_string('---- EPOCH %03d EVALUATION WHOLE SCENE----' % (global_epoch + 1))\n total_correct = 0\n total_seen = 0\n loss_sum = 0\n total_seen_class = [0 for _ in range(NUM_CLASSES)]\n total_correct_class = [0 for _ in range(NUM_CLASSES)]\n total_iou_deno_class = [0 for _ in range(NUM_CLASSES)]\n\n labelweights = np.zeros(NUM_CLASSES)\n is_continue_batch = False\n\n extra_batch_data = np.zeros((0, NUM_POINT, 3 + FEATURE_CHANNEL))\n extra_batch_label = np.zeros((0, NUM_POINT))\n extra_batch_smpw = np.zeros((0, NUM_POINT))\n for batch_idx in tqdm(range(num_batches),total=num_batches):\n if not is_continue_batch:\n batch_data, batch_label, batch_smpw = TEST_DATASET_WHOLE_SCENE[batch_idx]\n batch_data = np.concatenate((batch_data, extra_batch_data), axis=0)\n batch_label = np.concatenate((batch_label, extra_batch_label), axis=0)\n batch_smpw = np.concatenate((batch_smpw, extra_batch_smpw), axis=0)\n 
else:\n batch_data_tmp, batch_label_tmp, batch_smpw_tmp = TEST_DATASET_WHOLE_SCENE[batch_idx]\n batch_data = np.concatenate((batch_data, batch_data_tmp), axis=0)\n batch_label = np.concatenate((batch_label, batch_label_tmp), axis=0)\n batch_smpw = np.concatenate((batch_smpw, batch_smpw_tmp), axis=0)\n if batch_data.shape[0] < BATCH_SIZE:\n is_continue_batch = True\n continue\n elif batch_data.shape[0] == BATCH_SIZE:\n is_continue_batch = False\n extra_batch_data = np.zeros((0, NUM_POINT, 3 + FEATURE_CHANNEL))\n extra_batch_label = np.zeros((0, NUM_POINT))\n extra_batch_smpw = np.zeros((0, NUM_POINT))\n else:\n is_continue_batch = False\n extra_batch_data = batch_data[BATCH_SIZE:, :, :]\n extra_batch_label = batch_label[BATCH_SIZE:, :]\n extra_batch_smpw = batch_smpw[BATCH_SIZE:, :]\n batch_data = batch_data[:BATCH_SIZE, :, :]\n batch_label = batch_label[:BATCH_SIZE, :]\n batch_smpw = batch_smpw[:BATCH_SIZE, :]\n\n batch_data[:, :, :3] = provider.normalize_data(batch_data[:, :, :3])\n batch_label = torch.Tensor(batch_label)\n batch_data = torch.Tensor(batch_data)\n batch_data, batch_label = batch_data.float().cuda(), batch_label.long().cuda()\n batch_data = batch_data.transpose(2, 1)\n classifier = classifier.eval()\n seg_pred, _ = classifier(batch_data)\n seg_pred = seg_pred.contiguous()\n batch_label = batch_label.cpu().data.numpy()\n pred_val = seg_pred.cpu().data.max(2)[1].numpy()\n correct = np.sum((pred_val == batch_label) & (batch_smpw > 0))\n total_correct += correct\n total_seen += np.sum(batch_smpw > 0)\n tmp, _ = np.histogram(batch_label, range(NUM_CLASSES + 1))\n labelweights += tmp\n for l in range(NUM_CLASSES):\n total_seen_class[l] += np.sum((batch_label == l) & (batch_smpw > 0))\n total_correct_class[l] += np.sum((pred_val == l) & (batch_label == l) & (batch_smpw > 0))\n total_iou_deno_class[l] += np.sum(((pred_val == l) | (batch_label == l)) & (batch_smpw > 0))\n\n mIoU = np.mean(np.array(total_correct_class) / (np.array(total_iou_deno_class, dtype=np.float) + 1e-6))\n log_string('eval whole scene mean loss: %f' % (loss_sum / float(num_batches)))\n log_string('eval point avg class IoU: %f' % mIoU)\n log_string('eval whole scene point accuracy: %f' % (total_correct / float(total_seen)))\n log_string('eval whole scene point avg class acc: %f' % (\n np.mean(np.array(total_correct_class) / (np.array(total_seen_class, dtype=np.float) + 1e-6))))\n labelweights = labelweights.astype(np.float32) / np.sum(labelweights.astype(np.float32))\n\n iou_per_class_str = '------- IoU --------\\n'\n for l in range(NUM_CLASSES):\n iou_per_class_str += 'class %s weight: %.3f, IoU: %.3f \\n' % (\n seg_label_to_cat[l] + ' ' * (14 - len(seg_label_to_cat[l])), labelweights[l],\n total_correct_class[l] / float(total_iou_deno_class[l]))\n log_string(iou_per_class_str)\n\n if (mIoU >= best_iou):\n logger.info('Save model...')\n savepath = str(checkpoints_dir) + '/best_model.pth'\n log_string('Saving at %s' % savepath)\n state = {\n 'epoch': epoch,\n 'class_avg_iou': mIoU,\n 'model_state_dict': classifier.state_dict(),\n 'optimizer_state_dict': optimizer.state_dict(),\n }\n torch.save(state, savepath)\n log_string('Saving model....')\n\n global_epoch += 1\n\n\nif __name__ == '__main__':\n args = parse_args()\n main(args)\n\n"
] | [
[
"numpy.concatenate",
"numpy.array",
"numpy.zeros",
"torch.nn.init.constant_",
"numpy.sum",
"torch.save",
"torch.no_grad",
"torch.utils.data.DataLoader",
"torch.nn.init.xavier_normal_",
"torch.Tensor"
]
] |
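The whole-scene evaluation in train_semseg.py above accumulates, per class, the correctly predicted points and the union of predicted and ground-truth points in order to report mean IoU. A short numpy sketch of that bookkeeping with random stand-in predictions and labels (the 13 S3DIS classes and the per-point weight mask are kept, but the data itself is hypothetical):

```python
import numpy as np

NUM_CLASSES = 13
rng = np.random.default_rng(0)

total_seen = np.zeros(NUM_CLASSES, dtype=np.int64)
total_correct = np.zeros(NUM_CLASSES, dtype=np.int64)
total_iou_deno = np.zeros(NUM_CLASSES, dtype=np.int64)

for _ in range(3):                                   # pretend evaluation batches
    pred = rng.integers(0, NUM_CLASSES, size=(12, 4096))
    label = rng.integers(0, NUM_CLASSES, size=(12, 4096))
    smpw = rng.random(size=(12, 4096)) > 0.05        # per-point weights; True means "counted"
    for l in range(NUM_CLASSES):
        total_seen[l] += np.sum((label == l) & smpw)
        total_correct[l] += np.sum((pred == l) & (label == l) & smpw)
        total_iou_deno[l] += np.sum(((pred == l) | (label == l)) & smpw)

iou = total_correct / (total_iou_deno + 1e-6)
print('per-class IoU:', np.round(iou, 3))
print('mIoU:', iou.mean())
```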
blsymens/Bioindustrial-Park | [
"196e2d60ec9bf0466ef804d036c995b89bc72f72"
] | [
"BioSTEAM 1.x.x/build/lib/biorefineries/cornstover/_plot_uncertainty_top.py"
] | [
"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Tue Aug 13 21:44:23 2019\n\n@author: yoelr\n\"\"\"\n\nimport pandas as pd\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom matplotlib.patches import Patch\nfrom matplotlib.lines import Line2D\nfrom biosteam import colors\nfrom biosteam.evaluation.evaluation_tools import plot_single_points, plot_horizontal_line, \\\n plot_montecarlo, plot_vertical_line\n\ndata = pd.read_excel('Monte Carlo cornstover.xlsx', header=[0, 1])\n\n# %% Plot MESP\n# plt.figure()\n\n# posistions_MESP = (0,)\n# MESP_data = data[('Biorefinery', 'Minimum ethanol selling price')]\n# bx_MESP = plot_montecarlo(MESP_data,\n# colors.blue_tint.RGBn,\n# colors.blue_shade.RGBn, \n# posistions_MESP, transpose=False)\n# dot_MESP = plot_single_points(posistions_MESP, [2.15], s=125, color=colors.red_dark.RGBn)\n# plt.ylabel('MESP ($\\mathrm{USD} \\cdot \\mathrm{gal}^{-1}$)')\n# plt.ylim(1.5, 2.50)\n# bx_patch = Patch(facecolor=colors.blue_tint.RGBn, edgecolor=colors.blue_shade.RGBn)\n# plt.legend([bx_patch, dot_MESP], ['BioSTEAM', 'benchmark'])\n# plt.xticks([], [])\n \n# %% Setup of subplots\n\n# light_color = colors.blue_tint.RGBn\n# dark_color = colors.blue_shade.RGBn\n# dot_color = colors.purple_shade.RGBn\nnums = tuple(range(1, 9))\n\nfig, axes = plt.subplots(ncols=1, nrows=2, constrained_layout=True, gridspec_kw=dict(height_ratios=[1, 4]))\nmagnitude_ax, metric_over_benchmark_ax = axes\nplt.sca(metric_over_benchmark_ax)\n\n# %% Plot MESP and ethanol sales\n\npositions_other = (16, 17, 18)\nother_index = [('Biorefinery', 'Steam demand'),\n ('Biorefinery', 'Ethanol production'),\n ('Biorefinery', 'Minimum ethanol selling price')]\nother_data = np.array(data[other_index])\nother_data[:, 0] *= 100./234784.\nother_data[:, 1] *= 100./22273.\nother_data[:, 2] *= 100./2.15\nbx_other = plot_montecarlo(other_data,\n colors.blue_tint.RGBn,\n colors.blue_shade.RGBn, positions_other, transpose=False)\n\n# %% Plot electricity\n\nplot_vertical_line(15.5, color=colors.grey_tint.RGBn)\npositions_electricity = tuple(range(9))\nareas = [f'Area {i}00' for i in nums]\nunits = 'MW'\npositions = np.arange(0, 9)\nelectricity_cols = [(i, 'Electricity') for i in areas]\nhumbird_electricity = 41 * np.array((0.02, 0.14, 0.06, 0.05,\n 0.18, 0.003, 0.03, 0.08,\n 0.44))\nelectricity_data = data[electricity_cols] #/ humbird_electricity\n# electricity_data[('Consumed', 'Electricity')] = electricity_data.sum(1)\nelectricity_data[('Biorefinery', 'Excess electricity')] = data[('Biorefinery', 'Excess electricity')]/1000\nelectricity_data_humbird_normalized = electricity_data * (100/humbird_electricity)\nbx_electricity = plot_montecarlo(electricity_data_humbird_normalized,\n colors.orange_tint.RGBn,\n colors.orange_shade.RGBn,\n transpose=True, positions=positions_electricity)\n\n# plot_vertical_line(7.5, color=colors.orange_tint.shade(15).RGBn, ls='-.')\n# plot_vertical_line(9.5, color=colors.orange_tint.shade(15).RGBn, ls='-.')\n\n# %% Plot installation cost\n\nunits = '10^6 USD'\nplot_vertical_line(8.5, color=colors.grey_tint.RGBn)\npositions_installation = tuple(range(9, 16))\ninstallation_cols = [(i, 'Installation cost') for i in areas[1:]]\nhumbird_installation = np.array([24.2, 32.9, 31.2, 22.3, 49.4, 5, 66, 6.9])\ninstallation_data = data[installation_cols]\n# installation_data[('Biorefinery', 'Installation cost')] = installation_data.sum(1)\ninstallation_data_humbird_normalized = installation_data * (100/humbird_installation[1:]/1e6)\nbx_installation = 
plot_montecarlo(installation_data_humbird_normalized,\n colors.purple_tint.RGBn, colors.purple_shade.RGBn,\n transpose=True, positions=positions_installation)\nplot_horizontal_line(100, ls='--')\nu_lb = 0; y_ub = 250\nplt.ylim(0, 250)\nyticks = np.arange(0, 251, 50)\nplt.yticks(yticks)\ny_text = 0.885*y_ub\ny_letter = 0.875 * y_ub\nplt.text(4, y_text, \"Electricity demand\", color=colors.orange_shade.RGBn,\n horizontalalignment='center', fontsize=12, fontweight='bold')\nplt.text(-0.25, y_letter, \"C\", color=colors.neutral_shade.RGBn,\n horizontalalignment='left', fontsize=16, fontweight='bold')\nplt.text(12, y_text, \"Installation cost\", color=colors.purple_shade.RGBn,\n horizontalalignment='center', fontsize=12, fontweight='bold')\nplt.text(8.75, y_letter, \"D\", color=colors.neutral_shade.RGBn,\n horizontalalignment='left', fontsize=16, fontweight='bold')\nplt.text(15.75, y_letter, \"E\", color=colors.neutral_shade.RGBn,\n horizontalalignment='left', fontsize=16, fontweight='bold')\n# plt.text(9.25, y_letter, \"D\", color=colors.neutral_shade.RGBn,\n# horizontalalignment='center', fontsize=14, fontweight='bold')\n\n# plt.text(16, y_text, \"Ethanol\\nsales\", color=colors.red_shade.RGBn,\n# horizontalalignment='center', fontsize=12, fontweight='bold')\n# plt.text(16.0, y_text, \"MESP\", color=colors.blue_shade.RGBn,\n# horizontalalignment='center', fontsize=12, fontweight='bold')\n\nplt.xlim(-0.5, 18.5)\nplt.ylabel(\"Metric over benchmark [%]\")\narea_marks = [i.replace(' ', '\\n') for i in areas]\nxmarks = area_marks + ['Excess'] + area_marks[1:] + ['Steam\\ndemand', ' EtOH\\n prod.', ' MESP']\nxticks = positions_electricity + positions_installation + positions_other\nplt.xticks(xticks, xmarks)\nmetric_over_benchmark_ax.set_zorder(1e6)\n\nplt.sca(magnitude_ax)\nplt.fill_between([-0.5, 15.5], 0, 1, color=colors.neutral_tint.tint(85).RGBn)\n\nelectricity_areas = humbird_electricity # electricity_data.median()\nelectricity_areas /= max(electricity_areas)\nplt.bar(positions_electricity, electricity_areas, 0.5,\n align='center', label=\"Electricity demand\",\n color=colors.orange.tint(30).shade(15).RGBn,\n edgecolor=colors.orange_shade.RGBn)\nplot_vertical_line(8.5, color=colors.grey_tint.RGBn)\n\ninstallation_areas = humbird_installation[1:]# installation_data.median()\ninstallation_areas /= max(installation_areas)\nplt.bar(positions_installation, installation_areas, 0.5,\n align='center', label=\"Installation cost\",\n color=colors.purple.tint(30).shade(15).RGBn,\n edgecolor=colors.purple_shade.RGBn)\n\nplot_vertical_line(15.5, color=colors.grey_tint.RGBn)\n# plot_vertical_line(15.5, color='k', lw=0.8)\n# plt.bar(positions_MESP, [1], 0.5,\n# align='center', label=\"MESP\",\n# color=colors.blue.tint(30).shade(15).RGBn,\n# edgecolor=colors.blue_shade.RGBn)\n\nplot_vertical_line(-0.5, color='k')\nplt.hlines([1], [-0.5], [15.5], color='k')\nmagnitude_ax.spines['top'].set_visible(False)\nmagnitude_ax.spines['right'].set_visible(False)\nmagnitude_ax.tick_params(axis=\"x\", direction=\"in\", length=0)\nmagnitude_ax.set_zorder(2)\nmetric_over_benchmark_ax.set_zorder(1)\nplt.yticks([], [])\nplt.xticks(xticks[:-3], [])\nplt.ylim(0, 1)\nplt.xlim(-0.5, 18.5)\nplt.text(4, 0.65, \"Benchmark magnitude\", color=colors.neutral.shade(35).RGBn,\n horizontalalignment='center', fontsize=12, fontweight='bold')\nplt.text(-0.25, 0.60, \"A\", color=colors.neutral_shade.RGBn,\n horizontalalignment='left', fontsize=16, fontweight='bold')\nplt.text(8.75, 0.60, \"B\", color=colors.neutral_shade.RGBn,\n 
horizontalalignment='left', fontsize=16, fontweight='bold')\nplt.subplots_adjust(hspace=.0)\n\nmetric_over_benchmark_ax.tick_params(axis='x', direction=\"inout\", length=4)\nfor ax in axes:\n ax.tick_params(axis='y', right=False, direction=\"inout\", length=4)\nax2 = metric_over_benchmark_ax.twinx()\nplt.sca(ax2)\nplt.ylim(0, 250)\nplt.yticks(yticks, [])\nax2.zorder = 1000\nax2.tick_params(direction=\"in\")\n \nxlabels = metric_over_benchmark_ax.get_xticklabels() \n# for xtick in xlabels[-3:]:\n# xtick.set_rotation(90)\n\n\n# plot_vertical_line(15.5, color=colors.purple.tint(20).shade(10).RGBn, ls='-.')\n\n# leg1 = ax.legend([bx_economic['boxes'][0]], ['MESP'], loc=\"upper left\")\n# leg2 = ax.legend([bx_electricity['boxes'][0]], ['Electricity'], loc=\"upper center\")\n# leg3 = ax.legend([bx_installation['boxes'][0]], ['Installation'], loc=\"upper right\")\n# ax.add_artist(leg2)\n# ax.add_artist(leg1)\n\n# light_box = Patch(color=colors.neutral_tint.RGBn)\n# line = Line2D([0], [0], color=colors.neutral_shade.RGBn)\n# dark_box = Patch(color=colors.neutral_shade.RGBn)\n# plt.legend([(light_box, line), dark_box], [\"Metric over benchmark (%)\", \"Relative benchmark magnintude\"])\n\n# leg1 = ax.legend([bx_economic['boxes'][0]], ['MESP'], loc=\"upper left\")"
] | [
[
"numpy.array",
"matplotlib.pyplot.text",
"matplotlib.pyplot.xlim",
"pandas.read_excel",
"matplotlib.pyplot.ylim",
"matplotlib.pyplot.sca",
"matplotlib.pyplot.yticks",
"numpy.arange",
"matplotlib.pyplot.hlines",
"matplotlib.pyplot.ylabel",
"matplotlib.pyplot.subplots_adjust",
"matplotlib.pyplot.xticks"
]
] |
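The uncertainty plot above expresses each Monte Carlo metric as a percentage of its benchmark value before drawing the distributions. A plain-matplotlib sketch of that normalization and plot, with hypothetical samples standing in for the 'Monte Carlo cornstover.xlsx' columns and without biosteam's plot_montecarlo styling (the benchmark numbers are taken from the script; everything else is illustrative):

```python
import numpy as np
import matplotlib.pyplot as plt

rng = np.random.default_rng(1)

# Hypothetical Monte Carlo samples for three biorefinery metrics and their benchmarks.
benchmarks = {'Steam demand': 234784.0, 'Ethanol production': 22273.0, 'MESP': 2.15}
samples = {
    'Steam demand': rng.normal(230_000, 15_000, 500),
    'Ethanol production': rng.normal(22_000, 800, 500),
    'MESP': rng.normal(2.2, 0.15, 500),
}

# Express each metric as a percentage of its benchmark, as the script does before plotting.
normalized = [100.0 * samples[k] / benchmarks[k] for k in benchmarks]

fig, ax = plt.subplots()
ax.boxplot(normalized, positions=list(range(len(normalized))))
ax.set_xticks(list(range(len(normalized))))
ax.set_xticklabels(list(benchmarks))
ax.axhline(100, ls='--', color='k')  # benchmark reference line
ax.set_ylabel('Metric over benchmark [%]')
plt.show()
```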
Gwiths/seeking-the-shape-of-sound | [
"c9a7c68c66459f86e4885ba0b9a0d4a397510c0f"
] | [
"models/nn/ir_se_model.py"
] | [
"import torch\nimport torch.nn as nn\nfrom torch.nn import Linear, Conv2d, BatchNorm1d, BatchNorm2d, PReLU, ReLU, Sigmoid, Dropout, MaxPool2d, \\\n AdaptiveAvgPool2d, Sequential, Module\nfrom collections import namedtuple\n\n\n# Support: ['IR_50', 'IR_101', 'IR_152', 'IR_SE_50', 'IR_SE_101', 'IR_SE_152']\n\n\nclass Flatten(Module):\n def forward(self, input):\n return input.view(input.size(0), -1)\n\n\ndef l2_norm(input, axis=1):\n norm = torch.norm(input, 2, axis, True)\n output = torch.div(input, norm)\n\n return output\n\n\nclass SEModule(Module):\n def __init__(self, channels, reduction):\n super(SEModule, self).__init__()\n self.avg_pool = AdaptiveAvgPool2d(1)\n self.fc1 = Conv2d(\n channels, channels // reduction, kernel_size=1, padding=0, bias=False)\n\n nn.init.xavier_uniform_(self.fc1.weight.data)\n\n self.relu = ReLU(inplace=True)\n self.fc2 = Conv2d(\n channels // reduction, channels, kernel_size=1, padding=0, bias=False)\n\n self.sigmoid = Sigmoid()\n\n def forward(self, x):\n module_input = x\n x = self.avg_pool(x)\n x = self.fc1(x)\n x = self.relu(x)\n x = self.fc2(x)\n x = self.sigmoid(x)\n\n return module_input * x\n\n\nclass bottleneck_IR(Module):\n def __init__(self, in_channel, depth, stride):\n super(bottleneck_IR, self).__init__()\n if in_channel == depth:\n self.shortcut_layer = MaxPool2d(1, stride)\n else:\n self.shortcut_layer = Sequential(\n Conv2d(in_channel, depth, (1, 1), stride, bias=False), BatchNorm2d(depth))\n self.res_layer = Sequential(\n BatchNorm2d(in_channel),\n Conv2d(in_channel, depth, (3, 3), (1, 1), 1, bias=False), PReLU(depth),\n Conv2d(depth, depth, (3, 3), stride, 1, bias=False), BatchNorm2d(depth))\n\n def forward(self, x):\n shortcut = self.shortcut_layer(x)\n res = self.res_layer(x)\n\n return res + shortcut\n\n\nclass bottleneck_IR_SE(Module):\n def __init__(self, in_channel, depth, stride):\n super(bottleneck_IR_SE, self).__init__()\n if in_channel == depth:\n self.shortcut_layer = MaxPool2d(1, stride)\n else:\n self.shortcut_layer = Sequential(\n Conv2d(in_channel, depth, (1, 1), stride, bias=False),\n BatchNorm2d(depth))\n self.res_layer = Sequential(\n BatchNorm2d(in_channel),\n Conv2d(in_channel, depth, (3, 3), (1, 1), 1, bias=False),\n PReLU(depth),\n Conv2d(depth, depth, (3, 3), stride, 1, bias=False),\n BatchNorm2d(depth),\n SEModule(depth, 16)\n )\n\n def forward(self, x):\n shortcut = self.shortcut_layer(x)\n res = self.res_layer(x)\n\n return res + shortcut\n\n\nclass Bottleneck(namedtuple('Block', ['in_channel', 'depth', 'stride'])):\n '''A named tuple describing a ResNet block.'''\n\n\ndef get_block(in_channel, depth, num_units, stride=2):\n\n return [Bottleneck(in_channel, depth, stride)] + [Bottleneck(depth, depth, 1) for i in range(num_units - 1)]\n\n\ndef get_blocks(num_layers):\n if num_layers == 50:\n blocks = [\n get_block(in_channel=64, depth=64, num_units=3),\n get_block(in_channel=64, depth=128, num_units=4),\n get_block(in_channel=128, depth=256, num_units=14),\n get_block(in_channel=256, depth=512, num_units=3)\n ]\n elif num_layers == 100:\n blocks = [\n get_block(in_channel=64, depth=64, num_units=3),\n get_block(in_channel=64, depth=128, num_units=13),\n get_block(in_channel=128, depth=256, num_units=30),\n get_block(in_channel=256, depth=512, num_units=3)\n ]\n elif num_layers == 152:\n blocks = [\n get_block(in_channel=64, depth=64, num_units=3),\n get_block(in_channel=64, depth=128, num_units=8),\n get_block(in_channel=128, depth=256, num_units=36),\n get_block(in_channel=256, depth=512, num_units=3)\n ]\n\n 
return blocks\n\n\nclass Backbone(Module):\n def __init__(self, input_size, num_layers, mode='ir'):\n super(Backbone, self).__init__()\n assert input_size[0] in [112, 224], \"input_size should be [112, 112] or [224, 224]\"\n assert num_layers in [50, 100, 152], \"num_layers should be 50, 100 or 152\"\n assert mode in ['ir', 'ir_se'], \"mode should be ir or ir_se\"\n blocks = get_blocks(num_layers)\n if mode == 'ir':\n unit_module = bottleneck_IR\n elif mode == 'ir_se':\n unit_module = bottleneck_IR_SE\n self.input_layer = Sequential(Conv2d(3, 64, (3, 3), 1, 1, bias=False),\n BatchNorm2d(64),\n PReLU(64))\n if input_size[0] == 112:\n self.output_layer = Sequential(BatchNorm2d(512),\n Dropout(),\n Flatten(),\n Linear(512 * 7 * 7, 512),\n BatchNorm1d(512))\n else:\n self.output_layer = Sequential(BatchNorm2d(512),\n Dropout(),\n Flatten(),\n Linear(512 * 14 * 14, 512),\n BatchNorm1d(512))\n\n modules = []\n for block in blocks:\n for bottleneck in block:\n modules.append(\n unit_module(bottleneck.in_channel,\n bottleneck.depth,\n bottleneck.stride))\n self.body = Sequential(*modules)\n\n self._initialize_weights()\n\n def forward(self, x):\n x = self.input_layer(x)\n x = self.body(x)\n x = self.output_layer(x)\n\n return x\n\n def _initialize_weights(self):\n for m in self.modules():\n if isinstance(m, nn.Conv2d):\n nn.init.xavier_uniform_(m.weight.data)\n if m.bias is not None:\n m.bias.data.zero_()\n elif isinstance(m, nn.BatchNorm2d):\n m.weight.data.fill_(1)\n m.bias.data.zero_()\n elif isinstance(m, nn.BatchNorm1d):\n m.weight.data.fill_(1)\n m.bias.data.zero_()\n elif isinstance(m, nn.Linear):\n nn.init.xavier_uniform_(m.weight.data)\n if m.bias is not None:\n m.bias.data.zero_()\n\n\ndef IR_50(input_size, pretrained=None):\n \"\"\"Constructs a ir-50 model.\n \"\"\"\n model = Backbone(input_size, 50, 'ir')\n\n if pretrained is not None:\n print('load pretrained model from %s'%pretrained)\n state_dict = torch.load(pretrained, map_location='cpu')\n model.load_state_dict(state_dict)\n return model\n\n\ndef IR_101(input_size):\n \"\"\"Constructs a ir-101 model.\n \"\"\"\n model = Backbone(input_size, 100, 'ir')\n\n return model\n\n\ndef IR_152(input_size):\n \"\"\"Constructs a ir-152 model.\n \"\"\"\n model = Backbone(input_size, 152, 'ir')\n\n return model\n\n\ndef IR_SE_50(input_size):\n \"\"\"Constructs a ir_se-50 model.\n \"\"\"\n model = Backbone(input_size, 50, 'ir_se')\n\n return model\n\n\ndef IR_SE_101(input_size):\n \"\"\"Constructs a ir_se-101 model.\n \"\"\"\n model = Backbone(input_size, 100, 'ir_se')\n\n return model\n\n\ndef IR_SE_152(input_size):\n \"\"\"Constructs a ir_se-152 model.\n \"\"\"\n model = Backbone(input_size, 152, 'ir_se')\n\n return model\n"
] | [
[
"torch.nn.Linear",
"torch.nn.Dropout",
"torch.div",
"torch.nn.Sigmoid",
"torch.norm",
"torch.nn.Sequential",
"torch.nn.MaxPool2d",
"torch.nn.init.xavier_uniform_",
"torch.nn.BatchNorm2d",
"torch.nn.ReLU",
"torch.nn.Conv2d",
"torch.nn.PReLU",
"torch.nn.BatchNorm1d",
"torch.load",
"torch.nn.AdaptiveAvgPool2d"
]
] |
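A minimal usage sketch for the IR/IR-SE backbones defined in the cell above, assuming the file is importable as `model_irse` (the import path is a placeholder, not taken from the row); the batch of random 112x112 crops is purely illustrative:

import torch
from model_irse import IR_SE_50   # hypothetical module name for the file above

model = IR_SE_50(input_size=[112, 112])   # aligned 112x112 face crops
model.eval()
with torch.no_grad():
    embeddings = model(torch.randn(4, 3, 112, 112))
print(embeddings.shape)   # torch.Size([4, 512]) -- one 512-d embedding per face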
lambda-xmu/reinforcement-learning | [
"2a4c5d148fe003a3d87ace8bbcd2954df4dea47f"
] | [
"Morvan_example/Sarsa/RL_brain.py"
] | [
"\"\"\"\nThis part of code is the Q learning brain, which is a brain of the agent.\nAll decisions are made in here.\nView more on my tutorial page: https://morvanzhou.github.io/tutorials/\n\"\"\"\n\nimport numpy as np\nimport pandas as pd\n\n\nclass RL(object):\n def __init__(self, action_space, learning_rate=0.01, reward_decay=0.9, e_greedy=0.9):\n self.actions = action_space # a list\n self.lr = learning_rate\n self.gamma = reward_decay\n self.epsilon = e_greedy\n\n self.q_table = pd.DataFrame(columns=self.actions, dtype=np.float64)\n\n def check_state_exist(self, state):\n if state not in self.q_table.index:\n # append new state to q table\n self.q_table = self.q_table.append(\n pd.Series(\n [0]*len(self.actions),\n index=self.q_table.columns,\n name=state,\n )\n )\n\n def choose_action(self, observation):\n self.check_state_exist(observation)\n # action selection\n if np.random.rand() < self.epsilon:\n # choose best action\n state_action = self.q_table.loc[observation, :]\n # some actions may have the same value, randomly choose on in these actions\n action = np.random.choice(state_action[state_action == np.max(state_action)].index)\n else:\n # choose random action\n action = np.random.choice(self.actions)\n return action\n\n def learn(self, *args):\n pass\n\n\n# off-policy\nclass QLearningTable(RL):\n def __init__(self, actions, learning_rate=0.01, reward_decay=0.9, e_greedy=0.9):\n super(QLearningTable, self).__init__(actions, learning_rate, reward_decay, e_greedy)\n\n def learn(self, s, a, r, s_):\n self.check_state_exist(s_)\n q_predict = self.q_table.loc[s, a]\n if s_ != 'terminal':\n q_target = r + self.gamma * self.q_table.loc[s_, :].max() # next state is not terminal\n else:\n q_target = r # next state is terminal\n self.q_table.loc[s, a] += self.lr * (q_target - q_predict) # update\n\n\n# on-policy\nclass SarsaTable(RL):\n\n def __init__(self, actions, learning_rate=0.01, reward_decay=0.9, e_greedy=0.9):\n super(SarsaTable, self).__init__(actions, learning_rate, reward_decay, e_greedy)\n\n def learn(self, s, a, r, s_, a_):\n self.check_state_exist(s_)\n q_predict = self.q_table.loc[s, a]\n if s_ != 'terminal':\n q_target = r + self.gamma * self.q_table.loc[s_, a_] # next state is not terminal\n else:\n q_target = r # next state is terminal\n self.q_table.loc[s, a] += self.lr * (q_target - q_predict) # update"
] | [
[
"numpy.max",
"pandas.DataFrame",
"numpy.random.rand",
"numpy.random.choice"
]
] |
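A minimal usage sketch for the tabular agents above. The import path is an assumption, and note that `check_state_exist` relies on `DataFrame.append`, so this needs a pandas version earlier than 2.0:

from RL_brain import QLearningTable, SarsaTable   # hypothetical import path

actions = list(range(4))                  # e.g. up / down / left / right
s, a, r, s_ = 'state_1', 0, 1.0, 'state_2'

q_agent = QLearningTable(actions)
q_agent.check_state_exist(s)              # make sure Q(s, .) exists before updating it
q_agent.learn(s, a, r, s_)                # off-policy target: r + gamma * max_a' Q(s', a')

sarsa_agent = SarsaTable(actions)
sarsa_agent.check_state_exist(s)
a_ = sarsa_agent.choose_action(s_)        # on-policy: next action drawn from the same policy
sarsa_agent.learn(s, a, r, s_, a_)        # target: r + gamma * Q(s', a') for the action actually taken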
danassutula/biomech-inverse | [
"4ee415f181e815085660dfe722bd861c99da0cd9"
] | [
"examples/human_skin/bimaterial/data/reactionforce.py"
] | [
"\"\"\"\nObtain reaction forces and displacements measurements.\n\"\"\"\n\nimport os\nimport scipy\nimport numpy as np\nimport scipy.optimize\nimport scipy.interpolate\nimport matplotlib.pyplot as plt\n\nfrom examples.utility import apply_mean_filter\n\n\nCURRENT_DIRECTORY = os.path.dirname(os.path.relpath(__file__))\nCURRENT_DIRECTORY_NAME = os.path.basename(CURRENT_DIRECTORY)\n\nPARENT_DIRECTORY = os.path.dirname(CURRENT_DIRECTORY)\nPARENT_DIRECTORY_NAME = os.path.basename(PARENT_DIRECTORY)\n\nSUBDIRECTORY_INPUT_DATA = os.path.join(\"datafiles_unprocessed\", \"reactionforce\")\nDIRECTORY_INPUT_DATA = os.path.join(CURRENT_DIRECTORY, SUBDIRECTORY_INPUT_DATA)\n\nSUBDIRECTORY_OUTPUT_DATA = os.path.join(\"datafiles_processed\", \"reactionforce\")\nDIRECTORY_OUTPUT_DATA = os.path.join(CURRENT_DIRECTORY, SUBDIRECTORY_OUTPUT_DATA)\nDIRECTORY_OUTPUT_FIGURES = os.path.join(CURRENT_DIRECTORY, \"results\")\n\n\nPLOT_DATA = True\nSAVE_PLOTS = False\nWRITE_DATA = False\n\nAPPLY_TEMPORAL_FILTER = True\nTEMPORAL_FILTERING_TIMES = 5\nTEMPORAL_FILTER_KERNELS = [\n # np.ones((33,), float),\n # np.ones((17,), float),\n np.ones(( 9,), float),\n np.ones(( 5,), float),\n np.ones(( 3,), float)] # Flat-top filters\n\n# MEASUREMENT_SLICE = slice(0, None)\nMEASUREMENT_SLICE = slice(10, None)\n\nREVERSE_MEASUREMENT_SIGN_FINALLY = True\nSET_MEASUREMENTS_AT_ORIGIN = True\n\n\ndef write_data_files(dct):\n\n if not os.path.isdir(DIRECTORY_OUTPUT_DATA):\n os.makedirs(DIRECTORY_OUTPUT_DATA)\n\n for key, val in dct.items():\n if isinstance(val, (list, tuple)):\n if not all(isinstance(val_i, np.ndarray) for val_i in val):\n raise RuntimeError('Expected the sequence to contain arrays.')\n for i, val_i in enumerate(val):\n np.savetxt(os.path.join(DIRECTORY_OUTPUT_DATA, key+f'_{i:04d}.out'), val_i)\n else:\n np.savetxt(os.path.join(DIRECTORY_OUTPUT_DATA, key+'.out'), val)\n\n\n### Measurement data files\n\nut_msr = np.loadtxt(os.path.join(DIRECTORY_INPUT_DATA, 'u_moving_pad.dat'), ndmin=1)\nft_msr = np.loadtxt(os.path.join(DIRECTORY_INPUT_DATA, 'f_moving_pad.dat'), ndmin=1)\n\nif ut_msr.ndim > 1: ut_msr = ut_msr.squeeze()\nif ft_msr.ndim > 1: ft_msr = ft_msr.squeeze()\n\nif not (ut_msr.ndim == ft_msr.ndim == 1):\n raise RuntimeError\n\nif ut_msr.shape != ft_msr.shape:\n raise RuntimeError\n\n\n### Filter measurements\n\nif APPLY_TEMPORAL_FILTER:\n\n ut_msr_flt = ut_msr.tolist()\n ft_msr_flt = ft_msr.tolist()\n\n for w_i in TEMPORAL_FILTER_KERNELS:\n for _ in range(TEMPORAL_FILTERING_TIMES):\n apply_mean_filter(w_i, ut_msr_flt)\n apply_mean_filter(w_i, ft_msr_flt)\n\n # i0_msr = len(w_i)+1\n #\n # ut_msr_flt = np.array(ut_msr_flt[i0_msr:-i0_msr+1])\n # ft_msr_flt = np.array(ft_msr_flt[i0_msr:-i0_msr+1])\n\n ut_msr_flt = np.array(ut_msr_flt)\n ft_msr_flt = np.array(ft_msr_flt)\n\nelse:\n\n ut_msr_flt = ut_msr.copy()\n ft_msr_flt = ft_msr.copy()\n\n### Trim measurement range\n\nif MEASUREMENT_SLICE:\n ut_msr_flt = ut_msr_flt[MEASUREMENT_SLICE].copy()\n ft_msr_flt = ft_msr_flt[MEASUREMENT_SLICE].copy()\n\n\n### Reset measurement at origin\n\nif SET_MEASUREMENTS_AT_ORIGIN:\n\n offset_value_ut = ut_msr_flt[0]\n offset_value_ft = ft_msr_flt[0]\n\n ut_msr -= offset_value_ut\n ft_msr -= offset_value_ft\n\n ut_msr_flt -= offset_value_ut\n ft_msr_flt -= offset_value_ft\n\n\n### Export these variables\n\nux_pad_mov = -ut_msr_flt if REVERSE_MEASUREMENT_SIGN_FINALLY else ut_msr_flt\nfx_pad_mov = -ft_msr_flt if REVERSE_MEASUREMENT_SIGN_FINALLY else ft_msr_flt\n\nmeasurements = {\n 'ux_pad_mov': ux_pad_mov,\n 'fx_pad_mov': fx_pad_mov,\n 
}\n\n\nif __name__ == \"__main__\":\n\n plt.interactive(True)\n plt.close('all')\n plt.show()\n\n if PLOT_DATA or SAVE_PLOTS:\n\n all_figure_names = []\n all_figure_handles = []\n\n\n figname = 'Pad Displacement Measurements'\n\n fh = plt.figure(figname)\n ah = fh.add_subplot(111)\n\n ah.plot(ut_msr, 'k:')\n\n # ah.set_title(figname)\n ah.set_xlabel('Measurement snapshot (#)')\n ah.set_ylabel('Pad displacement, $|u|$ (mm)')\n\n all_figure_names.append(figname)\n all_figure_handles.append(fh)\n\n\n figname = 'Pad Reaction Force Measurements'\n\n fh = plt.figure(figname)\n ah = fh.add_subplot(111)\n\n ah.plot(ft_msr, 'ko', markersize=1.0)\n\n # ah.set_title(figname)\n ah.set_xlabel('Measurement snapshot (#)')\n ah.set_ylabel('Pad reaction force, $|f|$ (N)')\n\n all_figure_names.append(figname)\n all_figure_handles.append(fh)\n\n\n figname = 'Pad Reaction Force vs Displacement Curve'\n fh = plt.figure(figname)\n ah = fh.add_subplot(111)\n\n ah.plot(ut_msr, ft_msr, 'ko', markersize=1.0)\n ah.plot(ut_msr_flt, ft_msr_flt, 'r-')\n ah.plot(ut_msr_flt[0], ft_msr_flt[0], 'ws', markersize=4, markeredgecolor='r')\n\n # ah.set_title(figname)\n ah.set_xlabel('Pad displacement, $|u|$ (mm)')\n ah.set_ylabel('Pad reaction force, $|f|$ (N)')\n ah.legend(['raw data', 'filtered data', 'assumed origin'])\n\n all_figure_names.append(figname)\n all_figure_handles.append(fh)\n\n\n if SAVE_PLOTS:\n\n if not os.path.isdir(DIRECTORY_OUTPUT_FIGURES):\n os.makedirs(DIRECTORY_OUTPUT_FIGURES)\n\n for handle_i, name_i in zip(all_figure_handles, all_figure_names):\n\n savename = name_i.lower().strip().replace(' ', '_')\n savepath = os.path.join(DIRECTORY_OUTPUT_FIGURES, savename)\n\n handle_i.savefig(savepath+'.png', dpi=300)\n handle_i.savefig(savepath+'.svg')\n handle_i.savefig(savepath+'.pdf')\n\n if not PLOT_DATA:\n plt.close('all')\n\n if WRITE_DATA:\n write_data_files({\n 'ux_pad_mov': ux_pad_mov,\n 'fx_pad_mov': fx_pad_mov,\n })\n"
] | [
[
"numpy.array",
"numpy.ones",
"matplotlib.pyplot.close",
"matplotlib.pyplot.interactive",
"matplotlib.pyplot.figure",
"matplotlib.pyplot.show"
]
] |
jaak21/hummingbot | [
"f075f7ffaa3aea8773005af94c72f3264f398ea0"
] | [
"hummingbot/client/command/balance_command.py"
] | [
"from hummingbot.client.settings import (\n GLOBAL_CONFIG_PATH,\n ethereum_required_trading_pairs\n)\nfrom hummingbot.user.user_balances import UserBalances\nfrom hummingbot.core.utils.async_utils import safe_ensure_future\nfrom hummingbot.client.config.global_config_map import global_config_map\nfrom hummingbot.client.config.config_helpers import (\n save_to_yml\n)\nfrom hummingbot.client.config.config_validators import validate_decimal, validate_exchange\nfrom hummingbot.market.celo.celo_cli import CeloCLI\nfrom hummingbot.client.performance import smart_round\nfrom hummingbot.core.rate_oracle.rate_oracle import RateOracle\nimport pandas as pd\nfrom decimal import Decimal\nfrom hummingbot.connector.exchange.paper_trade import reset_paper_trade_account_balance\nimport time\nfrom typing import TYPE_CHECKING, Dict, Optional, List\n\nif TYPE_CHECKING:\n from hummingbot.client.hummingbot_application import HummingbotApplication\n\nOPTIONS = [\n \"limit\",\n \"paper\"\n]\n\n\nclass BalanceCommand:\n def balance(self,\n option: str = None,\n args: List[str] = None\n ):\n self.app.clear_input()\n if option is None:\n safe_ensure_future(self.show_balances())\n\n elif option in OPTIONS:\n config_map = global_config_map\n file_path = GLOBAL_CONFIG_PATH\n if option == \"limit\":\n config_var = config_map[\"balance_asset_limit\"]\n if args is None or len(args) == 0:\n safe_ensure_future(self.show_asset_limits())\n return\n if len(args) != 3 or validate_exchange(args[0]) is not None or validate_decimal(args[2]) is not None:\n self._notify(\"Error: Invalid command arguments\")\n self.notify_balance_limit_set()\n return\n exchange = args[0]\n asset = args[1].upper()\n amount = float(args[2])\n if exchange not in config_var.value or config_var.value[exchange] is None:\n config_var.value[exchange] = {}\n if amount < 0 and asset in config_var.value[exchange].keys():\n config_var.value[exchange].pop(asset)\n self._notify(f\"Limit for {asset} on {exchange} exchange removed.\")\n elif amount >= 0:\n config_var.value[exchange][asset] = amount\n self._notify(f\"Limit for {asset} on {exchange} exchange set to {amount}\")\n save_to_yml(file_path, config_map)\n\n elif option == \"paper\":\n config_var = config_map[\"paper_trade_account_balance\"]\n if args is None or len(args) == 0:\n safe_ensure_future(self.show_paper_account_balance())\n return\n if len(args) != 2 or validate_decimal(args[1]) is not None:\n self._notify(\"Error: Invalid command arguments\")\n self.notify_balance_paper_set()\n return\n asset = args[0].upper()\n amount = float(args[1])\n safe_ensure_future(self.set_paper_balance(asset, amount))\n\n async def set_paper_balance(self, asset, amount):\n self.app.clear_input()\n self.placeholder_mode = True\n self.app.hide_input = True\n\n answer = await self.app.prompt(prompt=\"Setting a new paper balance on runtime requires to clear the current trading history. Would you like to do proceed (Yes/No)? 
>>> \")\n if answer.lower() in (\"yes\", \"y\"):\n config_map = global_config_map\n file_path = GLOBAL_CONFIG_PATH\n config_var = config_map[\"paper_trade_account_balance\"]\n paper_balances = dict(config_var.value) if config_var.value else {}\n paper_balances[asset] = amount\n config_var.value = paper_balances\n save_to_yml(file_path, config_map)\n self._notify(f\"Paper balance for {asset} token set to {amount}\")\n\n # Reset paper trade exchange class account balance to reflect the new balance on strategy start\n reset_paper_trade_account_balance()\n\n # Set new app init time to clear current trading history because the account balance has changed which will lead to wrong performance calculations\n self._main_app.init_time = time.time()\n self._notify(\"The trading history has been cleared!\")\n else:\n self._notify(\"Your current paper balance has not been changed!\")\n\n self.app.hide_input = False\n self.placeholder_mode = False\n self.app.change_prompt(prompt=\">>> \")\n\n async def show_balances(self):\n total_col_name = f'Total ({RateOracle.global_token_symbol})'\n self._notify(\"Updating balances, please wait...\")\n all_ex_bals = await UserBalances.instance().all_balances_all_exchanges()\n all_ex_avai_bals = UserBalances.instance().all_avai_balances_all_exchanges()\n all_ex_limits: Optional[Dict[str, Dict[str, str]]] = global_config_map[\"balance_asset_limit\"].value\n\n if all_ex_limits is None:\n all_ex_limits = {}\n\n exchanges_total = 0\n\n for exchange, bals in all_ex_bals.items():\n self._notify(f\"\\n{exchange}:\")\n df, allocated_total = await self.exchange_balances_extra_df(bals, all_ex_avai_bals.get(exchange, {}))\n if df.empty:\n self._notify(\"You have no balance on this exchange.\")\n else:\n lines = [\" \" + line for line in df.to_string(index=False).split(\"\\n\")]\n self._notify(\"\\n\".join(lines))\n self._notify(f\"\\n Total: {RateOracle.global_token_symbol} {smart_round(df[total_col_name].sum())} \"\n f\"Allocated: {allocated_total / df[total_col_name].sum():.2%}\")\n exchanges_total += df[total_col_name].sum()\n\n self._notify(f\"\\n\\nExchanges Total: {RateOracle.global_token_symbol} {exchanges_total:.0f} \")\n\n celo_address = global_config_map[\"celo_address\"].value\n if celo_address is not None:\n try:\n if not CeloCLI.unlocked:\n await self.validate_n_connect_celo()\n df = await self.celo_balances_df()\n lines = [\" \" + line for line in df.to_string(index=False).split(\"\\n\")]\n self._notify(\"\\ncelo:\")\n self._notify(\"\\n\".join(lines))\n except Exception as e:\n self._notify(f\"\\ncelo CLI Error: {str(e)}\")\n\n eth_address = global_config_map[\"ethereum_wallet\"].value\n if eth_address is not None:\n eth_df = await self.ethereum_balances_df()\n lines = [\" \" + line for line in eth_df.to_string(index=False).split(\"\\n\")]\n self._notify(\"\\nethereum:\")\n self._notify(\"\\n\".join(lines))\n\n # XDAI balances\n xdai_df = await self.xdai_balances_df()\n lines = [\" \" + line for line in xdai_df.to_string(index=False).split(\"\\n\")]\n self._notify(\"\\nxdai:\")\n self._notify(\"\\n\".join(lines))\n\n async def exchange_balances_extra_df(self, # type: HummingbotApplication\n ex_balances: Dict[str, Decimal],\n ex_avai_balances: Dict[str, Decimal]):\n total_col_name = f\"Total ({RateOracle.global_token_symbol})\"\n allocated_total = Decimal(\"0\")\n rows = []\n for token, bal in ex_balances.items():\n if bal == Decimal(0):\n continue\n avai = Decimal(ex_avai_balances.get(token.upper(), 0)) if ex_avai_balances is not None else Decimal(0)\n allocated 
= f\"{(bal - avai) / bal:.0%}\"\n rate = await RateOracle.global_rate(token)\n rate = Decimal(\"0\") if rate is None else rate\n global_value = rate * bal\n allocated_total += rate * (bal - avai)\n rows.append({\"Asset\": token.upper(),\n \"Total\": round(bal, 4),\n total_col_name: smart_round(global_value),\n \"Allocated\": allocated,\n })\n df = pd.DataFrame(data=rows, columns=[\"Asset\", \"Total\", total_col_name, \"Allocated\"])\n df.sort_values(by=[\"Asset\"], inplace=True)\n return df, allocated_total\n\n async def celo_balances_df(self, # type: HummingbotApplication\n ):\n rows = []\n bals = CeloCLI.balances()\n for token, bal in bals.items():\n rows.append({\"Asset\": token.upper(), \"Amount\": round(bal.total, 4)})\n df = pd.DataFrame(data=rows, columns=[\"Asset\", \"Amount\"])\n df.sort_values(by=[\"Asset\"], inplace=True)\n return df\n\n async def ethereum_balances_df(self, # type: HummingbotApplication\n ):\n rows = []\n if ethereum_required_trading_pairs():\n bals = await UserBalances.eth_n_erc20_balances()\n for token, bal in bals.items():\n rows.append({\"Asset\": token, \"Amount\": round(bal, 4)})\n else:\n eth_bal = UserBalances.ethereum_balance()\n rows.append({\"Asset\": \"ETH\", \"Amount\": round(eth_bal, 4)})\n df = pd.DataFrame(data=rows, columns=[\"Asset\", \"Amount\"])\n df.sort_values(by=[\"Asset\"], inplace=True)\n return df\n\n async def xdai_balances_df(self, # type: HummingbotApplication\n ):\n rows = []\n bals = await UserBalances.xdai_balances()\n for token, bal in bals.items():\n rows.append({\"Asset\": token, \"Amount\": round(bal, 4)})\n df = pd.DataFrame(data=rows, columns=[\"Asset\", \"Amount\"])\n df.sort_values(by=[\"Asset\"], inplace=True)\n return df\n\n async def asset_limits_df(self,\n asset_limit_conf: Dict[str, str]):\n rows = []\n for token, amount in asset_limit_conf.items():\n rows.append({\"Asset\": token, \"Limit\": round(Decimal(amount), 4)})\n\n df = pd.DataFrame(data=rows, columns=[\"Asset\", \"Limit\"])\n df.sort_values(by=[\"Asset\"], inplace=True)\n return df\n\n async def show_asset_limits(self):\n config_var = global_config_map[\"balance_asset_limit\"]\n exchange_limit_conf: Dict[str, Dict[str, str]] = config_var.value\n\n if not any(list(exchange_limit_conf.values())):\n self._notify(\"You have not set any limits.\")\n self.notify_balance_limit_set()\n return\n\n self._notify(\"Balance Limits per exchange...\")\n\n for exchange, asset_limit_config in exchange_limit_conf.items():\n if asset_limit_config is None:\n continue\n\n self._notify(f\"\\n{exchange}\")\n df = await self.asset_limits_df(asset_limit_config)\n if df.empty:\n self._notify(\"You have no limits on this exchange.\")\n else:\n lines = [\" \" + line for line in df.to_string(index=False).split(\"\\n\")]\n self._notify(\"\\n\".join(lines))\n self._notify(\"\\n\")\n return\n\n async def paper_acccount_balance_df(self, paper_balances: Dict[str, Decimal]):\n rows = []\n for asset, balance in paper_balances.items():\n rows.append({\"Asset\": asset, \"Balance\": round(Decimal(str(balance)), 4)})\n df = pd.DataFrame(data=rows, columns=[\"Asset\", \"Balance\"])\n df.sort_values(by=[\"Asset\"], inplace=True)\n return df\n\n def notify_balance_limit_set(self):\n self._notify(\"To set a balance limit (how much the bot can use): \\n\"\n \" balance limit [EXCHANGE] [ASSET] [AMOUNT]\\n\"\n \"e.g. balance limit binance BTC 0.1\")\n\n def notify_balance_paper_set(self):\n self._notify(\"To set a paper account balance: \\n\"\n \" balance paper [ASSET] [AMOUNT]\\n\"\n \"e.g. 
balance paper BTC 0.1\")\n\n async def show_paper_account_balance(self):\n paper_balances = global_config_map[\"paper_trade_account_balance\"].value\n if not paper_balances:\n self._notify(\"You have not set any paper account balance.\")\n self.notify_balance_paper_set()\n return\n self._notify(\"Paper account balances:\")\n df = await self.paper_acccount_balance_df(paper_balances)\n lines = [\" \" + line for line in df.to_string(index=False).split(\"\\n\")]\n self._notify(\"\\n\".join(lines))\n self._notify(\"\\n\")\n return\n"
] | [
[
"pandas.DataFrame"
]
] |
AnHongjun001/YOLOv1-pytorch | [
"4b0e189698d21a16fbd89ee4427097347a5202df"
] | [
"yolo/utils/dataset/transfrom/vdict2yolo.py"
] | [
"\"\"\"\nThis file is under Apache License 2.0, see more details at https://www.apache.org/licenses/LICENSE-2.0\nAuthor: Coder.AN, contact at [email protected]\nGithub: https://github.com/BestAnHongjun/YOLOv1-pytorch\n\"\"\"\n\nimport cv2\nimport torch\n\n\nclass vdict2yolo_v1:\n \"\"\"\n A transform which can convert VOC-dict to yolo format\n \"\"\"\n\n def __init__(self, grid_num=7, input_size=448):\n self.grid_num = grid_num\n self.input_size = input_size\n\n def __call__(self, vdict):\n image = vdict.get(\"image\")\n image_shape = image.shape\n grid_size = image_shape[1] / self.grid_num, image_shape[0] / self.grid_num\n\n img = cv2.resize(image, (self.input_size, self.input_size)).transpose(2, 0, 1)\n source = torch.tensor(img, dtype=torch.float32)\n\n target = torch.zeros((self.grid_num, self.grid_num, 8))\n target[:, :, 5] = int(vdict.get(\"filename\")) # filename\n target[:, :, 6] = image_shape[1] # src_width\n target[:, :, 7] = image_shape[0] # src_height\n\n for object_to_detect in vdict.get(\"objects\"):\n bbox = object_to_detect.get(\"bbox\")\n class_id = object_to_detect.get(\"class_id\")\n\n w = (bbox[2] - bbox[0]) / image_shape[1]\n h = (bbox[3] - bbox[1]) / image_shape[0]\n\n cx = (bbox[2] + bbox[0]) / 2\n cy = (bbox[3] + bbox[1]) / 2\n\n grid_j = int(cx // grid_size[0])\n grid_i = int(cy // grid_size[1])\n\n cx = (cx - grid_j * grid_size[0]) / grid_size[0]\n cy = (cy - grid_i * grid_size[1]) / grid_size[1]\n\n target[grid_i, grid_j, 0] = class_id\n target[grid_i, grid_j, 1] = cx\n target[grid_i, grid_j, 2] = cy\n target[grid_i, grid_j, 3] = w\n target[grid_i, grid_j, 4] = h\n\n return source, target\n\n def __repr__(self):\n return self.__class__.__name__ + '()'\n"
] | [
[
"torch.zeros",
"torch.tensor"
]
] |
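A small usage sketch for `vdict2yolo_v1` above. The dict layout is inferred from `__call__` (an HxWxC image, a numeric `filename` string, and objects with pixel `bbox` = [xmin, ymin, xmax, ymax] plus `class_id`); the concrete numbers are made up:

import numpy as np
from yolo.utils.dataset.transfrom.vdict2yolo import vdict2yolo_v1   # path from the row above; adjust to your layout

transform = vdict2yolo_v1(grid_num=7, input_size=448)
vdict = {
    "image": np.zeros((375, 500, 3), dtype=np.uint8),                 # H x W x C, e.g. from cv2.imread
    "filename": "000123",                                             # numeric string, stored in target[..., 5]
    "objects": [{"bbox": [48, 240, 195, 371], "class_id": 11}],       # pixel coords, xmin/ymin/xmax/ymax
}
source, target = transform(vdict)
print(source.shape)   # torch.Size([3, 448, 448]) -- resized CHW image tensor
print(target.shape)   # torch.Size([7, 7, 8])     -- per cell: [class, cx, cy, w, h, filename, src_w, src_h]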
XuankangLin/DiffAbs | [
"b17176464b80b368fa9a87f507e07b1d631ce6ef"
] | [
"diffabs/interval.py"
] | [
"\"\"\" Implement the Vanilla Interval domain based on PyTorch.\n Vanilla Interval: Simply propagates using interval arithmetic without any optimization.\n\"\"\"\n\nfrom __future__ import annotations\n\nfrom pathlib import Path\nfrom typing import Tuple, Union, Iterator, Callable, Iterable\n\nimport torch\nfrom torch import Tensor, nn\nfrom torch.nn import functional as F\n\nfrom diffabs.abs import AbsDom, AbsEle, AbsDist, AbsBlackSheep, forward_linear\nfrom diffabs.utils import valid_lb_ub, divide_pos_neg\n\n\nclass Dom(AbsDom):\n name = Path(__file__).with_suffix('').name # use file name (without extension) as domain name\n\n def __getattr__(self, name: str) -> object:\n assert name in globals()\n return eval(name)\n pass\n\n\nclass Ele(AbsEle):\n def __init__(self, lb: Tensor, ub: Tensor):\n \"\"\" In Vanilla Interval domain, only the Lower Bounds and Upper Bounds are maintained. \"\"\"\n assert valid_lb_ub(lb, ub)\n\n self._lb = lb\n self._ub = ub\n return\n\n @classmethod\n def by_intvl(cls, lb: Tensor, ub: Tensor) -> Ele:\n return Ele(lb, ub)\n\n def __iter__(self) -> Iterator[Tensor]:\n return iter((self._lb, self._ub))\n\n def __getitem__(self, key):\n return Ele(self._lb[key], self._ub[key])\n\n def __len__(self) -> int:\n return len(self._lb)\n\n def size(self):\n return self._lb.size()\n\n def dim(self):\n return self._lb.dim()\n\n def device(self):\n return self._lb.device\n\n def lb(self) -> Tensor:\n return self._lb\n\n def ub(self) -> Tensor:\n return self._ub\n\n def view(self, *shape) -> Ele:\n return Ele(self._lb.view(*shape), self._ub.view(*shape))\n\n def contiguous(self) -> Ele:\n return Ele(self._lb.contiguous(), self._ub.contiguous())\n\n def transpose(self, dim0, dim1) -> Ele:\n return Ele(self._lb.transpose(dim0, dim1), self._ub.transpose(dim0, dim1))\n\n def matmul(self, weights: Tensor) -> Ele:\n \"\"\" A much faster trick:\n L' = max(0, w) * L + min(0, w) * U\n U' = max(0, w) * U + min(0, w) * L\n \"\"\"\n pos_ws, neg_ws = divide_pos_neg(weights)\n\n newl_pos = self._lb.matmul(pos_ws)\n newl_neg = self._ub.matmul(neg_ws)\n newl = newl_pos + newl_neg\n\n newu_pos = self._ub.matmul(pos_ws)\n newu_neg = self._lb.matmul(neg_ws)\n newu = newu_pos + newu_neg\n return Ele(newl, newu)\n\n def __add__(self, other) -> Ele:\n if isinstance(other, Ele):\n return Ele(self._lb + other._lb, self._ub + other._ub)\n else:\n return Ele(self._lb + other, self._ub + other)\n\n def __mul__(self, flt) -> Ele:\n if isinstance(flt, Tensor) and flt.dim() == 1 and flt.shape[0] == self.size()[-1]:\n # each output vector dimension has its own factor\n pos_ws, neg_ws = divide_pos_neg(flt)\n\n newl_pos = self._lb * (pos_ws)\n newl_neg = self._ub * (neg_ws)\n newl = newl_pos + newl_neg\n\n newu_pos = self._ub * (pos_ws)\n newu_neg = self._lb * (neg_ws)\n newu = newu_pos + newu_neg\n return Ele(newl, newu)\n elif not (isinstance(flt, float) or isinstance(flt, int)):\n raise ValueError('Unsupported multiplication with', str(flt), type(flt))\n\n flt = float(flt)\n if flt >= 0:\n return Ele(self._lb * flt, self._ub * flt)\n else:\n return Ele(self._ub * flt, self._lb * flt)\n\n def __rmul__(self, flt) -> Ele:\n return self.__mul__(flt)\n pass\n\n\ndef cat0(es: Iterable[Ele]) -> Ele:\n new_lb = torch.cat([e._lb for e in es], dim=0)\n new_ub = torch.cat([e._ub for e in es], dim=0)\n return Ele(new_lb, new_ub)\n\n\nclass Dist(AbsDist):\n \"\"\" Vanilla interval domain is non-relational, thus the distances are purely based on LB/UB tensors. 
\"\"\"\n def __init__(self, eps: float = 1e-5):\n \"\"\"\n :param eps: add to break the tie when choosing max/min.\n \"\"\"\n self.eps = eps\n return\n\n def cols_not_max(self, e: Ele, *idxs: int) -> Tensor:\n \"\"\" Intuitively, always-not-max => exists col . target < col is always true.\n Therefore, target_col.UB() - other_col.LB() should < 0, if not, that is the distance.\n As long as some of the others < 0, it's OK (i.e., min).\n \"\"\"\n others = self._idxs_not(e, *idxs)\n others = e.lb()[..., others]\n\n res = []\n for i in idxs:\n target = e.ub()[..., [i]]\n diff = target - others # will broadcast\n diff = F.relu(diff + self.eps)\n mins, _ = torch.min(diff, dim=-1)\n res.append(mins)\n return sum(res)\n\n def cols_is_max(self, e: Ele, *idxs: int) -> Tensor:\n \"\"\" Intuitively, some-is-max => exists target . target > all_others is always true.\n Therefore, other_col.UB() - target_col.LB() should < 0, if not, that is the distance.\n All of the others should be accounted (i.e., max).\n \"\"\"\n others = self._idxs_not(e, *idxs)\n others = e.ub()[..., others]\n\n res = []\n for i in idxs:\n target = e.lb()[..., [i]]\n diffs = others - target # will broadcast\n diffs = F.relu(diffs + self.eps)\n res.append(diffs)\n\n if len(idxs) == 1:\n all_diffs = res[0]\n else:\n all_diffs = torch.stack(res, dim=-1)\n all_diffs, _ = torch.min(all_diffs, dim=-1) # it's OK to have either one to be max, thus use torch.min()\n\n # then it needs to surpass everybody else, thus use torch.max() for maximum distance\n diffs, _ = torch.max(all_diffs, dim=-1)\n return diffs\n\n def cols_not_min(self, e: Ele, *idxs: int) -> Tensor:\n \"\"\" Intuitively, always-not-min => exists col . col < target is always true.\n Therefore, other_col.UB() - target_col.LB() should < 0, if not, that is the distance.\n As long as some of the others < 0, it's OK (i.e., min).\n \"\"\"\n others = self._idxs_not(e, *idxs)\n others = e.ub()[..., others]\n\n res = []\n for i in idxs:\n target = e.lb()[..., [i]]\n diffs = others - target # will broadcast\n diffs = F.relu(diffs + self.eps)\n mins, _ = torch.min(diffs, dim=-1)\n res.append(mins)\n return sum(res)\n\n def cols_is_min(self, e: Ele, *idxs: int) -> Tensor:\n \"\"\" Intuitively, some-is-min => exists target . target < all_others is always true.\n Therefore, target_col.UB() - other_col.LB() should < 0, if not, that is the distance.\n All of the others should be accounted (i.e., max).\n \"\"\"\n others = self._idxs_not(e, *idxs)\n others = e.lb()[..., others]\n\n res = []\n for i in idxs:\n target = e.ub()[..., [i]]\n diffs = target - others # will broadcast\n diffs = F.relu(diffs + self.eps)\n res.append(diffs)\n\n if len(idxs) == 1:\n all_diffs = res[0]\n else:\n all_diffs = torch.stack(res, dim=-1)\n all_diffs, _ = torch.min(all_diffs, dim=-1) # it's OK to have either one to be min, thus use torch.min()\n\n # then it needs to surpass everybody else, thus use torch.max() for maximum distance\n diffs, _ = torch.max(all_diffs, dim=-1)\n return diffs\n pass\n\n\nclass BlackSheep(AbsBlackSheep):\n def labels_predicted(self, e: Ele, labels: Tensor) -> Tensor:\n \"\"\" Intuitively, this is specifying a label_is_max for every input abstraction. 
\"\"\"\n # TODO to review again\n full_lb = e.lb()\n full_ub = e.ub()\n res = []\n for i in range(len(labels)):\n cat = labels[i]\n piece_outs_lb = full_lb[[i]]\n piece_outs_ub = full_ub[[i]]\n\n # default lb-ub or ub-lb doesn't know that target domain has distance 0, so specify that explicitly\n lefts = piece_outs_ub[..., :cat]\n rights = piece_outs_ub[..., cat + 1:]\n target = piece_outs_lb[..., [cat]]\n\n full = torch.cat((lefts, target, rights), dim=-1)\n diffs = full - target # will broadcast\n # no need to ReLU here, negative values are also useful\n res.append(diffs)\n\n res = torch.cat(res, dim=0)\n return res\n\n def labels_not_predicted(self, e: Ele, labels: Tensor) -> Tensor:\n \"\"\" Intuitively, this is specifying a label_not_max for every input abstraction.\n :param label: same number of batches as self\n \"\"\"\n full_lb = e.lb()\n full_ub = e.ub()\n res = []\n for i in range(len(labels)):\n cat = labels[i]\n piece_outs_lb = full_lb[[i]]\n piece_outs_ub = full_ub[[i]]\n\n # default lb-ub or ub-lb doesn't know that target domain has distance 0, so specify that explicitly\n lefts = piece_outs_lb[..., :cat]\n rights = piece_outs_lb[..., cat+1:]\n target = piece_outs_ub[..., [cat]]\n\n full = torch.cat((lefts, target, rights), dim=-1)\n diffs = target - full # will broadcast\n # no need to ReLU here, negative values are also useful\n res.append(diffs)\n\n res = torch.cat(res, dim=0)\n # TODO\n raise NotImplementedError('To use this as distance, it has to have target category not being max, ' +\n 'thus use torch.min(dim=-1) then ReLU().')\n return res\n pass\n\n\n# ===== Below are customized layers that can take and propagate abstract elements. =====\n\n\nclass Linear(nn.Linear):\n \"\"\" Linear layer with the ability to take approximations rather than concrete inputs. \"\"\"\n def __str__(self):\n return f'{Dom.name}.' + super().__str__()\n\n @classmethod\n def from_module(cls, src: nn.Linear) -> Linear:\n with_bias = src.bias is not None\n new_lin = Linear(src.in_features, src.out_features, with_bias)\n new_lin.load_state_dict(src.state_dict())\n return new_lin\n\n def export(self) -> nn.Linear:\n with_bias = self.bias is not None\n lin = nn.Linear(self.in_features, self.out_features, with_bias)\n lin.load_state_dict(self.state_dict())\n return lin\n\n def forward(self, *ts: Union[Tensor, Ele]) -> Union[Tensor, Ele, Tuple[Tensor, ...]]:\n \"\"\" Re-implement the forward computation by myself, because F.linear() may apply optimization using\n torch.addmm() which requires inputs to be tensor.\n :param ts: either Tensor, Ele, or Ele tensors\n :rtype: corresponding to inputs, Tensor for Tensor, Ele for Ele, Ele tensors for Ele tensors\n \"\"\"\n input_is_ele = True\n if len(ts) == 1:\n if isinstance(ts[0], Tensor):\n return super().forward(ts[0]) # plain tensor, no abstraction\n elif isinstance(ts[0], Ele):\n e = ts[0] # abstract element\n else:\n raise ValueError(f'Not supported argument type {type(ts[0])}.')\n else:\n input_is_ele = False\n e = Ele(*ts) # reconstruct abstract element\n\n out = forward_linear(self, e)\n return out if input_is_ele else tuple(out)\n pass\n\n\nclass Conv2d(nn.Conv2d):\n \"\"\" Convolutional layer with the ability to take in approximations rather than concrete inputs. \"\"\"\n def __str__(self):\n return f'{Dom.name}.' 
+ super().__str__()\n\n def forward(self, *ts: Union[Tensor, Ele]) -> Union[Tensor, Ele, Tuple[Tensor, ...]]:\n \"\"\" I have to implement the forward computation by myself, because F.conv2d() requires input to be Tensors.\n :param ts: either Tensor, Ele, or Ele tensors\n :rtype: corresponding to inputs, Tensor for Tensor, Ele for Ele, Ele tensors for Ele tensors\n \"\"\"\n input_is_ele = True\n if len(ts) == 1:\n if isinstance(ts[0], Tensor):\n return super().forward(ts[0]) # plain tensor, no abstraction\n elif isinstance(ts[0], Ele):\n e = ts[0] # abstract element\n else:\n raise ValueError(f'Not supported argument type {type(ts[0])}.')\n else:\n input_is_ele = False\n e = Ele(*ts) # reconstruct abstract element\n\n ''' See 'https://github.com/vdumoulin/conv_arithmetic' for animated illustrations.\n It's not hard to support them, but we just don't need that right now.\n '''\n if self.dilation != (1, 1):\n raise NotImplementedError(f'Unsupported dilation {self.dilation}')\n if self.groups != 1:\n raise NotImplementedError(f'Unsupported groups {self.groups}')\n\n assert e.dim() == 4\n img_b, img_c, img_h, img_w = e.size() # Batch x C x H x W\n\n fil_h, fil_w = self.kernel_size\n pad_h, pad_w = self.padding\n stride_h, stride_w = self.stride\n\n # formula: (W - F + 2P) / S + 1\n cnt_h = (img_h - fil_h + 2 * pad_h) / stride_h + 1\n cnt_w = (img_w - fil_w + 2 * pad_w) / stride_w + 1\n assert int(cnt_h) == cnt_h and int(cnt_w) == cnt_w, \"img and filter dimensions don't fit?\"\n cnt_h = int(cnt_h)\n cnt_w = int(cnt_w)\n\n ''' Pad the original image just in case (this is different for each abstract domain).\n First pad the left and right (width), then pad the top and bottom (height).\n ########### 2 ##########\n ## 1 ## center ## 1 ##\n ########### 2 ##########\n '''\n def _pad(orig: Tensor) -> Tensor:\n if pad_w > 0:\n zs = torch.zeros(img_b, img_c, img_h, pad_w, device=orig.device)\n orig = torch.cat((zs, orig, zs), dim=-1)\n if pad_h > 0:\n zs = torch.zeros(img_b, img_c, pad_h, img_w + 2 * pad_w, device=orig.device) # width has increased\n orig = torch.cat((zs, orig, zs), dim=-2)\n return orig\n\n full_lb = _pad(e._lb)\n full_ub = _pad(e._ub)\n\n # collect all filtered sub-images in a large batch\n filtered_lb = []\n filtered_ub = []\n for i in range(cnt_h):\n row_lb = []\n row_ub = []\n for j in range(cnt_w):\n h_start = i * stride_h\n h_end = h_start + fil_h\n w_start = j * stride_w\n w_end = w_start + fil_w\n\n sub_lb = full_lb[:, :, h_start : h_end, w_start : w_end] # Batch x InC x FilterH x FilterW\n sub_ub = full_ub[:, :, h_start : h_end, w_start : w_end]\n row_lb.append(sub_lb)\n row_ub.append(sub_ub)\n\n row_lb = torch.stack(row_lb, dim=1) # dim=1: right after Batch x ...\n row_ub = torch.stack(row_ub, dim=1) # Now Batch x OutW x InC x FilterH x FilterW\n filtered_lb.append(row_lb)\n filtered_ub.append(row_ub)\n\n filtered_lb = torch.stack(filtered_lb, dim=1) # dim=1: right after Batch x ... 
again\n filtered_ub = torch.stack(filtered_ub, dim=1) # Now Batch x OutH x OutW x InC x FilterH x FilterW\n\n # reshape everything to directly apply matmul\n filtered_lb = filtered_lb.view(img_b, cnt_h, cnt_w, -1) # Batch x OutH x OutW x (InC * FilterH * Filter)\n filtered_ub = filtered_ub.view(img_b, cnt_h, cnt_w, -1)\n ws = self.weight.view(self.out_channels, -1).t() # (InC * FilterH * FilterW) x OutC\n newe = Ele(filtered_lb, filtered_ub).matmul(ws) + self.bias # Batch x OutH x OutW x OutC\n\n newl = newe._lb.permute(0, 3, 1, 2) # Batch x OutC x OutH x OutW\n newu = newe._ub.permute(0, 3, 1, 2)\n out = Ele(newl, newu)\n return out if input_is_ele else tuple(out)\n pass\n\n\ndef _distribute_to_super(super_fn: Callable, *ts: Union[Tensor, Ele]) -> Union[Tensor, Ele, Tuple[Tensor, ...]]:\n \"\"\" Common pattern shared among different customized modules, applying original methods to the bounds. \"\"\"\n input_is_ele = True\n if len(ts) == 1:\n if isinstance(ts[0], Tensor):\n return super_fn(ts[0]) # plain tensor, no abstraction\n elif isinstance(ts[0], Ele):\n e = ts[0] # abstract element\n else:\n raise ValueError(f'Not supported argument type {type(ts[0])}.')\n else:\n input_is_ele = False\n e = Ele(*ts) # reconstruct abstract element\n\n out_tuple = (super_fn(t) for t in iter(e)) # simply apply to both lower and upper bounds\n return Ele(*out_tuple) if input_is_ele else out_tuple\n\n\nclass ReLU(nn.ReLU):\n def __str__(self):\n return f'{Dom.name}.' + super().__str__()\n\n def export(self) -> nn.ReLU:\n return nn.ReLU()\n\n def forward(self, *ts: Union[Tensor, Ele]) -> Union[Tensor, Ele, Tuple[Tensor, ...]]:\n return _distribute_to_super(super().forward, *ts)\n pass\n\n\nclass Tanh(nn.Tanh):\n def __str__(self):\n return f'{Dom.name}.' + super().__str__()\n\n def export(self) -> nn.Tanh:\n return nn.Tanh()\n\n def forward(self, *ts: Union[Tensor, Ele]) -> Union[Tensor, Ele, Tuple[Tensor, ...]]:\n return _distribute_to_super(super().forward, *ts)\n pass\n\n\nclass MaxPool1d(nn.MaxPool1d):\n def __str__(self):\n return f'{Dom.name}.' + super().__str__()\n\n def forward(self, *ts: Union[Tensor, Ele]) -> Union[Tensor, Ele, Tuple[Tensor, ...]]:\n return _distribute_to_super(super().forward, *ts)\n pass\n\n\nclass MaxPool2d(nn.MaxPool2d):\n def __str__(self):\n return f'{Dom.name}.' + super().__str__()\n\n def forward(self, *ts: Union[Tensor, Ele]) -> Union[Tensor, Ele, Tuple[Tensor, ...]]:\n return _distribute_to_super(super().forward, *ts)\n pass\n\n\nclass Clamp(nn.Module):\n def __init__(self, min: float, max: float):\n super().__init__()\n self.min = min\n self.max = max\n return\n\n def __str__(self):\n return f'{Dom.name}.Clamp({self.min}, {self.max})'\n\n def forward(self, *ts: Union[Tensor, Ele]) -> Union[Tensor, Ele, Tuple[Tensor, ...]]:\n fn = lambda x: torch.clamp(x, self.min, self.max)\n return _distribute_to_super(fn, *ts)\n pass\n"
] | [
[
"torch.nn.Linear",
"torch.zeros",
"torch.cat",
"torch.stack",
"torch.min",
"torch.max",
"torch.nn.Tanh",
"torch.clamp",
"torch.nn.ReLU",
"torch.nn.functional.relu"
]
] |
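The `Ele.matmul` docstring above states the standard interval bound-propagation identity. A standalone spot check in plain PyTorch (independent of the module) that the resulting box really contains x @ W for any x inside [L, U]:

import torch

torch.manual_seed(0)
L = torch.randn(4, 3)
U = L + torch.rand(4, 3)                  # guarantee L <= U elementwise
W = torch.randn(3, 5)

W_pos, W_neg = W.clamp(min=0), W.clamp(max=0)
out_lb = L @ W_pos + U @ W_neg            # L' = max(0, W) * L + min(0, W) * U
out_ub = U @ W_pos + L @ W_neg            # U' = max(0, W) * U + min(0, W) * L

x = L + (U - L) * torch.rand(4, 3)        # arbitrary concrete point inside the box
y = x @ W
assert torch.all(out_lb <= y + 1e-6) and torch.all(y <= out_ub + 1e-6)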
inidun/unesco_data_collection | [
"6157883ea391f370d350eaaed038dfde6ad7a8fe"
] | [
"courier/elements/export_articles.py"
] | [
"# pylint: disable=redefined-outer-name\nimport os\nimport re\nfrom pathlib import Path\nfrom typing import Any, Dict, List, Union\n\nimport pandas as pd\nfrom loguru import logger\n\nfrom courier.article_index import article_index_to_csv\nfrom courier.config import get_config\nfrom courier.elements.assign_page_service import AssignPageService\nfrom courier.elements.consolidate_text_service import ConsolidateTextService\nfrom courier.elements.elements import CourierIssue\nfrom courier.elements.statistics import IssueStatistics\nfrom courier.utils.logging import file_logger\n\nCONFIG = get_config()\n\n\nclass ExtractArticles:\n @staticmethod\n def extract(issue: CourierIssue) -> CourierIssue:\n AssignPageService().assign(issue)\n ConsolidateTextService().consolidate(issue)\n return issue\n\n @staticmethod\n def statistics(issue: CourierIssue) -> IssueStatistics:\n return IssueStatistics(issue)\n\n\ndef export_articles(\n courier_id: str,\n export_folder: Union[str, os.PathLike] = CONFIG.articles_dir / 'exported',\n) -> List[Dict[str, Any]]:\n\n issue: CourierIssue = CourierIssue(courier_id)\n ExtractArticles.extract(issue)\n\n Path(export_folder).mkdir(parents=True, exist_ok=True)\n\n for article in issue.articles:\n if article.catalogue_title is None:\n continue\n safe_title = re.sub(r'[^\\w]+', '_', str(article.catalogue_title).lower())\n file = (\n Path(export_folder)\n / f'{article.year or \"0000\"}_{article.courier_id}_{article.record_number}_{safe_title[:60]}.txt'\n )\n\n logger.trace(\n f'{courier_id};{article.year};{article.record_number};{len(article.get_assigned_pages())};{len(article.get_not_found_pages())};{len(article.page_numbers)}'\n )\n\n with open(file, 'w', encoding='utf-8') as fp:\n fp.write(article.get_text())\n\n return IssueStatistics(issue).errors\n\n\nif __name__ == '__main__':\n\n export_folder: Path = CONFIG.articles_dir / 'exported'\n article_index_to_csv(CONFIG.article_index, export_folder)\n stats: List[Dict[str, Any]] = []\n\n with file_logger(\n Path(export_folder) / 'extract_log.csv', format='{message}', level='TRACE'\n ) as logger: # noqa: F811\n logger.trace('courier_id;year;record_number;assigned;not_found;total')\n\n courier_ids = [x[:6] for x in CONFIG.get_courier_ids()]\n for courier_id in courier_ids:\n if courier_id not in CONFIG.article_index.courier_id.values:\n if len(CONFIG.get_issue_article_index(courier_id)) != 0:\n raise Exception(f'{courier_id} not in article index but has articles')\n continue\n stats += export_articles(courier_id, export_folder)\n\n pd.DataFrame(stats).drop(columns=['article']).sort_values(\n by=['year', 'courier_id', 'record_number', 'page', 'case']\n ).to_csv(Path(export_folder) / 'stats.csv', sep=';', index=False)\n"
] | [
[
"pandas.DataFrame"
]
] |
FlyingFordAnglia/Scrabble | [
"47887c7780c81a66b8b4c6f82ca2c50d645b075c"
] | [
"code/savegame.py"
] | [
"# Save game to and load game from file\nimport numpy as np\n\n\ndef savegame(board, racks, players, turn, filename='savegame.sv'):\n # create header for the active racks, player scores, players names, and the active turn\n racks = list(map(lambda x: ''.join(x), racks)) # for each rack, make it into a single string\n racks = ';'.join(racks) + ':' # combine all racks into a string\n scores = [str(i.score) for i in players]\n scores = ';'.join(scores) + ':' # single string for player scores\n names = [i.name for i in players]\n names = ';'.join(names) + ':' # single string for player names\n turn = str(turn)\n np.savetxt(filename, board, header=racks + scores + names + turn, fmt='%d', delimiter=',')\n\n\ndef loadgame(filename='savegame.sv'):\n # load from header\n with open(filename, 'r') as f:\n header = f.readline()\n header = header[2:len(header)] # remove the \"comment\" prefix added to headers by np.savetxt()\n header = header.split(':')\n racks = header[0].split(';')\n racks = list(map(list, racks)) # each rack was stored as a single string; this makes it into a list\n scores = header[1].split(';')\n scores = list(map(int, scores))\n names = header[2].split(';')\n turn = int(header[3])\n board = []\n for i in f: # load the board\n board.append(list(map(int, i.split(','))))\n board = np.array(board, dtype=int)\n return board, racks, scores, names, turn\n"
] | [
[
"numpy.array",
"numpy.savetxt"
]
] |
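A hypothetical round-trip sketch for `savegame`/`loadgame` above. `Player` is a stand-in for whatever player object the game uses (only `.name` and `.score` are read), and the import line is an assumption about how `code/savegame.py` is exposed:

from collections import namedtuple
import numpy as np
from savegame import savegame, loadgame   # hypothetical import; adjust to the project layout

Player = namedtuple('Player', ['name', 'score'])

board = np.zeros((15, 15), dtype=int)                 # empty 15x15 board
racks = [list('AEINRST'), list('BCDGLMO')]
players = [Player('Alice', 42), Player('Bob', 31)]

savegame(board, racks, players, turn=1, filename='demo.sv')
board2, racks2, scores2, names2, turn2 = loadgame('demo.sv')

assert (board2 == board).all()
assert racks2 == racks and scores2 == [42, 31] and names2 == ['Alice', 'Bob'] and turn2 == 1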
jwarley/hcpi | [
"c2f9b305fd21cd2e5cb375acc5b4f8d445432a66"
] | [
"tests/cartpole.py"
] | [
"from policies import FourierPolicy\nfrom agents import FCHC\nimport gym\nimport numpy as np\nimport csv\n\n# Create the environment\nenv = gym.make('CartPole-v0')\n\n# Specify how to normalize the states.\ndef cp_normalize(obs):\n \"\"\" Normalize the features of a cartpole state state observation to the interval [0, 1]\n\n The feature domains are:\n [-4.8, 4.8]\n [-inf, inf]\n [-24, 24]\n [-inf, inf]\n\n Infinite-domain features are normalized by a sigmoid.\n \"\"\"\n return np.array([\n (obs[0] + 4.8) / 9.6,\n np.exp(obs[1]) / (np.exp(obs[1]) + 1),\n (obs[2] + 24) / 48,\n np.exp(obs[3]) / (np.exp(obs[3]) + 1)\n ])\n\ndef find_okay_policy(deg, min_perf_target=10.0):\n # Start with a randomly initialized Fourier policy\n pi = FourierPolicy(deg, env.action_space.n, env.reset().size)\n\n # Use a simple hill-climbing agent to obtain a semi-competent policy\n agent = FCHC(pi, env, cp_normalize, gamma=1.0, sigma=18.5, n_eval_eps=50, eval_ep_len=1000)\n (mean_return, trained_policy) = agent.train_until(target_return=min_perf_target, log=True)\n print(\"==============================================\")\n print(\"Found an acceptable policy with mean return {}\".format(mean_return))\n print(\"==============================================\")\n return (mean_return, trained_policy)\n\ndef generate_data(pi, n_episodes=10, max_ep_length=1000, output_path='cartpole.csv'):\n # Open a csv file to record episode data for consumption by HCPI\n with open('datasets/' + output_path, 'w', newline='') as datafile:\n w = csv.writer(datafile, delimiter=',')\n\n print(\"Writing prelude data to {}\".format(output_path))\n w.writerow([pi.state_dim])\n w.writerow([pi.n_actions])\n w.writerow([pi.deg])\n w.writerow(pi.params.flatten())\n w.writerow([n_episodes])\n\n def run_one_ep():\n ep_return = 0.0\n obs = cp_normalize(env.reset())\n hist = list(obs)\n\n for t in range(max_ep_length):\n action = pi.sample(obs)\n raw_obs, r, is_terminal, _ = env.step(action)\n obs = cp_normalize(raw_obs)\n ep_return += r # Note: we're not discounting in these examples\n hist.extend([action, r]) # Record the transition data\n\n if is_terminal:\n break\n\n hist.extend(obs) # Record the newly entered state if it's nonterminal\n\n w.writerow(hist)\n return hist\n\n # Run the newly found policy for n episodes to generate data for HCPI\n e1 = run_one_ep() # Save the history data from the first episode;\n\n for ep in range(1, n_episodes):\n if ep % (n_episodes // 10) == 0:\n print(\"Running trial episode {} of {}\".format(ep + 1, n_episodes))\n run_one_ep()\n\n # Record the policy probabilities at each time step from the first episode\n # so that HCPI can confirm that its policy representation matches ours.\n state_dim = pi.state_dim\n policy_probs = []\n while e1:\n # Slice off one (s, a, r) triple\n step, e1 = (e1[:state_dim + 2], e1[state_dim + 2:])\n s, a = (np.array(step[:state_dim]), step[state_dim])\n policy_probs.append(pi.eval_one(s, a))\n\n w.writerow(policy_probs)\n env.close()\n\n\nSIZES = [100000, 200000]\nDEGS = [1, 2]\n\nfor n_eps in SIZES:\n for deg in DEGS:\n (mean_ret, pi) = find_okay_policy(deg, min_perf_target=100.0)\n fname = 'cartpole_deg{}_ret{}_eps{}.csv'.format(deg, round(mean_ret, 2), n_eps)\n generate_data(pi, n_episodes=n_eps, output_path=fname)\n\n"
] | [
[
"numpy.array",
"numpy.exp"
]
] |
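The `cp_normalize` docstring above spells out the scheme: features with finite domains are min-max scaled to [0, 1], unbounded ones are squashed with a logistic sigmoid. A quick standalone check using the same arithmetic (the observation values are made up):

import numpy as np

def cp_normalize(obs):
    return np.array([
        (obs[0] + 4.8) / 9.6,                    # bounded feature, domain [-4.8, 4.8]
        np.exp(obs[1]) / (np.exp(obs[1]) + 1),   # unbounded feature -> sigmoid
        (obs[2] + 24) / 48,                      # bounded feature, domain [-24, 24]
        np.exp(obs[3]) / (np.exp(obs[3]) + 1),   # unbounded feature -> sigmoid
    ])

extreme = np.array([-4.8, -50.0, 24.0, 50.0])    # extremes of the bounded features, large velocities
print(cp_normalize(extreme))                     # -> approximately [0.0, 0.0, 1.0, 1.0]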
Vinit-source/SubmodularStreamingMaximization | [
"d61de27549c7cf02103e35cdc6d4ec26991ebba0"
] | [
"experiments/examiner/init.py"
] | [
"import spacy\nimport csv\nimport numpy as np\nimport tqdm\n\ndef examiner():\n # https://www.kaggle.com/therohk/examine-the-examiner/\n data = csv.DictReader(open(\"data/examiner-date-text.csv\", \"r\"), delimiter=\",\", dialect=\"excel\")\n nlp = spacy.load(\"en_core_web_lg\")\n dataset = []\n dates, texts = zip(*[(x[\"publish_date\"], x[\"headline_text\"]) for x in data])\n for date, text in tqdm.tqdm(zip(dates, nlp.pipe(texts, batch_size=50, disable=[\"tagger\", \"parser\", \"ner\"])), total=3089781):\n dataset.append(text.vector)\n # print(date, text.vector)\n # break\n dataset = np.array(dataset)\n print(dataset.shape)\n np.save(\"data/examiner.npy\", dataset)\n\nif __name__ == \"__main__\":\n examiner()\n"
] | [
[
"numpy.array",
"numpy.save"
]
] |
ph10m/coref | [
"88152e6bf63b5b8f9a0d7146f4235a2ce6b55e43"
] | [
"tollef_dataclean.py"
] | [
"from bert import tokenization\nimport json\nimport sys\nimport os\nimport json\n\nimport tensorflow as tf\nimport util\n\nfilename = \"none\"\n\ntext = [\n\"Firefly is an American space Western drama television series which ran from 2002-2003, created by writer and director Joss Whedon, under his Mutant Enemy Productions label.\",\n\"Whedon served as an executive producer, along with Tim Minear.\",\n\"The series is set in the year 2517, after the arrival of humans in a new star system and follows the adventures of the renegade crew of Serenity, a 'Firefly-class' spaceship.\",\n\"The ensemble cast portrays the nine characters who live on Serenity.\",\n\"Whedon pitched the show as 'nine people looking into the blackness of space and seeing nine different things.'\",\n\"The show explores the lives of a group of people, some of whom fought on the losing side of a civil war, who make a living on the fringes of society as part of the pioneer culture of their star system.\",\n\"In this future, the only two surviving superpowers, the United States and China, fused to form the central federal government, called the Alliance, resulting in the fusion of the two cultures.\",\n\"According to Whedon's vision, 'nothing will change in the future: technology will advance, but we will still have the same political, moral, and ethical problems as today.'\",\n\"Firefly premiered in the U.S. on the Fox network on September 20, 2002.\",\n\"By mid-December, Firefly had averaged 4.7 million viewers per episode and was 98th in Nielsen ratings.\",\n\"It was canceled after 11 of the 14 produced episodes were aired.\",\n\"Despite the relatively short life span of the series, it received strong sales when it was released on DVD and has large fan support campaigns.\",\n\"It won a Primetime Emmy Award in 2003 for Outstanding Special Visual Effects for a Series.\",\n\"TV Guide ranked the series at No. 
5 on their 2013 list of 60 shows that were 'Cancelled Too Soon.'\",\n\"The post-airing success of the show led Whedon and Universal Pictures to produce Serenity, a 2005 film which continues from the story of the series, and the Firefly franchise expanded to other media, including comics and a role-playing game.\",\n]\n\npreco_2 = [[\"At\", \"the\", \"moment\", \",\", \"it\", \"may\", \"be\", \"difficult\", \"to\", \"imagine\", \",\", \"but\", \"many\", \"people\", \"believe\", \"that\", \",\", \"by\", \"the\", \"year\", \"2100\", \",\", \"we\", \"will\", \"live\", \"on\", \"the\", \"planet\", \"Mars\", \".\"], [\"Our\", \"own\", \"planet\", \",\", \"Earth\", \",\", \"is\", \"becoming\", \"more\", \"and\", \"more\", \"crowed\", \"and\", \"polluted\", \".\"], [\"Luckily\", \",\", \"we\", \"can\", \"start\", \"again\", \"and\", \"build\", \"a\", \"better\", \"world\", \"on\", \"Mars\", \".\"], [\"Here\", \"is\", \"what\", \"life\", \"could\", \"be\", \"like\", \".\"], [\" \"], [\"First\", \"of\", \"all\", \",\", \"transport\", \"should\", \"be\", \"much\", \"better\", \".\"], [\"At\", \"present\", \",\", \"our\", \"spaceships\", \"are\", \"too\", \"slow\", \"to\", \"carry\", \"large\", \"numbers\", \"of\", \"people\", \"to\", \"Mars\", \"--\", \"it\", \"takes\", \"months\", \".\"], [\"However\", \",\", \"by\", \"2100\", \",\", \"spaceship\", \"can\", \"travel\", \"at\", \"half\", \"the\", \"speed\", \"of\", \"light\", \".\"], [\"It\", \"might\", \"take\", \"us\", \"two\", \"or\", \"three\", \"days\", \"to\", \"get\", \"to\", \"Mars\", \"!\"], [\" \"], [\"Secondly\", \",\", \"humans\", \"need\", \"food\", \",\", \"water\", \"and\", \"air\", \"to\", \"live\", \".\"], [\"Scientists\", \"should\", \"be\", \"able\", \"to\", \"develop\", \"plants\", \"that\", \"can\", \"be\", \"grown\", \"on\", \"Mars\", \".\"], [\"These\", \"plants\", \"will\", \"produce\", \"the\", \"food\", \"and\", \"air\", \"that\", \"we\", \"need\", \".\"], [\"However\", \",\", \"can\", \"these\", \"plants\", \"produce\", \"water\", \"for\", \"us\", \"?\"], [\"There\", \"is\", \"no\", \"answer\", \"now\", \".\"], [\" \"], [\"There\", \"is\", \"a\", \"problem\", \"for\", \"us\", \"to\", \"live\", \"on\", \"Mars\", \".\"], [\"Mars\", \"pulls\", \"us\", \"much\", \"less\", \"than\", \"the\", \"Earth\", \"does\", \".\"], [\"This\", \"will\", \"be\", \"dangerous\", \"because\", \"we\", \"could\", \"easily\", \"jump\", \"too\", \"high\", \"and\", \"fly\", \"slowly\", \"away\", \"into\", \"space\", \".\"], [\"To\", \"prevent\", \"this\", \",\", \"humans\", \"on\", \"Mars\", \"have\", \"to\", \"wear\", \"special\", \"shoes\", \"to\", \"make\", \"themselves\", \"heavier\", \".\"], [\" \"], [\"Life\", \"on\", \"Mars\", \"will\", \"be\", \"better\", \"than\", \"that\", \"on\", \"Earth\", \"in\", \"many\", \"ways\", \",\", \"People\", \"will\", \"have\", \"more\", \"space\", \".\"], [\"Living\", \"in\", \"a\", \"large\", \"building\", \"with\", \"only\", \"10\", \"bedrooms\", \"is\", \"highly\", \"possible\", \".\"], [\"Many\", \"people\", \"believe\", \"that\", \"robot\", \"will\", \"do\", \"most\", \"of\", \"our\", \"work\", \",\", \"so\", \"we\", \"have\", \"more\", \"time\", \"for\", \"our\", \"hobbies\", \".\"], [\" \"], [\"There\", \"will\", \"probably\", \"be\", \"no\", \"school\", \"on\", \"Mars\", \".\"], [\"Every\", \"student\", \"will\", \"have\", \"a\", \"computer\", \"at\", \"home\", \"which\", \"is\", \"connected\", \"to\", \"the\", \"internet\", \".\"], [\"They\", \"can\", \"study\", \",\", \"do\", \"their\", \"homework\", 
\"and\", \"take\", \"exams\", \"in\", \"online\", \"schools\", \".\"], [\"Each\", \"student\", \"will\", \"also\", \"have\", \"their\", \"own\", \"online\", \"teacher\", \"called\", \"``\", \"e-teacher\", \"''\", \".\"], [\" \"], [\"However\", \",\", \"in\", \"some\", \"ways\", \",\", \"life\", \"on\", \"Mars\", \"may\", \"not\", \"be\", \"better\", \"than\", \"that\", \"on\", \"the\", \"earth\", \"today\", \".\"], [\"Food\", \"will\", \"not\", \"be\", \"the\", \"same\", \"--\", \"meals\", \"will\", \"probably\", \"be\", \"in\", \"the\", \"form\", \"of\", \"pills\", \"and\", \"will\", \"not\", \"be\", \"as\", \"delicious\", \"as\", \"they\", \"are\", \"today\", \",\", \"Also\", \",\", \"space\", \"travel\", \"will\", \"make\", \"many\", \"people\", \"feel\", \"ill\", \".\"], [\"The\", \"spaceship\", \"will\", \"travel\", \"fast\", \"but\", \"the\", \"journey\", \"to\", \"Mars\", \"will\", \"probably\", \"be\", \"very\", \"uncomfortable\", \".\"]]\n\nif filename != \"none\":\n text = [l.strip() for l in open(filename).readlines()]\n\ntext = preco_2\n\ngenre = \"nz\"\n# The Ontonotes data for training the model contains text from several sources\n# of very different styles. You need to specify the most suitable one out of:\n# \"bc\": broadcast conversation\n# \"bn\": broadcast news\n# \"mz\": magazine\n# \"nw\": newswire\n# \"pt\": Bible text\n# \"tc\": telephone conversation\n# \"wb\": web data\n\nmodel_name = \"spanbert_base\"\n# The fine-tuned model to use. Options are:\n# bert_base\n# spanbert_base\n# bert_large\n# spanbert_large\n\ndata = {\n 'doc_key': genre,\n 'sentences': [[\"[CLS]\"]],\n 'speakers': [[\"[SPL]\"]],\n 'clusters': [],\n 'sentence_map': [0],\n 'subtoken_map': [0],\n}\n\n# Determine Max Segment\nmax_segment = None\nfor line in open('experiments.conf'):\n if line.startswith(model_name):\n max_segment = True\n elif line.strip().startswith(\"max_segment_len\"):\n if max_segment:\n max_segment = int(line.strip().split()[-1])\n break\n\ntokenizer = tokenization.FullTokenizer(vocab_file=\"cased_config_vocab/vocab.txt\", do_lower_case=False)\nsubtoken_num = 0\nfor sent_num, line in enumerate(text):\n raw_tokens = line\n to_sent = ' '.join(line)\n tokens = tokenizer.tokenize(to_sent)\n if len(tokens) + len(data['sentences'][-1]) >= max_segment:\n data['sentences'][-1].append(\"[SEP]\")\n data['sentences'].append([\"[CLS]\"])\n data['speakers'][-1].append(\"[SPL]\")\n data['speakers'].append([\"[SPL]\"])\n data['sentence_map'].append(sent_num - 1)\n data['subtoken_map'].append(subtoken_num - 1)\n data['sentence_map'].append(sent_num)\n data['subtoken_map'].append(subtoken_num)\n\n ctoken = raw_tokens[0]\n cpos = 0\n for token in tokens:\n data['sentences'][-1].append(token)\n data['speakers'][-1].append(\"-\")\n data['sentence_map'].append(sent_num)\n data['subtoken_map'].append(subtoken_num)\n \n if token.startswith(\"##\"):\n token = token[2:]\n if len(ctoken) == len(token):\n subtoken_num += 1\n cpos += 1\n if cpos < len(raw_tokens):\n ctoken = raw_tokens[cpos]\n else:\n ctoken = ctoken[len(token):]\n\ndata['sentences'][-1].append(\"[SEP]\")\ndata['speakers'][-1].append(\"[SPL]\")\ndata['sentence_map'].append(sent_num - 1)\ndata['subtoken_map'].append(subtoken_num - 1)\n\n\n#with open(\"sample.in.json\", 'w') as out:\n# json.dump(data, out, sort_keys=True)\n\ndef pred(indata, out):\n config = util.initialize_from_env()\n model = util.get_model(config)\n\n with tf.Session() as session:\n model.restore(session)\n\n with open(out, \"w\") as output_file:\n tensorized_example = 
model.tensorize_example(indata, is_training=False)\n feed_dict = {i:t for i,t in zip(model.input_tensors, tensorized_example)}\n _, _, _, top_span_starts, top_span_ends, top_antecedents, top_antecedent_scores = session.run(model.predictions, feed_dict=feed_dict)\n predicted_antecedents = model.get_predicted_antecedents(top_antecedents, top_antecedent_scores)\n indata[\"predicted_clusters\"], _ = model.get_predicted_clusters(top_span_starts, top_span_ends, predicted_antecedents)\n indata[\"top_spans\"] = list(zip((int(i) for i in top_span_starts), (int(i) for i in top_span_ends)))\n indata['head_scores'] = []\n\n output_file.write(json.dumps(indata))\n output_file.write(\"\\n\")\n\npred(data, \"precoout.txt\")"
] | [
[
"tensorflow.Session"
]
] |
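The coreference script in the row above packs [CLS]/[SEP] segments and maps WordPiece subtokens back to the original tokens (the ctoken/cpos loop). Below is a simplified, self-contained stand-in for that alignment step; the toy subword split and the equality-based matching are assumptions for illustration, not the exact length-based logic used in the record.

```python
# Illustrative sketch of subtoken -> original-token alignment, standing in for the
# loop over tokenizer.tokenize(...) output in the listing above.
raw_tokens = ["transport", "should", "be", "better"]
subtokens = ["trans", "##port", "should", "be", "bet", "##ter"]  # hypothetical WordPiece split

subtoken_map = []   # index of the original token each subtoken belongs to
token_idx = 0
consumed = ""       # characters of the current original token matched so far
for piece in subtokens:
    subtoken_map.append(token_idx)
    consumed += piece[2:] if piece.startswith("##") else piece
    if consumed == raw_tokens[token_idx]:   # finished the current original token
        token_idx += 1
        consumed = ""

print(subtoken_map)  # [0, 0, 1, 2, 3, 3]
```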
Je-Ba/Wind-farm-wake-control-using-convolutional-neural-networks | [
"4b1accaa262961d78bc82fc28ed9273f6f5a5a63"
] | [
"CNNWake/train_CNN.py"
] | [
"import torch\nimport torch.nn as nn\nimport torch.optim as optim\nimport matplotlib.pyplot as plt\nfrom torch.utils.data import TensorDataset, DataLoader\nfrom torch.optim import lr_scheduler\nfrom .CNN_model import Generator\n\n\n__author__ = \"Jens Bauer\"\n__copyright__ = \"Copyright 2021, CNNwake\"\n__credits__ = [\"Jens Bauer\"]\n__license__ = \"MIT\"\n__version__ = \"1.0\"\n__email__ = \"[email protected]\"\n__status__ = \"Development\"\n\n\ndef train_CNN_model(\n nr_filters, nr_epochs, learing_rate, batch_size,\n train_size, val_size, image_size, device, u_range,\n ti_range, yaw_range, model_name, nr_workers=0, floris_path=\".\"):\n \"\"\"\n Create a new model and train it for a certain number of epochs using a\n newly generated dataset. Hyper-parameters such as model size or lr can be\n changed as input to the function.\n After training the model error for all epochs is plotted and the model\n performance will be evaluated on a test set. Finally, the model\n will saved as the model_name which needs to add as .pt file\n\n Args:\n nr_filters (int): Nr. of filters used for the conv layers\n nr_epochs (int): Nr. of training epochs\n learing_rate (float): Model learning rate\n batch_size (int): Training batch size\n train_size (int): Size of the generated training set\n val_size (int): Size of the generated validation set\n image_size (int): Size of the data set images, needs to match the\n model output size for the current model this is 163 x 163\n device (torch.device): Device to run the training on, cuda or cpu\n u_range (list): Bound of u values [u_min, u_max] used\n ti_range (list): Bound of TI values [TI_min, TI_max] used\n yaw_range (list): Bound of yaw angles [yaw_min, yaw_max] used\n model_name (str): Name of the trained saved model (needs be .pt)\n nr_workers (int, optional): Nr. of worker to load data. Defaults to 0.\n floris_path (str, optinal): Path to FLORIS jason file.\n\n Returns:\n gen (Generator): Trained model\n loss (float): training loss defined by the loss function\n val_error (float): Percentage error on the validation set\n \"\"\"\n\n # The current inputs are: u, ti and yaw. 
If more are\n # used please change this input var\n nr_input_var = 3\n\n # create a generator of the specified size\n gen = Generator(nr_input_var, nr_filters).to(device)\n\n # create a datasets from the data generated by FLORIS\n x_train, y_train = gen.create_floris_dataset(\n size=train_size, image_size=image_size, u_range=u_range,\n ti_range=ti_range, yaw_range=yaw_range, floris_init_path=floris_path,\n curl=False)\n x_eval, y_eval = gen.create_floris_dataset(\n size=val_size, image_size=image_size, u_range=u_range,\n ti_range=ti_range, yaw_range=yaw_range, floris_init_path=floris_path,\n curl=False)\n dataset = TensorDataset(y_train.unsqueeze(1), x_train.float())\n # generate dataload for training\n dataloader = DataLoader(dataset, batch_size=batch_size, shuffle=True,\n num_workers=nr_workers)\n\n # init the weights of the generator\n gen.initialize_weights()\n # set up and optimizer and learing rate scheduler using hyperparameters\n optimizer = optim.Adam(gen.parameters(), lr=learing_rate)\n scheduler_gen = lr_scheduler.ReduceLROnPlateau(\n optimizer, 'min', factor=0.6, patience=4, verbose=True)\n\n # use L2 norm as criterion\n criterion = nn.MSELoss()\n\n # init to list to store error\n error_list = []\n\n for _ in range(nr_epochs): # train model\n\n gen.train() # set model to training mode\n\n # use method to train for one epoch\n loss = gen.epoch_training(criterion, optimizer, dataloader, device)\n\n gen.eval() # set model to evaluation\n # evaluation on validation set\n val_error = gen.error(x_eval, y_eval,\n device, image_size=image_size,\n normalisation=12)\n # if error has not decreased over the past 4\n # epochs decrease the lr by a factor of 0.6\n scheduler_gen.step(val_error)\n error_list.append(val_error)\n\n print(f\" Epoch: {_:.0f},\"\n f\" Training loss: {loss:.4f},\"\n f\" Validation error: {val_error:.2f}\")\n\n print(\"Finished training\")\n # save model\n gen.save_model(model_name)\n\n # plot the val error over the epochs\n plt.plot(range(nr_epochs), error_list)\n plt.show()\n\n return gen, loss, val_error\n\n\nif __name__ == '__main__':\n # To run individual CNNWake files, the imports are not allowed to be\n # relative. Instead of: from .CNN_model import Generator\n # it needs to be: from CNN_model import Generator for all CNNWake imports\n\n # Set device used for training\n devices = torch.device(\"cuda\" if torch.cuda.is_available() else \"cpu\")\n # Train a new model with the given parameters\n train_CNN_model(\n nr_filters=16, nr_epochs=25, learing_rate=0.003, batch_size=50,\n train_size=200, val_size=30, image_size=163, device=devices,\n u_range=[3, 12], ti_range=[0.015, 0.25], yaw_range=[-30, 30],\n model_name='generator.pt'\n )\n"
] | [
[
"torch.nn.MSELoss",
"torch.cuda.is_available",
"torch.optim.lr_scheduler.ReduceLROnPlateau",
"torch.utils.data.DataLoader",
"matplotlib.pyplot.show"
]
] |
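The train_CNN.py record above follows a standard PyTorch regression loop: MSE loss, Adam, and a ReduceLROnPlateau scheduler stepped on the validation error. A minimal sketch of that pattern, with a toy linear model and random tensors standing in for CNNWake's Generator and the FLORIS-generated dataset:

```python
# Minimal, self-contained version of the training pattern used in train_CNN_model.
import torch
import torch.nn as nn
import torch.optim as optim
from torch.optim import lr_scheduler
from torch.utils.data import TensorDataset, DataLoader

device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
model = nn.Linear(3, 1).to(device)                   # stand-in for Generator(nr_input_var, nr_filters)
x, y = torch.randn(64, 3), torch.randn(64, 1)        # stand-in for the FLORIS dataset
loader = DataLoader(TensorDataset(x, y), batch_size=16, shuffle=True)

criterion = nn.MSELoss()
optimizer = optim.Adam(model.parameters(), lr=0.003)
scheduler = lr_scheduler.ReduceLROnPlateau(optimizer, "min", factor=0.6, patience=4)

for epoch in range(5):
    model.train()
    for xb, yb in loader:
        xb, yb = xb.to(device), yb.to(device)
        optimizer.zero_grad()
        loss = criterion(model(xb), yb)
        loss.backward()
        optimizer.step()
    model.eval()
    with torch.no_grad():
        val_error = criterion(model(x.to(device)), y.to(device)).item()
    scheduler.step(val_error)   # reduce lr if the validation error plateaus
    print(f"epoch {epoch}: val_error={val_error:.4f}")
```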
datastreaming/skyline-1 | [
"b02de2f40f8deb6ed1fddd0eace5b19a9b59d0de"
] | [
"skyline/webapp/luminosity_plot_cloudburst.py"
] | [
"import logging\nimport os\nimport traceback\n\nimport pandas as pd\nfrom sqlalchemy.sql import select\nfrom adtk.visualization import plot\n\nimport settings\nfrom functions.database.queries.get_cloudburst_row import get_cloudburst_row\nfrom functions.graphite.get_metrics_timeseries import get_metrics_timeseries\nfrom database import get_engine, engine_disposal, cloudburst_table_meta\n\nskyline_app = 'webapp'\nskyline_app_logger = '%sLog' % skyline_app\nlogger = logging.getLogger(skyline_app_logger)\nskyline_app_logfile = '%s/%s.log' % (settings.LOG_PATH, skyline_app)\nlogfile = '%s/%s.log' % (settings.LOG_PATH, skyline_app)\n\n\ndef get_cloudburst_plot(cloudburst_id, base_name, shift, all_in_period=False):\n \"\"\"\n Create a plot of the cloudburst and return the path and filename\n\n :param cloudburst_id: the cloudburt id\n :param base_name: the name of the metric\n :param shift: the number of indice to shift the plot\n :type cloudburst_id: int\n :type base_name: str\n :type shift: int\n :return: path and file\n :rtype: str\n\n \"\"\"\n\n function_str = 'get_cloudburst_plot'\n\n logger.info(\n 'get_cloudburst_plot - cloudburst_id: %s, base_name: %s' % (\n str(cloudburst_id), str(base_name)))\n\n save_to_file = '%s/cloudburst_id.%s.%s.shift.%s.png' % (\n settings.SKYLINE_TMP_DIR, str(cloudburst_id), base_name, str(shift))\n if all_in_period:\n save_to_file = '%s/cloudburst_id.%s.all.%s.shift.%s.png' % (\n settings.SKYLINE_TMP_DIR, str(cloudburst_id), base_name, str(shift))\n\n cloudburst_dict = {}\n try:\n cloudburst_dict = get_cloudburst_row(skyline_app, cloudburst_id)\n except Exception as err:\n logger.error(traceback.format_exc())\n logger.error('error :: %s :: get_cloudburst_row failed - %s' % (\n function_str, err))\n raise\n\n if not cloudburst_dict:\n logger.error('error :: %s :: no cloudburst_dict - %s' % function_str)\n return None, None\n\n if os.path.isfile(save_to_file):\n return cloudburst_dict, save_to_file\n\n try:\n from_timestamp = cloudburst_dict['from_timestamp']\n until_timestamp = from_timestamp + cloudburst_dict['full_duration']\n resolution = cloudburst_dict['resolution']\n except Exception as err:\n logger.error(traceback.format_exc())\n logger.error('error :: %s :: failed parse values from cloudburst_dict - %s' % (\n function_str, err))\n raise\n\n metrics_functions = {}\n metrics_functions[base_name] = {}\n metrics_functions[base_name]['functions'] = None\n\n if resolution > 60:\n resolution_minutes = int(resolution / 60)\n summarize_intervalString = '%smin' % str(resolution_minutes)\n summarize_func = 'median'\n metrics_functions[base_name]['functions'] = {'summarize': {'intervalString': summarize_intervalString, 'func': summarize_func}}\n\n try:\n metrics_timeseries = get_metrics_timeseries(skyline_app, metrics_functions, from_timestamp, until_timestamp, log=False)\n except Exception as err:\n logger.error(traceback.format_exc())\n logger.error('error :: %s :: get_metrics_timeseries failed - %s' % (\n function_str, err))\n raise\n\n try:\n timeseries = metrics_timeseries[base_name]['timeseries']\n timeseries_length = len(timeseries)\n timeseries = timeseries[1:(timeseries_length - 2)]\n except Exception as err:\n logger.error(traceback.format_exc())\n logger.error('error :: %s :: failed to determine timeseries - %s' % (\n function_str, err))\n raise\n\n anomalies_in_period = []\n if all_in_period:\n try:\n engine, fail_msg, trace = get_engine(skyline_app)\n except Exception as err:\n trace = traceback.format_exc()\n logger.error(trace)\n fail_msg = 'error :: 
%s :: could not get a MySQL engine - %s' % (function_str, err)\n logger.error('%s' % fail_msg)\n if engine:\n engine_disposal(skyline_app, engine)\n raise\n try:\n cloudburst_table, log_msg, trace = cloudburst_table_meta(skyline_app, engine)\n except Exception as err:\n logger.error(traceback.format_exc())\n logger.error('error :: %s :: failed to get cloudburst_table meta for cloudburst id %s - %s' % (\n function_str, str(cloudburst_id), err))\n if engine:\n engine_disposal(engine)\n raise\n try:\n connection = engine.connect()\n stmt = select([cloudburst_table]).\\\n where(cloudburst_table.c.metric_id == cloudburst_dict['metric_id']).\\\n where(cloudburst_table.c.timestamp >= from_timestamp).\\\n where(cloudburst_table.c.timestamp <= until_timestamp).\\\n where(cloudburst_table.c.id != cloudburst_id)\n result = connection.execute(stmt)\n for row in result:\n anomalies_in_period.append([row['timestamp'], row['end']])\n connection.close()\n except Exception as err:\n logger.error(traceback.format_exc())\n logger.error('error :: %s :: could not get cloudburst row for cloudburst id %s - %s' % (\n function_str, str(cloudburst_id), err))\n if engine:\n engine_disposal(engine)\n raise\n if engine:\n engine_disposal(skyline_app, engine)\n\n anomalies = []\n if anomalies_in_period:\n logger.info(\n 'get_cloudburst_plot - adding %s all_in_period anomalies to cloudburst plot' % (\n str(len(anomalies_in_period))))\n for period_anomalies in anomalies_in_period:\n new_anomalies = [item for item in timeseries if int(item[0]) >= period_anomalies[0] and int(item[0]) <= period_anomalies[1]]\n if new_anomalies:\n anomalies = anomalies + new_anomalies\n try:\n cloudburst_anomalies = [item for item in timeseries if int(item[0]) >= cloudburst_dict['timestamp'] and int(item[0]) <= cloudburst_dict['end']]\n anomalies = anomalies + cloudburst_anomalies\n df = pd.DataFrame(timeseries, columns=['date', 'value'])\n df['date'] = pd.to_datetime(df['date'], unit='s')\n datetime_index = pd.DatetimeIndex(df['date'].values)\n df = df.set_index(datetime_index)\n df.drop('date', axis=1, inplace=True)\n anomalies_data = []\n # @modified 20210831\n # Align periods\n # anomaly_timestamps = [int(item[0]) for item in anomalies]\n # anomaly_timestamps = [(int(item[0]) + (resolution * 2)) for item in anomalies]\n # anomaly_timestamps = [(int(item[0]) + (resolution * 6)) for item in anomalies]\n # anomaly_timestamps = [(int(item[0]) + (resolution * 4)) for item in anomalies]\n # anomaly_timestamps = [(int(item[0]) + (resolution * 3)) for item in anomalies]\n anomaly_timestamps = [(int(item[0]) + (resolution * shift)) for item in anomalies]\n for item in timeseries:\n if int(item[0]) in anomaly_timestamps:\n anomalies_data.append(1)\n else:\n anomalies_data.append(0)\n df['anomalies'] = anomalies_data\n title = '%s\\ncloudburst id: %s' % (base_name, str(cloudburst_id))\n if all_in_period:\n title = '%s (all in period)' % title\n plot(df['value'], anomaly=df['anomalies'], anomaly_color='red', title=title, save_to_file=save_to_file)\n except Exception as err:\n logger.error(traceback.format_exc())\n logger.error('error :: %s :: failed to plot cloudburst - %s' % (\n function_str, err))\n raise\n\n if not os.path.isfile(save_to_file):\n return cloudburst_dict, None\n\n return cloudburst_dict, save_to_file\n"
] | [
[
"pandas.to_datetime",
"pandas.DataFrame",
"pandas.DatetimeIndex"
]
] |
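Before plotting, the cloudburst code above converts a list of (unix_timestamp, value) pairs into a datetime-indexed DataFrame and adds a 0/1 anomaly column. A condensed sketch of just those pandas steps, with made-up sample data and anomaly timestamps:

```python
# Datetime-indexed DataFrame with an anomaly flag column, as in get_cloudburst_plot.
import pandas as pd

timeseries = [(1630000000, 1.0), (1630000060, 1.2), (1630000120, 9.5), (1630000180, 1.1)]
anomaly_timestamps = {1630000120}   # hypothetical anomalous points

df = pd.DataFrame(timeseries, columns=["date", "value"])
df["date"] = pd.to_datetime(df["date"], unit="s")
df = df.set_index(pd.DatetimeIndex(df["date"].values))
df.drop("date", axis=1, inplace=True)
df["anomalies"] = [1 if int(ts) in anomaly_timestamps else 0 for ts, _ in timeseries]

print(df)
```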
illinois-ceesd/mirgecom | [
"6ae28905dbb9d073a9f778111d12b10e474fe799"
] | [
"examples/autoignition-mpi.py"
] | [
"\"\"\"Demonstrate combustive mixture with Pyrometheus.\"\"\"\n\n__copyright__ = \"\"\"\nCopyright (C) 2020 University of Illinois Board of Trustees\n\"\"\"\n\n__license__ = \"\"\"\nPermission is hereby granted, free of charge, to any person obtaining a copy\nof this software and associated documentation files (the \"Software\"), to deal\nin the Software without restriction, including without limitation the rights\nto use, copy, modify, merge, publish, distribute, sublicense, and/or sell\ncopies of the Software, and to permit persons to whom the Software is\nfurnished to do so, subject to the following conditions:\n\nThe above copyright notice and this permission notice shall be included in\nall copies or substantial portions of the Software.\n\nTHE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\nIMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\nFITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\nAUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\nLIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\nOUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN\nTHE SOFTWARE.\n\"\"\"\nimport logging\nimport numpy as np\nimport pyopencl as cl\nimport pyopencl.tools as cl_tools\nfrom functools import partial\n\nfrom meshmode.array_context import (\n PyOpenCLArrayContext,\n PytatoPyOpenCLArrayContext\n)\nfrom mirgecom.profiling import PyOpenCLProfilingArrayContext\n\nfrom meshmode.mesh import BTAG_ALL, BTAG_NONE # noqa\nfrom grudge.eager import EagerDGDiscretization\nfrom grudge.shortcuts import make_visualizer\n\n\nfrom logpyle import IntervalTimer, set_dt\nfrom mirgecom.euler import extract_vars_for_logging, units_for_logging\nfrom mirgecom.euler import euler_operator\nfrom mirgecom.simutil import (\n get_sim_timestep,\n generate_and_distribute_mesh,\n write_visfile,\n allsync\n)\nfrom mirgecom.io import make_init_message\nfrom mirgecom.mpi import mpi_entry_point\nfrom mirgecom.integrators import rk4_step\nfrom mirgecom.steppers import advance_state\nfrom mirgecom.boundary import AdiabaticSlipBoundary\nfrom mirgecom.initializers import MixtureInitializer\nfrom mirgecom.eos import PyrometheusMixture\nfrom mirgecom.gas_model import GasModel\nfrom arraycontext import thaw\n\nfrom mirgecom.logging_quantities import (\n initialize_logmgr,\n logmgr_add_many_discretization_quantities,\n logmgr_add_cl_device_info,\n logmgr_add_device_memory_usage,\n set_sim_state\n)\n\nimport cantera\n\nlogger = logging.getLogger(__name__)\n\n\nclass MyRuntimeError(RuntimeError):\n \"\"\"Simple exception for fatal driver errors.\"\"\"\n\n pass\n\n\n@mpi_entry_point\ndef main(ctx_factory=cl.create_some_context, use_logmgr=True,\n use_leap=False, use_profiling=False, casename=None,\n rst_filename=None, actx_class=PyOpenCLArrayContext,\n log_dependent=True):\n \"\"\"Drive example.\"\"\"\n cl_ctx = ctx_factory()\n\n if casename is None:\n casename = \"mirgecom\"\n\n from mpi4py import MPI\n comm = MPI.COMM_WORLD\n rank = comm.Get_rank()\n nproc = comm.Get_size()\n\n from mirgecom.simutil import global_reduce as _global_reduce\n global_reduce = partial(_global_reduce, comm=comm)\n\n logmgr = initialize_logmgr(use_logmgr,\n filename=f\"{casename}.sqlite\", mode=\"wu\", mpi_comm=comm)\n\n if use_profiling:\n queue = cl.CommandQueue(cl_ctx,\n properties=cl.command_queue_properties.PROFILING_ENABLE)\n else:\n queue = cl.CommandQueue(cl_ctx)\n\n actx = actx_class(\n queue,\n 
allocator=cl_tools.MemoryPool(cl_tools.ImmediateAllocator(queue)))\n\n # Some discretization parameters\n dim = 2\n nel_1d = 8\n order = 1\n\n # {{{ Time stepping control\n\n # This example runs only 3 steps by default (to keep CI ~short)\n # With the mixture defined below, equilibrium is achieved at ~40ms\n # To run to equilibrium, set t_final >= 40ms.\n\n # Time stepper selection\n if use_leap:\n from leap.rk import RK4MethodBuilder\n timestepper = RK4MethodBuilder(\"state\")\n else:\n timestepper = rk4_step\n\n # Time loop control parameters\n current_step = 0\n t_final = 1e-8\n current_cfl = 1.0\n current_dt = 1e-9\n current_t = 0\n constant_cfl = False\n\n # i.o frequencies\n nstatus = 1\n nviz = 5\n nhealth = 1\n nrestart = 5\n\n # }}} Time stepping control\n\n debug = False\n\n rst_path = \"restart_data/\"\n rst_pattern = (\n rst_path + \"{cname}-{step:04d}-{rank:04d}.pkl\"\n )\n if rst_filename: # read the grid from restart data\n rst_filename = f\"{rst_filename}-{rank:04d}.pkl\"\n\n from mirgecom.restart import read_restart_data\n restart_data = read_restart_data(actx, rst_filename)\n local_mesh = restart_data[\"local_mesh\"]\n local_nelements = local_mesh.nelements\n global_nelements = restart_data[\"global_nelements\"]\n assert restart_data[\"num_parts\"] == nproc\n rst_time = restart_data[\"t\"]\n rst_step = restart_data[\"step\"]\n rst_order = restart_data[\"order\"]\n else: # generate the grid from scratch\n from meshmode.mesh.generation import generate_regular_rect_mesh\n box_ll = -0.005\n box_ur = 0.005\n generate_mesh = partial(generate_regular_rect_mesh, a=(box_ll,)*dim,\n b=(box_ur,) * dim, nelements_per_axis=(nel_1d,)*dim)\n local_mesh, global_nelements = generate_and_distribute_mesh(comm,\n generate_mesh)\n local_nelements = local_mesh.nelements\n\n from grudge.dof_desc import DISCR_TAG_BASE, DISCR_TAG_QUAD\n from meshmode.discretization.poly_element import \\\n default_simplex_group_factory, QuadratureSimplexGroupFactory\n\n discr = EagerDGDiscretization(\n actx, local_mesh,\n discr_tag_to_group_factory={\n DISCR_TAG_BASE: default_simplex_group_factory(\n base_dim=local_mesh.dim, order=order),\n DISCR_TAG_QUAD: QuadratureSimplexGroupFactory(2*order + 1)\n },\n mpi_communicator=comm\n )\n nodes = thaw(discr.nodes(), actx)\n ones = discr.zeros(actx) + 1.0\n\n vis_timer = None\n\n if logmgr:\n logmgr_add_cl_device_info(logmgr, queue)\n logmgr_add_device_memory_usage(logmgr, queue)\n\n vis_timer = IntervalTimer(\"t_vis\", \"Time spent visualizing\")\n logmgr.add_quantity(vis_timer)\n\n logmgr.add_watches([\n (\"step.max\", \"step = {value}, \"),\n (\"t_sim.max\", \"sim time: {value:1.6e} s\\n\"),\n (\"t_step.max\", \"------- step walltime: {value:6g} s, \"),\n (\"t_log.max\", \"log walltime: {value:6g} s\")\n ])\n\n if log_dependent:\n logmgr_add_many_discretization_quantities(logmgr, discr, dim,\n extract_vars_for_logging,\n units_for_logging)\n logmgr.add_watches([\n (\"min_pressure\", \"\\n------- P (min, max) (Pa) = ({value:1.9e}, \"),\n (\"max_pressure\", \"{value:1.9e})\\n\"),\n (\"min_temperature\", \"------- T (min, max) (K) = ({value:7g}, \"),\n (\"max_temperature\", \"{value:7g})\\n\")])\n\n # {{{ Set up initial state using Cantera\n\n # Use Cantera for initialization\n # -- Pick up a CTI for the thermochemistry config\n # --- Note: Users may add their own CTI file by dropping it into\n # --- mirgecom/mechanisms alongside the other CTI files.\n from mirgecom.mechanisms import get_mechanism_cti\n mech_cti = get_mechanism_cti(\"uiuc\")\n\n cantera_soln = 
cantera.Solution(phase_id=\"gas\", source=mech_cti)\n nspecies = cantera_soln.n_species\n\n # Initial temperature, pressure, and mixutre mole fractions are needed to\n # set up the initial state in Cantera.\n temperature_seed = 1500.0 # Initial temperature hot enough to burn\n # Parameters for calculating the amounts of fuel, oxidizer, and inert species\n equiv_ratio = 1.0\n ox_di_ratio = 0.21\n stoich_ratio = 3.0\n # Grab the array indices for the specific species, ethylene, oxygen, and nitrogen\n i_fu = cantera_soln.species_index(\"C2H4\")\n i_ox = cantera_soln.species_index(\"O2\")\n i_di = cantera_soln.species_index(\"N2\")\n x = np.zeros(nspecies)\n # Set the species mole fractions according to our desired fuel/air mixture\n x[i_fu] = (ox_di_ratio*equiv_ratio)/(stoich_ratio+ox_di_ratio*equiv_ratio)\n x[i_ox] = stoich_ratio*x[i_fu]/equiv_ratio\n x[i_di] = (1.0-ox_di_ratio)*x[i_ox]/ox_di_ratio\n # Uncomment next line to make pylint fail when it can't find cantera.one_atm\n one_atm = cantera.one_atm # pylint: disable=no-member\n # one_atm = 101325.0\n\n # Let the user know about how Cantera is being initilized\n print(f\"Input state (T,P,X) = ({temperature_seed}, {one_atm}, {x}\")\n # Set Cantera internal gas temperature, pressure, and mole fractios\n cantera_soln.TPX = temperature_seed, one_atm, x\n # Pull temperature, total density, mass fractions, and pressure from Cantera\n # We need total density, and mass fractions to initialize the fluid/gas state.\n can_t, can_rho, can_y = cantera_soln.TDY\n can_p = cantera_soln.P\n # *can_t*, *can_p* should not differ (significantly) from user's initial data,\n # but we want to ensure that we use exactly the same starting point as Cantera,\n # so we use Cantera's version of these data.\n\n # }}}\n\n # {{{ Create Pyrometheus thermochemistry object & EOS\n\n # Create a Pyrometheus EOS with the Cantera soln. 
Pyrometheus uses Cantera and\n # generates a set of methods to calculate chemothermomechanical properties and\n # states for this particular mechanism.\n from mirgecom.thermochemistry import make_pyrometheus_mechanism_class\n pyro_mechanism = make_pyrometheus_mechanism_class(cantera_soln)(actx.np)\n eos = PyrometheusMixture(pyro_mechanism, temperature_guess=temperature_seed)\n\n gas_model = GasModel(eos=eos)\n from pytools.obj_array import make_obj_array\n\n def get_temperature_update(cv, temperature):\n y = cv.species_mass_fractions\n e = gas_model.eos.internal_energy(cv) / cv.mass\n return pyro_mechanism.get_temperature_update_energy(e, temperature, y)\n\n from mirgecom.gas_model import make_fluid_state\n\n def get_fluid_state(cv, tseed):\n return make_fluid_state(cv=cv, gas_model=gas_model,\n temperature_seed=tseed)\n\n compute_temperature_update = actx.compile(get_temperature_update)\n construct_fluid_state = actx.compile(get_fluid_state)\n\n # }}}\n\n # {{{ MIRGE-Com state initialization\n\n # Initialize the fluid/gas state with Cantera-consistent data:\n # (density, pressure, temperature, mass_fractions)\n print(f\"Cantera state (rho,T,P,Y) = ({can_rho}, {can_t}, {can_p}, {can_y}\")\n velocity = np.zeros(shape=(dim,))\n initializer = MixtureInitializer(dim=dim, nspecies=nspecies,\n pressure=can_p, temperature=can_t,\n massfractions=can_y, velocity=velocity)\n\n my_boundary = AdiabaticSlipBoundary()\n boundaries = {BTAG_ALL: my_boundary}\n\n if rst_filename:\n current_step = rst_step\n current_t = rst_time\n if logmgr:\n from mirgecom.logging_quantities import logmgr_set_time\n logmgr_set_time(logmgr, current_step, current_t)\n if order == rst_order:\n current_cv = restart_data[\"cv\"]\n temperature_seed = restart_data[\"temperature_seed\"]\n else:\n rst_cv = restart_data[\"cv\"]\n old_discr = EagerDGDiscretization(actx, local_mesh, order=rst_order,\n mpi_communicator=comm)\n from meshmode.discretization.connection import make_same_mesh_connection\n connection = make_same_mesh_connection(actx, discr.discr_from_dd(\"vol\"),\n old_discr.discr_from_dd(\"vol\"))\n current_cv = connection(rst_cv)\n temperature_seed = connection(restart_data[\"temperature_seed\"])\n else:\n # Set the current state from time 0\n current_cv = initializer(eos=gas_model.eos, x_vec=nodes)\n temperature_seed = temperature_seed * ones\n\n # The temperature_seed going into this function is:\n # - At time 0: the initial temperature input data (maybe from Cantera)\n # - On restart: the restarted temperature seed from restart file (saving\n # the *seed* allows restarts to be deterministic\n current_fluid_state = construct_fluid_state(current_cv, temperature_seed)\n current_dv = current_fluid_state.dv\n temperature_seed = current_dv.temperature\n\n # Inspection at physics debugging time\n if debug:\n print(\"Initial MIRGE-Com state:\")\n print(f\"Initial DV pressure: {current_fluid_state.pressure}\")\n print(f\"Initial DV temperature: {current_fluid_state.temperature}\")\n\n # }}}\n\n visualizer = make_visualizer(discr)\n initname = initializer.__class__.__name__\n eosname = gas_model.eos.__class__.__name__\n init_message = make_init_message(dim=dim, order=order,\n nelements=local_nelements,\n global_nelements=global_nelements,\n dt=current_dt, t_final=t_final, nstatus=nstatus,\n nviz=nviz, cfl=current_cfl,\n constant_cfl=constant_cfl, initname=initname,\n eosname=eosname, casename=casename)\n\n # Cantera equilibrate calculates the expected end state @ chemical equilibrium\n # i.e. 
the expected state after all reactions\n cantera_soln.equilibrate(\"UV\")\n eq_temperature, eq_density, eq_mass_fractions = cantera_soln.TDY\n eq_pressure = cantera_soln.P\n\n # Report the expected final state to the user\n if rank == 0:\n logger.info(init_message)\n logger.info(f\"Expected equilibrium state:\"\n f\" {eq_pressure=}, {eq_temperature=},\"\n f\" {eq_density=}, {eq_mass_fractions=}\")\n\n def my_write_status(dt, cfl, dv=None):\n status_msg = f\"------ {dt=}\" if constant_cfl else f\"----- {cfl=}\"\n if ((dv is not None) and (not log_dependent)):\n\n temp = dv.temperature\n press = dv.pressure\n\n from grudge.op import nodal_min_loc, nodal_max_loc\n tmin = allsync(actx.to_numpy(nodal_min_loc(discr, \"vol\", temp)),\n comm=comm, op=MPI.MIN)\n tmax = allsync(actx.to_numpy(nodal_max_loc(discr, \"vol\", temp)),\n comm=comm, op=MPI.MAX)\n pmin = allsync(actx.to_numpy(nodal_min_loc(discr, \"vol\", press)),\n comm=comm, op=MPI.MIN)\n pmax = allsync(actx.to_numpy(nodal_max_loc(discr, \"vol\", press)),\n comm=comm, op=MPI.MAX)\n dv_status_msg = f\"\\nP({pmin}, {pmax}), T({tmin}, {tmax})\"\n status_msg = status_msg + dv_status_msg\n\n if rank == 0:\n logger.info(status_msg)\n\n def my_write_viz(step, t, dt, state, ts_field, dv, production_rates, cfl):\n viz_fields = [(\"cv\", state), (\"dv\", dv),\n (\"production_rates\", production_rates),\n (\"dt\" if constant_cfl else \"cfl\", ts_field)]\n write_visfile(discr, viz_fields, visualizer, vizname=casename,\n step=step, t=t, overwrite=True, vis_timer=vis_timer)\n\n def my_write_restart(step, t, state, temperature_seed):\n rst_fname = rst_pattern.format(cname=casename, step=step, rank=rank)\n if rst_fname == rst_filename:\n if rank == 0:\n logger.info(\"Skipping overwrite of restart file.\")\n else:\n rst_data = {\n \"local_mesh\": local_mesh,\n \"cv\": state.cv,\n \"temperature_seed\": temperature_seed,\n \"t\": t,\n \"step\": step,\n \"order\": order,\n \"global_nelements\": global_nelements,\n \"num_parts\": nproc\n }\n from mirgecom.restart import write_restart_file\n write_restart_file(actx, rst_data, rst_fname, comm)\n\n def my_health_check(cv, dv):\n import grudge.op as op\n health_error = False\n\n pressure = dv.pressure\n temperature = dv.temperature\n\n from mirgecom.simutil import check_naninf_local, check_range_local\n if check_naninf_local(discr, \"vol\", pressure):\n health_error = True\n logger.info(f\"{rank=}: Invalid pressure data found.\")\n\n if check_range_local(discr, \"vol\", pressure, 1e5, 2.6e5):\n health_error = True\n logger.info(f\"{rank=}: Pressure range violation.\")\n\n if check_naninf_local(discr, \"vol\", temperature):\n health_error = True\n logger.info(f\"{rank=}: Invalid temperature data found.\")\n if check_range_local(discr, \"vol\", temperature, 1.498e3, 1.6e3):\n health_error = True\n logger.info(f\"{rank=}: Temperature range violation.\")\n\n # This check is the temperature convergence check\n # The current *temperature* is what Pyrometheus gets\n # after a fixed number of Newton iterations, *n_iter*.\n # Calling `compute_temperature` here with *temperature*\n # input as the guess returns the calculated gas temperature after\n # yet another *n_iter*.\n # The difference between those two temperatures is the\n # temperature residual, which can be used as an indicator of\n # convergence in Pyrometheus `get_temperature`.\n # Note: The local max jig below works around a very long compile\n # in lazy mode.\n temp_resid = compute_temperature_update(cv, temperature) / temperature\n temp_err = 
(actx.to_numpy(op.nodal_max_loc(discr, \"vol\", temp_resid)))\n if temp_err > 1e-8:\n health_error = True\n logger.info(f\"{rank=}: Temperature is not converged {temp_resid=}.\")\n\n return health_error\n\n from mirgecom.inviscid import get_inviscid_timestep\n\n def get_dt(state):\n return get_inviscid_timestep(discr, state=state)\n\n compute_dt = actx.compile(get_dt)\n\n from mirgecom.inviscid import get_inviscid_cfl\n\n def get_cfl(state, dt):\n return get_inviscid_cfl(discr, dt=dt, state=state)\n\n compute_cfl = actx.compile(get_cfl)\n\n def get_production_rates(cv, temperature):\n return eos.get_production_rates(cv, temperature)\n\n compute_production_rates = actx.compile(get_production_rates)\n\n def my_get_timestep(t, dt, state):\n # richer interface to calculate {dt,cfl} returns node-local estimates\n t_remaining = max(0, t_final - t)\n\n if constant_cfl:\n ts_field = current_cfl * compute_dt(state)\n from grudge.op import nodal_min_loc\n dt = allsync(actx.to_numpy(nodal_min_loc(discr, \"vol\", ts_field)),\n comm=comm, op=MPI.MIN)\n cfl = current_cfl\n else:\n ts_field = compute_cfl(state, current_dt)\n from grudge.op import nodal_max_loc\n cfl = allsync(actx.to_numpy(nodal_max_loc(discr, \"vol\", ts_field)),\n comm=comm, op=MPI.MAX)\n return ts_field, cfl, min(t_remaining, dt)\n\n def my_pre_step(step, t, dt, state):\n cv, tseed = state\n fluid_state = construct_fluid_state(cv, tseed)\n dv = fluid_state.dv\n\n try:\n\n if logmgr:\n logmgr.tick_before()\n\n from mirgecom.simutil import check_step\n do_viz = check_step(step=step, interval=nviz)\n do_restart = check_step(step=step, interval=nrestart)\n do_health = check_step(step=step, interval=nhealth)\n do_status = check_step(step=step, interval=nstatus)\n\n if do_health:\n health_errors = global_reduce(my_health_check(cv, dv), op=\"lor\")\n if health_errors:\n if rank == 0:\n logger.info(\"Fluid solution failed health check.\")\n raise MyRuntimeError(\"Failed simulation health check.\")\n\n ts_field, cfl, dt = my_get_timestep(t=t, dt=dt, state=fluid_state)\n\n if do_status:\n my_write_status(dt=dt, cfl=cfl, dv=dv)\n\n if do_restart:\n my_write_restart(step=step, t=t, state=fluid_state,\n temperature_seed=tseed)\n\n if do_viz:\n production_rates = compute_production_rates(fluid_state.cv,\n fluid_state.temperature)\n my_write_viz(step=step, t=t, dt=dt, state=cv, dv=dv,\n production_rates=production_rates,\n ts_field=ts_field, cfl=cfl)\n\n except MyRuntimeError:\n if rank == 0:\n logger.info(\"Errors detected; attempting graceful exit.\")\n # my_write_viz(step=step, t=t, dt=dt, state=cv)\n # my_write_restart(step=step, t=t, state=fluid_state)\n raise\n\n return state, dt\n\n def my_post_step(step, t, dt, state):\n cv, tseed = state\n fluid_state = construct_fluid_state(cv, tseed)\n\n # Logmgr needs to know about EOS, dt, dim?\n # imo this is a design/scope flaw\n if logmgr:\n set_dt(logmgr, dt)\n set_sim_state(logmgr, dim, cv, gas_model.eos)\n logmgr.tick_after()\n return make_obj_array([cv, fluid_state.temperature]), dt\n\n def my_rhs(t, state):\n cv, tseed = state\n from mirgecom.gas_model import make_fluid_state\n fluid_state = make_fluid_state(cv=cv, gas_model=gas_model,\n temperature_seed=tseed)\n return make_obj_array([\n euler_operator(discr, state=fluid_state, time=t, boundaries=boundaries,\n gas_model=gas_model)\n + eos.get_species_source_terms(cv, fluid_state.temperature),\n 0*tseed])\n\n current_dt = get_sim_timestep(discr, current_fluid_state, current_t, current_dt,\n current_cfl, t_final, constant_cfl)\n\n current_step, 
current_t, current_state = \\\n advance_state(rhs=my_rhs, timestepper=timestepper,\n pre_step_callback=my_pre_step,\n post_step_callback=my_post_step, dt=current_dt,\n state=make_obj_array([current_cv, temperature_seed]),\n t=current_t, t_final=t_final)\n\n # Dump the final data\n if rank == 0:\n logger.info(\"Checkpointing final state ...\")\n\n final_cv, tseed = current_state\n final_fluid_state = construct_fluid_state(final_cv, tseed)\n final_dv = final_fluid_state.dv\n final_dm = compute_production_rates(final_cv, final_dv.temperature)\n ts_field, cfl, dt = my_get_timestep(t=current_t, dt=current_dt,\n state=final_fluid_state)\n my_write_viz(step=current_step, t=current_t, dt=dt, state=final_cv,\n dv=final_dv, production_rates=final_dm, ts_field=ts_field, cfl=cfl)\n my_write_status(dt=dt, cfl=cfl, dv=final_dv)\n my_write_restart(step=current_step, t=current_t, state=final_fluid_state,\n temperature_seed=tseed)\n\n if logmgr:\n logmgr.close()\n elif use_profiling:\n print(actx.tabulate_profiling_data())\n\n finish_tol = 1e-16\n assert np.abs(current_t - t_final) < finish_tol\n\n\nif __name__ == \"__main__\":\n import argparse\n casename = \"autoignition\"\n parser = argparse.ArgumentParser(description=f\"MIRGE-Com Example: {casename}\")\n parser.add_argument(\"--lazy\", action=\"store_true\",\n help=\"switch to a lazy computation mode\")\n parser.add_argument(\"--profiling\", action=\"store_true\",\n help=\"turn on detailed performance profiling\")\n parser.add_argument(\"--log\", action=\"store_true\", default=True,\n help=\"turn on logging\")\n parser.add_argument(\"--leap\", action=\"store_true\",\n help=\"use leap timestepper\")\n parser.add_argument(\"--restart_file\", help=\"root name of restart file\")\n parser.add_argument(\"--casename\", help=\"casename to use for i/o\")\n args = parser.parse_args()\n from warnings import warn\n warn(\"Automatically turning off DV logging. MIRGE-Com Issue(578)\")\n log_dependent = False\n if args.profiling:\n if args.lazy:\n raise ValueError(\"Can't use lazy and profiling together.\")\n actx_class = PyOpenCLProfilingArrayContext\n else:\n if args.lazy:\n log_dependent = False\n actx_class = PytatoPyOpenCLArrayContext\n else:\n actx_class = PyOpenCLArrayContext\n\n logging.basicConfig(format=\"%(message)s\", level=logging.INFO)\n if args.casename:\n casename = args.casename\n rst_filename = None\n if args.restart_file:\n rst_filename = args.restart_file\n\n main(use_logmgr=args.log, use_leap=args.leap, use_profiling=args.profiling,\n casename=casename, rst_filename=rst_filename, actx_class=actx_class,\n log_dependent=log_dependent)\n\n# vim: foldmethod=marker\n"
] | [
[
"numpy.abs",
"numpy.zeros"
]
] |
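The autoignition example above fills an np.zeros species vector with C2H4/O2/N2 mole fractions derived from the equivalence ratio. The same arithmetic in isolation, with hard-coded species indices and vector length standing in for the Cantera Solution lookups:

```python
# Mole-fraction setup for a fuel/air mixture at a given equivalence ratio.
import numpy as np

nspecies = 7                 # assumed size of the mechanism's species vector
i_fu, i_ox, i_di = 0, 1, 2   # hypothetical indices for C2H4, O2, N2

equiv_ratio = 1.0
ox_di_ratio = 0.21           # O2 fraction of "air"
stoich_ratio = 3.0           # moles O2 per mole C2H4 at stoichiometry

x = np.zeros(nspecies)
x[i_fu] = (ox_di_ratio * equiv_ratio) / (stoich_ratio + ox_di_ratio * equiv_ratio)
x[i_ox] = stoich_ratio * x[i_fu] / equiv_ratio
x[i_di] = (1.0 - ox_di_ratio) * x[i_ox] / ox_di_ratio

print(x, x.sum())  # the three nonzero entries sum to ~1
```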
danielbom/codewars | [
"d45b5a813c6f1d952a50d22f0b2fcea4ef3d0e27"
] | [
"Python/7 - kyu/7 kyu - Sum of odd numbers.py"
] | [
"# https://www.codewars.com/kata/sum-of-odd-numbers/train/python\r\n# My solution\r\ndef row_sum_odd_numbers(n):\r\n return n**3\r\n\r\n# ...\r\nrow_sum_odd_numbers=(3).__rpow__\r\n\r\n# ...\r\nimport numpy as np\r\ndef row_sum_odd_numbers(n):\r\n return sum(np.linspace(n**2-(n-1),(n**2+n-1),n))"
] | [
[
"numpy.linspace"
]
] |
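A quick check that the solutions in the row above agree: row n of the odd-number triangle sums to n**3, which the numpy.linspace variant reproduces by summing the n odd numbers from n**2 - (n - 1) to n**2 + n - 1:

```python
import numpy as np

def row_sum_closed_form(n):
    return n ** 3

def row_sum_linspace(n):
    return int(sum(np.linspace(n ** 2 - (n - 1), n ** 2 + n - 1, n)))

assert all(row_sum_closed_form(n) == row_sum_linspace(n) for n in range(1, 20))
print(row_sum_linspace(13))  # 2197
```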
severilov/BCI-thesis | [
"18ffe5e72ba65150026d5d66c744e59589492535"
] | [
"code/train_fc.py"
] | [
"import os\nimport pickle\nimport torch\nimport numpy as np\nfrom tqdm import tqdm\nfrom torch import nn\nfrom torch.utils.data import DataLoader\nfrom datetime import datetime\n\nfrom models.fc_nn import BaselineNN\nfrom dataset.base_dataset import SimulateDataset\nfrom visualization import plot_loss\n\nTRAIN_DATASET_PATH = \"../data/train_data.pickle\"\nTEST_DATASET_PATH = \"../data/test_data.pickle\"\n\ndef predict_fc(model, x1, gen_len):\n model.eval()\n outputs = []\n inputs = x1\n for i in tqdm(range(gen_len)):\n output = np.array(model(torch.from_numpy(inputs)).detach())\n outputs.append(output)\n inputs = output\n return np.array(outputs).squeeze()\n\ndef train(trainset, testset, num_epochs=5, lr=0.01, batch_size=32, log_dir='./logs'):\n run_name = datetime.now().strftime(\"%d_%m_%Y.%H_%M\")\n\n model = BaselineNN()\n trainDataLoader = DataLoader(trainset, batch_size=batch_size)\n testDataLoader = DataLoader(testset, batch_size=batch_size)\n criterion = nn.MSELoss()\n optimizer = torch.optim.Adam(model.parameters(), lr=lr)\n\n train_losses, valid_losses = [], []\n for epoch in tqdm(range(num_epochs)): # loop over the dataset multiple times\n train_loss = 0.0\n num_batches = len(trainDataLoader)\n \n for i, data in enumerate(trainDataLoader):\n # get the inputs; data is a list of [inputs, labels]\n inputs, labels = data\n\n # zero the parameter gradients\n optimizer.zero_grad()\n\n # forward + backward + optimize\n outputs = model(inputs)\n loss = criterion(outputs, labels)\n loss.backward()\n optimizer.step()\n\n train_loss += loss.item()\n \n train_losses.append(train_loss / num_batches)\n train_loss = 0.0\n \n if log_dir is not None:\n plot_loss(train_losses, valid_losses, model_name='fc', log_dir=log_dir)\n torch.save(model.state_dict(), os.path.join(log_dir, f\"fc_{run_name}.pth\"))\n\n\ndef main():\n with open(TRAIN_DATASET_PATH, 'rb') as f:\n train_data = pickle.load(f)\n with open(TEST_DATASET_PATH, 'rb') as f:\n test_data = pickle.load(f)\n \n [x_train, xt_train, y_train] = train_data\n [x_test, xt_test, y_test] = test_data\n trainset = SimulateDataset(x_train, y_train)\n testset = SimulateDataset(x_test, y_test)\n train(trainset, testset, num_epochs=5, lr=0.01, batch_size=32, log_dir='./logs')\n\n\nif __name__ == '__main__':\n main()"
] | [
[
"torch.nn.MSELoss",
"numpy.array",
"torch.utils.data.DataLoader",
"torch.from_numpy"
]
] |
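predict_fc in the record above rolls the network forward autoregressively, feeding each prediction back in as the next input via torch.from_numpy. A self-contained sketch of that loop, with an untrained nn.Linear and a 4-feature input standing in for BaselineNN and the simulated data:

```python
# Autoregressive rollout: the model's output at step t becomes its input at step t + 1.
import numpy as np
import torch
from torch import nn

model = nn.Linear(4, 4)   # stand-in for BaselineNN()
model.eval()

def rollout(model, x0, steps):
    outputs = []
    inputs = np.asarray(x0, dtype=np.float32)
    with torch.no_grad():
        for _ in range(steps):
            out = model(torch.from_numpy(inputs)).numpy()   # predict the next state
            outputs.append(out)
            inputs = out                                    # feed the prediction back in
    return np.array(outputs)

trajectory = rollout(model, np.random.randn(1, 4).astype(np.float32), steps=5)
print(trajectory.shape)  # (5, 1, 4)
```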
cattale93/pytorch_self_supervised_learning | [
"162c26446837a7c0ffd2679b3fb54ba01f204123"
] | [
"Lib/Nets/utils/generic/trainSN.py"
] | [
"import os\nfrom torch.utils.tensorboard import SummaryWriter\nfrom Lib.Nets.SN.SN import SN\nfrom Lib.Nets.utils.config.config_routine import config_routine\nfrom Lib.Nets.utils.config.general_parser import general_parser\nfrom Lib.Nets.utils.config.specific_parser import specific_parser\nfrom Lib.Datasets.EUSAR.EUSARDataset import EUSARDataset\nimport argparse\nfrom torch.utils.data import DataLoader\nfrom Lib.Nets.utils.generic.generic_training import get_subset\n\n\n# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ #\n\"\"\"\nAuthor: Alessandro Cattoi\nDescription: This function is employed to test feature extraction capability. In fact can be called by network\nimplementations to train a classifier on the top of the capacity just leaned.\n\"\"\"\n# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ #\n\n\ndef trainSN(options, epoch, device):\n \"\"\"\n Run a quick SN training\n :param options: pretrained model options\n :param epoch: epoch to be loaded\n :param device:\n :return:\n \"\"\"\n \"\"\"-------------------------------CONFIG----------------------------------\"\"\"\n parser = argparse.ArgumentParser(description=\"PyTorch Regression GAN\")\n parser = general_parser(parser)\n opt = specific_parser(\n parser=parser, run_folder=options.log_dir, mode='train', tot_epochs=30, pretrained_GAN=options.checkpoint_dir,\n GAN_epoch=epoch, acc_log_freq=options.acc_log_freq, loss_log_freq=options.loss_log_freq,\n batch_size_SN=options.batch_size_SN, images_log_freq=options.images_log_freq,\n data_dir_train=options.data_dir_train2, data_dir_test=options.data_dir_test2,\n experiment_name='SN'+str(epoch), sar_c=options.sar_c, optical_c=options.optical_c,\n save_model_freq=1000, res_block_N=options.res_block_N)\n\n opt = config_routine(opt)\n\n \"\"\"-----------------------------DATA LOADER--------------------------------\"\"\"\n train_dataset = EUSARDataset(os.path.join(options.data_dir_train2), True, False, options.sar_c, options.optical_c)\n train_dataset = get_subset(train_dataset, options.prc_test)\n train_dataset = DataLoader(train_dataset, batch_size=options.batch_size_SN, shuffle=True,\n num_workers=options.workers, pin_memory=True, drop_last=False)\n\n test_dataset = EUSARDataset(os.path.join(options.data_dir_test2), True, False, options.sar_c, options.optical_c)\n test_dataset = get_subset(test_dataset, options.prc_test, True)\n test_dataset = DataLoader(test_dataset, batch_size=options.batch_size_SN, shuffle=False,\n num_workers=options.workers, pin_memory=True, drop_last=False)\n\n \"\"\"--------------------------------TRAIN-----------------------------------\"\"\"\n # Init model\n model = SN(opt, device)\n\n # set up tensorboard logging\n writer = SummaryWriter(log_dir=os.path.join(opt.tb_dir))\n # Model Training\n model.train(train_dataset, test_dataset, writer)\n"
] | [
[
"torch.utils.data.DataLoader"
]
] |
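trainSN above mainly wires datasets into DataLoaders (shuffled for training, unshuffled for testing, with pin_memory and drop_last set explicitly). A minimal reproduction of that setup using TensorDataset and random tensors in place of EUSARDataset; unlike the record, pin_memory is gated on CUDA availability here rather than hard-coded to True:

```python
import torch
from torch.utils.data import TensorDataset, DataLoader, random_split

full = TensorDataset(torch.randn(100, 2, 128, 128), torch.randn(100, 3, 128, 128))
train_set, test_set = random_split(full, [80, 20])
pin = torch.cuda.is_available()

train_loader = DataLoader(train_set, batch_size=8, shuffle=True,
                          num_workers=0, pin_memory=pin, drop_last=False)
test_loader = DataLoader(test_set, batch_size=8, shuffle=False,
                         num_workers=0, pin_memory=pin, drop_last=False)

for sar, optical in train_loader:
    print(sar.shape, optical.shape)   # torch.Size([8, 2, 128, 128]) torch.Size([8, 3, 128, 128])
    break
```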
amrrs/scikit-lego | [
"e4304a67ac114259259b688fe94aa64f22933245"
] | [
"sklego/meta.py"
] | [
"import numpy as np\nimport pandas as pd\nfrom sklearn import clone\nfrom sklearn.base import BaseEstimator, TransformerMixin, ClassifierMixin, MetaEstimatorMixin\nfrom sklearn.metrics import confusion_matrix\nfrom sklearn.utils.multiclass import unique_labels\nfrom sklearn.utils.validation import check_is_fitted, check_X_y, check_array, FLOAT_DTYPES\n\nfrom sklego.base import ProbabilisticClassifier\nfrom sklego.common import as_list, expanding_list, TrainOnlyTransformerMixin\n\n\nclass EstimatorTransformer(TransformerMixin, MetaEstimatorMixin, BaseEstimator):\n \"\"\"\n Allows using an estimator such as a model as a transformer in an earlier step of a pipeline\n\n :param estimator: An instance of the estimator that should be used for the transformation\n :param predict_func: The function called on the estimator when transforming e.g. (`predict`, `predict_proba`)\n \"\"\"\n\n def __init__(self, estimator, predict_func='predict'):\n self.estimator = estimator\n self.predict_func = predict_func\n\n def fit(self, X, y):\n \"\"\"Fits the estimator\"\"\"\n X, y = check_X_y(X, y, estimator=self, dtype=FLOAT_DTYPES)\n\n self.estimator_ = clone(self.estimator)\n self.estimator_.fit(X, y)\n return self\n\n def transform(self, X):\n \"\"\"\n Applies the `predict_func` on the fitted estimator.\n\n Returns an array of shape `(X.shape[0], )`.\n \"\"\"\n check_is_fitted(self, 'estimator_')\n return getattr(self.estimator_, self.predict_func)(X).reshape(-1, 1)\n\n\ndef constant_shrinkage(group_sizes: list, alpha: float) -> np.ndarray:\n r\"\"\"\n The augmented prediction for each level is the weighted average between its prediction and the augmented\n prediction for its parent.\n\n Let $\\hat{y}_i$ be the prediction at level $i$, with $i=0$ being the root, than the augmented prediction\n $\\hat{y}_i^* = \\alpha \\hat{y}_i + (1 - \\alpha) \\hat{y}_{i-1}^*$, with $\\hat{y}_0^* = \\hat{y}_0$.\n \"\"\"\n return np.array(\n [alpha ** (len(group_sizes) - 1)]\n + [alpha ** (len(group_sizes) - 1 - i) * (1 - alpha) for i in range(1, len(group_sizes) - 1)]\n + [(1 - alpha)]\n )\n\n\ndef relative_shrinkage(group_sizes: list) -> np.ndarray:\n \"\"\"Weigh each group according to it's size\"\"\"\n return np.array(group_sizes)\n\n\ndef min_n_obs_shrinkage(group_sizes: list, min_n_obs) -> np.ndarray:\n \"\"\"Use only the smallest group with a certain amount of observations\"\"\"\n if min_n_obs > max(group_sizes):\n raise ValueError(f\"There is no group with size greater than or equal to {min_n_obs}\")\n\n res = np.zeros(len(group_sizes))\n res[np.argmin(np.array(group_sizes) >= min_n_obs) - 1] = 1\n return res\n\n\nclass GroupedEstimator(BaseEstimator):\n \"\"\"\n Construct an estimator per data group. Splits data by values of a\n single column and fits one estimator per such column.\n\n :param estimator: the model/pipeline to be applied per group\n :param groups: the column(s) of the matrix/dataframe to select as a grouping parameter set\n :param value_columns: Columns to use in the prediction. 
If None (default), use all non-grouping columns\n :param shrinkage: How to perform shrinkage.\n None: No shrinkage (default)\n {\"constant\", \"min_n_obs\", \"relative\"} or a callable\n * constant: shrinked prediction for a level is weighted average of its prediction and its\n parents prediction\n * min_n_obs: shrinked prediction is the prediction for the smallest group with at least\n n observations in it\n * relative: each group-level is weight according to its size\n * function: a function that takes a list of group lengths and returns an array of the\n same size with the weights for each group\n :param use_global_model: With shrinkage: whether to have a model over the entire input as first group\n Without shrinkage: whether or not to fall back to a general model in case the group\n parameter is not found during `.predict()`\n :param **shrinkage_kwargs: keyword arguments to the shrinkage function\n \"\"\"\n def __init__(\n self, estimator, groups, value_columns=None, shrinkage=None, use_global_model=True, **shrinkage_kwargs\n ):\n self.estimator = estimator\n self.groups = groups\n self.value_columns = value_columns\n self.shrinkage = shrinkage\n self.use_global_model = use_global_model\n self.shrinkage_kwargs = shrinkage_kwargs\n\n def __set_shrinkage_function(self):\n if isinstance(self.shrinkage, str):\n # Predefined shrinkage functions\n shrink_options = {\n \"constant\": constant_shrinkage,\n \"relative\": relative_shrinkage,\n \"min_n_obs\": min_n_obs_shrinkage,\n }\n\n try:\n self.shrinkage_function_ = shrink_options[self.shrinkage]\n except KeyError:\n raise ValueError(f\"The specified shrinkage function {self.shrinkage} is not valid, \"\n f\"choose from {list(shrink_options.keys())} or supply a callable.\")\n elif callable(self.shrinkage):\n self.__check_shrinkage_func()\n self.shrinkage_function_ = self.shrinkage\n else:\n raise ValueError(f\"Invalid shrinkage specified. 
Should be either None (no shrinkage), str or callable.\")\n\n def __check_shrinkage_func(self):\n \"\"\"Validate the shrinkage function if a function is specified\"\"\"\n group_lengths = [10, 5, 2]\n expected_shape = np.array(group_lengths).shape\n try:\n result = self.shrinkage(group_lengths)\n except Exception as e:\n raise ValueError(f\"Caught an exception while checking the shrinkage function: {str(e)}\") from e\n else:\n if not isinstance(result, np.ndarray):\n raise ValueError(f\"shrinkage_function({group_lengths}) should return an np.ndarray\")\n if result.shape != expected_shape:\n raise ValueError(f\"shrinkage_function({group_lengths}).shape should be {expected_shape}\")\n\n @staticmethod\n def __check_cols_exist(X, cols):\n \"\"\"Check whether the specified grouping columns are in X\"\"\"\n if X.shape[1] == 0:\n raise ValueError(f\"0 feature(s) (shape=({X.shape[0]}, 0)) while a minimum of 1 is required.\")\n\n # X has been converted to a DataFrame\n x_cols = set(X.columns)\n diff = set(as_list(cols)) - x_cols\n\n if len(diff) > 0:\n raise ValueError(f'{diff} not in columns of X {x_cols}')\n\n @staticmethod\n def __check_missing_and_inf(X):\n \"\"\"Check that all elements of X are non-missing and finite, needed because check_array cannot handle strings\"\"\"\n if np.any(pd.isnull(X)):\n raise ValueError(\"X has NaN values\")\n try:\n if np.any(np.isinf(X)):\n raise ValueError(\"X has infinite values\")\n except TypeError:\n # if X cannot be converted to numeric, checking infinites does not make sense\n pass\n\n def __validate(self, X, y=None):\n \"\"\"Validate the input, used in both fit and predict\"\"\"\n if self.shrinkage and len(as_list(self.groups)) == 1 and not self.use_global_model:\n raise ValueError(\"Cannot do shrinkage with a single group if use_global_model is False\")\n\n self.__check_cols_exist(X, self.value_colnames_)\n self.__check_cols_exist(X, self.group_colnames_)\n\n # Split the model data from the grouping columns, this part is checked `regularly`\n X_data = X.loc[:, self.value_colnames_]\n\n # y can be None because __validate used in predict, X can have no columns if the estimator only uses y\n if X_data.shape[1] > 0 and y is not None:\n check_X_y(X_data, y, multi_output=True)\n elif y is not None:\n check_array(y, ensure_2d=False)\n elif X_data.shape[1] > 0:\n check_array(X_data)\n\n self.__check_missing_and_inf(X)\n\n def __fit_grouped_estimator(self, X, y, value_columns, group_columns):\n # Reset indices such that they are the same in X and y\n X, y = X.reset_index(drop=True), y.reset_index(drop=True)\n\n group_indices = X.groupby(group_columns).indices\n\n grouped_estimations = {\n group: clone(self.estimator).fit(X.loc[indices, value_columns], y.loc[indices])\n for group, indices in group_indices.items()\n }\n\n return grouped_estimations\n\n def __get_shrinkage_factor(self, X):\n \"\"\"Get for all complete groups an array of shrinkages\"\"\"\n counts = X.groupby(self.group_colnames_).size()\n\n # Groups that are split on all\n most_granular_groups = [grp for grp in self.groups_ if len(as_list(grp)) == len(self.group_colnames_)]\n\n # For each hierarchy level in each most granular group, get the number of observations\n hierarchical_counts = {\n granular_group: [counts[tuple(subgroup)].sum() for subgroup in expanding_list(granular_group, tuple)]\n for granular_group in most_granular_groups\n }\n\n # For each hierarchy level in each most granular group, get the shrinkage factor\n shrinkage_factors = {\n group: self.shrinkage_function_(counts, 
**self.shrinkage_kwargs)\n for group, counts in hierarchical_counts.items()\n }\n\n # Make sure that the factors sum to one\n shrinkage_factors = {group: value / value.sum() for group, value in shrinkage_factors.items()}\n\n return shrinkage_factors\n\n def __prepare_input_data(self, X, y=None):\n if isinstance(X, np.ndarray):\n X = pd.DataFrame(X, columns=[str(_) for _ in range(X.shape[1])])\n\n if self.shrinkage is not None and self.use_global_model:\n global_col = \"a-column-that-is-constant-for-all-data\"\n X = X.assign(**{global_col: \"global\"})\n self.groups = [global_col] + as_list(self.groups)\n\n if y is not None:\n if isinstance(y, np.ndarray):\n pred_col = 'the-column-that-i-want-to-predict-but-dont-have-the-name-for'\n cols = pred_col if y.ndim == 1 else [\"_\".join([pred_col, i]) for i in range(y.shape[1])]\n y = pd.Series(y, name=cols) if y.ndim == 1 else pd.DataFrame(y, columns=cols)\n\n return X, y\n\n return X\n\n def fit(self, X, y=None):\n \"\"\"\n Fit the model using X, y as training data. Will also learn the groups that exist within the dataset.\n\n :param X: array-like, shape=(n_columns, n_samples,) training data.\n :param y: array-like, shape=(n_samples,) training data.\n :return: Returns an instance of self.\n \"\"\"\n X, y = self.__prepare_input_data(X, y)\n\n if self.shrinkage is not None:\n self.__set_shrinkage_function()\n\n self.group_colnames_ = [str(_) for _ in as_list(self.groups)]\n\n if self.value_columns is not None:\n self.value_colnames_ = [str(_) for _ in as_list(self.value_columns)]\n else:\n self.value_colnames_ = [_ for _ in X.columns if _ not in self.group_colnames_]\n self.__validate(X, y)\n\n # List of all hierarchical subsets of columns\n self.group_colnames_hierarchical_ = expanding_list(self.group_colnames_, list)\n\n self.fallback_ = None\n\n if self.shrinkage is None and self.use_global_model:\n subset_x = X[self.value_colnames_]\n self.fallback_ = clone(self.estimator).fit(subset_x, y)\n\n if self.shrinkage is not None:\n self.estimators_ = {}\n\n for level_colnames in self.group_colnames_hierarchical_:\n self.estimators_.update(\n self.__fit_grouped_estimator(X, y, self.value_colnames_, level_colnames)\n )\n else:\n self.estimators_ = self.__fit_grouped_estimator(X, y, self.value_colnames_, self.group_colnames_)\n\n self.groups_ = as_list(self.estimators_.keys())\n\n if self.shrinkage is not None:\n self.shrinkage_factors_ = self.__get_shrinkage_factor(X)\n\n return self\n\n def __predict_group(self, X, group_colnames):\n \"\"\"Make predictions for all groups\"\"\"\n try:\n return (\n X\n .groupby(group_colnames, as_index=False)\n .apply(lambda d: pd.DataFrame(\n self.estimators_.get(d.name, self.fallback_).predict(d[self.value_colnames_]), index=d.index))\n .values\n .squeeze()\n )\n except AttributeError:\n # Handle new groups\n culprits = (\n set(X[self.group_colnames_].agg(func=tuple, axis=1))\n - set(self.estimators_.keys())\n )\n\n if self.shrinkage is not None and self.use_global_model:\n # Remove the global group from the culprits because the user did not specify\n culprits = {culprit[1:] for culprit in culprits}\n\n raise ValueError(f\"found a group(s) {culprits} in `.predict` that was not in `.fit`\")\n\n def __predict_shrinkage_groups(self, X):\n \"\"\"Make predictions for all shrinkage groups\"\"\"\n # DataFrame with predictions for each hierarchy level, per row. 
Missing groups errors are thrown here.\n hierarchical_predictions = pd.concat([\n pd.Series(self.__predict_group(X, level_columns)) for level_columns in self.group_colnames_hierarchical_\n ], axis=1)\n\n # This is a Series with values the tuples of hierarchical grouping\n prediction_groups = X[self.group_colnames_].agg(func=tuple, axis=1)\n\n # This is a Series of arrays\n shrinkage_factors = prediction_groups.map(self.shrinkage_factors_)\n\n # Convert the Series of arrays it to a DataFrame\n shrinkage_factors = pd.DataFrame.from_dict(shrinkage_factors.to_dict()).T\n\n return (hierarchical_predictions * shrinkage_factors).sum(axis=1)\n\n def predict(self, X):\n \"\"\"\n Predict on new data.\n\n :param X: array-like, shape=(n_columns, n_samples,) training data.\n :return: array, shape=(n_samples,) the predicted data\n \"\"\"\n X = self.__prepare_input_data(X)\n self.__validate(X)\n\n check_is_fitted(self, ['estimators_', 'groups_', 'group_colnames_', 'value_colnames_', 'fallback_'])\n\n if self.shrinkage is None:\n return self.__predict_group(X, group_colnames=self.group_colnames_)\n else:\n return self.__predict_shrinkage_groups(X)\n\n\nclass OutlierRemover(TrainOnlyTransformerMixin, BaseEstimator):\n \"\"\"\n Removes outliers (train-time only) using the supplied removal model.\n\n :param outlier_detector: must implement `fit` and `predict` methods\n :param refit: If True, fits the estimator during pipeline.fit().\n\n \"\"\"\n def __init__(self, outlier_detector, refit=True):\n self.outlier_detector = outlier_detector\n self.refit = refit\n self.estimator_ = None\n\n def fit(self, X, y=None):\n self.estimator_ = clone(self.outlier_detector)\n if self.refit:\n super().fit(X, y)\n self.estimator_.fit(X, y)\n return self\n\n def transform_train(self, X):\n check_is_fitted(self, 'estimator_')\n predictions = self.estimator_.predict(X)\n check_array(predictions, estimator=self.outlier_detector, ensure_2d=False)\n return X[predictions != -1]\n\n\nclass DecayEstimator(BaseEstimator):\n \"\"\"\n Morphs an estimator suchs that the training weights can be\n adapted to ensure that points that are far away have less weight.\n Note that it is up to the user to sort the dataset appropriately.\n This meta estimator will only work for estimators that have a\n \"sample_weights\" argument in their `.fit()` method.\n\n The DecayEstimator will use exponential decay to weight the parameters.\n\n w_{t-1} = decay * w_{t}\n \"\"\"\n\n def __init__(self, model, decay: float = 0.999, decay_func=\"exponential\"):\n self.model = model\n self.decay = decay\n self.func = decay_func\n\n def _is_classifier(self):\n return any(['ClassifierMixin' in p.__name__ for p in type(self.model).__bases__])\n\n def fit(self, X, y):\n \"\"\"\n Fit the data after adapting the same weight.\n\n :param X: array-like, shape=(n_columns, n_samples,) training data.\n :param y: array-like, shape=(n_samples,) training data.\n :return: Returns an instance of self.\n \"\"\"\n X, y = check_X_y(X, y, estimator=self, dtype=FLOAT_DTYPES)\n self.weights_ = np.cumprod(np.ones(X.shape[0]) * self.decay)[::-1]\n self.estimator_ = clone(self.model)\n try:\n self.estimator_.fit(X, y, sample_weight=self.weights_)\n except TypeError as e:\n if \"sample_weight\" in str(e):\n raise TypeError(f\"Model {type(self.model).__name__}.fit() does not have 'sample_weight'\")\n if self._is_classifier():\n self.classes_ = self.estimator_.classes_\n return self\n\n def predict(self, X):\n \"\"\"\n Predict new data.\n\n :param X: array-like, shape=(n_columns, n_samples,) 
training data.\n :return: array, shape=(n_samples,) the predicted data\n \"\"\"\n if self._is_classifier():\n check_is_fitted(self, ['classes_'])\n check_is_fitted(self, ['weights_', 'estimator_'])\n return self.estimator_.predict(X)\n\n def score(self, X, y):\n return self.estimator_.score(X, y)\n\n\nclass Thresholder(BaseEstimator, ClassifierMixin):\n \"\"\"\n Takes a two class estimator and moves the threshold. This way you might\n design the algorithm to only accept a certain class if the probability\n for it is larger than, say, 90% instead of 50%.\n \"\"\"\n\n def __init__(self, model, threshold: float):\n self.model = model\n self.threshold = threshold\n\n def fit(self, X, y):\n \"\"\"\n Fit the data.\n\n :param X: array-like, shape=(n_columns, n_samples,) training data.\n :param y: array-like, shape=(n_samples,) training data.\n :return: Returns an instance of self.\n \"\"\"\n X, y = check_X_y(X, y, estimator=self, dtype=FLOAT_DTYPES)\n self.estimator_ = clone(self.model)\n if not isinstance(self.estimator_, ProbabilisticClassifier):\n raise ValueError(\"The Thresholder meta model only works on classifcation models with .predict_proba.\")\n self.estimator_.fit(X, y)\n self.classes_ = self.estimator_.classes_\n if len(self.classes_) != 2:\n raise ValueError(\"The Thresholder meta model only works on models with two classes.\")\n return self\n\n def predict(self, X):\n \"\"\"\n Predict new data.\n\n :param X: array-like, shape=(n_columns, n_samples,) training data.\n :return: array, shape=(n_samples,) the predicted data\n \"\"\"\n check_is_fitted(self, ['classes_', 'estimator_'])\n predicate = self.estimator_.predict_proba(X)[:, 1] > self.threshold\n return np.where(predicate, self.classes_[1], self.classes_[0])\n\n def predict_proba(self, X):\n check_is_fitted(self, ['classes_', 'estimator_'])\n return self.estimator_.predict_proba(X)\n\n def score(self, X, y):\n return self.estimator_.score(X, y)\n\n\nclass ConfusionBalancer(BaseEstimator, ClassifierMixin, MetaEstimatorMixin):\n \"\"\"\n The ConfusionBalancer attempts to give it's child estimator a more balanced\n output by learning from the confusion matrix during training. The idea is that\n the confusion matrix calculates P(C_i | M_i) where C_i is the actual class and\n M_i is the class that the underlying model gives. We use these probabilities to\n attempt a more balanced prediction by averaging the correction from the confusion\n matrix with the original probabilities.\n\n .. 
math::\n p(\\text{class_j}) = \\alpha p(\\text{model}_j) + (1-\\alpha) p(\\text{class_j} | \\text{model}_j) p(\\text{model}_j)\n\n :param model: a scikit learn compatible classification model that has predict_proba\n :param alpha: a hyperparameter between 0 and 1, determines how much to apply smoothing\n :param cfm_smooth: a smoothing parameter for the confusion matrices to ensure zeros don't exist\n \"\"\"\n def __init__(self, estimator, alpha: float = 0.5, cfm_smooth=0):\n self.estimator = estimator\n self.alpha = alpha\n self.cfm_smooth = cfm_smooth\n\n def fit(self, X, y):\n \"\"\"\n Fit the data.\n\n :param X: array-like, shape=(n_columns, n_samples,) training data.\n :param y: array-like, shape=(n_samples,) training data.\n :return: Returns an instance of self.\n \"\"\"\n X, y = check_X_y(X, y, estimator=self.estimator, dtype=FLOAT_DTYPES)\n if not isinstance(self.estimator, ProbabilisticClassifier):\n raise ValueError(\"The ConfusionBalancer meta model only works on classifcation models with .predict_proba.\")\n self.estimator.fit(X, y)\n self.classes_ = unique_labels(y)\n cfm = confusion_matrix(y, self.estimator.predict(X)).T + self.cfm_smooth\n self.cfm_ = cfm / cfm.sum(axis=1).reshape(-1, 1)\n return self\n\n def predict_proba(self, X):\n \"\"\"\n Predict new data.\n\n :param X: array-like, shape=(n_columns, n_samples,) training data.\n :return: array, shape=(n_samples, n_classes) the predicted data\n \"\"\"\n X = check_array(X, estimator=self, dtype=FLOAT_DTYPES)\n preds = self.estimator.predict_proba(X)\n return (1 - self.alpha) * preds + self.alpha * preds @ self.cfm_\n\n def predict(self, X):\n \"\"\"\n Predict new data, with probabilities\n\n :param X: array-like, shape=(n_columns, n_samples,) training data.\n :return: array, shape=(n_samples,) the predicted data\n \"\"\"\n check_is_fitted(self, ['cfm_', 'classes_'])\n X = check_array(X, estimator=self, dtype=FLOAT_DTYPES)\n return self.classes_[self.predict_proba(X).argmax(axis=1)]\n"
] | [
[
"pandas.isnull",
"numpy.isinf",
"numpy.array",
"sklearn.utils.validation.check_is_fitted",
"sklearn.utils.validation.check_X_y",
"sklearn.utils.multiclass.unique_labels",
"sklearn.utils.validation.check_array",
"pandas.DataFrame",
"sklearn.clone",
"numpy.ones",
"numpy.where",
"pandas.Series"
]
] |
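The meta-estimators in the file above (OutlierRemover, DecayEstimator, Thresholder, ConfusionBalancer) all follow the scikit-learn fit/predict contract. Below is a minimal usage sketch for Thresholder and DecayEstimator, assuming the file is importable as `meta_models` (a hypothetical module name) and scikit-learn is installed; it is not part of the archived source.

import numpy as np
from sklearn.linear_model import LogisticRegression

from meta_models import Thresholder, DecayEstimator  # hypothetical import path for the file above

rng = np.random.RandomState(0)
X = rng.normal(size=(200, 3))
y = (X[:, 0] + 0.1 * rng.normal(size=200) > 0).astype(int)

# Only predict the positive class (classes_[1]) when its probability exceeds 0.9 instead of 0.5.
clf = Thresholder(LogisticRegression(), threshold=0.9).fit(X, y)
print(clf.predict(X[:5]))

# Give exponentially less weight to earlier rows; as the docstring notes,
# the data is assumed to be sorted appropriately (oldest first).
decayed = DecayEstimator(LogisticRegression(), decay=0.999).fit(X, y)
print(decayed.predict(X[:5]))

Because both wrappers subclass BaseEstimator and expose predict (and score) on the fitted inner model, they should drop into pipelines or grid searches like any other scikit-learn estimator.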
Iamgoofball/VocoderComparisons | [
"3dfc5cb604ccf3756321e2cdf9934aa933314145"
] | [
"repos/hifi-gan/models.py"
] | [
"import torch\nimport torch.nn.functional as F\nimport torch.nn as nn\nfrom torch.nn import Conv1d, ConvTranspose1d, AvgPool1d, Conv2d\nfrom torch.nn.utils import weight_norm, remove_weight_norm, spectral_norm\nfrom utils import init_weights, get_padding\n\nLRELU_SLOPE = 0.1\n\n\nclass ResBlock1(torch.nn.Module):\n def __init__(self, h, channels, kernel_size=3, dilation=(1, 3, 5)):\n super(ResBlock1, self).__init__()\n self.h = h\n self.convs1 = nn.ModuleList([\n weight_norm(Conv1d(channels, channels, kernel_size, 1, dilation=dilation[0],\n padding=get_padding(kernel_size, dilation[0]))),\n weight_norm(Conv1d(channels, channels, kernel_size, 1, dilation=dilation[1],\n padding=get_padding(kernel_size, dilation[1]))),\n weight_norm(Conv1d(channels, channels, kernel_size, 1, dilation=dilation[2],\n padding=get_padding(kernel_size, dilation[2])))\n ])\n self.convs1.apply(init_weights)\n\n self.convs2 = nn.ModuleList([\n weight_norm(Conv1d(channels, channels, kernel_size, 1, dilation=1,\n padding=get_padding(kernel_size, 1))),\n weight_norm(Conv1d(channels, channels, kernel_size, 1, dilation=1,\n padding=get_padding(kernel_size, 1))),\n weight_norm(Conv1d(channels, channels, kernel_size, 1, dilation=1,\n padding=get_padding(kernel_size, 1)))\n ])\n self.convs2.apply(init_weights)\n\n def forward(self, x):\n for c1, c2 in zip(self.convs1, self.convs2):\n xt = F.leaky_relu(x, LRELU_SLOPE)\n xt = c1(xt)\n xt = F.leaky_relu(xt, LRELU_SLOPE)\n xt = c2(xt)\n x = xt + x\n return x\n\n def remove_weight_norm(self):\n for l in self.convs1:\n remove_weight_norm(l)\n for l in self.convs2:\n remove_weight_norm(l)\n\n\nclass ResBlock2(torch.nn.Module):\n def __init__(self, h, channels, kernel_size=3, dilation=(1, 3)):\n super(ResBlock2, self).__init__()\n self.h = h\n self.convs = nn.ModuleList([\n weight_norm(Conv1d(channels, channels, kernel_size, 1, dilation=dilation[0],\n padding=get_padding(kernel_size, dilation[0]))),\n weight_norm(Conv1d(channels, channels, kernel_size, 1, dilation=dilation[1],\n padding=get_padding(kernel_size, dilation[1])))\n ])\n self.convs.apply(init_weights)\n\n def forward(self, x):\n for c in self.convs:\n xt = F.leaky_relu(x, LRELU_SLOPE)\n xt = c(xt)\n x = xt + x\n return x\n\n def remove_weight_norm(self):\n for l in self.convs:\n remove_weight_norm(l)\n\n\nclass Generator(torch.nn.Module):\n def __init__(self, h):\n super(Generator, self).__init__()\n self.h = h\n self.num_kernels = len(h.resblock_kernel_sizes)\n self.num_upsamples = len(h.upsample_rates)\n self.conv_pre = weight_norm(Conv1d(80, h.upsample_initial_channel, 7, 1, padding=3))\n resblock = ResBlock1 if h.resblock == '1' else ResBlock2\n\n self.ups = nn.ModuleList()\n for i, (u, k) in enumerate(zip(h.upsample_rates, h.upsample_kernel_sizes)):\n self.ups.append(weight_norm(\n ConvTranspose1d(h.upsample_initial_channel//(2**i), h.upsample_initial_channel//(2**(i+1)),\n k, u, padding=(k-u)//2)))\n\n self.resblocks = nn.ModuleList()\n for i in range(len(self.ups)):\n ch = h.upsample_initial_channel//(2**(i+1))\n for j, (k, d) in enumerate(zip(h.resblock_kernel_sizes, h.resblock_dilation_sizes)):\n self.resblocks.append(resblock(h, ch, k, d))\n\n self.conv_post = weight_norm(Conv1d(ch, 1, 7, 1, padding=3))\n self.ups.apply(init_weights)\n self.conv_post.apply(init_weights)\n\n def forward(self, x):\n x = self.conv_pre(x)\n for i in range(self.num_upsamples):\n x = F.leaky_relu(x, LRELU_SLOPE)\n x = self.ups[i](x)\n xs = None\n for j in range(self.num_kernels):\n if xs is None:\n xs = 
self.resblocks[i*self.num_kernels+j](x)\n else:\n xs += self.resblocks[i*self.num_kernels+j](x)\n x = xs / self.num_kernels\n x = F.leaky_relu(x)\n x = self.conv_post(x)\n x = torch.tanh(x)\n\n return x\n\n def remove_weight_norm(self):\n print('Removing weight norm...')\n for l in self.ups:\n remove_weight_norm(l)\n for l in self.resblocks:\n l.remove_weight_norm()\n remove_weight_norm(self.conv_pre)\n remove_weight_norm(self.conv_post)\n\n\nclass DiscriminatorP(torch.nn.Module):\n def __init__(self, period, kernel_size=5, stride=3, use_spectral_norm=False):\n super(DiscriminatorP, self).__init__()\n self.period = period\n norm_f = weight_norm if use_spectral_norm == False else spectral_norm\n self.convs = nn.ModuleList([\n norm_f(Conv2d(1, 32, (kernel_size, 1), (stride, 1), padding=(get_padding(5, 1), 0))),\n norm_f(Conv2d(32, 128, (kernel_size, 1), (stride, 1), padding=(get_padding(5, 1), 0))),\n norm_f(Conv2d(128, 512, (kernel_size, 1), (stride, 1), padding=(get_padding(5, 1), 0))),\n norm_f(Conv2d(512, 1024, (kernel_size, 1), (stride, 1), padding=(get_padding(5, 1), 0))),\n norm_f(Conv2d(1024, 1024, (kernel_size, 1), 1, padding=(2, 0))),\n ])\n self.conv_post = norm_f(Conv2d(1024, 1, (3, 1), 1, padding=(1, 0)))\n\n def forward(self, x):\n fmap = []\n\n # 1d to 2d\n b, c, t = x.shape\n if t % self.period != 0: # pad first\n n_pad = self.period - (t % self.period)\n x = F.pad(x, (0, n_pad), \"reflect\")\n t = t + n_pad\n x = x.view(b, c, t // self.period, self.period)\n\n for l in self.convs:\n x = l(x)\n x = F.leaky_relu(x, LRELU_SLOPE)\n fmap.append(x)\n x = self.conv_post(x)\n fmap.append(x)\n x = torch.flatten(x, 1, -1)\n\n return x, fmap\n\n\nclass MultiPeriodDiscriminator(torch.nn.Module):\n def __init__(self, periods=None):\n super(MultiPeriodDiscriminator, self).__init__()\n self.periods = periods if periods is not None else [2, 3, 5, 7, 11]\n self.discriminators = nn.ModuleList()\n for period in self.periods:\n self.discriminators.append(DiscriminatorP(period))\n\n def forward(self, y, y_hat):\n y_d_rs = []\n y_d_gs = []\n fmap_rs = []\n fmap_gs = []\n for i, d in enumerate(self.discriminators):\n y_d_r, fmap_r = d(y)\n y_d_g, fmap_g = d(y_hat)\n y_d_rs.append(y_d_r)\n fmap_rs.append(fmap_r)\n y_d_gs.append(y_d_g)\n fmap_gs.append(fmap_g)\n\n return y_d_rs, y_d_gs, fmap_rs, fmap_gs\n\n\nclass DiscriminatorS(torch.nn.Module):\n def __init__(self, use_spectral_norm=False):\n super(DiscriminatorS, self).__init__()\n norm_f = weight_norm if use_spectral_norm == False else spectral_norm\n self.convs = nn.ModuleList([\n norm_f(Conv1d(1, 128, 15, 1, padding=7)),\n norm_f(Conv1d(128, 128, 41, 2, groups=4, padding=20)),\n norm_f(Conv1d(128, 256, 41, 2, groups=16, padding=20)),\n norm_f(Conv1d(256, 512, 41, 4, groups=16, padding=20)),\n norm_f(Conv1d(512, 1024, 41, 4, groups=16, padding=20)),\n norm_f(Conv1d(1024, 1024, 41, 1, groups=16, padding=20)),\n norm_f(Conv1d(1024, 1024, 5, 1, padding=2)),\n ])\n self.conv_post = norm_f(Conv1d(1024, 1, 3, 1, padding=1))\n\n def forward(self, x):\n fmap = []\n for l in self.convs:\n x = l(x)\n x = F.leaky_relu(x, LRELU_SLOPE)\n fmap.append(x)\n x = self.conv_post(x)\n fmap.append(x)\n x = torch.flatten(x, 1, -1)\n\n return x, fmap\n\n\nclass MultiScaleDiscriminator(torch.nn.Module):\n def __init__(self):\n super(MultiScaleDiscriminator, self).__init__()\n self.discriminators = nn.ModuleList([\n DiscriminatorS(use_spectral_norm=True),\n DiscriminatorS(),\n DiscriminatorS(),\n ])\n self.meanpools = nn.ModuleList([\n AvgPool1d(4, 2, padding=2),\n 
AvgPool1d(4, 2, padding=2)\n ])\n\n def forward(self, y, y_hat):\n y_d_rs = []\n y_d_gs = []\n fmap_rs = []\n fmap_gs = []\n for i, d in enumerate(self.discriminators):\n if i != 0:\n y = self.meanpools[i-1](y)\n y_hat = self.meanpools[i-1](y_hat)\n y_d_r, fmap_r = d(y)\n y_d_g, fmap_g = d(y_hat)\n y_d_rs.append(y_d_r)\n fmap_rs.append(fmap_r)\n y_d_gs.append(y_d_g)\n fmap_gs.append(fmap_g)\n\n return y_d_rs, y_d_gs, fmap_rs, fmap_gs\n\n\ndef feature_loss(fmap_r, fmap_g):\n loss = 0\n for dr, dg in zip(fmap_r, fmap_g):\n for rl, gl in zip(dr, dg):\n loss += torch.mean(torch.abs(rl - gl))\n\n return loss*2\n\n\ndef discriminator_loss(disc_real_outputs, disc_generated_outputs):\n loss = 0\n r_losses = []\n g_losses = []\n for dr, dg in zip(disc_real_outputs, disc_generated_outputs):\n r_loss = torch.mean((1-dr)**2)\n g_loss = torch.mean(dg**2)\n loss += (r_loss + g_loss)\n r_losses.append(r_loss.item())\n g_losses.append(g_loss.item())\n\n return loss, r_losses, g_losses\n\n\ndef generator_loss(disc_outputs):\n loss = 0\n gen_losses = []\n for dg in disc_outputs:\n l = torch.mean((1-dg)**2)\n gen_losses.append(l)\n loss += l\n\n return loss, gen_losses\n\n"
] | [
[
"torch.nn.AvgPool1d",
"torch.flatten",
"torch.nn.ModuleList",
"torch.nn.Conv1d",
"torch.nn.utils.remove_weight_norm",
"torch.nn.ConvTranspose1d",
"torch.abs",
"torch.nn.Conv2d",
"torch.tanh",
"torch.nn.functional.pad",
"torch.mean",
"torch.nn.functional.leaky_relu"
]
] |
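For the HiFi-GAN file above, inference reduces to pushing a mel spectrogram through `Generator`. A minimal sketch follows, assuming the repository's `models` and `utils` modules are importable; the hyperparameters are illustrative values shaped like the usual v1 config, not read from this repo's config files.

import torch
from types import SimpleNamespace

from models import Generator  # the file above; it also needs the repo's `utils` module on the path

# Illustrative hyperparameters only; real values normally come from the repo's JSON config.
h = SimpleNamespace(
    resblock='1',
    upsample_rates=[8, 8, 2, 2],
    upsample_kernel_sizes=[16, 16, 4, 4],
    upsample_initial_channel=512,
    resblock_kernel_sizes=[3, 7, 11],
    resblock_dilation_sizes=[[1, 3, 5], [1, 3, 5], [1, 3, 5]],
)

generator = Generator(h).eval()
mel = torch.randn(1, 80, 100)   # (batch, 80 mel bins, frames); conv_pre expects 80 input channels
with torch.no_grad():
    audio = generator(mel)      # (batch, 1, frames * prod(upsample_rates)) -> here (1, 1, 25600)
print(audio.shape)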
Data-Designer/Informer2020 | [
"03ac6f51a11fe1af5caeabcbc0b96340c66b7fe3"
] | [
"models/model.py"
] | [
"import torch\r\nimport torch.nn as nn\r\nimport torch.nn.functional as F\r\n\r\nfrom utils.masking import TriangularCausalMask, ProbMask\r\nfrom models.encoder import Encoder, EncoderLayer, ConvLayer, EncoderStack\r\nfrom models.decoder import Decoder, DecoderLayer\r\nfrom models.attn import FullAttention, ProbAttention, AttentionLayer\r\nfrom models.embed import DataEmbedding\r\n\r\nclass Informer(nn.Module):\r\n def __init__(self, enc_in, dec_in, c_out, seq_len, label_len, out_len, \r\n factor=5, d_model=512, n_heads=8, e_layers=3, d_layers=2, d_ff=512, \r\n dropout=0.0, attn='prob', embed='fixed', freq='h', activation='gelu', \r\n output_attention = False, distil=True,\r\n device=torch.device('cuda:0')):\r\n super(Informer, self).__init__()\r\n self.pred_len = out_len\r\n self.attn = attn\r\n self.output_attention = output_attention\r\n\r\n # Encoding\r\n self.enc_embedding = DataEmbedding(enc_in, d_model, embed, freq, dropout)\r\n self.dec_embedding = DataEmbedding(dec_in, d_model, embed, freq, dropout)\r\n # Attention\r\n Attn = ProbAttention if attn=='prob' else FullAttention\r\n # Encoder\r\n self.encoder = Encoder(\r\n [\r\n EncoderLayer(\r\n AttentionLayer(Attn(False, factor, attention_dropout=dropout, output_attention=output_attention), \r\n d_model, n_heads),\r\n d_model,\r\n d_ff,\r\n dropout=dropout,\r\n activation=activation\r\n ) for l in range(e_layers)\r\n ],\r\n [\r\n ConvLayer(\r\n d_model\r\n ) for l in range(e_layers-1)\r\n ] if distil else None,\r\n norm_layer=torch.nn.LayerNorm(d_model)\r\n )\r\n # Decoder\r\n self.decoder = Decoder(\r\n [\r\n DecoderLayer(\r\n AttentionLayer(Attn(True, factor, attention_dropout=dropout, output_attention=False), \r\n d_model, n_heads),\r\n AttentionLayer(FullAttention(False, factor, attention_dropout=dropout, output_attention=False), \r\n d_model, n_heads),\r\n d_model,\r\n d_ff,\r\n dropout=dropout,\r\n activation=activation,\r\n )\r\n for l in range(d_layers)\r\n ],\r\n norm_layer=torch.nn.LayerNorm(d_model)\r\n )\r\n # self.end_conv1 = nn.Conv1d(in_channels=label_len+out_len, out_channels=out_len, kernel_size=1, bias=True)\r\n # self.end_conv2 = nn.Conv1d(in_channels=d_model, out_channels=c_out, kernel_size=1, bias=True)\r\n self.projection = nn.Linear(d_model, c_out, bias=True)\r\n \r\n def forward(self, x_enc, x_mark_enc, x_dec, x_mark_dec, \r\n enc_self_mask=None, dec_self_mask=None, dec_enc_mask=None):\r\n enc_out = self.enc_embedding(x_enc, x_mark_enc)\r\n enc_out, attns = self.encoder(enc_out, attn_mask=enc_self_mask)\r\n\r\n dec_out = self.dec_embedding(x_dec, x_mark_dec)\r\n dec_out = self.decoder(dec_out, enc_out, x_mask=dec_self_mask, cross_mask=dec_enc_mask)\r\n dec_out = self.projection(dec_out)\r\n \r\n # dec_out = self.end_conv1(dec_out)\r\n # dec_out = self.end_conv2(dec_out.transpose(2,1)).transpose(1,2)\r\n if self.output_attention:\r\n return dec_out[:,-self.pred_len:,:], attns\r\n else:\r\n return dec_out[:,-self.pred_len:,:] # [B, L, D]\r\n\r\n\r\nclass InformerStack(nn.Module):\r\n '''这个好像没得差别,只是这个可以堆叠Informer的Encoder和Decoder的数量'''\r\n def __init__(self, enc_in, dec_in, c_out, seq_len, label_len, out_len, \r\n factor=5, d_model=512, n_heads=8, e_layers=3, d_layers=2, d_ff=512, \r\n dropout=0.0, attn='prob', embed='fixed', freq='h', activation='gelu',\r\n output_attention = False, distil=True,\r\n device=torch.device('cuda:0')):\r\n super(InformerStack, self).__init__()\r\n self.pred_len = out_len\r\n self.attn = attn\r\n self.output_attention = output_attention\r\n\r\n # Encoding\r\n self.enc_embedding = 
DataEmbedding(enc_in, d_model, embed, freq, dropout)\r\n self.dec_embedding = DataEmbedding(dec_in, d_model, embed, freq, dropout)\r\n # Attention\r\n Attn = ProbAttention if attn=='prob' else FullAttention\r\n # Encoder\r\n\r\n stacks = list(range(e_layers, 2, -1)) # you can customize here\r\n encoders = [\r\n Encoder(\r\n [\r\n EncoderLayer(\r\n AttentionLayer(Attn(False, factor, attention_dropout=dropout, output_attention=output_attention), \r\n d_model, n_heads),\r\n d_model,\r\n d_ff,\r\n dropout=dropout,\r\n activation=activation\r\n ) for l in range(el)\r\n ],\r\n [\r\n ConvLayer(\r\n d_model\r\n ) for l in range(el-1)\r\n ] if distil else None,\r\n norm_layer=torch.nn.LayerNorm(d_model)\r\n ) for el in stacks]\r\n self.encoder = EncoderStack(encoders) # 上面之所以用列表存贮,因为这里进行了特殊处理,不然最好使用ModuleList\r\n # Decoder\r\n self.decoder = Decoder(\r\n [\r\n DecoderLayer(\r\n AttentionLayer(Attn(True, factor, attention_dropout=dropout, output_attention=False), \r\n d_model, n_heads),\r\n AttentionLayer(FullAttention(False, factor, attention_dropout=dropout, output_attention=False), \r\n d_model, n_heads),\r\n d_model,\r\n d_ff,\r\n dropout=dropout,\r\n activation=activation,\r\n )\r\n for l in range(d_layers)\r\n ],\r\n norm_layer=torch.nn.LayerNorm(d_model)\r\n )\r\n # self.end_conv1 = nn.Conv1d(in_channels=label_len+out_len, out_channels=out_len, kernel_size=1, bias=True)\r\n # self.end_conv2 = nn.Conv1d(in_channels=d_model, out_channels=c_out, kernel_size=1, bias=True)\r\n self.projection = nn.Linear(d_model, c_out, bias=True) # 该全连接层用于输出最后的L_token+ prediction\r\n \r\n def forward(self, x_enc, x_mark_enc, x_dec, x_mark_dec, \r\n enc_self_mask=None, dec_self_mask=None, dec_enc_mask=None):\r\n enc_out = self.enc_embedding(x_enc, x_mark_enc)\r\n enc_out, attns = self.encoder(enc_out, attn_mask=enc_self_mask)\r\n\r\n dec_out = self.dec_embedding(x_dec, x_mark_dec)\r\n dec_out = self.decoder(dec_out, enc_out, x_mask=dec_self_mask, cross_mask=dec_enc_mask) # decoder的输入\r\n dec_out = self.projection(dec_out)\r\n \r\n # dec_out = self.end_conv1(dec_out)\r\n # dec_out = self.end_conv2(dec_out.transpose(2,1)).transpose(1,2)\r\n if self.output_attention:\r\n return dec_out[:,-self.pred_len:,:], attns\r\n else:\r\n return dec_out[:,-self.pred_len:,:] # [B, L, D],batch,prediction,Dimension\r\n"
] | [
[
"torch.nn.Linear",
"torch.device",
"torch.nn.LayerNorm"
]
] |
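The Informer forward pass above takes encoder/decoder value tensors plus their time-feature marks. A minimal shape-check sketch follows, assuming the repository's package layout is importable and that `embed='timeF'` with `freq='h'` corresponds to 4 time features in the repo's `DataEmbedding` (an assumption about a module not shown here); all sizes are illustrative.

import torch

from models.model import Informer  # the file above, inside the repository's package layout

seq_len, label_len, pred_len = 96, 48, 24
model = Informer(enc_in=7, dec_in=7, c_out=7,
                 seq_len=seq_len, label_len=label_len, out_len=pred_len,
                 d_model=64, n_heads=4, e_layers=2, d_layers=1, d_ff=128,
                 attn='prob', embed='timeF', freq='h',
                 device=torch.device('cpu'))

batch = 2
x_enc = torch.randn(batch, seq_len, 7)                    # encoder values
x_mark_enc = torch.randn(batch, seq_len, 4)               # encoder time features (assumed 4 for freq='h')
x_dec = torch.randn(batch, label_len + pred_len, 7)       # label-length tokens followed by placeholders
x_mark_dec = torch.randn(batch, label_len + pred_len, 4)  # decoder time features

out = model(x_enc, x_mark_enc, x_dec, x_mark_dec)         # (batch, pred_len, c_out)
print(out.shape)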
alperyeg/spiking_GANs | [
"9249093db043c0b41eb1e935dd09f5d415307ca5"
] | [
"src/STP_generation.py"
] | [
"# -*- coding: utf-8 -*-\nimport stocmod as stoc\nimport quantities as pq\nimport time\nimport numpy as np\nimport neo\nimport random\n\nt0 = time.time()\n\n\ndef generate_stp(occurr, xi, t_stop, delays, t_start=0 * pq.s):\n '''\n Generate a spatio-temporal-pattern (STP). One pattern consists in a\n repeated sequence of spikes with fixed inter spikes intervals (delays).\n The starting time of the repetitions of the pattern are randomly generated.\n '''\n # Generating all the first spikes of the repetitions\n s1 = np.sort(\n np.random.uniform(\n high=(t_stop - t_start - delays[-1]).magnitude, size=occurr))\n\n # Using matrix algebra to add all the delays\n s1_matr = (s1 * np.ones([xi - 1, occurr])).T\n delays_matr = np.ones(\n [occurr, 1]) * delays.rescale(t_stop.units).magnitude.reshape(\n [1, xi - 1])\n ss = s1_matr + delays_matr\n\n # Stacking the first and successive spikes\n stp = np.hstack((s1.reshape(occurr, 1), ss))\n\n # Transofm in to neo SpikeTrain\n stp = [\n neo.core.SpikeTrain(\n t * t_stop.units + t_start, t_stop, t_start=t_start) for t in\n stp.T]\n return stp\n\n\ndef generate_sts(data_type, N=100, T=1000 * pq.ms, sampl_period=10 * pq.ms):\n \"\"\"\n Generate a list of parallel spike trains with different statistics.\n\n The data are composed of background spiking activity plus possibly\n a repeated sequence of synchronous events (SSE).\n The background activity depends on the value of data_type.\n The size and occurrence count of the SSE is specified by sse_params.\n\n Parameters\n ----------\n data_type : int\n An integer specifying the type of background activity.\n At the moment the following types of background activity are\n supported (note: homog = across neurons; stat = over time):\n 0 | 3 : 100 indep Poisson | Gamma(5), homog, stat (15 Hz)\n 7 indep Poisson, homog, stat (20 Hz)\n 8 indep Poisson, homog, stat (25 Hz)\n 1 | 4 : 100 indep Poisson | Gamma(5), homog, nonstat-step (10/60/10 Hz)\n 2 | 5 : 100 indep Poisson | Gamma(5), heterog (5->25 Hz), stat\n 6 : 100 indep Poisson, rate increase with latency variability\n 9 : 100 indep Poisson, heterog, nonstat: 14/100/14 Hz+0.2*i Hz, i=1->100\n\n N: int (optional; works only for data_type = 0 !!!)\n total number of neurons in the model. The default is N=100.\n T: quantity.Quantity\n Simulation time. Default is 1000 * pq.ms\n sampl_period: quantity.Quantity\n Sampling period of the rate profile. 
Default is 10 * pq.ms\n\n Returns\n ------\n sts : list of SpikeTrains\n a list of spike trains\n params : dict\n a dictionary of simulation parameters (to be enriched...)\n\n \"\"\"\n params = {'nr_neurons': N, 'simul_time': T}\n\n # Indep Poisson / Gamma(5), homog, stat (15 Hz)\n if data_type == 0 or data_type == 3:\n rate = 15 * pq.Hz\n shape = 1 if data_type == 0 else 5\n # Define a rate profile\n sts = stoc.gamma_thinning(\n rate=rate, shape=shape, t_stop=T, N=N)\n # Indep Poisson, homog, stat (20 Hz)\n elif data_type == 7:\n rate = 20 * pq.Hz\n shape = 1\n # Define a rate profile\n sts = stoc.gamma_thinning(\n rate=rate, shape=shape, t_stop=T, N=N)\n # Indep Poisson, homog, stat (25 Hz)\n elif data_type == 8:\n rate = 25 * pq.Hz\n shape = 1\n # Define a rate profile\n sts = stoc.gamma_thinning(\n rate=rate, shape=shape, t_stop=T, N=N)\n\n # Indep Poisson / Gamma(5), homog, nonstat-step (10/60/10 Hz)\n elif data_type == 1 or data_type == 4:\n shape = 1 if data_type == 1 else 5 # regularity parameter for Gamma\n a0, a1 = 10 * pq.Hz, 60 * pq.Hz # baseline and transient rates\n t1, t2 = 600 * pq.ms, 700 * pq.ms # time segment of transient rate\n\n # Define a rate profile\n times = sampl_period.units * np.arange(\n 0, T.rescale(sampl_period.units).magnitude, sampl_period.magnitude)\n\n rate_profile = np.zeros(times.shape)\n rate_profile[np.any([times < t1, times > t2], axis=0)] = a0.magnitude\n rate_profile[np.all([times >= t1, times <= t2], axis=0)] = a1.magnitude\n rate_profile = rate_profile * a0.units\n rate_profile = neo.AnalogSignal(\n rate_profile, sampling_period=sampl_period)\n sts = stoc.gamma_nonstat_rate(\n rate_profile, shape=shape, N=N)\n\n # Indep Poisson / Gamma(5), heterog (5->15 Hz), stat\n elif data_type == 2 or data_type == 5:\n rate_min = 5 * pq.Hz # min rate. 
Ensures that there is >=1 spike\n rate_max = 25 * pq.Hz # max rate\n rate = np.linspace(rate_min.magnitude, rate_max.magnitude, N) * pq.Hz\n shape = 1 if data_type == 2 else 5 # regularity parameter for Gamma\n\n # Define a rate profile\n sts = stoc.gamma_thinning(rate=rate, shape=shape, t_stop=T, N=N)\n random.shuffle(sts)\n # Indep Poisson, latency variability\n elif data_type == 6:\n\n l = 20 # 20 groups of neurons\n w = 5 # of 5 neurons each\n t0 = 50 * pq.ms # the first of which increases the rate at time t0\n t00 = 500 * pq.ms # and again at time t00\n ratechange_dur = 5 * pq.ms # old: 10ms # for a short period\n a0, a1 = 14 * pq.Hz, 100 * pq.Hz # old: 10/60 Hz; from rate a0 to a1\n ratechange_delay = 5 * pq.ms # old: 10ms; followed with delay by next group\n\n # Define a rate profile\n times = sampl_period.units * np.arange(\n 0, T.rescale(sampl_period.units).magnitude, sampl_period.magnitude)\n sts = []\n rate_profiles = []\n for i in range(N):\n t1 = t0 + (i // w) * ratechange_delay\n t2 = t1 + ratechange_dur\n t11 = t00 + (i // w) * ratechange_delay\n t22 = t11 + ratechange_dur\n # print t1, t2, t11, t22\n rate_profile = np.zeros(times.shape)\n rate_profile[np.any([times < t1, times > t2], axis=0)] = \\\n a0.magnitude\n rate_profile[np.all([times >= t1, times <= t2], axis=0)] = \\\n a1.magnitude\n rate_profile[np.all([times >= t11, times <= t22], axis=0)] = \\\n a1.magnitude\n rate_profile = rate_profile * a0.units\n rate_profile = neo.AnalogSignal(\n rate_profile, sampling_period=sampl_period)\n # print np.where(np.diff(rate_profile)>0*pq.Hz)\n sts += stoc.poisson_nonstat(rate_profile, N=1)\n\n # Indep Poisson, heterog, nonstat: 10/60/10 Hz + .05 * i, i=-50,...,50\n elif data_type == 9:\n # Define a rate profile\n times = sampl_period.units * np.arange(\n 0, T.rescale(sampl_period.units).magnitude, sampl_period.magnitude)\n\n a0, a1 = 10 * pq.Hz, 60 * pq.Hz # avg baseline and transient rates\n t1, t2 = 600 * pq.ms, 700 * pq.ms # time segment of transient rate\n minrate = 5 * pq.Hz\n drates = np.linspace( # dev of each train from avg baseline\n minrate - a0, a0 - minrate, N)\n rate_profile = np.zeros(times.shape)\n rate_profile[np.any([times < t1, times > t2], axis=0)] = a0.magnitude\n rate_profile[np.all([times >= t1, times <= t2], axis=0)] = a1.magnitude\n rate_profile = rate_profile * a0.units\n rate_profile = neo.AnalogSignal(\n rate_profile, sampling_period=sampl_period) # avg rate profile\n rate_profiles = [rate_profile + dr for dr in drates] # each profile\n sts = [stoc.poisson_nonstat_thinning(rate_profiles[i], N=1,\n cont_sign_method='step')[0] for i\n in range(N)]\n\n else:\n raise ValueError(\n 'data_type %d not supported. Provide int from 0 to 10' % data_type)\n return sts, params\n"
] | [
[
"numpy.zeros",
"numpy.ones",
"numpy.any",
"numpy.random.uniform",
"numpy.all",
"numpy.linspace"
]
] |
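Both generators above return lists of `neo.SpikeTrain` objects. A minimal sketch of calling them follows, assuming the file is importable as `STP_generation` together with its local `stocmod` dependency (neither shown here).

import quantities as pq

from STP_generation import generate_sts, generate_stp  # the module above; also needs its local `stocmod` import

# 100 independent, stationary 15 Hz Poisson trains, 1 s long (data_type=0).
sts, params = generate_sts(data_type=0, N=100, T=1000 * pq.ms)
print(len(sts), params)

# A 3-spike pattern (xi=3) repeated 5 times; the 2nd and 3rd spikes trail the first
# by fixed delays of 5 ms and 10 ms (delays must have length xi - 1).
stp = generate_stp(occurr=5, xi=3, t_stop=1000 * pq.ms, delays=[5, 10] * pq.ms)
print([len(train) for train in stp])   # xi spike trains, each containing `occurr` spikes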
davidmashburn/mtm_stats | [
"6808d37c84716b41a356eab75189d3e4ffc98451"
] | [
"mtm_stats/mtm_stats.py"
] | [
"'''The main script'''\nfrom __future__ import print_function\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom builtins import range\nfrom future.utils import viewitems\n\n# To update with any Cython changes, just run:\n# python setup.py build_ext --inplace\n\nimport numpy as np\nfrom .sparse_block_array import sba_compress_64, sba_compress_64_index_list\nfrom . import cy_mtm_stats\n\ndef extract_sets_from_connections(connections):\n '''Get two sorted array sets from the connections tuples,\n one for the first elements and one for the second''' \n setA = np.array(sorted({i[0] for i in connections}))\n setB = np.array(sorted({i[1] for i in connections}))\n return setA, setB\n\ndef convert_connections_to_binary(connections, setA, setB):\n '''connections is a many-to-many mapping from set A to set B\n Returns a binary matrix where each item in set B gets mapped to a single bit and each item in set A gets a row of these bits'''\n mappingA = {p: i for i, p in enumerate(setA)}\n mappingB = {p: i for i, p in enumerate(setB)}\n \n lenB64 = int(np.ceil(len(setB) / 64))\n output = np.zeros((len(setA), lenB64), np.uint64)\n for a, b in connections:\n ia = mappingA[a]\n ib = mappingB[b]\n output[ia, ib // 64] |= np.uint64(1 << (ib % 64))\n return output\n\ndef get_grouped_indices(connections, mappingA, mappingB):\n grouped = {}\n for a, b in connections:\n grouped.setdefault(mappingA[a],[]).append(mappingB[b])\n return grouped\n\ndef convert_connections_to_sba_list_space_efficient(connections, setA, setB, chunk_length_64):\n '''connections is a many-to-many mapping from set A to set B\n Returns a list of SBA compressed binary arrays where each item in set B gets mapped to a single bit and each item in set A gets a row of these bits'''\n mappingA = {p: i for i, p in enumerate(setA)}\n mappingB = {p: i for i, p in enumerate(setB)}\n \n lenB64 = int(np.ceil(len(setB) / 64))\n tmp_arr = np.empty(lenB64, np.uint64)\n grouped = get_grouped_indices(connections, mappingA, mappingB)\n sba_list = [None] * len(setA)\n for ia, ib_list in viewitems(grouped):\n sba_list[ia] = sba_compress_64_index_list(ib_list, tmp_arr, chunk_length_64)\n \n return sba_list\n\ndef _mtm_common(connections, chunk_length_64=1, dense_input=False):\n '''Common setup for static and partitioned-generator variants of\n mtm_stats\n There are three steps:\n * extracts the two sets\n * converts the connections to binary\n * compute the base counts\n \n Returns setA, setB, base_counts, and rows\n \"rows\" will be either a 2d rows_arr (when dense_input=True)\n or a sba_list (dense_input=False, DEFAULT)\n \n This is the data needed to perform the more expensive\n intersection counts calculation and the post-process union counts'''\n \n setA, setB = extract_sets_from_connections(connections)\n \n if dense_input:\n rows_arr = convert_connections_to_binary(connections, setA, setB)\n base_counts = cy_mtm_stats.cy_compute_counts_dense_input(rows_arr)\n rows = rows_arr\n else:\n sba_list = convert_connections_to_sba_list_space_efficient(connections, setA, setB, chunk_length_64)\n base_counts = cy_mtm_stats.cy_compute_counts(sba_list, chunk_length_64)\n rows = sba_list\n \n return setA, setB, base_counts, rows\n\ndef _mtm_intersection_counts(rows, chunk_length_64=1, indices_a=None, cutoff=0, start_j=0, upper_only=True, dense_input=False):\n '''The function that actually calls into cython for the intersection_counts\n Return the intersection_counts_list'''\n \n if dense_input:\n rows_arr = rows\n intersection_counts_list = 
cy_mtm_stats.cy_compute_intersection_counts_dense_input(rows_arr, indices_a, cutoff, start_j, upper_only)\n else:\n sba_list = rows\n intersection_counts_list = cy_mtm_stats.cy_compute_intersection_counts(sba_list, chunk_length_64, indices_a, cutoff, start_j, upper_only)\n \n return intersection_counts_list\n\ndef mtm_stats_raw(connections, chunk_length_64=1, indices_a=None, cutoff=0, start_j=0, upper_only=True, dense_input=False):\n '''The function that actually calls into cython\n Produces the sets from the connections,\n converts the connection to binary and compresses them into sba's\n and then performs the actual counts\n Returns:\n setA, setB, base_counts, intersection_counts_list'''\n \n setA, setB, base_counts, rows = _mtm_common(connections, chunk_length_64, dense_input)\n intersection_counts_list = _mtm_intersection_counts(rows, chunk_length_64, indices_a, cutoff, start_j, upper_only, dense_input)\n return setA, setB, base_counts, intersection_counts_list\n\ndef _partition_range(x, n):\n '''Return a generator for a series of chunked ranges that end at x\n partition_range(x, 1) <==> range(x)\n Examples:\n list(partition_range(19, 10)) -> [range(0, 10), range(10, 19)]\n list(partition_range(20, 10)) -> [range(0, 10), range(10, 20)]\n list(partition_range(21, 10)) -> [range(0, 10), range(10, 20), range(20, 21)]\n '''\n return (range(i, min(x, i+n)) for i in range(0, x, n))\n\ndef mtm_stats_raw_iterator(connections, partition_size, chunk_length_64=1, cutoff=0, start_j=0, upper_only=True, dense_input=False):\n '''This version of mtm_stats returns a generator instead of doing the\n actual intersection_counts calculation\n Each \n Returns:\n setA, setB, base_counts, intersection_counts_generator'''\n \n setA, setB, base_counts, rows = _mtm_common(connections, chunk_length_64, dense_input)\n intersection_counts_generator = (_mtm_intersection_counts(rows, chunk_length_64, indices_a, cutoff, start_j, upper_only, dense_input)\n for indices_a in _partition_range(len(rows), partition_size))\n return setA, setB, base_counts, intersection_counts_generator\n\ndef get_base_counts_dict(base_counts, setA):\n return {setA[i]: p\n for i, p in enumerate(base_counts)}\n\ndef get_iu_counts_dict(base_counts, intersection_counts_list, setA):\n return {(setA[i], setA[j]): (ic, base_counts[i] + base_counts[j] - ic)\n for intersection_counts in intersection_counts_list\n for i, j, ic in intersection_counts}\n\ndef get_base_counts_gen(base_counts, setA):\n return ((setA[i], p) # (key, value)\n for i, p in enumerate(base_counts))\n\ndef get_iu_counts_gen(base_counts, intersection_counts_list, setA):\n return ((setA[i], setA[j], ic, base_counts[i] + base_counts[j] - ic)\n for intersection_counts in intersection_counts_list\n for i, j, ic in intersection_counts)\n\ndef mtm_stats(connections, chunk_length_64=1, indices_a=None, cutoff=0, start_j=0, upper_only=True, dense_input=False):\n '''Get base counts and intersection counts'''\n setA, setB, base_counts, intersection_counts_list = mtm_stats_raw(connections, chunk_length_64, indices_a, cutoff, start_j, upper_only, dense_input)\n base_counts_dict = get_base_counts_dict(base_counts, setA)\n iu_counts_dict = get_iu_counts_dict(base_counts, intersection_counts_list, setA)\n return base_counts_dict, iu_counts_dict\n\ndef mtm_stats_iterator(connections, partition_size, chunk_length_64=1, cutoff=0, start_j=0, upper_only=True, dense_input=False):\n '''Like mtm_stats, but returns generators instead of dicts for performance\n Returns:\n base_counts_generator:\n a 
generator that yeilds:\n (item_i, base_count_i)\n iu_counts_double_generator:\n a generator that yeilds a generator that yields:\n (item_i, item_j, intersection_count, union_count)\n \n Example for getting data out:\n for item_i, base_count_i in base_counts_generator:\n print(\"{} has {} things from set B\".format(item_i, base_count_i))\n \n for iu_counts_gen in iu_counts_double_generator:\n for item_i, item_j, ic, uc in iu_counts_gen:\n print(\"{} and {} have {} things in common and {} things in total from set B\".format(item_i, item_j, ic, uc))\n '''\n \n setA, setB, base_counts, intersection_counts_iterator = mtm_stats_raw_iterator(connections, partition_size, chunk_length_64, cutoff, start_j, upper_only, dense_input)\n base_counts_generator = get_base_counts_gen(base_counts, setA)\n \n iu_counts_double_generator = (get_iu_counts_gen(base_counts, intersection_counts_list, setA)\n for intersection_counts_list in intersection_counts_iterator)\n \n return base_counts_generator, iu_counts_double_generator\n\ndef get_Jaccard_index_from_sparse_connections(iu_counts_dict):\n return {k: ic / uc\n for k, (ic, uc) in viewitems(iu_counts_dict)}\n\ndef get_Jaccard_index(connections, chunk_length_64=1, indices_a=None, cutoff=0, start_j=0, upper_only=True, dense_input=False):\n base_counts_dict, iu_counts_dict = mtm_stats(connections, chunk_length_64, indices_a, cutoff, start_j, upper_only, dense_input)\n jaccard_index = get_Jaccard_index_from_sparse_connections(iu_counts_dict)\n return base_counts_dict, jaccard_index\n\ndef mtm_stats_from_iterator(connections, partition_size, chunk_length_64=1, cutoff=0, start_j=0, upper_only=True, dense_input=False):\n '''Same results as regular mtm_stats, but uses mtm_stats_iterator instead\n Mostly useful for testing, although it is not actually that different\n (faster or slower) than the original, so should probably just refactor to always do things this way'''\n base_counts_generator, iu_counts_double_generator = mtm_stats_iterator(connections, partition_size, chunk_length_64, cutoff, start_j, upper_only, dense_input)\n base_counts_dict = dict(base_counts_generator)\n iu_counts_dict = {(i, j): (ic, uc)\n for iu_counts_generator in iu_counts_double_generator\n for i, j, ic, uc in iu_counts_generator}\n return base_counts_dict, iu_counts_dict\n\nif __name__ == '__main__':\n r = mtm_stats([('a1', 'b1'),\n ('a1', 'b2'),\n ('a1', 'b3'),\n ('a2', 'b1'),\n ('a2', 'b2'),\n ('a3', 'b3'),\n ('a4', 'b9'),])\n print(r[0])\n print(r[1])\n"
] | [
[
"numpy.uint64",
"numpy.empty"
]
] |
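The module above already ends with a tiny self-test; for typical use the two public entry points are `mtm_stats` and `get_Jaccard_index`. A minimal sketch follows, assuming the package and its Cython extension (`cy_mtm_stats`) are built and importable; the printed values are what the pure-Python parts of the code imply, not verified output from the extension.

from mtm_stats.mtm_stats import mtm_stats, get_Jaccard_index  # import path follows the file location above

connections = [('a1', 'b1'), ('a1', 'b2'), ('a1', 'b3'),
               ('a2', 'b1'), ('a2', 'b2'),
               ('a3', 'b3')]

# Per-item counts over set B, and (intersection, union) counts for overlapping pairs from set A.
base_counts, iu_counts = mtm_stats(connections)
print(base_counts)   # e.g. {'a1': 3, 'a2': 2, 'a3': 1}
print(iu_counts)     # e.g. {('a1', 'a2'): (2, 3), ('a1', 'a3'): (1, 3)}; non-overlapping pairs are omitted

# Jaccard index (intersection / union) derived from the same counts.
_, jaccard = get_Jaccard_index(connections)
print(jaccard)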
ngonhan2k5/cs253-bdt | [
"2b916bf9e45bba8061819d663b17d72c9ab0ea3b"
] | [
"proj/plot/testplot.py"
] | [
"import random\nfrom itertools import count\nimport pandas as pd\nimport matplotlib.pyplot as plt\nfrom matplotlib.animation import FuncAnimation\n\n\nimport happybase\nCONNECTION = happybase.Connection('127.0.0.1', 9090)\nCONNECTION.open()\nprint(CONNECTION.tables())\n\ntable = CONNECTION.table('plot')\n\nplt.style.use('fivethirtyeight')\n\nx_vals = []\ny_vals = []\n# plt.plot(x_vals, y_vals)\n\nindex = count()\n\ndef animate(i):\n data = pd.read_csv('data.csv')\n x = data['x_value']\n y1 = data['total_1']\n y2 = data['total_2']\n\n plt.cla()\n\n plt.plot(x, y1, label=\"Ch1\")\n plt.plot(x, y2, label=\"Ch2\")\n\n plt.legend(loc='upper left')\n plt.tight_layout()\n\n # x_vals.append(next(index))\n # y_vals.append(random.randint(0,5))\n\n # plt.plot(x_vals, y_vals)\n\n for (key, data) in table.scan(limit=10):\n print(key, data)\n\n\nani = FuncAnimation(plt.gcf(), animate, interval=1000)\n\nplt.tight_layout()\nplt.show()\n\n\n\n"
] | [
[
"matplotlib.pyplot.plot",
"matplotlib.pyplot.legend",
"matplotlib.pyplot.cla",
"matplotlib.pyplot.tight_layout",
"matplotlib.pyplot.style.use",
"matplotlib.pyplot.gcf",
"matplotlib.pyplot.show",
"pandas.read_csv"
]
] |
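The script above re-reads `data.csv` once per second inside `animate()` and also scans an HBase table named `plot` over Thrift on port 9090. The small helper below is not part of the repo; it simply writes a `data.csv` in the shape `animate()` expects (columns `x_value`, `total_1`, `total_2`) so the live plot has something to draw.

import csv
import random

# Write the three columns animate() reads: x_value, total_1, total_2.
with open('data.csv', 'w', newline='') as f:
    writer = csv.writer(f)
    writer.writerow(['x_value', 'total_1', 'total_2'])
    for x in range(100):
        writer.writerow([x, random.randint(0, 5), random.randint(0, 5)])

Run it before launching the plotting script (or repeatedly alongside it) so `pd.read_csv('data.csv')` never sees an empty file.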
cuihantao/Andes | [
"6cdc057986c4a8382194ef440b6e92b8dfb77e25"
] | [
"andes/models/shunt/shuntsw.py"
] | [
"\"\"\"\nSwitched shunt model.\n\"\"\"\n\nimport ast\nfrom collections import OrderedDict\n\nimport numpy as np\n\nfrom andes.core import NumParam, ConstService\nfrom andes.core.discrete import ShuntAdjust\nfrom andes.core.service import SwBlock\nfrom andes.models.shunt.shunt import ShuntData, ShuntModel\n\n\nclass ShuntSwData(ShuntData):\n \"\"\"\n Data for switched shunts.\n \"\"\"\n\n def __init__(self):\n ShuntData.__init__(self)\n self.gs = NumParam(info='a list literal of switched conductances blocks',\n default=0.0,\n unit='p.u.',\n vtype=object,\n iconvert=list_iconv,\n oconvert=list_oconv,\n y=True,\n )\n\n self.bs = NumParam(info='a list literal of switched susceptances blocks',\n default=0.0,\n unit='p.u.',\n vtype=object,\n iconvert=list_iconv,\n oconvert=list_oconv,\n y=True,\n )\n\n self.ns = NumParam(info='a list literal of the element numbers in each switched block',\n default=[0],\n vtype=object,\n iconvert=list_iconv,\n oconvert=list_oconv,\n )\n\n self.vref = NumParam(info='voltage reference',\n default=1.0,\n unit='p.u.',\n non_zero=True,\n non_negative=True,\n )\n\n self.dv = NumParam(info='voltage error deadband',\n default=0.05,\n unit='p.u.',\n non_zero=True,\n non_negative=True,\n )\n\n self.dt = NumParam(info='delay before two consecutive switching',\n default=30.,\n unit='seconds',\n non_negative=True,\n )\n\n\ndef list_iconv(x):\n \"\"\"\n Helper function to convert a list literal into a numpy array.\n \"\"\"\n if isinstance(x, str):\n x = ast.literal_eval(x)\n if isinstance(x, (int, float)):\n if not np.isnan(x):\n x = [x]\n else:\n return None\n if isinstance(x, list):\n x = np.array(x)\n return x\n\n\ndef list_oconv(x):\n \"\"\"\n Convert list into a list literal.\n \"\"\"\n return np.array2string(x, separator=', ')\n\n\nclass ShuntSwModel(ShuntModel):\n \"\"\"\n Switched shunt model.\n \"\"\"\n\n def __init__(self, system, config):\n ShuntModel.__init__(self, system, config)\n\n self.config.add(OrderedDict((('min_iter', 2),\n ('err_tol', 0.01),\n )))\n self.config.add_extra(\"_help\",\n min_iter=\"iteration number starting from which to enable switching\",\n err_tol=\"iteration error below which to enable switching\",\n )\n self.config.add_extra(\"_alt\",\n min_iter='int',\n err_tol='float',\n )\n self.config.add_extra(\"_tex\",\n min_iter=\"sw_{iter}\",\n err_tol=r\"\\epsilon_{tol}\",\n )\n\n self.beff = SwBlock(init=self.b, ns=self.ns, blocks=self.bs)\n self.geff = SwBlock(init=self.g, ns=self.ns, blocks=self.gs,\n ext_sel=self.beff)\n\n self.vlo = ConstService(v_str='vref - dv', tex_name='v_{lo}')\n self.vup = ConstService(v_str='vref + dv', tex_name='v_{up}')\n\n self.adj = ShuntAdjust(v=self.v, lower=self.vlo, upper=self.vup,\n bsw=self.beff, gsw=self.geff, dt=self.dt,\n u=self.u,\n min_iter=self.config.min_iter,\n err_tol=self.config.err_tol,\n info='shunt adjuster')\n\n self.a.e_str = 'u * v**2 * geff'\n self.v.e_str = '-u * v**2 * beff'\n\n\nclass ShuntSw(ShuntSwData, ShuntSwModel):\n \"\"\"\n Switched Shunt Model.\n\n Parameters `gs`, `bs` and `bs` must be entered in string literals,\n comma-separated. 
They need to have the same length.\n\n For example, in the excel file, one can put ::\n\n gs = [0, 0]\n bs = [0.2, 0.2]\n ns = [2, 4]\n\n To use individual shunts as fixed shunts, set the corresponding\n `ns = 0` or `ns = [0]`.\n\n The effective shunt susceptances and conductances are stored in\n services `beff` and `geff`.\n \"\"\"\n\n def __init__(self, system=None, config=None):\n ShuntSwData.__init__(self)\n ShuntSwModel.__init__(self, system, config)\n"
] | [
[
"numpy.array",
"numpy.isnan",
"numpy.array2string"
]
] |
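The `list_iconv`/`list_oconv` helpers above are what turn the string-literal `gs`, `bs`, and `ns` parameters into numpy arrays and back. A minimal round-trip sketch follows; the import path is taken from the file location listed above.

import numpy as np

from andes.models.shunt.shuntsw import list_iconv, list_oconv

arr = list_iconv("[0.2, 0.2]")   # string literal from an input file -> array([0.2, 0.2])
print(arr, type(arr))

print(list_iconv(0.05))          # a plain scalar is wrapped into a one-element array
print(list_iconv(np.nan))        # NaN means "not provided" and maps to None

print(list_oconv(arr))           # back to a list literal: "[0.2, 0.2]"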
t-wojciech/LightGBM | [
"d90a16d520fa12d84ecd983fba323727348616a4"
] | [
"python-package/lightgbm/plotting.py"
] | [
"# coding: utf-8\n\"\"\"Plotting library.\"\"\"\nimport warnings\nfrom copy import deepcopy\nfrom io import BytesIO\n\nimport numpy as np\n\nfrom .basic import Booster\nfrom .compat import MATPLOTLIB_INSTALLED, GRAPHVIZ_INSTALLED\nfrom .sklearn import LGBMModel\n\n\ndef _check_not_tuple_of_2_elements(obj, obj_name='obj'):\n \"\"\"Check object is not tuple or does not have 2 elements.\"\"\"\n if not isinstance(obj, tuple) or len(obj) != 2:\n raise TypeError('%s must be a tuple of 2 elements.' % obj_name)\n\n\ndef _float2str(value, precision=None):\n return (\"{0:.{1}f}\".format(value, precision)\n if precision is not None and not isinstance(value, str)\n else str(value))\n\n\ndef plot_importance(booster, ax=None, height=0.2,\n xlim=None, ylim=None, title='Feature importance',\n xlabel='Feature importance', ylabel='Features',\n importance_type='split', max_num_features=None,\n ignore_zero=True, figsize=None, dpi=None, grid=True,\n precision=3, **kwargs):\n \"\"\"Plot model's feature importances.\n\n Parameters\n ----------\n booster : Booster or LGBMModel\n Booster or LGBMModel instance which feature importance should be plotted.\n ax : matplotlib.axes.Axes or None, optional (default=None)\n Target axes instance.\n If None, new figure and axes will be created.\n height : float, optional (default=0.2)\n Bar height, passed to ``ax.barh()``.\n xlim : tuple of 2 elements or None, optional (default=None)\n Tuple passed to ``ax.xlim()``.\n ylim : tuple of 2 elements or None, optional (default=None)\n Tuple passed to ``ax.ylim()``.\n title : string or None, optional (default=\"Feature importance\")\n Axes title.\n If None, title is disabled.\n xlabel : string or None, optional (default=\"Feature importance\")\n X-axis title label.\n If None, title is disabled.\n ylabel : string or None, optional (default=\"Features\")\n Y-axis title label.\n If None, title is disabled.\n importance_type : string, optional (default=\"split\")\n How the importance is calculated.\n If \"split\", result contains numbers of times the feature is used in a model.\n If \"gain\", result contains total gains of splits which use the feature.\n max_num_features : int or None, optional (default=None)\n Max number of top features displayed on plot.\n If None or <1, all features will be displayed.\n ignore_zero : bool, optional (default=True)\n Whether to ignore features with zero importance.\n figsize : tuple of 2 elements or None, optional (default=None)\n Figure size.\n dpi : int or None, optional (default=None)\n Resolution of the figure.\n grid : bool, optional (default=True)\n Whether to add a grid for axes.\n precision : int or None, optional (default=3)\n Used to restrict the display of floating point values to a certain precision.\n **kwargs\n Other parameters passed to ``ax.barh()``.\n\n Returns\n -------\n ax : matplotlib.axes.Axes\n The plot with model's feature importances.\n \"\"\"\n if MATPLOTLIB_INSTALLED:\n import matplotlib.pyplot as plt\n else:\n raise ImportError('You must install matplotlib to plot importance.')\n\n if isinstance(booster, LGBMModel):\n booster = booster.booster_\n elif not isinstance(booster, Booster):\n raise TypeError('booster must be Booster or LGBMModel.')\n\n importance = booster.feature_importance(importance_type=importance_type)\n feature_name = booster.feature_name()\n\n if not len(importance):\n raise ValueError(\"Booster's feature_importance is empty.\")\n\n tuples = sorted(zip(feature_name, importance), key=lambda x: x[1])\n if ignore_zero:\n tuples = [x for x in tuples if x[1] > 
0]\n if max_num_features is not None and max_num_features > 0:\n tuples = tuples[-max_num_features:]\n labels, values = zip(*tuples)\n\n if ax is None:\n if figsize is not None:\n _check_not_tuple_of_2_elements(figsize, 'figsize')\n _, ax = plt.subplots(1, 1, figsize=figsize, dpi=dpi)\n\n ylocs = np.arange(len(values))\n ax.barh(ylocs, values, align='center', height=height, **kwargs)\n\n for x, y in zip(values, ylocs):\n ax.text(x + 1, y,\n _float2str(x, precision) if importance_type == 'gain' else x,\n va='center')\n\n ax.set_yticks(ylocs)\n ax.set_yticklabels(labels)\n\n if xlim is not None:\n _check_not_tuple_of_2_elements(xlim, 'xlim')\n else:\n xlim = (0, max(values) * 1.1)\n ax.set_xlim(xlim)\n\n if ylim is not None:\n _check_not_tuple_of_2_elements(ylim, 'ylim')\n else:\n ylim = (-1, len(values))\n ax.set_ylim(ylim)\n\n if title is not None:\n ax.set_title(title)\n if xlabel is not None:\n ax.set_xlabel(xlabel)\n if ylabel is not None:\n ax.set_ylabel(ylabel)\n ax.grid(grid)\n return ax\n\n\ndef plot_split_value_histogram(booster, feature, bins=None, ax=None, width_coef=0.8,\n xlim=None, ylim=None,\n title='Split value histogram for feature with @index/name@ @feature@',\n xlabel='Feature split value', ylabel='Count',\n figsize=None, dpi=None, grid=True, **kwargs):\n \"\"\"Plot split value histogram for the specified feature of the model.\n\n Parameters\n ----------\n booster : Booster or LGBMModel\n Booster or LGBMModel instance of which feature split value histogram should be plotted.\n feature : int or string\n The feature name or index the histogram is plotted for.\n If int, interpreted as index.\n If string, interpreted as name.\n bins : int, string or None, optional (default=None)\n The maximum number of bins.\n If None, the number of bins equals number of unique split values.\n If string, it should be one from the list of the supported values by ``numpy.histogram()`` function.\n ax : matplotlib.axes.Axes or None, optional (default=None)\n Target axes instance.\n If None, new figure and axes will be created.\n width_coef : float, optional (default=0.8)\n Coefficient for histogram bar width.\n xlim : tuple of 2 elements or None, optional (default=None)\n Tuple passed to ``ax.xlim()``.\n ylim : tuple of 2 elements or None, optional (default=None)\n Tuple passed to ``ax.ylim()``.\n title : string or None, optional (default=\"Split value histogram for feature with @index/name@ @feature@\")\n Axes title.\n If None, title is disabled.\n @feature@ placeholder can be used, and it will be replaced with the value of ``feature`` parameter.\n @index/name@ placeholder can be used,\n and it will be replaced with ``index`` word in case of ``int`` type ``feature`` parameter\n or ``name`` word in case of ``string`` type ``feature`` parameter.\n xlabel : string or None, optional (default=\"Feature split value\")\n X-axis title label.\n If None, title is disabled.\n ylabel : string or None, optional (default=\"Count\")\n Y-axis title label.\n If None, title is disabled.\n figsize : tuple of 2 elements or None, optional (default=None)\n Figure size.\n dpi : int or None, optional (default=None)\n Resolution of the figure.\n grid : bool, optional (default=True)\n Whether to add a grid for axes.\n **kwargs\n Other parameters passed to ``ax.bar()``.\n\n Returns\n -------\n ax : matplotlib.axes.Axes\n The plot with specified model's feature split value histogram.\n \"\"\"\n if MATPLOTLIB_INSTALLED:\n import matplotlib.pyplot as plt\n from matplotlib.ticker import MaxNLocator\n else:\n raise 
ImportError('You must install matplotlib to plot split value histogram.')\n\n if isinstance(booster, LGBMModel):\n booster = booster.booster_\n elif not isinstance(booster, Booster):\n raise TypeError('booster must be Booster or LGBMModel.')\n\n hist, bins = booster.get_split_value_histogram(feature=feature, bins=bins, xgboost_style=False)\n if np.count_nonzero(hist) == 0:\n raise ValueError('Cannot plot split value histogram, '\n 'because feature {} was not used in splitting'.format(feature))\n width = width_coef * (bins[1] - bins[0])\n centred = (bins[:-1] + bins[1:]) / 2\n\n if ax is None:\n if figsize is not None:\n _check_not_tuple_of_2_elements(figsize, 'figsize')\n _, ax = plt.subplots(1, 1, figsize=figsize, dpi=dpi)\n\n ax.bar(centred, hist, align='center', width=width, **kwargs)\n\n if xlim is not None:\n _check_not_tuple_of_2_elements(xlim, 'xlim')\n else:\n range_result = bins[-1] - bins[0]\n xlim = (bins[0] - range_result * 0.2, bins[-1] + range_result * 0.2)\n ax.set_xlim(xlim)\n\n ax.yaxis.set_major_locator(MaxNLocator(integer=True))\n if ylim is not None:\n _check_not_tuple_of_2_elements(ylim, 'ylim')\n else:\n ylim = (0, max(hist) * 1.1)\n ax.set_ylim(ylim)\n\n if title is not None:\n title = title.replace('@feature@', str(feature))\n title = title.replace('@index/name@', ('name' if isinstance(feature, str) else 'index'))\n ax.set_title(title)\n if xlabel is not None:\n ax.set_xlabel(xlabel)\n if ylabel is not None:\n ax.set_ylabel(ylabel)\n ax.grid(grid)\n return ax\n\n\ndef plot_metric(booster, metric=None, dataset_names=None,\n ax=None, xlim=None, ylim=None,\n title='Metric during training',\n xlabel='Iterations', ylabel='auto',\n figsize=None, dpi=None, grid=True):\n \"\"\"Plot one metric during training.\n\n Parameters\n ----------\n booster : dict or LGBMModel\n Dictionary returned from ``lightgbm.train()`` or LGBMModel instance.\n metric : string or None, optional (default=None)\n The metric name to plot.\n Only one metric supported because different metrics have various scales.\n If None, first metric picked from dictionary (according to hashcode).\n dataset_names : list of strings or None, optional (default=None)\n List of the dataset names which are used to calculate metric to plot.\n If None, all datasets are used.\n ax : matplotlib.axes.Axes or None, optional (default=None)\n Target axes instance.\n If None, new figure and axes will be created.\n xlim : tuple of 2 elements or None, optional (default=None)\n Tuple passed to ``ax.xlim()``.\n ylim : tuple of 2 elements or None, optional (default=None)\n Tuple passed to ``ax.ylim()``.\n title : string or None, optional (default=\"Metric during training\")\n Axes title.\n If None, title is disabled.\n xlabel : string or None, optional (default=\"Iterations\")\n X-axis title label.\n If None, title is disabled.\n ylabel : string or None, optional (default=\"auto\")\n Y-axis title label.\n If 'auto', metric name is used.\n If None, title is disabled.\n figsize : tuple of 2 elements or None, optional (default=None)\n Figure size.\n dpi : int or None, optional (default=None)\n Resolution of the figure.\n grid : bool, optional (default=True)\n Whether to add a grid for axes.\n\n Returns\n -------\n ax : matplotlib.axes.Axes\n The plot with metric's history over the training.\n \"\"\"\n if MATPLOTLIB_INSTALLED:\n import matplotlib.pyplot as plt\n else:\n raise ImportError('You must install matplotlib to plot metric.')\n\n if isinstance(booster, LGBMModel):\n eval_results = deepcopy(booster.evals_result_)\n elif 
isinstance(booster, dict):\n eval_results = deepcopy(booster)\n else:\n raise TypeError('booster must be dict or LGBMModel.')\n\n num_data = len(eval_results)\n\n if not num_data:\n raise ValueError('eval results cannot be empty.')\n\n if ax is None:\n if figsize is not None:\n _check_not_tuple_of_2_elements(figsize, 'figsize')\n _, ax = plt.subplots(1, 1, figsize=figsize, dpi=dpi)\n\n if dataset_names is None:\n dataset_names = iter(eval_results.keys())\n elif not isinstance(dataset_names, (list, tuple, set)) or not dataset_names:\n raise ValueError('dataset_names should be iterable and cannot be empty')\n else:\n dataset_names = iter(dataset_names)\n\n name = next(dataset_names) # take one as sample\n metrics_for_one = eval_results[name]\n num_metric = len(metrics_for_one)\n if metric is None:\n if num_metric > 1:\n msg = \"More than one metric available, picking one to plot.\"\n warnings.warn(msg, stacklevel=2)\n metric, results = metrics_for_one.popitem()\n else:\n if metric not in metrics_for_one:\n raise KeyError('No given metric in eval results.')\n results = metrics_for_one[metric]\n num_iteration, max_result, min_result = len(results), max(results), min(results)\n x_ = range(num_iteration)\n ax.plot(x_, results, label=name)\n\n for name in dataset_names:\n metrics_for_one = eval_results[name]\n results = metrics_for_one[metric]\n max_result, min_result = max(max(results), max_result), min(min(results), min_result)\n ax.plot(x_, results, label=name)\n\n ax.legend(loc='best')\n\n if xlim is not None:\n _check_not_tuple_of_2_elements(xlim, 'xlim')\n else:\n xlim = (0, num_iteration)\n ax.set_xlim(xlim)\n\n if ylim is not None:\n _check_not_tuple_of_2_elements(ylim, 'ylim')\n else:\n range_result = max_result - min_result\n ylim = (min_result - range_result * 0.2, max_result + range_result * 0.2)\n ax.set_ylim(ylim)\n\n if ylabel == 'auto':\n ylabel = metric\n\n if title is not None:\n ax.set_title(title)\n if xlabel is not None:\n ax.set_xlabel(xlabel)\n if ylabel is not None:\n ax.set_ylabel(ylabel)\n ax.grid(grid)\n return ax\n\n\ndef _to_graphviz(tree_info, show_info, feature_names, precision=3,\n orientation='horizontal', constraints=None, **kwargs):\n \"\"\"Convert specified tree to graphviz instance.\n\n See:\n - https://graphviz.readthedocs.io/en/stable/api.html#digraph\n \"\"\"\n if GRAPHVIZ_INSTALLED:\n from graphviz import Digraph\n else:\n raise ImportError('You must install graphviz to plot tree.')\n\n def add(root, total_count, parent=None, decision=None):\n \"\"\"Recursively add node or edge.\"\"\"\n if 'split_index' in root: # non-leaf\n l_dec = 'yes'\n r_dec = 'no'\n if root['decision_type'] == '<=':\n lte_symbol = \"≤\"\n operator = lte_symbol\n elif root['decision_type'] == '==':\n operator = \"=\"\n else:\n raise ValueError('Invalid decision type in tree model.')\n name = 'split{0}'.format(root['split_index'])\n if feature_names is not None:\n label = '<B>{0}</B> {1} '.format(feature_names[root['split_feature']], operator)\n else:\n label = 'feature <B>{0}</B> {1} '.format(root['split_feature'], operator)\n label += '<B>{0}</B>'.format(_float2str(root['threshold'], precision))\n for info in ['split_gain', 'internal_value', 'internal_weight', \"internal_count\", \"data_percentage\"]:\n if info in show_info:\n output = info.split('_')[-1]\n if info in {'split_gain', 'internal_value', 'internal_weight'}:\n label += '<br/>{0} {1}'.format(_float2str(root[info], precision), output)\n elif info == 'internal_count':\n label += '<br/>{0}: {1}'.format(output, root[info])\n 
elif info == \"data_percentage\":\n label += '<br/>{0}% of data'.format(_float2str(root['internal_count'] / total_count * 100, 2))\n\n fillcolor = \"white\"\n style = \"\"\n if constraints:\n if constraints[root['split_feature']] == 1:\n fillcolor = \"#ddffdd\" # light green\n if constraints[root['split_feature']] == -1:\n fillcolor = \"#ffdddd\" # light red\n style = \"filled\"\n label = \"<\" + label + \">\"\n graph.node(name, label=label, shape=\"rectangle\", style=style, fillcolor=fillcolor)\n add(root['left_child'], total_count, name, l_dec)\n add(root['right_child'], total_count, name, r_dec)\n else: # leaf\n name = 'leaf{0}'.format(root['leaf_index'])\n label = 'leaf {0}: '.format(root['leaf_index'])\n label += '<B>{0}</B>'.format(_float2str(root['leaf_value'], precision))\n if 'leaf_weight' in show_info:\n label += '<br/>{0} weight'.format(_float2str(root['leaf_weight'], precision))\n if 'leaf_count' in show_info:\n label += '<br/>count: {0}'.format(root['leaf_count'])\n if \"data_percentage\" in show_info:\n label += '<br/>{0}% of data'.format(_float2str(root['leaf_count'] / total_count * 100, 2))\n label = \"<\" + label + \">\"\n graph.node(name, label=label)\n if parent is not None:\n graph.edge(parent, name, decision)\n\n graph = Digraph(**kwargs)\n rankdir = \"LR\" if orientation == \"horizontal\" else \"TB\"\n graph.attr(\"graph\", nodesep=\"0.05\", ranksep=\"0.3\", rankdir=rankdir)\n if \"internal_count\" in tree_info['tree_structure']:\n add(tree_info['tree_structure'], tree_info['tree_structure'][\"internal_count\"])\n else:\n raise Exception(\"Cannot plot trees with no split\")\n\n if constraints:\n # \"#ddffdd\" is light green, \"#ffdddd\" is light red\n legend = \"\"\"<\n <TABLE BORDER=\"0\" CELLBORDER=\"1\" CELLSPACING=\"0\" CELLPADDING=\"4\">\n <TR>\n <TD COLSPAN=\"2\"><B>Monotone constraints</B></TD>\n </TR>\n <TR>\n <TD>Increasing</TD>\n <TD BGCOLOR=\"#ddffdd\"></TD>\n </TR>\n <TR>\n <TD>Decreasing</TD>\n <TD BGCOLOR=\"#ffdddd\"></TD>\n </TR>\n </TABLE>\n >\"\"\"\n graph.node(\"legend\", label=legend, shape=\"rectangle\", color=\"white\")\n return graph\n\n\ndef create_tree_digraph(booster, tree_index=0, show_info=None, precision=3,\n orientation='horizontal', **kwargs):\n \"\"\"Create a digraph representation of specified tree.\n\n Each node in the graph represents a node in the tree.\n\n Non-leaf nodes have labels like ``Column_10 <= 875.9``, which means\n \"this node splits on the feature named \"Column_10\", with threshold 875.9\".\n\n Leaf nodes have labels like ``leaf 2: 0.422``, which means \"this node is a\n leaf node, and the predicted value for records that fall into this node\n is 0.422\". The number (``2``) is an internal unique identifier and doesn't\n have any special meaning.\n\n .. 
note::\n\n For more information please visit\n https://graphviz.readthedocs.io/en/stable/api.html#digraph.\n\n Parameters\n ----------\n booster : Booster or LGBMModel\n Booster or LGBMModel instance to be converted.\n tree_index : int, optional (default=0)\n The index of a target tree to convert.\n show_info : list of strings or None, optional (default=None)\n What information should be shown in nodes.\n\n - ``'split_gain'`` : gain from adding this split to the model\n - ``'internal_value'`` : raw predicted value that would be produced by this node if it was a leaf node\n - ``'internal_count'`` : number of records from the training data that fall into this non-leaf node\n - ``'internal_weight'`` : total weight of all nodes that fall into this non-leaf node\n - ``'leaf_count'`` : number of records from the training data that fall into this leaf node\n - ``'leaf_weight'`` : total weight (sum of hessian) of all observations that fall into this leaf node\n - ``'data_percentage'`` : percentage of training data that fall into this node\n precision : int or None, optional (default=3)\n Used to restrict the display of floating point values to a certain precision.\n orientation : string, optional (default='horizontal')\n Orientation of the tree.\n Can be 'horizontal' or 'vertical'.\n **kwargs\n Other parameters passed to ``Digraph`` constructor.\n Check https://graphviz.readthedocs.io/en/stable/api.html#digraph for the full list of supported parameters.\n\n Returns\n -------\n graph : graphviz.Digraph\n The digraph representation of specified tree.\n \"\"\"\n if isinstance(booster, LGBMModel):\n booster = booster.booster_\n elif not isinstance(booster, Booster):\n raise TypeError('booster must be Booster or LGBMModel.')\n\n model = booster.dump_model()\n tree_infos = model['tree_info']\n if 'feature_names' in model:\n feature_names = model['feature_names']\n else:\n feature_names = None\n\n monotone_constraints = model.get('monotone_constraints', None)\n\n if tree_index < len(tree_infos):\n tree_info = tree_infos[tree_index]\n else:\n raise IndexError('tree_index is out of range.')\n\n if show_info is None:\n show_info = []\n\n graph = _to_graphviz(tree_info, show_info, feature_names, precision,\n orientation, monotone_constraints, **kwargs)\n\n return graph\n\n\ndef plot_tree(booster, ax=None, tree_index=0, figsize=None, dpi=None,\n show_info=None, precision=3, orientation='horizontal', **kwargs):\n \"\"\"Plot specified tree.\n\n Each node in the graph represents a node in the tree.\n\n Non-leaf nodes have labels like ``Column_10 <= 875.9``, which means\n \"this node splits on the feature named \"Column_10\", with threshold 875.9\".\n\n Leaf nodes have labels like ``leaf 2: 0.422``, which means \"this node is a\n leaf node, and the predicted value for records that fall into this node\n is 0.422\". The number (``2``) is an internal unique identifier and doesn't\n have any special meaning.\n\n .. 
note::\n\n It is preferable to use ``create_tree_digraph()`` because of its lossless quality\n and returned objects can be also rendered and displayed directly inside a Jupyter notebook.\n\n Parameters\n ----------\n booster : Booster or LGBMModel\n Booster or LGBMModel instance to be plotted.\n ax : matplotlib.axes.Axes or None, optional (default=None)\n Target axes instance.\n If None, new figure and axes will be created.\n tree_index : int, optional (default=0)\n The index of a target tree to plot.\n figsize : tuple of 2 elements or None, optional (default=None)\n Figure size.\n dpi : int or None, optional (default=None)\n Resolution of the figure.\n show_info : list of strings or None, optional (default=None)\n What information should be shown in nodes.\n\n - ``'split_gain'`` : gain from adding this split to the model\n - ``'internal_value'`` : raw predicted value that would be produced by this node if it was a leaf node\n - ``'internal_count'`` : number of records from the training data that fall into this non-leaf node\n - ``'internal_weight'`` : total weight of all nodes that fall into this non-leaf node\n - ``'leaf_count'`` : number of records from the training data that fall into this leaf node\n - ``'leaf_weight'`` : total weight (sum of hessian) of all observations that fall into this leaf node\n - ``'data_percentage'`` : percentage of training data that fall into this node\n precision : int or None, optional (default=3)\n Used to restrict the display of floating point values to a certain precision.\n orientation : string, optional (default='horizontal')\n Orientation of the tree.\n Can be 'horizontal' or 'vertical'.\n **kwargs\n Other parameters passed to ``Digraph`` constructor.\n Check https://graphviz.readthedocs.io/en/stable/api.html#digraph for the full list of supported parameters.\n\n Returns\n -------\n ax : matplotlib.axes.Axes\n The plot with single tree.\n \"\"\"\n if MATPLOTLIB_INSTALLED:\n import matplotlib.pyplot as plt\n import matplotlib.image as image\n else:\n raise ImportError('You must install matplotlib to plot tree.')\n\n if ax is None:\n if figsize is not None:\n _check_not_tuple_of_2_elements(figsize, 'figsize')\n _, ax = plt.subplots(1, 1, figsize=figsize, dpi=dpi)\n\n graph = create_tree_digraph(booster=booster, tree_index=tree_index,\n show_info=show_info, precision=precision,\n orientation=orientation, **kwargs)\n\n s = BytesIO()\n s.write(graph.pipe(format='png'))\n s.seek(0)\n img = image.imread(s)\n\n ax.imshow(img)\n ax.axis('off')\n return ax\n"
] | [
[
"numpy.count_nonzero",
"matplotlib.image.imread",
"matplotlib.ticker.MaxNLocator",
"matplotlib.pyplot.subplots"
]
] |
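The LightGBM plotting helpers recorded in the row above (the eval-results curve plotter, `_to_graphviz`, `create_tree_digraph`, `plot_tree`) can be exercised through the public `lightgbm` API. The sketch below is illustrative only: it assumes `lightgbm`, `scikit-learn`, `matplotlib` and the Graphviz binaries are installed, and the dataset, estimator settings and output file name are made up.

```python
import lightgbm as lgb
import matplotlib.pyplot as plt
from sklearn.datasets import make_classification
from sklearn.model_selection import train_test_split

# Fit a small model with two evaluation sets so eval results get recorded.
X, y = make_classification(n_samples=500, n_features=10, random_state=0)
X_tr, X_va, y_tr, y_va = train_test_split(X, y, random_state=0)

model = lgb.LGBMClassifier(n_estimators=50)
model.fit(X_tr, y_tr, eval_set=[(X_tr, y_tr), (X_va, y_va)], eval_metric="binary_logloss")

# One curve per evaluation set, as in the plotting loop above.
ax = lgb.plot_metric(model, metric="binary_logloss")
plt.show()

# create_tree_digraph returns a graphviz.Digraph for a single tree.
graph = lgb.create_tree_digraph(model, tree_index=0,
                                show_info=["internal_count", "leaf_count"])
graph.render("tree_0", format="png", cleanup=True)  # writes tree_0.png
```

Passing a fitted `LGBMModel` works because the plotting helpers fall back to the model's recorded eval results when a plain dict is not supplied, as the `isinstance(booster, dict)` branch above suggests.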
lahdjirayhan/scikit-lego | [
"5dd145df796c4d254cd505727c9db01484ebc39c"
] | [
"sklego/meta/grouped_transformer.py"
] | [
"import numpy as np\nimport pandas as pd\n\nfrom sklearn.base import BaseEstimator, TransformerMixin, clone\nfrom sklearn.utils.validation import check_is_fitted\n\nfrom ._grouped_utils import _split_groups_and_values\n\n\nclass GroupedTransformer(BaseEstimator, TransformerMixin):\n \"\"\"\n Construct a transformer per data group. Splits data by groups from single or multiple columns\n and transforms remaining columns using the transformers corresponding to the groups.\n\n :param transformer: the transformer to be applied per group\n :param groups: the column(s) of the matrix/dataframe to select as a grouping parameter set. If None,\n the transformer will be applied to the entire input without grouping\n :param use_global_model: Whether or not to fall back to a general transformation in case a group\n is not found during `.transform()`\n \"\"\"\n\n _check_kwargs = {\"accept_large_sparse\": False}\n\n def __init__(self, transformer, groups, use_global_model=True):\n self.transformer = transformer\n self.groups = groups\n self.use_global_model = use_global_model\n\n def __fit_single_group(self, group, X, y=None):\n try:\n return clone(self.transformer).fit(X, y)\n except Exception as e:\n raise type(e)(f\"Exception for group {group}: {e}\")\n\n def __fit_grouped_transformer(\n self, X_group: pd.DataFrame, X_value: np.array, y=None\n ):\n \"\"\"Fit a transformer to each group\"\"\"\n # Make the groups based on the groups dataframe, use the indices on the values array\n try:\n group_indices = X_group.groupby(X_group.columns.tolist()).indices\n except TypeError:\n # This one is needed because of line #918 of sklearn/utils/estimator_checks\n raise TypeError(\"argument must be a string, date or number\")\n\n if y is not None:\n if isinstance(y, pd.Series):\n y.index = X_group.index\n\n grouped_transformers = {\n # Fit a clone of the transformer to each group\n group: self.__fit_single_group(group, X_value[indices, :], y[indices])\n for group, indices in group_indices.items()\n }\n else:\n grouped_transformers = {\n group: self.__fit_single_group(group, X_value[indices, :])\n for group, indices in group_indices.items()\n }\n\n return grouped_transformers\n\n def __check_transformer(self):\n if not hasattr(self.transformer, \"transform\"):\n raise ValueError(\n \"The supplied transformer should have a 'transform' method\"\n )\n\n def fit(self, X, y=None):\n \"\"\"\n Fit the transformers to the groups in X\n\n :param X: Array-like with at least two columns, of which at least one corresponds to groups defined in init,\n and the remaining columns represent the values to transform.\n :param y: (Optional) target variable\n \"\"\"\n self.__check_transformer()\n\n self.fallback_ = None\n\n if self.groups is None:\n self.transformers_ = clone(self.transformer).fit(X, y)\n return self\n\n X_group, X_value = _split_groups_and_values(\n X, self.groups, **self._check_kwargs\n )\n self.transformers_ = self.__fit_grouped_transformer(X_group, X_value, y)\n\n if self.use_global_model:\n self.fallback_ = clone(self.transformer).fit(X_value)\n\n return self\n\n def __transform_single_group(self, group, X):\n \"\"\"Transform a single group by getting its transformer from the fitted dict\"\"\"\n # Keep track of the original index such that we can sort in __transform_groups\n index = X.index\n try:\n group_transformer = self.transformers_[group]\n except KeyError:\n if self.fallback_:\n group_transformer = self.fallback_\n else:\n raise ValueError(\n f\"Found new group {group} during transform with use_global_model = 
False\"\n )\n\n return pd.DataFrame(group_transformer.transform(X)).set_index(index)\n\n def __transform_groups(self, X_group: pd.DataFrame, X_value: np.array):\n \"\"\"Transform all groups\"\"\"\n # Reset indices such that they are the same in X_group (reset in __check_grouping_columns),\n # this way we can track the order of the result\n X_value = pd.DataFrame(X_value).reset_index(drop=True)\n\n # Make the groups based on the groups dataframe, use the indices on the values array\n group_indices = X_group.groupby(X_group.columns.tolist()).indices\n\n return (\n pd.concat(\n [\n self.__transform_single_group(group, X_value.loc[indices, :])\n for group, indices in group_indices.items()\n ],\n axis=0,\n )\n .sort_index()\n .values\n )\n\n def transform(self, X):\n \"\"\"\n Fit the transformers to the groups in X\n\n :param X: Array-like with columns corresponding to the ones in .fit()\n \"\"\"\n check_is_fitted(self, [\"fallback_\", \"transformers_\"])\n\n if self.groups is None:\n return self.transformers_.transform(X)\n\n X_group, X_value = _split_groups_and_values(\n X, self.groups, **self._check_kwargs\n )\n\n return self.__transform_groups(X_group, X_value)\n"
] | [
[
"pandas.DataFrame",
"sklearn.base.clone",
"sklearn.utils.validation.check_is_fitted"
]
] |
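A minimal usage sketch for the `GroupedTransformer` defined in the row above, assuming scikit-learn's `StandardScaler` as the per-group transformer; the DataFrame and its column names (`group`, `x1`, `x2`) are invented for illustration.

```python
import numpy as np
import pandas as pd
from sklearn.preprocessing import StandardScaler
from sklego.meta import GroupedTransformer

df = pd.DataFrame({
    "group": ["a"] * 5 + ["b"] * 5,           # grouping column (made-up data)
    "x1": np.arange(10, dtype=float),
    "x2": np.arange(10, dtype=float) * 2.0,
})

# One StandardScaler clone is fitted per value of "group"; the grouping column
# is only used for splitting and is not part of the transformed output.
per_group_scaler = GroupedTransformer(StandardScaler(), groups=["group"])
out = per_group_scaler.fit_transform(df)
print(out.shape)  # (10, 2)
```

With `use_global_model=True` (the default), a group unseen during fit falls back to the transformer fitted on all training rows, per the `fallback_` logic in the source above.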
janisoteps/imsim1 | [
"e0f7ec186ac5bbca9d4a453f8147dc525642e6da"
] | [
"vgg16jd.py"
] | [
"# -*- coding: utf-8 -*-\n'''VGG16 model for Keras.\n\n# Reference:\n\n- [Very Deep Convolutional Networks for Large-Scale Image Recognition](https://arxiv.org/abs/1409.1556)\n\n'''\nfrom __future__ import print_function\n\nimport numpy as np\nimport warnings\n\nfrom keras.models import Model\nfrom keras.layers import Flatten, Dense, Input\nfrom keras.layers import Convolution2D, MaxPooling2D\nfrom keras.preprocessing import image\nfrom keras.utils.layer_utils import convert_all_kernels_in_model\nfrom keras.utils.data_utils import get_file\nfrom keras import backend as K\nfrom imagenet_utils import decode_predictions, preprocess_input\n\n\nTH_WEIGHTS_PATH = 'https://github.com/fchollet/deep-learning-models/releases/download/v0.1/vgg16_weights_th_dim_ordering_th_kernels.h5'\n# TF_WEIGHTS_PATH = 'https://github.com/fchollet/deep-learning-models/releases/download/v0.1/vgg16_weights_tf_dim_ordering_tf_kernels.h5'\nTF_WEIGHTS_PATH = '/Users/janisdzikevics/dev/imsim/imsim1/.keras/models/vgg16_weights_tf_dim_ordering_tf_kernels.h5'\nTH_WEIGHTS_PATH_NO_TOP = 'https://github.com/fchollet/deep-learning-models/releases/download/v0.1/vgg16_weights_th_dim_ordering_th_kernels_notop.h5'\n# TF_WEIGHTS_PATH_NO_TOP = 'https://github.com/fchollet/deep-learning-models/releases/download/v0.1/vgg16_weights_tf_dim_ordering_tf_kernels_notop.h5'\nTF_WEIGHTS_PATH_NO_TOP = '/Users/janisdzikevics/dev/imsim/imsim1/.keras/models/vgg16_weights_tf_dim_ordering_tf_kernels_notop.h5'\n\ndef VGG16(include_top=True, weights='imagenet',\n input_tensor=None):\n '''Instantiate the VGG16 architecture,\n optionally loading weights pre-trained\n on ImageNet. Note that when using TensorFlow,\n for best performance you should set\n `image_dim_ordering=\"tf\"` in your Keras config\n at ~/.keras/keras.json.\n\n The model and the weights are compatible with both\n TensorFlow and Theano. The dimension ordering\n convention used by the model is the one\n specified in your Keras config file.\n\n # Arguments\n include_top: whether to include the 3 fully-connected\n layers at the top of the network.\n weights: one of `None` (random initialization)\n or \"imagenet\" (pre-training on ImageNet).\n input_tensor: optional Keras tensor (i.e. 
output of `layers.Input()`)\n to use as image input for the model.\n\n # Returns\n A Keras model instance.\n '''\n if weights not in {'imagenet', None}:\n raise ValueError('The `weights` argument should be either '\n '`None` (random initialization) or `imagenet` '\n '(pre-training on ImageNet).')\n # Determine proper input shape\n if K.image_dim_ordering() == 'th':\n if include_top:\n input_shape = (3, 224, 224)\n else:\n input_shape = (3, None, None)\n else:\n if include_top:\n input_shape = (224, 224, 3)\n else:\n input_shape = (None, None, 3)\n\n if input_tensor is None:\n img_input = Input(shape=input_shape)\n else:\n if not K.is_keras_tensor(input_tensor):\n img_input = Input(tensor=input_tensor)\n else:\n img_input = input_tensor\n # Block 1\n x = Convolution2D(64, 3, 3, activation='relu', border_mode='same', name='block1_conv1')(img_input)\n x = Convolution2D(64, 3, 3, activation='relu', border_mode='same', name='block1_conv2')(x)\n x = MaxPooling2D((2, 2), strides=(2, 2), name='block1_pool')(x)\n\n # Block 2\n x = Convolution2D(128, 3, 3, activation='relu', border_mode='same', name='block2_conv1')(x)\n x = Convolution2D(128, 3, 3, activation='relu', border_mode='same', name='block2_conv2')(x)\n x = MaxPooling2D((2, 2), strides=(2, 2), name='block2_pool')(x)\n\n # Block 3\n x = Convolution2D(256, 3, 3, activation='relu', border_mode='same', name='block3_conv1')(x)\n x = Convolution2D(256, 3, 3, activation='relu', border_mode='same', name='block3_conv2')(x)\n x = Convolution2D(256, 3, 3, activation='relu', border_mode='same', name='block3_conv3')(x)\n x = MaxPooling2D((2, 2), strides=(2, 2), name='block3_pool')(x)\n\n # Block 4\n x = Convolution2D(512, 3, 3, activation='relu', border_mode='same', name='block4_conv1')(x)\n x = Convolution2D(512, 3, 3, activation='relu', border_mode='same', name='block4_conv2')(x)\n x = Convolution2D(512, 3, 3, activation='relu', border_mode='same', name='block4_conv3')(x)\n x = MaxPooling2D((2, 2), strides=(2, 2), name='block4_pool')(x)\n\n # Block 5\n x = Convolution2D(512, 3, 3, activation='relu', border_mode='same', name='block5_conv1')(x)\n x = Convolution2D(512, 3, 3, activation='relu', border_mode='same', name='block5_conv2')(x)\n x = Convolution2D(512, 3, 3, activation='relu', border_mode='same', name='block5_conv3')(x)\n x = MaxPooling2D((2, 2), strides=(2, 2), name='block5_pool')(x)\n\n # Block JD\n # x = Flatten(name='flatten')(x)\n # x = Dense(4096, activation='relu', name='fc1')(x)\n\n if include_top:\n # Classification block\n x = Flatten(name='flatten')(x)\n x = Dense(4096, activation='relu', name='fc1')(x)\n x = Dense(4096, activation='relu', name='fc2')(x)\n x = Dense(1000, activation='relu', name='predictions')(x)\n\n # Create model\n model = Model(img_input, x)\n\n # load weights\n if weights == 'imagenet':\n print('K.image_dim_ordering:', K.image_dim_ordering())\n if K.image_dim_ordering() == 'th':\n if include_top:\n weights_path = get_file('vgg16_weights_th_dim_ordering_th_kernels.h5',\n TH_WEIGHTS_PATH,\n cache_subdir='models')\n else:\n weights_path = get_file('vgg16_weights_th_dim_ordering_th_kernels_notop.h5',\n TH_WEIGHTS_PATH_NO_TOP,\n cache_subdir='models')\n model.load_weights(weights_path)\n if K.backend() == 'tensorflow':\n warnings.warn('You are using the TensorFlow backend, yet you '\n 'are using the Theano '\n 'image dimension ordering convention '\n '(`image_dim_ordering=\"th\"`). 
'\n 'For best performance, set '\n '`image_dim_ordering=\"tf\"` in '\n 'your Keras config '\n 'at ~/.keras/keras.json.')\n convert_all_kernels_in_model(model)\n else:\n if include_top:\n # weights_path = get_file('vgg16_weights_tf_dim_ordering_tf_kernels.h5',\n # TF_WEIGHTS_PATH,\n # cache_subdir='models')\n weights_path = TF_WEIGHTS_PATH\n else:\n # weights_path = get_file('vgg16_weights_tf_dim_ordering_tf_kernels_notop.h5',\n # TF_WEIGHTS_PATH_NO_TOP,\n # cache_subdir='models')\n weights_path = TF_WEIGHTS_PATH_NO_TOP\n # weights_path = TF_WEIGHTS_PATH\n\n model.load_weights(weights_path)\n if K.backend() == 'theano':\n convert_all_kernels_in_model(model)\n return model\n\n\nif __name__ == '__main__':\n model = VGG16(include_top=True, weights='imagenet')\n\n img_path = 'elephant.jpg'\n img = image.load_img(img_path, target_size=(224, 224))\n x = image.img_to_array(img)\n x = np.expand_dims(x, axis=0)\n x = preprocess_input(x)\n print('Input image shape:', x.shape)\n\n preds = model.predict(x)\n print('Predicted:', decode_predictions(preds))\n"
] | [
[
"numpy.expand_dims"
]
] |
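The VGG16 file above targets the Keras 1 API (`Convolution2D`, `image_dim_ordering`) and hard-codes local weight paths. As a hedged, present-day substitute, explicitly a different API rather than the author's code, `tf.keras.applications` provides the same architecture with ImageNet weights; `include_top=False` drops the fully connected head, matching the `*_notop` weights referenced above, and `elephant.jpg` stands in for any local test image.

```python
import numpy as np
from tensorflow.keras.applications.vgg16 import VGG16, preprocess_input
from tensorflow.keras.preprocessing import image

# Convolutional blocks only, with global average pooling over block5 features.
model = VGG16(weights="imagenet", include_top=False, pooling="avg")

img = image.load_img("elephant.jpg", target_size=(224, 224))  # any local image
x = preprocess_input(np.expand_dims(image.img_to_array(img), axis=0))
features = model.predict(x)
print(features.shape)  # (1, 512) pooled block5 features
```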
wannaphong/HoogBERTa | [
"36ef588314c2c1934a652eb330e88a612ce151ee"
] | [
"hoogberta/trainer/models/multitask_tagger.py"
] | [
"from ..utils import build_dataloader\r\nfrom . import register_model\r\n\r\nimport time\r\nimport torch.nn as nn\r\nfrom torch.nn import ModuleList\r\nimport torch\r\n\r\nfrom fairseq.data.dictionary import Dictionary\r\nfrom fairseq.data.data_utils import collate_tokens\r\nfrom fairseq.models.roberta import RobertaModel\r\n\r\ninit_funcs = {\r\n 1: lambda x: torch.nn.init.normal_(x, mean=0., std=1.), # can be bias\r\n 2: lambda x: torch.nn.init.xavier_normal_(x, gain=1.), # can be weight\r\n 3: lambda x: torch.nn.init.xavier_uniform_(x, gain=1.), # can be conv1D filter\r\n 4: lambda x: torch.nn.init.xavier_uniform_(x, gain=1.), # can be conv2D filter\r\n \"default\": lambda x: torch.nn.init.constant(x, 1.), # everything else\r\n}\r\n\r\ndef init_all(model, init_funcs):\r\n for p in model.parameters():\r\n init_func = init_funcs.get(len(p.shape), init_funcs[\"default\"])\r\n init_func(p)\r\n\r\n@register_model(\"multitask-tagger\")\r\nclass MultiTaskTagger(nn.Module):\r\n\r\n @staticmethod\r\n def add_args(parser):\r\n \"\"\"Add model-specific arguments to the parser.\"\"\"\r\n parser.add_argument('--dropout',type=float, default=0.1, help='fc layer dropout')\r\n parser.add_argument('--outputdim',type=str, default=\"10,10,10\", help='list of output dim separated by a comma (,)')\r\n parser.add_argument('--feature-layer', type=int, default=-1,help='select feature layer (default : -1)')\r\n\r\n\r\n return parser\r\n\r\n def __init__(self,args, output_dim = None):\r\n super().__init__()\r\n self.args = args\r\n self.base_path = args.base_path\r\n self.encoder = self.load_pretrained(args.pretrained)\r\n self.bert = self.encoder\r\n\r\n embedding_dim = self.bert.model.encoder.sentence_encoder.embed_tokens.weight.size()[1]\r\n\r\n self.fc_pos = nn.Linear(embedding_dim, output_dim[0])\r\n self.fc_ne = nn.Linear(embedding_dim, output_dim[1])\r\n self.fc_sent = nn.Linear(embedding_dim, output_dim[2])\r\n self.dropout = nn.Dropout(args.dropout)\r\n #For backward compatible issue\r\n self.fc = nn.Linear(10,10)\r\n\r\n\r\n def load_pretrained(self,pretrained=\"lst\"):\r\n if pretrained == \"lst\":\r\n roberta = RobertaModel.from_pretrained(self.base_path + '/models/hoogberta_base/', checkpoint_file='checkpoint_best.pt',bpe=\"subword_nmt\", bpe_codes=self.base_path + \"/models/hoogberta_base/th_18M.50000.bpe\",data_name_or_path=\".:\")\r\n return roberta\r\n\r\n return None\r\n\r\n def forward(self, token_batch):\r\n \"\"\"\r\n token_batch : Tensor of token ids (long Tensor) [batch , seq length]\r\n \"\"\"\r\n \r\n all_layers = self.bert.extract_features(token_batch, return_all_hiddens=True)\r\n last_layer = all_layers[self.args.feature_layer]\r\n #ic(\"ALL Layer size\",all_layers[-1].size())\r\n\r\n embedded = last_layer\r\n pos_pred = self.fc_pos(self.dropout(embedded))\r\n ne_pred = self.fc_ne(self.dropout(embedded))\r\n sent_pred = self.fc_sent(self.dropout(embedded))\r\n \r\n #predictions = [sent len, batch size, output dim]\r\n \r\n return pos_pred, ne_pred, sent_pred\r\n\r\n\r\n\r\nif __name__ == \"__main__\":\r\n print(\"hi\")"
] | [
[
"torch.nn.Linear",
"torch.nn.Dropout",
"torch.nn.init.constant",
"torch.nn.init.xavier_uniform_",
"torch.nn.init.normal_",
"torch.nn.init.xavier_normal_"
]
] |
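The shape-keyed initialisation pattern at the top of the file above can be demonstrated without the fairseq checkpoint. The sketch below uses a toy `nn.Sequential` model (hypothetical) and substitutes `torch.nn.init.constant_` for the deprecated `torch.nn.init.constant` used in the source.

```python
import torch
import torch.nn as nn

# Initialisers keyed by the number of dimensions of each parameter tensor.
init_funcs = {
    1: lambda x: torch.nn.init.normal_(x, mean=0.0, std=1.0),   # biases
    2: lambda x: torch.nn.init.xavier_normal_(x, gain=1.0),     # weight matrices
    "default": lambda x: torch.nn.init.constant_(x, 1.0),       # everything else
}

def init_all(model, funcs):
    """Apply the matching initialiser to every parameter, as in the source."""
    for p in model.parameters():
        funcs.get(len(p.shape), funcs["default"])(p)

toy = nn.Sequential(nn.Linear(4, 8), nn.ReLU(), nn.Linear(8, 2))
init_all(toy, init_funcs)
print(toy[0].bias)  # normally-initialised 1-D bias
```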
osgirl/chs-s111 | [
"a88a3de20868d0a0884498fffe3c1a1ea106bd12"
] | [
"scripts/s111_add_irregular_grid.py"
] | [
"#******************************************************************************\n#\n#******************************************************************************\nimport argparse\nimport h5py\nimport numpy\nimport iso8601\nimport pytz\nimport netCDF4\nimport math\n\nms2Knots = 1.943844\n\n#****************************************************************************** \ndef create_xy_group(hdf_file, latc, lonc):\n \"\"\" Create the XY group containing the position information.\n\n :param hdf_file: The S-111 HDF file.\n :param latc: A list of latitude values.\n :param lonc: A list of longitude values.\n :returns: A tuple containing minimum x, minimum y, maximum x, maximum y values from the given lists.\n \"\"\"\n\n numberOfLat = latc.shape[0]\n numberOfLon = lonc.shape[0]\n\n xCoordinates = numpy.empty((1, numberOfLat), dtype=numpy.float64)\n yCoordinates = numpy.empty((1, numberOfLon), dtype=numpy.float64)\n minX = minY = maxX = maxY = None\n\n for index in range(0, numberOfLat):\n latitude = latc[index]\n longitude = lonc[index]\n\n #Keep track of the data extents so we can update the metadata.\n if minX == None:\n minX = maxX = longitude\n minY = maxY = latitude\n else:\n minX = min(minX, longitude)\n maxX = max(maxX, longitude)\n minY = min(minY, latitude)\n maxY = max(maxY, latitude)\n\n xCoordinates[0][index] = longitude\n yCoordinates[0][index] = latitude\n\n\n #Add the 'Group XY' to store the position information.\n groupName = 'Group XY'\n print(\"Creating\", groupName, \"dataset.\")\n xy_group = hdf_file.create_group(groupName)\n\n #Add the x and y datasets to the xy group.\n xy_group.create_dataset('X', (1, numberOfLat), dtype=numpy.float64, data=xCoordinates)\n xy_group.create_dataset('Y', (1, numberOfLon), dtype=numpy.float64, data=yCoordinates) \n\n return (minX, minY, maxX, maxY)\n\n\n#****************************************************************************** \ndef create_direction_speed(group, ua, va):\n \"\"\" Create the speed and direction datasets.\n\n :param group: The HDF group to add the speed and direction datasets to.\n :param ua: List of velocity values along the x axis in metres per second.\n :param va: List of velocity values along the y axis in metres per second.\n :returns: A tuple containing the minimum and maximum speed values added.\n \"\"\"\n\n min_speed = None\n max_speed = None\n\n numberOfVaValues = len(va)\n\n directions = numpy.empty((1, numberOfVaValues), dtype=numpy.float64)\n speeds = numpy.empty((1, numberOfVaValues), dtype=numpy.float64)\n for index in range(0, numberOfVaValues):\n\n v_ms = va[index]\n u_ms = ua[index]\n\n #Convert from metres per second to knots\n v_knot = v_ms * ms2Knots\n u_knot = u_ms * ms2Knots\n\n windSpeed = math.sqrt(math.pow(u_knot, 2) + math.pow(v_knot, 2))\n windDirectionRadians = math.atan2(v_knot, u_knot)\n windDirectionDegrees = math.degrees(windDirectionRadians)\n windDirectionNorth = 90.0 - windDirectionDegrees\n\n #The direction must always be positive.\n if windDirectionNorth < 0.0:\n windDirectionNorth += 360.0\n\n directions[0][index] = windDirectionNorth\n speeds[0][index] = windSpeed\n\n if min_speed == None:\n min_speed = max_speed = windSpeed\n else:\n min_speed = min(min_speed, windSpeed)\n max_speed = max(max_speed, windSpeed)\n\n #Create the datasets.\n direction_dataset = group.create_dataset('Direction', (1, numberOfVaValues), dtype=numpy.float64, data=directions)\n speed_dataset = group.create_dataset('Speed', (1, numberOfVaValues), dtype=numpy.float64, data=speeds)\n\n return min_speed, 
max_speed\n\n\n#****************************************************************************** \ndef create_data_groups(hdf_file, times, ua, va):\n \"\"\"Create the data groups in the S-111 file. (One group for each time value)\n\n :param hdf_file: The S-111 HDF file.\n :param times: The list of time values from the source data.\n :param ua: List of velocity values along the x axis in metres per second. (An array of values per time)\n :param va: List of velocity values along the y axis in metres per second. (An array of values per time)\n :returns: A tuple containing the minimum time, maximum time, time interval, minimum speed, and maximum speed of the source data.\n \"\"\"\n\n numberOfTimes = times.shape[0]\n \n interval = None\n minTime = maxTime = None\n minSpeed = maxSpeed = None\n for index in range(0, numberOfTimes):\n\n newGroupName = 'Group ' + str(index + 1)\n print(\"Creating\", newGroupName, \"dataset.\")\n newGroup = hdf_file.create_group(newGroupName)\n \n groupTitle = 'Irregular Grid at DateTime ' + str(index + 1)\n newGroup.attrs.create('Title', groupTitle.encode())\n\n #Store the start time.\n strVal = times[index].tostring().decode()\n timeVal = iso8601.parse_date(strVal)\n timeVal = timeVal.astimezone(pytz.utc)\n\n #Keep track of the min/max time so we can update the metadata\n if minTime == None:\n minTime = maxTime = timeVal\n else:\n minTime = min(minTime, timeVal)\n maxTime = max(maxTime, timeVal)\n\n strVal = timeVal.strftime(\"%Y%m%dT%H%M%SZ\")\n newGroup.attrs.create('DateTime', strVal.encode())\n\n groupMinSpeed, groupMaxSpeed = create_direction_speed(newGroup, ua[index], va[index])\n\n #Keep track of the min/max speed so we can update the metadata\n if minSpeed == None:\n minSpeed = groupMinSpeed\n maxSpeed = groupMaxSpeed\n else:\n minSpeed = min(minSpeed, groupMinSpeed)\n maxSpeed = max(maxSpeed, groupMaxSpeed)\n\n #Figure out what the interval is between the times (use only the first)\n if numberOfTimes > 1:\n\n strVal = times[0].tostring().decode()\n firstTimeVal = iso8601.parse_date(strVal)\n firstTimeVal = firstTimeVal.astimezone(pytz.utc)\n\n strVal = times[1].tostring().decode()\n secondTimeVal = iso8601.parse_date(strVal)\n secondTimeVal = secondTimeVal.astimezone(pytz.utc)\n\n interval = secondTimeVal - firstTimeVal\n\n return (minTime, maxTime, interval, minSpeed, maxSpeed)\n\n\n#****************************************************************************** \ndef update_metadata(hdf_file, numberOfTimes, numberOfValues, minTime, maxTime, interval, minX, minY, maxX, maxY, minSpeed, maxSpeed):\n \"\"\"Update the S-111 file's metadata.\n\n :param hdf_file: The S-111 HDF file.\n :param numberOfTimes: The number of times in the source data.\n :param numberOfValues: The number of values per record in the source data.\n :param minTime: The minimum temporal extents of the source data.\n :param maxTime: The maximum temporal extents of the source data.\n :param interval: The time interval between records of the source data.\n :param minX: The minimum x coordinate of the source data.\n :param minY: The minimum y coordinate of the source data.\n :param maxX: The maximum x coordinate of the source data.\n :param maxY: The maximum y coordinate of the source data.\n :param minSpeed: The minimum surface speed of the source data.\n :param maxSpeed: The maximum surface speed of the source data.\n \"\"\"\n\n #Set the correct coding format.\n hdf_file.attrs.create('dataCodingFormat', 3, dtype=numpy.int64)\n\n #Set the number of times.\n 
hdf_file.attrs.create('numberOfTimes', numberOfTimes, dtype=numpy.int64)\n\n #Set the number of nodes.\n hdf_file.attrs.create('numberOfNodes', numberOfValues, dtype=numpy.int64)\n \n #Set the time interval (if we have one)\n if interval != None:\n intervalInSeconds = interval.total_seconds()\n hdf_file.attrs.create('timeRecordInterval', intervalInSeconds, dtype=numpy.int64)\n\n #Update the temporal extents in the metadata.\n strVal = minTime.strftime(\"%Y%m%dT%H%M%SZ\")\n hdf_file.attrs.create('dateTimeOfFirstRecord', strVal.encode())\n strVal = maxTime.strftime(\"%Y%m%dT%H%M%SZ\")\n hdf_file.attrs.create('dateTimeOfLastRecord', strVal.encode())\n\n #Update the geo coverage in the metadata. (These are not set anymore... since 1.09)\n #hdf_file.attrs.create('westBoundLongitude', minX, dtype=numpy.float64)\n #hdf_file.attrs.create('eastBoundLongitude', maxX, dtype=numpy.float64)\n #hdf_file.attrs.create('southBoundLatitude', minY, dtype=numpy.float64)\n #hdf_file.attrs.create('northBoundLatitude', maxY, dtype=numpy.float64)\n\n #Update the surface speed values.\n if 'minSurfCurrentSpeed' in hdf_file.attrs:\n minSpeed = min(minSpeed, hdf_file.attrs['minSurfCurrentSpeed'])\n\n if 'maxSurfCurrentSpeed' in hdf_file.attrs:\n maxSpeed = max(maxSpeed, hdf_file.attrs['maxSurfCurrentSpeed'])\n\n hdf_file.attrs.create('minSurfCurrentSpeed', minSpeed)\n hdf_file.attrs.create('maxSurfCurrentSpeed', maxSpeed)\n\n\n#****************************************************************************** \ndef create_command_line():\n \"\"\"Create and initialize the command line parser.\n \n :returns: The command line parser.\n \"\"\"\n\n parser = argparse.ArgumentParser(description='Add S-111 irregular grid Dataset')\n\n parser.add_argument('-g', '--grid-file', help='The netcdf file containing the irregular grid data.', required=True)\n parser.add_argument(\"inOutFile\", nargs=1)\n\n return parser\n\n\n#****************************************************************************** \ndef main():\n\n #Create the command line parser.\n parser = create_command_line()\n\n #Parse the command line.\n results = parser.parse_args()\n \n #open the HDF5 file.\n with h5py.File(results.inOutFile[0], \"r+\") as hdf_file:\n\n #Open the grid file.\n with netCDF4.Dataset(results.grid_file, \"r\", format=\"NETCDF4\") as grid_file:\n\n #Grab the data that we need.\n times = grid_file.variables['Times']\n latc = grid_file.variables['latc']\n lonc = grid_file.variables['lonc']\n ua = grid_file.variables['ua']\n va = grid_file.variables['va']\n\n #Verify that these arrays are the same size.\n numberOfTimes = times.shape[0]\n numberOfVaSeries = va.shape[0]\n numberOfUaSeries = ua.shape[0]\n if numberOfTimes != numberOfVaSeries or numberOfTimes != numberOfUaSeries:\n raise Exception('The number of time values does not match the number of speed and distance values.')\n\n #Verify that these arrays are the same size.\n numberOfLat = latc.shape[0]\n numberOfLon = lonc.shape[0]\n numberOfVaValues = va.shape[1]\n numberOfUaValues = ua.shape[1]\n if numberOfLat != numberOfLon:\n raise Exception('The input latitude and longitude array are different sizes.')\n elif numberOfLat != numberOfVaValues or numberOfLat != numberOfUaValues:\n raise Exception('The number of positions does not match the number of speed and distance values.')\n\n #Verify that the input data is in the correct units.\n vaUnits = va.getncattr('units')\n uaUnits = ua.getncattr('units')\n if vaUnits != uaUnits and vaUnits != 'metres s-1':\n raise Exception('The input velocity 
data is stored in an unsupported unit.')\n\n print(\"Adding irregular grid dataset\")\n print(\"Number of timestamps in source file:\", numberOfTimes)\n print(\"Number of records for each timestamp:\", numberOfLat)\n\n #Add the 'Group XY' to store the position information.\n minX, minY, maxX, maxY = create_xy_group(hdf_file, latc, lonc)\n \n #Add all of the groups\n minTime, maxTime, interval, minSpeed, maxSpeed = create_data_groups(hdf_file, times, ua, va)\n\n #Update the s-111 file's metadata\n update_metadata(hdf_file, numberOfTimes, numberOfVaValues,\n minTime, maxTime, interval, minX, minY, maxX, maxY,\n minSpeed, maxSpeed)\n\n print(\"Dataset successfully added\")\n\n #Flush any edits out.\n hdf_file.flush()\n\n\nif __name__ == \"__main__\":\n main()\n"
] | [
[
"numpy.empty"
]
] |
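The core arithmetic of `create_direction_speed` above, converting u/v velocity components in m/s into a speed in knots and a direction in degrees clockwise from north, reduces to a few lines; the helper name below is made up for illustration.

```python
import math

ms2knots = 1.943844

def current_speed_direction(u_ms, v_ms):
    """Speed in knots and direction in degrees clockwise from north,
    following the same conversion as the S-111 script above."""
    u_kn, v_kn = u_ms * ms2knots, v_ms * ms2knots
    speed = math.hypot(u_kn, v_kn)
    direction = 90.0 - math.degrees(math.atan2(v_kn, u_kn))
    return speed, direction % 360.0  # keep the direction positive

print(current_speed_direction(0.5, 0.5))  # ~1.37 kn, 45 degrees (toward the NE)
```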
neuroevolution-ai/NeuroEvolution-CTRNN_new | [
"23fedf85f9344c7bb6b926c4582cc6064e387c0a"
] | [
"tests/test_i_layered_brain.py"
] | [
"import pytest\nfrom attr import s\nimport numpy as np\nfrom gym import Space\nfrom gym.spaces import Box\n\nfrom brains.i_layer_based_brain import ILayerBasedBrain, LayerdConfigClass\nfrom tools.configurations import ILayerBasedBrainCfg\n\n\n@s(auto_attribs=True, frozen=True, slots=True)\nclass BrainParam:\n weight_ih: np.ndarray\n weight_hh: np.ndarray\n bias_h: np.ndarray\n hidden: np.ndarray\n weight_ho: np.ndarray\n\n\nclass LayerTestCase(ILayerBasedBrain[ILayerBasedBrainCfg]):\n\n @staticmethod\n def get_number_hidden_values():\n return 1\n\n def __init__(self, input_space: Space, output_space: Space, individual: np.ndarray, config: LayerdConfigClass):\n super().__init__(input_space, output_space, individual, config)\n\n @staticmethod\n def get_number_gates():\n return 1\n\n @staticmethod\n def layer_step(layer_input: np.ndarray, weight_ih, weight_hh, bias_h, hidden):\n result = np.dot(weight_ih[0], layer_input) + np.dot(weight_hh[0], hidden[0]) + bias_h[0]\n return [[result], result]\n\n\nclass TestILayeredBrain:\n\n @pytest.fixture\n def brain_param_simple(self):\n return BrainParam(\n weight_ih=np.array([[[[1, 2], [3, 4]]]]),\n weight_hh=np.array([[[[5, 6], [7, 8]]]]),\n bias_h=np.array([[[9, 10]]]),\n hidden=np.array([[[11, 12]]]),\n weight_ho=np.array([[13, 14], [15, 16]])\n )\n\n @pytest.fixture\n def brain_param_identity(self):\n return BrainParam(\n weight_ih=np.array([[[[1, 0], [0, 1]]]]),\n weight_hh=np.array([[[[1, 0], [0, 1]]]]),\n bias_h=np.array([[[0, 0]]]),\n hidden=np.array([[[0, 0]]]),\n weight_ho=np.array([[1, 0], [0, 1]])\n )\n\n @staticmethod\n def param_to_genom(param):\n return np.concatenate(\n [param.weight_ih.flatten(),\n param.weight_hh.flatten(),\n param.bias_h.flatten(),\n param.hidden.flatten(),\n param.weight_ho.flatten()\n ])\n\n def test_individual(self, layer_config, brain_param_simple, box2d):\n bp = brain_param_simple\n brain = LayerTestCase(input_space=box2d, output_space=box2d, individual=self.param_to_genom(bp),\n config=layer_config)\n assert np.array_equal(bp.weight_ih, brain.weight_ih)\n assert np.array_equal(bp.weight_hh, brain.weight_hh)\n assert np.array_equal(bp.bias_h, brain.bias_h)\n assert np.array_equal(bp.hidden, brain.hidden)\n assert np.array_equal(bp.weight_ho, brain.weight_ho)\n\n def test_step(self, layer_config, brain_param_identity, box2d):\n bp = brain_param_identity\n brain = LayerTestCase(input_space=box2d, output_space=box2d, individual=self.param_to_genom(bp),\n config=layer_config)\n ob = np.array([1, 2])\n assert np.allclose(brain.hidden, np.zeros([2, 2]))\n res = brain.step(ob)\n # due to identity matrices after one iteration the internal state is now exactly the observersion\n assert np.allclose(brain.hidden, ob)\n # due to identity matrices after one iteration the output is just the input, but with tanh.\n assert np.allclose(res, ob)\n brain.step(ob)\n print(brain.hidden)\n print(ob+ob)\n assert np.allclose(brain.hidden, ob + ob)\n"
] | [
[
"numpy.array",
"numpy.dot",
"numpy.array_equal",
"numpy.zeros",
"numpy.allclose"
]
] |
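The identity-matrix property that `test_step` above relies on, namely that with `W_ih = W_hh = I` and zero bias the hidden state simply accumulates the observation each step, can be checked in plain NumPy with nothing but toy arrays:

```python
import numpy as np

W_ih = np.eye(2)
W_hh = np.eye(2)
b = np.zeros(2)
h = np.zeros(2)
ob = np.array([1.0, 2.0])

# Same update as layer_step above: h_new = W_ih @ x + W_hh @ h + b
for _ in range(2):
    h = W_ih @ ob + W_hh @ h + b

print(h)  # [2. 4.] == ob + ob, which is what the second step asserts
```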
FMsunyh/keras-retinanet | [
"cb86a987237d3f6bd504004e2b186cf65606c890"
] | [
"keras_retinanet/utils/image.py"
] | [
"\"\"\"\nCopyright 2017-2018 Fizyr (https://fizyr.com)\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n\"\"\"\n\nfrom __future__ import division\nimport keras\nimport numpy as np\nimport cv2\nfrom PIL import Image\n\nfrom .transform import change_transform_origin\n\n\ndef read_image_bgr(path):\n \"\"\" Read an image in BGR format.\n\n Args\n path: Path to the image.\n \"\"\"\n image = np.asarray(Image.open(path).convert('RGB'))\n try:\n image = np.asarray(Image.open(path).convert('RGB'))\n except Exception as ex:\n print(path)\n\n return image[:, :, ::-1].copy()\n\n\ndef preprocess_image(x, mode='caffe'):\n \"\"\" Preprocess an image by subtracting the ImageNet mean.\n\n Args\n x: np.array of shape (None, None, 3) or (3, None, None).\n mode: One of \"caffe\" or \"tf\".\n - caffe: will zero-center each color channel with\n respect to the ImageNet dataset, without scaling.\n - tf: will scale pixels between -1 and 1, sample-wise.\n\n Returns\n The input with the ImageNet mean subtracted.\n \"\"\"\n # mostly identical to \"https://github.com/keras-team/keras-applications/blob/master/keras_applications/imagenet_utils.py\"\n # except for converting RGB -> BGR since we assume BGR already\n x = x.astype(keras.backend.floatx())\n if mode == 'tf':\n x /= 127.5\n x -= 1.\n elif mode == 'caffe':\n if keras.backend.image_data_format() == 'channels_first':\n if x.ndim == 3:\n x[0, :, :] -= 103.939\n x[1, :, :] -= 116.779\n x[2, :, :] -= 123.68\n else:\n x[:, 0, :, :] -= 103.939\n x[:, 1, :, :] -= 116.779\n x[:, 2, :, :] -= 123.68\n else:\n x[..., 0] -= 103.939\n x[..., 1] -= 116.779\n x[..., 2] -= 123.68\n\n return x\n\n\ndef adjust_transform_for_image(transform, image, relative_translation):\n \"\"\" Adjust a transformation for a specific image.\n\n The translation of the matrix will be scaled with the size of the image.\n The linear part of the transformation will adjusted so that the origin of the transformation will be at the center of the image.\n \"\"\"\n height, width, channels = image.shape\n\n result = transform\n\n # Scale the translation with the image size if specified.\n if relative_translation:\n result[0:2, 2] *= [width, height]\n\n # Move the origin of transformation.\n result = change_transform_origin(transform, (0.5 * width, 0.5 * height))\n\n return result\n\n\nclass TransformParameters:\n \"\"\" Struct holding parameters determining how to apply a transformation to an image.\n\n Args\n fill_mode: One of: 'constant', 'nearest', 'reflect', 'wrap'\n interpolation: One of: 'nearest', 'linear', 'cubic', 'area', 'lanczos4'\n cval: Fill value to use with fill_mode='constant'\n data_format: Same as for keras.preprocessing.image.apply_transform\n relative_translation: If true (the default), interpret translation as a factor of the image size.\n If false, interpret it as absolute pixels.\n \"\"\"\n def __init__(\n self,\n fill_mode = 'nearest',\n interpolation = 'linear',\n cval = 0,\n data_format = None,\n relative_translation = True,\n ):\n self.fill_mode = fill_mode\n self.cval = cval\n self.interpolation = 
interpolation\n self.relative_translation = relative_translation\n\n if data_format is None:\n data_format = keras.backend.image_data_format()\n self.data_format = data_format\n\n if data_format == 'channels_first':\n self.channel_axis = 0\n elif data_format == 'channels_last':\n self.channel_axis = 2\n else:\n raise ValueError(\"invalid data_format, expected 'channels_first' or 'channels_last', got '{}'\".format(data_format))\n\n def cvBorderMode(self):\n if self.fill_mode == 'constant':\n return cv2.BORDER_CONSTANT\n if self.fill_mode == 'nearest':\n return cv2.BORDER_REPLICATE\n if self.fill_mode == 'reflect':\n return cv2.BORDER_REFLECT_101\n if self.fill_mode == 'wrap':\n return cv2.BORDER_WRAP\n\n def cvInterpolation(self):\n if self.interpolation == 'nearest':\n return cv2.INTER_NEAREST\n if self.interpolation == 'linear':\n return cv2.INTER_LINEAR\n if self.interpolation == 'cubic':\n return cv2.INTER_CUBIC\n if self.interpolation == 'area':\n return cv2.INTER_AREA\n if self.interpolation == 'lanczos4':\n return cv2.INTER_LANCZOS4\n\n\ndef apply_transform(matrix, image, params):\n \"\"\"\n Apply a transformation to an image.\n\n The origin of transformation is at the top left corner of the image.\n\n The matrix is interpreted such that a point (x, y) on the original image is moved to transform * (x, y) in the generated image.\n Mathematically speaking, that means that the matrix is a transformation from the transformed image space to the original image space.\n\n Args\n matrix: A homogeneous 3 by 3 matrix holding representing the transformation to apply.\n image: The image to transform.\n params: The transform parameters (see TransformParameters)\n \"\"\"\n if params.channel_axis != 2:\n image = np.moveaxis(image, params.channel_axis, 2)\n\n output = cv2.warpAffine(\n image,\n matrix[:2, :],\n dsize = (image.shape[1], image.shape[0]),\n flags = params.cvInterpolation(),\n borderMode = params.cvBorderMode(),\n borderValue = params.cval,\n )\n\n if params.channel_axis != 2:\n output = np.moveaxis(output, 2, params.channel_axis)\n return output\n\n\ndef resize_image(img, min_side=800, max_side=1333):\n \"\"\" Resize an image such that the size is constrained to min_side and max_side.\n\n Args\n min_side: The image's min side will be equal to min_side after resizing.\n max_side: If after resizing the image's max side is above max_side, resize until the max side is equal to max_side.\n\n Returns\n A resized image.\n \"\"\"\n (rows, cols, _) = img.shape\n\n smallest_side = min(rows, cols)\n\n # rescale the image so the smallest side is min_side\n scale = min_side / smallest_side\n\n # check if the largest side is now greater than max_side, which can happen\n # when images have a large aspect ratio\n largest_side = max(rows, cols)\n if largest_side * scale > max_side:\n scale = max_side / largest_side\n\n # resize the image with the computed scale\n img = cv2.resize(img, None, fx=scale, fy=scale)\n\n return img, scale\n"
] | [
[
"numpy.moveaxis"
]
] |
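The scaling rule in `resize_image` above (scale the short side to `min_side`, then cap the scale so the long side never exceeds `max_side`) is worth isolating; the helper name below is illustrative, not part of the library.

```python
def compute_resize_scale(rows, cols, min_side=800, max_side=1333):
    """Return the resize factor used by resize_image above."""
    scale = min_side / min(rows, cols)          # short side -> min_side
    if max(rows, cols) * scale > max_side:      # cap for extreme aspect ratios
        scale = max_side / max(rows, cols)
    return scale

print(compute_resize_scale(480, 1920))  # capped by max_side: ~0.694
```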
Columbine21/THUIAR-ERC | [
"90e928e1ce777152e459dbc487acf04c32cbc645"
] | [
"TextCnn/train_iemocap.py"
] | [
"from tqdm import tqdm\nimport pandas as pd\nimport numpy as np, argparse, time, pickle, random, os, datetime\n\nimport torch\n\nimport torch.optim as optim\nfrom model import MaskedNLLLoss, CnnModel\nfrom dataloader import IEMOCAPDataLoader\n\n\nfrom sklearn.metrics import f1_score, confusion_matrix, accuracy_score, classification_report\n\ndef setup_seed(seed):\n \"\"\" Manually Fix the random seed to get deterministic results.\n \"\"\"\n torch.manual_seed(seed)\n torch.cuda.manual_seed(seed)\n torch.cuda.manual_seed_all(seed)\n np.random.seed(seed)\n random.seed(seed)\n torch.benchmark = False\n torch.backends.cudnn.deterministic = True\n\ndef train_or_eval_model(model, loss_function, dataloader, epoch, optimizer=None, train=False):\n losses, preds, labels, masks, losses_sense = [], [], [], [], []\n \n max_sequence_len = []\n\n assert not train or optimizer!=None\n if train:\n model.train()\n else:\n model.eval()\n \n with tqdm(dataloader) as td:\n for data in td:\n\n if train:\n optimizer.zero_grad()\n \n textf, text_len, visuf, acouf, party_mask, mask, label = [d.cuda() for d in data[:-1]] if args.cuda else data[:-1]\n \n log_prob = model(textf, text_len, visuf, acouf, party_mask, mask)\n\n lp_ = log_prob.transpose(0,1).contiguous().view(-1, log_prob.size()[2]) # batch*seq_len, n_classes\n labels_ = label.view(-1) # batch*seq_len\n loss = loss_function(lp_, labels_, mask)\n\n pred_ = torch.argmax(lp_,1) # batch*seq_len\n preds.append(pred_.data.cpu().numpy())\n labels.append(labels_.data.cpu().numpy())\n masks.append(mask.view(-1).cpu().numpy())\n losses.append(loss.item()*masks[-1].sum())\n\n if train:\n total_loss = loss\n total_loss.backward()\n \n optimizer.step()\n\n if preds!=[]:\n preds = np.concatenate(preds)\n labels = np.concatenate(labels)\n masks = np.concatenate(masks)\n else:\n return float('nan'), float('nan'), float('nan'), [], [], [], float('nan'),[]\n\n avg_loss = round(np.sum(losses)/np.sum(masks), 4)\n avg_sense_loss = round(np.sum(losses_sense)/np.sum(masks), 4)\n\n avg_accuracy = round(accuracy_score(labels,preds, sample_weight=masks)*100, 2)\n avg_fscore = round(f1_score(labels,preds, sample_weight=masks, average='weighted')*100, 2)\n \n return avg_loss, avg_accuracy, labels, preds, masks, [avg_fscore]\n\n\ndef parse_args():\n parser = argparse.ArgumentParser()\n parser.add_argument('--num_workers', type=int, default=0,\n help='num workers of loading data')\n \n # dataloader settings \n parser.add_argument('--batch-size', type=int, default=16, metavar='BS', help='batch size')\n parser.add_argument('--vocabPath', type=str, default='./dataset/IEMOCAP_vocab.json')\n parser.add_argument('--data_path', type=str, default='./dataset/IEMOCAP_features.pkl')\n\n # model settings.\n parser.add_argument('--glove_embedding_path', type=str, default='./dataset/IEMOCAP_embedding.pkl',\n help='pretrain glove embedding path')\n parser.add_argument('--embedding_dim', type=int, default=300,\n help='embedding dims to use')\n parser.add_argument('--cnn_output_size', type=int, default=100)\n parser.add_argument('--cnn_filters', type=int, default=50)\n parser.add_argument('--cnn_kernel_sizes', type=list, default=[3,4,5])\n parser.add_argument('--cnn_dropout', type=float, default=0.5)\n parser.add_argument('--n_classes', type=int, default=6)\n # late fusion module.\n parser.add_argument('--lateFusionModule', type=str, default='concat')\n parser.add_argument('--input_features', type=tuple, default=(100, 100))\n parser.add_argument('--pre_fusion_hidden_dims', type=tuple, default=(24, 
7))\n parser.add_argument('--pre_fusion_dropout', type=float, default=0.4)\n parser.add_argument('--post_fusion_dropout', type=float, default=0.3)\n\n # train settings.\n parser.add_argument('--lr', type=float, default=0.001, metavar='LR', help='learning rate')\n parser.add_argument('--l2', type=float, default=0.0005, metavar='L2', help='L2 regularization weight')\n parser.add_argument('--epochs', type=int, default=100, metavar='E', help='number of epochs')\n\n return parser.parse_args()\n\nif __name__ == '__main__':\n args = parse_args()\n\n args.cuda = torch.cuda.is_available()\n if args.cuda:\n print('Running on GPU')\n else:\n print('Running on CPU')\n\n for seed in [1, 11, 111, 1111, 11111]:\n setup_seed(seed)\n args.seed = seed\n \n print(args)\n\n model = CnnModel(args)\n print('IEMOCAP CNN MODULE ...')\n\n if args.cuda:\n model.cuda()\n \n loss_weights = torch.FloatTensor([1/0.086747, 1/0.144406, 1/0.227883, 1/0.160585, 1/0.127711, 1/0.252668])\n \n loss_function = MaskedNLLLoss(loss_weights.cuda() if args.cuda else loss_weights)\n \n optimizer = optim.Adam(model.parameters(), lr=args.lr, weight_decay=args.l2)\n lf = open('logs/cnn_iemocap_logs.txt', 'a')\n \n dataloader = IEMOCAPDataLoader(args)\n\n valid_losses, valid_fscores = [], []\n test_fscores, test_accuracys, test_losses = [], [], []\n best_loss, best_label, best_pred, best_mask = None, None, None, None\n\n for e in range(args.epochs):\n start_time = time.time()\n train_loss, train_acc, _, _, _, train_fscore = train_or_eval_model(model, loss_function, dataloader['train'], e, optimizer, True)\n valid_loss, valid_acc, _, _, _, valid_fscore = train_or_eval_model(model, loss_function, dataloader['valid'], e)\n test_loss, test_acc, test_label, test_pred, test_mask, test_fscore = train_or_eval_model(model, loss_function, dataloader['test'], e)\n \n valid_losses.append(valid_loss)\n valid_fscores.append(valid_fscore)\n test_losses.append(test_loss)\n test_accuracys.append(test_acc)\n test_fscores.append(test_fscore)\n \n x = 'epoch: {}, train_loss: {}, acc: {}, fscore: {}, valid_loss: {}, acc: {}, fscore: {}, test_loss: {}, acc: {}, fscore: {}, time: {} sec'.format(e+1, train_loss, train_acc, train_fscore, valid_loss, valid_acc, valid_fscore, test_loss, test_acc, test_fscore, round(time.time()-start_time, 2))\n \n print (x)\n lf.write(x + '\\n')\n\n valid_fscores = np.array(valid_fscores).transpose()\n test_fscores = np.array(test_fscores).transpose() # [1, epoches]\n test_accuracys = np.array(test_accuracys).transpose() # [epoches]\n\n f1_score1 = test_fscores[0][np.argmin(valid_losses)]\n acc_score1 = test_accuracys[np.argmin(valid_losses)]\n f1_score2 = test_fscores[0][np.argmax(valid_fscores[0])]\n acc_score2 = test_accuracys[np.argmax(valid_fscores[0])]\n scores = [acc_score1, f1_score1, acc_score2, f1_score2]\n scores = [str(item) for item in scores]\n\n print ('Test Scores: Weighted F1')\n print('@Best Valid Loss: Test Acc: {}, Test F1: {}'.format(acc_score1, f1_score1))\n print('@Best Valid F1: Test Acc: {}, Test F1: {}'.format(acc_score2, f1_score2))\n\n rf = open('results/cnn_iemocap_results.txt', 'a')\n rf.write('\\t'.join(scores) + '\\t' + str(args) + '\\n')\n rf.close()"
] | [
[
"numpy.concatenate",
"numpy.array",
"torch.cuda.manual_seed",
"torch.cuda.manual_seed_all",
"numpy.argmin",
"numpy.random.seed",
"numpy.sum",
"torch.FloatTensor",
"sklearn.metrics.accuracy_score",
"torch.manual_seed",
"torch.cuda.is_available",
"numpy.argmax",
"sklearn.metrics.f1_score",
"torch.argmax"
]
] |
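The masked scoring used in `train_or_eval_model` above passes the padding mask as `sample_weight`, so padded positions contribute nothing to accuracy or weighted F1. A small self-contained sketch with made-up labels:

```python
import numpy as np
from sklearn.metrics import accuracy_score, f1_score

labels = np.array([0, 1, 2, 1, 0, 0])
preds  = np.array([0, 1, 1, 1, 0, 2])
mask   = np.array([1, 1, 1, 1, 0, 0])  # last two positions are padding

# Only the four unmasked positions are scored.
print(round(accuracy_score(labels, preds, sample_weight=mask) * 100, 2))
print(round(f1_score(labels, preds, sample_weight=mask, average="weighted") * 100, 2))
```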
varenius/salsa | [
"2ddb4c34943d85aecebdef8745cc64c2daa4b8bb"
] | [
"Developer_notes/Beam_measurements/Beam_2014-10-03/single.py"
] | [
"import matplotlib.pyplot as plt\nfrom scipy.optimize import curve_fit\nimport numpy as np\n# The offset values in Az given to the telescope. Note that \n# This does not necesarily mean that the telescope was pointing in this \n# direction, since it might not move if the difference is too small.\nxdata = [\n-20,\n-19,\n-18,\n-17,\n-16,\n-15,\n-14,\n-13,\n-12,\n-11,\n-10,\n-9,\n-8,\n-7,\n-6,\n-5,\n-4,\n-3,\n-2,\n-1,\n0,\n1,\n2,\n3,\n4,\n5,\n6,\n7,\n8,\n9,\n10,\n11,\n12,\n13,\n14,\n15,\n16,\n17,\n18,\n19,\n20,\n]\n\n# First I was measuring following the motion of the sun, i.e.\n# increasing in Azimuth. This means that while measuring, the sun will move\n# towards a higher az value. Here I was starting with -5 deg offset and moving to 5 deg\nfdata = [\n426.4892,\n417.2378,\n413.5343,\n410.8377,\n409.7647,\n408.9738,\n415.0654,\n427.2815,\n473.5249,\n519.086,\n549.9259,\n557.2949,\n522.2085,\n468.8787,\n445.6153,\n522.589,\n718.6798,\n988.6602,\n1328.0975,\n1566.2021,\n1605.5733,\n1426.0575,\n1136.0245,\n861.9328,\n627.0873,\n485.5463,\n459.839,\n474.4248,\n498.8185,\n500.2032,\n477.1168,\n453.2831,\n431.5507,\n418.0383,\n415.3673,\n413.91,\n411.7411,\n410.3259,\n411.06,\n414.6461,\n417.4226,\n]\n\n# remove continuum\nfdata = np.array(fdata)-min(fdata)\n\n# Define model function to be used to fit to the data above:\ndef gauss(x, *p):\n A, mu, sigma= p\n return A*np.exp(-(x-mu)**2/(2.*sigma**2))\n\n# p0 is the initial guess for the fitting coefficients (A, mu and sigma above)\np0 = [1., 0., 0.1]\n\nfres, var_matrix = curve_fit(gauss, xdata, fdata, p0=p0)\n\n#Make nice grid for fitted data\nfitx = np.linspace(min(xdata), max(xdata), 500)\n\n# Get the fitted curve\nffit = gauss(fitx, *fres)\n\nfsigma = fres[2]\n\nfmu = fres[1]\n\nfbeam = 2.355*fsigma # FWHM\n\nplt.plot(xdata, fdata, '*')\nplt.plot(fitx, ffit)\nplt.title('Beam measurements of SALSA-Vale 2014-10-03 at 1410MHz. Fitted FWHM=' + str(round(fbeam,1)) + '$^\\circ$' + ', offset = ' + str(round(fmu,1)) + '$^\\circ$')\nplt.ylabel('Continuum intensity [arbitrary units]')\nplt.xlabel('Azimuth offset relative to the Sun [degrees]')\nplt.legend(['Measurements', 'Fitted Gaussian'])\n\n## SINC FITTING\n#def sincSquare_mod(x, A, mu, sigma):\n# x=np.array(x)\n# return A * (np.sin(np.pi*(x[:]-mu)*sigma) / (np.pi*(x[:]-mu)*sigma))**2\n#\n## p0 is the initial guess for the fitting coefficients (A, mu and sigma above)\n#p0 = [1., 0., 0.1]\n#\n#fres, var_matrix = curve_fit(sincSquare_mod, xdata, fdata, p0=p0)\n#\n##Make nice grid for fitted data\n#fitx = np.linspace(min(xdata), max(xdata), 500)\n#\n## Get the fitted curve\n#ffit = sincSquare_mod(fitx, *fres)\n#\n#fsigma = fres[2]\n#\n#fmu = fres[1]\n#\n#fbeam = 2.355*fsigma # FWHM\n#plt.figure()\n#plt.plot(xdata, fdata, '*')\n#plt.plot(fitx, ffit)\n#plt.title('Sinc fit')\nplt.show()\n"
] | [
[
"numpy.array",
"scipy.optimize.curve_fit",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.plot",
"matplotlib.pyplot.legend",
"numpy.exp",
"matplotlib.pyplot.ylabel",
"matplotlib.pyplot.show"
]
] |
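The `2.355 * sigma` factor in the beam script above is the Gaussian FWHM relation, FWHM = 2*sqrt(2 ln 2)*sigma ≈ 2.3548*sigma. A compact fit on synthetic data (the amplitude, offset and noise level below are made up) shows the same `curve_fit` workflow:

```python
import numpy as np
from scipy.optimize import curve_fit

def gauss(x, A, mu, sigma):
    return A * np.exp(-(x - mu) ** 2 / (2.0 * sigma ** 2))

x = np.linspace(-10, 10, 81)
y = gauss(x, 1000.0, 0.3, 2.0) + np.random.default_rng(0).normal(0, 5, x.size)

(A, mu, sigma), _ = curve_fit(gauss, x, y, p0=[1.0, 0.0, 1.0])
print(round(2 * np.sqrt(2 * np.log(2)), 3))  # 2.355, the FWHM factor
print(round(2.355 * abs(sigma), 2))          # fitted FWHM, ~4.7
```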
zjjlivein/continuous_integration | [
"c8825f32136fdd425389702c37ded08d6fd28a26"
] | [
"framework_api/test_static_optimizer.py"
] | [
"# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\"test static optimizer.\"\"\"\nimport numpy as np\nimport paddle as paddle\nimport paddle.fluid as fluid\nimport tools\nuse_cuda = False\n\n\n# SGDOptimizer\ndef test_SGDOptimizer():\n \"\"\"\n test SGDOptimizer\n default : lr = 0.001 、 regularization = NONE 、 name = NONE\n :return:\n \"\"\"\n train_program = fluid.Program()\n startup_program = fluid.Program()\n with fluid.unique_name.guard():\n with fluid.program_guard(train_program, startup_program):\n np_inp = np.array([[1.0, 2.0], [3.0, 4.0]], dtype=np.float32)\n inp = fluid.layers.data(\n name=\"inp\", shape=[2, 2], append_batch_size=False)\n out = fluid.layers.fc(name=\"fc\",\n input=inp,\n size=3,\n param_attr=fluid.initializer.Constant(\n value=1.0, force_cpu=True))\n out = fluid.layers.reduce_sum(out)\n optimizer = fluid.optimizer.SGDOptimizer(learning_rate=0.001)\n optimizer.minimize(out)\n place = fluid.CUDAPlace(0) if use_cuda else fluid.CPUPlace()\n exe = fluid.Executor(place)\n exe.run(fluid.default_startup_program())\n for i in range(2):\n res = exe.run(train_program,\n feed={\"inp\": np_inp},\n fetch_list=[\"fc.w_0\", \"fc.b_0\", out.name])\n expect_w = [[0.992, 0.992, 0.992],\n [0.98800004, 0.98800004, 0.98800004]]\n expect_b = [-0.004, -0.004, -0.004]\n expect_res = [29.831999]\n tools.compare(res[0], expect_w)\n tools.compare(res[1], expect_b)\n tools.compare(res[2], expect_res)\n\n\ndef test_SGDOptimizer_lr_float():\n \"\"\"\n test SGDOptimizer : learning_rate_float=0.000001\n :return:\n \"\"\"\n train_program = fluid.Program()\n startup_program = fluid.Program()\n with fluid.unique_name.guard():\n with fluid.program_guard(train_program, startup_program):\n np_inp = np.array([[1.0, 2.0], [3.0, 4.0]], dtype=np.float32)\n inp = fluid.layers.data(\n name=\"inp\", shape=[2, 2], append_batch_size=False)\n out = fluid.layers.fc(name=\"fc\",\n input=inp,\n size=3,\n param_attr=fluid.initializer.Constant(\n value=1.0, force_cpu=True))\n out = fluid.layers.reduce_sum(out)\n optimizer = fluid.optimizer.SGDOptimizer(learning_rate=0.000001)\n optimizer.minimize(out)\n place = fluid.CUDAPlace(0) if use_cuda else fluid.CPUPlace()\n exe = fluid.Executor(place)\n exe.run(fluid.default_startup_program())\n for i in range(2):\n res = exe.run(train_program,\n feed={\"inp\": np_inp},\n fetch_list=[\"fc.w_0\", \"fc.b_0\", out.name])\n expect_w = [[0.999992, 0.999992, 0.999992],\n [0.99998796, 0.99998796, 0.99998796]]\n expect_b = [-4.e-06, -4.e-06, -4.e-06]\n expect_res = [29.999832]\n tools.compare(res[0], expect_w)\n tools.compare(res[1], expect_b)\n tools.compare(res[2], expect_res)\n\n\ndef test_SGDOptimizer_lr_var():\n \"\"\"\n test SGDOptimizer : learning_rate_variable=0.000001\n :return:\n \"\"\"\n train_program = fluid.Program()\n startup_program = fluid.Program()\n with fluid.unique_name.guard():\n with fluid.program_guard(train_program, startup_program):\n np_inp = np.array([[1.0, 2.0], [3.0, 4.0]], 
dtype=np.float32)\n inp = fluid.layers.data(\n name=\"inp\", shape=[2, 2], append_batch_size=False)\n out = fluid.layers.fc(name=\"fc\",\n input=inp,\n size=3,\n param_attr=fluid.initializer.Constant(\n value=1.0, force_cpu=True))\n out = fluid.layers.reduce_sum(out)\n lr = fluid.layers.create_global_var(\n shape=[1],\n value=0.000001,\n dtype='float32',\n persistable=True,\n name=\"lr\")\n optimizer = fluid.optimizer.SGDOptimizer(learning_rate=lr)\n optimizer.minimize(out)\n place = fluid.CUDAPlace(0) if use_cuda else fluid.CPUPlace()\n exe = fluid.Executor(place)\n exe.run(fluid.default_startup_program())\n for i in range(2):\n res = exe.run(train_program,\n feed={\"inp\": np_inp},\n fetch_list=[\"fc.w_0\", \"fc.b_0\", out.name])\n expect_w = [[0.999992, 0.999992, 0.999992],\n [0.99998796, 0.99998796, 0.99998796]]\n expect_b = [-4.e-06, -4.e-06, -4.e-06]\n expect_res = [29.999832]\n tools.compare(res[0], expect_w)\n tools.compare(res[1], expect_b)\n tools.compare(res[2], expect_res)\n\n\ndef test_SGDOptimizer_regularization():\n \"\"\"\n test SGDOptimizer : regularization:fluid.regularizer.L2Decay(0.01)\n :return:\n \"\"\"\n train_program = fluid.Program()\n startup_program = fluid.Program()\n with fluid.unique_name.guard():\n with fluid.program_guard(train_program, startup_program):\n np_inp = np.array([[1.0, 2.0], [3.0, 4.0]], dtype=np.float32)\n inp = fluid.layers.data(\n name=\"inp\", shape=[2, 2], append_batch_size=False)\n out = fluid.layers.fc(name=\"fc\",\n input=inp,\n size=3,\n param_attr=fluid.initializer.Constant(\n value=1.0, force_cpu=True))\n out = fluid.layers.reduce_sum(out)\n optimizer = fluid.optimizer.SGDOptimizer(\n learning_rate=0.001,\n regularization=fluid.regularizer.L2Decay(\n regularization_coeff=0.01))\n optimizer.minimize(out)\n place = fluid.CUDAPlace(0) if use_cuda else fluid.CPUPlace()\n exe = fluid.Executor(place)\n exe.run(fluid.default_startup_program())\n for i in range(2):\n res = exe.run(\n train_program,\n feed={\"inp\": np_inp},\n fetch_list=[\"fc.w_0@GRAD\", \"fc.b_0@GRAD\", out.name])\n expect_w = [[4.0099597, 4.0099597, 4.0099597],\n [6.0099397, 6.0099397, 6.0099397]]\n expect_b = [1.99998, 1.99998, 1.99998]\n expect_res = [29.8317]\n tools.compare(res[0], expect_w)\n tools.compare(res[1], expect_b)\n tools.compare(res[2], expect_res)\n\n\ndef test_SGDOptimizer_minimize_parameter_list():\n \"\"\"\n test SGDOptimizer : parameter_list = [\"fc.w_0\"]\n default : startup_program=None, parameter_list=None, no_grad_set=None, grad_clip=None\n :return:\n \"\"\"\n train_program = fluid.Program()\n startup_program = fluid.Program()\n with fluid.unique_name.guard():\n with fluid.program_guard(train_program, startup_program):\n np_inp = np.array([[1.0, 2.0], [3.0, 4.0]], dtype=np.float32)\n inp = fluid.layers.data(\n name=\"inp\", shape=[2, 2], append_batch_size=False)\n out = fluid.layers.fc(name=\"fc\",\n input=inp,\n size=3,\n param_attr=fluid.initializer.Constant(\n value=1.0, force_cpu=True))\n out = fluid.layers.reduce_sum(out)\n optimizer = fluid.optimizer.SGDOptimizer(learning_rate=0.001)\n optimizer.minimize(out, parameter_list=[\"fc.w_0\"])\n place = fluid.CUDAPlace(0) if use_cuda else fluid.CPUPlace()\n exe = fluid.Executor(place)\n exe.run(fluid.default_startup_program())\n for i in range(2):\n res = exe.run(train_program,\n feed={\"inp\": np_inp},\n fetch_list=[\"fc.w_0\", \"fc.b_0\", out.name])\n expect_w = [[0.992, 0.992, 0.992],\n [0.98800004, 0.98800004, 0.98800004]]\n expect_b = [0., 0., 0.]\n tools.compare(res[0], expect_w)\n 
tools.compare(res[1], expect_b)\n\n\ndef test_SGDOptimizer_minimize_no_grad_set():\n \"\"\"\n test SGDOptimizer : no_grad_set = [\"fc.w_0\"]\n default : startup_program=None, parameter_list=None, no_grad_set=None, grad_clip=None\n :return:\n \"\"\"\n train_program = fluid.Program()\n startup_program = fluid.Program()\n with fluid.unique_name.guard():\n with fluid.program_guard(train_program, startup_program):\n np_inp = np.array([[1.0, 2.0], [3.0, 4.0]], dtype=np.float32)\n inp = fluid.layers.data(\n name=\"inp\", shape=[2, 2], append_batch_size=False)\n out = fluid.layers.fc(name=\"fc\",\n input=inp,\n size=3,\n param_attr=fluid.initializer.Constant(\n value=1.0, force_cpu=True))\n out = fluid.layers.reduce_sum(out)\n optimizer = fluid.optimizer.SGDOptimizer(learning_rate=0.001)\n optimizer.minimize(out, no_grad_set=[\"fc.w_0\"])\n place = fluid.CUDAPlace(0) if use_cuda else fluid.CPUPlace()\n exe = fluid.Executor(place)\n exe.run(fluid.default_startup_program())\n for i in range(2):\n res = exe.run(train_program,\n feed={\"inp\": np_inp},\n fetch_list=[\"fc.w_0\", \"fc.b_0\", out.name])\n expect_w = [[1., 1., 1.], [1., 1., 1.]]\n expect_b = [-0.004, -0.004, -0.004]\n tools.compare(res[0], expect_w)\n tools.compare(res[1], expect_b)\n\n\n# AdagradOptimizer\ndef test_AdagradOptimizer():\n \"\"\"\n test AdagradOptimizer\n default : learning_rate=0.2, epsilon=1e-06, regularization=None, name=None, initial_accumulator_value=0.0\n :return:\n \"\"\"\n train_program = fluid.Program()\n startup_program = fluid.Program()\n with fluid.unique_name.guard():\n with fluid.program_guard(train_program, startup_program):\n np_inp = np.array([[1.0, 2.0], [3.0, 4.0]], dtype=np.float32)\n inp = fluid.layers.data(\n name=\"inp\", shape=[2, 2], append_batch_size=False)\n out = fluid.layers.fc(name=\"fc\",\n input=inp,\n size=3,\n param_attr=fluid.initializer.Constant(\n value=1.0, force_cpu=True))\n out = fluid.layers.reduce_sum(out)\n optimizer = fluid.optimizer.AdagradOptimizer(learning_rate=0.2)\n optimizer.minimize(out)\n place = fluid.CUDAPlace(0) if use_cuda else fluid.CPUPlace()\n exe = fluid.Executor(place)\n exe.run(fluid.default_startup_program())\n for i in range(2):\n res = exe.run(train_program,\n feed={\"inp\": np_inp},\n fetch_list=[\"fc.w_0\", \"fc.b_0\", out.name])\n expect_w = [[0.65857875, 0.65857875, 0.65857875],\n [0.65857863, 0.65857863, 0.65857863]]\n expect_b = [-0.34142125, -0.34142125, -0.34142125]\n expect_res = [22.800003]\n tools.compare(res[0], expect_w)\n tools.compare(res[1], expect_b)\n tools.compare(res[2], expect_res)\n\n\ndef test_AdagradOptimizer_learning_rate():\n \"\"\"\n test AdagradOptimizer : learning_rate_float=0.01\n :return:\n \"\"\"\n train_program = fluid.Program()\n startup_program = fluid.Program()\n with fluid.unique_name.guard():\n with fluid.program_guard(train_program, startup_program):\n np_inp = np.array([[1.0, 2.0], [3.0, 4.0]], dtype=np.float32)\n inp = fluid.layers.data(\n name=\"inp\", shape=[2, 2], append_batch_size=False)\n out = fluid.layers.fc(name=\"fc\",\n input=inp,\n size=3,\n param_attr=fluid.initializer.Constant(\n value=1.0, force_cpu=True))\n out = fluid.layers.reduce_sum(out)\n optimizer = fluid.optimizer.AdagradOptimizer(learning_rate=0.01)\n optimizer.minimize(out)\n place = fluid.CUDAPlace(0) if use_cuda else fluid.CPUPlace()\n exe = fluid.Executor(place)\n exe.run(fluid.default_startup_program())\n for i in range(1):\n res = exe.run(train_program,\n feed={\"inp\": np_inp},\n fetch_list=[\"fc.w_0\", \"fc.b_0\", out.name])\n expect_w = 
[[0.99, 0.99, 0.99], [0.99, 0.99, 0.99]]\n expect_b = [-0.01, -0.01, -0.01]\n expect_res = [30]\n tools.compare(res[0], expect_w)\n tools.compare(res[1], expect_b)\n tools.compare(res[2], expect_res)\n\n\ndef test_AdagradOptimizer_learning_rate_var():\n \"\"\"\n test AdagradOptimizer : learning_rate_variable=0.01\n :return:\n \"\"\"\n train_program = fluid.Program()\n startup_program = fluid.Program()\n with fluid.unique_name.guard():\n with fluid.program_guard(train_program, startup_program):\n np_inp = np.array([[1.0, 2.0], [3.0, 4.0]], dtype=np.float32)\n inp = fluid.layers.data(\n name=\"inp\", shape=[2, 2], append_batch_size=False)\n out = fluid.layers.fc(name=\"fc\",\n input=inp,\n size=3,\n param_attr=fluid.initializer.Constant(\n value=1.0, force_cpu=True))\n out = fluid.layers.reduce_sum(out)\n lr = fluid.layers.create_global_var(\n shape=[1],\n value=0.01,\n dtype='float32',\n persistable=True,\n name=\"lr\")\n optimizer = fluid.optimizer.AdagradOptimizer(learning_rate=lr)\n optimizer.minimize(out)\n place = fluid.CUDAPlace(0) if use_cuda else fluid.CPUPlace()\n exe = fluid.Executor(place)\n exe.run(fluid.default_startup_program())\n for i in range(1):\n res = exe.run(train_program,\n feed={\"inp\": np_inp},\n fetch_list=[\"fc.w_0\", \"fc.b_0\", out.name])\n expect_w = [[0.99, 0.99, 0.99], [0.99, 0.99, 0.99]]\n expect_b = [-0.01, -0.01, -0.01]\n expect_res = [30]\n tools.compare(res[0], expect_w)\n tools.compare(res[1], expect_b)\n tools.compare(res[2], expect_res)\n\n\ndef test_AdagradOptimizer_epsilon():\n \"\"\"\n test AdagradOptimizer : epsilon=0.0\n default : 1e-06\n :return:\n \"\"\"\n train_program = fluid.Program()\n startup_program = fluid.Program()\n with fluid.unique_name.guard():\n with fluid.program_guard(train_program, startup_program):\n np_inp = np.array([[1.0, 2.0], [3.0, 4.0]], dtype=np.float32)\n inp = fluid.layers.data(\n name=\"inp\", shape=[2, 2], append_batch_size=False)\n out = fluid.layers.fc(name=\"fc\",\n input=inp,\n size=3,\n param_attr=fluid.initializer.Constant(\n value=1.0, force_cpu=True))\n out = fluid.layers.reduce_sum(out)\n optimizer = fluid.optimizer.AdagradOptimizer(\n learning_rate=0.2, epsilon=0.0)\n optimizer.minimize(out)\n place = fluid.CUDAPlace(0) if use_cuda else fluid.CPUPlace()\n exe = fluid.Executor(place)\n exe.run(fluid.default_startup_program())\n for i in range(1):\n res = exe.run(train_program,\n feed={\"inp\": np_inp},\n fetch_list=[\"fc.w_0\", \"fc.b_0\", out.name])\n expect_w = [[0.8, 0.8, 0.8], [0.8, 0.8, 0.8]]\n expect_b = [-0.2, -0.2, -0.2]\n expect_res = [30]\n tools.compare(res[0], expect_w)\n tools.compare(res[1], expect_b)\n tools.compare(res[2], expect_res)\n\n\ndef test_AdagradOptimizer_regularization():\n \"\"\"\n test AdagradOptimizer : regularization:fluid.regularizer.L2Decay(0.1)\n default : none\n :return:\n \"\"\"\n train_program = fluid.Program()\n startup_program = fluid.Program()\n with fluid.unique_name.guard():\n with fluid.program_guard(train_program, startup_program):\n np_inp = np.array([[1.0, 2.0], [3.0, 4.0]], dtype=np.float32)\n inp = fluid.layers.data(\n name=\"inp\", shape=[2, 2], append_batch_size=False)\n out = fluid.layers.fc(name=\"fc\",\n input=inp,\n size=3,\n param_attr=fluid.initializer.Constant(\n value=1.0, force_cpu=True))\n out = fluid.layers.reduce_sum(out)\n optimizer = fluid.optimizer.AdagradOptimizer(\n learning_rate=0.2,\n regularization=fluid.regularizer.L2Decay(\n regularization_coeff=0.1))\n optimizer.minimize(out)\n place = fluid.CUDAPlace(0) if use_cuda else 
fluid.CPUPlace()\n exe = fluid.Executor(place)\n exe.run(fluid.default_startup_program())\n for i in range(2):\n res = exe.run(\n train_program,\n feed={\"inp\": np_inp},\n fetch_list=[\"fc.w_0@GRAD\", \"fc.b_0@GRAD\", out.name])\n expect_w = [[4.08, 4.08, 4.08], [6.08, 6.08, 6.08]]\n expect_b = [1.98, 1.98, 1.98]\n expect_res = [22.800003]\n tools.compare(res[0], expect_w)\n tools.compare(res[1], expect_b)\n tools.compare(res[2], expect_res)\n\n\ndef test_AdagradOptimizer_initial_accumulator_value():\n \"\"\"\n test AdagradOptimizer : initial_accumulator_value=1.0\n default : 0.0\n :return:\n \"\"\"\n train_program = fluid.Program()\n startup_program = fluid.Program()\n with fluid.unique_name.guard():\n with fluid.program_guard(train_program, startup_program):\n np_inp = np.array([[1.0, 2.0], [3.0, 4.0]], dtype=np.float32)\n inp = fluid.layers.data(\n name=\"inp\", shape=[2, 2], append_batch_size=False)\n out = fluid.layers.fc(name=\"fc\",\n input=inp,\n size=3,\n param_attr=fluid.initializer.Constant(\n value=1.0, force_cpu=True))\n out = fluid.layers.reduce_sum(out)\n optimizer = fluid.optimizer.AdagradOptimizer(\n learning_rate=0.2, initial_accumulator_value=1.0, epsilon=0.0)\n optimizer.minimize(out)\n place = fluid.CUDAPlace(0) if use_cuda else fluid.CPUPlace()\n exe = fluid.Executor(place)\n exe.run(fluid.default_startup_program())\n for i in range(1):\n res = exe.run(train_program,\n feed={\"inp\": np_inp},\n fetch_list=[\"fc.w_0\", \"fc.b_0\", out.name])\n expect_w = [[0.8059715, 0.8059715, 0.8059715],\n [0.8027212, 0.8027212, 0.8027212]]\n expect_b = [-0.17888544, -0.17888544, -0.17888544]\n expect_res = [30.]\n tools.compare(res[0], expect_w)\n tools.compare(res[1], expect_b)\n tools.compare(res[2], expect_res)\n\n\ndef test_AdagradOptimizer_minimize_parameter_list():\n \"\"\"\n test AdagradOptimizer : parameter_list = [\"fc.w_0\"]\n default : startup_program=None, parameter_list=None, no_grad_set=None, grad_clip=None\n :return:\n \"\"\"\n train_program = fluid.Program()\n startup_program = fluid.Program()\n with fluid.unique_name.guard():\n with fluid.program_guard(train_program, startup_program):\n np_inp = np.array([[1.0, 2.0], [3.0, 4.0]], dtype=np.float32)\n inp = fluid.layers.data(\n name=\"inp\", shape=[2, 2], append_batch_size=False)\n out = fluid.layers.fc(name=\"fc\",\n input=inp,\n size=3,\n param_attr=fluid.initializer.Constant(\n value=1.0, force_cpu=True))\n out = fluid.layers.reduce_sum(out)\n optimizer = fluid.optimizer.AdagradOptimizer(learning_rate=0.2)\n optimizer.minimize(out, parameter_list=[\"fc.w_0\"])\n place = fluid.CUDAPlace(0) if use_cuda else fluid.CPUPlace()\n exe = fluid.Executor(place)\n exe.run(fluid.default_startup_program())\n for i in range(2):\n res = exe.run(train_program,\n feed={\"inp\": np_inp},\n fetch_list=[\"fc.w_0\", \"fc.b_0\", out.name])\n expect_w = [[0.65857875, 0.65857875, 0.65857875],\n [0.65857863, 0.65857863, 0.65857863]]\n expect_b = [0., 0., 0.]\n tools.compare(res[0], expect_w)\n tools.compare(res[1], expect_b)\n\n\ndef test_AdagradOptimizer_minimize_no_grad_set():\n \"\"\"\n test AdagradOptimizer : no_grad_set = [\"fc.w_0\"]\n default : startup_program=None, parameter_list=None, no_grad_set=None, grad_clip=None\n :return:\n \"\"\"\n train_program = fluid.Program()\n startup_program = fluid.Program()\n with fluid.unique_name.guard():\n with fluid.program_guard(train_program, startup_program):\n np_inp = np.array([[1.0, 2.0], [3.0, 4.0]], dtype=np.float32)\n inp = fluid.layers.data(\n name=\"inp\", shape=[2, 2], 
append_batch_size=False)\n out = fluid.layers.fc(name=\"fc\",\n input=inp,\n size=3,\n param_attr=fluid.initializer.Constant(\n value=1.0, force_cpu=True))\n out = fluid.layers.reduce_sum(out)\n optimizer = fluid.optimizer.AdagradOptimizer(learning_rate=0.2)\n optimizer.minimize(out, no_grad_set=[\"fc.w_0\"])\n place = fluid.CUDAPlace(0) if use_cuda else fluid.CPUPlace()\n exe = fluid.Executor(fluid.CPUPlace())\n exe.run(fluid.default_startup_program())\n for i in range(2):\n res = exe.run(train_program,\n feed={\"inp\": np_inp},\n fetch_list=[\"fc.w_0\", \"fc.b_0\", out.name])\n expect_w = [[1., 1., 1.], [1., 1., 1.]]\n expect_b = [-0.34142125, -0.34142125, -0.34142125]\n tools.compare(res[0], expect_w)\n tools.compare(res[1], expect_b)\n\n\n# MomentumOptimizer\ndef test_MomentumOptimizer():\n \"\"\"\n test MomentumOptimizer\n default : learning_rate=0.001, momentum=0.9, use_nesterov = false regularization = NONE 、 name = NONE\n :return:\n \"\"\"\n train_program = fluid.Program()\n startup_program = fluid.Program()\n with fluid.unique_name.guard():\n with fluid.program_guard(train_program, startup_program):\n np_inp = np.array([[1.0, 2.0], [3.0, 4.0]], dtype=np.float32)\n inp = fluid.layers.data(\n name=\"inp\", shape=[2, 2], append_batch_size=False)\n out = fluid.layers.fc(name=\"fc\",\n input=inp,\n size=3,\n param_attr=fluid.initializer.Constant(\n value=1.0, force_cpu=True))\n out = fluid.layers.reduce_sum(out)\n moment_optimizer = fluid.optimizer.MomentumOptimizer(\n learning_rate=0.001, momentum=0.9)\n moment_optimizer.minimize(out)\n place = fluid.CUDAPlace(0) if use_cuda else fluid.CPUPlace()\n exe = fluid.Executor(fluid.CPUPlace())\n exe.run(fluid.default_startup_program())\n exe = fluid.Executor(place)\n exe.run(fluid.default_startup_program())\n for i in range(2):\n res = exe.run(train_program,\n feed={\"inp\": np_inp},\n fetch_list=[\"fc.w_0\", \"fc.b_0\", out.name])\n expect_w = [[0.9884, 0.9884, 0.9884],\n [0.98260003, 0.98260003, 0.98260003]]\n expect_b = [-0.0058, -0.0058, -0.0058]\n tools.compare(res[0], expect_w)\n tools.compare(res[1], expect_b)\n\n\ndef test_MomentumOptimizer_lr_float():\n \"\"\"\n test MomentumOptimizer : learning_rate_float=0.01, momentum=0.9\n :return :\n \"\"\"\n train_program = fluid.Program()\n startup_program = fluid.Program()\n with fluid.unique_name.guard():\n with fluid.program_guard(train_program, startup_program):\n np_inp = np.array([[1.0, 2.0], [3.0, 4.0]], dtype=np.float32)\n inp = fluid.layers.data(\n name=\"inp\", shape=[2, 2], append_batch_size=False)\n out = fluid.layers.fc(name=\"fc\",\n input=inp,\n size=3,\n param_attr=fluid.initializer.Constant(\n value=1.0, force_cpu=True))\n out = fluid.layers.reduce_sum(out)\n place = fluid.CUDAPlace(0) if use_cuda else fluid.CPUPlace()\n moment_optimizer = fluid.optimizer.MomentumOptimizer(\n learning_rate=0.01, momentum=0.9)\n moment_optimizer.minimize(out)\n exe = fluid.Executor(place)\n exe.run(fluid.default_startup_program())\n for i in range(2):\n res = exe.run(train_program,\n feed={\"inp\": np_inp},\n fetch_list=[\"fc.w_0\", \"fc.b_0\", out.name])\n expect_w = [[0.884, 0.884, 0.884], [0.826, 0.826, 0.826]]\n expect_b = [-0.058, -0.058, -0.058]\n tools.compare(res[0], expect_w)\n tools.compare(res[1], expect_b)\n\n\ndef test_MomentumOptimizer_lr_var():\n \"\"\"\n test MomentumOptimizer : learning_rate_variable=0.01, momentum=0.9\n :return :\n \"\"\"\n train_program = fluid.Program()\n startup_program = fluid.Program()\n with fluid.unique_name.guard():\n with 
fluid.program_guard(train_program, startup_program):\n np_inp = np.array([[1.0, 2.0], [3.0, 4.0]], dtype=np.float32)\n inp = fluid.layers.data(\n name=\"inp\", shape=[2, 2], append_batch_size=False)\n out = fluid.layers.fc(name=\"fc\",\n input=inp,\n size=3,\n param_attr=fluid.initializer.Constant(\n value=1.0, force_cpu=True))\n out = fluid.layers.reduce_sum(out)\n place = fluid.CUDAPlace(0) if use_cuda else fluid.CPUPlace()\n lr = fluid.layers.create_global_var(\n shape=[1],\n value=0.01,\n dtype='float32',\n persistable=True,\n name=\"lr\")\n moment_optimizer = fluid.optimizer.MomentumOptimizer(\n learning_rate=lr, momentum=0.9)\n moment_optimizer.minimize(out)\n exe = fluid.Executor(place)\n exe.run(fluid.default_startup_program())\n for i in range(2):\n res = exe.run(train_program,\n feed={\"inp\": np_inp},\n fetch_list=[\"fc.w_0\", \"fc.b_0\", out.name])\n expect_w = [[0.884, 0.884, 0.884], [0.826, 0.826, 0.826]]\n expect_b = [-0.058, -0.058, -0.058]\n tools.compare(res[0], expect_w)\n tools.compare(res[1], expect_b)\n\n\ndef test_MomentumOptimizer_momentum():\n \"\"\"\n test MomentumOptimizer : momentum=0.0\n :return:\n \"\"\"\n train_program = fluid.Program()\n startup_program = fluid.Program()\n with fluid.unique_name.guard():\n with fluid.program_guard(train_program, startup_program):\n np_inp = np.array([[1.0, 2.0], [3.0, 4.0]], dtype=np.float32)\n inp = fluid.layers.data(\n name=\"inp\", shape=[2, 2], append_batch_size=False)\n out = fluid.layers.fc(name=\"fc\",\n input=inp,\n size=3,\n param_attr=fluid.initializer.Constant(\n value=1.0, force_cpu=True))\n out = fluid.layers.reduce_sum(out)\n moment_optimizer = fluid.optimizer.MomentumOptimizer(\n learning_rate=0.001, momentum=0.0)\n moment_optimizer.minimize(out)\n place = fluid.CUDAPlace(0) if use_cuda else fluid.CPUPlace()\n exe = fluid.Executor(place)\n exe.run(fluid.default_startup_program())\n for i in range(2):\n res = exe.run(train_program,\n feed={\"inp\": np_inp},\n fetch_list=[\"fc.w_0\", \"fc.b_0\", out.name])\n expect_w = [[0.992, 0.992, 0.992],\n [0.98800004, 0.98800004, 0.98800004]]\n expect_b = [-0.004, -0.004, -0.004]\n tools.compare(res[0], expect_w)\n tools.compare(res[1], expect_b)\n\n\ndef test_MomentumOptimizer_use_nesterov():\n \"\"\"\n test MomentumOptimizer : use_nesterov = True\n :return: fc.w_0, fc.b_0\n \"\"\"\n train_program = fluid.Program()\n startup_program = fluid.Program()\n with fluid.unique_name.guard():\n with fluid.program_guard(train_program, startup_program):\n np_inp = np.array([[1.0, 2.0], [3.0, 4.0]], dtype=np.float32)\n inp = fluid.layers.data(\n name=\"inp\", shape=[2, 2], append_batch_size=False)\n out = fluid.layers.fc(name=\"fc\",\n input=inp,\n size=3,\n param_attr=fluid.initializer.Constant(\n value=1.0, force_cpu=True))\n out = fluid.layers.reduce_sum(out)\n place = fluid.CUDAPlace(0) if use_cuda else fluid.CPUPlace()\n moment_optimizer = fluid.optimizer.MomentumOptimizer(\n learning_rate=0.001, momentum=0.9, use_nesterov=True)\n moment_optimizer.minimize(out)\n exe = fluid.Executor(place)\n exe.run(fluid.default_startup_program())\n for i in range(2):\n res = exe.run(train_program,\n feed={\"inp\": np_inp},\n fetch_list=[\"fc.w_0\", \"fc.b_0\", out.name])\n expect_w = [[0.98156, 0.98156, 0.98156],\n [0.97234, 0.97234, 0.97234]]\n expect_b = [-0.00922, -0.00922, -0.00922]\n tools.compare(res[0], expect_w)\n tools.compare(res[1], expect_b)\n\n\ndef test_MomentumOptimizer_use_nesterov_momentum():\n \"\"\"\n test MomentumOptimizer : use_nesterov = True, momentum=0.0\n :return: 
fc.w_0, fc.b_0\n \"\"\"\n train_program = fluid.Program()\n startup_program = fluid.Program()\n with fluid.unique_name.guard():\n with fluid.program_guard(train_program, startup_program):\n np_inp = np.array([[1.0, 2.0], [3.0, 4.0]], dtype=np.float32)\n inp = fluid.layers.data(\n name=\"inp\", shape=[2, 2], append_batch_size=False)\n out = fluid.layers.fc(name=\"fc\",\n input=inp,\n size=3,\n param_attr=fluid.initializer.Constant(\n value=1.0, force_cpu=True))\n out = fluid.layers.reduce_sum(out)\n moment_optimizer = fluid.optimizer.MomentumOptimizer(\n learning_rate=0.001, momentum=0.0, use_nesterov=True)\n moment_optimizer.minimize(out)\n place = fluid.CUDAPlace(0) if use_cuda else fluid.CPUPlace()\n exe = fluid.Executor(place)\n exe.run(fluid.default_startup_program())\n for i in range(2):\n res = exe.run(train_program,\n feed={\"inp\": np_inp},\n fetch_list=[\"fc.w_0\", \"fc.b_0\", out.name])\n expect_w = [[0.992, 0.992, 0.992],\n [0.98800004, 0.98800004, 0.98800004]]\n expect_b = [-0.004, -0.004, -0.004]\n tools.compare(res[0], expect_w)\n tools.compare(res[1], expect_b)\n\n\ndef test_MomentumOptimizer_regularization():\n \"\"\"\n test MomentumOptimizer : regularization:fluid.regularizer.L2Decay(0.01)\n :return:\n \"\"\"\n train_program = fluid.Program()\n startup_program = fluid.Program()\n with fluid.unique_name.guard():\n with fluid.program_guard(train_program, startup_program):\n np_inp = np.array([[1.0, 2.0], [3.0, 4.0]], dtype=np.float32)\n inp = fluid.layers.data(\n name=\"inp\", shape=[2, 2], append_batch_size=False)\n out = fluid.layers.fc(name=\"fc\",\n input=inp,\n size=3,\n param_attr=fluid.initializer.Constant(\n value=1.0, force_cpu=True))\n out = fluid.layers.reduce_sum(out)\n optimizer = fluid.optimizer.MomentumOptimizer(\n learning_rate=0.001,\n momentum=0.9,\n regularization=fluid.regularizer.L2Decay(\n regularization_coeff=0.01))\n optimizer.minimize(out)\n place = fluid.CUDAPlace(0) if use_cuda else fluid.CPUPlace()\n exe = fluid.Executor(place)\n exe.run(fluid.default_startup_program())\n for i in range(2):\n res = exe.run(\n train_program,\n feed={\"inp\": np_inp},\n fetch_list=[\"fc.w_0@GRAD\", \"fc.b_0@GRAD\", out.name])\n expect_w = [[4.009959, 4.009959, 4.009959],\n [6.009939, 6.009939, 6.009939]]\n expect_b = [1.99998, 1.99998, 1.99998]\n expect_res = [29.8317]\n tools.compare(res[0], expect_w)\n tools.compare(res[1], expect_b)\n tools.compare(res[2], expect_res)\n\n\ndef test_MomentumOptimizer_minimize_parameter_list():\n \"\"\"\n test MomentumOptimizer : parameter_list = [\"fc.w_0\"]\n default : startup_program=None, parameter_list=None, no_grad_set=None, grad_clip=None\n :return:\n \"\"\"\n train_program = fluid.Program()\n startup_program = fluid.Program()\n with fluid.unique_name.guard():\n with fluid.program_guard(train_program, startup_program):\n np_inp = np.array([[1.0, 2.0], [3.0, 4.0]], dtype=np.float32)\n inp = fluid.layers.data(\n name=\"inp\", shape=[2, 2], append_batch_size=False)\n out = fluid.layers.fc(name=\"fc\",\n input=inp,\n size=3,\n param_attr=fluid.initializer.Constant(\n value=1.0, force_cpu=True))\n out = fluid.layers.reduce_sum(out)\n optimizer = fluid.optimizer.MomentumOptimizer(\n learning_rate=0.001, momentum=0.9)\n optimizer.minimize(out, parameter_list=[\"fc.w_0\"])\n place = fluid.CUDAPlace(0) if use_cuda else fluid.CPUPlace()\n exe = fluid.Executor(place)\n exe.run(fluid.default_startup_program())\n for i in range(2):\n res = exe.run(train_program,\n feed={\"inp\": np_inp},\n fetch_list=[\"fc.w_0\", \"fc.b_0\", out.name])\n 
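# expected values after two momentum updates applied to fc.w_0 only; fc.b_0 is excluded via parameter_list and stays at 0\n 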
expect_w = [[0.9884, 0.9884, 0.9884],\n [0.98260003, 0.98260003, 0.98260003]]\n expect_b = [0., 0., 0.]\n tools.compare(res[0], expect_w)\n tools.compare(res[1], expect_b)\n\n\ndef test_MomentumOptimizer_minimize_no_grad_set():\n \"\"\"\n test MomentumOptimizer : no_grad_set = [\"fc.w_0\"]\n default : startup_program=None, parameter_list=None, no_grad_set=None, grad_clip=None\n :return:\n \"\"\"\n train_program = fluid.Program()\n startup_program = fluid.Program()\n with fluid.unique_name.guard():\n with fluid.program_guard(train_program, startup_program):\n np_inp = np.array([[1.0, 2.0], [3.0, 4.0]], dtype=np.float32)\n inp = fluid.layers.data(\n name=\"inp\", shape=[2, 2], append_batch_size=False)\n out = fluid.layers.fc(name=\"fc\",\n input=inp,\n size=3,\n param_attr=fluid.initializer.Constant(\n value=1.0, force_cpu=True))\n out = fluid.layers.reduce_sum(out)\n optimizer = fluid.optimizer.MomentumOptimizer(\n learning_rate=0.001, momentum=0.9)\n optimizer.minimize(out, no_grad_set=[\"fc.w_0\"])\n place = fluid.CUDAPlace(0) if use_cuda else fluid.CPUPlace()\n exe = fluid.Executor(place)\n exe.run(fluid.default_startup_program())\n for i in range(2):\n res = exe.run(train_program,\n feed={\"inp\": np_inp},\n fetch_list=[\"fc.w_0\", \"fc.b_0\", out.name])\n expect_w = [[1., 1., 1.], [1., 1., 1.]]\n expect_b = [-0.0058, -0.0058, -0.0058]\n tools.compare(res[0], expect_w)\n tools.compare(res[1], expect_b)\n\n\n# DecayedAdagradOptimizer\ndef test_DecayedAdagradOptimizer():\n \"\"\"\n test DecayedAdagradOptimizer\n default : lr = 0.2, decay=0.95, epsilon=1e-06, regularization=None, name=None\n :return:\n \"\"\"\n train_program = fluid.Program()\n startup_program = fluid.Program()\n with fluid.unique_name.guard():\n with fluid.program_guard(train_program, startup_program):\n np_inp = np.array([[1.0, 2.0], [3.0, 4.0]], dtype=np.float32)\n inp = fluid.layers.data(\n name=\"inp\", shape=[2, 2], append_batch_size=False)\n out = fluid.layers.fc(name=\"fc\",\n input=inp,\n size=3,\n param_attr=fluid.initializer.Constant(\n value=1.0, force_cpu=True))\n out = fluid.layers.reduce_sum(out)\n optimizer = fluid.optimizer.DecayedAdagradOptimizer(\n learning_rate=0.2)\n optimizer.minimize(out)\n place = fluid.CUDAPlace(0) if use_cuda else fluid.CPUPlace()\n exe = fluid.Executor(place)\n exe.run(fluid.default_startup_program())\n for i in range(2):\n res = exe.run(train_program,\n feed={\"inp\": np_inp},\n fetch_list=[\"fc.w_0\", \"fc.b_0\", out.name])\n expect_w = [[-0.53493816, -0.53493816, -0.53493816],\n [-0.5349387, -0.5349387, -0.5349387]]\n expect_b = [-1.5349367, -1.5349367, -1.5349367]\n expect_res = [-2.1993403]\n tools.compare(res[0], expect_w)\n tools.compare(res[1], expect_b)\n tools.compare(res[2], expect_res)\n\n\ndef test_DecayedAdagradOptimizer_learning_rate_float():\n \"\"\"\n test DecayedAdagradOptimizer : learning_rate_float=0.01\n :return:\n \"\"\"\n train_program = fluid.Program()\n startup_program = fluid.Program()\n with fluid.unique_name.guard():\n with fluid.program_guard(train_program, startup_program):\n np_inp = np.array([[1.0, 2.0], [3.0, 4.0]], dtype=np.float32)\n inp = fluid.layers.data(\n name=\"inp\", shape=[2, 2], append_batch_size=False)\n out = fluid.layers.fc(name=\"fc\",\n input=inp,\n size=3,\n param_attr=fluid.initializer.Constant(\n value=1.0, force_cpu=True))\n out = fluid.layers.reduce_sum(out)\n optimizer = fluid.optimizer.DecayedAdagradOptimizer(\n learning_rate=0.01)\n optimizer.minimize(out)\n place = fluid.CUDAPlace(0) if use_cuda else fluid.CPUPlace()\n exe 
= fluid.Executor(place)\n exe.run(fluid.default_startup_program())\n for i in range(1):\n res = exe.run(train_program,\n feed={\"inp\": np_inp},\n fetch_list=[\"fc.w_0\", \"fc.b_0\", out.name])\n expect_w = [[0.9552787, 0.9552787, 0.9552787],\n [0.9552787, 0.9552787, 0.9552787]]\n expect_b = [-0.04472125, -0.04472125, -0.04472125]\n tools.compare(res[0], expect_w)\n tools.compare(res[1], expect_b)\n\n\ndef test_DecayedAdagradOptimizer_learning_rate_var():\n \"\"\"\n test DecayedAdagradOptimizer : learning_rate_variable=0.01\n :return:\n \"\"\"\n train_program = fluid.Program()\n startup_program = fluid.Program()\n with fluid.unique_name.guard():\n with fluid.program_guard(train_program, startup_program):\n np_inp = np.array([[1.0, 2.0], [3.0, 4.0]], dtype=np.float32)\n inp = fluid.layers.data(\n name=\"inp\", shape=[2, 2], append_batch_size=False)\n out = fluid.layers.fc(name=\"fc\",\n input=inp,\n size=3,\n param_attr=fluid.initializer.Constant(\n value=1.0, force_cpu=True))\n out = fluid.layers.reduce_sum(out)\n lr = fluid.layers.create_global_var(\n shape=[1],\n value=0.01,\n dtype='float32',\n persistable=True,\n name=\"lr\")\n optimizer = fluid.optimizer.DecayedAdagradOptimizer(\n learning_rate=lr)\n optimizer.minimize(out)\n place = fluid.CUDAPlace(0) if use_cuda else fluid.CPUPlace()\n exe = fluid.Executor(place)\n exe.run(fluid.default_startup_program())\n for i in range(1):\n res = exe.run(train_program,\n feed={\"inp\": np_inp},\n fetch_list=[\"fc.w_0\", \"fc.b_0\", out.name])\n expect_w = [[0.9552787, 0.9552787, 0.9552787],\n [0.9552787, 0.9552787, 0.9552787]]\n expect_b = [-0.04472125, -0.04472125, -0.04472125]\n tools.compare(res[0], expect_w)\n tools.compare(res[1], expect_b)\n\n\ndef test_DecayedAdagradOptimizer_decay():\n \"\"\"\n test DecayedAdagradOptimizer : decay does not take effect http://newicafe.baidu.com/issue/DLTP-3183/show?cid=5\n :return:\n \"\"\"\n train_program = fluid.Program()\n startup_program = fluid.Program()\n with fluid.unique_name.guard():\n with fluid.program_guard(train_program, startup_program):\n np_inp = np.array([[1.0, 2.0], [3.0, 4.0]], dtype=np.float32)\n inp = fluid.layers.data(\n name=\"inp\", shape=[2, 2], append_batch_size=False)\n out = fluid.layers.fc(name=\"fc\",\n input=inp,\n size=3,\n param_attr=fluid.initializer.Constant(\n value=1.0, force_cpu=True))\n out = fluid.layers.reduce_sum(out)\n optimizer = fluid.optimizer.DecayedAdagradOptimizer(\n learning_rate=0.2, decay=0.6, epsilon=0.0)\n optimizer.minimize(out)\n\n exe = fluid.Executor(fluid.CPUPlace())\n exe.run(fluid.default_startup_program())\n for i in range(5):\n res = exe.run(feed={\"inp\": np_inp},\n fetch_list=[\n \"fc.w_0_moment_0\", \"fc.b_0_moment_0\",\n \"fc.w_0\", \"fc.b_0\"\n ])\n # print(res[0])\n\n\ndef test_DecayedAdagradOptimizer_epsilon():\n \"\"\"\n test DecayedAdagradOptimizer : epsilon=0.0\n default : 1e-06\n :return:\n \"\"\"\n train_program = fluid.Program()\n startup_program = fluid.Program()\n with fluid.unique_name.guard():\n with fluid.program_guard(train_program, startup_program):\n np_inp = np.array([[1.0, 2.0], [3.0, 4.0]], dtype=np.float32)\n inp = fluid.layers.data(\n name=\"inp\", shape=[2, 2], append_batch_size=False)\n out = fluid.layers.fc(name=\"fc\",\n input=inp,\n size=3,\n param_attr=fluid.initializer.Constant(\n value=1.0, force_cpu=True))\n out = fluid.layers.reduce_sum(out)\n optimizer = fluid.optimizer.DecayedAdagradOptimizer(\n learning_rate=0.2, epsilon=0.0)\n optimizer.minimize(out)\n place = fluid.CUDAPlace(0) if use_cuda else fluid.CPUPlace()\n exe = 
fluid.Executor(place)\n exe.run(fluid.default_startup_program())\n for i in range(2):\n res = exe.run(train_program,\n feed={\"inp\": np_inp},\n fetch_list=[\"fc.w_0\", \"fc.b_0\", out.name])\n expect_w = [[-0.5349397, -0.5349397, -0.5349397],\n [-0.5349397, -0.5349397, -0.5349397]]\n expect_b = [-1.5349398, -1.5349398, -1.5349398]\n tools.compare(res[0], expect_w)\n tools.compare(res[1], expect_b)\n\n\ndef test_DecayedAdagradOptimizer_regularization():\n \"\"\"\n test DecayedAdagradOptimizer : regularization:fluid.regularizer.L2Decay(0.1)\n default : none\n :return:\n \"\"\"\n train_program = fluid.Program()\n startup_program = fluid.Program()\n with fluid.unique_name.guard():\n with fluid.program_guard(train_program, startup_program):\n np_inp = np.array([[1.0, 2.0], [3.0, 4.0]], dtype=np.float32)\n inp = fluid.layers.data(\n name=\"inp\", shape=[2, 2], append_batch_size=False)\n out = fluid.layers.fc(name=\"fc\",\n input=inp,\n size=3,\n param_attr=fluid.initializer.Constant(\n value=1.0, force_cpu=True))\n # out = fluid.layers.fc(inp, size=3)\n out = fluid.layers.reduce_sum(out)\n optimizer = fluid.optimizer.DecayedAdagradOptimizer(\n learning_rate=0.2,\n regularization=fluid.regularizer.L2Decay(\n regularization_coeff=0.1))\n optimizer.minimize(out)\n place = fluid.CUDAPlace(0) if use_cuda else fluid.CPUPlace()\n exe = fluid.Executor(place)\n exe.run(fluid.default_startup_program())\n for i in range(2):\n res = exe.run(\n train_program,\n feed={\"inp\": np_inp},\n fetch_list=[\"fc.w_0@GRAD\", \"fc.b_0@GRAD\", out.name])\n expect_w = [[4.010557, 4.010557, 4.010557],\n [6.010557, 6.010557, 6.010557]]\n expect_b = [1.9105575, 1.9105575, 1.9105575]\n tools.compare(res[0], expect_w)\n tools.compare(res[1], expect_b)\n\n\ndef test_DecayedAdagradOptimizer_minimize_parameter_list():\n \"\"\"\n test DecayedAdagradOptimizer : parameter_list = [\"fc.w_0\"]\n default : startup_program=None, parameter_list=None, no_grad_set=None, grad_clip=None\n :return:\n \"\"\"\n train_program = fluid.Program()\n startup_program = fluid.Program()\n with fluid.unique_name.guard():\n with fluid.program_guard(train_program, startup_program):\n np_inp = np.array([[1.0, 2.0], [3.0, 4.0]], dtype=np.float32)\n inp = fluid.layers.data(\n name=\"inp\", shape=[2, 2], append_batch_size=False)\n out = fluid.layers.fc(name=\"fc\",\n input=inp,\n size=3,\n param_attr=fluid.initializer.Constant(\n value=1.0, force_cpu=True))\n out = fluid.layers.reduce_sum(out)\n optimizer = fluid.optimizer.DecayedAdagradOptimizer(\n learning_rate=0.2)\n optimizer.minimize(out, parameter_list=[\"fc.w_0\"])\n place = fluid.CUDAPlace(0) if use_cuda else fluid.CPUPlace()\n exe = fluid.Executor(place)\n exe.run(fluid.default_startup_program())\n for i in range(1):\n res = exe.run(train_program,\n feed={\"inp\": np_inp},\n fetch_list=[\"fc.w_0\", \"fc.b_0\", out.name])\n expect_w = [[0.10557389, 0.10557389, 0.10557389],\n [0.10557353, 0.10557353, 0.10557353]]\n expect_b = [0., 0., 0.]\n tools.compare(res[0], expect_w)\n tools.compare(res[1], expect_b)\n\n\ndef test_DecayedAdagradOptimizer_minimize_no_grad_set():\n \"\"\"\n test DecayedAdagradOptimizer : no_grad_set = [\"fc.w_0\"]\n default : startup_program=None, parameter_list=None, no_grad_set=None, grad_clip=None\n :return:\n \"\"\"\n train_program = fluid.Program()\n startup_program = fluid.Program()\n with fluid.unique_name.guard():\n with fluid.program_guard(train_program, startup_program):\n np_inp = np.array([[1.0, 2.0], [3.0, 4.0]], dtype=np.float32)\n inp = fluid.layers.data(\n 
name=\"inp\", shape=[2, 2], append_batch_size=False)\n out = fluid.layers.fc(name=\"fc\",\n input=inp,\n size=3,\n param_attr=fluid.initializer.Constant(\n value=1.0, force_cpu=True))\n out = fluid.layers.reduce_sum(out)\n optimizer = fluid.optimizer.DecayedAdagradOptimizer(\n learning_rate=0.2)\n optimizer.minimize(out, no_grad_set=[\"fc.w_0\"])\n place = fluid.CUDAPlace(0) if use_cuda else fluid.CPUPlace()\n exe = fluid.Executor(place)\n exe.run(fluid.default_startup_program())\n for i in range(1):\n res = exe.run(train_program,\n feed={\"inp\": np_inp},\n fetch_list=[\"fc.w_0\", \"fc.b_0\", out.name])\n expect_w = [[1., 1., 1.], [1., 1., 1.]]\n expect_b = [-0.8944251, -0.8944251, -0.8944251]\n tools.compare(res[0], expect_w)\n tools.compare(res[1], expect_b)\n\n\n# RMSPropOptimizer\ndef test_RMSPropOptimizer():\n \"\"\"\n test RMSPropOptimizer :\n default : learning_rate=0.1, rho=0.95, epsilon=1e-06, momentum=0.0, centered=False, regularization=None, name=None\n :return:\n rmsprop\n \"\"\"\n train_program = fluid.Program()\n startup_program = fluid.Program()\n with fluid.unique_name.guard():\n with fluid.program_guard(train_program, startup_program):\n np_inp = np.array([[1.0, 2.0], [3.0, 4.0]], dtype=np.float32)\n inp = fluid.layers.data(\n name=\"inp\", shape=[2, 2], append_batch_size=False)\n out = fluid.layers.fc(name=\"fc\",\n input=inp,\n size=3,\n param_attr=fluid.initializer.Constant(\n value=1.0, force_cpu=True))\n out = fluid.layers.reduce_sum(out)\n optimizer = fluid.optimizer.RMSPropOptimizer(learning_rate=0.1)\n optimizer.minimize(out)\n place = fluid.CUDAPlace(0) if use_cuda else fluid.CPUPlace()\n exe = fluid.Executor(place)\n exe.run(fluid.default_startup_program())\n for i in range(1):\n res = exe.run(train_program,\n feed={\"inp\": np_inp},\n fetch_list=[\"fc.w_0\", \"fc.b_0\", out.name])\n expect_w = [[0.5527867, 0.5527867, 0.5527867],\n [0.5527866, 0.5527866, 0.5527866]]\n expect_b = [-0.44721246, -0.44721246, -0.44721246]\n expect_res = [30.0]\n tools.compare(res[0], expect_w)\n tools.compare(res[1], expect_b)\n tools.compare(res[2], expect_res)\n\n\ndef test_RMSPropOptimizer_learning_rate_float():\n \"\"\"\n test RMSPropOptimizer : learning_rate=0.01\n default : 0.1\n :return:\n \"\"\"\n train_program = fluid.Program()\n startup_program = fluid.Program()\n with fluid.unique_name.guard():\n with fluid.program_guard(train_program, startup_program):\n np_inp = np.array([[1.0, 2.0], [3.0, 4.0]], dtype=np.float32)\n inp = fluid.layers.data(\n name=\"inp\", shape=[2, 2], append_batch_size=False)\n out = fluid.layers.fc(name=\"fc\",\n input=inp,\n size=3,\n param_attr=fluid.initializer.Constant(\n value=1.0, force_cpu=True))\n out = fluid.layers.reduce_sum(out)\n optimizer = fluid.optimizer.RMSPropOptimizer(learning_rate=0.01)\n optimizer.minimize(out)\n place = fluid.CUDAPlace(0) if use_cuda else fluid.CPUPlace()\n exe = fluid.Executor(place)\n exe.run(fluid.default_startup_program())\n for i in range(1):\n res = exe.run(train_program,\n feed={\"inp\": np_inp},\n fetch_list=[\"fc.w_0\", \"fc.b_0\", out.name])\n expect_w = [[0.9552787, 0.9552787, 0.9552787],\n [0.95527864, 0.95527864, 0.95527864]]\n expect_b = [-0.04472124, -0.04472124, -0.04472124]\n expect_res = [30.0]\n tools.compare(res[0], expect_w)\n tools.compare(res[1], expect_b)\n tools.compare(res[2], expect_res)\n\n\ndef test_RMSPropOptimizer_rho():\n \"\"\"\n test RMSPropOptimizer : rho = 0.1\n default : 0.95\n :return:\n \"\"\"\n train_program = fluid.Program()\n startup_program = fluid.Program()\n with 
fluid.unique_name.guard():\n with fluid.program_guard(train_program, startup_program):\n np_inp = np.array([[1.0, 2.0], [3.0, 4.0]], dtype=np.float32)\n inp = fluid.layers.data(\n name=\"inp\", shape=[2, 2], append_batch_size=False)\n out = fluid.layers.fc(name=\"fc\",\n input=inp,\n size=3,\n param_attr=fluid.initializer.Constant(\n value=1.0, force_cpu=True))\n out = fluid.layers.reduce_sum(out)\n optimizer = fluid.optimizer.RMSPropOptimizer(\n learning_rate=0.1, rho=0.1, epsilon=0.0)\n optimizer.minimize(out)\n place = fluid.CUDAPlace(0) if use_cuda else fluid.CPUPlace()\n exe = fluid.Executor(place)\n exe.run(fluid.default_startup_program())\n for i in range(1):\n res = exe.run(train_program,\n feed={\"inp\": np_inp},\n fetch_list=[\"fc.w_0\", \"fc.b_0\", out.name])\n expect_w = [[0.89459074, 0.89459074, 0.89459074],\n [0.89459074, 0.89459074, 0.89459074]]\n expect_b = [-0.10540926, -0.10540926, -0.10540926]\n expect_res = [30.0]\n tools.compare(res[0], expect_w)\n tools.compare(res[1], expect_b)\n tools.compare(res[2], expect_res)\n\n\ndef test_RMSPropOptimizer_epsilon():\n \"\"\"\n test RMSPropOptimizer : epsilon = 1.0\n default : 1e-6\n :return:\n \"\"\"\n train_program = fluid.Program()\n startup_program = fluid.Program()\n with fluid.unique_name.guard():\n with fluid.program_guard(train_program, startup_program):\n np_inp = np.array([[1.0, 2.0], [3.0, 4.0]], dtype=np.float32)\n inp = fluid.layers.data(\n name=\"inp\", shape=[2, 2], append_batch_size=False)\n out = fluid.layers.fc(name=\"fc\",\n input=inp,\n size=3,\n param_attr=fluid.initializer.Constant(\n value=1.0, force_cpu=True))\n out = fluid.layers.reduce_sum(out)\n optimizer = fluid.optimizer.RMSPropOptimizer(\n learning_rate=0.1, epsilon=1.0)\n optimizer.minimize(out)\n place = fluid.CUDAPlace(0) if use_cuda else fluid.CPUPlace()\n exe = fluid.Executor(place)\n exe.run(fluid.default_startup_program())\n for i in range(1):\n res = exe.run(train_program,\n feed={\"inp\": np_inp},\n fetch_list=[\"fc.w_0\", \"fc.b_0\", out.name])\n expect_w = [[0.70185757, 0.70185757, 0.70185757],\n [0.64143145, 0.64143145, 0.64143145]]\n expect_b = [-0.18257418, -0.18257418, -0.18257418]\n expect_res = [30.0]\n tools.compare(res[0], expect_w)\n tools.compare(res[1], expect_b)\n tools.compare(res[2], expect_res)\n\n\ndef test_RMSPropOptimizer_momentum():\n \"\"\"\n test RMSPropOptimizer : momentum = 0.1\n default : 0.0\n :return:\n \"\"\"\n train_program = fluid.Program()\n startup_program = fluid.Program()\n with fluid.unique_name.guard():\n with fluid.program_guard(train_program, startup_program):\n np_inp = np.array([[1.0, 2.0], [3.0, 4.0]], dtype=np.float32)\n inp = fluid.layers.data(\n name=\"inp\", shape=[2, 2], append_batch_size=False)\n out = fluid.layers.fc(name=\"fc\",\n input=inp,\n size=3,\n param_attr=fluid.initializer.Constant(\n value=1.0, force_cpu=True))\n out = fluid.layers.reduce_sum(out)\n optimizer = fluid.optimizer.RMSPropOptimizer(\n learning_rate=0.1, momentum=1.0)\n optimizer.minimize(out)\n place = fluid.CUDAPlace(0) if use_cuda else fluid.CPUPlace()\n exe = fluid.Executor(place)\n exe.run(fluid.default_startup_program())\n for i in range(2):\n res = exe.run(train_program,\n feed={\"inp\": np_inp},\n fetch_list=[\"fc.w_0\", \"fc.b_0\", out.name])\n expect_w = [[-0.21468276, -0.21468276, -0.21468276],\n [-0.21468306, -0.21468306, -0.21468306]]\n expect_b = [-1.2146808, -1.2146808, -1.2146808]\n expect_res = [13.900324]\n tools.compare(res[0], expect_w)\n tools.compare(res[1], expect_b)\n tools.compare(res[2], 
expect_res)\n\n\ndef test_RMSPropOptimizer_centered():\n \"\"\"\n test RMSPropOptimizer : centered = True\n default : False\n :return:\n \"\"\"\n train_program = fluid.Program()\n startup_program = fluid.Program()\n with fluid.unique_name.guard():\n with fluid.program_guard(train_program, startup_program):\n np_inp = np.array([[1.0, 2.0], [3.0, 4.0]], dtype=np.float32)\n inp = fluid.layers.data(\n name=\"inp\", shape=[2, 2], append_batch_size=False)\n out = fluid.layers.fc(name=\"fc\",\n input=inp,\n size=3,\n param_attr=fluid.initializer.Constant(\n value=1.0, force_cpu=True))\n out = fluid.layers.reduce_sum(out)\n optimizer = fluid.optimizer.RMSPropOptimizer(\n learning_rate=0.1, centered=True)\n optimizer.minimize(out)\n place = fluid.CUDAPlace(0) if use_cuda else fluid.CPUPlace()\n exe = fluid.Executor(place)\n exe.run(fluid.default_startup_program())\n for i in range(1):\n res = exe.run(train_program,\n feed={\"inp\": np_inp},\n fetch_list=[\"fc.w_0\", \"fc.b_0\", out.name])\n expect_w = [[0.54116887, 0.54116887, 0.54116887],\n [0.5411687, 0.5411687, 0.5411687]]\n expect_b = [-0.4588302, -0.4588302, -0.4588302]\n expect_res = [30.]\n tools.compare(res[0], expect_w)\n tools.compare(res[1], expect_b)\n tools.compare(res[2], expect_res)\n\n\ndef test_RMSPropOptimizer_regularization():\n \"\"\"\n test RMSPropOptimizer : regularization_coeff = 0.1\n default : none\n :return:\n \"\"\"\n train_program = fluid.Program()\n startup_program = fluid.Program()\n with fluid.unique_name.guard():\n with fluid.program_guard(train_program, startup_program):\n np_inp = np.array([[1.0, 2.0], [3.0, 4.0]], dtype=np.float32)\n inp = fluid.layers.data(\n name=\"inp\", shape=[2, 2], append_batch_size=False)\n out = fluid.layers.fc(name=\"fc\",\n input=inp,\n size=3,\n param_attr=fluid.initializer.Constant(\n value=1.0, force_cpu=True))\n out = fluid.layers.reduce_sum(out)\n optimizer = fluid.optimizer.RMSPropOptimizer(\n learning_rate=0.1,\n regularization=fluid.regularizer.L2Decay(\n regularization_coeff=0.1))\n optimizer.minimize(out)\n place = fluid.CUDAPlace(0) if use_cuda else fluid.CPUPlace()\n exe = fluid.Executor(place)\n exe.run(fluid.default_startup_program())\n for i in range(1):\n res = exe.run(\n train_program,\n feed={\"inp\": np_inp},\n fetch_list=[\"fc.w_0@GRAD\", \"fc.b_0@GRAD\", out.name])\n expect_w = [[4.1, 4.1, 4.1], [6.1, 6.1, 6.1]]\n expect_b = [2., 2., 2.]\n expect_res = [30.]\n tools.compare(res[0], expect_w)\n tools.compare(res[1], expect_b)\n tools.compare(res[2], expect_res)\n\n\ndef test_RMSPropOptimizer_minimize_parameter_list():\n \"\"\"\n test RMSPropOptimizer : parameter_list = [\"fc.w_0\"]\n default : startup_program=None, parameter_list=None, no_grad_set=None, grad_clip=None\n :return:\n \"\"\"\n train_program = fluid.Program()\n startup_program = fluid.Program()\n with fluid.unique_name.guard():\n with fluid.program_guard(train_program, startup_program):\n np_inp = np.array([[1.0, 2.0], [3.0, 4.0]], dtype=np.float32)\n inp = fluid.layers.data(\n name=\"inp\", shape=[2, 2], append_batch_size=False)\n out = fluid.layers.fc(name=\"fc\",\n input=inp,\n size=3,\n param_attr=fluid.initializer.Constant(\n value=1.0, force_cpu=True))\n out = fluid.layers.reduce_sum(out)\n optimizer = fluid.optimizer.RMSPropOptimizer(learning_rate=0.1)\n optimizer.minimize(out, parameter_list=[\"fc.w_0\"])\n place = fluid.CUDAPlace(0) if use_cuda else fluid.CPUPlace()\n exe = fluid.Executor(place)\n exe.run(fluid.default_startup_program())\n for i in range(1):\n res = exe.run(train_program,\n 
feed={\"inp\": np_inp},\n fetch_list=[\"fc.w_0\", \"fc.b_0\", out.name])\n expect_w = [[0.5527867, 0.5527867, 0.5527867],\n [0.5527866, 0.5527866, 0.5527866]]\n expect_b = [0., 0., 0.]\n tools.compare(res[0], expect_w)\n tools.compare(res[1], expect_b)\n\n\ndef test_RMSPropOptimizer_minimize_no_grad_set():\n \"\"\"\n test RMSPropOptimizer : no_grad_set = [\"fc.w_0\"]\n default : startup_program=None, parameter_list=None, no_grad_set=None, grad_clip=None\n :return:\n \"\"\"\n train_program = fluid.Program()\n startup_program = fluid.Program()\n with fluid.unique_name.guard():\n with fluid.program_guard(train_program, startup_program):\n np_inp = np.array([[1.0, 2.0], [3.0, 4.0]], dtype=np.float32)\n inp = fluid.layers.data(\n name=\"inp\", shape=[2, 2], append_batch_size=False)\n out = fluid.layers.fc(name=\"fc\",\n input=inp,\n size=3,\n param_attr=fluid.initializer.Constant(\n value=1.0, force_cpu=True))\n out = fluid.layers.reduce_sum(out)\n optimizer = fluid.optimizer.RMSPropOptimizer(learning_rate=0.1)\n optimizer.minimize(out, no_grad_set=[\"fc.w_0\"])\n place = fluid.CUDAPlace(0) if use_cuda else fluid.CPUPlace()\n exe = fluid.Executor(place)\n exe.run(fluid.default_startup_program())\n for i in range(1):\n res = exe.run(train_program,\n feed={\"inp\": np_inp},\n fetch_list=[\"fc.w_0\", \"fc.b_0\", out.name])\n expect_w = [[1., 1., 1.], [1., 1., 1.]]\n expect_b = [-0.44721246, -0.44721246, -0.44721246]\n tools.compare(res[0], expect_w)\n tools.compare(res[1], expect_b)\n\n\n# ModelAverage\ndef test_ModelAverage():\n \"\"\"\n test ModelAverage : average_window_rate = 1.0 , min_average_window=2, max_average_window=3\n regularization=None, name=None\n :return: fc.w_0, fc.b_0\n \"\"\"\n train_program = fluid.Program()\n startup_program = fluid.Program()\n with fluid.unique_name.guard():\n with fluid.program_guard(train_program, startup_program):\n np_inp = np.array([[1.0, 2.0], [3.0, 4.0]], dtype=np.float32)\n inp = fluid.layers.data(\n name=\"inp\", shape=[2, 2], append_batch_size=False)\n out = fluid.layers.fc(name=\"fc\",\n input=inp,\n size=3,\n param_attr=fluid.initializer.Constant(\n value=1.0, force_cpu=True))\n out = fluid.layers.reduce_sum(out)\n\n moment_optimizer = fluid.optimizer.MomentumOptimizer(\n learning_rate=0.001, momentum=0.9)\n moment_optimizer.minimize(out)\n\n model_average = fluid.optimizer.ModelAverage(\n average_window_rate=1.0,\n min_average_window=2,\n max_average_window=3)\n place = fluid.CUDAPlace(0) if use_cuda else fluid.CPUPlace()\n exe = fluid.Executor(place)\n exe.run(fluid.default_startup_program())\n for i in range(3):\n res = exe.run(train_program,\n feed={\"inp\": np_inp},\n fetch_list=[\n \"fc.w_0\",\n \"fc.b_0\",\n ])\n expect_w_restore = res[0]\n expect_b_restore = res[1]\n\n inference_program = fluid.default_main_program().clone(\n for_test=True)\n with model_average.apply(exe, need_restore=True):\n res = exe.run(inference_program,\n feed={\"inp\": np_inp},\n fetch_list=[\n \"fc.w_0\",\n \"fc.b_0\",\n ])\n expect_w_apply = [[0.98732, 0.98732, 0.98732],\n [0.98098, 0.98098, 0.98098]]\n expect_b_apply = [-0.00634, -0.00634, -0.00634]\n tools.compare(res[0], expect_w_apply)\n tools.compare(res[1], expect_b_apply)\n\n model_average.restore(exe)\n res = exe.run(inference_program,\n feed={\"inp\": np_inp},\n fetch_list=[\n \"fc.w_0\",\n \"fc.b_0\",\n ])\n tools.compare(res[0], expect_w_restore)\n tools.compare(res[1], expect_b_restore)\n\n\ndef test_ModelAverage_average_window_rate():\n \"\"\"\n test ModelAverage : average_window_rate=0.1\n 
min_average_window=3, max_average_window=3,\n regularization=None, name=None\n :return: fc.w_0, fc.b_0\n \"\"\"\n train_program = fluid.Program()\n startup_program = fluid.Program()\n with fluid.unique_name.guard():\n with fluid.program_guard(train_program, startup_program):\n np_inp = np.array([[1.0, 2.0], [3.0, 4.0]], dtype=np.float32)\n inp = fluid.layers.data(\n name=\"inp\", shape=[2, 2], append_batch_size=False)\n out = fluid.layers.fc(name=\"fc\",\n input=inp,\n size=3,\n param_attr=fluid.initializer.Constant(\n value=1.0, force_cpu=True))\n out = fluid.layers.reduce_sum(out)\n\n moment_optimizer = fluid.optimizer.MomentumOptimizer(\n learning_rate=0.001, momentum=0.9)\n moment_optimizer.minimize(out)\n\n model_average = fluid.optimizer.ModelAverage(\n average_window_rate=0.1,\n min_average_window=3,\n max_average_window=3)\n place = fluid.CUDAPlace(0) if use_cuda else fluid.CPUPlace()\n exe = fluid.Executor(place)\n exe.run(fluid.default_startup_program())\n for i in range(3):\n res = exe.run(train_program,\n feed={\"inp\": np_inp},\n fetch_list=[\n \"fc.w_0\",\n \"fc.b_0\",\n ])\n expect_w_restore = res[0]\n expect_b_restore = res[1]\n #\n inference_program = fluid.default_main_program().clone(\n for_test=True)\n with model_average.apply(exe, need_restore=True):\n res = exe.run(inference_program,\n feed={\"inp\": np_inp},\n fetch_list=[\n \"fc.w_0\",\n \"fc.b_0\",\n ])\n expect_w_apply = [[0.98732, 0.98732, 0.98732],\n [0.98098, 0.98098, 0.98098]]\n expect_b_apply = [-0.00634, -0.00634, -0.00634]\n tools.compare(res[0], expect_w_apply)\n tools.compare(res[1], expect_b_apply)\n\n model_average.restore(exe)\n res = exe.run(inference_program,\n feed={\"inp\": np_inp},\n fetch_list=[\n \"fc.w_0\",\n \"fc.b_0\",\n ])\n tools.compare(res[0], expect_w_restore)\n tools.compare(res[1], expect_b_restore)\n\n\ndef test_ModelAverage_need_restore():\n \"\"\"\n test ModelAverage : need_restore is hard-coded to True http://newicafe.baidu.com/issue/DLTP-3279/show?cid=5\n :return: fc.w_0, fc.b_0\n \"\"\"\n train_program = fluid.Program()\n startup_program = fluid.Program()\n with fluid.unique_name.guard():\n with fluid.program_guard(train_program, startup_program):\n np_inp = np.array([[1.0, 2.0], [3.0, 4.0]], dtype=np.float32)\n inp = fluid.layers.data(\n name=\"inp\", shape=[2, 2], append_batch_size=False)\n out = fluid.layers.fc(name=\"fc\",\n input=inp,\n size=3,\n param_attr=fluid.initializer.Constant(\n value=1.0, force_cpu=True))\n out = fluid.layers.reduce_sum(out)\n\n moment_optimizer = fluid.optimizer.MomentumOptimizer(\n learning_rate=0.001, momentum=0.9)\n moment_optimizer.minimize(out)\n\n model_average = fluid.optimizer.ModelAverage(\n average_window_rate=1.0,\n min_average_window=2,\n max_average_window=3)\n\n exe = fluid.Executor(fluid.CPUPlace())\n exe.run(fluid.default_startup_program())\n for i in range(3):\n res = exe.run(train_program,\n feed={\"inp\": np_inp},\n fetch_list=[\n \"fc.w_0\",\n \"fc.b_0\",\n ])\n expect_w_restore = res[0]\n expect_b_restore = res[1]\n\n inference_program = fluid.default_main_program().clone(\n for_test=True)\n with model_average.apply(exe, need_restore=False):\n res = exe.run(inference_program,\n feed={\"inp\": np_inp},\n fetch_list=[\n \"fc.w_0\",\n \"fc.b_0\",\n ])\n expect_w_apply = [[0.98732, 0.98732, 0.98732],\n [0.98098, 0.98098, 0.98098]]\n expect_b_apply = [-0.00634, -0.00634, -0.00634]\n tools.compare(res[0], expect_w_apply)\n tools.compare(res[1], expect_b_apply)\n\n model_average.restore(exe)\n res = exe.run(inference_program,\n feed={\"inp\": 
np_inp},\n fetch_list=[\n \"fc.w_0\",\n \"fc.b_0\",\n ])\n tools.compare(res[0], expect_w_restore)\n tools.compare(res[1], expect_b_restore)\n\n\n# ExponentialMovingAverage\ndef test_ExponentialMovingAverage():\n \"\"\"\n test ExponentialMovingAverage : bias-correction computation is inconsistent with the official documentation http://newicafe.baidu.com/issue/DLTP-3276/show?cid=5\n :return: fc.w_0, fc.b_0\n \"\"\"\n train_program = fluid.Program()\n startup_program = fluid.Program()\n with fluid.unique_name.guard():\n with fluid.program_guard(train_program, startup_program):\n np_inp = np.array([[1.0, 2.0], [3.0, 4.0]], dtype=np.float32)\n inp = fluid.layers.data(\n name=\"inp\", shape=[2, 2], append_batch_size=False)\n out = fluid.layers.fc(name=\"fc\",\n input=inp,\n size=3,\n param_attr=fluid.initializer.Constant(\n value=1.0, force_cpu=True))\n out = fluid.layers.reduce_sum(out)\n test_program = fluid.default_main_program().clone(for_test=True)\n\n moment_optimizer = fluid.optimizer.MomentumOptimizer(\n learning_rate=0.001, momentum=0.9)\n moment_optimizer.minimize(out)\n\n EMA = fluid.optimizer.ExponentialMovingAverage(decay=0.999)\n EMA.update()\n\n exe = fluid.Executor(fluid.CPUPlace())\n exe.run(fluid.default_startup_program())\n for i in range(3):\n res = exe.run(train_program,\n feed={\"inp\": np_inp},\n fetch_list=[\n \"fc.w_0\",\n \"fc.b_0\",\n ])\n # print(res)\n # expect_w_restore = [[0.97756, 0.97756, 0.97756], [0.96634, 0.96634, 0.96634]]\n # expect_b_restore = [-0.01122, -0.01122, -0.01122]\n with EMA.apply(exe, need_restore=True):\n res = exe.run(\n test_program,\n feed={\"inp\": np_inp},\n fetch_list=[\n # \"learning_rate_0\",\n # \"scheduled_ema_decay_rate\",\n # \"fc.w_0.ema_tmp_0\",\n # \"fc.b_0.ema_tmp_0\",\n \"fc.w_0_ema_0\",\n \"fc.b_0_ema_0\",\n \"fc.w_0\",\n \"fc.b_0\",\n ])\n # print(res)\n # print()\n # expect_w_apply =\n # expect_b_apply =\n # tools.compare(res[0], expect_w_apply)\n # tools.compare(res[1], expect_b_apply)\n\n # EMA.restore(exe)\n # res = exe.run(test_program, feed={\"inp\": np_inp}, fetch_list=[\n # \"fc.w_0\",\n # \"fc.b_0\",\n # ])\n # tools.compare(res[0], expect_w_restore)\n # tools.compare(res[1], expect_b_restore)\n\n\ndef test_ExponentialMovingAverage_thres_steps():\n \"\"\"\n test ExponentialMovingAverage : bias-correction computation is inconsistent with the official documentation http://newicafe.baidu.com/issue/DLTP-3276/show?cid=5\n (bias correction is inconsistent, parameters are not updated correctly, and it cannot be verified whether thres_steps takes effect)\n thres_steps = 1\n :return: fc.w_0, fc.b_0\n \"\"\"\n train_program = fluid.Program()\n startup_program = fluid.Program()\n with fluid.unique_name.guard():\n with fluid.program_guard(train_program, startup_program):\n np_inp = np.array([[1.0, 2.0], [3.0, 4.0]], dtype=np.float32)\n inp = fluid.layers.data(\n name=\"inp\", shape=[2, 2], append_batch_size=False)\n out = fluid.layers.fc(name=\"fc\",\n input=inp,\n size=3,\n param_attr=fluid.initializer.Constant(\n value=1.0, force_cpu=True))\n out = fluid.layers.reduce_sum(out)\n test_program = fluid.default_main_program().clone(for_test=True)\n\n moment_optimizer = fluid.optimizer.MomentumOptimizer(\n learning_rate=0.001, momentum=0.9)\n moment_optimizer.minimize(out)\n ts = fluid.layers.create_global_var(\n shape=[1],\n value=1.0,\n dtype='float32',\n persistable=True,\n name=\"ts\")\n EMA = fluid.optimizer.ExponentialMovingAverage(\n decay=0.999, thres_steps=ts)\n EMA.update()\n\n exe = fluid.Executor(fluid.CPUPlace())\n exe.run(fluid.default_startup_program())\n for i in range(2):\n res = exe.run(train_program,\n feed={\"inp\": np_inp},\n fetch_list=[\n \"fc.w_0\",\n \"fc.b_0\",\n ])\n # print(res)\n # expect_w_restore = res[0]\n # 
expect_b_restore = res[1]\n with EMA.apply(exe, need_restore=True):\n res = exe.run(\n test_program,\n feed={\"inp\": np_inp},\n fetch_list=[\n # \"learning_rate_0\",\n # \"scheduled_ema_decay_rate\",\n # \"fc.w_0.ema_tmp_0\",\n # \"fc.b_0.ema_tmp_0\",\n \"fc.w_0_ema_0\",\n \"fc.b_0_ema_0\",\n \"fc.w_0\",\n \"fc.b_0\",\n ])\n # print()\n # print(res)\n # expect_w_apply =\n # expect_b_apply =\n # tools.compare(res[0], expect_w_apply)\n # tools.compare(res[1], expect_b_apply)\n\n # EMA.restore(exe)\n # res = exe.run(test_program, feed={\"inp\": np_inp}, fetch_list=[\n # \"fc.w_0\",\n # \"fc.b_0\",\n # ])\n # tools.compare(res[0], expect_w_restore)\n # tools.compare(res[1], expect_b_restore)\n\n\n # LarsMomentumOptimizer\ndef test_LarsMomentumOptimizer():\n \"\"\"\n test LarsMomentumOptimizer\n default : learning_rate, momentum, lars_coeff=0.001, lars_weight_decay=0.0005, regularization=None, name=None\n :return:\n \"\"\"\n train_program = fluid.Program()\n startup_program = fluid.Program()\n with fluid.unique_name.guard():\n with fluid.program_guard(train_program, startup_program):\n np_inp = np.array([[1.0, 2.0], [3.0, 4.0]], dtype=np.float32)\n inp = fluid.layers.data(\n name=\"inp\", shape=[2, 2], append_batch_size=False)\n out = fluid.layers.fc(name=\"fc\",\n input=inp,\n size=3,\n param_attr=fluid.initializer.Constant(\n value=1.0, force_cpu=True))\n out = fluid.layers.reduce_sum(out)\n optimizer = fluid.optimizer.LarsMomentumOptimizer(\n learning_rate=0.001, momentum=0.9)\n optimizer.minimize(out)\n place = fluid.CUDAPlace(0) if use_cuda else fluid.CPUPlace()\n exe = fluid.Executor(place)\n exe.run(fluid.default_startup_program())\n for i in range(2):\n res = exe.run(train_program,\n feed={\"inp\": np_inp},\n fetch_list=[\"fc.w_0\", \"fc.b_0\", out.name])\n expect_w = [[0.99999774, 0.99999774, 0.99999774],\n [0.99999654, 0.99999654, 0.99999654]]\n expect_b = [-0.0038, -0.0038, -0.0038]\n expect_res = [29.98797]\n tools.compare(res[0], expect_w)\n tools.compare(res[1], expect_b)\n tools.compare(res[2], expect_res)\n\n\ndef test_LarsMomentumOptimizer_learning_rate_float():\n \"\"\"\n test LarsMomentumOptimizer learning_rate=0.1\n default :\n :return:\n \"\"\"\n train_program = fluid.Program()\n startup_program = fluid.Program()\n with fluid.unique_name.guard():\n with fluid.program_guard(train_program, startup_program):\n np_inp = np.array([[1.0, 2.0], [3.0, 4.0]], dtype=np.float32)\n inp = fluid.layers.data(\n name=\"inp\", shape=[2, 2], append_batch_size=False)\n out = fluid.layers.fc(name=\"fc\",\n input=inp,\n size=3,\n param_attr=fluid.initializer.Constant(\n value=1.0, force_cpu=True))\n out = fluid.layers.reduce_sum(out)\n optimizer = fluid.optimizer.LarsMomentumOptimizer(\n learning_rate=0.1, momentum=0.9)\n optimizer.minimize(out)\n place = fluid.CUDAPlace(0) if use_cuda else fluid.CPUPlace()\n exe = fluid.Executor(place)\n exe.run(fluid.default_startup_program())\n for i in range(2):\n res = exe.run(train_program,\n feed={\"inp\": np_inp},\n fetch_list=[\"fc.w_0\", \"fc.b_0\", out.name])\n expect_w = [[0.99977252, 0.99977252, 0.99977252],\n [0.99965878, 0.99965878, 0.99965878]]\n expect_b = [-0.38002, -0.38002, -0.38002]\n expect_res = [28.79694]\n tools.compare(res[0], expect_w)\n tools.compare(res[1], expect_b)\n tools.compare(res[2], expect_res)\n\n\ndef test_LarsMomentumOptimizer_moment():\n \"\"\"\n test LarsMomentumOptimizer : momentum=0.1\n default :\n :return:\n \"\"\"\n train_program = fluid.Program()\n startup_program = fluid.Program()\n with 
fluid.unique_name.guard():\n with fluid.program_guard(train_program, startup_program):\n np_inp = np.array([[1.0, 2.0], [3.0, 4.0]], dtype=np.float32)\n inp = fluid.layers.data(\n name=\"inp\", shape=[2, 2], append_batch_size=False)\n out = fluid.layers.fc(name=\"fc\",\n input=inp,\n size=3,\n param_attr=fluid.initializer.Constant(\n value=1.0, force_cpu=True))\n out = fluid.layers.reduce_sum(out)\n optimizer = fluid.optimizer.LarsMomentumOptimizer(\n learning_rate=0.1, momentum=0.1)\n optimizer.minimize(out)\n place = fluid.CUDAPlace(0) if use_cuda else fluid.CPUPlace()\n exe = fluid.Executor(place)\n exe.run(fluid.default_startup_program())\n for i in range(2):\n res = exe.run(train_program,\n feed={\"inp\": np_inp},\n fetch_list=[\n \"fc.w_0\",\n \"fc.b_0\",\n out.name,\n ])\n expect_w = [[0.99983525, 0.99983525, 0.99983525],\n [0.99975294, 0.99975294, 0.99975294]]\n expect_b = [-0.22002, -0.22002, -0.22002]\n expect_res = [28.79694]\n tools.compare(res[0], expect_w)\n tools.compare(res[1], expect_b)\n tools.compare(res[2], expect_res)\n\n\ndef test_LarsMomentumOptimizer_lars_coeff():\n \"\"\"\n test LarsMomentumOptimizer : lars_coeff=0.01\n default :\n :return:\n \"\"\"\n train_program = fluid.Program()\n startup_program = fluid.Program()\n with fluid.unique_name.guard():\n with fluid.program_guard(train_program, startup_program):\n np_inp = np.array([[1.0, 2.0], [3.0, 4.0]], dtype=np.float32)\n inp = fluid.layers.data(\n name=\"inp\", shape=[2, 2], append_batch_size=False)\n out = fluid.layers.fc(name=\"fc\",\n input=inp,\n size=3,\n param_attr=fluid.initializer.Constant(\n value=1.0, force_cpu=True))\n out = fluid.layers.reduce_sum(out)\n optimizer = fluid.optimizer.LarsMomentumOptimizer(\n learning_rate=0.1, momentum=0.9, lars_coeff=0.1)\n optimizer.minimize(out)\n place = fluid.CUDAPlace(0) if use_cuda else fluid.CPUPlace()\n exe = fluid.Executor(place)\n exe.run(fluid.default_startup_program())\n for i in range(2):\n res = exe.run(train_program,\n feed={\"inp\": np_inp},\n fetch_list=[\"fc.w_0\", \"fc.b_0\", out.name])\n # print(res)\n # print()\n expect_w = [[0.9773268, 0.9773268, 0.9773268],\n [0.9659917, 0.9659917, 0.9659917]]\n expect_b = [-0.3819998, -0.3819998, -0.3819998]\n expect_res = [28.49406]\n tools.compare(res[0], expect_w)\n tools.compare(res[1], expect_b)\n tools.compare(res[2], expect_res)\n\n\ndef test_LarsMomentumOptimizer_lars_weight_decay():\n \"\"\"\n test LarsMomentumOptimizer : lars_weight_decay=0.1\n default :\n :return:\n \"\"\"\n train_program = fluid.Program()\n startup_program = fluid.Program()\n with fluid.unique_name.guard():\n with fluid.program_guard(train_program, startup_program):\n np_inp = np.array([[1.0, 2.0], [3.0, 4.0]], dtype=np.float32)\n inp = fluid.layers.data(\n name=\"inp\", shape=[2, 2], append_batch_size=False)\n out = fluid.layers.fc(name=\"fc\",\n input=inp,\n size=3,\n param_attr=fluid.initializer.Constant(\n value=1.0, force_cpu=True))\n out = fluid.layers.reduce_sum(out)\n optimizer = fluid.optimizer.LarsMomentumOptimizer(\n learning_rate=0.1, momentum=0.9, lars_weight_decay=0.1)\n optimizer.minimize(out)\n place = fluid.CUDAPlace(0) if use_cuda else fluid.CPUPlace()\n exe = fluid.Executor(place)\n exe.run(fluid.default_startup_program())\n for i in range(2):\n res = exe.run(train_program,\n feed={\"inp\": np_inp},\n fetch_list=[\"fc.w_0\", \"fc.b_0\", out.name])\n expect_w = [[0.9997713, 0.9997713, 0.9997713],\n [0.9996598, 0.9996598, 0.9996598]]\n expect_b = [-0.3800196, -0.3800196, -0.3800196]\n expect_res = [28.796944]\n 
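# compare weights, bias and loss after two LARS momentum steps with lars_weight_decay=0.1\n 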
tools.compare(res[0], expect_w)\n tools.compare(res[1], expect_b)\n tools.compare(res[2], expect_res)\n\n\n# LookaheadOptimizer\ndef test_LookaheadOptimizer():\n \"\"\"\n test LookaheadOptimizer :\n default : inner_optimizer, alpha=0.5, k=5\n :return:\n \"\"\"\n train_program = fluid.Program()\n startup_program = fluid.Program()\n with fluid.unique_name.guard():\n with fluid.program_guard(train_program, startup_program):\n np_inp = np.array([[1.0, 2.0], [3.0, 4.0]], dtype=np.float32)\n inp = fluid.layers.data(\n name=\"inp\", shape=[2, 2], append_batch_size=False)\n out = fluid.layers.fc(name=\"fc\",\n input=inp,\n size=3,\n param_attr=fluid.initializer.Constant(\n value=1.0, force_cpu=True))\n out = fluid.layers.reduce_sum(out)\n sgd = fluid.optimizer.SGD(learning_rate=0.01)\n optimizer = fluid.optimizer.LookaheadOptimizer(sgd, alpha=0.5, k=5)\n optimizer.minimize(out)\n place = fluid.CUDAPlace(0) if use_cuda else fluid.CPUPlace()\n exe = fluid.Executor(place)\n exe.run(fluid.default_startup_program())\n for i in range(5):\n res = exe.run(train_program,\n feed={\"inp\": np_inp},\n fetch_list=[\n \"fc.w_0@SLOW\", \"fc.b_0@SLOW\", \"fc.w_0\",\n \"fc.b_0\", out.name\n ])\n expect_w_slow = [[0.9, 0.9, 0.9], [0.85, 0.85, 0.85]]\n expect_b_slow = [-0.05, -0.05, -0.05]\n expect_w_fast = [[0.9, 0.9, 0.9], [0.85, 0.85, 0.85]]\n expect_b_fats = [-0.05, -0.05, -0.05]\n expect_res = [23.279999]\n tools.compare(res[0], expect_w_slow)\n tools.compare(res[1], expect_b_slow)\n tools.compare(res[2], expect_w_fast)\n tools.compare(res[3], expect_b_fats)\n tools.compare(res[4], expect_res)\n\n\ndef test_LookaheadOptimizer_adagrad():\n \"\"\"\n test LookaheadOptimizer : inner_optimizer = AdagradOptimizer\n default : inner_optimizer, alpha=0.5, k=5\n :return:\n \"\"\"\n train_program = fluid.Program()\n startup_program = fluid.Program()\n with fluid.unique_name.guard():\n with fluid.program_guard(train_program, startup_program):\n np_inp = np.array([[1.0, 2.0], [3.0, 4.0]], dtype=np.float32)\n inp = fluid.layers.data(\n name=\"inp\", shape=[2, 2], append_batch_size=False)\n out = fluid.layers.fc(name=\"fc\",\n input=inp,\n size=3,\n param_attr=fluid.initializer.Constant(\n value=1.0, force_cpu=True))\n out = fluid.layers.reduce_sum(out)\n adagrad = fluid.optimizer.AdagradOptimizer(learning_rate=0.2)\n optimizer = fluid.optimizer.LookaheadOptimizer(\n adagrad, alpha=0.5, k=5)\n optimizer.minimize(out)\n place = fluid.CUDAPlace(0) if use_cuda else fluid.CPUPlace()\n exe = fluid.Executor(place)\n exe.run(fluid.default_startup_program())\n for i in range(5):\n res = exe.run(train_program,\n feed={\"inp\": np_inp},\n fetch_list=[\n \"fc.w_0@SLOW\", \"fc.b_0@SLOW\", \"fc.w_0\",\n \"fc.b_0\", out.name\n ])\n expect_w_slow = [[0.67683303, 0.67683303, 0.67683303],\n [0.67683303, 0.67683303, 0.67683303]]\n expect_b_slow = [-0.32316697, -0.32316697, -0.32316697]\n expect_w_fast = [[0.67683303, 0.67683303, 0.67683303],\n [0.67683303, 0.67683303, 0.67683303]]\n expect_b_fats = [-0.32316697, -0.32316697, -0.32316697]\n expect_res = [9.951912]\n tools.compare(res[0], expect_w_slow)\n tools.compare(res[1], expect_b_slow)\n tools.compare(res[2], expect_w_fast)\n tools.compare(res[3], expect_b_fats)\n tools.compare(res[4], expect_res)\n\n\ndef test_LookaheadOptimizer_alpha():\n \"\"\"\n test LookaheadOptimizer : alpha =0.1\n default : inner_optimizer, alpha=0.5, k=5\n :return:\n \"\"\"\n train_program = fluid.Program()\n startup_program = fluid.Program()\n with fluid.unique_name.guard():\n with 
fluid.program_guard(train_program, startup_program):\n np_inp = np.array([[1.0, 2.0], [3.0, 4.0]], dtype=np.float32)\n inp = fluid.layers.data(\n name=\"inp\", shape=[2, 2], append_batch_size=False)\n out = fluid.layers.fc(name=\"fc\",\n input=inp,\n size=3,\n param_attr=fluid.initializer.Constant(\n value=1.0, force_cpu=True))\n out = fluid.layers.reduce_sum(out)\n sgd = fluid.optimizer.SGD(learning_rate=0.01)\n optimizer = fluid.optimizer.LookaheadOptimizer(sgd, alpha=1.0, k=5)\n optimizer.minimize(out)\n place = fluid.CUDAPlace(0) if use_cuda else fluid.CPUPlace()\n exe = fluid.Executor(place)\n exe.run(fluid.default_startup_program())\n for i in range(5):\n res = exe.run(train_program,\n feed={\"inp\": np_inp},\n fetch_list=[\n \"fc.w_0@SLOW\", \"fc.b_0@SLOW\", \"fc.w_0\",\n \"fc.b_0\", out.name\n ])\n expect_w_slow = [[0.8, 0.8, 0.8], [0.7, 0.7, 0.7]]\n expect_b_slow = [-0.1, -0.1, -0.1]\n expect_w_fast = [[0.8, 0.8, 0.8], [0.7, 0.7, 0.7]]\n expect_b_fats = [-0.1, -0.1, -0.1]\n expect_res = [23.279999]\n tools.compare(res[0], expect_w_slow)\n tools.compare(res[1], expect_b_slow)\n tools.compare(res[2], expect_w_fast)\n tools.compare(res[3], expect_b_fats)\n tools.compare(res[4], expect_res)\n\n\ndef test_LookaheadOptimizer_k():\n \"\"\"\n test LookaheadOptimizer : k = 10\n default : inner_optimizer, alpha=0.5, k=5\n :return:\n \"\"\"\n train_program = fluid.Program()\n startup_program = fluid.Program()\n with fluid.unique_name.guard():\n with fluid.program_guard(train_program, startup_program):\n np_inp = np.array([[1.0, 2.0], [3.0, 4.0]], dtype=np.float32)\n inp = fluid.layers.data(\n name=\"inp\", shape=[2, 2], append_batch_size=False)\n out = fluid.layers.fc(name=\"fc\",\n input=inp,\n size=3,\n param_attr=fluid.initializer.Constant(\n value=1.0, force_cpu=True))\n out = fluid.layers.reduce_sum(out)\n sgd = fluid.optimizer.SGD(learning_rate=0.01)\n optimizer = fluid.optimizer.LookaheadOptimizer(sgd, alpha=0.5, k=10)\n optimizer.minimize(out)\n place = fluid.CUDAPlace(0) if use_cuda else fluid.CPUPlace()\n exe = fluid.Executor(place)\n exe.run(fluid.default_startup_program())\n for i in range(10):\n res = exe.run(train_program,\n feed={\"inp\": np_inp},\n fetch_list=[\n \"fc.w_0@SLOW\", \"fc.b_0@SLOW\", \"fc.w_0\",\n \"fc.b_0\", out.name\n ])\n expect_w_slow = [[0.8, 0.8, 0.8], [0.7, 0.7, 0.7]]\n expect_b_slow = [-0.1, -0.1, -0.1]\n expect_w_fast = [[0.8, 0.8, 0.8], [0.7, 0.7, 0.7]]\n expect_b_fats = [-0.1, -0.1, -0.1]\n expect_res = [14.879996]\n tools.compare(res[0], expect_w_slow)\n tools.compare(res[1], expect_b_slow)\n tools.compare(res[2], expect_w_fast)\n tools.compare(res[3], expect_b_fats)\n tools.compare(res[4], expect_res)\n\n\n# AdamOptimizer\ndef test_AdamOptimizer():\n \"\"\"\n test AdamOptimizer\n default : http://newicafe.baidu.com/issue/DLTP-3196/show 返回不一致\n lr = 0.1 、beta1 = 0.9、 beta2 = 0.999、 epsilon = 1e-08 、 regularization = NONE 、 name = NONE 、lazy_mode = False\n :return:\n \"\"\"\n train_program = fluid.Program()\n startup_program = fluid.Program()\n with fluid.unique_name.guard():\n with fluid.program_guard(train_program, startup_program):\n np_inp = np.array([[1.0, 2.0], [3.0, 4.0]], dtype=np.float32)\n inp = fluid.layers.data(\n name=\"inp\", shape=[2, 2], append_batch_size=False)\n out = fluid.layers.fc(name=\"fc\",\n input=inp,\n size=3,\n param_attr=fluid.initializer.Constant(\n value=1.0, force_cpu=True))\n out = fluid.layers.reduce_sum(out)\n optimizer = fluid.optimizer.AdamOptimizer(learning_rate=0.1)\n optimizer.minimize(out)\n\n place = 
fluid.CUDAPlace(0) if use_cuda else fluid.CPUPlace()\n exe = fluid.Executor(place)\n exe.run(fluid.default_startup_program())\n for i in range(1):\n res = exe.run(\n train_program,\n feed={\"inp\": np_inp},\n fetch_list=[\n \"learning_rate_0\", # 0\n \"fc.w_0@GRAD\", # 1\n \"fc.b_0@GRAD\", # 2\n \"fc.w_0_beta1_pow_acc_0\", # 3\n \"fc.w_0_beta2_pow_acc_0\", # 4\n \"fc.b_0_beta1_pow_acc_0\", # 5\n \"fc.b_0_beta2_pow_acc_0\", # 6\n \"fc.w_0_moment1_0\", # 7\n \"fc.w_0_moment2_0\", # 8\n \"fc.b_0_moment1_0\", # 9\n \"fc.b_0_moment2_0\", # 10\n \"fc.w_0\", # 11\n \"fc.b_0\", # 12\n out.name\n ])\n # expect_w = [[0.92558715, 0.92558715, 0.92558715], [0.92558696, 0.92558696, 0.92558696]]\n # expect_b = [-0.07441226, -0.07441226, -0.07441226]\n # expect_res = [30.0]\n # tools.compare(res[0], expect_w)\n # tools.compare(res[1], expect_b)\n # tools.compare(res[4], expect_res)\n\n\n # AdamaxOptimizer\ndef test_AdamaxOptimizer():\n \"\"\"\n test AdamaxOptimizer : http://newicafe.baidu.com/issue/DLTP-3198/show?cid=5\n default : lr = 0.1 、beta1 = 0.9、 beta2 = 0.999、 epsilon = 1e-08 、 regularization = NONE 、 name = NONE 、\n :return:\n \"\"\"\n train_program = fluid.Program()\n startup_program = fluid.Program()\n with fluid.unique_name.guard():\n with fluid.program_guard(train_program, startup_program):\n np_inp = np.array([[1.0, 2.0], [3.0, 4.0]], dtype=np.float32)\n inp = fluid.layers.data(\n name=\"inp\", shape=[2, 2], append_batch_size=False)\n out = fluid.layers.fc(name=\"fc\",\n input=inp,\n size=3,\n param_attr=fluid.initializer.Constant(\n value=1.0, force_cpu=True))\n out = fluid.layers.reduce_sum(out)\n optimizer = fluid.optimizer.AdamaxOptimizer(\n learning_rate=0.001, beta1=0.1, beta2=0.9, epsilon=0.0)\n optimizer.minimize(out)\n\n place = fluid.CUDAPlace(0) if use_cuda else fluid.CPUPlace()\n exe = fluid.Executor(place)\n exe.run(fluid.default_startup_program())\n for i in range(1):\n res = exe.run(\n train_program,\n feed={\"inp\": np_inp},\n fetch_list=[\n \"learning_rate_0\", # 0\n \"fc.w_0@GRAD\", # 1\n \"fc.b_0@GRAD\", # 2\n \"fc.w_0_beta1_pow_acc_0\", # 3\n \"fc.b_0_beta1_pow_acc_0\", # 4\n \"fc.w_0_moment_0\", # 5\n \"fc.b_0_moment_0\", # 6\n \"fc.w_0_inf_norm_0\", # 7\n \"fc.b_0_inf_norm_0\", # 8\n \"fc.w_0\", # 9\n \"fc.b_0\", # 10\n out.name\n ])\n # print(res[9], res[10])\n\n\n # AdadeltaOptimizer\ndef test_AdadeltaOptimizer():\n \"\"\"\n test AdadeltaOptimizer : http://newicafe.baidu.com/issue/DLTP-3199/show?cid=5\n default : lr = 0.01 、 regularization = NONE 、 name = NONE 、\n lazy_mode = False\n :return:\n \"\"\"\n train_program = fluid.Program()\n startup_program = fluid.Program()\n with fluid.unique_name.guard():\n with fluid.program_guard(train_program, startup_program):\n np_inp = np.array([[1.0, 2.0], [3.0, 4.0]], dtype=np.float32)\n inp = fluid.layers.data(\n name=\"inp\", shape=[2, 2], append_batch_size=False)\n out = fluid.layers.fc(name=\"fc\",\n input=inp,\n size=3,\n param_attr=fluid.initializer.Constant(\n value=1.0, force_cpu=True))\n out = fluid.layers.reduce_sum(out)\n optimizer = fluid.optimizer.AdadeltaOptimizer(learning_rate=0.01, )\n optimizer.minimize(out)\n\n place = fluid.CUDAPlace(0) if use_cuda else fluid.CPUPlace()\n exe = fluid.Executor(place)\n exe.run(fluid.default_startup_program())\n for i in range(1):\n res = exe.run(\n train_program,\n feed={\"inp\": np_inp},\n fetch_list=[\n \"learning_rate_0\", # 0\n \"fc.w_0@GRAD\", # 1\n \"fc.b_0@GRAD\", # 2\n \"fc.w_0__avg_squared_update_0\", # 3\n \"fc.b_0__avg_squared_update_0\", # 4\n 
\"fc.w_0__avg_squared_grad_0\", # 5\n \"fc.b_0__avg_squared_grad_0\", # 6\n \"fc.w_0\", # 7\n \"fc.b_0\", # 8\n out.name # 9\n ])\n # print(res[7], res[8])\n\n\n # LambOptimizer\ndef test_LambOptimizer():\n \"\"\"\n test LambOptimizer :\n default :learning_rate=0.001, lamb_weight_decay=0.01, beta1=0.9, beta2=0.999, epsilon=1e-06, regularization=None,\n exclude_from_weight_decay_fn=None, name=None\n :return:\n \"\"\"\n train_program = fluid.Program()\n startup_program = fluid.Program()\n with fluid.unique_name.guard():\n with fluid.program_guard(train_program, startup_program):\n np_inp = np.array([[1.0, 2.0], [3.0, 4.0]], dtype=np.float32)\n inp = fluid.layers.data(\n name=\"inp\", shape=[2, 2], append_batch_size=False)\n out = fluid.layers.fc(name=\"fc\",\n input=inp,\n size=3,\n param_attr=fluid.initializer.Constant(\n value=1.0, force_cpu=True))\n out = fluid.layers.reduce_sum(out)\n optimizer = fluid.optimizer.LambOptimizer(learning_rate=0.001)\n optimizer.minimize(out)\n place = fluid.CUDAPlace(0) if use_cuda else fluid.CPUPlace()\n exe = fluid.Executor(place)\n exe.run(fluid.default_startup_program())\n\n for i in range(2):\n res = exe.run(\n train_program,\n feed={\"inp\": np_inp},\n fetch_list=[\n # \"fc.w_0_moment1_0\",\n # \"fc.b_0_moment1_0\",\n #\n # \"fc.w_0_moment2_0\",\n # \"fc.b_0_moment2_0\",\n \"fc.w_0\",\n \"fc.b_0\",\n out.name\n ])\n expect_w = [[0.998001, 0.998001, 0.998001],\n [0.998001, 0.998001, 0.998001]]\n expect_b = [-0.00316541, -0.00316541, -0.00316541]\n expect_res = [29.951027]\n tools.compare(res[0], expect_w)\n tools.compare(res[1], expect_b)\n tools.compare(res[2], expect_res)\n\n\ndef test_LambOptimizer_learning_rate_float():\n \"\"\"\n test LambOptimizer :learning_rate = 0.1\n default\n :return:\n \"\"\"\n train_program = fluid.Program()\n startup_program = fluid.Program()\n with fluid.unique_name.guard():\n with fluid.program_guard(train_program, startup_program):\n np_inp = np.array([[1.0, 2.0], [3.0, 4.0]], dtype=np.float32)\n inp = fluid.layers.data(\n name=\"inp\", shape=[2, 2], append_batch_size=False)\n out = fluid.layers.fc(name=\"fc\",\n input=inp,\n size=3,\n param_attr=fluid.initializer.Constant(\n value=1.0, force_cpu=True))\n out = fluid.layers.reduce_sum(out)\n\n optimizer = fluid.optimizer.LambOptimizer(learning_rate=0.1)\n optimizer.minimize(out)\n place = fluid.CUDAPlace(0) if use_cuda else fluid.CPUPlace()\n exe = fluid.Executor(place)\n exe.run(fluid.default_startup_program())\n\n for i in range(2):\n res = exe.run(\n train_program,\n feed={\"inp\": np_inp},\n fetch_list=[\n # \"fc.w_0_moment1_0\",\n # \"fc.b_0_moment1_0\",\n #\n # \"fc.w_0_moment2_0\",\n # \"fc.b_0_moment2_0\",\n \"fc.w_0\",\n \"fc.b_0\",\n out.name\n ])\n expect_w = [[0.81000024, 0.81000024, 0.81000024],\n [0.80999977, 0.80999977, 0.80999977]]\n expect_b = [-0.34784739, -0.34784739, -0.34784739]\n expect_res = [25.10265]\n tools.compare(res[0], expect_w)\n tools.compare(res[1], expect_b)\n tools.compare(res[2], expect_res)\n"
] | [
[
"numpy.array"
]
] |
Benjamin-Fouquet/dynMRI | [
"c80b48cee834e2042e95241701ce064c22e3d3d1"
] | [
"AnkleSegmentation/TestReconstruction.py"
] | [
"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Wed Sep 16 10:35:20 2020\n\n@author: p20coupe\n\"\"\"\n\nimport argparse\nimport sys\n\nimport joblib\nimport numpy as np\nimport matplotlib\nmatplotlib.use('TkAgg')\nimport matplotlib.pyplot as plt\nimport os\nimport math\nimport statistics\n\nimport torch\nfrom torch import nn\nfrom torch.optim import Adam\nfrom torch.utils.data import DataLoader, Dataset\nimport torch.backends.cudnn as cudnn\nimport torch.backends.cudnn\nfrom torch.autograd import Variable\nfrom torch.nn import functional as F\nfrom torchvision.transforms import ToTensor, Normalize, Compose\nimport torch.optim as optim\n\n\nfrom ResNet_Reconstruction import *\n\nparser = argparse.ArgumentParser()\nparser.add_argument(\"TestData\", help=\"PATH to testing data\")\nparser.add_argument(\"NetworkPATH\",help=\"PATH to the network to use\")\nparser.add_argument(\"ResultsDirectory\",help=\"PATH to the results storage directory\")\nargs = parser.parse_args()\n\nuse_cuda = torch.cuda.is_available()\ndevice = torch.device(\"cuda\" if use_cuda else \"cpu\")\n\nDatadirectory = sys.argv[1]\nResultsDirectory = sys.argv[3]\nNetworkPath = sys.argv[2]\n\ndef psnr(lP,lT):\n mse = np.mean( (lP - lT) ** 2 )\n if mse == 0:\n return 100\n PIXEL_MAX = 3.0\n return 20 * math.log10(PIXEL_MAX / math.sqrt(mse))\n\ndef imshow(img,corr):\n\tn_rows = 2\n\tn_cols = int(len(img)/2)\n\tplt.figure(figsize=(n_cols, n_rows))\n\tfor i in range(n_rows):\n\t\tfor j in range(n_cols):\n\t\t\tsub = plt.subplot(n_rows, n_cols, i*n_cols+1+j)\n\t\t\tsub.imshow(img[j+n_rows*i,0,:,:].numpy(),\n\t\t \tcmap=plt.cm.gray,\n\t\t \tinterpolation=\"nearest\",\n\t\t \tvmin=-3,vmax=2)\n\t\t\tsub.set_title('%.3f' %(corr[j+n_rows*i]))\n\t\t\tsub.axis('off')\n\t\n\ndef imshow_difMap(img,label):\n\tn_rows = 2\n\tn_cols = int(len(img)/2)\n\tplt.figure(figsize=(n_cols, n_rows))\n\tfor i in range(n_rows):\n\t\tfor j in range(n_cols):\n\t\t\tsub = plt.subplot(n_rows, n_cols, i*n_cols+1+j)\n\t\t\tsub.imshow(img[j+n_rows*i,0,:,:].numpy()-label[j+n_rows*i,0,:,:].numpy(),\n\t\t \tcmap=plt.cm.gray,\n\t\t \tinterpolation=\"nearest\",\n\t\t \tvmin=-3,vmax=2)\n\t\t\tsub.axis('off')\n \ndef ValidRed2D(testloader,path):\n psnr_value=[]\n \n net = ResNet(BasicBlock, [3,4,6]).to(device)\n net.load_state_dict(torch.load(path))\n \n for i, data in enumerate(testloader, 0):\n # get the inputs; data is a list of [inputs, labels]\n inputs, labels, correlation = data\n inputs= inputs.to(device)\n labels = labels.to(device)\n correlation = correlation.to(device)\n\t\n outputs = net(inputs)\n\n\t\n psnr_val = 0\n\t\n pr= labels[0].cpu().detach().numpy()\n gt = outputs[0].cpu().detach().numpy()\n psnr_val = psnr_val + psnr(gt[0,:,:],pr[0,:,:])\n\t\n\t\n if i == 800:\n imshow(inputs.cpu().detach(),correlation.cpu().detach().numpy())\n plt.savefig(os.path.join(ResultsDirectory,'Images','inputs_myloss_test.png'),dpi=150)\n imshow(labels.cpu().detach(),correlation.cpu().detach().numpy())\n plt.savefig(os.path.join(ResultsDirectory,'Images','labels_myloss_test.png'),dpi=150)\n imshow(outputs.cpu().detach(),correlation.cpu().detach().numpy())\n plt.savefig(os.path.join(ResultsDirectory,'Images','outputs_myloss_test.png'),dpi=150)\n imshow_difMap(outputs.cpu().detach(),labels.cpu().detach())\n plt.savefig(os.path.join(ResultsDirectory,'Images','DifMap_myloss_test.png'),dpi=150)\n\t\n \n psnr_value.append(psnr_val)\n \n np.savetxt(os.path.join(ResultsDirectory,'PSNR_test.txt'),psnr_value)\n print('Standard Deviation:' + 
str(statistics.stdev(psnr_value)))\n print('Mean:' + str(statistics.mean(psnr_value)))\n \n\nbones=['calcaneus','talus','tibia'] \nX_test=[[]]\nY_test=[[]] \ncorr=[]\n \nsujets = os.listdir(Datadirectory)\nsujets = np.sort(sujets)\n\n\nfor i in range(len(sujets)):\n\t\n #Dataset Validation\n for bone in bones:\n patches= os.listdir(os.path.join(Datadirectory,sujets[i],'DatasetReconstruction_patches',bone))\n for k in range(len(patches)):\n if(patches[k].find('BR')!=-1):\n if X_test[0]==[]:\n X_test[0] = joblib.load(os.path.join(Datadirectory,sujets[i],'DatasetReconstruction_patches',bone,patches[k]))\n else:\n X_test[0] = X_test[0]+joblib.load(os.path.join(Datadirectory,sujets[i],'DatasetReconstruction_patches',bone,patches[k]))\n \t \n if(patches[k].find('HR')!=-1):\n if Y_test[0]==[]:\n Y_test[0] = joblib.load(os.path.join(Datadirectory,sujets[i],'DatasetReconstruction_patches',bone,patches[k]))\n else:\n Y_test[0] = Y_test[0]+joblib.load(os.path.join(Datadirectory,sujets[i],'DatasetReconstruction_patches',bone,patches[k]))\n \n if(patches[k].find('corr')!=-1):\n corr = np.append(corr,joblib.load(os.path.join(Datadirectory,sujets[i],'DatasetReconstruction_patches',bone,patches[k])))\t\t\n\t\n\nY_test = np.moveaxis(Y_test,0,1)\nX_test = np.moveaxis(X_test,0,1)\nprint(np.shape(Y_test))\nprint(np.shape(corr))\n\n\ntestset = torch.utils.data.TensorDataset(torch.Tensor(X_test),torch.Tensor(Y_test),torch.Tensor(corr))\ntestloader = torch.utils.data.DataLoader(testset, batch_size=8,\n\t\t shuffle=False, pin_memory=use_cuda, num_workers=2)\n\n#Create directores for the results \nif not os.path.exists(os.path.join(ResultsDirectory,'Images')):\n os.mkdir(os.path.join(ResultsDirectory, 'Images'))\n\nValidRed2D(testloader,NetworkPath)"
] | [
[
"matplotlib.use",
"torch.device",
"numpy.mean",
"matplotlib.pyplot.figure",
"numpy.shape",
"torch.cuda.is_available",
"numpy.sort",
"torch.utils.data.DataLoader",
"torch.load",
"numpy.moveaxis",
"torch.Tensor",
"matplotlib.pyplot.subplot"
]
] |
hoyeon94/CartoonGAN-Tensorflow | [
"e30086bbfb63be49744569b037eda3babe3e60e1"
] | [
"edge_smooth.py"
] | [
"from utils import check_folder\nimport numpy as np\nimport cv2, os, argparse\nfrom glob import glob\nfrom tqdm import tqdm\n\ndef parse_args():\n desc = \"Edge smoothed\"\n parser = argparse.ArgumentParser(description=desc)\n parser.add_argument('--dataset', type=str, default='hw', help='dataset_name')\n parser.add_argument('--img_size', type=int, default=256, help='The size of image')\n\n return parser.parse_args()\n\ndef make_edge_smooth(dataset_name, img_size) :\n check_folder('./dataset/{}/{}'.format(dataset_name, 'trainB_smooth'))\n\n file_list = glob('./dataset/{}/{}/*.*'.format(dataset_name, 'trainB'))\n save_dir = './dataset/{}/trainB_smooth'.format(dataset_name)\n\n kernel_size = 5\n kernel = np.ones((kernel_size, kernel_size), np.uint8)\n gauss = cv2.getGaussianKernel(kernel_size, 0)\n gauss = gauss * gauss.transpose(1, 0)\n\n for f in tqdm(file_list) :\n file_name = os.path.basename(f)\n\n bgr_img = cv2.imread(f)\n gray_img = cv2.imread(f, 0)\n\n bgr_img = cv2.resize(bgr_img, (img_size, img_size))\n pad_img = np.pad(bgr_img, ((2, 2), (2, 2), (0, 0)), mode='reflect')\n gray_img = cv2.resize(gray_img, (img_size, img_size))\n\n edges = cv2.Canny(gray_img, 100, 200)\n dilation = cv2.dilate(edges, kernel)\n\n gauss_img = np.copy(bgr_img)\n idx = np.where(dilation != 0)\n for i in range(np.sum(dilation != 0)):\n gauss_img[idx[0][i], idx[1][i], 0] = np.sum(\n np.multiply(pad_img[idx[0][i]:idx[0][i] + kernel_size, idx[1][i]:idx[1][i] + kernel_size, 0], gauss))\n gauss_img[idx[0][i], idx[1][i], 1] = np.sum(\n np.multiply(pad_img[idx[0][i]:idx[0][i] + kernel_size, idx[1][i]:idx[1][i] + kernel_size, 1], gauss))\n gauss_img[idx[0][i], idx[1][i], 2] = np.sum(\n np.multiply(pad_img[idx[0][i]:idx[0][i] + kernel_size, idx[1][i]:idx[1][i] + kernel_size, 2], gauss))\n\n cv2.imwrite(os.path.join(save_dir, file_name), gauss_img)\n\n\"\"\"main\"\"\"\ndef main():\n # parse arguments\n args = parse_args()\n if args is None:\n exit()\n\n make_edge_smooth(args.dataset, args.img_size)\n\n\nif __name__ == '__main__':\n main()\n"
] | [
[
"numpy.pad",
"numpy.sum",
"numpy.copy",
"numpy.ones",
"numpy.multiply",
"numpy.where"
]
] |
cnedwards/lottery-ticket-hypothesis | [
"e2eb64638183b75dc689feb4b745810a2e3e1c8b"
] | [
"lottery_ticket/foundations/save_restore.py"
] | [
"# Copyright (C) 2018 Google Inc.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"Save and restore networks.\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport csv\nimport os\n\nfrom lottery_ticket.foundations import paths\nimport numpy as np\nimport six\nimport tensorflow as tf\n\n\ndef save_network(filename, weights_dict):\n \"\"\"Save the parameters of a neural network.\n\n weights_dict is a dictionary where each key is the name of a tensor and each\n value is a numpy array containing that tensor's weights. filename is created\n as a directory and each item of weights_dict is saved as a separate npy file\n within that directory.\n\n This function is useful for saving the weights of a network, the\n initialization of a network (in the same manner), or the masks used to prune a\n network.\n\n Args:\n filename: A directory in which the network should be saved.\n weights_dict: A dictionary where each key is the name of a tensor and each\n value is a numpy array. This is the dictionary of values that is to be\n saved.\n \"\"\"\n if tf.io.gfile.exists(filename):\n tf.compat.v1.gfile.DeleteRecursively(filename)\n tf.io.gfile.makedirs(filename)\n\n for k, v in weights_dict.items():\n with tf.compat.v1.gfile.FastGFile(os.path.join(filename, k + '.npy'), 'w') as fp:\n np.save(fp, v)\n\n\ndef restore_network(filename):\n \"\"\"Loads a network in the form stored by save_network.\n\n The inverse operation of save_network.\n\n filename is the name of a directory containing many npy files. Each npy file\n is loaded into a numpy array and added to a dictionary. The dictionary key\n is the name of the file (without the .npy extension). This dictionary is\n returned.\n\n Args:\n filename: The name of the directory where the npy files are saved.\n\n Returns:\n A dictionary where each key is the name of a npy file within filename and\n each value is the corresponding numpy array stored in that file. This\n dictionary is of the same form as that passed to save_network.\n\n Raises:\n ValueError: If filename does not exist.\n \"\"\"\n if not tf.io.gfile.Exists(filename):\n raise ValueError('Filename {} does not exist.'.format(filename))\n\n weights_dict = {}\n\n for basename in tf.io.gfile.ListDirectory(filename):\n name = basename.split('.')[0]\n with tf.io.gfile.FastGFile(os.path.join(filename, basename)) as fp:\n weights_dict[name] = np.load(fp)\n\n return weights_dict\n\n\ndef standardize(network, combine_fn=None):\n \"\"\"Restore a network that has been provided in one of four possible forms.\n\n A network can be represented in one of four forms:\n * None, the absence of a network.\n * A dictionary where keys are names of tensors and values are numpy arrays\n of the values to be stored in those tensors.\n * The name of a directory containing npy files. 
The filenames become\n dictionary keys and the file contents become dictionary values.\n * A list of directory names and dictionaries in one of the aforementioned\n forms. Any directories are restored into dictionaries, after which\n combine_fn is applied to the list of dictionaries to combine it into\n a single dictionary.\n\n Args:\n network: A reference to a network in one of the forms described above.\n combine_fn: The function used to combine a list of dictionaries into a\n single dictionary. This argument is only required if network could be\n a list.\n\n Returns:\n A dictionary whose keys are tensor names and whose values are numpy arrays.\n This dictionary was derived from the dictionary, location, or location_list\n arguments.\n\n Raises:\n ValueError: If the network is of an unexpected type.\n \"\"\"\n if isinstance(network, dict) or network is None:\n return network\n elif isinstance(network, six.string_types):\n return restore_network(network)\n elif isinstance(network, list):\n return combine_fn([standardize(n) for n in network])\n else:\n raise ValueError('network must be a dict, string path, None, or a list '\n ' of those types.')\n\n\ndef read_log(directory, name='test', tail=0):\n \"\"\"Reads logged data about the performance of a lottery ticket experiment.\n\n Args:\n directory: The directory where the log data for a particular experiment\n is stored.\n name: Whether to retrieve data from the \"test\", \"train\", or \"validate\"\n logs.\n tail: If nonzero, returns only the last tail entries in each run.\n\n Returns:\n A dictionary with three keys.\n 'iteration' is a numpy array of the iterations at which data was collected.\n 'loss' is a numpy array of loss values at the corresponding iteration.\n 'accuracy' is a numpy array of accuracy values at the corresponding\n iteration.\n \"\"\"\n output = {\n 'iteration': [],\n 'loss': [],\n 'accuracy': [],\n }\n\n with tf.io.gfile.GFile(paths.log(directory, name)) as fp:\n reader = csv.reader(fp)\n for row in reader:\n output['iteration'].append(float(row[1]))\n output['loss'].append(float(row[3]))\n output['accuracy'].append(float(row[5]))\n\n output['iteration'] = np.array(output['iteration'][-tail:])\n output['loss'] = np.array(output['loss'][-tail:])\n output['accuracy'] = np.array(output['accuracy'][-tail:])\n\n return output\n\n\ndef write_log(data, directory, name='test'):\n \"\"\"Writes data about the performance of a lottery ticket experiment.\n\n Input data takes the same form as data returned by read_data. Writes a file\n in the format read by read_data.\n\n Args:\n data: The data to be written to the file. Takes the same form as the data\n returned by read_data.\n directory: The directory where the log data for a particular experiment is\n to be stored.\n name: What to call the data file itself.\n \"\"\"\n with tf.io.gfile.GFile(paths.log(directory, name), 'w') as fp:\n for loss, it, acc in zip(data['loss'], data['iteration'], data['accuracy']):\n fp.write(','.join(\n ('iteration',\n str(it), 'loss',\n str(loss), 'accuracy',\n str(acc))))\n fp.write('\\n')\n"
] | [
[
"numpy.array",
"tensorflow.io.gfile.Exists",
"numpy.load",
"numpy.save",
"tensorflow.io.gfile.makedirs",
"tensorflow.io.gfile.exists",
"tensorflow.io.gfile.ListDirectory",
"tensorflow.compat.v1.gfile.DeleteRecursively"
]
] |
christophanneser/Bao-for-Presto | [
"b1d93689025d51cdea1a2e81edb8f077df8afcc1"
] | [
"tree_conv/test/test_utils.py"
] | [
"import unittest\nimport numpy as np\nfrom util import prepare_trees, TreeConvolutionError\n\n\nclass TestUtils(unittest.TestCase):\n\n def test_prepare(self):\n # simple smoke test from the example file\n tree1 = (\n (0, 1),\n ((1, 2), ((0, 1),), ((-1, 0),)),\n ((-3, 0), ((2, 3),), ((1, 2),))\n )\n \n tree2 = (\n (16, 3),\n ((0, 1), ((5, 3),), ((2, 6),)),\n ((2, 9),)\n )\n\n trees = [tree1, tree2]\n \n # function to extract the left child of a node\n def left_child(x):\n assert isinstance(x, tuple)\n if len(x) == 1:\n # leaf.\n return None\n return x[1]\n\n # function to extract the right child of node\n def right_child(x):\n assert isinstance(x, tuple)\n if len(x) == 1:\n # leaf.\n return None\n return x[2]\n\n # function to transform a node into a (feature) vector,\n # should be a numpy array.\n def transformer(x):\n return np.array(x[0])\n\n\n prepared_trees = prepare_trees(trees, transformer, left_child, right_child)\n self.assertEqual(len(prepared_trees), 2)\n\n def test_raises_on_malformed(self):\n # simple smoke test from the example file\n tree1 = (\n (0, 1),\n ((1, 2), ((0, 1),), ((-1, 0),)),\n ((-3, 0), ((2, 3),), ((1, 2),))\n )\n \n tree2 = (\n (16, 3, 2),\n ((0, 1), ((5, 3),), ((2, 6),)),\n ((2, 9),)\n )\n\n trees = [tree1, tree2]\n \n # function to extract the left child of a node\n def left_child(x):\n assert isinstance(x, tuple)\n if len(x) == 1:\n # leaf.\n return None\n return x[1]\n\n # function to extract the right child of node\n def right_child(x):\n assert isinstance(x, tuple)\n if len(x) == 1:\n # leaf.\n return None\n return x[2]\n\n # function to transform a node into a (feature) vector,\n # should be a numpy array.\n def transformer(x):\n return np.array(x[0])\n\n\n with self.assertRaises(TreeConvolutionError):\n prepare_trees(trees,\n transformer, left_child, right_child)\n\nif __name__ == '__main__':\n unittest.main()\n"
] | [
[
"numpy.array"
]
] |
pittwolfe/pyqg | [
"3a4b8b0a53dc0204a437376ffdcb981568edb111"
] | [
"setup.py"
] | [
"from setuptools import setup, Extension\nfrom Cython.Build import cythonize\nimport warnings\nimport numpy as np\nimport os\nimport tempfile, subprocess, shutil\nimport versioneer\n\n\nDISTNAME='pyqg'\nURL='http://github.com/pyqg/pyqg'\nAUTHOR='pyqg team'\nAUTHOR_EMAIL='[email protected]'\nLICENSE='MIT'\n\nDESCRIPTION='python quasigeostrophic model'\nLONG_DESCRIPTION=\"\"\"\npyqg is a python solver for quasigeostrophic systems. Quasigeostophic\nequations are an approximation to the full fluid equations of motion in\nthe limit of strong rotation and stratitifcation and are most applicable\nto geophysical fluid dynamics problems.\n\nStudents and researchers in ocean and atmospheric dynamics are the intended\naudience of pyqg. The model is simple enough to be used by students new to\nthe field yet powerful enough for research. We strive for clear documentation\nand thorough testing.\n\npyqg supports a variety of different configurations using the same\ncomputational kernel. The different configurations are evolving and are\ndescribed in detail in the documentation. The kernel, implement in cython,\nuses a pseudo-spectral method which is heavily dependent of the fast Fourier\ntransform. For this reason, pyqg depends on pyfftw and the FFTW Fourier\nTransform library. The kernel is multi-threaded but does not support mpi.\nOptimal performance will be achieved on a single system with many cores.\n\nLinks\n-----\n\n- HTML documentation: http://pyqg.readthedocs.org\n- Issue tracker: http://github.com/pyqg/pyqg/issues\n- Source code: http://github.com/pyqg/pyqg\n\"\"\"\n\nCLASSIFIERS = [\n 'Development Status :: 4 - Beta',\n 'License :: OSI Approved :: MIT License',\n 'Operating System :: OS Independent',\n 'Intended Audience :: Science/Research',\n 'Programming Language :: Python',\n 'Programming Language :: Python :: 2.7',\n 'Programming Language :: Python :: 3',\n 'Programming Language :: Python :: 3.6',\n 'Programming Language :: Python :: 3.7',\n 'Programming Language :: Python :: 3.8',\n 'Topic :: Scientific/Engineering',\n 'Topic :: Scientific/Engineering :: Physics',\n 'Topic :: Scientific/Engineering :: Atmospheric Science'\n]\n\n\n### Dependency section ###\ninstall_requires = [\n 'cython',\n 'numpy'\n]\n\n# This hack tells cython whether pyfftw is present\nuse_pyfftw_file = 'pyqg/.compile_time_use_pyfftw.pxi'\nwith open(use_pyfftw_file, 'wb') as f:\n try:\n import pyfftw\n f.write(b'DEF PYQG_USE_PYFFTW = 1')\n except ImportError:\n f.write(b'DEF PYQG_USE_PYFFTW = 0')\n warnings.warn('Could not import pyfftw. 
Model may be slower.')\n\n# check for openmp following\n# http://stackoverflow.com/questions/16549893/programatically-testing-for-openmp-support-from-a-python-setup-script\n# see http://openmp.org/wp/openmp-compilers/\nomp_test = \\\nbr\"\"\"\n#include <omp.h>\n#include <stdio.h>\nint main() {\n#pragma omp parallel\nprintf(\"Hello from thread %d, nthreads %d\\n\", omp_get_thread_num(), omp_get_num_threads());\n}\n\"\"\"\n\n# python 3 needs rb\n\ndef check_for_openmp():\n tmpdir = tempfile.mkdtemp()\n curdir = os.getcwd()\n os.chdir(tmpdir)\n filename = r'test.c'\n try:\n cc = os.environ['CC']\n except KeyError:\n cc = 'gcc'\n with open(filename, 'wb', 0) as file:\n file.write(omp_test)\n with open(os.devnull, 'wb') as fnull:\n try:\n result = subprocess.call([cc, '-fopenmp', filename],\n stdout=fnull, stderr=fnull)\n except FileNotFoundError:\n result = 1\n print('check_for_openmp() result: ', result)\n os.chdir(curdir)\n #clean up\n shutil.rmtree(tmpdir)\n\n return result==0\n\nextra_compile_args = []\nextra_link_args = []\n\nuse_openmp = True\nif check_for_openmp() and use_openmp:\n extra_compile_args.append('-fopenmp')\n extra_link_args.append('-fopenmp')\nelse:\n warnings.warn('Could not link with openmp. Model will be slow.')\n\n# reathedocs can't and shouldn't build pyfftw\n# apparently setup.py overrides docs/requirements.txt\n#on_rtd = os.environ.get('READTHEDOCS', None) == 'True'\n#if on_rtd:\n# install_requires.remove('pyfftw')\n\ntests_require = ['pytest']\n\ndef readme():\n with open('README.md') as f:\n return f.read()\n\next_module = Extension(\n \"pyqg.kernel\",\n [\"pyqg/kernel.pyx\"],\n extra_compile_args=extra_compile_args,\n extra_link_args=extra_link_args,\n)\n\nsetup(name=DISTNAME,\n version=versioneer.get_version(),\n cmdclass=versioneer.get_cmdclass(),\n description=DESCRIPTION,\n classifiers=CLASSIFIERS,\n long_description=LONG_DESCRIPTION,\n url=URL,\n author=AUTHOR,\n author_email=AUTHOR_EMAIL,\n license=LICENSE,\n packages=['pyqg'],\n install_requires=install_requires,\n ext_modules = cythonize(ext_module),\n include_dirs = [np.get_include()],\n tests_require = tests_require,\n test_suite = 'nose.collector',\n zip_safe=False)\n"
] | [
[
"numpy.get_include"
]
] |
Daniel1586/Initiative_tensorflow_tutorials | [
"5f1299696e3851d621cc260ae46cda853e56c2b0"
] | [
"lecture_morvan/013_tf_rnn_classification.py"
] | [
"#!/usr/bin/python\n# -*- coding: utf-8 -*-\n\nimport numpy as np\nimport tensorflow as tf\nimport matplotlib.pyplot as plt\nfrom tensorflow.examples.tutorials.mnist import input_data\n\n# 通过tf.set_random_seed设定种子数,后面定义的全部变量都可以跨会话生成相同的随机数\ntf.set_random_seed(1)\nnp.random.seed(1)\n\nprint('========== 1.Setting Hyper Parameters...')\ninput_size = 28 # rnn input size / image width\ntimes_step = 28 # rnn time step / image height\nbatch_size = 64\nlearn_rate = 0.01\n\nprint('========== 2.Loading data...')\nmnist = input_data.read_data_sets('mnist', one_hot=True) # they has been normalized to range (0,1)\ntest_x = mnist.test.images[:2000]\ntest_y = mnist.test.labels[:2000]\n\nprint('----- x shape: ', mnist.train.images.shape) # (55000, 28 * 28)\nprint('----- y shape: ', mnist.train.labels.shape) # (55000, 10)\nplt.imshow(mnist.train.images[1].reshape((28, 28)), cmap='gray')\nplt.title('%i' % np.argmax(mnist.train.labels[1]))\nplt.show()\n\ntf_x = tf.placeholder(tf.float32, [None, times_step * input_size]) # shape(batch, 784)\ntf_y = tf.placeholder(tf.int32, [None, 10]) # input y\nimage = tf.reshape(tf_x, [-1, times_step, input_size]) # (batch, height, width)\n\nprint('========== 3.Building Network...')\nrnn_cell = tf.contrib.rnn.BasicLSTMCell(num_units=64)\noutputs, (h_c, h_n) = tf.nn.dynamic_rnn(\n rnn_cell, # cell you have chosen\n image, # input\n initial_state=None, # the initial hidden state\n dtype=tf.float32, # must given if set initial_state = None\n time_major=False, # False: (batch, time step, input); True: (time step, batch, input)\n)\noutput = tf.layers.dense(outputs[:, -1, :], 10) # output based on the last output step\n\nloss = tf.losses.softmax_cross_entropy(onehot_labels=tf_y, logits=output) # compute cost\ntrain_op = tf.train.AdamOptimizer(learn_rate).minimize(loss)\naccuracy = tf.metrics.accuracy( # return (acc, update_op), and create 2 local variables\n labels=tf.argmax(tf_y, axis=1), predictions=tf.argmax(output, axis=1),)[1]\n\nsess = tf.Session()\ninit_op = tf.group(tf.global_variables_initializer(), tf.local_variables_initializer())\nsess.run(init_op)\nfor step in range(1200):\n b_x, b_y = mnist.train.next_batch(batch_size)\n _, loss_ = sess.run([train_op, loss], {tf_x: b_x, tf_y: b_y})\n if step % 100 == 0:\n accuracy_ = sess.run(accuracy, {tf_x: test_x, tf_y: test_y})\n print('Step:', step, '| train loss: %.4f' % loss_, '| test accuracy: %.2f' % accuracy_)\n\ntest_output = sess.run(output, {tf_x: test_x[:10]})\npred_y = np.argmax(test_output, 1)\nprint('----- pred number: ', pred_y)\nprint('----- real number: ', np.argmax(test_y[:10], 1))\n"
] | [
[
"tensorflow.set_random_seed",
"tensorflow.local_variables_initializer",
"tensorflow.train.AdamOptimizer",
"numpy.random.seed",
"tensorflow.argmax",
"tensorflow.contrib.rnn.BasicLSTMCell",
"tensorflow.examples.tutorials.mnist.input_data.read_data_sets",
"tensorflow.Session",
"tensorflow.losses.softmax_cross_entropy",
"tensorflow.reshape",
"tensorflow.placeholder",
"numpy.argmax",
"tensorflow.layers.dense",
"matplotlib.pyplot.show",
"tensorflow.global_variables_initializer",
"tensorflow.nn.dynamic_rnn"
]
] |
zlw21gxy/DRL | [
"a0852bbc51de29165d74f58ff86a4a4d9e68c83e"
] | [
"spinup/algos/vpg/core.py"
] | [
"import numpy as np\nimport tensorflow as tf\nimport scipy.signal\nfrom gym.spaces import Box, Discrete\n\nEPS = 1e-8\n\ndef combined_shape(length, shape=None):\n if shape is None:\n return (length,)\n return (length, shape) if np.isscalar(shape) else (length, *shape)\n\ndef placeholder(dim=None):\n return tf.placeholder(dtype=tf.float32, shape=combined_shape(None,dim))\n\ndef placeholders(*args):\n return [placeholder(dim) for dim in args]\n\ndef placeholder_from_space(space):\n if isinstance(space, Box):\n return placeholder(space.shape)\n elif isinstance(space, Discrete):\n return tf.placeholder(dtype=tf.int32, shape=(None,))\n raise NotImplementedError\n\ndef placeholders_from_spaces(*args):\n return [placeholder_from_space(space) for space in args]\n\ndef mlp(x, hidden_sizes=(32,), activation=tf.tanh, output_activation=None):\n for h in hidden_sizes[:-1]:\n x = tf.layers.dense(x, units=h, activation=activation)\n return tf.layers.dense(x, units=hidden_sizes[-1], activation=output_activation)\n\ndef get_vars(scope=''):\n return [x for x in tf.trainable_variables() if scope in x.name]\n\ndef count_vars(scope=''):\n v = get_vars(scope)\n return sum([np.prod(var.shape.as_list()) for var in v])\n\ndef gaussian_likelihood(x, mu, log_std):\n pre_sum = -0.5 * (((x-mu)/(tf.exp(log_std)+EPS))**2 + 2*log_std + np.log(2*np.pi))\n return tf.reduce_sum(pre_sum, axis=1)\n\ndef discount_cumsum(x, discount):\n \"\"\"\n magic from rllab for computing discounted cumulative sums of vectors.\n\n input: \n vector x, \n [x0, \n x1, \n x2]\n\n output:\n [x0 + discount * x1 + discount^2 * x2, \n x1 + discount * x2,\n x2]\n \"\"\"\n return scipy.signal.lfilter([1], [1, float(-discount)], x[::-1], axis=0)[::-1]\n\n\n\"\"\"\nPolicies\n\"\"\"\n\ndef mlp_categorical_policy(x, a, hidden_sizes, activation, output_activation, action_space):\n act_dim = action_space.n\n logits = mlp(x, list(hidden_sizes)+[act_dim], activation, None)\n logp_all = tf.nn.log_softmax(logits)\n pi = tf.squeeze(tf.multinomial(logits,1), axis=1)\n logp = tf.reduce_sum(tf.one_hot(a, depth=act_dim) * logp_all, axis=1)\n logp_pi = tf.reduce_sum(tf.one_hot(pi, depth=act_dim) * logp_all, axis=1)\n return pi, logp, logp_pi\n\n\ndef mlp_gaussian_policy(x, a, hidden_sizes, activation, output_activation, action_space):\n act_dim = a.shape.as_list()[-1]\n mu = mlp(x, list(hidden_sizes)+[act_dim], activation, output_activation)\n log_std = tf.get_variable(name='log_std', initializer=-0.5*np.ones(act_dim, dtype=np.float32))\n std = tf.exp(log_std)\n pi = mu + tf.random_normal(tf.shape(mu)) * std\n logp = gaussian_likelihood(a, mu, log_std)\n logp_pi = gaussian_likelihood(pi, mu, log_std)\n return pi, logp, logp_pi\n\n\n\"\"\"\nActor-Critics\n\"\"\"\ndef mlp_actor_critic(x, a, hidden_sizes=(64,64), activation=tf.tanh, \n output_activation=None, policy=None, action_space=None):\n\n # default policy builder depends on action space\n if policy is None and isinstance(action_space, Box):\n policy = mlp_gaussian_policy\n elif policy is None and isinstance(action_space, Discrete):\n policy = mlp_categorical_policy\n\n with tf.variable_scope('pi'):\n pi, logp, logp_pi = policy(x, a, hidden_sizes, activation, output_activation, action_space)\n with tf.variable_scope('v'):\n v = tf.squeeze(mlp(x, list(hidden_sizes)+[1], activation, None), axis=1)\n return pi, logp, logp_pi, v"
] | [
[
"tensorflow.exp",
"tensorflow.trainable_variables",
"tensorflow.shape",
"numpy.log",
"tensorflow.multinomial",
"numpy.ones",
"tensorflow.variable_scope",
"numpy.isscalar",
"tensorflow.reduce_sum",
"tensorflow.placeholder",
"tensorflow.layers.dense",
"tensorflow.one_hot",
"tensorflow.nn.log_softmax"
]
] |
rth/pandas | [
"fd151ba5a873ecf6392897f722abfdfae915303e"
] | [
"pandas/core/reshape/tile.py"
] | [
"\"\"\"\nQuantilization functions and related stuff\n\"\"\"\nfrom typing import (\n Any,\n Callable,\n Literal,\n)\n\nimport numpy as np\n\nfrom pandas._libs import (\n Timedelta,\n Timestamp,\n)\nfrom pandas._libs.lib import infer_dtype\n\nfrom pandas.core.dtypes.common import (\n DT64NS_DTYPE,\n ensure_platform_int,\n is_bool_dtype,\n is_categorical_dtype,\n is_datetime64_dtype,\n is_datetime64tz_dtype,\n is_datetime_or_timedelta_dtype,\n is_extension_array_dtype,\n is_integer,\n is_list_like,\n is_numeric_dtype,\n is_scalar,\n is_timedelta64_dtype,\n)\nfrom pandas.core.dtypes.generic import ABCSeries\nfrom pandas.core.dtypes.missing import isna\n\nfrom pandas import (\n Categorical,\n Index,\n IntervalIndex,\n to_datetime,\n to_timedelta,\n)\nimport pandas.core.algorithms as algos\nimport pandas.core.nanops as nanops\n\n\ndef cut(\n x,\n bins,\n right: bool = True,\n labels=None,\n retbins: bool = False,\n precision: int = 3,\n include_lowest: bool = False,\n duplicates: str = \"raise\",\n ordered: bool = True,\n):\n \"\"\"\n Bin values into discrete intervals.\n\n Use `cut` when you need to segment and sort data values into bins. This\n function is also useful for going from a continuous variable to a\n categorical variable. For example, `cut` could convert ages to groups of\n age ranges. Supports binning into an equal number of bins, or a\n pre-specified array of bins.\n\n Parameters\n ----------\n x : array-like\n The input array to be binned. Must be 1-dimensional.\n bins : int, sequence of scalars, or IntervalIndex\n The criteria to bin by.\n\n * int : Defines the number of equal-width bins in the range of `x`. The\n range of `x` is extended by .1% on each side to include the minimum\n and maximum values of `x`.\n * sequence of scalars : Defines the bin edges allowing for non-uniform\n width. No extension of the range of `x` is done.\n * IntervalIndex : Defines the exact bins to be used. Note that\n IntervalIndex for `bins` must be non-overlapping.\n\n right : bool, default True\n Indicates whether `bins` includes the rightmost edge or not. If\n ``right == True`` (the default), then the `bins` ``[1, 2, 3, 4]``\n indicate (1,2], (2,3], (3,4]. This argument is ignored when\n `bins` is an IntervalIndex.\n labels : array or False, default None\n Specifies the labels for the returned bins. Must be the same length as\n the resulting bins. If False, returns only integer indicators of the\n bins. This affects the type of the output container (see below).\n This argument is ignored when `bins` is an IntervalIndex. If True,\n raises an error. When `ordered=False`, labels must be provided.\n retbins : bool, default False\n Whether to return the bins or not. Useful when bins is provided\n as a scalar.\n precision : int, default 3\n The precision at which to store and display the bins labels.\n include_lowest : bool, default False\n Whether the first interval should be left-inclusive or not.\n duplicates : {default 'raise', 'drop'}, optional\n If bin edges are not unique, raise ValueError or drop non-uniques.\n ordered : bool, default True\n Whether the labels are ordered or not. Applies to returned types\n Categorical and Series (with Categorical dtype). If True,\n the resulting categorical will be ordered. If False, the resulting\n categorical will be unordered (labels must be provided).\n\n .. versionadded:: 1.1.0\n\n Returns\n -------\n out : Categorical, Series, or ndarray\n An array-like object representing the respective bin for each value\n of `x`. 
The type depends on the value of `labels`.\n\n * True (default) : returns a Series for Series `x` or a\n Categorical for all other inputs. The values stored within\n are Interval dtype.\n\n * sequence of scalars : returns a Series for Series `x` or a\n Categorical for all other inputs. The values stored within\n are whatever the type in the sequence is.\n\n * False : returns an ndarray of integers.\n\n bins : numpy.ndarray or IntervalIndex.\n The computed or specified bins. Only returned when `retbins=True`.\n For scalar or sequence `bins`, this is an ndarray with the computed\n bins. If set `duplicates=drop`, `bins` will drop non-unique bin. For\n an IntervalIndex `bins`, this is equal to `bins`.\n\n See Also\n --------\n qcut : Discretize variable into equal-sized buckets based on rank\n or based on sample quantiles.\n Categorical : Array type for storing data that come from a\n fixed set of values.\n Series : One-dimensional array with axis labels (including time series).\n IntervalIndex : Immutable Index implementing an ordered, sliceable set.\n\n Notes\n -----\n Any NA values will be NA in the result. Out of bounds values will be NA in\n the resulting Series or Categorical object.\n\n Examples\n --------\n Discretize into three equal-sized bins.\n\n >>> pd.cut(np.array([1, 7, 5, 4, 6, 3]), 3)\n ... # doctest: +ELLIPSIS\n [(0.994, 3.0], (5.0, 7.0], (3.0, 5.0], (3.0, 5.0], (5.0, 7.0], ...\n Categories (3, interval[float64, right]): [(0.994, 3.0] < (3.0, 5.0] ...\n\n >>> pd.cut(np.array([1, 7, 5, 4, 6, 3]), 3, retbins=True)\n ... # doctest: +ELLIPSIS\n ([(0.994, 3.0], (5.0, 7.0], (3.0, 5.0], (3.0, 5.0], (5.0, 7.0], ...\n Categories (3, interval[float64, right]): [(0.994, 3.0] < (3.0, 5.0] ...\n array([0.994, 3. , 5. , 7. ]))\n\n Discovers the same bins, but assign them specific labels. Notice that\n the returned Categorical's categories are `labels` and is ordered.\n\n >>> pd.cut(np.array([1, 7, 5, 4, 6, 3]),\n ... 3, labels=[\"bad\", \"medium\", \"good\"])\n ['bad', 'good', 'medium', 'medium', 'good', 'bad']\n Categories (3, object): ['bad' < 'medium' < 'good']\n\n ``ordered=False`` will result in unordered categories when labels are passed.\n This parameter can be used to allow non-unique labels:\n\n >>> pd.cut(np.array([1, 7, 5, 4, 6, 3]), 3,\n ... labels=[\"B\", \"A\", \"B\"], ordered=False)\n ['B', 'B', 'A', 'A', 'B', 'B']\n Categories (2, object): ['A', 'B']\n\n ``labels=False`` implies you just want the bins back.\n\n >>> pd.cut([0, 1, 1, 2], bins=4, labels=False)\n array([0, 1, 1, 3])\n\n Passing a Series as an input returns a Series with categorical dtype:\n\n >>> s = pd.Series(np.array([2, 4, 6, 8, 10]),\n ... index=['a', 'b', 'c', 'd', 'e'])\n >>> pd.cut(s, 3)\n ... # doctest: +ELLIPSIS\n a (1.992, 4.667]\n b (1.992, 4.667]\n c (4.667, 7.333]\n d (7.333, 10.0]\n e (7.333, 10.0]\n dtype: category\n Categories (3, interval[float64, right]): [(1.992, 4.667] < (4.667, ...\n\n Passing a Series as an input returns a Series with mapping value.\n It is used to map numerically to intervals based on bins.\n\n >>> s = pd.Series(np.array([2, 4, 6, 8, 10]),\n ... index=['a', 'b', 'c', 'd', 'e'])\n >>> pd.cut(s, [0, 2, 4, 6, 8, 10], labels=False, retbins=True, right=False)\n ... # doctest: +ELLIPSIS\n (a 1.0\n b 2.0\n c 3.0\n d 4.0\n e NaN\n dtype: float64,\n array([ 0, 2, 4, 6, 8, 10]))\n\n Use `drop` optional when bins is not unique\n\n >>> pd.cut(s, [0, 2, 4, 6, 10, 10], labels=False, retbins=True,\n ... right=False, duplicates='drop')\n ... 
# doctest: +ELLIPSIS\n (a 1.0\n b 2.0\n c 3.0\n d 3.0\n e NaN\n dtype: float64,\n array([ 0, 2, 4, 6, 10]))\n\n Passing an IntervalIndex for `bins` results in those categories exactly.\n Notice that values not covered by the IntervalIndex are set to NaN. 0\n is to the left of the first bin (which is closed on the right), and 1.5\n falls between two bins.\n\n >>> bins = pd.IntervalIndex.from_tuples([(0, 1), (2, 3), (4, 5)])\n >>> pd.cut([0, 0.5, 1.5, 2.5, 4.5], bins)\n [NaN, (0.0, 1.0], NaN, (2.0, 3.0], (4.0, 5.0]]\n Categories (3, interval[int64, right]): [(0, 1] < (2, 3] < (4, 5]]\n \"\"\"\n # NOTE: this binning code is changed a bit from histogram for var(x) == 0\n\n original = x\n x = _preprocess_for_cut(x)\n x, dtype = _coerce_to_type(x)\n\n if not np.iterable(bins):\n if is_scalar(bins) and bins < 1:\n raise ValueError(\"`bins` should be a positive integer.\")\n\n try: # for array-like\n sz = x.size\n except AttributeError:\n x = np.asarray(x)\n sz = x.size\n\n if sz == 0:\n raise ValueError(\"Cannot cut empty array\")\n\n rng = (nanops.nanmin(x), nanops.nanmax(x))\n mn, mx = (mi + 0.0 for mi in rng)\n\n if np.isinf(mn) or np.isinf(mx):\n # GH 24314\n raise ValueError(\n \"cannot specify integer `bins` when input data contains infinity\"\n )\n elif mn == mx: # adjust end points before binning\n mn -= 0.001 * abs(mn) if mn != 0 else 0.001\n mx += 0.001 * abs(mx) if mx != 0 else 0.001\n bins = np.linspace(mn, mx, bins + 1, endpoint=True)\n else: # adjust end points after binning\n bins = np.linspace(mn, mx, bins + 1, endpoint=True)\n adj = (mx - mn) * 0.001 # 0.1% of the range\n if right:\n bins[0] -= adj\n else:\n bins[-1] += adj\n\n elif isinstance(bins, IntervalIndex):\n if bins.is_overlapping:\n raise ValueError(\"Overlapping IntervalIndex is not accepted.\")\n\n else:\n if is_datetime64tz_dtype(bins):\n bins = np.asarray(bins, dtype=DT64NS_DTYPE)\n else:\n bins = np.asarray(bins)\n bins = _convert_bin_to_numeric_type(bins, dtype)\n\n # GH 26045: cast to float64 to avoid an overflow\n if (np.diff(bins.astype(\"float64\")) < 0).any():\n raise ValueError(\"bins must increase monotonically.\")\n\n fac, bins = _bins_to_cuts(\n x,\n bins,\n right=right,\n labels=labels,\n precision=precision,\n include_lowest=include_lowest,\n dtype=dtype,\n duplicates=duplicates,\n ordered=ordered,\n )\n\n return _postprocess_for_cut(fac, bins, retbins, dtype, original)\n\n\ndef qcut(\n x,\n q,\n labels=None,\n retbins: bool = False,\n precision: int = 3,\n duplicates: str = \"raise\",\n):\n \"\"\"\n Quantile-based discretization function.\n\n Discretize variable into equal-sized buckets based on rank or based\n on sample quantiles. For example 1000 values for 10 quantiles would\n produce a Categorical object indicating quantile membership for each data point.\n\n Parameters\n ----------\n x : 1d ndarray or Series\n q : int or list-like of float\n Number of quantiles. 10 for deciles, 4 for quartiles, etc. Alternately\n array of quantiles, e.g. [0, .25, .5, .75, 1.] for quartiles.\n labels : array or False, default None\n Used as labels for the resulting bins. Must be of the same length as\n the resulting bins. If False, return only integer indicators of the\n bins. If True, raises an error.\n retbins : bool, optional\n Whether to return the (bins, labels) or not. 
Can be useful if bins\n is given as a scalar.\n precision : int, optional\n The precision at which to store and display the bins labels.\n duplicates : {default 'raise', 'drop'}, optional\n If bin edges are not unique, raise ValueError or drop non-uniques.\n\n Returns\n -------\n out : Categorical or Series or array of integers if labels is False\n The return type (Categorical or Series) depends on the input: a Series\n of type category if input is a Series else Categorical. Bins are\n represented as categories when categorical data is returned.\n bins : ndarray of floats\n Returned only if `retbins` is True.\n\n Notes\n -----\n Out of bounds values will be NA in the resulting Categorical object\n\n Examples\n --------\n >>> pd.qcut(range(5), 4)\n ... # doctest: +ELLIPSIS\n [(-0.001, 1.0], (-0.001, 1.0], (1.0, 2.0], (2.0, 3.0], (3.0, 4.0]]\n Categories (4, interval[float64, right]): [(-0.001, 1.0] < (1.0, 2.0] ...\n\n >>> pd.qcut(range(5), 3, labels=[\"good\", \"medium\", \"bad\"])\n ... # doctest: +SKIP\n [good, good, medium, bad, bad]\n Categories (3, object): [good < medium < bad]\n\n >>> pd.qcut(range(5), 4, labels=False)\n array([0, 0, 1, 2, 3])\n \"\"\"\n original = x\n x = _preprocess_for_cut(x)\n x, dtype = _coerce_to_type(x)\n\n if is_integer(q):\n quantiles = np.linspace(0, 1, q + 1)\n else:\n quantiles = q\n bins = algos.quantile(x, quantiles)\n fac, bins = _bins_to_cuts(\n x,\n bins,\n labels=labels,\n precision=precision,\n include_lowest=True,\n dtype=dtype,\n duplicates=duplicates,\n )\n\n return _postprocess_for_cut(fac, bins, retbins, dtype, original)\n\n\ndef _bins_to_cuts(\n x,\n bins: np.ndarray,\n right: bool = True,\n labels=None,\n precision: int = 3,\n include_lowest: bool = False,\n dtype=None,\n duplicates: str = \"raise\",\n ordered: bool = True,\n):\n if not ordered and labels is None:\n raise ValueError(\"'labels' must be provided if 'ordered = False'\")\n\n if duplicates not in [\"raise\", \"drop\"]:\n raise ValueError(\n \"invalid value for 'duplicates' parameter, valid options are: raise, drop\"\n )\n\n if isinstance(bins, IntervalIndex):\n # we have a fast-path here\n ids = bins.get_indexer(x)\n result = Categorical.from_codes(ids, categories=bins, ordered=True)\n return result, bins\n\n unique_bins = algos.unique(bins)\n if len(unique_bins) < len(bins) and len(bins) != 2:\n if duplicates == \"raise\":\n raise ValueError(\n f\"Bin edges must be unique: {repr(bins)}.\\n\"\n f\"You can drop duplicate edges by setting the 'duplicates' kwarg\"\n )\n else:\n bins = unique_bins\n\n side: Literal[\"left\", \"right\"] = \"left\" if right else \"right\"\n ids = ensure_platform_int(bins.searchsorted(x, side=side))\n\n if include_lowest:\n ids[np.asarray(x) == bins[0]] = 1\n\n na_mask = isna(x) | (ids == len(bins)) | (ids == 0)\n has_nas = na_mask.any()\n\n if labels is not False:\n if not (labels is None or is_list_like(labels)):\n raise ValueError(\n \"Bin labels must either be False, None or passed in as a \"\n \"list-like argument\"\n )\n\n elif labels is None:\n labels = _format_labels(\n bins, precision, right=right, include_lowest=include_lowest, dtype=dtype\n )\n elif ordered and len(set(labels)) != len(labels):\n raise ValueError(\n \"labels must be unique if ordered=True; pass ordered=False for duplicate labels\" # noqa\n )\n else:\n if len(labels) != len(bins) - 1:\n raise ValueError(\n \"Bin labels must be one fewer than the number of bin edges\"\n )\n if not is_categorical_dtype(labels):\n labels = Categorical(\n labels,\n categories=labels if 
len(set(labels)) == len(labels) else None,\n ordered=ordered,\n )\n # TODO: handle mismatch between categorical label order and pandas.cut order.\n np.putmask(ids, na_mask, 0)\n result = algos.take_nd(labels, ids - 1)\n\n else:\n result = ids - 1\n if has_nas:\n result = result.astype(np.float64)\n np.putmask(result, na_mask, np.nan)\n\n return result, bins\n\n\ndef _coerce_to_type(x):\n \"\"\"\n if the passed data is of datetime/timedelta, bool or nullable int type,\n this method converts it to numeric so that cut or qcut method can\n handle it\n \"\"\"\n dtype = None\n\n if is_datetime64tz_dtype(x.dtype):\n dtype = x.dtype\n elif is_datetime64_dtype(x.dtype):\n x = to_datetime(x)\n dtype = np.dtype(\"datetime64[ns]\")\n elif is_timedelta64_dtype(x.dtype):\n x = to_timedelta(x)\n dtype = np.dtype(\"timedelta64[ns]\")\n elif is_bool_dtype(x.dtype):\n # GH 20303\n x = x.astype(np.int64)\n # To support cut and qcut for IntegerArray we convert to float dtype.\n # Will properly support in the future.\n # https://github.com/pandas-dev/pandas/pull/31290\n # https://github.com/pandas-dev/pandas/issues/31389\n elif is_extension_array_dtype(x.dtype) and is_numeric_dtype(x.dtype):\n x = x.to_numpy(dtype=np.float64, na_value=np.nan)\n\n if dtype is not None:\n # GH 19768: force NaT to NaN during integer conversion\n x = np.where(x.notna(), x.view(np.int64), np.nan)\n\n return x, dtype\n\n\ndef _convert_bin_to_numeric_type(bins, dtype):\n \"\"\"\n if the passed bin is of datetime/timedelta type,\n this method converts it to integer\n\n Parameters\n ----------\n bins : list-like of bins\n dtype : dtype of data\n\n Raises\n ------\n ValueError if bins are not of a compat dtype to dtype\n \"\"\"\n bins_dtype = infer_dtype(bins, skipna=False)\n if is_timedelta64_dtype(dtype):\n if bins_dtype in [\"timedelta\", \"timedelta64\"]:\n bins = to_timedelta(bins).view(np.int64)\n else:\n raise ValueError(\"bins must be of timedelta64 dtype\")\n elif is_datetime64_dtype(dtype) or is_datetime64tz_dtype(dtype):\n if bins_dtype in [\"datetime\", \"datetime64\"]:\n bins = to_datetime(bins).view(np.int64)\n else:\n raise ValueError(\"bins must be of datetime64 dtype\")\n\n return bins\n\n\ndef _convert_bin_to_datelike_type(bins, dtype):\n \"\"\"\n Convert bins to a DatetimeIndex or TimedeltaIndex if the original dtype is\n datelike\n\n Parameters\n ----------\n bins : list-like of bins\n dtype : dtype of data\n\n Returns\n -------\n bins : Array-like of bins, DatetimeIndex or TimedeltaIndex if dtype is\n datelike\n \"\"\"\n if is_datetime64tz_dtype(dtype):\n bins = to_datetime(bins.astype(np.int64), utc=True).tz_convert(dtype.tz)\n elif is_datetime_or_timedelta_dtype(dtype):\n bins = Index(bins.astype(np.int64), dtype=dtype)\n return bins\n\n\ndef _format_labels(\n bins, precision: int, right: bool = True, include_lowest: bool = False, dtype=None\n):\n \"\"\"based on the dtype, return our labels\"\"\"\n closed = \"right\" if right else \"left\"\n\n formatter: Callable[[Any], Timestamp] | Callable[[Any], Timedelta]\n\n if is_datetime64tz_dtype(dtype):\n formatter = lambda x: Timestamp(x, tz=dtype.tz)\n adjust = lambda x: x - Timedelta(\"1ns\")\n elif is_datetime64_dtype(dtype):\n formatter = Timestamp\n adjust = lambda x: x - Timedelta(\"1ns\")\n elif is_timedelta64_dtype(dtype):\n formatter = Timedelta\n adjust = lambda x: x - Timedelta(\"1ns\")\n else:\n precision = _infer_precision(precision, bins)\n formatter = lambda x: _round_frac(x, precision)\n adjust = lambda x: x - 10 ** (-precision)\n\n breaks = [formatter(b) 
for b in bins]\n if right and include_lowest:\n # adjust lhs of first interval by precision to account for being right closed\n breaks[0] = adjust(breaks[0])\n\n return IntervalIndex.from_breaks(breaks, closed=closed)\n\n\ndef _preprocess_for_cut(x):\n \"\"\"\n handles preprocessing for cut where we convert passed\n input to array, strip the index information and store it\n separately\n \"\"\"\n # Check that the passed array is a Pandas or Numpy object\n # We don't want to strip away a Pandas data-type here (e.g. datetimetz)\n ndim = getattr(x, \"ndim\", None)\n if ndim is None:\n x = np.asarray(x)\n if x.ndim != 1:\n raise ValueError(\"Input array must be 1 dimensional\")\n\n return x\n\n\ndef _postprocess_for_cut(fac, bins, retbins: bool, dtype, original):\n \"\"\"\n handles post processing for the cut method where\n we combine the index information if the originally passed\n datatype was a series\n \"\"\"\n if isinstance(original, ABCSeries):\n fac = original._constructor(fac, index=original.index, name=original.name)\n\n if not retbins:\n return fac\n\n bins = _convert_bin_to_datelike_type(bins, dtype)\n\n return fac, bins\n\n\ndef _round_frac(x, precision: int):\n \"\"\"\n Round the fractional part of the given number\n \"\"\"\n if not np.isfinite(x) or x == 0:\n return x\n else:\n frac, whole = np.modf(x)\n if whole == 0:\n digits = -int(np.floor(np.log10(abs(frac)))) - 1 + precision\n else:\n digits = precision\n return np.around(x, digits)\n\n\ndef _infer_precision(base_precision: int, bins) -> int:\n \"\"\"\n Infer an appropriate precision for _round_frac\n \"\"\"\n for precision in range(base_precision, 20):\n levels = [_round_frac(b, precision) for b in bins]\n if algos.unique(levels).size == bins.size:\n return precision\n return base_precision # default\n"
] | [
[
"pandas._libs.Timedelta",
"pandas.IntervalIndex.from_breaks",
"pandas.core.dtypes.common.is_datetime64_dtype",
"numpy.dtype",
"pandas.core.dtypes.missing.isna",
"pandas._libs.lib.infer_dtype",
"pandas.core.nanops.nanmin",
"numpy.putmask",
"pandas.core.dtypes.common.is_datetime64tz_dtype",
"pandas.core.dtypes.common.is_extension_array_dtype",
"pandas.core.nanops.nanmax",
"pandas.core.algorithms.unique",
"numpy.isfinite",
"numpy.around",
"pandas.to_datetime",
"pandas.core.dtypes.common.is_categorical_dtype",
"pandas.core.dtypes.common.is_scalar",
"pandas.core.dtypes.common.is_integer",
"numpy.modf",
"pandas.Categorical.from_codes",
"pandas.core.algorithms.quantile",
"pandas.core.dtypes.common.is_list_like",
"numpy.isinf",
"numpy.iterable",
"numpy.asarray",
"pandas.to_timedelta",
"pandas.core.dtypes.common.is_numeric_dtype",
"pandas.core.dtypes.common.is_datetime_or_timedelta_dtype",
"pandas._libs.Timestamp",
"pandas.core.dtypes.common.is_timedelta64_dtype",
"numpy.linspace",
"pandas.core.algorithms.take_nd",
"pandas.core.dtypes.common.is_bool_dtype"
]
] |
Dr-Hemanth/CarND-Behavioral-Cloning | [
"9bf5079451ee99e5ff07c30f1611871bacbba4f4"
] | [
"model.py"
] | [
"import csv\nimport numpy as np\nimport cv2\nimport sklearn\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.utils import shuffle\nfrom keras.models import Sequential\nfrom keras.layers import Dense, Activation, Flatten, Conv2D, Lambda, Dropout\nfrom keras.layers.convolutional import Cropping2D\nfrom keras.optimizers import Adam\n\n# Read and store lines from driving data log\nlines = []\nwith open('/home/workspace/CarND-Behavioral-Cloning-P3/data/driving_log.csv') as csvfile:\n reader = csv.reader(csvfile)\n for line in reader:\n lines.append(line)\n \n# Create a data Generator \ndef generator(lines, batch_size=32):\n num_lines = len(lines)\n while 1: # Loop forever so the generator never terminates\n shuffle(lines)\n for offset in range(0, num_lines, batch_size):\n batch_lines = lines[offset:offset+batch_size]\n \n # Create empty arrays to hold images and steering values\n images = []\n angles = []\n \n # For each line in the driving data log, read camera image (left, right and centre) and steering value\n for batch_sample in batch_lines:\n for i in range(3): # center, left and rights images\n name = 'data/IMG/' + batch_sample[i].split('/')[-1]\n current_image = cv2.cvtColor(cv2.imread(name), cv2.COLOR_BGR2RGB)\n images.append(current_image)\n # Correction value for steering angle for left and right camera images \n steering_correction = 0.2\n center_angle = float(batch_sample[3])\n left_angle = (center_angle + steering_correction)\n right_angle = (center_angle - steering_correction)\n if i == 0:\n angles.append(center_angle)\n elif i == 1: \n angles.append(left_angle)\n elif i == 2: \n angles.append(right_angle)\n \n images.append(cv2.flip(current_image, 1))\n \n # Augment training data by flipping images and changing sign of steering\n if i == 0:\n angles.append(center_angle * -1.0)\n elif i == 1: \n angles.append((center_angle + steering_correction) * -1.0)\n elif i == 2: \n angles.append((center_angle - steering_correction) * -1.0)\n \n # Convert images and steering_angles to numpy arrays for Keras to accept as input \n X_train = np.array(images)\n y_train = np.array(angles)\n yield sklearn.utils.shuffle(X_train, y_train)\n\n# split driving data to train and validate\ntrain_lines, validation_lines = train_test_split(lines[1:], test_size=0.2)\n\n# Use generator to pull data \nm_batch_size = 32\ntrain_generator = generator(train_lines, batch_size=m_batch_size)\nvalidation_generator = generator(validation_lines, batch_size=m_batch_size)\n\n# nVidia model\nmodel = Sequential()\nmodel.add(Lambda(lambda x: (x / 255.0) - 0.5, input_shape=(160,320,3)))\nmodel.add(Cropping2D(cropping=((70,25), (0,0))))\nmodel.add(Conv2D(24,(5,5), strides=(2,2), activation='relu'))\nmodel.add(Conv2D(36,(5,5), strides=(2,2), activation='relu'))\nmodel.add(Conv2D(48,(5,5), strides=(2,2), activation='relu'))\nmodel.add(Conv2D(64,(3,3), activation='relu'))\nmodel.add(Conv2D(64,(3,3), activation='relu'))\nmodel.add(Dropout(0.25))\nmodel.add(Flatten())\nmodel.add(Dense(1164, activation='relu'))\nmodel.add(Dense(100, activation='relu'))\nmodel.add(Dense(50, activation='relu'))\nmodel.add(Dense(10, activation='relu'))\nmodel.add(Dense(1))\nmodel.compile(optimizer='adam', loss='mse')\n\n# fit the model\nhistory_object = model.fit_generator(train_generator, steps_per_epoch=len(train_lines)//m_batch_size,\nvalidation_data=validation_generator, validation_steps=len(validation_lines)//m_batch_size, epochs=5, verbose = 1)\n\nimport matplotlib.pyplot as 
plt\n\nprint(history_object.history.keys())\nplt.plot(history_object.history['loss'])\nplt.plot(history_object.history['val_loss'])\n\nplt.title('model loss')\nplt.ylabel('loss')\nplt.xlabel('epoch')\nplt.legend(['train', 'test'], loc='upper left')\nplt.show()\n\n# Save model\nmodel.save('model.h5')"
] | [
[
"numpy.array",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.plot",
"matplotlib.pyplot.title",
"matplotlib.pyplot.legend",
"matplotlib.pyplot.ylabel",
"sklearn.model_selection.train_test_split",
"matplotlib.pyplot.show",
"sklearn.utils.shuffle"
]
] |
stwind/datasets | [
"b097e0985eaaadc6b0c1f4dfa3b3cf88d116c607"
] | [
"tensorflow_datasets/image_classification/oxford_flowers102.py"
] | [
"# coding=utf-8\n# Copyright 2020 The TensorFlow Datasets Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"Oxford 102 Category Flower Dataset.\"\"\"\n\nimport os\nimport tensorflow.compat.v2 as tf\nimport tensorflow_datasets.public_api as tfds\n\n\n_BASE_URL = \"https://www.robots.ox.ac.uk/~vgg/data/flowers/102/\"\n\n_NAMES = [\n \"pink primrose\", \"hard-leaved pocket orchid\", \"canterbury bells\",\n \"sweet pea\", \"english marigold\", \"tiger lily\", \"moon orchid\",\n \"bird of paradise\", \"monkshood\", \"globe thistle\", \"snapdragon\",\n \"colt's foot\", \"king protea\", \"spear thistle\", \"yellow iris\",\n \"globe-flower\", \"purple coneflower\", \"peruvian lily\", \"balloon flower\",\n \"giant white arum lily\", \"fire lily\", \"pincushion flower\", \"fritillary\",\n \"red ginger\", \"grape hyacinth\", \"corn poppy\", \"prince of wales feathers\",\n \"stemless gentian\", \"artichoke\", \"sweet william\", \"carnation\",\n \"garden phlox\", \"love in the mist\", \"mexican aster\", \"alpine sea holly\",\n \"ruby-lipped cattleya\", \"cape flower\", \"great masterwort\", \"siam tulip\",\n \"lenten rose\", \"barbeton daisy\", \"daffodil\", \"sword lily\", \"poinsettia\",\n \"bolero deep blue\", \"wallflower\", \"marigold\", \"buttercup\", \"oxeye daisy\",\n \"common dandelion\", \"petunia\", \"wild pansy\", \"primula\", \"sunflower\",\n \"pelargonium\", \"bishop of llandaff\", \"gaura\", \"geranium\", \"orange dahlia\",\n \"pink-yellow dahlia?\", \"cautleya spicata\", \"japanese anemone\",\n \"black-eyed susan\", \"silverbush\", \"californian poppy\", \"osteospermum\",\n \"spring crocus\", \"bearded iris\", \"windflower\", \"tree poppy\", \"gazania\",\n \"azalea\", \"water lily\", \"rose\", \"thorn apple\", \"morning glory\",\n \"passion flower\", \"lotus\", \"toad lily\", \"anthurium\", \"frangipani\",\n \"clematis\", \"hibiscus\", \"columbine\", \"desert-rose\", \"tree mallow\",\n \"magnolia\", \"cyclamen\", \"watercress\", \"canna lily\", \"hippeastrum\",\n \"bee balm\", \"ball moss\", \"foxglove\", \"bougainvillea\", \"camellia\", \"mallow\",\n \"mexican petunia\", \"bromelia\", \"blanket flower\", \"trumpet creeper\",\n \"blackberry lily\"\n]\n\n_CITATION = \"\"\"\\\n@InProceedings{Nilsback08,\n author = \"Nilsback, M-E. and Zisserman, A.\",\n title = \"Automated Flower Classification over a Large Number of Classes\",\n booktitle = \"Proceedings of the Indian Conference on Computer Vision, Graphics and Image Processing\",\n year = \"2008\",\n month = \"Dec\"\n}\n\"\"\"\n\n_DESCRIPTION = \"\"\"\nThe Oxford Flowers 102 dataset is a consistent of 102 flower categories commonly occurring\nin the United Kingdom. Each class consists of between 40 and 258 images. The images have\nlarge scale, pose and light variations. 
In addition, there are categories that have large\nvariations within the category and several very similar categories.\n\nThe dataset is divided into a training set, a validation set and a test set.\nThe training set and validation set each consist of 10 images per class (totalling 1020 images each).\nThe test set consists of the remaining 6149 images (minimum 20 per class).\n\"\"\"\n\n\nclass OxfordFlowers102(tfds.core.GeneratorBasedBuilder):\n \"\"\"Oxford 102 category flower dataset.\"\"\"\n\n VERSION = tfds.core.Version(\"2.1.1\")\n\n def _info(self):\n return tfds.core.DatasetInfo(\n builder=self,\n description=_DESCRIPTION,\n features=tfds.features.FeaturesDict({\n \"image\": tfds.features.Image(),\n \"label\": tfds.features.ClassLabel(names=_NAMES),\n \"file_name\": tfds.features.Text(),\n }),\n supervised_keys=(\"image\", \"label\"),\n homepage=_BASE_URL,\n citation=_CITATION,\n )\n\n def _split_generators(self, dl_manager):\n \"\"\"Returns SplitGenerators.\"\"\"\n # Download images and annotations that come in separate archives.\n # Note, that the extension of archives is .tar.gz even though the actual\n # archives format is uncompressed tar.\n dl_paths = dl_manager.download_and_extract({\n \"images\": os.path.join(_BASE_URL, \"102flowers.tgz\"),\n \"labels\": os.path.join(_BASE_URL, \"imagelabels.mat\"),\n \"setid\": os.path.join(_BASE_URL, \"setid.mat\"),\n })\n\n gen_kwargs = dict(\n images_dir_path=os.path.join(dl_paths[\"images\"], \"jpg\"),\n labels_path=dl_paths[\"labels\"],\n setid_path=dl_paths[\"setid\"],\n )\n\n return [\n tfds.core.SplitGenerator(\n name=tfds.Split.TRAIN,\n gen_kwargs=dict(split_name=\"trnid\", **gen_kwargs)),\n tfds.core.SplitGenerator(\n name=tfds.Split.TEST,\n gen_kwargs=dict(split_name=\"tstid\", **gen_kwargs)),\n tfds.core.SplitGenerator(\n name=tfds.Split.VALIDATION,\n gen_kwargs=dict(split_name=\"valid\", **gen_kwargs)),\n ]\n\n def _generate_examples(self, images_dir_path, labels_path, setid_path,\n split_name):\n \"\"\"Yields examples.\"\"\"\n with tf.io.gfile.GFile(labels_path, \"rb\") as f:\n labels = tfds.core.lazy_imports.scipy.io.loadmat(f)[\"labels\"][0]\n with tf.io.gfile.GFile(setid_path, \"rb\") as f:\n examples = tfds.core.lazy_imports.scipy.io.loadmat(f)[split_name][0]\n\n for image_id in examples:\n file_name = \"image_%05d.jpg\" % image_id\n record = {\n \"image\": os.path.join(images_dir_path, file_name),\n \"label\": labels[image_id - 1] - 1,\n \"file_name\": file_name,\n }\n yield file_name, record\n"
] | [
[
"tensorflow.compat.v2.io.gfile.GFile"
]
] |
ouseful-backup/ggplot | [
"0f3774e6a645796b843d3ce77fb388958773338e"
] | [
"ggplot/stats/stat_smooth.py"
] | [
"from __future__ import (absolute_import, division, print_function,\n unicode_literals)\nimport numpy as np\nimport pandas as pd\n\nfrom ggplot.components import smoothers\nfrom ggplot.utils import make_iterable_ntimes\nfrom .stat import stat\n\n\nclass stat_smooth(stat):\n REQUIRED_AES = {'x', 'y'}\n DEFAULT_PARAMS = {'geom': 'smooth', 'position': 'identity', 'method': 'auto',\n 'se': True, 'n': 80, 'fullrange': False, 'level': 0.95,\n 'span': 2/3., 'window': None}\n CREATES = {'ymin', 'ymax'}\n\n def _calculate(self, data):\n # sort data by x and\n # convert x and y to lists so that the Series index\n # does not mess with the smoothing functions\n data = data.sort(['x'])\n x = list(data.pop('x'))\n y = list(data.pop('y'))\n\n se = self.params['se']\n level = self.params['level']\n method = self.params['method']\n span = self.params['span']\n window = self.params['window']\n\n if window is None:\n window = int(np.ceil(len(x) / 10.0))\n\n # TODO: fix the smoothers\n # - lm : y1, y2 are NaNs\n # - mvg: investigate unexpected looking output\n if method == \"lm\":\n x, y, y1, y2 = smoothers.lm(x, y, 1-level)\n elif method == \"ma\":\n x, y, y1, y2 = smoothers.mavg(x, y, window=window)\n else:\n x, y, y1, y2 = smoothers.lowess(x, y, span=span)\n\n new_data = pd.DataFrame({'x': x, 'y': y})\n if se:\n new_data['ymin'] = y1\n new_data['ymax'] = y2\n\n # Copy the other aesthetics into the new dataframe\n n = len(x)\n for ae in data:\n new_data[ae] = make_iterable_ntimes(data[ae].iloc[0], n)\n return new_data\n"
] | [
[
"pandas.DataFrame"
]
] |
shaun95/espnet | [
"afa8f8ec5b8ec77deb1a3c1531915ebbee7b80e6"
] | [
"test/espnet2/bin/test_asr_inference.py"
] | [
"from argparse import ArgumentParser\nfrom pathlib import Path\nimport string\n\nimport numpy as np\nimport pytest\nimport yaml\n\nfrom espnet.nets.beam_search import Hypothesis\nfrom espnet2.bin.asr_inference import get_parser\nfrom espnet2.bin.asr_inference import main\nfrom espnet2.bin.asr_inference import Speech2Text\nfrom espnet2.bin.asr_inference_streaming import Speech2TextStreaming\nfrom espnet2.tasks.asr import ASRTask\nfrom espnet2.tasks.enh_s2t import EnhS2TTask\nfrom espnet2.tasks.lm import LMTask\n\n\ndef test_get_parser():\n assert isinstance(get_parser(), ArgumentParser)\n\n\ndef test_main():\n with pytest.raises(SystemExit):\n main()\n\n\[email protected]()\ndef token_list(tmp_path: Path):\n with (tmp_path / \"tokens.txt\").open(\"w\") as f:\n f.write(\"<blank>\\n\")\n for c in string.ascii_letters:\n f.write(f\"{c}\\n\")\n f.write(\"<unk>\\n\")\n f.write(\"<sos/eos>\\n\")\n return tmp_path / \"tokens.txt\"\n\n\[email protected]()\ndef asr_config_file(tmp_path: Path, token_list):\n # Write default configuration file\n ASRTask.main(\n cmd=[\n \"--dry_run\",\n \"true\",\n \"--output_dir\",\n str(tmp_path / \"asr\"),\n \"--token_list\",\n str(token_list),\n \"--token_type\",\n \"char\",\n ]\n )\n return tmp_path / \"asr\" / \"config.yaml\"\n\n\[email protected]()\ndef lm_config_file(tmp_path: Path, token_list):\n # Write default configuration file\n LMTask.main(\n cmd=[\n \"--dry_run\",\n \"true\",\n \"--output_dir\",\n str(tmp_path / \"lm\"),\n \"--token_list\",\n str(token_list),\n \"--token_type\",\n \"char\",\n ]\n )\n return tmp_path / \"lm\" / \"config.yaml\"\n\n\[email protected]_timeout(5)\ndef test_Speech2Text(asr_config_file, lm_config_file):\n speech2text = Speech2Text(\n asr_train_config=asr_config_file, lm_train_config=lm_config_file, beam_size=1\n )\n speech = np.random.randn(100000)\n results = speech2text(speech)\n for text, token, token_int, hyp in results:\n assert isinstance(text, str)\n assert isinstance(token[0], str)\n assert isinstance(token_int[0], int)\n assert isinstance(hyp, Hypothesis)\n\n\[email protected]_timeout(5)\ndef test_Speech2Text_quantized(asr_config_file, lm_config_file):\n speech2text = Speech2Text(\n asr_train_config=asr_config_file,\n lm_train_config=lm_config_file,\n beam_size=1,\n quantize_asr_model=True,\n quantize_lm=True,\n )\n speech = np.random.randn(100000)\n results = speech2text(speech)\n for text, token, token_int, hyp in results:\n assert isinstance(text, str)\n assert isinstance(token[0], str)\n assert isinstance(token_int[0], int)\n assert isinstance(hyp, Hypothesis)\n\n\[email protected]()\ndef asr_config_file_streaming(tmp_path: Path, token_list):\n # Write default configuration file\n ASRTask.main(\n cmd=[\n \"--dry_run\",\n \"true\",\n \"--output_dir\",\n str(tmp_path / \"asr_streaming\"),\n \"--token_list\",\n str(token_list),\n \"--token_type\",\n \"char\",\n \"--decoder\",\n \"transformer\",\n \"--encoder\",\n \"contextual_block_transformer\",\n ]\n )\n return tmp_path / \"asr_streaming\" / \"config.yaml\"\n\n\[email protected]_timeout(20)\ndef test_Speech2Text_streaming(asr_config_file_streaming, lm_config_file):\n file = open(asr_config_file_streaming, \"r\", encoding=\"utf-8\")\n asr_train_config = file.read()\n asr_train_config = yaml.full_load(asr_train_config)\n asr_train_config[\"frontend\"] = \"default\"\n asr_train_config[\"encoder_conf\"] = {\n \"look_ahead\": 16,\n \"hop_size\": 16,\n \"block_size\": 40,\n }\n # Change the configuration file\n with open(asr_config_file_streaming, \"w\", 
encoding=\"utf-8\") as files:\n yaml.dump(asr_train_config, files)\n speech2text = Speech2TextStreaming(\n asr_train_config=asr_config_file_streaming,\n lm_train_config=lm_config_file,\n beam_size=1,\n )\n speech = np.random.randn(10000)\n for sim_chunk_length in [1, 32, 128, 512, 1024, 2048]:\n if (len(speech) // sim_chunk_length) > 1:\n for i in range(len(speech) // sim_chunk_length):\n speech2text(\n speech=speech[i * sim_chunk_length : (i + 1) * sim_chunk_length],\n is_final=False,\n )\n results = speech2text(\n speech[(i + 1) * sim_chunk_length : len(speech)], is_final=True\n )\n else:\n results = speech2text(speech)\n for text, token, token_int, hyp in results:\n assert isinstance(text, str)\n assert isinstance(token[0], str)\n assert isinstance(token_int[0], int)\n assert isinstance(hyp, Hypothesis)\n\n # Test edge case: https://github.com/espnet/espnet/pull/4216\n file = open(asr_config_file_streaming, \"r\", encoding=\"utf-8\")\n asr_train_config = file.read()\n asr_train_config = yaml.full_load(asr_train_config)\n asr_train_config[\"frontend\"] = \"default\"\n asr_train_config[\"frontend_conf\"] = {\n \"n_fft\": 256,\n \"win_length\": 256,\n \"hop_length\": 128,\n }\n # Change the configuration file\n with open(asr_config_file_streaming, \"w\", encoding=\"utf-8\") as files:\n yaml.dump(asr_train_config, files)\n speech2text = Speech2TextStreaming(\n asr_train_config=asr_config_file_streaming,\n lm_train_config=lm_config_file,\n beam_size=1,\n )\n # edge case: speech is exactly multiple of sim_chunk_length, e.g., 10240 = 5 x 2048\n speech = np.random.randn(10240)\n for sim_chunk_length in [1, 32, 64, 128, 512, 1024, 2048]:\n if (len(speech) // sim_chunk_length) > 1:\n for i in range(len(speech) // sim_chunk_length):\n speech2text(\n speech=speech[i * sim_chunk_length : (i + 1) * sim_chunk_length],\n is_final=False,\n )\n results = speech2text(\n speech[(i + 1) * sim_chunk_length : len(speech)], is_final=True\n )\n else:\n results = speech2text(speech)\n for text, token, token_int, hyp in results:\n assert isinstance(text, str)\n assert isinstance(token[0], str)\n assert isinstance(token_int[0], int)\n assert isinstance(hyp, Hypothesis)\n\n\[email protected]()\ndef enh_asr_config_file(tmp_path: Path, token_list):\n # Write default configuration file\n EnhS2TTask.main(\n cmd=[\n \"--dry_run\",\n \"true\",\n \"--output_dir\",\n str(tmp_path / \"enh_asr\"),\n \"--token_list\",\n str(token_list),\n \"--token_type\",\n \"char\",\n ]\n )\n return tmp_path / \"enh_asr\" / \"config.yaml\"\n\n\[email protected]_timeout(5)\ndef test_EnhS2T_Speech2Text(enh_asr_config_file, lm_config_file):\n speech2text = Speech2Text(\n asr_train_config=enh_asr_config_file,\n lm_train_config=lm_config_file,\n beam_size=1,\n enh_s2t_task=True,\n )\n speech = np.random.randn(48000)\n results = speech2text(speech)\n for text, token, token_int, hyp in results:\n assert isinstance(text, str)\n assert isinstance(token[0], str)\n assert isinstance(token_int[0], int)\n assert isinstance(hyp, Hypothesis)\n"
] | [
[
"numpy.random.randn"
]
] |
hudeven/pytext | [
"6e5ab16803be33bcb784b7fd79aa99935cfd12ec"
] | [
"pytext/exporters/test/text_model_exporter_test.py"
] | [
"#!/usr/bin/env python3\n# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved\n\nimport json\nimport tempfile\nfrom collections import Counter\n\nimport caffe2.python.hypothesis_test_util as hu\nimport caffe2.python.predictor.predictor_exporter as pe\nimport hypothesis.strategies as st\nimport numpy as np\nimport torch\nimport torch.nn.functional as F\nfrom caffe2.python import workspace\nfrom hypothesis import given\nfrom pytext.builtin_task import (\n DocumentClassificationTask,\n IntentSlotTask,\n SeqNNTask,\n WordTaggingTask,\n)\nfrom pytext.common.constants import DatasetFieldName, SpecialTokens\nfrom pytext.config import config_from_json\nfrom pytext.config.component import create_exporter, create_model\nfrom pytext.data import CommonMetadata\nfrom pytext.data.utils import Vocabulary\nfrom pytext.exporters.exporter import ModelExporter\nfrom pytext.fields import (\n CharFeatureField,\n DictFeatureField,\n FieldMeta,\n SeqFeatureField,\n TextFeatureField,\n)\nfrom pytext.task.new_task import _NewTask\nfrom pytext.utils.onnx import CAFFE2_DB_TYPE\nfrom torchtext.vocab import Vocab\n\n\nJOINT_CONFIG = \"\"\"\n{\n \"model\": {\n \"representation\": {\n \"BiLSTMDocSlotAttention\": {\n \"lstm\": {\n \"BiLSTM\": {\n \"lstm_dim\": 30,\n \"num_layers\": 1\n }\n },\n \"pooling\": {\n \"SelfAttention\": {\n \"attn_dimension\": 30,\n \"dropout\": 0.3\n }\n }\n }\n },\n \"decoder\": {\n \"use_doc_probs_in_word\": true\n },\n \"output_layer\": {\n \"doc_output\": {\n \"loss\": {\n \"CrossEntropyLoss\": {}\n }\n },\n \"word_output\": {\n \"CRFOutputLayer\": {}\n }\n }\n }\n}\n\"\"\"\n\nDOC_CONFIGS = [\n \"\"\"\n{\n \"model\": {\n \"representation\": {\n \"DocNNRepresentation\": {}\n },\n \"output_layer\": {\n \"loss\": {\n \"CrossEntropyLoss\": {}\n }\n }\n },\n \"features\": {\n \"word_feat\": {},\n \"dict_feat\": {},\n \"char_feat\": {\n \"embed_dim\": 5,\n \"cnn\": {\n \"kernel_num\": 2,\n \"kernel_sizes\": [2, 3]\n }\n },\n \"dense_feat\": {\n \"dim\":10\n }\n },\n \"featurizer\": {\n \"SimpleFeaturizer\": {}\n },\n \"trainer\": {\n \"epochs\": 1\n },\n \"exporter\": {}\n}\n\"\"\",\n \"\"\"\n{\n \"model\": {\n \"representation\": {\n \"BiLSTMDocAttention\": {\n \"pooling\": {\n \"MaxPool\": {}\n }\n }\n },\n \"output_layer\": {\n \"loss\": {\n \"CrossEntropyLoss\": {}\n }\n }\n },\n \"features\": {\n \"dict_feat\": {\n \"embed_dim\": 10\n }\n },\n \"featurizer\": {\n \"SimpleFeaturizer\": {}\n },\n \"trainer\": {\n \"epochs\": 1\n },\n \"exporter\": {}\n}\n\"\"\",\n \"\"\"\n{\n \"model\": {\n \"representation\": {\n \"DocNNRepresentation\": {}\n },\n \"output_layer\": {\n \"loss\": {\n \"CrossEntropyLoss\": {}\n }\n }\n },\n \"features\": {\n \"word_feat\": {},\n \"dict_feat\": {},\n \"char_feat\": {\n \"embed_dim\": 5,\n \"cnn\": {\n \"kernel_num\": 2,\n \"kernel_sizes\": [2, 3]\n }\n }\n },\n \"featurizer\": {\n \"SimpleFeaturizer\": {}\n },\n \"trainer\": {\n \"epochs\": 1\n },\n \"exporter\": {}\n}\n\"\"\",\n]\n\nDOC_CONFIGS_WITH_EXPORT_LOGITS = [\n \"\"\"\n{\n \"model\": {\n \"representation\": {\n \"BiLSTMDocAttention\": {\n \"pooling\": {\n \"MaxPool\": {}\n }\n }\n },\n \"output_layer\": {\n \"loss\": {\n \"CrossEntropyLoss\": {}\n }\n }\n },\n \"features\": {\n \"dict_feat\": {\n \"embed_dim\": 10\n }\n },\n \"featurizer\": {\n \"SimpleFeaturizer\": {}\n },\n \"trainer\": {\n \"epochs\": 1\n },\n \"exporter\": {\n \"export_logits\": true\n }\n}\n\"\"\"\n]\n\nWORD_CONFIGS = [\n \"\"\"\n{\n \"model\": {\n \"representation\": {\n \"BiLSTMSlotAttention\": {\n \"lstm\": 
{\n \"lstm_dim\": 30,\n \"num_layers\": 2\n }\n }\n },\n \"output_layer\": {\n \"WordTaggingOutputLayer\": {}\n }\n }\n}\n\"\"\",\n \"\"\"\n{\n \"model\": {\n \"representation\": {\n \"BiLSTMSlotAttention\": {\n \"lstm\": {\n \"lstm_dim\": 30,\n \"num_layers\": 2\n }\n }\n },\n \"output_layer\": {\n \"CRFOutputLayer\": {}\n }\n }\n}\n\"\"\",\n]\n\n\nSEQ_NN_CONFIG = \"\"\"\n {\n \"model\": {\n \"representation\": {\n \"doc_representation\": {},\n \"seq_representation\": {\n \"DocNNRepresentation\": {}\n }\n }\n }\n}\n\"\"\"\n\n\nCONTEXTUAL_INTENT_SLOT_CONFIG = \"\"\"\n{\n \"trainer\": {\n \"epochs\": 1\n },\n \"metric_reporter\": {\n \"IntentSlotMetricReporter\": {}\n },\n \"model\": {\n \"ContextualIntentSlotModel\": {\n \"inputs\": {\n \"tokens\": {\n },\n \"seq_tokens\": {}\n },\n \"word_embedding\": {\n \"embed_dim\": 10\n },\n \"seq_embedding\": {\n \"embed_dim\": 10\n }\n }\n }\n}\n\"\"\"\nWORD_VOCAB = [SpecialTokens.UNK, \"W1\", \"W2\", \"W3\", \"W4\", \"W5\", \"W6\", \"W7\", \"W8\", \"W9\"]\n\n\nW_VOCAB_SIZE = 10\nUNK_IDX = 0\nPAD_IDX = 1\nW_VOCAB = [\"<UNK>\", \"W1\", \"W2\", \"W3\", \"W4\", \"W5\", \"W6\", \"W7\", \"W8\", \"W9\"]\nDICT_VOCAB_SIZE = 10\nDICT_VOCAB = [\"<UNK>\", \"D1\", \"D2\", \"D3\", \"D4\", \"D5\", \"D6\", \"D7\", \"D8\", \"D9\"]\nCHAR_VOCAB_SIZE = 10\nCHAR_VOCAB = [\"<UNK>\", \"C1\", \"C2\", \"C3\", \"C4\", \"C5\", \"C6\", \"C7\", \"C8\", \"C9\"]\n\n# For now we need to fix the batch_size for exporting and testing,\n# Need to remove this and make it a random input once ONNX is able to\n# Handle different batch_sizes\nBATCH_SIZE = 1\n\n# Fixed dimension of dense_features since it needs to be specified in config\nDENSE_FEATURE_DIM = 10\n\n\nclass ModelExporterTest(hu.HypothesisTestCase):\n @given(\n export_num_words=st.integers(1, 5),\n export_num_dict_feat=st.integers(1, 6),\n num_doc_classes=st.integers(2, 5),\n test_num_words=st.integers(1, 7),\n test_num_dict_feat=st.integers(1, 8),\n num_predictions=st.integers(1, 4),\n test_num_chars=st.integers(1, 7),\n )\n # TODO () Port this test to DocumentClassificationTask\n def DISABLED_test_doc_export_to_caffe2(\n self,\n export_num_words,\n export_num_dict_feat,\n num_doc_classes,\n test_num_words,\n test_num_dict_feat,\n num_predictions,\n test_num_chars,\n ):\n for config in DOC_CONFIGS:\n config = self._get_config(DocumentClassificationTask.Config, config)\n metadata = self._get_metadata(num_doc_classes, 0)\n py_model = create_model(config.model, config.features, metadata)\n exporter = create_exporter(\n config.exporter, config.features, config.labels, metadata\n )\n\n with tempfile.NamedTemporaryFile(\n delete=False, suffix=\".predictor\"\n ) as pred_file:\n print(pred_file.name)\n output_names = exporter.export_to_caffe2(py_model, pred_file.name)\n workspace.ResetWorkspace()\n pred_net = pe.prepare_prediction_net(pred_file.name, CAFFE2_DB_TYPE)\n\n for _i in range(num_predictions):\n pred_net = pe.prepare_prediction_net(pred_file.name, CAFFE2_DB_TYPE)\n test_inputs = self._get_rand_input(\n config.features,\n BATCH_SIZE,\n W_VOCAB_SIZE,\n DICT_VOCAB_SIZE,\n CHAR_VOCAB_SIZE,\n test_num_words,\n test_num_dict_feat,\n test_num_chars,\n )\n self._feed_c2_input(\n workspace,\n test_inputs,\n exporter.input_names,\n metadata.feature_itos_map,\n )\n workspace.RunNetOnce(pred_net)\n c2_out = [list(workspace.FetchBlob(o_name)) for o_name in output_names]\n\n py_model.eval()\n py_outs = py_model(*test_inputs)\n # Do log_softmax since we do that before exporting predictor nets\n py_outs = F.log_softmax(py_outs, 1)\n 
np.testing.assert_array_almost_equal(\n py_outs.view(-1).detach().numpy(), np.array(c2_out).flatten()\n )\n\n @given(\n num_doc_classes=st.integers(2, 5),\n test_num_words=st.integers(1, 7),\n test_num_dict_feat=st.integers(1, 8),\n num_predictions=st.integers(1, 4),\n test_num_chars=st.integers(1, 7),\n )\n # TODO () Port this test to DocumentClassificationTask\n def DISABLED_test_doc_export_to_caffe2_with_logits(\n self,\n num_doc_classes,\n test_num_words,\n test_num_dict_feat,\n num_predictions,\n test_num_chars,\n ):\n for config in DOC_CONFIGS_WITH_EXPORT_LOGITS:\n config = self._get_config(DocumentClassificationTask.Config, config)\n metadata = self._get_metadata(num_doc_classes, 0)\n py_model = create_model(config.model, config.features, metadata)\n exporter = create_exporter(\n config.exporter, config.features, config.labels, metadata\n )\n\n with tempfile.NamedTemporaryFile(\n delete=False, suffix=\".predictor\"\n ) as pred_file:\n print(pred_file.name)\n output_names = exporter.export_to_caffe2(py_model, pred_file.name)\n workspace.ResetWorkspace()\n pred_net = pe.prepare_prediction_net(pred_file.name, CAFFE2_DB_TYPE)\n\n for _i in range(num_predictions):\n pred_net = pe.prepare_prediction_net(pred_file.name, CAFFE2_DB_TYPE)\n test_inputs = self._get_rand_input(\n config.features,\n BATCH_SIZE,\n W_VOCAB_SIZE,\n DICT_VOCAB_SIZE,\n CHAR_VOCAB_SIZE,\n test_num_words,\n test_num_dict_feat,\n test_num_chars,\n )\n self._feed_c2_input(\n workspace,\n test_inputs,\n exporter.input_names,\n metadata.feature_itos_map,\n )\n workspace.RunNetOnce(pred_net)\n c2_out = [list(workspace.FetchBlob(o_name)) for o_name in output_names]\n\n py_model.eval()\n py_outs = py_model(*test_inputs)\n np.testing.assert_array_almost_equal(\n py_outs.view(-1).detach().numpy(), np.array(c2_out[-1]).flatten()\n )\n\n # Do log_softmax since we do that before exporting predictor nets\n py_outs = F.log_softmax(py_outs, 1)\n np.testing.assert_array_almost_equal(\n py_outs.view(-1).detach().numpy(), np.array(c2_out[:-1]).flatten()\n )\n\n @given(\n export_num_words=st.integers(1, 5),\n num_word_classes=st.integers(2, 5),\n test_num_words=st.integers(1, 7),\n num_predictions=st.integers(2, 5),\n )\n def test_wordblstm_export_to_caffe2(\n self, export_num_words, num_word_classes, test_num_words, num_predictions\n ):\n for WORD_CONFIG in WORD_CONFIGS:\n config = self._get_config(WordTaggingTask.Config, WORD_CONFIG)\n tensorizers, data = _NewTask._init_tensorizers(config)\n word_labels = [SpecialTokens.PAD, SpecialTokens.UNK, \"NoLabel\", \"person\"]\n tensorizers[\"labels\"].vocab = Vocabulary(word_labels)\n tensorizers[\"tokens\"].vocab = Vocabulary(WORD_VOCAB)\n py_model = _NewTask._init_model(config.model, tensorizers)\n dummy_test_input = self._get_rand_input_intent_slot(\n BATCH_SIZE, W_VOCAB_SIZE, test_num_words\n )\n exporter = ModelExporter(\n ModelExporter.Config(),\n py_model.get_export_input_names(tensorizers),\n dummy_test_input,\n py_model.vocab_to_export(tensorizers),\n py_model.get_export_output_names(tensorizers),\n )\n with tempfile.NamedTemporaryFile(\n delete=False, suffix=\".{}\".format(\".predictor\")\n ) as pred_file:\n exporter.export_to_caffe2(py_model, pred_file.name)\n workspace.ResetWorkspace()\n pred_net = pe.prepare_prediction_net(pred_file.name, CAFFE2_DB_TYPE)\n for _i in range(num_predictions):\n test_inputs = self._get_rand_input_intent_slot(\n BATCH_SIZE, W_VOCAB_SIZE, test_num_words\n )\n self._feed_c2_input(\n workspace, test_inputs, exporter.input_names, exporter.vocab_map\n )\n 
workspace.RunNetOnce(pred_net)\n word_output_names = [\n \"{}:{}\".format(\"word_scores\", class_name)\n for class_name in word_labels\n ]\n py_model.eval()\n py_outs = py_model(*test_inputs)\n context = {\"seq_lens\": test_inputs[-1]}\n target = None\n pred, score = py_model.get_pred(py_outs, target, context)\n c2_word_out = []\n for o_name in word_output_names:\n c2_word_out.extend(list(workspace.FetchBlob(o_name)))\n\n np.testing.assert_array_almost_equal(\n torch.transpose(score, 1, 2).contiguous().view(-1).detach().numpy(),\n np.array(c2_word_out).flatten(),\n )\n\n def _get_rand_input_intent_slot(\n self, batch_size, w_vocab_size, num_words, num_seq=0\n ):\n\n text = torch.from_numpy(\n np.random.randint(w_vocab_size, size=(batch_size, num_words)).astype(\n np.int64\n )\n )\n lengths = torch.from_numpy(\n np.random.randint(num_words, num_words + 1, size=(batch_size)).astype(\n np.int64\n )\n )\n inputs = [text]\n if num_seq > 0:\n inputs.append(\n torch.from_numpy(\n np.random.randint(\n w_vocab_size, size=(batch_size, num_seq, num_words)\n ).astype(np.int64)\n )\n )\n inputs.append(lengths)\n if num_seq > 0:\n inputs.append(\n torch.from_numpy(\n np.random.randint(num_seq, num_seq + 1, size=(batch_size)).astype(\n np.int64\n )\n )\n )\n return tuple(inputs)\n\n @given(\n export_num_words=st.integers(1, 5),\n num_doc_classes=st.integers(2, 5),\n num_word_classes=st.integers(2, 4),\n test_num_words=st.integers(1, 7),\n num_predictions=st.integers(1, 5),\n )\n def test_joint_export_to_caffe2(\n self,\n export_num_words,\n num_doc_classes,\n num_word_classes,\n test_num_words,\n num_predictions,\n ):\n config = self._get_config(IntentSlotTask.Config, JOINT_CONFIG)\n tensorizers, data = _NewTask._init_tensorizers(config)\n doc_labels = [SpecialTokens.UNK, \"cu:other\", \"cu:address_Person\"]\n word_labels = [SpecialTokens.PAD, SpecialTokens.UNK, \"NoLabel\", \"person\"]\n tensorizers[\"word_labels\"].vocab = Vocabulary(word_labels)\n tensorizers[\"doc_labels\"].vocab = Vocabulary(doc_labels)\n tensorizers[\"tokens\"].vocab = Vocabulary(WORD_VOCAB)\n py_model = _NewTask._init_model(config.model, tensorizers)\n dummy_test_input = self._get_rand_input_intent_slot(\n BATCH_SIZE, W_VOCAB_SIZE, test_num_words\n )\n exporter = ModelExporter(\n ModelExporter.Config(),\n py_model.get_export_input_names(tensorizers),\n dummy_test_input,\n py_model.vocab_to_export(tensorizers),\n py_model.get_export_output_names(tensorizers),\n )\n\n with tempfile.NamedTemporaryFile(\n delete=False, suffix=\".{}\".format(\".predictor\")\n ) as pred_file:\n exporter.export_to_caffe2(py_model, pred_file.name)\n workspace.ResetWorkspace()\n\n pred_net = pe.prepare_prediction_net(pred_file.name, CAFFE2_DB_TYPE)\n\n for _i in range(num_predictions):\n test_inputs = self._get_rand_input_intent_slot(\n BATCH_SIZE, W_VOCAB_SIZE, test_num_words\n )\n self._feed_c2_input(\n workspace, test_inputs, exporter.input_names, exporter.vocab_map\n )\n workspace.RunNetOnce(pred_net)\n doc_output_names = [\n \"{}:{}\".format(\"doc_scores\", class_name) for class_name in doc_labels\n ]\n word_output_names = [\n \"{}:{}\".format(\"word_scores\", class_name) for class_name in word_labels\n ]\n\n py_model.eval()\n logits = py_model(*test_inputs)\n context = {\"seq_lens\": test_inputs[-1]}\n target = None\n (d_pred, w_pred), (d_score, w_score) = py_model.get_pred(\n logits, target, context\n )\n\n c2_doc_out = []\n for o_name in doc_output_names:\n c2_doc_out.extend(list(workspace.FetchBlob(o_name)))\n 
np.testing.assert_array_almost_equal(\n d_score.view(-1).detach().numpy(), np.array(c2_doc_out).flatten()\n )\n\n c2_word_out = []\n for o_name in word_output_names:\n c2_word_out.extend(list(workspace.FetchBlob(o_name)))\n\n np.testing.assert_array_almost_equal(\n torch.transpose(w_score, 1, 2).contiguous().view(-1).detach().numpy(),\n np.array(c2_word_out).flatten(),\n )\n\n @given(\n export_num_words=st.integers(1, 5),\n num_doc_classes=st.integers(2, 5),\n test_num_words=st.integers(1, 7),\n num_predictions=st.integers(1, 5),\n test_num_seq=st.integers(1, 7),\n )\n def test_seq_nn_export_to_caffe2(\n self,\n export_num_words,\n num_doc_classes,\n test_num_words,\n num_predictions,\n test_num_seq,\n ):\n config = self._get_config(SeqNNTask.Config, SEQ_NN_CONFIG)\n tensorizers, data = _NewTask._init_tensorizers(config)\n doc_labels = [SpecialTokens.UNK, \"cu:other\", \"cu:address_Person\"]\n tensorizers[\"labels\"].vocab = Vocabulary(doc_labels)\n tensorizers[\"tokens\"].vocab = Vocabulary(WORD_VOCAB)\n py_model = _NewTask._init_model(config.model, tensorizers)\n dummy_test_input = self._get_seq_nn_rand_input(\n BATCH_SIZE, W_VOCAB_SIZE, test_num_words, test_num_seq\n )\n exporter = ModelExporter(\n ModelExporter.Config(),\n py_model.get_export_input_names(tensorizers),\n dummy_test_input,\n py_model.vocab_to_export(tensorizers),\n py_model.get_export_output_names(tensorizers),\n )\n with tempfile.NamedTemporaryFile(\n delete=False, suffix=\".{}\".format(\".predictor\")\n ) as pred_file:\n output_names = exporter.export_to_caffe2(py_model, pred_file.name)\n workspace.ResetWorkspace()\n\n pred_net = pe.prepare_prediction_net(pred_file.name, CAFFE2_DB_TYPE)\n for _i in range(num_predictions):\n test_inputs = self._get_seq_nn_rand_input(\n BATCH_SIZE, W_VOCAB_SIZE, test_num_words, test_num_seq\n )\n self._feed_c2_input(\n workspace, test_inputs, exporter.input_names, exporter.vocab_map\n )\n workspace.RunNetOnce(pred_net)\n c2_out = [list(workspace.FetchBlob(o_name)) for o_name in output_names]\n\n py_model.eval()\n py_outs = py_model(*test_inputs)\n # Do log_softmax since we do that before exporting predictor nets\n py_outs = F.log_softmax(py_outs, 1)\n np.testing.assert_array_almost_equal(\n py_outs.view(-1).detach().numpy(), np.array(c2_out).flatten()\n )\n\n @given(\n test_num_words=st.integers(1, 7),\n num_predictions=st.integers(1, 5),\n test_num_seq=st.integers(1, 7),\n )\n def test_contextual_intent_slot_export_to_caffe2(\n self, test_num_words, num_predictions, test_num_seq\n ):\n config = self._get_config(IntentSlotTask.Config, CONTEXTUAL_INTENT_SLOT_CONFIG)\n tensorizers, data = _NewTask._init_tensorizers(config)\n doc_labels = [SpecialTokens.UNK, \"cu:other\", \"cu:address_Person\"]\n word_labels = [SpecialTokens.UNK, \"NoLabel\", \"person\"]\n tensorizers[\"word_labels\"].vocab = Vocabulary(word_labels)\n tensorizers[\"doc_labels\"].vocab = Vocabulary(doc_labels)\n tensorizers[\"tokens\"].vocab = Vocabulary(WORD_VOCAB)\n tensorizers[\"seq_tokens\"].vocab = Vocabulary(WORD_VOCAB)\n py_model = _NewTask._init_model(config.model, tensorizers)\n dummy_test_input = self._get_rand_input_intent_slot(\n BATCH_SIZE, W_VOCAB_SIZE, test_num_words, test_num_seq\n )\n exporter = ModelExporter(\n ModelExporter.Config(),\n py_model.get_export_input_names(tensorizers),\n dummy_test_input,\n py_model.vocab_to_export(tensorizers),\n py_model.get_export_output_names(tensorizers),\n )\n\n with tempfile.NamedTemporaryFile(\n delete=False, suffix=\".{}\".format(\".predictor\")\n ) as pred_file:\n 
print(pred_file.name)\n exporter.export_to_caffe2(py_model, pred_file.name)\n workspace.ResetWorkspace()\n\n pred_net = pe.prepare_prediction_net(pred_file.name, CAFFE2_DB_TYPE)\n for _i in range(num_predictions):\n test_inputs = self._get_rand_input_intent_slot(\n BATCH_SIZE, W_VOCAB_SIZE, test_num_words, test_num_seq\n )\n self._feed_c2_input(\n workspace, test_inputs, exporter.input_names, exporter.vocab_map\n )\n workspace.RunNetOnce(pred_net)\n doc_output_names = [\n \"{}:{}\".format(\"doc_scores\", class_name) for class_name in doc_labels\n ]\n word_output_names = [\n \"{}:{}\".format(\"word_scores\", class_name) for class_name in word_labels\n ]\n py_model.eval()\n logits = py_model(*test_inputs)\n context = {\"seq_lens\": test_inputs[-1]}\n target = None\n (d_pred, w_pred), (d_score, w_score) = py_model.get_pred(\n logits, target, context\n )\n\n c2_doc_out = []\n for o_name in doc_output_names:\n c2_doc_out.extend(list(workspace.FetchBlob(o_name)))\n c2_word_out = []\n for o_name in word_output_names:\n c2_word_out.extend(list(workspace.FetchBlob(o_name)))\n\n np.testing.assert_array_almost_equal(\n d_score.view(-1).detach().numpy(), np.array(c2_doc_out).flatten()\n )\n\n np.testing.assert_array_almost_equal(\n torch.transpose(w_score, 1, 2).contiguous().view(-1).detach().numpy(),\n np.array(c2_word_out).flatten(),\n )\n\n def _get_metadata(self, num_doc_classes, num_word_classes):\n labels = []\n if num_doc_classes:\n vocab = Vocab(Counter())\n vocab.itos = [\"C_{}\".format(i) for i in range(num_doc_classes)]\n label_meta = FieldMeta()\n label_meta.vocab_size = num_doc_classes\n label_meta.vocab = vocab\n labels.append(label_meta)\n\n if num_word_classes:\n vocab = Vocab(Counter())\n vocab.itos = [\"W_{}\".format(i) for i in range(num_word_classes)]\n label_meta = FieldMeta()\n label_meta.vocab_size = num_word_classes\n label_meta.vocab = vocab\n label_meta.pad_token_idx = 0\n labels.append(label_meta)\n\n w_vocab = Vocab(Counter())\n dict_vocab = Vocab(Counter())\n c_vocab = Vocab(Counter())\n d_vocab = Vocab(Counter())\n w_vocab.itos = W_VOCAB\n dict_vocab.itos = DICT_VOCAB\n c_vocab.itos = CHAR_VOCAB\n d_vocab.itos = []\n\n text_feat_meta = FieldMeta()\n text_feat_meta.unk_token_idx = UNK_IDX\n text_feat_meta.pad_token_idx = PAD_IDX\n text_feat_meta.vocab_size = W_VOCAB_SIZE\n text_feat_meta.vocab = w_vocab\n text_feat_meta.vocab_export_name = \"tokens_vals\"\n text_feat_meta.pretrained_embeds_weight = None\n text_feat_meta.dummy_model_input = TextFeatureField.dummy_model_input\n\n dict_feat_meta = FieldMeta()\n dict_feat_meta.vocab_size = DICT_VOCAB_SIZE\n dict_feat_meta.vocab = dict_vocab\n dict_feat_meta.vocab_export_name = \"dict_vals\"\n dict_feat_meta.pretrained_embeds_weight = None\n dict_feat_meta.dummy_model_input = DictFeatureField.dummy_model_input\n\n char_feat_meta = FieldMeta()\n char_feat_meta.vocab_size = CHAR_VOCAB_SIZE\n char_feat_meta.vocab = c_vocab\n char_feat_meta.vocab_export_name = \"char_vals\"\n char_feat_meta.pretrained_embeds_weight = None\n char_feat_meta.dummy_model_input = CharFeatureField.dummy_model_input\n\n dense_feat_meta = FieldMeta()\n dense_feat_meta.vocab_size = 0\n dense_feat_meta.vocab = d_vocab\n dense_feat_meta.vocab_export_name = \"dense_vals\"\n dense_feat_meta.pretrained_embeds_weight = None\n # ugh, dims are fixed\n dense_feat_meta.dummy_model_input = torch.tensor(\n [[1.0] * DENSE_FEATURE_DIM, [1.0] * DENSE_FEATURE_DIM],\n dtype=torch.float,\n device=\"cpu\",\n )\n\n seq_feat_meta = FieldMeta()\n seq_feat_meta.unk_token_idx = 
UNK_IDX\n seq_feat_meta.pad_token_idx = PAD_IDX\n seq_feat_meta.vocab_size = W_VOCAB_SIZE\n seq_feat_meta.vocab = w_vocab\n seq_feat_meta.vocab_export_name = \"seq_tokens_vals\"\n seq_feat_meta.pretrained_embeds_weight = None\n seq_feat_meta.dummy_model_input = SeqFeatureField.dummy_model_input\n\n meta = CommonMetadata()\n meta.features = {\n DatasetFieldName.TEXT_FIELD: text_feat_meta,\n DatasetFieldName.DICT_FIELD: dict_feat_meta,\n DatasetFieldName.CHAR_FIELD: char_feat_meta,\n DatasetFieldName.DENSE_FIELD: dense_feat_meta,\n DatasetFieldName.SEQ_FIELD: seq_feat_meta,\n }\n meta.target = labels\n if len(labels) == 1:\n [meta.target] = meta.target\n meta.label_names = [label.vocab.itos for label in labels]\n meta.feature_itos_map = {\n f.vocab_export_name: f.vocab.itos for _, f in meta.features.items()\n }\n return meta\n\n def _get_seq_nn_rand_input(self, batch_size, w_vocab_size, num_words, num_seq=1):\n seq = torch.from_numpy(\n np.random.randint(\n w_vocab_size, size=(batch_size, num_seq, num_words)\n ).astype(np.int64)\n )\n seq_lengths = torch.from_numpy(\n np.random.randint(num_seq, num_seq + 1, size=(batch_size)).astype(np.int64)\n )\n return (seq, seq_lengths)\n\n def _get_rand_input(\n self,\n features,\n batch_size,\n w_vocab_size,\n d_vocab_size,\n c_vocab_size,\n num_words,\n num_dict_feats,\n num_chars,\n num_seq=1,\n ):\n text = torch.from_numpy(\n np.random.randint(w_vocab_size, size=(batch_size, num_words)).astype(\n np.int64\n )\n )\n lengths = torch.from_numpy(\n np.random.randint(num_words, num_words + 1, size=(batch_size)).astype(\n np.int64\n )\n )\n dict_feat = torch.from_numpy(\n np.random.randint(\n d_vocab_size, size=(batch_size, num_dict_feats * num_words)\n ).astype(np.int64)\n )\n dict_weights = torch.from_numpy(\n np.random.randn(batch_size, num_words * num_dict_feats).astype(np.float32)\n )\n dict_lengths = torch.from_numpy(\n np.random.randint(\n 1, num_dict_feats + 1, size=(batch_size, num_words)\n ).astype(np.int64)\n )\n chars = torch.from_numpy(\n np.random.randint(\n c_vocab_size, size=(batch_size, num_words, num_chars)\n ).astype(np.int64)\n )\n dense_features = torch.from_numpy(\n np.random.rand(batch_size, DENSE_FEATURE_DIM).astype(np.float32)\n )\n inputs = []\n if features.word_feat:\n inputs.append(text)\n if features.dict_feat:\n inputs.append((dict_feat, dict_weights, dict_lengths))\n if features.char_feat:\n inputs.append(chars)\n if getattr(features, \"seq_word_feat\", False):\n inputs.append(\n torch.from_numpy(\n np.random.randint(\n w_vocab_size, size=(batch_size, num_seq, num_words)\n ).astype(np.int64)\n )\n )\n inputs.append(lengths)\n if getattr(features, \"seq_word_feat\", False):\n inputs.append(\n torch.from_numpy(\n np.random.randint(num_seq, num_seq + 1, size=(batch_size)).astype(\n np.int64\n )\n )\n )\n if features.dense_feat:\n inputs.append(dense_features)\n return tuple(inputs)\n\n def _get_config(self, cls, config_str):\n params_json = json.loads(config_str)\n config = config_from_json(cls, params_json)\n return config\n\n def _feed_c2_input(self, workspace, py_inputs, input_names, vocab_map):\n c2_input = []\n\n for py_input in py_inputs:\n c2_input = c2_input + (\n list(py_input) if isinstance(py_input, tuple) else [py_input]\n )\n for i, input in enumerate(list(c2_input)):\n input_np = input.numpy()\n if input_names[i] in vocab_map.keys():\n # Map the input to the str form\n input_vocab = vocab_map[input_names[i]]\n map_fn = np.vectorize(lambda x: input_vocab[x])\n input_str = map_fn(input_np)\n input_np = 
np.array(input_str, dtype=str)\n workspace.FeedBlob(input_names[i] + \"_str:value\", input_np)\n else:\n workspace.FeedBlob(input_names[i], input_np)\n"
] | [
[
"numpy.array",
"numpy.random.rand",
"numpy.vectorize",
"numpy.random.randn",
"torch.nn.functional.log_softmax",
"numpy.random.randint",
"torch.tensor",
"torch.transpose"
]
] |
meetshah1995/model-server | [
"1533cbc9f9eb46f244c7b22d7b56c1b70b702f3b"
] | [
"examples/keras_image_classification/keras_image_classification.py"
] | [
"import tensorflow as tf\nimport numpy as np\nfrom model_server import Servable\n\n\nclass InceptionV3Classifier(Servable):\n def __init__(self, args):\n print(\"Loading InceptionV3 from tf.keras\")\n self.model = tf.keras.applications.InceptionV3(include_top=True, weights=\"imagenet\")\n # This is a hack to make this work with server. Check https://github.com/keras-team/keras/issues/2397\n # If you don't run this, you will get <tensor> is not an element of this graph error\n self.model._make_predict_function()\n print(\"Model loaded!\")\n\n def predict(self, input_array_dict):\n image_tensor = input_array_dict[\"image_tensor\"]\n if image_tensor.shape == 3:\n image_tensor = np.expand_dims(image_tensor, axis=0)\n predictions = self.model.predict(image_tensor)\n return {\"prediction_scores\": predictions}\n\n def get_model_info(self, list_of_model_info_dict):\n return [{\"name\": \"InceptionV3\", \"version\": 1, \"status\": \"up\", \"misc\": \"This is an example\"}]\n"
] | [
[
"tensorflow.keras.applications.InceptionV3",
"numpy.expand_dims"
]
] |
Orekisiori/ChatBot | [
"b6492470fd2b7dad893d67526e37c14112181ae2"
] | [
"generate_dialogue_subset.py"
] | [
"import argparse\nfrom os.path import join\nimport numpy as np\nfrom collections import Counter\nimport matplotlib.pyplot as plt\nfrom matplotlib.pyplot import MultipleLocator\n\n\ndef generate_subset():\n \"\"\"\n 用于生成训练子集\n :return:\n \"\"\"\n parser = argparse.ArgumentParser()\n parser.add_argument('--raw_data_path', default='data/train.txt', type=str, required=False, help='原始训练语料')\n parser.add_argument('--subset_size', default=500000, type=int, required=False, help='要获取的对话数据子集的规模')\n parser.add_argument('--subset_data_path', default='data', type=str, required=False,\n help='数据子集文件路径,指定文件的父目录')\n args = parser.parse_args()\n with open(args.raw_data_path, \"r\", encoding=\"utf8\") as f:\n data = f.read()\n dialogues = data.split(\"\\n\\n\")\n subset_size = min(len(dialogues), args.subset_size)\n\n with open(join(args.subset_data_path, \"train_{}w.txt\".format(int(subset_size / 10000))), \"w\", encoding=\"utf8\") as f:\n print(\"generating subset,please wait a few seconds \")\n for dialogue_index, dialogue in enumerate(dialogues):\n if dialogue_index >= subset_size:\n break\n for utterance in dialogue.split(\"\\n\"):\n f.writelines(utterance + \"\\n\")\n f.writelines(\"\\n\")\n\n\ndef compute_dialogue_length():\n \"\"\"\n 查看聊天语料中的dialogue的长度分布\n :return:\n \"\"\"\n parser = argparse.ArgumentParser()\n parser.add_argument('--raw_data_path', default='data/train.txt', type=str, required=False, help='原始训练语料')\n args = parser.parse_args()\n with open(args.raw_data_path, \"r\", encoding=\"utf8\") as f:\n data = f.read()\n dialogues = data.split(\"\\n\\n\")\n # 统计各个dialogue的长度\n dialogues_lengths = [len(dialogue.replace(\"\\n\", \"\")) for dialogue in dialogues]\n counter = Counter(dialogues_lengths) # {label:sum(label)}\n dialogue_length_arr = list(counter)\n num_arr = [counter[element] for element in list(counter)]\n print(counter[300])\n\n x_major_locator = MultipleLocator(100) # MultipleLocator用于设置刻度间隔\n # y_major_locator = MultipleLocator(20000)\n ax = plt.gca() # ax为两条坐标轴的实例\n ax.xaxis.set_major_locator(x_major_locator) # 把x轴的主刻度设置为10的倍数\n # ax.yaxis.set_major_locator(y_major_locator)\n\n plt.xlabel('dialogue length')\n plt.ylabel('number of dialogue')\n # plt.plot(dialogue_length_arr, num_arr, c='green')\n plt.scatter(dialogue_length_arr, num_arr)\n plt.show()\n\n\nif __name__ == '__main__':\n compute_dialogue_length()\n"
] | [
[
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.MultipleLocator",
"matplotlib.pyplot.show",
"matplotlib.pyplot.ylabel",
"matplotlib.pyplot.scatter",
"matplotlib.pyplot.gca"
]
] |
cuent/comp551 | [
"4463e4c0d2e7c161bc1575c63389c5893299233a"
] | [
"assignments/assignment4/src/models/model1.py"
] | [
"import torch\nimport torch.utils.data\nfrom torch import nn\nimport torch.nn.functional as F\n\n\nclass VAE(nn.Module):\n def __init__(self, h, n):\n super(VAE, self).__init__()\n self.img_size = 28 * 28\n\n self.h_encoder = nn.Linear(self.img_size, h)\n self.h_mu = nn.Linear(h, n)\n self.h_logvar = nn.Linear(h, n)\n self.h_decoder = nn.Linear(n, h)\n self.output = nn.Linear(h, self.img_size)\n\n def encoder(self, x):\n h1 = torch.tanh(self.h_encoder(x))\n return self.h_mu(h1), self.h_logvar(h1)\n\n def sample(self, mu, logvar):\n # eq 10\n std = torch.sqrt(torch.exp(logvar))\n eps = torch.randn_like(std)\n return mu + eps * std # z ~ p(z|x)\n\n def decoder(self, z):\n out = torch.tanh(self.h_decoder(z))\n return torch.sigmoid(self.output(out))\n\n def forward(self, x):\n mu, logvar = self.encoder(x.view(-1, self.img_size))\n z = self.sample(mu, logvar)\n return self.decoder(z), mu, logvar\n\n def loss_function(self, x, r, mean, log_var):\n # eq10\n x = x.view(-1, 28 * 28)\n kl = (-0.5 * ((1 + log_var - torch.pow(mean, 2) - torch.exp(log_var)))).sum(1).mean()\n # bce = F.binary_cross_entropy(r, x, reduction='sum') / batch_size\n recons = (F.binary_cross_entropy(r, x, reduction='none')).sum(1).mean()\n return kl + recons\n"
] | [
[
"torch.nn.Linear",
"torch.randn_like",
"torch.exp",
"torch.nn.functional.binary_cross_entropy",
"torch.pow"
]
] |
satyakisikdar/Attributed-VRG | [
"502375d6a62eb84563c2fb6786e2c257edc32e0c"
] | [
"VRG/src/graph_stats.py"
] | [
"\"\"\"\nContainer for different graph stats\n\"\"\"\nimport platform\nimport subprocess as sub\nimport sys\nfrom collections import Counter, deque\nfrom typing import Dict, Tuple, List, Any\n\nsys.path.extend(['./../', './../../'])\nprint('sys path: ', sys.path)\nimport editdistance as ed\nimport matplotlib.pyplot as plt\nimport networkx as nx\n# import NetLSD.netlsd as net\nimport numpy as np\nimport seaborn as sns\nimport igraph as ig\n\n# from src.utils import check_file_exists, ColorPrint as CP, dump_pickle\n\nsns.set()\nsns.set_style(\"darkgrid\")\n\nclass ColorPrint:\n @staticmethod\n def print_red(message, end='\\n'):\n sys.stderr.write('\\x1b[1;31m' + message.strip() + '\\x1b[0m' + end)\n\n @staticmethod\n def print_green(message, end='\\n'):\n sys.stdout.write('\\x1b[1;32m' + message.strip() + '\\x1b[0m' + end)\n\n @staticmethod\n def print_orange(message, end='\\n'):\n sys.stderr.write('\\x1b[1;33m' + message.strip() + '\\x1b[0m' + end)\n\n @staticmethod\n def print_blue(message, end='\\n'):\n # pass\n sys.stdout.write('\\x1b[1;34m' + message.strip() + '\\x1b[0m' + end)\n\n @staticmethod\n def print_bold(message, end='\\n'):\n sys.stdout.write('\\x1b[1;37m' + message.strip() + '\\x1b[0m' + end)\n\n @staticmethod\n def print_none(message, end='\\n'):\n pass\n # sys.stdout.write(message + end)\n\nCP = ColorPrint\n\n\nclass GraphStats:\n \"\"\"\n GraphStats has methods for finding different statistics for a NetworkX graph\n \"\"\"\n __slots__ = ['graph', 'ig_graph', 'stats']\n\n def __init__(self, graph: nx.Graph):\n self.graph: nx.Graph = graph\n self.ig_graph: ig.Graph = None\n self.stats: Dict[str, Any] = {'name': graph.name, 'n': graph.order(), 'm': graph.size()}\n\n def __str__(self) -> str:\n st = f'\"{self.graph.name}\" stats:'\n for key, val in self.stats.items():\n if isinstance(val, float):\n val = round(val, 3)\n elif isinstance(val, dict):\n val = list(val.items())[: 3] # print the first 3 key value pairs\n elif 'numpy' in str(type(val)):\n val = val[: 3]\n st += f'\"{key}\": {val} '\n return st\n\n def __getitem__(self, item):\n \"\"\"\n Allows square bracket indexing for stats - allow for some fuzzy matching\n \"\"\"\n if item in self.stats: # the stat has already been calculated\n return self.stats[item]\n\n # try some fuzzy matching to figure out thde function to call based on the item\n object_methods = [method_name for method_name in dir(self)\n if callable(getattr(self, method_name)) and not method_name.startswith('_')]\n\n best_match_func = ''\n best_match_score = float('inf')\n\n for method in object_methods:\n dist = ed.eval(method, item)\n if dist == 0:\n best_match_score = dist\n best_match_func = method\n break\n\n if dist < best_match_score:\n best_match_score = dist\n best_match_func = method\n\n assert best_match_func != '', 'edit distance did not work'\n item = best_match_func\n if best_match_score != 0:\n CP.print_orange(\n f'Best matching function found for \"{item}\": \"{best_match_func}()\", edit distance: {best_match_score}')\n\n if best_match_func not in self.stats:\n best_match_func = getattr(self,\n best_match_func) # translates best_match_fun from string to a function object\n best_match_func() # call the best match function\n\n assert item in self.stats, f'stat: {item} is not updated after function call'\n return self.stats[item]\n\n def plot(self, y, ax=None, kind='line', x=None, **kwargs) -> None:\n if isinstance(y, dict):\n lists = sorted(y.items())\n x, y = zip(*lists)\n else: # if isinstance(x, list) or isinstance(x, np.array):\n x = 
list(range(len(y)))\n\n if kind == 'line':\n # plt.plot(x, y, marker='o', linestyle='--')\n sns.lineplot(x, y, marker='o', dashes='--', ax=ax, **kwargs) # , dashes=True)\n if kind == 'scatter':\n # plt.scatter(x, y, marker='o')\n ax = sns.scatterplot(x, y, ax=ax, **kwargs)\n\n title = kwargs.get('title', '')\n xlabel = kwargs.get('xlabel', '')\n ylabel = kwargs.get('ylabel', '')\n plt.title(title)\n plt.xlabel(xlabel)\n plt.ylabel(ylabel)\n plt.legend(loc='best')\n return ax\n\n def shortest_path_ast(self, alpha: float) -> float:\n if self.ig_graph is None:\n self.ig_graph = ig.Graph.from_networkx(self.graph)\n n = self.ig_graph.vcount()\n\n d = np.array(self.ig_graph.degree()).reshape(n, 1)\n\n H = np.array(self.ig_graph.shortest_paths())\n H_alpha = np.float_power(H, -alpha)\n np.fill_diagonal(H_alpha, 0)\n\n h = H_alpha.sum()\n E = H_alpha / h\n\n q = E.sum(axis=1).reshape(n, 1)\n D_q = np.diag(q.flatten())\n qq_t = np.multiply(q, q.T)\n\n X = E - qq_t\n Y = D_q - qq_t\n\n num = np.matmul(np.matmul(d.T, X), d)\n denom = np.matmul(np.matmul(d.T, Y), d)\n shortest_path_ast = (num / denom).item()\n self.stats[f'shortest_path_ast_{alpha}'] = shortest_path_ast\n return shortest_path_ast\n\n def degree_mixing_matrix(self, normalized: bool = True) -> np.array:\n mat = nx.degree_mixing_matrix(self.graph, normalized=normalized)\n self.stats['degree_mixing_matrix'] = mat\n return mat\n\n def attr_mixing_matrix(self, attr_name: str = 'value', mapping: dict = None, normalized: bool = True) -> np.array:\n mat = nx.attribute_mixing_matrix(self.graph, attribute=attr_name, mapping=mapping, normalized=normalized)\n self.stats['attr_mixing_matrix'] = mat\n return mat\n\n def _calculate_robustness_measures(self) -> None:\n \"\"\"\n Calls the Leiden comms and frac of nodes in giant component methods\n \"\"\"\n print('Calling number of components, frac of nodes in giant component, leiden alg')\n print('Populates \"num_components\", \"giant_frac\", \"num_clusters\", \"modularity\" in self.stats')\n self.stats['num_components'] = nx.number_connected_components(self.graph)\n self.giant_component_frac()\n self.leiden_communities()\n self.giant_component_frac()\n return\n\n def leiden_communities(self) -> Tuple[int, float]:\n \"\"\"\n Use Leiden alg to find (a) the number of communities and (b) modularity\n \"\"\"\n if self.ig_graph is None:\n self.ig_graph = ig.Graph.from_networkx(self.graph)\n partition = self.ig_graph.community_leiden(n_iterations=-1, objective_function='modularity')\n self.stats['num_clusters'] = len(partition)\n self.stats['modularity'] = partition.modularity\n return len(partition), partition.modularity\n\n def giant_component_frac(self):\n \"\"\"\n returns the fraction of nodes in the giant connected component\n \"\"\"\n lcc = max(nx.connected_components(self.graph), key=len)\n frac = len(lcc) / self.graph.order()\n self.stats['giant_frac'] = frac\n return frac\n\n def adj_eigenvalues(self):\n \"\"\"\n Returns the eigenvalues of the Adjacency matrix\n :return:\n \"\"\"\n CP.print_none('Calculating eigenvalues of Adjacency Matrix')\n\n adj_eigenvalues = nx.adjacency_spectrum(self.graph)\n self.stats['adj_eigenvalues'] = adj_eigenvalues\n\n return adj_eigenvalues\n\n def attribute_assortativity(self, attr_name='value') -> float:\n \"\"\"\n returns the attribute assortativity\n :param attr_name:\n :return:\n \"\"\"\n attr_ast = nx.attribute_assortativity_coefficient(self.graph, attribute=attr_name)\n self.stats['attribute_assortativity'] = attr_ast\n return attr_ast\n\n def 
degree_assortativity(self) -> float:\n \"\"\"\n Returns the degree assortativity of the network\n :return:\n \"\"\"\n if self.ig_graph is None:\n self.ig_graph = ig.Graph.from_networkx(self.graph)\n CP.print_none('Calculating Degree Assortativity')\n\n assortativity = self.ig_graph.assortativity_degree(directed=False)\n # assortativity = nx.degree_assortativity_coefficient(self.graph)\n self.stats['degree_assortativity'] = assortativity\n\n return assortativity\n\n def _calculate_all_stats(self):\n \"\"\"\n Calculate all stats\n \"\"\"\n CP.print_orange('Calculating all stats')\n\n object_methods = [method_name for method_name in dir(self)\n if callable(getattr(self, method_name)) and not method_name.startswith('_')]\n\n for method in object_methods:\n method = getattr(self, method)\n try:\n method()\n except NotImplementedError as e:\n pass\n\n def closeness_centrality(self) -> Dict[int, float]:\n \"\"\"\n Closeness centrality\n \"\"\"\n CP.print_none('Calculating Closeness Centrality')\n\n closeness = nx.closeness_centrality(self.graph)\n self.stats['closeness_centrality'] = closeness\n\n return closeness\n\n def clustering_coefficients_by_degree(self) -> Dict[int, float]:\n \"\"\"\n Returns the average clustering coefficient by degree\n :return:\n \"\"\"\n CP.print_none('Calculating Clustering Coefficients and CC by degree')\n\n clustering_coeffs = nx.clustering(self.graph)\n self.stats['clustering_coeffs'] = clustering_coeffs\n\n clustering_by_degree = {} # clustering per degree\n\n # get the sums\n for node, cc in clustering_coeffs.items():\n deg = self.graph.degree[node]\n if deg not in clustering_by_degree:\n clustering_by_degree[deg] = []\n clustering_by_degree[deg].append(cc)\n\n avg_clustering_by_degree = {deg: np.mean(ccs) for deg, ccs in clustering_by_degree.items()}\n self.stats['clustering_coefficients_by_degree'] = avg_clustering_by_degree\n\n return avg_clustering_by_degree\n\n def component_size_distribution(self) -> List[Tuple[int, float]]:\n \"\"\"\n Returns the distribution of component sizes and fraction of nodes in each component, largest first\n :return:\n \"\"\"\n CP.print_none('Calculating Component Size Distribution')\n\n component_size_ratio_list = [(len(c), len(c) / self.graph.order()) for c in\n sorted(nx.connected_components(self.graph),\n key=len, reverse=True)]\n self.stats['component_size_distribution'] = component_size_ratio_list\n\n return component_size_ratio_list\n\n def degree_centrality(self) -> Dict[int, float]:\n \"\"\"\n Degree centrality\n \"\"\"\n CP.print_none('Calculating Degree Centrality')\n\n degree_centrality = nx.degree_centrality(self.graph)\n self.stats['degree_centrality'] = degree_centrality\n\n return degree_centrality\n\n def degree_dist(self, normalized=True) -> Dict[int, float]:\n \"\"\"\n Returns the degrees counter - keys: degrees, values: #nodes with that degree\n :return:\n \"\"\"\n CP.print_none('Calculating Degree Distribution')\n\n degree_seq = sorted(deg for _, deg in self.graph.degree())\n self.stats['degree_seq'] = degree_seq\n\n degree_counts = Counter(degree_seq)\n\n if normalized:\n for deg, count in degree_counts.items():\n degree_counts[deg] /= self.graph.order()\n\n self.stats['degree_dist'] = dict(degree_counts)\n return dict(degree_counts)\n\n def diameter(self) -> float:\n CP.print_none('Calculating Diameter')\n\n diam = self.ig_graph.diameter(directed=False)\n self.stats['diameter'] = diam\n\n return diam\n\n def effective_diameter(self) -> None:\n \"\"\"\n Returns the 90% effective diameter of a graph\n 
:return:\n \"\"\"\n raise NotImplementedError()\n\n def k_hop_reach(self) -> np.array:\n \"\"\"\n Returns the average number of nodes reachable from any node in k-hops\n Two levels of aggregation:\n 1. _k_hop_reachability gives the absolute count of nodes reachable within a k-hops from a node\n 2. overall_k_hop_dict aggregates the sum of all absolute counts for all nodes\n Normalizing factor: n ** 2 (once for each step)\n Then convert to a cumulative distribution\n :return:\n \"\"\"\n CP.print_none('Calculating hop-plot')\n\n overall_k_hop_dict = Counter()\n\n for node in self.graph.nodes():\n k_hop_dict = self._k_hop_reachability_counter(node)\n overall_k_hop_dict += Counter(k_hop_dict)\n\n k_hop_vec = np.array([v for k, v in sorted(overall_k_hop_dict.items(), key=lambda x: x[0])])\n k_hop_vec = k_hop_vec / (self.graph.order() ** 2)\n\n self.stats['k_hop_reach'] = np.cumsum(k_hop_vec)\n\n return self.stats['k_hop_reach']\n\n def _k_hop_reachability_counter(self, node) -> Dict[int, float]:\n \"\"\"\n computes fraction of nodes reachable from the given node in k hops\n :param node: node to compute the k_hop_reach vector\n :return:\n \"\"\"\n reachability_counter = {0: 1} # within 0 hops, you can reach 1 node - itself\n hop_counter = {node: 0} # node is 0 hops away from itself\n queue = deque([node])\n\n while len(queue) != 0:\n node = queue.popleft()\n for nbr in self.graph.neighbors(node):\n if nbr not in hop_counter: # unexplored neighbor\n hop_counter[nbr] = hop_counter[node] + 1 # update hop distance of neighbor\n\n if hop_counter[nbr] not in reachability_counter:\n reachability_counter[hop_counter[nbr]] = 0 # reachability_counter[hop_counter[node]]\n reachability_counter[hop_counter[nbr]] += 1 # keep track of fraction of nodes reachable\n\n queue.append(nbr)\n\n # normalized_reachability_counter = {key: value / n for key, value in reachability_counter.items()}\n return reachability_counter\n\n def laplacian_eigenvalues(self) -> np.array:\n \"\"\"\n Returns eigenvalues of the Laplacian\n :return:\n \"\"\"\n CP.print_none('Calculating Laplacian Eigenvalues')\n\n laplacian_eigs = nx.laplacian_spectrum(self.graph)\n self.stats['laplacian_eigenvalues'] = laplacian_eigs\n\n return laplacian_eigs\n\n def pagerank(self) -> Dict[int, float]:\n \"\"\"\n PageRank centrality\n \"\"\"\n CP.print_none('Calculating PageRank')\n # if self.ig_graph is None:\n # self.ig_graph = ig.Graph.from_networkx(self.graph)\n # pagerank = self.ig_graph.pagerank(directed=False)\n pagerank = nx.pagerank_scipy(self.graph)\n self.stats['pagerank'] = pagerank\n\n return pagerank\n\n def pgd_graphlet_counts(self, n_threads=4) -> Dict:\n \"\"\"\n Return the dictionary of graphlets and their counts - based on Neville's PGD\n :return:\n \"\"\"\n pgd_path = './src/PGD'\n graphlet_counts = {}\n\n if 'Linux' in platform.platform() and check_file_exists(f'{pgd_path}/pgd_0'):\n edgelist = '\\n'.join(nx.generate_edgelist(self.graph, data=False))\n edgelist += '\\nX' # add the X\n dummy_path = f'{pgd_path}/dummy.txt'\n\n try:\n bash_script = f'{pgd_path}/pgd -w {n_threads} -f {dummy_path} -c {dummy_path}'\n\n pipe = sub.run(bash_script, shell=True, capture_output=True, input=edgelist.encode(), check=True,\n timeout=30000) # timeout of heat death of universe\n\n output_data = pipe.stdout.decode()\n\n except sub.TimeoutExpired as e:\n CP.print_blue(f'PGD timeout!{e.stderr}')\n graphlet_counts = {}\n\n except sub.CalledProcessError as e:\n CP.print_blue(f'PGD error {e.stderr}')\n graphlet_counts = {}\n except Exception as e:\n 
CP.print_blue(str(e))\n graphlet_counts = {}\n else: # pgd is successfully run\n for line in output_data.split('\\n')[: -1]: # last line blank\n graphlet_name, count = map(lambda st: st.strip(), line.split('='))\n graphlet_counts[graphlet_name] = int(count)\n else:\n graphlet_counts = {}\n self.stats['pgd_graphlet_counts'] = graphlet_counts\n\n return graphlet_counts\n\n\nif __name__ == '__main__':\n # g = nx.karate_club_graph()\n # g = nx.ring_of_cliques(50, 4)\n g = nx.erdos_renyi_graph(5, 0.2, seed=1)\n # g = nx.path_graph(5)\n gs = GraphStats(graph=g)\n print(gs['degree_dist'])\n print(gs.stats)"
] | [
[
"numpy.fill_diagonal",
"numpy.matmul",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.title",
"matplotlib.pyplot.legend",
"numpy.mean",
"numpy.multiply",
"numpy.float_power",
"matplotlib.pyplot.ylabel",
"numpy.cumsum"
]
] |
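The GraphStats class in the row above wraps a networkx graph, caches every computed statistic in self.stats, and lets __getitem__ fall back to edit-distance matching of method names. A minimal runnable sketch of two of those statistics (normalized degree distribution and average clustering coefficient by degree), assuming only networkx and the standard library; the function names here are illustrative and are not the module's API:

from collections import Counter, defaultdict
from statistics import mean

import networkx as nx


def degree_dist(g: nx.Graph, normalized: bool = True) -> dict:
    # degree -> number (or fraction) of nodes with that degree
    counts = Counter(deg for _, deg in g.degree())
    if normalized:
        return {deg: cnt / g.order() for deg, cnt in counts.items()}
    return dict(counts)


def clustering_by_degree(g: nx.Graph) -> dict:
    # degree -> average local clustering coefficient of nodes with that degree
    buckets = defaultdict(list)
    for node, cc in nx.clustering(g).items():
        buckets[g.degree[node]].append(cc)
    return {deg: mean(ccs) for deg, ccs in buckets.items()}


if __name__ == "__main__":
    g = nx.erdos_renyi_graph(50, 0.1, seed=1)
    print(degree_dist(g))
    print(clustering_by_degree(g))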
verypluming/HELP | [
"216b6497978869718afe629266a672d2e9919326"
] | [
"scripts/create_dataset_PMB.py"
] | [
"\n#!/usr/bin/python\n# -*- coding: utf-8 -*-\n# Copyright 2019 Hitomi Yanaka\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport glob\nimport numpy as np\nimport pandas as pd\nimport re\nfrom collections import defaultdict\nfrom lxml import etree\nfrom nltk.corpus import wordnet as wn\nimport xml.dom.minidom\nfrom pattern3.en import *\nimport subprocess\nimport copy\nimport os\nimport sys\nfrom xml.dom import minidom\nimport inflect\nimport logging as log\nfrom nltk.wsd import lesk\ninflect = inflect.engine()\n\n\ndef keep_plurals(noun, newnoun):\n if inflect.singular_noun(noun) is False:\n # singular\n return singularize(newnoun)\n else:\n # plural\n return pluralize(newnoun)\n\ndef keep_tenses(verb, newverb):\n ori_tense = tenses(verb)[0]\n ori_tense2 = [x for x in ori_tense if x is not None]\n #print(ori_tense2)\n tense, person, number, mood, aspect = None, None, None, None, None\n\n if 'infinitive' in ori_tense2:\n tense = INFINITIVE\n elif 'present' in ori_tense2:\n tense = PRESENT\n elif 'past' in ori_tense2:\n tense = PAST\n elif 'future' in ori_tense2:\n tense = FUTURE\n\n if 1 in ori_tense2:\n person = 1\n elif 2 in ori_tense2:\n person = 2\n elif 3 in ori_tense2:\n person = 3\n else:\n person = None\n\n if 'singular' in ori_tense2:\n number = SINGULAR\n elif 'plural' in ori_tense2:\n number = PLURAL\n else:\n number = None\n\n if 'indicative' in ori_tense2:\n mood = INDICATIVE\n elif 'imperative' in ori_tense2:\n mood = IMPERATIVE\n elif 'conditional' in ori_tense2:\n mood = CONDITIONAL\n elif 'subjunctive' in ori_tense2:\n mood = SUBJUNCTIVE\n else:\n mood = None\n \n #if 'imperfective' in ori_tense2:\n # aspect = IMPERFECTIVE\n #elif 'perfective' in ori_tense2:\n # aspect = PERFECTIVE\n if 'progressive' in ori_tense2:\n aspect = PROGRESSIVE\n else:\n aspect = None\n\n newverb_tense = conjugate(newverb, \n tense = tense, # INFINITIVE, PRESENT, PAST, FUTURE\n person = person, # 1, 2, 3 or None\n number = number, # SG, PL\n mood = mood, # INDICATIVE, IMPERATIVE, CONDITIONAL, SUBJUNCTIVE\n aspect = aspect,\n negated = False, # True or False\n parse = True)\n #print(newverb, newverb_tense)\n return newverb_tense\n\n# functions for replacement\ndef remove_duplicates(x):\n y=[]\n for i in x:\n if i not in y:\n y.append(i)\n return y\n\ndef replace_sentence(determiner, nounmono, noun, newnoun, sentence, results, target):\n pat = re.compile(noun)\n newpat = re.compile(newnoun)\n newsentence = re.sub(noun, newnoun, sentence)\n gold_label = check_label(nounmono, 'simple')\n record = pd.Series([target, determiner, nounmono, gold_label, noun, newnoun, 'simple', sentence, newsentence], index=results.columns)\n record = pd.Series([target, determiner, nounmono, rev_label(gold_label, nounmono), noun, newnoun, 'simple', newsentence, sentence], index=results.columns)\n results = results.append(record, ignore_index = True)\n return results\n\ndef replace_sentence_WN_nv(determiner, nounmono, verbmono, noun, nounsense, verb, verbsense, sentence, results, target):\n nounsynset = 
wn.synset(nounsense)\n nounhypernyms = nounsynset.hypernyms()\n nounhyponyms = nounsynset.hyponyms()\n verbsynset = wn.synset(verbsense)\n verbhypernyms = verbsynset.hypernyms()\n verbhyponyms = verbsynset.hyponyms()\n \n nounhypersim = [nounhypernym.wup_similarity(verbsynset) if nounhypernym.wup_similarity(verbsynset) is not None else 0 for nounhypernym in nounhypernyms]\n nounhyposim = [nounhyponym.wup_similarity(verbsynset) if nounhyponym.wup_similarity(verbsynset) is not None else 0 for nounhyponym in nounhyponyms]\n verbhypersim = [verbhypernym.wup_similarity(nounsynset) if verbhypernym.wup_similarity(nounsynset) is not None else 0 for verbhypernym in verbhypernyms]\n verbhyposim = [verbhyponym.wup_similarity(nounsynset) if verbhyponym.wup_similarity(nounsynset) is not None else 0 for verbhyponym in verbhyponyms]\n \n nounhypernym = nounhypernyms[nounhypersim.index(max(nounhypersim))]\n nounhyponym = nounhyponyms[nounhyposim.index(max(nounhyposim))]\n verbhypernym = verbhypernyms[verbhypersim.index(max(verbhypersim))]\n verbhyponym = verbhyponyms[verbhyposim.index(max(verbhyposim))]\n\n synsetdict = {#\"noun_synset\": nounsynset,\n \"noun_hypernym\": nounhypernym,\n \"noun_hyponym\": nounhyponym,\n #\"verb_synset\": verbsynset,\n \"verb_hypernym\": verbhypernym,\n \"verb_hyponym\": verbhyponym\n }\n #print(synsetdict)\n for rel, synset in synsetdict.items():\n synsetwords = synset.lemma_names()\n #print(synsetwords)\n for synsetword in synsetwords:\n new_synsetword = re.sub(\"_\", \" \", synsetword)\n if re.search(\"noun\", rel):\n newnoun = keep_plurals(noun, new_synsetword)\n pat = re.compile(noun)\n newpat = re.compile(newnoun)\n newsentence = re.sub(noun, newnoun, sentence)\n gold_label = check_label(nounmono, rel)\n record = pd.Series([target, determiner, nounmono, gold_label, noun, newnoun, rel, sentence, newsentence], index=results.columns)\n results = results.append(record, ignore_index = True)\n record = pd.Series([target, determiner, nounmono, rev_label(gold_label, nounmono), noun, newnoun, rel, newsentence, sentence], index=results.columns)\n results = results.append(record, ignore_index = True)\n else:\n newverb = keep_tenses(verb, new_synsetword)\n pat = re.compile(verb)\n newpat = re.compile(newverb)\n newsentence = re.sub(verb, newverb, sentence)\n gold_label = check_label(verbmono, rel)\n record = pd.Series([target, determiner, verbmono, gold_label, verb, newverb, rel, sentence, newsentence], index=results.columns)\n results = results.append(record, ignore_index = True)\n record = pd.Series([target, determiner, verbmono, rev_label(gold_label, verbmono), verb, newverb, rel, newsentence, sentence], index=results.columns)\n results = results.append(record, ignore_index = True)\n\n return results\n\ndef replace_sentence_WN(determiner, nounmono, noun, sense, sentence, results, target):\n synset = wn.synset(sense)\n hypernyms = synset.hypernyms()\n hyponyms = synset.hyponyms()\n\n for hypernym in hypernyms:\n #if len(hypernym.examples()) == 0:\n # # remove if no example exists in WordNet\n # continue\n hypernym_words = hypernym.lemma_names()\n for hypernym_word in hypernym_words:\n #print(hypernym_word)\n new_hypernym_word = re.sub(\"_\", \" \", hypernym_word)\n newnoun = keep_plurals(noun, new_hypernym_word)\n pat = re.compile(noun)\n newpat = re.compile(newnoun)\n newsentence = re.sub(noun, newnoun, sentence)\n gold_label = check_label(nounmono, 'noun_hypernym_obj')\n record = pd.Series([target, determiner, nounmono, gold_label, noun, newnoun, 'noun_hypernym_obj', 
sentence, newsentence], index=results.columns)\n results = results.append(record, ignore_index = True)\n record = pd.Series([target, determiner, nounmono, rev_label(gold_label, nounmono), noun, newnoun, 'noun_hypernym_obj', newsentence, sentence], index=results.columns)\n results = results.append(record, ignore_index = True)\n \n for hyponym in hyponyms:\n #if len(hyponym.examples()) == 0:\n # # remove if no example exists in WordNet\n # continue\n hyponym_words = hyponym.lemma_names()\n for hyponym_word in hyponym_words:\n #print(hyponym_word)\n new_hyponym_word = re.sub(\"_\", \" \", hyponym_word)\n newnoun = keep_plurals(noun, new_hyponym_word)\n pat = re.compile(noun)\n newpat = re.compile(newnoun)\n newsentence = re.sub(noun, newnoun, sentence)\n gold_label = check_label(nounmono, 'noun_hyponym_obj')\n record = pd.Series([target, determiner, nounmono, gold_label, noun, newnoun, 'noun_hyponym_obj', sentence, newsentence], index=results.columns)\n results = results.append(record, ignore_index = True)\n record = pd.Series([target, determiner, nounmono, rev_label(gold_label, nounmono), noun, newnoun, 'noun_hypernym_obj', newsentence, sentence], index=results.columns)\n results = results.append(record, ignore_index = True)\n return results\n\ndef replace_sentence_numeral(det, num, sentence, results, target):\n #not used\n tmpnum = str(number(num))\n tmpnum = re.sub(\",\", \"\", tmpnum)\n if det.lower() in ['more', 'greater', 'larger', 'taller', 'bigger', 'least']:\n #upward monotonicity\n pat = re.compile(num)\n newnum = str(int(tmpnum) - 1) #trial\n newpat = re.compile(newnum)\n newsentence = re.sub(num, newnum, sentence)\n record = pd.Series([target, num, newnum, 'numeral', sentence, newsentence], index=results.columns)\n #print(target, newnum, newsentence)\n results = results.append(record, ignore_index = True)\n elif det.lower() in ['less', 'fewer', 'smaller', 'shorter', 'most']:\n #downward monotonicity\n pat = re.compile(num)\n newnum = str(int(tmpnum) + 1) #trial\n newpat = re.compile(newnum)\n newsentence = re.sub(num, newnum, sentence)\n record = pd.Series([target, num, newnum, 'numeral', sentence, newsentence], index=results.columns)\n results = results.append(record, ignore_index = True)\n else:\n print(\"target: \"+target+\", other determiner: \"+determiner) \n return results\n\n#candc2transccg\ndef get_nodes_by_tag(root, tag):\n nodes = []\n if root.tag == tag:\n nodes.append(root)\n for node in root:\n nodes.extend(get_nodes_by_tag(node, tag))\n return nodes\n\ndef assign_ids_to_nodes(ccg_tree, sentence_number, current=0):\n ccg_tree.set('id', 's' + str(sentence_number) + '_sp' + str(current))\n current += 1\n for node in ccg_tree:\n current = assign_ids_to_nodes(node, sentence_number, current)\n return current\n\ndef rename_attributes(ccg_root, src_attribute, trg_attribute):\n if src_attribute in ccg_root.attrib:\n ccg_root.set(trg_attribute, ccg_root.get(src_attribute))\n del ccg_root.attrib[src_attribute]\n for child_node in ccg_root:\n rename_attributes(child_node, src_attribute, trg_attribute)\n\ndef assign_values_in_feat_structs(ccg_root):\n assert 'category' in ccg_root.attrib, 'Category field not present in node {0}'\\\n .format(etree.tostring(ccg_root, pretty_print=True))\n category = ccg_root.get('category')\n category_assigned_value = re.sub(r'([,\\]])', r'=true\\1', category)\n ccg_root.set('category', category_assigned_value)\n for child_node in ccg_root:\n assign_values_in_feat_structs(child_node)\n\ndef assign_child_info(ccg_tree, sentence_number, 
tokens_node):\n if len(ccg_tree) == 0:\n token_position = ccg_tree.get('start')\n ccg_tree.set('terminal', 't' + str(sentence_number) + '_' + str(token_position))\n else:\n child_str = ' '.join([child_node.get('id') for child_node in ccg_tree])\n ccg_tree.set('child', child_str)\n ccg_tree.set('pos', \"None\")\n for child_node in ccg_tree:\n assign_child_info(child_node, sentence_number, tokens_node)\n\ndef flatten_and_rename_nodes(ccg_root):\n spans = []\n ccg_root.tag = 'span'\n spans.append(ccg_root)\n for child_node in ccg_root:\n spans.extend(flatten_and_rename_nodes(child_node))\n return spans\n\ndef candc_to_transccg(ccg_tree, sentence_number):\n # Obtain the <tokens> subtree and store it in variable tokens_node.\n tokens = get_nodes_by_tag(ccg_tree, 'lf')\n for i, token in enumerate(tokens):\n token.tag = 'token'\n token.set('id', 't' + str(sentence_number) + '_' + str(i))\n # Prefix every surface and base form with an underscore.\n # This is useful to avoid collisions of reserved words (e.g. \"some\", \"all\")\n # in nltk or coq. We also substitute dots '.' by 'DOT'.\n word = normalize_string(token.get('word'), 'surf')\n lemma = normalize_string(token.get('lemma'), 'base')\n token.set('surf', word)\n token.set('base', lemma)\n del token.attrib['word']\n del token.attrib['lemma']\n tokens_node = etree.Element('tokens')\n for token in tokens:\n tokens_node.append(copy.deepcopy(token))\n # Obtain the <ccg> subtree and store it in variable ccg_node.\n ccg_tree.set('root', 's' + str(sentence_number) + '_sp0')\n ccg_tree.set('id', 's' + str(sentence_number) + '_ccg0')\n # Assign an ID to every node, in depth order.\n ccg_root = ccg_tree[0]\n ccg_root.set('root', 'true')\n assign_ids_to_nodes(ccg_root, sentence_number)\n assign_child_info(ccg_root, sentence_number, tokens_node)\n # Rename attributes.\n rename_attributes(ccg_root, 'cat', 'category')\n rename_attributes(ccg_root, 'type', 'rule')\n # Assign values to feature structures. E.g. 
S[adj] --> S[adj=true]\n assign_values_in_feat_structs(ccg_root)\n # Flatten structure.\n spans = flatten_and_rename_nodes(ccg_root)\n for child_span in spans:\n ccg_tree.append(child_span)\n if child_span.get('id').endswith('sp0'):\n child_span.set('root', 'true')\n sentence_node = etree.Element('sentence')\n sentence_node.append(tokens_node)\n sentence_node.append(ccg_tree)\n return sentence_node\n\ndef normalize_string(raw_string, attribute):\n normalized = raw_string\n if attribute == 'base':\n normalized = normalized.lower()\n return normalized\n\ndef make_transccg_xml_tree(transccg_trees):\n \"\"\"\n Create the structure:\n <root>\n <document>\n <sentences>\n <sentence id=\"s1\">\n ...\n </sentence>\n </sentences>\n </document>\n </root>\n \"\"\"\n sentences_node = etree.Element('sentences')\n for transccg_tree in transccg_trees:\n sentences_node.append(transccg_tree)\n document_node = etree.Element('document')\n document_node.append(sentences_node)\n root_node = etree.Element('root')\n root_node.append(document_node)\n return root_node\n\ndef candc2transccg(candc_trees):\n parser = etree.XMLParser(remove_blank_text=True)\n root = etree.fromstring(candc_trees, parser)\n #root = xml_tree.getroot()\n ccg_trees = root.findall('ccg')\n\n transccg_trees = []\n for i, ccg_tree in enumerate(ccg_trees):\n transccg_tree = candc_to_transccg(ccg_tree, i)\n transccg_trees.append(transccg_tree)\n\n transccg_xml_tree = make_transccg_xml_tree(transccg_trees)\n # transccg_xml_tree.write(pretty_print=True, encoding='utf-8')\n parse_result = etree.tostring(transccg_xml_tree, xml_declaration=True, pretty_print=True)\n return parse_result\n\ndef parse(parser_name, sentence):\n parse_result = \"\"\n f = open(\"../data/parser_location.txt\",\"r\")\n locations = f.readlines()\n f.close()\n candc_dir = locations[0].split(\":\")[1].strip()\n if parser_name == \"candc\":\n # Parse using C&C.\n command = \"echo \"+sentence+\"|\"+candc_dir+\"bin/candc --models \"+candc_dir+\"models --candc-printer xml\"\n result = subprocess.Popen(command, shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE)\n out, err = result.communicate()\n parse_result = candc2transccg(out)\n return parse_result\n\ndef check_monotonicity(determiner):\n nounmono, verbmono = \"non_monotone\", \"non_monotone\"\n upward_noun = [\"some\", \"a\"]\n upward_verb = [\"every\", \"each\", \"all\", \"some\", \"both\", \"most\", \"many\", \"several\", \"this\", \"that\", \"a\", \"the\"]\n downward_noun = [\"every\", \"each\", \"all\", \"no\", \"neither\", \"any\", \"never\"]\n downward_verb = [\"no\", \"neither\", \"any\", \"never\", \"few\"]\n if determiner in upward_noun:\n nounmono = \"upward_monotone\"\n if determiner in upward_verb:\n verbmono = \"upward_monotone\"\n if determiner in downward_noun:\n nounmono = \"downward_monotone\"\n if determiner in downward_verb:\n verbmono = \"downward_monotone\"\n return nounmono, verbmono\n\ndef check_label(monotonicity, mode):\n modegroup = \"\"\n if re.search(\"hypo\", mode):\n modegroup = \"down\"\n elif re.search(\"hyper\", mode):\n modegroup = \"up\"\n elif mode == \"simple\":\n modegroup = \"up\"\n if monotonicity == \"upward_monotone\" and modegroup == \"up\":\n return \"entailment\"\n elif monotonicity == \"upward_monotone\" and modegroup == \"down\":\n return \"neutral\"\n elif monotonicity == \"downward_monotone\" and modegroup == \"up\":\n return \"neutral\"\n elif monotonicity == \"downward_monotone\" and modegroup == \"down\":\n return \"entailment\"\n else:\n return \"neutral\"\n\ndef 
rev_label(gold_label, monotonicity):\n #reverse the gold_label\n if monotonicity == \"non_monotone\":\n return \"neutral\"\n elif gold_label == \"entailment\":\n return \"neutral\"\n elif gold_label == \"neutral\":\n return \"entailment\"\n\ndef rev_mono(monotonicity):\n #reverse the polarity\n if monotonicity == \"non_monotone\":\n return \"non_monotone\"\n elif monotonicity == \"downward_monotone\":\n return \"upward_monotone\"\n elif monotonicity == \"upward_monotone\":\n return \"downward_monotone\"\n\ndef main():\n parser = etree.XMLParser(remove_blank_text=True)\n files = glob.glob(\"../data/pmb-2.1.0/data/*/*/*/en.drs.xml\")\n #files = glob.glob(\"../data/pmb-2.1.0/data/gold/*/*/en.drs.xml\")\n df_list = []\n determiners = [\"every\", \"each\", \"all\", \"some\", \"no\", \"both\", \"neither\", \"most\", \"many\", \"any\",\\\n \"several\", \"exactly\"]\n #determiners = [\"each\"]\n floating_list = [\"both\", \"all\", \"each\"]\n #a ,the, this, that\n for determiner in determiners:\n target_files = []\n nounmono, verbmono = check_monotonicity(determiner)\n for file in files:\n filename = re.search(\"\\/data\\/pmb-2.1.0\\/data\\/(.*?)\\/en.drs.xml\", file).group(1)\n try:\n tree = etree.parse(\"../data/pmb-2.1.0/data/\"+filename+\"/en.drs.xml\", parser)\n words = tree.xpath(\"//taggedtokens/tagtoken/tags/tag[@type='lemma']/text()\")\n if determiner in words:\n target_files.append(filename)\n except:\n continue\n\n results = pd.DataFrame(index=[], columns=['filename', 'determiner', 'monotonicity', 'gold_label', 'replace_target', 'replace_source', 'replace_mode', 'ori_sentence', 'new_sentence'])\n #target_files = [\"silver/p47/d2720\"]\n for target in target_files:\n #print(target)\n try:\n tree2 = etree.parse(\"../data/pmb-2.1.0/data/\"+target+\"/en.drs.xml\", parser)\n #semtag = tree.xpath(\"//taggedtokens/tagtoken/tags/tag[@type='sem']/text()\")\n #perid = tree.xpath(\"//taggedtokens/tagtoken/tags/tag[@type='sem'][contains(text(), 'PER')]/../../@xml:id\")\n impid = tree2.xpath(\"//taggedtokens/tagtoken/tags/tag[@type='sem'][contains(text(), 'IMP')]/../../@xml:id\")\n negid = tree2.xpath(\"//taggedtokens/tagtoken/tags/tag[@type='sem'][contains(text(), 'NOT')]/../../@xml:id\")\n if len(negid) > 0 or len(impid) > 0:\n # reverse monotonicity if negation or implication exists\n print(target+\": contains negation or implication. 
reverse monotonicity\\n\")\n nounmono = rev_mono(nounmono)\n verbmono = rev_mono(verbmono)\n queid = tree2.xpath(\"//taggedtokens/tagtoken/tags/tag[@type='sem'][contains(text(), 'QUE')]/../../@xml:id\")\n firstpos = tree2.xpath(\"//taggedtokens/tagtoken[@xml:id='i1001']/tags/tag[@type='pos']/text()\")\n #tree2 = etree.parse(\"../data/pmb-2.1.0/data/gold/\"+target+\"/en.drs.xml\", parser)\n IDs = len(tree2.xpath(\"//xdrs\"))+1\n for ID in range(1, IDs):\n floating_flg = 0\n noun, newnoun, verb, newverb = \"\", \"\", \"\", \"\"\n verbs = []\n nouns = []\n words = []\n words = tree2.xpath(\"//taggedtokens/tagtoken/tags/tag[@type='tok']/text()\")\n if len(words) <= 5:\n # remove less than 5 words\n print(target+\": is less than 5 words\\n\")\n continue\n if len(firstpos) > 0:\n meirei = firstpos[0]\n if re.search(\"^VB\", meirei):\n # remove imperatives\n print(target+\": is meireikei\\n\")\n continue\n if \"\\\"\" in words or len(queid) > 0:\n # remove questions\n print(target+\": contains quotation or question\\n\")\n continue\n sentence = \" \".join(words)\n sentence = re.sub(\"ø \", \"\", sentence)\n if determiner == \"no\":\n # remove collocations\n if re.search(\"no one\", sentence) or re.search(\"No one\", sentence) or re.search(\"No doubt\", sentence) or re.search(\"no doubt\", sentence) or re.search(\"No ,\", sentence):\n continue\n #print(sentence)\n parse_result = parse(\"candc\", sentence)\n #doc = minidom.parseString(parse_result)\n #print(doc.toxml()) \n tree3 = etree.fromstring(parse_result, parser)\n target_id = tree3.xpath(\"//ccg/span[@base='\" + determiner + \"']/@id\")\n verb_id = []\n child_ids, child_verb_ids = [], []\n #print(target_id)\n\n # detect the parent node of NP and VP\n while True:\n parent_id = tree3.xpath(\"//ccg/span[contains(@child, '\" + target_id[0] + \"')]/@id\")\n parent_category = tree3.xpath(\"//ccg/span[contains(@child, '\" + target_id[0] + \"')]/@category\")[0]\n #print(parent_category)\n if not re.search(\"^NP\\[?\", parent_category):\n tmp4 = tree3.xpath(\"//ccg/span[contains(@child, '\" + target_id[0] + \"')]/@child\")\n if len(tmp4) > 0:\n verb_id = tmp4[0].split(\" \")\n verb_id.remove(target_id[0])\n verb_base = tree3.xpath(\"//ccg/span[contains(@id, '\" + verb_id[0] + \"')]/@base\")\n if 'be' in verb_base and determiner in floating_list:\n #floating\n floating_flg = 1\n break\n else:\n target_id = parent_id\n\n #print(target_id, verb_id)\n # extract the whole NP subtree\n list_target_id = target_id[0].split(\" \")\n while True:\n childid = []\n for parentid in list_target_id:\n tmp = tree3.xpath(\"//ccg/span[contains(@id, '\" + parentid + \"')]/@child\")\n if len(tmp) > 0:\n childid.extend(tmp[0].split(\" \"))\n if len(childid) == 0:\n break\n else:\n child_ids.extend(childid)\n list_target_id = childid\n \n # extract the whole VP subtree\n list_verb_id = verb_id[0].split(\" \")\n while True:\n childid = []\n for parentid in list_verb_id:\n tmp5 = tree3.xpath(\"//ccg/span[contains(@id, '\" + parentid + \"')]/@child\")\n if len(tmp5) > 0:\n childid.extend(tmp5[0].split(\" \"))\n if len(childid) == 0:\n break\n else:\n child_verb_ids.extend(childid)\n list_verb_id = childid\n\n for nounphrase in sorted(child_ids, key=lambda x:int((re.search(r\"sp([0-9]+)\", x)).group(1))):\n tmp2 = tree3.xpath(\"//ccg/span[@id='\" + nounphrase + \"']/@surf\")\n if len(tmp2) > 0:\n nouns.extend(tmp2)\n #print(nounphrase, nouns)\n \n for verbphrase in sorted(child_verb_ids, key=lambda x:int((re.search(r\"sp([0-9]+)\", x)).group(1))):\n tmp3 = 
tree3.xpath(\"//ccg/span[@id='\" + verbphrase + \"']/@surf\")\n if len(tmp3) > 0:\n verbs.extend(tmp3)\n\n if floating_flg == 1:\n # remove floating\n continue\n # replace an subjective word by its hypernym and hyponym\n elif len(nouns) > 0 and len(verbs) > 0:\n noun = \" \".join(nouns)\n newnoun = nouns[-1]\n newnounpos = tree3.xpath(\"//ccg/span[@surf='\" + newnoun + \"']/@pos\")[0]\n if re.search(\"^PRP\", newnounpos):\n # remove pronouns\n print(target+\": is pronoun\\n\")\n continue\n if re.search(\"^NNP\", newnounpos):\n # replace its specific hypernym if a proper noun exists\n print(target+\" contains koyumeishi\\n\")\n semtag = tree2.xpath(\"//taggedtokens/tagtoken/tags/tag[@type='tok' and text()='\" + newnoun + \"']/following-sibling::tag[@type='sem']/text()\")\n if len(semtag) > 0:\n if semtag[0] == \"PER\" or semtag[0] == \"GPO\":\n newnoun = \"someone\"\n elif semtag[0] == \"GPE\" or semtag[0] == \"GEO\":\n newnoun = \"somewhere\"\n else:\n print(target+\" contains other semtag\"+semtag[0]+\"\\n\")\n newnoun = \"something\"\n results = replace_sentence(determiner, nounmono, noun, newnoun, sentence, results, target)\n continue\n if len(nouns) > 2:\n newnewnoun = determiner + \" \" + nouns[-1]\n results = replace_sentence(determiner, nounmono, noun, newnewnoun, sentence, results, target)\n verb = \" \".join(verbs)\n newverb = verbs[-1]\n #print(results)\n # replace hypernym and hyponym using senseid\n nounsense = tree2.xpath(\"//taggedtokens/tagtoken/tags/tag[@type='tok' and text()='\" + newnoun + \"']/following-sibling::tag[@type='wordnet']/text()\")\n verbsense = tree2.xpath(\"//taggedtokens/tagtoken/tags/tag[@type='tok' and text()='\" + newverb + \"']/following-sibling::tag[@type='wordnet']/text()\")\n if nounsense[0] == 'O':\n nounsense = [str(lesk(words, newnoun, 'n'))[8:-2]]\n if verbsense[0] == 'O':\n verbsense = [str(lesk(words, newverb, 'v'))[8:-2]]\n results = replace_sentence_WN_nv(determiner, nounmono, verbmono, newnoun, nounsense[0], newverb, verbsense[0], sentence, results, target)\n \n elif len(nouns) > 0:\n # replace an objective word by its hypernym and hyponym\n noun = \" \".join(nouns)\n newnoun = nouns[-1] \n if len(nouns) > 2:\n newnewnoun = determiner + \" \" + nouns[-1]\n results = replace_sentence(determiner, nounmono, noun, newnewnoun, sentence, results, target)\n #print(results)\n # replace hypernym and hyponym using senseid\n nounsense = tree2.xpath(\"//taggedtokens/tagtoken/tags/tag[@type='tok' and text()='\" + newnoun + \"']/following-sibling::tag[@type='wordnet']/text()\")\n if nounsense[0] == 'O':\n nounsense = [str(lesk(words, newnoun, 'n'))[8:-2]]\n results = replace_sentence_WN(determiner, nounmono, newnoun, nounsense[0], sentence, results, target)\n\n except Exception as e:\n log.exception(\"ERROR target: \"+target)\n log.exception(e)\n continue\n results.to_csv('../output_en/leskexppmb_'+determiner+'.tsv', sep='\\t')\n\n\n\ndef format_files():\n datas = glob.glob(\"../output_en/leskexppmb_*.tsv\")\n alldata = pd.DataFrame(index=[], columns=['filename', 'determiner', 'monotonicity', 'gold_label', 'replace_target', 'replace_source', 'replace_mode', 'ori_sentence', 'new_sentence'])\n\n for d in datas:\n dataframe = pd.read_csv(d, sep=\"\\t\", index_col=0)\n if len(dataframe) > 0:\n alldata = alldata.append(dataframe)\n\n \n alldata.to_csv(\"../output_en/pmb_train.tsv\", sep='\\t', index=False)\n \n # MultiNLI train format\n results = pd.DataFrame(index=[], 
columns=['index','promptID','pairID','genre','sentence1_binary_parse','sentence2_binary_parse','sentence1_parse','sentence2_parse','sentence1','sentence2','label1','gold_label'])\n results['label1'] = alldata['gold_label']\n results['gold_label'] = alldata['gold_label']\n results['sentence1'] = alldata['ori_sentence']\n results['sentence2'] = alldata['new_sentence']\n results['index'] = results.reset_index()\n results['promptID'] = results.reset_index()\n results['pairID'] = results.reset_index()\n results.to_csv(\"../output_en/pmb_train_mnliformat.tsv\", sep=\"\\t\", index=False, header=False)\n\n\n\nif __name__ == '__main__':\n main()\n format_files()\n \n"
] | [
[
"pandas.DataFrame",
"pandas.read_csv",
"pandas.Series"
]
] |
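The create_dataset_PMB.py script above generates monotonicity-inference pairs by replacing a noun or verb with a WordNet hypernym or hyponym and deriving the entailment label from the determiner's monotonicity direction. A condensed sketch of that labelling rule together with the WordNet lookup, assuming the nltk wordnet corpus has been downloaded; check_label here is simplified to the hypernym/hyponym case and is not the script's exact function:

from nltk.corpus import wordnet as wn  # assumes nltk.download('wordnet') has been run


def check_label(monotonicity: str, direction: str) -> str:
    # entailment only when the replacement direction matches the licensed direction
    if monotonicity == "upward_monotone" and direction == "hypernym":
        return "entailment"
    if monotonicity == "downward_monotone" and direction == "hyponym":
        return "entailment"
    return "neutral"


def hypernym_lemmas(word: str) -> list:
    # lemma names of the first noun sense's direct hypernyms, underscores as spaces
    synsets = wn.synsets(word, pos=wn.NOUN)
    if not synsets:
        return []
    return [name.replace("_", " ")
            for hyper in synsets[0].hypernyms()
            for name in hyper.lemma_names()]


if __name__ == "__main__":
    print(check_label("downward_monotone", "hyponym"))  # entailment
    print(check_label("upward_monotone", "hyponym"))    # neutral
    print(hypernym_lemmas("dog"))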
bioinsilico/LSTM_CONV2D_RRI | [
"77561adbcf3bdaa24d01c1da90c7ffb1011083b5"
] | [
"load_data.py"
] | [
"import subprocess\nimport numpy as np\nfrom aa_to_ch import *\nimport random\n\n\nr_features = dict()\nl_features = dict()\n\ndef create_data(r,l):\n R = [r]\n L = [l]\n dR = dict()\n dL = dict()\n\n dR[r] = 0\n for i in range( 0,len(r_features[r]['nn']) ):\n dR[ r_features[r]['nn'][i] ] = r_features[r]['nn_dist'][i] \n\n dL[l] = 0\n for i in range( 0,len(l_features[l]['nn']) ):\n dL[ l_features[l]['nn'][i] ] = l_features[l]['nn_dist'][i] \n\n R.extend( r_features[r]['nn'] )\n L.extend( l_features[l]['nn'] )\n \n matrix = []\n for i in R:\n row = []\n for j in L:\n data = []\n data.extend( r_features[ i ][ 'aa' ] )\n data.extend( r_features[ i ][ 'features' ] )\n data.append( dR[i] )\n data.extend( l_features[ j ][ 'aa' ] )\n data.extend( l_features[ j ][ 'features' ] )\n data.append( dL[j] )\n row.append(data)\n matrix.append(row)\n return matrix\n\ndef collect_complex(r_features_file,l_features_file,rri_file,pdb,n_features=6,n_neigh=8):\n fh = open(r_features_file,'r')\n for i in fh:\n r = i.strip()\n R=r.split(\"\\t\")\n r_features[ R[0] ] = dict()\n r_features[ R[0] ][ 'aa_' ] = R[1] \n r_features[ R[0] ][ 'aa' ] = [ float(x) for x in AA[ R[1] ]]\n r_features[ R[0] ][ 'features' ] = [ float(x) for x in R[2:(n_features+2)] ]\n r_features[ R[0] ][ 'nn' ] = R[(n_features+2):(n_features+n_neigh+2)]\n r_features[ R[0] ][ 'nn_dist' ] = [ float(x) for x in R[(n_features+n_neigh+2):(n_features+n_neigh+2+n_neigh*2)] ]\n fh.close()\n \n fh = open(l_features_file,'r')\n for i in fh:\n r = i.strip()\n R=r.split(\"\\t\")\n l_features[ R[0] ] = dict()\n l_features[ R[0] ][ 'aa_' ] = R[1]\n l_features[ R[0] ][ 'aa' ] = [ float(x) for x in AA[ R[1] ]]\n l_features[ R[0] ][ 'features' ] = [ float(x) for x in R[2:(n_features+2)]]\n l_features[ R[0] ][ 'nn' ] = R[(n_features+2):(n_features+n_neigh+2)]\n l_features[ R[0] ][ 'nn_dist' ] = [ float(x) for x in R[(n_features+n_neigh+2):(n_features+n_neigh+2+n_neigh*2)] ]\n fh.close()\n \n rri = dict()\n fh = open(rri_file,'r')\n for i in fh:\n r = i.strip()\n R=r.split(\"\\t\")\n rri[ R[0]+\":\"+R[1] ] = 1\n fh.close()\n\n np = 0\n collection = []\n labels = []\n negatives = []\n\n #print(\"\\tbatching positives\")\n for rr in rri:\n R = rr.split(\":\")\n r = R[0]\n l = R[1]\n np += 1\n __data = create_data(r,l)\n collection.append( create_data(r,l) )\n labels.append([1.0,0.0])\n\n #print(\"\\tbatching negatives\")\n R = list(r_features.keys())\n L = list(l_features.keys())\n while np>0:\n r = random.choice(R)\n l = random.choice(L)\n if not r+\":\"+l in rri:\n __data = create_data(r,l)\n collection.append( __data )\n labels.append([0.0,1.0])\n np-=1\n return [collection, labels]\n \n\ndef __batch(pdb,n_features=6,n_neigh=8):\n rri_file = \"/home/joan/tools/RRI/DEEP_LEARNING/pairPred_contactMap/\"+pdb+\".int\"\n r_features_file = \"/home/joan/tools/RRI/DEEP_LEARNING/features/\"+pdb+\"_r_u.nn.tsv\"\n l_features_file = \"/home/joan/tools/RRI/DEEP_LEARNING/features/\"+pdb+\"_l_u.nn.tsv\"\n return collect_complex(r_features_file,l_features_file,rri_file,pdb,n_features=6,n_neigh=8)\n\ndef random_batch_excluding(out):\n batch_x = []\n labels_y = []\n fh=open('/home/joan/tools/RRI/DEEP_LEARNING/pdb_list.tsv','r')\n for i in fh:\n pdb = i.strip()\n if i != out:\n #print(pdb)\n [batch,labels] = __batch(pdb)\n batch_x.extend(batch)\n labels_y.extend(labels_y)\n fh.close()\n return [np.array(batch_x),np.array(labels_y)]\n\ndef random_batch_of(pdb):\n batch_x = []\n labels_y = []\n [batch,labels] = __batch(pdb)\n batch_x.extend(batch)\n labels_y.extend(labels_y)\n 
return [np.array(batch_x),np.array(labels_y)]\n\n"
] | [
[
"numpy.array"
]
] |
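collect_complex above balances each training batch by drawing one random non-interacting receptor-ligand pair for every annotated contact (note that random_batch_of and random_batch_excluding extend labels_y with itself rather than with labels, so their returned label arrays come back empty). A self-contained sketch of that balanced negative sampling, using hypothetical residue identifiers rather than the repository's feature files:

import random


def balanced_pairs(positives, receptors, ligands, seed=0):
    # one randomly drawn non-interacting (receptor, ligand) pair per known contact
    rng = random.Random(seed)
    positive_set = set(positives)
    pairs = [(r, l, 1.0) for r, l in positives]
    needed = len(positives)
    while needed > 0:
        r, l = rng.choice(receptors), rng.choice(ligands)
        if (r, l) not in positive_set:  # assumes contacts are a small subset of all pairs
            pairs.append((r, l, 0.0))
            needed -= 1
    return pairs


if __name__ == "__main__":
    receptors = [f"R{i}" for i in range(10)]  # hypothetical residue identifiers
    ligands = [f"L{i}" for i in range(10)]
    contacts = [("R1", "L2"), ("R3", "L4")]
    for r, l, label in balanced_pairs(contacts, receptors, ligands):
        print(r, l, label)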
nicholasbao/nlp_job | [
"39fb8118c5f7c2674dc38cada86520c634104c6c"
] | [
"text_similarity_cnn/run_cnn.py"
] | [
"#!/usr/bin/python\n# -*- coding: utf-8 -*-\n\nfrom __future__ import print_function\n\nimport os\nimport sys\nimport time\nfrom datetime import timedelta\n\nimport numpy as np\nimport tensorflow as tf\nfrom sklearn import metrics\n\nfrom cnn_model import TCNNConfig, TextCNN\nfrom data.cnews_loader import read_vocab, read_category, batch_iter, process_file, build_vocab\n\nbase_dir = 'data/cnews'\ntrain_dir = os.path.join(base_dir, 'train.csv')\ntest_dir = os.path.join(base_dir, 'test.csv')\nval_dir = os.path.join(base_dir, 'val.csv')\nvocab_dir = os.path.join(base_dir, 'cnews.vocab.txt')\n\nsave_dir = 'checkpoints/textcnn'\nsave_path = os.path.join(save_dir, 'best_validation') # 最佳验证结果保存路径\n\n\ndef get_time_dif(start_time):\n \"\"\"获取已使用时间\"\"\"\n end_time = time.time()\n time_dif = end_time - start_time\n return timedelta(seconds=int(round(time_dif)))\n\n\ndef feed_data(xl_batch, xr_batch, y_batch, keep_prob):\n feed_dict = {\n model.input_xl: xl_batch,\n model.input_xr: xr_batch,\n model.input_y: y_batch,\n model.keep_prob: keep_prob\n }\n return feed_dict\n\n\ndef evaluate(sess, xl_,xr_, y_):\n \"\"\"评估在某一数据上的准确率和损失\"\"\"\n data_len = len(xl_)\n batch_eval = batch_iter(xl_,xr_, y_, 128)\n total_loss = 0.0\n total_acc = 0.0\n for xl_batch, xr_batch, y_batch in batch_eval:\n batch_len = len(xl_batch)\n feed_dict = feed_data(xl_batch, xr_batch, y_batch, 1.0)\n loss, acc = sess.run([model.loss, model.acc], feed_dict=feed_dict)\n total_loss += loss * batch_len\n total_acc += acc * batch_len\n\n return total_loss / data_len, total_acc / data_len\n\n\ndef train():\n print(\"Configuring TensorBoard and Saver...\")\n # 配置 Tensorboard,重新训练时,请将tensorboard文件夹删除,不然图会覆盖\n tensorboard_dir = 'tensorboard/textcnn'\n if not os.path.exists(tensorboard_dir):\n os.makedirs(tensorboard_dir)\n\n tf.summary.scalar(\"loss\", model.loss)\n tf.summary.scalar(\"accuracy\", model.acc)\n merged_summary = tf.summary.merge_all()\n writer = tf.summary.FileWriter(tensorboard_dir)\n\n # 配置 Saver\n saver = tf.train.Saver()\n if not os.path.exists(save_dir):\n os.makedirs(save_dir)\n\n print(\"Loading training and validation data...\")\n # 载入训练集与验证集\n start_time = time.time()\n xl_train, xr_train, y_train = process_file(train_dir, word_to_id, config.seq_length)\n print(\"y_train_is********* \",y_train)\n xl_val, xr_val, y_val = process_file(val_dir, word_to_id, config.seq_length)\n time_dif = get_time_dif(start_time)\n print(\"Time usage:\", time_dif)\n\n # 创建session\n session = tf.Session()\n session.run(tf.global_variables_initializer())\n writer.add_graph(session.graph)\n\n print('Training and evaluating...')\n start_time = time.time()\n total_batch = 0 # 总批次\n best_acc_val = 0.0 # 最佳验证集准确率\n last_improved = 0 # 记录上一次提升批次\n require_improvement = 1000 # 如果超过1000轮未提升,提前结束训练\n\n flag = False\n for epoch in range(config.num_epochs):\n print('Epoch:', epoch + 1)\n batch_train = batch_iter(xl_train, xr_train, y_train, config.batch_size)\n for xl_batch, xr_batch, y_batch in batch_train:\n feed_dict = feed_data(xl_batch, xr_batch, y_batch, config.dropout_keep_prob)\n\n if total_batch % config.save_per_batch == 0:\n # 每多少轮次将训练结果写入tensorboard scalar\n s = session.run(merged_summary, feed_dict=feed_dict)\n writer.add_summary(s, total_batch)\n\n if total_batch % config.print_per_batch == 0:\n # 每多少轮次输出在训练集和验证集上的性能\n feed_dict[model.keep_prob] = 1.0\n loss_train, acc_train = session.run([model.loss, model.acc], feed_dict=feed_dict)\n loss_val, acc_val = evaluate(session, xl_val, xr_val, y_val) # todo\n\n if acc_val > best_acc_val:\n 
# 保存最好结果\n best_acc_val = acc_val\n last_improved = total_batch\n saver.save(sess=session, save_path=save_path)\n improved_str = '*'\n else:\n improved_str = ''\n\n time_dif = get_time_dif(start_time)\n msg = 'Iter: {0:>6}, Train Loss: {1:>6.2}, Train Acc: {2:>7.2%},' \\\n + ' Val Loss: {3:>6.2}, Val Acc: {4:>7.2%}, Time: {5} {6}'\n print(msg.format(total_batch, loss_train, acc_train, loss_val, acc_val, time_dif, improved_str))\n\n feed_dict[model.keep_prob] = config.dropout_keep_prob\n session.run(model.optim, feed_dict=feed_dict) # 运行优化\n total_batch += 1\n\n if total_batch - last_improved > require_improvement:\n # 验证集正确率长期不提升,提前结束训练\n print(\"No optimization for a long time, auto-stopping...\")\n flag = True\n break # 跳出循环\n if flag: # 同上\n break\n\n\ndef test():\n print(\"Loading test data...\")\n start_time = time.time()\n xl_test, xr_test, y_test = process_file(test_dir, word_to_id, config.seq_length)\n\n session = tf.Session()\n session.run(tf.global_variables_initializer())\n saver = tf.train.Saver()\n saver.restore(sess=session, save_path=save_path) # 读取保存的模型\n\n print('Testing...')\n loss_test, acc_test = evaluate(session, xl_test, xr_test, y_test)\n msg = 'Test Loss: {0:>6.2}, Test Acc: {1:>7.2%}'\n print(msg.format(loss_test, acc_test))\n\n batch_size = 128\n data_len = len(xl_test)\n num_batch = int((data_len - 1) / batch_size) + 1\n\n y_test_cls = np.argmax(y_test, 1)\n y_pred_cls = np.zeros(shape=len(xl_test), dtype=np.int32) # 保存预测结果\n for i in range(num_batch): # 逐批次处理\n start_id = i * batch_size\n end_id = min((i + 1) * batch_size, data_len)\n feed_dict = {\n model.input_xl: xl_test[start_id:end_id],\n model.input_xr: xr_test[start_id:end_id],\n model.keep_prob: 1.0\n }\n y_pred_cls[start_id:end_id] = session.run(model.y_pred_cls, feed_dict=feed_dict)\n result = 0\n print(\"y_pred_cls is \",y_pred_cls)\n print(\"y_Test_cls is \",y_test_cls)\n for i in (zip(y_pred_cls, y_test_cls)):\n print(i)\n # 评估\n print(\"Precision, Recall and F1-Score...\")\n print(metrics.classification_report(y_test_cls, y_pred_cls, target_names=['0','1']))\n\n # 混淆矩阵\n print(\"Confusion Matrix...\")\n cm = metrics.confusion_matrix(y_test_cls, y_pred_cls)\n print(cm)\n\n time_dif = get_time_dif(start_time)\n print(\"Time usage:\", time_dif)\n\n\nif __name__ == '__main__':\n if len(sys.argv) != 2 or sys.argv[1] not in ['train', 'test']:\n raise ValueError(\"\"\"usage: python run_cnn.py [train / test]\"\"\")\n\n print('Configuring CNN model...')\n config = TCNNConfig()\n if not os.path.exists(vocab_dir): # 如果不存在词汇表,重建\n build_vocab(train_dir, vocab_dir, config.vocab_size)\n words, word_to_id = read_vocab(vocab_dir)\n config.vocab_size = len(words)\n model = TextCNN(config)\n\n if sys.argv[1] == 'train':\n train()\n else:\n test()\n"
] | [
[
"sklearn.metrics.confusion_matrix",
"tensorflow.summary.scalar",
"tensorflow.Session",
"tensorflow.train.Saver",
"sklearn.metrics.classification_report",
"numpy.argmax",
"tensorflow.summary.merge_all",
"tensorflow.summary.FileWriter",
"tensorflow.global_variables_initializer"
]
] |
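The training loop in run_cnn.py above stops once validation accuracy has gone require_improvement batches without improving over best_acc_val, saving a checkpoint whenever a new best is reached. That bookkeeping can be isolated into a small framework-agnostic helper; this is a sketch of the pattern, not code from the repository:

class EarlyStopping:
    """Track the best validation metric and signal when patience runs out."""

    def __init__(self, patience: int = 1000):
        self.patience = patience
        self.best = float("-inf")
        self.best_step = 0

    def update(self, step: int, metric: float) -> bool:
        # returns True once `patience` steps have passed without a new best
        if metric > self.best:
            self.best = metric
            self.best_step = step  # this is where a checkpoint would be saved
        return step - self.best_step > self.patience


if __name__ == "__main__":
    stopper = EarlyStopping(patience=3)
    for step, acc in enumerate([0.50, 0.60, 0.61, 0.60, 0.59, 0.58, 0.57]):
        if stopper.update(step, acc):
            print(f"no improvement for {stopper.patience} steps; best acc {stopper.best:.2f}")
            break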
jareddk/triton | [
"ea03d6207728315312d9cb6fa014f6a394b79c71"
] | [
"python/triton/ops/blocksparse/matmul.py"
] | [
"import triton\nimport triton._C.libtriton as libtriton\nimport torch\nimport os\nimport math\n\nsrc = triton.read(os.path.join(os.path.dirname(__file__), 'matmul.c'))\n\n##############\n# MAIN API #\n##############\nclass _matmul(torch.autograd.Function):\n \n sdd_cache = dict()\n dsd_cache = dict()\n dds_cache = dict()\n locks = dict()\n\n # Given an array sizes representing reduction size for each\n # column of a block-mode matrix multiplication,\n # performs load-balancing to achieve more smaller reductions\n # between `seg_size` elements\n @staticmethod\n def load_balance(sizes, block):\n # segment size\n # heuristics taken from OpenAI blocksparse code\n # https://github.com/openai/blocksparse/blob/master/blocksparse/matmul.py#L95\n max_size = sizes.max()\n min_size = sizes[sizes != 0].min()\n #if max_size > min_size * 2.0:\n # seg_max = max(triton.cdiv(max_size, 4), min_size*2)\n #else:\n # seg_max = max_size\n seg_max = max_size\n seg_min = max(triton.cdiv(seg_max, 4), 4)\n # split reduction into segments\n div = sizes // seg_max\n rem = sizes % seg_max\n packs = div + (sizes < seg_min).long() + (rem >= seg_min).long()\n width = packs.sum()\n segments = torch.empty(width, dtype=sizes.dtype)\n column = torch.empty_like(segments)\n lockid = torch.zeros_like(segments)\n maxid = torch.zeros_like(segments)\n nlocks = 0\n current = 0\n col_idx = 0\n for i in range(len(sizes)):\n d, r = div[i], rem[i]\n isempty = sizes[i] < seg_min\n last = current + d + (r >= seg_min) + isempty\n # column id\n column[current:last] = col_idx\n # lock id\n if d > 1 or (d == 1 and r >= seg_min):\n nlocks += 1\n lockid[current:last] = nlocks\n maxid[current:last] = last - current\n # segment size\n segments[current:current+d] = seg_max\n if r < seg_min and not isempty:\n segments[current+d-1] += r\n if r >= seg_min or isempty:\n segments[current+d] = r\n current = last\n col_idx += 1\n offsets = torch.zeros_like(segments)\n offsets[1:] = torch.cumsum(segments[:-1], dim=0)\n return segments, column, lockid, maxid, offsets\n \n @staticmethod\n def get_locks(size, dev):\n if dev not in _matmul.locks or \\\n size > _matmul.locks[dev].size(0):\n _matmul.locks[dev] = torch.zeros(size, dtype=torch.int32, device=dev)\n return _matmul.locks[dev]\n\n ##########################\n # SPARSE = DENSE x DENSE #\n ##########################\n\n @staticmethod\n def make_sdd_lut(layout, block, dtype, device):\n start_width = 128 // block\n superblocks = libtriton.superblock(layout.type(torch.int32), start_width)\n luts, widths, packs = [], [], []\n for size, nnz in superblocks:\n width = nnz.shape[0] // (size*size)\n h = nnz[:, 0]\n i = nnz[:, 1]\n j = nnz[:, 2]\n b = nnz[:, 3]\n lut = torch.stack((h, i, j, b), dim=1).view(-1).contiguous()\n luts.append(lut.type(torch.int32).to(device)) \n widths.append(width)\n packs.append(size)\n # create locks\n return luts, None, widths, packs\n\n @staticmethod\n def _sdd_matmul(a, b, trans_a, trans_b, trans_c,\n spdims, block, luts, num_locks, widths, packs):\n \n if trans_c:\n a, b = b, a\n trans_a, trans_b = not trans_b, not trans_a\n AS0 = a.size(0)\n AS1 = a.size(1)\n AS2 = a.size(3 if trans_a else 2)\n AS3 = a.size(2 if trans_a else 3)\n BS0 = b.size(0)\n BS1 = b.size(1)\n BS2 = b.size(3 if trans_b else 2)\n BS3 = b.size(2 if trans_b else 3)\n dtype = a.dtype\n device = a.device\n is_16_multiple = AS3 % 16 == 0\n is_32_multiple = AS3 % 32 == 0\n is_64_multiple = AS3 % 64 == 0\n if not is_16_multiple:\n raise ValueError('Reduction size for SDD must be a multiple of 16')\n # create 
kernel\n total_width = sum([width*pack*pack for width,pack in zip(widths, packs)])\n c = torch.empty((AS0, total_width, block, block), dtype=dtype, device=device)\n for lut, width, pack in zip(luts, widths, packs):\n num_lock = 1\n key = (block, device, a.dtype, b.dtype, trans_a, trans_b, trans_c, pack, is_32_multiple, is_64_multiple)\n if key not in _matmul.sdd_cache:\n defines = {'TM': block*pack, 'TN': block*pack,\n 'TMN': block*block*pack*pack, \n 'BLOCK': block, \n 'TK': 32, \n 'TYPE': dtype,\n 'STRIDE_AM': '1' if trans_a else 'lda', \n 'STRIDE_AK': 'lda' if trans_a else '1',\n 'STRIDE_BN': 'ldb' if trans_b else '1', \n 'STRIDE_BK': '1' if trans_b else 'ldb',\n 'STRIDE_CM': 'ldc', 'STRIDE_CN': '1',\n 'SDD': True, 'TZ': 1, 'NAME': 'sdd_kernel'}\n _matmul.sdd_cache[key] = triton.kernel(src, device=device, defines=defines)\n\n kernel = _matmul.sdd_cache[key]\n # create output\n locks = _matmul.get_locks(2*width*AS0*num_lock, a.device)\n # maximum grid size is 65535\n # so operation might be decomposed into multiple\n # kernel calls\n max_width = 49152\n for off_width in range(0, width, max_width):\n kernel(a.data_ptr(), b.data_ptr(), c.data_ptr(), \n a.stride(2), b.stride(2), block, \n a.stride(0), b.stride(0), c.stride(0),\n a.stride(1), b.stride(1), c.stride(0), \n AS2, AS2, AS3, off_width, lut.data_ptr(), locks.data_ptr(), num_lock, \n grid = lambda opt: [opt.TZ, min(max_width, width - off_width), AS0])\n # save for backward pass\n return c\n\n ##########################\n # DENSE = DENSE x SPARSE #\n # DENSE = SPARSE x DENSE #\n ##########################\n \n # Given a binary layout of 0s and 1s,\n # Construct look-up table for efficient execution on GPUs\n @staticmethod\n def make_dxx_lut(layout, block, step, trans, device, transform = lambda idx: idx):\n # load-balancing\n _empty = torch.tensor([], dtype=torch.int64, device=layout.device)\n segments = _empty.clone()\n column = _empty.clone()\n depth = _empty.clone()\n lockid = _empty.clone()\n maxid = _empty.clone()\n offsets = _empty.clone()\n current_offset = 0\n current_maxid = 0\n for z in range(layout.size(0)):\n if trans:\n sizes = torch.sum(layout[z, :, :], 1)\n else:\n sizes = torch.sum(layout[z, :, :], 0)\n z_segments, z_column, z_lockid, z_maxid, z_offsets = _matmul.load_balance(sizes, block)\n z_depth = z * torch.ones_like(z_segments)\n z_lockid[z_lockid > 0] += current_maxid\n current_maxid = z_lockid.max()\n # concatenate depth\n segments = torch.cat((segments, z_segments))\n column = torch.cat((column, z_column))\n depth = torch.cat((depth, z_depth))\n maxid = torch.cat((maxid, z_maxid))\n offsets = torch.cat((offsets, current_offset + z_offsets))\n lockid = torch.cat((lockid, z_lockid))\n current_offset += layout[z, :, :].sum()\n segments *= step\n # pointer increments\n if trans:\n nnz = layout.nonzero(as_tuple=False)\n else:\n nnz = layout.transpose(1, 2).nonzero(as_tuple=False)\n num_blocks = nnz.size(0)\n offsets = torch.min(offsets, (num_blocks - 1)*torch.ones_like(offsets))\n idx = transform(nnz[:, 2]*block)\n xincs = idx.clone() \n xincs[1:] -= idx[:-1]\n # divide block into multiple steps\n div = block // step\n xincs = xincs.view(-1, 1).repeat(1, div)\n xincs[:, 1:] = step\n xincs[:, 0 ] -= (div-1)*step\n # first increment for each reduction is actually the offset\n xincs[offsets[segments>0], 0] = idx[offsets[segments>0]]\n xincs = xincs.view(-1)\n # block-mode input increments\n if trans:\n widx = torch.arange(num_blocks)\n else:\n widx = _empty.clone()\n current_offset = 0\n for z in 
range(layout.size(0)):\n layoutw = layout[z, :, :].clone()\n msum = layoutw.sum()\n layoutw[layoutw > 0] = 1 + torch.arange(msum)\n widx = torch.cat((widx, current_offset + layoutw.T[layoutw.T > 0] - 1))\n current_offset += msum\n widx = widx\n wincs = widx*block*block\n wincs[1:] -= widx[:-1]*block*block\n wincs = wincs.view(-1, 1).repeat(1, div)\n if trans:\n wincs[:, 1:] = step\n wincs[:, 0] -= (div-1)*step\n else:\n wincs[:, 1:] = step*block\n wincs[:, 0] -= (div - 1)*step*block\n wincs[offsets[segments>0], 0] = widx[offsets[segments>0]]\n wincs = wincs.view(-1)\n # adjust offset and segment size\n offsets *= 2*div\n segments *= div\n # create header\n width = column.size(0)\n offsets += 6*width\n header = torch.stack((offsets, segments, column, depth, lockid, maxid), dim=1).view(-1).contiguous()\n incs = torch.stack((xincs, wincs), dim=1).view(-1).contiguous()\n incs = torch.cat((incs, torch.zeros(2, device=incs.device, dtype=incs.dtype)))\n # create lut\n lut = torch.cat((header, incs))\n lut = lut.type(torch.int32).to(device)\n # create locks\n num_locks = max(1, lockid.max())\n return lut, num_locks, width, None\n\n @staticmethod\n def _dds_matmul(a, b, trans_a, trans_b, trans_c,\n spdims, block, lut, num_locks, width, packs):\n # shapes / dtypes\n AS0 = a.size(0)\n AS1 = a.size(1)\n AS2 = a.size(3 if trans_a else 2)\n AS3 = a.size(2 if trans_a else 3)\n BS0 = spdims[0]\n BS1 = block * spdims[2 if trans_b else 1]\n BS2 = block * spdims[1 if trans_b else 2]\n dtype = a.dtype\n # kernel\n key = (block, a.device, a.dtype, b.dtype, trans_a, trans_b, trans_c)\n if key not in _matmul.dds_cache:\n defines = {'TM': 128, \n 'TN': block, \n 'TK': 16, \n 'BLOCK': block,\n 'TYPE': dtype,\n 'STRIDE_AM': 1 if trans_a else 'lda',\n 'STRIDE_AK': 'lda' if trans_a else 1,\n 'STRIDE_BN': block if trans_b else 1, \n 'STRIDE_BK': 1 if trans_b else block,\n 'STRIDE_CM': '1' if trans_c else 'ldc',\n 'STRIDE_CN': 'ldc' if trans_c else '1',\n 'NAME': 'dds_kernel',\n 'DDS': True}\n _matmul.dds_cache[key] = triton.kernel(src, device=a.device, defines=defines)\n kernel = _matmul.dds_cache[key]\n # output\n CS0 = AS0\n CS1 = AS1\n CS2 = BS2 if trans_c else AS2\n CS3 = AS2 if trans_c else BS2\n locks = _matmul.get_locks(2*AS0*AS2//32*num_locks, a.device)\n c = torch.empty((CS0, CS1, CS2, CS3), dtype=dtype, device=a.device)\n kernel(a.data_ptr(), b.data_ptr(), c.data_ptr(), \n a.stride(2), block, c.stride(2), \n a.stride(0), b.stride(0), c.stride(0),\n a.stride(1), b.stride(1), c.stride(1),\n AS2, BS2, 0, 0, lut.data_ptr(), locks.data_ptr(), num_locks, \n grid = lambda opt: [width, triton.cdiv(AS2, opt.TM), AS0])\n return c\n \n @staticmethod\n def _dsd_matmul(a, b, trans_a, trans_b, trans_c,\n spdims, block, lut, num_locks, width, packs):\n # shapes / dtypes\n AS0 = spdims[0]\n AS1 = block * spdims[2 if trans_a else 1]\n AS2 = block * spdims[1 if trans_a else 2]\n BS0 = b.size(0)\n BS1 = b.size(1)\n BS2 = b.size(3 if trans_b else 2)\n BS3 = b.size(2 if trans_b else 3)\n dtype = a.dtype\n # kernel\n key = (block, a.device, a.dtype, b.dtype, trans_a, trans_b, trans_c)\n if key not in _matmul.dsd_cache:\n defines = {'TM': block, \n 'TN': 128, \n 'TK': 16, \n 'BLOCK': block,\n 'TYPE': dtype,\n 'STRIDE_AM': 1 if trans_a else block, \n 'STRIDE_AK': block if trans_a else 1,\n 'STRIDE_BN': 'ldb' if trans_b else '1',\n 'STRIDE_BK': '1' if trans_b else 'ldb',\n 'STRIDE_CM': '1' if trans_c else 'ldc',\n 'STRIDE_CN': 'ldc' if trans_c else '1',\n 'NAME': 'dsd_kernel',\n 'DSD': True}\n _matmul.dsd_cache[key] = 
triton.kernel(src, device=a.device, defines=defines)\n kernel = _matmul.dsd_cache[key]\n # output\n CS0 = BS0\n CS1 = BS1\n CS2 = BS3 if trans_c else AS1\n CS3 = AS1 if trans_c else BS3\n locks = _matmul.get_locks(2*BS0*BS3//32*num_locks, a.device)\n c = torch.empty((CS0, CS1, CS2, CS3), dtype=dtype, device=a.device)\n kernel(a.data_ptr(), b.data_ptr(), c.data_ptr(), \n block, b.stride(2), c.stride(2), \n a.stride(0), b.stride(0), c.stride(0),\n a.stride(1), b.stride(1), c.stride(1),\n BS3, AS1, 0, 0, lut.data_ptr(), locks.data_ptr(), num_locks, \n grid = lambda opt: [width, triton.cdiv(BS3, opt.TN), BS0])\n return c\n\n fn = {'sdd': _sdd_matmul.__get__(object),\n 'dsd': _dsd_matmul.__get__(object),\n 'dds': _dds_matmul.__get__(object)}\n\n @staticmethod\n def forward(ctx, a, b, trans_a, trans_b, trans_c,\n mode, spdims, block,\n c_lut, c_num_locks, c_width, c_packs, \n da_lut, da_num_locks, da_width, da_packs,\n db_lut, db_num_locks, db_width, db_packs):\n c = _matmul.fn[mode](a, b, trans_a, trans_b, trans_c, spdims, block, \n c_lut, c_num_locks, c_width, c_packs)\n # save for backward\n ctx.save_for_backward(a, b)\n ctx.da_num_locks = da_num_locks\n ctx.da_lut = da_lut\n ctx.da_width = da_width\n ctx.da_packs = da_packs\n ctx.db_lut = db_lut\n ctx.db_num_locks = db_num_locks\n ctx.db_width = db_width\n ctx.db_packs = db_packs\n ctx.mode = mode\n ctx.spdims = spdims\n ctx.block = block\n ctx.trans_a = trans_a\n ctx.trans_b = trans_b\n return c\n\n @staticmethod\n def backward(ctx, dc):\n # saved for backward\n a, b = ctx.saved_tensors\n mode = ctx.mode\n # gradients w.r.t. a\n if ctx.needs_input_grad[0]:\n mode_da = mode[1] + mode[0] + mode[2]\n da = _matmul.fn[mode_da](dc, b, False, not ctx.trans_b, ctx.trans_a, ctx.spdims, ctx.block,\n ctx.da_lut, ctx.da_num_locks, ctx.da_width, ctx.da_packs)\n # gradients w.r.t. 
b\n if ctx.needs_input_grad[1]:\n mode_db = mode[2] + mode[1] + mode[0]\n db = _matmul.fn[mode_db](a, dc, not ctx.trans_a, False, ctx.trans_b, ctx.spdims, ctx.block,\n ctx.db_lut, ctx.db_num_locks, ctx.db_width, ctx.db_packs)\n return da, db, None, None, None,\\\n None, None, None, None,\\\n None, None, None, None, None, None,\\\n None, None, None, None, None, None,\\\n None, None, None, None, None, None\n\nclass matmul:\n \n def make_lut(self, dtype, device):\n key = (dtype, device)\n if key in self.lut_cache:\n return self.lut_cache[key]\n # C look-up table\n layout, block = self.layout, self.block\n step = 8 if dtype == torch.float32 else 16\n if self.mode == 'sdd':\n c_lut, c_num_locks, c_width, c_packs = _matmul.make_sdd_lut(layout, block, dtype, device)\n elif self.mode == 'dsd':\n c_lut, c_num_locks, c_width, c_packs = _matmul.make_dxx_lut(layout, block, step, not self.trans_a, device)\n elif self.mode == 'dds':\n c_lut, c_num_locks, c_width, c_packs = _matmul.make_dxx_lut(layout, block, step, self.trans_b, device)\n # DA look-up table\n if self.mode == 'sdd':\n da_lut, da_num_locks, da_width, da_packs = _matmul.make_dxx_lut(layout, block, step, True, device)\n elif self.mode == 'dsd':\n da_lut, da_num_locks, da_width, da_packs = _matmul.make_sdd_lut(layout, block, dtype, device)\n elif self.mode == 'dds':\n da_lut, da_num_locks, da_width, da_packs = _matmul.make_dxx_lut(layout, block, step, not self.trans_b, device)\n # DB look-up table\n if self.mode == 'sdd':\n db_lut, db_num_locks, db_width, db_packs = _matmul.make_dxx_lut(layout, block, step, False, device)\n elif self.mode == 'dsd':\n db_lut, db_num_locks, db_width, db_packs = _matmul.make_dxx_lut(layout, block, step, self.trans_a, device)\n elif self.mode == 'dds':\n db_lut, db_num_locks, db_width, db_packs = _matmul.make_sdd_lut(layout, block, dtype, device)\n self.lut_cache[key] = (c_lut, c_num_locks, c_width, c_packs,\\\n da_lut, da_num_locks, da_width, da_packs,\\\n db_lut, db_num_locks, db_width, db_packs)\n return self.lut_cache[key]\n\n def __init__(self, layout, block, mode, trans_a = False, trans_b = False):\n if mode not in ['sdd', 'dsd', 'dds']:\n raise NotImplementedError('Supported modes are: sdd, dsd, dds')\n # look-up table cache\n self.lut_cache = dict()\n # attributes\n self.trans_a = trans_a\n self.trans_b = trans_b\n self.mode = mode\n self.spdims = layout.shape\n self.block = block\n self.layout = layout\n \n # pad shapes of a tensor to make it\n # compatible with kernel calls\n @staticmethod\n def _pad_shape(x, is_sparse):\n max_dim = 3 if is_sparse else 4\n for i in range(max_dim - x.dim()):\n x = x.unsqueeze(0)\n return x\n\n def __call__(self, a, b):\n c_lut, c_num_locks, c_width, c_packs,\\\n da_lut, da_num_locks, da_width, da_packs,\\\n db_lut, db_num_locks, db_width, db_packs = self.make_lut(a.dtype, a.device)\n # pad shapes with ones\n a = matmul._pad_shape(a, self.mode == 'dsd')\n b = matmul._pad_shape(b, self.mode == 'dds')\n # execute\n c = _matmul.apply(a, b, self.trans_a, self.trans_b, False,\n self.mode, self.spdims, self.block,\n c_lut, c_num_locks, c_width, c_packs, \n da_lut, da_num_locks, da_width, da_packs,\n db_lut, db_num_locks, db_width, db_packs)\n return c"
] | [
[
"torch.zeros",
"torch.cat",
"torch.stack",
"torch.arange",
"torch.empty_like",
"torch.sum",
"torch.tensor",
"torch.ones_like",
"torch.zeros_like",
"torch.empty",
"torch.cumsum"
]
] |
changwoonchoi/nerf-pytorch | [
"ddf4f6224397a7326e2ae113df274f467087693b"
] | [
"src/dataset/dataset_clevr.py"
] | [
"from abc import ABC\n\nfrom torch.utils.data import Dataset\nimport os\nimport numpy as np\nimport json\nimport imageio\nimport torch\nfrom utils.label_utils import colored_mask_to_label_map_np\nfrom utils.math_utils import pose_spherical\n\nimport matplotlib.pyplot as plt\nfrom dataset.dataset_interface import NerfDataset\nfrom torchvision import transforms\nimport cv2\n\n\nclass ClevrDataset(NerfDataset):\n\tdef __init__(self, basedir, **kwargs):\n\t\tsuper().__init__(\"clevr\", **kwargs)\n\t\twith open(os.path.join(basedir, 'transforms_{}.json'.format(self.split)), 'r') as fp:\n\t\t\tself.meta = json.load(fp)\n\n\t\tself.instance_color_list = np.loadtxt(os.path.join(basedir, 'train/instance_label_render.txt'))\n\t\tself.instance_num = len(self.instance_color_list)\n\t\tself.basedir = basedir\n\n\t\tself.skip = kwargs.get(\"skip\", 1)\n\t\tif self.split == \"train\":\n\t\t\tself.skip = 1\n\n\t\tself.camera_angle_x = float(self.meta['camera_angle_x'])\n\n\t\timage0_path = os.path.join(self.basedir, self.split, os.path.split(self.meta['frames'][0]['file_path'])[1])\n\t\timage0 = imageio.imread(image0_path, pilmode='RGB')\n\t\tself.original_height, self.original_width, _ = image0.shape\n\n\t\tself.height = int(self.original_height * self.scale)\n\t\tself.width = int(self.original_width * self.scale)\n\t\tself.focal = .5 * self.width / np.tan(0.5 * self.camera_angle_x)\n\t\tself.load_near_far_plane(**kwargs)\n\n\tdef load_near_far_plane(self, **kwargs):\n\t\t\"\"\"\n\t\tLoad near and far plane\n\t\t:return:\n\t\t\"\"\"\n\t\t# need average from all data\n\t\tposes = []\n\t\tfor split in [\"train\", \"val\", \"test\"]:\n\t\t\twith open(os.path.join(self.basedir, 'transforms_{}.json'.format(split)), 'r') as fp:\n\t\t\t\tmeta = json.load(fp)\n\t\t\tfor frame in meta['frames']:\n\t\t\t\tpose = np.array(frame['transform_matrix'])\n\t\t\t\tposes.append(pose)\n\t\tposes = np.asarray(poses)\n\t\themi_R = np.mean(np.linalg.norm(poses[:, :3, -1], axis=-1))\n\t\tsample_length = kwargs.get(\"sample_length\", 8)\n\t\tnear = hemi_R - sample_length / 2\n\t\tfar = hemi_R + sample_length / 2\n\t\tself.near = near\n\t\tself.far = far\n\n\tdef __len__(self):\n\t\treturn len(self.meta['frames'][::self.skip])\n\n\tdef __getitem__(self, index):\n\t\t\"\"\"\n\t\tLoad single data corresponding to specific index\n\t\t:param index: data index\n\t\t\"\"\"\n\t\tframe = self.meta['frames'][::self.skip][index]\n\t\timage_file_path = os.path.join(self.basedir, self.split, os.path.split(frame['file_path'])[1])\n\t\tmask_file_path = os.path.join(os.path.split(image_file_path)[0], 'mask_' + os.path.split(image_file_path)[1])\n\n\t\t# (1) load RGB Image\n\t\timage = cv2.imread(image_file_path)\n\t\timage = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)\n\t\tif self.scale != 1:\n\t\t\timage = cv2.resize(image, None, fx=self.scale, fy=self.scale)\n\n\t\t# (2) load colored mask and convert into labeled mask\n\t\tinstance_label_mask = None\n\t\tif self.load_instance_label_mask:\n\t\t\tcolored_mask = cv2.imread(mask_file_path)\n\t\t\tcolored_mask = cv2.cvtColor(colored_mask, cv2.COLOR_BGR2RGB)\n\t\t\tif self.scale != 1:\n\t\t\t\tcolored_mask = cv2.resize(colored_mask, None, fx=self.scale, fy=self.scale, interpolation=cv2.INTER_NEAREST)\n\t\t\tinstance_label_mask = colored_mask_to_label_map_np(colored_mask, self.instance_color_list)\n\n\t\t# (3) load pose information\n\t\tpose = np.array(frame['transform_matrix']).astype(np.float32)\n\n\t\timage = image.astype(np.float32)\n\t\timage /= 255.0\n\n\t\tsample = {}\n\t\tsample[\"image\"] = 
image\n\t\tif self.load_instance_label_mask:\n\t\t\tsample[\"mask\"] = instance_label_mask\n\t\tsample[\"pose\"] = pose\n\t\treturn sample\n\n\tdef get_test_render_poses(self):\n\t\treturn torch.stack([pose_spherical(angle, -30.0, 11.0) for angle in np.linspace(-180, 180, 40 + 1)[:-1]], 0)\n"
] | [
[
"numpy.array",
"numpy.linalg.norm",
"numpy.asarray",
"numpy.tan",
"numpy.linspace"
]
] |
Cherry-pashka/SignClass | [
"13b0b7913b7f46788ecbbc59f5295ccb33ffbccc"
] | [
"pipeline/models.py"
] | [
"from typing import Optional\n\nimport torch\nimport torch.nn as nn\nfrom torchvision import models\n\nfrom constants import *\n\n\ndef get_resnet_152(device: str = DEVICE,\n ckpt_path: Optional[str] = None\n ) -> nn.Module:\n \"\"\"Returns the pretrained model resnet152 and if checkpoint is specified load it\"\"\"\n model = models.resnet152(True)\n model.fc = nn.Sequential(nn.Linear(2048, 131))\n model = model.to(device)\n if ckpt_path:\n try:\n checkpoint = torch.load(ckpt_path, map_location=device)\n model.load_state_dict(checkpoint)\n except:\n print(\"Wrong checkpoint\")\n return model\n\n\ndef get_densenet_121(device: str = DEVICE,\n ckpt_path: Optional[str] = None\n ) -> nn.Module:\n \"\"\"Returns the pretrained model densenet152 and if checkpoint is specified load it\"\"\"\n model = models.densenet121(True)\n model.classifier = nn.Sequential(nn.Linear(1024, 131))\n model = model.to(device)\n if ckpt_path:\n try:\n checkpoint = torch.load(ckpt_path, map_location=device)\n model.load_state_dict(checkpoint)\n except:\n print(\"Wrong checkpoint\")\n return model\n\n\ndef get_vgg_19(device: str = DEVICE,\n ckpt_path: Optional[str] = None\n ) -> nn.Module:\n \"\"\"Returns the pretrained model vgg19 and if checkpoint is specified load it\"\"\"\n model = models.vgg19(True)\n model.classifier = nn.Sequential(nn.Linear(in_features=25088, out_features=4096, bias=True),\n nn.ReLU(inplace=True),\n nn.Dropout(p=0.5, inplace=False),\n nn.Linear(in_features=4096, out_features=4096, bias=True),\n nn.ReLU(inplace=True),\n nn.Dropout(p=0.5, inplace=False),\n nn.Linear(in_features=4096, out_features=131, bias=True)\n\n )\n model = model.to(device)\n if ckpt_path:\n try:\n checkpoint = torch.load(ckpt_path, map_location=device)\n model.load_state_dict(checkpoint)\n except:\n print(\"Wrong checkpoint\")\n return model\n"
] | [
[
"torch.nn.Linear",
"torch.nn.Dropout",
"torch.load",
"torch.nn.ReLU"
]
] |
tsigalko18/transferability-testing-sdcs | [
"1294466e6cc0bb251a912a68ea88a8468611a12d"
] | [
"manage.py"
] | [
"#!/usr/bin/env python3\r\n\"\"\"\r\nScripts to drive a donkey 2 car\r\n\r\nUsage:\r\n manage.py (drive) [--model=<model>] [--js] [--type=(linear|categorical|rnn|imu|behavior|3d|localizer|latent)] [--useadversarial] [--advimage=<file>] [--camera=(single|stereo)] [--meta=<key:value> ...] [--myconfig=<filename>] [--corruption=<corruption>] [--severity=<severity>] [--delay=<delay>]\r\n manage.py (train) [--tub=<tub1,tub2,..tubn>] [--file=<file> ...] (--model=<model>) [--transfer=<model>] [--type=(linear|categorical|rnn|imu|behavior|3d|localizer)] [--continuous] [--aug] [--myconfig=<filename>]\r\n\r\n\r\nOptions:\r\n -h --help Show this screen.\r\n --js Use physical joystick.\r\n -f --file=<file> A text file containing paths to tub320x240_train files, one per line. Option may be used more than once.\r\n --meta=<key:value> Key/Value strings describing describing a piece of meta data about this drive. Option may be used more than once.\r\n --myconfig=filename Specify myconfig file to use. \r\n [default: myconfig.py]\r\n\"\"\"\r\n\r\nimport pickle\r\nimport warnings\r\n\r\nfrom visualodometry.xte_predictor.testing.get_corrupted_images import corrupt\r\n\r\nwarnings.filterwarnings('ignore')\r\n\r\nimport shutil\r\n\r\nfrom docopt import docopt\r\n\r\nimport donkeycar as dk\r\n\r\n# import parts\r\nfrom donkeycar.parts.transform import TriggeredCallback, DelayedTrigger\r\nfrom donkeycar.parts.datastore import TubHandler\r\nfrom donkeycar.parts.controller import LocalWebController, \\\r\n JoystickController, WebFpv\r\nfrom donkeycar.parts.throttle_filter import ThrottleFilter\r\nfrom donkeycar.parts.behavior import BehaviorPart\r\nfrom donkeycar.parts.file_watcher import FileWatcher\r\nfrom donkeycar.parts.launch import AiLaunch\r\nfrom visualodometry.utils import *\r\n\r\n# TODO: Fix\r\nos.environ['KMP_DUPLICATE_LIB_OK'] = 'True'\r\nos.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'\r\n\r\n\r\ndef drive(cfg, model_path=None, use_joystick=False, model_type=None, camera_type='single', meta=[]):\r\n '''\r\n Construct a working robotic vehicle from many parts.\r\n Each part runs as a job in the Vehicle loop, calling either\r\n it's run or run_threaded method depending on the constructor flag `threaded`.\r\n All parts are updated one after another at the framerate given in\r\n cfg.DRIVE_LOOP_HZ assuming each part finishes processing in a timely manner.\r\n Parts may have named outputs and inputs. The framework handles passing named outputs\r\n to parts requesting the same named input.\r\n '''\r\n\r\n if cfg.DONKEY_GYM:\r\n # the simulator will use cuda and then we usually run out of resources\r\n # if we also try to use cuda. 
so disable for donkey_gym.\r\n os.environ[\"CUDA_VISIBLE_DEVICES\"] = \"-1\"\r\n\r\n if model_type is None:\r\n if cfg.TRAIN_LOCALIZER:\r\n model_type = \"localizer\"\r\n elif cfg.TRAIN_BEHAVIORS:\r\n model_type = \"behavior\"\r\n else:\r\n model_type = cfg.DEFAULT_MODEL_TYPE\r\n\r\n # Initialize car\r\n V = dk.vehicle.Vehicle()\r\n\r\n print(\"cfg.CAMERA_TYPE\", cfg.CAMERA_TYPE)\r\n if camera_type == \"stereo\":\r\n\r\n if cfg.CAMERA_TYPE == \"WEBCAM\":\r\n from donkeycar.parts.camera import Webcam\r\n\r\n camA = Webcam(image_w=cfg.IMAGE_W, image_h=cfg.IMAGE_H, image_d=cfg.IMAGE_DEPTH, iCam=0)\r\n camB = Webcam(image_w=cfg.IMAGE_W, image_h=cfg.IMAGE_H, image_d=cfg.IMAGE_DEPTH, iCam=1)\r\n\r\n elif cfg.CAMERA_TYPE == \"CVCAM\":\r\n from donkeycar.parts.cv import CvCam\r\n\r\n camA = CvCam(image_w=cfg.IMAGE_W, image_h=cfg.IMAGE_H, image_d=cfg.IMAGE_DEPTH, iCam=0)\r\n camB = CvCam(image_w=cfg.IMAGE_W, image_h=cfg.IMAGE_H, image_d=cfg.IMAGE_DEPTH, iCam=1)\r\n else:\r\n raise (Exception(\"Unsupported camera type: %s\" % cfg.CAMERA_TYPE))\r\n\r\n V.add(camA, outputs=['cam/image_array_a'], threaded=True)\r\n V.add(camB, outputs=['cam/image_array_b'], threaded=True)\r\n\r\n from donkeycar.parts.image import StereoPair\r\n\r\n V.add(StereoPair(), inputs=['cam/image_array_a', 'cam/image_array_b'],\r\n outputs=['cam/image_array'])\r\n elif cfg.CAMERA_TYPE == \"D435\":\r\n from donkeycar.parts.realsense435i import RealSense435i\r\n cam = RealSense435i(\r\n enable_rgb=cfg.REALSENSE_D435_RGB,\r\n enable_depth=cfg.REALSENSE_D435_DEPTH,\r\n enable_imu=cfg.REALSENSE_D435_IMU,\r\n device_id=cfg.REALSENSE_D435_ID)\r\n V.add(cam, inputs=[],\r\n outputs=['cam/image_array', 'cam/depth_array',\r\n 'imu/acl_x', 'imu/acl_y', 'imu/acl_z',\r\n 'imu/gyr_x', 'imu/gyr_y', 'imu/gyr_z'],\r\n threaded=True)\r\n\r\n else:\r\n if cfg.DONKEY_GYM:\r\n from donkeycar.parts.dgym import DonkeyGymEnv\r\n\r\n inputs = []\r\n threaded = True\r\n if cfg.DONKEY_GYM:\r\n from donkeycar.parts.dgym import DonkeyGymEnv\r\n cam = DonkeyGymEnv(cfg.DONKEY_SIM_PATH, host=cfg.SIM_HOST, env_name=cfg.DONKEY_GYM_ENV_NAME,\r\n conf=cfg.GYM_CONF, delay=cfg.SIM_ARTIFICIAL_LATENCY)\r\n threaded = True\r\n inputs = ['angle', 'throttle']\r\n elif cfg.CAMERA_TYPE == \"PICAM\":\r\n from donkeycar.parts.camera import PiCamera\r\n cam = PiCamera(image_w=cfg.IMAGE_W, image_h=cfg.IMAGE_H, image_d=cfg.IMAGE_DEPTH,\r\n framerate=cfg.CAMERA_FRAMERATE, vflip=cfg.CAMERA_VFLIP, hflip=cfg.CAMERA_HFLIP)\r\n elif cfg.CAMERA_TYPE == \"WEBCAM\":\r\n from donkeycar.parts.camera import Webcam\r\n cam = Webcam(image_w=cfg.IMAGE_W, image_h=cfg.IMAGE_H, image_d=cfg.IMAGE_DEPTH)\r\n elif cfg.CAMERA_TYPE == \"CVCAM\":\r\n from donkeycar.parts.cv import CvCam\r\n cam = CvCam(image_w=cfg.IMAGE_W, image_h=cfg.IMAGE_H, image_d=cfg.IMAGE_DEPTH)\r\n elif cfg.CAMERA_TYPE == \"CSIC\":\r\n from donkeycar.parts.camera import CSICamera\r\n cam = CSICamera(image_w=cfg.IMAGE_W, image_h=cfg.IMAGE_H, image_d=cfg.IMAGE_DEPTH,\r\n framerate=cfg.CAMERA_FRAMERATE, gstreamer_flip=cfg.CSIC_CAM_GSTREAMER_FLIP_PARM)\r\n elif cfg.CAMERA_TYPE == \"V4L\":\r\n from donkeycar.parts.camera import V4LCamera\r\n cam = V4LCamera(image_w=cfg.IMAGE_W, image_h=cfg.IMAGE_H, image_d=cfg.IMAGE_DEPTH,\r\n framerate=cfg.CAMERA_FRAMERATE)\r\n elif cfg.CAMERA_TYPE == \"MOCK\":\r\n from donkeycar.parts.camera import MockCamera\r\n cam = MockCamera(image_w=cfg.IMAGE_W, image_h=cfg.IMAGE_H, image_d=cfg.IMAGE_DEPTH)\r\n elif cfg.CAMERA_TYPE == \"IMAGE_LIST\":\r\n from donkeycar.parts.camera import ImageListCamera\r\n 
cam = ImageListCamera(path_mask=cfg.PATH_MASK)\r\n else:\r\n raise (Exception(\"Unknown camera type: %s\" % cfg.CAMERA_TYPE))\r\n\r\n V.add(cam, inputs=inputs, outputs=['cam/image_array'], threaded=threaded)\r\n\r\n if use_joystick or cfg.USE_JOYSTICK_AS_DEFAULT:\r\n # modify max_throttle closer to 1.0 to have more power\r\n # modify steering_scale lower than 1.0 to have less responsive steering\r\n if cfg.CONTROLLER_TYPE == \"MM1\":\r\n from donkeycar.parts.robohat import RoboHATController\r\n ctr = RoboHATController(cfg)\r\n elif \"custom\" == cfg.CONTROLLER_TYPE:\r\n #\r\n # custom controller created with `donkey createjs` command\r\n #\r\n from my_joystick import MyJoystickController\r\n ctr = MyJoystickController(\r\n throttle_dir=cfg.JOYSTICK_THROTTLE_DIR,\r\n throttle_scale=cfg.JOYSTICK_MAX_THROTTLE,\r\n steering_scale=cfg.JOYSTICK_STEERING_SCALE,\r\n auto_record_on_throttle=cfg.AUTO_RECORD_ON_THROTTLE)\r\n ctr.set_deadzone(cfg.JOYSTICK_DEADZONE)\r\n else:\r\n from donkeycar.parts.controller import get_js_controller\r\n\r\n ctr = get_js_controller(cfg)\r\n\r\n if cfg.USE_NETWORKED_JS:\r\n from donkeycar.parts.controller import JoyStickSub\r\n netwkJs = JoyStickSub(cfg.NETWORK_JS_SERVER_IP)\r\n V.add(netwkJs, threaded=True)\r\n ctr.js = netwkJs\r\n\r\n V.add(ctr,\r\n inputs=['cam/image_array'],\r\n outputs=['user/angle', 'user/throttle', 'user/mode', 'recording'],\r\n threaded=True)\r\n\r\n else:\r\n # This web controller will create a web server that is capable\r\n # of managing steering, throttle, and modes, and more.\r\n ctr = LocalWebController(port=cfg.WEB_CONTROL_PORT, mode=cfg.WEB_INIT_MODE)\r\n\r\n V.add(ctr,\r\n inputs=['cam/image_array', 'tub320x240_train/num_records'],\r\n outputs=['user/angle', 'user/throttle', 'user/mode', 'recording'],\r\n threaded=True)\r\n\r\n # this throttle filter will allow one tap back for esc reverse\r\n th_filter = ThrottleFilter()\r\n V.add(th_filter, inputs=['user/throttle'], outputs=['user/throttle'])\r\n\r\n # See if we should even run the pilot module.\r\n # This is only needed because the part run_condition only accepts boolean\r\n class PilotCondition:\r\n def run(self, mode):\r\n if mode == 'user':\r\n return False\r\n else:\r\n return True\r\n\r\n V.add(PilotCondition(), inputs=['user/mode'], outputs=['run_pilot'])\r\n\r\n class LedConditionLogic:\r\n def __init__(self, cfg):\r\n self.cfg = cfg\r\n\r\n def run(self, mode, recording, recording_alert, behavior_state, model_file_changed, track_loc):\r\n # returns a blink rate. 0 for off. -1 for on. 
positive for rate.\r\n\r\n if track_loc is not None:\r\n led.set_rgb(*self.cfg.LOC_COLORS[track_loc])\r\n return -1\r\n\r\n if model_file_changed:\r\n led.set_rgb(self.cfg.MODEL_RELOADED_LED_R, self.cfg.MODEL_RELOADED_LED_G, self.cfg.MODEL_RELOADED_LED_B)\r\n return 0.1\r\n else:\r\n led.set_rgb(self.cfg.LED_R, self.cfg.LED_G, self.cfg.LED_B)\r\n\r\n if recording_alert:\r\n led.set_rgb(*recording_alert)\r\n return self.cfg.REC_COUNT_ALERT_BLINK_RATE\r\n else:\r\n led.set_rgb(self.cfg.LED_R, self.cfg.LED_G, self.cfg.LED_B)\r\n\r\n if behavior_state is not None and model_type == 'behavior':\r\n r, g, b = self.cfg.BEHAVIOR_LED_COLORS[behavior_state]\r\n led.set_rgb(r, g, b)\r\n return -1 # solid on\r\n\r\n if recording:\r\n return -1 # solid on\r\n elif mode == 'user':\r\n return 1\r\n elif mode == 'local_angle':\r\n return 0.5\r\n elif mode == 'local':\r\n return 0.1\r\n return 0\r\n\r\n if cfg.HAVE_RGB_LED and not cfg.DONKEY_GYM:\r\n from donkeycar.parts.led_status import RGB_LED\r\n led = RGB_LED(cfg.LED_PIN_R, cfg.LED_PIN_G, cfg.LED_PIN_B, cfg.LED_INVERT)\r\n led.set_rgb(cfg.LED_R, cfg.LED_G, cfg.LED_B)\r\n\r\n V.add(LedConditionLogic(cfg),\r\n inputs=['user/mode', 'recording', \"records/alert\", 'behavior/state', 'modelfile/modified', \"pilot/loc\"],\r\n outputs=['led/blink_rate'])\r\n\r\n V.add(led, inputs=['led/blink_rate'])\r\n\r\n def get_record_alert_color(num_records):\r\n col = (0, 0, 0)\r\n for count, color in cfg.RECORD_ALERT_COLOR_ARR:\r\n if num_records >= count:\r\n col = color\r\n return col\r\n\r\n class RecordTracker:\r\n def __init__(self):\r\n self.last_num_rec_print = 0\r\n self.dur_alert = 0\r\n self.force_alert = 0\r\n\r\n def run(self, num_records):\r\n if num_records is None:\r\n return 0\r\n\r\n if self.last_num_rec_print != num_records or self.force_alert:\r\n self.last_num_rec_print = num_records\r\n\r\n if num_records % 10 == 0:\r\n print(\"recorded\", num_records, \"records\")\r\n\r\n if num_records % cfg.REC_COUNT_ALERT == 0 or self.force_alert:\r\n self.dur_alert = num_records // cfg.REC_COUNT_ALERT * cfg.REC_COUNT_ALERT_CYC\r\n self.force_alert = 0\r\n\r\n if self.dur_alert > 0:\r\n self.dur_alert -= 1\r\n\r\n if self.dur_alert != 0:\r\n return get_record_alert_color(num_records)\r\n\r\n return 0\r\n\r\n rec_tracker_part = RecordTracker()\r\n V.add(rec_tracker_part, inputs=[\"tub320x240_train/num_records\"], outputs=['records/alert'])\r\n\r\n if cfg.AUTO_RECORD_ON_THROTTLE and isinstance(ctr, JoystickController):\r\n # then we are not using the circle button. 
hijack that to force a record count indication\r\n def show_record_acount_status():\r\n rec_tracker_part.last_num_rec_print = 0\r\n rec_tracker_part.force_alert = 1\r\n\r\n ctr.set_button_down_trigger('circle', show_record_acount_status)\r\n\r\n # Sombrero\r\n if cfg.HAVE_SOMBRERO:\r\n from donkeycar.parts.sombrero import Sombrero\r\n s = Sombrero()\r\n\r\n # IMU\r\n if cfg.HAVE_IMU:\r\n from donkeycar.parts.imu import IMU\r\n imu = IMU(sensor=cfg.IMU_SENSOR, dlp_setting=cfg.IMU_DLP_CONFIG)\r\n V.add(imu, outputs=['imu/acl_x', 'imu/acl_y', 'imu/acl_z',\r\n 'imu/gyr_x', 'imu/gyr_y', 'imu/gyr_z'], threaded=True)\r\n\r\n class ImgPreProcess():\r\n '''\r\n preprocess camera image for inference.\r\n normalize and crop if needed.\r\n '''\r\n\r\n def clean_temp_dir(self):\r\n shutil.rmtree('data/temp')\r\n os.mkdir('data/temp')\r\n\r\n def __init__(self, cfg):\r\n self.cfg = cfg\r\n self.start_time = time.perf_counter()\r\n self.counter = 1\r\n self.clean_temp_dir()\r\n if not self.cfg.USE_ADVERSARIAL_DRIVING:\r\n self.adv_img = None\r\n else:\r\n self.adv_img = np.array(pickle.load(open(self.cfg.ADV_IMAGE, \"rb\"))[0])\r\n\r\n def run(self, img_arr):\r\n if self.cfg.USE_CORRUPTED_INPUTS:\r\n\r\n if time.perf_counter() - self.start_time > self.cfg.CORRUPTION_DELAY:\r\n img = Image.fromarray(np.uint8(img_arr)).convert('RGB')\r\n img = img.resize((224, 224))\r\n\r\n img = corrupt(img, corruption=self.cfg.CORRUPTION, severity=self.cfg.SEVERITY)\r\n img = img.resize((self.cfg.IMAGE_W, self.cfg.IMAGE_H))\r\n img_arr = np.array(img)\r\n\r\n # im = Image.fromarray(img_arr)\r\n # im.save(\"data/temp/\" + str(self.counter) + \"_cam-image_array_.jpg\")\r\n self.counter += 1\r\n\r\n img_arr = normalize_and_crop(img_arr, self.cfg)\r\n\r\n if self.cfg.USE_ADVERSARIAL_DRIVING:\r\n if time.perf_counter() - self.start_time > self.cfg.CORRUPTION_DELAY:\r\n img_arr = img_arr + self.adv_img * self.cfg.SEVERITY\r\n\r\n im = Image.fromarray((img_arr * 255).astype(np.uint8))\r\n im.save(\"data/temp/\" + str(self.counter) + \"_cam-image_array_.jpg\")\r\n self.counter += 1\r\n\r\n # print('adv attack is active')\r\n\r\n return img_arr\r\n\r\n if \"coral\" in model_type:\r\n inf_input = 'cam/image_array'\r\n else:\r\n inf_input = 'cam/normalized/cropped'\r\n V.add(ImgPreProcess(cfg),\r\n inputs=['cam/image_array'],\r\n outputs=[inf_input],\r\n run_condition='run_pilot')\r\n\r\n # Use the FPV preview, which will show the cropped image output, or the full frame.\r\n if cfg.USE_FPV:\r\n V.add(WebFpv(), inputs=['cam/image_array'], threaded=True)\r\n\r\n # Behavioral state\r\n if cfg.TRAIN_BEHAVIORS:\r\n bh = BehaviorPart(cfg.BEHAVIOR_LIST)\r\n V.add(bh, outputs=['behavior/state', 'behavior/label', \"behavior/one_hot_state_array\"])\r\n try:\r\n ctr.set_button_down_trigger('L1', bh.increment_state)\r\n except:\r\n pass\r\n\r\n inputs = [inf_input, \"behavior/one_hot_state_array\"]\r\n # IMU\r\n elif model_type == \"imu\":\r\n assert (cfg.HAVE_IMU)\r\n # Run the pilot if the mode is not user.\r\n inputs = [inf_input,\r\n 'imu/acl_x', 'imu/acl_y', 'imu/acl_z',\r\n 'imu/gyr_x', 'imu/gyr_y', 'imu/gyr_z']\r\n else:\r\n inputs = [inf_input]\r\n\r\n def load_model(kl, model_path):\r\n start = time.time()\r\n print('loading model', model_path)\r\n kl.load(model_path)\r\n print('finished loading in %s sec.' 
% (str(time.time() - start)))\r\n\r\n def load_weights(kl, weights_path):\r\n start = time.time()\r\n try:\r\n print('loading model weights', weights_path)\r\n kl.get_vgg.load_weights(weights_path)\r\n print('finished loading in %s sec.' % (str(time.time() - start)))\r\n except Exception as e:\r\n print(e)\r\n print('ERR>> problems loading weights', weights_path)\r\n\r\n def load_model_json(kl, json_fnm):\r\n start = time.time()\r\n print('loading model json', json_fnm)\r\n from tensorflow.python import keras\r\n try:\r\n with open(json_fnm, 'r') as handle:\r\n contents = handle.read()\r\n kl.get_vgg = keras.models.model_from_json(contents)\r\n print('finished loading json in %s sec.' % (str(time.time() - start)))\r\n except Exception as e:\r\n print(e)\r\n print(\"ERR>> problems loading model json\", json_fnm)\r\n\r\n if model_path:\r\n # When we have a model, first create an appropriate Keras part\r\n # kl = dk.utils.get_model_by_type(model_type, cfg)\r\n kl = get_model_by_type(model_type, cfg)\r\n\r\n model_reload_cb = None\r\n\r\n if '.h5' in model_path or '.uff' in model_path or 'tflite' in model_path or '.pkl' in model_path:\r\n # when we have a .h5 extension\r\n # load everything from the model file\r\n load_model(kl, model_path)\r\n\r\n def reload_model(filename):\r\n load_model(kl, filename)\r\n\r\n model_reload_cb = reload_model\r\n\r\n elif '.json' in model_path:\r\n # when we have a .json extension\r\n # load the model from there and look for a matching\r\n # .wts file with just weights\r\n load_model_json(kl, model_path)\r\n weights_path = model_path.replace('.json', '.weights')\r\n load_weights(kl, weights_path)\r\n\r\n def reload_weights(filename):\r\n weights_path = filename.replace('.json', '.weights')\r\n load_weights(kl, weights_path)\r\n\r\n model_reload_cb = reload_weights\r\n\r\n else:\r\n print(\"ERR>> Unknown extension type on model file!!\")\r\n return\r\n\r\n # this part will signal visual LED, if connected\r\n V.add(FileWatcher(model_path, verbose=True), outputs=['modelfile/modified'])\r\n\r\n # these parts will reload the model file, but only when ai is running so we don't interrupt user driving\r\n V.add(FileWatcher(model_path), outputs=['modelfile/dirty'], run_condition=\"ai_running\")\r\n V.add(DelayedTrigger(100), inputs=['modelfile/dirty'], outputs=['modelfile/reload'], run_condition=\"ai_running\")\r\n V.add(TriggeredCallback(model_path, model_reload_cb), inputs=[\"modelfile/reload\"], run_condition=\"ai_running\")\r\n\r\n outputs = ['pilot/angle', 'pilot/throttle']\r\n\r\n if cfg.TRAIN_LOCALIZER:\r\n outputs.append(\"pilot/loc\")\r\n\r\n V.add(kl, inputs=inputs,\r\n outputs=outputs,\r\n run_condition='run_pilot')\r\n\r\n if cfg.STOP_SIGN_DETECTOR:\r\n from donkeycar.parts.object_detector.stop_sign_detector import StopSignDetector\r\n V.add(StopSignDetector(cfg.STOP_SIGN_MIN_SCORE, cfg.STOP_SIGN_SHOW_BOUNDING_BOX),\r\n inputs=['cam/image_array', 'pilot/throttle'], outputs=['pilot/throttle', 'cam/image_array'])\r\n\r\n # Choose what inputs should change the car.\r\n class DriveMode:\r\n def run(self, mode,\r\n user_angle, user_throttle,\r\n pilot_angle, pilot_throttle):\r\n if mode == 'user':\r\n return user_angle, user_throttle\r\n\r\n elif mode == 'local_angle':\r\n return pilot_angle if pilot_angle else 0.0, user_throttle\r\n\r\n else:\r\n return pilot_angle if pilot_angle else 0.0, pilot_throttle * cfg.AI_THROTTLE_MULT if pilot_throttle else 0.0\r\n\r\n V.add(DriveMode(),\r\n inputs=['user/mode', 'user/angle', 'user/throttle',\r\n 'pilot/angle', 
'pilot/throttle'],\r\n outputs=['angle', 'throttle'])\r\n\r\n # to give the car a boost when starting ai mode in a race.\r\n aiLauncher = AiLaunch(cfg.AI_LAUNCH_DURATION, cfg.AI_LAUNCH_THROTTLE, cfg.AI_LAUNCH_KEEP_ENABLED)\r\n\r\n V.add(aiLauncher,\r\n inputs=['user/mode', 'throttle'],\r\n outputs=['throttle'])\r\n\r\n if isinstance(ctr, JoystickController):\r\n ctr.set_button_down_trigger(cfg.AI_LAUNCH_ENABLE_BUTTON, aiLauncher.enable_ai_launch)\r\n\r\n class AiRunCondition:\r\n '''\r\n A bool part to let us know when ai is running.\r\n '''\r\n\r\n def run(self, mode):\r\n if mode == \"user\":\r\n return False\r\n return True\r\n\r\n V.add(AiRunCondition(), inputs=['user/mode'], outputs=['ai_running'])\r\n\r\n # Ai Recording\r\n class AiRecordingCondition:\r\n '''\r\n return True when ai mode, otherwise respect user mode recording flag\r\n '''\r\n\r\n def run(self, mode, recording):\r\n if mode == 'user':\r\n return recording\r\n return True\r\n\r\n if cfg.RECORD_DURING_AI:\r\n V.add(AiRecordingCondition(), inputs=['user/mode', 'recording'], outputs=['recording'])\r\n\r\n # Drive train setup\r\n if cfg.DONKEY_GYM or cfg.DRIVE_TRAIN_TYPE == \"MOCK\":\r\n pass\r\n elif cfg.DRIVE_TRAIN_TYPE == \"SERVO_ESC\":\r\n from donkeycar.parts.actuator import PCA9685, PWMSteering, PWMThrottle\r\n\r\n steering_controller = PCA9685(cfg.STEERING_CHANNEL, cfg.PCA9685_I2C_ADDR, busnum=cfg.PCA9685_I2C_BUSNUM)\r\n steering = PWMSteering(controller=steering_controller,\r\n left_pulse=cfg.STEERING_LEFT_PWM,\r\n right_pulse=cfg.STEERING_RIGHT_PWM)\r\n\r\n throttle_controller = PCA9685(cfg.THROTTLE_CHANNEL, cfg.PCA9685_I2C_ADDR, busnum=cfg.PCA9685_I2C_BUSNUM)\r\n throttle = PWMThrottle(controller=throttle_controller,\r\n max_pulse=cfg.THROTTLE_FORWARD_PWM,\r\n zero_pulse=cfg.THROTTLE_STOPPED_PWM,\r\n min_pulse=cfg.THROTTLE_REVERSE_PWM)\r\n\r\n V.add(steering, inputs=['angle'], threaded=True)\r\n V.add(throttle, inputs=['throttle'], threaded=True)\r\n\r\n\r\n elif cfg.DRIVE_TRAIN_TYPE == \"DC_STEER_THROTTLE\":\r\n from donkeycar.parts.actuator import Mini_HBridge_DC_Motor_PWM\r\n\r\n steering = Mini_HBridge_DC_Motor_PWM(cfg.HBRIDGE_PIN_LEFT, cfg.HBRIDGE_PIN_RIGHT)\r\n throttle = Mini_HBridge_DC_Motor_PWM(cfg.HBRIDGE_PIN_FWD, cfg.HBRIDGE_PIN_BWD)\r\n\r\n V.add(steering, inputs=['angle'])\r\n V.add(throttle, inputs=['throttle'])\r\n\r\n\r\n elif cfg.DRIVE_TRAIN_TYPE == \"DC_TWO_WHEEL\":\r\n from donkeycar.parts.actuator import TwoWheelSteeringThrottle, Mini_HBridge_DC_Motor_PWM\r\n\r\n left_motor = Mini_HBridge_DC_Motor_PWM(cfg.HBRIDGE_PIN_LEFT_FWD, cfg.HBRIDGE_PIN_LEFT_BWD)\r\n right_motor = Mini_HBridge_DC_Motor_PWM(cfg.HBRIDGE_PIN_RIGHT_FWD, cfg.HBRIDGE_PIN_RIGHT_BWD)\r\n two_wheel_control = TwoWheelSteeringThrottle()\r\n\r\n V.add(two_wheel_control,\r\n inputs=['throttle', 'angle'],\r\n outputs=['left_motor_speed', 'right_motor_speed'])\r\n\r\n V.add(left_motor, inputs=['left_motor_speed'])\r\n V.add(right_motor, inputs=['right_motor_speed'])\r\n\r\n elif cfg.DRIVE_TRAIN_TYPE == \"SERVO_HBRIDGE_PWM\":\r\n from donkeycar.parts.actuator import ServoBlaster, PWMSteering\r\n steering_controller = ServoBlaster(cfg.STEERING_CHANNEL) # really pin\r\n # PWM pulse values should be in the range of 100 to 200\r\n assert (cfg.STEERING_LEFT_PWM <= 200)\r\n assert (cfg.STEERING_RIGHT_PWM <= 200)\r\n steering = PWMSteering(controller=steering_controller,\r\n left_pulse=cfg.STEERING_LEFT_PWM,\r\n right_pulse=cfg.STEERING_RIGHT_PWM)\r\n\r\n from donkeycar.parts.actuator import Mini_HBridge_DC_Motor_PWM\r\n motor = 
Mini_HBridge_DC_Motor_PWM(cfg.HBRIDGE_PIN_FWD, cfg.HBRIDGE_PIN_BWD)\r\n\r\n V.add(steering, inputs=['angle'], threaded=True)\r\n V.add(motor, inputs=[\"throttle\"])\r\n\r\n elif cfg.DRIVE_TRAIN_TYPE == \"MM1\":\r\n from donkeycar.parts.robohat import RoboHATDriver\r\n V.add(RoboHATDriver(cfg), inputs=['angle', 'throttle'])\r\n\r\n elif cfg.DRIVE_TRAIN_TYPE == \"PIGPIO_PWM\":\r\n from donkeycar.parts.actuator import PWMSteering, PWMThrottle, PiGPIO_PWM\r\n steering_controller = PiGPIO_PWM(cfg.STEERING_PWM_PIN, freq=cfg.STEERING_PWM_FREQ,\r\n inverted=cfg.STEERING_PWM_INVERTED)\r\n steering = PWMSteering(controller=steering_controller,\r\n left_pulse=cfg.STEERING_LEFT_PWM,\r\n right_pulse=cfg.STEERING_RIGHT_PWM)\r\n\r\n throttle_controller = PiGPIO_PWM(cfg.THROTTLE_PWM_PIN, freq=cfg.THROTTLE_PWM_FREQ,\r\n inverted=cfg.THROTTLE_PWM_INVERTED)\r\n throttle = PWMThrottle(controller=throttle_controller,\r\n max_pulse=cfg.THROTTLE_FORWARD_PWM,\r\n zero_pulse=cfg.THROTTLE_STOPPED_PWM,\r\n min_pulse=cfg.THROTTLE_REVERSE_PWM)\r\n V.add(steering, inputs=['angle'], threaded=True)\r\n V.add(throttle, inputs=['throttle'], threaded=True)\r\n\r\n # OLED setup\r\n if cfg.USE_SSD1306_128_32:\r\n from donkeycar.parts.oled import OLEDPart\r\n auto_record_on_throttle = cfg.USE_JOYSTICK_AS_DEFAULT and cfg.AUTO_RECORD_ON_THROTTLE\r\n oled_part = OLEDPart(cfg.SSD1306_128_32_I2C_BUSNUM, auto_record_on_throttle=auto_record_on_throttle)\r\n V.add(oled_part, inputs=['recording', 'tub320x240_train/num_records', 'user/mode'], outputs=[], threaded=True)\r\n\r\n # add tub320x240_train to save data\r\n\r\n inputs = ['cam/image_array',\r\n 'user/angle', 'user/throttle',\r\n 'user/mode']\r\n\r\n types = ['image_array',\r\n 'float', 'float',\r\n 'str']\r\n\r\n if cfg.TRAIN_BEHAVIORS:\r\n inputs += ['behavior/state', 'behavior/label', \"behavior/one_hot_state_array\"]\r\n types += ['int', 'str', 'vector']\r\n\r\n if cfg.CAMERA_TYPE == \"D435\" and cfg.REALSENSE_D435_DEPTH:\r\n inputs += ['cam/depth_array']\r\n types += ['gray16_array']\r\n\r\n if cfg.HAVE_IMU or (cfg.CAMERA_TYPE == \"D435\" and cfg.REALSENSE_D435_IMU):\r\n inputs += ['imu/acl_x', 'imu/acl_y', 'imu/acl_z',\r\n 'imu/gyr_x', 'imu/gyr_y', 'imu/gyr_z']\r\n\r\n types += ['float', 'float', 'float',\r\n 'float', 'float', 'float']\r\n\r\n if cfg.RECORD_DURING_AI:\r\n inputs += ['pilot/angle', 'pilot/throttle']\r\n types += ['float', 'float']\r\n\r\n th = TubHandler(path=cfg.DATA_PATH)\r\n tub = th.new_tub_writer(inputs=inputs, types=types, user_meta=meta)\r\n V.add(tub, inputs=inputs, outputs=[\"tub320x240_train/num_records\"], run_condition='recording')\r\n\r\n if cfg.PUB_CAMERA_IMAGES:\r\n from donkeycar.parts.network import TCPServeValue\r\n from donkeycar.parts.image import ImgArrToJpg\r\n pub = TCPServeValue(\"camera\")\r\n V.add(ImgArrToJpg(), inputs=['cam/image_array'], outputs=['jpg/bin'])\r\n V.add(pub, inputs=['jpg/bin'])\r\n\r\n if type(ctr) is LocalWebController:\r\n if cfg.DONKEY_GYM:\r\n print(\"You can now go to http://localhost:%d to drive your car.\" % cfg.WEB_CONTROL_PORT)\r\n else:\r\n print(\"You can now go to <your hostname.local>:%d to drive your car.\" % cfg.WEB_CONTROL_PORT)\r\n elif isinstance(ctr, JoystickController):\r\n print(\"You can now move your joystick to drive your car.\")\r\n # tell the controller about the tub320x240_train\r\n ctr.set_tub(tub)\r\n\r\n if cfg.BUTTON_PRESS_NEW_TUB:\r\n def new_tub_dir():\r\n V.parts.pop()\r\n tub = th.new_tub_writer(inputs=inputs, types=types, user_meta=meta)\r\n V.add(tub, inputs=inputs, 
outputs=[\"tub320x240_train/num_records\"], run_condition='recording')\r\n ctr.set_tub(tub)\r\n\r\n ctr.set_button_down_trigger('cross', new_tub_dir)\r\n ctr.print_controls()\r\n\r\n # run the vehicle for 20 seconds\r\n V.start(rate_hz=cfg.DRIVE_LOOP_HZ,\r\n max_loop_count=cfg.MAX_LOOPS)\r\n\r\n\r\nif __name__ == '__main__':\r\n args = docopt(__doc__)\r\n cfg = dk.load_config(myconfig=args['--myconfig'])\r\n\r\n if args['drive']:\r\n model_type = args['--type']\r\n camera_type = args['--camera']\r\n\r\n # experimental\r\n if args['--corruption'] is not None and args['--corruption'] is True:\r\n cfg.USE_CORRUPTIONS = True\r\n cfg.CORRUPTION = args['--corruption']\r\n cfg.SEVERITY = int(args['--severity'])\r\n\r\n if args['--useadversarial'] is not None and args['--useadversarial'] is True:\r\n cfg.USE_ADVERSARIAL_DRIVING = True\r\n cfg.ADV_IMAGE = args['--advimage']\r\n cfg.SEVERITY = int(args['--severity'])\r\n\r\n drive(cfg, model_path=args['--model'], use_joystick=args['--js'],\r\n model_type=model_type, camera_type=camera_type,\r\n meta=args['--meta'])\r\n\r\n if args['train']:\r\n from train import multi_train, preprocessFileList\r\n\r\n tub = args['--tub']\r\n model = args['--model']\r\n transfer = args['--transfer']\r\n model_type = args['--type']\r\n continuous = args['--continuous']\r\n aug = args['--aug']\r\n dirs = preprocessFileList(args['--file'])\r\n\r\n if tub is not None:\r\n tub_paths = [os.path.expanduser(n) for n in tub.split(',')]\r\n dirs.extend(tub_paths)\r\n\r\n if model_type is None:\r\n model_type = cfg.DEFAULT_MODEL_TYPE\r\n print(\"using default model type of\", model_type)\r\n\r\n multi_train(cfg, dirs, model, transfer, model_type, continuous, aug)\r\n"
] | [
[
"tensorflow.python.keras.models.model_from_json"
]
] |
tanguy-magne/ritm_interactive_segmentation | [
"53e55253a1ff879f370525bdbddf026402a19ff9"
] | [
"inria-aerial-image-labeling/model.py"
] | [
"\"\"\"\nINCLUDE ONLY, DO NOT EXECUTE\n\"\"\"\nfrom settings import *\nimport numpy as np\nimport tensorflow as tf\nimport segmentation_models as sm\n\n\ndef create_model(border=False, trainable_encoder=False):\n if model_type == 'unet':\n model = sm.Unet(backbone_name=backbone,\n input_shape=(image_size, image_size, 3),\n classes=2 if border else 1,\n activation='sigmoid',\n encoder_weights='imagenet',\n encoder_freeze=not trainable_encoder,\n encoder_features='default',\n decoder_block_type='upsampling',\n decoder_filters=(256, 128, 64, 32, 16),\n decoder_use_batchnorm=True)\n elif model_type == 'fpn':\n model = sm.FPN(backbone_name=backbone,\n input_shape=(image_size, image_size, 3),\n classes=2 if border else 1,\n activation='sigmoid',\n encoder_weights='imagenet',\n encoder_freeze=not trainable_encoder,\n encoder_features='default',\n pyramid_block_filters=256,\n pyramid_use_batchnorm=True,\n pyramid_aggregation='concat',\n pyramid_dropout=None)\n elif model_type == 'linknet':\n model = sm.Linknet(backbone_name=backbone,\n input_shape=(image_size, image_size, 3),\n classes=2 if border else 1,\n activation='sigmoid',\n encoder_weights='imagenet',\n encoder_freeze=not trainable_encoder,\n encoder_features='default',\n decoder_block_type='upsampling',\n decoder_filters=(None, None, None, None, 16),\n decoder_use_batchnorm=True)\n elif model_type == 'pspnet':\n model = sm.PSPNet(backbone_name=backbone,\n input_shape=(image_size, image_size, 3),\n classes=2 if border else 1,\n activation='sigmoid',\n encoder_weights='imagenet',\n encoder_freeze=not trainable_encoder,\n downsample_factor=8,\n psp_conv_filters=512,\n psp_pooling_type='avg',\n psp_use_batchnorm=True,\n psp_dropout=None)\n else:\n print('Invalid segmentation model type')\n exit(0)\n return model\n\n\npreprocessing = sm.get_preprocessing(backbone)\niou = sm.metrics.IOUScore(per_image=False)\n\n\ndef bce_dice_loss(y_true, y_pred):\n bce = tf.keras.losses.binary_crossentropy(y_true, y_pred)\n y_true_ = y_true[..., 0] # !!! only first channel\n y_pred_ = y_pred[..., 0] # !!! only first channel\n dice = sm.losses.dice_loss(y_true_, y_pred_)\n return bce + dice\n\n\ndef iou_fc(y_true, y_pred):\n y_true_ = y_true[..., 0] # !!! only first channel\n y_pred_ = y_pred[..., 0] # !!! only first channel\n return iou(y_true_, y_pred_)\n\n\ndef acc_fc(y_true, y_pred):\n y_true_ = y_true[..., 0] # !!! only first channel\n y_pred_ = y_pred[..., 0] # !!! only first channel\n return tf.keras.metrics.binary_accuracy(y_true_, y_pred_)\n\n\ndef acc_iou_fc(y_true, y_pred):\n return (acc_fc(y_true, y_pred) + iou_fc(y_true, y_pred)) / 2\n\n\ndef acc_img(gt, pred):\n gt_ = np.clip(gt, 0, 1)\n pred_ = np.clip(pred, 0, 1)\n return np.mean(np.equal(gt_, pred_))\n\n\ndef iou_img(gt, pred):\n gt_ = np.clip(gt, 0, 1)\n pred_ = np.clip(pred, 0, 1)\n intersection = np.sum(np.minimum(gt_, pred_))\n union = np.sum(np.maximum(gt_, pred_))\n if union > 0:\n return intersection / union\n return 1\n"
] | [
[
"numpy.equal",
"numpy.minimum",
"numpy.clip",
"tensorflow.keras.metrics.binary_accuracy",
"tensorflow.keras.losses.binary_crossentropy",
"numpy.maximum"
]
] |
saethlin/unyt | [
"e25848166d8739a8ec0ba08b536fe7a81b37bee0"
] | [
"unyt/_physical_ratios.py"
] | [
"import numpy as np\n\n#\n# Physical Constants and Units Conversion Factors\n#\n# Values for these constants, unless otherwise noted, are drawn from IAU,\n# IUPAC, NIST, and NASA data, whichever is newer.\n# http://maia.usno.navy.mil/NSFA/IAU2009_consts.html\n# http://goldbook.iupac.org/list_goldbook_phys_constants_defs.html\n# http://physics.nist.gov/cuu/Constants/index.html\n# http://nssdc.gsfc.nasa.gov/planetary/factsheet/jupiterfact.html\n\n# Elementary masses\nmass_electron_kg = 9.10938291e-31\namu_kg = 1.660538921e-27\namu_grams = amu_kg * 1.0e3\nmass_hydrogen_kg = 1.007947 * amu_kg\nmass_proton_kg = 1.672623110e-27\n\n# Solar values (see Mamajek 2012)\n# https://sites.google.com/site/mamajeksstarnotes/bc-scale\nmass_sun_kg = 1.98841586e30\ntemp_sun_kelvin = 5870.0\nluminosity_sun_watts = 3.8270e26\n\n# Consistent with solar abundances used in Cloudy\nmetallicity_sun = 0.01295\n\n# Conversion Factors: X au * mpc_per_au = Y mpc\n# length\nmpc_per_mpc = 1e0\nmpc_per_kpc = 1e-3\nmpc_per_pc = 1e-6\nmpc_per_au = 4.84813682e-12\nmpc_per_rsun = 2.253962e-14\nmpc_per_rearth = 2.06470307893e-16\nmpc_per_rjup = 2.26566120943e-15\nmpc_per_miles = 5.21552871e-20\nmpc_per_km = 3.24077929e-20\nmpc_per_m = 3.24077929e-23\nkpc_per_m = mpc_per_m / mpc_per_kpc\npc_per_m = mpc_per_m / mpc_per_pc\nkm_per_pc = 3.08567758e13\ncm_per_pc = 3.08567758e18\ncm_per_mpc = 3.08567758e21\nkm_per_m = 1e-3\nkm_per_cm = 1e-5\nm_per_cm = 1e-2\nly_per_m = 1.05702341e-16\nrsun_per_m = 1.4378145e-9\nrearth_per_m = 1.56961033e-7 # Mean (volumetric) radius\nrjup_per_m = 1.43039006737e-8 # Mean (volumetric) radius\nau_per_m = 6.68458712e-12\nang_per_m = 1.0e10\n\nm_per_fpc = 0.0324077929\n\nkpc_per_mpc = 1.0 / mpc_per_kpc\npc_per_mpc = 1.0 / mpc_per_pc\nau_per_mpc = 1.0 / mpc_per_au\nrsun_per_mpc = 1.0 / mpc_per_rsun\nrearth_per_mpc = 1.0 / mpc_per_rearth\nrjup_per_mpc = 1.0 / mpc_per_rjup\nmiles_per_mpc = 1.0 / mpc_per_miles\nkm_per_mpc = 1.0 / mpc_per_km\nm_per_mpc = 1.0 / mpc_per_m\nm_per_kpc = 1.0 / kpc_per_m\nm_per_km = 1.0 / km_per_m\ncm_per_km = 1.0 / km_per_cm\ncm_per_m = 1.0 / m_per_cm\npc_per_km = 1.0 / km_per_pc\nm_per_pc = 1.0 / pc_per_m\nm_per_ly = 1.0 / ly_per_m\nm_per_rsun = 1.0 / rsun_per_m\nm_per_rearth = 1.0 / rearth_per_m\nm_per_rjup = 1.0 / rjup_per_m\nm_per_au = 1.0 / au_per_m\nm_per_ang = 1.0 / ang_per_m\n\n# time\n# \"IAU Style Manual\" by G.A. Wilkins, Comm. 5, in IAU Transactions XXB (1989)\nsec_per_Gyr = 31.5576e15\nsec_per_Myr = 31.5576e12\nsec_per_kyr = 31.5576e9\nsec_per_year = 31.5576e6\nsec_per_day = 86400.0\nsec_per_hr = 3600.0\nsec_per_min = 60.0\nday_per_year = 365.25\n\n# velocities, accelerations\nspeed_of_light_m_per_s = 2.99792458e8\nspeed_of_light_cm_per_s = speed_of_light_m_per_s * 100.0\nstandard_gravity_m_per_s2 = 9.80665\n\n# some constants\nnewton_mks = 6.67408e-11\nplanck_mks = 6.62606957e-34\n# permeability of Free Space\nmu_0 = 4.0e-7 * np.pi\n# permittivity of Free Space\neps_0 = 1.0 / (speed_of_light_m_per_s ** 2 * mu_0)\navogadros_number = 6.02214085774 * 10 ** 23\n\n# temperature / energy\nboltzmann_constant_J_per_K = 1.3806488e-23\nerg_per_eV = 1.602176562e-12\nJ_per_eV = erg_per_eV * 1.0e-7\nerg_per_keV = erg_per_eV * 1.0e3\nJ_per_keV = J_per_eV * 1.0e3\nK_per_keV = J_per_keV / boltzmann_constant_J_per_K\nkeV_per_K = 1.0 / K_per_keV\nkeV_per_erg = 1.0 / erg_per_keV\neV_per_erg = 1.0 / erg_per_eV\nkelvin_per_rankine = 5.0 / 9.0\nwatt_per_horsepower = 745.69987158227022\nerg_per_s_per_watt = 1e7\n\n# Solar System masses\n# Standish, E.M. 
(1995) \"Report of the IAU WGAS Sub-Group on Numerical\n# Standards\", in Highlights of Astronomy (I. Appenzeller, ed.), Table 1,\n# Kluwer Academic Publishers, Dordrecht.\n# REMARK: following masses include whole systems (planet + moons)\nmass_jupiter_kg = mass_sun_kg / 1047.3486\nmass_mercury_kg = mass_sun_kg / 6023600.0\nmass_venus_kg = mass_sun_kg / 408523.71\nmass_earth_kg = mass_sun_kg / 328900.56\nmass_mars_kg = mass_sun_kg / 3098708.0\nmass_saturn_kg = mass_sun_kg / 3497.898\nmass_uranus_kg = mass_sun_kg / 22902.98\nmass_neptune_kg = mass_sun_kg / 19412.24\n\n# flux\njansky_mks = 1.0e-26\n# Cosmological constants\n# Calculated with H = 100 km/s/Mpc, value given in units of h^2 g cm^-3\n# Multiply by h^2 to get the critical density in units of g cm^-3\nrho_crit_g_cm3_h2 = 1.8788e-29\nprimordial_H_mass_fraction = 0.76\n\n# Misc. Approximations\nmass_mean_atomic_cosmology = 1.22\nmass_mean_atomic_galactic = 2.3\n\n# Miscellaneous\nHUGE = 1.0e90\nTINY = 1.0e-40\n\n# Planck units\nhbar_mks = 0.5 * planck_mks / np.pi\nplanck_mass_kg = np.sqrt(hbar_mks * speed_of_light_m_per_s / newton_mks)\nplanck_length_m = np.sqrt(hbar_mks * newton_mks / speed_of_light_m_per_s ** 3)\nplanck_time_s = planck_length_m / speed_of_light_m_per_s\nplanck_energy_J = planck_mass_kg * speed_of_light_m_per_s * speed_of_light_m_per_s\nplanck_temperature_K = planck_energy_J / boltzmann_constant_J_per_K\nplanck_charge_C = np.sqrt(4.0 * np.pi * eps_0 * hbar_mks * speed_of_light_m_per_s)\n\n# Imperial and other non-metric units\nkg_per_pound = 0.45359237\npascal_per_atm = 101325.0\nm_per_inch = 0.0254\nm_per_ft = 0.3048\n"
] | [
[
"numpy.sqrt"
]
] |
rominashirazi/SpineSegmentation | [
"fb08122ac6d9a598b60aecb4f1a1a2a31fba96ab"
] | [
"segmentation_test/Scripts/medpy_split_xd_to_xminus1d.py"
] | [
"#!c:\\users\\hooma\\documents\\github\\spinesegmentation\\segmentation_test\\scripts\\python.exe\n\n\"\"\"\nSplits a XD into a number of (X-1)D volumes.\n\nCopyright (C) 2013 Oskar Maier\n\nThis program is free software: you can redistribute it and/or modify\nit under the terms of the GNU General Public License as published by\nthe Free Software Foundation, either version 3 of the License, or\n(at your option) any later version.\n\nThis program is distributed in the hope that it will be useful,\nbut WITHOUT ANY WARRANTY; without even the implied warranty of\nMERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\nGNU General Public License for more details.\n\nYou should have received a copy of the GNU General Public License\nalong with this program. If not, see <http://www.gnu.org/licenses/>.\n\"\"\"\n\n# build-in modules\nimport argparse\nimport logging\n\n# third-party modules\nimport scipy\n\n# path changes\n\n# own modules\nfrom medpy.io import load, save, header\nfrom medpy.core import Logger\nfrom medpy.core.exceptions import ArgumentError\n\n\n# information\n__author__ = \"Oskar Maier\"\n__version__ = \"r0.1.2, 2012-05-25\"\n__email__ = \"[email protected]\"\n__status__ = \"Release\"\n__description__ = \"\"\"\n Splits a XD into a number of (X-1)D volumes.\n \n One common use case is the creation of manual markers for 4D images.\n This script allows to split a 4D into a number of either spatial or\n temporal 3D volumes, for which one then can create the markers. These\n can be rejoined using the join_xd_to_xplus1d.py script.\n \n Copyright (C) 2013 Oskar Maier\n This program comes with ABSOLUTELY NO WARRANTY; This is free software,\n and you are welcome to redistribute it under certain conditions; see\n the LICENSE file or <http://www.gnu.org/licenses/> for details. 
\n \"\"\"\n\n# code\ndef main():\n # parse cmd arguments\n parser = getParser()\n parser.parse_args()\n args = getArguments(parser)\n \n # prepare logger\n logger = Logger.getInstance()\n if args.debug: logger.setLevel(logging.DEBUG)\n elif args.verbose: logger.setLevel(logging.INFO)\n \n # load input image\n data_input, header_input = load(args.input)\n \n # check if the supplied dimension is valid\n if args.dimension >= data_input.ndim or args.dimension < 0:\n raise ArgumentError('The supplied cut-dimension {} exceeds the image dimensionality of 0 to {}.'.format(args.dimension, data_input.ndim - 1))\n \n # prepare output file string\n name_output = args.output.replace('{}', '{:03d}')\n \n # compute the new the voxel spacing\n spacing = list(header.get_pixel_spacing(header_input))\n del spacing[args.dimension]\n \n # iterate over the cut dimension\n slices = data_input.ndim * [slice(None)]\n for idx in range(data_input.shape[args.dimension]):\n # cut the current slice from the original image \n slices[args.dimension] = slice(idx, idx + 1)\n data_output = scipy.squeeze(data_input[slices])\n # update the header and set the voxel spacing\n header_input.set_voxel_spacing(spacing)\n # save current slice\n save(data_output, name_output.format(idx), header_input, args.force)\n \n logger.info(\"Successfully terminated.\")\n \ndef getArguments(parser):\n \"Provides additional validation of the arguments collected by argparse.\"\n args = parser.parse_args()\n if not '{}' in args.output:\n raise argparse.ArgumentError(args.output, 'The output argument string must contain the sequence \"{}\".')\n return args\n\ndef getParser():\n \"Creates and returns the argparse parser object.\"\n parser = argparse.ArgumentParser(description=__description__)\n parser.add_argument('input', help='Source volume.')\n parser.add_argument('output', help='Target volumes. Has to include the sequence \"{}\" in the place where the volume number should be placed.')\n parser.add_argument('dimension', type=int, help='The dimension along which to split (starting from 0).')\n parser.add_argument('-v', dest='verbose', action='store_true', help='Display more information.')\n parser.add_argument('-d', dest='debug', action='store_true', help='Display debug information.')\n parser.add_argument('-f', dest='force', action='store_true', help='Silently override existing output images.')\n return parser \n\nif __name__ == \"__main__\":\n main()\n"
] | [
[
"scipy.squeeze"
]
] |
Pegasus-01/Data-manipulation-and-merging-with-pandas | [
"5346678d25820d9fe352bd70294484ecd96fccf7"
] | [
"02-Creating&VisualizingDataframes/05-FindingMissingValues.py"
] | [
"# Import matplotlib.pyplot with alias plt\r\nimport matplotlib.pyplot as plt\r\n\r\n# Check individual values for missing values\r\nprint(avocados_2016.isna())\r\n\r\n# Check each column for missing values\r\nprint(avocados_2016.isna().any())\r\n\r\n# Bar plot of missing values by variable\r\navocados_2016.isna().sum().plot(kind=\"bar\")\r\n\r\n# Show plot\r\nplt.show()"
] | [
[
"matplotlib.pyplot.show"
]
] |
audeering/audformat | [
"a9ffce03e333e21a1ceb0db1d13e9f1fb5b61cca"
] | [
"audformat/core/database.py"
] | [
"import datetime\nimport itertools\nimport os\nimport shutil\nimport typing\n\nimport audiofile\nimport oyaml as yaml\ntry:\n from yaml import CLoader as Loader\nexcept ImportError: # pragma: nocover\n from yaml import Loader\nimport pandas as pd\n\nimport audeer\n\nfrom audformat.core import define\nfrom audformat.core import utils\nfrom audformat.core.column import Column\nfrom audformat.core.common import HeaderBase, HeaderDict\nfrom audformat.core.errors import BadIdError\nfrom audformat.core.media import Media\nfrom audformat.core.rater import Rater\nfrom audformat.core.scheme import Scheme\nfrom audformat.core.split import Split\nfrom audformat.core.table import Table\n\n\nclass Database(HeaderBase):\n r\"\"\"Database object.\n\n A database consists of a header holding raters,\n schemes, splits, and other meta information.\n In addition it links to a number of tables\n listing files and labels.\n\n Args:\n name: name of database\n source: data source (e.g. link to website)\n usage: permission of usage, see :class:`audformat.define.Usage`.\n Set to ``'other'``\n if none of the other fields fit.\n expires: expiry date\n languages: list of languages\n description: database description\n author: database author(s)\n organization: organization(s) maintaining the database\n license: database license.\n You can use a custom license\n or pick one from :attr:`audformat.define.License`.\n In the later case,\n ``license_url`` will be automatically set\n if it is not given\n license_url: URL of database license\n meta: additional meta fields\n\n Raises:\n BadValueError: if an invalid ``usage`` value is passed\n ValueError: if language is unknown\n\n Example:\n >>> db = Database(\n ... 'mydb',\n ... 'https://www.audeering.com/',\n ... define.Usage.COMMERCIAL,\n ... languages=['English', 'de'],\n ... )\n >>> db\n name: mydb\n source: https://www.audeering.com/\n usage: commercial\n languages: [eng, deu]\n >>> labels = ['positive', 'neutral', 'negative']\n >>> db.schemes['emotion'] = Scheme(\n ... labels=labels,\n ... )\n >>> db.raters['rater'] = Rater()\n >>> db.media['audio'] = Media(\n ... define.MediaType.AUDIO,\n ... format='wav',\n ... sampling_rate=16000,\n ... )\n >>> db['table'] = Table(\n ... media_id='audio',\n ... )\n >>> db['table']['column'] = Column(\n ... scheme_id='emotion',\n ... rater_id='rater',\n ... 
)\n >>> db\n name: mydb\n source: https://www.audeering.com/\n usage: commercial\n languages: [eng, deu]\n media:\n audio: {type: audio, format: wav, sampling_rate: 16000}\n raters:\n rater: {type: human}\n schemes:\n emotion:\n dtype: str\n labels: [positive, neutral, negative]\n tables:\n table:\n type: filewise\n media_id: audio\n columns:\n column: {scheme_id: emotion, rater_id: rater}\n\n \"\"\"\n def __init__(\n self,\n name: str,\n source: str = '',\n usage: str = define.Usage.UNRESTRICTED,\n *,\n expires: datetime.date = None,\n languages: typing.Union[str, typing.Sequence[str]] = None,\n description: str = None,\n author: str = None,\n organization: str = None,\n license: typing.Union[str, define.License] = None,\n license_url: str = None,\n meta: dict = None,\n ):\n define.Usage.assert_has_attribute_value(usage)\n if (\n license_url is None\n and license in define.License.attribute_values()\n ):\n license_url = define.LICENSE_URLS[license]\n\n languages = [] if languages is None else audeer.to_list(languages)\n for idx in range(len(languages)):\n languages[idx] = utils.map_language(languages[idx])\n\n self.name = name\n r\"\"\"Name of database\"\"\"\n super().__init__(description=description, meta=meta)\n self.source = source\n r\"\"\"Database source\"\"\"\n self.usage = usage\n r\"\"\"Usage permission\"\"\"\n self.expires = expires\n r\"\"\"Expiry date\"\"\"\n self.languages = languages\n r\"\"\"List of included languages\"\"\"\n self.author = author\n r\"\"\"Author(s) of database\"\"\"\n self.organization = organization\n r\"\"\"Organization that created the database\"\"\"\n self.license = license\n r\"\"\"License of database\"\"\"\n self.license_url = license_url\n r\"\"\"URL of database license\"\"\"\n self.media = HeaderDict(value_type=Media)\n r\"\"\"Dictionary of media information\"\"\"\n self.raters = HeaderDict(value_type=Rater)\n r\"\"\"Dictionary of raters\"\"\"\n self.schemes = HeaderDict(\n value_type=Scheme,\n set_callback=self._set_scheme,\n )\n r\"\"\"Dictionary of schemes\"\"\"\n self.splits = HeaderDict(value_type=Split)\n r\"\"\"Dictionary of splits\"\"\"\n self.tables = HeaderDict(\n value_type=Table,\n set_callback=self._set_table,\n )\n r\"\"\"Dictionary of tables\"\"\"\n\n self._files_duration = {}\n self._name = None\n self._root = None\n\n @property\n def files(self) -> pd.Index:\n r\"\"\"Files referenced in the database.\n\n Includes files from filewise and segmented tables.\n\n Returns:\n files\n\n \"\"\"\n index = utils.union(\n [table.files.drop_duplicates() for table in self.tables.values()]\n )\n return index\n\n @property\n def is_portable(\n self,\n ) -> bool:\n r\"\"\"Check if a database can be moved to another location.\n\n To be portable,\n media must not be referenced with an absolute path,\n or contain ``.`` or ``..`` to specify a folder.\n If a database is portable\n it can be moved to another folder\n or updated by another database.\n\n Returns:\n ``True`` if the database is portable\n\n \"\"\"\n if len(self.files) == 0:\n return True\n return not any(\n (\n os.path.isabs(f)\n or f.startswith(f'.{os.path.sep}')\n or f'{os.path.sep}.{os.path.sep}' in f\n or f.startswith(f'..{os.path.sep}')\n or f'{os.path.sep}..{os.path.sep}' in f\n )\n for f in self.files\n )\n\n @property\n def root(self) -> typing.Optional[str]:\n r\"\"\"Database root directory.\n\n Returns ``None`` if database has not been stored yet.\n\n Returns:\n root directory\n\n \"\"\"\n return self._root\n\n @property\n def segments(self) -> pd.MultiIndex:\n r\"\"\"Segments 
referenced in the database.\n\n Returns:\n segments\n\n \"\"\"\n index = utils.union(\n [\n table.df.index\n for table in self.tables.values()\n if table.is_segmented\n ]\n )\n return index\n\n def drop_files(\n self,\n files: typing.Union[\n str,\n typing.Sequence[str],\n typing.Callable[[str], bool],\n ],\n num_workers: typing.Optional[int] = 1,\n verbose: bool = False,\n ):\n r\"\"\"Drop files from tables.\n\n Iterate through all tables and remove rows with a reference to\n listed or matching files.\n\n Args:\n files: list of files or condition function\n num_workers: number of parallel jobs.\n If ``None`` will be set to the number of processors\n on the machine multiplied by 5\n verbose: show progress bar\n\n \"\"\"\n audeer.run_tasks(\n lambda x: x.drop_files(files, inplace=True),\n params=[([table], {}) for table in self.tables.values()],\n num_workers=num_workers,\n progress_bar=verbose,\n task_description='Drop files',\n )\n\n def drop_tables(\n self,\n table_ids: typing.Union[str, typing.Sequence[str]],\n ):\n r\"\"\"Drop tables by ID.\n\n Args:\n table_ids: table IDs to drop\n\n \"\"\"\n if isinstance(table_ids, str):\n table_ids = [table_ids]\n for table_id in table_ids:\n self.tables.pop(table_id)\n\n def files_duration(\n self,\n files: typing.Union[str, typing.Sequence[str]],\n *,\n root: str = None,\n ) -> pd.Series:\n r\"\"\"Duration of files in the database.\n\n Use ``db.files_duration(db.files).sum()``\n to get the total duration of all files in a database.\n Or ``db.files_duration(db[table_id].files).sum()``\n to get the total duration of all files assigned to a table.\n\n .. note:: Durations are cached,\n i.e. changing the files on disk after calling\n this function can lead to wrong results.\n The cache is cleared when the\n database is reloaded from disk.\n\n Args:\n files: file names\n root: root directory under which the files are stored.\n Provide if file names are relative and\n database was not saved or loaded from disk.\n If ``None`` :attr:`audformat.Database.root` is used\n\n Returns:\n mapping from file to duration\n\n Raises:\n ValueError: if ``root`` is not set\n when using relative file names\n with a database that was not saved\n or loaded from disk\n\n \"\"\"\n root = root or self.root\n\n def duration(file: str) -> pd.Timedelta:\n\n # expand file path\n if os.path.isabs(file):\n full_file = file\n else:\n if root is None:\n raise ValueError(\n f\"Found relative file name \"\n f\"{file}, \"\n f\"but db.root is None. \"\n f\"Please save database or \"\n f\"provide a root folder.\"\n )\n full_file = os.path.join(root, file)\n\n # check cache\n full_file = audeer.safe_path(full_file)\n if full_file in self._files_duration:\n return self._files_duration[full_file]\n\n # calculate duration and cache it\n dur = audiofile.duration(full_file)\n dur = pd.to_timedelta(dur, unit='s')\n self._files_duration[full_file] = dur\n\n return dur\n\n files = audeer.to_list(files)\n y = pd.Series(\n files,\n index=files,\n name=define.IndexField.FILE,\n ).map(duration)\n\n return y\n\n def map_files(\n self,\n func: typing.Callable[[str], str],\n num_workers: typing.Optional[int] = 1,\n verbose: bool = False,\n ):\n r\"\"\"Apply function to file names in all tables.\n\n Relies on :meth:`pandas.Index.map`,\n which can be slow.\n If speed is crucial,\n consider to change the index directly.\n In the following example we prefix every file with a folder:\n\n .. 
code-block:: python\n\n root = '/root/'\n for table in db.tables.values():\n if table.is_filewise:\n table.df.index = root + table.df.index\n table.df.index.name = audformat.define.IndexField.FILE\n elif len(table.df.index) > 0:\n table.df.index.set_levels(\n root + table.df.index.levels[0],\n audformat.define.IndexField.FILE,\n inplace=True,\n )\n\n Args:\n func: map function\n num_workers: number of parallel jobs.\n If ``None`` will be set to the number of processors\n on the machine multiplied by 5\n verbose: show progress bar\n\n \"\"\"\n def job(table):\n if table.is_segmented:\n table.df.index = table.df.index.map(\n lambda x: (func(x[0]), x[1], x[2])\n )\n else:\n table.df.index = table.df.index.map(lambda x: func(x))\n\n audeer.run_tasks(\n job,\n params=[([table], {}) for table in self.tables.values()],\n num_workers=num_workers,\n progress_bar=verbose,\n task_description='Map files',\n )\n\n def pick_files(\n self,\n files: typing.Union[\n str,\n typing.Sequence[str],\n typing.Callable[[str], bool],\n ],\n num_workers: typing.Optional[int] = 1,\n verbose: bool = False,\n ):\n r\"\"\"Pick files from tables.\n\n Iterate through all tables and keep only rows with a reference\n to listed files or matching files.\n\n Args:\n files: list of files or condition function\n num_workers: number of parallel jobs.\n If ``None`` will be set to the number of processors\n on the machine multiplied by 5\n verbose: show progress bar\n\n \"\"\"\n audeer.run_tasks(\n lambda x: x.pick_files(files, inplace=True),\n params=[([table], {}) for table in self.tables.values()],\n num_workers=num_workers,\n progress_bar=verbose,\n task_description='Pick files',\n )\n\n def pick_tables(\n self,\n table_ids: typing.Union[str, typing.Sequence[str]],\n ):\n r\"\"\"Pick tables by ID.\n\n Args:\n table_ids: table IDs to pick\n\n \"\"\"\n if isinstance(table_ids, str):\n table_ids = [table_ids]\n drop_ids = []\n for table_id in list(self.tables):\n if table_id not in table_ids:\n drop_ids.append(table_id)\n self.drop_tables(drop_ids)\n\n def save(\n self,\n root: str,\n *,\n name: str = 'db',\n indent: int = 2,\n storage_format: str = define.TableStorageFormat.CSV,\n update_other_formats: bool = True,\n header_only: bool = False,\n num_workers: typing.Optional[int] = 1,\n verbose: bool = False,\n ):\n r\"\"\"Save database to disk.\n\n Creates a header ``<root>/<name>.yaml``\n and for every table a file ``<root>/<name>.<table-id>.[csv,pkl]``.\n\n Existing files will be overwritten.\n If ``update_other_formats`` is provided,\n it will overwrite all existing files in others formats as well.\n\n Args:\n root: root directory (possibly created)\n name: base name of files\n indent: indent size\n storage_format: storage format of tables.\n See :class:`audformat.define.TableStorageFormat`\n for available formats\n update_other_formats: if ``True`` it will not only save\n to the given ``storage_format``,\n but update all files stored in other storage formats as well\n header_only: store header only\n num_workers: number of parallel jobs.\n If ``None`` will be set to the number of processors\n on the machine multiplied by 5\n verbose: show progress bar\n\n \"\"\"\n root = audeer.mkdir(root)\n\n ext = '.yaml'\n header_path = os.path.join(root, name + ext)\n with open(header_path, 'w') as fp:\n self.dump(fp, indent=indent)\n\n if not header_only:\n\n def job(table_id, table):\n table_path = os.path.join(root, name + '.' 
+ table_id)\n table.save(\n table_path,\n storage_format=storage_format,\n update_other_formats=update_other_formats,\n )\n\n audeer.run_tasks(\n job,\n params=[\n ([table_id, table], {})\n for table_id, table in self.tables.items()\n ],\n num_workers=num_workers,\n progress_bar=verbose,\n task_description='Save tables',\n )\n\n self._name = name\n self._root = root\n\n def update(\n self,\n others: typing.Union['Database', typing.Sequence['Database']],\n *,\n copy_media: bool = False,\n overwrite: bool = False,\n ) -> 'Database':\n r\"\"\"Update database with other database(s).\n\n In order to update a database, *license* and *usage* have to match.\n *Media*, *raters*, *schemes* and *splits* that are not part of\n the database yet are added. Other fields will be updated by\n applying the following rules:\n\n ============= =====================================\n **field** **result**\n ------------- -------------------------------------\n author 'db.author, other.author'\n description db.description\n expires min(db.expires, other.expires)\n languages db.languages + other.languages\n license_url db.license_url\n meta db.meta + other.meta\n name db.name\n organization 'db.organization, other.organization'\n source 'db.source, other.source'\n ============= =====================================\n\n Args:\n others: database object(s)\n copy_media: if ``True`` it copies the media files\n associated with ``others`` to the current database root folder\n overwrite: overwrite table values where indices overlap\n\n Returns:\n the updated database\n\n Raises:\n ValueError: if database has different license or usage\n ValueError: if different media, rater, scheme or split with\n same ID is found\n ValueError: if table data cannot be combined (e.g. values in\n same position overlap)\n RuntimeError: if ``copy_media=True``,\n but one of the involved databases was not saved\n (contains files but no root folder)\n RuntimeError: if any involved database is not portable\n\n \"\"\"\n\n if isinstance(others, Database):\n others = [others]\n\n def assert_equal(\n other: Database,\n field: str,\n ):\n r\"\"\"Assert fields are equal.\"\"\"\n value1 = self.__dict__[field]\n value2 = other.__dict__[field]\n if value1 != value2:\n raise ValueError(\n \"Cannot update database, \"\n \"found different value for \"\n f\"'db.{field}':\\n\"\n f\"{value1}\\n\"\n \"!=\\n\"\n f\"{value2}\"\n )\n\n def join_dict(\n field: str,\n ds: typing.Sequence[dict],\n ):\n r\"\"\"Join list of dictionaries.\n\n Raise error if dictionaries have same key with different values.\n\n \"\"\"\n d = ds[0].copy()\n for d_other in ds[1:]:\n for key, value in d_other.items():\n if key in d:\n if d[key] != value:\n raise ValueError(\n \"Cannot update database, \"\n \"found different value for \"\n f\"'db.{field}['{key}']':\\n\"\n f\"{d[key]}\\n\"\n \"!=\\n\"\n f\"{d_other[key]}\"\n )\n else:\n d[key] = value\n return d\n\n def join_field(\n other: Database,\n field: str,\n op: typing.Callable,\n ):\n r\"\"\"Join two fields of db header.\"\"\"\n value1 = self.__dict__[field]\n value2 = other.__dict__[field]\n if value1 != value2:\n if value1 and value2:\n self.__dict__[field] = op([value1, value2])\n elif value1:\n self.__dict__[field] = value1\n elif value2:\n self.__dict__[field] = value2\n\n # assert equal fields\n for other in others:\n assert_equal(other, 'license')\n assert_equal(other, 'usage')\n\n # can only join databases with relatvie paths\n for database in [self] + others:\n if not database.is_portable:\n raise RuntimeError(\n f\"You can 
only update with databases that are portable. \"\n f\"The database '{database.name}' is not portable.\"\n )\n\n # join fields\n for other in others:\n join_field(other, 'author', ', '.join)\n join_field(other, 'expires', min)\n join_field(other, 'languages', itertools.chain.from_iterable)\n # remove duplicates whilst preserving order\n self.languages = list(dict.fromkeys(self.languages))\n join_field(other, 'media', lambda x: join_dict('media', x))\n join_field(other, 'meta', lambda x: join_dict('meta', x))\n join_field(other, 'organization', ', '.join)\n join_field(other, 'schemes', lambda x: join_dict('schemes', x))\n join_field(other, 'source', ', '.join)\n join_field(other, 'splits', lambda x: join_dict('splits', x))\n join_field(other, 'raters', lambda x: join_dict('raters', x))\n\n # join tables\n for other in others:\n for table_id, table in other.tables.items():\n if table_id in self.tables:\n self[table_id].update(table, overwrite=overwrite)\n else:\n self[table_id] = table.copy()\n\n # copy media files\n\n if copy_media:\n if self.root is None:\n raise RuntimeError(\n f\"You can only update a saved database. \"\n f\"'{self.name}' was not saved yet.\"\n )\n for other in others:\n if len(other.files) > 0 and other.root is None:\n raise RuntimeError(\n f\"You can only update with saved databases. \"\n f\"The database '{other.name}' was not saved yet.\"\n )\n for file in other.files:\n src_file = os.path.join(other.root, file)\n dst_file = os.path.join(self.root, file)\n dst_dir = os.path.dirname(dst_file)\n audeer.mkdir(dst_dir)\n shutil.copy(src_file, dst_file)\n\n return self\n\n def __contains__(\n self,\n table_id: str,\n ) -> bool:\n r\"\"\"Check if table exists.\n\n Args:\n table_id: table identifier\n\n \"\"\"\n return table_id in self.tables\n\n def __getitem__(\n self,\n table_id: str,\n ) -> Table:\n r\"\"\"Get table from database.\n\n Args:\n table_id: table identifier\n\n \"\"\"\n return self.tables[table_id]\n\n def __eq__(\n self,\n other: 'Database',\n ) -> bool:\n if self.dump() != other.dump():\n return False\n for table_id in self.tables:\n if self[table_id] != other[table_id]:\n return False\n return True\n\n def __setitem__(\n self,\n table_id: str,\n table: Table,\n ) -> Table:\n r\"\"\"Add table to database.\n\n Args:\n table_id: table identifier\n table: the table\n\n Raises:\n BadIdError: if table has a ``split_id`` or ``media_id``,\n which is not specified in the underlying database\n\n \"\"\"\n self.tables[table_id] = table\n return table\n\n @staticmethod\n def load(\n root: str,\n *,\n name: str = 'db',\n load_data: bool = False,\n num_workers: typing.Optional[int] = 1,\n verbose: bool = False,\n ) -> 'Database':\n r\"\"\"Load database from disk.\n\n Expects a header ``<root>/<name>.yaml``\n and for every table a file ``<root>/<name>.<table-id>.[csv|pkl]``\n Media files should be located under ``root``.\n\n Args:\n root: root directory\n name: base name of header and table files\n load_data: if ``False``,\n :class:`audformat.Table`\n data is only loaded on demand,\n e.g. 
when\n :meth:`audformat.Table.get`\n is called for the first time.\n Set to ``True`` to load all\n :class:`audformat.Table`\n data immediately\n num_workers: number of parallel jobs.\n If ``None`` will be set to the number of processors\n on the machine multiplied by 5\n verbose: show progress bar\n\n Returns:\n database object\n\n \"\"\"\n ext = '.yaml'\n root = audeer.safe_path(root)\n path = os.path.join(root, name + ext)\n\n if not os.path.exists(path):\n raise FileNotFoundError(path)\n\n with open(path, 'r') as fp:\n\n header = yaml.load(fp, Loader=Loader)\n db = Database.load_header_from_yaml(header)\n\n if 'tables' in header and header['tables']:\n\n if load_data:\n\n def job(table_id):\n table = db[table_id]\n path = os.path.join(root, name + '.' + table_id)\n table.load(path)\n\n # load all tables into memory\n audeer.run_tasks(\n job,\n params=[\n ([table_id], {}) for table_id in header['tables']\n ],\n num_workers=num_workers,\n progress_bar=verbose,\n task_description='Load tables',\n )\n\n else:\n\n # signal that table data is not loaded\n # by setting the DataFrame to None\n for table_id in header['tables']:\n db[table_id]._df = None\n\n db._name = name\n db._root = root\n\n return db\n\n @staticmethod\n def load_header_from_yaml(header: dict) -> 'Database':\n r\"\"\"Load database header from YAML.\n\n Args:\n header: YAML header definition\n\n Returns:\n database object\n\n \"\"\"\n # for backward compatibility\n if len(header) == 1: # pragma: no cover\n id = next(iter(header))\n header = header[id]\n header['name'] = id\n\n db = Database(\n name=header['name'],\n source=header['source'],\n usage=header['usage'])\n db.from_dict(header, ignore_keys=['media', 'raters', 'schemes',\n 'tables', 'splits'])\n\n if 'media' in header and header['media']:\n for media_id, media_d in header['media'].items():\n media = Media()\n media.from_dict(media_d)\n db.media[media_id] = media\n\n if 'raters' in header and header['raters']:\n for rater_id, rater_d in header['raters'].items():\n rater = Rater()\n rater.from_dict(rater_d)\n db.raters[rater_id] = rater\n\n if 'schemes' in header and header['schemes']:\n for scheme_id, scheme_d in header['schemes'].items():\n scheme = Scheme()\n scheme.from_dict(scheme_d)\n db.schemes[scheme_id] = scheme\n\n if 'splits' in header and header['splits']:\n for split_id, split_d in header['splits'].items():\n split = Split()\n split.from_dict(split_d)\n db.splits[split_id] = split\n\n if 'tables' in header and header['tables']:\n for table_id, table_d in header['tables'].items():\n table = Table()\n table.from_dict(table_d, ignore_keys=['is_segmented',\n 'columns'])\n if 'columns' in table_d and table_d['columns']:\n tmp_callback = table.columns.set_callback\n table.columns.set_callback = None\n for column_id, column_d in \\\n table_d['columns'].items():\n column = Column()\n column.from_dict(\n column_d, ignore_keys=['has_confidence']\n )\n column._id = column_id\n column._table = table\n table.columns[column_id] = column\n\n # for backward compatibility we insert\n # confidences as a regular column\n if 'has_confidence' in column_d: # pragma: no cover\n column = Column()\n column._id = '@' + column_id\n column._table = table\n table.columns['@' + column_id] = column\n\n table.columns.set_callback = tmp_callback\n db[table_id] = table\n\n return db\n\n def _set_scheme(\n self,\n scheme_id: str,\n scheme: Scheme,\n ) -> Scheme:\n scheme._db = self\n scheme._id = scheme_id\n return scheme\n\n def _set_table(\n self,\n table_id: str,\n table: Table,\n ) -> 
Table:\n if table.split_id is not None and table.split_id not in self.splits:\n raise BadIdError('split', table.split_id, self.splits)\n if table.media_id is not None and table.media_id not in self.media:\n raise BadIdError('media', table.media_id, self.media)\n table._db = self\n table._id = table_id\n return table\n"
] | [
[
"pandas.to_timedelta",
"pandas.Series"
]
] |
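The row above carries the `audformat` Database header class (construction, `save`/`load`, `update`, file handling). Below is a minimal usage sketch, assuming the published `audformat` package API; the database name, scheme, and file names (`f1.wav`, `f2.wav`) are placeholders taken from or modeled on the class docstring, not on any other row of this dataset.

```python
# Minimal sketch: build, save, and reload a small audformat database,
# mirroring the docstring example of the Database class above.
# Assumes the published `audformat` package; file names are hypothetical.
import audformat

db = audformat.Database(
    name='mydb',
    source='https://www.audeering.com/',
    usage=audformat.define.Usage.COMMERCIAL,
    languages=['eng', 'deu'],
)
db.media['audio'] = audformat.Media(
    type='audio', format='wav', sampling_rate=16000,
)
db.raters['rater'] = audformat.Rater()
db.schemes['emotion'] = audformat.Scheme(
    labels=['positive', 'neutral', 'negative'],
)
db['table'] = audformat.Table(
    audformat.filewise_index(['f1.wav', 'f2.wav']),
    media_id='audio',
)
db['table']['column'] = audformat.Column(
    scheme_id='emotion', rater_id='rater',
)

# save() writes <root>/db.yaml plus one table file per table id;
# load() reads the header and (lazily) the tables back.
db.save('./mydb')
db2 = audformat.Database.load('./mydb')
```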
stungkit/StockRecommendSystem | [
"020ef035e5189415d01e767f3907751e01d5cefe"
] | [
"Source/StockProcessing/Filter_Stock_US.py"
] | [
"import sys, os, time, datetime, warnings, configparser\nimport pandas as pd\nimport numpy as np\nimport talib\nimport concurrent.futures\nimport matplotlib.pyplot as plt\nfrom tqdm import tqdm\n\ncur_path = os.path.dirname(os.path.abspath(__file__))\nfor _ in range(2):\n root_path = cur_path[0:cur_path.rfind('/', 0, len(cur_path))]\n cur_path = root_path\nsys.path.append(root_path + \"/\" + 'Source/FetchData/')\nsys.path.append(root_path + \"/\" + 'Source/DataBase/')\n\nfrom Fetch_Data_Stock_US_StockList import getStocksList_US\nfrom Fetch_Data_Stock_US_Daily import updateStockData_US_Daily\nfrom Fetch_Data_Stock_US_Weekly import updateStockData_US_Weekly\nfrom Fetch_Data_Stock_US_Monthly import updateStockData_US_Monthly\nfrom DB_API import queryStock\n\ndef get_single_stock_data_daily(root_path, symbol):\n '''\n All data is from quandl wiki dataset\n Feature set: [Open High Low Close Volume Ex-Dividend Split Ratio Adj. Open Adj. High Adj. Low\n Adj. Close Adj. Volume]\n '''\n df, lastUpdateTime = queryStock(root_path, \"DB_STOCK\", \"SHEET_US\", \"_DAILY\", symbol, \"daily_update\")\n df.index = pd.to_datetime(df.index)\n\n if df.empty: \n print(\"daily empty df\", symbol)\n return df\n\n if 'adj_close' in df:\n df = df.drop('close', 1)\n df = df.rename(columns = {'adj_close':'close'})\n\n return df\n\ndef get_single_stock_data_weekly(root_path, symbol):\n '''\n All data is from quandl wiki dataset\n Feature set: [Open High Low Close Volume Ex-Dividend Split Ratio Adj. Open Adj. High Adj. Low\n Adj. Close Adj. Volume]\n '''\n df, lastUpdateTime = queryStock(root_path, \"DB_STOCK\", \"SHEET_US\", \"_WEEKLY\", symbol, \"weekly_update\")\n df.index = pd.to_datetime(df.index)\n\n if df.empty: \n print(\"weekly empty df\", symbol)\n return df\n\n if 'adj_close' in df:\n df = df.drop('close', 1)\n df = df.rename(columns = {'adj_close':'close'})\n\n return df\n\ndef get_single_stock_data_monthly(root_path, symbol):\n '''\n All data is from quandl wiki dataset\n Feature set: [Open High Low Close Volume Ex-Dividend Split Ratio Adj. Open Adj. High Adj. Low\n Adj. Close Adj. Volume]\n '''\n df, lastUpdateTime = queryStock(root_path, \"DB_STOCK\", \"SHEET_US\", \"_MONTHLY\", symbol, \"monthly_update\")\n df.index = pd.to_datetime(df.index)\n\n if df.empty: \n print(\"monthly empty df\", symbol)\n return df\n\n if 'adj_close' in df:\n df = df.drop('close', 1)\n df = df.rename(columns = {'adj_close':'close'})\n\n return df\n\ndef KDJ(df):\n low_list = df['low'].rolling(center=False,window=9).min()\n low_list.fillna(value=df['low'].expanding(min_periods=1).min(), inplace=True)\n high_list = df['high'].rolling(center=False,window=9).max()\n high_list.fillna(value=df['high'].expanding(min_periods=1).max(), inplace=True)\n rsv = (df['close'] - low_list) / (high_list - low_list) * 100\n df['kdj_k'] = rsv.ewm(min_periods=0,adjust=True,ignore_na=False,com=2).mean()\n df['kdj_d'] = df['kdj_k'].ewm(min_periods=0,adjust=True,ignore_na=False,com=2).mean()\n df['kdj_j'] = 3 * df['kdj_k'] - 2 * df['kdj_d']\n return df\n\ndef RSI(df, n=14):\n prices = df['close'].values.tolist()\n deltas = np.diff(prices)\n seed = deltas[:n+1]\n up = seed[seed>=0].sum()/n\n down = -seed[seed<0].sum()/n\n rs = up/down\n rsi = np.zeros_like(prices)\n rsi[:n] = 100. 
- 100./(1.+rs)\n\n for i in range(n, len(prices)):\n delta = deltas[i-1] # cause the diff is 1 shorter\n\n if delta>0:\n upval = delta\n downval = 0.\n else:\n upval = 0.\n downval = -delta\n\n up = (up*(n-1) + upval)/n\n down = (down*(n-1) + downval)/n\n\n rs = up/down\n rsi[i] = 100. - 100./(1.+rs)\n\n key = 'rsi_' + str(n)\n df[key] = rsi\n return df\n\ndef MACD(df, short_win=12, long_win=26, macd_win=9):\n # talib计算MACD\n prices = np.array(df['close'])\n macd_tmp = talib.MACD(prices, fastperiod=short_win, slowperiod=long_win, signalperiod=macd_win)\n df['macd_dif'] = macd_tmp[0]\n df['macd_dea'] = macd_tmp[1]\n df['macd'] = macd_tmp[2]\n return df\n\ndef corssover(input_1, input_2, index = -1):\n return (input_1[index] > input_2[index]) & (input_1[index-1] < input_2[index-1])\n\ndef ma_rule(df, type = 0, index = -1):\n default_parameters = [5, 10, 20, 30, 60, 120, 250]\n \n if type == 0:\n min_parameters, delta = 3, df['close'][-1] * 1 / 100\n elif type == 1:\n min_parameters, delta = 3, df['close'][-1] * 2 / 100\n else:\n min_parameters, delta = 3, df['close'][-1] * 3 / 100\n\n len_cnt = len(df)\n ma_parameters = [item for item in default_parameters if item <= len_cnt]\n item_cnt = len(ma_parameters)\n\n if item_cnt < min_parameters: return False\n ma_names = ['ma'+str(item) for item in ma_parameters]\n\n try:\n if not set(ma_names).issubset(df.columns): \n for idx, item in enumerate(ma_names):\n df[item] = df['close'].rolling(window=ma_parameters[idx], center=False).mean()\n except Exception as e: \n print(e)\n return False\n\n ma_array = []\n for item in ma_names:\n ma_array.append(df[item][index])\n\n ma_array = sorted(ma_array)\n min_lines_required = min_parameters - 1\n\n for index in range(min_lines_required, item_cnt):\n if (ma_array[index] - ma_array[index - min_lines_required]) < delta:\n return True\n return False\n\ndef kdj_rule(df, index = -1):\n if len(df) < 2: return False\n\n try:\n if not {'kdj_k', 'kdj_d', 'kdj_j'}.issubset(df.columns): \n df = KDJ(df)\n except Exception as e: \n print(e)\n return False\n\n return corssover(df['kdj_j'], df['kdj_d']) & (df['kdj_d'][index] > df['kdj_d'][index-1]) & (df['kdj_d'][index] < 50)\n \ndef kdj_rule_1(df, index = -1):\n if len(df) < 2: return False\n\n try:\n if not {'kdj_k', 'kdj_d', 'kdj_j'}.issubset(df.columns): \n df = KDJ(df)\n except Exception as e: \n print(e)\n return False\n\n return (df['kdj_d'][index] < 45)\n\ndef kdj_rule_2(df, index = -1):\n if len(df) < 2: return False\n\n try:\n if not {'kdj_k', 'kdj_d', 'kdj_j'}.issubset(df.columns): \n df = KDJ(df)\n except Exception as e: \n print(e)\n return False\n\n return (df['kdj_j'][index] < df['kdj_d'][index]) & (df['kdj_j'][index-1] < df['kdj_d'][index-1]) & (df['kdj_j'][index-1] < df['kdj_j'][index]) & (df['kdj_d'][index] < 40)\n\ndef kdj_rule_3(df, index = -1):\n if len(df) < 2: return False\n\n try:\n if not {'kdj_k', 'kdj_d', 'kdj_j'}.issubset(df.columns): \n df = KDJ(df)\n except Exception as e: \n print(e)\n return False\n\n return (df['kdj_j'][index] < df['kdj_d'][index]) & (df['kdj_j'][index-1] < df['kdj_d'][index-1]) & (df['kdj_j'][index-1] < df['kdj_j'][index]) & (df['kdj_d'][index] < 20)\n\n\ndef macd_rule(df, index = -1):\n try: \n if not {'macd_dif', 'macd_dea', 'macd'}.issubset(df.columns):\n df = MACD(df)\n except Exception as e: \n print(e)\n return False\n\n input_1 = -3\n input_2 = -0.2\n \n # df['macd_dif_1'] = df['macd_dif'].shift(1)\n # df['macd_dea_1'] = df['macd_dea'].shift(1)\n\n#(df['macd_dif'][-input_3:].min() < input_2) & \\\n\n return 
(df['macd_dif'][index] > input_1) & \\\n (df['macd_dif'][index] < input_2) & \\\n (df['macd_dif'][index] > df['macd_dea'][index]) & \\\n ((df['macd_dea'][index-1] > df['macd_dif'][index-1]) | (abs(df['macd_dea'][index-1] - df['macd_dif'][index-1]) < 0.007))\n\ndef macd_rule_1(df, index = -1):\n try: \n if not {'macd_dif', 'macd_dea', 'macd'}.issubset(df.columns):\n df = MACD(df)\n except Exception as e: \n print(e)\n return False\n\n return (df['macd_dif'][index] > df['macd_dea'][index]) & \\\n ((df['macd_dea'][index-1] > df['macd_dif'][index-1]) | (abs(df['macd_dea'][index-1] - df['macd_dif'][index-1]) < 0.007))\n\ndef macd_rule_2(df, index = -1):\n try: \n if not {'macd_dif', 'macd_dea', 'macd'}.issubset(df.columns):\n df = MACD(df)\n except Exception as e: \n print(e)\n return False\n\n input = 0.05\n\n return (df['macd_dif'][index] < input) & (df['macd_dea'][index] < input) \n\ndef rsi_rule(df, index = -1):\n try: \n df = RSI(df, 6)\n df = RSI(df, 12)\n df = RSI(df, 24)\n except Exception as e: \n print(e)\n return False\n\n rsi_6, rsi_12, rsi_24 = df['rsi_6'][index], df['rsi_12'][index], df['rsi_24'][index]\n\n return (rsi_6 < 20) & (rsi_12 < 20) & (rsi_24 < 30)\n \n\ndef judge_rule_daily(symbol, dataset, window, selection):\n #if ma_rule(dataset) & (macd_rule(dataset) | macd_rule(dataset, -2)):\n #if (kdj_rule(dataset) | kdj_rule(dataset, -2)) & (macd_rule(dataset) | macd_rule(dataset, -2)):\n if (kdj_rule(dataset) | kdj_rule(dataset, -2)) & ma_rule(dataset):\n #if kdj_rule(dataset) & macd_rule(dataset):\n #if kdj_rule_2(dataset) & macd_rule(dataset):\n #if kdj_rule_3(dataset):\n selection.append(symbol)\n\ndef judge_rule_weekly(symbol, dataset, window, selection):\n #if (kdj_rule(dataset) | kdj_rule(dataset, -2)) & (macd_rule(dataset) | macd_rule(dataset, -2)):\n #if kdj_rule(dataset):\n #if (kdj_rule(dataset) | kdj_rule(dataset, -2) | kdj_rule_2(dataset)) & ma_rule(dataset, 1):\n if (kdj_rule(dataset) | kdj_rule(dataset, -2)) & ma_rule(dataset):\n #if (kdj_rule(dataset) | kdj_rule(dataset, -2) | kdj_rule_2(dataset)) & macd_rule_2(dataset):\n #if kdj_rule_3(dataset):\n selection.append(symbol)\n\ndef judge_rule_monthly(symbol, dataset, window, selection):\n #if ma_rule(dataset) & kdj_rule_1(dataset):\n #if kdj_rule(dataset):\n #if (kdj_rule(dataset) | kdj_rule(dataset, -2) | kdj_rule_2(dataset)) & ma_rule(dataset, 1):\n #if (kdj_rule(dataset) | kdj_rule(dataset, -2) | kdj_rule_2(dataset)) & macd_rule_2(dataset):\n if (kdj_rule(dataset) | kdj_rule(dataset, -2)) & ma_rule(dataset):\n #if kdj_rule_3(dataset):\n selection.append(symbol)\n\ndef inner_processing_stock_data(symbol, input_data, window, day_selection, week_selection, month_selection):\n day_data = input_data['daily'] #input_data[input_data['volume'] > 0].copy()\n week_data = input_data['weekly'] #convert_week_based_data(day_data)\n month_data = input_data['monthly'] #convert_month_based_data(day_data)\n\n judge_rule_daily(symbol, day_data, window, day_selection)\n judge_rule_weekly(symbol, week_data, window, week_selection)\n judge_rule_monthly(symbol, month_data, window, month_selection)\n\n\ndef processing_stock_data(root_path, symbol, window, day_selection, week_selection, month_selection):\n startTime = time.time()\n data_daily = get_single_stock_data_daily(root_path, symbol)\n\n if data_daily['close'][-1] * data_daily['volume'][-1] < 1000 * 10000: return startTime\n\n data_weekly = get_single_stock_data_weekly(root_path, symbol)\n data_monthly = get_single_stock_data_monthly(root_path, symbol)\n\n if data_daily.empty: 
return startTime\n if len(data_daily) < 60 + window: return startTime\n \n data = { \"daily\":data_daily, \"weekly\":data_weekly, \"monthly\":data_monthly }\n\n inner_processing_stock_data(symbol, data, window, day_selection, week_selection, month_selection)\n\n return startTime\n\ndef process_all_stocks_data(root_path, window = 5):\n symbols = getStocksList_US(root_path).index.values.tolist()\n\n pbar = tqdm(total=len(symbols))\n\n day_selection = []\n week_selection = []\n month_selection = []\n\n # for index in range(0, window):\n # day_window = []\n # day_selection.append(day_window)\n # week_window = []\n # week_selection.append(week_window)\n # month_window = []\n # month_selection.append(month_window)\n\n startTime_1 = time.time()\n for symbol in symbols:\n startTime = processing_stock_data(root_path, symbol, window, day_selection, week_selection, month_selection)\n outMessage = '%-*s processed in: %.4s seconds' % (6, symbol, (time.time() - startTime))\n pbar.set_description(outMessage)\n pbar.update(1)\n print('total processing in: %.4s seconds' % ((time.time() - startTime_1)))\n\n # with concurrent.futures.ThreadPoolExecutor(max_workers=8) as executor:\n # # Start the load operations and mark each future with its URL\n # future_to_stock = {executor.submit(processing_stock_data, root_path, symbol, window, day_selection, week_selection, month_selection): symbol for symbol in symbols}\n # for future in concurrent.futures.as_completed(future_to_stock):\n # stock = future_to_stock[future]\n # try:\n # startTime = future.result()\n # except Exception as exc:\n # startTime = time.time()\n # print('%r generated an exception: %s' % (stock, exc))\n # outMessage = '%-*s processed in: %.4s seconds' % (6, stock, (time.time() - startTime))\n # pbar.set_description(outMessage)\n # pbar.update(1)\n\n # day_week_selection = []\n # week_month_selection = []\n # day_month_selection = []\n # all_selection = []\n\n # count = []\n\n day_week_selection = list(set(day_selection) & set(week_selection ))\n week_month_selection = list(set(week_selection) & set(month_selection ))\n day_month_selection = list(set(day_selection) & set(month_selection ))\n all_selection = list(set(day_week_selection) & set(week_month_selection))\n\n #day_selection = list(set(day_selection) - set(all_selection))\n #week_selection = list(set(week_selection) - set(all_selection))\n #month_selection = list(set(month_selection) - set(all_selection))\n\n # sumUp = len(day_week_selection[index]) + len(week_month_selection[index]) + len(day_month_selection[index]) + len(all_selection[index])\n # count.insert(0,sumUp)\n\n print(\"all_selection\", len(all_selection), sorted(all_selection))\n print(\"day_week_selection\", len(day_week_selection), sorted(day_week_selection))\n print(\"week_month_selection\", len(week_month_selection), sorted(week_month_selection))\n print(\"day_month_selection\", len(day_month_selection), sorted(day_month_selection))\n print(\"/n ------------------------ /n\")\n\n # plt.plot(range(0, len(count)), count)\n # plt.title('A simple chirp')\n # plt.show()\n print(\"day_selection\", len(day_selection), sorted(day_selection))\n print(\"week_selection\", len(week_selection), sorted(week_selection))\n print(\"month_selection\", len(month_selection), sorted(month_selection))\n\n\nif __name__ == \"__main__\":\n if len(sys.argv) != 2:\n print(\"Input parameter error\")\n exit()\n\n pd.set_option('precision', 3)\n pd.set_option('display.width',1000)\n warnings.filterwarnings('ignore', 
category=pd.io.pytables.PerformanceWarning)\n\n update = str(sys.argv[1])\n\n now = datetime.datetime.now().strftime(\"%Y-%m-%d\")\n\n config = configparser.ConfigParser()\n config.read(root_path + \"/\" + \"config.ini\")\n storeType = int(config.get('Setting', 'StoreType'))\n\n if update == '1':\n print(\"updating Daily data...\")\n updateStockData_US_Daily(root_path, \"2014-01-01\", now, storeType)\n\n print(\"updating Weekly data...\")\n updateStockData_US_Weekly(root_path, \"2014-01-01\", now, storeType)\n\n print(\"updating Monthly data...\")\n updateStockData_US_Monthly(root_path, \"2014-01-01\", now, storeType)\n \n print(\"Processing data...\")\n process_all_stocks_data(root_path, 5)\n\n\n"
] | [
[
"pandas.to_datetime",
"numpy.array",
"numpy.zeros_like",
"pandas.set_option",
"numpy.diff"
]
] |
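The row above screens US stocks with KDJ, MACD, and a Wilder-style RSI computed from data behind a private DB layer. The following is a self-contained sketch of that RSI calculation on synthetic prices, using only pandas/numpy; the `ewm(alpha=1/n, adjust=False)` smoothing is the standard approximation of Wilder's recursion, so early values can differ slightly from the seeded loop in the original file.

```python
# Self-contained sketch of the Wilder-style RSI used in the row above.
# `prices` is synthetic data, not output of the project's DB layer.
import numpy as np
import pandas as pd

def rsi(close: pd.Series, n: int = 14) -> pd.Series:
    delta = close.diff()
    up = delta.clip(lower=0)        # gains only
    down = -delta.clip(upper=0)     # losses only, as positive numbers
    # Wilder smoothing corresponds to an exponential mean with alpha = 1/n
    avg_up = up.ewm(alpha=1 / n, adjust=False).mean()
    avg_down = down.ewm(alpha=1 / n, adjust=False).mean()
    rs = avg_up / avg_down
    return 100 - 100 / (1 + rs)

prices = pd.Series(np.random.default_rng(0).normal(0, 1, 300).cumsum() + 100)
print(rsi(prices, 14).tail())
```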
pchabets/chronicity-prediction-depression | [
"ba43009cc7213cab91376540393512282e5f9319"
] | [
"code/python/transcriptomics>2-y-chronicity_variance_based_feature_selection_VM.py"
] | [
"#!/usr/bin/env python\n# coding: utf-8\n\n# In[1]:\n\n\nimport os\nimport pandas as pd\nimport numpy as np\nimport seaborn as sns\nimport matplotlib.pyplot as plt\n\n\n# In[2]:\n\n\nos.chdir(\"/home/pchabets/Dropbox/STRESS_INDEX/\")\n\n\n# ### Load in transcriptomics data\n\n# In[3]:\n\n\nexpr_train = pd.read_csv(\"data/blood_and_saliva_variables/W1/transcriptomics/transcriptomics_2ychronicity_TRAIN.csv\")\nexpr_test = pd.read_csv(\"data/blood_and_saliva_variables/W1/transcriptomics/transcriptomics_2ychronicity_TEST.csv\")\n\n\n# ### Check for highest variance in train data\n\n# In[4]:\n\n\nprobe_variance = expr_train.iloc[:,2:].apply(lambda x: x.var(), axis=0)\nprobe_variance.sort_values(ascending=False)\n\n\n# In[34]:\n\n\n# plot distribution of variances\nplt.figure(figsize=(16,8))\nsns.histplot(probe_variance, bins=1000)\n\n\n# ### Select only top variance probes \n\n# In[39]:\n\n\n# set top n probes with highest variance\ntop = 5000\n\n\n# In[40]:\n\n\n# select top highest variance probes\nselected_probes = probe_variance.sort_values(ascending=False)[0:top]\n\n\n# In[41]:\n\n\n# plot distribution of top variances\nplt.figure(figsize=(16,8))\nsns.histplot(probe_variance, bins=1000)\nsns.histplot(selected_probes, bins=1000, color='red')\n\n\n# ### Transform train and test set to only include top variance probes (from train data)\n\n# In[42]:\n\n\nselected_train = expr_train[expr_train.columns[0:2].append(selected_probes.index)]\n\n\n# In[43]:\n\n\nselected_test = expr_test[expr_test.columns[0:2].append(selected_probes.index)]\n\n\n# ### Write to file\n\n# In[44]:\n\n\n# Train \nselected_train.to_csv(\"scripts/VM/Python/output/transcriptomics_variance_selection_top_{}_TRAIN.csv\".format(top))\n\n\n# In[45]:\n\n\n# Test\nselected_test.to_csv(\"scripts/VM/Python/output/transcriptomics_variance_selection_top_{}_TEST.csv\".format(top))\n\n\n# In[ ]:\n\n\n\n\n"
] | [
[
"pandas.read_csv",
"matplotlib.pyplot.figure"
]
] |
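The row above keeps only the top-variance transcriptomics probes, ranking variance on the training split and applying the same probe selection to the test split. A toy sketch of that selection logic follows; the probe names, matrix shapes, and `top` value are placeholders, not the real transcriptomics files referenced in the script.

```python
# Toy sketch of train-derived variance-based feature selection,
# as in the transcriptomics script above. Data here is synthetic.
import numpy as np
import pandas as pd

rng = np.random.default_rng(0)
expr_train = pd.DataFrame(rng.normal(size=(20, 8)),
                          columns=[f'probe_{i}' for i in range(8)])
expr_test = pd.DataFrame(rng.normal(size=(10, 8)), columns=expr_train.columns)

top = 3
probe_variance = expr_train.var(axis=0)                  # variance per probe, train only
selected = probe_variance.sort_values(ascending=False)[:top]

# Apply the train-derived selection to both splits to avoid test leakage.
selected_train = expr_train[selected.index]
selected_test = expr_test[selected.index]
print(selected_train.columns.tolist())
```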
pradghos/cudf | [
"58316cb0c1303253f254b7247402ec0ed9bf357d"
] | [
"python/cudf/dataframe/dataframe.py"
] | [
"# Copyright (c) 2018, NVIDIA CORPORATION.\n\nfrom __future__ import print_function, division\n\nimport inspect\nimport random\nfrom collections import OrderedDict\nfrom collections.abc import Sequence, Mapping\nfrom copy import copy\nimport logging\nimport warnings\nimport numbers\n\nimport numpy as np\nimport pandas as pd\nimport pyarrow as pa\nfrom pandas.api.types import is_dict_like\n\ntry:\n # pd 0.24.X\n from pandas.core.dtypes.common import infer_dtype_from_object\nexcept ImportError:\n # pd 0.23.X\n from pandas.core.dtypes.common import \\\n _get_dtype_from_object as infer_dtype_from_object\n\n\nfrom types import GeneratorType\n\nfrom librmm_cffi import librmm as rmm\nfrom libgdf_cffi import libgdf\n\nfrom cudf import formatting, _gdf\nfrom cudf.utils import cudautils, queryutils, applyutils, utils, ioutils\nfrom cudf.dataframe.index import as_index, Index, RangeIndex\nfrom cudf.dataframe.series import Series\nfrom cudf.settings import NOTSET, settings\nfrom cudf.comm.serialize import register_distributed_serializer\nfrom cudf.dataframe.categorical import CategoricalColumn\nfrom cudf.dataframe.buffer import Buffer\nfrom cudf._gdf import nvtx_range_push, nvtx_range_pop\nfrom cudf._sort import get_sorted_inds\nfrom cudf.dataframe import columnops\n\nimport cudf.bindings.join as cpp_join\n\n\nclass DataFrame(object):\n \"\"\"\n A GPU Dataframe object.\n\n Examples\n --------\n\n Build dataframe with `__setitem__`:\n\n >>> import cudf\n >>> df = cudf.DataFrame()\n >>> df['key'] = [0, 1, 2, 3, 4]\n >>> df['val'] = [float(i + 10) for i in range(5)] # insert column\n >>> print(df)\n key val\n 0 0 10.0\n 1 1 11.0\n 2 2 12.0\n 3 3 13.0\n 4 4 14.0\n\n Build dataframe with initializer:\n\n >>> import cudf\n >>> import numpy as np\n >>> from datetime import datetime, timedelta\n >>> ids = np.arange(5)\n\n Create some datetime data\n\n >>> t0 = datetime.strptime('2018-10-07 12:00:00', '%Y-%m-%d %H:%M:%S')\n >>> datetimes = [(t0+ timedelta(seconds=x)) for x in range(5)]\n >>> dts = np.array(datetimes, dtype='datetime64')\n\n Create the GPU DataFrame\n\n >>> df = cudf.DataFrame([('id', ids), ('datetimes', dts)])\n >>> df\n id datetimes\n 0 0 2018-10-07T12:00:00.000\n 1 1 2018-10-07T12:00:01.000\n 2 2 2018-10-07T12:00:02.000\n 3 3 2018-10-07T12:00:03.000\n 4 4 2018-10-07T12:00:04.000\n\n Convert from a Pandas DataFrame:\n\n >>> import pandas as pd\n >>> import cudf\n >>> pdf = pd.DataFrame({'a': [0, 1, 2, 3],'b': [0.1, 0.2, None, 0.3]})\n >>> df = cudf.from_pandas(pdf)\n >>> df\n a b\n 0 0 0.1\n 1 1 0.2\n 2 2 nan\n 3 3 0.3\n \"\"\"\n LEFT_RIGHT_INDEX_NAME = 'cudf_left_right_index_key'\n\n def __init__(self, name_series=None, index=None):\n if index is None:\n index = RangeIndex(start=0)\n self._index = index\n self._size = len(index)\n self._cols = OrderedDict()\n # has initializer?\n if name_series is not None:\n if isinstance(name_series, dict):\n name_series = name_series.items()\n for k, series in name_series:\n self.add_column(k, series, forceindex=index is not None)\n\n def serialize(self, serialize):\n header = {}\n frames = []\n header['index'], index_frames = serialize(self._index)\n header['index_frame_count'] = len(index_frames)\n frames.extend(index_frames)\n # Use the column directly to avoid duplicating the index\n columns = [col._column for col in self._cols.values()]\n serialized_columns = zip(*map(serialize, columns))\n header['columns'], column_frames = serialized_columns\n header['column_names'] = tuple(self._cols)\n for f in column_frames:\n frames.extend(f)\n return header, 
frames\n\n @classmethod\n def deserialize(cls, deserialize, header, frames):\n # Reconstruct the index\n index_header = header['index']\n index_frames = frames[:header['index_frame_count']]\n index = deserialize(index_header, index_frames)\n # Reconstruct the columns\n column_frames = frames[header['index_frame_count']:]\n columns = []\n for k, meta in zip(header['column_names'], header['columns']):\n col_frame_count = meta['frame_count']\n colobj = deserialize(meta, column_frames[:col_frame_count])\n columns.append((k, colobj))\n # Advance frames\n column_frames = column_frames[col_frame_count:]\n return cls(columns, index=index)\n\n @property\n def dtypes(self):\n \"\"\"Return the dtypes in this object.\"\"\"\n return pd.Series([x.dtype for x in self._cols.values()],\n index=self._cols.keys())\n\n @property\n def shape(self):\n \"\"\"Returns a tuple representing the dimensionality of the DataFrame.\n \"\"\"\n return len(self), len(self._cols)\n\n def __dir__(self):\n o = set(dir(type(self)))\n o.update(self.__dict__)\n o.update(c for c in self.columns if\n (isinstance(c, pd.compat.string_types) and\n pd.compat.isidentifier(c)))\n return list(o)\n\n def __getattr__(self, key):\n if key != '_cols' and key in self._cols:\n return self[key]\n\n raise AttributeError(\"'DataFrame' object has no attribute %r\" % key)\n\n def __getitem__(self, arg):\n \"\"\"\n If *arg* is a ``str`` or ``int`` type, return the column Series.\n If *arg* is a ``slice``, return a new DataFrame with all columns\n sliced to the specified range.\n If *arg* is an ``array`` containing column names, return a new\n DataFrame with the corresponding columns.\n If *arg* is a ``dtype.bool array``, return the rows marked True\n\n Examples\n --------\n >>> df = DataFrame([('a', list(range(20))),\n ... ('b', list(range(20))),\n ... 
('c', list(range(20)))])\n >>> print(df[:4]) # get first 4 rows of all columns\n a b c\n 0 0 0 0\n 1 1 1 1\n 2 2 2 2\n 3 3 3 3\n >>> print(df[-5:]) # get last 5 rows of all columns\n a b c\n 15 15 15 15\n 16 16 16 16\n 17 17 17 17\n 18 18 18 18\n 19 19 19 19\n >>> print(df[['a', 'c']]) # get columns a and c\n a c\n 0 0 0\n 1 1 1\n 2 2 2\n 3 3 3\n 4 4 4\n 5 5 5\n 6 6 6\n 7 7 7\n 8 8 8\n 9 9 9\n >>> print(df[[True, False, True, False]]) # mask the entire dataframe,\n # returning the rows specified in the boolean mask\n \"\"\"\n if isinstance(arg, str) or isinstance(arg, numbers.Integral) or \\\n isinstance(arg, tuple):\n s = self._cols[arg]\n s.name = arg\n return s\n elif isinstance(arg, slice):\n df = DataFrame()\n for k, col in self._cols.items():\n df[k] = col[arg]\n return df\n elif isinstance(arg, (list, np.ndarray, pd.Series,\n Series, Index, pd.Index)):\n mask = arg\n if isinstance(mask, list):\n mask = np.array(mask)\n df = DataFrame()\n if(mask.dtype == 'bool'):\n # New df-wide index\n selvals, selinds = columnops.column_select_by_boolmask(\n columnops.as_column(self.index), Series(mask))\n index = self.index.take(selinds.to_gpu_array())\n for col in self._cols:\n df[col] = Series(self._cols[col][arg], index=index)\n df.set_index(index)\n else:\n for col in arg:\n df[col] = self[col]\n return df\n elif isinstance(arg, DataFrame):\n return self.mask(arg)\n else:\n msg = \"__getitem__ on type {!r} is not supported\"\n raise TypeError(msg.format(type(arg)))\n\n def mask(self, other):\n df = self.copy()\n for col in self.columns:\n if col in other.columns:\n boolbits = cudautils.compact_mask_bytes(\n other[col].to_gpu_array())\n else:\n boolbits = cudautils.make_empty_mask(len(self[col]))\n df[col]._column = df[col]._column.set_mask(boolbits)\n return df\n\n def __setitem__(self, name, col):\n \"\"\"Add/set column by *name or DataFrame*\n \"\"\"\n # div[div < 0] = 0\n if isinstance(name, DataFrame):\n for col_name in self._cols:\n mask = name[col_name]\n self._cols[col_name] = self._cols[col_name] \\\n .masked_assign(value=col, mask=mask)\n\n elif name in self._cols:\n self._cols[name] = self._prepare_series_for_add(col)\n else:\n self.add_column(name, col)\n\n def __delitem__(self, name):\n \"\"\"\n Drop the given column by *name*.\n \"\"\"\n self._drop_column(name)\n\n def __sizeof__(self):\n return sum(col.__sizeof__() for col in self._cols.values())\n\n def __len__(self):\n \"\"\"\n Returns the number of rows\n \"\"\"\n return self._size\n\n def __array_ufunc__(self, ufunc, method, *inputs, **kwargs):\n if method == '__call__' and 'sqrt' == ufunc.__name__:\n from cudf import sqrt\n return sqrt(self)\n else:\n return NotImplemented\n\n @property\n def empty(self):\n return not len(self)\n\n def assign(self, **kwargs):\n \"\"\"\n Assign columns to DataFrame from keyword arguments.\n\n Examples\n --------\n >>> import cudf\n >>> df = cudf.DataFrame()\n >>> df = df.assign(a=[0, 1, 2], b=[3, 4, 5])\n >>> print(df)\n a b\n 0 0 3\n 1 1 4\n 2 2 5\n \"\"\"\n new = self.copy()\n for k, v in kwargs.items():\n new[k] = v\n return new\n\n def head(self, n=5):\n \"\"\"\n Returns the first n rows as a new DataFrame\n\n Examples\n --------\n >>> import cudf\n >>> df = cudf.DataFrame()\n >>> df['key'] = [0, 1, 2, 3, 4]\n >>> df['val'] = [float(i + 10) for i in range(5)] # insert column\n >>> print(df.head(2))\n key val\n 0 0 10.0\n 1 1 11.0\n \"\"\"\n return self.iloc[:n]\n\n def tail(self, n=5):\n \"\"\"\n Returns the last n rows as a new DataFrame\n\n Examples\n --------\n >>> import cudf\n >>> 
df = cudf.DataFrame()\n >>> df['key'] = [0, 1, 2, 3, 4]\n >>> df['val'] = [float(i + 10) for i in range(5)] # insert column\n >>> print(df.tail(2))\n key val\n 3 3 13.0\n 4 4 14.0\n\n \"\"\"\n if n == 0:\n return self.iloc[0:0]\n\n return self.iloc[-n:]\n\n def to_string(self, nrows=NOTSET, ncols=NOTSET):\n \"\"\"\n Convert to string\n\n Parameters\n ----------\n nrows : int\n Maximum number of rows to show.\n If it is None, all rows are shown.\n\n ncols : int\n Maximum number of columns to show.\n If it is None, all columns are shown.\n\n Examples\n --------\n >>> import cudf\n >>> df = cudf.DataFrame()\n >>> df['key'] = [0, 1, 2]\n >>> df['val'] = [float(i + 10) for i in range(3)]\n >>> df.to_string()\n ' key val\\\\n0 0 10.0\\\\n1 1 11.0\\\\n2 2 12.0'\n \"\"\"\n if nrows is NOTSET:\n nrows = settings.formatting.get('nrows')\n if ncols is NOTSET:\n ncols = settings.formatting.get('ncols')\n\n if nrows is None:\n nrows = len(self)\n else:\n nrows = min(nrows, len(self)) # cap row count\n\n if ncols is None:\n ncols = len(self.columns)\n else:\n ncols = min(ncols, len(self.columns)) # cap col count\n\n more_cols = len(self.columns) - ncols\n more_rows = len(self) - nrows\n\n # Prepare cells\n cols = OrderedDict()\n dtypes = OrderedDict()\n use_cols = list(self.columns[:ncols - 1])\n if ncols > 0:\n use_cols.append(self.columns[-1])\n\n for h in use_cols:\n cols[h] = self[h].values_to_string(nrows=nrows)\n dtypes[h] = self[h].dtype\n\n # Format into a table\n return formatting.format(index=self._index, cols=cols, dtypes=dtypes,\n show_headers=True, more_cols=more_cols,\n more_rows=more_rows, min_width=2)\n\n def __str__(self):\n nrows = settings.formatting.get('nrows') or 10\n ncols = settings.formatting.get('ncols') or 8\n return self.to_string(nrows=nrows, ncols=ncols)\n\n def __repr__(self):\n return \"<cudf.DataFrame ncols={} nrows={} >\".format(\n len(self.columns),\n len(self),\n )\n\n # binary, rbinary, orderedcompare, unorderedcompare\n def _apply_op(self, fn, other):\n result = DataFrame()\n result.set_index(self.index)\n if isinstance(other, Sequence):\n for k, col in enumerate(self._cols):\n result[col] = getattr(self._cols[col], fn)(other[k])\n elif isinstance(other, DataFrame):\n for col in other._cols:\n if col in self._cols:\n result[col] = getattr(self._cols[col], fn)(\n other._cols[col])\n else:\n result[col] = Series(cudautils.full(self.shape[0],\n np.dtype('float64').type(np.nan),\n 'float64'), nan_as_null=False)\n for col in self._cols:\n if col not in other._cols:\n result[col] = Series(cudautils.full(self.shape[0],\n np.dtype('float64').type(np.nan),\n 'float64'), nan_as_null=False)\n elif isinstance(other, Series):\n raise NotImplementedError(\n \"Series to DataFrame arithmetic not supported \"\n \"until strings can be used as indices. 
Try converting your\"\n \" Series into a DataFrame first.\")\n elif isinstance(other, numbers.Number):\n for col in self._cols:\n result[col] = getattr(self._cols[col], fn)(other)\n else:\n raise NotImplementedError(\n \"DataFrame operations with \" + str(type(other)) + \" not \"\n \"supported at this time.\")\n return result\n\n def _unaryop(self, fn):\n result = DataFrame()\n result.set_index(self.index)\n for col in self._cols:\n result[col] = self._cols[col]._unaryop(fn)\n return result\n\n def __add__(self, other):\n return self._apply_op('__add__', other)\n\n def __radd__(self, other):\n return self._apply_op('__radd__', other)\n\n def __sub__(self, other):\n return self._apply_op('__sub__', other)\n\n def __rsub__(self, other):\n return self._apply_op('__rsub__', other)\n\n def __mul__(self, other):\n return self._apply_op('__mul__', other)\n\n def __rmul__(self, other):\n return self._apply_op('__rmul__', other)\n\n def __pow__(self, other):\n if other == 2:\n return self * self\n else:\n return NotImplemented\n\n def __floordiv__(self, other):\n return self._apply_op('__floordiv__', other)\n\n def __rfloordiv__(self, other):\n return self._apply_op('__rfloordiv__', other)\n\n def __truediv__(self, other):\n return self._apply_op('__truediv__', other)\n\n def __rtruediv__(self, other):\n return self._apply_op('__rtruediv__', other)\n\n __div__ = __truediv__\n\n def __and__(self, other):\n return self._apply_op('__and__', other)\n\n def __or__(self, other):\n return self._apply_op('__or__', other)\n\n def __xor__(self, other):\n return self._apply_op('__xor__', other)\n\n def __eq__(self, other):\n return self._apply_op('__eq__', other)\n\n def __ne__(self, other):\n return self._apply_op('__ne__', other)\n\n def __lt__(self, other):\n return self._apply_op('__lt__', other)\n\n def __le__(self, other):\n return self._apply_op('__le__', other)\n\n def __gt__(self, other):\n return self._apply_op('__gt__', other)\n\n def __ge__(self, other):\n return self._apply_op('__ge__', other)\n\n def __iter__(self):\n return iter(self.columns)\n\n def iteritems(self):\n \"\"\" Iterate over column names and series pairs \"\"\"\n for k in self:\n yield (k, self[k])\n\n @property\n def loc(self):\n \"\"\"\n Returns a label-based indexer for row-slicing and column selection.\n\n Examples\n --------\n >>> df = DataFrame([('a', list(range(20))),\n ... ('b', list(range(20))),\n ... ('c', list(range(20)))])\n\n Get the row by index label from 'a' and 'b' columns\n\n >>> df.loc[0, ['a', 'b']]\n a 0\n b 0\n\n Get rows from index 2 to index 5 from 'a' and 'b' columns.\n\n >>> df.loc[2:5, ['a', 'b']]\n a b\n 2 2 2\n 3 3 3\n 4 4 4\n 5 5 5\n\n Get the every 3rd rows from index 2 to 10 from 'a' and 'b'\n\n >>> df.loc[2:10:3, ['a', 'b']]\n a b\n 2 2 2\n 5 5 5\n 8 8 8\n \"\"\"\n return Loc(self)\n\n @property\n def iloc(self):\n \"\"\"\n Returns a integer-location based indexer for selection by position.\n\n Examples\n --------\n >>> df = DataFrame([('a', list(range(20))),\n ... ('b', list(range(20))),\n ... 
('c', list(range(20)))])\n >>> df.iloc[1] # get the row from index 1st\n a 1\n b 1\n c 1\n >>> df.iloc[[0, 2, 9, 18]] # get the rows from indices 0,2,9 and 18.\n a b c\n 0 0 0 0\n 2 2 2 2\n 9 9 9 9\n 18 18 18 18\n >>> df.iloc[3:10:2] # get the rows using slice indices\n a b c\n 3 3 3 3\n 5 5 5 5\n 7 7 7 7\n 9 9 9 9\n \"\"\"\n return Iloc(self)\n\n @property\n def columns(self):\n \"\"\"Returns a tuple of columns\n \"\"\"\n return pd.Index(self._cols)\n\n @columns.setter\n def columns(self, columns):\n old_cols = list(self._cols.keys())\n l_old_cols = len(old_cols)\n l_new_cols = len(columns)\n if l_new_cols != l_old_cols:\n msg = f'Length of new column names: {l_new_cols} does not ' \\\n 'match length of previous column names: {l_old_cols}'\n raise ValueError(msg)\n\n mapper = dict(zip(old_cols, columns))\n self.rename(mapper=mapper, inplace=True)\n\n @property\n def index(self):\n \"\"\"Returns the index of the DataFrame\n \"\"\"\n return self._index\n\n @index.setter\n def index(self, _index):\n new_length = len(_index)\n old_length = len(self._index)\n\n if new_length != old_length:\n msg = f'Length mismatch: Expected index has {old_length}' \\\n ' elements, new values have {new_length} elements'\n raise ValueError(msg)\n\n # try to build an index from generic _index\n idx = as_index(_index)\n self._index = idx\n for k in self.columns:\n self[k] = self[k].set_index(idx)\n\n def set_index(self, index):\n \"\"\"Return a new DataFrame with a new index\n\n Parameters\n ----------\n index : Index, Series-convertible, or str\n Index : the new index.\n Series-convertible : values for the new index.\n str : name of column to be used as series\n \"\"\"\n # When index is a column name\n if isinstance(index, str):\n df = self.copy(deep=False)\n df._drop_column(index)\n return df.set_index(self[index])\n # Otherwise\n else:\n index = index if isinstance(index, Index) else as_index(index)\n df = DataFrame()\n df._index = index\n for k in self.columns:\n df[k] = self[k].set_index(index)\n return df\n\n def reset_index(self, drop=False):\n if not drop:\n name = self.index.name or 'index'\n out = DataFrame()\n out[name] = self.index\n for c in self.columns:\n out[c] = self[c]\n else:\n out = self\n return out.set_index(RangeIndex(len(self)))\n\n def take(self, positions, ignore_index=False):\n out = DataFrame()\n for col in self.columns:\n out[col] = self[col].take(positions, ignore_index=ignore_index)\n return out\n\n def copy(self, deep=True):\n \"\"\"\n Returns a copy of this dataframe\n\n Parameters\n ----------\n deep: bool\n Make a full copy of Series columns and Index at the GPU level, or\n create a new allocation with references.\n \"\"\"\n df = DataFrame()\n df._size = self._size\n if deep:\n df._index = self._index.copy(deep)\n for k in self._cols:\n df._cols[k] = self._cols[k].copy(deep)\n else:\n df._index = self._index\n for k in self._cols:\n df._cols[k] = self._cols[k]\n return df\n\n def __copy__(self):\n return self.copy(deep=True)\n\n def __deepcopy__(self, memo={}):\n \"\"\"\n Parameters\n ----------\n memo, default None\n Standard signature. 
Unused\n \"\"\"\n if memo is None:\n memo = {}\n return self.copy(deep=True)\n\n def _sanitize_columns(self, col):\n \"\"\"Sanitize pre-appended\n col values\n \"\"\"\n series = Series(col)\n if len(self) == 0 and len(self.columns) > 0 and len(series) > 0:\n ind = series.index\n dtype = np.float64\n if self[next(iter(self._cols))].dtype == np.dtype(\"object\"):\n dtype = np.dtype(\"object\")\n arr = rmm.device_array(shape=len(ind), dtype=dtype)\n size = utils.calc_chunk_size(arr.size, utils.mask_bitsize)\n mask = cudautils.zeros(size, dtype=utils.mask_dtype)\n val = Series.from_masked_array(arr, mask, null_count=len(ind))\n for name in self._cols:\n self._cols[name] = val\n self._index = series.index\n self._size = len(series)\n\n def _sanitize_values(self, col):\n \"\"\"Sanitize col values before\n being added\n \"\"\"\n index = self._index\n series = Series(col)\n sind = series.index\n\n # This won't handle 0 dimensional arrays which should be okay\n SCALAR = np.isscalar(col)\n\n if len(self) > 0 and len(series) == 1 and SCALAR:\n if series.dtype == np.dtype(\"object\"):\n gather_map = cudautils.zeros(len(index), 'int32')\n return series[gather_map]\n else:\n arr = rmm.device_array(shape=len(index), dtype=series.dtype)\n cudautils.gpu_fill_value.forall(arr.size)(arr, col)\n return Series(arr)\n elif len(self) > 0 and len(sind) != len(index):\n raise ValueError('Length of values does not match index length')\n return col\n\n def _prepare_series_for_add(self, col, forceindex=False):\n \"\"\"Prepare a series to be added to the DataFrame.\n\n Parameters\n ----------\n col : Series, array-like\n Values to be added.\n\n Returns\n -------\n The prepared Series object.\n \"\"\"\n self._sanitize_columns(col)\n col = self._sanitize_values(col)\n\n empty_index = len(self._index) == 0\n series = Series(col)\n if forceindex or empty_index or self._index.equals(series.index):\n if empty_index:\n self._index = series.index\n self._size = len(series)\n return series\n else:\n return series.set_index(self._index)\n\n def add_column(self, name, data, forceindex=False):\n \"\"\"Add a column\n\n Parameters\n ----------\n name : str\n Name of column to be added.\n data : Series, array-like\n Values to be added.\n \"\"\"\n\n if name in self._cols:\n raise NameError('duplicated column name {!r}'.format(name))\n\n if isinstance(data, GeneratorType):\n data = Series(data)\n series = self._prepare_series_for_add(data, forceindex=forceindex)\n series.name = name\n self._cols[name] = series\n\n def drop(self, labels):\n \"\"\"Drop column(s)\n\n Parameters\n ----------\n labels : str or sequence of strings\n Name of column(s) to be dropped.\n\n Returns\n -------\n A dataframe without dropped column(s)\n\n Examples\n --------\n >>> import cudf\n >>> df = cudf.DataFrame()\n >>> df['key'] = [0, 1, 2, 3, 4]\n >>> df['val'] = [float(i + 10) for i in range(5)]\n >>> df_new = df.drop('val')\n >>> print(df)\n key val\n 0 0 10.0\n 1 1 11.0\n 2 2 12.0\n 3 3 13.0\n 4 4 14.0\n >>> print(df_new)\n key\n 0 0\n 1 1\n 2 2\n 3 3\n 4 4\n \"\"\"\n columns = [labels] if isinstance(labels, str) else list(labels)\n\n outdf = self.copy()\n for c in columns:\n outdf._drop_column(c)\n return outdf\n\n def drop_column(self, name):\n \"\"\"Drop a column by *name*\n \"\"\"\n warnings.warn(\n 'The drop_column method is deprecated. 
'\n 'Use the drop method instead.',\n DeprecationWarning\n )\n self._drop_column(name)\n\n def _drop_column(self, name):\n \"\"\"Drop a column by *name*\n \"\"\"\n if name not in self._cols:\n raise NameError('column {!r} does not exist'.format(name))\n del self._cols[name]\n\n def rename(self, mapper=None, columns=None, copy=True, inplace=False):\n \"\"\"\n Alter column labels.\n\n Function / dict values must be unique (1-to-1). Labels not contained in\n a dict / Series will be left as-is. Extra labels listed don’t throw an\n error.\n\n Parameters\n ----------\n mapper, columns : dict-like or function, optional\n dict-like or functions transformations to apply to\n the column axis' values.\n copy : boolean, default True\n Also copy underlying data\n inplace: boolean, default False\n Retrun new DataFrame. If True, assign columns without copy\n\n Returns\n -------\n DataFrame\n\n Notes\n -----\n Difference from pandas:\n * Support axis='columns' only.\n * Not supporting: index, level\n \"\"\"\n # Pandas defaults to using columns over mapper\n if columns:\n mapper = columns\n\n out = DataFrame()\n out = out.set_index(self.index)\n\n if isinstance(mapper, Mapping):\n for column in self.columns:\n if column in mapper:\n out[mapper[column]] = self[column]\n else:\n out[column] = self[column]\n elif callable(mapper):\n for column in self.columns:\n out[mapper(column)] = self[column]\n\n if inplace:\n self._cols = out._cols\n else:\n return out.copy(deep=copy)\n\n @classmethod\n def _concat(cls, objs, axis=0, ignore_index=False):\n nvtx_range_push(\"CUDF_CONCAT\", \"orange\")\n if len(set(frozenset(o.columns) for o in objs)) != 1:\n what = set(frozenset(o.columns) for o in objs)\n raise ValueError('columns mismatch: {}'.format(what))\n\n objs = [o for o in objs]\n if ignore_index:\n index = RangeIndex(sum(map(len, objs)))\n else:\n index = Index._concat([o.index for o in objs])\n data = [(c, Series._concat([o[c] for o in objs], index=index))\n for c in objs[0].columns]\n out = cls(data)\n out._index = index\n nvtx_range_pop()\n return out\n\n def as_gpu_matrix(self, columns=None, order='F'):\n \"\"\"Convert to a matrix in device memory.\n\n Parameters\n ----------\n columns : sequence of str\n List of a column names to be extracted. The order is preserved.\n If None is specified, all columns are used.\n order : 'F' or 'C'\n Optional argument to determine whether to return a column major\n (Fortran) matrix or a row major (C) matrix.\n\n Returns\n -------\n A (nrow x ncol) numpy ndarray in \"F\" order.\n \"\"\"\n if columns is None:\n columns = self.columns\n\n cols = [self._cols[k] for k in columns]\n ncol = len(cols)\n nrow = len(self)\n if ncol < 1:\n raise ValueError(\"require at least 1 column\")\n if nrow < 1:\n raise ValueError(\"require at least 1 row\")\n dtype = cols[0].dtype\n if any(dtype != c.dtype for c in cols):\n raise ValueError('all columns must have the same dtype')\n for k, c in self._cols.items():\n if c.null_count > 0:\n errmsg = (\"column {!r} has null values. 
\"\n \"hint: use .fillna() to replace null values\")\n raise ValueError(errmsg.format(k))\n\n if order == 'F':\n matrix = rmm.device_array(shape=(nrow, ncol), dtype=dtype,\n order=order)\n for colidx, inpcol in enumerate(cols):\n dense = inpcol.to_gpu_array(fillna='pandas')\n matrix[:, colidx].copy_to_device(dense)\n elif order == 'C':\n matrix = cudautils.row_matrix(cols, nrow, ncol, dtype)\n else:\n errmsg = (\"order parameter should be 'C' for row major or 'F' for\"\n \"column major GPU matrix\")\n raise ValueError(errmsg.format(k))\n return matrix\n\n def as_matrix(self, columns=None):\n \"\"\"Convert to a matrix in host memory.\n\n Parameters\n ----------\n columns : sequence of str\n List of a column names to be extracted. The order is preserved.\n If None is specified, all columns are used.\n\n Returns\n -------\n A (nrow x ncol) numpy ndarray in \"F\" order.\n \"\"\"\n return self.as_gpu_matrix(columns=columns).copy_to_host()\n\n def one_hot_encoding(self, column, prefix, cats, prefix_sep='_',\n dtype='float64'):\n \"\"\"\n Expand a column with one-hot-encoding.\n\n Parameters\n ----------\n\n column : str\n the source column with binary encoding for the data.\n prefix : str\n the new column name prefix.\n cats : sequence of ints\n the sequence of categories as integers.\n prefix_sep : str\n the separator between the prefix and the category.\n dtype :\n the dtype for the outputs; defaults to float64.\n\n Returns\n -------\n\n a new dataframe with new columns append for each category.\n\n Examples\n --------\n >>> import pandas as pd\n >>> import cudf\n >>> pet_owner = [1, 2, 3, 4, 5]\n >>> pet_type = ['fish', 'dog', 'fish', 'bird', 'fish']\n >>> df = pd.DataFrame({'pet_owner': pet_owner, 'pet_type': pet_type})\n >>> df.pet_type = df.pet_type.astype('category')\n\n Create a column with numerically encoded category values\n\n >>> df['pet_codes'] = df.pet_type.cat.codes\n >>> gdf = cudf.from_pandas(df)\n\n Create the list of category codes to use in the encoding\n\n >>> codes = gdf.pet_codes.unique()\n >>> gdf.one_hot_encoding('pet_codes', 'pet_dummy', codes).head()\n pet_owner pet_type pet_codes pet_dummy_0 pet_dummy_1 pet_dummy_2\n 0 1 fish 2 0.0 0.0 1.0\n 1 2 dog 1 0.0 1.0 0.0\n 2 3 fish 2 0.0 0.0 1.0\n 3 4 bird 0 1.0 0.0 0.0\n 4 5 fish 2 0.0 0.0 1.0\n \"\"\"\n newnames = [prefix_sep.join([prefix, str(cat)]) for cat in cats]\n newcols = self[column].one_hot_encoding(cats=cats, dtype=dtype)\n outdf = self.copy()\n for name, col in zip(newnames, newcols):\n outdf.add_column(name, col)\n return outdf\n\n def label_encoding(self, column, prefix, cats, prefix_sep='_', dtype=None,\n na_sentinel=-1):\n \"\"\"Encode labels in a column with label encoding.\n\n Parameters\n ----------\n column : str\n the source column with binary encoding for the data.\n prefix : str\n the new column name prefix.\n cats : sequence of ints\n the sequence of categories as integers.\n prefix_sep : str\n the separator between the prefix and the category.\n dtype :\n the dtype for the outputs; see Series.label_encoding\n na_sentinel : number\n Value to indicate missing category.\n Returns\n -------\n a new dataframe with a new column append for the coded values.\n \"\"\"\n\n newname = prefix_sep.join([prefix, 'labels'])\n newcol = self[column].label_encoding(cats=cats, dtype=dtype,\n na_sentinel=na_sentinel)\n outdf = self.copy()\n outdf.add_column(newname, newcol)\n\n return outdf\n\n def _sort_by(self, sorted_indices):\n df = DataFrame()\n # Perform out = data[index] for all columns\n for k in self.columns:\n 
df[k] = self[k].take(sorted_indices.to_gpu_array())\n return df\n\n def argsort(self, ascending=True, na_position='last'):\n cols = [series._column for series in self._cols.values()]\n return get_sorted_inds(cols, ascending=ascending,\n na_position=na_position)\n\n def sort_index(self, ascending=True):\n \"\"\"Sort by the index\n \"\"\"\n return self._sort_by(self.index.argsort(ascending=ascending))\n\n def sort_values(self, by, ascending=True, na_position='last'):\n \"\"\"\n\n Sort by the values row-wise.\n\n Parameters\n ----------\n by : str or list of str\n Name or list of names to sort by.\n ascending : bool or list of bool, default True\n Sort ascending vs. descending. Specify list for multiple sort\n orders. If this is a list of bools, must match the length of the\n by.\n na_position : {‘first’, ‘last’}, default ‘last’\n 'first' puts nulls at the beginning, 'last' puts nulls at the end\n Returns\n -------\n sorted_obj : cuDF DataFrame\n\n Notes\n -----\n Difference from pandas:\n * Support axis='index' only.\n * Not supporting: inplace, kind\n\n Examples\n --------\n >>> import cudf\n >>> a = ('a', [0, 1, 2])\n >>> b = ('b', [-3, 2, 0])\n >>> df = cudf.DataFrame([a, b])\n >>> print(df.sort_values('b'))\n a b\n 0 0 -3\n 2 2 0\n 1 1 2\n \"\"\"\n # argsort the `by` column\n return self._sort_by(self[by].argsort(\n ascending=ascending,\n na_position=na_position)\n )\n\n def nlargest(self, n, columns, keep='first'):\n \"\"\"Get the rows of the DataFrame sorted by the n largest value of *columns*\n\n Notes\n -----\n Difference from pandas:\n * Only a single column is supported in *columns*\n \"\"\"\n return self._n_largest_or_smallest('nlargest', n, columns, keep)\n\n def nsmallest(self, n, columns, keep='first'):\n \"\"\"Get the rows of the DataFrame sorted by the n smallest value of *columns*\n\n Difference from pandas:\n * Only a single column is supported in *columns*\n \"\"\"\n return self._n_largest_or_smallest('nsmallest', n, columns, keep)\n\n def _n_largest_or_smallest(self, method, n, columns, keep):\n # Get column to operate on\n if not isinstance(columns, str):\n [column] = columns\n else:\n column = columns\n if not (0 <= n < len(self)):\n raise ValueError(\"n out-of-bound\")\n col = self[column].reset_index(drop=True)\n # Operate\n sorted_series = getattr(col, method)(n=n, keep=keep)\n df = DataFrame()\n new_positions = sorted_series.index.gpu_values\n for k in self.columns:\n if k == column:\n df[k] = sorted_series\n else:\n df[k] = self[k].reset_index(drop=True).take(new_positions)\n return df.set_index(self.index.take(new_positions))\n\n def transpose(self):\n \"\"\"Transpose index and columns.\n\n Returns\n -------\n a new (ncol x nrow) dataframe. 
self is (nrow x ncol)\n\n Notes\n -----\n Difference from pandas:\n Not supporting *copy* because default and only behaviour is copy=True\n \"\"\"\n if len(self.columns) == 0:\n return self\n\n dtype = self.dtypes[0]\n if pd.api.types.is_categorical_dtype(dtype):\n raise NotImplementedError('Categorical columns are not yet '\n 'supported for function')\n if any(t != dtype for t in self.dtypes):\n raise ValueError('all columns must have the same dtype')\n has_null = any(c.null_count for c in self._cols.values())\n\n df = DataFrame()\n\n ncols = len(self.columns)\n cols = [self[col]._column.cffi_view for col in self._cols]\n\n new_nrow = ncols\n new_ncol = len(self)\n\n if has_null:\n new_col_series = [\n Series.from_masked_array(\n data=Buffer(rmm.device_array(shape=new_nrow, dtype=dtype)),\n mask=cudautils.make_empty_mask(size=new_nrow),\n )\n for i in range(0, new_ncol)]\n else:\n new_col_series = [\n Series(\n data=Buffer(rmm.device_array(shape=new_nrow, dtype=dtype)),\n )\n for i in range(0, new_ncol)]\n new_col_ptrs = [\n new_col_series[i]._column.cffi_view\n for i in range(0, new_ncol)]\n\n # TODO (dm): move to _gdf.py\n libgdf.gdf_transpose(\n ncols,\n cols,\n new_col_ptrs\n )\n\n for series in new_col_series:\n series._column._update_null_count()\n\n for i in range(0, new_ncol):\n df[str(i)] = new_col_series[i]\n return df\n\n @property\n def T(self):\n return self.transpose()\n\n def merge(self, right, on=None, how='inner', left_on=None, right_on=None,\n left_index=False, right_index=False, lsuffix=None, rsuffix=None,\n type=\"\", method='hash', indicator=False, suffixes=('_x', '_y')):\n \"\"\"Merge GPU DataFrame objects by performing a database-style join\n operation by columns or indexes.\n\n Parameters\n ----------\n right : DataFrame\n on : label or list; defaults to None\n Column or index level names to join on. These must be found in\n both DataFrames.\n\n If on is None and not merging on indexes then\n this defaults to the intersection of the columns\n in both DataFrames.\n left_on : label or list, or array-like\n Column or index level names to join on in the left DataFrame.\n Can also be an array or list of arrays of the length of the\n left DataFrame. These arrays are treated as if they are columns.\n right_on : label or list, or array-like\n Column or index level names to join on in the right DataFrame.\n Can also be an array or list of arrays of the length of the\n right DataFrame. 
These arrays are treated as if they are columns.\n left_index : bool, default False\n Use the index from the left DataFrame as the join key(s).\n right_index : bool, default False\n Use the index from the right DataFrame as the join key.\n how : str, defaults to 'left'\n Only accepts 'left'\n left: use only keys from left frame, similar to\n a SQL left outer join; preserve key order\n suffixes: Tuple[str, str], defaults to ('_x', '_y')\n Suffixes applied to overlapping column names on the left and right\n sides\n type : str, defaults to 'hash'\n\n Returns\n -------\n merged : DataFrame\n\n Examples\n --------\n >>> import cudf\n >>> df_a = cudf.DataFrame()\n >>> df_a['key'] = [0, 1, 2, 3, 4]\n >>> df_a['vals_a'] = [float(i + 10) for i in range(5)]\n >>> df_b = cudf.DataFrame()\n >>> df_b['key'] = [1, 2, 4]\n >>> df_b['vals_b'] = [float(i+10) for i in range(3)]\n >>> df_merged = df_a.merge(df_b, on=['key'], how='left')\n >>> df_merged.sort_values('key') # doctest: +SKIP\n key vals_a vals_b\n 3 0 10.0\n 0 1 11.0 10.0\n 1 2 12.0 11.0\n 4 3 13.0\n 2 4 14.0 12.0\n \"\"\"\n import nvstrings\n _gdf.nvtx_range_push(\"CUDF_JOIN\", \"blue\")\n if indicator:\n raise NotImplementedError(\n \"Only indicator=False is currently supported\"\n )\n\n if lsuffix or rsuffix:\n raise ValueError(\n \"The lsuffix and rsuffix keywords have been replaced with the \"\n \"``suffixes=`` keyword. \"\n \"Please provide the following instead: \\n\\n\"\n \" suffixes=('%s', '%s')\" %\n (lsuffix or '_x', rsuffix or '_y')\n )\n else:\n lsuffix, rsuffix = suffixes\n\n if left_on and right_on and left_on != right_on:\n raise NotImplementedError(\"left_on='x', right_on='y' not supported\"\n \"in CUDF at this time.\")\n\n lhs = self.copy(deep=False)\n rhs = right.copy(deep=False)\n if on:\n on = copy(on)\n if left_on:\n left_on = copy(left_on)\n if right_on:\n right_on = copy(right_on)\n\n # Early termination Error checking\n if type != \"\":\n warnings.warn(\n 'type=\"' + type + '\" parameter is deprecated.'\n 'Use method=\"' + type + '\" instead.',\n DeprecationWarning\n )\n method = type\n if how not in ['left', 'inner', 'outer']:\n raise NotImplementedError('{!r} merge not supported yet'\n .format(how))\n same_names = set(lhs.columns) & set(rhs.columns)\n if same_names and not (lsuffix or rsuffix):\n raise ValueError('there are overlapping columns but '\n 'lsuffix and rsuffix are not defined')\n\n def fix_name(name, suffix):\n if name in same_names:\n return \"{}{}\".format(name, suffix)\n return name\n\n if left_index and right_index:\n on = lhs.LEFT_RIGHT_INDEX_NAME\n lhs[on] = lhs.index\n rhs[on] = rhs.index\n if on is None and left_on is None and right_on is None:\n on = list(same_names)\n if len(on) == 0:\n raise ValueError('No common columns to perform merge on')\n\n # Essential parameters\n if on:\n on = [on] if isinstance(on, str) else list(on)\n if left_on:\n left_on = [left_on] if isinstance(left_on, str) else list(left_on)\n if right_on:\n right_on = ([right_on] if isinstance(right_on, str)\n else list(right_on))\n\n # Pandas inconsistency warning\n if len(lhs) == 0 and len(lhs.columns) > len(rhs.columns) and\\\n set(rhs.columns).intersection(lhs.columns):\n logging.warning(\n \"Pandas and CUDF column ordering may not match for \"\n \"DataFrames with 0 rows.\"\n )\n\n # Column prep - this should be simplified\n col_cats = {}\n\n for name in left_on or []:\n if pd.api.types.is_categorical_dtype(lhs[name]):\n lcats = lhs[name].cat.categories\n rcats = rhs[name].cat.categories\n if how == 'rhs':\n cats = rcats\n 
lhs[name] = (lhs[name].cat._set_categories(cats)\n .fillna(-1))\n elif how in ['inner', 'outer']:\n # Do the join using the union of categories from both side.\n # Adjust for inner joins afterwards\n cats = sorted(set(lcats) | set(rcats))\n lhs[name] = (lhs[name].cat._set_categories(cats)\n .fillna(-1))\n lhs[name] = lhs[name]._column.as_numerical\n rhs[name] = (rhs[name].cat._set_categories(cats)\n .fillna(-1))\n rhs[name] = rhs[name]._column.as_numerical\n col_cats[name] = cats\n for name in right_on or []:\n if pd.api.types.is_categorical_dtype(rhs[name]):\n lcats = lhs[name].cat.categories\n rcats = rhs[name].cat.categories\n if how == 'left':\n cats = lcats\n rhs[name] = (rhs[name].cat._set_categories(cats)\n .fillna(-1))\n elif how in ['inner', 'outer']:\n # Do the join using the union of categories from both side.\n # Adjust for inner joins afterwards\n cats = sorted(set(lcats) | set(rcats))\n lhs[name] = (lhs[name].cat._set_categories(cats)\n .fillna(-1))\n lhs[name] = lhs[name]._column.as_numerical\n rhs[name] = (rhs[name].cat._set_categories(cats)\n .fillna(-1))\n rhs[name] = rhs[name]._column.as_numerical\n col_cats[name] = cats\n for name, col in lhs._cols.items():\n if pd.api.types.is_categorical_dtype(col) and name not in on:\n f_n = fix_name(name, lsuffix)\n col_cats[f_n] = lhs[name].cat.categories\n for name, col in rhs._cols.items():\n if pd.api.types.is_categorical_dtype(col) and name not in on:\n f_n = fix_name(name, rsuffix)\n col_cats[f_n] = rhs[name].cat.categories\n\n if left_index and right_on:\n lhs[right_on[0]] = lhs.index\n left_on = right_on\n elif right_index and left_on:\n rhs[left_on[0]] = rhs.index\n right_on = left_on\n\n if on:\n left_on = on\n right_on = on\n\n # Compute merge\n cols, valids = cpp_join.join(lhs._cols, rhs._cols, left_on, right_on,\n how, method=method)\n\n # Output conversion - take cols and valids from `cpp_join` and\n # combine into a DataFrame()\n df = DataFrame()\n\n # Columns are returned in order on - left - rhs from libgdf\n # In order to mirror pandas, reconstruct our df using the\n # columns from `left` and the data from `cpp_join`. 
The final order\n # is left columns, followed by non-join-key rhs columns.\n on_count = 0\n on = list(set(right_on + left_on))\n # gap spaces between left and `on` for result from `cpp_join`\n gap = len(lhs.columns) - len(on)\n for idc, name in enumerate(lhs.columns):\n if name in on:\n # on columns returned first from `cpp_join`\n for idx in range(len(on)):\n if on[idx] == name:\n on_idx = idx + gap\n on_count = on_count + 1\n key = on[idx]\n categories = col_cats[key] if key in col_cats.keys()\\\n else None\n if isinstance(cols[on_idx], nvstrings.nvstrings):\n df[key] = cols[on_idx]\n else:\n mask = None\n if valids[on_idx] is not None:\n mask = Buffer(valids[on_idx])\n df[key] = columnops.build_column(\n Buffer(cols[on_idx]),\n dtype=cols[on_idx].dtype,\n mask=mask,\n categories=categories,\n )\n else: # not an `on`-column, `cpp_join` returns these after `on`\n # but they need to be added to the result before `on` columns.\n # on_count corrects gap for non-`on` columns\n left_column_idx = idc - on_count\n left_name = fix_name(name, lsuffix)\n categories = col_cats[left_name] if left_name in\\\n col_cats.keys() else None\n if isinstance(cols[left_column_idx], nvstrings.nvstrings):\n df[left_name] = cols[left_column_idx]\n else:\n mask = None\n if valids[left_column_idx] is not None:\n mask = Buffer(valids[left_column_idx])\n df[left_name] = columnops.build_column(\n Buffer(cols[left_column_idx]),\n dtype=cols[left_column_idx].dtype,\n mask=mask,\n categories=categories,\n )\n rhs_column_idx = len(lhs.columns)\n for name in rhs.columns:\n if name not in on:\n # now copy the columns from `rhs` that were not in `on`\n rhs_name = fix_name(name, rsuffix)\n categories = col_cats[rhs_name] if rhs_name in\\\n col_cats.keys() else None\n if isinstance(cols[rhs_column_idx], nvstrings.nvstrings):\n df[rhs_name] = cols[rhs_column_idx]\n else:\n mask = None\n if valids[rhs_column_idx] is not None:\n mask = Buffer(valids[rhs_column_idx])\n df[rhs_name] = columnops.build_column(\n Buffer(cols[rhs_column_idx]),\n dtype=cols[rhs_column_idx].dtype,\n mask=mask,\n categories=categories,\n )\n rhs_column_idx = rhs_column_idx + 1\n\n if left_index and right_index:\n df = df.drop(lhs.LEFT_RIGHT_INDEX_NAME)\n df = df.set_index(lhs.index[df.index.gpu_values])\n elif right_index and left_on:\n new_index = Series(lhs.index,\n index=RangeIndex(0, len(lhs[left_on[0]])))\n indexed = lhs[left_on[0]][df[left_on[0]]-1]\n new_index = new_index[indexed-1]\n df.index = new_index\n elif left_index and right_on:\n new_index = Series(rhs.index,\n index=RangeIndex(0, len(rhs[right_on[0]])))\n indexed = rhs[right_on[0]][df[right_on[0]]-1]\n new_index = new_index[indexed-1]\n df.index = new_index\n\n _gdf.nvtx_range_pop()\n\n return df\n\n def join(self, other, on=None, how='left', lsuffix='', rsuffix='',\n sort=False, type=\"\", method='hash'):\n \"\"\"Join columns with other DataFrame on index or on a key column.\n\n Parameters\n ----------\n other : DataFrame\n how : str\n Only accepts \"left\", \"right\", \"inner\", \"outer\"\n lsuffix, rsuffix : str\n The suffices to add to the left (*lsuffix*) and right (*rsuffix*)\n column names when avoiding conflicts.\n sort : bool\n Set to True to ensure sorted ordering.\n\n Returns\n -------\n joined : DataFrame\n\n Notes\n -----\n Difference from pandas:\n\n - *other* must be a single DataFrame for now.\n - *on* is not supported yet due to lack of multi-index support.\n \"\"\"\n\n _gdf.nvtx_range_push(\"CUDF_JOIN\", \"blue\")\n\n # Outer joins still use the old implementation\n if 
type != \"\":\n warnings.warn(\n 'type=\"' + type + '\" parameter is deprecated.'\n 'Use method=\"' + type + '\" instead.',\n DeprecationWarning\n )\n method = type\n\n if how not in ['left', 'right', 'inner', 'outer']:\n raise NotImplementedError('unsupported {!r} join'.format(how))\n\n if how == 'right':\n # libgdf doesn't support right join directly, we will swap the\n # dfs and use left join\n return other.join(self, other, how='left', lsuffix=rsuffix,\n rsuffix=lsuffix, sort=sort, method='hash')\n\n same_names = set(self.columns) & set(other.columns)\n if same_names and not (lsuffix or rsuffix):\n raise ValueError('there are overlapping columns but '\n 'lsuffix and rsuffix are not defined')\n\n lhs = DataFrame()\n rhs = DataFrame()\n\n # Creating unique column name to use libgdf join\n idx_col_name = str(random.randint(2**29, 2**31))\n\n while idx_col_name in self.columns or idx_col_name in other.columns:\n idx_col_name = str(random.randint(2**29, 2**31))\n\n lhs[idx_col_name] = Series(self.index.as_column()).set_index(self\n .index)\n rhs[idx_col_name] = Series(other.index.as_column()).set_index(other\n .index)\n\n for name in self.columns:\n lhs[name] = self[name]\n\n for name in other.columns:\n rhs[name] = other[name]\n\n lhs = lhs.reset_index(drop=True)\n rhs = rhs.reset_index(drop=True)\n\n cat_join = False\n\n if pd.api.types.is_categorical_dtype(lhs[idx_col_name]):\n cat_join = True\n lcats = lhs[idx_col_name].cat.categories\n rcats = rhs[idx_col_name].cat.categories\n if how == 'left':\n cats = lcats\n rhs[idx_col_name] = (rhs[idx_col_name].cat\n ._set_categories(cats)\n .fillna(-1))\n elif how == 'right':\n cats = rcats\n lhs[idx_col_name] = (lhs[idx_col_name].cat\n ._set_categories(cats)\n .fillna(-1))\n elif how in ['inner', 'outer']:\n cats = sorted(set(lcats) | set(rcats))\n\n lhs[idx_col_name] = (lhs[idx_col_name].cat\n ._set_categories(cats)\n .fillna(-1))\n lhs[idx_col_name] = lhs[idx_col_name]._column.as_numerical\n\n rhs[idx_col_name] = (rhs[idx_col_name].cat\n ._set_categories(cats)\n .fillna(-1))\n rhs[idx_col_name] = rhs[idx_col_name]._column.as_numerical\n\n if lsuffix == '':\n lsuffix = 'l'\n if rsuffix == '':\n rsuffix = 'r'\n\n df = lhs.merge(rhs, on=[idx_col_name], how=how,\n suffixes=(lsuffix, rsuffix), method=method)\n\n if cat_join:\n df[idx_col_name] = CategoricalColumn(data=df[idx_col_name].data,\n categories=cats,\n ordered=False)\n\n df = df.set_index(idx_col_name)\n\n if sort and len(df):\n return df.sort_index()\n\n return df\n\n def groupby(self, by=None, sort=False, as_index=True, method=\"hash\",\n level=None):\n \"\"\"Groupby\n\n Parameters\n ----------\n by : list-of-str or str\n Column name(s) to form that groups by.\n sort : bool\n Force sorting group keys.\n Depends on the underlying algorithm.\n as_index : bool; defaults to False\n Must be False. Provided to be API compatible with pandas.\n The keys are always left as regular columns in the result.\n method : str, optional\n A string indicating the method to use to perform the group by.\n Valid values are \"hash\" or \"cudf\".\n \"cudf\" method may be deprecated in the future, but is currently\n the only method supporting group UDFs via the `apply` function.\n\n Returns\n -------\n The groupby object\n\n Notes\n -----\n Unlike pandas, this groupby operation behaves like a SQL groupby.\n No empty rows are returned. 
(For categorical keys, pandas returns\n rows for all categories even if they are no corresponding values.)\n\n Only a minimal number of operations is implemented so far.\n\n - Only *by* argument is supported.\n - Since we don't support multiindex, the *by* columns are stored\n as regular columns.\n \"\"\"\n\n if by is None and level is None:\n raise TypeError('groupby() requires either by or level to be'\n 'specified.')\n if (method == \"cudf\"):\n from cudf.groupby.legacy_groupby import Groupby\n if as_index:\n warnings.warn(\n 'as_index==True not supported due to the lack of '\n 'multi-index with legacy groupby function. Use hash '\n 'method for multi-index'\n )\n result = Groupby(self, by=by)\n return result\n else:\n from cudf.groupby.groupby import Groupby\n\n _gdf.nvtx_range_push(\"CUDF_GROUPBY\", \"purple\")\n # The matching `pop` for this range is inside LibGdfGroupby\n # __apply_agg\n result = Groupby(self, by=by, method=method, as_index=as_index,\n level=level)\n return result\n\n def query(self, expr, local_dict={}):\n \"\"\"\n Query with a boolean expression using Numba to compile a GPU kernel.\n\n See pandas.DataFrame.query.\n\n Parameters\n ----------\n\n expr : str\n A boolean expression. Names in expression refer to columns.\n\n Names starting with `@` refer to Python variables\n\n local_dict : dict\n Containing the local variable to be used in query.\n\n Returns\n -------\n\n filtered : DataFrame\n\n Examples\n --------\n >>> import cudf\n >>> a = ('a', [1, 2, 2])\n >>> b = ('b', [3, 4, 5])\n >>> df = cudf.DataFrame([a, b])\n >>> expr = \"(a == 2 and b == 4) or (b == 3)\"\n >>> print(df.query(expr))\n a b\n 0 1 3\n 1 2 4\n\n DateTime conditionals:\n\n >>> import numpy as np\n >>> import datetime\n >>> df = cudf.DataFrame()\n >>> data = np.array(['2018-10-07', '2018-10-08'], dtype='datetime64')\n >>> df['datetimes'] = data\n >>> search_date = datetime.datetime.strptime('2018-10-08', '%Y-%m-%d')\n >>> print(df.query('datetimes==@search_date'))\n datetimes\n 1 2018-10-08T00:00:00.000\n\n Using local_dict:\n\n >>> import numpy as np\n >>> import datetime\n >>> df = cudf.DataFrame()\n >>> data = np.array(['2018-10-07', '2018-10-08'], dtype='datetime64')\n >>> df['datetimes'] = data\n >>> search_date2 = datetime.datetime.strptime('2018-10-08', '%Y-%m-%d')\n >>> print(df.query('datetimes==@search_date',\n >>> local_dict={'search_date':search_date2}))\n datetimes\n 1 2018-10-08T00:00:00.000\n \"\"\"\n if not isinstance(local_dict, dict):\n raise TypeError(\"local_dict type: expected dict but found {!r}\"\n .format(type(local_dict)))\n\n _gdf.nvtx_range_push(\"CUDF_QUERY\", \"purple\")\n # Get calling environment\n callframe = inspect.currentframe().f_back\n callenv = {\n 'locals': callframe.f_locals,\n 'globals': callframe.f_globals,\n 'local_dict': local_dict,\n }\n # Run query\n boolmask = queryutils.query_execute(self, expr, callenv)\n\n selected = Series(boolmask)\n newdf = DataFrame()\n for col in self.columns:\n newseries = self[col][selected]\n newdf[col] = newseries\n result = newdf\n _gdf.nvtx_range_pop()\n return result\n\n @applyutils.doc_apply()\n def apply_rows(self, func, incols, outcols, kwargs, cache_key=None):\n \"\"\"\n Apply a row-wise user defined function.\n\n Parameters\n ----------\n {params}\n\n Examples\n --------\n The user function should loop over the columns and set the output for\n each row. 
Loop execution order is arbitrary, so each iteration of\n the loop **MUST** be independent of each other.\n\n When ``func`` is invoked, the array args corresponding to the\n input/output are strided so as to improve GPU parallelism.\n The loop in the function resembles serial code, but executes\n concurrently in multiple threads.\n\n >>> import cudf\n >>> import numpy as np\n >>> df = cudf.DataFrame()\n >>> nelem = 3\n >>> df['in1'] = np.arange(nelem)\n >>> df['in2'] = np.arange(nelem)\n >>> df['in3'] = np.arange(nelem)\n\n Define input columns for the kernel\n\n >>> in1 = df['in1']\n >>> in2 = df['in2']\n >>> in3 = df['in3']\n >>> def kernel(in1, in2, in3, out1, out2, kwarg1, kwarg2):\n ... for i, (x, y, z) in enumerate(zip(in1, in2, in3)):\n ... out1[i] = kwarg2 * x - kwarg1 * y\n ... out2[i] = y - kwarg1 * z\n\n Call ``.apply_rows`` with the name of the input columns, the name and\n dtype of the output columns, and, optionally, a dict of extra\n arguments.\n\n >>> df.apply_rows(kernel,\n ... incols=['in1', 'in2', 'in3'],\n ... outcols=dict(out1=np.float64, out2=np.float64),\n ... kwargs=dict(kwarg1=3, kwarg2=4))\n in1 in2 in3 out1 out2\n 0 0 0 0 0.0 0.0\n 1 1 1 1 1.0 -2.0\n 2 2 2 2 2.0 -4.0\n \"\"\"\n return applyutils.apply_rows(self, func, incols, outcols, kwargs,\n cache_key=cache_key)\n\n @applyutils.doc_applychunks()\n def apply_chunks(self, func, incols, outcols, kwargs={}, chunks=None,\n tpb=1):\n \"\"\"\n Transform user-specified chunks using the user-provided function.\n\n Parameters\n ----------\n {params}\n {params_chunks}\n\n Examples\n --------\n\n For ``tpb > 1``, ``func`` is executed by ``tpb`` number of threads\n concurrently. To access the thread id and count,\n use ``numba.cuda.threadIdx.x`` and ``numba.cuda.blockDim.x``,\n respectively (See `numba CUDA kernel documentation`_).\n\n .. _numba CUDA kernel documentation:\\\n http://numba.pydata.org/numba-doc/latest/cuda/kernels.html\n\n In the example below, the *kernel* is invoked concurrently on each\n specified chunk. The *kernel* computes the corresponding output\n for the chunk.\n\n By looping over the range\n ``range(cuda.threadIdx.x, in1.size, cuda.blockDim.x)``, the *kernel*\n function can be used with any *tpb* in a efficient manner.\n\n >>> from numba import cuda\n >>> @cuda.jit\n ... def kernel(in1, in2, in3, out1):\n ... for i in range(cuda.threadIdx.x, in1.size, cuda.blockDim.x):\n ... x = in1[i]\n ... y = in2[i]\n ... z = in3[i]\n ... out1[i] = x * y + z\n\n See also\n --------\n DataFrame.apply_rows\n \"\"\"\n if chunks is None:\n raise ValueError('*chunks* must be defined')\n return applyutils.apply_chunks(self, func, incols, outcols, kwargs,\n chunks=chunks, tpb=tpb)\n\n def hash_columns(self, columns=None):\n \"\"\"Hash the given *columns* and return a new Series\n\n Parameters\n ----------\n column : sequence of str; optional\n Sequence of column names. 
If columns is *None* (unspecified),\n all columns in the frame are used.\n \"\"\"\n from cudf.dataframe import numerical\n\n if columns is None:\n columns = self.columns\n\n cols = [self[k]._column for k in columns]\n return Series(numerical.column_hash_values(*cols))\n\n def partition_by_hash(self, columns, nparts):\n \"\"\"Partition the dataframe by the hashed value of data in *columns*.\n\n Parameters\n ----------\n columns : sequence of str\n The names of the columns to be hashed.\n Must have at least one name.\n nparts : int\n Number of output partitions\n\n Returns\n -------\n partitioned: list of DataFrame\n \"\"\"\n cols = [col._column for col in self._cols.values()]\n names = list(self._cols.keys())\n key_indices = [names.index(k) for k in columns]\n # Allocate output buffers\n outputs = [col.copy() for col in cols]\n # Call hash_partition\n offsets = _gdf.hash_partition(cols, key_indices, nparts, outputs)\n # Re-construct output partitions\n outdf = DataFrame()\n for k, col in zip(self._cols, outputs):\n outdf[k] = col\n # Slice into partition\n return [outdf[s:e] for s, e in zip(offsets, offsets[1:] + [None])]\n\n def replace(self, to_replace, value):\n \"\"\"\n Replace values given in *to_replace* with *value*.\n\n Parameters\n ----------\n to_replace : numeric, str, list-like or dict\n Value(s) to replace.\n\n * numeric or str:\n\n - values equal to *to_replace* will be replaced\n with *value*\n\n * list of numeric or str:\n\n - If *value* is also list-like,\n *to_replace* and *value* must be of same length.\n\n * dict:\n\n - Dicts can be used to replace different values in different\n columns. For example, `{'a': 1, 'z': 2}` specifies that the\n value 1 in column `a` and the value 2 in column `z` should be\n replaced with value*.\n value : numeric, str, list-like, or dict\n Value(s) to replace `to_replace` with. If a dict is provided, then\n its keys must match the keys in *to_replace*, and correponding\n values must be compatible (e.g., if they are lists, then they must\n match in length).\n\n Returns\n -------\n result : DataFrame\n DataFrame after replacement.\n \"\"\"\n outdf = self.copy()\n\n if not is_dict_like(to_replace):\n to_replace = dict.fromkeys(self.columns, to_replace)\n if not is_dict_like(value):\n value = dict.fromkeys(self.columns, value)\n\n for k in to_replace:\n outdf[k] = self[k].replace(to_replace[k], value[k])\n\n return outdf\n\n def fillna(self, value, method=None, axis=None, inplace=False, limit=None):\n \"\"\"Fill null values with ``value``.\n\n Parameters\n ----------\n value : scalar, Series-like or dict\n Value to use to fill nulls. 
If Series-like, null values\n are filled with values in corresponding indices.\n A dict can be used to provide different values to fill nulls\n in different columns.\n\n Returns\n -------\n result : DataFrame\n Copy with nulls filled.\n\n Examples\n --------\n >>> import cudf\n >>> gdf = cudf.DataFrame({'a': [1, 2, None], 'b': [3, None, 5]})\n >>> gdf.fillna(4).to_pandas()\n a b\n 0 1 3\n 1 2 4\n 2 4 5\n >>> gdf.fillna({'a': 3, 'b': 4}).to_pandas()\n a b\n 0 1 3\n 1 2 4\n 2 3 5\n \"\"\"\n if inplace:\n outdf = {} # this dict will just hold Nones\n else:\n outdf = self.copy()\n\n if not is_dict_like(value):\n value = dict.fromkeys(self.columns, value)\n\n for k in value:\n outdf[k] = self[k].fillna(value[k], method=method, axis=axis,\n inplace=inplace, limit=limit)\n\n if not inplace:\n return outdf\n\n def to_pandas(self):\n \"\"\"\n Convert to a Pandas DataFrame.\n\n Examples\n --------\n >>> import cudf\n >>> a = ('a', [0, 1, 2])\n >>> b = ('b', [-3, 2, 0])\n >>> df = cudf.DataFrame([a, b])\n >>> type(df.to_pandas())\n <class 'pandas.core.frame.DataFrame'>\n \"\"\"\n index = self.index.to_pandas()\n out = pd.DataFrame(index=index)\n for c, x in self._cols.items():\n out[c] = x.to_pandas(index=index)\n return out\n\n @classmethod\n def from_pandas(cls, dataframe, nan_as_null=True):\n \"\"\"\n Convert from a Pandas DataFrame.\n\n Raises\n ------\n TypeError for invalid input type.\n\n Examples\n --------\n >>> import cudf\n >>> import pandas as pd\n >>> data = [[0,1], [1,2], [3,4]]\n >>> pdf = pd.DataFrame(data, columns=['a', 'b'], dtype=int)\n >>> cudf.from_pandas(pdf)\n <cudf.DataFrame ncols=2 nrows=3 >\n \"\"\"\n if not isinstance(dataframe, pd.DataFrame):\n raise TypeError('not a pandas.DataFrame')\n\n df = cls()\n # Set columns\n for colk in dataframe.columns:\n vals = dataframe[colk].values\n df[colk] = Series(vals, nan_as_null=nan_as_null)\n # Set index\n return df.set_index(dataframe.index)\n\n def to_arrow(self, preserve_index=True):\n \"\"\"\n Convert to a PyArrow Table.\n\n Examples\n --------\n >>> import cudf\n >>> a = ('a', [0, 1, 2])\n >>> b = ('b', [-3, 2, 0])\n >>> df = cudf.DataFrame([a, b])\n >>> df.to_arrow()\n pyarrow.Table\n None: int64\n a: int64\n b: int64\n \"\"\"\n arrays = []\n names = []\n types = []\n index_names = []\n index_columns = []\n\n for name, column in self._cols.items():\n names.append(name)\n arrow_col = column.to_arrow()\n arrays.append(arrow_col)\n types.append(arrow_col.type)\n\n index_name = pa.pandas_compat._index_level_name(self.index, 0, names)\n index_names.append(index_name)\n index_columns.append(self.index)\n # It would be better if we didn't convert this if we didn't have to,\n # but we first need better tooling for cudf --> pyarrow type\n # conversions\n index_arrow = self.index.to_arrow()\n types.append(index_arrow.type)\n if preserve_index:\n arrays.append(index_arrow)\n names.append(index_name)\n\n # We may want to add additional metadata to this in the future, but\n # for now lets just piggyback off of what's done for Pandas\n metadata = pa.pandas_compat.construct_metadata(\n self, names, index_columns, index_names, preserve_index, types\n )\n\n return pa.Table.from_arrays(arrays, names=names, metadata=metadata)\n\n @classmethod\n def from_arrow(cls, table):\n \"\"\"Convert from a PyArrow Table.\n\n Raises\n ------\n TypeError for invalid input type.\n\n **Notes**\n\n Does not support automatically setting index column(s) similar to how\n ``to_pandas`` works for PyArrow Tables.\n\n Examples\n --------\n >>> import pyarrow as pa\n 
>>> import cudf\n >>> data = [pa.array([1, 2, 3]), pa.array([4, 5, 6])]\n >>> batch = pa.RecordBatch.from_arrays(data, ['f0', 'f1'])\n >>> table = pa.Table.from_batches([batch])\n >>> cudf.DataFrame.from_arrow(table)\n <cudf.DataFrame ncols=2 nrows=3 >\n \"\"\"\n import json\n if not isinstance(table, pa.Table):\n raise TypeError('not a pyarrow.Table')\n\n index_col = None\n dtypes = None\n if isinstance(table.schema.metadata, dict):\n if b'pandas' in table.schema.metadata:\n metadata = json.loads(\n table.schema.metadata[b'pandas']\n )\n index_col = metadata['index_columns']\n dtypes = {col['field_name']: col['pandas_type'] for col in\n metadata['columns'] if 'field_name' in col}\n\n df = cls()\n for col in table.columns:\n if dtypes:\n dtype = dtypes[col.name]\n if dtype == 'categorical':\n dtype = 'category'\n elif dtype == 'date':\n dtype = 'datetime64[ms]'\n else:\n dtype = None\n\n df[col.name] = columnops.as_column(\n col.data,\n dtype=dtype\n )\n if index_col:\n df = df.set_index(index_col[0])\n new_index_name = pa.pandas_compat._backwards_compatible_index_name(\n df.index.name, df.index.name)\n df.index.name = new_index_name\n return df\n\n def to_records(self, index=True):\n \"\"\"Convert to a numpy recarray\n\n Parameters\n ----------\n index : bool\n Whether to include the index in the output.\n\n Returns\n -------\n numpy recarray\n \"\"\"\n members = [('index', self.index.dtype)] if index else []\n members += [(col, self[col].dtype) for col in self.columns]\n dtype = np.dtype(members)\n ret = np.recarray(len(self), dtype=dtype)\n if index:\n ret['index'] = self.index.values\n for col in self.columns:\n ret[col] = self[col].to_array()\n return ret\n\n @classmethod\n def from_records(self, data, index=None, columns=None, nan_as_null=False):\n \"\"\"Convert from a numpy recarray or structured array.\n\n Parameters\n ----------\n data : numpy structured dtype or recarray of ndim=2\n index : str\n The name of the index column in *data*.\n If None, the default index is used.\n columns : list of str\n List of column names to include.\n\n Returns\n -------\n DataFrame\n \"\"\"\n if data.ndim != 1 and data.ndim != 2:\n raise ValueError(\"records dimension expected 1 or 2 but found {!r}\"\n .format(data.ndim))\n\n num_cols = len(data[0])\n if columns is None and data.dtype.names is None:\n names = [i for i in range(num_cols)]\n\n elif data.dtype.names is not None:\n names = data.dtype.names\n\n else:\n if len(columns) != num_cols:\n msg = \"columns length expected {!r} but found {!r}\"\n raise ValueError(msg.format(num_cols, len(columns)))\n names = columns\n\n df = DataFrame()\n if data.ndim == 2:\n for i, k in enumerate(names):\n df[k] = Series(data[:, i], nan_as_null=nan_as_null)\n elif data.ndim == 1:\n for k in names:\n df[k] = Series(data[k], nan_as_null=nan_as_null)\n\n if index is not None:\n indices = data[index]\n return df.set_index(indices.astype(np.int64))\n return df\n\n @classmethod\n def from_gpu_matrix(self, data, index=None, columns=None,\n nan_as_null=False):\n \"\"\"Convert from a numba gpu ndarray.\n\n Parameters\n ----------\n data : numba gpu ndarray\n index : str\n The name of the index column in *data*.\n If None, the default index is used.\n columns : list of str\n List of column names to include.\n\n Returns\n -------\n DataFrame\n \"\"\"\n if data.ndim != 2:\n raise ValueError(\"matrix dimension expected 2 but found {!r}\"\n .format(data.ndim))\n\n if columns is None:\n names = [i for i in range(data.shape[1])]\n else:\n if len(columns) != data.shape[1]:\n 
msg = \"columns length expected {!r} but found {!r}\"\n raise ValueError(msg.format(data.shape[1], len(columns)))\n names = columns\n\n if index is not None and len(index) != data.shape[0]:\n msg = \"index length expected {!r} but found {!r}\"\n raise ValueError(msg.format(data.shape[0], len(index)))\n\n df = DataFrame()\n data = data.transpose() # to mimic the pandas behaviour\n for i, k in enumerate(names):\n df[k] = Series(data[i], nan_as_null=nan_as_null)\n\n if index is not None:\n indices = data[index]\n return df.set_index(indices.astype(np.int64))\n\n return df\n\n def to_gpu_matrix(self):\n \"\"\"Convert to a numba gpu ndarray\n\n\n\n Returns\n -------\n numba gpu ndarray\n \"\"\"\n def quantile(self,\n q=0.5,\n interpolation='linear',\n columns=None,\n exact=True):\n \"\"\"\n Return values at the given quantile.\n\n Parameters\n ----------\n\n q : float or array-like\n 0 <= q <= 1, the quantile(s) to compute\n interpolation : {`linear`, `lower`, `higher`, `midpoint`, `nearest`}\n This parameter specifies the interpolation method to use,\n when the desired quantile lies between two data points i and j.\n Default 'linear'.\n columns : list of str\n List of column names to include.\n exact : boolean\n Whether to use approximate or exact quantile algorithm.\n\n Returns\n -------\n\n DataFrame\n\n \"\"\"\n if columns is None:\n columns = self.columns\n\n result = DataFrame()\n result['Quantile'] = q\n for k, col in self._cols.items():\n if k in columns:\n result[k] = col.quantile(q, interpolation=interpolation,\n exact=exact,\n quant_index=False)\n return result\n\n def select_dtypes(self, include=None, exclude=None):\n \"\"\"Return a subset of the DataFrame’s columns based on the column dtypes.\n\n Parameters\n ----------\n include : str or list\n which columns to include based on dtypes\n exclude : str or list\n which columns to exclude based on dtypes\n\n \"\"\"\n\n # code modified from:\n # https://github.com/pandas-dev/pandas/blob/master/pandas/core/frame.py#L3196\n\n if not isinstance(include, (list, tuple)):\n include = (include,) if include is not None else ()\n if not isinstance(exclude, (list, tuple)):\n exclude = (exclude,) if exclude is not None else ()\n\n df = DataFrame()\n\n # infer_dtype_from_object can distinguish between\n # np.float and np.number\n selection = tuple(map(frozenset, (include, exclude)))\n include, exclude = map(\n lambda x: frozenset(\n map(infer_dtype_from_object, x)),\n selection,\n )\n\n # can't both include AND exclude!\n if not include.isdisjoint(exclude):\n raise ValueError('include and exclude overlap on {inc_ex}'.format(\n inc_ex=(include & exclude)))\n\n cat_type = pd.core.dtypes.dtypes.CategoricalDtypeType\n\n # include all subtypes\n include_subtypes = set()\n for dtype in self.dtypes:\n for i_dtype in include:\n # category handling\n if i_dtype is cat_type:\n include_subtypes.add(i_dtype)\n break\n if issubclass(dtype.type, i_dtype):\n include_subtypes.add(dtype.type)\n\n # exclude all subtypes\n exclude_subtypes = set()\n for dtype in self.dtypes:\n for e_dtype in exclude:\n # category handling\n if e_dtype is cat_type:\n exclude_subtypes.add(e_dtype)\n break\n if issubclass(dtype.type, e_dtype):\n exclude_subtypes.add(dtype.type)\n\n include_all = set([infer_dtype_from_object(d)\n for d in self.dtypes])\n\n # remove all exclude types\n inclusion = include_all - exclude_subtypes\n\n # keep only those included\n if include_subtypes:\n inclusion = inclusion & include_subtypes\n\n for x in self._cols.values():\n infered_type = 
infer_dtype_from_object(x.dtype)\n if infered_type in inclusion:\n df.add_column(x.name, x)\n\n return df\n\n @ioutils.doc_to_parquet()\n def to_parquet(self, path, *args, **kwargs):\n \"\"\"{docstring}\"\"\"\n import cudf.io.parquet as pq\n pq.to_parquet(self, path, *args, **kwargs)\n\n @ioutils.doc_to_feather()\n def to_feather(self, path, *args, **kwargs):\n \"\"\"{docstring}\"\"\"\n import cudf.io.feather as feather\n feather.to_feather(self, path, *args, **kwargs)\n\n @ioutils.doc_to_json()\n def to_json(self, path_or_buf=None, *args, **kwargs):\n \"\"\"{docstring}\"\"\"\n import cudf.io.json as json\n json.to_json(\n self,\n path_or_buf=path_or_buf,\n *args,\n **kwargs\n )\n\n @ioutils.doc_to_hdf()\n def to_hdf(self, path_or_buf, key, *args, **kwargs):\n \"\"\"{docstring}\"\"\"\n import cudf.io.hdf as hdf\n hdf.to_hdf(path_or_buf, key, self, *args, **kwargs)\n\n @ioutils.doc_to_dlpack()\n def to_dlpack(self):\n \"\"\"{docstring}\"\"\"\n import cudf.io.dlpack as dlpack\n return dlpack.to_dlpack(self)\n\n\nclass Loc(object):\n \"\"\"\n For selection by label.\n \"\"\"\n\n def __init__(self, df):\n self._df = df\n\n def __getitem__(self, arg):\n row_slice = None\n row_label = None\n\n if isinstance(arg, int):\n if arg < 0 or arg >= len(self._df):\n raise IndexError(\"label scalar %s is out of bound\" % arg)\n row_label = arg\n col_slice = self._df.columns\n\n elif isinstance(arg, tuple):\n arg_1, arg_2 = arg\n if isinstance(arg_1, int):\n row_label = arg_1\n elif isinstance(arg_1, slice):\n row_slice = arg_1\n else:\n raise TypeError(type(arg_1))\n col_slice = arg_2\n\n elif isinstance(arg, slice):\n row_slice = arg\n col_slice = self._df.columns\n else:\n raise TypeError(type(arg))\n\n if row_label is not None:\n ret_list = []\n col_list = pd.Categorical(list(col_slice))\n for col in col_list:\n if pd.api.types.is_categorical_dtype(\n self._df[col][row_label].dtype\n ):\n raise NotImplementedError(\n \"categorical dtypes are not yet supported in loc\"\n )\n ret_list.append(self._df[col][row_label])\n promoted_type = np.result_type(*[val.dtype for val in ret_list])\n ret_list = np.array(ret_list, dtype=promoted_type)\n return Series(ret_list,\n index=as_index(col_list))\n\n df = DataFrame()\n begin, end = self._df.index.find_label_range(row_slice.start,\n row_slice.stop)\n row_step = row_slice.step if row_slice.step is not None else 1\n for col in col_slice:\n sr = self._df[col]\n df.add_column(col, sr[begin:end:row_step], forceindex=True)\n\n return df\n\n\nclass Iloc(object):\n \"\"\"\n For integer-location based selection.\n \"\"\"\n\n def __init__(self, df):\n self._df = df\n\n def __getitem__(self, arg):\n if isinstance(arg, (tuple)):\n if len(arg) == 1:\n arg = list(arg)\n elif len(arg) == 2:\n return self[arg[0]][arg[1]]\n else:\n return pd.core.indexing.IndexingError(\n \"Too many indexers\"\n )\n\n if isinstance(arg, numbers.Integral):\n rows = []\n for col in self._df.columns:\n rows.append(self._df[col][arg])\n return Series(np.array(rows), name=arg)\n else:\n df = DataFrame()\n for col in self._df.columns:\n df[col] = self._df[col][arg]\n df.index = self._df.index[arg]\n\n return df\n\n def __setitem__(self, key, value):\n # throws an exception while updating\n msg = \"updating columns using iloc is not allowed\"\n raise ValueError(msg)\n\n\ndef from_pandas(obj):\n \"\"\"\n Convert a Pandas DataFrame or Series object into the cudf equivalent\n\n Raises\n ------\n TypeError for invalid input type.\n\n Examples\n --------\n >>> import cudf\n >>> import pandas as pd\n >>> data 
= [[0, 1], [1, 2], [3, 4]]\n >>> pdf = pd.DataFrame(data, columns=['a', 'b'], dtype=int)\n >>> cudf.from_pandas(pdf)\n <cudf.DataFrame ncols=2 nrows=3 >\n \"\"\"\n if isinstance(obj, pd.DataFrame):\n return DataFrame.from_pandas(obj)\n elif isinstance(obj, pd.Series):\n return Series.from_pandas(obj)\n else:\n raise TypeError(\n \"from_pandas only accepts Pandas Dataframes and Series objects. \"\n \"Got %s\" % type(obj)\n )\n\n\ndef merge(left, right, *args, **kwargs):\n return left.merge(right, *args, **kwargs)\n\n\n# a bit of fanciness to inject doctstring with left parameter\nmerge_doc = DataFrame.merge.__doc__\nidx = merge_doc.find('right')\nmerge.__doc__ = ''.join([merge_doc[:idx], '\\n\\tleft : DataFrame\\n\\t',\n merge_doc[idx:]])\n\nregister_distributed_serializer(DataFrame)\n"
] | [
[
"numpy.result_type",
"pandas.Index",
"pandas.core.dtypes.common._get_dtype_from_object",
"numpy.array",
"pandas.compat.isidentifier",
"pandas.DataFrame",
"pandas.core.indexing.IndexingError",
"numpy.isscalar",
"pandas.api.types.is_dict_like",
"pandas.api.types.is_categorical_dtype",
"numpy.dtype"
]
] |
GaelVaroquaux/scikit-learn-tutorial-1 | [
"c15a1fd2e27923e29b4749e098f68a150010e4b6"
] | [
"python_scripts/02_basic_preprocessing_exercise_01_solution.py"
] | [
"# ---\n# jupyter:\n# jupytext:\n# formats: python_scripts//py:percent,notebooks//ipynb\n# text_representation:\n# extension: .py\n# format_name: percent\n# format_version: '1.2'\n# jupytext_version: 1.2.4\n# kernelspec:\n# display_name: Python 3\n# language: python\n# name: python3\n# ---\n\n# %% [markdown]\n# # Solution for Exercise 01\n#\n# The goal of is to compare the performance of our classifier to some baseline classifier that would ignore the input data and instead make constant predictions:\n\n# %%\nimport pandas as pd\n\ndf = pd.read_csv(\n \"https://www.openml.org/data/get_csv/1595261/adult-census.csv\")\n\n# %%\ntarget_name = \"class\"\ntarget = df[target_name].to_numpy()\ndata = df.drop(columns=[target_name, \"fnlwgt\"])\nnumerical_columns = [\n c for c in data.columns if data[c].dtype.kind in [\"i\", \"f\"]]\ndata_numeric = data[numerical_columns]\n\n# %%\nfrom sklearn.model_selection import cross_val_score\nfrom sklearn.dummy import DummyClassifier\n\nhigh_revenue_clf = DummyClassifier(strategy=\"constant\",\n constant=\" >50K\")\nscores = cross_val_score(high_revenue_clf, data_numeric, target)\nprint(f\"{scores.mean():.3f} +/- {scores.std():.3f}\")\n\n# %%\nlow_revenue_clf = DummyClassifier(strategy=\"constant\",\n constant=\" <=50K\")\nscores = cross_val_score(low_revenue_clf, data_numeric, target)\nprint(f\"{scores.mean():.3f} +/- {scores.std():.3f}\")\n\n# %%\nmost_freq_revenue_clf = DummyClassifier(strategy=\"most_frequent\")\nscores = cross_val_score(most_freq_revenue_clf, data_numeric, target)\nprint(f\"{scores.mean():.3f} +/- {scores.std():.3f}\")\n\n# %% [markdown]\n# So 81% accuracy is significantly better than 76% which is the score of a baseline model that would always predict the most frequent class which is the low revenue class: `\" <=50K\"`.\n#\n# In this dataset, we can see that the target classes are imbalanced: almost 3/4 of the records are people with a revenue below 50K:\n\n# %%\ndf[\"class\"].value_counts()\n\n# %%\n(target == \" <=50K\").mean()\n"
] | [
[
"pandas.read_csv",
"sklearn.dummy.DummyClassifier",
"sklearn.model_selection.cross_val_score"
]
] |
PaccMann/paccmann_sets | [
"ef5a9106df140c3261135c88d3d2668b516d93cc"
] | [
"paccmann_sets/models/set_matching/dnn.py"
] | [
"import torch\nimport torch.nn as nn\nfrom paccmann_sets.utils.hyperparameters import ACTIVATION_FN_FACTORY\n\n\nclass DNNSetMatching(nn.Module):\n \"\"\"Generalisable DNN module to allow for flexibility in architecture.\"\"\"\n\n def __init__(self, params: dict) -> None:\n \"\"\"Constructor.\n\n Args:\n params (dict): DNN parameter dictionary with the following keys:\n input_size (int): Input tensor dimensions.\n fc_layers (int): Number of fully connected layers to add.\n fc_units (List[(int)]): List of hidden units for each layer.\n fc_activation (str): Activation function to apply after each\n fully connected layer. See utils/hyperparameter.py\n for options.\n\n \"\"\"\n super(DNNSetMatching, self).__init__()\n\n self.input_size = params['input_size']\n self.layers = params['fc_layers']\n self.hidden_size = params['fc_units']\n self.activation = params['fc_activation']\n\n modules = []\n hidden_units = [self.input_size] + self.hidden_size\n for layer in range(self.layers):\n modules.append(nn.Linear(hidden_units[layer], hidden_units[layer + 1]))\n if self.activation[layer] != 'None':\n modules.append(ACTIVATION_FN_FACTORY[self.activation[layer]])\n self.model = nn.Sequential(*modules)\n\n def forward(self, x: torch.Tensor) -> torch.Tensor:\n \"\"\"Passes input through a feed forward neural network.\n\n Args:\n x (torch.Tensor): Input tensor of shape [batch_size,*,input_size]\n\n Returns:\n torch.Tensor: Output tensor of shape [batch_size,*, hidden_sizes[-1]].\n \"\"\"\n\n return self.model(x)\n"
] | [
[
"torch.nn.Sequential",
"torch.nn.Linear"
]
] |
mpelchat04/geo-deep-learning | [
"87d01b0dac05ed03103d838f5c234b711940759b"
] | [
"evaluate_segmentation.py"
] | [
"import time\nfrom collections import defaultdict\nfrom pathlib import Path\nfrom typing import Sequence\n\nimport numpy as np\nimport pandas as pd\nimport rasterio\nfrom hydra.utils import get_original_cwd\nfrom mlflow import log_metrics\nfrom shapely.geometry import Polygon\nfrom tqdm import tqdm\nimport geopandas as gpd\n\nfrom utils.geoutils import clip_raster_with_gpkg, vector_to_raster\nfrom utils.metrics import ComputePixelMetrics\nfrom utils.utils import get_key_def, list_input_images, get_logger, read_modalities\nfrom utils.verifications import validate_num_classes, assert_crs_match\n\nlogging = get_logger(__name__)\n\n\ndef metrics_per_tile(label_arr: np.ndarray, pred_img: np.ndarray, input_image: rasterio.DatasetReader,\n chunk_size: int, gpkg_name: str, num_classes: int) -> gpd.GeoDataFrame:\n \"\"\"\n Compute metrics for each tile processed during inference\n @param label_arr: numpy array of label\n @param pred_img: numpy array of prediction\n @param input_image: Rasterio file handle holding the (already opened) input raster\n @param chunk_size: tile size for per-tile metrics\n @param gpkg_name: name of geopackage\n @param num_classes: number of classes\n @return:\n \"\"\"\n xmin, ymin, xmax, ymax = input_image.bounds # left, bottom, right, top\n xres, yres = (abs(input_image.transform.a), abs(input_image.transform.e))\n mx = chunk_size * xres\n my = chunk_size * yres\n h, w = input_image.shape\n\n feature = defaultdict(list)\n cnt = 0\n for row in tqdm(range(0, h, chunk_size), position=2, leave=False):\n for col in tqdm(range(0, w, chunk_size), position=3, leave=False):\n label = label_arr[row:row + chunk_size, col:col + chunk_size]\n pred = pred_img[row:row + chunk_size, col:col + chunk_size]\n pixelMetrics = ComputePixelMetrics(label.flatten(), pred.flatten(), num_classes)\n eval = pixelMetrics.update(pixelMetrics.iou)\n feature['id_image'].append(gpkg_name)\n for c_num in range(num_classes):\n feature['L_count_' + str(c_num)].append(int(np.count_nonzero(label == c_num)))\n feature['P_count_' + str(c_num)].append(int(np.count_nonzero(pred == c_num)))\n feature['IoU_' + str(c_num)].append(eval['iou_' + str(c_num)])\n feature['mIoU'].append(eval['macro_avg_iou'])\n logging.debug(eval['macro_avg_iou'])\n x_1, y_1 = (xmin + (col * xres)), (ymax - (row * yres))\n x_2, y_2 = (xmin + ((col * xres) + mx)), y_1\n x_3, y_3 = x_2, (ymax - ((row * yres) + my))\n x_4, y_4 = x_1, y_3\n geom = Polygon([(x_1, y_1), (x_2, y_2), (x_3, y_3), (x_4, y_4)])\n feature['geometry'].append(geom)\n feature['length'].append(geom.length)\n feature['pointx'].append(geom.centroid.x)\n feature['pointy'].append(geom.centroid.y)\n feature['area'].append(geom.area)\n cnt += 1\n gdf = gpd.GeoDataFrame(feature, crs=input_image.crs.to_epsg())\n\n return gdf\n\n\ndef main(params):\n \"\"\"\n Computes benchmark metrics from inference and ground truth and write results to a gpkg.\n @param params:\n @return:\n \"\"\"\n start_seg = time.time()\n state_dict = Path(params['inference']['state_dict_path']).resolve(strict=True)\n modalities = read_modalities(get_key_def('modalities', params['dataset'], expected_type=str))\n num_bands = len(modalities)\n working_folder = state_dict.parent.joinpath(f'inference_{num_bands}bands')\n img_dir_or_csv = get_key_def('img_dir_or_csv_file', params['inference'], default=params['general']['raw_data_csv'],\n expected_type=str)\n num_classes = len(get_key_def('classes_dict', params['dataset']).keys())\n single_class_mode = True if num_classes == 1 else False\n threshold = 0.5\n debug = 
get_key_def('debug', params, default=False, expected_type=bool)\n\n # benchmark (ie when gkpgs are inputted along with imagery)\n out_gpkg = get_key_def('out_benchmark_gpkg', params['inference'], default=working_folder/\"benchmark.gpkg\",\n expected_type=str)\n chunk_size = get_key_def('chunk_size', params['inference'], default=512, expected_type=int)\n dontcare = get_key_def(\"ignore_index\", params[\"training\"], -1)\n attribute_field = get_key_def('attribute_field', params['dataset'], None, expected_type=str)\n attr_vals = get_key_def('attribute_values', params['dataset'], None, expected_type=Sequence)\n\n # Assert that all values are integers (ex.: to benchmark single-class model with multi-class labels)\n if attr_vals:\n for item in attr_vals:\n if not isinstance(item, int):\n raise ValueError(f'\\nValue \"{item}\" in attribute_values is {type(item)}, expected int.')\n\n list_img = list_input_images(img_dir_or_csv, glob_patterns=[\"*.tif\", \"*.TIF\"],\n in_case_of_path=Path(get_original_cwd())/'data')\n\n # VALIDATION: anticipate problems with imagery and label (if provided) before entering main for loop\n valid_gpkg_set = set()\n for info in tqdm(list_img, desc='Validating ground truth'):\n if not 'gpkg' in info.keys() and not info['gpkg']:\n raise ValueError(f\"No ground truth was inputted to evaluate with\")\n elif not Path(info['gpkg']).is_file():\n raise FileNotFoundError(f\"Couldn't locate ground truth to evaluate with.\")\n\n if info['gpkg'] not in valid_gpkg_set:\n validate_num_classes(vector_file=info['gpkg'],\n num_classes=num_classes,\n attribute_name=attribute_field,\n ignore_index=dontcare,\n attribute_values=attr_vals)\n assert_crs_match(info['tif'], info['gpkg'])\n valid_gpkg_set.add(info['gpkg'])\n\n logging.info('\\nSuccessfully validated label data for benchmarking')\n\n gdf_ = []\n gpkg_name_ = []\n\n for info in tqdm(list_img, desc='Evaluating from input list', position=0, leave=True):\n local_img = Path(info['tif'])\n Path.mkdir(working_folder.joinpath(local_img.parent.name), parents=True, exist_ok=True)\n inference_image = working_folder / local_img.parent.name / f\"{local_img.stem}_inference.tif\"\n if not inference_image.is_file():\n raise FileNotFoundError(f\"Couldn't locate inference to evaluate metrics with. 
Make inferece has been run \"\n f\"before you run evaluate mode.\")\n\n pred = rasterio.open(inference_image).read()[0, ...]\n\n local_gpkg = Path(info['gpkg'])\n\n logging.info(f'\\nBurning label as raster: {local_gpkg}')\n with rasterio.open(local_img, 'r') as raster:\n local_img = clip_raster_with_gpkg(raster, local_gpkg)\n\n raster_clipped = rasterio.open(local_img, 'r')\n logging.info(f'\\nReading clipped image: {raster_clipped.name}')\n inf_meta = raster_clipped.meta\n\n label = vector_to_raster(vector_file=local_gpkg,\n input_image=raster_clipped,\n out_shape=(inf_meta['height'], inf_meta['width']),\n attribute_name=attribute_field,\n fill=0, # background value in rasterized vector.\n attribute_values=attr_vals)\n if debug:\n logging.debug(f'\\nUnique values in loaded label as raster: {np.unique(label)}\\n'\n f'Shape of label as raster: {label.shape}')\n\n gdf = metrics_per_tile(label_arr=label, pred_img=pred, input_image=raster_clipped, chunk_size=chunk_size,\n gpkg_name=local_gpkg.stem, num_classes=num_classes)\n\n gdf_.append(gdf.to_crs(4326))\n gpkg_name_.append(local_gpkg.stem)\n\n if 'tracker_uri' in locals():\n pixelMetrics = ComputePixelMetrics(label, pred, num_classes)\n log_metrics(pixelMetrics.update(pixelMetrics.iou))\n log_metrics(pixelMetrics.update(pixelMetrics.dice))\n\n if not len(gdf_) == len(gpkg_name_):\n raise logging.critical(ValueError('\\nbenchmarking unable to complete'))\n all_gdf = pd.concat(gdf_) # Concatenate all geo data frame into one geo data frame\n all_gdf.reset_index(drop=True, inplace=True)\n gdf_x = gpd.GeoDataFrame(all_gdf, crs=4326)\n gdf_x.to_file(out_gpkg, driver=\"GPKG\", index=False)\n logging.info(f'\\nSuccessfully wrote benchmark geopackage to: {out_gpkg}')\n\n end_seg_ = time.time() - start_seg\n logging.info('Benchmark operation completed in {:.0f}m {:.0f}s'.format(end_seg_ // 60, end_seg_ % 60))\n"
] | [
[
"numpy.count_nonzero",
"numpy.unique",
"pandas.concat"
]
] |
DiegoArcelli/De-Stylization-Network | [
"b74d4175c6dd4fad12871e6fe9cde1761803a469"
] | [
"ibn.py"
] | [
"import math\nimport warnings\n\nimport torch\nimport torch.nn as nn\nfrom adaptive_instance_normalization import AdaptiveInstanceNormalization\nfrom destylization_module import DeStylizationModule\nfrom normalization import AdaIN\nfrom torch.nn import Linear\nfrom sequential import DestylerSequential\n\n\nclass IBN(nn.Module):\n r\"\"\"Instance-Batch Normalization layer from\n `\"Two at Once: Enhancing Learning and Generalization Capacities via IBN-Net\"\n <https://arxiv.org/pdf/1807.09441.pdf>`\n Args:\n planes (int): Number of channels for the input tensor\n ratio (float): Ratio of instance normalization in the IBN layer\n \"\"\"\n\n def __init__(self, planes, ratio=0.5):\n super(IBN, self).__init__()\n self.half = int(planes * ratio)\n # self.IN = nn.InstanceNorm2d(self.half, affine=True)\n self.IN = AdaptiveInstanceNormalization(planes - self.half)\n self.BN = nn.BatchNorm2d(planes - self.half)\n\n def forward(self, x, y):\n split = torch.split(x, self.half, 1)\n out1 = self.IN(split[0].contiguous(), y)\n out2 = self.BN(split[1].contiguous())\n out = torch.cat((out1, out2), 1)\n return out\n\n\nclass BasicBlock_IBN(nn.Module):\n expansion = 1\n\n def __init__(self, inplanes, planes, ibn=None, stride=1, downsample=None):\n super(BasicBlock_IBN, self).__init__()\n self.conv1 = nn.Conv2d(inplanes, planes, kernel_size=3, stride=stride,\n padding=1, bias=False)\n if ibn == 'a':\n self.bn1 = IBN(planes)\n else:\n self.bn1 = nn.BatchNorm2d(planes)\n self.relu = nn.ReLU(inplace=True)\n self.conv2 = nn.Conv2d(\n planes, planes, kernel_size=3, padding=1, bias=False)\n self.bn2 = nn.BatchNorm2d(planes)\n self.IN = nn.InstanceNorm2d(\n planes, affine=True) if ibn == 'b' else None\n self.downsample = downsample\n self.stride = stride\n\n def forward(self, x):\n residual = x\n\n out = self.conv1(x)\n out = self.bn1(out)\n out = self.relu(out)\n\n out = self.conv2(out)\n out = self.bn2(out)\n\n if self.downsample is not None:\n residual = self.downsample(x)\n\n out += residual\n if self.IN is not None:\n out = self.IN(out)\n out = self.relu(out)\n\n return out\n\n\nclass Bottleneck_IBN(nn.Module):\n expansion = 4\n\n def __init__(self, inplanes, planes, ibn=None, stride=1, downsample=None):\n super(Bottleneck_IBN, self).__init__()\n self.conv1 = nn.Conv2d(inplanes, planes, kernel_size=1, bias=False)\n if ibn == 'a':\n self.bn1 = IBN(planes)\n else:\n self.bn1 = nn.BatchNorm2d(planes)\n self.conv2 = nn.Conv2d(planes, planes, kernel_size=3, stride=stride,\n padding=1, bias=False)\n self.bn2 = nn.BatchNorm2d(planes)\n self.conv3 = nn.Conv2d(\n planes, planes * self.expansion, kernel_size=1, bias=False)\n self.bn3 = nn.BatchNorm2d(planes * self.expansion)\n self.IN = nn.InstanceNorm2d(\n planes * 4, affine=True) if ibn == 'b' else None\n self.relu = nn.ReLU(inplace=True)\n self.downsample = downsample\n self.stride = stride\n\n def forward(self, x, y = None):\n residual = x\n\n out = self.conv1(x)\n if y is None:\n out = self.bn1(out)\n else:\n out = self.bn1(out, y)\n out = self.relu(out)\n\n out = self.conv2(out)\n out = self.bn2(out)\n out = self.relu(out)\n\n out = self.conv3(out)\n out = self.bn3(out)\n\n if self.downsample is not None:\n residual = self.downsample(x)\n\n out += residual\n if self.IN is not None:\n out = self.IN(out)\n out = self.relu(out)\n\n return out\n\nclass ResNet_IBN(nn.Module):\n\n def __init__(self,\n block,\n layers,\n ibn_cfg=('a', 'a', 'a', None),\n num_classes=1000):\n self.inplanes = 64\n super(ResNet_IBN, self).__init__()\n\n self.ds = DeStylizationModule()\n if 
ibn_cfg[0] == 'a':\n self.ds_fc_5_1 = Linear(32, 64)\n self.ds_fc_5_2 = Linear(32, 64)\n self.ds_fc_5_3 = Linear(32, 64)\n self.ds_fc_5_4 = Linear(32, 128)\n self.ds_fc_5_5 = Linear(32, 128)\n self.ds_fc_5_6 = Linear(32, 128)\n self.ds_fc_5_7 = Linear(32, 128)\n self.ds_fc_5_8 = Linear(32, 256)\n self.ds_fc_5_9 = Linear(32, 256)\n self.ds_fc_5_10 = Linear(32, 256)\n self.ds_fc_5_11 = Linear(32, 256)\n self.ds_fc_5_12 = Linear(32, 256)\n self.ds_fc_5_13 = Linear(32, 256)\n\n\n self.conv1 = nn.Conv2d(3, 64, kernel_size=7, stride=2, padding=3,\n bias=False)\n if ibn_cfg[0] == 'b':\n self.bn1 = nn.InstanceNorm2d(64, affine=True)\n else:\n self.bn1 = nn.BatchNorm2d(64)\n self.relu = nn.ReLU(inplace=True)\n self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)\n self.layer1 = self._make_layer(block, 64, layers[0], ibn=ibn_cfg[0])\n self.layer2 = self._make_layer(\n block, 128, layers[1], stride=2, ibn=ibn_cfg[1])\n self.layer3 = self._make_layer(\n block, 256, layers[2], stride=2, ibn=ibn_cfg[2])\n self.layer4 = self._make_layer(\n block, 512, layers[3], stride=2, ibn=ibn_cfg[3])\n self.avgpool = nn.AvgPool2d(7)\n self.fc = nn.Linear(512 * block.expansion, num_classes)\n\n for m in self.modules():\n if isinstance(m, nn.Conv2d):\n n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels\n m.weight.data.normal_(0, math.sqrt(2. / n))\n elif isinstance(m, nn.BatchNorm2d) or isinstance(m, nn.InstanceNorm2d):\n m.weight.data.fill_(1)\n m.bias.data.zero_()\n\n def _make_layer(self, block, planes, blocks, stride=1, ibn=None):\n downsample = None\n if stride != 1 or self.inplanes != planes * block.expansion:\n downsample = nn.Sequential(\n nn.Conv2d(self.inplanes, planes * block.expansion,\n kernel_size=1, stride=stride, bias=False),\n nn.BatchNorm2d(planes * block.expansion),\n )\n\n layers = []\n layers.append(block(self.inplanes, planes,\n None if ibn == 'b' else ibn,\n stride, downsample))\n self.inplanes = planes * block.expansion\n for i in range(1, blocks):\n layers.append(block(self.inplanes, planes,\n None if (ibn == 'b' and i < blocks-1) else ibn))\n\n return DestylerSequential(*layers)\n\n def forward(self, x):\n\n feat = self.ds(x)\n y_1 = self.ds_fc_5_1(feat)\n y_2 = self.ds_fc_5_2(feat)\n y_3 = self.ds_fc_5_3(feat)\n y_4 = self.ds_fc_5_4(feat)\n y_5 = self.ds_fc_5_5(feat)\n y_6 = self.ds_fc_5_6(feat)\n y_7 = self.ds_fc_5_7(feat)\n y_8 = self.ds_fc_5_8(feat)\n y_9 = self.ds_fc_5_9(feat)\n y_10 = self.ds_fc_5_10(feat)\n y_11 = self.ds_fc_5_11(feat)\n y_12 = self.ds_fc_5_12(feat)\n y_13 = self.ds_fc_5_13(feat)\n\n\n x = self.conv1(x)\n x = self.bn1(x)\n x = self.relu(x)\n x = self.maxpool(x)\n\n x = self.layer1(x, [y_1, y_2, y_3])\n x = self.layer2(x, [y_4, y_5, y_6, y_7])\n x = self.layer3(x, [y_8, y_9, y_10, y_11, y_12, y_13])\n x = self.layer4(x, [])\n\n x = self.avgpool(x)\n x = x.view(x.size(0), -1)\n x = self.fc(x)\n\n return x\n"
] | [
[
"torch.nn.Linear",
"torch.cat",
"torch.nn.MaxPool2d",
"torch.nn.AvgPool2d",
"torch.nn.BatchNorm2d",
"torch.split",
"torch.nn.ReLU",
"torch.nn.Conv2d",
"torch.nn.InstanceNorm2d"
]
] |
lxuechen/private-transformers | [
"99c74dfdf7cc0c03d261ec1e942d77f8a19190fb"
] | [
"examples/classification/common.py"
] | [
"import torch\n\ntask_name2suffix_name = {\"sst-2\": \"GLUE-SST-2\", \"mnli\": \"MNLI\", \"qqp\": \"QQP\", \"qnli\": \"QNLI\"}\ndevice = torch.device('cuda' if torch.cuda.is_available() else 'cpu')\n"
] | [
[
"torch.cuda.is_available"
]
] |
leowangzi/LightHeadRCNN | [
"2135707d63e519b517ec7690a6383a0a51083430"
] | [
"lib/model/faster_rcnn/xception_like.py"
] | [
"\"\"\" \r\nCreates an Xception-like Model as defined in:\r\nZeming Li, Chao Peng, Gang Yu, Xiangyu Zhang, Yangdong Deng, Jian Sun\r\nLight-Head R-CNN: In Defense of Two-Stage Object Detector\r\nhttps://arxiv.org/pdf/1711.07264.pdf\r\nREMEMBER to set your image size to 3x224x224 for both test and validation\r\nnormalize = transforms.Normalize(mean=[0.5, 0.5, 0.5],\r\n std=[0.5, 0.5, 0.5])\r\nThe resize parameter of the validation transform should be 333, and make sure to center crop at 224x224\r\n\"\"\"\r\nfrom __future__ import absolute_import\r\nfrom __future__ import division\r\nfrom __future__ import print_function\r\n\r\nfrom model.utils.config import cfg\r\nfrom model.utils.layer_utils import _Block\r\nfrom model.faster_rcnn.faster_rcnn import _fasterRCNN\r\n\r\nimport math\r\nimport torch.nn as nn\r\nimport torch.nn.functional as F\r\nimport torch.utils.model_zoo as model_zoo\r\nfrom torch.nn import init\r\nimport torch\r\n\r\n__all__ = ['xception']\r\n\r\n\r\nclass Xception(nn.Module):\r\n \"\"\"\r\n Xception optimized for the ImageNet dataset, as specified in\r\n https://arxiv.org/pdf/1610.02357.pdf\r\n \"\"\"\r\n def __init__(self, num_classes=1000):\r\n \"\"\" Constructor\r\n Args:\r\n num_classes: number of classes\r\n \"\"\"\r\n super(Xception, self).__init__()\r\n\r\n self.num_classes = num_classes\r\n\r\n self.conv1 = nn.Conv2d(3,\r\n 24,\r\n kernel_size=3,\r\n stride=2,\r\n padding=1,\r\n bias=False) # 224 x 224 -> 112 x 112\r\n self.bn1 = nn.BatchNorm2d(24)\r\n self.relu = nn.ReLU(inplace=True)\r\n self.maxpool = nn.MaxPool2d(kernel_size=3,\r\n stride=2,\r\n padding=0,\r\n ceil_mode=True) # -> 56 x 56\r\n\r\n # Stage 2\r\n self.block1 = _Block(24,\r\n 144,\r\n 1 + 3,\r\n 2,\r\n start_with_relu=False,\r\n grow_first=True) # -> 28 x 28\r\n\r\n # Stage 3\r\n self.block2 = _Block(144,\r\n 288,\r\n 1 + 7,\r\n 2,\r\n start_with_relu=True,\r\n grow_first=True) # -> 14 x 14\r\n\r\n # Stage 4\r\n self.block3 = _Block(288,\r\n 576,\r\n 1 + 3,\r\n 2,\r\n start_with_relu=True,\r\n grow_first=True) # -> 7 x 7\r\n\r\n self.avgpool = nn.AvgPool2d(7)\r\n self.fc = nn.Linear(576, num_classes)\r\n\r\n #------- init weights --------\r\n for m in self.modules():\r\n if isinstance(m, nn.Conv2d):\r\n n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels\r\n m.weight.data.normal_(0, math.sqrt(2. 
/ n))\r\n elif isinstance(m, nn.BatchNorm2d):\r\n m.weight.data.fill_(1)\r\n m.bias.data.zero_()\r\n #-----------------------------\r\n\r\n def forward(self, x):\r\n x = self.conv1(x)\r\n x = self.bn1(x)\r\n x = self.relu(x)\r\n x = self.maxpool(x)\r\n\r\n x = self.block1(x)\r\n x = self.block2(x)\r\n x = self.block3(x)\r\n\r\n x = self.avgpool(x)\r\n x = x.view(x.size(0), -1)\r\n x = self.fc(x)\r\n\r\n return x\r\n\r\n\r\nclass xception(_fasterRCNN):\r\n def __init__(self,\r\n classes,\r\n pretrained=False,\r\n class_agnostic=False,\r\n lighthead=True):\r\n self.dout_base_model = 576 # Output channel at Stage4\r\n self.dout_lh_base_model = 576\r\n self.class_agnostic = class_agnostic\r\n self.pretrained = pretrained\r\n\r\n _fasterRCNN.__init__(self,\r\n classes,\r\n class_agnostic,\r\n lighthead,\r\n compact_mode=True)\r\n\r\n def _init_modules(self):\r\n xception = Xception()\r\n\r\n # Check pretrained\r\n if self.pretrained == True:\r\n print(\"Loading pretrained weights from %s\" % (self.model_path))\r\n if torch.cuda.is_available():\r\n state_dict = torch.load(self.model_path)\r\n else:\r\n state_dict = torch.load(\r\n self.model_path, map_location=lambda storage, loc: storage)\r\n xception.load_state_dict({\r\n k: v\r\n for k, v in state_dict.items() if k in xception.state_dict()\r\n })\r\n\r\n # Build xception-like network.\r\n self.RCNN_base = nn.Sequential(\r\n xception.conv1,\r\n xception.bn1,\r\n xception.relu,\r\n xception.maxpool, # Conv1\r\n xception.block1,\r\n xception.block2,\r\n xception.block3)\r\n\r\n self.RCNN_top = nn.Sequential(nn.Linear(490 * 7 * 7, 2048),\r\n nn.ReLU(inplace=True))\r\n\r\n self.RCNN_cls_score = nn.Linear(2048, self.n_classes)\r\n if self.class_agnostic:\r\n self.RCNN_bbox_pred = nn.Linear(2048, 4)\r\n else:\r\n self.RCNN_bbox_pred = nn.Linear(2048, 4 * self.n_classes)\r\n\r\n # Fix blocks\r\n if self.pretrained:\r\n for layer in range(len(self.RCNN_base)):\r\n for p in self.RCNN_base[layer].parameters():\r\n p.requires_grad = False\r\n\r\n def _head_to_tail(self, pool5):\r\n pool5 = pool5.view(pool5.size(0), -1)\r\n fc7 = self.RCNN_top(pool5) # or two large fully-connected layers\r\n\r\n return fc7"
] | [
[
"torch.nn.Linear",
"torch.nn.MaxPool2d",
"torch.nn.Sequential",
"torch.nn.AvgPool2d",
"torch.nn.BatchNorm2d",
"torch.nn.ReLU",
"torch.nn.Conv2d",
"torch.cuda.is_available",
"torch.load"
]
] |