Columns: repo_name (string, length 6-130) | hexsha (list) | file_path (list) | code (list) | apis (list) | possible_versions (list)
HamidDoost/ImageProcessing
[ "cc63f6485f5ec2dc2cb8099533a3c0a2fbfe0c47" ]
[ "Shi_Thomas_corner_keypoints.py" ]
[ "'''\r\n===============================================================================\r\n-- Author: Hamid Doostmohammadi, Azadeh Nazemi\r\n-- Create date: 05/11/2020\r\n-- Description:\tThis code is for detection of corners keypoints by Shi-Thomas method. \r\n-- Status: In progress\r\n================================================================================\r\n'''\r\n\r\nimport sys\r\nimport os\r\nimport cv2\r\nimport numpy as np\r\nimport matplotlib.pyplot as plt\r\n\r\nimage = cv2.imread(sys.argv[1])\r\nho, wo = image.shape[:2]\r\nimg = cv2.resize(image, (int(wo/10), int(ho/10)))\r\ngray_img = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)\r\n\r\ncorners = cv2.goodFeaturesToTrack(gray_img, 20, 0.01, 100)\r\n\r\ncorners = np.int0(corners)\r\n\r\nfor i in corners:\r\n x, y = i.ravel()\r\n cv2.circle(img, (x, y), 3, (255, 0, 0), -1)\r\n\r\ncv2.imshow('corner', img)\r\n\r\nif cv2.waitKey(0) & 0xff == 27:\r\n cv2.destroyAllWindows()\r\n" ]
[ [ "numpy.int0" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
syberflea/materials
[ "c17c4a4d6f8487e59eac1df8c88ca92b73d6d2a5" ]
[ "pandas-gradebook-project/04-grouping-the-data.py" ]
[ "\"\"\"Calculate student grades by combining data from many sources.\n\nUsing Pandas, this script combines data from the:\n\n* Roster\n* Homework & Exam grades\n* Quiz grades\n\nto calculate final grades for a class.\n\"\"\"\nfrom pathlib import Path\nimport pandas as pd\nimport numpy as np\n\nHERE = Path(__file__).parent\nDATA_FOLDER = HERE / \"data\"\n\n# ----------------------\n# 01 - LOADING THE DATA\n# ----------------------\n\nroster = pd.read_csv(\n DATA_FOLDER / \"roster.csv\",\n converters={\"NetID\": str.lower, \"Email Address\": str.lower},\n usecols=[\"Section\", \"Email Address\", \"NetID\"],\n index_col=\"NetID\",\n)\n\nhw_exam_grades = pd.read_csv(\n DATA_FOLDER / \"hw_exam_grades.csv\",\n converters={\"SID\": str.lower},\n usecols=lambda x: \"Submission\" not in x,\n index_col=\"SID\",\n)\n\nquiz_grades = pd.DataFrame()\nfor file_path in DATA_FOLDER.glob(\"quiz_*_grades.csv\"):\n quiz_name = \" \".join(file_path.stem.title().split(\"_\")[:2])\n quiz = pd.read_csv(\n file_path,\n converters={\"Email\": str.lower},\n index_col=[\"Email\"],\n usecols=[\"Email\", \"Grade\"],\n ).rename(columns={\"Grade\": quiz_name})\n quiz_grades = pd.concat([quiz_grades, quiz], axis=1)\n\n# ------------------------\n# 02 - MERGING DATAFRAMES\n# ------------------------\n\nfinal_data = pd.merge(\n roster,\n hw_exam_grades,\n left_index=True,\n right_index=True,\n)\nfinal_data = pd.merge(\n final_data, quiz_grades, left_on=\"Email Address\", right_index=True\n)\nfinal_data = final_data.fillna(0)\n\n# ------------------------\n# 03 - CALCULATING GRADES\n# ------------------------\n\nn_exams = 3\nfor n in range(1, n_exams + 1):\n final_data[f\"Exam {n} Score\"] = (\n final_data[f\"Exam {n}\"] / final_data[f\"Exam {n} - Max Points\"]\n )\n\nhomework_scores = final_data.filter(regex=r\"^Homework \\d\\d?$\", axis=1)\nhomework_max_points = final_data.filter(regex=r\"^Homework \\d\\d? 
-\", axis=1)\n\nsum_of_hw_scores = homework_scores.sum(axis=1)\nsum_of_hw_max = homework_max_points.sum(axis=1)\nfinal_data[\"Total Homework\"] = sum_of_hw_scores / sum_of_hw_max\n\nhw_max_renamed = homework_max_points.set_axis(homework_scores.columns, axis=1)\naverage_hw_scores = (homework_scores / hw_max_renamed).sum(axis=1)\nfinal_data[\"Average Homework\"] = average_hw_scores / homework_scores.shape[1]\n\nfinal_data[\"Homework Score\"] = final_data[\n [\"Total Homework\", \"Average Homework\"]\n].max(axis=1)\n\nquiz_scores = final_data.filter(regex=r\"^Quiz \\d$\", axis=1)\nquiz_max_points = pd.Series(\n {\"Quiz 1\": 11, \"Quiz 2\": 15, \"Quiz 3\": 17, \"Quiz 4\": 14, \"Quiz 5\": 12}\n)\n\nsum_of_quiz_scores = quiz_scores.sum(axis=1)\nsum_of_quiz_max = quiz_max_points.sum()\nfinal_data[\"Total Quizzes\"] = sum_of_quiz_scores / sum_of_quiz_max\n\naverage_quiz_scores = (quiz_scores / quiz_max_points).sum(axis=1)\nfinal_data[\"Average Quizzes\"] = average_quiz_scores / quiz_scores.shape[1]\n\nfinal_data[\"Quiz Score\"] = final_data[\n [\"Total Quizzes\", \"Average Quizzes\"]\n].max(axis=1)\n\nweightings = pd.Series(\n {\n \"Exam 1 Score\": 0.05,\n \"Exam 2 Score\": 0.1,\n \"Exam 3 Score\": 0.15,\n \"Quiz Score\": 0.30,\n \"Homework Score\": 0.4,\n }\n)\n\nfinal_data[\"Final Score\"] = (final_data[weightings.index] * weightings).sum(\n axis=1\n)\nfinal_data[\"Ceiling Score\"] = np.ceil(final_data[\"Final Score\"] * 100)\n\ngrades = {\n 90: \"A\",\n 80: \"B\",\n 70: \"C\",\n 60: \"D\",\n 0: \"F\",\n}\n\n\ndef grade_mapping(value):\n \"\"\"Map numerical grade to letter grade.\"\"\"\n for key, letter in grades.items():\n if value >= key:\n return letter\n\n\nletter_grades = final_data[\"Ceiling Score\"].map(grade_mapping)\nfinal_data[\"Final Grade\"] = pd.Categorical(\n letter_grades, categories=grades.values(), ordered=True\n)\n\n# -----------------------\n# 04 - GROUPING THE DATA\n# -----------------------\n\nfor section, table in final_data.groupby(\"Section\"):\n section_file = DATA_FOLDER / f\"Section {section} Grades.csv\"\n num_students = table.shape[0]\n print(\n f\"In Section {section} there are {num_students} students saved to \"\n f\"file {section_file}.\"\n )\n table.sort_values(by=[\"Last Name\", \"First Name\"]).to_csv(section_file)\n" ]
[ [ "pandas.merge", "pandas.read_csv", "pandas.concat", "pandas.Series", "pandas.DataFrame", "numpy.ceil" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [ "2.0", "1.4", "1.1", "1.5", "1.2", "1.3" ], "scipy": [], "tensorflow": [] } ]
SPECTRELWF/pytorch-cnn-study
[ "5c6a07f110ef973b1a910a528cdc0464efc2126d", "5c6a07f110ef973b1a910a528cdc0464efc2126d" ]
[ "Densenet_COVID/densenet.py", "Alexnet-MNIST/train.py" ]
[ "# !/usr/bin/python3\n# -*- coding:utf-8 -*-\n# Author:WeiFeng Liu\n# @Time: 2021/11/9 下午4:57\n\nimport torchvision\nimport torch.nn as nn\n\n\n\nclass my_densenet(nn.Module):\n def __init__(self):\n super(my_densenet, self).__init__()\n self.backbone = torchvision.models.densenet121(pretrained=False)\n self.fc2 = nn.Linear(1000,512)\n self.fc3 = nn.Linear(512,2)\n\n def forward(self,x):\n x = self.backbone(x)\n x = self.fc2(x)\n x = self.fc3(x)\n return x", "# !/usr/bin/python3\n# -*- coding:utf-8 -*-\n# Author:WeiFeng Liu\n# @Time: 2021/11/2 下午3:38\n\nimport torch\nimport torch.nn as nn\nimport torch.optim as optim\nimport torchvision\n\nfrom alexnet import AlexNet\nfrom utils import plot_curve\n\n# 定义使用GPU\n\ndevice = torch.device(\"cuda\" if torch.cuda.is_available() else \"cpu\")\n\n# 设置超参数\nepochs = 30\nbatch_size = 256\nlr = 0.01\n\ntrain_loader = torch.utils.data.DataLoader(\n torchvision.datasets.MNIST('mnist_data', train=True, download=True,\n transform=torchvision.transforms.Compose([\n torchvision.transforms.ToTensor(),\n # 数据归一化\n torchvision.transforms.Normalize(\n (0.1307,), (0.3081,))\n ])),\n batch_size=batch_size, shuffle=True\n)\n\ntest_loader = torch.utils.data.DataLoader(\n torchvision.datasets.MNIST('mnist_data/', train=False, download=True,\n transform=torchvision.transforms.Compose([\n torchvision.transforms.ToTensor(),\n torchvision.transforms.Normalize(\n (0.1307,), (0.3081,))\n ])),\n batch_size=256, shuffle=False\n)\n\n# 定义损失函数\ncriterion = nn.CrossEntropyLoss()\n\n# 定义网络\nnet = AlexNet().to(device)\n\n# 定义优化器\noptimzer = optim.SGD(net.parameters(), lr=lr, momentum=0.9)\n\n# train\ntrain_loss = []\nfor epoch in range(epochs):\n sum_loss = 0.0\n for batch_idx, (x, y) in enumerate(train_loader):\n print(x.shape)\n x = x.to(device)\n y = y.to(device)\n\n # 梯度清零\n optimzer.zero_grad()\n\n pred = net(x)\n loss = criterion(pred, y)\n loss.backward()\n optimzer.step()\n train_loss.append(loss.item())\n\n sum_loss += loss.item()\n\n if batch_idx % 100 == 99:\n print('[%d, %d] loss: %.03f'\n % (epoch + 1, batch_idx + 1, sum_loss / 100))\n sum_loss = 0.0\ntorch.save(net.state_dict(), '/home/lwf/code/pytorch学习/alexnet图像分类/model/model.pth')\nplot_curve(train_loss)\n" ]
[ [ "torch.nn.Linear" ], [ "torch.nn.CrossEntropyLoss", "torch.cuda.is_available" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
lutqiys/smc-oopsi
[ "5856aab3fed0b711923a918848ab255911a66ba4" ]
[ "code/python/smc.py" ]
[ "import numpy, pylab, matplotlib\nimport random, os, bisect, re\n\n\nclass Variables(object):\n \"\"\"variables specifying the particulars of the trace and the preferences of the user.\"\"\"\n def __init__(self, F, dt, \n x=None, #stimulus (zeros vector of length T that has 1s in the frames that have high spike likelihood)\n name='oopy', #name for plots/figures\n Nparticles=999, # number of particles\n Nspikehist=0, # number of spike history terms\n condsamp=False, #use conditional sampler?\n true_n = None, #true spikes, if available\n smc_iter_max = 3, # max number of iterations\n est_c = True, # do we estimate tau_c, A, C_0?\n est_t = True, # do we estimate tau_c?\n est_n = True, # b,k\n est_h = True, #w\n est_F = True, #alpha, beta\n showGraphs = True #show graphical results as we go\n ):\n \"\"\"\n initializes the variables. requires the Fluorescence trace and the frame length (dt), everything else has defaults.\n @param F: the Fluorescence trace itself.\n @param dt: seconds between timesteps\n @param x=None: stimulus (zeros vector of length T that has 1s in the frames that have high spike likelihood)\n @param name='oopy': name for plots/figures\n @param Nparticles=99: number of particles\n @param Nspikehist=0: number of spike history terms\n @param condsamp=True: use conditional sampler?\n @param true_n = None: true spikes, if available\n @param smc_iter_max = 3: max number of iterations\n @param est_c = True: do we estimate tau_c, A, C_0?\n @param est_t = True: do we estimate tau_c?\n @param est_n = True: b,k\n @param est_h = True: w\n @param est_F = True: alpha, beta\n @param showGraphs = True: show graphical results as we go\n \"\"\"\n \n self.F = F\n self.T = len(F)\n self.dt = dt\n self.name=name\n self.Nparticles=Nparticles\n self.Nspikehist = Nspikehist\n self.condsamp=condsamp\n self.true_n = true_n\n self.smc_iter_max = smc_iter_max\n self.est_c = est_c\n self.est_t = est_t\n self.est_n = est_n\n self.est_h = est_h\n self.est_F = est_F\n self.showGraphs =showGraphs\n \n if(x==None): self.x = numpy.ones(self.T)\n else:self.x = x\n \n\nclass Parameters(object):\n \"\"\"parameters of the model\"\"\"\n def __init__(self, \n V, \n tau_c = 1.0,\n A=50.0,\n C_0 = 5e-5,\n C_init = 5e-5,\n sigma_c = 0.1,\n n=1.,\n k_d = 200.,\n k = None,\n alpha=None,\n beta=None, \n zeta=None,\n gamma=None,\n omega=-1.,\n tau_h = 0.02,\n sigma_h = 5e-5,\n g=None,\n sig2_h = None,\n a=None,\n sig2_c = None\n ):\n \"\"\"initializes model parameters. needs a Variables object but everything else has default values.\n @param V: a Variables object initialized for this data set \n @param tau_c = 1.: calcium decay time constant (seconds)\n @param A = 50.: change in [Ca++] after a spike (uM)\n @param C_0 = 0.: baseline [Ca++] (uM)\n @param C_init = 0.: initial [Ca++] (uM)\n @param sigma_c = 0.1: standard dev of noise (uM ?? )\n @param n = 1.: hill eq. exponent\n @param k_d = 200: hill coeff.\n @param k = None: for now, ansatz for spikes per second. 
\n @param alpha=None: scale for F (defaults to mean(F))\n @param beta = None: offset for F (defaults to min(F))\n @param zeta=None: constant variance (defaults to alpha/5.)\n @param gamma=None: scaled variance (defaults to zeta/5.)\n @param omega=-1,: weight?\n @param tau_h = 0.02: time constant?\n @param sigma_h = 0: standard dev of noise\n @param g=None: dt/tau_h, memoized for convenience\n @param sig2_h = None: square(sigma_h)*dt , memoized for convenience\n @param a=None: dt/tau_c, memoized for convenience\n @param sig2_c = None: square(sigma_c)*dt, memoized for convenience\n \"\"\"\n \n self.lik = -numpy.inf #initial likelihood: uninitialized state is none too likely!\n \n self.V = V\n self.tau_c = tau_c\n self.A = A\n self.C_0 = C_0\n self.C_init = C_init\n self.sigma_c = sigma_c\n self.n=n\n self.k_d = k_d\n self.k=k\n if(k==None): self.k = .0001\n self.kx = self.k*V.x; #we'll have to fix this to allow k to be non-scalar. later. \n if(alpha==None): self.alpha = numpy.mean(V.F)\n else: self.alpha = alpha\n if(beta==None): self.beta = min(V.F)\n else: self.beta = beta\n if(zeta==None): self.zeta = self.alpha / 5.0\n else: self.zeta = zeta\n if(gamma==None): self.gamma = self.zeta / 5.0\n else: self.gamma = gamma\n self.omega = omega\n self.tau_h = tau_h\n self.sigma_h = sigma_h\n if(g==None): self.g = V.dt / self.tau_h\n else: self.g = g\n if(sig2_h==None): self.sig2_h = (self.sigma_h**2)*V.dt\n else:self.sig2_h = sig2_h\n if(a==None): self.a = V.dt / self.tau_c\n else: self.a = a\n if(sig2_c == None): self.sig2_c = (self.sigma_c ** 2)*V.dt\n else: self.sig2_c = sig2_c\n \n \nclass Memoized(object):\n ''' creates and holds some convenience vectors and matrices that we'll reuse a lot. \n '''\n def __init__(self, vars, pars):\n '''\n sets it up. \n @param vars: a Variables object. \n '''\n self.n_sampl = numpy.random.uniform(size = (vars.Nparticles, vars.T))\n self.C_sampl = numpy.random.uniform(size=(vars.Nparticles, vars.T))\n self.oney = numpy.ones((vars.Nparticles, 1))\n self.zeroy = numpy.zeros((vars.Nparticles, 1))\n self.U_sampl = numpy.random.uniform(size=(vars.Nparticles, vars.T))\n \n \n diffs = 1.0 / vars.Nparticles\n ints = numpy.arange(0,1,diffs)\n shapedInts = numpy.repeat(ints, vars.T).reshape((vars.Nparticles,vars.T)).T #the second .T is a transpose operation.\n self.U_resamp = shapedInts + diffs*numpy.random.uniform(size = (vars.T,vars.Nparticles))\n \n \n#here's the original code, and ints was to Nparticles+1. \n#so instead, take ints only to Nparticles, \n#and since V.T_o is 1, we're tiling exactly once, so no more repmat\n#A.U_resamp = repmat(ints(1:end-1),V.T_o,1)+diffs*rand(V.T_o,V.Nparticles); % resampling matrix\n\n self.epsilon_c = numpy.sqrt(pars.sig2_c) * numpy.random.normal(size=(vars.Nparticles, vars.T))\n\nclass ObsLik(object):\n '''\n holds the observation likelihood parameters\n '''\n def __init__(self, vars, pars):\n '''\n initializes the likelihoods.\n @param vars: a Variables object \n @param pars: a Parameters object \n '''\n self.p_o = numpy.zeros((2,1))\n self.mu_o = numpy.zeros((2,1))\n self.sig2_o = numpy.zeros(1)\n self.p = numpy.zeros(1)\n self.mu = numpy.zeros(1)\n self.sig2 = numpy.zeros(1)\n self.s = 1 # s = v.freq = \"intermittent observation frequency\". 
it's always going to be 1 for a while.\n self.V = vars\n self.P = pars\n \n self.init_lik()\n \n def init_lik(self):\n '''\n get the mean (mu1) and variance (sig1) for P[C_t | F_t]\n '''\n F = self.V.F #for brevity\n P = self.P #for brevity\n \n finv = numpy.power( ((P.k_d * (F-P.beta)) / (P.alpha - F + P.beta)),\n 1/P.n)\n \n mu1 = finv #copying josh's variable names\n if( (mu1 > 0).all() and (numpy.imag(mu1)==0).all()):\n self.sig1 = -1 / (-(-P.alpha * mu1 ** P.n * P.n / mu1 / (mu1 ** P.n + P.k_d) + P.alpha * (mu1 ** P.n) ** 2 / (mu1 ** P.n + P.k_d) ** 2 * P.n / mu1) ** 2 / (P.gamma * mu1 ** P.n / (mu1 ** P.n + P.k_d) + P.zeta) + 2 * (F - P.alpha * mu1 ** P.n / (mu1 ** P.n + P.k_d) - P.beta) / (P.gamma * mu1 ** P.n / (mu1 ** P.n + P.k_d) + P.zeta) ** 2 * (-P.alpha * mu1 ** P.n * P.n / mu1 / (mu1 ** P.n + P.k_d) + P.alpha * (mu1 ** P.n) ** 2 / (mu1 ** P.n + P.k_d) ** 2 * P.n / mu1) * (P.gamma * mu1 ** P.n * P.n / mu1 / (mu1 ** P.n + P.k_d) - P.gamma * (mu1 ** P.n) ** 2 / (mu1 ** P.n + P.k_d) ** 2 * P.n / mu1) - (F - P.alpha * mu1 ** P.n / (mu1 ** P.n + P.k_d) - P.beta) / (P.gamma * mu1 ** P.n / (mu1 ** P.n + P.k_d) + P.zeta) * (-P.alpha * mu1 ** P.n * P.n ** 2 / mu1 ** 2 / (mu1 ** P.n + P.k_d) + P.alpha * mu1 ** P.n * P.n / mu1 ** 2 / (mu1 ** P.n + P.k_d) + 3 * P.alpha * (mu1 ** P.n) ** 2 * P.n ** 2 / mu1 ** 2 / (mu1 ** P.n + P.k_d) ** 2 - 2 * P.alpha * (mu1 ** P.n) ** 3 / (mu1 ** P.n + P.k_d) ** 3 * P.n ** 2 / mu1 ** 2 - P.alpha * (mu1 ** P.n) ** 2 / (mu1 ** P.n + P.k_d) ** 2 * P.n / mu1 ** 2) - (F - P.alpha * mu1 ** P.n / (mu1 ** P.n + P.k_d) - P.beta) ** 2 / (P.gamma * mu1 ** P.n / (mu1 ** P.n + P.k_d) + P.zeta) ** 3 * (P.gamma * mu1 ** P.n * P.n / mu1 / (mu1 ** P.n + P.k_d) - P.gamma * (mu1 ** P.n) ** 2 / (mu1 ** P.n + P.k_d) ** 2 * P.n / mu1) ** 2 + 1 / 2 * (F - P.alpha * mu1 ** P.n / (mu1 ** P.n + P.k_d) - P.beta) ** 2 / (P.gamma * mu1 ** P.n / (mu1 ** P.n + P.k_d) + P.zeta) ** 2 * (P.gamma * mu1 ** P.n * P.n ** 2 / mu1 ** 2 / (mu1 ** P.n + P.k_d) - P.gamma * mu1 ** P.n * P.n / mu1 ** 2 / (mu1 ** P.n + P.k_d) - 3 * P.gamma * (mu1 ** P.n) ** 2 * P.n ** 2 / mu1 ** 2 / (mu1 ** P.n + P.k_d) ** 2 + 2 * P.gamma * (mu1 ** P.n) ** 3 / (mu1 ** P.n + P.k_d) ** 3 * P.n ** 2 / mu1 ** 2 + P.gamma * (mu1 ** P.n) ** 2 / (mu1 ** P.n + P.k_d) ** 2 * P.n / mu1 ** 2) - 1 / 2 * (P.gamma * mu1 ** P.n * P.n ** 2 / mu1 ** 2 / (mu1 ** P.n + P.k_d) - P.gamma * mu1 ** P.n * P.n / mu1 ** 2 / (mu1 ** P.n + P.k_d) - 3 * P.gamma * (mu1 ** P.n) ** 2 * P.n ** 2 / mu1 ** 2 / (mu1 ** P.n + P.k_d) ** 2 + 2 * P.gamma * (mu1 ** P.n) ** 3 / (mu1 ** P.n + P.k_d) ** 3 * P.n ** 2 / mu1 ** 2 + P.gamma * (mu1 ** P.n) ** 2 / (mu1 ** P.n + P.k_d) ** 2 * P.n / mu1 ** 2) / (P.gamma * mu1 ** P.n / (mu1 ** P.n + P.k_d) + P.zeta) + 1 / 2 * (P.gamma * mu1 ** P.n * P.n / mu1 / (mu1 ** P.n + P.k_d) - P.gamma * (mu1 ** P.n) ** 2 / (mu1 ** P.n + P.k_d) ** 2 * P.n / mu1) ** 2 / (P.gamma * mu1 ** P.n / (mu1 ** P.n + P.k_d) + P.zeta) ** 2)\n 
#sig1=-1/(-(-P.alpha*mu1^P.n*P.n/mu1/(mu1^P.n+P.k_d)+P.alpha*(mu1^P.n)^2/(mu1^P.n+P.k_d)^2*P.n/mu1)^2/(P.gamma*mu1^P.n/(mu1^P.n+P.k_d)+P.zeta)+2*(F-P.alpha*mu1^P.n/(mu1^P.n+P.k_d)-P.beta)/(P.gamma*mu1^P.n/(mu1^P.n+P.k_d)+P.zeta)^2*(-P.alpha*mu1^P.n*P.n/mu1/(mu1^P.n+P.k_d)+P.alpha*(mu1^P.n)^2/(mu1^P.n+P.k_d)^2*P.n/mu1)*(P.gamma*mu1^P.n*P.n/mu1/(mu1^P.n+P.k_d)-P.gamma*(mu1^P.n)^2/(mu1^P.n+P.k_d)^2*P.n/mu1)-(F-P.alpha*mu1^P.n/(mu1^P.n+P.k_d)-P.beta)/(P.gamma*mu1^P.n/(mu1^P.n+P.k_d)+P.zeta)*(-P.alpha*mu1^P.n*P.n^2/mu1^2/(mu1^P.n+P.k_d)+P.alpha*mu1^P.n*P.n/mu1^2/(mu1^P.n+P.k_d)+3*P.alpha*(mu1^P.n)^2*P.n^2/mu1^2/(mu1^P.n+P.k_d)^2-2*P.alpha*(mu1^P.n)^3/(mu1^P.n+P.k_d)^3*P.n^2/mu1^2-P.alpha*(mu1^P.n)^2/(mu1^P.n+P.k_d)^2*P.n/mu1^2)-(F-P.alpha*mu1^P.n/(mu1^P.n+P.k_d)-P.beta)^2/(P.gamma*mu1^P.n/(mu1^P.n+P.k_d)+P.zeta)^3*(P.gamma*mu1^P.n*P.n/mu1/(mu1^P.n+P.k_d)-P.gamma*(mu1^P.n)^2/(mu1^P.n+P.k_d)^2*P.n/mu1)^2+1/2*(F-P.alpha*mu1^P.n/(mu1^P.n+P.k_d)-P.beta)^2/(P.gamma*mu1^P.n/(mu1^P.n+P.k_d)+P.zeta)^2*(P.gamma*mu1^P.n*P.n^2/mu1^2/(mu1^P.n+P.k_d)-P.gamma*mu1^P.n*P.n/mu1^2/(mu1^P.n+P.k_d)-3*P.gamma*(mu1^P.n)^2*P.n^2/mu1^2/(mu1^P.n+P.k_d)^2+2*P.gamma*(mu1^P.n)^3/(mu1^P.n+P.k_d)^3*P.n^2/mu1^2+P.gamma*(mu1^P.n)^2/(mu1^P.n+P.k_d)^2*P.n/mu1^2)-1/2*(P.gamma*mu1^P.n*P.n^2/mu1^2/(mu1^P.n+P.k_d)-P.gamma*mu1^P.n*P.n/mu1^2/(mu1^P.n+P.k_d)-3*P.gamma*(mu1^P.n)^2*P.n^2/mu1^2/(mu1^P.n+P.k_d)^2+2*P.gamma*(mu1^P.n)^3/(mu1^P.n+P.k_d)^3*P.n^2/mu1^2+P.gamma*(mu1^P.n)^2/(mu1^P.n+P.k_d)^2*P.n/mu1^2)/(P.gamma*mu1^P.n/(mu1^P.n+P.k_d)+P.zeta)+1/2*(P.gamma*mu1^P.n*P.n/mu1/(mu1^P.n+P.k_d)-P.gamma*(mu1^P.n)^2/(mu1^P.n+P.k_d)^2*P.n/mu1)^2/(P.gamma*mu1^P.n/(mu1^P.n+P.k_d)+P.zeta)^2);\n else:\n self.mu1 = 5e-5\n self.sig1 = 5e-5\n \n def update_moments(self, A, states, t):\n '''\n @param A: A Memoized instance \n @param states: States instance\n @param t: current frame or timestep \n '''\n S = states #give me convenience or give me death!\n P = self.P\n V = self.V\n \n self.init_lik()\n self.p[0] = 1.0\n #if we had called init_like we'd now be propagating those vals into self.[mu,sig2]\n \n #two blocks for spike histories\n \n if(t < (V.T-1)):\n phat = 1-numpy.exp(-numpy.exp(P.kx[t+1])*V.dt) #if k, kx are non-scalar, then the inner term becomes -numpy.exp( p.kx[t+1).T * V.dt\n \n #an intermittent sampling for loop: tt=s:-1:2\n \n #ok but we must need this code!\n \n# for tt=s:-1:2\n# O.p_o(1:2^(s-tt+1),tt-1) = repmat(O.p_o(1:2^(s-tt),tt),2,1).*[(1-phat(tt))*ones(1,2^(s-tt)) phat(tt)*ones(1,2^(s-tt))]';\n# O.mu_o(1:2^(s-tt+1),tt-1) = (1-P.a)^(-1)*(repmat(O.mu_o(1:2^(s-tt),tt),2,1)-P.A*A.spikemat(1:2^(s-tt+1),tt-1)-P.a*P.C_0); %mean of P[O_s | C_k]\n# O.sig2_o(tt-1) = (1-P.a)^(-2)*(P.sig2_c+O.sig2_o(tt)); % var of P[O_s | C_k]\n#\n# for n=0:s-tt+1\n# nind=A.ninds{n+1};\n# O.p(n+1,tt-1) = sum(O.p_o(nind,tt-1));\n# ps = (O.p_o(nind,tt-1)/O.p(n+1,tt-1))';\n# O.mu(n+1,tt-1) = ps*O.mu_o(nind,tt-1);\n# O.sig2(n+1,tt-1)= O.sig2_o(tt-1) + ps*(O.mu_o(nind,tt-1)-repmat(O.mu(n+1,tt-1)',A.lenn(n+1),1)).^2;\n# end\n#end\n \n #if s== 2 , another intermittent sampling block \n \n #the while loop to get rid of NaN : \n #python/numpy doesn't seem to allow assignment of NaN, so we'll have to watch for div by zero errors as we go.\n \n self.p += numpy.finfo(float).eps #to avoid div by zeros\n \n \n \n\n\n\nclass States(object):\n \"\"\" states of the model\"\"\"\n def __init__(self, vars, pars):\n '''\n set up the states. 
\n @param vars: instance of a Variables object\n @param pars: instance of a Parameters object \n '''\n self.V = vars\n self.P = pars\n #convenience:\n V = self.V\n P = self.P\n \n self.p = (P.k/V.dt) + numpy.zeros((vars.Nparticles, vars.T)) #rate\n n = numpy.zeros((V.Nparticles, V.T))\n self.n = n.astype('bool') #spike counts\n self.C = P.C_init * numpy.ones((V.Nparticles, V.T)) #calcium -- probably to be rao-blackwellized away tho!\n self.w_f = (1.0 / V.Nparticles) * numpy.ones((V.Nparticles, V.T)) #forward particle weights\n self.w_b = (1.0 / V.Nparticles) * numpy.ones((V.Nparticles, V.T)) #backward particle weights\n \n #note: i think we shouldn't need this, or it shouldn't be a function of T_o, which we've gotten rid of. \n # but i want it here commented out so that when i try to use S.Neff i remember why it doesn't exist.\n self.Neff = (1.0 / V.Nparticles) * numpy.ones((1,V.T+1)) \n \n \n #just set S.p to the appropriate constant of putative firing rate times dt. \n #S.p = repmat(1-exp(-exp(P.kx)*V.dt)',1,V.Nparticles)';\n\n\n def prior_sampler(self, memoized, t):\n '''\n @param memoized: instance of Memoized\n @param t: current frame/timestep. \n '''\n \n #convenience! :\n A = memoized\n P = self.P\n V = self.V\n F = V.F\n \n #spike histories block\n \n #self.p_new = self.p[:,t]\n \n self.n[:,t] = A.U_sampl[:,t] < self.p[:,t]\n \n #this line is clearly a calcium thing but it'll have to be something for the hill stuff, which is worthwhile since the dye definitely saturates. \n self.C[:,t] = (1-P.a)*self.C[:,t-1]+P.A*self.n[:,t]+P.a*P.C_0+A.epsilon_c[:,t]\n \n #then there's an if for intermittent sampling that is now always true\n S_mu = Hill_v1(P,self.C[:,t])\n F_mu = P.alpha*S_mu+P.beta #E[F_t]\n F_var = P.gamma*S_mu+P.zeta #V[F_t]\n ln_w = -0.5* numpy.power((F[t] - F_mu),2.) / F_var - numpy.log(F_var)/2.\n \n for i in xrange(len(ln_w)):\n if(numpy.isnan(ln_w[i])):\n print('particle %d ln_w is NaN. \\n F[t]=%f \\n n=%s \\n C=%f \\n S_mu=%f \\n F_mu=%f \\n F_var=%f' %\n (i,F[t], self.n[i,t], self.C[i,t], S_mu[i], F_mu[i], F_var[i]))\n \n \n if(t>0):\n ln_w += numpy.log(self.w_f[:,t-1])\n \n #print(ln_w)\n \n ln_w = ln_w - numpy.max(ln_w)\n\n \n \n w = numpy.exp(ln_w)\n\n self.w_f[:,t] = w/numpy.sum(w)\n #print(self.w_f[:,t])\n \n #print('gamma: %f zeta: %f'%(P.gamma, P.zeta))\n #print(S_mu)\n #print(w)\n \n \ndef backward(vars, pars):\n '''\n '''\n \n\n\nclass Z(object):\n '''more memoized shit'''\n def __init__(vars, states):\n self.oney = numpy.ones(size=(vars.Nparticles,1))\n self.zeroy = numpy.zeros(size = (vars.Nparticles,vars.Nparticles))\n self.C0 = states.C[:,vars.T-1]\n self.C0mat = numpy.zeros(shape=(len(oney),len(C0)))\n for i in xrange(len(c0)):\n c0mat[:,i] = c0[i]\n \n \n \n\ndef forward(vars, pars):\n \n '''\n the model is F_t = f(C) = alpha C^n/(C^n + k_d) + beta + e_t,\n where e_t ~ N[0, gamma*f(C)+zeta]\n \n @param vars: instance of a variables object \n @param pars: instance of a parameters object \n \n @return: instance of a States object (simulation states)\n '''\n A = Memoized(vars, pars) \n S = States(vars, pars)\n #skipping the spike history stuff, but it would go roughly here-ish, and in some __init__s. \n #O = ObsLik(vars, pars)\n \n #O.p[0] = 1.\n #O.mu[0] = O.mu_o[0]\n #O.sig2[0] = O.sig2_o[0]\n \n #skipping another V.freq block\n \n #O.update_moments(A,S,0) # the 0 used to be s, which is initialized to V.Freq, which is 1. 
but is it even really a constant, or does V.freq incremement somewhwere?\n \n #convenience:\n V = vars\n P = pars\n Nresamp = 0\n #here is the particle filter:\n for t in xrange(1,V.T): #are these the right timestep bounds?\n print(t)\n S.prior_sampler(A,t)\n\n #spikeHist block\n \n #here is stratified respampling:\n Nresamp = t\n S.Neff[0,Nresamp] = 1/numpy.sum(numpy.power(S.w_f[:,t],2))\n #print(S.Neff[0,Nresamp])\n if(S.Neff[0,Nresamp] < V.Nparticles/2.0):\n #so resample:\n print('resampling')\n edges = numpy.insert(S.w_f[:,t].cumsum(),0,0)\n ind = histc_j(A.U_resamp[Nresamp,:], edges) # this does the strat. resamp.\n random.shuffle(ind) #do a permutation of the inds (to avoid potential biases.?)\n \n S.p[:,t] = S.p[ind,t] #:: if V.freq is 1, then t-V.freq+1 = t. right? #t-V.freq+1:t]; #% resample probabilities (necessary?)\n S.n[:,t] = S.n[ind,t]\n S.C[:,t] = S.C[ind,t]\n S.w_f[:,t] = (1.0/V.Nparticles)*numpy.ones((V.Nparticles)) #% reset weights\n #Nresamp += 1\n \n #skipping a spikehist block\n #O.update_moments(A,S,t); #% estimate P[O_s | C_tt] for all t'<tt<s as a gaussian\n #O.s = t #update the time var\n \n \n return S\n \n \n \ndef Hill_v1(pars,C):\n '''\n % generalized hill model\n '''\n C[C<0] = 5e-5;\n return numpy.power(C,pars.n) / ( numpy.power(C,pars.n) +pars.k_d)\n\ndef histc_j(x, edges):\n '''\n given a vector, x, and a (sorted) list of N+1 edges defining the N bins, \n returns a map, m, s.t. m[j] tells you which bin x[j] falls into.\n '''\n inds = numpy.zeros(x.size, dtype=numpy.int)\n for i in xrange(len(x)):\n inds[i] = int(max(0, bisect.bisect_left(edges,x[i])-1) )\n #bisect is intended for mainting a sorted list, so it only returns 0\n #if x[i]<=edges[0], so the call to max is to ensure that we don't put\n #the smallest value into some imaginary bin -1. \n \n return inds\n \n \n\ndef backward(vars, states, pars):\n z = Z(vars, states)\n for t in reversed(xrange(vars.T)):\n z = step_backward(vars, states, pars, z, t)\n states.w_b[:,t-1] = z.w_b\n \n \ndef step_backward(vars, states, pars, z, t):\n \n # % compute ln P[n_t^i | h_t^i]\n z.n1 = states.n[:,t]\n ln_Pn = numpy.zeros(shape = z.oney.shape)\n spikingInds = z.n1==1\n nsInds = z.n1 != 1\n ln_Pn[spikingInds] = numpy.log(states.p[spikingInds,t])\n ln_Pn[nsInds] = numpy.log(1.0 - states.p[nsInds,t])\n \n # % compute ln P[C_t^i | C_{t-1}^j, n_t^i]\n z.C0 = S.c[:,t-1]\n z.C1 = S.c[:,t]\n z.C0mat = numpy.zeros(shape=(len(z.oney),len(z.C0)))\n for i in xrange(len(z.C0)):\n z.C0mat[:,i] = z.C0[i] \n z.C1mat = numpy.zeros(shape=(len(z.oney),len(z.C1)))\n for i in xrange(len(z.C1)):\n z.C1mat[:,i] = z.C1[i] \n mu = (1-pars.a) * states.C[:,t-1] + pars.a * z.n1 + pars.a*pars.C_0\n mumat = numpy.zeros(shape = (len(z.oney),len(mu)))\n for i in xrange(len(mu)):\n mumat[:,i] = mu[i]\n ln_PC_Cn = -0.5 * (z.C1mat - mumat)**2 / pars.sig2_c\n \n #% compute ln P[h_t^i | h_{t-1}^j, n_{t-1}^i]\n ln_Ph_hn = z.zeroy\n #spike hist block.. but i think we skip it. \n \n #% compute P[H_t^i | H_{t-1}^j]\n sum_lns = ln_PC_Cn + ln_Ph_hn\n for i in xrange(len(ln_Pn)):\n sum_lns[:,i] += ln_Pn[i]\n mx = numpy.max(sum_lns, 1) #seems that here, 1 is the rows? 
\n T = numpy.exp(sum_lns - mx)\n ncols = T.shape[1] #we want each column to sum to one (here, 1 is cols)\n for i in xrange(ncols):\n normer = numpy.sum(T[:,i])\n for j in xrange(T.shape[0]):\n T[i,j] /= normer\n \n #% compute P[H_t^i, H_{t-1}^j | 0]\n \n \n \n \n \ndef realDataTest():\n par = os.path.pardir\n sep = os.path.sep\n fstring=par + sep + par + sep + 'data'+sep+'fluo.txt'\n #print(fstring)\n fluofile = open(fstring,'r')\n \n line = fluofile.readline()\n #print(line)\n numtext=re.split('[\\t]', line)\n numlist=numpy.zeros(len(numtext)-1) #omitting the last value in the fluo file b/c it's a zero-pad. \n for i in xrange(len(numtext)-1):\n #print('%d: %s'%(i, numtext[i]))\n numlist[i] = float(numtext[i])\n \n fluofile.close()\n \n numlist -= numpy.min(numlist)\n numlist /= numpy.max(numlist)\n \n v = Variables(numlist, 0.075)\n p = Parameters(v, A=20, C_0=numpy.mean(numlist[1:10]),C_init=numpy.mean(numlist[1:10]))\n S = forward(v,p)\n \n cbar = numpy.zeros(p.V.T)\n nbar = numpy.zeros(p.V.T)\n \n for t in xrange(p.V.T):\n for i in xrange(p.V.Nparticles):\n weight = S.w_f[i,t]\n cbar[t] += weight * S.C[i,t]\n nbar[t] += weight * S.n[i,t]\n \n \n \n #nbar /= p.V.Nparticles\n pylab.figure()\n pylab.hold(True)\n pylab.plot(nbar, label='expected spikes')\n pylab.title('expected spikes')\n pylab.plot(numlist, label='F')\n pylab.legend()\n \n pylab.figure()\n pylab.plot(cbar)\n pylab.title('expected Ca')\n \n #pylab.plot(numlist)\n pylab.show()\n \nif __name__ == \"__main__\":\n realDataTest()" ]
[ [ "numpy.log", "numpy.imag", "numpy.sqrt", "numpy.min", "numpy.power", "numpy.arange", "numpy.isnan", "numpy.ones", "numpy.finfo", "numpy.max", "numpy.random.normal", "numpy.mean", "numpy.random.uniform", "numpy.repeat", "numpy.exp", "numpy.zeros", "numpy.sum" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
saltwater-tensor/POT
[ "9412f0ad1c0003e659b7d779bf8b6728e0e5e60f" ]
[ "ot/weak.py" ]
[ "\"\"\"\nWeak optimal ransport solvers\n\"\"\"\n\n# Author: Remi Flamary <[email protected]>\n#\n# License: MIT License\n\nfrom .backend import get_backend\nfrom .optim import cg\nimport numpy as np\n\n__all__ = ['weak_optimal_transport']\n\n\ndef weak_optimal_transport(Xa, Xb, a=None, b=None, verbose=False, log=False, G0=None, **kwargs):\n r\"\"\"Solves the weak optimal transport problem between two empirical distributions\n\n\n .. math::\n \\gamma = \\mathop{\\arg \\min}_\\gamma \\quad \\|X_a-diag(1/a)\\gammaX_b\\|_F^2\n\n s.t. \\ \\gamma \\mathbf{1} = \\mathbf{a}\n\n \\gamma^T \\mathbf{1} = \\mathbf{b}\n\n \\gamma \\geq 0\n\n where :\n\n - :math:`X_a` :math:`X_b` are the sample matrices.\n - :math:`\\mathbf{a}` and :math:`\\mathbf{b}` are the sample weights\n\n\n .. note:: This function is backend-compatible and will work on arrays\n from all compatible backends. But the algorithm uses the C++ CPU backend\n which can lead to copy overhead on GPU arrays.\n\n Uses the conditional gradient algorithm to solve the problem proposed\n in :ref:`[39] <references-weak>`.\n\n Parameters\n ----------\n Xa : (ns,d) array-like, float\n Source samples\n Xb : (nt,d) array-like, float\n Target samples\n a : (ns,) array-like, float\n Source histogram (uniform weight if empty list)\n b : (nt,) array-like, float\n Target histogram (uniform weight if empty list))\n numItermax : int, optional\n Max number of iterations\n numItermaxEmd : int, optional\n Max number of iterations for emd\n stopThr : float, optional\n Stop threshold on the relative variation (>0)\n stopThr2 : float, optional\n Stop threshold on the absolute variation (>0)\n verbose : bool, optional\n Print information along iterations\n log : bool, optional\n record log if True\n\n\n Returns\n -------\n gamma: array-like, shape (ns, nt)\n Optimal transportation matrix for the given\n parameters\n log: dict, optional\n If input log is true, a dictionary containing the\n cost and dual variables and exit status\n\n\n .. _references-weak:\n References\n ----------\n .. [39] Gozlan, N., Roberto, C., Samson, P. M., & Tetali, P. (2017).\n Kantorovich duality for general transport costs and applications.\n Journal of Functional Analysis, 273(11), 3327-3405.\n\n See Also\n --------\n ot.bregman.sinkhorn : Entropic regularized OT\n ot.optim.cg : General regularized OT\n \"\"\"\n\n nx = get_backend(Xa, Xb)\n\n Xa2 = nx.to_numpy(Xa)\n Xb2 = nx.to_numpy(Xb)\n\n if a is None:\n a2 = np.ones((Xa.shape[0])) / Xa.shape[0]\n else:\n a2 = nx.to_numpy(a)\n if b is None:\n b2 = np.ones((Xb.shape[0])) / Xb.shape[0]\n else:\n b2 = nx.to_numpy(b)\n\n # init uniform\n if G0 is None:\n T0 = a2[:, None] * b2[None, :]\n else:\n T0 = nx.to_numpy(G0)\n\n # weak OT loss\n def f(T):\n return np.dot(a2, np.sum((Xa2 - np.dot(T, Xb2) / a2[:, None])**2, 1))\n\n # weak OT gradient\n def df(T):\n return -2 * np.dot(Xa2 - np.dot(T, Xb2) / a2[:, None], Xb2.T)\n\n # solve with conditional gradient and return solution\n if log:\n res, log = cg(a2, b2, 0, 1, f, df, T0, log=log, verbose=verbose, **kwargs)\n log['u'] = nx.from_numpy(log['u'], type_as=Xa)\n log['v'] = nx.from_numpy(log['v'], type_as=Xb)\n return nx.from_numpy(res, type_as=Xa), log\n else:\n return nx.from_numpy(cg(a2, b2, 0, 1, f, df, T0, log=log, verbose=verbose, **kwargs), type_as=Xa)\n" ]
[ [ "numpy.dot", "numpy.ones" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
SeanSyue/Insightface-IJB-release
[ "17b8bbc1ec48228ea30ce86114cc3f226fd47478" ]
[ "recognition/embedding.py" ]
[ "import argparse\nimport cv2\nimport numpy as np\nimport sys\nimport mxnet as mx\nimport datetime\nfrom skimage import transform as trans\nimport sklearn\nfrom sklearn import preprocessing\n\nclass Embedding:\n def __init__(self, prefix, epoch, ctx_ids=None):\n if ctx_ids is None:\n ctx_ids = [0]\n print('loading',prefix, epoch)\n ctx = [mx.gpu(ctx_id) for ctx_id in ctx_ids]\n sym, arg_params, aux_params = mx.model.load_checkpoint(prefix, epoch)\n all_layers = sym.get_internals()\n sym = all_layers['fc1_output']\n image_size = (112,112)\n self.image_size = image_size\n model = mx.mod.Module(symbol=sym, context=ctx, label_names = None)\n model.bind(for_training=False, data_shapes=[('data', (2, 3, image_size[0], image_size[1]))])\n model.set_params(arg_params, aux_params)\n self.model = model\n src = np.array([\n [30.2946, 51.6963],\n [65.5318, 51.5014],\n [48.0252, 71.7366],\n [33.5493, 92.3655],\n [62.7299, 92.2041] ], dtype=np.float32 )\n src[:,0] += 8.0\n self.src = src\n\n def get(self, rimg, landmark):\n assert landmark.shape[0]==68 or landmark.shape[0]==5\n assert landmark.shape[1]==2\n if landmark.shape[0]==68:\n landmark5 = np.zeros( (5,2), dtype=np.float32 )\n landmark5[0] = (landmark[36]+landmark[39])/2\n landmark5[1] = (landmark[42]+landmark[45])/2\n landmark5[2] = landmark[30]\n landmark5[3] = landmark[48]\n landmark5[4] = landmark[54]\n else:\n landmark5 = landmark\n tform = trans.SimilarityTransform()\n tform.estimate(landmark5, self.src)\n M = tform.params[0:2,:]\n img = cv2.warpAffine(rimg,M,(self.image_size[1],self.image_size[0]), borderValue = 0.0)\n img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)\n img_flip = np.fliplr(img)\n img = np.transpose(img, (2,0,1)) #3*112*112, RGB\n img_flip = np.transpose(img_flip,(2,0,1))\n input_blob = np.zeros((2, 3, self.image_size[1], self.image_size[0]),dtype=np.uint8)\n input_blob[0] = img\n input_blob[1] = img_flip\n data = mx.nd.array(input_blob)\n db = mx.io.DataBatch(data=(data,))\n self.model.forward(db, is_train=False)\n feat = self.model.get_outputs()[0].asnumpy()\n feat = feat.reshape([-1, feat.shape[0] * feat.shape[1]])\n feat = feat.flatten()\n return feat" ]
[ [ "numpy.fliplr", "numpy.array", "numpy.zeros", "numpy.transpose" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
kjanjua26/dispflownet-tf
[ "569d334275839e6ec33af04c06604ab9a8488bb2" ]
[ "inference.py" ]
[ "import argparse\nimport tensorflow as tf\nimport numpy as np\nimport os\nfrom dispnet import DispNet\nfrom util import get_var_to_restore_list\nfrom matplotlib import pyplot as plt\nimport cv2\n\n# INPUT_SIZE = (384, 768, 3)\n# INPUT_SIZE = (540, 960, 3)\nDOWNGRADE_FACTOR = 64\n\ndef pad_image(immy,down_factor = DOWNGRADE_FACTOR):\n \"\"\"\n pad image with a proper number of 0 to prevent problem when concatenating after upconv\n \"\"\"\n immy_shape = tf.shape(immy)\n new_height = tf.where(tf.equal(immy_shape[0]%down_factor,0),x=immy_shape[0],y=(tf.floordiv(immy_shape[0],down_factor)+1)*down_factor)\n new_width = tf.where(tf.equal(immy_shape[1]%down_factor,0),x=immy_shape[1],y=(tf.floordiv(immy_shape[1],down_factor)+1)*down_factor)\n immy = tf.image.resize_image_with_crop_or_pad(immy,new_height,new_width)\n return immy\n\nif __name__ == '__main__':\n parser = argparse.ArgumentParser()\n parser.add_argument(\"--left\", required=True, type=str, metavar=\"FILE\",\n help='path to the folder with left image or file with path to elaborate (one per row)')\n parser.add_argument(\"--right\", required=True, type=str, metavar='FILE',\n help=\"path to the folder with right image or file with path to elaborate (one per row)\")\n parser.add_argument(\"-c\", \"--ckpt\", dest=\"checkpoint_path\",\n default=\".\", help='model checkpoint path')\n parser.add_argument(\"--corr_type\", dest=\"corr_type\", type=str, default=\"tf\",\n help=\"correlation layer realization\", choices=['tf', 'cuda', 'none'])\n parser.add_argument(\"-o\", \"--output\", required=True,\n help=\"path were the predictions will be saved\")\n parser.add_argument(\"-v\", \"--visualization\",\n action='store_true', help=\"flag to enable visualization\")\n parser.add_argument(\"--fullRes\",help='save output of the network rescaled to full resolution',action=\"store_true\")\n parser.add_argument(\"--max_disp\",help=\"maximum value of disparity that can be predicted, clip value above\",default=500,type=int)\n args = parser.parse_args()\n\n use_dir = False\n for f in [args.left, args.right]:\n if not os.path.exists(f):\n raise Exception('Unable to find: {}'.format(f))\n if os.path.isdir(f):\n use_dir = True\n\n # create output folders\n os.makedirs(args.output, exist_ok=True)\n\n # load inputs\n if use_dir:\n left_files = [os.path.join(args.left, f) for f in os.listdir(\n args.left) if f.endswith('.png') or f.endswith('.jpg')]\n right_files = [os.path.join(args.right, f) for f in os.listdir(\n args.right) if f.endswith('.png') or f.endswith('.jpg')]\n else:\n with open(args.left) as f_in:\n left_files = [x.strip() for x in f_in.readlines()]\n with open(args.right) as f_in:\n right_files = [x.strip() for x in f_in.readlines()]\n args.left = os.path.abspath(os.path.join(args.left, os.pardir))\n\n assert(len(left_files) == len(right_files))\n couples = [(l, r) for l, r in zip(left_files, right_files)]\n filename_queue = tf.train.input_producer(\n couples, element_shape=[2], num_epochs=1, shuffle=False)\n filenames = filename_queue.dequeue()\n left_fn, right_fn = filenames[0], filenames[1]\n left_raw = tf.read_file(left_fn)\n right_raw = tf.read_file(right_fn)\n\n left_img = tf.image.decode_image(left_raw, channels=3)\n left_img.set_shape([None, None, 3])\n original_resolution = tf.shape(left_img)\n left_img = tf.image.convert_image_dtype(left_img, tf.float32)\n left_img = left_img - (100.0 / 255)\n left_img = pad_image(left_img)\n\n right_img = tf.image.decode_image(right_raw, channels=3)\n right_img = tf.image.convert_image_dtype(right_img, 
tf.float32)\n right_img.set_shape([None, None, 3])\n right_img = right_img - (100.0 / 255)\n right_img = pad_image(right_img)\n\n target_shape = tf.placeholder(dtype=tf.int32, shape=[None])\n left_placeholder = tf.placeholder(dtype=tf.float32,shape=[None,None,3])\n right_placeholder = tf.placeholder(dtype=tf.float32,shape=[None,None,3])\n\n left_input = tf.expand_dims(left_placeholder,axis=0)\n right_input = tf.expand_dims(right_placeholder,axis=0)\n\n #left_img = tf.placeholder(dtype=tf.float32,shape=[1,Npne,None,3])\n #right_img = tf.placeholder(dtype=tf.float32,)\n\n # build input batch\n #left_img_batch, right_img_batch, name_batch, resolution_batch = tf.train.batch([left_img, right_img, left_fn, original_resolution], args.batch_size, num_threads=4, capacity=args.batch_size * 100, allow_smaller_final_batch=True)\n\n # build model\n is_corr = args.corr_type != 'none'\n dispnet = DispNet(mode=\"inference\", ckpt_path=args.checkpoint_path, batch_size=1, is_corr=is_corr, corr_type=args.corr_type, image_ops=[left_input, right_input])\n raw_prediction = dispnet.predictions_test[0]\n rescaled_prediction = tf.image.resize_images(raw_prediction,tf.shape(left_placeholder)[0:2],method=tf.image.ResizeMethod.NEAREST_NEIGHBOR)\n cropped_prediction = tf.image.resize_image_with_crop_or_pad(rescaled_prediction,target_shape[0],target_shape[1])\n\n gpu_options = tf.GPUOptions(allow_growth=True)\n with tf.Session(config=tf.ConfigProto(gpu_options=gpu_options)) as sess:\n sess.run(dispnet.init)\n print(\"initialized\")\n\n coord = tf.train.Coordinator()\n threads = tf.train.start_queue_runners(sess=sess, coord=coord)\n print(\"queue runners started\")\n\n var_to_restore = get_var_to_restore_list(args.checkpoint_path, [], prefix=\"\")\n \n print('Found {} variables to restore'.format(len(var_to_restore)))\n restorer = tf.train.Saver(var_list=var_to_restore)\n restorer.restore(sess, args.checkpoint_path)\n print('Weights restored')\n\n try:\n cc = 0\n saved = []\n while True:\n lefty,righty,f,ressy = sess.run([left_img,right_img,left_fn,original_resolution])\n raw_prediction_np, full_res_prediction_np = sess.run([raw_prediction,cropped_prediction],feed_dict={left_placeholder:lefty,right_placeholder:righty,target_shape:ressy})\n\n dest = f.decode('utf-8').replace(args.left, args.output)\n dest_folder = os.path.abspath(os.path.join(dest, os.pardir))\n os.makedirs(dest_folder, exist_ok=True)\n disparity = full_res_prediction_np if args.fullRes else raw_prediction_np\n immy = -1 * np.squeeze(disparity)\n immy = immy.astype(np.uint16)\n immy[np.where(immy>args.max_disp)]=args.max_disp\n cv2.imwrite(dest, immy)\n saved.append(dest)\n\n if args.visualization:\n plt.figure('input_L')\n plt.imshow(np.squeeze(lefty + (100 / 255)))\n\n plt.figure('input_R')\n plt.imshow(np.squeeze(righty + (100 / 255)))\n\n plt.figure('prediction')\n plt.imshow(np.squeeze(disparity))\n plt.colorbar()\n plt.show()\n cc += 1\n print('{}/{}'.format(cc,\n len(left_files)), end='\\r')\n\n except tf.errors.OutOfRangeError:\n print('Done')\n\n finally:\n coord.request_stop()\n coord.join(threads)\n sess.close()\n\n saved = sorted(saved)\n with open(os.path.join(args.output,'prediction_list.txt'),'w+') as f_out:\n f_out.write('\\n'.join(saved))\n" ]
[ [ "tensorflow.floordiv", "numpy.squeeze", "tensorflow.equal", "tensorflow.GPUOptions", "tensorflow.image.decode_image", "tensorflow.train.input_producer", "numpy.where", "tensorflow.read_file", "tensorflow.ConfigProto", "tensorflow.train.Saver", "matplotlib.pyplot.figure", "tensorflow.shape", "tensorflow.train.Coordinator", "tensorflow.placeholder", "matplotlib.pyplot.show", "tensorflow.image.resize_image_with_crop_or_pad", "tensorflow.train.start_queue_runners", "tensorflow.expand_dims", "matplotlib.pyplot.colorbar", "tensorflow.image.convert_image_dtype" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [ "1.10" ] } ]
nanpuhaha/SerpentAI-Image
[ "3e7d4c134e4a52bad5f9ce9af95717b05ce3f34f" ]
[ "serpent_image/image.py" ]
[ "import io\nimport pathlib\nimport uuid\n\nimport numpy as np\n\nimport skimage.io\nimport skimage.color\nimport skimage.filters\nimport skimage.metrics\nimport skimage.morphology\nimport skimage.segmentation\nimport skimage.transform\n\nfrom PIL import Image as PILImage\n\nfrom sklearn.cluster import KMeans\n\n\nclass Image:\n small_ratio = 0.25\n\n def __init__(self, array=None):\n if not isinstance(array, np.ndarray):\n raise ValueError(\"'array' should be a numpy array...\")\n\n self.uuid = str(uuid.uuid4())\n self.array = self._as_uint8_rgba(array)\n\n def _repr_png_(self):\n \"\"\"Jupyter Notebook image display hook\"\"\"\n return PILImage.fromarray(self.array)._repr_png_()\n\n @property\n def width(self):\n return self.array.shape[1]\n\n @property\n def height(self):\n return self.array.shape[0]\n\n @property\n def short_side(self):\n return min(self.width, self.height)\n\n @property\n def long_side(self):\n return max(self.width, self.height)\n\n @property\n def aspect_ratio(self):\n return self.width / self.height\n\n @property\n def area(self):\n return self.width * self.height\n\n @property\n def area_non_zero(self):\n return np.sum(self.array[:, :, 0] > 0)\n\n @property\n def has_transparency(self):\n return np.min(self.array[:, :, 3]) < 255\n\n @property\n def small(self):\n return self.rescale(self.small_ratio)\n\n @property\n def rgba(self):\n return self.array\n\n @property\n def rgba_small(self):\n return self.small\n\n @property\n def rgba_normalized(self):\n return np.array(self.array, dtype=np.float64) / 255.0\n\n @property\n def rgba_normalized_small(self):\n return np.array(self.small, dtype=np.float64) / 255.0\n\n @property\n def alpha(self):\n return self.array[:, :, 3]\n\n @property\n def rgb(self):\n return self.array[:, :, :3]\n\n @property\n def rgb_small(self):\n return self.small[:, :, :3]\n\n @property\n def rgb_normalized(self):\n return np.array(self.array[:, :, :3], dtype=np.float64) / 255.0\n\n @property\n def rgb_normalized_small(self):\n return np.array(self.small[:, :, :3], dtype=np.float64) / 255.0\n\n @property\n def lab(self):\n return skimage.color.rgb2lab(self.rgb)\n\n @property\n def lab_small(self):\n return skimage.color.rgb2lab(self.rgb_small)\n\n @property\n def lab_normalized(self):\n lab_array = self.lab\n\n lab_array[:, :, 0] = lab_array[:, :, 0] / 100.0\n lab_array[:, :, 1] = (lab_array[:, :, 1] + 128.0) / 255.0\n lab_array[:, :, 2] = (lab_array[:, :, 2] + 128.0) / 255.0\n\n return lab_array\n\n @property\n def lab_normalized_small(self):\n lab_array = self.lab_small\n\n lab_array[:, :, 0] = lab_array[:, :, 0] / 100.0\n lab_array[:, :, 1] = (lab_array[:, :, 1] + 128.0) / 255.0\n lab_array[:, :, 2] = (lab_array[:, :, 2] + 128.0) / 255.0\n\n return lab_array\n\n @property\n def grayscale(self):\n return np.array(skimage.color.rgb2gray(self.rgb) * 255, dtype=np.uint8)\n\n @property\n def grayscale_rgba(self):\n grayscale_array = np.array(\n skimage.color.rgb2gray(self.rgb) * 255, dtype=np.uint8\n )\n\n grayscale_array = skimage.color.gray2rgb(grayscale_array)\n\n return np.dstack((grayscale_array, self.array[:, :, 3]))\n\n @property\n def blurred(self):\n return self.blur(sigma=1.0)\n\n @property\n def gradients(self):\n return self.generate_gradients_array(shape=\"SQUARE\", size=8)\n\n @property\n def empty_mask(self):\n return self.__class__(np.empty_like(self.rgb, dtype=np.uint8))\n\n @property\n def full_mask(self):\n return self.__class__((np.full_like(self.rgb, 255, dtype=np.uint8)))\n\n @property\n def inverted(self):\n return 
self.invert(as_image=True)\n\n @property\n def average_color(self):\n average_r = int(round(np.mean(self.array[:, :, 0])))\n average_g = int(round(np.mean(self.array[:, :, 1])))\n average_b = int(round(np.mean(self.array[:, :, 2])))\n\n return average_r, average_g, average_b\n\n @property\n def average_lightness(self):\n return int(round(np.mean(self.grayscale.flatten())))\n\n @property\n def average_mask_lightness(self):\n grayscale = self.grayscale.flatten()\n grayscale = np.delete(grayscale, np.where(grayscale == 0))\n\n return int(round(np.mean(grayscale)))\n\n @property\n def predominant_color(self):\n minimum_error = 10.0\n predominant_color = None\n\n for color in self.dominant_colors:\n image_error = np.full(self.rgb.shape, color, dtype=np.uint8)\n image_error = np.dstack((image_error, self.array[:, :, 3]))\n\n error = skimage.metrics.normalized_root_mse(self.array, image_error)\n\n if error < minimum_error:\n minimum_error = error\n predominant_color = color\n\n return predominant_color\n\n @property\n def dominant_colors(self):\n return self.determine_dominant_colors(quantity=8)\n\n @property\n def as_pil(self):\n return PILImage.fromarray(self.array)\n\n @property\n def as_png_bytes(self):\n png_bytes = io.BytesIO()\n self.as_pil.save(png_bytes, format=\"PNG\")\n png_bytes.seek(0)\n\n return png_bytes.read()\n\n def update(self, array, new_uuid=False):\n self.array = self._as_uint8_rgba(array)\n\n if new_uuid:\n self.uuid = str(uuid.uuid4())\n\n def resize(self, width, height, order=1, anti_aliasing=True, as_image=False):\n array = skimage.transform.resize(\n self.array, (height, width), anti_aliasing=anti_aliasing, order=order\n )\n\n if as_image:\n return self.__class__(array)\n else:\n return self._as_uint8_rgba(array)\n\n def resize_long_side_to(self, size, as_image=False):\n scale = size / self.long_side\n return self.rescale(scale, as_image=as_image)\n\n def resize_short_side_to(self, size, as_image=False):\n scale = size / self.short_side\n return self.rescale(scale, as_image=as_image)\n\n def rescale(self, scale, as_image=False):\n array = skimage.transform.rescale(self.array, scale, multichannel=True)\n\n if as_image:\n return self.__class__(array)\n else:\n return self._as_uint8_rgba(array)\n\n def rotate(self, angle, as_image=False):\n rotate_array = np.array(\n self.as_pil.rotate(angle, resample=PILImage.BICUBIC), dtype=np.uint8\n )\n\n if as_image:\n return self.__class__(rotate_array)\n else:\n return rotate_array\n\n def blur(self, sigma=1.0, as_image=False):\n blur_array = np.array(\n skimage.filters.gaussian(self.array, sigma=sigma, multichannel=True) * 255,\n dtype=np.uint8,\n )\n\n if as_image:\n return self.__class__(blur_array)\n else:\n return blur_array\n\n def invert(self, as_image=False):\n invert_array = np.array(skimage.util.invert(self.rgb))\n\n invert_array = np.dstack((invert_array, self.array[:, :, 3]))\n\n if as_image:\n return self.__class__(invert_array)\n else:\n return invert_array\n\n def desaturate(self, ratio, as_image=False):\n desaturate_array = np.array(\n (\n (self.rgb_normalized * ratio)\n + (self.__class__(self.grayscale_rgba).rgb_normalized * (1.0 - ratio))\n )\n * 255,\n dtype=np.uint8,\n )\n\n desaturate_array = np.dstack((desaturate_array, self.array[:, :, 3]))\n\n if as_image:\n return self.__class__(desaturate_array)\n else:\n return desaturate_array\n\n def segment(self, segments=24, segment_by=\"COLOR\", compactness=8, sigma=2):\n if segment_by not in [\"COLOR\", \"LIGHTNESS\"]:\n segment_by = \"COLOR\"\n\n if segment_by == 
\"COLOR\":\n array = self.rgb_normalized\n else:\n array = self.__class__(self.grayscale_rgba).rgb_normalized\n\n return skimage.segmentation.slic(\n array,\n n_segments=segments,\n compactness=compactness,\n sigma=sigma,\n enforce_connectivity=False,\n )\n\n def determine_dominant_colors(self, quantity=8):\n image = self.resize_long_side_to(256, as_image=True)\n image_kmeans = image.lab.reshape(image.width * image.height, 3)\n\n kmeans = KMeans(n_clusters=quantity, n_jobs=1).fit_predict(image_kmeans)\n\n clusters = dict()\n\n for i in range(quantity):\n clusters[i] = list()\n\n for i, cluster in enumerate(kmeans):\n clusters[cluster].append(image_kmeans[i])\n\n dominant_colors = list()\n\n for lab_tuples in clusters.values():\n l, a, b = zip(*lab_tuples)\n\n lab_color = np.array((np.mean(l), np.mean(a), np.mean(b)), dtype=np.float64)\n lab_color = np.full((1, 1, 3), lab_color, dtype=np.float64)\n\n rgb_color = np.array(skimage.color.lab2rgb(lab_color) * 255, dtype=np.uint8)\n\n dominant_colors.append(tuple(rgb_color[0, 0, :]))\n\n return dominant_colors\n\n def generate_color_strip(self, colors=2048, height=1):\n image = self.resize_long_side_to(256, as_image=True)\n\n segments = skimage.segmentation.slic(\n image.rgb,\n compactness=1,\n n_segments=colors,\n sigma=0,\n enforce_connectivity=False,\n )\n\n image = skimage.color.label2rgb(segments, image.rgb, kind=\"avg\")\n image = np.array(skimage.color.rgb2hsv(image) * 255, dtype=np.uint8)\n\n colors = np.unique(image.reshape(-1, image.shape[2]), axis=0)\n\n color_strip = np.zeros((height, len(colors), 3), dtype=np.uint8)\n\n for i, color in enumerate(colors):\n color_strip[:, i, :] = color\n\n color_strip = np.array(skimage.color.hsv2rgb(color_strip) * 255, dtype=np.uint8)\n\n return self.__class__(color_strip)\n\n def generate_gradients_array(self, shape=\"SQUARE\", size=1):\n if shape not in (\"SQUARE\", \"DISK\"):\n shape = \"SQUARE\"\n\n shapes = {\"SQUARE\": skimage.morphology.square, \"DISK\": skimage.morphology.disk}\n\n return skimage.filters.rank.gradient(self.grayscale, shapes[shape](size))\n\n def generate_otsu_threshold_mask(self, sigma=0.0):\n grayscale = self.grayscale\n\n threshold = skimage.filters.threshold_otsu(grayscale)\n\n mask = grayscale <= threshold\n mask = np.array(mask, dtype=np.uint8) * 255\n\n if sigma > 0.0:\n mask = skimage.filters.gaussian(mask, sigma=sigma)\n mask = np.array(mask * 255, dtype=np.uint8)\n\n return self.__class__(mask)\n\n def generate_local_threshold_mask(self, size=51, sigma=0.0):\n if not size % 2:\n size += 1\n\n grayscale = self.grayscale\n\n threshold = skimage.filters.threshold_local(grayscale, size)\n\n mask = grayscale <= threshold\n mask = np.array(mask, dtype=np.uint8) * 255\n\n if sigma > 0.0:\n mask = skimage.filters.gaussian(mask, sigma=sigma)\n mask = np.array(mask * 255, dtype=np.uint8)\n\n return self.__class__(mask)\n\n def get_local_neighborhood(self, point, size, as_image=False):\n y0 = max(point[0] - size, 0)\n x0 = max(point[1] - size, 0)\n y1 = min(point[0] + size, self.height)\n x1 = min(point[1] + size, self.width)\n\n local_neighborhood_array = self.array[y0:y1, x0:x1, :]\n\n if as_image:\n return self.__class__(local_neighborhood_array)\n else:\n return local_neighborhood_array\n\n def calculate_error(self, image):\n return skimage.metrics.mean_squared_error(\n self.rgba_normalized, image.rgba_normalized\n )\n\n def calculate_point_error(self, image, point, size):\n reference_array = self.get_local_neighborhood(\n point, size, as_image=True\n ).rgba_normalized\n\n 
test_array = image.get_local_neighborhood(\n point, size, as_image=True\n ).rgba_normalized\n\n return skimage.metrics.mean_squared_error(reference_array, test_array)\n\n def calculate_otsu_threshold(self, channel=0):\n if channel not in (0, 1, 2, 3):\n channel = 0\n\n return skimage.filters.threshold_otsu(self.array[:, :, channel])\n\n @classmethod\n def from_png_bytes(cls, png_bytes):\n try:\n array = skimage.io.imread(png_bytes)\n except ValueError:\n raise ValueError(\"Invalid image data!\")\n\n return cls(array)\n\n @classmethod\n def from_file(cls, file_path):\n if isinstance(file_path, str):\n file_path = pathlib.Path(file_path)\n\n if not file_path.is_file():\n raise FileNotFoundError()\n\n try:\n array = skimage.io.imread(file_path)\n except ValueError:\n raise ValueError(f\"Invalid image data: 'f{file_path}'\")\n\n return cls(array)\n\n @classmethod\n def copy(cls, image=None, keep_uuid=False):\n if not isinstance(image, cls):\n raise TypeError(\"'image' should be of type Image...\")\n\n image_copy = cls(image.array)\n\n if keep_uuid:\n image_copy.uuid = image.uuid\n\n return image_copy\n\n @staticmethod\n def _as_uint8_rgba(array):\n # Rescue a few common dtypes before exception\n if array.dtype == np.bool:\n array = np.array(array, dtype=np.uint8) * 255\n elif array.dtype == np.float64:\n if np.max(array) <= 1.0:\n array = array * 255.0\n\n array = np.array(array, dtype=np.uint8)\n\n if array.dtype != np.uint8:\n raise TypeError(f\"Unsupported image array dtype: '{array.dtype}'\")\n\n # Grayscale to RGB\n if len(array.shape) == 2:\n array = skimage.color.gray2rgb(array)\n\n # RGB to RGBA\n if array.shape[2] == 3:\n array = np.array(PILImage.fromarray(array).convert(\"RGBA\"))\n\n return array\n" ]
[ [ "sklearn.cluster.KMeans", "numpy.min", "numpy.empty_like", "numpy.dstack", "numpy.full", "numpy.full_like", "numpy.max", "numpy.mean", "numpy.array", "numpy.where", "numpy.sum" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
wenh18/OnDeviceNAS
[ "d6e39500b794ddd9737ef4bc631cf4f977b47617", "d6e39500b794ddd9737ef4bc631cf4f977b47617" ]
[ "mytimm/models/layers/split_batchnorm.py", "mytimm/models/efficientnet_blocks.py" ]
[ "\"\"\" Split BatchNorm\n\nA PyTorch BatchNorm layer that splits input batch into N equal parts and passes each through\na separate BN layer. The first split is passed through the parent BN layers with weight/bias\nkeys the same as the original BN. All other splits pass through BN sub-layers under the '.aux_bn'\nnamespace.\n\nThis allows easily removing the auxiliary BN layers after training to efficiently\nachieve the 'Auxiliary BatchNorm' as described in the AdvProp Paper, section 4.2,\n'Disentangled Learning via An Auxiliary BN'\n\nHacked together by / Copyright 2020 Ross Wightman\n\"\"\"\nimport torch\nimport torch.nn as nn\n\n\nclass SplitBatchNorm2d(torch.nn.BatchNorm2d):\n\n def __init__(self, num_features, eps=1e-5, momentum=0.1, affine=True,\n track_running_stats=True, num_splits=2):\n super().__init__(num_features, eps, momentum, affine, track_running_stats)\n assert num_splits > 1, 'Should have at least one aux BN layer (num_splits at least 2)'\n self.num_splits = num_splits\n self.aux_bn = nn.ModuleList([\n nn.BatchNorm2d(num_features, eps, momentum, affine, track_running_stats) for _ in range(num_splits - 1)])\n\n def forward(self, input: torch.Tensor):\n if self.training: # aux BN only relevant while training\n split_size = input.shape[0] // self.num_splits\n assert input.shape[0] == split_size * self.num_splits, \"batch size must be evenly divisible by num_splits\"\n split_input = input.split(split_size)\n x = [super().forward(split_input[0])]\n for i, a in enumerate(self.aux_bn):\n x.append(a(split_input[i + 1]))\n return torch.cat(x, dim=0)\n else:\n return super().forward(input)\n\n\ndef convert_splitbn_model(module, num_splits=2):\n \"\"\"\n Recursively traverse module and its children to replace all instances of\n ``torch.nn.modules.batchnorm._BatchNorm`` with `SplitBatchnorm2d`.\n Args:\n module (torch.nn.Module): input module\n num_splits: number of separate batchnorm layers to split input across\n Example::\n >>> # model is an instance of torch.nn.Module\n >>> model = mytimm.models.convert_splitbn_model(model, num_splits=2)\n \"\"\"\n mod = module\n if isinstance(module, torch.nn.modules.instancenorm._InstanceNorm):\n return module\n if isinstance(module, torch.nn.modules.batchnorm._BatchNorm):\n mod = SplitBatchNorm2d(\n module.num_features, module.eps, module.momentum, module.affine,\n module.track_running_stats, num_splits=num_splits)\n mod.running_mean = module.running_mean\n mod.running_var = module.running_var\n mod.num_batches_tracked = module.num_batches_tracked\n if module.affine:\n # print(\"model is affine\")\n mod.weight.data = module.weight.data.clone().detach()\n mod.bias.data = module.bias.data.clone().detach()\n # else:\n # print(\"ooooooooooooooooooooooooooops\")\n for aux in mod.aux_bn:\n aux.running_mean = module.running_mean.clone()\n aux.running_var = module.running_var.clone()\n aux.num_batches_tracked = module.num_batches_tracked.clone()\n if module.affine:\n aux.weight.data = module.weight.data.clone().detach()\n aux.bias.data = module.bias.data.clone().detach()\n for name, child in module.named_children():\n # print(name, child)\n mod.add_module(name, convert_splitbn_model(child, num_splits=num_splits))\n del module\n return mod\n", "\"\"\" EfficientNet, MobileNetV3, etc Blocks\n\nHacked together by / Copyright 2019, Ross Wightman\n\"\"\"\n\nimport torch\nimport torch.nn as nn\nfrom torch.nn import functional as F\n\nfrom .layers import create_conv2d, drop_path, make_divisible, create_act_layer\nfrom .layers.activations import 
sigmoid\n\n__all__ = [\n 'SqueezeExcite', 'ConvBnAct', 'DepthwiseSeparableConv', 'InvertedResidual', 'CondConvResidual', 'EdgeResidual']\n\n\nclass SqueezeExcite(nn.Module):\n \"\"\" Squeeze-and-Excitation w/ specific features for EfficientNet/MobileNet family\n\n Args:\n in_chs (int): input channels to layer\n rd_ratio (float): ratio of squeeze reduction\n act_layer (nn.Module): activation layer of containing block\n gate_layer (Callable): attention gate function\n force_act_layer (nn.Module): override block's activation fn if this is set/bound\n rd_round_fn (Callable): specify a fn to calculate rounding of reduced chs\n \"\"\"\n\n def __init__(\n self, in_chs, rd_ratio=0.25, rd_channels=None, act_layer=nn.ReLU,\n gate_layer=nn.Sigmoid, force_act_layer=None, rd_round_fn=None):\n super(SqueezeExcite, self).__init__()\n if rd_channels is None:\n rd_round_fn = rd_round_fn or round\n rd_channels = rd_round_fn(in_chs * rd_ratio)\n act_layer = force_act_layer or act_layer\n self.conv_reduce = nn.Conv2d(in_chs, rd_channels, 1, bias=True)\n self.act1 = create_act_layer(act_layer, inplace=True)\n self.conv_expand = nn.Conv2d(rd_channels, in_chs, 1, bias=True)\n self.gate = create_act_layer(gate_layer)\n\n def forward(self, x):\n x_se = x.mean((2, 3), keepdim=True)\n x_se = self.conv_reduce(x_se)\n x_se = self.act1(x_se)\n x_se = self.conv_expand(x_se)\n return x * self.gate(x_se)\n\n\nclass ConvBnAct(nn.Module):\n \"\"\" Conv + Norm Layer + Activation w/ optional skip connection\n \"\"\"\n def __init__(\n self, in_chs, out_chs, kernel_size, stride=1, dilation=1, pad_type='',\n skip=False, act_layer=nn.ReLU, norm_layer=nn.BatchNorm2d, drop_path_rate=0.):\n super(ConvBnAct, self).__init__()\n self.has_residual = skip and stride == 1 and in_chs == out_chs\n self.drop_path_rate = drop_path_rate\n self.conv = create_conv2d(in_chs, out_chs, kernel_size, stride=stride, dilation=dilation, padding=pad_type)\n self.bn1 = norm_layer(out_chs)\n self.act1 = act_layer(inplace=True)\n\n def feature_info(self, location):\n if location == 'expansion': # output of conv after act, same as block coutput\n info = dict(module='act1', hook_type='forward', num_chs=self.conv.out_channels)\n else: # location == 'bottleneck', block output\n info = dict(module='', hook_type='', num_chs=self.conv.out_channels)\n return info\n\n def forward(self, x):\n shortcut = x\n x = self.conv(x)\n x = self.bn1(x)\n x = self.act1(x)\n if self.has_residual:\n if self.drop_path_rate > 0.:\n x = drop_path(x, self.drop_path_rate, self.training)\n x += shortcut\n return x\n\n\nclass DepthwiseSeparableConv(nn.Module):\n \"\"\" DepthwiseSeparable block\n Used for DS convs in MobileNet-V1 and in the place of IR blocks that have no expansion\n (factor of 1.0). 
This is an alternative to having a IR with an optional first pw conv.\n \"\"\"\n def __init__(\n self, in_chs, out_chs, dw_kernel_size=3, stride=1, dilation=1, pad_type='',\n noskip=False, pw_kernel_size=1, pw_act=False, act_layer=nn.ReLU, norm_layer=nn.BatchNorm2d,\n se_layer=None, drop_path_rate=0.):\n super(DepthwiseSeparableConv, self).__init__()\n self.has_residual = (stride == 1 and in_chs == out_chs) and not noskip\n self.has_pw_act = pw_act # activation after point-wise conv\n self.drop_path_rate = drop_path_rate\n\n self.conv_dw = create_conv2d(\n in_chs, in_chs, dw_kernel_size, stride=stride, dilation=dilation, padding=pad_type, depthwise=True)\n self.bn1 = norm_layer(in_chs)\n self.act1 = act_layer(inplace=True)\n\n # Squeeze-and-excitation\n self.se = se_layer(in_chs, act_layer=act_layer) if se_layer else nn.Identity()\n\n self.conv_pw = create_conv2d(in_chs, out_chs, pw_kernel_size, padding=pad_type)\n self.bn2 = norm_layer(out_chs)\n self.act2 = act_layer(inplace=True) if self.has_pw_act else nn.Identity()\n\n def feature_info(self, location):\n if location == 'expansion': # after SE, input to PW\n info = dict(module='conv_pw', hook_type='forward_pre', num_chs=self.conv_pw.in_channels)\n else: # location == 'bottleneck', block output\n info = dict(module='', hook_type='', num_chs=self.conv_pw.out_channels)\n return info\n\n def forward(self, x):\n shortcut = x\n\n x = self.conv_dw(x)\n x = self.bn1(x)\n x = self.act1(x)\n\n x = self.se(x)\n\n x = self.conv_pw(x)\n x = self.bn2(x)\n x = self.act2(x)\n\n if self.has_residual:\n if self.drop_path_rate > 0.:\n x = drop_path(x, self.drop_path_rate, self.training)\n x += shortcut\n return x\n\n\nclass InvertedResidual(nn.Module):\n \"\"\" Inverted residual block w/ optional SE\n\n Originally used in MobileNet-V2 - https://arxiv.org/abs/1801.04381v4, this layer is often\n referred to as 'MBConv' for (Mobile inverted bottleneck conv) and is also used in\n * MNasNet - https://arxiv.org/abs/1807.11626\n * EfficientNet - https://arxiv.org/abs/1905.11946\n * MobileNet-V3 - https://arxiv.org/abs/1905.02244\n \"\"\"\n\n def __init__(\n self, in_chs, out_chs, dw_kernel_size=3, stride=1, dilation=1, pad_type='',\n noskip=False, exp_ratio=1.0, exp_kernel_size=1, pw_kernel_size=1, act_layer=nn.ReLU,\n norm_layer=nn.BatchNorm2d, se_layer=None, conv_kwargs=None, drop_path_rate=0.):\n super(InvertedResidual, self).__init__()\n conv_kwargs = conv_kwargs or {}\n\n self.conv_kwargs = conv_kwargs\n self.pad_type = pad_type\n self.dilation = dilation\n\n\n mid_chs = make_divisible(in_chs * exp_ratio)\n self.has_residual = (in_chs == out_chs and stride == 1) and not noskip\n self.drop_path_rate = drop_path_rate\n\n self.mid_chs = mid_chs\n\n # Point-wise expansion\n self.conv_pw = create_conv2d(in_chs, mid_chs, exp_kernel_size, padding=pad_type, **conv_kwargs)\n self.bn1 = norm_layer(mid_chs)\n self.act1 = act_layer(inplace=True)\n\n # Depth-wise convolution\n self.conv_dw = create_conv2d(\n mid_chs, mid_chs, dw_kernel_size, stride=stride, dilation=dilation,\n padding=pad_type, depthwise=True, **conv_kwargs)\n self.bn2 = norm_layer(mid_chs)\n self.act2 = act_layer(inplace=True)\n\n # Squeeze-and-excitation\n self.se = se_layer(mid_chs, act_layer=act_layer) if se_layer else nn.Identity()\n\n # Point-wise linear projection\n self.conv_pwl = create_conv2d(mid_chs, out_chs, pw_kernel_size, padding=pad_type, **conv_kwargs)\n self.bn3 = norm_layer(out_chs)\n\n def feature_info(self, location):\n if location == 'expansion': # after SE, input to PWL\n info = 
dict(module='conv_pwl', hook_type='forward_pre', num_chs=self.conv_pwl.in_channels)\n else: # location == 'bottleneck', block output\n info = dict(module='', hook_type='', num_chs=self.conv_pwl.out_channels)\n return info\n\n def forward(self, x):\n shortcut = x\n\n # Point-wise expansion\n x = self.conv_pw(x)\n x = self.bn1(x)\n x = self.act1(x)\n\n # Depth-wise convolution\n x = self.conv_dw(x)\n x = self.bn2(x)\n x = self.act2(x)\n\n # Squeeze-and-excitation\n x = self.se(x)\n\n # Point-wise linear projection\n x = self.conv_pwl(x)\n x = self.bn3(x)\n\n if self.has_residual:\n if self.drop_path_rate > 0.:\n x = drop_path(x, self.drop_path_rate, self.training)\n x += shortcut\n\n return x\n\n\nclass CondConvResidual(InvertedResidual):\n \"\"\" Inverted residual block w/ CondConv routing\"\"\"\n\n def __init__(\n self, in_chs, out_chs, dw_kernel_size=3, stride=1, dilation=1, pad_type='',\n noskip=False, exp_ratio=1.0, exp_kernel_size=1, pw_kernel_size=1, act_layer=nn.ReLU,\n norm_layer=nn.BatchNorm2d, se_layer=None, num_experts=0, drop_path_rate=0.):\n\n self.num_experts = num_experts\n conv_kwargs = dict(num_experts=self.num_experts)\n\n super(CondConvResidual, self).__init__(\n in_chs, out_chs, dw_kernel_size=dw_kernel_size, stride=stride, dilation=dilation, pad_type=pad_type,\n act_layer=act_layer, noskip=noskip, exp_ratio=exp_ratio, exp_kernel_size=exp_kernel_size,\n pw_kernel_size=pw_kernel_size, se_layer=se_layer, norm_layer=norm_layer, conv_kwargs=conv_kwargs,\n drop_path_rate=drop_path_rate)\n\n self.routing_fn = nn.Linear(in_chs, self.num_experts)\n\n def forward(self, x):\n shortcut = x\n\n # CondConv routing\n pooled_inputs = F.adaptive_avg_pool2d(x, 1).flatten(1)\n routing_weights = torch.sigmoid(self.routing_fn(pooled_inputs))\n\n # Point-wise expansion\n x = self.conv_pw(x, routing_weights)\n x = self.bn1(x)\n x = self.act1(x)\n\n # Depth-wise convolution\n x = self.conv_dw(x, routing_weights)\n x = self.bn2(x)\n x = self.act2(x)\n\n # Squeeze-and-excitation\n x = self.se(x)\n\n # Point-wise linear projection\n x = self.conv_pwl(x, routing_weights)\n x = self.bn3(x)\n\n if self.has_residual:\n if self.drop_path_rate > 0.:\n x = drop_path(x, self.drop_path_rate, self.training)\n x += shortcut\n return x\n\n\nclass EdgeResidual(nn.Module):\n \"\"\" Residual block with expansion convolution followed by pointwise-linear w/ stride\n\n Originally introduced in `EfficientNet-EdgeTPU: Creating Accelerator-Optimized Neural Networks with AutoML`\n - https://ai.googleblog.com/2019/08/efficientnet-edgetpu-creating.html\n\n This layer is also called FusedMBConv in the MobileDet, EfficientNet-X, and EfficientNet-V2 papers\n * MobileDet - https://arxiv.org/abs/2004.14525\n * EfficientNet-X - https://arxiv.org/abs/2102.05610\n * EfficientNet-V2 - https://arxiv.org/abs/2104.00298\n \"\"\"\n\n def __init__(\n self, in_chs, out_chs, exp_kernel_size=3, stride=1, dilation=1, pad_type='',\n force_in_chs=0, noskip=False, exp_ratio=1.0, pw_kernel_size=1, act_layer=nn.ReLU,\n norm_layer=nn.BatchNorm2d, se_layer=None, drop_path_rate=0.):\n super(EdgeResidual, self).__init__()\n if force_in_chs > 0:\n mid_chs = make_divisible(force_in_chs * exp_ratio)\n else:\n mid_chs = make_divisible(in_chs * exp_ratio)\n self.has_residual = (in_chs == out_chs and stride == 1) and not noskip\n self.drop_path_rate = drop_path_rate\n\n self.pad_type = pad_type\n self.dilation = dilation\n self.mid_chs = mid_chs\n self.noskip = noskip\n # Expansion convolution\n self.conv_exp = create_conv2d(\n in_chs, mid_chs, 
exp_kernel_size, stride=stride, dilation=dilation, padding=pad_type)\n self.bn1 = norm_layer(mid_chs)\n self.act1 = act_layer(inplace=True)\n\n # Squeeze-and-excitation\n self.se = se_layer(mid_chs, act_layer=act_layer) if se_layer else nn.Identity()\n\n # Point-wise linear projection\n self.conv_pwl = create_conv2d(mid_chs, out_chs, pw_kernel_size, padding=pad_type)\n self.bn2 = norm_layer(out_chs)\n\n def feature_info(self, location):\n if location == 'expansion': # after SE, before PWL\n info = dict(module='conv_pwl', hook_type='forward_pre', num_chs=self.conv_pwl.in_channels)\n else: # location == 'bottleneck', block output\n info = dict(module='', hook_type='', num_chs=self.conv_pwl.out_channels)\n return info\n\n def forward(self, x):\n shortcut = x\n\n # Expansion convolution\n x = self.conv_exp(x)\n x = self.bn1(x)\n x = self.act1(x)\n\n # Squeeze-and-excitation\n x = self.se(x)\n\n # Point-wise linear projection\n x = self.conv_pwl(x)\n x = self.bn2(x)\n\n if self.has_residual:\n if self.drop_path_rate > 0.:\n x = drop_path(x, self.drop_path_rate, self.training)\n x += shortcut\n\n return x\n" ]
[ [ "torch.nn.BatchNorm2d", "torch.cat" ], [ "torch.nn.Linear", "torch.nn.Identity", "torch.nn.Conv2d", "torch.nn.functional.adaptive_avg_pool2d" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
AlexBlack2202/iou-tracker
[ "fe53ac5533dbc0ed20919c15ebba5abae1b38e30" ]
[ "util.py" ]
[ "# ---------------------------------------------------------\n# IOU Tracker\n# Copyright (c) 2017 TU Berlin, Communication Systems Group\n# Licensed under The MIT License [see LICENSE for details]\n# Written by Erik Bochinski\n# ---------------------------------------------------------\n\nimport numpy as np\nimport csv\n\n\ndef load_mot(detections):\n \"\"\"\n Loads detections stored in a mot-challenge like formatted CSV or numpy array (fieldNames = ['frame', 'id', 'x', 'y',\n 'w', 'h', 'score']).\n\n Args:\n detections\n\n Returns:\n list: list containing the detections for each frame.\n \"\"\"\n\n data = []\n if type(detections) is str:\n raw = np.genfromtxt(detections, delimiter=',', dtype=np.float32)\n else:\n # assume it is an array\n assert isinstance(detections, np.ndarray), \"only numpy arrays or *.csv paths are supported as detections.\"\n raw = detections.astype(np.float32)\n\n end_frame = int(np.max(raw[:, 0]))\n for i in range(1, end_frame+1):\n idx = raw[:, 0] == i\n bbox = raw[idx, 2:6]\n bbox[:, 2:4] += bbox[:, 0:2] # x1, y1, w, h -> x1, y1, x2, y2\n scores = raw[idx, 6]\n dets = []\n for bb, s in zip(bbox, scores):\n dets.append({'bbox': (bb[0], bb[1], bb[2], bb[3]), 'score': s})\n data.append(dets)\n\n return data\n\n\ndef save_to_csv(out_path, tracks):\n \"\"\"\n Saves tracks to a CSV file.\n\n Args:\n out_path (str): path to output csv file.\n tracks (list): list of tracks to store.\n \"\"\"\n\n with open(out_path, \"w\") as ofile:\n field_names = ['frame', 'id', 'x', 'y', 'w', 'h', 'score', 'wx', 'wy', 'wz']\n\n odict = csv.DictWriter(ofile, field_names)\n id_ = 1\n for track in tracks:\n for i, bbox in enumerate(track['bboxes']):\n row = {'id': id_,\n 'frame': track['start_frame'] + i,\n 'x': bbox[0],\n 'y': bbox[1],\n 'w': bbox[2] - bbox[0],\n 'h': bbox[3] - bbox[1],\n 'score': track['max_score'],\n 'wx': -1,\n 'wy': -1,\n 'wz': -1}\n\n odict.writerow(row)\n id_ += 1\n\n\ndef iou(bbox1, bbox2):\n \"\"\"\n Calculates the intersection-over-union of two bounding boxes.\n\n Args:\n bbox1 (numpy.array, list of floats): bounding box in format x1,y1,x2,y2.\n bbox2 (numpy.array, list of floats): bounding box in format x1,y1,x2,y2.\n\n Returns:\n int: intersection-over-onion of bbox1, bbox2\n \"\"\"\n\n bbox1 = [float(x) for x in bbox1]\n bbox2 = [float(x) for x in bbox2]\n\n (x0_1, y0_1, x1_1, y1_1) = bbox1\n (x0_2, y0_2, x1_2, y1_2) = bbox2\n\n # get the overlap rectangle\n overlap_x0 = max(x0_1, x0_2)\n overlap_y0 = max(y0_1, y0_2)\n overlap_x1 = min(x1_1, x1_2)\n overlap_y1 = min(y1_1, y1_2)\n\n # check if there is an overlap\n if overlap_x1 - overlap_x0 <= 0 or overlap_y1 - overlap_y0 <= 0:\n return 0\n\n # if yes, calculate the ratio of the overlap to each ROI size and the unified size\n size_1 = (x1_1 - x0_1) * (y1_1 - y0_1)\n size_2 = (x1_2 - x0_2) * (y1_2 - y0_2)\n size_intersection = (overlap_x1 - overlap_x0) * (overlap_y1 - overlap_y0)\n size_union = size_1 + size_2 - size_intersection\n\n return size_intersection / size_union\n" ]
[ [ "numpy.max", "numpy.genfromtxt" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
rckitson/machine_learning
[ "842bbf2ed095cb45617b011b8738851cab7bbfb1", "842bbf2ed095cb45617b011b8738851cab7bbfb1" ]
[ "trafficSimulation/traffic.py", "optimizers/test_optimizer.py" ]
[ "#!/opt/anaconda3/bin/python3\n\"\"\" Class definitions and a main routine to simulate traffic \"\"\"\nimport os\nimport subprocess\nimport glob\nimport numpy as np\nimport matplotlib.pyplot as plt\n\n\nclass Car:\n \"\"\" A class for modeling cars in traffic \"\"\"\n\n def __init__(self, aggression=1.0, ahead=None):\n \"\"\" Constructor\n\n Args:\n aggression: The driver's aggressiveness\n ahead: The car ahead\n \"\"\"\n\n self.aggression = aggression\n self.ahead = ahead\n self.position = 0\n self.velocity = 0\n\n def drive(self, time, time_step=0.001):\n \"\"\" Drive the car\n\n Args:\n time_step: The time step, seconds\n \"\"\"\n\n displacement = (self.ahead.position - self.position)\n velocity_diff = (self.ahead.velocity - self.velocity)\n # Model driving as a linear spring and a linear damper\n force = self.aggression * (displacement + 0.25 * velocity_diff)\n self.position += time_step * self.velocity\n self.velocity += time_step * force\n\n\nclass Traffic:\n \"\"\" A class to simulate traffic \"\"\"\n\n def __init__(self, length=1, average_speed=1):\n \"\"\" Constructor\n\n Args:\n length: The length of the traffic\n average_speed: The average speed of the traffic\n \"\"\"\n\n self.length = length\n self.average_speed = average_speed\n\n self.time = 0\n self.cars = []\n self.positions = -1 * np.arange(self.length) / self.length * np.pi\n for ii in range(self.length):\n if ii > 0:\n self.cars.append(Car(aggression=1e1, ahead=self.cars[ii - 1]))\n elif ii == 0:\n self.cars.append(Car())\n\n def run(self, run_time, time_step=1e-3):\n \"\"\" Run the simulation \n \n Args:\n run_time: The total number of time steps\n time_step: The time step\n \"\"\"\n\n for _ in range(run_time):\n print(\"Time {} / {}\".format(self.time, run_time))\n self.time += 1\n self.positions += time_step * self.average_speed\n for ii, car in enumerate(self.cars):\n if ii == 0:\n car.position = -abs(self.positions[1]) / 4. 
* np.sin(0.5 * 2 * np.pi * self.time / run_time)\n else:\n car.drive(time=self.time * time_step, time_step=time_step)\n self.plot()\n\n def plot(self):\n \"\"\" Plot the cars on a circle \"\"\"\n R = self.length\n\n plt.figure()\n for ii, car in enumerate(self.cars):\n theta = self.positions[ii] + car.position\n x = R * np.cos(theta)\n y = R * np.sin(theta)\n if ii == 0:\n plt.scatter(x, y, marker='x')\n else:\n plt.scatter(x, y)\n\n plt.axis('scaled')\n lim = (-1.2 * R, 1.2 * R)\n plt.ylim(lim)\n plt.xlim(lim)\n plt.savefig('traffic_{:d}.png'.format(self.time))\n plt.close()\n\n\nif __name__ == \"__main__\":\n for ff in glob.glob('*.png'):\n os.remove(ff)\n Traffic(length=10, average_speed=2).run(500, time_step=5e-3)\n\n ffmpeg = '/usr/local/bin/ffmpeg'\n movie_filename = 'traffic.mp4'\n cmd = '-f image2 -i traffic_%d.png {}'.format(movie_filename).split(' ')\n\n if os.path.exists(movie_filename):\n os.remove(movie_filename)\n subprocess.call([ffmpeg] + cmd)\n subprocess.call(['open', movie_filename])\n", "#!/opt/anaconda3/bin/python3\n\"\"\" Tests for the Optimizer class \"\"\"\nimport os\nimport glob\nimport shutil\nimport subprocess\nimport numpy as np\nfrom matplotlib import pyplot as plt\nimport optimizer\n\n\ndef main():\n \"\"\" The main routine \"\"\"\n np.random.seed(0)\n for ff in glob.glob('*.dat'):\n os.remove(ff)\n\n tolerance = 1e-6\n learning_rate = 1e-4\n function = circle\n # See the first few methods for available test functions\n # x0 can be increased to higher dimensions\n x0 = np.random.random(3)\n x0 = x0 / np.linalg.norm(x0)\n\n test_all(x0, function, learning_rate, tolerance)\n plot_history()\n subprocess.call(['open', 'convergence_history.png'])\n\n\n\n\ndef test_all(x0, function, learning_rate, tolerance):\n \"\"\" Test all the algorithms\n\n Args:\n x0: The initial point\n function: The test function\n learning_rate: The learning rate\n tolerance: The tolerance on the gradient\n\n \"\"\"\n for algorithm in ['sgd', 'momentum', 'adam', 'rmsprop', 'adagrad', 'adadelta']:\n opt = optimizer.Optimizer(x0=x0, function=function, method=algorithm,\n learning_rate=learning_rate, error_threshold=tolerance)\n opt.solve()\n shutil.move('history.dat', algorithm + '.dat')\n\n\ndef plot_history():\n files = glob.glob('*.dat')\n\n plt.figure()\n for ff in files:\n if ff == 'history.dat':\n continue\n\n dat = np.loadtxt(ff)\n if len(dat) > 1000:\n skip = max(1, len(dat) // 10)\n else:\n skip = 1\n plt.plot(dat[::skip, 0], dat[::skip, 1], label=os.path.splitext(ff)[0])\n\n plt.legend()\n plt.xscale('log')\n plt.yscale('log')\n plt.xlabel('Iterations')\n plt.ylabel('Residual')\n plt.savefig('convergence_history.png')\n\n\nif __name__ == \"__main__\":\n main()\n" ]
[ [ "matplotlib.pyplot.scatter", "matplotlib.pyplot.ylim", "numpy.arange", "numpy.cos", "numpy.sin", "matplotlib.pyplot.xlim", "matplotlib.pyplot.close", "matplotlib.pyplot.axis", "matplotlib.pyplot.figure" ], [ "matplotlib.pyplot.legend", "numpy.random.random", "numpy.random.seed", "matplotlib.pyplot.yscale", "numpy.linalg.norm", "matplotlib.pyplot.savefig", "matplotlib.pyplot.ylabel", "matplotlib.pyplot.xlabel", "matplotlib.pyplot.xscale", "numpy.loadtxt", "matplotlib.pyplot.figure" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
YosefLab/scVI
[ "73952af10292852b8e5b7319409678cc66d85a47", "73952af10292852b8e5b7319409678cc66d85a47", "73952af10292852b8e5b7319409678cc66d85a47" ]
[ "scvi/dataloaders/_semi_dataloader.py", "scvi/module/base/_base_module.py", "scvi/model/_autozi.py" ]
[ "from typing import List, Optional, Union\n\nimport numpy as np\n\nfrom scvi import REGISTRY_KEYS\nfrom scvi.data import AnnDataManager\nfrom scvi.data._utils import get_anndata_attribute\n\nfrom ._concat_dataloader import ConcatDataLoader\n\n\nclass SemiSupervisedDataLoader(ConcatDataLoader):\n \"\"\"\n DataLoader that supports semisupervised training.\n\n Parameters\n ----------\n adata_manager\n :class:`~scvi.data.AnnDataManager` object that has been created via ``setup_anndata``.\n n_samples_per_label\n Number of subsamples for each label class to sample per epoch. By default, there\n is no label subsampling.\n indices\n The indices of the observations in the adata to load\n shuffle\n Whether the data should be shuffled\n batch_size\n minibatch size to load each iteration\n data_and_attributes\n Dictionary with keys representing keys in data registry (`adata_manager.data_registry`)\n and value equal to desired numpy loading type (later made into torch tensor).\n If `None`, defaults to all registered data.\n data_loader_kwargs\n Keyword arguments for :class:`~torch.utils.data.DataLoader`\n \"\"\"\n\n def __init__(\n self,\n adata_manager: AnnDataManager,\n n_samples_per_label: Optional[int] = None,\n indices: Optional[List[int]] = None,\n shuffle: bool = False,\n batch_size: int = 128,\n data_and_attributes: Optional[dict] = None,\n drop_last: Union[bool, int] = False,\n **data_loader_kwargs,\n ):\n adata = adata_manager.adata\n if indices is None:\n indices = np.arange(adata.n_obs)\n\n self.indices = np.asarray(indices)\n\n if len(self.indices) == 0:\n return None\n\n self.n_samples_per_label = n_samples_per_label\n\n labels_state_registry = adata_manager.get_state_registry(\n REGISTRY_KEYS.LABELS_KEY\n )\n labels = get_anndata_attribute(\n adata_manager.adata,\n adata_manager.data_registry.labels.attr_name,\n labels_state_registry.original_key,\n ).ravel()\n\n # save a nested list of the indices per labeled category\n self.labeled_locs = []\n for label in np.unique(labels):\n if label != labels_state_registry.unlabeled_category:\n label_loc_idx = np.where(labels[indices] == label)[0]\n label_loc = self.indices[label_loc_idx]\n self.labeled_locs.append(label_loc)\n labelled_idx = self.subsample_labels()\n\n super().__init__(\n adata_manager=adata_manager,\n indices_list=[self.indices, labelled_idx],\n shuffle=shuffle,\n batch_size=batch_size,\n data_and_attributes=data_and_attributes,\n drop_last=drop_last,\n **data_loader_kwargs,\n )\n\n def resample_labels(self):\n \"\"\"Resamples the labeled data.\"\"\"\n labelled_idx = self.subsample_labels()\n # self.dataloaders[0] iterates over full_indices\n # self.dataloaders[1] iterates over the labelled_indices\n # change the indices of the labelled set\n self.dataloaders[1].indices = labelled_idx\n\n def subsample_labels(self):\n \"\"\"Subsamples each label class by taking up to n_samples_per_label samples per class.\"\"\"\n if self.n_samples_per_label is None:\n return np.concatenate(self.labeled_locs)\n\n sample_idx = []\n for loc in self.labeled_locs:\n if len(loc) < self.n_samples_per_label:\n sample_idx.append(loc)\n else:\n label_subset = np.random.choice(\n loc, self.n_samples_per_label, replace=False\n )\n sample_idx.append(label_subset)\n sample_idx = np.concatenate(sample_idx)\n return sample_idx\n", "from abc import abstractmethod\nfrom typing import Callable, Dict, Iterable, Optional, Tuple, Union\n\nimport jax.numpy as jnp\nimport pyro\nimport torch\nimport torch.nn as nn\nfrom flax import linen\nfrom numpyro.distributions 
import Distribution\nfrom pyro.infer.predictive import Predictive\n\nfrom scvi._types import LossRecord\n\nfrom ._decorators import auto_move_data\nfrom ._pyro import AutoMoveDataPredictive\n\n\nclass LossRecorder:\n \"\"\"\n Loss signature for models.\n\n This class provides an organized way to record the model loss, as well as\n the components of the ELBO. This may also be used in MLE, MAP, EM methods.\n The loss is used for backpropagation during inference. The other parameters\n are used for logging/early stopping during inference.\n\n Parameters\n ----------\n loss\n Tensor with loss for minibatch. Should be one dimensional with one value.\n Note that loss should be a :class:`~torch.Tensor` and not the result of ``.item()``.\n reconstruction_loss\n Reconstruction loss for each observation in the minibatch.\n kl_local\n KL divergence associated with each observation in the minibatch.\n kl_global\n Global kl divergence term. Should be one dimensional with one value.\n **kwargs\n Additional metrics can be passed as keyword arguments and will\n be available as attributes of the object.\n \"\"\"\n\n def __init__(\n self,\n loss: LossRecord,\n reconstruction_loss: Optional[LossRecord] = None,\n kl_local: Optional[LossRecord] = None,\n kl_global: Optional[LossRecord] = None,\n **kwargs,\n ):\n\n default = (\n torch.tensor(0.0) if isinstance(loss, torch.Tensor) else jnp.array(0.0)\n )\n if reconstruction_loss is None:\n reconstruction_loss = default\n if kl_local is None:\n kl_local = default\n if kl_global is None:\n kl_global = default\n\n self._loss = loss if isinstance(loss, dict) else dict(loss=loss)\n self._reconstruction_loss = (\n reconstruction_loss\n if isinstance(reconstruction_loss, dict)\n else dict(reconstruction_loss=reconstruction_loss)\n )\n self._kl_local = (\n kl_local if isinstance(kl_local, dict) else dict(kl_local=kl_local)\n )\n self._kl_global = (\n kl_global if isinstance(kl_global, dict) else dict(kl_global=kl_global)\n )\n self.extra_metric_attrs = []\n for key, value in kwargs.items():\n setattr(self, key, value)\n self.extra_metric_attrs.append(key)\n\n @staticmethod\n def _get_dict_sum(dictionary):\n total = 0.0\n for value in dictionary.values():\n total += value\n return total\n\n @property\n def loss(self) -> Union[torch.Tensor, jnp.ndarray]:\n return self._get_dict_sum(self._loss)\n\n @property\n def reconstruction_loss(self) -> Union[torch.Tensor, jnp.ndarray]:\n return self._get_dict_sum(self._reconstruction_loss)\n\n @property\n def kl_local(self) -> Union[torch.Tensor, jnp.ndarray]:\n return self._get_dict_sum(self._kl_local)\n\n @property\n def kl_global(self) -> Union[torch.Tensor, jnp.ndarray]:\n return self._get_dict_sum(self._kl_global)\n\n\nclass BaseModuleClass(nn.Module):\n \"\"\"Abstract class for scvi-tools modules.\"\"\"\n\n def __init__(\n self,\n ):\n super().__init__()\n\n @property\n def device(self):\n device = list(set(p.device for p in self.parameters()))\n if len(device) > 1:\n raise RuntimeError(\"Module tensors on multiple devices.\")\n return device[0]\n\n def on_load(self, model):\n \"\"\"\n Callback function run in :method:`~scvi.model.base.BaseModelClass.load` prior to loading module state dict.\n \"\"\"\n pass\n\n @auto_move_data\n def forward(\n self,\n tensors,\n get_inference_input_kwargs: Optional[dict] = None,\n get_generative_input_kwargs: Optional[dict] = None,\n inference_kwargs: Optional[dict] = None,\n generative_kwargs: Optional[dict] = None,\n loss_kwargs: Optional[dict] = None,\n compute_loss=True,\n ) -> Union[\n 
Tuple[torch.Tensor, torch.Tensor],\n Tuple[torch.Tensor, torch.Tensor, LossRecorder],\n ]:\n \"\"\"\n Forward pass through the network.\n\n Parameters\n ----------\n tensors\n tensors to pass through\n get_inference_input_kwargs\n Keyword args for ``_get_inference_input()``\n get_generative_input_kwargs\n Keyword args for ``_get_generative_input()``\n inference_kwargs\n Keyword args for ``inference()``\n generative_kwargs\n Keyword args for ``generative()``\n loss_kwargs\n Keyword args for ``loss()``\n compute_loss\n Whether to compute loss on forward pass. This adds\n another return value.\n \"\"\"\n return _generic_forward(\n self,\n tensors,\n inference_kwargs,\n generative_kwargs,\n loss_kwargs,\n get_inference_input_kwargs,\n get_generative_input_kwargs,\n compute_loss,\n )\n\n @abstractmethod\n def _get_inference_input(self, tensors: Dict[str, torch.Tensor], **kwargs):\n \"\"\"Parse tensors dictionary for inference related values.\"\"\"\n\n @abstractmethod\n def _get_generative_input(\n self,\n tensors: Dict[str, torch.Tensor],\n inference_outputs: Dict[str, torch.Tensor],\n **kwargs,\n ):\n \"\"\"Parse tensors dictionary for generative related values.\"\"\"\n\n @abstractmethod\n def inference(\n self,\n *args,\n **kwargs,\n ) -> Dict[str, Union[torch.Tensor, torch.distributions.Distribution]]:\n \"\"\"\n Run the inference (recognition) model.\n\n In the case of variational inference, this function will perform steps related to\n computing variational distribution parameters. In a VAE, this will involve running\n data through encoder networks.\n\n This function should return a dictionary with str keys and :class:`~torch.Tensor` values.\n \"\"\"\n pass\n\n @abstractmethod\n def generative(\n self, *args, **kwargs\n ) -> Dict[str, Union[torch.Tensor, torch.distributions.Distribution]]:\n \"\"\"\n Run the generative model.\n\n This function should return the parameters associated with the likelihood of the data.\n This is typically written as :math:`p(x|z)`.\n\n This function should return a dictionary with str keys and :class:`~torch.Tensor` values.\n \"\"\"\n pass\n\n @abstractmethod\n def loss(self, *args, **kwargs) -> LossRecorder:\n \"\"\"\n Compute the loss for a minibatch of data.\n\n This function uses the outputs of the inference and generative functions to compute\n a loss. This many optionally include other penalty terms, which should be computed here.\n\n This function should return an object of type :class:`~scvi.module.base.LossRecorder`.\n \"\"\"\n pass\n\n @abstractmethod\n def sample(self, *args, **kwargs):\n \"\"\"Generate samples from the learned model.\"\"\"\n pass\n\n\ndef _get_dict_if_none(param):\n param = {} if not isinstance(param, dict) else param\n\n return param\n\n\nclass PyroBaseModuleClass(nn.Module):\n \"\"\"\n Base module class for Pyro models.\n\n In Pyro, ``model`` and ``guide`` should have the same signature. Out of convenience,\n the forward function of this class passes through to the forward of the ``model``.\n\n There are two ways this class can be equipped with a model and a guide. First,\n ``model`` and ``guide`` can be class attributes that are :class:`~pyro.nn.PyroModule`\n instances. 
The implemented ``model`` and ``guide`` class method can then return the (private) attributes.\n Second, ``model`` and ``guide`` methods can be written directly (see Pyro scANVI example)\n https://pyro.ai/examples/scanvi.html.\n\n The ``model`` and ``guide`` may also be equipped with ``n_obs`` attributes, which can be set\n to ``None`` (e.g., ``self.n_obs = None``). This attribute may be helpful in designating the\n size of observation-specific Pyro plates. The value will be updated automatically by\n :class:`~scvi.train.PyroTrainingPlan`, provided that it is given the number of training examples\n upon initialization.\n\n Parameters\n ----------\n on_load_kwargs\n Dictionary containing keyword args to use in ``self.on_load``.\n \"\"\"\n\n def __init__(self, on_load_kwargs: Optional[dict] = None):\n super().__init__()\n self.on_load_kwargs = on_load_kwargs or {}\n\n @staticmethod\n @abstractmethod\n def _get_fn_args_from_batch(\n tensor_dict: Dict[str, torch.Tensor]\n ) -> Union[Iterable, dict]:\n \"\"\"\n Parse the minibatched data to get the correct inputs for ``model`` and ``guide``.\n\n In Pyro, ``model`` and ``guide`` must have the same signature. This is a helper method\n that gets the args and kwargs for these two methods. This helper method aids ``forward`` and\n ``guide`` in having transparent signatures, as well as allows use of our generic\n :class:`~scvi.dataloaders.AnnDataLoader`.\n\n Returns\n -------\n args and kwargs for the functions, args should be an Iterable and kwargs a dictionary.\n \"\"\"\n\n @property\n @abstractmethod\n def model(self):\n pass\n\n @property\n @abstractmethod\n def guide(self):\n pass\n\n @property\n def list_obs_plate_vars(self):\n \"\"\"\n Model annotation for minibatch training with pyro plate.\n\n A dictionary with:\n 1. \"name\" - the name of observation/minibatch plate;\n 2. \"in\" - indexes of model args to provide to encoder network when using amortised inference;\n 3. \"sites\" - dictionary with\n keys - names of variables that belong to the observation plate (used to recognise\n and merge posterior samples for minibatch variables)\n values - the dimensions in non-plate axis of each variable (used to construct output\n layer of encoder network when using amortised inference)\n \"\"\"\n return {\"name\": \"\", \"in\": [], \"sites\": {}}\n\n def on_load(self, model):\n \"\"\"\n Callback function run in :method:`~scvi.model.base.BaseModelClass.load` prior to loading module state dict.\n\n For some Pyro modules with AutoGuides, run one training step prior to loading state dict.\n \"\"\"\n old_history = model.history_.copy()\n model.train(max_steps=1, **self.on_load_kwargs)\n model.history_ = old_history\n pyro.clear_param_store()\n\n def create_predictive(\n self,\n model: Optional[Callable] = None,\n posterior_samples: Optional[dict] = None,\n guide: Optional[Callable] = None,\n num_samples: Optional[int] = None,\n return_sites: Tuple[str] = (),\n parallel: bool = False,\n ) -> Predictive:\n \"\"\"\n Creates a :class:`~pyro.infer.Predictive` object.\n\n Parameters\n ----------\n model\n Python callable containing Pyro primitives. Defaults to ``self.model``.\n posterior_samples\n Dictionary of samples from the posterior\n guide\n Optional guide to get posterior samples of sites not present\n in ``posterior_samples``. 
Defaults to ``self.guide``\n num_samples\n Number of samples to draw from the predictive distribution.\n This argument has no effect if ``posterior_samples`` is non-empty, in which case,\n the leading dimension size of samples in ``posterior_samples`` is used.\n return_sites\n Sites to return; by default only sample sites not present\n in ``posterior_samples`` are returned.\n parallel\n predict in parallel by wrapping the existing model\n in an outermost ``plate`` messenger. Note that this requires that the model has\n all batch dims correctly annotated via :class:`~pyro.plate`.\n \"\"\"\n if model is None:\n model = self.model\n if guide is None:\n guide = self.guide\n predictive = AutoMoveDataPredictive(\n model=model,\n posterior_samples=posterior_samples,\n guide=guide,\n num_samples=num_samples,\n return_sites=return_sites,\n parallel=parallel,\n )\n # necessary to comply with auto_move_data decorator\n predictive.eval()\n\n return predictive\n\n def forward(self, *args, **kwargs):\n \"\"\"Passthrough to Pyro model.\"\"\"\n return self.model(*args, **kwargs)\n\n\nclass JaxBaseModuleClass(linen.Module):\n \"\"\"Abstract class for Jax-based scvi-tools modules.\"\"\"\n\n def on_load(self, model):\n \"\"\"\n Callback function run in :method:`~scvi.model.base.BaseModelClass.load` prior to loading module state dict.\n \"\"\"\n pass\n\n @abstractmethod\n def setup(self):\n \"\"\"\n Flax setup method.\n\n With scvi-tools we prefer to use the setup parameterization of\n flax.linen Modules. This lends the interface to be more like\n PyTorch. More about this can be found here:\n\n https://flax.readthedocs.io/en/latest/design_notes/setup_or_nncompact.html\n \"\"\"\n pass\n\n def __call__(\n self,\n tensors: Dict[str, jnp.ndarray],\n get_inference_input_kwargs: Optional[dict] = None,\n get_generative_input_kwargs: Optional[dict] = None,\n inference_kwargs: Optional[dict] = None,\n generative_kwargs: Optional[dict] = None,\n loss_kwargs: Optional[dict] = None,\n compute_loss=True,\n ) -> Union[\n Tuple[jnp.ndarray, jnp.ndarray],\n Tuple[jnp.ndarray, jnp.ndarray, LossRecorder],\n ]:\n \"\"\"\n Forward pass through the network.\n\n Parameters\n ----------\n tensors\n tensors to pass through\n get_inference_input_kwargs\n Keyword args for ``_get_inference_input()``\n get_generative_input_kwargs\n Keyword args for ``_get_generative_input()``\n inference_kwargs\n Keyword args for ``inference()``\n generative_kwargs\n Keyword args for ``generative()``\n loss_kwargs\n Keyword args for ``loss()``\n compute_loss\n Whether to compute loss on forward pass. This adds\n another return value.\n \"\"\"\n return _generic_forward(\n self,\n tensors,\n inference_kwargs,\n generative_kwargs,\n loss_kwargs,\n get_inference_input_kwargs,\n get_generative_input_kwargs,\n compute_loss,\n )\n\n @abstractmethod\n def _get_inference_input(self, tensors: Dict[str, jnp.ndarray], **kwargs):\n \"\"\"Parse tensors dictionary for inference related values.\"\"\"\n\n @abstractmethod\n def _get_generative_input(\n self,\n tensors: Dict[str, jnp.ndarray],\n inference_outputs: Dict[str, jnp.ndarray],\n **kwargs,\n ):\n \"\"\"Parse tensors dictionary for generative related values.\"\"\"\n\n @abstractmethod\n def inference(\n self,\n *args,\n **kwargs,\n ) -> Dict[str, Union[jnp.ndarray, Distribution]]:\n \"\"\"\n Run the inference (recognition) model.\n\n In the case of variational inference, this function will perform steps related to\n computing variational distribution parameters. 
In a VAE, this will involve running\n data through encoder networks.\n\n This function should return a dictionary with str keys and :class:`~jnp.ndarray` values.\n \"\"\"\n pass\n\n @abstractmethod\n def generative(\n self, *args, **kwargs\n ) -> Dict[str, Union[jnp.ndarray, Distribution]]:\n \"\"\"\n Run the generative model.\n\n This function should return the parameters associated with the likelihood of the data.\n This is typically written as :math:`p(x|z)`.\n\n This function should return a dictionary with str keys and :class:`~jnp.ndarray` values.\n \"\"\"\n pass\n\n @abstractmethod\n def loss(self, *args, **kwargs) -> LossRecorder:\n \"\"\"\n Compute the loss for a minibatch of data.\n\n This function uses the outputs of the inference and generative functions to compute\n a loss. This many optionally include other penalty terms, which should be computed here.\n\n This function should return an object of type :class:`~scvi.module.base.LossRecorder`.\n \"\"\"\n pass\n\n\ndef _generic_forward(\n module,\n tensors,\n inference_kwargs,\n generative_kwargs,\n loss_kwargs,\n get_inference_input_kwargs,\n get_generative_input_kwargs,\n compute_loss,\n):\n \"\"\"Core of the forward call shared by PyTorch- and Jax-based modules.\"\"\"\n inference_kwargs = _get_dict_if_none(inference_kwargs)\n generative_kwargs = _get_dict_if_none(generative_kwargs)\n loss_kwargs = _get_dict_if_none(loss_kwargs)\n get_inference_input_kwargs = _get_dict_if_none(get_inference_input_kwargs)\n get_generative_input_kwargs = _get_dict_if_none(get_generative_input_kwargs)\n\n inference_inputs = module._get_inference_input(\n tensors, **get_inference_input_kwargs\n )\n inference_outputs = module.inference(**inference_inputs, **inference_kwargs)\n generative_inputs = module._get_generative_input(\n tensors, inference_outputs, **get_generative_input_kwargs\n )\n generative_outputs = module.generative(**generative_inputs, **generative_kwargs)\n if compute_loss:\n losses = module.loss(\n tensors, inference_outputs, generative_outputs, **loss_kwargs\n )\n return inference_outputs, generative_outputs, losses\n else:\n return inference_outputs, generative_outputs\n", "import logging\nfrom typing import Dict, Optional, Sequence, Union\n\nimport numpy as np\nimport torch\nfrom anndata import AnnData\nfrom torch import logsumexp\nfrom torch.distributions import Beta\n\nfrom scvi import REGISTRY_KEYS\nfrom scvi._compat import Literal\nfrom scvi.data import AnnDataManager\nfrom scvi.data.fields import CategoricalObsField, LayerField\nfrom scvi.model._utils import _init_library_size\nfrom scvi.model.base import UnsupervisedTrainingMixin\nfrom scvi.module import AutoZIVAE\nfrom scvi.utils import setup_anndata_dsp\n\nfrom .base import BaseModelClass, VAEMixin\n\nlogger = logging.getLogger(__name__)\n\n# register buffer\n\n\nclass AUTOZI(VAEMixin, UnsupervisedTrainingMixin, BaseModelClass):\n \"\"\"\n Automatic identification of ZI genes [Clivio19]_.\n\n Parameters\n ----------\n adata\n AnnData object that has been registered via :meth:`~scvi.model.AUTOZI.setup_anndata`.\n n_hidden\n Number of nodes per hidden layer\n n_latent\n Dimensionality of the latent space\n n_layers\n Number of hidden layers used for encoder NN\n dropout_rate\n Dropout rate for neural networks\n dispersion\n One of the following\n\n * ``'gene'`` - dispersion parameter of NB is constant per gene across cells\n * ``'gene-batch'`` - dispersion can differ between different batches\n * ``'gene-label'`` - dispersion can differ between different labels\n * 
``'gene-cell'`` - dispersion can differ for every gene in every cell\n latent_distribution\n One of\n\n * ``'normal'`` - Normal distribution\n * ``'ln'`` - Logistic normal distribution (Normal(0, I) transformed by softmax)\n alpha_prior\n Float denoting the alpha parameter of the prior Beta distribution of\n the zero-inflation Bernoulli parameter. Should be between 0 and 1, not included.\n When set to ``None'', will be set to 1 - beta_prior if beta_prior is not ``None'',\n otherwise the prior Beta distribution will be learned on an Empirical Bayes fashion.\n beta_prior\n Float denoting the beta parameter of the prior Beta distribution of\n the zero-inflation Bernoulli parameter. Should be between 0 and 1, not included.\n When set to ``None'', will be set to 1 - alpha_prior if alpha_prior is not ``None'',\n otherwise the prior Beta distribution will be learned on an Empirical Bayes fashion.\n minimal_dropout\n Float denoting the lower bound of the cell-gene ZI rate in the ZINB component.\n Must be non-negative. Can be set to 0 but not recommended as this may make\n the mixture problem ill-defined.\n zero_inflation: One of the following\n\n * ``'gene'`` - zero-inflation Bernoulli parameter of AutoZI is constant per gene across cells\n * ``'gene-batch'`` - zero-inflation Bernoulli parameter can differ between different batches\n * ``'gene-label'`` - zero-inflation Bernoulli parameter can differ between different labels\n * ``'gene-cell'`` - zero-inflation Bernoulli parameter can differ for every gene in every cell\n use_observed_lib_size\n Use observed library size for RNA as scaling factor in mean of conditional distribution\n **model_kwargs\n Keyword args for :class:`~scvi.module.AutoZIVAE`\n\n Examples\n --------\n\n >>> adata = anndata.read_h5ad(path_to_anndata)\n >>> scvi.model.AUTOZI.setup_anndata(adata, batch_key=\"batch\")\n >>> vae = scvi.model.AUTOZI(adata)\n >>> vae.train(n_epochs=400)\n\n Notes\n -----\n See further usage examples in the following tutorials:\n\n 1. 
:doc:`/tutorials/notebooks/AutoZI_tutorial`\n \"\"\"\n\n def __init__(\n self,\n adata: AnnData,\n n_hidden: int = 128,\n n_latent: int = 10,\n n_layers: int = 1,\n dropout_rate: float = 0.1,\n dispersion: Literal[\"gene\", \"gene-batch\", \"gene-label\", \"gene-cell\"] = \"gene\",\n latent_distribution: Literal[\"normal\", \"ln\"] = \"normal\",\n alpha_prior: Optional[float] = 0.5,\n beta_prior: Optional[float] = 0.5,\n minimal_dropout: float = 0.01,\n zero_inflation: str = \"gene\",\n use_observed_lib_size: bool = True,\n **model_kwargs,\n ):\n super(AUTOZI, self).__init__(adata)\n\n self.use_observed_lib_size = use_observed_lib_size\n n_batch = self.summary_stats.n_batch\n library_log_means, library_log_vars = _init_library_size(\n self.adata_manager, n_batch\n )\n\n self.module = AutoZIVAE(\n n_input=self.summary_stats.n_vars,\n n_batch=n_batch,\n n_labels=self.summary_stats.n_labels,\n n_hidden=n_hidden,\n n_latent=n_latent,\n n_layers=n_layers,\n dropout_rate=dropout_rate,\n dispersion=dispersion,\n latent_distribution=latent_distribution,\n zero_inflation=zero_inflation,\n alpha_prior=alpha_prior,\n beta_prior=beta_prior,\n minimal_dropout=minimal_dropout,\n use_observed_lib_size=use_observed_lib_size,\n library_log_means=library_log_means,\n library_log_vars=library_log_vars,\n **model_kwargs,\n )\n self.model_summary_string = (\n \"AutoZI Model with the following params: \\nn_hidden: {}, n_latent: {}, \"\n \"n_layers: {}, dropout_rate: {}, dispersion: {}, latent_distribution: \"\n \"{}, alpha_prior: {}, beta_prior: {}, minimal_dropout: {}, zero_inflation:{}\"\n ).format(\n n_hidden,\n n_latent,\n n_layers,\n dropout_rate,\n dispersion,\n latent_distribution,\n alpha_prior,\n beta_prior,\n minimal_dropout,\n zero_inflation,\n )\n self.init_params_ = self._get_init_params(locals())\n\n def get_alphas_betas(\n self, as_numpy: bool = True\n ) -> Dict[str, Union[torch.Tensor, np.ndarray]]:\n \"\"\"Return parameters of Bernoulli Beta distributions in a dictionary.\"\"\"\n return self.module.get_alphas_betas(as_numpy=as_numpy)\n\n @torch.no_grad()\n def get_marginal_ll(\n self,\n adata: Optional[AnnData] = None,\n indices: Optional[Sequence[int]] = None,\n n_mc_samples: int = 1000,\n batch_size: Optional[int] = None,\n ) -> float:\n \"\"\"\n Return the marginal LL for the data.\n\n The computation here is a biased estimator of the marginal log likelihood of the data.\n Note, this is not the negative log likelihood, higher is better.\n\n Parameters\n ----------\n adata\n AnnData object with equivalent structure to initial AnnData. If `None`, defaults to the\n AnnData object used to initialize the model.\n indices\n Indices of cells in adata to use. If `None`, all cells are used.\n n_mc_samples\n Number of Monte Carlo samples to use for marginal LL estimation.\n batch_size\n Minibatch size for data loading into model. 
Defaults to `scvi.settings.batch_size`.\n \"\"\"\n adata = self._validate_anndata(adata)\n if indices is None:\n indices = np.arange(adata.n_obs)\n\n scdl = self._make_data_loader(\n adata=adata, indices=indices, batch_size=batch_size\n )\n\n log_lkl = 0\n to_sum = torch.zeros((n_mc_samples,)).to(self.device)\n alphas_betas = self.module.get_alphas_betas(as_numpy=False)\n alpha_prior = alphas_betas[\"alpha_prior\"]\n alpha_posterior = alphas_betas[\"alpha_posterior\"]\n beta_prior = alphas_betas[\"beta_prior\"]\n beta_posterior = alphas_betas[\"beta_posterior\"]\n\n for i in range(n_mc_samples):\n bernoulli_params = self.module.sample_from_beta_distribution(\n alpha_posterior, beta_posterior\n )\n for tensors in scdl:\n sample_batch = tensors[REGISTRY_KEYS.X_KEY].to(self.device)\n batch_index = tensors[REGISTRY_KEYS.BATCH_KEY].to(self.device)\n labels = tensors[REGISTRY_KEYS.LABELS_KEY].to(self.device)\n\n # Distribution parameters and sampled variables\n inf_outputs, gen_outputs, _ = self.module.forward(tensors)\n\n px = gen_outputs[\"px\"]\n px_r = px.theta\n px_rate = px.mu\n px_dropout = px.zi_logits\n qz = inf_outputs[\"qz\"]\n z = inf_outputs[\"z\"]\n\n # Reconstruction Loss\n bernoulli_params_batch = self.module.reshape_bernoulli(\n bernoulli_params,\n batch_index,\n labels,\n )\n reconst_loss = self.module.get_reconstruction_loss(\n sample_batch,\n px_rate,\n px_r,\n px_dropout,\n bernoulli_params_batch,\n )\n\n # Log-probabilities\n p_z = gen_outputs[\"pz\"].log_prob(z).sum(dim=-1)\n p_x_zld = -reconst_loss\n q_z_x = qz.log_prob(z).sum(dim=-1)\n log_prob_sum = p_z + p_x_zld - q_z_x\n\n if not self.use_observed_lib_size:\n ql = inf_outputs[\"ql\"]\n library = inf_outputs[\"library\"]\n (\n local_library_log_means,\n local_library_log_vars,\n ) = self.module._compute_local_library_params(batch_index)\n\n p_l = gen_outputs[\"pl\"].log_prob(library).sum(dim=-1)\n\n q_l_x = ql.log_prob(library).sum(dim=-1)\n\n log_prob_sum += p_l - q_l_x\n\n batch_log_lkl = torch.sum(log_prob_sum, dim=0)\n to_sum[i] += batch_log_lkl\n\n p_d = Beta(alpha_prior, beta_prior).log_prob(bernoulli_params).sum()\n q_d = Beta(alpha_posterior, beta_posterior).log_prob(bernoulli_params).sum()\n\n to_sum[i] += p_d - q_d\n\n log_lkl = logsumexp(to_sum, dim=-1).item() - np.log(n_mc_samples)\n n_samples = len(scdl.indices)\n return log_lkl / n_samples\n\n @classmethod\n @setup_anndata_dsp.dedent\n def setup_anndata(\n cls,\n adata: AnnData,\n batch_key: Optional[str] = None,\n labels_key: Optional[str] = None,\n layer: Optional[str] = None,\n **kwargs,\n ):\n \"\"\"\n %(summary)s.\n\n Parameters\n ----------\n %(param_batch_key)s\n %(param_labels_key)s\n %(param_layer)s\n \"\"\"\n setup_method_args = cls._get_setup_method_args(**locals())\n anndata_fields = [\n LayerField(REGISTRY_KEYS.X_KEY, layer, is_count_data=True),\n CategoricalObsField(REGISTRY_KEYS.BATCH_KEY, batch_key),\n CategoricalObsField(REGISTRY_KEYS.LABELS_KEY, labels_key),\n ]\n adata_manager = AnnDataManager(\n fields=anndata_fields, setup_method_args=setup_method_args\n )\n adata_manager.register_fields(adata, **kwargs)\n cls.register_manager(adata_manager)\n" ]
[ [ "numpy.random.choice", "numpy.asarray", "numpy.arange", "numpy.unique", "numpy.concatenate", "numpy.where" ], [ "torch.tensor" ], [ "numpy.log", "torch.zeros", "torch.distributions.Beta", "numpy.arange", "torch.sum", "torch.no_grad", "torch.logsumexp" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
joelhaynie/cs760_final_project
[ "85f19be8fdc046dff3e1325e1ef85aca702d4000" ]
[ "src/vggish_inference_demo.py" ]
[ "# Copyright 2017 The TensorFlow Authors All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\nr\"\"\"A simple demonstration of running VGGish in inference mode.\n\nThis is intended as a toy example that demonstrates how the various building\nblocks (feature extraction, model definition and loading, postprocessing) work\ntogether in an inference context.\n\nA WAV file (assumed to contain signed 16-bit PCM samples) is read in, converted\ninto log mel spectrogram examples, fed into VGGish, the raw embedding output is\nwhitened and quantized, and the postprocessed embeddings are optionally written\nin a SequenceExample to a TFRecord file (using the same format as the embedding\nfeatures released in AudioSet).\n\nUsage:\n  # Run a WAV file through the model and print the embeddings. The model\n  # checkpoint is loaded from vggish_model.ckpt and the PCA parameters are\n  # loaded from vggish_pca_params.npz in the current directory.\n  $ python vggish_inference_demo.py --wav_file /path/to/a/wav/file\n\n  # Run a WAV file through the model and also write the embeddings to\n  # a TFRecord file. The model checkpoint and PCA parameters are explicitly\n  # passed in as well.\n  $ python vggish_inference_demo.py --wav_file /path/to/a/wav/file \\\n                                    --tfrecord_file /path/to/tfrecord/file \\\n                                    --checkpoint /path/to/model/checkpoint \\\n                                    --pca_params /path/to/pca/params\n\n  # Run a built-in input (a sine wav) through the model and print the\n  # embeddings. Associated model files are read from the current directory.\n  $ python vggish_inference_demo.py\n\"\"\"\n\nfrom __future__ import print_function\n\nimport numpy as np\nfrom scipy.io import wavfile\nimport six\nimport tensorflow as tf\n\nimport vggish_input\nimport vggish_params\nimport vggish_postprocess\nimport vggish_slim\n\nflags = tf.app.flags\n\nflags.DEFINE_string(\n 'wav_file', None,\n 'Path to a wav file. Should contain signed 16-bit PCM samples. '\n 'If none is provided, a synthetic sound is used.')\n\nflags.DEFINE_string(\n 'checkpoint', 'vggish_model.ckpt',\n 'Path to the VGGish checkpoint file.')\n\nflags.DEFINE_string(\n 'pca_params', 'vggish_pca_params.npz',\n 'Path to the VGGish PCA parameters file.')\n\nflags.DEFINE_string(\n 'tfrecord_file', None,\n 'Path to a TFRecord file where embeddings will be written.')\n\nFLAGS = flags.FLAGS\n\n\ndef main(_):\n # In this simple example, we run the examples from a single audio file through\n # the model. 
If none is provided, we generate a synthetic input.\n if FLAGS.wav_file:\n wav_file = FLAGS.wav_file\n else:\n # Write a WAV of a sine wav into an in-memory file object.\n num_secs = 5\n freq = 1000\n sr = 44100\n t = np.linspace(0, num_secs, int(num_secs * sr))\n x = np.sin(2 * np.pi * freq * t)\n # Convert to signed 16-bit samples.\n samples = np.clip(x * 32768, -32768, 32767).astype(np.int16)\n wav_file = six.BytesIO()\n wavfile.write(wav_file, sr, samples)\n wav_file.seek(0)\n examples_batch = vggish_input.wavfile_to_examples(wav_file)\n print(examples_batch)\n\n # Prepare a postprocessor to munge the model embeddings.\n pproc = vggish_postprocess.Postprocessor(FLAGS.pca_params)\n\n # If needed, prepare a record writer to store the postprocessed embeddings.\n writer = tf.python_io.TFRecordWriter(\n FLAGS.tfrecord_file) if FLAGS.tfrecord_file else None\n\n with tf.Graph().as_default(), tf.Session() as sess:\n # Define the model in inference mode, load the checkpoint, and\n # locate input and output tensors.\n vggish_slim.define_vggish_slim(training=False)\n vggish_slim.load_vggish_slim_checkpoint(sess, FLAGS.checkpoint)\n features_tensor = sess.graph.get_tensor_by_name(vggish_params.INPUT_TENSOR_NAME)\n embedding_tensor = sess.graph.get_tensor_by_name(vggish_params.OUTPUT_TENSOR_NAME)\n\n # Run inference and postprocessing.\n [embedding_batch] = sess.run([embedding_tensor],\n feed_dict={features_tensor: examples_batch})\n print(embedding_batch)\n postprocessed_batch = pproc.postprocess(embedding_batch)\n print(postprocessed_batch)\n\n # Write the postprocessed embeddings as a SequenceExample, in a similar\n # format as the features released in AudioSet. Each row of the batch of\n # embeddings corresponds to roughly a second of audio (96 10ms frames), and\n # the rows are written as a sequence of bytes-valued features, where each\n # feature value contains the 128 bytes of the whitened quantized embedding.\n seq_example = tf.train.SequenceExample(\n feature_lists=tf.train.FeatureLists(\n feature_list={\n vggish_params.AUDIO_EMBEDDING_FEATURE_NAME:\n tf.train.FeatureList(\n feature=[\n tf.train.Feature(\n bytes_list=tf.train.BytesList(\n value=[embedding.tobytes()]))\n for embedding in postprocessed_batch\n ]\n )\n }\n )\n )\n print(seq_example)\n if writer:\n writer.write(seq_example.SerializeToString())\n\n if writer:\n writer.close()\n\n\nif __name__ == '__main__':\n tf.app.run()\n" ]
[ [ "tensorflow.Graph", "scipy.io.wavfile.write", "numpy.clip", "numpy.sin", "tensorflow.python_io.TFRecordWriter", "tensorflow.Session", "tensorflow.app.run" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [ "1.7", "1.0", "0.10", "1.2", "0.14", "0.19", "1.5", "0.12", "0.17", "0.13", "1.6", "1.4", "1.9", "1.3", "1.10", "0.15", "0.18", "0.16", "1.8" ], "tensorflow": [ "1.10" ] } ]
varunjha089/tensorflow_cookbook
[ "94af4606dd918fe446984670560fb11190b97bc5", "c1fa5051c860ecb6de875db975465ced06f43ba6", "c1fa5051c860ecb6de875db975465ced06f43ba6", "c1fa5051c860ecb6de875db975465ced06f43ba6" ]
[ "03_Linear_Regression/04_Loss_Functions_in_Linear_Regressions/04_lin_reg_l1_vs_l2.py", "07_Natural_Language_Processing/02_Working_with_Bag_of_Words/02_bag_of_words.py", "07_Natural_Language_Processing/07_Sentiment_Analysis_With_Doc2Vec/07_sentiment_with_doc2vec.py", "08_Convolutional_Neural_Networks/05_Stylenet_NeuralStyle/05_stylenet.py" ]
[ "# Linear Regression: L1 vs L2\n#----------------------------------\n#\n# This function shows how to use TensorFlow to\n# solve linear regression via the matrix inverse.\n\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport tensorflow as tf\nfrom sklearn import datasets\nfrom tensorflow.python.framework import ops\nops.reset_default_graph()\n\n# Create graph\nsess = tf.Session()\n\n# Load the data\n# iris.data = [(Sepal Length, Sepal Width, Petal Length, Petal Width)]\niris = datasets.load_iris()\nx_vals = np.array([x[3] for x in iris.data])\ny_vals = np.array([y[0] for y in iris.data])\n\n# Declare batch size and number of iterations\nbatch_size = 25\nlearning_rate = 0.4 # Will not converge with learning rate at 0.4\niterations = 50\n\n# Initialize placeholders\nx_data = tf.placeholder(shape=[None, 1], dtype=tf.float32)\ny_target = tf.placeholder(shape=[None, 1], dtype=tf.float32)\n\n# Create variables for linear regression\nA = tf.Variable(tf.random_normal(shape=[1,1]))\nb = tf.Variable(tf.random_normal(shape=[1,1]))\n\n# Declare model operations\nmodel_output = tf.add(tf.matmul(x_data, A), b)\n\n# Declare loss functions\nloss_l1 = tf.reduce_mean(tf.abs(y_target - model_output))\n\n# Declare optimizers\nmy_opt_l1 = tf.train.GradientDescentOptimizer(learning_rate)\ntrain_step_l1 = my_opt_l1.minimize(loss_l1)\n\n# Initialize variables\ninit = tf.global_variables_initializer()\nsess.run(init)\n\n# Training loop\nloss_vec_l1 = []\nfor i in range(iterations):\n rand_index = np.random.choice(len(x_vals), size=batch_size)\n rand_x = np.transpose([x_vals[rand_index]])\n rand_y = np.transpose([y_vals[rand_index]])\n sess.run(train_step_l1, feed_dict={x_data: rand_x, y_target: rand_y})\n temp_loss_l1 = sess.run(loss_l1, feed_dict={x_data: rand_x, y_target: rand_y})\n loss_vec_l1.append(temp_loss_l1)\n if (i+1)%25==0:\n print('Step #' + str(i+1) + ' A = ' + str(sess.run(A)) + ' b = ' + str(sess.run(b)))\n\n\n# L2 Loss\n# Reinitialize graph\nops.reset_default_graph()\n\n# Create graph\nsess = tf.Session()\n\n# Initialize placeholders\nx_data = tf.placeholder(shape=[None, 1], dtype=tf.float32)\ny_target = tf.placeholder(shape=[None, 1], dtype=tf.float32)\n\n# Create variables for linear regression\nA = tf.Variable(tf.random_normal(shape=[1,1]))\nb = tf.Variable(tf.random_normal(shape=[1,1]))\n\n# Declare model operations\nmodel_output = tf.add(tf.matmul(x_data, A), b)\n\n# Declare loss functions\nloss_l2 = tf.reduce_mean(tf.square(y_target - model_output))\n\n# Declare optimizers\nmy_opt_l2 = tf.train.GradientDescentOptimizer(learning_rate)\ntrain_step_l2 = my_opt_l2.minimize(loss_l2)\n\n# Initialize variables\ninit = tf.global_variables_initializer()\nsess.run(init)\n\nloss_vec_l2 = []\nfor i in range(iterations):\n rand_index = np.random.choice(len(x_vals), size=batch_size)\n rand_x = np.transpose([x_vals[rand_index]])\n rand_y = np.transpose([y_vals[rand_index]])\n sess.run(train_step_l2, feed_dict={x_data: rand_x, y_target: rand_y})\n temp_loss_l2 = sess.run(loss_l2, feed_dict={x_data: rand_x, y_target: rand_y})\n loss_vec_l2.append(temp_loss_l2)\n if (i+1)%25==0:\n print('Step #' + str(i+1) + ' A = ' + str(sess.run(A)) + ' b = ' + str(sess.run(b)))\n\n\n# Plot loss over time\nplt.plot(loss_vec_l1, 'k-', label='L1 Loss')\nplt.plot(loss_vec_l2, 'r--', label='L2 Loss')\nplt.title('L1 and L2 Loss per Generation')\nplt.xlabel('Generation')\nplt.ylabel('L1 Loss')\nplt.legend(loc='upper right')\nplt.show()\n", "# Working with Bag of Words\n#---------------------------------------\n#\n# In 
this example, we will download and preprocess the ham/spam\n# text data. We will then use a one-hot-encoding to make a\n# bag of words set of features to use in logistic regression.\n#\n# We will use these one-hot-vectors for logistic regression to\n# predict if a text is spam or ham.\n\nimport tensorflow as tf\nimport matplotlib.pyplot as plt\nimport os\nimport numpy as np\nimport csv\nimport string\nimport requests\nimport io\nfrom zipfile import ZipFile\nfrom tensorflow.contrib import learn\nfrom tensorflow.python.framework import ops\nops.reset_default_graph()\n\n# Start a graph session\nsess = tf.Session()\n\n# Check if data was downloaded, otherwise download it and save for future use\nsave_file_name = os.path.join('temp','temp_spam_data.csv')\n\n# Create directory if it doesn't exist\nif not os.path.exists('temp'):\n os.makedirs('temp')\n\nif os.path.isfile(save_file_name):\n text_data = []\n with open(save_file_name, 'r') as temp_output_file:\n reader = csv.reader(temp_output_file)\n for row in reader:\n text_data.append(row)\nelse:\n zip_url = 'http://archive.ics.uci.edu/ml/machine-learning-databases/00228/smsspamcollection.zip'\n r = requests.get(zip_url)\n z = ZipFile(io.BytesIO(r.content))\n file = z.read('SMSSpamCollection')\n # Format Data\n text_data = file.decode()\n text_data = text_data.encode('ascii',errors='ignore')\n text_data = text_data.decode().split('\\n')\n text_data = [x.split('\\t') for x in text_data if len(x)>=1]\n \n # And write to csv\n with open(save_file_name, 'w') as temp_output_file:\n writer = csv.writer(temp_output_file)\n writer.writerows(text_data)\n\ntexts = [x[1] for x in text_data]\ntarget = [x[0] for x in text_data]\n\n# Relabel 'spam' as 1, 'ham' as 0\ntarget = [1 if x=='spam' else 0 for x in target]\n\n# Normalize text\n# Lower case\ntexts = [x.lower() for x in texts]\n\n# Remove punctuation\ntexts = [''.join(c for c in x if c not in string.punctuation) for x in texts]\n\n# Remove numbers\ntexts = [''.join(c for c in x if c not in '0123456789') for x in texts]\n\n# Trim extra whitespace\ntexts = [' '.join(x.split()) for x in texts]\n\n# Plot histogram of text lengths\ntext_lengths = [len(x.split()) for x in texts]\ntext_lengths = [x for x in text_lengths if x < 50]\nplt.hist(text_lengths, bins=25)\nplt.title('Histogram of # of Words in Texts')\n\n# Choose max text word length at 25\nsentence_size = 25\nmin_word_freq = 3\n\n# Setup vocabulary processor\nvocab_processor = learn.preprocessing.VocabularyProcessor(sentence_size, min_frequency=min_word_freq)\n\n# Have to fit transform to get length of unique words.\nvocab_processor.fit_transform(texts)\nembedding_size = len(vocab_processor.vocabulary_)\n\n# Split up data set into train/test\ntrain_indices = np.random.choice(len(texts), round(len(texts)*0.8), replace=False)\ntest_indices = np.array(list(set(range(len(texts))) - set(train_indices)))\ntexts_train = [x for ix, x in enumerate(texts) if ix in train_indices]\ntexts_test = [x for ix, x in enumerate(texts) if ix in test_indices]\ntarget_train = [x for ix, x in enumerate(target) if ix in train_indices]\ntarget_test = [x for ix, x in enumerate(target) if ix in test_indices]\n\n# Setup Index Matrix for one-hot-encoding\nidentity_mat = tf.diag(tf.ones(shape=[embedding_size]))\n\n# Create variables for logistic regression\nA = tf.Variable(tf.random_normal(shape=[embedding_size,1]))\nb = tf.Variable(tf.random_normal(shape=[1,1]))\n\n# Initialize placeholders\nx_data = tf.placeholder(shape=[sentence_size], dtype=tf.int32)\ny_target = 
tf.placeholder(shape=[1, 1], dtype=tf.float32)\n\n# Text-Vocab Embedding\nx_embed = tf.nn.embedding_lookup(identity_mat, x_data)\nx_col_sums = tf.reduce_sum(x_embed, 0)\n\n# Declare model operations\nx_col_sums_2D = tf.expand_dims(x_col_sums, 0)\nmodel_output = tf.add(tf.matmul(x_col_sums_2D, A), b)\n\n# Declare loss function (Cross Entropy loss)\nloss = tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(logits=model_output, labels=y_target))\n\n# Prediction operation\nprediction = tf.sigmoid(model_output)\n\n# Declare optimizer\nmy_opt = tf.train.GradientDescentOptimizer(0.001)\ntrain_step = my_opt.minimize(loss)\n\n# Intitialize Variables\ninit = tf.global_variables_initializer()\nsess.run(init)\n\n# Start Logistic Regression\nprint('Starting Training Over {} Sentences.'.format(len(texts_train)))\nloss_vec = []\ntrain_acc_all = []\ntrain_acc_avg = []\nfor ix, t in enumerate(vocab_processor.fit_transform(texts_train)):\n y_data = [[target_train[ix]]]\n \n \n sess.run(train_step, feed_dict={x_data: t, y_target: y_data})\n temp_loss = sess.run(loss, feed_dict={x_data: t, y_target: y_data})\n loss_vec.append(temp_loss)\n \n if (ix+1)%10==0:\n print('Training Observation #' + str(ix+1) + ': Loss = ' + str(temp_loss))\n \n # Keep trailing average of past 50 observations accuracy\n # Get prediction of single observation\n [[temp_pred]] = sess.run(prediction, feed_dict={x_data:t, y_target:y_data})\n # Get True/False if prediction is accurate\n train_acc_temp = target_train[ix]==np.round(temp_pred)\n train_acc_all.append(train_acc_temp)\n if len(train_acc_all) >= 50:\n train_acc_avg.append(np.mean(train_acc_all[-50:]))\n\n# Get test set accuracy\nprint('Getting Test Set Accuracy For {} Sentences.'.format(len(texts_test)))\ntest_acc_all = []\nfor ix, t in enumerate(vocab_processor.fit_transform(texts_test)):\n y_data = [[target_test[ix]]]\n \n if (ix+1)%50==0:\n print('Test Observation #' + str(ix+1)) \n \n # Keep trailing average of past 50 observations accuracy\n # Get prediction of single observation\n [[temp_pred]] = sess.run(prediction, feed_dict={x_data:t, y_target:y_data})\n # Get True/False if prediction is accurate\n test_acc_temp = target_test[ix]==np.round(temp_pred)\n test_acc_all.append(test_acc_temp)\n\nprint('\\nOverall Test Accuracy: {}'.format(np.mean(test_acc_all)))\n\n# Plot training accuracy over time\nplt.plot(range(len(train_acc_avg)), train_acc_avg, 'k-', label='Train Accuracy')\nplt.title('Avg Training Acc Over Past 50 Generations')\nplt.xlabel('Generation')\nplt.ylabel('Training Accuracy')\nplt.show()", "# Doc2Vec Model\n#---------------------------------------\n#\n# In this example, we will download and preprocess the movie\n# review data.\n#\n# From this data set we will compute/fit a Doc2Vec model to get\n# Document vectors. 
From these document vectors, we will split the\n# documents into train/test and use these doc vectors to do sentiment\n# analysis on the movie review dataset.\n\nimport tensorflow as tf\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport random\nimport os\nimport pickle\nimport string\nimport requests\nimport collections\nimport io\nimport tarfile\nimport urllib.request\nimport text_helpers\nfrom nltk.corpus import stopwords\nfrom tensorflow.python.framework import ops\nops.reset_default_graph()\n\nos.chdir(os.path.dirname(os.path.realpath(__file__)))\n\n# Make a saving directory if it doesn't exist\ndata_folder_name = 'temp'\nif not os.path.exists(data_folder_name):\n os.makedirs(data_folder_name)\n\n# Start a graph session\nsess = tf.Session()\n\n# Declare model parameters\nbatch_size = 500\nvocabulary_size = 7500\ngenerations = 100000\nmodel_learning_rate = 0.001\n\nembedding_size = 200 # Word embedding size\ndoc_embedding_size = 100 # Document embedding size\nconcatenated_size = embedding_size + doc_embedding_size\n\nnum_sampled = int(batch_size/2) # Number of negative examples to sample.\nwindow_size = 3 # How many words to consider to the left.\n\n# Add checkpoints to training\nsave_embeddings_every = 5000\nprint_valid_every = 5000\nprint_loss_every = 100\n\n# Declare stop words\n#stops = stopwords.words('english')\nstops = []\n\n# We pick a few test words for validation.\nvalid_words = ['love', 'hate', 'happy', 'sad', 'man', 'woman']\n# Later we will have to transform these into indices\n\n# Load the movie review data\nprint('Loading Data')\ntexts, target = text_helpers.load_movie_data(data_folder_name)\n\n# Normalize text\nprint('Normalizing Text Data')\ntexts = text_helpers.normalize_text(texts, stops)\n\n# Texts must contain at least 3 words\ntarget = [target[ix] for ix, x in enumerate(texts) if len(x.split()) > window_size]\ntexts = [x for x in texts if len(x.split()) > window_size] \nassert(len(target)==len(texts))\n\n# Build our data set and dictionaries\nprint('Creating Dictionary')\nword_dictionary = text_helpers.build_dictionary(texts, vocabulary_size)\nword_dictionary_rev = dict(zip(word_dictionary.values(), word_dictionary.keys()))\ntext_data = text_helpers.text_to_numbers(texts, word_dictionary)\n\n# Get validation word keys\nvalid_examples = [word_dictionary[x] for x in valid_words] \n\nprint('Creating Model')\n# Define Embeddings:\nembeddings = tf.Variable(tf.random_uniform([vocabulary_size, embedding_size], -1.0, 1.0))\ndoc_embeddings = tf.Variable(tf.random_uniform([len(texts), doc_embedding_size], -1.0, 1.0))\n\n# NCE loss parameters\nnce_weights = tf.Variable(tf.truncated_normal([vocabulary_size, concatenated_size],\n stddev=1.0 / np.sqrt(concatenated_size)))\nnce_biases = tf.Variable(tf.zeros([vocabulary_size]))\n\n# Create data/target placeholders\nx_inputs = tf.placeholder(tf.int32, shape=[None, window_size + 1]) # plus 1 for doc index\ny_target = tf.placeholder(tf.int32, shape=[None, 1])\nvalid_dataset = tf.constant(valid_examples, dtype=tf.int32)\n\n# Lookup the word embedding\n# Add together element embeddings in window:\nembed = tf.zeros([batch_size, embedding_size])\nfor element in range(window_size):\n embed += tf.nn.embedding_lookup(embeddings, x_inputs[:, element])\n\ndoc_indices = tf.slice(x_inputs, [0,window_size],[batch_size,1])\ndoc_embed = tf.nn.embedding_lookup(doc_embeddings,doc_indices)\n\n# concatenate embeddings\nfinal_embed = tf.concat(axis=1, values=[embed, tf.squeeze(doc_embed)])\n\n# Get loss from prediction\nloss = 
tf.reduce_mean(tf.nn.nce_loss(weights=nce_weights,\n biases=nce_biases,\n labels=y_target,\n inputs=final_embed,\n num_sampled=num_sampled,\n num_classes=vocabulary_size))\n \n# Create optimizer\noptimizer = tf.train.GradientDescentOptimizer(learning_rate=model_learning_rate)\ntrain_step = optimizer.minimize(loss)\n\n# Cosine similarity between words\nnorm = tf.sqrt(tf.reduce_sum(tf.square(embeddings), 1, keep_dims=True))\nnormalized_embeddings = embeddings / norm\nvalid_embeddings = tf.nn.embedding_lookup(normalized_embeddings, valid_dataset)\nsimilarity = tf.matmul(valid_embeddings, normalized_embeddings, transpose_b=True)\n\n# Create model saving operation\nsaver = tf.train.Saver({\"embeddings\": embeddings, \"doc_embeddings\": doc_embeddings})\n\n#Add variable initializer.\ninit = tf.global_variables_initializer()\nsess.run(init)\n\n# Run the doc2vec model.\nprint('Starting Training')\nloss_vec = []\nloss_x_vec = []\nfor i in range(generations):\n batch_inputs, batch_labels = text_helpers.generate_batch_data(text_data, batch_size,\n window_size, method='doc2vec')\n feed_dict = {x_inputs : batch_inputs, y_target : batch_labels}\n\n # Run the train step\n sess.run(train_step, feed_dict=feed_dict)\n\n # Return the loss\n if (i+1) % print_loss_every == 0:\n loss_val = sess.run(loss, feed_dict=feed_dict)\n loss_vec.append(loss_val)\n loss_x_vec.append(i+1)\n print('Loss at step {} : {}'.format(i+1, loss_val))\n \n # Validation: Print some random words and top 5 related words\n if (i+1) % print_valid_every == 0:\n sim = sess.run(similarity, feed_dict=feed_dict)\n for j in range(len(valid_words)):\n valid_word = word_dictionary_rev[valid_examples[j]]\n top_k = 5 # number of nearest neighbors\n nearest = (-sim[j, :]).argsort()[1:top_k+1]\n log_str = \"Nearest to {}:\".format(valid_word)\n for k in range(top_k):\n close_word = word_dictionary_rev[nearest[k]]\n log_str = '{} {},'.format(log_str, close_word)\n print(log_str)\n \n # Save dictionary + embeddings\n if (i+1) % save_embeddings_every == 0:\n # Save vocabulary dictionary\n with open(os.path.join(data_folder_name,'movie_vocab.pkl'), 'wb') as f:\n pickle.dump(word_dictionary, f)\n \n # Save embeddings\n model_checkpoint_path = os.path.join(os.getcwd(),data_folder_name,'doc2vec_movie_embeddings.ckpt')\n save_path = saver.save(sess, model_checkpoint_path)\n print('Model saved in file: {}'.format(save_path))\n\n# Start logistic model-------------------------\nmax_words = 20\nlogistic_batch_size = 500\n\n# Split dataset into train and test sets\n# Need to keep the indices sorted to keep track of document index\ntrain_indices = np.sort(np.random.choice(len(target), round(0.8*len(target)), replace=False))\ntest_indices = np.sort(np.array(list(set(range(len(target))) - set(train_indices))))\ntexts_train = [x for ix, x in enumerate(texts) if ix in train_indices]\ntexts_test = [x for ix, x in enumerate(texts) if ix in test_indices]\ntarget_train = np.array([x for ix, x in enumerate(target) if ix in train_indices])\ntarget_test = np.array([x for ix, x in enumerate(target) if ix in test_indices])\n\n# Convert texts to lists of indices\ntext_data_train = np.array(text_helpers.text_to_numbers(texts_train, word_dictionary))\ntext_data_test = np.array(text_helpers.text_to_numbers(texts_test, word_dictionary))\n\n# Pad/crop movie reviews to specific length\ntext_data_train = np.array([x[0:max_words] for x in [y+[0]*max_words for y in text_data_train]])\ntext_data_test = np.array([x[0:max_words] for x in [y+[0]*max_words for y in text_data_test]])\n\n# 
Define Logistic placeholders\nlog_x_inputs = tf.placeholder(tf.int32, shape=[None, max_words + 1]) # plus 1 for doc index\nlog_y_target = tf.placeholder(tf.int32, shape=[None, 1])\n\n# Define logistic embedding lookup (needed if we have two different batch sizes)\n# Add together element embeddings in window:\nlog_embed = tf.zeros([logistic_batch_size, embedding_size])\nfor element in range(max_words):\n log_embed += tf.nn.embedding_lookup(embeddings, log_x_inputs[:, element])\n\nlog_doc_indices = tf.slice(log_x_inputs, [0,max_words],[logistic_batch_size,1])\nlog_doc_embed = tf.nn.embedding_lookup(doc_embeddings,log_doc_indices)\n\n# concatenate embeddings\nlog_final_embed = tf.concat(axis=1, values=[log_embed, tf.squeeze(log_doc_embed)])\n\n# Define model:\n# Create variables for logistic regression\nA = tf.Variable(tf.random_normal(shape=[concatenated_size,1]))\nb = tf.Variable(tf.random_normal(shape=[1,1]))\n\n# Declare logistic model (sigmoid in loss function)\nmodel_output = tf.add(tf.matmul(log_final_embed, A), b)\n\n# Declare loss function (Cross Entropy loss)\nlogistic_loss = tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(logits=model_output, labels=tf.cast(log_y_target, tf.float32)))\n\n# Actual Prediction\nprediction = tf.round(tf.sigmoid(model_output))\npredictions_correct = tf.cast(tf.equal(prediction, tf.cast(log_y_target, tf.float32)), tf.float32)\naccuracy = tf.reduce_mean(predictions_correct)\n\n# Declare optimizer\nlogistic_opt = tf.train.GradientDescentOptimizer(learning_rate=0.01)\nlogistic_train_step = logistic_opt.minimize(logistic_loss, var_list=[A, b])\n\n# Intitialize Variables\ninit = tf.global_variables_initializer()\nsess.run(init)\n\n# Start Logistic Regression\nprint('Starting Logistic Doc2Vec Model Training')\ntrain_loss = []\ntest_loss = []\ntrain_acc = []\ntest_acc = []\ni_data = []\nfor i in range(10000):\n rand_index = np.random.choice(text_data_train.shape[0], size=logistic_batch_size)\n rand_x = text_data_train[rand_index]\n # Append review index at the end of text data\n rand_x_doc_indices = train_indices[rand_index]\n rand_x = np.hstack((rand_x, np.transpose([rand_x_doc_indices])))\n rand_y = np.transpose([target_train[rand_index]])\n \n feed_dict = {log_x_inputs : rand_x, log_y_target : rand_y}\n sess.run(logistic_train_step, feed_dict=feed_dict)\n \n # Only record loss and accuracy every 100 generations\n if (i+1)%100==0:\n rand_index_test = np.random.choice(text_data_test.shape[0], size=logistic_batch_size)\n rand_x_test = text_data_test[rand_index_test]\n # Append review index at the end of text data\n rand_x_doc_indices_test = test_indices[rand_index_test]\n rand_x_test = np.hstack((rand_x_test, np.transpose([rand_x_doc_indices_test])))\n rand_y_test = np.transpose([target_test[rand_index_test]])\n \n test_feed_dict = {log_x_inputs: rand_x_test, log_y_target: rand_y_test}\n \n i_data.append(i+1)\n\n train_loss_temp = sess.run(logistic_loss, feed_dict=feed_dict)\n train_loss.append(train_loss_temp)\n \n test_loss_temp = sess.run(logistic_loss, feed_dict=test_feed_dict)\n test_loss.append(test_loss_temp)\n \n train_acc_temp = sess.run(accuracy, feed_dict=feed_dict)\n train_acc.append(train_acc_temp)\n \n test_acc_temp = sess.run(accuracy, feed_dict=test_feed_dict)\n test_acc.append(test_acc_temp)\n if (i+1)%500==0:\n acc_and_loss = [i+1, train_loss_temp, test_loss_temp, train_acc_temp, test_acc_temp]\n acc_and_loss = [np.round(x,2) for x in acc_and_loss]\n print('Generation # {}. Train Loss (Test Loss): {:.2f} ({:.2f}). 
Train Acc (Test Acc): {:.2f} ({:.2f})'.format(*acc_and_loss))\n\n\n# Plot loss over time\nplt.plot(i_data, train_loss, 'k-', label='Train Loss')\nplt.plot(i_data, test_loss, 'r--', label='Test Loss', linewidth=4)\nplt.title('Cross Entropy Loss per Generation')\nplt.xlabel('Generation')\nplt.ylabel('Cross Entropy Loss')\nplt.legend(loc='upper right')\nplt.show()\n\n# Plot train and test accuracy\nplt.plot(i_data, train_acc, 'k-', label='Train Set Accuracy')\nplt.plot(i_data, test_acc, 'r--', label='Test Set Accuracy', linewidth=4)\nplt.title('Train and Test Accuracy')\nplt.xlabel('Generation')\nplt.ylabel('Accuracy')\nplt.legend(loc='lower right')\nplt.show()", "# Using TensorFlow for Stylenet/NeuralStyle\n#---------------------------------------\n#\n# We use two images, an original image and a style image\n# and try to make the original image in the style of the style image.\n#\n# Reference paper:\n# https://arxiv.org/abs/1508.06576\n#\n# Need to download the model 'imagenet-vgg-verydeep-19.mat' from:\n# http://www.vlfeat.org/matconvnet/models/beta16/imagenet-vgg-verydeep-19.mat\n\nimport scipy.io\nimport scipy.misc\nimport numpy as np\nimport tensorflow as tf\nfrom tensorflow.python.framework import ops\nops.reset_default_graph()\n\n# Start a graph session\nsess = tf.Session()\n\n\n# Image Files\noriginal_image_file = '../images/book_cover.jpg'\nstyle_image_file = '../images/starry_night.jpg'\n\n# Saved VGG Network path under the current project dir.\nvgg_path = 'imagenet-vgg-verydeep-19.mat'\n\n\n# Default Arguments\noriginal_image_weight = 5.0\nstyle_image_weight = 500.0\nregularization_weight = 100\nlearning_rate = 0.001\ngenerations = 5000\noutput_generations = 250\nbeta1 = 0.9\nbeta2 = 0.999\n\n# Read in images\noriginal_image = scipy.misc.imread(original_image_file)\nstyle_image = scipy.misc.imread(style_image_file)\n\n# Get shape of target and make the style image the same\ntarget_shape = original_image.shape\nstyle_image = scipy.misc.imresize(style_image, target_shape[1] / style_image.shape[1])\n\n# VGG-19 Layer Setup\n# From paper\nvgg_layers = ['conv1_1', 'relu1_1',\n 'conv1_2', 'relu1_2', 'pool1',\n 'conv2_1', 'relu2_1',\n 'conv2_2', 'relu2_2', 'pool2',\n 'conv3_1', 'relu3_1',\n 'conv3_2', 'relu3_2',\n 'conv3_3', 'relu3_3',\n 'conv3_4', 'relu3_4', 'pool3',\n 'conv4_1', 'relu4_1',\n 'conv4_2', 'relu4_2',\n 'conv4_3', 'relu4_3',\n 'conv4_4', 'relu4_4', 'pool4',\n 'conv5_1', 'relu5_1',\n 'conv5_2', 'relu5_2',\n 'conv5_3', 'relu5_3',\n 'conv5_4', 'relu5_4']\n\n# Extract weights and matrix means\ndef extract_net_info(path_to_params):\n vgg_data = scipy.io.loadmat(path_to_params)\n normalization_matrix = vgg_data['normalization'][0][0][0]\n mat_mean = np.mean(normalization_matrix, axis=(0,1))\n network_weights = vgg_data['layers'][0]\n return(mat_mean, network_weights)\n \n\n# Create the VGG-19 Network\ndef vgg_network(network_weights, init_image):\n network = {}\n image = init_image\n\n for i, layer in enumerate(vgg_layers):\n if layer[0] == 'c':\n weights, bias = network_weights[i][0][0][0][0]\n weights = np.transpose(weights, (1, 0, 2, 3))\n bias = bias.reshape(-1)\n conv_layer = tf.nn.conv2d(image, tf.constant(weights), (1, 1, 1, 1), 'SAME')\n image = tf.nn.bias_add(conv_layer, bias)\n elif layer[0] == 'r':\n image = tf.nn.relu(image)\n else:\n image = tf.nn.max_pool(image, (1, 2, 2, 1), (1, 2, 2, 1), 'SAME')\n network[layer] = image\n return(network)\n\n# Here we define which layers apply to the original or style image\noriginal_layer = 'relu4_2'\nstyle_layers = ['relu1_1', 
'relu2_1', 'relu3_1', 'relu4_1', 'relu5_1']\n\n# Get network parameters\nnormalization_mean, network_weights = extract_net_info(vgg_path)\n\nshape = (1,) + original_image.shape\nstyle_shape = (1,) + style_image.shape\noriginal_features = {}\nstyle_features = {}\n\n# Get network parameters\nimage = tf.placeholder('float', shape=shape)\nvgg_net = vgg_network(network_weights, image)\n\n# Normalize original image\noriginal_minus_mean = original_image - normalization_mean\noriginal_norm = np.array([original_minus_mean])\noriginal_features[original_layer] = sess.run(vgg_net[original_layer],\n feed_dict={image: original_norm})\n\n# Get style image network\nimage = tf.placeholder('float', shape=style_shape)\nvgg_net = vgg_network(network_weights, image)\nstyle_minus_mean = style_image - normalization_mean\nstyle_norm = np.array([style_minus_mean])\n\nfor layer in style_layers:\n layer_output = sess.run(vgg_net[layer], feed_dict={image: style_norm})\n layer_output = np.reshape(layer_output, (-1, layer_output.shape[3]))\n style_gram_matrix = np.matmul(layer_output.T, layer_output) / layer_output.size\n style_features[layer] = style_gram_matrix\n\n# Make Combined Image\ninitial = tf.random_normal(shape) * 0.256\nimage = tf.Variable(initial)\nvgg_net = vgg_network(network_weights, image)\n\n# Loss\noriginal_loss = original_image_weight * (2 * tf.nn.l2_loss(vgg_net[original_layer] - original_features[original_layer]) /\n original_features[original_layer].size)\n \n# Loss from Style Image\nstyle_loss = 0\nstyle_losses = []\nfor style_layer in style_layers:\n layer = vgg_net[style_layer]\n feats, height, width, channels = [x.value for x in layer.get_shape()]\n size = height * width * channels\n features = tf.reshape(layer, (-1, channels))\n style_gram_matrix = tf.matmul(tf.transpose(features), features) / size\n style_expected = style_features[style_layer]\n style_losses.append(2 * tf.nn.l2_loss(style_gram_matrix - style_expected) / style_expected.size)\nstyle_loss += style_image_weight * tf.reduce_sum(style_losses)\n \n# To Smooth the resuts, we add in total variation loss \ntotal_var_x = sess.run(tf.reduce_prod(image[:,1:,:,:].get_shape()))\ntotal_var_y = sess.run(tf.reduce_prod(image[:,:,1:,:].get_shape()))\nfirst_term = regularization_weight * 2\nsecond_term_numerator = tf.nn.l2_loss(image[:,1:,:,:] - image[:,:shape[1]-1,:,:])\nsecond_term = second_term_numerator / total_var_y\nthird_term = (tf.nn.l2_loss(image[:,:,1:,:] - image[:,:,:shape[2]-1,:]) / total_var_x)\ntotal_variation_loss = first_term * (second_term + third_term)\n \n# Combined Loss\nloss = original_loss + style_loss + total_variation_loss\n\n# Declare Optimization Algorithm\noptimizer = tf.train.AdamOptimizer(learning_rate,beta1,beta2)\ntrain_step = optimizer.minimize(loss)\n\n# Initialize Variables and start Training\nsess.run(tf.global_variables_initializer())\nfor i in range(generations):\n \n sess.run(train_step)\n\n # Print update and save temporary output\n if (i+1) % output_generations == 0:\n print('Generation {} out of {}, loss: {}'.format(i + 1, generations,sess.run(loss)))\n image_eval = sess.run(image)\n best_image_add_mean = image_eval.reshape(shape[1:]) + normalization_mean\n output_file = 'temp_output_{}.jpg'.format(i)\n scipy.misc.imsave(output_file, best_image_add_mean)\n \n \n# Save final image\nimage_eval = sess.run(image)\nbest_image_add_mean = image_eval.reshape(shape[1:]) + normalization_mean\noutput_file = 'final_output.jpg'\nscipy.misc.imsave(output_file, best_image_add_mean)" ]
[ [ "matplotlib.pyplot.legend", "tensorflow.matmul", "matplotlib.pyplot.title", "sklearn.datasets.load_iris", "tensorflow.placeholder", "matplotlib.pyplot.plot", "tensorflow.global_variables_initializer", "tensorflow.abs", "tensorflow.train.GradientDescentOptimizer", "tensorflow.random_normal", "tensorflow.Session", "numpy.transpose", "tensorflow.square", "matplotlib.pyplot.xlabel", "numpy.array", "tensorflow.python.framework.ops.reset_default_graph", "matplotlib.pyplot.show", "matplotlib.pyplot.ylabel" ], [ "tensorflow.reduce_sum", "tensorflow.nn.sigmoid_cross_entropy_with_logits", "numpy.round", "numpy.mean", "tensorflow.Session", "tensorflow.python.framework.ops.reset_default_graph", "tensorflow.matmul", "matplotlib.pyplot.title", "tensorflow.placeholder", "tensorflow.global_variables_initializer", "tensorflow.train.GradientDescentOptimizer", "matplotlib.pyplot.show", "tensorflow.nn.embedding_lookup", "matplotlib.pyplot.hist", "matplotlib.pyplot.ylabel", "tensorflow.sigmoid", "tensorflow.expand_dims", "tensorflow.ones", "matplotlib.pyplot.xlabel", "tensorflow.contrib.learn.preprocessing.VocabularyProcessor", "tensorflow.random_normal" ], [ "matplotlib.pyplot.legend", "numpy.sqrt", "tensorflow.zeros", "tensorflow.cast", "matplotlib.pyplot.plot", "numpy.round", "tensorflow.nn.nce_loss", "tensorflow.squeeze", "tensorflow.Session", "tensorflow.square", "tensorflow.train.Saver", "tensorflow.python.framework.ops.reset_default_graph", "tensorflow.matmul", "matplotlib.pyplot.title", "numpy.random.choice", "tensorflow.placeholder", "tensorflow.global_variables_initializer", "tensorflow.train.GradientDescentOptimizer", "numpy.transpose", "numpy.array", "matplotlib.pyplot.show", "tensorflow.nn.embedding_lookup", "matplotlib.pyplot.ylabel", "tensorflow.constant", "tensorflow.reduce_mean", "tensorflow.slice", "tensorflow.sigmoid", "matplotlib.pyplot.xlabel", "tensorflow.random_uniform", "tensorflow.random_normal" ], [ "tensorflow.nn.max_pool", "tensorflow.reduce_sum", "tensorflow.nn.l2_loss", "numpy.mean", "tensorflow.train.AdamOptimizer", "tensorflow.Variable", "numpy.reshape", "numpy.matmul", "tensorflow.Session", "tensorflow.python.framework.ops.reset_default_graph", "tensorflow.placeholder", "tensorflow.global_variables_initializer", "numpy.transpose", "numpy.array", "tensorflow.nn.bias_add", "tensorflow.nn.relu", "tensorflow.transpose", "tensorflow.constant", "tensorflow.reshape", "tensorflow.random_normal" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [ "1.10" ] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [ "1.10", "1.12", "1.4", "1.13", "1.5", "1.7", "0.12", "1.0", "1.2" ] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [ "1.10" ] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [ "1.10" ] } ]
pcwuyu/PaddleGAN
[ "b4ff90f0c92c4d8dcaa8e25267151b82fc7aa268", "b4ff90f0c92c4d8dcaa8e25267151b82fc7aa268" ]
[ "ppgan/models/starganv2_model.py", "ppgan/models/generators/msvsr.py" ]
[ "# code was heavily based on https://github.com/clovaai/stargan-v2\n# Users should be careful about adopting these functions in any commercial matters.\n# https://github.com/clovaai/stargan-v2#license\n\nfrom paddle.fluid.layers.nn import soft_relu\nfrom .base_model import BaseModel\n\nfrom paddle import nn\nimport paddle\nimport paddle.nn.functional as F\nfrom .builder import MODELS\nfrom .generators.builder import build_generator\nfrom .discriminators.builder import build_discriminator\nfrom ..modules.init import kaiming_normal_, constant_\nfrom ppgan.utils.visual import make_grid, tensor2img\n\nimport numpy as np\n\n\ndef translate_using_reference(nets, w_hpf, x_src, x_ref, y_ref):\n N, C, H, W = x_src.shape\n wb = paddle.to_tensor(np.ones((1, C, H, W))).astype('float32')\n x_src_with_wb = paddle.concat([wb, x_src], axis=0)\n\n masks = nets['fan'].get_heatmap(x_src) if w_hpf > 0 else None\n s_ref = nets['style_encoder'](x_ref, y_ref)\n s_ref_list = paddle.unsqueeze(s_ref, axis=[1])\n s_ref_lists = []\n for _ in range(N):\n s_ref_lists.append(s_ref_list)\n s_ref_list = paddle.stack(s_ref_lists, axis=1)\n s_ref_list = paddle.reshape(\n s_ref_list,\n (s_ref_list.shape[0], s_ref_list.shape[1], s_ref_list.shape[3]))\n x_concat = [x_src_with_wb]\n for i, s_ref in enumerate(s_ref_list):\n x_fake = nets['generator'](x_src, s_ref, masks=masks)\n x_fake_with_ref = paddle.concat([x_ref[i:i + 1], x_fake], axis=0)\n x_concat += [x_fake_with_ref]\n\n x_concat = paddle.concat(x_concat, axis=0)\n img = tensor2img(make_grid(x_concat, nrow=N + 1, range=(0, 1)))\n del x_concat\n return img\n\n\ndef compute_d_loss(nets,\n lambda_reg,\n x_real,\n y_org,\n y_trg,\n z_trg=None,\n x_ref=None,\n masks=None):\n assert (z_trg is None) != (x_ref is None)\n # with real images\n x_real.stop_gradient = False\n out = nets['discriminator'](x_real, y_org)\n loss_real = adv_loss(out, 1)\n loss_reg = r1_reg(out, x_real)\n\n # with fake images\n with paddle.no_grad():\n if z_trg is not None:\n s_trg = nets['mapping_network'](z_trg, y_trg)\n else: # x_ref is not None\n s_trg = nets['style_encoder'](x_ref, y_trg)\n\n x_fake = nets['generator'](x_real, s_trg, masks=masks)\n out = nets['discriminator'](x_fake, y_trg)\n loss_fake = adv_loss(out, 0)\n\n loss = loss_real + loss_fake + lambda_reg * loss_reg\n return loss, {\n 'real': loss_real.numpy(),\n 'fake': loss_fake.numpy(),\n 'reg': loss_reg.numpy()\n }\n\n\ndef adv_loss(logits, target):\n assert target in [1, 0]\n targets = paddle.full_like(logits, fill_value=target)\n loss = F.binary_cross_entropy_with_logits(logits, targets)\n return loss\n\n\ndef r1_reg(d_out, x_in):\n # zero-centered gradient penalty for real images\n batch_size = x_in.shape[0]\n grad_dout = paddle.grad(outputs=d_out.sum(),\n inputs=x_in,\n create_graph=True,\n retain_graph=True,\n only_inputs=True)[0]\n grad_dout2 = grad_dout.pow(2)\n assert (grad_dout2.shape == x_in.shape)\n reg = 0.5 * paddle.reshape(grad_dout2, (batch_size, -1)).sum(1).mean(0)\n return reg\n\n\ndef soft_update(source, target, beta=1.0):\n assert 0.0 <= beta <= 1.0\n\n if isinstance(source, paddle.DataParallel):\n source = source._layers\n\n target_model_map = dict(target.named_parameters())\n for param_name, source_param in source.named_parameters():\n target_param = target_model_map[param_name]\n target_param.set_value(beta * source_param +\n (1.0 - beta) * target_param)\n\n\ndef dump_model(model):\n params = {}\n for k in model.state_dict().keys():\n if k.endswith('.scale'):\n params[k] = model.state_dict()[k].shape\n return 
params\n\n\ndef compute_g_loss(nets,\n w_hpf,\n lambda_sty,\n lambda_ds,\n lambda_cyc,\n x_real,\n y_org,\n y_trg,\n z_trgs=None,\n x_refs=None,\n masks=None):\n assert (z_trgs is None) != (x_refs is None)\n if z_trgs is not None:\n z_trg, z_trg2 = z_trgs\n if x_refs is not None:\n x_ref, x_ref2 = x_refs\n\n # adversarial loss\n if z_trgs is not None:\n s_trg = nets['mapping_network'](z_trg, y_trg)\n else:\n s_trg = nets['style_encoder'](x_ref, y_trg)\n\n x_fake = nets['generator'](x_real, s_trg, masks=masks)\n out = nets['discriminator'](x_fake, y_trg)\n loss_adv = adv_loss(out, 1)\n\n # style reconstruction loss\n s_pred = nets['style_encoder'](x_fake, y_trg)\n loss_sty = paddle.mean(paddle.abs(s_pred - s_trg))\n\n # diversity sensitive loss\n if z_trgs is not None:\n s_trg2 = nets['mapping_network'](z_trg2, y_trg)\n else:\n s_trg2 = nets['style_encoder'](x_ref2, y_trg)\n x_fake2 = nets['generator'](x_real, s_trg2, masks=masks)\n loss_ds = paddle.mean(paddle.abs(x_fake - x_fake2))\n\n # cycle-consistency loss\n if w_hpf > 0:\n if isinstance(nets['fan'], paddle.DataParallel):\n masks = nets['fan']._layers.get_heatmap(x_fake)\n else:\n masks = nets['fan'].get_heatmap(x_fake)\n else:\n masks = None\n\n s_org = nets['style_encoder'](x_real, y_org)\n x_rec = nets['generator'](x_fake, s_org, masks=masks)\n loss_cyc = paddle.mean(paddle.abs(x_rec - x_real))\n\n loss = loss_adv + lambda_sty * loss_sty \\\n - lambda_ds * loss_ds + lambda_cyc * loss_cyc\n return loss, {\n 'adv': loss_adv.numpy(),\n 'sty': loss_sty.numpy(),\n 'ds:': loss_ds.numpy(),\n 'cyc': loss_cyc.numpy()\n }\n\n\ndef he_init(module):\n if isinstance(module, nn.Conv2D):\n kaiming_normal_(module.weight, mode='fan_in', nonlinearity='relu')\n if module.bias is not None:\n constant_(module.bias, 0)\n if isinstance(module, nn.Linear):\n kaiming_normal_(module.weight, mode='fan_in', nonlinearity='relu')\n if module.bias is not None:\n constant_(module.bias, 0)\n\n\[email protected]()\nclass StarGANv2Model(BaseModel):\n def __init__(\n self,\n generator,\n style=None,\n mapping=None,\n discriminator=None,\n fan=None,\n latent_dim=16,\n lambda_reg=1,\n lambda_sty=1,\n lambda_ds=1,\n lambda_cyc=1,\n ):\n super(StarGANv2Model, self).__init__()\n self.w_hpf = generator['w_hpf']\n self.nets_ema = {}\n self.nets['generator'] = build_generator(generator)\n self.nets_ema['generator'] = build_generator(generator)\n self.nets['style_encoder'] = build_generator(style)\n self.nets_ema['style_encoder'] = build_generator(style)\n self.nets['mapping_network'] = build_generator(mapping)\n self.nets_ema['mapping_network'] = build_generator(mapping)\n if discriminator:\n self.nets['discriminator'] = build_discriminator(discriminator)\n if self.w_hpf > 0:\n fan_model = build_generator(fan)\n fan_model.eval()\n self.nets['fan'] = fan_model\n self.nets_ema['fan'] = fan_model\n self.latent_dim = latent_dim\n self.lambda_reg = lambda_reg\n self.lambda_sty = lambda_sty\n self.lambda_ds = lambda_ds\n self.lambda_cyc = lambda_cyc\n\n self.nets['generator'].apply(he_init)\n self.nets['style_encoder'].apply(he_init)\n self.nets['mapping_network'].apply(he_init)\n self.nets['discriminator'].apply(he_init)\n\n # remember the initial value of ds weight\n self.initial_lambda_ds = self.lambda_ds\n\n def setup_input(self, input):\n \"\"\"Unpack input data from the dataloader and perform necessary pre-processing steps.\n\n Args:\n input (dict): include the data itself and its metadata information.\n\n The option 'direction' can be used to swap images in domain A and 
domain B.\n \"\"\"\n pass\n self.input = input\n self.input['z_trg'] = paddle.randn(\n (input['src'].shape[0], self.latent_dim))\n self.input['z_trg2'] = paddle.randn(\n (input['src'].shape[0], self.latent_dim))\n\n def forward(self):\n \"\"\"Run forward pass; called by both functions <optimize_parameters> and <test>.\"\"\"\n pass\n\n def _reset_grad(self, optims):\n for optim in optims.values():\n optim.clear_gradients()\n\n def train_iter(self, optimizers=None):\n #TODO\n x_real, y_org = self.input['src'], self.input['src_cls']\n x_ref, x_ref2, y_trg = self.input['ref'], self.input[\n 'ref2'], self.input['ref_cls']\n z_trg, z_trg2 = self.input['z_trg'], self.input['z_trg2']\n\n if self.w_hpf > 0:\n if isinstance(self.nets['fan'], paddle.DataParallel):\n masks = self.nets['fan']._layers.get_heatmap(x_real)\n else:\n masks = self.nets['fan'].get_heatmap(x_real)\n else:\n masks = None\n\n # train the discriminator\n d_loss, d_losses_latent = compute_d_loss(self.nets,\n self.lambda_reg,\n x_real,\n y_org,\n y_trg,\n z_trg=z_trg,\n masks=masks)\n self._reset_grad(optimizers)\n d_loss.backward()\n optimizers['discriminator'].minimize(d_loss)\n\n d_loss, d_losses_ref = compute_d_loss(self.nets,\n self.lambda_reg,\n x_real,\n y_org,\n y_trg,\n x_ref=x_ref,\n masks=masks)\n self._reset_grad(optimizers)\n d_loss.backward()\n optimizers['discriminator'].step()\n\n # train the generator\n g_loss, g_losses_latent = compute_g_loss(self.nets,\n self.w_hpf,\n self.lambda_sty,\n self.lambda_ds,\n self.lambda_cyc,\n x_real,\n y_org,\n y_trg,\n z_trgs=[z_trg, z_trg2],\n masks=masks)\n self._reset_grad(optimizers)\n g_loss.backward()\n optimizers['generator'].step()\n optimizers['mapping_network'].step()\n optimizers['style_encoder'].step()\n\n g_loss, g_losses_ref = compute_g_loss(self.nets,\n self.w_hpf,\n self.lambda_sty,\n self.lambda_ds,\n self.lambda_cyc,\n x_real,\n y_org,\n y_trg,\n x_refs=[x_ref, x_ref2],\n masks=masks)\n self._reset_grad(optimizers)\n g_loss.backward()\n optimizers['generator'].step()\n\n # compute moving average of network parameters\n soft_update(self.nets['generator'],\n self.nets_ema['generator'],\n beta=0.999)\n soft_update(self.nets['mapping_network'],\n self.nets_ema['mapping_network'],\n beta=0.999)\n soft_update(self.nets['style_encoder'],\n self.nets_ema['style_encoder'],\n beta=0.999)\n\n # decay weight for diversity sensitive loss\n if self.lambda_ds > 0:\n self.lambda_ds -= (self.initial_lambda_ds / self.total_iter)\n\n for loss, prefix in zip(\n [d_losses_latent, d_losses_ref, g_losses_latent, g_losses_ref],\n ['D/latent_', 'D/ref_', 'G/latent_', 'G/ref_']):\n for key, value in loss.items():\n self.losses[prefix + key] = value\n self.losses['G/lambda_ds'] = self.lambda_ds\n self.losses['Total iter'] = int(self.total_iter)\n\n def test_iter(self, metrics=None):\n #TODO\n self.nets_ema['generator'].eval()\n self.nets_ema['style_encoder'].eval()\n soft_update(self.nets['generator'],\n self.nets_ema['generator'],\n beta=0.999)\n soft_update(self.nets['mapping_network'],\n self.nets_ema['mapping_network'],\n beta=0.999)\n soft_update(self.nets['style_encoder'],\n self.nets_ema['style_encoder'],\n beta=0.999)\n src_img = self.input['src']\n ref_img = self.input['ref']\n ref_label = self.input['ref_cls']\n with paddle.no_grad():\n img = translate_using_reference(\n self.nets_ema, self.w_hpf,\n paddle.to_tensor(src_img).astype('float32'),\n paddle.to_tensor(ref_img).astype('float32'),\n paddle.to_tensor(ref_label).astype('float32'))\n self.visual_items['reference'] = img\n 
self.nets_ema['generator'].train()\n self.nets_ema['style_encoder'].train()\n", "# Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserve.\n#\n#Licensed under the Apache License, Version 2.0 (the \"License\");\n#you may not use this file except in compliance with the License.\n#You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n#Unless required by applicable law or agreed to in writing, software\n#distributed under the License is distributed on an \"AS IS\" BASIS,\n#WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n#See the License for the specific language governing permissions and\n#limitations under the License.\n\nimport numpy as np\n\nimport paddle\nimport paddle.nn as nn\nimport paddle.nn.functional as F\nfrom paddle.vision.ops import DeformConv2D\n\nfrom .basicvsr import PixelShufflePack, flow_warp, SPyNet, ResidualBlocksWithInputConv\nfrom ...utils.download import get_path_from_url\nfrom ...modules.init import kaiming_normal_, constant_\nfrom .builder import GENERATORS\n\n\[email protected]()\nclass MSVSR(nn.Layer):\n \"\"\"PP-MSVSR network structure for video super-resolution.\n\n Support only x4 upsampling.\n Paper:\n PP-MSVSR: Multi-Stage Video Super-Resolution, 2021\n\n Args:\n mid_channels (int): Channel number of the intermediate features.\n Default: 32.\n num_init_blocks (int): Number of residual blocks in feat_extract.\n Default: 2.\n num_blocks (int): Number of residual blocks in each propagation branch.\n Default: 3.\n num_reconstruction_blocks (int): Number of residual blocks in reconstruction.\n Default: 2.\n only_last (bool): Whether the hr feature only do the last convolution.\n Default: True.\n use_tiny_spynet (bool): Whether use tiny spynet.\n Default: True.\n deform_groups (int): Number of deformable_groups in DeformConv2D in stage2 and stage3.\n Defaults: 4.\n stage1_groups (int): Number of deformable_groups in DeformConv2D in stage1.\n Defaults: 8.\n auxiliary_loss (bool): Whether use auxiliary loss.\n Default: True.\n use_refine_align (bool): Whether use refine align.\n Default: True.\n aux_reconstruction_blocks : Number of residual blocks in auxiliary reconstruction.\n Default: 1.\n use_local_connnect (bool): Whether add feature of stage1 after upsample.\n Default: True.\n \"\"\"\n def __init__(self,\n mid_channels=32,\n num_init_blocks=2,\n num_blocks=3,\n num_reconstruction_blocks=2,\n only_last=True,\n use_tiny_spynet=True,\n deform_groups=4,\n stage1_groups=8,\n auxiliary_loss=True,\n use_refine_align=True,\n aux_reconstruction_blocks=1,\n use_local_connnect=True):\n\n super().__init__()\n\n self.mid_channels = mid_channels\n self.only_last = only_last\n self.deform_groups = deform_groups\n self.auxiliary_loss = auxiliary_loss\n self.use_refine_align = use_refine_align\n self.use_local_connnect = use_local_connnect\n\n # optical flow module\n if use_tiny_spynet:\n self.spynet = ModifiedSPyNet(num_blocks=3, use_tiny_block=True)\n weight_path = get_path_from_url(\n 'https://paddlegan.bj.bcebos.com/models/modified_spynet_tiny.pdparams'\n )\n self.spynet.set_state_dict(paddle.load(weight_path))\n else:\n self.spynet = ModifiedSPyNet(num_blocks=6, use_tiny_block=False)\n weight_path = get_path_from_url(\n 'https://paddlegan.bj.bcebos.com/models/modified_spynet.pdparams'\n )\n self.spynet.set_state_dict(paddle.load(weight_path))\n\n # feature extraction module\n self.feat_extract = ResidualBlocksWithInputConv(3, mid_channels,\n num_init_blocks)\n\n # propagation branches module for stage2 and 
stage3\n self.deform_align = nn.LayerDict()\n self.backbone = nn.LayerDict()\n\n prop_names = [\n 'stage2_backward', 'stage2_forward', 'stage3_backward',\n 'stage3_forward'\n ]\n\n for i, layer in enumerate(prop_names):\n if i > 1 and self.use_refine_align:\n self.deform_align[layer] = ReAlignmentModule(\n mid_channels,\n mid_channels,\n 3,\n padding=1,\n deformable_groups=deform_groups)\n else:\n self.deform_align[layer] = AlignmentModule(\n mid_channels,\n mid_channels,\n 3,\n padding=1,\n deformable_groups=deform_groups)\n\n self.backbone[layer] = ResidualBlocksWithInputConv(\n (3 + i) * mid_channels, mid_channels, num_blocks)\n\n # stage1\n self.stage1_align = AlignmentModule(mid_channels,\n mid_channels,\n 3,\n padding=1,\n deformable_groups=stage1_groups)\n self.stage1_blocks = ResidualBlocksWithInputConv(\n 3 * mid_channels, mid_channels, 3)\n\n # upsampling module\n self.reconstruction = ResidualBlocksWithInputConv(\n 6 * mid_channels, mid_channels, num_reconstruction_blocks)\n\n self.upsample1 = PixelShufflePack(mid_channels,\n mid_channels,\n 2,\n upsample_kernel=3)\n self.upsample2 = PixelShufflePack(mid_channels,\n mid_channels,\n 2,\n upsample_kernel=3)\n if self.only_last:\n self.conv_last = nn.Conv2D(mid_channels, 3, 3, 1, 1)\n else:\n self.conv_hr = nn.Conv2D(mid_channels, mid_channels, 3, 1, 1)\n self.conv_last = nn.Conv2D(mid_channels, 3, 3, 1, 1)\n self.img_upsample = nn.Upsample(scale_factor=4,\n mode='bilinear',\n align_corners=False)\n\n # activation function\n self.lrelu = nn.LeakyReLU(negative_slope=0.1)\n\n # auxiliary loss\n if self.auxiliary_loss:\n self.aux_fusion = nn.Conv2D(mid_channels * 2, mid_channels, 3, 1, 1)\n\n self.aux_reconstruction = ResidualBlocksWithInputConv(\n 4 * mid_channels, mid_channels, aux_reconstruction_blocks)\n\n self.aux_block_down1 = nn.Sequential(\n nn.Conv2D(3 + mid_channels, mid_channels, 3, 2, 1),\n nn.LeakyReLU(negative_slope=0.1),\n nn.Conv2D(mid_channels, mid_channels, 3, 1, 1))\n self.aux_block_down2 = nn.Sequential(\n nn.Conv2D(mid_channels * 2, mid_channels, 3, 2, 1),\n nn.LeakyReLU(negative_slope=0.1),\n nn.Conv2D(mid_channels, mid_channels, 3, 1, 1))\n\n self.aux_conv_last = nn.Conv2D(mid_channels, 3, 3, 1, 1)\n\n self.aux_upsample1 = PixelShufflePack(mid_channels,\n mid_channels,\n 2,\n upsample_kernel=3)\n self.aux_upsample2 = PixelShufflePack(mid_channels,\n mid_channels,\n 2,\n upsample_kernel=3)\n self.hybrid_conv_last = nn.Conv2D(mid_channels, 3, 3, 1, 1)\n\n def check_if_mirror_extended(self, lrs):\n \"\"\"Check whether the input is a mirror-extended sequence.\n If mirror-extended, the i-th (i=0, ..., t-1) frame is equal to the\n (t-1-i)-th frame.\n Args:\n lrs (tensor): Input LR images with shape (n, t, c, h, w)\n\n Returns:\n Bool: Whether the input is a mirror-extended sequence.\n \"\"\"\n\n with paddle.no_grad():\n self.is_mirror_extended = False\n if lrs.shape[1] % 2 == 0:\n lrs_1, lrs_2 = paddle.chunk(lrs, 2, axis=1)\n lrs_2 = paddle.flip(lrs_2, [1])\n if paddle.norm(lrs_1 - lrs_2) == 0:\n self.is_mirror_extended = True\n\n def compute_flow(self, lrs):\n \"\"\"Compute optical flow using pretrained flow network for feature alignment.\n Args:\n lrs (tensor): Input LR images with shape (n, t, c, h, w)\n\n Returns:\n Tuple: Tensor of forward optical flow and backward optical flow with shape (n, t-1, 2, h, w).\n \"\"\"\n n, t, c, h, w = lrs.shape\n\n lrs_1 = lrs[:, :-1, :, :, :].reshape([-1, c, h, w])\n lrs_2 = lrs[:, 1:, :, :, :].reshape([-1, c, h, w])\n\n flows_backward = self.spynet(lrs_1, lrs_2).reshape([n, t - 
1, 2, h, w])\n\n if self.is_mirror_extended:\n flows_forward = flows_backward.flip(1)\n else:\n flows_forward = self.spynet(lrs_2,\n lrs_1).reshape([n, t - 1, 2, h, w])\n\n return flows_forward, flows_backward\n\n def stage1(self, feats, flows, flows_forward=None):\n \"\"\"Stage1 of PP-MSVSR network.\n Args:\n feats (dict): Dict with key 'spatial', the value is Array of tensor after feature extraction with shape (n, c, h, w).\n flows (tensor): Backward optical flow with shape (n, t-1, 2, h, w).\n flows_forward (tensor): Forward optical flow with shape (n, t-1, 2, h, w).\n\n Returns:\n Dict: The input dict with new keys 'feat_stage1', the value of 'feat_stage1' is Array of tensor after Local Fusion Module with shape (n, c, h, w).\n \"\"\"\n\n n, t, _, h, w = flows.shape\n\n frame_idx = range(t, -1, -1)\n flow_idx = range(t, -1, -1)\n mapping_idx = list(range(0, len(feats['spatial'])))\n mapping_idx += mapping_idx[::-1]\n\n # Local Fusion Module\n for i, idx in enumerate(frame_idx):\n feat_current = feats['spatial'][mapping_idx[idx]]\n\n # get aligned right adjacent frames\n if i > 0:\n feat_prop = feats['spatial'][mapping_idx[idx + 1]]\n flow_n1 = flows[:, flow_idx[i], :, :, :]\n cond_n1 = flow_warp(feat_prop, flow_n1.transpose([0, 2, 3, 1]))\n cond = paddle.concat([cond_n1, feat_current], axis=1)\n feat_prop, _, _ = self.stage1_align(feat_prop, cond, flow_n1)\n else:\n feat_prop = paddle.zeros([n, self.mid_channels, h, w])\n\n # get aligned left adjacent frames\n if i < t:\n feat_back = feats['spatial'][mapping_idx[idx - 1]]\n flow_n1_ = flows_forward[:, flow_idx[i] - 1, :, :, :]\n cond_n1_ = flow_warp(feat_back, flow_n1_.transpose([0, 2, 3,\n 1]))\n cond_ = paddle.concat([cond_n1_, feat_current], axis=1)\n feat_back, _, _ = self.stage1_align(feat_back, cond_, flow_n1_)\n else:\n feat_back = paddle.zeros([n, self.mid_channels, h, w])\n\n # concatenate and residual blocks\n feat = [feat_current] + [feat_prop] + [feat_back]\n feat = paddle.concat(feat, axis=1)\n feat = self.stage1_blocks(feat)\n\n feats['feat_stage1'].append(feat)\n\n feats['feat_stage1'] = feats['feat_stage1'][::-1]\n\n return feats\n\n def stage2(self, feats, flows):\n \"\"\"Stage2 of PP-MSVSR network.\n Args:\n feats (dict): Dict with key 'spatial' and 'feat_stage1' after stage1.\n flows (tuple): Tensor of backward optical flow and forward optical flow with shape (n, t-1, 2, h, w).\n\n Returns:\n feats (dict): The input dict with new keys 'stage2_backward' and 'stage2_forward', the value of both is Array of feature after stage2 with shape (n, c, h, w).\n pre_offset (dict): Dict with keys 'stage2_backward' and 'stage2_forward', the value of both is Array of offset in stage2 with shape (n, 18*deform_groups, h, w).\n pre_mask (dict): Dict with keys 'stage2_backward' and 'stage2_forward', the value of both is Array of mask in stage2 with shape (n, 9*deform_groups, h, w).\n \"\"\"\n flows_backward, flows_forward = flows\n n, t, _, h, w = flows_backward.shape\n\n pre_offset = {}\n pre_mask = {}\n\n # propagation branches module\n prop_names = ['stage2_backward', 'stage2_forward']\n for index in range(2):\n prop_name = prop_names[index]\n pre_offset[prop_name] = [0 for _ in range(t)]\n pre_mask[prop_name] = [0 for _ in range(t)]\n feats[prop_name] = []\n frame_idx = range(0, t + 1)\n flow_idx = range(-1, t)\n mapping_idx = list(range(0, len(feats['spatial'])))\n mapping_idx += mapping_idx[::-1]\n\n if 'backward' in prop_name:\n frame_idx = frame_idx[::-1]\n flow_idx = frame_idx\n flows = flows_backward\n else:\n flows = 
flows_forward\n\n feat_prop = paddle.zeros([n, self.mid_channels, h, w])\n for i, idx in enumerate(frame_idx):\n feat_current = feats['spatial'][mapping_idx[idx]]\n\n if i > 0:\n flow_n1 = flows[:, flow_idx[i], :, :, :]\n\n cond_n1 = flow_warp(feat_prop,\n flow_n1.transpose([0, 2, 3, 1]))\n cond = paddle.concat([cond_n1, feat_current], axis=1)\n\n feat_prop, offset, mask = self.deform_align[prop_name](\n feat_prop, cond, flow_n1)\n pre_offset[prop_name][flow_idx[i]] = offset\n pre_mask[prop_name][flow_idx[i]] = (mask)\n\n # concatenate and residual blocks\n feat = [feat_current] + [\n feats[k][idx]\n for k in feats if k not in ['spatial', prop_name]\n ] + [feat_prop]\n\n feat = paddle.concat(feat, axis=1)\n feat_prop = feat_prop + self.backbone[prop_name](feat)\n\n feats[prop_name].append(feat_prop)\n\n if 'backward' in prop_name:\n feats[prop_name] = feats[prop_name][::-1]\n\n return feats, pre_offset, pre_mask\n\n def stage3(self,\n feats,\n flows,\n aux_feats=None,\n pre_offset=None,\n pre_mask=None):\n \"\"\"Stage3 of PP-MSVSR network.\n Args:\n feats (dict): Dict of features after stage2.\n flows (tuple): Tensor of backward optical flow and forward optical flow with shape (n, t-1, 2, h, w).\n aux_feats (dict): Dict with keys 'outs' and 'feats', the value is Array of tensor after auxiliary_stage with shape (n, 3, 4*h, 4*w) and (n, c, h, w), separately.\n pre_offset (dict): Dict with keys 'stage2_backward' and 'stage2_forward', the value of both is Array of offset in stage2 with shape (n, 18*deform_groups, h, w).\n pre_mask (dict): Dict with keys 'stage2_backward' and 'stage2_forward', the value of both is Array of mask in stage2 with shape (n, 9*deform_groups, h, w).\n\n Returns:\n feats (dict): The input feats dict with new keys 'stage3_backward' and 'stage3_forward', the value of both is Array of feature after stage3 with shape (n, c, h, w).\n \"\"\"\n flows_backward, flows_forward = flows\n n, t, _, h, w = flows_backward.shape\n\n # propagation branches module\n prop_names = ['stage3_backward', 'stage3_forward']\n for index in range(2):\n prop_name = prop_names[index]\n feats[prop_name] = []\n frame_idx = range(0, t + 1)\n flow_idx = range(-1, t)\n mapping_idx = list(range(0, len(feats['spatial'])))\n mapping_idx += mapping_idx[::-1]\n\n if 'backward' in prop_name:\n frame_idx = frame_idx[::-1]\n flow_idx = frame_idx\n flows = flows_backward\n pre_stage_name = 'stage2_backward'\n else:\n flows = flows_forward\n pre_stage_name = 'stage2_forward'\n\n feat_prop = paddle.zeros([n, self.mid_channels, h, w])\n for i, idx in enumerate(frame_idx):\n feat_current = feats['spatial'][mapping_idx[idx]]\n if aux_feats is not None and 'feats' in aux_feats:\n feat_current = aux_feats['feats'][mapping_idx[idx]]\n\n if i > 0:\n flow_n1 = flows[:, flow_idx[i], :, :, :]\n\n cond_n1 = flow_warp(feat_prop,\n flow_n1.transpose([0, 2, 3, 1]))\n cond = paddle.concat([cond_n1, feat_current], axis=1)\n\n feat_prop = self.deform_align[prop_name](\n feat_prop, cond, flow_n1, feat_current,\n pre_offset[pre_stage_name][flow_idx[i]],\n pre_mask[pre_stage_name][flow_idx[i]])\n\n # concatenate and residual blocks\n feat = [feat_current] + [\n feats[k][idx]\n for k in feats if k not in ['spatial', prop_name]\n ] + [feat_prop]\n\n feat = paddle.concat(feat, axis=1)\n feat_prop = feat_prop + self.backbone[prop_name](feat)\n\n feats[prop_name].append(feat_prop)\n\n if 'backward' in prop_name:\n feats[prop_name] = feats[prop_name][::-1]\n\n return feats\n\n def auxiliary_stage(self, feats, lqs):\n \"\"\"Compute the 
output image and auxiliary feature for Auxiliary Loss in stage2.\n Args:\n feats (dict): Dict of features after stage2.\n lqs (tensor): Input LR images with shape (n, t, c, h, w)\n\n Returns:\n dict: Dict with keys 'outs' and 'feats', the value is Array of tensor after auxiliary_stage with shape (n, 3, 4*h, 4*w) and (n, c, h, w), separately.\n \"\"\"\n aux_feats = {}\n aux_feats['outs'] = []\n aux_feats['feats'] = []\n num_outputs = len(feats['spatial'])\n\n mapping_idx = list(range(0, num_outputs))\n mapping_idx += mapping_idx[::-1]\n\n t = lqs.shape[1]\n for i in range(0, t):\n hr = [feats[k][i] for k in feats if (k != 'spatial')]\n feat_current = feats['spatial'][mapping_idx[i]]\n hr.insert(0, feat_current)\n hr = paddle.concat(hr, axis=1)\n\n hr_low = self.aux_reconstruction(hr)\n hr_mid = self.lrelu(self.aux_upsample1(hr_low))\n hr_high = self.lrelu(self.aux_upsample2(hr_mid))\n\n hr = self.aux_conv_last(hr_high)\n hr += self.img_upsample(lqs[:, i, :, :, :])\n\n # output tensor of auxiliary_stage with shape (n, 3, 4*h, 4*w)\n aux_feats['outs'].append(hr)\n\n aux_feat = self.aux_block_down1(paddle.concat([hr, hr_high],\n axis=1))\n aux_feat = self.aux_block_down2(\n paddle.concat([aux_feat, hr_mid], axis=1))\n aux_feat = self.aux_fusion(paddle.concat([aux_feat, hr_low],\n axis=1))\n\n # out feature of auxiliary_stage with shape (n, c, h, w)\n aux_feats['feats'].append(aux_feat)\n\n return aux_feats\n\n def upsample(self, lqs, feats, aux_feats=None):\n \"\"\"Compute the output image given the features.\n Args:\n lqs (tensor): Input LR images with shape (n, t, c, h, w).\n feats (dict): Dict of features after stage3.\n aux_feats (dict): Dict with keys 'outs' and 'feats', the value is Array of tensor after auxiliary_stage with shape (n, 3, 4*h, 4*w) and (n, c, h, w), separately.\n\n Returns:\n Tensor: Output HR sequence with shape (n, t, 3, 4*h, 4*w).\n \"\"\"\n\n outputs = []\n num_outputs = len(feats['spatial'])\n\n mapping_idx = list(range(0, num_outputs))\n mapping_idx += mapping_idx[::-1]\n\n t = lqs.shape[1]\n for i in range(0, t):\n hr = [\n feats[k].pop(0) for k in feats\n if (k != 'spatial' and k != 'feat_stage1')\n ]\n if 'feat_stage1' in feats:\n local_feat = feats['feat_stage1'].pop(0)\n hr.insert(0, local_feat)\n hr.insert(0, feats['spatial'][mapping_idx[i]])\n hr = paddle.concat(hr, axis=1)\n\n hr = self.reconstruction(hr)\n\n hr = self.lrelu(self.upsample1(hr))\n hr = self.lrelu(self.upsample2(hr))\n if self.only_last:\n hr = self.conv_last(hr)\n else:\n hr = self.lrelu(self.conv_hr(hr))\n hr = self.conv_last(hr)\n\n hr += self.img_upsample(lqs[:, i, :, :, :])\n if self.use_local_connnect:\n local_head = self.lrelu(self.aux_upsample1(local_feat))\n local_head = self.lrelu(self.aux_upsample2(local_head))\n hr = self.hybrid_conv_last(local_head) + hr\n\n outputs.append(hr)\n\n if self.auxiliary_loss:\n return paddle.stack(aux_feats['outs'],\n axis=1), paddle.stack(outputs, axis=1)\n return paddle.stack(outputs, axis=1)\n\n def forward(self, lqs):\n \"\"\"Forward function for PP-MSVSR.\n Args:\n lqs (Tensor): Input LR sequence with shape (n, t, c, h, w).\n Returns:\n Tensor: Output HR sequence with shape (n, t, 3, 4*h, 4*w).\n \"\"\"\n\n n, t, c, h, w = lqs.shape\n\n lqs_downsample = lqs\n\n # check whether the input is an extended sequence\n self.check_if_mirror_extended(lqs)\n\n feats = {}\n feats_ = self.feat_extract(lqs.reshape([-1, c, h, w]))\n\n h, w = feats_.shape[2:]\n feats_ = feats_.reshape([n, t, -1, h, w])\n feats['spatial'] = [feats_[:, i, :, :, :] for i in 
range(0, t)]\n\n # compute optical flow using the low-res inputs\n assert lqs_downsample.shape[3] >= 64 and lqs_downsample.shape[4] >= 64, (\n 'The height and width of low-res inputs must be at least 64, '\n f'but got {h} and {w}.')\n\n flows_forward, flows_backward = self.compute_flow(lqs_downsample)\n\n # feature propgation\n feats['feat_stage1'] = []\n feats = self.stage1(feats, flows_backward, flows_forward)\n\n feats, pre_offset, pre_mask = self.stage2(\n feats, (flows_backward, flows_forward))\n\n if self.auxiliary_loss:\n aux_feats = self.auxiliary_stage(feats, lqs)\n\n feats = self.stage3(feats, (flows_backward, flows_forward), aux_feats,\n pre_offset, pre_mask)\n\n return self.upsample(lqs, feats, aux_feats=aux_feats)\n\n\nclass AlignmentModule(nn.Layer):\n \"\"\"deformable alignment module.\n Args:\n in_channels (int): Same as nn.Conv2d.\n out_channels (int): Same as nn.Conv2d.\n kernel_size (int or tuple[int]): Same as nn.Conv2d.\n stride (int or tuple[int]): Same as nn.Conv2d.\n padding (int or tuple[int]): Same as nn.Conv2d.\n dilation (int or tuple[int]): Same as nn.Conv2d.\n groups (int): Same as nn.Conv2d.\n deformable_groups (int): Number of deformable_groups in DeformConv2D.\n \"\"\"\n def __init__(self,\n in_channels=128,\n out_channels=64,\n kernel_size=3,\n stride=1,\n padding=1,\n dilation=1,\n groups=1,\n deformable_groups=16):\n super(AlignmentModule, self).__init__()\n\n self.conv_offset = nn.Sequential(\n nn.Conv2D(2 * out_channels + 2, out_channels, 3, 1, 1),\n nn.LeakyReLU(negative_slope=0.1),\n nn.Conv2D(out_channels, out_channels, 3, 1, 1),\n nn.LeakyReLU(negative_slope=0.1),\n nn.Conv2D(out_channels, out_channels, 3, 1, 1),\n nn.LeakyReLU(negative_slope=0.1),\n nn.Conv2D(out_channels, 27 * deformable_groups, 3, 1, 1),\n )\n self.dcn = DeformConv2D(in_channels,\n out_channels,\n kernel_size=kernel_size,\n stride=stride,\n padding=padding,\n dilation=dilation,\n deformable_groups=deformable_groups)\n\n self.init_offset()\n\n def init_offset(self):\n constant_(self.conv_offset[-1].weight, 0)\n constant_(self.conv_offset[-1].bias, 0)\n\n def forward(self, x, extra_feat, flow_1):\n extra_feat = paddle.concat([extra_feat, flow_1], axis=1)\n out = self.conv_offset(extra_feat)\n o1, o2, mask = paddle.chunk(out, 3, axis=1)\n\n # offset\n offset = 10 * paddle.tanh(paddle.concat((o1, o2), axis=1))\n offset = offset + flow_1.flip(1).tile([1, offset.shape[1] // 2, 1, 1])\n\n # mask\n mask = F.sigmoid(mask)\n out = self.dcn(x, offset, mask)\n return out, offset, mask\n\n\nclass ReAlignmentModule(nn.Layer):\n \"\"\"refine deformable alignment module.\n Args:\n in_channels (int): Same as nn.Conv2d.\n out_channels (int): Same as nn.Conv2d.\n kernel_size (int or tuple[int]): Same as nn.Conv2d.\n stride (int or tuple[int]): Same as nn.Conv2d.\n padding (int or tuple[int]): Same as nn.Conv2d.\n dilation (int or tuple[int]): Same as nn.Conv2d.\n groups (int): Same as nn.Conv2d.\n deformable_groups (int): Number of deformable_groups in DeformConv2D.\n \"\"\"\n def __init__(self,\n in_channels=128,\n out_channels=64,\n kernel_size=3,\n stride=1,\n padding=1,\n dilation=1,\n groups=1,\n deformable_groups=16):\n super(ReAlignmentModule, self).__init__()\n\n self.mdconv = DeformConv2D(in_channels,\n out_channels,\n kernel_size=kernel_size,\n stride=stride,\n padding=padding,\n dilation=dilation,\n deformable_groups=deformable_groups)\n self.conv_offset = nn.Sequential(\n nn.Conv2D(2 * out_channels + 2, out_channels, 3, 1, 1),\n nn.LeakyReLU(negative_slope=0.1),\n 
nn.Conv2D(out_channels, out_channels, 3, 1, 1),\n nn.LeakyReLU(negative_slope=0.1),\n nn.Conv2D(out_channels, out_channels, 3, 1, 1),\n nn.LeakyReLU(negative_slope=0.1),\n nn.Conv2D(out_channels, 27 * deformable_groups, 3, 1, 1),\n )\n self.dcn = DeformConv2D(in_channels,\n out_channels,\n kernel_size=kernel_size,\n stride=stride,\n padding=padding,\n dilation=dilation,\n deformable_groups=deformable_groups)\n\n self.init_offset()\n\n def init_offset(self):\n constant_(self.conv_offset[-1].weight, 0)\n constant_(self.conv_offset[-1].bias, 0)\n\n def forward(self,\n x,\n extra_feat,\n flow_1,\n feat_current,\n pre_stage_flow=None,\n pre_stage_mask=None):\n if pre_stage_flow is not None:\n pre_feat = self.mdconv(x, pre_stage_flow, pre_stage_mask)\n extra_feat = paddle.concat([pre_feat, feat_current, flow_1], axis=1)\n else:\n extra_feat = paddle.concat([extra_feat, flow_1], axis=1)\n out = self.conv_offset(extra_feat)\n o1, o2, mask = paddle.chunk(out, 3, axis=1)\n\n # offset\n offset = 10 * paddle.tanh(paddle.concat((o1, o2), axis=1))\n if pre_stage_flow is not None:\n offset = offset + pre_stage_flow\n else:\n offset = offset + flow_1.flip(1).tile(\n [1, offset.shape[1] // 2, 1, 1])\n\n # mask\n if pre_stage_mask is not None:\n mask = (F.sigmoid(mask) + pre_stage_mask) / 2.0\n else:\n mask = F.sigmoid(mask)\n out = self.dcn(x, offset, mask)\n return out\n\n\nclass ModifiedSPyNet(nn.Layer):\n \"\"\"Modified SPyNet network structure.\n\n The difference to the SPyNet in paper is that\n 1. convolution with kernel_size=7 is replaced by convolution with kernel_size=3 in this version,\n 2. less SPyNetBasicModule is used in this version,\n 3. no BN is used in this version.\n\n Paper:\n Optical Flow Estimation using a Spatial Pyramid Network, CVPR, 2017\n\n Args:\n act_cfg (dict): Activation function.\n Default: dict(name='LeakyReLU').\n num_blocks (int): Number of SPyNetBlock.\n Default: 6.\n use_tiny_block (bool): Whether use tiny spynet.\n Default: True.\n \"\"\"\n def __init__(self,\n act_cfg=dict(name='LeakyReLU'),\n num_blocks=6,\n use_tiny_block=False):\n super().__init__()\n self.num_blocks = num_blocks\n self.basic_module = nn.LayerList([\n SPyNetBlock(act_cfg=act_cfg, use_tiny_block=use_tiny_block)\n for _ in range(num_blocks)\n ])\n\n self.register_buffer(\n 'mean',\n paddle.to_tensor([0.485, 0.456, 0.406]).reshape([1, 3, 1, 1]))\n self.register_buffer(\n 'std',\n paddle.to_tensor([0.229, 0.224, 0.225]).reshape([1, 3, 1, 1]))\n\n def compute_flow(self, ref, supp):\n \"\"\"Compute flow from ref to supp.\n\n Note that in this function, the images are already resized to a\n multiple of 32.\n\n Args:\n ref (Tensor): Reference image with shape of (n, 3, h, w).\n supp (Tensor): Supporting image with shape of (n, 3, h, w).\n\n Returns:\n Tensor: Estimated optical flow: (n, 2, h, w).\n \"\"\"\n n, _, h, w = ref.shape\n\n # normalize the input images\n ref = [(ref - self.mean) / self.std]\n supp = [(supp - self.mean) / self.std]\n\n # generate downsampled frames\n for level in range(self.num_blocks - 1):\n ref.append(F.avg_pool2d(ref[-1], kernel_size=2, stride=2))\n supp.append(F.avg_pool2d(supp[-1], kernel_size=2, stride=2))\n ref = ref[::-1]\n supp = supp[::-1]\n\n # flow computation\n flow = paddle.to_tensor(\n np.zeros([\n n, 2, h // (2**(self.num_blocks - 1)), w //\n (2**(self.num_blocks - 1))\n ], 'float32'))\n\n for level in range(len(ref)):\n if level == 0:\n flow_up = flow\n else:\n flow_up = F.interpolate(\n flow, scale_factor=2, mode='bilinear',\n align_corners=True) * 2.0\n\n # add 
the residue to the upsampled flow\n flow = flow_up + self.basic_module[level](paddle.concat([\n ref[level],\n flow_warp(supp[level],\n flow_up.transpose([0, 2, 3, 1]),\n padding_mode='border'), flow_up\n ],\n axis=1))\n\n return flow\n\n def compute_flow_list(self, ref, supp):\n n, _, h, w = ref.shape\n\n # normalize the input images\n ref = [(ref - self.mean) / self.std]\n supp = [(supp - self.mean) / self.std]\n\n # generate downsampled frames\n for level in range(self.num_blocks - 1):\n ref.append(F.avg_pool2d(ref[-1], kernel_size=2, stride=2))\n supp.append(F.avg_pool2d(supp[-1], kernel_size=2, stride=2))\n ref = ref[::-1]\n supp = supp[::-1]\n\n # flow computation\n flow_list = []\n flow = paddle.to_tensor(\n np.zeros([\n n, 2, h // (2**(self.num_blocks - 1)), w //\n (2**(self.num_blocks - 1))\n ], 'float32'))\n for level in range(len(ref)):\n if level == 0:\n flow_up = flow\n else:\n flow_up = F.interpolate(\n flow, scale_factor=2, mode='bilinear',\n align_corners=True) * 2.0\n\n # add the residue to the upsampled flow\n flow = flow_up + self.basic_module[level](paddle.concat([\n ref[level],\n flow_warp(supp[level],\n flow_up.transpose([0, 2, 3, 1]),\n padding_mode='border'), flow_up\n ],\n axis=1))\n flow_list.append(flow)\n return flow_list\n\n def forward(self, ref, supp):\n \"\"\"Forward function of Modified SPyNet.\n\n This function computes the optical flow from ref to supp.\n\n Args:\n ref (Tensor): Reference image with shape of (n, 3, h, w).\n supp (Tensor): Supporting image with shape of (n, 3, h, w).\n\n Returns:\n Tensor: Estimated optical flow: (n, 2, h, w).\n \"\"\"\n\n # upsize to a multiple of 32\n h, w = ref.shape[2:4]\n w_up = w if (w % 32) == 0 else 32 * (w // 32 + 1)\n h_up = h if (h % 32) == 0 else 32 * (h // 32 + 1)\n ref = F.interpolate(ref,\n size=(h_up, w_up),\n mode='bilinear',\n align_corners=False)\n\n supp = F.interpolate(supp,\n size=(h_up, w_up),\n mode='bilinear',\n align_corners=False)\n\n ref.stop_gradient = False\n supp.stop_gradient = False\n\n # compute flow, and resize back to the original resolution\n flow = F.interpolate(self.compute_flow(ref, supp),\n size=(h, w),\n mode='bilinear',\n align_corners=False)\n\n # adjust the flow values\n flow[:, 0, :, :] *= float(w) / float(w_up)\n flow[:, 1, :, :] *= float(h) / float(h_up)\n\n return flow\n\n\nclass SPyNetBlock(nn.Layer):\n \"\"\"Basic Block of Modified SPyNet.\n refer to Optical Flow Estimation using a Spatial Pyramid Network, CVPR, 2017\n \"\"\"\n def __init__(self, act_cfg=dict(name='LeakyReLU'), use_tiny_block=False):\n super().__init__()\n if use_tiny_block:\n self.basic_module = nn.Sequential(\n ConvLayer(in_channels=8,\n out_channels=16,\n kernel_size=3,\n stride=1,\n padding=1,\n act_cfg=act_cfg),\n ConvLayer(in_channels=16,\n out_channels=16,\n kernel_size=3,\n stride=1,\n padding=1,\n act_cfg=act_cfg),\n ConvLayer(in_channels=16,\n out_channels=32,\n kernel_size=3,\n stride=1,\n padding=1,\n act_cfg=act_cfg),\n ConvLayer(in_channels=32,\n out_channels=32,\n kernel_size=3,\n stride=1,\n padding=1,\n act_cfg=act_cfg),\n ConvLayer(in_channels=32,\n out_channels=32,\n kernel_size=3,\n stride=1,\n padding=1,\n act_cfg=act_cfg),\n ConvLayer(in_channels=32,\n out_channels=32,\n kernel_size=3,\n stride=1,\n padding=1,\n act_cfg=act_cfg),\n ConvLayer(in_channels=32,\n out_channels=16,\n kernel_size=3,\n stride=1,\n padding=1,\n act_cfg=act_cfg),\n ConvLayer(in_channels=16,\n out_channels=16,\n kernel_size=3,\n stride=1,\n padding=1,\n act_cfg=act_cfg),\n ConvLayer(in_channels=16,\n 
out_channels=16,\n kernel_size=3,\n stride=1,\n padding=1,\n act_cfg=act_cfg),\n ConvLayer(in_channels=16,\n out_channels=8,\n kernel_size=3,\n stride=1,\n padding=1,\n act_cfg=act_cfg),\n ConvLayer(in_channels=8,\n out_channels=8,\n kernel_size=3,\n stride=1,\n padding=1,\n act_cfg=act_cfg),\n ConvLayer(in_channels=8,\n out_channels=2,\n kernel_size=3,\n stride=1,\n padding=1,\n act_cfg=None))\n else:\n self.basic_module = nn.Sequential(\n ConvLayer(in_channels=8,\n out_channels=16,\n kernel_size=3,\n stride=1,\n padding=1,\n act_cfg=act_cfg),\n ConvLayer(in_channels=16,\n out_channels=16,\n kernel_size=3,\n stride=1,\n padding=1,\n act_cfg=act_cfg),\n ConvLayer(in_channels=16,\n out_channels=32,\n kernel_size=3,\n stride=1,\n padding=1,\n act_cfg=act_cfg),\n ConvLayer(in_channels=32,\n out_channels=32,\n kernel_size=3,\n stride=1,\n padding=1,\n act_cfg=act_cfg),\n ConvLayer(in_channels=32,\n out_channels=32,\n kernel_size=3,\n stride=1,\n padding=1,\n act_cfg=act_cfg),\n ConvLayer(in_channels=32,\n out_channels=64,\n kernel_size=3,\n stride=1,\n padding=1,\n act_cfg=act_cfg),\n ConvLayer(in_channels=64,\n out_channels=32,\n kernel_size=3,\n stride=1,\n padding=1,\n act_cfg=act_cfg),\n ConvLayer(in_channels=32,\n out_channels=32,\n kernel_size=3,\n stride=1,\n padding=1,\n act_cfg=act_cfg),\n ConvLayer(in_channels=32,\n out_channels=32,\n kernel_size=3,\n stride=1,\n padding=1,\n act_cfg=act_cfg),\n ConvLayer(in_channels=32,\n out_channels=16,\n kernel_size=3,\n stride=1,\n padding=1,\n act_cfg=act_cfg),\n ConvLayer(in_channels=16,\n out_channels=16,\n kernel_size=3,\n stride=1,\n padding=1,\n act_cfg=act_cfg),\n ConvLayer(in_channels=16,\n out_channels=16,\n kernel_size=3,\n stride=1,\n padding=1,\n act_cfg=act_cfg),\n ConvLayer(in_channels=16,\n out_channels=16,\n kernel_size=3,\n stride=1,\n padding=1,\n act_cfg=act_cfg),\n ConvLayer(in_channels=16,\n out_channels=16,\n kernel_size=3,\n stride=1,\n padding=1,\n act_cfg=act_cfg),\n ConvLayer(in_channels=16,\n out_channels=2,\n kernel_size=3,\n stride=1,\n padding=1,\n act_cfg=None))\n\n def forward(self, tensor_input):\n \"\"\"Forward function of SPyNetBlock.\n Args:\n tensor_input (Tensor): Input tensor with shape (b, 8, h, w).\n 8 channels contain:\n [reference image (3), neighbor image (3), initial flow (2)].\n\n Returns:\n Tensor: Refined flow with shape (b, 2, h, w)\n \"\"\"\n return self.basic_module(tensor_input)\n\n\nclass ConvLayer(nn.Layer):\n def __init__(self,\n in_channels,\n out_channels,\n kernel_size,\n stride=1,\n padding=0,\n dilation=1,\n groups=1,\n act_cfg=dict(name='ReLU')):\n super(ConvLayer, self).__init__()\n self.act_cfg = act_cfg\n self.with_activation = act_cfg is not None\n\n self.conv = nn.Conv2D(in_channels=in_channels,\n out_channels=out_channels,\n kernel_size=kernel_size,\n stride=stride,\n padding=padding,\n dilation=dilation,\n groups=groups)\n\n if self.with_activation:\n if act_cfg['name'] == 'ReLU':\n self.act = paddle.nn.ReLU()\n elif act_cfg['name'] == 'LeakyReLU':\n self.act = nn.LeakyReLU(negative_slope=0.1)\n\n def forward(self, tensor_input):\n out = self.conv(tensor_input)\n if self.with_activation:\n out = self.act(out)\n return out\n" ]
[ [ "numpy.ones" ], [ "numpy.zeros" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
Stanford-ILIAD/Learn-Imperfect-Varying-Dynamics
[ "25191f7b076033ac9dbe8fd08f2a92e3caa57cb3" ]
[ "imperfect_envs/driving/envs/gridworld_continuous.py" ]
[ "import io\nfrom typing import Text\nimport gym\nfrom gym import spaces\nfrom PIL import Image\nimport numpy as np\nimport scipy.special\nfrom driving.world import World\nfrom driving.entities import TextEntity, Entity\nfrom driving.agents import Car, Building, Goal\nfrom driving.geometry import Point\nfrom typing import Tuple\n\nimport random\n\nclass PidVelPolicy:\n \"\"\"PID controller for H that maintains its initial velocity.\"\"\"\n\n def __init__(self, dt: float, params: Tuple[float, float, float] = (3.0, 1.0, 6.0)):\n self._target_vel = None\n self.previous_error = 0\n self.integral = 0\n self.errors = []\n self.dt = dt\n self.Kp, self.Ki, self.Kd = params\n\n def action(self, obs):\n my_y_dot = obs[3]\n if self._target_vel is None:\n self._target_vel = my_y_dot\n error = self._target_vel - my_y_dot\n derivative = (error - self.previous_error) * self.dt\n self.integral = self.integral + self.dt * error\n acc = self.Kp * error + self.Ki * self.integral + self.Kd * derivative\n self.previous_error = error\n self.errors.append(error)\n return acc\n\n def reset(self):\n self._target_vel = None\n self.previous_error = 0\n self.integral = 0\n self.errors = []\n\n def __str__(self):\n return \"PidVelPolicy({})\".format(self.dt)\n\nclass GridworldContinuousEnv(gym.Env):\n\n def __init__(self,\n dt: float = 0.1,\n width: int = 30,\n height: int = 40,\n time_limit: float = 300.0):\n super(GridworldContinuousEnv, self).__init__()\n self.dt = dt\n self.width = width\n self.height = height\n self.world = World(self.dt, width=width, height=height, ppm=6)\n self.accelerate = PidVelPolicy(self.dt)\n self.step_num = 0\n self.time_limit = time_limit\n self.action_space = spaces.Box(\n np.array([-1.]), np.array([1.]), dtype=np.float32\n )\n self.goal_radius = 2.\n self.observation_space = spaces.Box(-np.inf, np.inf, shape=(7,))\n self.start = np.array([self.width/2.,self.goal_radius])\n self.goal = np.array([self.width/2., self.height-self.goal_radius])\n self.max_dist = np.linalg.norm(self.goal-self.start,2)\n\n self.target = [self.height/5., self.height*2./5., self.height*3./5., self.height*4./5., np.inf]\n self.obstacle_width = 6.\n self.initial_speed = 3.\n\n def step(self, action: np.ndarray, verbose: bool = False):\n self.step_num += 1\n\n action = action * 0.1\n car = self.world.dynamic_agents[0]\n acc = self.accelerate.action(self._get_obs())\n action = np.append(action, acc)\n if self.stop:\n action = np.array([0, -5])\n car.set_control(*action)\n self.world.tick()\n\n reward = self.reward(verbose)\n\n done = False\n if car.y >= self.height or car.y <= 0 or car.x <= 0 or car.x >= self.width:\n reward -= 10000\n done = True\n if self.step_num >= self.time_limit:\n done = True\n if self.car.collidesWith(self.goal_obj):\n done = True\n self.stop = True\n #if self.step_num < 6:\n # done = False\n return self._get_obs(), reward, done, {'episode': {'r': reward, 'l': self.step_num}}\n\n def reset(self):\n self.world.reset()\n self.stop = False\n self.target_count = 0\n\n self.buildings = [\n Building(Point(self.width/2., self.height/2.-3), Point(self.obstacle_width,1), \"gray80\"),\n ]\n\n random_dis = random.random()*2.\n random_angle = random.random()*2*np.pi\n init_x = self.start[0] + random_dis*np.cos(random_angle)\n init_y = self.start[1] + random_dis*np.sin(random_angle)\n self.car = Car(Point(init_x, init_y), np.pi/2., \"blue\")\n self.car.velocity = Point(0, self.initial_speed)\n\n self.goal_obj = Goal(Point(self.goal[0], self.goal[1]), self.goal_radius, 0.0)\n\n for building in 
self.buildings:\n self.world.add(building)\n self.world.add(self.car)\n self.world.add(self.goal_obj)\n \n self.last_heading = np.pi / 2\n\n self.step_num = 0\n return self._get_obs()\n\n def reset_with_obs(self, obs):\n self.world.reset()\n self.stop = False\n self.target_count = 0\n\n self.buildings = [\n Building(Point(self.width/2., self.height/2.-3), Point(self.obstacle_width,1), \"gray80\"),\n ]\n\n init_x = (obs[0]/2.+0.5)*self.width\n init_y = (obs[1]/2.+0.5)*self.height\n self.car = Car(Point(init_x, init_y), np.pi/2., \"blue\")\n self.car.velocity = Point(0, self.initial_speed)\n\n self.goal_obj = Goal(Point(self.goal[0], self.goal[1]), self.goal_radius, 0.0)\n\n for building in self.buildings:\n self.world.add(building)\n self.world.add(self.car)\n self.world.add(self.goal_obj)\n \n self.last_heading = np.pi / 2\n\n self.step_num = 0\n return self._get_obs()\n\n def _get_obs(self):\n \"\"\"\n Get state of car\n \"\"\"\n return_state = np.array(self.world.state)\n #print(return_state)\n return_state[1] = 2.* ((return_state[1] / self.height) - 0.5)\n return_state[0] = 2.* ((return_state[0] / self.width) - 0.5)\n return_state[2] /= self.initial_speed\n return_state[3] /= self.initial_speed\n return return_state\n\n def inverse_dynamic(self, state, next_state):\n return (next_state[-2] / np.linalg.norm(self.initial_speed*state[2:4], ord=2))/self.dt\n\n def reward(self, verbose, weight=10.0):\n dist_rew = -1. # * (self.car.center.distanceTo(self.goal_obj)/self.max_dist)\n coll_rew = 0\n for building in self.buildings:\n if self.car.collidesWith(building):\n coll_rew = -1000.\n break\n\n goal_rew = 0.0\n if self.car.collidesWith(self.goal_obj) and (not self.stop):\n goal_rew = 100.\n\n extra_rew = 0.\n #if self.car.x < self.width / 4.:\n # extra_rew = (self.width / 4. - self.car.x)/(self.width/4.) * (-1.)\n #elif self.car.x > self.width * 3. / 4.:\n # extra_rew = (self.car.x-self.width * 3. / 4.)/(self.width/4.) * (-1.)\n\n reward = sum([dist_rew, coll_rew, extra_rew, goal_rew])\n if verbose: print(\"dist reward: \", dist_rew,\n \"goal reward: \", goal_rew,\n \"extra reward: \", extra_rew,\n \"reward: \", reward)\n return reward\n\n def render(self):\n self.world.render()\n\nclass GridworldContinuousSlowRandomInitEnv(GridworldContinuousEnv):\n def reset(self):\n self.world.reset()\n\n self.stop = False\n self.target_count = 0\n\n self.buildings = [\n Building(Point(self.width/2., self.height/2.-3), Point(self.obstacle_width,1), \"gray80\"),\n ]\n\n while True:\n random_w = random.random()\n random_h = random.random()\n init_x = self.width/2.-(self.obstacle_width/2.+2.) + random_w*(self.obstacle_width+4.)\n init_y = self.goal_radius + (self.height-3*self.goal_radius)*random_h\n cond1 = abs(init_x - self.width/2.) < (self.obstacle_width/2.+2.) and init_y-self.height/2. < 3. and init_y-self.height/2.>-13.\n slope = ((self.height - self.goal_radius) - (self.height/2.-3))/(self.width/4.)\n #print(slope, init_x, ((self.width/4.-abs(init_x - self.width/2.)) * slope + (self.height/2.-3.)))\n cond2 = init_y < ((self.width/4.-abs(init_x - self.width/2.)) * slope + (self.height/2.-3.))\n if cond2 and not cond1:\n break\n init_heading = np.pi/2. 
# np.arctan2(self.goal[1] - init_y, self.goal[0]-init_x)\n self.car = Car(Point(init_x, init_y), init_heading, \"blue\")\n self.car.velocity = Point(0, self.initial_speed)\n\n self.goal_obj = Goal(Point(self.goal[0], self.goal[1]), self.goal_radius, 0.0)\n\n for building in self.buildings:\n self.world.add(building)\n self.world.add(self.car)\n self.world.add(self.goal_obj)\n \n self.last_heading = np.pi / 2\n\n self.step_num = 0\n return self._get_obs()\n\nclass GridworldContinuousFastRandomInitEnv(GridworldContinuousSlowRandomInitEnv):\n def __init__(self,\n dt: float = 0.1,\n width: int = 30,\n height: int = 40,\n time_limit: float = 300.0):\n super(GridworldContinuousFastRandomInitEnv, self).__init__(dt, width, height, time_limit)\n self.initial_speed = 9.\n" ]
[ [ "numpy.cos", "numpy.linalg.norm", "numpy.sin", "numpy.append", "numpy.array" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
z8674558/idocp
[ "946524db7ae4591b578be2409ca619961572e7be" ]
[ "examples/anymal/python/trotting.py" ]
[ "import idocp\nimport numpy as np\nimport math\n\n\nLF_foot_id = 12\nLH_foot_id = 22\nRF_foot_id = 32\nRH_foot_id = 42\ncontact_frames = [LF_foot_id, LH_foot_id, RF_foot_id, RH_foot_id] \npath_to_urdf = '../anymal_b_simple_description/urdf/anymal.urdf'\nbaumgarte_time_step = 0.04\nrobot = idocp.Robot(path_to_urdf, idocp.BaseJointType.FloatingBase, \n contact_frames, baumgarte_time_step)\n\ndt = 0.02\nstep_length = 0.15\nstep_height = 0.1\nperiod_swing = 0.5\nperiod_double_support = 0.04\nt0 = period_double_support \ncycle = 3\n\ncost = idocp.CostFunction()\nq_standing = np.array([0, 0, 0.4792, 0, 0, 0, 1, \n -0.1, 0.7, -1.0, \n -0.1, -0.7, 1.0, \n 0.1, 0.7, -1.0, \n 0.1, -0.7, 1.0])\nq_weight = np.array([0, 0, 0, 250000, 250000, 250000, \n 0.0001, 0.0001, 0.0001, \n 0.0001, 0.0001, 0.0001,\n 0.0001, 0.0001, 0.0001,\n 0.0001, 0.0001, 0.0001])\nv_weight = np.array([100, 100, 100, 100, 100, 100, \n 1, 1, 1, \n 1, 1, 1,\n 1, 1, 1,\n 1, 1, 1])\nu_weight = np.full(robot.dimu(), 1.0e-01)\nqi_weight = np.array([1.0, 1.0, 1.0, 1.0, 1.0, 1.0, \n 100, 100, 100, \n 100, 100, 100,\n 100, 100, 100,\n 100, 100, 100])\nvi_weight = np.full(robot.dimv(), 100)\nconfig_cost = idocp.ConfigurationSpaceCost(robot)\nconfig_cost.set_q_ref(q_standing)\nconfig_cost.set_q_weight(q_weight)\nconfig_cost.set_qf_weight(q_weight)\nconfig_cost.set_qi_weight(qi_weight)\nconfig_cost.set_v_weight(v_weight)\nconfig_cost.set_vf_weight(v_weight)\nconfig_cost.set_vi_weight(vi_weight)\nconfig_cost.set_u_weight(u_weight)\ncost.push_back(config_cost)\n\nrobot.forward_kinematics(q_standing)\nq0_3d_LF = robot.frame_position(LF_foot_id)\nq0_3d_LH = robot.frame_position(LH_foot_id)\nq0_3d_RF = robot.frame_position(RF_foot_id)\nq0_3d_RH = robot.frame_position(RH_foot_id)\nLF_t0 = t0 + period_swing + period_double_support\nLH_t0 = t0\nRF_t0 = t0\nRH_t0 = t0 + period_swing + period_double_support\nLF_foot_ref = idocp.PeriodicFootTrackRef(q0_3d_LF, step_length, step_height, \n LF_t0, period_swing, \n period_swing+2*period_double_support, False)\nLH_foot_ref = idocp.PeriodicFootTrackRef(q0_3d_LH, step_length, step_height, \n LH_t0, period_swing, \n period_swing+2*period_double_support, True)\nRF_foot_ref = idocp.PeriodicFootTrackRef(q0_3d_RF, step_length, step_height, \n RF_t0, period_swing, \n period_swing+2*period_double_support, True)\nRH_foot_ref = idocp.PeriodicFootTrackRef(q0_3d_RH, step_length, step_height, \n RH_t0, period_swing, \n period_swing+2*period_double_support, False)\nLF_cost = idocp.TimeVaryingTaskSpace3DCost(robot, LF_foot_id, LF_foot_ref)\nLH_cost = idocp.TimeVaryingTaskSpace3DCost(robot, LH_foot_id, LH_foot_ref)\nRF_cost = idocp.TimeVaryingTaskSpace3DCost(robot, RF_foot_id, RF_foot_ref)\nRH_cost = idocp.TimeVaryingTaskSpace3DCost(robot, RH_foot_id, RH_foot_ref)\nfoot_track_weight = np.full(3, 1.0e06)\nLF_cost.set_q_weight(foot_track_weight)\nLH_cost.set_q_weight(foot_track_weight)\nRF_cost.set_q_weight(foot_track_weight)\nRH_cost.set_q_weight(foot_track_weight)\ncost.push_back(LF_cost)\ncost.push_back(LH_cost)\ncost.push_back(RF_cost)\ncost.push_back(RH_cost)\n\ncom_ref0 = (q0_3d_LF + q0_3d_LH + q0_3d_RF + q0_3d_RH) / 4\ncom_ref0[2] = robot.com()[2]\nv_com_ref = np.zeros(3)\nv_com_ref[0] = 0.5 * step_length / period_swing\ncom_ref = idocp.PeriodicCoMRef(com_ref0, v_com_ref, t0, period_swing, \n period_double_support, True)\ncom_cost = idocp.TimeVaryingCoMCost(robot, com_ref)\ncom_cost.set_q_weight(np.full(3, 1.0e06))\ncost.push_back(com_cost)\n\nconstraints = idocp.Constraints()\njoint_position_lower = 
idocp.JointPositionLowerLimit(robot)\njoint_position_upper = idocp.JointPositionUpperLimit(robot)\njoint_velocity_lower = idocp.JointVelocityLowerLimit(robot)\njoint_velocity_upper = idocp.JointVelocityUpperLimit(robot)\njoint_torques_lower = idocp.JointTorquesLowerLimit(robot)\njoint_torques_upper = idocp.JointTorquesUpperLimit(robot)\nmu = 0.7\nfriction_cone = idocp.FrictionCone(robot, mu)\nconstraints.push_back(joint_position_lower)\nconstraints.push_back(joint_position_upper)\nconstraints.push_back(joint_velocity_lower)\nconstraints.push_back(joint_velocity_upper)\nconstraints.push_back(joint_torques_lower)\nconstraints.push_back(joint_torques_upper)\nconstraints.push_back(friction_cone)\nconstraints.set_barrier(1.0e-01)\n\nT = t0 + cycle*(2*period_double_support+2*period_swing)\nN = math.floor(T/dt) \nmax_num_impulse_phase = 2*cycle\n\nnthreads = 4\nt = 0.0\nocp_solver = idocp.OCPSolver(robot, cost, constraints, T, N, \n max_num_impulse_phase, nthreads)\n\ncontact_points = [q0_3d_LF, q0_3d_LH, q0_3d_RF, q0_3d_RH]\ncontact_status_initial = robot.create_contact_status()\ncontact_status_initial.activate_contacts([0, 1, 2, 3])\ncontact_status_initial.set_contact_points(contact_points)\nocp_solver.set_contact_status_uniformly(contact_status_initial)\n\ncontact_status_even = robot.create_contact_status()\ncontact_status_even.activate_contacts([0, 3])\ncontact_status_even.set_contact_points(contact_points)\nocp_solver.push_back_contact_status(contact_status_even, t0)\n\ncontact_points[1][0] += 0.5 * step_length\ncontact_points[2][0] += 0.5 * step_length\ncontact_status_initial.set_contact_points(contact_points)\nocp_solver.push_back_contact_status(contact_status_initial, t0+period_swing)\n\ncontact_status_odd = robot.create_contact_status()\ncontact_status_odd.activate_contacts([1, 2])\ncontact_status_odd.set_contact_points(contact_points)\nocp_solver.push_back_contact_status(contact_status_odd, \n t0+period_swing+period_double_support)\n\ncontact_points[0][0] += step_length\ncontact_points[3][0] += step_length\ncontact_status_initial.set_contact_points(contact_points)\nocp_solver.push_back_contact_status(contact_status_initial, \n t0+2*period_swing+period_double_support)\n\nfor i in range(cycle-1):\n t1 = t0 + (i+1)*(2*period_swing+2*period_double_support)\n contact_status_even.set_contact_points(contact_points)\n ocp_solver.push_back_contact_status(contact_status_even, t1)\n\n contact_points[1][0] += step_length\n contact_points[2][0] += step_length\n contact_status_initial.set_contact_points(contact_points)\n ocp_solver.push_back_contact_status(contact_status_initial, t1+period_swing)\n\n contact_status_odd.set_contact_points(contact_points)\n ocp_solver.push_back_contact_status(contact_status_odd, \n t1+period_swing+period_double_support)\n\n contact_points[0][0] += step_length\n contact_points[3][0] += step_length\n contact_status_initial.set_contact_points(contact_points)\n ocp_solver.push_back_contact_status(contact_status_initial, \n t1+2*period_swing+period_double_support)\n\nq = q_standing\nv = np.zeros(robot.dimv())\n\nocp_solver.set_solution(\"q\", q)\nocp_solver.set_solution(\"v\", v)\nf_init = np.array([0.0, 0.0, 0.25*robot.total_weight()])\nocp_solver.set_solution(\"f\", f_init)\n\nocp_solver.init_constraints(t)\n\nnum_iteration = 20\nidocp.utils.benchmark.convergence(ocp_solver, t, q, v, num_iteration)\n# num_iteration = 1000\n# idocp.utils.benchmark.cpu_time(ocp_solver, t, q, v, num_iteration)\n\nviewer = idocp.utils.TrajectoryViewer(path_to_urdf=path_to_urdf, \n 
base_joint_type=idocp.BaseJointType.FloatingBase)\nviewer.set_contact_info(contact_frames, mu)\nviewer.display(dt, ocp_solver.get_solution('q'), \n ocp_solver.get_solution('f', 'WORLD'), viewer='gepetto')\n" ]
[ [ "numpy.array", "numpy.zeros", "numpy.full" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
rajatbansal01/Bulk-Email-Sender
[ "6f0f40167eddefaddf7a2353335a8b38cf285bca" ]
[ "code.py" ]
[ "import smtplib\nimport pandas as pd\n\n#Read Emails\ne=pd.read_excel('emails.xlsx') #here provide the excel file which is contaning the email addresses.\nemails_to_send=e['students'].values #students is the header for the emails.\nprint(emails_to_send)\n\n#Create and log to server\nserver = smtplib.SMTP('smtp.gmail.com' , 587) #starting the SMTP server.\nserver.starttls()\nserver.login('sender_email_address','password_for_login')\n\n#Massage body\nmsg='the meassage you want to send'\nsub=\"SUBJECT FOR THE EMAIL\"\nbody=\"Subject:{}\\n\\n{}\".format(sub,msg)\n\n#Send emails\nfor i in emails_to_send:\n server.sendmail('sender_email_address',i,body)\nprint('email sent')\n\nserver.quit() #quit the server here.\n" ]
[ [ "pandas.read_excel" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [ "0.23", "0.21", "2.0", "1.4", "0.19", "1.1", "1.5", "1.2", "0.24", "0.20", "1.0", "0.25", "1.3" ], "scipy": [], "tensorflow": [] } ]
FreJoe/federated
[ "124947b8b354f381fcb9726e0f055756ed626860" ]
[ "tensorflow_federated/python/core/impl/transforming_executor_test.py" ]
[ "# Lint as: python3\n# Copyright 2019, The TensorFlow Federated Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport asyncio\n\nfrom absl.testing import absltest\nimport tensorflow as tf\n\nfrom tensorflow_federated.python.core.api import computations\nfrom tensorflow_federated.python.core.api import intrinsics\nfrom tensorflow_federated.python.core.impl import transformations\nfrom tensorflow_federated.python.core.impl import transforming_executor\nfrom tensorflow_federated.python.core.impl.compiler import building_blocks\nfrom tensorflow_federated.python.core.impl.compiler import type_factory\nfrom tensorflow_federated.python.core.impl.executors import executor_base\n\n\nclass FakeEx(executor_base.Executor):\n\n async def create_value(self, val, unused):\n return str(building_blocks.ComputationBuildingBlock.from_proto(val))\n\n async def create_call(self, comp, arg=None):\n raise NotImplementedError\n\n async def create_tuple(self, elements):\n raise NotImplementedError\n\n async def create_selection(self, source, index=None, name=None):\n raise NotImplementedError\n\n\ndef _test_create_value(val, transform_fn):\n ex = transforming_executor.TransformingExecutor(transform_fn, FakeEx())\n return asyncio.get_event_loop().run_until_complete(ex.create_value(val))\n\n\[email protected]_computation(tf.int32)\ndef _identity(x):\n return x\n\n\nclass TransformingExecutorTest(absltest.TestCase):\n\n def test_with_removal_of_identity_mapping(self):\n\n @computations.federated_computation(type_factory.at_server(tf.int32))\n def comp(x):\n return intrinsics.federated_map(_identity, x)\n\n def transformation_fn(x):\n x, _ = transformations.remove_mapped_or_applied_identity(x)\n return x\n\n self.assertEqual(\n _test_create_value(comp, transformation_fn),\n '(FEDERATED_arg -> FEDERATED_arg)')\n\n def test_with_inlining_of_blocks(self):\n\n @computations.federated_computation(type_factory.at_server(tf.int32))\n def comp(x):\n return intrinsics.federated_zip([x, x])\n\n # TODO(b/134543154): Slide in something more powerful so that this test\n # doesn't break when the implementation changes; for now, this will do.\n def transformation_fn(x):\n x, _ = transformations.remove_mapped_or_applied_identity(x)\n x, _ = transformations.inline_block_locals(x)\n x, _ = transformations.replace_selection_from_tuple_with_element(x)\n return x\n\n self.assertIn('federated_zip_at_server(<FEDERATED_arg,FEDERATED_arg>)',\n _test_create_value(comp, transformation_fn))\n\n\nif __name__ == '__main__':\n tf.compat.v1.enable_v2_behavior()\n absltest.main()\n" ]
[ [ "tensorflow.compat.v1.enable_v2_behavior" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
gimac/OpenRadar
[ "cab1b60ebc69be635860e474b2b6564a25befb5c" ]
[ "mmwave/dataloader/adc.py" ]
[ "# Copyright 2019 The OpenRadar Authors. All Rights Reserved.\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n# http://www.apache.org/licenses/LICENSE-2.0\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\nimport codecs\nimport socket\nimport struct\nfrom enum import Enum\n\nimport numpy as np\n\n\nclass CMD(Enum):\n RESET_FPGA_CMD_CODE = '0100'\n RESET_AR_DEV_CMD_CODE = '0200'\n CONFIG_FPGA_GEN_CMD_CODE = '0300'\n CONFIG_EEPROM_CMD_CODE = '0400'\n RECORD_START_CMD_CODE = '0500'\n RECORD_STOP_CMD_CODE = '0600'\n PLAYBACK_START_CMD_CODE = '0700'\n PLAYBACK_STOP_CMD_CODE = '0800'\n SYSTEM_CONNECT_CMD_CODE = '0900'\n SYSTEM_ERROR_CMD_CODE = '0a00'\n CONFIG_PACKET_DATA_CMD_CODE = '0b00'\n CONFIG_DATA_MODE_AR_DEV_CMD_CODE = '0c00'\n INIT_FPGA_PLAYBACK_CMD_CODE = '0d00'\n READ_FPGA_VERSION_CMD_CODE = '0e00'\n\n def __str__(self):\n return str(self.value)\n\n\n# MESSAGE = codecs.decode(b'5aa509000000aaee', 'hex')\nCONFIG_HEADER = '5aa5'\nCONFIG_STATUS = '0000'\nCONFIG_FOOTER = 'aaee'\nADC_PARAMS = {'chirps': 128, # 32\n 'rx': 4,\n 'tx': 3,\n 'samples': 128,\n 'IQ': 2,\n 'bytes': 2}\n# STATIC\nMAX_PACKET_SIZE = 4096\nBYTES_IN_PACKET = 1456\n# DYNAMIC\nBYTES_IN_FRAME = (ADC_PARAMS['chirps'] * ADC_PARAMS['rx'] * ADC_PARAMS['tx'] *\n ADC_PARAMS['IQ'] * ADC_PARAMS['samples'] * ADC_PARAMS['bytes'])\nBYTES_IN_FRAME_CLIPPED = (BYTES_IN_FRAME // BYTES_IN_PACKET) * BYTES_IN_PACKET\nPACKETS_IN_FRAME = BYTES_IN_FRAME / BYTES_IN_PACKET\nPACKETS_IN_FRAME_CLIPPED = BYTES_IN_FRAME // BYTES_IN_PACKET\nUINT16_IN_PACKET = BYTES_IN_PACKET // 2\nUINT16_IN_FRAME = BYTES_IN_FRAME // 2\n\n\nclass DCA1000:\n \"\"\"Software interface to the DCA1000 EVM board via ethernet.\n\n Attributes:\n static_ip (str): IP to receive data from the FPGA\n adc_ip (str): IP to send configuration commands to the FPGA\n data_port (int): Port that the FPGA is using to send data\n config_port (int): Port that the FPGA is using to read configuration commands from\n\n\n General steps are as follows:\n 1. Power cycle DCA1000 and XWR1xxx sensor\n 2. Open mmWaveStudio and setup normally until tab SensorConfig or use lua script\n 3. Make sure to connect mmWaveStudio to the board via ethernet\n 4. Start streaming data\n 5. 
Read in frames using class\n\n Examples:\n >>> dca = DCA1000()\n >>> adc_data = dca.read(timeout=.1)\n >>> frame = dca.organize(adc_data, 128, 4, 256)\n\n \"\"\"\n\n def __init__(self, static_ip='192.168.33.30', adc_ip='192.168.33.180',\n data_port=4098, config_port=4096):\n # Save network data\n # self.static_ip = static_ip\n # self.adc_ip = adc_ip\n # self.data_port = data_port\n # self.config_port = config_port\n\n # Create configuration and data destinations\n self.cfg_dest = (adc_ip, config_port)\n self.cfg_recv = (static_ip, config_port)\n self.data_recv = (static_ip, data_port)\n\n # Create sockets\n self.config_socket = socket.socket(socket.AF_INET,\n socket.SOCK_DGRAM,\n socket.IPPROTO_UDP)\n self.data_socket = socket.socket(socket.AF_INET,\n socket.SOCK_DGRAM,\n socket.IPPROTO_UDP)\n\n # Bind data socket to fpga\n self.data_socket.bind(self.data_recv)\n\n # Bind config socket to fpga\n self.config_socket.bind(self.cfg_recv)\n\n self.data = []\n self.packet_count = []\n self.byte_count = []\n\n self.frame_buff = []\n\n self.curr_buff = None\n self.last_frame = None\n\n self.lost_packets = None\n\n def configure(self):\n \"\"\"Initializes and connects to the FPGA\n\n Returns:\n None\n\n \"\"\"\n # SYSTEM_CONNECT_CMD_CODE\n # 5a a5 09 00 00 00 aa ee\n print(self._send_command(CMD.SYSTEM_CONNECT_CMD_CODE))\n\n # READ_FPGA_VERSION_CMD_CODE\n # 5a a5 0e 00 00 00 aa ee\n print(self._send_command(CMD.READ_FPGA_VERSION_CMD_CODE))\n\n # CONFIG_FPGA_GEN_CMD_CODE\n # 5a a5 03 00 06 00 01 02 01 02 03 1e aa ee\n print(self._send_command(CMD.CONFIG_FPGA_GEN_CMD_CODE, '0600', 'c005350c0000'))\n\n # CONFIG_PACKET_DATA_CMD_CODE \n # 5a a5 0b 00 06 00 c0 05 35 0c 00 00 aa ee\n print(self._send_command(CMD.CONFIG_PACKET_DATA_CMD_CODE, '0600', 'c005350c0000'))\n\n def close(self):\n \"\"\"Closes the sockets that are used for receiving and sending data\n\n Returns:\n None\n\n \"\"\"\n self.data_socket.close()\n self.config_socket.close()\n\n def read(self, timeout=1):\n \"\"\" Read in a single packet via UDP\n\n Args:\n timeout (float): Time to wait for packet before moving on\n\n Returns:\n Full frame as array if successful, else None\n\n \"\"\"\n # Configure\n self.data_socket.settimeout(timeout)\n\n # Frame buffer\n ret_frame = np.zeros(UINT16_IN_FRAME, dtype=np.uint16)\n\n # Wait for start of next frame\n while True:\n packet_num, byte_count, packet_data = self._read_data_packet()\n if byte_count % BYTES_IN_FRAME_CLIPPED == 0:\n packets_read = 1\n ret_frame[0:UINT16_IN_PACKET] = packet_data\n break\n\n # Read in the rest of the frame \n while True:\n packet_num, byte_count, packet_data = self._read_data_packet()\n packets_read += 1\n\n if byte_count % BYTES_IN_FRAME_CLIPPED == 0:\n self.lost_packets = PACKETS_IN_FRAME_CLIPPED - packets_read\n return ret_frame\n\n curr_idx = ((packet_num - 1) % PACKETS_IN_FRAME_CLIPPED)\n try:\n ret_frame[curr_idx * UINT16_IN_PACKET:(curr_idx + 1) * UINT16_IN_PACKET] = packet_data\n except:\n pass\n\n if packets_read > PACKETS_IN_FRAME_CLIPPED:\n packets_read = 0\n\n def _send_command(self, cmd, length='0000', body='', timeout=1):\n \"\"\"Helper function to send a single commmand to the FPGA\n\n Args:\n cmd (CMD): Command code to send to the FPGA\n length (str): Length of the body of the command (if any)\n body (str): Body information of the command\n timeout (int): Time in seconds to wait for socket data until timeout\n\n Returns:\n str: Response message\n\n \"\"\"\n # Create timeout exception\n self.config_socket.settimeout(timeout)\n\n # Create and send 
message\n resp = ''\n msg = codecs.decode(''.join((CONFIG_HEADER, str(cmd), length, body, CONFIG_FOOTER)), 'hex')\n try:\n self.config_socket.sendto(msg, self.cfg_dest)\n resp, addr = self.config_socket.recvfrom(MAX_PACKET_SIZE)\n except socket.timeout as e:\n print(e)\n return resp\n\n def _read_data_packet(self):\n \"\"\"Helper function to read in a single ADC packet via UDP\n\n Returns:\n int: Current packet number, byte count of data that has already been read, raw ADC data in current packet\n\n \"\"\"\n data, addr = self.data_socket.recvfrom(MAX_PACKET_SIZE)\n packet_num = struct.unpack('<1l', data[:4])[0]\n byte_count = struct.unpack('>Q', b'\\x00\\x00' + data[4:10][::-1])[0]\n packet_data = np.frombuffer(data[10:], dtype=np.uint16)\n return packet_num, byte_count, packet_data\n\n def _listen_for_error(self):\n \"\"\"Helper function to try and read in for an error message from the FPGA\n\n Returns:\n None\n\n \"\"\"\n self.config_socket.settimeout(None)\n msg = self.config_socket.recvfrom(MAX_PACKET_SIZE)\n if msg == b'5aa50a000300aaee':\n print('stopped:', msg)\n\n def _stop_stream(self):\n \"\"\"Helper function to send the stop command to the FPGA\n\n Returns:\n str: Response Message\n\n \"\"\"\n return self._send_command(CMD.RECORD_STOP_CMD_CODE)\n\n @staticmethod\n def organize(raw_frame, num_chirps, num_rx, num_samples):\n \"\"\"Reorganizes raw ADC data into a full frame\n\n Args:\n raw_frame (ndarray): Data to format\n num_chirps: Number of chirps included in the frame\n num_rx: Number of receivers used in the frame\n num_samples: Number of ADC samples included in each chirp\n\n Returns:\n ndarray: Reformatted frame of raw data of shape (num_chirps, num_rx, num_samples)\n\n \"\"\"\n ret = np.zeros(len(raw_frame) // 2, dtype=complex)\n\n # Separate IQ data\n ret[0::2] = raw_frame[0::4] + 1j * raw_frame[2::4]\n ret[1::2] = raw_frame[1::4] + 1j * raw_frame[3::4]\n return ret.reshape((num_chirps, num_rx, num_samples))\n" ]
[ [ "numpy.frombuffer", "numpy.zeros" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
opendifferentialprivacy/smartnoise-sdk
[ "34154e5a45ef8ea1ced72bcbad44e0bf35ded872" ]
[ "sql/snsql/reader/base.py" ]
[ "import pandas as pd\n\n\nclass Reader:\n ENGINE = None\n\n @property\n def engine(self):\n return self.ENGINE.lower() if self.ENGINE else None\n\n def execute(self, query, *ignore, accuracy: bool = False):\n raise NotImplementedError(\"Execute must be implemented on the inherited class\")\n\n def _to_df(self, rows):\n # always assumes the first row is column names\n rows = list(rows)\n header = rows[0]\n if len(header) == 2 and isinstance(header[1], (list, tuple)):\n accuracy = True\n else:\n accuracy = False\n if len(rows) < 1:\n return None\n elif len(rows) < 2:\n if not accuracy:\n return pd.DataFrame(columns=header)\n else:\n return (pd.DataFrame(columns=header[0]), [pd.DataFrame(columns=h) for h in header[1]])\n else:\n if not accuracy:\n return pd.DataFrame(rows[1:], columns=header)\n else:\n result = []\n accuracies = [[] for a in header[1]]\n for result_row, acc in rows:\n result.append(result_row)\n for acc_row, idx in zip(acc, range(len(acc))):\n accuracies[idx].append(acc_row)\n\n return [pd.DataFrame(result[1:], columns=result[0]),\n [pd.DataFrame(a[1:], columns=a[0]) for a in accuracies]]\n\n def execute_df(self, query, *ignore, accuracy: bool = False):\n if not isinstance(query, str):\n raise ValueError(\"Please pass a string to this function.\")\n\n return self._to_df(self.execute(query, accuracy=accuracy))\n" ]
[ [ "pandas.DataFrame" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [ "0.23", "0.21", "2.0", "1.4", "0.19", "1.1", "1.5", "1.2", "0.24", "0.20", "1.0", "0.25", "1.3" ], "scipy": [], "tensorflow": [] } ]
Chenlu-Wu/CarND-LaneLines
[ "db836515cbd8064c78ced5fe7a90ba06e8292603" ]
[ "lane_line_tracking.py" ]
[ "#importing some useful packages\nimport matplotlib.pyplot as plt\nimport matplotlib.image as mpimg\nimport numpy as np\nimport cv2\nimport os\nimport math\nget_ipython().run_line_magic('matplotlib', 'inline')\n\n\ndef grayscale(img):\n \"\"\"Applies the Grayscale transform\n This will return an image with only one color channel\n but NOTE: to see the returned image as grayscale\n (assuming your grayscaled image is called 'gray')\n you should call plt.imshow(gray, cmap='gray')\"\"\"\n return cv2.cvtColor(img, cv2.COLOR_RGB2GRAY)\n # Or use BGR2GRAY if you read an image with cv2.imread()\n # return cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)\n \ndef canny(img, low_threshold, high_threshold):\n \"\"\"Applies the Canny transform\"\"\"\n return cv2.Canny(img, low_threshold, high_threshold)\n\ndef gaussian_blur(img, kernel_size):\n \"\"\"Applies a Gaussian Noise kernel\"\"\"\n return cv2.GaussianBlur(img, (kernel_size, kernel_size), 0)\n\ndef region_of_interest(img, vertices):\n \"\"\"\n Applies an image mask.\n \n Only keeps the region of the image defined by the polygon\n formed from `vertices`. The rest of the image is set to black.\n `vertices` should be a numpy array of integer points.\n \"\"\"\n #defining a blank mask to start with\n mask = np.zeros_like(img) \n \n #defining a 3 channel or 1 channel color to fill the mask with depending on the input image\n if len(img.shape) > 2:\n channel_count = img.shape[2] # i.e. 3 or 4 depending on your image\n ignore_mask_color = (255,) * channel_count\n else:\n ignore_mask_color = 255\n \n #filling pixels inside the polygon defined by \"vertices\" with the fill color \n cv2.fillPoly(mask, vertices, ignore_mask_color)\n \n #returning the image only where mask pixels are nonzero\n masked_image = cv2.bitwise_and(img, mask)\n return masked_image\n\n\ndef draw_lines(img, lines, color=[255, 0, 0], thickness=10):\n \"\"\"\n NOTE: this is the function you might want to use as a starting point once you want to \n average/extrapolate the line segments you detect to map out the full\n extent of the lane (going from the result shown in raw-lines-example.mp4\n to that shown in P1_example.mp4). \n \n Think about things like separating line segments by their \n slope ((y2-y1)/(x2-x1)) to decide which segments are part of the left\n line vs. the right line. Then, you can average the position of each of \n the lines and extrapolate to the top and bottom of the lane.\n \n This function draws `lines` with `color` and `thickness`. 
\n Lines are drawn on the image inplace (mutates the image).\n If you want to make the lines semi-transparent, think about combining\n this function with the weighted_img() function below\n \"\"\"\n\n for line in lines:\n sumx1 = 0\n sumy1 = 0\n sumx2 = 0\n sumy2 = 0\n count = 0\n for x1,y1,x2,y2 in line:\n count=count+1\n sumx1+=x1\n sumy1+=y1\n sumx2+=x2\n sumy2+=y2\n avx1 = int(sumx1/count)\n avy1 = int(sumy1/count)\n avx2 = int(sumx2/count)\n avy2 = int(sumy2/count)\n slope = (avy1-avy2)/(avx1-avx2)\n intercept = int(avy1 - avx1*slope)\n if ((avy1-avy2)/(avx1-avx2)>0.5) or ((avy1-avy2)/(avx1-avx2)<(-0.5)):\n cv2.line(img, (avx1, avy1), (avx2, avy2), color, thickness)\n\ndef hough_lines(img, rho, theta, threshold, min_line_len, max_line_gap):\n \"\"\"\n `img` should be the output of a Canny transform.\n \n Returns an image with hough lines drawn.\n \"\"\"\n lines = cv2.HoughLinesP(img, rho, theta, threshold, np.array([]), minLineLength=min_line_len, maxLineGap=max_line_gap)\n line_img = np.zeros((img.shape[0], img.shape[1], 3), dtype=np.uint8)\n draw_lines(line_img, lines)\n return line_img\n\n# Python 3 has support for cool math symbols.\n\ndef weighted_img(img, initial_img, α=0.8, β=1., γ=0.):\n \"\"\"\n `img` is the output of the hough_lines(), An image with lines drawn on it.\n Should be a blank image (all black) with lines drawn on it.\n \n `initial_img` should be the image before any processing.\n \n The result image is computed as follows:\n \n initial_img * α + img * β + γ\n NOTE: initial_img and img must be the same shape!\n \"\"\"\n return cv2.addWeighted(initial_img, α, img, β, γ)\n\n\n\ndef find_lane(name, v1, v2 ):\n \n readname= 'test_images/' + name \n img1=mpimg.imread(readname)\n plt.figure()\n plt.imshow(img1)\n \n grayimg1 = grayscale(img1)\n grayname = name +'_grayscale.jpg'\n cv2.imwrite(os.path.join('test_images_output/' , grayname), grayimg1)\n \n kernal_size = 3\n blurgrayimg1 = gaussian_blur(grayimg1, kernal_size)\n blurname = name +'_Gaussin_blur.jpg'\n cv2.imwrite(os.path.join('test_images_output/' , blurname), blurgrayimg1)\n \n \n low_threshold = 84\n high_threshold = 168\n edges1 = canny(blurgrayimg1, low_threshold, high_threshold)\n cannyname = name +'_canny.jpg'\n cv2.imwrite(os.path.join('test_images_output/' , cannyname), edges1) \n \n imshape = img1.shape\n vertices = np.array([[(0,imshape[0]),v1, v2, (imshape[1],imshape[0])]], dtype=np.int32)\n #vertices = np.array([[0,imgshape1[0]], [450,315], [525,315], [imgshape1[1],imgshape1[0]]], dtype=np.int32)\n masked_img1 = region_of_interest(edges1, vertices)\n maskedname = name +'_masked.jpg'\n cv2.imwrite(os.path.join('test_images_output/' , maskedname), masked_img1) \n \n \n rho = 2\n theta = np.pi/180\n threshold = 15\n min_line_len = 10\n max_line_gap = 5\n \n max_line_gap_con = 120\n \n line_img1 = hough_lines(masked_img1, rho, theta, threshold, min_line_len, max_line_gap)\n houghname = name +'_hough.jpg'\n cv2.imwrite(os.path.join('test_images_output/' , houghname), cv2.cvtColor(line_img1, cv2.COLOR_RGB2BGR))\n #cv2.imwrite(os.path.join('test_images_output/' , houghname), line_img1)\n \n line_con_img1 = hough_lines(masked_img1, rho, theta, threshold, min_line_len, max_line_gap_con)\n \n final_seg_img1 = weighted_img(line_img1, img1, α=0.8, β=1., γ=0.)\n final_con_img1 = weighted_img(line_con_img1, img1, α=0.8, β=1., γ=0.)\n plt.figure()\n plt.imshow(final_seg_img1)\n plt.figure()\n plt.imshow(final_con_img1)\n #cv2.imwrite('sample_out_2.png', cv2.cvtColor(final_con_img1, cv2.COLOR_RGB2BGR)) \n segname 
= name + '_segment.jpg'\n continame = name + '_thirdpass.jpg'\n cv2.imwrite(os.path.join('test_images_output/' , segname), cv2.cvtColor(final_seg_img1, cv2.COLOR_RGB2BGR))\n cv2.imwrite(os.path.join('test_images_output/' , continame), cv2.cvtColor(final_con_img1, cv2.COLOR_RGB2BGR))\n \nos.listdir(\"test_images/\") \nname1 = 'solidWhiteCurve.jpg'\nv1 = (457, 320)\nv2 = (490, 315)\nfind_lane(name1, v1, v2)\n\nname2 = 'solidWhiteRight.jpg'\nv1 = (457, 320)\nv2 = (490, 315)\nfind_lane(name2, v1, v2)\n\nname3 = 'solidYellowCurve.jpg'\nv1 = (444, 326)\nv2 = (529, 326)\nfind_lane(name3, v1, v2)\n\nname4 = 'solidYellowCurve2.jpg'\nv1 = (444, 326)\nv2 = (529, 326)\nfind_lane(name4, v1, v2)\n\nname5 = 'solidYellowLeft.jpg'\nv1 = (444, 326)\nv2 = (529, 326)\nfind_lane(name5, v1, v2)\n\nname6 = 'whiteCarLaneSwitch.jpg'\nv1 = (444, 326)\nv2 = (529, 326)\nfind_lane(name6, v1, v2)\n\n\n# ## Test on Videos\n# \n# You know what's cooler than drawing lanes over images? Drawing lanes over video!\n# Import everything needed to edit/save/watch video clips\nfrom moviepy.editor import VideoFileClip\nfrom IPython.display import HTML\n\n\n# In[24]:\n\n\ndef process_image(image):\n # NOTE: The output you return should be a color image (3 channel) for processing video below\n # TODO: put your pipeline here,\n # you should return the final output (image where lines are drawn on lanes)\n grayimg1 = grayscale(image)\n #grayname = name +'_grayscale.jpg'\n #cv2.imwrite(os.path.join('test_images_output/' , grayname), grayimg1)\n \n kernal_size = 3\n blurgrayimg1 = gaussian_blur(grayimg1, kernal_size)\n \n \n low_threshold = 84\n high_threshold = 168\n edges1 = canny(blurgrayimg1, low_threshold, high_threshold)\n \n imshape = image.shape\n vertices = np.array([[(0,imshape[0]),(444, 326), (529, 326), (imshape[1],imshape[0])]], dtype=np.int32)\n #vertices = np.array([[0,imgshape1[0]], [450,315], [525,315], [imgshape1[1],imgshape1[0]]], dtype=np.int32)\n masked_img1 = region_of_interest(edges1, vertices)\n \n rho = 2\n theta = np.pi/180\n threshold = 15\n min_line_len = 20\n max_line_gap = 15\n \n max_line_gap_con = 90\n \n line_img1 = hough_lines(masked_img1, rho, theta, threshold, min_line_len, max_line_gap) \n line_con_img1 = hough_lines(masked_img1, rho, theta, threshold, min_line_len, max_line_gap_con)\n \n #final_seg_img1 = weighted_img(line_img1, img1, α=0.8, β=1., γ=0.)\n result = weighted_img(line_con_img1, image, α=0.8, β=1., γ=0.)\n #plt.figure()\n #plt.imshow(final_seg_img1)\n\n return result\n\n\n\n# In[27]:\n\n\n'''\nimg1=mpimg.imread('test_images/solidWhiteCurve.jpg')\nplt.figure()\nplt.imshow(img1)\n'''\nwhite_output = 'test_videos_output/solidWhiteRight.mp4'\nclip1 = VideoFileClip(\"test_videos/solidWhiteRight.mp4\")\nwhite_clip = clip1.fl_image(process_image) #NOTE: this function expects color images!!\nget_ipython().run_line_magic('time', 'white_clip.write_videofile(white_output, audio=False)')\n\n\n# Play the video inline, or if you prefer find the video in your filesystem (should be in the same directory) and play it in your video player of choice.\n\n# In[28]:\n\n\nHTML(\"\"\"\n<video width=\"960\" height=\"540\" controls>\n <source src=\"{0}\">\n</video>\n\"\"\".format(white_output))\n\n\n\n# In[25]:\n\n\nyellow_output = 'test_videos_output/solidYellowLeft.mp4'\nclip2 = VideoFileClip('test_videos/solidYellowLeft.mp4')\nyellow_clip = clip2.fl_image(process_image)\nget_ipython().run_line_magic('time', 'yellow_clip.write_videofile(yellow_output, audio=False)')\n\n\n# In[26]:\n\n\nHTML(\"\"\"\n<video 
width=\"960\" height=\"540\" controls>\n <source src=\"{0}\">\n</video>\n\"\"\".format(yellow_output))\n\n\n\nchallenge_output = 'test_videos_output/challenge.mp4'\n## To speed up the testing process you may want to try your pipeline on a shorter subclip of the video\n## To do so add .subclip(start_second,end_second) to the end of the line below\n## Where start_second and end_second are integer values representing the start and end of the subclip\n## You may also uncomment the following line for a subclip of the first 5 seconds\n##clip3 = VideoFileClip('test_videos/challenge.mp4').subclip(0,5)\nclip3 = VideoFileClip('test_videos/challenge.mp4')\nchallenge_clip = clip3.fl_image(process_image)\nget_ipython().run_line_magic('time', 'challenge_clip.write_videofile(challenge_output, audio=False)')\n\n\n# In[ ]:\n\n\nHTML(\"\"\"\n<video width=\"960\" height=\"540\" controls>\n <source src=\"{0}\">\n</video>\n\"\"\".format(challenge_output))\n\n" ]
[ [ "matplotlib.pyplot.imshow", "matplotlib.image.imread", "numpy.zeros_like", "numpy.array", "numpy.zeros", "matplotlib.pyplot.figure" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
Taye310/hmd
[ "46d453392213b8ead39f8e850054fc7bbb81146b" ]
[ "eval/pred_hmd_ja.py" ]
[ "import numpy as np\nimport cv2, argparse, pickle, sys, PIL.Image\nimport openmesh as om\nfrom tqdm import trange\nsys.path.append(\"../src/\")\nfrom predictor import joint_predictor\nfrom predictor import anchor_predictor\nfrom mesh_edit import fast_deform_dja\nfrom mesh_edit import fast_deform_dsa\nfrom renderer import SMPLRenderer\nfrom utility import center_crop\nfrom utility import make_trimesh\nfrom utility import get_anchor_posi\nfrom utility import get_joint_posi\n\n# parse arguments\nparser = argparse.ArgumentParser()\nparser.add_argument('--num', type = int, required = True, \n help = 'data_num')\nparser.add_argument('--set', type = str, required = True, \n help = 'recon or syn')\nopt = parser.parse_args()\n\nassert opt.set in [\"recon\", \"syn\"], \\\n \"set must be one of [recon, syn]\"\n\ndata_num = int(opt.num)\n\npdt_j = joint_predictor(\"../demo/pretrained_model/pretrained_joint.pth\")\npdt_a = anchor_predictor(\"../demo/pretrained_model/pretrained_anchor.pth\")\n\nrenderer = SMPLRenderer(face_path=\"../predef/smpl_faces.npy\")\n\nfaces = np.load(\"../predef/smpl_faces.npy\")\n\n# make verts for joint deform\nwith open ('../predef/mesh_joint_list.pkl', 'rb') as fp:\n item_dic = pickle.load(fp)\npoint_list = item_dic[\"point_list\"]\nindex_map = item_dic[\"index_map\"]\n\n# make verts for anchor deform\nwith open ('../predef/dsa_achr.pkl', 'rb') as fp:\n dic_achr = pickle.load(fp)\nachr_id = dic_achr['achr_id']\nachr_num = len(achr_id)\n\n\ntr = trange(data_num, desc='Bar desc', leave=True)\nfor test_ind in tr:\n\n src_img = np.array(PIL.Image.open(\"./eval_data/%s_set/input_masked/%03d_img.png\" \\\n % (opt.set, test_ind)))\n\n #verts, cam, proc_para, std_img = hmr_pred.predict(src_img)\n with open ('./eval_data/%s_set/pred_save/pre_%03d.pkl' % \\\n (opt.set, test_ind), 'rb') as fp:\n hmr_data = pickle.load(fp)\n verts = hmr_data['verts']\n cam = hmr_data['cam']\n proc_para = hmr_data['proc_para']\n std_img = hmr_data['std_img']\n\n mesh = make_trimesh(verts, faces, compute_vn = True)\n vert_norms = mesh.vertex_normals()\n\n # get proj sil\n proj_sil = renderer.silhouette(verts = verts,\n cam = cam,\n img_size = src_img.shape,\n norm = True)\n\n # make joint posi\n joint_posi = get_joint_posi(verts, point_list)\n\n sil_j = np.expand_dims(proj_sil, 2)\n src_j = np.zeros((10, 4, 64, 64))\n for i in range(len(joint_posi)):\n crop_sil = center_crop(sil_j, joint_posi[i], 64)\n crop_img = center_crop(src_img, joint_posi[i], 64)\n crop_img = crop_img.astype(np.float)\n crop_img = crop_img - crop_img[31, 31, :]\n crop_img = np.absolute(crop_img)\n crop_img = crop_img/255.0\n src_j[i,0,:,:] = np.rollaxis(crop_sil, 2, 0)\n src_j[i,1:4,:,:] = np.rollaxis(crop_img, 2, 0)\n\n # predict joint\n joint_tsr = pdt_j.predict_batch(src_j)\n joint_para = np.array(joint_tsr.data.cpu())\n joint_para = np.concatenate((joint_para, np.zeros((10,1))),axis = 1)\n\n # apply scale\n joint_para = joint_para * 0.007# 0.007\n\n flat_point_list = [item for sublist in point_list for item in sublist]\n\n num_mj = len(point_list)\n j_list = []\n for i in range(num_mj):\n j_p_list = []\n for j in range(len(point_list[i])):\n j_p_list.append(verts[point_list[i][j]])\n j_list.append(sum(j_p_list)/len(j_p_list))\n\n new_jv = []\n ori_jv = []\n for i in range(len(j_list)):\n # make new joint verts\n for j in point_list[i]:\n new_jv.append(verts[j] + joint_para[i])\n ori_jv.append(verts[j])\n new_jv = np.array(new_jv)\n ori_jv = np.array(ori_jv)\n\n # joint deform\n fd_ja = fast_deform_dja(weight = 10.0)\n 
ja_verts = fd_ja.deform(np.asarray(verts), new_jv)\n\n # make src_a\n proj_sil_j = renderer.silhouette(verts = ja_verts)\n src_a = np.zeros((200, 4, 32, 32))\n\n # make anchor posi\n anchor_verts = np.zeros((200, 3))\n for i in range(achr_num):\n anchor_verts[i, :] = ja_verts[achr_id[i], :]\n achr_posi = get_anchor_posi(anchor_verts)\n\n for i in range(len(achr_posi)):\n crop_sil = center_crop(proj_sil_j, achr_posi[i], 32)\n crop_img = center_crop(src_img, achr_posi[i], 32)\n crop_img = crop_img.astype(np.int)\n crop_img = crop_img - crop_img[15, 15, :]\n crop_img = np.absolute(crop_img)\n crop_img = crop_img.astype(np.float)/255.0\n src_a[i,0,:,:] = crop_sil\n src_a[i,1:4,:,:] = np.rollaxis(crop_img, 2, 0)\n\n # predict anchor\n achr_tsr = pdt_a.predict_batch(src_a)\n achr_para = np.array(achr_tsr.data.cpu()) \n achr_para = achr_para * 0.003\n \n # compute the achr movement\n ori_av = []\n new_av = []\n for j in range(achr_num):\n ori_av.append(ja_verts[achr_id[j]])\n new_av.append(ja_verts[achr_id[j]] + \n vert_norms[achr_id[j]] * achr_para[j])\n ori_av = np.array(ori_av)\n new_av = np.array(new_av)\n \n # build active list of anchor, added in 2018-10-30\n contours, _ = cv2.findContours(proj_sil_j, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)\n cim = np.zeros_like(proj_sil_j)\n cv2.drawContours(cim, contours, -1, 255, 1)\n cim = cv2.dilate(cim, kernel=np.ones((6, 6)))\n active_index = np.ones(len(achr_posi))\n for j in range(len(achr_posi)):\n ay = np.int(np.round(achr_posi[j][1]))\n ax = np.int(np.round(achr_posi[j][0]))\n if cim[ay, ax] == 0:\n active_index[j] = 0\n \n # anchor deform\n fd_sa = fast_deform_dsa(weight=1.0)\n sa_verts = fd_sa.deform(np.asarray(ja_verts), \n new_av,\n active_index = active_index,\n )\n \n # visualize\n if False:\n ori_proj_img = renderer(verts = verts, img = src_img)\n joint_move_img = draw_vert_move(ori_jv, new_jv, src_img)\n achr_move_img = draw_vert_move(ori_av, new_av, src_img)\n ja_proj_img = renderer(verts = ja_verts, img = src_img)\n sa_proj_img = renderer(verts = sa_verts, img = src_img)\n final_prv_img = np.concatenate((ori_proj_img, \n joint_move_img, \n ja_proj_img, \n achr_move_img, \n sa_proj_img), axis = 1)\n show_img_arr(final_prv_img.astype(np.uint8))\n \n mesh_j = make_trimesh(ja_verts, faces)\n om.write_mesh(\"./eval_data/%s_set/pred_save/j_%03d.obj\" % \\\n (opt.set, test_ind), mesh_j)\n mesh_a = make_trimesh(sa_verts, faces)\n om.write_mesh(\"./eval_data/%s_set/pred_save/a_%03d.obj\" % \\\n (opt.set, test_ind), mesh_a)" ]
[ [ "numpy.rollaxis", "numpy.expand_dims", "numpy.absolute", "numpy.asarray", "numpy.ones", "numpy.concatenate", "numpy.round", "numpy.zeros_like", "numpy.load", "numpy.array", "numpy.zeros" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
gwallison/OH_injection_wells_compile
[ "70ffd6bfc99932d1364574335cc289ca23390655", "70ffd6bfc99932d1364574335cc289ca23390655" ]
[ "processInjectionInput.py", "Injection_Wells1.py" ]
[ "# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Sat Dec 1 09:21:40 2018\n\n@author: @gary.allison\n\nThis code is used to take ODNR files for Brine disposal fee and \neventually create a file to be used to show overall injection volumes.\n\nThe ODNR data have several limitations that we must find and account for: \n - data type consistency, \n - lumped API numbers\n - typos\n - different file formats across the years etc.\n \n\n\"\"\"\nimport pandas as pd\nimport numpy as np\nimport pandas.api.types as ptypes\nfrom validAPI import getAPI10\n\n##### --------------------------------------------------\n#### Input file definitions\n##### --------------------------------------------------\n# set data dirs for input files and for resulting output files\ndatadir = './sources/'\noutdir = './out/'\nindir = datadir+'OH_injection/'\npre_proc_out = outdir+'injection_tall_pre.csv'\n\n\n# input files are in four different formats:\n# for the oldest, specify filename, year and quarter: tuple (filename,yr,q)\n# all columns are named the same!!\nfn_old = [('OH_1ST QUARTER 2011 BRINE DISPOSAL FEES.xls',2011,1),\n ('OH_2ND QUARTER 2011 BRINE DISPOSAL FEES.xls',2011,2),\n ('OH_3RD QUARTER 2011 BRINE DISPOSAL FEES-1.xls',2011,3),\n ('OH_4TH QUARTER 2010 BRINE DISPOSAL FEES.xls',2010,4),\n ('OH_4TH QUARTER 2011 BRINE DISPOSAL FEES.xls',2011,4),\n ('OH_Brine Disposal Fee - 3rd Quarter 2010-2.xls',2010,3)]\n\n# the 2012 file is ina funky state - the set of worksheets have two different formats: a blend of old and main\n# so we have to process it separately\nfn_2012 = 'OH_BRINE DISPOSAL FEES FOR 2012.xls'\n\n# bulk of the data are here - first four worksheets are quarters. \n# Total worksheet (the fifth one) is ignored\n# specify the filename and the year: tuple: (filename,year)\nfn_2013_17 = [('BRINE DISPOSAL FEES FOR 2013.xlsx',2013),\n ('BRINE DISPOSAL FEES FOR 2014.xlsx',2014),\n ('BRINE DISPOSAL FEES FOR 2015.xlsx',2015),\n ('BRINE DISPOSAL FEES FOR 2016.xlsx',2016), \n ('BRINE DISPOSAL FEES FOR 2017.xlsx',2017)]\n\n# Finally, the current file is of a different format and must also\n# be treated separately. 
It currently includes all quarters of the \n# year (even if they are in the future) and on a single worksheet\nfn_2018_plus = [('BRINE DISPOSAL FEES FOR 2018.xlsx',2018),\n                ('BRINE DISPOSAL FEES FOR 2019.xlsx',2019)]\n\n# The text file with the records to collapse into one\naggfn = 'aggregateAPI.txt'\n\n# We define these temporary files to examine output in progress\ntempf = outdir+'temp.csv'\ntempf1 = outdir+'temp1.csv'\n\n\ndef fetchAggregateList(fn=aggfn):\n    agglist = []\n    aggaction = {}\n    with open(fn) as f:\n        f.readline() # ignore header\n        for ln in f.readlines():\n            lst = ln.split('|')\n            key = (lst[0],int(lst[1]),int(lst[2]))\n            agglist.append(key)\n            aggaction[key] = lst[3] # what to do when you find a match?\n    return agglist, aggaction\nagglist, aggaction = fetchAggregateList()\n#print(agglist)\n\ndef is_on_AggregateList(API10,yr,q):\n    if (API10,yr,q) in agglist:\n        #print(f'Aggregating {API10}')\n        return True\n    return False\n\ndef getCollapseSet(ser,yr,q):\n    # return list of APIs from the AggList to collapse\n    clst = []\n    for index,row in ser.iteritems():\n        if is_on_AggregateList(row,yr,q):\n            clst.append(row)\n    cset = set(clst) \n    return cset\n\n##### --------------------------------------------------\n##### Input file readers\n##### --------------------------------------------------\n \n#### -------------------------2010 - 2011 ---------------\ndef read_old(fn,yr,quar,review=False,flag_problems=True):\n    # read excel file and produce a pandas dataframe\n    # we keep only 4 columns from the sheet, ignore the header,\n    # and skip several rows at the top.\n    # Unlike later files, these have only 1 volume column with a \n    # label column to specify if it is from in-district or out-of-district\n    # We must combine the two (in and out) into a single record\n    d = pd.read_excel(indir+fn,skiprows=5,header=None,usecols=[7,8,10,11],\n                      names=['CompanyName','APIstr','Vol','In_Out'])\n    # some volume cells contain the word 'zero', \n    d.Vol = d.Vol.where(d.Vol.str.lower().str.strip()!='zero',0)\n    d.Vol = pd.to_numeric(d.Vol)\n    \n    # make all of in-out into lowercase\n    d.In_Out = d.In_Out.str.lower()\n    \n    # some In_Out cells have 'zero' in them: assign them to In\n    d.In_Out = d.In_Out.where(d.In_Out.str.lower().str.strip()!='zero','in')\n\n    assert ptypes.is_numeric_dtype(d.Vol)\n    assert ptypes.is_string_dtype(d.CompanyName)\n    assert ptypes.is_string_dtype(d.APIstr)\n    api10 = []\n    for index, row in d.iterrows():\n        api10.append(getAPI10(row[1],yr,quar,flag_problems=flag_problems))\n        if review:\n            print(f'{api10[-1]}, {row[1]},{yr},{quar}')\n    d['API10'] = api10\n\n    ### ---------- handle multiple entries for a given API ---------\n    cset = getCollapseSet(d.API10,yr,quar)\n    #print(cset)\n    for capi in cset:\n        tmp = d[d.API10 == capi]\n        action = aggaction[(capi,yr,quar)]\n        if action == 'sum':\n            vol = tmp.groupby(['API10','In_Out'])['Vol'].sum()\n        else:\n            print(f'UNRECOGNIZED ACTION for {capi}')\n        \n        # make into df\n        vol = pd.DataFrame(vol)\n        # always take last of collapsed - assuming it is most recent\n        other = tmp.groupby(['API10','In_Out'])['APIstr','CompanyName'].last()\n        mg = pd.merge(vol,other,left_index=True,right_index=True,\n                      validate='1:1')\n        mg.reset_index(level=[0,1],inplace=True) \n        tmp = d[d.API10 != capi] # drop the old\n        d = pd.concat([tmp,mg],sort=True) # add the new\n    ### --------------------------------------------------------------\n    \n    ### --------------- Make a meta df ----------------------------\n    meta = d.copy().filter(['API10','APIstr','CompanyName'])\n    meta = 
meta.groupby(['API10'],as_index=False)['APIstr','CompanyName'].first() \n\n    ### ----------------- snag all in-district records\n    dIn = d[d.In_Out.str.lower().str[0]=='i'] #'In district'\n    dIn = dIn.filter(['API10','Vol'])\n    dIn.columns = ['API10','Vol_InDist']\n# =============================================================================\n#     print(f'{len(dIn)}, {len(dIn.API10.unique())}')\n#     print(dIn[dIn.API10.duplicated()==True])\n#     dIn.sort_values(by='API10').to_csv(tempf)\n# =============================================================================\n    assert len(dIn)==len(dIn.API10.unique())\n    # put together with all\n    meta = pd.merge(meta,dIn,how='left',on='API10',validate='1:1')\n    meta.to_csv(tempf)\n    \n    \n    ### --------------- snag all out-of-district records\n    dOut = d[d.In_Out.str.lower().str[0]=='o']\n    dOut = dOut.filter(['API10','Vol'])\n    dOut.columns = ['API10','Vol_OutDist']\n# =============================================================================\n#     print(f'{len(dOut)}, {len(dOut.API10.unique())}')\n#     print(dOut[dOut.API10.duplicated()==True])\n#     dOut.sort_values(by='API10').to_csv(tempf)\n# =============================================================================\n    assert len(dOut)==len(dOut.API10.unique())\n    meta = pd.merge(meta,dOut,how='left',on='API10',validate='1:1')\n    meta.to_csv(tempf)\n    \n    meta['Year'] = yr\n    meta['Quarter']= quar\n    assert len(d.API10.unique())==len(meta) \n    return meta\n\n\n###### ------------- Read the 2012 file --------------------\ndef read_2012(fn,review=False,flag_problems=True):\n    # read excel file and produce a pandas dataframe\n    # we keep only 4 columns from the sheet, ignore the header,\n    # and skip several rows at the top.\n    # This file has TWO different formats, so we must accommodate that (uc1 and uc2) \n    dlst = []\n    uc1 = [1,2,4,8] # sheet 0,2,3 are different from sheet 1\n    uc2 = [7,8,10,14] # so we take from different columns\n    for ws in [0,1,2,3]: # ws 1 is like 'main'; others like 'old'\n        yr = 2012\n        quar = ws+1 # worksheets are labeled 0-3\n        print(f'Processing {yr} Q{quar}')\n        if ws == 1:\n            uc = uc1\n        else:\n            uc= uc2\n        d = pd.read_excel(indir+fn_2012,skiprows=6,sheet_name=ws,\n                          usecols=uc,header=None,\n                          names=['CompanyName','APIstr','Vol_InDist','Vol_OutDist'])\n        d.Vol_InDist = pd.to_numeric(d.Vol_InDist,errors='coerce')\n        d.Vol_OutDist = pd.to_numeric(d.Vol_OutDist,errors='coerce')\n        d = d.dropna(axis=0,subset=['CompanyName']) # no CompanyName = no record\n        assert ptypes.is_numeric_dtype(d.Vol_InDist)\n        assert ptypes.is_numeric_dtype(d.Vol_OutDist)\n        assert ptypes.is_string_dtype(d.CompanyName)\n        assert ptypes.is_string_dtype(d.APIstr)\n        api10 = []\n        for index, row in d.iterrows():\n            api10.append(getAPI10(row[1],yr,quar,flag_problems=flag_problems))\n            if review:\n                print(f'{api10[-1]}, {row[1]},{yr},{quar}')\n        d['API10'] = api10\n\n        ### ---------- handle multiple entries for a given API ---------\n        cset = getCollapseSet(d.API10,yr,quar)\n        print(f'  collapse set for this worksheet: {cset}')\n        for capi in cset:\n            tmp = d[d.API10 == capi]\n            action = aggaction[(capi,yr,quar)]\n            if action == 'sum':\n                vol = tmp.groupby(['API10'])['Vol_InDist','Vol_OutDist'].sum()\n            else:\n                print(f'UNRECOGNIZED ACTION for {capi}')\n            # make into df\n            vol = pd.DataFrame(vol)\n            # always take last of collapsed - assuming it is most recent\n            other = tmp.groupby(['API10'])['APIstr','CompanyName'].last()\n            mg = pd.merge(vol,other,left_index=True,right_index=True,\n                          validate='1:1')\n            mg.reset_index(level=[0],inplace=True) \n            tmp = d[d.API10 != capi] # 
drop the old\n            d = pd.concat([tmp,mg],sort=True) # add the new\n        ### --------------------------------------------------------------\n# =============================================================================\n#         print(f'{len(d)}, {len(d.API10.unique())}')\n#         print(d[d.API10.duplicated()==True])\n#         d.sort_values(by='API10').to_csv(tempf)\n# =============================================================================\n        assert len(d)==len(d.API10.unique())\n        d['Year'] = 2012\n        d['Quarter'] = quar \n\n        dlst.append(d)\n        \n    trans2012 = pd.concat(dlst,sort=True)\n    #trans2012.to_csv(tempf)\n    \n    return trans2012\n\n#### ---------------------- Main data files 2013-2017+ -------------\n\ndef read_2013_17(fn,yr,review=False,flag_problems=True):\n    # read excel file and produce a pandas dataframe\n    # we keep only 4 columns from the sheet, ignore the header,\n    # and skip several rows at the top.\n\n    dlst = []\n    for ws in [0,1,2,3]: # four quarterly worksheets\n        quar = ws+1\n        print(f'Processing {yr} Q{quar}')\n        d = pd.read_excel(indir+fn,skiprows=6,sheet_name=ws,\n                          usecols=[0,1,2,4,8],header=None,\n                          names=['AltName','CompanyName','APIstr','Vol_InDist','Vol_OutDist'])\n        \n        # have to re-order columns to match earlier formats\n        d = d.filter(['CompanyName','APIstr','Vol_InDist','Vol_OutDist','AltName'])\n        d.Vol_InDist = pd.to_numeric(d.Vol_InDist,errors='coerce')\n        d.Vol_OutDist = pd.to_numeric(d.Vol_OutDist,errors='coerce')\n        d.APIstr = d.APIstr.astype('str')\n        d = d.dropna(axis=0,subset=['CompanyName']) # no CompanyName = no record\n        \n\n#         d['Year'] = yr\n#         d['Quarter'] = quar\n        assert ptypes.is_numeric_dtype(d.Vol_InDist)\n        assert ptypes.is_numeric_dtype(d.Vol_OutDist)\n        assert ptypes.is_string_dtype(d.CompanyName)\n        assert ptypes.is_string_dtype(d.APIstr)\n        api10 = []\n        for index, row in d.iterrows():\n            api10.append(getAPI10(row[1],yr,quar,flag_problems=flag_problems))\n            if review:\n                print(f'{api10[-1]}, {row[1]},{yr},{quar}')\n        d['API10'] = api10\n\n        ### ---------- handle multiple entries for a given API ---------\n        cset = getCollapseSet(d.API10,yr,quar)\n        print(f'  collapse set for this worksheet: {cset}')\n        for capi in cset:\n            tmp = d[d.API10 == capi]\n            action = aggaction[(capi,yr,quar)]\n            if action == 'sum':\n                vol = tmp.groupby(['API10'])['Vol_InDist','Vol_OutDist'].sum()\n            elif action== 'last':\n                vol = tmp.groupby(['API10'])['Vol_InDist','Vol_OutDist'].last()\n            else:\n                print(f'UNRECOGNIZED ACTION for {capi}')\n            # make into df\n            vol = pd.DataFrame(vol)\n            # always take last of collapsed - assuming it is most recent\n            other = tmp.groupby(['API10'])['APIstr','CompanyName'].last()\n            mg = pd.merge(vol,other,left_index=True,right_index=True,\n                          validate='1:1')\n            mg.reset_index(level=[0],inplace=True) \n            tmp = d[d.API10 != capi] # drop the old\n            d = pd.concat([tmp,mg],sort=True) # add the new\n        ### --------------------------------------------------------------\n# =============================================================================\n#         print(f'{len(d)}, {len(d.API10.unique())}')\n#         print(d[d.API10.duplicated()==True])\n#         d.sort_values(by='API10').to_csv(tempf)\n# =============================================================================\n        assert len(d)==len(d.API10.unique())\n        d['Year'] = yr\n        d['Quarter'] = quar\n\n\n        dlst.append(d)\n    \n    main = pd.concat(dlst,sort=True)\n    main.to_csv(tempf)\n    return main\n\n#### ---------------------- Current data files (including future within year)\ndef read_2018_plus(fn,yr,review=False,flag_problems=True):\n    # read excel file and produce a pandas 
dataframe\n    # we keep only 4 columns from the sheet, ignore the header,\n    # and skip several rows at the top.\n\n    print(f'\\nProcessing {fn},{yr}')\n    d = pd.read_excel(indir+fn,skiprows=6,sheet_name=0,\n                      usecols=[0,1,2,3,5,9],header=None,\n                      names=['QtrStr','AltName','CompanyName','APIstr','Vol_InDist','Vol_OutDist'])\n    d = d.dropna(axis=0,subset=['CompanyName'])\n    d['Year'] = yr\n    d['Quarter'] = d.QtrStr.str[0]\n    d = d[d.Quarter != 'Y'] # drop the year total rows\n    d.Quarter = pd.to_numeric(d.Quarter,errors='coerce')\n    d.Vol_InDist = pd.to_numeric(d.Vol_InDist,errors='coerce')\n    d.Vol_OutDist = pd.to_numeric(d.Vol_OutDist,errors='coerce')\n    d.APIstr = d.APIstr.astype('str')\n    d = d.filter(['CompanyName','APIstr','Vol_InDist','Vol_OutDist','AltName','Year','Quarter'])\n\n    api10 = []\n    for index, row in d.iterrows():\n        quar = int(row[6])\n        api10.append(getAPI10(row[1],yr,quar,flag_problems=flag_problems))\n        if review:\n            print(f'{api10[-1]}, {row[1]},{yr},{quar}')\n    d['API10'] = api10\n\n    # Because all quarters are on one sheet, but we have to verify that there\n    # are no duplicates within a quarter, we apply the cset tests to\n    # quarter subsets then concat at the end\n    dlst = []\n    for quar in [1,2,3,4]:\n        print(f'  working on quarter {quar}')\n        dq = d[d.Quarter==quar]\n        ### ---------- handle multiple entries for a given API ---------\n        cset = getCollapseSet(dq.API10,yr,quar)\n        print(f'  collapse set for this worksheet: {cset}')\n        for capi in cset:\n            tmp = dq[dq.API10 == capi]\n            action = aggaction[(capi,yr,quar)]\n            if action == 'sum':\n                vol = tmp.groupby(['API10'])['Vol_InDist','Vol_OutDist'].sum()\n            elif action== 'last':\n                vol = tmp.groupby(['API10'])['Vol_InDist','Vol_OutDist'].last()\n            else:\n                print(f'UNRECOGNIZED ACTION for {capi}')\n            # make into df\n            vol = pd.DataFrame(vol)\n            # always take last of collapsed - assuming it is most recent\n            other = tmp.groupby(['API10'])['APIstr','CompanyName','Year','Quarter'].last()\n            mg = pd.merge(vol,other,left_index=True,right_index=True,\n                          validate='1:1')\n            mg.reset_index(level=[0],inplace=True) \n            tmp = dq[dq.API10 != capi] # drop the old\n            dq = pd.concat([tmp,mg],sort=True) # add the new\n\n# =============================================================================\n#         print(f'{len(dq)}, {len(dq.API10.unique())}')\n#         print(dq[dq.API10.duplicated()==True])\n#         dq.sort_values(by='API10').to_csv(tempf)\n# =============================================================================\n        assert len(dq)==len(dq.API10.unique()), f'{len(dq)} != {len(dq.API10.unique())}'\n        dlst.append(dq)\n        ### --------------------------------------------------------------\n    d_curr = pd.concat(dlst,sort=True)\n\n\n    return d_curr\n\n\ndef processAllFiles(review=False):\n    dlst = []\n    for fnl in fn_old:\n        print(f'Processing {fnl[0]}')\n        out = read_old(fnl[0],fnl[1],fnl[2],review=review)\n        dlst.append(out)\n    out = read_2012(fn_2012,review=review)\n    dlst.append(out)\n    for fnl in fn_2013_17:\n        print(f'\\n ***** Processing {fnl[0]}')\n        out = read_2013_17(fnl[0],fnl[1],review=review)\n        dlst.append(out)\n    for fnl in fn_2018_plus:\n        print(f'\\n ***** Processing {fnl[0]}')\n        out = read_2018_plus(fnl[0],fnl[1],review=review)\n        dlst.append(out)\n    final = pd.concat(dlst,sort=True)\n    \n    final['YrQ'] = final.Year.astype('str') + 'Q' + final.Quarter.astype('str')\n    final.sort_values(by=['API10','Year','Quarter']).to_csv(pre_proc_out)\n\n    return final.sort_values(by=['API10','Year','Quarter'])\n\nif __name__ == '__main__':\n    out = processAllFiles()", "\n# coding: utf-8\n\n# # Creating a 
dataset of Ohio injection wells\n\n\n\nimport matplotlib.pyplot as plt\nimport random\nimport numpy as np\nimport pandas as pd\nimport os\n\n# set datadir to the directory that holds the zipfile\ndatadir = 'c:\\MyDocs/sandbox/data/datasets/FracFocus/'\noutdir = datadir+'output/'\nindir = datadir+'OH_injection/'\n\ntempf = outdir+'temp.csv'\ntempf1 = outdir+'temp1.csv'\npre_four = outdir+'pre_four.csv'\n# print(os.listdir(indir))\n# input files are in three different formats:\n# oldest: tuple (filename,yr,q)\n# all columns are named the same!!\nfn_old = [('OH_1ST QUARTER 2011 BRINE DISPOSAL FEES.xls',2011,1),\n          ('OH_2ND QUARTER 2011 BRINE DISPOSAL FEES.xls',2011,2),\n          ('OH_3RD QUARTER 2011 BRINE DISPOSAL FEES-1.xls',2011,3),\n          ('OH_4TH QUARTER 2010 BRINE DISPOSAL FEES.xls',2010,4),\n          ('OH_4TH QUARTER 2011 BRINE DISPOSAL FEES.xls',2011,4),\n          ('OH_Brine Disposal Fee - 3rd Quarter 2010-2.xls',2010,3)]\n\n# the 2012 file is in a funky state - the set of worksheets has two different formats: a blend of old and main\n# so we have to process it separately\nfn_2012 = 'OH_BRINE DISPOSAL FEES FOR 2012.xls'\n# fn_2012 = 'OH_BRINE DISPOSAL FEES FOR 2012 CORRECTED.xlsx'\n\n# bulk of the data are here - first four worksheets are quarters. Total worksheet ignored\n# tuple: (filename,year)\nfn_main = [('BRINE DISPOSAL FEES FOR 2013.xlsx',2013),\n           ('BRINE DISPOSAL FEES FOR 2014.xlsx',2014),\n           ('BRINE DISPOSAL FEES FOR 2015.xlsx',2015),\n           ('BRINE DISPOSAL FEES FOR 2016.xlsx',2016), \n           ('BRINE DISPOSAL FEES FOR 2017.xlsx',2017)]\n# current files are of a different format.\nfn_2018_etc = [('BRINE DISPOSAL FEES FOR 2018.xlsx',2018),\n               ('BRINE DISPOSAL FEES FOR 2019.xlsx',2019)]\n\nSWDfn = indir+'Copy of SWD locations - July_2018.xls'\nODNR_permit_pickle = outdir+'ODNR_permit.pkl'\nODNR_injection_pickle = outdir+'ODNR_injection.pkl'\n\ninj_excel = outdir+'Inject_wide.xlsx'\n\n\n# In[59]:\n\n\nt = pd.read_pickle(ODNR_injection_pickle)\nx = t[t.Owner.str.contains('HUNTER')]\nt.to_csv(tempf)\n\n\n# ## get oldest data\n\n# In[60]:\n\n\ndlst = []\nfor fnl in fn_old:\n    print(fnl)\n    fn = fnl[0]\n    yr = fnl[1]\n    quar = fnl[2]\n#     print(fn,yr,quar)\n    d = pd.read_excel(indir+fn,skiprows=5,header=None,usecols=[7,8,10,11],\n                      names=['CompanyName','APIstr','Vol','In_Out'])\n    d.Vol = d.Vol.where(d.Vol.str.lower().str.strip()!='zero',0)\n    d.Vol = pd.to_numeric(d.Vol)\n    dIn = d[d.In_Out.str.lower().str[0]=='i']\n    dIn = dIn.filter(['CompanyName','APIstr','Vol'])\n    dIn.columns = ['CompanyName','APIstr','Vol_InDist']\n    \n    dOut = d[d.In_Out.str.lower().str[0]=='o']\n    dOut = dOut.filter(['APIstr','Vol'])\n    dOut.columns = ['APIstr','Vol_OutDist']\n    d['Year'] = fnl[1]\n    d['Quarter'] = fnl[2]\n    \n    mg = pd.merge(dIn,dOut,how='outer',left_on='APIstr',right_on='APIstr')\n    mg['Year'] = fnl[1]\n    mg['Quarter'] = fnl[2]\n    \n    dlst.append(mg)\n\nold = pd.concat(dlst)\nold.to_csv(tempf)\n\n\n# In[61]:\n\n\nold.info()\n\n\n# ## process the 2012 file\n\n# In[62]:\n\n\ndlst = []\nuc1 = [1,2,4,8]\nuc2 = [7,8,10,14]\nfor ws in [0,1,2,3]: # ws 1 is like 'main'; others like 'old'\n#     print(ws)\n    if ws == 1:\n        uc = uc1\n    else:\n        uc= uc2\n#     print(uc)\n    d = pd.read_excel(indir+fn_2012,skiprows=6,sheet_name=ws,\n                      usecols=uc,header=None,\n                      names=['CompanyName','APIstr','Vol_InDist','Vol_OutDist'])\n    d = d.dropna(axis=0,subset=['CompanyName'])\n    d['Year'] = 2012\n    d['Quarter'] = ws+1\n    dlst.append(d)\n    if ws==1:\n        tmp = d\n\ntrans2012 = pd.concat(dlst)\ntrans2012.to_csv(tempf)\ntmp.head()\n\n\n# In[63]:\n\n\ntwo = pd.concat([old,trans2012])\ntwo.head()\n\n\n# 
\n# ## get main data files\n\n# In[64]:\n\n\ndlst = []\nfor fnl in fn_main:\n    print(fnl)\n    fn = fnl[0]\n    yr = fnl[1]\n    for ws in [0,1,2,3]: # four quarterly worksheets\n        d = pd.read_excel(indir+fn,skiprows=6,sheet_name=ws,\n                          usecols=[0,1,2,4,8],header=None,\n                          names=['AltName','CompanyName','APIstr','Vol_InDist','Vol_OutDist'])\n        d = d.dropna(axis=0,subset=['CompanyName'])\n        d['Year'] = yr\n        d['Quarter'] = ws+1\n#         d.columns= ['AltName','CompanyName','APIstr','Desc',\n#                     'Vol_InDist','GrossIn','NetIn','PercRet',\n#                     'Vol_OutDist','GrossOut','NetOut','PercRetOut','Comments']\n#         print(d.columns)\n        dlst.append(d)\n\nmain = pd.concat(dlst)\nmain.to_csv(tempf)\n\n\n# In[65]:\n\n\nthree = pd.concat([two,main],sort=True)\n# out = two.groupby(['APIstr'],as_index=True)['APIstr','Year','Quarter',\n#                  'CompanyName','Vol_InDist','Vol_OutDist']\nthree.to_csv(tempf)\n\n\n# ## get current file\n\n# In[100]:\n\ndlst = []\nfor fnl in fn_2018_etc:\n    fn = fnl[0]\n    yr = fnl[1]\n    #print(fn,yr)\n    d = pd.read_excel(indir+fn,skiprows=6,sheet_name=0,\n                      usecols=[0,1,2,3,5,9],header=None,\n                      names=['QtrStr','AltName','CompanyName','APIstr','Vol_InDist','Vol_OutDist'])\n    d = d.dropna(axis=0,subset=['CompanyName'])\n    d['Year'] = yr\n    d['Quarter'] = d.QtrStr.str[0]\n    d = d[d.Quarter != 'Y']\n    d = d.filter(['AltName','CompanyName','APIstr','Vol_InDist','Vol_OutDist','Year','Quarter'])\n    dlst.append(d)\nfour = pd.concat(dlst,sort=True)\nfour = pd.concat([three,four],sort=True)\nfour.to_csv(tempf)\nfour.info()\n\n\n# ## some clean up of the API string and Yr_Q\n# \n# \n\n# In[101]:\n\n\nfour.APIstr = four.APIstr.astype('str') # make sure all are strings\n\n# First create some flags based on status of APIstr\nfour['NoAPIstr'] = four.APIstr.str.strip()==''\nprint(f'Number of records with no APIstring: {four.NoAPIstr.sum()}')\nfour.APIstr = np.where(four.NoAPIstr,'No API string recorded',four.APIstr)\n\nfour['API_non_numeric'] = ~four.APIstr.str[:5].str.isnumeric().astype('bool') \nprint(f'Number of records that are non-numeric: {four.API_non_numeric.sum()}')\n\nfour['MultipleNumericAPI'] = four.APIstr.str.contains('&') & ~four.API_non_numeric\n# four['tmp'] = four.APIstr.astype('str') + '&junk'\n# four.tmp = four.tmp.str.split('&').str.get(0).str[0:10] # grab first 10 char before the &\nprint(f'Number of records with multiple numeric API: {four.MultipleNumericAPI.sum()}')\n\nfour['temp1'] = four.APIstr.astype('str') + '/' # cover those few API without /\nfour.temp1 = four.temp1.str.replace('SWIW','/SWIW')\nfour.temp1 = four.temp1.str.split('/').str.get(0)\n\n\nfour['API10'] = four.APIstr # just use APIstr for non-numeric\ncond = ~four.NoAPIstr & ~four.API_non_numeric & ~ four.MultipleNumericAPI\nfour.API10 = np.where(cond,four.temp1,four.API10)\n\n# one more tweak\ncond2 = four.API10.str.len() >15\ncond3 = ~four.API_non_numeric\ncond4 = cond2 & cond3\nfour.MultipleNumericAPI = np.where(cond4,True,four.MultipleNumericAPI)\nfour.API10 = np.where(four.MultipleNumericAPI,four.APIstr.str[0:10],four.API10) # fix API10 for the multiple API records\n\nfour.API10 = four.API10.astype('str')\nfour.to_csv(tempf)\n# four[four.MultipleNumericAPI].head(20)\n\n\n# In[102]:\n\n\n\nfour.Year = four.Year.astype('str')\nfour.Quarter = four.Quarter.astype('str')\nfour['YrQ'] = four.Year+'Q'+four.Quarter\nfour = four.drop(['Year','Quarter'],axis=1)\nfour = four.filter(['API10','MultipleNumericAPI','APIstr','CompanyName','AltName','YrQ','Vol_InDist','Vol_OutDist'])\n# four[four.API10.str.contains('Trum')].head(20)\nfour.to_csv(tempf)\n\n\n# ## Changing 
by hand: non_numeric API to a representative API\n\n# In[103]:\n\n\ncond1 = four.API10.str.contains('Ashtabula')\nfour.API10 = np.where(cond1,'3400723262',four.API10)\n\ncond1 = four.API10.str.lower().str.contains('trum')\n#len(four[cond1])\nfour.API10 = np.where(cond1,'3415521893',four.API10)\n\ncond1 = four.API10.str.lower().str.contains('stark')\nlen(four[cond1])\nfour.API10 = np.where(cond1,'3415121920',four.API10)\n\ncond1 = four.API10.str.contains('4439/SWIW')\nlen(four[cond1])\nfour.API10 = np.where(cond1,'3411924439',four.API10)\n\ncond1 = four.API10.str.contains('34009237610000')\nlen(four[cond1])\nfour.API10 = np.where(cond1,'3400923761',four.API10)\n\n\nfour.to_csv(pre_four)\n\n\n# ## how well does API10 map on to APIstr (the full string)?\n\n# In[104]:\n\n\na10s = list(four.API10.unique())\napi10 = []\nastr = []\nfor a in a10s:\n api10.append(a)\n astr.append(list(four.APIstr[four.API10==a].unique()))\ntmp = pd.DataFrame({'api10':api10,'APIstr':astr})\ntmp.to_csv(tempf)\n\n\n# ## Get SWD list\n# \n\n# In[111]:\n\n\nSWD_df = pd.read_excel(SWDfn)\nSWD_df['API10'] = SWD_df['API #'].astype('str').str[0:10]\nSWD_df.API10 = SWD_df.API10.astype('str')\nSWD_df.columns = ['API','Owner','WellName','County','Township','Latitude','Longitude','WellStatus','API10']\n# SWD_df.head()\napis = pd.DataFrame({'API10':four.API10.unique()})\ntmp = pd.merge(SWD_df,apis,how='outer',left_on='API10',right_on='API10',indicator=True)\ntmp.to_csv(tempf)\ntmp.head()\ncol0 = tmp[tmp['_merge']=='both']\ncol0 = col0.drop(['_merge'],axis=1) #,'API'],axis=1) #######\ncol0['meta source'] = 'SWD_list_july_2018'\ncol0.to_csv(tempf)\n# # not matched yet\ncolnot = tmp[tmp['_merge']=='right_only']\ncolnot = colnot.filter(['API10'],axis=1)\n\n\n# ## Examine APIs so we can fetch metadata\n\n# In[106]:\n\n\n# apis = pd.DataFrame({'API10':four.API10.unique()})\n\n# first try to match from the injection well dataset\nODNRi = pd.read_pickle(ODNR_injection_pickle)\nODNRi.API10 = ODNRi.API10.astype('str')\nmg_apii = pd.merge(ODNRi,colnot,how='outer',left_on='API10',right_on='API10',indicator=True)\ncol1 = mg_apii[mg_apii['_merge']=='both']\ncol1 = col1.drop(['_merge','API'],axis=1)\ncol1['meta source'] = 'ODNR_injection'\n# not matched yet\ncol2 = mg_apii[mg_apii['_merge']=='right_only']\ncol2 = col2.filter(['API10'],axis=1)\n\n# try to match the rest with the permit dataset\nODNR = pd.read_pickle(ODNR_permit_pickle)\nODNR.API10 = ODNR.API10.astype('str')\nmg_api = pd.merge(ODNR,col2,how='outer',left_on='API10',right_on='API10',indicator=True)\ncol3 = mg_api[mg_api['_merge']=='both']\ncol3 = col3.drop(['_merge','API'],axis=1)\ncol3['meta source'] = 'ODNR_permit'\ncol3.columns = ['County','Owner','Township','PermitDate','WellName','WellNumber','Latitude','Longitude','API10','meta source']\n\ncol4 = mg_api[mg_api['_merge']=='right_only']\ncol4 = col4.filter(['API10'],axis=1)\ncol4['meta source'] = 'No_Match'\n\ncol5 = pd.concat([col0,col1,col3,col4],sort=True)\ncol5.to_csv(tempf1)\n\n\n# ## Reorganize so data are in FT format\n\n# In[140]:\n\n\nyqs = four.YrQ.unique()\nyqs.sort()\ndf_wide = apis.copy()\ndf_wide['chk_API10'] = df_wide.API10.copy()\ndlst = [df_wide]\nfor yq in yqs:\n d = four[four.YrQ==yq].copy()\n d['chkAPI'] = d.API10.copy()\n d = d.drop(['YrQ'],axis=1)\n newcol = []\n for c in d.columns:\n if c != 'API10':\n newcol.append(c+' '+yq)\n else:\n newcol.append(c)\n d.columns = newcol\n print(d.duplicated)\n tmp = pd.merge(df_wide,d,how='left',left_on='API10',right_on='API10',validate='1:m')\n tmp = 
tmp.drop(['API10'],axis=1)\n dlst.append(tmp)\n\ndf_wide = pd.concat(dlst,axis=1) \n# df_wide.API10 = df_wide.API10.astype('str')\n# col5.API10 = col5.API10.astype('str')\ndf_wide.to_csv(tempf)\n# df_wide = df_wide.filter(['API10','APIstr 2010Q3'],axis=1)\ncol5 = col5.filter(['API10'],axis=1)\n# col5 = col5.sort_values(by='API10')\nbig = pd.merge(col5,df_wide,on='API10',how='outer',validate='1:m',indicator=True)\nbig.head()\n# df_wide.head()\n\n\n# In[148]:\n\n\nprint(f'{len(df_wide.API10)}, {len(df_wide.API10.unique())}')\nprint(f'{len(col5.API10)}, {len(col5.API10.unique())}')\ndf_wide.to_csv(tempf)\n\n\n# In[80]:\n\n\ncol5.info()\n\n\n# In[126]:\n\n\nbig.to_excel(inj_excel)\n\n\n# In[35]:\n\n\nfour.plot('YrQ','Vol_InDist',style='o')\n\n" ]
[ [ "pandas.merge", "pandas.read_excel", "pandas.concat", "pandas.api.types.is_numeric_dtype", "pandas.DataFrame", "pandas.api.types.is_string_dtype", "pandas.to_numeric" ], [ "pandas.concat", "pandas.read_excel", "pandas.merge", "pandas.to_numeric", "pandas.DataFrame", "pandas.read_pickle", "numpy.where" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [ "0.23", "0.21", "2.0", "1.4", "1.3", "0.19", "1.1", "1.5", "0.24", "0.20", "1.0", "0.25", "1.2" ], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [ "0.23", "0.21", "2.0", "1.4", "1.3", "0.19", "1.1", "1.5", "0.24", "0.20", "1.0", "0.25", "1.2" ], "scipy": [], "tensorflow": [] } ]
FGlazov/IL2Stats_ByAircraftMod
[ "acf66eb3f31a7e789d61a5c60d32fd30dbcedf4b" ]
[ "src/mod_stats_by_aircraft/reservoir_sampling.py" ]
[ "import json\nimport random\n\nimport numpy as np\n\nSAMPLE = 'SAMPLE' # A sample of ammo breakdowns. Holds up to SAMPLE_SIZE many elements.\nSAMPLE_SIZE = 50\nRESERVOIR_COUNTER = 'RESERVOIR_COUNTER' # Helper int that is used for \"reservoir sampling\", single pass fair sampling.\n\n\n# https://stackoverflow.com/a/47626762\nclass NumpyEncoder(json.JSONEncoder):\n def default(self, obj):\n if isinstance(obj, np.ndarray):\n return obj.tolist()\n return json.JSONEncoder.default(self, obj)\n\n\ndef update_reservoir(sample_dict, reservoir_dict):\n \"\"\"\n reservoir_dict has SAMPLE and RESERVOIR values. The SAMPLE is a json of a numpy array, where each row corresponds to\n a single sample. The columns correspond to the keys in sample_dict, sorted alphabetically. The values in each row\n then correspond to the values of a sample dict. In effect the reservoir_dict contains a sample of size\n <= SAMPLE_SIZE, which stores some selected sample_dicts for later use.\n\n We only see each sample_dict once for performance reasons. So we use an online sampling algorithm. Reservoir\n sampling is used, this code is based on https://stackoverflow.com/a/42532968.\n \"\"\"\n new_row = np.empty([1, len(sample_dict)])\n\n for i, ammo_key in enumerate(sorted(list(sample_dict))):\n new_row[0, i] = sample_dict[ammo_key]\n\n reservoir = get_samples(reservoir_dict, len(sample_dict))\n\n reservoir_updated = False\n if reservoir_dict[RESERVOIR_COUNTER] < SAMPLE_SIZE:\n reservoir = np.append(reservoir, new_row, axis=0)\n reservoir_updated = True\n else:\n n = random.randint(0, reservoir_dict[RESERVOIR_COUNTER])\n if n < SAMPLE_SIZE:\n reservoir[n] = new_row\n reservoir_updated = True\n\n reservoir_dict[RESERVOIR_COUNTER] += 1\n if reservoir_updated:\n reservoir_dict[SAMPLE] = json.dumps(reservoir, cls=NumpyEncoder)\n\n\ndef get_samples(reservoir_dict, nr_ammo_types):\n if SAMPLE not in reservoir_dict:\n return []\n\n if reservoir_dict[SAMPLE] is not None:\n json_load = json.loads(reservoir_dict[SAMPLE])\n reservoir = np.asarray(json_load)\n else:\n reservoir = np.empty((0, nr_ammo_types))\n return reservoir\n" ]
[ [ "numpy.asarray", "numpy.append", "numpy.empty" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
RonKbS/pandas
[ "ba1b6571bd507c27072900d778adf81a99affc7a", "ba1b6571bd507c27072900d778adf81a99affc7a", "ba1b6571bd507c27072900d778adf81a99affc7a", "ba1b6571bd507c27072900d778adf81a99affc7a" ]
[ "pandas/core/dtypes/missing.py", "pandas/tests/extension/arrow/test_bool.py", "pandas/tests/arithmetic/test_period.py", "pandas/tests/util/test_assert_almost_equal.py" ]
[ "\"\"\"\nmissing types & inference\n\"\"\"\nimport numpy as np\n\nfrom pandas._config import get_option\n\nfrom pandas._libs import lib\nimport pandas._libs.missing as libmissing\nfrom pandas._libs.tslibs import NaT, iNaT\n\nfrom .common import (\n _NS_DTYPE,\n _TD_DTYPE,\n ensure_object,\n is_bool_dtype,\n is_complex_dtype,\n is_datetime64_dtype,\n is_datetime64tz_dtype,\n is_datetimelike_v_numeric,\n is_dtype_equal,\n is_extension_array_dtype,\n is_float_dtype,\n is_integer_dtype,\n is_object_dtype,\n is_period_dtype,\n is_scalar,\n is_string_dtype,\n is_string_like_dtype,\n is_timedelta64_dtype,\n needs_i8_conversion,\n pandas_dtype,\n)\nfrom .generic import (\n ABCDatetimeArray,\n ABCExtensionArray,\n ABCGeneric,\n ABCIndexClass,\n ABCMultiIndex,\n ABCSeries,\n ABCTimedeltaArray,\n)\nfrom .inference import is_list_like\n\nisposinf_scalar = libmissing.isposinf_scalar\nisneginf_scalar = libmissing.isneginf_scalar\n\n\ndef isna(obj):\n \"\"\"\n Detect missing values for an array-like object.\n\n This function takes a scalar or array-like object and indicates\n whether values are missing (``NaN`` in numeric arrays, ``None`` or ``NaN``\n in object arrays, ``NaT`` in datetimelike).\n\n Parameters\n ----------\n obj : scalar or array-like\n Object to check for null or missing values.\n\n Returns\n -------\n bool or array-like of bool\n For scalar input, returns a scalar boolean.\n For array input, returns an array of boolean indicating whether each\n corresponding element is missing.\n\n See Also\n --------\n notna : Boolean inverse of pandas.isna.\n Series.isna : Detect missing values in a Series.\n DataFrame.isna : Detect missing values in a DataFrame.\n Index.isna : Detect missing values in an Index.\n\n Examples\n --------\n Scalar arguments (including strings) result in a scalar boolean.\n\n >>> pd.isna('dog')\n False\n\n >>> pd.isna(pd.NA)\n True\n\n >>> pd.isna(np.nan)\n True\n\n ndarrays result in an ndarray of booleans.\n\n >>> array = np.array([[1, np.nan, 3], [4, 5, np.nan]])\n >>> array\n array([[ 1., nan, 3.],\n [ 4., 5., nan]])\n >>> pd.isna(array)\n array([[False, True, False],\n [False, False, True]])\n\n For indexes, an ndarray of booleans is returned.\n\n >>> index = pd.DatetimeIndex([\"2017-07-05\", \"2017-07-06\", None,\n ... 
\"2017-07-08\"])\n >>> index\n DatetimeIndex(['2017-07-05', '2017-07-06', 'NaT', '2017-07-08'],\n dtype='datetime64[ns]', freq=None)\n >>> pd.isna(index)\n array([False, False, True, False])\n\n For Series and DataFrame, the same type is returned, containing booleans.\n\n >>> df = pd.DataFrame([['ant', 'bee', 'cat'], ['dog', None, 'fly']])\n >>> df\n 0 1 2\n 0 ant bee cat\n 1 dog None fly\n >>> pd.isna(df)\n 0 1 2\n 0 False False False\n 1 False True False\n\n >>> pd.isna(df[1])\n 0 False\n 1 True\n Name: 1, dtype: bool\n \"\"\"\n return _isna(obj)\n\n\nisnull = isna\n\n\ndef _isna_new(obj):\n\n if is_scalar(obj):\n return libmissing.checknull(obj)\n # hack (for now) because MI registers as ndarray\n elif isinstance(obj, ABCMultiIndex):\n raise NotImplementedError(\"isna is not defined for MultiIndex\")\n elif isinstance(obj, type):\n return False\n elif isinstance(\n obj,\n (\n ABCSeries,\n np.ndarray,\n ABCIndexClass,\n ABCExtensionArray,\n ABCDatetimeArray,\n ABCTimedeltaArray,\n ),\n ):\n return _isna_ndarraylike(obj)\n elif isinstance(obj, ABCGeneric):\n return obj._constructor(obj._data.isna(func=isna))\n elif isinstance(obj, list):\n return _isna_ndarraylike(np.asarray(obj, dtype=object))\n elif hasattr(obj, \"__array__\"):\n return _isna_ndarraylike(np.asarray(obj))\n else:\n return obj is None\n\n\ndef _isna_old(obj):\n \"\"\"\n Detect missing values, treating None, NaN, INF, -INF as null.\n\n Parameters\n ----------\n arr: ndarray or object value\n\n Returns\n -------\n boolean ndarray or boolean\n \"\"\"\n if is_scalar(obj):\n return libmissing.checknull_old(obj)\n # hack (for now) because MI registers as ndarray\n elif isinstance(obj, ABCMultiIndex):\n raise NotImplementedError(\"isna is not defined for MultiIndex\")\n elif isinstance(obj, type):\n return False\n elif isinstance(obj, (ABCSeries, np.ndarray, ABCIndexClass, ABCExtensionArray)):\n return _isna_ndarraylike_old(obj)\n elif isinstance(obj, ABCGeneric):\n return obj._constructor(obj._data.isna(func=_isna_old))\n elif isinstance(obj, list):\n return _isna_ndarraylike_old(np.asarray(obj, dtype=object))\n elif hasattr(obj, \"__array__\"):\n return _isna_ndarraylike_old(np.asarray(obj))\n else:\n return obj is None\n\n\n_isna = _isna_new\n\n\ndef _use_inf_as_na(key):\n \"\"\"\n Option change callback for na/inf behaviour.\n\n Choose which replacement for numpy.isnan / -numpy.isfinite is used.\n\n Parameters\n ----------\n flag: bool\n True means treat None, NaN, INF, -INF as null (old way),\n False means None and NaN are null, but INF, -INF are not null\n (new way).\n\n Notes\n -----\n This approach to setting global module values is discussed and\n approved here:\n\n * https://stackoverflow.com/questions/4859217/\n programmatically-creating-variables-in-python/4859312#4859312\n \"\"\"\n flag = get_option(key)\n if flag:\n globals()[\"_isna\"] = _isna_old\n else:\n globals()[\"_isna\"] = _isna_new\n\n\ndef _isna_ndarraylike(obj):\n is_extension = is_extension_array_dtype(obj)\n\n if not is_extension:\n # Avoid accessing `.values` on things like\n # PeriodIndex, which may be expensive.\n values = getattr(obj, \"values\", obj)\n else:\n values = obj\n\n dtype = values.dtype\n\n if is_extension:\n if isinstance(obj, (ABCIndexClass, ABCSeries)):\n values = obj._values\n else:\n values = obj\n result = values.isna()\n elif isinstance(obj, ABCDatetimeArray):\n return obj.isna()\n elif is_string_dtype(dtype):\n # Working around NumPy ticket 1542\n shape = values.shape\n\n if is_string_like_dtype(dtype):\n # object array of 
strings\n result = np.zeros(values.shape, dtype=bool)\n else:\n # object array of non-strings\n result = np.empty(shape, dtype=bool)\n vec = libmissing.isnaobj(values.ravel())\n result[...] = vec.reshape(shape)\n\n elif needs_i8_conversion(dtype):\n # this is the NaT pattern\n result = values.view(\"i8\") == iNaT\n else:\n result = np.isnan(values)\n\n # box\n if isinstance(obj, ABCSeries):\n result = obj._constructor(result, index=obj.index, name=obj.name, copy=False)\n\n return result\n\n\ndef _isna_ndarraylike_old(obj):\n values = getattr(obj, \"values\", obj)\n dtype = values.dtype\n\n if is_string_dtype(dtype):\n # Working around NumPy ticket 1542\n shape = values.shape\n\n if is_string_like_dtype(dtype):\n result = np.zeros(values.shape, dtype=bool)\n else:\n result = np.empty(shape, dtype=bool)\n vec = libmissing.isnaobj_old(values.ravel())\n result[:] = vec.reshape(shape)\n\n elif is_datetime64_dtype(dtype):\n # this is the NaT pattern\n result = values.view(\"i8\") == iNaT\n else:\n result = ~np.isfinite(values)\n\n # box\n if isinstance(obj, ABCSeries):\n result = obj._constructor(result, index=obj.index, name=obj.name, copy=False)\n\n return result\n\n\ndef notna(obj):\n \"\"\"\n Detect non-missing values for an array-like object.\n\n This function takes a scalar or array-like object and indicates\n whether values are valid (not missing, which is ``NaN`` in numeric\n arrays, ``None`` or ``NaN`` in object arrays, ``NaT`` in datetimelike).\n\n Parameters\n ----------\n obj : array-like or object value\n Object to check for *not* null or *non*-missing values.\n\n Returns\n -------\n bool or array-like of bool\n For scalar input, returns a scalar boolean.\n For array input, returns an array of boolean indicating whether each\n corresponding element is valid.\n\n See Also\n --------\n isna : Boolean inverse of pandas.notna.\n Series.notna : Detect valid values in a Series.\n DataFrame.notna : Detect valid values in a DataFrame.\n Index.notna : Detect valid values in an Index.\n\n Examples\n --------\n Scalar arguments (including strings) result in a scalar boolean.\n\n >>> pd.notna('dog')\n True\n\n >>> pd.notna(pd.NA)\n False\n\n >>> pd.notna(np.nan)\n False\n\n ndarrays result in an ndarray of booleans.\n\n >>> array = np.array([[1, np.nan, 3], [4, 5, np.nan]])\n >>> array\n array([[ 1., nan, 3.],\n [ 4., 5., nan]])\n >>> pd.notna(array)\n array([[ True, False, True],\n [ True, True, False]])\n\n For indexes, an ndarray of booleans is returned.\n\n >>> index = pd.DatetimeIndex([\"2017-07-05\", \"2017-07-06\", None,\n ... 
\"2017-07-08\"])\n >>> index\n DatetimeIndex(['2017-07-05', '2017-07-06', 'NaT', '2017-07-08'],\n dtype='datetime64[ns]', freq=None)\n >>> pd.notna(index)\n array([ True, True, False, True])\n\n For Series and DataFrame, the same type is returned, containing booleans.\n\n >>> df = pd.DataFrame([['ant', 'bee', 'cat'], ['dog', None, 'fly']])\n >>> df\n 0 1 2\n 0 ant bee cat\n 1 dog None fly\n >>> pd.notna(df)\n 0 1 2\n 0 True True True\n 1 True False True\n\n >>> pd.notna(df[1])\n 0 True\n 1 False\n Name: 1, dtype: bool\n \"\"\"\n res = isna(obj)\n if is_scalar(res):\n return not res\n return ~res\n\n\nnotnull = notna\n\n\ndef _isna_compat(arr, fill_value=np.nan) -> bool:\n \"\"\"\n Parameters\n ----------\n arr: a numpy array\n fill_value: fill value, default to np.nan\n\n Returns\n -------\n True if we can fill using this fill_value\n \"\"\"\n dtype = arr.dtype\n if isna(fill_value):\n return not (is_bool_dtype(dtype) or is_integer_dtype(dtype))\n return True\n\n\ndef array_equivalent(left, right, strict_nan: bool = False) -> bool:\n \"\"\"\n True if two arrays, left and right, have equal non-NaN elements, and NaNs\n in corresponding locations. False otherwise. It is assumed that left and\n right are NumPy arrays of the same dtype. The behavior of this function\n (particularly with respect to NaNs) is not defined if the dtypes are\n different.\n\n Parameters\n ----------\n left, right : ndarrays\n strict_nan : bool, default False\n If True, consider NaN and None to be different.\n\n Returns\n -------\n b : bool\n Returns True if the arrays are equivalent.\n\n Examples\n --------\n >>> array_equivalent(\n ... np.array([1, 2, np.nan]),\n ... np.array([1, 2, np.nan]))\n True\n >>> array_equivalent(\n ... np.array([1, np.nan, 2]),\n ... np.array([1, 2, np.nan]))\n False\n \"\"\"\n\n left, right = np.asarray(left), np.asarray(right)\n\n # shape compat\n if left.shape != right.shape:\n return False\n\n # Object arrays can contain None, NaN and NaT.\n # string dtypes must be come to this path for NumPy 1.7.1 compat\n if is_string_dtype(left) or is_string_dtype(right):\n\n if not strict_nan:\n # isna considers NaN and None to be equivalent.\n return lib.array_equivalent_object(\n ensure_object(left.ravel()), ensure_object(right.ravel())\n )\n\n for left_value, right_value in zip(left, right):\n if left_value is NaT and right_value is not NaT:\n return False\n\n elif left_value is libmissing.NA and right_value is not libmissing.NA:\n return False\n\n elif isinstance(left_value, float) and np.isnan(left_value):\n if not isinstance(right_value, float) or not np.isnan(right_value):\n return False\n else:\n try:\n if np.any(np.asarray(left_value != right_value)):\n return False\n except TypeError as err:\n if \"Cannot compare tz-naive\" in str(err):\n # tzawareness compat failure, see GH#28507\n return False\n elif \"boolean value of NA is ambiguous\" in str(err):\n return False\n raise\n return True\n\n # NaNs can occur in float and complex arrays.\n if is_float_dtype(left) or is_complex_dtype(left):\n\n # empty\n if not (np.prod(left.shape) and np.prod(right.shape)):\n return True\n return ((left == right) | (isna(left) & isna(right))).all()\n\n elif is_datetimelike_v_numeric(left, right):\n # GH#29553 avoid numpy deprecation warning\n return False\n\n elif needs_i8_conversion(left) or needs_i8_conversion(right):\n # datetime64, timedelta64, Period\n if not is_dtype_equal(left.dtype, right.dtype):\n return False\n\n left = left.view(\"i8\")\n right = right.view(\"i8\")\n\n # if we have structured 
dtypes, compare first\n if left.dtype.type is np.void or right.dtype.type is np.void:\n if left.dtype != right.dtype:\n return False\n\n return np.array_equal(left, right)\n\n\ndef _infer_fill_value(val):\n \"\"\"\n infer the fill value for the nan/NaT from the provided\n scalar/ndarray/list-like if we are a NaT, return the correct dtyped\n element to provide proper block construction\n \"\"\"\n\n if not is_list_like(val):\n val = [val]\n val = np.array(val, copy=False)\n if needs_i8_conversion(val):\n return np.array(\"NaT\", dtype=val.dtype)\n elif is_object_dtype(val.dtype):\n dtype = lib.infer_dtype(ensure_object(val), skipna=False)\n if dtype in [\"datetime\", \"datetime64\"]:\n return np.array(\"NaT\", dtype=_NS_DTYPE)\n elif dtype in [\"timedelta\", \"timedelta64\"]:\n return np.array(\"NaT\", dtype=_TD_DTYPE)\n return np.nan\n\n\ndef _maybe_fill(arr, fill_value=np.nan):\n \"\"\"\n if we have a compatible fill_value and arr dtype, then fill\n \"\"\"\n if _isna_compat(arr, fill_value):\n arr.fill(fill_value)\n return arr\n\n\ndef na_value_for_dtype(dtype, compat: bool = True):\n \"\"\"\n Return a dtype compat na value\n\n Parameters\n ----------\n dtype : string / dtype\n compat : bool, default True\n\n Returns\n -------\n np.dtype or a pandas dtype\n\n Examples\n --------\n >>> na_value_for_dtype(np.dtype('int64'))\n 0\n >>> na_value_for_dtype(np.dtype('int64'), compat=False)\n nan\n >>> na_value_for_dtype(np.dtype('float64'))\n nan\n >>> na_value_for_dtype(np.dtype('bool'))\n False\n >>> na_value_for_dtype(np.dtype('datetime64[ns]'))\n NaT\n \"\"\"\n dtype = pandas_dtype(dtype)\n\n if is_extension_array_dtype(dtype):\n return dtype.na_value\n if (\n is_datetime64_dtype(dtype)\n or is_datetime64tz_dtype(dtype)\n or is_timedelta64_dtype(dtype)\n or is_period_dtype(dtype)\n ):\n return NaT\n elif is_float_dtype(dtype):\n return np.nan\n elif is_integer_dtype(dtype):\n if compat:\n return 0\n return np.nan\n elif is_bool_dtype(dtype):\n return False\n return np.nan\n\n\ndef remove_na_arraylike(arr):\n \"\"\"\n Return array-like containing only true/non-NaN values, possibly empty.\n \"\"\"\n if is_extension_array_dtype(arr):\n return arr[notna(arr)]\n else:\n return arr[notna(lib.values_from_object(arr))]\n\n\ndef is_valid_nat_for_dtype(obj, dtype) -> bool:\n \"\"\"\n isna check that excludes incompatible dtypes\n\n Parameters\n ----------\n obj : object\n dtype : np.datetime64, np.timedelta64, DatetimeTZDtype, or PeriodDtype\n\n Returns\n -------\n bool\n \"\"\"\n if not lib.is_scalar(obj) or not isna(obj):\n return False\n if dtype.kind == \"M\":\n return not isinstance(obj, np.timedelta64)\n if dtype.kind == \"m\":\n return not isinstance(obj, np.datetime64)\n\n # must be PeriodDType\n return not isinstance(obj, (np.datetime64, np.timedelta64))\n", "import numpy as np\nimport pytest\n\nimport pandas as pd\nimport pandas._testing as tm\nfrom pandas.tests.extension import base\n\npytest.importorskip(\"pyarrow\", minversion=\"0.12.0\")\n\nfrom .arrays import ArrowBoolArray, ArrowBoolDtype # isort:skip\n\n\[email protected]\ndef dtype():\n return ArrowBoolDtype()\n\n\[email protected]\ndef data():\n values = np.random.randint(0, 2, size=100, dtype=bool)\n values[1] = ~values[0]\n return ArrowBoolArray.from_scalars(values)\n\n\[email protected]\ndef data_missing():\n return ArrowBoolArray.from_scalars([None, True])\n\n\nclass BaseArrowTests:\n pass\n\n\nclass TestDtype(BaseArrowTests, base.BaseDtypeTests):\n def test_array_type_with_arg(self, data, dtype):\n 
pytest.skip(\"GH-22666\")\n\n\nclass TestInterface(BaseArrowTests, base.BaseInterfaceTests):\n def test_copy(self, data):\n # __setitem__ does not work, so we only have a smoke-test\n data.copy()\n\n def test_view(self, data):\n # __setitem__ does not work, so we only have a smoke-test\n data.view()\n\n\nclass TestConstructors(BaseArrowTests, base.BaseConstructorsTests):\n def test_from_dtype(self, data):\n pytest.skip(\"GH-22666\")\n\n # seems like some bug in isna on empty BoolArray returning floats.\n @pytest.mark.xfail(reason=\"bad is-na for empty data\")\n def test_from_sequence_from_cls(self, data):\n super().test_from_sequence_from_cls(data)\n\n\nclass TestReduce(base.BaseNoReduceTests):\n def test_reduce_series_boolean(self):\n pass\n\n\nclass TestReduceBoolean(base.BaseBooleanReduceTests):\n pass\n\n\ndef test_is_bool_dtype(data):\n assert pd.api.types.is_bool_dtype(data)\n assert pd.core.common.is_bool_indexer(data)\n s = pd.Series(range(len(data)))\n result = s[data]\n expected = s[np.asarray(data)]\n tm.assert_series_equal(result, expected)\n", "# Arithmetic tests for DataFrame/Series/Index/Array classes that should\n# behave identically.\n# Specifically for Period dtype\nimport operator\n\nimport numpy as np\nimport pytest\n\nfrom pandas._libs.tslibs.period import IncompatibleFrequency\nfrom pandas.errors import PerformanceWarning\n\nimport pandas as pd\nfrom pandas import Period, PeriodIndex, Series, period_range\nimport pandas._testing as tm\nfrom pandas.core import ops\nfrom pandas.core.arrays import TimedeltaArray\n\nfrom pandas.tseries.frequencies import to_offset\n\nfrom .common import assert_invalid_comparison\n\n# ------------------------------------------------------------------\n# Comparisons\n\n\nclass TestPeriodArrayLikeComparisons:\n # Comparison tests for PeriodDtype vectors fully parametrized over\n # DataFrame/Series/PeriodIndex/PeriodArray. 
Ideally all comparison\n # tests will eventually end up here.\n\n def test_compare_zerodim(self, box_with_array):\n # GH#26689 make sure we unbox zero-dimensional arrays\n xbox = box_with_array if box_with_array is not pd.Index else np.ndarray\n\n pi = pd.period_range(\"2000\", periods=4)\n other = np.array(pi.to_numpy()[0])\n\n pi = tm.box_expected(pi, box_with_array)\n result = pi <= other\n expected = np.array([True, False, False, False])\n expected = tm.box_expected(expected, xbox)\n tm.assert_equal(result, expected)\n\n @pytest.mark.parametrize(\n \"scalar\", [\"foo\", pd.Timestamp.now(), pd.Timedelta(days=4)]\n )\n def test_compare_invalid_scalar(self, box_with_array, scalar):\n # comparison with scalar that cannot be interpreted as a Period\n pi = pd.period_range(\"2000\", periods=4)\n parr = tm.box_expected(pi, box_with_array)\n assert_invalid_comparison(parr, scalar, box_with_array)\n\n @pytest.mark.parametrize(\n \"other\",\n [\n pd.date_range(\"2000\", periods=4).array,\n pd.timedelta_range(\"1D\", periods=4).array,\n np.arange(4),\n np.arange(4).astype(np.float64),\n list(range(4)),\n ],\n )\n def test_compare_invalid_listlike(self, box_with_array, other):\n pi = pd.period_range(\"2000\", periods=4)\n parr = tm.box_expected(pi, box_with_array)\n assert_invalid_comparison(parr, other, box_with_array)\n\n @pytest.mark.parametrize(\"other_box\", [list, np.array, lambda x: x.astype(object)])\n def test_compare_object_dtype(self, box_with_array, other_box):\n pi = pd.period_range(\"2000\", periods=5)\n parr = tm.box_expected(pi, box_with_array)\n\n xbox = np.ndarray if box_with_array is pd.Index else box_with_array\n\n other = other_box(pi)\n\n expected = np.array([True, True, True, True, True])\n expected = tm.box_expected(expected, xbox)\n\n result = parr == other\n tm.assert_equal(result, expected)\n result = parr <= other\n tm.assert_equal(result, expected)\n result = parr >= other\n tm.assert_equal(result, expected)\n\n result = parr != other\n tm.assert_equal(result, ~expected)\n result = parr < other\n tm.assert_equal(result, ~expected)\n result = parr > other\n tm.assert_equal(result, ~expected)\n\n other = other_box(pi[::-1])\n\n expected = np.array([False, False, True, False, False])\n expected = tm.box_expected(expected, xbox)\n result = parr == other\n tm.assert_equal(result, expected)\n\n expected = np.array([True, True, True, False, False])\n expected = tm.box_expected(expected, xbox)\n result = parr <= other\n tm.assert_equal(result, expected)\n\n expected = np.array([False, False, True, True, True])\n expected = tm.box_expected(expected, xbox)\n result = parr >= other\n tm.assert_equal(result, expected)\n\n expected = np.array([True, True, False, True, True])\n expected = tm.box_expected(expected, xbox)\n result = parr != other\n tm.assert_equal(result, expected)\n\n expected = np.array([True, True, False, False, False])\n expected = tm.box_expected(expected, xbox)\n result = parr < other\n tm.assert_equal(result, expected)\n\n expected = np.array([False, False, False, True, True])\n expected = tm.box_expected(expected, xbox)\n result = parr > other\n tm.assert_equal(result, expected)\n\n\nclass TestPeriodIndexComparisons:\n # TODO: parameterize over boxes\n\n @pytest.mark.parametrize(\"other\", [\"2017\", 2017])\n def test_eq(self, other):\n idx = PeriodIndex([\"2017\", \"2017\", \"2018\"], freq=\"D\")\n expected = np.array([True, True, False])\n result = idx == other\n\n tm.assert_numpy_array_equal(result, expected)\n\n def test_pi_cmp_period(self):\n idx = 
period_range(\"2007-01\", periods=20, freq=\"M\")\n\n result = idx < idx[10]\n exp = idx.values < idx.values[10]\n tm.assert_numpy_array_equal(result, exp)\n\n # TODO: moved from test_datetime64; de-duplicate with version below\n def test_parr_cmp_period_scalar2(self, box_with_array):\n xbox = box_with_array if box_with_array is not pd.Index else np.ndarray\n\n pi = pd.period_range(\"2000-01-01\", periods=10, freq=\"D\")\n\n val = Period(\"2000-01-04\", freq=\"D\")\n expected = [x > val for x in pi]\n\n ser = tm.box_expected(pi, box_with_array)\n expected = tm.box_expected(expected, xbox)\n result = ser > val\n tm.assert_equal(result, expected)\n\n val = pi[5]\n result = ser > val\n expected = [x > val for x in pi]\n expected = tm.box_expected(expected, xbox)\n tm.assert_equal(result, expected)\n\n @pytest.mark.parametrize(\"freq\", [\"M\", \"2M\", \"3M\"])\n def test_parr_cmp_period_scalar(self, freq, box_with_array):\n # GH#13200\n xbox = np.ndarray if box_with_array is pd.Index else box_with_array\n\n base = PeriodIndex([\"2011-01\", \"2011-02\", \"2011-03\", \"2011-04\"], freq=freq)\n base = tm.box_expected(base, box_with_array)\n per = Period(\"2011-02\", freq=freq)\n\n exp = np.array([False, True, False, False])\n exp = tm.box_expected(exp, xbox)\n tm.assert_equal(base == per, exp)\n tm.assert_equal(per == base, exp)\n\n exp = np.array([True, False, True, True])\n exp = tm.box_expected(exp, xbox)\n tm.assert_equal(base != per, exp)\n tm.assert_equal(per != base, exp)\n\n exp = np.array([False, False, True, True])\n exp = tm.box_expected(exp, xbox)\n tm.assert_equal(base > per, exp)\n tm.assert_equal(per < base, exp)\n\n exp = np.array([True, False, False, False])\n exp = tm.box_expected(exp, xbox)\n tm.assert_equal(base < per, exp)\n tm.assert_equal(per > base, exp)\n\n exp = np.array([False, True, True, True])\n exp = tm.box_expected(exp, xbox)\n tm.assert_equal(base >= per, exp)\n tm.assert_equal(per <= base, exp)\n\n exp = np.array([True, True, False, False])\n exp = tm.box_expected(exp, xbox)\n tm.assert_equal(base <= per, exp)\n tm.assert_equal(per >= base, exp)\n\n @pytest.mark.parametrize(\"freq\", [\"M\", \"2M\", \"3M\"])\n def test_parr_cmp_pi(self, freq, box_with_array):\n # GH#13200\n xbox = np.ndarray if box_with_array is pd.Index else box_with_array\n\n base = PeriodIndex([\"2011-01\", \"2011-02\", \"2011-03\", \"2011-04\"], freq=freq)\n base = tm.box_expected(base, box_with_array)\n\n # TODO: could also box idx?\n idx = PeriodIndex([\"2011-02\", \"2011-01\", \"2011-03\", \"2011-05\"], freq=freq)\n\n exp = np.array([False, False, True, False])\n exp = tm.box_expected(exp, xbox)\n tm.assert_equal(base == idx, exp)\n\n exp = np.array([True, True, False, True])\n exp = tm.box_expected(exp, xbox)\n tm.assert_equal(base != idx, exp)\n\n exp = np.array([False, True, False, False])\n exp = tm.box_expected(exp, xbox)\n tm.assert_equal(base > idx, exp)\n\n exp = np.array([True, False, False, True])\n exp = tm.box_expected(exp, xbox)\n tm.assert_equal(base < idx, exp)\n\n exp = np.array([False, True, True, False])\n exp = tm.box_expected(exp, xbox)\n tm.assert_equal(base >= idx, exp)\n\n exp = np.array([True, False, True, True])\n exp = tm.box_expected(exp, xbox)\n tm.assert_equal(base <= idx, exp)\n\n @pytest.mark.parametrize(\"freq\", [\"M\", \"2M\", \"3M\"])\n def test_parr_cmp_pi_mismatched_freq_raises(self, freq, box_with_array):\n # GH#13200\n # different base freq\n base = PeriodIndex([\"2011-01\", \"2011-02\", \"2011-03\", \"2011-04\"], freq=freq)\n base = 
tm.box_expected(base, box_with_array)\n\n msg = \"Input has different freq=A-DEC from \"\n with pytest.raises(IncompatibleFrequency, match=msg):\n base <= Period(\"2011\", freq=\"A\")\n\n with pytest.raises(IncompatibleFrequency, match=msg):\n Period(\"2011\", freq=\"A\") >= base\n\n # TODO: Could parametrize over boxes for idx?\n idx = PeriodIndex([\"2011\", \"2012\", \"2013\", \"2014\"], freq=\"A\")\n rev_msg = r\"Input has different freq=(M|2M|3M) from PeriodArray\\(freq=A-DEC\\)\"\n idx_msg = rev_msg if box_with_array is tm.to_array else msg\n with pytest.raises(IncompatibleFrequency, match=idx_msg):\n base <= idx\n\n # Different frequency\n msg = \"Input has different freq=4M from \"\n with pytest.raises(IncompatibleFrequency, match=msg):\n base <= Period(\"2011\", freq=\"4M\")\n\n with pytest.raises(IncompatibleFrequency, match=msg):\n Period(\"2011\", freq=\"4M\") >= base\n\n idx = PeriodIndex([\"2011\", \"2012\", \"2013\", \"2014\"], freq=\"4M\")\n rev_msg = r\"Input has different freq=(M|2M|3M) from PeriodArray\\(freq=4M\\)\"\n idx_msg = rev_msg if box_with_array is tm.to_array else msg\n with pytest.raises(IncompatibleFrequency, match=idx_msg):\n base <= idx\n\n @pytest.mark.parametrize(\"freq\", [\"M\", \"2M\", \"3M\"])\n def test_pi_cmp_nat(self, freq):\n idx1 = PeriodIndex([\"2011-01\", \"2011-02\", \"NaT\", \"2011-05\"], freq=freq)\n\n result = idx1 > Period(\"2011-02\", freq=freq)\n exp = np.array([False, False, False, True])\n tm.assert_numpy_array_equal(result, exp)\n result = Period(\"2011-02\", freq=freq) < idx1\n tm.assert_numpy_array_equal(result, exp)\n\n result = idx1 == Period(\"NaT\", freq=freq)\n exp = np.array([False, False, False, False])\n tm.assert_numpy_array_equal(result, exp)\n result = Period(\"NaT\", freq=freq) == idx1\n tm.assert_numpy_array_equal(result, exp)\n\n result = idx1 != Period(\"NaT\", freq=freq)\n exp = np.array([True, True, True, True])\n tm.assert_numpy_array_equal(result, exp)\n result = Period(\"NaT\", freq=freq) != idx1\n tm.assert_numpy_array_equal(result, exp)\n\n idx2 = PeriodIndex([\"2011-02\", \"2011-01\", \"2011-04\", \"NaT\"], freq=freq)\n result = idx1 < idx2\n exp = np.array([True, False, False, False])\n tm.assert_numpy_array_equal(result, exp)\n\n result = idx1 == idx2\n exp = np.array([False, False, False, False])\n tm.assert_numpy_array_equal(result, exp)\n\n result = idx1 != idx2\n exp = np.array([True, True, True, True])\n tm.assert_numpy_array_equal(result, exp)\n\n result = idx1 == idx1\n exp = np.array([True, True, False, True])\n tm.assert_numpy_array_equal(result, exp)\n\n result = idx1 != idx1\n exp = np.array([False, False, True, False])\n tm.assert_numpy_array_equal(result, exp)\n\n @pytest.mark.parametrize(\"freq\", [\"M\", \"2M\", \"3M\"])\n def test_pi_cmp_nat_mismatched_freq_raises(self, freq):\n idx1 = PeriodIndex([\"2011-01\", \"2011-02\", \"NaT\", \"2011-05\"], freq=freq)\n\n diff = PeriodIndex([\"2011-02\", \"2011-01\", \"2011-04\", \"NaT\"], freq=\"4M\")\n msg = \"Input has different freq=4M from Period(Array|Index)\"\n with pytest.raises(IncompatibleFrequency, match=msg):\n idx1 > diff\n\n with pytest.raises(IncompatibleFrequency, match=msg):\n idx1 == diff\n\n # TODO: De-duplicate with test_pi_cmp_nat\n @pytest.mark.parametrize(\"dtype\", [object, None])\n def test_comp_nat(self, dtype):\n left = pd.PeriodIndex(\n [pd.Period(\"2011-01-01\"), pd.NaT, pd.Period(\"2011-01-03\")]\n )\n right = pd.PeriodIndex([pd.NaT, pd.NaT, pd.Period(\"2011-01-03\")])\n\n if dtype is not None:\n left = left.astype(dtype)\n 
right = right.astype(dtype)\n\n result = left == right\n expected = np.array([False, False, True])\n tm.assert_numpy_array_equal(result, expected)\n\n result = left != right\n expected = np.array([True, True, False])\n tm.assert_numpy_array_equal(result, expected)\n\n expected = np.array([False, False, False])\n tm.assert_numpy_array_equal(left == pd.NaT, expected)\n tm.assert_numpy_array_equal(pd.NaT == right, expected)\n\n expected = np.array([True, True, True])\n tm.assert_numpy_array_equal(left != pd.NaT, expected)\n tm.assert_numpy_array_equal(pd.NaT != left, expected)\n\n expected = np.array([False, False, False])\n tm.assert_numpy_array_equal(left < pd.NaT, expected)\n tm.assert_numpy_array_equal(pd.NaT > left, expected)\n\n\nclass TestPeriodSeriesComparisons:\n def test_cmp_series_period_series_mixed_freq(self):\n # GH#13200\n base = Series(\n [\n Period(\"2011\", freq=\"A\"),\n Period(\"2011-02\", freq=\"M\"),\n Period(\"2013\", freq=\"A\"),\n Period(\"2011-04\", freq=\"M\"),\n ]\n )\n\n ser = Series(\n [\n Period(\"2012\", freq=\"A\"),\n Period(\"2011-01\", freq=\"M\"),\n Period(\"2013\", freq=\"A\"),\n Period(\"2011-05\", freq=\"M\"),\n ]\n )\n\n exp = Series([False, False, True, False])\n tm.assert_series_equal(base == ser, exp)\n\n exp = Series([True, True, False, True])\n tm.assert_series_equal(base != ser, exp)\n\n exp = Series([False, True, False, False])\n tm.assert_series_equal(base > ser, exp)\n\n exp = Series([True, False, False, True])\n tm.assert_series_equal(base < ser, exp)\n\n exp = Series([False, True, True, False])\n tm.assert_series_equal(base >= ser, exp)\n\n exp = Series([True, False, True, True])\n tm.assert_series_equal(base <= ser, exp)\n\n\nclass TestPeriodIndexSeriesComparisonConsistency:\n \"\"\" Test PeriodIndex and Period Series Ops consistency \"\"\"\n\n # TODO: needs parametrization+de-duplication\n\n def _check(self, values, func, expected):\n # Test PeriodIndex and Period Series Ops consistency\n\n idx = pd.PeriodIndex(values)\n result = func(idx)\n\n # check that we don't pass an unwanted type to tm.assert_equal\n assert isinstance(expected, (pd.Index, np.ndarray))\n tm.assert_equal(result, expected)\n\n s = pd.Series(values)\n result = func(s)\n\n exp = pd.Series(expected, name=values.name)\n tm.assert_series_equal(result, exp)\n\n def test_pi_comp_period(self):\n idx = PeriodIndex(\n [\"2011-01\", \"2011-02\", \"2011-03\", \"2011-04\"], freq=\"M\", name=\"idx\"\n )\n\n f = lambda x: x == pd.Period(\"2011-03\", freq=\"M\")\n exp = np.array([False, False, True, False], dtype=np.bool)\n self._check(idx, f, exp)\n f = lambda x: pd.Period(\"2011-03\", freq=\"M\") == x\n self._check(idx, f, exp)\n\n f = lambda x: x != pd.Period(\"2011-03\", freq=\"M\")\n exp = np.array([True, True, False, True], dtype=np.bool)\n self._check(idx, f, exp)\n f = lambda x: pd.Period(\"2011-03\", freq=\"M\") != x\n self._check(idx, f, exp)\n\n f = lambda x: pd.Period(\"2011-03\", freq=\"M\") >= x\n exp = np.array([True, True, True, False], dtype=np.bool)\n self._check(idx, f, exp)\n\n f = lambda x: x > pd.Period(\"2011-03\", freq=\"M\")\n exp = np.array([False, False, False, True], dtype=np.bool)\n self._check(idx, f, exp)\n\n f = lambda x: pd.Period(\"2011-03\", freq=\"M\") >= x\n exp = np.array([True, True, True, False], dtype=np.bool)\n self._check(idx, f, exp)\n\n def test_pi_comp_period_nat(self):\n idx = PeriodIndex(\n [\"2011-01\", \"NaT\", \"2011-03\", \"2011-04\"], freq=\"M\", name=\"idx\"\n )\n\n f = lambda x: x == pd.Period(\"2011-03\", freq=\"M\")\n exp = 
np.array([False, False, True, False], dtype=np.bool)\n self._check(idx, f, exp)\n f = lambda x: pd.Period(\"2011-03\", freq=\"M\") == x\n self._check(idx, f, exp)\n\n f = lambda x: x == pd.NaT\n exp = np.array([False, False, False, False], dtype=np.bool)\n self._check(idx, f, exp)\n f = lambda x: pd.NaT == x\n self._check(idx, f, exp)\n\n f = lambda x: x != pd.Period(\"2011-03\", freq=\"M\")\n exp = np.array([True, True, False, True], dtype=np.bool)\n self._check(idx, f, exp)\n f = lambda x: pd.Period(\"2011-03\", freq=\"M\") != x\n self._check(idx, f, exp)\n\n f = lambda x: x != pd.NaT\n exp = np.array([True, True, True, True], dtype=np.bool)\n self._check(idx, f, exp)\n f = lambda x: pd.NaT != x\n self._check(idx, f, exp)\n\n f = lambda x: pd.Period(\"2011-03\", freq=\"M\") >= x\n exp = np.array([True, False, True, False], dtype=np.bool)\n self._check(idx, f, exp)\n\n f = lambda x: x < pd.Period(\"2011-03\", freq=\"M\")\n exp = np.array([True, False, False, False], dtype=np.bool)\n self._check(idx, f, exp)\n\n f = lambda x: x > pd.NaT\n exp = np.array([False, False, False, False], dtype=np.bool)\n self._check(idx, f, exp)\n\n f = lambda x: pd.NaT >= x\n exp = np.array([False, False, False, False], dtype=np.bool)\n self._check(idx, f, exp)\n\n\n# ------------------------------------------------------------------\n# Arithmetic\n\n\nclass TestPeriodFrameArithmetic:\n def test_ops_frame_period(self):\n # GH#13043\n df = pd.DataFrame(\n {\n \"A\": [pd.Period(\"2015-01\", freq=\"M\"), pd.Period(\"2015-02\", freq=\"M\")],\n \"B\": [pd.Period(\"2014-01\", freq=\"M\"), pd.Period(\"2014-02\", freq=\"M\")],\n }\n )\n assert df[\"A\"].dtype == \"Period[M]\"\n assert df[\"B\"].dtype == \"Period[M]\"\n\n p = pd.Period(\"2015-03\", freq=\"M\")\n off = p.freq\n # dtype will be object because of original dtype\n exp = pd.DataFrame(\n {\n \"A\": np.array([2 * off, 1 * off], dtype=object),\n \"B\": np.array([14 * off, 13 * off], dtype=object),\n }\n )\n tm.assert_frame_equal(p - df, exp)\n tm.assert_frame_equal(df - p, -1 * exp)\n\n df2 = pd.DataFrame(\n {\n \"A\": [pd.Period(\"2015-05\", freq=\"M\"), pd.Period(\"2015-06\", freq=\"M\")],\n \"B\": [pd.Period(\"2015-05\", freq=\"M\"), pd.Period(\"2015-06\", freq=\"M\")],\n }\n )\n assert df2[\"A\"].dtype == \"Period[M]\"\n assert df2[\"B\"].dtype == \"Period[M]\"\n\n exp = pd.DataFrame(\n {\n \"A\": np.array([4 * off, 4 * off], dtype=object),\n \"B\": np.array([16 * off, 16 * off], dtype=object),\n }\n )\n tm.assert_frame_equal(df2 - df, exp)\n tm.assert_frame_equal(df - df2, -1 * exp)\n\n\nclass TestPeriodIndexArithmetic:\n # ---------------------------------------------------------------\n # __add__/__sub__ with PeriodIndex\n # PeriodIndex + other is defined for integers and timedelta-like others\n # PeriodIndex - other is defined for integers, timedelta-like others,\n # and PeriodIndex (with matching freq)\n\n def test_parr_add_iadd_parr_raises(self, box_with_array):\n rng = pd.period_range(\"1/1/2000\", freq=\"D\", periods=5)\n other = pd.period_range(\"1/6/2000\", freq=\"D\", periods=5)\n # TODO: parametrize over boxes for other?\n\n rng = tm.box_expected(rng, box_with_array)\n # An earlier implementation of PeriodIndex addition performed\n # a set operation (union). This has since been changed to\n # raise a TypeError. 
See GH#14164 and GH#13077 for historical\n # reference.\n with pytest.raises(TypeError):\n rng + other\n\n with pytest.raises(TypeError):\n rng += other\n\n def test_pi_sub_isub_pi(self):\n # GH#20049\n # For historical reference see GH#14164, GH#13077.\n # PeriodIndex subtraction originally performed set difference,\n # then changed to raise TypeError before being implemented in GH#20049\n rng = pd.period_range(\"1/1/2000\", freq=\"D\", periods=5)\n other = pd.period_range(\"1/6/2000\", freq=\"D\", periods=5)\n\n off = rng.freq\n expected = pd.Index([-5 * off] * 5)\n result = rng - other\n tm.assert_index_equal(result, expected)\n\n rng -= other\n tm.assert_index_equal(rng, expected)\n\n def test_pi_sub_pi_with_nat(self):\n rng = pd.period_range(\"1/1/2000\", freq=\"D\", periods=5)\n other = rng[1:].insert(0, pd.NaT)\n assert other[1:].equals(rng[1:])\n\n result = rng - other\n off = rng.freq\n expected = pd.Index([pd.NaT, 0 * off, 0 * off, 0 * off, 0 * off])\n tm.assert_index_equal(result, expected)\n\n def test_parr_sub_pi_mismatched_freq(self, box_with_array):\n rng = pd.period_range(\"1/1/2000\", freq=\"D\", periods=5)\n other = pd.period_range(\"1/6/2000\", freq=\"H\", periods=5)\n # TODO: parametrize over boxes for other?\n\n rng = tm.box_expected(rng, box_with_array)\n with pytest.raises(IncompatibleFrequency):\n rng - other\n\n @pytest.mark.parametrize(\"n\", [1, 2, 3, 4])\n def test_sub_n_gt_1_ticks(self, tick_classes, n):\n # GH 23878\n p1_d = \"19910905\"\n p2_d = \"19920406\"\n p1 = pd.PeriodIndex([p1_d], freq=tick_classes(n))\n p2 = pd.PeriodIndex([p2_d], freq=tick_classes(n))\n\n expected = pd.PeriodIndex([p2_d], freq=p2.freq.base) - pd.PeriodIndex(\n [p1_d], freq=p1.freq.base\n )\n\n tm.assert_index_equal((p2 - p1), expected)\n\n @pytest.mark.parametrize(\"n\", [1, 2, 3, 4])\n @pytest.mark.parametrize(\n \"offset, kwd_name\",\n [\n (pd.offsets.YearEnd, \"month\"),\n (pd.offsets.QuarterEnd, \"startingMonth\"),\n (pd.offsets.MonthEnd, None),\n (pd.offsets.Week, \"weekday\"),\n ],\n )\n def test_sub_n_gt_1_offsets(self, offset, kwd_name, n):\n # GH 23878\n kwds = {kwd_name: 3} if kwd_name is not None else {}\n p1_d = \"19910905\"\n p2_d = \"19920406\"\n freq = offset(n, normalize=False, **kwds)\n p1 = pd.PeriodIndex([p1_d], freq=freq)\n p2 = pd.PeriodIndex([p2_d], freq=freq)\n\n result = p2 - p1\n expected = pd.PeriodIndex([p2_d], freq=freq.base) - pd.PeriodIndex(\n [p1_d], freq=freq.base\n )\n\n tm.assert_index_equal(result, expected)\n\n # -------------------------------------------------------------\n # Invalid Operations\n\n @pytest.mark.parametrize(\"other\", [3.14, np.array([2.0, 3.0])])\n @pytest.mark.parametrize(\"op\", [operator.add, ops.radd, operator.sub, ops.rsub])\n def test_parr_add_sub_float_raises(self, op, other, box_with_array):\n dti = pd.DatetimeIndex([\"2011-01-01\", \"2011-01-02\"], freq=\"D\")\n pi = dti.to_period(\"D\")\n pi = tm.box_expected(pi, box_with_array)\n with pytest.raises(TypeError):\n op(pi, other)\n\n @pytest.mark.parametrize(\n \"other\",\n [\n # datetime scalars\n pd.Timestamp.now(),\n pd.Timestamp.now().to_pydatetime(),\n pd.Timestamp.now().to_datetime64(),\n # datetime-like arrays\n pd.date_range(\"2016-01-01\", periods=3, freq=\"H\"),\n pd.date_range(\"2016-01-01\", periods=3, tz=\"Europe/Brussels\"),\n pd.date_range(\"2016-01-01\", periods=3, freq=\"S\")._data,\n pd.date_range(\"2016-01-01\", periods=3, tz=\"Asia/Tokyo\")._data,\n # Miscellaneous invalid types\n ],\n )\n def test_parr_add_sub_invalid(self, other, box_with_array):\n # 
GH#23215\n rng = pd.period_range(\"1/1/2000\", freq=\"D\", periods=3)\n rng = tm.box_expected(rng, box_with_array)\n\n with pytest.raises(TypeError):\n rng + other\n with pytest.raises(TypeError):\n other + rng\n with pytest.raises(TypeError):\n rng - other\n with pytest.raises(TypeError):\n other - rng\n\n # -----------------------------------------------------------------\n # __add__/__sub__ with ndarray[datetime64] and ndarray[timedelta64]\n\n def test_pi_add_sub_td64_array_non_tick_raises(self):\n rng = pd.period_range(\"1/1/2000\", freq=\"Q\", periods=3)\n tdi = pd.TimedeltaIndex([\"-1 Day\", \"-1 Day\", \"-1 Day\"])\n tdarr = tdi.values\n\n with pytest.raises(IncompatibleFrequency):\n rng + tdarr\n with pytest.raises(IncompatibleFrequency):\n tdarr + rng\n\n with pytest.raises(IncompatibleFrequency):\n rng - tdarr\n with pytest.raises(TypeError):\n tdarr - rng\n\n def test_pi_add_sub_td64_array_tick(self):\n # PeriodIndex + Timedelta-like is allowed only with\n # tick-like frequencies\n rng = pd.period_range(\"1/1/2000\", freq=\"90D\", periods=3)\n tdi = pd.TimedeltaIndex([\"-1 Day\", \"-1 Day\", \"-1 Day\"])\n tdarr = tdi.values\n\n expected = pd.period_range(\"12/31/1999\", freq=\"90D\", periods=3)\n result = rng + tdi\n tm.assert_index_equal(result, expected)\n result = rng + tdarr\n tm.assert_index_equal(result, expected)\n result = tdi + rng\n tm.assert_index_equal(result, expected)\n result = tdarr + rng\n tm.assert_index_equal(result, expected)\n\n expected = pd.period_range(\"1/2/2000\", freq=\"90D\", periods=3)\n\n result = rng - tdi\n tm.assert_index_equal(result, expected)\n result = rng - tdarr\n tm.assert_index_equal(result, expected)\n\n with pytest.raises(TypeError):\n tdarr - rng\n\n with pytest.raises(TypeError):\n tdi - rng\n\n # -----------------------------------------------------------------\n # operations with array/Index of DateOffset objects\n\n @pytest.mark.parametrize(\"box\", [np.array, pd.Index])\n def test_pi_add_offset_array(self, box):\n # GH#18849\n pi = pd.PeriodIndex([pd.Period(\"2015Q1\"), pd.Period(\"2016Q2\")])\n offs = box(\n [\n pd.offsets.QuarterEnd(n=1, startingMonth=12),\n pd.offsets.QuarterEnd(n=-2, startingMonth=12),\n ]\n )\n expected = pd.PeriodIndex([pd.Period(\"2015Q2\"), pd.Period(\"2015Q4\")])\n\n with tm.assert_produces_warning(PerformanceWarning):\n res = pi + offs\n tm.assert_index_equal(res, expected)\n\n with tm.assert_produces_warning(PerformanceWarning):\n res2 = offs + pi\n tm.assert_index_equal(res2, expected)\n\n unanchored = np.array([pd.offsets.Hour(n=1), pd.offsets.Minute(n=-2)])\n # addition/subtraction ops with incompatible offsets should issue\n # a PerformanceWarning and _then_ raise a TypeError.\n with pytest.raises(IncompatibleFrequency):\n with tm.assert_produces_warning(PerformanceWarning):\n pi + unanchored\n with pytest.raises(IncompatibleFrequency):\n with tm.assert_produces_warning(PerformanceWarning):\n unanchored + pi\n\n @pytest.mark.parametrize(\"box\", [np.array, pd.Index])\n def test_pi_sub_offset_array(self, box):\n # GH#18824\n pi = pd.PeriodIndex([pd.Period(\"2015Q1\"), pd.Period(\"2016Q2\")])\n other = box(\n [\n pd.offsets.QuarterEnd(n=1, startingMonth=12),\n pd.offsets.QuarterEnd(n=-2, startingMonth=12),\n ]\n )\n\n expected = PeriodIndex([pi[n] - other[n] for n in range(len(pi))])\n\n with tm.assert_produces_warning(PerformanceWarning):\n res = pi - other\n tm.assert_index_equal(res, expected)\n\n anchored = box([pd.offsets.MonthEnd(), pd.offsets.Day(n=2)])\n\n # addition/subtraction ops with 
anchored offsets should issue\n # a PerformanceWarning and _then_ raise a TypeError.\n with pytest.raises(IncompatibleFrequency):\n with tm.assert_produces_warning(PerformanceWarning):\n pi - anchored\n with pytest.raises(IncompatibleFrequency):\n with tm.assert_produces_warning(PerformanceWarning):\n anchored - pi\n\n def test_pi_add_iadd_int(self, one):\n # Variants of `one` for #19012\n rng = pd.period_range(\"2000-01-01 09:00\", freq=\"H\", periods=10)\n result = rng + one\n expected = pd.period_range(\"2000-01-01 10:00\", freq=\"H\", periods=10)\n tm.assert_index_equal(result, expected)\n rng += one\n tm.assert_index_equal(rng, expected)\n\n def test_pi_sub_isub_int(self, one):\n \"\"\"\n PeriodIndex.__sub__ and __isub__ with several representations of\n the integer 1, e.g. int, np.int64, np.uint8, ...\n \"\"\"\n rng = pd.period_range(\"2000-01-01 09:00\", freq=\"H\", periods=10)\n result = rng - one\n expected = pd.period_range(\"2000-01-01 08:00\", freq=\"H\", periods=10)\n tm.assert_index_equal(result, expected)\n rng -= one\n tm.assert_index_equal(rng, expected)\n\n @pytest.mark.parametrize(\"five\", [5, np.array(5, dtype=np.int64)])\n def test_pi_sub_intlike(self, five):\n rng = period_range(\"2007-01\", periods=50)\n\n result = rng - five\n exp = rng + (-five)\n tm.assert_index_equal(result, exp)\n\n def test_pi_sub_isub_offset(self):\n # offset\n # DateOffset\n rng = pd.period_range(\"2014\", \"2024\", freq=\"A\")\n result = rng - pd.offsets.YearEnd(5)\n expected = pd.period_range(\"2009\", \"2019\", freq=\"A\")\n tm.assert_index_equal(result, expected)\n rng -= pd.offsets.YearEnd(5)\n tm.assert_index_equal(rng, expected)\n\n rng = pd.period_range(\"2014-01\", \"2016-12\", freq=\"M\")\n result = rng - pd.offsets.MonthEnd(5)\n expected = pd.period_range(\"2013-08\", \"2016-07\", freq=\"M\")\n tm.assert_index_equal(result, expected)\n\n rng -= pd.offsets.MonthEnd(5)\n tm.assert_index_equal(rng, expected)\n\n @pytest.mark.parametrize(\"transpose\", [True, False])\n def test_pi_add_offset_n_gt1(self, box_with_array, transpose):\n # GH#23215\n # add offset to PeriodIndex with freq.n > 1\n\n per = pd.Period(\"2016-01\", freq=\"2M\")\n pi = pd.PeriodIndex([per])\n\n expected = pd.PeriodIndex([\"2016-03\"], freq=\"2M\")\n\n pi = tm.box_expected(pi, box_with_array, transpose=transpose)\n expected = tm.box_expected(expected, box_with_array, transpose=transpose)\n\n result = pi + per.freq\n tm.assert_equal(result, expected)\n\n result = per.freq + pi\n tm.assert_equal(result, expected)\n\n def test_pi_add_offset_n_gt1_not_divisible(self, box_with_array):\n # GH#23215\n # PeriodIndex with freq.n > 1 add offset with offset.n % freq.n != 0\n pi = pd.PeriodIndex([\"2016-01\"], freq=\"2M\")\n expected = pd.PeriodIndex([\"2016-04\"], freq=\"2M\")\n\n # FIXME: with transposing these tests fail\n pi = tm.box_expected(pi, box_with_array, transpose=False)\n expected = tm.box_expected(expected, box_with_array, transpose=False)\n\n result = pi + to_offset(\"3M\")\n tm.assert_equal(result, expected)\n\n result = to_offset(\"3M\") + pi\n tm.assert_equal(result, expected)\n\n # ---------------------------------------------------------------\n # __add__/__sub__ with integer arrays\n\n @pytest.mark.parametrize(\"int_holder\", [np.array, pd.Index])\n @pytest.mark.parametrize(\"op\", [operator.add, ops.radd])\n def test_pi_add_intarray(self, int_holder, op):\n # GH#19959\n pi = pd.PeriodIndex([pd.Period(\"2015Q1\"), pd.Period(\"NaT\")])\n other = int_holder([4, -1])\n\n result = op(pi, other)\n expected = 
pd.PeriodIndex([pd.Period(\"2016Q1\"), pd.Period(\"NaT\")])\n tm.assert_index_equal(result, expected)\n\n @pytest.mark.parametrize(\"int_holder\", [np.array, pd.Index])\n def test_pi_sub_intarray(self, int_holder):\n # GH#19959\n pi = pd.PeriodIndex([pd.Period(\"2015Q1\"), pd.Period(\"NaT\")])\n other = int_holder([4, -1])\n\n result = pi - other\n expected = pd.PeriodIndex([pd.Period(\"2014Q1\"), pd.Period(\"NaT\")])\n tm.assert_index_equal(result, expected)\n\n with pytest.raises(TypeError):\n other - pi\n\n # ---------------------------------------------------------------\n # Timedelta-like (timedelta, timedelta64, Timedelta, Tick)\n # TODO: Some of these are misnomers because of non-Tick DateOffsets\n\n def test_pi_add_timedeltalike_minute_gt1(self, three_days):\n # GH#23031 adding a time-delta-like offset to a PeriodArray that has\n # minute frequency with n != 1. A more general case is tested below\n # in test_pi_add_timedeltalike_tick_gt1, but here we write out the\n # expected result more explicitly.\n other = three_days\n rng = pd.period_range(\"2014-05-01\", periods=3, freq=\"2D\")\n\n expected = pd.PeriodIndex([\"2014-05-04\", \"2014-05-06\", \"2014-05-08\"], freq=\"2D\")\n\n result = rng + other\n tm.assert_index_equal(result, expected)\n\n result = other + rng\n tm.assert_index_equal(result, expected)\n\n # subtraction\n expected = pd.PeriodIndex([\"2014-04-28\", \"2014-04-30\", \"2014-05-02\"], freq=\"2D\")\n result = rng - other\n tm.assert_index_equal(result, expected)\n\n with pytest.raises(TypeError):\n other - rng\n\n @pytest.mark.parametrize(\"freqstr\", [\"5ns\", \"5us\", \"5ms\", \"5s\", \"5T\", \"5h\", \"5d\"])\n def test_pi_add_timedeltalike_tick_gt1(self, three_days, freqstr):\n # GH#23031 adding a time-delta-like offset to a PeriodArray that has\n # tick-like frequency with n != 1\n other = three_days\n rng = pd.period_range(\"2014-05-01\", periods=6, freq=freqstr)\n\n expected = pd.period_range(rng[0] + other, periods=6, freq=freqstr)\n\n result = rng + other\n tm.assert_index_equal(result, expected)\n\n result = other + rng\n tm.assert_index_equal(result, expected)\n\n # subtraction\n expected = pd.period_range(rng[0] - other, periods=6, freq=freqstr)\n result = rng - other\n tm.assert_index_equal(result, expected)\n\n with pytest.raises(TypeError):\n other - rng\n\n def test_pi_add_iadd_timedeltalike_daily(self, three_days):\n # Tick\n other = three_days\n rng = pd.period_range(\"2014-05-01\", \"2014-05-15\", freq=\"D\")\n expected = pd.period_range(\"2014-05-04\", \"2014-05-18\", freq=\"D\")\n\n result = rng + other\n tm.assert_index_equal(result, expected)\n\n rng += other\n tm.assert_index_equal(rng, expected)\n\n def test_pi_sub_isub_timedeltalike_daily(self, three_days):\n # Tick-like 3 Days\n other = three_days\n rng = pd.period_range(\"2014-05-01\", \"2014-05-15\", freq=\"D\")\n expected = pd.period_range(\"2014-04-28\", \"2014-05-12\", freq=\"D\")\n\n result = rng - other\n tm.assert_index_equal(result, expected)\n\n rng -= other\n tm.assert_index_equal(rng, expected)\n\n def test_pi_add_sub_timedeltalike_freq_mismatch_daily(self, not_daily):\n other = not_daily\n rng = pd.period_range(\"2014-05-01\", \"2014-05-15\", freq=\"D\")\n msg = \"Input has different freq(=.+)? 
from Period.*?\\\\(freq=D\\\\)\"\n with pytest.raises(IncompatibleFrequency, match=msg):\n rng + other\n with pytest.raises(IncompatibleFrequency, match=msg):\n rng += other\n with pytest.raises(IncompatibleFrequency, match=msg):\n rng - other\n with pytest.raises(IncompatibleFrequency, match=msg):\n rng -= other\n\n def test_pi_add_iadd_timedeltalike_hourly(self, two_hours):\n other = two_hours\n rng = pd.period_range(\"2014-01-01 10:00\", \"2014-01-05 10:00\", freq=\"H\")\n expected = pd.period_range(\"2014-01-01 12:00\", \"2014-01-05 12:00\", freq=\"H\")\n\n result = rng + other\n tm.assert_index_equal(result, expected)\n\n rng += other\n tm.assert_index_equal(rng, expected)\n\n def test_pi_add_timedeltalike_mismatched_freq_hourly(self, not_hourly):\n other = not_hourly\n rng = pd.period_range(\"2014-01-01 10:00\", \"2014-01-05 10:00\", freq=\"H\")\n msg = \"Input has different freq(=.+)? from Period.*?\\\\(freq=H\\\\)\"\n\n with pytest.raises(IncompatibleFrequency, match=msg):\n rng + other\n\n with pytest.raises(IncompatibleFrequency, match=msg):\n rng += other\n\n def test_pi_sub_isub_timedeltalike_hourly(self, two_hours):\n other = two_hours\n rng = pd.period_range(\"2014-01-01 10:00\", \"2014-01-05 10:00\", freq=\"H\")\n expected = pd.period_range(\"2014-01-01 08:00\", \"2014-01-05 08:00\", freq=\"H\")\n\n result = rng - other\n tm.assert_index_equal(result, expected)\n\n rng -= other\n tm.assert_index_equal(rng, expected)\n\n def test_add_iadd_timedeltalike_annual(self):\n # offset\n # DateOffset\n rng = pd.period_range(\"2014\", \"2024\", freq=\"A\")\n result = rng + pd.offsets.YearEnd(5)\n expected = pd.period_range(\"2019\", \"2029\", freq=\"A\")\n tm.assert_index_equal(result, expected)\n rng += pd.offsets.YearEnd(5)\n tm.assert_index_equal(rng, expected)\n\n def test_pi_add_sub_timedeltalike_freq_mismatch_annual(self, mismatched_freq):\n other = mismatched_freq\n rng = pd.period_range(\"2014\", \"2024\", freq=\"A\")\n msg = \"Input has different freq(=.+)? from Period.*?\\\\(freq=A-DEC\\\\)\"\n with pytest.raises(IncompatibleFrequency, match=msg):\n rng + other\n with pytest.raises(IncompatibleFrequency, match=msg):\n rng += other\n with pytest.raises(IncompatibleFrequency, match=msg):\n rng - other\n with pytest.raises(IncompatibleFrequency, match=msg):\n rng -= other\n\n def test_pi_add_iadd_timedeltalike_M(self):\n rng = pd.period_range(\"2014-01\", \"2016-12\", freq=\"M\")\n expected = pd.period_range(\"2014-06\", \"2017-05\", freq=\"M\")\n\n result = rng + pd.offsets.MonthEnd(5)\n tm.assert_index_equal(result, expected)\n\n rng += pd.offsets.MonthEnd(5)\n tm.assert_index_equal(rng, expected)\n\n def test_pi_add_sub_timedeltalike_freq_mismatch_monthly(self, mismatched_freq):\n other = mismatched_freq\n rng = pd.period_range(\"2014-01\", \"2016-12\", freq=\"M\")\n msg = \"Input has different freq(=.+)? 
from Period.*?\\\\(freq=M\\\\)\"\n with pytest.raises(IncompatibleFrequency, match=msg):\n rng + other\n with pytest.raises(IncompatibleFrequency, match=msg):\n rng += other\n with pytest.raises(IncompatibleFrequency, match=msg):\n rng - other\n with pytest.raises(IncompatibleFrequency, match=msg):\n rng -= other\n\n @pytest.mark.parametrize(\"transpose\", [True, False])\n def test_parr_add_sub_td64_nat(self, box_with_array, transpose):\n # GH#23320 special handling for timedelta64(\"NaT\")\n pi = pd.period_range(\"1994-04-01\", periods=9, freq=\"19D\")\n other = np.timedelta64(\"NaT\")\n expected = pd.PeriodIndex([\"NaT\"] * 9, freq=\"19D\")\n\n obj = tm.box_expected(pi, box_with_array, transpose=transpose)\n expected = tm.box_expected(expected, box_with_array, transpose=transpose)\n\n result = obj + other\n tm.assert_equal(result, expected)\n result = other + obj\n tm.assert_equal(result, expected)\n result = obj - other\n tm.assert_equal(result, expected)\n with pytest.raises(TypeError):\n other - obj\n\n @pytest.mark.parametrize(\n \"other\",\n [\n np.array([\"NaT\"] * 9, dtype=\"m8[ns]\"),\n TimedeltaArray._from_sequence([\"NaT\"] * 9),\n ],\n )\n def test_parr_add_sub_tdt64_nat_array(self, box_with_array, other):\n pi = pd.period_range(\"1994-04-01\", periods=9, freq=\"19D\")\n expected = pd.PeriodIndex([\"NaT\"] * 9, freq=\"19D\")\n\n obj = tm.box_expected(pi, box_with_array)\n expected = tm.box_expected(expected, box_with_array)\n\n result = obj + other\n tm.assert_equal(result, expected)\n result = other + obj\n tm.assert_equal(result, expected)\n result = obj - other\n tm.assert_equal(result, expected)\n with pytest.raises(TypeError):\n other - obj\n\n # ---------------------------------------------------------------\n # Unsorted\n\n def test_parr_add_sub_index(self):\n # Check that PeriodArray defers to Index on arithmetic ops\n pi = pd.period_range(\"2000-12-31\", periods=3)\n parr = pi.array\n\n result = parr - pi\n expected = pi - pi\n tm.assert_index_equal(result, expected)\n\n def test_parr_add_sub_object_array(self):\n pi = pd.period_range(\"2000-12-31\", periods=3, freq=\"D\")\n parr = pi.array\n\n other = np.array([pd.Timedelta(days=1), pd.offsets.Day(2), 3])\n\n with tm.assert_produces_warning(PerformanceWarning):\n result = parr + other\n\n expected = pd.PeriodIndex(\n [\"2001-01-01\", \"2001-01-03\", \"2001-01-05\"], freq=\"D\"\n ).array\n tm.assert_equal(result, expected)\n\n with tm.assert_produces_warning(PerformanceWarning):\n result = parr - other\n\n expected = pd.PeriodIndex([\"2000-12-30\"] * 3, freq=\"D\").array\n tm.assert_equal(result, expected)\n\n\nclass TestPeriodSeriesArithmetic:\n def test_ops_series_timedelta(self):\n # GH#13043\n ser = pd.Series(\n [pd.Period(\"2015-01-01\", freq=\"D\"), pd.Period(\"2015-01-02\", freq=\"D\")],\n name=\"xxx\",\n )\n assert ser.dtype == \"Period[D]\"\n\n expected = pd.Series(\n [pd.Period(\"2015-01-02\", freq=\"D\"), pd.Period(\"2015-01-03\", freq=\"D\")],\n name=\"xxx\",\n )\n\n result = ser + pd.Timedelta(\"1 days\")\n tm.assert_series_equal(result, expected)\n\n result = pd.Timedelta(\"1 days\") + ser\n tm.assert_series_equal(result, expected)\n\n result = ser + pd.tseries.offsets.Day()\n tm.assert_series_equal(result, expected)\n\n result = pd.tseries.offsets.Day() + ser\n tm.assert_series_equal(result, expected)\n\n def test_ops_series_period(self):\n # GH#13043\n ser = pd.Series(\n [pd.Period(\"2015-01-01\", freq=\"D\"), pd.Period(\"2015-01-02\", freq=\"D\")],\n name=\"xxx\",\n )\n assert ser.dtype == 
\"Period[D]\"\n\n per = pd.Period(\"2015-01-10\", freq=\"D\")\n off = per.freq\n # dtype will be object because of original dtype\n expected = pd.Series([9 * off, 8 * off], name=\"xxx\", dtype=object)\n tm.assert_series_equal(per - ser, expected)\n tm.assert_series_equal(ser - per, -1 * expected)\n\n s2 = pd.Series(\n [pd.Period(\"2015-01-05\", freq=\"D\"), pd.Period(\"2015-01-04\", freq=\"D\")],\n name=\"xxx\",\n )\n assert s2.dtype == \"Period[D]\"\n\n expected = pd.Series([4 * off, 2 * off], name=\"xxx\", dtype=object)\n tm.assert_series_equal(s2 - ser, expected)\n tm.assert_series_equal(ser - s2, -1 * expected)\n\n\nclass TestPeriodIndexSeriesMethods:\n \"\"\" Test PeriodIndex and Period Series Ops consistency \"\"\"\n\n def _check(self, values, func, expected):\n idx = pd.PeriodIndex(values)\n result = func(idx)\n tm.assert_equal(result, expected)\n\n ser = pd.Series(values)\n result = func(ser)\n\n exp = pd.Series(expected, name=values.name)\n tm.assert_series_equal(result, exp)\n\n def test_pi_ops(self):\n idx = PeriodIndex(\n [\"2011-01\", \"2011-02\", \"2011-03\", \"2011-04\"], freq=\"M\", name=\"idx\"\n )\n\n expected = PeriodIndex(\n [\"2011-03\", \"2011-04\", \"2011-05\", \"2011-06\"], freq=\"M\", name=\"idx\"\n )\n\n self._check(idx, lambda x: x + 2, expected)\n self._check(idx, lambda x: 2 + x, expected)\n\n self._check(idx + 2, lambda x: x - 2, idx)\n\n result = idx - Period(\"2011-01\", freq=\"M\")\n off = idx.freq\n exp = pd.Index([0 * off, 1 * off, 2 * off, 3 * off], name=\"idx\")\n tm.assert_index_equal(result, exp)\n\n result = Period(\"2011-01\", freq=\"M\") - idx\n exp = pd.Index([0 * off, -1 * off, -2 * off, -3 * off], name=\"idx\")\n tm.assert_index_equal(result, exp)\n\n @pytest.mark.parametrize(\"ng\", [\"str\", 1.5])\n @pytest.mark.parametrize(\n \"func\",\n [\n lambda obj, ng: obj + ng,\n lambda obj, ng: ng + obj,\n lambda obj, ng: obj - ng,\n lambda obj, ng: ng - obj,\n lambda obj, ng: np.add(obj, ng),\n lambda obj, ng: np.add(ng, obj),\n lambda obj, ng: np.subtract(obj, ng),\n lambda obj, ng: np.subtract(ng, obj),\n ],\n )\n def test_parr_ops_errors(self, ng, func, box_with_array):\n idx = PeriodIndex(\n [\"2011-01\", \"2011-02\", \"2011-03\", \"2011-04\"], freq=\"M\", name=\"idx\"\n )\n obj = tm.box_expected(idx, box_with_array)\n msg = (\n r\"unsupported operand type\\(s\\)|can only concatenate|\"\n r\"must be str|object to str implicitly\"\n )\n\n with pytest.raises(TypeError, match=msg):\n func(obj, ng)\n\n def test_pi_ops_nat(self):\n idx = PeriodIndex(\n [\"2011-01\", \"2011-02\", \"NaT\", \"2011-04\"], freq=\"M\", name=\"idx\"\n )\n expected = PeriodIndex(\n [\"2011-03\", \"2011-04\", \"NaT\", \"2011-06\"], freq=\"M\", name=\"idx\"\n )\n\n self._check(idx, lambda x: x + 2, expected)\n self._check(idx, lambda x: 2 + x, expected)\n self._check(idx, lambda x: np.add(x, 2), expected)\n\n self._check(idx + 2, lambda x: x - 2, idx)\n self._check(idx + 2, lambda x: np.subtract(x, 2), idx)\n\n # freq with mult\n idx = PeriodIndex(\n [\"2011-01\", \"2011-02\", \"NaT\", \"2011-04\"], freq=\"2M\", name=\"idx\"\n )\n expected = PeriodIndex(\n [\"2011-07\", \"2011-08\", \"NaT\", \"2011-10\"], freq=\"2M\", name=\"idx\"\n )\n\n self._check(idx, lambda x: x + 3, expected)\n self._check(idx, lambda x: 3 + x, expected)\n self._check(idx, lambda x: np.add(x, 3), expected)\n\n self._check(idx + 3, lambda x: x - 3, idx)\n self._check(idx + 3, lambda x: np.subtract(x, 3), idx)\n\n def test_pi_ops_array_int(self):\n\n idx = PeriodIndex(\n [\"2011-01\", \"2011-02\", \"NaT\", 
\"2011-04\"], freq=\"M\", name=\"idx\"\n )\n f = lambda x: x + np.array([1, 2, 3, 4])\n exp = PeriodIndex(\n [\"2011-02\", \"2011-04\", \"NaT\", \"2011-08\"], freq=\"M\", name=\"idx\"\n )\n self._check(idx, f, exp)\n\n f = lambda x: np.add(x, np.array([4, -1, 1, 2]))\n exp = PeriodIndex(\n [\"2011-05\", \"2011-01\", \"NaT\", \"2011-06\"], freq=\"M\", name=\"idx\"\n )\n self._check(idx, f, exp)\n\n f = lambda x: x - np.array([1, 2, 3, 4])\n exp = PeriodIndex(\n [\"2010-12\", \"2010-12\", \"NaT\", \"2010-12\"], freq=\"M\", name=\"idx\"\n )\n self._check(idx, f, exp)\n\n f = lambda x: np.subtract(x, np.array([3, 2, 3, -2]))\n exp = PeriodIndex(\n [\"2010-10\", \"2010-12\", \"NaT\", \"2011-06\"], freq=\"M\", name=\"idx\"\n )\n self._check(idx, f, exp)\n\n def test_pi_ops_offset(self):\n idx = PeriodIndex(\n [\"2011-01-01\", \"2011-02-01\", \"2011-03-01\", \"2011-04-01\"],\n freq=\"D\",\n name=\"idx\",\n )\n f = lambda x: x + pd.offsets.Day()\n exp = PeriodIndex(\n [\"2011-01-02\", \"2011-02-02\", \"2011-03-02\", \"2011-04-02\"],\n freq=\"D\",\n name=\"idx\",\n )\n self._check(idx, f, exp)\n\n f = lambda x: x + pd.offsets.Day(2)\n exp = PeriodIndex(\n [\"2011-01-03\", \"2011-02-03\", \"2011-03-03\", \"2011-04-03\"],\n freq=\"D\",\n name=\"idx\",\n )\n self._check(idx, f, exp)\n\n f = lambda x: x - pd.offsets.Day(2)\n exp = PeriodIndex(\n [\"2010-12-30\", \"2011-01-30\", \"2011-02-27\", \"2011-03-30\"],\n freq=\"D\",\n name=\"idx\",\n )\n self._check(idx, f, exp)\n\n def test_pi_offset_errors(self):\n idx = PeriodIndex(\n [\"2011-01-01\", \"2011-02-01\", \"2011-03-01\", \"2011-04-01\"],\n freq=\"D\",\n name=\"idx\",\n )\n ser = pd.Series(idx)\n\n # Series op is applied per Period instance, thus error is raised\n # from Period\n for obj in [idx, ser]:\n msg = r\"Input has different freq=2H from Period.*?\\(freq=D\\)\"\n with pytest.raises(IncompatibleFrequency, match=msg):\n obj + pd.offsets.Hour(2)\n\n with pytest.raises(IncompatibleFrequency, match=msg):\n pd.offsets.Hour(2) + obj\n\n msg = r\"Input has different freq=-2H from Period.*?\\(freq=D\\)\"\n with pytest.raises(IncompatibleFrequency, match=msg):\n obj - pd.offsets.Hour(2)\n\n def test_pi_sub_period(self):\n # GH#13071\n idx = PeriodIndex(\n [\"2011-01\", \"2011-02\", \"2011-03\", \"2011-04\"], freq=\"M\", name=\"idx\"\n )\n\n result = idx - pd.Period(\"2012-01\", freq=\"M\")\n off = idx.freq\n exp = pd.Index([-12 * off, -11 * off, -10 * off, -9 * off], name=\"idx\")\n tm.assert_index_equal(result, exp)\n\n result = np.subtract(idx, pd.Period(\"2012-01\", freq=\"M\"))\n tm.assert_index_equal(result, exp)\n\n result = pd.Period(\"2012-01\", freq=\"M\") - idx\n exp = pd.Index([12 * off, 11 * off, 10 * off, 9 * off], name=\"idx\")\n tm.assert_index_equal(result, exp)\n\n result = np.subtract(pd.Period(\"2012-01\", freq=\"M\"), idx)\n tm.assert_index_equal(result, exp)\n\n exp = pd.TimedeltaIndex([np.nan, np.nan, np.nan, np.nan], name=\"idx\")\n tm.assert_index_equal(idx - pd.Period(\"NaT\", freq=\"M\"), exp)\n tm.assert_index_equal(pd.Period(\"NaT\", freq=\"M\") - idx, exp)\n\n def test_pi_sub_pdnat(self):\n # GH#13071\n idx = PeriodIndex(\n [\"2011-01\", \"2011-02\", \"NaT\", \"2011-04\"], freq=\"M\", name=\"idx\"\n )\n exp = pd.TimedeltaIndex([pd.NaT] * 4, name=\"idx\")\n tm.assert_index_equal(pd.NaT - idx, exp)\n tm.assert_index_equal(idx - pd.NaT, exp)\n\n def test_pi_sub_period_nat(self):\n # GH#13071\n idx = PeriodIndex(\n [\"2011-01\", \"NaT\", \"2011-03\", \"2011-04\"], freq=\"M\", name=\"idx\"\n )\n\n result = idx - 
pd.Period(\"2012-01\", freq=\"M\")\n off = idx.freq\n exp = pd.Index([-12 * off, pd.NaT, -10 * off, -9 * off], name=\"idx\")\n tm.assert_index_equal(result, exp)\n\n result = pd.Period(\"2012-01\", freq=\"M\") - idx\n exp = pd.Index([12 * off, pd.NaT, 10 * off, 9 * off], name=\"idx\")\n tm.assert_index_equal(result, exp)\n\n exp = pd.TimedeltaIndex([np.nan, np.nan, np.nan, np.nan], name=\"idx\")\n tm.assert_index_equal(idx - pd.Period(\"NaT\", freq=\"M\"), exp)\n tm.assert_index_equal(pd.Period(\"NaT\", freq=\"M\") - idx, exp)\n", "import numpy as np\nimport pytest\n\nfrom pandas import DataFrame, Index, Series, Timestamp\nimport pandas._testing as tm\n\n\ndef _assert_almost_equal_both(a, b, **kwargs):\n \"\"\"\n Check that two objects are approximately equal.\n\n This check is performed commutatively.\n\n Parameters\n ----------\n a : object\n The first object to compare.\n b : object\n The second object to compare.\n kwargs : dict\n The arguments passed to `tm.assert_almost_equal`.\n \"\"\"\n tm.assert_almost_equal(a, b, **kwargs)\n tm.assert_almost_equal(b, a, **kwargs)\n\n\ndef _assert_not_almost_equal(a, b, **kwargs):\n \"\"\"\n Check that two objects are not approximately equal.\n\n Parameters\n ----------\n a : object\n The first object to compare.\n b : object\n The second object to compare.\n kwargs : dict\n The arguments passed to `tm.assert_almost_equal`.\n \"\"\"\n try:\n tm.assert_almost_equal(a, b, **kwargs)\n msg = (\n \"{a} and {b} were approximately equal when they shouldn't have been\"\n ).format(a=a, b=b)\n pytest.fail(msg=msg)\n except AssertionError:\n pass\n\n\ndef _assert_not_almost_equal_both(a, b, **kwargs):\n \"\"\"\n Check that two objects are not approximately equal.\n\n This check is performed commutatively.\n\n Parameters\n ----------\n a : object\n The first object to compare.\n b : object\n The second object to compare.\n kwargs : dict\n The arguments passed to `tm.assert_almost_equal`.\n \"\"\"\n _assert_not_almost_equal(a, b, **kwargs)\n _assert_not_almost_equal(b, a, **kwargs)\n\n\[email protected](\n \"a,b\",\n [\n (1.1, 1.1),\n (1.1, 1.100001),\n (np.int16(1), 1.000001),\n (np.float64(1.1), 1.1),\n (np.uint32(5), 5),\n ],\n)\ndef test_assert_almost_equal_numbers(a, b):\n _assert_almost_equal_both(a, b)\n\n\[email protected](\"a,b\", [(1.1, 1), (1.1, True), (1, 2), (1.0001, np.int16(1))])\ndef test_assert_not_almost_equal_numbers(a, b):\n _assert_not_almost_equal_both(a, b)\n\n\[email protected](\"a,b\", [(0, 0), (0, 0.0), (0, np.float64(0)), (0.000001, 0)])\ndef test_assert_almost_equal_numbers_with_zeros(a, b):\n _assert_almost_equal_both(a, b)\n\n\[email protected](\"a,b\", [(0.001, 0), (1, 0)])\ndef test_assert_not_almost_equal_numbers_with_zeros(a, b):\n _assert_not_almost_equal_both(a, b)\n\n\[email protected](\"a,b\", [(1, \"abc\"), (1, [1]), (1, object())])\ndef test_assert_not_almost_equal_numbers_with_mixed(a, b):\n _assert_not_almost_equal_both(a, b)\n\n\[email protected](\n \"left_dtype\", [\"M8[ns]\", \"m8[ns]\", \"float64\", \"int64\", \"object\"]\n)\[email protected](\n \"right_dtype\", [\"M8[ns]\", \"m8[ns]\", \"float64\", \"int64\", \"object\"]\n)\ndef test_assert_almost_equal_edge_case_ndarrays(left_dtype, right_dtype):\n # Empty compare.\n _assert_almost_equal_both(\n np.array([], dtype=left_dtype),\n np.array([], dtype=right_dtype),\n check_dtype=False,\n )\n\n\ndef test_assert_almost_equal_dicts():\n _assert_almost_equal_both({\"a\": 1, \"b\": 2}, {\"a\": 1, \"b\": 2})\n\n\[email protected](\n \"a,b\",\n [\n ({\"a\": 1, \"b\": 2}, 
{\"a\": 1, \"b\": 3}),\n ({\"a\": 1, \"b\": 2}, {\"a\": 1, \"b\": 2, \"c\": 3}),\n ({\"a\": 1}, 1),\n ({\"a\": 1}, \"abc\"),\n ({\"a\": 1}, [1]),\n ],\n)\ndef test_assert_not_almost_equal_dicts(a, b):\n _assert_not_almost_equal_both(a, b)\n\n\[email protected](\"val\", [1, 2])\ndef test_assert_almost_equal_dict_like_object(val):\n dict_val = 1\n real_dict = dict(a=val)\n\n class DictLikeObj:\n def keys(self):\n return (\"a\",)\n\n def __getitem__(self, item):\n if item == \"a\":\n return dict_val\n\n func = (\n _assert_almost_equal_both if val == dict_val else _assert_not_almost_equal_both\n )\n func(real_dict, DictLikeObj(), check_dtype=False)\n\n\ndef test_assert_almost_equal_strings():\n _assert_almost_equal_both(\"abc\", \"abc\")\n\n\[email protected](\n \"a,b\", [(\"abc\", \"abcd\"), (\"abc\", \"abd\"), (\"abc\", 1), (\"abc\", [1])]\n)\ndef test_assert_not_almost_equal_strings(a, b):\n _assert_not_almost_equal_both(a, b)\n\n\[email protected](\n \"a,b\", [([1, 2, 3], [1, 2, 3]), (np.array([1, 2, 3]), np.array([1, 2, 3]))]\n)\ndef test_assert_almost_equal_iterables(a, b):\n _assert_almost_equal_both(a, b)\n\n\[email protected](\n \"a,b\",\n [\n # Class is different.\n (np.array([1, 2, 3]), [1, 2, 3]),\n # Dtype is different.\n (np.array([1, 2, 3]), np.array([1.0, 2.0, 3.0])),\n # Can't compare generators.\n (iter([1, 2, 3]), [1, 2, 3]),\n ([1, 2, 3], [1, 2, 4]),\n ([1, 2, 3], [1, 2, 3, 4]),\n ([1, 2, 3], 1),\n ],\n)\ndef test_assert_not_almost_equal_iterables(a, b):\n _assert_not_almost_equal(a, b)\n\n\ndef test_assert_almost_equal_null():\n _assert_almost_equal_both(None, None)\n\n\[email protected](\"a,b\", [(None, np.NaN), (None, 0), (np.NaN, 0)])\ndef test_assert_not_almost_equal_null(a, b):\n _assert_not_almost_equal(a, b)\n\n\[email protected](\n \"a,b\",\n [\n (np.inf, np.inf),\n (np.inf, float(\"inf\")),\n (np.array([np.inf, np.nan, -np.inf]), np.array([np.inf, np.nan, -np.inf])),\n (\n np.array([np.inf, None, -np.inf], dtype=np.object_),\n np.array([np.inf, np.nan, -np.inf], dtype=np.object_),\n ),\n ],\n)\ndef test_assert_almost_equal_inf(a, b):\n _assert_almost_equal_both(a, b)\n\n\ndef test_assert_not_almost_equal_inf():\n _assert_not_almost_equal_both(np.inf, 0)\n\n\[email protected](\n \"a,b\",\n [\n (Index([1.0, 1.1]), Index([1.0, 1.100001])),\n (Series([1.0, 1.1]), Series([1.0, 1.100001])),\n (np.array([1.1, 2.000001]), np.array([1.1, 2.0])),\n (DataFrame({\"a\": [1.0, 1.1]}), DataFrame({\"a\": [1.0, 1.100001]})),\n ],\n)\ndef test_assert_almost_equal_pandas(a, b):\n _assert_almost_equal_both(a, b)\n\n\ndef test_assert_almost_equal_object():\n a = [Timestamp(\"2011-01-01\"), Timestamp(\"2011-01-01\")]\n b = [Timestamp(\"2011-01-01\"), Timestamp(\"2011-01-01\")]\n _assert_almost_equal_both(a, b)\n\n\ndef test_assert_almost_equal_value_mismatch():\n msg = \"expected 2\\\\.00000 but got 1\\\\.00000, with decimal 5\"\n\n with pytest.raises(AssertionError, match=msg):\n tm.assert_almost_equal(1, 2)\n\n\[email protected](\n \"a,b,klass1,klass2\",\n [(np.array([1]), 1, \"ndarray\", \"int\"), (1, np.array([1]), \"int\", \"ndarray\")],\n)\ndef test_assert_almost_equal_class_mismatch(a, b, klass1, klass2):\n msg = \"\"\"numpy array are different\n\nnumpy array classes are different\n\\\\[left\\\\]: {klass1}\n\\\\[right\\\\]: {klass2}\"\"\".format(\n klass1=klass1, klass2=klass2\n )\n\n with pytest.raises(AssertionError, match=msg):\n tm.assert_almost_equal(a, b)\n\n\ndef test_assert_almost_equal_value_mismatch1():\n msg = \"\"\"numpy array are different\n\nnumpy array values are 
different \\\\(66\\\\.66667 %\\\\)\n\\\\[left\\\\]: \\\\[nan, 2\\\\.0, 3\\\\.0\\\\]\n\\\\[right\\\\]: \\\\[1\\\\.0, nan, 3\\\\.0\\\\]\"\"\"\n\n with pytest.raises(AssertionError, match=msg):\n tm.assert_almost_equal(np.array([np.nan, 2, 3]), np.array([1, np.nan, 3]))\n\n\ndef test_assert_almost_equal_value_mismatch2():\n msg = \"\"\"numpy array are different\n\nnumpy array values are different \\\\(50\\\\.0 %\\\\)\n\\\\[left\\\\]: \\\\[1, 2\\\\]\n\\\\[right\\\\]: \\\\[1, 3\\\\]\"\"\"\n\n with pytest.raises(AssertionError, match=msg):\n tm.assert_almost_equal(np.array([1, 2]), np.array([1, 3]))\n\n\ndef test_assert_almost_equal_value_mismatch3():\n msg = \"\"\"numpy array are different\n\nnumpy array values are different \\\\(16\\\\.66667 %\\\\)\n\\\\[left\\\\]: \\\\[\\\\[1, 2\\\\], \\\\[3, 4\\\\], \\\\[5, 6\\\\]\\\\]\n\\\\[right\\\\]: \\\\[\\\\[1, 3\\\\], \\\\[3, 4\\\\], \\\\[5, 6\\\\]\\\\]\"\"\"\n\n with pytest.raises(AssertionError, match=msg):\n tm.assert_almost_equal(\n np.array([[1, 2], [3, 4], [5, 6]]), np.array([[1, 3], [3, 4], [5, 6]])\n )\n\n\ndef test_assert_almost_equal_value_mismatch4():\n msg = \"\"\"numpy array are different\n\nnumpy array values are different \\\\(25\\\\.0 %\\\\)\n\\\\[left\\\\]: \\\\[\\\\[1, 2\\\\], \\\\[3, 4\\\\]\\\\]\n\\\\[right\\\\]: \\\\[\\\\[1, 3\\\\], \\\\[3, 4\\\\]\\\\]\"\"\"\n\n with pytest.raises(AssertionError, match=msg):\n tm.assert_almost_equal(np.array([[1, 2], [3, 4]]), np.array([[1, 3], [3, 4]]))\n\n\ndef test_assert_almost_equal_shape_mismatch_override():\n msg = \"\"\"Index are different\n\nIndex shapes are different\n\\\\[left\\\\]: \\\\(2L*,\\\\)\n\\\\[right\\\\]: \\\\(3L*,\\\\)\"\"\"\n with pytest.raises(AssertionError, match=msg):\n tm.assert_almost_equal(np.array([1, 2]), np.array([3, 4, 5]), obj=\"Index\")\n\n\ndef test_assert_almost_equal_unicode():\n # see gh-20503\n msg = \"\"\"numpy array are different\n\nnumpy array values are different \\\\(33\\\\.33333 %\\\\)\n\\\\[left\\\\]: \\\\[á, à, ä\\\\]\n\\\\[right\\\\]: \\\\[á, à, å\\\\]\"\"\"\n\n with pytest.raises(AssertionError, match=msg):\n tm.assert_almost_equal(np.array([\"á\", \"à\", \"ä\"]), np.array([\"á\", \"à\", \"å\"]))\n\n\ndef test_assert_almost_equal_timestamp():\n a = np.array([Timestamp(\"2011-01-01\"), Timestamp(\"2011-01-01\")])\n b = np.array([Timestamp(\"2011-01-01\"), Timestamp(\"2011-01-02\")])\n\n msg = \"\"\"numpy array are different\n\nnumpy array values are different \\\\(50\\\\.0 %\\\\)\n\\\\[left\\\\]: \\\\[2011-01-01 00:00:00, 2011-01-01 00:00:00\\\\]\n\\\\[right\\\\]: \\\\[2011-01-01 00:00:00, 2011-01-02 00:00:00\\\\]\"\"\"\n\n with pytest.raises(AssertionError, match=msg):\n tm.assert_almost_equal(a, b)\n\n\ndef test_assert_almost_equal_iterable_length_mismatch():\n msg = \"\"\"Iterable are different\n\nIterable length are different\n\\\\[left\\\\]: 2\n\\\\[right\\\\]: 3\"\"\"\n\n with pytest.raises(AssertionError, match=msg):\n tm.assert_almost_equal([1, 2], [3, 4, 5])\n\n\ndef test_assert_almost_equal_iterable_values_mismatch():\n msg = \"\"\"Iterable are different\n\nIterable values are different \\\\(50\\\\.0 %\\\\)\n\\\\[left\\\\]: \\\\[1, 2\\\\]\n\\\\[right\\\\]: \\\\[1, 3\\\\]\"\"\"\n\n with pytest.raises(AssertionError, match=msg):\n tm.assert_almost_equal([1, 2], [1, 3])\n" ]
[ [ "pandas._libs.missing.checknull", "numpy.array_equal", "numpy.isfinite", "numpy.asarray", "numpy.isnan", "pandas._libs.lib.is_scalar", "pandas._config.get_option", "pandas._libs.missing.checknull_old", "pandas._libs.lib.values_from_object", "numpy.prod", "numpy.array", "numpy.zeros", "numpy.empty" ], [ "pandas.core.common.is_bool_indexer", "numpy.asarray", "pandas._testing.assert_series_equal", "pandas.api.types.is_bool_dtype", "numpy.random.randint" ], [ "pandas.tseries.offsets.Day", "pandas.tseries.frequencies.to_offset", "pandas.PeriodIndex", "pandas.Series", "pandas.offsets.Day", "pandas._testing.box_expected", "pandas._testing.assert_frame_equal", "pandas._testing.assert_numpy_array_equal", "numpy.arange", "numpy.subtract", "pandas.Index", "pandas.DatetimeIndex", "pandas.offsets.QuarterEnd", "pandas.offsets.MonthEnd", "pandas._testing.assert_series_equal", "pandas._testing.assert_index_equal", "pandas._testing.assert_produces_warning", "pandas.Timedelta", "numpy.timedelta64", "pandas.date_range", "numpy.array", "pandas.offsets.YearEnd", "pandas.timedelta_range", "pandas._testing.assert_equal", "pandas.TimedeltaIndex", "pandas.period_range", "pandas.core.arrays.TimedeltaArray._from_sequence", "pandas.offsets.Minute", "pandas.Timestamp.now", "pandas.Period", "numpy.add", "pandas.offsets.Hour" ], [ "pandas._testing.assert_almost_equal", "pandas.Series", "numpy.uint32", "pandas.Timestamp", "pandas.Index", "pandas.DataFrame", "numpy.int16", "numpy.float64", "numpy.array" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [ "0.23", "0.21", "2.0", "1.4", "0.19", "1.1", "1.5", "1.2", "0.24", "0.20", "1.0", "0.25", "1.3" ], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [ "0.23", "0.21", "2.0", "1.4", "1.3", "0.19", "1.1", "1.5", "0.24", "0.20", "1.0", "0.25", "1.2" ], "scipy": [], "tensorflow": [] } ]
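The two bracketed lists above are per-file metadata for the preceding record: apis enumerates the fully qualified callables detected in each source file, and possible_versions lists the library versions each file is thought to be compatible with (keyed by matplotlib/numpy/pandas/scipy/tensorflow). A sketch of consuming one record follows; the rows.jsonl filename and the one-JSON-object-per-line layout are assumptions for illustration, not part of this dump:

import json

with open("rows.jsonl") as fh:  # hypothetical serialization of these records
    for line in fh:
        row = json.loads(line)
        # file_path, code, apis, and possible_versions are parallel per-file lists
        for path, apis, versions in zip(row["file_path"], row["apis"], row["possible_versions"]):
            pandas_calls = [a for a in apis if a.startswith("pandas.")]
            print(path, len(pandas_calls), "pandas calls;",
                  "pandas version candidates:", versions.get("pandas", []))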
cpadavis/piffy3pipeline
[ "c12f8e719d45409b85c20e34479d7ff238a1247d" ]
[ "collect.py" ]
[ "\"\"\"\nCollect fit parameters and plot their values\n\nCalculate stats over full set of stars\n\nMostly just copied from stats rho stats\n\"\"\"\nfrom __future__ import print_function, division\nimport glob\nimport galsim\nimport treecorr\nimport numpy as np\nimport os\nfrom matplotlib.figure import Figure\nfrom matplotlib.backends.backend_agg import FigureCanvasAgg\n\nimport pandas as pd\nimport fitsio\n\ndef convert_momentshapes_to_shapes(momentshapes):\n shapes = []\n for m in momentshapes:\n e0, e1, e2 = m\n sigma = np.sqrt(np.sqrt(np.square(e0 ** 2 - e1 ** 2 - e2 ** 2) * 0.25))\n shear = galsim.Shear(e1=e1/e0, e2=e2/e0)\n g1 = shear.g1\n g2 = shear.g2\n shapes.append([sigma, g1, g2])\n shapes = np.array(shapes)\n return shapes\n\ndef load_shapes(fo):\n # load shapes, convert columns, and apply a MAD cut\n shapes = pd.read_hdf(fo)\n\n # now add shapes to, ahum, shapes\n T, g1, g2 = convert_momentshapes_to_shapes(shapes[['data_e0', 'data_e1', 'data_e2']].values).T\n T_model, g1_model, g2_model = convert_momentshapes_to_shapes(shapes[['model_e0', 'model_e1', 'model_e2']].values).T\n dT = T - T_model\n dg1 = g1 - g1_model\n dg2 = g2 - g2_model\n\n shapes['data_T'] = T\n shapes['data_g1'] = g1\n shapes['data_g2'] = g2\n shapes['model_T'] = T_model\n shapes['model_g1'] = g1_model\n shapes['model_g2'] = g2_model\n shapes['dT'] = dT\n shapes['dg1'] = dg1\n shapes['dg2'] = dg2\n\n\n # NaN cut: drop rows with infinities or missing values\n shapes.replace([np.inf, -np.inf], np.nan, inplace=True)\n shapes.dropna(axis=0, inplace=True)\n # MAD cut on data outliers\n mad_cols = ['data_T', 'data_g1', 'data_g2']\n mad = shapes[mad_cols].sub(shapes[mad_cols].median()).abs()\n madcut = mad.lt(5 * 1.48 * shapes[mad_cols].mad() + 1e-8) # add a little bit for those cases where every value in a column is exactly the same\n conds = madcut.all(axis=1)\n cut_shapes = shapes[conds]\n return cut_shapes, shapes, conds\n\ndef self__plot_single(ax, rho, color, marker, offset=0.):\n # Add a single rho stat to the plot.\n meanr = rho.meanr * (1. 
+ rho.bin_size * offset)\n xip = rho.xip\n sig = np.sqrt(rho.varxi)\n ax.plot(meanr, xip, color=color)\n ax.plot(meanr, -xip, color=color, ls=':')\n ax.errorbar(meanr[xip>0], xip[xip>0], yerr=sig[xip>0], color=color, ls='', marker=marker)\n ax.errorbar(meanr[xip<0], -xip[xip<0], yerr=sig[xip<0], color=color, ls='', marker=marker)\n return ax.errorbar(-meanr, xip, yerr=sig, color=color, marker=marker)\n\ndef sum_sq(x):\n return np.sum(np.square(x))\n\ndef run_onedhists(files, plotdict):\n\n # plotdict has: plot_path, key_x, key_y, bins_x\n outs = {}\n\n for file_indx, fo in enumerate(files):\n if (file_indx + 1) % int(max([len(files) * 0.05, 1])) == 0:\n print('doing {0} out of {1}:'.format(file_indx + 1, len(files)))\n # load up the dataframe containing the shapes as measured with hsm\n shapes, nocut_shapes, conds = load_shapes(fo)\n\n # iterate through plotdict to do the summaries\n for key in plotdict:\n key_x = plotdict[key]['key_x']\n key_y = plotdict[key]['key_y']\n bins_x = plotdict[key]['bins_x']\n\n if key_x not in shapes:\n continue\n elif key_y not in shapes:\n continue\n\n df = shapes[[key_x, key_y]]\n # cut out infinite\n df = df.dropna(how='any')\n\n cut = pd.cut(df[key_x], bins_x, labels=False)\n group = df.groupby(cut)\n size = group.size()\n sums = group.agg('sum')\n sqsums = group.agg(sum_sq)\n\n if key not in outs:\n outs[key] = {'size': size, 'sums': sums, 'sqsums': sqsums}\n else:\n outs[key]['size'] = outs[key]['size'] + size\n outs[key]['sums'] = outs[key]['sums'] + sums\n outs[key]['sqsums'] = outs[key]['sqsums'] + sqsums\n\n # convert into estimations of the mean and std\n for key in outs:\n outs[key]['means'] = outs[key]['sums'].div(outs[key]['size'], axis=0)\n outs[key]['std'] = np.sqrt(outs[key]['sqsums'].div(outs[key]['size'], axis=0) - outs[key]['means'] ** 2).div(np.sqrt(outs[key]['size']), axis=0) # err on the mean\n\n # now plot the outs\n for key in outs:\n plot_path = plotdict[key]['plot_path']\n key_x = plotdict[key]['key_x']\n key_y = plotdict[key]['key_y']\n bins_x = plotdict[key]['bins_x']\n\n fig = Figure(figsize = (10,5))\n ax = fig.add_subplot(1,1,1)\n\n ax.set_xlabel(key_x)\n ax.set_ylabel(key_y)\n\n x = outs[key]['means'][key_x]\n y = outs[key]['means'][key_y]\n yerr = outs[key]['std'][key_y]\n conds = np.isfinite(x) * np.isfinite(y) * np.isfinite(yerr)\n x = x[conds]\n y = y[conds]\n yerr = yerr[conds]\n ax.errorbar(x, y, yerr=yerr)\n\n try:\n ax.set_xlim(x.min(), x.max())\n except ValueError:\n continue\n if 'log_x' in plotdict[key]:\n if plotdict[key]['log_x']:\n ax.set_xscale('log', nonposx='mask')\n\n canvas = FigureCanvasAgg(fig)\n # Do this after we've set the canvas to use Agg to avoid warning.\n fig.set_tight_layout(True)\n\n # save files based on what is listed\n print('saving plot to {0}'.format(plot_path))\n canvas.print_figure(plot_path, dpi=100)\n\ndef run_rho(files, plot_path, uv_coord):\n\n all_shapes = []\n for file_indx, fo in enumerate(files):\n if (file_indx + 1) % int(max([len(files) * 0.05, 1])) == 0:\n print('doing {0} out of {1}:'.format(file_indx + 1, len(files)))\n # load up the dataframe containing the shapes as measured with hsm\n shapes, nocut_shapes, conds = load_shapes(fo)\n all_shapes.append(shapes)\n print('concatenating')\n shapes = pd.concat(all_shapes, ignore_index=True)\n\n print('doing shapes')\n if uv_coord:\n u = shapes['u']\n v = shapes['v']\n else:\n ra = shapes['ra']\n dec = shapes['dec']\n\n # though we have to convert to regular shapes\n T, g1, g2 = convert_momentshapes_to_shapes(shapes[['data_e0', 
'data_e1', 'data_e2']].values).T\n T_model, g1_model, g2_model = convert_momentshapes_to_shapes(shapes[['model_e0', 'model_e1', 'model_e2']].values).T\n\n dT = T - T_model\n dg1 = g1 - g1_model\n dg2 = g2 - g2_model\n\n if uv_coord:\n cat_kwargs = {'x': u, 'y': v, 'x_units': 'arcsec', 'y_units': 'arcsec'}\n else:\n cat_kwargs = {'ra': ra, 'dec': dec, 'ra_units': 'deg', 'dec_units': 'deg'}\n\n print('doing catalogs')\n cat_kwargs['g1'] = g1\n cat_kwargs['g2'] = g2\n cat_g = treecorr.Catalog(**cat_kwargs)\n\n cat_kwargs['g1'] = dg1\n cat_kwargs['g2'] = dg2\n cat_dg = treecorr.Catalog(**cat_kwargs)\n\n cat_kwargs['g1'] = g1 * dT / T\n cat_kwargs['g2'] = g2 * dT / T\n cat_gdTT = treecorr.Catalog(**cat_kwargs)\n\n # accumulate corr\n self_tckwargs = {'min_sep': 0.1, 'max_sep': 600, 'bin_size': 0.2, 'sep_units': 'arcmin'}\n self_rho1 = treecorr.GGCorrelation(self_tckwargs)\n self_rho2 = treecorr.GGCorrelation(self_tckwargs)\n self_rho3 = treecorr.GGCorrelation(self_tckwargs)\n self_rho4 = treecorr.GGCorrelation(self_tckwargs)\n self_rho5 = treecorr.GGCorrelation(self_tckwargs)\n\n print('accumulating')\n self_rho1.process(cat_dg)\n self_rho2.process(cat_g, cat_dg)\n self_rho3.process(cat_gdTT)\n self_rho4.process(cat_dg, cat_gdTT)\n self_rho5.process(cat_g, cat_gdTT)\n\n print('make figure')\n # figure\n fig = Figure(figsize = (10,5))\n # In matplotlib 2.0, this will be\n # axs = fig.subplots(ncols=2)\n axs = [ fig.add_subplot(1,2,1),\n fig.add_subplot(1,2,2) ]\n axs = np.array(axs, dtype=object)\n\n # Left plot is rho1,3,4\n rho1 = self__plot_single(axs[0], self_rho1, 'blue', 'o')\n rho3 = self__plot_single(axs[0], self_rho3, 'green', 's', 0.1)\n rho4 = self__plot_single(axs[0], self_rho4, 'red', '^', 0.2)\n\n axs[0].legend([rho1, rho3, rho4],\n [r'$\\rho_1(\\theta)$', r'$\\rho_3(\\theta)$', r'$\\rho_4(\\theta)$'],\n loc='upper right', fontsize=12)\n axs[0].set_ylim(1.e-9, 1e-3)\n axs[0].set_xlim(self_tckwargs['min_sep'], self_tckwargs['max_sep'])\n axs[0].set_xlabel(r'$\\theta$ (arcmin)')\n axs[0].set_ylabel(r'$\\rho(\\theta)$')\n axs[0].set_xscale('log')\n axs[0].set_yscale('log', nonposy='clip')\n\n # Right plot is rho2,5\n rho2 = self__plot_single(axs[1], self_rho2, 'blue', 'o')\n rho5 = self__plot_single(axs[1], self_rho5, 'green', 's', 0.1)\n\n axs[1].legend([rho2, rho5],\n [r'$\\rho_2(\\theta)$', r'$\\rho_5(\\theta)$'],\n loc='upper right', fontsize=12)\n axs[1].set_ylim(1.e-7, 1e-3)\n axs[1].set_xlim(self_tckwargs['min_sep'], self_tckwargs['max_sep'])\n axs[1].set_xlabel(r'$\\theta$ (arcmin)')\n axs[1].set_ylabel(r'$\\rho(\\theta)$')\n axs[1].set_xscale('log')\n axs[1].set_yscale('log', nonposy='clip')\n\n canvas = FigureCanvasAgg(fig)\n # Do this after we've set the canvas to use Agg to avoid warning.\n fig.set_tight_layout(True)\n\n # save files based on what is listed\n print('saving plot to {0}'.format(plot_path))\n canvas.print_figure(plot_path, dpi=100)\n\ndef run_collect_optics(files, file_out):\n # extract fit parameters from psf file. 
We are assuming optics\n fits = {'expid': []}\n skip_keys = []\n for file_indx, fo in enumerate(files):\n if (file_indx + 1) % int(max([len(files) * 0.05, 1])) == 0:\n print('doing {0} out of {1}:'.format(file_indx + 1, len(files)))\n # extract solution\n arr = fitsio.read(fo, 'psf_solution')\n for key in arr.dtype.names:\n if 'min_' in key and key not in skip_keys:\n skip_keys.append(key)\n elif 'max_' in key and key not in skip_keys:\n skip_keys.append(key)\n elif 'fix_' in key and key not in skip_keys:\n skip_keys.append(key)\n\n if key not in skip_keys:\n if file_indx == 0:\n fits[key] = []\n fits[key].append(arr[key][0])\n\n # make sure the expid column also gets an entry\n expid = int(fo.split('/')[-2])\n fits['expid'].append(expid)\n\n # load in the atmo portion\n try:\n arr = fitsio.read(fo, 'psf_atmo_interp_kernel')\n vals = arr['FIT_THETA'][0]\n for vi, val in enumerate(vals):\n for vj, vali in enumerate(val):\n key = 'gp_atmo_{0}_param_{1}'.format(['size', 'g1', 'g2'][vi], vj)\n if file_indx == 0:\n fits[key] = []\n fits[key].append(vali)\n except IOError:\n # not in the file\n continue\n\n try:\n fits = pd.DataFrame(fits)\n except Exception as e:\n for key in fits:\n print(key, len(fits[key]))\n print(type(e))\n print(e.args)\n print(e)\n raise e\n\n # load up Aaron's DBase fits\n ajr_fits_path = '/nfs/slac/g/ki/ki19/des/cpd/piff_test/CtioDB_db-part1.csv'\n if os.path.exists(ajr_fits_path):\n ajr = pd.read_csv(ajr_fits_path)\n fits = pd.merge(fits, ajr, on='expid', how='left')\n else:\n print('Could not find Donut fits at {0}!'.format(ajr_fits_path))\n\n print('saving fits to {0}'.format(file_out))\n if os.path.exists(file_out):\n os.remove(file_out)\n fits.to_hdf(file_out, 'data', mode='w')\n\ndef agg_to_array(agg, key, bins_x, bins_y):\n indx_x_transform = agg.index.labels[0].values()\n indx_y_transform = agg.index.labels[1].values()\n\n C = np.ma.zeros((bins_x.size - 1, bins_y.size - 1))\n C.mask = np.ones((bins_x.size - 1, bins_y.size - 1))\n np.add.at(C, [indx_x_transform, indx_y_transform],\n agg[key].values)\n np.multiply.at(C.mask, [indx_x_transform, indx_y_transform], 0)\n # bloops\n C = C.T\n return C\n\ndef run_twodhists(files, file_out_base, sep=50):\n bins_u = np.arange(-3900, 3900 + sep, sep)\n bins_v = np.arange(-3500, 3500 + sep, sep)\n\n all_shapes = []\n for file_indx, fo in enumerate(files):\n if (file_indx + 1) % int(max([len(files) * 0.05, 1])) == 0:\n print('doing {0} out of {1}:'.format(file_indx + 1, len(files)))\n # load up the dataframe containing the shapes as measured with hsm\n shapes, nocut_shapes, conds = load_shapes(fo)\n if (file_indx + 1) % int(max([len(files) * 0.05, 1])) == 0:\n print(len(conds), np.sum(conds))\n\n all_shapes.append(shapes)\n print('concatenating')\n arrays = pd.concat(all_shapes, ignore_index=True)\n # assign indices based on bins_u and bins_v\n print('cut by u and v')\n indx_u = pd.cut(arrays['u'], bins_u, labels=False)\n indx_v = pd.cut(arrays['v'], bins_v, labels=False)\n print('groupbying')\n agg = arrays.groupby((indx_u, indx_v)).agg(np.median)\n\n # save\n print('saving agg')\n agg.to_hdf('{0}_agg.h5'.format(file_out_base), 'agg', mode='w')\n\n # make figures\n for key in arrays:\n print('doing key {0}'.format(key))\n fig = Figure(figsize = (12, 9))\n ax = fig.add_subplot(1,1,1)\n\n ax.set_xlabel('u')\n ax.set_ylabel('v')\n ax.set_title(key)\n\n C = agg_to_array(agg, key, bins_u, bins_v)\n vmin = np.nanpercentile(C.data[~C.mask], q=2)\n vmax = np.nanpercentile(C.data[~C.mask], q=98)\n IM = ax.pcolor(bins_u, bins_v, C, 
vmin=vmin, vmax=vmax)\n ax.set_xlim(min(bins_u), max(bins_u))\n ax.set_ylim(min(bins_v), max(bins_v))\n fig.colorbar(IM, ax=ax)\n\n canvas = FigureCanvasAgg(fig)\n # Do this after we've set the canvas to use Agg to avoid warning.\n fig.set_tight_layout(True)\n\n # save files based on what is listed\n plot_path = '{0}_{1}.pdf'.format(file_out_base, key)\n print('saving plot to {0}'.format(plot_path))\n canvas.print_figure(plot_path, dpi=100)\n\n print('saving stars')\n arrays.to_hdf('{0}_stars.h5'.format(file_out_base), 'stars', mode='w')\n\ndef _add_twodhists(z, indx_u, indx_v, unique_indx, C):\n for unique in unique_indx:\n ui, vi = unique\n\n sample = z[(indx_u == ui) & (indx_v == vi)]\n if len(sample) > 0:\n value = np.sum(sample)\n C[vi, ui] += value\n C.mask[vi, ui] = 0\n\ndef collect(directory, piff_name, out_directory, do_optatmo=False, skip_rho=False, skip_oned=False, skip_twod=False):\n if not os.path.exists(out_directory):\n os.makedirs(out_directory)\n\n # params\n if do_optatmo:\n files = sorted(glob.glob('{0}/*/{1}.piff'.format(directory, piff_name)))\n if len(files) > 0:\n print('collecting optatmo params for {0} for {1} psfs'.format(piff_name, len(files)))\n file_out = '{0}/fit_parameters_{1}.h5'.format(out_directory, piff_name)\n run_collect_optics(files, file_out)\n\n if not skip_rho:\n # rho stats\n # for uv_coord in [False]: # only do RA\n # for uv_coord in [True]:\n for uv_coord in [True, False]:\n # for label in ['test']:\n for label in ['test', 'train']:\n files = sorted(glob.glob('{0}//*/shapes_{1}_{2}.h5'.format(directory, label, piff_name)))\n if len(files) > 0:\n print('computing rho stats for {0} for {1} psfs'.format(piff_name, len(files)))\n if uv_coord:\n file_out = '{0}/rhouv_{1}_{2}.pdf'.format(out_directory, label, piff_name)\n\n else:\n file_out = '{0}/rhora_{1}_{2}.pdf'.format(out_directory, label, piff_name)\n run_rho(files, file_out, uv_coord)\n\n if not skip_twod:\n # twod hists\n for label, sep in zip(['test', 'train'], [30, 15]):\n files = sorted(glob.glob('{0}//*/shapes_{1}_{2}.h5'.format(directory, label, piff_name)))\n if len(files) > 0:\n print('computing twod stats for {0} for {1} psfs'.format(piff_name, len(files)))\n file_out_base = '{0}/twodhists_{1}_{2}'.format(out_directory, label, piff_name)\n run_twodhists(files, file_out_base, sep=sep)\n\n plotdict = {}\n for shape_key in ['data_e0', 'de0', 'data_e1', 'de1', 'data_e2', 'de2',\n 'data_delta1', 'ddelta1', 'data_delta2', 'ddelta2',\n 'data_zeta1', 'dzeta1', 'data_zeta2', 'dzeta2',\n 'atmo_size', 'atmo_g1', 'atmo_g2']:\n plotdict[shape_key] = {'key_x': 'data_flux', 'key_y': shape_key,\n 'bins_x': np.logspace(3, 7, 501), 'log_x': True}\n if not skip_oned:\n for label in ['test', 'train']:\n files = sorted(glob.glob('{0}//*/shapes_{1}_{2}.h5'.format(directory, label, piff_name)))\n if len(files) > 0:\n # make plotdict\n for key in plotdict:\n plotdict[key]['plot_path'] = '{0}/onedhists_{1}_{2}_{3}.pdf'.format(out_directory, key, label, piff_name)\n\n print('computing onedhists stats for {0} for {1} psfs'.format(piff_name, len(files)))\n run_onedhists(files, plotdict)\n\nif __name__ == '__main__':\n import argparse\n parser = argparse.ArgumentParser()\n parser.add_argument('--directory', action='store', dest='directory',\n help='where to look for psf files')\n parser.add_argument('--out_directory', action='store', dest='out_directory',\n help='where to save files')\n parser.add_argument('--piff_name', action='store', dest='piff_name',\n help='what psf file to look for')\n 
parser.add_argument('--do_optatmo', action='store_true', dest='do_optatmo',\n help='Load up and save optatmo parameters.')\n parser.add_argument('--skip_rho', action='store_true', dest='skip_rho')\n parser.add_argument('--skip_oned', action='store_true', dest='skip_oned')\n parser.add_argument('--skip_twod', action='store_true', dest='skip_twod')\n\n options = parser.parse_args()\n kwargs = vars(options)\n\n collect(**kwargs)\n" ]
[ [ "pandas.merge", "numpy.sqrt", "pandas.DataFrame", "numpy.square", "pandas.read_csv", "matplotlib.backends.backend_agg.FigureCanvasAgg", "numpy.arange", "pandas.cut", "numpy.ma.zeros", "pandas.read_hdf", "pandas.concat", "numpy.logspace", "numpy.multiply.at", "numpy.array", "numpy.sum", "numpy.add.at", "numpy.nanpercentile", "numpy.isfinite", "matplotlib.figure.Figure", "numpy.ones" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [ "2.0", "1.4", "1.1", "1.5", "1.2", "1.3" ], "scipy": [], "tensorflow": [] } ]
surprisedong/RepDistiller
[ "91e34141588524a4bf628873552b4919db8506ca" ]
[ "helper/loops.py" ]
[ "from __future__ import print_function, division\n\nimport sys\nimport time\nimport torch\nimport pdb\nfrom .util import AverageMeter, accuracy\nimport math\n\ndef train_vanilla(epoch, train_loader, model, criterion, optimizer, opt):\n \"\"\"vanilla training\"\"\"\n model.train()\n\n batch_time = AverageMeter()\n data_time = AverageMeter()\n losses = AverageMeter()\n top1 = AverageMeter()\n top5 = AverageMeter()\n\n end = time.time()\n for idx, (input, target) in enumerate(train_loader):\n data_time.update(time.time() - end)\n\n input = input.float()\n if opt.gpu is not None:\n input = input.cuda(opt.gpu, non_blocking=True)\n if torch.cuda.is_available():\n target = target.cuda(opt.gpu, non_blocking=True)\n\n # ===================forward=====================\n output = model(input)\n loss = criterion(output, target)\n\n acc1, acc5 = accuracy(output, target, topk=(1, 5))\n losses.update(loss.item(), input.size(0))\n top1.update(acc1[0], input.size(0))\n top5.update(acc5[0], input.size(0))\n\n # ===================backward=====================\n optimizer.zero_grad()\n loss.backward()\n optimizer.step()\n\n # ===================meters=====================\n batch_time.update(time.time() - end)\n end = time.time()\n\n # tensorboard logger\n pass\n\n # print info\n if idx % opt.print_freq == 0:\n print('Epoch: [{0}][{1}/{2}]\\t'\n 'Time {batch_time.val:.3f} ({batch_time.avg:.3f})\\t'\n 'Data {data_time.val:.3f} ({data_time.avg:.3f})\\t'\n 'Loss {loss.val:.4f} ({loss.avg:.4f})\\t'\n 'Acc@1 {top1.val:.3f} ({top1.avg:.3f})\\t'\n 'Acc@5 {top5.val:.3f} ({top5.avg:.3f})'.format(\n epoch, idx, len(train_loader), batch_time=batch_time,\n data_time=data_time, loss=losses, top1=top1, top5=top5))\n sys.stdout.flush()\n\n print(' * Acc@1 {top1.avg:.3f} Acc@5 {top5.avg:.3f}'\n .format(top1=top1, top5=top5))\n\n return top1.avg, losses.avg\n\n\ndef train_distill(epoch, train_loader, module_list, criterion_list, optimizer, opt):\n \"\"\"One epoch distillation\"\"\"\n\n ## fix bug: 'DataParallel' object is not iterable\n if opt.distributed:\n module_list = module_list.module\n\n # set modules as train()\n for module in module_list:\n module.train()\n # set teacher as eval()\n module_list[-1].eval()\n\n if opt.distill == 'abound':\n module_list[1].eval()\n elif opt.distill == 'factor':\n module_list[2].eval()\n\n criterion_cls = criterion_list[0]\n criterion_div = criterion_list[1]\n criterion_kd = criterion_list[2]\n\n model_s = module_list[0]\n model_t = module_list[-1]\n\n batch_time = AverageMeter()\n data_time = AverageMeter()\n losses = AverageMeter()\n top1 = AverageMeter()\n top5 = AverageMeter()\n losses_cls = AverageMeter()\n losses_div = AverageMeter()\n losses_kd = AverageMeter()\n\n end = time.time()\n for idx, data in enumerate(train_loader):\n if opt.distill in ['crd']:\n input, target, index, contrast_idx = data\n else:\n input, target, index = data\n data_time.update(time.time() - end)\n\n input = input.float()\n if opt.gpu is not None:\n input = input.cuda(opt.gpu, non_blocking=True)\n if torch.cuda.is_available():\n target = target.cuda(opt.gpu, non_blocking=True)\n index = index.cuda(opt.gpu, non_blocking=True)\n if opt.distill in ['crd']:\n contrast_idx = contrast_idx.cuda(opt.gpu, non_blocking=True)\n if opt.distill in ['PCA']:\n for crit in criterion_kd:\n crit.u = crit.u.cuda(opt.gpu, non_blocking=True)\n\n\n # ===================forward=====================\n feat_s, logit_s = model_s(input, is_feat=True, preact=opt.preact)\n with torch.no_grad():\n feat_t, logit_t = model_t(input, 
is_feat=True, preact=opt.preact)\n feat_t = [f.detach() for f in feat_t]\n\n # cls + kl div\n loss_cls = criterion_cls(logit_s, target)\n loss_div = criterion_div(logit_s, logit_t)\n\n # other kd beyond KL divergence\n if opt.distill == 'kd':\n loss_kd = 0\n elif opt.distill == 'hint':\n f_s = module_list[1](feat_s[opt.hint_layer])\n f_t = feat_t[opt.hint_layer]\n loss_kd = criterion_kd(f_s, f_t)\n elif opt.distill == 'crd':\n f_s = feat_s[-1]\n f_t = feat_t[-1]\n loss_kd = criterion_kd(f_s, f_t, index, contrast_idx)\n elif opt.distill == 'attention':\n g_s = feat_s[1:-1]\n g_t = feat_t[1:-1]\n loss_group = criterion_kd(g_s, g_t)\n loss_kd = sum(loss_group)\n elif opt.distill == 'nst':\n g_s = feat_s[1:-1]\n g_t = feat_t[1:-1]\n loss_group = criterion_kd(g_s, g_t)\n loss_kd = sum(loss_group)\n elif opt.distill == 'similarity':\n g_s = [feat_s[-2]]\n g_t = [feat_t[-2]]\n loss_group = criterion_kd(g_s, g_t)\n loss_kd = sum(loss_group)\n elif opt.distill == 'rkd':\n f_s = feat_s[-1]\n f_t = feat_t[-1]\n loss_kd = criterion_kd(f_s, f_t)\n elif opt.distill == 'pkt':\n f_s = feat_s[-1]\n f_t = feat_t[-1]\n loss_kd = criterion_kd(f_s, f_t)\n elif opt.distill == 'kdsvd':\n g_s = feat_s[1:-1]\n g_t = feat_t[1:-1]\n loss_group = criterion_kd(g_s, g_t)\n loss_kd = sum(loss_group)\n elif opt.distill == 'correlation':\n f_s = module_list[1](feat_s[-1])\n f_t = module_list[2](feat_t[-1])\n loss_kd = criterion_kd(f_s, f_t)\n elif opt.distill == 'vid':\n g_s = feat_s[1:-1]\n g_t = feat_t[1:-1]\n loss_group = [c(f_s, f_t) for f_s, f_t, c in zip(g_s, g_t, criterion_kd)]\n loss_kd = sum(loss_group)\n elif opt.distill == 'abound':\n # can also add loss to this stage\n loss_kd = 0\n elif opt.distill == 'fsp':\n # can also add loss to this stage\n loss_kd = 0\n elif opt.distill == 'factor':\n factor_s = module_list[1](feat_s[-2])\n factor_t = module_list[2](feat_t[-2], is_factor=True)\n loss_kd = criterion_kd(factor_s, factor_t)\n elif opt.distill == 'PCA':\n g_s = [feat_s[idx] for idx in opt.pcalayer]\n g_t = [feat_t[idx] for idx in opt.pcalayer]\n criterion_kd_ = [criterion_kd[idx].eval() for idx in opt.pcalayer]\n loss_group = [c(f_s, f_t) for f_s, f_t, c in zip(g_s, g_t, criterion_kd_)]\n loss_kd = sum(loss_group)\n\n else:\n raise NotImplementedError(opt.distill)\n\n loss = opt.gamma * loss_cls + opt.alpha * loss_div + opt.beta * loss_kd\n\n acc1, acc5 = accuracy(logit_s, target, topk=(1, 5))\n losses.update(loss.item(), input.size(0))\n losses_cls.update(loss_cls.item(), input.size(0))\n losses_div.update(loss_div.item(), input.size(0))\n losses_kd.update(loss_kd.item(), input.size(0))\n top1.update(acc1[0], input.size(0))\n top5.update(acc5[0], input.size(0))\n\n # ===================backward=====================\n optimizer.zero_grad()\n loss.backward(retain_graph=True)\n optimizer.step()\n\n # ===================meters=====================\n batch_time.update(time.time() - end)\n end = time.time()\n\n # print info\n if idx % opt.print_freq == 0:\n print('Epoch: [{0}][{1}/{2}]\\t'\n 'Time {batch_time.val:.3f} ({batch_time.avg:.3f})\\t'\n 'Data {data_time.val:.3f} ({data_time.avg:.3f})\\t'\n 'Loss {loss.val:.4f} ({loss.avg:.4f})\\t'\n 'Acc@1 {top1.val:.3f} ({top1.avg:.3f})\\t'\n 'Acc@5 {top5.val:.3f} ({top5.avg:.3f})'.format(\n epoch, idx, len(train_loader), batch_time=batch_time,\n data_time=data_time, loss=losses, top1=top1, top5=top5))\n sys.stdout.flush()\n\n print(' * Acc@1 {top1.avg:.3f} Acc@5 {top5.avg:.3f}'\n .format(top1=top1, top5=top5))\n\n return top1.avg, losses.avg, losses_cls.avg, 
losses_div.avg, losses_kd.avg\n\n\ndef validate(val_loader, model, criterion, opt):\n \"\"\"validation\"\"\"\n batch_time = AverageMeter()\n losses = AverageMeter()\n top1 = AverageMeter()\n top5 = AverageMeter()\n\n # switch to evaluate mode\n model.eval()\n\n with torch.no_grad():\n end = time.time()\n for idx, (input, target) in enumerate(val_loader):\n\n input = input.float()\n if opt.gpu is not None:\n input = input.cuda(opt.gpu, non_blocking=True)\n if torch.cuda.is_available():\n target = target.cuda(opt.gpu, non_blocking=True)\n\n\n # compute output\n output = model(input)\n loss = criterion(output, target)\n\n # measure accuracy and record loss\n acc1, acc5 = accuracy(output, target, topk=(1, 5))\n losses.update(loss.item(), input.size(0))\n top1.update(acc1[0], input.size(0))\n top5.update(acc5[0], input.size(0))\n\n # measure elapsed time\n batch_time.update(time.time() - end)\n end = time.time()\n\n if idx % opt.print_freq == 0:\n print('Test: [{0}/{1}]\\t'\n 'Time {batch_time.val:.3f} ({batch_time.avg:.3f})\\t'\n 'Loss {loss.val:.4f} ({loss.avg:.4f})\\t'\n 'Acc@1 {top1.val:.3f} ({top1.avg:.3f})\\t'\n 'Acc@5 {top5.val:.3f} ({top5.avg:.3f})'.format(\n idx, len(val_loader), batch_time=batch_time, loss=losses,\n top1=top1, top5=top5))\n\n print(' * Acc@1 {top1.avg:.3f} Acc@5 {top5.avg:.3f}'\n .format(top1=top1, top5=top5))\n\n return top1.avg, top5.avg, losses.avg\n" ]
[ [ "torch.no_grad", "torch.cuda.is_available" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
jmontgom10/pyPol
[ "7c63475856256fa754cb1df04dac4dce9eff7bd8" ]
[ "01_buildIndex.py" ]
[ "# -*- coding: utf-8 -*-\n\"\"\"\nRestructures the rawFileIndex from PRISM_pyBDP to contain ONLY the science\nimages, and break those up into individual groups based on changes in\n\n1) OBJECT (object name)\n2) FILTER (optical filter value)\n3) EXPTIME (the exposure time of the images)\n4) BINNING (the 1x1, 2x2, 3x3, or 4x4 binning of the images)\n5) Pointing changes (more than 1.5 degrees of chang is considered a new group)\n\nAttempts to associate each group with a target in the 'targetList' variable on\nthe basis of the string in the OBJECT column of that group.\n\nSaves the index file with a USE and GROUP_ID columns added to the table.\n\"\"\"\n\n#Import whatever modules will be used\nimport os\nimport sys\nimport time\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom astropy.table import Table, Column\nfrom astropy.coordinates import SkyCoord\nimport astropy.units as u\nfrom scipy import stats\n\n# Add the AstroImage class\nimport astroimage as ai\n\n# Set the directory for the pyPol reduced data\npyPol_data = 'C:\\\\Users\\\\Jordan\\\\FITS_data\\\\PRISM_data\\\\pyPol_data\\\\201612\\\\'\n\n# Set the filename for the reduced data indexFile\nindexFile = os.path.join(pyPol_data, 'reducedFileIndex.csv')\n\n# Compose a list of expected targets. All groups will be assigned to ONE of\n# these targets within a given tolerance. If no match is found for a group\n# within this list of targets, then an error will be raised.\ntargetList = [\n 'NGC7023',\n 'NGC2023',\n 'Merope',\n 'NGC4565',\n 'NGC891',\n 'Orion_Cal',\n 'Cyg_OB2'\n]\n\n# Force all the targets to be upper case to remove ambiguity\ntargetList = [t.upper() for t in targetList]\n\n#==============================================================================\n# *********************** CUSTOM USER CODE ************************************\n# this is where the user specifies where the raw data is stored\n# and some of the subdirectory structure to find the actual .FITS images\n#==============================================================================\n# This is the location of all pyBDP data (index, calibration images, reduced...)\npyBDP_data='C:\\\\Users\\\\Jordan\\\\FITS_data\\\\PRISM_data\\\\pyBDP_data\\\\201612'\n\n# This is the location where all pyPol data will be saved\npyPol_data='C:\\\\Users\\\\Jordan\\\\FITS_data\\\\PRISM_data\\\\pyPol_data\\\\201612'\nif (not os.path.isdir(pyPol_data)):\n os.mkdir(pyPol_data, 0o755)\n\n# This is the location of the pyBDP processed Data\npyBDP_reducedDir = os.path.join(pyBDP_data, 'pyBDP_reduced_images')\n\n# Load the index of raw files\nrawFileIndexFile = os.path.join(pyBDP_data, 'rawFileIndex.csv')\nrawFileIndex = Table.read(rawFileIndexFile)\n\n# Cull the index to only include the science images\nscienceBool = rawFileIndex['OBSTYPE'] == 'OBJECT'\nscienceInds = np.where(scienceBool)\nreducedFileIndex = rawFileIndex[scienceInds]\n\n# Remap the filenames to be the reduced filenames\nfileBasenames = [os.path.basename(f) for f in reducedFileIndex['FILENAME']]\nreducedFilenames = [os.path.join(pyBDP_reducedDir, f) for f in fileBasenames]\nreducedFileIndex['FILENAME'] = reducedFilenames\n\n# Find the breaks in observation procedure. 
These are candidates for group\n# boundaries.\n# 1) OBJECT changes\nobjectChange = (reducedFileIndex['OBJECT'][1:] != reducedFileIndex['OBJECT'][0:-1])\n\n# 2) OBSTYPE changes\nobstypeChange = (reducedFileIndex['OBSTYPE'][1:] != reducedFileIndex['OBSTYPE'][0:-1])\n\n# 3) FILTER changes\nfilterChange = (reducedFileIndex['FILTER'][1:] != reducedFileIndex['FILTER'][0:-1])\n\n# 4) EXPTIME changes\nexpTimeChange = (reducedFileIndex['EXPTIME'][1:] != reducedFileIndex['EXPTIME'][0:-1])\n\n# 5) BINNING changes\nbinningChange = (reducedFileIndex['BINNING'][1:] != reducedFileIndex['BINNING'][0:-1])\n\n# 6) Pointing changes\n# Treat any pointing difference of 1.5 degrees (or more) as a group boundary\nallPointings = SkyCoord(\n reducedFileIndex['TELRA'],\n reducedFileIndex['TELDEC'],\n unit=(u.hour, u.degree)\n)\n# Use the median declination (not RA) when scaling the RA offsets\nmedianDecs = 0.5*(allPointings[1:].dec.to(u.rad) + allPointings[0:-1].dec.to(u.rad))\ndeltaDec = allPointings[1:].dec - allPointings[0:-1].dec\ndeltaRA = (allPointings[1:].ra - allPointings[0:-1].ra)*np.cos(medianDecs)\ndeltaPointing = np.sqrt(deltaRA**2 + deltaDec**2)\npointingChange = deltaPointing > (1.5*u.deg)\n\n# Identify all changes\nallChanges = objectChange\nallChanges = np.logical_or(allChanges, obstypeChange)\nallChanges = np.logical_or(allChanges, filterChange)\nallChanges = np.logical_or(allChanges, expTimeChange)\nallChanges = np.logical_or(allChanges, binningChange)\nallChanges = np.logical_or(allChanges, pointingChange)\n\n# Assign a GROUP_ID for each group\ngroupBoundaries = np.hstack([0, np.where(allChanges)[0] + 1, allChanges.size])\ngroupIDs = []\nfor i in range(groupBoundaries.size - 1):\n # Find the start and end indices of the group\n groupStartInd = groupBoundaries[i]\n groupEndInd = groupBoundaries[i+1]\n\n # Build the group ID number\n groupID = i + 1\n\n # Count the number of images in this group\n numberOfImages = groupEndInd - groupStartInd\n\n # Build the list of ID numbers for THIS group and append it to the full list\n thisGroupID = numberOfImages*[groupID]\n groupIDs.extend(thisGroupID)\n\n# Fill in the final entry\ngroupIDs.append(groupID)\n\n# Store the groupID number in the reducedFileIndex\ngroupIDcolumn = Column(name='GROUP_ID', data=groupIDs)\nreducedFileIndex.add_column(groupIDcolumn, index=2)\n\n# Now remove any GROUPS with fewer than 8 images\ngroupIndex = reducedFileIndex.group_by('GROUP_ID')\ngoodGroupInds = []\ngroupInds = groupIndex.groups.indices\nfor startInd, endInd in zip(groupInds[:-1], groupInds[+1:]):\n # Count the number of images in this group and test if it's any good.\n if (endInd - startInd) >= 8:\n goodGroupInds.extend(range(startInd, endInd))\n\n\n# Cull the reducedFileIndex to only include viable groups\ngoodGroupInds = np.array(goodGroupInds)\nreducedFileIndex = reducedFileIndex[goodGroupInds]\n\n# Match a dither type for each group (\"ABBA\" or \"HEX\")\ngroupIndex = reducedFileIndex.group_by('GROUP_ID')\nditherType = []\nfor group in groupIndex.groups:\n # Count the number of images in this group\n numberOfImages = len(group)\n\n # Test if this is an ABBA or HEX dither\n if ('A' in group['AB']) and ('B' in group['AB']):\n ditherType.extend(numberOfImages*['ABBA'])\n if ('A' in group['AB']) and not ('B' in group['AB']):\n ditherType.extend(numberOfImages*['HEX'])\n\n# Store the dither type in the groupIndex table\nditherTypeColumn = Column(name='DITHER_TYPE', data=ditherType)\ngroupIndex.add_column(ditherTypeColumn, index=10)\n\n# Identify meta-groups pointing at a single target with a single dither style.\ntargets = 
[]\nfor group in groupIndex.groups:\n # Count the number of images in this group\n numberOfImages = len(group)\n\n # Get the group name\n groupName = np.unique(group['OBJECT'])[0]\n\n # Capitalize the group name to remove ambiguity\n groupName = groupName.upper()\n\n # Test if a target name occurs in this group name\n for target in targetList:\n if target in groupName:\n targets.extend(numberOfImages*[target])\n\n break\n else:\n raise ValueError('Group {} found no match in the target list'.format(groupName))\n\n# Add the target identifications to the groupIndex table\ntargetColumn = Column(name='TARGET', data=targets)\ngroupIndex.add_column(targetColumn, index=5)\n\n# Re-order by filename. Start by getting the sorting array\nsortInds = groupIndex['FILENAME'].data.argsort()\nreducedFileIndex = groupIndex[sortInds]\n\n# Finally, add a column of \"use\" flags at the first index\nuseColumn = Column(name='USE', data=np.ones((len(reducedFileIndex),), dtype=int))\nreducedFileIndex.add_column(useColumn, index=0)\n\n# Save the index to disk.\nreducedFileIndex.write(indexFile, format='ascii.csv', overwrite=True)\n\nprint('Done!')\n" ]
[ [ "numpy.sqrt", "numpy.unique", "numpy.cos", "numpy.logical_or", "numpy.array", "numpy.where" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
abhyuday07/market-plots
[ "335fb5f09a64471cf6260cc9f306d0fcd91cb4b2" ]
[ "variance.py" ]
[ "import sys\nimport numpy as np\nimport matplotlib\nimport matplotlib.pyplot as plt\nimport pathlib\nimport matplotlib.style as style\n\nimport alpha_vantage\nimport plot_style\n\n\ndef show_variance(symbol, interval='MONTHLY'):\n returns = alpha_vantage.get_stock_returns_history(symbol, interval)\n variance = np.var(returns)\n standard_deviation = np.sqrt(variance)\n mean_return = np.mean(returns)\n\n plot_style.hist()\n\n n, bins, patches = plt.hist(returns, density=True, bins=25)\n\n for item in patches:\n item.set_height(item.get_height() / sum(n))\n\n max_y = max(n) / sum(n)\n plt.ylim(0, max_y + max_y / 10)\n\n plt.gca().set_xticklabels(['{:.0f}%'.format(x*100)\n for x in plt.gca().get_xticks()])\n\n plt.gca().set_yticklabels(['{:.0f}%'.format(y*100)\n for y in plt.gca().get_yticks()])\n\n title_line_1 = f'{symbol} {interval} return distribution'\n title_line_2 = 'Standard deviation = %.2f%% Mean return = %.2f%%' % (\n standard_deviation * 100, mean_return * 100)\n plt.title(f'{title_line_1}\\n{title_line_2}')\n plt.xlabel('Return')\n plt.ylabel('Probability')\n\n pathlib.Path('img/variance').mkdir(parents=True, exist_ok=True)\n plt.savefig(f'img/variance/{symbol}.png')\n plt.close()\n\n\nshow_variance(sys.argv[1])\n" ]
[ [ "matplotlib.pyplot.gca", "numpy.sqrt", "matplotlib.pyplot.title", "matplotlib.pyplot.ylim", "matplotlib.pyplot.savefig", "numpy.mean", "matplotlib.pyplot.close", "numpy.var", "matplotlib.pyplot.xlabel", "matplotlib.pyplot.hist", "matplotlib.pyplot.ylabel" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
PacktPublishing/The-Deep-Learning-Challenge
[ "0539bd010b67cc6a7d53b59d7a32fc72051eb216" ]
[ "Section 2/source/solution2.py" ]
[ "# Alternative solutions:\n# - Get classes names from:\n# ~/.keras/models/imagenet_class_index.json\n# - Get classes at http://image-net.org/challenges/LSVRC/2014/browse-synsets\nimport conf\nfrom keras.applications.vgg19 import VGG19\nfrom keras.applications.vgg19 import decode_predictions\nimport numpy as np\nfrom pprint import pprint\n\nif __name__ == '__main__':\n model = VGG19(classes=1000)\n classes=decode_predictions(np.ones((1,1000), dtype=float), top=1000)\n cnames=[ c[1] for c in classes[0]]\n cnames.sort()\n pprint(cnames)\n \n" ]
[ [ "numpy.ones" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
fzahle/OpenMDAO
[ "709401e535cf6933215abd942d4b4d49dbf61b2b", "709401e535cf6933215abd942d4b4d49dbf61b2b", "709401e535cf6933215abd942d4b4d49dbf61b2b", "ce53b0a0862ac1162d5daad7b0ca34ae085ee47c", "ce53b0a0862ac1162d5daad7b0ca34ae085ee47c" ]
[ "openmdao/core/tests/test_driver.py", "openmdao/solvers/linear/tests/test_linear_block_jac.py", "openmdao/test_suite/components/misc_components.py", "openmdao/core/tests/test_group.py", "openmdao/solvers/solver.py" ]
[ "\"\"\" Unit tests for the Driver base class.\"\"\"\n\nfrom __future__ import print_function\n\nfrom distutils.version import LooseVersion\nfrom six import StringIO\nimport sys\nimport unittest\n\nimport numpy as np\n\nimport openmdao\nfrom openmdao.api import Problem, IndepVarComp, Group, ExecComp, ScipyOptimizeDriver\nfrom openmdao.utils.assert_utils import assert_rel_error\nfrom openmdao.utils.general_utils import printoptions\nfrom openmdao.test_suite.components.sellar import SellarDerivatives\nfrom openmdao.test_suite.components.simple_comps import DoubleArrayComp, NonSquareArrayComp\n\n\nclass TestDriver(unittest.TestCase):\n\n def test_basic_get(self):\n\n prob = Problem()\n prob.model = model = SellarDerivatives()\n\n model.add_design_var('z')\n model.add_objective('obj')\n model.add_constraint('con1', lower=0)\n prob.set_solver_print(level=0)\n\n prob.setup(check=False)\n prob.run_driver()\n\n designvars = prob.driver.get_design_var_values()\n self.assertEqual(designvars['pz.z'][0], 5.0 )\n\n designvars = prob.driver.get_objective_values()\n self.assertEqual(designvars['obj_cmp.obj'], prob['obj'] )\n\n designvars = prob.driver.get_constraint_values()\n self.assertEqual(designvars['con_cmp1.con1'], prob['con1'] )\n\n def test_scaled_design_vars(self):\n\n prob = Problem()\n prob.model = model = SellarDerivatives()\n\n model.add_design_var('z', ref=5.0, ref0=3.0)\n model.add_objective('obj')\n model.add_constraint('con1', lower=0)\n prob.set_solver_print(level=0)\n\n prob.setup(check=False)\n\n # Conclude setup but don't run model.\n prob.final_setup()\n\n dv = prob.driver.get_design_var_values()\n self.assertEqual(dv['pz.z'][0], 1.0)\n self.assertEqual(dv['pz.z'][1], -0.5)\n\n prob.driver.set_design_var('pz.z', np.array((2.0, -2.0)))\n self.assertEqual(prob['z'][0], 7.0)\n self.assertEqual(prob['z'][1], -1.0)\n\n def test_scaled_constraints(self):\n\n prob = Problem()\n prob.model = model = SellarDerivatives()\n\n model.add_design_var('z')\n model.add_objective('obj')\n model.add_constraint('con1', lower=0, ref=2.0, ref0=3.0)\n prob.set_solver_print(level=0)\n\n prob.setup(check=False)\n prob.run_model()\n\n cv = prob.driver.get_constraint_values()['con_cmp1.con1'][0]\n base = prob['con1']\n self.assertEqual((base-3.0)/(2.0-3.0), cv)\n\n def test_scaled_objectves(self):\n\n prob = Problem()\n prob.model = model = SellarDerivatives()\n\n model.add_design_var('z')\n model.add_objective('obj', ref=2.0, ref0=3.0)\n model.add_constraint('con1', lower=0)\n prob.set_solver_print(level=0)\n\n prob.setup(check=False)\n prob.run_model()\n\n cv = prob.driver.get_objective_values()['obj_cmp.obj'][0]\n base = prob['obj']\n self.assertEqual((base-3.0)/(2.0-3.0), cv)\n\n def test_scaled_derivs(self):\n\n prob = Problem()\n prob.model = model = SellarDerivatives()\n\n model.add_design_var('z')\n model.add_objective('obj')\n model.add_constraint('con1')\n prob.set_solver_print(level=0)\n\n prob.setup(check=False)\n prob.run_model()\n\n base = prob.compute_totals(of=['obj', 'con1'], wrt=['z'])\n\n prob = Problem()\n prob.model = model = SellarDerivatives()\n\n model.add_design_var('z', ref=2.0, ref0=0.0)\n model.add_objective('obj', ref=1.0, ref0=0.0)\n model.add_constraint('con1', lower=0, ref=2.0, ref0=0.0)\n prob.set_solver_print(level=0)\n\n prob.setup(check=False)\n prob.run_model()\n\n derivs = prob.driver._compute_totals(of=['obj_cmp.obj', 'con_cmp1.con1'], wrt=['pz.z'],\n return_format='dict')\n assert_rel_error(self, base[('con1', 'z')][0], derivs['con_cmp1.con1']['pz.z'][0], 1e-5)\n 
assert_rel_error(self, base[('obj', 'z')][0]*2.0, derivs['obj_cmp.obj']['pz.z'][0], 1e-5)\n\n def test_vector_scaled_derivs(self):\n\n prob = Problem()\n prob.model = model = Group()\n\n model.add_subsystem('px', IndepVarComp(name=\"x\", val=np.ones((2, ))))\n comp = model.add_subsystem('comp', DoubleArrayComp())\n model.connect('px.x', 'comp.x1')\n\n model.add_design_var('px.x', ref=np.array([2.0, 3.0]), ref0=np.array([0.5, 1.5]))\n model.add_objective('comp.y1', ref=np.array([[7.0, 11.0]]), ref0=np.array([5.2, 6.3]))\n model.add_constraint('comp.y2', lower=0.0, upper=1.0, ref=np.array([[2.0, 4.0]]), ref0=np.array([1.2, 2.3]))\n\n prob.setup(check=False)\n prob.run_driver()\n\n derivs = prob.driver._compute_totals(of=['comp.y1'], wrt=['px.x'],\n return_format='dict')\n\n oscale = np.array([1.0/(7.0-5.2), 1.0/(11.0-6.3)])\n iscale = np.array([2.0-0.5, 3.0-1.5])\n J = comp.JJ[0:2, 0:2]\n\n # doing this manually so that I don't inadvertently make an error in the vector math in both the code and test.\n J[0, 0] *= oscale[0]*iscale[0]\n J[0, 1] *= oscale[0]*iscale[1]\n J[1, 0] *= oscale[1]*iscale[0]\n J[1, 1] *= oscale[1]*iscale[1]\n assert_rel_error(self, J, derivs['comp.y1']['px.x'], 1.0e-3)\n\n obj = prob.driver.get_objective_values()\n obj_base = np.array([ (prob['comp.y1'][0]-5.2)/(7.0-5.2), (prob['comp.y1'][1]-6.3)/(11.0-6.3) ])\n assert_rel_error(self, obj['comp.y1'], obj_base, 1.0e-3)\n\n con = prob.driver.get_constraint_values()\n con_base = np.array([ (prob['comp.y2'][0]-1.2)/(2.0-1.2), (prob['comp.y2'][1]-2.3)/(4.0-2.3) ])\n assert_rel_error(self, con['comp.y2'], con_base, 1.0e-3)\n\n def test_vector_bounds_inf(self):\n\n # make sure no overflow when there is no specified upper/lower bound and significant scaling\n prob = Problem()\n prob.model = model = Group()\n\n model.add_subsystem('px', IndepVarComp(name=\"x\", val=np.ones((2, ))))\n comp = model.add_subsystem('comp', DoubleArrayComp())\n model.connect('px.x', 'comp.x1')\n\n model.add_design_var('px.x', ref=np.array([.1, 1e-6]))\n model.add_constraint('comp.y2', ref=np.array([.2, 2e-6]))\n\n prob.setup()\n\n desvars = model.get_design_vars()\n\n self.assertFalse(np.any(np.isinf(desvars['px.x']['upper'])))\n self.assertFalse(np.any(np.isinf(-desvars['px.x']['lower'])))\n\n responses = prob.model.get_responses()\n\n self.assertFalse(np.any(np.isinf(responses['comp.y2']['upper'])))\n self.assertFalse(np.any(np.isinf(-responses['comp.y2']['lower'])))\n\n def test_vector_scaled_derivs_diff_sizes(self):\n\n prob = Problem()\n prob.model = model = Group()\n\n model.add_subsystem('px', IndepVarComp(name=\"x\", val=np.ones((2, ))))\n comp = model.add_subsystem('comp', NonSquareArrayComp())\n model.connect('px.x', 'comp.x1')\n\n model.add_design_var('px.x', ref=np.array([2.0, 3.0]), ref0=np.array([0.5, 1.5]))\n model.add_objective('comp.y1', ref=np.array([[7.0, 11.0, 2.0]]), ref0=np.array([5.2, 6.3, 1.2]))\n model.add_constraint('comp.y2', lower=0.0, upper=1.0, ref=np.array([[2.0]]), ref0=np.array([1.2]))\n\n prob.setup(check=False)\n prob.run_driver()\n\n derivs = prob.driver._compute_totals(of=['comp.y1'], wrt=['px.x'],\n return_format='dict')\n\n oscale = np.array([1.0/(7.0-5.2), 1.0/(11.0-6.3), 1.0/(2.0-1.2)])\n iscale = np.array([2.0-0.5, 3.0-1.5])\n J = comp.JJ[0:3, 0:2]\n\n # doing this manually so that I don't inadvertently make an error in the vector math in both the code and test.\n J[0, 0] *= oscale[0]*iscale[0]\n J[0, 1] *= oscale[0]*iscale[1]\n J[1, 0] *= oscale[1]*iscale[0]\n J[1, 1] *= oscale[1]*iscale[1]\n J[2, 0] *= 
oscale[2]*iscale[0]\n J[2, 1] *= oscale[2]*iscale[1]\n assert_rel_error(self, J, derivs['comp.y1']['px.x'], 1.0e-3)\n\n obj = prob.driver.get_objective_values()\n obj_base = np.array([ (prob['comp.y1'][0]-5.2)/(7.0-5.2), (prob['comp.y1'][1]-6.3)/(11.0-6.3), (prob['comp.y1'][2]-1.2)/(2.0-1.2) ])\n assert_rel_error(self, obj['comp.y1'], obj_base, 1.0e-3)\n\n con = prob.driver.get_constraint_values()\n con_base = np.array([ (prob['comp.y2'][0]-1.2)/(2.0-1.2)])\n assert_rel_error(self, con['comp.y2'], con_base, 1.0e-3)\n\n def test_debug_print_option(self):\n\n prob = Problem()\n prob.model = model = SellarDerivatives()\n\n model.add_design_var('z')\n model.add_objective('obj')\n model.add_constraint('con1', lower=0)\n model.add_constraint('con2', lower=0, linear=True)\n prob.set_solver_print(level=0)\n\n prob.setup(check=False)\n\n # Make sure nothing prints if debug_print is the default of empty list\n stdout = sys.stdout\n strout = StringIO()\n sys.stdout = strout\n try:\n prob.run_driver()\n finally:\n sys.stdout = stdout\n output = strout.getvalue().split('\\n')\n self.assertEqual(output, [''])\n\n # Make sure everything prints when all options are on\n prob.driver.options['debug_print'] = ['desvars','ln_cons','nl_cons','objs']\n stdout = sys.stdout\n strout = StringIO()\n sys.stdout = strout\n try:\n prob.run_driver(reset_iter_counts=False)\n finally:\n sys.stdout = stdout\n output = strout.getvalue().split('\\n')\n self.assertEqual(output.count(\"Driver debug print for iter coord: rank0:Driver|1\"), 1)\n self.assertEqual(output.count(\"Design Vars\"), 1)\n self.assertEqual(output.count(\"Nonlinear constraints\"), 1)\n self.assertEqual(output.count(\"Linear constraints\"), 1)\n self.assertEqual(output.count(\"Objectives\"), 1)\n\n # Check to make sure an invalid debug_print option raises an exception\n with self.assertRaises(ValueError) as context:\n prob.driver.options['debug_print'] = ['bad_option']\n self.assertEqual(str(context.exception),\n \"Option 'debug_print' contains value 'bad_option' which is not one of ['desvars', 'nl_cons', 'ln_cons', 'objs', 'totals'].\")\n\n def test_debug_print_desvar_physical_with_indices(self):\n prob = Problem()\n model = prob.model = Group()\n\n size = 3\n model.add_subsystem('p1', IndepVarComp('x', np.array([50.0] * size)))\n model.add_subsystem('p2', IndepVarComp('y', np.array([50.0] * size)))\n model.add_subsystem('comp', ExecComp('f_xy = (x-3.0)**2 + x*y + (y+4.0)**2 - 3.0',\n x=np.zeros(size), y=np.zeros(size),\n f_xy=np.zeros(size)))\n model.add_subsystem('con', ExecComp('c = - x + y',\n c=np.zeros(size), x=np.zeros(size),\n y=np.zeros(size)))\n\n model.connect('p1.x', 'comp.x')\n model.connect('p2.y', 'comp.y')\n model.connect('p1.x', 'con.x')\n model.connect('p2.y', 'con.y')\n\n prob.set_solver_print(level=0)\n\n prob.driver = ScipyOptimizeDriver()\n prob.driver.options['optimizer'] = 'SLSQP'\n prob.driver.options['tol'] = 1e-9\n prob.driver.options['disp'] = False\n\n model.add_design_var('p1.x', indices=[1], lower=-50.0, upper=50.0, ref=[5.0,])\n model.add_design_var('p2.y', indices=[1], lower=-50.0, upper=50.0)\n model.add_objective('comp.f_xy', index=1)\n model.add_constraint('con.c', indices=[1], upper=-15.0)\n\n prob.setup(check=False)\n\n prob.driver.options['debug_print'] = ['desvars',]\n stdout = sys.stdout\n strout = StringIO()\n sys.stdout = strout\n\n try:\n # formatting has changed in numpy 1.14 and beyond.\n if LooseVersion(np.__version__) >= LooseVersion(\"1.14\"):\n with printoptions(precision=2, legacy=\"1.13\"):\n 
prob.run_driver()\n else:\n with printoptions(precision=2):\n prob.run_driver()\n finally:\n sys.stdout = stdout\n output = strout.getvalue().split('\\n')\n # should see unscaled (physical) and the full arrays, not just what is indicated by indices\n self.assertEqual(output[3], \"{'p1.x': array([ 50., 50., 50.]), 'p2.y': array([ 50., 50., 50.])}\")\n\n def test_debug_print_response_physical(self):\n prob = Problem()\n model = prob.model = Group()\n\n size = 3\n model.add_subsystem('p1', IndepVarComp('x', np.array([50.0] * size)))\n model.add_subsystem('p2', IndepVarComp('y', np.array([50.0] * size)))\n model.add_subsystem('comp', ExecComp('f_xy = (x-3.0)**2 + x*y + (y+4.0)**2 - 3.0',\n x=np.zeros(size), y=np.zeros(size),\n f_xy=np.zeros(size)))\n model.add_subsystem('con', ExecComp('c = - x + y + 1',\n c=np.zeros(size), x=np.zeros(size),\n y=np.zeros(size)))\n\n model.connect('p1.x', 'comp.x')\n model.connect('p2.y', 'comp.y')\n model.connect('p1.x', 'con.x')\n model.connect('p2.y', 'con.y')\n\n prob.set_solver_print(level=0)\n\n prob.driver = ScipyOptimizeDriver()\n prob.driver.options['optimizer'] = 'SLSQP'\n prob.driver.options['tol'] = 1e-9\n prob.driver.options['disp'] = False\n\n model.add_design_var('p1.x', indices=[1], lower=-50.0, upper=50.0)\n model.add_design_var('p2.y', indices=[1], lower=-50.0, upper=50.0)\n model.add_objective('comp.f_xy', index=1, ref=1.5)\n model.add_constraint('con.c', indices=[1], upper=-15.0, ref=1.02)\n\n prob.setup(check=False)\n\n prob.driver.options['debug_print'] = ['objs', 'nl_cons']\n stdout = sys.stdout\n strout = StringIO()\n sys.stdout = strout\n\n try:\n # formatting has changed in numpy 1.14 and beyond.\n if LooseVersion(np.__version__) >= LooseVersion(\"1.14\"):\n with printoptions(precision=2, legacy=\"1.13\"):\n prob.run_driver()\n else:\n with printoptions(precision=2):\n prob.run_driver()\n finally:\n sys.stdout = stdout\n output = strout.getvalue().split('\\n')\n # should see unscaled (physical) and the full arrays, not just what is indicated by indices\n self.assertEqual(output[3], \"{'con.c': array([ 1.])}\")\n self.assertEqual(output[6], \"{'comp.f_xy': array([ 7622.])}\")\n\nif __name__ == \"__main__\":\n unittest.main()\n", "\"\"\"Test the LinearBlockJac class.\"\"\"\n\nfrom __future__ import division, print_function\n\nimport unittest\n\nimport numpy as np\n\nfrom openmdao.api import Group, IndepVarComp, Problem, LinearBlockJac, \\\n ExecComp, NonlinearBlockGS\nfrom openmdao.utils.assert_utils import assert_rel_error\nfrom openmdao.test_suite.components.sellar import SellarDis1withDerivatives, SellarDis2withDerivatives\nfrom openmdao.test_suite.components.expl_comp_simple import TestExplCompSimpleDense\nfrom openmdao.solvers.linear.tests.linear_test_base import LinearSolverTests\n\n\nclass TestLinearBlockJacSolver(LinearSolverTests.LinearSolverTestCase):\n\n linear_solver_class = LinearBlockJac\n\n def test_globaljac_err(self):\n prob = Problem()\n model = prob.model = Group(assembled_jac_type='dense')\n model.add_subsystem('x_param', IndepVarComp('length', 3.0),\n promotes=['length'])\n model.add_subsystem('mycomp', TestExplCompSimpleDense(),\n promotes=['length', 'width', 'area'])\n\n model.linear_solver = LinearBlockJac(assemble_jac=True)\n prob.setup(check=False)\n\n with self.assertRaises(RuntimeError) as context:\n prob.run_model()\n self.assertEqual(str(context.exception),\n \"Linear solver 'LN: LNBJ' doesn't support assembled jacobians.\")\n\n\nclass TestBJacSolverFeature(unittest.TestCase):\n\n def 
test_specify_solver(self):\n import numpy as np\n\n from openmdao.api import Problem, Group, IndepVarComp, ExecComp, LinearBlockJac, NonlinearBlockGS\n from openmdao.test_suite.components.sellar import SellarDis1withDerivatives, SellarDis2withDerivatives\n\n prob = Problem()\n model = prob.model = Group()\n\n model.add_subsystem('px', IndepVarComp('x', 1.0), promotes=['x'])\n model.add_subsystem('pz', IndepVarComp('z', np.array([5.0, 2.0])), promotes=['z'])\n\n model.add_subsystem('d1', SellarDis1withDerivatives(), promotes=['x', 'z', 'y1', 'y2'])\n model.add_subsystem('d2', SellarDis2withDerivatives(), promotes=['z', 'y1', 'y2'])\n\n model.add_subsystem('obj_cmp', ExecComp('obj = x**2 + z[1] + y1 + exp(-y2)',\n z=np.array([0.0, 0.0]), x=0.0),\n promotes=['obj', 'x', 'z', 'y1', 'y2'])\n\n model.add_subsystem('con_cmp1', ExecComp('con1 = 3.16 - y1'), promotes=['con1', 'y1'])\n model.add_subsystem('con_cmp2', ExecComp('con2 = y2 - 24.0'), promotes=['con2', 'y2'])\n\n model.nonlinear_solver = NonlinearBlockGS()\n model.linear_solver = LinearBlockJac()\n\n prob.setup()\n prob.run_model()\n\n wrt = ['z']\n of = ['obj']\n\n J = prob.compute_totals(of=of, wrt=wrt, return_format='flat_dict')\n assert_rel_error(self, J['obj', 'z'][0][0], 9.61001056, .00001)\n assert_rel_error(self, J['obj', 'z'][0][1], 1.78448534, .00001)\n\n def test_feature_maxiter(self):\n import numpy as np\n\n from openmdao.api import Problem, Group, IndepVarComp, ExecComp, LinearBlockJac, NonlinearBlockGS\n from openmdao.test_suite.components.sellar import SellarDis1withDerivatives, SellarDis2withDerivatives\n\n prob = Problem()\n model = prob.model = Group()\n\n model.add_subsystem('px', IndepVarComp('x', 1.0), promotes=['x'])\n model.add_subsystem('pz', IndepVarComp('z', np.array([5.0, 2.0])), promotes=['z'])\n\n model.add_subsystem('d1', SellarDis1withDerivatives(), promotes=['x', 'z', 'y1', 'y2'])\n model.add_subsystem('d2', SellarDis2withDerivatives(), promotes=['z', 'y1', 'y2'])\n\n model.add_subsystem('obj_cmp', ExecComp('obj = x**2 + z[1] + y1 + exp(-y2)',\n z=np.array([0.0, 0.0]), x=0.0),\n promotes=['obj', 'x', 'z', 'y1', 'y2'])\n\n model.add_subsystem('con_cmp1', ExecComp('con1 = 3.16 - y1'), promotes=['con1', 'y1'])\n model.add_subsystem('con_cmp2', ExecComp('con2 = y2 - 24.0'), promotes=['con2', 'y2'])\n\n model.nonlinear_solver = NonlinearBlockGS()\n\n model.linear_solver = LinearBlockJac()\n model.linear_solver.options['maxiter'] = 5\n\n prob.setup()\n prob.run_model()\n\n wrt = ['z']\n of = ['obj']\n\n J = prob.compute_totals(of=of, wrt=wrt, return_format='flat_dict')\n assert_rel_error(self, J['obj', 'z'][0][0], 9.60230118004, .00001)\n assert_rel_error(self, J['obj', 'z'][0][1], 1.78022500547, .00001)\n\n def test_feature_atol(self):\n import numpy as np\n\n from openmdao.api import Problem, Group, IndepVarComp, ExecComp, LinearBlockJac, NonlinearBlockGS\n from openmdao.test_suite.components.sellar import SellarDis1withDerivatives, SellarDis2withDerivatives\n\n prob = Problem()\n model = prob.model = Group()\n\n model.add_subsystem('px', IndepVarComp('x', 1.0), promotes=['x'])\n model.add_subsystem('pz', IndepVarComp('z', np.array([5.0, 2.0])), promotes=['z'])\n\n model.add_subsystem('d1', SellarDis1withDerivatives(), promotes=['x', 'z', 'y1', 'y2'])\n model.add_subsystem('d2', SellarDis2withDerivatives(), promotes=['z', 'y1', 'y2'])\n\n model.add_subsystem('obj_cmp', ExecComp('obj = x**2 + z[1] + y1 + exp(-y2)',\n z=np.array([0.0, 0.0]), x=0.0),\n promotes=['obj', 'x', 'z', 'y1', 'y2'])\n\n 
model.add_subsystem('con_cmp1', ExecComp('con1 = 3.16 - y1'), promotes=['con1', 'y1'])\n model.add_subsystem('con_cmp2', ExecComp('con2 = y2 - 24.0'), promotes=['con2', 'y2'])\n\n model.nonlinear_solver = NonlinearBlockGS()\n\n model.linear_solver = LinearBlockJac()\n model.linear_solver.options['atol'] = 1.0e-3\n\n prob.setup(mode='rev')\n prob.run_model()\n\n wrt = ['z']\n of = ['obj']\n\n J = prob.compute_totals(of=of, wrt=wrt, return_format='flat_dict')\n assert_rel_error(self, J['obj', 'z'][0][0], 9.61016296175, .00001)\n assert_rel_error(self, J['obj', 'z'][0][1], 1.78456955704, .00001)\n\n def test_feature_rtol(self):\n import numpy as np\n\n from openmdao.api import Problem, Group, IndepVarComp, ExecComp, LinearBlockJac, NonlinearBlockGS\n from openmdao.test_suite.components.sellar import SellarDis1withDerivatives, SellarDis2withDerivatives\n\n prob = Problem()\n model = prob.model = Group()\n\n model.add_subsystem('px', IndepVarComp('x', 1.0), promotes=['x'])\n model.add_subsystem('pz', IndepVarComp('z', np.array([5.0, 2.0])), promotes=['z'])\n\n model.add_subsystem('d1', SellarDis1withDerivatives(), promotes=['x', 'z', 'y1', 'y2'])\n model.add_subsystem('d2', SellarDis2withDerivatives(), promotes=['z', 'y1', 'y2'])\n\n model.add_subsystem('obj_cmp', ExecComp('obj = x**2 + z[1] + y1 + exp(-y2)',\n z=np.array([0.0, 0.0]), x=0.0),\n promotes=['obj', 'x', 'z', 'y1', 'y2'])\n\n model.add_subsystem('con_cmp1', ExecComp('con1 = 3.16 - y1'), promotes=['con1', 'y1'])\n model.add_subsystem('con_cmp2', ExecComp('con2 = y2 - 24.0'), promotes=['con2', 'y2'])\n\n model.nonlinear_solver = NonlinearBlockGS()\n\n model.linear_solver = LinearBlockJac()\n model.linear_solver.options['rtol'] = 1.0e-3\n\n prob.setup(mode='rev')\n prob.run_model()\n\n wrt = ['z']\n of = ['obj']\n\n J = prob.compute_totals(of=of, wrt=wrt, return_format='flat_dict')\n assert_rel_error(self, J['obj', 'z'][0][0], 9.61016296175, .00001)\n assert_rel_error(self, J['obj', 'z'][0][1], 1.78456955704, .00001)\n\nif __name__ == \"__main__\":\n unittest.main()\n", "\"\"\"\nMisc components.\n\nContains some general test components that are used in multiple places for testing, but aren't\nfeatured as examples, and are not meant to be showcased as the proper way to write components\nin OpenMDAO.\n\"\"\"\nfrom __future__ import division, print_function\n\nimport numpy as np\n\nfrom openmdao.api import ImplicitComponent\n\n\nclass Comp4LinearCacheTest(ImplicitComponent):\n \"\"\"\n Component needed for testing cached linear solutions.\n\n Generally, we needed an implicit component challenging enough that it takes a few\n iterations to solve with the PETSc and SciPy iterative linear solvers. The equation just\n came from playing around. 
It does not represent any academic or real world problem, so\n it does not need to be explained.\n \"\"\"\n def setup(self):\n \"\"\"\n Set up the model and define derivatives.\n \"\"\"\n self.add_input('x', val=1.0)\n self.add_output('y', val=np.sqrt(3))\n\n self.declare_partials(of='*', wrt='*')\n\n def apply_nonlinear(self, inputs, outputs, residuals):\n \"\"\"\n Compute residuals.\n\n Parameters\n ----------\n inputs : Vector\n Unscaled, dimensional input variables read via inputs[key].\n outputs : Vector\n Unscaled, dimensional output variables read via outputs[key].\n residuals : Vector\n Unscaled, dimensional residuals written to via residuals[key].\n \"\"\"\n x = inputs['x']\n y = outputs['y']\n residuals['y'] = x * y ** 3 - 3.0 * y * x ** 3\n\n def linearize(self, inputs, outputs, partials):\n \"\"\"\n Compute derivatives.\n\n These derivatives are correct.\n\n Parameters\n ----------\n inputs : Vector\n Unscaled, dimensional input variables read via inputs[key].\n outputs : Vector\n Unscaled, dimensional output variables read via outputs[key].\n partials : `Jacobian`\n Contains sub-jacobians.\n \"\"\"\n x = inputs['x']\n y = outputs['y']\n partials['y', 'x'] = y ** 3 - 9.0 * y * x ** 2\n partials['y', 'y'] = 3.0 * x * y ** 2 - 3.0 * y * x ** 3", "from __future__ import print_function\n\nimport unittest\n\nfrom six import assertRaisesRegex\nfrom six.moves import range\n\nimport itertools\n\nimport numpy as np\n\ntry:\n from parameterized import parameterized\nexcept ImportError:\n from openmdao.utils.assert_utils import SkipParameterized as parameterized\n\nfrom openmdao.api import Problem, Group, IndepVarComp, ExecComp, ExplicitComponent, \\\n NonlinearRunOnce, NonLinearRunOnce, BalanceComp, NewtonSolver, DirectSolver\nfrom openmdao.utils.assert_utils import assert_rel_error, assert_warning\nfrom openmdao.test_suite.components.sellar import SellarDis2\n\n\nclass SimpleGroup(Group):\n\n def __init__(self):\n super(SimpleGroup, self).__init__()\n\n self.add_subsystem('comp1', IndepVarComp('x', 5.0))\n self.add_subsystem('comp2', ExecComp('b=2*a'))\n self.connect('comp1.x', 'comp2.a')\n\n\nclass BranchGroup(Group):\n\n def __init__(self):\n super(BranchGroup, self).__init__()\n\n b1 = self.add_subsystem('Branch1', Group())\n g1 = b1.add_subsystem('G1', Group())\n g2 = g1.add_subsystem('G2', Group())\n g2.add_subsystem('comp1', ExecComp('b=2.0*a', a=3.0, b=6.0))\n\n b2 = self.add_subsystem('Branch2', Group())\n g3 = b2.add_subsystem('G3', Group())\n g3.add_subsystem('comp2', ExecComp('b=3.0*a', a=4.0, b=12.0))\n\n\nclass SetOrderGroup(Group):\n def setup(self):\n self.add_subsystem('C1', ExecComp('y=2.0*x'))\n self.add_subsystem('C2', ExecComp('y=2.0*x'))\n self.add_subsystem('C3', ExecComp('y=2.0*x'))\n\n self.set_order(['C1', 'C3', 'C2'])\n\n self.connect('C1.y', 'C3.x')\n self.connect('C3.y', 'C2.x')\n\n\nclass ReportOrderComp(ExplicitComponent):\n def __init__(self, order_list):\n super(ReportOrderComp, self).__init__()\n self._order_list = order_list\n\n def setup(self):\n self.add_input('x', 0.0)\n self.add_output('y', 0.0)\n\n def compute(self, inputs, outputs):\n self._order_list.append(self.pathname)\n\n\nclass TestGroup(unittest.TestCase):\n\n def test_add_subsystem_class(self):\n p = Problem()\n try:\n p.model.add_subsystem('comp', IndepVarComp)\n except TypeError as err:\n self.assertEqual(str(err), \"Subsystem 'comp' should be an instance, \"\n \"but a class object was found.\")\n else:\n self.fail('Exception expected.')\n\n def test_same_sys_name(self):\n \"\"\"Test 
error checking for the case where we add two subsystems with the same name.\"\"\"\n p = Problem()\n p.model.add_subsystem('comp1', IndepVarComp('x', 5.0))\n p.model.add_subsystem('comp2', ExecComp('b=2*a'))\n\n try:\n p.model.add_subsystem('comp2', ExecComp('b=2*a'))\n except Exception as err:\n self.assertEqual(str(err), \"Subsystem name 'comp2' is already used.\")\n else:\n self.fail('Exception expected.')\n\n def test_deprecated_runonce(self):\n p = Problem()\n p.model.add_subsystem('indep', IndepVarComp('x', 5.0))\n p.model.add_subsystem('comp', ExecComp('b=2*a'))\n\n msg = \"NonLinearRunOnce is deprecated. Use NonlinearRunOnce instead.\"\n\n with assert_warning(DeprecationWarning, msg):\n p.model.nonlinear_solver = NonLinearRunOnce()\n\n def test_group_simple(self):\n from openmdao.api import ExecComp, Problem\n\n p = Problem()\n p.model.add_subsystem('comp1', ExecComp('b=2.0*a', a=3.0, b=6.0))\n\n p.setup()\n\n self.assertEqual(p['comp1.a'], 3.0)\n self.assertEqual(p['comp1.b'], 6.0)\n\n def test_group_add(self):\n model = Group()\n ecomp = ExecComp('b=2.0*a', a=3.0, b=6.0)\n\n msg = \"The 'add' method provides backwards compatibility with OpenMDAO <= 1.x ; \" \\\n \"use 'add_subsystem' instead.\"\n\n with assert_warning(DeprecationWarning, msg):\n comp1 = model.add('comp1', ecomp)\n\n self.assertTrue(ecomp is comp1)\n\n def test_group_simple_promoted(self):\n from openmdao.api import ExecComp, Problem, IndepVarComp\n\n p = Problem()\n p.model.add_subsystem('indep', IndepVarComp('a', 3.0),\n promotes_outputs=['a'])\n p.model.add_subsystem('comp1', ExecComp('b=2.0*a'),\n promotes_inputs=['a'])\n\n p.setup()\n p.run_model()\n\n self.assertEqual(p['a'], 3.0)\n self.assertEqual(p['comp1.b'], 6.0)\n\n def test_group_rename_connect(self):\n from openmdao.api import Problem, IndepVarComp, ExecComp\n\n p = Problem()\n p.model.add_subsystem('indep', IndepVarComp('aa', 3.0),\n promotes=['aa'])\n p.model.add_subsystem('comp1', ExecComp('b=2.0*aa'),\n promotes_inputs=['aa'])\n\n # here we alias 'a' to 'aa' so that it will be automatically\n # connected to the independent variable 'aa'.\n p.model.add_subsystem('comp2', ExecComp('b=3.0*a'),\n promotes_inputs=[('a', 'aa')])\n\n p.setup()\n p.run_model()\n\n self.assertEqual(p['comp1.b'], 6.0)\n self.assertEqual(p['comp2.b'], 9.0)\n\n def test_subsys_attributes(self):\n p = Problem()\n\n class MyGroup(Group):\n def setup(self):\n # two subsystems added during setup\n self.add_subsystem('comp1', ExecComp('b=2.0*a', a=3.0, b=6.0))\n self.add_subsystem('comp2', ExecComp('b=3.0*a', a=4.0, b=12.0))\n\n # subsystems become attributes\n my_group = p.model.add_subsystem('gg', MyGroup())\n self.assertTrue(p.model.gg is my_group)\n\n # after calling setup(), MyGroup's subsystems are also attributes\n p.setup()\n self.assertTrue(hasattr(p.model.gg, 'comp1'))\n self.assertTrue(hasattr(p.model.gg, 'comp2'))\n\n # calling setup() again doesn't break anything\n p.setup()\n self.assertTrue(p.model.gg is my_group)\n self.assertTrue(hasattr(p.model.gg, 'comp1'))\n self.assertTrue(hasattr(p.model.gg, 'comp2'))\n\n # name cannot start with an underscore\n with self.assertRaises(Exception) as err:\n p.model.add_subsystem('_bad_name', Group())\n self.assertEqual(str(err.exception),\n \"'_bad_name' is not a valid system name.\")\n\n # 'name', 'pathname', 'comm' and 'options' are reserved names\n for reserved in ['name', 'pathname', 'comm', 'options']:\n with self.assertRaises(Exception) as err:\n p.model.add_subsystem(reserved, Group())\n 
self.assertEqual(str(err.exception),\n \"Group '' already has an attribute '%s'.\" %\n reserved)\n\n def test_group_nested(self):\n from openmdao.api import ExecComp, Problem, Group\n\n p = Problem()\n p.model.add_subsystem('G1', Group())\n p.model.G1.add_subsystem('comp1', ExecComp('b=2.0*a', a=3.0, b=6.0))\n p.model.G1.add_subsystem('comp2', ExecComp('b=3.0*a', a=4.0, b=12.0))\n\n p.setup()\n\n self.assertEqual(p['G1.comp1.a'], 3.0)\n self.assertEqual(p['G1.comp1.b'], 6.0)\n self.assertEqual(p['G1.comp2.a'], 4.0)\n self.assertEqual(p['G1.comp2.b'], 12.0)\n\n def test_group_getsystem_top(self):\n from openmdao.api import Problem\n from openmdao.core.tests.test_group import BranchGroup\n\n p = Problem(model=BranchGroup())\n p.setup()\n\n c1 = p.model.Branch1.G1.G2.comp1\n self.assertEqual(c1.pathname, 'Branch1.G1.G2.comp1')\n\n c2 = p.model.Branch2.G3.comp2\n self.assertEqual(c2.pathname, 'Branch2.G3.comp2')\n\n def test_group_nested_promoted1(self):\n from openmdao.api import Problem, Group, ExecComp\n\n # promotes from bottom level up 1\n p = Problem()\n g1 = p.model.add_subsystem('G1', Group())\n g1.add_subsystem('comp1', ExecComp('b=2.0*a', a=3.0, b=6.0),\n promotes_inputs=['a'], promotes_outputs=['b'])\n g1.add_subsystem('comp2', ExecComp('b=3.0*a', a=4.0, b=12.0),\n promotes_inputs=['a'])\n p.setup()\n\n # output G1.comp1.b is promoted\n self.assertEqual(p['G1.b'], 6.0)\n # output G1.comp2.b is not promoted\n self.assertEqual(p['G1.comp2.b'], 12.0)\n\n # use unpromoted names for the following 2 promoted inputs\n self.assertEqual(p['G1.comp1.a'], 3.0)\n self.assertEqual(p['G1.comp2.a'], 4.0)\n\n def test_group_nested_promoted2(self):\n from openmdao.api import Problem, Group, ExecComp\n\n # promotes up from G1 level\n p = Problem()\n g1 = Group()\n g1.add_subsystem('comp1', ExecComp('b=2.0*a', a=3.0, b=6.0))\n g1.add_subsystem('comp2', ExecComp('b=3.0*a', a=4.0, b=12.0))\n\n # use glob pattern 'comp?.a' to promote both comp1.a and comp2.a\n # use glob pattern 'comp?.b' to promote both comp1.b and comp2.b\n p.model.add_subsystem('G1', g1,\n promotes_inputs=['comp?.a'],\n promotes_outputs=['comp?.b'])\n p.setup()\n\n # output G1.comp1.b is promoted\n self.assertEqual(p['comp1.b'], 6.0)\n # output G1.comp2.b is promoted\n self.assertEqual(p['comp2.b'], 12.0)\n\n # access both promoted inputs using unpromoted names.\n self.assertEqual(p['G1.comp1.a'], 3.0)\n self.assertEqual(p['G1.comp2.a'], 4.0)\n\n def test_group_promotes(self):\n \"\"\"Promoting a single variable.\"\"\"\n p = Problem()\n p.model.add_subsystem('comp1', IndepVarComp([('a', 2.0), ('x', 5.0)]),\n promotes_outputs=['x'])\n p.model.add_subsystem('comp2', ExecComp('y=2*x'), promotes_inputs=['x'])\n p.setup()\n\n p.set_solver_print(level=0)\n p.run_model()\n\n self.assertEqual(p['comp1.a'], 2)\n self.assertEqual(p['x'], 5)\n self.assertEqual(p['comp2.y'], 10)\n\n def test_group_renames(self):\n p = Problem()\n p.model.add_subsystem('comp1', IndepVarComp('x', 5.0),\n promotes_outputs=[('x', 'foo')])\n p.model.add_subsystem('comp2', ExecComp('y=2*foo'), promotes_inputs=['foo'])\n p.setup()\n\n p.set_solver_print(level=0)\n p.run_model()\n\n self.assertEqual(p['foo'], 5)\n self.assertEqual(p['comp2.y'], 10)\n\n def test_group_renames_errors_single_string(self):\n p = Problem()\n with self.assertRaises(Exception) as err:\n p.model.add_subsystem('comp1', IndepVarComp('x', 5.0),\n promotes_outputs='x')\n self.assertEqual(str(err.exception),\n \": promotes must be an iterator of strings and/or tuples.\")\n\n def 
test_group_renames_errors_not_found(self):\n p = Problem()\n p.model.add_subsystem('comp1', IndepVarComp('x', 5.0),\n promotes_outputs=[('xx', 'foo')])\n p.model.add_subsystem('comp2', ExecComp('y=2*foo'), promotes_inputs=['foo'])\n\n with self.assertRaises(Exception) as err:\n p.setup(check=False)\n self.assertEqual(str(err.exception),\n \"comp1: 'promotes_outputs' failed to find any matches for \"\n \"the following names or patterns: ['xx'].\")\n\n def test_group_renames_errors_bad_tuple(self):\n p = Problem()\n p.model.add_subsystem('comp1', IndepVarComp('x', 5.0),\n promotes_outputs=[('x', 'foo', 'bar')])\n p.model.add_subsystem('comp2', ExecComp('y=2*foo'), promotes_inputs=['foo'])\n\n with self.assertRaises(Exception) as err:\n p.setup(check=False)\n self.assertEqual(str(err.exception),\n \"when adding subsystem 'comp1', entry '('x', 'foo', 'bar')' \"\n \"is not a string or tuple of size 2\")\n\n def test_group_promotes_multiple(self):\n \"\"\"Promoting multiple variables.\"\"\"\n p = Problem()\n p.model.add_subsystem('comp1', IndepVarComp([('a', 2.0), ('x', 5.0)]),\n promotes_outputs=['a', 'x'])\n p.model.add_subsystem('comp2', ExecComp('y=2*x'),\n promotes_inputs=['x'])\n p.setup()\n\n p.set_solver_print(level=0)\n p.run_model()\n\n self.assertEqual(p['a'], 2)\n self.assertEqual(p['x'], 5)\n self.assertEqual(p['comp2.y'], 10)\n\n def test_group_promotes_all(self):\n \"\"\"Promoting all variables with asterisk.\"\"\"\n p = Problem()\n p.model.add_subsystem('comp1', IndepVarComp([('a', 2.0), ('x', 5.0)]),\n promotes_outputs=['*'])\n p.model.add_subsystem('comp2', ExecComp('y=2*x'),\n promotes_inputs=['x'])\n p.setup()\n\n p.set_solver_print(level=0)\n p.run_model()\n\n self.assertEqual(p['a'], 2)\n self.assertEqual(p['x'], 5)\n self.assertEqual(p['comp2.y'], 10)\n\n def test_group_promotes2(self):\n\n class Sellar(Group):\n def setup(self):\n dv = self.add_subsystem('des_vars', IndepVarComp(), promotes=['*'])\n dv.add_output('x', 1.0)\n dv.add_output('z', np.array([5.0, 2.0]))\n\n self.add_subsystem('d1', SellarDis2(),\n promotes_inputs=['y1'], promotes_outputs=['foo'])\n self.add_subsystem('d2', SellarDis2())\n\n p = Problem()\n p.model = Sellar()\n\n with self.assertRaises(Exception) as err:\n p.setup(check=False)\n self.assertEqual(str(err.exception),\n \"d1: 'promotes_outputs' failed to find any matches for \"\n \"the following names or patterns: ['foo'].\")\n\n def test_group_nested_conn(self):\n \"\"\"Example of adding subsystems and issuing connections with nested groups.\"\"\"\n g1 = Group()\n c1_1 = g1.add_subsystem('comp1', IndepVarComp('x', 5.0))\n c1_2 = g1.add_subsystem('comp2', ExecComp('b=2*a'))\n g1.connect('comp1.x', 'comp2.a')\n g2 = Group()\n c2_1 = g2.add_subsystem('comp1', ExecComp('b=2*a'))\n c2_2 = g2.add_subsystem('comp2', ExecComp('b=2*a'))\n g2.connect('comp1.b', 'comp2.a')\n\n model = Group()\n model.add_subsystem('group1', g1)\n model.add_subsystem('group2', g2)\n model.connect('group1.comp2.b', 'group2.comp1.a')\n\n p = Problem(model=model)\n p.setup()\n\n c1_1 = p.model.group1.comp1\n c1_2 = p.model.group1.comp2\n c2_1 = p.model.group2.comp1\n c2_2 = p.model.group2.comp2\n self.assertEqual(c1_1.name, 'comp1')\n self.assertEqual(c1_2.name, 'comp2')\n self.assertEqual(c2_1.name, 'comp1')\n self.assertEqual(c2_2.name, 'comp2')\n\n c1_1 = p.model.group1.comp1\n c1_2 = p.model.group1.comp2\n c2_1 = p.model.group2.comp1\n c2_2 = p.model.group2.comp2\n self.assertEqual(c1_1.name, 'comp1')\n self.assertEqual(c1_2.name, 'comp2')\n self.assertEqual(c2_1.name, 
'comp1')\n self.assertEqual(c2_2.name, 'comp2')\n\n s = p.model._get_subsystem('')\n self.assertEqual(s, None)\n\n p.set_solver_print(level=0)\n p.run_model()\n\n self.assertEqual(p['group1.comp1.x'], 5.0)\n self.assertEqual(p['group1.comp2.b'], 10.0)\n self.assertEqual(p['group2.comp1.b'], 20.0)\n self.assertEqual(p['group2.comp2.b'], 40.0)\n\n def test_reused_output_promoted_names(self):\n prob = Problem()\n prob.model.add_subsystem('px1', IndepVarComp('x1', 100.0))\n G1 = prob.model.add_subsystem('G1', Group())\n G1.add_subsystem(\"C1\", ExecComp(\"y=2.0*x\"), promotes=['y'])\n G1.add_subsystem(\"C2\", ExecComp(\"y=2.0*x\"), promotes=['y'])\n msg = r\"Output name 'y' refers to multiple outputs: \\['G1.C1.y', 'G1.C2.y'\\].\"\n with assertRaisesRegex(self, Exception, msg):\n prob.setup(check=False)\n\n def test_basic_connect_units(self):\n import numpy as np\n\n from openmdao.api import Problem, IndepVarComp, ExecComp\n\n p = Problem()\n\n indep_comp = IndepVarComp()\n indep_comp.add_output('x', np.ones(5), units='ft')\n\n exec_comp = ExecComp('y=sum(x)',\n x={'value': np.zeros(5), 'units': 'inch'},\n y={'units': 'inch'})\n\n p.model.add_subsystem('indep', indep_comp)\n p.model.add_subsystem('comp1', exec_comp)\n p.model.connect('indep.x', 'comp1.x')\n\n p.setup()\n p.run_model()\n\n assert_rel_error(self, p['indep.x'], np.ones(5))\n assert_rel_error(self, p['comp1.x'], np.ones(5)*12.)\n assert_rel_error(self, p['comp1.y'], 60.)\n\n def test_connect_1_to_many(self):\n import numpy as np\n\n from openmdao.api import Problem, IndepVarComp, ExecComp\n\n p = Problem()\n\n p.model.add_subsystem('indep', IndepVarComp('x', np.ones(5)))\n p.model.add_subsystem('C1', ExecComp('y=sum(x)*2.0', x=np.zeros(5)))\n p.model.add_subsystem('C2', ExecComp('y=sum(x)*4.0', x=np.zeros(5)))\n p.model.add_subsystem('C3', ExecComp('y=sum(x)*6.0', x=np.zeros(5)))\n\n p.model.connect('indep.x', ['C1.x', 'C2.x', 'C3.x'])\n\n p.setup()\n p.run_model()\n\n assert_rel_error(self, p['C1.y'], 10.)\n assert_rel_error(self, p['C2.y'], 20.)\n assert_rel_error(self, p['C3.y'], 30.)\n\n def test_double_src_indices(self):\n class MyComp1(ExplicitComponent):\n def setup(self):\n self.add_input('x', np.ones(3), src_indices=[0, 1, 2])\n self.add_output('y', 1.0)\n\n def compute(self, inputs, outputs):\n outputs['y'] = np.sum(inputs['x'])*2.0\n\n p = Problem()\n\n p.model.add_subsystem('indep', IndepVarComp('x', np.ones(5)))\n p.model.add_subsystem('C1', MyComp1())\n p.model.connect('indep.x', 'C1.x', src_indices=[1, 0, 2])\n\n with self.assertRaises(Exception) as context:\n p.setup(check=False)\n self.assertEqual(str(context.exception),\n \": src_indices has been defined in both \"\n \"connect('indep.x', 'C1.x') and add_input('C1.x', ...).\")\n\n def test_connect_src_indices(self):\n import numpy as np\n\n from openmdao.api import Problem, IndepVarComp, ExecComp\n\n p = Problem()\n\n p.model.add_subsystem('indep', IndepVarComp('x', np.ones(5)))\n p.model.add_subsystem('C1', ExecComp('y=sum(x)*2.0', x=np.zeros(3)))\n p.model.add_subsystem('C2', ExecComp('y=sum(x)*4.0', x=np.zeros(2)))\n\n # connect C1.x to the first 3 entries of indep.x\n p.model.connect('indep.x', 'C1.x', src_indices=[0, 1, 2])\n\n # connect C2.x to the last 2 entries of indep.x\n # use -2 (same as 3 in this case) to show that negative indices work.\n p.model.connect('indep.x', 'C2.x', src_indices=[-2, 4])\n\n p.setup()\n p.run_model()\n\n assert_rel_error(self, p['C1.x'], np.ones(3))\n assert_rel_error(self, p['C1.y'], 6.)\n assert_rel_error(self, p['C2.x'], 
np.ones(2))\n assert_rel_error(self, p['C2.y'], 8.)\n\n def test_connect_src_indices_noflat(self):\n import numpy as np\n\n from openmdao.api import Problem, IndepVarComp, ExecComp\n\n p = Problem()\n\n p.model.add_subsystem('indep', IndepVarComp('x', np.arange(12).reshape((4, 3))))\n p.model.add_subsystem('C1', ExecComp('y=sum(x)*2.0', x=np.zeros((2, 2))))\n\n # connect C1.x to entries (0,0), (-1,1), (2,1), (1,1) of indep.x\n p.model.connect('indep.x', 'C1.x',\n src_indices=[[(0, 0), (-1, 1)],\n [(2, 1), (1, 1)]], flat_src_indices=False)\n\n p.setup()\n p.run_model()\n\n assert_rel_error(self, p['C1.x'], np.array([[0., 10.],\n [7., 4.]]))\n assert_rel_error(self, p['C1.y'], 42.)\n\n def test_promote_not_found1(self):\n p = Problem()\n p.model.add_subsystem('indep', IndepVarComp('x', np.ones(5)),\n promotes_outputs=['x'])\n p.model.add_subsystem('C1', ExecComp('y=x'), promotes_inputs=['x'])\n p.model.add_subsystem('C2', ExecComp('y=x'), promotes_outputs=['x*'])\n\n with self.assertRaises(Exception) as context:\n p.setup(check=False)\n self.assertEqual(str(context.exception),\n \"C2: 'promotes_outputs' failed to find any matches for \"\n \"the following names or patterns: ['x*'].\")\n\n def test_promote_not_found2(self):\n p = Problem()\n p.model.add_subsystem('indep', IndepVarComp('x', np.ones(5)),\n promotes_outputs=['x'])\n p.model.add_subsystem('C1', ExecComp('y=x'), promotes_inputs=['x'])\n p.model.add_subsystem('C2', ExecComp('y=x'), promotes_inputs=['xx'])\n\n with self.assertRaises(Exception) as context:\n p.setup(check=False)\n self.assertEqual(str(context.exception),\n \"C2: 'promotes_inputs' failed to find any matches for \"\n \"the following names or patterns: ['xx'].\")\n\n def test_promote_not_found3(self):\n p = Problem()\n p.model.add_subsystem('indep', IndepVarComp('x', np.ones(5)),\n promotes_outputs=['x'])\n p.model.add_subsystem('C1', ExecComp('y=x'), promotes=['x'])\n p.model.add_subsystem('C2', ExecComp('y=x'), promotes=['xx'])\n\n with self.assertRaises(Exception) as context:\n p.setup(check=False)\n self.assertEqual(str(context.exception),\n \"C2: 'promotes' failed to find any matches for \"\n \"the following names or patterns: ['xx'].\")\n\n def test_missing_promote_var(self):\n p = Problem()\n\n indep_var_comp = IndepVarComp('z', val=2.)\n p.model.add_subsystem('indep_vars', indep_var_comp, promotes=['*'])\n\n p.model.add_subsystem('d1', ExecComp(\"y1=z+bar\"),\n promotes_inputs=['z', 'foo'])\n\n with self.assertRaises(Exception) as context:\n p.setup(check=False)\n self.assertEqual(str(context.exception),\n \"d1: 'promotes_inputs' failed to find any matches for \"\n \"the following names or patterns: ['foo'].\")\n\n def test_missing_promote_var2(self):\n p = Problem()\n\n indep_var_comp = IndepVarComp('z', val=2.)\n p.model.add_subsystem('indep_vars', indep_var_comp, promotes=['*'])\n\n p.model.add_subsystem('d1', ExecComp(\"y1=z+bar\"),\n promotes_outputs=['y1', 'blammo', ('bar', 'blah')])\n\n with self.assertRaises(Exception) as context:\n p.setup(check=False)\n self.assertEqual(str(context.exception),\n \"d1: 'promotes_outputs' failed to find any matches for \"\n \"the following names or patterns: ['bar', 'blammo'].\")\n\n def test_promote_src_indices(self):\n import numpy as np\n\n from openmdao.api import ExplicitComponent, Problem, IndepVarComp\n\n class MyComp1(ExplicitComponent):\n def setup(self):\n # this input will connect to entries 0, 1, and 2 of its source\n self.add_input('x', np.ones(3), src_indices=[0, 1, 2])\n self.add_output('y', 1.0)\n\n def 
compute(self, inputs, outputs):\n outputs['y'] = np.sum(inputs['x'])*2.0\n\n class MyComp2(ExplicitComponent):\n def setup(self):\n # this input will connect to entries 3 and 4 of its source\n self.add_input('x', np.ones(2), src_indices=[3, 4])\n self.add_output('y', 1.0)\n\n def compute(self, inputs, outputs):\n outputs['y'] = np.sum(inputs['x'])*4.0\n\n p = Problem()\n\n # by promoting the following output and inputs to 'x', they will\n # be automatically connected\n p.model.add_subsystem('indep', IndepVarComp('x', np.ones(5)),\n promotes_outputs=['x'])\n p.model.add_subsystem('C1', MyComp1(), promotes_inputs=['x'])\n p.model.add_subsystem('C2', MyComp2(), promotes_inputs=['x'])\n\n p.setup()\n p.run_model()\n\n assert_rel_error(self, p['C1.x'], np.ones(3))\n assert_rel_error(self, p['C1.y'], 6.)\n assert_rel_error(self, p['C2.x'], np.ones(2))\n assert_rel_error(self, p['C2.y'], 8.)\n\n def test_promote_src_indices_nonflat(self):\n import numpy as np\n\n from openmdao.api import ExplicitComponent, Problem, IndepVarComp\n\n class MyComp(ExplicitComponent):\n def setup(self):\n # We want to pull the following 4 values out of the source:\n # [(0,0), (3,1), (2,1), (1,1)].\n # Because our input is also non-flat we arrange the\n # source index tuples into an array having the same shape\n # as our input. If we didn't set flat_src_indices to False,\n # we could specify src_indices as a 1D array of indices into\n # the flattened source.\n self.add_input('x', np.ones((2, 2)),\n src_indices=[[(0, 0), (3, 1)],\n [(2, 1), (1, 1)]],\n flat_src_indices=False)\n self.add_output('y', 1.0)\n\n def compute(self, inputs, outputs):\n outputs['y'] = np.sum(inputs['x'])\n\n p = Problem()\n\n # by promoting the following output and inputs to 'x', they will\n # be automatically connected\n p.model.add_subsystem('indep',\n IndepVarComp('x', np.arange(12).reshape((4, 3))),\n promotes_outputs=['x'])\n p.model.add_subsystem('C1', MyComp(),\n promotes_inputs=['x'])\n\n p.setup()\n p.run_model()\n\n assert_rel_error(self, p['C1.x'],\n np.array([[0., 10.],\n [7., 4.]]))\n assert_rel_error(self, p['C1.y'], 21.)\n\n def test_promote_src_indices_nonflat_to_scalars(self):\n class MyComp(ExplicitComponent):\n def setup(self):\n self.add_input('x', 1.0, src_indices=[(3, 1)], shape=(1,))\n self.add_output('y', 1.0)\n\n def compute(self, inputs, outputs):\n outputs['y'] = inputs['x']*2.0\n\n p = Problem()\n\n p.model.add_subsystem('indep',\n IndepVarComp('x', np.arange(12).reshape((4, 3))),\n promotes_outputs=['x'])\n p.model.add_subsystem('C1', MyComp(), promotes_inputs=['x'])\n\n p.set_solver_print(level=0)\n p.setup()\n p.run_model()\n assert_rel_error(self, p['C1.x'], 10.)\n assert_rel_error(self, p['C1.y'], 20.)\n\n def test_promote_src_indices_nonflat_error(self):\n class MyComp(ExplicitComponent):\n def setup(self):\n self.add_input('x', 1.0, src_indices=[(3, 1)])\n self.add_output('y', 1.0)\n\n def compute(self, inputs, outputs):\n outputs['y'] = np.sum(inputs['x'])\n\n p = Problem()\n\n p.model.add_subsystem('indep',\n IndepVarComp('x', np.arange(12).reshape((4, 3))),\n promotes_outputs=['x'])\n p.model.add_subsystem('C1', MyComp(), promotes_inputs=['x'])\n\n with self.assertRaises(Exception) as context:\n p.setup(check=False)\n self.assertEqual(str(context.exception),\n \"src_indices for 'x' is not flat, so its input shape \"\n \"must be provided. 
src_indices may contain an extra \"\n \"dimension if the connected source is not flat, making \"\n \"the input shape ambiguous.\")\n\n @parameterized.expand(itertools.product(\n [((4, 3), [(0, 0), (3, 1), (2, 1), (1, 1)]),\n ((1, 12), [(0, 0), (0, 10), (0, 7), (0, 4)]),\n ((12,), [0, 10, 7, 4]),\n ((12, 1), [(0, 0), (10, 0), (7, 0), (4, 0)])],\n [(2, 2), (4,), (4, 1), (1, 4)],\n ), name_func=lambda f, n, p: 'test_promote_src_indices_'+'_'.join(str(a) for a in p.args))\n def test_promote_src_indices_param(self, src_info, tgt_shape):\n src_shape, idxvals = src_info\n\n class MyComp(ExplicitComponent):\n def setup(self):\n if len(tgt_shape) == 1:\n tshape = None # don't need to set shape if input is flat\n sidxs = idxvals\n else:\n tshape = tgt_shape\n sidxs = []\n i = 0\n for r in range(tgt_shape[0]):\n sidxs.append([])\n for c in range(tgt_shape[1]):\n sidxs[-1].append(idxvals[i])\n i += 1\n\n self.add_input('x', np.ones(4).reshape(tgt_shape),\n src_indices=sidxs, shape=tshape)\n self.add_output('y', 1.0)\n\n def compute(self, inputs, outputs):\n outputs['y'] = np.sum(inputs['x'])\n\n p = Problem()\n\n p.model.add_subsystem('indep',\n IndepVarComp('x', np.arange(12).reshape(src_shape)),\n promotes_outputs=['x'])\n p.model.add_subsystem('C1', MyComp(), promotes_inputs=['x'])\n\n p.set_solver_print(level=0)\n p.setup(check=False)\n p.run_model()\n assert_rel_error(self, p['C1.x'],\n np.array([0., 10., 7., 4.]).reshape(tgt_shape))\n assert_rel_error(self, p['C1.y'], 21.)\n\n def test_set_order_feature(self):\n from openmdao.api import Problem, IndepVarComp, ExplicitComponent\n\n class ReportOrderComp(ExplicitComponent):\n \"\"\"Adds name to list.\"\"\"\n\n def __init__(self, order_list):\n super(ReportOrderComp, self).__init__()\n self._order_list = order_list\n\n def compute(self, inputs, outputs):\n self._order_list.append(self.pathname)\n\n # this list will record the execution order of our C1, C2, and C3 components\n order_list = []\n\n prob = Problem()\n model = prob.model\n\n model.add_subsystem('indeps', IndepVarComp('x', 1.))\n model.add_subsystem('C1', ReportOrderComp(order_list))\n model.add_subsystem('C2', ReportOrderComp(order_list))\n model.add_subsystem('C3', ReportOrderComp(order_list))\n\n prob.setup()\n prob.run_model()\n\n self.assertEqual(order_list, ['C1', 'C2', 'C3'])\n\n # reset the shared order list\n order_list[:] = []\n\n # now swap C2 and C1 in the order\n model.set_order(['indeps', 'C2', 'C1', 'C3'])\n\n # after changing the order, we must call setup again\n prob.setup()\n prob.run_model()\n\n self.assertEqual(order_list, ['C2', 'C1', 'C3'])\n\n def test_set_order(self):\n\n order_list = []\n prob = Problem()\n model = prob.model\n model.nonlinear_solver = NonlinearRunOnce()\n model.add_subsystem('indeps', IndepVarComp('x', 1.))\n model.add_subsystem('C1', ReportOrderComp(order_list))\n model.add_subsystem('C2', ReportOrderComp(order_list))\n model.add_subsystem('C3', ReportOrderComp(order_list))\n model.connect('indeps.x', 'C1.x')\n model.connect('C1.y', 'C2.x')\n model.connect('C2.y', 'C3.x')\n prob.set_solver_print(level=0)\n\n self.assertEqual(['indeps', 'C1', 'C2', 'C3'],\n [s.name for s in model._static_subsystems_allprocs])\n\n prob.setup(check=False)\n prob.run_model()\n\n self.assertEqual(['C1', 'C2', 'C3'], order_list)\n\n order_list[:] = []\n\n # Big boy rules\n model.set_order(['indeps', 'C2', 'C1', 'C3'])\n\n prob.setup(check=False)\n prob.run_model()\n self.assertEqual(['C2', 'C1', 'C3'], order_list)\n\n # Extra\n with self.assertRaises(ValueError) as 
cm:\n model.set_order(['indeps', 'C2', 'junk', 'C1', 'C3'])\n\n self.assertEqual(str(cm.exception),\n \": subsystem(s) ['junk'] found in subsystem order but don't exist.\")\n\n # Missing\n with self.assertRaises(ValueError) as cm:\n model.set_order(['indeps', 'C2', 'C3'])\n\n self.assertEqual(str(cm.exception),\n \": ['C1'] expected in subsystem order and not found.\")\n\n # Extra and Missing\n with self.assertRaises(ValueError) as cm:\n model.set_order(['indeps', 'C2', 'junk', 'C1', 'junk2'])\n\n self.assertEqual(str(cm.exception),\n \": ['C3'] expected in subsystem order and not found.\\n\"\n \": subsystem(s) ['junk', 'junk2'] found in subsystem order \"\n \"but don't exist.\")\n\n # Dupes\n with self.assertRaises(ValueError) as cm:\n model.set_order(['indeps', 'C2', 'C1', 'C3', 'C1'])\n\n self.assertEqual(str(cm.exception),\n \": Duplicate name(s) found in subsystem order list: ['C1']\")\n\n def test_set_order_init_subsystems(self):\n prob = Problem()\n model = prob.model\n model.add_subsystem('indeps', IndepVarComp('x', 1.))\n model.add_subsystem('G1', SetOrderGroup())\n prob.setup(check=False)\n prob.run_model()\n\n # this test passes if it doesn't raise an exception\n\n def test_guess_nonlinear_feature(self):\n from openmdao.api import Problem, Group, ExecComp, IndepVarComp, BalanceComp, NewtonSolver, DirectSolver\n\n class Discipline(Group):\n\n def setup(self):\n self.add_subsystem('comp0', ExecComp('y=x**2'))\n self.add_subsystem('comp1', ExecComp('z=2*external_input'),\n promotes_inputs=['external_input'])\n\n self.add_subsystem('balance', BalanceComp('x', lhs_name='y', rhs_name='z'),\n promotes_outputs=['x'])\n\n self.connect('comp0.y', 'balance.y')\n self.connect('comp1.z', 'balance.z')\n\n self.connect('x', 'comp0.x')\n\n self.nonlinear_solver = NewtonSolver(iprint=2, solve_subsystems=True)\n self.linear_solver = DirectSolver()\n\n def guess_nonlinear(self, inputs, outputs, residuals):\n # inputs are addressed using full path name, regardless of promotion\n external_input = inputs['comp1.external_input']\n\n # balance drives x**2 = 2*external_input\n x_guess = (2*external_input)**.5\n\n # outputs are addressed by the their promoted names\n outputs['x'] = x_guess # perfect guess should converge in 0 iterations\n\n p = Problem()\n\n p.model.add_subsystem('parameters', IndepVarComp('input_value', 1.))\n p.model.add_subsystem('discipline', Discipline())\n\n p.model.connect('parameters.input_value', 'discipline.external_input')\n\n p.setup()\n p.run_model()\n\n self.assertEqual(p.model.nonlinear_solver._iter_count, 0)\n\n assert_rel_error(self, p['discipline.x'], 1.41421356, 1e-6)\n\n\nclass MyComp(ExplicitComponent):\n def __init__(self, input_shape, src_indices=None, flat_src_indices=False):\n super(MyComp, self).__init__()\n self._input_shape = input_shape\n self._src_indices = src_indices\n self._flat_src_indices = flat_src_indices\n\n def setup(self):\n self.add_input('x', val=np.zeros(self._input_shape),\n src_indices=self._src_indices, flat_src_indices=self._flat_src_indices)\n self.add_output('y', val=np.zeros(self._input_shape))\n\n def compute(self, inputs, outputs):\n outputs['y'] = 2.0 * inputs['x']\n\n\ndef src_indices_model(src_shape, tgt_shape, src_indices=None, flat_src_indices=False,\n promotes=None):\n prob = Problem()\n prob.model.add_subsystem('indeps', IndepVarComp('x', shape=src_shape),\n promotes=promotes)\n prob.model.add_subsystem('C1', MyComp(tgt_shape,\n src_indices=src_indices if promotes else None,\n flat_src_indices=flat_src_indices),\n 
promotes=promotes)\n if promotes is None:\n prob.model.connect('indeps.x', 'C1.x', src_indices=src_indices,\n flat_src_indices=flat_src_indices)\n prob.setup(check=False)\n return prob\n\n\nclass TestConnect(unittest.TestCase):\n\n def setUp(self):\n prob = Problem(Group())\n\n sub = prob.model.add_subsystem('sub', Group())\n\n idv = sub.add_subsystem('src', IndepVarComp())\n idv.add_output('x', np.arange(15).reshape((5, 3))) # array\n idv.add_output('s', 3.) # scalar\n\n sub.add_subsystem('tgt', ExecComp('y = x'))\n sub.add_subsystem('cmp', ExecComp('z = x'))\n sub.add_subsystem('arr', ExecComp('a = x', x=np.zeros(2)))\n\n self.sub = sub\n self.prob = prob\n\n def test_src_indices_as_int_list(self):\n self.sub.connect('src.x', 'tgt.x', src_indices=[1])\n\n def test_src_indices_as_int_array(self):\n self.sub.connect('src.x', 'tgt.x', src_indices=np.zeros(1, dtype=int))\n\n def test_src_indices_as_float_list(self):\n msg = \"src_indices must contain integers, but src_indices for \" + \\\n \"connection from 'src.x' to 'tgt.x' is <.* 'numpy.float64'>.\"\n\n with assertRaisesRegex(self, TypeError, msg):\n self.sub.connect('src.x', 'tgt.x', src_indices=[1.0])\n\n def test_src_indices_as_float_array(self):\n msg = \"src_indices must contain integers, but src_indices for \" + \\\n \"connection from 'src.x' to 'tgt.x' is <.* 'numpy.float64'>.\"\n\n with assertRaisesRegex(self, TypeError, msg):\n self.sub.connect('src.x', 'tgt.x', src_indices=np.zeros(1))\n\n def test_src_indices_as_str(self):\n msg = \"src_indices must be an index array, \" + \\\n \"did you mean connect('src.x', [tgt.x, cmp.x])?\"\n\n with assertRaisesRegex(self, TypeError, msg):\n self.sub.connect('src.x', 'tgt.x', 'cmp.x')\n\n def test_already_connected(self):\n msg = \"Input 'tgt.x' is already connected to 'src.x'.\"\n\n self.sub.connect('src.x', 'tgt.x', src_indices=[1])\n with assertRaisesRegex(self, RuntimeError, msg):\n self.sub.connect('cmp.x', 'tgt.x', src_indices=[1])\n\n def test_invalid_source(self):\n msg = \"Output 'src.z' does not exist for connection \" + \\\n \"in 'sub' from 'src.z' to 'tgt.x'.\"\n\n # source and target names can't be checked until setup\n # because setup is not called until then\n self.sub.connect('src.z', 'tgt.x', src_indices=[1])\n with assertRaisesRegex(self, NameError, msg):\n self.prob.setup(check=False)\n\n def test_invalid_target(self):\n msg = \"Input 'tgt.z' does not exist for connection \" + \\\n \"in 'sub' from 'src.x' to 'tgt.z'.\"\n\n # source and target names can't be checked until setup\n # because setup is not called until then\n self.sub.connect('src.x', 'tgt.z', src_indices=[1])\n with assertRaisesRegex(self, NameError, msg):\n self.prob.setup(check=False)\n\n def test_connect_within_system(self):\n msg = \"Output and input are in the same System for connection \" + \\\n \"from 'tgt.y' to 'tgt.x'.\"\n\n with assertRaisesRegex(self, RuntimeError, msg):\n self.sub.connect('tgt.y', 'tgt.x', src_indices=[1])\n\n def test_connect_within_system_with_promotes(self):\n prob = Problem(Group())\n\n sub = prob.model.add_subsystem('sub', Group())\n sub.add_subsystem('tgt', ExecComp('y = x'), promotes_outputs=['y'])\n sub.connect('y', 'tgt.x', src_indices=[1])\n\n msg = \"Output and input are in the same System for connection \" + \\\n \"in 'sub' from 'y' to 'tgt.x'.\"\n\n with assertRaisesRegex(self, RuntimeError, msg):\n prob.setup(check=False)\n\n def test_connect_units_with_unitless(self):\n prob = Problem(Group())\n prob.model.add_subsystem('px1', IndepVarComp('x1', 100.0))\n 
prob.model.add_subsystem('src', ExecComp('x2 = 2 * x1', x2={'units': 'degC'}))\n prob.model.add_subsystem('tgt', ExecComp('y = 3 * x', x={'units': 'unitless'}))\n\n prob.model.connect('px1.x1', 'src.x1')\n prob.model.connect('src.x2', 'tgt.x')\n\n msg = \"Output 'src.x2' with units of 'degC' is connected \" \\\n \"to input 'tgt.x' which has no units.\"\n\n with assert_warning(UserWarning, msg):\n prob.setup(check=False)\n\n def test_connect_incompatible_units(self):\n msg = \"Output units of 'degC' for 'src.x2' are incompatible \" \\\n \"with input units of 'm' for 'tgt.x'.\"\n\n prob = Problem(Group())\n prob.model.add_subsystem('px1', IndepVarComp('x1', 100.0))\n prob.model.add_subsystem('src', ExecComp('x2 = 2 * x1', x2={'units': 'degC'}))\n prob.model.add_subsystem('tgt', ExecComp('y = 3 * x', x={'units': 'm'}))\n\n prob.model.connect('px1.x1', 'src.x1')\n prob.model.connect('src.x2', 'tgt.x')\n\n with assertRaisesRegex(self, RuntimeError, msg):\n prob.setup(check=False)\n\n def test_connect_units_with_nounits(self):\n prob = Problem(Group())\n prob.model.add_subsystem('px1', IndepVarComp('x1', 100.0))\n prob.model.add_subsystem('src', ExecComp('x2 = 2 * x1'))\n prob.model.add_subsystem('tgt', ExecComp('y = 3 * x', x={'units': 'degC'}))\n\n prob.model.connect('px1.x1', 'src.x1')\n prob.model.connect('src.x2', 'tgt.x')\n\n prob.set_solver_print(level=0)\n\n msg = \"Input 'tgt.x' with units of 'degC' is \" \\\n \"connected to output 'src.x2' which has no units.\"\n\n with assert_warning(UserWarning, msg):\n prob.setup(check=False)\n\n prob.run_model()\n\n assert_rel_error(self, prob['tgt.y'], 600.)\n\n def test_connect_units_with_nounits_prom(self):\n prob = Problem(Group())\n prob.model.add_subsystem('px1', IndepVarComp('x', 100.0), promotes_outputs=['x'])\n prob.model.add_subsystem('src', ExecComp('y = 2 * x'), promotes=['x', 'y'])\n prob.model.add_subsystem('tgt', ExecComp('z = 3 * y', y={'units': 'degC'}), promotes=['y'])\n\n prob.set_solver_print(level=0)\n\n msg = \"Input 'tgt.y' with units of 'degC' is \" \\\n \"connected to output 'src.y' which has no units.\"\n\n with assert_warning(UserWarning, msg):\n prob.setup(check=False)\n\n prob.run_model()\n\n assert_rel_error(self, prob['tgt.z'], 600.)\n\n def test_mix_promotes_types(self):\n prob = Problem()\n prob.model.add_subsystem('src', ExecComp(['y = 2 * x', 'y2 = 3 * x']),\n promotes=['x', 'y'], promotes_outputs=['y2'])\n\n with self.assertRaises(RuntimeError) as context:\n prob.setup(check=False)\n\n self.assertEqual(str(context.exception),\n \"src: 'promotes' cannot be used at the same time as \"\n \"'promotes_inputs' or 'promotes_outputs'.\")\n\n def test_mix_promotes_types2(self):\n prob = Problem()\n prob.model.add_subsystem('src', ExecComp(['y = 2 * x', 'y2 = 3 * x2']),\n promotes=['x', 'y'], promotes_inputs=['x2'])\n with self.assertRaises(RuntimeError) as context:\n prob.setup(check=False)\n\n self.assertEqual(str(context.exception),\n \"src: 'promotes' cannot be used at the same time as \"\n \"'promotes_inputs' or 'promotes_outputs'.\")\n\n def test_nested_nested_conn(self):\n prob = Problem()\n root = prob.model\n\n root.add_subsystem('p', IndepVarComp('x', 1.0))\n\n G1 = root.add_subsystem('G1', Group())\n par1 = G1.add_subsystem('par1', Group())\n\n par1.add_subsystem('c2', ExecComp('y = x * 2.0'))\n par1.add_subsystem('c4', ExecComp('y = x * 4.0'))\n\n prob.model.add_design_var('p.x')\n prob.model.add_constraint('G1.par1.c4.y', upper=0.0)\n\n root.connect('p.x', 'G1.par1.c2.x')\n root.connect('G1.par1.c2.y', 
'G1.par1.c4.x')\n\n prob.setup(check=False)\n prob.run_driver()\n\n assert_rel_error(self, prob['G1.par1.c4.y'], 8.0)\n\n def test_bad_shapes(self):\n self.sub.connect('src.s', 'arr.x')\n\n msg = (\"The source and target shapes do not match or are ambiguous for the connection \"\n \"'sub.src.s' to 'sub.arr.x'.\")\n\n with assertRaisesRegex(self, ValueError, msg):\n self.prob.setup(check=False)\n\n def test_bad_indices_shape(self):\n p = Problem()\n p.model.add_subsystem('IV', IndepVarComp('x', np.arange(12).reshape((4, 3))))\n p.model.add_subsystem('C1', ExecComp('y=sum(x)*2.0', x=np.zeros((2, 2))))\n\n p.model.connect('IV.x', 'C1.x', src_indices=[(1, 1)])\n\n msg = (r\"The source indices \\[\\[1 1\\]\\] do not specify a valid shape for \"\n r\"the connection 'IV.x' to 'C1.x'. The target \"\n r\"shape is \\(2.*, 2.*\\) but indices are \\(1.*, 2.*\\).\")\n\n with assertRaisesRegex(self, ValueError, msg):\n p.setup(check=False)\n\n def test_bad_indices_dimensions(self):\n self.sub.connect('src.x', 'arr.x', src_indices=[(2, -1, 2), (2, 2, 2)],\n flat_src_indices=False)\n\n msg = (\"The source indices [[ 2 -1 2] [ 2 2 2]] do not specify a \"\n \"valid shape for the connection 'sub.src.x' to 'sub.arr.x'. \"\n \"The source has 2 dimensions but the indices expect 3.\")\n\n try:\n self.prob.setup(check=False)\n except ValueError as err:\n self.assertEqual(str(err), msg)\n else:\n self.fail('Exception expected.')\n\n def test_bad_indices_index(self):\n # the index value within src_indices is outside the valid range for the source\n self.sub.connect('src.x', 'arr.x', src_indices=[(2, -1), (4, 4)],\n flat_src_indices=False)\n\n msg = (\"The source indices do not specify a valid index for the \"\n \"connection 'sub.src.x' to 'sub.arr.x'. Index '4' \"\n \"is out of range for source dimension of size 3.\")\n\n try:\n self.prob.setup(check=False)\n except ValueError as err:\n self.assertEqual(str(err), msg)\n else:\n self.fail('Exception expected.')\n\n def test_src_indices_shape(self):\n src_indices_model(src_shape=(3, 3), tgt_shape=(2, 2),\n src_indices=[[4, 5], [7, 8]],\n flat_src_indices=True)\n\n def test_src_indices_shape_bad_idx_flat(self):\n try:\n src_indices_model(src_shape=(3, 3), tgt_shape=(2, 2),\n src_indices=[[4, 5], [7, 9]],\n flat_src_indices=True)\n except Exception as err:\n self.assertEqual(str(err), \"The source indices do not specify a valid index \"\n \"for the connection 'indeps.x' to 'C1.x'. \"\n \"Index '9' is out of range for a flat source of size 9.\")\n else:\n self.fail(\"Exception expected.\")\n\n def test_src_indices_shape_bad_idx_flat_promotes(self):\n try:\n src_indices_model(src_shape=(3, 3), tgt_shape=(2, 2),\n src_indices=[[4, 5], [7, 9]],\n flat_src_indices=True, promotes=['x'])\n except Exception as err:\n self.assertEqual(str(err), \"The source indices do not specify a valid index \"\n \"for the connection 'indeps.x' to 'C1.x'. \"\n \"Index '9' is out of range for a flat source of size 9.\")\n else:\n self.fail(\"Exception expected.\")\n\n def test_src_indices_shape_bad_idx_flat_neg(self):\n try:\n src_indices_model(src_shape=(3, 3), tgt_shape=(2, 2),\n src_indices=[[-10, 5], [7, 8]],\n flat_src_indices=True)\n except Exception as err:\n self.assertEqual(str(err), \"The source indices do not specify a valid index \"\n \"for the connection 'indeps.x' to 'C1.x'. 
\"\n \"Index '-10' is out of range for a flat source of size 9.\")\n else:\n self.fail(\"Exception expected.\")\n\n\nif __name__ == \"__main__\":\n unittest.main()\n", "\"\"\"Define the base Solver, NonlinearSolver, and LinearSolver classes.\"\"\"\n\nfrom __future__ import division, print_function\n\nfrom six import iteritems, reraise\nfrom collections import OrderedDict\nimport os\nimport pprint\nimport re\nimport sys\n\nimport numpy as np\n\nfrom openmdao.core.analysis_error import AnalysisError\nfrom openmdao.recorders.recording_iteration_stack import Recording\nfrom openmdao.recorders.recording_manager import RecordingManager\nfrom openmdao.utils.mpi import MPI\nfrom openmdao.utils.options_dictionary import OptionsDictionary\nfrom openmdao.utils.record_util import create_local_meta, check_path\n\n_emptyset = set()\n\n\nclass SolverInfo(object):\n \"\"\"\n Communal object for storing some formatting for solver iprint.\n\n Attributes\n ----------\n prefix : str\n Prefix to prepend during this iprint.\n stack : List\n List of strings; strings are popped and appended as needed.\n \"\"\"\n\n def __init__(self):\n \"\"\"\n Initialize.\n \"\"\"\n self.prefix = \"\"\n self.stack = []\n\n def clear(self):\n \"\"\"\n Clear out the iprint stack, in case something is left over from a handled exception.\n \"\"\"\n self.prefix = \"\"\n self.stack = []\n\n def pop(self):\n \"\"\"\n Remove one level of solver depth in the printing.\n \"\"\"\n last_string = self.stack.pop()\n nchar = len(last_string)\n self.prefix = self.prefix[:-nchar]\n\n def append_solver(self):\n \"\"\"\n Add a new level for the main solver in a group.\n \"\"\"\n new_str = '+ '\n self.prefix += new_str\n self.stack.append(new_str)\n\n def append_subsolver(self):\n \"\"\"\n Add a new level for any sub-solver for your solver.\n \"\"\"\n new_str = '| '\n self.prefix += new_str\n self.stack.append(new_str)\n\n def append_precon(self):\n \"\"\"\n Add a new level for any preconditioner to a linear solver.\n \"\"\"\n new_str = '| precon:'\n self.prefix += new_str\n self.stack.append(new_str)\n\n def save_cache(self):\n \"\"\"\n Save prefix and stack so that they can be restored later in event of an exception recovery.\n\n Returns\n -------\n tuple(str, list)\n cache of current stack\n \"\"\"\n return (self.prefix, self.stack)\n\n def restore_cache(self, cache):\n \"\"\"\n Restore previously saved iprint stack names.\n\n Parameters\n ----------\n cache : tuple(str, list)\n cache of current stack\n \"\"\"\n self.prefix, self.stack = cache\n\n\nclass Solver(object):\n \"\"\"\n Base solver class.\n\n This class is subclassed by NonlinearSolver and LinearSolver,\n which are in turn subclassed by actual solver implementations.\n\n Attributes\n ----------\n _system : <System>\n Pointer to the owning system.\n _depth : int\n How many subsolvers deep this solver is (0 means not a subsolver).\n _vec_names : [str, ...]\n List of right-hand-side (RHS) vector names.\n _mode : str\n 'fwd' or 'rev', applicable to linear solvers only.\n _iter_count : int\n Number of iterations for the current invocation of the solver.\n _rec_mgr : <RecordingManager>\n object that manages all recorders added to this solver\n cite : str\n Listing of relevant citations that should be referenced when\n publishing work that uses this class.\n options : <OptionsDictionary>\n Options dictionary.\n recording_options : <OptionsDictionary>\n Recording options dictionary.\n supports : <OptionsDictionary>\n Options dictionary describing what features are supported by this\n 
solver.\n _filtered_vars_to_record: Dict\n Dict of list of var names to record\n _norm0: float\n Normalization factor\n _solver_info : SolverInfo\n A stack-like object shared by all Solvers in the model.\n \"\"\"\n\n # Object to store some formatting for iprint that is shared across all solvers.\n SOLVER = 'base_solver'\n\n def __init__(self, **kwargs):\n \"\"\"\n Initialize all attributes.\n\n Parameters\n ----------\n **kwargs : dict of keyword arguments\n Keyword arguments that will be mapped into the Solver options.\n \"\"\"\n self._system = None\n self._depth = 0\n self._vec_names = None\n self._mode = 'fwd'\n self._iter_count = 0\n self._solver_info = None\n\n # Solver options\n self.options = OptionsDictionary()\n self.options.declare('maxiter', types=int, default=10,\n desc='maximum number of iterations')\n self.options.declare('atol', default=1e-10,\n desc='absolute error tolerance')\n self.options.declare('rtol', default=1e-10,\n desc='relative error tolerance')\n self.options.declare('iprint', types=int, default=1,\n desc='whether to print output')\n self.options.declare('err_on_maxiter', types=bool, default=False,\n desc=\"When True, AnalysisError will be raised if we don't converge.\")\n\n # Case recording options\n self.recording_options = OptionsDictionary()\n self.recording_options.declare('record_abs_error', types=bool, default=True,\n desc='Set to True to record absolute error at the \\\n solver level')\n self.recording_options.declare('record_rel_error', types=bool, default=True,\n desc='Set to True to record relative error at the \\\n solver level')\n self.recording_options.declare('record_inputs', types=bool, default=True,\n desc='Set to True to record inputs at the solver level')\n self.recording_options.declare('record_outputs', types=bool, default=True,\n desc='Set to True to record outputs at the solver level')\n self.recording_options.declare('record_solver_residuals', types=bool, default=False,\n desc='Set to True to record residuals at the solver level')\n self.recording_options.declare('record_metadata', types=bool, desc='Record metadata',\n default=True)\n self.recording_options.declare('includes', types=list, default=['*'],\n desc='Patterns for variables to include in recording')\n self.recording_options.declare('excludes', types=list, default=[],\n desc='Patterns for vars to exclude in recording '\n '(processed post-includes)')\n # Case recording related\n self._filtered_vars_to_record = {}\n self._norm0 = 0.0\n\n # What the solver supports.\n self.supports = OptionsDictionary()\n self.supports.declare('gradients', types=bool, default=False)\n self.supports.declare('implicit_components', types=bool, default=False)\n\n self._declare_options()\n self.options.update(kwargs)\n\n self._rec_mgr = RecordingManager()\n\n self.cite = \"\"\n\n def _assembled_jac_solver_iter(self):\n \"\"\"\n Return an empty generator of lin solvers using assembled jacs.\n \"\"\"\n for i in ():\n yield\n\n def add_recorder(self, recorder):\n \"\"\"\n Add a recorder to the solver's RecordingManager.\n\n Parameters\n ----------\n recorder : <CaseRecorder>\n A recorder instance to be added to RecManager.\n \"\"\"\n if MPI:\n raise RuntimeError(\n \"Recording of Solvers when running parallel code is not supported yet\")\n self._rec_mgr.append(recorder)\n\n def _declare_options(self):\n \"\"\"\n Declare options before kwargs are processed in the init method.\n\n This is optionally implemented by subclasses of Solver.\n \"\"\"\n pass\n\n def _setup_solvers(self, system, depth):\n \"\"\"\n 
Assign system instance, set depth, and optionally perform setup.\n\n Parameters\n ----------\n system : <System>\n pointer to the owning system.\n depth : int\n depth of the current system (already incremented).\n \"\"\"\n self._system = system\n self._depth = depth\n self._solver_info = system._solver_info\n self._recording_iter = system._recording_iter\n\n if isinstance(self, LinearSolver) and not system._use_derivatives:\n return\n\n self._rec_mgr.startup(self)\n self._rec_mgr.record_metadata(self)\n\n myoutputs = myresiduals = myinputs = set()\n incl = self.recording_options['includes']\n excl = self.recording_options['excludes']\n\n if self.recording_options['record_solver_residuals']:\n if isinstance(self, NonlinearSolver):\n residuals = system._residuals\n else: # it's a LinearSolver\n residuals = system._vectors['residual']['linear']\n\n myresiduals = {n for n in residuals._names if check_path(n, incl, excl)}\n\n if self.recording_options['record_outputs']:\n if isinstance(self, NonlinearSolver):\n outputs = system._outputs\n else: # it's a LinearSolver\n outputs = system._vectors['output']['linear']\n\n myoutputs = {n for n in outputs._names if check_path(n, incl, excl)}\n\n if self.recording_options['record_inputs']:\n if isinstance(self, NonlinearSolver):\n inputs = system._inputs\n else:\n inputs = system._vectors['input']['linear']\n\n myinputs = {n for n in inputs._names if check_path(n, incl, excl)}\n\n self._filtered_vars_to_record = {\n 'in': myinputs,\n 'out': myoutputs,\n 'res': myresiduals\n }\n\n def _set_solver_print(self, level=2, type_='all'):\n \"\"\"\n Control printing for solvers and subsolvers in the model.\n\n Parameters\n ----------\n level : int\n iprint level. Set to 2 to print residuals each iteration; set to 1\n to print just the iteration totals; set to 0 to disable all printing\n except for failures, and set to -1 to disable all printing including failures.\n type_ : str\n Type of solver to set: 'LN' for linear, 'NL' for nonlinear, or 'all' for all.\n \"\"\"\n self.options['iprint'] = level\n\n def _mpi_print(self, iteration, abs_res, rel_res):\n \"\"\"\n Print residuals from an iteration.\n\n Parameters\n ----------\n iteration : int\n iteration counter, 0-based.\n abs_res : float\n current absolute residual norm.\n rel_res : float\n current relative residual norm.\n \"\"\"\n if (self.options['iprint'] == 2 and self._system.comm.rank == 0):\n\n prefix = self._solver_info.prefix\n solver_name = self.SOLVER\n\n if prefix.endswith('precon:'):\n solver_name = solver_name[3:]\n\n print_str = prefix + solver_name\n print_str += ' %d ; %.9g %.9g' % (iteration, abs_res, rel_res)\n print(print_str)\n\n def _mpi_print_header(self):\n \"\"\"\n Print header text before solving.\n \"\"\"\n pass\n\n def _solve(self):\n \"\"\"\n Run the iterative solver.\n \"\"\"\n maxiter = self.options['maxiter']\n atol = self.options['atol']\n rtol = self.options['rtol']\n iprint = self.options['iprint']\n\n self._mpi_print_header()\n\n self._iter_count = 0\n norm0, norm = self._iter_initialize()\n\n self._norm0 = norm0\n\n self._mpi_print(self._iter_count, norm, norm / norm0)\n\n while self._iter_count < maxiter and norm > atol and norm / norm0 > rtol:\n with Recording(type(self).__name__, self._iter_count, self) as rec:\n self._single_iteration()\n self._iter_count += 1\n self._run_apply()\n norm = self._iter_get_norm()\n # With solvers, we want to record the norm AFTER the call, but the call needs to\n # be wrapped in the with for stack purposes, so we locally assign norm & 
norm0\n                # into the class.\n                rec.abs = norm\n                rec.rel = norm / norm0\n\n            if norm0 == 0:\n                norm0 = 1\n            self._mpi_print(self._iter_count, norm, norm / norm0)\n\n        if self._system.comm.rank == 0 or os.environ.get('USE_PROC_FILES'):\n            prefix = self._solver_info.prefix + self.SOLVER\n            if np.isinf(norm) or np.isnan(norm) or (norm > atol and norm / norm0 > rtol):\n                if iprint > -1:\n                    msg = ' Failed to Converge in {} iterations'.format(self._iter_count)\n                    print(prefix + msg)\n\n                # Raise AnalysisError if requested.\n                if self.options['err_on_maxiter']:\n                    msg = \"Solver '{}' on system '{}' failed to converge.\"\n                    raise AnalysisError(msg.format(self.SOLVER, self._system.pathname))\n\n            elif iprint == 1:\n                print(prefix + ' Converged in {} iterations'.format(self._iter_count))\n            elif iprint == 2:\n                print(prefix + ' Converged')\n\n    def _iter_initialize(self):\n        \"\"\"\n        Perform any necessary pre-processing operations.\n\n        Returns\n        -------\n        float\n            initial error.\n        float\n            error at the first iteration.\n        \"\"\"\n        pass\n\n    def _run_apply(self):\n        \"\"\"\n        Run the appropriate apply method on the system.\n        \"\"\"\n        pass\n\n    def _linearize(self):\n        \"\"\"\n        Perform any required linearization operations such as matrix factorization.\n        \"\"\"\n        pass\n\n    def _linearize_children(self):\n        \"\"\"\n        Return a flag that is True when we need to call linearize on our subsystems' solvers.\n\n        Returns\n        -------\n        boolean\n            Flag for indicating child linearization\n        \"\"\"\n        return True\n\n    def __str__(self):\n        \"\"\"\n        Return a string representation of the solver.\n\n        Returns\n        -------\n        str\n            String representation of the solver.\n        \"\"\"\n        return self.SOLVER\n\n    def record_iteration(self, **kwargs):\n        \"\"\"\n        Record an iteration of the current Solver.\n\n        Parameters\n        ----------\n        **kwargs : dict\n            Keyword arguments (used for abs and rel error).\n        \"\"\"\n        if not self._rec_mgr._recorders:\n            return\n\n        metadata = create_local_meta(self.SOLVER)\n\n        # Get the data\n        data = {}\n\n        if self.recording_options['record_abs_error']:\n            data['abs'] = kwargs.get('abs')\n        else:\n            data['abs'] = None\n\n        if self.recording_options['record_rel_error']:\n            data['rel'] = kwargs.get('rel')\n        else:\n            data['rel'] = None\n\n        system = self._system\n        if isinstance(self, NonlinearSolver):\n            outputs = system._outputs\n            inputs = system._inputs\n            residuals = system._residuals\n        else:  # it's a LinearSolver\n            outputs = system._vectors['output']['linear']\n            inputs = system._vectors['input']['linear']\n            residuals = system._vectors['residual']['linear']\n\n        if self.recording_options['record_outputs']:\n            data['o'] = {}\n            if 'out' in self._filtered_vars_to_record:\n                for out in self._filtered_vars_to_record['out']:\n                    if out in outputs._names:\n                        data['o'][out] = outputs._views[out]\n            else:\n                data['o'] = outputs\n        else:\n            data['o'] = None\n\n        if self.recording_options['record_inputs']:\n            data['i'] = {}\n            if 'in' in self._filtered_vars_to_record:\n                for inp in self._filtered_vars_to_record['in']:\n                    if inp in inputs._names:\n                        data['i'][inp] = inputs._views[inp]\n            else:\n                data['i'] = inputs\n        else:\n            data['i'] = None\n\n        if self.recording_options['record_solver_residuals']:\n            data['r'] = {}\n            if 'res' in self._filtered_vars_to_record:\n                for res in self._filtered_vars_to_record['res']:\n                    if res in residuals._names:\n                        data['r'][res] = residuals._views[res]\n            else:\n                data['r'] = residuals\n        else:\n            data['r'] = None\n\n        self._rec_mgr.record_iteration(self, data, metadata)\n\n    def cleanup(self):\n        \"\"\"\n        Clean up resources prior to exit.\n        \"\"\"\n        # shut down all 
recorders\n self._rec_mgr.shutdown()\n\n\nclass NonlinearSolver(Solver):\n \"\"\"\n Base class for nonlinear solvers.\n\n Attributes\n ----------\n _err_cache : dict\n Dictionary holding input and output vectors at start of iteration, if requested.\n \"\"\"\n\n def __init__(self, **kwargs):\n \"\"\"\n Initialize all attributes.\n\n Parameters\n ----------\n **kwargs : dict\n options dictionary.\n \"\"\"\n super(NonlinearSolver, self).__init__(**kwargs)\n self._err_cache = OrderedDict()\n\n def _declare_options(self):\n \"\"\"\n Declare options before kwargs are processed in the init method.\n \"\"\"\n self.options.declare('debug_print', types=bool, default=False,\n desc='If true, the values of input and output variables at '\n 'the start of iteration are printed and written to a file '\n 'after a failure to converge.')\n\n def solve(self):\n \"\"\"\n Run the solver.\n \"\"\"\n try:\n self._solve()\n except Exception:\n exc = sys.exc_info()\n if self.options['debug_print']:\n self._print_exc_debug_info()\n reraise(*exc)\n\n def _iter_initialize(self):\n \"\"\"\n Perform any necessary pre-processing operations.\n\n Returns\n -------\n float\n initial error.\n float\n error at the first iteration.\n \"\"\"\n if self.options['debug_print']:\n self._err_cache['inputs'] = self._system._inputs._copy_views()\n self._err_cache['outputs'] = self._system._outputs._copy_views()\n\n if self.options['maxiter'] > 0:\n self._run_apply()\n norm = self._iter_get_norm()\n else:\n norm = 1.0\n norm0 = norm if norm != 0.0 else 1.0\n return norm0, norm\n\n def _run_apply(self):\n \"\"\"\n Run the apply_nonlinear method on the system.\n \"\"\"\n self._recording_iter.stack.append(('_run_apply', 0))\n try:\n self._system._apply_nonlinear()\n finally:\n self._recording_iter.stack.pop()\n\n def _iter_get_norm(self):\n \"\"\"\n Return the norm of the residual.\n\n Returns\n -------\n float\n norm.\n \"\"\"\n return self._system._residuals.get_norm()\n\n def _disallow_discrete_outputs(self):\n \"\"\"\n Raise an exception if any discrete outputs exist in our System.\n \"\"\"\n if self._system._var_allprocs_discrete['output']:\n raise RuntimeError(\"System '%s' has a %s solver and contains discrete outputs %s.\" %\n (self._system.pathname, type(self).__name__,\n sorted(self._system._var_allprocs_discrete['output'])))\n\n def _print_exc_debug_info(self):\n coord = self._recording_iter.get_formatted_iteration_coordinate()\n\n out_str = \"\\n# Inputs and outputs at start of iteration '%s':\\n\" % coord\n for vec_type, views in iteritems(self._err_cache):\n out_str += '\\n'\n out_str += '# nonlinear %s\\n' % vec_type\n out_str += pprint.pformat(views)\n out_str += '\\n'\n\n print(out_str)\n\n filename = coord.replace('._solve_nonlinear', '')\n filename = re.sub('[^0-9a-zA-Z]', '_', filename) + '.dat'\n with open(filename, 'w') as f:\n f.write(out_str)\n print(\"Inputs and outputs at start of iteration have been \"\n \"saved to '%s'.\" % filename)\n sys.stdout.flush()\n\n def _gs_iter(self):\n \"\"\"\n Perform a Gauss-Seidel iteration over this Solver's subsystems.\n \"\"\"\n system = self._system\n if system._subsystems_allprocs:\n loc = system._loc_subsys_map\n\n for isub, subsys in enumerate(system._subsystems_allprocs):\n system._transfer('nonlinear', 'fwd', isub)\n\n if subsys.name in loc:\n subsys._solve_nonlinear()\n\n system._check_reconf_update(subsys)\n\n\nclass LinearSolver(Solver):\n \"\"\"\n Base class for linear solvers.\n\n Attributes\n ----------\n _rel_systems : set of str\n Names of systems relevant to the 
current solve.\n _assembled_jac : AssembledJacobian or None\n If not None, the AssembledJacobian instance used by this solver.\n \"\"\"\n\n def __init__(self, **kwargs):\n \"\"\"\n Initialize all attributes.\n\n Parameters\n ----------\n **kwargs : dict\n options dictionary.\n \"\"\"\n self._rel_systems = None\n self._assembled_jac = None\n super(LinearSolver, self).__init__(**kwargs)\n\n def _assembled_jac_solver_iter(self):\n \"\"\"\n Return a generator of linear solvers using assembled jacs.\n \"\"\"\n if self.options['assemble_jac']:\n yield self\n\n def add_recorder(self, recorder):\n \"\"\"\n Add a recorder to the solver's RecordingManager.\n\n Parameters\n ----------\n recorder : <CaseRecorder>\n A recorder instance to be added to RecManager.\n \"\"\"\n raise RuntimeError('Recording is not supported on Linear Solvers.')\n\n def _declare_options(self):\n \"\"\"\n Declare options before kwargs are processed in the init method.\n \"\"\"\n self.options.declare('assemble_jac', default=False, types=bool,\n desc='Activates use of assembled jacobian by this solver.')\n\n self.supports.declare('assembled_jac', types=bool, default=True)\n\n def _setup_solvers(self, system, depth):\n \"\"\"\n Assign system instance, set depth, and optionally perform setup.\n\n Parameters\n ----------\n system : <System>\n pointer to the owning system.\n depth : int\n depth of the current system (already incremented).\n \"\"\"\n super(LinearSolver, self)._setup_solvers(system, depth)\n if self.options['assemble_jac'] and not self.supports['assembled_jac']:\n raise RuntimeError(\"Linear solver '%s' in system '%s' doesn't support assembled \"\n \"jacobians.\" % (self.SOLVER, system.pathname))\n\n def solve(self, vec_names, mode, rel_systems=None):\n \"\"\"\n Run the solver.\n\n Parameters\n ----------\n vec_names : [str, ...]\n list of names of the right-hand-side vectors.\n mode : str\n 'fwd' or 'rev'.\n rel_systems : set of str\n Set of names of relevant systems based on the current linear solve.\n \"\"\"\n raise NotImplementedError(\"class %s does not implement solve().\" % (type(self).__name__))\n\n def _run_apply(self):\n \"\"\"\n Run the apply_linear method on the system.\n \"\"\"\n self._recording_iter.stack.append(('_run_apply', 0))\n\n system = self._system\n scope_out, scope_in = system._get_scope()\n\n try:\n system._apply_linear(self._assembled_jac, self._vec_names, self._rel_systems,\n self._mode, scope_out, scope_in)\n finally:\n self._recording_iter.stack.pop()\n\n def _set_complex_step_mode(self, active):\n \"\"\"\n Turn on or off complex stepping mode.\n\n Recurses to turn on or off complex stepping mode in all subsystems and their vectors.\n\n Parameters\n ----------\n active : bool\n Complex mode flag; set to True prior to commencing complex step.\n \"\"\"\n pass\n\n\nclass BlockLinearSolver(LinearSolver):\n \"\"\"\n A base class for LinearBlockGS and LinearBlockJac.\n \"\"\"\n\n def _declare_options(self):\n \"\"\"\n Declare options before kwargs are processed in the init method.\n \"\"\"\n super(BlockLinearSolver, self)._declare_options()\n self.supports['assembled_jac'] = False\n\n def _setup_solvers(self, system, depth):\n \"\"\"\n Assign system instance, set depth, and optionally perform setup.\n\n Parameters\n ----------\n system : <System>\n pointer to the owning system.\n depth : int\n depth of the current system (already incremented).\n \"\"\"\n super(BlockLinearSolver, self)._setup_solvers(system, depth)\n if system._use_derivatives:\n self._create_rhs_vecs()\n\n def 
_create_rhs_vecs(self):\n self._rhs_vecs = rhs = {}\n for vec_name in self._system._lin_rel_vec_name_list:\n if self._mode == 'fwd':\n rhs[vec_name] = self._system._vectors['residual'][vec_name]._data.copy()\n else:\n rhs[vec_name] = self._system._vectors['output'][vec_name]._data.copy()\n\n def _update_rhs_vecs(self):\n for vec_name in self._system._lin_rel_vec_name_list:\n if self._mode == 'fwd':\n self._rhs_vecs[vec_name][:] = self._system._vectors['residual'][vec_name]._data\n else:\n self._rhs_vecs[vec_name][:] = self._system._vectors['output'][vec_name]._data\n\n def _set_complex_step_mode(self, active):\n \"\"\"\n Turn on or off complex stepping mode.\n\n Recurses to turn on or off complex stepping mode in all subsystems and their vectors.\n\n Parameters\n ----------\n active : bool\n Complex mode flag; set to True prior to commencing complex step.\n \"\"\"\n for vec_name in self._system._lin_rel_vec_name_list:\n if active:\n self._rhs_vecs[vec_name] = self._rhs_vecs[vec_name].astype(np.complex)\n else:\n self._rhs_vecs[vec_name] = self._rhs_vecs[vec_name].real\n\n def _iter_initialize(self):\n \"\"\"\n Perform any necessary pre-processing operations.\n\n Returns\n -------\n float\n initial error.\n float\n error at the first iteration.\n \"\"\"\n self._update_rhs_vecs()\n if self.options['maxiter'] > 1:\n self._run_apply()\n norm = self._iter_get_norm()\n else:\n norm = 1.0\n norm0 = norm if norm != 0.0 else 1.0\n return norm0, norm\n\n def _iter_get_norm(self):\n \"\"\"\n Return the norm of the residual.\n\n Note: This has the side effect of modifying the residual vector in fwd mode\n and the output vector in rev mode.\n\n Returns\n -------\n float\n norm.\n \"\"\"\n system = self._system\n\n if self._mode == 'fwd':\n b_vecs = system._vectors['residual']\n else: # rev\n b_vecs = system._vectors['output']\n\n norm = 0\n for vec_name in system._lin_rel_vec_name_list:\n b_vecs[vec_name]._data -= self._rhs_vecs[vec_name]\n norm += b_vecs[vec_name].get_norm()**2\n\n return norm ** 0.5\n\n def solve(self, vec_names, mode, rel_systems=None):\n \"\"\"\n Run the solver.\n\n Parameters\n ----------\n vec_names : [str, ...]\n list of names of the right-hand-side vectors.\n mode : str\n 'fwd' or 'rev'.\n rel_systems : set of str\n Set of names of relevant systems based on the current linear solve.\n \"\"\"\n self._vec_names = vec_names\n self._rel_systems = rel_systems\n self._mode = mode\n self._solve()\n" ]
[ [ "numpy.array", "numpy.zeros", "numpy.isinf", "numpy.ones" ], [ "numpy.array" ], [ "numpy.sqrt" ], [ "numpy.arange", "numpy.ones", "numpy.array", "numpy.zeros", "numpy.sum" ], [ "numpy.isnan", "numpy.isinf" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
lilyminium/openff-toolkit
[ "a5ed656b84e1999560f4898a87f238a6ddf0aac6" ]
[ "openff/toolkit/utils/toolkits.py" ]
[ "#!/usr/bin/env python\n\"\"\"\nWrapper classes for providing a minimal consistent interface to cheminformatics toolkits\n\nCurrently supported toolkits:\n\n* The `OpenEye Toolkit <https://docs.eyesopen.com/toolkits/python/quickstart-python/index.html>`_\n* The `RDKit <http://www.rdkit.org/>`_\n* `AmberTools <http://ambermd.org/AmberTools.php>`_\n\n.. todo::\n\n * Add checks at the beginning of each toolkit method call to make sure toolkit is licened\n * Switch toolkit methods to object methods instead of static methods\n * Should this be under ``openff.toolkit.utils.toolkits`` or ``openff.toolkit.toolkits``?\n * Add singleton global toolkit registry that registers all available toolkits by default when this file is imported\n * Add description fields for each toolkit wrapper\n * Eliminate global variables in favor of a singleton pattern\n * Change global variables from _INSTALLED to _AVAILABLE\n\n\"\"\"\n\n__all__ = [\n \"DEFAULT_AROMATICITY_MODEL\",\n \"ALLOWED_AROMATICITY_MODELS\",\n \"DEFAULT_FRACTIONAL_BOND_ORDER_MODEL\",\n \"ALLOWED_FRACTIONAL_BOND_ORDER_MODELS\",\n \"DEFAULT_CHARGE_MODEL\",\n \"ALLOWED_CHARGE_MODELS\",\n \"LicenseError\",\n \"MissingPackageError\",\n \"ToolkitUnavailableException\",\n \"InvalidToolkitError\",\n \"InvalidToolkitRegistryError\",\n \"UndefinedStereochemistryError\",\n \"GAFFAtomTypeWarning\",\n \"ToolkitWrapper\",\n \"BuiltInToolkitWrapper\",\n \"OpenEyeToolkitWrapper\",\n \"RDKitToolkitWrapper\",\n \"AmberToolsToolkitWrapper\",\n \"BuiltInToolkitWrapper\",\n \"ToolkitRegistry\",\n \"GLOBAL_TOOLKIT_REGISTRY\",\n \"OPENEYE_AVAILABLE\",\n \"RDKIT_AVAILABLE\",\n \"AMBERTOOLS_AVAILABLE\",\n \"BASIC_CHEMINFORMATICS_TOOLKITS\",\n]\n\n\n# =============================================================================================\n# GLOBAL IMPORTS\n# =============================================================================================\n\nimport copy\nimport importlib\nimport inspect\nimport itertools\nimport logging\nimport re\nimport subprocess\nimport tempfile\nfrom collections import defaultdict\nfrom distutils.spawn import find_executable\nfrom functools import wraps\nfrom typing import TYPE_CHECKING, List, Optional, Tuple\n\nimport numpy as np\nfrom simtk import unit\n\nfrom openff.toolkit.utils.utils import (\n MessageException,\n all_subclasses,\n inherit_docstrings,\n temporary_cd,\n)\n\nif TYPE_CHECKING:\n from openforcefield.topology.molecule import Molecule\n\n# =============================================================================================\n# CONFIGURE LOGGER\n# =============================================================================================\n\nlogger = logging.getLogger(__name__)\n\n# =============================================================================================\n# SUPPORTED MODELS\n#\n# TODO: We may no longer need these since we now require SMIRNOFF to specify these models explicitly.\n# =============================================================================================\n\nDEFAULT_AROMATICITY_MODEL = \"OEAroModel_MDL\" # TODO: Is there a more specific name and reference for the aromaticity model?\nALLOWED_AROMATICITY_MODELS = [\"OEAroModel_MDL\"]\n\nDEFAULT_FRACTIONAL_BOND_ORDER_MODEL = \"Wiberg\" # TODO: Is there a more specific name and reference for the fractional bond order models?\nALLOWED_FRACTIONAL_BOND_ORDER_MODELS = [\"Wiberg\"]\n\nDEFAULT_CHARGE_MODEL = \"AM1-BCC\" # TODO: Should this be `AM1-BCC`, or should we encode BCCs explicitly via AM1-CM2 
preprocessing?\nALLOWED_CHARGE_MODELS = [\"AM1-BCC\"]  # TODO: Which models do we want to support?\n\n\n# =============================================================================================\n# Exceptions\n# =============================================================================================\n\n\nclass MissingPackageError(MessageException):\n    \"\"\"This function requires a package that is not installed.\"\"\"\n\n\nclass ToolkitUnavailableException(MessageException):\n    \"\"\"The requested toolkit is unavailable.\"\"\"\n\n    # TODO: Allow toolkit to be specified and used in formatting/printing exception.\n\n\nclass LicenseError(ToolkitUnavailableException):\n    \"\"\"This function requires a license that cannot be found.\"\"\"\n\n\nclass InvalidToolkitError(MessageException):\n    \"\"\"A non-toolkit object was received when a toolkit object was expected\"\"\"\n\n\nclass InvalidToolkitRegistryError(MessageException):\n    \"\"\"An object other than a ToolkitRegistry or toolkit wrapper was received\"\"\"\n\n\nclass UndefinedStereochemistryError(MessageException):\n    \"\"\"A molecule was attempted to be loaded with undefined stereochemistry\"\"\"\n\n\nclass GAFFAtomTypeWarning(RuntimeWarning):\n    \"\"\"A warning raised if a loaded mol2 file possibly uses GAFF atom types.\"\"\"\n\n\nclass ChargeMethodUnavailableError(MessageException):\n    \"\"\"A toolkit does not support the requested partial_charge_method combination\"\"\"\n\n\nclass IncorrectNumConformersError(MessageException):\n    \"\"\"The requested partial_charge_method expects a different number of conformers than was provided\"\"\"\n\n\nclass IncorrectNumConformersWarning(Warning):\n    \"\"\"The requested partial_charge_method expects a different number of conformers than was provided\"\"\"\n\n\nclass ChargeCalculationError(MessageException):\n    \"\"\"An unhandled error occurred in an external toolkit during charge calculation\"\"\"\n\n\nclass InvalidIUPACNameError(MessageException):\n    \"\"\"Failed to parse IUPAC name\"\"\"\n\n\nclass AntechamberNotFoundError(MessageException):\n    \"\"\"The antechamber executable was not found\"\"\"\n\n\n# =============================================================================================\n# TOOLKIT UTILITY DECORATORS\n# =============================================================================================\n\n# =============================================================================================\n# UTILITY FUNCTIONS\n# =============================================================================================\n\n# =============================================================================================\n# CHEMINFORMATICS TOOLKIT WRAPPERS\n# =============================================================================================\n\n\nclass ToolkitWrapper:\n    \"\"\"\n    Toolkit wrapper base class.\n\n    .. warning :: This API is experimental and subject to change.\n    \"\"\"\n\n    _is_available = None  # True if toolkit is available\n    _toolkit_version = None\n    _toolkit_name = None  # Name of the toolkit\n    _toolkit_installation_instructions = (\n        None  # Installation instructions for the toolkit\n    )\n\n    # @staticmethod\n    # TODO: Right now, to access the class definition, I have to make this a classmethod\n    # and thereby call it with () on the outermost decorator. Is this wasting time? 
Are we caching\n # the is_available results?\n @classmethod\n def requires_toolkit(cls): # remember cls is a ToolkitWrapper subclass here\n def decorator(func):\n @wraps(func)\n def wrapped_function(*args, **kwargs):\n if not cls.is_available():\n msg = \"This function requires the {} toolkit\".format(\n cls._toolkit_name\n )\n raise ToolkitUnavailableException(msg)\n value = func(*args, **kwargs)\n return value\n\n return wrapped_function\n\n return decorator\n\n @property\n # @classmethod\n def toolkit_name(self):\n \"\"\"\n Return the name of the toolkit wrapped by this class as a str\n\n .. warning :: This API is experimental and subject to change.\n\n Returns\n -------\n toolkit_name : str\n The name of the wrapped toolkit\n\n \"\"\"\n return self.__class__._toolkit_name\n\n @property\n # @classmethod\n def toolkit_installation_instructions(self):\n \"\"\"\n Instructions on how to install the wrapped toolkit.\n \"\"\"\n return self._toolkit_installation_instructions\n\n # @classmethod\n @property\n def toolkit_file_read_formats(self):\n \"\"\"\n List of file formats that this toolkit can read.\n \"\"\"\n return self._toolkit_file_read_formats\n\n # @classmethod\n @property\n def toolkit_file_write_formats(self):\n \"\"\"\n List of file formats that this toolkit can write.\n \"\"\"\n return self._toolkit_file_write_formats\n\n @classmethod\n def is_available(cls):\n \"\"\"\n Check whether the corresponding toolkit can be imported\n\n Returns\n -------\n is_installed : bool\n True if corresponding toolkit is installed, False otherwise.\n\n \"\"\"\n return NotImplementedError\n\n @property\n def toolkit_version(self):\n \"\"\"\n Return the version of the wrapped toolkit as a str\n\n .. warning :: This API is experimental and subject to change.\n\n Returns\n -------\n toolkit_version : str\n The version of the wrapped toolkit\n\n \"\"\"\n return self._toolkit_version\n\n def from_file(self, file_path, file_format, allow_undefined_stereo=False):\n \"\"\"\n Return an openff.toolkit.topology.Molecule from a file using this toolkit.\n\n Parameters\n ----------\n file_path : str\n The file to read the molecule from\n file_format : str\n Format specifier, usually file suffix (eg. 'MOL2', 'SMI')\n Note that not all toolkits support all formats. Check ToolkitWrapper.toolkit_file_read_formats for details.\n allow_undefined_stereo : bool, default=False\n If false, raises an exception if any molecules contain undefined stereochemistry.\n _cls : class\n Molecule constructor\n Returns\n -------\n molecules : Molecule or list of Molecules\n a list of Molecule objects is returned.\n\n \"\"\"\n return NotImplementedError\n\n def from_file_obj(\n self, file_obj, file_format, allow_undefined_stereo=False, _cls=None\n ):\n \"\"\"\n Return an openff.toolkit.topology.Molecule from a file-like object (an object with a \".read()\" method using this\n toolkit.\n\n Parameters\n ----------\n file_obj : file-like object\n The file-like object to read the molecule from\n file_format : str\n Format specifier, usually file suffix (eg. 'MOL2', 'SMI')\n Note that not all toolkits support all formats. Check ToolkitWrapper.toolkit_file_read_formats for details.\n allow_undefined_stereo : bool, default=False\n If false, raises an exception if any molecules contain undefined stereochemistry. 
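When ``True``, concrete wrappers such as ``OpenEyeToolkitWrapper`` typically log a warning instead of raising (an illustrative note based on ``from_openeye`` below, not a guarantee for every wrapper).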
\n _cls : class\n Molecule constructor\n\n Returns\n -------\n molecules : Molecule or list of Molecules\n a list of Molecule objects is returned.\n \"\"\"\n return NotImplementedError\n\n def _check_n_conformers(\n self,\n molecule,\n partial_charge_method,\n min_confs=None,\n max_confs=None,\n strict_n_conformers=False,\n ):\n \"\"\"\n Private method for validating the number of conformers on a molecule prior to partial\n charge calculation\n\n Parameters\n ----------\n molecule : Molecule\n Molecule for which partial charges are to be computed\n partial_charge_method : str, optional, default=None\n The name of the charge method being used\n min_confs : int, optional, default=None\n The minimum number of conformers required to use this charge method\n max_confs : int, optional, default=None\n The maximum number of conformers required to use this charge method\n strict_n_conformers : bool, default=False\n Whether to raise an exception if an invalid number of conformers is provided.\n If this is False and an invalid number of conformers is found, a warning will be raised.\n\n Raises\n ------\n IncorrectNumConformersError\n If the wrong number of conformers is attached to the input molecule, and strict_n_conformers is True.\n \"\"\"\n import warnings\n\n n_confs = molecule.n_conformers\n wrong_confs_msg = (\n f\"Molecule '{molecule}' has {n_confs} conformers, \"\n f\"but charge method '{partial_charge_method}' expects\"\n )\n exception_suffix = (\n \"You can disable this error by setting `strict_n_conformers=False` \"\n \"when calling 'molecule.assign_partial_charges'.\"\n )\n # If there's no n_confs filter, then this molecule automatically passes\n if min_confs is None and max_confs is None:\n return\n # If there are constraints on both ends, check both limits\n elif min_confs is not None and max_confs is not None:\n if not (min_confs <= n_confs <= max_confs):\n if min_confs == max_confs:\n wrong_confs_msg += f\" exactly {min_confs}.\"\n else:\n wrong_confs_msg += f\" between {min_confs} and {max_confs}.\"\n\n else:\n return\n # If there's only a minimum constraint, check that\n elif min_confs is not None and max_confs is None:\n if not (min_confs <= n_confs):\n wrong_confs_msg += f\" at least {min_confs}.\"\n else:\n return\n # If there's only a maximum constraint, check that\n elif min_confs is None and max_confs is not None:\n if not (n_confs <= max_confs):\n wrong_confs_msg += f\" at most {max_confs}.\"\n else:\n return\n # If we've made it this far, the molecule has the wrong number of conformers\n if strict_n_conformers:\n wrong_confs_msg += exception_suffix\n raise IncorrectNumConformersError(wrong_confs_msg)\n else:\n warnings.warn(wrong_confs_msg, IncorrectNumConformersWarning)\n\n def __repr__(self):\n return (\n f\"ToolkitWrapper around {self.toolkit_name} version {self.toolkit_version}\"\n )\n\n\n@inherit_docstrings\nclass BuiltInToolkitWrapper(ToolkitWrapper):\n \"\"\"\n Built-in ToolkitWrapper for very basic functionality. This is intended for use in testing and not much more.\n\n .. 
warning :: This API is experimental and subject to change.\n \"\"\"\n\n _toolkit_name = \"Built-in Toolkit\"\n _toolkit_installation_instructions = (\n \"This toolkit is installed with the Open Force Field Toolkit and does \"\n \"not require additional dependencies.\"\n )\n\n def __init__(self):\n super().__init__()\n\n self._toolkit_file_read_formats = []\n self._toolkit_file_write_formats = []\n\n def assign_partial_charges(\n self,\n molecule,\n partial_charge_method=None,\n use_conformers=None,\n strict_n_conformers=False,\n _cls=None,\n ):\n \"\"\"\n Compute partial charges with the built-in toolkit using simple arithmetic operations, and assign\n the new values to the partial_charges attribute.\n\n .. warning :: This API is experimental and subject to change.\n\n Parameters\n ----------\n molecule : openff.toolkit.topology.Molecule\n Molecule for which partial charges are to be computed\n partial_charge_method: str, optional, default=None\n The charge model to use. One of ['zeros', 'formal_charge']. If None, 'formal_charge' will be used.\n use_conformers : iterable of simtk.unit.Quantity-wrapped numpy arrays, each with shape (n_atoms, 3) and dimension of distance. Optional, default = None\n Coordinates to use for partial charge calculation. If None, an appropriate number of conformers\n will be generated.\n strict_n_conformers : bool, default=False\n Whether to raise an exception if an invalid number of conformers is provided for the given charge method.\n If this is False and an invalid number of conformers is found, a warning will be raised\n instead of an Exception.\n _cls : class\n Molecule constructor\n\n Raises\n ------\n ChargeMethodUnavailableError if the requested charge method can not be handled by this toolkit\n\n IncorrectNumConformersError if strict_n_conformers is True and use_conformers is provided and specifies an\n invalid number of conformers for the requested method\n\n ChargeCalculationError if the charge calculation is supported by this toolkit, but fails\n \"\"\"\n\n PARTIAL_CHARGE_METHODS = {\n \"zeros\": {\"rec_confs\": 0, \"min_confs\": 0, \"max_confs\": 0},\n \"formal_charge\": {\"rec_confs\": 0, \"min_confs\": 0, \"max_confs\": 0},\n }\n\n if partial_charge_method is None:\n partial_charge_method = \"formal_charge\"\n\n if _cls is None:\n from openff.toolkit.topology.molecule import Molecule\n\n _cls = Molecule\n\n # Make a temporary copy of the molecule, since we'll be messing with its conformers\n mol_copy = _cls(molecule)\n\n partial_charge_method = partial_charge_method.lower()\n if partial_charge_method not in PARTIAL_CHARGE_METHODS:\n raise ChargeMethodUnavailableError(\n f'Partial charge method \"{partial_charge_method}\"\" is not supported by '\n f\"the Built-in toolkit. 
Available charge methods are \"\n f\"{list(PARTIAL_CHARGE_METHODS.keys())}\"\n )\n\n if use_conformers is None:\n # Note that this refers back to the GLOBAL_TOOLKIT_REGISTRY by default, since\n # BuiltInToolkitWrapper can't generate conformers\n mol_copy.generate_conformers(\n n_conformers=PARTIAL_CHARGE_METHODS[partial_charge_method][\"rec_confs\"]\n )\n else:\n mol_copy._conformers = None\n for conformer in use_conformers:\n mol_copy._add_conformer(conformer)\n self._check_n_conformers(\n mol_copy,\n partial_charge_method=partial_charge_method,\n min_confs=0,\n max_confs=0,\n strict_n_conformers=strict_n_conformers,\n )\n\n partial_charges = unit.Quantity(\n np.zeros((molecule.n_particles)), unit.elementary_charge\n )\n if partial_charge_method == \"zeros\":\n pass\n elif partial_charge_method == \"formal_charge\":\n for part_idx, particle in enumerate(molecule.particles):\n partial_charges[part_idx] = particle.formal_charge\n\n molecule.partial_charges = partial_charges\n\n\n@inherit_docstrings\nclass OpenEyeToolkitWrapper(ToolkitWrapper):\n \"\"\"\n OpenEye toolkit wrapper\n\n .. warning :: This API is experimental and subject to change.\n \"\"\"\n\n _toolkit_name = \"OpenEye Toolkit\"\n _toolkit_installation_instructions = (\n \"The OpenEye toolkit requires a (free for academics) license, and can be \"\n \"found at: \"\n \"https://docs.eyesopen.com/toolkits/python/quickstart-python/install.html\"\n )\n # This could belong to ToolkitWrapper, although it seems strange\n # to carry that data for open-source toolkits\n _is_licensed = None\n # Only for OpenEye is there potentially a difference between\n # being available and installed\n _is_installed = None\n _license_functions = {\n \"oechem\": \"OEChemIsLicensed\",\n \"oequacpac\": \"OEQuacPacIsLicensed\",\n \"oeiupac\": \"OEIUPACIsLicensed\",\n \"oeomega\": \"OEOmegaIsLicensed\",\n }\n\n def __init__(self):\n\n self._toolkit_file_read_formats = [\n \"CAN\",\n \"CDX\",\n \"CSV\",\n \"FASTA\",\n \"INCHI\",\n \"INCHIKEY\",\n \"ISM\",\n \"MDL\",\n \"MF\",\n \"MMOD\",\n \"MOL2\",\n \"MOL2H\",\n \"MOPAC\",\n \"OEB\",\n \"PDB\",\n \"RDF\",\n \"SDF\",\n \"SKC\",\n \"SLN\",\n \"SMI\",\n \"USM\",\n \"XYZ\",\n ]\n self._toolkit_file_write_formats = [\n \"CAN\",\n \"CDX\",\n \"CSV\",\n \"FASTA\",\n \"INCHI\",\n \"INCHIKEY\",\n \"ISM\",\n \"MDL\",\n \"MF\",\n \"MMOD\",\n \"MOL2\",\n \"MOL2H\",\n \"MOPAC\",\n \"OEB\",\n \"PDB\",\n \"RDF\",\n \"SDF\",\n \"SKC\",\n \"SLN\",\n \"SMI\",\n \"USM\",\n \"XYZ\",\n ]\n\n # check if the toolkit can be loaded\n if not self.is_available():\n msg = (\n f\"The required toolkit {self._toolkit_name} is not \"\n f\"available. {self._toolkit_installation_instructions}\"\n )\n if self._is_installed is False:\n raise ToolkitUnavailableException(msg)\n if self._is_licensed is False:\n raise LicenseError(msg)\n\n from openeye import __version__ as openeye_version\n\n self._toolkit_version = openeye_version\n\n @classmethod\n def _check_licenses(cls):\n \"\"\"Check license of all known OpenEye tools. 
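A minimal illustrative probe (an assumption-laden sketch: it presumes at least one ``openeye`` submodule is importable)::\n\n OpenEyeToolkitWrapper._check_licenses() # True if any tool reports a license\n\n 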
Returns True if any are found\n to be licensed, False if any are not.\"\"\"\n for (tool, license_func) in cls._license_functions.items():\n try:\n module = importlib.import_module(\"openeye.\" + tool)\n except (ImportError, ModuleNotFoundError):\n continue\n else:\n if getattr(module, license_func)():\n return True\n return False\n\n @classmethod\n def is_available(cls):\n \"\"\"\n Check if the given OpenEye toolkit components are available.\n\n If the OpenEye toolkit is not installed or no license is found\n for at least one the required toolkits , ``False`` is returned.\n\n Returns\n -------\n all_installed : bool\n ``True`` if all required OpenEye tools are installed and licensed,\n ``False`` otherwise\n\n \"\"\"\n if cls._is_available is None:\n if cls._is_licensed is None:\n cls._is_licensed = cls._check_licenses()\n if cls._is_installed is None:\n for tool in cls._license_functions.keys():\n cls._is_installed = True\n try:\n importlib.import_module(\"openeye.\" + tool)\n except (ImportError, ModuleNotFoundError):\n cls._is_installed = False\n cls._is_available = cls._is_installed and cls._is_licensed\n return cls._is_available\n\n def from_object(self, obj, allow_undefined_stereo=False, _cls=None):\n \"\"\"\n If given an OEMol (or OEMol-derived object), this function will load it into an openff.toolkit.topology.molecule\n\n Parameters\n ----------\n obj : A molecule-like object\n An object to by type-checked.\n allow_undefined_stereo : bool, default=False\n Whether to accept molecules with undefined stereocenters. If False,\n an exception will be raised if a molecule with undefined stereochemistry\n is passed into this function.\n _cls : class\n Molecule constructor\n Returns\n -------\n Molecule\n An openff.toolkit.topology.molecule Molecule.\n\n Raises\n ------\n NotImplementedError\n If the object could not be converted into a Molecule.\n \"\"\"\n # TODO: Add tests for the from_object functions\n from openeye import oechem\n\n if _cls is None:\n from openff.toolkit.topology.molecule import Molecule\n\n _cls = Molecule\n\n if isinstance(obj, oechem.OEMolBase):\n return self.from_openeye(\n oemol=obj, allow_undefined_stereo=allow_undefined_stereo, _cls=_cls\n )\n raise NotImplementedError(\n \"Cannot create Molecule from {} object\".format(type(obj))\n )\n\n def from_file(\n self, file_path, file_format, allow_undefined_stereo=False, _cls=None\n ):\n \"\"\"\n Return an openff.toolkit.topology.Molecule from a file using this toolkit.\n\n Parameters\n ----------\n file_path : str\n The file to read the molecule from\n file_format : str\n Format specifier, usually file suffix (eg. 'MOL2', 'SMI')\n Note that not all toolkits support all formats. 
Check ToolkitWrapper.toolkit_file_read_formats for details.\n allow_undefined_stereo : bool, default=False\n If false, raises an exception if oemol contains undefined stereochemistry.\n _cls : class\n Molecule constructor\n\n Returns\n -------\n molecules : List[Molecule]\n The list of ``Molecule`` objects in the file.\n\n Raises\n ------\n GAFFAtomTypeWarning\n If the loaded mol2 file possibly uses GAFF atom types, which\n are not supported.\n\n Examples\n --------\n\n Load a mol2 file into an OpenFF ``Molecule`` object.\n\n >>> from openff.toolkit.utils import get_data_file_path\n >>> mol2_file_path = get_data_file_path('molecules/cyclohexane.mol2')\n >>> toolkit = OpenEyeToolkitWrapper()\n >>> molecule = toolkit.from_file(mol2_file_path, file_format='mol2')\n\n \"\"\"\n from openeye import oechem\n\n ifs = oechem.oemolistream(file_path)\n return self._read_oemolistream_molecules(\n ifs, allow_undefined_stereo, file_path=file_path, _cls=_cls\n )\n\n def from_file_obj(\n self, file_obj, file_format, allow_undefined_stereo=False, _cls=None\n ):\n \"\"\"\n Return an openff.toolkit.topology.Molecule from a file-like object (an object with a \".read()\" method using\n this toolkit.\n\n Parameters\n ----------\n file_obj : file-like object\n The file-like object to read the molecule from\n file_format : str\n Format specifier, usually file suffix (eg. 'MOL2', 'SMI')\n Note that not all toolkits support all formats. Check ToolkitWrapper.toolkit_file_read_formats for details.\n allow_undefined_stereo : bool, default=False\n If false, raises an exception if oemol contains undefined stereochemistry.\n _cls : class\n Molecule constructor\n\n Returns\n -------\n molecules : List[Molecule]\n The list of Molecule objects in the file object.\n\n Raises\n ------\n GAFFAtomTypeWarning\n If the loaded mol2 file possibly uses GAFF atom types, which\n are not supported.\n\n \"\"\"\n from openeye import oechem\n\n # Configure input molecule stream.\n ifs = oechem.oemolistream()\n ifs.openstring(file_obj.read())\n oeformat = getattr(oechem, \"OEFormat_\" + file_format)\n ifs.SetFormat(oeformat)\n\n return self._read_oemolistream_molecules(ifs, allow_undefined_stereo, _cls=_cls)\n\n def to_file_obj(self, molecule, file_obj, file_format):\n \"\"\"\n Writes an OpenFF Molecule to a file-like object\n\n Parameters\n ----------\n molecule : an OpenFF Molecule\n The molecule to write\n file_obj\n The file-like object to write to\n file_format\n The format for writing the molecule data\n\n \"\"\"\n with tempfile.TemporaryDirectory() as tmpdir:\n with temporary_cd(tmpdir):\n outfile = \"temp_molecule.\" + file_format\n self.to_file(molecule, outfile, file_format)\n file_data = open(outfile).read()\n file_obj.write(file_data)\n\n def to_file(self, molecule, file_path, file_format):\n \"\"\"\n Writes an OpenFF Molecule to a file-like object\n\n Parameters\n ----------\n molecule : an OpenFF Molecule\n The molecule to write\n file_path\n The file path to write to.\n file_format\n The format for writing the molecule data\n\n \"\"\"\n from openeye import oechem\n\n oemol = self.to_openeye(molecule)\n ofs = oechem.oemolostream(file_path)\n openeye_format = getattr(oechem, \"OEFormat_\" + file_format.upper())\n ofs.SetFormat(openeye_format)\n\n # OFFTK strictly treats SDF as a single-conformer format.\n # We need to override OETK's behavior here if the user is saving a multiconformer molecule.\n\n # Remove all but the first conformer when writing to SDF as we only support single conformer format\n if (file_format.lower() == 
\"sdf\") and oemol.NumConfs() > 1:\n conf1 = [conf for conf in oemol.GetConfs()][0]\n flat_coords = list()\n for idx, coord in conf1.GetCoords().items():\n flat_coords.extend(coord)\n oemol.DeleteConfs()\n oecoords = oechem.OEFloatArray(flat_coords)\n oemol.NewConf(oecoords)\n # We're standardizing on putting partial charges into SDFs under the `atom.dprop.PartialCharge` property\n if (file_format.lower() == \"sdf\") and (molecule.partial_charges is not None):\n partial_charges_list = [\n oeatom.GetPartialCharge() for oeatom in oemol.GetAtoms()\n ]\n partial_charges_str = \" \".join([f\"{val:f}\" for val in partial_charges_list])\n # TODO: \"dprop\" means \"double precision\" -- Is there any way to make Python more accurately\n # describe/infer the proper data type?\n oechem.OESetSDData(oemol, \"atom.dprop.PartialCharge\", partial_charges_str)\n\n # If the file format is \"pdb\" using OEWriteMolecule() rearranges the atoms (hydrogens are pushed to the bottom)\n # Issue #475 (https://github.com/openforcefield/openff-toolkit/issues/475)\n # dfhahn's workaround: Using OEWritePDBFile does not alter the atom arrangement\n if file_format.lower() == \"pdb\":\n if oemol.NumConfs() > 1:\n for conf in oemol.GetConfs():\n oechem.OEWritePDBFile(ofs, conf, oechem.OEOFlavor_PDB_BONDS)\n else:\n oechem.OEWritePDBFile(ofs, oemol, oechem.OEOFlavor_PDB_BONDS)\n else:\n oechem.OEWriteMolecule(ofs, oemol)\n ofs.close()\n\n @staticmethod\n def _turn_oemolbase_sd_charges_into_partial_charges(oemol):\n \"\"\"\n Process an OEMolBase object and check to see whether it has an SD data pair\n where the tag is \"atom.dprop.PartialCharge\", indicating that it has a list of\n atomic partial charges. If so, apply those charges to the OEAtoms in the OEMolBase,\n and delete the SD data pair.\n\n Parameters\n ----------\n oemol : openeye.oechem.OEMolBase\n The molecule to process\n\n Returns\n -------\n charges_are_present : bool\n Whether charges are present in the SD file. This is necessary because OEAtoms\n have a default partial charge of 0.0, which makes truly zero-charge molecules\n (eg \"N2\", \"Ar\"...) indistinguishable from molecules for which partial charges\n have not been assigned. The OFF Toolkit allows this distinction with\n mol.partial_charges=None. In order to complete roundtrips within the OFFMol\n spec, we must interpret the presence or absence of this tag as a proxy for\n mol.partial_charges=None.\n \"\"\"\n from openeye import oechem\n\n for dp in oechem.OEGetSDDataPairs(oemol):\n if dp.GetTag() == \"atom.dprop.PartialCharge\":\n charges_str = oechem.OEGetSDData(oemol, \"atom.dprop.PartialCharge\")\n charges_unitless = [float(i) for i in charges_str.split()]\n assert len(charges_unitless) == oemol.NumAtoms()\n for charge, oeatom in zip(charges_unitless, oemol.GetAtoms()):\n oeatom.SetPartialCharge(charge)\n oechem.OEDeleteSDData(oemol, \"atom.dprop.PartialCharge\")\n return True\n return False\n\n def _read_oemolistream_molecules(\n self, oemolistream, allow_undefined_stereo, file_path=None, _cls=None\n ):\n \"\"\"\n Reads and return the Molecules in a OEMol input stream.\n\n Parameters\n ----------\n oemolistream : oechem.oemolistream\n The OEMol input stream to read from.\n allow_undefined_stereo : bool\n If false, raises an exception if oemol contains undefined stereochemistry.\n file_path : str, optional\n The path to the mol2 file. 
This is used exclusively to make\n the error message more meaningful when the mol2 files doesn't\n use Tripos atom types.\n _cls : class\n Molecule constructor\n\n Returns\n -------\n molecules : List[Molecule]\n The list of Molecule objects in the stream.\n\n \"\"\"\n from openeye import oechem\n\n mols = list()\n oemol = oechem.OEMol()\n while oechem.OEReadMolecule(oemolistream, oemol):\n oechem.OEPerceiveChiral(oemol)\n oechem.OEAssignAromaticFlags(oemol, oechem.OEAroModel_MDL)\n oechem.OE3DToInternalStereo(oemol)\n\n # If this is either a multi-conformer or multi-molecule SD file, check to see if there are partial charges\n if (oemolistream.GetFormat() == oechem.OEFormat_SDF) and hasattr(\n oemol, \"GetConfs\"\n ):\n # The openFF toolkit treats each conformer in a \"multiconformer\" SDF as\n # a separate molecule.\n # https://github.com/openforcefield/openff-toolkit/issues/202\n # Note that there is ambiguity about how SD data and \"multiconformer\" SD files should be stored.\n # As a result, we have to do some weird stuff below, as discussed in\n # https://docs.eyesopen.com/toolkits/python/oechemtk/oemol.html#dude-where-s-my-sd-data\n\n # Jeff: I was unable to find a way to distinguish whether a SDF was multiconformer or not.\n # The logic below should handle either single- or multi-conformer SDFs.\n for conf in oemol.GetConfIter():\n # First, we turn \"conf\" into an OEMCMol (OE multiconformer mol), since OTHER file formats\n # really are multiconformer, and we will eventually feed this into the `from_openeye` function,\n # which is made to ingest multiconformer mols.\n this_conf_oemcmol = conf.GetMCMol()\n\n # Then, we take any SD data pairs that were on the oemol, and copy them on to \"this_conf_oemcmol\".\n # These SD pairs will be populated if we're dealing with a single-conformer SDF.\n for dp in oechem.OEGetSDDataPairs(oemol):\n oechem.OESetSDData(\n this_conf_oemcmol, dp.GetTag(), dp.GetValue()\n )\n # On the other hand, these SD pairs will be populated if we're dealing with a MULTI-conformer SDF.\n for dp in oechem.OEGetSDDataPairs(conf):\n oechem.OESetSDData(\n this_conf_oemcmol, dp.GetTag(), dp.GetValue()\n )\n # This function fishes out the special SD data tag we use for partial charge\n # (\"atom.dprop.PartialCharge\"), and applies those as OETK-supported partial charges on the OEAtoms\n has_charges = self._turn_oemolbase_sd_charges_into_partial_charges(\n this_conf_oemcmol\n )\n\n # Finally, we feed the molecule into `from_openeye`, where it converted into an OFFMol\n mol = self.from_openeye(\n this_conf_oemcmol,\n allow_undefined_stereo=allow_undefined_stereo,\n _cls=_cls,\n )\n\n # If the molecule didn't even have the `PartialCharges` tag, we set it from zeroes to None here.\n if not (has_charges):\n mol.partial_charges = None\n mols.append(mol)\n\n else:\n # In case this is being read from a SINGLE-molecule SD file, convert the SD field where we\n # stash partial charges into actual per-atom partial charges\n self._turn_oemolbase_sd_charges_into_partial_charges(oemol)\n mol = self.from_openeye(\n oemol, allow_undefined_stereo=allow_undefined_stereo, _cls=_cls\n )\n mols.append(mol)\n\n # Check if this is an AMBER-produced mol2 file, which we can not load because they use GAFF atom types.\n if oemolistream.GetFormat() == oechem.OEFormat_MOL2:\n self._check_mol2_gaff_atom_type(mol, file_path)\n\n return mols\n\n def enumerate_protomers(self, molecule, max_states=10):\n \"\"\"\n Enumerate the formal charges of a molecule to generate different protomoers.\n\n Parameters\n 
----------\n molecule: openff.toolkit.topology.Molecule\n The molecule whose state we should enumerate\n\n max_states: int optional, default=10,\n The maximum number of protomer states to be returned.\n\n Returns\n -------\n molecules: List[openff.toolkit.topology.Molecule],\n A list of the protomers of the input molecules not including the input.\n \"\"\"\n\n from openeye import oequacpac\n\n options = oequacpac.OEFormalChargeOptions()\n # add one as the input is included\n options.SetMaxCount(max_states + 1)\n\n molecules = []\n\n oemol = self.to_openeye(molecule=molecule)\n for protomer in oequacpac.OEEnumerateFormalCharges(oemol, options):\n\n mol = self.from_openeye(\n protomer, allow_undefined_stereo=True, _cls=molecule.__class__\n )\n\n if mol != molecule:\n molecules.append(mol)\n\n return molecules\n\n def enumerate_stereoisomers(\n self, molecule, undefined_only=False, max_isomers=20, rationalise=True\n ):\n \"\"\"\n Enumerate the stereocenters and bonds of the current molecule.\n\n Parameters\n ----------\n molecule: openff.toolkit.topology.Molecule\n The molecule whose state we should enumerate\n\n undefined_only: bool optional, default=False\n If we should enumerate all stereocenters and bonds or only those with undefined stereochemistry\n\n max_isomers: int optional, default=20\n The maximum amount of molecules that should be returned\n\n rationalise: bool optional, default=True\n If we should try to build and rationalise the molecule to ensure it can exist\n\n\n Returns\n --------\n molecules: List[openff.toolkit.topology.Molecule]\n A list of openff.toolkit.topology.Molecule instances\n\n \"\"\"\n from openeye import oechem, oeomega\n\n oemol = self.to_openeye(molecule=molecule)\n\n # arguments for this function can be found here\n # <https://docs.eyesopen.com/toolkits/python/omegatk/OEConfGenFunctions/OEFlipper.html?highlight=stereoisomers>\n\n molecules = []\n for isomer in oeomega.OEFlipper(oemol, 200, not undefined_only, True, False):\n\n if rationalise:\n # try and determine if the molecule is reasonable by generating a conformer with\n # strict stereo, like embedding in rdkit\n omega = oeomega.OEOmega()\n omega.SetMaxConfs(1)\n omega.SetCanonOrder(False)\n # Don't generate random stereoisomer if not specified\n omega.SetStrictStereo(True)\n mol = oechem.OEMol(isomer)\n status = omega(mol)\n if status:\n isomol = self.from_openeye(mol, _cls=molecule.__class__)\n if isomol != molecule:\n molecules.append(isomol)\n\n else:\n isomol = self.from_openeye(isomer, _cls=molecule.__class__)\n if isomol != molecule:\n molecules.append(isomol)\n\n return molecules[:max_isomers]\n\n def enumerate_tautomers(self, molecule, max_states=20):\n \"\"\"\n Enumerate the possible tautomers of the current molecule\n\n Parameters\n ----------\n molecule: openff.toolkit.topology.Molecule\n The molecule whose state we should enumerate\n\n max_states: int optional, default=20\n The maximum amount of molecules that should be returned\n\n Returns\n -------\n molecules: List[openff.toolkit.topology.Molecule]\n A list of openff.toolkit.topology.Molecule instances excluding the input molecule.\n \"\"\"\n from openeye import oequacpac\n\n oemol = self.to_openeye(molecule=molecule)\n\n tautomers = []\n\n # set the options\n tautomer_options = oequacpac.OETautomerOptions()\n tautomer_options.SetApplyWarts(False)\n tautomer_options.SetMaxTautomersGenerated(max_states + 1)\n tautomer_options.SetSaveStereo(True)\n # this aligns the outputs of rdkit and openeye for the example cases\n 
tautomer_options.SetCarbonHybridization(False)\n\n for tautomer in oequacpac.OEEnumerateTautomers(oemol, tautomer_options):\n # remove the input tautomer from the output\n taut = self.from_openeye(\n tautomer, allow_undefined_stereo=True, _cls=molecule.__class__\n )\n if taut != molecule:\n tautomers.append(\n self.from_openeye(\n tautomer, allow_undefined_stereo=True, _cls=molecule.__class__\n )\n )\n\n return tautomers\n\n @staticmethod\n def _check_mol2_gaff_atom_type(molecule, file_path=None):\n \"\"\"Attempts to detect the presence of GAFF atom types in a molecule loaded from a mol2 file.\n\n For now, this raises a ``GAFFAtomTypeWarning`` if the molecule\n include Osmium and Holmium atoms, which have GAFF types OS and\n HO respectively.\n\n Parameters\n ----------\n molecule : openff.toolkit.topology.molecule.Molecule\n The loaded molecule.\n file_path : str, optional\n The path to the mol2 file. This is used exclusively to make\n the error message more meaningful.\n\n \"\"\"\n # Handle default.\n if file_path is None:\n file_path = \"\"\n else:\n # Append a ':' character that will separate the file\n # path from the molecule string representation.\n file_path = file_path + \":\"\n # atomic_number: (GAFF_type, element_name)\n warning_atomic_numbers = {76: (\"OS\", \"Osmium\"), 67: (\"HO\", \"Holmium\")}\n\n for atom in molecule.atoms:\n try:\n atom_type, element_name = warning_atomic_numbers[atom.atomic_number]\n except KeyError:\n pass\n else:\n import warnings\n\n warn_msg = (\n f'OpenEye interpreted the type \"{atom_type}\" in {file_path}{molecule.name}'\n f\" as {element_name}. Does your mol2 file uses Tripos SYBYL atom types?\"\n \" Other atom types such as GAFF are not supported.\"\n )\n warnings.warn(warn_msg, GAFFAtomTypeWarning)\n\n @staticmethod\n def _openeye_cip_atom_stereochemistry(oemol, oeatom):\n \"\"\"\n Determine CIP stereochemistry (R/S) for the specified atom\n\n Parameters\n ----------\n oemol : openeye.oechem.OEMolBase\n The molecule of interest\n oeatom : openeye.oechem.OEAtomBase\n The atom whose stereochemistry is to be computed\n\n Returns\n -------\n stereochemistry : str\n 'R', 'S', or None if no stereochemistry is specified or the atom is not a stereocenter\n \"\"\"\n from openeye import oechem\n\n if not oeatom.HasStereoSpecified():\n # No stereochemical information has been stored, so this could be unknown stereochemistry\n # TODO: Should we raise an exception?\n return None\n\n cip = oechem.OEPerceiveCIPStereo(oemol, oeatom)\n\n if cip == oechem.OECIPAtomStereo_S:\n return \"S\"\n elif cip == oechem.OECIPAtomStereo_R:\n return \"R\"\n elif cip == oechem.OECIPAtomStereo_NotStereo:\n # Not a stereocenter\n # TODO: Should this be a different case from ``None``?\n return None\n\n @staticmethod\n def _openeye_cip_bond_stereochemistry(oemol, oebond):\n \"\"\"\n Determine CIP stereochemistry (E/Z) for the specified bond\n\n Parameters\n ----------\n oemol : openeye.oechem.OEMolBase\n The molecule of interest\n oebond : openeye.oechem.OEBondBase\n The bond whose stereochemistry is to be computed\n\n Returns\n -------\n stereochemistry : str\n 'E', 'Z', or None if stereochemistry is unspecified or the bond is not a stereo bond\n\n \"\"\"\n from openeye import oechem\n\n if not oebond.HasStereoSpecified():\n # No stereochemical information has been stored, so this could be unknown stereochemistry\n # TODO: Should we raise an exception?\n return None\n\n cip = oechem.OEPerceiveCIPStereo(oemol, oebond)\n\n if cip == oechem.OECIPBondStereo_E:\n return \"E\"\n elif 
cip == oechem.OECIPBondStereo_Z:\n return \"Z\"\n elif cip == oechem.OECIPBondStereo_NotStereo:\n return None\n\n @staticmethod\n def from_openeye(oemol, allow_undefined_stereo=False, _cls=None):\n \"\"\"\n Create a Molecule from an OpenEye molecule. If the OpenEye molecule has\n implicit hydrogens, this function will make them explicit.\n\n ``OEAtom`` s have a different set of allowed value for partial charges than\n ``openff.toolkit.topology.Molecule`` s. In the OpenEye toolkits, partial charges\n are stored on individual ``OEAtom`` s, and their values are initialized to ``0.0``.\n In the Open Force Field Toolkit, an ``openff.toolkit.topology.Molecule``'s\n ``partial_charges`` attribute is initialized to ``None`` and can be set to a\n ``simtk.unit.Quantity``-wrapped numpy array with units of\n elementary charge. The Open Force\n Field Toolkit considers an ``OEMol`` where every ``OEAtom`` has a partial\n charge of ``float('nan')`` to be equivalent to an Open Force Field Toolkit `Molecule`'s\n ``partial_charges = None``.\n This assumption is made in both ``to_openeye`` and ``from_openeye``.\n\n .. warning :: This API is experimental and subject to change.\n\n Parameters\n ----------\n oemol : openeye.oechem.OEMol\n An OpenEye molecule\n allow_undefined_stereo : bool, default=False\n If false, raises an exception if oemol contains undefined stereochemistry.\n _cls : class\n Molecule constructor\n\n Returns\n -------\n molecule : openff.toolkit.topology.Molecule\n An OpenFF molecule\n\n Examples\n --------\n\n Create a Molecule from an OpenEye OEMol\n\n >>> from openeye import oechem\n >>> from openff.toolkit.tests.utils import get_data_file_path\n >>> ifs = oechem.oemolistream(get_data_file_path('systems/monomers/ethanol.mol2'))\n >>> oemols = list(ifs.GetOEGraphMols())\n\n >>> toolkit_wrapper = OpenEyeToolkitWrapper()\n >>> molecule = toolkit_wrapper.from_openeye(oemols[0])\n\n \"\"\"\n import math\n\n from openeye import oechem\n\n oemol = oechem.OEMol(oemol)\n\n # Add explicit hydrogens if they're implicit\n if oechem.OEHasImplicitHydrogens(oemol):\n oechem.OEAddExplicitHydrogens(oemol)\n\n # TODO: Is there any risk to perceiving aromaticity here instead of later?\n oechem.OEAssignAromaticFlags(oemol, oechem.OEAroModel_MDL)\n\n oechem.OEPerceiveChiral(oemol)\n\n # Check that all stereo is specified\n # Potentially better OE stereo check: OEFlipper — Toolkits - - Python\n # https: // docs.eyesopen.com / toolkits / python / omegatk / OEConfGenFunctions / OEFlipper.html\n\n unspec_chiral = False\n unspec_db = False\n problematic_atoms = list()\n problematic_bonds = list()\n\n for oeatom in oemol.GetAtoms():\n if oeatom.IsChiral():\n if not (oeatom.HasStereoSpecified()):\n unspec_chiral = True\n problematic_atoms.append(oeatom)\n for oebond in oemol.GetBonds():\n if oebond.IsChiral():\n if not (oebond.HasStereoSpecified()):\n unspec_db = True\n problematic_bonds.append(oebond)\n if unspec_chiral or unspec_db:\n\n def oeatom_to_str(oeatom):\n return \"atomic num: {}, name: {}, idx: {}, aromatic: {}, chiral: {}\".format(\n oeatom.GetAtomicNum(),\n oeatom.GetName(),\n oeatom.GetIdx(),\n oeatom.IsAromatic(),\n oeatom.IsChiral(),\n )\n\n def oebond_to_str(oebond):\n return \"order: {}, chiral: {}\".format(\n oebond.GetOrder(), oebond.IsChiral()\n )\n\n def describe_oeatom(oeatom):\n description = \"Atom {} with bonds:\".format(oeatom_to_str(oeatom))\n for oebond in oeatom.GetBonds():\n description += \"\\nbond {} to atom {}\".format(\n oebond_to_str(oebond), oeatom_to_str(oebond.GetNbr(oeatom))\n 
)\n return description\n\n msg = (\n \"OEMol has unspecified stereochemistry. \"\n \"oemol.GetTitle(): {}\\n\".format(oemol.GetTitle())\n )\n if len(problematic_atoms) != 0:\n msg += \"Problematic atoms are:\\n\"\n for problematic_atom in problematic_atoms:\n msg += describe_oeatom(problematic_atom) + \"\\n\"\n if len(problematic_bonds) != 0:\n msg += \"Problematic bonds are: {}\\n\".format(problematic_bonds)\n if allow_undefined_stereo:\n msg = \"Warning (not error because allow_undefined_stereo=True): \" + msg\n logger.warning(msg)\n else:\n msg = \"Unable to make OFFMol from OEMol: \" + msg\n raise UndefinedStereochemistryError(msg)\n\n if _cls is None:\n from openff.toolkit.topology.molecule import Molecule\n\n _cls = Molecule\n\n molecule = _cls()\n molecule.name = oemol.GetTitle()\n\n # Copy any attached SD tag information\n for dp in oechem.OEGetSDDataPairs(oemol):\n molecule._properties[dp.GetTag()] = dp.GetValue()\n\n map_atoms = dict() # {oemol_idx: molecule_idx}\n atom_mapping = {}\n for oeatom in oemol.GetAtoms():\n oe_idx = oeatom.GetIdx()\n map_id = oeatom.GetMapIdx()\n atomic_number = oeatom.GetAtomicNum()\n formal_charge = oeatom.GetFormalCharge() * unit.elementary_charge\n is_aromatic = oeatom.IsAromatic()\n stereochemistry = OpenEyeToolkitWrapper._openeye_cip_atom_stereochemistry(\n oemol, oeatom\n )\n # stereochemistry = self._openeye_cip_atom_stereochemistry(oemol, oeatom)\n name = \"\"\n if oeatom.HasData(\"name\"):\n name = oeatom.GetData(\"name\")\n atom_index = molecule._add_atom(\n atomic_number,\n formal_charge,\n is_aromatic,\n stereochemistry=stereochemistry,\n name=name,\n )\n map_atoms[\n oe_idx\n ] = atom_index # store for mapping oeatom to molecule atom indices below\n atom_mapping[atom_index] = map_id\n\n # If we have a full / partial atom map add it to the molecule. 
Zeroes 0\n # indicates no mapping\n if {*atom_mapping.values()} != {0}:\n\n molecule._properties[\"atom_map\"] = {\n idx: map_idx for idx, map_idx in atom_mapping.items() if map_idx != 0\n }\n\n for oebond in oemol.GetBonds():\n atom1_index = map_atoms[oebond.GetBgnIdx()]\n atom2_index = map_atoms[oebond.GetEndIdx()]\n bond_order = oebond.GetOrder()\n is_aromatic = oebond.IsAromatic()\n stereochemistry = OpenEyeToolkitWrapper._openeye_cip_bond_stereochemistry(\n oemol, oebond\n )\n if oebond.HasData(\"fractional_bond_order\"):\n fractional_bond_order = oebond.GetData(\"fractional_bond_order\")\n else:\n fractional_bond_order = None\n\n molecule._add_bond(\n atom1_index,\n atom2_index,\n bond_order,\n is_aromatic=is_aromatic,\n stereochemistry=stereochemistry,\n fractional_bond_order=fractional_bond_order,\n )\n\n # TODO: Copy conformations, if present\n # TODO: Come up with some scheme to know when to import coordinates\n # From SMILES: no\n # From MOL2: maybe\n # From other: maybe\n if hasattr(oemol, \"GetConfs\"):\n for conf in oemol.GetConfs():\n n_atoms = molecule.n_atoms\n positions = unit.Quantity(\n np.zeros(shape=[n_atoms, 3], dtype=np.float64), unit.angstrom\n )\n for oe_id in conf.GetCoords().keys():\n off_atom_coords = unit.Quantity(\n conf.GetCoords()[oe_id], unit.angstrom\n )\n off_atom_index = map_atoms[oe_id]\n positions[off_atom_index, :] = off_atom_coords\n if (positions == 0 * unit.angstrom).all() and n_atoms > 1:\n continue\n molecule._add_conformer(positions)\n\n # Copy partial charges, if present\n partial_charges = unit.Quantity(\n np.zeros(shape=molecule.n_atoms, dtype=np.float64),\n unit=unit.elementary_charge,\n )\n\n # If all OEAtoms have a partial charge of NaN, then the OFFMol should\n # have its partial_charges attribute set to None\n any_partial_charge_is_not_nan = False\n for oe_atom in oemol.GetAtoms():\n oe_idx = oe_atom.GetIdx()\n off_idx = map_atoms[oe_idx]\n unitless_charge = oe_atom.GetPartialCharge()\n if not math.isnan(unitless_charge):\n any_partial_charge_is_not_nan = True\n # break\n charge = unitless_charge * unit.elementary_charge\n partial_charges[off_idx] = charge\n\n if any_partial_charge_is_not_nan:\n molecule.partial_charges = partial_charges\n else:\n molecule.partial_charges = None\n\n return molecule\n\n @staticmethod\n def to_openeye(molecule, aromaticity_model=DEFAULT_AROMATICITY_MODEL):\n \"\"\"\n Create an OpenEye molecule using the specified aromaticity model\n\n ``OEAtom`` s have a different set of allowed value for partial\n charges than ``openff.toolkit.topology.Molecule``\\ s. In the\n OpenEye toolkits, partial charges are stored on individual\n ``OEAtom``\\ s, and their values are initialized to ``0.0``. In\n the Open Force Field Toolkit, an``openff.toolkit.topology.Molecule``'s\n ``partial_charges`` attribute is initialized to ``None`` and can\n be set to a ``simtk.unit.Quantity``-wrapped numpy array with\n units of elementary charge. The Open Force Field Toolkit\n considers an ``OEMol`` where every ``OEAtom`` has a partial\n charge of ``float('nan')`` to be equivalent to an Open Force\n Field Toolkit ``Molecule``'s ``partial_charges = None``. This\n assumption is made in both ``to_openeye`` and ``from_openeye``.\n\n .. todo ::\n\n * Should the aromaticity model be specified in some other way?\n\n .. 
warning :: This API is experimental and subject to change.\n\n Parameters\n ----------\n molecule : openff.toolkit.topology.molecule.Molecule object\n The molecule to convert to an OEMol\n aromaticity_model : str, optional, default=DEFAULT_AROMATICITY_MODEL\n The aromaticity model to use\n\n Returns\n -------\n oemol : openeye.oechem.OEMol\n An OpenEye molecule\n\n Examples\n --------\n\n Create an OpenEye molecule from a Molecule\n\n >>> from openff.toolkit.topology import Molecule\n >>> toolkit_wrapper = OpenEyeToolkitWrapper()\n >>> molecule = Molecule.from_smiles('CC')\n >>> oemol = toolkit_wrapper.to_openeye(molecule)\n\n \"\"\"\n from openeye import oechem\n\n if hasattr(oechem, aromaticity_model):\n oe_aro_model = getattr(oechem, aromaticity_model)\n else:\n raise ValueError(\n \"Error: provided aromaticity model not recognized by oechem.\"\n )\n\n oemol = oechem.OEMol()\n # if not(molecule.name is None):\n oemol.SetTitle(molecule.name)\n map_atoms = {} # {off_idx : oe_idx}\n # Add atoms\n oemol_atoms = list() # list of corresponding oemol atoms\n for atom in molecule.atoms:\n oeatom = oemol.NewAtom(atom.atomic_number)\n oeatom.SetFormalCharge(\n atom.formal_charge.value_in_unit(unit.elementary_charge)\n ) # simtk.unit.Quantity(1, unit.elementary_charge)\n # TODO: Do we want to provide _any_ pathway for Atom.is_aromatic to influence the OEMol?\n # oeatom.SetAromatic(atom.is_aromatic)\n oeatom.SetData(\"name\", atom.name)\n oeatom.SetPartialCharge(float(\"nan\"))\n oemol_atoms.append(oeatom)\n map_atoms[atom.molecule_atom_index] = oeatom.GetIdx()\n\n # Add bonds\n oemol_bonds = list() # list of corresponding oemol bonds\n for bond in molecule.bonds:\n # atom1_index = molecule.atoms.index(bond.atom1)\n # atom2_index = molecule.atoms.index(bond.atom2)\n atom1_index = bond.atom1_index\n atom2_index = bond.atom2_index\n oebond = oemol.NewBond(oemol_atoms[atom1_index], oemol_atoms[atom2_index])\n oebond.SetOrder(bond.bond_order)\n # TODO: Do we want to provide _any_ pathway for Bond.is_aromatic to influence the OEMol?\n # oebond.SetAromatic(bond.is_aromatic)\n if not (bond.fractional_bond_order is None):\n oebond.SetData(\"fractional_bond_order\", bond.fractional_bond_order)\n oemol_bonds.append(oebond)\n\n oechem.OEAssignAromaticFlags(oemol, oe_aro_model)\n\n # Set atom stereochemistry now that all connectivity is in place\n for atom, oeatom in zip(molecule.atoms, oemol_atoms):\n if not atom.stereochemistry:\n continue\n\n # Set arbitrary initial stereochemistry\n neighs = [n for n in oeatom.GetAtoms()]\n oeatom.SetStereo(\n neighs, oechem.OEAtomStereo_Tetra, oechem.OEAtomStereo_Right\n )\n\n # Flip chirality if stereochemistry isincorrect\n oeatom_stereochemistry = (\n OpenEyeToolkitWrapper._openeye_cip_atom_stereochemistry(oemol, oeatom)\n )\n if oeatom_stereochemistry != atom.stereochemistry:\n # Flip the stereochemistry\n oeatom.SetStereo(\n neighs, oechem.OEAtomStereo_Tetra, oechem.OEAtomStereo_Left\n )\n # Verify it matches now as a sanity check\n oeatom_stereochemistry = (\n OpenEyeToolkitWrapper._openeye_cip_atom_stereochemistry(\n oemol, oeatom\n )\n )\n if oeatom_stereochemistry != atom.stereochemistry:\n raise Exception(\n \"Programming error: OpenEye atom stereochemistry assumptions failed.\"\n )\n\n # Set bond stereochemistry\n for bond, oebond in zip(molecule.bonds, oemol_bonds):\n if not bond.stereochemistry:\n continue\n\n atom1_index = bond.molecule.atoms.index(bond.atom1)\n atom2_index = bond.molecule.atoms.index(bond.atom2)\n # Set arbitrary initial stereochemistry\n 
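# (Illustrative note: one non-partner neighbor is chosen at each end of the\n # bond as the cis/trans reference atoms; an arbitrary cis assignment is made\n # first, then flipped below if CIP perception disagrees with the target.)\n 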
oeatom1, oeatom2 = oemol_atoms[atom1_index], oemol_atoms[atom2_index]\n oeatom1_neighbor = [n for n in oeatom1.GetAtoms() if not n == oeatom2][0]\n oeatom2_neighbor = [n for n in oeatom2.GetAtoms() if not n == oeatom1][0]\n # oebond.SetStereo([oeatom1, oeatom2], oechem.OEBondStereo_CisTrans, oechem.OEBondStereo_Cis)\n oebond.SetStereo(\n [oeatom1_neighbor, oeatom2_neighbor],\n oechem.OEBondStereo_CisTrans,\n oechem.OEBondStereo_Cis,\n )\n\n # Flip stereochemistry if incorrect\n oebond_stereochemistry = (\n OpenEyeToolkitWrapper._openeye_cip_bond_stereochemistry(oemol, oebond)\n )\n if oebond_stereochemistry != bond.stereochemistry:\n # Flip the stereochemistry\n oebond.SetStereo(\n [oeatom1_neighbor, oeatom2_neighbor],\n oechem.OEBondStereo_CisTrans,\n oechem.OEBondStereo_Trans,\n )\n # Verify it matches now as a sanity check\n oebond_stereochemistry = (\n OpenEyeToolkitWrapper._openeye_cip_bond_stereochemistry(\n oemol, oebond\n )\n )\n if oebond_stereochemistry != bond.stereochemistry:\n raise Exception(\n \"Programming error: OpenEye bond stereochemistry assumptions failed.\"\n )\n\n # Retain conformations, if present\n if molecule.n_conformers != 0:\n oemol.DeleteConfs()\n for conf in molecule._conformers:\n # OE needs a 1 x (3*n_Atoms) double array as input\n flat_coords = np.zeros(shape=oemol.NumAtoms() * 3, dtype=np.float64)\n for index, oe_idx in map_atoms.items():\n (x, y, z) = conf[index, :] / unit.angstrom\n flat_coords[(3 * oe_idx)] = x\n flat_coords[(3 * oe_idx) + 1] = y\n flat_coords[(3 * oe_idx) + 2] = z\n\n oecoords = oechem.OEFloatArray(flat_coords)\n oemol.NewConf(oecoords)\n\n # Retain charges, if present. All atoms are initialized above with a partial charge of NaN.\n if molecule._partial_charges is not None:\n oe_indexed_charges = np.zeros(shape=molecule.n_atoms, dtype=np.float64)\n for off_idx, charge in enumerate(molecule._partial_charges):\n oe_idx = map_atoms[off_idx]\n charge_unitless = charge / unit.elementary_charge\n oe_indexed_charges[oe_idx] = charge_unitless\n # TODO: This loop below fails if we try to use an \"enumerate\"-style loop.\n # It's worth investigating whether we make this assumption elsewhere in the codebase, since\n # the OE docs may indicate that this sort of usage is a very bad thing to do.\n # https://docs.eyesopen.com/toolkits/python/oechemtk/atombondindices.html#indices-for-molecule-lookup-considered-harmful\n # for oe_idx, oe_atom in enumerate(oemol.GetAtoms()):\n for oe_atom in oemol.GetAtoms():\n oe_idx = oe_atom.GetIdx()\n oe_atom.SetPartialCharge(oe_indexed_charges[oe_idx])\n\n # Retain properties, if present\n for key, value in molecule.properties.items():\n oechem.OESetSDData(oemol, str(key), str(value))\n\n # Clean Up phase\n # The only feature of a molecule that wasn't perceived above seemed to be ring connectivity, better to run it\n # here then for someone to inquire about ring sizes and get 0 when it shouldn't be\n oechem.OEFindRingAtomsAndBonds(oemol)\n\n return oemol\n\n def to_smiles(self, molecule, isomeric=True, explicit_hydrogens=True, mapped=False):\n \"\"\"\n Uses the OpenEye toolkit to convert a Molecule into a SMILES string.\n A partially mapped smiles can also be generated for atoms of interest by supplying an `atom_map` to the\n properties dictionary.\n\n Parameters\n ----------\n molecule : An openff.toolkit.topology.Molecule\n The molecule to convert into a SMILES.\n isomeric: bool optional, default= True\n return an isomeric smiles\n explicit_hydrogens: bool optional, default=True\n return a smiles string 
containing all hydrogens explicitly\n mapped: bool optional, default=False\n return an explicit hydrogen mapped smiles, the atoms to be mapped can be controlled by supplying an\n atom map into the properties dictionary. If no mapping is passed all atoms will be mapped in order, else\n an atom map dictionary from the current atom index to the map id should be supplied with no duplicates.\n The map ids (values) should start from 0 or 1.\n\n Returns\n -------\n smiles : str\n The SMILES of the input molecule.\n \"\"\"\n from openeye import oechem\n\n oemol = self.to_openeye(molecule)\n\n # this sets up the default settings following the old DEFAULT flag\n # more information on flags can be found here\n # <https://docs.eyesopen.com/toolkits/python/oechemtk/OEChemConstants/OESMILESFlag.html#OEChem::OESMILESFlag>\n smiles_options = (\n oechem.OESMILESFlag_Canonical\n | oechem.OESMILESFlag_Isotopes\n | oechem.OESMILESFlag_RGroups\n )\n\n # check if we want an isomeric smiles\n if isomeric:\n # add the atom and bond stereo flags\n smiles_options |= (\n oechem.OESMILESFlag_AtomStereo | oechem.OESMILESFlag_BondStereo\n )\n\n if explicit_hydrogens:\n # add the hydrogen flag\n smiles_options |= oechem.OESMILESFlag_Hydrogens\n\n if mapped:\n assert explicit_hydrogens is True, (\n \"Mapped smiles require all hydrogens and \"\n \"stereochemistry to be defined to retain order\"\n )\n\n # if we only want to map specific atoms check for an atom map\n atom_map = molecule._properties.get(\"atom_map\", None)\n if atom_map is not None:\n # make sure there are no repeated indices\n map_ids = set(atom_map.values())\n if len(map_ids) < len(atom_map):\n atom_map = None\n elif 0 in atom_map.values():\n # we need to increment the map index\n for atom, map in atom_map.items():\n atom_map[atom] = map + 1\n\n if atom_map is None:\n # now we need to add the atom map to the atoms\n for oeatom in oemol.GetAtoms():\n oeatom.SetMapIdx(oeatom.GetIdx() + 1)\n else:\n for atom in oemol.GetAtoms():\n try:\n # try to set the atom map\n map_idx = atom_map[atom.GetIdx()]\n atom.SetMapIdx(map_idx)\n except KeyError:\n continue\n\n smiles_options |= oechem.OESMILESFlag_AtomMaps\n\n smiles = oechem.OECreateSmiString(oemol, smiles_options)\n return smiles\n\n def to_inchi(self, molecule, fixed_hydrogens=False):\n \"\"\"\n Create an InChI string for the molecule using the OpenEye Toolkit.\n InChI is a standardised representation that does not capture tautomers unless specified using the fixed hydrogen\n layer.\n\n For information on InChI see here https://iupac.org/who-we-are/divisions/division-details/inchi/\n\n Parameters\n ----------\n molecule : An openff.toolkit.topology.Molecule\n The molecule to convert into an InChI.\n\n fixed_hydrogens: bool, default=False\n If a fixed hydrogen layer should be added to the InChI, if `True` this will produce a non standard specific\n InChI string of the molecule.\n\n Returns\n --------\n inchi: str\n The InChI string of the molecule.\n \"\"\"\n\n from openeye import oechem\n\n oemol = self.to_openeye(molecule)\n\n if fixed_hydrogens:\n opts = oechem.OEInChIOptions()\n opts.SetFixedHLayer(True)\n inchi = oechem.OEMolToInChI(oemol)\n\n else:\n inchi = oechem.OEMolToSTDInChI(oemol)\n\n return inchi\n\n def to_inchikey(self, molecule, fixed_hydrogens=False):\n \"\"\"\n Create an InChIKey for the molecule using the OpenEye Toolkit.\n InChIKey is a standardised representation that does not capture tautomers unless specified using the fixed hydrogen\n layer.\n\n For information on InChI see here 
https://iupac.org/who-we-are/divisions/division-details/inchi/\n\n Parameters\n ----------\n molecule : An openff.toolkit.topology.Molecule\n The molecule to convert into a SMILES.\n\n fixed_hydrogens: bool, default=False\n If a fixed hydrogen layer should be added to the InChI, if `True` this will produce a non standard specific\n InChI string of the molecule.\n\n Returns\n --------\n inchi_key: str\n The InChIKey representation of the molecule.\n \"\"\"\n\n from openeye import oechem\n\n oemol = self.to_openeye(molecule)\n\n if fixed_hydrogens:\n opts = oechem.OEInChIOptions()\n opts.SetFixedHLayer(True)\n inchi_key = oechem.OEMolToInChIKey(oemol)\n\n else:\n inchi_key = oechem.OEMolToSTDInChIKey(oemol)\n\n return inchi_key\n\n def to_iupac(self, molecule):\n \"\"\"Generate IUPAC name from Molecule\n\n Parameters\n ----------\n molecule : An openff.toolkit.topology.Molecule\n The molecule to convert into a SMILES.\n\n Returns\n -------\n iupac_name : str\n IUPAC name of the molecule\n\n Examples\n --------\n\n >>> from openff.toolkit.topology import Molecule\n >>> from openff.toolkit.utils import get_data_file_path\n >>> sdf_filepath = get_data_file_path('molecules/ethanol.sdf')\n >>> molecule = Molecule(sdf_filepath)\n >>> toolkit = OpenEyeToolkitWrapper()\n >>> iupac_name = toolkit.to_iupac(molecule)\n\n \"\"\"\n from openeye import oeiupac\n\n oemol = self.to_openeye(molecule)\n\n return oeiupac.OECreateIUPACName(oemol)\n\n def canonical_order_atoms(self, molecule):\n \"\"\"\n Canonical order the atoms in the molecule using the OpenEye toolkit.\n\n Parameters\n ----------\n molecule: openff.toolkit.topology.Molecule\n The input molecule\n\n Returns\n -------\n molecule : openff.toolkit.topology.Molecule\n The input molecule, with canonically-indexed atoms and bonds.\n \"\"\"\n\n from openeye import oechem\n\n oemol = self.to_openeye(molecule)\n\n oechem.OECanonicalOrderAtoms(oemol)\n oechem.OECanonicalOrderBonds(oemol)\n\n # reorder the iterator\n vatm = []\n for atom in oemol.GetAtoms():\n if atom.GetAtomicNum() != oechem.OEElemNo_H:\n vatm.append(atom)\n oemol.OrderAtoms(vatm)\n\n vbnd = []\n for bond in oemol.GetBonds():\n if (\n bond.GetBgn().GetAtomicNum() != oechem.OEElemNo_H\n and bond.GetEnd().GetAtomicNum() != oechem.OEElemNo_H\n ):\n vbnd.append(bond)\n oemol.OrderBonds(vbnd)\n\n oemol.Sweep()\n\n for bond in oemol.GetBonds():\n if bond.GetBgnIdx() > bond.GetEndIdx():\n bond.SwapEnds()\n\n return self.from_openeye(\n oemol, allow_undefined_stereo=True, _cls=molecule.__class__\n )\n\n def from_smiles(\n self,\n smiles,\n hydrogens_are_explicit=False,\n allow_undefined_stereo=False,\n _cls=None,\n ):\n \"\"\"\n Create a Molecule from a SMILES string using the OpenEye toolkit.\n\n .. warning :: This API is experimental and subject to change.\n\n Parameters\n ----------\n smiles : str\n The SMILES string to turn into a molecule\n hydrogens_are_explicit : bool, default = False\n If False, OE will perform hydrogen addition using OEAddExplicitHydrogens\n allow_undefined_stereo : bool, default=False\n Whether to accept SMILES with undefined stereochemistry. 
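For example, the SMILES \"C(F)(Cl)Br\" names a tetrahedral stereocenter (H, F, Cl and Br substituents) without an ``@``/``@@`` annotation, so its stereochemistry is undefined.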
If False,\n an exception will be raised if a SMILES with undefined stereochemistry\n is passed into this function.\n _cls : class\n Molecule constructor\n Returns\n -------\n molecule : openff.toolkit.topology.Molecule\n An OpenFF style molecule.\n \"\"\"\n from openeye import oechem\n\n oemol = oechem.OEGraphMol()\n oechem.OESmilesToMol(oemol, smiles)\n if not (hydrogens_are_explicit):\n result = oechem.OEAddExplicitHydrogens(oemol)\n if not result:\n raise ValueError(\n \"Addition of explicit hydrogens failed in from_openeye\"\n )\n elif hydrogens_are_explicit and oechem.OEHasImplicitHydrogens(oemol):\n raise ValueError(\n f\"'hydrogens_are_explicit' was specified as True, but OpenEye Toolkit interpreted \"\n f\"SMILES '{smiles}' as having implicit hydrogen. If this SMILES is intended to \"\n f\"express all explicit hydrogens in the molecule, then you should construct the \"\n f\"desired molecule as an OEMol (where oechem.OEHasImplicitHydrogens(oemol) returns \"\n f\"False), and then use Molecule.from_openeye() to create the desired OFFMol.\"\n )\n\n # Set partial charges to None, since they couldn't have been stored in a SMILES\n for atom in oemol.GetAtoms():\n atom.SetPartialCharge(float(\"nan\"))\n\n molecule = self.from_openeye(\n oemol, _cls=_cls, allow_undefined_stereo=allow_undefined_stereo\n )\n return molecule\n\n def from_inchi(self, inchi, allow_undefined_stereo=False, _cls=None):\n \"\"\"\n Construct a Molecule from a InChI representation\n\n Parameters\n ----------\n inchi : str\n The InChI representation of the molecule.\n\n allow_undefined_stereo : bool, default=False\n Whether to accept InChI with undefined stereochemistry. If False,\n an exception will be raised if a InChI with undefined stereochemistry\n is passed into this function.\n\n _cls : class\n Molecule constructor\n\n Returns\n -------\n molecule : openff.toolkit.topology.Molecule\n \"\"\"\n\n from openeye import oechem\n\n # This calls the same functions as OESmilesToMol\n oemol = oechem.OEGraphMol()\n oechem.OEInChIToMol(oemol, inchi)\n\n # try and catch InChI parsing fails\n # if there are no atoms don't build the molecule\n if oemol.NumAtoms() == 0:\n raise RuntimeError(\n \"There was an issue parsing the InChI string, please check and try again.\"\n )\n\n molecule = self.from_openeye(\n oemol, allow_undefined_stereo=allow_undefined_stereo, _cls=_cls\n )\n\n return molecule\n\n def from_iupac(self, iupac_name, allow_undefined_stereo=False, _cls=None, **kwargs):\n \"\"\"\n Construct a Molecule from an IUPAC name\n\n Parameters\n ----------\n iupac_name : str\n The IUPAC or common name of the molecule.\n allow_undefined_stereo : bool, default=False\n Whether to accept a molecule name with undefined stereochemistry. 
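(For instance, the common name \"2-butanol\" leaves its single stereocenter unspecified.)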
If False,\n            an exception will be raised if a molecule name with undefined stereochemistry\n            is passed into this function.\n        _cls : class\n            Molecule constructor\n\n        Returns\n        -------\n        molecule : openff.toolkit.topology.Molecule\n\n        \"\"\"\n        from openeye import oechem, oeiupac\n\n        oemol = oechem.OEMol()\n        parsing_result = oeiupac.OEParseIUPACName(oemol, iupac_name)\n        if not parsing_result:\n            raise InvalidIUPACNameError(\n                f\"OpenEye failed to parse {iupac_name} as an IUPAC name\"\n            )\n        oechem.OETriposAtomNames(oemol)\n        result = oechem.OEAddExplicitHydrogens(oemol)\n        if not result:\n            raise Exception(\"Addition of explicit hydrogens failed in from_iupac\")\n\n        molecule = self.from_openeye(\n            oemol, allow_undefined_stereo=allow_undefined_stereo, _cls=_cls, **kwargs\n        )\n\n        return molecule\n\n    def generate_conformers(\n        self, molecule, n_conformers=1, rms_cutoff=None, clear_existing=True\n    ):\n        \"\"\"\n        Generate molecule conformers using OpenEye Omega.\n\n        .. warning :: This API is experimental and subject to change.\n\n        .. todo ::\n\n           * which parameters should we expose? (or can we implement a general system with \\*\\*kwargs?)\n           * will the coordinates be returned in the OpenFF Molecule's own indexing system? Or is there a chance that\n             they'll get reindexed when we convert the input into an OEmol?\n\n        Parameters\n        ----------\n        molecule : a :class:`Molecule`\n            The molecule to generate conformers for.\n        n_conformers : int, default=1\n            The maximum number of conformers to generate.\n        rms_cutoff : simtk.Quantity-wrapped float, in units of distance, optional, default=None\n            The minimum RMS value at which two conformers are considered redundant and one is deleted.\n            If None, the cutoff is set to 1 Angstrom\n        clear_existing : bool, default=True\n            Whether to overwrite existing conformers for the molecule\n        \"\"\"\n        from openeye import oeomega\n\n        oemol = self.to_openeye(molecule)\n        omega = oeomega.OEOmega()\n        omega.SetMaxConfs(n_conformers)\n        omega.SetCanonOrder(False)\n        omega.SetSampleHydrogens(True)\n        omega.SetEnergyWindow(15.0)  # unit?\n        if rms_cutoff is None:\n            omega.SetRMSThreshold(1.0)\n        else:\n            omega.SetRMSThreshold(rms_cutoff.value_in_unit(unit.angstrom))\n        # Don't generate a random stereoisomer if stereochemistry is not specified\n        omega.SetStrictStereo(True)\n        status = omega(oemol)\n\n        if status is False:\n            omega.SetStrictStereo(False)\n            new_status = omega(oemol)\n            if new_status is False:\n                raise Exception(\"OpenEye Omega conformer generation failed\")\n\n        molecule2 = self.from_openeye(\n            oemol, allow_undefined_stereo=True, _cls=molecule.__class__\n        )\n\n        if clear_existing:\n            molecule._conformers = list()\n\n        for conformer in molecule2._conformers:\n            molecule._add_conformer(conformer)\n\n    def apply_elf_conformer_selection(\n        self,\n        molecule: \"Molecule\",\n        percentage: float = 2.0,\n        limit: int = 10,\n    ):\n        \"\"\"Applies the `ELF method\n        <https://docs.eyesopen.com/toolkits/python/quacpactk/molchargetheory.html#elf-conformer-selection>`_\n        to select a set of diverse\n        conformers which have minimal electrostatically strongly interacting functional\n        groups from a molecule's conformers.\n\n        Notes\n        -----\n        * The input molecule should have a large set of conformers already\n          generated to select the ELF conformers from.\n        * The selected conformers will be retained in the `molecule.conformers` list\n          while unselected conformers will be discarded.\n\n        See Also\n        --------\n        RDKitToolkitWrapper.apply_elf_conformer_selection\n\n        Parameters\n        ----------\n        molecule\n            The molecule which contains the set of conformers to select from.\n        percentage\n            The percentage of conformers with the lowest electrostatic interaction\n            energies to greedily select from.\n        limit\n            The maximum number of conformers to select.\n\n        
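Examples\n        --------\n\n        A minimal usage sketch (assumes a licensed OpenEye installation; the\n        molecule and the number of starting conformers are illustrative only):\n\n        >>> from openff.toolkit.topology import Molecule\n        >>> molecule = Molecule.from_smiles('CCCCCCO')\n        >>> molecule.generate_conformers(n_conformers=500)\n        >>> OpenEyeToolkitWrapper().apply_elf_conformer_selection(molecule)\n\n        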
\"\"\"\n\n        from openeye import oechem, oequacpac\n\n        if molecule.n_conformers == 0:\n            return\n\n        oe_molecule = molecule.to_openeye()\n\n        # Select a subset of the OMEGA generated conformers using the ELF10 method.\n        oe_elf_options = oequacpac.OEELFOptions()\n        oe_elf_options.SetElfLimit(limit)\n        oe_elf_options.SetPercent(percentage)\n\n        oe_elf = oequacpac.OEELF(oe_elf_options)\n\n        output_stream = oechem.oeosstream()\n\n        oechem.OEThrow.SetOutputStream(output_stream)\n        oechem.OEThrow.Clear()\n\n        status = oe_elf.Select(oe_molecule)\n\n        oechem.OEThrow.SetOutputStream(oechem.oeerr)\n\n        output_string = output_stream.str().decode(\"UTF-8\")\n        output_string = output_string.replace(\"Warning: \", \"\")\n        output_string = re.sub(\"^: +\", \"\", output_string, flags=re.MULTILINE)\n        output_string = re.sub(\"\\n$\", \"\", output_string)\n\n        # Check to make sure the call to OE was successful, and re-route any\n        # non-fatal warnings to the correct logger.\n        if not status:\n            raise RuntimeError(\"\\n\" + output_string)\n        elif len(output_string) > 0:\n            logger.warning(output_string)\n\n        # Extract and store the ELF conformers on the input molecule.\n        conformers = []\n\n        for oe_conformer in oe_molecule.GetConfs():\n\n            conformer = np.zeros((oe_molecule.NumAtoms(), 3))\n\n            for atom_index, coordinates in oe_conformer.GetCoords().items():\n                conformer[atom_index, :] = coordinates\n\n            conformers.append(conformer * unit.angstrom)\n\n        molecule._conformers = conformers\n\n    def assign_partial_charges(\n        self,\n        molecule,\n        partial_charge_method=None,\n        use_conformers=None,\n        strict_n_conformers=False,\n        _cls=None,\n    ):\n        \"\"\"\n        Compute partial charges with OpenEye quacpac, and assign\n        the new values to the partial_charges attribute.\n\n        .. warning :: This API is experimental and subject to change.\n\n        .. todo ::\n\n           * Should the default be ELF?\n           * Can we expose more charge models?\n\n\n        Parameters\n        ----------\n        molecule : openff.toolkit.topology.Molecule\n            Molecule for which partial charges are to be computed\n        partial_charge_method : str, optional, default=None\n            The charge model to use. One of ['am1bcc', 'am1-mulliken', 'gasteiger', 'mmff94',\n            'am1bccnosymspt', 'am1elf10', 'am1bccelf10'].\n            If None, 'am1-mulliken' will be used.\n        use_conformers : iterable of simtk.unit.Quantity-wrapped numpy arrays, each with shape (n_atoms, 3) and dimension of distance. Optional, default = None\n            Coordinates to use for partial charge calculation. 
If None, an appropriate number of conformers will be generated.\n strict_n_conformers : bool, default=False\n Whether to raise an exception if an invalid number of conformers is provided for the given charge method.\n If this is False and an invalid number of conformers is found, a warning will be raised.\n _cls : class\n Molecule constructor\n\n Raises\n ------\n ChargeMethodUnavailableError if the requested charge method can not be handled by this toolkit\n\n ChargeCalculationError if the charge method is supported by this toolkit, but fails\n \"\"\"\n\n import numpy as np\n from openeye import oechem, oequacpac\n\n from openff.toolkit.topology import Molecule\n\n SUPPORTED_CHARGE_METHODS = {\n \"am1bcc\": {\n \"oe_charge_method\": oequacpac.OEAM1BCCCharges,\n \"min_confs\": 1,\n \"max_confs\": 1,\n \"rec_confs\": 1,\n },\n \"am1-mulliken\": {\n \"oe_charge_method\": oequacpac.OEAM1Charges,\n \"min_confs\": 1,\n \"max_confs\": 1,\n \"rec_confs\": 1,\n },\n \"gasteiger\": {\n \"oe_charge_method\": oequacpac.OEGasteigerCharges,\n \"min_confs\": 0,\n \"max_confs\": 0,\n \"rec_confs\": 0,\n },\n \"mmff94\": {\n \"oe_charge_method\": oequacpac.OEMMFF94Charges,\n \"min_confs\": 0,\n \"max_confs\": 0,\n \"rec_confs\": 0,\n },\n \"am1bccnosymspt\": {\n \"oe_charge_method\": oequacpac.OEAM1BCCCharges,\n \"min_confs\": 1,\n \"max_confs\": 1,\n \"rec_confs\": 1,\n },\n \"am1elf10\": {\n \"oe_charge_method\": oequacpac.OEELFCharges(\n oequacpac.OEAM1Charges(optimize=True, symmetrize=True), 10\n ),\n \"min_confs\": 1,\n \"max_confs\": None,\n \"rec_confs\": 500,\n },\n \"am1bccelf10\": {\n \"oe_charge_method\": oequacpac.OEAM1BCCELF10Charges,\n \"min_confs\": 1,\n \"max_confs\": None,\n \"rec_confs\": 500,\n },\n }\n\n if partial_charge_method is None:\n partial_charge_method = \"am1-mulliken\"\n\n partial_charge_method = partial_charge_method.lower()\n\n if partial_charge_method not in SUPPORTED_CHARGE_METHODS:\n raise ChargeMethodUnavailableError(\n f\"partial_charge_method '{partial_charge_method}' is not available from OpenEyeToolkitWrapper. \"\n f\"Available charge methods are {list(SUPPORTED_CHARGE_METHODS.keys())} \"\n )\n\n charge_method = SUPPORTED_CHARGE_METHODS[partial_charge_method]\n\n if _cls is None:\n from openff.toolkit.topology.molecule import Molecule\n\n _cls = Molecule\n\n # Make a temporary copy of the molecule, since we'll be messing with its conformers\n mol_copy = _cls(molecule)\n\n if use_conformers is None:\n if charge_method[\"rec_confs\"] == 0:\n mol_copy._conformers = None\n else:\n self.generate_conformers(\n mol_copy,\n n_conformers=charge_method[\"rec_confs\"],\n rms_cutoff=0.25 * unit.angstrom,\n )\n # TODO: What's a \"best practice\" RMS cutoff to use here?\n else:\n mol_copy._conformers = None\n for conformer in use_conformers:\n mol_copy._add_conformer(conformer)\n self._check_n_conformers(\n mol_copy,\n partial_charge_method=partial_charge_method,\n min_confs=charge_method[\"min_confs\"],\n max_confs=charge_method[\"max_confs\"],\n strict_n_conformers=strict_n_conformers,\n )\n\n oemol = mol_copy.to_openeye()\n\n errfs = oechem.oeosstream()\n oechem.OEThrow.SetOutputStream(errfs)\n oechem.OEThrow.Clear()\n\n # The OpenFF toolkit has always supported a version of AM1BCC with no geometry optimization\n # or symmetry correction. 
So we include this keyword to provide a special configuration of quacpac\n # if requested.\n if partial_charge_method == \"am1bccnosymspt\":\n optimize = False\n symmetrize = False\n quacpac_status = oequacpac.OEAssignCharges(\n oemol, charge_method[\"oe_charge_method\"](optimize, symmetrize)\n )\n else:\n oe_charge_method = charge_method[\"oe_charge_method\"]\n\n if callable(oe_charge_method):\n oe_charge_method = oe_charge_method()\n\n quacpac_status = oequacpac.OEAssignCharges(oemol, oe_charge_method)\n\n oechem.OEThrow.SetOutputStream(oechem.oeerr) # restoring to original state\n # This logic handles errors encountered in #34, which can occur when using ELF10 conformer selection\n if not quacpac_status:\n\n oe_charge_engine = (\n oequacpac.OEAM1Charges\n if partial_charge_method == \"am1elf10\"\n else oequacpac.OEAM1BCCCharges\n )\n\n if \"SelectElfPop: issue with removing trans COOH conformers\" in (\n errfs.str().decode(\"UTF-8\")\n ):\n logger.warning(\n f\"Warning: charge assignment involving ELF10 conformer selection failed due to a known bug (toolkit issue \"\n f\"#346). Downgrading to {oe_charge_engine.__name__} charge assignment for this molecule. More information\"\n f\"is available at https://github.com/openforcefield/openff-toolkit/issues/346\"\n )\n quacpac_status = oequacpac.OEAssignCharges(oemol, oe_charge_engine())\n\n if quacpac_status is False:\n raise ChargeCalculationError(\n f'Unable to assign charges: {errfs.str().decode(\"UTF-8\")}'\n )\n\n # Extract and return charges\n ## TODO: Make sure atom mapping remains constant\n\n charges = unit.Quantity(\n np.zeros(shape=oemol.NumAtoms(), dtype=np.float64), unit.elementary_charge\n )\n for oeatom in oemol.GetAtoms():\n index = oeatom.GetIdx()\n charge = oeatom.GetPartialCharge()\n charge = charge * unit.elementary_charge\n charges[index] = charge\n\n molecule.partial_charges = charges\n\n def compute_partial_charges_am1bcc(\n self, molecule, use_conformers=None, strict_n_conformers=False\n ):\n \"\"\"\n Compute AM1BCC partial charges with OpenEye quacpac. This function will attempt to use\n the OEAM1BCCELF10 charge generation method, but may print a warning and fall back to\n normal OEAM1BCC if an error is encountered. This error is known to occur with some\n carboxylic acids, and is under investigation by OpenEye.\n\n\n .. warning :: This API is experimental and subject to change.\n\n Parameters\n ----------\n molecule : Molecule\n Molecule for which partial charges are to be computed\n use_conformers : iterable of simtk.unit.Quantity-wrapped numpy arrays, each with shape (n_atoms, 3) and dimension of distance. Optional, default = None\n Coordinates to use for partial charge calculation. If None, an appropriate number of conformers\n will be generated.\n strict_n_conformers : bool, default=False\n Whether to raise an exception if an invalid number of conformers is provided.\n If this is False and an invalid number of conformers is found, a warning will be raised\n instead of an Exception.\n\n Returns\n -------\n charges : numpy.array of shape (natoms) of type float\n The partial charges\n \"\"\"\n\n import warnings\n\n warnings.warn(\n \"compute_partial_charges_am1bcc will be deprecated in an upcoming release. 
\"\n \"Use assign_partial_charges(partial_charge_method='am1bccelf10') instead.\",\n DeprecationWarning,\n )\n self.assign_partial_charges(\n molecule,\n partial_charge_method=\"am1bccelf10\",\n use_conformers=use_conformers,\n strict_n_conformers=strict_n_conformers,\n )\n return molecule.partial_charges\n\n def assign_fractional_bond_orders(\n self, molecule, bond_order_model=None, use_conformers=None, _cls=None\n ):\n \"\"\"\n Update and store list of bond orders this molecule. Bond orders are stored on each\n bond, in the `bond.fractional_bond_order` attribute.\n\n .. warning :: This API is experimental and subject to change.\n\n Parameters\n ----------\n molecule : openff.toolkit.topology.molecule Molecule\n The molecule to assign wiberg bond orders to\n bond_order_model : str, optional, default=None\n The charge model to use. One of ['am1-wiberg', 'am1-wiberg-elf10',\n 'pm3-wiberg', 'pm3-wiberg-elf10']. If None, 'am1-wiberg' will be used.\n use_conformers : iterable of simtk.unit.Quantity(np.array) with shape (n_atoms, 3) and dimension of distance, optional, default=None\n The conformers to use for fractional bond order calculation. If None, an\n appropriate number of conformers will be generated by an available\n ToolkitWrapper. If the chosen ``bond_order_model`` is an ELF variant, the ELF\n conformer selection method will be applied to the provided conformers.\n _cls : class\n Molecule constructor\n \"\"\"\n from openeye import oechem, oequacpac\n\n if _cls is None:\n from openff.toolkit.topology.molecule import Molecule\n\n _cls = Molecule\n\n # Make a copy since we'll be messing with this molecule's conformers\n temp_mol = _cls(molecule)\n\n if bond_order_model is None:\n bond_order_model = \"am1-wiberg\"\n\n is_elf_method = bond_order_model in [\"am1-wiberg-elf10\", \"pm3-wiberg-elf10\"]\n\n if use_conformers is None:\n temp_mol.generate_conformers(\n n_conformers=1 if not is_elf_method else 500,\n # 0.05 is the recommended RMS when generating a 'Dense' amount of\n # conformers using Omega: https://docs.eyesopen.com/toolkits/python/\n # omegatk/OEConfGenConstants/OEFragBuilderMode.html.\n rms_cutoff=None if not is_elf_method else 0.05 * unit.angstrom,\n )\n else:\n temp_mol._conformers = None\n for conformer in use_conformers:\n temp_mol._add_conformer(conformer)\n if temp_mol.n_conformers == 0:\n raise Exception(\n \"No conformers present in molecule submitted for fractional bond order calculation. Consider \"\n \"loading the molecule from a file with geometry already present or running \"\n \"molecule.generate_conformers() before calling molecule.compute_wiberg_bond_orders()\"\n )\n\n if is_elf_method:\n # Apply the ELF10 conformer selection method.\n temp_mol.apply_elf_conformer_selection()\n\n # Set the options to use when computing the WBOs. This is based on example at\n # https://docs.eyesopen.com/toolkits/python/quacpactk/examples_summary_wibergbondorders.html\n am1 = oequacpac.OEAM1()\n\n am1results = oequacpac.OEAM1Results()\n am1options = am1.GetOptions()\n\n if bond_order_model.startswith(\"am1-wiberg\"):\n am1options.SetSemiMethod(oequacpac.OEMethodType_AM1)\n elif bond_order_model.startswith(\"pm3-wiberg\"):\n # TODO: Make sure that modifying am1options actually works\n am1options.SetSemiMethod(oequacpac.OEMethodType_PM3)\n else:\n raise ValueError(\n f\"Bond order model '{bond_order_model}' is not supported by \"\n f\"OpenEyeToolkitWrapper. 
Supported models are ['am1-wiberg', \"\n f\"'am1-wiberg-elf10', 'pm3-wiberg', 'pm3-wiberg-elf10'].\"\n )\n\n # Convert the conformers into OE friendly objects to make setting them one\n # at a time easier.\n oe_conformers = [\n oechem.OEFloatArray(conformer.value_in_unit(unit.angstrom).flatten())\n for conformer in temp_mol.conformers\n ]\n\n oemol = self.to_openeye(temp_mol)\n bond_orders = defaultdict(list)\n\n for oe_conformer in oe_conformers:\n\n oemol.DeleteConfs()\n oemol.NewConf(oe_conformer)\n\n status = am1.CalcAM1(am1results, oemol)\n\n if status is False:\n\n raise Exception(\n \"Unable to assign charges (in the process of calculating \"\n \"fractional bond orders)\"\n )\n\n for bond in oemol.GetBonds():\n\n bond_orders[bond.GetIdx()].append(\n am1results.GetBondOrder(bond.GetBgnIdx(), bond.GetEndIdx())\n )\n\n # TODO: Will bonds always map back to the same index? Consider doing a\n # topology mapping.\n for bond_idx, conformer_bond_orders in bond_orders.items():\n\n # Get bond order\n order = np.mean(conformer_bond_orders)\n\n mol_bond = molecule._bonds[bond_idx]\n mol_bond.fractional_bond_order = order\n\n def get_tagged_smarts_connectivity(self, smarts):\n \"\"\"\n Returns a tuple of tuples indicating connectivity between tagged atoms in a SMARTS string. Does not\n return bond order.\n\n Parameters\n ----------\n smarts : str\n The tagged SMARTS to analyze\n\n Returns\n -------\n unique_tags : tuple of int\n A sorted tuple of all unique tagged atom map indices.\n tagged_atom_connectivity : tuple of tuples of int, shape n_tagged_bonds x 2\n A tuple of tuples, where each inner tuple is a pair of tagged atoms (tag_idx_1, tag_idx_2) which are\n bonded. The inner tuples are ordered smallest-to-largest, and the tuple of tuples is ordered\n lexically. 
So the return value for an improper torsion would be ((1, 2), (2, 3), (2, 4)).\n\n Raises\n ------\n SMIRKSParsingError\n If OpenEye toolkit was unable to parse the provided smirks/tagged smarts\n \"\"\"\n from openeye import oechem\n\n from openff.toolkit.typing.chemistry import SMIRKSParsingError\n\n qmol = oechem.OEQMol()\n status = oechem.OEParseSmarts(qmol, smarts)\n if not status:\n raise SMIRKSParsingError(\n f\"OpenEye Toolkit was unable to parse SMIRKS {smarts}\"\n )\n\n unique_tags = set()\n connections = set()\n for at1 in qmol.GetAtoms():\n if at1.GetMapIdx() == 0:\n continue\n unique_tags.add(at1.GetMapIdx())\n for at2 in at1.GetAtoms():\n if at2.GetMapIdx() == 0:\n continue\n cxn_to_add = sorted([at1.GetMapIdx(), at2.GetMapIdx()])\n connections.add(tuple(cxn_to_add))\n connections = tuple(sorted(list(connections)))\n unique_tags = tuple(sorted(list(unique_tags)))\n return tuple(unique_tags), tuple(connections)\n\n @staticmethod\n def _find_smarts_matches(\n oemol, smarts, aromaticity_model=DEFAULT_AROMATICITY_MODEL\n ):\n \"\"\"Find all sets of atoms in the provided OpenEye molecule that match the provided SMARTS string.\n\n Parameters\n ----------\n oemol : openeye.oechem.OEMol or similar\n oemol to process with the SMIRKS in order to find matches\n smarts : str\n SMARTS string with any number of sequentially tagged atoms.\n If there are N tagged atoms numbered 1..N, the resulting matches will be N-tuples of atoms that match the corresponding tagged atoms.\n aromaticity_model : str, optional, default=None\n OpenEye aromaticity model designation as a string, such as ``OEAroModel_MDL``.\n Molecule is prepared with this aromaticity model prior to querying.\n\n Returns\n -------\n matches : list of tuples of atoms indices within the ``oemol``\n matches[index] is an N-tuple of atom numbers from the ``oemol``\n Matches are returned in no guaranteed order.\n # TODO: What is returned if no matches are found? An empty list, or None?\n # TODO: Ensure that SMARTS numbers 1, 2, 3... are rendered into order of returnd matches indexed by 0, 1, 2...\n\n .. 
notes ::\n\n * Raises ``LicenseError`` if valid OpenEye tools license is not found, rather than causing program to terminate\n * Raises ``ValueError`` if ``smarts`` query is malformed\n\n \"\"\"\n from openeye import oechem\n from openeye.oechem import OESubSearch\n\n # Make a copy of molecule so we don't influence original (probably safer than deepcopy per C Bayly)\n mol = oechem.OEMol(oemol)\n # Set up query\n qmol = oechem.OEQMol()\n if not oechem.OEParseSmarts(qmol, smarts):\n raise ValueError(f\"Error parsing SMARTS '{smarts}'\")\n\n # Apply aromaticity model\n if type(aromaticity_model) == str:\n # Check if the user has provided a manually-specified aromaticity_model\n if hasattr(oechem, aromaticity_model):\n oearomodel = getattr(oechem, aromaticity_model)\n else:\n raise ValueError(\n \"Error: provided aromaticity model not recognized by oechem.\"\n )\n else:\n raise ValueError(\"Error: provided aromaticity model must be a string.\")\n\n # OEPrepareSearch will clobber our desired aromaticity model if we don't sync up mol and qmol ahead of time\n # Prepare molecule\n oechem.OEClearAromaticFlags(mol)\n oechem.OEAssignAromaticFlags(mol, oearomodel)\n\n # If aromaticity model was provided, prepare query molecule\n oechem.OEClearAromaticFlags(qmol)\n oechem.OEAssignAromaticFlags(qmol, oearomodel)\n oechem.OEAssignHybridization(mol)\n oechem.OEAssignHybridization(qmol)\n\n # Build list of matches\n # TODO: The MoleculeImage mapping should preserve ordering of template molecule for equivalent atoms\n # and speed matching for larger molecules.\n unique = False # We require all matches, not just one of each kind\n substructure_search = OESubSearch(qmol)\n substructure_search.SetMaxMatches(0)\n oechem.OEPrepareSearch(mol, substructure_search)\n matches = list()\n for match in substructure_search.Match(mol, unique):\n # Compile list of atom indices that match the pattern tags\n atom_indices = dict()\n for matched_atom in match.GetAtoms():\n if matched_atom.pattern.GetMapIdx() != 0:\n atom_indices[\n matched_atom.pattern.GetMapIdx() - 1\n ] = matched_atom.target.GetIdx()\n # Compress into list\n atom_indices = [atom_indices[index] for index in range(len(atom_indices))]\n # Convert to tuple\n matches.append(tuple(atom_indices))\n return matches\n\n def find_smarts_matches(self, molecule, smarts, aromaticity_model=\"OEAroModel_MDL\"):\n \"\"\"\n Find all SMARTS matches for the specified molecule, using the specified aromaticity model.\n\n .. warning :: This API is experimental and subject to change.\n\n Parameters\n ----------\n molecule : openff.toolkit.topology.Molecule\n The molecule for which all specified SMARTS matches are to be located\n smarts : str\n SMARTS string with optional SMIRKS-style atom tagging\n aromaticity_model : str, optional, default='OEAroModel_MDL'\n Molecule is prepared with this aromaticity model prior to querying.\n\n .. 
note :: Currently, the only supported ``aromaticity_model`` is ``OEAroModel_MDL``\n\n \"\"\"\n oemol = self.to_openeye(molecule)\n return self._find_smarts_matches(\n oemol, smarts, aromaticity_model=aromaticity_model\n )\n\n\ndef requires_openeye_module(module_name):\n def inner_decorator(function):\n @wraps(function)\n def wrapper(*args, **kwargs):\n try:\n module = importlib.import_module(\"openeye.\" + module_name)\n except (ImportError, ModuleNotFoundError):\n # TODO: Custom exception\n raise Exception(\"openeye.\" + module_name)\n try:\n license_func = OpenEyeToolkitWrapper._license_functions[module_name]\n except KeyError:\n # TODO: Custom exception\n raise Exception(f\"we do not currently use {module_name}\")\n\n # TODO: Custom exception\n assert getattr(module, license_func)()\n\n return function(*args, **kwargs)\n\n return wrapper\n\n return inner_decorator\n\n\nclass RDKitToolkitWrapper(ToolkitWrapper):\n \"\"\"\n RDKit toolkit wrapper\n\n .. warning :: This API is experimental and subject to change.\n \"\"\"\n\n _toolkit_name = \"The RDKit\"\n _toolkit_installation_instructions = (\n \"A conda-installable version of the free and open source RDKit cheminformatics \"\n \"toolkit can be found at: https://anaconda.org/rdkit/rdkit\"\n )\n\n def __init__(self):\n super().__init__()\n\n self._toolkit_file_read_formats = [\"SDF\", \"MOL\", \"SMI\"] # TODO: Add TDT support\n\n if not self.is_available():\n raise ToolkitUnavailableException(\n f\"The required toolkit {self._toolkit_name} is not \"\n f\"available. {self._toolkit_installation_instructions}\"\n )\n else:\n from rdkit import __version__ as rdkit_version\n\n self._toolkit_version = rdkit_version\n\n from rdkit import Chem\n\n # we have to make sure the toolkit can be loaded before formatting this dict\n # Note any new file write formats should be added here only\n self._toolkit_file_write_formats = {\n \"SDF\": Chem.SDWriter,\n \"MOL\": Chem.SDWriter,\n \"SMI\": Chem.SmilesWriter,\n \"PDB\": Chem.PDBWriter,\n \"TDT\": Chem.TDTWriter,\n }\n\n @property\n def toolkit_file_write_formats(self):\n \"\"\"\n List of file formats that this toolkit can write.\n \"\"\"\n return list(self._toolkit_file_write_formats.keys())\n\n @classmethod\n def is_available(cls):\n \"\"\"\n Check whether the RDKit toolkit can be imported\n\n Returns\n -------\n is_installed : bool\n True if RDKit is installed, False otherwise.\n\n \"\"\"\n if cls._is_available is None:\n try:\n importlib.import_module(\"rdkit\", \"Chem\")\n except ImportError:\n cls._is_available = False\n else:\n cls._is_available = True\n return cls._is_available\n\n def from_object(self, obj, allow_undefined_stereo=False, _cls=None):\n \"\"\"\n If given an rdchem.Mol (or rdchem.Mol-derived object), this function will load it into an\n openff.toolkit.topology.molecule. Otherwise, it will return False.\n\n Parameters\n ----------\n obj : A rdchem.Mol-derived object\n An object to be type-checked and converted into a Molecule, if possible.\n allow_undefined_stereo : bool, default=False\n Whether to accept molecules with undefined stereocenters. 
If False,\n            an exception will be raised if a molecule with undefined stereochemistry\n            is passed into this function.\n        _cls : class\n            Molecule constructor\n\n        Returns\n        -------\n        Molecule or False\n            An openff.toolkit.topology.molecule Molecule.\n\n        Raises\n        ------\n        NotImplementedError\n            If the object could not be converted into a Molecule.\n        \"\"\"\n        # TODO: Add tests for the from_object functions\n        from rdkit import Chem\n\n        if _cls is None:\n            from openff.toolkit.topology.molecule import Molecule\n\n            _cls = Molecule\n        if isinstance(obj, Chem.rdchem.Mol):\n            return _cls.from_rdkit(obj, allow_undefined_stereo=allow_undefined_stereo)\n        raise NotImplementedError(\n            \"Cannot create Molecule from {} object\".format(type(obj))\n        )\n\n    def from_pdb_and_smiles(\n        self, file_path, smiles, allow_undefined_stereo=False, _cls=None\n    ):\n        \"\"\"\n        Create a Molecule from a pdb file and a SMILES string using RDKit.\n\n        Requires RDKit to be installed.\n\n        The molecule is created and sanitised based on the SMILES string; we then find a mapping\n        between this molecule and one from the PDB based only on atomic number and connections.\n        The SMILES molecule is then reindexed to match the PDB, the conformer is attached, and the\n        molecule returned.\n\n        Parameters\n        ----------\n        file_path: str\n            PDB file path\n        smiles : str\n            a valid smiles string for the pdb, used for stereochemistry and bond order\n\n        allow_undefined_stereo : bool, default=False\n            If false, raises an exception if the molecule contains undefined stereochemistry.\n        _cls : class\n            Molecule constructor\n\n        Returns\n        --------\n        molecule : openff.toolkit.Molecule (or _cls() type)\n            An OFFMol instance with ordering the same as used in the PDB file.\n\n        Raises\n        ------\n        InvalidConformerError : if the SMILES and PDB molecules are not isomorphic.\n        \"\"\"\n\n        from rdkit import Chem\n\n        from openff.toolkit.topology.molecule import InvalidConformerError, Molecule\n\n        # Make the molecule from smiles\n        offmol = self.from_smiles(\n            smiles, allow_undefined_stereo=allow_undefined_stereo, _cls=_cls\n        )\n\n        # Make another molecule from the PDB; allow stereo errors here, they are expected\n        pdbmol = self.from_rdkit(\n            Chem.MolFromPDBFile(file_path, removeHs=False),\n            allow_undefined_stereo=True,\n            hydrogens_are_explicit=True,\n            _cls=_cls,\n        )\n\n        # check isomorphic and get the mapping if true the mapping will be\n        # Dict[pdb_index: offmol_index] sorted by pdb_index\n        isomorphic, mapping = _cls.are_isomorphic(\n            pdbmol,\n            offmol,\n            return_atom_map=True,\n            aromatic_matching=False,\n            formal_charge_matching=False,\n            bond_order_matching=False,\n            atom_stereochemistry_matching=False,\n            bond_stereochemistry_matching=False,\n        )\n\n        if mapping is not None:\n            new_mol = offmol.remap(mapping)\n\n            # the pdb conformer is in the correct order so just attach it here\n            new_mol._add_conformer(pdbmol.conformers[0])\n\n            return new_mol\n\n        else:\n            raise InvalidConformerError(\"The PDB and SMILES structures do not match.\")\n\n    def from_file(\n        self, file_path, file_format, allow_undefined_stereo=False, _cls=None\n    ):\n        \"\"\"\n        Create an openff.toolkit.topology.Molecule from a file using this toolkit.\n\n\n\n        Parameters\n        ----------\n        file_path : str\n            The file to read the molecule from\n        file_format : str\n            Format specifier, usually file suffix (eg. 'MOL2', 'SMI')\n            Note that not all toolkits support all formats. Check ToolkitWrapper.toolkit_file_read_formats for details.\n        allow_undefined_stereo : bool, default=False\n            If false, raises an exception if the molecule contains undefined stereochemistry.\n        _cls : class\n            Molecule constructor\n\n        Returns\n        -------\n        molecules : iterable of Molecules\n            a list of Molecule objects is returned.\n\n        
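Examples\n        --------\n\n        Load all molecules from an SDF file (an illustrative sketch; the file\n        path is hypothetical):\n\n        >>> toolkit_wrapper = RDKitToolkitWrapper()\n        >>> molecules = toolkit_wrapper.from_file('molecules.sdf', file_format='SDF')\n\n        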
\"\"\"\n        from rdkit import Chem\n\n        file_format = file_format.upper()\n\n        mols = list()\n        if (file_format == \"MOL\") or (file_format == \"SDF\"):\n            for rdmol in Chem.SupplierFromFilename(\n                file_path, removeHs=False, sanitize=False, strictParsing=True\n            ):\n                if rdmol is None:\n                    continue\n\n                # Sanitize the molecules (fails on nitro groups)\n                try:\n                    Chem.SanitizeMol(\n                        rdmol,\n                        Chem.SANITIZE_ALL\n                        ^ Chem.SANITIZE_SETAROMATICITY\n                        ^ Chem.SANITIZE_ADJUSTHS,\n                    )\n                    Chem.AssignStereochemistryFrom3D(rdmol)\n                except ValueError as e:\n                    logger.warning(rdmol.GetProp(\"_Name\") + \" \" + str(e))\n                    continue\n                Chem.SetAromaticity(rdmol, Chem.AromaticityModel.AROMATICITY_MDL)\n                mol = self.from_rdkit(\n                    rdmol, allow_undefined_stereo=allow_undefined_stereo, _cls=_cls\n                )\n                mols.append(mol)\n\n        elif file_format == \"SMI\":\n            # TODO: We have to do some special stuff when we import SMILES (currently\n            # just adding H's, but could get fancier in the future). It might be\n            # worthwhile to parse the SMILES file ourselves and pass each SMILES\n            # through the from_smiles function instead\n            for rdmol in Chem.SmilesMolSupplier(file_path, titleLine=False):\n                rdmol = Chem.AddHs(rdmol)\n                mol = self.from_rdkit(\n                    rdmol, allow_undefined_stereo=allow_undefined_stereo, _cls=_cls\n                )\n                mols.append(mol)\n\n        elif file_format == \"PDB\":\n            raise Exception(\n                \"RDKit can not safely read PDBs on their own. Information about bond order and aromaticity \"\n                \"is likely to be lost. To read a PDB using RDKit use Molecule.from_pdb_and_smiles()\"\n            )\n            # TODO: See if we can implement PDB+mol/smi combinations to get complete bond information.\n            # testing to see if we can make a molecule from smiles and then use the PDB conformer as the geometry\n            # and just reorder the molecule\n            # https://github.com/openforcefield/openff-toolkit/issues/121\n            # rdmol = Chem.MolFromPDBFile(file_path, removeHs=False)\n            # mol = Molecule.from_rdkit(rdmol, _cls=_cls)\n            # mols.append(mol)\n        # TODO: Add SMI, TDT(?) support\n\n        return mols\n\n    def from_file_obj(\n        self, file_obj, file_format, allow_undefined_stereo=False, _cls=None\n    ):\n        \"\"\"\n        Return an openff.toolkit.topology.Molecule from a file-like object (an object with a \".read()\" method) using\n        this toolkit.\n\n        .. warning :: This API is experimental and subject to change.\n\n        Parameters\n        ----------\n        file_obj : file-like object\n            The file-like object to read the molecule from\n        file_format : str\n            Format specifier, usually file suffix (eg. 'MOL2', 'SMI')\n            Note that not all toolkits support all formats. 
Check ToolkitWrapper.toolkit_file_read_formats for details.\n allow_undefined_stereo : bool, default=False\n If false, raises an exception if oemol contains undefined stereochemistry.\n _cls : class\n Molecule constructor\n Returns\n -------\n molecules : Molecule or list of Molecules\n a list of Molecule objects is returned.\n\n \"\"\"\n from rdkit import Chem\n\n mols = []\n\n if (file_format == \"MOL\") or (file_format == \"SDF\"):\n # TODO: Iterate over all mols in file_data\n for rdmol in Chem.ForwardSDMolSupplier(file_obj):\n mol = self.from_rdkit(rdmol, _cls=_cls)\n mols.append(mol)\n\n if file_format == \"SMI\":\n # TODO: Find a cleaner way to parse SMILES lines\n file_data = file_obj.read()\n lines = [line.strip() for line in file_data.split(\"\\n\")]\n # remove blank lines\n lines.remove(\"\")\n for line in lines:\n mol = self.from_smiles(line, _cls=_cls)\n mols.append(mol)\n\n elif file_format == \"PDB\":\n raise Exception(\n \"RDKit can not safely read PDBs on their own. Information about bond order and aromaticity \"\n \"is likely to be lost. To read a PDB using RDKit use Molecule.from_pdb_and_smiles()\"\n )\n # TODO: See if we can implement PDB+mol/smi combinations to get complete bond information.\n # https://github.com/openforcefield/openff-toolkit/issues/121\n # file_data = file_obj.read()\n # rdmol = Chem.MolFromPDBBlock(file_data)\n # mol = Molecule.from_rdkit(rdmol, _cls=_cls)\n # mols.append(mol)\n # TODO: TDT file support\n return mols\n\n def to_file_obj(self, molecule, file_obj, file_format):\n \"\"\"\n Writes an OpenFF Molecule to a file-like object\n\n Parameters\n ----------\n molecule : an OpenFF Molecule\n The molecule to write\n file_obj\n The file-like object to write to\n file_format\n The format for writing the molecule data\n\n Returns\n -------\n\n \"\"\"\n\n file_format = file_format.upper()\n rdmol = self.to_rdkit(molecule)\n try:\n writer = self._toolkit_file_write_formats[file_format](file_obj)\n writer.write(rdmol)\n writer.close()\n # if we can not write to that file type catch the error here\n except KeyError:\n raise ValueError(\n f\"The requested file type ({file_format}) is not supported to be written using \"\n f\"RDKitToolkitWrapper.\"\n )\n\n def to_file(self, molecule, file_path, file_format):\n \"\"\"\n Writes an OpenFF Molecule to a file-like object\n\n Parameters\n ----------\n molecule : an OpenFF Molecule\n The molecule to write\n file_path\n The file path to write to\n file_format\n The format for writing the molecule data\n\n Returns\n ------\n\n \"\"\"\n\n # open a file object and pass to the object writer\n with open(file_path, \"w\") as file_obj:\n self.to_file_obj(\n molecule=molecule, file_obj=file_obj, file_format=file_format\n )\n\n def enumerate_stereoisomers(\n self, molecule, undefined_only=False, max_isomers=20, rationalise=True\n ):\n \"\"\"\n Enumerate the stereocenters and bonds of the current molecule.\n\n Parameters\n ----------\n molecule: openff.toolkit.topology.Molecule\n The molecule whose state we should enumerate\n\n undefined_only: bool optional, default=False\n If we should enumerate all stereocenters and bonds or only those with undefined stereochemistry\n\n max_isomers: int optional, default=20\n The maximum amount of molecules that should be returned\n\n rationalise: bool optional, default=True\n If we should try to build and rationalise the molecule to ensure it can exist\n\n Returns\n --------\n molecules: List[openff.toolkit.topology.Molecule]\n A list of openff.toolkit.topology.Molecule instances\n\n 
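Examples\n        --------\n\n        Enumerate the stereoisomers of 2-butene (an illustrative sketch; the\n        exact isomers returned can depend on the installed RDKit version):\n\n        >>> from openff.toolkit.topology import Molecule\n        >>> molecule = Molecule.from_smiles('CC=CC', allow_undefined_stereo=True)\n        >>> isomers = RDKitToolkitWrapper().enumerate_stereoisomers(molecule)\n\n        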
\"\"\"\n from rdkit import Chem\n from rdkit.Chem.EnumerateStereoisomers import (\n EnumerateStereoisomers,\n StereoEnumerationOptions,\n )\n\n # create the molecule\n rdmol = self.to_rdkit(molecule=molecule)\n\n # in case any bonds/centers are missing stereo chem flag it here\n Chem.AssignStereochemistry(\n rdmol, cleanIt=True, force=True, flagPossibleStereoCenters=True\n )\n Chem.FindPotentialStereoBonds(rdmol)\n\n # set up the options\n stereo_opts = StereoEnumerationOptions(\n tryEmbedding=rationalise,\n onlyUnassigned=undefined_only,\n maxIsomers=max_isomers,\n )\n\n isomers = tuple(EnumerateStereoisomers(rdmol, options=stereo_opts))\n\n molecules = []\n for isomer in isomers:\n # isomer has CIS/TRANS tags so convert back to E/Z\n Chem.SetDoubleBondNeighborDirections(isomer)\n Chem.AssignStereochemistry(isomer, force=True, cleanIt=True)\n mol = self.from_rdkit(isomer, _cls=molecule.__class__)\n if mol != molecule:\n molecules.append(mol)\n\n return molecules\n\n def enumerate_tautomers(self, molecule, max_states=20):\n \"\"\"\n Enumerate the possible tautomers of the current molecule.\n\n Parameters\n ----------\n molecule: openff.toolkit.topology.Molecule\n The molecule whose state we should enumerate\n\n max_states: int optional, default=20\n The maximum amount of molecules that should be returned\n\n Returns\n -------\n molecules: List[openff.toolkit.topology.Molecule]\n A list of openff.toolkit.topology.Molecule instances not including the input molecule.\n \"\"\"\n\n from rdkit import Chem\n from rdkit.Chem.MolStandardize import rdMolStandardize\n\n enumerator = rdMolStandardize.TautomerEnumerator()\n enumerator.SetMaxTautomers(max_states)\n rdmol = Chem.RemoveHs(molecule.to_rdkit())\n\n tautomers = enumerator.Enumerate(rdmol)\n\n # make a list of OpenFF molecules excluding the input molecule\n molecules = []\n for taut in tautomers:\n taut_hs = Chem.AddHs(taut)\n mol = self.from_smiles(\n Chem.MolToSmiles(taut_hs), allow_undefined_stereo=True\n )\n if mol != molecule:\n molecules.append(mol)\n\n return molecules[:max_states]\n\n def canonical_order_atoms(self, molecule):\n \"\"\"\n Canonical order the atoms in the molecule using the RDKit.\n\n Parameters\n ----------\n molecule: openff.toolkit.topology.Molecule\n The input molecule\n\n Returns\n -------\n molecule : openff.toolkit.topology.Molecule\n The input molecule, with canonically-indexed atoms and bonds.\n \"\"\"\n\n from rdkit import Chem\n\n rdmol = self.to_rdkit(molecule)\n\n # get the canonical ordering with hydrogens first\n # this is the default behaviour of RDKit\n atom_order = list(Chem.CanonicalRankAtoms(rdmol, breakTies=True))\n\n heavy_atoms = rdmol.GetNumHeavyAtoms()\n hydrogens = rdmol.GetNumAtoms() - heavy_atoms\n\n # now go through and change the rankings to get the heavy atoms first if hydrogens are present\n if hydrogens != 0:\n for i in range(len(atom_order)):\n if rdmol.GetAtomWithIdx(i).GetAtomicNum() != 1:\n atom_order[i] -= hydrogens\n else:\n atom_order[i] += heavy_atoms\n\n # make an atom mapping from the atom_order and remap the molecule\n atom_mapping = dict((i, rank) for i, rank in enumerate(atom_order))\n\n return molecule.remap(atom_mapping, current_to_new=True)\n\n def to_smiles(self, molecule, isomeric=True, explicit_hydrogens=True, mapped=False):\n \"\"\"\n Uses the RDKit toolkit to convert a Molecule into a SMILES string.\n A partially mapped smiles can also be generated for atoms of interest by supplying an `atom_map` to the\n properties dictionary.\n\n Parameters\n ----------\n molecule 
\"\"\"\n        from rdkit import Chem\n\n        rdmol = self.to_rdkit(molecule)\n\n        if not explicit_hydrogens:\n            # remove the hydrogens from the molecule\n            rdmol = Chem.RemoveHs(rdmol)\n\n        if mapped:\n            assert explicit_hydrogens is True, (\n                \"Mapped smiles require all hydrogens and \"\n                \"stereochemistry to be defined to retain order\"\n            )\n\n            # if we only want to map specific atoms check for an atom map\n            atom_map = molecule._properties.get(\"atom_map\", None)\n            if atom_map is not None:\n                # make sure there are no repeated indices\n                map_ids = set(atom_map.values())\n                if len(map_ids) < len(atom_map):\n                    atom_map = None\n                elif 0 in atom_map.values():\n                    # we need to increment the map index\n                    for atom, map in atom_map.items():\n                        atom_map[atom] = map + 1\n\n            if atom_map is None:\n                # now we need to add the indexing to the rdmol to get it in the smiles\n                for atom in rdmol.GetAtoms():\n                    # the mapping must start from 1, as RDKit uses 0 to represent no mapping.\n                    atom.SetAtomMapNum(atom.GetIdx() + 1)\n            else:\n                for atom in rdmol.GetAtoms():\n                    try:\n                        # try to set the atom map\n                        map_idx = atom_map[atom.GetIdx()]\n                        atom.SetAtomMapNum(map_idx)\n                    except KeyError:\n                        continue\n\n        return Chem.MolToSmiles(\n            rdmol, isomericSmiles=isomeric, allHsExplicit=explicit_hydrogens\n        )\n\n    def from_smiles(\n        self,\n        smiles,\n        hydrogens_are_explicit=False,\n        allow_undefined_stereo=False,\n        _cls=None,\n    ):\n        \"\"\"\n        Create a Molecule from a SMILES string using the RDKit toolkit.\n\n        .. warning :: This API is experimental and subject to change.\n\n        Parameters\n        ----------\n        smiles : str\n            The SMILES string to turn into a molecule\n        hydrogens_are_explicit : bool, default=False\n            If False, RDKit will perform hydrogen addition using Chem.AddHs\n        allow_undefined_stereo : bool, default=False\n            Whether to accept SMILES with undefined stereochemistry. 
If False,\n an exception will be raised if a SMILES with undefined stereochemistry\n is passed into this function.\n _cls : class\n Molecule constructor\n\n Returns\n -------\n molecule : openff.toolkit.topology.Molecule\n An OpenFF style molecule.\n \"\"\"\n from rdkit import Chem\n\n rdmol = Chem.MolFromSmiles(smiles, sanitize=False)\n # strip the atom map from the molecule if it has one\n # so we don't affect the sterochemistry tags\n for atom in rdmol.GetAtoms():\n if atom.GetAtomMapNum() != 0:\n # set the map back to zero but hide the index in the atom prop data\n atom.SetProp(\"_map_idx\", str(atom.GetAtomMapNum()))\n # set it back to zero\n atom.SetAtomMapNum(0)\n\n # Chem.SanitizeMol calls updatePropertyCache so we don't need to call it ourselves\n # https://www.rdkit.org/docs/cppapi/namespaceRDKit_1_1MolOps.html#a8d831787aaf2d65d9920c37b25b476f5\n Chem.SanitizeMol(\n rdmol,\n Chem.SANITIZE_ALL ^ Chem.SANITIZE_ADJUSTHS ^ Chem.SANITIZE_SETAROMATICITY,\n )\n Chem.SetAromaticity(rdmol, Chem.AromaticityModel.AROMATICITY_MDL)\n\n # Chem.MolFromSmiles adds bond directions (i.e. ENDDOWNRIGHT/ENDUPRIGHT), but\n # doesn't set bond.GetStereo(). We need to call AssignStereochemistry for that.\n Chem.AssignStereochemistry(rdmol)\n\n # Throw an exception/warning if there is unspecified stereochemistry.\n if not allow_undefined_stereo:\n self._detect_undefined_stereo(\n rdmol, err_msg_prefix=\"Unable to make OFFMol from SMILES: \"\n )\n\n # Add explicit hydrogens if they aren't there already\n if not hydrogens_are_explicit:\n rdmol = Chem.AddHs(rdmol)\n elif hydrogens_are_explicit:\n for atom_idx in range(rdmol.GetNumAtoms()):\n atom = rdmol.GetAtomWithIdx(atom_idx)\n if atom.GetNumImplicitHs() != 0:\n raise ValueError(\n f\"'hydrogens_are_explicit' was specified as True, but RDKit toolkit interpreted \"\n f\"SMILES '{smiles}' as having implicit hydrogen. If this SMILES is intended to \"\n f\"express all explicit hydrogens in the molecule, then you should construct the \"\n f\"desired molecule as an RDMol with no implicit hydrogens, and then use \"\n f\"Molecule.from_rdkit() to create the desired OFFMol.\"\n )\n\n molecule = self.from_rdkit(\n rdmol,\n _cls=_cls,\n allow_undefined_stereo=allow_undefined_stereo,\n hydrogens_are_explicit=hydrogens_are_explicit,\n )\n\n return molecule\n\n def from_inchi(self, inchi, allow_undefined_stereo=False, _cls=None):\n \"\"\"\n Construct a Molecule from a InChI representation\n\n Parameters\n ----------\n inchi : str\n The InChI representation of the molecule.\n\n allow_undefined_stereo : bool, default=False\n Whether to accept InChI with undefined stereochemistry. 
If False,\n an exception will be raised if a InChI with undefined stereochemistry\n is passed into this function.\n\n _cls : class\n Molecule constructor\n\n Returns\n -------\n molecule : openff.toolkit.topology.Molecule\n \"\"\"\n\n from rdkit import Chem\n\n # this seems to always remove the hydrogens\n rdmol = Chem.MolFromInchi(inchi, sanitize=False, removeHs=False)\n\n # try and catch an InChI parsing error\n if rdmol is None:\n raise RuntimeError(\n \"There was an issue parsing the InChI string, please check and try again.\"\n )\n\n # process the molecule\n # TODO do we need this with inchi?\n rdmol.UpdatePropertyCache(strict=False)\n Chem.SanitizeMol(\n rdmol,\n Chem.SANITIZE_ALL ^ Chem.SANITIZE_ADJUSTHS ^ Chem.SANITIZE_SETAROMATICITY,\n )\n Chem.SetAromaticity(rdmol, Chem.AromaticityModel.AROMATICITY_MDL)\n\n # add hydrogens back here\n rdmol = Chem.AddHs(rdmol)\n\n molecule = self.from_rdkit(\n rdmol, allow_undefined_stereo=allow_undefined_stereo, _cls=_cls\n )\n\n return molecule\n\n def generate_conformers(\n self, molecule, n_conformers=1, rms_cutoff=None, clear_existing=True, _cls=None\n ):\n \"\"\"\n Generate molecule conformers using RDKit.\n\n .. warning :: This API is experimental and subject to change.\n\n .. todo ::\n\n * which parameters should we expose? (or can we implement a general system with \\*\\*kwargs?)\n * will the coordinates be returned in the OpenFF Molecule's own indexing system? Or is there a chance that they'll get reindexed when we convert the input into an RDMol?\n\n Parameters\n ----------\n molecule : a :class:`Molecule`\n The molecule to generate conformers for.\n n_conformers : int, default=1\n Maximum number of conformers to generate.\n rms_cutoff : simtk.Quantity-wrapped float, in units of distance, optional, default=None\n The minimum RMS value at which two conformers are considered redundant and one is deleted.\n If None, the cutoff is set to 1 Angstrom\n\n clear_existing : bool, default=True\n Whether to overwrite existing conformers for the molecule.\n _cls : class\n Molecule constructor\n\n \"\"\"\n from rdkit.Chem import AllChem\n\n if rms_cutoff is None:\n rms_cutoff = 1.0 * unit.angstrom\n rdmol = self.to_rdkit(molecule)\n # TODO: This generates way more conformations than omega, given the same nConfs and RMS threshold. Is there some way to set an energy cutoff as well?\n AllChem.EmbedMultipleConfs(\n rdmol,\n numConfs=n_conformers,\n pruneRmsThresh=rms_cutoff / unit.angstrom,\n randomSeed=1,\n # params=AllChem.ETKDG()\n )\n molecule2 = self.from_rdkit(\n rdmol, allow_undefined_stereo=True, _cls=molecule.__class__\n )\n\n if clear_existing:\n molecule._conformers = list()\n\n for conformer in molecule2._conformers:\n molecule._add_conformer(conformer)\n\n def assign_partial_charges(\n self,\n molecule,\n partial_charge_method=None,\n use_conformers=None,\n strict_n_conformers=False,\n _cls=None,\n ):\n \"\"\"\n Compute partial charges with RDKit, and assign\n the new values to the partial_charges attribute.\n\n .. warning :: This API is experimental and subject to change.\n\n Parameters\n ----------\n molecule : openff.toolkit.topology.Molecule\n Molecule for which partial charges are to be computed\n partial_charge_method : str, optional, default=None\n The charge model to use. One of ['mmff94']. If None, 'mmff94' will be used.\n\n * 'mmff94': Applies partial charges using the Merck Molecular Force Field\n (MMFF). 
This method does not make use of conformers, and hence\n ``use_conformers`` and ``strict_n_conformers`` will not impact\n the partial charges produced.\n use_conformers : iterable of simtk.unit.Quantity-wrapped numpy arrays, each with shape (n_atoms, 3) and dimension of distance. Optional, default = None\n Coordinates to use for partial charge calculation. If None, an appropriate number of conformers will be generated.\n strict_n_conformers : bool, default=False\n Whether to raise an exception if an invalid number of conformers is provided for the given charge method.\n If this is False and an invalid number of conformers is found, a warning will be raised.\n _cls : class\n Molecule constructor\n\n Raises\n ------\n ChargeMethodUnavailableError if the requested charge method can not be handled by this toolkit\n\n ChargeCalculationError if the charge method is supported by this toolkit, but fails\n \"\"\"\n\n import numpy as np\n from rdkit.Chem import AllChem\n\n SUPPORTED_CHARGE_METHODS = {\"mmff94\"}\n\n if partial_charge_method is None:\n partial_charge_method = \"mmff94\"\n\n partial_charge_method = partial_charge_method.lower()\n\n if partial_charge_method not in SUPPORTED_CHARGE_METHODS:\n raise ChargeMethodUnavailableError(\n f\"partial_charge_method '{partial_charge_method}' is not available from RDKitToolkitWrapper. \"\n f\"Available charge methods are {list(SUPPORTED_CHARGE_METHODS)} \"\n )\n\n rdkit_molecule = molecule.to_rdkit()\n charges = None\n\n if partial_charge_method == \"mmff94\":\n\n mmff_properties = AllChem.MMFFGetMoleculeProperties(\n rdkit_molecule, \"MMFF94\"\n )\n charges = np.array(\n [\n mmff_properties.GetMMFFPartialCharge(i)\n for i in range(molecule.n_atoms)\n ]\n )\n\n molecule.partial_charges = charges * unit.elementary_charge\n\n @classmethod\n def _elf_is_problematic_conformer(\n cls, molecule: \"Molecule\", conformer: unit.Quantity\n ) -> Tuple[bool, Optional[str]]:\n \"\"\"A function which checks if a particular conformer is known to be problematic\n when computing ELF partial charges.\n\n Currently this includes conformers which:\n\n * contain a trans-COOH configuration. The trans conformer is discarded because\n it leads to strong electrostatic interactions when assigning charges, and these\n result in unreasonable charges. Downstream calculations have observed up to a\n 4 log unit error in water-octanol logP calculations when using charges assigned\n from trans conformers.\n\n Returns\n -------\n A tuple of a bool stating whether the conformer is problematic and, if it\n is, a string message explaing why. 
If the conformer is not problematic, the\n second return value will be none.\n \"\"\"\n from rdkit.Chem.rdMolTransforms import GetDihedralRad\n\n # Create a copy of the molecule which contains only this conformer.\n molecule_copy = copy.deepcopy(molecule)\n molecule_copy._conformers = [conformer]\n\n rdkit_molecule = molecule_copy.to_rdkit()\n\n # Check for trans-COOH configurations\n carboxylic_acid_matches = cls._find_smarts_matches(\n rdkit_molecule, \"[#6X3:2](=[#8:1])(-[#8X2H1:3]-[#1:4])\"\n )\n\n for match in carboxylic_acid_matches:\n\n dihedral_angle = GetDihedralRad(rdkit_molecule.GetConformer(0), *match)\n\n if dihedral_angle > np.pi / 2.0:\n # Discard the 'trans' conformer.\n return (\n True,\n \"Molecules which contain COOH functional groups in a trans \"\n \"configuration are discarded by the ELF method.\",\n )\n\n return False, None\n\n @classmethod\n def _elf_prune_problematic_conformers(\n cls, molecule: \"Molecule\"\n ) -> List[unit.Quantity]:\n \"\"\"A function which attempts to remove conformers which are known to be\n problematic when computing ELF partial charges.\n\n Currently this includes conformers which:\n\n * contain a trans-COOH configuration. These conformers ... TODO add reason.\n\n Notes\n -----\n * Problematic conformers are flagged by the\n ``RDKitToolkitWrapper._elf_is_problematic_conformer`` function.\n\n Returns\n -------\n The conformers to retain.\n \"\"\"\n\n valid_conformers = []\n\n for i, conformer in enumerate(molecule.conformers):\n\n is_problematic, reason = cls._elf_is_problematic_conformer(\n molecule, conformer\n )\n\n if is_problematic:\n logger.warning(f\"Discarding conformer {i}: {reason}\")\n else:\n valid_conformers.append(conformer)\n\n return valid_conformers\n\n @classmethod\n def _elf_compute_electrostatic_energy(\n cls, molecule: \"Molecule\", conformer: unit.Quantity\n ) -> float:\n \"\"\"Computes the 'electrostatic interaction energy' of a particular conformer\n of a molecule.\n\n The energy is computed as the sum of ``|q_i * q_j| * r_ij^-1`` over all pairs\n of atoms (i, j) excluding 1-2 and 1-3 terms, where q_i is the partial charge\n of atom i and r_ij the Euclidean distance between atoms i and j.\n\n Notes\n -----\n * The partial charges will be taken from the molecule directly.\n\n Parameters\n ----------\n molecule\n The molecule containing the partial charges.\n conformer\n The conformer to compute the energy of. 
This should be a unit-wrapped\n            numpy array with shape=(n_atoms, 3) with units compatible with angstroms.\n\n        Returns\n        -------\n        The electrostatic interaction energy in units of [e^2 / Angstrom].\n        \"\"\"\n\n        if molecule.partial_charges is None:\n            raise ValueError(\"The molecule has no partial charges assigned.\")\n\n        partial_charges = np.abs(\n            molecule.partial_charges.value_in_unit(unit.elementary_charge)\n        ).reshape(-1, 1)\n\n        # Build an exclusion list for 1-2 and 1-3 interactions.\n        excluded_pairs = {\n            *[(bond.atom1_index, bond.atom2_index) for bond in molecule.bonds],\n            *[\n                (angle[0].molecule_atom_index, angle[-1].molecule_atom_index)\n                for angle in molecule.angles\n            ],\n        }\n\n        # Build the distance matrix between all pairs of atoms.\n        coordinates = conformer.value_in_unit(unit.angstrom)\n\n        distances = np.sqrt(\n            np.sum(np.square(coordinates)[:, np.newaxis, :], axis=2)\n            - 2 * coordinates.dot(coordinates.T)\n            + np.sum(np.square(coordinates), axis=1)\n        )\n        # Zero the diagonal, where the squared self-distance can come out slightly\n        # negative (yielding NaN) due to precision issues\n        np.fill_diagonal(distances, 0.0)\n\n        inverse_distances = np.reciprocal(\n            distances, out=np.zeros_like(distances), where=~np.isclose(distances, 0.0)\n        )\n\n        # Multiply by the charge products.\n        charge_products = partial_charges @ partial_charges.T\n\n        for x, y in excluded_pairs:\n            charge_products[x, y] = 0.0\n            charge_products[y, x] = 0.0\n\n        interaction_energies = inverse_distances * charge_products\n\n        return 0.5 * interaction_energies.sum()\n\n    @classmethod\n    def _elf_compute_rms_matrix(cls, molecule: \"Molecule\") -> np.ndarray:\n        \"\"\"Computes the symmetric RMS matrix of all conformers in a molecule taking\n        only heavy atoms into account.\n\n        Parameters\n        ----------\n        molecule\n            The molecule containing the conformers.\n\n        Returns\n        -------\n        The RMS matrix with shape=(n_conformers, n_conformers).\n        \"\"\"\n\n        from rdkit import Chem\n        from rdkit.Chem import AllChem\n\n        rdkit_molecule: Chem.RWMol = Chem.RemoveHs(molecule.to_rdkit())\n\n        n_conformers = len(molecule.conformers)\n\n        conformer_ids = [conf.GetId() for conf in rdkit_molecule.GetConformers()]\n\n        # Compute the RMS matrix making sure to take into account any automorphism (e.g.\n        # a phenyl or nitro substituent flipped 180 degrees).\n        rms_matrix = np.zeros((n_conformers, n_conformers))\n\n        for i, j in itertools.combinations(conformer_ids, 2):\n\n            rms_matrix[i, j] = AllChem.GetBestRMS(\n                rdkit_molecule,\n                rdkit_molecule,\n                conformer_ids[i],\n                conformer_ids[j],\n            )\n\n        rms_matrix += rms_matrix.T\n        return rms_matrix\n\n    @classmethod\n    def _elf_select_diverse_conformers(\n        cls,\n        molecule: \"Molecule\",\n        ranked_conformers: List[unit.Quantity],\n        limit: int,\n        rms_tolerance: unit.Quantity,\n    ) -> List[unit.Quantity]:\n        \"\"\"Attempt to greedily select a specified number of conformers which are maximally\n        diverse.\n\n        The conformer with the lowest electrostatic energy (the first conformer in the\n        ``ranked_conformers`` list) is always chosen. After that, selection proceeds by:\n\n        a) selecting an un-selected conformer which is the most different from those\n           already selected, and whose RMS compared to each selected conformer is\n           greater than ``rms_tolerance``. 
Here, \"most different\" means the conformer\n           which has the largest sum of RMS with the selected conformers.\n\n        b) repeating a) until either ``limit`` number of conformers have been selected,\n           or there are no more distinct conformers to select from.\n\n        Notes\n        -----\n\n        * As the selection is greedy there is no guarantee that the selected conformers\n          will be optimally distinct, i.e. there may be other selections of conformers\n          which are more distinct.\n\n        Parameters\n        ----------\n        molecule\n            The molecule object which matches the conformers to select from.\n        ranked_conformers\n            A list of conformers to select from, ranked by their electrostatic\n            interaction energy (see ``_compute_electrostatic_energy``).\n        limit\n            The maximum number of conformers to select.\n        rms_tolerance\n            Conformers whose RMS is within this amount will be treated as identical and\n            the duplicate discarded.\n\n        Returns\n        -------\n        The selected list of conformers.\n        \"\"\"\n\n        # Compute the RMS between all pairs of conformers\n        molecule = copy.deepcopy(molecule)\n        molecule.conformers.clear()\n\n        for conformer in ranked_conformers:\n            molecule.add_conformer(conformer)\n\n        rms_matrix = cls._elf_compute_rms_matrix(molecule)\n\n        # Apply the greedy selection process.\n        closed_list = np.zeros(limit).astype(int)\n        closed_mask = np.zeros(rms_matrix.shape[0], dtype=bool)\n\n        n_selected = 1\n\n        for i in range(min(molecule.n_conformers, limit - 1)):\n\n            distances = rms_matrix[closed_list[: i + 1], :].sum(axis=0)\n\n            # Exclude already selected conformers or conformers which are too similar\n            # to those already selected.\n            closed_mask[\n                np.any(\n                    rms_matrix[closed_list[: i + 1], :]\n                    < rms_tolerance.value_in_unit(unit.angstrom),\n                    axis=0,\n                )\n            ] = True\n\n            if np.all(closed_mask):\n                # Stop if there are no more distinct conformers to select from.\n                break\n\n            distant_index = np.ma.array(distances, mask=closed_mask).argmax()\n            closed_list[i + 1] = distant_index\n\n            n_selected += 1\n\n        return [ranked_conformers[i.item()] for i in closed_list[:n_selected]]\n\n    def apply_elf_conformer_selection(\n        self,\n        molecule: \"Molecule\",\n        percentage: float = 2.0,\n        limit: int = 10,\n        rms_tolerance: unit.Quantity = 0.05 * unit.angstrom,\n    ):\n        \"\"\"Applies the `ELF method\n        <https://docs.eyesopen.com/toolkits/python/quacpactk/molchargetheory.html#elf-conformer-selection>`_\n        to select a set of diverse conformers which have minimal electrostatically\n        strongly interacting functional groups from a molecule's conformers.\n\n        The diverse conformer selection is performed by the ``_elf_select_diverse_conformers``\n        function, which attempts to greedily select conformers which are most distinct\n        according to their RMS.\n\n        Warnings\n        --------\n        * Although this function is inspired by the OpenEye ELF10 method, this\n          implementation may yield slightly different conformers due to potential\n          differences in this and the OE closed source implementation.\n\n        Notes\n        -----\n        * The input molecule should have a large set of conformers already\n          generated to select the ELF10 conformers from.\n        * The selected conformers will be retained in the `molecule.conformers` list\n          while unselected conformers will be discarded.\n        * Only heavy atoms are included when using the RMS to select diverse conformers.\n\n        See Also\n        --------\n        RDKitToolkitWrapper._elf_select_diverse_conformers\n\n        Parameters\n        ----------\n        molecule\n            The molecule which contains the set of conformers to select from.\n        percentage\n            The percentage of conformers with the lowest electrostatic interaction\n            
\n        limit\n            The maximum number of conformers to select.\n        rms_tolerance\n            Conformers whose RMS is within this amount will be treated as identical and\n            the duplicate discarded.\n        """\n\n        if molecule.n_conformers == 0:\n            return\n\n        # Copy the input molecule so we can directly perturb it within the method.\n        molecule_copy = copy.deepcopy(molecule)\n\n        # Prune any problematic conformers, such as trans-COOH configurations.\n        conformers = self._elf_prune_problematic_conformers(molecule_copy)\n
\n        if len(conformers) == 0:\n\n            raise ValueError(\n                "There were no conformers to select from after discarding conformers "\n                "which are known to be problematic when computing ELF partial charges. "\n                "Make sure to generate a diverse array of conformers before calling the "\n                "`RDKitToolkitWrapper.apply_elf_conformer_selection` method."\n            )\n
\n        # Generate a set of absolute MMFF94 partial charges for the molecule and use\n        # these to compute the electrostatic interaction energy of each conformer.\n        self.assign_partial_charges(molecule_copy, "mmff94")\n\n        conformer_energies = [\n            (\n                self._elf_compute_electrostatic_energy(molecule_copy, conformer),\n                conformer,\n            )\n            for conformer in conformers\n        ]\n
\n        # Rank the conformer energies and retain the `percentage`% with the lowest energies.\n        conformer_energies = sorted(conformer_energies, key=lambda x: x[0])\n        cutoff_index = max(1, int(len(conformer_energies) * percentage / 100.0))\n\n        low_energy_conformers = [\n            conformer for _, conformer in conformer_energies[:cutoff_index]\n        ]\n
\n        # Attempt to greedily select `limit` conformers which are maximally diverse.\n        diverse_conformers = self._elf_select_diverse_conformers(\n            molecule_copy, low_energy_conformers, limit, rms_tolerance\n        )\n\n        molecule._conformers = diverse_conformers\n
\n    def from_rdkit(\n        self,\n        rdmol,\n        allow_undefined_stereo=False,\n        hydrogens_are_explicit=False,\n        _cls=None,\n    ):\n        """\n        Create a Molecule from an RDKit molecule.\n\n        Requires the RDKit to be installed.\n\n        .. warning :: This API is experimental and subject to change.\n
\n        Parameters\n        ----------\n        rdmol : rdkit.Chem.Mol\n            An RDKit molecule\n        allow_undefined_stereo : bool, default=False\n            If False, raises an exception if rdmol contains undefined stereochemistry.\n        hydrogens_are_explicit : bool, default=False\n            If False, RDKit will perform hydrogen addition using Chem.AddHs\n        _cls : class\n            Molecule constructor\n
\n        Returns\n        -------\n        molecule : openff.toolkit.topology.Molecule\n            An OpenFF molecule\n
\n        Examples\n        --------\n\n        Create a molecule from an RDKit molecule\n\n        >>> from rdkit import Chem\n        >>> from openff.toolkit.tests.utils import get_data_file_path\n        >>> rdmol = Chem.MolFromMolFile(get_data_file_path('systems/monomers/ethanol.sdf'))\n\n        >>> toolkit_wrapper = RDKitToolkitWrapper()\n        >>> molecule = toolkit_wrapper.from_rdkit(rdmol)\n\n        """\n        from rdkit import Chem\n
\n        if _cls is None:\n            from openff.toolkit.topology.molecule import Molecule\n\n            _cls = Molecule\n\n        # Make a copy of the RDKit Mol as we'll need to change it (e.g. assign stereo).\n        rdmol = Chem.Mol(rdmol)\n\n        if not hydrogens_are_explicit:\n            rdmol = Chem.AddHs(rdmol, addCoords=True)\n
\n        # Sanitize the molecule. We handle aromaticity and chirality manually.
\n        # This SanitizeMol(...) calls cleanUp, updatePropertyCache, symmetrizeSSSR,\n        # assignRadicals, setConjugation, and setHybridization.\n        Chem.SanitizeMol(\n            rdmol,\n            (\n                Chem.SANITIZE_ALL\n                ^ Chem.SANITIZE_SETAROMATICITY\n                ^ Chem.SANITIZE_ADJUSTHS\n                ^ Chem.SANITIZE_CLEANUPCHIRALITY\n                ^ Chem.SANITIZE_KEKULIZE\n            ),\n        )\n        Chem.SetAromaticity(rdmol, Chem.AromaticityModel.AROMATICITY_MDL)\n        # SetAromaticity sets aromatic bonds to 1.5, but Molecule.bond_order is an\n        # integer (unlike fractional_bond_order) so we need the Kekule order.\n        Chem.Kekulize(rdmol)\n
\n        # Make sure the bond stereo tags are set before checking for\n        # undefined stereo. RDKit can figure out bond stereo from other\n        # information in the Mol object like bond direction properties.\n        # Do not overwrite eventual chiral tags provided by the user.\n        Chem.AssignStereochemistry(rdmol, cleanIt=False)\n
\n        # Check for undefined stereochemistry.\n        self._detect_undefined_stereo(\n            rdmol,\n            raise_warning=allow_undefined_stereo,\n            err_msg_prefix="Unable to make OFFMol from RDMol: ",\n        )\n
\n        # Create a new OpenFF Molecule\n        offmol = _cls()\n\n        # If RDMol has a title save it\n        if rdmol.HasProp("_Name"):\n            # raise Exception('{}'.format(rdmol.GetProp('name')))\n            offmol.name = rdmol.GetProp("_Name")\n        else:\n            offmol.name = ""\n
\n        # Store all properties\n        # TODO: Should there be an API point for storing properties?\n        properties = rdmol.GetPropsAsDict()\n        offmol._properties = properties\n
\n        # Setting chirality requires the neighbor atoms to be present, so we can't\n        # do it until after all of the atoms and bonds have been added.\n        map_atoms = {}\n        map_bonds = {}\n        # If we are loading from a mapped SMILES, extract the mapping.\n        atom_mapping = {}\n        for rda in rdmol.GetAtoms():\n            rd_idx = rda.GetIdx()\n            # If the molecule was made from a mapped SMILES, the map index has been\n            # hidden in the "_map_idx" property so that it does not affect the\n            # stereochemistry tags.\n            try:\n                map_id = int(rda.GetProp("_map_idx"))\n            except KeyError:\n                map_id = rda.GetAtomMapNum()\n
\n            # create a new atom\n            # atomic_number = oemol.NewAtom(rda.GetAtomicNum())\n            atomic_number = rda.GetAtomicNum()\n            formal_charge = rda.GetFormalCharge() * unit.elementary_charge\n            is_aromatic = rda.GetIsAromatic()\n            if rda.HasProp("_Name"):\n                name = rda.GetProp("_Name")\n            else:\n                # check for PDB names\n                try:\n                    name = rda.GetMonomerInfo().GetName().strip()\n                except AttributeError:\n                    name = ""\n
\n            # If chiral, store the chirality to be set later\n            stereochemistry = None\n            # tag = rda.GetChiralTag()\n            if rda.HasProp("_CIPCode"):\n                stereo_code = rda.GetProp("_CIPCode")\n                # if tag == Chem.CHI_TETRAHEDRAL_CCW:\n                if stereo_code == "R":\n                    stereochemistry = "R"\n                # if tag == Chem.CHI_TETRAHEDRAL_CW:\n                elif stereo_code == "S":\n                    stereochemistry = "S"\n                else:\n                    raise UndefinedStereochemistryError(\n                        "In from_rdkit: Expected atom stereochemistry of R or S. "\n                        "Got {} instead.".format(stereo_code)\n                    )\n
\n            atom_index = offmol._add_atom(\n                atomic_number,\n                formal_charge,\n                is_aromatic,\n                name=name,\n                stereochemistry=stereochemistry,\n            )\n            map_atoms[rd_idx] = atom_index\n            atom_mapping[atom_index] = map_id\n
\n        # If we have a full / partial atom map add it to the molecule.
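\n        # A value of 0 indicates no mapping.\n        # For illustration (hypothetical values; actual indices depend on the parse\n        # order): a molecule built from the mapped SMILES '[Cl:2][H:1]' would reach\n        # this point with atom_mapping == {0: 2, 1: 1}, i.e. a dict of\n        # OpenFF atom index -> map index.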
\n        if {*atom_mapping.values()} != {0}:\n\n            offmol._properties["atom_map"] = {\n                idx: map_idx for idx, map_idx in atom_mapping.items() if map_idx != 0\n            }\n
\n        # Similar to chirality, the stereochemistry of bonds is set relative to their neighbors\n        for rdb in rdmol.GetBonds():\n            rdb_idx = rdb.GetIdx()\n            a1 = rdb.GetBeginAtomIdx()\n            a2 = rdb.GetEndAtomIdx()\n
\n            # Determine bond aromaticity and Kekulized bond order\n            is_aromatic = rdb.GetIsAromatic()\n            order = rdb.GetBondTypeAsDouble()\n            # Convert floating-point bond order to integral bond order\n            order = int(order)\n
\n            # create a new bond\n            bond_index = offmol._add_bond(\n                map_atoms[a1], map_atoms[a2], order, is_aromatic\n            )\n            map_bonds[rdb_idx] = bond_index\n
\n        # Now fill in the cached (structure-dependent) properties. We have to have the\n        # 2D structure of the molecule in place first, because each call to add_atom\n        # and add_bond invalidates all cached properties.\n        for rdb in rdmol.GetBonds():\n            rdb_idx = rdb.GetIdx()\n            offb_idx = map_bonds[rdb_idx]\n            offb = offmol.bonds[offb_idx]\n            # determine if stereochemistry is needed\n            # Note that RDKit has 6 possible values of bond stereo: CIS, TRANS, E, Z, ANY, or NONE\n            # The logic below assumes that "ANY" and "NONE" mean the same thing.\n            stereochemistry = None\n            tag = rdb.GetStereo()\n            if tag == Chem.BondStereo.STEREOZ:\n                stereochemistry = "Z"\n            elif tag == Chem.BondStereo.STEREOE:\n                stereochemistry = "E"\n            elif tag == Chem.BondStereo.STEREOTRANS or tag == Chem.BondStereo.STEREOCIS:\n                raise ValueError(\n                    "Expected RDKit bond stereochemistry of E or Z, got {} instead".format(\n                        tag\n                    )\n                )\n            offb._stereochemistry = stereochemistry\n            fractional_bond_order = None\n            if rdb.HasProp("fractional_bond_order"):\n                fractional_bond_order = rdb.GetDoubleProp("fractional_bond_order")\n            offb.fractional_bond_order = fractional_bond_order\n
\n        # TODO: Save conformer(s), if present\n        # If the rdmol has a conformer, store its coordinates\n        if len(rdmol.GetConformers()) != 0:\n            for conf in rdmol.GetConformers():\n                n_atoms = offmol.n_atoms\n                # TODO: Will this always be angstrom when loading from RDKit?\n                positions = unit.Quantity(np.zeros((n_atoms, 3)), unit.angstrom)\n                for rd_idx, off_idx in map_atoms.items():\n                    atom_coords = conf.GetPositions()[rd_idx, :] * unit.angstrom\n                    positions[off_idx, :] = atom_coords\n                offmol._add_conformer(positions)\n
\n        partial_charges = unit.Quantity(\n            np.zeros(shape=offmol.n_atoms, dtype=np.float64),\n            unit=unit.elementary_charge,\n        )\n
\n        any_atom_has_partial_charge = False\n        for rd_idx, rd_atom in enumerate(rdmol.GetAtoms()):\n            off_idx = map_atoms[rd_idx]\n            if rd_atom.HasProp("PartialCharge"):\n                charge = rd_atom.GetDoubleProp("PartialCharge") * unit.elementary_charge\n                partial_charges[off_idx] = charge\n                any_atom_has_partial_charge = True\n            else:\n                # If some other atoms had partial charges but this one doesn't, raise an Exception\n                if any_atom_has_partial_charge:\n                    raise ValueError(\n                        "Some atoms in rdmol have partial charges, but others do not."\n                    )\n        if any_atom_has_partial_charge:\n            offmol.partial_charges = partial_charges\n        else:\n            offmol.partial_charges = None\n        return offmol\n
\n    @classmethod\n    def to_rdkit(cls, molecule, aromaticity_model=DEFAULT_AROMATICITY_MODEL):\n        """\n        Create an RDKit molecule\n\n        Requires the RDKit to be installed.
\n\n        .. warning :: This API is experimental and subject to change.\n
\n        Parameters\n        ----------\n        molecule : openff.toolkit.topology.Molecule\n            The molecule to convert to an RDKit molecule\n        aromaticity_model : str, optional, default=DEFAULT_AROMATICITY_MODEL\n            The aromaticity model to use\n
\n        Returns\n        -------\n        rdmol : rdkit.Chem.Mol\n            An RDKit molecule\n
\n        Examples\n        --------\n\n        Convert a molecule to RDKit\n\n        >>> from openff.toolkit.topology import Molecule\n        >>> ethanol = Molecule.from_smiles('CCO')\n        >>> rdmol = ethanol.to_rdkit()\n\n        """\n        from rdkit import Chem, Geometry\n
\n        # Create an editable RDKit molecule\n        rdmol = Chem.RWMol()\n\n        # Set name\n        # TODO: What is the best practice for how this should be named?\n        if not (molecule.name is None):\n            rdmol.SetProp("_Name", molecule.name)\n
\n        # TODO: Set other properties\n        for name, value in molecule.properties.items():\n            if type(value) == str:\n                rdmol.SetProp(name, value)\n            elif type(value) == int:\n                rdmol.SetIntProp(name, value)\n            elif type(value) == float:\n                rdmol.SetDoubleProp(name, value)\n            elif type(value) == bool:\n                rdmol.SetBoolProp(name, value)\n            else:\n                # Shove everything else into a string\n                rdmol.SetProp(name, str(value))\n
\n        _bondtypes = {\n            1: Chem.BondType.SINGLE,\n            1.5: Chem.BondType.AROMATIC,\n            2: Chem.BondType.DOUBLE,\n            3: Chem.BondType.TRIPLE,\n            4: Chem.BondType.QUADRUPLE,\n            5: Chem.BondType.QUINTUPLE,\n            6: Chem.BondType.HEXTUPLE,\n            7: Chem.BondType.ONEANDAHALF,\n        }\n
\n        for index, atom in enumerate(molecule.atoms):\n            rdatom = Chem.Atom(atom.atomic_number)\n            rdatom.SetFormalCharge(\n                atom.formal_charge.value_in_unit(unit.elementary_charge)\n            )\n            rdatom.SetIsAromatic(atom.is_aromatic)\n            rdatom.SetProp("_Name", atom.name)\n
\n            ## Stereo handling code moved to after bonds are added\n            if atom.stereochemistry == "S":\n                rdatom.SetChiralTag(Chem.CHI_TETRAHEDRAL_CW)\n            elif atom.stereochemistry == "R":\n                rdatom.SetChiralTag(Chem.CHI_TETRAHEDRAL_CCW)\n
\n            rd_index = rdmol.AddAtom(rdatom)\n\n            # Let's make sure all the atom indices in the two molecules\n            # are the same, otherwise we need to create an atom map.\n            assert index == atom.molecule_atom_index\n            assert index == rd_index\n
\n        for bond in molecule.bonds:\n            atom_indices = (\n                bond.atom1.molecule_atom_index,\n                bond.atom2.molecule_atom_index,\n            )\n            rdmol.AddBond(*atom_indices)\n            rdbond = rdmol.GetBondBetweenAtoms(*atom_indices)\n            if not (bond.fractional_bond_order is None):\n                rdbond.SetDoubleProp(\n                    "fractional_bond_order", bond.fractional_bond_order\n                )\n            # Assign bond type, which is based on order unless it is aromatic\n            if bond.is_aromatic:\n                rdbond.SetBondType(_bondtypes[1.5])\n                rdbond.SetIsAromatic(True)\n            else:\n                rdbond.SetBondType(_bondtypes[bond.bond_order])\n                rdbond.SetIsAromatic(False)\n
\n        Chem.SanitizeMol(\n            rdmol,\n            Chem.SANITIZE_ALL ^ Chem.SANITIZE_ADJUSTHS ^ Chem.SANITIZE_SETAROMATICITY,\n        )\n
\n        # Fix for aromaticity being lost\n        if aromaticity_model == "OEAroModel_MDL":\n            Chem.SetAromaticity(rdmol, Chem.AromaticityModel.AROMATICITY_MDL)\n        else:\n            raise ValueError(f"Aromaticity model {aromaticity_model} not recognized")\n
\n        # Assign atom stereochemistry and collect atoms for which RDKit\n        # can't figure out chirality. 
The _CIPCode property of these atoms\n # will be forcefully set to the stereo we want (see #196).\n undefined_stereo_atoms = {}\n for index, atom in enumerate(molecule.atoms):\n rdatom = rdmol.GetAtomWithIdx(index)\n\n # Skip non-chiral atoms.\n if atom.stereochemistry is None:\n continue\n\n # Let's randomly assign this atom's (local) stereo to CW\n # and check if this causes the (global) stereo to be set\n # to the desired one (S or R).\n rdatom.SetChiralTag(Chem.CHI_TETRAHEDRAL_CW)\n # We need to do force and cleanIt to recalculate CIP stereo.\n Chem.AssignStereochemistry(rdmol, force=True, cleanIt=True)\n # If our random initial assignment worked, then we're set.\n if (\n rdatom.HasProp(\"_CIPCode\")\n and rdatom.GetProp(\"_CIPCode\") == atom.stereochemistry\n ):\n continue\n\n # Otherwise, set it to CCW.\n rdatom.SetChiralTag(Chem.CHI_TETRAHEDRAL_CCW)\n # We need to do force and cleanIt to recalculate CIP stereo.\n Chem.AssignStereochemistry(rdmol, force=True, cleanIt=True)\n # Hopefully this worked, otherwise something's wrong\n if (\n rdatom.HasProp(\"_CIPCode\")\n and rdatom.GetProp(\"_CIPCode\") == atom.stereochemistry\n ):\n continue\n\n # Keep track of undefined stereo atoms. We'll force stereochemistry\n # at the end to avoid the next AssignStereochemistry to overwrite.\n if not rdatom.HasProp(\"_CIPCode\"):\n undefined_stereo_atoms[rdatom] = atom.stereochemistry\n continue\n\n # Something is wrong.\n err_msg = (\n \"Unknown atom stereochemistry encountered in to_rdkit. \"\n \"Desired stereochemistry: {}. Set stereochemistry {}\".format(\n atom.stereochemistry, rdatom.GetProp(\"_CIPCode\")\n )\n )\n raise RuntimeError(err_msg)\n\n # Copy bond stereo info from molecule to rdmol.\n cls._assign_rdmol_bonds_stereo(molecule, rdmol)\n\n # Set coordinates if we have them\n if molecule._conformers:\n for conformer in molecule._conformers:\n rdmol_conformer = Chem.Conformer()\n for atom_idx in range(molecule.n_atoms):\n x, y, z = conformer[atom_idx, :].value_in_unit(unit.angstrom)\n rdmol_conformer.SetAtomPosition(atom_idx, Geometry.Point3D(x, y, z))\n rdmol.AddConformer(rdmol_conformer, assignId=True)\n\n # Retain charges, if present\n if not (molecule._partial_charges is None):\n\n rdk_indexed_charges = np.zeros(shape=molecule.n_atoms, dtype=float)\n for atom_idx, charge in enumerate(molecule._partial_charges):\n charge_unitless = charge.value_in_unit(unit.elementary_charge)\n rdk_indexed_charges[atom_idx] = charge_unitless\n for atom_idx, rdk_atom in enumerate(rdmol.GetAtoms()):\n rdk_atom.SetDoubleProp(\"PartialCharge\", rdk_indexed_charges[atom_idx])\n\n # Note: We could put this outside the \"if\" statement, which would result in all partial charges in the\n # resulting file being set to \"n/a\" if they weren't set in the Open Force Field Toolkit ``Molecule``\n Chem.CreateAtomDoublePropertyList(rdmol, \"PartialCharge\")\n\n # Cleanup the rdmol\n rdmol.UpdatePropertyCache(strict=False)\n Chem.GetSSSR(rdmol)\n\n # Forcefully assign stereo information on the atoms that RDKit\n # can't figure out. 
This must be done last as calling AssignStereochemistry\n        # again will delete these properties (see #196).\n        for rdatom, stereochemistry in undefined_stereo_atoms.items():\n            rdatom.SetProp("_CIPCode", stereochemistry)\n\n        # Return non-editable version\n        return Chem.Mol(rdmol)\n
\n    def to_inchi(self, molecule, fixed_hydrogens=False):\n        """\n        Create an InChI string for the molecule using the RDKit Toolkit.\n        InChI is a standardised representation that does not capture tautomers\n        unless specified using the fixed hydrogen layer.\n\n        For information on InChI see https://iupac.org/who-we-are/divisions/division-details/inchi/\n
\n        Parameters\n        ----------\n        molecule : An openff.toolkit.topology.Molecule\n            The molecule to convert into an InChI.\n\n        fixed_hydrogens: bool, default=False\n            If a fixed hydrogen layer should be added to the InChI. If ``True`` this\n            will produce a non-standard, tautomer-specific InChI string of the molecule.\n
\n        Returns\n        --------\n        inchi: str\n            The InChI string of the molecule.\n        """\n\n        from rdkit import Chem\n\n        rdmol = self.to_rdkit(molecule)\n        if fixed_hydrogens:\n            inchi = Chem.MolToInchi(rdmol, options="-FixedH")\n        else:\n            inchi = Chem.MolToInchi(rdmol)\n        return inchi\n
\n    def to_inchikey(self, molecule, fixed_hydrogens=False):\n        """\n        Create an InChIKey for the molecule using the RDKit Toolkit.\n        InChIKey is a standardised representation that does not capture tautomers\n        unless specified using the fixed hydrogen layer.\n\n        For information on InChI see https://iupac.org/who-we-are/divisions/division-details/inchi/\n
\n        Parameters\n        ----------\n        molecule : An openff.toolkit.topology.Molecule\n            The molecule to convert into an InChIKey.\n\n        fixed_hydrogens: bool, default=False\n            If a fixed hydrogen layer should be added to the InChI. If ``True`` this\n            will produce a non-standard, tautomer-specific InChIKey of the molecule.\n
\n        Returns\n        --------\n        inchi_key: str\n            The InChIKey representation of the molecule.\n        """\n\n        from rdkit import Chem\n\n        rdmol = self.to_rdkit(molecule)\n        if fixed_hydrogens:\n            inchi_key = Chem.MolToInchiKey(rdmol, options="-FixedH")\n        else:\n            inchi_key = Chem.MolToInchiKey(rdmol)\n        return inchi_key\n
\n    def get_tagged_smarts_connectivity(self, smarts):\n        """\n        Returns a tuple of tuples indicating connectivity between tagged atoms in a SMARTS string. Does not\n        return bond order.\n
\n        Parameters\n        ----------\n        smarts : str\n            The tagged SMARTS to analyze\n
\n        Returns\n        -------\n        unique_tags : tuple of int\n            A sorted tuple of all unique tagged atom map indices.\n        tagged_atom_connectivity : tuple of tuples of int, shape n_tagged_bonds x 2\n            A tuple of tuples, where each inner tuple is a pair of tagged atoms (tag_idx_1, tag_idx_2) which are\n            bonded. The inner tuples are ordered smallest-to-largest, and the tuple of tuples is ordered
\n            lexically. So the return value for an improper torsion would be ((1, 2), (2, 3), (2, 4)).\n
\n        Raises\n        ------\n        SMIRKSParsingError\n            If RDKit was unable to parse the provided smirks/tagged smarts\n        """\n        from rdkit import Chem\n\n        from openff.toolkit.typing.chemistry import SMIRKSParsingError\n\n        ss = Chem.MolFromSmarts(smarts)\n\n        if ss is None:\n            raise SMIRKSParsingError(f"RDKit was unable to parse SMIRKS {smarts}")\n
\n        unique_tags = set()\n        connections = set()\n        for at1 in ss.GetAtoms():\n            if at1.GetAtomMapNum() == 0:\n                continue\n            unique_tags.add(at1.GetAtomMapNum())\n            for at2 in at1.GetNeighbors():\n                if at2.GetAtomMapNum() == 0:\n                    continue\n                cxn_to_add = sorted([at1.GetAtomMapNum(), at2.GetAtomMapNum()])\n                connections.add(tuple(cxn_to_add))\n        connections = tuple(sorted(list(connections)))\n        unique_tags = tuple(sorted(list(unique_tags)))\n        return unique_tags, connections\n
\n    @staticmethod\n    def _find_smarts_matches(rdmol, smirks, aromaticity_model="OEAroModel_MDL"):\n        """Find all sets of atoms in the provided RDKit molecule that match the provided SMARTS string.\n
\n        Parameters\n        ----------\n        rdmol : rdkit.Chem.Mol\n            rdmol to process with the SMIRKS in order to find matches\n        smirks : str\n            SMARTS string with any number of sequentially tagged atoms.\n            If there are N tagged atoms numbered 1..N, the resulting matches will be N-tuples of atoms that match the corresponding tagged atoms.\n        aromaticity_model : str, optional, default='OEAroModel_MDL'\n            OpenEye aromaticity model designation as a string, such as ``OEAroModel_MDL``.\n            Molecule is prepared with this aromaticity model prior to querying.\n
\n        Returns\n        -------\n        matches : list of tuples of atoms indices within the ``rdmol``\n            matches[index] is an N-tuple of atom numbers from the ``rdmol``\n            Matches are returned in no guaranteed order.\n            # TODO: What is returned if no matches are found? An empty list, or None?\n            # TODO: Ensure that SMARTS numbers 1, 2, 3... are rendered into order of returned matches indexed by 0, 1, 2...\n
\n        .. notes ::\n\n           * Raises ``ValueError`` if ``smarts`` query is malformed\n\n        """\n        from rdkit import Chem\n
\n        # Make a copy of the molecule\n        rdmol = Chem.Mol(rdmol)\n        # Use designated aromaticity model\n        if aromaticity_model == "OEAroModel_MDL":\n            Chem.SanitizeMol(rdmol, Chem.SANITIZE_ALL ^ Chem.SANITIZE_SETAROMATICITY)\n            Chem.SetAromaticity(rdmol, Chem.AromaticityModel.AROMATICITY_MDL)\n        else:\n            # Only the OEAroModel_MDL is supported for now\n            raise ValueError("Unknown aromaticity model: {}".format(aromaticity_model))\n
\n        # Set up query.\n        qmol = Chem.MolFromSmarts(smirks)  # cannot catch the error\n        if qmol is None:\n            raise ValueError(\n                'RDKit could not parse the SMIRKS string "{}"'.format(smirks)\n            )\n
\n        # Create atom mapping for query molecule\n        idx_map = dict()\n        for atom in qmol.GetAtoms():\n            smirks_index = atom.GetAtomMapNum()\n            if smirks_index != 0:\n                idx_map[smirks_index - 1] = atom.GetIdx()\n        map_list = [idx_map[x] for x in sorted(idx_map)]\n
\n        # Perform matching\n        matches = list()\n\n        # choose the largest unsigned int without overflow\n        # since the C++ signature is a uint\n        max_matches = np.iinfo(np.uintc).max\n        for match in rdmol.GetSubstructMatches(\n            qmol, uniquify=False, maxMatches=max_matches, useChirality=True\n        ):\n            mas = [match[x] for x in map_list]\n            matches.append(tuple(mas))\n\n        return matches\n
\n    def find_smarts_matches(self, molecule, smarts, aromaticity_model="OEAroModel_MDL"):\n        """\n        Find all SMARTS matches for the specified molecule, using the specified aromaticity model.\n
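\n        For illustration, a minimal sketch (the atom indices shown are illustrative\n        and depend on atom ordering): matching the tagged SMARTS ``'[#6:1]-[#8:2]'``\n        against ethanol should return one tuple per carbon-oxygen single bond,\n        ordered as (tagged atom 1, tagged atom 2):\n\n        >>> from openff.toolkit.topology import Molecule\n        >>> molecule = Molecule.from_smiles('CCO')\n        >>> RDKitToolkitWrapper().find_smarts_matches(molecule, '[#6:1]-[#8:2]')\n        [(1, 2)]\n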
\n        .. warning :: This API is experimental and subject to change.\n
\n        Parameters\n        ----------\n        molecule : openff.toolkit.topology.Molecule\n            The molecule for which all specified SMARTS matches are to be located\n        smarts : str\n            SMARTS string with optional SMIRKS-style atom tagging\n        aromaticity_model : str, optional, default='OEAroModel_MDL'\n            Molecule is prepared with this aromaticity model prior to querying.\n
\n        .. note :: Currently, the only supported ``aromaticity_model`` is ``OEAroModel_MDL``\n\n        """\n        rdmol = self.to_rdkit(molecule, aromaticity_model=aromaticity_model)\n        return self._find_smarts_matches(\n            rdmol, smarts, aromaticity_model="OEAroModel_MDL"\n        )\n
\n    # --------------------------------\n    # Stereochemistry RDKit utilities.\n    # --------------------------------\n
\n    def find_rings(self, molecule):\n        """Find the rings in a given molecule.\n\n        .. note ::\n\n            For systems containing some special cases of connected rings, this\n            function may not be well-behaved and may report a different number of\n            rings than expected. Some problematic cases include networks of many\n            (5+) rings or bicyclic moieties (e.g. norbornane).\n
\n        Parameters\n        ----------\n        molecule : openff.toolkit.topology.Molecule\n            The molecule for which rings are to be found\n
\n        Returns\n        -------\n        rings : tuple of tuples of atom indices\n            Nested tuples, each containing the indices of atoms in each ring\n\n        """\n        rdmol = molecule.to_rdkit()\n        ring_info = rdmol.GetRingInfo()\n        rings = ring_info.AtomRings()\n        return rings\n
\n    @staticmethod\n    def _find_undefined_stereo_atoms(rdmol, assign_stereo=False):\n        """Find the chiral atoms with undefined stereochemistry in the RDMol.\n
\n        Parameters\n        ----------\n        rdmol : rdkit.RDMol\n            The RDKit molecule.\n        assign_stereo : bool, optional, default=False\n            As a side effect, this function calls ``Chem.AssignStereochemistry()``
\n            so by default we work on a molecule copy. Set this to ``True`` to avoid\n            making a copy and assigning the stereochemistry to the Mol object.\n
\n        Returns\n        -------\n        undefined_atom_indices : List[int]\n            A list of atom indices that are chiral centers with undefined\n            stereochemistry.\n
\n        See Also\n        --------\n        rdkit.Chem.FindMolChiralCenters\n\n        """\n        from rdkit import Chem\n\n        if not assign_stereo:\n            # Avoid modifying the original molecule.\n            rdmol = copy.deepcopy(rdmol)\n
\n        # Flag possible chiral centers with the "_ChiralityPossible" property.\n        Chem.AssignStereochemistry(rdmol, force=True, flagPossibleStereoCenters=True)\n
\n        # Find all atoms with undefined stereo.\n        undefined_atom_indices = []\n        for atom_idx, atom in enumerate(rdmol.GetAtoms()):\n            if atom.GetChiralTag() == Chem.ChiralType.CHI_UNSPECIFIED and atom.HasProp(\n                "_ChiralityPossible"\n            ):\n                undefined_atom_indices.append(atom_idx)\n        return undefined_atom_indices\n
\n    @staticmethod\n    def _find_undefined_stereo_bonds(rdmol):\n        """Find the bonds with undefined stereochemistry in the RDMol.\n
\n        Parameters\n        ----------\n        rdmol : rdkit.RDMol\n            The RDKit molecule.\n
\n        Returns\n        -------\n        undefined_bond_indices : List[int]\n            A list of bond indices with undefined stereochemistry.\n
\n        See Also\n        --------\n        Chem.EnumerateStereoisomers._getFlippers\n
\n        Links\n        -----\n        https://github.com/rdkit/rdkit/blob/master/Code/GraphMol/Chirality.cpp#L1509-L1515\n            This comment in FindPotentialStereoBonds mentions that the method\n            ignores ring bonds.\n        https://github.com/DrrDom/rdk/blob/master/gen_stereo_rdkit3.py\n            The function get_unspec_double_bonds() in this module looks like it\n            may solve the problem with the rings.\n\n        """\n        from rdkit import Chem\n
\n        # Copy the molecule to avoid side effects. Chem.FindPotentialStereoBonds\n        # assigns Bond.STEREOANY to unspecified bonds, which makes subsequent calls\n        # of Chem.AssignStereochemistry ignore the bond even if there are\n        # ENDDOWNRIGHT/ENDUPRIGHT bond direction indications.\n        rdmol_copy = copy.deepcopy(rdmol)\n
\n        # Clear any previous assignments on the bonds, since FindPotentialStereoBonds\n        # may not overwrite them.\n        for bond in rdmol_copy.GetBonds():\n            bond.SetStereo(Chem.BondStereo.STEREONONE)\n
\n        # This function assigns Bond.GetStereo() == Bond.STEREOANY to bonds with\n        # possible stereochemistry.\n        Chem.FindPotentialStereoBonds(rdmol_copy, cleanIt=True)\n
\n        # Any TRULY stereogenic bonds in the molecule are now marked as STEREOANY in rdmol_copy.\n        # Iterate through all the bonds, and for the ones where rdmol_copy is marked as STEREOANY,\n        # ensure that they are cis/trans/E/Z (tested here by ensuring that they're NOT either\n        # of the other possible types (NONE or ANY)).\n        undefined_bond_indices = []\n        for bond_idx, (orig_bond, reperceived_bond) in enumerate(\n            zip(rdmol.GetBonds(), rdmol_copy.GetBonds())\n        ):\n            # print(reperceived_bond.GetStereo(), orig_bond.GetStereo())\n            if (reperceived_bond.GetStereo() == Chem.BondStereo.STEREOANY) and (\n                (orig_bond.GetStereo() == Chem.BondStereo.STEREOANY)\n                or (orig_bond.GetStereo() == Chem.BondStereo.STEREONONE)\n            ):\n                undefined_bond_indices.append(bond_idx)\n        return undefined_bond_indices\n
\n    @classmethod\n    def _detect_undefined_stereo(cls, rdmol, err_msg_prefix="", raise_warning=False):\n        """Raise UndefinedStereochemistryError if the RDMol has undefined stereochemistry.\n
\n        Parameters\n        ----------\n        rdmol : rdkit.Chem.Mol\n            The RDKit molecule.\n        err_msg_prefix : str, optional\n            A string to prepend to the error/warning message.\n        raise_warning : bool, optional, default=False
\n            If True, a warning is issued instead of an exception.\n
\n        Raises\n        ------\n        UndefinedStereochemistryError\n            If the RDMol has undefined atom or bond stereochemistry.\n\n        """\n        # Find undefined atom/bond stereochemistry.\n        undefined_atom_indices = cls._find_undefined_stereo_atoms(rdmol)\n        undefined_bond_indices = cls._find_undefined_stereo_bonds(rdmol)\n
\n        # Build error message.\n        if len(undefined_atom_indices) == 0 and len(undefined_bond_indices) == 0:\n            msg = None\n        else:\n            msg = err_msg_prefix + "RDMol has unspecified stereochemistry. "\n            # The "_Name" property is not always assigned.\n            if rdmol.HasProp("_Name"):\n                msg += "RDMol name: " + rdmol.GetProp("_Name") + "\\n"\n
\n            # Details about undefined atoms.\n            if len(undefined_atom_indices) > 0:\n                msg += "Undefined chiral centers are:\\n"\n                for undefined_atom_idx in undefined_atom_indices:\n                    msg += " - Atom {symbol} (index {index})\\n".format(\n                        symbol=rdmol.GetAtomWithIdx(undefined_atom_idx).GetSymbol(),\n                        index=undefined_atom_idx,\n                    )\n
\n            # Details about undefined bonds.\n            if len(undefined_bond_indices) > 0:\n                msg += "Bonds with undefined stereochemistry are:\\n"\n                for undefined_bond_idx in undefined_bond_indices:\n                    bond = rdmol.GetBondWithIdx(undefined_bond_idx)\n                    atom1, atom2 = bond.GetBeginAtom(), bond.GetEndAtom()\n                    msg += " - Bond {bindex} (atoms {aindex1}-{aindex2} of elements {symbol1}-{symbol2})\\n".format(\n                        bindex=undefined_bond_idx,\n                        aindex1=atom1.GetIdx(),\n                        aindex2=atom2.GetIdx(),\n                        symbol1=atom1.GetSymbol(),\n                        symbol2=atom2.GetSymbol(),\n                    )\n
\n        if msg is not None:\n            if raise_warning:\n                msg = "Warning (not error because allow_undefined_stereo=True): " + msg\n                logger.warning(msg)\n            else:\n                msg = "Unable to make OFFMol from RDMol: " + msg\n                raise UndefinedStereochemistryError(msg)\n
\n    @staticmethod\n    def _flip_rdbond_direction(rdbond, paired_rdbonds):\n        """Flip the rdbond and all those paired to it.\n
\n        Parameters\n        ----------\n        rdbond : rdkit.Chem.Bond\n            The Bond whose direction needs to be flipped.\n        paired_rdbonds : Dict[Tuple[int], List[rdkit.Chem.Bond]]\n            Maps bond atom indices that are assigned a bond direction to\n            the bonds on the other side of the double bond.\n        """\n        from rdkit import Chem\n
\n        # The function assumes that all bonds are either up or down.\n        supported_directions = {Chem.BondDir.ENDUPRIGHT, Chem.BondDir.ENDDOWNRIGHT}\n
\n        def _flip(b, paired, flipped, ignored):\n            # The function assumes that all bonds are either up or down.\n            assert b.GetBondDir() in supported_directions\n            bond_atom_indices = (b.GetBeginAtomIdx(), b.GetEndAtomIdx())\n
\n            # Check that we haven't flipped this bond already.\n            if bond_atom_indices in flipped:\n                # This should never happen.\n                raise RuntimeError("Cannot flip the bond direction consistently.")\n
\n            # Flip the bond.\n            if b.GetBondDir() == Chem.BondDir.ENDUPRIGHT:\n                b.SetBondDir(Chem.BondDir.ENDDOWNRIGHT)\n            else:\n                b.SetBondDir(Chem.BondDir.ENDUPRIGHT)\n            flipped.add(bond_atom_indices)\n
\n            # Flip all the paired bonds as well (if there are any).\n            if bond_atom_indices in paired:\n                for paired_rdbond in paired[bond_atom_indices]:\n                    # Don't flip the bond that was flipped in the upper-level recursion.\n                    if (\n                        paired_rdbond.GetBeginAtomIdx(),\n                        paired_rdbond.GetEndAtomIdx(),\n                    ) != ignored:\n                        # Don't flip this bond in the next recursion.\n                        _flip(paired_rdbond, paired, flipped, ignored=bond_atom_indices)\n
\n        _flip(rdbond, paired_rdbonds, flipped=set(), ignored=None)\n
\n    @classmethod\n    def _assign_rdmol_bonds_stereo(cls, offmol, rdmol):
\n        """Copy the info about bond stereochemistry from the OFF Molecule to the RDKit Mol."""\n        from rdkit import Chem\n
\n        # Map the bond indices that are assigned a bond direction\n        # to the bonds on the other side of the double bond.\n        # (atom_index1, atom_index2) -> List[rdkit.Chem.Bond]\n        paired_bonds = {}\n
\n        for bond in offmol.bonds:\n            # No need to do anything with bonds without stereochemistry.\n            if not bond.stereochemistry:\n                continue\n
\n            # Isolate stereo RDKit bond object.\n            rdbond_atom_indices = (\n                bond.atom1.molecule_atom_index,\n                bond.atom2.molecule_atom_index,\n            )\n            stereo_rdbond = rdmol.GetBondBetweenAtoms(*rdbond_atom_indices)\n
\n            # Collect all neighboring rdbonds of atom1 and atom2.\n            neighbor_rdbonds1 = [\n                rdmol.GetBondBetweenAtoms(\n                    n.molecule_atom_index, bond.atom1.molecule_atom_index\n                )\n                for n in bond.atom1.bonded_atoms\n                if n != bond.atom2\n            ]\n            neighbor_rdbonds2 = [\n                rdmol.GetBondBetweenAtoms(\n                    bond.atom2.molecule_atom_index, n.molecule_atom_index\n                )\n                for n in bond.atom2.bonded_atoms\n                if n != bond.atom1\n            ]\n
\n            # Select only 1 neighbor bond per atom out of the two.\n            neighbor_rdbonds = []\n            for i, rdbonds in enumerate([neighbor_rdbonds1, neighbor_rdbonds2]):\n                # If there are no neighbors for which we have already\n                # assigned the bond direction, just pick the first one.\n                neighbor_rdbonds.append(rdbonds[0])\n                # Otherwise, pick a neighbor that was already assigned to\n                # avoid inconsistencies and keep the tree non-cyclic.\n                for rdb in rdbonds:\n                    if (rdb.GetBeginAtomIdx(), rdb.GetEndAtomIdx()) in paired_bonds:\n                        neighbor_rdbonds[i] = rdb\n                        break\n
\n            # Assign a random direction to the bonds that were not already assigned,\n            # keeping track of which bond would be best to flip later (i.e. those that\n            # are not already determining the stereochemistry of another double bond).\n            flipped_rdbond = neighbor_rdbonds[0]\n            for rdb in neighbor_rdbonds:\n                if (rdb.GetBeginAtomIdx(), rdb.GetEndAtomIdx()) not in paired_bonds:\n                    rdb.SetBondDir(Chem.BondDir.ENDUPRIGHT)\n                    # Set this bond as a possible bond to flip.\n                    flipped_rdbond = rdb\n
\n            Chem.AssignStereochemistry(rdmol, cleanIt=True, force=True)\n
\n            # Verify that the current directions give us the desired stereochemistries.\n            assert bond.stereochemistry in {"E", "Z"}\n            if bond.stereochemistry == "E":\n                desired_rdk_stereo_code = Chem.rdchem.BondStereo.STEREOE\n            else:\n                desired_rdk_stereo_code = Chem.rdchem.BondStereo.STEREOZ\n
\n            # If that doesn't work, flip the direction of one bond preferring\n            # those that are not already determining the stereo of another bond.\n            if stereo_rdbond.GetStereo() != desired_rdk_stereo_code:\n                cls._flip_rdbond_direction(flipped_rdbond, paired_bonds)\n                Chem.AssignStereochemistry(rdmol, cleanIt=True, force=True)\n
\n                # The stereo should be set correctly here.\n                assert stereo_rdbond.GetStereo() == desired_rdk_stereo_code\n
\n            # Update paired bonds map.\n            neighbor_bond_indices = [\n                (rdb.GetBeginAtomIdx(), rdb.GetEndAtomIdx()) for rdb in neighbor_rdbonds\n            ]\n            for i, bond_indices in enumerate(neighbor_bond_indices):\n                try:\n                    paired_bonds[bond_indices].append(neighbor_rdbonds[1 - i])\n                except KeyError:\n                    paired_bonds[bond_indices] = [neighbor_rdbonds[1 - i]]\n
\n\nclass AmberToolsToolkitWrapper(ToolkitWrapper):\n    """\n    AmberTools toolkit wrapper
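\n\n    A brief usage sketch (illustrative only; AmberTools and the RDKit must both be\n    installed, since file I/O goes through the ``RDKitToolkitWrapper``):\n\n    >>> from openff.toolkit.topology import Molecule\n    >>> molecule = Molecule.from_smiles('CCO')\n    >>> toolkit_wrapper = AmberToolsToolkitWrapper()  # doctest: +SKIP\n    >>> toolkit_wrapper.assign_partial_charges(molecule, partial_charge_method='am1-mulliken')  # doctest: +SKIP\n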
\n    .. warning :: This API is experimental and subject to change.\n    """\n\n    _toolkit_name = "AmberTools"\n    _toolkit_installation_instructions = (\n        "The AmberTools toolkit (free and open source) can be found at "\n        "https://anaconda.org/conda-forge/ambertools"\n    )\n
\n    def __init__(self):\n        super().__init__()\n\n        self._toolkit_file_read_formats = []\n        self._toolkit_file_write_formats = []\n\n        if not self.is_available():\n            raise ToolkitUnavailableException(\n                f"The required toolkit {self._toolkit_name} is not "\n                f"available. {self._toolkit_installation_instructions}"\n            )\n
\n        # TODO: More reliable way to extract AmberTools version\n        out = subprocess.check_output(["antechamber", "-L"])\n        ambertools_version = out.decode("utf-8").split("\\n")[1].split()[3].strip(":")\n        self._toolkit_version = ambertools_version\n
\n        # TODO: Find AMBERHOME or executable home, checking miniconda if needed\n        # Store an instance of an RDKitToolkitWrapper for file I/O\n        self._rdkit_toolkit_wrapper = RDKitToolkitWrapper()\n
\n    @staticmethod\n    def is_available():\n        """\n        Check whether the AmberTools toolkit is installed\n\n        Returns\n        -------\n        is_installed : bool\n            True if AmberTools is installed, False otherwise.\n\n        """\n        # TODO: Check all tools needed\n        # TODO: How should we implement find_executable?\n        ANTECHAMBER_PATH = find_executable("antechamber")\n        if ANTECHAMBER_PATH is None:\n            return False\n        # AmberToolsToolkitWrapper needs RDKit to do basically anything, since its interface requires SDF I/O\n        if not (RDKitToolkitWrapper.is_available()):\n            return False\n        return True\n
\n    def assign_partial_charges(\n        self,\n        molecule,\n        partial_charge_method=None,\n        use_conformers=None,\n        strict_n_conformers=False,\n        _cls=None,\n    ):\n        """\n        Compute partial charges with AmberTools using antechamber/sqm, and assign\n        the new values to the partial_charges attribute.\n
\n        .. warning :: This API is experimental and subject to change.\n
\n        .. todo ::\n\n           * Do we want to also allow ESP/RESP charges?\n
\n        Parameters\n        ----------\n        molecule : openff.toolkit.topology.Molecule\n            Molecule for which partial charges are to be computed\n        partial_charge_method : str, optional, default=None\n            The charge model to use. One of ['gasteiger', 'am1bcc', 'am1-mulliken']. If None, 'am1-mulliken' will be used.\n        use_conformers : iterable of simtk.unit.Quantity-wrapped numpy arrays, each with shape (n_atoms, 3) and dimension of distance. 
Optional, default = None\n List of (n_atoms x 3) simtk.unit.Quantities to use for partial charge calculation.\n If None, an appropriate number of conformers will be generated.\n strict_n_conformers : bool, default=False\n Whether to raise an exception if an invalid number of conformers is provided for the given charge method.\n If this is False and an invalid number of conformers is found, a warning will be raised.\n _cls : class\n Molecule constructor\n\n Raises\n ------\n ChargeMethodUnavailableError if the requested charge method can not be handled by this toolkit\n\n ChargeCalculationError if the charge method is supported by this toolkit, but fails\n \"\"\"\n\n import os\n import subprocess\n\n from openff.toolkit.topology import Molecule\n\n if partial_charge_method is None:\n partial_charge_method = \"am1-mulliken\"\n else:\n # Standardize method name for string comparisons\n partial_charge_method = partial_charge_method.lower()\n\n SUPPORTED_CHARGE_METHODS = {\n \"am1bcc\": {\n \"antechamber_keyword\": \"bcc\",\n \"min_confs\": 1,\n \"max_confs\": 1,\n \"rec_confs\": 1,\n },\n \"am1-mulliken\": {\n \"antechamber_keyword\": \"mul\",\n \"min_confs\": 1,\n \"max_confs\": 1,\n \"rec_confs\": 1,\n },\n \"gasteiger\": {\n \"antechamber_keyword\": \"gas\",\n \"min_confs\": 0,\n \"max_confs\": 0,\n \"rec_confs\": 0,\n },\n }\n\n if partial_charge_method not in SUPPORTED_CHARGE_METHODS:\n raise ChargeMethodUnavailableError(\n f\"partial_charge_method '{partial_charge_method}' is not available from AmberToolsToolkitWrapper. \"\n f\"Available charge methods are {list(SUPPORTED_CHARGE_METHODS.keys())} \"\n )\n\n charge_method = SUPPORTED_CHARGE_METHODS[partial_charge_method]\n\n if _cls is None:\n from openff.toolkit.topology.molecule import Molecule\n\n _cls = Molecule\n\n # Make a temporary copy of the molecule, since we'll be messing with its conformers\n mol_copy = _cls(molecule)\n\n if use_conformers is None:\n if charge_method[\"rec_confs\"] == 0:\n mol_copy._conformers = None\n else:\n mol_copy.generate_conformers(\n n_conformers=charge_method[\"rec_confs\"],\n rms_cutoff=0.25 * unit.angstrom,\n toolkit_registry=RDKitToolkitWrapper(),\n )\n # TODO: What's a \"best practice\" RMS cutoff to use here?\n else:\n mol_copy._conformers = None\n for conformer in use_conformers:\n mol_copy._add_conformer(conformer)\n self._check_n_conformers(\n mol_copy,\n partial_charge_method=partial_charge_method,\n min_confs=charge_method[\"min_confs\"],\n max_confs=charge_method[\"max_confs\"],\n strict_n_conformers=strict_n_conformers,\n )\n\n # Find the path to antechamber\n # TODO: How should we implement find_executable?\n ANTECHAMBER_PATH = find_executable(\"antechamber\")\n if ANTECHAMBER_PATH is None:\n raise AntechamberNotFoundError(\n \"Antechamber not found, cannot run charge_mol()\"\n )\n\n # Compute charges\n with tempfile.TemporaryDirectory() as tmpdir:\n with temporary_cd(tmpdir):\n net_charge = mol_copy.total_charge / unit.elementary_charge\n # Write out molecule in SDF format\n ## TODO: How should we handle multiple conformers?\n self._rdkit_toolkit_wrapper.to_file(\n mol_copy, \"molecule.sdf\", file_format=\"sdf\"\n )\n # Compute desired charges\n # TODO: Add error handling if antechamber chokes\n short_charge_method = charge_method[\"antechamber_keyword\"]\n subprocess.check_output(\n [\n \"antechamber\",\n \"-i\",\n \"molecule.sdf\",\n \"-fi\",\n \"sdf\",\n \"-o\",\n \"charged.mol2\",\n \"-fo\",\n \"mol2\",\n \"-pf\",\n \"yes\",\n \"-dr\",\n \"n\",\n \"-c\",\n short_charge_method,\n 
\"-nc\",\n str(net_charge),\n ]\n )\n # Write out just charges\n subprocess.check_output(\n [\n \"antechamber\",\n \"-dr\",\n \"n\",\n \"-i\",\n \"charged.mol2\",\n \"-fi\",\n \"mol2\",\n \"-o\",\n \"charges2.mol2\",\n \"-fo\",\n \"mol2\",\n \"-c\",\n \"wc\",\n \"-cf\",\n \"charges.txt\",\n \"-pf\",\n \"yes\",\n ]\n )\n # Check to ensure charges were actually produced\n if not os.path.exists(\"charges.txt\"):\n # TODO: copy files into local directory to aid debugging?\n raise ChargeCalculationError(\n \"Antechamber/sqm partial charge calculation failed on \"\n \"molecule {} (SMILES {})\".format(\n molecule.name, molecule.to_smiles()\n )\n )\n # Read the charges\n with open(\"charges.txt\", \"r\") as infile:\n contents = infile.read()\n text_charges = contents.split()\n charges = np.zeros([molecule.n_atoms], np.float64)\n for index, token in enumerate(text_charges):\n charges[index] = float(token)\n # TODO: Ensure that the atoms in charged.mol2 are in the same order as in molecule.sdf\n charges = unit.Quantity(charges, unit.elementary_charge)\n molecule.partial_charges = charges\n\n def compute_partial_charges_am1bcc(\n self, molecule, use_conformers=None, strict_n_conformers=False\n ):\n \"\"\"\n Compute partial charges with AmberTools using antechamber/sqm. This will calculate AM1-BCC charges on the first\n conformer only.\n\n .. warning :: This API is experimental and subject to change.\n\n Parameters\n ----------\n molecule : Molecule\n Molecule for which partial charges are to be computed\n use_conformers : iterable of simtk.unit.Quantity-wrapped numpy arrays, each with shape (n_atoms, 3) and dimension of distance. Optional, default = None\n Coordinates to use for partial charge calculation. If None, an appropriate number of conformers\n will be generated.\n strict_n_conformers : bool, default=False\n Whether to raise an exception if an invalid number of conformers is provided.\n If this is False and an invalid number of conformers is found, a warning will be raised\n instead of an Exception.\n\n Returns\n -------\n charges : numpy.array of shape (natoms) of type float\n The partial charges\n \"\"\"\n\n import warnings\n\n warnings.warn(\n \"compute_partial_charges_am1bcc will be deprecated in an upcoming release. \"\n \"Use assign_partial_charges(partial_charge_method='am1bcc') instead.\",\n DeprecationWarning,\n )\n\n self.assign_partial_charges(\n molecule,\n partial_charge_method=\"AM1BCC\",\n use_conformers=use_conformers,\n strict_n_conformers=strict_n_conformers,\n )\n return molecule.partial_charges\n\n def _modify_sqm_in_to_request_bond_orders(self, file_path):\n \"\"\"\n Modify a sqm.in file produced by antechamber to include the \"printbondorders=1\" directive\n in the header. This method will overwrite the original file.\n\n Parameters\n ----------\n file_path : str\n The path to sqm.in\n \"\"\"\n\n data = open(file_path).read()\n\n # Original sqm.in file headerlooks like:\n\n # Run semi-empirical minimization\n # &qmmm\n # qm_theory='AM1', grms_tol=0.0005,\n # scfconv=1.d-10, ndiis_attempts=700, qmcharge=0,\n # /\n # ... 
(atom coordinates in something like XYZ format) ...\n\n        # To get WBOs, we need to add "printbondorders=1" to the list of keywords.\n\n        # First, split the sqm.in text at the "/" mark at the end of the header.\n        datasp = data.split("/")\n        # Insert the "printbondorders" directive in a new line and re-add the "/"\n        datasp.insert(1, "printbondorders=1, \\n /")\n        # Reassemble the file text\n        new_data = "".join(datasp)\n        # Write the new file contents, overwriting the original file.\n        with open(file_path, "w") as of:\n            of.write(new_data)\n
\n    def _get_fractional_bond_orders_from_sqm_out(\n        self, file_path, validate_elements=None\n    ):\n        """\n        Process a SQM output file containing bond orders, and return a dict of the form\n        dict[atom_1_index, atom_2_index] = fractional_bond_order\n
\n        Parameters\n        ----------\n        file_path : str\n            File path for sqm output file\n        validate_elements : iterable of str\n            The element symbols expected in molecule index order. A ValueError will be raised\n            if the elements are not found in this order.\n
\n        Returns\n        -------\n        bond_orders : dict[(int, int)]: float\n            A dictionary where the keys are tuples of two atom indices and the values are\n            floating-point bond orders. The keys are sorted in ascending order, such that\n            the lower atom index is key[0] and the higher is key[1].\n        """\n
\n        # Example sqm.out section with WBOs:\n        #  Bond Orders\n        #\n        #   QMMM:    NUM1 ELEM1 NUM2 ELEM2   BOND_ORDER\n        #   QMMM:       2   C      1   C      1.41107532\n        #   QMMM:       3   C      1   C      1.41047804\n        # ...\n        #   QMMM:      15   H     13   H      0.00000954\n        #   QMMM:      15   H     14   H      0.00000813\n        #\n        #            --------- Calculation Completed ----------\n
\n        data = open(file_path).read()\n\n        begin_sep = """ Bond Orders\n \n  QMMM:    NUM1 ELEM1 NUM2 ELEM2   BOND_ORDER\n"""\n        end_sep = """\n\n           --------- Calculation Completed ----------\n"""\n        # Extract the chunk of text between begin_sep and end_sep, and split it by newline\n        fbo_lines = data.split(begin_sep)[1].split(end_sep)[0].split("\\n")\n
\n        # Iterate over the lines and populate the dict to return\n        bond_orders = dict()\n        for line in fbo_lines:\n            linesp = line.split()\n            atom_index_1 = int(linesp[1])\n            atom_element_1 = linesp[2]\n            atom_index_2 = int(linesp[3])\n            atom_element_2 = linesp[4]\n            bond_order = float(linesp[5])\n
\n            # If validate_elements was provided, ensure that the ordering of element symbols is what we expected\n            if validate_elements is not None:\n                if (atom_element_1 != validate_elements[atom_index_1 - 1]) or (\n                    atom_element_2 != validate_elements[atom_index_2 - 1]\n                ):\n                    # raise ValueError('\\n'.join(fbo_lines))\n                    raise ValueError(\n                        f"Elements or indexing in sqm output differ from expectation. "\n                        f"Expected {validate_elements[atom_index_1 - 1]} with index {atom_index_1} and "\n                        f"{validate_elements[atom_index_2 - 1]} with index {atom_index_2}, "\n                        f"but SQM output has {atom_element_1} and {atom_element_2} for the same atoms."\n                    )\n
\n            # To make lookup easier, we identify bonds as integer tuples with the lowest atom index\n            # first and the highest second.\n            index_tuple = tuple(sorted([atom_index_1, atom_index_2]))\n            bond_orders[index_tuple] = bond_order\n        return bond_orders\n
\n    def assign_fractional_bond_orders(\n        self, molecule, bond_order_model=None, use_conformers=None, _cls=None\n    ):\n        """\n        Update and store the list of bond orders for this molecule. Bond orders are stored on each\n        bond, in the ``bond.fractional_bond_order`` attribute.
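\n\n        .. warning :: This API is experimental and subject to change.\n
\n        A usage sketch (illustrative only; the ``antechamber`` and ``sqm``\n        executables are required, so the doctest is skipped):\n\n        >>> from openff.toolkit.topology import Molecule\n        >>> molecule = Molecule.from_smiles('CCO')\n        >>> AmberToolsToolkitWrapper().assign_fractional_bond_orders(molecule)  # doctest: +SKIP\n        >>> molecule.bonds[0].fractional_bond_order  # doctest: +SKIP\n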
\n        Parameters\n        ----------\n        molecule : openff.toolkit.topology.Molecule\n            The molecule to assign Wiberg bond orders to\n        bond_order_model : str, optional, default=None\n            The bond order model to use. The only allowed value is 'am1-wiberg'. If None, 'am1-wiberg' will be used.\n        use_conformers : iterable of simtk.unit.Quantity(np.array) with shape (n_atoms, 3) and dimension of distance, optional, default=None\n            The conformers to use for fractional bond order calculation. If None, an appropriate number\n            of conformers will be generated by an available ToolkitWrapper.\n        _cls : class\n            Molecule constructor\n        """\n        from openff.toolkit.topology import Molecule\n
\n        # Find the path to antechamber\n        # TODO: How should we implement find_executable?\n        ANTECHAMBER_PATH = find_executable("antechamber")\n        if ANTECHAMBER_PATH is None:\n            raise AntechamberNotFoundError(\n                "Antechamber not found, cannot run "\n                "AmberToolsToolkitWrapper.assign_fractional_bond_orders()"\n            )\n
\n        if _cls is None:\n            from openff.toolkit.topology.molecule import Molecule\n\n            _cls = Molecule\n
\n        # Make a copy since we'll be messing with this molecule's conformers\n        temp_mol = _cls(molecule)\n
\n        if use_conformers is None:\n            temp_mol.generate_conformers(\n                n_conformers=1,\n                toolkit_registry=self._rdkit_toolkit_wrapper,\n            )\n        else:\n            temp_mol._conformers = None\n            for conformer in use_conformers:\n                temp_mol._add_conformer(conformer)\n
\n        if len(temp_mol.conformers) == 0:\n            raise ValueError(\n                "No conformers present in molecule submitted for fractional bond order calculation. Consider "\n                "loading the molecule from a file with geometry already present or running "\n                "molecule.generate_conformers() before calling molecule.assign_fractional_bond_orders"\n            )\n
\n        # Compute bond orders\n        bond_order_model_to_antechamber_keyword = {"am1-wiberg": "mul"}\n        supported_bond_order_models = list(\n            bond_order_model_to_antechamber_keyword.keys()\n        )\n        if bond_order_model is None:\n            bond_order_model = "am1-wiberg"\n\n        bond_order_model = bond_order_model.lower()\n
\n        if bond_order_model not in supported_bond_order_models:\n            raise ValueError(
\"\n f\"Supported models are {supported_bond_order_models}\"\n )\n ac_charge_keyword = bond_order_model_to_antechamber_keyword[bond_order_model]\n\n bond_orders = defaultdict(list)\n\n for conformer in [*temp_mol.conformers]:\n\n with tempfile.TemporaryDirectory() as tmpdir:\n\n with temporary_cd(tmpdir):\n net_charge = temp_mol.total_charge\n # Write out molecule in SDF format\n temp_mol._conformers = [conformer]\n self._rdkit_toolkit_wrapper.to_file(\n temp_mol, \"molecule.sdf\", file_format=\"sdf\"\n )\n # Prepare sqm.in file as if we were going to run charge calc\n # TODO: Add error handling if antechamber chokes\n subprocess.check_output(\n [\n \"antechamber\",\n \"-i\",\n \"molecule.sdf\",\n \"-fi\",\n \"sdf\",\n \"-o\",\n \"sqm.in\",\n \"-fo\",\n \"sqmcrt\",\n \"-pf\",\n \"yes\",\n \"-c\",\n ac_charge_keyword,\n \"-nc\",\n str(net_charge),\n ]\n )\n # Modify sqm.in to request bond order calculation\n self._modify_sqm_in_to_request_bond_orders(\"sqm.in\")\n # Run sqm to get bond orders\n subprocess.check_output(\n [\"sqm\", \"-i\", \"sqm.in\", \"-o\", \"sqm.out\", \"-O\"]\n )\n # Ensure that antechamber/sqm did not change the indexing by checking against\n # an ordered list of element symbols for this molecule\n expected_elements = [at.element.symbol for at in molecule.atoms]\n conformer_bond_orders = (\n self._get_fractional_bond_orders_from_sqm_out(\n \"sqm.out\", validate_elements=expected_elements\n )\n )\n\n for bond_indices, value in conformer_bond_orders.items():\n bond_orders[bond_indices].append(value)\n\n # Note that sqm calculate WBOs for ALL PAIRS of atoms, not just those that have\n # bonds defined in the original molecule. So here we iterate over the bonds in\n # the original molecule and only nab the WBOs for those.\n for bond in molecule.bonds:\n # The atom index tuples that act as bond indices are ordered from lowest to highest by\n # _get_fractional_bond_orders_from_sqm_out, so here we make sure that we look them up in\n # sorted order as well\n sorted_atom_indices = sorted(\n tuple([bond.atom1_index + 1, bond.atom2_index + 1])\n )\n bond.fractional_bond_order = np.mean(\n bond_orders[tuple(sorted_atom_indices)]\n )\n\n\n# =============================================================================================\n# Toolkit registry\n# =============================================================================================\n\n\nclass ToolkitRegistry:\n \"\"\"\n Registry for ToolkitWrapper objects\n\n Examples\n --------\n\n Register toolkits in a specified order, skipping if unavailable\n\n >>> from openff.toolkit.utils.toolkits import ToolkitRegistry\n >>> toolkit_precedence = [OpenEyeToolkitWrapper, RDKitToolkitWrapper, AmberToolsToolkitWrapper]\n >>> toolkit_registry = ToolkitRegistry(toolkit_precedence)\n >>> toolkit_registry\n ToolkitRegistry containing OpenEye Toolkit, The RDKit, AmberTools\n\n Register all available toolkits (in the order OpenEye, RDKit, AmberTools, built-in)\n\n >>> toolkits = [OpenEyeToolkitWrapper, RDKitToolkitWrapper, AmberToolsToolkitWrapper, BuiltInToolkitWrapper]\n >>> toolkit_registry = ToolkitRegistry(toolkit_precedence=toolkits)\n >>> toolkit_registry\n ToolkitRegistry containing OpenEye Toolkit, The RDKit, AmberTools, Built-in Toolkit\n\n Retrieve the global singleton toolkit registry, which is created when this module is imported from all available\n toolkits:\n\n >>> from openff.toolkit.utils.toolkits import GLOBAL_TOOLKIT_REGISTRY as toolkit_registry\n >>> toolkit_registry\n ToolkitRegistry containing OpenEye Toolkit, The 
RDKit, AmberTools, Built-in Toolkit\n\n Note that this will contain different ToolkitWrapper objects based on what toolkits\n are currently installed.\n\n .. warning :: This API is experimental and subject to change.\n \"\"\"\n\n def __init__(\n self,\n toolkit_precedence=[],\n exception_if_unavailable=True,\n _register_imported_toolkit_wrappers=False,\n ):\n \"\"\"\n Create an empty toolkit registry.\n\n Parameters\n ----------\n toolkit_precedence : list, default=[]\n List of toolkit wrapper classes, in order of desired precedence when performing molecule operations. If\n None, no toolkits will be registered.\n\n exception_if_unavailable : bool, optional, default=True\n If True, an exception will be raised if the toolkit is unavailable\n\n _register_imported_toolkit_wrappers : bool, optional, default=False\n If True, will attempt to register all imported ToolkitWrapper subclasses that can be\n found in the order of toolkit_precedence, if specified. If toolkit_precedence is not\n specified, the default order is [OpenEyeToolkitWrapper, RDKitToolkitWrapper,\n AmberToolsToolkitWrapper, BuiltInToolkitWrapper].\n\n \"\"\"\n self._toolkits = list()\n\n toolkits_to_register = list()\n\n if _register_imported_toolkit_wrappers:\n if toolkit_precedence is None:\n toolkit_precedence = [\n OpenEyeToolkitWrapper,\n RDKitToolkitWrapper,\n AmberToolsToolkitWrapper,\n BuiltInToolkitWrapper,\n ]\n all_importable_toolkit_wrappers = all_subclasses(ToolkitWrapper)\n for toolkit in toolkit_precedence:\n if toolkit in all_importable_toolkit_wrappers:\n toolkits_to_register.append(toolkit)\n else:\n if toolkit_precedence:\n toolkits_to_register = toolkit_precedence\n\n if toolkits_to_register:\n for toolkit in toolkits_to_register:\n self.register_toolkit(\n toolkit, exception_if_unavailable=exception_if_unavailable\n )\n\n @property\n def registered_toolkits(self):\n \"\"\"\n List registered toolkits.\n\n .. warning :: This API is experimental and subject to change.\n\n .. todo :: Should this return a generator? Deep copies? Classes? Toolkit names?\n\n Returns\n -------\n toolkits : iterable of toolkit objects\n \"\"\"\n return list(self._toolkits)\n\n @property\n def registered_toolkit_versions(self):\n \"\"\"\n Return a dict containing the version of each registered toolkit.\n\n .. warning :: This API is experimental and subject to change.\n\n Returns\n -------\n toolkit_versions : dict[str, str]\n A dictionary mapping names and versions of wrapped toolkits\n\n \"\"\"\n return dict(\n (tk.toolkit_name, tk.toolkit_version) for tk in self.registered_toolkits\n )\n\n def register_toolkit(self, toolkit_wrapper, exception_if_unavailable=True):\n \"\"\"\n Register the provided toolkit wrapper class, instantiating an object of it.\n\n .. warning :: This API is experimental and subject to change.\n\n .. todo ::\n\n This method should raise an exception if the toolkit is unavailable, unless an optional argument\n is specified that silently avoids registration of toolkits that are unavailable.\n\n Parameters\n ----------\n toolkit_wrapper : instance or subclass of ToolkitWrapper\n The toolkit wrapper to register or its class.\n exception_if_unavailable : bool, optional, default=True\n If True, an exception will be raised if the toolkit is unavailable\n\n \"\"\"\n # Instantiate class if class, or just add if already instantiated.\n if isinstance(toolkit_wrapper, type):\n try:\n toolkit_wrapper = toolkit_wrapper()\n except ToolkitUnavailableException:\n msg = \"Unable to load toolkit '{}'. 
\".format(\n toolkit_wrapper._toolkit_name\n )\n if exception_if_unavailable:\n raise ToolkitUnavailableException(msg)\n else:\n if \"OpenEye\" in msg:\n msg += (\n \"The Open Force Field Toolkit does not require the OpenEye Toolkits, and can \"\n \"use RDKit/AmberTools instead. However, if you have a valid license for the \"\n \"OpenEye Toolkits, consider installing them for faster performance and additional \"\n \"file format support: \"\n \"https://docs.eyesopen.com/toolkits/python/quickstart-python/linuxosx.html \"\n \"OpenEye offers free Toolkit licenses for academics: \"\n \"https://www.eyesopen.com/academic-licensing\"\n )\n logger.warning(f\"Warning: {msg}\")\n return\n\n # Add toolkit to the registry.\n self._toolkits.append(toolkit_wrapper)\n\n def deregister_toolkit(self, toolkit_wrapper):\n \"\"\"\n Remove a ToolkitWrapper from the list of toolkits in this ToolkitRegistry\n\n .. warning :: This API is experimental and subject to change.\n\n Parameters\n ----------\n toolkit_wrapper : instance or subclass of ToolkitWrapper\n The toolkit wrapper to remove from the registry\n\n Raises\n ------\n InvalidToolkitError\n If toolkit_wrapper is not a ToolkitWrapper or subclass\n ToolkitUnavailableException\n If toolkit_wrapper is not found in the registry\n \"\"\"\n # If passed a class, instantiate it\n if inspect.isclass(toolkit_wrapper):\n toolkit_wrapper = toolkit_wrapper()\n\n if not isinstance(toolkit_wrapper, ToolkitWrapper):\n msg = (\n f\"Argument {toolkit_wrapper} must an ToolkitWrapper \"\n f\"or subclass of it. Found type {type(toolkit_wrapper)}.\"\n )\n raise InvalidToolkitError(msg)\n\n toolkits_to_remove = []\n\n for toolkit in self._toolkits:\n if type(toolkit) == type(toolkit_wrapper):\n toolkits_to_remove.append(toolkit)\n\n if not toolkits_to_remove:\n msg = (\n f\"Did not find {toolkit_wrapper} in registry. \"\n f\"Currently registered toolkits are {self._toolkits}\"\n )\n raise ToolkitUnavailableException(msg)\n\n for toolkit_to_remove in toolkits_to_remove:\n self._toolkits.remove(toolkit_to_remove)\n\n def add_toolkit(self, toolkit_wrapper):\n \"\"\"\n Append a ToolkitWrapper onto the list of toolkits in this ToolkitRegistry\n\n .. 
warning :: This API is experimental and subject to change.\n\n Parameters\n ----------\n toolkit_wrapper : openff.toolkit.utils.ToolkitWrapper\n The ToolkitWrapper object to add to the list of registered toolkits\n\n Raises\n ------\n InvalidToolkitError\n If toolkit_wrapper is not a ToolkitWrapper or subclass\n \"\"\"\n if not isinstance(toolkit_wrapper, ToolkitWrapper):\n msg = \"Something other than a ToolkitWrapper object was passed to ToolkitRegistry.add_toolkit()\\n\"\n msg += \"Given object {} of type {}\".format(\n toolkit_wrapper, type(toolkit_wrapper)\n )\n raise InvalidToolkitError(msg)\n self._toolkits.append(toolkit_wrapper)\n\n # TODO: Can we automatically resolve calls to methods that are not explicitly defined using some Python magic?\n\n def resolve(self, method_name):\n \"\"\"\n Resolve the requested method name by checking all registered toolkits in\n order of precedence for one that provides the requested method.\n\n Parameters\n ----------\n method_name : str\n The name of the method to resolve\n\n Returns\n -------\n method\n The method of the first registered toolkit that provides the requested method name\n\n Raises\n ------\n NotImplementedError if the requested method cannot be found among the registered toolkits\n\n Examples\n --------\n\n Create a molecule, and call the toolkit ``to_smiles()`` method directly\n\n >>> from openff.toolkit.topology import Molecule\n >>> molecule = Molecule.from_smiles('Cc1ccccc1')\n >>> toolkit_registry = ToolkitRegistry([OpenEyeToolkitWrapper, RDKitToolkitWrapper, AmberToolsToolkitWrapper])\n >>> method = toolkit_registry.resolve('to_smiles')\n >>> smiles = method(molecule)\n\n .. todo :: Is there a better way to figure out which toolkits implement given methods by introspection?\n\n \"\"\"\n for toolkit in self._toolkits:\n if hasattr(toolkit, method_name):\n method = getattr(toolkit, method_name)\n return method\n\n # No toolkit was found to provide the requested capability\n # TODO: Can we help developers by providing a check for typos in expected method names?\n msg = 'No registered toolkits can provide the capability \"{}\".\\n'.format(\n method_name\n )\n msg += \"Available toolkits are: {}\\n\".format(self.registered_toolkits)\n raise NotImplementedError(msg)\n\n # TODO: Can we instead register available methods directly with `ToolkitRegistry`, so we can just use `ToolkitRegistry.method()`?\n def call(self, method_name, *args, raise_exception_types=None, **kwargs):\n \"\"\"\n Execute the requested method by attempting to use all registered toolkits in order of precedence.\n\n ``*args`` and ``**kwargs`` are passed to the desired method, and return values of the method are returned\n\n This is a convenient shorthand for ``toolkit_registry.resolve_method(method_name)(*args, **kwargs)``\n\n Parameters\n ----------\n method_name : str\n The name of the method to execute\n raise_exception_types : list of Exception subclasses, default=None\n A list of exception-derived types to catch and raise immediately. If None, this will be set to [Exception],\n which will raise an error immediately if the first ToolkitWrapper in the registry fails. To try each\n ToolkitWrapper that provides a suitably-named method, set this to the empty list ([]). 
If all\n ToolkitWrappers run without raising any exceptions in this list, a single ValueError will be raised\n containing each ToolkitWrapper that was tried and the exception it raised.\n\n Raises\n ------\n NotImplementedError if the requested method cannot be found among the registered toolkits\n\n ValueError if no exceptions in the raise_exception_types list were raised by ToolkitWrappers, and\n all ToolkitWrappers in the ToolkitRegistry were tried.\n\n Other forms of exceptions are possible if raise_exception_types is specified.\n These are defined by the ToolkitWrapper method being called.\n\n Examples\n --------\n\n Create a molecule, and call the toolkit ``to_smiles()`` method directly\n\n >>> from openff.toolkit.topology import Molecule\n >>> molecule = Molecule.from_smiles('Cc1ccccc1')\n >>> toolkit_registry = ToolkitRegistry([OpenEyeToolkitWrapper, RDKitToolkitWrapper])\n >>> smiles = toolkit_registry.call('to_smiles', molecule)\n\n \"\"\"\n if raise_exception_types is None:\n raise_exception_types = [Exception]\n\n errors = list()\n for toolkit in self._toolkits:\n if hasattr(toolkit, method_name):\n method = getattr(toolkit, method_name)\n try:\n return method(*args, **kwargs)\n except Exception as e:\n for exception_type in raise_exception_types:\n if isinstance(e, exception_type):\n raise e\n errors.append((toolkit, e))\n\n # No toolkit was found to provide the requested capability\n # TODO: Can we help developers by providing a check for typos in expected method names?\n msg = (\n f'No registered toolkits can provide the capability \"{method_name}\" '\n f'for args \"{args}\" and kwargs \"{kwargs}\"\\n'\n )\n\n msg += \"Available toolkits are: {}\\n\".format(self.registered_toolkits)\n # Append information about toolkits that implemented the method, but could not handle the provided parameters\n for toolkit, error in errors:\n msg += \" {} {} : {}\\n\".format(toolkit, type(error), error)\n raise ValueError(msg)\n\n def __repr__(self):\n return \"ToolkitRegistry containing \" + \", \".join(\n [tk.toolkit_name for tk in self._toolkits]\n )\n\n\n# =============================================================================================\n# GLOBAL TOOLKIT REGISTRY\n# =============================================================================================\n\n# Create global toolkit registry, where all available toolkits are registered\nGLOBAL_TOOLKIT_REGISTRY = ToolkitRegistry(\n toolkit_precedence=[\n OpenEyeToolkitWrapper,\n RDKitToolkitWrapper,\n AmberToolsToolkitWrapper,\n BuiltInToolkitWrapper,\n ],\n exception_if_unavailable=False,\n)\n\n# =============================================================================================\n# SET GLOBAL TOOLKIT-AVAILABLE VARIABLES\n# =============================================================================================\n\nOPENEYE_AVAILABLE = False\nRDKIT_AVAILABLE = False\nAMBERTOOLS_AVAILABLE = False\n\n# Only available toolkits will have made it into the GLOBAL_TOOLKIT_REGISTRY\nfor toolkit in GLOBAL_TOOLKIT_REGISTRY.registered_toolkits:\n if type(toolkit) is OpenEyeToolkitWrapper:\n OPENEYE_AVAILABLE = True\n elif type(toolkit) is RDKitToolkitWrapper:\n RDKIT_AVAILABLE = True\n elif type(toolkit) is AmberToolsToolkitWrapper:\n AMBERTOOLS_AVAILABLE = True\n\n# =============================================================================================\n# WARN IF INSUFFICIENT TOOLKITS INSTALLED\n# =============================================================================================\n\n# Define basic
toolkits that handle essential file I/O\n\nBASIC_CHEMINFORMATICS_TOOLKITS = [RDKitToolkitWrapper, OpenEyeToolkitWrapper]\n\n# Ensure we have at least one basic toolkit\nif (\n sum(\n [\n tk.is_available()\n for tk in GLOBAL_TOOLKIT_REGISTRY.registered_toolkits\n if type(tk) in BASIC_CHEMINFORMATICS_TOOLKITS\n ]\n )\n == 0\n):\n msg = \"WARNING: No basic cheminformatics toolkits are available.\\n\"\n msg += \"At least one basic toolkit is required to handle SMARTS matching and file I/O. \\n\"\n msg += \"Please install at least one of the following basic toolkits:\\n\"\n for wrapper in all_subclasses(ToolkitWrapper):\n if wrapper.toolkit_name is not None:\n msg += \"{} : {}\\n\".format(\n wrapper._toolkit_name, wrapper._toolkit_installation_instructions\n )\n print(msg)\n" ]
[ [ "numpy.square", "numpy.all", "numpy.mean", "numpy.fill_diagonal", "numpy.iinfo", "numpy.zeros_like", "numpy.ma.array", "numpy.zeros", "numpy.isclose" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
Dahk/pywren-ibm-cloud
[ "560a587e35dfe8f6dff4b85cc4bc722ec5f7fd9d" ]
[ "lithops/plots.py" ]
[ "#\n# (C) Copyright IBM Corp. 2020\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n#\n\nimport os\nimport pylab\nimport time\nimport logging\nimport numpy as np\nimport pandas as pd\nimport seaborn as sns\nimport matplotlib.patches as mpatches\nfrom matplotlib.collections import LineCollection\n\nsns.set_style('whitegrid')\npylab.switch_backend(\"Agg\")\nlogger = logging.getLogger(__name__)\n\n\ndef create_timeline(fs, dst):\n stats = [f.stats for f in fs]\n host_job_create_tstamp = min([cm['host_job_create_tstamp'] for cm in stats])\n\n stats_df = pd.DataFrame(stats)\n total_calls = len(stats_df)\n\n palette = sns.color_palette(\"deep\", 6)\n\n fig = pylab.figure(figsize=(10, 6))\n ax = fig.add_subplot(1, 1, 1)\n\n y = np.arange(total_calls)\n point_size = 10\n\n fields = [('host submit', stats_df.host_submit_tstamp - host_job_create_tstamp),\n ('worker start', stats_df.worker_start_tstamp - host_job_create_tstamp),\n ('worker done', stats_df.worker_end_tstamp - host_job_create_tstamp)]\n\n fields.append(('status fetched', stats_df.host_status_done_tstamp - host_job_create_tstamp))\n\n if 'host_result_done_tstamp' in stats_df:\n fields.append(('results fetched', stats_df.host_result_done_tstamp - host_job_create_tstamp))\n\n patches = []\n for f_i, (field_name, val) in enumerate(fields):\n ax.scatter(val, y, c=[palette[f_i]], edgecolor='none', s=point_size, alpha=0.8)\n patches.append(mpatches.Patch(color=palette[f_i], label=field_name))\n\n ax.set_xlabel('Execution Time (sec)')\n ax.set_ylabel('Function Call')\n\n legend = pylab.legend(handles=patches, loc='upper right', frameon=True)\n legend.get_frame().set_facecolor('#FFFFFF')\n\n yplot_step = int(np.max([1, total_calls/20]))\n y_ticks = np.arange(total_calls//yplot_step + 2) * yplot_step\n ax.set_yticks(y_ticks)\n ax.set_ylim(-0.02*total_calls, total_calls*1.02)\n for y in y_ticks:\n ax.axhline(y, c='k', alpha=0.1, linewidth=1)\n\n if 'host_result_done_tstamp' in stats_df:\n max_seconds = np.max(stats_df.host_result_done_tstamp - host_job_create_tstamp)*1.25\n elif 'host_status_done_tstamp' in stats_df:\n max_seconds = np.max(stats_df.host_status_done_tstamp - host_job_create_tstamp)*1.25\n else:\n max_seconds = np.max(stats_df.end_tstamp - host_job_create_tstamp)*1.25\n xplot_step = max(int(max_seconds/8), 1)\n x_ticks = np.arange(max_seconds//xplot_step + 2) * xplot_step\n ax.set_xlim(0, max_seconds)\n\n ax.set_xticks(x_ticks)\n for x in x_ticks:\n ax.axvline(x, c='k', alpha=0.2, linewidth=0.8)\n\n ax.grid(False)\n fig.tight_layout()\n\n if dst is None:\n os.makedirs('plots', exist_ok=True)\n dst = os.path.join(os.getcwd(), 'plots', '{}_{}'.format(int(time.time()), 'timeline.png'))\n else:\n dst = os.path.expanduser(dst) if '~' in dst else dst\n dst = '{}_{}'.format(os.path.realpath(dst), 'timeline.png')\n\n fig.savefig(dst)\n\n\ndef create_histogram(fs, dst):\n stats = [f.stats for f in fs]\n host_job_create_tstamp = min([cm['host_job_create_tstamp'] for cm in stats])\n\n total_calls = len(stats)\n max_seconds = 
int(max([cs['worker_end_tstamp']-host_job_create_tstamp for cs in stats])*2.5)\n\n runtime_bins = np.linspace(0, max_seconds, max_seconds)\n\n def compute_times_rates(time_rates):\n x = np.array(time_rates)\n tzero = host_job_create_tstamp\n start_time = x[:, 0] - tzero\n end_time = x[:, 1] - tzero\n\n N = len(start_time)\n\n runtime_calls_hist = np.zeros((N, len(runtime_bins)))\n\n for i in range(N):\n s = start_time[i]\n e = end_time[i]\n a, b = np.searchsorted(runtime_bins, [s, e])\n if b-a > 0:\n runtime_calls_hist[i, a:b] = 1\n\n return {'start_tstamp': start_time,\n 'end_tstamp': end_time,\n 'runtime_calls_hist': runtime_calls_hist}\n\n fig = pylab.figure(figsize=(10, 6))\n ax = fig.add_subplot(1, 1, 1)\n\n time_rates = [(cs['worker_start_tstamp'], cs['worker_end_tstamp']) for cs in stats]\n\n time_hist = compute_times_rates(time_rates)\n\n N = len(time_hist['start_tstamp'])\n line_segments = LineCollection([[[time_hist['start_tstamp'][i], i],\n [time_hist['end_tstamp'][i], i]] for i in range(N)],\n linestyles='solid', color='k', alpha=0.6, linewidth=0.4)\n\n ax.add_collection(line_segments)\n\n ax.plot(runtime_bins, time_hist['runtime_calls_hist'].sum(axis=0), label='Total Active Calls', zorder=-1)\n\n yplot_step = int(np.max([1, total_calls/20]))\n y_ticks = np.arange(total_calls//yplot_step + 2) * yplot_step\n ax.set_yticks(y_ticks)\n ax.set_ylim(-0.02*total_calls, total_calls*1.02)\n\n xplot_step = max(int(max_seconds/8), 1)\n x_ticks = np.arange(max_seconds//xplot_step + 2) * xplot_step\n ax.set_xlim(0, max_seconds)\n ax.set_xticks(x_ticks)\n for x in x_ticks:\n ax.axvline(x, c='k', alpha=0.2, linewidth=0.8)\n\n ax.set_xlabel(\"Execution Time (sec)\")\n ax.set_ylabel(\"Function Call\")\n ax.grid(False)\n ax.legend(loc='upper right')\n\n fig.tight_layout()\n\n if dst is None:\n os.makedirs('plots', exist_ok=True)\n dst = os.path.join(os.getcwd(), 'plots', '{}_{}'.format(int(time.time()), 'histogram.png'))\n else:\n dst = os.path.expanduser(dst) if '~' in dst else dst\n dst = '{}_{}'.format(os.path.realpath(dst), 'histogram.png')\n\n fig.savefig(dst)\n pylab.close(fig)\n" ]
[ [ "matplotlib.patches.Patch", "numpy.linspace", "numpy.arange", "pandas.DataFrame", "numpy.max", "numpy.searchsorted", "numpy.array" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [ "0.23", "0.21", "2.0", "1.4", "0.19", "1.1", "1.5", "1.2", "0.24", "0.20", "1.0", "0.25", "1.3" ], "scipy": [], "tensorflow": [] } ]
knshnb/optuna
[ "9c3d6685f0814bcbd66d7aaa22d50789d969f8e7", "9c3d6685f0814bcbd66d7aaa22d50789d969f8e7" ]
[ "tests/test_transform.py", "tutorial/20_recipes/009_ask_and_tell.py" ]
[ "import math\nfrom typing import Any\n\nimport numpy\nimport pytest\n\nfrom optuna._transform import _SearchSpaceTransform\nfrom optuna._transform import _untransform_numerical_param\nfrom optuna.distributions import BaseDistribution\nfrom optuna.distributions import CategoricalDistribution\nfrom optuna.distributions import FloatDistribution\nfrom optuna.distributions import IntDistribution\n\n\[email protected](\n \"param,distribution\",\n [\n (0, IntDistribution(0, 3)),\n (1, IntDistribution(1, 10, log=True)),\n (2, IntDistribution(0, 10, step=2)),\n (0.0, FloatDistribution(0, 3)),\n (1.0, FloatDistribution(1, 10, log=True)),\n (0.2, FloatDistribution(0, 1, step=0.2)),\n (\"foo\", CategoricalDistribution([\"foo\"])),\n (\"bar\", CategoricalDistribution([\"foo\", \"bar\", \"baz\"])),\n ],\n)\ndef test_search_space_transform_shapes_dtypes(param: Any, distribution: BaseDistribution) -> None:\n trans = _SearchSpaceTransform({\"x0\": distribution})\n trans_params = trans.transform({\"x0\": param})\n\n if isinstance(distribution, CategoricalDistribution):\n expected_bounds_shape = (len(distribution.choices), 2)\n expected_params_shape = (len(distribution.choices),)\n else:\n expected_bounds_shape = (1, 2)\n expected_params_shape = (1,)\n assert trans.bounds.shape == expected_bounds_shape\n assert trans.bounds.dtype == numpy.float64\n assert trans_params.shape == expected_params_shape\n assert trans_params.dtype == numpy.float64\n\n\ndef test_search_space_transform_encoding() -> None:\n trans = _SearchSpaceTransform({\"x0\": IntDistribution(0, 3)})\n\n assert len(trans.column_to_encoded_columns) == 1\n numpy.testing.assert_equal(trans.column_to_encoded_columns[0], numpy.array([0]))\n numpy.testing.assert_equal(trans.encoded_column_to_column, numpy.array([0]))\n\n trans = _SearchSpaceTransform({\"x0\": CategoricalDistribution([\"foo\", \"bar\", \"baz\"])})\n\n assert len(trans.column_to_encoded_columns) == 1\n numpy.testing.assert_equal(trans.column_to_encoded_columns[0], numpy.array([0, 1, 2]))\n numpy.testing.assert_equal(trans.encoded_column_to_column, numpy.array([0, 0, 0]))\n\n trans = _SearchSpaceTransform(\n {\n \"x0\": FloatDistribution(0, 3),\n \"x1\": CategoricalDistribution([\"foo\", \"bar\", \"baz\"]),\n \"x3\": FloatDistribution(0, 1, step=0.2),\n }\n )\n\n assert len(trans.column_to_encoded_columns) == 3\n numpy.testing.assert_equal(trans.column_to_encoded_columns[0], numpy.array([0]))\n numpy.testing.assert_equal(trans.column_to_encoded_columns[1], numpy.array([1, 2, 3]))\n numpy.testing.assert_equal(trans.column_to_encoded_columns[2], numpy.array([4]))\n numpy.testing.assert_equal(trans.encoded_column_to_column, numpy.array([0, 1, 1, 1, 2]))\n\n\[email protected](\"transform_log\", [True, False])\[email protected](\"transform_step\", [True, False])\[email protected](\n \"param,distribution\",\n [\n (0, IntDistribution(0, 3)),\n (3, IntDistribution(0, 3)),\n (1, IntDistribution(1, 10, log=True)),\n (10, IntDistribution(1, 10, log=True)),\n (2, IntDistribution(0, 10, step=2)),\n (10, IntDistribution(0, 10, step=2)),\n (0.0, FloatDistribution(0, 3)),\n (3.0, FloatDistribution(0, 3)),\n (1.0, FloatDistribution(1, 10, log=True)),\n (10.0, FloatDistribution(1, 10, log=True)),\n (0.2, FloatDistribution(0, 1, step=0.2)),\n (1.0, FloatDistribution(0, 1, step=0.2)),\n ],\n)\ndef test_search_space_transform_numerical(\n transform_log: bool,\n transform_step: bool,\n param: Any,\n distribution: BaseDistribution,\n) -> None:\n trans = _SearchSpaceTransform({\"x0\": distribution}, 
transform_log, transform_step)\n\n expected_low = distribution.low # type: ignore\n expected_high = distribution.high # type: ignore\n\n if isinstance(distribution, FloatDistribution):\n if transform_log and distribution.log:\n expected_low = math.log(expected_low)\n expected_high = math.log(expected_high)\n if transform_step and distribution.step is not None:\n half_step = 0.5 * distribution.step\n expected_low -= half_step\n expected_high += half_step\n elif isinstance(distribution, IntDistribution):\n if transform_step:\n half_step = 0.5 * distribution.step\n expected_low -= half_step\n expected_high += half_step\n if distribution.log and transform_log:\n expected_low = math.log(expected_low)\n expected_high = math.log(expected_high)\n\n for bound in trans.bounds:\n assert bound[0] == expected_low\n assert bound[1] == expected_high\n\n trans_params = trans.transform({\"x0\": param})\n assert trans_params.size == 1\n assert expected_low <= trans_params <= expected_high\n\n\[email protected](\n \"param,distribution\",\n [\n (\"foo\", CategoricalDistribution([\"foo\"])),\n (\"bar\", CategoricalDistribution([\"foo\", \"bar\", \"baz\"])),\n ],\n)\ndef test_search_space_transform_values_categorical(\n param: Any, distribution: CategoricalDistribution\n) -> None:\n trans = _SearchSpaceTransform({\"x0\": distribution})\n\n for bound in trans.bounds:\n assert bound[0] == 0.0\n assert bound[1] == 1.0\n\n trans_params = trans.transform({\"x0\": param})\n\n for trans_param in trans_params:\n assert trans_param in (0.0, 1.0)\n\n\ndef test_search_space_transform_untransform_params() -> None:\n search_space = {\n \"x0\": CategoricalDistribution([\"corge\"]),\n \"x1\": CategoricalDistribution([\"foo\", \"bar\", \"baz\", \"qux\"]),\n \"x2\": CategoricalDistribution([\"quux\", \"quuz\"]),\n \"x3\": FloatDistribution(2, 3),\n \"x4\": FloatDistribution(-2, 2),\n \"x5\": FloatDistribution(1, 10, log=True),\n \"x6\": FloatDistribution(1, 1, log=True),\n \"x7\": FloatDistribution(0, 1, step=0.2),\n \"x8\": IntDistribution(2, 4),\n \"x9\": IntDistribution(1, 10, log=True),\n \"x10\": IntDistribution(1, 9, step=2),\n }\n\n params = {\n \"x0\": \"corge\",\n \"x1\": \"qux\",\n \"x2\": \"quux\",\n \"x3\": 2.0,\n \"x4\": -2,\n \"x5\": 1.0,\n \"x6\": 1.0,\n \"x7\": 0.2,\n \"x8\": 2,\n \"x9\": 1,\n \"x10\": 3,\n }\n\n trans = _SearchSpaceTransform(search_space)\n trans_params = trans.transform(params)\n untrans_params = trans.untransform(trans_params)\n\n for name in params.keys():\n assert untrans_params[name] == params[name]\n\n\[email protected](\"transform_log\", [True, False])\[email protected](\"transform_step\", [True, False])\[email protected](\n \"distribution\",\n [\n FloatDistribution(0, 1, step=0.2),\n IntDistribution(2, 4),\n IntDistribution(1, 10, log=True),\n ],\n)\ndef test_transform_untransform_params_at_bounds(\n transform_log: bool, transform_step: bool, distribution: BaseDistribution\n) -> None:\n EPS = 1e-12\n\n # Skip the following two conditions that do not clip in `_untransform_numerical_param`:\n # 1. 
`IntDistribution(log=True)` without `transform_log`\n if not transform_log and (isinstance(distribution, IntDistribution) and distribution.log):\n return\n\n trans = _SearchSpaceTransform({\"x0\": distribution}, transform_log, transform_step)\n\n # Manually create round-off errors.\n lower_bound = trans.bounds[0][0] - EPS\n upper_bound = trans.bounds[0][1] + EPS\n\n trans_lower_param = _untransform_numerical_param(lower_bound, distribution, transform_log)\n trans_upper_param = _untransform_numerical_param(upper_bound, distribution, transform_log)\n assert trans_lower_param == distribution.low # type: ignore\n assert trans_upper_param == distribution.high # type: ignore\n", "\"\"\"\n.. _ask_and_tell:\n\nAsk-and-Tell Interface\n=======================\n\nOptuna has an `Ask-and-Tell` interface, which provides a more flexible interface for hyperparameter optimization.\nThis tutorial explains three use-cases when the ask-and-tell interface is beneficial:\n\n- :ref:`Apply-optuna-to-an-existing-optimization-problem-with-minimum-modifications`\n- :ref:`Define-and-Run`\n- :ref:`Batch-Optimization`\n\n.. _Apply-optuna-to-an-existing-optimization-problem-with-minimum-modifications:\n\n----------------------------------------------------------------------------\nApply Optuna to an existing optimization problem with minimum modifications\n----------------------------------------------------------------------------\n\nLet's consider the traditional supervised classification problem; you aim to maximize the validation accuracy.\nTo do so, you train `LogisticRegression` as a simple model.\n\"\"\"\nimport numpy as np\nfrom sklearn.datasets import make_classification\nfrom sklearn.linear_model import LogisticRegression\nfrom sklearn.model_selection import train_test_split\n\nimport optuna\n\n\nX, y = make_classification(n_features=10)\nX_train, X_test, y_train, y_test = train_test_split(X, y)\n\nC = 0.01\nclf = LogisticRegression(C=C)\nclf.fit(X_train, y_train)\nval_accuracy = clf.score(X_test, y_test) # the objective\n\n###################################################################################################\n# Then you try to optimize hyperparameters ``C`` and ``solver`` of the classifier by using optuna.\n# When you introduce optuna naively, you define an ``objective`` function\n# such that it takes ``trial`` and calls ``suggest_*`` methods of ``trial`` to sample the hyperparameters:\n\n\ndef objective(trial):\n X, y = make_classification(n_features=10)\n X_train, X_test, y_train, y_test = train_test_split(X, y)\n\n C = trial.suggest_float(\"C\", 1e-7, 10.0, log=True)\n solver = trial.suggest_categorical(\"solver\", (\"lbfgs\", \"saga\"))\n\n clf = LogisticRegression(C=C, solver=solver)\n clf.fit(X_train, y_train)\n val_accuracy = clf.score(X_test, y_test)\n\n return val_accuracy\n\n\nstudy = optuna.create_study(direction=\"maximize\")\nstudy.optimize(objective, n_trials=10)\n\n###################################################################################################\n# This interface is not flexible enough.\n# For example, if ``objective`` requires additional arguments other than ``trial``,\n# you need to define a class as in\n# `How to define objective functions that have own arguments? 
<../../faq.html#how-to-define-objective-functions-that-have-own-arguments>`_.\n# The ask-and-tell interface provides a more flexible syntax to optimize hyperparameters.\n# The following example is equivalent to the previous code block.\n\nstudy = optuna.create_study(direction=\"maximize\")\n\nn_trials = 10\nfor _ in range(n_trials):\n trial = study.ask() # `trial` is a `Trial` and not a `FrozenTrial`.\n\n C = trial.suggest_float(\"C\", 1e-7, 10.0, log=True)\n solver = trial.suggest_categorical(\"solver\", (\"lbfgs\", \"saga\"))\n\n clf = LogisticRegression(C=C, solver=solver)\n clf.fit(X_train, y_train)\n val_accuracy = clf.score(X_test, y_test)\n\n study.tell(trial, val_accuracy) # tell the pair of trial and objective value\n\n###################################################################################################\n# The main difference is to use two methods: :func:`optuna.study.Study.ask`\n# and :func:`optuna.study.Study.tell`.\n# :func:`optuna.study.Study.ask` creates a trial that can sample hyperparameters, and\n# :func:`optuna.study.Study.tell` finishes the trial by passing ``trial`` and an objective value.\n# You can apply Optuna's hyperparameter optimization to your original code\n# without an ``objective`` function.\n#\n# If you want to make your optimization faster with a pruner, you need to explicitly pass the state of trial\n# to the argument of :func:`optuna.study.Study.tell` method as follows:\n#\n# .. code-block:: python\n#\n# import numpy as np\n# from sklearn.datasets import load_iris\n# from sklearn.linear_model import SGDClassifier\n# from sklearn.model_selection import train_test_split\n#\n# import optuna\n#\n#\n# X, y = load_iris(return_X_y=True)\n# X_train, X_valid, y_train, y_valid = train_test_split(X, y)\n# classes = np.unique(y)\n# n_train_iter = 100\n#\n# # define study with hyperband pruner.\n# study = optuna.create_study(\n# direction=\"maximize\",\n# pruner=optuna.pruners.HyperbandPruner(\n# min_resource=1, max_resource=n_train_iter, reduction_factor=3\n# ),\n# )\n#\n# for _ in range(20):\n# trial = study.ask()\n#\n# alpha = trial.suggest_float(\"alpha\", 0.0, 1.0)\n#\n# clf = SGDClassifier(alpha=alpha)\n# pruned_trial = False\n#\n# for step in range(n_train_iter):\n# clf.partial_fit(X_train, y_train, classes=classes)\n#\n# intermediate_value = clf.score(X_valid, y_valid)\n# trial.report(intermediate_value, step)\n#\n# if trial.should_prune():\n# pruned_trial = True\n# break\n#\n# if pruned_trial:\n# study.tell(trial, state=optuna.trial.TrialState.PRUNED) # tell the pruned state\n# else:\n# score = clf.score(X_valid, y_valid)\n# study.tell(trial, score) # tell objective value\n\n###################################################################################################\n# .. note::\n#\n# :func:`optuna.study.Study.tell` method can take a trial number rather than the trial object.\n# ``study.tell(trial.number, y)`` is equivalent to ``study.tell(trial, y)``.\n\n\n###################################################################################################\n# .. 
_Define-and-Run:\n#\n# ---------------\n# Define-and-Run\n# ---------------\n# The ask-and-tell interface supports both `define-by-run` and `define-and-run` APIs.\n# This section shows the example of the `define-and-run` API\n# in addition to the define-by-run example above.\n#\n# Define distributions for the hyperparameters before calling the\n# :func:`optuna.study.Study.ask` method for define-and-run API.\n# For example,\n\ndistributions = {\n \"C\": optuna.distributions.FloatDistribution(1e-7, 10.0, log=True),\n \"solver\": optuna.distributions.CategoricalDistribution((\"lbfgs\", \"saga\")),\n}\n\n###################################################################################################\n# Pass ``distributions`` to :func:`optuna.study.Study.ask` method at each call.\n# The retuned ``trial`` contains the suggested hyperparameters.\n\nstudy = optuna.create_study(direction=\"maximize\")\nn_trials = 10\nfor _ in range(n_trials):\n trial = study.ask(distributions) # pass the pre-defined distributions.\n\n # two hyperparameters are already sampled from the pre-defined distributions\n C = trial.params[\"C\"]\n solver = trial.params[\"solver\"]\n\n clf = LogisticRegression(C=C, solver=solver)\n clf.fit(X_train, y_train)\n val_accuracy = clf.score(X_test, y_test)\n\n study.tell(trial, val_accuracy)\n\n\n###################################################################################################\n# .. _Batch-Optimization:\n#\n# -------------------\n# Batch Optimization\n# -------------------\n# The ask-and-tell interface enables us to optimize a batched objective for faster optimization.\n# For example, parallelizable evaluation, operation over vectors, etc.\n\n###################################################################################################\n# The following objective takes batched hyperparameters ``xs`` and ``ys`` instead of a single\n# pair of hyperparameters ``x`` and ``y`` and calculates the objective over the full vectors.\n\n\ndef batched_objective(xs: np.ndarray, ys: np.ndarray):\n return xs**2 + ys\n\n\n###################################################################################################\n# In the following example, the number of pairs of hyperparameters in a batch is :math:`10`,\n# and ``batched_objective`` is evaluated three times.\n# Thus, the number of trials is :math:`30`.\n# Note that you need to store either ``trial_ids`` or ``trial`` to call\n# :func:`optuna.study.Study.tell` method after the batched evaluations.\n\nbatch_size = 10\nstudy = optuna.create_study(sampler=optuna.samplers.CmaEsSampler())\n\nfor _ in range(3):\n\n # create batch\n trial_ids = []\n x_batch = []\n y_batch = []\n for _ in range(batch_size):\n trial = study.ask()\n trial_ids.append(trial.number)\n x_batch.append(trial.suggest_float(\"x\", -10, 10))\n y_batch.append(trial.suggest_float(\"y\", -10, 10))\n\n # evaluate batched objective\n x_batch = np.array(x_batch)\n y_batch = np.array(y_batch)\n objectives = batched_objective(x_batch, y_batch)\n\n # finish all trials in the batch\n for trial_id, objective in zip(trial_ids, objectives):\n study.tell(trial_id, objective)\n" ]
[ [ "numpy.array" ], [ "numpy.array", "sklearn.datasets.make_classification", "sklearn.model_selection.train_test_split", "sklearn.linear_model.LogisticRegression" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
yethranayeh/Daily-Mood-Rater
[ "0bf2ee688d066dd01bb388e00172a2d9e15ec043" ]
[ "mood_graph.py" ]
[ "import sys, mood_db\nimport matplotlib.pyplot as plt\nimport mplcursors\nfrom matplotlib.backends.backend_qt5agg import FigureCanvasQTAgg as FigureCanvas\nfrom PyQt5.QtWidgets import QApplication, QWidget\nfrom PyQt5.QtGui import QIcon\nfrom pathlib import Path\nfrom datetime import datetime\n\ncur_dir = Path.cwd()\n\nclass Canvas(FigureCanvas):\n def __init__(self, parent, date):\n self.fig, self.ax = plt.subplots(figsize=(10,4))\n super().__init__(self.fig)\n self.setParent(parent)\n self.values = mood_db.show_values(date)\n\n self.x = [x[2] for x in self.values]\n self.y = [y[0] for y in self.values]\n self.descriptions = [d[1] for d in self.values]\n self.ax1 = plt.subplot2grid((1,1), (0,0))\n\n self.lines = self.ax1.bar(self.x, self.y, color=\"lightsteelblue\", edgecolor=\"black\", width=0.95)\n\n for label in self.ax1.xaxis.get_ticklabels():\n label.set_rotation(45)\n \n self.ax1.tick_params(axis=\"x\", colors=\"tab:blue\")\n self.ax1.tick_params(axis=\"y\", colors=\"tab:blue\")\n\n self.ax1.xaxis.label.set_color(\"tab:blue\")\n plt.xlabel(\"Days (Year - Month - Day)\")\n\n self.ax1.yaxis.label.set_color(\"tab:blue\")\n plt.ylabel(\"Mood Rating\")\n\n date = datetime.strptime(date, \"%Y-%m\")\n plt.title(f\"Mood Rating Graph for {date.strftime('%B %Y')}\")\n\n plt.yticks([1,2,3,4,5,6,7,8,9,10]) # Only shows the available Y values\n plt.subplots_adjust(left=0.060, bottom=0.250, right=0.990, top=0.922)\n\n # Cursor Hover Annotions\n # This adds the functionality of showing mood descriptions for each day.\n cursor = mplcursors.cursor(self.lines, hover=mplcursors.HoverMode.Transient)\n cursor.connect(\n \"add\", \n lambda sel: sel.annotation.set_text(self.descriptions[sel.target.index]))\n\n\nclass AppWindow(QWidget):\n def __init__(self, date):\n super().__init__()\n self.resize(1000, 400)\n self.setMaximumSize(1000, 400)\n self.setMinimumSize(1000, 400)\n self.setWindowTitle(f\"Your Mood Rating Graph\")\n self.setWindowIcon(QIcon((cur_dir / \"test/icon.png\").as_posix()))\n\n self.graph = Canvas(self, date)\n\nif __name__ == \"__main__\":\n print(\"Name Main\")\n app = QApplication(sys.argv)\n graph = AppWindow(\"2021-10\")\n graph.show()\n sys.exit(app.exec_())\n\n" ]
[ [ "matplotlib.pyplot.subplots", "matplotlib.pyplot.subplots_adjust", "matplotlib.pyplot.xlabel", "matplotlib.pyplot.yticks", "matplotlib.pyplot.subplot2grid", "matplotlib.pyplot.ylabel" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
nileshkulkarni/acsm
[ "83bc7f22582c594a60381fe60ea7b83ec295d93c" ]
[ "acsm/utils/visualizer.py" ]
[ "'''Code adapted from https://github.com/junyanz/pytorch-CycleGAN-and-pix2pix'''\nimport numpy as np\nimport os\nimport ntpath\nimport time\nimport visdom\nfrom . import visutil as util\nfrom . import html\nfrom make_html import HTML\nimport os.path as osp\nimport pdb\nfrom .logger import Logger\n# from loguru import logger\n\nfrom . import visutil\nfrom absl import flags\nimport socket\n# server = 'http://nileshk.pc.cs.cmu.edu'\nif 'umich' in socket.getfqdn():\n server = 'http://fouheylab.eecs.umich.edu'\nelse:\n # server = 'http://compute-2-3.local'\n server = 'http://fouheylab.eecs.umich.edu'\n\nflags.DEFINE_boolean('use_html', False, 'Save html visualizations')\nflags.DEFINE_string('env_name', 'main', 'env name for experiments')\nflags.DEFINE_integer('display_id', 1, 'Display Id')\nflags.DEFINE_integer('display_winsize', 256, 'Display Size')\nflags.DEFINE_integer('display_port', 7098, 'Display port')\nflags.DEFINE_integer(\n 'display_single_pane_ncols', 0,\n 'if positive, display all images in a single visdom web panel with certain number of images per row.'\n)\n\n\nclass Visualizer():\n def __init__(self, opt):\n # self.opt = opt\n self.display_id = opt.display_id\n self.use_html = opt.use_html\n self.win_size = opt.display_winsize\n self.name = opt.name\n if opt.env_name == 'main':\n self.env_name = opt.name\n else:\n self.env_name = opt.env_name\n html_name = self.env_name + \"_webpage\"\n self.result_dir = osp.join(opt.result_dir, opt.split, opt.env_name)\n if self.display_id > 0:\n print('Visdom Env Name {}'.format(self.env_name))\n self.vis = visdom.Visdom(\n server=server,\n port=opt.display_port,\n env=self.env_name,\n )\n self.display_single_pane_ncols = opt.display_single_pane_ncols\n\n if self.use_html:\n self.web_dir = os.path.join(opt.cache_dir, 'web')\n self.img_dir = os.path.join(self.web_dir, 'images')\n print('create web directory %s...' 
% self.web_dir)\n # util.mkdirs([self.web_dir, self.img_dir])\n util.mkdirs([self.web_dir])\n self.html_doc = HTML(self.web_dir, '{}.html'.format(html_name))\n\n self.log_name = os.path.join(\n opt.checkpoint_dir, opt.name, 'loss_log.txt'\n )\n with open(self.log_name, \"a\") as log_file:\n now = time.strftime(\"%c\")\n log_file.write(\n '================ Training Loss (%s) ================\\n' % now\n )\n\n # |visuals|: dictionary of images to display or save\n def display_current_results(self, visuals, epoch):\n if self.display_id > 0: # show images in the browser\n if self.display_single_pane_ncols > 0:\n h, w = next(iter(visuals.values())).shape[:2]\n table_css = \"\"\"<style>\n table {border-collapse: separate; border-spacing:4px; white-space:nowrap; text-align:center}\n table td {width: %dpx; height: %dpx; padding: 4px; outline: 4px solid black}\n</style>\"\"\" % (w, h)\n ncols = self.display_single_pane_ncols\n title = self.name\n label_html = ''\n label_html_row = ''\n nrows = int(np.ceil(len(visuals.items()) / ncols))\n images = []\n idx = 0\n # for label, image_numpy in visuals.items():\n # dict.keys() is a view in Python 3 and cannot be sorted in place, so sort a copy\n img_keys = sorted(visuals.keys())\n for label in img_keys:\n image_numpy = visuals[label]\n label_html_row += '<td>%s</td>' % label\n images.append(image_numpy.transpose([2, 0, 1]))\n idx += 1\n if idx % ncols == 0:\n label_html += '<tr>%s</tr>' % label_html_row\n label_html_row = ''\n white_image = np.ones_like(\n image_numpy.transpose([2, 0, 1])\n ) * 255\n while idx % ncols != 0:\n images.append(white_image)\n label_html_row += '<td></td>'\n idx += 1\n if label_html_row != '':\n label_html += '<tr>%s</tr>' % label_html_row\n # pane col = image row\n self.vis.images(\n images,\n nrow=ncols,\n win=self.display_id + 1,\n padding=2,\n opts=dict(title=title + ' images')\n )\n label_html = '<table>%s</table>' % label_html\n self.vis.text(\n table_css + label_html,\n win=self.display_id + 2,\n opts=dict(title=title + ' labels')\n )\n else:\n idx = 1\n for label, image_numpy in visuals.items():\n self.vis.image(\n image_numpy.transpose([2, 0, 1]),\n opts=dict(title=label),\n win=self.display_id + idx\n )\n idx += 1\n\n def save_current_results(self, step, visuals):\n current_dir = osp.join(self.result_dir, \"{}\".format(step))\n visutil.mkdir(current_dir)\n tuple_list = []\n for bx, bv in enumerate(visuals):\n entry = {}\n entry['a_step'] = step\n # entry['aa_id'] = bx\n entry['ind'] = bv['ind']\n ind = bv['ind']\n for key in bv:\n if 'ind' in key:\n continue\n save_path = osp.join(current_dir, \"{}_{}.png\".format(ind, key))\n util.save_image(bv[key], save_path)\n entry[key] = save_path\n tuple_list.append(entry)\n if self.use_html:\n self.html_doc.add_images(tuple_list)\n return\n\n # scalars: dictionary of scalar labels and values\n def plot_current_scalars(self, epoch, counter_ratio, opt, scalars):\n if not hasattr(self, 'plot_data'):\n self.plot_data = {'X': [], 'Y': [], 'legend': list(scalars.keys())}\n self.plot_data['X'].append(epoch + counter_ratio)\n self.plot_data['Y'].append(\n [scalars[k] for k in self.plot_data['legend']]\n )\n self.vis.line(\n X=np.stack(\n [np.array(self.plot_data['X'])] * len(self.plot_data['legend']),\n 1\n ),\n Y=np.array(self.plot_data['Y']),\n opts={\n 'title': self.name + ' loss over time',\n 'legend': self.plot_data['legend'],\n 'xlabel': 'epoch',\n 'ylabel': 'loss'\n },\n win=self.display_id\n )\n\n def plot_adj_histogram(self, arrays):\n i = 3\n for key, item in arrays.items():\n self.vis.histogram(\n item.cpu().numpy().reshape((-1)),\n
opts={'title': self.name + ' {}_hist'.format(key)},\n win=self.display_id + i\n )\n i += 1\n\n # scatter plots\n def plot_current_points(self, points, disp_offset=10):\n idx = disp_offset\n for label, pts in points.items():\n self.vis.scatter(\n pts,\n opts=dict(title=label, markersize=1),\n win=self.display_id + idx\n )\n idx += 1\n\n # scalars: same format as |scalars| of plot_current_scalars\n def print_current_scalars(self, t, epoch, i, scalars):\n message = '(time : %0.3f, epoch: %d, iters: %d) ' % (t, epoch, i)\n for k, v in scalars.items():\n message += '%s: %.3f ' % (k, v)\n\n Logger.info(message)\n with open(self.log_name, \"a\") as log_file:\n log_file.write('%s\\n' % message)\n\n # save image to the disk\n def save_images(self, webpage, visuals, image_path):\n image_dir = webpage.get_image_dir()\n short_path = ntpath.basename(image_path[0])\n name = os.path.splitext(short_path)[0]\n\n webpage.add_header(name)\n ims = []\n txts = []\n links = []\n\n for label, image_numpy in visuals.items():\n image_name = '%s_%s.png' % (name, label)\n save_path = os.path.join(image_dir, image_name)\n util.save_image(image_numpy, save_path)\n\n ims.append(image_name)\n txts.append(label)\n links.append(image_name)\n webpage.add_images(ims, txts, links, width=self.win_size)\n" ]
[ [ "numpy.array" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
vuiseng9/nncf_pytorch
[ "c2b1f069c867327203629201aecae3b7815e7895", "c2b1f069c867327203629201aecae3b7815e7895", "c2b1f069c867327203629201aecae3b7815e7895" ]
[ "nncf/experimental/onnx/statistics/statistics.py", "examples/experimental/torch/classification/bootstrap_nas.py", "tests/tensorflow/test_sanity_sample.py" ]
[ "\"\"\"\n Copyright (c) 2022 Intel Corporation\n Licensed under the Apache License, Version 2.0 (the \"License\");\n you may not use this file except in compliance with the License.\n You may obtain a copy of the License at\n http://www.apache.org/licenses/LICENSE-2.0\n Unless required by applicable law or agreed to in writing, software\n distributed under the License is distributed on an \"AS IS\" BASIS,\n WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n See the License for the specific language governing permissions and\n limitations under the License.\n\"\"\"\n\nimport numpy as np\n\nfrom nncf.common.tensor_statistics.statistics import MinMaxTensorStatistic\n\n\nclass ONNXMinMaxTensorStatistic(MinMaxTensorStatistic):\n @staticmethod\n def tensor_eq(tensor1: np.ndarray, tensor2: np.ndarray, rtol=1e-6) -> bool:\n return bool(np.allclose(tensor1, tensor2, rtol=rtol))\n", "\"\"\"\n Copyright (c) 2022 Intel Corporation\n Licensed under the Apache License, Version 2.0 (the \"License\");\n you may not use this file except in compliance with the License.\n You may obtain a copy of the License at\n http://www.apache.org/licenses/LICENSE-2.0\n Unless required by applicable law or agreed to in writing, software\n distributed under the License is distributed on an \"AS IS\" BASIS,\n WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n See the License for the specific language governing permissions and\n limitations under the License.\n\"\"\"\nimport os.path as osp\nimport sys\nimport warnings\nfrom pathlib import Path\nfrom shutil import copyfile\n\nfrom torch import nn\n\nfrom examples.torch.classification.main import create_data_loaders\nfrom examples.torch.classification.main import create_datasets\nfrom examples.torch.classification.main import get_argument_parser\nfrom examples.torch.classification.main import inception_criterion_fn\nfrom examples.torch.classification.main import train_epoch\nfrom examples.torch.classification.main import validate\nfrom examples.torch.common.argparser import parse_args\nfrom examples.torch.common.example_logger import logger\nfrom examples.torch.common.execution import get_execution_mode\nfrom examples.torch.common.execution import set_seed\nfrom examples.torch.common.execution import start_worker\nfrom examples.torch.common.model_loader import load_model\nfrom examples.torch.common.optimizer import get_parameter_groups\nfrom examples.torch.common.optimizer import make_optimizer\nfrom examples.torch.common.sample_config import SampleConfig\nfrom examples.torch.common.sample_config import create_sample_config\nfrom examples.torch.common.utils import SafeMLFLow\nfrom examples.torch.common.utils import configure_device\nfrom examples.torch.common.utils import configure_logging\nfrom examples.torch.common.utils import configure_paths\nfrom examples.torch.common.utils import create_code_snapshot\nfrom examples.torch.common.utils import is_pretrained_model_requested\nfrom examples.torch.common.utils import print_args\nfrom nncf.config.structures import BNAdaptationInitArgs\nfrom nncf.experimental.torch.nas.bootstrapNAS import EpochBasedTrainingAlgorithm\nfrom nncf.torch.initialization import default_criterion_fn\nfrom nncf.torch.initialization import wrap_dataloader_for_init\nfrom nncf.torch.model_creation import create_nncf_network\nfrom nncf.torch.utils import is_main_process\n\n\ndef get_nas_argument_parser():\n parser = get_argument_parser()\n parser.add_argument('--train-steps', default=None, type=int,\n 
help='Enables running training for the given number of steps')\n return parser\n\n\ndef main(argv):\n parser = get_nas_argument_parser()\n args = parse_args(parser, argv)\n config = create_sample_config(args, parser)\n\n if config.dist_url == \"env://\":\n config.update_from_env()\n\n configure_paths(config)\n copyfile(args.config, osp.join(config.log_dir, 'config.json'))\n source_root = Path(__file__).absolute().parents[2] # nncf root\n create_code_snapshot(source_root, osp.join(config.log_dir, \"snapshot.tar.gz\"))\n\n if config.seed is not None:\n warnings.warn('You have chosen to seed training. '\n 'This will turn on the CUDNN deterministic setting, '\n 'which can slow down your training considerably! '\n 'You may see unexpected behavior when restarting '\n 'from checkpoints.')\n\n config.execution_mode = get_execution_mode(config)\n\n start_worker(main_worker, config)\n\n\n# pylint:disable=too-many-branches,too-many-statements\ndef main_worker(current_gpu, config: SampleConfig):\n configure_device(current_gpu, config)\n config.mlflow = SafeMLFLow(config)\n if is_main_process():\n configure_logging(logger, config)\n print_args(config)\n\n set_seed(config)\n\n # define loss function (criterion)\n criterion = nn.CrossEntropyLoss()\n criterion = criterion.to(config.device)\n\n model_name = config['model']\n train_criterion_fn = inception_criterion_fn if 'inception' in model_name else default_criterion_fn\n\n nncf_config = config.nncf_config\n pretrained = is_pretrained_model_requested(config)\n\n # Data loading code\n train_dataset, val_dataset = create_datasets(config)\n train_loader, _, val_loader, _ = create_data_loaders(config, train_dataset, val_dataset)\n\n bn_adapt_args = BNAdaptationInitArgs(data_loader=wrap_dataloader_for_init(train_loader), device=config.device)\n nncf_config.register_extra_structs([bn_adapt_args])\n # create model\n model = load_model(model_name,\n pretrained=pretrained,\n num_classes=config.get('num_classes', 1000),\n model_params=config.get('model_params'),\n weights_path=config.get('weights'))\n\n model.to(config.device)\n\n # define optimizer\n params_to_optimize = get_parameter_groups(model, config)\n optimizer, lr_scheduler = make_optimizer(params_to_optimize, config)\n\n def train_epoch_fn(loader, model_, compression_ctrl, epoch, optimizer_):\n train_epoch(loader, model_, criterion, train_criterion_fn, optimizer_, compression_ctrl, epoch, config,\n train_iters=config.train_steps, log_training_info=True)\n\n def validate_model_fn(model_, loader):\n top1, top5, loss = validate(loader, model_, criterion, config, log_validation_info=False)\n return top1, top5, loss\n\n nncf_network = create_nncf_network(model, nncf_config)\n\n resuming_checkpoint_path = config.resuming_checkpoint_path\n if resuming_checkpoint_path is None:\n training_algorithm = EpochBasedTrainingAlgorithm.from_config(nncf_network, nncf_config)\n else:\n training_algorithm = EpochBasedTrainingAlgorithm.from_checkpoint(nncf_network, bn_adapt_args,\n resuming_checkpoint_path)\n\n if 'train' in config.mode:\n training_algorithm.run(train_epoch_fn, train_loader, lr_scheduler,\n validate_model_fn, val_loader, optimizer,\n config.checkpoint_save_dir, config.tb)\n\n if 'test' in config.mode:\n validate(val_loader, model, criterion, config)\n\n\nif __name__ == '__main__':\n main(sys.argv[1:])\n", "\"\"\"\n Copyright (c) 2022 Intel Corporation\n Licensed under the Apache License, Version 2.0 (the \"License\");\n you may not use this file except in compliance with the License.\n You may obtain a copy 
of the License at\n http://www.apache.org/licenses/LICENSE-2.0\n Unless required by applicable law or agreed to in writing, software\n distributed under the License is distributed on an \"AS IS\" BASIS,\n WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n See the License for the specific language governing permissions and\n limitations under the License.\n\"\"\"\n\nimport json\nimport os\nimport tempfile\nfrom functools import partial\nimport pytest\nimport tensorflow as tf\n\nfrom tests.common.config_factory import ConfigFactory\nfrom tests.common.helpers import TEST_ROOT\nfrom tests.tensorflow.helpers import get_coco_dataset_builders\nfrom tests.tensorflow.helpers import get_cifar10_dataset_builders\nfrom tests.tensorflow.test_models import SequentialModel, SequentialModelNoInput\n\nfrom examples.tensorflow.classification import main as cls_main\nfrom examples.tensorflow.object_detection import main as od_main\nfrom examples.tensorflow.segmentation import train as seg_train\nfrom examples.tensorflow.segmentation import evaluation as seg_eval\nfrom examples.tensorflow.common.model_loader import AVAILABLE_MODELS\nfrom examples.tensorflow.common.prepare_checkpoint import main as prepare_checkpoint_main\n\nAVAILABLE_MODELS.update({\n 'SequentialModel': SequentialModel,\n 'SequentialModelNoInput': SequentialModelNoInput\n})\n\n\[email protected](autouse=True)\ndef run_around_tests():\n original_cls_main_get_dataset_builders = cls_main.get_dataset_builders\n original_od_main_get_dataset_builders = od_main.get_dataset_builders\n original_seg_train_get_dataset_builders = seg_train.get_dataset_builders\n original_seg_eval_get_dataset_builders = seg_eval.get_dataset_builders\n cls_main.get_dataset_builders = get_cifar10_dataset_builders\n od_main.get_dataset_builders = partial(get_coco_dataset_builders, train=True, validation=True)\n seg_train.get_dataset_builders = partial(get_coco_dataset_builders, train=True, calibration=True)\n seg_eval.get_dataset_builders = partial(get_coco_dataset_builders, validation=True, calibration=True)\n yield\n cls_main.get_dataset_builders = original_cls_main_get_dataset_builders\n od_main.get_dataset_builders = original_od_main_get_dataset_builders\n seg_train.get_dataset_builders = original_seg_train_get_dataset_builders\n seg_eval.get_dataset_builders = original_seg_eval_get_dataset_builders\n\n\ndef convert_to_argv(args):\n return ' '.join(key if val is None else '{} {}'.format(key, val) for key, val in args.items()).split()\n\n\nSAMPLE_TYPES = [\n 'classification',\n 'object_detection',\n 'segmentation',\n]\n\nSAMPLES = {\n 'classification': {\n 'train-test-export': cls_main.main\n },\n 'object_detection': {\n 'train-test-export': od_main.main\n },\n 'segmentation': {\n 'train': seg_train.main,\n 'test-export': seg_eval.main\n },\n}\n\nDATASETS = {\n 'classification': [('cifar10', 'tfrecords'), ('cifar10', 'tfrecords'), ('cifar10', 'tfrecords')],\n 'object_detection': [('coco2017', 'tfrecords')],\n 'segmentation': [('coco2017', 'tfrecords')],\n}\n\nTEST_CONFIG_ROOT = TEST_ROOT.joinpath('tensorflow', 'data', 'configs')\nCONFIGS = {\n 'classification': [\n TEST_CONFIG_ROOT.joinpath('resnet50_cifar10_magnitude_sparsity_int8.json'),\n TEST_CONFIG_ROOT.joinpath('sequential_model_cifar10_magnitude_sparsity_int8.json'),\n TEST_CONFIG_ROOT.joinpath('sequential_model_no_input_cifar10_magnitude_sparsity_int8.json'),\n ],\n 'object_detection': [\n TEST_CONFIG_ROOT.joinpath('retinanet_coco2017_magnitude_sparsity_int8.json'),\n ],\n 'segmentation': 
[\n TEST_CONFIG_ROOT.joinpath('mask_rcnn_coco2017_magnitude_sparsity_int8.json'),\n ],\n}\n\nBATCH_SIZE_PER_GPU = {\n 'classification': [1, 1, 1],\n 'object_detection': [1],\n 'segmentation': [1],\n}\n\n\ndef get_global_batch_size():\n num_gpus = len(tf.config.list_physical_devices('GPU'))\n coeff = num_gpus if num_gpus else 1\n global_batch_size = {}\n for sample_type, batch_sizes in BATCH_SIZE_PER_GPU.items():\n global_batch_size[sample_type] = [coeff * bs for bs in batch_sizes]\n return global_batch_size\n\n\nGLOBAL_BATCH_SIZE = get_global_batch_size()\n\nDATASET_PATHS = {\n 'classification': {\n x: lambda dataset_root, dataset_name=x:\n os.path.join(dataset_root, dataset_name) if dataset_root else\n os.path.join(tempfile.gettempdir(), dataset_name)\n for x, _ in DATASETS['classification']\n },\n 'object_detection': {\n 'coco2017': lambda dataset_root: TEST_ROOT.joinpath('tensorflow', 'data', 'mock_datasets', 'coco2017')\n },\n 'segmentation': {\n 'coco2017': lambda dataset_root: TEST_ROOT.joinpath('tensorflow', 'data', 'mock_datasets', 'coco2017')\n },\n}\n\nDATASET_PATHS['classification']['cifar10'] = lambda dataset_root: TEST_ROOT.joinpath('tensorflow', 'data',\n 'mock_datasets', 'cifar10')\n\n\ndef get_sample_fn(sample_type, modes):\n variants = []\n for key in SAMPLES[sample_type].keys():\n supported_modes = set(key.split('-'))\n if set(modes).issubset(supported_modes):\n variants.append(key)\n\n if len(variants) != 1:\n raise Exception('Can not choose a function for given arguments')\n\n return SAMPLES[sample_type][variants[0]]\n\n\ndef generate_config_params():\n config_params = []\n for sample_id, sample_type in enumerate(SAMPLE_TYPES):\n config_paths, batch_sizes = CONFIGS[sample_type], GLOBAL_BATCH_SIZE[sample_type]\n dataset_names, dataset_types = zip(*DATASETS[sample_type])\n\n for params_id, params in enumerate(zip(config_paths, dataset_names, dataset_types, batch_sizes)):\n config_params.append((sample_type, *params, '{}_{}'.format(sample_id, params_id)))\n return config_params\n\n\ndef generate_id(value):\n sample_type, config_path, dataset_name, dataset_type, batch_size, _ = value\n filename = config_path.name\n return '-'.join([sample_type, filename, dataset_name, dataset_type, str(batch_size)])\n\n\nCONFIG_PARAMS = generate_config_params()\n\n\[email protected](params=CONFIG_PARAMS, ids=generate_id)\ndef _config(request, dataset_dir):\n sample_type, config_path, dataset_name, dataset_type, batch_size, tid = request.param\n dataset_path = DATASET_PATHS[sample_type][dataset_name](dataset_dir)\n\n with config_path.open() as f:\n jconfig = json.load(f)\n\n if 'checkpoint_save_dir' in jconfig.keys():\n del jconfig['checkpoint_save_dir']\n\n jconfig['dataset'] = dataset_name\n jconfig['dataset_type'] = dataset_type\n\n return {\n 'sample_type': sample_type,\n 'nncf_config': jconfig,\n 'model_name': jconfig['model'],\n 'dataset_path': dataset_path,\n 'batch_size': batch_size,\n 'tid': tid\n }\n\n\[email protected](scope='module')\ndef _case_common_dirs(tmp_path_factory):\n return {\n 'checkpoint_save_dir': str(tmp_path_factory.mktemp('models')),\n 'optimized_checkpoint_save_dir': str(tmp_path_factory.mktemp('optimized_models'))\n }\n\n\ndef test_model_eval(_config, tmp_path):\n if _config['sample_type'] == 'segmentation':\n pytest.skip(\"ticket #58759\")\n config_factory = ConfigFactory(_config['nncf_config'], tmp_path / 'config.json')\n args = {\n '--mode': 'test',\n '--data': _config['dataset_path'],\n '--config': config_factory.serialize(),\n '--log-dir': tmp_path,\n 
'--batch-size': _config['batch_size']\n }\n main = get_sample_fn(_config['sample_type'], modes=['test'])\n main(convert_to_argv(args))\n\n\[email protected](name='tf_test_model_train')\ndef test_model_train(_config, tmp_path, _case_common_dirs):\n if _config['sample_type'] == 'segmentation':\n pytest.skip(\"ticket #58759\")\n checkpoint_save_dir = os.path.join(_case_common_dirs['checkpoint_save_dir'], _config['tid'])\n config_factory = ConfigFactory(_config['nncf_config'], tmp_path / 'config.json')\n args = {\n '--data': _config['dataset_path'],\n '--config': config_factory.serialize(),\n '--log-dir': tmp_path,\n '--batch-size': _config['batch_size'],\n '--epochs': 1,\n '--checkpoint-save-dir': checkpoint_save_dir\n }\n\n if _config['sample_type'] != 'segmentation':\n args['--mode'] = 'train'\n\n main = get_sample_fn(_config['sample_type'], modes=['train'])\n main(convert_to_argv(args))\n\n assert tf.io.gfile.isdir(checkpoint_save_dir)\n assert tf.train.latest_checkpoint(checkpoint_save_dir)\n\n\[email protected](depends=['tf_test_model_train'])\ndef test_trained_model_eval(_config, tmp_path, _case_common_dirs):\n config_factory = ConfigFactory(_config['nncf_config'], tmp_path / 'config.json')\n ckpt_path = os.path.join(_case_common_dirs['checkpoint_save_dir'], _config['tid'])\n args = {\n '--mode': 'test',\n '--data': _config['dataset_path'],\n '--config': config_factory.serialize(),\n '--log-dir': tmp_path,\n '--batch-size': _config['batch_size'],\n '--resume': ckpt_path\n }\n\n main = get_sample_fn(_config['sample_type'], modes=['test'])\n main(convert_to_argv(args))\n\n\[email protected](depends=['tf_test_model_train'])\ndef test_resume(_config, tmp_path, _case_common_dirs):\n checkpoint_save_dir = os.path.join(str(tmp_path), 'models')\n config_factory = ConfigFactory(_config['nncf_config'], tmp_path / 'config.json')\n ckpt_path = os.path.join(_case_common_dirs['checkpoint_save_dir'], _config['tid'])\n\n args = {\n '--data': _config['dataset_path'],\n '--config': config_factory.serialize(),\n '--log-dir': tmp_path,\n '--batch-size': _config['batch_size'],\n '--epochs': 2,\n '--checkpoint-save-dir': checkpoint_save_dir,\n '--resume': ckpt_path\n }\n\n if _config['sample_type'] != 'segmentation':\n args['--mode'] = 'train'\n\n main = get_sample_fn(_config['sample_type'], modes=['train'])\n main(convert_to_argv(args))\n\n assert tf.io.gfile.isdir(checkpoint_save_dir)\n assert tf.train.latest_checkpoint(checkpoint_save_dir)\n\n\[email protected](depends=['tf_test_model_train'])\ndef test_trained_model_resume_train_test_export_last_ckpt(_config, tmp_path, _case_common_dirs):\n if _config['sample_type'] == 'segmentation':\n pytest.skip()\n\n checkpoint_save_dir = os.path.join(str(tmp_path), 'models')\n config_factory = ConfigFactory(_config['nncf_config'], tmp_path / 'config.json')\n ckpt_path = os.path.join(_case_common_dirs['checkpoint_save_dir'], _config['tid'])\n\n export_path = os.path.join(str(tmp_path), 'model.pb')\n args = {\n '--mode': 'train test export',\n '--data': _config['dataset_path'],\n '--config': config_factory.serialize(),\n '--log-dir': tmp_path,\n '--batch-size': _config['batch_size'],\n '--epochs': 2,\n '--checkpoint-save-dir': checkpoint_save_dir,\n '--resume': ckpt_path,\n '--to-frozen-graph': export_path\n }\n\n main = get_sample_fn(_config['sample_type'], modes=['train', 'test', 'export'])\n main(convert_to_argv(args))\n\n assert tf.io.gfile.isdir(checkpoint_save_dir)\n assert tf.train.latest_checkpoint(checkpoint_save_dir)\n assert 
os.path.exists(export_path)\n\n\nFORMATS = [\n    'frozen-graph',\n    'saved-model',\n    'h5'\n]\n\n\ndef get_export_model_name(export_format):\n    model_name = 'model'\n    if export_format == 'frozen-graph':\n        model_name = 'model.pb'\n    elif export_format == 'h5':\n        model_name = 'model.h5'\n    return model_name\n\n\n@pytest.mark.dependency(depends=['tf_test_model_train'])\n@pytest.mark.parametrize('export_format', FORMATS, ids=FORMATS)\ndef test_export_with_resume(_config, tmp_path, export_format, _case_common_dirs):\n    config_factory = ConfigFactory(_config['nncf_config'], tmp_path / 'config.json')\n    ckpt_path = os.path.join(_case_common_dirs['checkpoint_save_dir'], _config['tid'])\n\n    if export_format == 'saved-model':\n        compression_config = _config['nncf_config'].get('compression', {})\n        if isinstance(compression_config, dict):\n            compression_config = [compression_config]\n        for config in compression_config:\n            if config.get('algorithm', '') == 'quantization':\n                pytest.skip()\n\n    if _config['sample_type'] == 'segmentation' and export_format == 'h5':\n        pytest.skip('The {} sample does not support export to {} format.'.format(_config['sample_type'],\n                                                                                 export_format))\n\n    export_path = os.path.join(str(tmp_path), get_export_model_name(export_format))\n    args = {\n        '--mode': 'export',\n        '--config': config_factory.serialize(),\n        '--log-dir': tmp_path,\n        '--resume': ckpt_path,\n        '--to-{}'.format(export_format): export_path,\n    }\n\n    main = get_sample_fn(_config['sample_type'], modes=['export'])\n    main(convert_to_argv(args))\n\n    model_path = os.path.join(export_path, 'saved_model.pb') \\\n        if export_format == 'saved-model' else export_path\n    assert os.path.exists(model_path)\n\n\nPREPARE_CHECKPOINTS_SUPPORTED_SAMPLE_TYPES = ['object_detection', 'segmentation']\n\n\n@pytest.mark.dependency(name='tf_test_prepare_checkpoint', depends=['tf_test_model_train'])\ndef test_prepare_checkpoint(_config, tmp_path, _case_common_dirs):\n    if _config['sample_type'] not in PREPARE_CHECKPOINTS_SUPPORTED_SAMPLE_TYPES:\n        pytest.skip('Unsupported sample type for test_prepare_checkpoints')\n\n    resume_path = os.path.join(_case_common_dirs['checkpoint_save_dir'], _config['tid'])\n    checkpoint_save_dir = os.path.join(_case_common_dirs['optimized_checkpoint_save_dir'], _config['tid'])\n    config_factory = ConfigFactory(_config['nncf_config'], tmp_path / 'config.json')\n    args = {\n        '--model-type': _config['sample_type'],\n        '--config': config_factory.serialize(),\n        '--checkpoint-save-dir': checkpoint_save_dir,\n        '--resume': resume_path,\n    }\n\n    prepare_checkpoint_main(convert_to_argv(args))\n\n    assert tf.io.gfile.isdir(checkpoint_save_dir)\n    assert tf.train.latest_checkpoint(checkpoint_save_dir)\n\n\n@pytest.mark.dependency(depends=['tf_test_prepare_checkpoint'])\ndef test_eval_prepared_checkpoint(_config, tmp_path, _case_common_dirs):\n    if _config['sample_type'] not in PREPARE_CHECKPOINTS_SUPPORTED_SAMPLE_TYPES:\n        pytest.skip('Unsupported sample type for test_prepare_checkpoints')\n\n    config_factory = ConfigFactory(_config['nncf_config'], tmp_path / 'config.json')\n    resume_path = os.path.join(_case_common_dirs['optimized_checkpoint_save_dir'], _config['tid'])\n\n    args = {\n        '--mode': 'test',\n        '--data': _config['dataset_path'],\n        '--config': config_factory.serialize(),\n        '--batch-size': _config['batch_size'],\n        '--resume': resume_path,\n    }\n\n    main = get_sample_fn(_config['sample_type'], modes=['test'])\n    main(convert_to_argv(args))\n\n\n@pytest.fixture(params=[TEST_ROOT.joinpath(\"tensorflow\", \"data\", \"configs\", \"sequential_pruning_accuracy_aware.json\"),\n                        
TEST_ROOT.joinpath(\"tensorflow\", \"data\", \"configs\", \"sequential_int8_accuracy_aware.json\")])\ndef _accuracy_aware_config(request, dataset_dir):\n config_path = request.param\n sample_type = 'classification'\n dataset_name, dataset_type = 'cifar10', 'tfrecords'\n dataset_path = DATASET_PATHS[sample_type][dataset_name](dataset_dir)\n with config_path.open() as f:\n jconfig = json.load(f)\n\n jconfig['dataset'] = dataset_name\n jconfig['dataset_type'] = dataset_type\n\n num_gpus = len(tf.config.list_physical_devices('GPU'))\n batch_size = num_gpus if num_gpus else 1\n\n return {\n 'sample_type': sample_type,\n 'nncf_config': jconfig,\n 'model_name': jconfig['model'],\n 'dataset_path': dataset_path,\n 'batch_size': batch_size,\n }\n\n\[email protected](name='tf_test_model_train')\ndef test_model_accuracy_aware_train(_accuracy_aware_config, tmp_path):\n checkpoint_save_dir = tmp_path\n config_factory = ConfigFactory(_accuracy_aware_config['nncf_config'], tmp_path / 'config.json')\n args = {\n '--data': _accuracy_aware_config['dataset_path'],\n '--config': config_factory.serialize(),\n '--log-dir': tmp_path,\n '--batch-size': _accuracy_aware_config['batch_size'],\n '--epochs': 1,\n '--checkpoint-save-dir': tmp_path\n }\n\n main = get_sample_fn(_accuracy_aware_config['sample_type'], modes=['train'])\n main(convert_to_argv(args))\n\n assert tf.io.gfile.isdir(checkpoint_save_dir)\n from glob import glob\n time_dir_1 = os.path.join(checkpoint_save_dir, glob(os.path.join(checkpoint_save_dir, '*/'))[0].split('/')[-2])\n time_dir_1 = os.path.join(time_dir_1, glob(os.path.join(time_dir_1, '*/'))[0].split('/')[-2],\n 'accuracy_aware_training')\n time_dir_2 = os.path.join(time_dir_1, glob(os.path.join(time_dir_1, '*/'))[0].split('/')[-2])\n assert tf.train.latest_checkpoint(time_dir_2)\n" ]
[ [ "numpy.allclose" ], [ "torch.nn.CrossEntropyLoss" ], [ "tensorflow.io.gfile.isdir", "tensorflow.train.latest_checkpoint", "tensorflow.config.list_physical_devices" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
swsachith/differential-privacy-library
[ "b5014276204d70c6fb1f0e2aff11fefc1a0d226d" ]
[ "diffprivlib/models/logistic_regression.py" ]
[ "# MIT License\n#\n# Copyright (C) IBM Corporation 2019\n#\n# Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated\n# documentation files (the \"Software\"), to deal in the Software without restriction, including without limitation the\n# rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit\n# persons to whom the Software is furnished to do so, subject to the following conditions:\n#\n# The above copyright notice and this permission notice shall be included in all copies or substantial portions of the\n# Software.\n#\n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE\n# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\n# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,\n# TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE\n# SOFTWARE.\n#\n#\n# New BSD License\n#\n# Copyright (c) 2007–2019 The scikit-learn developers.\n# All rights reserved.\n#\n# Redistribution and use in source and binary forms, with or without modification, are permitted provided that the\n# following conditions are met:\n#\n# a. Redistributions of source code must retain the above copyright notice, this list of conditions and the following\n# disclaimer.\n# b. Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the\n# following disclaimer in the documentation and/or other materials provided with the distribution.\n# c. Neither the name of the Scikit-learn Developers nor the names of its contributors may be used to endorse or\n# promote products derived from this software without specific prior written permission.\n#\n# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS \"AS IS\" AND ANY EXPRESS OR IMPLIED WARRANTIES,\n# INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE\n# DISCLAIMED. 
IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,\n# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;\n# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN\n# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS\n# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n#\n\"\"\"\nLogistic Regression classifier satisfying differential privacy.\n\"\"\"\nimport numbers\nimport warnings\n\nimport numpy as np\nfrom joblib import delayed, Parallel\nfrom scipy import optimize\nfrom sklearn.exceptions import ConvergenceWarning\nfrom sklearn import linear_model\nfrom sklearn.linear_model._logistic import _logistic_loss_and_grad\nfrom sklearn.utils import check_X_y, check_array, check_consistent_length\nfrom sklearn.utils.fixes import _joblib_parallel_args\nfrom sklearn.utils.multiclass import check_classification_targets\n\nfrom diffprivlib.accountant import BudgetAccountant\nfrom diffprivlib.mechanisms import Vector\nfrom diffprivlib.utils import PrivacyLeakWarning, DiffprivlibCompatibilityWarning, warn_unused_args\nfrom diffprivlib.validation import clip_to_norm\n\n\nclass LogisticRegression(linear_model.LogisticRegression):\n r\"\"\"Logistic Regression (aka logit, MaxEnt) classifier with differential privacy.\n\n This class implements regularised logistic regression using :ref:`Scipy's L-BFGS-B algorithm\n <scipy:optimize.minimize-lbfgsb>`. :math:`\\epsilon`-Differential privacy is achieved relative to the maximum norm\n of the data, as determined by `data_norm`, by the :class:`.Vector` mechanism, which adds a Laplace-distributed\n random vector to the objective. Adapted from the work presented in [CMS11]_.\n\n This class is a child of :obj:`sklearn.linear_model.LogisticRegression`, with amendments to allow for the\n implementation of differential privacy. Some parameters of `Scikit Learn`'s model have therefore had to be fixed,\n including:\n\n - The only permitted solver is 'lbfgs'. Specifying the ``solver`` option will result in a warning.\n - Consequently, the only permitted penalty is 'l2'. Specifying the ``penalty`` option will result in a warning.\n - In the multiclass case, only the one-vs-rest (OvR) scheme is permitted. Specifying the ``multi_class`` option\n will result in a warning.\n\n Parameters\n ----------\n epsilon : float, default: 1.0\n Privacy parameter :math:`\\epsilon`.\n\n data_norm : float, optional\n The max l2 norm of any row of the data. This defines the spread of data that will be protected by\n differential privacy.\n\n If not specified, the max norm is taken from the data when ``.fit()`` is first called, but will result in a\n :class:`.PrivacyLeakWarning`, as it reveals information about the data. To preserve differential privacy fully,\n `data_norm` should be selected independently of the data, i.e. with domain knowledge.\n\n tol : float, default: 1e-4\n Tolerance for stopping criteria.\n\n C : float, default: 1.0\n Inverse of regularization strength; must be a positive float. Like in support vector machines, smaller values\n specify stronger regularization.\n\n fit_intercept : bool, default: True\n Specifies if a constant (a.k.a. bias or intercept) should be added to the decision function.\n\n max_iter : int, default: 100\n Maximum number of iterations taken for the solver to converge. 
For smaller `epsilon` (more noise), `max_iter`\n may need to be increased.\n\n verbose : int, default: 0\n Set to any positive number for verbosity.\n\n warm_start : bool, default: False\n When set to ``True``, reuse the solution of the previous call to fit as initialization, otherwise, just erase\n the previous solution.\n\n n_jobs : int, optional\n Number of CPU cores used when parallelising over classes. ``None`` means 1 unless in a context. ``-1`` means\n using all processors.\n\n accountant : BudgetAccountant, optional\n Accountant to keep track of privacy budget.\n\n Attributes\n ----------\n classes_ : array, shape (n_classes, )\n A list of class labels known to the classifier.\n\n coef_ : array, shape (1, n_features) or (n_classes, n_features)\n Coefficient of the features in the decision function.\n\n `coef_` is of shape (1, n_features) when the given problem is binary.\n\n intercept_ : array, shape (1,) or (n_classes,)\n Intercept (a.k.a. bias) added to the decision function.\n\n If `fit_intercept` is set to False, the intercept is set to zero. `intercept_` is of shape (1,) when the\n given problem is binary.\n\n n_iter_ : array, shape (n_classes,) or (1, )\n Actual number of iterations for all classes. If binary, it returns only 1 element.\n\n Examples\n --------\n >>> from sklearn.datasets import load_iris\n >>> from diffprivlib.models import LogisticRegression\n >>> X, y = load_iris(return_X_y=True)\n >>> clf = LogisticRegression(data_norm=12, epsilon=2).fit(X, y)\n >>> clf.predict(X[:2, :])\n array([0, 0])\n >>> clf.predict_proba(X[:2, :])\n array([[7.35362932e-01, 2.16667422e-14, 2.64637068e-01],\n [9.08384378e-01, 3.47767052e-13, 9.16156215e-02]])\n >>> clf.score(X, y)\n 0.5266666666666666\n\n See also\n --------\n sklearn.linear_model.LogisticRegression : The implementation of logistic regression in scikit-learn, upon which this\n implementation is built.\n .Vector : The mechanism used by the model to achieve differential privacy.\n\n References\n ----------\n .. [CMS11] Chaudhuri, Kamalika, Claire Monteleoni, and Anand D. Sarwate. \"Differentially private empirical risk\n minimization.\" Journal of Machine Learning Research 12, no. Mar (2011): 1069-1109.\n\n \"\"\"\n\n def __init__(self, epsilon=1.0, data_norm=None, tol=1e-4, C=1.0, fit_intercept=True, max_iter=100, verbose=0,\n warm_start=False, n_jobs=None, accountant=None, **unused_args):\n super().__init__(penalty='l2', dual=False, tol=tol, C=C, fit_intercept=fit_intercept, intercept_scaling=1.0,\n class_weight=None, random_state=None, solver='lbfgs', max_iter=max_iter, multi_class='ovr',\n verbose=verbose, warm_start=warm_start, n_jobs=n_jobs)\n self.epsilon = epsilon\n self.data_norm = data_norm\n self.classes_ = None\n self.accountant = BudgetAccountant.load_default(accountant)\n\n warn_unused_args(unused_args)\n\n # noinspection PyAttributeOutsideInit\n def fit(self, X, y, sample_weight=None):\n \"\"\"Fit the model according to the given training data.\n\n Parameters\n ----------\n X : {array-like, sparse matrix}, shape (n_samples, n_features)\n Training vector, where n_samples is the number of samples and n_features is the number of features.\n\n y : array-like, shape (n_samples,)\n Target vector relative to X.\n\n sample_weight : ignored\n Ignored by diffprivlib. 
Present for consistency with sklearn API.\n\n Returns\n -------\n self : class\n\n \"\"\"\n self.accountant.check(self.epsilon, 0)\n\n if sample_weight is not None:\n warn_unused_args(\"sample_weight\")\n\n if not isinstance(self.C, numbers.Real) or self.C < 0:\n raise ValueError(\"Penalty term must be positive; got (C=%r)\" % self.C)\n if not isinstance(self.max_iter, numbers.Integral) or self.max_iter < 0:\n raise ValueError(\"Maximum number of iteration must be positive; got (max_iter=%r)\" % self.max_iter)\n if not isinstance(self.tol, numbers.Real) or self.tol < 0:\n raise ValueError(\"Tolerance for stopping criteria must be positive; got (tol=%r)\" % self.tol)\n\n solver = _check_solver(self.solver, self.penalty, self.dual)\n X, y = check_X_y(X, y, accept_sparse='csr', dtype=np.float64, order=\"C\",\n accept_large_sparse=solver != 'liblinear')\n check_classification_targets(y)\n self.classes_ = np.unique(y)\n _, n_features = X.shape\n\n if self.data_norm is None:\n warnings.warn(\"Data norm has not been specified and will be calculated on the data provided. This will \"\n \"result in additional privacy leakage. To ensure differential privacy and no additional \"\n \"privacy leakage, specify `data_norm` at initialisation.\", PrivacyLeakWarning)\n self.data_norm = np.linalg.norm(X, axis=1).max()\n\n X = clip_to_norm(X, self.data_norm)\n\n self.multi_class = _check_multi_class(self.multi_class, solver, len(self.classes_))\n\n n_classes = len(self.classes_)\n classes_ = self.classes_\n if n_classes < 2:\n raise ValueError(\"This solver needs samples of at least 2 classes in the data, but the data contains only \"\n \"one class: %r\" % classes_[0])\n\n if len(self.classes_) == 2:\n n_classes = 1\n classes_ = classes_[1:]\n\n if self.warm_start:\n warm_start_coef = getattr(self, 'coef_', None)\n else:\n warm_start_coef = None\n if warm_start_coef is not None and self.fit_intercept:\n warm_start_coef = np.append(warm_start_coef, self.intercept_[:, np.newaxis], axis=1)\n\n self.coef_ = list()\n self.intercept_ = np.zeros(n_classes)\n\n if warm_start_coef is None:\n warm_start_coef = [None] * n_classes\n\n path_func = delayed(_logistic_regression_path)\n\n fold_coefs_ = Parallel(n_jobs=self.n_jobs, verbose=self.verbose, **_joblib_parallel_args(prefer='processes'))(\n path_func(X, y, epsilon=self.epsilon / n_classes, data_norm=self.data_norm, pos_class=class_, Cs=[self.C],\n fit_intercept=self.fit_intercept, max_iter=self.max_iter, tol=self.tol, verbose=self.verbose,\n coef=warm_start_coef_, check_input=False)\n for class_, warm_start_coef_ in zip(classes_, warm_start_coef))\n\n fold_coefs_, _, n_iter_ = zip(*fold_coefs_)\n self.n_iter_ = np.asarray(n_iter_, dtype=np.int32)[:, 0]\n\n self.coef_ = np.asarray(fold_coefs_)\n self.coef_ = self.coef_.reshape(n_classes, n_features + int(self.fit_intercept))\n\n if self.fit_intercept:\n self.intercept_ = self.coef_[:, -1]\n self.coef_ = self.coef_[:, :-1]\n\n self.accountant.spend(self.epsilon, 0)\n\n return self\n\n def set_coeff(self, coefficients):\n self.coef_ = coefficients\n\n\ndef _logistic_regression_path(X, y, epsilon, data_norm, pos_class=None, Cs=10, fit_intercept=True, max_iter=100,\n tol=1e-4, verbose=0, coef=None, check_input=True, **unused_args):\n \"\"\"Compute a Logistic Regression model with differential privacy for a list of regularization parameters. 
Takes\n inspiration from ``_logistic_regression_path`` in scikit-learn, specified to the LBFGS solver and one-vs-rest\n multi class fitting.\n\n Parameters\n ----------\n X : array-like or sparse matrix, shape (n_samples, n_features)\n Input data.\n\n y : array-like, shape (n_samples,) or (n_samples, n_targets)\n Input data, target values.\n\n epsilon : float\n Privacy parameter for differential privacy.\n\n data_norm : float\n Max norm of the data for which differential privacy is satisfied.\n\n pos_class : int, optional\n The class with respect to which we perform a one-vs-all fit. If None, then it is assumed that the given problem\n is binary.\n\n Cs : int | array-like, shape (n_cs,), default: 10\n List of values for the regularization parameter or integer specifying the number of regularization parameters\n that should be used. In this case, the parameters will be chosen in a logarithmic scale between 1e-4 and 1e4.\n\n fit_intercept : bool, default: True\n Whether to fit an intercept for the model. In this case the shape of the returned array is\n (n_cs, n_features + 1).\n\n max_iter : int, default: 100\n Maximum number of iterations for the solver.\n\n tol : float, default: 1e-4\n Stopping criterion. For the newton-cg and lbfgs solvers, the iteration will stop when ``max{|g_i | i = 1,\n ..., n} <= tol`` where ``g_i`` is the i-th component of the gradient.\n\n verbose : int, default: 0\n For the liblinear and lbfgs solvers set verbose to any positive number for verbosity.\n\n coef : array-like, shape (n_features,), optional\n Initialization value for coefficients of logistic regression. Useless for liblinear solver.\n\n check_input : bool, default: True\n If False, the input arrays X and y will not be checked.\n\n Returns\n -------\n coefs : ndarray, shape (n_cs, n_features) or (n_cs, n_features + 1)\n List of coefficients for the Logistic Regression model. If fit_intercept is set to True then the second\n dimension will be n_features + 1, where the last item represents the intercept. 
For\n ``multiclass='multinomial'``, the shape is (n_classes, n_cs, n_features) or (n_classes, n_cs, n_features + 1).\n\n Cs : ndarray\n Grid of Cs used for cross-validation.\n\n n_iter : array, shape (n_cs,)\n Actual number of iteration for each Cs.\n\n \"\"\"\n warn_unused_args(unused_args)\n\n if isinstance(Cs, numbers.Integral):\n Cs = np.logspace(-4, 4, int(Cs))\n\n solver = 'lbfgs'\n\n # Data norm increases if intercept is included\n if fit_intercept:\n data_norm = np.sqrt(data_norm ** 2 + 1)\n\n # Pre-processing.\n if check_input:\n X = check_array(X, accept_sparse='csr', dtype=np.float64, accept_large_sparse=solver != 'liblinear')\n y = check_array(y, ensure_2d=False, dtype=None)\n check_consistent_length(X, y)\n _, n_features = X.shape\n\n classes = np.unique(y)\n\n if pos_class is None:\n if classes.size > 2:\n raise ValueError('To fit OvR, use the pos_class argument')\n # np.unique(y) gives labels in sorted order.\n pos_class = classes[1]\n\n sample_weight = np.ones(X.shape[0], dtype=X.dtype)\n\n # For doing a ovr, we need to mask the labels first.\n output_vec = np.zeros(n_features + int(fit_intercept), dtype=X.dtype)\n mask = (y == pos_class)\n y_bin = np.ones(y.shape, dtype=X.dtype)\n y_bin[~mask] = -1.\n # for compute_class_weight\n\n if coef is not None:\n # it must work both giving the bias term and not\n if coef.size not in (n_features, output_vec.size):\n raise ValueError('Initialization coef is of shape %d, expected shape %d or %d' % (coef.size, n_features,\n output_vec.size))\n output_vec[:coef.size] = coef\n\n target = y_bin\n\n coefs = list()\n n_iter = np.zeros(len(Cs), dtype=np.int32)\n for i, C in enumerate(Cs):\n vector_mech = Vector(epsilon=epsilon, dimension=n_features + int(fit_intercept), alpha=1. / C,\n function_sensitivity=0.25, data_sensitivity=data_norm)\n noisy_logistic_loss = vector_mech.randomise(_logistic_loss_and_grad)\n\n iprint = [-1, 50, 1, 100, 101][np.searchsorted(np.array([0, 1, 2, 3]), verbose)]\n output_vec, _, info = optimize.fmin_l_bfgs_b(noisy_logistic_loss, output_vec, fprime=None,\n args=(X, target, 1. / C, sample_weight), iprint=iprint, pgtol=tol,\n maxiter=max_iter)\n if info[\"warnflag\"] == 1:\n warnings.warn(\"lbfgs failed to converge. Increase the number of iterations.\", ConvergenceWarning)\n\n coefs.append(output_vec.copy())\n\n n_iter[i] = info['nit']\n\n return np.array(coefs), np.array(Cs), n_iter\n\n\ndef _check_solver(solver, penalty, dual):\n if solver != 'lbfgs':\n warnings.warn(\"For diffprivlib, solver must be 'lbfgs'.\", DiffprivlibCompatibilityWarning)\n solver = 'lbfgs'\n\n if penalty != 'l2':\n raise ValueError(\"Solver %s supports only l2 penalties, got %s penalty.\" % (solver, penalty))\n if dual:\n raise ValueError(\"Solver %s supports only dual=False, got dual=%s\" % (solver, dual))\n return solver\n\n\ndef _check_multi_class(multi_class, solver, n_classes):\n del solver, n_classes\n\n if multi_class != 'ovr':\n warnings.warn(\"For diffprivlib, multi_class must be 'ovr'.\", DiffprivlibCompatibilityWarning)\n multi_class = 'ovr'\n\n return multi_class\n" ]
[ [ "sklearn.utils.check_X_y", "numpy.sqrt", "numpy.unique", "numpy.asarray", "sklearn.utils.check_array", "sklearn.utils.multiclass.check_classification_targets", "sklearn.utils.check_consistent_length", "scipy.optimize.fmin_l_bfgs_b", "numpy.linalg.norm", "numpy.ones", "sklearn.utils.fixes._joblib_parallel_args", "numpy.append", "numpy.array", "numpy.zeros" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [ "0.13", "1.6", "0.14", "1.10", "0.15", "1.4", "0.16", "1.9", "0.19", "1.5", "0.18", "1.2", "1.7", "0.12", "0.17", "1.3", "1.8" ], "tensorflow": [] } ]
Open-EO/openeo-udf
[ "cdd910c5da1f752991415d8732e5dbdcfeac2366" ]
[ "src/openeo_udf/api/udf_wrapper.py" ]
[ "from openeo_udf.api.datacube import DataCube\nfrom openeo_udf.api.udf_data import UdfData\nfrom typing import Dict, Callable\nimport xarray\nimport numpy\nimport pandas\nfrom pandas import Series\n\n\ndef apply_timeseries(series: Series, context:Dict)->Series:\n \"\"\"\n Do something with the timeseries\n :param series:\n :param context:\n :return:\n \"\"\"\n return series\n\ndef apply_timeseries_generic(udf_data: UdfData, callback: Callable = apply_timeseries):\n \"\"\"\n Implements the UDF contract by calling a user provided time series transformation function (apply_timeseries).\n Multiple bands are currently handled separately, another approach could provide a dataframe with a timeseries for each band.\n\n :param udf_data:\n :return:\n \"\"\"\n # The list of tiles that were created\n tile_results = []\n\n # Iterate over each cube\n for cube in udf_data.get_datacube_list():\n array3d = []\n #use rollaxis to make the time dimension the last one\n for time_x_slice in numpy.rollaxis(cube.array.values, 1):\n time_x_result = []\n for time_slice in time_x_slice:\n series = pandas.Series(time_slice)\n transformed_series = callback(series,udf_data.user_context)\n time_x_result.append(transformed_series)\n array3d.append(time_x_result)\n\n # We need to create a new 3D array with the correct shape for the computed aggregate\n result_tile = numpy.rollaxis(numpy.asarray(array3d),1)\n assert result_tile.shape == cube.array.shape\n # Create the new raster collection cube\n rct = DataCube(xarray.DataArray(result_tile))\n tile_results.append(rct)\n # Insert the new tiles as list of raster collection tiles in the input object. The new tiles will\n # replace the original input tiles.\n udf_data.set_datacube_list(tile_results)\n return udf_data\n\n" ]
[ [ "numpy.rollaxis", "numpy.asarray", "pandas.Series" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [ "0.23", "0.21", "2.0", "1.4", "0.19", "1.1", "1.5", "1.2", "0.24", "0.20", "1.0", "0.25", "1.3" ], "scipy": [], "tensorflow": [] } ]
Naman9639/sunpy
[ "24c0cfbd9b03d7f9554bc86036fac2b78a5fcc56", "24c0cfbd9b03d7f9554bc86036fac2b78a5fcc56", "24c0cfbd9b03d7f9554bc86036fac2b78a5fcc56", "24c0cfbd9b03d7f9554bc86036fac2b78a5fcc56" ]
[ "examples/plotting/simple_differential_rotation.py", "examples/plotting/great_arc_example.py", "examples/plotting/aia_example.py", "sunpy/visualization/wcsaxes_compat.py" ]
[ "\"\"\"\n============================\nSimple Differential Rotation\n============================\n\nThe Sun is known to rotate differentially, meaning that the rotation rate\nnear the poles (rotation period of approximately 35 days) is not the same as\nthe rotation rate near the equator (rotation period of approximately 25 days).\nThis is possible because the Sun is not a solid body. Though it is still poorly\nunderstood, it is fairly well measured and must be taken into account\nwhen comparing observations of features on the Sun over time.\nA good review can be found in Beck 1999 Solar Physics 191, 47–70.\nThis example illustrates solar differential rotation.\n\"\"\"\n\n##############################################################################\n# Start by importing the necessary modules.\nfrom datetime import timedelta\n\nimport numpy as np\nimport matplotlib.pyplot as plt\n\nimport astropy.units as u\nfrom astropy.coordinates import SkyCoord\n\nimport sunpy.map\nimport sunpy.data.sample\nfrom sunpy.physics.differential_rotation import diff_rot, solar_rotate_coordinate\n\n##############################################################################\n# Next lets explore solar differential rotation by replicating Figure 1\n# in Beck 1999\n\nlatitudes = u.Quantity(np.arange(0, 90, 1), 'deg')\ndt = 1 * u.day\nrotation_rate = [diff_rot(dt, this_lat) / dt for this_lat in latitudes]\nrotation_period = [360 * u.deg / this_rate for this_rate in rotation_rate]\n\nfig = plt.figure()\nplt.plot(np.sin(latitudes), [this_period.value for this_period in rotation_period])\nplt.ylim(38, 24)\nplt.ylabel('Rotation Period [{0}]'.format(rotation_period[0].unit))\nplt.xlabel('Sin(Latitude)')\nplt.title('Solar Differential Rotation Rate')\n\n##############################################################################\n# Next let's show how to this looks like on the Sun.\n# Load in an AIA map:\n\naia_map = sunpy.map.Map(sunpy.data.sample.AIA_171_IMAGE)\n\n##############################################################################\n# Let's define our starting coordinates\n\nhpc_y = u.Quantity(np.arange(-700, 800, 100), u.arcsec)\nhpc_x = np.zeros_like(hpc_y)\n\n##############################################################################\n# Let's define how many days in the future we want to rotate to\n\ndt = timedelta(days=4)\nfuture_date = aia_map.date + dt\n\n##############################################################################\n# Now let's plot the original and rotated positions on the AIA map.\n\nfig = plt.figure()\nax = plt.subplot(projection=aia_map)\naia_map.plot()\nax.set_title('The effect of {0} days of differential rotation'.format(dt.days))\naia_map.draw_grid()\n\nfor this_hpc_x, this_hpc_y in zip(hpc_x, hpc_y):\n start_coord = SkyCoord(this_hpc_x, this_hpc_y, frame=aia_map.coordinate_frame)\n rotated_coord = solar_rotate_coordinate(start_coord, future_date)\n coord = SkyCoord([start_coord.Tx, rotated_coord.Tx],\n [start_coord.Ty, rotated_coord.Ty],\n frame=aia_map.coordinate_frame)\n ax.plot_coord(coord, 'o-')\n\nplt.ylim(0, aia_map.data.shape[1])\nplt.xlim(0, aia_map.data.shape[0])\nplt.show()\n", "\"\"\"\n=============================\nDrawing and using a Great Arc\n=============================\n\nThis example shows you how to define and draw a great arc on an image of the\nSun, and to extract intensity values along that arc from the image data.\n\"\"\"\nimport numpy as np\nimport matplotlib.pyplot as plt\n\nimport astropy.units as u\nfrom astropy.coordinates import 
SkyCoord\n\nimport sunpy.map\nfrom sunpy.coordinates.utils import GreatArc\nfrom sunpy.data.sample import AIA_171_IMAGE\n\n###############################################################################\n# Make a map.\nm = sunpy.map.Map(AIA_171_IMAGE)\n\n###############################################################################\n# Let's define the start and end co-ordinates of the arc on the Sun.\nstart = SkyCoord(735 * u.arcsec, -471 * u.arcsec, frame=m.coordinate_frame)\nend = SkyCoord(-100 * u.arcsec, 800 * u.arcsec, frame=m.coordinate_frame)\n\n###############################################################################\n# Create the great arc between the start and end points.\ngreat_arc = GreatArc(start, end)\n\n###############################################################################\n# Plot the great arc on the Sun.\nfig = plt.figure()\nax = plt.subplot(projection=m)\nm.plot(axes=ax)\nax.plot_coord(great_arc.coordinates(), color='c')\nplt.show()\n\n###############################################################################\n# Now we can calculate the nearest integer pixels of the data that correspond\n# to the location of arc.\npixels = np.asarray(np.rint(m.world_to_pixel(great_arc.coordinates())), dtype=int)\nx = pixels[0, :]\ny = pixels[1, :]\n\n###############################################################################\n# Get the intensity along the arc from the start to the end point.\nintensity_along_arc = m.data[y, x]\n\n###############################################################################\n# Define the angular location of each pixel along the arc from the start point\n# to the end.\nangles = great_arc.inner_angles().to(u.deg)\n\n###############################################################################\n# Plot the intensity along the arc from the start to the end point.\nfig, ax = plt.subplots()\nax.plot(angles, intensity_along_arc)\nax.set_xlabel('degrees of arc from start')\nax.set_ylabel('intensity')\nax.grid(linestyle='dotted')\nplt.show()\n", "\"\"\"\n================\nAIA Plot Example\n================\n\nThis is a very simple way to plot a sample AIA image.\n\"\"\"\nimport matplotlib.pyplot as plt\n\nimport sunpy.map\nfrom sunpy.data.sample import AIA_171_IMAGE\n\n###############################################################################\n# We now create the Map using the sample data.\n\naiamap = sunpy.map.Map(AIA_171_IMAGE)\n\n###############################################################################\n# Now we do a quick plot.\n\naiamap.peek()\nplt.show()\n", "# -*- coding: utf-8 -*-\n\"\"\"\nHelpers and Functions to make WCSAxes work in SunPy\n\"\"\"\nimport matplotlib.pyplot as plt\n\nimport astropy.units as u\n\ntry:\n from astropy.visualization import wcsaxes\nexcept ImportError:\n raise ImportError(\"Astropy >= 1.3 is required to use SunPy\")\n\n# Force is put here to enable disabling all checks in this module. 
It should\n# only be used by tests and other such hacks.\n_FORCE_NO_WCSAXES = False\n\n__all__ = ['is_wcsaxes']\n\n\ndef is_wcsaxes(axes):\n \"\"\"\n Test a matplotlib Axes object to see if it is an instance of WCSAxes.\n\n Parameters\n ----------\n axes : `matplotlib.axes` Object\n Axes to test\n\n Returns\n -------\n result : `bool`\n Result of the test\n \"\"\"\n\n if not _FORCE_NO_WCSAXES:\n return isinstance(axes, wcsaxes.WCSAxes)\n else:\n return False\n\n\ndef gca_wcs(wcs, fig=None, slices=None):\n \"\"\"\n Get the current axes, and return a WCSAxes if possible.\n\n Parameters\n ----------\n wcs : `astropy.wcs.WCS`\n A `~astropy.wcs.WCS` object used to create a new axes.\n fig : `matplotlib.figure.Figure`\n The figure in which to check for the axes.\n slices : `tuple`\n ``slices`` is passed to `~astropy.visualization.wcsaxes.WCSAxes`\n to describe which two dimensions of the `~astropy.wcs.WCS` object\n are being plotted.\n This slices the multidimensional wcs object in the way it needs\n to be sliced.\n\n Returns\n -------\n ax : `matplotlib.axes.Axes` or `~astropy.visualization.wcsaxes.WCSAxes`\n object. The current axes, or a new one if created.\n\n \"\"\"\n\n if not fig:\n fig = plt.gcf()\n\n if not len(fig.get_axes()):\n if not _FORCE_NO_WCSAXES:\n ax = plt.gca(projection=wcs, slices=slices)\n else:\n ax = plt.gca()\n\n else:\n ax = plt.gca()\n\n return ax\n\n\ndef get_world_transform(axes):\n \"\"\"\n Get the transformation to world coordinates.\n\n If the axes is a `~astropy.visualization.wcsaxes.WCSAxes` instance this\n returns the transform to the ``'world'`` coordinates, otherwise it returns\n the transform to the matplotlib data coordinates, which are assumed to be in\n world coordinates.\n\n Parameters\n ----------\n axes : `~astropy.visualization.wcsaxes.WCSAxes` or `~matplotlib.axes.Axes`\n object. 
The axes to get the transform from.\n\n    Returns\n    -------\n    transform : `~matplotlib.transforms.CompositeGenericTransform`\n        The transformation object.\n    \"\"\"\n    if is_wcsaxes(axes):\n        transform = axes.get_transform('world')\n    else:\n        transform = axes.transData\n\n    return transform\n\n\ndef solar_coord_type_from_ctype(ctype):\n    \"\"\"\n    Determine whether a particular WCS ctype corresponds to an angle or scalar\n    coordinate.\n    \"\"\"\n\n    if ctype[2:4] == 'LN':\n        if ctype[:4] in ['HPLN', 'HGLN']:\n            return 'longitude', 180.\n\n        return 'longitude', None\n\n    elif ctype[2:4] == 'LT':\n        return 'latitude', None\n\n    else:\n        return 'scalar', None\n\n\ndef default_wcs_ticks(axes, units, ctypes):\n    \"\"\"\n    Set the ticks and axes type on a solar WCSAxes plot.\n    \"\"\"\n\n    if not isinstance(axes, wcsaxes.WCSAxes):\n        raise TypeError(\"This axes is not a WCSAxes\")\n\n    x = axes.coords[0]\n    y = axes.coords[1]\n\n    if x.ticks.get_tick_out() == 'in':\n        x.set_ticks(color='white')\n    if y.ticks.get_tick_out() == 'in':\n        y.set_ticks(color='white')\n\n    x.set_ticks_position('bl')\n    y.set_ticks_position('bl')\n\n    xtype = solar_coord_type_from_ctype(ctypes[0])\n    ytype = solar_coord_type_from_ctype(ctypes[1])\n\n    x.set_coord_type(*xtype)\n    y.set_coord_type(*ytype)\n\n    if xtype[0] == 'scalar':\n        x.set_major_formatter('x.x')\n    elif units[0] is u.deg:\n        x.set_major_formatter('d.d')\n    elif units[0] is u.arcsec:\n        x.set_major_formatter('s.s')\n    else:\n        x.set_major_formatter('x.x')\n\n    if ytype[0] == 'scalar':\n        x.set_major_formatter('x.x')\n    elif units[1] is u.deg:\n        y.set_major_formatter('d.d')\n    elif units[1] is u.arcsec:\n        y.set_major_formatter('s.s')\n    else:\n        y.set_major_formatter('x.x')\n\n\ndef default_wcs_grid(axes, units, ctypes):\n    \"\"\"\n    Apply some default wcsaxes grid formatting.\n\n    Parameters\n    ----------\n    axes : `~astropy.visualization.wcsaxes.WCSAxes` object.\n        The `~astropy.visualization.wcsaxes.WCSAxes` object to draw the world\n        coordinate grid on.\n\n    units : `tuple`\n        The axes units axes x y order.\n    \"\"\"\n\n    default_wcs_ticks(axes, units, ctypes)\n\n    axes.coords.grid(color='white', alpha=0.6, linestyle='dotted',\n                     linewidth=0.5)\n\n\n@u.quantity_input(grid_spacing=u.deg)\ndef wcsaxes_heliographic_overlay(axes, grid_spacing=10*u.deg, **kwargs):\n    \"\"\"\n    Create a heliographic overlay using wcsaxes.\n\n    Also draw a grid and label the top axes.\n\n    Parameters\n    ----------\n    axes : `~astropy.visualization.wcsaxes.WCSAxes` object.\n        The `~astropy.visualization.wcsaxes.WCSAxes` object to create the HGS overlay on.\n\n    grid_spacing: `~astropy.units.Quantity`\n        Spacing for longitude and latitude grid in degrees.\n\n    Returns\n    -------\n    overlay : `~astropy.visualization.wcsaxes.WCSAxes` overlay\n        The overlay object.\n\n    Notes\n    -----\n    Keywords are passed to `~astropy.visualization.wcsaxes.coordinates_map.CoordinatesMap.grid`.\n\n    \"\"\"\n\n    # Unpack spacing\n    if isinstance(grid_spacing, u.Quantity) and grid_spacing.size == 1:\n        lon_space = lat_space = grid_spacing\n    elif grid_spacing.size == 2:\n        lon_space, lat_space = grid_spacing\n    else:\n        raise ValueError(\"grid_spacing must be a Quantity of length one or two.\")\n\n    overlay = axes.get_coords_overlay('heliographic_stonyhurst')\n\n    lon = overlay[0]\n    lat = overlay[1]\n\n    lon.coord_wrap = 180\n    lon.set_major_formatter('dd')\n\n    lon.set_axislabel('Solar Longitude', minpad=0.8)\n    lat.set_axislabel('Solar Latitude', minpad=0.9)\n\n    lon.set_ticks_position('tr')\n    lat.set_ticks_position('tr')\n\n    grid_kw = {'color': 'white', 'zorder': 100, 'alpha': 
0.5}\n grid_kw.update(kwargs)\n\n lon.set_ticks(spacing=lon_space, color=grid_kw['color'])\n lat.set_ticks(spacing=lat_space, color=grid_kw['color'])\n\n overlay.grid(**grid_kw)\n\n if axes.title:\n x, y = axes.title.get_position()\n axes.title.set_position([x, y + 0.08])\n\n return overlay\n" ]
[ [ "matplotlib.pyplot.title", "matplotlib.pyplot.ylim", "numpy.arange", "numpy.sin", "matplotlib.pyplot.xlim", "matplotlib.pyplot.subplot", "numpy.zeros_like", "matplotlib.pyplot.xlabel", "matplotlib.pyplot.show", "matplotlib.pyplot.figure" ], [ "matplotlib.pyplot.subplot", "matplotlib.pyplot.subplots", "matplotlib.pyplot.show", "matplotlib.pyplot.figure" ], [ "matplotlib.pyplot.show" ], [ "matplotlib.pyplot.gca", "matplotlib.pyplot.gcf" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
Medicine-and-Algorithm/sidechainnet
[ "bd3abe3482157380d2221468500aabf468cb82d1", "bd3abe3482157380d2221468500aabf468cb82d1" ]
[ "sidechainnet/utils/align.py", "sidechainnet/utils/parse.py" ]
[ "\"\"\"Functionality for aligning protein sequences in ProteinNet vs SidechainNet.\"\"\"\n\nimport numpy as np\nfrom Bio import Align\n\nfrom sidechainnet.structure.build_info import NUM_COORDS_PER_RES, PRODY_CA_DIST\nfrom sidechainnet.utils.download import ASTRAL_ID_MAPPING, determine_pnid_type\nfrom sidechainnet.utils.measure import GLOBAL_PAD_CHAR\n\n\ndef init_basic_aligner(allow_mismatches=False):\n \"\"\"Returns an aligner with minimal assumptions about gaps.\"\"\"\n a = Align.PairwiseAligner()\n if allow_mismatches:\n a.mismatch_score = -1\n a.gap_score = -3\n a.target_gap_score = -np.inf\n if not allow_mismatches:\n a.mismatch = -np.inf\n a.mismatch_score = -np.inf\n return a\n\n\ndef init_aligner(allow_target_gaps=False, allow_target_mismatches=False):\n \"\"\"Creates an aligner whose weights penalize excessive gaps, make gaps in the\n ProteinNet sequence impossible, and prefer gaps at the tail ends of sequences.\"\"\"\n a = Align.PairwiseAligner()\n a.mismatch = -np.inf\n a.mismatch_score = -np.inf\n\n # Don't allow for gaps or mismatches with the target sequence\n if not allow_target_gaps:\n a.target_gap_score = -np.inf\n\n # Do not let matching items overwhelm determining where gaps should go\n if not allow_target_gaps:\n a.match = 10\n else:\n a.match = 200\n\n if allow_target_mismatches:\n a.mismatch = 200\n\n # Generally, prefer to extend gaps than to create them\n a.query_extend_gap_score = 99\n a.query_open_gap_score = 49\n\n # Set slight preference for open gaps on the edges, however, if present, strongly prefer single edge gaps\n a.query_end_open_gap_score = 50\n a.query_end_extend_gap_score = 100\n\n return a\n\n\ndef get_mask_from_alignment(al):\n \"\"\"For a single alignment, return the mask as a string of '+' and '-'s.\"\"\"\n alignment_str = str(al).split(\"\\n\")[1]\n return alignment_str.replace(\"|\", \"+\")\n\n\ndef get_padded_second_seq_from_alignment(al):\n \"\"\"For a single alignment, return the second padded string.\"\"\"\n alignment_str = str(al).split(\"\\n\")[2]\n return alignment_str\n\n\ndef locate_char(c, s):\n \"\"\"Returns a list of indices of character c in string s.\"\"\"\n return [i for i, l in enumerate(s) if l == c]\n\n\ndef masks_match(pn, new):\n \"\"\"Returns true if the two masks match, or if pn is a subset of new.\"\"\"\n if pn == new:\n return True\n elif new.count(\"-\") > pn.count(\"-\"):\n # If all of the gaps specified by ProteinNet are found by our\n # alignment, but there are some additional gaps, this is acceptable.\n new_gap_locs = locate_char(\"-\", new)\n pn_gap_locs = locate_char(\"-\", pn)\n pn_gaps_still_present = all([pn_gap in new_gap_locs for pn_gap in pn_gap_locs])\n return pn_gaps_still_present\n else:\n return False\n\n\ndef shorten_ends(s1, s2, s1_ang, s1_crd):\n \"\"\"Shortens s1 by removing characters at either end that don't match s2.\n\n Args:\n s1: String, longer than s2\n s2: String\n\n Returns:\n A possibly shortened version of s1, with non-matching start and end\n characters trimmed off.\n \"\"\"\n aligner = init_aligner(allow_target_gaps=True)\n a = aligner.align(s1, s2)\n mask = get_padded_second_seq_from_alignment(a[0])\n i = len(mask) - 1\n while mask[i] == \"-\":\n s1 = s1[:-1]\n s1_ang = s1_ang[:-1]\n s1_crd = s1_crd[:-NUM_COORDS_PER_RES]\n mask = mask[:-1]\n i -= 1\n while mask[0] == \"-\":\n s1 = s1[1:]\n s1_ang = s1_ang[1:]\n s1_crd = s1_crd[NUM_COORDS_PER_RES:]\n mask = mask[1:]\n return s1, s1_ang, s1_crd\n\n\ndef merge(aligner, pn_seq, my_seq, ang, crd, pn_mask, pnid, attempt_number=0):\n 
\"\"\"Returns True iff when pn_seq and my_seq are aligned, the resultant mask is the same\n as reported by ProteinNet.\n\n Also returns the computed_mask that matches with ProteinNet\n \"\"\"\n a = aligner.align(pn_seq, my_seq)\n pn_mask = binary_mask_to_str(pn_mask)\n warning = None\n\n try:\n n_alignments = len(a)\n except OverflowError:\n n_alignments = 50\n warning = \"failed\"\n return None, None, ang, crd, warning\n\n if n_alignments == 0 and attempt_number == 0:\n # Use aligner with a typical set of assumptions.\n aligner = init_aligner()\n return merge(aligner, pn_seq, my_seq, ang, crd, pn_mask, pnid, attempt_number=1)\n\n if n_alignments == 0 and attempt_number == 1:\n # If there appear to be no alignments, it may be the case that there\n # were residues observed that were not present in the ProteinNet\n # sequence. If this occurs at the edges, we can safely trim the\n # observed sequence and try alignment once again\n my_seq, ang, crd = shorten_ends(my_seq, pn_seq, ang, crd)\n return merge(aligner, pn_seq, my_seq, ang, crd, pn_mask, pnid, attempt_number=2)\n\n if n_alignments == 0 and attempt_number == 2:\n # Try making very few assumptions about gaps before allowing mismatches/gaps in\n # the target sequence.\n aligner = init_basic_aligner(allow_mismatches=True)\n return merge(aligner, pn_seq, my_seq, ang, crd, pn_mask, pnid, attempt_number=3)\n\n elif n_alignments == 0 and attempt_number == 3:\n aligner = init_aligner(allow_target_gaps=True, allow_target_mismatches=True)\n mask, a0, ang, crd, warning = merge(aligner,\n pn_seq,\n my_seq,\n ang,\n crd,\n pn_mask,\n pnid,\n attempt_number=4)\n warning = warning + \", mismatch used in alignment\" if warning else \"mismatch used in alignment\"\n return mask, a0, ang, crd, warning\n\n elif n_alignments == 0 and attempt_number == 4:\n warning = \"failed\"\n return None, None, ang, crd, warning\n\n elif n_alignments == 1:\n a0 = a[0]\n computed_mask = get_mask_from_alignment(a0)\n if attempt_number == 4:\n if computed_mask.count(\"X\") + computed_mask.count(\".\") > 5:\n warning = \"too many wrong AAs\"\n computed_mask = computed_mask.replace(\"X\", \"+\").replace(\".\", \"+\")\n if not masks_match(pn_mask, computed_mask):\n if \"astral\" in determine_pnid_type(pnid):\n pdbid, chain = ASTRAL_ID_MAPPING[pnid.split(\"_\")[1].replace(\"-\", \"_\")]\n if \"A\" not in chain:\n # This suggests that ProteinNet made a mistake and parsed\n # chain A when they should have parsed the correct chain.\n # This is therefore not an alignment error.\n pass\n else:\n # If the above case is not True, then we should still expect\n # the mask we compute to match the one computed by ProteinNet\n warning = \"single alignment, mask mismatch\"\n else:\n warning = \"single alignment, mask mismatch\"\n return computed_mask, a0, ang, crd, warning\n\n elif n_alignments > 1:\n best_mask = None\n found_a_match = False\n best_alignment = None\n best_idx = 0\n has_many_alignments = n_alignments >= 200\n for i, a0 in enumerate(a):\n if has_many_alignments and i >= 200:\n break\n computed_mask = get_mask_from_alignment(a0)\n if attempt_number == 4:\n if computed_mask.count(\"X\") + computed_mask.count(\".\") > 5:\n warning = \"too many wrong AAs\"\n computed_mask = computed_mask.replace(\"X\", \"+\").replace(\".\", \"+\")\n if not best_mask:\n best_mask = computed_mask\n best_idx = i\n if not best_alignment:\n best_alignment = a0\n # if masks_match(pn_mask, computed_mask) or assert_mask_gaps_are_correct(\n # computed_mask, crd)[0]:\n if 
assert_mask_gaps_are_correct(computed_mask, crd)[0]:\n found_a_match = True\n best_mask = computed_mask\n best_alignment = a0\n best_idx = i\n break\n if found_a_match:\n warning = \"multiple alignments, found matching mask\" if not warning else warning + \", multiple alignments, found matching mask\"\n if has_many_alignments:\n warning += \", many alignments\"\n return best_mask, best_alignment, ang, crd, warning\n else:\n mask = get_mask_from_alignment(a[0])\n warning = \"multiple alignments, mask mismatch\" if not warning else warning + \", multiple alignments, mask mismatch\"\n if has_many_alignments:\n warning += \", many alignments\"\n return mask, a[0], ang, crd, warning\n\n\ndef other_alignments_with_same_score(all_alignments, cur_alignment_idx,\n cur_alignment_score):\n \"\"\"Returns True if there are other alignments with identical scores.\n\n Args:\n all_alignments: PairwiseAlignment iterable object from BioPython.Align\n cur_alignment_idx: The index of the desired alignment\n cur_alignment_score: The score of the desired alignment\n\n Returns:\n True if any alignments other than the one specified have scores that\n are identical to the specified alignment.\n \"\"\"\n if len(all_alignments) <= 1:\n return False\n\n for i, a0 in enumerate(all_alignments):\n if i > 0 and a0.score < cur_alignment_score:\n break\n if i == cur_alignment_idx:\n continue\n elif a0.score == cur_alignment_score:\n return True\n\n return False\n\n\ndef binary_mask_to_str(m):\n \"\"\"Given an iterable or list of 1s and 0s representing a mask, this returns a string\n mask with '+'s and '-'s.\"\"\"\n m = list(map(lambda x: \"-\" if x == 0 else \"+\", m))\n return \"\".join(m)\n\n\ndef coordinate_iterator(coords, atoms_per_res):\n \"\"\"Iterates over coordinates in a numpy array grouped by residue.\n\n Args:\n coords: Numpy array of coordinates. 
(L x atoms_per_res) x 3.\n atoms_per_res: Number of atomic coordinates per residue.\n\n Returns:\n An iterator where every next call returns the next atoms_per_res\n coordinates.\n \"\"\"\n assert len(coords) % atoms_per_res == 0, f\"There must be {atoms_per_res}\" \\\n f\" atoms for every residue.\\n\" \\\n f\"len(coords) = {len(coords)}\"\n i = 0\n while i + atoms_per_res <= len(coords):\n yield coords[i:i + atoms_per_res]\n i += atoms_per_res\n\n\ndef expand_data_with_mask(data, mask):\n \"\"\"Uses mask to expand data as necessary.\n\n Args:\n data: May be evolutionary (numpy array, Lx21), secondary (unsupported),\n angles (2D numpy array, Lx12), coordinates (2D numpy array,\n (Lx13)x3).\n mask: String of '+'s and '-'s representing if data is present with\n respect to protein primary sequence.\n\n Returns:\n Data in the same format, possibly extending L to match the length of\n the mask, that now contains padding.\n \"\"\"\n if mask.count(\"-\") == 0 and data.shape[0] == len(mask):\n return data\n\n size = data.shape[-1]\n if size == 3:\n data = coordinate_iterator(data, NUM_COORDS_PER_RES)\n blank = np.empty((NUM_COORDS_PER_RES, 3))\n else:\n data = iter(data)\n blank = np.empty((size,))\n\n blank[:] = GLOBAL_PAD_CHAR\n\n new_data = []\n for m in mask:\n if m == \"+\" or m == \".\":\n new_data.append(next(data))\n elif m == \"-\":\n new_data.append(blank.copy())\n else:\n raise ValueError(f\"Unknown mask character '{m}'.\")\n\n return np.vstack(new_data)\n\n\ndef pad_seq_with_mask(seq, mask):\n \"\"\"Given a shorter sequence, expands it to match the padding in mask.\n\n Args:\n seq: String with length smaller than mask.\n mask: String of '+'s and '-'s used to expand seq.\n\n Returns:\n New string of seq but with added '-'s where indicated by mask.\n \"\"\"\n seq_iter = iter(seq)\n new_seq = \"\"\n for m in mask:\n if m == \"+\":\n new_seq += next(seq_iter)\n elif m == \"-\":\n new_seq += \"-\"\n return new_seq\n\n\ndef assert_mask_gaps_are_correct(mask, coordinates):\n \"\"\"Returns True if the structure supports the mask.\n\n Args:\n mask: string of \"+\" and \"-\"s, denoting missing residues\n coordinates: numpy array (L x 14 x 3) of atomic coordinates\n\n Returns:\n True iff the mask is supported by the structure. 
If False, also returns length\n of the offending Ca-Ca distance.\n \"\"\"\n CA_IDX = 1\n if mask.count(\"-\") == 0:\n return True, 0\n\n # This should never happen\n if mask.count(\"+\") != len(coordinates) // NUM_COORDS_PER_RES:\n return False, 0\n\n # First, build a nested list that holds all contiguous regions of the data\n # according to the mask\n coord_iter = coordinate_iterator(coordinates, NUM_COORDS_PER_RES)\n coord_contigs = []\n cur_contig = []\n\n for m in mask:\n if m == \"-\":\n if cur_contig != []:\n coord_contigs.append(cur_contig.copy())\n cur_contig = []\n continue\n else:\n cur_contig.append(next(coord_iter))\n if cur_contig != []:\n coord_contigs.append(cur_contig.copy())\n\n # Once the contiguous regions are reported, we check that the distance\n # between all alpha-carbons is less than ProDy's cutoff (4.1 Angstrom)\n resnum = 1\n for coord_contig in coord_contigs:\n if len(coord_contig) == 1:\n continue\n prev_ca = coord_contig[0][CA_IDX]\n for cur_res in coord_contig[1:]:\n cur_ca = cur_res[CA_IDX]\n if np.linalg.norm(cur_ca - prev_ca) > PRODY_CA_DIST * 1.85:\n return False, np.linalg.norm(cur_ca - prev_ca)\n prev_ca = cur_ca.copy()\n resnum += 1\n\n return True, 0\n", "\"\"\"Functionality for parsing raw ProteinNet files.\"\"\"\n\nimport itertools\nimport multiprocessing\nimport os\nimport pickle\nimport re\nfrom glob import glob\n\nimport numpy as np\nimport prody as pr\n\n\ndef load_ids_from_text_files(directory, training_set):\n \"\"\"Given a directory where raw ProteinNet records are stored along with .ids files,\n reads and returns the contents of those files.\n\n Effectively returns a list of IDs associated with the training, validation,\n and test sets.\n \"\"\"\n with open(os.path.join(directory, f\"training_{training_set}_ids.txt\"),\n \"r\") as trainf, open(os.path.join(directory, \"validation_ids.txt\"),\n \"r\") as validf, open(\n os.path.join(directory, \"testing_ids.txt\"),\n \"r\") as testf:\n train_ids = trainf.read().splitlines()\n valid_ids = validf.read().splitlines()\n test_ids = testf.read().splitlines()\n return train_ids, valid_ids, test_ids\n\n\ndef read_protein_from_file(file_pointer, include_tertiary):\n \"\"\"Parses a single record from a text-based ProteinNet file as a dictionary.\n\n This function was originally written by Jeppe Hallgren, though I have made\n slight modifications. The most recent version is available here:\n https://github.com/biolib/openprotein/blob/master/preprocessing.py\n Because Mr. Hallgren's software caries an MIT license, I have included his\n copyright notice which describes the method below. 
All other portions of\n this software are licensed according to the LICENSE file in this\n repository's home directory.\n\n MIT License\n\n Copyright (c) 2018 Jeppe Hallgren\n\n Permission is hereby granted, free of charge, to any person obtaining a copy\n of this software and associated documentation files (the \"Software\"), to deal\n in the Software without restriction, including without limitation the rights\n to use, copy, modify, merge, publish, distribute, sublicense, and/or sell\n copies of the Software, and to permit persons to whom the Software is\n furnished to do so, subject to the following conditions:\n\n The above copyright notice and this permission notice shall be included in\n all copies or substantial portions of the Software.\n\n THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\n AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\n OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE\n SOFTWARE.\n\n Args:\n file_pointer: Opened file object that contains ProteinNet text records\n include_tertiary: boolean, whether or not to parse atomic coordinates\n\n Returns:\n A dictionary containing various data entries for a single ProteinNet ID.\n\n ex:\n { \"id\" : \"1A9U_1_A\",\n \"primary\" : \"MRYSKKKNACEWNA\",\n \"evolutionary\": np.ndarray(...),\n ...\n }\n \"\"\"\n dict_ = {}\n _dssp_dict = {'L': 0, 'H': 1, 'B': 2, 'E': 3, 'G': 4, 'I': 5, 'T': 6, 'S': 7}\n _mask_dict = {'-': 0, '+': 1}\n\n while True:\n next_line = file_pointer.readline()\n if next_line == '[ID]\\n':\n id_ = file_pointer.readline()[:-1]\n dict_.update({'id': id_})\n elif next_line == '[PRIMARY]\\n':\n primary = file_pointer.readline()[:-1]\n dict_.update({'primary': primary})\n elif next_line == '[EVOLUTIONARY]\\n':\n evolutionary = []\n for residue in range(21):\n evolutionary.append(\n [float(step) for step in file_pointer.readline().split()])\n evolutionary = np.asarray(evolutionary).T\n dict_.update({'evolutionary': evolutionary})\n elif next_line == '[SECONDARY]\\n':\n secondary = list([_dssp_dict[dssp] for dssp in file_pointer.readline()[:-1]])\n dict_.update({'secondary': secondary})\n elif next_line == '[TERTIARY]\\n' and include_tertiary:\n tertiary = []\n # 3 dimension\n for axis in range(3):\n tertiary.append(\n [float(coord) for coord in file_pointer.readline().split()])\n dict_.update({'tertiary': tertiary})\n elif next_line == '[MASK]\\n':\n mask = list([_mask_dict[aa] for aa in file_pointer.readline()[:-1]])\n dict_.update({'mask': mask})\n elif next_line == '\\n':\n return dict_\n elif next_line == '':\n return None\n\n\ndef process_file(input_filename_out_dir, return_ids=False):\n \"\"\"Parallelizable method for processing a raw ProteinNet file.\n\n Creates and returns a pickled dictionary of the data.\n \"\"\"\n all_ids = []\n input_filename, out_dir = input_filename_out_dir\n print(\" \" + input_filename)\n text_file = open(os.path.join(out_dir,\n os.path.basename(input_filename) + '_ids.txt'), \"w\")\n input_file = open(input_filename, \"r\")\n meta_dict = {}\n while True:\n next_protein = read_protein_from_file(input_file, include_tertiary=True)\n if next_protein is None:\n break\n id_ = next_protein[\"id\"]\n del next_protein[\"id\"]\n meta_dict.update({id_: next_protein})\n 
text_file.write(f\"{id_}\\n\")\n if return_ids:\n all_ids.append(id_)\n with open(os.path.join(out_dir,\n os.path.basename(input_filename) + \".pkl\"), \"wb\") as f:\n pickle.dump(meta_dict, f)\n input_file.close()\n text_file.close()\n print(f\"{input_filename} finished.\")\n if return_ids:\n return (input_filename, all_ids)\n\n\ndef parse_raw_proteinnet(proteinnet_in_dir, proteinnet_out_dir, training_set):\n \"\"\"Extracts and saves information for a single ProteinNet dataset.\n\n Preprocesses raw ProteinNet records by reading them and transforming them\n into PyTorch-saved dictionaries. Files are kept separate due to file size.\n For ease of inspection, the ProteinNet IDs are extracted and save as `.ids` files.\n\n Args:\n proteinnet_in_dir: Directory where all raw ProteinNet files are kept\n proteinnet_out_dir: Directory to save processed data\n training_set: Which thinning of ProteinNet is requested\n\n Returns:\n relevant_ids: A list of ProteinNet IDs from corresponding training_set\n \"\"\"\n train_file = f\"training_{training_set}.pkl\"\n\n # If the desired ProteinNet dataset has already been processed, load its IDs\n if os.path.exists(os.path.join(proteinnet_out_dir, train_file)):\n print(f\"Raw ProteinNet files already preprocessed (\"\n f\"{os.path.join(proteinnet_out_dir, train_file)}).\")\n relevant_ids = retrieve_relevant_proteinnetids_from_files(\n proteinnet_out_dir, training_set)\n return relevant_ids\n\n # If the preprocessed ProteinNet dictionaries don't exist, create them.\n if not os.path.exists(proteinnet_out_dir):\n os.makedirs(proteinnet_out_dir)\n\n # Look for the target ProteinNet files\n if not os.path.isdir(os.path.join(proteinnet_in_dir, \"targets\")):\n print(\"There must be a subdirectory containing all protein targets with \"\n \"the name 'targets'.\\nYou can download the .tgz file from the \"\n \"following link: http://predictioncenter.org/download_area/CASP12/targets\"\n \"/\\n\"\n \"(replace 'CASP12' with the CASP version of interest and download \"\n \"the most recent, largest compressed file in the list.\")\n raise ValueError(\"Could not find ProteinNet targets.\")\n # Look for the raw ProteinNet files\n input_files = [\n f for f in glob(os.path.join(proteinnet_in_dir, \"*[!.ids]\"))\n if not os.path.isdir(f)\n ]\n assert len(input_files) == 8, (\n f\"Looking for raw ProteinNet files in '{proteinnet_in_dir}', but\"\n \"could not find all 8.\\n Please download from Mohammed \"\n \"AlQuraishi's repository: \"\n \"https://github.com/aqlaboratory/proteinnet\")\n\n # Process each ProteinNet file by turning them into PyTorch saved dictionaries\n print(\"Preprocessing raw ProteinNet files...\")\n with multiprocessing.Pool(multiprocessing.cpu_count()) as p:\n p.map(process_file, zip(input_files, itertools.repeat(proteinnet_out_dir)))\n print(f\"Done. 
Processed ProteinNet files saved to {proteinnet_out_dir}.\")\n\n # Return the ProteinNet IDs associated with the target dataset\n relevant_ids = retrieve_relevant_proteinnetids_from_files(proteinnet_out_dir,\n training_set)\n return relevant_ids\n\n\ndef retrieve_relevant_proteinnetids_from_files(proteinnet_out_dir, training_set):\n \"\"\"Returns a list of ProteinNet IDs relevant for a particular training set.\n\n Args:\n proteinnet_out_dir: Directory containing preprocessed ProteinNet files.\n training_set: Which training set thinning of CASP to use.\n\n Returns:\n A list of ProteinNet IDs (training, validation, and test set).\n \"\"\"\n train_file = f\"training_{training_set}.pkl\"\n relevant_training_file = os.path.join(proteinnet_out_dir,\n train_file.replace(\".pkl\", \"_ids.txt\"))\n relevant_id_files = [\n os.path.join(proteinnet_out_dir, \"testing_ids.txt\"),\n os.path.join(proteinnet_out_dir, \"validation_ids.txt\"), relevant_training_file\n ]\n relevant_ids = []\n for fname in relevant_id_files:\n with open(fname, \"r\") as f:\n relevant_ids += f.read().splitlines()\n\n return relevant_ids\n\n\ndef parse_astral_summary_file(lines):\n \"\"\"Given a path to the ASTRAL database summary file, this function parses that file\n and returns a dictionary that maps ASTRAL IDs to (pdbid, chain).\"\"\"\n d = {}\n for line in lines:\n if line.startswith(\"#\"):\n continue\n line_items = line.split()\n if line_items[3] == \"-\":\n continue\n if line_items[3] not in d.keys():\n d[line_items[3]] = (line_items[4], line_items[5])\n return d\n\n\ndef get_chain_from_astral_id(astral_id, d):\n \"\"\"Given an ASTRAL ID and the ASTRAL->PDB/chain mapping dictionary, this function\n attempts to return the relevant, parsed ProDy object.\"\"\"\n pdbid, chain = d[astral_id]\n assert \",\" not in chain, f\"Issue parsing {astral_id} with chain {chain} and pdbid \" \\\n f\"{pdbid}.\"\n chain, resnums = chain.split(\":\")\n\n if astral_id == \"d4qrye_\" or astral_id in ASTRAL_IDS_INCORRECTLY_PARSED:\n chain = \"A\"\n resnums = \"\"\n\n # Handle special case https://github.com/prody/ProDy/issues/1197\n if astral_id == \"d1tocr1\":\n a = pr.parsePDB(\"1toc\", chain=\"R\")\n a = a.select(\"resnum 2 to 59 or resnum 1A\") # Note there is no 1B\n return a\n\n a = pr.parsePDB(pdbid, chain=chain)\n if resnums != \"\":\n # This pattern matches ASTRAL number ranges like 1-100, 1A-100, -1-39, -4--1, etc.\n p = re.compile(r\"((?P<d1>-?\\d+)(?P<ic1>\\w?))-((?P<d2>-?\\d+)(?P<ic2>\\w?))\")\n match = p.match(resnums)\n start, start_icode = int(match.group(\"d1\")), match.group(\"ic1\")\n end, end_icode = int(match.group(\"d2\")), match.group(\"ic2\")\n\n # Ranges with negative numbers must be escaped with ` character\n range_str = f\"{start} to {end}\"\n if start < 0 or end < 0:\n range_str = f\"`{range_str}`\"\n\n if not start_icode and not end_icode:\n # There are no insertion codes. 
Easy case.\n selection_str = f\"resnum {range_str}\"\n elif (start_icode and not end_icode) or (not start_icode and end_icode):\n # If there's only one insertion code, this selection is not well defined\n # and must be handled by special cases above.\n raise ValueError(f\"Unsupported ASTRAL range {astral_id}.\")\n elif start_icode and end_icode:\n if start_icode == end_icode:\n selection_str = f\"resnum {range_str} and icode {start_icode}\"\n else:\n raise ValueError(f\"Unsupported ASTRAL range {astral_id}.\")\n\n a = a.select(selection_str)\n\n return a\n\n\n# Defines a list of ASTRAL IDs that may have been parsed incorrectly in ProteinNet.\n# For instance, several ASTRAL IDs in ProteinNet contain sequences for chain A, even\n# though the ASTRAL ID specifies a different chain.\nFULL_ASTRAL_IDS_INCORRECTLY_PARSED = [\n '1EU3_d1eu3a1', '1FPO_d1fpoc1', '1GL9_d1gl9c1', '1GQ3_d1gq3b2', '1N2A_d1n2ab2',\n '1N9W_d1n9wb2', '1NSA_d1nsaa2', '1NYR_d1nyrb3', '1RQ2_d1rq2b1', '1SA0_d1sa0c2',\n '1UYV_d1uyvb2', '1V8O_d1v8oc1', '1V8P_d1v8pc1', '1XES_d1xesd1', '1XP4_d1xp4d2',\n '1Z2B_d1z2bc2', '2AL1_d2al1b1', '2AUA_d2auab1', '2E0A_d2e0ab2', '2QJJ_d2qjjd2',\n '2RCY_d2rcye1', '2V83_d2v83c2', '2WLJ_d2wljb1', '2Z9I_d2z9ic1', '3EQV_d3eqvb1',\n '3GFT_d3gftf1', '3GLJ_d3glja1', '3OMZ_d3omzc2', '3OYT_d3oytb2', '3PUW_d3puwb2',\n '3R3L_d3r3lc2', '3UGX_d3ugxd2', '4KLY_d4klye1', '4L4J_d4l4jb1', '4M9A_d4m9ad1',\n '4OCR_d4ocrl2', '5CTB_d5ctbc2'\n]\n\nASTRAL_IDS_INCORRECTLY_PARSED = [\n aid.split(\"_\")[1] for aid in FULL_ASTRAL_IDS_INCORRECTLY_PARSED\n]\n" ]
[ [ "numpy.empty", "numpy.linalg.norm", "numpy.vstack" ], [ "numpy.asarray" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
hunto/image_classification_sota
[ "8264872d58fb1735978fc796fea7e6bceb302eae", "8264872d58fb1735978fc796fea7e6bceb302eae" ]
[ "tools/convert.py", "lib/utils/scheduler.py" ]
[ "import os\nimport torch\nimport torch.nn as nn\nimport logging\nimport time\nfrom torch.nn.parallel import DistributedDataParallel as DDP\n\nfrom lib.models.builder import build_model\nfrom lib.models.loss import CrossEntropyLabelSmooth\nfrom lib.models.utils.dbb.dbb_block import DiverseBranchBlock\nfrom lib.dataset.builder import build_dataloader\nfrom lib.utils.args import parse_args\nfrom lib.utils.dist_utils import init_dist, init_logger\nfrom lib.utils.misc import accuracy, AverageMeter, CheckpointManager\nfrom lib.utils.model_ema import ModelEMA\nfrom lib.utils.measure import get_params, get_flops\n\ntorch.backends.cudnn.benchmark = True\n'''init logger'''\nlogging.basicConfig(format='%(asctime)s %(levelname)s %(message)s',\n datefmt='%H:%M:%S')\nlogger = logging.getLogger()\nlogger.setLevel(logging.INFO)\n\n\ndef main():\n args, args_text = parse_args()\n assert args.resume != ''\n args.exp_dir = f'{os.path.dirname(args.resume)}/convert'\n\n '''distributed'''\n init_dist(args)\n init_logger(args)\n\n '''build dataloader'''\n train_dataset, val_dataset, train_loader, val_loader = \\\n build_dataloader(args)\n\n '''build model'''\n if args.smoothing == 0.:\n loss_fn = nn.CrossEntropyLoss().cuda()\n else:\n loss_fn = CrossEntropyLabelSmooth(num_classes=args.num_classes,\n epsilon=args.smoothing).cuda()\n\n model = build_model(args)\n logger.info(\n f'Model {args.model} created, params: {get_params(model)}, '\n f'FLOPs: {get_flops(model, input_shape=args.input_shape)}')\n\n # Diverse Branch Blocks\n if args.dbb:\n # convert 3x3 convs to dbb blocks\n from lib.models.utils.dbb_converter import convert_to_dbb\n convert_to_dbb(model)\n logger.info(model)\n logger.info(\n f'Converted to DBB blocks, model params: {get_params(model)}, '\n f'FLOPs: {get_flops(model, input_shape=args.input_shape)}')\n\n model.cuda()\n model = DDP(model,\n device_ids=[args.local_rank],\n find_unused_parameters=False)\n\n if args.model_ema:\n model_ema = ModelEMA(model, decay=args.model_ema_decay)\n else:\n model_ema = None\n\n '''dyrep'''\n if args.dyrep:\n from lib.models.utils.dyrep import DyRep\n dyrep = DyRep(\n model.module,\n None)\n logger.info('Init DyRep done.')\n else:\n dyrep = None\n\n '''resume'''\n ckpt_manager = CheckpointManager(model,\n ema_model=model_ema,\n save_dir=args.exp_dir,\n rank=args.rank,\n additions={\n 'dyrep': dyrep\n })\n\n if args.resume:\n epoch = ckpt_manager.load(args.resume)\n if args.dyrep:\n model = DDP(model.module,\n device_ids=[args.local_rank],\n find_unused_parameters=True)\n logger.info(\n f'Resume ckpt {args.resume} done, '\n f'epoch {epoch}'\n )\n else:\n epoch = 0\n\n # validate\n test_metrics = validate(args, epoch, model, val_loader, loss_fn)\n # convert dyrep / dbb model to inference model\n for m in model.module.modules():\n if isinstance(m, DiverseBranchBlock):\n m.switch_to_deploy()\n logger.info(str(model))\n logger.info(\n f'Converted DBB / DyRep model to inference model, params: {get_params(model)}, '\n f'FLOPs: {get_flops(model, input_shape=args.input_shape)}')\n test_metrics = validate(args, epoch, model, val_loader, loss_fn)\n\n '''save converted checkpoint'''\n if args.rank == 0:\n save_path = os.path.join(args.exp_dir, 'model.ckpt')\n torch.save(model.module.state_dict(), save_path)\n logger.info(f'Saved converted model checkpoint into {save_path} .')\n\n\ndef validate(args, epoch, model, loader, loss_fn, log_suffix=''):\n loss_m = AverageMeter(dist=True)\n top1_m = AverageMeter(dist=True)\n top5_m = AverageMeter(dist=True)\n batch_time_m = 
AverageMeter(dist=True)\n start_time = time.time()\n\n model.eval()\n for batch_idx, (input, target) in enumerate(loader):\n with torch.no_grad():\n output = model(input)\n loss = loss_fn(output, target)\n\n top1, top5 = accuracy(output, target, topk=(1, 5))\n loss_m.update(loss.item(), n=input.size(0))\n top1_m.update(top1 * 100, n=input.size(0))\n top5_m.update(top5 * 100, n=input.size(0))\n\n batch_time = time.time() - start_time\n batch_time_m.update(batch_time)\n if batch_idx % args.log_interval == 0 or batch_idx == len(loader) - 1:\n logger.info('Test{}: {} [{:>4d}/{}] '\n 'Loss: {loss.val:.3f} ({loss.avg:.3f}) '\n 'Top-1: {top1.val:.3f}% ({top1.avg:.3f}%) '\n 'Top-5: {top5.val:.3f}% ({top5.avg:.3f}%) '\n 'Time: {batch_time.val:.2f}s'.format(\n log_suffix,\n epoch,\n batch_idx,\n len(loader),\n loss=loss_m,\n top1=top1_m,\n top5=top5_m,\n batch_time=batch_time_m))\n start_time = time.time()\n\n return {'test_loss': loss_m.avg, 'top1': top1_m.avg, 'top5': top5_m.avg}\n\n\nif __name__ == '__main__':\n main()\n", "from collections import OrderedDict\nfrom torch.optim.lr_scheduler import _LRScheduler\nfrom torch.optim.lr_scheduler import ReduceLROnPlateau\nfrom torch.optim.lr_scheduler import StepLR, CosineAnnealingLR, LambdaLR\n\n\ndef build_scheduler(sched_type, optimizer, warmup_steps, warmup_lr, step_size, decay_rate, total_steps=-1, multiplier=1, steps_per_epoch=1, decay_by_epoch=True, min_lr=1e-5):\n if sched_type == 'step':\n scheduler = StepLR(optimizer, step_size, gamma=decay_rate)\n decay_by_epoch = False\n elif sched_type == 'cosine':\n scheduler = CosineAnnealingLR(optimizer, T_max=total_steps - warmup_steps, eta_min=min_lr)\n elif sched_type == 'linear':\n scheduler = LambdaLR(optimizer, lambda epoch: (total_steps - warmup_steps - epoch) / (total_steps - warmup_steps))\n else:\n raise NotImplementedError(f'Scheduler {sched_type} not implemented.')\n scheduler = GradualWarmupScheduler(optimizer, multiplier=multiplier, total_epoch=warmup_steps, after_scheduler=scheduler, warmup_lr=warmup_lr, step_size=steps_per_epoch, decay_by_epoch=decay_by_epoch)\n return scheduler\n\n\nclass GradualWarmupScheduler(_LRScheduler):\n \"\"\" Gradually warm-up(increasing) learning rate in optimizer.\n Modified based on: https://github.com/ildoonet/pytorch-gradual-warmup-lr\n Proposed in 'Accurate, Large Minibatch SGD: Training ImageNet in 1 Hour'.\n Args:\n optimizer (Optimizer): Wrapped optimizer.\n multiplier: target learning rate = base lr * multiplier if multiplier > 1.0. if multiplier = 1.0, lr starts from 0 and ends up with the base_lr.\n total_epoch: target learning rate is reached at total_epoch, gradually\n after_scheduler: after target_epoch, use this scheduler(eg. 
ReduceLROnPlateau)\n warmup_lr: warmup learning rate for the first epoch\n step_size: step number in one epoch\n decay_by_epoch: if True, decay lr in after_scheduler after each epoch; otherwise decay after every step\n \"\"\"\n\n def __init__(self, optimizer, multiplier, total_epoch, after_scheduler=None, warmup_lr=1e-6, step_size=1, decay_by_epoch=True):\n self.multiplier = multiplier\n if self.multiplier < 1.:\n raise ValueError('multiplier should be greater than or equal to 1.')\n self.total_epoch = total_epoch\n self.after_scheduler = after_scheduler\n self.warmup_lr = warmup_lr\n self.step_size = step_size\n self.finished = False\n if self.total_epoch == 0:\n self.finished = True\n self.total_epoch = -1\n self.decay_by_epoch = decay_by_epoch\n super(GradualWarmupScheduler, self).__init__(optimizer)\n\n def get_lr(self):\n if self.last_epoch > self.total_epoch or self.finished:\n if self.after_scheduler:\n if not self.finished:\n self.after_scheduler.base_lrs = [base_lr * self.multiplier for base_lr in self.base_lrs]\n self.finished = True\n return self.after_scheduler.get_lr()\n return [base_lr * self.multiplier for base_lr in self.base_lrs]\n\n if self.multiplier == 1.0:\n return [self.warmup_lr + (base_lr - self.warmup_lr) * (float(self.last_epoch // self.step_size * self.step_size) / self.total_epoch) for base_lr in self.base_lrs]\n else:\n return [base_lr * ((self.multiplier - 1.) * (self.last_epoch // self.step_size * self.step_size) / self.total_epoch + 1.) for base_lr in self.base_lrs]\n\n def step_ReduceLROnPlateau(self, metrics, epoch=None):\n if epoch is None:\n epoch = self.last_epoch + 1\n if self.last_epoch <= self.total_epoch:\n if self.multiplier == 1.0:\n warmup_lr = [self.warmup_lr + (base_lr - self.warmup_lr) * (float(self.last_epoch // self.step_size * self.step_size) / self.total_epoch) for base_lr in self.base_lrs]\n else:\n warmup_lr = [base_lr * ((self.multiplier - 1.) * (self.last_epoch // self.step_size * self.step_size) / self.total_epoch + 1.) for base_lr in self.base_lrs]\n for param_group, lr in zip(self.optimizer.param_groups, warmup_lr):\n param_group['lr'] = lr\n else:\n if epoch is None:\n self.after_scheduler.step(metrics, None)\n else:\n if self.decay_by_epoch:\n self.after_scheduler.step(metrics, (epoch - self.total_epoch - 1) // self.step_size * self.step_size)\n else:\n self.after_scheduler.step(metrics, epoch - self.total_epoch - 1)\n\n def step(self, epoch=None, metrics=None):\n if type(self.after_scheduler) != ReduceLROnPlateau:\n if self.finished and self.after_scheduler:\n if epoch is None:\n self.after_scheduler.step(None)\n else:\n if self.decay_by_epoch:\n self.after_scheduler.step((epoch - self.total_epoch - 1) // self.step_size * self.step_size)\n else:\n self.after_scheduler.step(epoch - self.total_epoch - 1)\n self._last_lr = self.after_scheduler.get_lr()\n else:\n return super(GradualWarmupScheduler, self).step(epoch)\n else:\n self.step_ReduceLROnPlateau(metrics, epoch)\n\n\n" ]
[ [ "torch.nn.CrossEntropyLoss", "torch.no_grad", "torch.nn.parallel.DistributedDataParallel" ], [ "torch.optim.lr_scheduler.LambdaLR", "torch.optim.lr_scheduler.CosineAnnealingLR", "torch.optim.lr_scheduler.StepLR" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
dimasad/ceacoest
[ "4a5eb946e118145d054cac785cc8b4d834c28192" ]
[ "ceacoest/modelling/symquat.py" ]
[ "\"\"\"Symbolic quaterion operations, dynamics and rotations.\"\"\"\n\n\nimport numpy as np\nimport sympy\n\n\ndef derivative(quat, omega, renorm_gain=0):\n [q0, q1, q2, q3] = quat\n [p, q, r] = omega\n err = 1 - (q0**2 + q1**2 + q2**2 + q3**2)\n q0dot = -0.5 * (p*q1 + q*q2 + r*q3) + renorm_gain*err*q0\n q1dot = -0.5 * (-p*q0 - r*q2 + q*q3) + renorm_gain*err*q1\n q2dot = -0.5 * (-q*q0 + r*q1 - p*q3) + renorm_gain*err*q2\n q3dot = -0.5 * (-r*q0 - q*q1 + p*q2) + renorm_gain*err*q3\n return np.array([q0dot, q1dot, q2dot, q3dot])\n\n\ndef rotmat(quat):\n \"\"\"Quaternion rotation matrix.\"\"\"\n q0, q1, q2, q3 = quat\n return np.array(\n [[q0**2 + q1**2 - q2**2 - q3**2, 2*(q1*q2 + q0*q3), 2*(q1*q3 - q0*q2)],\n [2*(q1*q2 - q0*q3), q0**2 - q1**2 + q2**2 - q3**2, 2*(q2*q3 + q0*q1)],\n [2*(q1*q3 + q0*q2), 2*(q2*q3 - q0*q1), q0**2 - q1**2 - q2**2 + q3**2]]\n )\n\n\ndef toeuler(quat):\n \"\"\"Convert quaternion rotation to roll-pitch-yaw Euler angles.\"\"\"\n q0, q1, q2, q3 = quat\n roll = sympy.atan2(2*(q2*q3 + q0*q1), q0**2 - q1**2 - q2**2 + q3**2)\n pitch = -sympy.asin(2*(q1*q3 - q0*q2))\n yaw = sympy.atan2(2*(q1*q2 + q0*q3), q0**2 + q1**2 - q2**2 - q3**2)\n return np.array([roll, pitch, yaw])\n\n" ]
[ [ "numpy.array" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
ElieKadoche/floris
[ "d18f4d263ecabf502242592f9d60815a07c7b89c" ]
[ "floris/tools/flow_data.py" ]
[ "# Copyright 2021 NREL\n\n# Licensed under the Apache License, Version 2.0 (the \"License\"); you may not\n# use this file except in compliance with the License. You may obtain a copy of\n# the License at http://www.apache.org/licenses/LICENSE-2.0\n\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT\n# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the\n# License for the specific language governing permissions and limitations under\n# the License.\n\n# See https://floris.readthedocs.io for documentation\n\n\nimport os\n\nimport numpy as np\nfrom sklearn import neighbors\n\nfrom ..utilities import Vec3\n\n\nclass FlowData:\n \"\"\"\n FlowData objects represent a saved 3D flow from a FLORIS simulation\n or other data source.\n \"\"\"\n\n # TODO handle none case, maybe default values apply like 0 origin and auto\n # determine spacing and dimensions\n def __init__(self, x, y, z, u, v, w, spacing=None, dimensions=None, origin=None):\n \"\"\"\n Initialize FlowData object with coordinates, velocity fields,\n and meta-data.\n\n Args:\n x (np.array): Cartesian coordinate data.\n y (np.array): Cartesian coordinate data.\n z (np.array): Cartesian coordinate data.\n u (np.array): x-component of velocity.\n v (np.array): y-component of velocity.\n w (np.array): z-component of velocity.\n spacing (Vec3, optional): Spatial resolution.\n Defaults to None.\n dimensions (Vec3, optional): Named dimensions\n (e.g. x1, x2, x3). Defaults to None.\n origin (Vec3, optional): Coordinates of origin.\n Defaults to None.\n \"\"\"\n\n self.x = x\n self.y = y\n self.z = z\n self.u = u\n self.v = v\n self.w = w\n\n self.spacing = spacing\n self.dimensions = dimensions\n self.origin = origin\n\n # Technically resolution is a restating of above, but it is useful to have\n self.resolution = Vec3(len(np.unique(x)), len(np.unique(y)), len(np.unique(z)))\n\n def save_as_vtk(self, filename):\n \"\"\"\n Save FlowData Object to vtk format.\n\n Args:\n filename (str): Write-to path for vtk file.\n \"\"\"\n n_points = self.dimensions.x1 * self.dimensions.x2 * self.dimensions.x3\n\n ln = \"\\n\"\n vtk_file = open(filename, \"w\")\n vtk_file.write(\"# vtk DataFile Version 3.0\" + ln)\n vtk_file.write(\"array.mean0D\" + ln)\n vtk_file.write(\"ASCII\" + ln)\n vtk_file.write(\"DATASET STRUCTURED_POINTS\" + ln)\n vtk_file.write(\"DIMENSIONS {}\".format(self.dimensions) + ln)\n vtk_file.write(\"ORIGIN {} {} {}\".format(self.origin.x1, self.origin.x2, self.origin.x3) + ln)\n vtk_file.write(\"SPACING {}\".format(self.spacing) + ln)\n vtk_file.write(\"POINT_DATA {}\".format(n_points) + ln)\n vtk_file.write(\"FIELD attributes 1\" + ln)\n vtk_file.write(\"UAvg 3 {} float\".format(n_points) + ln)\n for u, v, w in zip(self.u, self.v, self.w):\n vtk_file.write(\"{}\".format(Vec3(u, v, w)) + ln)\n\n @staticmethod\n def crop(ff, x_bnds, y_bnds, z_bnds):\n \"\"\"\n Crop FlowData object to within stated bounds.\n\n Args:\n ff (:py:class:`~.tools.flow_data.FlowData`):\n FlowData object.\n x_bnds (iterable): Min and max of x-coordinate.\n y_bnds (iterable): Min and max of y-coordinate.\n z_bnds (iterable): Min and max of z-coordinate.\n\n Returns:\n (:py:class:`~.tools.flow_data.FlowData`):\n Cropped FlowData object.\n \"\"\"\n\n map_values = (\n (ff.x > x_bnds[0])\n & (ff.x < x_bnds[1])\n & (ff.y > y_bnds[0])\n & (ff.y < y_bnds[1])\n & (ff.z > z_bnds[0])\n & (ff.z < z_bnds[1])\n )\n\n x = ff.x[map_values]\n y = ff.y[map_values]\n 
z = ff.z[map_values]\n\n # Work out new dimensions\n dimensions = Vec3(len(np.unique(x)), len(np.unique(y)), len(np.unique(z)))\n\n # Work out origin\n origin = Vec3(\n ff.origin.x1 + np.min(x),\n ff.origin.x2 + np.min(y),\n ff.origin.x3 + np.min(z),\n )\n\n return FlowData(\n x - np.min(x),\n y - np.min(y),\n z - np.min(z),\n ff.u[map_values],\n ff.v[map_values],\n ff.w[map_values],\n spacing=ff.spacing, # doesn't change\n dimensions=dimensions,\n origin=origin,\n )\n\n # Define a quick function for getting arbitrary points from sowfa\n\n def get_points_from_flow_data(self, x_points, y_points, z_points):\n \"\"\"\n Return the u-value of a set of points from within a FlowData object.\n Use a simple nearest neighbor regressor to do internal interpolation.\n\n Args:\n x_points (np.array): Array of x-locations of points.\n y_points (np.array): Array of y-locations of points.\n z_points (np.array): Array of z-locations of points.\n\n Returns:\n np.array: Array of u-velocity at specified points.\n \"\"\"\n # Fit a 1-nearest-neighbor regressor on the stored grid coordinates so\n # that arbitrary query points snap to the closest stored u-value.\n n_neighbors = 1\n X = np.column_stack([self.x, self.y, self.z])\n knn = neighbors.KNeighborsRegressor(n_neighbors)\n knn.fit(X, self.u)\n\n # Predict new points\n T = np.column_stack([x_points, y_points, z_points])\n return knn.predict(T)\n" ]
[ [ "sklearn.neighbors.KNeighborsRegressor", "numpy.min", "numpy.unique", "numpy.column_stack" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
CameronBodine/dash_doodler
[ "6b1936d48bf2f9742d0ddf1225cbb960bef3d8bd" ]
[ "utils/plot_label_generation.py" ]
[ "# Written by Dr Daniel Buscombe, Marda Science LLC\n# for the USGS Coastal Change Hazards Program\n#\n# MIT License\n#\n# Copyright (c) 2020, Marda Science LLC\n#\n# Permission is hereby granted, free of charge, to any person obtaining a copy\n# of this software and associated documentation files (the \"Software\"), to deal\n# in the Software without restriction, including without limitation the rights\n# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell\n# copies of the Software, and to permit persons to whom the Software is\n# furnished to do so, subject to the following conditions:\n#\n# The above copyright notice and this permission notice shall be included in all\n# copies or substantial portions of the Software.\n#\n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\n# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\n# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE\n# SOFTWARE.\n\n# ##========================================================\n\n# allows loading of functions from the src directory\nimport sys, os, getopt\nsys.path.insert(1, '../src')\nfrom annotations_to_segmentations import *\nfrom image_segmentation import *\n\nfrom glob import glob\nimport skimage.util\nfrom tqdm import tqdm\n\nfrom tkinter import Tk\nfrom tkinter.filedialog import askopenfilename, askdirectory\n\nimport matplotlib\nimport matplotlib.pyplot as plt\n\nfrom imageio import imwrite\n\n###===========================================================\ntry:\n sys.path.insert(1, '../')\n from my_defaults import *\n print(\"Your session defaults loaded\")\nexcept:\n DEFAULT_PEN_WIDTH = 3\n DEFAULT_CRF_DOWNSAMPLE = 2\n DEFAULT_RF_DOWNSAMPLE = 2\n DEFAULT_CRF_THETA = 1\n DEFAULT_CRF_MU = 1\n DEFAULT_CRF_GTPROB = 0.9\n\nDEFAULT_RF_NESTIMATORS = 3\n\n###===========================================================\ndef tta_crf(img, rf_result_filt_inp, k):\n k = int(k)\n result2, n = crf_refine(np.roll(rf_result_filt_inp,k), np.roll(img,k), DEFAULT_CRF_THETA, DEFAULT_CRF_MU, DEFAULT_CRF_DOWNSAMPLE, DEFAULT_CRF_GTPROB)\n result2 = np.roll(result2, -k)\n if k==0:\n w=.1\n else:\n w = 1/np.sqrt(k)\n\n return result2, w,n\n\n\n###===========================================================\ndef gen_plot_seq(orig_distance, save_mode):\n\n\n Tk().withdraw() # we don't want a full GUI, so keep the root window from appearing\n direc = askdirectory(title='Select directory of results (annotations)', initialdir=os.getcwd()+os.sep+'results')\n files = sorted(glob(direc+'/*.npz'))\n\n files = [f for f in files if 'labelgen' not in f]\n files = [f for f in files if '4zoo' not in f]\n\n #### loop through each file\n for anno_file in tqdm(files):\n\n\n if os.path.exists(anno_file.replace('.npz','_label.png')):\n print('%s exists ... 
skipping' % (anno_file.replace('.npz','_label.png')))\n continue\n else:\n\n\n # print(\"Working on %s\" % (file))\n print(\"Working on %s\" % (anno_file))\n dat = np.load(anno_file)\n data = dict()\n for k in dat.keys():\n data[k] = dat[k]\n del dat\n # print(data['image'].shape)\n\n if 'classes' not in locals():\n\n try:\n classes = data['classes']\n except:\n Tk().withdraw() # we don't want a full GUI, so keep the root window from appearing\n classfile = askopenfilename(title='Select file containing class (label) names', filetypes=[(\"Pick classes.txt file\",\"*.txt\")])\n\n with open(classfile) as f:\n classes = f.readlines()\n\n class_label_names = [c.strip() for c in classes]\n NUM_LABEL_CLASSES = len(class_label_names)\n\n if NUM_LABEL_CLASSES<=10:\n class_label_colormap = px.colors.qualitative.G10\n else:\n class_label_colormap = px.colors.qualitative.Light24\n\n # we can't have fewer colors than classes\n assert NUM_LABEL_CLASSES <= len(class_label_colormap)\n\n colormap = [\n tuple([fromhex(h[s : s + 2]) for s in range(0, len(h), 2)])\n for h in [c.replace(\"#\", \"\") for c in class_label_colormap]\n ]\n\n cmap = matplotlib.colors.ListedColormap(class_label_colormap[:NUM_LABEL_CLASSES])\n cmap2 = matplotlib.colors.ListedColormap(['#000000']+class_label_colormap[:NUM_LABEL_CLASSES])\n\n\n savez_dict = dict()\n\n ## if more than one label ...\n if len(np.unique(data['doodles']))>2:\n\n img = data['image']\n del data['image']\n\n #================================\n ##fig1 - img versus standardized image\n plt.subplot(121)\n plt.imshow(img); plt.axis('off')\n plt.title('a) Original', loc='left', fontsize=7)\n\n # #standardization using adjusted standard deviation\n img = standardize(img)\n\n #================================\n ##fig2 - img / doodles\n plt.subplot(122)\n plt.imshow(img); plt.axis('off')\n plt.title('b) Filtered', loc='left', fontsize=7)\n plt.savefig(anno_file.replace('.npz','_image_filt_labelgen.png'), dpi=200, bbox_inches='tight')\n plt.close()\n\n tmp = data['doodles'].astype('float')\n tmp[tmp==0] = np.nan\n\n ## do plot of images and doodles\n plt.imshow(img)\n plt.imshow(tmp, alpha=0.25, vmin=0, vmax=NUM_LABEL_CLASSES, cmap=cmap2) #'inferno')\n plt.axis('off')\n plt.colorbar(shrink=0.5)\n plt.savefig(anno_file.replace('.npz','_image_doodles_labelgen.png'), dpi=200, bbox_inches='tight')\n plt.close()\n del tmp\n\n ## \"analytical toola\" e.g. 
compute annotations per unit area of image and per class label - is there an ideal number or threshold not to go below or above?\n\n #####=========================== RF\n\n if np.ndim(img)==3:\n features = extract_features(\n img,\n multichannel=True,\n intensity=True,\n edges=True,\n texture=True,\n sigma_min=1, #SIGMA_MIN,\n sigma_max=16, #SIGMA_MAX,\n )\n else:\n features = extract_features(\n np.dstack((img,img,img)),\n multichannel=True,\n intensity=True,\n edges=True,\n texture=True,\n sigma_min=1, #SIGMA_MIN,\n sigma_max=16, #SIGMA_MAX,\n )\n\n counter=1\n for k in [0,1,2,3,4]:\n plt.subplot(2,5,counter)\n plt.imshow(features[k].reshape((img.shape[0], img.shape[1])), cmap='gray'); plt.axis('off')\n if k==0:\n plt.title('a) Smallest scale', loc='left', fontsize=7)\n counter+=1\n\n for k in [70,71,72,73,74]:\n plt.subplot(2,5,counter)\n plt.imshow(features[k].reshape((img.shape[0], img.shape[1])), cmap='gray'); plt.axis('off')\n if k==70:\n plt.title('b) Largest scale', loc='left', fontsize=7)\n counter+=1\n\n plt.savefig(anno_file.replace('.npz','_image_feats_labelgen.png'), dpi=200, bbox_inches='tight')\n plt.close()\n\n #================================\n doodles = data['doodles']\n training_data = features[:, doodles > 0].T\n training_labels = doodles[doodles > 0].ravel()\n del doodles\n\n training_data = training_data[::DEFAULT_RF_DOWNSAMPLE]\n training_labels = training_labels[::DEFAULT_RF_DOWNSAMPLE]\n\n if save_mode:\n savez_dict['color_doodles'] = data['color_doodles'].astype('uint8')\n savez_dict['doodles'] = data['doodles'].astype('uint8')\n savez_dict['settings'] = data['settings']\n savez_dict['label'] = data['label'].astype('uint8')\n\n del data\n\n #================================\n clf = make_pipeline(\n StandardScaler(),\n MLPClassifier(\n solver='adam', alpha=1, random_state=1, max_iter=2000,\n early_stopping=True, hidden_layer_sizes=[100, 60],\n ))\n clf.fit(training_data, training_labels)\n\n #================================\n\n del training_data, training_labels\n\n # use model in predictive mode\n sh = features.shape\n features_use = features.reshape((sh[0], np.prod(sh[1:]))).T\n\n if save_mode:\n savez_dict['features'] = features.astype('float16')\n del features\n\n rf_result = clf.predict(features_use)\n #del features_use\n rf_result = rf_result.reshape(sh[1:])\n\n #================================\n plt.imshow(img)\n plt.imshow(rf_result-1, alpha=0.25, vmin=0, vmax=NUM_LABEL_CLASSES, cmap=cmap) #'inferno')\n plt.axis('off')\n plt.colorbar(shrink=0.5)\n plt.savefig(anno_file.replace('.npz','_image_label_RF_labelgen.png'), dpi=200, bbox_inches='tight')\n plt.close()\n\n #================================\n plt.subplot(221); plt.imshow(rf_result-1, vmin=0, vmax=NUM_LABEL_CLASSES, cmap=cmap); plt.axis('off')\n plt.title('a) Original', loc='left', fontsize=7)\n\n rf_result_filt = filter_one_hot(rf_result, 2*rf_result.shape[0])\n if save_mode:\n savez_dict['rf_result_filt'] = rf_result_filt\n\n plt.subplot(222); plt.imshow(rf_result_filt, vmin=0, vmax=NUM_LABEL_CLASSES, cmap=cmap2); plt.axis('off')\n plt.title('b) Filtered', loc='left', fontsize=7)\n\n if rf_result_filt.shape[0]>512:\n ## filter based on distance\n rf_result_filt = filter_one_hot_spatial(rf_result_filt, orig_distance)\n\n if save_mode:\n savez_dict['rf_result_spatfilt'] = rf_result_filt\n\n plt.subplot(223); plt.imshow(rf_result_filt, vmin=0, vmax=NUM_LABEL_CLASSES, cmap=cmap2); plt.axis('off')\n plt.title('c) Spatially filtered', loc='left', fontsize=7)\n\n # rf_result_filt_inp = 
inpaint_zeros(rf_result_filt).astype('uint8')\n\n rf_result_filt = rf_result_filt.astype('float')\n rf_result_filt[rf_result_filt==0] = np.nan\n rf_result_filt_inp = inpaint_nans(rf_result_filt).astype('uint8')\n\n plt.subplot(224); plt.imshow(rf_result_filt_inp, vmin=0, vmax=NUM_LABEL_CLASSES, cmap=cmap2); plt.axis('off')\n plt.title('d) Inpainted', loc='left', fontsize=7)\n\n plt.savefig(anno_file.replace('.npz','_rf_label_filtered_labelgen.png'), dpi=200, bbox_inches='tight')\n plt.close()\n\n ###========================================================\n #### demo of the spatial filter\n\n if NUM_LABEL_CLASSES==2:\n\n distance = orig_distance #3\n shrink_factor= 0.66\n rf_result_filt = filter_one_hot(rf_result, 2*rf_result.shape[0])\n\n lstack = (np.arange(rf_result_filt.max()) == rf_result_filt[...,None]-1).astype(int) #one-hot encode\n\n plt.figure(figsize=(12,16))\n plt.subplots_adjust(wspace=0.2, hspace=0.5)\n\n plt.subplot(631)\n plt.imshow(img); plt.imshow(rf_result_filt-1, cmap='gray', alpha=0.25)\n plt.axis('off'); plt.title('a) Label', loc='left', fontsize=7) #plt.colorbar(shrink=shrink_factor);\n\n plt.subplot(635)\n plt.imshow(img); plt.imshow(lstack[:,:,0], cmap='gray', alpha=0.25)\n plt.axis('off'); plt.title('b) \"Zero-hot\"', loc='left', fontsize=7) #plt.colorbar(shrink=shrink_factor);\n\n plt.subplot(636)\n plt.imshow(img); plt.imshow(lstack[:,:,1], cmap='gray', alpha=0.25)\n plt.axis('off'); plt.title('c) \"One-hot\"', loc='left', fontsize=7) #plt.colorbar(shrink=shrink_factor);\n\n tmp = np.zeros_like(rf_result_filt)\n for kk in range(lstack.shape[-1]):\n l = lstack[:,:,kk]\n d = ndimage.distance_transform_edt(l)\n l[d<distance] = 0\n lstack[:,:,kk] = np.round(l).astype(np.uint8)\n del l\n tmp[d<=distance] += 1\n\n if kk==0:\n plt.subplot(637)\n plt.imshow(d, cmap='inferno') # plt.imshow(img); 'gray', alpha=0.5)\n plt.axis('off'); plt.title('d) Zero-hot distance < '+str(distance)+' px', loc='left', fontsize=7) #plt.colorbar(shrink=shrink_factor);\n else:\n plt.subplot(638)\n plt.imshow(d, cmap='inferno') #'gray', alpha=0.5)\n plt.axis('off'); plt.title('e) One-hot distance < '+str(distance)+' px', loc='left', fontsize=7) # plt.colorbar(shrink=shrink_factor);\n del d\n\n plt.subplot(6,3,11)\n plt.imshow(img); plt.imshow(tmp==rf_result_filt.max(), cmap='gray', alpha=0.25)\n plt.axis('off'); plt.title('f) Distance < threshold (= '+str(distance)+' px)', loc='left', fontsize=7) #plt.colorbar(shrink=shrink_factor);\n\n rf_result_filt = np.argmax(lstack, -1)+1\n\n rf_result_filt[tmp==rf_result_filt.max()] = 0\n del tmp\n\n plt.subplot(6,3,12)\n plt.imshow(img); plt.imshow(rf_result_filt, cmap='gray', alpha=0.25)\n plt.axis('off'); plt.title('g) Label encoded with zero class', loc='left', fontsize=7) #plt.colorbar(shrink=shrink_factor);\n\n ##double distance\n distance *= 3\n tmp = np.zeros_like(rf_result_filt)\n for kk in range(lstack.shape[-1]):\n l = lstack[:,:,kk]\n d = ndimage.distance_transform_edt(l)\n l[d<distance] = 0\n lstack[:,:,kk] = np.round(l).astype(np.uint8)\n del l\n tmp[d<=distance] += 1\n\n if kk==0:\n plt.subplot(6,3,13)\n plt.imshow(d, cmap='inferno') # plt.imshow(img); 'gray', alpha=0.5)\n plt.axis('off'); plt.title('d) Zero-hot distance < '+str(distance)+' px', loc='left', fontsize=7) #plt.colorbar(shrink=shrink_factor);\n else:\n plt.subplot(6,3,14)\n plt.imshow(d, cmap='inferno') #'gray', alpha=0.5)\n plt.axis('off'); plt.title('e) One-hot distance < '+str(distance)+' px', loc='left', fontsize=7) #plt.colorbar(shrink=shrink_factor);\n del d\n\n 
plt.subplot(6,3,17)\n plt.imshow(img); plt.imshow(tmp==rf_result_filt.max(), cmap='gray', alpha=0.25)\n plt.axis('off');plt.title('h) Distance < threshold (= '+str(distance)+' px)', loc='left', fontsize=7) # plt.colorbar(shrink=shrink_factor);\n\n ###========================================================\n rf_result_filt = np.argmax(lstack, -1)+1\n\n\n rf_result_filt[tmp==rf_result_filt.max()] = 0\n del tmp\n\n plt.subplot(6,3,18)\n plt.imshow(img); plt.imshow(rf_result_filt, cmap='gray', alpha=0.25)\n plt.axis('off'); plt.title('i) Label encoded with zero class', loc='left', fontsize=7); #plt.colorbar(shrink=shrink_factor);\n\n plt.savefig(anno_file.replace('.npz','_rf_spatfilt_dist_labelgen.png'), dpi=300, bbox_inches='tight')\n plt.close()\n\n if save_mode:\n savez_dict['rf_result'] = rf_result\n\n del rf_result, rf_result_filt\n if save_mode:\n savez_dict['rf_result_filt_inp'] = rf_result_filt_inp\n\n #####=========================== CRF\n if NUM_LABEL_CLASSES==2:\n # R = W = n = []\n # for k in np.linspace(0,int(img.shape[0]),10):\n # out1, out2, out3 = tta_crf(img, rf_result_filt_inp, k)\n # R.append(out1)\n # W.append(out2)\n # n.append(out3)\n # this parallel call replaces the above commented out loop\n w = Parallel(n_jobs=-2, verbose=0)(delayed(tta_crf)(img, rf_result_filt_inp, k) for k in np.linspace(0,int(img.shape[0])/5,10))\n R,W,n = zip(*w)\n del rf_result_filt_inp\n\n for counter,r in enumerate(R):\n plt.subplot(5,2,counter+1)\n plt.imshow(img)\n plt.imshow(r-1, alpha=0.25, vmin=0, vmax=NUM_LABEL_CLASSES, cmap=cmap) #'inferno')\n plt.axis('off')\n plt.savefig(anno_file.replace('.npz','_crf_tta_labelgen.png'), dpi=200, bbox_inches='tight')\n plt.close()\n\n if save_mode:\n savez_dict['crf_tta'] = [r.astype('uint8') for r in R]\n savez_dict['crf_tta_weights'] = W\n\n crf_result = np.round(np.average(np.dstack(R), axis=-1, weights = W)).astype('uint8')\n del R, W, n, w, r\n\n #================================\n plt.subplot(221); plt.imshow(crf_result-1, vmin=0, vmax=NUM_LABEL_CLASSES, cmap=cmap); plt.axis('off')\n plt.title('a) Original', loc='left', fontsize=7)\n\n crf_result_filt = filter_one_hot(crf_result, 2*crf_result.shape[0])\n\n if save_mode:\n savez_dict['crf_result_filt'] = crf_result_filt\n savez_dict['crf_result'] = crf_result-1\n\n del crf_result\n\n plt.subplot(222); plt.imshow(crf_result_filt, vmin=0, vmax=NUM_LABEL_CLASSES, cmap=cmap2); plt.axis('off')\n plt.title('b) Filtered', loc='left', fontsize=7)\n\n if crf_result_filt.shape[0]>512:\n ## filter based on distance\n crf_result_filt = filter_one_hot_spatial(crf_result_filt, distance)\n\n if save_mode:\n savez_dict['rf_result_spatfilt'] = crf_result_filt\n\n plt.subplot(223); plt.imshow(crf_result_filt, vmin=0, vmax=NUM_LABEL_CLASSES, cmap=cmap2); plt.axis('off')\n plt.title('c) Spatially filtered', loc='left', fontsize=7)\n\n crf_result_filt = crf_result_filt.astype('float')\n crf_result_filt[crf_result_filt==0] = np.nan\n crf_result_filt_inp = inpaint_nans(crf_result_filt).astype('uint8')\n del crf_result_filt\n\n plt.subplot(224); plt.imshow(crf_result_filt_inp, vmin=0, vmax=NUM_LABEL_CLASSES, cmap=cmap2); plt.axis('off')\n plt.title('d) Inpainted (final label)', loc='left', fontsize=7)\n\n plt.savefig(anno_file.replace('.npz','_crf_label_filtered_labelgen.png'), dpi=200, bbox_inches='tight')\n plt.close()\n\n else:\n\n if len(np.unique(rf_result_filt_inp.flatten()))>1:\n\n crf_result, n = crf_refine(rf_result_filt_inp, img, DEFAULT_CRF_THETA, DEFAULT_CRF_MU, DEFAULT_CRF_DOWNSAMPLE, 
DEFAULT_CRF_GTPROB)\n\n #================================\n plt.subplot(221); plt.imshow(crf_result-1, vmin=0, vmax=NUM_LABEL_CLASSES, cmap=cmap); plt.axis('off')\n plt.title('a) Original', loc='left', fontsize=7)\n\n crf_result_filt = filter_one_hot(crf_result, 2*crf_result.shape[0])\n\n if save_mode:\n savez_dict['crf_result_filt'] = crf_result_filt\n savez_dict['crf_result'] = crf_result-1\n\n del crf_result\n\n plt.subplot(222); plt.imshow(crf_result_filt, vmin=0, vmax=NUM_LABEL_CLASSES, cmap=cmap2); plt.axis('off')\n plt.title('b) Filtered', loc='left', fontsize=7)\n\n if crf_result_filt.shape[0]>512:\n ## filter based on distance\n crf_result_filt = filter_one_hot_spatial(crf_result_filt, orig_distance)\n\n if save_mode:\n savez_dict['rf_result_spatfilt'] = crf_result_filt\n\n plt.subplot(223); plt.imshow(crf_result_filt, vmin=0, vmax=NUM_LABEL_CLASSES, cmap=cmap2); plt.axis('off')\n plt.title('c) Spatially filtered', loc='left', fontsize=7)\n\n #crf_result_filt_inp = inpaint_zeros(crf_result_filt).astype('uint8')\n crf_result_filt = crf_result_filt.astype('float')\n crf_result_filt[crf_result_filt==0] = np.nan\n crf_result_filt_inp = inpaint_nans(crf_result_filt).astype('uint8')\n del crf_result_filt\n\n plt.subplot(224); plt.imshow(crf_result_filt_inp, vmin=0, vmax=NUM_LABEL_CLASSES, cmap=cmap2); plt.axis('off')\n plt.title('d) Inpainted (final label)', loc='left', fontsize=7)\n\n plt.savefig(anno_file.replace('.npz','_crf_label_filtered_labelgen.png'), dpi=200, bbox_inches='tight')\n plt.close()\n else:\n crf_result_filt_inp = rf_result_filt_inp.copy()\n\n #================================\n plt.imshow(img)\n plt.imshow(crf_result_filt_inp-1, alpha=0.25, vmin=0, vmax=NUM_LABEL_CLASSES, cmap=cmap) #'inferno')\n plt.axis('off')\n plt.colorbar(shrink=0.5)\n plt.savefig(anno_file.replace('.npz','_image_label_final_labelgen.png'), dpi=200, bbox_inches='tight')\n plt.close()\n\n\n if save_mode:\n tosave = (np.arange(crf_result_filt_inp.max()) == crf_result_filt_inp[...,None]-1).astype(int)\n savez_dict['final_label'] = tosave.astype('uint8')#crf_result_filt_inp-1\n savez_dict['image'] = (255*img).astype('uint8')\n del img, crf_result_filt_inp\n\n imwrite(anno_file.replace('.npz','_label.png'), np.argmax(savez_dict['final_label'],-1).astype('uint8'))\n imwrite(anno_file.replace('.npz','_doodles.png'), savez_dict['doodles'].astype('uint8'))\n\n\n ### if only one label\n else:\n if save_mode:\n savez_dict['color_doodles'] = data['color_doodles'].astype('uint8')\n savez_dict['doodles'] = data['doodles'].astype('uint8')\n savez_dict['settings'] = data['settings']\n savez_dict['label'] = data['label'].astype('uint8')\n v = np.unique(data['doodles']).max()#[0]-1\n if v==2:\n tmp = np.zeros_like(data['label'])\n tmp+=1\n else:\n tmp = np.ones_like(data['label'])*v\n tosave = (np.arange(tmp.max()) == tmp[...,None]-1).astype(int)\n savez_dict['final_label'] = tosave.astype('uint8').squeeze()\n savez_dict['crf_tta'] = None\n savez_dict['crf_tta_weights'] = None\n savez_dict['crf_result'] =None\n savez_dict['rf_result_spatfilt'] = None\n savez_dict['crf_result_filt'] = None\n savez_dict['image'] = data['image'].astype('uint8')\n del data\n\n np.savez(anno_file.replace('.npz','_labelgen.npz'), **savez_dict )\n del savez_dict\n plt.close('all')\n\n\n\n###==================================================================\n#===============================================================\nif __name__ == '__main__':\n\n argv = sys.argv[1:]\n try:\n opts, args = getopt.getopt(argv,\"h:d:m:\") #m:p:l:\")\n 
except getopt.GetoptError:\n print('======================================')\n print('python plot_label_generation.py [-d intra-label distance threshold -m save mode]')\n print('======================================')\n print('Example usage: python plot_label_generation.py [default -d 2 -m 1]')\n print('.... which means: threshold intra-label distance 2 px, save mode 1 (save the full set of outputs)')\n print('======================================')\n\n sys.exit(2)\n for opt, arg in opts:\n if opt == '-h':\n print('======================================')\n print('Example usage: python plot_label_generation.py [default -d 2 -m 1]')\n print('.... which means: threshold intra-label distance 2 px, save mode 1 (save the full set of outputs)')\n print('======================================')\n sys.exit()\n\n elif opt in (\"-d\"):\n orig_distance = arg\n orig_distance = int(orig_distance)\n elif opt in (\"-m\"):\n save_mode = arg\n save_mode = bool(int(save_mode))\n\n if 'orig_distance' not in locals():\n orig_distance = 2\n if 'save_mode' not in locals():\n save_mode = True\n\n print(\"save mode: %i\" % (save_mode))\n print(\"threshold intra-label distance: %i\" % (orig_distance))\n\n # ok, do it\n gen_plot_seq(orig_distance, save_mode)\n" ]
[ [ "matplotlib.pyplot.imshow", "matplotlib.pyplot.title", "matplotlib.pyplot.colorbar", "matplotlib.pyplot.subplot", "matplotlib.colors.ListedColormap", "matplotlib.pyplot.subplots_adjust", "matplotlib.pyplot.close", "matplotlib.pyplot.axis", "matplotlib.pyplot.figure" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
leoninekev/training-frcnn-google-ml-engine
[ "0df1a4e772c806d35aeb5b74d3394a828489cd9a" ]
[ "move_to_cloudshell/trainer/data_augment.py" ]
[ "import cv2\nimport numpy as np\nimport copy\n\nfrom tensorflow.python.lib.io import file_io\nimport io\n\nfrom PIL import Image\n\ndef url2img(uri):#IO function compliant to loading gs:// url images \n\tfile= file_io.FileIO(uri, mode='rb')\n\tfile = file.read()\n\timg= Image.open(io.BytesIO(file)).convert('RGB')\n\treturn np.asarray(img)\n\ndef augment(img_data, config, augment=True):\n\tassert 'filepath' in img_data\n\tassert 'bboxes' in img_data\n\tassert 'width' in img_data\n\tassert 'height' in img_data\n\n\timg_data_aug = copy.deepcopy(img_data)\n\n\timg = url2img(img_data_aug['filepath'])\n\n\tif augment:\n\t\trows, cols = img.shape[:2]\n\n\t\tif config.use_horizontal_flips and np.random.randint(0, 2) == 0:\n\t\t\timg = cv2.flip(img, 1)\n\t\t\tfor bbox in img_data_aug['bboxes']:\n\t\t\t\tx1 = bbox['x1']\n\t\t\t\tx2 = bbox['x2']\n\t\t\t\tbbox['x2'] = cols - x1\n\t\t\t\tbbox['x1'] = cols - x2\n\n\t\tif config.use_vertical_flips and np.random.randint(0, 2) == 0:\n\t\t\timg = cv2.flip(img, 0)\n\t\t\tfor bbox in img_data_aug['bboxes']:\n\t\t\t\ty1 = bbox['y1']\n\t\t\t\ty2 = bbox['y2']\n\t\t\t\tbbox['y2'] = rows - y1\n\t\t\t\tbbox['y1'] = rows - y2\n\n\t\tif config.rot_90:\n\t\t\tangle = np.random.choice([0,90,180,270],1)[0]\n\t\t\tif angle == 270:\n\t\t\t\timg = np.transpose(img, (1,0,2))\n\t\t\t\timg = cv2.flip(img, 0)\n\t\t\telif angle == 180:\n\t\t\t\timg = cv2.flip(img, -1)\n\t\t\telif angle == 90:\n\t\t\t\timg = np.transpose(img, (1,0,2))\n\t\t\t\timg = cv2.flip(img, 1)\n\t\t\telif angle == 0:\n\t\t\t\tpass\n\n\t\t\tfor bbox in img_data_aug['bboxes']:\n\t\t\t\tx1 = bbox['x1']\n\t\t\t\tx2 = bbox['x2']\n\t\t\t\ty1 = bbox['y1']\n\t\t\t\ty2 = bbox['y2']\n\t\t\t\tif angle == 270:\n\t\t\t\t\tbbox['x1'] = y1\n\t\t\t\t\tbbox['x2'] = y2\n\t\t\t\t\tbbox['y1'] = cols - x2\n\t\t\t\t\tbbox['y2'] = cols - x1\n\t\t\t\telif angle == 180:\n\t\t\t\t\tbbox['x2'] = cols - x1\n\t\t\t\t\tbbox['x1'] = cols - x2\n\t\t\t\t\tbbox['y2'] = rows - y1\n\t\t\t\t\tbbox['y1'] = rows - y2\n\t\t\t\telif angle == 90:\n\t\t\t\t\tbbox['x1'] = rows - y2\n\t\t\t\t\tbbox['x2'] = rows - y1\n\t\t\t\t\tbbox['y1'] = x1\n\t\t\t\t\tbbox['y2'] = x2 \n\t\t\t\telif angle == 0:\n\t\t\t\t\tpass\n\n\timg_data_aug['width'] = img.shape[1]\n\timg_data_aug['height'] = img.shape[0]\n\treturn img_data_aug, img\n" ]
[ [ "numpy.random.choice", "numpy.asarray", "tensorflow.python.lib.io.file_io.FileIO", "numpy.transpose", "numpy.random.randint" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [ "1.10", "2.7", "1.12", "2.6", "2.2", "1.13", "2.3", "2.4", "1.4", "2.9", "1.5", "1.7", "2.5", "0.12", "1.0", "2.8", "1.2", "2.10" ] } ]
andrearosasco/DistilledReplay
[ "2a4efa88d22b9afc7016f07549114688f346dbe8", "2a4efa88d22b9afc7016f07549114688f346dbe8", "2a4efa88d22b9afc7016f07549114688f346dbe8", "2a4efa88d22b9afc7016f07549114688f346dbe8" ]
[ "model/cnn2.py", "pmnist_exp/config_joint.py", "scifar10_exp/config_naive.py", "smnist_exp/config_joint.py" ]
[ "import torch.nn as nn\r\nimport torch.nn.functional as F\r\n\r\n\r\nclass Model(nn.Module):\r\n def __init__(self, config):\r\n super(Model, self).__init__()\r\n self.conv1 = nn.Conv2d(3, 16, (3, 3), padding=1)\r\n self.conv1_bn = nn.BatchNorm2d(16)\r\n\r\n self.conv2 = nn.Conv2d(16, 32, (3, 3), padding=1, stride=2)\r\n self.conv2_bn = nn.BatchNorm2d(32)\r\n\r\n self.conv3 = nn.Conv2d(32, 64, (3, 3), padding=1, stride=2)\r\n self.conv3_bn = nn.BatchNorm2d(64)\r\n\r\n self.conv4 = nn.Conv2d(64, 128, (3, 3), padding=1, stride=2)\r\n self.conv4_bn = nn.BatchNorm2d(128)\r\n\r\n self.conv5 = nn.Conv2d(128, 256, (3, 3), padding=1, stride=2)\r\n self.conv5_bn = nn.BatchNorm2d(256)\r\n\r\n self.fc1 = nn.Linear(256 * 2 * 2, 2000)\r\n self.fc2 = nn.Linear(2000, 2000)\r\n self.fc3 = nn.Linear(2000, config['n_classes'])\r\n\r\n def forward(self, x):\r\n # [3, 32, 32] -> [16, 32, 32]\r\n x = F.relu(self.conv1_bn(self.conv1(x)))\r\n # [16, 32, 32] -> [32, 16, 16]\r\n x = F.relu(self.conv2_bn(self.conv2(x)))\r\n # [32, 16, 16] -> [64, 8, 8]\r\n x = F.relu(self.conv3_bn(self.conv3(x)))\r\n # [64, 8, 8] -> [128, 4, 4]\r\n x = F.relu(self.conv4_bn(self.conv4(x)))\r\n # [128, 4, 4] -> [256, 2, 2]\r\n x = F.relu(self.conv5_bn(self.conv5(x)))\r\n # [128, 2, 2] -> [512]\r\n x = x.view(-1, 256 * 2 * 2)\r\n # 1024 -> 2000\r\n x = F.relu(F.dropout((self.fc1(x)), 0.0))\r\n # 2000 -> 2000\r\n # x = F.relu(F.dropout((self.fc2(x)), 0.5))\r\n # 2000 -> 100\r\n x = self.fc3(x)\r\n return x\r\n\r\n def freeze_conv(self):\r\n self.conv1.weight.requires_grad = False\r\n self.conv1_bn.weight.requires_grad = False\r\n self.conv1_bn.bias.requires_grad = False\r\n\r\n self.conv2.weight.requires_grad = False\r\n self.conv2_bn.weight.requires_grad = False\r\n self.conv2_bn.bias.requires_grad = False\r\n\r\n self.conv3.weight.requires_grad = False\r\n self.conv3_bn.weight.requires_grad = False\r\n self.conv3_bn.bias.requires_grad = False\r\n\r\n self.conv4.weight.requires_grad = False\r\n self.conv4_bn.weight.requires_grad = False\r\n self.conv4_bn.bias.requires_grad = False\r\n\r\n self.conv5.weight.requires_grad = False\r\n self.conv5_bn.weight.requires_grad = False\r\n self.conv5_bn.bias.requires_grad = False", "import importlib\nimport os\nfrom collections import OrderedDict\n\nimport torch\nfrom torchvision.transforms import transforms\n\nmodel_config = OrderedDict([\n ('arch', 'mlp2'),\n ('n_classes', 10),\n ('dropout', 0.5)\n])\n\ndata_config = OrderedDict([\n ('dataset', 'PermutedMNIST'),\n ('valid', 0.0),\n ('num_workers', 4),\n ('train_transform', transforms.Compose([\n lambda x: torch.FloatTensor(x),\n lambda x: x / 255.0,\n lambda x: (x - 0.1307) / 0.3081,\n ])),\n ('test_transform', transforms.Compose([\n lambda x: torch.FloatTensor(x),\n lambda x: x / 255.0,\n lambda x: (x - 0.1307) / 0.3081,\n ]))\n])\n\n\nrun_config = OrderedDict([\n ('experiment', 'run'), # This configuration will be executed by distill.py\n ('device', 'cuda'),\n ('tasks', list(range(10))),\n ('save', 'task1.distilled'), # Path for the distilled dataset\n ('seed', 1234),\n])\n\nlog_config = OrderedDict([\n ('wandb', True),\n ('wandb_name', 'joint'),\n ('print', True),\n ('images', True), # Save the distilled images\n])\n\nparam_config = OrderedDict([\n ('no_steps', 3), # Training epoch performed by the model on the distilled dataset\n ('steps', 'epoch'), # epoch or minibatch\n ('meta_lr', 0.1), # Learning rate for distilling images\n ('model_lr', 0.05), # Base learning rate for the model\n ('lr_lr', 0.0), # Learning rate for the lrs of the 
model at each optimization step\n ('outer_steps', 0), # Distillation epochs\n ('inner_steps', 0), # Optimization steps of the model\n ('batch_size', 128), # Minibatch size used during distillation\n ('distill_batch_size', 128),\n ('buffer_size', -1), # Number of examples per class kept in the buffer\n])\n\nconfig = OrderedDict([\n ('model_config', model_config),\n ('param_config', param_config),\n ('data_config', data_config),\n ('run_config', run_config),\n ('log_config', log_config),\n])\n\nif __name__ == '__main__':\n os.environ['CUDA_VISIBLE_DEVICES'] = '3'\n experiment = importlib.import_module(config['run_config']['experiment'])\n experiment.run(config)", "import importlib\nimport os\nfrom collections import OrderedDict\n\nimport numpy as np\nfrom PIL import Image\nfrom torchvision.transforms import transforms\n\nmodel_config = OrderedDict([\n ('arch', 'lenet5'),\n ('n_classes', 10),\n ('input_shape', (3, 32, 32)),\n])\n\ndata_config = OrderedDict([\n ('dataset', 'SplitCIFAR10'),\n ('valid', 0.0),\n ('num_workers', 4),\n ('train_transform', transforms.Compose([\n lambda x: Image.fromarray(x.reshape((3, 32, 32)).transpose((1, 2, 0))),\n transforms.ToTensor(),\n transforms.Normalize(np.array([0.5]), np.array([0.5]))])),\n ('test_transform', transforms.Compose([\n lambda x: Image.fromarray(x.reshape((3, 32, 32)).transpose((1, 2, 0))),\n transforms.ToTensor(),\n transforms.Normalize(np.array([0.5]), np.array([0.5]))\n ]))\n])\n\n\nrun_config = OrderedDict([\n ('experiment', 'run'), # This configuration will be executed by run.py\n ('device', 'cuda'),\n ('tasks', [[0, 1], [2, 3], [4, 5], [6, 7], [8, 9]]),\n ('seed', 1234),\n])\n\nlog_config = OrderedDict([\n ('wandb', True),\n ('wandb_name', 'naive'),\n ('print', True),\n ('images', True), # Save the distilled images\n])\n\nparam_config = OrderedDict([\n ('no_steps', 40), # Training epochs performed by the model on the distilled dataset\n ('steps', 'minibatch'), # epoch or minibatch\n ('meta_lr', 0.1), # Learning rate for distilling images\n ('model_lr', 0.05), # Base learning rate for the model\n ('lr_lr', 0.0), # Learning rate for the lrs of the model at each optimization step\n ('outer_steps', 0), # Distillation epochs\n ('inner_steps', 0), # Optimization steps of the model\n ('batch_size', 1024), # Minibatch size used during distillation\n ('distill_batch_size', 128),\n ('buffer_size', 0), # Number of examples per class kept in the buffer\n])\n\nconfig = OrderedDict([\n ('model_config', model_config),\n ('param_config', param_config),\n ('data_config', data_config),\n ('run_config', run_config),\n ('log_config', log_config),\n])\n\nif __name__ == '__main__':\n os.environ['CUDA_VISIBLE_DEVICES'] = '3'\n experiment = importlib.import_module(config['run_config']['experiment'])\n experiment.run(config)", "import importlib\nimport os\nfrom collections import OrderedDict\n\nimport torch\nimport numpy as np\nfrom torchvision.transforms import transforms\n\nmodel_config = OrderedDict([\n ('arch', 'lenet5'),\n ('n_classes', 10),\n ('input_shape', (1, 28, 28)),\n])\n\ndata_config = OrderedDict([\n ('dataset', 'SplitMNIST'),\n ('valid', 0.0),\n ('num_workers', 4),\n ('train_transform', transforms.Compose([\n lambda x: np.array(x).reshape((1, 28, 28)),\n lambda x: np.pad(x, ((0, 0), (2, 2), (2, 2)), mode='minimum'), # Padding is only required by LeNet\n lambda x: torch.FloatTensor(x),\n lambda x: x / 255.0,\n transforms.Normalize(np.array([0.1307]), np.array([0.3081]))\n ])),\n ('test_transform', 
transforms.Compose([\n lambda x: np.array(x).reshape((1, 28, 28)),\n lambda x: np.pad(x, ((0, 0), (2, 2), (2, 2)), mode='minimum'), # Padding is only required by LeNet\n lambda x: torch.FloatTensor(x),\n lambda x: x / 255.0,\n transforms.Normalize(np.array([0.1307]), np.array([0.3081]))\n ])),\n])\n\n\nrun_config = OrderedDict([\n ('experiment', 'run'), # This configuration will be executed by run.py\n ('device', 'cuda'),\n ('tasks', [[0, 1], [2, 3], [4, 5], [6, 7], [8, 9]]),\n ('seed', 1234),\n])\n\nlog_config = OrderedDict([\n ('wandb', True),\n ('wandb_name', 'joint'),\n ('print', True),\n ('images', True), # Save the distilled images\n])\n\nparam_config = OrderedDict([\n ('no_steps', 80), # Training epochs performed by the model on the distilled dataset\n ('steps', 'minibatch'), # epoch or minibatch\n ('meta_lr', 0.1), # Learning rate for distilling images\n ('model_lr', 0.05), # Base learning rate for the model\n ('lr_lr', 0.0), # Learning rate for the lrs of the model at each optimization step\n ('outer_steps', 0), # Distillation epochs\n ('inner_steps', 0), # Optimization steps of the model\n ('batch_size', 128), # Minibatch size used during distillation\n ('distill_batch_size', 128),\n ('buffer_size', -1), # Number of examples per class kept in the buffer\n])\n\nconfig = OrderedDict([\n ('model_config', model_config),\n ('param_config', param_config),\n ('data_config', data_config),\n ('run_config', run_config),\n ('log_config', log_config),\n])\n\nif __name__ == '__main__':\n os.environ['CUDA_VISIBLE_DEVICES'] = '2'\n experiment = importlib.import_module(config['run_config']['experiment'])\n experiment.run(config)" ]
[ [ "torch.nn.Linear", "torch.nn.Conv2d", "torch.nn.BatchNorm2d" ], [ "torch.FloatTensor" ], [ "numpy.array" ], [ "numpy.array", "torch.FloatTensor", "numpy.pad" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
JThissen/machine_learning
[ "82e2b003fb25111dc2d9ac1c1b2fd637e9f4fdbc" ]
[ "recurrent_neural_network_1/program.py" ]
[ "import torch\nimport torch.nn as nn\nimport string\nimport os\nimport time\nimport random\nfrom torch.functional import Tensor\nfrom utils import Utils\nfrom recurrent_neural_network import RecurrentNeuralNetwork\nfrom typing import List, Tuple, Any\n\nclass Program():\n def __init__(self, learning_rate: float = 0.005, iterations: int = 100000, hidden_length: int = 128):\n self.category_lines = {}\n self.all_categories = []\n self.device = torch.device(\"cuda:0\" if torch.cuda.is_available() else \"cpu\")\n self.all_letters = string.ascii_letters + \".,;\"\n self.learning_rate = learning_rate\n self.iterations = iterations\n self.hidden_length = hidden_length\n self.loss_func = nn.NLLLoss()\n\n def run(self, train_model: bool = True):\n files = Utils.get_files(\"./data/names/*.txt\")\n\n for i in files:\n name = os.path.splitext(os.path.basename(i))[0]\n self.all_categories.append(name)\n self.category_lines[name] = Utils.read_lines(i, self.all_letters)\n\n total_loss = 0\n correct_count = 0\n losses: List[str] = []\n rnn = RecurrentNeuralNetwork(len(self.all_letters), self.hidden_length, len(self.all_categories))\n start = time.time()\n\n if(train_model):\n for i in range(self.iterations):\n category, _, category_tensor, line_tensor = self.random_example()\n\n if((line_tensor.size()[0]) == 0):\n continue\n\n output, loss = self.train(rnn, category_tensor, line_tensor)\n total_loss += loss\n losses.append(loss)\n result, _ = Utils.category_from_output(output, self.all_categories)\n correct = result == category\n\n if(correct is True):\n correct_count += 1\n \n print(f\"iter: {i}, correct: {correct}\")\n\n print(f\"correct percentage: {(correct_count / self.iterations) * 100.0}\")\n print(f\"elapsed time: {time.time() - start}\")\n torch.save(rnn.state_dict(), \"./network.pt\")\n else:\n rnn.load_state_dict(torch.load(\"./network.pt\"))\n rnn.eval()\n self.predict(rnn, \"Thissen\", 3)\n\n def train(self, rnn: RecurrentNeuralNetwork, category_tensor: Tensor, line_tensor: Tensor) -> Tuple[Any, float]:\n hidden = rnn.get_hidden()\n rnn.zero_grad()\n\n for i in range(line_tensor.size()[0]):\n output, hidden = rnn(line_tensor[i], hidden)\n\n loss = self.loss_func(output, category_tensor)\n loss.backward()\n\n for i in rnn.parameters():\n i.data.add_(i.grad.data, alpha=-self.learning_rate)\n\n return output, loss.item()\n\n def predict(self, rnn: RecurrentNeuralNetwork, line: str, predictions_amount: int = 5) -> None:\n with torch.no_grad():\n hidden = rnn.get_hidden()\n line_tensor = Utils.line_to_tensor(line, self.all_letters)\n for i in range(line_tensor.size()[0]):\n output, hidden = rnn(line_tensor[i], hidden)\n _, indices = output.topk(predictions_amount, 1, True)\n\n for i in range(predictions_amount):\n print(f\"prediction: {self.all_categories[indices[0][i].item()]}\")\n\n def random_example(self) -> Tuple[str, str, Tensor, Tensor]:\n random_category = self.all_categories[random.randint(0, len(self.all_categories)-1)]\n random_word = self.category_lines[random_category][random.randint(0, len(self.category_lines[random_category])-1)]\n category_tensor = torch.tensor([self.all_categories.index(random_category)], dtype=torch.long)\n line_tensor = Utils.line_to_tensor(random_word, self.all_letters)\n return random_category, random_word, category_tensor, line_tensor\n" ]
[ [ "torch.nn.NLLLoss", "torch.no_grad", "torch.cuda.is_available", "torch.load" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
4evernaive/YOLOv3Tiny_Face_Mask
[ "4053aac90d6eaece71662b1fcc96b3d974663bc2" ]
[ "yolo_face.py" ]
[ "import os\nimport numpy as np\nimport cv2.cv2 as cv2\nfrom imutils.video import FPS\nimport argparse\nimport datetime\nimport time\nimport uuid\nimport random\nimport pyimgur\nfrom store_image import storeImage\nfrom linebot import LineBotApi\nfrom fire import getName, getDevice, getLink, getAll\nfrom bigdataProxy import injectNotificationDataSet\nfrom linebot.exceptions import LineBotApiError\nfrom linebot.models import TextSendMessage, TemplateSendMessage, ButtonsTemplate, URITemplateAction, MessageAction, URIAction\nfrom camera.app import getHost\n#from pyngrok import ngrok\n#http_tunnel = ngrok.connect(5051)\n# use index 1 for mac camera\nif(getLink() == '0'):\n cap = cv2.VideoCapture(0)\nelse:\n cap = cv2.VideoCapture(getLink())\nwidth = int(cap.get(cv2.CAP_PROP_FRAME_WIDTH))\nheight = int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT))\nfourcc = cv2.VideoWriter_fourcc(*'MJPG')\nout = cv2.VideoWriter('output.avi', fourcc, 20.0, (width, height))\n# print(getDevice())\nline_bot_api = LineBotApi(\n \"5OjwvGGPi4zutObUFkeeOQ5Cf712R7cwPFinDqyNbMFrWi4zTOF4/QXAbM1Vj/Be5LriCleS8HQmjABnGrKWb1WocThH1l6Q5QyQySDQss57hkE5sS76x2hdEKfqOWcW7+PEp5WD/yHXurbCa2fR0gdB04t89/1O/w1cDnyilFU=\")\n# construct the argument parse and parse the arguments\nap = argparse.ArgumentParser()\nap.add_argument(\"-y\", \"--yolo\", required=True,\n help=\"base path to YOLO directory\")\nap.add_argument(\"-c\", \"--confidence\", type=float, default=0.5,\n help=\"minimum probability to filter weak detections\")\nap.add_argument(\"-t\", \"--threshold\", type=float, default=0.3,\n help=\"threshold when applyong non-maxima suppression\")\nargs = vars(ap.parse_args())\n\n# load the COCO class labels our YOLO model was trained on\nlabelsPath = os.path.sep.join([args[\"yolo\"], \".names\"])\nLABELS = open(labelsPath).read().strip().split(\"\\n\")\n\n# initialize a list of colors to represent each possible class label\nnp.random.seed(42)\nCOLORS = [[0, 255, 0], [0, 0, 255]]\n\n# derive the paths to the YOLO weights and model configuration\nweightsPath = os.path.sep.join([args[\"yolo\"], \"yolo.weights\"])\nconfigPath = os.path.sep.join([args[\"yolo\"], \"yolo.cfg\"])\n\n# load our YOLO object detector trained on COCO dataset (80 classes)\nprint(\"[INFO] loading YOLO from disk...\")\nnet = cv2.dnn.readNetFromDarknet(configPath, weightsPath)\nnext_frame_towait = 5 # for sms\nfps = FPS().start()\nframeId = 0\nwhile(True):\n # Capture frame-by-frame\n ret, frame = cap.read()\n # DETECTION WITH YOLO\n # load our input image and grab its spatial dimensions\n (H, W) = frame.shape[:2]\n frameId += 1\n # determine only the *output* layer names that we need from YOLO\n ln = net.getLayerNames()\n ln = [ln[i[0] - 1] for i in net.getUnconnectedOutLayers()]\n\n # construct a blob from the input image and then perform a forward\n # pass of the YOLO object detector, giving us our bounding boxes and\n # associated probabilities\n blob = cv2.dnn.blobFromImage(frame, 1 / 255.0, (416, 416),\n swapRB=True, crop=False)\n net.setInput(blob)\n start = time.time()\n layerOutputs = net.forward(ln)\n end = time.time()\n\n # show timing information on YOLO\n\n # initialize our lists of detected bounding boxes, confidences, and\n # class IDs, respectively\n boxes = []\n confidences = []\n classIDs = []\n\n # loop over each of the layer outputs\n for output in layerOutputs:\n # loop over each of the detections\n for detection in output:\n # extract the class ID and confidence (i.e., probability) of\n # the current object detection\n scores = detection[5:]\n classID = 
np.argmax(scores)\n confidence = scores[classID]\n\n # filter out weak predictions by ensuring the detected\n # probability is greater than the minimum probability\n if confidence > args[\"confidence\"]:\n # scale the bounding box coordinates back relative to the\n # size of the image, keeping in mind that YOLO actually\n # returns the center (x, y)-coordinates of the bounding\n # box followed by the boxes' width and height\n box = detection[0:4] * np.array([W, H, W, H])\n (centerX, centerY, width, height) = box.astype(\"int\")\n\n # use the center (x, y)-coordinates to derive the top and\n # and left corner of the bounding box\n x = int(centerX - (width / 2))\n y = int(centerY - (height / 2))\n\n # update our list of bounding box coordinates, confidences,\n # and class IDs\n boxes.append([x, y, int(width), int(height)])\n confidences.append(float(confidence))\n classIDs.append(classID)\n\n # apply non-maxima suppression to suppress weak, overlapping bounding\n # boxes\n idxs = cv2.dnn.NMSBoxes(boxes, confidences, args[\"confidence\"],\n args[\"threshold\"])\n border_size = 100\n border_text_color = [255, 255, 255]\n\n filtered_classids = np.take(classIDs, idxs)\n mask_count = (filtered_classids == 0).sum()\n nomask_count = (filtered_classids == 1).sum()\n # display count\n text = \"NoMaskCount: {}\".format(nomask_count)\n cv2.putText(frame, text, (W-170, 100),\n cv2.FONT_HERSHEY_SIMPLEX, 0.65, border_text_color, 2)\n text = \"MaskCount: {}\".format(mask_count)\n cv2.putText(frame, text, (W-170, 125),\n cv2.FONT_HERSHEY_SIMPLEX, 0.65, border_text_color, 2)\n text = \"AllCount: {}\".format(mask_count+nomask_count)\n cv2.putText(frame, text, (W-170, 150),\n cv2.FONT_HERSHEY_SIMPLEX, 0.65, border_text_color, 2)\n # display status\n ratio = nomask_count/(mask_count+nomask_count+0.000001)\n out.write(frame)\n if len(idxs) > 0:\n # loop over the indexes we are keeping\n for i in idxs.flatten():\n # extract the bounding box coordinates\n (x, y) = (boxes[i][0], boxes[i][1])\n (w, h) = (boxes[i][2], boxes[i][3])\n\n # draw a bounding box rectangle and label on the image\n color = [int(c) for c in COLORS[classIDs[i]]]\n cv2.rectangle(frame, (x, y), (x + w, y + h), color, 2)\n text = \"{}: {:.4f}\".format(LABELS[classIDs[i]], confidences[i])\n cv2.putText(frame, text, (x, y - 5), cv2.FONT_HERSHEY_SIMPLEX,\n 0.5, color, 2)\n if ratio != 0 and np.isnan(ratio) != True:\n text = \"Warning !\"\n cv2.putText(frame, text, (W-170, int(border_size-50)),\n cv2.FONT_HERSHEY_SIMPLEX, 0.65, [13, 23, 227], 2)\n if fps._numFrames >= next_frame_towait:\n print(\"[INFO] YOLOV3 took {:.6f} seconds to capture the person without the mask.\".format(\n end - start))\n all_info = getAll()\n sb_time = datetime.datetime.now().strftime(\"%Y-%m-%d %H:%M\")\n sb_url = str(storeImage(frame, str(uuid.uuid5(\n uuid.NAMESPACE_DNS, str(uuid.uuid1()) + str(random.random())))))\n total_count = int(nomask_count)+int(mask_count)\n if(getLink() == '0'):\n line_bot_api.broadcast(TemplateSendMessage(alt_text='偵測到有人未帶口罩,請盡速查看!', template=ButtonsTemplate(title='場域:'+str(getName()), thumbnail_image_url=sb_url, text=\"場域內總人數:\"+str(total_count)+\"\\n警示事件:有 \"+str(\n nomask_count)+\" 人沒戴口罩\\n\"+\"擷取時間:\"+str(sb_time), actions=[URIAction(label='統計報表', uri='https://datastudio.google.com/u/5/reporting/0420b197-cbec-4bbe-84e6-29f95dd1fe08/page/9qmvB')])))\n line_bot_api.broadcast(TextSendMessage(\n text=\"辨識裝置:\"+str(all_info['device'])+\"\\n辨識時間:{:.6f} seconds\".format(end - start)+\"\\n串流鏈接:http://\"+str(getHost())+\":5051/video\"))\n 
injectNotificationDataSet(str(all_info['device']), sb_url, str(sb_time), str(all_info['area']), str(\n all_info['stream_link']), str(nomask_count), str(int(nomask_count)+int(mask_count)))\n else:\n line_bot_api.broadcast(TemplateSendMessage(alt_text='偵測到有人未帶口罩,請盡速查看!', template=ButtonsTemplate(title='場域:'+str(getName()), thumbnail_image_url=sb_url, text=\"場域內總人數:\"+str(total_count)+\"\\n警示事件:有 \"+str(\n nomask_count)+\" 人沒戴口罩\\n\"+\"擷取時間:\"+str(sb_time), actions=[URIAction(label='統計報表', uri='https://datastudio.google.com/u/5/reporting/0420b197-cbec-4bbe-84e6-29f95dd1fe08/page/9qmvB')])))\n line_bot_api.broadcast(TextSendMessage(text=\"辨識裝置:\"+str(all_info['device'])+\"\\n辨識時間:{:.6f} seconds\".format(\n end - start)+\"\\n串流鏈接:http://\"+getLink()))\n injectNotificationDataSet(str(all_info['device']), sb_url, str(sb_time), str(all_info['area']), str(\n all_info['stream_link']), str(nomask_count), str(int(nomask_count)+int(mask_count)))\n\n next_frame_towait = fps._numFrames+(5*15)\n\n else:\n text = \"Safe \"\n cv2.putText(frame, text, (W-100, int(border_size-50)),\n cv2.FONT_HERSHEY_SIMPLEX, 0.65, [0, 255, 0], 2)\n\n\n\n # Display the resulting frame\n cv2.imshow('frame', frame)\n fps.update()\n if cv2.waitKey(1) & 0xFF == ord('q'):\n break\nfps.stop()\n# When everything done, release the capture\ncap.release()\nout.release()\ncv2.destroyAllWindows()\nprint(\"[INFO] elasped time: {:.2f}\".format(fps.elapsed()))\nprint(\"[INFO] approx. FPS: {:.2f}\".format(fps.fps()))\n" ]
[ [ "numpy.take", "numpy.random.seed", "numpy.isnan", "numpy.argmax", "numpy.array" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
ypeng22/ProgLearn
[ "671ff6a03c156bab3eedbd9e112705eeabd59da7" ]
[ "proglearn/tests/test_transformer.py" ]
[ "import pytest\nimport numpy as np\nfrom numpy.testing import assert_allclose\nfrom sklearn.exceptions import NotFittedError\n\nfrom proglearn.transformers import TreeClassificationTransformer\n\n\nclass TestTreeClassificationTransformer:\n def test_init(self):\n TreeClassificationTransformer()\n assert True\n\n def test_predict_without_fit(self):\n # Generate random data\n X = np.random.normal(0, 1, size=(100, 3))\n\n with pytest.raises(NotFittedError):\n trt = TreeClassificationTransformer()\n trt.transform(X)\n\n def test_correct_transformation(self):\n np.random.seed(1)\n\n trt = TreeClassificationTransformer()\n\n X = np.concatenate((np.zeros(100), np.ones(100))).reshape(-1, 1)\n y = np.concatenate((np.zeros(100), np.ones(100)))\n\n trt.fit(X, y)\n u1 = trt.transform(np.array([0]).reshape(1, -1))\n u2 = trt.transform(np.array([1]).reshape(1, -1))\n assert u1 != u2\n" ]
[ [ "numpy.random.seed", "numpy.ones", "numpy.random.normal", "numpy.array", "numpy.zeros" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
donfiguerres/COVID-19-Tracker-PH
[ "af98f3cdc83093363035e775c4b517535f8d950d" ]
[ "tests/unit/test_trackerchart.py" ]
[ "\"\"\"Unit tests for the trackerchart module.\"\"\"\n\nimport pandas as pd\nimport pytest\n\nimport trackerchart as tc\n\n\ndef test_filter_date_range():\n columns = [\"date\", \"fruit\"]\n data = [\n [\"2021-10-09\", \"apple\"],\n [\"2021-10-10\", \"banana\"],\n [\"2021-10-11\", \"calamansi\"],\n [\"2021-10-12\", \"durian\"],\n [\"2021-10-13\", \"eggplant\"]\n ]\n df = pd.DataFrame(data, columns=columns)\n df['date'] = df.apply(lambda row : pd.to_datetime(row['date']), axis=1)\n # with date column arg\n # both start and end\n filtered = tc.filter_date_range(df, start=pd.to_datetime(\"2021-10-10\"),\n end=pd.to_datetime(\"2021-10-12\"),\n date_column='date')\n assert 'apple' not in filtered.values\n assert 'banana' in filtered.values\n assert 'calamansi' in filtered.values\n assert 'durian' in filtered.values\n assert 'eggplant' not in filtered.values\n # start only\n filtered = tc.filter_date_range(df, start=pd.to_datetime(\"2021-10-10\"),\n date_column='date')\n assert 'apple' not in filtered.values\n assert 'banana' in filtered.values\n assert 'calamansi' in filtered.values\n assert 'durian' in filtered.values\n assert 'eggplant' in filtered.values\n # end only\n filtered = tc.filter_date_range(df, end=pd.to_datetime(\"2021-10-12\"),\n date_column='date')\n assert 'apple' in filtered.values\n assert 'banana' in filtered.values\n assert 'calamansi' in filtered.values\n assert 'durian' in filtered.values\n assert 'eggplant' not in filtered.values\n # neither start nor end\n with pytest.raises(ValueError):\n filtered = tc.filter_date_range(df, date_column='date')\n # without date column arg\n df_indexed = df.set_index('date')\n # both start and end\n filtered = tc.filter_date_range(df_indexed,\n start=pd.to_datetime(\"2021-10-10\"),\n end=pd.to_datetime(\"2021-10-12\"))\n assert 'apple' not in filtered.values\n assert 'banana' in filtered.values\n assert 'calamansi' in filtered.values\n assert 'durian' in filtered.values\n assert 'eggplant' not in filtered.values\n # start only\n filtered = tc.filter_date_range(df_indexed,\n start=pd.to_datetime(\"2021-10-10\"))\n assert 'apple' not in filtered.values\n assert 'banana' in filtered.values\n assert 'calamansi' in filtered.values\n assert 'durian' in filtered.values\n assert 'eggplant' in filtered.values\n # end only\n filtered = tc.filter_date_range(df_indexed,\n end=pd.to_datetime(\"2021-10-12\"))\n assert 'apple' in filtered.values\n assert 'banana' in filtered.values\n assert 'calamansi' in filtered.values\n assert 'durian' in filtered.values\n assert 'eggplant' not in filtered.values\n # neither start nor end\n with pytest.raises(ValueError):\n filtered = tc.filter_date_range(df_indexed)\n" ]
[ [ "pandas.to_datetime", "pandas.DataFrame" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [ "0.23", "0.21", "2.0", "1.4", "1.3", "0.19", "1.1", "1.5", "0.24", "0.20", "1.0", "0.25", "1.2" ], "scipy": [], "tensorflow": [] } ]
Zyun-Y/BiconNets
[ "f5ea85dc58550969b99a2ccccccd8133dda4358c", "f5ea85dc58550969b99a2ccccccd8133dda4358c", "f5ea85dc58550969b99a2ccccccd8133dda4358c" ]
[ "paper_result/MINet/bicon/train/network/MINet.py", "paper_result/PoolNet/bicon/train/train.py", "paper_result/MINet/bicon/train/utils/tensor_ops.py" ]
[ "import torch\nimport torch.nn as nn\n\nfrom module.BaseBlocks import BasicConv2d\nfrom utils.tensor_ops import cus_sample, upsample_add\nfrom backbone.origin.from_origin import Backbone_ResNet50_in3, Backbone_VGG16_in3\nfrom module.MyModule import AIM, SIM\n\n\nclass MINet_VGG16(nn.Module):\n def __init__(self):\n super(MINet_VGG16, self).__init__()\n self.upsample_add = upsample_add\n self.upsample = cus_sample\n\n (\n self.encoder1,\n self.encoder2,\n self.encoder4,\n self.encoder8,\n self.encoder16,\n ) = Backbone_VGG16_in3()\n\n self.trans = AIM((64, 128, 256, 512, 512), (32, 64, 64, 64, 64))\n\n self.sim16 = SIM(64, 32)\n self.sim8 = SIM(64, 32)\n self.sim4 = SIM(64, 32)\n self.sim2 = SIM(64, 32)\n self.sim1 = SIM(32, 16)\n\n self.upconv16 = BasicConv2d(64, 64, kernel_size=3, stride=1, padding=1)\n self.upconv8 = BasicConv2d(64, 64, kernel_size=3, stride=1, padding=1)\n self.upconv4 = BasicConv2d(64, 64, kernel_size=3, stride=1, padding=1)\n self.upconv2 = BasicConv2d(64, 32, kernel_size=3, stride=1, padding=1)\n self.upconv1 = BasicConv2d(32, 32, kernel_size=3, stride=1, padding=1)\n\n self.classifier = nn.Conv2d(32, 1, 1)\n\n def forward(self, in_data):\n in_data_1 = self.encoder1(in_data)\n in_data_2 = self.encoder2(in_data_1)\n in_data_4 = self.encoder4(in_data_2)\n in_data_8 = self.encoder8(in_data_4)\n in_data_16 = self.encoder16(in_data_8)\n\n in_data_1, in_data_2, in_data_4, in_data_8, in_data_16 = self.trans(\n in_data_1, in_data_2, in_data_4, in_data_8, in_data_16\n )\n\n out_data_16 = self.upconv16(self.sim16(in_data_16) + in_data_16) # 1024\n\n out_data_8 = self.upsample_add(out_data_16, in_data_8)\n out_data_8 = self.upconv8(self.sim8(out_data_8) + out_data_8) # 512\n\n out_data_4 = self.upsample_add(out_data_8, in_data_4)\n out_data_4 = self.upconv4(self.sim4(out_data_4) + out_data_4) # 256\n\n out_data_2 = self.upsample_add(out_data_4, in_data_2)\n out_data_2 = self.upconv2(self.sim2(out_data_2) + out_data_2) # 64\n\n out_data_1 = self.upsample_add(out_data_2, in_data_1)\n out_data_1 = self.upconv1(self.sim1(out_data_1) + out_data_1) # 32\n\n out_data = self.classifier(out_data_1)\n\n return out_data\n\n\nclass MINet_Res50(nn.Module):\n def __init__(self):\n super(MINet_Res50, self).__init__()\n self.div_2, self.div_4, self.div_8, self.div_16, self.div_32 = Backbone_ResNet50_in3()\n\n self.upsample_add = upsample_add\n self.upsample = cus_sample\n\n self.trans = AIM(iC_list=(64, 256, 512, 1024, 2048), oC_list=(64, 64, 64, 64, 64))\n\n self.sim32 = SIM(64, 32)\n self.sim16 = SIM(64, 32)\n self.sim8 = SIM(64, 32)\n self.sim4 = SIM(64, 32)\n self.sim2 = SIM(64, 32)\n\n self.upconv32 = BasicConv2d(64, 64, kernel_size=3, stride=1, padding=1)\n self.upconv16 = BasicConv2d(64, 64, kernel_size=3, stride=1, padding=1)\n self.upconv8 = BasicConv2d(64, 64, kernel_size=3, stride=1, padding=1)\n self.upconv4 = BasicConv2d(64, 64, kernel_size=3, stride=1, padding=1)\n self.upconv2 = BasicConv2d(64, 32, kernel_size=3, stride=1, padding=1)\n self.upconv1 = BasicConv2d(32, 32, kernel_size=3, stride=1, padding=1)\n\n self.classifier = nn.Conv2d(32, 1, 1)\n\n def forward(self, in_data):\n in_data_2 = self.div_2(in_data)\n in_data_4 = self.div_4(in_data_2)\n in_data_8 = self.div_8(in_data_4)\n in_data_16 = self.div_16(in_data_8)\n in_data_32 = self.div_32(in_data_16)\n\n in_data_2, in_data_4, in_data_8, in_data_16, in_data_32 = self.trans(\n in_data_2, in_data_4, in_data_8, in_data_16, in_data_32\n )\n\n out_data_32 = self.upconv32(self.sim32(in_data_32) + in_data_32) # 1024\n\n 
out_data_16 = self.upsample_add(out_data_32, in_data_16) # 1024\n out_data_16 = self.upconv16(self.sim16(out_data_16) + out_data_16)\n\n out_data_8 = self.upsample_add(out_data_16, in_data_8)\n out_data_8 = self.upconv8(self.sim8(out_data_8) + out_data_8) # 512\n\n out_data_4 = self.upsample_add(out_data_8, in_data_4)\n out_data_4 = self.upconv4(self.sim4(out_data_4) + out_data_4) # 256\n\n out_data_2 = self.upsample_add(out_data_4, in_data_2)\n out_data_2 = self.upconv2(self.sim2(out_data_2) + out_data_2) # 64\n\n out_data_1 = self.upconv1(self.upsample(out_data_2, scale_factor=2)) # 32\n out_data = self.classifier(out_data_1)\n\n return out_data\n\n\nif __name__ == \"__main__\":\n in_data = torch.randn((1, 3, 288, 288))\n net = MINet_VGG16()\n print(net(in_data).size())\n", "import numpy as np\nimport matplotlib.pyplot as plt\nimport torch\nfrom torch.autograd import Variable\n\nimport glob\nfrom torchvision import datasets, transforms\nfrom skimage.io import imread, imsave\nfrom solver import Solver\nimport torch.nn.functional as F\nimport torch.nn as nn\n\nfrom network.poolnet import PoolNet, build_model,weights_init\nfrom get_data import get_data, Config\nimport dataset\n\n\ntorch.cuda.set_device(1)\n\nmode = 'norm_by_data'\n\ndef my_collate(batch):\n # print(len(batch))\n\n \n batch.sort(key=lambda x:x[1].shape[2],reverse=True)\n w=batch[0][1].shape[2]\n batch.sort(key=lambda x:x[1].shape[1],reverse=True)\n h=batch[0][1].shape[1]\n # print(h,w)\n c = len(batch)\n\n data0 = torch.zeros([c,3,h,w])\n gt0 = torch.zeros([c,1,h,w])\n conn0 = torch.zeros([c,8,h,w])\n i = 0\n for item in batch:\n # print(item[0].shape)\n hh = item[0].shape[1]\n ww = item[0].shape[2]\n data0[i,:3,:hh,:ww] = item[0]\n gt0[i,0,:hh,:ww] = item[1]\n conn0[i,:8,:hh,:ww] = item[2]\n i=i+1\n # target = torch.LongTensor(target)\n return [data0,gt0,conn0]\n\n\nfor exp_id in range(1):\n\n if exp_id ==0:\n dataset = 'ECSSD'\n\n train_data, test_data = get_data(mode=mode,data=dataset)\n\n\n\n train_loader = torch.utils.data.DataLoader(train_data,pin_memory=(torch.cuda.is_available()), batch_size=1,shuffle=True, num_workers=4)\n val_loader = torch.utils.data.DataLoader(test_data,pin_memory=(torch.cuda.is_available()), batch_size=1, shuffle=False, num_workers=4)\n\n print(\"Train size: %i\" % len(train_loader))\n print(\"Test size: %i\" % len(val_loader))\n\n\n\n model = build_model('resnet').cuda()\n model.apply(weights_init)\n model.base.load_pretrained_model('/github/resnet50-19c8e357.pth')\n\n \n print(model)\n device = torch.device('cpu')\n\n cfg = Config(lr=2e-5, momen=0.9, decay=5e-4, epoch=30,exp_id= exp_id+1,real_batch=10, betas=(0.9, 0.999),eps=1e-8)\n solver = Solver()\n\n solver.train(model, train_loader, val_loader,cfg)\n\n\n", "# -*- coding: utf-8 -*-\n\nimport torch\nimport torch.nn.functional as F\n\n\ndef cus_sample(feat, **kwargs):\n \"\"\"\n :param feat: input feature map\n :param kwargs: size or scale_factor\n \"\"\"\n assert len(kwargs.keys()) == 1 and list(kwargs.keys())[0] in [\"size\", \"scale_factor\"]\n return F.interpolate(feat, **kwargs, mode=\"bilinear\", align_corners=False)\n\n\ndef upsample_add(*xs):\n y = xs[-1]\n for x in xs[:-1]:\n y = y + F.interpolate(x, size=y.size()[2:], mode=\"bilinear\", align_corners=False)\n return y\n\n\ndef upsample_cat(*xs):\n y = xs[-1]\n out = []\n for x in xs[:-1]:\n out.append(F.interpolate(x, size=y.size()[2:], mode=\"bilinear\", align_corners=False))\n return torch.cat([*out, y], dim=1)\n\n\ndef upsample_reduce(b, a):\n \"\"\"\n Upsample b to the spatial scale of a and reduce a to the channel count of b\n \"\"\"\n _, C, _, _ = 
b.size()\n N, _, H, W = a.size()\n\n b = F.interpolate(b, size=(H, W), mode=\"bilinear\", align_corners=False)\n a = a.reshape(N, -1, C, H, W).mean(1)\n\n return b + a\n\n\ndef shuffle_channels(x, groups):\n \"\"\"\n Channel shuffle: [N,C,H,W] -> [N,g,C/g,H,W] -> [N,C/g,g,H,W] -> [N,C,H,W]\n The C channels are split into g shuffled groups: first reshape C into the shape (g, C/g),\n then transpose to (C/g, g) and finally flatten back into C channels\n \"\"\"\n N, C, H, W = x.size()\n x = x.reshape(N, groups, C // groups, H, W).permute(0, 2, 1, 3, 4)\n return x.reshape(N, C, H, W)\n\n\nif __name__ == \"__main__\":\n a = torch.rand(3, 4, 10, 10)\n b = torch.rand(3, 2, 5, 5)\n print(upsample_reduce(b, a).size())\n" ]
[ [ "torch.randn", "torch.nn.Conv2d" ], [ "torch.device", "torch.cuda.is_available", "torch.cuda.set_device", "torch.zeros" ], [ "torch.nn.functional.interpolate" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
casutton/bayes-qnet
[ "0fff4b38394e47b02c0b4f0a4e433049b78fa09c" ]
[ "src/utils.py" ]
[ "import numpy\n\ndef as_str (l):\n return ' '.join (map(str, l))\n\ndef randelt (l):\n return l[numpy.random.randint(len(l))]\n\ndef append (*l):\n result = []\n for x in l: result.append (x)\n return result\n\ndef roll_die (p):\n if sum(p) > 1 + 1e-12:\n raise ValueError(\"Huh? p: %s\" % (p,))\n # Weird numpy thing\n if sum(p) > 1 - 1e-10:\n for i in range(len(p)):\n if p[i] > 1e-10:\n p[i] -= 1e-10\n break\n a = numpy.random.multinomial (1, p)\n return int(numpy.where(a==1)[0])\n\ndef delete_all (l, elt):\n ndel = 0\n for x in l:\n if x == elt: ndel+=1\n nrem = len(l) - ndel\n newl = [None] * nrem\n i = 0\n for x in l:\n if x != elt:\n newl[i] = x\n i += 1\n return newl\n" ]
[ [ "numpy.random.multinomial", "numpy.where" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
vbacher/PYRO-NN
[ "e476024a44802b983c6f2ebd6f4444d9fe54120f", "e476024a44802b983c6f2ebd6f4444d9fe54120f", "e476024a44802b983c6f2ebd6f4444d9fe54120f" ]
[ "examples/ct_reconstruction/example_fdk.py", "gradient_check/gradient_check.py", "pyronn/ct_reconstruction/layers/projection_3d.py" ]
[ "# Copyright [2019] [Christopher Syben, Markus Michen]\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport numpy as np\nimport tensorflow as tf\nimport matplotlib.pyplot as plt\n\nfrom pyronn.ct_reconstruction.helpers.misc import generate_sinogram\nfrom pyronn.ct_reconstruction.layers.projection_3d import cone_projection3d\nfrom pyronn.ct_reconstruction.layers.backprojection_3d import cone_backprojection3d\nfrom pyronn.ct_reconstruction.geometry.geometry_cone_3d import GeometryCone3D\nfrom pyronn.ct_reconstruction.helpers.phantoms import shepp_logan\nfrom pyronn.ct_reconstruction.helpers.trajectories import circular_trajectory\nfrom pyronn.ct_reconstruction.helpers.filters.filters import ram_lak_3D\nimport pyronn.ct_reconstruction.helpers.filters.weights as ct_weights\n\n\n\nclass nn_model:\n def __init__(self, geometry):\n self.geometry = geometry\n\n self.cosine_weight = tf.Variable(name='cosine_weight', dtype=tf.float32,\n initial_value=ct_weights.cosine_weights_3d(self.geometry), trainable=False)\n\n self.redundancy_weight = tf.Variable(name='redundancy_weight', dtype=tf.float32,\n initial_value=ct_weights.parker_weights_3d(self.geometry), trainable=False)\n\n self.filter = tf.Variable(name='reco_filter', dtype=tf.float32, initial_value=ram_lak_3D(self.geometry), trainable=False)\n\n\n\n def model(self, sinogram):\n self.sinogram_cos = tf.multiply(sinogram, self.cosine_weight)\n self.redundancy_weighted_sino = tf.multiply(self.sinogram_cos,self.redundancy_weight)\n\n self.weighted_sino_fft = tf.signal.fft(tf.cast(self.redundancy_weighted_sino, dtype=tf.complex64))\n self.filtered_sinogram_fft = tf.multiply(self.weighted_sino_fft, tf.cast(self.filter,dtype=tf.complex64))\n self.filtered_sinogram = tf.math.real(tf.signal.ifft(self.filtered_sinogram_fft))\n\n self.reconstruction = cone_backprojection3d(self.filtered_sinogram,self.geometry, hardware_interp=True)\n\n return self.reconstruction, self.redundancy_weighted_sino\n\n\ndef example_cone_3d():\n # ------------------ Declare Parameters ------------------\n\n # Volume Parameters:\n volume_size = 256\n volume_shape = [volume_size, volume_size, volume_size]\n v_spacing = 0.25\n volume_spacing = [v_spacing,v_spacing,v_spacing]\n\n # Detector Parameters:\n detector_shape = [450 , 450]\n d_spacing = 0.33\n detector_spacing = [d_spacing,d_spacing]\n\n # Trajectory Parameters:\n number_of_projections = 248\n angular_range = np.pi+2*np.arctan(detector_shape[0] / 2 / 1200)\n\n source_detector_distance = 1200\n source_isocenter_distance = 750\n\n # create Geometry class\n geometry = GeometryCone3D(volume_shape, volume_spacing, detector_shape, detector_spacing, number_of_projections, angular_range, source_detector_distance, source_isocenter_distance)\n geometry.angular_range = np.radians(200)\n projection_geometry = circular_trajectory.circular_trajectory_3d(geometry)\n\n geometry.set_trajectory(projection_geometry)\n\n # Get Phantom 3d\n phantom = shepp_logan.shepp_logan_3d(volume_shape)\n phantom = np.expand_dims(phantom,axis=0)\n\n # 
gpus = tf.config.experimental.list_physical_devices('GPU')\n # if gpus:\n # try:\n # for gpu in gpus:\n # tf.config.experimental.set_memory_growth(gpu, True)\n # except RuntimeError as e:\n # print(e)\n # ------------------ Call Layers ------------------\n\n sinogram = generate_sinogram.generate_sinogram(phantom, cone_projection3d, geometry)\n\n model = nn_model(geometry)\n reco, redundancy_weighted_sino = model.model(sinogram)\n\n plt.figure()\n plt.imshow(np.squeeze(reco)[volume_shape[0]//2], cmap=plt.get_cmap('gist_gray'), vmin=0, vmax=0.4)\n plt.axis('off')\n plt.savefig('fdk_reco.png', dpi=150, transparent=False, bbox_inches='tight')\n\n\nif __name__ == '__main__':\n example_cone_3d()\n", "import numpy as np\nimport tensorflow as tf\nimport matplotlib.pyplot as plt\n\nfrom pyronn.ct_reconstruction.layers.projection_2d import parallel_projection2d\nfrom pyronn.ct_reconstruction.layers.projection_2d import fan_projection2d\nfrom pyronn.ct_reconstruction.layers.projection_3d import cone_projection3d\nfrom pyronn.ct_reconstruction.layers.backprojection_2d import parallel_backprojection2d\nfrom pyronn.ct_reconstruction.layers.backprojection_2d import fan_backprojection2d\nfrom pyronn.ct_reconstruction.layers.backprojection_3d import cone_backprojection3d\nfrom pyronn.ct_reconstruction.geometry.geometry_parallel_2d import GeometryParallel2D\nfrom pyronn.ct_reconstruction.geometry.geometry_fan_2d import GeometryFan2D\nfrom pyronn.ct_reconstruction.geometry.geometry_cone_3d import GeometryCone3D\nfrom pyronn.ct_reconstruction.helpers.filters import filters\nfrom pyronn.ct_reconstruction.helpers.phantoms import shepp_logan\nfrom pyronn.ct_reconstruction.helpers.trajectories import circular_trajectory\n\n\ndef example_parallel_2d():\n # ------------------ Declare Parameters ------------------\n\n # Volume Parameters:\n volume_size = 64\n volume_shape = [volume_size, volume_size]\n volume_spacing = [1, 1]\n\n # Detector Parameters:\n detector_shape = 100\n detector_spacing = 1\n\n # Trajectory Parameters:\n number_of_projections = 90\n angular_range = np.pi\n\n # create Geometry class\n geometry = GeometryParallel2D(volume_shape, volume_spacing, detector_shape, detector_spacing, number_of_projections, angular_range)\n geometry.set_trajectory(circular_trajectory.circular_trajectory_2d(geometry))\n\n # Get Phantom\n phantom = shepp_logan.shepp_logan_enhanced(volume_shape).astype(np.float32)\n # Add required batch dimension\n phantom = np.expand_dims(phantom, axis=0)\n sino = parallel_projection2d(phantom,geometry)\n @tf.function\n def test_func_proj(x):\n return parallel_projection2d(x,geometry)\n\n @tf.function\n def test_func_reco(x):\n return parallel_backprojection2d(x,geometry)\n\n proj_theoretical, proj_numerical = tf.test.compute_gradient(test_func_proj, [sino])\n reco_theoretical, reco_numerical = tf.test.compute_gradient(test_func_reco, [sino])\n\ndef example_fan_2d():\n # ------------------ Declare Parameters ------------------\n\n # Volume Parameters:\n volume_size = 64\n volume_shape = [volume_size, volume_size]\n volume_spacing = [1, 1]\n\n # Detector Parameters:\n detector_shape = 100\n detector_spacing = 1\n\n # Trajectory Parameters:\n number_of_projections = 90\n angular_range = np.pi\n\n source_detector_distance = 1200\n source_isocenter_distance = 750\n\n # create Geometry class\n geometry = GeometryFan2D(volume_shape, volume_spacing, detector_shape, detector_spacing, number_of_projections, angular_range, source_detector_distance, source_isocenter_distance)\n 
geometry.set_trajectory(circular_trajectory.circular_trajectory_2d(geometry))\n\n # Get Phantom\n phantom = shepp_logan.shepp_logan_enhanced(volume_shape).astype(np.float32)\n # Add required batch dimension\n phantom = np.expand_dims(phantom, axis=0)\n sino = fan_projection2d(phantom,geometry)\n @tf.function\n def test_func_proj(x):\n return fan_projection2d(x,geometry)\n\n @tf.function\n def test_func_reco(x):\n return fan_backprojection2d(x,geometry)\n\n proj_theoretical, proj_numerical = tf.test.compute_gradient(test_func_proj, [sino])\n reco_theoretical, reco_numerical = tf.test.compute_gradient(test_func_reco, [sino])\n\ndef example_cone_3d():\n # ------------------ Declare Parameters ------------------\n\n # Volume Parameters:\n volume_size = 8\n volume_shape = [volume_size, volume_size, volume_size]\n volume_spacing = [1, 1, 1]\n\n # Detector Parameters:\n detector_shape = [12, 12]\n detector_spacing = [1,1]\n\n # Trajectory Parameters:\n number_of_projections = 12\n angular_range = np.pi\n\n source_detector_distance = 1200\n source_isocenter_distance = 750\n\n # create Geometry class\n geometry = GeometryCone3D(volume_shape, volume_spacing, detector_shape, detector_spacing, number_of_projections, angular_range, source_detector_distance, source_isocenter_distance)\n geometry.set_trajectory(circular_trajectory.circular_trajectory_3d(geometry))\n\n # Get Phantom\n phantom = shepp_logan.shepp_logan_3d(volume_shape).astype(np.float32)\n # Add required batch dimension\n phantom = np.expand_dims(phantom, axis=0)\n sino = cone_projection3d(phantom,geometry)\n @tf.function\n def test_func_proj(x):\n return cone_projection3d(x,geometry)\n\n @tf.function\n def test_func_reco(x):\n return cone_backprojection3d(x,geometry)\n\n proj_theoretical, proj_numerical = tf.test.compute_gradient(test_func_proj, [sino])\n reco_theoretical, reco_numerical = tf.test.compute_gradient(test_func_reco, [sino])\n\nif __name__ == '__main__':\n example_parallel_2d()\n example_fan_2d()\n example_cone_3d()", "# Copyright [2019] [Christopher Syben, Markus Michen]\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport tensorflow as tf\nfrom tensorflow.python.framework import ops\nimport pyronn_layers\nimport numpy as np\n\n# cone_projection3d\ndef cone_projection3d(volume, geometry, hardware_interp=True, step_size=1.0):\n \"\"\"\n Wrapper function for making the layer call.\n Args:\n volume: Input volume to project.\n geometry: Corresponding GeometryCone3D Object defining parameters.\n hardware_interp: Controls if interpolation is done by GPU.\n step_size: step_size along ray direction in voxel.\n Returns:\n Initialized lme_custom_ops.cone_projection3d layer.\n \"\"\"\n batch = np.shape(volume)[0]\n return pyronn_layers.cone_projection3d(volume,\n projection_shape=geometry.sinogram_shape,\n volume_origin=np.broadcast_to(geometry.volume_origin, [batch, *np.shape(geometry.volume_origin)]),\n volume_spacing=np.broadcast_to(geometry.volume_spacing, [batch, *np.shape(geometry.volume_spacing)]),\n 
projection_matrices=np.broadcast_to(geometry.projection_matrices, [batch, *np.shape(geometry.projection_matrices)]),\n step_size=np.broadcast_to(step_size, [batch, *np.shape(step_size)]),\n projection_multiplier=np.broadcast_to(geometry.projection_multiplier, [batch, *np.shape(geometry.projection_multiplier)]),\n hardware_interp=hardware_interp)\n\n\[email protected](\"ConeProjection3D\")\ndef _project_grad(op, grad):\n '''\n Compute the gradient of the projection op by invoking the backprojector.\n '''\n reco = pyronn_layers.cone_backprojection3d(\n sinogram=grad,\n volume_shape=op.inputs[0].shape[1:],\n volume_origin=op.inputs[2],\n volume_spacing=op.inputs[3],\n projection_matrices=op.inputs[4],\n step_size=op.inputs[5],\n projection_multiplier=op.inputs[6],\n hardware_interp=op.get_attr(\"hardware_interp\")\n )\n return [reco, tf.stop_gradient(op.inputs[1]), tf.stop_gradient(op.inputs[2]), tf.stop_gradient(op.inputs[3]), tf.stop_gradient(op.inputs[4]), tf.stop_gradient(op.inputs[5]), tf.stop_gradient(op.inputs[6])]\n" ]
[ [ "numpy.radians", "numpy.expand_dims", "tensorflow.multiply", "numpy.arctan", "numpy.squeeze", "tensorflow.cast", "matplotlib.pyplot.get_cmap", "matplotlib.pyplot.savefig", "matplotlib.pyplot.axis", "tensorflow.signal.ifft", "matplotlib.pyplot.figure" ], [ "numpy.expand_dims", "tensorflow.test.compute_gradient" ], [ "numpy.shape", "tensorflow.python.framework.ops.RegisterGradient", "tensorflow.stop_gradient" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [ "2.8", "1.10", "1.12", "2.7", "2.6", "1.4", "1.13", "2.3", "2.4", "2.9", "1.5", "1.7", "2.5", "0.12", "1.0", "2.2", "1.2", "2.10" ] } ]
yuki-inaho/Open3D
[ "ff7003d542c4fcf88a2d9e7fe08508b3e52dc702", "cbbee4e19a45551ada223f491e667f1868115ead" ]
[ "examples/Python/Advanced/non_blocking_visualization.py", "examples/Python/Basic/open3d_tutorial.py" ]
[ "# Open3D: www.open3d.org\n# The MIT License (MIT)\n# See license file or visit www.open3d.org for details\n\n# examples/Python/Advanced/non_blocking_visualization.py\n\nimport open3d as o3d\nimport numpy as np\nimport copy\n\nif __name__ == \"__main__\":\n o3d.utility.set_verbosity_level(o3d.utility.VerbosityLevel.Debug)\n source_raw = o3d.io.read_point_cloud(\"../../TestData/ICP/cloud_bin_0.pcd\")\n target_raw = o3d.io.read_point_cloud(\"../../TestData/ICP/cloud_bin_1.pcd\")\n source = source_raw.voxel_down_sample(voxel_size=0.02)\n target = target_raw.voxel_down_sample(voxel_size=0.02)\n trans = [[0.862, 0.011, -0.507, 0.0], [-0.139, 0.967, -0.215, 0.7],\n [0.487, 0.255, 0.835, -1.4], [0.0, 0.0, 0.0, 1.0]]\n source.transform(trans)\n\n flip_transform = [[1, 0, 0, 0], [0, -1, 0, 0], [0, 0, -1, 0], [0, 0, 0, 1]]\n source.transform(flip_transform)\n target.transform(flip_transform)\n\n vis = o3d.visualization.Visualizer()\n vis.create_window()\n vis.add_geometry(source)\n vis.add_geometry(target)\n threshold = 0.05\n icp_iteration = 100\n save_image = False\n\n for i in range(icp_iteration):\n reg_p2l = o3d.registration.registration_icp(\n source, target, threshold, np.identity(4),\n o3d.registration.TransformationEstimationPointToPlane(),\n o3d.registration.ICPConvergenceCriteria(max_iteration=1))\n source.transform(reg_p2l.transformation)\n vis.update_geometry(source)\n vis.poll_events()\n vis.update_renderer()\n if save_image:\n vis.capture_screen_image(\"temp_%04d.jpg\" % i)\n vis.destroy_window()\n", "import open3d as o3d\nimport numpy as np\nimport PIL.Image\nimport IPython.display\n\n\ndef jupyter_draw_geometries(\n geoms,\n window_name=\"Open3D\",\n width=1920,\n height=1080,\n left=50,\n top=50,\n point_show_normal=False,\n):\n vis = o3d.visualization.Visualizer()\n vis.create_window(window_name=window_name,\n width=width,\n height=height,\n left=left,\n top=top)\n vis.get_render_option().point_show_normal = point_show_normal\n for geom in geoms:\n vis.add_geometry(geom)\n vis.run()\n im = vis.capture_screen_float_buffer()\n vis.destroy_window()\n im = (255 * np.asarray(im)).astype(np.uint8)\n IPython.display.display(PIL.Image.fromarray(im, \"RGB\"))\n\n\no3d.visualization.draw_geometries = jupyter_draw_geometries\n" ]
[ [ "numpy.identity" ], [ "numpy.asarray" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
51616/split
[ "58b6efa8ab2c24e85c0a14922ee6a2a83aaa7e19", "58b6efa8ab2c24e85c0a14922ee6a2a83aaa7e19" ]
[ "augmentation.py", "spair/utils.py" ]
[ "import numpy as np\nimport tensorflow as tf\nimport tensorflow_probability as tfp\nimport os \n\nos.environ['TF_CPP_MIN_LOG_LEVEL'] = '2'\n\nwidth = height = 32\nchannel = 3\npatch_size_x = 8 ;patch_size_y = 8\n\nclass Augmentator(object):\n def __init__(self,type,size=1,mean=0,std=1):\n self.size = size\n if type=='scramble':\n self.augment = self.scramble\n elif type=='mix_scramble':\n self.augment = self.mix_scramble \n elif type=='blur':\n self.augment = self.gaussian_blur\n self.pointwise_filter = tf.eye(3, batch_shape=[1, 1])\n\n elif type=='high_low_pass':\n self.augment = self.high_low_pass\n self.kernel = self.gaussian_kernel(size,mean,std)\n self.kernel = tf.tile(self.kernel[:, :, tf.newaxis, tf.newaxis], [1, 1, 3, 1])\n self.pointwise_filter = tf.eye(3, batch_shape=[1, 1])\n self.paddings = [[size,size],[size,size],[0,0]]\n elif type=='no_op':\n self.augment = self.no_op\n\n\n def gaussian_kernel(self,size,mean,std):\n \"\"\"Makes 2D gaussian Kernel for convolution.\"\"\"\n d = tfp.distributions.Normal(mean, std)\n vals = d.prob(tf.range(start = -size, limit = size + 1, dtype = tf.float32))\n gauss_kernel = tf.einsum('i,j->ij',vals,vals)\n return gauss_kernel / tf.reduce_sum(gauss_kernel)\n\n def get_random_patch_size(self):\n return np.random.choice([1,2,4,8])\n\n def scramble(self,x):\n # assume square patch\n n_row,n_col,n_channel = x.shape\n n_patch = n_row*n_col // (self.size**2)\n patches = tf.image.extract_patches(tf.expand_dims(x,0),sizes=[1,self.size,self.size,1],strides=[1,self.size,self.size,1],rates=[1, 1, 1, 1],padding='VALID')\n patches = tf.reshape(patches,[n_patch,self.size,self.size,n_channel])\n patches = tf.random.shuffle(patches)\n # rand_idx = tf.reshape(tf.random.shuffle(tf.range(0,n_patch)),[n_patch])\n # patches = tf.gather(patches, rand_idx, axis=0)\n rows = tf.split(patches,n_col//self.size,axis=0)\n rows = [tf.concat(tf.unstack(x),axis=1) for x in rows]\n x_aug = tf.concat(rows,axis=0)\n\n x_aug = tf.convert_to_tensor(x_aug)\n return tf.concat([x, x_aug],axis=2)\n\n def mix_scramble(self,x):\n # assume square patch\n # sizes = tf.convert_to_tensor([1,2,4,8])\n # idx = tf.random.categorical([tf.ones_like(sizes)], 1)\n # print(idx)\n # patch_size = int(sizes[idx[0][0]])\n patch_size = self.get_random_patch_size()\n print('Patch size:',patch_size)\n window = [1,patch_size,patch_size,1]\n print('Window:',window)\n\n n_row,n_col,n_channel = x.shape\n n_patch = n_row*n_col // (patch_size**2)\n patches = tf.image.extract_patches(tf.expand_dims(x,0),sizes=window,strides=window,rates=[1, 1, 1, 1],padding='VALID')\n patches = tf.reshape(patches,[n_patch,patch_size,patch_size,n_channel])\n patches = tf.random.shuffle(patches)\n rows = tf.split(patches,n_col//patch_size,axis=0)\n rows = [tf.concat(tf.unstack(x),axis=1) for x in rows]\n x_aug = tf.concat(rows,axis=0)\n\n x_aug = tf.convert_to_tensor(x_aug)\n\n return tf.concat([x, x_aug],axis=2)\n\n def gaussian_blur(self,x):\n #create random gaussian blur filter\n mean = 0\n std = tf.random.uniform(shape=[],minval=5,maxval=10,dtype=tf.float32) # std [5-10]\n size = tf.random.uniform(shape=[],minval=3,maxval=7,dtype=tf.int32) # size [7-15]\n\n self.kernel = self.gaussian_kernel(size,mean,std)\n self.kernel = tf.tile(self.kernel[:, :, tf.newaxis, tf.newaxis], [1, 1, 3, 1])\n self.paddings = tf.convert_to_tensor([[size,size],[size,size],[0,0]])\n x_aug = tf.nn.separable_conv2d(tf.expand_dims(tf.pad(x,self.paddings,'SYMMETRIC'), 0), self.kernel, self.pointwise_filter,strides=[1, 1, 1, 1], padding='VALID')\n x_aug = 
tf.squeeze(x_aug)\n return tf.concat([x, x_aug],axis=2)\n\n\n def high_low_pass(self,x):\n x_low = tf.nn.separable_conv2d(tf.expand_dims(tf.pad(x,self.paddings,'SYMMETRIC'), 0), self.kernel, self.pointwise_filter,strides=[1, 1, 1, 1], padding='VALID')\n x_low = tf.squeeze(x_low)\n x_high = x - x_low\n return tf.concat([x, x_high, x_low],axis=2)\n\n def no_op(self,x):\n return x\n\n\n\n", "import tensorflow as tf\nimport numpy as np\n\nfrom tensorflow.keras.layers import Layer, Dense\n# import cv2\n\nclass dotdict(dict):\n \"\"\"dot.notation access to dictionary attributes\"\"\"\n __getattr__ = dict.get\n __setattr__ = dict.__setitem__\n __delattr__ = dict.__delitem__\n\n# https://github.com/e2crawfo/auto_yolo/blob/d7a070549999d42566db66f6c25b88e20730fd27/auto_yolo/models/core.py#L37\ndef concrete_binary_pre_sigmoid_sample(log_odds, temperature, eps=1e-8):\n u = tf.random.uniform(tf.shape(log_odds), minval=0, maxval=1)\n noise = tf.math.log(u + eps) - tf.math.log(1.0 - u + eps)\n return (log_odds + noise) / temperature\n\nclass Sampling(Layer):\n def call(self, inputs):\n z_mean, z_sigma = inputs\n epsilon = tf.random.normal(shape=tf.shape(z_sigma))\n \n return z_mean + z_sigma * epsilon\n\n\n\n\nclass GumbelSM_Sampling(Layer):\n def __init__(self, name='GumbelSM', tau=0.4, **kwargs):\n super(GumbelSM_Sampling, self).__init__(name=name, **kwargs)\n self.tau = tau\n\n def softmax(self, logits, axis):\n x = logits/self.tau\n b = tf.reduce_max(x, axis=axis, keepdims=True)\n exp_logits = tf.exp(x-b)\n return exp_logits / tf.reduce_sum(exp_logits, axis, keepdims=True)\n\n def call(self, inputs):\n # beta_logits = inputs\n\n G = - tf.math.log(-tf.math.log( tf.random.uniform(shape=tf.shape(inputs), minval=0, maxval=1) ))\n return self.softmax(inputs + G, axis=-1)\n\n\nclass STN(Layer):\n \"\"\"\n Adapted from https://github.com/kevinzakka/spatial-transformer-network/blob/master/stn/transformer.py\n \"\"\"\n def __init__(self, name='STN', inverse=False, H_out=32, W_out=32, **kwargs):\n super(STN, self).__init__(name=name, \n **kwargs)\n \n self.inverse = inverse\n self.H_img = H_out\n self.W_img = W_out\n \"\"\"\n if self.inverse:\n self.H_img, self.W_img = H_img, W_img = IMAGE_SIZE_Y, IMAGE_SIZE_X\n else:\n self.H_img, self.W_img = H_img, W_img = GRID_SIZE_Y, GRID_SIZE_X # the number of points in grid \n\t\t\"\"\"\n\n def build(self, input_shape):\n \"\"\"\n This function create the Grid points with size equal to the output size.\n Also, this function generates lists for (tx, ty) which specify the origin of each SPAIR object. 
\n\n \"\"\"\n\n\n z_where_shape = input_shape[1]\n \n H_obj, W_obj = z_where_shape[1], z_where_shape[2]\n\n H_img = self.H_img\n W_img = self.W_img\n \n # normal grid\n x = np.linspace(-1.0, 1.0, W_img)\n y = np.linspace(-1.0, 1.0, H_img)\n X, Y = np.meshgrid(x, y) \n x_grids = tf.convert_to_tensor(X, dtype=tf.float32) #[W_img, H_img]\n y_grids = tf.convert_to_tensor(Y, dtype=tf.float32) \n\n x_grids = tf.expand_dims( tf.reshape(x_grids, [-1]), axis=0)\n y_grids = tf.expand_dims( tf.reshape(y_grids, [-1]), axis=0) #[1, H_img*W_img]\n\n x_grids = tf.tile(x_grids, [H_obj*W_obj, 1])\n y_grids = tf.tile(y_grids, [H_obj*W_obj, 1]) #[B', H_img*W_img]\n\n ones = tf.ones_like(x_grids) # [B', H_img*W_img]\n\n self.sampling_grids = tf.expand_dims( tf.stack([x_grids, y_grids, ones], axis=1), axis=0) #[1, B', 3, H_img*W_img]\n \n self.Bp = H_obj*W_obj \n\n bias_tx = np.zeros([H_obj, W_obj])\n bias_ty = np.zeros([H_obj, W_obj])\n \n self.cell_width_ratio = (2.0 * 12) / 48 # HACK todo\n self.cell_height_ratio = (2.0 * 12) / 48\n\n for i in range(H_obj):\n i_p = (2.-self.cell_height_ratio)*i/(H_obj-1) - (1-0.5*self.cell_height_ratio) #put bias in the middle of the cell\n #i_p = 2.*i/(H_obj - 1.) - 1.\n for j in range(W_obj):\n j_p = (2.-self.cell_width_ratio)*j/(W_obj-1) - (1-0.5*self.cell_width_ratio) \n #j_p = 2.*j/(W_obj - 1.) - 1. \n\n bias_ty[i, j] = i_p\n bias_tx[i, j] = j_p\n\n self.bias_tx = tf.expand_dims(tf.convert_to_tensor(bias_tx, dtype=tf.float32), axis=0)#[1, H_obj, W_obj]\n self.bias_ty = tf.expand_dims(tf.convert_to_tensor(bias_ty, dtype=tf.float32), axis=0)#[1, H_obj, W_obj] \n \n\n def call(self, inputs):\n\n \"\"\"\n This function:\n 1. take the grid and do affine transformation according to the parameters sx, sy, tx, ty\n 2. perform bilinear sampling using the grid\n \"\"\"\n\n x, z_where = inputs\n # z_where :: [batch, H, W, 4]\n # z_where[0,0,0,:] :: x_offset, y_offset, box_width, box_height\n # transform this parameterisation into sx, sy, tx, ty for STN\n\n \n\n shape = tf.shape(z_where)\n B = shape[0]\n Bp = self.Bp\n\n H_img = self.H_img\n W_img = self.W_img\n\n \n sx = 0.5 * tf.nn.sigmoid( z_where[:, :, :, 0] ) # 0 < sx < s_max / avoid reflection\n sy = 0.5 * tf.nn.sigmoid( z_where[:, :, :, 1] ) # 0 < sy < s_max / \n tx = 0.5 * tf.nn.tanh( z_where[:, :, :, 2] ) + self.bias_tx # offset limit ?\n ty = 0.5 * tf.nn.tanh( z_where[:, :, :, 3] ) + self.bias_ty # [B, H_obj, W_obj] \n\n box_height = sy / tf.constant(2.0)\n box_height = box_height[:,:,:,tf.newaxis]\n box_width = sx / tf.constant(2.0)\n box_width = box_width[:,:,:,tf.newaxis]\n bbox_ty = (ty[:,:,:,tf.newaxis] + tf.constant(1.0)) / 2.0\n bbox_tx = (tx[:,:,:,tf.newaxis] + tf.constant(1.0)) / 2.0\n obj_bbox_mask = tf.concat([bbox_ty-box_height, bbox_tx-box_width, bbox_ty+box_height, bbox_tx+box_width],axis=-1) # [B,4,4,4]\n # print('obj_bbox_mask.shape:',obj_bbox_mask.shape)\n obj_bbox_mask = tf.reshape(obj_bbox_mask, [obj_bbox_mask.shape[0],obj_bbox_mask.shape[1]*obj_bbox_mask.shape[2],obj_bbox_mask.shape[3]]) #[B,B',4]\n # print('obj_bbox_mask.shape:',obj_bbox_mask.shape)\n\n if self.inverse:\n tx = -tx / (sx + 1e-5)\n ty = -ty / (sy + 1e-5)\n sx = 1/(sx + 1e-5)\n sy = 1/(sy + 1e-5)\n\n # theta_in = tf.transpose(tf.stack([sx,sy,tx,ty]),[1,2,3,0]) # [4,B,cell_y,cell_x] -> [B,cell_y,cell_x,4]\n # theta_in = tf.reshape(theta_in,[-1,4]) #flatten\n # theta = self.dense(theta_in)\n\n # sx = theta[:,0]\n # sy = theta[:,1]\n # tx = theta[:,2]\n # ty = theta[:,3]\n\n # sx = 0.5 * tf.nn.sigmoid(sx)\n # sy = 0.5 * tf.nn.sigmoid(sy)\n 
# tx = tf.nn.tanh(tx) + self.bias_tx\n # ty = tf.nn.tanh(ty) + self.bias_ty\n\n \n #(x,y) = A * (x,y,1)\n sx = tf.reshape(sx, [B, Bp]) #from [B, H_obj, W_obj] --> [B, B']\n sy = tf.reshape(sy, [B, Bp]) #\n tx = tf.reshape(tx, [B, Bp])\n ty = tf.reshape(ty, [B, Bp])\n zeros = tf.zeros_like(sx)\n\n\n A_top = tf.stack([sx, zeros, tx], axis=2) # [B, B', 3]\n A_bottom = tf.stack([zeros, sy, ty], axis=2) # [B, B', 3]\n A = tf.stack([A_top, A_bottom], axis=2) # [B, B', 2, 3]\n\n sampling_grids = tf.tile(self.sampling_grids, [B, 1, 1, 1]) #[B, B', 3, H_img*W_img]\n \n batch_grids = tf.matmul(A, sampling_grids) #[B, B', 2, H_img*W_img]\n \n batch_grids = tf.reshape(batch_grids, [B, Bp, 2, H_img, W_img])\n\n\n outputs = self.bilinear_sampler(x, batch_grids) #[B, B', H_img, W_img, C]\n\n return outputs, obj_bbox_mask\n\n @tf.function\n def bilinear_sampler(self, img, batch_grids):\n \"\"\"\n At each point in the Grid, \n we map that into the position in the original image.\n We gather 4 corner pixel of the gridpoint and do bilinear interpolation. \n \"\"\"\n if self.inverse:\n input_shape = tf.shape(img) # [B, B', H, W, C]\n H_x, W_x = input_shape[2], input_shape[3]\n else:\n input_shape = tf.shape(img) # [B, H, W, C]\n H_x, W_x = input_shape[1], input_shape[2]\n\n\n x = batch_grids[:, :, 0, :, :] # [B, B', H, W]\n y = batch_grids[:, :, 1, :, :] # [B, B', H, W]\n\n\n W_xf = tf.cast(W_x, tf.float32)\n H_xf = tf.cast(H_x, tf.float32)\n\n # rescale x,y with Ht and Wt\n x = 0.5 * (x+1.0) * (W_xf - 1) #* tf.cast(W_x - 1, tf.float32) # [0, W-1]\n y = 0.5 * (y+1.0) * (H_xf - 1) #tf.cast(H_x - 1, tf.float32) # [0, H-1]\n \n # grab four nearest corner \n x0 = tf.floor(x)\n x1 = x0 + 1\n y0 = tf.floor(y)\n y1 = y0 + 1\n\n \n # clip to range [0, H/W] to not violate img boundaries\n x0 = tf.clip_by_value(x0, 0., W_xf-1)\n x1 = tf.clip_by_value(x1, 0., W_xf-1)\n y0 = tf.clip_by_value(y0, 0., H_xf-1)\n y1 = tf.clip_by_value(y1, 0., H_xf-1)\n\n\n # calculate deltas\n wa = (x1-x) * (y1-y)\n wb = (x1-x) * (y-y0)\n wc = (x-x0) * (y1-y)\n wd = (x-x0) * (y-y0)\n # delta has to be zero if the pixel violate boundaries\n\n\n #recast as int for index calculation\n x0 = tf.cast(x0, 'int32')\n x1 = tf.cast(x1, 'int32')\n y0 = tf.cast(y0, 'int32')\n y1 = tf.cast(y1, 'int32')\n\n\n # get pixel value at corner coords\n Ia = self.get_pixel_value(img, x0, y0) # (B, B', H_img, W_img, C)\n Ib = self.get_pixel_value(img, x0, y1)\n Ic = self.get_pixel_value(img, x1, y0)\n Id = self.get_pixel_value(img, x1, y1)\n\n # add dimension for addition\n wa = tf.expand_dims(wa, axis=4)\n wb = tf.expand_dims(wb, axis=4)\n wc = tf.expand_dims(wc, axis=4)\n wd = tf.expand_dims(wd, axis=4)\n\n # compute output\n out = tf.add_n([wa*Ia, wb*Ib, wc*Ic, wd*Id])\n\n return out\n \n @tf.function\n def get_pixel_value(self, img, x, y):\n \"\"\"\n Gathers the pixel from original image according to coordinate x,y\n \"\"\"\n\n if self.inverse:\n \"\"\"\n img: (B, B', H_x, W_x, C)\n x: (B, B', H_img, W_img)\n y: (B, B', H_img, W_img)\n\n indices: (B, B', H_img, W_img, 4)\n \"\"\" \n\n x_shape = tf.shape(x)\n B = x_shape[0]\n Bp = x_shape[1]\n H_img = x_shape[2]\n W_img = x_shape[3]\n\n # assert Bp == tf.shape(img)[1]\n\n\n B_idx = tf.range(0, B)\n B_idx = tf.reshape(B_idx, (B, 1, 1, 1))\n B_idx = tf.tile(B_idx, (1, Bp, H_img, W_img))\n\n Bp_idx = tf.range(0, Bp)\n Bp_idx = tf.reshape(Bp_idx, (1, Bp, 1, 1))\n Bp_idx = tf.tile(Bp_idx, (B, 1, H_img, W_img))\n\n indices = tf.stack([B_idx, Bp_idx, y, x], axis=4)\n \n return tf.gather_nd(img, indices)\n\n else:\n 
\"\"\"\n img: (B, H_x, W_x, C)\n x: (B, B', H_img, W_img)\n y: (B, B', H_img, W_img)\n\n indices : (B, B', H_img, W_img, 3) -> out: (B, B', H_img, W_img, C)\n \"\"\"\n # assert len(tf.shape(img)) == 4\n\n x_shape = tf.shape(x)\n B = x_shape[0]\n Bp = x_shape[1]\n H_img = x_shape[2]\n W_img = x_shape[3]\n\n B_idx = tf.range(0, B)\n B_idx = tf.reshape(B_idx, (B, 1, 1, 1))\n B_idx = tf.tile(B_idx, (1, Bp, H_img, W_img))\n indices = tf.stack([B_idx, y, x], axis=4) #[B, B', H_img, W_img, 3]\n return tf.gather_nd(img, indices)" ]
[ [ "tensorflow.convert_to_tensor", "tensorflow.concat", "tensorflow.range", "numpy.random.choice", "tensorflow.unstack", "tensorflow.reduce_sum", "tensorflow.reshape", "tensorflow.random.uniform", "tensorflow.squeeze", "tensorflow.expand_dims", "tensorflow.einsum", "tensorflow.eye", "tensorflow.random.shuffle", "tensorflow.pad", "tensorflow.split", "tensorflow.tile" ], [ "tensorflow.convert_to_tensor", "tensorflow.concat", "numpy.linspace", "tensorflow.stack", "tensorflow.reduce_sum", "tensorflow.cast", "tensorflow.add_n", "tensorflow.floor", "numpy.zeros", "tensorflow.tile", "tensorflow.matmul", "tensorflow.nn.sigmoid", "tensorflow.gather_nd", "tensorflow.shape", "tensorflow.exp", "tensorflow.nn.tanh", "tensorflow.zeros_like", "numpy.meshgrid", "tensorflow.clip_by_value", "tensorflow.reduce_max", "tensorflow.constant", "tensorflow.range", "tensorflow.reshape", "tensorflow.ones_like", "tensorflow.expand_dims", "tensorflow.math.log" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [ "1.10", "1.12", "1.4", "1.13", "1.5", "1.7", "0.12", "1.0", "1.2" ] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [ "1.10" ] } ]
rasoolims/fairseq
[ "57b6a6dbfb290718eaa25040551d9db8a6b68a9b" ]
[ "fairseq/models/fconv.py" ]
[ "# Copyright (c) 2017-present, Facebook, Inc.\n# All rights reserved.\n#\n# This source code is licensed under the license found in the LICENSE file in\n# the root directory of this source tree. An additional grant of patent rights\n# can be found in the PATENTS file in the same directory.\n\nimport math\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\n\nfrom fairseq import options, utils\nfrom fairseq.modules import (\n AdaptiveSoftmax, BeamableMM, GradMultiply, LearnedPositionalEmbedding,\n LinearizedConvolution,\n)\n\nfrom . import (\n FairseqEncoder, FairseqIncrementalDecoder, FairseqModel,\n FairseqLanguageModel, register_model, register_model_architecture,\n)\n\n\n@register_model('fconv')\nclass FConvModel(FairseqModel):\n \"\"\"\n A fully convolutional model, i.e. a convolutional encoder and a\n convolutional decoder, as described in `\"Convolutional Sequence to Sequence\n Learning\" (Gehring et al., 2017) <https://arxiv.org/abs/1705.03122>`_.\n\n Args:\n encoder (FConvEncoder): the encoder\n decoder (FConvDecoder): the decoder\n\n The Convolutional model provides the following named architectures and\n command-line arguments:\n\n .. argparse::\n :ref: fairseq.models.fconv_parser\n :prog:\n \"\"\"\n\n def __init__(self, encoder, decoder):\n super().__init__(encoder, decoder)\n self.encoder.num_attention_layers = sum(layer is not None for layer in decoder.attention)\n\n @staticmethod\n def add_args(parser):\n \"\"\"Add model-specific arguments to the parser.\"\"\"\n # fmt: off\n parser.add_argument('--dropout', type=float, metavar='D',\n help='dropout probability')\n parser.add_argument('--encoder-embed-dim', type=int, metavar='N',\n help='encoder embedding dimension')\n parser.add_argument('--encoder-embed-path', type=str, metavar='STR',\n help='path to pre-trained encoder embedding')\n parser.add_argument('--encoder-layers', type=str, metavar='EXPR',\n help='encoder layers [(dim, kernel_size), ...]')\n parser.add_argument('--decoder-embed-dim', type=int, metavar='N',\n help='decoder embedding dimension')\n parser.add_argument('--decoder-embed-path', type=str, metavar='STR',\n help='path to pre-trained decoder embedding')\n parser.add_argument('--decoder-layers', type=str, metavar='EXPR',\n help='decoder layers [(dim, kernel_size), ...]')\n parser.add_argument('--decoder-out-embed-dim', type=int, metavar='N',\n help='decoder output embedding dimension')\n parser.add_argument('--decoder-attention', type=str, metavar='EXPR',\n help='decoder attention [True, ...]')\n parser.add_argument('--share-input-output-embed', action='store_true',\n help='share input and output embeddings (requires'\n ' --decoder-out-embed-dim and --decoder-embed-dim'\n ' to be equal)')\n # fmt: on\n\n @classmethod\n def build_model(cls, args, task):\n \"\"\"Build a new model instance.\"\"\"\n # make sure that all args are properly defaulted (in case there are any new ones)\n base_architecture(args)\n\n encoder_embed_dict = None\n if args.encoder_embed_path:\n encoder_embed_dict = utils.parse_embedding(args.encoder_embed_path)\n utils.print_embed_overlap(encoder_embed_dict, task.source_dictionary)\n\n decoder_embed_dict = None\n if args.decoder_embed_path:\n decoder_embed_dict = utils.parse_embedding(args.decoder_embed_path)\n utils.print_embed_overlap(decoder_embed_dict, task.target_dictionary)\n\n encoder = FConvEncoder(\n dictionary=task.source_dictionary,\n embed_dim=args.encoder_embed_dim,\n embed_dict=encoder_embed_dict,\n convolutions=eval(args.encoder_layers),\n 
dropout=args.dropout,\n max_positions=args.max_source_positions,\n )\n decoder = FConvDecoder(\n dictionary=task.target_dictionary,\n embed_dim=args.decoder_embed_dim,\n embed_dict=decoder_embed_dict,\n convolutions=eval(args.decoder_layers),\n out_embed_dim=args.decoder_out_embed_dim,\n attention=eval(args.decoder_attention),\n dropout=args.dropout,\n max_positions=args.max_target_positions,\n share_embed=args.share_input_output_embed,\n )\n return FConvModel(encoder, decoder)\n\n\n@register_model('fconv_lm')\nclass FConvLanguageModel(FairseqLanguageModel):\n def __init__(self, decoder):\n super().__init__(decoder)\n\n @staticmethod\n def add_args(parser):\n \"\"\"Add model-specific arguments to the parser.\"\"\"\n parser.add_argument('--dropout', type=float, metavar='D',\n help='dropout probability')\n parser.add_argument('--decoder-embed-dim', type=int, metavar='N',\n help='decoder embedding dimension')\n parser.add_argument('--decoder-layers', type=str, metavar='EXPR',\n help='decoder layers [(dim, kernel_size), ...]')\n parser.add_argument('--decoder-out-embed-dim', type=int, metavar='N',\n help='decoder output embedding dimension')\n parser.add_argument('--adaptive-softmax-cutoff', metavar='EXPR',\n help='comma separated list of adaptive softmax cutoff points. '\n 'Must be used with adaptive_loss criterion')\n parser.add_argument('--adaptive-softmax-dropout', type=float, metavar='D',\n help='sets adaptive softmax dropout for the tail projections')\n parser.add_argument('--decoder-attention', type=str, metavar='EXPR',\n help='decoder attention [True, ...]')\n\n @classmethod\n def build_model(cls, args, task):\n \"\"\"Build a new model instance.\"\"\"\n # make sure all arguments are present in older models\n base_lm_architecture(args)\n\n if hasattr(args, 'max_target_positions'):\n args.tokens_per_sample = args.max_target_positions\n\n decoder = FConvDecoder(\n dictionary=task.target_dictionary,\n embed_dim=args.decoder_embed_dim,\n convolutions=eval(args.decoder_layers),\n out_embed_dim=args.decoder_embed_dim,\n attention=eval(args.decoder_attention),\n dropout=args.dropout,\n max_positions=args.tokens_per_sample,\n share_embed=False,\n positional_embeddings=False,\n adaptive_softmax_cutoff=(\n options.eval_str_list(args.adaptive_softmax_cutoff, type=int)\n if args.criterion == 'adaptive_loss' else None\n ),\n adaptive_softmax_dropout=args.adaptive_softmax_dropout,\n )\n return FConvLanguageModel(decoder)\n\n\nclass FConvEncoder(FairseqEncoder):\n \"\"\"\n Convolutional encoder consisting of `len(convolutions)` layers.\n\n Args:\n dictionary (~fairseq.data.Dictionary): encoding dictionary\n embed_dim (int, optional): embedding dimension\n embed_dict (str, optional): filename from which to load pre-trained\n embeddings\n max_positions (int, optional): maximum supported input sequence length\n convolutions (list, optional): the convolutional layer structure. Each\n list item `i` corresponds to convolutional layer `i`. Layers are\n given as ``(out_channels, kernel_width, [residual])``. 
Residual\n connections are added between layers when ``residual=1`` (which is\n the default behavior).\n dropout (float, optional): dropout to be applied before each conv layer\n \"\"\"\n\n def __init__(\n self, dictionary, embed_dim=512, embed_dict=None, max_positions=1024,\n convolutions=((512, 3),) * 20, dropout=0.1,\n ):\n super().__init__(dictionary)\n self.dropout = dropout\n self.num_attention_layers = None\n\n num_embeddings = len(dictionary)\n self.padding_idx = dictionary.pad()\n self.embed_tokens = Embedding(num_embeddings, embed_dim, self.padding_idx)\n if embed_dict:\n self.embed_tokens = utils.load_embedding(embed_dict, self.dictionary, self.embed_tokens)\n\n self.embed_positions = PositionalEmbedding(\n max_positions,\n embed_dim,\n self.padding_idx,\n )\n\n convolutions = extend_conv_spec(convolutions)\n in_channels = convolutions[0][0]\n self.fc1 = Linear(embed_dim, in_channels, dropout=dropout)\n self.projections = nn.ModuleList()\n self.convolutions = nn.ModuleList()\n self.residuals = []\n\n layer_in_channels = [in_channels]\n for _, (out_channels, kernel_size, residual) in enumerate(convolutions):\n if residual == 0:\n residual_dim = out_channels\n else:\n residual_dim = layer_in_channels[-residual]\n self.projections.append(Linear(residual_dim, out_channels)\n if residual_dim != out_channels else None)\n if kernel_size % 2 == 1:\n padding = kernel_size // 2\n else:\n padding = 0\n self.convolutions.append(\n ConvTBC(in_channels, out_channels * 2, kernel_size,\n dropout=dropout, padding=padding)\n )\n self.residuals.append(residual)\n in_channels = out_channels\n layer_in_channels.append(out_channels)\n self.fc2 = Linear(in_channels, embed_dim)\n\n def forward(self, src_tokens, src_lengths):\n \"\"\"\n Args:\n src_tokens (LongTensor): tokens in the source language of shape\n `(batch, src_len)`\n src_lengths (LongTensor): lengths of each source sentence of shape\n `(batch)`\n\n Returns:\n dict:\n - **encoder_out** (tuple): a tuple with two elements, where the\n first element is the last encoder layer's output and the\n second element is the same quantity summed with the input\n embedding (used for attention). 
The shape of both tensors is\n `(batch, src_len, embed_dim)`.\n - **encoder_padding_mask** (ByteTensor): the positions of\n padding elements of shape `(batch, src_len)`\n \"\"\"\n # embed tokens and positions\n x = self.embed_tokens(src_tokens) + self.embed_positions(src_tokens)\n x = F.dropout(x, p=self.dropout, training=self.training)\n input_embedding = x\n\n # project to size of convolution\n x = self.fc1(x)\n\n # used to mask padding in input\n encoder_padding_mask = src_tokens.eq(self.padding_idx).t() # -> T x B\n if not encoder_padding_mask.any():\n encoder_padding_mask = None\n\n # B x T x C -> T x B x C\n x = x.transpose(0, 1)\n\n residuals = [x]\n # temporal convolutions\n for proj, conv, res_layer in zip(self.projections, self.convolutions, self.residuals):\n if res_layer > 0:\n residual = residuals[-res_layer]\n residual = residual if proj is None else proj(residual)\n else:\n residual = None\n\n if encoder_padding_mask is not None:\n x = x.masked_fill(encoder_padding_mask.unsqueeze(-1), 0)\n\n x = F.dropout(x, p=self.dropout, training=self.training)\n if conv.kernel_size[0] % 2 == 1:\n # padding is implicit in the conv\n x = conv(x)\n else:\n padding_l = (conv.kernel_size[0] - 1) // 2\n padding_r = conv.kernel_size[0] // 2\n x = F.pad(x, (0, 0, 0, 0, padding_l, padding_r))\n x = conv(x)\n x = F.glu(x, dim=2)\n\n if residual is not None:\n x = (x + residual) * math.sqrt(0.5)\n residuals.append(x)\n\n # T x B x C -> B x T x C\n x = x.transpose(1, 0)\n\n # project back to size of embedding\n x = self.fc2(x)\n\n if encoder_padding_mask is not None:\n encoder_padding_mask = encoder_padding_mask.t() # -> B x T\n x = x.masked_fill(encoder_padding_mask.unsqueeze(-1), 0)\n\n # scale gradients (this only affects backward, not forward)\n x = GradMultiply.apply(x, 1.0 / (2.0 * self.num_attention_layers))\n\n # add output to input embedding for attention\n y = (x + input_embedding) * math.sqrt(0.5)\n\n return {\n 'encoder_out': (x, y),\n 'encoder_padding_mask': encoder_padding_mask, # B x T\n }\n\n def reorder_encoder_out(self, encoder_out, new_order):\n if encoder_out['encoder_out'] is not None:\n encoder_out['encoder_out'] = (\n encoder_out['encoder_out'][0].index_select(0, new_order),\n encoder_out['encoder_out'][1].index_select(0, new_order),\n )\n if encoder_out['encoder_padding_mask'] is not None:\n encoder_out['encoder_padding_mask'] = \\\n encoder_out['encoder_padding_mask'].index_select(0, new_order)\n return encoder_out\n\n def max_positions(self):\n \"\"\"Maximum input length supported by the encoder.\"\"\"\n return self.embed_positions.max_positions()\n\n\nclass AttentionLayer(nn.Module):\n def __init__(self, conv_channels, embed_dim, bmm=None):\n super().__init__()\n # projects from output of convolution to embedding dimension\n self.in_projection = Linear(conv_channels, embed_dim)\n # projects from embedding dimension to convolution size\n self.out_projection = Linear(embed_dim, conv_channels)\n\n self.bmm = bmm if bmm is not None else torch.bmm\n\n def forward(self, x, target_embedding, encoder_out, encoder_padding_mask):\n residual = x\n\n # attention\n x = (self.in_projection(x) + target_embedding) * math.sqrt(0.5)\n x = self.bmm(x, encoder_out[0])\n\n # don't attend over padding\n if encoder_padding_mask is not None:\n x = x.float().masked_fill(\n encoder_padding_mask.unsqueeze(1),\n float('-inf')\n ).type_as(x) # FP16 support: cast to float and back\n\n # softmax over last dim\n sz = x.size()\n x = F.softmax(x.view(sz[0] * sz[1], sz[2]), dim=1)\n x = x.view(sz)\n 
attn_scores = x\n\n x = self.bmm(x, encoder_out[1])\n\n # scale attention output (respecting potentially different lengths)\n s = encoder_out[1].size(1)\n if encoder_padding_mask is None:\n x = x * (s * math.sqrt(1.0 / s))\n else:\n s = s - encoder_padding_mask.type_as(x).sum(dim=1, keepdim=True) # exclude padding\n s = s.unsqueeze(-1)\n x = x * (s * s.rsqrt())\n\n # project back\n x = (self.out_projection(x) + residual) * math.sqrt(0.5)\n return x, attn_scores\n\n def make_generation_fast_(self, beamable_mm_beam_size=None, **kwargs):\n \"\"\"Replace torch.bmm with BeamableMM.\"\"\"\n if beamable_mm_beam_size is not None:\n del self.bmm\n self.add_module('bmm', BeamableMM(beamable_mm_beam_size))\n\n\nclass FConvDecoder(FairseqIncrementalDecoder):\n \"\"\"Convolutional decoder\"\"\"\n\n def __init__(\n self, dictionary, embed_dim=512, embed_dict=None, out_embed_dim=256,\n max_positions=1024, convolutions=((512, 3),) * 20, attention=True,\n dropout=0.1, share_embed=False, positional_embeddings=True,\n adaptive_softmax_cutoff=None, adaptive_softmax_dropout=0,\n ):\n super().__init__(dictionary)\n self.register_buffer('version', torch.Tensor([2]))\n self.dropout = dropout\n self.need_attn = True\n\n convolutions = extend_conv_spec(convolutions)\n in_channels = convolutions[0][0]\n if isinstance(attention, bool):\n # expand True into [True, True, ...] and do the same with False\n attention = [attention] * len(convolutions)\n if not isinstance(attention, list) or len(attention) != len(convolutions):\n raise ValueError('Attention is expected to be a list of booleans of '\n 'length equal to the number of layers.')\n\n num_embeddings = len(dictionary)\n padding_idx = dictionary.pad()\n self.embed_tokens = Embedding(num_embeddings, embed_dim, padding_idx)\n if embed_dict:\n self.embed_tokens = utils.load_embedding(embed_dict, self.dictionary, self.embed_tokens)\n\n self.embed_positions = PositionalEmbedding(\n max_positions,\n embed_dim,\n padding_idx,\n ) if positional_embeddings else None\n\n self.fc1 = Linear(embed_dim, in_channels, dropout=dropout)\n self.projections = nn.ModuleList()\n self.convolutions = nn.ModuleList()\n self.attention = nn.ModuleList()\n self.residuals = []\n\n layer_in_channels = [in_channels]\n for i, (out_channels, kernel_size, residual) in enumerate(convolutions):\n if residual == 0:\n residual_dim = out_channels\n else:\n residual_dim = layer_in_channels[-residual]\n self.projections.append(Linear(residual_dim, out_channels)\n if residual_dim != out_channels else None)\n self.convolutions.append(\n LinearizedConv1d(in_channels, out_channels * 2, kernel_size,\n padding=(kernel_size - 1), dropout=dropout)\n )\n self.attention.append(AttentionLayer(out_channels, embed_dim)\n if attention[i] else None)\n self.residuals.append(residual)\n in_channels = out_channels\n layer_in_channels.append(out_channels)\n\n self.adaptive_softmax = None\n self.fc2 = self.fc3 = None\n\n if adaptive_softmax_cutoff is not None:\n assert not share_embed\n self.adaptive_softmax = AdaptiveSoftmax(num_embeddings, in_channels, adaptive_softmax_cutoff,\n dropout=adaptive_softmax_dropout)\n else:\n self.fc2 = Linear(in_channels, out_embed_dim)\n if share_embed:\n assert out_embed_dim == embed_dim, \\\n \"Shared embed weights implies same dimensions \" \\\n \" out_embed_dim={} vs embed_dim={}\".format(out_embed_dim, embed_dim)\n self.fc3 = nn.Linear(out_embed_dim, num_embeddings)\n self.fc3.weight = self.embed_tokens.weight\n else:\n self.fc3 = Linear(out_embed_dim, num_embeddings, 
dropout=dropout)\n\n def forward(self, prev_output_tokens, encoder_out_dict=None, incremental_state=None):\n if encoder_out_dict is not None:\n encoder_out = encoder_out_dict['encoder_out']\n encoder_padding_mask = encoder_out_dict['encoder_padding_mask']\n\n # split and transpose encoder outputs\n encoder_a, encoder_b = self._split_encoder_out(encoder_out, incremental_state)\n\n if self.embed_positions is not None:\n pos_embed = self.embed_positions(prev_output_tokens, incremental_state)\n else:\n pos_embed = 0\n\n if incremental_state is not None:\n prev_output_tokens = prev_output_tokens[:, -1:]\n x = self._embed_tokens(prev_output_tokens, incremental_state)\n\n # embed tokens and combine with positional embeddings\n x += pos_embed\n x = F.dropout(x, p=self.dropout, training=self.training)\n target_embedding = x\n\n # project to size of convolution\n x = self.fc1(x)\n\n # B x T x C -> T x B x C\n x = self._transpose_if_training(x, incremental_state)\n\n # temporal convolutions\n avg_attn_scores = None\n num_attn_layers = len(self.attention)\n residuals = [x]\n for proj, conv, attention, res_layer in zip(self.projections, self.convolutions, self.attention,\n self.residuals):\n if res_layer > 0:\n residual = residuals[-res_layer]\n residual = residual if proj is None else proj(residual)\n else:\n residual = None\n\n x = F.dropout(x, p=self.dropout, training=self.training)\n x = conv(x, incremental_state)\n x = F.glu(x, dim=2)\n\n # attention\n if attention is not None:\n x = self._transpose_if_training(x, incremental_state)\n\n x, attn_scores = attention(x, target_embedding, (encoder_a, encoder_b), encoder_padding_mask)\n\n if not self.training and self.need_attn:\n attn_scores = attn_scores / num_attn_layers\n if avg_attn_scores is None:\n avg_attn_scores = attn_scores\n else:\n avg_attn_scores.add_(attn_scores)\n\n x = self._transpose_if_training(x, incremental_state)\n\n # residual\n if residual is not None:\n x = (x + residual) * math.sqrt(0.5)\n residuals.append(x)\n\n # T x B x C -> B x T x C\n x = self._transpose_if_training(x, incremental_state)\n\n # project back to size of vocabulary if not using adaptive softmax\n if self.fc2 is not None and self.fc3 is not None:\n x = self.fc2(x)\n x = F.dropout(x, p=self.dropout, training=self.training)\n x = self.fc3(x)\n\n return x, avg_attn_scores\n\n def reorder_incremental_state(self, incremental_state, new_order):\n super().reorder_incremental_state(incremental_state, new_order)\n encoder_out = utils.get_incremental_state(self, incremental_state, 'encoder_out')\n if encoder_out is not None:\n encoder_out = tuple(eo.index_select(0, new_order) for eo in encoder_out)\n utils.set_incremental_state(self, incremental_state, 'encoder_out', encoder_out)\n\n def max_positions(self):\n \"\"\"Maximum output length supported by the decoder.\"\"\"\n return self.embed_positions.max_positions() if self.embed_positions is not None else float('inf')\n\n def upgrade_state_dict(self, state_dict):\n if utils.item(state_dict.get('decoder.version', torch.Tensor([1]))[0]) < 2:\n # old models use incorrect weight norm dimension\n for i, conv in enumerate(self.convolutions):\n # reconfigure weight norm\n nn.utils.remove_weight_norm(conv)\n self.convolutions[i] = nn.utils.weight_norm(conv, dim=0)\n state_dict['decoder.version'] = torch.Tensor([1])\n return state_dict\n\n def make_generation_fast_(self, need_attn=False, **kwargs):\n self.need_attn = need_attn\n\n def _embed_tokens(self, tokens, incremental_state):\n if incremental_state is not None:\n # keep only 
the last token for incremental forward pass\n tokens = tokens[:, -1:]\n return self.embed_tokens(tokens)\n\n def _split_encoder_out(self, encoder_out, incremental_state):\n \"\"\"Split and transpose encoder outputs.\n\n This is cached when doing incremental inference.\n \"\"\"\n cached_result = utils.get_incremental_state(self, incremental_state, 'encoder_out')\n if cached_result is not None:\n return cached_result\n\n # transpose only once to speed up attention layers\n encoder_a, encoder_b = encoder_out\n encoder_a = encoder_a.transpose(1, 2).contiguous()\n result = (encoder_a, encoder_b)\n\n if incremental_state is not None:\n utils.set_incremental_state(self, incremental_state, 'encoder_out', result)\n return result\n\n def _transpose_if_training(self, x, incremental_state):\n if incremental_state is None:\n x = x.transpose(0, 1)\n return x\n\n\ndef extend_conv_spec(convolutions):\n \"\"\"\n Extends convolutional spec that is a list of tuples of 2 or 3 parameters\n (kernel size, dim size and optionally how many layers behind to look for residual)\n to default the residual propagation param if it is not specified\n \"\"\"\n extended = []\n for spec in convolutions:\n if len(spec) == 3:\n extended.append(spec)\n elif len(spec) == 2:\n extended.append(spec + (1,))\n else:\n raise Exception('invalid number of parameters in convolution spec ' + str(spec) + '. expected 2 or 3')\n return tuple(extended)\n\n\ndef Embedding(num_embeddings, embedding_dim, padding_idx):\n m = nn.Embedding(num_embeddings, embedding_dim, padding_idx=padding_idx)\n nn.init.normal_(m.weight, 0, 0.1)\n nn.init.constant_(m.weight[padding_idx], 0)\n return m\n\n\ndef PositionalEmbedding(num_embeddings, embedding_dim, padding_idx):\n m = LearnedPositionalEmbedding(num_embeddings, embedding_dim, padding_idx)\n nn.init.normal_(m.weight, 0, 0.1)\n nn.init.constant_(m.weight[padding_idx], 0)\n return m\n\n\ndef Linear(in_features, out_features, dropout=0):\n \"\"\"Weight-normalized Linear layer (input: N x T x C)\"\"\"\n m = nn.Linear(in_features, out_features)\n nn.init.normal_(m.weight, mean=0, std=math.sqrt((1 - dropout) / in_features))\n nn.init.constant_(m.bias, 0)\n return nn.utils.weight_norm(m)\n\n\ndef LinearizedConv1d(in_channels, out_channels, kernel_size, dropout=0, **kwargs):\n \"\"\"Weight-normalized Conv1d layer optimized for decoding\"\"\"\n m = LinearizedConvolution(in_channels, out_channels, kernel_size, **kwargs)\n std = math.sqrt((4 * (1.0 - dropout)) / (m.kernel_size[0] * in_channels))\n nn.init.normal_(m.weight, mean=0, std=std)\n nn.init.constant_(m.bias, 0)\n return nn.utils.weight_norm(m, dim=2)\n\n\ndef ConvTBC(in_channels, out_channels, kernel_size, dropout=0, **kwargs):\n \"\"\"Weight-normalized Conv1d layer\"\"\"\n from fairseq.modules import ConvTBC\n m = ConvTBC(in_channels, out_channels, kernel_size, **kwargs)\n std = math.sqrt((4 * (1.0 - dropout)) / (m.kernel_size[0] * in_channels))\n nn.init.normal_(m.weight, mean=0, std=std)\n nn.init.constant_(m.bias, 0)\n return nn.utils.weight_norm(m, dim=2)\n\n\n@register_model_architecture('fconv_lm', 'fconv_lm')\ndef base_lm_architecture(args):\n args.dropout = getattr(args, 'dropout', 0.1)\n args.decoder_embed_dim = getattr(args, 'decoder_embed_dim', 128)\n args.decoder_layers = getattr(args, 'decoder_layers', '[(1268, 4)] * 13')\n args.decoder_attention = getattr(args, 'decoder_attention', 'False')\n args.adaptive_softmax_cutoff = getattr(args, 'adaptive_softmax_cutoff', None)\n args.adaptive_softmax_dropout = getattr(args, 
'adaptive_softmax_dropout', 0)\n\n\n@register_model_architecture('fconv_lm', 'fconv_lm_dauphin_wikitext103')\ndef fconv_lm_dauphin_wikitext103(args):\n layers = '[(850, 6)] * 3'\n layers += ' + [(850, 1)] * 1'\n layers += ' + [(850, 5)] * 4'\n layers += ' + [(850, 1)] * 1'\n layers += ' + [(850, 4)] * 3'\n layers += ' + [(1024, 4)] * 1'\n layers += ' + [(2048, 4)] * 1'\n args.decoder_embed_dim = getattr(args, 'decoder_embed_dim', 280)\n args.decoder_layers = getattr(args, 'decoder_layers', layers)\n args.decoder_attention = getattr(args, 'decoder_attention', 'False')\n args.adaptive_softmax_cutoff = getattr(args, 'adaptive_softmax_cutoff', '10000,20000,200000')\n base_lm_architecture(args)\n\n\n@register_model_architecture('fconv_lm', 'fconv_lm_dauphin_gbw')\ndef fconv_lm_dauphin_gbw(args):\n layers = '[(512, 5)]'\n layers += ' + [(128, 1, 0), (128, 5, 0), (512, 1, 3)] * 3'\n layers += ' + [(512, 1, 0), (512, 5, 0), (1024, 1, 3)] * 3'\n layers += ' + [(1024, 1, 0), (1024, 5, 0), (2048, 1, 3)] * 6'\n layers += ' + [(1024, 1, 0), (1024, 5, 0), (4096, 1, 3)]'\n args.decoder_embed_dim = getattr(args, 'decoder_embed_dim', 128)\n args.decoder_layers = getattr(args, 'decoder_layers', layers)\n args.decoder_attention = getattr(args, 'decoder_attention', 'False')\n args.adaptive_softmax_cutoff = getattr(args, 'adaptive_softmax_cutoff', '10000,50000,200000')\n base_lm_architecture(args)\n\n\n@register_model_architecture('fconv', 'fconv')\ndef base_architecture(args):\n args.dropout = getattr(args, 'dropout', 0.1)\n args.encoder_embed_dim = getattr(args, 'encoder_embed_dim', 512)\n args.encoder_embed_path = getattr(args, 'encoder_embed_path', None)\n args.encoder_layers = getattr(args, 'encoder_layers', '[(512, 3)] * 20')\n args.decoder_embed_dim = getattr(args, 'decoder_embed_dim', 512)\n args.decoder_embed_path = getattr(args, 'decoder_embed_path', None)\n args.decoder_layers = getattr(args, 'decoder_layers', '[(512, 3)] * 20')\n args.decoder_out_embed_dim = getattr(args, 'decoder_out_embed_dim', 256)\n args.decoder_attention = getattr(args, 'decoder_attention', 'True')\n args.share_input_output_embed = getattr(args, 'share_input_output_embed', False)\n\n\n@register_model_architecture('fconv', 'fconv_iwslt_de_en')\ndef fconv_iwslt_de_en(args):\n args.encoder_embed_dim = getattr(args, 'encoder_embed_dim', 256)\n args.encoder_layers = getattr(args, 'encoder_layers', '[(256, 3)] * 4')\n args.decoder_embed_dim = getattr(args, 'decoder_embed_dim', 256)\n args.decoder_layers = getattr(args, 'decoder_layers', '[(256, 3)] * 3')\n args.decoder_out_embed_dim = getattr(args, 'decoder_out_embed_dim', 256)\n base_architecture(args)\n\n\n@register_model_architecture('fconv', 'fconv_wmt_en_ro')\ndef fconv_wmt_en_ro(args):\n args.decoder_out_embed_dim = getattr(args, 'decoder_out_embed_dim', 512)\n base_architecture(args)\n\n\n@register_model_architecture('fconv', 'fconv_wmt_en_de')\ndef fconv_wmt_en_de(args):\n convs = '[(512, 3)] * 9' # first 9 layers have 512 units\n convs += ' + [(1024, 3)] * 4' # next 4 layers have 1024 units\n convs += ' + [(2048, 1)] * 2' # final 2 layers use 1x1 convolutions\n\n args.encoder_embed_dim = getattr(args, 'encoder_embed_dim', 768)\n args.encoder_layers = getattr(args, 'encoder_layers', convs)\n args.decoder_embed_dim = getattr(args, 'decoder_embed_dim', 768)\n args.decoder_layers = getattr(args, 'decoder_layers', convs)\n args.decoder_out_embed_dim = getattr(args, 'decoder_out_embed_dim', 512)\n base_architecture(args)\n\n\n@register_model_architecture('fconv', 
'fconv_wmt_en_fr')\ndef fconv_wmt_en_fr(args):\n convs = '[(512, 3)] * 6' # first 6 layers have 512 units\n convs += ' + [(768, 3)] * 4' # next 4 layers have 768 units\n convs += ' + [(1024, 3)] * 3' # next 3 layers have 1024 units\n convs += ' + [(2048, 1)] * 1' # next 1 layer uses 1x1 convolutions\n convs += ' + [(4096, 1)] * 1' # final 1 layer uses 1x1 convolutions\n\n args.encoder_embed_dim = getattr(args, 'encoder_embed_dim', 768)\n args.encoder_layers = getattr(args, 'encoder_layers', convs)\n args.decoder_embed_dim = getattr(args, 'decoder_embed_dim', 768)\n args.decoder_layers = getattr(args, 'decoder_layers', convs)\n args.decoder_out_embed_dim = getattr(args, 'decoder_out_embed_dim', 512)\n base_architecture(args)\n" ]
[ [ "torch.nn.functional.glu", "torch.Tensor", "torch.nn.functional.dropout", "torch.nn.init.constant_", "torch.nn.utils.weight_norm", "torch.nn.ModuleList", "torch.nn.Embedding", "torch.nn.utils.remove_weight_norm", "torch.nn.Linear", "torch.nn.init.normal_", "torch.nn.functional.pad" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
Wanggcong/Spatial-Temporal-Re-identification
[ "6978365af1cdbbbe654002889a4c1574314af861" ]
[ "test_st_duke.py" ]
[ "# -*- coding: utf-8 -*-\n\nfrom __future__ import print_function, division\n\nimport argparse\nimport torch\nimport torch.nn as nn\nimport torch.optim as optim\nfrom torch.optim import lr_scheduler\nfrom torch.autograd import Variable\nimport numpy as np\nimport torchvision\nfrom torchvision import datasets, models, transforms\nimport time\nimport os\nimport scipy.io\nfrom model import ft_net, ft_net_dense, PCB, PCB_test\n\n######################################################################\n# Options\n# --------\nparser = argparse.ArgumentParser(description='Training')\nparser.add_argument('--gpu_ids',default='0', type=str,help='gpu_ids: e.g. 0 0,1,2 0,2')\nparser.add_argument('--which_epoch',default='last', type=str, help='0,1,2,3...or last')\nparser.add_argument('--test_dir',default='/home/zzd/Market/pytorch',type=str, help='./test_data')\nparser.add_argument('--name', default='ft_ResNet50', type=str, help='save model path')\nparser.add_argument('--batchsize', default=32, type=int, help='batchsize')\nparser.add_argument('--use_dense', action='store_true', help='use densenet121' )\nparser.add_argument('--PCB', action='store_true', help='use PCB' )\n\nopt = parser.parse_args()\n\nstr_ids = opt.gpu_ids.split(',')\n#which_epoch = opt.which_epoch\nname = opt.name\ntest_dir = opt.test_dir\n\ngpu_ids = []\nfor str_id in str_ids:\n id = int(str_id)\n if id >=0:\n gpu_ids.append(id)\n\n# set gpu ids\nif len(gpu_ids)>0:\n torch.cuda.set_device(gpu_ids[0])\n\n######################################################################\n# Load Data\n# ---------\n#\n# We will use torchvision and torch.utils.data packages for loading the\n# data.\n#\ndata_transforms = transforms.Compose([\n transforms.Resize((288,144), interpolation=3),\n transforms.ToTensor(),\n transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])\n############### Ten Crop \n #transforms.TenCrop(224),\n #transforms.Lambda(lambda crops: torch.stack(\n # [transforms.ToTensor()(crop) \n # for crop in crops]\n # )),\n #transforms.Lambda(lambda crops: torch.stack(\n # [transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])(crop)\n # for crop in crops]\n # ))\n])\n\nif opt.PCB:\n data_transforms = transforms.Compose([\n transforms.Resize((384,192), interpolation=3),\n transforms.ToTensor(),\n transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225]) \n ])\n\n\ndata_dir = test_dir\nimage_datasets = {x: datasets.ImageFolder( os.path.join(data_dir,x) ,data_transforms) for x in ['gallery','query']}\ndataloaders = {x: torch.utils.data.DataLoader(image_datasets[x], batch_size=opt.batchsize,\n shuffle=False, num_workers=16) for x in ['gallery','query']}\n\nclass_names = image_datasets['query'].classes\nuse_gpu = torch.cuda.is_available()\n\n######################################################################\n# Load model\n#---------------------------\ndef load_network(network):\n save_path = os.path.join('./model',name,'net_%s.pth'%opt.which_epoch)\n network.load_state_dict(torch.load(save_path))\n return network\n\n\n######################################################################\n# Extract feature\n# ----------------------\n#\n# Extract feature from a trained model.\n#\ndef fliplr(img):\n '''flip horizontal'''\n inv_idx = torch.arange(img.size(3)-1,-1,-1).long() # N x C x H x W\n img_flip = img.index_select(3,inv_idx)\n return img_flip\n\ndef extract_feature(model,dataloaders):\n features = torch.FloatTensor()\n count = 0\n for data in dataloaders:\n img, label = data\n n, c, h, w = img.size()\n 
count += n\n        print(count)\n        if opt.use_dense:\n            ff = torch.FloatTensor(n,1024).zero_()\n        else:\n            ff = torch.FloatTensor(n,2048).zero_()\n        if opt.PCB:\n            ff = torch.FloatTensor(n,2048,6).zero_() # we have six parts\n        for i in range(2):\n            if(i==1):\n                img = fliplr(img)\n            input_img = Variable(img.cuda())\n            outputs = model(input_img) \n            f = outputs.data.cpu()\n            ff = ff+f\n        # norm feature\n        if opt.PCB:\n            # feature size (n,2048,6)\n            fnorm = torch.norm(ff, p=2, dim=1, keepdim=True)\n            ff = ff.div(fnorm.expand_as(ff))\n            ff = ff.view(ff.size(0), -1)\n        else:\n            fnorm = torch.norm(ff, p=2, dim=1, keepdim=True)\n            ff = ff.div(fnorm.expand_as(ff))\n\n        features = torch.cat((features,ff), 0)\n    return features\n\ndef get_id(img_path):\n    camera_id = []\n    labels = []\n    frames = []\n    for path, v in img_path:\n        filename = path.split('/')[-1]\n        label = filename[0:4]\n        camera = filename.split('c')[1]\n        frame = filename[9:16]\n        if label[0:2]=='-1':\n            labels.append(-1)\n        else:\n            labels.append(int(label))\n        camera_id.append(int(camera[0]))\n        frames.append(int(frame))\n    return camera_id, labels, frames\n\ngallery_path = image_datasets['gallery'].imgs\nquery_path = image_datasets['query'].imgs\n\ngallery_cam,gallery_label, gallery_frames = get_id(gallery_path)\nquery_cam,query_label, query_frames = get_id(query_path)\n\n######################################################################\n# Load Collected data Trained model\nclass_num=702\n# class_num=751\nprint('-------test-----------')\nif opt.use_dense:\n    model_structure = ft_net_dense(class_num)\nelse:\n    model_structure = ft_net(class_num)\n\nif opt.PCB:\n    model_structure = PCB(class_num)\n\nmodel = load_network(model_structure)\n\n# Remove the final fc layer and classifier layer\nif not opt.PCB:\n    model.model.fc = nn.Sequential()\n    model.classifier = nn.Sequential()\nelse:\n    model = PCB_test(model)\n\n# Change to test mode\nmodel = model.eval()\nif use_gpu:\n    model = model.cuda()\n\n# Extract feature\ngallery_feature = extract_feature(model,dataloaders['gallery'])\nquery_feature = extract_feature(model,dataloaders['query'])\n\n# Save to Matlab for check\nresult = {'gallery_f':gallery_feature.numpy(),'gallery_label':gallery_label,'gallery_cam':gallery_cam,'gallery_frames':gallery_frames,'query_f':query_feature.numpy(),'query_label':query_label,'query_cam':query_cam,'query_frames':query_frames}\nscipy.io.savemat('model/'+name+'/'+'pytorch_result.mat',result)\n" ]
[ [ "torch.nn.Sequential", "torch.norm", "torch.cuda.set_device", "torch.load", "torch.cat", "torch.utils.data.DataLoader", "torch.FloatTensor", "torch.cuda.is_available" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
JeremyBYU/FastGaussianAccumulator
[ "995c1cb53485212bc2c71dad3ed3834caaa1f45b", "995c1cb53485212bc2c71dad3ed3834caaa1f45b", "995c1cb53485212bc2c71dad3ed3834caaa1f45b" ]
[ "tests/python/helpers/setup_helper.py", "examples/python/util/line_mesh.py", "src/Python/slowga/projections.py" ]
[ "import numpy as np\n\nfrom fastgac import GaussianAccumulatorS2Beta, IcoCharts, MatX3d\n\nfrom scipy.spatial.transform import Rotation as R\n\n \ndef setup_fastgac(normals:np.ndarray, level=4):\n kwargs_s2 = dict(level=level)\n # Create Gaussian Accumulator\n ga_cpp_s2 = GaussianAccumulatorS2Beta(**kwargs_s2)\n _ = ga_cpp_s2.integrate(MatX3d(normals))\n \n ico_chart_ = IcoCharts(level)\n normalized_bucket_counts_by_vertex = ga_cpp_s2.get_normalized_bucket_counts_by_vertex(True)\n ico_chart_.fill_image(normalized_bucket_counts_by_vertex)\n\n return dict(ga=ga_cpp_s2, ico=ico_chart_, normals=normals)\n\ndef setup_fastgac_simple(level=4):\n ga_cpp_s2 = GaussianAccumulatorS2Beta(level=level) \n ico_chart = IcoCharts(level)\n return dict(ga=ga_cpp_s2, ico=ico_chart)\n\ndef polar_to_catersian(theta, phi):\n x = np.sin(phi) * np.cos(theta)\n y = np.sin(phi) * np.sin(theta)\n z = np.cos(phi)\n\n normals = np.column_stack([x, y, z])\n return normals\n\n\ndef cartesian_to_polar(x, y, z):\n phi = np.arccos(z)\n theta = np.arctan2(y, x)\n return np.column_stack([theta, phi])\n\n\ndef sample_sphere_cap(n=100, deg=10, normal=[1, 0, 0]):\n min_z = np.cos(np.radians(deg))\n z = np.random.uniform(min_z, 1.0, n)\n r = np.sqrt(1 - z * z)\n theta = np.random.uniform(0, 1.0, n) * 2 * np.pi\n x = r * np.cos(theta)\n y = r * np.sin(theta)\n\n rm, _, = R.align_vectors(np.array([normal]), np.array([[0.0, 0.0, 1.0]]))\n points = np.column_stack([x, y, z])\n points = rm.apply(points)\n\n return points\n\ndef generate_random_normals(n=1, umin=0.0, umax=1.0, vmin=0.0, vmax=1.0):\n normals = np.zeros((n, 3))\n uniform_number_1 = np.random.uniform(umin, umax, n)\n uniform_number_2 = np.random.uniform(vmin, vmax, n)\n theta = 2 * np.pi * uniform_number_1\n phi = np.arccos(2 * uniform_number_2 - 1)\n normals = polar_to_catersian(theta, phi)\n polars = np.column_stack([theta, phi])\n return normals, polars\n\n\ndef cluster_normals(num_clusters=2, normals_per_cluster=5, patch_deg=10, normals=None):\n if normals is not None:\n num_clusters = normals.shape[0]\n else:\n normals, _ = generate_random_normals(num_clusters)\n \n clusters = []\n for i in range(num_clusters):\n normal = normals[i]\n cluster_normals = sample_sphere_cap(n=normals_per_cluster, deg=patch_deg, normal=normal.tolist())\n clusters.append(cluster_normals)\n return clusters, normals\n\ndef sort_by_distance_from_point(array, point=[0.0, 0.0, 1.0]):\n diff = array - point\n diff = np.sum(np.power(diff, 2), axis=1)\n idx = np.argsort(diff)[::-1]\n return array[idx]\n", "\"\"\"Module which creates mesh lines from a line set\nOpen3D relies upon using glLineWidth to set line width on a LineSet\nHowever, this method is now deprecated and not fully supporeted in newer OpenGL versions\nSee:\n Open3D Github Pull Request - https://github.com/intel-isl/Open3D/pull/738\n Other Framework Issues - https://github.com/openframeworks/openFrameworks/issues/3460\n\nThis module aims to solve this by converting a line into a triangular mesh (which has thickness)\nThe basic idea is to create a cylinder for each line segment, translate it, and then rotate it.\n\nLicense: MIT\n\n\"\"\"\nimport numpy as np\nimport open3d as o3d\n\n\ndef align_vector_to_another(a=np.array([0, 0, 1]), b=np.array([1, 0, 0])):\n \"\"\"\n Aligns vector a to vector b with axis angle rotation\n \"\"\"\n if np.array_equal(a, b):\n return None, None\n axis_ = np.cross(a, b)\n axis_ = axis_ / np.linalg.norm(axis_)\n angle = np.arccos(np.dot(a, b))\n\n return axis_, angle\n\n\ndef normalized(a, axis=-1, 
order=2):\n    \"\"\"Normalizes a numpy array of points\"\"\"\n    l2 = np.atleast_1d(np.linalg.norm(a, order, axis))\n    l2[l2 == 0] = 1\n    return a / np.expand_dims(l2, axis), l2\n\n\nclass LineMesh(object):\n    def __init__(self, points, lines=None, colors=[0, 1, 0], radius=0.15):\n        \"\"\"Creates a line represented as sequence of cylinder triangular meshes\n\n        Arguments:\n            points {ndarray} -- Numpy array of points Nx3.\n\n        Keyword Arguments:\n            lines {list[list] or None} -- List of point index pairs denoting line segments. If None, implicit lines from ordered pairwise points. (default: {None})\n            colors {list} -- list of colors, or single color of the line (default: {[0, 1, 0]})\n            radius {float} -- radius of cylinder (default: {0.15})\n        \"\"\"\n        self.points = np.array(points)\n        self.lines = np.array(\n            lines) if lines is not None else self.lines_from_ordered_points(self.points)\n        self.colors = np.array(colors)\n        self.radius = radius\n        self.cylinder_segments = []\n\n        self.create_line_mesh()\n\n    @staticmethod\n    def lines_from_ordered_points(points):\n        lines = [[i, i + 1] for i in range(0, points.shape[0] - 1, 1)]\n        return np.array(lines)\n\n    def create_line_mesh(self):\n        first_points = self.points[self.lines[:, 0], :]\n        second_points = self.points[self.lines[:, 1], :]\n        line_segments = second_points - first_points\n        line_segments_unit, line_lengths = normalized(line_segments)\n\n        z_axis = np.array([0, 0, 1])\n        # Create triangular mesh cylinder segments of line\n        for i in range(line_segments_unit.shape[0]):\n            line_segment = line_segments_unit[i, :]\n            line_length = line_lengths[i]\n            # get axis angle rotation to align cylinder with line segment\n            axis, angle = align_vector_to_another(z_axis, line_segment)\n            # Get translation vector\n            translation = first_points[i, :] + line_segment * line_length * 0.5\n            # create cylinder and apply transformations\n            cylinder_segment = o3d.geometry.TriangleMesh.create_cylinder(\n                self.radius, line_length)\n            cylinder_segment = cylinder_segment.translate(\n                translation, relative=False)\n            if axis is not None:\n                axis_a = axis * angle\n                cylinder_segment = cylinder_segment.rotate(\n                    R=o3d.geometry.get_rotation_matrix_from_axis_angle(axis_a), center=True)\n                # cylinder_segment = cylinder_segment.rotate(\n                #   axis_a, center=True, type=o3d.geometry.RotationType.AxisAngle)\n            # color cylinder\n            color = self.colors if self.colors.ndim == 1 else self.colors[i, :]\n            cylinder_segment.paint_uniform_color(color)\n\n            self.cylinder_segments.append(cylinder_segment)\n\n    def add_line(self, vis):\n        \"\"\"Adds this line to the visualizer\"\"\"\n        for cylinder in self.cylinder_segments:\n            vis.add_geometry(cylinder)\n\n    def remove_line(self, vis):\n        \"\"\"Removes this line from the visualizer\"\"\"\n        for cylinder in self.cylinder_segments:\n            vis.remove_geometry(cylinder)\n\n\ndef main():\n    print(\"Demonstrating LineMesh vs LineSet\")\n    # Create Line Set\n    points = [[0, 0, 0], [1, 0, 0], [0, 1, 0], [1, 1, 0], [0, 0, 1], [1, 0, 1],\n              [0, 1, 1], [1, 1, 1]]\n    lines = [[0, 1], [0, 2], [1, 3], [2, 3], [4, 5], [4, 6], [5, 7], [6, 7],\n             [0, 4], [1, 5], [2, 6], [3, 7]]\n    colors = [[1, 0, 0] for i in range(len(lines))]\n\n    line_set = o3d.geometry.LineSet()\n    line_set.points = o3d.utility.Vector3dVector(points)\n    line_set.lines = o3d.utility.Vector2iVector(lines)\n    line_set.colors = o3d.utility.Vector3dVector(colors)\n\n    # Create Line Mesh 1\n    points = np.array(points) + [0, 0, 2]\n    line_mesh1 = LineMesh(points, lines, colors, radius=0.02)\n    line_mesh1_geoms = line_mesh1.cylinder_segments\n\n    # Create Line Mesh 
2\n points = np.array(points) + [0, 2, 0]\n line_mesh2 = LineMesh(points, radius=0.03)\n line_mesh2_geoms = line_mesh2.cylinder_segments\n\n o3d.visualization.draw_geometries(\n [line_set, *line_mesh1_geoms, *line_mesh2_geoms])\n\n\nif __name__ == \"__main__\":\n main()\n", "import numpy as np\nimport matplotlib.pyplot as plt\nimport time\nfrom .helper import normalize_box, normalized\n\ndef convert_phi_theta(normals, top_half=True):\n phi_theta = np.zeros((normals.shape[0], 2))\n xy = normals[:, 0]**2 + normals[:, 1]**2\n phi_theta[:, 0] = np.arctan2(np.sqrt(xy), normals[:, 2]) # for elevation angle defined from Z-axis down\n phi_theta[:, 1] = np.arctan2(normals[:, 1], normals[:, 0])\n return phi_theta\n\n\ndef convert_lat_long(normals, degrees=True, return_mask=True):\n normals_new = np.copy(normals)\n phi_theta = np.zeros((normals_new.shape[0], 2))\n phi_theta[:, 0] = np.arccos(normals_new[:, 2]) # for elevation angle defined from Z-axis down\n phi_theta[:, 1] = np.arctan2(normals_new[:, 1], normals_new[:, 0])\n phi_theta[:, 0] = np.ones_like(phi_theta[:, 0]) * np.pi/2.0 - phi_theta[:, 0]\n if degrees:\n phi_theta[:, 0] = np.rad2deg(phi_theta[:, 0])\n phi_theta[:, 1] = np.rad2deg(phi_theta[:, 1])\n\n return phi_theta\n\ndef convert_stereographic(normals, top_half=True):\n normals_new = np.copy(normals)\n projection = np.zeros((normals_new.shape[0], 2))\n projection[:, 0] = normals_new[:, 0] / (1 - normals_new[:, 2])\n projection[:, 1] = normals_new[:, 1] / (1 - normals_new[:, 2])\n return projection\n\n\ndef convert_phi_theta_centered(normals, top_half=True):\n normals_new = np.copy(normals)\n projection = np.zeros((normals_new.shape[0], 2))\n xy = normals_new[:, 0]**2 + normals_new[:, 1]**2\n for i in range(normals_new.shape[0]):\n normal = normals_new[i, :]\n phi = np.arccos(normal[2])\n phi = -phi if normal[1] < 0 else phi\n theta = np.arctan2(normal[1], normal[0])\n theta = theta + np.pi / 2.0\n theta = theta - np.pi if theta > np.pi / 2.0 else theta\n projection[i, :] = [phi, theta]\n\n return projection\n\n\ndef down_proj(normals, top_half=True):\n normals_new = np.copy(normals)\n projection = np.zeros((normals_new.shape[0], 2))\n projection = normals_new[:, :2]\n\n return projection\n\ndef azimuth_equidistant(normals):\n projection = np.zeros((normals.shape[0], 2))\n theta = np.arctan2(normals[:, 1], normals[:, 0])\n phi = np.arccos(normals[:, 2])\n\n projection[:, 0] = phi * np.sin(theta)\n projection[:, 1] = - phi * np.cos(theta)\n # Normalize to 0-1\n projection = normalize_box(projection)\n\n return projection\n\ndef azimuth_equidistant_fast(normals):\n projection = np.zeros((normals.shape[0], 2))\n\n l2 = np.atleast_1d(np.linalg.norm(normals[:, :2], 2, -1))\n scaling = 1.0 / l2\n phi = np.arccos(normals[:, 2])\n\n projection[:, 0] = phi * normals[:, 1] * scaling\n projection[:, 1] = -phi * normals[:, 0] * scaling\n # Normalize to 0-1\n projection = normalize_box(projection)\n\n return projection\n\n\ndef plot_projection(ga):\n\n projections = [(\"Spherical Coordinates\", \"phi\", \"theta\", \"convert_phi_theta\"),\n (\"Spherical Coordinates Centered\", \"phi\", \"theta\", \"convert_phi_theta_centered\"),\n (\"Geodetic\", \"lat\", \"lon\", \"convert_lat_long\"),\n # (\"Steographic Projection\", \"x*\", \"y*\", \"convert_stereographic\"),\n (\"Project To Plane\", \"x\", \"y\", \"down_proj\"),\n (\"Azimuth Equidistant\", \"x*\", \"y*\", \"azimuth_equidistant\"),\n (\"Azimuth Equidistant (Fast)\", \"x\", \"y\", \"azimuth_equidistant_fast\"),\n ]\n fig, axs = plt.subplots(3, 2, 
figsize=(5, 6))\n axs = axs.reshape(-1)\n for i, (title_name, xlabel, ylabel, function_name) in enumerate(projections):\n ax = axs[i]\n t0 = time.perf_counter()\n proj = globals()[function_name](ga.gaussian_normals)\n t1 = time.perf_counter()\n ax.scatter(proj[:, 0], proj[:, 1], c=ga.colors)\n ax.set_title(title_name)\n ax.set_xlabel(xlabel)\n ax.set_ylabel(ylabel)\n ax.axis('equal')\n # print(\"{} took {}\".format(title_name, t1-t0 ))\n fig.tight_layout()\n\n plt.show()" ]
[ [ "numpy.radians", "numpy.sqrt", "numpy.power", "numpy.arccos", "numpy.cos", "numpy.sin", "numpy.arctan2", "numpy.column_stack", "numpy.argsort", "numpy.random.uniform", "numpy.array", "numpy.zeros" ], [ "numpy.dot", "numpy.expand_dims", "numpy.array_equal", "numpy.linalg.norm", "numpy.cross", "numpy.array" ], [ "numpy.ones_like", "numpy.sqrt", "numpy.arccos", "matplotlib.pyplot.subplots", "numpy.rad2deg", "numpy.arctan2", "numpy.sin", "numpy.copy", "numpy.cos", "numpy.linalg.norm", "matplotlib.pyplot.show", "numpy.zeros" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
jGaboardi/splot
[ "b30f13eed864e237b36d9d678a6a69a0d54f5319" ]
[ "splot/tests/test_viz_esda_mpl.py" ]
[ "import matplotlib.pyplot as plt\nfrom libpysal.weights.contiguity import Queen\nimport libpysal as lp\nfrom libpysal import examples\nimport geopandas as gpd\nimport numpy as np\nfrom pytest import raises, warns\n\nfrom esda.moran import (Moran_Local, Moran, Moran_BV,\n Moran_Local_BV, Moran_BV_matrix)\nfrom splot.esda import (moran_scatterplot,\n plot_moran_simulation,\n plot_moran,\n plot_moran_bv_simulation,\n plot_moran_bv,\n plot_local_autocorrelation,\n lisa_cluster,\n moran_facet)\n\nfrom splot._viz_esda_mpl import (_moran_global_scatterplot,\n _moran_loc_scatterplot,\n _moran_bv_scatterplot,\n _moran_loc_bv_scatterplot)\n\n\ndef _test_data():\n guerry = examples.load_example('Guerry')\n link_to_data = guerry.get_path('guerry.shp')\n gdf = gpd.read_file(link_to_data)\n return gdf\n\n\ndef _test_data_columbus():\n columbus = examples.load_example('Columbus')\n link_to_data = columbus.get_path('columbus.shp')\n df = gpd.read_file(link_to_data)\n return df\n\n\ndef _test_LineString():\n link_to_data = examples.get_path('streets.shp')\n gdf = gpd.read_file(link_to_data)\n return gdf\n\n\ndef test_moran_scatterplot():\n gdf = _test_data()\n x = gdf['Suicids'].values\n y = gdf['Donatns'].values\n w = Queen.from_dataframe(gdf)\n w.transform = 'r'\n # Calculate `esda.moran` Objects\n moran = Moran(y, w)\n moran_bv = Moran_BV(y, x, w)\n moran_loc = Moran_Local(y, w)\n moran_loc_bv = Moran_Local_BV(y, x, w)\n # try with p value so points are colored or warnings apply\n fig, _ = moran_scatterplot(moran, p=0.05, aspect_equal=False)\n plt.close(fig)\n fig, _ = moran_scatterplot(moran_loc, p=0.05)\n plt.close(fig)\n fig, _ = moran_scatterplot(moran_bv, p=0.05)\n plt.close(fig)\n fig, _ = moran_scatterplot(moran_loc_bv, p=0.05)\n plt.close(fig)\n\n\ndef test_moran_global_scatterplot():\n # Load data and apply statistical analysis\n gdf = _test_data()\n y = gdf['Donatns'].values\n w = Queen.from_dataframe(gdf)\n w.transform = 'r'\n # Calc Global Moran\n w = Queen.from_dataframe(gdf)\n moran = Moran(y, w)\n # plot\n fig, _ = _moran_global_scatterplot(moran)\n plt.close(fig)\n # customize\n fig, _ = _moran_global_scatterplot(moran, zstandard=False,\n aspect_equal=False,\n fitline_kwds=dict(color='#4393c3'))\n plt.close(fig)\n\n\ndef test_plot_moran_simulation():\n # Load data and apply statistical analysis\n gdf = _test_data()\n y = gdf['Donatns'].values\n w = Queen.from_dataframe(gdf)\n w.transform = 'r'\n # Calc Global Moran\n w = Queen.from_dataframe(gdf)\n moran = Moran(y, w)\n # plot\n fig, _ = plot_moran_simulation(moran)\n plt.close(fig)\n # customize\n fig, _ = plot_moran_simulation(moran,\n fitline_kwds=dict(color='#4393c3'))\n plt.close(fig)\n\n\ndef test_plot_moran():\n # Load data and apply statistical analysis\n gdf = _test_data()\n y = gdf['Donatns'].values\n w = Queen.from_dataframe(gdf)\n w.transform = 'r'\n # Calc Global Moran\n w = Queen.from_dataframe(gdf)\n moran = Moran(y, w)\n # plot\n fig, _ = plot_moran(moran)\n plt.close(fig)\n # customize\n fig, _ = plot_moran(moran, zstandard=False,\n aspect_equal=False,\n fitline_kwds=dict(color='#4393c3'))\n plt.close(fig)\n\n\ndef test_moran_bv_scatterplot():\n gdf = _test_data()\n x = gdf['Suicids'].values\n y = gdf['Donatns'].values\n w = Queen.from_dataframe(gdf)\n w.transform = 'r'\n # Calculate Bivariate Moran\n moran_bv = Moran_BV(x, y, w)\n # plot\n fig, _ = _moran_bv_scatterplot(moran_bv)\n plt.close(fig)\n # customize plot\n fig, _ = _moran_bv_scatterplot(moran_bv, aspect_equal=False,\n fitline_kwds=dict(color='#4393c3'))\n 
plt.close(fig)\n\n\ndef test_plot_moran_bv_simulation():\n # Load data and calculate weights\n gdf = _test_data()\n x = gdf['Suicids'].values\n y = gdf['Donatns'].values\n w = Queen.from_dataframe(gdf)\n w.transform = 'r'\n # Calculate Bivariate Moran\n moran_bv = Moran_BV(x, y, w)\n # plot\n fig, _ = plot_moran_bv_simulation(moran_bv)\n plt.close(fig)\n # customize plot\n fig, _ = plot_moran_bv_simulation(moran_bv, aspect_equal=False,\n fitline_kwds=dict(color='#4393c3'))\n plt.close(fig)\n\n\ndef test_plot_moran_bv():\n # Load data and calculate weights\n gdf = _test_data()\n x = gdf['Suicids'].values\n y = gdf['Donatns'].values\n w = Queen.from_dataframe(gdf)\n w.transform = 'r'\n # Calculate Bivariate Moran\n moran_bv = Moran_BV(x, y, w)\n # plot\n fig, _ = plot_moran_bv(moran_bv)\n plt.close(fig)\n # customize plot\n fig, _ = plot_moran_bv(moran_bv,aspect_equal=False,\n fitline_kwds=dict(color='#4393c3'))\n plt.close(fig)\n\n\ndef test_moran_loc_scatterplot():\n df = _test_data_columbus()\n\n x = df['INC'].values\n y = df['HOVAL'].values\n w = Queen.from_dataframe(df)\n w.transform = 'r'\n\n moran_loc = Moran_Local(y, w)\n moran_bv = Moran_BV(x, y, w)\n\n # try without p value\n fig, _ = _moran_loc_scatterplot(moran_loc)\n plt.close(fig)\n\n # try with p value and different figure size\n fig, _ = _moran_loc_scatterplot(moran_loc, p=0.05,\n aspect_equal=False,\n fitline_kwds=dict(color='#4393c3'))\n plt.close(fig)\n\n # try with p value and zstandard=False\n fig, _ = _moran_loc_scatterplot(moran_loc, p=0.05, zstandard=False,\n fitline_kwds=dict(color='#4393c3'))\n plt.close(fig)\n\n # try without p value and zstandard=False\n fig, _ = _moran_loc_scatterplot(moran_loc, zstandard=False,\n fitline_kwds=dict(color='#4393c3'))\n plt.close(fig)\n\n raises(ValueError, _moran_loc_scatterplot, moran_bv, p=0.5)\n warns(UserWarning, _moran_loc_scatterplot, moran_loc, p=0.5,\n scatter_kwds=dict(c='#4393c3'))\n\n\ndef _test_calc_moran_loc(gdf, var='HOVAL'):\n y = gdf[var].values\n w = Queen.from_dataframe(gdf)\n w.transform = 'r'\n\n moran_loc = Moran_Local(y, w)\n return moran_loc\n\n\ndef test_lisa_cluster():\n df = _test_data_columbus()\n moran_loc = _test_calc_moran_loc(df)\n\n fig, _ = lisa_cluster(moran_loc, df)\n plt.close(fig)\n\n # test LineStrings\n df_line = _test_LineString()\n moran_loc = _test_calc_moran_loc(df_line, var=\"Length\")\n\n fig, _ = lisa_cluster(moran_loc, df_line)\n plt.close(fig)\n\n\ndef test_plot_local_autocorrelation():\n df = _test_data_columbus()\n moran_loc = _test_calc_moran_loc(df)\n\n fig, _ = plot_local_autocorrelation(moran_loc, df, 'HOVAL', p=0.05)\n plt.close(fig)\n\n # also test with quadrant and mask\n fig, _ = plot_local_autocorrelation(moran_loc, df, 'HOVAL', p=0.05,\n region_column='POLYID',\n aspect_equal=False,\n mask=['1', '2', '3'], quadrant=1)\n plt.close(fig)\n\n # also test with quadrant and mask\n raises(ValueError, plot_local_autocorrelation, moran_loc,\n df, 'HOVAL', p=0.05, region_column='POLYID',\n mask=['100', '200', '300'], quadrant=1)\n\n\ndef test_moran_loc_bv_scatterplot():\n gdf = _test_data()\n x = gdf['Suicids'].values\n y = gdf['Donatns'].values\n w = Queen.from_dataframe(gdf)\n w.transform = 'r'\n # Calculate Univariate and Bivariate Moran\n moran_loc = Moran_Local(y, w)\n moran_loc_bv = Moran_Local_BV(x, y, w)\n # try with p value so points are colored\n fig, _ = _moran_loc_bv_scatterplot(moran_loc_bv)\n plt.close(fig)\n\n # try with p value and different figure size\n fig, _ = _moran_loc_bv_scatterplot(moran_loc_bv, p=0.05,\n 
aspect_equal=False)\n plt.close(fig)\n\n raises(ValueError, _moran_loc_bv_scatterplot, moran_loc, p=0.5)\n warns(UserWarning, _moran_loc_bv_scatterplot, moran_loc_bv, p=0.5,\n scatter_kwds=dict(c='r'))\n\n\ndef test_moran_facet():\n sids2 = examples.load_example('sids2')\n f = lp.io.open(sids2.get_path('sids2.dbf'))\n varnames = ['SIDR74', 'SIDR79', 'NWR74', 'NWR79']\n vars = [np.array(f.by_col[var]) for var in varnames]\n w = lp.io.open(examples.get_path(\"sids2.gal\")).read()\n # calculate moran matrix\n moran_matrix = Moran_BV_matrix(vars, w, varnames=varnames)\n # plot\n fig, axarr = moran_facet(moran_matrix)\n plt.close(fig)\n # customize\n fig, axarr = moran_facet(moran_matrix, scatter_glob_kwds=dict(color='r'),\n fitline_bv_kwds=dict(color='y'))\n plt.close(fig)\n" ]
[ [ "numpy.array", "matplotlib.pyplot.close" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
marcinz/cunumeric
[ "c40b038d4eb0611f7bb16d5bd11891a633ef7892", "c40b038d4eb0611f7bb16d5bd11891a633ef7892", "c40b038d4eb0611f7bb16d5bd11891a633ef7892", "c40b038d4eb0611f7bb16d5bd11891a633ef7892", "c40b038d4eb0611f7bb16d5bd11891a633ef7892", "c40b038d4eb0611f7bb16d5bd11891a633ef7892", "c40b038d4eb0611f7bb16d5bd11891a633ef7892", "c40b038d4eb0611f7bb16d5bd11891a633ef7892" ]
[ "tests/universal_functions_tests/true_divide_tests/inplace_broadcast.py", "tests/universal_functions_tests/tan_tests/scalar.py", "tests/universal_functions_tests/arcsin_tests/normal.py", "tests/diag.py", "tests/universal_functions_tests/add_tests/complex_data.py", "tests/universal_functions_tests/greater_equal_tests/broadcast.py", "tests/universal_functions_tests/greater_tests/normal.py", "tests/dot.py" ]
[ "# Copyright 2021 NVIDIA Corporation\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n#\n\nimport random\n\nimport numpy as np\n\nimport cunumeric as num\n\n\ndef test():\n # test inplace divide\n anp = np.random.randn(4, 5)\n b = random.randint(1, 13)\n a = num.array(anp)\n\n np.true_divide(anp, b, out=anp)\n num.true_divide(a, b, out=a)\n\n assert np.array_equal(a, anp)\n\n\nif __name__ == \"__main__\":\n test()\n", "# Copyright 2021 NVIDIA Corporation\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n#\n\nimport numpy as np\nfrom test_tools.generators import scalar_gen\n\nimport cunumeric as num\n\n\ndef test():\n test_values = [-np.pi, 0, np.pi / 2, np.pi]\n for a in test_values:\n for (la, na) in zip(scalar_gen(num, a), scalar_gen(np, a)):\n assert np.array_equal(num.tan(la), np.tan(na))\n\n\nif __name__ == \"__main__\":\n test()\n", "# Copyright 2021 NVIDIA Corporation\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n#\n\nimport numpy as np\n\nimport cunumeric as num\n\n\ndef test():\n assert np.array_equal(num.arcsin([1, -1]), np.arcsin([1, -1]))\n return\n\n\nif __name__ == \"__main__\":\n test()\n", "# Copyright 2021 NVIDIA Corporation\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n#\n\nimport numpy as np\n\nimport cunumeric as num\n\n\ndef test():\n for k in [0, -1, 1, -2, 2]:\n print(f\"diag(k={k})\")\n a = num.array(\n [\n [1, 2, 3, 4],\n [5, 6, 7, 8],\n [9, 10, 11, 12],\n [13, 14, 15, 16],\n [17, 18, 19, 20],\n ]\n )\n an = np.array(\n [\n [1, 2, 3, 4],\n [5, 6, 7, 8],\n 
[9, 10, 11, 12],\n [13, 14, 15, 16],\n [17, 18, 19, 20],\n ]\n )\n\n b = num.diag(a, k=k)\n bn = np.diag(an, k=k)\n assert np.array_equal(b, bn)\n\n c = num.diag(b, k=k)\n cn = np.diag(bn, k=k)\n assert np.array_equal(c, cn)\n\n d = num.array(\n [\n [1, 2, 3, 4, 5],\n [6, 7, 8, 9, 10],\n [11, 12, 13, 14, 15],\n [16, 17, 18, 19, 20],\n ]\n )\n dn = np.array(\n [\n [1, 2, 3, 4, 5],\n [6, 7, 8, 9, 10],\n [11, 12, 13, 14, 15],\n [16, 17, 18, 19, 20],\n ]\n )\n\n e = num.diag(d, k=k)\n en = np.diag(dn, k=k)\n assert np.array_equal(e, en)\n\n f = num.diag(e, k=k)\n fn = np.diag(en, k=k)\n assert np.array_equal(f, fn)\n\n return\n\n\nif __name__ == \"__main__\":\n test()\n", "# Copyright 2021 NVIDIA Corporation\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n#\n\nimport numpy as np\n\nimport cunumeric as num\n\n\ndef test():\n anp = np.random.random(10) + np.random.random(10) * 1j\n bnp = np.random.random(10) + np.random.random(10) * 1j\n\n a = num.array(anp)\n b = num.array(bnp)\n\n assert np.array_equal(num.add(a, b), np.add(anp, bnp))\n\n return\n\n\nif __name__ == \"__main__\":\n test()\n", "# Copyright 2021 NVIDIA Corporation\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n#\n\nimport random\n\nimport numpy as np\n\nimport cunumeric as num\n\n\ndef test():\n anp = np.random.randn(4, 5)\n b = random.randint(1, 13)\n a = num.array(anp)\n\n # test with scalar on the rhs\n assert np.array_equal(num.greater_equal(a, b), np.greater_equal(anp, b))\n\n # test with scalar on the lhs\n assert np.array_equal(num.greater_equal(b, a), np.greater_equal(b, anp))\n\n return\n\n\nif __name__ == \"__main__\":\n test()\n", "# Copyright 2021 NVIDIA Corporation\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n#\n\nimport numpy as np\n\nimport cunumeric as num\n\n\ndef test():\n anp = np.random.randn(4, 5)\n bnp = np.random.randn(4, 5)\n a = num.array(anp)\n b = num.array(bnp)\n\n assert np.array_equal(num.greater(a, b), np.greater(anp, bnp))\n\n return\n\n\nif __name__ == \"__main__\":\n test()\n", "# 
Copyright 2021 NVIDIA Corporation\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n#\n\nimport numpy as np\n\nimport cunumeric as num\n\n\ndef test():\n for dtype in [np.double, np.complex64]:\n np.random.seed(42)\n x_np = np.array(np.random.randn(11), dtype=dtype)\n y_np = np.array(np.random.randn(11), dtype=dtype)\n\n x_num = num.array(x_np)\n y_num = num.array(y_np)\n\n out_np = x_np.dot(y_np)\n out_num = x_num.dot(y_num)\n\n assert num.allclose(out_np, out_num)\n\n\nif __name__ == \"__main__\":\n test()\n" ]
[ [ "numpy.true_divide", "numpy.random.randn", "numpy.array_equal" ], [ "numpy.tan" ], [ "numpy.arcsin" ], [ "numpy.diag", "numpy.array", "numpy.array_equal" ], [ "numpy.add", "numpy.random.random" ], [ "numpy.greater_equal", "numpy.random.randn" ], [ "numpy.random.randn", "numpy.greater" ], [ "numpy.random.randn", "numpy.random.seed" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
BalasaravananB/Visualizer
[ "16451c1be9edcaa5be5303b2b8834ff26012b456", "16451c1be9edcaa5be5303b2b8834ff26012b456" ]
[ "public/js/ecllipes-detection-test.py", "public/js/multi-image-circle.py" ]
[ "import cv2 \nimport numpy as np \n \n# Load image \nimage = cv2.imread('/bala/projects/inoru/WheelVisualizer/storage/app/public/demo_cars/0777_cc1280_032_KH3.jpg', 0) \n \n# Set our filtering parameters \n# Initialize parameter settiing using cv2.SimpleBlobDetector \nparams = cv2.SimpleBlobDetector_Params() \n \n# Set Area filtering parameters \nparams.filterByArea = True\nparams.minArea = 100\n \n# # Set Circularity filtering parameters \n# params.filterByCircularity = True \n# params.minCircularity = 0.9\n \n# # Set Convexity filtering parameters \n# params.filterByConvexity = True\n# params.minConvexity = 0.2\n \n# # Set inertia filtering parameters \n# params.filterByInertia = True\n# params.minInertiaRatio = 0.5\n \n# Create a detector with the parameters \ndetector = cv2.SimpleBlobDetector_create(params) \n \n# Detect blobs \nkeypoints = detector.detect(image) \n \n# Draw blobs on our image as red circles \nblank = np.zeros((1, 1)) \nblobs = cv2.drawKeypoints(image, keypoints, blank, (0, 0, 255), \n cv2.DRAW_MATCHES_FLAGS_DRAW_RICH_KEYPOINTS) \n \nnumber_of_blobs = len(keypoints) \ntext = \"Number of Circular Blobs: \" + str(len(keypoints)) \ncv2.putText(blobs, text, (20, 550), \n cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 100, 255), 2) \n \n# Show blobs \ncv2.imshow(\"Filtering Circular Blobs Only\", blobs) \ncv2.waitKey(0) \ncv2.destroyAllWindows() ", "import cv2 \nimport numpy as np \n\n# Read image. \n# img = cv2.imread('/home/css/Desktop/shadow cropped.png', cv2.IMREAD_COLOR) \nimg1 = cv2.imread('/bala/projects/inoru/WheelVisualizer/storage/app/public/demo_cars/0777_cc1280_032_KH3.jpg', cv2.IMREAD_COLOR) \nimg2 = cv2.imread('/bala/projects/inoru/WheelVisualizer/storage/app/public/demo_cars/3674_cc1280_032_PRN.jpg', cv2.IMREAD_COLOR) \nimg3 = cv2.imread('/bala/projects/inoru/WheelVisualizer/storage/app/public/demo_cars/3714_cc1280_032_3P2.jpg', cv2.IMREAD_COLOR) \n# /bala/projects/inoru/WheelVisualizer/storage/app/public/demo_cars/2962_cc1280_032_8N6.jpg\n# Convert to grayscale. \n\ngray1 = cv2.cvtColor(img1, cv2.COLOR_BGR2GRAY) \ngray2 = cv2.cvtColor(img2, cv2.COLOR_BGR2GRAY) \ngray3 = cv2.cvtColor(img3, cv2.COLOR_BGR2GRAY) \n\nparam1 = 55;\nparam2 = 60;\nminRadius = 0;\nmaxRadius = 50;\n\n\n# Blur using 3 * 3 kernel. \n# gray_blurred = cv2.blur(gray, (5, 5)) \ngray_blurred1 = cv2.GaussianBlur(gray1,(3,3),0)\ngray_blurred2 = cv2.GaussianBlur(gray2,(3,3),0)\ngray_blurred3 = cv2.GaussianBlur(gray3,(3,3),0)\n# Apply Hough transform on the blurred image. \n# detected_circles = cv2.HoughCircles(gray_blurred, \n# \t\t\t\tcv2.HOUGH_GRADIENT, 1, 80, param1 = 55, \n# \t\t\tparam2 = 20, minRadius = 30, maxRadius = 35) \ndetected_circles1 = cv2.HoughCircles(gray_blurred1, \n\t\t\t\tcv2.HOUGH_GRADIENT, 1, 100, param1 = param1,param2 = param2, minRadius = minRadius, maxRadius = maxRadius) \ndetected_circles2 = cv2.HoughCircles(gray_blurred2, \n\t\t\t\tcv2.HOUGH_GRADIENT, 1, 100, param1 = param1,param2 = param2, minRadius = minRadius, maxRadius = maxRadius) \ndetected_circles3 = cv2.HoughCircles(gray_blurred3, \n\t\t\t\tcv2.HOUGH_GRADIENT, 1, 100, param1 = param1,param2 = param2, minRadius = minRadius, maxRadius = maxRadius) \n\n# Draw circles that are detected. \nif detected_circles1 is not None: \n\n\t# Convert the circle parameters a, b and r to integers. \n\tdetected_circles1 = np.uint16(np.around(detected_circles1)) \n\n\tfor pt in detected_circles1[0, :]: \n\t\ta, b, r = pt[0], pt[1], pt[2] \n\n\t\t# Draw the circumference of the circle. 
\n\t\tcv2.circle(img1, (a, b), r, (0, 255, 0), 2) \n\n\t\t# Draw a small circle (of radius 1) to show the center. \n\t\tcv2.circle(img1, (a, b), 1, (0, 0, 255), 3) \n\n\n\n\n# Draw circles that are detected. \nif detected_circles2 is not None: \n\n\t# Convert the circle parameters a, b and r to integers. \n\tdetected_circles2 = np.uint16(np.around(detected_circles2)) \n\n\tfor pt in detected_circles2[0, :]: \n\t\ta, b, r = pt[0], pt[1], pt[2] \n\n\t\t# Draw the circumference of the circle. \n\t\tcv2.circle(img2, (a, b), r, (0, 255, 0), 2) \n\n\t\t# Draw a small circle (of radius 1) to show the center. \n\t\tcv2.circle(img2, (a, b), 1, (0, 0, 255), 3) \n\n\n\n# Draw circles that are detected. \nif detected_circles3 is not None: \n\n\t# Convert the circle parameters a, b and r to integers. \n\tdetected_circles3 = np.uint16(np.around(detected_circles3)) \n\n\tfor pt in detected_circles3[0, :]: \n\t\ta, b, r = pt[0], pt[1], pt[2] \n\n\t\t# Draw the circumference of the circle. \n\t\tcv2.circle(img3, (a, b), r, (0, 255, 0), 2) \n\n\t\t# Draw a small circle (of radius 1) to show the center. \n\t\tcv2.circle(img3, (a, b), 1, (0, 0, 255), 3) \n\n\n\n# cv2.imshow(\"Detected Circle\", np.hstack([img1,img2,img3]))\ncv2.imshow(\"Detected Circle1\", img1)\ncv2.imshow(\"Detected Circle2\", img2)\ncv2.imshow(\"Detected Circle3\", img3)\ncv2.waitKey(0) \n" ]
[ [ "numpy.zeros" ], [ "numpy.around" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
yaoxingcheng/cogdl
[ "fad95b189207f78fce720383584da52b14a9850e", "fad95b189207f78fce720383584da52b14a9850e" ]
[ "examples/gcn.py", "cogdl/models/nn/gat.py" ]
[ "import torch\nfrom cogdl.tasks import build_task\nfrom cogdl.datasets import build_dataset\nfrom cogdl.models import build_model\nfrom cogdl.utils import build_args_from_dict\n\n\ndef get_default_args():\n cuda_available = torch.cuda.is_available()\n default_dict = {'hidden_size': 16,\n 'dropout': 0.5,\n 'patience': 100,\n 'max_epoch': 500,\n 'cpu': not cuda_available,\n 'lr': 0.01,\n 'weight_decay': 5e-4}\n return build_args_from_dict(default_dict)\n\n\nif __name__ == \"__main__\":\n args = get_default_args()\n args.task = 'node_classification'\n args.dataset = 'cora'\n args.model = 'pyg_gcn'\n dataset = build_dataset(args)\n args.num_features = dataset.num_features\n args.num_classes = dataset.num_classes\n args.num_layers = 2\n model = build_model(args)\n task = build_task(args, dataset=dataset, model=model)\n ret = task.train()\n", "import math\n\nimport numpy as np\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\n\nfrom .. import BaseModel, register_model\nfrom cogdl.utils import add_remaining_self_loops\n\nclass GraphAttentionLayer(nn.Module):\n \"\"\"\n Simple GAT layer, similar to https://arxiv.org/abs/1710.10903\n \"\"\"\n\n def __init__(self, in_features, out_features, dropout, alpha, concat=True):\n super(GraphAttentionLayer, self).__init__()\n self.dropout = dropout\n self.in_features = in_features\n self.out_features = out_features\n self.alpha = alpha\n self.concat = concat\n\n self.W = nn.Parameter(torch.zeros(size=(in_features, out_features)))\n nn.init.xavier_uniform_(self.W.data, gain=1.414)\n self.a = nn.Parameter(torch.zeros(size=(2 * out_features, 1)))\n nn.init.xavier_uniform_(self.a.data, gain=1.414)\n\n self.leakyrelu = nn.LeakyReLU(self.alpha)\n\n def forward(self, input, adj):\n h = torch.mm(input, self.W)\n N = h.size()[0]\n\n a_input = torch.cat(\n [h.repeat(1, N).view(N * N, -1), h.repeat(N, 1)], dim=1\n ).view(N, -1, 2 * self.out_features)\n e = self.leakyrelu(torch.matmul(a_input, self.a).squeeze(2))\n\n zero_vec = -9e15 * torch.ones_like(e)\n attention = torch.where(adj > 0, e, zero_vec)\n attention = F.softmax(attention, dim=1)\n attention = F.dropout(attention, self.dropout, training=self.training)\n h_prime = torch.matmul(attention, h)\n\n if self.concat:\n return F.elu(h_prime)\n else:\n return h_prime\n\n def __repr__(self):\n return (\n self.__class__.__name__\n + \" (\"\n + str(self.in_features)\n + \" -> \"\n + str(self.out_features)\n + \")\"\n )\n\n\nclass SpecialSpmmFunction(torch.autograd.Function):\n \"\"\"Special function for only sparse region backpropataion layer.\"\"\"\n\n @staticmethod\n def forward(ctx, indices, values, shape, b):\n assert indices.requires_grad == False\n a = torch.sparse_coo_tensor(indices, values, shape)\n ctx.save_for_backward(a, b)\n ctx.N = shape[0]\n return torch.matmul(a, b)\n\n @staticmethod\n def backward(ctx, grad_output):\n a, b = ctx.saved_tensors\n grad_values = grad_b = None\n if ctx.needs_input_grad[1]:\n grad_a_dense = grad_output.matmul(b.t())\n edge_idx = a._indices()[0, :] * ctx.N + a._indices()[1, :]\n grad_values = grad_a_dense.view(-1)[edge_idx]\n if ctx.needs_input_grad[3]:\n grad_b = a.t().matmul(grad_output)\n return None, grad_values, None, grad_b\n\n\nclass SpecialSpmm(nn.Module):\n def forward(self, indices, values, shape, b):\n return SpecialSpmmFunction.apply(indices, values, shape, b)\n\n\nclass SpGraphAttentionLayer(nn.Module):\n \"\"\"\n Sparse version GAT layer, similar to https://arxiv.org/abs/1710.10903\n \"\"\"\n\n def __init__(self, in_features, out_features, 
dropout, alpha, concat=True):\n super(SpGraphAttentionLayer, self).__init__()\n self.in_features = in_features\n self.out_features = out_features\n self.alpha = alpha\n self.concat = concat\n\n self.W = nn.Parameter(torch.zeros(size=(in_features, out_features)))\n nn.init.xavier_normal_(self.W.data, gain=1.414)\n\n self.a = nn.Parameter(torch.zeros(size=(1, 2 * out_features)))\n nn.init.xavier_normal_(self.a.data, gain=1.414)\n\n self.dropout = nn.Dropout(dropout)\n self.leakyrelu = nn.LeakyReLU(self.alpha)\n self.special_spmm = SpecialSpmm()\n\n def forward(self, input, edge):\n N = input.size()[0]\n\n h = torch.mm(input, self.W)\n # h: N x out\n assert not torch.isnan(h).any()\n\n # Self-attention on the nodes - Shared attention mechanism\n edge_h = torch.cat((h[edge[0, :], :], h[edge[1, :], :]), dim=1).t()\n # edge: 2*D x E\n\n edge_e = torch.exp(-self.leakyrelu(self.a.mm(edge_h).squeeze()))\n assert not torch.isnan(edge_e).any()\n # edge_e: E\n\n e_rowsum = self.special_spmm(\n edge, edge_e, torch.Size([N, N]), torch.ones(size=(N, 1)).to(input.device)\n )\n # e_rowsum: N x 1\n\n edge_e = self.dropout(edge_e)\n # edge_e: E\n\n h_prime = self.special_spmm(edge, edge_e, torch.Size([N, N]), h)\n assert not torch.isnan(h_prime).any()\n # h_prime: N x out\n\n h_prime = h_prime.div(e_rowsum + 1e-8)\n # h_prime: N x out\n assert not torch.isnan(h_prime).any()\n\n if self.concat:\n # if this layer is not last layer,\n return F.elu(h_prime)\n else:\n # if this layer is last layer,\n return h_prime\n\n def __repr__(self):\n return (\n self.__class__.__name__\n + \" (\"\n + str(self.in_features)\n + \" -> \"\n + str(self.out_features)\n + \")\"\n )\n\n\nclass PetarVGAT(BaseModel):\n @staticmethod\n def add_args(parser):\n \"\"\"Add model-specific arguments to the parser.\"\"\"\n # fmt: off\n parser.add_argument(\"--num-features\", type=int)\n parser.add_argument(\"--num-classes\", type=int)\n parser.add_argument(\"--hidden-size\", type=int, default=8)\n parser.add_argument(\"--dropout\", type=float, default=0.6)\n parser.add_argument(\"--alpha\", type=float, default=0.2)\n parser.add_argument(\"--nheads\", type=int, default=8)\n # fmt: on\n\n @classmethod\n def build_model_from_args(cls, args):\n return cls(\n args.num_features,\n args.hidden_size,\n args.num_classes,\n args.dropout,\n args.alpha,\n args.nheads,\n )\n\n def __init__(self, nfeat, nhid, nclass, dropout, alpha, nheads):\n \"\"\"Dense version of GAT.\"\"\"\n super(PetarVGAT, self).__init__()\n self.dropout = dropout\n\n self.attentions = [\n GraphAttentionLayer(nfeat, nhid, dropout=dropout, alpha=alpha, concat=True)\n for _ in range(nheads)\n ]\n for i, attention in enumerate(self.attentions):\n self.add_module(\"attention_{}\".format(i), attention)\n\n self.out_att = GraphAttentionLayer(\n nhid * nheads, nclass, dropout=dropout, alpha=alpha, concat=False\n )\n\n def forward(self, x, adj):\n x = F.dropout(x, self.dropout, training=self.training)\n x = torch.cat([att(x, adj) for att in self.attentions], dim=1)\n x = F.dropout(x, self.dropout, training=self.training)\n x = F.elu(self.out_att(x, adj))\n return F.log_softmax(x, dim=1)\n\n\n@register_model(\"gat\")\nclass PetarVSpGAT(PetarVGAT):\n r\"\"\"The GAT model from the `\"Graph Attention Networks\"\n <https://arxiv.org/abs/1710.10903>`_ paper\n\n Args:\n num_features (int) : Number of input features.\n num_classes (int) : Number of classes.\n hidden_size (int) : The dimension of node representation.\n dropout (float) : Dropout rate for model training.\n alpha (float) : Coefficient 
of leaky_relu.\n nheads (int) : Number of attention heads.\n \"\"\"\n\n def __init__(self, nfeat, nhid, nclass, dropout, alpha, nheads):\n \"\"\"Sparse version of GAT.\"\"\"\n BaseModel.__init__(self)\n self.dropout = dropout\n\n self.attentions = [\n SpGraphAttentionLayer(\n nfeat, nhid, dropout=dropout, alpha=alpha, concat=True\n )\n for _ in range(nheads)\n ]\n for i, attention in enumerate(self.attentions):\n self.add_module(\"attention_{}\".format(i), attention)\n\n self.out_att = SpGraphAttentionLayer(\n nhid * nheads, nclass, dropout=dropout, alpha=alpha, concat=False\n )\n\n def forward(self, x, edge_index):\n edge_index, _ = add_remaining_self_loops(edge_index)\n x = F.dropout(x, self.dropout, training=self.training)\n x = torch.cat([att(x, edge_index) for att in self.attentions], dim=1)\n x = F.dropout(x, self.dropout, training=self.training)\n x = F.elu(self.out_att(x, edge_index))\n return F.log_softmax(x, dim=1)\n \n def loss(self, data):\n return F.nll_loss(\n self.forward(data.x, data.edge_index)[data.train_mask],\n data.y[data.train_mask],\n )\n \n def predict(self, data):\n return self.forward(data.x, data.edge_index)\n" ]
[ [ "torch.cuda.is_available" ], [ "torch.nn.functional.softmax", "torch.mm", "torch.nn.Dropout", "torch.Size", "torch.nn.functional.log_softmax", "torch.nn.functional.dropout", "torch.zeros", "torch.cat", "torch.isnan", "torch.ones", "torch.nn.init.xavier_normal_", "torch.sparse_coo_tensor", "torch.matmul", "torch.nn.LeakyReLU", "torch.where", "torch.nn.init.xavier_uniform_", "torch.nn.functional.elu", "torch.ones_like" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
jammerware/pandashape
[ "37e45e830cf71b28486aa4949f1ba7106ae6ed56" ]
[ "src/pandashape/internal/TransformerExecutor.py" ]
[ "import pandas as pd\nfrom pandas import DataFrame\nfrom ..transformers import Transformer\nfrom ..internal import ColumnResolver, listify\n\n\nclass TransformerExecutor:\n def validate(self, df, columnDefinitions):\n for columnDef in columnDefinitions:\n for transformer in listify(columnDef['transformers']):\n assert(isinstance(transformer, Transformer))\n\n def transform(self, df, transformations):\n # convert the transformations to an array (so people can pass\n # either an array of definitions or just one)\n transformations = listify(transformations)\n\n # validate the call\n self.validate(df, transformations)\n\n # loop and execute the transformations\n col_resolver = ColumnResolver()\n # This df holds each column that undergoes a transformation during this call.\n # At the end, we'll append its columns to the original dataframe.\n df_transformed = DataFrame()\n\n for transformation in transformations:\n # resolve column names (could be Columns.All, a single column name, or an array of them)\n transform_column_names = col_resolver.resolve(transformation['columns'], df)\n\n for transform_column_name in transform_column_names:\n df_transformed[f\"{transform_column_name}_transformed\"] = self.__transformColumn(\n df[transform_column_name],\n listify(transformation['transformers'])\n )\n\n # after we're done transforming, append all transformed columns to the original df\n return pd.concat([df, df_transformed], axis=1)\n\n def __transformColumn(self, column, transformers):\n for transformer in transformers:\n column = transformer.transform(column)\n\n return column\n" ]
[ [ "pandas.concat", "pandas.DataFrame" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [ "0.23", "0.21", "2.0", "1.4", "1.3", "0.19", "1.1", "1.5", "0.24", "0.20", "1.0", "0.25", "1.2" ], "scipy": [], "tensorflow": [] } ]
Beerend/TReNDS
[ "3cbd954881fe0b992e93e1b65d54cf3c9f7f05e3" ]
[ "notebooks/models/resnet.py" ]
[ "import torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nfrom torch.autograd import Variable\nimport math\nfrom functools import partial\n\n__all__ = [\n 'resnet10', 'resnet18', 'resnet34', 'resnet50', 'resnet101',\n 'resnet152', 'resnet200'\n]\n\ndef conv3x3x3(in_planes, out_planes, stride=1, dilation=1):\n # 3x3x3 convolution with padding\n return nn.Conv3d(\n in_planes,\n out_planes,\n kernel_size=3,\n dilation=dilation,\n stride=stride,\n padding=dilation,\n bias=False)\n\ndef downsample_basic_block(x, planes, stride, no_cuda=False):\n out = F.avg_pool3d(x, kernel_size=1, stride=stride)\n zero_pads = torch.Tensor(out.size(0), planes - out.size(1), out.size(2), out.size(3), out.size(4)).zero_()\n if not no_cuda:\n if isinstance(out.data, torch.cuda.FloatTensor):\n zero_pads = zero_pads.cuda()\n\n out = Variable(torch.cat([out.data, zero_pads], dim=1))\n return out\n\nclass BasicBlock(nn.Module):\n expansion = 1\n\n def __init__(self, inplanes, planes, stride=1, dilation=1, downsample=None):\n super(BasicBlock, self).__init__()\n self.conv1 = conv3x3x3(inplanes, planes, stride=stride, dilation=dilation)\n self.bn1 = nn.BatchNorm3d(planes)\n self.relu = nn.ReLU(inplace=True)\n self.conv2 = conv3x3x3(planes, planes, dilation=dilation)\n self.bn2 = nn.BatchNorm3d(planes)\n self.downsample = downsample\n self.stride = stride\n self.dilation = dilation\n\n def forward(self, x):\n residual = x\n out = self.conv1(x)\n out = self.bn1(out)\n out = self.relu(out)\n out = self.conv2(out)\n out = self.bn2(out)\n\n if self.downsample is not None:\n residual = self.downsample(x)\n\n out += residual\n out = self.relu(out)\n return out\n\n\nclass Bottleneck(nn.Module):\n expansion = 4\n\n def __init__(self, inplanes, planes, stride=1, dilation=1, downsample=None):\n super(Bottleneck, self).__init__()\n self.conv1 = nn.Conv3d(inplanes, planes, kernel_size=1, bias=False)\n self.bn1 = nn.BatchNorm3d(planes)\n self.conv2 = nn.Conv3d(\n planes, planes, kernel_size=3, stride=stride, dilation=dilation, padding=dilation, bias=False)\n self.bn2 = nn.BatchNorm3d(planes)\n self.conv3 = nn.Conv3d(planes, planes * 4, kernel_size=1, bias=False)\n self.bn3 = nn.BatchNorm3d(planes * 4)\n self.relu = nn.ReLU(inplace=True)\n self.downsample = downsample\n self.stride = stride\n self.dilation = dilation\n\n def forward(self, x):\n residual = x\n out = self.conv1(x)\n out = self.bn1(out)\n out = self.relu(out)\n\n out = self.conv2(out)\n out = self.bn2(out)\n out = self.relu(out)\n\n out = self.conv3(out)\n out = self.bn3(out)\n\n if self.downsample is not None:\n residual = self.downsample(x)\n\n out += residual\n out = self.relu(out)\n return out\n\nclass ResNet3D(nn.Module):\n\n def __init__(self,\n block,\n layers,\n shortcut_type='B',\n num_class = 5,\n no_cuda=False):\n\n self.inplanes = 64\n self.no_cuda = no_cuda\n super(ResNet3D, self).__init__()\n\n # 3D conv net\n self.conv1 = nn.Conv3d(53, 64, kernel_size=7, stride=(2, 2, 2), padding=(3, 3, 3), bias=False)\n# self.conv1 = nn.Conv3d(1, 64, kernel_size=7, stride=(2, 2, 2), padding=(3, 3, 3), bias=False)\n self.bn1 = nn.BatchNorm3d(64)\n self.relu = nn.ReLU(inplace=True)\n self.maxpool = nn.MaxPool3d(kernel_size=(3, 3, 3), stride=2, padding=1)\n self.layer1 = self._make_layer(block, 64, layers[0], shortcut_type)\n self.layer2 = self._make_layer(\n block, 64*2, layers[1], shortcut_type, stride=2)\n self.layer3 = self._make_layer(\n block, 128*2, layers[2], shortcut_type, stride=2) # stride=1, dilation=2\n self.layer4 = self._make_layer(\n block, 
256*2, layers[3], shortcut_type, stride=2) # stride=1, dilation=4\n\n self.fea_dim = 256*2 * block.expansion\n self.fc = nn.Sequential(nn.Linear(self.fea_dim, num_class, bias=True))\n\n for m in self.modules():\n if isinstance(m, nn.Conv3d):\n m.weight = nn.init.kaiming_normal_(m.weight, mode='fan_out')\n elif isinstance(m, nn.BatchNorm3d):\n m.weight.data.fill_(1)\n m.bias.data.zero_()\n\n def _make_layer(self, block, planes, blocks, shortcut_type, stride=1, dilation=1):\n downsample = None\n if stride != 1 or self.inplanes != planes * block.expansion:\n\n if shortcut_type == 'A':\n downsample = partial(\n downsample_basic_block,\n planes=planes * block.expansion,\n stride=stride,\n no_cuda=self.no_cuda)\n else:\n downsample = nn.Sequential(\n nn.Conv3d(\n self.inplanes,\n planes * block.expansion,\n kernel_size=1,\n stride=stride,\n bias=False), nn.BatchNorm3d(planes * block.expansion))\n\n layers = []\n layers.append(block(self.inplanes, planes, stride=stride, dilation=dilation, downsample=downsample))\n self.inplanes = planes * block.expansion\n for i in range(1, blocks):\n layers.append(block(self.inplanes, planes, dilation=dilation))\n\n return nn.Sequential(*layers)\n\n def forward(self, x):\n x = self.conv1( x)\n x = self.bn1(x)\n x = self.relu(x)\n x = self.maxpool(x)\n x = self.layer1(x)\n x = self.layer2(x)\n x = self.layer3(x)\n x = self.layer4(x)\n\n x = F.adaptive_avg_pool3d(x, (1, 1, 1))\n emb_3d = x.view((-1, self.fea_dim))\n out = self.fc(emb_3d)\n return out\n\n\ndef resnet10(**kwargs):\n \"\"\"Constructs a ResNet-18 model.\n \"\"\"\n model = ResNet3D(BasicBlock, [1, 1, 1, 1],**kwargs)\n return model\n\ndef resnet3d_10(**kwargs):\n \"\"\"Constructs a ResNet-18 model.\n \"\"\"\n model = ResNet3D(BasicBlock, [1, 1, 1, 1], **kwargs)\n return model\n\ndef resnet18(**kwargs):\n \"\"\"Constructs a ResNet-18 model.\n \"\"\"\n model = ResNet3D(BasicBlock, [2, 2, 2, 2], **kwargs)\n return model\n\ndef resnet34(**kwargs):\n \"\"\"Constructs a ResNet-34 model.\n \"\"\"\n model = ResNet3D(BasicBlock, [3, 4, 6, 3], **kwargs)\n return model\n\ndef resnet50(**kwargs):\n \"\"\"Constructs a ResNet-50 model.\n \"\"\"\n model = ResNet3D(Bottleneck, [3, 4, 6, 3], **kwargs)\n return model\n\ndef resnet101(**kwargs):\n \"\"\"Constructs a ResNet-101 model.\n \"\"\"\n model = ResNet3D(Bottleneck, [3, 4, 23, 3], **kwargs)\n return model\n\ndef resnet152(**kwargs):\n \"\"\"Constructs a ResNet-101 model.\n \"\"\"\n model = ResNet3D(Bottleneck, [3, 8, 36, 3], **kwargs)\n return model\n\ndef resnet200(**kwargs):\n \"\"\"Constructs a ResNet-101 model.\n \"\"\"\n model = ResNet3D(Bottleneck, [3, 24, 36, 3], **kwargs)\n return model\n\n\"\"\"\nSizes of tensors during ResNet-10 processing:\nInput [1, 53, 52, 63, 53]\nConv1 [1, 64, 26, 32, 27]\nMaxpool [1, 64, 13, 16, 14]\n\n-- BB r [1, 64, 13, 16, 14]\n-- BB x [1, 64, 13, 16, 14]\nLayer 1 [1, 64, 13, 16, 14]\n\n-- BB r [1, 128, 7, 8, 7] (downsampled)\n-- BB x [1, 128, 7, 8, 7]\nLayer 2 [1, 128, 7, 8, 7]\n\n-- BB r [1, 128, 7, 8, 7] (downsampled)\n-- BB x [1, 128, 7, 8, 7]\nLayer 3 [1, 256, 7, 8, 7]\n\n-- BB r [1, 256, 7, 8, 7] (downsampled)\n-- BB x [1, 256, 7, 8, 7]\nLayer 4 [1, 512, 7, 8, 7]\n\n\"\"\"" ]
[ [ "torch.nn.Sequential", "torch.cat", "torch.nn.functional.adaptive_avg_pool3d", "torch.nn.MaxPool3d", "torch.nn.Conv3d", "torch.nn.Linear", "torch.nn.functional.avg_pool3d", "torch.nn.ReLU", "torch.nn.BatchNorm3d", "torch.nn.init.kaiming_normal_" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
Data-is-life/lots-of-data
[ "932e605ca2b880c7aa9e23e3ea0861c802d00b80", "932e605ca2b880c7aa9e23e3ea0861c802d00b80", "932e605ca2b880c7aa9e23e3ea0861c802d00b80" ]
[ "src/old_files/dummies_bins_test_train_cv.py", "src/old_files/tf_model.py", "src/old_files/keras_grid_search_dtw.py" ]
[ "# Author: Mohit Gangwani\n# Date: 11/05/2018\n# Git-Hub: Data-is-Life\n\nimport pandas as pd\nimport os\nimport pickle\n# import numpy as np\nfrom random import randint\nfrom sklearn.preprocessing import PolynomialFeatures as MMXs\nfrom sklearn.model_selection import train_test_split\n\n\ndef save_pickle(something, path):\n if not os.path.exists(os.path.dirname(path)):\n os.makedirs(os.path.dirname(path))\n with open(path, 'wb') as fh:\n pickle.dump(something, fh, pickle.DEFAULT_PROTOCOL)\n\n\ndef initial_df():\n '''Input:\n file_name = name & location of the csv file\n\n Removes all the games where the result was a draw.\n\n Returns:\n df = initial dataframe\n df_len = length of dataframe'''\n\n df = pd.read_csv('../data/use_for_predictions.csv')\n for i in df.index:\n if df.loc[i, 'result'] == 0.5 and df.loc[i, 'color'] == 1.0:\n df.loc[i, 'result'] = 1.0 if df.loc[i, 'diff'] <= -30 else 0.0\n elif df.loc[i, 'result'] == 0.5 and df.loc[i, 'color'] == 0.0:\n df.loc[i, 'result'] = 1.0 if df.loc[i, 'diff'] <= 30 else 0.0\n\n df = df.loc[df['result'] != 0.5].copy()\n df.reset_index(inplace=True)\n df.drop(columns=['index'], inplace=True)\n\n return df\n\n# df = initial_df()\n\ndef bin_df_get_y(df):\n '''Input:\n df = clean dataframe\n\n This creates y array, converts negative values to positive by taking the\n absolute of the minimum and adding it to all elo differences, and rounds\n elo, opp_elo, and difference to nearest 10.\n\n Returns:\n df = cleaner dataframe\n y = results as a 1D numpy array'''\n\n y = df['result'].values\n\n df.loc[:, 'diff'] = round(df['diff'] / 10) * 10\n df.loc[:, 'elo'] = round(df['elo'] / 10) * 10\n df.loc[:, 'opp_elo'] = round(df['opp_elo'] / 10) * 10\n# a = pd.get_dummies(df.weekday, prefix='wd', drop_first=True)\n# df = pd.concat([df, a], axis=1, sort=False)\n df.drop(columns=['result', 'day', 'weekday', 'day_game_num', 'color',\n 'start_time'], inplace=True)\n\n# df.rename(columns={'start_time': 'time', 'day_game_num': 'game_num'},\n# inplace=True)\n\n return df, y\n\n\ndef partial_df(df, perc, start_row_order, cv, rando):\n '''Input:\n df = clean dataframe\n perc = split the dataframe in fraction\n\n Using all the games will not produce the best results, since the players\n playing style and level of play changes over time. 
Hence, this is used to\n get a part of the dataframe.\n\n eg: perc = 1/4, len(df) = 2000.\n This will give last 25% (500 rows) of the dataframe.\n\n Returns:\n df_s = smaller dataframe'''\n\n if int(perc) == 1:\n return df\n\n DFLN = int(len(df) * perc)\n x = len(df) - DFLN - 1\n LNDF = len(df)\n\n if rando.lower() == 'y':\n start_row = randint(int(x*(cv-1)/cv), x)\n end_row = (start_row + DFLN)\n\n if end_row >= (len(df) - 1):\n df_s = df[start_row:].copy()\n else:\n df_s = df[start_row: (end_row + 1)].copy()\n\n elif start_row_order >= 0:\n start_row = int(((LNDF - DFLN) / cv) * start_row_order)\n end_row = (start_row + DFLN)\n if end_row >= (len(df) - 1):\n df_s = df[start_row:].copy()\n else:\n df_s = df[start_row: (end_row + 1)].copy()\n\n else:\n df_s = df[x:].copy()\n\n df_s.reset_index(inplace=True)\n df_s.drop(columns=['index'])\n\n return df_s\n\n\ndef xy_tt(df, splt):\n '''Input:\n X = array used for prediction\n y = results\n splt = desired split for X and y\n\n If a number less than 1 is given for split, the split is considered for\n training data percentage.\n If a number greater than 1 is given for split, the split is considered for\n number of test data samples.\n\n Example:\n Total # of samples = 1,000\n\n splt=0.90\n training data = 900 samples, test data = 100 samples\n\n splt=100\n training data = 900 samples, test data = 100 samples\n\n Returns:\n X_train = array to train\n X_test = array to test\n y_train = results to train\n y_test = results to test predictions\n X = array used for prediction'''\n\n df, y = bin_df_get_y(df)\n X = df.values\n\n X_train, X_test, y_train, y_test = train_test_split(\n X, y, test_size=splt, shuffle=False)\n\n X_train = X_train.astype(float)\n X_test = X_test.astype(float)\n y_train = y_train.astype(int)\n y_test = y_test.astype(int)\n\n SCaler = MMXs().fit(X_train)\n X_train = SCaler.transform(X_train)\n X_test = SCaler.transform(X_test)\n save_pickle(SCaler, '../data/scale.pickle')\n\n return X_train, X_test, y_train, y_test\n\n\n# X_train, X_test, y_train, y_test, X = xy_tt(df, .1)\n\n\n# def xy_custom(df, y, splt, cols):\n# '''Input:\n# df = cleaned dataframe\n# y = all result values in an Numpy Array\n# splt = Split size for test set in % as 0.80 or # as 200\n# cols = list of columns to create X values to predict over\n#\n# This function creates X array, X_train, X_test, y_train, and y_test.\n# If the columns are not elo difference or color, it creates dummy columns.\n#\n# Returns:\n# X = values to run predictions\n# X_train = training prediction set\n# X_test = testing prediction set\n# y_train = training result set\n# y_test = testing result set'''\n#\n# X = df.values\n#\n# if X.shape[1] <= 1:\n# X = X.reshape(-1, 1)\n#\n# X_train, X_test, y_train, y_test = xy_tt(X, y, splt)\n#\n# return X_train, X_test, y_train, y_test, X\n", "import torch\nimport torch.utils.data\nfrom torch import device\nimport torch.nn as nn\nimport torch.optim as optim\nfrom torch.autograd import Variable\nfrom sklearn.preprocessing import MaxAbsScaler as RAS\nfrom col_info import all_cols\nfrom dummies_bins_test_train_cv import initial_df\nfrom dummies_bins_test_train_cv import bin_df_get_y\nfrom dummies_bins_test_train_cv import partial_df\nfrom dummies_bins_test_train_cv import xy_custom\ndevice = device(\"cuda:0\")\n\n\nclass SAE(nn.Module):\n def __init__(self, ):\n super(SAE, self).__init__()\n self.fc1 = nn.Linear(len_X_h, 160)\n self.fc2 = nn.Linear(160, 320)\n self.fcd = nn.Dropout(p=0.1)\n self.fc3 = nn.Linear(320, 80)\n self.fc4 = nn.Linear(80, 1)\n 
self.activation = nn.ReLU()\n\n def forward(self, x):\n x = self.activation(self.fc1(x))\n x = self.fcd(x)\n x = self.activation(self.fc2(x))\n x = self.activation(self.fc3(x))\n x = self.fc4(x)\n return x\n\n\ndf = initial_df('../data/use_for_predictions.csv')\n\ndf_s = partial_df(df, 1, 0, 10, 'n')\ndf_s, y = bin_df_get_y(df_s)\nclm = all_cols[0]\nprint(clm)\nX_train, X_test, y_train, y_test, X = xy_custom(df_s, y, 0.90, clm)\nras = RAS().fit(X_train)\nX_train = ras.transform(X_train)\nX_test = ras.transform(X_test)\nly = (len(y_train))\ny_train.resize(ly, 1)\ny_train = torch.as_tensor(y_train, dtype=torch.float)\ny_train = y_train.cuda().to(device)\nX_train = torch.FloatTensor(X_train).cuda().to(device)\nlen_X_h = len(X_train[0])\nlen_X = len(X_train)\nsae = SAE().cuda().to(device)\ncriterion = nn.BCEWithLogitsLoss()\noptimizer = optim.Adagrad(sae.parameters(), lr=1e-3, weight_decay=0.115)\nepochs = 200\ntrain_loss = 0\ns = 0.\nfor epoch in range(epochs):\n for num in range(len_X):\n input = Variable(X_train)\n target = y_train\n if torch.sum(target.data > 0) > 0:\n output = sae(input)\n target.require_grad = False\n output[target == 0] = 0\n loss = criterion(output, target)\n# mean_corrector = len_X/float(torch.sum(target.data > 0) + 1e-10)\n # optimizer.zero_grad()\n loss.backward()\n train_loss += loss.data\n s += 1.\n optimizer.step()\n print(f'epoch: {epoch+1} loss: {train_loss/s}')\n\ns = 0.\nwhile s <= 20:\n test_loss = 0\n y_test = y_test[int(ly*s/21):ly]\n ly = (len(y_test))\n y_test.resize(ly, 1)\n y_test = torch.as_tensor(y_test, dtype=torch.float)\n y_test = y_test.cuda().to(device)\n X_test = torch.FloatTensor(X_test).cuda().to(device)\n len_X = len(X_test)\n for num in range(len_X):\n input = Variable(X_test)\n target = Variable(y_test)\n if torch.sum(target.data > 0) > 0:\n output = sae(input)\n target.require_grad = False\n output[target == 0] = 0\n loss = criterion(output, target)\n# mean_corrector = len_X/float(torch.sum(target.data > 0.5) + 1e-10)\n test_loss += loss.data\n s += 1.\n print(f'test loss: {test_loss/s}')\n\nprint(torch.sum(target.data > 0))\n\nprint(loss.backward())\n# Print model's state_dict\nprint(\"Model's state_dict:\")\nfor param_tensor in sae.state_dict():\n print(param_tensor, \"\\t\", sae.state_dict()[param_tensor].size())\n\n# Print optimizer's state_dict\nprint(\"Optimizer's state_dict:\")\nfor var_name in optimizer.state_dict():\n print(var_name, \"\\t\", optimizer.state_dict()[var_name])\n\ntorch.save(sae.state_dict(), '../data/tm-bce')\n", "# Author: Mohit Gangwani\n# Date: 11/21/2018\n# Git-Hub: Data-is-Life\n\nimport pandas as pd\nimport numpy as np\nfrom keras.models import Sequential\nfrom keras.layers import Dense\n\nfrom pandas import get_dummies as gd\n\nfrom sklearn.preprocessing import StandardScaler\nfrom sklearn.model_selection import GridSearchCV\n\nfrom keras.wrappers.scikit_learn import KerasClassifier\nimport keras\nimport tensorflow as tf\nsess = tf.Session(config=tf.ConfigProto(log_device_placement=True))\nkeras.backend.device_lib.list_local_devices(\n tf.ConfigProto(log_device_placement=True))\n\n\ndef clean_df_y(df):\n '''\n Input:\n df = clean dataframe\n\n Creates bins for all differences. 
All bins labels are roughly calculated\n based on winning probability from original ELO equation:\n\n 1/(1+10^m), where m = (elo difference)/400\n Also, bins the start time\n Returns:\n\n df = cleaner dataframe\n y = results as a 1D numpy array\n '''\n\n dif_bn = list(range(-1000, -600, 100))\n dif_bn.extend(list(range(-600, -250, 50)))\n dif_bn.extend(list(range(-250, -200, 25)))\n dif_bn.extend(list(range(-200, -100, 10)))\n dif_bn.extend(list(range(-100, 105, 5)))\n dif_bn.extend(list(range(110, 210, 10)))\n dif_bn.extend(list(range(225, 325, 25)))\n dif_bn.extend(list(range(350, 550, 50)))\n dif_bn.extend(list(range(600, 1100, 100)))\n\n dif_lbl = list(range(8))\n dif_lbl.extend(list(range(8, 23, 2)))\n dif_lbl.extend(list(range(23, 79)))\n dif_lbl.extend(list(range(80, 93, 2)))\n dif_lbl.extend(list(range(93, 100)))\n\n df.loc[:, 'diff_bin'] = pd.cut(df['diff'], bins=dif_bn, labels=dif_lbl)\n df.loc[:, 'time_bin'] = pd.cut(df['start_time'], bins=24, labels=False)\n\n y = np.array(df['result'])\n\n df.drop(columns=['result', 'opp_elo', 'elo', 'start_time', 'diff', 'day'],\n inplace=True)\n\n return df, y\n\n\ndef xy_tt(X, y, splt):\n '''\n Input:\n X = array used for prediction\n y = results\n splt = desired split for X and y\n\n If a number less than 1 is given for split, the split is considered for\n training data percentage.\n If a number greater than 1 is given for split, the split is considered for\n number of test data samples.\n\n Example:\n Total # of samples = 1,000\n\n splt=0.90\n training data = 900 samples, test data = 100 samples\n\n splt=100\n training data = 900 samples, test data = 100 samples\n\n Returns:\n X_train = array to train\n X_test = array to test\n y_train = results to train\n y_test = results to test predictions\n X = array used for prediction\n '''\n\n if splt > 1:\n splitze = len(X) - int(splt)\n else:\n splitze = int(len(X) * splt)\n\n X_train = X[:splitze]\n y_train = y[:splitze]\n X_test = X[splitze:]\n y_test = y[splitze:]\n\n print(f'y Shape: {y.shape}')\n print(f'X Shape: {X.shape}')\n print(f'X_train Shape: {X_train.shape}')\n print(f'X_test Shape: {X_test.shape}')\n print(f'y_train Shape: {y_train.shape}')\n print(f'y_test Shape: {y_test.shape}')\n\n return X_train, X_test, y_train, y_test\n\n\ndef xy_custom(df, y, splt, cols):\n '''\n Input:\n df = cleaned dataframe\n y = all result values in an Numpy Array\n splt = Split size for test set in % as 0.80 or # as 200\n cols = list of columns to create X values to predict over\n\n This function creates X array, X_train, X_test, y_train, and y_test.\n If the columns are not elo difference or color, it creates dummy columns.\n\n Returns:\n X = values to run predictions \n X_train = training prediction set\n X_test = testing prediction set\n y_train = training result set\n y_test = testing result set\n '''\n\n df_n = df[cols].copy()\n\n if len(cols) == 1:\n X = df_n.values\n\n elif len(cols) == 2:\n df_n = gd(df_n, prefix='a', drop_first=True, columns=[cols[1]])\n X = df_n.values\n\n elif len(cols) == 3:\n df_n = gd(df_n, prefix='a', drop_first=True, columns=[cols[1]])\n df_n = gd(df_n, prefix='b', drop_first=True, columns=[cols[2]])\n X = df_n.values\n\n elif len(cols) == 4:\n df_n = gd(df_n, prefix='a', drop_first=True, columns=[cols[1]])\n df_n = gd(df_n, prefix='b', drop_first=True, columns=[cols[2]])\n df_n = gd(df_n, prefix='c', drop_first=True, columns=[cols[3]])\n X = df_n.values\n\n else:\n df_n = gd(df_n, prefix='a', drop_first=True, columns=[cols[2]])\n df_n = gd(df_n, prefix='b', 
drop_first=True, columns=[cols[3]])\n df_n = gd(df_n, prefix='c', drop_first=True, columns=[cols[4]])\n X = df_n.values\n\n X_train, X_test, y_train, y_test = xy_tt(X, y, splt)\n\n X_train = X_train.astype('float64')\n X_test = X_test.astype('float64')\n y_train = y_train.astype('int64')\n y_test = y_test.astype('int64')\n\n return X_train, X_test, y_train, y_test, X\n\n\ndef gc_classifier(optm, lss):\n '''\n Input:\n optm = Optimizer\n lss = Loss Function\n\n Creates Keras Sequential Model.\n input layer: 64 units, softmax activation\n hidden layer # 1: 128 units, relu activation\n hidden layer # 2: 32 units, softmax activation\n output layer: sigmoid activation\n metrics: 'accuracy'\n\n Returns:\n classifier = Created model\n '''\n\n classifier = Sequential()\n classifier.add(Dense(units=64, activation='softmax', input_dim=X.shape[1]))\n classifier.add(Dense(units=128, activation='relu'))\n classifier.add(Dense(units=32, activation='softmax'))\n classifier.add(Dense(units=1, activation='sigmoid'))\n classifier.compile(\n optimizer=optm, loss=lss, metrics=['accuracy'])\n\n return classifier\n\n\ndf = pd.read_csv('~/lots-of-data/data/use_for_predictions.csv')\ndf = df.loc[df['result'] != 0.5].copy()\ndf.reset_index(inplace=True)\ndf.drop(columns=['index'], inplace=True)\n\ndf, y = clean_df_y(df)\nX_train, X_test, y_train, y_test, X = xy_custom(\n df, y, 100, ['diff_bin', 'time_bin', 'game_time', 'weekday'])\nstd_sclr = StandardScaler()\nX_train = std_sclr.fit_transform(X_train)\nX_test = std_sclr.fit_transform(X_test)\n\nclassifier = KerasClassifier(build_fn=gc_classifier)\n\nparameters = {'batch_size': [8, 20, 44], 'nb_epoch': [64, 128, 256],\n 'optm': ['nadam', 'adagrad', 'rmsprop', 'adam'],\n 'lss': ['mae', 'mse', 'binary_crossentropy']}\n\ngrid_search = GridSearchCV(classifier, parameters, scoring='accuracy',\n cv=7, return_train_score=True, n_jobs=-2)\n\ngrid_search = grid_search.fit(X=X_train, y=y_train, verbose=1)\n" ]
[ [ "sklearn.preprocessing.PolynomialFeatures", "pandas.read_csv", "sklearn.model_selection.train_test_split" ], [ "torch.nn.Dropout", "sklearn.preprocessing.MaxAbsScaler", "torch.sum", "torch.autograd.Variable", "torch.nn.Linear", "torch.nn.BCEWithLogitsLoss", "torch.FloatTensor", "torch.nn.ReLU", "torch.as_tensor" ], [ "sklearn.model_selection.GridSearchCV", "pandas.read_csv", "tensorflow.ConfigProto", "pandas.cut", "sklearn.preprocessing.StandardScaler", "numpy.array", "pandas.get_dummies" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [ "2.0", "1.4", "1.1", "1.5", "1.2", "1.3" ], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [ "2.0", "1.4", "1.1", "1.5", "1.2", "1.3" ], "scipy": [], "tensorflow": [] } ]
tracysaber/terngrad
[ "cd7e5f1c59e87712a208fc1351defa029a340146", "cd7e5f1c59e87712a208fc1351defa029a340146" ]
[ "slim/datasets/download_convert_and_shard_cifar10.py", "terngrad/inception/inception_distributed_train.py" ]
[ "# Copyright 2016 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\nr\"\"\"Downloads and converts cifar10 data to TFRecords of TF-Example protos.\n\nThis module downloads the cifar10 data, uncompresses it, reads the files\nthat make up the cifar10 data and creates two TFRecord datasets: one for train\nand one for test. Each TFRecord dataset is comprised of a set of TF-Example\nprotocol buffers, each of which contain a single image and label.\n\nThe script should take several minutes to run.\n\n\"\"\"\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport cPickle\nimport os\nimport sys\nimport tarfile\n\nimport numpy as np\nimport math\nfrom six.moves import urllib\nimport tensorflow as tf\n\nfrom datasets import dataset_utils\n\ntf.app.flags.DEFINE_integer('train_shards', 1000,\n 'Number of shards in training TFRecord files.')\nFLAGS = tf.app.flags.FLAGS\n\n# The URL where the CIFAR data can be downloaded.\n_DATA_URL = 'https://www.cs.toronto.edu/~kriz/cifar-10-python.tar.gz'\n\n# The number of training files.\n_NUM_TRAIN_FILES = 5\n\n# The number of training images.\n_NUM_TRAIN_IMAGES = 50000\n\n# The height and width of each image.\n_IMAGE_SIZE = 32\n\n# The names of the classes.\n_CLASS_NAMES = [\n 'airplane',\n 'automobile',\n 'bird',\n 'cat',\n 'deer',\n 'dog',\n 'frog',\n 'horse',\n 'ship',\n 'truck',\n]\n\n\ndef _add_to_tfrecord(filenames, name, dataset_dir):\n \"\"\"Loads data from the cifar10 pickle files and writes files to a TFRecord.\n\n Args:\n filename: The filename of the cifar10 pickle file.\n name: name of dataset -- 'train' or 'test'.\n offset: An offset into the absolute number of images previously written.\n\n Returns:\n The new offset.\n \"\"\"\n assert _NUM_TRAIN_IMAGES % FLAGS.train_shards == 0\n offset = 0\n shard = 0\n images_per_shard = _NUM_TRAIN_IMAGES / FLAGS.train_shards\n\n if 'train' == name:\n record_filename = _get_output_filename(dataset_dir, name, shard, FLAGS.train_shards)\n elif 'test' == name:\n record_filename = _get_output_filename(dataset_dir, name)\n else:\n raise ValueError('Illegal dataset name')\n\n tfrecord_writer = tf.python_io.TFRecordWriter(record_filename)\n\n for filename in filenames:\n with tf.gfile.Open(filename, 'r') as f:\n data = cPickle.load(f)\n\n images = data['data']\n num_images = images.shape[0]\n\n images = images.reshape((num_images, 3, 32, 32))\n labels = data['labels']\n\n with tf.Graph().as_default():\n image_placeholder = tf.placeholder(dtype=tf.uint8)\n encoded_image = tf.image.encode_png(image_placeholder)\n\n with tf.Session('') as sess:\n\n for j in range(num_images):\n sys.stdout.write('\\r>> Reading file [%s] image %d' % (\n filename, offset + 1))\n sys.stdout.flush()\n\n if ('train' == name) and ( math.floor(offset / images_per_shard) > shard) :\n tfrecord_writer.close()\n shard = shard + 1\n record_filename = 
_get_output_filename(dataset_dir, name, shard, FLAGS.train_shards)\n tfrecord_writer = tf.python_io.TFRecordWriter(record_filename)\n\n image = np.squeeze(images[j]).transpose((1, 2, 0))\n label = labels[j]\n\n png_string = sess.run(encoded_image,\n feed_dict={image_placeholder: image})\n\n example = dataset_utils.image_to_tfexample(\n png_string, 'png', _IMAGE_SIZE, _IMAGE_SIZE, label, _CLASS_NAMES[label])\n tfrecord_writer.write(example.SerializeToString())\n offset = offset + 1\n\n tfrecord_writer.close()\n return offset\n\n\ndef _get_output_filename(dataset_dir, split_name, shard=0, num_shards=1):\n \"\"\"Creates the output filename.\n\n Args:\n dataset_dir: The dataset directory where the dataset is stored.\n split_name: The name of the train/test split.\n\n Returns:\n An absolute file path.\n \"\"\"\n return '%s/%s-%.5d-of-%.5d' % (dataset_dir, split_name, shard, num_shards)\n\n\ndef _download_and_uncompress_dataset(dataset_dir):\n \"\"\"Downloads cifar10 and uncompresses it locally.\n\n Args:\n dataset_dir: The directory where the temporary files are stored.\n \"\"\"\n filename = _DATA_URL.split('/')[-1]\n filepath = os.path.join(dataset_dir, filename)\n\n if not os.path.exists(filepath):\n def _progress(count, block_size, total_size):\n sys.stdout.write('\\r>> Downloading %s %.1f%%' % (\n filename, float(count * block_size) / float(total_size) * 100.0))\n sys.stdout.flush()\n filepath, _ = urllib.request.urlretrieve(_DATA_URL, filepath, _progress)\n print()\n statinfo = os.stat(filepath)\n print('Successfully downloaded', filename, statinfo.st_size, 'bytes.')\n tarfile.open(filepath, 'r:gz').extractall(dataset_dir)\n\n\ndef _clean_up_temporary_files(dataset_dir):\n \"\"\"Removes temporary files used to create the dataset.\n\n Args:\n dataset_dir: The directory where the temporary files are stored.\n \"\"\"\n filename = _DATA_URL.split('/')[-1]\n filepath = os.path.join(dataset_dir, filename)\n tf.gfile.Remove(filepath)\n\n tmp_dir = os.path.join(dataset_dir, 'cifar-10-batches-py')\n tf.gfile.DeleteRecursively(tmp_dir)\n\n\ndef run(dataset_dir):\n \"\"\"Runs the download and conversion operation.\n\n Args:\n dataset_dir: The dataset directory where the dataset is stored.\n \"\"\"\n if not tf.gfile.Exists(dataset_dir):\n tf.gfile.MakeDirs(dataset_dir)\n\n dataset_utils.download_and_uncompress_tarball(_DATA_URL, dataset_dir)\n\n # First, process the training data:\n #with tf.python_io.TFRecordWriter(training_filename) as tfrecord_writer:\n filenames = []\n for i in range(_NUM_TRAIN_FILES):\n filenames.append(os.path.join(dataset_dir,\n 'cifar-10-batches-py',\n 'data_batch_%d' % (i + 1))) # 1-indexed.\n _add_to_tfrecord(filenames, 'train', dataset_dir)\n\n # Next, process the testing data:\n #with tf.python_io.TFRecordWriter(testing_filename) as tfrecord_writer:\n filenames = []\n filenames.append( os.path.join(dataset_dir,\n 'cifar-10-batches-py',\n 'test_batch'))\n _add_to_tfrecord(filenames, 'test', dataset_dir)\n\n # Finally, write the labels file:\n labels_to_class_names = dict(zip(range(len(_CLASS_NAMES)), _CLASS_NAMES))\n dataset_utils.write_label_file(labels_to_class_names, dataset_dir)\n\n _clean_up_temporary_files(dataset_dir)\n print('\\nFinished converting the Cifar10 dataset!')\n", "# Copyright 2016 Google Inc. 
All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"A library to train Inception using multiple replicas with synchronous update.\n\nPlease see accompanying README.md for details and instructions.\n\"\"\"\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nfrom datetime import datetime\nimport os.path\nimport time\n\nimport numpy as np\nimport tensorflow as tf\n\nfrom inception import image_processing\nfrom inception import inception_model as inception\nfrom inception.slim import slim\nimport inception.bingrad_common as bingrad_common\n\nFLAGS = tf.app.flags.FLAGS\n\ntf.app.flags.DEFINE_string('job_name', '', 'One of \"ps\", \"worker\"')\ntf.app.flags.DEFINE_string('ps_hosts', '',\n \"\"\"Comma-separated list of hostname:port for the \"\"\"\n \"\"\"parameter server jobs. e.g. \"\"\"\n \"\"\"'machine1:2222,machine2:1111,machine2:2222'\"\"\")\ntf.app.flags.DEFINE_string('worker_hosts', '',\n \"\"\"Comma-separated list of hostname:port for the \"\"\"\n \"\"\"worker jobs. e.g. \"\"\"\n \"\"\"'machine1:2222,machine2:1111,machine2:2222'\"\"\")\n\ntf.app.flags.DEFINE_string('train_dir', '/tmp/dataset_distributed_train',\n \"\"\"Directory where to write event logs \"\"\"\n \"\"\"and checkpoint.\"\"\")\ntf.app.flags.DEFINE_string('subset', 'train', 'Either \"train\", \"validation\" or \"test\".')\ntf.app.flags.DEFINE_boolean('log_device_placement', False,\n 'Whether to log device placement.')\n\n# Task ID is used to select the chief and also to access the local_step for\n# each replica to check staleness of the gradients in SyncReplicasOptimizer.\ntf.app.flags.DEFINE_integer(\n 'task_id', 0, 'Task ID of the worker/replica running the training.')\n\n# More details can be found in the SyncReplicasOptimizer class:\n# tensorflow/python/training/sync_replicas_optimizer.py\ntf.app.flags.DEFINE_integer('num_replicas_to_aggregate', -1,\n \"\"\"Number of gradients to collect before \"\"\"\n \"\"\"updating the parameters.\"\"\")\ntf.app.flags.DEFINE_integer('save_interval_secs', 10 * 60,\n 'Save interval seconds.')\ntf.app.flags.DEFINE_integer('save_summaries_secs', 180,\n 'Save summaries interval seconds.')\n\n# **IMPORTANT**\n# Please note that this learning rate schedule is heavily dependent on the\n# hardware architecture, batch size and any changes to the model architecture\n# specification. Selecting a finely tuned learning rate schedule is an\n# empirical process that requires some experimentation. 
Please see README.md\n# more guidance and discussion.\n#\ntf.app.flags.DEFINE_float('initial_learning_rate', 0.01,\n 'Initial learning rate.')\ntf.app.flags.DEFINE_float('num_epochs_per_decay', 20.0,\n 'Epochs after which learning rate decays.')\ntf.app.flags.DEFINE_float('learning_rate_decay_factor', 0.1,\n 'Learning rate decay factor.')\n\ntf.app.flags.DEFINE_string('optimizer', 'momentum',\n \"\"\"The optimizer of SGD (momentum, adam, gd, rmsprop).\"\"\")\ntf.app.flags.DEFINE_float('momentum', 0.9,\n \"\"\"The momentum value of optimizer.\"\"\")\ntf.app.flags.DEFINE_string('learning_rate_decay_type','exponential',\n 'Specifies how the learning rate is decayed. One of \"fixed\", \"exponential\",'\n ' or \"polynomial\"')\n\n# Configurations for BinGrad\ntf.app.flags.DEFINE_integer('grad_bits', 32,\n \"\"\"The number of gradient bits.\"\"\")\ntf.app.flags.DEFINE_float('clip_factor', 0.0,\n \"\"\"The factor of stddev to clip gradients.\"\"\")\ntf.app.flags.DEFINE_integer('floating_grad_epoch', 0,\n \"\"\"Performing floating gradients every # epochs. 0 means bingrad is always used.\"\"\")\ntf.app.flags.DEFINE_integer('save_tower', -1,\n \"\"\"Save the variables in a specific tower. -1 refers all towers\"\"\")\ntf.app.flags.DEFINE_bool('use_encoding', False,\n \"\"\"If use encoder-decoder to communicate. Current implementation is NOT efficient.\"\"\")\n\ntf.app.flags.DEFINE_bool('benchmark_mode', False,\n \"\"\"benchmarking mode to test the scalability.\"\"\")\n\n# Constants dictating the learning rate schedule.\nRMSPROP_DECAY = 0.9 # Decay term for RMSProp.\n#RMSPROP_MOMENTUM = 0.9 # Momentum in RMSProp.\nRMSPROP_EPSILON = 1.0 # Epsilon term for RMSProp.\n\n\ndef train(target, dataset, cluster_spec):\n \"\"\"Train Inception on a dataset for a number of steps.\"\"\"\n # Number of workers and parameter servers are inferred from the workers and ps\n # hosts string.\n num_workers = len(cluster_spec.as_dict()['worker'])\n num_parameter_servers = len(cluster_spec.as_dict()['ps'])\n # If no value is given, num_replicas_to_aggregate defaults to be the number of\n # workers.\n if FLAGS.num_replicas_to_aggregate == -1:\n num_replicas_to_aggregate = num_workers\n else:\n num_replicas_to_aggregate = FLAGS.num_replicas_to_aggregate\n\n # Both should be greater than 0 in a distributed training.\n assert num_workers > 0 and num_parameter_servers > 0, (' num_workers and '\n 'num_parameter_servers'\n ' must be > 0.')\n\n # Choose worker 0 as the chief. Note that any worker could be the chief\n # but there should be only one chief.\n is_chief = (FLAGS.task_id == 0)\n\n # Ops are assigned to worker by default.\n with tf.device('/job:worker/task:%d' % FLAGS.task_id):\n # Variables and its related init/assign ops are assigned to ps.\n with slim.scopes.arg_scope(\n [slim.variables.variable, slim.variables.global_step],\n device=slim.variables.VariableDeviceChooser(num_parameter_servers)):\n # Create a variable to count the number of train() calls. 
This equals the\n      # number of updates applied to the variables.\n      global_step = slim.variables.global_step()\n\n      # Calculate the learning rate schedule.\n      num_batches_per_epoch = (dataset.num_examples_per_epoch() /\n                               FLAGS.batch_size)\n      # Decay steps need to be divided by the number of replicas to aggregate.\n      decay_steps = int(num_batches_per_epoch * FLAGS.num_epochs_per_decay /\n                        num_replicas_to_aggregate)\n\n      # Decay the learning rate exponentially based on the number of steps.\n      if ('fixed' == FLAGS.learning_rate_decay_type or 'adam' == FLAGS.optimizer):\n        lr = FLAGS.initial_learning_rate\n      elif 'exponential' == FLAGS.learning_rate_decay_type:\n        lr = tf.train.exponential_decay(FLAGS.initial_learning_rate,\n                                        global_step,\n                                        decay_steps,\n                                        FLAGS.learning_rate_decay_factor,\n                                        staircase=True)\n      elif 'polynomial' == FLAGS.learning_rate_decay_type:\n        lr = tf.train.polynomial_decay(FLAGS.initial_learning_rate,\n                                       global_step,\n                                       FLAGS.max_steps,\n                                       end_learning_rate=0.0,\n                                       power=0.5)\n      else:\n        raise ValueError('Wrong learning_rate_decay_type!')\n\n      # Add a summary to track the learning rate.\n      tf.summary.scalar('learning_rate', lr)\n\n      # Create an optimizer that performs gradient descent.\n      if ('gd' == FLAGS.optimizer):\n        opt = tf.train.GradientDescentOptimizer(lr)\n      elif ('momentum' == FLAGS.optimizer):\n        opt = tf.train.MomentumOptimizer(lr, FLAGS.momentum)\n      elif ('adam' == FLAGS.optimizer):\n        opt = tf.train.AdamOptimizer(lr)\n      elif ('rmsprop' == FLAGS.optimizer):\n        opt = tf.train.RMSPropOptimizer(lr, RMSPROP_DECAY,\n                                        momentum=FLAGS.momentum,\n                                        epsilon=RMSPROP_EPSILON)\n      else:\n        raise ValueError(\"Wrong optimizer!\")\n\n      images, labels = image_processing.distorted_inputs(\n          dataset,\n          batch_size=FLAGS.batch_size,\n          num_preprocess_threads=FLAGS.num_preprocess_threads)\n\n      # Number of classes in the Dataset label set plus 1.\n      # Label 0 is reserved for an (unused) background class.\n      if FLAGS.dataset_name == 'imagenet':\n        num_classes = dataset.num_classes() + 1\n      else:\n        num_classes = dataset.num_classes()\n      logits = inception.inference(images, num_classes, net=FLAGS.net, for_training=True, scope=None)\n      # Add classification loss.\n      inception.loss(logits, labels, aux_logits=('inception_v3' == FLAGS.net))\n\n      # Gather all of the losses, including regularization losses.\n      losses = tf.get_collection(slim.losses.LOSSES_COLLECTION)\n      losses += tf.get_collection(tf.GraphKeys.REGULARIZATION_LOSSES)\n\n      total_loss = tf.add_n(losses, name='total_loss')\n\n      if is_chief:\n        # Compute the moving average of all individual losses and the\n        # total loss.\n        loss_averages = tf.train.ExponentialMovingAverage(0.9, name='avg')\n        loss_averages_op = loss_averages.apply(losses + [total_loss])\n\n        # Attach a scalar summary to all individual losses and the total loss;\n        # do the same for the averaged version of the losses.\n        for l in losses + [total_loss]:\n          loss_name = l.op.name\n          # Name each loss as '(raw)' and name the moving average version of the\n          # loss as the original loss name.\n          tf.summary.scalar(loss_name + '_raw', l)\n          tf.summary.scalar(loss_name, loss_averages.average(l))\n\n        # Add a dependency to compute loss_averages.\n        with tf.control_dependencies([loss_averages_op]):\n          total_loss = tf.identity(total_loss)\n\n      # Track the moving averages of all trainable variables.\n      # Note that we maintain a 'double-average' of the BatchNormalization\n      # global statistics.\n      # This is not needed when the number of replicas is small, but it is important\n      # for synchronous distributed training with tens of workers/replicas.\n      
exp_moving_averager = tf.train.ExponentialMovingAverage(\n          inception.MOVING_AVERAGE_DECAY, global_step)\n\n      variables_to_average = (\n          tf.trainable_variables() + tf.moving_average_variables())\n\n      # Add histograms for model variables.\n      for var in variables_to_average:\n        tf.summary.histogram(var.op.name, var)\n\n      # Create synchronous replica optimizer.\n      opt = tf.train.SyncReplicasOptimizer(\n          opt,\n          replicas_to_aggregate=num_replicas_to_aggregate,\n          total_num_replicas=num_workers,\n          variable_averages=exp_moving_averager,\n          variables_to_average=variables_to_average)\n\n      batchnorm_updates = tf.get_collection(slim.ops.UPDATE_OPS_COLLECTION)\n      batchnorm_updates = batchnorm_updates + \\\n          tf.get_collection(tf.GraphKeys.UPDATE_OPS, scope=None)\n      if 0 == len(batchnorm_updates):\n        tf.logging.warning('Batchnorm updates are missing')\n      batchnorm_updates_op = tf.group(*batchnorm_updates)\n      # Add a dependency to compute batchnorm_updates.\n      # Put in front, compared with the multi-gpu version:\n      #with tf.control_dependencies([batchnorm_updates_op]):\n      #  total_loss = tf.identity(total_loss)\n\n      # Compute gradients with respect to the loss.\n      grads = opt.compute_gradients(total_loss)\n\n      # Add histograms for gradients.\n      for grad, var in grads:\n        if grad is not None:\n          tf.summary.histogram(var.op.name + '/gradients', grad)\n\n      apply_gradients_op = opt.apply_gradients(grads, global_step=global_step)\n\n      with tf.control_dependencies([apply_gradients_op, batchnorm_updates_op]):\n        train_op = tf.identity(total_loss, name='train_op')\n\n      # Get chief queue_runners and init_tokens, which are used to synchronize\n      # replicas. More details can be found in SyncReplicasOptimizer.\n      chief_queue_runners = [opt.get_chief_queue_runner()]\n      init_tokens_op = opt.get_init_tokens_op()\n\n      # Create a saver.\n      saver = tf.train.Saver()\n\n      # Build the summary operation based on the TF collection of Summaries.\n      summary_op = tf.summary.merge_all()\n\n      # Build an initialization operation to run below.\n      init_op = tf.global_variables_initializer()\n\n      # We run the summaries in the same thread as the training operations by\n      # passing in None for summary_op to avoid a summary_thread being started.\n      # Running summaries and training operations in parallel could run out of\n      # GPU memory.\n      sv = tf.train.Supervisor(is_chief=is_chief,\n                               logdir=FLAGS.train_dir,\n                               init_op=init_op,\n                               summary_op=None,\n                               global_step=global_step,\n                               saver=saver,\n                               save_model_secs=FLAGS.save_interval_secs)\n\n      tf.logging.info('%s Supervisor' % datetime.now())\n\n      sess_config = tf.ConfigProto(\n          allow_soft_placement=True,\n          log_device_placement=FLAGS.log_device_placement)\n      sess_config.gpu_options.allow_growth = True\n\n      # Get a session.\n      sess = sv.prepare_or_wait_for_session(target, config=sess_config)\n\n      # Start the queue runners.\n      queue_runners = tf.get_collection(tf.GraphKeys.QUEUE_RUNNERS)\n      sv.start_queue_runners(sess, queue_runners)\n      tf.logging.info('Started %d queues for processing input data.',\n                      len(queue_runners))\n\n      if is_chief:\n        sv.start_queue_runners(sess, chief_queue_runners)\n        sess.run(init_tokens_op)\n\n      # Train, checking for NaNs. Concurrently run the summary operation at a\n      # specified interval. 
Note that the summary_op and train_op never run\n # simultaneously in order to prevent running out of GPU memory.\n next_summary_time = time.time() + FLAGS.save_summaries_secs\n while not sv.should_stop():\n try:\n start_time = time.time()\n loss_value, step = sess.run([train_op, global_step])\n assert not np.isnan(loss_value), 'Model diverged with loss = NaN'\n if step > FLAGS.max_steps:\n break\n duration = time.time() - start_time\n\n if step % 30 == 0:\n examples_per_sec = FLAGS.batch_size / float(duration)\n format_str = ('Worker %d: %s: step %d, loss = %.2f'\n '(%.1f examples/sec; %.3f sec/batch)')\n tf.logging.info(format_str %\n (FLAGS.task_id, datetime.now(), step, loss_value,\n examples_per_sec, duration))\n\n # Determine if the summary_op should be run on the chief worker.\n if is_chief and next_summary_time < time.time():\n tf.logging.info('Running Summary operation on the chief.')\n summary_str = sess.run(summary_op)\n sv.summary_computed(sess, summary_str)\n tf.logging.info('Finished running Summary operation.')\n\n # Determine the next time for running the summary.\n next_summary_time += FLAGS.save_summaries_secs\n except:\n if is_chief:\n tf.logging.info('Chief got exception while running!')\n raise\n\n # Save after the training ends.\n if is_chief:\n saver.save(sess,\n os.path.join(FLAGS.train_dir, 'model.ckpt'),\n global_step=global_step)\n\n # Stop the supervisor. This also waits for service threads to finish.\n sv.stop()\n" ]
[ [ "tensorflow.Graph", "tensorflow.gfile.DeleteRecursively", "tensorflow.gfile.Open", "tensorflow.gfile.Exists", "numpy.squeeze", "tensorflow.app.flags.DEFINE_integer", "tensorflow.placeholder", "tensorflow.python_io.TFRecordWriter", "tensorflow.image.encode_png", "tensorflow.gfile.MakeDirs", "tensorflow.gfile.Remove", "tensorflow.Session" ], [ "tensorflow.device", "tensorflow.control_dependencies", "tensorflow.train.ExponentialMovingAverage", "tensorflow.app.flags.DEFINE_string", "tensorflow.train.AdamOptimizer", "tensorflow.moving_average_variables", "tensorflow.app.flags.DEFINE_boolean", "tensorflow.group", "tensorflow.add_n", "tensorflow.summary.scalar", "tensorflow.get_collection", "tensorflow.app.flags.DEFINE_integer", "tensorflow.train.exponential_decay", "tensorflow.ConfigProto", "tensorflow.train.MomentumOptimizer", "tensorflow.trainable_variables", "tensorflow.train.Saver", "tensorflow.app.flags.DEFINE_bool", "tensorflow.train.RMSPropOptimizer", "numpy.isnan", "tensorflow.identity", "tensorflow.global_variables_initializer", "tensorflow.train.GradientDescentOptimizer", "tensorflow.summary.merge_all", "tensorflow.logging.info", "tensorflow.summary.histogram", "tensorflow.train.polynomial_decay", "tensorflow.train.SyncReplicasOptimizer", "tensorflow.train.Supervisor", "tensorflow.app.flags.DEFINE_float" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [ "1.10", "1.12", "1.4", "1.13", "1.5", "1.7", "0.12", "1.0", "1.2" ] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
LeonidBeynenson/nncf_pytorch
[ "f8ded2752aded28d3559308c354235e5011ccbe0" ]
[ "tests/quantization/test_algo_quantization.py" ]
[ "\"\"\"\n Copyright (c) 2019-2020 Intel Corporation\n Licensed under the Apache License, Version 2.0 (the \"License\");\n you may not use this file except in compliance with the License.\n You may obtain a copy of the License at\n http://www.apache.org/licenses/LICENSE-2.0\n Unless required by applicable law or agreed to in writing, software\n distributed under the License is distributed on an \"AS IS\" BASIS,\n WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n See the License for the specific language governing permissions and\n limitations under the License.\n\"\"\"\nfrom copy import deepcopy\nfrom typing import List\nfrom typing import Tuple\n\nimport pytest\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nimport torch.utils.data\nfrom torchvision.models import resnet50\nfrom torchvision.models import squeezenet1_1\n\nfrom nncf.api.compression import CompressionScheduler\nfrom nncf.checkpoint_loading import load_state\nfrom nncf.common.quantization.structs import QuantizationMode\nfrom nncf.common.quantization.structs import QuantizerConfig\nfrom nncf.composite_compression import PTCompositeCompressionAlgorithmBuilder\nfrom nncf.compression_method_api import PTCompressionLoss\nfrom nncf.dynamic_graph.context import Scope\nfrom nncf.dynamic_graph.context import ScopeElement\nfrom nncf.hw_config import HWConfigType\nfrom nncf.layers import NNCFConv2d\nfrom nncf.module_operations import UpdateInputs\nfrom nncf.module_operations import UpdateWeight\nfrom nncf.nncf_network import ExtraCompressionModuleType\nfrom nncf.quantization.algo import QuantizationBuilder\nfrom nncf.quantization.algo import QuantizationController\nfrom nncf.quantization.layers import BaseQuantizer\nfrom nncf.quantization.layers import PTQuantizerSpec\nfrom nncf.quantization.layers import QUANTIZATION_MODULES\nfrom nncf.quantization.layers import SymmetricQuantizer\nfrom nncf.quantization.quantizer_id import NonWeightQuantizerId\nfrom nncf.quantization.quantizer_id import WeightQuantizerId\nfrom nncf.utils import get_all_modules_by_type\nfrom tests.helpers import BasicConvTestModel\nfrom tests.helpers import TwoConvTestModel\nfrom tests.helpers import create_compressed_model_and_algo_for_test\nfrom tests.helpers import get_empty_config\nfrom tests.quantization.test_quantization_helpers import get_quantization_config_without_range_init\nfrom tests.quantization.test_quantization_helpers import get_squeezenet_quantization_config\n\n\ndef compare_qspecs(qspec: PTQuantizerSpec, quantizer: BaseQuantizer):\n assert qspec.narrow_range == quantizer.narrow_range\n assert qspec.num_bits == quantizer.num_bits\n assert isinstance(quantizer, QUANTIZATION_MODULES.get(qspec.mode))\n assert qspec.scale_shape == quantizer.scale_shape\n #pylint:disable=protected-access\n assert qspec.signedness_to_force == quantizer._signedness_to_force\n\n\ndef test_quantization_configs__with_defaults():\n model = BasicConvTestModel()\n config = get_quantization_config_without_range_init()\n _, compression_ctrl = create_compressed_model_and_algo_for_test(model, config)\n\n assert isinstance(compression_ctrl, QuantizationController)\n weight_quantizers = compression_ctrl.weight_quantizers\n activation_quantizer_infos = compression_ctrl.non_weight_quantizers\n\n ref_weight_qspec = PTQuantizerSpec(num_bits=8,\n mode=QuantizationMode.SYMMETRIC,\n signedness_to_force=True,\n narrow_range=True,\n half_range=False,\n scale_shape=model.wq_scale_shape_per_channel,\n logarithm_scale=False)\n for wq_info in 
weight_quantizers.values():\n compare_qspecs(ref_weight_qspec, wq_info.quantizer_module_ref)\n\n ref_activation_qspec = PTQuantizerSpec(num_bits=8,\n mode=QuantizationMode.SYMMETRIC,\n signedness_to_force=None,\n narrow_range=False,\n half_range=False,\n scale_shape=(1, ),\n logarithm_scale=False)\n for aq_info in activation_quantizer_infos.values():\n compare_qspecs(ref_activation_qspec, aq_info.quantizer_module_ref)\n\n\ndef test_quantization_configs__custom():\n model = BasicConvTestModel()\n\n config = get_quantization_config_without_range_init()\n config['compression'].update({\n \"weights\": {\n \"mode\": \"asymmetric\",\n \"per_channel\": True,\n \"bits\": 4\n },\n \"activations\": {\n \"mode\": \"asymmetric\",\n \"bits\": 4,\n \"signed\": True,\n },\n })\n config['target_device'] = 'TRIAL'\n _, compression_ctrl = create_compressed_model_and_algo_for_test(model, config)\n\n assert isinstance(compression_ctrl, QuantizationController)\n weight_quantizers = compression_ctrl.weight_quantizers\n activation_quantizer_infos = compression_ctrl.non_weight_quantizers\n\n ref_weight_qspec = PTQuantizerSpec(num_bits=4,\n mode=QuantizationMode.ASYMMETRIC,\n signedness_to_force=None,\n scale_shape=model.wq_scale_shape_per_channel,\n narrow_range=True,\n half_range=False,\n logarithm_scale=False)\n for wq_info in weight_quantizers.values():\n compare_qspecs(ref_weight_qspec, wq_info.quantizer_module_ref)\n\n ref_activation_qspec = PTQuantizerSpec(num_bits=4,\n mode=QuantizationMode.ASYMMETRIC,\n signedness_to_force=True,\n scale_shape=(1, ),\n narrow_range=False,\n half_range=False,\n logarithm_scale=False)\n\n for aq_info in activation_quantizer_infos.values():\n compare_qspecs(ref_activation_qspec, aq_info.quantizer_module_ref)\n\n\ndef compare_weights_activation_quantizers_pairs(actual_pairs: List[Tuple[List[WeightQuantizerId],\n NonWeightQuantizerId]],\n algo, ref_pair_names, model_name):\n def get_wq_name(name):\n return '/'.join([model_name, name])\n\n def get_aq_name(name):\n if name == '/nncf_model_input_0':\n return name + '|OUTPUT'\n return '/'.join([model_name, name]) + '|OUTPUT'\n\n all_quantizations = {str(key): quantizer for key, quantizer in algo.all_quantizations.items()}\n assert len(actual_pairs) == len(ref_pair_names)\n for (wq_ids, aq_id), (wqs_names, aq_name) in zip(actual_pairs, ref_pair_names):\n wqs = [algo.all_quantizations[wq_id] for wq_id in wq_ids]\n aq = algo.all_quantizations[aq_id]\n assert not aq.narrow_range\n assert aq == all_quantizations[get_aq_name(aq_name)]\n ref_weight_quantizers = [all_quantizations[get_wq_name(name)] for name in wqs_names]\n for weight_quantizer in wqs:\n assert weight_quantizer.narrow_range\n assert weight_quantizer in ref_weight_quantizers\n\n\ndef test_can_load_quant_algo__with_defaults():\n model = BasicConvTestModel()\n config = get_quantization_config_without_range_init()\n composite_builder = PTCompositeCompressionAlgorithmBuilder(config)\n assert len(composite_builder.child_builders) == 1\n assert isinstance(composite_builder.child_builders[0], QuantizationBuilder)\n\n quant_model, _ = create_compressed_model_and_algo_for_test(deepcopy(model), config)\n\n model_conv = get_all_modules_by_type(model, 'Conv2d')\n quant_model_conv = get_all_modules_by_type(quant_model.get_nncf_wrapped_model(), 'NNCFConv2d')\n assert len(model_conv) == len(quant_model_conv)\n\n for module_scope, _ in model_conv.items():\n quant_scope = deepcopy(module_scope) # type: Scope\n quant_scope.pop()\n quant_scope.push(ScopeElement('NNCFConv2d', 'conv'))\n assert 
quant_scope in quant_model_conv.keys()\n\n store = []\n for op in quant_model_conv[quant_scope].pre_ops.values():\n if isinstance(op, (UpdateInputs, UpdateWeight)) and isinstance(op.operand, SymmetricQuantizer):\n assert op.__class__.__name__ not in store\n store.append(op.__class__.__name__)\n assert UpdateWeight.__name__ in store\n\n\ndef test_can_create_quant_loss_and_scheduler():\n config = get_quantization_config_without_range_init()\n _, compression_ctrl = create_compressed_model_and_algo_for_test(BasicConvTestModel(), config)\n\n loss = compression_ctrl.loss\n assert isinstance(loss, PTCompressionLoss)\n\n scheduler = compression_ctrl.scheduler\n assert isinstance(scheduler, CompressionScheduler)\n\n\ndef get_path_to_keys(tmp_path, rank):\n return '{}_{}'.format(tmp_path, str(rank))\n\n\ndef activation_quantizers_dumping_worker(current_gpu, config, tmp_path):\n model = resnet50(pretrained=False)\n quant_model, _ = create_compressed_model_and_algo_for_test(model, config)\n path = get_path_to_keys(tmp_path, current_gpu)\n print(path)\n with open(path, 'w') as f:\n f.writelines(\"%s\\n\" % key for key in quant_model.activation_quantizers.keys())\n\n\ndef test_activation_quantizers_order_is_the_same__for_resnet50(tmp_path, runs_subprocess_in_precommit):\n if not torch.cuda.is_available():\n pytest.skip(\"Skipping CUDA test cases for CPU only setups\")\n config = get_empty_config(input_sample_sizes=[1, 3, 224, 224])\n config['compression'] = {'algorithm': 'quantization', \"initializer\": {\"range\": {\"num_init_samples\": 0}}}\n ngpus_per_node = torch.cuda.device_count()\n\n torch.multiprocessing.spawn(activation_quantizers_dumping_worker,\n nprocs=ngpus_per_node,\n args=(config, tmp_path),\n join=True)\n\n with open(get_path_to_keys(tmp_path, 0), 'r') as f:\n ref_list = f.readlines()\n for i in range(1, ngpus_per_node):\n with open(get_path_to_keys(tmp_path, i), 'r') as f:\n curr_list = f.readlines()\n assert curr_list == ref_list\n\n\ndef test_load_state_sets_initialized_flag():\n config = get_quantization_config_without_range_init()\n\n model = TwoConvTestModel()\n quant_model, _ = create_compressed_model_and_algo_for_test(model, config)\n\n load_state(quant_model, {\n 'module.features.0.0.pre_ops.0.op.signed_tensor': torch.tensor([1.0]), # quantizer of 1st conv's weights\n 'module.features.1.0.pre_ops.0.op.scale': torch.ones(1, 1, 1, 1) # quantizer of 2nd conv's weights\n })\n\n quantizers = get_all_modules_by_type(quant_model, 'SymmetricQuantizer')\n for scope, module in quantizers.items():\n if 'activation_quantizers' in str(scope) or 'UpdateInputs' in str(scope):\n assert not module.initialized\n else:\n assert module.initialized\n\n\ndef test_quantizers_have_proper_narrow_range_set():\n class Model(nn.Module):\n def __init__(self, size=1):\n super().__init__()\n self.size = size\n self.conv = nn.Conv2d(size, size, size)\n\n def forward(self, x):\n return self.conv(x)\n\n model = Model()\n config = get_quantization_config_without_range_init(model_size=2)\n quant_model, _ = create_compressed_model_and_algo_for_test(model, config)\n\n for module in quant_model.modules():\n if isinstance(module, NNCFConv2d):\n for op in module.pre_ops.values():\n assert isinstance(op, (UpdateWeight, UpdateInputs))\n assert op.operand.narrow_range == isinstance(op, UpdateWeight)\n for _, aq in quant_model.get_compression_modules_by_type(ExtraCompressionModuleType.ACTIVATION_QUANTIZER).items():\n assert aq.narrow_range is False\n\n\[email protected](name=\"hw_config_type\", params=HWConfigType)\ndef 
hw_config_type_(request):\n return request.param\n\n\ndef test_hw_config_quantization_can_quantize_squeezenet(hw_config_type):\n config = get_squeezenet_quantization_config()\n config[\"hw_config\"] = hw_config_type.value\n model = squeezenet1_1()\n create_compressed_model_and_algo_for_test(model, config)\n\n\nclass QuantizeInputsTestModel(nn.Module):\n def __init__(self):\n super().__init__()\n self.conv1 = nn.Conv2d(in_channels=3, out_channels=8, kernel_size=3)\n self.conv2 = nn.Conv2d(in_channels=3, out_channels=8, kernel_size=3)\n self.conv3 = nn.Conv2d(in_channels=3, out_channels=8, kernel_size=3)\n self.conv4 = nn.Conv2d(in_channels=8, out_channels=16, kernel_size=3)\n self.conv5 = nn.Conv2d(in_channels=1, out_channels=3, kernel_size=1)\n self.conv6 = nn.Conv2d(in_channels=6, out_channels=3, kernel_size=2)\n self.linear = nn.Linear(in_features=8, out_features=8)\n\n # (1) (2) (3) (4) (5)\n # | | | | |-----\\\n # (conv1) (MP) (MP) (MP) (MP) |\n # | | | | | |\n # | | (+) | | |\n # | |--\\ | | | |\n # | | \\ | | | |\n # | (conv2) | (conv3) | | |\n # | | | | \\ / |\n # | (AvP) \\ | (cat) |\n # | | \\ | | |\n # (conv4) (linear) \\ | (conv6) |\n # | | (cat) | |\n # | | | (+)------/\n # | | (conv5) |\n # (AvP) | | |\n # | | (AvP) |\n # \\ | / |\n # \\---(cat)---------------/\n\n def forward(self, input_1, input_2, input_3, input_4, input_5):\n x_1 = self.conv1(input_1)\n x_1 = self.conv4(x_1)\n x_1 = F.adaptive_avg_pool2d(x_1, output_size=1)\n x_1 = x_1.flatten(start_dim=1)\n\n x_2_br = F.max_pool2d(input_2, kernel_size=2)\n x_2 = self.conv2(x_2_br)\n x_2 = F.adaptive_avg_pool2d(x_2, output_size=1)\n x_2 = x_2.flatten(start_dim=1)\n x_2 = self.linear(x_2)\n\n x_3 = F.max_pool2d(input_3, kernel_size=2)\n x_3 = x_3 + torch.ones_like(x_3)\n x_3 = self.conv3(x_3)\n x_3 = x_3.flatten(start_dim=1)\n x_2_br = x_2_br.flatten(start_dim=1)\n x_3 = torch.cat([x_2_br, x_3], dim=-1)\n x_3 = self.conv5(x_3.unsqueeze(2).unsqueeze(3).transpose(1, 2))\n x_3 = F.adaptive_avg_pool2d(x_3, output_size=1)\n x_3 = x_3.flatten(start_dim=1)\n\n x_4 = F.max_pool2d(input_4, kernel_size=2)\n x_5 = F.max_pool2d(input_5, kernel_size=2)\n x_45 = torch.cat([x_4, x_5], dim=1)\n x_45 = self.conv6(x_45)\n x_45 = x_45.flatten(start_dim=1)\n in_5_flat = input_5.flatten(start_dim=1)\n x_45 += F.pad(input_5.flatten(start_dim=1), [0, x_45.shape[1] - in_5_flat.shape[1]])\n\n return torch.cat([x_1, x_2, x_3, x_45], dim=-1)\n\n\ndef test_quantize_inputs():\n model = QuantizeInputsTestModel()\n config = get_quantization_config_without_range_init()\n config[\"input_info\"] = [\n {\n \"sample_size\": [2, 3, 32, 32],\n },\n {\n \"sample_size\": [2, 3, 32, 32],\n },\n {\n \"sample_size\": [2, 3, 32, 32],\n },\n {\n \"sample_size\": [2, 3, 32, 32],\n },\n {\n \"sample_size\": [2, 3, 32, 32],\n }\n ]\n\n model, _ = create_compressed_model_and_algo_for_test(model, config)\n REF_QUANTIZED_INPUT_MODULE_SCOPES = [\n '/nncf_model_input_0|OUTPUT',\n '/nncf_model_input_1|OUTPUT',\n '/nncf_model_input_2|OUTPUT',\n '/nncf_model_input_3|OUTPUT',\n '/nncf_model_input_4|OUTPUT'\n ]\n actual_input_quantizer_str_scopes = \\\n [str_scope for str_scope in model.activation_quantizers if 'nncf_model_input' in str_scope]\n assert len(REF_QUANTIZED_INPUT_MODULE_SCOPES) == len(actual_input_quantizer_str_scopes)\n for ref_qinput_scope_str in REF_QUANTIZED_INPUT_MODULE_SCOPES:\n assert isinstance(model.activation_quantizers[ref_qinput_scope_str], SymmetricQuantizer)\n\n\n\[email protected](\n ('requanting_qconf', 'base_qconf', 'is_valid_requant'),\n (\n 
(QuantizerConfig(), QuantizerConfig(), True),\n\n (QuantizerConfig(num_bits=8), QuantizerConfig(num_bits=6), False),\n (QuantizerConfig(num_bits=6), QuantizerConfig(num_bits=8), True),\n\n # Technically placing a per-channel quantization after a per-tensor should not break\n # anything or limit the set of output values w.r.t to a single per-tensor quantizer.\n (QuantizerConfig(num_bits=6, per_channel=True), QuantizerConfig(num_bits=6, per_channel=False), True),\n (QuantizerConfig(num_bits=6, per_channel=False), QuantizerConfig(num_bits=6, per_channel=True), True),\n\n (QuantizerConfig(num_bits=5, per_channel=True), QuantizerConfig(num_bits=6, per_channel=False), True),\n (QuantizerConfig(num_bits=5, per_channel=False), QuantizerConfig(num_bits=6, per_channel=True), True),\n\n (\n QuantizerConfig(num_bits=5, mode=QuantizationMode.SYMMETRIC),\n QuantizerConfig(num_bits=5, mode=QuantizationMode.ASYMMETRIC),\n True\n ),\n (\n QuantizerConfig(num_bits=5, mode=QuantizationMode.ASYMMETRIC),\n QuantizerConfig(num_bits=5, mode=QuantizationMode.SYMMETRIC),\n False\n ),\n\n\n (QuantizerConfig(signedness_to_force=True), QuantizerConfig(), True),\n (QuantizerConfig(), QuantizerConfig(signedness_to_force=True), False),\n\n (QuantizerConfig(signedness_to_force=False), QuantizerConfig(), True),\n (QuantizerConfig(), QuantizerConfig(signedness_to_force=False), False),\n\n (QuantizerConfig(signedness_to_force=True), QuantizerConfig(signedness_to_force=False), False),\n (QuantizerConfig(signedness_to_force=False), QuantizerConfig(signedness_to_force=True), True),\n\n (\n QuantizerConfig(num_bits=4, mode=QuantizationMode.SYMMETRIC, per_channel=False),\n QuantizerConfig(num_bits=8, mode=QuantizationMode.SYMMETRIC, per_channel=True),\n True\n ),\n\n (\n QuantizerConfig(num_bits=4, mode=QuantizationMode.SYMMETRIC, per_channel=False),\n QuantizerConfig(num_bits=8, mode=QuantizationMode.ASYMMETRIC, per_channel=False),\n True\n ),\n\n # Neither of the two configs here can requantize the other\n (\n QuantizerConfig(num_bits=6, mode=QuantizationMode.ASYMMETRIC),\n QuantizerConfig(num_bits=8, mode=QuantizationMode.SYMMETRIC),\n False\n ),\n (\n QuantizerConfig(num_bits=8, mode=QuantizationMode.SYMMETRIC),\n QuantizerConfig(num_bits=6, mode=QuantizationMode.ASYMMETRIC),\n False\n )\n )\n)\ndef test_quantizer_ordering(requanting_qconf: QuantizerConfig,\n base_qconf: QuantizerConfig, is_valid_requant: bool):\n test_result = requanting_qconf.is_valid_requantization_for(base_qconf)\n assert test_result == is_valid_requant\n" ]
[ [ "torch.ones", "torch.multiprocessing.spawn", "torch.cat", "torch.nn.Conv2d", "torch.nn.functional.adaptive_avg_pool2d", "torch.tensor", "torch.nn.Linear", "torch.cuda.is_available", "torch.cuda.device_count", "torch.nn.functional.max_pool2d", "torch.ones_like" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
qadmus/panda
[ "fd4751c77b84a531ba2d6812d7eeb31823db93e5" ]
[ "tests/safety/test_volkswagen_pq.py" ]
[ "#!/usr/bin/env python3\nimport unittest\nimport numpy as np\nfrom panda import Panda\nfrom panda.tests.safety import libpandasafety_py\nimport panda.tests.safety.common as common\nfrom panda.tests.safety.common import make_msg, MAX_WRONG_COUNTERS\n\nMAX_RATE_UP = 4\nMAX_RATE_DOWN = 10\nMAX_STEER = 300\nMAX_RT_DELTA = 75\nRT_INTERVAL = 250000\n\nDRIVER_TORQUE_ALLOWANCE = 80\nDRIVER_TORQUE_FACTOR = 3\n\nMSG_LENKHILFE_3 = 0x0D0 # RX from EPS, for steering angle and driver steering torque\nMSG_HCA_1 = 0x0D2 # TX by OP, Heading Control Assist steering torque\nMSG_MOTOR_2 = 0x288 # RX from ECU, for CC state and brake switch state\nMSG_MOTOR_3 = 0x380 # RX from ECU, for driver throttle input\nMSG_GRA_NEU = 0x38A # TX by OP, ACC control buttons for cancel/resume\nMSG_BREMSE_3 = 0x4A0 # RX from ABS, for wheel speeds\nMSG_LDW_1 = 0x5BE # TX by OP, Lane line recognition and text alerts\n\n\ndef volkswagen_pq_checksum(msg, addr, len_msg):\n msg_bytes = msg.RDLR.to_bytes(4, 'little') + msg.RDHR.to_bytes(4, 'little')\n msg_bytes = msg_bytes[1:len_msg]\n\n checksum = 0\n for i in msg_bytes:\n checksum ^= i\n return checksum\n\nclass TestVolkswagenPqSafety(common.PandaSafetyTest):\n cruise_engaged = False\n brake_pressed = False\n cnt_lenkhilfe_3 = 0\n cnt_hca_1 = 0\n\n # Transmit of GRA_Neu is allowed on bus 0 and 2 to keep compatibility with gateway and camera integration\n TX_MSGS = [[MSG_HCA_1, 0], [MSG_GRA_NEU, 0], [MSG_GRA_NEU, 2], [MSG_LDW_1, 0]]\n STANDSTILL_THRESHOLD = 1\n RELAY_MALFUNCTION_ADDR = MSG_HCA_1\n RELAY_MALFUNCTION_BUS = 0\n FWD_BLACKLISTED_ADDRS = {2: [MSG_HCA_1, MSG_LDW_1]}\n FWD_BUS_LOOKUP = {0: 2, 2: 0}\n\n def setUp(self):\n self.safety = libpandasafety_py.libpandasafety\n self.safety.set_safety_hooks(Panda.SAFETY_VOLKSWAGEN_PQ, 0)\n self.safety.init_tests()\n\n def _set_prev_torque(self, t):\n self.safety.set_desired_torque_last(t)\n self.safety.set_rt_torque_last(t)\n\n # Wheel speeds (Bremse_3)\n def _speed_msg(self, speed):\n wheel_speed_scaled = int(speed / 0.01)\n to_send = make_msg(0, MSG_BREMSE_3)\n to_send[0].RDLR = (wheel_speed_scaled | (wheel_speed_scaled << 16)) << 1\n to_send[0].RDHR = (wheel_speed_scaled | (wheel_speed_scaled << 16)) << 1\n return to_send\n\n # Brake light switch (shared message Motor_2)\n def _brake_msg(self, brake):\n to_send = make_msg(0, MSG_MOTOR_2)\n to_send[0].RDLR = (0x1 << 16) if brake else 0\n # since this siganl's used for engagement status, preserve current state\n to_send[0].RDLR |= (self.safety.get_controls_allowed() & 0x3) << 22\n return to_send\n\n # ACC engaged status (shared message Motor_2)\n def _pcm_status_msg(self, enable):\n self.__class__.cruise_engaged = enable\n return self._motor_2_msg()\n\n # Driver steering input torque\n def _lenkhilfe_3_msg(self, torque):\n to_send = make_msg(0, MSG_LENKHILFE_3, 6)\n t = abs(torque)\n to_send[0].RDLR = ((t & 0x3FF) << 16)\n if torque < 0:\n to_send[0].RDLR |= 0x1 << 26\n to_send[0].RDLR |= (self.cnt_lenkhilfe_3 % 16) << 12\n to_send[0].RDLR |= volkswagen_pq_checksum(to_send[0], MSG_LENKHILFE_3, 8)\n self.__class__.cnt_lenkhilfe_3 += 1\n return to_send\n\n # openpilot steering output torque\n def _hca_1_msg(self, torque):\n to_send = make_msg(0, MSG_HCA_1, 5)\n t = abs(torque) << 5 # DBC scale from centi-Nm to PQ network (approximated)\n to_send[0].RDLR = (t & 0x7FFF) << 16\n if torque < 0:\n to_send[0].RDLR |= 0x1 << 31\n to_send[0].RDLR |= (self.cnt_hca_1 % 16) << 8\n to_send[0].RDLR |= volkswagen_pq_checksum(to_send[0], MSG_HCA_1, 8)\n self.__class__.cnt_hca_1 += 1\n return 
to_send\n\n # ACC engagement and brake light switch status\n # Called indirectly for compatibility with common.py tests\n def _motor_2_msg(self):\n to_send = make_msg(0, MSG_MOTOR_2)\n to_send[0].RDLR = (0x1 << 16) if self.__class__.brake_pressed else 0\n to_send[0].RDLR |= (self.__class__.cruise_engaged & 0x3) << 22\n return to_send\n\n # Driver throttle input (motor_3)\n def _gas_msg(self, gas):\n to_send = make_msg(0, MSG_MOTOR_3)\n to_send[0].RDLR = (gas & 0xFF) << 16\n return to_send\n\n # Cruise control buttons\n def _gra_neu_msg(self, bit):\n to_send = make_msg(2, MSG_GRA_NEU, 4)\n to_send[0].RDLR = 1 << bit\n to_send[0].RDLR |= volkswagen_pq_checksum(to_send[0], MSG_GRA_NEU, 8)\n return to_send\n\n def test_steer_safety_check(self):\n for enabled in [0, 1]:\n for t in range(-500, 500):\n self.safety.set_controls_allowed(enabled)\n self._set_prev_torque(t)\n if abs(t) > MAX_STEER or (not enabled and abs(t) > 0):\n self.assertFalse(self._tx(self._hca_1_msg(t)))\n else:\n self.assertTrue(self._tx(self._hca_1_msg(t)))\n\n def test_spam_cancel_safety_check(self):\n BIT_CANCEL = 9\n BIT_SET = 16\n BIT_RESUME = 17\n self.safety.set_controls_allowed(0)\n self.assertTrue(self._tx(self._gra_neu_msg(BIT_CANCEL)))\n self.assertFalse(self._tx(self._gra_neu_msg(BIT_RESUME)))\n self.assertFalse(self._tx(self._gra_neu_msg(BIT_SET)))\n # do not block resume if we are engaged already\n self.safety.set_controls_allowed(1)\n self.assertTrue(self._tx(self._gra_neu_msg(BIT_RESUME)))\n\n def test_non_realtime_limit_up(self):\n self.safety.set_torque_driver(0, 0)\n self.safety.set_controls_allowed(True)\n\n self._set_prev_torque(0)\n self.assertTrue(self._tx(self._hca_1_msg(MAX_RATE_UP)))\n self._set_prev_torque(0)\n self.assertTrue(self._tx(self._hca_1_msg(-MAX_RATE_UP)))\n\n self._set_prev_torque(0)\n self.assertFalse(self._tx(self._hca_1_msg(MAX_RATE_UP + 1)))\n self.safety.set_controls_allowed(True)\n self._set_prev_torque(0)\n self.assertFalse(self._tx(self._hca_1_msg(-MAX_RATE_UP - 1)))\n\n def test_non_realtime_limit_down(self):\n self.safety.set_torque_driver(0, 0)\n self.safety.set_controls_allowed(True)\n\n def test_against_torque_driver(self):\n self.safety.set_controls_allowed(True)\n\n for sign in [-1, 1]:\n for t in np.arange(0, DRIVER_TORQUE_ALLOWANCE + 1, 1):\n t *= -sign\n self.safety.set_torque_driver(t, t)\n self._set_prev_torque(MAX_STEER * sign)\n self.assertTrue(self._tx(self._hca_1_msg(MAX_STEER * sign)))\n\n self.safety.set_torque_driver(DRIVER_TORQUE_ALLOWANCE + 1, DRIVER_TORQUE_ALLOWANCE + 1)\n self.assertFalse(self._tx(self._hca_1_msg(-MAX_STEER)))\n\n # spot check some individual cases\n for sign in [-1, 1]:\n driver_torque = (DRIVER_TORQUE_ALLOWANCE + 10) * sign\n torque_desired = (MAX_STEER - 10 * DRIVER_TORQUE_FACTOR) * sign\n delta = 1 * sign\n self._set_prev_torque(torque_desired)\n self.safety.set_torque_driver(-driver_torque, -driver_torque)\n self.assertTrue(self._tx(self._hca_1_msg(torque_desired)))\n self._set_prev_torque(torque_desired + delta)\n self.safety.set_torque_driver(-driver_torque, -driver_torque)\n self.assertFalse(self._tx(self._hca_1_msg(torque_desired + delta)))\n\n self._set_prev_torque(MAX_STEER * sign)\n self.safety.set_torque_driver(-MAX_STEER * sign, -MAX_STEER * sign)\n self.assertTrue(self._tx(self._hca_1_msg((MAX_STEER - MAX_RATE_DOWN) * sign)))\n self._set_prev_torque(MAX_STEER * sign)\n self.safety.set_torque_driver(-MAX_STEER * sign, -MAX_STEER * sign)\n self.assertTrue(self._tx(self._hca_1_msg(0)))\n self._set_prev_torque(MAX_STEER * sign)\n 
self.safety.set_torque_driver(-MAX_STEER * sign, -MAX_STEER * sign)\n self.assertFalse(self._tx(self._hca_1_msg((MAX_STEER - MAX_RATE_DOWN + 1) * sign)))\n\n def test_realtime_limits(self):\n self.safety.set_controls_allowed(True)\n\n for sign in [-1, 1]:\n self.safety.init_tests()\n self._set_prev_torque(0)\n self.safety.set_torque_driver(0, 0)\n for t in np.arange(0, MAX_RT_DELTA, 1):\n t *= sign\n self.assertTrue(self._tx(self._hca_1_msg(t)))\n self.assertFalse(self._tx(self._hca_1_msg(sign * (MAX_RT_DELTA + 1))))\n\n self._set_prev_torque(0)\n for t in np.arange(0, MAX_RT_DELTA, 1):\n t *= sign\n self.assertTrue(self._tx(self._hca_1_msg(t)))\n\n # Increase timer to update rt_torque_last\n self.safety.set_timer(RT_INTERVAL + 1)\n self.assertTrue(self._tx(self._hca_1_msg(sign * (MAX_RT_DELTA - 1))))\n self.assertTrue(self._tx(self._hca_1_msg(sign * (MAX_RT_DELTA + 1))))\n\n def test_torque_measurements(self):\n self._rx(self._lenkhilfe_3_msg(50))\n self._rx(self._lenkhilfe_3_msg(-50))\n self._rx(self._lenkhilfe_3_msg(0))\n self._rx(self._lenkhilfe_3_msg(0))\n self._rx(self._lenkhilfe_3_msg(0))\n self._rx(self._lenkhilfe_3_msg(0))\n\n self.assertEqual(-50, self.safety.get_torque_driver_min())\n self.assertEqual(50, self.safety.get_torque_driver_max())\n\n self._rx(self._lenkhilfe_3_msg(0))\n self.assertEqual(0, self.safety.get_torque_driver_max())\n self.assertEqual(-50, self.safety.get_torque_driver_min())\n\n self._rx(self._lenkhilfe_3_msg(0))\n self.assertEqual(0, self.safety.get_torque_driver_max())\n self.assertEqual(0, self.safety.get_torque_driver_min())\n\n def test_rx_hook(self):\n # checksum checks\n # TODO: Would be ideal to check non-checksum non-counter messages as well,\n # but I'm not sure if we can easily validate Panda's simple temporal\n # reception-rate check here.\n for msg in [MSG_LENKHILFE_3]:\n self.safety.set_controls_allowed(1)\n if msg == MSG_LENKHILFE_3:\n to_push = self._lenkhilfe_3_msg(0)\n self.assertTrue(self._rx(to_push))\n to_push[0].RDHR ^= 0xFF\n self.assertFalse(self._rx(to_push))\n self.assertFalse(self.safety.get_controls_allowed())\n\n # counter\n # reset wrong_counters to zero by sending valid messages\n for i in range(MAX_WRONG_COUNTERS + 1):\n self.__class__.cnt_lenkhilfe_3 += 1\n if i < MAX_WRONG_COUNTERS:\n self.safety.set_controls_allowed(1)\n self._rx(self._lenkhilfe_3_msg(0))\n else:\n self.assertFalse(self._rx(self._lenkhilfe_3_msg(0)))\n self.assertFalse(self.safety.get_controls_allowed())\n\n # restore counters for future tests with a couple of good messages\n for i in range(2):\n self.safety.set_controls_allowed(1)\n self._rx(self._lenkhilfe_3_msg(0))\n self.assertTrue(self.safety.get_controls_allowed())\n\n\nif __name__ == \"__main__\":\n unittest.main()\n" ]
[ [ "numpy.arange" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
Pandinosaurus/pyLiDAR-SLAM
[ "1baa21a67bd32f144f8e17583251ac777f81345e", "1baa21a67bd32f144f8e17583251ac777f81345e", "1baa21a67bd32f144f8e17583251ac777f81345e" ]
[ "slam/common/projection.py", "slam/dataset/rosbag_dataset.py", "slam/initialization.py" ]
[ "from typing import Optional, Any\n\nimport torch\nimport numpy as np\nfrom abc import ABC, abstractmethod\n\nfrom functools import lru_cache\nfrom slam.common.utils import check_tensor, assert_debug\n\n\ndef torch__spherical_projection(t_pointcloud: torch.Tensor,\n height: int,\n width: int,\n min_vertical_fov: float,\n max_vertical_fov: float,\n min_horizontal_fov: float = -180.0,\n max_horizontal_fov: float = 180.0) -> tuple:\n \"\"\"\n Computes a spherical projection of the points of a point cloud\n\n It will compute the pixel values of the points in t_pointcloud\n\n Parameters\n ----------\n t_pointcloud: torch.Tensor [B, N, 3]\n A batch of tensor to projection to spherical coordinates\n height: int\n the height of the destination image\n width: int\n the width of the destination image\n min_vertical_fov: float (angle in degrees)\n the field of view up of the image\n max_vertical_fov: float (angle in degrees)\n the field of view down of the image\n min_horizontal_fov: float (optional)\n the horizontal field of view left (of the image)\n max_horizontal_fov: float (optional)\n the horizontal field of view right (of the image)\n\n Returns\n -------\n (t_row, t_col) : pair of torch.Tensor of size [B, N] (torch.float32)\n t_row : the pixels' rows as a float for each point in the point cloud\n t_col : the pixels' cols as a float for each point in the point cloud\n \"\"\"\n check_tensor(t_pointcloud, [-1, -1, 3])\n fov_up = min_vertical_fov / 180.0 * np.pi\n fov_down = max_vertical_fov / 180.0 * np.pi\n fov = abs(fov_down) + abs(fov_up)\n\n # get depth of all points\n r = torch.norm(t_pointcloud, p=2, dim=2)\n\n # Define a mask of validity to avoid nan\n mask_0 = (r == 0.0).to(dtype=t_pointcloud.dtype)\n mask_valid = 1.0 - mask_0\n r = mask_0 * 0.001 + mask_valid * r\n\n x = t_pointcloud[:, :, 0]\n y = t_pointcloud[:, :, 1]\n z = t_pointcloud[:, :, 2]\n\n # compute angles\n theta = - torch.atan2(y, x)\n phi = torch.asin(z / r)\n\n proj_col = 0.5 * (theta / np.pi + 1.0)\n proj_row = 1.0 - (phi + abs(fov_down)) / fov\n\n proj_col *= width\n proj_row *= height\n\n return proj_row * mask_valid - mask_0, proj_col * mask_valid - mask_0, r * mask_valid\n\n\ndef xyz_conversion(t_point_cloud: (torch.Tensor, np.ndarray)) -> torch.Tensor:\n \"\"\"\n Extracts the xyz fields of a point cloud\n\n Parameters\n ----------\n t_point_cloud : A [B, N, C >= 3] or a [N, C >= 3] array\n Extracts the first three channels of a tensor\n\n >>> assert (xyz_conversion(np.array([[1.0, 2.0, 3.0, 4.0]])) - np.array([[1.0, 2.0, 3.0]]) == 0.0).all()\n \"\"\"\n if len(list(t_point_cloud.shape)) == 2:\n n, c = t_point_cloud.shape\n assert_debug(c >= 3)\n return t_point_cloud[:, :3]\n else:\n check_tensor(t_point_cloud, [-1, -1, -1])\n b, n, c = t_point_cloud.shape\n assert_debug(c >= 3)\n return t_point_cloud[:, :, :3]\n\n\ndef depth_conversion(t_point_cloud: (torch.Tensor, np.ndarray)) -> (torch.Tensor, np.ndarray):\n \"\"\"\n Extracts the depth from a point cloud\n\n Parameters\n ----------\n t_point_cloud : (torch.Tensor, np.ndarray) [B, N, 3] of [N, 3]\n A Point cloud which can be either a tensor or a numpy ndarray\n\n Returns\n -------\n (torch.Tensor, np.ndarray) : [B, N, 1]\n A Tensor of the same type as the input tensor\n >>> check_tensor(depth_conversion(torch.randn(4, 10, 3)), [4, 10, 1])\n >>> check_tensor(depth_conversion(np.random.randn(4, 10, 3)), [4, 10, 1])\n >>> check_tensor(depth_conversion(np.random.randn(40, 3)), [40, 1])\n \"\"\"\n if len(t_point_cloud.shape) == 2:\n 
assert_debug(isinstance(t_point_cloud, np.ndarray) and t_point_cloud.shape[1] >= 3)\n\n return np.linalg.norm(t_point_cloud, ord=2, axis=1, keepdims=True)\n\n else:\n check_tensor(t_point_cloud, [-1, -1, -1])\n if isinstance(t_point_cloud, np.ndarray):\n return np.linalg.norm(t_point_cloud[:, :, :3], ord=2, axis=2, keepdims=True)\n else:\n return torch.norm(t_point_cloud[:, :, :3], p=2, dim=2, keepdim=True)\n\n\ndef build_spherical_image(t_point_cloud: torch.Tensor,\n destination: torch.Tensor,\n min_vertical_fov: float,\n max_vertical_fov: float,\n min_horizontal_fov: float = -180.0,\n max_horizontal_fov: float = 180.0,\n conversion_function: callable = lambda x: x):\n \"\"\"\n Builds a Spherical Image from a Point Cloud in place\n\n Parameters\n ----------\n t_point_cloud: torch.Tensor [B, N, C >= 3]\n The first 3 channels corresponding to the coordinates X, Y, Z\n destination: torch.Tensor [B, C_dest, H, W]\n In which the image will be projected. The projection is done in place\n min_vertical_fov: float in [0.0, 180.0]\n The angle in degrees of the upward boundary of the fov\n max_vertical_fov: float in [min_vertical_fov, 180.0]\n The angle in degrees of the downward boundary of the fov\n min_horizontal_fov: float in [-180.0, 180.0]\n The angle in degrees of the leftward boundary of the fov\n max_horizontal_fov: float in [min_horizontal_fov, 180.0]\n The angle in degrees of the rightward boundary of the fov\n conversion_function: callable\n The function to convert a point cloud [B, N, C] into\n a point cloud with the specific channels to put in the image [B, N, C_dest]\n\n \"\"\"\n check_tensor(destination, [-1, 3, -1, -1])\n check_tensor(t_point_cloud, [-1, -1, -1])\n # Extract channels to put in destination\n channels_extracted = conversion_function(t_point_cloud)\n b, n, c = t_point_cloud.shape\n assert_debug(c >= 3, \"The point cloud must have at least 3 channels\")\n\n bp, c_dest, height, width = destination.shape\n assert_debug(bp == b, \"Mismatch between the batch size of the destination and the source point cloud\")\n\n proj_row, proj_col, depth = torch__spherical_projection(t_point_cloud[:, :, :3],\n height,\n width,\n min_vertical_fov,\n max_vertical_fov,\n min_horizontal_fov,\n max_horizontal_fov)\n proj_row = torch.floor(proj_row)\n proj_row = proj_row.clamp(min=0, max=height - 1)\n\n proj_col = torch.floor(proj_col)\n proj_col = proj_col.clamp(min=0, max=width - 1)\n\n b_idx = torch.arange(b, dtype=torch.int64, device=t_point_cloud.device).view(b, 1).expand(b, n).reshape(b * n)\n order = torch.argsort(depth, dim=1).reshape(b * n)\n proj_row = proj_row[b_idx, order].to(torch.int64)\n proj_col = proj_col[b_idx, order].to(torch.int64)\n destination[b_idx, :, proj_row, proj_col] = channels_extracted[b_idx, order, :]\n\n\nclass Projector(ABC):\n \"\"\"\n A Projector is an object which can project a PointCloud in an image\n And construct a PointCloud from a Depth image\n \"\"\"\n\n def __init__(self,\n transform: callable = lambda x: x,\n height: Optional[int] = None,\n width: Optional[int] = None):\n # The transform mapping a pointcloud to a array or tensor of color values\n # Used to construct an image from the point cloud\n self.transform = transform\n\n self.height: Optional[int] = height\n self.width: Optional[int] = width\n\n @abstractmethod\n def project_pointcloud(self, pointcloud: torch.Tensor, **kwargs) -> torch.tensor:\n \"\"\"\n Projects the points of a PointCloud tensor in the image plane\n\n Parameters\n ----------\n pointcloud : torch.Tensor [B, N, 3]\n A 
Pointcloud tensor (X, Y, Z) to be projected in the image plane\n\n kwargs :\n Additional arguments required for the projection\n\n Returns\n -------\n torch.Tensor\n The tensor of size [B, N, 2] of pixel values (as float) in the image plane\n The first coordinate is the pixel of the row, and the second the pixel coordinate in the columns\n When considering the image as a matrix. (The values can be outside of the image plane dimension)\n\n \"\"\"\n raise NotImplementedError(\"\")\n\n def project_normalized(self, pointcloud: torch.Tensor, height=None, width=None, **kwargs) -> torch.Tensor:\n \"\"\"\n\n Parameters\n ----------\n pointcloud : torch.Tensor\n The point cloud tensor [B, N, 3] to project in the image plane\n height : int\n The optional height of the image\n Uses member height if it is None\n width :\n The optional width of the image\n Uses member width if it is None\n kwargs\n\n Returns\n -------\n torch.Tensor [B, N, 2]\n A Tensor of pixels normalized between -1, 1\n\n \"\"\"\n height = self.swap(height=height)\n width = self.swap(width=width)\n pixels = self.project_pointcloud(pointcloud, height=height, width=width, **kwargs)\n rows = pixels[:, :, 0] * 2.0 / height\n cols = pixels[:, :, 1] * 2.0 / width\n pixels: torch.Tensor = (-1.0 + torch.cat([rows.unsqueeze(2), cols.unsqueeze(2)], dim=2))\n return pixels\n\n @abstractmethod\n def rescaled_projector(self, new_height: int, new_width: int):\n \"\"\"\n Parameters\n ----------\n new_height : int\n The new height of the projector\n new_width\n The new width of the projector\n Returns\n -------\n Projector\n A similar Projector, with its dimension reset to new_height and new_width\n And its appropriate parameters reset (intrinsics)\n\n \"\"\"\n raise NotImplementedError(\"\")\n\n def rescale_intrinsics(self, new_height: int, new_width: int, **kwargs) -> Any:\n \"\"\"\n Rescales the intrinsics parameters of the projection from the arguments\n\n Parameters\n ----------\n new_height : int\n The height of the new image\n new_width : int\n The width of the new image\n kwargs\n arguments to rescale\n (Depends on the type of the projector)\n\n Returns\n -------\n Any\n The intrinsics rescaled : depends on the type of Projector\n\n \"\"\"\n raise NotImplementedError(\"\")\n\n @abstractmethod\n def set_projection_params(self, height: int = None, width: int = None, transform: callable = None, **kwargs):\n \"\"\"\n Reads projection params from the arguments and set the appropriate parameters\n All named arguments are optional, and will only be set if they are not None\n\n Parameters\n ----------\n height : int\n The height of the image created from a point cloud\n width : int\n The width of the image created from a point cloud\n transform : callable\n The transformation applied to a pointcloud to extract color channels to build\n The projection image from\n\n **kwargs : other variables\n\n \"\"\"\n if height is not None:\n self.height = height\n if width is not None:\n self.width = width\n if transform is not None:\n self.transform = transform\n\n def swap(self, **kwargs):\n for key, value in kwargs.items():\n assert_debug(hasattr(self, key))\n if value is None:\n member_value = getattr(self, key)\n assert_debug(member_value is not None)\n value = member_value\n return value\n\n def build_projection_map(self,\n pointcloud: torch.Tensor,\n default_value: float = 0.0,\n height: Optional[int] = None,\n width: Optional[int] = None,\n transform: Optional[callable] = None,\n **kwargs) -> torch.Tensor:\n \"\"\"\n Builds a projection image from a 
PointCloud (torch.Tensor)\n\n Parameters\n ----------\n pointcloud : torch.Tensor\n A [B, N, C>=3] torch.Tensor with the first 3 channels the cartesian coordinates X, Y, Z\n default_value : float\n The default value for the image being built\n height : int\n Optional value of the height of the image created\n (If it is None, the member height will be used)\n width : int\n Optional value of the width of the image created\n (If it is None, the member height will be used)\n transform : Optional callable\n The function called on a point cloud which maps the input pointcloud\n to the channels desired in the image created.\n Transforms a [B, N, C] pointcloud to a [B, N, C_dest] point cloud\n kwargs\n\n Returns\n -------\n torch.Tensor : [B, C_dest, height, width]\n An image of size (height, width)\n (Either the height and width of the parameters or the member height and width)\n\n \"\"\"\n height = self.swap(height=height)\n width = self.swap(width=width)\n transform = self.swap(transform=transform)\n\n check_tensor(pointcloud, [-1, -1, -1])\n b, n, _ = pointcloud.shape\n image_channels = pointcloud\n if transform is not None:\n image_channels = transform(image_channels)\n c_dest = image_channels.shape[2]\n\n # Build destination tensor\n if default_value == 0.:\n destination_image = torch.zeros(pointcloud.size(0),\n c_dest,\n height,\n width,\n device=pointcloud.device,\n dtype=pointcloud.dtype)\n else:\n destination_image = torch.ones(pointcloud.size(0),\n c_dest,\n height,\n width,\n device=pointcloud.device,\n dtype=pointcloud.dtype) * default_value\n\n pixels = self.project_pointcloud(pointcloud, height=height, width=width, **kwargs)\n r = pointcloud.norm(dim=2)\n pixel_rows = pixels[:, :, 0].round()\n pixel_cols = pixels[:, :, 1].round()\n\n invalidity_mask = ~((pixel_rows[:] >= 0.0) * \\\n (pixel_rows[:] <= (height - 1)) * \\\n (pixel_cols[:] >= 0.0) * \\\n (pixel_cols[:] <= (width - 1)))\n\n b_idx = torch.arange(b, dtype=torch.int64, device=pointcloud.device).view(b, 1).expand(b, n)\n r[invalidity_mask] = -1.0\n order = torch.argsort(r, dim=1, descending=True)\n order = order.reshape(b, n)\n b_idx = b_idx.reshape(b, n)\n\n mask = r[b_idx, order] > 0.0\n\n order = order[mask]\n b_idx = b_idx[mask]\n proj_row = pixel_rows[b_idx, order].to(torch.int64)\n proj_col = pixel_cols[b_idx, order].to(torch.int64)\n destination_image[b_idx, :, proj_row, proj_col] = image_channels[b_idx, order, :]\n\n # TODO DEAL WITH [0, 0] coordinates clamping problem\n return destination_image\n\n\n@lru_cache(maxsize=10)\ndef torch_ones(b: int, n: int, dtype: torch.dtype, device: torch.device):\n return torch.ones(b, n, 1, dtype=dtype, device=device)\n\n\nclass SphericalProjector(Projector):\n \"\"\"\n A SphericalProjector projects a pointcloud in a spherical image\n\n Parameters\n ----------\n up_fov : float\n The field of view upward in degrees [-90, 90]\n down_fov : float\n The field of view downward in degrees [-90, up_vertical_fov]\n\n \"\"\"\n\n def __init__(self,\n height: Optional[int] = None,\n width: Optional[int] = None,\n num_channels: Optional[int] = None,\n up_fov: Optional[float] = None,\n down_fov: Optional[float] = None,\n conversion: Optional[callable] = xyz_conversion):\n super().__init__(transform=conversion, height=height, width=width)\n self.num_channels = num_channels\n self.up_fov = up_fov\n self.down_fov = down_fov\n self.conversion = conversion\n\n def project_pointcloud(self,\n pointcloud: torch.Tensor,\n height: Optional[int] = None,\n width: Optional[int] = None,\n up_fov: 
Optional[float] = None,\n down_fov: Optional[float] = None, **kwargs) -> torch.tensor:\n \"\"\"\n Project the pointcloud in the Spherical image\n\n Parameters\n ----------\n pointcloud : torch.Tensor [B, N, K>=3]\n height: Optional[int]\n The height of the spherical image built\n width: Optional[int]\n The width of the spherical image built\n up_fov: Optional[float]\n down_fov: Optional[float]\n\n Returns\n -------\n pixel_tensor : torch.Tensor [B, N, 2]\n The pixel tensor of the pointcloud projected in the Spherical image plane\n First coordinates are the row values, Second are the column values\n\n \"\"\"\n check_tensor(pointcloud, [-1, -1, -1])\n height: int = self.swap(height=height)\n width = self.swap(width=width)\n up_fov = self.swap(up_fov=up_fov)\n down_fov = self.swap(down_fov=down_fov)\n t_rows, t_cols, r = torch__spherical_projection(pointcloud[:, :, :3], height, width, up_fov, down_fov)\n return torch.cat([t_rows.unsqueeze(2), t_cols.unsqueeze(2)], dim=2)\n\n def rescaled_projector(self, new_height: int, new_width: int):\n \"\"\"\n Returns a rescaled Spherical projector\n \"\"\"\n return SphericalProjector(height=new_height,\n width=new_width,\n num_channels=self.num_channels,\n up_fov=self.up_fov,\n down_fov=self.down_fov,\n conversion=self.conversion)\n\n def rescale_intrinsics(self, new_height: int, new_width: int, **kwargs) -> Any:\n \"\"\"\n The Spherical projection does not need to rescale its intrinsics parameters\n \"\"\"\n raise NotImplementedError(\"\")\n\n def set_projection_params(self, up_fov: float = None, down_fov: float = None, **kwargs):\n super().set_projection_params(**kwargs)\n if up_fov is not None:\n self.up_fov = up_fov\n if down_fov is not None:\n self.down_fov = down_fov\n", "import logging\nfrom dataclasses import MISSING\nfrom pathlib import Path\nfrom typing import Optional, Tuple\nimport os\n\nfrom torch.utils.data import IterableDataset\nimport numpy as np\n\nfrom hydra.conf import field, dataclass\nfrom hydra.core.config_store import ConfigStore\nfrom omegaconf import DictConfig, OmegaConf\n\nfrom slam.common.projection import SphericalProjector\nfrom slam.common.utils import assert_debug, remove_nan\nfrom slam.dataset import DatasetLoader, DatasetConfig\n\ntry:\n import rosbag\n import sensor_msgs.point_cloud2 as pc2\n from sensor_msgs.msg import PointCloud2, PointField\n\n _with_rosbag = True\nexcept ImportError:\n _with_rosbag = False\n\nif _with_rosbag:\n\n @dataclass\n class RosbagConfig(DatasetConfig):\n \"\"\"Config for a Rosbag Dataset\"\"\"\n dataset: str = \"rosbag\"\n file_path: str = field(\n default_factory=lambda: \"\" if not \"ROSBAG_PATH\" in os.environ else os.environ[\"ROSBAG_PATH\"])\n main_topic: str = \"numpy_pc\" # The Key of the main topic (which determines the number of frames)\n xyz_fields: str = \"xyz\"\n\n accumulate_scans: bool = False # Whether to accumulate the pointcloud messages (in case of raw sensor data)\n frame_size: int = 60 # The number of accumulated message which constitute a frame\n\n topic_mapping: dict = field(default_factory=lambda: {})\n\n lidar_height: int = 720\n lidar_width: int = 720\n up_fov: float = 45.\n down_fov: float = -45.\n\n\n class RosbagDataset(IterableDataset):\n \"\"\"A Dataset which wraps a RosBag\n\n Note:\n The dataset can only read data sequentially, and will raise an error when two calls are not consecutives\n\n Args:\n file_path (str): The path on disk to the rosbag\n main_topic (str): The name of the main topic (which sets the number of frames to be extracted)\n frame_size 
(int): The number of messages to accumulate in a frame\n            topic_mapping (dict): The mapping from topic name to key in the data_dict\n        \"\"\"\n\n        def _lazy_initialization(self, prefix: str = \"\"):\n            if not self.initialized:\n                logging.info(f\"[RosbagDataset]{prefix}Loading ROSBAG {self.file_path}. May take some time\")\n                self.rosbag = rosbag.Bag(self.file_path, \"r\")\n                logging.info(\"Done.\")\n\n                topic_info = self.rosbag.get_type_and_topic_info()\n                for topic in self.topic_mapping:\n                    assert_debug(topic in topic_info.topics,\n                                 f\"{topic} is not a topic of the rosbag \"\n                                 f\"(existing topics: {list(topic_info.topics.keys())})\")\n                self._len = self.rosbag.get_message_count(self.main_topic) // self._frame_size\n                self.initialized = True\n\n        def init(self):\n            self._lazy_initialization()\n\n        def __init__(self, config: RosbagConfig, file_path: str, main_topic: str, frame_size: int,\n                     topic_mapping: Optional[dict] = None):\n            self.config = config\n            self.rosbag = None\n            self.initialized = False\n            assert_debug(Path(file_path).exists(), f\"The path to {file_path} does not exist.\")\n            self.file_path = file_path\n            self.topic_mapping = topic_mapping if topic_mapping is not None else {}\n            if main_topic not in self.topic_mapping:\n                self.topic_mapping[main_topic] = \"numpy_pc\"\n            self.main_topic: str = main_topic\n            self.frame_size = frame_size\n            self._frame_size: int = frame_size if self.config.accumulate_scans else 1\n            self._len = -1\n            self._idx = 0\n            self._topics = list(self.topic_mapping.keys())\n            self.__iter = None\n\n        def __iter__(self):\n            self._lazy_initialization(\"[ITER]\")\n            self.__iter = self.rosbag.read_messages(self._topics)\n            self._idx = 0\n            return self\n\n        @staticmethod\n        def decode_pointcloud(msg: pc2.PointCloud2, timestamp, xyz_fieldname: str = \"xyz\") -> Tuple[\n                Optional[np.ndarray], Optional[np.ndarray]]:\n            assert_debug(\"PointCloud2\" in msg._type)\n            pc = np.array(list(pc2.read_points(msg, field_names=xyz_fieldname)))\n            # Convert the ROS timestamp to nanoseconds (1 second = 1e9 nanoseconds)\n            timestamps = np.ones((pc.shape[0],),\n                                 dtype=np.float64) * (float(timestamp.secs) * 1e9 + timestamp.nsecs)\n            return pc, timestamps\n\n        def aggregate_messages(self, data_dict: dict):\n            \"\"\"Aggregates the point clouds of the main topic\"\"\"\n            main_key = self.topic_mapping[self.main_topic]\n            pcs = data_dict[main_key]\n            data_dict[main_key] = np.concatenate(pcs, axis=0)\n            timestamps_topic = f\"{main_key}_timestamps\"\n            if timestamps_topic in data_dict:\n                data_dict[timestamps_topic] = np.concatenate(data_dict[timestamps_topic], axis=0)\n            return data_dict\n\n        def _save_topic(self, data_dict, key, topic, msg, t, **kwargs):\n            if \"PointCloud2\" in msg._type:\n                data, timestamps = self.decode_pointcloud(msg, t)\n                data_dict[key].append(data)\n                timestamps_key = f\"{key}_timestamps\"\n                if timestamps_key not in data_dict:\n                    data_dict[timestamps_key] = []\n                data_dict[timestamps_key].append(timestamps)\n\n        def __getitem__(self, index) -> dict:\n            self._lazy_initialization(\"[GETITEM]\")\n            assert_debug(index == self._idx, \"A RosbagDataset does not support random access\")\n            assert isinstance(self.config, RosbagConfig)\n            if self.__iter is None:\n                self.__iter__()\n\n            data_dict = {key: [] for key in self.topic_mapping.values()}\n            main_topic_key = self.topic_mapping[self.main_topic]\n\n            # Append messages until the main topic has the required number of messages\n            while len(data_dict[main_topic_key]) < self._frame_size:\n                topic, msg, t = next(self.__iter)\n                _key = self.topic_mapping[topic]\n                self._save_topic(data_dict, _key, topic, msg, t, frame_index=index)\n\n            self._idx += 1\n            # Aggregate data\n            
data_dict = self.aggregate_messages(data_dict)\n return data_dict\n\n def __next__(self):\n return self[self._idx]\n\n def __len__(self):\n self._lazy_initialization(\"[LEN]\")\n return self._len\n\n def close(self):\n if self.initialized:\n if self.rosbag is not None:\n self.rosbag.close()\n del self.rosbag\n self.rosbag = None\n self.initialized = False\n self._len = -1\n self._idx = 0\n self.__iter = None\n\n def __del__(self):\n self.close()\n\n\n # Hydra -- stores a RosbagConfig `rosbag` in the `dataset` group\n cs = ConfigStore.instance()\n cs.store(group=\"dataset\", name=\"rosbag\", node=RosbagConfig)\n\n\n class RosbagDatasetConfiguration(DatasetLoader):\n \"\"\"Returns the configuration of a Dataset built for ROS\"\"\"\n\n def __init__(self, config: RosbagConfig, **kwargs):\n if isinstance(config, DictConfig):\n config = RosbagConfig(**config)\n super().__init__(config)\n\n @classmethod\n def max_num_workers(cls):\n return 1\n\n def projector(self) -> SphericalProjector:\n return SphericalProjector(height=self.config.lidar_height, width=self.config.lidar_width,\n up_fov=self.config.up_fov, down_fov=self.config.down_fov)\n\n def sequences(self):\n assert isinstance(self.config, RosbagConfig)\n file_path = self.config.file_path\n dataset = RosbagDataset(self.config, file_path, self.config.main_topic,\n self.config.frame_size,\n OmegaConf.to_container(self.config.topic_mapping) if isinstance(\n self.config.topic_mapping, DictConfig) else self.config.topic_mapping)\n\n return ([dataset], [Path(file_path).stem]), None, None, lambda x: x\n\n def get_ground_truth(self, sequence_name):\n \"\"\"No ground truth can be read from the ROSBAG\"\"\"\n return None\n", "from abc import ABC, abstractmethod\nfrom enum import Enum\nfrom pathlib import Path\nfrom typing import Dict, Optional, Any\nimport numpy as np\n\n# Hydra and omegaconf\nfrom hydra.core.config_store import ConfigStore\nfrom omegaconf import DictConfig, MISSING, OmegaConf\nfrom hydra.conf import field, dataclass\n\n# Project imports\nfrom slam.common.pose import Pose\nfrom slam.common.timer import *\nfrom slam.common.modules import _with_cv2\nfrom slam.common.utils import assert_debug, ObjectLoaderEnum\nfrom slam.training.prediction_modules import _PoseNetPredictionModule\n\nif _with_cv2:\n import cv2\n\n from slam.common.registration import ElevationImageRegistration\n\n\n# ----------------------------------------------------------------------------------------------------------------------\n@dataclass\nclass InitializationConfig:\n \"\"\"The Initialization Config for registration based SLAM\"\"\"\n type: str = MISSING\n\n\n# ----------------------------------------------------------------------------------------------------------------------\nclass Initialization(ABC):\n \"\"\"The initialization module provides for each frame a prior estimate of the relative motion\n\n Each child class adds an [4, 4] numpy ndarray of the relative pose prediction\n to the data_dict with key \"Initialization.initial_pose_key()\"\n \"\"\"\n\n def __init__(self, config: InitializationConfig, **kwargs):\n super().__init__()\n self.config = config\n\n @staticmethod\n def initial_pose_key():\n \"\"\"Returns the key where the initial pose estimate is saved\"\"\"\n return \"init_rpose\"\n\n @abstractmethod\n def init(self):\n \"\"\"Initializes the Algorithm ()\"\"\"\n raise NotImplementedError(\"\")\n\n def next_frame(self, data_dict: dict, **kwargs):\n data_dict[self.initial_pose_key()] = self.next_initial_pose(data_dict=data_dict, **kwargs)\n\n 
@abstractmethod\n def next_initial_pose(self, data_dict: Optional[dict] = None, **kwargs):\n \"\"\"Initializes the Algorithm ()\"\"\"\n return None\n\n @abstractmethod\n def save_real_motion(self, new_pose: np.ndarray, data_dict: dict):\n \"\"\"Saves the real new motion into the algorithm\"\"\"\n raise NotImplementedError(\"\")\n\n\n# ----------------------------------------------------------------------------------------------------------------------\n@dataclass\nclass NIConfig(InitializationConfig):\n \"\"\"The configuration without initialization\"\"\"\n type: str = \"ni\"\n\n\n# ----------------------------------------------------------------------------------------------------------------------\nclass NoInitialization(Initialization):\n \"\"\"Initialize motion with identity\"\"\"\n\n def __init__(self, config: InitializationConfig, **kwargs):\n super().__init__(config)\n\n def init(self):\n \"\"\"Sets the predicted motion as the identity systematically\"\"\"\n pass\n\n def next_initial_pose(self, data_dict: Optional[dict] = None, **kwargs):\n \"\"\"Returns the identity\"\"\"\n return None\n\n def save_real_motion(self, relative_pose: torch.Tensor, data_dict: dict):\n \"\"\"No actions required\"\"\"\n pass\n\n\n# ----------------------------------------------------------------------------------------------------------------------\n@dataclass\nclass CVConfig(InitializationConfig):\n \"\"\"The configuration without initialization\"\"\"\n type: str = \"cv\"\n\n\n# ----------------------------------------------------------------------------------------------------------------------\nclass ConstantVelocityInitialization(Initialization):\n \"\"\"A Constant Velocity model for initialization (returns the last registered relative pose at each time step)\"\"\"\n\n def __init__(self, config: CVConfig, pose: Pose, device: torch.device = torch.device(\"cpu\"), **kwargs):\n super().__init__(config)\n self.pose = pose\n self.device = device\n self.initial_estimate = None\n\n def init(self):\n self.initial_estimate = np.eye(4)\n\n def next_initial_pose(self, **kwargs):\n return self.initial_estimate\n\n def save_real_motion(self, relative_pose: np.ndarray, data_dict: dict):\n self.initial_estimate = relative_pose\n\n\n# ----------------------------------------------------------------------------------------------------------------------\nif _with_cv2:\n\n @dataclass\n class EIConfig(InitializationConfig):\n \"\"\"Config for Elevation Image feature based 2D alignment\"\"\"\n type: str = \"ei\"\n debug: bool = False\n ni_if_failure: bool = False\n registration_config: DictConfig = field(default_factory=lambda: DictConfig({}))\n\n\n class ElevationImageInitialization(Initialization):\n \"\"\"Initialize motion by resolving a planar motion registration\"\"\"\n\n def __init__(self, ei_config: EIConfig, pose: Pose, device: torch.device = torch.device(\"cpu\"), **kwargs):\n super().__init__(ei_config)\n self.pose = pose\n self.device = device\n\n self.next_estimate = None\n\n # Local variables\n self._previous_kpts = None\n self._previous_desc = None\n self._previous_image = None\n\n self.algorithm = ElevationImageRegistration(DictConfig(ei_config.registration_config))\n self.debug = ei_config.debug\n self.ni_if_failure = ei_config.ni_if_failure\n\n if self.debug:\n self._previous_pc = None\n cv2.namedWindow(\"matches\", cv2.WINDOW_NORMAL | cv2.WINDOW_KEEPRATIO)\n\n def __del__(self):\n if hasattr(self, \"debug\") and self.debug:\n cv2.destroyWindow(\"matches\")\n\n def init(self):\n self.next_estimate = 
np.eye(4)\n # Local variables\n self._previous_kpts = None\n self._previous_desc = None\n self._previous_image = None\n\n def next_initial_pose(self, data_dict: Optional[dict] = None, **kwargs):\n assert_debug(data_dict is not None and \"numpy_pc_0\" in data_dict)\n next_estimate = self.next_estimate\n\n # Convert new vmap to numpy\n pc_numpy = data_dict[\"numpy_pc_0\"]\n\n # Build elevation image\n image, kpts, desc, _ = self.algorithm.compute_features(pc_numpy)\n\n # Extract KeyPoints and descriptors\n result = None\n if self._previous_image is not None and len(kpts) > 50:\n result, inliers, inliers_matches = self.algorithm.align_2d(self._previous_kpts, self._previous_desc,\n kpts, desc,\n self._previous_image, image)\n if result is not None:\n next_estimate = result\n\n if self.debug:\n matches_image = cv2.drawMatches(self._previous_image, self._previous_kpts,\n image, kpts, inliers_matches, None)\n cv2.imshow(\"matches\", matches_image)\n cv2.waitKey(5)\n\n if desc is not None:\n self._previous_kpts = kpts\n self._previous_desc = desc\n self._previous_image = image\n\n return next_estimate\n\n def save_real_motion(self, relative_pose: np.ndarray, data_dict: dict):\n if not self.ni_if_failure:\n self.next_estimate = relative_pose\n\n\n# ----------------------------------------------------------------------------------------------------------------------\n@dataclass\nclass PNConfig(InitializationConfig):\n \"\"\"\n The Initialization config for PoseNet\n\n TODO : Refactor to avoid repetition with PoseNet for Odometry\n \"\"\"\n type: str = \"posenet\"\n\n train_dir: str = MISSING\n checkpoint_file: str = \"checkpoint.ckp\" # Default checkpoint file generated by trainer\n train_config_file: str = \"config.yaml\" # Default config file generated by trainer\n\n prediction: Dict[str, Any] = MISSING\n\n\nclass PoseNetInitialization(Initialization):\n \"\"\"Initialization using a PoseNet for LiDAR odometry\"\"\"\n\n def __init__(self, config: PNConfig, pose: Pose, device: torch.device = torch.device(\"cpu\"), **kwargs):\n super().__init__(config)\n self.device = device\n self.pose = pose\n\n # Loads the train config from the disk\n # TODO refactor\n train_dir = Path(config.train_dir)\n assert_debug(train_dir.exists())\n train_config_path = train_dir / config.train_config_file\n checkpoint_path = train_dir / config.checkpoint_file\n assert_debug(train_config_path.exists() and checkpoint_path.exists())\n self.checkpoint_path = str(checkpoint_path)\n\n # Reads the prediction config from the dict\n with open(str(train_config_path), \"r\") as stream:\n train_config = OmegaConf.load(stream)\n prediction_config: DictConfig = train_config[\"training\"][\"prediction\"]\n\n # Construct the Prediction module from the config read from disk\n self.prediction_module = _PoseNetPredictionModule(prediction_config,\n pose=self.pose)\n self.prediction_module = self.prediction_module.to(self.device)\n\n checkpoint_file = config.checkpoint_file\n self.train_dir = Path(config.train_dir)\n self.checkpoint_file = self.train_dir / checkpoint_file\n assert_debug(self.checkpoint_file.exists())\n\n # ----------------------\n # Local variable\n self.previous_vertex_map = None\n self._iter = 0\n self.relative_poses = []\n\n def init(self):\n self.relative_poses = []\n self._iter = 0\n\n # Load PoseNet params from disk\n state_dict = torch.load(str(self.checkpoint_file))\n self.prediction_module.load_state_dict(state_dict[\"prediction_module\"])\n\n def next_initial_pose(self, data_dict: Optional[dict] = None, **kwargs):\n 
vertex_map = data_dict[\"vertex_map\"]\n if self.previous_vertex_map is None:\n estimate = torch.eye(4, dtype=torch.float32, device=self.device).reshape(1, 4, 4)\n else:\n with torch.no_grad():\n input_ = torch.cat([self.previous_vertex_map, vertex_map], dim=0).unsqueeze(0)\n estimate_params = self.prediction_module(dict(vertex_map=input_))[\"pose_params\"]\n estimate = self.pose.build_pose_matrix(estimate_params)\n\n self.previous_vertex_map = vertex_map\n return estimate\n\n def save_real_motion(self, new_pose: torch.Tensor, data_dict: dict):\n pass\n\n\n# ----------------------------------------------------------------------------------------------------------------------\n\n# Hydra Config Store : for the group odometry/initialization\ncs = ConfigStore.instance()\ncs.store(group=\"slam/initialization\", name=\"CV\", node=CVConfig)\ncs.store(group=\"slam/initialization\", name=\"PoseNet\", node=PNConfig)\ncs.store(group=\"slam/initialization\", name=\"NI\", node=NIConfig())\n\nif _with_cv2:\n cs.store(group=\"slam/initialization\", name=\"EI\", node=EIConfig)\n\n\n# ----------------------------------------------------------------------------------------------------------------------\nclass INITIALIZATION(ObjectLoaderEnum, Enum):\n \"\"\"A Convenient enum to load the Algorithm from a config dictionary\"\"\"\n\n ni = (NoInitialization, NIConfig)\n cv = (ConstantVelocityInitialization, CVConfig)\n posenet = (PoseNetInitialization, PNConfig)\n\n if _with_cv2:\n ei = (ElevationImageInitialization, EIConfig)\n\n @classmethod\n def type_name(cls):\n return \"type\"\n" ]
[ [ "torch.norm", "torch.ones", "torch.floor", "numpy.linalg.norm", "torch.asin", "torch.arange", "torch.argsort", "torch.atan2" ], [ "numpy.concatenate", "numpy.ones" ], [ "numpy.eye" ] ]
[ { "matplotlib": [], "numpy": [ "1.10", "1.12", "1.11", "1.19", "1.24", "1.13", "1.16", "1.9", "1.18", "1.23", "1.21", "1.22", "1.20", "1.7", "1.15", "1.14", "1.17", "1.8" ], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
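The initialization classes in the record above center on a constant-velocity motion prior: whatever relative pose was estimated for the previous frame is replayed as the initial guess for the next one. A minimal standalone sketch of that pattern, using plain numpy only (the record's Pose type, Hydra configs and torch device handling are deliberately left out):

import numpy as np

class ConstantVelocityPrior:
    """Replays the last observed relative pose as the next initial estimate."""

    def __init__(self):
        self.initial_estimate = np.eye(4)  # identity until a real motion is registered

    def next_initial_pose(self) -> np.ndarray:
        return self.initial_estimate

    def save_real_motion(self, relative_pose: np.ndarray) -> None:
        # After registration succeeds, keep the measured motion for the next frame
        self.initial_estimate = relative_pose

prior = ConstantVelocityPrior()
print(prior.next_initial_pose())          # identity on the first frame
prior.save_real_motion(np.eye(4) + 0.01)  # pretend registration result, not a valid SE(3) pose
print(prior.next_initial_pose())          # replayed on the second frame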
liuzhenhai/numba
[ "a41c85fdd7d6abf8ea1ebe9116939ddc2217193b", "a41c85fdd7d6abf8ea1ebe9116939ddc2217193b", "a41c85fdd7d6abf8ea1ebe9116939ddc2217193b", "a41c85fdd7d6abf8ea1ebe9116939ddc2217193b", "a41c85fdd7d6abf8ea1ebe9116939ddc2217193b" ]
[ "numba/tests/test_diffusion.py", "numba/tests/issues/test_issue_50.py", "numba/tests/test_mandelbrot_2.py", "numba/tests/issues/test_issue_164.py", "numba/tests/broken_issues/test_log1p_vectorize.py" ]
[ "import unittest\n\nimport numpy as np\nfrom numba import autojit \n\nmu = 0.1\nLx, Ly = 101, 101\n\n@autojit \ndef diffusionObstacleStep(u,tempU,iterNum):\n for n in range(iterNum):\n for i in range(1, Lx - 1):\n for j in range(1, Ly - 1):\n u[i,j] = mu * (tempU[i+1,j]-2*tempU[i,j]+tempU[i-1,j] +\n tempU[i,j+1]-2*tempU[i,j]+tempU[i,j-1])\n\n # Bug in Meta??\n # tempU, u = u, tempU\n # -> Assign(targets=[Name(id='tempU', ctx=Store()),\n # Name(id='u', ctx=Store())],\n # value=Name(id='u', ctx=Load()))\n temp = u\n u = tempU\n tempU = temp\n\ndef get_arrays():\n u = np.zeros([Lx, Ly], dtype=np.float64)\n tempU = np.zeros([Lx, Ly], dtype=np.float64)\n u[Lx // 2, Ly // 2] = 1000.0\n return tempU, u\n\ndef test_diffusion():\n tempU, u = get_arrays()\n iterNum = 10\n diffusionObstacleStep(u, tempU, iterNum)\n\n tempU_numpy, u_numpy = get_arrays()\n diffusionObstacleStep.py_func(u_numpy, tempU_numpy, iterNum)\n\n print(u)\n print(u_numpy)\n assert np.allclose(u, u_numpy)\n\nif __name__ == \"__main__\":\n test_diffusion()\n", "# -*- coding: utf-8 -*-\nfrom __future__ import print_function, division, absolute_import\n\nfrom numba import jit\nfrom numpy import zeros\n\nimport unittest\n\n@jit()\ndef test():\n foo = zeros((1,))\n foo[0] = 0\n\n@jit()\ndef test2():\n foo = [0]\n foo[0] = 0\n\nclass TestIssue50(unittest.TestCase):\n def test_1d_arr_setitem(self):\n self.assertEqual(test(), None)\n\n def test_list_setitem(self):\n self.assertEqual(test2(), None)\n\nif __name__ == \"__main__\":\n unittest.main()\n", "#! /usr/bin/env python\n'''test_mandelbrot_2\n\nTest the Numba compiler on several variants of Mandelbrot set membership\ncomputations.\n'''\nfrom numba import *\nimport unittest\nimport numpy as np\nfrom numba.testing import test_support\n\n\ndef mandel_1(real_coord, imag_coord, max_iters):\n '''Given the real and imaginary parts of a complex number,\n determine if it is a candidate for membership in the Mandelbrot\n set given a fixed number of iterations.\n Inspired by code at http://wiki.cython.org/examples/mandelbrot\n '''\n # Ideally we'd want to use a for loop, but we'll need to be able\n # to detect and desugar for loops over range/xrange/arange first.\n i = 0\n z_real = 0.\n z_imag = 0.\n while i < max_iters:\n z_real_n = z_real * z_real - z_imag * z_imag + real_coord\n z_imag = 2. 
* z_real * z_imag + imag_coord\n z_real = z_real_n\n if (z_real * z_real + z_imag * z_imag) >= 4:\n return i\n i += 1\n return -1\n\nmandel_1c = jit('i4(f8,f8,i4)')(mandel_1)\n\ndef mandel_driver_1(min_x, max_x, min_y, nb_iterations, colors, image):\n nb_colors = len(colors)\n width = image.shape[0]\n height = image.shape[1]\n pixel_size = (max_x - min_x) / width\n for x in range(width):\n real = min_x + x * pixel_size\n for y in range(height):\n imag = min_y + y * pixel_size\n # For the following to actually compile, mandel_1 must\n # have already been compiled.\n color = mandel_1(real, imag, nb_iterations)\n\n # Would prefer the following, just to show off:\n # image[x, y, :] = colors[color % nb_colors]\n # But that'd require Numba to handle slicing (it doesn't\n # at the time this version was writen), and it wouldn't\n # have the type information about the shape.\n\n col_index = color % nb_colors # Ohh for wont of CSE...\n image[x, y, 0] = colors[col_index, 0]\n image[x, y, 1] = colors[col_index, 1]\n image[x, y, 2] = colors[col_index, 2]\n\nmandel_driver_1c = jit('void(f8,f8,f8,i4,u1[:,:],u1[:,:,:])')(\n mandel_driver_1)\n\n\ndef make_palette():\n '''Shamefully stolen from\n http://wiki.cython.org/examples/mandelbrot, though we did correct\n their spelling mistakes (*smirk*).'''\n colors = []\n for i in range(0, 25):\n colors.append( (i*10, i*8, 50 + i*8), )\n for i in range(25, 5, -1):\n colors.append( (50 + i*8, 150+i*2, i*10), )\n for i in range(10, 2, -1):\n colors.append( (0, i*15, 48), )\n return np.array(colors, dtype=np.uint8)\n\n\ndef mandel_2(x, max_iterations):\n z = complex(0)\n for i in range(max_iterations):\n z = z**2 + x\n if abs(z) >= 2:\n return i\n return -1\n\nmandel_2c = jit(i4(c16,i4))(mandel_2)\n\ndef mandel_driver_2(min_x, max_x, min_y, nb_iterations, colors, image):\n nb_colors = len(colors)\n width = image.shape[0]\n height = image.shape[1]\n pixel_size = (max_x - min_x) / width\n dy = pixel_size * 1j\n for x in range(width):\n coord = complex(min_x + x * pixel_size, min_y)\n for y in range(height):\n color = mandel_2(coord, nb_iterations)\n image[x,y,:] = colors[color % nb_colors,:]\n coord += dy\n\nmandel_driver_2c = jit(void(f8,f8,f8,i4,u1[:,:],u1[:,:,:]))(mandel_driver_2)\n\n\ndef benchmark(dx = 500, dy = 500):\n import time\n min_x = -1.5\n max_x = 0\n min_y = -1.5\n colors = make_palette()\n nb_iterations = colors.shape[0]\n img0 = np.zeros((dx, dy, 3), dtype=np.uint8) + 125\n start = time.time()\n mandel_driver_1(min_x, max_x, min_y, nb_iterations, colors, img0)\n dt0 = time.time() - start\n img1 = np.zeros((dx, dy, 3), dtype=np.uint8) + 125\n start = time.time()\n mandel_driver_1c(min_x, max_x, min_y, nb_iterations, colors, img1)\n dt1 = time.time() - start\n img2 = np.zeros((dx, dy, 3), dtype=np.uint8) + 125\n start = time.time()\n mandel_driver_2(min_x, max_x, min_y, nb_iterations, colors, img2)\n dt2 = time.time() - start\n img3 = np.zeros((dx, dy, 3), dtype=np.uint8) + 125\n start = time.time()\n mandel_driver_2c(min_x, max_x, min_y, nb_iterations, colors, img3)\n dt3 = time.time() - start\n return (dt0, dt1, dt2, dt3), (img0, img1, img2, img3)\n\n\nclass TestMandelbrot(unittest.TestCase):\n def test_mandel_1_sanity(self):\n self.assertEqual(mandel_1c(0., 0., 20), -1)\n\n def test_mandel_1(self):\n vals = np.arange(-1., 1.000001, 0.1)\n for real in vals:\n for imag in vals:\n self.assertEqual(mandel_1(real, imag, 20),\n mandel_1c(real, imag, 20))\n\n def test_mandel_driver_1(self):\n palette = make_palette()\n control_image = np.zeros((50, 50, 3), 
dtype = np.uint8)\n mandel_driver_1(-1., 1., -1., len(palette), palette, control_image)\n test_image = np.zeros_like(control_image)\n self.assertTrue((control_image - test_image == control_image).all())\n mandel_driver_1c(-1., 1., -1., len(palette), palette, test_image)\n image_diff = control_image - test_image\n self.assertTrue((image_diff == 0).all())\n\n def test_mandel_driver_2(self):\n palette = make_palette()\n control_image = np.zeros((50, 50, 3), dtype = np.uint8)\n mandel_driver_2(-1., 1., -1., len(palette), palette, control_image)\n test_image = np.zeros_like(control_image)\n self.assertTrue((control_image - test_image == control_image).all())\n mandel_driver_2c(-1., 1., -1., len(palette), palette, test_image)\n image_diff = control_image - test_image\n self.assertTrue((image_diff == 0).all())\n\n\nif __name__ == \"__main__\":\n test_support.main()\n", "# -*- coding: utf-8 -*-\nfrom __future__ import print_function, division, absolute_import\n\nimport numpy as np\nfrom numba import jit, double\n\ndef expr_py(a, b, c):\n length = len(a)\n result = np.empty(length, dtype=np.float64)\n for i in range(length):\n result[i] = b[i] ** 2 if a[i] > 0.1 else c[i] ** 3\n return result\nexpr_nb = jit(double[:](double[:], double[:], double[:]))(expr_py)\n\nsize = 1e6\n\na = np.random.rand(size)\nb = np.random.rand(size)\nc = np.random.rand(size)\n\nassert np.allclose(expr_nb(a, b, c), expr_py(a, b, c))\n", "# -*- coding: utf-8 -*-\n# from __future__ import division, absolute_import\n\nfrom math import log1p\nfrom numba import *\nfrom numba.vectorize import vectorize\nimport numpy as np\n\n@jit(double(double))\ndef jit_log1p(x):\n return log1p(x)\n\nx = 3.4\nassert np.allclose([jit_log1p(x)], [jit_log1p.py_func(x)])\n\n@vectorize([double(double)])\ndef vec_log1p(x):\n return log1p(x)\n\nx = np.array([x])\nassert np.allclose(vec_log1p(x), [jit_log1p.py_func(x)])\n" ]
[ [ "numpy.zeros", "numpy.allclose" ], [ "numpy.zeros" ], [ "numpy.arange", "numpy.array", "numpy.zeros", "numpy.zeros_like" ], [ "numpy.random.rand", "numpy.empty" ], [ "numpy.array" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
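The numba tests above lean on eager compilation with explicit string signatures, e.g. jit('i4(f8,f8,i4)')(mandel_1), and compare each compiled dispatcher against its retained .py_func. A minimal sketch of that pattern on a made-up function (the @autojit API from the first test predates modern numba; this assumes a release where string signatures and .py_func still behave this way):

import numpy as np
from numba import jit

def axpy(a, x, y):
    return a * x + y

# Eager compilation against an explicit float64 signature, as in the tests above
axpy_c = jit('f8(f8,f8,f8)')(axpy)

# The dispatcher keeps the original Python function around as .py_func
assert np.allclose(axpy_c(2.0, 3.0, 1.0), axpy_c.py_func(2.0, 3.0, 1.0))
print(axpy_c(2.0, 3.0, 1.0))  # 7.0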
samuelpucek/ep-stats
[ "040ac2038f086e03c7c95596cba2ce9cca1382b6", "040ac2038f086e03c7c95596cba2ce9cca1382b6" ]
[ "src/epstats/toolkit/check.py", "src/epstats/toolkit/experiment.py" ]
[ "from typing import List\nimport pandas as pd\nimport numpy as np\nfrom scipy.stats import chisquare\n\nfrom .parser import Parser\n\n\nclass Check:\n \"\"\"\n Perform data quality check that accompanies metric evaluation in the experiment.\n\n See [Data Quality Checks](../stats/basics.md#data-quality-checks) for details about\n data quality checks and [`Evaluation`][epstats.toolkit.experiment.Evaluation] for description of output.\n \"\"\"\n\n def __init__(self, id: int, name: str, denominator: str):\n self.id = id\n self.name = name\n self.denominator = denominator\n self._parser = Parser(denominator, denominator)\n self._goals = self._parser.get_goals()\n\n def get_goals(self) -> List:\n \"\"\"\n List of all goals needed to evaluate the check in the experiment.\n\n Returns:\n list of parsed structured goals\n \"\"\"\n return self._goals\n\n def evaluate_agg(self, goals: pd.DataFrame, default_exp_variant_id: str) -> pd.DataFrame:\n \"\"\"\n Evaluate this check from pre-aggregated goals.\n\n Arguments:\n goals: one row per experiment variant\n default_exp_variant_id: default variant\n\n See [`Experiment.evaluate_agg`][epstats.toolkit.experiment.Experiment.evaluate_agg] for details\n on `goals` at input.\n\n Returns:\n `checks` dataframe with columns:\n\n 1. `timestamp` - timestamp of evaluation\n 1. `exp_id` - experiment id\n 1. `check_id` - check id as in [`Experiment`][epstats.toolkit.experiment.Experiment] definition\n 1. `variable_id` - name of the variable in check evaluation, SRM check has following variables `p_value`,\n `test_stat`, `confidence_level`\n 1. `value` - value of the variable\n \"\"\"\n raise NotImplementedError()\n\n def evaluate_by_unit(self, goals: pd.DataFrame, default_exp_variant_id: str) -> pd.DataFrame:\n \"\"\"\n Evaluate this check from goals aggregated by unit.\n\n Arguments:\n goals: one row per experiment variant\n default_exp_variant_id: default variant\n\n See [`Experiment.evaluate_by_unit`][epstats.toolkit.experiment.Experiment.evaluate_by_unit] for details\n on `goals` at input.\n\n Returns:\n `checks` dataframe with columns:\n\n 1. `timestamp` - timestamp of evaluation\n 1. `exp_id` - experiment id\n 1. `check_id` - check id as in [`Experiment`][epstats.toolkit.experiment.Experiment] definition\n 1. `variable_id` - name of the variable in check evaluation, SRM check has following variables `p_value`,\n `test_stat`, `confidence_level`\n 1. 
`value` - value of the variable\n \"\"\"\n raise NotImplementedError()\n\n\nclass SrmCheck(Check):\n \"\"\"\n [Sample ratio mismatch check](../stats/basics.md#sample-ratio-mismatch-check) checking randomization\n of units to variants using [Chi-square test](https://en.wikipedia.org/wiki/Chi-squared_test).\n \"\"\"\n\n def __init__(\n self,\n id: int,\n name: str,\n denominator: str,\n confidence_level: float = 0.999,\n ):\n \"\"\"\n Constructor of the SRM check.\n\n Arguments:\n id: check (order) id\n name: check name\n denominator: values to check\n confidence_level: confidence level of the statistical test\n\n Usage:\n ```python\n SrmCheck(1, 'SRM', 'count(test_unit_type.global.exposure)')\n ```\n \"\"\"\n super().__init__(id, name, denominator)\n self.confidence_level = confidence_level\n\n def evaluate_agg(self, goals: pd.DataFrame, default_exp_variant_id: str) -> pd.DataFrame:\n \"\"\"\n See [`Check.evaluate_agg`][epstats.toolkit.check.Check.evaluate_agg].\n \"\"\"\n # input example:\n # test - srm, a, global.exposure, 10000, 10010, 10010, 0.0, 0.0\n # test - srm, b, global.exposure, 10010, 10010, 10010, 0.0, 0.0\n # test - srm, c, global.exposure, 10040, 10040, 10040, 0.0, 0.0\n\n # output example:\n # test - srm, 1, SRM, p_value, 0.20438\n # test - srm, 1, SRM, test_stat, 3.17552\n # test - srm, 1, SRM, confidence_level, 0.999\n\n # prepare data - we only need exposures\n exposures, _, _ = self._parser.evaluate_agg(goals)\n\n # chi-square test\n with np.errstate(divide=\"ignore\", invalid=\"ignore\"):\n # we fill in zeros, when goal data are missing for some variant.\n # There could be division by zero here which is expected as we return\n # nan or inf values to the caller.\n stat, pval = chisquare(exposures)\n\n r = pd.DataFrame(\n {\n \"check_id\": [self.id, self.id, self.id],\n \"check_name\": [self.name, self.name, self.name],\n \"variable_id\": [\"p_value\", \"test_stat\", \"confidence_level\"],\n \"value\": [pval, stat, self.confidence_level],\n }\n )\n return r\n\n def evaluate_by_unit(self, goals: pd.DataFrame, default_exp_variant_id: str) -> pd.DataFrame:\n \"\"\"\n See [`Check.evaluate_by_unit`][epstats.toolkit.check.Check.evaluate_by_unit].\n \"\"\"\n\n exposures, _, _ = self._parser.evaluate_by_unit(goals)\n\n # chi-square test\n stat, pval = chisquare(exposures)\n\n r = pd.DataFrame(\n {\n \"check_id\": [self.id, self.id, self.id],\n \"check_name\": [self.name, self.name, self.name],\n \"variable_id\": [\"p_value\", \"test_stat\", \"confidence_level\"],\n \"value\": [pval, stat, self.confidence_level],\n }\n )\n return r\n\n\nclass SimpleSrmCheck(SrmCheck):\n \"\"\"Simplified definition of SRM check.\"\"\"\n\n def __init__(\n self,\n id: int,\n name: str,\n denominator: str,\n confidence_level: float = 0.999,\n unit_type: str = \"test_unit_type\",\n ):\n \"\"\"\n Constructor of the simplified SRM check.\n\n It modifies parameter denominator in a way that it is in line with general SRM Check definition. It adds all\n the niceties necessary for proper SrmCheck format. 
Finally, it calls the constructor of the parent SrmCheck class.\n\n Arguments:\n id: check (order) id\n name: check name\n denominator: value (column) of the denominator\n confidence_level: confidence level of the statistical test\n unit_type: unit type\n\n Usage:\n ```python\n SimpleSrmCheck(1, 'SRM', 'exposures')\n ```\n \"\"\"\n agg_type = \"global\"\n den = \"value\" + \"(\" + unit_type + \".\" + agg_type + \".\" + denominator + \")\"\n super().__init__(id, name, den, confidence_level)\n", "import logging\nfrom typing import List, Any\nfrom enum import Enum\nimport pandas as pd\nimport numpy as np\nfrom datetime import datetime\nfrom statsd import StatsClient\nfrom dataclasses import dataclass\n\nfrom .metric import Metric\nfrom .check import Check\nfrom .utils import get_utc_timestamp, goals_wide_to_long\nfrom .parser import EpGoal, UnitType, AggType, Goal\n\nfrom .statistics import Statistics\n\n\nclass Evaluation:\n \"\"\"\n Results of an experiment evaluation.\n \"\"\"\n\n def __init__(self, metrics: pd.DataFrame, checks: pd.DataFrame, exposures: pd.DataFrame):\n self.metrics = metrics\n self.checks = checks\n self.exposures = exposures\n\n @classmethod\n def metric_columns(cls) -> List[str]:\n \"\"\"\n `metrics` dataframe with columns:\n\n 1. `timestamp` - timestamp of evaluation\n 1. `exp_id` - experiment id\n 1. `metric_id` - metric id as in [`Experiment`][epstats.toolkit.experiment.Experiment] definition\n 1. `metric_name` - metric name as in [`Experiment`][epstats.toolkit.experiment.Experiment] definition\n 1. `exp_variant_id` - variant id\n 1. `count` - number of exposures, value of metric denominator\n 1. `mean` - `sum_value` / `count`\n 1. `std` - sample standard deviation\n 1. `sum_value` - value of goals, value of metric numerator\n 1. `confidence_level` - current confidence level used to calculate `p_value` and `confidence_interval`\n 1. `diff` - relative diff between sample means of this and control variant\n 1. `test_stat` - value of test statistic of the relative difference in means\n 1. `p_value` - p-value of the test statistic under current `confidence_level`\n 1. `confidence_interval` - confidence interval of the `diff` under current `confidence_level`\n 1. `standard_error` - standard error of the `diff`\n 1. `degrees_of_freedom` - degrees of freedom of this variant mean\n \"\"\"\n return [\n \"timestamp\",\n \"exp_id\",\n \"metric_id\",\n \"metric_name\",\n \"exp_variant_id\",\n \"count\",\n \"mean\",\n \"std\",\n \"sum_value\",\n \"confidence_level\",\n \"diff\",\n \"test_stat\",\n \"p_value\",\n \"confidence_interval\",\n \"standard_error\",\n \"degrees_of_freedom\",\n ]\n\n @classmethod\n def check_columns(cls) -> List[str]:\n \"\"\"\n `checks` dataframe with columns:\n\n 1. `timestamp` - timestamp of evaluation\n 1. `exp_id` - experiment id\n 1. `check_id` - check id as in [`Experiment`][epstats.toolkit.experiment.Experiment] definition\n 1. `variable_id` - name of the variable in check evaluation, SRM check has following variables `p_value`,\n `test_stat`, `confidence_level`\n 1. `value` - value of the variable\n \"\"\"\n return [\"timestamp\", \"exp_id\", \"check_id\", \"check_name\", \"variable_id\", \"value\"]\n\n @classmethod\n def exposure_columns(cls) -> List[str]:\n \"\"\"\n `exposures` dataframe with columns:\n\n 1. `timestamp` - timestamp of evaluation\n 1. `exp_id` - experiment id\n 1. `exp_variant_id` - variant id\n 1. 
`exposures` - number of exposures of this variant\n \"\"\"\n return [\"exp_variant_id\", \"exposures\"]\n\n\nclass FilterScope(str, Enum):\n \"\"\"\n Scope of data where to apply the filter.\n \"\"\"\n\n exposure = \"exposure\"\n goal = \"goal\"\n\n\n@dataclass\nclass Filter:\n \"\"\"\n Filter specification for data to evaluate.\n \"\"\"\n\n dimension: str\n value: List[Any]\n scope: FilterScope\n\n\nclass Experiment:\n \"\"\"\n Evaluate one experiment described as a list of metrics and checks.\n\n See [Statistics](../stats/basics.md) for details about statistical method used\n and [`Evaluation`][epstats.toolkit.experiment.Evaluation] for description of output.\n \"\"\"\n\n def __init__(\n self,\n id: str,\n control_variant: str,\n metrics: List[Metric],\n checks: List[Check],\n unit_type: str,\n date_from: str = None,\n date_to: str = None,\n date_for: str = None,\n confidence_level: float = 0.95,\n variants: List[str] = None,\n statsd: StatsClient = StatsClient(),\n filters: List[Filter] = None,\n ):\n self._logger = logging.getLogger(f\"{__name__}.{self.__class__.__name__}\")\n self.id = id\n self.control_variant = control_variant\n self.unit_type = unit_type\n self.metrics = metrics\n self.checks = checks\n self.date_from = datetime.strptime(date_from, \"%Y-%m-%d\").date() if date_from is not None else None\n self.date_to = datetime.strptime(date_to, \"%Y-%m-%d\").date() if date_to is not None else None\n self.date_for = (\n datetime.strptime(date_for, \"%Y-%m-%d\").date() if date_for is not None else datetime.today().date()\n )\n self.confidence_level = confidence_level\n self.variants = variants\n self._exposure_goals = [\n EpGoal(\n [\n \"count\",\n \"(\",\n UnitType([unit_type]),\n \".\",\n AggType([\"global\"]),\n \".\",\n Goal([\"exposure\"]),\n \")\",\n ]\n )\n ]\n self.statsd = statsd\n self.filters = filters if filters is not None else []\n\n def evaluate_agg(self, goals: pd.DataFrame) -> Evaluation:\n \"\"\"\n Evaluate all metrics and checks in the experiment from already pre-aggregated goals.\n\n This method is usefull when there are too many units in the experiment to evaluate it\n using [`evaluate_by_unit`][epstats.toolkit.experiment.Experiment.evaluate_by_unit].\n\n Does best effort to fill in missing goals and variants with zeros.\n\n Arguments:\n goals: dataframe with one row per goal and aggregated data in columns\n\n `goals` dataframe columns:\n\n 1. `exp_id` - experiment id\n 1. `exp_variant_id` - variant\n 1. `unit_type` - randomization unit type\n 1. `agg_type` - level of aggregation\n 1. `goal` - goal name\n 1. `dimension` - name of the dimension, e.g. `product`\n 1. `dimension_value` - value of the dimension, e.g. `p_1`\n 1. `count` - number of observed goals (e.g. conversions)\n 1. `sum_sqr_count` - summed squared number of observed goals per unit, it is similar\n to `sum_sqr_value`\n 1. `sum_value` - value of observed goals\n 1. `sum_sqr_value` - summed squared value per unit. This is used to calculate\n sample standard deviation from pre-aggregated data (it is a term $\\sum x^2$\n in $\\hat{\\sigma}^2 = \\\\frac{\\sum x^2 - \\\\frac{(\\sum x)^2}{n}}{n-1}$).\n 1. 
`count_unique` - number of units with at least 1 observed goal\n\n Returns:\n set of dataframes with evaluation\n\n Usage:\n\n ```python\n from epstats.toolkit import Experiment, Metric, SrmCheck\n experiment = Experiment(\n 'test-conversion',\n 'a',\n [Metric(\n 1,\n 'Click-through Rate',\n 'count(test_unit_type.unit.click)',\n 'count(test_unit_type.global.exposure)'),\n ],\n [SrmCheck(1, 'SRM', 'count(test_unit_type.global.exposure)')],\n unit_type='test_unit_type')\n\n # This gets testing data, use other Dao or get aggregated goals in some other way.\n from epstats.toolkit.testing import TestData\n goals = TestData.load_goals_agg(experiment.id)\n\n # evaluate experiment\n ev = experiment.evaluate_agg(goals)\n\n # work with results\n print(ev.exposures)\n print(ev.metrics[ev.metrics == 1])\n print(ev.checks[ev.checks == 1])\n\n # this is to assert that this code sample works correctly\n from epstats.toolkit.testing import TestDao\n assert_experiment(experiment, ev, TestDao(TestData()))\n ```\n\n Input data frame example:\n\n ```\n exp_id exp_variant_id unit_type agg_type goal dimension dimension_value count sum_sqr_count sum_value sum_sqr_value count_unique\n test-srm a test_unit_type global exposure 100000 100000 100000 100000 100000\n test-srm b test_unit_type global exposure 100100 100100 100100 100100 100100\n test-srm a test_unit_type unit conversion 1200 1800 32000 66528 900\n test-srm a test_unit_type_2 global conversion product product_1 1000 1700 31000 55000 850\n ```\n \"\"\"\n g = self._fix_missing_agg(goals)\n return self._evaluate(\n g,\n Experiment._metrics_column_fce_agg,\n Experiment._checks_fce_agg,\n Experiment._exposures_fce_agg,\n )\n\n def evaluate_wide_agg(self, goals: pd.DataFrame) -> Evaluation:\n \"\"\"\n This is a simplified version of the method [`evaluate_agg`][epstats.toolkit.experiment.Experiment.evaluate_agg].\n\n It consumes a simple input `goals` dataframe, transforms it into a suitable dataframe format and evaluates it using the general method [`evaluate_agg`][epstats.toolkit.experiment.Experiment.evaluate_agg].\n\n It assumes that the first two columns are the name of the experiment and the variants. Then follow the columns with data.\n\n See usage of the method in the notebook [Ad-hoc A/B test evaluation using Ep-Stats](../user_guide/ab_test_simple_evaluation.html).\n\n Arguments:\n goals: dataframe with one row per variant and aggregated data in columns\n\n Possible `goals` dataframe columns (check the input dataframe example):\n\n 1. `exp_id` - experiment id\n 1. `exp_variant_id` - variant\n 1. `clicks` - sum of clicks\n 1. `views` - sum of views\n 1. `bookings` - sum of bookings\n 1. 
`bookings_squared` - sum of bookings squared\n\n Returns:\n set of dataframes with evaluation\n\n Usage:\n\n ```python\n from epstats.toolkit import Experiment, SimpleMetric, SimpleSrmCheck\n from epstats.toolkit.results import results_long_to_wide, format_results\n from epstats.toolkit.testing import TestData\n\n # Load Test Data\n goals = TestData.load_goals_simple_agg()\n\n # Define the experiment\n unit_type = 'test_unit_type'\n experiment = Experiment(\n 'my-experiment',\n 'a',\n [\n SimpleMetric(1, 'Click-through Rate (CTR)', 'clicks', 'views', unit_type),\n SimpleMetric(2, 'Conversion Rate', 'conversions', 'views', unit_type),\n SimpleMetric(3, 'Revenue per Mille (RPM)', 'bookings', 'views', unit_type, metric_format='${:,.2f}', metric_value_multiplier=1000),\n ],\n [SimpleSrmCheck(1, 'SRM', 'views')],\n unit_type=unit_type)\n\n # Evaluate the experiment\n ev = experiment.evaluate_wide_agg(goals)\n\n # Work with results\n print(ev.exposures)\n print(ev.metrics)\n print(ev.checks)\n\n # Possible formatting of metrics\n ev.metrics.pipe(results_long_to_wide).pipe(format_results, experiment, format_pct='{:.1%}', format_pval='{:.3f}')\n ```\n\n Input dataframe example:\n ```\n experiment_id variant_id views clicks conversions bookings bookings_squared\n my-exp a 473661 48194 413 17152 803105\n my-exp b 471485 47184 360 14503 677178\n my-exp c 477159 48841 406 15892 711661\n my-exp d 474934 49090 289 11995 566700\n ```\n \"\"\"\n g = goals_wide_to_long(goals)\n return self.evaluate_agg(g)\n\n def evaluate_by_unit(self, goals: pd.DataFrame) -> Evaluation:\n \"\"\"\n Evaluate all metrics and checks in the experiment from goals grouped by `unit_id`.\n\n This method is useful when there are not many (<1M) units in the experiment to evaluate it.\n If there are many units exposed to the experiment, pre-aggregate data and use [`evaluate_agg`][epstats.toolkit.experiment.Experiment.evaluate_agg].\n\n Does best effort to fill in missing goals and variants with zeros.\n\n Arguments:\n goals: dataframe with one row per goal and aggregated data in columns\n\n `goals` dataframe columns:\n\n 1. `exp_id` - experiment id\n 1. `exp_variant_id` - variant\n 1. `unit_type` - randomization unit type\n 1. `unit_id` - (randomization) unit id\n 1. `agg_type` - level of aggregation\n 1. `goal` - goal name\n 1. `dimension` - name of the dimension, e.g. `product`\n 1. `dimension_value` - value of the dimension, e.g. `p_1`\n 1. `count` - number of observed goals\n 1. 
`sum_value` - value of observed goals\n\n Returns:\n set of dataframes with evaluation\n\n Usage:\n\n ```python\n from epstats.toolkit import Experiment, Metric, SrmCheck\n experiment = Experiment(\n 'test-real-valued',\n 'a',\n [Metric(\n 2,\n 'Average Bookings',\n 'value(test_unit_type.unit.conversion)',\n 'count(test_unit_type.unit.exposure)')\n ],\n [],\n unit_type='test_unit_type')\n\n # This gets testing data, use other Dao or get aggregated goals in some other way.\n from epstats.toolkit.testing import TestData\n goals = TestData.load_goals_by_unit(experiment.id)\n\n # evaluate experiment\n ev = experiment.evaluate_by_unit(goals)\n\n # work with results\n print(ev.exposures)\n print(ev.metrics[ev.metrics == 1])\n print(ev.checks[ev.checks == 1])\n\n # this is to assert that this code sample works correctly\n from epstats.toolkit.testing import TestDao\n assert_experiment(experiment, ev, TestDao(TestData()))\n ```\n\n Input data frame example:\n\n ```\n exp_id exp_variant_id unit_type unit_id agg_type goal dimension dimension_value count sum_value\n test-srm a test_unit_type test_unit_type_1 unit exposure 1 1\n test-srm a test_unit_type test_unit_type_1 unit conversion product product_1 2 75\n test-srm b test_unit_type test_unit_type_2 unit exposure 1 1\n test-srm b test_unit_type test_unit_type_3 unit exposure 1 1\n test-srm b test_unit_type test_unit_type_3 unit conversion product product_2 1 1\n ```\n \"\"\"\n g = self._fix_missing_by_unit(goals)\n\n # We need to pivot table to get all goals per `unit_id` on the same row in the data frame.\n # This is needed to be able to vector-evaluate compound metrics\n # eg. `value(test_unit_type.unit.conversion) - value(test_unit_type.unit.refund)`\n g = (\n pd.pivot_table(\n g,\n values=[\"count\", \"sum_value\"],\n index=[\n \"exp_id\",\n \"exp_variant_id\",\n \"unit_type\",\n \"agg_type\",\n \"unit_id\",\n \"dimension\",\n \"dimension_value\",\n ],\n columns=\"goal\",\n aggfunc=np.sum,\n fill_value=0,\n )\n .swaplevel(axis=1)\n .reset_index()\n )\n\n return self._evaluate(\n g,\n Experiment._metrics_column_fce_by_unit,\n Experiment._checks_fce_by_unit,\n Experiment._exposures_fce_by_unit,\n )\n\n def get_goals(self) -> List[EpGoal]:\n \"\"\"\n List of all goals needed to evaluate all metrics and checks in the experiment.\n\n Returns:\n list of parsed structured goals\n \"\"\"\n res = set()\n for m in self.metrics:\n res = res.union(m.get_goals())\n for c in self.checks:\n res = res.union(c.get_goals())\n res = res.union(self._exposure_goals)\n return list(res)\n\n @staticmethod\n def _metrics_column_fce_agg(m: Metric, goals: pd.DataFrame):\n \"\"\"\n Gets count, sum_value, sum_sqr_value columns by expression from already aggregated goals.\n \"\"\"\n return m.get_evaluate_columns_agg(goals)\n\n @staticmethod\n def _metrics_column_fce_by_unit(m: Metric, goals: pd.DataFrame):\n \"\"\"\n Gets count, sum_value, sum_sqr_value columns by expression from goals grouped by `unit_id`.\n \"\"\"\n return m.get_evaluate_columns_by_unit(goals)\n\n @staticmethod\n def _checks_fce_agg(c: Check, goals: pd.DataFrame, control_variant: str):\n \"\"\"\n Evaluates checks from already aggregated goals.\n \"\"\"\n return c.evaluate_agg(goals, control_variant)\n\n @staticmethod\n def _checks_fce_by_unit(c: Check, goals: pd.DataFrame, control_variant: str):\n \"\"\"\n Evaluates checks from goals grouped by `unit_id`.\n \"\"\"\n return c.evaluate_by_unit(goals, control_variant)\n\n @staticmethod\n def _exposures_fce_agg(goals: pd.DataFrame, exp_id: str, unit_type: 
str):\n \"\"\"\n Evaluates checks from already aggregated goals.\n \"\"\"\n df = (\n goals[(goals[\"unit_type\"] == unit_type) & (goals[\"agg_type\"] == \"global\") & (goals[\"goal\"] == \"exposure\")]\n .groupby(\"exp_variant_id\")\n .agg(exposures=(\"count\", \"sum\"))\n .reset_index()\n )\n df[\"exp_id\"] = exp_id\n return df\n\n @staticmethod\n def _exposures_fce_by_unit(goals: pd.DataFrame, exp_id: str, unit_type: str):\n \"\"\"\n Evaluates checks from already aggregated goals.\n \"\"\"\n df = goals[(goals[\"unit_type\"] == unit_type) & (goals[\"agg_type\"] == \"unit\")][\n [(\"exp_variant_id\", \"\"), (\"exposure\", \"count\")]\n ]\n df = df.droplevel(0, axis=1)\n df.columns = [\"exp_variant_id\", \"exposures\"]\n d = df.groupby(\"exp_variant_id\").agg(exposures=(\"exposures\", \"sum\")).reset_index()\n d[\"exp_id\"] = exp_id\n return d\n\n def _evaluate(self, goals: pd.DataFrame, metrics_column_fce, checks_fce, exposures_fce):\n metrics = self._evaluate_metrics(goals, metrics_column_fce)\n checks = self._evaluate_checks(goals, checks_fce)\n exposures = self._evaluate_exposures(goals, exposures_fce)\n return Evaluation(metrics, checks, exposures)\n\n def _evaluate_exposures(self, goals: pd.DataFrame, exposures_fce) -> pd.DataFrame:\n return exposures_fce(goals, self.id, self.unit_type)\n\n def _evaluate_checks(self, goals: pd.DataFrame, check_fce) -> pd.DataFrame:\n res = []\n for c in self.checks:\n try:\n r = check_fce(c, goals, self.control_variant)\n r[\"exp_id\"] = self.id\n res.append(r)\n except Exception as e:\n self._logger.warning(f\"Cannot evaluate check [{c.id} in experiment [{self.id}] because of {e}\")\n self.statsd.incr(\"errors.check\")\n\n c = pd.concat(res, axis=1) if res != [] else pd.DataFrame([], columns=Evaluation.check_columns())\n c[\"timestamp\"] = round(get_utc_timestamp(datetime.now()).timestamp())\n return c[Evaluation.check_columns()]\n\n def _fix_missing_agg(self, goals: pd.DataFrame) -> pd.DataFrame:\n \"\"\"\n Adds zero values for missing goals and variants that are needed for metric evaluation.\n\n Does that in the best effort - fills in `count`, `sum_sqr_count`, `sum_value`, `sum_sqr_value` and `count_unique` with zeros.\n \"\"\"\n # what variants and goals there should be from all the goals needed to evaluate all metrics\n self.variants = (\n self.variants\n if self.variants is not None\n else np.unique(np.append(goals[\"exp_variant_id\"], self.control_variant))\n )\n g = goals[goals.exp_variant_id.isin(self.variants)]\n nvs = self.variants\n ngs = self.get_goals()\n\n # variants * goals is the number of variant x goals combinations we expect in the data\n lnvs = len(nvs)\n lngs = len(ngs)\n ln = lnvs * lngs\n\n # create zero data frame for all variants and goals\n empty_df = pd.DataFrame(\n {\n \"exp_variant_id\": np.tile(nvs, lngs),\n \"unit_type\": np.repeat([g.unit_type for g in ngs], lnvs),\n \"agg_type\": np.repeat([g.agg_type for g in ngs], lnvs),\n \"goal\": np.repeat([g.goal for g in ngs], lnvs),\n \"dimension\": np.repeat([g.dimension for g in ngs], lnvs),\n \"dimension_value\": np.repeat([g.dimension_value for g in ngs], lnvs),\n \"count\": np.zeros(ln),\n \"sum_sqr_count\": np.zeros(ln),\n \"sum_value\": np.zeros(ln),\n \"sum_sqr_value\": np.zeros(ln),\n \"count_unique\": np.zeros(ln),\n }\n )\n\n # join to existing data and use zeros for only missing variants and goals\n m = (\n pd.concat([g, empty_df], axis=0)\n .groupby(\n [\n \"exp_variant_id\",\n \"unit_type\",\n \"agg_type\",\n \"dimension\",\n \"dimension_value\",\n \"goal\",\n 
]\n )\n .sum()\n .reset_index()\n )\n return m\n\n def _fix_missing_by_unit(self, goals: pd.DataFrame) -> pd.DataFrame:\n \"\"\"\n Adds zero values for missing goals and variants that are needed for metric evaluation.\n\n Does that in the best effort - fills in `count`, `sum_value` with zeros.\n \"\"\"\n # what variants and goals there should be from all the goals needed to evaluate all metrics\n self.variants = (\n self.variants\n if self.variants is not None\n else np.unique(np.append(goals[\"exp_variant_id\"], self.control_variant))\n )\n g = goals[goals.exp_variant_id.isin(self.variants)]\n nvs = self.variants\n ngs = self.get_goals()\n\n # variants * goals is the number of variant x goals combinations we expect in the data\n lnvs = len(nvs)\n lngs = len(ngs)\n ln = lnvs * lngs\n\n # create zero data frame for all variants and goals\n empty_df = pd.DataFrame(\n {\n \"exp_id\": np.repeat(self.id, ln),\n \"exp_variant_id\": np.tile(nvs, lngs),\n \"unit_type\": np.repeat([g.unit_type for g in ngs], lnvs),\n \"agg_type\": np.repeat([g.agg_type for g in ngs], lnvs),\n \"goal\": np.repeat([g.goal for g in ngs], lnvs),\n \"dimension\": np.repeat([g.dimension for g in ngs], lnvs),\n \"dimension_value\": np.repeat([g.dimension_value for g in ngs], lnvs),\n \"unit_id\": np.repeat(\"fillna\", ln),\n \"count\": np.zeros(ln),\n \"sum_value\": np.zeros(ln),\n }\n )\n\n # join to existing data and use zeros for only missing variants and goals\n m = pd.concat([g, empty_df], axis=0)\n return m[\n [\n \"exp_id\",\n \"exp_variant_id\",\n \"unit_type\",\n \"agg_type\",\n \"dimension\",\n \"dimension_value\",\n \"goal\",\n \"unit_id\",\n \"count\",\n \"sum_value\",\n ]\n ]\n\n def _evaluate_metrics(self, goals: pd.DataFrame, column_fce) -> pd.DataFrame:\n if not self.metrics:\n return pd.DataFrame([], columns=Evaluation.metric_columns())\n\n sts = []\n for m in self.metrics:\n count, sum_value, sum_sqr_value = column_fce(m, goals)\n sts.append([count, sum_value, sum_sqr_value])\n stats = np.array(sts).transpose(0, 2, 1)\n metrics = stats.shape[0]\n variants = stats.shape[1]\n\n count = stats[:, :, 0]\n sum_value = stats[:, :, 1]\n sum_sqr_value = stats[:, :, 2]\n with np.errstate(divide=\"ignore\", invalid=\"ignore\"):\n # We fill in zeros, when goal data are missing for some variant.\n # There could be division by zero here which is expected as we return\n # nan or inf values to the caller.\n mean = sum_value / count\n std = np.sqrt((sum_sqr_value - sum_value * sum_value / count) / (count - 1))\n\n # sequential testing correction\n if self.date_from is not None and self.date_to is not None:\n # Parameters\n test_length = (self.date_to - self.date_from).days + 1 # test length in days\n actual_day = (self.date_for - self.date_from).days + 1 # day(s) since beginning of the test\n actual_day = min(actual_day, test_length) # actual day of evaluation must be in interval [1, test_length]\n\n # confidence level adjustment - applied when actual_day < test_length (test is still running)\n confidence_level = Statistics.obf_alpha_spending_function(self.confidence_level, test_length, actual_day)\n else:\n confidence_level = self.confidence_level # no change\n\n stats = np.dstack((count, mean, std, sum_value, np.ones(count.shape) * confidence_level))\n stats = np.dstack(\n (\n np.repeat([m.id for m in self.metrics], variants).reshape(metrics, variants, -1),\n np.repeat([m.name for m in self.metrics], variants).reshape(metrics, variants, -1),\n np.tile(goals[\"exp_variant_id\"].unique(), metrics).reshape(metrics, 
variants, -1),\n stats,\n )\n )\n\n # dimensions of `stats` array: (metrics, variants, stats)\n # elements of `stats` array: metrics_id, exp_variant_id, count, mean, std, sum_value, confidence_level\n # hypothesis evaluation (standard way using t-test)\n c = Statistics.ttest_evaluation(stats, self.control_variant)\n\n # multiple variants (comparisons) correction - applied when we have multiple treatment variants\n if variants > 2:\n c = Statistics.multiple_comparisons_correction(c, variants, metrics, confidence_level)\n\n c[\"exp_id\"] = self.id\n c[\"timestamp\"] = round(get_utc_timestamp(datetime.now()).timestamp())\n return c[Evaluation.metric_columns()]\n" ]
[ [ "numpy.errstate", "pandas.DataFrame", "scipy.stats.chisquare" ], [ "pandas.concat", "numpy.sqrt", "numpy.tile", "numpy.ones", "numpy.append", "numpy.errstate", "numpy.repeat", "numpy.array", "numpy.zeros", "pandas.pivot_table" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [ "0.23", "0.21", "2.0", "1.4", "0.19", "1.1", "1.5", "1.2", "0.24", "0.20", "1.0", "0.25", "1.3" ], "scipy": [ "0.13", "1.6", "0.14", "1.10", "0.15", "1.4", "0.16", "1.9", "0.19", "1.5", "0.18", "1.2", "1.7", "0.12", "1.0", "0.17", "1.3", "1.8" ], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [ "0.23", "0.21", "2.0", "1.4", "1.3", "0.19", "1.1", "1.5", "0.24", "0.20", "1.0", "0.25", "1.2" ], "scipy": [], "tensorflow": [] } ]
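The SrmCheck in the ep-stats record above reduces to a chi-square goodness-of-fit test on exposure counts per variant. A standalone restatement of that computation with made-up counts (scipy's chisquare defaults to equal expected frequencies, which matches an experiment designed with a uniform traffic split; a non-uniform design would need an explicit f_exp):

import numpy as np
from scipy.stats import chisquare

exposures = np.array([100000, 100100, 100040])  # observed exposures per variant
stat, p_value = chisquare(exposures)            # H0: traffic is split uniformly

confidence_level = 0.999  # same default as SrmCheck above
if p_value < 1 - confidence_level:
    print(f"SRM detected: test_stat={stat:.2f}, p_value={p_value:.5f}")
else:
    print(f"no SRM: test_stat={stat:.2f}, p_value={p_value:.5f}")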
ynysjtu/Adversarial_Invariant_Learning
[ "fccabab643853d1bf34410e0fc185706093e04c3" ]
[ "domainbed/hparams_registry.py" ]
[ "# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved\r\n\r\nimport numpy as np\r\n\r\ndef _hparams(algorithm, dataset, random_state):\r\n \"\"\"\r\n Global registry of hyperparams. Each entry is a (default, random) tuple.\r\n New algorithms / networks / etc. should add entries here.\r\n \"\"\"\r\n SMALL_IMAGES = ['Debug28', 'RotatedMNIST', 'ColoredMNIST', 'CKMNIST', \"CFMNIST\"]\r\n NLP_DATASETS = ['PSST']\r\n hparams = {}\r\n \r\n hparams['data_augmentation'] = (False, False)\r\n hparams['resnet18'] = (True, True)\r\n hparams['resnet_dropout'] = (0., random_state.choice([0., 0.1, 0.5]))\r\n hparams['class_balanced'] = (False, False)\r\n\r\n if (dataset not in SMALL_IMAGES) and (dataset not in NLP_DATASETS):\r\n hparams['lr'] = (5e-5, 10**random_state.uniform(-5, -3.5))\r\n if dataset == 'DomainNet':\r\n hparams['batch_size'] = (32, int(2**random_state.uniform(3, 5)))\r\n else:\r\n hparams['batch_size'] = (16, int(2**random_state.uniform(3, 5.5)))\r\n if algorithm == \"ARM\":\r\n hparams['batch_size'] = (8, 8)\r\n else:\r\n hparams['lr'] = (1e-3, 10**random_state.uniform(-4.5, -2.5))\r\n hparams['batch_size'] = (64, int(2**random_state.uniform(3, 9)))\r\n if algorithm == \"AIL\":\r\n hparams['batch_size'] = (128, int(2**random_state.uniform(6, 9)))\r\n\r\n if dataset in SMALL_IMAGES:\r\n hparams['weight_decay'] = (0., 0.)\r\n else:\r\n hparams['weight_decay'] = (0., 10**random_state.uniform(-6, -2))\r\n\r\n if algorithm in ['DANN', 'CDANN']:\r\n if dataset not in SMALL_IMAGES:\r\n hparams['lr_g'] = (5e-5, 10**random_state.uniform(-5, -3.5))\r\n hparams['lr_d'] = (5e-5, 10**random_state.uniform(-5, -3.5))\r\n else:\r\n hparams['lr_g'] = (1e-3, 10**random_state.uniform(-4.5, -2.5))\r\n hparams['lr_d'] = (1e-3, 10**random_state.uniform(-4.5, -2.5))\r\n\r\n if dataset in SMALL_IMAGES:\r\n hparams['weight_decay_g'] = (0., 0.)\r\n else:\r\n hparams['weight_decay_g'] = (0., 10**random_state.uniform(-6, -2))\r\n\r\n hparams['lambda'] = (1.0, 10**random_state.uniform(-2, 2))\r\n hparams['weight_decay_d'] = (0., 10**random_state.uniform(-6, -2))\r\n hparams['d_steps_per_g_step'] = (1, int(2**random_state.uniform(0, 3)))\r\n hparams['grad_penalty'] = (0., 10**random_state.uniform(-2, 1))\r\n hparams['beta1'] = (0.5, random_state.choice([0., 0.5]))\r\n hparams['mlp_width'] = (256, int(2 ** random_state.uniform(6, 10)))\r\n hparams['mlp_depth'] = (3, int(random_state.choice([3, 4, 5])))\r\n hparams['mlp_dropout'] = (0., random_state.choice([0., 0.1, 0.5]))\r\n elif algorithm == \"RSC\":\r\n hparams['rsc_f_drop_factor'] = (1/3, random_state.uniform(0, 0.5))\r\n hparams['rsc_b_drop_factor'] = (1/3, random_state.uniform(0, 0.5))\r\n elif algorithm == \"SagNet\":\r\n hparams['sag_w_adv'] = (0.1, 10**random_state.uniform(-2, 1))\r\n elif algorithm == \"IRM\":\r\n hparams['irm_lambda'] = (1e2, 10**random_state.uniform(-1, 5))\r\n hparams['irm_penalty_anneal_iters'] = (500, int(10**random_state.uniform(0, 4)))\r\n elif algorithm == \"AIL\":\r\n if dataset in NLP_DATASETS:\r\n hparams['irm_lambda'] = (500, 10**random_state.uniform(2,5))\r\n else: \r\n hparams['irm_lambda'] = (500, 10**random_state.uniform(1, 5))\r\n hparams['irm_penalty_anneal_iters'] = (500, int(10**random_state.uniform(1, 3)))\r\n hparams['vae_lr'] = (1e-5, 10**random_state.uniform(-5.5, -3))\r\n elif algorithm == \"Mixup\":\r\n hparams['mixup_alpha'] = (0.2, 10**random_state.uniform(-1, -1))\r\n elif algorithm == \"GroupDRO\":\r\n hparams['groupdro_eta'] = (1e-2, 10**random_state.uniform(-3, -1))\r\n elif algorithm == 
\"MMD\" or algorithm == \"CORAL\":\r\n hparams['mmd_gamma'] = (1., 10**random_state.uniform(-1, 1))\r\n elif algorithm == \"MLDG\":\r\n hparams['mldg_beta'] = (1., 10**random_state.uniform(-1, 1))\r\n elif algorithm == \"MTL\":\r\n hparams['mtl_ema'] = (.99, random_state.choice([0.5, 0.9, 0.99, 1.]))\r\n elif algorithm == \"VREx\":\r\n hparams['vrex_lambda'] = (1e1, 10**random_state.uniform(-1, 5))\r\n hparams['vrex_penalty_anneal_iters'] = (500, int(10**random_state.uniform(0, 4)))\r\n\r\n return hparams\r\n\r\ndef default_hparams(algorithm, dataset):\r\n dummy_random_state = np.random.RandomState(0)\r\n return {a: b for a,(b,c) in\r\n _hparams(algorithm, dataset, dummy_random_state).items()}\r\n\r\ndef random_hparams(algorithm, dataset, seed):\r\n random_state = np.random.RandomState(seed)\r\n return {a: c for a,(b,c) in _hparams(algorithm, dataset, random_state).items()}\r\n" ]
[ [ "numpy.random.RandomState" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
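The hparams registry above stores every hyperparameter as a (default, random) tuple and draws the random leg from a seeded numpy RandomState, so a seed fully determines one point of the random search. A stripped-down sketch of that mechanism (the names and ranges here are illustrative, not the registry's):

import numpy as np

def _hparams(random_state):
    # Each entry is a (default, random) tuple, as in the registry above
    return {
        'lr': (1e-3, 10 ** random_state.uniform(-4.5, -2.5)),
        'batch_size': (64, int(2 ** random_state.uniform(3, 9))),
    }

def default_hparams():
    return {k: default for k, (default, _) in _hparams(np.random.RandomState(0)).items()}

def random_hparams(seed):
    return {k: rnd for k, (_, rnd) in _hparams(np.random.RandomState(seed)).items()}

assert random_hparams(7) == random_hparams(7)  # same seed, same draw
print(default_hparams())
print(random_hparams(7))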
catalinnega/Cryptocurrencies-TA-on-Dash
[ "498f152e06fe959e2c9672bd4d862820e6155884" ]
[ "indicators/rsi/rsi_db.py" ]
[ "import indicators.rsi.rsi_settings as rsi_settings\nfrom indicators.rsi.rsi_obj import RSI\nimport ochlv_db.db_settings as db_settings\nimport sqlite3\nimport pandas as pd\nfrom tqdm import tqdm\nimport time\nfrom datetime import datetime\n\nstop_streaming = False\n\ndef rsi_fn(n = '-------rsi_idle-------'):\n kwargs = {\n 'path': rsi_settings.db_path, \n 'table_name': rsi_settings.table_name,\n 'table_list': rsi_settings.table_list,\n 'update_tdiff': rsi_settings.update_tdiff,\n }\n kwargs.update(rsi_settings.settings)\n rsi = RSI(**kwargs)\n rsi.update_latest()\n while not stop_streaming:\n print(n)\n \n last_ts = rsi.get_latest_time(None, rsi_settings.table_name, \"TIMESTAMP\")\n if last_ts is None:\n last_ts = 0\n\n # Read only the OCHLV rows newer than the last stored RSI timestamp,\n # and close the connection so it does not leak across loop iterations.\n con_ochlv = sqlite3.connect(db_settings.db_path)\n df = pd.read_sql_query(f\"SELECT TIMESTAMP, close from Bitfinex_OCHLV_15m WHERE TIMESTAMP >= {last_ts}\", con_ochlv)\n con_ochlv.close()\n c = df.close.values\n ts_vals = df.TIMESTAMP.values\n if len(ts_vals) > 0:\n for i in tqdm(range(len(c))):\n if ts_vals[i] > last_ts:\n sample = {\n 'close': c[i]\n }\n rsi.update(ts_vals[i], sample)\n rsi.flush()\n print(n, f\"Finished update. Last at {datetime.utcfromtimestamp(ts_vals[-1]/1000).strftime('%Y-%m-%d %H:%M:%S')} local datetime.\")\n time.sleep(1 * 60) # poll for new candles once a minute\n" ]
[ [ "pandas.read_sql_query" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [ "0.23", "0.21", "2.0", "1.4", "0.19", "1.1", "1.5", "1.2", "0.24", "0.20", "1.0", "0.25", "1.3" ], "scipy": [], "tensorflow": [] } ]
ZhengRanran/motion_imitation
[ "246b2e786ccb2033cf0f4eff6fe6c62dbfb51eee" ]
[ "motion_imitation/envs/sensors/sensor.py" ]
[ "# coding=utf-8\n# Copyright 2020 The Google Research Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"A sensor prototype class.\n\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport numpy as np\nimport typing\n\n\n_ARRAY = typing.Iterable[float]\n_FLOAT_OR_ARRAY = typing.Union[float, _ARRAY]\n_DATATYPE_LIST = typing.Iterable[typing.Any]\n\n\nclass Sensor(object):\n \"\"\"A prototype class of sensors.\"\"\"\n\n def __init__(self,\n name: typing.Text):\n \"\"\"A basic constructor of the sensor.\n\n This initialized a robot as none. This instance may be regularly updated\n by the environment, when it resets the simulation environment.\n\n Args:\n name: the name of the sensor\n \"\"\"\n self._robot = None\n self._name = name\n\n def get_name(self) -> typing.Text:\n return self._name\n\n def get_observation_datatype(self):\n \"\"\"Returns the data type for the numpy structured array.\n\n It is recommended to define a list of tuples: (name, datatype, shape)\n Reference: https://docs.scipy.org/doc/numpy-1.15.0/user/basics.rec.html\n Ex:\n return [('motor_angles', np.float64, (8, ))] # motor angle sensor\n return [('IMU_x', np.float64), ('IMU_z', np.float64), ] # IMU\n\n Returns:\n datatype: a list of data types.\n \"\"\"\n pass\n\n def get_lower_bound(self):\n \"\"\"Returns the lower bound of the observation.\n\n\n Returns:\n lower_bound: the lower bound of sensor values in np.array format\n \"\"\"\n pass\n\n def get_upper_bound(self):\n \"\"\"Returns the upper bound of the observation.\n\n Returns:\n upper_bound: the upper bound of sensor values in np.array format\n \"\"\"\n pass\n\n def get_observation(self):\n \"\"\"Returns the observation data.\n\n Returns:\n observation: the observed sensor values in np.array format\n \"\"\"\n pass\n\n def set_robot(self, robot):\n \"\"\"Set a robot instance.\"\"\"\n self._robot = robot\n\n def get_robot(self):\n \"\"\"Returns the robot instance.\"\"\"\n return self._robot\n\n def on_reset(self, env):\n \"\"\"A callback function for the reset event.\n\n Args:\n env: the environment who invokes this callback function.\n \"\"\"\n pass\n\n def on_step(self, env):\n \"\"\"A callback function for the step event.\n\n Args:\n env: the environment who invokes this callback function.\n \"\"\"\n pass\n\n def on_terminate(self, env):\n \"\"\"A callback function for the terminate event.\n\n Args:\n env: the environment who invokes this callback function.\n \"\"\"\n pass\n\n\nclass BoxSpaceSensor(Sensor):\n \"\"\"A prototype class of sensors with Box shapes.\"\"\"\n\n def __init__(self,\n name: typing.Text,\n shape: typing.Tuple[int, ...],\n lower_bound: _FLOAT_OR_ARRAY = -np.pi,\n upper_bound: _FLOAT_OR_ARRAY = np.pi,\n dtype=np.float64) -> None:\n \"\"\"Constructs a box type sensor.\n\n Args:\n name: the name of the sensor\n shape: the shape of the sensor values\n lower_bound: the lower_bound of sensor value, in float or np.array.\n upper_bound: the upper_bound of sensor 
value, in float or np.array.\n dtype: data type of sensor value\n \"\"\"\n super(BoxSpaceSensor, self).__init__(name)\n\n self._shape = shape\n self._dtype = dtype\n\n if isinstance(lower_bound, float):\n self._lower_bound = np.full(shape, lower_bound, dtype=dtype)\n else:\n self._lower_bound = np.array(lower_bound)\n\n if isinstance(upper_bound, float):\n self._upper_bound = np.full(shape, upper_bound, dtype=dtype)\n else:\n self._upper_bound = np.array(upper_bound)\n\n def get_shape(self) -> typing.Tuple[int, ...]:\n return self._shape\n\n def get_dimension(self) -> int:\n return len(self._shape)\n\n def get_dtype(self):\n return self._dtype\n\n def get_observation_datatype(self) -> _DATATYPE_LIST:\n \"\"\"Returns box-shape data type.\"\"\"\n return [(self._name, self._dtype, self._shape)]\n\n def get_lower_bound(self) -> _ARRAY:\n \"\"\"Returns the computed lower bound.\"\"\"\n return self._lower_bound\n\n def get_upper_bound(self) -> _ARRAY:\n \"\"\"Returns the computed upper bound.\"\"\"\n return self._upper_bound\n\n def get_observation(self) -> np.ndarray:\n return np.asarray(self._get_observation(), dtype=self._dtype)\n" ]
[ [ "numpy.array", "numpy.full" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
Khoale1096/stupidNMT
[ "894536c16dc7ff958aa5571828a89ecabfcb72d7" ]
[ "models/new_transformer.py" ]
[ "'''\nA module which implements the basic Transformer\n'''\nimport uuid\nimport threading\nimport pdb\nimport torch\nfrom torch import nn\n\nfrom models.new_attention import NewAttention\nfrom models.attention import MultiHeadedAttention\nfrom models.embeddings import PositionEmbedding, TokenEmbedding\nfrom models.utils import LabelSmoothingLoss, Translator\nfrom utils import left_shift, right_shift, triu\n\n\nclass TransformerSublayer(nn.Module):\n '''\n Implements a sub layer of the transformer model, which consists of:\n 1) A sub layer module\n 2) Followed by dropout\n 3) Plus a residual connection\n 4) With layer normalization\n '''\n def __init__(self, sublayer, sublayer_shape, dropout_p=0.1):\n ''' Initialize the transformer sublayer '''\n super(TransformerSublayer, self).__init__()\n\n self.sublayer = sublayer\n self.norm = nn.LayerNorm(sublayer_shape)\n self.dropout = nn.Dropout(dropout_p, inplace=True)\n self.reset_parameters()\n\n def reset_parameters(self):\n ''' Reset parameters using xavier initialiation '''\n self.norm.reset_parameters()\n\n def forward(self, inputs, *sublayer_args, **sublayer_kwargs): # pylint:disable=arguments-differ\n ''' The forward pass of the sublayer '''\n return self.norm(inputs + self.dropout(self.sublayer(*sublayer_args, **sublayer_kwargs)))\n\n\nclass TransformerFFN(nn.Module):\n ''' Implements the Transformer feed-forward network '''\n def __init__(self, embedding_size, hidden_dim):\n super(TransformerFFN, self).__init__()\n\n self.relu = nn.ReLU()\n\n self.hidden = nn.Linear(embedding_size, hidden_dim)\n self.output = nn.Linear(hidden_dim, embedding_size)\n self.reset_parameters()\n\n def reset_parameters(self):\n ''' Reset parameters using xavier initialiation '''\n gain = nn.init.calculate_gain('relu')\n nn.init.xavier_uniform_(self.hidden.weight, gain)\n nn.init.constant_(self.hidden.bias, 0.)\n\n gain = nn.init.calculate_gain('linear')\n nn.init.xavier_uniform_(self.output.weight, gain)\n nn.init.constant_(self.output.bias, 0.)\n\n def forward(self, inputs): # pylint:disable=arguments-differ\n ''' The forward pass of the feed-forward network '''\n return self.output(self.relu(self.hidden(inputs)))\n\n\nclass TransformerEncoderLayer(nn.Module):\n ''' Implements a single encoder layer in a transformer encoder stack '''\n def __init__(self, attn_config, num_heads, dim, hidden_dim, layer_i, dropout_p=0.1):\n ''' Initialize the transformer layer '''\n super(TransformerEncoderLayer, self).__init__()\n\n if attn_config['ffn_layer'][layer_i]:\n self.ffn = TransformerSublayer(\n TransformerFFN(dim, hidden_dim),\n dim, dropout_p\n )\n print('enc layer %i has ffn' % layer_i)\n\n self.self_attention = TransformerSublayer(\n NewAttention(attn_config, dim, num_heads),\n dim, dropout_p\n )\n\n def reset_parameters(self):\n ''' Reset the parameters of the module '''\n self.ffn.reset_parameters()\n self.self_attention.reset_parameters()\n\n def forward(self, inputs, layer_i): # pylint:disable=arguments-differ\n ''' The forward pass '''\n mask = inputs['mask']\n state = inputs['state']\n\n # print(\"encoder self attention\")\n\n state = self.self_attention(\n state, # residual\n state, state, state, mask, # passed to multiheaded attention\n layer_i=layer_i\n )\n\n if hasattr(self, 'ffn'):\n state = self.ffn(\n state, # residual\n state # passed to feed-forward network\n )\n\n return {'state': state, 'mask': mask}\n\n\nclass TransformerDecoderLayer(nn.Module):\n ''' Implements a single decoder layer in a transformer decoder stack '''\n def __init__(self, 
dec_attn_config, enc_dec_attn_config, num_heads, dim, hidden_dim, layer_i, causal=True,\n                 dropout_p=0.1):\n        ''' Initialize the transformer layer '''\n        super(TransformerDecoderLayer, self).__init__()\n\n        self.causal = causal\n        self.uuid = uuid.uuid4()\n\n        self.enc_dec_attn_config = enc_dec_attn_config\n\n        if dec_attn_config['ffn_layer'][layer_i]:\n            self.ffn = TransformerSublayer(\n                TransformerFFN(dim, hidden_dim),\n                dim, dropout_p\n            )\n            print('dec layer %i has ffn' % layer_i)\n\n        self.self_attention = TransformerSublayer(\n            NewAttention(dec_attn_config, dim, num_heads),\n            dim, dropout_p\n        )\n\n        if self.enc_dec_attn_config['enc_dec_attn_layer'] == 1 or \\\n                (type(self.enc_dec_attn_config['enc_dec_attn_layer']) is list and\n                 self.enc_dec_attn_config['enc_dec_attn_layer'][layer_i] == 1):\n            if self.enc_dec_attn_config['enc_dec_attn_num_heads'] == -1:\n                src_num_heads = num_heads\n            elif type(self.enc_dec_attn_config['enc_dec_attn_num_heads']) is not list:\n                src_num_heads = self.enc_dec_attn_config['enc_dec_attn_num_heads']\n            else:\n                src_num_heads = self.enc_dec_attn_config['enc_dec_attn_num_heads'][layer_i]\n            assert src_num_heads != 0\n\n            self.source_attention = TransformerSublayer(\n                NewAttention(enc_dec_attn_config, dim, src_num_heads),\n                dim, dropout_p\n            )\n\n            print('layer %i num of src heads %i' % (layer_i, src_num_heads))\n\n    def reset_parameters(self):\n        ''' Reset the parameters of the module '''\n        self.ffn.reset_parameters()\n        self.self_attention.reset_parameters()\n        if hasattr(self, 'source_attention'):\n            self.source_attention.reset_parameters()\n\n    def forward(self, inputs, sources, layer_i): # pylint:disable=arguments-differ\n        ''' The forward pass '''\n        mask = inputs['mask']\n        state = inputs['state']\n        cache = inputs.get('cache')\n\n        decoder_position = state.shape[1] - 1\n\n        kwargs = {'layer_i': layer_i}\n        if self.causal and cache is not None:\n            # If caching, only want the last one sequence values. 
Requires no causal masking.\n residual = state[:, -1:]\n kwargs['decoder_position'] = decoder_position\n else:\n # If not caching, use the full sequence and ensure an appropriate causal mask\n residual = state\n kwargs['key_mask'] = mask\n kwargs['attention_mask'] = self.mask(state)\n\n # print(\"decoder self attention\")\n state = self.self_attention(\n residual, # residual\n state, state, state, **kwargs # passed to multiheaded attention\n )\n\n source = sources['state']\n # print(\"source\", source)\n kwargs = {'key_mask': sources['mask'], 'layer_i': layer_i}\n if self.causal and cache is not None:\n kwargs['decoder_position'] = decoder_position\n\n # print(\"decoder source attention\")\n\n if hasattr(self, 'source_attention'):\n # print(\"in source, state\", state.shape)\n state = self.source_attention(\n state, # residual\n source, source, state, **kwargs # passed to multiheaded attention\n )\n\n if hasattr(self, 'ffn'):\n state = self.ffn(\n state, # residual\n state # passed to feed-forward network\n )\n\n if self.causal and cache is not None:\n cached = cache.get(self.uuid)\n if cached is None:\n cache[self.uuid] = state\n else:\n # print(\"cached\", cached.shape)\n # print(\"state\", state.shape)\n try:\n state = cache[self.uuid] = torch.cat((cached, state), 1)\n except:\n pdb.set_trace()\n\n return {'state': state, 'mask': mask, 'cache': cache}\n\n _masks = threading.local()\n def mask(self, inputs):\n '''\n Get a self-attention mask\n The mask will be of shape [T x T] containing elements from the set {0, -inf}\n Input shape: (B x T x E)\n Output shape: (T x T)\n '''\n if not self.causal:\n return None\n\n dim = inputs.shape[1]\n device = inputs.device\n mask_store = TransformerDecoderLayer._masks.__dict__\n if device not in mask_store:\n mask = inputs.new_full((dim, dim), float('-inf'))\n mask_store[device] = triu(mask, 1, 1, 1)\n\n mask = mask_store[device]\n if mask.shape[0] < dim:\n mask = mask.resize_(dim, dim).fill_(float('-inf'))\n mask_store[device] = triu(mask, 1, 1, 1)\n mask = mask_store[device]\n\n return mask[None, :dim, :dim]\n\n\nclass NewTransformer(nn.Module):\n ''' The New Transformer module '''\n def __init__(self, config, dataset):\n ''' Initialize the Transformer '''\n super(NewTransformer, self).__init__()\n\n self.dataset = dataset\n self.embedding = TokenEmbedding(\n dataset.vocab_size,\n config.embedding_size,\n padding_idx=self.padding_idx\n )\n self.position_embedding = PositionEmbedding(config.embedding_size)\n self.dropout = nn.Dropout(config.dropout_p, inplace=True)\n\n # Uniq attn attributes\n self.attn_ofs_uniq = list(set(\n config.enc_attn_offset + config.dec_attn_offset + config.enc_dec_attn_offset))\n self.attn_std_uniq = list(set(\n config.enc_attn_std + config.dec_attn_std + config.enc_dec_attn_std))\n\n # Allow for overriding the encoders and decoders in dervied classes\n self.encoders = self.create_encoders(config)\n self.decoders = self.create_decoders(config)\n\n self.label_smoothing = LabelSmoothingLoss(\n config.label_smoothing or 0,\n ignore_index=self.padding_idx,\n reduction='none'\n )\n self.cross_entropy = nn.CrossEntropyLoss(\n ignore_index=self.padding_idx,\n reduction='none'\n )\n\n def create_encoders(self, config):\n ''' Create the transformer encoders '''\n kwargs = {'dropout_p': config.dropout_p}\n\n if config.ffn_layer == -1:\n config.ffn_layer = [1] * config.num_layers\n assert len(config.ffn_layer) == config.num_layers\n\n attn_config = {'attn_type': config.enc_attn_type,\n 'attn_std': config.enc_attn_std,\n 'attn_offset': 
config.enc_attn_offset,\n 'num_layers': config.num_layers,\n 'num_heads': config.num_heads,\n 'which_attn': 'encoder',\n 'attn_threshold': config.enc_attn_threshold,\n 'attn_window': config.enc_attn_window,\n 'attn_impl': config.enc_attn_impl,\n 'ffn_layer': config.ffn_layer,\n 'attn_ofs_uniq': self.attn_ofs_uniq,\n 'attn_std_uniq': self.attn_std_uniq}\n args = [attn_config, config.num_heads, config.embedding_size, config.hidden_dim]\n encoders = nn.ModuleList([\n TransformerEncoderLayer(*args, layer_i, **kwargs)\n for layer_i in range(config.num_layers)\n ])\n\n return encoders\n\n def create_decoders(self, config):\n ''' Create the transformer decoders '''\n kwargs = {'dropout_p': config.dropout_p}\n\n if config.ffn_layer == -1:\n config.ffn_layer = [1] * config.num_layers\n assert len(config.ffn_layer) == config.num_layers\n\n dec_attn_config = {'attn_type': config.dec_attn_type,\n 'attn_std': config.dec_attn_std,\n 'attn_offset': config.dec_attn_offset,\n 'num_layers': config.num_layers,\n 'num_heads': config.num_heads,\n 'which_attn': 'decoder',\n 'attn_threshold': config.dec_attn_threshold,\n 'attn_window': config.dec_attn_window,\n 'attn_impl': config.dec_attn_impl,\n 'ffn_layer': config.ffn_layer,\n 'attn_ofs_uniq': self.attn_ofs_uniq,\n 'attn_std_uniq': self.attn_std_uniq\n }\n enc_dec_attn_config = {'attn_type': config.enc_dec_attn_type,\n 'attn_std': config.enc_dec_attn_std,\n 'attn_offset': config.enc_dec_attn_offset,\n 'num_layers': config.num_layers,\n 'num_heads': config.num_heads,\n 'word_count_ratio': self.dataset.word_count_ratio,\n 'which_attn': 'source',\n 'enc_dec_attn_layer': config.enc_dec_attn_layer,\n 'enc_dec_attn_num_heads': config.enc_dec_attn_num_heads,\n 'attn_threshold': config.enc_dec_attn_threshold,\n 'attn_window': config.enc_dec_attn_window,\n 'attn_impl': config.enc_dec_attn_impl,\n 'ffn_layer': config.ffn_layer,\n 'attn_ofs_uniq': self.attn_ofs_uniq,\n 'attn_std_uniq': self.attn_std_uniq\n }\n args = [dec_attn_config, enc_dec_attn_config, config.num_heads, config.embedding_size, config.hidden_dim]\n decoders = nn.ModuleList([\n TransformerDecoderLayer(*args, layer_i, **kwargs)\n for layer_i in range(config.num_layers)\n ])\n\n return decoders\n\n\n @property\n def sos_idx(self):\n ''' Return the sos index '''\n return self.dataset.sos_idx\n\n @property\n def padding_idx(self):\n ''' Return the padding index '''\n return self.dataset.padding_idx\n\n def translator(self, config):\n ''' Get a translator for this model '''\n return Translator(config, self, self.dataset)\n\n def reset_named_parameters(self, modules):\n ''' Get a translator for this model '''\n if 'encoder' in modules:\n for encoder in self.encoders:\n encoder.reset_parameters()\n if 'decoder' in modules:\n for decoder in self.decoders:\n decoder.reset_parameters()\n if 'embeddings' in modules:\n self.embedding.reset_parameters()\n\n def forward(self, batch): # pylint:disable=arguments-differ\n ''' A batch of inputs and targets '''\n decoded = self.decode(\n self.encode(batch['inputs']),\n right_shift(batch['targets']),\n input_lens=batch['input_lens']\n )\n\n logits = decoded['logits']\n dims = list(range(1, logits.dim()))\n targets = left_shift(batch['targets'])\n nll = self.cross_entropy(logits, targets).sum(dims[:-1])\n smoothed_nll = self.label_smoothing(logits, targets).sum(dims)\n return smoothed_nll, nll\n\n def encode(self, inputs):\n ''' Encode the inputs '''\n word_embedding = self.embed(inputs, self.embedding)\n encoded = {\n 'state': word_embedding,\n 'mask': 
inputs.eq(self.padding_idx)\n }\n for i, encoder in enumerate(self.encoders):\n encoded = encoder(encoded, i)\n\n return encoded\n\n def decode(self, encoded, targets, decoders=None, embedding=None, cache=None, mask=None, input_lens=None):\n ''' Decode the encoded sequence to the targets '''\n if decoders is None:\n decoders = self.decoders\n\n if embedding is None:\n embedding = self.embedding\n\n word_embedding = self.embed(targets, embedding)\n\n decoded = {\n 'cache': cache,\n 'state': word_embedding,\n 'mask': targets.eq(self.padding_idx) if mask is None else mask\n }\n for i, decoder in enumerate(decoders):\n # print(\"i\", i)\n decoded = decoder(decoded, encoded, i)\n\n # compute projection to the vocabulary\n state = decoded['state']\n if cache is not None:\n state = state[:, -1:]\n\n return {\n 'cache': decoded.get('cache'),\n 'logits': embedding(state, transpose=True).transpose(2, 1), # transpose to B x C x ...\n }\n\n def embed(self, inputs, token_embedding):\n ''' Embed the given inputs '''\n return self.dropout(token_embedding(inputs) + self.position_embedding(inputs))\n" ]
[ [ "torch.nn.init.calculate_gain", "torch.nn.Dropout", "torch.nn.CrossEntropyLoss", "torch.cat", "torch.nn.init.constant_", "torch.nn.LayerNorm", "torch.nn.Linear", "torch.nn.init.xavier_uniform_", "torch.nn.ReLU" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
Mo5mami/retinanet-examples
[ "f7ad4ff6a99fe3e66f8a9c8e8a6e03b870f84700" ]
[ "odtk/backbones/resnet.py" ]
[ "import torchvision\nfrom torchvision.models import resnet as vrn\nimport torch.utils.model_zoo as model_zoo\n\nfrom .utils import register\n\nclass ResNet(vrn.ResNet):\n 'Deep Residual Network - https://arxiv.org/abs/1512.03385'\n\n def __init__(self, layers=[3, 4, 6, 3], bottleneck=vrn.Bottleneck, outputs=[5], groups=1, width_per_group=64, url=None):\n self.stride = 128 \n self.bottleneck = bottleneck\n self.outputs = outputs\n self.url = url\n\n kwargs = {'block': bottleneck, 'layers': layers, 'groups': groups, 'width_per_group': width_per_group}\n super().__init__(**kwargs)\n self.unused_modules = ['fc']\n\n def initialize(self):\n if self.url:\n self.load_state_dict(model_zoo.load_url(self.url))\n\n def forward(self, x):\n x = self.conv1(x)\n x = self.bn1(x)\n x = self.relu(x)\n x = self.maxpool(x)\n\n outputs = []\n for i, layer in enumerate([self.layer1, self.layer2, self.layer3, self.layer4]):\n level = i + 2\n if level > max(self.outputs):\n break\n x = layer(x)\n if level in self.outputs:\n outputs.append(x)\n\n return outputs\n\n@register\ndef ResNet18C4():\n return ResNet(layers=[2, 2, 2, 2], bottleneck=vrn.BasicBlock, outputs=[4], url=vrn.model_urls['resnet18'])\n\n@register\ndef ResNet34C4():\n return ResNet(layers=[3, 4, 6, 3], bottleneck=vrn.BasicBlock, outputs=[4], url=vrn.model_urls['resnet34'])\n" ]
[ [ "torch.utils.model_zoo.load_url" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
markliou/tool_scripts
[ "d9f7d8f23edeb294dac1c9d29a2d7358751922b7" ]
[ "python/tensorflow/BatchNorm_SELU/convolutional_network_SELU.py" ]
[ "'''\nA Convolutional Network implementation example using TensorFlow library.\nThis example is using the MNIST database of handwritten digits\n(http://yann.lecun.com/exdb/mnist/)\n\nAuthor: Aymeric Damien\nProject: https://github.com/aymericdamien/TensorFlow-Examples/\n'''\n\nfrom __future__ import print_function\n\nimport tensorflow as tf\nimport numpy as np\n\n# Import MNIST data\nfrom tensorflow.examples.tutorials.mnist import input_data\n# mnist = input_data.read_data_sets(\"/tmp/data/\", one_hot=True)\nmnist = input_data.read_data_sets(\"./\", one_hot=True)\n\n# Parameters\nlearning_rate = 0.001\ntraining_iters = 1000000\nbatch_size = 200\ndisplay_step = 10\n\n# Network Parameters\nn_input = 784 # MNIST data input (img shape: 28*28)\nn_classes = 10 # MNIST total classes (0-9 digits)\ndropout = 0.75 # Dropout, probability to keep units\n\n# tf Graph input\nx = tf.placeholder(tf.float32, [None, n_input])\ny = tf.placeholder(tf.float32, [None, n_classes])\n#keep_prob = tf.placeholder(tf.float32) #dropout (keep probability)\n\n\n# Create some wrappers for simplicity\ndef conv2d(x, W, b, strides=1):\n # Conv2D wrapper, with bias and relu activation\n x = tf.nn.conv2d(x, W, strides=[1, strides, strides, 1], padding='SAME')\n x = tf.nn.bias_add(x, b)\n return tf.nn.relu(x)\n #return tf.nn.tanh(x)\n # return tf.nn.elu(x)\n # return selu(x)\n #return lrelu(x, np.random.ranf()*0.618*0.618+1.618)\n \n# selu in tensorflow markliou\ndef selu(x):\n alpha = 1.6732632423543772848170429916717\n scale = 1.0507009873554804934193349852946\n return scale * tf.where(x>=0.0, x, alpha*tf.nn.elu(x))\npass\n\n# leaky relu in tensorflow markliou\ndef lrelu(x, alpha=0.3):\n return tf.nn.relu(x) - alpha * tf.nn.relu(-x)\npass\n \n# batch normalization layer markliou\ndef BN(x, beta=0, gamma=1, epsilon=0.1):\n axis = list(range(len(x.get_shape()) - 1))\n mean, variance = tf.nn.moments(x, axis)\n return tf.nn.batch_normalization(x, mean, variance, beta, gamma, 1e-3)\npass\n\ndef maxpool2d(x, k=2):\n # MaxPool2D wrapper\n return tf.nn.max_pool(x, ksize=[1, k, k, 1], strides=[1, k, k, 1],\n padding='SAME')\n\n\n# Create model\ndef conv_net(x, weights, biases, dropout):\n # Reshape input picture\n x = tf.reshape(x, shape=[-1, 28, 28, 1])\n x = tf.layers.batch_normalization(x, training=True, trainable=True)\n \n\n # Convolution Layer\n conv1 = conv2d(x, weights['wc1'], biases['bc1'])\n # Max Pooling (down-sampling)\n # conv1 = maxpool2d(conv1, k=2)\n #x = BN(x) # batch normalization markliou\n # conv1 = tf.layers.batch_normalization(conv1, training=True, trainable=True)\n \n # Convolution Layer\n conv2 = conv2d(conv1, weights['wc2'], biases['bc2'])\n # Max Pooling (down-sampling)\n # conv2 = maxpool2d(conv2, k=2)\n #conv2 = BN(conv2) # batch normalization markliou\n # conv2 = tf.layers.batch_normalization(conv2, training=True, trainable=True)\n \n # appending layers markliou\n conv3 = conv2d(conv2, weights['wc3'], biases['bc3'])\n # conv3 = BN(conv3) # batch normalization markliou\n # conv3 = tf.layers.batch_normalization(conv3, training=True, trainable=True)\n \n conv4 = conv2d(conv3, weights['wc4'], biases['bc4'])\n # conv4 = BN(conv4) # batch normalization markliou\n # conv4 = tf.layers.batch_normalization(conv4, training=True, trainable=True)\n \n conv5 = conv2d(conv4, weights['wc5'], biases['bc5'])\n # conv5 = BN(conv5) # batch normalization markliou\n # conv5 = tf.layers.batch_normalization(conv5, training=True, trainable=True)\n \n conv6 = conv2d(conv5, weights['wc6'], biases['bc6'])\n\n # Fully connected 
layer\n # Reshape conv2 output to fit fully connected layer input\n fc1 = tf.reshape(conv6, [-1, weights['wd1'].get_shape().as_list()[0]])\n fc1 = tf.add(tf.matmul(fc1, weights['wd1']), biases['bd1'])\n # fc1 = tf.nn.elu(fc1)\n # fc1 = selu(fc1)\n fc1 = lrelu(fc1)\n \n fc2 = tf.add(tf.matmul(fc1, weights['wd2']), biases['bd2'])\n # fc2 = tf.nn.elu(fc2)\n # fc2 = selu(fc2)\n fc2 = lrelu(fc2)\n fc3 = tf.add(tf.matmul(fc2, weights['wd3']), biases['bd3'])\n # fc3 = tf.nn.elu(fc3)\n # fc3 = selu(fc3)\n fc3 = lrelu(fc3)\n \n # Apply Dropout\n #fc1 = tf.nn.dropout(fc1, dropout)\n\n # Output, class prediction\n out = tf.add(tf.matmul(fc3, weights['out']), biases['out'])\n return out\n\nnodeno = 128\n# Store layers weight & bias\nweights = {\n # 5x5 conv, 1 input, 128 outputs\n 'wc1': tf.Variable(tf.random_normal([7, 7, 1, nodeno] ,stddev=(1/(28*28)) ) ),\n # 5x5 conv, 128 inputs, 512 outputs\n 'wc2': tf.Variable(tf.random_normal([7, 7, nodeno, nodeno] ,stddev=1/64 ) ),\n \n # 5x5 conv, 512 inputs, 512 outputs\n 'wc3': tf.Variable(tf.random_normal([7, 7, nodeno, nodeno] ,stddev=1/64 ) ),\n # 5x5 conv, 512 inputs, 128 outputs\n 'wc4': tf.Variable(tf.random_normal([7, 7, nodeno, nodeno] ,stddev=1/64 ) ),\n # 5x5 conv, 128 inputs, 64 outputs\n 'wc5': tf.Variable(tf.random_normal([7, 7, nodeno, nodeno] ,stddev=1/64 ) ),\n # 5x5 conv, 128 inputs, 64 outputs\n 'wc6': tf.Variable(tf.random_normal([7, 7, nodeno, nodeno] ,stddev=1/64 ) ),\n \n # fully connected, 7*7*64 inputs, 1024 outputs\n #'wd1': tf.Variable(tf.random_normal([7*7*64, 1024])),\n 'wd1': tf.Variable(tf.random_normal([28*28*nodeno, nodeno] ,stddev=(1/(28*28*64)) ) ),\n \n 'wd2': tf.Variable(tf.random_normal([nodeno, nodeno] ,stddev=1/64 ) ),\n 'wd3': tf.Variable(tf.random_normal([nodeno, nodeno] ,stddev=1/64 ) ),\n \n # 1024 inputs, 10 outputs (class prediction)\n 'out': tf.Variable(tf.random_normal([nodeno, n_classes]))\n}\n\nbiases = {\n # 'bc1': tf.Variable(tf.random_normal([128] )),\n # 'bc2': tf.Variable(tf.random_normal([128] )),\n \n # 'bc3': tf.Variable(tf.random_normal([512] )),\n # 'bc4': tf.Variable(tf.random_normal([128] )),\n # 'bc5': tf.Variable(tf.random_normal([64] )),\n \n # 'bd1': tf.Variable(tf.random_normal([1024] )),\n \n # 'bd2': tf.Variable(tf.random_normal([1024] )),\n # 'bd3': tf.Variable(tf.random_normal([1024] )),\n \n # 'out': tf.Variable(tf.random_normal([n_classes] ))\n \n 'bc1': tf.Variable(tf.zeros([nodeno] )),\n 'bc2': tf.Variable(tf.zeros([nodeno] )),\n \n 'bc3': tf.Variable(tf.zeros([nodeno] )),\n 'bc4': tf.Variable(tf.zeros([nodeno] )),\n 'bc5': tf.Variable(tf.zeros([nodeno] )),\n 'bc6': tf.Variable(tf.zeros([nodeno] )),\n \n 'bd1': tf.Variable(tf.zeros([nodeno] )),\n \n 'bd2': tf.Variable(tf.zeros([nodeno] )),\n 'bd3': tf.Variable(tf.zeros([nodeno] )),\n \n 'out': tf.Variable(tf.zeros([n_classes] ))\n}\n\n\n# Construct model\n# pred = conv_net(x, weights, biases, keep_prob)\npred = conv_net(x, weights, biases, 0)\n\n# Define loss and optimizer\ncost = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(logits=pred, labels=y))\noptimizer = tf.train.AdamOptimizer(learning_rate=learning_rate).minimize(cost)\n\n# Evaluate model\ncorrect_pred = tf.equal(tf.argmax(pred, 1), tf.argmax(y, 1))\naccuracy = tf.reduce_mean(tf.cast(correct_pred, tf.float32))\n\n# Initializing the variables\ninit = tf.global_variables_initializer()\n\n# Launch the graph\ntfconfig = tf.ConfigProto()\ntfconfig.gpu_options.allow_growth = True\nwith tf.Session(config = tfconfig) as sess:\n sess.run(init)\n step = 1\n # Keep training 
until reach max iterations\n while step * batch_size < training_iters:\n #while 1:\n batch_x, batch_y = mnist.train.next_batch(batch_size)\n # Run optimization op (backprop)\n # sess.run(optimizer, feed_dict={x: batch_x, y: batch_y,\n # keep_prob: dropout})\n sess.run(optimizer, feed_dict={x: batch_x, y: batch_y})\n\n \n # print(batch_y[12])\n # print(len(batch_x[12]))\n # for i in range(0,28,1):\n # print(batch_x[12][i*28:i*28+28])\n # exit()\n\n if step % display_step == 0:\n # Calculate batch loss and accuracy\n # loss, acc = sess.run([cost, accuracy], feed_dict={x: batch_x,\n # y: batch_y,\n # keep_prob: 1.})\n loss, acc = sess.run([cost, accuracy], feed_dict={x: batch_x,\n y: batch_y})\n print(\"Iter \" + str(step*batch_size) + \", Minibatch Loss= \" + \\\n \"{:.6f}\".format(loss) + \", Training Accuracy= \" + \\\n \"{:.5f}\".format(acc))\n step += 1\n print(\"Optimization Finished!\")\n\n # Calculate accuracy for 256 mnist test images\n print(\"Testing Accuracy:\", \\\n # sess.run(accuracy, feed_dict={x: mnist.test.images[:256],\n # y: mnist.test.labels[:256],\n # keep_prob: 1.}))\n sess.run(accuracy, feed_dict={x: mnist.test.images[:256],\n y: mnist.test.labels[:256]}))\n" ]
[ [ "tensorflow.nn.softmax_cross_entropy_with_logits", "tensorflow.zeros", "tensorflow.nn.max_pool", "tensorflow.cast", "tensorflow.train.AdamOptimizer", "tensorflow.nn.conv2d", "tensorflow.layers.batch_normalization", "tensorflow.nn.moments", "tensorflow.ConfigProto", "tensorflow.Session", "tensorflow.argmax", "tensorflow.examples.tutorials.mnist.input_data.read_data_sets", "tensorflow.matmul", "tensorflow.nn.batch_normalization", "tensorflow.nn.elu", "tensorflow.placeholder", "tensorflow.global_variables_initializer", "tensorflow.nn.bias_add", "tensorflow.nn.relu", "tensorflow.reshape", "tensorflow.random_normal" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [ "1.10" ] } ]
mfaytak/ultramisc
[ "1701cfaa37b05897c2946b4a292c5269bac75f2a" ]
[ "scripts/dim-reduction/punjabi-cache-frames.py" ]
[ "\"\"\"\npunjabi-cache-frames: frame caching method used in Punjabi dental/retroflex project (Kochetov, Faytak, Nara)\n\"\"\"\n\n# TODO: actually using?\nfrom __future__ import absolute_import, division, print_function\n\nimport argparse\nimport glob\nimport numpy as np\nimport os\nimport pandas as pd\nimport re\nimport struct\nimport subprocess\nimport sys\n\nfrom collections import OrderedDict\nfrom hashlib import sha1\nfrom operator import itemgetter\nfrom PIL import Image\nfrom scipy import ndimage\n\n# read in args\nparser = argparse.ArgumentParser()\nparser.add_argument(\"directory\", help=\"Experiment directory containing all subjects\")\nargs = parser.parse_args()\n\n# check for appropriate directory\nexpdir = args.directory\ntry:\n assert os.path.exists(args.directory)\nexcept AssertionError:\n # TODO raise exception\n print(\"\\tDirectory provided doesn't exist\")\n parser.print_help()\n sys.exit(2)\n\ndata = None\nrecs = []\nframes_out = os.path.join(expdir,\"frames.npy\")\nmetadata_out = os.path.join(expdir,\"frames_metadata.pickle\")\npng_glob_exp = os.path.join(os.path.normpath(expdir),\"*.png\")\n\n# for filename in list-of-files:\nfor filename in glob.glob(png_glob_exp): \n\n # get filename and other metadata\n fname = os.path.split(filename)[1]\n fname_bare = os.path.splitext(fname)[0]\n attr = fname_bare.split('_')\n subj = attr[0]\n lang = re.sub(r'[0-9]', '', attr[0]) \n\n # subj is crucial for subsetting data. Users will want to define this on their own.\n # But it might be good to have a function with flat directory structure and subj IDs as inputs...\n # ...that caches all data at once.\n # this would let people select their desired frame subset however they'd like, and then run all at once.\n # on the other hand, having subject as a variable and pulling the data apart is much easier conceptually, and the data is easier to move around as a single large file.\n\n if len(attr) > 2:\n stim = attr[1] \n token = re.sub(r'[a-zA-Z]', '', attr[2]) \n\n else:\n stim = re.sub(r'[0-9]', '', attr[1]) \n token = re.sub(r'[a-zA-Z]', '', attr[1]) \n\n if stim in [\"banab\", \"batab\"]:\n place = \"alv\"\n if stim == \"banab\":\n phone = \"n\"\n else:\n phone = \"t\"\n elif stim in [\"baNab\", \"baTab\"]:\n place = \"ret\"\n if stim == \"baNab\":\n phone = \"nr\"\n else:\n phone = \"tr\"\n\n # get ndarray from image file. issue is probably here. 
Unconverted RGB:\n inframe = np.asarray(Image.open(filename)) \n # converted from RGB to grayscale (one-channel):\n inframe = np.asarray(Image.open(filename).convert(\"L\")) \n # converted to uint8:\n rawdata = inframe.astype(np.uint8)\n # the ravel() seems to work correctly, at least in terms of producing an array of the right size:\n\n # generate metadata object for the current acquisition\n\n recs.append(\n OrderedDict([\n ('filename', fname), \n ('subject', subj),\n ('stim', stim),\n ('token', token),\n ('phone', phone), \n ('place', place),\n ('sha1', sha1(rawdata.ravel()).hexdigest()), # tuple error is thrown here.\n ('sha1_dtype', rawdata.dtype)\n ])\n )\n\n # add frame ndarray to frames list\n if data is None:\n data = np.expand_dims(rawdata, axis=0)\n else:\n data = np.concatenate([data, np.expand_dims(rawdata, axis=0)])\n\n# convert metadata to a DataFrame\nmd = pd.DataFrame.from_records(recs, columns=recs[0].keys())\n\n# make sure there is one metadata row for each ndarray in the pickle\nassert(len(md) == data.shape[0])\n\n# compare checksums\nassert(md.loc[0, 'sha1'] == sha1(data[0].ravel()).hexdigest())\nassert(md.loc[len(md)-1,'sha1'] == sha1(data[-1].ravel()).hexdigest())\n\nnp.save(frames_out, data)\nmd.to_pickle(metadata_out)\n" ]
[ [ "numpy.expand_dims", "numpy.save" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
peterhan91/Medical-Robust-Training
[ "02928666ddca6803ae6df8169c291d3f427c2970", "02928666ddca6803ae6df8169c291d3f427c2970" ]
[ "visualization/visualize_attack.py", "visualization/visualize.py" ]
[ "\nimport os\nimport torch\nimport torchvision as tv\nfrom torchvision import models\nimport torch.nn as nn\nimport numpy as np\nfrom torch.utils.data import DataLoader\nfrom utils import makedirs, tensor2cuda, load_model, LabelDict\nfrom argument import parser\nfrom visualization import VanillaBackprop\nfrom attack import FastGradientSignUntargeted\nfrom model.madry_model import WideResNet\nfrom model.resnetdsbn import *\nfrom model import *\nimport patch_dataset as patd\nimport matplotlib.pyplot as plt \n\nperturbation_type = 'linf'\n# out_num = 100\nargs = parser()\nmax_epsilon = 0.002\nalpha = max_epsilon / 2\nsave_folder = '%s_%s' % (args.dataset, args.affix)\nimg_folder = os.path.join(args.log_root, save_folder)\nmakedirs(img_folder)\nargs = parser()\n# label_dict = LabelDict(args.dataset)\n\nte_dataset = patd.PatchDataset(path_to_images=args.data_root,\n fold='test',\n transform=tv.transforms.Compose([\n tv.transforms.Resize(256),\n tv.transforms.ToTensor(),\n ]))\nte_loader = DataLoader(te_dataset, batch_size=1, shuffle=True, num_workers=1)\n\nadv_list = []\nin_list = []\n# model = MLP_bns(input_dim=32*32, output_dim=1)\nmodel = models.resnet50(pretrained=False)\nnum_classes=8\nmodel.fc = nn.Linear(model.fc.in_features, num_classes)\nload_model(model, args.load_checkpoint)\nif torch.cuda.is_available():\n model.cuda()\nattack = FastGradientSignUntargeted(model, \n max_epsilon, \n alpha, \n min_val=0, \n max_val=1, \n max_iters=args.k, \n _type=perturbation_type)\n\nfor data, label in te_loader:\n data, label = tensor2cuda(data), tensor2cuda(label)\n # data = data.view(-1, 32*32)\n # break\n with torch.no_grad():\n adv_data = attack.perturb(data, label, 'mean', False)\n model.eval()\n output = model(adv_data)\n pred = torch.max(output, dim=1)[1]\n adv_list.append(adv_data.cpu().numpy().squeeze()) # (N, 28, 28)\n in_list.append(data.cpu().numpy().squeeze())\n\n# data = data.cpu().numpy().squeeze() # (N, 28, 28)\n# data *= 255.0\n# label = label.cpu().numpy()\n# adv_list.insert(0, data)\n# pred_list.insert(0, label)\nprint(np.array(adv_list).shape)\nprint(np.array(in_list).shape)\nnp.save(os.path.join(img_folder, 'sample_advx.npy'), np.array(adv_list))\nnp.save(os.path.join(img_folder, 'sample_x.npy'), np.array(in_list))\n# print(np.array(pred_list).shape)\n\n'''\ntypes = ['Original', 'Your Model']\nfig, _axs = plt.subplots(nrows=len(adv_list), ncols=out_num)\naxs = _axs\nfor j, _type in enumerate(types):\n axs[j, 0].set_ylabel(_type)\n for i in range(out_num):\n # print(pred_list[j][i])\n axs[j, i].set_xlabel('%s' % label_dict.label2class(int(pred_list[j][i])))\n img = adv_list[j][i]\n # print(img.shape)\n img = np.transpose(img, (1, 2, 0))\n img = img.astype(np.uint8)\n axs[j, i].imshow(img)\n axs[j, i].get_xaxis().set_ticks([])\n axs[j, i].get_yaxis().set_ticks([])\nplt.tight_layout()\nplt.savefig(os.path.join(img_folder, 'Image_large_%s_%s.jpg' % (perturbation_type, args.affix)))\n'''", "import sys\nsys.path.append(\"..\")\n\nimport os\nimport torch\nimport torchvision as tv\nimport numpy as np\nfrom torch.utils.data import DataLoader\nfrom torchvision import models\nimport torch.nn as nn\nfrom utils import makedirs, tensor2cuda, load_model\nfrom argument import parser\nfrom visualization import VanillaBackprop\nimport patch_dataset as patd\nfrom model.resnetdsbn import *\n\nargs = parser()\nimg_folder = 'grad_img'\nimg_folder = os.path.join(img_folder, args.dataset, args.affix)\nmakedirs(img_folder)\nout_num = 1\n\ntransform_test = tv.transforms.Compose([\n 
tv.transforms.Resize(256),\n tv.transforms.ToTensor()\n ])\nte_dataset = patd.PatchDataset(path_to_images=args.data_root, fold='test',\n transform=tv.transforms.ToTensor())\nte_loader = DataLoader(te_dataset, batch_size=1, shuffle=False, num_workers=1)\n\n\ncounter = 0\ninput_list = []\ngrad_list = []\nlabel_list = []\nfor data, label in te_loader:\n if int(np.sum(label.squeeze().numpy())) > 0:\n disease = ''\n for i in range(int(np.sum(label.squeeze().numpy()))):\n disease_index = np.nonzero(label.squeeze().numpy())[0][i]\n dis_temp = te_dataset.PRED_LABEL[disease_index]\n disease = disease + ' ' + dis_temp\n\n data, label = tensor2cuda(data), tensor2cuda(label)\n # model_bns = resnet50dsbn(pretrained=args.pretrain, widefactor=args.widefactor)\n model_std = models.resnet50()\n num_classes=8\n # model_bns.fc = nn.Linear(model_bns.fc.in_features, num_classes)\n model_std.fc = nn.Linear(model_std.fc.in_features, num_classes)\n # load_model(model_bns, args.load_checkpoint)\n load_model(model_std, '../checkpoint/chexpert_gaussn_0.1/checkpoint_best.pth')\n if torch.cuda.is_available():\n # model_bns.cuda()\n model_std.cuda()\n\n # VBP = VanillaBackprop(model_bns)\n VBP_std = VanillaBackprop(model_std)\n # grad_bn0 = VBP.generate_gradients(data, label, [0]) # data: (1,3,96,96) label: (1,3)\n # grad_bn1 = VBP.generate_gradients(data, label, [1])\n grad_std = VBP_std.generate_gradients(data, label)\n grads = []\n # print(grad.shape)\n for grad in [grad_std]:\n grad_flat = grad.view(grad.shape[0], -1) # grad: (1, 3x96x96)\n mean = grad_flat.mean(1, keepdim=True).unsqueeze(2).unsqueeze(3) # (1,1,1,1)\n std = grad_flat.std(1, keepdim=True).unsqueeze(2).unsqueeze(3) # (1,1,1,1)\n mean = mean.repeat(1, 1, data.shape[2], data.shape[3])\n std = std.repeat(1, 1, data.shape[2], data.shape[3])\n grad = torch.max(torch.min(grad, mean+3*std), mean-3*std)\n print(grad.min(), grad.max())\n grad -= grad.min()\n grad /= grad.max()\n grad = grad.cpu().numpy().squeeze() # (N, 28, 28)\n grads.append(grad)\n # grad *= 255.0\n # label = label.cpu().numpy()\n data = data.cpu().numpy().squeeze()\n # data *= 255.0\n # print('data shape ', data.shape)\n # print('grad shape ', grad.shape)\n input_list.append(data)\n label_list.append(disease)\n grad_list.append(grads)\n# np.save(os.path.join(img_folder, 'data.npy'), np.array(input_list))\nnp.save(os.path.join(img_folder, 'label.npy'), np.array(label_list))\nnp.save(os.path.join(img_folder, 'grad.npy'), np.array(grad_list))" ]
[ [ "torch.max", "torch.utils.data.DataLoader", "torch.nn.Linear", "torch.no_grad", "torch.cuda.is_available", "numpy.array" ], [ "torch.min", "torch.utils.data.DataLoader", "torch.nn.Linear", "torch.cuda.is_available", "numpy.array" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
ZhenghengLi/lcls2
[ "94e75c6536954a58c8937595dcac295163aa1cdf", "94e75c6536954a58c8937595dcac295163aa1cdf", "94e75c6536954a58c8937595dcac295163aa1cdf", "94e75c6536954a58c8937595dcac295163aa1cdf" ]
[ "psana/psana/graphqt/CMWDBDocEditor.py", "psana/psana/tests/det.py", "psana/psana/detector/test_detectors.py", "psana/psana/app/epix10ka_calib_components.py" ]
[ "#------------------------------\n\"\"\"Class :py:class:`CMWDBDocEditor` implementation for CMWDBDocsBase\n====================================================================\n\nUsage ::\n #### Test: python lcls2/psana/psana/graphqt/CMWDBDocEditor.py\n\n # Import\n from psana.graphqt.CMWDBDocEditor import *\n\nSee:\n - :class:`CMWDBDocs`\n - `on github <https://github.com/slac-lcls/lcls2>`_.\n\nCreated on 2017-04-20 by Mikhail Dubrovin\n\"\"\"\n#------------------------------\nimport os \nimport numpy as np \n\nimport logging\nlogger = logging.getLogger(__name__)\n\nfrom psana.graphqt.CMConfigParameters import cp\n\nfrom psana.graphqt.QWTable import QWTable, QStandardItem, icon\nfrom psana.graphqt.CMDBUtils import dbu #ObjectId, get_data_for_doc, doc_add_id_ts, time_and_timestamp #, timestamp_id\n\nfrom psana.graphqt.QWUtils import get_open_fname_through_dialog_box\n\nfrom psana.pyalgos.generic.NDArrUtils import info_ndarr\nimport psana.pyalgos.generic.Utils as gu\nfrom psana.pscalib.calib.NDArrIO import load_txt#, save_txt\n\nfrom PyQt5.QtWidgets import QHeaderView\nfrom PyQt5.QtGui import QBrush\nfrom PyQt5.QtCore import Qt\n\n#------------------------------\nfrom PyQt5.QtWidgets import QWidget, QPushButton, QLabel, QHBoxLayout\n\nclass CMWDBDocEditorItem(QWidget) :\n def __init__(self, txt) :\n QWidget.__init__(self, parent=None)\n self.lab = QLabel(txt)\n self.but = QPushButton('Select')\n self.hbox = QHBoxLayout()\n self.hbox.addWidget(self.lab)\n self.hbox.addWidget(self.but)\n self.setLayout(self.hbox)\n self.set_style()\n self.set_tool_tips()\n\n\n def set_style(self):\n self.but.setFixedWidth(60)\n self.but.setFixedHeight(26)\n self.layout().setContentsMargins(0,0,0,0)\n\n #self. setStyleSheet(style.styleBkgd)\n #self.lab.setStyleSheet(style.styleTitle)\n #self.but.setStyleSheet(style.styleButton)\n \n def set_tool_tips(self) :\n pass\n\n#------------------------------\n\nclass CMWDBDocEditor(QWTable) :\n data_fname = 'data_fname'\n data_fname_value = '<Click and select calibration data file>'\n\n def __init__(self) :\n QWTable.__init__(self, parent=None)\n logger.debug('c-tor CMWDBDocEditor')\n cp.cmwdbdoceditor = self\n self.data_nda = None\n\n self.setToolTip('Document editor')\n\n\n def __del__(self) :\n #QWTable.__del__(self)\n cp.cmwdbdoceditor = None\n\n#------------------------------\n\n def show_document(self, dbname, colname, doc) : \n \"\"\"Implementation of the abstract method in CMWDBDocsBase\n \"\"\"\n #CMWDBDocsBase.show_documents(self, dbname, colname, docs)\n msg = 'Show document for db: %s col: %s'%(dbname, colname)\n logger.debug(msg)\n\n if doc.get('id_data', None) is not None : doc[self.data_fname] = ''\n #for doc in docs : print(doc)\n self.fill_table_model(doc)\n\n self.data_nda = dbu.get_data_for_doc(dbname, doc)\n logger.debug(info_ndarr(self.data_nda, 'array from DB linked to the document'))\n\n#------------------------------\n\n def item_is_editable_for_key(self, k):\n forbid = ('id_exp', 'host', 'extpars', 'time_sec', 'data_fname', 'data_size', 'data_dtype',\\\n 'data_type', 'uid', 'cwd', 'id_data', 'id_data_ts', '_id', '_id_ts', 'md5')\n return not (k in forbid)\n\n#------------------------------\n\n def fill_table_model(self, doc=None):\n \"\"\"Re-implementation of the method in QWList.fill_table_model\n \"\"\"\n self.disconnect_item_changed_from(self.on_item_changed)\n\n self.clear_model()\n\n if doc is None :\n self.model.setVerticalHeaderLabels(['Select document']) \n else :\n self.model.setHorizontalHeaderLabels(('key', 'value')) \n 
self.horizontalHeader().setSectionResizeMode(1, QHeaderView.Stretch)\n\n dbu.doc_add_id_ts(doc) # adds time stamps for all id-s\n\n for r,k in enumerate(sorted(doc.keys())):\n v = doc[k]\n\n # set key item\n item = QStandardItem(k)\n item.setEnabled(True)\n item.setEditable(False)\n self.model.setItem(r,0,item)\n\n # set value item\n cond = any([isinstance(v,o) for o in (int, str, dict, dbu.ObjectId)])\n s = str(v) if (cond and len(str(v))<512) else 'str longer 512 chars'\n item = QStandardItem(s)\n\n editable = self.item_is_editable_for_key(k) # and k!=self.data_fname\n #item.setCheckable(editable)\n #if editable : item.setCheckState(1)\n item.setEditable(editable)\n item.setEnabled(editable)\n item.setToolTip('Double-click on item\\nor click on checkbox\\nto change value' if editable else\\\n 'This field is auto-generated')\n\n self.model.setItem(r,1,item)\n\n if k==self.data_fname :\n item.setText(self.data_fname_value)\n item.setEnabled(False)\n #item.setCheckable(True)\n #item.setCheckState(1)\n item.setToolTip('Data file name - click to change')\n item.setBackground(QBrush(Qt.yellow))\n #self.widg = QPushButton('Select file')\n #self.widg = CMWDBDocEditorItem('file-name')\n #index = self.model.indexFromItem(item)\n #self.setIndexWidget(index, self.widg)\n\n self.setColumnWidth(1, 300) # QTableView\n #self.horizontalHeader().setResizeMode(1, 1) # (index, mode)\n\n self.connect_item_changed_to(self.on_item_changed)\n\n#------------------------------\n# Overloaded methods\n#------------------------------\n\n def on_item_changed(self, item):\n \"\"\"Override method in QWTable\"\"\"\n value = self.getFullNameFromItem(item)\n cbxst = item.checkState()\n state = ['UNCHECKED', 'TRISTATE', 'CHECKED'][item.checkState()]\n #logger.debug('on_item_changed: \"%s\" state: %s' % (item.text(), state))\n logger.info('Field value changed to \"%s\"' % item.text())\n\n #if cbxst == Qt.Checked : \n # logger.debug('on_item_changed: \"%s\" checked' % (value))\n # self.select_file_name(item)\n #else :\n # logger.debug('on_item_changed \"%s\" state: %d' % (value, cbxst))\n\n\n def select_file_name(self, item):\n index = self.model.indexFromItem(item)\n value = self.getFullNameFromItem(item)\n row = index.row()\n key = self.model.item(row, 0).text()\n logger.info('Select calibration array file name: %s' % value)\n #if key == self.data_fname : logger.debug(\"XXX that's it =====================\")\n path0 = './'\n path = get_open_fname_through_dialog_box(self, path0, 'Select data file', filter='Text files (*.txt *.dat *.data *.npy)\\nAll files (*)')\n logger.debug('select_file_name: %s' % (path))\n if path is None : \n logger.info('Selection cancelled')\n return\n\n #item.setCheckState(0)\n self.change_value(item, key, path)\n\n\n def change_value(self, item, key, path):\n logger.debug('change_value for key: %s' % (key))\n if key == self.data_fname :\n item.setText(str(path))\n self.data_nda = self.load_nda_from_file(path)\n logger.info(info_ndarr(self.data_nda, 'From file %s loaded array' % path))\n self.set_metadata_values()\n item.setBackground(QBrush(Qt.cyan))\n else :\n txt = gu.load_textfile(path)\n logger.info('From file %s fill field: %s' % (path,txt))\n item.setText(txt)\n\n\n def set_metadata_values(self):\n \"\"\"Sets metadata values associated with self.data_nda\n \"\"\"\n logger.debug('in set_metadata_values')\n model = self.model\n nda = self.data_nda\n colk, colv = 0, 1\n for row in range(model.rowCount()) :\n key = model.item(row, colk).text()\n if key == 'data_size' : model.item(row, 
colv).setText(str(nda.size))\n elif key == 'data_dtype' : model.item(row, colv).setText(str(nda.dtype))\n elif key == 'data_ndim' : model.item(row, colv).setText(str(nda.ndim))\n elif key == 'data_shape' : model.item(row, colv).setText(str(nda.shape))\n elif key == 'host' : model.item(row, colv).setText(gu.get_hostname())\n elif key == 'uid' : model.item(row, colv).setText(gu.get_login())\n elif key == 'cwd' : model.item(row, colv).setText(gu.get_cwd())\n\n logger.info('Model document content:\\n %s\\n%s' % (self.info_model_dicdoc(), info_ndarr(self.data_nda, 'data n-d array ')))\n\n\n def info_model_dicdoc(self):\n return '\\n '.join(['%12s : %s' % (k,v) for k,v in self.get_model_dicdoc().items()])\n\n\n def get_data_nda(self):\n return self.data_nda\n\n\n def get_model_dicdoc(self, discard_id_ts=True):\n \"\"\"Returns dictionary of key-values of current model\n \"\"\"\n m = self.model\n d = dict([(m.item(r, 0).text(), m.item(r, 1).text()) for r in range(m.rowCount())])\n if d[self.data_fname] == self.data_fname_value : d[self.data_fname] = None\n d['time_sec'] = dbu.time_and_timestamp(**d)[0] # 'time_stamp' is used to fill 'time_sec'\n\n # remove info items added for display purpose\n if discard_id_ts :\n for k in ('_id_ts', 'id_data_ts', 'id_exp_ts') :\n if d.get(k, None) is not None : del d[k]\n\n return d\n\n\n def load_nda_from_file(self, path): \n ext = os.path.splitext(path)[1]\n nda = np.load(path) if ext in ('.npy', ) else load_txt(path)\n return nda\n\n\n def on_click(self, index):\n \"\"\"Override method in QWTable\"\"\"\n item = self.model.itemFromIndex(index)\n value = self.getFullNameFromItem(item)\n row = index.row()\n key = self.model.item(row, 0).text()\n msg = 'on_click item in row:%02d text: %s' % (index.row(), item.text())\n logger.debug(msg)\n\n if key == self.data_fname :\n #logger.debug('on_clic: \"%s\"' % value)\n self.select_file_name(item)\n else :\n if item.isEditable() : logger.info('To edit \"%s\" use double-click' % value)\n else : logger.info('Value for key \"%s\" is auto-filled' % key)\n\n\n def on_double_click(self, index):\n \"\"\"Override method in QWTable\"\"\"\n item = self.model.itemFromIndex(index)\n msg = 'on_double_click: begin edit \"%s\"' % (item.text())\n logger.debug('on_double_click: begin edit \"%s\"' % (item.text()))\n\n \n def on_item_selected(self, ind_sel, ind_desel):\n item = self.model.itemFromIndex(ind_sel)\n logger.debug('on_item_selected \"%s\"' % (item.text() if item is not None else None))\n\n\n def keyPressEvent(self, e) :\n \"\"\"Override method in QWTable\"\"\"\n pass\n\n#------------------------------\n#------------------------------\n\nif __name__ == \"__main__\" :\n def test_CMWDBDocEditor() :\n import sys\n from PyQt5.QtWidgets import QApplication\n\n doc = {'key0':'val0', 'key1':'val1', 'key2':'val2', 'key3':'val3'}\n\n logging.basicConfig(format='%(levelname)s %(name)s : %(message)s', level=logging.DEBUG)\n app = QApplication(sys.argv)\n w = CMWDBDocEditor()\n #w.setMinimumSize(600, 300)\n w.fill_table_model(doc)\n w.show()\n app.exec_()\n del w\n del app\n\n#------------------------------\n\nif __name__ == \"__main__\" :\n test_CMWDBDocEditor()\n\n#------------------------------\n", "import sys\nfrom psana import DataSource\nimport numpy as np\nimport vals\n\ndef det(files):\n ds = DataSource(files=files)\n for run in ds.runs(): # Detector is created based on per-run config. 
\n        hsd = run.Detector('xpphsd')\n        cspad = run.Detector('xppcspad')\n        for evt in run.events():\n            assert(hsd.raw.calib(evt).shape==(5,))\n            assert(hsd.fex.calib(evt).shape==(6,))\n            padarray = vals.padarray\n            assert(np.array_equal(cspad.raw.calib(evt),np.stack((padarray,padarray))))\n            assert(np.array_equal(cspad.raw.image(evt),np.vstack((padarray,padarray))))\n\ndef calib():\n    # Test calib_constants here prior to user.py, which uses mpi\n    # and tends to hang without error...\n    # Use cxid9114 with run 96 (known to work) as a test case.\n    exp = \"cxid9114\"\n    run_no = 96\n    det_str = \"cspad_0002\"\n    from psana.pscalib.calib.MDBWebUtils import calib_constants\n    \n    pedestals, _ = calib_constants(det_str, exp=exp, ctype='pedestals', run=run_no)\n    assert pedestals.shape == (32, 185, 388)\n\n    common_mode, _ = calib_constants(det_str, exp=exp, ctype='common_mode', run=run_no)\n    assert common_mode.shape == (32, 185, 388)\n\n    geometry_string, _ = calib_constants(det_str, exp=exp, ctype='geometry', run=run_no)\n    try:\n        if not isinstance(geometry_string, str) and geometry_string is not None:\n            import unicodedata\n            geometry_string = unicodedata.normalize('NFKD', geometry_string).encode('ascii','ignore')\n    except Exception as e:\n        raise Exception(\"Error getting geometry from calib_constants: %s\"%e)\n\n\nclass MyCustomArgs(object):\n    raw = False\n    epics = False\n    scan = False\n    def __init__(self, dsname, option):\n        self.dsname = dsname\n        if option == \"-r\":\n            self.raw = True\n        elif option == \"-e\":\n            self.epics = True\n        elif option == \"-s\":\n            self.scan = True\n\ndef detnames(xtc_file):\n    ds = DataSource(files=xtc_file)\n    myrun = next(ds.runs())\n\n    assert ('xppcspad', 'cspad', 'raw', '2_3_42') in myrun.xtcinfo\n    assert myrun.epicsinfo[('HX2:DVD:GCC:01:PMON', 'HX2:DVD:GCC:01:PMON,hello1')] == 'HX2:DVD:GCC:01:PMON,hello1'\n    assert myrun.scaninfo[('motor1', 'raw')] == 'raw'\n\ndef det_container(xtc_file):\n    ds = DataSource(files=xtc_file)\n    myrun = next(ds.runs())\n    det = myrun.Detector('bogusdet', accept_missing=True)\n    for config in det.raw._configs:\n        for e in config.__dict__['bogusdet'].items(): \n            pass \n\nif __name__ == '__main__':\n    det()\n    calib()\n", "import numpy as np\nfrom psana.detector.detector_impl import DetectorImpl\nfrom amitypes import Array1d, Array2d, Array3d\n\nclass hsd_raw_0_0_0(DetectorImpl):\n    def __init__(self, *args):\n        super(hsd_raw_0_0_0, self).__init__(*args)\n    def calib(self, evt) -> Array1d:\n        return np.zeros((5))\n\nclass hexanode_raw_0_0_1(DetectorImpl):\n    def __init__(self, *args):\n        super().__init__(*args)\n    def waveforms(self, evt):\n        segments = self._segments(evt)\n        return segments[0].waveforms[2:7,...]\n    def times(self, evt):\n        segments = self._segments(evt)\n        return segments[0].times[2:7,...]\n\nclass hsd_fex_4_5_6(DetectorImpl):\n    def __init__(self, *args):\n        super(hsd_fex_4_5_6, self).__init__(*args)\n    def calib(self, evt) -> Array1d:\n        return np.zeros((6))\n\n# for the fake cameras in the teststand\nclass cspad_cspadRawAlg_1_2_3(DetectorImpl):\n    def __init__(self, *args):\n        super().__init__(*args)\n    def raw(self, evt) -> Array2d:\n        segs = self._segments(evt)\n        if segs is None: return None\n        if segs[0] is None: return None\n        return segs[0].array_raw\nclass fakecam_raw_2_0_0(cspad_cspadRawAlg_1_2_3):\n    def __init__(self, *args):\n        super().__init__(*args)\n\n# for the pva detector in the teststand\nclass pva_pvaAlg_1_2_3(DetectorImpl):\n    def __init__(self, *args):\n        super().__init__(*args)\n    def raw(self, evt) -> Array1d:\n        segs = self._segments(evt)\n        if segs is None: 
return None\n if segs[0] is None: return None\n return segs[0].value\nclass andor_raw_0_0_1(pva_pvaAlg_1_2_3):\n def __init__(self, *args):\n super().__init__(*args)\n\nclass cspad_raw_2_3_42(DetectorImpl):\n def __init__(self, *args):\n super(cspad_raw_2_3_42, self).__init__(*args)\n def raw(self, evt) -> Array3d:\n # an example of how to handle multiple segments\n segs = self._segments(evt)\n return np.stack([segs[i].arrayRaw for i in range(len(segs))])\n def calib(self, evt) -> Array3d:\n return self.raw(evt)\n def image(self, evt) -> Array2d:\n segs = self._segments(evt)\n return np.vstack([segs[i].arrayRaw for i in range(len(segs))])\n\nclass ele_opal_raw_1_2_3(DetectorImpl):\n def __init__(self, *args):\n super().__init__(*args)\n def image(self, evt) -> Array2d:\n return self._segments(evt)[0].img\n\nclass eventid_valseid_0_0_1(DetectorImpl):\n def __init__(self, *args):\n DetectorImpl.__init__(self, *args)\n def experiment(self, evt):\n return self._segments(evt)[0].experiment if self._segments(evt) is not None else None\n def run(self, evt):\n return self._segments(evt)[0].run if self._segments(evt) is not None else None\n def fiducials(self, evt):\n return self._segments(evt)[0].fiducials if self._segments(evt) is not None else None\n def time(self, evt):\n return self._segments(evt)[0].time if self._segments(evt) is not None else None\n\n\nclass gasdetector_valsgd_0_0_1(DetectorImpl):\n def __init__(self, *args):\n DetectorImpl.__init__(self, *args)\n def f_11_ENRC(self, evt):\n return self._segments(evt)[0].f_11_ENRC if self._segments(evt) is not None else None\n def f_12_ENRC(self, evt):\n return self._segments(evt)[0].f_12_ENRC if self._segments(evt) is not None else None\n def f_21_ENRC(self, evt):\n return self._segments(evt)[0].f_21_ENRC if self._segments(evt) is not None else None\n def f_22_ENRC(self, evt):\n return self._segments(evt)[0].f_22_ENRC if self._segments(evt) is not None else None\n def f_63_ENRC(self, evt):\n return self._segments(evt)[0].f_63_ENRC if self._segments(evt) is not None else None\n def f_64_ENRC(self, evt):\n return self._segments(evt)[0].f_64_ENRC if self._segments(evt) is not None else None\n\n\nclass xtcavpars_valsxtp_0_0_1(DetectorImpl):\n def __init__(self, *args):\n DetectorImpl.__init__(self, *args)\n def XTCAV_Analysis_Version (self, evt): return self._segments(evt)[0].XTCAV_Analysis_Version if self._segments(evt) is not None else None\n def XTCAV_ROI_sizeX (self, evt): return self._segments(evt)[0].XTCAV_ROI_sizeX if self._segments(evt) is not None else None\n def XTCAV_ROI_sizeY (self, evt): return self._segments(evt)[0].XTCAV_ROI_sizeY if self._segments(evt) is not None else None\n def XTCAV_ROI_startX (self, evt): return self._segments(evt)[0].XTCAV_ROI_startX if self._segments(evt) is not None else None\n def XTCAV_ROI_startY (self, evt): return self._segments(evt)[0].XTCAV_ROI_startY if self._segments(evt) is not None else None\n def XTCAV_calib_umPerPx (self, evt): return self._segments(evt)[0].XTCAV_calib_umPerPx if self._segments(evt) is not None else None\n def OTRS_DMP1_695_RESOLUTION (self, evt): return self._segments(evt)[0].OTRS_DMP1_695_RESOLUTION if self._segments(evt) is not None else None\n def XTCAV_strength_par_S (self, evt): return self._segments(evt)[0].XTCAV_strength_par_S if self._segments(evt) is not None else None\n def OTRS_DMP1_695_TCAL_X (self, evt): return self._segments(evt)[0].OTRS_DMP1_695_TCAL_X if self._segments(evt) is not None else None\n def XTCAV_Amp_Des_calib_MV (self, evt): return 
self._segments(evt)[0].XTCAV_Amp_Des_calib_MV if self._segments(evt) is not None else None\n def SIOC_SYS0_ML01_AO214 (self, evt): return self._segments(evt)[0].SIOC_SYS0_ML01_AO214 if self._segments(evt) is not None else None\n def XTCAV_Phas_Des_calib_deg (self, evt): return self._segments(evt)[0].XTCAV_Phas_Des_calib_deg if self._segments(evt) is not None else None\n def SIOC_SYS0_ML01_AO215 (self, evt): return self._segments(evt)[0].SIOC_SYS0_ML01_AO215 if self._segments(evt) is not None else None\n def XTCAV_Beam_energy_dump_GeV (self, evt): return self._segments(evt)[0].XTCAV_Beam_energy_dump_GeV if self._segments(evt) is not None else None\n def REFS_DMP1_400_EDES (self, evt): return self._segments(evt)[0].REFS_DMP1_400_EDES if self._segments(evt) is not None else None\n def XTCAV_calib_disp_posToEnergy(self, evt): return self._segments(evt)[0].XTCAV_calib_disp_posToEnergy if self._segments(evt) is not None else None\n def SIOC_SYS0_ML01_AO216 (self, evt): return self._segments(evt)[0].SIOC_SYS0_ML01_AO216 if self._segments(evt) is not None else None\n\n\nclass ebeam_valsebm_0_0_1(DetectorImpl):\n def __init__(self, *args):\n DetectorImpl.__init__(self, *args)\n def Charge(self, evt):\n return self._segments(evt)[0].Charge if self._segments(evt) is not None else None\n def DumpCharge(self, evt):\n return self._segments(evt)[0].DumpCharge if self._segments(evt) is not None else None\n def XTCAVAmpl(self, evt):\n return self._segments(evt)[0].XTCAVAmpl if self._segments(evt) is not None else None\n def XTCAVPhase(self, evt):\n return self._segments(evt)[0].XTCAVPhase if self._segments(evt) is not None else None\n def PkCurrBC2(self, evt):\n return self._segments(evt)[0].PkCurrBC2 if self._segments(evt) is not None else None\n def L3Energy(self, evt):\n return self._segments(evt)[0].L3Energy if self._segments(evt) is not None else None\n\n\nclass ebeam_raw_2_3_42(DetectorImpl):\n def __init__(self, *args):\n super(ebeam_raw_2_3_42, self).__init__(*args)\n def energy(self, evt):\n return self._segments(evt)[0].energy\n\n\nclass cspad_raw_2_3_43(cspad_raw_2_3_42):\n def __init__(self, *args):\n super(cspad_raw_2_3_43, self).__init__(*args)\n def raw(self, evt) -> None:\n raise NotImplementedError()\n\nclass ebeam_raw_2_3_42(DetectorImpl):\n def __init__(self, *args):\n super(ebeam_raw_2_3_42, self).__init__(*args)\n def energy(self, evt):\n return self._segments(evt)[0].energy\n\nclass ebeam_raw_2_3_42(DetectorImpl):\n def __init__(self, *args):\n super(ebeam_raw_2_3_42, self).__init__(*args)\n def energy(self, evt) -> float:\n return self._segments(evt)[0].energy\n\nclass laser_raw_2_3_42(DetectorImpl):\n def __init__(self, *args):\n super(laser_raw_2_3_42, self).__init__(*args)\n def laserOn(self, evt) -> int:\n return self._segments(evt)[0].laserOn\n\nclass hsd_raw_2_3_42(DetectorImpl):\n def __init__(self, *args):\n super(hsd_raw_2_3_42, self).__init__(*args)\n def waveform(self, evt) -> Array1d:\n # example of how to check for missing detector in event\n if self._segments(evt) is None:\n return None\n else:\n return self._segments(evt)[0].waveform\n\nclass camera_raw_0_0_1(DetectorImpl):\n def __init__(self, *args):\n DetectorImpl.__init__(self, *args)\n def array(self, evt) -> Array2d:\n if self._segments(evt) is None:\n return None\n else:\n return self._segments(evt)[0].array\n def __call__(self, evt) -> Array2d:\n \"\"\"Alias for self.raw(evt)\"\"\"\n return self.array(evt)\n\n# for early cctbx/psana2 development\nclass cspad_raw_1_2_3(DetectorImpl):\n def __init__(self, 
*args):\n super(cspad_raw_1_2_3, self).__init__(*args)\n def raw(self, evt):\n #quad0 = self._segments(evt)[0].quads0_data\n #quad1 = self._segments(evt)[0].quads1_data\n #quad2 = self._segments(evt)[0].quads2_data\n #quad3 = self._segments(evt)[0].quads3_data\n #return np.concatenate((quad0, quad1, quad2, quad3), axis=0)\n return self._segments(evt)[0].raw\n\n def raw_data(self, evt):\n return self.raw(evt)\n\n def photonEnergy(self, evt):\n return self._segments(evt)[0].photonEnergy\n\n def calib(self, evt):\n data = self.raw(evt)\n data = data.astype(np.float64) # convert raw photon counts to float for other math operations.\n data -= self.pedestals()\n self.common_mode_apply(data, None)\n gain_mask = self.gain_mask()\n if gain_mask is not None:\n data *= gain_mask\n data *= self.gain()\n return data\n\n def image(self, data, verbose=0): print(\"cspad.image\")\n\n def _fetch(self, key):\n val = None\n if key in self._calibconst:\n val, _ = self._calibconst[key]\n return val\n\n def pedestals(self):\n return self._fetch('pedestals')\n\n def gain_mask(self, gain=0):\n return self._fetch('gain_mask')\n\n def common_mode_apply(self, data, common_mode):\n # FIXME: apply common_mode\n return data\n\n def gain(self):\n # default gain is set to 1.0 (FIXME)\n return 1.0\n\n def geometry(self):\n geometry_string = self._fetch('geometry')\n geometry_access = None\n if geometry_string is not None:\n from psana.pscalib.geometry.GeometryAccess import GeometryAccess\n geometry_access = GeometryAccess()\n geometry_access.load_pars_from_str(geometry_string)\n return geometry_access\n\n", "#!/usr/bin/env python\n\"\"\"\nThis script grew-up from test of specific issue -\n2021-10-18:\nXiaozhe complains that too many pixels outside signal region in ueddaq02 r401 shows up in selection of intensities between 100 and 500 keV.\nSee:\n - `github: <https://github.com/slac-lcls/lcls2>`_.\n - `confluence: <https://confluence.slac.stanford.edu/display/PSDM/EPIXQUAD+ueddaq02+r401+issue+calib+hot+banks+2021-10-18>`_.\n\nCreated on 2021-10-18 by Mikhail Dubrovin\n\"\"\"\nimport sys\nimport math\nimport numpy as np\nimport logging\nlogger = logging.getLogger(__name__)\nDICT_NAME_TO_LEVEL = logging._nameToLevel # {'INFO': 20, 'WARNING': 30, 'WARN': 30,...\n\nfrom psana.pyalgos.generic.NDArrUtils import info_ndarr, divide_protected\nfrom psana import DataSource\nfrom psana.detector.UtilsGraphics import gr, fleximagespec#, fleximage, flexhist\n\nfrom psana.detector.UtilsEpix10ka import event_constants\nimport argparse\n\n\nSCRNAME = sys.argv[0].rsplit('/')[-1]\nUSAGE = '\\n %s -r554 -t1' % SCRNAME\\\n + '\\n %s -e ueddaq02 -d epixquad -r554 -t1' % SCRNAME\\\n + '\\n -t, --tname - test name/number:'\\\n + '\\n 1 - segment numeration'\\\n + '\\n 2 - gain range index'\\\n + '\\n 3 - gain, ADU/keV'\\\n + '\\n 4 - pedestals'\\\n + '\\n 5 - rms'\\\n + '\\n 6 - raw'\\\n + '\\n 7 - raw-peds'\\\n + '\\n 8 - (raw-peds)/gain, keV'\\\n + '\\n 9 - calib, keV'\\\n + '\\n 10 - status'\\\n + '\\n 11 - gain factor = 1/gain, keV/ADU'\\\n + '\\n ----'\\\n + '\\n 21 - run 401 two-threshold selection issue'\\\n + '\\n 22 - (raw-peds)/gain, keV hot - specific isuue test'\\\n + '\\n 23 - (raw-peds)/gain, keV cold - specific isuue test'\n\nd_tname = '0'\nd_detname = 'epixquad'\nd_expname = 'ueddaq02'\nd_run = 554\nd_events = 5\nd_evskip = 0\nd_stepnum = None\nd_saveimg = False\nd_grindex = None\nd_amin = None\nd_amax = None\nd_cframe = 0\nd_loglev = 'INFO'\n\nparser = argparse.ArgumentParser(usage=USAGE, description='%s - test per-event 
components of the det.raw.calib method'%SCRNAME)\nparser.add_argument('-t', '--tname', default=d_tname, type=str, help='test name, def=%s' % d_tname)\nparser.add_argument('-d', '--detname', default=d_detname, type=str, help='detector name, def=%s' % d_detname)\nparser.add_argument('-e', '--expname', default=d_expname, type=str, help='experiment name, def=%s' % d_expname)\nparser.add_argument('-r', '--run', default=d_run, type=int, help='run number, def=%s' % d_run)\nparser.add_argument('-N', '--events', default=d_events, type=int, help='maximal number of events, def=%s' % d_events)\nparser.add_argument('-K', '--evskip', default=d_evskip, type=int, help='number of events to skip in the beginning of run, def=%s' % d_evskip)\nparser.add_argument('-s', '--stepnum', default=d_stepnum, type=int, help='step number counting from 0 or None for all steps, def=%s' % d_stepnum)\nparser.add_argument('-S', '--saveimg', default=d_saveimg, action='store_true', help='save image in file, def=%s' % d_saveimg)\nparser.add_argument('-g', '--grindex', default=d_grindex, type=int, help='gain range index [0,6] for peds, gains etc., def=%s' % str(d_grindex))\nparser.add_argument('-l', '--loglev', default=d_loglev, type=str, help='logger level (DEBUG, INFO, WARNING, etc.), def.=%s' % str(d_loglev))\nparser.add_argument('--amin', default=d_amin, type=float, help='spectrum minimal value, def=%s' % str(d_amin))\nparser.add_argument('--amax', default=d_amax, type=float, help='spectrum maximal value, def=%s' % str(d_amax))\nparser.add_argument('--cframe', default=d_cframe, type=int, help='coordinate frame for images 0/1 for psana/LAB, def=%s' % str(d_cframe))\n\n\nargs = parser.parse_args()\nprint('*** parser.parse_args: %s' % str(args))\n\nlogging.basicConfig(format='[%(levelname).1s] %(name)s L%(lineno)04d: %(message)s', level=DICT_NAME_TO_LEVEL[args.loglev])\nlogging.getLogger('matplotlib').setLevel(logging.WARNING)\nlogging.getLogger('psana.psexp.event_manager').setLevel(logging.INFO)\n\ntname = args.tname # sys.argv[1] if len(sys.argv) > 1 else '0'\nTHRMIN = 100\nTHRMAX = 500\nAMIN = 1\nAMAX = 200\nCROP1_IMG = False\nCROP2_IMG = False\n\nflims = None\nfname = 'ims.png'\n\ndef selection(arr): return np.where((arr>THRMIN) & (arr<THRMAX), arr, 0)\n\ndef amin_amax(args, amin_def=None, amax_def=None):\n return args.amin if args.amin else amin_def,\\\n args.amax if args.amax else amax_def\n\nds = DataSource(exp=args.expname, run=args.run)\norun = next(ds.runs())\ndet = orun.Detector(args.detname)\n\nMDB = det.raw._data_bit_mask # M14 if det.raw._dettype == 'epix10ka' else M15\nprefix = 'ims-%s-r%04d' % (orun.expt, orun.runnum)\n\nprint('*** det.raw._data_bit_mask_: %s' % oct(MDB))\nprint('*** det.raw._calibconst.keys:', det.raw._calibconst.keys())\nprint('*** pedestal metadata:', det.raw._calibconst['pedestals'][1])\nprint('*** gain metadata:', det.raw._calibconst['pixel_gain'][1])\n#print('*** rms metadata:', det.raw._calibconst['pixel_rms'][1])\n#print('*** status metadata:', det.raw._calibconst['pixel_status'][1])\n\npeds = det.raw._calibconst['pedestals'][0]\ngain = det.raw._calibconst['pixel_gain'][0]\nrms = det.raw._calibconst['pixel_rms'][0]\nstatus = det.raw._calibconst['pixel_status'][0]\nprint(info_ndarr(peds,'pedestals'))\nprint(info_ndarr(rms,'rms'))\nprint(info_ndarr(gain,'gain, ADU/keV'))\n\narr, img = None, None\nsuffix = ''\nevt_peds, evt_gfac = None, None\n\nfor nstep,step in enumerate(orun.steps()):\n\n if args.stepnum is not None and nstep<args.stepnum:\n print('skip nstep %d < stepnum=%d' % (nstep, 
args.stepnum))\n continue\n\n if args.stepnum is not None and nstep>args.stepnum:\n print('break at nstep %d > stepnum=%d' % (nstep, args.stepnum))\n break\n\n print('=== Step %d' % nstep)\n\n for nevt,evt in enumerate(step.events()):\n\n if nevt>args.events:\n print('break at nevt %d' % nevt)\n break\n\n if nevt<args.evskip:\n print('skip nevt %d' % nevt)\n continue\n\n if tname in ('4', '7', '8', '22', '23'):\n evt_peds = peds[args.grindex,:] if args.grindex is not None else\\\n event_constants(det.raw, evt, peds) #(7, 4, 352, 384) -> (4, 352, 384)\n print(info_ndarr(evt_peds,'evt_peds'))\n\n if tname in ('8', '11', '22', '23'):\n gfac = divide_protected(np.ones_like(gain), gain)\n evt_gfac = gfac[args.grindex,:] if args.grindex is not None else\\\n event_constants(det.raw, evt, gfac) #(7, 4, 352, 384) -> (4, 352, 384)\n print(info_ndarr(evt_gfac,'evt_gfac, keV/ADU'))\n\n step_evt = 's%02d-e%04d' % (nstep, nevt)\n\n if tname=='1':\n suffix = 'segment-nums'\n ones = np.ones(det.raw._seg_geo.shape()) # (352,384)\n seginds = det.raw._segment_indices() #_segments(evt)\n print('seginds', seginds)\n arr = np.stack([ones*i for i in seginds])\n AMIN, AMAX = amin_amax(args, amin_def=-1, amax_def=4)\n\n elif tname=='2':\n suffix = 'gain-range-index-%s' % step_evt\n arr = det.raw._gain_range_index(evt)\n AMIN, AMAX = amin_amax(args, amin_def=-1, amax_def=8)\n\n elif tname=='3':\n suffix = 'gain-%s' % step_evt\n arr = event_constants(det.raw, evt, gain) #(4, 352, 384)\n AMIN, AMAX = amin_amax(args, amin_def=0, amax_def=20)\n\n elif tname=='4':\n suffix = 'pedestals-%s' % step_evt\n arr = evt_peds\n AMIN, AMAX = amin_amax(args, amin_def=2000, amax_def=4000)\n\n elif tname=='5':\n suffix = 'rms-%s' % step_evt\n arr = rms[args.grindex,:] if args.grindex is not None else\\\n event_constants(det.raw, evt, rms) #(4, 352, 384)\n AMIN, AMAX = amin_amax(args, amin_def=0, amax_def=8)\n\n elif tname=='6':\n suffix = 'raw-%s' % step_evt\n arr = det.raw.raw(evt) & MDB\n AMIN, AMAX = amin_amax(args, amin_def=2000, amax_def=4000)\n\n elif tname=='7':\n suffix = 'raw-peds-%s' % step_evt\n arr = (det.raw.raw(evt) & MDB) - evt_peds\n AMIN, AMAX = amin_amax(args, amin_def=-40, amax_def=40)\n\n elif tname=='8':\n suffix = 'raw-peds-x-gain-%s' % step_evt\n arr = ((det.raw.raw(evt) & MDB) - evt_peds)*evt_gfac\n AMIN, AMAX = amin_amax(args, amin_def=-5, amax_def=5)\n\n elif tname=='9':\n suffix = 'calib-%s' % step_evt\n arr = det.raw.calib(evt)\n AMIN, AMAX = amin_amax(args, amin_def=-5, amax_def=5)\n\n elif tname=='10':\n suffix = 'status-%s' % step_evt\n arr = event_constants(det.raw, evt, status) #(4, 352, 384)\n AMIN, AMAX = amin_amax(args, amin_def=0, amax_def=32)\n\n elif tname=='11':\n suffix = 'gain-factor-%s' % step_evt\n arr = evt_gfac\n AMIN, AMAX = amin_amax(args, amin_def=0, amax_def=20)\n\n elif tname=='21':\n suffix = 'calib-issue-with-thresholds-%s' % step_evt\n arr = selection(det.raw.calib(evt))\n AMIN, AMAX = amin_amax(args, amin_def=50, amax_def=200)\n\n elif tname=='22':\n suffix = 'raw-peds-x-gain-region-hot-%s' % step_evt\n arr = np.array(((det.raw.raw(evt) & MDB) - evt_peds)*evt_gfac)\n CROP1_IMG = True\n AMIN, AMAX = amin_amax(args, amin_def=-5, amax_def=5)\n\n elif tname=='23':\n suffix = 'raw-peds-x-gain-region-cold-%s' % step_evt\n arr = np.array(((det.raw.raw(evt) & MDB) - evt_peds)*evt_gfac)\n CROP2_IMG = True\n AMIN, AMAX = amin_amax(args, amin_def=-5, amax_def=5)\n\n else:\n suffix = 'calib-%s' % step_evt\n arr = det.raw.calib(evt)\n AMIN, AMAX = amin_amax(args, amin_def=-100, 
amax_def=100)\n\n print(info_ndarr(arr,'Event %d det.raw.calib'%nevt))\n\n img = det.raw.image(evt, nda=arr, vbase=-1, cframe=args.cframe)\n\n if CROP1_IMG:\n img0 = np.zeros_like(img)\n img0[:352,600:] = img[:352,600:]\n img = img0\n arr = img[:352,600:]\n\n if CROP2_IMG:\n img0 = np.zeros_like(img)\n img0[:352,:192] = img[:352,:192]\n img = img0\n arr = img[:352,:192]\n\n print(info_ndarr(img,' img'))\n\n if flims is None:\n flims = fleximagespec(img, arr=arr, bins=100, w_in=11, h_in=8, amin=AMIN, amax=AMAX) #fraclo=0.01, frachi=0.99\n flims.move(10,20)\n else:\n fname = '%s-%s.png' % (prefix, suffix)\n flims.update(img, arr=arr, amin=AMIN, amax=AMAX)\n flims.axtitle('Event %d %s'%(nevt,fname))\n\n gr.show(mode=1)\n\n if tname in ('0','9') and args.saveimg:\n flims.save(fname)\n\ngr.show()\n\nif args.saveimg: flims.save(fname)\n\nsys.exit('END OF %s -t %s' % (SCRNAME, tname))\n\n# EOF\n" ]
[ [ "numpy.load" ], [ "numpy.vstack", "numpy.stack" ], [ "numpy.zeros" ], [ "numpy.ones_like", "numpy.where", "numpy.zeros_like", "numpy.stack" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
brettkoonce/nlp-architect
[ "29b72c39b28dbd8ca8d341075b82a2cdc396e8f8", "b8c56b8b542ec9cda5db00323f7fa729cf2928ea" ]
[ "nlp_architect/data/cdc_resources/relations/word_embedding_relation_extraction.py", "nlp_architect/models/gnmt_model.py" ]
[ "# ******************************************************************************\n# Copyright 2017-2018 Intel Corporation\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ******************************************************************************\n\nimport logging\nimport math\nfrom typing import List, Set\n\nfrom scipy.spatial.distance import cosine as cos\n\nfrom nlp_architect.common.cdc.mention_data import MentionDataLight\nfrom nlp_architect.data.cdc_resources.embedding.embed_elmo import ElmoEmbedding, \\\n ElmoEmbeddingOffline\nfrom nlp_architect.data.cdc_resources.embedding.embed_glove import GloveEmbedding, \\\n GloveEmbeddingOffline\nfrom nlp_architect.data.cdc_resources.relations.relation_extraction import RelationExtraction\nfrom nlp_architect.data.cdc_resources.relations.relation_types_enums import EmbeddingMethod, \\\n RelationType\nfrom nlp_architect.utils.string_utils import StringUtils\n\nlogger = logging.getLogger(__name__)\n\n\nclass WordEmbeddingRelationExtraction(RelationExtraction):\n def __init__(self, method: EmbeddingMethod = EmbeddingMethod.GLOVE,\n glove_file: str = None, elmo_file: str = None):\n \"\"\"\n Extract Relation between two mentions according to Word Embedding cosine distance\n\n Args:\n method (optional): EmbeddingMethod.{GLOVE/GLOVE_OFFLINE/ELMO/ELMO_OFFLINE}\n (default = GLOVE)\n glove_file (required on GLOVE/GLOVE_OFFLINE mode): str Location of Glove file\n elmo_file (required on ELMO_OFFLINE mode): str Location of Elmo file\n \"\"\"\n if method == EmbeddingMethod.GLOVE:\n self.embedding = GloveEmbedding(glove_file)\n elif method == EmbeddingMethod.GLOVE_OFFLINE:\n self.embedding = GloveEmbeddingOffline(glove_file)\n elif method == EmbeddingMethod.ELMO:\n self.embedding = ElmoEmbedding()\n elif method == EmbeddingMethod.ELMO_OFFLINE:\n self.embedding = ElmoEmbeddingOffline(elmo_file)\n\n super(WordEmbeddingRelationExtraction, self).__init__()\n\n def extract_all_relations(self, mention_x: MentionDataLight,\n mention_y: MentionDataLight) -> Set[RelationType]:\n ret_ = set()\n ret_.add(self.extract_sub_relations(mention_x, mention_y,\n RelationType.WORD_EMBEDDING_MATCH))\n return ret_\n\n def extract_sub_relations(self, mention_x: MentionDataLight, mention_y: MentionDataLight,\n relation: RelationType) -> RelationType:\n \"\"\"\n Check if input mentions has the given relation between them\n\n Args:\n mention_x: MentionDataLight\n mention_y: MentionDataLight\n relation: RelationType\n\n Returns:\n RelationType: relation in case mentions has given relation or\n RelationType.NO_RELATION_FOUND otherwise\n \"\"\"\n if relation is not RelationType.WORD_EMBEDDING_MATCH:\n return RelationType.NO_RELATION_FOUND\n\n mention_x_str = mention_x.tokens_str\n mention_y_str = mention_y.tokens_str\n if StringUtils.is_pronoun(mention_x_str.lower()) or StringUtils.is_pronoun(\n mention_y_str.lower()):\n return RelationType.NO_RELATION_FOUND\n\n if self.is_word_embed_match(mention_x, mention_y):\n return RelationType.WORD_EMBEDDING_MATCH\n\n return 
RelationType.NO_RELATION_FOUND\n\n    def is_word_embed_match(self, mention_x: MentionDataLight, mention_y: MentionDataLight):\n        \"\"\"\n        Check if the input mentions' Word Embedding cosine similarity is above 0.65\n\n        Args:\n            mention_x: MentionDataLight\n            mention_y: MentionDataLight\n\n        Returns:\n            bool\n        \"\"\"\n        match_result = False\n        x_embed = self.embedding.get_feature_vector(mention_x)\n        y_embed = self.embedding.get_feature_vector(mention_y)\n        # make sure words are not 'unk/None/0'\n        if x_embed is not None and y_embed is not None:\n            dist = cos(x_embed, y_embed)\n            if not math.isnan(dist):\n                sim = 1 - dist\n                if sim > 0.65:\n                    match_result = True\n\n        return match_result\n\n    @staticmethod\n    def get_supported_relations() -> List[RelationType]:\n        \"\"\"\n        Return all supported relations by this class\n\n        Returns:\n            List[RelationType]\n        \"\"\"\n        return [RelationType.WORD_EMBEDDING_MATCH]\n", "# ******************************************************************************\n# Copyright 2017-2018 Intel Corporation\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ******************************************************************************\n# Changes Made from original:\n# import paths\n# ******************************************************************************\n# Copyright 2017 Google Inc. 
All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n# pylint: skip-file\n\"\"\"GNMT attention sequence-to-sequence model with dynamic RNN support.\"\"\"\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport tensorflow as tf\n\nfrom nlp_architect.models.gnmt import attention_model\nfrom nlp_architect.models.gnmt import model_helper\nfrom nlp_architect.models.gnmt.utils import misc_utils as utils\n\n__all__ = [\"GNMTModel\"]\n\n\nclass GNMTModel(attention_model.AttentionModel):\n \"\"\"Sequence-to-sequence dynamic model with GNMT attention architecture\n with sparsity policy support.\n \"\"\"\n\n def __init__(self,\n hparams,\n mode,\n iterator,\n source_vocab_table,\n target_vocab_table,\n reverse_target_vocab_table=None,\n scope=None,\n extra_args=None):\n self.is_gnmt_attention = (\n hparams.attention_architecture in [\"gnmt\", \"gnmt_v2\"])\n\n super(GNMTModel, self).__init__(\n hparams=hparams,\n mode=mode,\n iterator=iterator,\n source_vocab_table=source_vocab_table,\n target_vocab_table=target_vocab_table,\n reverse_target_vocab_table=reverse_target_vocab_table,\n scope=scope,\n extra_args=extra_args)\n\n def _build_encoder(self, hparams):\n \"\"\"Build a GNMT encoder.\"\"\"\n if hparams.encoder_type == \"uni\" or hparams.encoder_type == \"bi\":\n return super(GNMTModel, self)._build_encoder(hparams)\n\n if hparams.encoder_type != \"gnmt\":\n raise ValueError(\"Unknown encoder_type %s\" % hparams.encoder_type)\n\n # Build GNMT encoder.\n num_bi_layers = 1\n num_uni_layers = self.num_encoder_layers - num_bi_layers\n utils.print_out(\"# Build a GNMT encoder\")\n utils.print_out(\" num_bi_layers = %d\" % num_bi_layers)\n utils.print_out(\" num_uni_layers = %d\" % num_uni_layers)\n\n iterator = self.iterator\n source = iterator.source\n if self.time_major:\n source = tf.transpose(source)\n\n with tf.variable_scope(\"encoder\") as scope:\n dtype = scope.dtype\n\n self.encoder_emb_inp = self.encoder_emb_lookup_fn(\n self.embedding_encoder, source)\n\n # Execute _build_bidirectional_rnn from Model class\n bi_encoder_outputs, bi_encoder_state = self._build_bidirectional_rnn(\n inputs=self.encoder_emb_inp,\n sequence_length=iterator.source_sequence_length,\n dtype=dtype,\n hparams=hparams,\n num_bi_layers=num_bi_layers,\n num_bi_residual_layers=0, # no residual connection\n )\n\n # Build unidirectional layers\n if self.extract_encoder_layers:\n encoder_state, encoder_outputs = self._build_individual_encoder_layers(\n bi_encoder_outputs, num_uni_layers, dtype, hparams)\n else:\n encoder_state, encoder_outputs = self._build_all_encoder_layers(\n bi_encoder_outputs, num_uni_layers, dtype, hparams)\n\n # Pass all encoder states to the decoder\n # except the first bi-directional layer\n encoder_state = (bi_encoder_state[1],) + (\n (encoder_state,) if num_uni_layers == 1 else encoder_state)\n\n return encoder_outputs, 
encoder_state\n\n def _build_all_encoder_layers(self, bi_encoder_outputs,\n num_uni_layers, dtype, hparams):\n \"\"\"Build encoder layers all at once.\"\"\"\n uni_cell = model_helper.create_rnn_cell(\n unit_type=hparams.unit_type,\n num_units=hparams.num_units,\n num_layers=num_uni_layers,\n num_residual_layers=self.num_encoder_residual_layers,\n forget_bias=hparams.forget_bias,\n dropout=hparams.dropout,\n num_gpus=self.num_gpus,\n base_gpu=1,\n mode=self.mode,\n single_cell_fn=self.single_cell_fn)\n encoder_outputs, encoder_state = tf.nn.dynamic_rnn(\n uni_cell,\n bi_encoder_outputs,\n dtype=dtype,\n sequence_length=self.iterator.source_sequence_length,\n time_major=self.time_major)\n\n # Use the top layer for now\n self.encoder_state_list = [encoder_outputs]\n\n return encoder_state, encoder_outputs\n\n def _build_individual_encoder_layers(self, bi_encoder_outputs,\n num_uni_layers, dtype, hparams):\n \"\"\"Run each of the encoder layer separately, not used in general seq2seq.\"\"\"\n uni_cell_lists = model_helper._cell_list(\n unit_type=hparams.unit_type,\n num_units=hparams.num_units,\n num_layers=num_uni_layers,\n num_residual_layers=self.num_encoder_residual_layers,\n forget_bias=hparams.forget_bias,\n dropout=hparams.dropout,\n num_gpus=self.num_gpus,\n base_gpu=1,\n mode=self.mode,\n single_cell_fn=self.single_cell_fn)\n\n encoder_inp = bi_encoder_outputs\n encoder_states = []\n self.encoder_state_list = [bi_encoder_outputs[:, :, :hparams.num_units],\n bi_encoder_outputs[:, :, hparams.num_units:]]\n with tf.variable_scope(\"rnn/multi_rnn_cell\"):\n for i, cell in enumerate(uni_cell_lists):\n with tf.variable_scope(\"cell_%d\" % i) as scope:\n encoder_inp, encoder_state = tf.nn.dynamic_rnn(\n cell,\n encoder_inp,\n dtype=dtype,\n sequence_length=self.iterator.source_sequence_length,\n time_major=self.time_major,\n scope=scope)\n encoder_states.append(encoder_state)\n self.encoder_state_list.append(encoder_inp)\n\n encoder_state = tuple(encoder_states)\n encoder_outputs = self.encoder_state_list[-1]\n return encoder_state, encoder_outputs\n\n def _build_decoder_cell(self, hparams, encoder_outputs, encoder_state,\n source_sequence_length):\n \"\"\"Build a RNN cell with GNMT attention architecture.\"\"\"\n # Standard attention\n if not self.is_gnmt_attention:\n return super(GNMTModel, self)._build_decoder_cell(\n hparams, encoder_outputs, encoder_state, source_sequence_length)\n\n # GNMT attention\n attention_option = hparams.attention\n attention_architecture = hparams.attention_architecture\n num_units = hparams.num_units\n infer_mode = hparams.infer_mode\n\n dtype = tf.float32\n\n if self.time_major:\n memory = tf.transpose(encoder_outputs, [1, 0, 2])\n else:\n memory = encoder_outputs\n\n if self.mode == tf.contrib.learn.ModeKeys.INFER and infer_mode == \"beam_search\":\n memory, source_sequence_length, encoder_state, batch_size = (\n self._prepare_beam_search_decoder_inputs(\n hparams.beam_width, memory, source_sequence_length,\n encoder_state))\n else:\n batch_size = self.batch_size\n\n attention_mechanism = self.attention_mechanism_fn(\n attention_option, num_units, memory, source_sequence_length, self.mode)\n\n cell_list = model_helper._cell_list( # pylint: disable=protected-access\n unit_type=hparams.unit_type,\n num_units=num_units,\n num_layers=self.num_decoder_layers,\n num_residual_layers=self.num_decoder_residual_layers,\n forget_bias=hparams.forget_bias,\n dropout=hparams.dropout,\n num_gpus=self.num_gpus,\n mode=self.mode,\n single_cell_fn=self.single_cell_fn,\n 
residual_fn=gnmt_residual_fn\n )\n\n # Only wrap the bottom layer with the attention mechanism.\n attention_cell = cell_list.pop(0)\n\n # Only generate alignment in greedy INFER mode.\n alignment_history = (self.mode == tf.contrib.learn.ModeKeys.INFER\n and infer_mode != \"beam_search\")\n attention_cell = tf.contrib.seq2seq.AttentionWrapper(\n attention_cell,\n attention_mechanism,\n attention_layer_size=None, # don't use attention layer.\n output_attention=False,\n alignment_history=alignment_history,\n name=\"attention\")\n\n if attention_architecture == \"gnmt\":\n cell = GNMTAttentionMultiCell(\n attention_cell, cell_list)\n elif attention_architecture == \"gnmt_v2\":\n cell = GNMTAttentionMultiCell(\n attention_cell, cell_list, use_new_attention=True)\n else:\n raise ValueError(\n \"Unknown attention_architecture %s\" % attention_architecture)\n\n if hparams.pass_hidden_state:\n decoder_initial_state = tuple(\n zs.clone(cell_state=es)\n if isinstance(zs, tf.contrib.seq2seq.AttentionWrapperState) else es\n for zs, es in zip(\n cell.zero_state(batch_size, dtype), encoder_state))\n else:\n decoder_initial_state = cell.zero_state(batch_size, dtype)\n\n return cell, decoder_initial_state\n\n def _get_infer_summary(self, hparams):\n if hparams.infer_mode == \"beam_search\":\n return tf.no_op()\n elif self.is_gnmt_attention:\n return attention_model._create_attention_images_summary(\n self.final_context_state[0])\n else:\n return super(GNMTModel, self)._get_infer_summary(hparams)\n\n\nclass GNMTAttentionMultiCell(tf.nn.rnn_cell.MultiRNNCell):\n \"\"\"A MultiCell with GNMT attention style.\"\"\"\n\n def __init__(self, attention_cell, cells, use_new_attention=False):\n \"\"\"Creates a GNMTAttentionMultiCell.\n\n Args:\n attention_cell: An instance of AttentionWrapper.\n cells: A list of RNNCell wrapped with AttentionInputWrapper.\n use_new_attention: Whether to use the attention generated from current\n step bottom layer's output. 
Default is False.\n \"\"\"\n cells = [attention_cell] + cells\n self.use_new_attention = use_new_attention\n super(GNMTAttentionMultiCell, self).__init__(cells, state_is_tuple=True)\n\n def __call__(self, inputs, state, scope=None):\n \"\"\"Run the cell with bottom layer's attention copied to all upper layers.\"\"\"\n if not tf.contrib.framework.nest.is_sequence(state):\n raise ValueError(\n \"Expected state to be a tuple of length %d, but received: %s\"\n % (len(self.state_size), state))\n\n with tf.variable_scope(scope or \"multi_rnn_cell\"):\n new_states = []\n\n with tf.variable_scope(\"cell_0_attention\"):\n attention_cell = self._cells[0]\n attention_state = state[0]\n cur_inp, new_attention_state = attention_cell(inputs, attention_state)\n new_states.append(new_attention_state)\n\n for i in range(1, len(self._cells)):\n with tf.variable_scope(\"cell_%d\" % i):\n\n cell = self._cells[i]\n cur_state = state[i]\n\n if self.use_new_attention:\n cur_inp = tf.concat([cur_inp, new_attention_state.attention], -1)\n else:\n cur_inp = tf.concat([cur_inp, attention_state.attention], -1)\n\n cur_inp, new_state = cell(cur_inp, cur_state)\n new_states.append(new_state)\n\n return cur_inp, tuple(new_states)\n\n\ndef gnmt_residual_fn(inputs, outputs):\n \"\"\"Residual function that handles different inputs and outputs inner dims.\n\n Args:\n inputs: cell inputs, this is actual inputs concatenated with the attention\n vector.\n outputs: cell outputs\n\n Returns:\n outputs + actual inputs\n \"\"\"\n def split_input(inp, out):\n out_dim = out.get_shape().as_list()[-1]\n inp_dim = inp.get_shape().as_list()[-1]\n return tf.split(inp, [out_dim, inp_dim - out_dim], axis=-1)\n\n actual_inputs, _ = tf.contrib.framework.nest.map_structure(\n split_input, inputs, outputs)\n\n def assert_shape_match(inp, out):\n inp.get_shape().assert_is_compatible_with(out.get_shape())\n\n tf.contrib.framework.nest.assert_same_structure(actual_inputs, outputs)\n tf.contrib.framework.nest.map_structure(\n assert_shape_match, actual_inputs, outputs)\n return tf.contrib.framework.nest.map_structure(\n lambda inp, out: inp + out, actual_inputs, outputs)\n" ]
[ [ "scipy.spatial.distance.cosine" ], [ "tensorflow.nn.dynamic_rnn", "tensorflow.transpose", "tensorflow.concat", "tensorflow.contrib.framework.nest.is_sequence", "tensorflow.contrib.framework.nest.map_structure", "tensorflow.no_op", "tensorflow.contrib.seq2seq.AttentionWrapper", "tensorflow.variable_scope", "tensorflow.split", "tensorflow.contrib.framework.nest.assert_same_structure" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [ "0.13", "1.6", "0.14", "1.10", "0.15", "1.4", "0.16", "1.9", "0.19", "1.5", "0.18", "1.2", "1.7", "0.12", "1.0", "0.17", "1.3", "1.8" ], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
roliveira/pyxel
[ "7725639172cf29861b6e924a5b0103d66277086f" ]
[ "pyxel/app.py" ]
[ "import datetime\nimport gzip\nimport inspect\nimport math\nimport os\nimport pickle\nimport time\n\nimport glfw\nimport numpy as np\nimport PIL.Image\n\nfrom . import utilities\nfrom .audio_player import AudioPlayer\nfrom .constants import (\n APP_GIF_CAPTURE_COUNT,\n APP_GIF_CAPTURE_SCALE,\n APP_MEASURE_FRAME_COUNT,\n APP_SCREEN_MAX_SIZE,\n APP_SCREEN_SCALE_CUTDOWN,\n APP_SCREEN_SCALE_MINIMUM,\n AUDIO_SOUND_COUNT,\n GLFW_VERSION,\n KEY_0,\n KEY_1,\n KEY_2,\n KEY_3,\n KEY_ALT,\n KEY_CONTROL,\n KEY_LEFT_ALT,\n KEY_LEFT_BUTTON,\n KEY_LEFT_CONTROL,\n KEY_LEFT_SHIFT,\n KEY_LEFT_SUPER,\n KEY_MIDDLE_BUTTON,\n KEY_RIGHT_ALT,\n KEY_RIGHT_BUTTON,\n KEY_RIGHT_CONTROL,\n KEY_RIGHT_SHIFT,\n KEY_RIGHT_SUPER,\n KEY_SHIFT,\n KEY_SUPER,\n MOUSE_CURSOR_DATA,\n MOUSE_CURSOR_HEIGHT,\n MOUSE_CURSOR_IMAGE_X,\n MOUSE_CURSOR_IMAGE_Y,\n MOUSE_CURSOR_WIDTH,\n RENDERER_IMAGE_COUNT,\n RENDERER_TILEMAP_COUNT,\n)\nfrom .renderer import Renderer\n\n\nclass App:\n def __init__(\n self,\n module,\n width,\n height,\n caption,\n scale,\n palette,\n fps,\n border_width,\n border_color,\n ):\n if glfw.get_version() < tuple(map(int, GLFW_VERSION.split(\".\"))):\n raise RuntimeError(\"glfw version is lower than {}\".format(GLFW_VERSION))\n\n if width > APP_SCREEN_MAX_SIZE or height > APP_SCREEN_MAX_SIZE:\n raise ValueError(\n \"screen size is larger than {}x{}\".format(\n APP_SCREEN_MAX_SIZE, APP_SCREEN_MAX_SIZE\n )\n )\n\n global pyxel\n pyxel = module\n\n self._palette = palette[:]\n self._fps = fps\n self._border_width = border_width\n self._border_color = border_color\n self._next_update_time = 0\n self._one_frame_time = 1 / fps\n self._key_state = {}\n self._is_mouse_visible = False\n self._update = None\n self._draw = None\n self._capture_start = 0\n self._capture_index = 0\n self._capture_images = [None] * APP_GIF_CAPTURE_COUNT\n\n self._perf_monitor_is_enabled = False\n self._perf_fps_count = 0\n self._perf_fps_start_time = 0\n self._perf_fps = 0\n self._perf_update_count = 0\n self._perf_update_total_time = 0\n self._perf_update_time = 0\n self._perf_draw_count = 0\n self._perf_draw_total_time = 0\n self._perf_draw_time = 0\n\n # exports variables\n pyxel._app = self\n pyxel.width = width\n pyxel.height = height\n pyxel.mouse_x = 0\n pyxel.mouse_y = 0\n pyxel.mouse_cursor = False\n pyxel.frame_count = 0\n\n # initialize window\n if not glfw.init():\n exit()\n\n monitor = glfw.get_primary_monitor()\n display_width, display_height = glfw.get_video_mode(monitor)[0]\n\n if scale == 0:\n scale = max(\n min(\n (display_width // width) - APP_SCREEN_SCALE_CUTDOWN,\n (display_height // height) - APP_SCREEN_SCALE_CUTDOWN,\n ),\n APP_SCREEN_SCALE_MINIMUM,\n )\n\n window_width = width * scale + border_width\n window_height = height * scale + border_width\n self._window = glfw.create_window(\n window_width, window_height, caption, None, None\n )\n\n if not self._window:\n glfw.terminate()\n exit()\n\n glfw.set_window_pos(\n self._window,\n (display_width - window_width) // 2,\n (display_height - window_height) // 2,\n )\n\n glfw.make_context_current(self._window)\n glfw.set_window_size_limits(\n self._window, width, height, glfw.DONT_CARE, glfw.DONT_CARE\n )\n self._hidpi_scale = (\n glfw.get_framebuffer_size(self._window)[0]\n / glfw.get_window_size(self._window)[0]\n )\n self._update_viewport()\n\n glfw.set_key_callback(self._window, self._key_callback)\n glfw.set_mouse_button_callback(self._window, self._mouse_button_callback)\n\n glfw.set_window_icon(self._window, 1, [utilities.get_icon_image()])\n 
glfw.set_input_mode(self._window, glfw.CURSOR, glfw.CURSOR_HIDDEN)\n\n # initialize renderer\n self._renderer = Renderer(width, height)\n\n # initialize audio player\n self._audio_player = AudioPlayer()\n\n # export module functions\n pyxel.btn = self.btn\n pyxel.btnp = self.btnp\n pyxel.btnr = self.btnr\n pyxel.mouse = self.mouse\n pyxel.run = self.run\n pyxel.run_with_profiler = self.run_with_profiler\n pyxel.quit = self.quit\n pyxel.save = self.save\n pyxel.load = self.load\n pyxel.image = self._renderer.image\n pyxel.tilemap = self._renderer.tilemap\n pyxel.clip = self._renderer.draw_command.clip\n pyxel.pal = self._renderer.draw_command.pal\n pyxel.cls = self._renderer.draw_command.cls\n pyxel.pix = self._renderer.draw_command.pix\n pyxel.line = self._renderer.draw_command.line\n pyxel.rect = self._renderer.draw_command.rect\n pyxel.rectb = self._renderer.draw_command.rectb\n pyxel.circ = self._renderer.draw_command.circ\n pyxel.circb = self._renderer.draw_command.circb\n pyxel.blt = self._renderer.draw_command.blt\n pyxel.bltm = self._renderer.draw_command.bltm\n pyxel.text = self._renderer.draw_command.text\n pyxel.sound = self._audio_player.sound\n pyxel.play = self._audio_player.play\n pyxel.playm = None # self._audio_player.playm\n pyxel.stop = self._audio_player.stop\n\n # initialize mouse cursor\n pyxel.image(3, system=True).set(\n MOUSE_CURSOR_IMAGE_X, MOUSE_CURSOR_IMAGE_Y, MOUSE_CURSOR_DATA\n )\n\n def btn(self, key):\n press_frame = self._key_state.get(key, 0)\n return press_frame > 0 or press_frame == -pyxel.frame_count - 1\n\n def btnp(self, key, hold=0, period=0):\n press_frame = self._key_state.get(key, 0)\n hold_frame = pyxel.frame_count - press_frame - hold\n return (\n press_frame == pyxel.frame_count\n or press_frame == -pyxel.frame_count - 1\n or press_frame > 0\n and period > 0\n and hold_frame >= 0\n and hold_frame % period == 0\n )\n\n def btnr(self, key):\n return self._key_state.get(key, 0) == -pyxel.frame_count\n\n def mouse(self, visible):\n self._is_mouse_visible = visible\n\n def run(self, update, draw):\n self._update = update\n self._draw = draw\n\n pyxel.frame_count = 1\n self._next_update_time = self._perf_fps_start_time = time.time()\n\n def main_loop():\n while not glfw.window_should_close(self._window):\n glfw.poll_events()\n\n self._measure_fps()\n self._update_viewport()\n self._update_frame()\n self._draw_frame()\n\n glfw.swap_buffers(self._window)\n glfw.terminate()\n\n if self._audio_player.output_stream:\n with self._audio_player.output_stream:\n main_loop()\n else:\n main_loop()\n\n def run_with_profiler(self, update, draw):\n import cProfile\n import pstats\n\n profile = cProfile.Profile()\n profile.enable()\n profile.runcall(self.run, update, draw)\n profile.disable()\n\n stats = pstats.Stats(profile).strip_dirs().sort_stats(\"tottime\")\n frame_count = pyxel.frame_count\n\n for key in stats.stats:\n cc, nc, tt, ct, callers = stats.stats[key]\n cc = round(cc / frame_count, 3)\n nc = round(nc / frame_count, 3)\n tt *= 1000 / frame_count\n ct *= 1000 / frame_count\n stats.stats[key] = (cc, nc, tt, ct, callers)\n\n stats.print_stats(30)\n\n def quit(self):\n glfw.set_window_should_close(self._window, True)\n\n def save(self, filename):\n data = {\"version\": pyxel.VERSION}\n\n image_list = [\n pyxel.image(i).data.dumps() for i in range(RENDERER_IMAGE_COUNT - 1)\n ]\n data[\"image\"] = image_list\n\n tilemap_list = [\n pyxel.tilemap(i).data.dumps() for i in range(RENDERER_TILEMAP_COUNT)\n ]\n data[\"tilemap\"] = tilemap_list\n\n sound_list = 
[pyxel.sound(i) for i in range(AUDIO_SOUND_COUNT)]\n data[\"sound\"] = sound_list\n\n pickled_data = pickle.dumps(data)\n\n dirname = os.path.dirname(inspect.stack()[-1].filename)\n filename = os.path.join(dirname, filename)\n\n with gzip.open(filename, mode=\"wb\") as fp:\n fp.write(pickled_data)\n\n def load(self, filename):\n dirname = os.path.dirname(inspect.stack()[-1].filename)\n filename = os.path.join(dirname, filename)\n\n with gzip.open(filename, mode=\"rb\") as fp:\n pickled_data = fp.read()\n\n data = pickle.loads(pickled_data)\n\n # todo: version check\n\n image_list = data.get(\"image\")\n if image_list:\n for i in range(RENDERER_IMAGE_COUNT - 1):\n pyxel.image(i).data[:, :] = np.loads(image_list[i])\n\n tilemap_list = data.get(\"tilemap\")\n if tilemap_list:\n for i in range(RENDERER_TILEMAP_COUNT):\n pyxel.tilemap(i).data[:, :] = np.loads(tilemap_list[i])\n\n sound_list = data.get(\"sound\")\n if sound_list:\n for i in range(AUDIO_SOUND_COUNT):\n src = sound_list[i]\n dest = pyxel.sound(i)\n\n dest.note = src.note\n dest.tone = src.tone\n dest.volume = src.volume\n dest.effect = src.effect\n dest.speed = src.speed\n\n def _key_callback(self, window, key, scancode, action, mods):\n if action == glfw.PRESS:\n state = pyxel.frame_count\n elif action == glfw.RELEASE:\n if self._key_state[key] == pyxel.frame_count:\n state = -pyxel.frame_count - 1\n else:\n state = -pyxel.frame_count\n else:\n return\n\n self._key_state[key] = state\n\n if key == KEY_LEFT_SHIFT or key == KEY_RIGHT_SHIFT:\n self._key_state[KEY_SHIFT] = state\n elif key == KEY_LEFT_CONTROL or key == KEY_RIGHT_CONTROL:\n self._key_state[KEY_CONTROL] = state\n elif key == KEY_LEFT_ALT or key == KEY_RIGHT_ALT:\n self._key_state[KEY_ALT] = state\n elif key == KEY_LEFT_SUPER or key == KEY_RIGHT_SUPER:\n self._key_state[KEY_SUPER] = state\n\n def _update_mouse_pos(self):\n x, y = glfw.get_cursor_pos(self._window)\n pyxel.mouse_x = int((x - self._viewport_left) / self._viewport_scale)\n pyxel.mouse_y = int((y - self._viewport_top) / self._viewport_scale)\n\n def _mouse_button_callback(self, window, button, action, mods):\n if button == glfw.MOUSE_BUTTON_LEFT:\n button = KEY_LEFT_BUTTON\n elif button == glfw.MOUSE_BUTTON_MIDDLE:\n button = KEY_MIDDLE_BUTTON\n elif button == glfw.MOUSE_BUTTON_RIGHT:\n button = KEY_RIGHT_BUTTON\n else:\n return\n\n if action == glfw.PRESS:\n self._key_state[button] = pyxel.frame_count\n elif action == glfw.RELEASE:\n if self._key_state.get(button) == pyxel.frame_count:\n self._key_state[button] = -pyxel.frame_count - 1\n else:\n self._key_state[button] = -pyxel.frame_count\n\n def _update_viewport(self):\n win_width, win_height = glfw.get_window_size(self._window)\n scale_x = win_width // pyxel.width\n scale_y = win_height // pyxel.height\n scale = min(scale_x, scale_y)\n\n self._viewport_scale = scale\n self._viewport_width = pyxel.width * scale\n self._viewport_height = pyxel.height * scale\n self._viewport_left = (win_width - self._viewport_width) // 2\n self._viewport_top = (win_height - self._viewport_height) // 2\n self._viewport_bottom = win_height - self._viewport_height - self._viewport_top\n\n def _update_frame(self):\n # wait for update time\n while True:\n cur_time = time.time()\n wait_time = self._next_update_time - cur_time\n\n if wait_time > 0:\n time.sleep(wait_time)\n else:\n break\n\n update_count = math.floor(-wait_time / self._one_frame_time) + 1\n self._next_update_time += update_count * self._one_frame_time\n\n self._update_mouse_pos()\n\n # update frame\n for _ in 
range(update_count):\n update_start_time = time.time()\n self._check_special_input()\n\n self._update()\n\n pyxel.frame_count += 1\n self._measure_update_time(update_start_time)\n\n def _draw_frame(self):\n draw_start_time = time.time()\n\n self._draw()\n\n self._draw_perf_monitor()\n self._draw_mouse_cursor()\n\n hs = self._hidpi_scale\n image = self._renderer.render(\n self._viewport_left * hs,\n self._viewport_bottom * hs,\n self._viewport_width * hs,\n self._viewport_height * hs,\n self._palette,\n self._border_color,\n )\n self._capture_images[self._capture_index % APP_GIF_CAPTURE_COUNT] = image\n self._capture_index += 1\n\n self._measure_draw_time(draw_start_time)\n\n def _toggle_fullscreen(self):\n if glfw.get_window_monitor(self._window): # fullscreen to window\n glfw.set_window_monitor(self._window, None, *self._window_info, 0)\n else: # window to fullscreen\n info = [0] * 4\n info[0], info[1] = glfw.get_window_pos(self._window)\n info[2], info[3] = glfw.get_window_size(self._window)\n self._window_info = info\n\n monitor = glfw.get_primary_monitor()\n size = glfw.get_video_mode(monitor)[0]\n glfw.set_window_monitor(self._window, monitor, 0, 0, *size, 0)\n\n def _check_special_input(self):\n if self.btn(KEY_ALT):\n if self.btnp(glfw.KEY_ENTER):\n self._toggle_fullscreen()\n\n if self.btnp(KEY_0):\n self._perf_monitor_is_enabled = not self._perf_monitor_is_enabled\n\n if self.btnp(KEY_1):\n self._save_capture_image()\n\n if self.btnp(KEY_2):\n self._capture_start = self._capture_index - 1\n\n if self.btnp(KEY_3):\n self._save_capture_animation()\n\n if self.btnp(glfw.KEY_ESCAPE):\n self.quit()\n\n def _save_capture_image(self):\n index = (self._capture_index - 1) % APP_GIF_CAPTURE_COUNT\n image = self._get_capture_image(index)\n image.save(self._get_capture_filename() + \".png\")\n\n def _save_capture_animation(self):\n image_count = min(\n self._capture_index - self._capture_start, APP_GIF_CAPTURE_COUNT\n )\n\n if image_count <= 0:\n return\n\n start_index = self._capture_index - image_count\n images = []\n\n for i in range(image_count):\n index = (start_index + i) % APP_GIF_CAPTURE_COUNT\n images.append(self._get_capture_image(index))\n\n images[0].save(\n self._get_capture_filename() + \".gif\",\n save_all=True,\n append_images=images[1:],\n duration=self._one_frame_time * 1000,\n loop=0,\n optimize=True,\n )\n\n def _get_capture_image(self, index):\n image = PIL.Image.frombuffer(\n \"RGB\",\n (pyxel.width, pyxel.height),\n self._capture_images[index],\n \"raw\",\n \"RGB\",\n 0,\n 1,\n )\n\n image = utilities.palettize_pil_image(image)\n\n image = image.resize(\n (pyxel.width * APP_GIF_CAPTURE_SCALE, pyxel.height * APP_GIF_CAPTURE_SCALE)\n )\n\n return image\n\n @staticmethod\n def _get_capture_filename():\n return os.path.join(\n utilities.get_desktop_path(),\n datetime.datetime.now().strftime(\"pyxel-%y%m%d-%H%M%S\"),\n )\n\n def _measure_fps(self):\n cur_time = time.time()\n self._perf_fps_count += 1\n\n if self._perf_fps_count == APP_MEASURE_FRAME_COUNT:\n self._perf_fps = self._perf_fps_count / (\n cur_time - self._perf_fps_start_time\n )\n self._perf_fps_count = 0\n self._perf_fps_start_time = cur_time\n\n def _measure_update_time(self, update_start_time):\n self._perf_update_count += 1\n self._perf_update_total_time += time.time() - update_start_time\n\n if self._perf_update_count == APP_MEASURE_FRAME_COUNT:\n self._perf_update_time = (\n self._perf_update_total_time / self._perf_update_count\n ) * 1000\n self._perf_update_total_time = 0\n self._perf_update_count = 0\n\n 
def _measure_draw_time(self, draw_start_time):\n self._perf_draw_count += 1\n self._perf_draw_total_time += time.time() - draw_start_time\n\n if self._perf_draw_count == APP_MEASURE_FRAME_COUNT:\n self._perf_draw_time = (\n self._perf_draw_total_time / self._perf_draw_count\n ) * 1000\n self._perf_draw_total_time = 0\n self._perf_draw_count = 0\n\n def _draw_perf_monitor(self):\n if not self._perf_monitor_is_enabled:\n return\n\n fps = \"{:.2f}\".format(self._perf_fps)\n update = \"{:.2f}\".format(self._perf_update_time)\n draw = \"{:.2f}\".format(self._perf_draw_time)\n\n text = self._renderer.draw_command.text\n text(1, 0, fps, 1)\n text(0, 0, fps, 9)\n text(1, 6, update, 1)\n text(0, 6, update, 9)\n text(1, 12, draw, 1)\n text(0, 12, draw, 9)\n\n def _draw_mouse_cursor(self):\n if not self._is_mouse_visible:\n return\n\n pyxel.blt(\n pyxel.mouse_x,\n pyxel.mouse_y,\n 3,\n MOUSE_CURSOR_IMAGE_X,\n MOUSE_CURSOR_IMAGE_Y,\n MOUSE_CURSOR_WIDTH,\n MOUSE_CURSOR_HEIGHT,\n 1,\n )\n" ]
[ [ "numpy.loads" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
ASUPychron/pychron
[ "f21b79f4592a9fb9dc9a4cb2e4e943a3885ededc", "f21b79f4592a9fb9dc9a4cb2e4e943a3885ededc" ]
[ "pychron/core/geometry/centroid_pyx/setup.py", "pychron/core/tests/curvature.py" ]
[ "# ===============================================================================\n# Copyright 2011 Jake Ross\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ===============================================================================\n\n\n\"\"\"\n\nhttp://docs.cython.org/pychron/quickstart/build.html\n\npython setup.py build_ext --inplace\n\"\"\"\n\nfrom __future__ import absolute_import\nfrom distutils.core import setup\nfrom distutils.extension import Extension\n\nimport numpy\nfrom Cython.Distutils import build_ext\n\next_modules = [\n Extension(\n \"calculate_centroid\", [\"centroid.pyx\"], include_dirs=[numpy.get_include()]\n )\n]\n\nsetup(\n name=\"calculate_centroid\",\n cmdclass={\"build_ext\": build_ext},\n ext_modules=ext_modules,\n)\n", "from __future__ import absolute_import\nimport unittest\n\nfrom numpy import linspace\n\nfrom pychron.core.geometry.geometry import curvature\n\n\nclass CurvatureTestCase(unittest.TestCase):\n def test_line(self):\n xs = linspace(0, 5)\n ys = 1 * xs ** 2\n\n self.assertEqual(True, curvature(ys)[2])\n\n\nif __name__ == \"__main__\":\n unittest.main()\n" ]
[ [ "numpy.get_include" ], [ "numpy.linspace" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
maharshi-neu/disease-simulation-team-18
[ "f20c2dc7ff08f7660159d5e8dd593def3dc16649" ]
[ "src/main/Simulator.py" ]
[ "import pygame\nimport numpy as np\nfrom collections import deque\nimport logging\nimport os\n\nfrom . import (Particle, cfg, calculate_r_naught,\n        bounce_wall, build_walls, random_coord, draw_walls,\n        draw_line, display_text, euclidean_distance, bounce_particle,\n        uniform_probability, bar_chart, make_grid_array, which_grid)\n\n# ALSA lib pcm.c:8306:(snd_pcm_recover) underrun occurred\nos.environ['SDL_AUDIODRIVER'] = 'dsp'\n\n\nclass Simulator:\n    def process_input(self):\n        \"\"\"\n        Keyboard input for exiting\n        \"\"\"\n\n        event = pygame.event.poll()\n        if event.type == pygame.QUIT:\n            self.running = False\n        elif event.type == pygame.KEYDOWN:\n            if event.key == pygame.K_ESCAPE:\n                self.running = False\n            elif event.key == pygame.K_SPACE:\n                if not self.pause:\n                    self.pause = True\n                else:\n                    self.pause = False\n\n    def __init__(self):\n\n        x = 370\n        y = 0\n        os.environ['SDL_VIDEO_WINDOW_POS'] = \"%d,%d\" % (x,y)\n\n        pygame.init()\n        self.clock = pygame.time.Clock()\n        self.clock_tick = cfg.FPS\n        self.pause = False\n\n        pygame.display.set_caption(cfg.GAME_TITLE)\n\n        self.X = cfg.GAME_WIDTH\n        self.Y = cfg.GAME_HEIGHT\n\n        self.main_x = self.X\n        self.main_y = self.Y\n\n        self.wall_width = cfg.WALL_SIZE\n\n        # if cfg.QUARANTINE:\n        self.main_x = self.X - cfg.QUARANTINE_CENTRE_WIDTH\n        self.q_centre_wall_vector = build_walls(\n            self.wall_width, self.main_x, self.X, 0, cfg.QUARANTINE_CENTRE_HEIGHT)\n\n        # Wall co-ordinates\n        self.wall_vectors = list()\n\n        self.xpart = self.main_x / cfg.COMMUNITY_COLS\n        self.ypart = self.main_y / cfg.COMMUNITY_ROWS\n\n        self.wall_vector_list = list()\n        for y in range(cfg.COMMUNITY_ROWS):\n            for x in range(cfg.COMMUNITY_COLS):\n                x0, y0 = x * self.xpart, y * self.ypart\n                x1, y1 = x0 + self.xpart, y0 + self.ypart\n                self.wall_vector_list.append(build_walls(self.wall_width, x0, x1, y0, y1))\n\n        self.window = pygame.display.set_mode((self.X, self.Y))\n\n        self.running = True\n\n        self.susceptible_container = list()\n        self.infected_container = list()\n        self.removed_container = list()\n        self.all_container = list()\n\n        self.n_susceptible = cfg.POPULATION - cfg.I0 - cfg.R0\n        self.n_infected = cfg.I0\n        self.n_recovered = cfg.R0\n        self.T = cfg.POPULATION\n\n        self.font = pygame.font.SysFont(None, 18)\n\n        self.grid = make_grid_array(cfg.N_GRID_ROW, cfg.N_GRID_COL)\n        self.cell_size_w = (cfg.GAME_WIDTH / cfg.N_GRID_COL)\n        self.cell_size_h = (cfg.GAME_HEIGHT / cfg.N_GRID_ROW)\n\n        self.init_groups()\n\n        self.day = 0\n        self.tick = 0\n        self.init_render_stats()\n\n        self.infection_timeseries = list()\n        self.diff_infection_timeseries = list()\n        self.BETA = list()\n        self.Ro = 0.0\n\n        self.bar_chart_height = (cfg.GAME_HEIGHT * .3)\n\n        used_height = cfg.GAME_HEIGHT - (cfg.QUARANTINE_CENTRE_HEIGHT + self.bar_chart_height)\n        h = cfg.GAME_HEIGHT - used_height\n        self.central_wall_width = h * .1\n        self.central_location_wall_vector = build_walls(\n            self.central_wall_width, self.main_x, cfg.GAME_WIDTH,\n            h,\n            cfg.GAME_HEIGHT)\n\n        self.in_central_location = set()\n\n        self.asymptomatic_container = set()\n\n        self.Rmax = -99\n        self.to_contact_trace = deque()\n\n        self.vaccine_availability = 0\n\n    @property\n    def suslen(self):\n        \"\"\"\n        Returns length of susceptible container\n        \"\"\"\n        return len(self.susceptible_container)\n\n    @property\n    def inflen(self):\n        \"\"\"\n        Returns length of infected container\n        \"\"\"\n        return len(self.infected_container)\n\n    @property\n    def reclen(self):\n        \"\"\"\n        Returns length of removed container\n        \"\"\"\n        return len(self.removed_container)\n\n    @property\n    def alllen(self):\n        \"\"\"\n        
Returns length of all container\n \"\"\"\n return len(self.all_container)\n\n def disperse_vaccine(self):\n \"\"\"\n Disperses vaccine per day\n \"\"\"\n if cfg.VACCINE and self.day % 1 == 0:\n self.vaccine_availability += cfg.VACCINE_DISPERSION_RATE\n\n def init_groups(self):\n \"\"\"\n Called in __init__ populates all the containers with Particles\n \"\"\"\n min_ct = self.clock_tick / 2\n max_ct = self.clock_tick * 2\n\n w = 0\n # SUSCEPTIBLE\n for i in range(self.n_susceptible):\n fps = np.random.randint(min_ct, max_ct)\n wv = self.wall_vector_list[w]\n x = random_coord(wv['x0'], wv['x1'], cfg.PARTICLE_RADIUS)\n y = random_coord(wv['y0'], wv['y1'], cfg.PARTICLE_RADIUS)\n p = Particle(\n x=x,\n y=y,\n status=cfg.SUSCEPTIBLE_TYPE,\n color=cfg.SUSCEPTIBLE_COLOR,\n clock_tick=fps)\n p.wear_mask()\n p.my_boundries = wv\n self.susceptible_container.append(p)\n self.all_container.append(p)\n\n row_col = which_grid(self.cell_size_w, x, self.cell_size_h, y)\n p.grid = row_col\n self.grid[row_col[0]][row_col[1]].append(p)\n\n w += 1\n if w >= len(self.wall_vector_list):\n w = 0\n\n # INFECTED\n for _ in range(self.n_infected):\n fps = np.random.randint(min_ct, max_ct)\n wv = self.wall_vector_list[w]\n x = random_coord(wv['x0'], wv['x1'], cfg.PARTICLE_RADIUS)\n y = random_coord(wv['y0'], wv['y1'], cfg.PARTICLE_RADIUS)\n p = Particle(\n x=x,\n y=y,\n status=cfg.INFECTED_TYPE,\n color=cfg.INFECTED_COLOR,\n clock_tick=fps)\n p.wear_mask()\n p.my_boundries = wv\n self.infected_container.append(p)\n self.all_container.append(p)\n row_col = which_grid(self.cell_size_w, x, self.cell_size_h, y)\n p.grid = row_col\n self.grid[row_col[0]][row_col[1]].append(p)\n\n w += 1\n if w >= len(self.wall_vector_list):\n w = 0\n\n # RECOVERED\n for _ in range(self.n_recovered):\n fps = np.random.randint(min_ct, max_ct)\n wv = self.wall_vector_list[w]\n x = random_coord(wv['x0'], wv['x1'], cfg.PARTICLE_RADIUS)\n y = random_coord(wv['y0'], wv['y1'], cfg.PARTICLE_RADIUS)\n p = Particle(\n x=x,\n y=y,\n status=cfg.REMOVED_TYPE,\n color=cfg.REMOVED_COLOR,\n clock_tick=fps)\n p.my_boundries = wv\n self.removed_container.append(p)\n self.all_container.append(p)\n row_col = which_grid(self.cell_size_w, x, self.cell_size_h, y)\n p.grid = row_col\n self.grid[row_col[0]][row_col[1]].append(p)\n\n w += 1\n if w >= len(self.wall_vector_list):\n w = 0\n\n ''' COMMENT START\n def handle_particle_collision(self, i):\n \"\"\"\n Sweep and prune\n \"\"\"\n diameter = cfg.PARTICLE_RADIUS * 2\n newly_infected = list()\n\n ip = self.all_container[i]\n for j in range(i + 1, self.alllen):\n jp = self.all_container[j]\n\n travelling = jp.is_travelling + ip.is_travelling\n if (jp.status != cfg.REMOVED_TYPE != ip.status) and not travelling:\n condition = (jp.is_infected) + (ip.is_infected)\n if condition == 1:\n d, dx, dy = euclidean_distance(ip.x, ip.y, jp.x, jp.y)\n if diameter >= d:\n bounce_particle(ip, jp, dx, dy)\n if jp.is_infected:\n if(ip.infect(jp, self.day)):\n newly_infected.append(ip)\n if not ip.will_show_symptoms and ip not in self.asymptomatic_container:\n self.asymptomatic_container.add(ip)\n jp.came_in_contact_with.append(ip)\n else:\n if(jp.infect(ip, self.day)):\n newly_infected.append(jp)\n if not jp.will_show_symptoms and jp not in self.asymptomatic_container:\n self.asymptomatic_container.add(jp)\n ip.came_in_contact_with.append(jp)\n else:\n break\n else:\n break\n\n return newly_infected\n COMMENT END'''\n\n def handle_particle_collision(self):\n \"\"\"\n Uniform grid spatial partition\n \"\"\"\n diameter = 
cfg.PARTICLE_DIAMETER\n newly_infected = list()\n for i in range(cfg.N_GRID_ROW):\n for j in range(cfg.N_GRID_COL):\n tocheck = self.grid[i][j]\n for m in range(len(tocheck) - 1):\n for n in range(m + 1, len(tocheck)):\n p1 = tocheck[m]\n p2 = tocheck[n]\n\n travelling = p1.is_travelling + p2.is_travelling\n if (p1.status != cfg.REMOVED_TYPE != p2.status) and not travelling:\n condition = (p1.status == cfg.INFECTED_TYPE) + (p2.status == cfg.INFECTED_TYPE)\n if condition == 1:\n d, dx, dy = euclidean_distance(p1.x, p1.y, p2.x, p2.y)\n\n if diameter >= d:\n bounce_particle(p1, p2, dx, dy)\n if p1.is_infected:\n if(p2.infect(p1, self.day)):\n newly_infected.append(p2)\n if not p2.will_show_symptoms and p2 not in self.asymptomatic_container:\n self.asymptomatic_container.add(p2)\n p1.came_in_contact_with.append(p2)\n else:\n if(p1.infect(p2, self.day)):\n newly_infected.append(p1)\n if not p1.will_show_symptoms and p1 not in self.asymptomatic_container:\n self.asymptomatic_container.add(p1)\n p2.came_in_contact_with.append(p1)\n return newly_infected\n\n def update_fps(self):\n \"\"\"\n Updates & returns FPS per tick\n \"\"\"\n fps = str(int(self.clock.get_fps()))\n return fps\n\n def update_time(self):\n \"\"\"\n Updates & returns (day) time per tick\n \"\"\"\n self.day = np.round(self.tick / cfg.DAY_IN_CLOCK_TICK, 2)\n return 'Day {}'.format(self.day)\n\n def move_to_quarantine(self, p, override=False):\n \"\"\"\n Input:\n p = Particle\n override = flag\n\n Moves an infected particle to quarantine @ recovery rate mentioned in config file.\n In case of contact tracing override flag is passed and even asymptomatic particles \\\n are quarantined\n\n \"\"\"\n if cfg.QUARANTINE and p.is_infected and not p.quarantined and not p.is_travelling:\n\n if ((self.day - p.infected_since) > cfg.QUARANTINE_AT_DAY and p.will_show_symptoms) or override:\n p.fly_to_in_peace(\n (cfg.QUARANTINE_CENTRE_WIDTH / 2) + self.main_x,\n (cfg.QUARANTINE_CENTRE_HEIGHT / 2),\n self.q_centre_wall_vector\n )\n p.quarantined = True\n self.to_contact_trace.append(p)\n\n def update_containers(self, newly_infected, newly_recovered):\n \"\"\"\n Input:\n newly_infected = list of newly infected particles\n newly_recovered = list of newly recovered particles\n\n Updates the infected container with list of newly infected particles\n Updates the removed container with list of newly recovered particles\n \"\"\"\n if newly_infected:\n self.susceptible_container = [\n sus for sus in self.susceptible_container if sus.is_susceptible]\n self.infected_container.extend(newly_infected)\n if newly_recovered:\n self.infected_container = [\n inf for inf in self.infected_container if inf.is_infected]\n self.removed_container.extend(newly_recovered)\n\n def trace_line(self, p):\n \"\"\"\n Input:\n p = Particle\n\n Draws a straight line per tick between the input particle and the particles that \\\n it has come in contact with\n \"\"\"\n if cfg.CONTACT_TRACING and p.is_infected:\n for i in p.came_in_contact_with:\n draw_line(self.window, cfg.LIGHTYELLOW, p.x, p.y, i.x, i.y)\n\n def update_stats(self):\n \"\"\"\n Updates line chart per tick with shaded area with color code of \\\n Susceptible, Infected, Removed.\n Scale is determined with the number of ticks the simulation will run \\\n (config = RUN_TIME_IN_TICK)\n \"\"\"\n stats_height = self.stats.get_height()\n stats_width = self.stats.get_width()\n\n n_sus_now = self.suslen\n n_inf_now = self.inflen\n n_pop_now = self.alllen\n n_rec_now = self.reclen\n\n t = int((self.tick / cfg.RUN_TIME_IN_TICK) * 
stats_width) - 1\n\n y_infect = int(\n stats_height - (n_inf_now / n_pop_now) * stats_height\n )\n\n y_susceptible = int((n_sus_now / n_pop_now) * stats_height)\n\n stats_graph = pygame.PixelArray(self.stats)\n stats_graph[t, :y_susceptible] = pygame.Color(*cfg.SUSCEPTIBLE_COLOR)\n stats_graph[t, y_infect:] = pygame.Color(*cfg.INFECTED_COLOR)\n\n def init_render_stats(self):\n \"\"\"\n Renders the stats box where chart is drawn in __init__.\n This is called once\n\n returns None\n \"\"\"\n stats_x, stats_y = cfg.GAME_WIDTH // 4, cfg.GAME_HEIGHT // 4\n self.stats = pygame.Surface((stats_x, stats_y))\n self.stats.fill(cfg.GREY)\n self.stats.set_alpha(200)\n self.stats_pos = (10, cfg.GAME_HEIGHT - (stats_y + 10))\n\n def render_stats(self):\n \"\"\"\n Renders the SIR chart every tick\n \"\"\"\n self.stats.unlock()\n self.window.blit(self.stats, self.stats_pos)\n\n def update_tick(self):\n \"\"\"\n Updates tick counter per tick\n \"\"\"\n self.tick += 1\n\n def update_infection_timeseries(self):\n \"\"\"\n Updates infected time series per day, used to calculate Ro\n \"\"\"\n if self.day % 1 == 0 and self.inflen != self.T:\n self.infection_timeseries.append(self.inflen)\n if len(self.infection_timeseries) > 1:\n self.diff_infection_timeseries.append(\n (self.inflen - self.infection_timeseries[-2])\n )\n else:\n self.diff_infection_timeseries.append(self.inflen)\n\n def travel_to_central_location(self):\n \"\"\"\n Below runs every 2nd day\n Chosen particles travel to the central location (bottom-right room) \\\n which has a smaller area than any other room in the simulation.\n The small area increases the probability of a particle contracting the virus \\\n if a particle with the virus is in the room already.\n Max: 5 particles are chosen at random from the all container\n Min: 0 can also be chosen if the particles do not meet the following criteria.\n\n Filter criteria:\n - Particle should not already be in the room\n - Particle should not be quarantined\n\n Below runs every 10th day\n Particles in the central location are transferred back to their original room; \\\n the only criterion filtering this is whether the particle is quarantined.\n \"\"\"\n if not cfg.CENTRAL_LOCATION and self.day % .5 == 0:\n return\n\n now_there = self.in_central_location\n if (self.day % 10 == 0):\n for p in self.in_central_location:\n if p.quarantined:\n continue\n p.fly_to_in_peace(p.prev_xy_b[0], p.prev_xy_b[1], p.prev_xy_b[2])\n self.in_central_location = set()\n\n if (self.day % 2 != 0):\n return\n\n how_many = 5\n for _ in range(how_many):\n c = np.random.randint(0, self.alllen)\n\n p = self.all_container[c]\n\n if p in now_there or p.quarantined:\n continue\n\n xd = (self.central_location_wall_vector['x1'] - self.central_location_wall_vector['x0']) / 2\n x = self.central_location_wall_vector['x0'] + (xd/2) + self.central_wall_width\n yd = (self.central_location_wall_vector['y1'] - self.central_location_wall_vector['y0']) / 2\n y = self.central_location_wall_vector['y0'] + (yd/2) + self.central_wall_width\n p.fly_to_in_peace(x, y, self.central_location_wall_vector)\n\n self.in_central_location.add(p)\n\n def pick_lucky_winners_for_travel(self):\n \"\"\"\n A particle pair travels inter-community per tick if there is more than one community\n Max 2 tries are made to find unique particles suitable for travel\n Max 1 pair travels\n Min 0 pairs travel\n\n Particles are chosen at random from the all container\n\n Filtering criteria:\n - Particles should not be in the same community\n - No particle should be 
quarantined\n - No particle should be in travelling phase\n \"\"\"\n if not cfg.TRAVEL:\n return\n\n should_travel_happen = uniform_probability()\n if should_travel_happen <= cfg.TRAVEL_FREQUENCY:\n\n p1, p2 = self.all_container[0], self.all_container[0]\n\n try_till = 2\n i = 0\n\n while p1.my_boundries == p2.my_boundries and i < try_till:\n c1 = np.random.randint(0, self.alllen)\n c2 = np.random.randint(0, self.alllen)\n p1 = self.all_container[c1]\n p2 = self.all_container[c2]\n\n i += 1\n\n q = p1.quarantined + p2.quarantined + p1.is_travelling + p2.is_travelling\n if q != 0:\n continue\n\n tmp = p1.my_boundries\n p1.fly_to_in_peace(p2.x, p2.y, p2.my_boundries)\n p2.fly_to_in_peace(p1.x, p1.y, tmp)\n break\n\n def contact_trace(self):\n \"\"\"\n Contact tracing is done every day\n Contact tracing is constrained by resources; we assume that there exists only \\\n one team of contact tracers for the given population.\n When an infected particle is quarantined it is put into a queue for contact tracing\n\n Queue.pop() gives the particle to trace\n Every particle maintains a stack of particles whom it came in contact with\n Stack.pop() is done on the trace particle\n if the popped particle is infected then quarantine\n when the Stack is empty (trace complete), remove the particle from the queue\n \"\"\"\n if not cfg.CONTACT_TRACING or self.day % 1:\n return\n if self.to_contact_trace:\n trace = self.to_contact_trace[0]\n if trace.came_in_contact_with:\n to_q = trace.came_in_contact_with.pop()\n self.move_to_quarantine(to_q, True)\n else:\n self.to_contact_trace.popleft()\n\n def vaccinate(self, p):\n \"\"\"\n Vaccine sessions are held twice per day\n if vaccines are available then one is provided to the particle\n the vaccine provides a shield to the particle; the shield brings down the probability \\\n drastically (config.SHIELD_PROVIDED_BY_VACCINE)\n 2 doses can be given to a particle; the 2nd dose gives complete immunity (depending on the shield value)\n\n VACCINE_DISPERSION_RATE / suslen = when the susceptible count is high, vaccine distribution is slow; \\\n when the susceptible count drops, distribution is faster than before\n \"\"\"\n if cfg.VACCINE and self.day % .2 and p.vaccinated < (2 * cfg.SHIELD_PROVIDED_BY_VACCINE) and not p.is_infected:\n probability_of_getting_vaccine = (cfg.VACCINE_DISPERSION_RATE / self.suslen)\n will_p_get_vaccine = uniform_probability()\n if p.vaccinated:\n will_p_get_vaccine += .3\n if will_p_get_vaccine <= probability_of_getting_vaccine and self.vaccine_availability >= 1:\n p.vaccinated += cfg.SHIELD_PROVIDED_BY_VACCINE\n if p.vaccinated == cfg.SHIELD_PROVIDED_BY_VACCINE:\n p.color = cfg.LIGHTPINK\n elif p.vaccinated > cfg.SHIELD_PROVIDED_BY_VACCINE:\n p.color = cfg.LIGHTBLUE\n\n p.radius -= 1\n self.vaccine_availability -= 1\n\n\n def lockdown(self, p):\n \"\"\"\n Sets inter-community travel frequency to zero when cfg.LOCKDOWN is enabled\n \"\"\"\n if cfg.LOCKDOWN:\n cfg.TRAVEL_FREQUENCY = 0.0\n\n def update_the_grid(self, p, old_row_col):\n \"\"\"\n Updates the grid cell in which the particle has moved to\n \"\"\"\n self.grid[old_row_col[0]][old_row_col[1]].remove(p)\n self.grid[p.grid[0]][p.grid[1]].append(p)\n\n def update_and_render(self):\n \"\"\"\n This function is where everything happens in terms of updates/renders\n Called in the main game loop\n \"\"\"\n self.update_tick()\n self.window.fill(cfg.BACKGROUND)\n\n for wv in self.wall_vector_list:\n draw_walls(self.window, wv, self.wall_width)\n\n self.window.fill(cfg.BACKGROUND, (self.main_x, 0, cfg.GAME_WIDTH, cfg.GAME_HEIGHT))\n\n # Quarantine walls\n draw_walls(self.window, self.q_centre_wall_vector, 
self.wall_width)\n\n # Central location walls\n draw_walls(self.window, self.central_location_wall_vector, self.central_wall_width)\n\n self.travel_to_central_location()\n\n day = self.update_time()\n fps = self.update_fps()\n\n self.disperse_vaccine()\n\n self.all_container.sort(key=lambda p: p.x)\n\n newly_infected = list()\n newly_recovered = list()\n self.contact = 0\n self.time = 0\n for pi in range(self.alllen):\n # update -------\n p = self.all_container[pi]\n\n if p.is_removed:\n continue\n\n p.update_2d_vectors()\n bounce_wall(p, p.my_boundries)\n\n # USAGE - uniform grid\n row_col = which_grid(self.cell_size_w, p.x, self.cell_size_h, p.y)\n old_row_col = p.update_grid(row_col)\n if (old_row_col):\n self.update_the_grid(p, old_row_col)\n\n # # USAGE - sweep n prune\n # if pi < self.T - 1:\n # newly_infected.extend(self.handle_particle_collision(pi))\n\n # render ------\n pygame.draw.circle(self.window, p.color, (p.x, p.y), p.radius)\n if(p.is_infected and p.recover(self.day)):\n newly_recovered.append(p)\n self.trace_line(p)\n self.move_to_quarantine(p)\n self.vaccinate(p)\n\n # USAGE - uniform grid\n newly_infected = self.handle_particle_collision()\n\n self.contact_trace()\n self.update_stats()\n self.render_stats()\n self.update_containers(newly_infected, newly_recovered)\n\n self.update_infection_timeseries()\n self.Ro = calculate_r_naught(self.diff_infection_timeseries, self.Ro)\n self.BETA.append(self.Ro)\n\n if self.Ro > self.Rmax:\n self.Rmax = self.Ro\n\n bar_data = {\n 'S': (self.suslen, cfg.SUSCEPTIBLE_COLOR),\n 'I': (self.inflen, cfg.INFECTED_COLOR),\n 'R': (self.reclen, cfg.REMOVED_COLOR),\n 'seq': ['S', 'I', 'R'],\n 'font': self.font,\n\n }\n bar_chart(\n self.window, self.main_x,\n cfg.QUARANTINE_CENTRE_HEIGHT, self.X,\n self.T, bar_data, self.bar_chart_height)\n\n Ro_avg = np.round(np.average(self.BETA), 2)\n\n display_text(self.window, self.font, fps, 10, 10)\n display_text(self.window, self.font, day, self.main_x / 2 - 10, 10)\n display_text(self.window, self.font, 'Ro {}'.format(self.Ro), 10, 25)\n display_text(self.window, self.font, 'Ro Avg {}'.format(Ro_avg), 10, 35)\n display_text(self.window, self.font, 'Ro max {}'.format(self.Rmax), 10, 45)\n k = np.round(len(self.asymptomatic_container) / self.T, 2)\n display_text(self.window, self.font, 'k {}'.format(k), 10, 55)\n\n self.pick_lucky_winners_for_travel()\n\n pygame.display.update()\n self.log()\n\n def log(self):\n \"\"\"\n format made easy for data tools\n FORMAT: DAY S I R Ro\n \"\"\"\n if cfg.LOGGING:\n if self.day % 1 == 0:\n logging.info(\n \" {0} {1} {2} {3} {4}\".format(self.day, self.suslen, self.inflen, self.reclen, self.Ro)\n )\n\n def run(self):\n \"\"\"\n The main game loop\n \"\"\"\n if cfg.LOGGING:\n logging.info(\"START\")\n\n while self.running:\n if cfg.RUN_TIME_IN_DAYS < self.day:\n font = pygame.font.SysFont(None, 32)\n display_text(\n self.window, font,\n \"[ Simulation DONE, reached days limit ]\",\n cfg.GAME_WIDTH // 4, cfg.GAME_HEIGHT // 2)\n self.pause = True\n pygame.display.update()\n self.process_input()\n if not self.pause:\n self.update_and_render()\n self.clock.tick(self.clock_tick)\n\n if cfg.LOGGING:\n logging.info(\"END\")\n\n pygame.quit()\n\n" ]
[ [ "numpy.round", "numpy.average", "numpy.random.randint" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
wookayin/tensorflow-agents
[ "a0f25a4b44d4fc844c9b804bab60af2167f8d2bb", "ae3751dfeed52422a350227047648dd82297960b", "1455410dffed3cfdede793b87c179965cdd27d22", "ae3751dfeed52422a350227047648dd82297960b" ]
[ "tf_agents/environments/tf_py_environment_test.py", "tf_agents/specs/array_spec.py", "tf_agents/environments/atari_preprocessing.py", "tf_agents/networks/value_network.py" ]
[ "# coding=utf-8\n# Copyright 2018 The TF-Agents Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"Tests for reinforment_learning.environment.tf_py_environment.\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nfrom absl.testing import parameterized\nimport numpy as np\n\nimport tensorflow as tf\n\nfrom tf_agents import specs\nfrom tf_agents.environments import batched_py_environment\nfrom tf_agents.environments import py_environment\nfrom tf_agents.environments import tf_py_environment\nfrom tf_agents.environments import time_step as ts\n\nnest = tf.contrib.framework.nest\n\n\nclass PYEnvironmentMock(py_environment.Base):\n \"\"\"MockPyEnvironment.\n\n Stores all actions taken in `actions_taken`. The returned values are:\n\n step: step_type, discount, reward, observation\n\n step: FIRST, 1., 0., [0]\n step: MID, 1., 0., [1]\n step: LAST, 0., 1. [2]\n ...repeated\n \"\"\"\n\n def __init__(self):\n self.actions_taken = []\n self.steps = 0\n self.episodes = 0\n self.resets = 0\n self._state = 0\n\n def reset(self):\n self._state = 0\n self.resets += 1\n return ts.restart([self._state])\n\n def step(self, action):\n self._state = (self._state + 1) % 3\n self.steps += 1\n self.actions_taken.append(action)\n\n observation = [self._state]\n if self._state == 0:\n return ts.restart(observation)\n elif self._state == 2:\n self.episodes += 1\n return ts.termination(observation, reward=1.0)\n return ts.transition(observation, reward=0.0)\n\n def action_spec(self):\n return specs.BoundedArraySpec(\n [], np.int32, minimum=0, maximum=10, name='action')\n\n def observation_spec(self):\n return specs.ArraySpec([], np.int64, name='observation')\n\n\nclass TFPYEnvironmentTest(tf.test.TestCase, parameterized.TestCase):\n\n def testPyenv(self):\n py_env = PYEnvironmentMock()\n tf_env = tf_py_environment.TFPyEnvironment(py_env)\n self.assertIsInstance(tf_env.pyenv,\n batched_py_environment.BatchedPyEnvironment)\n\n @parameterized.parameters({'batch_py_env': True}, {'batch_py_env': False})\n def testActionSpec(self, batch_py_env):\n py_env = PYEnvironmentMock()\n if batch_py_env:\n py_env = batched_py_environment.BatchedPyEnvironment([py_env])\n tf_env = tf_py_environment.TFPyEnvironment(py_env)\n self.assertTrue(tf_env.batched)\n self.assertEqual(tf_env.batch_size, 1)\n spec = tf_env.action_spec()\n self.assertEqual(type(spec), specs.BoundedTensorSpec)\n self.assertEqual(spec.dtype, tf.int32)\n self.assertEqual(spec.shape, tf.TensorShape([]))\n self.assertEqual(spec.name, 'action')\n\n def testObservationSpec(self):\n py_env = PYEnvironmentMock()\n tf_env = tf_py_environment.TFPyEnvironment(py_env)\n spec = tf_env.observation_spec()\n self.assertEqual(type(spec), specs.TensorSpec)\n self.assertEqual(spec.dtype, tf.int64)\n self.assertEqual(spec.shape, tf.TensorShape([]))\n self.assertEqual(spec.name, 'observation')\n\n @parameterized.parameters({'batch_py_env': True}, {'batch_py_env': False})\n def testTimeStepSpec(self, 
batch_py_env):\n py_env = PYEnvironmentMock()\n if batch_py_env:\n batched_py_env = batched_py_environment.BatchedPyEnvironment([py_env])\n tf_env = tf_py_environment.TFPyEnvironment(batched_py_env)\n else:\n tf_env = tf_py_environment.TFPyEnvironment(py_env)\n spec = tf_env.time_step_spec()\n\n # step_type\n self.assertEqual(type(spec.step_type), specs.TensorSpec)\n self.assertEqual(spec.step_type.dtype, tf.int32)\n self.assertEqual(spec.step_type.shape, tf.TensorShape([]))\n\n # reward\n self.assertEqual(type(spec.reward), specs.TensorSpec)\n self.assertEqual(spec.reward.dtype, tf.float32)\n self.assertEqual(spec.reward.shape, tf.TensorShape([]))\n\n # discount\n self.assertEqual(type(spec.discount), specs.BoundedTensorSpec)\n self.assertEqual(spec.discount.dtype, tf.float32)\n self.assertEqual(spec.discount.shape, tf.TensorShape([]))\n self.assertEqual(spec.discount.minimum, 0.0)\n self.assertEqual(spec.discount.maximum, 1.0)\n\n # observation\n self.assertEqual(type(spec.observation), specs.TensorSpec)\n\n @parameterized.parameters({'batch_py_env': True}, {'batch_py_env': False})\n def testResetOp(self, batch_py_env):\n py_env = PYEnvironmentMock()\n if batch_py_env:\n batched_py_env = batched_py_environment.BatchedPyEnvironment([py_env])\n tf_env = tf_py_environment.TFPyEnvironment(batched_py_env)\n else:\n tf_env = tf_py_environment.TFPyEnvironment(py_env)\n reset = tf_env.reset()\n self.evaluate(reset)\n self.assertEqual(1, py_env.resets)\n self.assertEqual(0, py_env.steps)\n self.assertEqual(0, py_env.episodes)\n\n @parameterized.parameters({'batch_py_env': True}, {'batch_py_env': False})\n def testMultipleReset(self, batch_py_env):\n py_env = PYEnvironmentMock()\n if batch_py_env:\n batched_py_env = batched_py_environment.BatchedPyEnvironment([py_env])\n tf_env = tf_py_environment.TFPyEnvironment(batched_py_env)\n else:\n tf_env = tf_py_environment.TFPyEnvironment(py_env)\n reset = tf_env.reset()\n self.evaluate(reset)\n self.assertEqual(1, py_env.resets)\n self.evaluate(reset)\n self.assertEqual(2, py_env.resets)\n self.evaluate(reset)\n self.assertEqual(3, py_env.resets)\n\n @parameterized.parameters({'batch_py_env': True}, {'batch_py_env': False})\n def testFirstTimeStep(self, batch_py_env):\n py_env = PYEnvironmentMock()\n if batch_py_env:\n batched_py_env = batched_py_environment.BatchedPyEnvironment([py_env])\n tf_env = tf_py_environment.TFPyEnvironment(batched_py_env)\n else:\n tf_env = tf_py_environment.TFPyEnvironment(py_env)\n time_step = tf_env.current_time_step()\n time_step = self.evaluate(time_step)\n self.assertAllEqual([ts.StepType.FIRST], time_step.step_type)\n self.assertAllEqual([0.0], time_step.reward)\n self.assertAllEqual([1.0], time_step.discount)\n self.assertAllEqual([0], time_step.observation)\n self.assertAllEqual([], py_env.actions_taken)\n self.assertEqual(1, py_env.resets)\n self.assertEqual(0, py_env.steps)\n self.assertEqual(0, py_env.episodes)\n\n @parameterized.parameters({'batch_py_env': True}, {'batch_py_env': False})\n def testOneStep(self, batch_py_env):\n py_env = PYEnvironmentMock()\n if batch_py_env:\n batched_py_env = batched_py_environment.BatchedPyEnvironment([py_env])\n tf_env = tf_py_environment.TFPyEnvironment(batched_py_env)\n else:\n tf_env = tf_py_environment.TFPyEnvironment(py_env)\n time_step = tf_env.current_time_step()\n with tf.control_dependencies([time_step.step_type]):\n action = tf.constant([1])\n time_step = self.evaluate(tf_env.step(action))\n\n self.assertAllEqual([ts.StepType.MID], time_step.step_type)\n 
self.assertAllEqual([0.], time_step.reward)\n self.assertAllEqual([1.0], time_step.discount)\n self.assertAllEqual([1], time_step.observation)\n self.assertAllEqual([1], py_env.actions_taken)\n self.assertEqual(1, py_env.resets)\n self.assertEqual(1, py_env.steps)\n self.assertEqual(0, py_env.episodes)\n\n def testBatchedFirstTimeStepAndOneStep(self):\n py_envs = [PYEnvironmentMock() for _ in range(3)]\n batched_py_env = batched_py_environment.BatchedPyEnvironment(py_envs)\n tf_env = tf_py_environment.TFPyEnvironment(batched_py_env)\n self.assertEqual(tf_env.batch_size, 3)\n time_step_0 = tf_env.current_time_step()\n time_step_0_val = self.evaluate(time_step_0)\n\n self.assertAllEqual([ts.StepType.FIRST] * 3, time_step_0_val.step_type)\n self.assertAllEqual([0.0] * 3, time_step_0_val.reward)\n self.assertAllEqual([1.0] * 3, time_step_0_val.discount)\n self.assertAllEqual(np.array([0, 0, 0]), time_step_0_val.observation)\n for py_env in py_envs:\n self.assertEqual([], py_env.actions_taken)\n self.assertEqual(1, py_env.resets)\n self.assertEqual(0, py_env.steps)\n self.assertEqual(0, py_env.episodes)\n\n time_step_1 = tf_env.step(np.array([1, 1, 1]))\n\n time_step_1_val = self.evaluate(time_step_1)\n\n self.assertAllEqual([ts.StepType.MID] * 3, time_step_1_val.step_type)\n self.assertAllEqual([0.] * 3, time_step_1_val.reward)\n self.assertAllEqual([1.0] * 3, time_step_1_val.discount)\n self.assertAllEqual(np.array([1, 1, 1]), time_step_1_val.observation)\n for py_env in py_envs:\n self.assertEqual([1], py_env.actions_taken)\n self.assertEqual(1, py_env.resets)\n self.assertEqual(1, py_env.steps)\n self.assertEqual(0, py_env.episodes)\n\n @parameterized.parameters({'batch_py_env': True}, {'batch_py_env': False})\n def testTwoStepsDependenceOnTheFirst(self, batch_py_env):\n py_env = PYEnvironmentMock()\n if batch_py_env:\n batched_py_env = batched_py_environment.BatchedPyEnvironment([py_env])\n tf_env = tf_py_environment.TFPyEnvironment(batched_py_env)\n else:\n tf_env = tf_py_environment.TFPyEnvironment(py_env)\n time_step = tf_env.current_time_step()\n with tf.control_dependencies([time_step.step_type]):\n action = tf.constant([1])\n time_step = tf_env.step(action)\n with tf.control_dependencies([time_step.step_type]):\n action = tf.constant([2])\n time_step = self.evaluate(tf_env.step(action))\n\n self.assertEqual(ts.StepType.LAST, time_step.step_type)\n self.assertEqual([2], time_step.observation)\n self.assertEqual(1., time_step.reward)\n self.assertEqual(0., time_step.discount)\n self.assertEqual([1, 2], py_env.actions_taken)\n\n @parameterized.parameters({'batch_py_env': True}, {'batch_py_env': False})\n def testFirstObservationIsPreservedAfterTwoSteps(self, batch_py_env):\n py_env = PYEnvironmentMock()\n if batch_py_env:\n batched_py_env = batched_py_environment.BatchedPyEnvironment([py_env])\n tf_env = tf_py_environment.TFPyEnvironment(batched_py_env)\n else:\n tf_env = tf_py_environment.TFPyEnvironment(py_env)\n time_step = tf_env.current_time_step()\n with tf.control_dependencies([time_step.step_type]):\n action = tf.constant([1])\n next_time_step = tf_env.step(action)\n with tf.control_dependencies([next_time_step.step_type]):\n action = tf.constant([2])\n _, observation = self.evaluate([tf_env.step(action), time_step.observation])\n\n self.assertEqual(np.array([0]), observation)\n\n\nif __name__ == '__main__':\n tf.test.main()\n", "# coding=utf-8\n# Copyright 2018 The TF-Agents Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this 
file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"A class to describe the shape and dtype of numpy arrays.\"\"\"\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport numbers\n\nimport numpy as np\nimport tensorflow as tf\n\nnest = tf.contrib.framework.nest\n\n\ndef sample_bounded_spec(spec, rng):\n \"\"\"Samples the given bounded spec.\n\n Args:\n spec: A BoundedSpec to sample.\n rng: A numpy RandomState to use for the sampling.\n\n Returns:\n An np.array sample of the requested space.\n \"\"\"\n tf_dtype = tf.as_dtype(spec.dtype)\n low = spec.minimum\n high = spec.maximum\n\n if tf_dtype.is_floating:\n if spec.dtype == np.float64 and np.any(np.isinf(high - low)):\n # The min-max interval cannot be represented by the np.float64. This is a\n # problem only for np.float64, np.float32 works as expected.\n # Spec bounds are set to read only so we can't use augmented assignment.\n low = low / 2 # pylint: disable=g-no-augmented-assignment\n high = high / 2 # pylint: disable=g-no-augmented-assignment\n return rng.uniform(\n low,\n high,\n size=spec.shape,\n ).astype(spec.dtype)\n\n else:\n if spec.dtype == np.int64 and np.any(high - low < 0):\n # The min-max interval cannot be represented by the tf_dtype. This is a\n # problem only for int64.\n low = low / 2 # pylint: disable=g-no-augmented-assignment\n high = high / 2 # pylint: disable=g-no-augmented-assignment\n\n if high < tf_dtype.max:\n high = high + 1 # pylint: disable=g-no-augmented-assignment\n elif spec.dtype != np.int64 and spec.dtype != np.uint64:\n # We can still +1 the high if we cast it to the larger dtype.\n high = high.astype(np.int64) + 1\n\n return rng.randint(\n low,\n high,\n size=spec.shape,\n dtype=spec.dtype,\n )\n\n\ndef sample_spec_nest(structure, rng, outer_dims=()):\n \"\"\"Samples the given nest of specs.\n\n Args:\n structure: An `ArraySpec`, or a nested dict, list or tuple of `ArraySpec`s.\n rng: A numpy RandomState to use for the sampling.\n outer_dims: An optional list/tuple specifying outer dimensions to add to the\n spec shape before sampling.\n\n Returns:\n A nest of sampled values following the ArraySpec definition.\n \"\"\"\n\n def sample_fn(spec):\n spec = BoundedArraySpec.from_spec(spec)\n spec = BoundedArraySpec(\n tuple(outer_dims) + tuple(spec.shape), spec.dtype, spec.minimum,\n spec.maximum, spec.name)\n return sample_bounded_spec(spec, rng)\n\n return nest.map_structure(sample_fn, structure)\n\n\ndef check_arrays_nest(arrays, spec):\n \"\"\"Check that the arrays conform to the spec.\n\n Args:\n arrays: A NumPy array, or a nested dict, list or tuple of arrays.\n spec: An `ArraySpec`, or a nested dict, list or tuple of `ArraySpec`s.\n\n Returns:\n True if the arrays conform to the spec, False otherwise.\n \"\"\"\n # Check that arrays and spec have the same structure.\n try:\n nest.assert_same_structure(arrays, spec)\n except (TypeError, ValueError):\n return False\n\n def check_array(spec, array):\n if not isinstance(spec, ArraySpec):\n return False\n return spec.check_array(array)\n\n # Check all the elements in arrays 
match to their spec\n checks = nest.map_structure(check_array, spec, arrays)\n # Only return True if all the checks pass.\n return all(nest.flatten(checks))\n\n\ndef add_outer_dims_nest(structure, outer_dims):\n def add_outer_dims(spec):\n name = spec.name\n shape = outer_dims + spec.shape\n if hasattr(spec, 'minimum') and hasattr(spec, 'maximum'):\n return BoundedArraySpec(shape, spec.dtype, spec.minimum,\n spec.maximum, name)\n return ArraySpec(shape, spec.dtype, name=name)\n return nest.map_structure(add_outer_dims, structure)\n\n\nclass ArraySpec(object):\n \"\"\"Describes a numpy array or scalar shape and dtype.\n\n An `ArraySpec` allows an API to describe the arrays that it accepts or\n returns, before that array exists.\n The equivalent version describing a `tf.Tensor` is `TensorSpec`.\n \"\"\"\n __slots__ = ('_shape', '_dtype', '_name')\n\n def __init__(self, shape, dtype, name=None):\n \"\"\"Initializes a new `ArraySpec`.\n\n Args:\n shape: An iterable specifying the array shape.\n dtype: numpy dtype or string specifying the array dtype.\n name: Optional string containing a semantic name for the corresponding\n array. Defaults to `None`.\n\n Raises:\n TypeError: If the shape is not an iterable or if the `dtype` is an invalid\n numpy dtype.\n \"\"\"\n self._shape = tuple(shape)\n self._dtype = np.dtype(dtype)\n self._name = name\n\n @property\n def shape(self):\n \"\"\"Returns a `tuple` specifying the array shape.\"\"\"\n return self._shape\n\n @property\n def dtype(self):\n \"\"\"Returns a numpy dtype specifying the array dtype.\"\"\"\n return self._dtype\n\n @property\n def name(self):\n \"\"\"Returns the name of the ArraySpec.\"\"\"\n return self._name\n\n def __repr__(self):\n return 'ArraySpec(shape={}, dtype={}, name={})'.format(\n self.shape, repr(self.dtype), repr(self.name))\n\n def __eq__(self, other):\n \"\"\"Checks if the shape and dtype of two specs are equal.\"\"\"\n if not isinstance(other, ArraySpec):\n return False\n return self.shape == other.shape and self.dtype == other.dtype\n\n def __ne__(self, other):\n return not self == other\n\n def check_array(self, array):\n \"\"\"Return whether the given NumPy array conforms to the spec.\n\n Args:\n array: A NumPy array or a scalar. Tuples and lists will not be converted\n to a NumPy array automatically; they will cause this function to return\n false, even if a conversion to a conforming array is trivial.\n\n Returns:\n True if the array conforms to the spec, False otherwise.\n \"\"\"\n if isinstance(array, np.ndarray):\n return self.shape == array.shape and self.dtype == array.dtype\n elif isinstance(array, numbers.Number):\n return self.shape == tuple() and self.dtype == np.dtype(type(array))\n else:\n return False\n\n @staticmethod\n def from_array(array, name=None):\n \"\"\"Construct a spec from the given array or number.\"\"\"\n if isinstance(array, np.ndarray):\n return ArraySpec(array.shape, array.dtype, name)\n elif isinstance(array, numbers.Number):\n return ArraySpec(tuple(), type(array), name)\n else:\n raise ValueError('Array must be a np.ndarray or number. Got %r.' 
% array)\n\n @staticmethod\n def from_spec(spec):\n \"\"\"Construct a spec from the given spec.\"\"\"\n return ArraySpec(spec.shape, spec.dtype, spec.name)\n\n\nclass BoundedArraySpec(ArraySpec):\n \"\"\"An `ArraySpec` that specifies minimum and maximum values.\n\n Example usage:\n ```python\n # Specifying the same minimum and maximum for every element.\n spec = BoundedArraySpec((3, 4), np.float64, minimum=0.0, maximum=1.0)\n\n # Specifying a different minimum and maximum for each element.\n spec = BoundedArraySpec(\n (2,), np.float64, minimum=[0.1, 0.2], maximum=[0.9, 0.9])\n\n # Specifying the same minimum and a different maximum for each element.\n spec = BoundedArraySpec(\n (3,), np.float64, minimum=-10.0, maximum=[4.0, 5.0, 3.0])\n ```\n\n Bounds are meant to be inclusive. This is especially important for\n integer types. The following spec will be satisfied by arrays\n with values in the set {0, 1, 2}:\n ```python\n spec = BoundedArraySpec((3, 4), np.int, minimum=0, maximum=2)\n ```\n \"\"\"\n\n __slots__ = ('_minimum', '_maximum')\n\n def __init__(self, shape, dtype, minimum=None, maximum=None, name=None):\n \"\"\"Initializes a new `BoundedArraySpec`.\n\n Args:\n shape: An iterable specifying the array shape.\n dtype: numpy dtype or string specifying the array dtype.\n minimum: Number or sequence specifying the maximum element bounds\n (inclusive). Must be broadcastable to `shape`.\n maximum: Number or sequence specifying the maximum element bounds\n (inclusive). Must be broadcastable to `shape`.\n name: Optional string containing a semantic name for the corresponding\n array. Defaults to `None`.\n\n Raises:\n ValueError: If `minimum` or `maximum` are not broadcastable to `shape` or\n if the limits are outside of the range of the specified dtype.\n TypeError: If the shape is not an iterable or if the `dtype` is an invalid\n numpy dtype.\n \"\"\"\n super(BoundedArraySpec, self).__init__(shape, dtype, name)\n\n try:\n np.broadcast_to(minimum, shape=shape)\n except ValueError as numpy_exception:\n raise ValueError('minimum is not compatible with shape. '\n 'Message: {!r}.'.format(numpy_exception))\n\n try:\n np.broadcast_to(maximum, shape=shape)\n except ValueError as numpy_exception:\n raise ValueError('maximum is not compatible with shape. 
'\n 'Message: {!r}.'.format(numpy_exception))\n\n tf_dtype = tf.as_dtype(self._dtype)\n low = tf_dtype.min\n high = tf_dtype.max\n\n if minimum is None:\n minimum = low\n if maximum is None:\n maximum = high\n\n self._minimum = np.array(minimum)\n self._maximum = np.array(maximum)\n\n if tf_dtype.is_floating:\n # Replacing infinities with extreme finite float values.\n self._minimum[self._minimum == -np.inf] = low\n self._minimum[self._minimum == np.inf] = high\n\n self._maximum[self._maximum == -np.inf] = low\n self._maximum[self._maximum == np.inf] = high\n\n if np.any(self._minimum > self._maximum):\n raise ValueError(\n 'Spec bounds min has values greater than max: [{},{}]'.format(\n self._minimum, self._maximum))\n if (np.any(self._minimum < low) or np.any(self._minimum > high) or\n np.any(self._maximum < low) or np.any(self._maximum > high)):\n raise ValueError(\n 'Spec bounds [{},{}] not within the range [{}, {}] of the given '\n 'dtype ({})'.format(self._minimum, self._maximum, low, high,\n self._dtype))\n\n self._minimum = self._minimum.astype(self._dtype)\n self._minimum.setflags(write=False)\n\n self._maximum = self._maximum.astype(self._dtype)\n self._maximum.setflags(write=False)\n\n @classmethod\n def from_spec(cls, spec, name=None):\n if name is None:\n name = spec.name\n\n if hasattr(spec, 'minimum') and hasattr(spec, 'maximum'):\n return BoundedArraySpec(spec.shape, spec.dtype, spec.minimum,\n spec.maximum, name)\n\n return BoundedArraySpec(spec.shape, spec.dtype, name=name)\n\n @property\n def minimum(self):\n \"\"\"Returns a NumPy array specifying the minimum bounds (inclusive).\"\"\"\n return self._minimum\n\n @property\n def maximum(self):\n \"\"\"Returns a NumPy array specifying the maximum bounds (inclusive).\"\"\"\n return self._maximum\n\n def __repr__(self):\n template = ('BoundedArraySpec(shape={}, dtype={}, name={}, '\n 'minimum={}, maximum={})')\n return template.format(self.shape, repr(self.dtype), repr(self.name),\n self._minimum, self._maximum)\n\n def __eq__(self, other):\n if not isinstance(other, BoundedArraySpec):\n return False\n return (super(BoundedArraySpec, self).__eq__(other) and\n (self.minimum == other.minimum).all() and\n (self.maximum == other.maximum).all())\n\n def check_array(self, array):\n \"\"\"Return true if the given array conforms to the spec.\"\"\"\n return (super(BoundedArraySpec, self).check_array(array) and\n np.all(array >= self.minimum) and np.all(array <= self.maximum))\n\n\ndef is_bounded(spec):\n return isinstance(spec, BoundedArraySpec)\n\n\ndef is_discrete(spec):\n return np.issubdtype(spec.dtype, np.integer)\n\n\ndef is_continuous(spec):\n return np.issubdtype(spec.dtype, np.float)\n", "# coding=utf-8\n# Copyright 2018 The TF-Agents Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"A class implementing minimal Atari 2600 preprocessing.\n\nAdapted from Dopamine.\n\nhttps://github.com/google/dopamine/blob/master/dopamine/atari/preprocessing.py\n\nThis includes:\n . 
Emitting a terminal signal when losing a life (optional).\n . Frame skipping and color pooling.\n . Resizing the image before it is provided to the agent.\n\n\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nfrom gym.spaces.box import Box\nimport numpy as np\nimport gin.tf\nimport cv2\n\n\[email protected]\nclass AtariPreprocessing(object):\n \"\"\"A class implementing image preprocessing for Atari 2600 agents.\n\n Specifically, this provides the following subset from the JAIR paper\n (Bellemare et al., 2013) and Nature DQN paper (Mnih et al., 2015):\n\n * Frame skipping (defaults to 4).\n * Terminal signal when a life is lost (off by default).\n * Grayscale and max-pooling of the last two frames.\n * Downsample the screen to a square image (defaults to 84x84).\n\n More generally, this class follows the preprocessing guidelines set down in\n Machado et al. (2018), \"Revisiting the Arcade Learning Environment:\n Evaluation Protocols and Open Problems for General Agents\".\n \"\"\"\n\n def __init__(self,\n environment,\n frame_skip=4,\n terminal_on_life_loss=False,\n screen_size=84):\n \"\"\"Constructor for an Atari 2600 preprocessor.\n\n Args:\n environment: Gym environment whose observations are preprocessed.\n frame_skip: int, the frequency at which the agent experiences the game.\n terminal_on_life_loss: bool, If True, the step() method returns\n is_terminal=True whenever a life is lost. See Mnih et al. 2015.\n screen_size: int, size of a resized Atari 2600 frame.\n\n Raises:\n ValueError: if frame_skip or screen_size are not strictly positive.\n \"\"\"\n if frame_skip <= 0:\n raise ValueError(\n 'Frame skip should be strictly positive, got {}'.format(frame_skip))\n if screen_size <= 0:\n raise ValueError('Target screen size should be strictly positive, got {}'\n .format(screen_size))\n\n self.environment = environment\n self.terminal_on_life_loss = terminal_on_life_loss\n self.frame_skip = frame_skip\n self.screen_size = screen_size\n\n obs_dims = self.environment.observation_space\n # Stores temporary observations used for pooling over two successive\n # frames.\n self.screen_buffer = [\n np.empty((obs_dims.shape[0], obs_dims.shape[1]), dtype=np.uint8),\n np.empty((obs_dims.shape[0], obs_dims.shape[1]), dtype=np.uint8)\n ]\n\n self.game_over = False\n self.lives = 0 # Will need to be set by reset().\n\n @property\n def observation_space(self):\n # Return the observation space adjusted to match the shape of the processed\n # observations.\n return Box(\n low=0,\n high=255,\n shape=(self.screen_size, self.screen_size, 1),\n dtype=np.uint8)\n\n @property\n def action_space(self):\n return self.environment.action_space\n\n @property\n def reward_range(self):\n return self.environment.reward_range\n\n @property\n def metadata(self):\n return self.environment.metadata\n\n def reset(self):\n \"\"\"Resets the environment.\n\n Returns:\n observation: numpy array, the initial observation emitted by the\n environment.\n \"\"\"\n self.environment.reset()\n self.lives = self.environment.ale.lives()\n self._fetch_grayscale_observation(self.screen_buffer[0])\n self.screen_buffer[1].fill(0)\n return self._pool_and_resize()\n\n def render(self, mode):\n \"\"\"Renders the current screen, before preprocessing.\n\n This calls the Gym API's render() method.\n\n Args:\n mode: Mode argument for the environment's render() method.\n Valid values (str) are:\n 'rgb_array': returns the raw ALE image.\n 'human': renders to display via the 
Gym renderer.\n\n Returns:\n if mode='rgb_array': numpy array, the most recent screen.\n if mode='human': bool, whether the rendering was successful.\n \"\"\"\n return self.environment.render(mode)\n\n def step(self, action):\n \"\"\"Applies the given action in the environment.\n\n Remarks:\n\n * If a terminal state (from life loss or episode end) is reached, this may\n execute fewer than self.frame_skip steps in the environment.\n * Furthermore, in this case the returned observation may not contain valid\n image data and should be ignored.\n\n Args:\n action: The action to be executed.\n\n Returns:\n observation: numpy array, the observation following the action.\n reward: float, the reward following the action.\n is_terminal: bool, whether the environment has reached a terminal state.\n This is true when a life is lost and terminal_on_life_loss, or when the\n episode is over.\n info: Gym API's info data structure.\n \"\"\"\n accumulated_reward = 0.\n\n for time_step in range(self.frame_skip):\n # We bypass the Gym observation altogether and directly fetch the\n # grayscale image from the ALE. This is a little faster.\n _, reward, game_over, info = self.environment.step(action)\n accumulated_reward += reward\n\n if self.terminal_on_life_loss:\n new_lives = self.environment.ale.lives()\n is_terminal = game_over or new_lives < self.lives\n self.lives = new_lives\n else:\n is_terminal = game_over\n\n if is_terminal:\n break\n # We max-pool over the last two frames, in grayscale.\n elif time_step >= self.frame_skip - 2:\n t = time_step - (self.frame_skip - 2)\n self._fetch_grayscale_observation(self.screen_buffer[t])\n\n # Pool the last two observations.\n observation = self._pool_and_resize()\n\n self.game_over = game_over\n return observation, accumulated_reward, is_terminal, info\n\n def _fetch_grayscale_observation(self, output):\n \"\"\"Returns the current observation in grayscale.\n\n The returned observation is stored in 'output'.\n\n Args:\n output: numpy array, screen buffer to hold the returned observation.\n\n Returns:\n observation: numpy array, the current observation in grayscale.\n \"\"\"\n self.environment.ale.getScreenGrayscale(output)\n return output\n\n def _pool_and_resize(self):\n \"\"\"Transforms two frames into a Nature DQN observation.\n\n For efficiency, the transformation is done in-place in self.screen_buffer.\n\n Returns:\n transformed_screen: numpy array, pooled, resized screen.\n \"\"\"\n # Pool if there are enough screens to do so.\n if self.frame_skip > 1:\n np.maximum(\n self.screen_buffer[0],\n self.screen_buffer[1],\n out=self.screen_buffer[0])\n\n transformed_image = cv2.resize(\n self.screen_buffer[0], (self.screen_size, self.screen_size),\n interpolation=cv2.INTER_AREA)\n int_image = np.asarray(transformed_image, dtype=np.uint8)\n return np.expand_dims(int_image, axis=2)\n", "# coding=utf-8\n# Copyright 2018 The TF-Agents Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"Sample Keras Value Network.\n\nImplements a network that will 
generate the following layers:\n\n [optional]: Conv2D # conv_layer_params\n Flatten\n [optional]: Dense # fc_layer_params\n Dense -> 1 # Value output\n\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport tensorflow as tf\n\nfrom tf_agents.networks import network\nfrom tf_agents.networks import utils\nfrom tf_agents.utils import nest_utils\n\nimport gin.tf\n\nnest = tf.contrib.framework.nest\n\n\[email protected]\nclass ValueNetwork(network.Network):\n \"\"\"Feed Forward value network. Reduces to 1 value output per batch item.\"\"\"\n\n def __init__(self,\n observation_spec,\n fc_layer_params=(75, 40),\n conv_layer_params=None,\n activation_fn=tf.keras.activations.relu,\n name='ValueNetwork'):\n \"\"\"Creates an instance of `ValueNetwork`.\n\n Network supports calls with shape outer_rank + observation_spec.shape. Note\n outer_rank must be at least 1.\n\n Args:\n observation_spec: A nest of `tensor_spec.TensorSpec` representing the\n observations.\n fc_layer_params: Optional list of fully_connected parameters, where each\n item is the number of units in the layer.\n conv_layer_params: Optional list of convolution layers parameters, where\n each item is a length-three tuple indicating (filters, kernel_size,\n stride).\n activation_fn: Activation function, e.g. tf.keras.activations.relu,.\n name: A string representing name of the network.\n\n Raises:\n ValueError: If `observation_spec` contains more than one observation.\n \"\"\"\n super(ValueNetwork, self).__init__(\n observation_spec=observation_spec,\n action_spec=None,\n state_spec=(),\n name=name)\n\n if len(nest.flatten(observation_spec)) > 1:\n raise ValueError(\n 'Network only supports observation_specs with a single observation.')\n\n self._layers = utils.mlp_layers(\n conv_layer_params,\n fc_layer_params,\n activation_fn=activation_fn,\n kernel_initializer=tf.keras.initializers.glorot_uniform(),\n name='input_mlp')\n\n self._layers.append(\n tf.keras.layers.Dense(\n 1,\n activation=None,\n kernel_initializer=tf.random_uniform_initializer(\n minval=-0.03, maxval=0.03),\n ))\n\n def call(self, observation, step_type=None, network_state=()):\n del step_type # unused.\n\n outer_rank = nest_utils.get_outer_rank(observation, self.observation_spec)\n batch_squash = utils.BatchSquash(outer_rank)\n\n states = tf.cast(nest.flatten(observation)[0], tf.float32)\n states = batch_squash.flatten(states)\n for layer in self.layers:\n states = layer(states)\n\n value = tf.reshape(states, [-1])\n value = batch_squash.unflatten(value)\n return value, network_state\n" ]
[ [ "tensorflow.TensorShape", "tensorflow.constant", "tensorflow.control_dependencies", "tensorflow.test.main", "numpy.array" ], [ "tensorflow.as_dtype", "numpy.issubdtype", "numpy.dtype", "numpy.all", "numpy.any", "numpy.broadcast_to", "numpy.array", "numpy.isinf" ], [ "numpy.asarray", "numpy.expand_dims", "numpy.maximum", "numpy.empty" ], [ "tensorflow.random_uniform_initializer", "tensorflow.reshape", "tensorflow.keras.initializers.glorot_uniform" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [ "1.10", "2.7", "1.12", "2.6", "2.2", "1.13", "2.3", "2.4", "1.4", "2.9", "1.5", "1.7", "2.5", "0.12", "1.0", "2.8", "1.2", "2.10" ] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
temcdrm/dynonet
[ "7c197c0912686111617667fe318fa848b9dde90e" ]
[ "test_code/stable_ocs_param.py" ]
[ "import numpy as np\nimport matplotlib.pyplot as plt\n\n\ndef sigmoid(val):\n return 1/(1 + np.exp(-val))\n\ndef stable_coeff(rho, psi):\n r = sigmoid(rho)\n theta = np.pi * sigmoid(psi)\n\n a_1 = -2*r*np.cos(theta)\n a_2 = r**2\n return a_1, a_2\n\ndef roots_polynomial(a_1, a_2):\n delta = a_1**2 - 4 * a_2\n delta = delta.astype(np.complex)\n root_1 = (-a_1 + np.sqrt(delta))/2\n root_2 = (-a_1 - np.sqrt(delta))/2\n idx_real = delta > 0\n return root_1, root_2, idx_real\n\n\nif __name__ == '__main__':\n\n N = 100000\n rho = np.random.randn(N)*1\n psi = np.random.randn(N)*1\n\n a_1, a_2 = stable_coeff(rho, psi)\n r_1, r_2, idx_real = roots_polynomial(a_1, a_2)\n\n\n fig, ax = plt.subplots()\n ax.plot(a_1, a_2, '*')\n ax.plot(a_1[idx_real], a_2[idx_real], 'k*')\n ax.set_xlabel('a_1')\n ax.set_ylabel('a_2')\n ax.set_xlim([-2, 2])\n ax.set_ylim([-2, 2])\n\n\n fig, ax = plt.subplots()\n ax.plot(np.real(r_1), np.imag(r_1), 'r*')\n ax.plot(np.real(r_2), np.imag(r_2), 'r*')\n ax.plot(np.real(r_1)[idx_real], np.imag(r_1)[idx_real], 'k*')\n ax.plot(np.real(r_2)[idx_real], np.imag(r_2)[idx_real], 'k*')\n ax.set_xlim([-1.2, 1.2])\n ax.set_ylim([-1.2, 1.2])\n\n perc_real = np.sum(idx_real) / N *100\n print(f\"Real poles in {perc_real:.1f} cases\")\n" ]
[ [ "numpy.imag", "numpy.sqrt", "numpy.cos", "matplotlib.pyplot.subplots", "numpy.real", "numpy.random.randn", "numpy.exp", "numpy.sum" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
eera-l/ML-KWS-for-MCU
[ "1109fc9f675afa9940cb910559114570be1290fa" ]
[ "models.py" ]
[ "# Copyright 2017 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n#\n# Modifications Copyright 2017 Arm Inc. All Rights Reserved. \n# Added new model definitions for speech command recognition used in\n# the paper: https://arxiv.org/pdf/1711.07128.pdf\n#\n#\n\n\"\"\"Model definitions for simple speech recognition.\n\n\"\"\"\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport math\n\nimport tensorflow as tf\nimport tensorflow.contrib.slim as slim\nfrom tensorflow.contrib.layers.python.layers import layers\nfrom tensorflow.python.ops import array_ops\nfrom tensorflow.python.ops import init_ops\nfrom tensorflow.python.ops import math_ops\nfrom tensorflow.python.ops import nn_ops\nfrom tensorflow.python.ops import rnn_cell_impl\nfrom tensorflow.python.ops import variable_scope as vs\n\ndef prepare_model_settings(label_count, sample_rate, clip_duration_ms,\n window_size_ms, window_stride_ms,\n dct_coefficient_count):\n \"\"\"Calculates common settings needed for all models.\n\n Args:\n label_count: How many classes are to be recognized.\n sample_rate: Number of audio samples per second.\n clip_duration_ms: Length of each audio clip to be analyzed.\n window_size_ms: Duration of frequency analysis window.\n window_stride_ms: How far to move in time between frequency windows.\n dct_coefficient_count: Number of frequency bins to use for analysis.\n\n Returns:\n Dictionary containing common settings.\n \"\"\"\n desired_samples = int(sample_rate * clip_duration_ms / 1000)\n window_size_samples = int(sample_rate * window_size_ms / 1000)\n window_stride_samples = int(sample_rate * window_stride_ms / 1000)\n length_minus_window = (desired_samples - window_size_samples)\n if length_minus_window < 0:\n spectrogram_length = 0\n else:\n spectrogram_length = 1 + int(length_minus_window / window_stride_samples)\n fingerprint_size = dct_coefficient_count * spectrogram_length\n return {\n 'desired_samples': desired_samples,\n 'window_size_samples': window_size_samples,\n 'window_stride_samples': window_stride_samples,\n 'spectrogram_length': spectrogram_length,\n 'dct_coefficient_count': dct_coefficient_count,\n 'fingerprint_size': fingerprint_size,\n 'label_count': label_count,\n 'sample_rate': sample_rate,\n }\n\n\ndef create_model(fingerprint_input, model_settings, model_architecture,\n model_size_info, is_training, runtime_settings=None):\n \"\"\"Builds a model of the requested architecture compatible with the settings.\n\n There are many possible ways of deriving predictions from a spectrogram\n input, so this function provides an abstract interface for creating different\n kinds of models in a black-box way. You need to pass in a TensorFlow node as\n the 'fingerprint' input, and this should output a batch of 1D features that\n describe the audio. 
Typically this will be derived from a spectrogram that's\n been run through an MFCC, but in theory it can be any feature vector of the\n size specified in model_settings['fingerprint_size'].\n\n The function will build the graph it needs in the current TensorFlow graph,\n and return the tensorflow output that will contain the 'logits' input to the\n softmax prediction process. If training flag is on, it will also return a\n placeholder node that can be used to control the dropout amount.\n\n See the implementations below for the possible model architectures that can be\n requested.\n\n Args:\n fingerprint_input: TensorFlow node that will output audio feature vectors.\n model_settings: Dictionary of information about the model.\n model_architecture: String specifying which kind of model to create.\n is_training: Whether the model is going to be used for training.\n runtime_settings: Dictionary of information about the runtime.\n\n Returns:\n TensorFlow node outputting logits results, and optionally a dropout\n placeholder.\n\n Raises:\n Exception: If the architecture type isn't recognized.\n \"\"\"\n if model_architecture == 'single_fc':\n return create_single_fc_model(fingerprint_input, model_settings,\n is_training)\n elif model_architecture == 'conv':\n return create_conv_model(fingerprint_input, model_settings, is_training)\n elif model_architecture == 'low_latency_conv':\n return create_low_latency_conv_model(fingerprint_input, model_settings,\n is_training)\n elif model_architecture == 'low_latency_svdf':\n return create_low_latency_svdf_model(fingerprint_input, model_settings,\n is_training, runtime_settings)\n elif model_architecture == 'dnn':\n return create_dnn_model(fingerprint_input, model_settings, model_size_info,\n is_training)\n elif model_architecture == 'cnn':\n return create_cnn_model(fingerprint_input, model_settings, model_size_info,\n is_training)\n elif model_architecture == 'basic_lstm':\n return create_basic_lstm_model(fingerprint_input, model_settings, \n model_size_info, is_training)\n elif model_architecture == 'lstm':\n return create_lstm_model(fingerprint_input, model_settings, \n model_size_info, is_training)\n elif model_architecture == 'gru':\n return create_gru_model(fingerprint_input, model_settings, model_size_info,\n is_training)\n elif model_architecture == 'crnn':\n return create_crnn_model(fingerprint_input, model_settings, model_size_info, \n is_training)\n elif model_architecture == 'ds_cnn':\n return create_ds_cnn_model(fingerprint_input, model_settings, \n model_size_info, is_training)\n else:\n raise Exception('model_architecture argument \"' + model_architecture +\n '\" not recognized, should be one of \"single_fc\", \"conv\",' +\n ' \"low_latency_conv\", \"low_latency_svdf\",'+ \n ' \"dnn\", \"cnn\", \"basic_lstm\", \"lstm\",'+\n ' \"gru\", \"crnn\" or \"ds_cnn\"')\n\n\ndef load_variables_from_checkpoint(sess, start_checkpoint):\n \"\"\"Utility function to centralize checkpoint restoration.\n\n Args:\n sess: TensorFlow session.\n start_checkpoint: Path to saved checkpoint on disk.\n \"\"\"\n saver = tf.train.Saver(tf.global_variables())\n saver.restore(sess, start_checkpoint)\n\n\ndef create_single_fc_model(fingerprint_input, model_settings, is_training):\n \"\"\"Builds a model with a single hidden fully-connected layer.\n\n This is a very simple model with just one matmul and bias layer. 
As you'd\n expect, it doesn't produce very accurate results, but it is very fast and\n simple, so it's useful for sanity testing.\n\n Here's the layout of the graph:\n\n (fingerprint_input)\n v\n [MatMul]<-(weights)\n v\n [BiasAdd]<-(bias)\n v\n\n Args:\n fingerprint_input: TensorFlow node that will output audio feature vectors.\n model_settings: Dictionary of information about the model.\n is_training: Whether the model is going to be used for training.\n\n Returns:\n TensorFlow node outputting logits results, and optionally a dropout\n placeholder.\n \"\"\"\n if is_training:\n dropout_prob = tf.placeholder(tf.float32, name='dropout_prob')\n fingerprint_size = model_settings['fingerprint_size']\n label_count = model_settings['label_count']\n weights = tf.Variable(\n tf.truncated_normal([fingerprint_size, label_count], stddev=0.001))\n bias = tf.Variable(tf.zeros([label_count]))\n logits = tf.matmul(fingerprint_input, weights) + bias\n if is_training:\n return logits, dropout_prob\n else:\n return logits\n\n\ndef create_conv_model(fingerprint_input, model_settings, is_training):\n \"\"\"Builds a standard convolutional model.\n\n This is roughly the network labeled as 'cnn-trad-fpool3' in the\n 'Convolutional Neural Networks for Small-footprint Keyword Spotting' paper:\n http://www.isca-speech.org/archive/interspeech_2015/papers/i15_1478.pdf\n\n Here's the layout of the graph:\n\n (fingerprint_input)\n v\n [Conv2D]<-(weights)\n v\n [BiasAdd]<-(bias)\n v\n [Relu]\n v\n [MaxPool]\n v\n [Conv2D]<-(weights)\n v\n [BiasAdd]<-(bias)\n v\n [Relu]\n v\n [MaxPool]\n v\n [MatMul]<-(weights)\n v\n [BiasAdd]<-(bias)\n v\n\n This produces fairly good quality results, but can involve a large number of\n weight parameters and computations. For a cheaper alternative from the same\n paper with slightly less accuracy, see 'low_latency_conv' below.\n\n During training, dropout nodes are introduced after each relu, controlled by a\n placeholder.\n\n Args:\n fingerprint_input: TensorFlow node that will output audio feature vectors.\n model_settings: Dictionary of information about the model.\n is_training: Whether the model is going to be used for training.\n\n Returns:\n TensorFlow node outputting logits results, and optionally a dropout\n placeholder.\n \"\"\"\n if is_training:\n dropout_prob = tf.placeholder(tf.float32, name='dropout_prob')\n input_frequency_size = model_settings['dct_coefficient_count']\n input_time_size = model_settings['spectrogram_length']\n fingerprint_4d = tf.reshape(fingerprint_input,\n [-1, input_time_size, input_frequency_size, 1])\n first_filter_width = 8\n first_filter_height = 20\n first_filter_count = 64\n first_weights = tf.Variable(\n tf.truncated_normal(\n [first_filter_height, first_filter_width, 1, first_filter_count],\n stddev=0.01))\n first_bias = tf.Variable(tf.zeros([first_filter_count]))\n first_conv = tf.nn.conv2d(fingerprint_4d, first_weights, [1, 1, 1, 1],\n 'SAME') + first_bias\n first_relu = tf.nn.relu(first_conv)\n if is_training:\n first_dropout = tf.nn.dropout(first_relu, dropout_prob)\n else:\n first_dropout = first_relu\n max_pool = tf.nn.max_pool(first_dropout, [1, 2, 2, 1], [1, 2, 2, 1], 'SAME')\n second_filter_width = 4\n second_filter_height = 10\n second_filter_count = 64\n second_weights = tf.Variable(\n tf.truncated_normal(\n [\n second_filter_height, second_filter_width, first_filter_count,\n second_filter_count\n ],\n stddev=0.01))\n second_bias = tf.Variable(tf.zeros([second_filter_count]))\n second_conv = tf.nn.conv2d(max_pool, second_weights, [1, 
1, 1, 1],\n 'SAME') + second_bias\n second_relu = tf.nn.relu(second_conv)\n if is_training:\n second_dropout = tf.nn.dropout(second_relu, dropout_prob)\n else:\n second_dropout = second_relu\n second_conv_shape = second_dropout.get_shape()\n second_conv_output_width = second_conv_shape[2]\n second_conv_output_height = second_conv_shape[1]\n second_conv_element_count = int(\n second_conv_output_width * second_conv_output_height *\n second_filter_count)\n flattened_second_conv = tf.reshape(second_dropout,\n [-1, second_conv_element_count])\n label_count = model_settings['label_count']\n final_fc_weights = tf.Variable(\n tf.truncated_normal(\n [second_conv_element_count, label_count], stddev=0.01))\n final_fc_bias = tf.Variable(tf.zeros([label_count]))\n final_fc = tf.matmul(flattened_second_conv, final_fc_weights) + final_fc_bias\n if is_training:\n return final_fc, dropout_prob\n else:\n return final_fc\n\n\ndef create_low_latency_conv_model(fingerprint_input, model_settings,\n is_training):\n \"\"\"Builds a convolutional model with low compute requirements.\n\n This is roughly the network labeled as 'cnn-one-fstride4' in the\n 'Convolutional Neural Networks for Small-footprint Keyword Spotting' paper:\n http://www.isca-speech.org/archive/interspeech_2015/papers/i15_1478.pdf\n\n Here's the layout of the graph:\n\n (fingerprint_input)\n v\n [Conv2D]<-(weights)\n v\n [BiasAdd]<-(bias)\n v\n [Relu]\n v\n [MatMul]<-(weights)\n v\n [BiasAdd]<-(bias)\n v\n [MatMul]<-(weights)\n v\n [BiasAdd]<-(bias)\n v\n [MatMul]<-(weights)\n v\n [BiasAdd]<-(bias)\n v\n\n This produces slightly lower quality results than the 'conv' model, but needs\n fewer weight parameters and computations.\n\n During training, dropout nodes are introduced after the relu, controlled by a\n placeholder.\n\n Args:\n fingerprint_input: TensorFlow node that will output audio feature vectors.\n model_settings: Dictionary of information about the model.\n is_training: Whether the model is going to be used for training.\n\n Returns:\n TensorFlow node outputting logits results, and optionally a dropout\n placeholder.\n \"\"\"\n if is_training:\n dropout_prob = tf.placeholder(tf.float32, name='dropout_prob')\n input_frequency_size = model_settings['dct_coefficient_count']\n input_time_size = model_settings['spectrogram_length']\n fingerprint_4d = tf.reshape(fingerprint_input,\n [-1, input_time_size, input_frequency_size, 1])\n first_filter_width = 8\n first_filter_height = input_time_size\n first_filter_count = 186\n first_filter_stride_x = 1\n first_filter_stride_y = 1\n first_weights = tf.Variable(\n tf.truncated_normal(\n [first_filter_height, first_filter_width, 1, first_filter_count],\n stddev=0.01))\n first_bias = tf.Variable(tf.zeros([first_filter_count]))\n first_conv = tf.nn.conv2d(fingerprint_4d, first_weights, [\n 1, first_filter_stride_y, first_filter_stride_x, 1\n ], 'VALID') + first_bias\n first_relu = tf.nn.relu(first_conv)\n if is_training:\n first_dropout = tf.nn.dropout(first_relu, dropout_prob)\n else:\n first_dropout = first_relu\n first_conv_output_width = math.floor(\n (input_frequency_size - first_filter_width + first_filter_stride_x) /\n first_filter_stride_x)\n first_conv_output_height = math.floor(\n (input_time_size - first_filter_height + first_filter_stride_y) /\n first_filter_stride_y)\n first_conv_element_count = int(\n first_conv_output_width * first_conv_output_height * first_filter_count)\n flattened_first_conv = tf.reshape(first_dropout,\n [-1, first_conv_element_count])\n first_fc_output_channels = 
128\n first_fc_weights = tf.Variable(\n tf.truncated_normal(\n [first_conv_element_count, first_fc_output_channels], stddev=0.01))\n first_fc_bias = tf.Variable(tf.zeros([first_fc_output_channels]))\n first_fc = tf.matmul(flattened_first_conv, first_fc_weights) + first_fc_bias\n if is_training:\n second_fc_input = tf.nn.dropout(first_fc, dropout_prob)\n else:\n second_fc_input = first_fc\n second_fc_output_channels = 128\n second_fc_weights = tf.Variable(\n tf.truncated_normal(\n [first_fc_output_channels, second_fc_output_channels], stddev=0.01))\n second_fc_bias = tf.Variable(tf.zeros([second_fc_output_channels]))\n second_fc = tf.matmul(second_fc_input, second_fc_weights) + second_fc_bias\n if is_training:\n final_fc_input = tf.nn.dropout(second_fc, dropout_prob)\n else:\n final_fc_input = second_fc\n label_count = model_settings['label_count']\n final_fc_weights = tf.Variable(\n tf.truncated_normal(\n [second_fc_output_channels, label_count], stddev=0.01))\n final_fc_bias = tf.Variable(tf.zeros([label_count]))\n final_fc = tf.matmul(final_fc_input, final_fc_weights) + final_fc_bias\n if is_training:\n return final_fc, dropout_prob\n else:\n return final_fc\n\n\ndef create_low_latency_svdf_model(fingerprint_input, model_settings,\n is_training, runtime_settings):\n \"\"\"Builds an SVDF model with low compute requirements.\n\n This is based in the topology presented in the 'Compressing Deep Neural\n Networks using a Rank-Constrained Topology' paper:\n https://static.googleusercontent.com/media/research.google.com/en//pubs/archive/43813.pdf\n\n Here's the layout of the graph:\n\n (fingerprint_input)\n v\n [SVDF]<-(weights)\n v\n [BiasAdd]<-(bias)\n v\n [Relu]\n v\n [MatMul]<-(weights)\n v\n [BiasAdd]<-(bias)\n v\n [MatMul]<-(weights)\n v\n [BiasAdd]<-(bias)\n v\n [MatMul]<-(weights)\n v\n [BiasAdd]<-(bias)\n v\n\n This model produces lower recognition accuracy than the 'conv' model above,\n but requires fewer weight parameters and, significantly fewer computations.\n\n During training, dropout nodes are introduced after the relu, controlled by a\n placeholder.\n\n Args:\n fingerprint_input: TensorFlow node that will output audio feature vectors.\n The node is expected to produce a 2D Tensor of shape:\n [batch, model_settings['dct_coefficient_count'] *\n model_settings['spectrogram_length']]\n with the features corresponding to the same time slot arranged contiguously,\n and the oldest slot at index [:, 0], and newest at [:, -1].\n model_settings: Dictionary of information about the model.\n is_training: Whether the model is going to be used for training.\n runtime_settings: Dictionary of information about the runtime.\n\n Returns:\n TensorFlow node outputting logits results, and optionally a dropout\n placeholder.\n\n Raises:\n ValueError: If the inputs tensor is incorrectly shaped.\n \"\"\"\n if is_training:\n dropout_prob = tf.placeholder(tf.float32, name='dropout_prob')\n\n input_frequency_size = model_settings['dct_coefficient_count']\n input_time_size = model_settings['spectrogram_length']\n\n # Validation.\n input_shape = fingerprint_input.get_shape()\n if len(input_shape) != 2:\n raise ValueError('Inputs to `SVDF` should have rank == 2.')\n if input_shape[-1].value is None:\n raise ValueError('The last dimension of the inputs to `SVDF` '\n 'should be defined. 
Found `None`.')\n if input_shape[-1].value % input_frequency_size != 0:\n raise ValueError('Inputs feature dimension %d must be a multiple of '\n 'frame size %d', fingerprint_input.shape[-1].value,\n input_frequency_size)\n\n # Set number of units (i.e. nodes) and rank.\n rank = 2\n num_units = 1280\n # Number of filters: pairs of feature and time filters.\n num_filters = rank * num_units\n # Create the runtime memory: [num_filters, batch, input_time_size]\n batch = 1\n memory = tf.Variable(tf.zeros([num_filters, batch, input_time_size]),\n trainable=False, name='runtime-memory')\n # Determine the number of new frames in the input, such that we only operate\n # on those. For training we do not use the memory, and thus use all frames\n # provided in the input.\n # new_fingerprint_input: [batch, num_new_frames*input_frequency_size]\n if is_training:\n num_new_frames = input_time_size\n else:\n window_stride_ms = int(model_settings['window_stride_samples'] * 1000 /\n model_settings['sample_rate'])\n num_new_frames = tf.cond(\n tf.equal(tf.count_nonzero(memory), 0),\n lambda: input_time_size,\n lambda: int(runtime_settings['clip_stride_ms'] / window_stride_ms))\n new_fingerprint_input = fingerprint_input[\n :, -num_new_frames*input_frequency_size:]\n # Expand to add input channels dimension.\n new_fingerprint_input = tf.expand_dims(new_fingerprint_input, 2)\n\n # Create the frequency filters.\n weights_frequency = tf.Variable(\n tf.truncated_normal([input_frequency_size, num_filters], stddev=0.01))\n # Expand to add input channels dimensions.\n # weights_frequency: [input_frequency_size, 1, num_filters]\n weights_frequency = tf.expand_dims(weights_frequency, 1)\n # Convolve the 1D feature filters sliding over the time dimension.\n # activations_time: [batch, num_new_frames, num_filters]\n activations_time = tf.nn.conv1d(\n new_fingerprint_input, weights_frequency, input_frequency_size, 'VALID')\n # Rearrange such that we can perform the batched matmul.\n # activations_time: [num_filters, batch, num_new_frames]\n activations_time = tf.transpose(activations_time, perm=[2, 0, 1])\n\n # Runtime memory optimization.\n if not is_training:\n # We need to drop the activations corresponding to the oldest frames, and\n # then add those corresponding to the new frames.\n new_memory = memory[:, :, num_new_frames:]\n new_memory = tf.concat([new_memory, activations_time], 2)\n tf.assign(memory, new_memory)\n activations_time = new_memory\n\n # Create the time filters.\n weights_time = tf.Variable(\n tf.truncated_normal([num_filters, input_time_size], stddev=0.01))\n # Apply the time filter on the outputs of the feature filters.\n # weights_time: [num_filters, input_time_size, 1]\n # outputs: [num_filters, batch, 1]\n weights_time = tf.expand_dims(weights_time, 2)\n outputs = tf.matmul(activations_time, weights_time)\n # Split num_units and rank into separate dimensions (the remaining\n # dimension is the input_shape[0] -i.e. batch size). 
This also squeezes\n # the last dimension, since it's not used.\n # [num_filters, batch, 1] => [num_units, rank, batch]\n outputs = tf.reshape(outputs, [num_units, rank, -1])\n # Sum the rank outputs per unit => [num_units, batch].\n units_output = tf.reduce_sum(outputs, axis=1)\n # Transpose to shape [batch, num_units]\n units_output = tf.transpose(units_output)\n\n # Appy bias.\n bias = tf.Variable(tf.zeros([num_units]))\n first_bias = tf.nn.bias_add(units_output, bias)\n\n # Relu.\n first_relu = tf.nn.relu(first_bias)\n\n if is_training:\n first_dropout = tf.nn.dropout(first_relu, dropout_prob)\n else:\n first_dropout = first_relu\n\n first_fc_output_channels = 256\n first_fc_weights = tf.Variable(\n tf.truncated_normal([num_units, first_fc_output_channels], stddev=0.01))\n first_fc_bias = tf.Variable(tf.zeros([first_fc_output_channels]))\n first_fc = tf.matmul(first_dropout, first_fc_weights) + first_fc_bias\n if is_training:\n second_fc_input = tf.nn.dropout(first_fc, dropout_prob)\n else:\n second_fc_input = first_fc\n second_fc_output_channels = 256\n second_fc_weights = tf.Variable(\n tf.truncated_normal(\n [first_fc_output_channels, second_fc_output_channels], stddev=0.01))\n second_fc_bias = tf.Variable(tf.zeros([second_fc_output_channels]))\n second_fc = tf.matmul(second_fc_input, second_fc_weights) + second_fc_bias\n if is_training:\n final_fc_input = tf.nn.dropout(second_fc, dropout_prob)\n else:\n final_fc_input = second_fc\n label_count = model_settings['label_count']\n final_fc_weights = tf.Variable(\n tf.truncated_normal(\n [second_fc_output_channels, label_count], stddev=0.01))\n final_fc_bias = tf.Variable(tf.zeros([label_count]))\n final_fc = tf.matmul(final_fc_input, final_fc_weights) + final_fc_bias\n if is_training:\n return final_fc, dropout_prob\n else:\n return final_fc\n\ndef create_dnn_model(fingerprint_input, model_settings, model_size_info, \n is_training):\n \"\"\"Builds a model with multiple hidden fully-connected layers.\n model_size_info: length of the array defines the number of hidden-layers and\n each element in the array represent the number of neurons \n in that layer \n \"\"\"\n\n if is_training:\n dropout_prob = tf.placeholder(tf.float32, name='dropout_prob')\n fingerprint_size = model_settings['fingerprint_size']\n label_count = model_settings['label_count']\n num_layers = len(model_size_info)\n layer_dim = [fingerprint_size]\n layer_dim.extend(model_size_info)\n flow = fingerprint_input\n tf.summary.histogram('input', flow)\n for i in range(1, num_layers + 1):\n with tf.variable_scope('fc'+str(i)):\n W = tf.get_variable('W', shape=[layer_dim[i-1], layer_dim[i]], \n initializer=tf.contrib.layers.xavier_initializer())\n tf.summary.histogram('fc_'+str(i)+'_w', W)\n b = tf.get_variable('b', shape=[layer_dim[i]])\n tf.summary.histogram('fc_'+str(i)+'_b', b)\n flow = tf.matmul(flow, W) + b\n flow = tf.nn.relu(flow)\n if is_training:\n flow = tf.nn.dropout(flow, dropout_prob)\n\n weights = tf.get_variable('final_fc', shape=[layer_dim[-1], label_count], \n initializer=tf.contrib.layers.xavier_initializer())\n bias = tf.Variable(tf.zeros([label_count]))\n logits = tf.matmul(flow, weights) + bias\n if is_training:\n return logits, dropout_prob\n else:\n return logits\n\ndef create_cnn_model(fingerprint_input, model_settings, model_size_info,\n is_training):\n \"\"\"Builds a model with 2 convolution layers followed by a linear layer and \n a hidden fully-connected layer.\n model_size_info: defines the first and second convolution parameters in\n {number of 
conv features, conv filter height, width, stride in y,x dir.},\n followed by linear layer size and fully-connected layer size.\n \"\"\"\n if is_training:\n dropout_prob = tf.placeholder(tf.float32, name='dropout_prob')\n input_frequency_size = model_settings['dct_coefficient_count']\n input_time_size = model_settings['spectrogram_length']\n fingerprint_4d = tf.reshape(fingerprint_input,\n [-1, input_time_size, input_frequency_size, 1])\n\n first_filter_count = model_size_info[0] \n first_filter_height = model_size_info[1] #time axis\n first_filter_width = model_size_info[2] #frequency axis\n first_filter_stride_y = model_size_info[3] #time axis\n first_filter_stride_x = model_size_info[4] #frequency_axis\n\n second_filter_count = model_size_info[5] \n second_filter_height = model_size_info[6] #time axis\n second_filter_width = model_size_info[7] #frequency axis\n second_filter_stride_y = model_size_info[8] #time axis\n second_filter_stride_x = model_size_info[9] #frequency_axis\n \n linear_layer_size = model_size_info[10]\n fc_size = model_size_info[11]\n\n # first conv\n first_weights = tf.Variable(\n tf.truncated_normal(\n [first_filter_height, first_filter_width, 1, first_filter_count],\n stddev=0.01))\n first_bias = tf.Variable(tf.zeros([first_filter_count]))\n first_conv = tf.nn.conv2d(fingerprint_4d, first_weights, [\n 1, first_filter_stride_y, first_filter_stride_x, 1\n ], 'VALID') + first_bias\n first_conv = tf.layers.batch_normalization(first_conv, training=is_training,\n name='bn1')\n first_relu = tf.nn.relu(first_conv)\n if is_training:\n first_dropout = tf.nn.dropout(first_relu, dropout_prob)\n else:\n first_dropout = first_relu\n first_conv_output_width = math.ceil(\n (input_frequency_size - first_filter_width + 1) /\n first_filter_stride_x)\n first_conv_output_height = math.ceil(\n (input_time_size - first_filter_height + 1) /\n first_filter_stride_y)\n\n # second conv\n second_weights = tf.Variable(\n tf.truncated_normal(\n [second_filter_height, second_filter_width, first_filter_count, \n second_filter_count],\n stddev=0.01))\n second_bias = tf.Variable(tf.zeros([second_filter_count]))\n second_conv = tf.nn.conv2d(first_dropout, second_weights, [\n 1, second_filter_stride_y, second_filter_stride_x, 1\n ], 'VALID') + second_bias\n second_conv = tf.layers.batch_normalization(second_conv, training=is_training,\n name='bn2')\n second_relu = tf.nn.relu(second_conv)\n if is_training:\n second_dropout = tf.nn.dropout(second_relu, dropout_prob)\n else:\n second_dropout = second_relu\n second_conv_output_width = math.ceil(\n (first_conv_output_width - second_filter_width + 1) /\n second_filter_stride_x)\n second_conv_output_height = math.ceil(\n (first_conv_output_height - second_filter_height + 1) /\n second_filter_stride_y)\n\n second_conv_element_count = int(\n second_conv_output_width*second_conv_output_height*second_filter_count)\n flattened_second_conv = tf.reshape(second_dropout,\n [-1, second_conv_element_count])\n\n # linear layer\n W = tf.get_variable('W', shape=[second_conv_element_count, linear_layer_size],\n initializer=tf.contrib.layers.xavier_initializer())\n b = tf.get_variable('b', shape=[linear_layer_size])\n flow = tf.matmul(flattened_second_conv, W) + b\n\n # first fc\n first_fc_output_channels = fc_size\n first_fc_weights = tf.Variable(\n tf.truncated_normal(\n [linear_layer_size, first_fc_output_channels], stddev=0.01))\n first_fc_bias = tf.Variable(tf.zeros([first_fc_output_channels]))\n first_fc = tf.matmul(flow, first_fc_weights) + first_fc_bias\n first_fc = 
tf.layers.batch_normalization(first_fc, training=is_training, \n name='bn3')\n first_fc = tf.nn.relu(first_fc)\n if is_training:\n final_fc_input = tf.nn.dropout(first_fc, dropout_prob)\n else:\n final_fc_input = first_fc\n label_count = model_settings['label_count']\n final_fc_weights = tf.Variable(\n tf.truncated_normal(\n [first_fc_output_channels, label_count], stddev=0.01))\n final_fc_bias = tf.Variable(tf.zeros([label_count]))\n final_fc = tf.matmul(final_fc_input, final_fc_weights) + final_fc_bias\n if is_training:\n return final_fc, dropout_prob\n else:\n return final_fc\n\n\ndef create_basic_lstm_model(fingerprint_input, model_settings, model_size_info, \n is_training):\n \"\"\"Builds a model with a basic lstm layer (without output projection and \n peep-hole connections)\n model_size_info: defines the number of memory cells in basic lstm model\n \"\"\"\n if is_training:\n dropout_prob = tf.placeholder(tf.float32, name='dropout_prob')\n input_frequency_size = model_settings['dct_coefficient_count']\n input_time_size = model_settings['spectrogram_length']\n fingerprint_4d = tf.reshape(fingerprint_input,\n [-1, input_time_size, input_frequency_size])\n\n num_classes = model_settings['label_count']\n\n if type(model_size_info) is list:\n LSTM_units = model_size_info[0]\n else:\n LSTM_units = model_size_info \n\n with tf.name_scope('LSTM-Layer'):\n with tf.variable_scope(\"lstm\"): \n lstmcell = tf.contrib.rnn.BasicLSTMCell(LSTM_units, forget_bias=1.0, \n state_is_tuple=True)\n _, last = tf.nn.dynamic_rnn(cell=lstmcell, inputs=fingerprint_4d, \n dtype=tf.float32)\n flow = last[-1]\n\n with tf.name_scope('Output-Layer'):\n W_o = tf.get_variable('W_o', shape=[LSTM_units, num_classes], \n initializer=tf.contrib.layers.xavier_initializer())\n b_o = tf.get_variable('b_o', shape=[num_classes])\n logits = tf.matmul(flow, W_o) + b_o\n\n if is_training:\n return logits, dropout_prob\n else:\n return logits\n\ndef create_lstm_model(fingerprint_input, model_settings, model_size_info, \n is_training):\n \"\"\"Builds a model with a lstm layer (with output projection layer and \n peep-hole connections)\n Based on model described in https://arxiv.org/abs/1705.02411\n model_size_info: [projection size, memory cells in LSTM]\n \"\"\"\n if is_training:\n dropout_prob = tf.placeholder(tf.float32, name='dropout_prob')\n input_frequency_size = model_settings['dct_coefficient_count']\n input_time_size = model_settings['spectrogram_length']\n fingerprint_4d = tf.reshape(fingerprint_input,\n [-1, input_time_size, input_frequency_size])\n\n num_classes = model_settings['label_count']\n projection_units = model_size_info[0]\n LSTM_units = model_size_info[1]\n with tf.name_scope('LSTM-Layer'):\n with tf.variable_scope(\"lstm\"): \n lstmcell = tf.contrib.rnn.LSTMCell(LSTM_units, use_peepholes=True, \n num_proj=projection_units)\n _, last = tf.nn.dynamic_rnn(cell=lstmcell, inputs=fingerprint_4d, \n dtype=tf.float32)\n flow = last[-1]\n\n with tf.name_scope('Output-Layer'):\n W_o = tf.get_variable('W_o', shape=[projection_units, num_classes], \n initializer=tf.contrib.layers.xavier_initializer())\n b_o = tf.get_variable('b_o', shape=[num_classes])\n logits = tf.matmul(flow, W_o) + b_o\n\n if is_training:\n return logits, dropout_prob\n else:\n return logits\n\nclass LayerNormGRUCell(rnn_cell_impl.RNNCell):\n\n def __init__(self, num_units, forget_bias=1.0,\n input_size=None, activation=math_ops.tanh,\n layer_norm=True, norm_gain=1.0, norm_shift=0.0,\n dropout_keep_prob=1.0, dropout_prob_seed=None,\n 
reuse=None):\n\n super(LayerNormGRUCell, self).__init__(_reuse=reuse)\n\n if input_size is not None:\n tf.logging.info(\"%s: The input_size parameter is deprecated.\", self)\n\n self._num_units = num_units\n self._activation = activation\n self._forget_bias = forget_bias\n self._keep_prob = dropout_keep_prob\n self._seed = dropout_prob_seed\n self._layer_norm = layer_norm\n self._g = norm_gain\n self._b = norm_shift\n self._reuse = reuse\n\n @property\n def state_size(self):\n return self._num_units\n\n @property\n def output_size(self):\n return self._num_units\n\n def _norm(self, inp, scope):\n shape = inp.get_shape()[-1:]\n gamma_init = init_ops.constant_initializer(self._g)\n beta_init = init_ops.constant_initializer(self._b)\n with vs.variable_scope(scope):\n # Initialize beta and gamma for use by layer_norm.\n vs.get_variable(\"gamma\", shape=shape, initializer=gamma_init)\n vs.get_variable(\"beta\", shape=shape, initializer=beta_init)\n normalized = layers.layer_norm(inp, reuse=True, scope=scope)\n return normalized\n\n def _linear(self, args, copy):\n out_size = copy * self._num_units\n proj_size = args.get_shape()[-1]\n weights = vs.get_variable(\"kernel\", [proj_size, out_size])\n out = math_ops.matmul(args, weights)\n if not self._layer_norm:\n bias = vs.get_variable(\"bias\", [out_size])\n out = nn_ops.bias_add(out, bias)\n return out\n\n def call(self, inputs, state):\n \"\"\"LSTM cell with layer normalization and recurrent dropout.\"\"\"\n with vs.variable_scope(\"gates\"):\n h = state\n args = array_ops.concat([inputs, h], 1)\n concat = self._linear(args, 2)\n\n z, r = array_ops.split(value=concat, num_or_size_splits=2, axis=1)\n if self._layer_norm:\n z = self._norm(z, \"update\") \n r = self._norm(r, \"reset\")\n\n with vs.variable_scope(\"candidate\"):\n args = array_ops.concat([inputs, math_ops.sigmoid(r) * h], 1)\n new_c = self._linear(args, 1)\n if self._layer_norm:\n new_c = self._norm(new_c, \"state\")\n new_h = self._activation(new_c) * math_ops.sigmoid(z) + \\\n (1 - math_ops.sigmoid(z)) * h\n return new_h, new_h\n\ndef create_gru_model(fingerprint_input, model_settings, model_size_info, \n is_training):\n \"\"\"Builds a model with multi-layer GRUs\n model_size_info: [number of GRU layers, number of GRU cells per layer]\n Optionally, the bi-directional GRUs and/or GRU with layer-normalization \n can be explored.\n \"\"\"\n if is_training:\n dropout_prob = tf.placeholder(tf.float32, name='dropout_prob')\n input_frequency_size = model_settings['dct_coefficient_count']\n input_time_size = model_settings['spectrogram_length']\n fingerprint_4d = tf.reshape(fingerprint_input,\n [-1, input_time_size, input_frequency_size])\n\n num_classes = model_settings['label_count']\n\n layer_norm = False\n bidirectional = False\n\n num_layers = model_size_info[0]\n gru_units = model_size_info[1]\n\n gru_cell_fw = []\n gru_cell_bw = []\n if layer_norm:\n for i in range(num_layers):\n gru_cell_fw.append(LayerNormGRUCell(gru_units))\n if bidirectional:\n gru_cell_bw.append(LayerNormGRUCell(gru_units))\n else:\n for i in range(num_layers):\n gru_cell_fw.append(tf.contrib.rnn.GRUCell(gru_units))\n if bidirectional:\n gru_cell_bw.append(tf.contrib.rnn.GRUCell(gru_units))\n \n if bidirectional:\n outputs, output_state_fw, output_state_bw = \\\n tf.contrib.rnn.stack_bidirectional_dynamic_rnn(gru_cell_fw, gru_cell_bw, \n fingerprint_4d, dtype=tf.float32)\n flow = outputs[:, -1, :]\n else:\n cells = tf.contrib.rnn.MultiRNNCell(gru_cell_fw)\n _, last = tf.nn.dynamic_rnn(cell=cells, 
inputs=fingerprint_4d, \n dtype=tf.float32)\n flow = last[-1]\n\n with tf.name_scope('Output-Layer'):\n W_o = tf.get_variable('W_o', shape=[flow.get_shape()[-1], num_classes], \n initializer=tf.contrib.layers.xavier_initializer())\n b_o = tf.get_variable('b_o', shape=[num_classes])\n logits = tf.matmul(flow, W_o) + b_o\n\n if is_training:\n return logits, dropout_prob\n else:\n return logits\n \n\ndef create_crnn_model(fingerprint_input, model_settings,\n model_size_info, is_training):\n \"\"\"Builds a model with convolutional recurrent networks with GRUs\n Based on the model definition in https://arxiv.org/abs/1703.05390\n model_size_info: defines the following convolution layer parameters\n {number of conv features, conv filter height, width, stride in y,x dir.},\n followed by number of GRU layers and number of GRU cells per layer\n Optionally, the bi-directional GRUs and/or GRU with layer-normalization \n can be explored.\n \"\"\"\n if is_training:\n dropout_prob = tf.placeholder(tf.float32, name='dropout_prob')\n input_frequency_size = model_settings['dct_coefficient_count']\n input_time_size = model_settings['spectrogram_length']\n fingerprint_4d = tf.reshape(fingerprint_input,\n [-1, input_time_size, input_frequency_size, 1])\n\n layer_norm = False\n bidirectional = False\n\n # CNN part\n first_filter_count = model_size_info[0]\n first_filter_height = model_size_info[1]\n first_filter_width = model_size_info[2]\n first_filter_stride_y = model_size_info[3]\n first_filter_stride_x = model_size_info[4]\n\n first_weights = tf.get_variable('W', shape=[first_filter_height, \n first_filter_width, 1, first_filter_count], \n initializer=tf.contrib.layers.xavier_initializer())\n\n first_bias = tf.Variable(tf.zeros([first_filter_count]))\n first_conv = tf.nn.conv2d(fingerprint_4d, first_weights, [\n 1, first_filter_stride_y, first_filter_stride_x, 1\n ], 'VALID') + first_bias\n first_relu = tf.nn.relu(first_conv)\n if is_training:\n first_dropout = tf.nn.dropout(first_relu, dropout_prob)\n else:\n first_dropout = first_relu\n first_conv_output_width = int(math.floor(\n (input_frequency_size - first_filter_width + first_filter_stride_x) /\n first_filter_stride_x))\n first_conv_output_height = int(math.floor(\n (input_time_size - first_filter_height + first_filter_stride_y) /\n first_filter_stride_y))\n\n # GRU part\n num_rnn_layers = model_size_info[5]\n RNN_units = model_size_info[6]\n flow = tf.reshape(first_dropout, [-1, first_conv_output_height, \n first_conv_output_width * first_filter_count])\n cell_fw = []\n cell_bw = []\n if layer_norm:\n for i in range(num_rnn_layers):\n cell_fw.append(LayerNormGRUCell(RNN_units))\n if bidirectional:\n cell_bw.append(LayerNormGRUCell(RNN_units))\n else:\n for i in range(num_rnn_layers):\n cell_fw.append(tf.contrib.rnn.GRUCell(RNN_units))\n if bidirectional:\n cell_bw.append(tf.contrib.rnn.GRUCell(RNN_units))\n\n if bidirectional:\n outputs, output_state_fw, output_state_bw = \\\n tf.contrib.rnn.stack_bidirectional_dynamic_rnn(cell_fw, cell_bw, flow, \n dtype=tf.float32)\n flow_dim = first_conv_output_height*RNN_units*2\n flow = tf.reshape(outputs, [-1, flow_dim])\n else:\n cells = tf.contrib.rnn.MultiRNNCell(cell_fw)\n _, last = tf.nn.dynamic_rnn(cell=cells, inputs=flow, dtype=tf.float32)\n flow_dim = RNN_units\n flow = last[-1]\n\n first_fc_output_channels = model_size_info[7]\n\n first_fc_weights = tf.get_variable('fcw', shape=[flow_dim, \n first_fc_output_channels], \n initializer=tf.contrib.layers.xavier_initializer())\n \n first_fc_bias = 
tf.Variable(tf.zeros([first_fc_output_channels]))\n first_fc = tf.nn.relu(tf.matmul(flow, first_fc_weights) + first_fc_bias)\n if is_training:\n final_fc_input = tf.nn.dropout(first_fc, dropout_prob)\n else:\n final_fc_input = first_fc\n\n label_count = model_settings['label_count']\n \n final_fc_weights = tf.Variable(\n tf.truncated_normal(\n [first_fc_output_channels, label_count], stddev=0.01))\n \n final_fc_bias = tf.Variable(tf.zeros([label_count]))\n final_fc = tf.matmul(final_fc_input, final_fc_weights) + final_fc_bias\n if is_training:\n return final_fc, dropout_prob\n else:\n return final_fc\n\ndef create_ds_cnn_model(fingerprint_input, model_settings, model_size_info, \n is_training):\n \"\"\"Builds a model with depthwise separable convolutional neural network\n Model definition is based on https://arxiv.org/abs/1704.04861 and\n Tensorflow implementation: https://github.com/Zehaos/MobileNet\n\n model_size_info: defines number of layers, followed by the DS-Conv layer\n parameters in the order {number of conv features, conv filter height, \n width and stride in y,x dir.} for each of the layers. \n Note that first layer is always regular convolution, but the remaining \n layers are all depthwise separable convolutions.\n \"\"\"\n\n def ds_cnn_arg_scope(weight_decay=0):\n \"\"\"Defines the default ds_cnn argument scope.\n Args:\n weight_decay: The weight decay to use for regularizing the model.\n Returns:\n An `arg_scope` to use for the DS-CNN model.\n \"\"\"\n with slim.arg_scope(\n [slim.convolution2d, slim.separable_convolution2d],\n weights_initializer=slim.initializers.xavier_initializer(),\n biases_initializer=slim.init_ops.zeros_initializer(),\n weights_regularizer=slim.l2_regularizer(weight_decay)) as sc:\n return sc\n\n def _depthwise_separable_conv(inputs,\n num_pwc_filters,\n sc,\n kernel_size,\n stride):\n \"\"\" Helper function to build the depth-wise separable convolution layer.\n \"\"\"\n\n expansion = 6\n\n # skip pointwise by setting num_outputs=None\n expansion_conv = slim.conv2d(inputs=inputs,\n num_outputs=inputs.shape[3].value * expansion,\n stride=stride,\n kernel_size=[1, 1],\n scope=sc + '/exp_conv')\n\n bn = slim.batch_norm(expansion_conv, scope=sc + '/exp_conv/batch_norm')\n depthwise_conv = slim.separable_conv2d(bn, num_outputs=None, stride=stride,\n depth_multiplier=1, kernel_size=kernel_size,\n scope=sc + '/dw_conv')\n bn = slim.batch_norm(depthwise_conv, scope=sc + '/dw_conv/batch_norm')\n\n projection_conv = slim.conv2d(bn, num_outputs=num_pwc_filters, kernel_size=[1, 1], scope=sc + '/pj_conv',\n activation_fn=None)\n bn = slim.batch_norm(projection_conv, scope=sc + '/pj_conv/batch_norm')\n if stride == 2:\n return bn\n else:\n if inputs.shape[3].value != num_pwc_filters:\n bn = slim.conv2d(inputs=bn, num_outputs=num_pwc_filters, kernel_size=[1, 1], scope=sc + '/res_conv')\n return bn\n\n\n if is_training:\n dropout_prob = tf.placeholder(tf.float32, name='dropout_prob')\n\n label_count = model_settings['label_count']\n input_frequency_size = model_settings['dct_coefficient_count']\n input_time_size = model_settings['spectrogram_length']\n fingerprint_4d = tf.reshape(fingerprint_input,\n [-1, input_time_size, input_frequency_size, 1])\n \n t_dim = input_time_size\n f_dim = input_frequency_size\n\n #Extract model dimensions from model_size_info\n num_layers = model_size_info[0]\n conv_feat = [None]*num_layers\n conv_kt = [None]*num_layers\n conv_kf = [None]*num_layers\n conv_st = [None]*num_layers\n conv_sf = [None]*num_layers\n i=1\n for layer_no in 
range(0,num_layers):\n conv_feat[layer_no] = model_size_info[i]\n i += 1\n conv_kt[layer_no] = model_size_info[i]\n i += 1\n conv_kf[layer_no] = model_size_info[i]\n i += 1\n conv_st[layer_no] = model_size_info[i]\n i += 1\n conv_sf[layer_no] = model_size_info[i]\n i += 1\n\n scope = 'DS-CNN'\n with tf.variable_scope(scope) as sc:\n end_points_collection = sc.name + '_end_points'\n with slim.arg_scope([slim.convolution2d, slim.separable_convolution2d],\n activation_fn=None,\n weights_initializer=slim.initializers.xavier_initializer(),\n biases_initializer=slim.init_ops.zeros_initializer(),\n outputs_collections=[end_points_collection]):\n with slim.arg_scope([slim.batch_norm],\n is_training=is_training,\n decay=0.96,\n updates_collections=None,\n activation_fn=tf.nn.relu6):\n for layer_no in range(0,num_layers):\n if layer_no==0:\n net = slim.convolution2d(fingerprint_4d, conv_feat[layer_no],\\\n [conv_kt[layer_no], conv_kf[layer_no]], stride=[conv_st[layer_no], conv_sf[layer_no]], padding='SAME', scope='conv_1')\n net = slim.batch_norm(net, scope='conv_1/batch_norm')\n else:\n net = _depthwise_separable_conv(net, conv_feat[layer_no], \\\n kernel_size = [conv_kt[layer_no],conv_kf[layer_no]], \\\n stride = [conv_st[layer_no],conv_sf[layer_no]], sc='conv_ds_'+str(layer_no))\n t_dim = math.ceil(t_dim/float(conv_st[layer_no]))\n f_dim = math.ceil(f_dim/float(conv_sf[layer_no]))\n\n net = slim.avg_pool2d(net, [t_dim, f_dim], scope='avg_pool')\n\n net = tf.squeeze(net, [1, 2], name='SpatialSqueeze')\n logits = slim.fully_connected(net, label_count, activation_fn=None, scope='fc1')\n\n if is_training:\n return logits, dropout_prob\n else:\n return logits\n\n\n" ]
[ [ "tensorflow.get_variable", "tensorflow.nn.dynamic_rnn", "tensorflow.concat", "tensorflow.count_nonzero", "tensorflow.zeros", "tensorflow.nn.max_pool", "tensorflow.reduce_sum", "tensorflow.python.ops.array_ops.split", "tensorflow.global_variables", "tensorflow.contrib.rnn.GRUCell", "tensorflow.contrib.slim.l2_regularizer", "tensorflow.contrib.slim.init_ops.zeros_initializer", "tensorflow.python.ops.init_ops.constant_initializer", "tensorflow.nn.conv1d", "tensorflow.contrib.slim.avg_pool2d", "tensorflow.nn.conv2d", "tensorflow.layers.batch_normalization", "tensorflow.squeeze", "tensorflow.contrib.rnn.MultiRNNCell", "tensorflow.python.ops.nn_ops.bias_add", "tensorflow.name_scope", "tensorflow.contrib.layers.xavier_initializer", "tensorflow.python.ops.variable_scope.variable_scope", "tensorflow.python.ops.math_ops.matmul", "tensorflow.contrib.layers.python.layers.layers.layer_norm", "tensorflow.nn.dropout", "tensorflow.matmul", "tensorflow.truncated_normal", "tensorflow.contrib.slim.arg_scope", "tensorflow.contrib.slim.separable_conv2d", "tensorflow.placeholder", "tensorflow.contrib.slim.fully_connected", "tensorflow.logging.info", "tensorflow.contrib.slim.batch_norm", "tensorflow.summary.histogram", "tensorflow.nn.bias_add", "tensorflow.nn.relu", "tensorflow.contrib.rnn.stack_bidirectional_dynamic_rnn", "tensorflow.python.ops.math_ops.sigmoid", "tensorflow.transpose", "tensorflow.python.ops.array_ops.concat", "tensorflow.contrib.rnn.BasicLSTMCell", "tensorflow.reshape", "tensorflow.assign", "tensorflow.expand_dims", "tensorflow.python.ops.variable_scope.get_variable", "tensorflow.contrib.slim.initializers.xavier_initializer", "tensorflow.contrib.rnn.LSTMCell", "tensorflow.contrib.slim.conv2d", "tensorflow.contrib.slim.convolution2d", "tensorflow.variable_scope" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [ "1.13", "1.10", "1.12" ] } ]
yongjie-lin/open-sesame
[ "4b4d3691dfbba00601db205dda7f5f9d907dda37" ]
[ "bertviz/bertviz/attention.py" ]
[ "# coding=utf-8\n# Copyright 2018 The Tensor2Tensor Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n#\n# Change log\n# 12/12/18 Jesse Vig Adapted to BERT model\n# 12/19/18 Jesse Vig Assorted cleanup. Changed orientation of attention matrices. Updated comments.\n\n\n\"\"\"Module for postprocessing and displaying transformer attentions.\n\nThis module is designed to be called from an ipython notebook.\n\"\"\"\n\nimport json\nimport os\nimport numpy as np\n\nimport IPython.display as display\n\n\nvis_html = \"\"\"\n <span style=\"user-select:none\">\n Layer: <select id=\"layer\"></select>\n Attention: <select id=\"att_type\">\n <option value=\"all\">All</option>\n <option value=\"a\">Sentence A self-attention</option>\n <option value=\"b\">Sentence B self-attention</option>\n <option value=\"ab\">Sentence A -> Sentence B</option>\n <option value=\"ba\">Sentence B -> Sentence A</option>\n <option value=\"avg_aa\">Head-averaged sentence A self-attention</option>\n <option value=\"up2k_aa\">Reduced up-to-k sentence A self-attention</option>\n </select>\n </span>\n <div id='vis'></div>\n\"\"\"\n\n__location__ = os.path.realpath(\n os.path.join(os.getcwd(), os.path.dirname(__file__)))\nvis_js = open(os.path.join(__location__, 'attention.js')).read()\n\n\ndef show(tokens_a, tokens_b, attn, expt_params):\n \"\"\"Displays attention visualization.\n expt_params: a dictionary possibly containing the following keys\n token_groups: array of nonnegative integers indicating how tokens are to be grouped in the viz\n e.g. \"The quick brown fox jumps over the lazy dog .\" with [1,1,1,1,0,0,2,2,2,0] produces\n the target groups \"The quick brown fox\" and \"the lazy dog\". ONLY WORKS WITH A->A, B->A FOR NOW.\n attn_sources: array of indices of the tokens (in tokens_a) with attention distributions that we are interested in.\n attn_target_groups: array of indices corresponding to token group that the self-attention of the corresponding\n source should be focusing on, for the purpose of computing binary cross-entropy. 
Only allowed values are 1\n and 2.\n \"\"\"\n\n params = expt_params.keys()\n if 'token_groups' in params:\n token_groups = expt_params['token_groups']\n assert(len(token_groups) == len(tokens_a) - 2)\n assert(all(type(i) is int for i in token_groups))\n\n if 'attn_sources' in params and 'attn_target_groups' in params:\n attn_sources = expt_params['attn_sources']\n attn_target_groups = expt_params['attn_target_groups']\n assert(set(token_groups) == set([0,1,2]))\n assert(len(attn_sources) == len(attn_target_groups))\n assert(set(attn_sources).issubset(set(range(len(tokens_a)))))\n assert(set(attn_target_groups).issubset(set([1,2])))\n elif 'attn_sources' in params or 'attn_target_groups' in params:\n raise ValueError('Please provide both attn_sources and attn_target_groups, otherwise omit both of them.')\n\n attentions = _get_attentions(tokens_a, tokens_b, attn, expt_params)\n att_json = json.dumps(attentions)\n _show_attention(att_json)\n\n\ndef _show_attention(att_json):\n display.display(display.HTML(vis_html))\n display.display(display.Javascript('window.attention = %s' % att_json))\n display.display(display.Javascript(vis_js))\n\n\ndef logmatmulexp(A, B): # assuming A,B have shape [1, n, n]\n max_A = np.max(A, -1, keepdims=True)\n max_B = np.max(B, -1, keepdims=True)\n C = np.matmul(np.exp(A - max_A), np.exp(B - max_B))\n np.log(C, out=C)\n C += max_A + np.transpose(max_B, (0,2,1))\n return C\n\ndef _get_attentions(tokens_a, tokens_b, attn, expt_params):\n \"\"\"Compute representation of the attention to pass to the d3 visualization\n\n Args:\n tokens_a: tokens in sentence A\n tokens_b: tokens in sentence B\n attn: numpy array, attention\n [num_layers, batch_size, num_heads, seq_len, seq_len]\n expt_params: dictionary containing customizations for the viz, e.g. target groups and inputs for\n computing cross-entropy\n\n Returns:\n Dictionary of attention representations with the structure:\n {\n 'all': Representations for showing all attentions at the same time. (source = AB, target = AB)\n 'a': Sentence A self-attention (source = A, target = A)\n 'b': Sentence B self-attention (source = B, target = B)\n 'ab': Sentence A -> Sentence B attention (source = A, target = B)\n 'ba': Sentence B -> Sentence A attention (source = B, target = A)\n }\n and each sub-dictionary has structure:\n {\n 'att': list of inter attentions matrices, one for each layer. 
Each is of shape [num_heads, source_seq_len, target_seq_len]\n 'top_text': list of source tokens, to be displayed on the left of the vis\n 'bot_text': list of target tokens, to be displayed on the right of the vis\n }\n \"\"\"\n\n all_attns = []\n a_attns = []\n b_attns = []\n ab_attns = []\n ba_attns = []\n slice_a = slice(0, len(tokens_a)) # Positions corresponding to sentence A in input\n slice_b = slice(len(tokens_a), len(tokens_a) + len(tokens_b)) # Position corresponding to sentence B in input\n\n avg_attns = []\n up2k_attns = []\n # up2k = np.expand_dims(np.identity(len(tokens_a)), 0) # initialize accumulator for reduction operation\n log_up2k = None\n tokens_a_grouped = None\n no_sep_slice = slice(1, len(tokens_a)-1) # for renormalization so viz is not dominated by [CLS], [SEP] attentions\n\n if 'token_groups' in expt_params.keys():\n token_groups = expt_params['token_groups']\n token_groups.insert(0, 0) # add 0 for [CLS]\n token_groups.append(0) # add 0 for [SEP]\n d = {i: [idx for (idx,grp) in enumerate(token_groups) if grp == i] for i in set(token_groups)}\n tokens_a_grouped = []\n for grp, idx_list in d.items():\n if grp == 0:\n continue\n tokens_a_grouped.append(' '.join(tokens_a[idx] for idx in idx_list))\n print(\"Token groups:\", list(enumerate(tokens_a_grouped, 1)))\n else:\n print('Number of tokens:', len(tokens_a))\n token_groups = None\n\n head_visual_scaling_factor = 1\n up2k_visual_scaling_factor = 1\n num_layers = len(attn)\n for layer in range(num_layers):\n layer_attn = attn[layer][0] # Get layer attention (assume batch size = 1), shape = [num_heads, seq_len, seq_len]\n all_attns.append(layer_attn.tolist()) # Append AB->AB attention for layer, across all heads\n b_attns.append(layer_attn[:, slice_b, slice_b].tolist()) # Append B->B attention for layer, across all heads\n ab_attns.append(layer_attn[:, slice_a, slice_b].tolist()) # Append A->B attention for layer, across all heads\n\n aa_attn = layer_attn[:, slice_a, slice_a] # keep only the a->a attentions\n aa_attn /= aa_attn.sum(axis=2, keepdims=True) # renormalize axis 2 of aa_attn after slicing\n head_avg = np.mean(aa_attn, axis=0, keepdims=True) # mean preserves normalization along axis 2\n\n # normalizer = head_avg[:, :, no_sep_slice].sum(axis=2, keepdims=True)\n # avg_attns.append((head_visual_scaling_factor * head_avg / normalizer).tolist())\n\n if log_up2k is None:\n log_up2k = np.log(head_avg)\n else:\n log_head_avg = np.log(head_avg)\n log_up2k = logmatmulexp(log_head_avg, log_up2k) # more numerically stable than chaining matmuls\n\n # np.matmul(head_avg, up2k, out=up2k)\n # up2k /= up2k.sum(axis=2, keepdims=True)\n # normalizer = np.exp(log_up2k)[:, :, no_sep_slice].sum(axis=2, keepdims=True)\n # up2k_attns.append((up2k_visual_scaling_factor * np.exp(up2k) / normalizer).tolist())\n\n if token_groups is not None:\n a_attn_grouped = None\n ba_attn_grouped = None\n avg_attn_grouped = None\n up2k_attn_grouped = None\n for grp, idx_list in d.items():\n if grp == 0: # group 0 only consists of ignored tokens\n continue\n if a_attn_grouped is None: # first iter\n a_attn_grouped = layer_attn[:, slice_a, idx_list].sum(axis=2, keepdims=True)\n ba_attn_grouped = layer_attn[:, slice_b, idx_list].sum(axis=2, keepdims=True)\n avg_attn_grouped = head_avg[:, slice_a, idx_list].sum(axis=2, keepdims=True)\n up2k_attn_grouped = np.exp(log_up2k)[:, slice_a, idx_list].sum(axis=2, keepdims=True)\n else:\n a_attn_grouped = np.append(a_attn_grouped, layer_attn[:, slice_a, idx_list].sum(axis=2, keepdims=True), axis=2)\n 
ba_attn_grouped = np.append(ba_attn_grouped, layer_attn[:, slice_b, idx_list].sum(axis=2, keepdims=True), axis=2)\n avg_attn_grouped = np.append(avg_attn_grouped, head_avg[:, slice_a, idx_list].sum(axis=2, keepdims=True), axis=2)\n up2k_attn_grouped = np.append(up2k_attn_grouped, np.exp(log_up2k)[:, slice_a, idx_list].sum(axis=2, keepdims=True), axis=2)\n a_attns.append(a_attn_grouped.tolist()) # Append A->A attention for layer, across all heads\n ba_attns.append(ba_attn_grouped.tolist()) # Append B->A attention for layer, across all heads\n normalizer = avg_attn_grouped.sum(axis=2, keepdims=True)\n avg_attns.append((head_visual_scaling_factor * avg_attn_grouped / normalizer).tolist())\n normalizer = up2k_attn_grouped.sum(axis=2, keepdims=True)\n up2k_attns.append((up2k_visual_scaling_factor * up2k_attn_grouped / normalizer).tolist())\n else:\n a_attns.append(layer_attn[:, slice_a, slice_a].tolist()) # Append A->A attention for layer, across all heads\n ba_attns.append(layer_attn[:, slice_b, slice_a].tolist()) # Append B->A attention for layer, across all heads\n normalizer = head_avg[:, :, no_sep_slice].sum(axis=2, keepdims=True)\n avg_attns.append((head_visual_scaling_factor * head_avg / normalizer).tolist())\n normalizer = np.exp(log_up2k)[:, :, no_sep_slice].sum(axis=2, keepdims=True)\n up2k_attns.append((up2k_visual_scaling_factor * np.exp(log_up2k) / normalizer).tolist())\n\n if 'attn_sources' in expt_params.keys():\n attn_sources, attn_target_groups = expt_params['attn_sources'], expt_params['attn_target_groups']\n print(f\"{'Attention source':<20}{'Target group':<20}{'Binary cross-entropy'}\")\n for idx in range(len(attn_sources)):\n source_idx = attn_sources[idx]\n target_group = attn_target_groups[idx]\n attn_vector = np.array(avg_attns)[:, 0, source_idx, target_group-1]\n\n # since bce(y,y*) = - y*log(y) - (1-y*)log(1-y) and we have y* = 1 in our use case\n bce = - np.log(attn_vector).sum()\n print(f\"{tokens_a[source_idx]:<20}{tokens_a_grouped[target_group - 1]:<20}{bce:.5f}\")\n\n attentions = {\n 'all': {\n 'att': all_attns,\n 'top_text': tokens_a + tokens_b,\n 'bot_text': tokens_a + tokens_b\n },\n 'a': {\n 'att': a_attns,\n 'top_text': tokens_a,\n 'bot_text': tokens_a if token_groups is None else tokens_a_grouped\n },\n 'b': {\n 'att': b_attns,\n 'top_text': tokens_b,\n 'bot_text': tokens_b\n },\n 'ab': {\n 'att': ab_attns,\n 'top_text': tokens_a,\n 'bot_text': tokens_b\n },\n 'ba': {\n 'att': ba_attns,\n 'top_text': tokens_b,\n 'bot_text': tokens_a if token_groups is None else tokens_a_grouped\n },\n 'avg_aa': {\n 'att': avg_attns,\n 'top_text': tokens_a,\n 'bot_text': tokens_a if token_groups is None else tokens_a_grouped\n },\n 'up2k_aa': {\n 'att': up2k_attns,\n 'top_text': tokens_a,\n 'bot_text': tokens_a if token_groups is None else tokens_a_grouped\n },\n }\n return attentions\n" ]
[ [ "numpy.log", "numpy.max", "numpy.mean", "numpy.transpose", "numpy.array", "numpy.exp" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
shyam196/egc
[ "43ddb88e82b6e2e3ac7c9a3736a8e55954168f5e", "43ddb88e82b6e2e3ac7c9a3736a8e55954168f5e" ]
[ "experiments/utils.py", "kernels/test.py" ]
[ "import os\nimport random\nfrom pathlib import Path\n\nimport numpy as np\nimport requests\nimport torch\nimport torch.nn as nn\n\n\ndef seed_all(seed):\n print(f\"Setting seed: {seed}\")\n random.seed(seed)\n np.random.seed(seed)\n torch.manual_seed(seed)\n torch.cuda.manual_seed_all(seed)\n\n\nDATA_LOC_KEY = \"DATASET_LOC\"\n\n\ndef data_location():\n if DATA_LOC_KEY in os.environ.keys():\n return os.getenv(DATA_LOC_KEY)\n else:\n return str(Path.home() / \"datasets\")\n\n\ndef mlp(layers, act=nn.ReLU, dropout=0.0):\n modules = []\n for i, last in enumerate(layers[:-2]):\n current = layers[i + 1]\n modules.append(nn.Linear(last, current))\n modules.append(nn.BatchNorm1d(current))\n modules.append(act())\n modules.append(nn.Dropout(dropout))\n\n modules.append(nn.Linear(layers[-2], layers[-1]))\n return nn.Sequential(*modules)\n\n\ndef print_model_parameters(model, full=False):\n cnt = 0\n for k, v in model.named_parameters():\n if full:\n print(k, v.numel())\n cnt += v.numel()\n print(\"Total Params:\", cnt)\n\n\ndef download(url: str, dest_file: Path):\n print(f\"Downloading from {url}\")\n if not dest_file.parent.exists():\n dest_file.parent.mkdir(parents=True)\n\n r = requests.get(url, stream=True)\n if r.ok:\n with open(dest_file, \"wb\") as f:\n for chunk in r.iter_content(chunk_size=1024 * 8):\n if chunk:\n f.write(chunk)\n f.flush()\n os.fsync(f.fileno())\n else: # HTTP status code 4XX/5XX\n raise ValueError(\"Failed to download file\")\n\n\ndef load_pretrained(conf, dataset_name, model_name, hidden, model_dir, pretrained_conf):\n req_hidden, url = pretrained_conf[model_name]\n if hidden != req_hidden:\n raise ValueError\n\n model_dir = model_dir / dataset_name / model_name\n model_path = model_dir / \"checkpoint.pt\"\n if not model_path.exists():\n download(url, model_path)\n\n return conf.restore_trial(model_dir, map_location=torch.device(\"cpu\"))\n", "from statistics import mean, stdev\nimport time\n\nimport click\nimport torch\nfrom torch_sparse import SparseTensor\nimport numpy as np\nimport scipy.sparse as sparse\nimport torch_sparse\nimport aggfuse_cpu\nimport aggfuse_gpu\n\nfrom torch_geometric.data import Batch\nfrom torch_geometric.datasets import (\n Planetoid,\n Reddit,\n ZINC,\n SuiteSparseMatrixCollection,\n)\nfrom torch_geometric.utils import to_scipy_sparse_matrix\n\nfrom experiments.code.utils import code_data\nfrom experiments.arxiv.configs import arxiv_data\n\n\ndef random_sparse(n, k, dtype, density, seed=0):\n return sparse.rand(\n n, k, density=density, format=\"csr\", dtype=dtype, random_state=seed\n )\n\n\ndef random_dense(k, n, dtype, seed=0):\n rng = np.random.default_rng(seed)\n return rng.standard_normal(size=(k, n), dtype=dtype)\n\n\ndef time_fn(f, warmups, runs):\n for _ in range(warmups):\n f()\n times = []\n for _ in range(runs):\n start = time.time()\n f()\n times.append(time.time() - start)\n\n return times\n\n\ndef mm_cpu(x, theta):\n return np.matmul(x, theta)\n\n\ndef csr_dmm_cpu(s, d):\n out = np.zeros((s.shape[0], d.shape[1]), dtype=np.float32)\n aggfuse_cpu.csr_sum(s.shape[0], s.shape[1], s.indptr, s.indices, s.data, d, out)\n return out\n\n\ndef csr_fuse_cpu(s, d, w):\n out = np.zeros((s.shape[0], d.shape[1]), dtype=np.float32)\n aggfuse_cpu.aggfuse_fp32(\n s.shape[0], s.shape[1], s.indptr, s.indices, s.data, d, w, out\n )\n return out\n\n\ndef naive_fuse_cpu(s, d, w):\n out_sum = np.zeros((s.shape[0], d.shape[1]), dtype=np.float32)\n out_max = np.zeros((s.shape[0], d.shape[1]), dtype=np.float32)\n out_min = np.zeros((s.shape[0], 
d.shape[1]), dtype=np.float32)\n\n aggfuse_cpu.csr_sum(s.shape[0], s.shape[1], s.indptr, s.indices, s.data, d, out_sum)\n aggfuse_cpu.csr_max(s.shape[0], s.shape[1], s.indptr, s.indices, s.data, d, out_max)\n aggfuse_cpu.csr_min(s.shape[0], s.shape[1], s.indptr, s.indices, s.data, d, out_min)\n\n w = np.expand_dims(w, -1)\n out = (w[:, 0] * out_sum) + (w[:, 1] * out_max) + (w[:, 2] * out_min)\n return out\n\n\ndef mm_gpu(x, theta):\n with torch.no_grad():\n y = torch.matmul(x, theta)\n torch.cuda.synchronize()\n return y\n\n\ndef csr_dmm_gpu(s, d):\n with torch.no_grad():\n y = torch_sparse.matmul(s, d, reduce=\"sum\")\n torch.cuda.synchronize()\n return y\n\n\ndef csr_fuse_gpu(s, d, w):\n with torch.no_grad():\n rowptr, col, value = s.csr()\n y = aggfuse_gpu.ts_fuse_fp32(rowptr, col, value, d, w)\n torch.cuda.synchronize()\n return y\n\n\ndef naive_fuse_gpu(s, d, w):\n with torch.no_grad():\n y_sum = torch_sparse.matmul(s, d, reduce=\"sum\")\n y_min = torch_sparse.matmul(s, d, reduce=\"min\")\n y_max = torch_sparse.matmul(s, d, reduce=\"max\")\n w = w.unsqueeze(-1)\n y = (y_sum * w[:, 0]) + (y_min * w[:, 1]) + (y_max * w[:, 2])\n torch.cuda.synchronize()\n return y\n\n\ndef load_cora(root):\n return Planetoid(root=root, name=\"Cora\")[0]\n\n\ndef load_reddit(root):\n return Reddit(root)[0]\n\n\ndef load_zinc(root):\n dataset = ZINC(root, subset=True)\n batch = Batch.from_data_list([dataset[i] for i in range(10000)])\n return batch\n\n\ndef load_code(root):\n data = code_data(root=root, batch_size=128)\n dataset = data[0][\"train\"].dataset\n batch = Batch.from_data_list([dataset[i] for i in range(10000)])\n return batch\n\n\ndef load_circuit(root):\n dataset = SuiteSparseMatrixCollection(root=root, group=\"Freescale\", name=\"memchip\")\n return dataset[0]\n\n\ndef load_arxiv(root):\n dataset = arxiv_data(root)\n return dataset[0]\n\n\ndef _mean_std(data):\n return mean(data), stdev(data)\n\n\ndef to_sparse_cpu(data):\n return to_scipy_sparse_matrix(data.edge_index).tocsr().astype(np.float32)\n\n\ndef to_dense_cpu(x):\n return x\n\n\ndef to_sparse_gpu(data):\n (row, col), N = data.edge_index, data.num_nodes\n perm = (col * N + row).argsort()\n row, col = row[perm], col[perm]\n value = torch.ones(data.edge_index.shape[1])\n adj_t = SparseTensor(\n row=col, col=row, value=value, sparse_sizes=(N, N), is_sorted=True\n )\n\n # Pre-process some important attributes.\n adj_t.storage.rowptr()\n adj_t.storage.csr2csc()\n return adj_t.to(torch.float32).to(\"cuda\")\n\n\ndef to_dense_gpu(x):\n x = torch.from_numpy(x).to(\"cuda\")\n return x\n\n\[email protected]()\[email protected](\n \"dataset\", type=click.Choice([\"reddit\", \"cora\", \"zinc\", \"circuit\", \"code\", \"arxiv\"])\n)\[email protected](\"k\", type=int)\[email protected](\"device\", type=click.Choice([\"cpu\", \"gpu\"]))\[email protected](\"--data_dir\", type=click.Path(), default=\"~/datasets\")\[email protected](\"--warmups\", type=int, default=5)\[email protected](\"--runs\", type=int, default=5)\ndef main(dataset, k, device, data_dir, warmups, runs):\n if dataset == \"reddit\":\n data = load_reddit(data_dir)\n elif dataset == \"cora\":\n data = load_cora(data_dir)\n elif dataset == \"zinc\":\n data = load_zinc(data_dir)\n elif dataset == \"circuit\":\n data = load_circuit(data_dir)\n elif dataset == \"code\":\n data = load_code(data_dir)\n elif dataset == \"arxiv\":\n data = load_arxiv(data_dir)\n else:\n raise ValueError\n\n n = data.num_nodes\n d = random_dense(n, k, dtype=np.float32)\n w = random_dense(n, 3, np.float32)\n 
theta = random_dense(k, k, dtype=np.float32)\n\n if device == \"cpu\":\n s = to_sparse_cpu(data)\n d = to_dense_cpu(d)\n w = to_dense_cpu(w)\n theta = to_dense_cpu(theta)\n dmm = lambda: mm_cpu(d, theta)\n reimple = lambda: csr_dmm_cpu(s, d)\n fused = lambda: csr_fuse_cpu(s, d, w)\n naive_fused = lambda: naive_fuse_cpu(s, d, w)\n else:\n s = to_sparse_gpu(data)\n d = to_dense_gpu(d)\n w = to_dense_gpu(w)\n theta = to_dense_gpu(theta)\n dmm = lambda: mm_gpu(d, theta)\n reimple = lambda: csr_dmm_gpu(s, d)\n fused = lambda: csr_fuse_gpu(s, d, w)\n naive_fused = lambda: naive_fuse_gpu(s, d, w)\n\n m, std = _mean_std(time_fn(dmm, warmups, runs))\n print(f\"{dataset},{device},{runs},{k},dmm,{m},{std}\")\n m, std = _mean_std(time_fn(reimple, warmups, runs))\n print(f\"{dataset},{device},{runs},{k},csr_sum,{m},{std}\")\n m, std = _mean_std(time_fn(fused, warmups, runs))\n print(f\"{dataset},{device},{runs},{k},fused,{m},{std}\")\n m, std = _mean_std(time_fn(naive_fused, warmups, runs))\n print(f\"{dataset},{device},{runs},{k},naive,{m},{std}\")\n\n\nif __name__ == \"__main__\":\n main()\n" ]
[ [ "torch.nn.Sequential", "torch.nn.BatchNorm1d", "torch.nn.Dropout", "numpy.random.seed", "torch.manual_seed", "torch.nn.Linear", "torch.cuda.manual_seed_all", "torch.device" ], [ "torch.cuda.synchronize", "numpy.expand_dims", "torch.ones", "scipy.sparse.rand", "numpy.matmul", "torch.from_numpy", "torch.matmul", "torch.no_grad", "numpy.zeros", "numpy.random.default_rng" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [ "1.7", "1.0", "0.10", "1.2", "0.14", "0.19", "1.5", "0.12", "0.17", "0.13", "1.6", "1.4", "1.9", "1.3", "1.10", "0.15", "0.18", "0.16", "1.8" ], "tensorflow": [] } ]
mihirp1998/blender_emblang
[ "4b7092b8f4dfdc5240ed8ecf8e18ec75b9e0141c" ]
[ "image_generation/render_images.py" ]
[ "\n# Copyright 2017-present, Facebook, Inc.\n# All rights reserved.\n#\n# This source code is licensed under the BSD-style license found in the\n# LICENSE file in the root directory of this source tree. An additional grant\n# of patent rights can be found in the PATENTS file in the same directory.\n\nfrom __future__ import print_function\nimport _init_paths\nimport math, sys, random, argparse, json, os, tempfile, pickle\n\nfrom datetime import datetime as dt\nfrom collections import Counter\nfrom treeutils import sample_tree, extract_objects, refine_tree_info, remove_function_obj, sample_tree_flexible, add_parent\nfrom modules import Combine, Layout, Describe\nfrom lib.tree import Tree\nimport pdb\nimport subprocess\nimport os\nimport numpy as np\nimport time\nfrom mathutils import Matrix\nfrom math import radians\nimport binvox_rw\n\n\n\"\"\"\nRenders random scenes using Blender, each with with a random number of objects;\neach object has a random size, position, color, and shape. Objects will be\nnonintersecting but may partially occlude each other. Output images will be\nwritten to disk as PNGs, and we will also write a JSON file for each image with\nground-truth scene information.\n\nThis file expects to be run from Blender like this:\n\nblender --background --python render_images.py -- [arguments to this script]\n\"\"\"\n# Made changes to add_objects_from_tree(), render_scene_with_tree()\n# Ready for 3d\n\nINSIDE_BLENDER = True\ntry:\n import bpy, bpy_extras\n from mathutils import Vector\nexcept ImportError as e:\n INSIDE_BLENDER = False\nif INSIDE_BLENDER:\n try:\n import utils\n\n except ImportError as e:\n print(\"\\nERROR\")\n print(\"Running render_images.py from Blender and cannot import utils.py.\")\n print(\"You may need to add a .pth file to the site-packages of Blender's\")\n print(\"bundled python with a command like this:\\n\")\n print(\"echo $PWD >> $BLENDER/$VERSION/python/lib/python3.5/site-packages/clevr.pth\")\n print(\"\\nWhere $BLENDER is the directory where Blender is installed, and\")\n print(\"$VERSION is your Blender version (such as 2.78).\")\n sys.exit(1)\n\nparser = argparse.ArgumentParser()\n\n# Input options\nparser.add_argument('--base_scene_blendfile', default='data/base_scene_full.blend',\n help=\"Base blender file on which all scenes are based; includes \" +\n \"ground plane, lights, and camera.\")\nparser.add_argument('--properties_json', default='data/properties.json',\n help=\"JSON file defining objects, materials, sizes, and colors. \" +\n \"The \\\"colors\\\" field maps from CLEVR color names to RGB values; \" +\n \"The \\\"sizes\\\" field maps from CLEVR size names to scalars used to \" +\n \"rescale object models; the \\\"materials\\\" and \\\"shapes\\\" fields map \" +\n \"from CLEVR material and shape names to .blend files in the \" +\n \"--object_material_dir and --shape_dir directories respectively.\")\nparser.add_argument('--shape_dir', default='data/shapes',\n help=\"Directory where .blend files for object models are stored\")\nparser.add_argument('--material_dir', default='data/materials',\n help=\"Directory where .blend files for materials are stored\")\nparser.add_argument('--shape_color_combos_json', default=None,\n help=\"Optional path to a JSON file mapping shape names to a list of \" +\n \"allowed color names for that shape. 
This allows rendering images \" +\n \"for CLEVR-CoGenT.\")\n\n# Settings for objects\nparser.add_argument('--min_objects', default=1, type=int,\n help=\"The minimum number of objects to place in each scene\")\nparser.add_argument('--max_objects', default=3, type=int,\n help=\"The maximum number of objects to place in each scene\")\nparser.add_argument('--min_dist', default=0.2, type=float,\n help=\"The minimum allowed distance between object centers\")\nparser.add_argument('--min_obj_2d_size', default=10, type=float,\n help=\"The minimum allowed 2d bounding box size of generated objects\")\nparser.add_argument('--radius', default=13, type=float,\n help=\"The distance of the camera from the origin from where the images are rendered\")\nparser.add_argument('--scene_size', default=8, type=float,\n help=\"The distance of the camera from the origin from where the images are rendered\")\nparser.add_argument('--all_views', default=1, type=float,\n help=\"Render all 36 views or only the 4 needed for testing\")\nparser.add_argument('--filter_out_of_view', default=0, type=int,\n help=\"Reject scenes with out-of-view objects\")\nparser.add_argument('--allow_floating_objects', default=0, type=int,\n help=\"Boolean flag for whether to allow floating objects\")\nparser.add_argument('--include_inside_config', default=0, type=float,\n help=\"Include 'x inside y' scenes \")\nparser.add_argument('--percent_inside_samples', default=0.1, type=float,\n help=\"Percentage of scenes which will have 'inside' layout\"),\nparser.add_argument('--back_front_only_flag', default=0, type=int,\n help=\"Flag for rendering samples with only configurations 'back' and 'front'\")\nparser.add_argument('--margin', default=0.0, type=float,\n help=\"Along all cardinal directions (left, right, front, back), all \" +\n \"objects will be at least this distance apart. This makes resolving \" +\n \"spatial relationships slightly less ambiguous.\")\nparser.add_argument('--min_pixels_per_object', default=20, type=int,\n help=\"All objects will have at least this many visible pixels in the \" +\n \"final rendered images; this ensures that no objects are fully \" +\n \"occluded by other objects.\")\nparser.add_argument('--max_retries', default=50, type=int,\n help=\"The number of times to try placing an object before giving up and \" +\n \"re-placing all objects in the scene.\")\nparser.add_argument('--render_from_given_objects', default=0, type=int,\n help=\"Flag for rendering samples using given object descriptions. Uses the dictionary\" + \n \"specified in the argument 'given_objects_json_path'\")\nparser.add_argument('--given_objects_json_path', default='given_objects.json',\n help=\"Path for the object descriptions to be used for rendering if \" +\n \" the flag 'render_from_given_objects' is on\")\n\n# Output settings\nparser.add_argument('--start_idx', default=0, type=int,\n help=\"The index at which to start for numbering rendered images. Setting \" +\n \"this to non-zero values allows you to distribute rendering across \" +\n \"multiple machines and recombine the results later.\")\nparser.add_argument('--num_images', default=5, type=int,\n help=\"The number of images to render\")\nparser.add_argument('--filename_prefix', default='CLEVR',\n help=\"This prefix will be prepended to the rendered images and JSON scenes\")\nparser.add_argument('--split', default='new',\n help=\"Name of the split for which we are rendering. 
This will be added to \" +\n \"the names of rendered images, and will also be stored in the JSON \" +\n \"scene structure for each image.\")\nparser.add_argument('--dataset_name', default='CLEVR_DATASET_DEFAULT',\n help=\"Name of the main folder\")\nparser.add_argument('--output_image_dir', default='../output/{}/images/',\n help=\"The directory where output images will be stored. It will be \" +\n \"created if it does not exist.\")\nparser.add_argument('--output_scene_dir', default='../output/{}/scenes/',\n help=\"The directory where output JSON scene structures will be stored. \" +\n \"It will be created if it does not exist.\")\nparser.add_argument('--output_tree_dir', default='../output/{}/trees/',\n help=\"The directory where output trees will be stored. It will be \" +\n \"created if it does not exist.\")\nparser.add_argument('--output_depth_dir', default='../output/{}/depth/',\n help=\"The directory where output trees will be stored. It will be \" +\n \"created if it does not exist.\")\nparser.add_argument('--output_scene_file', default='../output/{}/CLEVR_scenes.json',\n help=\"Path to write a single JSON file containing all scene information\")\nparser.add_argument('--output_blend_dir', default='../output/{}/voxels/',\n help=\"The directory where blender scene files will be stored, if the \" +\n \"user requested that these files be saved using the \" +\n \"--save_blendfiles flag; in this case it will be created if it does \" +\n \"not already exist.\")\nparser.add_argument('--save_depth_maps', type=int, default=0,\n help=\"The flag for whether to save a depth map\")\nparser.add_argument('--save_blendfiles', type=int, default=1,\n help=\"Setting --save_blendfiles 1 will cause the blender scene file for \" +\n \"each generated image to be stored in the directory specified by \" +\n \"the --output_blend_dir flag. These files are not saved by default \" +\n \"because they take up ~5-10MB each.\")\nparser.add_argument('--version', default='1.0',\n help=\"String to store in the \\\"version\\\" field of the generated JSON file\")\nparser.add_argument('--license',\n default=\"Creative Commons Attribution (CC-BY 4.0)\",\n help=\"String to store in the \\\"license\\\" field of the generated JSON file\")\nparser.add_argument('--date', default=dt.today().strftime(\"%m/%d/%Y\"),\n help=\"String to store in the \\\"date\\\" field of the generated JSON file; \" +\n \"defaults to today's date\")\n\n# Rendering options\nparser.add_argument('--use_gpu', default=0, type=int,\n help=\"Setting --use_gpu 1 enables GPU-accelerated rendering using CUDA. 
\" +\n \"You must have an NVIDIA GPU with the CUDA toolkit installed for \" +\n \"to work.\")\nparser.add_argument('--width', default=64, type=int,\n help=\"The width (in pixels) for the rendered images\")\nparser.add_argument('--height', default=64, type=int,\n help=\"The height (in pixels) for the rendered images\")\nparser.add_argument('--key_light_jitter', default=1.0, type=float,\n help=\"The magnitude of random jitter to add to the key light position.\")\nparser.add_argument('--fill_light_jitter', default=1.0, type=float,\n help=\"The magnitude of random jitter to add to the fill light position.\")\nparser.add_argument('--back_light_jitter', default=1.0, type=float,\n help=\"The magnitude of random jitter to add to the back light position.\")\nparser.add_argument('--camera_jitter', default=0.5, type=float,\n help=\"The magnitude of random jitter to add to the camera position\")\nparser.add_argument('--render_num_samples', default=512, type=int,\n help=\"The number of samples to use when rendering. Larger values will \" +\n \"result in nicer images but will cause rendering to take longer.\")\nparser.add_argument('--render_min_bounces', default=8, type=int,\n help=\"The minimum number of bounces to use for rendering.\")\nparser.add_argument('--render_max_bounces', default=8, type=int,\n help=\"The maximum number of bounces to use for rendering.\")\nparser.add_argument('--render_tile_size', default=256, type=int,\n help=\"The tile size to use for rendering. This should not affect the \" +\n \"quality of the rendered image but may affect the speed; CPU-based \" +\n \"rendering may achieve better performance using smaller tile sizes \" +\n \"while larger tile sizes may be optimal for GPU-based rendering.\")\nparser.add_argument('--train_flag', default=1, type=int,\n help=\"generate training or test, set to 0 for testing\")\nparser.add_argument('--zero_shot', default=0, type=int,\n help=\"Whether to use zero-shot setting when generate the data\")\nparser.add_argument('--add_layout_prob', default=0.5, type=float,\n help=\"probability of adding an extra layout layer\")\n\n\ndef main(args):\n num_digits = 6\n prefix = '%s_%s_' % (args.filename_prefix, args.split)\n img_template = '%s%%0%dd.png' % (prefix, num_digits)\n scene_template = '%s%%0%dd.json' % (prefix, num_digits)\n obj_template = '%s%%0%dd.obj' % (prefix, num_digits)\n tree_template = '%s%%0%dd.tree' % (prefix, num_digits)\n depth_template = '%s%%0%dd.png' % (prefix, num_digits)\n\n if args.train_flag != 0:\n args.train_flag = True\n else:\n args.train_flag = False\n\n if args.zero_shot != 0:\n args.zero_shot = True\n else:\n args.zero_shot = False\n\n args.output_image_dir = args.output_image_dir.format(args.dataset_name)\n args.output_scene_dir = args.output_scene_dir.format(args.dataset_name)\n args.output_tree_dir = args.output_tree_dir.format(args.dataset_name)\n args.output_depth_dir = args.output_depth_dir.format(args.dataset_name)\n args.output_blend_dir = args.output_blend_dir.format(args.dataset_name)\n args.output_scene_file = args.output_scene_file.format(args.dataset_name)\n\n if args.train_flag:\n split_output_image_dir = os.path.join(args.output_image_dir, 'train/')\n split_output_tree_dir = os.path.join(args.output_tree_dir, 'train/')\n split_output_scene_dir = os.path.join(args.output_scene_dir, 'train/')\n split_output_blend_dir = os.path.join(args.output_blend_dir, 'train/')\n split_output_depth_dir = os.path.join(args.output_depth_dir, 'train/')\n else:\n split_output_image_dir = 
os.path.join(args.output_image_dir, 'test/')\n        split_output_tree_dir = os.path.join(args.output_tree_dir, 'test/')\n        split_output_scene_dir = os.path.join(args.output_scene_dir, 'test/')\n        split_output_blend_dir = os.path.join(args.output_blend_dir, 'test/')\n        split_output_depth_dir = os.path.join(args.output_depth_dir, 'test/')\n\n    img_template = os.path.join(split_output_image_dir, img_template)\n    scene_template = os.path.join(split_output_scene_dir, scene_template)\n    obj_template = os.path.join(split_output_blend_dir, obj_template)\n    tree_template = os.path.join(split_output_tree_dir, tree_template)\n    depth_template = os.path.join(split_output_depth_dir, depth_template)\n\n    if not os.path.isdir(args.output_image_dir):\n        os.makedirs(args.output_image_dir)\n    if not os.path.isdir(split_output_image_dir):\n        os.makedirs(split_output_image_dir)\n    if not os.path.isdir(args.output_scene_dir):\n        os.makedirs(args.output_scene_dir)\n    if not os.path.isdir(split_output_scene_dir):\n        os.makedirs(split_output_scene_dir)\n    if not os.path.isdir(args.output_tree_dir):\n        os.makedirs(args.output_tree_dir)\n    if not os.path.isdir(split_output_tree_dir):\n        os.makedirs(split_output_tree_dir)\n\n    if args.save_depth_maps == 1 and not os.path.isdir(split_output_depth_dir):\n        os.makedirs(split_output_depth_dir)\n\n    if args.save_blendfiles == 1 and not os.path.isdir(args.output_blend_dir):\n        os.makedirs(split_output_blend_dir)\n\n    all_images = list(sorted(os.listdir(split_output_image_dir)))\n    if len(all_images) > 0:\n        max_idx = int(all_images[-1][10:])\n        start_idx = max_idx + 1\n    else:\n        start_idx = args.start_idx\n\n    all_scene_paths = []\n    for i in range(args.num_images):\n        img_path = img_template % (i + start_idx)\n        scene_path = scene_template % (i + start_idx)\n        tree_path = tree_template % (i + start_idx)\n        all_scene_paths.append(scene_path)\n        obj_path = None\n        depth_path = None\n        if args.save_blendfiles == 1:\n            obj_path = obj_template % (i + start_idx)\n        if args.save_depth_maps == 1:\n            depth_path = depth_template % (i + start_idx)\n\n        while True:\n            try:\n                render_scene_with_tree(args,\n                                       tree_max_level=3,\n                                       output_index=(i + start_idx),\n                                       output_split=args.split,\n                                       output_image=img_path,\n                                       output_scene=scene_path,\n                                       output_blendfile=obj_path,\n                                       output_tree=tree_path,\n                                       depth_path=depth_path\n                                       )\n                break\n            except Exception as e:\n                print(e)\n                import traceback\n                print(traceback.print_tb(e.__traceback__))\n                import shutil\n                import glob\n                if os.path.exists(img_path.replace('.png','')):\n                    shutil.rmtree(img_path.replace('.png',''))\n                if os.path.isfile(scene_path):\n                    os.remove(scene_path)\n                voxel_files_del = glob.glob(obj_path.replace('.obj','*'))\n                for file in voxel_files_del:\n                    os.remove(file)\n                if os.path.isfile(scene_path):\n                    os.remove(scene_path)\n                if depth_path is not None and os.path.exists(depth_path.replace('.png','')):\n                    shutil.rmtree(depth_path.replace('.png',''))\n        # print(img_path, scene_path, obj_path, tree_path, depth_path)\n        # import sys\n        # sys.exit()\n\n\n    # After rendering all images, combine the JSON files for each scene into a\n    # single JSON file.\n    all_scenes = []\n    for scene_path in all_scene_paths:\n        with open(scene_path, 'r') as f:\n            all_scenes.append(json.load(f))\n    output = {\n        'info': {\n            'date': args.date,\n            'version': args.version,\n            'split': args.split,\n            'license': args.license,\n        },\n        'scenes': all_scenes\n    }\n    with open(args.output_scene_file, 'w') as f:\n        json.dump(output, f)\n\n\n'''\n\n    Tree-based generation\n\n'''\n\ndef get_calibration_matrix_K_from_blender(camd):\n    from mathutils import Matrix\n    f_in_mm = camd.lens\n    scene = bpy.context.scene\n    resolution_x_in_px = scene.render.resolution_x\n    resolution_y_in_px = scene.render.resolution_y\n    scale = scene.render.resolution_percentage / 100\n    sensor_width_in_mm = camd.sensor_width\n    sensor_height_in_mm = camd.sensor_height\n    pixel_aspect_ratio = scene.render.pixel_aspect_x / scene.render.pixel_aspect_y\n    if (camd.sensor_fit == 'VERTICAL'):\n        # the sensor height is fixed (sensor fit is horizontal), \n        # the sensor width is effectively changed with the pixel aspect ratio\n        s_u = resolution_x_in_px * scale / sensor_width_in_mm / pixel_aspect_ratio \n        s_v = resolution_y_in_px * scale / sensor_height_in_mm\n    else: # 'HORIZONTAL' and 'AUTO'\n        # the sensor width is fixed (sensor fit is horizontal), \n        # the sensor height is effectively changed with the pixel aspect ratio\n        pixel_aspect_ratio = scene.render.pixel_aspect_x / scene.render.pixel_aspect_y\n        s_u = resolution_x_in_px * scale / sensor_width_in_mm\n        s_v = resolution_y_in_px * scale * pixel_aspect_ratio / sensor_height_in_mm\n\n    # print(resolution_x_in_px)\n    # print(resolution_y_in_px)\n    # print(scene.render.pixel_aspect_x)\n    # print(scene.render.pixel_aspect_y)\n    # print(f_in_mm)\n    # print(s_u, s_v)\n    # print(pixel_aspect_ratio)\n    # print(sensor_width_in_mm)\n    # print(sensor_height_in_mm)\n    # import sys\n    # sys.exit()\n\n    # Parameters of intrinsic calibration matrix K\n    alpha_u = f_in_mm * s_u\n    alpha_v = f_in_mm * s_v\n    u_0 = resolution_x_in_px * scale / 2\n    v_0 = resolution_y_in_px * scale / 2\n    skew = 0 # only use rectangular pixels\n\n    K = Matrix(\n        ((alpha_u, skew,    u_0),\n        (    0  , alpha_v, v_0),\n        (    0  , 0,        1 )))\n    return K\n\n\n\n# Returns camera rotation and translation matrices from Blender.\n# \n# There are 3 coordinate systems involved:\n#    1. The World coordinates: \"world\"\n#       - right-handed\n#    2. The Blender camera coordinates: \"bcam\"\n#       - x is horizontal\n#       - y is up\n#       - right-handed: negative z look-at direction\n#    3. The desired computer vision camera coordinates: \"cv\"\n#       - x is horizontal\n#       - y is down (to align to the actual pixel coordinates \n#         used in digital images)\n#       - right-handed: positive z look-at direction\ndef get_3x4_RT_matrix_from_blender(cam):\n    # bcam stands for blender camera\n    R_bcam2cv = Matrix(\n        ((1, 0,  0),\n         (0, -1, 0),\n         (0, 0, -1)))\n\n    # Transpose since the rotation is object rotation, \n    # and we want coordinate rotation\n    # R_world2bcam = cam.rotation_euler.to_matrix().transposed()\n    # T_world2bcam = -1*R_world2bcam * location\n    #\n    # Use matrix_world instead to account for all constraints\n    location, rotation = cam.matrix_world.decompose()[0:2]\n    R_world2bcam = rotation.to_matrix().transposed()\n\n    # Convert camera location to translation vector used in coordinate changes\n    # T_world2bcam = -1*R_world2bcam*cam.location\n    # Use location from matrix_world to account for constraints: \n    T_world2bcam = -1*R_world2bcam * location\n\n    # Build the coordinate transform matrix from world to computer vision camera\n    R_world2cv = R_bcam2cv*R_world2bcam\n    T_world2cv = R_bcam2cv*T_world2bcam\n\n    # put into 3x4 matrix\n    RT = Matrix((\n        R_world2cv[0][:] + (T_world2cv[0],),\n        R_world2cv[1][:] + (T_world2cv[1],),\n        R_world2cv[2][:] + (T_world2cv[2],)\n    ))\n    return RT\n\ndef get_3x4_P_matrix_from_blender(cam):\n    K = get_calibration_matrix_K_from_blender(cam.data)\n    RT = get_3x4_RT_matrix_from_blender(cam)\n    return K*RT, K, RT\n\n\ndef get_calib():\n    from mathutils import Matrix\n    from math import tan\n\n    scn = bpy.data.scenes['Scene']\n    cam = bpy.data.cameras['Camera']\n    w = scn.render.resolution_x*scn.render.resolution_percentage/100.\n    h = scn.render.resolution_y*scn.render.resolution_percentage/100.\n\n    C = Matrix().to_3x3()\n    C[0][0] = -w/2 / tan(cam.angle/2)\n    ratio = w/h\n    C[1][1] = -h/2. 
/ tan(cam.angle/2) * ratio\n    C[0][2] = w / 2.\n    C[1][2] = h / 2.\n    C[2][2] = 1.\n    C.transpose()\n    print(cam.angle)\n    return C\n\n\ndef render_scene_with_tree(args,\n                           tree_max_level=3,\n                           output_index=0,\n                           output_split='none',\n                           output_image='render.png',\n                           output_scene='render_json',\n                           output_blendfile=None,\n                           output_tree='tree.tree',\n                           depth_path=None,\n                           ):\n    # Load the main blendfile\n    bpy.ops.wm.open_mainfile(filepath=args.base_scene_blendfile)\n\n    # Load materials\n    utils.load_materials(args.material_dir)\n\n    # Set render arguments so we can get pixel coordinates later.\n    # We use functionality specific to the CYCLES renderer so BLENDER_RENDER\n    # cannot be used.\n    render_args = bpy.context.scene.render\n    render_args.engine = \"CYCLES\"\n    # render_args.filepath = output_image\n    render_args.resolution_x = args.width\n    render_args.resolution_y = args.height\n    render_args.resolution_percentage = 100\n    render_args.tile_x = args.render_tile_size\n    render_args.tile_y = args.render_tile_size\n    if args.use_gpu == 1:\n        # Blender changed the API for enabling CUDA at some point\n        if bpy.app.version < (2, 78, 0):\n            bpy.context.user_preferences.system.compute_device_type = 'CUDA'\n            bpy.context.user_preferences.system.compute_device = 'CUDA_0'\n        else:\n            cycles_prefs = bpy.context.user_preferences.addons['cycles'].preferences\n            cycles_prefs.compute_device_type = 'CUDA'\n\n    # Some CYCLES-specific stuff\n    bpy.data.worlds['World'].cycles.sample_as_light = True\n    bpy.context.scene.cycles.blur_glossy = 2.0\n    bpy.context.scene.cycles.samples = args.render_num_samples\n    bpy.context.scene.cycles.transparent_min_bounces = args.render_min_bounces\n    bpy.context.scene.cycles.transparent_max_bounces = args.render_max_bounces\n    if args.use_gpu == 1:\n        bpy.context.scene.cycles.device = 'GPU'\n\n    # Setup CompositeNodeMapRange for depth maps if flag is on\n    if args.save_depth_maps:\n        bpy.context.scene.use_nodes = True\n        blender_tree = bpy.context.scene.node_tree\n        blender_tree.nodes.new('CompositorNodeMapRange')\n        # Range of depth (Hacky values set according to the location of the cameras for this project)\n        blender_tree.nodes[\"Map Range\"].inputs[\"From Min\"].default_value = 0\n        blender_tree.nodes[\"Map Range\"].inputs[\"From Max\"].default_value = 100 \n\n    # This will give ground-truth information about the scene and its objects\n    scene_struct = {\n        'split': output_split,\n        'image_index': output_index,\n        'image_filename': os.path.basename(output_image),\n        'objects': [],\n        'directions': {},\n    }\n\n    # Put a plane on the ground so we can compute cardinal directions\n    bpy.ops.mesh.primitive_plane_add(radius=args.scene_size)\n    plane = bpy.context.object\n\n    # Figure out the left, up, and behind directions along the plane and record\n    # them in the scene structure\n    camera = bpy.data.objects['Camera']\n\n    # Assign 90,30 as the base scene wrt which trees are generated\n    # 90, 30 corresponds to 0,30 in Ricson's code\n    # camera.location = obj_centered_camera_pos(args.radius, 90.0, 30.0)\n\n    plane_normal = plane.data.vertices[0].normal\n    print('The plane normal is: ', plane_normal)\n    cam_behind = camera.matrix_world.to_quaternion() * Vector((0, 0, -1))\n    cam_left = camera.matrix_world.to_quaternion() * Vector((-1, 0, 0))\n    cam_up = camera.matrix_world.to_quaternion() * Vector((0, 1, 0))\n    plane_behind = (cam_behind - cam_behind.project(plane_normal)).normalized()\n    plane_left = (cam_left - cam_left.project(plane_normal)).normalized()\n    plane_up = cam_up.project(plane_normal).normalized()\n\n\n    # Delete the plane; we only 
used it for normals anyway. The base scene file\n    # contains the actual ground plane.\n    utils.delete_object(plane)\n\n    # Save all six axis-aligned directions in the scene struct\n    scene_struct['directions']['behind'] = tuple(plane_behind)\n    scene_struct['directions']['front'] = tuple(-plane_behind)\n    scene_struct['directions']['left'] = tuple(plane_left)\n    scene_struct['directions']['right'] = tuple(-plane_left)\n    scene_struct['directions']['above'] = tuple(plane_up)\n    scene_struct['directions']['below'] = tuple(-plane_up)\n\n    if args.render_from_given_objects:\n        # Read the specified objects json and render the given objects\n        # with open(args.given_objects_json_path, 'rb') as f:\n        #     given_objects = pickle.load(f)\n        # add_objects_from_given_trees(args, given_objects)\n\n        with open(args.given_objects_json_path, 'r') as f:\n            given_objects = json.load(f)\n        add_objects_from_given_trees(args, given_objects)\n\n        # Render the scene for all thetas and phis and dump the scene data structure\n        # Ricson's code needs an offset of 90 for the thetas/phis to align.\n        offset = 90\n        if args.all_views:\n            THETAS = list(range(0+offset, 360+offset, 30))\n            PHIS = list(range(20, 80, 20))\n            PHIS.insert(0, 12)\n        else:\n            THETAS = list(range(0+offset, 360+offset, 90))\n            PHIS = [40]\n\n        image_name = os.path.basename(output_image).split('.png')[0]\n        output_image = os.path.join(os.path.dirname(output_image), image_name)\n\n        # Render original view\n        render_args.filepath = os.path.join(output_image, image_name + '_orig.png')\n        while True:\n            try:\n                bpy.ops.render.render(write_still=True)\n                break\n            except Exception as e:\n                print(e)\n\n        # Render all other views\n        for theta in THETAS:\n            for phi in PHIS:\n                start = time.time()\n                camera.location = obj_centered_camera_pos(args.radius, theta, phi)\n                render_args.filepath = os.path.join(output_image, image_name + '_' + str(theta - offset) + '_' + str(phi) + '.png')\n                while True:\n                    try:\n                        bpy.ops.render.render(write_still=True)\n                        break\n                    except Exception as e:\n                        print(e)\n        import sys\n        sys.exit()\n    else:\n        # Now make some random objects\n        objects, blender_objects, phrase_tree = add_objects_from_tree(scene_struct, args, camera, tree_max_level)\n\n    # Store scene struct\n    scene_struct['objects'] = objects\n    scene_struct['relationships'] = compute_all_relationships(scene_struct)\n\n    # *****************************************************************************\n\n    '''\n    # import bmesh\n\n    # bm = bmesh.new()\n    # bmesh.ops.create_monkey(bm)\n    # mesh = bpy.data.meshes.new('Monkey')\n    # bm.to_mesh(mesh)\n    # bm.free()\n    # obj = bpy.data.objects.new('Object', mesh)\n    # obj.location = (1,1,1)\n    # bpy.context.scene.objects.link(obj)\n    # bpy.context.scene.update()\n\n    # Debug zone\n    thetas = [90]\n    phis = [20]\n\n    for theta, phi in zip(thetas, phis):\n        filename = str(theta) + '_' + str(phi)\n\n        camera.location = obj_centered_camera_pos(args.radius, theta, phi)\n\n        bpy.ops.wm.save_as_mainfile(filepath='output_blendfile_{}.blend'.format(filename))\n        render_args.filepath = 'rendered_image_{}.png'.format(filename)\n        bpy.ops.render.render(write_still=True)\n\n\n        bpy.context.scene.use_nodes = True\n        blender_tree = bpy.context.scene.node_tree\n        blender_tree.nodes.new('CompositorNodeMapRange')\n        blender_tree.nodes[\"Map Range\"].inputs[\"From Min\"].default_value = 0\n        blender_tree.nodes[\"Map Range\"].inputs[\"From Max\"].default_value = 100 \n        blender_tree.links.new(blender_tree.nodes[\"Render Layers\"].outputs[\"Depth\"], blender_tree.nodes[\"Map Range\"].inputs[\"Value\"])\n        blender_tree.links.new(blender_tree.nodes[\"Map Range\"].outputs[\"Value\"], blender_tree.nodes[\"Composite\"].inputs[\"Image\"])\n        bpy.context.scene.render.image_settings.file_format = 'OPEN_EXR'\n        bpy.data.scenes['Scene'].render.filepath = 'rendered_depth_{}.png'.format(filename)\n        bpy.ops.render.render(write_still=True)\n\n\n        bpy.ops.object.select_all(action='DESELECT')\n        for obj in bpy.data.objects:\n            if (obj.type == 'MESH' and obj.name != 'Ground'):\n                obj.select = True\n\n        bpy.ops.export_scene.obj(\n            filepath='output_{}.obj'.format(filename),\n            use_selection=True,\n            use_materials=True\n        )\n\n        # cmd = subprocess.Popen([\"binvox\", \"-d\", \"64\", \"-e\", \"-bb\" \" -10\" \" -10\" \" -10\" \" 10\" \" 10\" \" 10\", \" output.obj\"], stdout=None, close_fds=True)\n        command = 'binvox -d 64 -e -bb {} {} {} {} {} {} -t binvox -e -mb output_{}.obj'.format(-args.scene_size, -args.scene_size, -args.scene_size, args.scene_size, args.scene_size, args.scene_size, filename)\n        os.system(command)\n\n        command = 'binvox -d 64 -e -bb {} {} {} {} {} {} -t schematic -e -mb output_{}.obj'.format(-args.scene_size, -args.scene_size, -args.scene_size, args.scene_size, args.scene_size, args.scene_size, filename)\n        os.system(command)\n\n        os.remove('output_{}.obj'.format(filename))\n        os.remove('output_{}.mtl'.format(filename))\n\n        voxel_file = 'output_{}.schematic'.format(filename)\n        cmd = subprocess.Popen([\"python\", \"write_voxels.py\", voxel_file, str(args.width)], stdout=subprocess.PIPE, close_fds=True)\n        output = cmd.communicate(timeout=None)[0]\n\n        # Read the voxels\n        temp_np_file = voxel_file.split('.schematic')[0] + '.npy'\n        blocks = np.load(temp_np_file)\n        block_scene = np.zeros_like(blocks)\n        ids = np.unique(blocks)\n        for i in ids:\n            if i != 0:\n                object_idx = np.where(blocks == i)\n                x_top = object_idx[0].min()\n                y_top = object_idx[1].min()\n                z_top = object_idx[2].min()\n\n                x_bottom = object_idx[0].max()\n                y_bottom = object_idx[1].max()\n                z_bottom = object_idx[2].max()\n\n                block_scene[x_top:x_bottom, y_top:y_bottom, z_top:z_bottom] = 1.0\n\n        translate = [0.0, 0.0, 0.0]\n        model = binvox_rw.Voxels(np.transpose(block_scene, [2, 1, 0]) > 0.5, block_scene.shape, translate, 1.0, 'xyz')\n        with open('bbox_scene_{}.binvox'.format(filename), 'wb') as f:\n            model.write(f)\n\n    import sys\n    sys.exit()\n    '''\n\n    # *****************************************************************************\n\n\n    # Keep commented\n    # if output_blendfile is not None:\n    #     bpy.ops.wm.save_as_mainfile(filepath=output_blendfile)\n\n    # Write a temp .obj file for binvox\n    bpy.ops.object.select_all(action='DESELECT')\n    for obj in bpy.data.objects:\n        if (obj.type == 'MESH' and obj.name != 'Ground'):\n            obj.select = True\n\n    bpy.ops.export_scene.obj(\n        filepath=output_blendfile,\n        use_selection=True,\n        use_materials=True\n    )\n\n\n    # Write .schematic file with voxels using binvox and delete .obj files\n    if output_blendfile is not None:\n        # cmd = subprocess.Popen([\"./binvox\", \"-d\", \"64\", \"-t\", \"schematic\", \"-e\", \"-mb\", output_blendfile], stdout=subprocess.PIPE, close_fds=True)\n        # cmd = subprocess.Popen([\"binvox\", \"-d\", \"64\", \"-t\", \"schematic\", \"-e\", \"-mb -bb -10 -10 -10 10 10 10\", output_blendfile], stdout=None, close_fds=True)\n        # output = cmd.communicate(timeout=None)[0]\n        command = './binvox -d 64 -e -bb {} {} {} {} {} {} -t schematic -e -mb {}'.format(-args.scene_size, -args.scene_size, -args.scene_size, args.scene_size, args.scene_size, args.scene_size, output_blendfile)\n        os.system(command)\n        \n        command = './binvox -d 64 -e -bb {} {} {} {} {} {} -t binvox -e -mb {}'.format(-args.scene_size, -args.scene_size, -args.scene_size, args.scene_size, args.scene_size, args.scene_size, output_blendfile)\n        os.system(command)\n\n        if os.path.exists(output_blendfile):\n            os.remove(output_blendfile)\n            os.remove(output_blendfile.split('.obj')[0] + '.mtl')\n\n        voxel_file = output_blendfile.split('.obj')[0] + '.schematic'\n        command = 'python write_voxels.py {} {}'.format(voxel_file, str(args.width))\n        os.system(command)\n        # cmd = subprocess.Popen([\"python\", \"write_voxels.py\", voxel_file, str(args.width)], stdout=subprocess.PIPE, close_fds=True)\n        # output = cmd.communicate(timeout=None)[0]\n\n        # Read the voxels\n        temp_np_file = voxel_file.split('.schematic')[0] + '.npy'\n        blocks = np.load(temp_np_file)\n\n        if os.path.exists(temp_np_file):\n            os.remove(temp_np_file)\n\n    # Render the scene for all thetas and phis and dump the scene data structure\n    # Ricson's code needs an offset of 90 for the thetas/phis to align.\n    offset = 90\n    if args.all_views:\n        THETAS = list(range(0+offset, 360+offset, 30))\n        PHIS = list(range(20, 80, 20))\n        PHIS.insert(0, 12)\n    else:\n        THETAS = list(range(0+offset, 360+offset, 90))\n        PHIS = [40]\n\n    image_name = os.path.basename(output_image).split('.png')[0]\n    output_image = os.path.join(os.path.dirname(output_image), image_name)\n\n    # Remove the dummy objects if there's just one real object before rendering\n    bpy.ops.object.select_all(action='SELECT')\n    objs = bpy.data.objects\n    if len(objects) == 1:\n        for obj in bpy.data.objects:\n            if obj.type == 'MESH' and obj.name != 'Ground' and obj.scale[0] <= 0.25:\n                objs.remove(obj, do_unlink=True)\n\n    render_args.filepath = os.path.join(output_image, image_name + '_orig.png')\n    while True:\n        try:\n            bpy.ops.render.render(write_still=True)\n            break\n        except Exception as e:\n            print(e)\n\n    for theta in THETAS:\n        for phi in PHIS:\n            start = time.time()\n            camera.location = obj_centered_camera_pos(args.radius, theta, phi)\n            render_args.filepath = os.path.join(output_image, image_name + '_' + str(theta - offset) + '_' + str(phi) + '.png')\n            while True:\n                try:\n                    bpy.ops.render.render(write_still=True)\n                    break\n                except Exception as e:\n                    print(e)\n            view_key = str(theta - offset) + '_' + str(phi)\n            scene_struct = get_2d_bboxes(args, camera, scene_struct, view_key)\n            print('*'*30)\n            print('images')\n            print(time.time() - start)\n            print('*'*30)\n\n    # Render depth maps if flag is on\n    if args.save_depth_maps:\n        depth_path = os.path.join(os.path.dirname(depth_path), image_name)\n        blender_tree.links.new(blender_tree.nodes[\"Render Layers\"].outputs[\"Depth\"], blender_tree.nodes[\"Map Range\"].inputs[\"Value\"])\n        blender_tree.links.new(blender_tree.nodes[\"Map Range\"].outputs[\"Value\"], blender_tree.nodes[\"Composite\"].inputs[\"Image\"])\n        bpy.context.scene.render.image_settings.file_format = 'OPEN_EXR'\n        for theta in THETAS:\n            for phi in PHIS:\n                start = time.time()\n                camera.location = obj_centered_camera_pos(args.radius, theta, phi)\n                bpy.data.scenes['Scene'].render.filepath = os.path.join(depth_path, image_name + '_' + str(theta - offset) + '_' + str(phi))\n                while True:\n                    try:\n                        bpy.ops.render.render(write_still=True)\n                        break\n                    except Exception as e:\n                        print(e)\n                print('*'*30)\n                print('depth')\n                print(time.time() - start)\n                print('*'*30)\n\n    # Refine the tree, remove function_objs\n    phrase_tree = refine_tree_info(phrase_tree, blocks)\n    phrase_tree = remove_function_obj(phrase_tree)\n    # tree = add_parent(tree)\n\n    with open(output_tree, 'wb') as f:\n        
pickle.dump(phrase_tree, f, protocol=2)\n\n    with open(output_scene, 'w') as f:\n        json.dump(scene_struct, f, indent=2)\n\n\ndef get_2d_bboxes(args, camera, scene_struct, view_key):\n    \"\"\"\n    Get 2d bboxes for the current camera view\n    \"\"\"\n    bpy.ops.mesh.primitive_plane_add(radius=args.scene_size)\n    plane = bpy.context.object\n\n    # Figure out the left, up, and behind directions along the plane and record\n    # them in the scene structure\n    plane_normal = plane.data.vertices[0].normal\n\n    # Delete the plane; we only used it for normals anyway. The base scene file\n    # contains the actual ground plane.\n    utils.delete_object(plane)\n\n    direction_dict = {}\n    direction_dict['directions'] = {}\n    cam_behind = camera.matrix_world.to_quaternion() * Vector((0, 0, -1))\n    cam_left = camera.matrix_world.to_quaternion() * Vector((-1, 0, 0))\n    cam_up = camera.matrix_world.to_quaternion() * Vector((0, 1, 0))\n    plane_behind = (cam_behind - cam_behind.project(plane_normal)).normalized()\n    plane_left = (cam_left - cam_left.project(plane_normal)).normalized()\n    plane_up = cam_up.project(plane_normal).normalized()\n\n    # Save all six axis-aligned directions in the scene struct\n    direction_dict['directions']['behind'] = tuple(plane_behind)\n    direction_dict['directions']['front'] = tuple(-plane_behind)\n    direction_dict['directions']['left'] = tuple(plane_left)\n    direction_dict['directions']['right'] = tuple(-plane_left)\n    direction_dict['directions']['above'] = tuple(plane_up)\n    direction_dict['directions']['below'] = tuple(-plane_up)\n    \n\n    with open(args.properties_json, 'r') as f:\n        properties = json.load(f)\n\n    size_mapping = properties['sizes']\n\n    for i, obj in enumerate(scene_struct['objects']):\n        obj_loc = obj['3d_coords']\n        obj_name_out = obj['shape']\n        size_name = obj['size']\n        r = size_mapping[size_name]\n        pixel_coords_lefttop, pixel_coords_rightbottom = get_bbox(args, camera, direction_dict, Vector(obj_loc), obj_name_out, r)\n        if args.filter_out_of_view:\n            assert args.width == args.height\n            flag_1 = 0 <= pixel_coords_lefttop[0] < args.width\n            flag_2 = 0 <= pixel_coords_lefttop[1] < args.width\n            flag_3 = 0 <= pixel_coords_rightbottom[0] < args.width\n            flag_4 = 0 <= pixel_coords_rightbottom[1] < args.width\n            if not (flag_1 and flag_2 and flag_3 and flag_4):\n                raise Exception('Object out of view')\n\n        scene_struct['objects'][i]['bbox_2d'][view_key] = {}\n        scene_struct['objects'][i]['bbox_2d'][view_key]['pixel_coords_lefttop'] = pixel_coords_lefttop\n        scene_struct['objects'][i]['bbox_2d'][view_key]['pixel_coords_rightbottom'] = pixel_coords_rightbottom\n    return scene_struct\n\n\ndef add_objects_from_given_trees(args, objects):\n    \"\"\"\n    Add given objects at the given positions\n    \"\"\"\n    # Load the property file\n    with open(args.properties_json, 'r') as f:\n        properties = json.load(f)\n        color_name_to_rgba = {}\n        for name, rgb in properties['colors'].items():\n            rgba = [float(c) / 255.0 for c in rgb] + [1.0]\n            color_name_to_rgba[name] = rgba\n        material_mapping = properties['materials']\n        object_mapping = properties['shapes']\n        size_mapping = properties['sizes']\n\n    for specified_obj in objects[:1]:\n        shape = specified_obj['shape']\n        size = specified_obj['size']\n        color = specified_obj['color']\n        material = specified_obj['texture']\n        location = specified_obj['location']\n\n        obj_name = object_mapping[shape]\n        scale = size_mapping[size]\n\n        # Add the given object at specified location\n        obj_name = utils.add_given_object(args, obj_name, scale, location)\n\n        # Add material and color to the given object\n        mat_name 
= material_mapping[material]\n        rgba = color_name_to_rgba[color]\n        utils.add_material(mat_name, Color=rgba)\n\ndef add_objects_from_tree(scene_struct, args, camera, tree_max_level):\n    \"\"\"\n    Add random objects to the current blender scene\n    \"\"\"\n    # tree = sample_tree(tree_max_level, add_layout_prob=args.add_layout_prob, zero_shot=args.zero_shot, train=args.train_flag)\n    # tree = sample_tree_flexible(max_layout_level=2, add_layout_prob=0.6, zero_shot=False, train=True, arguments={'fix_num_objs':2})\n\n\n    tree = sample_tree_flexible(args.percent_inside_samples, args.include_inside_config, max_layout_level=2, add_layout_prob=0.8, obj_count=0, zero_shot=False, train=True, arguments={'max_num_objs':args.max_objects, 'min_num_objs':args.min_objects}, back_front_only_flag=args.back_front_only_flag)\n    # tree = sample_tree_flexible(args.percent_inside_samples, args.include_inside_config, max_layout_level=2, add_layout_prob=0.6, obj_count=0, zero_shot=False, train=True, arguments={'fix_num_objs':args.max_objects}, back_front_only_flag=args.back_front_only_flag)\n    specified_objects = extract_objects(tree)\n\n    # Load the property file\n    with open(args.properties_json, 'r') as f:\n        properties = json.load(f)\n        color_name_to_rgba = {}\n        for name, rgb in properties['colors'].items():\n            rgba = [float(c) / 255.0 for c in rgb] + [1.0]\n            color_name_to_rgba[name] = rgba\n        material_mapping = properties['materials']\n        object_mapping = properties['shapes']\n        size_mapping = properties['sizes']\n    print('size mapping:', size_mapping)\n    print('object mapping', object_mapping)\n\n    shape_color_combos = None\n    if args.shape_color_combos_json is not None:\n        with open(args.shape_color_combos_json, 'r') as f:\n            shape_color_combos = list(json.load(f).items())\n\n    positions = []\n    objects = []\n    blender_objects = []\n\n    # Check if the current scene contains inside configuration\n    put_obj_inside = False\n    stored_location = None\n    min_dist = args.min_dist\n    if tree.word == 'inside':\n        put_obj_inside = True\n        min_dist = 0.0\n\n    for obj_counter, specified_obj in enumerate(specified_objects):\n        # Choose a random size\n        size_name = specified_obj.attributes['size'].attr_val\n        # print('\\n'*10)\n        # print(size_name)\n        # print('\\n'*10)\n\n        # with open(\"test_sampled.txt\", \"a\") as myfile:\n        #     myfile.write(size_name + '\\n')\n\n        r = size_mapping[size_name]\n\n        # Try to place the object, ensuring that we don't intersect any existing\n        # objects and that we are more than the desired margin away from all existing\n        # objects along all cardinal directions.\n        num_tries = 0\n        while True:\n            if stored_location:\n                break\n\n            # If we try and fail to place an object too many times, then delete all\n            # the objects in the scene and start over.\n            num_tries += 1\n            if num_tries > args.max_retries:\n                for obj in blender_objects:\n                    utils.delete_object(obj)\n                print(obj_counter)\n                return add_objects_from_tree(scene_struct, args, camera, tree_max_level)\n\n            x = specified_obj.position[0] * scene_struct['directions']['right'][0] + specified_obj.position[1] * \\\n                scene_struct['directions'][\n                'front'][0]\n            y = specified_obj.position[0] * scene_struct['directions']['right'][1] + specified_obj.position[1] * \\\n                scene_struct['directions'][\n                'front'][1]\n\n            # Check to make sure the new object is further than min_dist from all\n            # other objects, and further than margin along the four cardinal directions\n            dists_good = True\n            margins_good = True\n            for (xx, yy, rr) in positions:\n                dx, dy = x - xx, y - yy\n                dist = math.sqrt(dx * dx + dy * dy)\n                if dist - r - 
rr < min_dist:\n print((xx, yy, rr))\n print((x, y, r))\n print('dist is ', dist)\n dists_good = False\n break\n for direction_name in ['left', 'right', 'front', 'behind']:\n direction_vec = scene_struct['directions'][direction_name]\n assert direction_vec[2] == 0\n margin = dx * direction_vec[0] + dy * direction_vec[1]\n if 0 < margin < args.margin:\n print(x, xx)\n print(y, yy)\n print(margin, args.margin, direction_name)\n print('BROKEN MARGIN!')\n margins_good = False\n break\n if not margins_good:\n break\n\n if dists_good and margins_good:\n break\n\n # Choose color and shape\n if shape_color_combos is None:\n # obj_name, obj_name_out = random.choice(object_mapping)\n obj_name_out = specified_obj.object_type\n obj_name = object_mapping[obj_name_out]\n print(obj_name, obj_name_out)\n color_name = specified_obj.attributes['color'].attr_val\n rgba = color_name_to_rgba[color_name]\n else:\n obj_name_out, color_choices = random.choice(shape_color_combos)\n color_name = random.choice(color_choices)\n obj_name = [k for k, v in object_mapping if v == obj_name_out][0]\n rgba = color_name_to_rgba[color_name]\n\n # For cube, adjust the size a bit, and make rotate it to make it face forward\n if obj_name_out == 'cube':\n # r /= math.sqrt(2)\n theta = 45\n else:\n theta = 0\n\n # If inside configuration exists in the sample, store the location for the next object\n # This is used to put the object at the same location, but inside the current object\n if put_obj_inside and stored_location is None:\n stored_location = (x, y)\n\n # Actually add the object to the scene\n obj_name = utils.add_object(args.shape_dir, obj_name, r, (x, y), theta=theta, stored_location=stored_location, put_obj_inside=put_obj_inside, allow_floating=args.allow_floating_objects)\n obj = bpy.context.object\n blender_objects.append(obj)\n positions.append((x, y, r))\n\n # Attach a random material\n mat_name_out = specified_obj.attributes['material'].attr_val\n mat_name = material_mapping[mat_name_out]\n # mat_name, mat_name_out = random.choice(material_mapping)\n\n print(mat_name, mat_name_out)\n utils.add_material(mat_name, Color=rgba)\n\n # Assign block id to material for binvox to work its magic\n object_id = specified_obj.get_block_id()\n bpy.data.objects[obj_name].active_material.name = 'blockid_' + str(object_id)\n\n # Record data about the object in the scene data structure\n pixel_coords_lefttop, pixel_coords_rightbottom = get_bbox(args, camera, scene_struct, obj.location, obj_name_out, r)\n\n # guarantee that objects are all in the image\n if not put_obj_inside:\n if pixel_coords_lefttop[0] < 0 or pixel_coords_lefttop[1] < 0 or pixel_coords_rightbottom[0] >= args.width or \\\n pixel_coords_rightbottom[1] >= args.height:\n for obj in blender_objects:\n utils.delete_object(obj)\n return add_objects_from_tree(scene_struct, args, camera, tree_max_level)\n\n # remove objects that are too small\n if not is_valid_bbox(pixel_coords_lefttop, pixel_coords_rightbottom, size_threshold=args.min_obj_2d_size):\n for obj in blender_objects:\n utils.delete_object(obj)\n return add_objects_from_tree(scene_struct, args, camera, tree_max_level)\n\n specified_obj.bbox = (pixel_coords_lefttop, pixel_coords_rightbottom)\n objects.append({\n 'obj_id': 'blockid_' + str(object_id),\n 'obj_name': obj_name,\n 'shape': obj_name_out,\n 'size': size_name,\n 'material': mat_name_out,\n '3d_coords': tuple(obj.location),\n 'rotation': theta,\n 'pixel_coords_lefttop': pixel_coords_lefttop,\n 'pixel_coords_rightbottom': pixel_coords_rightbottom,\n 
'color': color_name,\n            'bbox_2d': {},\n        })\n\n    # Check that all objects are at least partially visible in the rendered image\n    all_visible = check_visibility(blender_objects, args.min_pixels_per_object)\n    if not all_visible and not put_obj_inside:\n        # If any of the objects are fully occluded then start over; delete all\n        # objects from the scene and place them all again.\n        print('Some objects are occluded; replacing objects')\n        for obj in blender_objects:\n            utils.delete_object(obj)\n        print('-'*300 + 'VISIBILITY' + '-'*300)\n        return add_objects_from_tree(scene_struct, args, camera, tree_max_level)\n\n    # with open(\"test_chosen.txt\", \"a\") as myfile:\n    #     myfile.write(size_name + '\\n')\n\n    # In case of 1 object, add extra objects to center the first object. Later make the new ones invisible in voxels\n    if len(specified_objects) == 1:\n        x_extra, y_extra, r_extra = positions[0]\n        r_extra = 0.2\n        theta_extra = 0\n        rand_id = np.random.randint(1,255)\n        while rand_id == object_id:\n            rand_id = np.random.randint(1,255)\n\n        offset_extra = np.random.uniform(2,4)\n        obj_name_1 = utils.add_object(args.shape_dir, 'Sphere', r_extra, (x_extra + offset_extra, y_extra + offset_extra), theta=theta_extra, put_obj_inside=put_obj_inside)\n        utils.add_material(mat_name, Color=rgba)\n        bpy.data.objects[obj_name_1].active_material.name = 'blockid_' + str(rand_id)\n\n        offset_extra = np.random.uniform(2,4)\n        obj_name_2 = utils.add_object(args.shape_dir, 'Sphere', r_extra, (x_extra - offset_extra, y_extra - offset_extra), theta=theta_extra, put_obj_inside=put_obj_inside)\n        utils.add_material(mat_name, Color=rgba)\n        bpy.data.objects[obj_name_2].active_material.name = 'blockid_' + str(rand_id)\n\n    return objects, blender_objects, tree\n\n\ndef get_bbox(args, camera, scene_struct, obj_loc, obj_type, r):\n    if obj_type == 'sphere':\n        points_3d = [obj_loc + r * vector for vector in get_sphere_unit_vectors(scene_struct['directions'])]\n    elif obj_type == 'cube':\n        points_3d = [obj_loc + r * (\n            Vector(scene_struct['directions']['below']) + Vector(scene_struct['directions']['left']) + Vector(\n                scene_struct['directions']['front'])),\n                     obj_loc + r * (Vector(scene_struct['directions']['below']) + Vector(\n                         scene_struct['directions']['right']) + Vector(scene_struct['directions']['front'])),\n                     obj_loc + r * (Vector(scene_struct['directions']['above']) + Vector(\n                         scene_struct['directions']['left']) + Vector(scene_struct['directions']['front'])),\n                     obj_loc + r * (Vector(scene_struct['directions']['above']) + Vector(\n                         scene_struct['directions']['right']) + Vector(scene_struct['directions']['front'])),\n                     obj_loc + r * (Vector(scene_struct['directions']['below']) + Vector(\n                         scene_struct['directions']['left']) + Vector(scene_struct['directions']['behind'])),\n                     obj_loc + r * (Vector(scene_struct['directions']['below']) + Vector(\n                         scene_struct['directions']['right']) + Vector(scene_struct['directions']['behind'])),\n                     obj_loc + r * (Vector(scene_struct['directions']['above']) + Vector(\n                         scene_struct['directions']['left']) + Vector(scene_struct['directions']['behind'])),\n                     obj_loc + r * (Vector(scene_struct['directions']['above']) + Vector(\n                         scene_struct['directions']['right']) + Vector(scene_struct['directions']['behind']))\n                     ]\n    elif obj_type == 'cylinder':\n        points_3d = [obj_loc + r * vector for vector in get_cylinder_unit_vectors(scene_struct['directions'])]\n    elif obj_type == 'cup':\n        # Copied from cylinder's 3d point calculation. Replace later\n        points_3d = [obj_loc + r * vector for vector in get_cylinder_unit_vectors(scene_struct['directions'])]\n    else:\n        raise RuntimeError('invalid object type name')\n\n    points_2d = [utils.get_camera_coords(camera, location) for location in points_3d]\n    x_cords = [location[0] for location in points_2d]\n    y_cords = [location[1] for location in points_2d]\n    left_top = (min(x_cords), min(y_cords))\n    right_bottom = (max(x_cords), max(y_cords))\n\n    return left_top, right_bottom\n\ndef obj_centered_camera_pos(dist, azimuth_deg, elevation_deg):\n    phi = float(elevation_deg) / 180 * math.pi\n    theta = float(azimuth_deg) / 180 * math.pi\n    x = (dist * math.cos(theta) * math.cos(phi))\n    y = (dist * math.sin(theta) * math.cos(phi))\n    z = (dist * math.sin(phi))\n    return x, y, z\n\ndef get_cylinder_unit_vectors(directions):\n    points_3d = list()\n    for i in range(30):\n        theta = (2 * i * math.pi) / 30\n        points_3d.append(\n            math.cos(theta) * Vector(directions['right']) + math.sin(theta) * Vector(directions['front']) + Vector(\n                directions['above']))\n        points_3d.append(\n            math.cos(theta) * Vector(directions['right']) + math.sin(theta) * Vector(directions['front']) + Vector(\n                directions['below']))\n    return points_3d\n\n\ndef get_sphere_unit_vectors(directions):\n    points_3d = list()\n    for i in range(30):\n        alpha = i * math.pi / 30 - math.pi / 2  # range in (-pi/2, pi/2)\n        for j in range(30):\n            theta = (2 * j * math.pi) / 30  # range in (0, 2*pi)\n            points_3d.append(\n                math.cos(alpha) * math.cos(theta) * Vector(directions['right']) + math.cos(alpha) * math.sin(theta) *\n                Vector(directions['front']) + math.sin(alpha) * Vector(directions['above']))\n\n    return points_3d\n\ndef get_cup_unit_vectors(directions):\n    points_3d = list()\n    for i in range(30):\n        alpha = i * math.pi / 30 - math.pi / 2  # range in (-pi/2, pi/2)\n        for j in range(30):\n            theta = (2 * j * math.pi) / 30  # range in (0, 2*pi)\n            points_3d.append(\n                math.cos(alpha) * math.cos(theta) * Vector(directions['right']) + math.cos(alpha) * math.sin(theta) *\n                Vector(directions['front']) + math.sin(alpha) * Vector(directions['above']))\n\n    return points_3d\n\ndef compute_all_relationships(scene_struct, eps=0.2):\n    \"\"\"\n    Computes relationships between all pairs of objects in the scene.\n\n    Returns a dictionary mapping string relationship names to lists of lists of\n    integers, where output[rel][i] gives a list of object indices that have the\n    relationship rel with object i. For example if j is in output['left'][i] then\n    object j is left of object i.\n    \"\"\"\n    all_relationships = {}\n    for name, direction_vec in scene_struct['directions'].items():\n        if name == 'above' or name == 'below': continue\n        all_relationships[name] = []\n        for i, obj1 in enumerate(scene_struct['objects']):\n            coords1 = obj1['3d_coords']\n            related = set()\n            for j, obj2 in enumerate(scene_struct['objects']):\n                if obj1 == obj2: continue\n                coords2 = obj2['3d_coords']\n                diff = [coords2[k] - coords1[k] for k in [0, 1, 2]]\n                dot = sum(diff[k] * direction_vec[k] for k in [0, 1, 2])\n                if dot > eps:\n                    related.add(j)\n            all_relationships[name].append(sorted(list(related)))\n    return all_relationships\n\n\ndef is_valid_bbox(left_top_coord, right_bottom_coord, size_threshold):\n    width = right_bottom_coord[0] - left_top_coord[0]\n    height = right_bottom_coord[1] - left_top_coord[1]\n    if min(width, height) >= size_threshold:\n        return True\n    else:\n        return False\n\n\ndef check_visibility(blender_objects, min_pixels_per_object):\n    \"\"\"\n    Check whether all objects in the scene have some minimum number of visible\n    pixels; to accomplish this we assign random (but distinct) colors to all\n    objects, and render using no lighting or shading or antialiasing; this\n    ensures that each object is just a solid uniform color. We can then count\n    the number of pixels of each color in the output image to check the visibility\n    of each object.\n\n    Returns True if all objects are visible and False otherwise.\n    \"\"\"\n    f, path = tempfile.mkstemp(suffix='.png')\n    object_colors = render_shadeless(blender_objects, path=path)\n    img = bpy.data.images.load(path)\n    p = list(img.pixels)\n    color_count = Counter((p[i], p[i + 1], p[i + 2], p[i + 3])\n                          for i in range(0, len(p), 4))\n    os.remove(path)\n    if len(color_count) != len(blender_objects) + 1:\n        return False\n    for _, count in color_count.most_common():\n        if count < min_pixels_per_object:\n            return False\n    return True\n\n\ndef render_shadeless(blender_objects, path='flat.png'):\n    \"\"\"\n    Render a version of the scene with shading disabled and unique materials\n    assigned to all objects, and return a set of all colors that should be in the\n    rendered image. The image itself is written to path. 
This is used to ensure\n that all objects will be visible in the final rendered scene.\n \"\"\"\n render_args = bpy.context.scene.render\n\n # Cache the render args we are about to clobber\n old_filepath = render_args.filepath\n old_engine = render_args.engine\n old_use_antialiasing = render_args.use_antialiasing\n\n # Override some render settings to have flat shading\n render_args.filepath = path\n render_args.engine = 'BLENDER_RENDER'\n render_args.use_antialiasing = False\n\n # Move the lights and ground to layer 2 so they don't render\n utils.set_layer(bpy.data.objects['Lamp_Key'], 2)\n utils.set_layer(bpy.data.objects['Lamp_Fill'], 2)\n utils.set_layer(bpy.data.objects['Lamp_Back'], 2)\n utils.set_layer(bpy.data.objects['Ground'], 2)\n\n # Add random shadeless materials to all objects\n object_colors = set()\n old_materials = []\n for i, obj in enumerate(blender_objects):\n old_materials.append(obj.data.materials[0])\n bpy.ops.material.new()\n mat = bpy.data.materials['Material']\n mat.name = 'Material_%d' % i\n while True:\n r, g, b = [random.random() for _ in range(3)]\n if (r, g, b) not in object_colors: break\n object_colors.add((r, g, b))\n mat.diffuse_color = [r, g, b]\n mat.use_shadeless = True\n obj.data.materials[0] = mat\n\n # Render the scene\n bpy.ops.render.render(write_still=True)\n\n # Undo the above; first restore the materials to objects\n for mat, obj in zip(old_materials, blender_objects):\n obj.data.materials[0] = mat\n\n # Move the lights and ground back to layer 0\n utils.set_layer(bpy.data.objects['Lamp_Key'], 0)\n utils.set_layer(bpy.data.objects['Lamp_Fill'], 0)\n utils.set_layer(bpy.data.objects['Lamp_Back'], 0)\n utils.set_layer(bpy.data.objects['Ground'], 0)\n\n # Set the render settings back to what they were\n render_args.filepath = old_filepath\n render_args.engine = old_engine\n render_args.use_antialiasing = old_use_antialiasing\n\n return object_colors\n\n\nif __name__ == '__main__':\n if INSIDE_BLENDER:\n # Run normally\n argv = utils.extract_args()\n args = parser.parse_args(argv)\n main(args)\n elif '--help' in sys.argv or '-h' in sys.argv:\n parser.print_help()\n else:\n print('This script is intended to be called from blender like this:')\n print()\n print('blender --background --python render_images.py -- [args]')\n print()\n print('You can also run as a standalone python script to view all')\n print('arguments like this:')\n print()\n print('python render_images.py --help')\n\n # import pickle\n #\n # with open('../output/trees/train/CLEVR_new_000000.tree', 'rb') as f:\n # data = pickle.load(f)\n # print(data.function_obj)\n" ]
[ [ "numpy.load", "numpy.random.uniform", "numpy.random.randint" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
potpath/simpletransformers
[ "76f1df524c046760fd2938839f014e88678b2a27" ]
[ "simpletransformers/question_answering/question_answering_model.py" ]
[ "from __future__ import absolute_import, division, print_function\n\nimport json\nimport logging\nimport math\nimport os\nimport random\nimport warnings\nfrom dataclasses import asdict\nfrom multiprocessing import cpu_count\n\nimport numpy as np\nimport pandas as pd\nimport torch\nfrom scipy.stats import pearsonr\nfrom sklearn.metrics import (\n confusion_matrix,\n label_ranking_average_precision_score,\n matthews_corrcoef,\n mean_squared_error,\n)\nfrom tensorboardX import SummaryWriter\nfrom torch.utils.data import DataLoader, RandomSampler, SequentialSampler, TensorDataset\nfrom torch.utils.data.distributed import DistributedSampler\nfrom tqdm.auto import tqdm, trange\nfrom transformers.optimization import (\n get_constant_schedule,\n get_constant_schedule_with_warmup,\n get_linear_schedule_with_warmup,\n get_cosine_schedule_with_warmup,\n get_cosine_with_hard_restarts_schedule_with_warmup,\n get_polynomial_decay_schedule_with_warmup,\n)\nfrom transformers.optimization import AdamW, Adafactor\nfrom transformers import (\n AlbertConfig,\n AlbertForQuestionAnswering,\n AlbertTokenizer,\n AutoConfig,\n AutoModelForQuestionAnswering,\n AutoTokenizer,\n BartConfig,\n BartForQuestionAnswering,\n BartTokenizer,\n BertConfig,\n BertForQuestionAnswering,\n BertTokenizer,\n CamembertConfig,\n CamembertForQuestionAnswering,\n CamembertTokenizer,\n DistilBertConfig,\n DistilBertForQuestionAnswering,\n DistilBertTokenizer,\n ElectraConfig,\n ElectraTokenizer,\n LongformerConfig,\n LongformerForQuestionAnswering,\n LongformerTokenizer,\n MPNetConfig,\n MPNetForQuestionAnswering,\n MPNetTokenizer,\n MobileBertConfig,\n MobileBertForQuestionAnswering,\n MobileBertTokenizer,\n RobertaConfig,\n RobertaForQuestionAnswering,\n RobertaTokenizer,\n SqueezeBertConfig,\n SqueezeBertForQuestionAnswering,\n SqueezeBertTokenizer,\n WEIGHTS_NAME,\n XLMConfig,\n XLMForQuestionAnswering,\n XLMRobertaConfig,\n XLMRobertaTokenizer,\n XLMTokenizer,\n XLNetConfig,\n XLNetForQuestionAnswering,\n XLNetTokenizer,\n)\n\nfrom simpletransformers.config.global_args import global_args\nfrom simpletransformers.config.model_args import QuestionAnsweringArgs\nfrom simpletransformers.config.utils import sweep_config_to_sweep_values\nfrom simpletransformers.custom_models.models import ElectraForQuestionAnswering, XLMRobertaForQuestionAnswering\nfrom simpletransformers.question_answering.question_answering_utils import (\n LazyQuestionAnsweringDataset,\n RawResult,\n RawResultExtended,\n build_examples,\n convert_examples_to_features,\n get_best_predictions,\n get_best_predictions_extended,\n get_examples,\n squad_convert_examples_to_features,\n to_list,\n write_predictions,\n write_predictions_extended,\n)\n\ntry:\n import wandb\n\n wandb_available = True\nexcept ImportError:\n wandb_available = False\n\n\nlogger = logging.getLogger(__name__)\n\n\nclass QuestionAnsweringModel:\n def __init__(self, model_type, model_name, args=None, use_cuda=True, cuda_device=-1, **kwargs):\n\n \"\"\"\n Initializes a QuestionAnsweringModel model.\n\n Args:\n model_type: The type of model (bert, xlnet, xlm, distilbert)\n model_name: Default Transformer model name or path to a directory containing Transformer model file (pytorch_nodel.bin).\n args (optional): Default args will be used if this parameter is not provided. If provided,\n it should be a dict containing the args that should be changed in the default args'\n use_cuda (optional): Use GPU if available. 
Setting to False will force model to use CPU only.\n            cuda_device (optional): Specific GPU that should be used. Will use the first available GPU by default.\n        \"\"\"  # noqa: ignore flake8\"\n\n        MODEL_CLASSES = {\n            \"albert\": (AlbertConfig, AlbertForQuestionAnswering, AlbertTokenizer),\n            \"auto\": (AutoConfig, AutoModelForQuestionAnswering, AutoTokenizer),\n            \"bart\": (BartConfig, BartForQuestionAnswering, BartTokenizer),\n            \"bert\": (BertConfig, BertForQuestionAnswering, BertTokenizer),\n            \"camembert\": (CamembertConfig, CamembertForQuestionAnswering, CamembertTokenizer),\n            \"distilbert\": (DistilBertConfig, DistilBertForQuestionAnswering, DistilBertTokenizer),\n            \"electra\": (ElectraConfig, ElectraForQuestionAnswering, ElectraTokenizer),\n            \"longformer\": (LongformerConfig, LongformerForQuestionAnswering, LongformerTokenizer),\n            \"mobilebert\": (MobileBertConfig, MobileBertForQuestionAnswering, MobileBertTokenizer),\n            \"mpnet\": (MPNetConfig, MPNetForQuestionAnswering, MPNetTokenizer),\n            \"roberta\": (RobertaConfig, RobertaForQuestionAnswering, RobertaTokenizer),\n            \"squeezebert\": (SqueezeBertConfig, SqueezeBertForQuestionAnswering, SqueezeBertTokenizer),\n            \"xlm\": (XLMConfig, XLMForQuestionAnswering, XLMTokenizer),\n            \"xlmroberta\": (XLMRobertaConfig, XLMRobertaForQuestionAnswering, XLMRobertaTokenizer),\n            \"xlnet\": (XLNetConfig, XLNetForQuestionAnswering, XLNetTokenizer),\n        }\n\n        self.args = self._load_model_args(model_name)\n\n        if isinstance(args, dict):\n            self.args.update_from_dict(args)\n        elif isinstance(args, QuestionAnsweringArgs):\n            self.args = args\n\n        if \"sweep_config\" in kwargs:\n            self.is_sweeping = True\n            sweep_config = kwargs.pop(\"sweep_config\")\n            sweep_values = sweep_config_to_sweep_values(sweep_config)\n            self.args.update_from_dict(sweep_values)\n        else:\n            self.is_sweeping = False\n\n        if self.args.manual_seed:\n            random.seed(self.args.manual_seed)\n            np.random.seed(self.args.manual_seed)\n            torch.manual_seed(self.args.manual_seed)\n            if self.args.n_gpu > 0:\n                torch.cuda.manual_seed_all(self.args.manual_seed)\n\n        if not use_cuda:\n            self.args.fp16 = False\n\n        config_class, model_class, tokenizer_class = MODEL_CLASSES[model_type]\n        self.config = config_class.from_pretrained(model_name, **self.args.config)\n        if not self.args.quantized_model:\n            self.model = model_class.from_pretrained(model_name, config=self.config, **kwargs)\n        else:\n            quantized_weights = torch.load(os.path.join(model_name, \"pytorch_model.bin\"))\n            self.model = model_class.from_pretrained(None, config=self.config, state_dict=quantized_weights)\n\n        if self.args.dynamic_quantize:\n            self.model = torch.quantization.quantize_dynamic(self.model, {torch.nn.Linear}, dtype=torch.qint8)\n        if self.args.quantized_model:\n            self.model.load_state_dict(quantized_weights)\n        if self.args.dynamic_quantize:\n            self.args.quantized_model = True\n\n        if use_cuda:\n            if torch.cuda.is_available():\n                if cuda_device == -1:\n                    self.device = torch.device(\"cuda\")\n                else:\n                    self.device = torch.device(f\"cuda:{cuda_device}\")\n            else:\n                raise ValueError(\n                    \"'use_cuda' set to True when cuda is unavailable.\"\n                    \" Make sure CUDA is available or set use_cuda=False.\"\n                )\n        else:\n            self.device = \"cpu\"\n\n        self.results = {}\n\n        if self.args.fp16:\n            try:\n                from torch.cuda import amp\n            except AttributeError:\n                raise AttributeError(\"fp16 requires Pytorch >= 1.6. 
Please update Pytorch or turn off fp16.\")\n\n if model_type == \"auto\":\n self.tokenizer = tokenizer_class.from_pretrained(model_name, **kwargs)\n else:\n self.tokenizer = tokenizer_class.from_pretrained(\n model_name, do_lower_case=self.args.do_lower_case, **kwargs\n )\n\n if self.args.special_tokens_list:\n self.tokenizer.add_tokens(self.args.special_tokens_list, special_tokens=True)\n self.model.resize_token_embeddings(len(self.tokenizer))\n\n self.args.model_name = model_name\n self.args.model_type = model_type\n\n if self.args.wandb_project and not wandb_available:\n warnings.warn(\"wandb_project specified but wandb is not available. Wandb disabled.\")\n self.args.wandb_project = None\n\n def load_and_cache_examples(self, examples, evaluate=False, no_cache=False, output_examples=False):\n \"\"\"\n Converts a list of examples to a TensorDataset containing InputFeatures. Caches the InputFeatures.\n\n Utility function for train() and eval() methods. Not intended to be used directly.\n \"\"\"\n\n tokenizer = self.tokenizer\n args = self.args\n\n if not no_cache:\n no_cache = args.no_cache\n\n if not no_cache:\n os.makedirs(self.args.cache_dir, exist_ok=True)\n\n examples = get_examples(examples, is_training=not evaluate)\n\n mode = \"dev\" if evaluate else \"train\"\n cached_features_file = os.path.join(\n args.cache_dir, \"cached_{}_{}_{}_{}\".format(mode, args.model_type, args.max_seq_length, len(examples)),\n )\n\n if os.path.exists(cached_features_file) and (\n (not args.reprocess_input_data and not no_cache) or (mode == \"dev\" and args.use_cached_eval_features)\n ):\n features = torch.load(cached_features_file)\n logger.info(f\" Features loaded from cache at {cached_features_file}\")\n\n # Convert to Tensors and build dataset\n all_input_ids = torch.tensor([f.input_ids for f in features], dtype=torch.long)\n all_attention_masks = torch.tensor([f.attention_mask for f in features], dtype=torch.long)\n all_token_type_ids = torch.tensor([f.token_type_ids for f in features], dtype=torch.long)\n all_cls_index = torch.tensor([f.cls_index for f in features], dtype=torch.long)\n all_p_mask = torch.tensor([f.p_mask for f in features], dtype=torch.float)\n all_is_impossible = torch.tensor([f.is_impossible for f in features], dtype=torch.float)\n\n if mode == \"dev\":\n all_feature_index = torch.arange(all_input_ids.size(0), dtype=torch.long)\n dataset = TensorDataset(\n all_input_ids,\n all_attention_masks,\n all_token_type_ids,\n all_feature_index,\n all_cls_index,\n all_p_mask,\n )\n else:\n all_start_positions = torch.tensor([f.start_position for f in features], dtype=torch.long)\n all_end_positions = torch.tensor([f.end_position for f in features], dtype=torch.long)\n dataset = TensorDataset(\n all_input_ids,\n all_attention_masks,\n all_token_type_ids,\n all_start_positions,\n all_end_positions,\n all_cls_index,\n all_p_mask,\n all_is_impossible,\n )\n else:\n logger.info(\" Converting to features started.\")\n\n features, dataset = squad_convert_examples_to_features(\n examples=examples,\n tokenizer=tokenizer,\n max_seq_length=args.max_seq_length,\n doc_stride=args.doc_stride,\n max_query_length=args.max_query_length,\n is_training=not evaluate,\n tqdm_enabled=not args.silent,\n threads=args.process_count,\n args=args,\n )\n\n if not no_cache:\n torch.save(features, cached_features_file)\n\n if output_examples:\n return dataset, examples, features\n return dataset\n\n def train_model(\n self, train_data, output_dir=False, show_running_loss=True, args=None, eval_data=None, verbose=True, 
**kwargs\n ):\n \"\"\"\n Trains the model using 'train_data'\n\n Args:\n train_data: Path to JSON file containing training data OR list of Python dicts in the correct format. The model will be trained on this data.\n output_dir: The directory where model files will be saved. If not given, self.args.output_dir will be used.\n show_running_loss (optional): Set to False to prevent running loss from being printed to console. Defaults to True.\n args (optional): Optional changes to the args dict of the model. Any changes made will persist for the model.\n eval_data (optional): Path to JSON file containing evaluation data against which evaluation will be performed when evaluate_during_training is enabled.\n Is required if evaluate_during_training is enabled.\n **kwargs: Additional metrics that should be used. Pass in the metrics as keyword arguments (name of metric: function to use).\n A metric function should take in two parameters. The first parameter will be the true labels, and the second parameter will be the predictions.\n Returns:\n global_step: Number of global steps trained\n training_details: Average training loss if evaluate_during_training is False or full training progress scores if evaluate_during_training is True\n \"\"\" # noqa: ignore flake8\"\n\n if args:\n self.args.update_from_dict(args)\n\n if self.args.silent:\n show_running_loss = False\n\n if self.args.evaluate_during_training and eval_data is None:\n raise ValueError(\n \"evaluate_during_training is enabled but eval_data is not specified.\"\n \" Pass eval_data to model.train_model() if using evaluate_during_training.\"\n )\n\n if not output_dir:\n output_dir = self.args.output_dir\n\n if os.path.exists(output_dir) and os.listdir(output_dir) and not self.args.overwrite_output_dir:\n raise ValueError(\n \"Output directory ({}) already exists and is not empty.\"\n \"Use --overwrite_output_dir to overcome.\".format(output_dir)\n )\n\n self._move_model_to_device()\n\n if self.args.lazy_loading:\n if isinstance(train_data, str):\n train_dataset = LazyQuestionAnsweringDataset(train_data, self.tokenizer, self.args)\n else:\n raise ValueError(\"Input must be given as a path to a file when using lazy loading\")\n else:\n if isinstance(train_data, str):\n with open(train_data, \"r\", encoding=self.args.encoding) as f:\n train_examples = json.load(f)\n else:\n train_examples = train_data\n\n train_dataset = self.load_and_cache_examples(train_examples)\n\n os.makedirs(output_dir, exist_ok=True)\n\n global_step, training_details = self.train(\n train_dataset, output_dir, show_running_loss=show_running_loss, eval_data=eval_data, **kwargs\n )\n\n self.save_model(model=self.model)\n\n logger.info(\" Training of {} model complete. Saved to {}.\".format(self.args.model_type, output_dir))\n\n return global_step, training_details\n\n def train(self, train_dataset, output_dir, show_running_loss=True, eval_data=None, verbose=True, **kwargs):\n \"\"\"\n Trains the model on train_dataset.\n\n Utility function to be used by the train_model() method. 
Not intended to be used directly.\n \"\"\"\n\n device = self.device\n model = self.model\n args = self.args\n\n tb_writer = SummaryWriter(logdir=args.tensorboard_dir)\n train_sampler = RandomSampler(train_dataset)\n train_dataloader = DataLoader(\n train_dataset,\n sampler=train_sampler,\n batch_size=args.train_batch_size,\n num_workers=self.args.dataloader_num_workers,\n )\n\n t_total = len(train_dataloader) // args.gradient_accumulation_steps * args.num_train_epochs\n\n no_decay = [\"bias\", \"LayerNorm.weight\"]\n\n optimizer_grouped_parameters = []\n custom_parameter_names = set()\n for group in self.args.custom_parameter_groups:\n params = group.pop(\"params\")\n custom_parameter_names.update(params)\n param_group = {**group}\n param_group[\"params\"] = [p for n, p in model.named_parameters() if n in params]\n optimizer_grouped_parameters.append(param_group)\n\n for group in self.args.custom_layer_parameters:\n layer_number = group.pop(\"layer\")\n layer = f\"layer.{layer_number}.\"\n group_d = {**group}\n group_nd = {**group}\n group_nd[\"weight_decay\"] = 0.0\n params_d = []\n params_nd = []\n for n, p in model.named_parameters():\n if n not in custom_parameter_names and layer in n:\n if any(nd in n for nd in no_decay):\n params_nd.append(p)\n else:\n params_d.append(p)\n custom_parameter_names.add(n)\n group_d[\"params\"] = params_d\n group_nd[\"params\"] = params_nd\n\n optimizer_grouped_parameters.append(group_d)\n optimizer_grouped_parameters.append(group_nd)\n\n if not self.args.train_custom_parameters_only:\n optimizer_grouped_parameters.extend(\n [\n {\n \"params\": [\n p\n for n, p in model.named_parameters()\n if n not in custom_parameter_names and not any(nd in n for nd in no_decay)\n ],\n \"weight_decay\": args.weight_decay,\n },\n {\n \"params\": [\n p\n for n, p in model.named_parameters()\n if n not in custom_parameter_names and any(nd in n for nd in no_decay)\n ],\n \"weight_decay\": 0.0,\n },\n ]\n )\n\n warmup_steps = math.ceil(t_total * args.warmup_ratio)\n args.warmup_steps = warmup_steps if args.warmup_steps == 0 else args.warmup_steps\n\n if args.optimizer == \"AdamW\":\n optimizer = AdamW(optimizer_grouped_parameters, lr=args.learning_rate, eps=args.adam_epsilon)\n elif args.optimizer == \"Adafactor\":\n optimizer = Adafactor(\n optimizer_grouped_parameters,\n lr=args.learning_rate,\n eps=args.adafactor_eps,\n clip_threshold=args.adafactor_clip_threshold,\n decay_rate=args.adafactor_decay_rate,\n beta1=args.adafactor_beta1,\n weight_decay=args.weight_decay,\n scale_parameter=args.adafactor_scale_parameter,\n relative_step=args.adafactor_relative_step,\n warmup_init=args.adafactor_warmup_init,\n )\n print(\"Using Adafactor for T5\")\n else:\n raise ValueError(\n \"{} is not a valid optimizer class. 
Please use one of ('AdamW', 'Adafactor') instead.\".format(\n args.optimizer\n )\n )\n\n if args.scheduler == \"constant_schedule\":\n scheduler = get_constant_schedule(optimizer)\n\n elif args.scheduler == \"constant_schedule_with_warmup\":\n scheduler = get_constant_schedule_with_warmup(optimizer, num_warmup_steps=args.warmup_steps)\n\n elif args.scheduler == \"linear_schedule_with_warmup\":\n scheduler = get_linear_schedule_with_warmup(\n optimizer, num_warmup_steps=args.warmup_steps, num_training_steps=t_total\n )\n\n elif args.scheduler == \"cosine_schedule_with_warmup\":\n scheduler = get_cosine_schedule_with_warmup(\n optimizer,\n num_warmup_steps=args.warmup_steps,\n num_training_steps=t_total,\n num_cycles=args.cosine_schedule_num_cycles,\n )\n\n elif args.scheduler == \"cosine_with_hard_restarts_schedule_with_warmup\":\n scheduler = get_cosine_with_hard_restarts_schedule_with_warmup(\n optimizer,\n num_warmup_steps=args.warmup_steps,\n num_training_steps=t_total,\n num_cycles=args.cosine_schedule_num_cycles,\n )\n\n elif args.scheduler == \"polynomial_decay_schedule_with_warmup\":\n scheduler = get_polynomial_decay_schedule_with_warmup(\n optimizer,\n num_warmup_steps=args.warmup_steps,\n num_training_steps=t_total,\n lr_end=args.polynomial_decay_schedule_lr_end,\n power=args.polynomial_decay_schedule_power,\n )\n\n else:\n raise ValueError(\"{} is not a valid scheduler.\".format(args.scheduler))\n\n if args.n_gpu > 1:\n model = torch.nn.DataParallel(model)\n\n global_step = 0\n training_progress_scores = None\n tr_loss, logging_loss = 0.0, 0.0\n model.zero_grad()\n train_iterator = trange(int(args.num_train_epochs), desc=\"Epoch\", disable=args.silent, mininterval=0)\n epoch_number = 0\n best_eval_metric = None\n early_stopping_counter = 0\n steps_trained_in_current_epoch = 0\n epochs_trained = 0\n\n if args.model_name and os.path.exists(args.model_name):\n try:\n # set global_step to gobal_step of last saved checkpoint from model path\n checkpoint_suffix = args.model_name.split(\"/\")[-1].split(\"-\")\n if len(checkpoint_suffix) > 2:\n checkpoint_suffix = checkpoint_suffix[1]\n else:\n checkpoint_suffix = checkpoint_suffix[-1]\n global_step = int(checkpoint_suffix)\n epochs_trained = global_step // (len(train_dataloader) // args.gradient_accumulation_steps)\n steps_trained_in_current_epoch = global_step % (\n len(train_dataloader) // args.gradient_accumulation_steps\n )\n\n logger.info(\" Continuing training from checkpoint, will skip to saved global_step\")\n logger.info(\" Continuing training from epoch %d\", epochs_trained)\n logger.info(\" Continuing training from global step %d\", global_step)\n logger.info(\" Will skip the first %d steps in the current epoch\", steps_trained_in_current_epoch)\n except ValueError:\n logger.info(\" Starting fine-tuning.\")\n\n if args.evaluate_during_training:\n training_progress_scores = self._create_training_progress_scores(**kwargs)\n\n if args.wandb_project:\n wandb.init(project=args.wandb_project, config={**asdict(args)}, **args.wandb_kwargs)\n wandb.watch(self.model)\n\n if args.fp16:\n from torch.cuda import amp\n\n scaler = amp.GradScaler()\n\n for _ in train_iterator:\n model.train()\n if epochs_trained > 0:\n epochs_trained -= 1\n continue\n train_iterator.set_description(f\"Epoch {epoch_number + 1} of {args.num_train_epochs}\")\n batch_iterator = tqdm(\n train_dataloader,\n desc=f\"Running Epoch {epoch_number} of {args.num_train_epochs}\",\n disable=args.silent,\n mininterval=0,\n )\n for step, batch in 
enumerate(batch_iterator):\n if steps_trained_in_current_epoch > 0:\n steps_trained_in_current_epoch -= 1\n continue\n batch = tuple(t.to(device) for t in batch)\n\n inputs = self._get_inputs_dict(batch)\n if args.fp16:\n with amp.autocast():\n outputs = model(**inputs)\n # model outputs are always tuple in pytorch-transformers (see doc)\n loss = outputs[0]\n else:\n outputs = model(**inputs)\n # model outputs are always tuple in pytorch-transformers (see doc)\n loss = outputs[0]\n\n if args.n_gpu > 1:\n loss = loss.mean() # mean() to average on multi-gpu parallel training\n\n current_loss = loss.item()\n\n if show_running_loss:\n batch_iterator.set_description(\n f\"Epochs {epoch_number}/{args.num_train_epochs}. Running Loss: {current_loss:9.4f}\"\n )\n\n if args.gradient_accumulation_steps > 1:\n loss = loss / args.gradient_accumulation_steps\n\n if args.fp16:\n scaler.scale(loss).backward()\n else:\n loss.backward()\n\n tr_loss += loss.item()\n if (step + 1) % args.gradient_accumulation_steps == 0:\n if args.fp16:\n scaler.unscale_(optimizer)\n if args.optimizer == \"AdamW\":\n torch.nn.utils.clip_grad_norm_(model.parameters(), args.max_grad_norm)\n\n if args.fp16:\n scaler.step(optimizer)\n scaler.update()\n else:\n optimizer.step()\n scheduler.step() # Update learning rate schedule\n model.zero_grad()\n global_step += 1\n\n if args.logging_steps > 0 and global_step % args.logging_steps == 0:\n # Log metrics\n tb_writer.add_scalar(\"lr\", scheduler.get_last_lr()[0], global_step)\n tb_writer.add_scalar(\n \"loss\", (tr_loss - logging_loss) / args.logging_steps, global_step,\n )\n logging_loss = tr_loss\n if args.wandb_project or self.is_sweeping:\n wandb.log(\n {\n \"Training loss\": current_loss,\n \"lr\": scheduler.get_last_lr()[0],\n \"global_step\": global_step,\n }\n )\n\n if args.save_steps > 0 and global_step % args.save_steps == 0:\n # Save model checkpoint\n output_dir_current = os.path.join(output_dir, \"checkpoint-{}\".format(global_step))\n\n self.save_model(output_dir_current, optimizer, scheduler, model=model)\n\n if args.evaluate_during_training and (\n args.evaluate_during_training_steps > 0\n and global_step % args.evaluate_during_training_steps == 0\n ):\n # Only evaluate when single GPU otherwise metrics may not average well\n results, _ = self.eval_model(eval_data, verbose=False, **kwargs)\n for key, value in results.items():\n tb_writer.add_scalar(\"eval_{}\".format(key), value, global_step)\n\n output_dir_current = os.path.join(output_dir, \"checkpoint-{}\".format(global_step))\n\n if args.save_eval_checkpoints:\n self.save_model(output_dir_current, optimizer, scheduler, model=model, results=results)\n\n training_progress_scores[\"global_step\"].append(global_step)\n training_progress_scores[\"train_loss\"].append(current_loss)\n for key in results:\n training_progress_scores[key].append(results[key])\n report = pd.DataFrame(training_progress_scores)\n report.to_csv(\n os.path.join(args.output_dir, \"training_progress_scores.csv\"), index=False,\n )\n\n if args.wandb_project or self.is_sweeping:\n wandb.log(self._get_last_metrics(training_progress_scores))\n\n if not best_eval_metric:\n best_eval_metric = results[args.early_stopping_metric]\n self.save_model(args.best_model_dir, optimizer, scheduler, model=model, results=results)\n if best_eval_metric and args.early_stopping_metric_minimize:\n if results[args.early_stopping_metric] - best_eval_metric < args.early_stopping_delta:\n best_eval_metric = results[args.early_stopping_metric]\n self.save_model(\n 
args.best_model_dir, optimizer, scheduler, model=model, results=results\n )\n early_stopping_counter = 0\n else:\n if args.use_early_stopping:\n if early_stopping_counter < args.early_stopping_patience:\n early_stopping_counter += 1\n if verbose:\n logger.info(f\" No improvement in {args.early_stopping_metric}\")\n logger.info(f\" Current step: {early_stopping_counter}\")\n logger.info(f\" Early stopping patience: {args.early_stopping_patience}\")\n else:\n if verbose:\n logger.info(f\" Patience of {args.early_stopping_patience} steps reached\")\n logger.info(\" Training terminated.\")\n train_iterator.close()\n return (\n global_step,\n tr_loss / global_step\n if not self.args.evaluate_during_training\n else training_progress_scores,\n )\n else:\n if results[args.early_stopping_metric] - best_eval_metric > args.early_stopping_delta:\n best_eval_metric = results[args.early_stopping_metric]\n self.save_model(\n args.best_model_dir, optimizer, scheduler, model=model, results=results\n )\n early_stopping_counter = 0\n else:\n if args.use_early_stopping:\n if early_stopping_counter < args.early_stopping_patience:\n early_stopping_counter += 1\n if verbose:\n logger.info(f\" No improvement in {args.early_stopping_metric}\")\n logger.info(f\" Current step: {early_stopping_counter}\")\n logger.info(f\" Early stopping patience: {args.early_stopping_patience}\")\n else:\n if verbose:\n logger.info(f\" Patience of {args.early_stopping_patience} steps reached\")\n logger.info(\" Training terminated.\")\n train_iterator.close()\n return (\n global_step,\n tr_loss / global_step\n if not self.args.evaluate_during_training\n else training_progress_scores,\n )\n model.train()\n\n epoch_number += 1\n output_dir_current = os.path.join(output_dir, \"checkpoint-{}-epoch-{}\".format(global_step, epoch_number))\n\n if args.save_model_every_epoch or args.evaluate_during_training:\n os.makedirs(output_dir_current, exist_ok=True)\n\n if args.save_model_every_epoch:\n self.save_model(output_dir_current, optimizer, scheduler, model=model)\n\n if args.evaluate_during_training and args.evaluate_each_epoch:\n results, _ = self.eval_model(eval_data, verbose=False, **kwargs)\n\n self.save_model(output_dir_current, optimizer, scheduler, results=results)\n\n training_progress_scores[\"global_step\"].append(global_step)\n training_progress_scores[\"train_loss\"].append(current_loss)\n for key in results:\n training_progress_scores[key].append(results[key])\n report = pd.DataFrame(training_progress_scores)\n report.to_csv(os.path.join(args.output_dir, \"training_progress_scores.csv\"), index=False)\n\n if args.wandb_project or self.is_sweeping:\n wandb.log(self._get_last_metrics(training_progress_scores))\n\n if not best_eval_metric:\n best_eval_metric = results[args.early_stopping_metric]\n self.save_model(args.best_model_dir, optimizer, scheduler, model=model, results=results)\n if best_eval_metric and args.early_stopping_metric_minimize:\n if results[args.early_stopping_metric] - best_eval_metric < args.early_stopping_delta:\n best_eval_metric = results[args.early_stopping_metric]\n self.save_model(args.best_model_dir, optimizer, scheduler, model=model, results=results)\n early_stopping_counter = 0\n else:\n if args.use_early_stopping and args.early_stopping_consider_epochs:\n if early_stopping_counter < args.early_stopping_patience:\n early_stopping_counter += 1\n if verbose:\n logger.info(f\" No improvement in {args.early_stopping_metric}\")\n logger.info(f\" Current step: {early_stopping_counter}\")\n logger.info(f\" 
Early stopping patience: {args.early_stopping_patience}\")\n else:\n if verbose:\n logger.info(f\" Patience of {args.early_stopping_patience} steps reached\")\n logger.info(\" Training terminated.\")\n train_iterator.close()\n return (\n global_step,\n tr_loss / global_step\n if not self.args.evaluate_during_training\n else training_progress_scores,\n )\n else:\n if results[args.early_stopping_metric] - best_eval_metric > args.early_stopping_delta:\n best_eval_metric = results[args.early_stopping_metric]\n self.save_model(args.best_model_dir, optimizer, scheduler, model=model, results=results)\n early_stopping_counter = 0\n else:\n if args.use_early_stopping and args.early_stopping_consider_epochs:\n if early_stopping_counter < args.early_stopping_patience:\n early_stopping_counter += 1\n if verbose:\n logger.info(f\" No improvement in {args.early_stopping_metric}\")\n logger.info(f\" Current step: {early_stopping_counter}\")\n logger.info(f\" Early stopping patience: {args.early_stopping_patience}\")\n else:\n if verbose:\n logger.info(f\" Patience of {args.early_stopping_patience} steps reached\")\n logger.info(\" Training terminated.\")\n train_iterator.close()\n return (\n global_step,\n tr_loss / global_step\n if not self.args.evaluate_during_training\n else training_progress_scores,\n )\n\n return (\n global_step,\n tr_loss / global_step if not self.args.evaluate_during_training else training_progress_scores,\n )\n\n def eval_model(self, eval_data, output_dir=None, verbose=False, verbose_logging=False, **kwargs):\n \"\"\"\n Evaluates the model on eval_data. Saves results to output_dir.\n\n Args:\n eval_data: Path to JSON file containing evaluation data OR list of Python dicts in the correct format. The model will be evaluated on this data.\n output_dir: The directory where model files will be saved. If not given, self.args.output_dir will be used.\n verbose: If verbose, results will be printed to the console on completion of evaluation.\n verbose_logging: Log info related to feature conversion and writing predictions.\n **kwargs: Additional metrics that should be used. Pass in the metrics as keyword arguments (name of metric: function to use).\n A metric function should take in two parameters. The first parameter will be the true labels, and the second parameter will be the predictions.\n\n Returns:\n result: Dictionary containing evaluation results. (correct, similar, incorrect)\n text: A dictionary containing the 3 dictionaries correct_text, similar_text (the predicted answer is a substring of the correct answer or vise versa), incorrect_text.\n \"\"\" # noqa: ignore flake8\"\n\n if not output_dir:\n output_dir = self.args.output_dir\n\n self._move_model_to_device()\n\n all_predictions, all_nbest_json, scores_diff_json, eval_loss = self.evaluate(\n eval_data, output_dir, verbose_logging=verbose\n )\n\n if isinstance(eval_data, str):\n with open(eval_data, \"r\", encoding=self.args.encoding) as f:\n truth = json.load(f)\n else:\n truth = eval_data\n\n result, texts = self.calculate_results(truth, all_predictions, **kwargs)\n result[\"eval_loss\"] = eval_loss\n\n self.results.update(result)\n\n if verbose:\n logger.info(self.results)\n\n return result, texts\n\n def evaluate(self, eval_data, output_dir, verbose_logging=False):\n \"\"\"\n Evaluates the model on eval_data.\n\n Utility function to be used by the eval_model() method. 
Not intended to be used directly.\n \"\"\"\n tokenizer = self.tokenizer\n device = self.device\n model = self.model\n args = self.args\n\n if isinstance(eval_data, str):\n with open(eval_data, \"r\", encoding=self.args.encoding) as f:\n eval_examples = json.load(f)\n else:\n eval_examples = eval_data\n\n eval_dataset, examples, features = self.load_and_cache_examples(\n eval_examples, evaluate=True, output_examples=True\n )\n\n eval_sampler = SequentialSampler(eval_dataset)\n eval_dataloader = DataLoader(eval_dataset, sampler=eval_sampler, batch_size=args.eval_batch_size)\n\n eval_loss = 0.0\n nb_eval_steps = 0\n model.eval()\n\n if args.n_gpu > 1:\n model = torch.nn.DataParallel(model)\n\n if self.args.fp16:\n from torch.cuda import amp\n\n all_results = []\n for batch in tqdm(eval_dataloader, disable=args.silent, desc=\"Running Evaluation\"):\n batch = tuple(t.to(device) for t in batch)\n\n with torch.no_grad():\n inputs = {\n \"input_ids\": batch[0],\n \"attention_mask\": batch[1],\n \"token_type_ids\": batch[2],\n }\n\n if self.args.model_type in [\n \"xlm\",\n \"roberta\",\n \"distilbert\",\n \"camembert\",\n \"electra\",\n \"xlmroberta\",\n \"bart\",\n ]:\n del inputs[\"token_type_ids\"]\n\n example_indices = batch[3]\n\n if args.model_type in [\"xlnet\", \"xlm\"]:\n inputs.update({\"cls_index\": batch[4], \"p_mask\": batch[5]})\n\n if self.args.fp16:\n with amp.autocast():\n outputs = model(**inputs)\n eval_loss += outputs[0].mean().item()\n else:\n outputs = model(**inputs)\n eval_loss += outputs[0].mean().item()\n\n for i, example_index in enumerate(example_indices):\n eval_feature = features[example_index.item()]\n unique_id = int(eval_feature.unique_id)\n if args.model_type in [\"xlnet\", \"xlm\"]:\n # XLNet uses a more complex post-processing procedure\n result = RawResultExtended(\n unique_id=unique_id,\n start_top_log_probs=to_list(outputs[0][i]),\n start_top_index=to_list(outputs[1][i]),\n end_top_log_probs=to_list(outputs[2][i]),\n end_top_index=to_list(outputs[3][i]),\n cls_logits=to_list(outputs[4][i]),\n )\n else:\n result = RawResult(\n unique_id=unique_id,\n start_logits=to_list(outputs[0][i]),\n end_logits=to_list(outputs[1][i]),\n )\n all_results.append(result)\n\n nb_eval_steps += 1\n\n eval_loss = eval_loss / nb_eval_steps\n\n prefix = \"test\"\n os.makedirs(output_dir, exist_ok=True)\n\n output_prediction_file = os.path.join(output_dir, \"predictions_{}.json\".format(prefix))\n output_nbest_file = os.path.join(output_dir, \"nbest_predictions_{}.json\".format(prefix))\n output_null_log_odds_file = os.path.join(output_dir, \"null_odds_{}.json\".format(prefix))\n\n if args.model_type in [\"xlnet\", \"xlm\"]:\n # XLNet uses a more complex post-processing procedure\n (all_predictions, all_nbest_json, scores_diff_json,) = write_predictions_extended(\n examples,\n features,\n all_results,\n args.n_best_size,\n args.max_answer_length,\n output_prediction_file,\n output_nbest_file,\n output_null_log_odds_file,\n eval_data,\n model.config.start_n_top,\n model.config.end_n_top,\n True,\n tokenizer,\n verbose_logging,\n )\n else:\n all_predictions, all_nbest_json, scores_diff_json = write_predictions(\n examples,\n features,\n all_results,\n args.n_best_size,\n args.max_answer_length,\n False,\n output_prediction_file,\n output_nbest_file,\n output_null_log_odds_file,\n verbose_logging,\n True,\n args.null_score_diff_threshold,\n )\n\n return all_predictions, all_nbest_json, scores_diff_json, eval_loss\n\n def predict(self, to_predict, n_best_size=None):\n \"\"\"\n Performs 
predictions on a list of python dicts containing contexts and qas.\n\n Args:\n to_predict: A python list of python dicts containing contexts and questions to be sent to the model for prediction.\n E.g: predict([\n {\n 'context': \"Some context as a demo\",\n 'qas': [\n {'id': '0', 'question': 'What is the context here?'},\n {'id': '1', 'question': 'What is this for?'}\n ]\n }\n ])\n n_best_size (Optional): Number of predictions to return. args.n_best_size will be used if not specified.\n\n Returns:\n list: A python list of dicts containing the predicted answer/answers, and id for each question in to_predict.\n list: A python list of dicts containing the predicted probability/probabilities, and id for each question in to_predict.\n \"\"\" # noqa: ignore flake8\"\n tokenizer = self.tokenizer\n device = self.device\n model = self.model\n args = self.args\n\n if not n_best_size:\n n_best_size = args.n_best_size\n\n self._move_model_to_device()\n\n eval_examples = build_examples(to_predict)\n eval_dataset, examples, features = self.load_and_cache_examples(\n eval_examples, evaluate=True, output_examples=True, no_cache=True\n )\n\n eval_sampler = SequentialSampler(eval_dataset)\n eval_dataloader = DataLoader(eval_dataset, sampler=eval_sampler, batch_size=args.eval_batch_size)\n\n model.eval()\n\n if args.n_gpu > 1:\n model = torch.nn.DataParallel(model)\n\n if self.args.fp16:\n from torch.cuda import amp\n\n all_results = []\n for batch in tqdm(eval_dataloader, disable=args.silent, desc=\"Running Prediction\"):\n batch = tuple(t.to(device) for t in batch)\n\n with torch.no_grad():\n inputs = {\n \"input_ids\": batch[0],\n \"attention_mask\": batch[1],\n \"token_type_ids\": batch[2],\n }\n\n if self.args.model_type in [\n \"xlm\",\n \"roberta\",\n \"distilbert\",\n \"camembert\",\n \"electra\",\n \"xlmroberta\",\n \"bart\",\n ]:\n del inputs[\"token_type_ids\"]\n\n example_indices = batch[3]\n\n if args.model_type in [\"xlnet\", \"xlm\"]:\n inputs.update({\"cls_index\": batch[4], \"p_mask\": batch[5]})\n\n if self.args.fp16:\n with amp.autocast():\n outputs = model(**inputs)\n else:\n outputs = model(**inputs)\n\n for i, example_index in enumerate(example_indices):\n eval_feature = features[example_index.item()]\n unique_id = int(eval_feature.unique_id)\n if args.model_type in [\"xlnet\", \"xlm\"]:\n # XLNet uses a more complex post-processing procedure\n result = RawResultExtended(\n unique_id=unique_id,\n start_top_log_probs=to_list(outputs[0][i]),\n start_top_index=to_list(outputs[1][i]),\n end_top_log_probs=to_list(outputs[2][i]),\n end_top_index=to_list(outputs[3][i]),\n cls_logits=to_list(outputs[4][i]),\n )\n else:\n result = RawResult(\n unique_id=unique_id,\n start_logits=to_list(outputs[0][i]),\n end_logits=to_list(outputs[1][i]),\n )\n all_results.append(result)\n\n if args.model_type in [\"xlnet\", \"xlm\"]:\n answers = get_best_predictions_extended(\n examples,\n features,\n all_results,\n n_best_size,\n args.max_answer_length,\n model.config.start_n_top,\n model.config.end_n_top,\n True,\n tokenizer,\n args.null_score_diff_threshold,\n )\n else:\n answers = get_best_predictions(\n examples, features, all_results, n_best_size, args.max_answer_length, False, False, True, False,\n )\n\n answer_list = [{\"id\": answer[\"id\"], \"answer\": answer[\"answer\"][:-1]} for answer in answers]\n probability_list = [{\"id\": answer[\"id\"], \"probability\": answer[\"probability\"][:-1]} for answer in answers]\n\n return answer_list, probability_list\n\n def calculate_results(self, truth, 
predictions, **kwargs):\n truth_dict = {}\n questions_dict = {}\n for item in truth:\n for answer in item[\"qas\"]:\n if answer[\"answers\"]:\n truth_dict[answer[\"id\"]] = answer[\"answers\"][0][\"text\"]\n else:\n truth_dict[answer[\"id\"]] = \"\"\n questions_dict[answer[\"id\"]] = answer[\"question\"]\n\n correct = 0\n incorrect = 0\n similar = 0\n correct_text = {}\n incorrect_text = {}\n similar_text = {}\n predicted_answers = []\n true_answers = []\n\n for q_id, answer in truth_dict.items():\n predicted_answers.append(predictions[q_id])\n true_answers.append(answer)\n if predictions[q_id].strip() == answer.strip():\n correct += 1\n correct_text[q_id] = answer\n elif predictions[q_id].strip() in answer.strip() or answer.strip() in predictions[q_id].strip():\n similar += 1\n similar_text[q_id] = {\n \"truth\": answer,\n \"predicted\": predictions[q_id],\n \"question\": questions_dict[q_id],\n }\n else:\n incorrect += 1\n incorrect_text[q_id] = {\n \"truth\": answer,\n \"predicted\": predictions[q_id],\n \"question\": questions_dict[q_id],\n }\n\n extra_metrics = {}\n for metric, func in kwargs.items():\n extra_metrics[metric] = func(true_answers, predicted_answers)\n\n result = {\"correct\": correct, \"similar\": similar, \"incorrect\": incorrect, **extra_metrics}\n\n texts = {\n \"correct_text\": correct_text,\n \"similar_text\": similar_text,\n \"incorrect_text\": incorrect_text,\n }\n\n return result, texts\n\n def _move_model_to_device(self):\n self.model.to(self.device)\n\n def _get_last_metrics(self, metric_values):\n return {metric: values[-1] for metric, values in metric_values.items()}\n\n def _get_inputs_dict(self, batch):\n inputs = {\n \"input_ids\": batch[0],\n \"attention_mask\": batch[1],\n \"token_type_ids\": batch[2],\n \"start_positions\": batch[3],\n \"end_positions\": batch[4],\n }\n\n if self.args.model_type in [\"xlm\", \"roberta\", \"distilbert\", \"camembert\", \"electra\", \"xlmroberta\", \"bart\"]:\n del inputs[\"token_type_ids\"]\n\n if self.args.model_type in [\"xlnet\", \"xlm\"]:\n inputs.update({\"cls_index\": batch[5], \"p_mask\": batch[6]})\n\n return inputs\n\n def _create_training_progress_scores(self, **kwargs):\n extra_metrics = {key: [] for key in kwargs}\n training_progress_scores = {\n \"global_step\": [],\n \"correct\": [],\n \"similar\": [],\n \"incorrect\": [],\n \"train_loss\": [],\n \"eval_loss\": [],\n **extra_metrics,\n }\n\n return training_progress_scores\n\n def save_model(self, output_dir=None, optimizer=None, scheduler=None, model=None, results=None):\n if not output_dir:\n output_dir = self.args.output_dir\n os.makedirs(output_dir, exist_ok=True)\n\n if model and not self.args.no_save:\n # Take care of distributed/parallel training\n model_to_save = model.module if hasattr(model, \"module\") else model\n model_to_save.save_pretrained(output_dir)\n self.tokenizer.save_pretrained(output_dir)\n torch.save(self.args, os.path.join(output_dir, \"training_args.bin\"))\n if optimizer and scheduler and self.args.save_optimizer_and_scheduler:\n torch.save(optimizer.state_dict(), os.path.join(output_dir, \"optimizer.pt\"))\n torch.save(scheduler.state_dict(), os.path.join(output_dir, \"scheduler.pt\"))\n self.save_model_args(output_dir)\n\n if results:\n output_eval_file = os.path.join(output_dir, \"eval_results.txt\")\n with open(output_eval_file, \"w\") as writer:\n for key in sorted(results.keys()):\n writer.write(\"{} = {}\\n\".format(key, str(results[key])))\n\n def save_model_args(self, output_dir):\n os.makedirs(output_dir, 
exist_ok=True)\n self.args.save(output_dir)\n\n def _load_model_args(self, input_dir):\n args = QuestionAnsweringArgs()\n args.load(input_dir)\n return args\n\n def get_named_parameters(self):\n return [n for n, p in self.model.named_parameters()]\n" ]
[ [ "numpy.random.seed", "torch.load", "torch.manual_seed", "torch.utils.data.SequentialSampler", "torch.utils.data.TensorDataset", "torch.utils.data.DataLoader", "torch.utils.data.RandomSampler", "pandas.DataFrame", "torch.tensor", "torch.cuda.amp.autocast", "torch.cuda.amp.GradScaler", "torch.quantization.quantize_dynamic", "torch.no_grad", "torch.cuda.is_available", "torch.cuda.manual_seed_all", "torch.device", "torch.nn.DataParallel", "torch.save" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [ "0.23", "0.21", "2.0", "1.4", "0.19", "1.1", "1.5", "1.2", "0.24", "0.20", "1.0", "0.25", "1.3" ], "scipy": [], "tensorflow": [] } ]
Global19/onnxconverter-common
[ "09827b56c0d4f359952aac8e0160ff6290ac37f2" ]
[ "onnxconverter_common/float16.py" ]
[ "# Copyright (c) Microsoft Corporation. All rights reserved.\n# Licensed under the MIT License. See License.txt in the project root for\n# license information.\n###########################################################################\n\nimport itertools\nimport numpy as np\nimport onnx\nfrom onnx import helper, numpy_helper\nfrom onnx import onnx_pb as onnx_proto\n\n\ndef _npfloat16_to_int(np_list):\n '''\n Convert numpy float16 to python int.\n\n :param np_list: numpy float16 list\n :return int_list: python int list\n '''\n return [int(bin(_.view('H'))[2:].zfill(16), 2) for _ in np_list]\n\n\ndef convert_np_to_float16(np_array, min_positive_val=1e-7, max_finite_val=1e4):\n '''\n Convert float32 numpy array to float16 without changing sign or finiteness.\n Positive values less than min_positive_val are mapped to min_positive_val.\n Positive finite values greater than max_finite_val are mapped to max_finite_val.\n Similar for negative values. NaN, 0, inf, and -inf are unchanged.\n '''\n def between(a, b, c):\n return np.logical_and(a < b, b < c)\n np_array = np.where(between(0, np_array, min_positive_val), min_positive_val, np_array)\n np_array = np.where(between(-min_positive_val, np_array, 0), -min_positive_val, np_array)\n np_array = np.where(between(max_finite_val, np_array, float('inf')), max_finite_val, np_array)\n np_array = np.where(between(float('-inf'), np_array, -max_finite_val), -max_finite_val, np_array)\n return np.float16(np_array)\n\n\ndef convert_tensor_float_to_float16(tensor, min_positive_val=1e-7, max_finite_val=1e4):\n '''\n Convert tensor float to float16.\n\n :param tensor: TensorProto object\n :return tensor_float16: converted TensorProto object\n\n Example:\n\n ::\n\n from onnxmltools.utils.float16_converter import convert_tensor_float_to_float16\n new_tensor = convert_tensor_float_to_float16(tensor)\n\n '''\n if not isinstance(tensor, onnx_proto.TensorProto):\n raise ValueError('Expected input type is an ONNX TensorProto but got %s' % type(tensor))\n\n if tensor.data_type == onnx_proto.TensorProto.FLOAT:\n tensor.data_type = onnx_proto.TensorProto.FLOAT16\n # convert float_data (float type) to float16 and write to int32_data\n if tensor.float_data:\n float16_data = convert_np_to_float16(np.array(tensor.float_data), min_positive_val, max_finite_val)\n int_list = _npfloat16_to_int(float16_data)\n tensor.int32_data[:] = int_list\n tensor.float_data[:] = []\n # convert raw_data (bytes type)\n if tensor.raw_data:\n # convert n.raw_data to float\n float32_list = np.fromstring(tensor.raw_data, dtype='float32')\n # convert float to float16\n float16_list = convert_np_to_float16(float32_list, min_positive_val, max_finite_val)\n # convert float16 to bytes and write back to raw_data\n tensor.raw_data = float16_list.tostring()\n return tensor\n\n\ndef make_value_info_from_tensor(tensor):\n shape = numpy_helper.to_array(tensor).shape\n return helper.make_tensor_value_info(tensor.name, tensor.data_type, shape)\n\n\ndef convert_float_to_float16(model, min_positive_val=1e-7, max_finite_val=1e4,\n keep_io_types=False, disable_shape_infer=False):\n '''\n Convert tensor float type in the ONNX ModelProto input to tensor float16.\n\n :param model: ONNX ModelProto object\n :param disable_shape_infer: Type/shape information is needed for conversion to work.\n Set to True only if the model already has type/shape information for all tensors.\n :return: converted ONNX ModelProto object\n\n Examples:\n\n ::\n\n Example 1: Convert ONNX ModelProto object:\n from 
onnxmltools.utils.float16_converter import convert_float_to_float16\n new_onnx_model = convert_float_to_float16(onnx_model)\n\n Example 2: Convert ONNX model binary file:\n from onnxmltools.utils.float16_converter import convert_float_to_float16\n from onnxmltools.utils import load_model, save_model\n onnx_model = load_model('model.onnx')\n new_onnx_model = convert_float_to_float16(onnx_model)\n save_model(new_onnx_model, 'new_model.onnx')\n\n '''\n func_infer_shape = None\n if not disable_shape_infer and onnx.__version__ >= '1.2':\n try:\n from onnx.shape_inference import infer_shapes\n func_infer_shape = infer_shapes\n finally:\n pass\n\n if not isinstance(model, onnx_proto.ModelProto):\n raise ValueError('Expected model type is an ONNX ModelProto but got %s' % type(model))\n\n # create black list\n op_black_list = ['ArrayFeatureExtractor', 'Binarizer', 'CastMap', 'CategoryMapper', 'DictVectorizer',\n 'FeatureVectorizer', 'Imputer', 'LabelEncoder', 'LinearClassifier', 'LinearRegressor',\n 'Normalizer', 'OneHotEncoder', 'SVMClassifier', 'SVMRegressor', 'Scaler', 'TreeEnsembleClassifier',\n 'TreeEnsembleRegressor', 'ZipMap', 'NonMaxSuppression', 'TopK', 'RoiAlign', 'Resize',\n 'Range', 'CumSum', 'Min', 'Max']\n # create a queue for BFS\n queue = []\n value_info_list = []\n node_list = []\n # type inference on input model\n if func_infer_shape is not None:\n model = func_infer_shape(model)\n queue.append(model)\n name_mapping = {}\n graph_io_to_skip = set()\n io_casts = set()\n if keep_io_types:\n for i, n in enumerate(model.graph.input):\n if n.type.tensor_type.elem_type == onnx_proto.TensorProto.FLOAT:\n output_name = 'graph_input_cast_' + str(i)\n name_mapping[n.name] = output_name\n graph_io_to_skip.add(n.name)\n\n node_name = 'graph_input_cast' + str(i)\n new_value_info = model.graph.value_info.add()\n new_value_info.CopyFrom(n)\n new_value_info.name = output_name\n new_value_info.type.tensor_type.elem_type = onnx_proto.TensorProto.FLOAT16\n # add Cast node (from tensor(float) to tensor(float16) after graph input\n new_node = [helper.make_node('Cast', [n.name], [output_name], to=10, name=node_name)]\n model.graph.node.extend(new_node)\n value_info_list.append(new_value_info)\n io_casts.add(node_name)\n\n for i, n in enumerate(model.graph.output):\n if n.type.tensor_type.elem_type == onnx_proto.TensorProto.FLOAT:\n input_name = 'graph_output_cast_' + str(i)\n name_mapping[n.name] = input_name\n graph_io_to_skip.add(n.name)\n\n node_name = 'graph_output_cast' + str(i)\n # add Cast node (from tensor(float16) to tensor(float) before graph output\n new_value_info = model.graph.value_info.add()\n new_value_info.CopyFrom(n)\n new_value_info.name = input_name\n new_value_info.type.tensor_type.elem_type = onnx_proto.TensorProto.FLOAT16\n new_node = [helper.make_node('Cast', [input_name], [n.name], to=1, name=node_name)]\n model.graph.node.extend(new_node)\n value_info_list.append(new_value_info)\n io_casts.add(node_name)\n\n while queue:\n next_level = []\n for q in queue:\n # if q is model, push q.graph (GraphProto)\n if isinstance(q, onnx_proto.ModelProto):\n next_level.append(q.graph)\n # if q is model.graph, push q.node.attribute (AttributeProto)\n if isinstance(q, onnx_proto.GraphProto):\n for n in q.node:\n # if n is in the black list (doesn't support float16), no conversion for the node,\n # and save the node for further processing\n if n.name in io_casts:\n continue\n for i in range(len(n.input)):\n if n.input[i] in name_mapping:\n n.input[i] = name_mapping[n.input[i]]\n for i in 
range(len(n.output)):\n if n.output[i] in name_mapping:\n n.output[i] = name_mapping[n.output[i]]\n if n.op_type in op_black_list:\n node_list.append(n)\n else:\n if n.op_type == 'Cast':\n for attr in n.attribute:\n if attr.name == 'to' and attr.i == 1:\n attr.i = 10\n break\n for attr in n.attribute:\n next_level.append(attr)\n # if q is model.graph.node.attribute, push q.g and q.graphs (GraphProto)\n # and process node.attribute.t and node.attribute.tensors (TensorProto)\n if isinstance(q, onnx_proto.AttributeProto):\n next_level.append(q.g)\n for n in q.graphs:\n next_level.append(n)\n q.t.CopyFrom(convert_tensor_float_to_float16(q.t, min_positive_val, max_finite_val))\n for n in q.tensors:\n n = convert_tensor_float_to_float16(n, min_positive_val, max_finite_val)\n # if q is graph, process graph.initializer(TensorProto), input, output and value_info (ValueInfoProto)\n if isinstance(q, onnx_proto.GraphProto):\n for n in q.initializer: # TensorProto type\n if n.data_type == onnx_proto.TensorProto.FLOAT:\n n = convert_tensor_float_to_float16(n, min_positive_val, max_finite_val)\n value_info_list.append(make_value_info_from_tensor(n))\n # for all ValueInfoProto with tensor(float) type in input, output and value_info, convert them to\n # tensor(float16) except map and seq(map). And save them in value_info_list for further processing\n for n in itertools.chain(q.input, q.output, q.value_info):\n if n.type.tensor_type.elem_type == onnx_proto.TensorProto.FLOAT:\n if n.name not in graph_io_to_skip:\n n.type.tensor_type.elem_type = onnx_proto.TensorProto.FLOAT16\n value_info_list.append(n)\n queue = next_level\n\n # process the nodes in black list that doesn't support tensor(float16)\n for node in node_list:\n # if input's name is in the value_info_list meaning input is tensor(float16) type,\n # insert a float16 to float Cast node before the node,\n # change current node's input name and create new value_info for the new name\n for i in range(len(node.input)):\n input = node.input[i]\n for value_info in value_info_list:\n if input == value_info.name:\n # create new value_info for current node's new input name\n new_value_info = model.graph.value_info.add()\n new_value_info.CopyFrom(value_info)\n output_name = node.name + '_input_cast_' + str(i)\n new_value_info.name = output_name\n new_value_info.type.tensor_type.elem_type = onnx_proto.TensorProto.FLOAT\n # add Cast node (from tensor(float16) to tensor(float) before current node\n node_name = node.name + '_input_cast' + str(i)\n new_node = [helper.make_node('Cast', [input], [output_name], to=1, name=node_name)]\n model.graph.node.extend(new_node)\n # change current node's input name\n node.input[i] = output_name\n break\n # if output's name is in the value_info_list meaning output is tensor(float16) type, insert a float to\n # float16 Cast node after the node, change current node's output name and create new value_info for the new name\n for i in range(len(node.output)):\n output = node.output[i]\n for value_info in value_info_list:\n if output == value_info.name:\n # create new value_info for current node's new output\n new_value_info = model.graph.value_info.add()\n new_value_info.CopyFrom(value_info)\n input_name = node.name + '_output_cast_' + str(i)\n new_value_info.name = input_name\n new_value_info.type.tensor_type.elem_type = onnx_proto.TensorProto.FLOAT\n # add Cast node (from tensor(float) to tensor(float16) after current node\n node_name = node.name + '_output_cast' + str(i)\n new_node = [helper.make_node('Cast', [input_name], 
[output], to=10, name=node_name)]\n model.graph.node.extend(new_node)\n # change current node's input name\n node.output[i] = input_name\n break\n return model\n\n\ndef convert_float_to_float16_model_path(model_path, min_positive_val=1e-7, max_finite_val=1e4, keep_io_types=False):\n '''\n Convert tensor float type in the ONNX Model to tensor float16.\n *It is to fix an issue that infer_shapes func cannot be used to infer >2GB models.\n *But this function can be applied to all model sizes.\n :param model_path: ONNX Model path\n :return: converted ONNX ModelProto object\n Examples\n ::\n #Convert to ONNX ModelProto object and save model binary file:\n from onnxmltools.utils.float16_converter import convert_float_to_float16_model_path\n new_onnx_model = convert_float_to_float16_model_path('model.onnx')\n onnx.save(new_onnx_model, 'new_model.onnx')\n '''\n\n disable_shape_infer = False\n if onnx.__version__ >= '1.8':\n try:\n # infer_shapes_path can be applied to all model sizes\n from onnx.shape_inference import infer_shapes_path\n import tempfile\n import os\n # shape_infer_model_path should be in the same folder of model_path\n with tempfile.NamedTemporaryFile(dir=os.path.dirname(model_path)) as tmpfile:\n shape_infer_model_path = tmpfile.name\n infer_shapes_path(model_path, shape_infer_model_path)\n model = onnx.load(shape_infer_model_path)\n disable_shape_infer = True\n finally:\n pass\n if not disable_shape_infer:\n model = onnx.load(model_path)\n return convert_float_to_float16(model, min_positive_val, max_finite_val, keep_io_types, disable_shape_infer)\n" ]
[ [ "numpy.array", "numpy.float16", "numpy.logical_and", "numpy.fromstring" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
KendallPark/struct2tensor
[ "1b9ada0c4018323a0549cc13437c561d39a46a05", "7e42ed9938dfb88aa451f07b943b6308cd758411" ]
[ "struct2tensor/expression_impl/proto_test_util.py", "struct2tensor/expression_impl/reroot.py" ]
[ "# Copyright 2019 Google LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\"Utilities for tests on proto expressions.\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport tensorflow as tf\n\nfrom struct2tensor.test import test_pb2\nfrom struct2tensor.expression_impl import proto\nfrom google.protobuf import text_format\n\n\ndef text_to_tensor(text_list, example_proto_clz):\n as_protos = [text_format.Parse(x, example_proto_clz()) for x in text_list]\n serialized = [x.SerializeToString() for x in as_protos]\n return tf.constant(serialized)\n\n\ndef text_to_expression(text_list, example_proto_clz):\n \"\"\"Create an expression from a list of text format protos.\"\"\"\n return proto.create_expression_from_proto(\n text_to_tensor(text_list, example_proto_clz),\n example_proto_clz().DESCRIPTOR)\n\n\ndef _get_expression_from_session_empty_user_info():\n r\"\"\"Run create_root_prensor on a very deep tree.\n\n In addition, the user_info is empty.\n\n ------*-----------------\n / \\\n ---session0---- session1\n / \\ \\ / \\\n event0 event1 event2 event3 event4\n / \\ | \\ | \\ / / | \\\n act0 act1 act2 act3 act4 act5 act6 act7 act8 act9\n | | | | | | | | |\n a b c e f g h i j\n\n Returns:\n A RootPrensor with the above structure.\n \"\"\"\n return text_to_expression([\n \"\"\"\n event:{\n action:{\n doc_id:\"a\"\n }\n action:{\n doc_id:\"b\"\n }\n event_id: \"A\"\n }\n event:{\n action:{\n doc_id:\"c\"\n }\n action:{\n }\n event_id: \"B\"\n }\n event:{\n action:{\n doc_id:\"e\"\n }\n action:{\n doc_id:\"f\"\n }\n event_id: \"C\"\n }\"\"\", \"\"\"\n event:{\n action:{\n doc_id:\"g\"\n }\n }\n event:{\n event_id: \"D\"\n action:{\n doc_id:\"h\"\n }\n action:{\n doc_id:\"i\"\n }\n action:{\n doc_id:\"j\"\n }\n }\"\"\"\n ], test_pb2.Session)\n", "# Copyright 2019 Google LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\"Reroot to a subtree, maintaining an input proto index.\n\nreroot is similar to get_descendant_or_error. However, this method allows\nyou to call create_proto_index(...) 
later on, that gives you a reference to the\noriginal proto.\n\n\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\n\nfrom __future__ import print_function\n\nfrom struct2tensor import calculate_options\nfrom struct2tensor import expression\nfrom struct2tensor import expression_add\nfrom struct2tensor import path\nfrom struct2tensor import prensor\nimport tensorflow as tf\nfrom typing import FrozenSet, Optional, Sequence\n\n\ndef reroot(root,\n source_path):\n \"\"\"Reroot to a new path, maintaining a input proto index.\n\n Similar to root.get_descendant_or_error(source_path): however, this\n method retains the ability to get a map to the original index.\n\n Args:\n root: the original root.\n source_path: the path to the new root.\n\n Returns:\n the new root.\n \"\"\"\n\n new_root = root\n for step in source_path.field_list:\n new_root = _RerootExpression(new_root, step)\n return new_root\n\n\ndef create_proto_index_field(root,\n new_field_name\n ):\n return expression_add.add_paths(\n root, {path.Path([new_field_name]): _InputProtoIndexExpression(root)})\n\n\nclass _RerootRootNodeTensor(prensor.RootNodeTensor):\n \"\"\"The reroot root node.\n\n This contains a map from a current index to the original index of a proto.\n \"\"\"\n\n def __init__(self, size, input_proto_index):\n super(_RerootRootNodeTensor, self).__init__(size)\n self._input_proto_index = input_proto_index\n\n @property\n def input_proto_index(self):\n return self._input_proto_index\n\n\ndef _get_proto_index_parent_index(node):\n return tf.range(node.size)\n\n\ndef _get_input_proto_index(node):\n if isinstance(node, _RerootRootNodeTensor):\n return node.input_proto_index\n return _get_proto_index_parent_index(node)\n\n\nclass _RerootExpression(expression.Expression):\n \"\"\"Reroot to a new path, maintaining a input proto index.\"\"\"\n\n def __init__(self, original_root,\n field_name):\n super(_RerootExpression, self).__init__(True, None)\n self._field_name = field_name\n self._original_root = original_root\n self._new_root = original_root.get_child_or_error(field_name)\n if self._new_root.type is not None:\n raise ValueError(\"New root must be a message type: {}\".format(\n str(self._field_name)))\n # TODO(martinz): Check that the \"original root source expression\" has a type\n # in (_RerootExpression, prensor._ProtoRootExpression)\n # To do this, we need a general technique similar to\n # expression_add._is_true_source_expression: however, this should also cover\n # intermediate operations like \"project\".\n # Since this check is not present, if it should have fired, there will be\n # an error when calculate(...) 
is called.\n\n def get_source_expressions(self):\n return [self._original_root, self._new_root]\n\n def calculate(\n self,\n sources,\n destinations,\n options,\n side_info = None):\n [old_root_value, new_root_value] = sources\n if isinstance(old_root_value, prensor.RootNodeTensor) and isinstance(\n new_root_value, prensor.ChildNodeTensor):\n old_input_proto_index = _get_input_proto_index(old_root_value)\n # Notice that the \"gather\" operation is similar to promote.\n return _RerootRootNodeTensor(\n tf.size(new_root_value.parent_index, out_type=tf.int64),\n tf.gather(old_input_proto_index, new_root_value.parent_index))\n raise ValueError(\"Source types incorrect\")\n\n def calculation_is_identity(self):\n return False\n\n def calculation_equal(self, expr):\n # Although path can vary, it is not used in the calculation, just to\n return isinstance(expr, _RerootExpression)\n\n def _get_child_impl(self,\n field_name):\n return self._new_root.get_child(field_name)\n\n def known_field_names(self):\n return self._new_root.known_field_names()\n\n\nclass _InputProtoIndexExpression(expression.Leaf):\n \"\"\"A proto index expression.\"\"\"\n\n def __init__(self, root):\n \"\"\"Constructor for proto index expression.\n\n Args:\n root: an expression that must return a RootNodeTensor.\n \"\"\"\n super(_InputProtoIndexExpression, self).__init__(\n is_repeated=False, my_type=tf.int64)\n self._root = root\n\n def get_source_expressions(self):\n return [self._root]\n\n def calculate(\n self,\n sources,\n destinations,\n options,\n side_info = None):\n [root_node] = sources\n # The following check ensures not just that we can calculate the value,\n # but that no \"improper\" reroots were done.\n if isinstance(root_node, prensor.RootNodeTensor):\n return prensor.LeafNodeTensor(\n _get_proto_index_parent_index(root_node),\n _get_input_proto_index(root_node),\n is_repeated=False)\n raise ValueError(\n \"Illegal operation: expected a true root node: got {}\".format(\n str(root_node)))\n\n def calculation_is_identity(self):\n return False\n\n def calculation_equal(self, expr):\n # Although path can vary, it is not used in the calculation, just to\n return isinstance(expr, _InputProtoIndexExpression)\n" ]
[ [ "tensorflow.constant" ], [ "tensorflow.gather", "tensorflow.size", "tensorflow.range" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [ "1.10", "2.7", "1.12", "2.6", "2.2", "1.4", "2.3", "2.4", "2.9", "1.5", "1.7", "2.5", "0.12", "1.0", "2.8", "1.2", "2.10" ] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [ "1.10", "1.12", "1.4", "1.13", "1.5", "1.7", "0.12", "1.0", "1.2" ] } ]
NakedKoala/CenterTrack
[ "6b5e2ccfa9e6975892bf0cd6fae1d51086a25b58" ]
[ "src/lib/dataset/generic_dataset.py" ]
[ "from __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport numpy as np\nimport math\nimport json\nimport cv2\nimport os\nfrom collections import defaultdict\n\nimport pycocotools.coco as coco\nimport torch\nimport torch.utils.data as data\n\nfrom utils.image import flip, color_aug\nfrom utils.image import get_affine_transform, affine_transform\nfrom utils.image import gaussian_radius, draw_umich_gaussian\nimport copy\n\nclass GenericDataset(data.Dataset):\n is_fusion_dataset = False\n default_resolution = None\n num_categories = None\n class_name = None\n # cat_ids: map from 'category_id' in the annotation files to 1..num_categories\n # Not using 0 because 0 is used for don't care region and ignore loss.\n cat_ids = None\n max_objs = None\n rest_focal_length = 1200\n num_joints = 17\n flip_idx = [[1, 2], [3, 4], [5, 6], [7, 8], [9, 10], \n [11, 12], [13, 14], [15, 16]]\n edges = [[0, 1], [0, 2], [1, 3], [2, 4], \n [4, 6], [3, 5], [5, 6], \n [5, 7], [7, 9], [6, 8], [8, 10], \n [6, 12], [5, 11], [11, 12], \n [12, 14], [14, 16], [11, 13], [13, 15]]\n\n raft_mean = np.array([0.91924369, 0.9259629, 0.91308366],\n dtype=np.float32).reshape(1, 1, 3)\n raft_std = np.array([0.16230706, 0.12400776, 0.15449273],\n dtype=np.float32).reshape(1, 1, 3)\n\n\n mean = np.array([0.40789654, 0.44719302, 0.47026115],\n dtype=np.float32).reshape(1, 1, 3)\n std = np.array([0.28863828, 0.27408164, 0.27809835],\n dtype=np.float32).reshape(1, 1, 3)\n _eig_val = np.array([0.2141788, 0.01817699, 0.00341571],\n dtype=np.float32)\n _eig_vec = np.array([\n [-0.58752847, -0.69563484, 0.41340352],\n [-0.5832747, 0.00994535, -0.81221408],\n [-0.56089297, 0.71832671, 0.41158938]\n ], dtype=np.float32)\n ignore_val = 1\n nuscenes_att_range = {0: [0, 1], 1: [0, 1], 2: [2, 3, 4], 3: [2, 3, 4], \n 4: [2, 3, 4], 5: [5, 6, 7], 6: [5, 6, 7], 7: [5, 6, 7]}\n def __init__(self, opt=None, split=None, ann_path=None, img_dir=None):\n super(GenericDataset, self).__init__()\n if opt is not None and split is not None:\n self.split = split\n self.opt = opt\n self._data_rng = np.random.RandomState(123)\n \n if ann_path is not None and img_dir is not None:\n print('==> initializing {} data from {}, \\n images from {} ...'.format(\n split, ann_path, img_dir))\n self.coco = coco.COCO(ann_path)\n self.images = self.coco.getImgIds()\n\n if opt.tracking:\n if not ('videos' in self.coco.dataset):\n self.fake_video_data()\n print('Creating video index!')\n self.video_to_images = defaultdict(list)\n for image in self.coco.dataset['images']:\n self.video_to_images[image['video_id']].append(image)\n \n self.img_dir = img_dir\n\n def __getitem__(self, index):\n opt = self.opt\n img, raft, anns, img_info, img_path = self._load_data(index)\n\n height, width = img.shape[0], img.shape[1]\n c = np.array([img.shape[1] / 2., img.shape[0] / 2.], dtype=np.float32)\n s = max(img.shape[0], img.shape[1]) * 1.0 if not self.opt.not_max_crop \\\n else np.array([img.shape[1], img.shape[0]], np.float32)\n aug_s, rot, flipped = 1, 0, 0\n if self.split == 'train':\n c, aug_s, rot = self._get_aug_param(c, s, width, height)\n s = s * aug_s\n if np.random.random() < opt.flip:\n flipped = 1\n img = img[:, ::-1, :]\n raft = raft[:, ::-1, :]\n anns = self._flip_anns(anns, width)\n\n trans_input = get_affine_transform(\n c, s, rot, [opt.input_w, opt.input_h])\n trans_output = get_affine_transform(\n c, s, rot, [opt.output_w, opt.output_h])\n inp = self._get_input(img, trans_input)\n raft_proc = 
self._get_input_raft(raft, trans_input)\n ret = {'image': inp, 'raft': raft_proc}\n gt_det = {'bboxes': [], 'scores': [], 'clses': [], 'cts': []}\n\n pre_cts, track_ids = None, None\n if opt.tracking:\n pre_image, _, pre_anns, frame_dist = self._load_pre_data(\n img_info['video_id'], img_info['frame_id'], \n img_info['sensor_id'] if 'sensor_id' in img_info else 1)\n if flipped:\n pre_image = pre_image[:, ::-1, :].copy()\n pre_anns = self._flip_anns(pre_anns, width)\n if opt.same_aug_pre and frame_dist != 0:\n trans_input_pre = trans_input \n trans_output_pre = trans_output\n else:\n c_pre, aug_s_pre, _ = self._get_aug_param(\n c, s, width, height, disturb=True)\n s_pre = s * aug_s_pre\n trans_input_pre = get_affine_transform(\n c_pre, s_pre, rot, [opt.input_w, opt.input_h])\n trans_output_pre = get_affine_transform(\n c_pre, s_pre, rot, [opt.output_w, opt.output_h])\n pre_img = self._get_input(pre_image, trans_input_pre)\n pre_hm, pre_cts, track_ids = self._get_pre_dets(\n pre_anns, trans_input_pre, trans_output_pre)\n ret['pre_img'] = pre_img\n if opt.pre_hm:\n ret['pre_hm'] = pre_hm\n \n ### init samples\n self._init_ret(ret, gt_det)\n calib = self._get_calib(img_info, width, height)\n \n num_objs = min(len(anns), self.max_objs)\n for k in range(num_objs):\n ann = anns[k]\n cls_id = int(self.cat_ids[ann['category_id']])\n if cls_id > self.opt.num_classes or cls_id <= -999:\n continue\n bbox, bbox_amodal = self._get_bbox_output(\n ann['bbox'], trans_output, height, width)\n if cls_id <= 0 or ('iscrowd' in ann and ann['iscrowd'] > 0):\n self._mask_ignore_or_crowd(ret, cls_id, bbox)\n continue\n self._add_instance(\n ret, gt_det, k, cls_id, bbox, bbox_amodal, ann, trans_output, aug_s, \n calib, pre_cts, track_ids)\n\n if self.opt.debug > 0:\n gt_det = self._format_gt_det(gt_det)\n meta = {'c': c, 's': s, 'gt_det': gt_det, 'img_id': img_info['id'],\n 'img_path': img_path, 'calib': calib,\n 'flipped': flipped}\n ret['meta'] = meta\n return ret\n\n\n def get_default_calib(self, width, height):\n calib = np.array([[self.rest_focal_length, 0, width / 2, 0], \n [0, self.rest_focal_length, height / 2, 0], \n [0, 0, 1, 0]])\n return calib\n\n def _load_image_anns(self, img_id, coco, img_dir):\n img_info = coco.loadImgs(ids=[img_id])[0]\n file_name = img_info['file_name']\n \n img_path = os.path.join(img_dir, file_name)\n\n # RAFT flow maps live in a sibling 'raft' directory, named by the\n # previous frame index; keep the frame before that as a fallback.\n img_index = int(img_path.split('/')[-1].split('.')[0])\n raft_index_str = str(img_index - 1).zfill(5)\n raft_index_str_last = str(img_index - 2).zfill(5)\n raft_path = \"/\".join(img_path.split('/')[:-2] + ['raft']) + \"/\" + raft_index_str + '.png'\n raft_path_last = \"/\".join(img_path.split('/')[:-2] + ['raft']) + \"/\" + raft_index_str_last + '.png'\n \n ann_ids = coco.getAnnIds(imgIds=[img_id])\n anns = copy.deepcopy(coco.loadAnns(ids=ann_ids))\n\n img = cv2.imread(img_path)\n # cv2.imread returns None on a missing file instead of raising, so the\n # fallback needs an explicit None check rather than try/except.\n raft = cv2.imread(raft_path)\n if raft is None:\n raft = cv2.imread(raft_path_last)\n \n return img, raft, anns, img_info, img_path\n\n def _load_data(self, index):\n coco = self.coco\n img_dir = self.img_dir\n img_id = self.images[index]\n img, raft, anns, img_info, img_path = self._load_image_anns(img_id, coco, img_dir)\n\n return img, raft, anns, img_info, img_path\n\n\n def _load_pre_data(self, video_id, frame_id, sensor_id=1):\n img_infos = 
self.video_to_images[video_id]\n # If training, randomly sample a nearby frame as the \"previous\" frame\n # If testing, get the exact previous frame\n if 'train' in self.split:\n img_ids = [(img_info['id'], img_info['frame_id']) \\\n for img_info in img_infos \\\n if abs(img_info['frame_id'] - frame_id) < self.opt.max_frame_dist and \\\n (not ('sensor_id' in img_info) or img_info['sensor_id'] == sensor_id)]\n else:\n img_ids = [(img_info['id'], img_info['frame_id']) \\\n for img_info in img_infos \\\n if (img_info['frame_id'] - frame_id) == -1 and \\\n (not ('sensor_id' in img_info) or img_info['sensor_id'] == sensor_id)]\n if len(img_ids) == 0:\n img_ids = [(img_info['id'], img_info['frame_id']) \\\n for img_info in img_infos \\\n if (img_info['frame_id'] - frame_id) == 0 and \\\n (not ('sensor_id' in img_info) or img_info['sensor_id'] == sensor_id)]\n rand_id = np.random.choice(len(img_ids))\n img_id, pre_frame_id = img_ids[rand_id]\n frame_dist = abs(frame_id - pre_frame_id)\n img, raft, anns, _, _ = self._load_image_anns(img_id, self.coco, self.img_dir)\n return img, raft, anns, frame_dist\n\n\n def _get_pre_dets(self, anns, trans_input, trans_output):\n hm_h, hm_w = self.opt.input_h, self.opt.input_w\n down_ratio = self.opt.down_ratio\n trans = trans_input\n return_hm = self.opt.pre_hm\n pre_hm = np.zeros((1, hm_h, hm_w), dtype=np.float32) if return_hm else None\n pre_cts, track_ids = [], []\n for ann in anns:\n cls_id = int(self.cat_ids[ann['category_id']])\n if cls_id > self.opt.num_classes or cls_id <= -99 or \\\n ('iscrowd' in ann and ann['iscrowd'] > 0):\n continue\n bbox = self._coco_box_to_bbox(ann['bbox'])\n bbox[:2] = affine_transform(bbox[:2], trans)\n bbox[2:] = affine_transform(bbox[2:], trans)\n bbox[[0, 2]] = np.clip(bbox[[0, 2]], 0, hm_w - 1)\n bbox[[1, 3]] = np.clip(bbox[[1, 3]], 0, hm_h - 1)\n h, w = bbox[3] - bbox[1], bbox[2] - bbox[0]\n max_rad = 1\n if (h > 0 and w > 0):\n radius = gaussian_radius((math.ceil(h), math.ceil(w)))\n radius = max(0, int(radius)) \n max_rad = max(max_rad, radius)\n ct = np.array(\n [(bbox[0] + bbox[2]) / 2, (bbox[1] + bbox[3]) / 2], dtype=np.float32)\n ct0 = ct.copy()\n conf = 1\n\n ct[0] = ct[0] + np.random.randn() * self.opt.hm_disturb * w\n ct[1] = ct[1] + np.random.randn() * self.opt.hm_disturb * h\n conf = 1 if np.random.random() > self.opt.lost_disturb else 0\n \n ct_int = ct.astype(np.int32)\n if conf == 0:\n pre_cts.append(ct / down_ratio)\n else:\n pre_cts.append(ct0 / down_ratio)\n\n track_ids.append(ann['track_id'] if 'track_id' in ann else -1)\n if return_hm:\n draw_umich_gaussian(pre_hm[0], ct_int, radius, k=conf)\n\n if np.random.random() < self.opt.fp_disturb and return_hm:\n ct2 = ct0.copy()\n # Hard code heatmap disturb ratio, haven't tried other numbers.\n ct2[0] = ct2[0] + np.random.randn() * 0.05 * w\n ct2[1] = ct2[1] + np.random.randn() * 0.05 * h \n ct2_int = ct2.astype(np.int32)\n draw_umich_gaussian(pre_hm[0], ct2_int, radius, k=conf)\n\n return pre_hm, pre_cts, track_ids\n\n def _get_border(self, border, size):\n i = 1\n while size - border // i <= border // i:\n i *= 2\n return border // i\n\n\n def _get_aug_param(self, c, s, width, height, disturb=False):\n if (not self.opt.not_rand_crop) and not disturb:\n aug_s = np.random.choice(np.arange(0.6, 1.4, 0.1))\n w_border = self._get_border(128, width)\n h_border = self._get_border(128, height)\n c[0] = np.random.randint(low=w_border, high=width - w_border)\n c[1] = np.random.randint(low=h_border, high=height - h_border)\n else:\n sf = self.opt.scale\n cf = 
self.opt.shift\n if type(s) == float:\n s = [s, s]\n c[0] += s[0] * np.clip(np.random.randn()*cf, -2*cf, 2*cf)\n c[1] += s[1] * np.clip(np.random.randn()*cf, -2*cf, 2*cf)\n aug_s = np.clip(np.random.randn()*sf + 1, 1 - sf, 1 + sf)\n \n if np.random.random() < self.opt.aug_rot:\n rf = self.opt.rotate\n rot = np.clip(np.random.randn()*rf, -rf*2, rf*2)\n else:\n rot = 0\n \n return c, aug_s, rot\n\n\n def _flip_anns(self, anns, width):\n for k in range(len(anns)):\n bbox = anns[k]['bbox']\n anns[k]['bbox'] = [\n width - bbox[0] - 1 - bbox[2], bbox[1], bbox[2], bbox[3]]\n \n if 'hps' in self.opt.heads and 'keypoints' in anns[k]:\n keypoints = np.array(anns[k]['keypoints'], dtype=np.float32).reshape(\n self.num_joints, 3)\n keypoints[:, 0] = width - keypoints[:, 0] - 1\n for e in self.flip_idx:\n keypoints[e[0]], keypoints[e[1]] = \\\n keypoints[e[1]].copy(), keypoints[e[0]].copy()\n anns[k]['keypoints'] = keypoints.reshape(-1).tolist()\n\n if 'rot' in self.opt.heads and 'alpha' in anns[k]:\n anns[k]['alpha'] = np.pi - anns[k]['alpha'] if anns[k]['alpha'] > 0 \\\n else - np.pi - anns[k]['alpha']\n\n if 'amodel_offset' in self.opt.heads and 'amodel_center' in anns[k]:\n anns[k]['amodel_center'][0] = width - anns[k]['amodel_center'][0] - 1\n\n if self.opt.velocity and 'velocity' in anns[k]:\n anns[k]['velocity'] = [-10000, -10000, -10000]\n\n return anns\n\n\n def _get_input(self, img, trans_input):\n inp = cv2.warpAffine(img, trans_input, \n (self.opt.input_w, self.opt.input_h),\n flags=cv2.INTER_LINEAR)\n \n inp = (inp.astype(np.float32) / 255.)\n if self.split == 'train' and not self.opt.no_color_aug:\n color_aug(self._data_rng, inp, self._eig_val, self._eig_vec)\n inp = (inp - self.mean) / self.std\n inp = inp.transpose(2, 0, 1)\n return inp\n\n def _get_input_raft(self, img, trans_input):\n inp = cv2.warpAffine(img, trans_input, \n (self.opt.input_w, self.opt.input_h),\n flags=cv2.INTER_LINEAR)\n \n inp = (inp.astype(np.float32) / 255.)\n if self.split == 'train' and not self.opt.no_color_aug:\n color_aug(self._data_rng, inp, self._eig_val, self._eig_vec)\n inp = (inp - self.raft_mean) / self.raft_std\n inp = inp.transpose(2, 0, 1)\n return inp\n\n\n def _init_ret(self, ret, gt_det):\n max_objs = self.max_objs * self.opt.dense_reg\n ret['hm'] = np.zeros(\n (self.opt.num_classes, self.opt.output_h, self.opt.output_w), \n np.float32)\n ret['ind'] = np.zeros((max_objs), dtype=np.int64)\n ret['cat'] = np.zeros((max_objs), dtype=np.int64)\n ret['mask'] = np.zeros((max_objs), dtype=np.float32)\n\n regression_head_dims = {\n 'reg': 2, 'wh': 2, 'tracking': 2, 'ltrb': 4, 'ltrb_amodal': 4, \n 'nuscenes_att': 8, 'velocity': 3, 'hps': self.num_joints * 2, \n 'dep': 1, 'dim': 3, 'amodel_offset': 2}\n\n for head in regression_head_dims:\n if head in self.opt.heads:\n ret[head] = np.zeros(\n (max_objs, regression_head_dims[head]), dtype=np.float32)\n ret[head + '_mask'] = np.zeros(\n (max_objs, regression_head_dims[head]), dtype=np.float32)\n gt_det[head] = []\n\n if 'hm_hp' in self.opt.heads:\n num_joints = self.num_joints\n ret['hm_hp'] = np.zeros(\n (num_joints, self.opt.output_h, self.opt.output_w), dtype=np.float32)\n ret['hm_hp_mask'] = np.zeros(\n (max_objs * num_joints), dtype=np.float32)\n ret['hp_offset'] = np.zeros(\n (max_objs * num_joints, 2), dtype=np.float32)\n ret['hp_ind'] = np.zeros((max_objs * num_joints), dtype=np.int64)\n ret['hp_offset_mask'] = np.zeros(\n (max_objs * num_joints, 2), dtype=np.float32)\n ret['joint'] = np.zeros((max_objs * num_joints), dtype=np.int64)\n \n if 'rot' in 
self.opt.heads:\n ret['rotbin'] = np.zeros((max_objs, 2), dtype=np.int64)\n ret['rotres'] = np.zeros((max_objs, 2), dtype=np.float32)\n ret['rot_mask'] = np.zeros((max_objs), dtype=np.float32)\n gt_det.update({'rot': []})\n\n\n def _get_calib(self, img_info, width, height):\n if 'calib' in img_info:\n calib = np.array(img_info['calib'], dtype=np.float32)\n else:\n calib = np.array([[self.rest_focal_length, 0, width / 2, 0], \n [0, self.rest_focal_length, height / 2, 0], \n [0, 0, 1, 0]])\n return calib\n\n\n def _ignore_region(self, region, ignore_val=1):\n np.maximum(region, ignore_val, out=region)\n\n\n def _mask_ignore_or_crowd(self, ret, cls_id, bbox):\n # mask out crowd region, only rectangular mask is supported\n if cls_id == 0: # ignore all classes\n self._ignore_region(ret['hm'][:, int(bbox[1]): int(bbox[3]) + 1, \n int(bbox[0]): int(bbox[2]) + 1])\n else:\n # mask out one specific class\n self._ignore_region(ret['hm'][abs(cls_id) - 1, \n int(bbox[1]): int(bbox[3]) + 1, \n int(bbox[0]): int(bbox[2]) + 1])\n if ('hm_hp' in ret) and cls_id <= 1:\n self._ignore_region(ret['hm_hp'][:, int(bbox[1]): int(bbox[3]) + 1, \n int(bbox[0]): int(bbox[2]) + 1])\n\n\n def _coco_box_to_bbox(self, box):\n bbox = np.array([box[0], box[1], box[0] + box[2], box[1] + box[3]],\n dtype=np.float32)\n return bbox\n\n\n def _get_bbox_output(self, bbox, trans_output, height, width):\n bbox = self._coco_box_to_bbox(bbox).copy()\n\n rect = np.array([[bbox[0], bbox[1]], [bbox[0], bbox[3]],\n [bbox[2], bbox[3]], [bbox[2], bbox[1]]], dtype=np.float32)\n for t in range(4):\n rect[t] = affine_transform(rect[t], trans_output)\n bbox[:2] = rect[:, 0].min(), rect[:, 1].min()\n bbox[2:] = rect[:, 0].max(), rect[:, 1].max()\n\n bbox_amodal = copy.deepcopy(bbox)\n bbox[[0, 2]] = np.clip(bbox[[0, 2]], 0, self.opt.output_w - 1)\n bbox[[1, 3]] = np.clip(bbox[[1, 3]], 0, self.opt.output_h - 1)\n h, w = bbox[3] - bbox[1], bbox[2] - bbox[0]\n return bbox, bbox_amodal\n\n def _add_instance(\n self, ret, gt_det, k, cls_id, bbox, bbox_amodal, ann, trans_output,\n aug_s, calib, pre_cts=None, track_ids=None):\n h, w = bbox[3] - bbox[1], bbox[2] - bbox[0]\n if h <= 0 or w <= 0:\n return\n radius = gaussian_radius((math.ceil(h), math.ceil(w)))\n radius = max(0, int(radius)) \n ct = np.array(\n [(bbox[0] + bbox[2]) / 2, (bbox[1] + bbox[3]) / 2], dtype=np.float32)\n ct_int = ct.astype(np.int32)\n ret['cat'][k] = cls_id - 1\n ret['mask'][k] = 1\n if 'wh' in ret:\n ret['wh'][k] = 1. * w, 1. 
* h\n ret['wh_mask'][k] = 1\n ret['ind'][k] = ct_int[1] * self.opt.output_w + ct_int[0]\n ret['reg'][k] = ct - ct_int\n ret['reg_mask'][k] = 1\n draw_umich_gaussian(ret['hm'][cls_id - 1], ct_int, radius)\n\n gt_det['bboxes'].append(\n np.array([ct[0] - w / 2, ct[1] - h / 2,\n ct[0] + w / 2, ct[1] + h / 2], dtype=np.float32))\n gt_det['scores'].append(1)\n gt_det['clses'].append(cls_id - 1)\n gt_det['cts'].append(ct)\n\n if 'tracking' in self.opt.heads:\n if ann['track_id'] in track_ids:\n pre_ct = pre_cts[track_ids.index(ann['track_id'])]\n ret['tracking_mask'][k] = 1\n ret['tracking'][k] = pre_ct - ct_int\n gt_det['tracking'].append(ret['tracking'][k])\n else:\n gt_det['tracking'].append(np.zeros(2, np.float32))\n\n if 'ltrb' in self.opt.heads:\n ret['ltrb'][k] = bbox[0] - ct_int[0], bbox[1] - ct_int[1], \\\n bbox[2] - ct_int[0], bbox[3] - ct_int[1]\n ret['ltrb_mask'][k] = 1\n\n if 'ltrb_amodal' in self.opt.heads:\n ret['ltrb_amodal'][k] = \\\n bbox_amodal[0] - ct_int[0], bbox_amodal[1] - ct_int[1], \\\n bbox_amodal[2] - ct_int[0], bbox_amodal[3] - ct_int[1]\n ret['ltrb_amodal_mask'][k] = 1\n gt_det['ltrb_amodal'].append(bbox_amodal)\n\n if 'nuscenes_att' in self.opt.heads:\n if ('attributes' in ann) and ann['attributes'] > 0:\n att = int(ann['attributes'] - 1)\n ret['nuscenes_att'][k][att] = 1\n ret['nuscenes_att_mask'][k][self.nuscenes_att_range[att]] = 1\n gt_det['nuscenes_att'].append(ret['nuscenes_att'][k])\n\n if 'velocity' in self.opt.heads:\n if ('velocity' in ann) and min(ann['velocity']) > -1000:\n ret['velocity'][k] = np.array(ann['velocity'], np.float32)[:3]\n ret['velocity_mask'][k] = 1\n gt_det['velocity'].append(ret['velocity'][k])\n\n if 'hps' in self.opt.heads:\n self._add_hps(ret, k, ann, gt_det, trans_output, ct_int, bbox, h, w)\n\n if 'rot' in self.opt.heads:\n self._add_rot(ret, ann, k, gt_det)\n\n if 'dep' in self.opt.heads:\n if 'depth' in ann:\n ret['dep_mask'][k] = 1\n ret['dep'][k] = ann['depth'] * aug_s\n gt_det['dep'].append(ret['dep'][k])\n else:\n gt_det['dep'].append(2)\n\n if 'dim' in self.opt.heads:\n if 'dim' in ann:\n ret['dim_mask'][k] = 1\n ret['dim'][k] = ann['dim']\n gt_det['dim'].append(ret['dim'][k])\n else:\n gt_det['dim'].append([1,1,1])\n \n if 'amodel_offset' in self.opt.heads:\n if 'amodel_center' in ann:\n amodel_center = affine_transform(ann['amodel_center'], trans_output)\n ret['amodel_offset_mask'][k] = 1\n ret['amodel_offset'][k] = amodel_center - ct_int\n gt_det['amodel_offset'].append(ret['amodel_offset'][k])\n else:\n gt_det['amodel_offset'].append([0, 0])\n \n\n def _add_hps(self, ret, k, ann, gt_det, trans_output, ct_int, bbox, h, w):\n num_joints = self.num_joints\n pts = np.array(ann['keypoints'], np.float32).reshape(num_joints, 3) \\\n if 'keypoints' in ann else np.zeros((self.num_joints, 3), np.float32)\n if self.opt.simple_radius > 0:\n hp_radius = int(simple_radius(h, w, min_overlap=self.opt.simple_radius))\n else:\n hp_radius = gaussian_radius((math.ceil(h), math.ceil(w)))\n hp_radius = max(0, int(hp_radius))\n\n for j in range(num_joints):\n pts[j, :2] = affine_transform(pts[j, :2], trans_output)\n if pts[j, 2] > 0:\n if pts[j, 0] >= 0 and pts[j, 0] < self.opt.output_w and \\\n pts[j, 1] >= 0 and pts[j, 1] < self.opt.output_h:\n ret['hps'][k, j * 2: j * 2 + 2] = pts[j, :2] - ct_int\n ret['hps_mask'][k, j * 2: j * 2 + 2] = 1\n pt_int = pts[j, :2].astype(np.int32)\n ret['hp_offset'][k * num_joints + j] = pts[j, :2] - pt_int\n ret['hp_ind'][k * num_joints + j] = \\\n pt_int[1] * self.opt.output_w + pt_int[0]\n 
ret['hp_offset_mask'][k * num_joints + j] = 1\n ret['hm_hp_mask'][k * num_joints + j] = 1\n ret['joint'][k * num_joints + j] = j\n draw_umich_gaussian(\n ret['hm_hp'][j], pt_int, hp_radius)\n if pts[j, 2] == 1:\n ret['hm_hp'][j, pt_int[1], pt_int[0]] = self.ignore_val\n ret['hp_offset_mask'][k * num_joints + j] = 0\n ret['hm_hp_mask'][k * num_joints + j] = 0\n else:\n pts[j, :2] *= 0\n else:\n pts[j, :2] *= 0\n self._ignore_region(\n ret['hm_hp'][j, int(bbox[1]): int(bbox[3]) + 1, \n int(bbox[0]): int(bbox[2]) + 1])\n gt_det['hps'].append(pts[:, :2].reshape(num_joints * 2))\n\n def _add_rot(self, ret, ann, k, gt_det):\n if 'alpha' in ann:\n ret['rot_mask'][k] = 1\n alpha = ann['alpha']\n if alpha < np.pi / 6. or alpha > 5 * np.pi / 6.:\n ret['rotbin'][k, 0] = 1\n ret['rotres'][k, 0] = alpha - (-0.5 * np.pi) \n if alpha > -np.pi / 6. or alpha < -5 * np.pi / 6.:\n ret['rotbin'][k, 1] = 1\n ret['rotres'][k, 1] = alpha - (0.5 * np.pi)\n gt_det['rot'].append(self._alpha_to_8(ann['alpha']))\n else:\n gt_det['rot'].append(self._alpha_to_8(0))\n \n def _alpha_to_8(self, alpha):\n ret = [0, 0, 0, 1, 0, 0, 0, 1]\n if alpha < np.pi / 6. or alpha > 5 * np.pi / 6.:\n r = alpha - (-0.5 * np.pi)\n ret[1] = 1\n ret[2], ret[3] = np.sin(r), np.cos(r)\n if alpha > -np.pi / 6. or alpha < -5 * np.pi / 6.:\n r = alpha - (0.5 * np.pi)\n ret[5] = 1\n ret[6], ret[7] = np.sin(r), np.cos(r)\n return ret\n \n def _format_gt_det(self, gt_det):\n if (len(gt_det['scores']) == 0):\n gt_det = {'bboxes': np.array([[0,0,1,1]], dtype=np.float32), \n 'scores': np.array([1], dtype=np.float32), \n 'clses': np.array([0], dtype=np.float32),\n 'cts': np.array([[0, 0]], dtype=np.float32),\n 'pre_cts': np.array([[0, 0]], dtype=np.float32),\n 'tracking': np.array([[0, 0]], dtype=np.float32),\n 'bboxes_amodal': np.array([[0, 0]], dtype=np.float32),\n 'hps': np.zeros((1, 17, 2), dtype=np.float32),}\n gt_det = {k: np.array(gt_det[k], dtype=np.float32) for k in gt_det}\n return gt_det\n\n def fake_video_data(self):\n self.coco.dataset['videos'] = []\n for i in range(len(self.coco.dataset['images'])):\n img_id = self.coco.dataset['images'][i]['id']\n self.coco.dataset['images'][i]['video_id'] = img_id\n self.coco.dataset['images'][i]['frame_id'] = 1\n self.coco.dataset['videos'].append({'id': img_id})\n \n if not ('annotations' in self.coco.dataset):\n return\n\n for i in range(len(self.coco.dataset['annotations'])):\n self.coco.dataset['annotations'][i]['track_id'] = i + 1\n" ]
[ [ "numpy.maximum", "numpy.random.random", "numpy.clip", "numpy.arange", "numpy.cos", "numpy.sin", "numpy.random.randn", "numpy.array", "numpy.zeros", "numpy.random.RandomState", "numpy.random.randint" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
abhinavchawla/no-save-rrt
[ "dc5e5c37aa6ca1de6459a7676a49d211e63cff0e" ]
[ "rrt-online.py" ]
[ "'''\nMIT License\nCopyright (c) 2019 Fanjin Zeng\nThis work is licensed under the terms of the MIT license, see <https://opensource.org/licenses/MIT>. \n'''\n\nimport numpy as np\nfrom random import random\nimport matplotlib.pyplot as plt\nfrom matplotlib import collections as mc\nfrom collections import deque\n\nclass Line():\n ''' Define line '''\n def __init__(self, p0, p1):\n self.p = np.array(p0)\n self.dirn = np.array(p1) - np.array(p0)\n self.dist = np.linalg.norm(self.dirn)\n self.dirn /= self.dist # normalize\n\n def path(self, t):\n return self.p + t * self.dirn\n\n\ndef Intersection(line, center, radius):\n ''' Check line-sphere (circle) intersection '''\n a = np.dot(line.dirn, line.dirn)\n b = 2 * np.dot(line.dirn, line.p - center)\n c = np.dot(line.p - center, line.p - center) - radius * radius\n\n discriminant = b * b - 4 * a * c\n if discriminant < 0:\n return False\n\n t1 = (-b + np.sqrt(discriminant)) / (2 * a);\n t2 = (-b - np.sqrt(discriminant)) / (2 * a);\n\n if (t1 < 0 and t2 < 0) or (t1 > line.dist and t2 > line.dist):\n return False\n\n return True\n\n\n\ndef distance(x, y):\n return np.linalg.norm(np.array(x) - np.array(y))\n\n\ndef isInObstacle(vex, obstacles, radius):\n for obs in obstacles:\n if distance(obs, vex) < radius:\n return True\n return False\n\n\ndef isThruObstacle(line, obstacles, radius):\n for obs in obstacles:\n if Intersection(line, obs, radius):\n return True\n return False\n\n\ndef nearest(G, vex, obstacles, radius):\n Nvex = None\n Nidx = None\n minDist = float(\"inf\")\n\n for idx, v in enumerate(G.vertices):\n line = Line(v, vex)\n if isThruObstacle(line, obstacles, radius):\n continue\n\n dist = distance(v, vex)\n if dist < minDist:\n minDist = dist\n Nidx = idx\n Nvex = v\n\n return Nvex, Nidx\n\n\ndef newVertex(randvex, nearvex, stepSize):\n dirn = np.array(randvex) - np.array(nearvex)\n length = np.linalg.norm(dirn)\n dirn = (dirn / length) * min (stepSize, length)\n\n newvex = (nearvex[0]+dirn[0], nearvex[1]+dirn[1])\n return newvex\n\n\ndef window(startpos, endpos):\n ''' Define seach window - 2 times of start to end rectangle'''\n width = endpos[0] - startpos[0]\n height = endpos[1] - startpos[1]\n winx = startpos[0] - (width / 2.)\n winy = startpos[1] - (height / 2.)\n return winx, winy, width, height\n\n\ndef isInWindow(pos, winx, winy, width, height):\n ''' Restrict new vertex insides search window'''\n if winx < pos[0] < winx+width and \\\n winy < pos[1] < winy+height:\n return True\n else:\n return False\n\n\nclass Graph:\n ''' Define graph '''\n def __init__(self, startpos, endpos):\n self.startpos = startpos\n self.endpos = endpos\n\n self.vertices = [startpos]\n self.edges = []\n self.success = False\n\n self.vex2idx = {startpos:0}\n self.neighbors = {0:[]}\n self.distances = {0:0.}\n\n self.sx = endpos[0] - startpos[0]\n self.sy = endpos[1] - startpos[1]\n\n def add_vex(self, pos):\n try:\n idx = self.vex2idx[pos]\n except:\n idx = len(self.vertices)\n self.vertices.append(pos)\n self.vex2idx[pos] = idx\n self.neighbors[idx] = []\n return idx\n\n def add_edge(self, idx1, idx2, cost):\n self.edges.append((idx1, idx2))\n self.neighbors[idx1].append((idx2, cost))\n self.neighbors[idx2].append((idx1, cost))\n\n\n def randomPosition(self):\n rx = random()\n ry = random()\n\n posx = self.startpos[0] - (self.sx / 2.) + rx * self.sx * 2\n posy = self.startpos[1] - (self.sy / 2.) 
+ ry * self.sy * 2\n return posx, posy\n\n\ndef RRT(startpos, endpos, obstacles, n_iter, radius, stepSize):\n ''' RRT algorithm '''\n G = Graph(startpos, endpos)\n\n for _ in range(n_iter):\n randvex = G.randomPosition()\n if isInObstacle(randvex, obstacles, radius):\n continue\n\n nearvex, nearidx = nearest(G, randvex, obstacles, radius)\n if nearvex is None:\n continue\n\n newvex = newVertex(randvex, nearvex, stepSize)\n\n newidx = G.add_vex(newvex)\n dist = distance(newvex, nearvex)\n G.add_edge(newidx, nearidx, dist)\n\n dist = distance(newvex, G.endpos)\n if dist < 2 * radius:\n endidx = G.add_vex(G.endpos)\n G.add_edge(newidx, endidx, dist)\n G.success = True\n #print('success')\n # break\n return G\n\n\ndef RRT_star(startpos, endpos, obstacles, n_iter, radius, stepSize):\n ''' RRT star algorithm '''\n G = Graph(startpos, endpos)\n\n for _ in range(n_iter):\n randvex = G.randomPosition()\n if isInObstacle(randvex, obstacles, radius):\n continue\n\n nearvex, nearidx = nearest(G, randvex, obstacles, radius)\n if nearvex is None:\n continue\n\n newvex = newVertex(randvex, nearvex, stepSize)\n\n newidx = G.add_vex(newvex)\n dist = distance(newvex, nearvex)\n G.add_edge(newidx, nearidx, dist)\n G.distances[newidx] = G.distances[nearidx] + dist\n\n # update nearby vertices distance (if shorter)\n for vex in G.vertices:\n if vex == newvex:\n continue\n\n dist = distance(vex, newvex)\n if dist > radius:\n continue\n\n line = Line(vex, newvex)\n if isThruObstacle(line, obstacles, radius):\n continue\n\n idx = G.vex2idx[vex]\n if G.distances[newidx] + dist < G.distances[idx]:\n G.add_edge(idx, newidx, dist)\n G.distances[idx] = G.distances[newidx] + dist\n\n dist = distance(newvex, G.endpos)\n if dist < 2 * radius:\n endidx = G.add_vex(G.endpos)\n G.add_edge(newidx, endidx, dist)\n try:\n G.distances[endidx] = min(G.distances[endidx], G.distances[newidx]+dist)\n except:\n G.distances[endidx] = G.distances[newidx]+dist\n\n G.success = True\n #print('success')\n # break\n return G\n\n\n\ndef dijkstra(G):\n '''\n Dijkstra algorithm for finding shortest path from start position to end.\n '''\n srcIdx = G.vex2idx[G.startpos]\n dstIdx = G.vex2idx[G.endpos]\n\n # build dijkstra\n nodes = list(G.neighbors.keys())\n dist = {node: float('inf') for node in nodes}\n prev = {node: None for node in nodes}\n dist[srcIdx] = 0\n\n while nodes:\n curNode = min(nodes, key=lambda node: dist[node])\n nodes.remove(curNode)\n if dist[curNode] == float('inf'):\n break\n\n for neighbor, cost in G.neighbors[curNode]:\n newCost = dist[curNode] + cost\n if newCost < dist[neighbor]:\n dist[neighbor] = newCost\n prev[neighbor] = curNode\n\n # retrieve path\n path = deque()\n curNode = dstIdx\n while prev[curNode] is not None:\n path.appendleft(G.vertices[curNode])\n curNode = prev[curNode]\n path.appendleft(G.vertices[curNode])\n return list(path)\n\n\n\ndef plot(G, obstacles, radius, path=None):\n '''\n Plot RRT, obstacles and shortest path\n '''\n px = [x for x, y in G.vertices]\n py = [y for x, y in G.vertices]\n fig, ax = plt.subplots()\n\n for obs in obstacles:\n circle = plt.Circle(obs, radius, color='red')\n ax.add_artist(circle)\n\n ax.scatter(px, py, c='cyan')\n ax.scatter(G.startpos[0], G.startpos[1], c='black')\n ax.scatter(G.endpos[0], G.endpos[1], c='black')\n\n lines = [(G.vertices[edge[0]], G.vertices[edge[1]]) for edge in G.edges]\n lc = mc.LineCollection(lines, colors='green', linewidths=2)\n ax.add_collection(lc)\n\n if path is not None:\n paths = [(path[i], path[i+1]) for i in range(len(path)-1)]\n lc2 = 
mc.LineCollection(paths, colors='blue', linewidths=3)\n ax.add_collection(lc2)\n\n ax.autoscale()\n ax.margins(0.1)\n plt.show()\n\n\ndef pathSearch(startpos, endpos, obstacles, n_iter, radius, stepSize):\n G = RRT_star(startpos, endpos, obstacles, n_iter, radius, stepSize)\n if G.success:\n path = dijkstra(G)\n # plot(G, obstacles, radius, path)\n return path\n\n\nif __name__ == '__main__':\n startpos = (0., 0.)\n endpos = (5., 5.)\n obstacles = [(1., 1.), (2., 2.)]\n n_iter = 200\n radius = 0.5\n stepSize = 0.7\n\n G = RRT_star(startpos, endpos, obstacles, n_iter, radius, stepSize)\n # G = RRT(startpos, endpos, obstacles, n_iter, radius, stepSize)\n\n if G.success:\n path = dijkstra(G)\n print(path)\n plot(G, obstacles, radius, path)\n else:\n plot(G, obstacles, radius)" ]
[ [ "numpy.dot", "numpy.sqrt", "matplotlib.collections.LineCollection", "matplotlib.pyplot.subplots", "numpy.linalg.norm", "matplotlib.pyplot.Circle", "numpy.array", "matplotlib.pyplot.show" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
Fanxingye/TsFormer
[ "da6e7eee1bddb44e2e98f07c9f0d374793e80da6" ]
[ "run_autoformer.py" ]
[ "import argparse\nimport random\n\nimport numpy as np\nimport torch\n\nfrom tsformer.exp_autoformer import Exp_Main\n\nfix_seed = 2021\nrandom.seed(fix_seed)\ntorch.manual_seed(fix_seed)\nnp.random.seed(fix_seed)\n\nparser = argparse.ArgumentParser(\n description='Autoformer & Transformer family for Time Series Forecasting')\n\n# basic config\nparser.add_argument(\n '--is_training', type=int, required=True, default=1, help='status')\nparser.add_argument(\n '--model_id', type=str, required=True, default='test', help='model id')\nparser.add_argument(\n '--model',\n type=str,\n required=True,\n default='Autoformer',\n help='model name, options: [Autoformer, Informer, Transformer]')\n\n# data loader\nparser.add_argument(\n '--data', type=str, required=True, default='ETTm1', help='dataset type')\nparser.add_argument(\n '--root_path',\n type=str,\n default='./data/ETT/',\n help='root path of the data file')\nparser.add_argument(\n '--data_path', type=str, default='ETTh1.csv', help='data file')\nparser.add_argument(\n '--features',\n type=str,\n default='M',\n help='forecasting task, options:[M, S, MS]')\nparser.add_argument(\n '--target', type=str, default='OT', help='target feature in S or MS task')\nparser.add_argument(\n '--freq',\n type=str,\n default='h',\n help='freq for time features encoding, options:[s:secondly]')\nparser.add_argument(\n '--checkpoints',\n type=str,\n default='./checkpoints/',\n help='location of model checkpoints')\n\n# forecasting task\nparser.add_argument(\n '--seq_len', type=int, default=96, help='input sequence length')\nparser.add_argument(\n '--label_len', type=int, default=48, help='start token length')\nparser.add_argument(\n '--pred_len', type=int, default=96, help='prediction sequence length')\n\n# model define\nparser.add_argument('--enc_in', type=int, default=7, help='encoder input size')\nparser.add_argument('--dec_in', type=int, default=7, help='decoder input size')\nparser.add_argument('--c_out', type=int, default=7, help='output size')\nparser.add_argument(\n '--d_model', type=int, default=512, help='dimension of model')\nparser.add_argument('--n_heads', type=int, default=8, help='num of heads')\nparser.add_argument(\n '--e_layers', type=int, default=2, help='num of encoder layers')\nparser.add_argument(\n '--d_layers', type=int, default=1, help='num of decoder layers')\nparser.add_argument('--d_ff', type=int, default=2048, help='dimension of fcn')\nparser.add_argument(\n '--moving_avg', type=int, default=25, help='window size of moving average')\nparser.add_argument('--factor', type=int, default=1, help='attn factor')\nparser.add_argument(\n '--distil',\n action='store_false',\n help='whether to use distilling in encoder,',\n default=True)\nparser.add_argument('--dropout', type=float, default=0.05, help='dropout')\nparser.add_argument(\n '--embed',\n type=str,\n default='timeF',\n help='time features encoding, options:[timeF, fixed, learned]')\nparser.add_argument(\n '--activation', type=str, default='gelu', help='activation')\nparser.add_argument(\n '--output_attention',\n action='store_true',\n help='whether to output attention in ecoder')\nparser.add_argument(\n '--do_predict',\n action='store_true',\n help='whether to predict unseen future data')\n\n# optimization\nparser.add_argument(\n '--num_workers', type=int, default=10, help='data loader num workers')\nparser.add_argument('--itr', type=int, default=2, help='experiments times')\nparser.add_argument(\n '--train_epochs', type=int, default=10, help='train epochs')\nparser.add_argument(\n 
'--batch_size',\n type=int,\n default=32,\n help='batch size of train input data')\nparser.add_argument(\n '--patience', type=int, default=3, help='early stopping patience')\nparser.add_argument(\n '--learning_rate',\n type=float,\n default=0.0001,\n help='optimizer learning rate')\nparser.add_argument('--des', type=str, default='test', help='exp description')\nparser.add_argument('--loss', type=str, default='mse', help='loss function')\nparser.add_argument(\n '--lradj', type=str, default='type1', help='adjust learning rate')\nparser.add_argument(\n '--use_amp',\n action='store_true',\n help='use automatic mixed precision training',\n default=False)\n\n# GPU\nparser.add_argument('--use_gpu', type=bool, default=True, help='use gpu')\nparser.add_argument('--gpu', type=int, default=0, help='gpu')\nparser.add_argument(\n '--use_multi_gpu',\n action='store_true',\n help='use multiple gpus',\n default=False)\nparser.add_argument(\n '--devices',\n type=str,\n default='0,1,2,3',\n help='device ids of multiple gpus')\n\nargs = parser.parse_args()\n\nargs.use_gpu = True if torch.cuda.is_available() and args.use_gpu else False\n\nif args.use_gpu and args.use_multi_gpu:\n args.devices = args.devices.replace(' ', '')\n device_ids = args.devices.split(',')\n args.device_ids = [int(id_) for id_ in device_ids]\n args.gpu = args.device_ids[0]\n\nprint('Args in experiment:')\nprint(args)\n\nExp = Exp_Main\n\nif args.is_training:\n for ii in range(args.itr):\n # setting record of experiments\n setting = '{}_{}_{}_ft{}_sl{}_ll{}_pl{}_dm{}_nh{}_el{}_dl{}_df{}_fc{}_eb{}_dt{}_{}_{}'.format(\n args.model_id, args.model, args.data, args.features, args.seq_len,\n args.label_len, args.pred_len, args.d_model, args.n_heads,\n args.e_layers, args.d_layers, args.d_ff, args.factor, args.embed,\n args.distil, args.des, ii)\n\n exp = Exp(args) # set experiments\n print('>>>>>>>start training : {}>>>>>>>>>>>>>>>>>>>>>>>>>>'.format(\n setting))\n exp.train(setting)\n\n print('>>>>>>>testing : {}<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<'.format(\n setting))\n exp.test(setting)\n\n if args.do_predict:\n print('>>>>>>>predicting : {}<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<'.\n format(setting))\n exp.predict(setting, True)\n\n torch.cuda.empty_cache()\nelse:\n ii = 0\n setting = '{}_{}_{}_ft{}_sl{}_ll{}_pl{}_dm{}_nh{}_el{}_dl{}_df{}_fc{}_eb{}_dt{}_{}_{}'.format(\n args.model_id, args.model, args.data, args.features, args.seq_len,\n args.label_len, args.pred_len, args.d_model, args.n_heads,\n args.e_layers, args.d_layers, args.d_ff, args.factor, args.embed,\n args.distil, args.des, ii)\n\n exp = Exp(args) # set experiments\n print(\n '>>>>>>>testing : {}<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<'.format(setting))\n exp.test(setting, test=1)\n torch.cuda.empty_cache()\n" ]
[ [ "torch.manual_seed", "torch.cuda.is_available", "torch.cuda.empty_cache", "numpy.random.seed" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
OneDirection9/foundation
[ "a43a568b5d062d80cf3ce5777d0ab3f14147a8f2" ]
[ "foundation/visualization/colormap.py" ]
[ "# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.\n#\n# Modified by: Zhipeng Han\n\"\"\"\nAn awesome colormap for really neat visualizations.\nCopied from Detectron, and removed gray colors.\n\"\"\"\nfrom __future__ import absolute_import, division, print_function\n\nimport numpy as np\n\n__all__ = [\"colormap\", \"random_color\"]\n\n# RGB:\n# fmt: off\n_COLORS = np.array(\n [\n 0.000, 0.447, 0.741,\n 0.850, 0.325, 0.098,\n 0.929, 0.694, 0.125,\n 0.494, 0.184, 0.556,\n 0.466, 0.674, 0.188,\n 0.301, 0.745, 0.933,\n 0.635, 0.078, 0.184,\n 0.300, 0.300, 0.300,\n 0.600, 0.600, 0.600,\n 1.000, 0.000, 0.000,\n 1.000, 0.500, 0.000,\n 0.749, 0.749, 0.000,\n 0.000, 1.000, 0.000,\n 0.000, 0.000, 1.000,\n 0.667, 0.000, 1.000,\n 0.333, 0.333, 0.000,\n 0.333, 0.667, 0.000,\n 0.333, 1.000, 0.000,\n 0.667, 0.333, 0.000,\n 0.667, 0.667, 0.000,\n 0.667, 1.000, 0.000,\n 1.000, 0.333, 0.000,\n 1.000, 0.667, 0.000,\n 1.000, 1.000, 0.000,\n 0.000, 0.333, 0.500,\n 0.000, 0.667, 0.500,\n 0.000, 1.000, 0.500,\n 0.333, 0.000, 0.500,\n 0.333, 0.333, 0.500,\n 0.333, 0.667, 0.500,\n 0.333, 1.000, 0.500,\n 0.667, 0.000, 0.500,\n 0.667, 0.333, 0.500,\n 0.667, 0.667, 0.500,\n 0.667, 1.000, 0.500,\n 1.000, 0.000, 0.500,\n 1.000, 0.333, 0.500,\n 1.000, 0.667, 0.500,\n 1.000, 1.000, 0.500,\n 0.000, 0.333, 1.000,\n 0.000, 0.667, 1.000,\n 0.000, 1.000, 1.000,\n 0.333, 0.000, 1.000,\n 0.333, 0.333, 1.000,\n 0.333, 0.667, 1.000,\n 0.333, 1.000, 1.000,\n 0.667, 0.000, 1.000,\n 0.667, 0.333, 1.000,\n 0.667, 0.667, 1.000,\n 0.667, 1.000, 1.000,\n 1.000, 0.000, 1.000,\n 1.000, 0.333, 1.000,\n 1.000, 0.667, 1.000,\n 0.333, 0.000, 0.000,\n 0.500, 0.000, 0.000,\n 0.667, 0.000, 0.000,\n 0.833, 0.000, 0.000,\n 1.000, 0.000, 0.000,\n 0.000, 0.167, 0.000,\n 0.000, 0.333, 0.000,\n 0.000, 0.500, 0.000,\n 0.000, 0.667, 0.000,\n 0.000, 0.833, 0.000,\n 0.000, 1.000, 0.000,\n 0.000, 0.000, 0.167,\n 0.000, 0.000, 0.333,\n 0.000, 0.000, 0.500,\n 0.000, 0.000, 0.667,\n 0.000, 0.000, 0.833,\n 0.000, 0.000, 1.000,\n 0.000, 0.000, 0.000,\n 0.143, 0.143, 0.143,\n 0.857, 0.857, 0.857,\n 1.000, 1.000, 1.000\n ]\n).astype(np.float32).reshape(-1, 3)\n# fmt: on\n\n\ndef colormap(rgb: bool = False, maximum: int = 255) -> np.ndarray:\n \"\"\"\n Args:\n rgb: Whether to return RGB colors or BGR colors.\n maximum: Either 1 or 255.\n\n Returns:\n A float32 array of Nx3 colors, in range [0, 1] or [0, 255].\n \"\"\"\n if maximum not in (1, 255):\n raise ValueError(\"maximum should be 1 or 255. Got {}\".format(maximum))\n\n c = _COLORS * maximum\n if not rgb:\n c = c[:, ::-1]\n return c\n\n\ndef random_color(rgb: bool = False, maximum: int = 255) -> np.ndarray:\n \"\"\"\n Args:\n rgb: Whether to return RGB colors or BGR colors.\n maximum: Either 1 or 255.\n\n Returns:\n A float32 array of 3 numbers, in range [0, 1] or [0, 255].\n \"\"\"\n if maximum not in (1, 255):\n raise ValueError(\"maximum should be 1 or 255. Got {}\".format(maximum))\n\n idx = np.random.randint(0, len(_COLORS))\n ret = _COLORS[idx] * maximum\n if not rgb:\n ret = ret[::-1]\n return ret\n\n\nif __name__ == \"__main__\":\n import cv2\n\n H, W, size = 10, 10, 100\n canvas = np.random.rand(H * size, W * size, 3).astype(\"float32\")\n for h in range(H):\n for w in range(W):\n idx = h * W + w\n if idx >= len(_COLORS):\n break\n canvas[h * size : (h + 1) * size, w * size : (w + 1) * size] = _COLORS[idx]\n cv2.imshow(\"colors\", canvas)\n cv2.waitKey(0)\n" ]
[ [ "numpy.array", "numpy.random.rand" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
pushpendradahiya/pegasus
[ "8290444441d187d48082a4034a3ba3182f7b5946" ]
[ "pegasus/eval/estimator_metrics.py" ]
[ "# Copyright 2020 The PEGASUS Authors..\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"Eval metrics for the estimators.\"\"\"\n# \n# pylint: disable=invalid-name\n\nimport tensorflow as tf\n\n\ndef _create_generative_metrics(labels, weights, logits):\n \"\"\"Returns a map of metric names to metric tensors for generative task.\n\n Args:\n labels: tensor of [batch_size, seq_len].\n weights: tensor of [batch_size, seq_len].\n logits: tensor of [batch_size, seq_len, vocab_size].\n\n Returns:\n dictionary of tensor metrics.\n \"\"\"\n predictions = tf.argmax(input=logits, axis=-1)\n accuracy_unmasked = tf.compat.v1.metrics.accuracy(\n labels=labels, predictions=predictions)\n accuracy_pad_masked = tf.compat.v1.metrics.accuracy(\n labels=labels, predictions=predictions, weights=weights)\n loss = tf.compat.v1.losses.sparse_softmax_cross_entropy(labels, logits, weights=weights)\n metrics = {\n \"metrics/accuracy_unmasked\": accuracy_unmasked,\n \"metrics/accuracy_pad_masked\": accuracy_pad_masked,\n \"metrics/perplexity_pad_masked\": tf.compat.v1.metrics.mean(loss),\n }\n return metrics\n\n\ndef gen_eval_metrics_fn(features, outputs):\n \"\"\"Get eval metrics for estimator.\"\"\"\n weights = features.get(\"targets_mask\", None)\n if weights is None:\n weights = 1.0 - tf.cast(tf.equal(features[\"targets\"], 0), tf.float32)\n else:\n weights = tf.cast(weights, tf.float32)\n return (_create_generative_metrics,\n [features[\"targets\"], weights, outputs[\"logits\"]])\n\n\ndef pretrain_eval_metrics_fn(features, outputs):\n \"\"\"Get eval metrics for estimator in the pretraining stage.\"\"\"\n targets_weights = features.get(\"targets_mask\", None)\n if targets_weights is None:\n targets_weights = 1.0 - tf.cast(\n tf.equal(features[\"targets\"], 0), tf.float32)\n else:\n targets_weights = tf.cast(targets_weights, tf.float32)\n\n masked_inputs_weights = features.get(\"masked_inputs_mask\", None)\n\n if \"logits_mlm\" in outputs:\n if masked_inputs_weights is None:\n masked_inputs_weights = 1.0 - tf.cast(\n tf.equal(features[\"masked_inputs\"], 0), tf.float32)\n else:\n masked_inputs_weights = tf.cast(masked_inputs_weights, tf.float32)\n\n def _create_eval_metrics(targets, weights, logits, masked_inputs,\n weights_mlm, logits_mlm):\n \"\"\"Returns a map of metric names to metric tensors.\"\"\"\n metrics = _create_generative_metrics(targets, weights, logits)\n metrics_mlm = _create_generative_metrics(masked_inputs, weights_mlm,\n logits_mlm)\n metrics.update({k + \"_mlm\": v for k, v in metrics_mlm.items()})\n return metrics\n\n if \"masked_inputs\" not in features:\n raise KeyError(\n \"'masked_inputs' not found in features. \"\n \"Please check TransformerEncoderDecoderMLModel when MLM is applied.\")\n\n return (_create_eval_metrics, [\n features[\"targets\"], targets_weights, outputs[\"logits\"],\n features[\"masked_inputs\"], masked_inputs_weights, outputs[\"logits_mlm\"]\n ])\n else:\n return (_create_generative_metrics,\n [features[\"targets\"], targets_weights, outputs[\"logits\"]])\n" ]
[ [ "tensorflow.compat.v1.metrics.mean", "tensorflow.compat.v1.metrics.accuracy", "tensorflow.cast", "tensorflow.equal", "tensorflow.argmax", "tensorflow.compat.v1.losses.sparse_softmax_cross_entropy" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]