repo_name (stringlengths 6–130) | hexsha (list) | file_path (list) | code (list) | apis (list) | possible_versions (list)
---|---|---|---|---|---|
DominikSpiljak/Fuzzy-Evolutionary-and-Neuro-computing
|
[
"fb0d54c35966ed0516c15519a6abcbffeb170f9b"
] |
[
"ANFIS/fuzzy_net.py"
] |
[
"import numpy as np\nimport matplotlib.pyplot as plt\nfrom mpl_toolkits import mplot3d\nfrom mpl_toolkits.mplot3d.axes3d import get_test_data\nfrom mpl_toolkits.mplot3d import Axes3D\nimport pickle\n\n\nclass ANFIS():\n\n def __init__(self, no_rules=1):\n self.no_rules = no_rules\n self.p = np.random.randn(no_rules, 1)\n self.q = np.random.randn(no_rules, 1)\n self.r = np.zeros((no_rules, 1))\n\n self.a = np.random.randn(no_rules, 2)\n self.b = np.random.randn(no_rules, 2)\n\n def save_model(self, filepath):\n np.savez(filepath, p=self.p, q=self.q, r=self.r, a=self.a, b=self.b)\n\n @staticmethod\n def load_model(filepath):\n data = np.load(filepath)\n model = ANFIS()\n p = data['p']\n q = data['q']\n r = data['r']\n a = data['a']\n b = data['b']\n model.p = p\n model.q = q\n model.r = r\n model.a = a\n model.b = b\n model.no_rules = model.p.shape[0]\n\n return model\n\n def fuzzification_sigmoid(self, x_y):\n return 1 / (1 + np.exp(self.b * (x_y - self.a)))\n\n def mean_squared_err(self, z_true, preds):\n return np.mean(np.square(z_true - preds))\n\n def forward(self, X_Y, underflow_ctrl=1e-12):\n self.preds = []\n self.fuzzified_alphas = []\n self.alphas = []\n self.fs = []\n\n for x_y in X_Y:\n\n # 1. layer: Fuzzification\n fuzzified = self.fuzzification_sigmoid(x_y)\n\n self.fuzzified_alphas.append(fuzzified)\n\n # 2. layer: T-norm of inputs\n products = np.product(fuzzified, axis=1)\n\n self.alphas.append(products)\n\n # 3. layer: Normalization\n normalized = (products / (underflow_ctrl if np.sum(products)\n == 0 else np.sum(products)))\n\n x, y = x_y.T\n\n # 4. layer: Defuzzification (w * (p * x1 + q * x2 + r))\n fs = self.p * x + self.q * y + self.r\n self.fs.append(fs)\n deffuzified = normalized.dot(fs)\n\n self.preds.append(deffuzified)\n\n self.preds = np.array(self.preds)\n self.fuzzified_alphas = np.array(self.fuzzified_alphas)\n self.alphas = np.array(self.alphas)\n self.fs = np.array(self.fs)\n\n def train(self, X_Y, z, learning_rate=1e-3, learning_rate_fuzzification=1e-3, lr_decay=0.9, decay_interval=1000, n_epochs=10000, algorithm='backprop', batch_size=10, underflow_ctrl=1e-12, shuffle=True):\n\n if algorithm not in ['backprop', 'stohastic', 'minibatch']:\n raise ValueError('Algorithm not recognised')\n\n if algorithm == 'backprop':\n batch_size = X_Y.shape[0]\n\n elif algorithm == 'stohastic':\n batch_size = 1\n\n errs = []\n\n for i in range(n_epochs):\n\n preds = self.predict(X_Y, underflow_ctrl=underflow_ctrl)\n err = self.mean_squared_err(z, preds)\n\n errs.append(err)\n\n if i % 100 == 0:\n print('Iteration {}, error {}'.format(\n i, err))\n\n if shuffle:\n indices = np.random.permutation(X_Y.shape[0])\n else:\n indices = np.arange(X_Y.shape[0])\n\n for j in range(0, X_Y.shape[0], batch_size):\n\n sliced_indices = indices[j:j + batch_size]\n\n self.forward(\n X_Y[sliced_indices], underflow_ctrl=underflow_ctrl)\n\n x, y = X_Y[sliced_indices].T\n\n x = x.reshape(x.shape[0], 1)\n y = y.reshape(y.shape[0], 1)\n\n dEk_dpreds = -1 * (z[sliced_indices] - self.preds)\n alpha_sum = np.sum(self.alphas, axis=1)\n\n dEk_dp = np.sum([[dEk_dpreds[k] * self.alphas[k][i] * x[k] / (underflow_ctrl if alpha_sum[k]\n == 0 else alpha_sum[k])\n for i in range(self.no_rules)] for k in range(len(x))], axis=0)\n\n dEk_dq = np.sum([[dEk_dpreds[k] * self.alphas[k][i] * y[k] / (underflow_ctrl if alpha_sum[k]\n == 0 else alpha_sum[k])\n for i in range(self.no_rules)] for k in range(len(y))], axis=0)\n\n dEk_dr = np.sum([[dEk_dpreds[k] * self.alphas[k][i] / (underflow_ctrl if alpha_sum[k]\n == 0 
else alpha_sum[k])\n for i in range(self.no_rules)] for k in range(len(x))], axis=0)\n\n dEk_da = []\n dEk_db = []\n\n for k in range(len(x)):\n rule_a = []\n rule_b = []\n for i in range(self.no_rules):\n\n alpha_x, alpha_y = self.fuzzified_alphas[k][i]\n ax, ay = self.a[i]\n bx, by = self.b[i]\n\n fraction_numerator = np.sum(\n [self.alphas[k][j] * (self.fs[k][i] - self.fs[k][j]) for j in range(self.no_rules) if j != i], axis=0)\n fraction_denominator = np.square(alpha_sum[k])\n\n fraction = np.array(\n fraction_numerator / (underflow_ctrl if fraction_denominator\n == 0 else fraction_denominator))\n\n dEk_dax = (dEk_dpreds[k] * fraction *\n alpha_y * bx * (1 - alpha_x) * alpha_x)\n dEk_day = (dEk_dpreds[k] * fraction *\n alpha_x * by * (1 - alpha_y) * alpha_y)\n dEk_dbx = (\n dEk_dpreds[k] * fraction * alpha_y * -1 * (x[k] - ax) * alpha_x * (1 - alpha_x))\n dEk_dby = (\n dEk_dpreds[k] * fraction * alpha_x * -1 * (y[k] - ay) * alpha_y * (1 - alpha_y))\n\n rule_a.append([dEk_dax, dEk_day])\n rule_b.append([dEk_dbx, dEk_dby])\n\n dEk_da.append([rule_a])\n dEk_db.append([rule_b])\n dEk_da = np.array(dEk_da).reshape(len(x), self.no_rules, 2)\n dEk_db = np.array(dEk_db).reshape(len(x), self.no_rules, 2)\n dEk_da = np.sum(dEk_da, axis=0)\n dEk_db = np.sum(dEk_db, axis=0)\n\n self.p = self.p - learning_rate * dEk_dp\n self.q = self.q - learning_rate * dEk_dq\n self.r = self.r - learning_rate * dEk_dr\n self.a = self.a - learning_rate_fuzzification * dEk_da\n self.b = self.b - learning_rate_fuzzification * dEk_db\n\n if i % decay_interval == 0 and i != 0:\n learning_rate *= lr_decay\n learning_rate_fuzzification *= lr_decay\n\n return errs\n\n def predict(self, X_Y, underflow_ctrl=1e-12):\n self.forward(X_Y, underflow_ctrl=underflow_ctrl)\n return self.preds\n\n\ndef draw_func(funcs, no_rules, save_img=None):\n x = np.linspace(-4, 4, 15)\n y = np.linspace(-4, 4, 15)\n X, Y = np.meshgrid(x, y)\n\n fig, ax = plt.subplots(2, 2, subplot_kw=dict(projection='3d'))\n fig.set_size_inches(20, 15)\n for i, f in enumerate(funcs):\n j, k = [0 if i < 2 else 1, i if i < 2 else i - 2]\n if i != 0:\n Z = []\n for x_ in x[::-1]:\n row = []\n for y_ in y:\n row.append(f(np.array([[y_, x_]])))\n Z.insert(0, row)\n Z = np.array(Z).reshape(15, 15)\n ax[j, k].set_title(\n 'Predicted using {} rules'.format(no_rules[i - 1]))\n else:\n Z = func(X, Y)\n ax[j, k].set_title('Original function')\n\n ax[j, k].plot_surface(X, Y, Z, rstride=1, cstride=1,\n cmap='viridis', edgecolor='none')\n ax[j, k].set_xlabel('X')\n ax[j, k].set_ylabel('Y')\n ax[j, k].set_zlabel('Z')\n plt.show()\n if save_img is not None:\n fig.savefig(save_img)\n\n\ndef draw_error_curves(funcs, no_rules, save_img=None):\n x = np.linspace(-4, 4, 15)\n y = np.linspace(-4, 4, 15)\n\n X, Y = np.meshgrid(x, y)\n\n Z_true = funcs[0](X, Y)\n\n fig, ax = plt.subplots(1, 3, subplot_kw=dict(projection='3d'))\n fig.set_size_inches(20, 15)\n for i, f in enumerate(funcs[1:]):\n Z = []\n for x_ in x[::-1]:\n row = []\n for y_ in y:\n row.append(f(np.array([[y_, x_]])))\n Z.insert(0, row)\n Z = np.array(Z).reshape(15, 15)\n ax[i].set_title(\n 'Errors using {} rules'.format(no_rules[i]))\n\n ax[i].plot_surface(X, Y, Z - Z_true, rstride=1, cstride=1,\n cmap='viridis', edgecolor='none')\n ax[i].set_xlabel('X')\n ax[i].set_ylabel('Y')\n ax[i].set_zlabel('Z')\n plt.show()\n fig.tight_layout()\n if save_img is not None:\n fig.savefig(save_img)\n\n\ndef draw_membership_functions(model, save_img=None):\n def fuzzification_sigmoid(x, a, b):\n return 1 / (1 + np.exp(b * (x - 
a)))\n\n x = np.linspace(-4, 4, 15)\n y = np.linspace(-4, 4, 15)\n a = model.a\n b = model.b\n\n no_rules = a.shape[0]\n\n fig, axes = plt.subplots(no_rules, 2)\n fig.set_size_inches(20, 15)\n\n for rule in range(no_rules):\n ax, ay = a[rule]\n bx, by = b[rule]\n axes[rule][0].set_title('Rule {}, variable x'.format(rule + 1))\n axes[rule][0].plot(x, fuzzification_sigmoid(x, ax, bx))\n axes[rule][0].set_xlim(-4, 4)\n axes[rule][0].set_ylim(0, 1)\n axes[rule][1].set_title('Rule {}, variable y'.format(rule + 1))\n axes[rule][1].plot(y, fuzzification_sigmoid(y, ay, by))\n axes[rule][1].set_xlim(-4, 4)\n axes[rule][1].set_ylim(0, 1)\n\n fig.subplots_adjust(hspace=0.483, wspace=0.067)\n plt.show()\n\n if save_img is not None:\n fig.savefig(save_img)\n\n\ndef draw_losses(gradient, stohastic, save_img=None):\n fig, ax = plt.subplots(1, 2)\n fig.set_size_inches(20, 15)\n\n ax[0].set_title('Error over epoch for gradient algoritm')\n ax[0].set_xlabel('Epochs')\n ax[0].set_ylabel('Errors')\n ax[0].set_ylim(0, 4)\n ax[0].plot(range(len(gradient)), gradient)\n\n ax[1].set_title('Error over epoch for stohastic algoritm')\n ax[1].set_xlabel('Epochs')\n ax[1].set_ylabel('Errors')\n ax[1].set_ylim(0, 4)\n ax[1].plot(range(len(stohastic)), stohastic)\n\n plt.show()\n fig.tight_layout()\n\n if save_img is not None:\n fig.savefig(save_img)\n\n\ndef draw_losses_with_lrs(history_gradient, history_stohastic, save_img=None):\n lrs_stohastic = [[1e-4, 5e-6], [5e-3, 1e-4], [3e-2, 5e-3]]\n lrs_gradient = [[1e-4, 5e-6], [1e-3, 3e-5], [5e-3, 1e-4]]\n labels = ['Low learning rate, learning_rate={}, fuzzification_learning_rate={}',\n 'Optimal learning rate, learning_rate={}, fuzzification_learning_rate={}',\n 'High learning rate, learning_rate={}, fuzzification_learning_rate={}']\n\n fig, ax = plt.subplots(3, 2)\n fig.set_size_inches(18, 15)\n\n for i, histories in enumerate(zip(history_gradient, history_stohastic)):\n grad, stoh = histories\n ax[i][0].set_title('Gradient algorithm, {}'.format(\n labels[i].format(*lrs_gradient[i])))\n ax[i][0].plot(range(len(grad)), grad)\n ax[i][0].set_ylim(0, 10)\n ax[i][0].set_ylabel('Errors')\n\n ax[i][1].set_title('Stohastic algorithm, {}'.format(\n labels[i].format(*lrs_stohastic[i])))\n ax[i][1].plot(range(len(stoh)), stoh)\n ax[i][1].set_ylim(0, 10)\n ax[i][1].set_ylabel('Errors')\n\n fig.tight_layout()\n fig.subplots_adjust(hspace=0.243, wspace=0.077)\n plt.show()\n if save_img is not None:\n fig.savefig(save_img)\n\n\nif __name__ == \"__main__\":\n X_Y = []\n z = []\n\n def func(x, y): return ((x - 1) ** 2 + (y + 2) **\n 2 - 5 * x * y + 3) * np.cos(x / 5) ** 2\n\n for x in range(-4, 5):\n for y in range(-4, 5):\n X_Y.append([x, y])\n z.append([func(x, y)])\n\n X_Y = np.array(X_Y)\n z = np.array(z)\n\n # anfis1 = ANFIS(no_rules=1)\n # anfis1.train(X_Y, z, n_epochs=1000, learning_rate=1e-3, learning_rate_fuzzification=1e-5,\n # lr_decay=0.8, decay_interval=5000, shuffle=False)\n\n # anfis1.save_model('model1.npz')\n\n anfis1 = ANFIS.load_model('model1.npz')\n\n # anfis2 = ANFIS(no_rules=2)\n # anfis2.train(X_Y, z, n_epochs=1000, learning_rate=5e-3, learning_rate_fuzzification=1e-4,\n # lr_decay=0.8, decay_interval=5000, shuffle=False)\n\n # anfis2.save_model('model2.npz')\n anfis2 = ANFIS.load_model('model2.npz')\n\n # anfis5 = ANFIS(no_rules=5)\n # history_stohastic = anfis5.train(X_Y, z, algorithm='stohastic', n_epochs=10000, learning_rate=3e-2, learning_rate_fuzzification=5e-3,\n # lr_decay=0.8, decay_interval=2500, shuffle=False)\n\n # np.save('history_stohastic_high_lr.npy', 
history_stohastic)\n\n # anfis5.save_model('model5_stohastic_high_lr.npz')\n\n # anfis5 = ANFIS(no_rules=5)\n # history_gradient = anfis5.train(X_Y, z, n_epochs=10000, learning_rate=1e-3, learning_rate_fuzzification=3e-5,\n # lr_decay=0.8, decay_interval=2500, shuffle=False)\n\n # np.save('history_gradient.npy', history_gradient)\n\n # anfis5.save_model('model5_gradient.npz')\n\n # anfis5 = ANFIS(no_rules=5)\n # history_stohastic = anfis5.train(X_Y, z, algorithm='stohastic', n_epochs=10000, learning_rate=1e-4, learning_rate_fuzzification=5e-6,\n # lr_decay=0.8, decay_interval=2500, shuffle=False)\n\n # np.save('history_stohastic_low_lr.npy', history_stohastic)\n\n # anfis5.save_model('model5_stohastic_low_lr.npz')\n\n # anfis5 = ANFIS(no_rules=5)\n # history_gradient = anfis5.train(X_Y, z, n_epochs=10000, learning_rate=1e-4, learning_rate_fuzzification=5e-6,\n # lr_decay=0.8, decay_interval=2500, shuffle=False)\n\n # np.save('history_gradient_low_lr.npy', history_gradient)\n\n # anfis5.save_model('model5_gradient_low_lr.npz')\n\n anfis5 = ANFIS.load_model('model5_stohastic.npz')\n\n draw_func([func, anfis1.predict, anfis2.predict,\n anfis5.predict], no_rules=[1, 2, 5], save_img='func_approx.png')\n\n draw_error_curves([func, anfis1.predict, anfis2.predict,\n anfis5.predict], no_rules=[1, 2, 5], save_img='error_curves.png')\n\n draw_membership_functions(anfis5, save_img='membership_functions.png')\n\n history_gradient = np.load('history_gradient.npy')\n history_stohastic = np.load('history_stohastic.npy')\n history_gradient_low_lr = np.load('history_gradient_low_lr.npy')\n history_stohastic_low_lr = np.load('history_stohastic_low_lr.npy')\n history_gradient_high_lr = np.load('history_gradient_high_lr.npy')\n history_stohastic_high_lr = np.load('history_stohastic_high_lr.npy')\n\n draw_losses(history_gradient, history_stohastic,\n save_img='errors_for_algorithms.png')\n draw_losses_with_lrs(history_gradient=[history_gradient_low_lr, history_gradient, history_gradient_high_lr], history_stohastic=[\n history_stohastic_low_lr, history_stohastic, history_stohastic_high_lr], save_img='errors_for_algorithms_lrs.png')\n"
] |
[
[
"numpy.square",
"numpy.savez",
"numpy.product",
"numpy.meshgrid",
"numpy.linspace",
"numpy.arange",
"matplotlib.pyplot.subplots",
"numpy.cos",
"numpy.random.permutation",
"numpy.random.randn",
"numpy.exp",
"numpy.load",
"numpy.array",
"numpy.zeros",
"numpy.sum",
"matplotlib.pyplot.show"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
BRGM/map2loop-2
|
[
"9f9246e895e0334f3a0d53d7300b49e42f6de150"
] |
[
"map2loop/config.py"
] |
[
"import os\nimport sys\nimport time\nimport shutil\nimport random\n\nimport numpy as np\nimport pandas as pd\nimport geopandas as gpd\nfrom map2loop.topology import Topology\nfrom map2loop import m2l_utils\nfrom map2loop import m2l_geometry\nfrom map2loop import m2l_interpolation\nfrom map2loop import m2l_map_checker\nfrom map2loop.m2l_utils import display, enable_quiet_mode, disable_quiet_mode, print\nfrom map2loop.m2l_export import export_to_projectfile\nimport map2model\n\nimport networkx as nx\nimport matplotlib.pyplot as plt\nimport matplotlib.colors as colors\nimport rasterio\nimport shapely\n\n\nclass Config(object):\n \"\"\"Object that represents a sub-project. It is defined by some source data, \n a region of interest (bounding box or polygon) and some execution flags.\n \"\"\"\n\n def __init__(self,\n project_path,\n overwrite,\n geology_file,\n fault_file,\n fold_file,\n structure_file,\n mindep_file,\n bbox_3d,\n polygon,\n step_out,\n dtm_crs,\n proj_crs,\n local,\n quiet,\n loopFilename,\n c_l={},\n **kwargs):\n\n self.project_path = project_path\n\n if overwrite is False:\n print(\n \"WARNING: Overwrite should be a string value {true, in-place} ...\")\n self.check_overwrite()\n if overwrite is True:\n print(\n \"WARNING: Overwrite should be a string value {true, in-place} ... converting to true.\")\n overwrite = 'true'\n\n if (not os.path.exists(project_path)):\n # Create proj root dir if doesn't exist\n os.mkdir(project_path)\n elif overwrite == \"in-place\":\n # Pass if proj root exists and complete overwrite not wanted\n pass\n else:\n # Remove if exists and accept user's direction\n if overwrite == \"true\":\n shutil.rmtree(project_path)\n while os.path.exists(project_path):\n pass\n os.mkdir(project_path)\n else:\n self.check_overwrite()\n\n self.graph_path = os.path.join(self.project_path, 'graph')\n self.tmp_path = os.path.join(self.project_path, 'tmp')\n self.data_path = os.path.join(self.project_path, 'data')\n self.dtm_path = os.path.join(self.project_path, 'dtm')\n self.output_path = os.path.join(self.project_path, 'output')\n self.vtk_path = os.path.join(self.project_path, 'vtk')\n\n self.fault_file_csv = os.path.join(self.tmp_path, \"faults.csv\")\n self.fault_output_file_csv = os.path.join(self.output_path,\n \"faults.csv\")\n self.structure_file_csv = os.path.join(self.tmp_path, \"structure.csv\")\n self.geology_file_csv = os.path.join(self.tmp_path, \"geology.csv\")\n self.mindep_file_csv = os.path.join(self.tmp_path, \"mindep.csv\")\n\n self.strat_graph_file = os.path.join(self.graph_path,\n \"graph_strat_NONE.gml\")\n self.dtm_file = os.path.join(self.dtm_path, 'dtm.tif')\n self.dtm_reproj_file = os.path.join(self.dtm_path, 'dtm_rp.tif')\n\n if (not os.path.isdir(self.tmp_path)):\n os.mkdir(self.tmp_path)\n if (not os.path.isdir(self.data_path)):\n os.mkdir(self.data_path)\n if (not os.path.isdir(self.output_path)):\n os.mkdir(self.output_path)\n if (not os.path.isdir(self.dtm_path)):\n os.mkdir(self.dtm_path)\n if (not os.path.isdir(self.vtk_path)):\n os.mkdir(self.vtk_path)\n if (not os.path.isdir(self.graph_path)):\n os.mkdir(self.graph_path)\n\n self.quiet = quiet\n if self.quiet == 'all':\n enable_quiet_mode()\n\n self.clut_path = kwargs['clut_path']\n self.run_flags = kwargs['run_flags']\n\n self.bbox_3d = bbox_3d\n self.bbox = tuple([\n bbox_3d[\"minx\"], bbox_3d[\"miny\"], bbox_3d[\"maxx\"], bbox_3d[\"maxy\"]\n ])\n self.polygon = polygon\n self.step_out = step_out\n\n self.quiet = quiet\n self.c_l = c_l\n\n self.dtm_crs = dtm_crs\n self.proj_crs = 
proj_crs\n\n self.loop_projectfile = loopFilename\n\n # Check input maps for missing values\n drift_prefix = kwargs.get('drift_prefix', ['None'])\n self.local = local\n # - Check if fold file is always the same as fault or needs to be seperated\n # TODO: Allow for input as a polygon, not just a bounding box.\n structure_file, geology_file, fault_file, mindep_file, fold_file, c_l = m2l_map_checker.check_map(\n structure_file, geology_file, fault_file, mindep_file, fold_file,\n self.tmp_path, self.bbox, c_l, proj_crs, self.local, drift_prefix)\n\n # Process and store workflow params\n self.geology_file = geology_file\n self.structure_file = structure_file\n self.fault_file = fault_file\n self.fold_file = fold_file\n self.mindep_file = mindep_file\n\n disable_quiet_mode()\n\n def check_overwrite(self):\n allow = input(\n \"Directory \\\"{}\\\" exists, overwrite? (y/[n])\".format(\n self.project_path))\n if allow == \"y\":\n shutil.rmtree(self.project_path)\n while os.path.exists(self.project_path):\n pass\n os.mkdir(self.project_path)\n else:\n sys.exit(\n 'Either set overwrite to true or specify a different output_path.')\n\n def preprocess(self):\n \"\"\"[summary]\n\n :param command: [description], defaults to \"\"\n :type command: str, optional\n \"\"\"\n\n if self.quiet == 'all':\n enable_quiet_mode()\n\n geology = gpd.read_file(self.geology_file, bbox=self.bbox)\n geology[self.c_l['g']].fillna(geology[self.c_l['g2']], inplace=True)\n geology[self.c_l['g']].fillna(geology[self.c_l['c']], inplace=True)\n faults = gpd.read_file(self.fault_file, bbox=self.bbox)\n folds = gpd.read_file(self.fold_file, bbox=self.bbox)\n structures = gpd.read_file(self.structure_file, bbox=self.bbox)\n mindeps = None\n try:\n mindeps = gpd.read_file(self.mindep_file, bbox=self.bbox)\n mindeps.crs = self.proj_crs\n except Exception as e:\n print(\"Warning: Valid mineral deposit file missing\")\n\n # Fix crs to project default and overwrite source\n geology.crs = self.proj_crs\n faults.crs = self.proj_crs\n folds.crs = self.proj_crs\n structures.crs = self.proj_crs\n self.mindeps = mindeps\n\n self.geology = geology\n self.faults = faults\n self.structures = structures\n\n # Faults\n self.faults_clip = faults.copy()\n self.faults_clip.crs = self.proj_crs\n self.faults_clip_file = os.path.join(self.tmp_path, \"faults_clip.shp\")\n self.faults_clip.to_file(self.faults_clip_file)\n\n # Geology\n self.geol_clip = m2l_utils.explode(self.geology)\n self.geol_clip.crs = self.proj_crs\n self.geol_clip_file = os.path.join(self.tmp_path, \"geol_clip.shp\")\n self.geol_clip.to_file(self.geol_clip_file)\n\n # pd.set_option('display.max_columns', None)\n # pd.set_option('display.max_rows', None)\n\n # Check if bedding data uses the strike convention instead of dip direction\n if (self.c_l['otype'] == 'strike'):\n structures['azimuth2'] = structures.apply(\n lambda row: row[self.c_l['dd']] + 90.0, axis=1)\n self.c_l['dd'] = 'azimuth2'\n self.c_l['otype'] = 'dip direction'\n structures.to_file(self.structure_file)\n\n # Structures\n list1 = [\n 'geometry', self.c_l['d'], self.c_l['dd'], self.c_l['sf'],\n self.c_l['bo']\n ]\n list2 = list(set(list1))\n sub_pts = self.structures[list2]\n structure_code = gpd.sjoin(sub_pts,\n self.geol_clip,\n how=\"left\",\n op=\"within\")\n\n minx, miny, maxx, maxy = self.bbox\n y_point_list = [miny, miny, maxy, maxy, miny]\n x_point_list = [minx, maxx, maxx, minx, minx]\n\n bbox_geom = shapely.geometry.Polygon(zip(x_point_list, y_point_list))\n\n polygo = gpd.GeoDataFrame(index=[0],\n 
crs=self.proj_crs,\n geometry=[bbox_geom])\n is_bed = structure_code[self.c_l['sf']].str.contains(\n self.c_l['bedding'], regex=False)\n\n structure_clip = structure_code[is_bed]\n structure_clip.crs = self.proj_crs\n\n if (self.c_l['otype'] == 'strike'):\n structure_clip['azimuth2'] = structure_clip.apply(\n lambda row: row[self.c_l['dd']] + 90.0, axis=1)\n self.c_l['dd'] = 'azimuth2'\n self.c_l['otype'] = 'dip direction'\n\n self.structure_clip = structure_clip[~structure_clip[self.c_l['o']].\n isnull()]\n self.structure_clip_file = os.path.join(self.tmp_path,\n 'structure_clip.shp')\n self.structure_clip.to_file(self.structure_clip_file)\n\n self.create_cmap()\n\n try:\n fig, ax = plt.subplots()\n plt.tight_layout()\n ax.ticklabel_format(axis='both', useOffset=False, style='plain')\n ax.margins(0.0)\n fig.set_facecolor(\"#ffffff00\")\n\n self.geology_figure = geology.copy().plot(\n column=self.c_l['c'],\n ax=ax,\n figsize=(10, 10),\n edgecolor='#000000',\n linewidth=0.2,\n cmap=self.cmap).get_figure()\n\n # self.export_png()\n fig, ax = plt.subplots()\n\n base = geology.plot(column=self.c_l['c'],\n figsize=(10, 10),\n ax=ax,\n edgecolor='#000000',\n linewidth=0.2,\n legend=True,\n cmap=self.cmap)\n leg = base.get_legend()\n leg.set_bbox_to_anchor((1.04, 1))\n\n structures.plot(ax=base, color='none', edgecolor='black')\n\n faults.plot(ax=base,\n cmap='rainbow',\n column=self.c_l['f'],\n figsize=(10, 10),\n linewidth=0.4)\n structures[[\n 'geometry', self.c_l['gi'], self.c_l['d'], self.c_l['dd']\n ]].plot(ax=base)\n\n fig = self.polygon.plot(ax=base, color='none',\n edgecolor='black').get_figure()\n fig.savefig(os.path.join(self.tmp_path, \"input-data.png\"))\n\n if self.quiet == 'None':\n plt.show()\n\n return\n except Exception as e:\n print(e)\n\n disable_quiet_mode()\n\n def create_cmap(self):\n # Make colours consistent from map to model\n formations = sorted([\n formation.replace(\" \", \"_\").replace('-', '_') for formation in\n list(set(self.geol_clip[self.c_l['c']].to_numpy()))\n ])\n temp_colours = [\"\"] * len(formations)\n self.colour_dict = dict(zip(formations, temp_colours))\n try:\n # Try to retrieve the clut reference\n colour_ref = pd.read_csv(self.clut_path)\n for formation in formations:\n key = formation\n colour = None\n try:\n colour = colour_ref[colour_ref['UNITNAME'] ==\n key]['colour'].to_numpy()[0]\n except Exception as e:\n colour = ('#%02X%02X%02X' %\n (random.randint(0, 255), random.randint(\n 0, 255), random.randint(0, 255)))\n\n self.colour_dict[key] = colour\n print(key, colour)\n\n except Exception as e:\n # Otherwise, just append a random set\n self.clut_path = \"\"\n random_colours = [\n '#%02X%02X%02X' % (random.randint(\n 0, 255), random.randint(0, 255), random.randint(0, 255))\n for i in range(len(formations))\n ]\n i = 0\n for key in self.colour_dict.keys():\n self.colour_dict[key] = random_colours[i]\n\n self.cmap = colors.ListedColormap(self.colour_dict.values(),\n name='geol_key')\n\n def export_csv(self):\n # TODO: - Move away from tab seperators entirely (topology and map2model)\n\n # Save geology polygons\n hint_flag = False # use GSWA strat database to provide topology hints\n sub_geol = self.geology[[\n 'geometry', self.c_l['o'], self.c_l['c'], self.c_l['g'],\n self.c_l['u'], self.c_l['min'], self.c_l['max'], self.c_l['ds'],\n self.c_l['r1'], self.c_l['r2']\n ]]\n Topology.save_geol_wkt(sub_geol, self.geology_file_csv, self.c_l,\n hint_flag)\n\n # Save mineral deposits\n if self.mindeps is not None:\n sub_mindep = self.mindeps[[\n 'geometry', 
self.c_l['msc'], self.c_l['msn'], self.c_l['mst'],\n self.c_l['mtc'], self.c_l['mscm'], self.c_l['mcom']\n ]]\n Topology.save_mindep_wkt(sub_mindep, self.mindep_file_csv,\n self.c_l)\n\n # Save orientation data\n sub_pts = self.structures[[\n 'geometry', self.c_l['gi'], self.c_l['d'], self.c_l['dd']\n ]]\n Topology.save_structure_wkt(sub_pts, self.structure_file_csv, self.c_l)\n\n # Save faults\n sub_lines = self.faults[['geometry', self.c_l['o'], self.c_l['f']]]\n Topology.save_faults_wkt(sub_lines, self.fault_file_csv, self.c_l)\n\n def update_parfile(self):\n Topology.save_parfile(self, self.c_l, self.output_path,\n self.geology_file_csv, self.fault_file_csv,\n self.structure_file_csv, self.mindep_file_csv,\n self.bbox[0], self.bbox[1], self.bbox[2],\n self.bbox[3], 500.0, 'Fe,Cu,Au,NONE')\n\n def run_map2model(self, deposits, aus):\n quiet_m2m = False\n if self.quiet == 'all':\n quiet_m2m = True\n if self.mindeps is not None:\n run_log = map2model.run(self.graph_path, self.geology_file_csv,\n self.fault_file_csv, self.mindep_file_csv,\n self.bbox_3d, self.c_l, quiet_m2m,\n deposits)\n else:\n run_log = map2model.run(self.graph_path, self.geology_file_csv,\n self.fault_file_csv, \"\", self.bbox_3d,\n self.c_l, quiet_m2m, deposits)\n\n print(run_log)\n\n print(\"Resolving ambiguities using ASUD...\", end='\\toutput_dir:')\n if aus:\n Topology.use_asud(self.strat_graph_file, self.graph_path)\n self.strat_graph_file = os.path.join(self.graph_path,\n 'ASUD_strat.gml')\n print(\"Done.\")\n\n print(\"Generating topology graph display and unit groups...\")\n self.G = nx.read_gml(self.strat_graph_file, label='id')\n selected_nodes = [n for n, v in self.G.nodes(data=True) if n >= 0]\n\n if self.quiet == 'None':\n nx.draw_networkx(self.G,\n pos=nx.kamada_kawai_layout(self.G),\n arrows=True,\n nodelist=selected_nodes)\n\n nlist = list(self.G.nodes.data('LabelGraphics'))\n nlist.sort()\n for node in nlist:\n if node[0] >= 0:\n elem = str(node[1]).replace(\"{'text':\", \"\").replace(\n \", 'fontSize': 14}\", \"\")\n # second = elem.split(\":\").replace(\"'\", \"\")\n print(node[0], \" \", elem)\n\n # plt.savefig(os.path.join(self.tmp_path,\"topology-fig.png\"))\n print(\"Topology figure saved to\",\n os.path.join(self.tmp_path, \"topology-fig.png\"))\n\n # Save groups of stratigraphic units\n groups, self.glabels, G = Topology.get_series(self.strat_graph_file,\n 'id')\n\n quiet_topology = True\n if self.quiet == 'None':\n quiet_topology = False\n Topology.save_units(\n G,\n self.tmp_path,\n self.glabels,\n Australia=True,\n asud_strat_file=\"https://gist.githubusercontent.com/yohanderose/3b257dc768fafe5aaf70e64ae55e4c42/raw/8598c7563c1eea5c0cd1080f2c418dc975cc5433/ASUD.csv\",\n quiet=quiet_topology)\n\n print(\"Done\")\n\n def load_dtm(self, source=\"AU\"):\n # group all Australian states codes under the global country code (ISO 3166 ALPHA-2)\n polygon_ll = self.polygon.to_crs(self.dtm_crs)\n minlong = polygon_ll.total_bounds[0] - self.step_out\n maxlong = polygon_ll.total_bounds[2] + self.step_out\n minlat = polygon_ll.total_bounds[1] - self.step_out\n maxlat = polygon_ll.total_bounds[3] + self.step_out\n print(\"Fetching DTM... 
\", end=\" bbox:\")\n print(minlong, maxlong, minlat, maxlat)\n if source in (\"WA\", \"NSW\", \"VIC\", \"SA\", \"QLD\", \"ACT\", \"TAS\"):\n source = 'AU'\n i, done = 0, False\n while not done:\n if i >= 10:\n raise NameError(\n f'map2loop error: Could not access DTM server after {i} attempts'\n )\n try:\n print(f'Attempt: {i} ...', end='')\n if source.upper() in (\"AU\", \"AUSTRALIA\"):\n m2l_utils.get_dtm(self.dtm_file, minlong, maxlong, minlat,\n maxlat)\n elif source.upper() in (\"T.H\", \"HAWAII\"): # beware, TH is ISO 3166 code for Thailand\n m2l_utils.get_dtm_hawaii(self.dtm_file, minlong, maxlong,\n minlat, maxlat)\n else: # try from opentopography\n m2l_utils.get_dtm_topography_org(\n self.dtm_file, minlong, maxlong, minlat, maxlat)\n print(\"Succeeded !\")\n done = True\n except:\n time.sleep(1)\n i += 1\n print(f' Failed !')\n elif source.startswith('http'):\n i, done = 0, False\n while not done:\n if i >= 10:\n raise NameError(\n f'map2loop error: Could not access DTM server after {i} attempts'\n )\n try:\n print(f'Attempt: {i} ...', end='')\n if 'au' in source:\n m2l_utils.get_dtm(self.dtm_file, minlong, maxlong, minlat,\n maxlat, url=source)\n elif 'hawaii' in source: # beware, TH is ISO 3166 code for Thailand\n m2l_utils.get_dtm_hawaii(self.dtm_file, minlong, maxlong,\n minlat, maxlat, url=source)\n else: # try from opentopography\n m2l_utils.get_dtm_topography_org(\n self.dtm_file, minlong, maxlong, minlat, maxlat)\n print(\"Succeeded !\")\n done = True\n except:\n time.sleep(1)\n i += 1\n print(f' Failed !')\n else:\n bbox = [\n self.bbox_3d[\"minx\"], self.bbox_3d[\"miny\"],\n self.bbox_3d[\"maxx\"], self.bbox_3d[\"maxy\"]\n ]\n m2l_utils.get_local_dtm(self.dtm_file, source, self.dtm_crs, bbox)\n\n m2l_utils.reproject_dtm(self.dtm_file,\n self.dtm_reproj_file,\n self.dtm_crs, self.proj_crs)\n\n self.dtm = rasterio.open(self.dtm_reproj_file)\n\n if self.quiet == 'None':\n plt.imshow(self.dtm.read(1), cmap='terrain', vmin=0, vmax=1000)\n\n plt.title('DTM')\n plt.show()\n\n def join_features(self):\n # Save geology clips\n quiet_topology = True\n if self.quiet == \"None\":\n quiet_topology = False\n Topology.save_group(Topology, self.G, self.tmp_path, self.glabels,\n self.geol_clip, self.c_l, quiet_topology)\n\n def calc_depth_grid(self, dtb):\n dtm = self.dtm\n\n if dtb == \"\":\n self.dtb = 0\n self.dtb_null = 0\n\n print(\"dtb and dtb_null set to 0\")\n return\n\n # TODO: DTB need to be defined, every function call bellow here that has a False boolean is referencing to the workflow['cover_map'] flag\n # dtb_grid = os.path.join(data_path,'young_cover_grid.tif') #obviously hard-wired for the moment\n # dtb_null = '-2147483648' #obviously hard-wired for the moment\n # cover_map_path = os.path.join(data_path,'Young_Cover_FDS_MGA_clean.shp') #obviously hard-wired for the moment\n # dtb_clip = os.path.join(output_path,'young_cover_grid_clip.tif') #obviously hard-wired for the moment\n # cover_dip = 10 # dip of cover away from contact\n # cover_spacing = 5000 # of contact grid in metres\n\n dtb_raw = rasterio.open(dtb_grid)\n\n cover = gpd.read_file(cover_map_path)\n\n with fiona.open(cover_map_path, \"r\") as shapefile:\n shapes = [feature[\"geometry\"] for feature in shapefile]\n\n with rasterio.open(dtb_grid) as src:\n out_image, out_transform = rasterio.mask.mask(src,\n shapes,\n crop=True)\n out_meta = src.meta.copy()\n\n out_meta.update({\n \"driver\": \"GTiff\",\n \"height\": out_image.shape[1],\n \"width\": out_image.shape[2],\n \"transform\": out_transform\n })\n\n 
with rasterio.open(dtb_clip, \"w\", **out_meta) as dest:\n dest.write(out_image)\n\n dtb = rasterio.open(dtb_clip)\n\n m2l_geometry.process_cover(output_path,\n dtm,\n dtb,\n dtb_null,\n cover,\n workflow['cover_map'],\n cover_dip,\n bbox,\n proj_crs,\n cover_spacing,\n contact_decimate=3,\n use_vector=True,\n use_grid=True)\n\n def export_orientations(self, orientation_decimate):\n m2l_geometry.save_orientations(self.structure_clip, self.output_path,\n self.c_l, orientation_decimate,\n self.dtm, self.dtb, self.dtb_null,\n False)\n\n if self.quiet == 'None':\n m2l_utils.plot_points(\n os.path.join(self.output_path, 'orientations.csv'),\n self.geol_clip, 'formation', 'X', 'Y', False, 'alpha')\n\n # Create arbitrary points for series without orientation data\n m2l_geometry.create_orientations(self.tmp_path, self.output_path,\n self.dtm, self.dtb, self.dtb_null,\n False, self.geol_clip,\n self.structure_clip, self.c_l)\n\n def export_contacts(self, contact_decimate, intrusion_mode):\n\n ls_dict, ls_dict_decimate = m2l_geometry.save_basal_contacts(\n self.tmp_path, self.dtm, self.dtb, self.dtb_null, False,\n self.geol_clip, contact_decimate, self.c_l, intrusion_mode)\n\n # Remove basal contacts defined by faults, no decimation\n m2l_geometry.save_basal_no_faults(\n os.path.join(self.tmp_path, 'basal_contacts.shp'),\n os.path.join(self.tmp_path, 'faults_clip.shp'), ls_dict, 10,\n self.c_l, self.proj_crs)\n\n # Remove faults from decimated basal contacts then save\n contacts = gpd.read_file(\n os.path.join(self.tmp_path, 'basal_contacts.shp'))\n m2l_geometry.save_basal_contacts_csv(contacts, self.output_path,\n self.dtm, self.dtb, self.dtb_null,\n False, contact_decimate, self.c_l)\n # False in this call was already false and isn't the cover flag\n if self.quiet == \"None\":\n m2l_utils.plot_points(\n os.path.join(self.output_path, 'contacts4.csv'),\n self.geol_clip, 'formation', 'X', 'Y', False, 'alpha')\n\n # Interpolates a regular grid of orientations from an shapefile of\n # arbitrarily-located points and saves out four csv files of l, m & n\n # direction cosines and dip dip direction data\n def test_interpolation(self, interpolation_spacing, misorientation,\n interpolation_scheme):\n\n geology_file = self.geol_clip_file\n structure_file = self.structure_clip_file\n basal_contacts = os.path.join(self.tmp_path, 'basal_contacts.shp')\n self.spacing = interpolation_spacing # grid spacing in meters\n # misorientation = misorientation\n self.scheme = interpolation_scheme\n orientations = self.structures\n\n quiet_interp = True\n if self.quiet == \"None\":\n quiet_interp = False\n\n group_girdle = m2l_utils.plot_bedding_stereonets(\n orientations, self.geology, self.c_l, quiet_interp)\n super_groups, self.use_gcode3 = Topology.super_groups_and_groups(\n group_girdle, self.tmp_path, misorientation)\n # print(super_groups)\n # print(self.geology['GROUP_'].unique())\n bbox = self.bbox\n\n orientation_interp, contact_interp, combo_interp = m2l_interpolation.interpolation_grids(\n geology_file, structure_file, basal_contacts, bbox, self.spacing,\n self.proj_crs, self.scheme, super_groups, self.c_l)\n\n with open(os.path.join(self.tmp_path, 'interpolated_orientations.csv'),\n 'w') as f:\n f.write('X, Y, l, m, n, dip, dip_dir\\n')\n for row in orientation_interp:\n ostr = '{}, {}, {}, {}, {}, {}, {}\\n'.format(\n row[0], row[1], row[2], row[3], row[4], row[5], row[6])\n f.write(ostr)\n with open(os.path.join(self.tmp_path, 'interpolated_contacts.csv'),\n 'w') as f:\n f.write('X, Y, l, m, angle\\n')\n for 
row in contact_interp:\n ostr = '{}, {}, {}, {}, {}\\n'.format(row[0], row[1], row[2],\n row[3], row[4])\n f.write(ostr)\n with open(os.path.join(self.tmp_path, 'interpolated_combined.csv'),\n 'w') as f:\n f.write('X, Y, l, m, n, dip, dip_dir\\n')\n for row in combo_interp:\n ostr = '{}, {}, {}, {}, {}, {}, {}\\n'.format(\n row[0], row[1], row[2], row[3], row[4], row[5], row[6])\n f.write(ostr)\n\n if (self.spacing < 0):\n self.spacing = -(bbox[2] - bbox[0]) / spacing\n self.x = int((bbox[2] - bbox[0]) / self.spacing) + 1\n self.y = int((bbox[3] - bbox[1]) / self.spacing) + 1\n x = self.x\n y = self.y\n print(x, y)\n dip_grid = np.ones((y, x))\n dip_grid = dip_grid * -999\n dip_dir_grid = np.ones((y, x))\n dip_dir_grid = dip_dir_grid * -999\n contact_grid = np.ones((y, x))\n contact_grid = dip_dir_grid * -999\n for row in combo_interp:\n r = int((row[1] - bbox[1]) / self.spacing)\n c = int((row[0] - bbox[0]) / self.spacing)\n dip_grid[r, c] = float(row[5])\n dip_dir_grid[r, c] = float(row[6])\n\n for row in contact_interp:\n r = int((row[1] - bbox[1]) / self.spacing)\n c = int((row[0] - bbox[0]) / self.spacing)\n contact_grid[r, c] = float(row[4])\n\n self.dip_grid = dip_grid\n self.dip_dir_grid = dip_dir_grid\n\n if self.quiet == 'None':\n print('interpolated dips')\n plt.imshow(self.dip_grid,\n cmap=\"hsv\",\n origin='lower',\n vmin=-90,\n vmax=90)\n plt.show()\n\n print('interpolated dip directions')\n plt.imshow(self.dip_dir_grid,\n cmap=\"hsv\",\n origin='lower',\n vmin=0,\n vmax=360)\n plt.show()\n\n print('interpolated contacts')\n plt.imshow(contact_grid,\n cmap=\"hsv\",\n origin='lower',\n vmin=-360,\n vmax=360)\n plt.show()\n\n def save_cmap(self):\n \"\"\"Create a colourmap for the model using the colour code\n \"\"\"\n all_sorts = pd.read_csv(\n os.path.join(self.tmp_path, 'all_sorts_clean.csv'))\n\n colours = []\n for code in all_sorts['code']:\n colours.append([self.colour_dict[code]])\n\n data = colours\n expected_extra_cols = pd.DataFrame(columns=['colour'], data=data)\n all_sorts = pd.concat([all_sorts, expected_extra_cols], axis=1)\n all_sorts.to_csv(os.path.join(self.tmp_path, 'all_sorts_clean.csv'),\n \",\",\n index=None)\n\n def export_faults(self, fault_decimate, min_fault_length, fault_dip):\n # fault_decimate = 5\n # min_fault_length = 5000\n # fault_dip = 90\n\n m2l_geometry.save_faults(\n os.path.join(self.tmp_path, 'faults_clip.shp'), self.output_path,\n self.dtm, self.dtb, self.dtb_null, False, self.c_l, fault_decimate,\n min_fault_length, fault_dip)\n\n faults = pd.read_csv(self.fault_output_file_csv, sep=\",\")\n faults_len = len(faults)\n if (faults_len > 0):\n m2l_interpolation.process_fault_throw_and_near_faults_from_grid(\n self.tmp_path, self.output_path, self.dtm_reproj_file,\n self.dtb, self.dtb_null, False, self.c_l, self.proj_crs,\n self.bbox, self.scheme, self.dip_grid, self.dip_dir_grid,\n self.x, self.y, self.spacing)\n\n def process_plutons(self, pluton_dip, pluton_form, dist_buffer,\n contact_decimate):\n # pluton_dip = 45\n pluton_dip = str(pluton_dip)\n self.pluton_form = pluton_form # 'domes'\n\n # dist_buffer = 10\n # contact_decimate = 5 # store every nth contact point (in object order)\n\n m2l_geometry.process_plutons(self.tmp_path, self.output_path,\n self.geol_clip, self.local, self.dtm,\n self.dtb, self.dtb_null, False,\n self.pluton_form, pluton_dip,\n contact_decimate, self.c_l)\n\n def extract_section_features(self, seismic_line_file, seismic_bbox_file,\n seismic_interp_file):\n # Extract faults and basal contacts of groups from 
seismic section\n # input geology file (if local)\n\n seismic_line_file = seismic_line_file\n seismic_line = gpd.read_file(seismic_line_file) # import map\n seismic_line.plot(figsize=(10, 10), edgecolor='#000000',\n linewidth=0.2) # display map\n display(seismic_line)\n\n # input geology file (if local)\n seismic_bbox_file = seismic_bbox_file\n seismic_bbox = gpd.read_file(seismic_bbox_file) # import map\n seismic_bbox.set_index('POSITION', inplace=True)\n\n # input geology file (if local)\n seismic_interp_file = seismic_interp_file\n seismic_interp = gpd.read_file(seismic_interp_file) # import map\n seismic_interp.plot(column='FEATURE',\n figsize=(10, 10),\n edgecolor='#000000',\n linewidth=0.5) # display map\n display(seismic_interp)\n\n surface_cut = 2000\n\n m2l_geometry.extract_section(self.tmp_path, self.output_path,\n seismic_line, seismic_bbox,\n seismic_interp, self.dtm, self.dtb,\n self.dtb_null, False, surface_cut)\n\n contacts = pd.read_csv(os.path.join(self.output_path, 'contacts4.csv'),\n \", \")\n seismic_contacts = pd.read_csv(\n os.path.join(self.output_path, 'seismic_base.csv'), \", \")\n all_contacts = pd.concat([contacts, seismic_contacts], sort=False)\n all_contacts.to_csv(os.path.join(self.output_path, 'contacts4.csv'),\n index=None,\n header=True)\n\n faults = pd.read_csv(os.path.join(self.output_path, 'faults.csv'),\n \", \")\n seismic_faults = pd.read_csv(\n os.path.join(self.output_path, 'seismic_faults.csv'), \", \")\n all_faults = pd.concat([faults, seismic_faults], sort=False)\n all_faults.to_csv(os.path.join(self.output_path, 'faults.csv'),\n index=None,\n header=True)\n\n def propagate_contact_dips(self, contact_dip,\n contact_orientation_decimate):\n print(\"Propagating dips along contacts...\")\n orientations = pd.read_csv(\n os.path.join(self.output_path, 'orientations.csv'), \", \")\n # This is supposed to be a csv but my csv doesn't have a geometry part\n contacts = gpd.read_file(\n os.path.join(self.tmp_path, 'basal_contacts.shp'))\n # contact_dip = -999\n # contact_orientation_decimate = 5\n m2l_geometry.save_basal_contacts_orientations_csv(\n contacts, orientations, self.geol_clip, self.tmp_path,\n self.output_path, self.dtm, self.dtb, self.dtb_null, False,\n contact_orientation_decimate, self.c_l, contact_dip, self.dip_grid,\n self.spacing, self.bbox)\n\n def calc_thickness(self, contact_decimate, null_scheme, thickness_buffer,\n max_thickness_allowed, cl):\n # Estimate formation thickness and normalised formation thickness\n geology_file = os.path.join(self.tmp_path, 'basal_contacts.shp')\n # contact_decimate = 5\n # null_scheme = 'null'\n m2l_interpolation.save_contact_vectors(geology_file, self.tmp_path,\n self.dtm, self.dtb,\n self.dtb_null, False, self.bbox,\n self.c_l, null_scheme,\n contact_decimate)\n\n # buffer = 5000\n # max_thickness_allowed = 10000\n\n # TODO: multi thread / numba jit\n m2l_geometry.calc_thickness_with_grid(self.tmp_path, self.output_path,\n thickness_buffer,\n max_thickness_allowed, self.c_l,\n self.bbox, self.dip_grid,\n self.dip_dir_grid, self.x,\n self.y, self.spacing, self.dtm)\n\n m2l_geometry.calc_min_thickness_with_grid(\n self.tmp_path, self.output_path, thickness_buffer,\n max_thickness_allowed, self.c_l, self.bbox, self.dip_grid,\n self.dip_dir_grid, self.x, self.y, self.spacing, self.dtm)\n\n m2l_geometry.normalise_thickness(self.output_path)\n\n if self.quiet == \"None\":\n m2l_utils.plot_points(\n os.path.join(self.output_path,\n 'formation_thicknesses_norm.csv'), self.geol_clip,\n 'norm_th', 'x', 'y', True, 
'numeric')\n\n def create_fold_axial_trace_points(self, fold_decimate, fat_step,\n close_dip):\n # fold_decimate = 5\n folds_clip = gpd.read_file(self.fold_file)\n if (len(folds_clip) > 0):\n\n m2l_geometry.save_fold_axial_traces(self.fold_file,\n self.output_path, self.dtm,\n self.dtb, self.dtb_null, False,\n self.c_l, fold_decimate)\n\n # TODO : better approximation / multithread / numba\n m2l_geometry.save_fold_axial_traces_orientations(\n self.fold_file, self.output_path, self.tmp_path, self.dtm,\n self.dtb, self.dtb_null, False, self.c_l, self.proj_crs,\n fold_decimate, fat_step, close_dip, self.scheme, self.bbox,\n self.spacing, self.dip_grid, self.dip_dir_grid)\n\n def postprocess(self, inputs, workflow, use_interpolations, use_fat):\n # use_interpolations = True\n # use_fat = True\n\n m2l_geometry.tidy_data(self.output_path, self.tmp_path, self.clut_path,\n self.use_gcode3, use_interpolations, use_fat,\n self.pluton_form, inputs, workflow, self.c_l)\n model_top = round(np.amax(self.dtm.read(1)), -2)\n\n # self.dtm.close()\n # if(workflow['cover_map']):\n # dtb.close()\n\n # Calculate polarity of original bedding orientation data\n if (workflow['polarity']):\n m2l_geometry.save_orientations_with_polarity(\n os.path.join(self.output_path, 'orientations.csv'),\n self.output_path,\n self.c_l,\n os.path.join(self.tmp_path, 'basal_contacts.shp'),\n os.path.join(self.tmp_path, 'all_sorts.csv'),\n )\n\n if self.quiet == \"None\":\n m2l_utils.plot_points(\n os.path.join(self.output_path,\n 'orientations_polarity.csv'), self.geol_clip,\n 'polarity', 'X', 'Y', True, 'alpha')\n\n # Calculate minimum fault offset from stratigraphy and stratigraphic fault offset\n if (workflow['strat_offset']):\n fault_test = pd.read_csv(\n os.path.join(self.output_path, 'fault_dimensions.csv'), ', ')\n if (len(fault_test) > 0):\n\n m2l_geometry.fault_strat_offset(\n self.output_path, self.c_l, self.proj_crs,\n os.path.join(self.output_path,\n 'formation_summary_thicknesses.csv'),\n os.path.join(self.tmp_path, 'all_sorts.csv'),\n os.path.join(self.tmp_path, 'faults_clip.shp'),\n os.path.join(self.tmp_path, 'geol_clip.shp'),\n os.path.join(self.output_path, 'fault_dimensions.csv'))\n\n if self.quiet == \"None\":\n m2l_utils.plot_points(\n os.path.join(self.output_path,\n 'fault_strat_offset3.csv'),\n self.geol_clip, 'min_offset', 'X', 'Y', True,\n 'numeric')\n m2l_utils.plot_points(\n os.path.join(self.output_path,\n 'fault_strat_offset3.csv'),\n self.geol_clip, 'strat_offset', 'X', 'Y', True,\n 'numeric')\n\n # Analyse fault topologies\n fault_parse_figs = True\n if self.quiet == \"None\":\n fault_parse = False\n Topology.parse_fault_relationships(self.graph_path, self.tmp_path,\n self.output_path, fault_parse_figs)\n\n # TODO: Figures sometimes look a bit squashed in notebooks\n\n def update_projectfile(self):\n self.loop_projectfile = export_to_projectfile(self.loop_projectfile,\n self.tmp_path,\n self.output_path,\n self.bbox_3d,\n self.proj_crs)\n\n print(\"PROJECTFILE FOUND AT\", self.loop_projectfile)\n\n def export_png(self):\n filename = self.loop_projectfile\n if self.loop_projectfile is None:\n # TODO: Make sure these user provided paths end with a slash or are joined properly\n filename = os.path.join(\n self.project_path, '{}'.format(self.project_path))\n print(\"Exporting graphical map...\")\n try:\n\n self.geology_figure.savefig(\"{}.png\".format(filename))\n print(\"Geology graphic exported to: \", filename)\n except Exception as e:\n print(e)\n print(\"WARNING: Could not save geology graphic\")\n"
] |
[
[
"pandas.concat",
"pandas.read_csv",
"matplotlib.pyplot.tight_layout",
"matplotlib.pyplot.imshow",
"matplotlib.pyplot.title",
"matplotlib.pyplot.subplots",
"pandas.DataFrame",
"numpy.ones",
"matplotlib.pyplot.show"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [
"2.0",
"1.4",
"1.1",
"1.5",
"1.2",
"1.3"
],
"scipy": [],
"tensorflow": []
}
] |
CaptainDuke/horovod
|
[
"48a48f0bdf140cd4807d58688c56061dd4f2c954"
] |
[
"test/parallel/test_tensorflow.py"
] |
[
"# Copyright 2016 The TensorFlow Authors. All Rights Reserved.\n# Modifications copyright (C) 2018 Uber Technologies, Inc.\n# Modifications copyright (C) 2019 Intel Corporation\n# Modifications copyright (C) 2020, NVIDIA CORPORATION. All rights reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# =============================================================================\n\n\"\"\"Tests for horovod.tensorflow.mpi_ops.\"\"\"\n\nfrom distutils.version import LooseVersion\n\nimport itertools\nimport numpy as np\nimport os\nimport tensorflow as tf\nfrom horovod.tensorflow.util import _executing_eagerly\nfrom tensorflow.python.framework import ops\nimport warnings\n\nimport horovod.tensorflow as hvd\n\nfrom common import mpi_env_rank_and_size\n\nif hasattr(tf, 'ConfigProto'):\n config = tf.ConfigProto()\n config.gpu_options.allow_growth = True\n\nif hasattr(tf, 'config') and hasattr(tf.config, 'experimental') \\\n and hasattr(tf.config.experimental, 'set_memory_growth'):\n gpus = tf.config.experimental.list_physical_devices('GPU')\n for gpu in gpus:\n tf.config.experimental.set_memory_growth(gpu, True)\nelse:\n # Specifies the config to use with eager execution. Does not preclude\n # tests from running in the graph mode.\n tf.enable_eager_execution(config=config)\n\nccl_supported_types = set([tf.uint8, tf.int32, tf.int64, tf.float32, tf.float64])\n\n_IS_TF2 = LooseVersion(tf.__version__) >= LooseVersion('2.0.0')\n\n\nclass TensorFlowTests(tf.test.TestCase):\n \"\"\"\n Tests for ops in horovod.tensorflow.\n \"\"\"\n\n def __init__(self, *args, **kwargs):\n super(TensorFlowTests, self).__init__(*args, **kwargs)\n warnings.simplefilter('module')\n if hasattr(tf, 'contrib') and hasattr(tf.contrib, 'eager'):\n self.tfe = tf.contrib.eager\n else:\n self.tfe = tf\n\n def evaluate(self, tensors):\n if _executing_eagerly():\n return self._eval_helper(tensors)\n sess = ops.get_default_session()\n if sess is None:\n with self.test_session(config=config) as sess:\n return sess.run(tensors)\n else:\n return sess.run(tensors)\n\n def assign(self, variables, values):\n if _executing_eagerly():\n for var, val in zip(variables, values):\n var.assign(val)\n else:\n sess = ops.get_default_session()\n if sess is None:\n with self.test_session(config=config) as sess:\n for var, val in zip(variables, values):\n var.load(val, sess)\n else:\n for var, val in zip(variables, values):\n var.load(val, sess)\n\n def random_uniform(self, *args, **kwargs):\n if hasattr(tf, 'random') and hasattr(tf.random, 'set_seed'):\n tf.random.set_seed(1234)\n return tf.random.uniform(*args, **kwargs)\n else:\n tf.set_random_seed(1234)\n return tf.random_uniform(*args, **kwargs)\n\n def filter_supported_types(self, types):\n if 'CCL_ROOT' in os.environ:\n types = [t for t in types if t in ccl_supported_types]\n return types\n\n def test_horovod_rank(self):\n \"\"\"Test that the rank returned by hvd.rank() is correct.\"\"\"\n mpi_rank, _ = mpi_env_rank_and_size()\n gloo_rank = int(os.getenv('HOROVOD_RANK', -1))\n\n # The mpi rank 
does not match gloo rank, we need to figure which one\n # we are using to run the test.\n is_mpi = gloo_rank == -1\n hvd.init()\n rank = hvd.rank()\n\n if is_mpi:\n assert mpi_rank == rank\n else:\n assert gloo_rank == rank\n\n def test_horovod_size(self):\n \"\"\"Test that the size returned by hvd.size() is correct.\"\"\"\n _, mpi_size = mpi_env_rank_and_size()\n gloo_size = int(os.getenv('HOROVOD_SIZE', -1))\n\n # The mpi size does not match gloo size, we need to figure which one\n # we are using to run the test.\n is_mpi = gloo_size == -1\n hvd.init()\n size = hvd.size()\n if is_mpi:\n assert mpi_size == size\n else:\n assert gloo_size == size\n\n def test_horovod_rank_op(self):\n \"\"\"Test that the rank returned by hvd.rank_op() is correct.\"\"\"\n hvd.init()\n rank = self.evaluate(hvd.rank_op())\n self.assertTrue(rank == hvd.rank(),\n \"hvd.rank_op produces incorrect results\")\n\n def test_horovod_local_rank_op(self):\n \"\"\"Test that the local rank returned by hvd.local_rank_op() is correct.\"\"\"\n hvd.init()\n local_rank = self.evaluate(hvd.local_rank_op())\n self.assertTrue(local_rank == hvd.local_rank(),\n \"hvd.local_rank_op produces incorrect results\")\n\n def test_horovod_size_op(self):\n \"\"\"Test that the size returned by hvd.size_op() is correct.\"\"\"\n hvd.init()\n size = self.evaluate(hvd.size_op())\n self.assertTrue(size == hvd.size(),\n \"hvd.size_op produces incorrect results\")\n\n def test_horovod_local_size_op(self):\n \"\"\"Test that the local size returned by hvd.local_size_op() is correct.\"\"\"\n hvd.init()\n local_size = self.evaluate(hvd.local_size_op())\n self.assertTrue(local_size == hvd.local_size(),\n \"hvd.local_size_op produces incorrect results\")\n\n def test_horovod_allreduce_cpu(self):\n \"\"\"Test on CPU that the allreduce correctly sums 1D, 2D, 3D tensors.\"\"\"\n hvd.init()\n size = hvd.size()\n dtypes = self.filter_supported_types([tf.int32, tf.int64, tf.float16, tf.float32, tf.float64])\n dims = [1, 2, 3]\n for dtype, dim in itertools.product(dtypes, dims):\n with tf.device(\"/cpu:0\"):\n tensor = self.random_uniform(\n [17] * dim, -100, 100, dtype=dtype)\n summed = hvd.allreduce(tensor, average=False)\n multiplied = tensor * size\n max_difference = tf.reduce_max(tf.abs(summed - multiplied))\n\n # Threshold for floating point equality depends on number of\n # ranks, since we're comparing against precise multiplication.\n if size <= 3 or dtype in [tf.int32, tf.int64]:\n threshold = 0\n elif size < 10:\n threshold = 1e-4\n elif size < 15:\n threshold = 5e-4\n else:\n self.skipTest(\"Horovod cluster too large for precise multiplication comparison\")\n\n diff = self.evaluate(max_difference)\n self.assertTrue(diff <= threshold, \"hvd.allreduce produces incorrect results\")\n\n def test_horovod_allreduce_average_cpu(self):\n \"\"\"Test on CPU that the allreduce correctly sums 1D, 2D, 3D tensors.\"\"\"\n hvd.init()\n size = hvd.size()\n dtypes = self.filter_supported_types([tf.int32, tf.int64, tf.float16, tf.float32, tf.float64])\n dims = [1, 2, 3]\n for dtype, dim in itertools.product(dtypes, dims):\n with tf.device(\"/cpu:0\"):\n tensor = self.random_uniform(\n [17] * dim, -100, 100, dtype=dtype)\n averaged = hvd.allreduce(tensor, average=True)\n max_difference = tf.reduce_max(tf.abs(tf.cast(averaged, dtype=dtype) - tensor))\n\n # Threshold for floating point equality depends on number of\n # ranks, since we're comparing against precise multiplication.\n if size <= 3 or dtype in [tf.int32, tf.int64]:\n threshold = 0\n elif size < 10:\n 
threshold = 1e-4\n elif size < 15:\n threshold = 5e-4\n else:\n self.skipTest(\"Horovod cluster too large for precise multiplication comparison\")\n\n diff = self.evaluate(max_difference)\n self.assertTrue(diff <= threshold, \"hvd.allreduce produces incorrect results\")\n\n def test_horovod_allreduce_cpu_fused(self):\n \"\"\"Test on CPU that the allreduce correctly sums 1D, 2D, 3D tensors\n with Tensor Fusion.\"\"\"\n hvd.init()\n size = hvd.size()\n dtypes = self.filter_supported_types([tf.int32, tf.int64, tf.float16, tf.float32, tf.float64])\n dims = [1, 2, 3]\n tests = []\n for dtype, dim in itertools.product(dtypes, dims):\n with tf.device(\"/cpu:0\"):\n tensor = self.random_uniform(\n [17] * dim, -100, 100, dtype=dtype)\n summed = hvd.allreduce(tensor, average=False)\n multiplied = tensor * size\n max_difference = tf.reduce_max(tf.abs(summed - multiplied))\n\n # Threshold for floating point equality depends on number of\n # ranks, since we're comparing against precise multiplication.\n if size <= 3 or dtype in [tf.int32, tf.int64]:\n threshold = 0\n elif size < 10:\n threshold = 1e-4\n elif size < 15:\n threshold = 5e-4\n else:\n self.skipTest(\"Horovod cluster too large for precise multiplication comparison\")\n\n test = max_difference <= threshold\n tests.append(test)\n self.assertTrue(self.evaluate(tf.reduce_all(tests)),\n \"hvd.allreduce produces incorrect results\")\n\n # Note: TF does not support FP64 op attributes so scaling factor is cast to FP32\n # by op and loses precision. We skip FP64 version of pre/postscale tests for this reason.\n # See https://github.com/tensorflow/tensorflow/pull/39452 for PR to resolve this limitation.\n def test_horovod_allreduce_cpu_prescale(self):\n \"\"\"Test on CPU that the allreduce correctly sums 1D, 2D, 3D tensors\n with prescaling\"\"\"\n hvd.init()\n size = hvd.size()\n dtypes = self.filter_supported_types([tf.int32, tf.int64, tf.float16, tf.float32])\n int_types = [tf.int32, tf.int64]\n dims = [1, 2, 3]\n for dtype, dim in itertools.product(dtypes, dims):\n with tf.device(\"/cpu:0\"):\n np.random.seed(1234)\n factor = np.random.uniform()\n tensor = self.random_uniform(\n [17] * dim, -100, 100, dtype=dtype)\n summed = hvd.allreduce(tensor, average=False,\n prescale_factor=factor)\n\n # Scaling done in FP64 math for integer types, FP32 math for FP16 on CPU\n tensor = tf.cast(tensor, tf.float32 if dtype == tf.float16 else\n tf.float64 if dtype in int_types else dtype)\n factor = tf.convert_to_tensor(factor, tf.float32 if dtype == tf.float16 else\n tf.float64 if dtype in int_types else dtype)\n multiplied = tf.cast(factor * tensor, dtype) * size\n max_difference = tf.reduce_max(tf.abs(summed - multiplied))\n\n # Threshold for floating point equality depends on number of\n # ranks, since we're comparing against precise multiplication.\n if size <= 3 or dtype in int_types:\n threshold = 0\n elif size < 10:\n threshold = 1e-4\n elif size < 15:\n threshold = 5e-4\n else:\n break\n\n diff = self.evaluate(max_difference)\n self.assertTrue(diff <= threshold,\n \"hvd.allreduce produces incorrect results\")\n\n def test_horovod_allreduce_cpu_postscale(self):\n \"\"\"Test on CPU that the allreduce correctly sums 1D, 2D, 3D tensors\n with postscaling\"\"\"\n hvd.init()\n size = hvd.size()\n dtypes = self.filter_supported_types([tf.int32, tf.int64, tf.float16, tf.float32])\n int_types = [tf.int32, tf.int64]\n dims = [1, 2, 3]\n for dtype, dim in itertools.product(dtypes, dims):\n with tf.device(\"/cpu:0\"):\n np.random.seed(1234)\n factor = 
np.random.uniform()\n tensor = self.random_uniform(\n [17] * dim, -100, 100, dtype=dtype)\n summed = hvd.allreduce(tensor, average=False,\n postscale_factor=factor)\n\n multiplied = tensor * size\n # Scaling done in FP64 math for integer types, FP32 math for FP16 on CPU\n multiplied = tf.cast(multiplied, tf.float32 if dtype == tf.float16 else\n tf.float64 if dtype in int_types else dtype)\n factor = tf.convert_to_tensor(factor, tf.float32 if dtype == tf.float16 else\n tf.float64 if dtype in int_types else dtype)\n multiplied = tf.cast(factor * multiplied, dtype)\n max_difference = tf.reduce_max(tf.abs(summed - multiplied))\n\n # Threshold for floating point equality depends on number of\n # ranks, since we're comparing against precise multiplication.\n if size <= 3 or dtype in int_types:\n threshold = 0\n elif size < 10:\n threshold = 1e-4\n elif size < 15:\n threshold = 5e-4\n else:\n break\n\n diff = self.evaluate(max_difference)\n self.assertTrue(diff <= threshold,\n \"hvd.allreduce produces incorrect results\")\n\n def test_horovod_allreduce_gpu(self):\n \"\"\"Test that the allreduce works on GPUs.\"\"\"\n # Only do this test if there are GPUs available.\n if not tf.test.is_gpu_available(cuda_only=True):\n self.skipTest((\"No GPUs available\"))\n\n if int(os.environ.get('HOROVOD_MIXED_INSTALL', 0)):\n # Skip if compiled with CUDA but without HOROVOD_GPU_OPERATIONS.\n self.skipTest(\"Not compiled with HOROVOD_GPU_OPERATIONS\")\n\n hvd.init()\n local_rank = hvd.local_rank()\n size = hvd.size()\n\n dtypes = [tf.int32, tf.int64, tf.float16, tf.float32, tf.float64]\n dims = [1, 2, 3]\n for dtype, dim in itertools.product(dtypes, dims):\n with tf.device(\"/gpu:%d\" % local_rank):\n tensor = self.random_uniform(\n [17] * dim, -100, 100, dtype=dtype)\n summed = hvd.allreduce(tensor, average=False)\n multiplied = tensor * size\n max_difference = tf.reduce_max(tf.abs(summed - multiplied))\n\n # Threshold for floating point equality depends on number of\n # ranks, since we're comparing against precise multiplication.\n if size <= 3 or dtype in [tf.int32, tf.int64]:\n threshold = 0\n elif size < 10:\n threshold = 1e-4\n elif size < 15:\n threshold = 5e-4\n else:\n self.skipTest(\"Horovod cluster too large for precise multiplication comparison\")\n\n diff = self.evaluate(max_difference)\n self.assertTrue(diff <= threshold, \"hvd.allreduce on GPU produces incorrect results\")\n\n def test_horovod_allreduce_average_gpu(self):\n \"\"\"Test that the allreduce with average works on GPUs.\"\"\"\n # Only do this test if there are GPUs available.\n if not tf.test.is_gpu_available(cuda_only=True):\n self.skipTest((\"No GPUs available\"))\n\n if int(os.environ.get('HOROVOD_MIXED_INSTALL', 0)):\n # Skip if compiled with CUDA but without HOROVOD_GPU_OPERATIONS.\n self.skipTest(\"Not compiled with HOROVOD_GPU_OPERATIONS\")\n\n hvd.init()\n local_rank = hvd.local_rank()\n size = hvd.size()\n\n dtypes = [tf.int32, tf.int64, tf.float16, tf.float32, tf.float64]\n dims = [1, 2, 3]\n for dtype, dim in itertools.product(dtypes, dims):\n with tf.device(\"/gpu:%d\" % local_rank):\n tensor = self.random_uniform(\n [17] * dim, -100, 100, dtype=dtype)\n averaged = hvd.allreduce(tensor, average=True)\n max_difference = tf.reduce_max(tf.abs(tf.cast(averaged, dtype=dtype) - tensor))\n\n # Threshold for floating point equality depends on number of\n # ranks, since we're comparing against precise multiplication.\n if size <= 3 or dtype in [tf.int32, tf.int64]:\n threshold = 0\n elif size < 10:\n threshold = 1e-4\n elif size < 
15:\n threshold = 5e-4\n else:\n self.skipTest(\"Horovod cluster too large for precise multiplication comparison\")\n\n diff = self.evaluate(max_difference)\n self.assertTrue(diff <= threshold, \"hvd.allreduce on GPU produces incorrect results\")\n\n def test_horovod_allreduce_gpu_fused(self):\n \"\"\"Test that the allreduce works on GPUs with Tensor Fusion.\n\n This test will crash badly if used with an MPI implementation that does\n not support GPU memory transfers directly, as it will call MPI_Send on\n a GPU data pointer.\"\"\"\n # Only do this test if there are GPUs available.\n if not tf.test.is_gpu_available(cuda_only=True):\n self.skipTest((\"No GPUs available\"))\n\n if int(os.environ.get('HOROVOD_MIXED_INSTALL', 0)):\n # Skip if compiled with CUDA but without HOROVOD_GPU_OPERATIONS.\n self.skipTest(\"Not compiled with HOROVOD_GPU_OPERATIONS\")\n\n hvd.init()\n local_rank = hvd.local_rank()\n size = hvd.size()\n\n dtypes = [tf.int32, tf.int64, tf.float16, tf.float32, tf.float64]\n dims = [1, 2, 3]\n tests = []\n for dtype, dim in itertools.product(dtypes, dims):\n with tf.device(\"/gpu:%d\" % local_rank):\n tensor = self.random_uniform(\n [17] * dim, -100, 100, dtype=dtype)\n summed = hvd.allreduce(tensor, average=False)\n multiplied = tensor * size\n max_difference = tf.reduce_max(tf.abs(summed - multiplied))\n\n # Threshold for floating point equality depends on number of\n # ranks, since we're comparing against precise multiplication.\n if size <= 3 or dtype in [tf.int32, tf.int64]:\n threshold = 0\n elif size < 10:\n threshold = 1e-4\n elif size < 15:\n threshold = 5e-4\n else:\n self.skipTest(\"Horovod cluster too large for precise multiplication comparison\")\n\n test = max_difference <= threshold\n tests.append(test)\n self.assertTrue(self.evaluate(tf.reduce_all(tests)),\n \"hvd.allreduce produces incorrect results\")\n\n def test_horovod_allreduce_multi_gpu(self):\n \"\"\"Test that the allreduce works on multiple GPUs.\n\n This test will crash badly if used with an MPI implementation that does\n not support GPU memory transfers directly, as it will call MPI_Send on\n a GPU data pointer.\"\"\"\n # Only do this test if there are GPUs available.\n if not tf.test.is_gpu_available(cuda_only=True):\n self.skipTest((\"No GPUs available\"))\n\n # Only do this test if there are enough GPUs available.\n if len(tf.config.experimental.list_physical_devices('GPU')) < 2:\n self.skipTest((\"Too few GPUs available\"))\n\n if int(os.environ.get('HOROVOD_MIXED_INSTALL', 0)):\n # Skip if compiled with CUDA but without HOROVOD_GPU_OPERATIONS.\n self.skipTest(\"Not compiled with HOROVOD_GPU_OPERATIONS\")\n\n hvd.init()\n local_rank = hvd.local_rank()\n size = hvd.size()\n\n iter = 0\n gpu_ids = [local_rank * 2, local_rank * 2 + 1]\n dtypes = [tf.int32, tf.int64, tf.float16, tf.float32, tf.float64]\n dims = [1, 2, 3]\n for dtype, dim in itertools.product(dtypes, dims):\n iter += 1\n with tf.device(\"/gpu:%d\" % gpu_ids[(iter + local_rank) % 2]):\n tensor = self.random_uniform(\n [17] * dim, -100, 100, dtype=dtype)\n summed = hvd.allreduce(tensor, average=False)\n multiplied = tensor * size\n max_difference = tf.reduce_max(tf.abs(summed - multiplied))\n\n # Threshold for floating point equality depends on number of\n # ranks, since we're comparing against precise multiplication.\n if size <= 3 or dtype in [tf.int32, tf.int64]:\n threshold = 0\n elif size < 10:\n threshold = 1e-4\n elif size < 15:\n threshold = 5e-4\n else:\n self.skipTest(\"Horovod cluster too large for precise multiplication 
comparison\")\n\n diff = self.evaluate(max_difference)\n self.assertTrue(diff <= threshold,\n \"hvd.allreduce on GPU produces incorrect results\")\n\n def test_horovod_allreduce_gpu_prescale(self):\n \"\"\"Test on GPU that the allreduce correctly sums 1D, 2D, 3D tensors\n with prescaling\"\"\"\n\n # Only do this test if there are GPUs available.\n if not tf.test.is_gpu_available(cuda_only=True):\n return\n\n if int(os.environ.get('HOROVOD_MIXED_INSTALL', 0)):\n # Skip if compiled with CUDA but without HOROVOD_GPU_ALLREDUCE.\n return\n\n hvd.init()\n size = hvd.size()\n local_rank = hvd.local_rank()\n dtypes = self.filter_supported_types([tf.int32, tf.int64, tf.float16, tf.float32])\n int_types = [tf.int32, tf.int64]\n dims = [1, 2, 3]\n for dtype, dim in itertools.product(dtypes, dims):\n with tf.device(\"/gpu:%s\" % local_rank):\n np.random.seed(1234)\n factor = np.random.uniform()\n tensor = self.random_uniform(\n [17] * dim, -100, 100, dtype=dtype)\n summed = hvd.allreduce(tensor, average=False,\n prescale_factor=factor)\n\n # Scaling done in FP64 math for integer types.\n tensor = tf.cast(tensor, tf.float64 if dtype in int_types else dtype)\n factor = tf.convert_to_tensor(factor, tf.float64 if dtype in int_types else dtype)\n multiplied = tf.cast(factor * tensor, dtype) * size\n max_difference = tf.reduce_max(tf.abs(summed - multiplied))\n\n # Threshold for floating point equality depends on number of\n # ranks, since we're comparing against precise multiplication.\n if size <= 3 or dtype in int_types:\n threshold = 0\n elif size < 10:\n threshold = 1e-4\n elif size < 15:\n threshold = 5e-4\n else:\n break\n\n diff = self.evaluate(max_difference)\n self.assertTrue(diff <= threshold,\n \"hvd.allreduce produces incorrect results\")\n\n def test_horovod_allreduce_gpu_postscale(self):\n \"\"\"Test on GPU that the allreduce correctly sums 1D, 2D, 3D tensors\n with postscaling\"\"\"\n\n # Only do this test if there are GPUs available.\n if not tf.test.is_gpu_available(cuda_only=True):\n return\n\n if int(os.environ.get('HOROVOD_MIXED_INSTALL', 0)):\n # Skip if compiled with CUDA but without HOROVOD_GPU_ALLREDUCE.\n return\n\n hvd.init()\n size = hvd.size()\n local_rank = hvd.local_rank()\n dtypes = self.filter_supported_types([tf.int32, tf.int64, tf.float16, tf.float32])\n int_types = [tf.int32, tf.int64]\n dims = [1, 2, 3]\n for dtype, dim in itertools.product(dtypes, dims):\n with tf.device(\"/gpu:%s\" % local_rank):\n np.random.seed(1234)\n factor = np.random.uniform()\n tensor = self.random_uniform(\n [17] * dim, -100, 100, dtype=dtype)\n summed = hvd.allreduce(tensor, average=False,\n postscale_factor=factor)\n\n multiplied = tensor * size\n # Scaling done in FP64 math for integer types.\n multiplied = tf.cast(multiplied, tf.float64 if dtype in int_types else dtype)\n factor = tf.convert_to_tensor(factor, tf.float64 if dtype in int_types else dtype)\n multiplied = tf.cast(factor * multiplied, dtype)\n max_difference = tf.reduce_max(tf.abs(summed - multiplied))\n\n # Threshold for floating point equality depends on number of\n # ranks, since we're comparing against precise multiplication.\n if size <= 3 or dtype in int_types:\n threshold = 0\n elif size < 10:\n threshold = 1e-4\n elif size < 15:\n threshold = 5e-4\n else:\n break\n\n diff = self.evaluate(max_difference)\n self.assertTrue(diff <= threshold,\n \"hvd.allreduce produces incorrect results\")\n\n def test_horovod_allreduce_error(self):\n \"\"\"Test that the allreduce raises an error if different ranks try to\n send tensors of 
different rank or dimension.\"\"\"\n hvd.init()\n rank = hvd.rank()\n size = hvd.size()\n\n # This test does not apply if there is only one worker.\n if size == 1:\n self.skipTest(\"Only one worker available\")\n\n # Same rank, different dimension\n dims = [17 + rank] * 3\n tensor = self.random_uniform(dims, -1.0, 1.0)\n with self.assertRaises(tf.errors.FailedPreconditionError):\n self.evaluate(hvd.allreduce(tensor))\n\n # Same number of elements, different rank\n if rank == 0:\n dims = [17, 23 * 57]\n else:\n dims = [17, 23, 57]\n tensor = self.random_uniform(dims, -1.0, 1.0)\n with self.assertRaises(tf.errors.FailedPreconditionError):\n self.evaluate(hvd.allreduce(tensor))\n\n def test_horovod_allreduce_type_error(self):\n \"\"\"Test that the allreduce raises an error if different ranks try to\n send tensors of different type.\"\"\"\n hvd.init()\n rank = hvd.rank()\n size = hvd.size()\n\n # This test does not apply if there is only one worker.\n if size == 1:\n self.skipTest(\"Only one worker available\")\n\n # Same rank, different dimension\n dims = [17] * 3\n tensor = tf.ones(dims,\n dtype=tf.int32 if rank % 2 == 0 else tf.float32)\n with self.assertRaises(tf.errors.FailedPreconditionError):\n self.evaluate(hvd.allreduce(tensor))\n\n def test_horovod_allreduce_cpu_gpu_error(self):\n \"\"\"Test that the allreduce raises an error if different ranks try to\n perform reduction on CPU and GPU.\"\"\"\n # Only do this test if there are GPUs available.\n if not tf.test.is_gpu_available(cuda_only=True):\n self.skipTest((\"No GPUs available\"))\n\n if int(os.environ.get('HOROVOD_MIXED_INSTALL', 0)):\n # Skip if compiled with CUDA but without HOROVOD_GPU_OPERATIONS.\n self.skipTest(\"Not compiled with HOROVOD_GPU_OPERATIONS\")\n\n hvd.init()\n local_rank = hvd.local_rank()\n size = hvd.size()\n\n # This test does not apply if there is only one worker.\n if size == 1:\n self.skipTest(\"Only one worker available\")\n\n device = \"/gpu:%d\" % local_rank if local_rank % 2 == 0 else \"/cpu:0\"\n with tf.device(device):\n # Same rank, different dimension\n dims = [17] * 3\n tensor = tf.ones(dims, dtype=tf.int32)\n with self.assertRaises(tf.errors.FailedPreconditionError):\n self.evaluate(hvd.allreduce(tensor))\n\n def test_horovod_allreduce_grad_cpu(self):\n \"\"\"Test the correctness of the allreduce gradient on CPU.\"\"\"\n hvd.init()\n size = hvd.size()\n\n # As of TensorFlow v1.9, gradients are not supported on\n # integer tensors\n dtypes = [tf.float32, tf.float64]\n dims = [1, 2, 3]\n for dtype, dim in itertools.product(dtypes, dims):\n with tf.device(\"/cpu:0\"):\n if _executing_eagerly():\n tensor = self.tfe.Variable(self.random_uniform(\n [5] * dim, -100, 100, dtype=dtype))\n with tf.GradientTape() as tape:\n summed = hvd.allreduce(tensor, average=False)\n else:\n tensor = self.random_uniform(\n [5] * dim, -100, 100, dtype=dtype)\n summed = hvd.allreduce(tensor, average=False)\n\n grad_ys = tf.ones([5] * dim)\n if _executing_eagerly():\n grad_out = tape.gradient(summed, tensor, grad_ys)\n else:\n grad = tf.gradients(summed, tensor, grad_ys)[0]\n grad_out = self.evaluate(grad)\n\n expected = np.ones([5] * dim) * size\n err = np.linalg.norm(expected - grad_out)\n self.assertLess(err, 0.00000001,\n \"gradient %s differs from expected %s, \"\n \"error: %s\" % (grad_out, expected, str(err)))\n\n def test_horovod_allreduce_average_grad_cpu(self):\n \"\"\"Test the correctness of the allreduce with average gradient on CPU.\"\"\"\n hvd.init()\n size = hvd.size()\n\n # As of TensorFlow v1.9, gradients 
are not supported on\n # integer tensors\n dtypes = [tf.float32, tf.float64]\n dims = [1, 2, 3]\n for dtype, dim in itertools.product(dtypes, dims):\n with tf.device(\"/cpu:0\"):\n if _executing_eagerly():\n tensor = self.tfe.Variable(self.random_uniform(\n [5] * dim, -100, 100, dtype=dtype))\n with tf.GradientTape() as tape:\n averaged = hvd.allreduce(tensor, average=True)\n else:\n tensor = self.random_uniform(\n [5] * dim, -100, 100, dtype=dtype)\n averaged = hvd.allreduce(tensor, average=True)\n\n grad_ys = tf.ones([5] * dim, dtype=dtype)\n if _executing_eagerly():\n grad_out = tape.gradient(averaged, tensor, grad_ys)\n else:\n grad = tf.gradients(averaged, tensor, grad_ys)[0]\n grad_out = self.evaluate(grad)\n\n expected = np.ones([5] * dim)\n err = np.linalg.norm(expected - grad_out)\n self.assertLess(err, 0.00000001,\n \"gradient %s differs from expected %s, \"\n \"error: %s\" % (grad_out, expected, str(err)))\n\n def test_horovod_allreduce_grad_gpu(self):\n \"\"\"Test the correctness of the allreduce gradient on GPU.\"\"\"\n # Only do this test if there are GPUs available.\n if not tf.test.is_gpu_available(cuda_only=True):\n self.skipTest((\"No GPUs available\"))\n\n if int(os.environ.get('HOROVOD_MIXED_INSTALL', 0)):\n # Skip if compiled with CUDA but without HOROVOD_GPU_OPERATIONS.\n self.skipTest(\"Not compiled with HOROVOD_GPU_OPERATIONS\")\n\n hvd.init()\n local_rank = hvd.local_rank()\n size = hvd.size()\n\n # As of TensorFlow v1.9, gradients are not supported on\n # integer tensors\n dtypes = [tf.float32, tf.float64]\n dims = [1, 2, 3]\n for dtype, dim in itertools.product(dtypes, dims):\n with tf.device(\"/gpu:%d\" % local_rank):\n if _executing_eagerly():\n tensor = self.tfe.Variable(\n self.random_uniform([5] * dim, -100, 100, dtype=dtype))\n with tf.GradientTape() as tape:\n summed = hvd.allreduce(tensor, average=False)\n else:\n tensor = self.random_uniform([5] * dim, -100, 100, dtype=dtype)\n summed = hvd.allreduce(tensor, average=False)\n\n grad_ys = tf.ones([5] * dim)\n if _executing_eagerly():\n grad_out = tape.gradient(summed, tensor, grad_ys)\n else:\n grad = tf.gradients(summed, tensor, grad_ys)[0]\n grad_out = self.evaluate(grad)\n\n expected = np.ones([5] * dim) * size\n err = np.linalg.norm(expected - grad_out)\n self.assertLess(err, 0.00000001,\n \"gradient %s differs from expected %s, \"\n \"error: %s\" % (grad_out, expected, str(err)))\n\n def test_horovod_allreduce_average_grad_gpu(self):\n \"\"\"Test the correctness of the allreduce with average gradient on GPU.\"\"\"\n # Only do this test if there are GPUs available.\n if not tf.test.is_gpu_available(cuda_only=True):\n self.skipTest((\"No GPUs available\"))\n\n if int(os.environ.get('HOROVOD_MIXED_INSTALL', 0)):\n # Skip if compiled with CUDA but without HOROVOD_GPU_OPERATIONS.\n self.skipTest(\"Not compiled with HOROVOD_GPU_OPERATIONS\")\n\n hvd.init()\n local_rank = hvd.local_rank()\n size = hvd.size()\n\n # As of TensorFlow v1.9, gradients are not supported on\n # integer tensors\n dtypes = [tf.float32, tf.float64]\n dims = [1, 2, 3]\n for dtype, dim in itertools.product(dtypes, dims):\n with tf.device(\"/gpu:%d\" % local_rank):\n if _executing_eagerly():\n tensor = self.tfe.Variable(\n self.random_uniform([5] * dim, -100, 100, dtype=dtype))\n with tf.GradientTape() as tape:\n averaged = hvd.allreduce(tensor, average=True)\n else:\n tensor = self.random_uniform([5] * dim, -100, 100, dtype=dtype)\n averaged = hvd.allreduce(tensor, average=True)\n\n grad_ys = tf.ones([5] * dim, dtype=dtype)\n if 
_executing_eagerly():\n grad_out = tape.gradient(averaged, tensor, grad_ys)\n else:\n grad = tf.gradients(averaged, tensor, grad_ys)[0]\n grad_out = self.evaluate(grad)\n\n expected = np.ones([5] * dim)\n err = np.linalg.norm(expected - grad_out)\n self.assertLess(err, 0.00000001,\n \"gradient %s differs from expected %s, \"\n \"error: %s\" % (grad_out, expected, str(err)))\n\n def test_horovod_allgather_cpu(self):\n \"\"\"Test that the allgather correctly gathers 1D, 2D, 3D tensors.\"\"\"\n hvd.init()\n rank = hvd.rank()\n size = hvd.size()\n\n dtypes = [tf.uint8, tf.int8, tf.uint16, tf.int16,\n tf.int32, tf.int64, tf.float16, tf.float32,\n tf.float64, tf.bool]\n dims = [1, 2, 3]\n for dtype, dim in itertools.product(dtypes, dims):\n tensor = tf.ones([17] * dim) * rank\n if dtype == tf.bool:\n tensor = tensor % 2\n tensor = tf.cast(tensor, dtype=dtype)\n with tf.device(\"/cpu:0\"):\n gathered = hvd.allgather(tensor)\n\n gathered_tensor = self.evaluate(gathered)\n self.assertEqual(list(gathered_tensor.shape),\n [17 * size] + [17] * (dim - 1))\n\n for i in range(size):\n rank_tensor = tf.slice(gathered_tensor,\n [i * 17] + [0] * (dim - 1),\n [17] + [-1] * (dim - 1))\n self.assertEqual(list(rank_tensor.shape), [17] * dim)\n # tf.equal() does not support tf.uint16 as of TensorFlow 1.2,\n # so need to cast rank_tensor to tf.int32.\n if dtype != tf.bool:\n value = i\n else:\n value = i % 2\n self.assertTrue(\n self.evaluate(tf.reduce_all(\n tf.equal(tf.cast(rank_tensor, tf.int32), value))),\n \"hvd.allgather produces incorrect gathered tensor\")\n\n\n def test_horovod_allgather_gpu(self):\n \"\"\"Test that the allgather correctly gathers 1D, 2D, 3D tensors.\"\"\"\n # Only do this test if there are GPUs available.\n if not tf.test.is_gpu_available(cuda_only=True):\n self.skipTest((\"No GPUs available\"))\n\n if int(os.environ.get('HOROVOD_MIXED_INSTALL', 0)):\n # Skip if compiled with CUDA but without HOROVOD_GPU_OPERATIONS.\n self.skipTest(\"Not compiled with HOROVOD_GPU_OPERATIONS\")\n\n hvd.init()\n rank = hvd.rank()\n local_rank = hvd.local_rank()\n size = hvd.size()\n\n dtypes = [tf.uint8, tf.int8, tf.uint16, tf.int16,\n tf.int32, tf.int64, tf.float16, tf.float32,\n tf.float64, tf.bool]\n dims = [1, 2, 3]\n for dtype, dim in itertools.product(dtypes, dims):\n tensor = tf.ones([17] * dim) * rank\n if dtype == tf.bool:\n tensor = tensor % 2\n tensor = tf.cast(tensor, dtype=dtype)\n with tf.device(\"/gpu:%d\" % local_rank):\n gathered = hvd.allgather(tensor)\n\n gathered_tensor = self.evaluate(gathered)\n self.assertEqual(list(gathered_tensor.shape),\n [17 * size] + [17] * (dim - 1))\n\n for i in range(size):\n rank_tensor = tf.slice(gathered_tensor,\n [i * 17] + [0] * (dim - 1),\n [17] + [-1] * (dim - 1))\n self.assertEqual(list(rank_tensor.shape), [17] * dim)\n # tf.equal() does not support tf.uint16 as of TensorFlow 1.2,\n # so need to cast rank_tensor to tf.int32.\n if dtype != tf.bool:\n value = i\n else:\n value = i % 2\n self.assertTrue(\n self.evaluate(tf.reduce_all(\n tf.equal(tf.cast(rank_tensor, tf.int32), value))),\n \"hvd.allgather produces incorrect gathered tensor\")\n\n def test_horovod_allgather_fused_cpu(self):\n \"\"\"Test that the allgather correctly gathers 1D, 2D, 3D tensors\n with Tensor Fusion.\"\"\"\n hvd.init()\n rank = hvd.rank()\n size = hvd.size()\n\n dtypes = [tf.uint8, tf.int8, tf.uint16, tf.int16,\n tf.int32, tf.int64, tf.float16, tf.float32,\n tf.float64, tf.bool]\n dims = [1, 2, 3]\n tests = []\n shape_tests = []\n for dtype, dim in itertools.product(dtypes, 
dims):\n tensor = tf.ones([17] * dim) * rank\n if dtype == tf.bool:\n tensor = tensor % 2\n tensor = tf.cast(tensor, dtype=dtype)\n with tf.device(\"/cpu:0\"):\n gathered = hvd.allgather(tensor)\n\n shape_tests.append(\n tf.reduce_all(tf.equal(tf.shape(gathered),\n [17 * size] + [17] * (dim - 1))))\n\n for i in range(size):\n rank_tensor = tf.slice(gathered,\n [i * 17] + [0] * (dim - 1),\n [17] + [-1] * (dim - 1))\n if dtype != tf.bool:\n value = i\n else:\n value = i % 2\n\n # tf.equal() does not support tf.uint16 as of TensorFlow 1.2,\n # so need to cast rank_tensor to tf.int32.\n tests.append(\n tf.reduce_all(\n tf.equal(tf.cast(rank_tensor, tf.int32), value)))\n\n shape_tests_passed, value_tests_passed = \\\n self.evaluate([tf.reduce_all(shape_tests), tf.reduce_all(tests)])\n\n self.assertTrue(shape_tests_passed,\n \"hvd.allgather produces incorrect gathered tensor\")\n\n self.assertTrue(value_tests_passed,\n \"hvd.allgather produces incorrect gathered tensor\")\n\n def test_horovod_allgather_fused_gpu(self):\n \"\"\"Test that the allgather correctly gathers 1D, 2D, 3D tensors\n with Tensor Fusion.\"\"\"\n # Only do this test if there are GPUs available.\n if not tf.test.is_gpu_available(cuda_only=True):\n self.skipTest((\"No GPUs available\"))\n\n if int(os.environ.get('HOROVOD_MIXED_INSTALL', 0)):\n # Skip if compiled with CUDA but without HOROVOD_GPU_OPERATIONS.\n self.skipTest(\"Not compiled with HOROVOD_GPU_OPERATIONS\")\n\n hvd.init()\n rank = hvd.rank()\n local_rank = hvd.local_rank()\n size = hvd.size()\n\n dtypes = [tf.uint8, tf.int8, tf.uint16, tf.int16,\n tf.int32, tf.int64, tf.float16, tf.float32,\n tf.float64, tf.bool]\n dims = [1, 2, 3]\n tests = []\n shape_tests = []\n for dtype, dim in itertools.product(dtypes, dims):\n tensor = tf.ones([17] * dim) * rank\n if dtype == tf.bool:\n tensor = tensor % 2\n tensor = tf.cast(tensor, dtype=dtype)\n with tf.device(\"/gpu:%d\" % local_rank):\n gathered = hvd.allgather(tensor)\n\n shape_tests.append(\n tf.reduce_all(tf.equal(tf.shape(gathered),\n [17 * size] + [17] * (dim - 1))))\n\n for i in range(size):\n rank_tensor = tf.slice(gathered,\n [i * 17] + [0] * (dim - 1),\n [17] + [-1] * (dim - 1))\n if dtype != tf.bool:\n value = i\n else:\n value = i % 2\n\n # tf.equal() does not support tf.uint16 as of TensorFlow 1.2,\n # so need to cast rank_tensor to tf.int32.\n tests.append(\n tf.reduce_all(\n tf.equal(tf.cast(rank_tensor, tf.int32), value)))\n\n shape_tests_passed, value_tests_passed = \\\n self.evaluate([tf.reduce_all(shape_tests), tf.reduce_all(tests)])\n\n self.assertTrue(shape_tests_passed,\n \"hvd.allgather produces incorrect gathered tensor\")\n\n self.assertTrue(value_tests_passed,\n \"hvd.allgather produces incorrect gathered tensor\")\n\n def test_horovod_allgather_variable_size_fused_cpu(self):\n \"\"\"Test that the allgather correctly gathers 1D, 2D, 3D tensors with\n Tensor Fusion, even if those tensors have different sizes along the\n first dim.\"\"\"\n hvd.init()\n rank = hvd.rank()\n size = hvd.size()\n\n dtypes = [tf.uint8, tf.int8, tf.uint16, tf.int16,\n tf.int32, tf.int64, tf.float16, tf.float32,\n tf.float64, tf.bool]\n dims = [1, 2, 3]\n tests = []\n shape_tests = []\n\n for dtype, dim in itertools.product(dtypes, dims):\n # Support tests up to MPI Size of 35\n if size > 35:\n break\n\n tensor_sizes = [17, 32, 81, 12, 15, 23, 22] * 5\n tensor_sizes = tensor_sizes[:size]\n\n tensor = tf.ones([tensor_sizes[rank]] + [17] * (dim - 1)) * rank\n if dtype == tf.bool:\n tensor = tensor % 2\n tensor = 
tf.cast(tensor, dtype=dtype)\n with tf.device(\"/cpu:0\"):\n gathered = hvd.allgather(tensor)\n shape_tests.append(\n tf.reduce_all(tf.equal(tf.shape(gathered),\n [sum(tensor_sizes)] + [17] * (dim - 1))))\n\n for i in range(size):\n rank_size = [tensor_sizes[i]] + [17] * (dim - 1)\n rank_tensor = tf.slice(\n gathered, [sum(tensor_sizes[:i])] + [0] * (dim - 1),\n rank_size)\n self.assertEqual(list(rank_tensor.shape), rank_size)\n if dtype != tf.bool:\n value = i\n else:\n value = i % 2\n\n # tf.equal() does not support tf.uint16 as of TensorFlow 1.2,\n # so need to cast rank_tensor to tf.int32.\n tests.append(tf.reduce_all(\n tf.equal(tf.cast(rank_tensor, tf.int32), value)))\n\n shape_tests_passed, value_tests_passed = \\\n self.evaluate([tf.reduce_all(shape_tests), tf.reduce_all(tests)])\n\n self.assertTrue(shape_tests_passed,\n \"hvd.allgather produces incorrect gathered tensor\")\n\n self.assertTrue(value_tests_passed,\n \"hvd.allgather produces incorrect gathered tensor\")\n\n def test_horovod_allgather_variable_size_fused_gpu(self):\n \"\"\"Test that the allgather correctly gathers 1D, 2D, 3D tensors with\n Tensor Fusion, even if those tensors have different sizes along the\n first dim.\"\"\"\n # Only do this test if there are GPUs available.\n if not tf.test.is_gpu_available(cuda_only=True):\n self.skipTest((\"No GPUs available\"))\n\n hvd.init()\n rank = hvd.rank()\n local_rank = hvd.rank()\n size = hvd.size()\n\n dtypes = [tf.uint8, tf.int8, tf.uint16, tf.int16,\n tf.int32, tf.int64, tf.float16, tf.float32,\n tf.float64, tf.bool]\n dims = [1, 2, 3]\n tests = []\n shape_tests = []\n\n for dtype, dim in itertools.product(dtypes, dims):\n # Support tests up to MPI Size of 35\n if size > 35:\n break\n\n tensor_sizes = [17, 32, 81, 12, 15, 23, 22] * 5\n tensor_sizes = tensor_sizes[:size]\n\n tensor = tf.ones([tensor_sizes[rank]] + [17] * (dim - 1)) * rank\n if dtype == tf.bool:\n tensor = tensor % 2\n tensor = tf.cast(tensor, dtype=dtype)\n with tf.device(\"/gpu:%d\" % local_rank):\n gathered = hvd.allgather(tensor)\n shape_tests.append(\n tf.reduce_all(tf.equal(tf.shape(gathered),\n [sum(tensor_sizes)] + [17] * (dim - 1))))\n\n for i in range(size):\n rank_size = [tensor_sizes[i]] + [17] * (dim - 1)\n rank_tensor = tf.slice(\n gathered, [sum(tensor_sizes[:i])] + [0] * (dim - 1),\n rank_size)\n self.assertEqual(list(rank_tensor.shape), rank_size)\n if dtype != tf.bool:\n value = i\n else:\n value = i % 2\n\n # tf.equal() does not support tf.uint16 as of TensorFlow 1.2,\n # so need to cast rank_tensor to tf.int32.\n tests.append(tf.reduce_all(\n tf.equal(tf.cast(rank_tensor, tf.int32), value)))\n\n shape_tests_passed, value_tests_passed = \\\n self.evaluate([tf.reduce_all(shape_tests), tf.reduce_all(tests)])\n\n self.assertTrue(shape_tests_passed,\n \"hvd.allgather produces incorrect gathered tensor\")\n\n self.assertTrue(value_tests_passed,\n \"hvd.allgather produces incorrect gathered tensor\")\n\n def test_horovod_allgather_variable_size_gpu(self):\n \"\"\"Test that the allgather correctly gathers 1D, 2D, 3D tensors,\n even if those tensors have different sizes along the first dim.\"\"\"\n # Only do this test if there are GPUs available.\n if not tf.test.is_gpu_available(cuda_only=True):\n self.skipTest((\"No GPUs available\"))\n\n hvd.init()\n rank = hvd.rank()\n local_rank = hvd.rank()\n size = hvd.size()\n\n dtypes = [tf.uint8, tf.int8, tf.uint16, tf.int16,\n tf.int32, tf.int64, tf.float16, tf.float32,\n tf.float64, tf.bool]\n dims = [1, 2, 3]\n for dtype, dim in 
itertools.product(dtypes, dims):\n # Support tests up to MPI Size of 35\n if size > 35:\n break\n\n tensor_sizes = [17, 32, 81, 12, 15, 23, 22] * 5\n tensor_sizes = tensor_sizes[:size]\n\n tensor = tf.ones([tensor_sizes[rank]] + [17] * (dim - 1)) * rank\n if dtype == tf.bool:\n tensor = tensor % 2\n tensor = tf.cast(tensor, dtype=dtype)\n with tf.device(\"/gpu:%d\" % local_rank):\n gathered = hvd.allgather(tensor)\n\n gathered_tensor = self.evaluate(gathered)\n expected_size = sum(tensor_sizes)\n self.assertEqual(list(gathered_tensor.shape),\n [expected_size] + [17] * (dim - 1))\n\n for i in range(size):\n rank_size = [tensor_sizes[i]] + [17] * (dim - 1)\n rank_tensor = tf.slice(\n gathered, [sum(tensor_sizes[:i])] + [0] * (dim - 1),\n rank_size)\n self.assertEqual(list(rank_tensor.shape), rank_size)\n # tf.equal() does not support tf.uint16 as of TensorFlow 1.2,\n # so need to cast rank_tensor to tf.int32.\n if dtype != tf.bool:\n value = i\n else:\n value = i % 2\n self.assertTrue(\n self.evaluate(tf.reduce_all(\n tf.equal(tf.cast(rank_tensor, tf.int32), value))),\n \"hvd.allgather produces incorrect gathered tensor\")\n\n def test_horovod_allgather_variable_size_cpu(self):\n \"\"\"Test that the allgather correctly gathers 1D, 2D, 3D tensors,\n even if those tensors have different sizes along the first dim.\"\"\"\n hvd.init()\n rank = hvd.rank()\n size = hvd.size()\n\n dtypes = [tf.uint8, tf.int8, tf.uint16, tf.int16,\n tf.int32, tf.int64, tf.float16, tf.float32,\n tf.float64, tf.bool]\n dims = [1, 2, 3]\n for dtype, dim in itertools.product(dtypes, dims):\n # Support tests up to MPI Size of 35\n if size > 35:\n break\n\n tensor_sizes = [17, 32, 81, 12, 15, 23, 22] * 5\n tensor_sizes = tensor_sizes[:size]\n\n tensor = tf.ones([tensor_sizes[rank]] + [17] * (dim - 1)) * rank\n if dtype == tf.bool:\n tensor = tensor % 2\n tensor = tf.cast(tensor, dtype=dtype)\n with tf.device(\"/cpu:0\"):\n gathered = hvd.allgather(tensor)\n\n gathered_tensor = self.evaluate(gathered)\n expected_size = sum(tensor_sizes)\n self.assertEqual(list(gathered_tensor.shape),\n [expected_size] + [17] * (dim - 1))\n\n for i in range(size):\n rank_size = [tensor_sizes[i]] + [17] * (dim - 1)\n rank_tensor = tf.slice(\n gathered, [sum(tensor_sizes[:i])] + [0] * (dim - 1),\n rank_size)\n self.assertEqual(list(rank_tensor.shape), rank_size)\n # tf.equal() does not support tf.uint16 as of TensorFlow 1.2,\n # so need to cast rank_tensor to tf.int32.\n if dtype != tf.bool:\n value = i\n else:\n value = i % 2\n self.assertTrue(\n self.evaluate(tf.reduce_all(\n tf.equal(tf.cast(rank_tensor, tf.int32), value))),\n \"hvd.allgather produces incorrect gathered tensor\")\n\n def test_horovod_allgather_error(self):\n \"\"\"Test that the allgather returns an error if any dimension besides\n the first is different among the tensors being gathered.\"\"\"\n hvd.init()\n rank = hvd.rank()\n size = hvd.size()\n\n # This test does not apply if there is only one worker.\n if size == 1:\n self.skipTest(\"Only one worker available\")\n\n tensor_size = [17] * 3\n tensor_size[1] = 10 * (rank + 1)\n tensor = tf.ones(tensor_size, dtype=tf.float32) * rank\n with self.assertRaises(tf.errors.FailedPreconditionError):\n self.evaluate(hvd.allgather(tensor))\n\n def test_horovod_allgather_type_error(self):\n \"\"\"Test that the allgather returns an error if the types being gathered\n differ among the processes\"\"\"\n hvd.init()\n rank = hvd.rank()\n size = hvd.size()\n\n # This test does not apply if there is only one worker.\n if size == 1:\n 
self.skipTest(\"Only one worker available\")\n\n tensor_size = [17] * 3\n dtype = tf.int32 if rank % 2 == 0 else tf.float32\n tensor = tf.ones(tensor_size, dtype=dtype) * rank\n with self.assertRaises(tf.errors.FailedPreconditionError):\n self.evaluate(hvd.allgather(tensor))\n\n def test_horovod_allgather_grad_cpu(self):\n \"\"\"Test the correctness of the allgather gradient on CPU.\"\"\"\n hvd.init()\n rank = hvd.rank()\n size = hvd.size()\n\n # As of TensorFlow v1.9, gradients are not supported on\n # integer tensors\n dtypes = [tf.float32, tf.float64]\n dims = [1, 2, 3]\n for dtype, dim in itertools.product(dtypes, dims):\n tensor_sizes = [3, 2, 7, 4, 6, 8, 10] * 5\n tensor_sizes = tensor_sizes[:size]\n\n with tf.device(\"/cpu:0\"):\n if _executing_eagerly():\n with tf.GradientTape() as tape:\n tensor = self.tfe.Variable(\n tf.ones([tensor_sizes[rank]] + [17] * (dim - 1)) * rank)\n if dtype == tf.bool:\n tensor = tensor % 2\n tensor = tf.cast(tensor, dtype=dtype)\n gathered = hvd.allgather(tensor)\n grad_list = []\n for r, tensor_size in enumerate(tensor_sizes):\n g = tf.ones([tensor_size] + [17] * (dim - 1)) * r\n grad_list.append(g)\n grad_ys = tf.concat(grad_list, axis=0)\n grad_out = tape.gradient(gathered, tensor, grad_ys)\n else:\n tensor = tf.ones([tensor_sizes[rank]] + [17] * (dim - 1)) * rank\n if dtype == tf.bool:\n tensor = tensor % 2\n tensor = tf.cast(tensor, dtype=dtype)\n gathered = hvd.allgather(tensor)\n\n grad_list = []\n for r, tensor_size in enumerate(tensor_sizes):\n g = tf.ones([tensor_size] + [17] * (dim - 1)) * r\n grad_list.append(g)\n grad_ys = tf.concat(grad_list, axis=0)\n\n grad = tf.gradients(gathered, tensor, grad_ys)[0]\n grad_out = self.evaluate(grad)\n\n expected = np.ones(\n [tensor_sizes[rank]] + [17] * (dim - 1)\n ) * rank * size\n err = np.linalg.norm(expected - grad_out)\n self.assertLess(err, 0.00000001,\n \"gradient %s differs from expected %s, \"\n \"error: %s\" %\n (grad_out, expected, str(err)))\n\n def test_horovod_allgather_grad_gpu(self):\n \"\"\"Test the correctness of the allgather gradient on GPU.\"\"\"\n # Only do this test if there are GPUs available.\n if not tf.test.is_gpu_available(cuda_only=True):\n self.skipTest((\"No GPUs available\"))\n\n if int(os.environ.get('HOROVOD_MIXED_INSTALL', 0)):\n # Skip if compiled with CUDA but without HOROVOD_GPU_OPERATIONS.\n self.skipTest(\"Not compiled with HOROVOD_GPU_OPERATIONS\")\n\n hvd.init()\n rank = hvd.rank()\n local_rank = hvd.local_rank()\n size = hvd.size()\n\n # As of TensorFlow v1.9, gradients are not supported on\n # integer tensors\n dtypes = [tf.float32, tf.float64]\n dims = [1, 2, 3]\n for dtype, dim in itertools.product(dtypes, dims):\n tensor_sizes = [3, 2, 7, 4, 6, 8, 10] * 5\n tensor_sizes = tensor_sizes[:size]\n\n with tf.device(\"/gpu:%d\" % local_rank):\n if _executing_eagerly():\n with tf.GradientTape() as tape:\n tensor = self.tfe.Variable(\n tf.ones([tensor_sizes[rank]] + [17] * (dim - 1)) * rank)\n if dtype == tf.bool:\n tensor = tensor % 2\n tensor = tf.cast(tensor, dtype=dtype)\n gathered = hvd.allgather(tensor)\n grad_list = []\n for r, tensor_size in enumerate(tensor_sizes):\n g = tf.ones([tensor_size] + [17] * (dim - 1)) * r\n grad_list.append(g)\n grad_ys = tf.concat(grad_list, axis=0)\n grad_out = tape.gradient(gathered, tensor, grad_ys)\n else:\n tensor = tf.ones([tensor_sizes[rank]] + [17] * (dim - 1)) * rank\n if dtype == tf.bool:\n tensor = tensor % 2\n tensor = tf.cast(tensor, dtype=dtype)\n gathered = hvd.allgather(tensor)\n\n grad_list = []\n for r, 
tensor_size in enumerate(tensor_sizes):\n g = tf.ones([tensor_size] + [17] * (dim - 1)) * r\n grad_list.append(g)\n grad_ys = tf.concat(grad_list, axis=0)\n\n grad = tf.gradients(gathered, tensor, grad_ys)[0]\n grad_out = self.evaluate(grad)\n\n expected = np.ones(\n [tensor_sizes[rank]] + [17] * (dim - 1)\n ) * rank * size\n err = np.linalg.norm(expected - grad_out)\n self.assertLess(err, 0.00000001,\n \"gradient %s differs from expected %s, \"\n \"error: %s\" %\n (grad_out, expected, str(err)))\n\n def test_horovod_broadcast_cpu(self):\n \"\"\"Test that the broadcast correctly broadcasts 1D, 2D, 3D tensors on CPU.\"\"\"\n hvd.init()\n rank = hvd.rank()\n size = hvd.size()\n\n # This test does not apply if there is only one worker.\n if size == 1:\n self.skipTest(\"Only one worker available\")\n\n dtypes = [tf.uint8, tf.int8, tf.uint16, tf.int16,\n tf.int32, tf.int64, tf.float16, tf.float32,\n tf.float64, tf.bool]\n dims = [1, 2, 3]\n root_ranks = list(range(size))\n for dtype, dim, root_rank in itertools.product(dtypes, dims, root_ranks):\n with tf.device(\"/cpu:0\"):\n tensor = tf.ones([17] * dim) * rank\n root_tensor = tf.ones([17] * dim) * root_rank\n if dtype == tf.bool:\n tensor = tensor % 2\n root_tensor = root_tensor % 2\n tensor = tf.cast(tensor, dtype=dtype)\n root_tensor = tf.cast(root_tensor, dtype=dtype)\n broadcasted_tensor = hvd.broadcast(tensor, root_rank)\n self.assertTrue(\n self.evaluate(tf.reduce_all(tf.equal(\n tf.cast(root_tensor, tf.int32), tf.cast(broadcasted_tensor, tf.int32)))),\n \"hvd.broadcast produces incorrect broadcasted tensor\")\n\n def test_horovod_broadcast_gpu(self):\n \"\"\"Test that the broadcast correctly broadcasts 1D, 2D, 3D tensors on GPU.\"\"\"\n # Only do this test if there are GPUs available.\n if not tf.test.is_gpu_available(cuda_only=True):\n self.skipTest((\"No GPUs available\"))\n\n if int(os.environ.get('HOROVOD_MIXED_INSTALL', 0)):\n # Skip if compiled with CUDA but without HOROVOD_GPU_OPERATIONS.\n self.skipTest(\"Not compiled with HOROVOD_GPU_OPERATIONS\")\n\n hvd.init()\n rank = hvd.rank()\n local_rank = hvd.local_rank()\n size = hvd.size()\n\n # This test does not apply if there is only one worker.\n if size == 1:\n self.skipTest(\"Only one worker available\")\n\n dtypes = [tf.uint8, tf.int8, tf.uint16, tf.int16,\n tf.int32, tf.int64, tf.float16, tf.float32,\n tf.float64, tf.bool]\n dims = [1, 2, 3]\n root_ranks = list(range(size))\n for dtype, dim, root_rank in itertools.product(dtypes, dims, root_ranks):\n tensor = tf.ones([17] * dim) * rank\n root_tensor = tf.ones([17] * dim) * root_rank\n if dtype == tf.bool:\n tensor = tensor % 2\n root_tensor = root_tensor % 2\n tensor = tf.cast(tensor, dtype=dtype)\n root_tensor = tf.cast(root_tensor, dtype=dtype)\n with tf.device(\"/gpu:%d\" % local_rank):\n broadcasted_tensor = hvd.broadcast(tensor, root_rank)\n self.assertTrue(\n self.evaluate(tf.reduce_all(tf.equal(\n tf.cast(root_tensor, tf.int32), tf.cast(broadcasted_tensor, tf.int32)))),\n \"hvd.broadcast produces incorrect broadcasted tensor\")\n\n def test_horovod_broadcast_error(self):\n \"\"\"Test that the broadcast returns an error if any dimension besides\n the first is different among the tensors being broadcasted.\"\"\"\n hvd.init()\n rank = hvd.rank()\n size = hvd.size()\n\n # This test does not apply if there is only one worker.\n if size == 1:\n self.skipTest(\"Only one worker available\")\n\n tensor_size = [17] * 3\n tensor_size[1] = 10 * (rank + 1)\n tensor = tf.ones(tensor_size, dtype=tf.float32) * rank\n with 
self.assertRaises(tf.errors.FailedPreconditionError):\n self.evaluate(hvd.broadcast(tensor, 0))\n\n def test_horovod_broadcast_type_error(self):\n \"\"\"Test that the broadcast returns an error if the types being broadcasted\n differ among the processes\"\"\"\n hvd.init()\n rank = hvd.rank()\n size = hvd.size()\n\n # This test does not apply if there is only one worker.\n if size == 1:\n self.skipTest(\"Only one worker available\")\n\n tensor_size = [17] * 3\n dtype = tf.int32 if rank % 2 == 0 else tf.float32\n tensor = tf.ones(tensor_size, dtype=dtype) * rank\n with self.assertRaises(tf.errors.FailedPreconditionError):\n self.evaluate(hvd.broadcast(tensor, 0))\n\n def test_horovod_broadcast_rank_error(self):\n \"\"\"Test that the broadcast returns an error if different ranks\n specify different root rank.\"\"\"\n hvd.init()\n rank = hvd.rank()\n size = hvd.size()\n\n # This test does not apply if there is only one worker.\n if size == 1:\n self.skipTest(\"Only one worker available\")\n\n tensor = tf.ones([17] * 3, dtype=tf.float32)\n with self.assertRaises(tf.errors.FailedPreconditionError):\n self.evaluate(hvd.broadcast(tensor, rank))\n\n def test_horovod_broadcast_grad_cpu(self):\n \"\"\"Test the correctness of the broadcast gradient on CPU.\"\"\"\n hvd.init()\n rank = hvd.rank()\n size = hvd.size()\n\n # This test does not apply if there is only one worker.\n if size == 1:\n self.skipTest(\"Only one worker available\")\n\n # As of TensorFlow v1.9, gradients are not supported on\n # integer tensors\n dtypes = [tf.float32, tf.float64]\n dims = [1, 2, 3]\n root_ranks = list(range(size))\n for dtype, dim, root_rank in itertools.product(dtypes, dims, root_ranks):\n with tf.device(\"/cpu:0\"):\n if _executing_eagerly():\n tensor = self.tfe.Variable(tf.ones([5] * dim) * rank)\n else:\n tensor = tf.ones([5] * dim) * rank\n if dtype == tf.bool:\n tensor = tensor % 2\n if _executing_eagerly():\n with tf.GradientTape() as tape:\n tensor = tf.cast(tensor, dtype=dtype)\n broadcasted_tensor = hvd.broadcast(tensor, root_rank)\n grad_out = tape.gradient(broadcasted_tensor, tensor)\n else:\n tensor = tf.cast(tensor, dtype=dtype)\n broadcasted_tensor = hvd.broadcast(tensor, root_rank)\n grad_ys = tf.ones([5] * dim)\n grad = tf.gradients(broadcasted_tensor, tensor, grad_ys)[0]\n grad_out = self.evaluate(grad)\n\n c = size if rank == root_rank else 0\n expected = np.ones([5] * dim) * c\n err = np.linalg.norm(expected - grad_out)\n self.assertLess(err, 0.00000001,\n \"gradient %s differs from expected %s, \"\n \"error: %s\" % (grad_out, expected, str(err)))\n\n def test_horovod_broadcast_grad_gpu(self):\n \"\"\"Test the correctness of the broadcast gradient on GPU.\"\"\"\n # Only do this test if there are GPUs available.\n if not tf.test.is_gpu_available(cuda_only=True):\n self.skipTest((\"No GPUs available\"))\n\n if int(os.environ.get('HOROVOD_MIXED_INSTALL', 0)):\n # Skip if compiled with CUDA but without HOROVOD_GPU_OPERATIONS.\n self.skipTest(\"Not compiled with HOROVOD_GPU_OPERATIONS\")\n\n hvd.init()\n rank = hvd.rank()\n local_rank = hvd.local_rank()\n size = hvd.size()\n\n # This test does not apply if there is only one worker.\n if size == 1:\n self.skipTest(\"Only one worker available\")\n\n # As of TensorFlow v1.9, gradients are not supported on\n # integer tensors\n dtypes = [tf.float32, tf.float64]\n dims = [1, 2, 3]\n root_ranks = list(range(size))\n for dtype, dim, root_rank in itertools.product(dtypes, dims, root_ranks):\n with tf.device(\"/gpu:%d\" % local_rank):\n if 
_executing_eagerly():\n tensor = self.tfe.Variable(tf.ones([5] * dim) * rank)\n else:\n tensor = tf.ones([5] * dim) * rank\n if dtype == tf.bool:\n tensor = tensor % 2\n if _executing_eagerly():\n with tf.GradientTape() as tape:\n tensor = tf.cast(tensor, dtype=dtype)\n broadcasted_tensor = hvd.broadcast(tensor, root_rank)\n grad_out = tape.gradient(broadcasted_tensor, tensor)\n else:\n tensor = tf.cast(tensor, dtype=dtype)\n broadcasted_tensor = hvd.broadcast(tensor, root_rank)\n grad_ys = tf.ones([5] * dim)\n grad = tf.gradients(broadcasted_tensor, tensor, grad_ys)[0]\n grad_out = self.evaluate(grad)\n\n c = size if rank == root_rank else 0\n expected = np.ones([5] * dim) * c\n err = np.linalg.norm(expected - grad_out)\n self.assertLess(err, 0.00000001,\n \"gradient %s differs from expected %s, \"\n \"error: %s\" % (grad_out, expected, str(err)))\n\n def test_horovod_alltoall_cpu(self):\n \"\"\"Test that the alltoall correctly distributes 1D, 2D, and 3D tensors.\"\"\"\n hvd.init()\n rank = hvd.rank()\n size = hvd.size()\n\n dtypes = [tf.uint8, tf.int8, tf.uint16, tf.int16,\n tf.int32, tf.int64, tf.float16, tf.float32,\n tf.float64]\n dims = [1, 2, 3]\n for dtype, dim in itertools.product(dtypes, dims):\n with tf.device(\"/cpu:0\"):\n vals = []\n for i in range(size):\n vals += [i] * (rank+1)\n tensor = tf.convert_to_tensor(vals, dtype=dtype)\n for _ in range(dim - 1):\n tensor = tf.expand_dims(tensor, axis=1)\n tensor = tf.concat([tensor, tensor], axis=1)\n splits = tf.convert_to_tensor([rank+1] * size, dtype=tf.int32)\n collected = hvd.alltoall(tensor, splits)\n\n self.assertTrue(\n self.evaluate(tf.reduce_all(\n tf.equal(tf.cast(collected, tf.int32), rank))),\n \"hvd.alltoall produces incorrect collected tensor\")\n\n self.assertTrue(\n self.evaluate(tf.equal(tf.size(collected), size * (size + 1) // 2 * 2**(dim - 1))),\n \"hvd.alltoall collected wrong number of values\")\n\n def test_horovod_alltoall_gpu(self):\n \"\"\"Test that the alltoall correctly distributes 1D, 2D, and 3D tensors on GPU.\"\"\"\n # Only do this test if there are GPUs available.\n if not tf.test.is_gpu_available(cuda_only=True):\n self.skipTest((\"No GPUs available\"))\n\n if int(os.environ.get('HOROVOD_MIXED_INSTALL', 0)):\n # Skip if compiled with CUDA but without HOROVOD_GPU_OPERATIONS.\n self.skipTest(\"Not compiled with HOROVOD_GPU_OPERATIONS\")\n\n # This test does not apply if NCCL version < 2.7.0\n if hvd.nccl_built() and hvd.nccl_built() < 2700:\n self.skipTest(\"NCCL-based Alltoall requires NCCL version >= 2.7.0.\")\n\n hvd.init()\n rank = hvd.rank()\n local_rank = hvd.local_rank()\n size = hvd.size()\n\n dtypes = [tf.uint8, tf.int8, tf.uint16, tf.int16,\n tf.int32, tf.int64, tf.float16, tf.float32,\n tf.float64]\n dims = [1, 2, 3]\n for dtype, dim in itertools.product(dtypes, dims):\n with tf.device(\"/gpu:%s\" % local_rank):\n vals = []\n for i in range(size):\n vals += [i] * (rank+1)\n tensor = tf.convert_to_tensor(vals, dtype=dtype)\n for _ in range(dim - 1):\n tensor = tf.expand_dims(tensor, axis=1)\n tensor = tf.concat([tensor, tensor], axis=1)\n splits = tf.convert_to_tensor([rank+1] * size, dtype=tf.int32)\n collected = hvd.alltoall(tensor, splits)\n\n self.assertTrue(\n self.evaluate(tf.reduce_all(\n tf.equal(tf.cast(collected, tf.int32), rank))),\n \"hvd.alltoall produces incorrect collected tensor\")\n\n self.assertTrue(\n self.evaluate(tf.equal(tf.size(collected), size * (size + 1) // 2 * 2**(dim - 1))),\n \"hvd.alltoall collected wrong number of values\")\n\n def 
test_horovod_alltoall_equal_split_cpu(self):\n \"\"\"Test that the alltoall correctly distributes 1D tensors with default splitting.\"\"\"\n hvd.init()\n rank = hvd.rank()\n size = hvd.size()\n\n dtypes = [tf.uint8, tf.int8, tf.uint16, tf.int16,\n tf.int32, tf.int64, tf.float16, tf.float32,\n tf.float64]\n dims = [1, 2, 3]\n for dtype, dim in itertools.product(dtypes, dims):\n with tf.device(\"/cpu:0\"):\n vals = []\n for i in range(size):\n vals += [i] * (rank+1)\n tensor = tf.convert_to_tensor(vals, dtype=dtype)\n for _ in range(dim - 1):\n tensor = tf.expand_dims(tensor, axis=1)\n tensor = tf.concat([tensor, tensor], axis=1)\n collected = hvd.alltoall(tensor)\n\n self.assertTrue(\n self.evaluate(tf.reduce_all(\n tf.equal(tf.cast(collected, tf.int32), rank))),\n \"hvd.alltoall produces incorrect collected tensor\")\n\n self.assertTrue(\n self.evaluate(tf.equal(tf.size(collected), size * (size + 1) // 2 * 2**(dim - 1))),\n \"hvd.alltoall collected wrong number of values\")\n\n def test_horovod_alltoall_equal_split_gpu(self):\n \"\"\"Test that the alltoall correctly distributes 1D tensors with default splitting on GPU.\"\"\"\n # Only do this test if there are GPUs available.\n if not tf.test.is_gpu_available(cuda_only=True):\n self.skipTest((\"No GPUs available\"))\n\n if int(os.environ.get('HOROVOD_MIXED_INSTALL', 0)):\n # Skip if compiled with CUDA but without HOROVOD_GPU_OPERATIONS.\n self.skipTest(\"Not compiled with HOROVOD_GPU_OPERATIONS\")\n\n # This test does not apply if NCCL version < 2.7.0\n if hvd.nccl_built() and hvd.nccl_built() < 2700:\n self.skipTest(\"NCCL-based Alltoall requires NCCL version >= 2.7.0.\")\n\n hvd.init()\n rank = hvd.rank()\n local_rank = hvd.local_rank()\n size = hvd.size()\n\n dtypes = [tf.uint8, tf.int8, tf.uint16, tf.int16,\n tf.int32, tf.int64, tf.float16, tf.float32,\n tf.float64]\n dims = [1, 2, 3]\n for dtype, dim in itertools.product(dtypes, dims):\n with tf.device(\"/gpu:%s\" % local_rank):\n vals = []\n for i in range(size):\n vals += [i] * (rank+1)\n tensor = tf.convert_to_tensor(vals, dtype=dtype)\n for _ in range(dim - 1):\n tensor = tf.expand_dims(tensor, axis=1)\n tensor = tf.concat([tensor, tensor], axis=1)\n collected = hvd.alltoall(tensor)\n\n self.assertTrue(\n self.evaluate(tf.reduce_all(\n tf.equal(tf.cast(collected, tf.int32), rank))),\n \"hvd.alltoall produces incorrect collected tensor\")\n\n self.assertTrue(\n self.evaluate(tf.equal(tf.size(collected), size * (size + 1) // 2 * 2**(dim - 1))),\n \"hvd.alltoall collected wrong number of values\")\n\n def test_horovod_alltoall_empty_cpu(self):\n \"\"\"Test that the alltoall correctly deals with an empty input tensor.\"\"\"\n hvd.init()\n size = hvd.size()\n\n dtypes = [tf.uint8, tf.int8, tf.uint16, tf.int16,\n tf.int32, tf.int64, tf.float16, tf.float32,\n tf.float64]\n for dtype in dtypes:\n with tf.device(\"/cpu:0\"):\n vals = [[] for i in range(size)]\n tensor = tf.convert_to_tensor(vals, dtype=dtype)\n collected = hvd.alltoall(tensor)\n\n self.assertTrue(\n self.evaluate(tf.equal(tf.size(collected), 0)),\n \"hvd.alltoall collected wrong number of values\")\n\n def test_horovod_alltoall_empty_gpu(self):\n \"\"\"Test that the alltoall correctly deals with an empty input tensor.\"\"\"\n # ncclGroupEnd failed: invalid usage\n\n # Only do this test if there are GPUs available.\n if not tf.test.is_gpu_available(cuda_only=True):\n self.skipTest(\"No GPUs available\")\n\n if int(os.environ.get('HOROVOD_MIXED_INSTALL', 0)):\n # Skip if compiled with CUDA but without 
HOROVOD_GPU_OPERATIONS.\n self.skipTest(\"Not compiled with HOROVOD_GPU_OPERATIONS\")\n\n # This test does not apply if NCCL version < 2.7.0\n if hvd.nccl_built() and hvd.nccl_built() < 2700:\n self.skipTest(\"NCCL-based Alltoall requires NCCL version >= 2.7.0.\")\n\n hvd.init()\n local_rank = hvd.local_rank()\n size = hvd.size()\n dtypes = [tf.uint8, tf.int8, tf.uint16, tf.int16,\n tf.int32, tf.int64, tf.float16, tf.float32,\n tf.float64]\n for dtype in dtypes:\n with tf.device(\"/gpu:%s\" % local_rank):\n vals = [[] for i in range(size)]\n tensor = tf.convert_to_tensor(vals, dtype=dtype)\n collected = hvd.alltoall(tensor)\n\n self.assertTrue(\n self.evaluate(tf.equal(tf.size(collected), 0)),\n \"hvd.alltoall collected wrong number of values\")\n\n def test_horovod_alltoall_one_rank_sends_nothing_cpu(self):\n \"\"\"Test where one rank sends nothing in an alltoall.\"\"\"\n hvd.init()\n size = hvd.size()\n rank = hvd.rank()\n\n if hvd.size() < 2:\n self.skipTest(\"Only one worker available\")\n\n dtypes = [tf.uint8, tf.int8, tf.uint16, tf.int16,\n tf.int32, tf.int64, tf.float16, tf.float32,\n tf.float64]\n dims = [1, 2, 3]\n for dtype, dim in itertools.product(dtypes, dims):\n with tf.device(\"/cpu:0\"):\n if rank == 1:\n splits = tf.convert_to_tensor([0] * size, dtype=tf.int32)\n vals = []\n tensor = tf.convert_to_tensor(vals, dtype=dtype)\n tensor = tf.reshape(tensor, shape=[0] + (dim-1)*[2])\n else:\n splits = tf.convert_to_tensor([rank + 1] * size, dtype=tf.int32)\n vals = []\n for i in range(size):\n vals += [i] * (rank + 1)\n tensor = tf.convert_to_tensor(vals, dtype=dtype)\n for _ in range(dim - 1):\n tensor = tf.expand_dims(tensor, axis=1)\n tensor = tf.concat([tensor, tensor], axis=1)\n\n collected = hvd.alltoall(tensor, splits, name=\"a2a\")\n\n self.assertTrue(\n self.evaluate(tf.reduce_all(\n tf.equal(tf.cast(collected, tf.int32), rank))),\n \"hvd.alltoall produces incorrect collected tensor\")\n\n self.assertTrue(\n self.evaluate(tf.equal(tf.size(collected), size * (size + 1) // 2 * 2**(dim - 1)\n - (1+1) * 2 ** (dim-1) # subtract missing rank 1 contributions\n )),\n \"hvd.alltoall collected wrong number of values\")\n\n def test_horovod_alltoall_one_rank_sends_nothing_gpu(self):\n \"\"\"Test where one rank sends nothing in an alltoall.\"\"\"\n # Only do this test if there are GPUs available.\n if not tf.test.is_gpu_available(cuda_only=True):\n self.skipTest(\"No GPUs available\")\n\n if int(os.environ.get('HOROVOD_MIXED_INSTALL', 0)):\n # Skip if compiled with CUDA but without HOROVOD_GPU_OPERATIONS.\n self.skipTest(\"Not compiled with HOROVOD_GPU_OPERATIONS\")\n\n # This test does not apply if NCCL version < 2.7.0\n if hvd.nccl_built() and hvd.nccl_built() < 2700:\n self.skipTest(\"NCCL-based Alltoall requires NCCL version >= 2.7.0.\")\n\n hvd.init()\n local_rank = hvd.local_rank()\n size = hvd.size()\n rank = hvd.rank()\n\n if hvd.size() < 2:\n self.skipTest(\"Only one worker available\")\n\n dtypes = [tf.uint8, tf.int8, tf.uint16, tf.int16,\n tf.int32, tf.int64, tf.float16, tf.float32,\n tf.float64]\n dims = [1, 2, 3]\n for dtype, dim in itertools.product(dtypes, dims):\n with tf.device(\"/gpu:%s\" % local_rank):\n if rank == 1:\n splits = tf.convert_to_tensor([0] * size, dtype=tf.int32)\n vals = []\n tensor = tf.convert_to_tensor(vals, dtype=dtype)\n tensor = tf.reshape(tensor, shape=[0] + (dim-1)*[2])\n else:\n splits = tf.convert_to_tensor([rank + 1] * size, dtype=tf.int32)\n vals = []\n for i in range(size):\n vals += [i] * (rank + 1)\n tensor = 
tf.convert_to_tensor(vals, dtype=dtype)\n for _ in range(dim - 1):\n tensor = tf.expand_dims(tensor, axis=1)\n tensor = tf.concat([tensor, tensor], axis=1)\n\n collected = hvd.alltoall(tensor, splits, name=\"a2a\")\n\n self.assertTrue(\n self.evaluate(tf.reduce_all(\n tf.equal(tf.cast(collected, tf.int32), rank))),\n \"hvd.alltoall produces incorrect collected tensor\")\n\n self.assertTrue(\n self.evaluate(tf.equal(tf.size(collected), size * (size + 1) // 2 * 2**(dim - 1)\n - (1+1) * 2 ** (dim-1) # subtract missing rank 1 contributions\n )),\n \"hvd.alltoall collected wrong number of values\")\n\n def test_horovod_alltoall_one_rank_receives_nothing_cpu(self):\n \"\"\"Test where one rank receives nothing in an alltoall.\"\"\"\n hvd.init()\n size = hvd.size()\n rank = hvd.rank()\n\n if hvd.size() < 2:\n self.skipTest(\"Only one worker available\")\n\n dtypes = [tf.uint8, tf.int8, tf.uint16, tf.int16,\n tf.int32, tf.int64, tf.float16, tf.float32,\n tf.float64]\n dims = [1, 2, 3]\n for dtype, dim in itertools.product(dtypes, dims):\n with tf.device(\"/cpu:0\"):\n # send nothing to rank 0\n splits = tf.convert_to_tensor([0] + [rank + 1] * (size - 1), dtype=tf.int32)\n vals = []\n for i in range(1, size):\n vals += [i] * (rank + 1)\n tensor = tf.convert_to_tensor(vals, dtype=dtype)\n for _ in range(dim - 1):\n tensor = tf.expand_dims(tensor, axis=1)\n tensor = tf.concat([tensor, tensor], axis=1)\n\n collected = hvd.alltoall(tensor, splits, name=\"a2a\")\n self.assertTrue(\n self.evaluate(tf.reduce_all(\n tf.equal(tf.cast(collected, tf.int32), rank))),\n \"hvd.alltoall produces incorrect collected tensor\")\n if rank == 0:\n expected_size = 0\n else:\n expected_size = size * (size + 1) // 2 * 2**(dim - 1)\n self.assertTrue(\n self.evaluate(tf.equal(tf.size(collected), expected_size)),\n \"hvd.alltoall collected wrong number of values\")\n\n def test_horovod_alltoall_one_rank_receives_nothing_gpu(self):\n \"\"\"Test where one rank receives nothing in an alltoall.\"\"\"\n # ncclGroupEnd failed: invalid usage\n\n # Only do this test if there are GPUs available.\n if not tf.test.is_gpu_available(cuda_only=True):\n self.skipTest(\"No GPUs available\")\n\n if int(os.environ.get('HOROVOD_MIXED_INSTALL', 0)):\n # Skip if compiled with CUDA but without HOROVOD_GPU_OPERATIONS.\n self.skipTest(\"Not compiled with HOROVOD_GPU_OPERATIONS\")\n\n # This test does not apply if NCCL version < 2.7.0\n if hvd.nccl_built() and hvd.nccl_built() < 2700:\n self.skipTest(\"NCCL-based Alltoall requires NCCL version >= 2.7.0.\")\n\n hvd.init()\n size = hvd.size()\n rank = hvd.rank()\n local_rank = hvd.local_rank()\n\n if hvd.size() < 2:\n self.skipTest(\"Only one worker available\")\n\n dtypes = [tf.uint8, tf.int8, tf.uint16, tf.int16,\n tf.int32, tf.int64, tf.float16, tf.float32,\n tf.float64]\n dims = [1, 2, 3]\n for dtype, dim in itertools.product(dtypes, dims):\n with tf.device(\"/gpu:%s\" % local_rank):\n # send nothing to rank 0\n splits = tf.convert_to_tensor([0] + [rank + 1] * (size - 1), dtype=tf.int32)\n vals = []\n for i in range(1, size):\n vals += [i] * (rank + 1)\n tensor = tf.convert_to_tensor(vals, dtype=dtype)\n for _ in range(dim - 1):\n tensor = tf.expand_dims(tensor, axis=1)\n tensor = tf.concat([tensor, tensor], axis=1)\n\n collected = hvd.alltoall(tensor, splits, name=\"a2a\")\n self.assertTrue(\n self.evaluate(tf.reduce_all(\n tf.equal(tf.cast(collected, tf.int32), rank))),\n \"hvd.alltoall produces incorrect collected tensor\")\n if rank == 0:\n expected_size = 0\n else:\n expected_size = size * 
(size + 1) // 2 * 2**(dim - 1)\n self.assertTrue(\n self.evaluate(tf.equal(tf.size(collected), expected_size)),\n \"hvd.alltoall collected wrong number of values\")\n\n\n def test_horovod_alltoall_zero_splits_cpu(self):\n \"\"\"Test alltoall with some ranks not participating / splits set to zero.\"\"\"\n hvd.init()\n\n if hvd.size() == 1:\n self.skipTest(\"Only one worker available\")\n\n active_ranks = range(0, hvd.size() // 2)\n silent_ranks = range(hvd.size() // 2, hvd.size())\n\n active_splits = [1 if r in active_ranks else 0 for r in range(hvd.size())]\n active_shape = [sum(active_splits), 4]\n silent_splits = [0] * hvd.size()\n silent_shape = [0, 4]\n\n with tf.device(\"/cpu:0\"):\n if hvd.rank() in active_ranks:\n source_tensor = tf.fill(active_shape, value=tf.cast(hvd.rank(), tf.int32))\n splits = tf.convert_to_tensor(active_splits)\n else:\n source_tensor = tf.fill(silent_shape, value=tf.cast(hvd.rank(), tf.int32))\n splits = tf.convert_to_tensor(silent_splits)\n collected = hvd.alltoall(source_tensor, splits, name=\"alltoall_zero_splits\")\n result = self.evaluate(collected)\n\n print(hvd.rank(), \"result.shape\", result.shape)\n print(hvd.rank(), \"result\", result)\n if hvd.rank() in active_ranks:\n expected_result_shape = active_shape\n else:\n expected_result_shape = silent_shape\n self.assertSequenceEqual(result.shape, expected_result_shape)\n if hvd.rank() in active_ranks:\n for r_idx, r in enumerate(active_ranks):\n self.assertTrue(np.all(result[r_idx, ...] == r))\n else:\n self.assertLen(result, 0)\n\n def test_horovod_alltoall_zero_splits_gpu(self):\n \"\"\"Test alltoall with some ranks not participating / splits set to zero.\"\"\"\n # ncclCommInitRank failed: invalid usage\n hvd.init()\n\n if hvd.size() == 1:\n self.skipTest(\"Only one worker available\")\n\n # Only do this test if there are GPUs available.\n if not tf.test.is_gpu_available(cuda_only=True):\n self.skipTest(\"No GPUs available\")\n\n if int(os.environ.get('HOROVOD_MIXED_INSTALL', 0)):\n # Skip if compiled with CUDA but without HOROVOD_GPU_OPERATIONS.\n self.skipTest(\"Not compiled with HOROVOD_GPU_OPERATIONS\")\n\n # This test does not apply if NCCL version < 2.7.0\n if hvd.nccl_built() and hvd.nccl_built() < 2700:\n self.skipTest(\"NCCL-based Alltoall requires NCCL version >= 2.7.0.\")\n\n active_ranks = range(0, hvd.size() // 2)\n silent_ranks = range(hvd.size() // 2, hvd.size())\n\n active_splits = [1 if r in active_ranks else 0 for r in range(hvd.size())]\n active_shape = [sum(active_splits), 4]\n silent_splits = [0] * hvd.size()\n silent_shape = [0, 4]\n\n with tf.device(\"/gpu:%s\" % hvd.local_rank()):\n if hvd.rank() in active_ranks:\n source_tensor = tf.fill(active_shape, value=tf.cast(hvd.rank(), tf.int32))\n splits = tf.convert_to_tensor(active_splits)\n else:\n source_tensor = tf.fill(silent_shape, value=tf.cast(hvd.rank(), tf.int32))\n splits = tf.convert_to_tensor(silent_splits)\n collected = hvd.alltoall(source_tensor, splits, name=\"alltoall_zero_splits\")\n result = self.evaluate(collected)\n\n print(hvd.rank(), \"result.shape\", result.shape)\n print(hvd.rank(), \"result\", result)\n if hvd.rank() in active_ranks:\n expected_result_shape = active_shape\n else:\n expected_result_shape = silent_shape\n self.assertSequenceEqual(result.shape, expected_result_shape)\n if hvd.rank() in active_ranks:\n for r_idx, r in enumerate(active_ranks):\n self.assertTrue(np.all(result[r_idx, ...] 
== r))\n else:\n self.assertLen(result, 0)\n\n def test_horovod_alltoall_type_error(self):\n \"\"\"Test that the alltoall returns an error if the tensor types differ\n across the processes.\"\"\"\n hvd.init()\n rank = hvd.rank()\n size = hvd.size()\n\n # This test does not apply if there is only one worker.\n if size == 1:\n self.skipTest(\"Only one worker available\")\n\n with tf.device(\"/cpu:0\"):\n if rank % 2:\n tensor = tf.ones([size], dtype=tf.int32)\n else:\n tensor = tf.ones([size], dtype=tf.float32)\n\n with self.assertRaises(tf.errors.FailedPreconditionError):\n self.evaluate(hvd.alltoall(tensor))\n\n def test_horovod_alltoall_equal_split_length_error(self):\n \"\"\"Test that the alltoall with default splitting returns an error if the tensor length is not a multiple\n of the number of workers.\"\"\"\n hvd.init()\n rank = hvd.rank()\n size = hvd.size()\n\n # This test does not apply if there is only one worker.\n if size == 1:\n self.skipTest(\"Only one worker available\")\n\n with tf.device(\"/cpu:0\"):\n tensor = tf.ones([size + 1], dtype=tf.float32)\n\n with self.assertRaises(tf.errors.InvalidArgumentError):\n self.evaluate(hvd.alltoall(tensor))\n\n def test_horovod_alltoall_splits_error(self):\n \"\"\"Test that the alltoall returns an error if the sum of the splits entries exceeds\n the first dimension of the input tensor.\"\"\"\n hvd.init()\n rank = hvd.rank()\n size = hvd.size()\n\n # This test does not apply if there is only one worker.\n if size == 1:\n self.skipTest(\"Only one worker available\")\n\n with tf.device(\"/cpu:0\"):\n tensor = tf.ones([size-1], dtype=tf.float32)\n splits = tf.ones([size], dtype=tf.int32)\n\n with self.assertRaises(tf.errors.InvalidArgumentError):\n self.evaluate(hvd.alltoall(tensor))\n\n def test_horovod_alltoall_rank_error(self):\n \"\"\"Test that the alltoall returns an error if any dimension besides\n the first is different among the tensors being processed.\"\"\"\n hvd.init()\n rank = hvd.rank()\n size = hvd.size()\n\n # This test does not apply if there is only one worker.\n if size == 1:\n self.skipTest(\"Only one worker available\")\n\n tensor_size = [2 * size] * 3\n tensor_size[1] = 10 * (rank + 1)\n with tf.device(\"/cpu:0\"):\n tensor = tf.ones(tensor_size)\n\n with self.assertRaises(tf.errors.FailedPreconditionError):\n self.evaluate(hvd.alltoall(tensor))\n\n def test_horovod_alltoall_grad_cpu(self):\n \"\"\"Test the correctness of the alltoall gradient on CPU.\"\"\"\n hvd.init()\n rank = hvd.rank()\n size = hvd.size()\n\n # As of TensorFlow v1.9, gradients are not supported on\n # integer tensors\n dtypes = [tf.float32, tf.float64]\n dims = [1, 2, 3]\n for dtype, dim in itertools.product(dtypes, dims):\n with tf.device(\"/cpu:0\"):\n vals = []\n for i in range(size):\n vals += [i] * (rank+1)\n tensor = tf.convert_to_tensor(vals, dtype=dtype)\n for _ in range(dim - 1):\n tensor = tf.expand_dims(tensor, axis=1)\n tensor = tf.concat([tensor, tensor], axis=1)\n\n if _executing_eagerly():\n tensor = self.tfe.Variable(tensor)\n splits = tf.convert_to_tensor([rank + 1] * size, dtype=tf.int32)\n with tf.GradientTape() as tape:\n collected = hvd.alltoall(tensor, splits)\n else:\n splits = tf.convert_to_tensor([rank + 1] * size, dtype=tf.int32)\n collected = hvd.alltoall(tensor, splits)\n\n grad_ys = tf.ones(tf.shape(collected))\n if _executing_eagerly():\n grad_out = tape.gradient(collected, tensor, grad_ys)\n else:\n grad = tf.gradients(collected, tensor, grad_ys)[0]\n grad_out = self.evaluate(grad)\n\n expected = 
np.ones(tensor.get_shape().as_list())\n err = np.linalg.norm(expected - grad_out)\n self.assertLess(err, 0.00000001,\n \"gradient %s differs from expected %s, \"\n \"error: %s\" % (grad_out, expected, str(err)))\n\n def test_horovod_alltoall_grad_gpu(self):\n \"\"\"Test the correctness of the alltoall gradient on GPU.\"\"\"\n # Only do this test if there are GPUs available.\n if not tf.test.is_gpu_available(cuda_only=True):\n self.skipTest((\"No GPUs available\"))\n\n if int(os.environ.get('HOROVOD_MIXED_INSTALL', 0)):\n # Skip if compiled with CUDA but without HOROVOD_GPU_OPERATIONS.\n self.skipTest(\"Not compiled with HOROVOD_GPU_OPERATIONS\")\n\n # This test does not apply if NCCL version < 2.7.0\n if hvd.nccl_built() and hvd.nccl_built() < 2700:\n self.skipTest(\"NCCL-based Alltoall requires NCCL version >= 2.7.0.\")\n\n hvd.init()\n rank = hvd.rank()\n local_rank = hvd.local_rank()\n size = hvd.size()\n\n # As of TensorFlow v1.9, gradients are not supported on\n # integer tensors\n dtypes = [tf.float32, tf.float64]\n dims = [1, 2, 3]\n for dtype, dim in itertools.product(dtypes, dims):\n with tf.device(\"/gpu:%s\" % local_rank):\n vals = []\n for i in range(size):\n vals += [i] * (rank+1)\n tensor = tf.convert_to_tensor(vals, dtype=dtype)\n for _ in range(dim - 1):\n tensor = tf.expand_dims(tensor, axis=1)\n tensor = tf.concat([tensor, tensor], axis=1)\n\n if _executing_eagerly():\n tensor = self.tfe.Variable(tensor)\n splits = tf.convert_to_tensor([rank + 1] * size, dtype=tf.int32)\n with tf.GradientTape() as tape:\n collected = hvd.alltoall(tensor, splits)\n else:\n splits = tf.convert_to_tensor([rank + 1] * size, dtype=tf.int32)\n collected = hvd.alltoall(tensor, splits)\n\n grad_ys = tf.ones(tf.shape(collected))\n if _executing_eagerly():\n grad_out = tape.gradient(collected, tensor, grad_ys)\n else:\n grad = tf.gradients(collected, tensor, grad_ys)[0]\n grad_out = self.evaluate(grad)\n\n expected = np.ones(tensor.get_shape().as_list())\n err = np.linalg.norm(expected - grad_out)\n self.assertLess(err, 0.00000001,\n \"gradient %s differs from expected %s, \"\n \"error: %s\" % (grad_out, expected, str(err)))\n\n def test_horovod_alltoall_equal_split_grad_cpu(self):\n \"\"\"Test the correctness of the alltoall gradient with default splitting on CPU.\"\"\"\n hvd.init()\n rank = hvd.rank()\n size = hvd.size()\n\n # As of TensorFlow v1.9, gradients are not supported on\n # integer tensors\n dtypes = [tf.float32, tf.float64]\n dims = [1, 2, 3]\n for dtype, dim in itertools.product(dtypes, dims):\n with tf.device(\"/cpu:0\"):\n vals = []\n for i in range(size):\n vals += [i] * (rank+1)\n tensor = tf.convert_to_tensor(vals, dtype=dtype)\n for _ in range(dim - 1):\n tensor = tf.expand_dims(tensor, axis=1)\n tensor = tf.concat([tensor, tensor], axis=1)\n\n if _executing_eagerly():\n tensor = self.tfe.Variable(tensor)\n with tf.GradientTape() as tape:\n collected = hvd.alltoall(tensor)\n else:\n collected = hvd.alltoall(tensor)\n\n grad_ys = tf.ones(tf.shape(collected))\n if _executing_eagerly():\n grad_out = tape.gradient(collected, tensor, grad_ys)\n else:\n grad = tf.gradients(collected, tensor, grad_ys)[0]\n grad_out = self.evaluate(grad)\n\n expected = np.ones(tensor.get_shape().as_list())\n err = np.linalg.norm(expected - grad_out)\n self.assertLess(err, 0.00000001,\n \"gradient %s differs from expected %s, \"\n \"error: %s\" % (grad_out, expected, str(err)))\n\n def test_horovod_alltoall_equal_split_grad_gpu(self):\n \"\"\"Test the correctness of the alltoall gradient with default 
splitting on GPU.\"\"\"\n # Only do this test if there are GPUs available.\n if not tf.test.is_gpu_available(cuda_only=True):\n self.skipTest((\"No GPUs available\"))\n\n if int(os.environ.get('HOROVOD_MIXED_INSTALL', 0)):\n # Skip if compiled with CUDA but without HOROVOD_GPU_OPERATIONS.\n self.skipTest(\"Not compiled with HOROVOD_GPU_OPERATIONS\")\n\n # This test does not apply if NCCL version < 2.7.0\n if hvd.nccl_built() and hvd.nccl_built() < 2700:\n self.skipTest(\"NCCL-based Alltoall requires NCCL version >= 2.7.0.\")\n\n hvd.init()\n rank = hvd.rank()\n local_rank = hvd.local_rank()\n size = hvd.size()\n\n # As of TensorFlow v1.9, gradients are not supported on\n # integer tensors\n dtypes = [tf.float32, tf.float64]\n dims = [1, 2, 3]\n for dtype, dim in itertools.product(dtypes, dims):\n with tf.device(\"/gpu:%s\" % local_rank):\n vals = []\n for i in range(size):\n vals += [i] * (rank+1)\n tensor = tf.convert_to_tensor(vals, dtype=dtype)\n for _ in range(dim - 1):\n tensor = tf.expand_dims(tensor, axis=1)\n tensor = tf.concat([tensor, tensor], axis=1)\n\n if _executing_eagerly():\n tensor = self.tfe.Variable(tensor)\n with tf.GradientTape() as tape:\n collected = hvd.alltoall(tensor)\n else:\n collected = hvd.alltoall(tensor)\n\n grad_ys = tf.ones(tf.shape(collected))\n if _executing_eagerly():\n grad_out = tape.gradient(collected, tensor, grad_ys)\n else:\n grad = tf.gradients(collected, tensor, grad_ys)[0]\n grad_out = self.evaluate(grad)\n\n expected = np.ones(tensor.get_shape().as_list())\n err = np.linalg.norm(expected - grad_out)\n self.assertLess(err, 0.00000001,\n \"gradient %s differs from expected %s, \"\n \"error: %s\" % (grad_out, expected, str(err)))\n\n\n def test_horovod_broadcast_eager_mode_error(self):\n \"\"\"Test that tries to broadcast tensorflow global variables\n in eager execution mode. This call should raise a RuntimeError.\"\"\"\n\n if not hvd.util._executing_eagerly():\n self.skipTest(\"Only in eager execution mode\")\n\n with self.assertRaises(RuntimeError):\n hvd.broadcast_global_variables(root_rank=0)\n\n def test_horovod_broadcast_graph_mode(self):\n \"\"\"Test that tries to broadcast tensorflow global variables\n in graph execution mode. 
This call should not raise any exception.\"\"\"\n\n if hvd.util._executing_eagerly():\n self.skipTest(\"Not in eager execution mode\")\n\n hvd.broadcast_global_variables(root_rank=0)\n\n def test_compression_fp16(self):\n valid_dtypes = [tf.float16, tf.float32, tf.float64]\n invalid_dtypes = [tf.uint8, tf.int8, tf.uint16, tf.int16,\n tf.int32, tf.int64, tf.bool]\n\n tensor_size = [17] * 3\n compression = hvd.Compression.fp16\n\n for dtype in valid_dtypes:\n tensor = tf.ones(tensor_size, dtype=dtype)\n\n tensor_compressed, ctx = compression.compress(tensor)\n self.assertEqual(tensor_compressed.dtype, tf.float16)\n\n tensor_decompressed = compression.decompress(tensor_compressed, ctx)\n self.assertEqual(tensor_decompressed.dtype, dtype)\n\n actual = self.evaluate(tensor_decompressed)\n expected = np.ones(tensor_size)\n err = np.linalg.norm(expected - actual)\n self.assertLess(err, 0.00000001)\n\n for dtype in invalid_dtypes:\n tensor = tf.ones(tensor_size, dtype=dtype)\n\n tensor_compressed, ctx = compression.compress(tensor)\n self.assertEqual(tensor_compressed.dtype, dtype)\n\n tensor_decompressed = compression.decompress(tensor_compressed, ctx)\n self.assertEqual(tensor_decompressed.dtype, dtype)\n\n actual = self.evaluate(tensor_decompressed)\n expected = np.ones(tensor_size)\n err = np.linalg.norm(expected - actual)\n self.assertLess(err, 0.00000001)\n\n def test_broadcast_object(self):\n hvd.init()\n\n with tf.device(\"/cpu:0\"):\n expected_obj = {\n 'hello': 123,\n 0: [1, 2]\n }\n obj = expected_obj if hvd.rank() == 0 else {}\n\n obj = hvd.broadcast_object(obj, root_rank=0)\n self.assertDictEqual(obj, expected_obj)\n\n def test_broadcast_object_fn(self):\n if hvd._executing_eagerly() or _IS_TF2:\n # Only for TF 1.0 in graph mode\n return\n\n hvd.init()\n\n with tf.device(\"/cpu:0\"):\n expected_obj = {\n 'hello': 123,\n 0: [1, 2]\n }\n obj = expected_obj if hvd.rank() == 0 else {}\n\n bcast = hvd.broadcast_object_fn(root_rank=0)\n obj = bcast(obj)\n self.assertDictEqual(obj, expected_obj)\n\n def test_allgather_object(self):\n hvd.init()\n\n with tf.device(\"/cpu:0\"):\n d = {'metric_val_1': hvd.rank()}\n if hvd.rank() == 1:\n d['metric_val_2'] = 42\n\n results = hvd.allgather_object(d)\n\n expected = [{'metric_val_1': i} for i in range(hvd.size())]\n if hvd.size() > 1:\n expected[1] = {'metric_val_1': 1, 'metric_val_2': 42}\n\n self.assertEqual(len(results), hvd.size())\n self.assertListEqual(results, expected)\n\n def test_elastic_state(self):\n if not hvd._executing_eagerly() and _IS_TF2:\n # Only support TF 2.0 in eager mode\n return\n\n hvd.init()\n\n with tf.device(\"/cpu:0\"):\n v = 1.0 if hvd.rank() == 0 else 2.0\n weights1 = [\n np.array([[v, v], [v, v]]),\n np.array([v, v])\n ]\n vars1 = [tf.Variable(arr) for arr in weights1]\n\n weights2 = [\n np.array([[1.0, 2.0], [3.0, 4.0]]),\n np.array([0.0, 0.0])\n ]\n\n if not hvd._executing_eagerly():\n init = tf.global_variables_initializer()\n self.evaluate(init)\n\n state = hvd.elastic.TensorFlowState(vars1, batch=20 + hvd.rank(), epoch=10 + hvd.rank())\n state.sync()\n\n weights1 = [np.ones_like(w) for w in weights1]\n\n # After sync, all values should match the root rank\n for w in self.evaluate(vars1):\n self.assertAllClose(w, np.ones_like(w))\n assert state.batch == 20\n assert state.epoch == 10\n\n # Partially modify then restore\n self.assign(vars1, weights2)\n state.batch = 21\n state.epoch = 11\n\n state.restore()\n\n for w1, w2 in zip(self.evaluate(vars1), weights1):\n self.assertAllClose(w1, w2)\n assert state.batch == 
20\n assert state.epoch == 10\n\n # Partially modify then commit\n self.assign(vars1, weights2)\n state.batch = 21\n state.epoch = 11\n\n state.commit()\n state.restore()\n\n for w1, w2 in zip(self.evaluate(vars1), weights2):\n self.assertAllClose(w1, w2)\n assert state.batch == 21\n assert state.epoch == 11\n \n def test_horovod_join_allreduce(self):\n \"\"\"Test that the hvd.join with allreduce works on GPUs.\"\"\"\n # Only do this test if there are GPUs available.\n if not tf.test.is_gpu_available(cuda_only=True):\n self.skipTest((\"No GPUs available\"))\n\n if int(os.environ.get('HOROVOD_MIXED_INSTALL', 0)):\n # Skip if compiled with CUDA but without HOROVOD_GPU_ALLREDUCE.\n self.skipTest(\"Not compiled with HOROVOD_GPU_ALLREDUCE\")\n\n hvd.init()\n local_rank = hvd.local_rank()\n size = hvd.size()\n\n # This test does not apply if there is only one worker.\n if size == 1:\n self.skipTest(\"Only one worker available\")\n\n\n dtypes = [tf.int32, tf.int64, tf.float16, tf.float32, tf.float64]\n dims = [1, 2, 3]\n first_join_ranks = [0, 1]\n\n for dtype, dim, first_join_rank in itertools.product(dtypes, dims, first_join_ranks):\n with tf.device(\"/gpu:%d\" % local_rank):\n tensor = self.random_uniform(\n [17] * dim, -100, 100, dtype=dtype)\n if local_rank == first_join_rank:\n self.evaluate(hvd.join())\n else:\t\t\n summed = hvd.allreduce(tensor, average=False)\n multiplied = tensor * (size-1)\n max_difference = tf.reduce_max(tf.abs(summed - multiplied))\n\n if size <= 3 or dtype in [tf.int32, tf.int64]:\n threshold = 0 \n elif size < 10:\n threshold = 1e-4\n elif size < 15:\n threshold = 5e-4\n else:\n return\n diff = self.evaluate(max_difference)\n self.evaluate(hvd.join())\n self.assertTrue(diff <= threshold,\n \"hvd.join with hvd.allreduce on GPU produces incorrect results\")\n\n def test_horovod_syncbn_gpu(self):\n \"\"\"Test that the SyncBatchNormalization implementation is correct on GPU.\"\"\"\n # Only do this test if there are GPUs available.\n if not tf.test.is_gpu_available(cuda_only=True):\n self.skipTest((\"No GPUs available\"))\n\n hvd.init()\n with tf.device(\"/gpu:%d\" % hvd.local_rank()):\n x_list = [\n tf.convert_to_tensor(np.stack([\n np.array([\n [r, r + 1],\n [r * 2, r * 2 + 1],\n [r * 3, r * 3 + 1],\n [r * 4, r * 4 + 1]\n ], dtype=np.float32)\n for r in range(hvd.size())\n ]), np.float32),\n tf.convert_to_tensor(np.stack([\n np.array([\n [r + 1],\n [r * 2 + 1],\n [r * 3 + 1],\n [r * 4 + 1]\n ], dtype=np.float32)\n for r in range(hvd.size())\n ]), np.float32),\n ]\n\n for x in x_list:\n bn = tf.keras.layers.BatchNormalization(axis=1, fused=False)\n sync_bn = hvd.SyncBatchNormalization(axis=1)\n bn_func = bn.apply(x, training=True)\n sync_bn_func = sync_bn.apply(tf.expand_dims(x[hvd.rank()], 0), training=True)\n\n try:\n init = tf.global_variables_initializer()\n except AttributeError:\n init = tf.compat.v1.global_variables_initializer()\n self.evaluate(init)\n bn_out = self.evaluate(bn_func)\n sync_bn_out = self.evaluate(sync_bn_func)\n\n self.assertAllClose(sync_bn_out, np.expand_dims(bn_out[hvd.rank()], 0))\n self.assertAllClose(self.evaluate(sync_bn.moving_mean), self.evaluate(bn.moving_mean))\n self.assertAllClose(self.evaluate(sync_bn.moving_variance), self.evaluate(bn.moving_variance))\n\n def test_horovod_syncbn_cpu(self):\n \"\"\"Test that the SyncBatchNormalization implementation is correct on CPU.\"\"\"\n\n hvd.init()\n with tf.device(\"/cpu:0\"):\n x_list = [\n tf.convert_to_tensor(np.stack([\n np.array([\n [r, r + 1],\n [r * 2, r * 2 + 1],\n [r * 3, r * 3 + 
1],\n [r * 4, r * 4 + 1]\n ], dtype=np.float32)\n for r in range(hvd.size())\n ]), np.float32),\n tf.convert_to_tensor(np.stack([\n np.array([\n [r + 1],\n [r * 2 + 1],\n [r * 3 + 1],\n [r * 4 + 1]\n ], dtype=np.float32)\n for r in range(hvd.size())\n ]), np.float32),\n ]\n\n for x in x_list:\n bn = tf.keras.layers.BatchNormalization(axis=1, fused=False)\n sync_bn = hvd.SyncBatchNormalization(axis=1)\n bn_func = bn.apply(x, training=True)\n sync_bn_func = sync_bn.apply(tf.expand_dims(x[hvd.rank()], 0), training=True)\n\n try:\n init = tf.global_variables_initializer()\n except AttributeError:\n init = tf.compat.v1.global_variables_initializer()\n self.evaluate(init)\n bn_out = self.evaluate(bn_func)\n sync_bn_out = self.evaluate(sync_bn_func)\n\n self.assertAllClose(sync_bn_out, np.expand_dims(bn_out[hvd.rank()], 0))\n self.assertAllClose(self.evaluate(sync_bn.moving_mean), self.evaluate(bn.moving_mean))\n self.assertAllClose(self.evaluate(sync_bn.moving_variance), self.evaluate(bn.moving_variance))\n\nfrom tensorflow.python.framework.test_util import run_all_in_graph_and_eager_modes\nrun_all_in_graph_and_eager_modes(TensorFlowTests)\n\nif __name__ == '__main__':\n tf.test.main()\n"
] |
[
[
"tensorflow.random_uniform",
"tensorflow.convert_to_tensor",
"tensorflow.device",
"tensorflow.enable_eager_execution",
"tensorflow.concat",
"tensorflow.cast",
"numpy.all",
"tensorflow.random.set_seed",
"numpy.ones_like",
"tensorflow.Variable",
"tensorflow.config.experimental.set_memory_growth",
"tensorflow.gradients",
"tensorflow.test.main",
"tensorflow.ConfigProto",
"tensorflow.python.framework.ops.get_default_session",
"tensorflow.shape",
"tensorflow.config.experimental.list_physical_devices",
"tensorflow.random.uniform",
"tensorflow.python.framework.test_util.run_all_in_graph_and_eager_modes",
"tensorflow.global_variables_initializer",
"tensorflow.set_random_seed",
"numpy.array",
"tensorflow.size",
"tensorflow.GradientTape",
"numpy.random.seed",
"tensorflow.slice",
"tensorflow.reshape",
"tensorflow.ones",
"numpy.linalg.norm",
"numpy.ones",
"tensorflow.expand_dims",
"numpy.random.uniform",
"tensorflow.compat.v1.global_variables_initializer",
"tensorflow.keras.layers.BatchNormalization",
"tensorflow.test.is_gpu_available",
"tensorflow.reduce_all",
"tensorflow.abs"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": [
"2.8",
"1.10",
"1.12",
"2.7",
"2.6",
"1.4",
"2.3",
"2.4",
"2.9",
"1.5",
"1.7",
"2.5",
"0.12",
"1.0",
"2.2",
"1.2",
"2.10"
]
}
] |
peasant98/hiro_pytorch
|
[
"b2b4e5cd0933bc042f674a9ba5c99351a8ac20ed"
] |
[
"envs/maze_env.py"
] |
[
"# Copyright 2018 The TensorFlow Authors All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\n\"\"\"Adapted from rllab maze_env.py.\"\"\"\n\nimport os\nimport tempfile\nimport xml.etree.ElementTree as ET\nimport math\nimport numpy as np\nimport gym\n\nfrom envs import maze_env_utils\n\n# Directory that contains mujoco xml files.\nMODEL_DIR = 'assets'\n\n\nclass MazeEnv(gym.Env):\n MODEL_CLASS = None\n\n MAZE_HEIGHT = None\n MAZE_SIZE_SCALING = None\n\n def __init__(\n self,\n maze_id=None,\n maze_height=0.5,\n maze_size_scaling=8,\n *args,\n **kwargs):\n self._maze_id = maze_id\n self.t = 0\n\n model_cls = self.__class__.MODEL_CLASS\n if model_cls is None:\n raise \"MODEL_CLASS unspecified!\"\n xml_path = os.path.join(\"envs\", MODEL_DIR, model_cls.FILE)\n tree = ET.parse(xml_path)\n worldbody = tree.find(\".//worldbody\")\n\n self.MAZE_HEIGHT = height = maze_height\n self.MAZE_SIZE_SCALING = size_scaling = maze_size_scaling\n self.MAZE_STRUCTURE = structure = maze_env_utils.construct_maze(maze_id=self._maze_id)\n self.elevated = any(-1 in row for row in structure) # Elevate the maze to allow for falling.\n self.blocks = any(\n any(maze_env_utils.can_move(r) for r in row)\n for row in structure) # Are there any movable blocks?\n\n torso_x, torso_y = self._find_robot()\n self._init_torso_x = torso_x\n self._init_torso_y = torso_y\n\n height_offset = 0.\n if self.elevated:\n # Increase initial z-pos of ant.\n height_offset = height * size_scaling\n torso = tree.find(\".//body[@name='torso']\")\n torso.set('pos', '0 0 %.2f' % (0.75 + height_offset))\n if self.blocks:\n # If there are movable blocks, change simulation settings to perform\n # better contact detection.\n default = tree.find(\".//default\")\n default.find('.//geom').set('solimp', '.995 .995 .01')\n\n for i in range(len(structure)):\n for j in range(len(structure[0])):\n if self.elevated and structure[i][j] not in [-1]:\n # Create elevated platform.\n ET.SubElement(\n worldbody, \"geom\",\n name=\"elevated_%d_%d\" % (i, j),\n pos=\"%f %f %f\" % (j * size_scaling - torso_x,\n i * size_scaling - torso_y,\n height / 2 * size_scaling),\n size=\"%f %f %f\" % (0.5 * size_scaling,\n 0.5 * size_scaling,\n height / 2 * size_scaling),\n type=\"box\",\n material=\"\",\n contype=\"1\",\n conaffinity=\"1\",\n rgba=\"0.9 0.9 0.9 1\",\n )\n if structure[i][j] == 1: # Unmovable block.\n # Offset all coordinates so that robot starts at the origin.\n ET.SubElement(\n worldbody, \"geom\",\n name=\"block_%d_%d\" % (i, j),\n pos=\"%f %f %f\" % (j * size_scaling - torso_x,\n i * size_scaling - torso_y,\n height_offset +\n height / 2 * size_scaling),\n size=\"%f %f %f\" % (0.5 * size_scaling,\n 0.5 * size_scaling,\n height / 2 * size_scaling),\n type=\"box\",\n material=\"\",\n contype=\"1\",\n conaffinity=\"1\",\n rgba=\"0.4 0.4 0.4 1\",\n )\n elif maze_env_utils.can_move(structure[i][j]): # Movable block.\n # The \"falling\" blocks are shrunk slightly 
and increased in mass to\n # ensure that it can fall easily through a gap in the platform blocks.\n falling = maze_env_utils.can_move_z(structure[i][j])\n shrink = 0.99 if falling else 1.0\n moveable_body = ET.SubElement(\n worldbody, \"body\",\n name=\"moveable_%d_%d\" % (i, j),\n pos=\"%f %f %f\" % (j * size_scaling - torso_x,\n i * size_scaling - torso_y,\n height_offset +\n height / 2 * size_scaling),\n )\n ET.SubElement(\n moveable_body, \"geom\",\n name=\"block_%d_%d\" % (i, j),\n pos=\"0 0 0\",\n size=\"%f %f %f\" % (0.5 * size_scaling * shrink,\n 0.5 * size_scaling * shrink,\n height / 2 * size_scaling),\n type=\"box\",\n material=\"\",\n mass=\"0.001\" if falling else \"0.0002\",\n contype=\"1\",\n conaffinity=\"1\",\n rgba=\"0.9 0.1 0.1 1\"\n )\n if maze_env_utils.can_move_x(structure[i][j]):\n ET.SubElement(\n moveable_body, \"joint\",\n armature=\"0\",\n axis=\"1 0 0\",\n damping=\"0.0\",\n limited=\"true\" if falling else \"false\",\n range=\"%f %f\" % (-size_scaling, size_scaling),\n margin=\"0.01\",\n name=\"moveable_x_%d_%d\" % (i, j),\n pos=\"0 0 0\",\n type=\"slide\"\n )\n if maze_env_utils.can_move_y(structure[i][j]):\n ET.SubElement(\n moveable_body, \"joint\",\n armature=\"0\",\n axis=\"0 1 0\",\n damping=\"0.0\",\n limited=\"true\" if falling else \"false\",\n range=\"%f %f\" % (-size_scaling, size_scaling),\n margin=\"0.01\",\n name=\"moveable_y_%d_%d\" % (i, j),\n pos=\"0 0 0\",\n type=\"slide\"\n )\n if maze_env_utils.can_move_z(structure[i][j]):\n ET.SubElement(\n moveable_body, \"joint\",\n armature=\"0\",\n axis=\"0 0 1\",\n damping=\"0.0\",\n limited=\"true\",\n range=\"%f 0\" % (-height_offset),\n margin=\"0.01\",\n name=\"moveable_z_%d_%d\" % (i, j),\n pos=\"0 0 0\",\n type=\"slide\"\n )\n\n torso = tree.find(\".//body[@name='torso']\")\n geoms = torso.findall(\".//geom\")\n for geom in geoms:\n if 'name' not in geom.attrib:\n raise Exception(\"Every geom of the torso must have a name \"\n \"defined\")\n\n _, file_path = tempfile.mkstemp(text=True, suffix=\".xml\")\n tree.write(file_path)\n\n self.wrapped_env = model_cls(*args, file_path=file_path, **kwargs)\n\n def _get_obs(self):\n return np.concatenate([self.wrapped_env._get_obs(),\n [self.t * 0.001]])\n\n def reset(self):\n self.t = 0\n self.wrapped_env.reset()\n return self._get_obs()\n\n @property\n def viewer(self):\n return self.wrapped_env.viewer\n\n def render(self, *args, **kwargs):\n return self.wrapped_env.render(*args, **kwargs)\n\n @property\n def observation_space(self):\n shape = self._get_obs().shape\n high = np.inf * np.ones(shape)\n low = -high\n return gym.spaces.Box(low, high)\n\n @property\n def action_space(self):\n return self.wrapped_env.action_space\n\n def _find_robot(self):\n structure = self.MAZE_STRUCTURE\n size_scaling = self.MAZE_SIZE_SCALING\n for i in range(len(structure)):\n for j in range(len(structure[0])):\n if structure[i][j] == 'r':\n return j * size_scaling, i * size_scaling\n assert False, 'No robot in maze specification.'\n\n def step(self, action):\n self.t += 1\n inner_next_obs, inner_reward, done, info = self.wrapped_env.step(action)\n next_obs = self._get_obs()\n done = False\n return next_obs, inner_reward, done, info\n"
] |
[
[
"numpy.ones"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
felixonmars/BAG_framework
|
[
"321625032b40f34ea79269b0f46101b25d2f6e08",
"993e7c82418443cf32e4b802d825671b78ec56fe",
"993e7c82418443cf32e4b802d825671b78ec56fe",
"993e7c82418443cf32e4b802d825671b78ec56fe"
] |
[
"bag/data/plot.py",
"bag/tech/core.py",
"bag/mdao/core.py",
"bag/math/dfun.py"
] |
[
"# -*- coding: utf-8 -*-\n\n\"\"\"This module contains utilities to improve waveform plotting in python.\n\"\"\"\n\nimport numpy as np\nimport scipy.interpolate as interp\n\nfrom matplotlib.lines import Line2D\nfrom matplotlib.figure import Figure\nfrom matplotlib.text import Annotation\nimport matplotlib.pyplot as plt\n\nfrom ..math import float_to_si_string\n\n# Vega category10 palette\ncolor_cycle = ['#1f77b4', '#ff7f0e',\n '#2ca02c', '#d62728',\n '#9467bd', '#8c564b',\n '#e377c2', '#7f7f7f',\n '#bcbd22', '#17becf',\n ]\n\n\ndef figure(fig_id, picker=5.0):\n \"\"\"Create a WaveformPlotter.\n\n Parameters\n ----------\n fig_id : int\n the figure ID.\n picker : float\n picker event pixel tolerance.\n\n Returns\n -------\n plotter : bag.data.plot.WaveformPlotter\n a plotter that helps you make interactive matplotlib figures.\n \"\"\"\n return WaveformPlotter(fig_id, picker=picker)\n\n\ndef plot_waveforms(xvec, panel_list, fig=1):\n \"\"\"Plot waveforms in vertical panels with shared X axis.\n\n Parameters\n ----------\n xvec : :class:`numpy.ndarray`\n the X data.\n panel_list : list[list[(str, :class:`numpy.ndarray`)]]\n list of lists of Y data. Each sub-list is one panel. Each element of the sub-list\n is a tuple of signal name and signal data.\n fig : int\n the figure ID.\n \"\"\"\n nrow = len(panel_list)\n\n if nrow > 0:\n myfig = plt.figure(fig, FigureClass=MarkerFigure) # type: MarkerFigure\n ax0 = None\n for idx, panel in enumerate(panel_list):\n if ax0 is None:\n ax = plt.subplot(nrow, 1, idx + 1)\n ax0 = ax\n else:\n ax = plt.subplot(nrow, 1, idx + 1, sharex=ax0)\n\n for name, sig in panel:\n ax.plot(xvec, sig, label=name, picker=5.0)\n\n box = ax.get_position()\n ax.set_position([box.x0, box.y0, box.width * 0.9, box.height])\n\n # Put a legend to the right of the current axis\n ax.legend(loc='center left', bbox_to_anchor=(1, 0.5))\n\n myfig.setup_callbacks()\n plt.show(block=False)\n\n\ndef _fpart(x):\n return x - int(x)\n\n\ndef _rfpart(x):\n return 1 - _fpart(x)\n\n\ndef draw_line(x0, y0, x1, y1, xmax, grid):\n \"\"\"Draws an anti-aliased line in img from p1 to p2 with the given color.\"\"\"\n\n if x0 > x1:\n # x1 is wrapped around\n x1 += xmax\n\n dx, dy = x1 - x0, y1 - y0\n steep = dx < abs(dy)\n if steep:\n x0, y0, x1, y1, dx, dy = y0, x0, y1, x1, dy, dx\n\n gradient = dy * 1.0 / dx\n # handle first endpoint\n xpxl1 = int(x0 + 0.5)\n yend = y0 + gradient * (xpxl1 - x0)\n xgap = _rfpart(x0 + 0.5)\n ypxl1 = int(yend)\n if steep:\n grid[ypxl1 % xmax, xpxl1] += _rfpart(yend) * xgap\n grid[(ypxl1 + 1) % xmax, xpxl1] += _fpart(yend) * xgap\n else:\n grid[xpxl1 % xmax, ypxl1] += _rfpart(yend) * xgap\n grid[xpxl1 % xmax, ypxl1 + 1] += _fpart(yend) * xgap\n\n intery = yend + gradient # first y-intersection for the main loop\n\n # do not color second endpoint to avoid double coloring.\n xpxl2 = int(x1 + 0.5)\n # main loop\n if steep:\n for x in range(xpxl1 + 1, xpxl2):\n xval = int(intery)\n grid[xval % xmax, x] += _rfpart(intery)\n grid[(xval + 1) % xmax, x] += _fpart(intery)\n intery += gradient\n else:\n for x in range(xpxl1 + 1, xpxl2):\n xval = x % xmax\n grid[xval, int(intery)] += _rfpart(intery)\n grid[xval, int(intery) + 1] += _fpart(intery)\n intery += gradient\n\n\ndef plot_eye_heatmap(fig, tvec, yvec, tper, tstart=None, tend=None, toff=None,\n tstep=None, vstep=None,\n cmap=None, vmargin=0.05, interpolation='gaussian',\n repeat=False):\n \"\"\"Plot eye diagram heat map.\n\n Parameters\n ----------\n fig : int\n the figure ID.\n tvec : np.ndarray\n the time data.\n yvec : 
np.ndarray\n waveform data.\n tper : float\n the eye period.\n tstart : float\n starting time. Defaults to first point.\n tend : float\n ending time. Defaults to last point.\n toff : float\n eye offset. Defaults to 0.\n tstep : float or None\n horizontal bin size. Defaults to using 200 bins.\n vstep : float or None\n vertical bin size. Defaults to using 200 bins.\n cmap :\n the colormap used for coloring the heat map. If None, defaults to cubehelix_r\n vmargin : float\n vertical margin in percentage of maximum/minimum waveform values. Defaults\n to 5 percent. This is used so that there some room between top/bottom of\n eye and the plot.\n interpolation : str\n interpolation method. Defaults to 'gaussian'. Use 'none' for no interpolation.\n repeat : bool\n True to repeat the eye diagram once to the right. This is useful if you\n want to look at edge transistions.\n \"\"\"\n if not toff:\n toff = 0.0\n if tstart is None:\n tstart = tvec[0]\n if tend is None:\n tend = tvec[-1]\n\n if tstep is None:\n num_h = 200\n else:\n num_h = int(np.ceil(tper / tstep))\n\n arr_idx = (tstart <= tvec) & (tvec < tend)\n tplot = np.mod((tvec[arr_idx] - toff), tper) / tper * num_h # type: np.ndarray\n yplot = yvec[arr_idx]\n\n # get vertical range\n ymin, ymax = np.amin(yplot), np.amax(yplot)\n yrang = (ymax - ymin) * (1 + vmargin)\n ymid = (ymin + ymax) / 2.0\n ymin = ymid - yrang / 2.0\n ymax = ymin + yrang\n\n if vstep is None:\n num_v = 200\n else:\n num_v = int(np.ceil(yrang / vstep))\n\n # rescale Y axis\n yplot = (yplot - ymin) / yrang * num_v\n\n grid = np.zeros((num_h, num_v), dtype=float)\n for idx in range(yplot.size - 1):\n draw_line(tplot[idx], yplot[idx], tplot[idx + 1], yplot[idx + 1], num_h, grid)\n\n if cmap is None:\n from matplotlib import cm\n # noinspection PyUnresolvedReferences\n cmap = cm.cubehelix_r\n\n plt.figure(fig)\n grid = grid.T[::-1, :]\n if repeat:\n grid = np.tile(grid, (1, 2))\n tper *= 2.0\n plt.imshow(grid, extent=[0, tper, ymin, ymax], cmap=cmap,\n interpolation=interpolation, aspect='auto')\n cb = plt.colorbar()\n cb.set_label('counts')\n return grid\n\n\ndef plot_eye(fig, tvec, yvec_list, tper, tstart=None, tend=None,\n toff_list=None, name_list=None, alpha=1.0):\n \"\"\"Plot eye diagram.\n\n Parameters\n ----------\n fig : int\n the figure ID.\n tvec : np.ndarray\n the time data.\n yvec_list : list[np.ndarray]\n list of waveforms to plot in eye diagram.\n tper : float\n the period.\n tstart : float\n starting time. Defaults to first point.\n tend : float\n ending time. Defaults to last point.\n toff_list : list[float]\n offset to apply to each waveform. Defaults to zeros.\n name_list : list[str] or None\n the name of each waveform. Defaults to numbers.\n alpha : float\n the transparency of each trace. 
Can be used to mimic heatmap.\n \"\"\"\n if not yvec_list:\n return\n\n if not name_list:\n name_list = [str(num) for num in range(len(yvec_list))]\n if not toff_list:\n toff_list = [0.0] * len(yvec_list)\n if tstart is None:\n tstart = tvec[0]\n if tend is None:\n tend = tvec[-1]\n\n # get new tstep that evenly divides tper and new x vector\n tstep_given = (tvec[-1] - tvec[0]) / (tvec.size - 1)\n num_samp = int(round(tper / tstep_given))\n t_plot = np.linspace(0.0, tper, num_samp, endpoint=False)\n\n # find tstart and tend in number of tper.\n nstart = int(np.floor(tstart / tper))\n nend = int(np.ceil(tend / tper))\n ncycle = nend - nstart\n teye = np.linspace(nstart * tper, nend * tper, num_samp * ncycle, endpoint=False) # type: np.ndarray\n teye = teye.reshape((ncycle, num_samp))\n\n myfig = plt.figure(fig, FigureClass=MarkerFigure) # type: MarkerFigure\n ax = plt.subplot()\n legend_lines = []\n for idx, yvec in enumerate(yvec_list):\n color = color_cycle[idx % len(color_cycle)]\n toff = toff_list[idx]\n # get eye traces\n yfun = interp.interp1d(tvec - toff, yvec, kind='linear', copy=False, bounds_error=False,\n fill_value=np.nan, assume_sorted=True)\n plot_list = []\n for cycle_idx in range(ncycle):\n plot_list.append(t_plot)\n plot_list.append(yfun(teye[cycle_idx, :]))\n\n lines = ax.plot(*plot_list, alpha=alpha, color=color, picker=4.0, linewidth=2)\n legend_lines.append(lines[0])\n\n # Put a legend to the right of the current axis\n box = ax.get_position()\n ax.set_position([box.x0, box.y0, box.width * 0.9, box.height])\n ax.legend(legend_lines, name_list, loc='center left', bbox_to_anchor=(1, 0.5))\n\n myfig.setup_callbacks()\n plt.show(block=False)\n\n\ndef _find_closest_point(x, y, xvec, yvec, xnorm, ynorm):\n \"\"\"Find point on PWL waveform described by xvec, yvec closest to (x, y)\"\"\"\n xnvec = xvec / xnorm\n ynvec = yvec / ynorm\n xn = x / xnorm\n yn = y / ynorm\n\n dx = np.diff(xnvec)\n dy = np.diff(ynvec)\n px = (xn - xnvec[:-1])\n py = (yn - ynvec[:-1])\n\n that = (px * dx + py * dy) / (dx ** 2 + dy ** 2)\n t = np.minimum(np.maximum(that, 0), 1)\n\n minx = xnvec[:-1] + t * dx\n miny = ynvec[:-1] + t * dy\n\n dist = (minx - xn) ** 2 + (miny - yn) ** 2\n idx = np.argmin(dist)\n return minx[idx] * xnorm, miny[idx] * ynorm\n\n\nclass WaveformPlotter(object):\n \"\"\"A custom matplotlib interactive plotting class.\n\n This class adds many useful features, such as ability to add/remove markers,\n ability to toggle waveforms on and off, and so on.\n\n Parameters\n ----------\n fig_idx : int\n the figure index.\n picker : float\n picker event pixel tolerance.\n normal_width : float\n normal linewidth.\n select_width : float\n selected linewidth.\n \"\"\"\n\n def __init__(self, fig_idx, picker=5.0, normal_width=1.5, select_width=3.0):\n self.figure = plt.figure(fig_idx, FigureClass=MarkerFigure) # type: MarkerFigure\n self.picker = picker\n self.norm_lw = normal_width\n self.top_lw = select_width\n self.ax = self.figure.gca()\n self.ax.set_prop_cycle('color', color_cycle)\n self.leline_lookup = {}\n self.letext_lookup = {}\n self.last_top = None\n self.legend = None\n self.resized_legend = False\n\n def plot(self, *args, **kwargs):\n if self.figure is None:\n raise ValueError('figure closed already')\n\n if 'picker' not in kwargs:\n kwargs['picker'] = self.picker\n kwargs['linewidth'] = self.norm_lw\n if 'lw' in kwargs:\n del kwargs['lw']\n return self.ax.plot(*args, **kwargs)\n\n def setup(self):\n if self.figure is None:\n raise ValueError('figure closed already')\n\n 
self.figure.tight_layout()\n # Put a legend to the right of the current axis\n ax_lines, ax_labels = self.ax.get_legend_handles_labels()\n self.legend = self.ax.legend(ax_lines, ax_labels, loc='center left',\n bbox_to_anchor=(1, 0.5), fancybox=True)\n le_lines = self.legend.get_lines()\n le_texts = self.legend.get_texts()\n\n for leline, letext, axline in zip(le_lines, le_texts, ax_lines):\n self.leline_lookup[leline] = (letext, axline)\n self.letext_lookup[letext] = (leline, axline)\n leline.set_picker(self.picker)\n letext.set_picker(self.picker)\n letext.set_alpha(0.5)\n\n le_texts[-1].set_alpha(1.0)\n ax_lines[-1].set_zorder(2)\n ax_lines[-1].set_linewidth(self.top_lw)\n self.last_top = (le_texts[-1], ax_lines[-1])\n\n self.figure.register_pick_event(self.leline_lookup, self.legend_line_picked)\n self.figure.register_pick_event(self.letext_lookup, self.legend_text_picked)\n self.figure.setup_callbacks()\n self.figure.canvas.mpl_connect('draw_event', self.fix_legend_location)\n self.figure.canvas.mpl_connect('close_event', self.figure_closed)\n self.figure.canvas.mpl_connect('resize_event', self.figure_resized)\n\n # noinspection PyUnusedLocal\n def figure_closed(self, event):\n self.figure.close_figure()\n self.figure = None\n self.ax = None\n self.leline_lookup = None\n self.letext_lookup = None\n self.last_top = None\n self.legend = None\n\n # noinspection PyUnusedLocal\n def figure_resized(self, event):\n self.resized_legend = False\n self.fix_legend_location(None)\n\n # noinspection PyUnusedLocal\n def fix_legend_location(self, event):\n if not self.resized_legend:\n self.figure.tight_layout()\n inv_tran = self.figure.transFigure.inverted()\n leg_box = inv_tran.transform(self.legend.get_window_extent())\n leg_width = leg_box[1][0] - leg_box[0][0]\n box = self.ax.get_position()\n # print box.x0, box.y0, box.width, box.height, leg_width, leg_frame.get_height()\n self.ax.set_position([box.x0, box.y0, box.width - leg_width, box.height])\n self.resized_legend = True\n self.figure.canvas.draw()\n\n def legend_line_picked(self, artist):\n letext, axline = self.leline_lookup[artist]\n visible = not axline.get_visible()\n if visible:\n artist.set_alpha(1.0)\n else:\n artist.set_alpha(0.2)\n if visible and (self.last_top[1] is not axline):\n # set to be top line\n self.legend_text_picked(letext, draw=False)\n self.figure.set_line_visibility(axline, visible)\n\n def legend_text_picked(self, artist, draw=True):\n leline, axline = self.letext_lookup[artist]\n self.last_top[0].set_alpha(0.5)\n self.last_top[1].set_zorder(1)\n self.last_top[1].set_linewidth(self.norm_lw)\n axline.set_zorder(2)\n artist.set_alpha(1.0)\n axline.set_linewidth(self.top_lw)\n self.last_top = (artist, axline)\n\n # if draw is False, this method is not called from\n # legend_line_picked(), so we'll never have recursion issues.\n if draw:\n if not axline.get_visible():\n # set line to be visible if not\n # draw() will be called in legend_line_picked\n self.legend_line_picked(leline)\n else:\n self.figure.canvas.draw()\n\n\n# noinspection PyAbstractClass\nclass MarkerFigure(Figure):\n def __init__(self, **kwargs):\n Figure.__init__(self, **kwargs)\n self.markers = []\n self.epsilon = 10.0\n self.drag_idx = -1\n self.timer = None\n self.marker_line_info = None\n self.pick_sets = []\n self.pick_funs = []\n\n def set_line_visibility(self, axline, visible):\n axline.set_visible(visible)\n if not visible:\n # delete all markers on this line\n del_idx_list = [idx for idx, item in enumerate(self.markers) if item[2] is axline]\n 
for targ_idx in reversed(del_idx_list):\n an, pt, _, _ = self.markers[targ_idx]\n del self.markers[targ_idx]\n # print targ_idx, an\n an.set_visible(False)\n pt.set_visible(False)\n\n self.canvas.draw()\n\n def register_pick_event(self, artist_set, fun):\n self.pick_sets.append(artist_set)\n self.pick_funs.append(fun)\n\n def on_button_release(self, event):\n \"\"\"Disable data cursor dragging. \"\"\"\n if event.button == 1:\n self.drag_idx = -1\n\n def on_motion(self, event):\n \"\"\"Move data cursor around. \"\"\"\n ax = event.inaxes\n if self.drag_idx >= 0 and ax is not None and event.button == 1:\n xmin, xmax = ax.get_xlim()\n ymin, ymax = ax.get_ylim()\n anno, pt, line, bg = self.markers[self.drag_idx]\n x, y = _find_closest_point(event.xdata, event.ydata,\n line.get_xdata(), line.get_ydata(),\n xmax - xmin, ymax - ymin)\n pt.set_data([x], [y])\n xstr, ystr = float_to_si_string(x, 4), float_to_si_string(y, 4)\n anno.set_text('x: %s\\ny: %s' % (xstr, ystr))\n anno.xy = (x, y)\n self.canvas.restore_region(bg)\n anno.set_visible(True)\n pt.set_visible(True)\n ax.draw_artist(anno)\n ax.draw_artist(pt)\n self.canvas.blit(ax.bbox)\n\n def _get_idx_under_point(self, event):\n \"\"\"Find selected data cursor.\"\"\"\n mx = event.x\n my = event.y\n mind = None\n minidx = None\n # find closest marker point\n for idx, (an, pt, _, _) in enumerate(self.markers):\n xv, yv = pt.get_xdata()[0], pt.get_ydata()[0]\n xp, yp = event.inaxes.transData.transform([xv, yv])\n # print xv, yv, xp, yp, mx, my\n d = ((mx - xp) ** 2 + (my - yp) ** 2) ** 0.5\n if mind is None or d < mind:\n mind = d\n minidx = idx\n\n if mind is not None and mind < self.epsilon:\n return minidx\n return -1\n\n def on_pick(self, event):\n artist = event.artist\n if not artist.get_visible():\n return\n for idx, artist_set in enumerate(self.pick_sets):\n if artist in artist_set:\n self.pick_funs[idx](artist)\n return\n\n if isinstance(artist, Line2D):\n mevent = event.mouseevent\n # figure out if we picked marker or line\n self.drag_idx = self._get_idx_under_point(mevent)\n\n if self.drag_idx >= 0:\n # picked marker.\n ax = mevent.inaxes\n an, pt, _, _ = self.markers[self.drag_idx]\n an.set_visible(False)\n pt.set_visible(False)\n self.canvas.draw()\n self.markers[self.drag_idx][-1] = self.canvas.copy_from_bbox(ax.bbox)\n an.set_visible(True)\n pt.set_visible(True)\n ax.draw_artist(an)\n ax.draw_artist(pt)\n self.canvas.blit(ax.bbox)\n\n else:\n # save data to plot marker later\n mxval = mevent.xdata\n button = mevent.button\n if mxval is not None and button == 1 and not self.marker_line_info:\n self.marker_line_info = (artist, mxval, mevent.ydata,\n button, mevent.inaxes)\n elif isinstance(artist, Annotation):\n # delete marker.\n mevent = event.mouseevent\n if mevent.button == 3:\n targ_idx = None\n for idx, (an, pt, _, _) in enumerate(self.markers):\n if an is artist:\n targ_idx = idx\n break\n if targ_idx is not None:\n an, pt, _, _ = self.markers[targ_idx]\n del self.markers[targ_idx]\n an.set_visible(False)\n pt.set_visible(False)\n self.canvas.draw()\n\n def _create_marker(self):\n if self.marker_line_info:\n artist, mxval, myval, button, ax = self.marker_line_info\n xmin, xmax = ax.get_xlim()\n ymin, ymax = ax.get_ylim()\n mxval, myval = _find_closest_point(mxval, myval,\n artist.get_xdata(), artist.get_ydata(),\n xmax - xmin, ymax - ymin)\n pt = ax.plot(mxval, myval, 'ko', picker=5.0)[0]\n xstr, ystr = float_to_si_string(mxval, 4), float_to_si_string(myval, 4)\n msg = 'x: %s\\ny: %s' % (xstr, ystr)\n anno = ax.annotate(msg, 
xy=(mxval, myval), bbox=dict(boxstyle='round', fc='yellow', alpha=0.3),\n arrowprops=dict(arrowstyle=\"->\"))\n anno.draggable()\n anno.set_picker(True)\n\n self.markers.append([anno, pt, artist, None])\n ax.draw_artist(anno)\n ax.draw_artist(pt)\n self.canvas.blit(ax.bbox)\n self.marker_line_info = None\n\n def close_figure(self):\n self.timer.stop()\n\n def setup_callbacks(self):\n self.canvas.mpl_connect('pick_event', self.on_pick)\n self.canvas.mpl_connect('motion_notify_event', self.on_motion)\n self.canvas.mpl_connect('button_release_event', self.on_button_release)\n # use timer to make sure we won't create multiple markers at once when\n # clicked on overlapping lines.\n self.timer = self.canvas.new_timer(interval=100)\n self.timer.add_callback(self._create_marker)\n self.timer.start()\n",
"# -*- coding: utf-8 -*-\n\n\"\"\"This module contains commonly used technology related classes and functions.\n\"\"\"\n\nimport os\nimport abc\nimport itertools\nfrom typing import List, Union, Tuple, Dict, Any, Optional, Set\n\nimport numpy as np\nimport h5py\nimport openmdao.api as omdao\n\nfrom bag.core import BagProject\nfrom ..math.interpolate import interpolate_grid\nfrom bag.math.dfun import VectorDiffFunction, DiffFunction\nfrom ..mdao.core import GroupBuilder\nfrom ..io import fix_string, to_bytes\nfrom ..simulation.core import SimulationManager\n\n\ndef _equal(a, b, rtol, atol):\n \"\"\"Returns True if a == b. a and b are both strings, floats or numpy arrays.\"\"\"\n # python 2/3 compatibility: convert raw bytes to string\n a = fix_string(a)\n b = fix_string(b)\n\n if isinstance(a, str):\n return a == b\n return np.allclose(a, b, rtol=rtol, atol=atol)\n\n\ndef _equal_list(a, b, rtol, atol):\n \"\"\"Returns True if a == b. a and b are list of strings/floats/numpy arrays.\"\"\"\n if len(a) != len(b):\n return False\n for a_item, b_item in zip(a, b):\n if not _equal(a_item, b_item, rtol, atol):\n return False\n return True\n\n\ndef _index_in_list(item_list, item, rtol, atol):\n \"\"\"Returns index of item in item_list, with tolerance checking for floats.\"\"\"\n for idx, test in enumerate(item_list):\n if _equal(test, item, rtol, atol):\n return idx\n return -1\n\n\ndef _in_list(item_list, item, rtol, atol):\n \"\"\"Returns True if item is in item_list, with tolerance checking for floats.\"\"\"\n return _index_in_list(item_list, item, rtol, atol) >= 0\n\n\nclass CircuitCharacterization(SimulationManager, metaclass=abc.ABCMeta):\n \"\"\"A class that handles characterization of a circuit.\n\n This class sweeps schematic parameters and run a testbench with a single analysis.\n It will then save the simulation data in a format CharDB understands.\n\n For now, this class will overwrite existing data, so please backup if you need to.\n\n Parameters\n ----------\n prj : BagProject\n the BagProject instance.\n spec_file : str\n the SimulationManager specification file.\n tb_type : str\n the testbench type name. 
The parameter dictionary corresponding to this\n testbench should have the following entries (in addition to those required\n by Simulation Manager:\n\n outputs :\n list of testbench output names to save.\n constants :\n constant values used to identify this simulation run.\n sweep_params:\n a dictionary from testbench parameters to (start, stop, num_points)\n sweep tuple.\n\n compression : str\n HDF5 compression method.\n \"\"\"\n\n def __init__(self, prj, spec_file, tb_type, compression='gzip'):\n super(CircuitCharacterization, self).__init__(prj, spec_file)\n self._compression = compression\n self._outputs = self.specs[tb_type]['outputs']\n self._constants = self.specs[tb_type]['constants']\n self._sweep_params = self.specs[tb_type]['sweep_params']\n\n def record_results(self, data, tb_type, val_list):\n # type: (Dict[str, Any], str, Tuple[Any, ...]) -> None\n \"\"\"Record simulation results to file.\n\n Override implementation in SimulationManager in order to save data\n in a format that CharDB understands.\n \"\"\"\n env_list = self.specs['sim_envs']\n\n tb_specs = self.specs[tb_type]\n results_dir = tb_specs['results_dir']\n\n os.makedirs(results_dir, exist_ok=True)\n fname = os.path.join(results_dir, 'data.hdf5')\n\n with h5py.File(fname, 'w') as f:\n for key, val in self._constants.items():\n f.attrs[key] = val\n for key, val in self._sweep_params.items():\n f.attrs[key] = val\n\n for env in env_list:\n env_result, sweep_list = self._get_env_result(data, env)\n\n grp = f.create_group('%d' % len(f))\n for key, val in zip(self.swp_var_list, val_list):\n grp.attrs[key] = val\n # h5py workaround: explicitly store strings as encoded unicode data\n grp.attrs['env'] = to_bytes(env)\n grp.attrs['sweep_params'] = [to_bytes(swp) for swp in sweep_list]\n\n for name, val in env_result.items():\n grp.create_dataset(name, data=val, compression=self._compression)\n\n def get_sim_results(self, tb_type, val_list):\n # type: (str, Tuple[Any, ...]) -> Dict[str, Any]\n # TODO: implement this.\n raise NotImplementedError('not implemented yet.')\n\n def _get_env_result(self, sim_results, env):\n \"\"\"Extract results from a given simulation environment from the given data.\n\n all output sweep parameter order and data shape must be the same.\n\n Parameters\n ----------\n sim_results : dict[string, any]\n the simulation results dictionary\n env : str\n the target simulation environment\n\n Returns\n -------\n results : dict[str, any]\n the results from a given simulation environment.\n sweep_list : list[str]\n a list of sweep parameter order.\n \"\"\"\n if 'corner' not in sim_results:\n # no corner sweep anyways\n results = {output: sim_results[output] for output in self._outputs}\n sweep_list = sim_results['sweep_params'][self._outputs[0]]\n return results, sweep_list\n\n corner_list = sim_results['corner'].tolist()\n results = {}\n # we know all sweep order and shape is the same.\n test_name = self._outputs[0]\n sweep_list = list(sim_results['sweep_params'][test_name])\n shape = sim_results[test_name].shape\n # make numpy array slice index list\n index_list = [slice(0, l) for l in shape]\n if 'corner' in sweep_list:\n idx = sweep_list.index('corner')\n index_list[idx] = corner_list.index(env)\n del sweep_list[idx]\n\n # store outputs in results\n for output in self._outputs:\n results[output] = sim_results[output][index_list]\n\n return results, sweep_list\n\n\nclass CharDB(abc.ABC):\n \"\"\"The abstract base class of a database of characterization data.\n\n This class provides useful 
query/optimization methods and ways to store/retrieve\n data.\n\n Parameters\n ----------\n root_dir : str\n path to the root characterization data directory. Supports environment variables.\n constants : Dict[str, Any]\n constants dictionary.\n discrete_params : List[str]\n a list of parameters that should take on discrete values.\n init_params : Dict[str, Any]\n a dictionary of initial parameter values. All parameters should be specified,\n and None should be used if the parameter value is not set.\n env_list : List[str]\n list of simulation environments to consider.\n update : bool\n By default, CharDB saves and load post-processed data directly. If update is True,\n CharDB will update the post-process data from raw simulation data. Defaults to\n False.\n rtol : float\n relative tolerance used to compare constants/sweep parameters/sweep attributes.\n atol : float\n relative tolerance used to compare constants/sweep parameters/sweep attributes.\n compression : str\n HDF5 compression method. Used only during post-processing.\n method : str\n interpolation method.\n opt_package : str\n default Python optimization package. Supports 'scipy' or 'pyoptsparse'. Defaults\n to 'scipy'.\n opt_method : str\n default optimization method. Valid values depends on the optimization package.\n Defaults to 'SLSQP'.\n opt_settings : Optional[Dict[str, Any]]\n optimizer specific settings.\n \"\"\"\n\n def __init__(self, # type: CharDB\n root_dir, # type: str\n constants, # type: Dict[str, Any]\n discrete_params, # type: List[str]\n init_params, # type: Dict[str, Any]\n env_list, # type: List[str]\n update=False, # type: bool\n rtol=1e-5, # type: float\n atol=1e-18, # type: float\n compression='gzip', # type: str\n method='spline', # type: str\n opt_package='scipy', # type: str\n opt_method='SLSQP', # type: str\n opt_settings=None, # type: Optional[Dict[str, Any]]\n **kwargs # type: **kwargs\n ):\n # type: (...) -> None\n\n root_dir = os.path.abspath(os.path.expandvars(root_dir))\n\n if not os.path.isdir(root_dir):\n # error checking\n raise ValueError('Directory %s not found.' 
% root_dir)\n if 'env' in discrete_params:\n discrete_params.remove('env')\n\n if opt_settings is None:\n opt_settings = {}\n else:\n pass\n\n if opt_method == 'IPOPT' and not opt_settings:\n # set default IPOPT settings\n opt_settings['option_file_name'] = ''\n\n self._discrete_params = discrete_params\n self._params = init_params.copy()\n self._env_list = env_list\n self._config = dict(opt_package=opt_package,\n opt_method=opt_method,\n opt_settings=opt_settings,\n rtol=rtol,\n atol=atol,\n method=method,\n )\n\n cache_fname = self.get_cache_file(root_dir, constants)\n if not os.path.isfile(cache_fname) or update:\n sim_fname = self.get_sim_file(root_dir, constants)\n results = self._load_sim_data(sim_fname, constants, discrete_params)\n sim_data, total_params, total_values, self._constants = results\n self._data = self.post_process_data(sim_data, total_params, total_values, self._constants)\n\n # save to cache\n with h5py.File(cache_fname, 'w') as f:\n for key, val in self._constants.items():\n f.attrs[key] = val\n sp_grp = f.create_group('sweep_params')\n # h5py workaround: explicitly store strings as encoded unicode data\n sp_grp.attrs['sweep_order'] = [to_bytes(swp) for swp in total_params]\n for par, val_list in zip(total_params, total_values):\n if val_list.dtype.kind == 'U':\n # unicode array, convert to raw bytes array\n val_list = val_list.astype('S')\n sp_grp.create_dataset(par, data=val_list, compression=compression)\n data_grp = f.create_group('data')\n for name, data_arr in self._data.items():\n data_grp.create_dataset(name, data=data_arr, compression=compression)\n else:\n # load from cache\n with h5py.File(cache_fname, 'r') as f:\n self._constants = dict(iter(f.attrs.items()))\n sp_grp = f['sweep_params']\n total_params = [fix_string(swp) for swp in sp_grp.attrs['sweep_order']]\n total_values = [self._convert_hdf5_array(sp_grp[par][()]) for par in total_params]\n data_grp = f['data']\n self._data = {name: data_grp[name][()] for name in data_grp}\n\n # change axes location so discrete parameters are at the start of sweep_params\n env_disc_params = ['env'] + discrete_params\n for idx, dpar in enumerate(env_disc_params):\n if total_params[idx] != dpar:\n # swap\n didx = total_params.index(dpar)\n ptmp = total_params[idx]\n vtmp = total_values[idx]\n total_params[idx] = total_params[didx]\n total_values[idx] = total_values[didx]\n total_params[didx] = ptmp\n total_values[didx] = vtmp\n for key, val in self._data.items():\n self._data[key] = np.swapaxes(val, idx, didx)\n\n sidx = len(self._discrete_params) + 1\n self._cont_params = total_params[sidx:]\n self._cont_values = total_values[sidx:]\n self._discrete_values = total_values[1:sidx]\n self._env_values = total_values[0]\n\n # get lazy function table.\n shape = [total_values[idx].size for idx in range(len(env_disc_params))]\n\n fun_name_iter = itertools.chain(iter(self._data.keys()), self.derived_parameters())\n # noinspection PyTypeChecker\n self._fun = {name: np.full(shape, None, dtype=object) for name in fun_name_iter}\n\n @staticmethod\n def _convert_hdf5_array(arr):\n # type: (np.ndarray) -> np.ndarray\n \"\"\"Check if raw bytes array, if so convert to unicode array.\"\"\"\n if arr.dtype.kind == 'S':\n return arr.astype('U')\n return arr\n\n def _load_sim_data(self, # type: CharDB\n fname, # type: str\n constants, # type: Dict[str, Any]\n discrete_params # type: List[str]\n ):\n # type: (...) 
-> Tuple[Dict[str, np.ndarray], List[str], List[np.ndarray], Dict[str, Any]]\n \"\"\"Returns the simulation data.\n\n Parameters\n ----------\n fname : str\n the simulation filename.\n constants : Dict[str, Any]\n the constants dictionary.\n discrete_params : List[str]\n a list of parameters that should take on discrete values.\n\n Returns\n -------\n data_dict : Dict[str, np.ndarray]\n a dictionary from output name to data as numpy array.\n master_attrs : List[str]\n list of attribute name for each dimension of numpy array.\n master_values : List[np.ndarray]\n list of attribute values for each dimension.\n file_constants : Dict[str, Any]\n the constants dictionary in file.\n \"\"\"\n if not os.path.exists(fname):\n raise ValueError('Simulation file %s not found.' % fname)\n\n rtol, atol = self.get_config('rtol'), self.get_config('atol') # type: float\n\n master_attrs = None\n master_values = None\n master_dict = None\n file_constants = None\n with h5py.File(fname, 'r') as f:\n # check constants is consistent\n for key, val in constants.items():\n if not _equal(val, f.attrs[key], rtol, atol):\n raise ValueError('sim file attr %s = %s != %s' % (key, f.attrs[key], val))\n\n # simple error checking.\n if len(f) == 0:\n raise ValueError('simulation file has no data.')\n\n # check that attributes sweep forms regular grid.\n attr_table = {}\n for gname in f:\n grp = f[gname]\n for key, val in grp.attrs.items():\n # convert raw bytes to unicode\n # python 2/3 compatibility: convert raw bytes to string\n val = fix_string(val)\n\n if key != 'sweep_params':\n if key not in attr_table:\n attr_table[key] = []\n val_list = attr_table[key]\n if not _in_list(val_list, val, rtol, atol):\n val_list.append(val)\n\n expected_len = 1\n for val in attr_table.values():\n expected_len *= len(val)\n\n if expected_len != len(f):\n raise ValueError('Attributes of f does not form complete sweep. '\n 'Expect length = %d, but actually = %d.' 
% (expected_len, len(f)))\n\n # check all discrete parameters in attribute table.\n for disc_par in discrete_params:\n if disc_par not in attr_table:\n raise ValueError('Discrete attribute %s not found' % disc_par)\n\n # get attribute order\n attr_order = sorted(attr_table.keys())\n # check all non-discrete attribute value list lies on regular grid\n attr_values = [np.array(sorted(attr_table[attr])) for attr in attr_order]\n for attr, aval_list in zip(attr_order, attr_values):\n if attr not in discrete_params and attr != 'env':\n test_vec = np.linspace(aval_list[0], aval_list[-1], len(aval_list), endpoint=True)\n if not np.allclose(test_vec, aval_list, rtol=rtol, atol=atol):\n raise ValueError('Attribute %s values do not lie on regular grid' % attr)\n\n # consolidate all data into one giant numpy array.\n # first compute numpy array shape\n test_grp = f['0']\n sweep_params = [fix_string(tmpvar) for tmpvar in test_grp.attrs['sweep_params']]\n\n # get constants dictionary\n file_constants = {}\n for key, val in f.attrs.items():\n if key not in sweep_params:\n file_constants[key] = val\n\n master_attrs = attr_order + sweep_params\n swp_values = [np.linspace(f.attrs[var][0], f.attrs[var][1], f.attrs[var][2],\n endpoint=True) for var in sweep_params] # type: List[np.array]\n master_values = attr_values + swp_values\n master_shape = [len(val_list) for val_list in master_values]\n master_index = [slice(0, n) for n in master_shape]\n master_dict = {}\n for gname in f:\n grp = f[gname]\n # get index of the current group in the giant array.\n # Note: using linear search to compute index now, but attr_val_list should be small.\n for aidx, (attr, aval_list) in enumerate(zip(attr_order, attr_values)):\n master_index[aidx] = _index_in_list(aval_list, grp.attrs[attr], rtol, atol)\n\n for output in grp:\n dset = grp[output]\n if output not in master_dict:\n master_dict[output] = np.empty(master_shape, dtype=dset.dtype)\n master_dict[output][master_index] = dset\n\n return master_dict, master_attrs, master_values, file_constants\n\n def __getitem__(self, param):\n # type: (str) -> Any\n \"\"\"Returns the given parameter value.\n\n Parameters\n ----------\n param : str\n parameter name.\n\n Returns\n -------\n val : Any\n parameter value.\n \"\"\"\n return self._params[param]\n\n def __setitem__(self, key, value):\n # type: (str, Any) -> None\n \"\"\"Sets the given parameter value.\n\n Parameters\n ----------\n key : str\n parameter name.\n value : Any\n parameter value. None to unset.\n \"\"\"\n rtol, atol = self.get_config('rtol'), self.get_config('atol')\n\n if key in self._discrete_params:\n if value is not None:\n idx = self._discrete_params.index(key)\n if not _in_list(self._discrete_values[idx], value, rtol, atol):\n raise ValueError('Cannot set discrete variable %s value to %s' % (key, value))\n elif key in self._cont_params:\n if value is not None:\n idx = self._cont_params.index(key)\n val_list = self._cont_values[idx]\n if value < val_list[0] or value > val_list[-1]:\n raise ValueError('Variable %s value %s out of bounds.' % (key, value))\n else:\n raise ValueError('Unknown variable %s.' 
% key)\n\n self._params[key] = value\n\n def get_config(self, name):\n # type: (str) -> Any\n \"\"\"Returns the configuration value.\n\n Parameters\n ----------\n name : str\n configuration name.\n\n Returns\n -------\n val : Any\n configuration value.\n \"\"\"\n return self._config[name]\n\n def set_config(self, name, value):\n # type: (str, Any) -> None\n \"\"\"Sets the configuration value.\n\n Parameters\n ----------\n name : str\n configuration name.\n value : Any\n configuration value.\n \"\"\"\n if name not in self._config:\n raise ValueError('Unknown configuration %s' % name)\n self._config[name] = value\n\n @property\n def env_list(self):\n # type: () -> List[str]\n \"\"\"The list of simulation environments to consider.\"\"\"\n return self._env_list\n\n @env_list.setter\n def env_list(self, new_env_list):\n # type: (List[str]) -> None\n \"\"\"Sets the list of simulation environments to consider.\"\"\"\n self._env_list = new_env_list\n\n @classmethod\n def get_sim_file(cls, root_dir, constants):\n # type: (str, Dict[str, Any]) -> str\n \"\"\"Returns the simulation data file name.\n\n Parameters\n ----------\n root_dir : str\n absolute path to the root characterization data directory.\n constants : Dict[str, Any]\n constants dictionary.\n\n Returns\n -------\n fname : str\n the simulation data file name.\n \"\"\"\n raise NotImplementedError('Not implemented')\n\n @classmethod\n def get_cache_file(cls, root_dir, constants):\n # type: (str, Dict[str, Any]) -> str\n \"\"\"Returns the post-processed characterization data file name.\n\n Parameters\n ----------\n root_dir : str\n absolute path to the root characterization data directory.\n constants : Dict[str, Any]\n constants dictionary.\n\n Returns\n -------\n fname : str\n the post-processed characterization data file name.\n \"\"\"\n raise NotImplementedError('Not implemented')\n\n @classmethod\n def post_process_data(cls, sim_data, sweep_params, sweep_values, constants):\n # type: (Dict[str, np.ndarray], List[str], List[np.ndarray], Dict[str, Any]) -> Dict[str, np.ndarray]\n \"\"\"Postprocess simulation data.\n\n Parameters\n ----------\n sim_data : Dict[str, np.ndarray]\n the simulation data as a dictionary from output name to numpy array.\n sweep_params : List[str]\n list of parameter name for each dimension of numpy array.\n sweep_values : List[np.ndarray]\n list of parameter values for each dimension.\n constants : Dict[str, Any]\n the constants dictionary.\n\n Returns\n -------\n data : Dict[str, np.ndarray]\n a dictionary of post-processed data.\n \"\"\"\n raise NotImplementedError('Not implemented')\n\n @classmethod\n def derived_parameters(cls):\n # type: () -> List[str]\n \"\"\"Returns a list of derived parameters.\"\"\"\n return []\n\n @classmethod\n def compute_derived_parameters(cls, fdict):\n # type: (Dict[str, DiffFunction]) -> Dict[str, DiffFunction]\n \"\"\"Compute derived parameter functions.\n\n Parameters\n ----------\n fdict : Dict[str, DiffFunction]\n a dictionary from core parameter name to the corresponding function.\n\n Returns\n -------\n deriv_dict : Dict[str, DiffFunction]\n a dictionary from derived parameter name to the corresponding function.\n \"\"\"\n return {}\n\n def _get_function_index(self, **kwargs):\n # type: (**kwargs) -> List[int]\n \"\"\"Returns the function index corresponding to given discrete parameter values.\n\n simulation environment index will be set to 0\n\n Parameters\n ----------\n **kwargs :\n discrete parameter values.\n\n Returns\n -------\n fidx_list : List[int]\n the function 
index.\n \"\"\"\n rtol, atol = self.get_config('rtol'), self.get_config('atol')\n\n fidx_list = [0]\n for par, val_list in zip(self._discrete_params, self._discrete_values):\n val = kwargs.get(par, self[par])\n if val is None:\n raise ValueError('Parameter %s value not specified' % par)\n\n val_idx = _index_in_list(val_list, val, rtol, atol)\n if val_idx < 0:\n raise ValueError('Discrete parameter %s have illegal value %s' % (par, val))\n fidx_list.append(val_idx)\n\n return fidx_list\n\n def _get_function_helper(self, name, fidx_list):\n # type: (str, Union[List[int], Tuple[int]]) -> DiffFunction\n \"\"\"Helper method for get_function()\n\n Parameters\n ----------\n name : str\n name of the function.\n fidx_list : Union[List[int], Tuple[int]]\n function index.\n\n Returns\n -------\n fun : DiffFunction\n the interpolator function.\n \"\"\"\n # get function table index\n fidx_list = tuple(fidx_list)\n ftable = self._fun[name]\n if ftable[fidx_list] is None:\n if name in self._data:\n # core parameter\n char_data = self._data[name]\n\n # get scale list and data index\n scale_list = []\n didx = list(fidx_list) # type: List[Union[int, slice]]\n for vec in self._cont_values:\n scale_list.append((vec[0], vec[1] - vec[0]))\n didx.append(slice(0, vec.size))\n\n # make interpolator.\n cur_data = char_data[didx]\n method = self.get_config('method')\n ftable[fidx_list] = interpolate_grid(scale_list, cur_data, method=method, extrapolate=True)\n else:\n # derived parameter\n core_fdict = {fn: self._get_function_helper(fn, fidx_list) for fn in self._data}\n deriv_fdict = self.compute_derived_parameters(core_fdict)\n for fn, deriv_fun in deriv_fdict.items():\n self._fun[fn][fidx_list] = deriv_fun\n\n return ftable[fidx_list]\n\n def get_function(self, name, env='', **kwargs):\n # type: (str, str, **kwargs) -> Union[VectorDiffFunction, DiffFunction]\n \"\"\"Returns a function for the given output.\n\n Parameters\n ----------\n name : str\n name of the function.\n env : str\n if not empty, we will return function for just the given simulation environment.\n **kwargs :\n dictionary of discrete parameter values.\n\n Returns\n -------\n output : Union[VectorDiffFunction, DiffFunction]\n the output vector function.\n \"\"\"\n fidx_list = self._get_function_index(**kwargs)\n if not env:\n fun_list = []\n for env in self.env_list:\n occur_list = np.where(self._env_values == env)[0]\n if occur_list.size == 0:\n raise ValueError('environment %s not found.')\n env_idx = occur_list[0]\n fidx_list[0] = env_idx\n fun_list.append(self._get_function_helper(name, fidx_list))\n return VectorDiffFunction(fun_list)\n else:\n occur_list = np.where(self._env_values == env)[0]\n if occur_list.size == 0:\n raise ValueError('environment %s not found.')\n env_idx = occur_list[0]\n fidx_list[0] = env_idx\n return self._get_function_helper(name, fidx_list)\n\n def get_fun_sweep_params(self):\n # type: () -> Tuple[List[str], List[Tuple[float, float]]]\n \"\"\"Returns interpolation function sweep parameter names and values.\n\n Returns\n -------\n sweep_params : List[str]\n list of parameter names.\n sweep_range : List[Tuple[float, float]]\n list of parameter range\n \"\"\"\n return self._cont_params, [(vec[0], vec[-1]) for vec in self._cont_values]\n\n def _get_fun_arg(self, **kwargs):\n # type: (**kwargs) -> np.ndarray\n \"\"\"Make numpy array of interpolation function arguments.\"\"\"\n val_list = []\n for par in self._cont_params:\n val = kwargs.get(par, self[par])\n if val is None:\n raise ValueError('Parameter %s value not 
specified.' % par)\n val_list.append(val)\n\n return np.array(val_list)\n\n def query(self, **kwargs):\n # type: (**kwargs) -> Dict[str, np.ndarray]\n \"\"\"Query the database for the values associated with the given parameters.\n\n All parameters must be specified.\n\n Parameters\n ----------\n **kwargs :\n parameter values.\n\n Returns\n -------\n results : Dict[str, np.ndarray]\n the characterization results.\n \"\"\"\n results = {}\n arg = self._get_fun_arg(**kwargs)\n for name in self._data:\n fun = self.get_function(name, **kwargs)\n results[name] = fun(arg)\n\n for var in itertools.chain(self._discrete_params, self._cont_params):\n results[var] = kwargs.get(var, self[var])\n\n results.update(self.compute_derived_parameters(results))\n\n return results\n\n def minimize(self, # type: CharDB\n objective, # type: str\n define=None, # type: List[Tuple[str, int]]\n cons=None, # type: Dict[str, Dict[str, float]]\n vector_params=None, # type: Set[str]\n debug=False, # type: bool\n **kwargs # type: **kwargs\n ):\n # type: (...) -> Dict[str, Union[np.ndarray, float]]\n \"\"\"Find operating point that minimizes the given objective.\n\n Parameters\n ----------\n objective : str\n the objective to minimize. Must be a scalar.\n define : List[Tuple[str, int]]\n list of expressions to define new variables. Each\n element of the list is a tuple of string and integer. The string\n contains a python assignment that computes the variable from\n existing ones, and the integer indicates the variable shape.\n\n Note that define can also be used to enforce relationships between\n existing variables. Using transistor as an example, defining\n 'vgs = vds' will force the vgs of vds of the transistor to be\n equal.\n cons : Dict[str, Dict[str, float]]\n a dictionary from variable name to constraints of that variable.\n see OpenMDAO documentations for details on constraints.\n vector_params : Set[str]\n set of input variables that are vector instead of scalar. An input\n variable is a vector if it can change across simulation environments.\n debug : bool\n True to enable debugging messages. 
Defaults to False.\n **kwargs :\n known parameter values.\n\n Returns\n -------\n results : Dict[str, Union[np.ndarray, float]]\n the results dictionary.\n \"\"\"\n cons = cons or {}\n fidx_list = self._get_function_index(**kwargs)\n builder = GroupBuilder()\n\n params_ranges = dict(zip(self._cont_params,\n ((vec[0], vec[-1]) for vec in self._cont_values)))\n # add functions\n fun_name_iter = itertools.chain(iter(self._data.keys()), self.derived_parameters())\n for name in fun_name_iter:\n fun_list = []\n for idx, env in enumerate(self.env_list):\n fidx_list[0] = idx\n fun_list.append(self._get_function_helper(name, fidx_list))\n\n builder.add_fun(name, fun_list, self._cont_params, params_ranges,\n vector_params=vector_params)\n\n # add expressions\n for expr, ndim in define:\n builder.add_expr(expr, ndim)\n\n # update input bounds from constraints\n input_set = builder.get_inputs()\n var_list = builder.get_variables()\n\n for name in input_set:\n if name in cons:\n setup = cons[name]\n if 'equals' in setup:\n eq_val = setup['equals']\n builder.set_input_limit(name, equals=eq_val)\n else:\n vmin = vmax = None\n if 'lower' in setup:\n vmin = setup['lower']\n if 'upper' in setup:\n vmax = setup['upper']\n builder.set_input_limit(name, lower=vmin, upper=vmax)\n\n # build the group and make the problem\n grp, input_bounds = builder.build()\n\n top = omdao.Problem()\n top.root = grp\n\n opt_package = self.get_config('opt_package') # type: str\n opt_settings = self.get_config('opt_settings')\n\n if opt_package == 'scipy':\n driver = top.driver = omdao.ScipyOptimizer()\n print_opt_name = 'disp'\n elif opt_package == 'pyoptsparse':\n driver = top.driver = omdao.pyOptSparseDriver()\n print_opt_name = 'print_results'\n else:\n raise ValueError('Unknown optimization package: %s' % opt_package)\n\n driver.options['optimizer'] = self.get_config('opt_method')\n driver.options[print_opt_name] = debug\n driver.opt_settings.update(opt_settings)\n\n # add constraints\n constants = {}\n for name, setup in cons.items():\n if name not in input_bounds:\n # add constraint\n driver.add_constraint(name, **setup)\n\n # add inputs\n for name in input_set:\n eq_val, lower, upper, ndim = input_bounds[name]\n val = kwargs.get(name, self[name]) # type: float\n if val is None:\n val = eq_val\n comp_name = 'comp__%s' % name\n if val is not None:\n val = np.atleast_1d(np.ones(ndim) * val)\n constants[name] = val\n top.root.add(comp_name, omdao.IndepVarComp(name, val=val), promotes=[name])\n else:\n avg = (lower + upper) / 2.0\n span = upper - lower\n val = np.atleast_1d(np.ones(ndim) * avg)\n top.root.add(comp_name, omdao.IndepVarComp(name, val=val), promotes=[name])\n driver.add_desvar(name, lower=lower, upper=upper, adder=-avg, scaler=1.0 / span)\n # driver.add_desvar(name, lower=lower, upper=upper)\n\n # add objective and setup\n driver.add_objective(objective)\n top.setup(check=debug)\n\n # somehow html file is not viewable.\n if debug:\n omdao.view_model(top, outfile='CharDB_debug.html')\n\n # set constants\n for name, val in constants.items():\n top[name] = val\n\n top.run()\n\n results = {var: kwargs.get(var, self[var]) for var in self._discrete_params}\n for var in var_list:\n results[var] = top[var]\n\n return results\n",
"# -*- coding: utf-8 -*-\n\n\"\"\"This module defines core BAG openmdao classes.\"\"\"\n\nimport numpy as np\nimport networkx as nx\nimport openmdao.api as omdao\n\nimport bag.util.parse\n\nfrom .components import VecFunComponent\n\n\nclass GroupBuilder(object):\n \"\"\"A class that builds new OpenMDAO groups.\n\n This class provides a simple interface to define new variables as function of\n other variables, and it tracks the variable dependencies using a directed\n acyclic graph.\n\n \"\"\"\n\n def __init__(self):\n self._g = nx.DiGraph()\n self._input_vars = set()\n\n def _add_node(self, name, ndim, **kwargs):\n \"\"\"Helper method to add a node and keep track of input variables.\"\"\"\n self._g.add_node(name, ndim=ndim, **kwargs)\n self._input_vars.add(name)\n\n def _add_edge(self, parent, child):\n \"\"\"Helper method to add an edge and update input variables.\"\"\"\n self._g.add_edge(parent, child)\n try:\n self._input_vars.remove(child)\n except KeyError:\n pass\n\n def get_inputs(self):\n \"\"\"Returns a set of current input variable names.\n\n Returns\n -------\n input_vars : set[str]\n a set of input variable names.\n \"\"\"\n return self._input_vars.copy()\n\n def get_variables(self):\n \"\"\"Returns a list of variables.\n\n Returns\n -------\n var_list : list[str]\n a list of variables.\n \"\"\"\n return list(self._g.nodes_iter())\n\n def get_variable_info(self, name):\n \"\"\"Returns the range and dimension of the given variable.\n\n Parameters\n ----------\n name : str\n variable name.\n\n Returns\n -------\n min : float\n minimum value.\n max : float\n maximum value.\n ndim : int\n variable dimension.\n \"\"\"\n nattr = self._g.node[name]\n return nattr.copy()\n\n def add_fun(self, var_name, fun_list, params, param_ranges, vector_params=None):\n \"\"\"Add a new variable defined by the given list of functions.\n\n Parameters\n ----------\n var_name : str\n variable name.\n fun_list : list[bag.math.interpolate.Interpolator]\n list of functions, one for each dimension.\n params : list[str]\n list of parameter names. Parameter names may repeat, in which case the\n same parameter will be used for multiple arguments of the function.\n param_ranges : dict[str, (float, float)]\n a dictionary of parameter valid range.\n vector_params : set[str]\n set of parameters that are vector instead of scalar. If a parameter\n is a vector, it will be the same size as the output, and each function\n only takes in the corresponding element of the parameter.\n \"\"\"\n vector_params = vector_params or set()\n ndim = len(fun_list)\n\n # error checking\n for par in params:\n if par not in param_ranges:\n raise ValueError('Valid range of %s not specified.' % par)\n\n # add inputs\n for par, (par_min, par_max) in param_ranges.items():\n par_dim = ndim if par in vector_params else 1\n if par not in self._g:\n # add input to graph if it's not in there.\n self._add_node(par, par_dim)\n\n nattrs = self._g.node[par]\n if nattrs['ndim'] != par_dim:\n # error checking.\n raise ValueError('Variable %s has dimension mismatch.' % par)\n # update input range\n nattrs['min'] = max(par_min, nattrs.get('min', par_min))\n nattrs['max'] = min(par_max, nattrs.get('max', par_max))\n\n # add current variable\n if var_name not in self._g:\n self._add_node(var_name, ndim)\n\n nattrs = self._g.node[var_name]\n # error checking.\n if nattrs['ndim'] != ndim:\n raise ValueError('Variable %s has dimension mismatch.' % var_name)\n if self._g.in_degree(var_name) > 0:\n raise Exception('Variable %s already has other dependencies.' 
% var_name)\n\n nattrs['fun_list'] = fun_list\n nattrs['params'] = params\n nattrs['vec_params'] = vector_params\n for parent in param_ranges.keys():\n self._add_edge(parent, var_name)\n\n def add_var(self, variable, vmin, vmax, ndim=1):\n \"\"\"Adds a new independent variable.\n\n Parameters\n ----------\n variable : str\n the variable to add\n vmin : float\n the minimum allowable value.\n vmax : float\n the maximum allowable value.\n ndim : int\n the dimension of the variable. Defaults to 1.\n \"\"\"\n if variable in self._g:\n raise Exception('Variable %s already exists.' % variable)\n self._add_node(variable, ndim, min=vmin, max=vmax)\n\n def set_input_limit(self, var, equals=None, lower=None, upper=None):\n \"\"\"Sets the limit on the given input variable.\n\n Parameters\n ----------\n var : str\n name of the variable.\n equals : float or None\n if given, the equality value.\n lower : float or None\n if given, the minimum.\n upper : float or None\n if given, the maximum.\n \"\"\"\n if var in self._g:\n if self._g.in_degree(var) > 0:\n raise Exception('Variable %s is not an input variable' % var)\n nattr = self._g.node[var]\n if equals is not None:\n nattr['equals'] = equals\n lower = upper = equals\n print(var, lower, upper)\n if lower is not None:\n nattr['min'] = max(nattr.get('min', lower), lower)\n if upper is not None:\n nattr['max'] = min(nattr.get('max', upper), upper)\n print(var, nattr['min'], nattr['max'])\n\n def add_expr(self, eqn, ndim):\n \"\"\"Adds a new variable with the given expression.\n\n Parameters\n ----------\n eqn : str\n An equation of the form \"<var> = <expr>\", where var\n is the output variable name, and expr is the expression.\n All variables in expr must be already added.\n ndim : int\n the dimension of the output variable.\n \"\"\"\n variable, expr = eqn.split('=', 1)\n variable = variable.strip()\n expr = expr.strip()\n\n if variable not in self._g:\n self._add_node(variable, ndim)\n nattrs = self._g.node[variable]\n if nattrs['ndim'] != ndim:\n raise Exception('Dimension mismatch for %s' % variable)\n if self._g.in_degree(variable) > 0:\n raise Exception('%s already depends on other variables' % variable)\n\n invars = bag.util.parse.get_variables(expr)\n for parent in invars:\n if parent not in self._g:\n raise Exception('Variable %s is not defined.' % parent)\n self._add_edge(parent, variable)\n\n nattrs['expr'] = expr\n\n def build(self, debug=False):\n \"\"\"Returns a OpenMDAO Group from the variable graph.\n\n Parameters\n ----------\n debug : bool\n True to print debug messages.\n\n Returns\n -------\n grp : omdao.Group\n the OpenMDAO group that computes all variables.\n input_bounds : dict[str, any]\n a dictionary from input variable name to (min, max, ndim) tuple.\n \"\"\"\n input_bounds = {}\n ndim_dict = {}\n\n if not nx.is_directed_acyclic_graph(self._g):\n raise Exception('Dependency loop detected')\n\n grp = omdao.Group()\n prom = ['*']\n for var in nx.topological_sort(self._g):\n nattrs = self._g.node[var]\n ndim = nattrs['ndim']\n ndim_dict[var] = ndim\n if self._g.in_degree(var) == 0:\n if debug:\n # input variable\n print('Input variable: %s' % var)\n # range checking\n vmin, vmax = nattrs['min'], nattrs['max']\n veq = nattrs.get('equals', None)\n if vmin > vmax:\n raise Exception('Variable %s input range not valid.' 
% var)\n input_bounds[var] = veq, vmin, vmax, ndim\n else:\n init_vals = {par: np.zeros(ndim_dict[par]) for par in self._g.predecessors_iter(var)}\n comp_name = 'comp__%s' % var\n if 'expr' in nattrs:\n eqn = '{}={}'.format(var, nattrs['expr'])\n init_vals[var] = np.zeros(ndim)\n # noinspection PyTypeChecker\n grp.add(comp_name, omdao.ExecComp(eqn, **init_vals), promotes=prom)\n elif 'fun_list' in nattrs:\n params = nattrs['params']\n fun_list = nattrs['fun_list']\n vec_params = nattrs['vec_params']\n comp = VecFunComponent(var, fun_list, params, vector_params=vec_params)\n # noinspection PyTypeChecker\n grp.add(comp_name, comp, promotes=prom)\n else:\n raise Exception('Unknown attributes: {}'.format(nattrs))\n\n return grp, input_bounds\n",
"# -*- coding: utf-8 -*-\n\n\"\"\"This module defines the differentiable function class.\"\"\"\n\nfrom typing import Union, List, Optional, Tuple\n\nimport abc\n\nimport numpy as np\n\n\nclass DiffFunction(abc.ABC):\n \"\"\"An abstract class representing a differentiable scalar function.\n\n Supports Numpy broadcasting. Defaults to using finite difference for derivative calculation.\n\n Parameters\n ----------\n input_ranges : List[Tuple[Optional[float], Optional[float]]]\n input ranges.\n delta_list : Optional[List[float]]\n a list of finite difference step size for each input. If None,\n finite difference will be disabled.\n \"\"\"\n\n def __init__(self, input_ranges, delta_list=None):\n # type: (List[Tuple[Optional[float], Optional[float]]], Optional[List[float]]) -> None\n # error checking\n self._ndim = len(input_ranges)\n if delta_list is not None and len(delta_list) != self._ndim:\n raise ValueError('finite difference list length inconsistent.')\n\n self._input_ranges = input_ranges\n self.delta_list = delta_list\n\n @property\n def input_ranges(self):\n # type: () -> List[Tuple[Optional[float], Optional[float]]]\n return self._input_ranges\n\n @property\n def ndim(self):\n # type: () -> int\n \"\"\"Number of input dimensions.\"\"\"\n return self._ndim\n\n @abc.abstractmethod\n def __call__(self, xi):\n \"\"\"Interpolate at the given coordinates.\n\n Numpy broadcasting rules apply.\n\n Parameters\n ----------\n xi : array_like\n The coordinates to evaluate, with shape (..., ndim)\n\n Returns\n -------\n val : np.multiarray.ndarray\n The interpolated values at the given coordinates.\n \"\"\"\n raise NotImplementedError('Not implemented')\n\n def get_input_range(self, idx):\n # type: (int) -> Tuple[Optional[float], Optional[float]]\n \"\"\"Returns the input range of the given dimension.\"\"\"\n return self._input_ranges[idx]\n\n def deriv(self, xi, j):\n \"\"\"Calculate the derivative at the given coordinates with respect to input j.\n\n Numpy broadcasting rules apply.\n\n Parameters\n ----------\n xi : array_like\n The coordinates to evaluate, with shape (..., ndim)\n j : int\n input index.\n\n Returns\n -------\n val : np.multiarray.ndarray\n The derivatives at the given coordinates.\n \"\"\"\n return self._fd(xi, j, self.delta_list[j])\n\n def jacobian(self, xi):\n \"\"\"Calculate the Jacobian at the given coordinates.\n\n Numpy broadcasting rules apply.\n\n If finite difference step sizes are not specified,\n will call deriv() in a for loop to compute the Jacobian.\n\n Parameters\n ----------\n xi : array_like\n The coordinates to evaluate, with shape (..., ndim)\n\n Returns\n -------\n val : np.multiarray.ndarray\n The Jacobian matrices at the given coordinates.\n \"\"\"\n if self.delta_list:\n return self._fd_jacobian(xi, self.delta_list)\n else:\n xi = np.asarray(xi, dtype=float)\n ans = np.empty(xi.shape)\n for n in range(self.ndim):\n ans[..., n] = self.deriv(xi, n)\n return ans\n\n def _fd(self, xi, idx, delta):\n \"\"\"Calculate the derivative along the given index using central finite difference.\n\n Parameters\n ----------\n xi : array_like\n The coordinates to evaluate, with shape (..., ndim)\n idx : int\n The index to calculate the derivative on.\n delta : float\n The finite difference step size.\n\n Returns\n -------\n val : np.multiarray.ndarray\n The derivatives at the given coordinates.\n \"\"\"\n if idx < 0 or idx >= self.ndim:\n raise ValueError('Invalid derivative index: %d' % idx)\n\n xi = np.asarray(xi, dtype=float)\n if xi.shape[-1] != self.ndim:\n raise 
ValueError(\"The requested sample points xi have dimension %d, \"\n \"but this interpolator has dimension %d\" % (xi.shape[-1], self.ndim))\n\n # use broadcasting to evaluate two points at once\n xtest = np.broadcast_to(xi, (2,) + xi.shape).copy()\n xtest[0, ..., idx] += delta / 2.0\n xtest[1, ..., idx] -= delta / 2.0\n val = self(xtest)\n ans = (val[0] - val[1]) / delta # type: np.ndarray\n\n if ans.size == 1 and not np.isscalar(ans):\n return ans[0]\n return ans\n\n def _fd_jacobian(self, xi, delta_list):\n \"\"\"Calculate the Jacobian matrix using central finite difference.\n\n Parameters\n ----------\n xi : array_like\n The coordinates to evaluate, with shape (..., ndim)\n delta_list : List[float]\n list of finite difference step sizes for each input.\n\n Returns\n -------\n val : np.multiarray.ndarray\n The Jacobian matrices at the given coordinates.\n \"\"\"\n xi = np.asarray(xi, dtype=float)\n if xi.shape[-1] != self.ndim:\n raise ValueError(\"The requested sample points xi have dimension %d, \"\n \"but this interpolator has dimension %d\" % (xi.shape[-1], self.ndim))\n\n # use broadcasting to evaluate all points at once\n xtest = np.broadcast_to(xi, (2 * self.ndim,) + xi.shape).copy()\n for idx, delta in enumerate(delta_list):\n xtest[2 * idx, ..., idx] += delta / 2.0\n xtest[2 * idx + 1, ..., idx] -= delta / 2.0\n\n val = self(xtest)\n ans = np.empty(xi.shape)\n for idx, delta in enumerate(delta_list):\n ans[..., idx] = (val[2 * idx, ...] - val[2 * idx + 1, ...]) / delta\n return ans\n\n def transform_input(self, amat, bmat):\n # type: (np.multiarray.ndarray, np.multiarray.ndarray) -> DiffFunction\n \"\"\"Returns f(Ax + B), where f is this function and A, B are matrices.\n\n Parameters\n ----------\n amat : np.multiarray.ndarray\n the input transform matrix.\n bmat : np.multiarray.ndarray\n the input shift matrix.\n\n Returns\n -------\n dfun : DiffFunction\n a scalar differential function.\n \"\"\"\n return InLinTransformFunction(self, amat, bmat)\n\n def __add__(self, other):\n # type: (Union[DiffFunction, float, int, np.multiarray.ndarray]) -> DiffFunction\n if isinstance(other, DiffFunction):\n return SumDiffFunction(self, other, f2_sgn=1.0)\n elif isinstance(other, float) or isinstance(other, int):\n return ScaleAddFunction(self, other, 1.0)\n elif isinstance(other, np.ndarray):\n return ScaleAddFunction(self, np.asscalar(other), 1.0)\n else:\n raise NotImplementedError('Unknown type %s' % type(other))\n\n def __radd__(self, other):\n # type: (Union[DiffFunction, float, int, np.multiarray.ndarray]) -> DiffFunction\n return self.__add__(other)\n\n def __sub__(self, other):\n # type: (Union[DiffFunction, float, int, np.multiarray.ndarray]) -> DiffFunction\n if isinstance(other, DiffFunction):\n return SumDiffFunction(self, other, f2_sgn=-1.0)\n elif isinstance(other, float) or isinstance(other, int):\n return ScaleAddFunction(self, -other, 1.0)\n elif isinstance(other, np.ndarray):\n return ScaleAddFunction(self, -np.asscalar(other), 1.0)\n else:\n raise NotImplementedError('Unknown type %s' % type(other))\n\n def __rsub__(self, other):\n # type: (Union[DiffFunction, float, int, np.multiarray.ndarray]) -> DiffFunction\n if isinstance(other, DiffFunction):\n return SumDiffFunction(other, self, f2_sgn=-1.0)\n elif isinstance(other, float) or isinstance(other, int):\n return ScaleAddFunction(self, other, -1.0)\n elif isinstance(other, np.ndarray):\n return ScaleAddFunction(self, np.asscalar(other), -1.0)\n else:\n raise NotImplementedError('Unknown type %s' % type(other))\n\n def 
__mul__(self, other):\n # type: (Union[DiffFunction, float, int, np.multiarray.ndarray]) -> DiffFunction\n if isinstance(other, DiffFunction):\n return ProdFunction(self, other)\n elif isinstance(other, float) or isinstance(other, int):\n return ScaleAddFunction(self, 0.0, other)\n elif isinstance(other, np.ndarray):\n return ScaleAddFunction(self, 0.0, np.asscalar(other))\n else:\n raise NotImplementedError('Unknown type %s' % type(other))\n\n def __rmul__(self, other):\n # type: (Union[DiffFunction, float, int, np.multiarray.ndarray]) -> DiffFunction\n return self.__mul__(other)\n\n def __pow__(self, other):\n # type: (Union[float, int, np.multiarray.ndarray]) -> DiffFunction\n if isinstance(other, float) or isinstance(other, int):\n return PwrFunction(self, other, scale=1.0)\n elif isinstance(other, np.ndarray):\n return PwrFunction(self, np.asscalar(other), scale=1.0)\n else:\n raise NotImplementedError('Unknown type %s' % type(other))\n\n def __div__(self, other):\n # type: (Union[DiffFunction, float, int, np.multiarray.ndarray]) -> DiffFunction\n if isinstance(other, DiffFunction):\n return DivFunction(self, other)\n elif isinstance(other, float) or isinstance(other, int):\n return ScaleAddFunction(self, 0.0, 1.0 / other)\n elif isinstance(other, np.ndarray):\n return ScaleAddFunction(self, 0.0, 1.0 / np.asscalar(other))\n else:\n raise NotImplementedError('Unknown type %s' % type(other))\n\n def __truediv__(self, other):\n # type: (Union[DiffFunction, float, int, np.multiarray.ndarray]) -> DiffFunction\n return self.__div__(other)\n\n def __rdiv__(self, other):\n # type: (Union[DiffFunction, float, int, np.multiarray.ndarray]) -> DiffFunction\n if isinstance(other, DiffFunction):\n return DivFunction(other, self)\n elif isinstance(other, float) or isinstance(other, int):\n return PwrFunction(self, -1.0, scale=other)\n elif isinstance(other, np.ndarray):\n return PwrFunction(self, -1.0, scale=np.asscalar(other))\n else:\n raise NotImplementedError('Unknown type %s' % type(other))\n\n def __rtruediv__(self, other):\n # type: (Union[DiffFunction, float, int, np.multiarray.ndarray]) -> DiffFunction\n return self.__rdiv__(other)\n\n def __neg__(self):\n # type: () -> DiffFunction\n return ScaleAddFunction(self, 0.0, -1.0)\n\n\nclass InLinTransformFunction(DiffFunction):\n \"\"\"A DiffFunction where the input undergoes a linear transformation first.\n\n This function computes f(Ax + B), where A and B are matrices.\n\n Parameters\n ----------\n f1 : DiffFunction\n the parent function.\n amat : np.multiarray.ndarray\n the input transform matrix.\n bmat : np.multiarray.ndarray\n the input shift matrix.\n \"\"\"\n def __init__(self, f1, amat, bmat):\n # type: (DiffFunction, np.multiarray.ndarray, np.multiarray.ndarray) -> None\n if amat.shape[0] != f1.ndim or bmat.shape[0] != f1.ndim:\n raise ValueError('amat/bmat number of rows must be %d' % f1.ndim)\n if len(bmat.shape) != 1:\n raise ValueError('bmat must be 1 dimension.')\n\n # domain of f(Ax+B) cannot be represented by input ranges.\n super(InLinTransformFunction, self).__init__([(None, None)] * amat.shape[1], delta_list=None)\n self._f1 = f1\n self._amat = amat\n self._bmat = bmat.reshape(-1, 1)\n\n def _get_arg(self, xi):\n xi = np.asarray(xi)\n xi_shape = xi.shape\n my_ndim = self.ndim\n if xi_shape[-1] != my_ndim:\n raise ValueError('Last dimension must have size %d' % my_ndim)\n\n xi = xi.reshape(-1, my_ndim)\n return (self._amat.dot(xi.T) + self._bmat).T, xi_shape\n\n def __call__(self, xi):\n farg, xi_shape = self._get_arg(xi)\n 
result = self._f1(farg)\n if np.isscalar(result):\n return result\n return result.reshape(xi_shape[:-1])\n\n def deriv(self, xi, j):\n jmat = self.jacobian(xi)\n return jmat[..., 0, j]\n\n def jacobian(self, xi):\n farg, xi_shape = self._get_arg(xi)\n jmat = self._f1.jacobian(farg).dot(self._amat)\n shape_trunc = xi_shape[:-1] # type: Tuple[int, ...]\n return jmat.reshape(shape_trunc + (1, self.ndim))\n\n\nclass ScaleAddFunction(DiffFunction):\n \"\"\"A DiffFunction multiply by a scalar then added to a scalar.\n\n Parameters\n ----------\n f1 : DiffFunction\n the first function.\n adder : float\n constant to add.\n scaler : float\n constant to multiply.\n \"\"\"\n def __init__(self, f1, adder, scaler):\n # type: (DiffFunction, float, float) -> None\n DiffFunction.__init__(self, f1.input_ranges, delta_list=None)\n self._f1 = f1\n self._adder = adder\n self._scaler = scaler\n\n def __call__(self, xi):\n return self._f1(xi) * self._scaler + self._adder\n\n def deriv(self, xi, j):\n return self._f1.deriv(xi, j) * self._scaler\n\n def jacobian(self, xi):\n return self._f1.jacobian(xi) * self._scaler\n\n\ndef _intersection(*args):\n input_ranges = []\n for bound_list in zip(*args):\n lmax, umin = None, None\n for l, u in bound_list:\n if l is None:\n lmax, umin = None, None\n break\n else:\n if lmax is None:\n lmax, umin = l, u\n else:\n lmax = max(l, lmax)\n umin = min(u, umin)\n\n input_ranges.append((lmax, umin))\n\n return input_ranges\n\n\nclass SumDiffFunction(DiffFunction):\n \"\"\"Sum or Difference of two DiffFunctions\n\n Parameters\n ----------\n f1 : DiffFunction\n the first function.\n f2 : DiffFunction\n the second function.\n f2_sgn : float\n 1 if adding, -1 if subtracting.\n \"\"\"\n def __init__(self, f1, f2, f2_sgn=1.0):\n # type: (DiffFunction, DiffFunction, float) -> None\n if f1.ndim != f2.ndim:\n raise ValueError('functions dimension mismatch.')\n\n DiffFunction.__init__(self, _intersection(f1.input_ranges, f2.input_ranges), delta_list=None)\n self._f1 = f1\n self._f2 = f2\n self._f2_sgn = f2_sgn\n\n def __call__(self, xi):\n return self._f1(xi) + self._f2_sgn * self._f2(xi)\n\n def deriv(self, xi, j):\n return self._f1.deriv(xi, j) + self._f2_sgn * self._f2.deriv(xi, j)\n\n def jacobian(self, xi):\n return self._f1.jacobian(xi) + self._f2_sgn * self._f2.jacobian(xi)\n\n\nclass ProdFunction(DiffFunction):\n \"\"\"product of two DiffFunctions\n\n Parameters\n ----------\n f1 : DiffFunction\n the first function.\n f2 : DiffFunction\n the second function.\n \"\"\"\n def __init__(self, f1, f2):\n # type: (DiffFunction, DiffFunction) -> None\n if f1.ndim != f2.ndim:\n raise ValueError('functions dimension mismatch.')\n\n DiffFunction.__init__(self, _intersection(f1.input_ranges, f2.input_ranges), delta_list=None)\n self._f1 = f1\n self._f2 = f2\n\n def __call__(self, xi):\n return self._f1(xi) * self._f2(xi)\n\n def deriv(self, xi, j):\n return self._f1.deriv(xi, j) * self._f2(xi) + self._f1(xi) * self._f2.deriv(xi, j)\n\n def jacobian(self, xi):\n f1_val = self._f1(xi)[..., np.newaxis]\n f2_val = self._f2(xi)[..., np.newaxis]\n f1_jac = self._f1.jacobian(xi)\n f2_jac = self._f2.jacobian(xi)\n return f1_jac * f2_val + f1_val * f2_jac\n\n\nclass DivFunction(DiffFunction):\n \"\"\"division of two DiffFunctions\n\n Parameters\n ----------\n f1 : DiffFunction\n the first function.\n f2 : DiffFunction\n the second function.\n \"\"\"\n def __init__(self, f1, f2):\n # type: (DiffFunction, DiffFunction) -> None\n if f1.ndim != f2.ndim:\n raise ValueError('functions dimension 
mismatch.')\n\n DiffFunction.__init__(self, _intersection(f1.input_ranges, f2.input_ranges), delta_list=None)\n self._f1 = f1\n self._f2 = f2\n\n def __call__(self, xi):\n return self._f1(xi) / self._f2(xi)\n\n def deriv(self, xi, j):\n f2_val = self._f2(xi)\n return self._f1.deriv(xi, j) / f2_val - (self._f1(xi) * self._f2.deriv(xi, j) / (f2_val**2))\n\n def jacobian(self, xi):\n f1_val = self._f1(xi)[..., np.newaxis]\n f2_val = self._f2(xi)[..., np.newaxis]\n f1_jac = self._f1.jacobian(xi)\n f2_jac = self._f2.jacobian(xi)\n\n return f1_jac / f2_val - (f1_val * f2_jac) / (f2_val**2)\n\n\nclass PwrFunction(DiffFunction):\n \"\"\"a DiffFunction raised to a power.\n\n Parameters\n ----------\n f : DiffFunction\n the DiffFunction.\n pwr : float\n the power.\n scale : float\n scaling factor. Used to implement a / x.\n \"\"\"\n def __init__(self, f, pwr, scale=1.0):\n # type: (DiffFunction, float, float) -> None\n DiffFunction.__init__(self, f.input_ranges, delta_list=None)\n self._f = f\n self._pwr = pwr\n self._scale = scale\n\n def __call__(self, xi):\n return (self._f(xi) ** self._pwr) * self._scale\n\n def deriv(self, xi, j):\n return (self._f(xi) ** (self._pwr - 1) * self._pwr * self._f.deriv(xi, j)) * self._scale\n\n def jacobian(self, xi):\n f_val = self._f(xi)[..., np.newaxis]\n f_jac = self._f.jacobian(xi)\n return (f_jac * (f_val ** (self._pwr - 1) * self._pwr)) * self._scale\n\n\nclass VectorDiffFunction(object):\n \"\"\"A differentiable vector function.\n\n Parameters\n ----------\n fun_list : List[DiffFunction]\n list of interpolator functions, one for each element of the output vector.\n \"\"\"\n\n def __init__(self, fun_list):\n # type: (List[DiffFunction]) -> None\n # error checking\n if not fun_list:\n raise ValueError('No interpolators are given.')\n\n self._input_ranges = _intersection(*(f.input_ranges for f in fun_list))\n\n self._in_dim = fun_list[0].ndim\n for fun in fun_list:\n if fun.ndim != self._in_dim:\n raise ValueError('Interpolators input dimension mismatch.')\n\n self._fun_list = fun_list\n self._out_dim = len(fun_list)\n\n @property\n def in_dim(self):\n # type: () -> int\n \"\"\"Input dimension number.\"\"\"\n return self._in_dim\n\n @property\n def out_dim(self):\n # type: () -> int\n \"\"\"Output dimension number.\"\"\"\n return self._out_dim\n\n def get_input_range(self, idx):\n # type: (int) -> Tuple[Optional[float], Optional[float]]\n \"\"\"Returns the input range of the given dimension.\"\"\"\n return self._input_ranges[idx]\n\n def __call__(self, xi):\n \"\"\"Returns the output vector at the given coordinates.\n\n Parameters\n ----------\n xi : array-like\n The coordinates to evaluate, with shape (..., ndim)\n\n Returns\n -------\n val : numpy.array\n The interpolated values at the given coordinates.\n \"\"\"\n xi = np.asarray(xi, dtype=float)\n shape_trunc = xi.shape[:-1] # type: Tuple[int, ...]\n ans = np.empty(shape_trunc + (self._out_dim, ))\n for idx in range(self._out_dim):\n ans[..., idx] = self._fun_list[idx](xi)\n return ans\n\n def jacobian(self, xi):\n \"\"\"Calculate the Jacobian matrices of this function at the given coordinates.\n\n Parameters\n ----------\n xi : array-like\n The coordinates to evaluate, with shape (..., ndim)\n\n Returns\n -------\n val : numpy.array\n The jacobian matrix at the given coordinates.\n \"\"\"\n xi = np.asarray(xi, dtype=float)\n shape_trunc = xi.shape[:-1] # type: Tuple[int, ...]\n ans = np.empty(shape_trunc + (self._out_dim, self._in_dim))\n for m in range(self._out_dim):\n ans[..., m, :] = 
self._fun_list[m].jacobian(xi)\n return ans\n\n def deriv(self, xi, i, j):\n \"\"\"Compute the derivative of output i with respect to input j\n\n Parameters\n ----------\n xi : array-like\n The coordinates to evaluate, with shape (..., ndim)\n i : int\n output index.\n j : int\n input index.\n\n Returns\n -------\n val : numpy.array\n The derivatives at the given coordinates.\n \"\"\"\n return self._fun_list[i].deriv(xi, j)\n"
] |
[
[
"matplotlib.pyplot.imshow",
"numpy.amax",
"numpy.maximum",
"numpy.linspace",
"numpy.amin",
"matplotlib.figure.Figure.__init__",
"numpy.tile",
"matplotlib.pyplot.colorbar",
"numpy.ceil",
"matplotlib.pyplot.subplot",
"numpy.diff",
"numpy.argmin",
"numpy.floor",
"scipy.interpolate.interp1d",
"numpy.mod",
"matplotlib.pyplot.show",
"numpy.zeros",
"matplotlib.pyplot.figure"
],
[
"numpy.swapaxes",
"numpy.allclose",
"numpy.linspace",
"numpy.full",
"numpy.ones",
"numpy.array",
"numpy.where",
"numpy.empty"
],
[
"numpy.zeros"
],
[
"numpy.asscalar",
"numpy.asarray",
"numpy.broadcast_to",
"numpy.isscalar",
"numpy.empty"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [
"0.13",
"1.6",
"0.14",
"1.10",
"0.15",
"1.4",
"0.16",
"1.9",
"0.19",
"1.5",
"0.18",
"1.2",
"1.7",
"0.12",
"1.0",
"0.17",
"1.3",
"1.8"
],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
krish-dx/machina
|
[
"f93bb6f5aca1feccd71fc509bd6370d2015e2d85"
] |
[
"machina/algos/r2d2_sac.py"
] |
[
"\"\"\"\nThis is an implementation of R2D2(Soft Actor Critic ver).\nSee https://openreview.net/pdf?id=r1lyTjAqYX and https://arxiv.org/abs/1801.01290\n\"\"\"\n\nimport torch\nimport torch.nn as nn\n\nfrom machina import loss_functional as lf\nfrom machina import logger\nfrom machina.traj import traj_functional as tf\n\n\ndef train(traj,\n pol, qfs, targ_qfs, log_alpha,\n optim_pol, optim_qfs, optim_alpha,\n epoch, batch_size, seq_length, burn_in_length, # optimization hypers\n tau, gamma, sampling, reparam=True,\n log_enable=True,\n ):\n \"\"\"\n Train function for soft actor critic.\n\n Parameters\n ----------\n traj : Traj\n Off policy trajectory.\n pol : Pol\n Policy.\n qfs : list of SAVfunction\n Q function.\n targ_qfs : list of SAVfunction\n Target Q function.\n log_alpha : torch.Tensor\n Temperature parameter of entropy.\n optim_pol : torch.optim.Optimizer\n Optimizer for Policy.\n optim_qfs : list of torch.optim.Optimizer\n Optimizer for Q function.\n optim_alpha : torch.optim.Optimizer\n Optimizer for alpha.\n epoch : int\n Number of iteration.\n batch_size : int\n Number of batches.\n seq_length : int\n Length of batches.\n burn_in_length : int\n Length of batches for burn-in.\n tau : float\n Target updating rate.\n gamma : float\n Discounting rate.\n sampling : int\n Number of samping in calculating expectation.\n reparam : bool\n log_enable: bool\n If True, enable logging\n\n Returns\n -------\n result_dict : dict\n Dictionary which contains losses information.\n \"\"\"\n\n pol_losses = []\n _qf_losses = []\n alpha_losses = []\n if log_enable:\n logger.log(\"Optimizing...\")\n for batch, start_indices in traj.prioritized_random_batch_rnn(batch_size, seq_length, epoch, return_indices=True):\n batch, pol_loss, qf_losses, alpha_loss, td_losses = lf.r2d2_sac(\n pol, qfs, targ_qfs, log_alpha, batch, gamma, sampling, burn_in_length, reparam)\n\n optim_pol.zero_grad()\n pol_loss.backward()\n optim_pol.step()\n\n for optim_qf, qf_loss in zip(optim_qfs, qf_losses):\n optim_qf.zero_grad()\n qf_loss.backward()\n optim_qf.step()\n\n optim_alpha.zero_grad()\n alpha_loss.backward()\n optim_alpha.step()\n\n for qf, targ_qf in zip(qfs, targ_qfs):\n for q, targ_q in zip(qf.parameters(), targ_qf.parameters()):\n targ_q.detach().copy_((1 - tau) * targ_q.detach() + tau * q.detach())\n\n pol_losses.append(pol_loss.detach().cpu().numpy())\n _qf_losses.append(\n (sum(qf_losses) / len(qf_losses)).detach().cpu().numpy())\n alpha_losses.append(alpha_loss.detach().cpu().numpy())\n\n # update seq_pris\n train_length = seq_length - burn_in_length\n for i in range(batch_size):\n start = start_indices[i] + burn_in_length\n seq_indices = torch.arange(start, start+train_length-1)\n traj = tf.update_pris(\n traj, td_losses[:, i], seq_indices, update_epi_pris=True, seq_length=seq_length)\n\n if log_enable:\n logger.log(\"Optimization finished!\")\n\n return dict(\n PolLoss=pol_losses,\n QfLoss=_qf_losses,\n AlphaLoss=alpha_losses\n )\n"
] |
[
[
"torch.arange"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
vanshhhhh/io
|
[
"2ea1121e944629c2b462773c2d8d805da427311c"
] |
[
"tensorflow_io_gcs_filesystem/core/python/ops/__init__.py"
] |
[
"# Copyright 2021 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"GS\"\"\"\n\nimport os\nimport ctypes\nimport sys\nimport inspect\nimport warnings\nimport types\n\nimport tensorflow as tf\n\n\ndef _load_library(filename):\n \"\"\"_load_library\"\"\"\n f = inspect.getfile(sys._getframe(1)) # pylint: disable=protected-access\n\n # Construct filename\n f = os.path.join(os.path.dirname(f), filename)\n filenames = [f]\n\n # Add datapath to load if en var is set, used for running tests where shared\n # libraries are built in a different path\n datapath = os.environ.get(\"TFIO_DATAPATH\")\n if datapath is not None:\n # Build filename from:\n # `datapath` + `tensorflow_io_package` + `package_name` + `relpath_to_library`\n rootpath = os.path.dirname(sys.modules[\"tensorflow_io_gcs_filesystem\"].__file__)\n filename = sys.modules[__name__].__file__\n f = os.path.join(\n datapath,\n \"tensorflow_io_gcs_filesystem\",\n os.path.relpath(os.path.dirname(filename), rootpath),\n os.path.relpath(f, os.path.dirname(filename)),\n )\n filenames.append(f)\n # Function to load the library, return True if file system library is loaded\n load_fn = lambda f: tf.experimental.register_filesystem_plugin(f) is None\n\n # Try to load all paths for file, fail if none succeed\n errs = []\n for f in filenames:\n try:\n l = load_fn(f)\n if l is not None:\n return l\n except (tf.errors.NotFoundError, OSError) as e:\n errs.append(str(e))\n raise NotImplementedError(\n \"unable to open file: \"\n + f\"{filename}, from paths: {filenames}\\ncaused by: {errs}\"\n )\n\n\ntry:\n plugin_gs = _load_library(\"libtensorflow_io_gcs_filesystem.so\")\nexcept NotImplementedError as e:\n warnings.warn(f\"file system plugin for gs are not loaded: {e}\")\n"
] |
[
[
"tensorflow.experimental.register_filesystem_plugin"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
xueruoyao/FCN-pytorch
|
[
"a5019da3943f47fa4f7baed3640cdbfeae2d677e",
"a5019da3943f47fa4f7baed3640cdbfeae2d677e",
"a5019da3943f47fa4f7baed3640cdbfeae2d677e",
"a5019da3943f47fa4f7baed3640cdbfeae2d677e"
] |
[
"code/models/SegHRNet.py",
"code/utils/loss.py",
"code/models/road_extraction/SIINet_model/DeeplabV3_plus.py",
"code/models/Swin_LinkNet_SUA.py"
] |
[
"# ------------------------------------------------------------------------------\n# Copyright (c) Microsoft\n# Licensed under the MIT License.\n# Written by Ke Sun ([email protected]), Jingyi Xie ([email protected])\n# ------------------------------------------------------------------------------\n\nimport os\n\nimport torch.nn as nn\nimport torch._utils\nimport torch.nn.functional as F\n\nfrom .backbone import HighResolutionNet\nfrom .decoder.FPN_Seg_Decoder import HRNet_FPN_Seg_Decoder\n\nALIGN_CORNERS = True\nBN_MOMENTUM = 0.1\n\nBatchNorm2d=nn.BatchNorm2d\n\nclass SegHRNet(nn.Module):\n def __init__(self, in_ch, n_classes, backbone='hr-w32', pretrained=False):\n super().__init__()\n self.pretrained_path = \"\"\n self.backbone = backbone\n if backbone not in [\"hr-w18\", \"hr-w32\", \"hr-w48\"]:\n raise ValueError(\"model gets invalid backbone, expects in [hr-w18, hr-w32, hr-w48]\")\n if self.backbone == \"hr-w18\":\n self.pretrained_path = \"models/backbone/pretrained/hrnetv2_w18_imagenet_pretrained.pth\"\n elif self.backbone == \"hr-w32\":\n self.pretrained_path = \"models/backbone/pretrained/hrnetv2_w32_imagenet_pretrained.pth\"\n elif self.backbone == \"hr-w48\":\n self.pretrained_path = \"models/backbone/pretrained/hrnetv2_w48_imagenet_pretrained.pth\"\n\n self.backbone = HighResolutionNet(in_ch, backbone=backbone)\n self.decoder = HRNet_FPN_Seg_Decoder(self.backbone.last_inp_channels, n_classes)\n self.init_weights(pretrained=pretrained)\n \n def init_weights(self, pretrained):\n for _, m in self.named_modules():\n if isinstance(m, nn.Conv2d):\n nn.init.normal_(m.weight, std=0.001)\n elif isinstance(m, nn.BatchNorm2d):\n nn.init.constant_(m.weight, 1)\n nn.init.constant_(m.bias, 0)\n if pretrained:\n self.backbone.init_weights(pretrained=self.pretrained_path)\n\n def forward(self, input):\n x = self.backbone(input)\n x = self.decoder(x)\n output = F.interpolate(x, scale_factor=4, mode='bilinear', align_corners=ALIGN_CORNERS)\n return output\n\n\nif __name__ == \"__main__\":\n device = torch.device(\"cuda:0\" if torch.cuda.is_available() else \"cpu\")\n input = torch.autograd.Variable(torch.randn(1, 3, 512, 512)).to(device)\n net = SegHRNet(in_ch=3, n_classes=2).to(device)\n print(net(input).size())\n",
"\"\"\"\nDifferent Losses\n\"\"\"\n\nimport torch.nn as nn\nimport torch\nimport torch.nn.functional as F\nfrom typing import Optional, Union, List\nimport numpy as np\n\n\nclass Loss:\n def __init__(self, n_class: int, weight: Optional[torch.tensor] = None,\n smoothing: float = 0.):\n \"\"\" base class for different losses\n :param n_class: true label is range(0, n_class)\n :param weight: weight for each class\n :param smoothing: label smoothing,\n y(i) = smoothing / n_class, i != target\n y(i) = 1 - smoothing + smoothing / n_class, i == target\n \"\"\"\n self.n_class = n_class\n # weight\n if weight is None:\n self.weight = torch.ones(n_class, dtype=torch.float32)\n else:\n assert len(weight) == n_class, f\"loss __init__ weight_dim\" \\\n f\"({len(weight)}) != n_class({n_class})\"\n self.weight = weight.float() * n_class / torch.sum(weight)\n # smoothing\n assert 0 <= smoothing < 1, \"loss __init__ smoothing has to satisfy [0, 1), \" \\\n \"got {}\".format(smoothing)\n self.smoothing = smoothing\n self.loss = torch.zeros(1, dtype=torch.float32, requires_grad=True)\n\n def weighted_smoothed_one_hot(self, gts: torch.tensor):\n \"\"\" use one hot, label smoothing and weight to reconstruct gts\n :param gts: (batch_size, height, width)\n :return gts weighted one hot version, with shape (batch_size, n_class, height * width)\n \"\"\"\n assert len(gts.shape) == 3, \"loss weighted_one_hot gts must have 3 dimension\"\n batch_size = gts.shape[0]\n off_value = self.smoothing / self.n_class\n on_value = 1. - self.smoothing + off_value\n one_hot = F.one_hot(gts.reshape(batch_size, -1), num_classes=self.n_class).transpose(2, 1)\n ont_hot = one_hot * on_value + (torch.ones_like(one_hot) - one_hot) * off_value\n return one_hot * self.weight.reshape(-1, 1)\n\n def __call__(self, preds: torch.tensor, gts: torch.tensor):\n raise NotImplemented\n\n def state_dict(self):\n raise NotImplemented\n\n def load_state_dict(self, state_dict: dict):\n if str(type(self)) != state_dict[\"criterion_type\"]:\n raise TypeError(\"Criterion load, input dict has different criterion({}) with former \"\n \"instantiation({})\".format(state_dict[\"criterion_type\"], str(type(self))))\n state_dict[\"weight\"] = torch.from_numpy(np.array(state_dict[\"weight\"])).float()\n state_dict[\"loss\"] = torch.tensor([state_dict[\"loss\"]], dtype=torch.float32)\n self.__dict__.update(state_dict)\n\n def to(self, device):\n \"\"\" transfer criterion to device \"\"\"\n self.weight = self.weight.to(device)\n self.loss = self.loss.to(device)\n\n\nclass LogSoftmaxCELoss(Loss):\n \"\"\" log softmax + cross entropy loss\n \"\"\"\n def __init__(self, n_class: int, weight: Optional[torch.tensor] = None,\n smoothing: float = 0.):\n super().__init__(n_class=n_class, weight=weight, smoothing=smoothing)\n\n def __call__(self, preds: torch.tensor, gts: torch.tensor):\n \"\"\" calculate mean loss of the batch\n :param preds: (batch_size, n_class, height, width)\n :param gts: (batch_size, height, width)\n \"\"\"\n assert preds.shape[0] == gts.shape[0], f\"loss input preds has different batchsize({preds.shape[0]}) \"\\\n f\"compared to that of gts({gts.shape[0]})\"\n self.loss = torch.zeros_like(self.loss)\n batch_size = preds.shape[0]\n preds = F.log_softmax(preds, dim=1)\n gts = self.weighted_smoothed_one_hot(gts)\n preds = preds.reshape(batch_size, self.n_class, -1)\n # gts (batch_size, n_class, height * width)\n # preds (batch_size, n_class, height * width)\n self.loss = torch.sum(-gts * preds, dim=1)\n return torch.mean(self.loss, dim=[0, 1])\n\n def 
state_dict(self):\n return {\n \"criterion_type\": str(type(self)),\n \"n_class\": self.n_class,\n \"weight\": [self.weight[i].item() for i in range(self.n_class)],\n \"smoothing\": self.smoothing,\n \"loss\": self.loss.item()\n }\n\n\nclass SigmoidDiceLoss(Loss):\n \"\"\" sigmoid + dice loss\n dice_loss = 1 - (2 * |X ∩ Y| + eps) / (|X| + |Y| + eps)\n \"\"\"\n def __init__(self, n_class: int, weight: Optional[torch.tensor] = None,\n smoothing: float = 0., ignore_index: Union[int, List, None] = None,\n eps: float = 1.):\n super().__init__(n_class=n_class, weight=weight, smoothing=smoothing)\n self.eps = eps\n if isinstance(ignore_index, int):\n self.ignore_index = [ignore_index]\n elif isinstance(ignore_index, list) or ignore_index is None:\n self.ignore_index = ignore_index\n else:\n raise TypeError(\"loss __init__ wrong type for ignore_index, which should be int or list or None\")\n\n def __call__(self, preds: torch.tensor, gts: torch.tensor):\n \"\"\" calculate mean loss of the batch\n :param preds: (batch_size, n_class, height, width)\n :param gts: (batch_size, height, width)\n \"\"\"\n assert preds.shape[0] == gts.shape[0], f\"loss input preds has different batchsize({preds.shape[0]}) \" \\\n f\"compared to that of gts({gts.shape[0]})\"\n self.loss = torch.zeros_like(self.loss)\n batch_size = preds.shape[0]\n preds = torch.sigmoid(preds)\n gts = self.weighted_smoothed_one_hot(gts)\n # preds: (batch_size, n_class, height, width)\n # gts: (batch_size, n_class, height * width)\n count = 0\n for i in torch.arange(self.n_class):\n if self.ignore_index is None or i not in self.ignore_index:\n # take label = i as foreground, others as background\n # gts_single, preds_single: (batch_size, height * width)\n gts_single = gts[:, i]\n preds_single = preds[:, i].view(batch_size, -1)\n intersection = gts_single * preds_single\n # intersection: (batch_size, height * width)\n tem = (2 * intersection.sum(1) + self.eps) / (gts_single.sum(1) + preds_single.sum(1) + self.eps)\n self.loss += (1 - tem).mean()\n count += 1\n\n return self.loss / count\n\n def state_dict(self):\n return {\n \"criterion_type\": str(type(self)),\n \"n_class\": self.n_class,\n \"weight\": [self.weight[i].item() for i in range(self.n_class)],\n \"smoothing\": self.smoothing,\n \"loss\": self.loss.item(),\n \"eps\": self.eps,\n \"ignore_index\": self.ignore_index\n }\n\n\nclass ComposedLoss(Loss):\n \"\"\" LogSoftmaxCELoss + rate * SigmoidDiceLoss\n \"\"\"\n def __init__(self, n_class: int, weight: Optional[torch.tensor] = None,\n smoothing: float = 0., ignore_index: Union[int, List, None] = None,\n eps: float = 1., rate: float = 1.):\n super().__init__(n_class=n_class, weight=weight, smoothing=smoothing)\n self.rate = rate\n self.eps = eps\n if isinstance(ignore_index, int):\n self.ignore_index = [ignore_index]\n elif isinstance(ignore_index, list) or ignore_index is None:\n self.ignore_index = ignore_index\n else:\n raise TypeError(\"loss __init__ wrong type for ignore_index, which should be int or list or None\")\n\n self.CELoss = LogSoftmaxCELoss(n_class=self.n_class, weight=self.weight, smoothing=self.smoothing)\n self.DiceLoss = SigmoidDiceLoss(n_class=self.n_class, weight=None, smoothing=self.smoothing,\n ignore_index=self.ignore_index, eps=self.eps)\n\n def __call__(self, preds: torch.tensor, gts: torch.tensor):\n \"\"\" calculate mean loss of the batch\n :param preds: (batch_size, n_class, height, width)\n :param gts: (batch_size, height, width)\n \"\"\"\n # print(\"CELoss\", self.CELoss(preds, gts))\n # print(\"Dice\", 
self.DiceLoss(preds, gts))\n return self.CELoss(preds, gts) + self.rate * self.DiceLoss(preds, gts)\n\n def to(self, device):\n \"\"\" transfer criterion to device \"\"\"\n self.CELoss.to(device)\n self.DiceLoss.to(device)\n\n def state_dict(self):\n return {\n \"criterion_type\": str(type(self)),\n \"n_class\": self.n_class,\n \"weight\": [self.weight[i].item() for i in range(self.n_class)],\n \"smoothing\": self.smoothing,\n \"loss\": self.loss.item(),\n \"eps\": self.eps,\n \"ignore_index\": self.ignore_index,\n \"rate\": self.rate\n }\n\n def load_state_dict(self, state_dict: dict):\n if str(type(self)) != state_dict[\"criterion_type\"]:\n raise TypeError(\"Criterion load, input dict has different criterion({}) with former \"\n \"instantiation({})\".format(state_dict[\"criterion_type\"], str(type(self))))\n state_dict[\"weight\"] = torch.from_numpy(np.array(state_dict[\"weight\"])).float()\n state_dict[\"loss\"] = torch.tensor([state_dict[\"loss\"]], dtype=torch.float32)\n self.__dict__.update(state_dict)\n self.CELoss = LogSoftmaxCELoss(n_class=self.n_class, weight=self.weight, smoothing=self.smoothing)\n self.DiceLoss = SigmoidDiceLoss(n_class=self.n_class, weight=None, smoothing=self.smoothing,\n ignore_index=self.ignore_index, eps=self.eps)\n",
"'''\ndeeplab_v3+ : pytorch resnet 18/34 Basicblock\n resnet 50/101/152 Bottleneck\n'''\n__package__ = \"code.models.road_extrsction.SIINet_model\"\n\nimport torch\n# import torchvision\nfrom torch import nn\nimport torch.nn.functional as F\nfrom .BasicModule import BasicModule\nfrom .resnet import *\n\n\ndef resize(tensor, newsize):\n return F.interpolate(\n tensor, size=newsize, mode='bilinear', align_corners=True)\n\n\nclass ASPP(nn.Module):\n '''\n ASPP consists of (a) one 1x1 convolution and three 3x3 convolutions\n with rates = (6, 12, 18) when output stride = 16 (all with 256 filters\n and batch normalization), and (b) the image-level features as described in the paper\n Careful!! Content the output 1x1 conv.\n '''\n def __init__(self, in_channel=512, depth=128):\n super().__init__()\n self.in_channel = in_channel\n self.depth = depth\n # Global average pooling\n self.global_avg_pooling = nn.AdaptiveAvgPool2d((1, 1))\n self.conv = self._make_layer(kernel_size=1) # conv first, then upsample!\n\n self.atrous_block_1 = self._make_layer(kernel_size=1)\n self.atrous_block_2 = self._make_layer(3, 2, 2)\n self.atrous_block_6 = self._make_layer(3, 6, 6)\n self.atrous_block_12 = self._make_layer(3, 12, 12)\n\n self.conv_output = nn.Sequential(\n nn.Conv2d(depth*5, depth, kernel_size=1, stride=1),\n nn.BatchNorm2d(depth),\n nn.ReLU(inplace=True)\n )\n\n def _make_layer(self, kernel_size, padding=0, rate=1):\n ''' Let padding=dilation can make sure the input shape is same as output(ks=3) '''\n return nn.Sequential(\n nn.Conv2d(\n self.in_channel, self.depth, kernel_size, 1, padding=padding, dilation=rate),\n nn.BatchNorm2d(self.depth),\n nn.ReLU(inplace=True)\n )\n\n def forward(self, x):\n size = x.shape[2:]\n\n image_feature = self.global_avg_pooling(x)\n image_feature = self.conv(image_feature)\n image_feature = resize(image_feature, newsize=size)\n block_1 = self.atrous_block_1(x)\n block_2 = self.atrous_block_2(x)\n block_6 = self.atrous_block_6(x)\n block_12 = self.atrous_block_12(x)\n\n concat = [image_feature, block_1, block_2, block_6, block_12]\n x = self.conv_output(torch.cat(concat, 1))\n return x\n\n\nclass ASPP_test(nn.Module):\n '''\n ASPP consists of (a) one 1x1 convolution and three 3x3 convolutions\n with rates = (6, 12, 18) when output stride = 16 (all with 256 filters\n and batch normalization), and (b) the image-level features as described in the paper\n Careful!! 
Content the output 1x1 conv.\n '''\n def __init__(self, in_channel=512, depth=128, rate=1):\n super().__init__()\n self.in_channel = in_channel\n self.depth = depth\n\n self.atrous_block = self._make_layer(3, rate, rate)\n\n def _make_layer(self, kernel_size, padding=0, rate=1):\n ''' Let padding=dilation can make sure the input shape is same as output(ks=3) '''\n return nn.Sequential(\n nn.Conv2d(\n self.in_channel, self.depth, kernel_size, 1, padding=padding, dilation=rate),\n nn.BatchNorm2d(self.depth),\n nn.ReLU(inplace=True)\n )\n\n def forward(self, x):\n x = self.atrous_block(x)\n return x\n\n\nclass DeeplabV3_plus(BasicModule):\n ''' Main model: DeepLabV3+ '''\n def __init__(self, num_classes=2,\n resnet_arch='resnet50', output_stride=8, layer_num=2,\n aspp_rate=0):\n super().__init__()\n self.model_name = 'deeplabv3plus'\n self.layer_num = layer_num\n self.output_stride = output_stride\n aspp_depth = 256\n\n if resnet_arch == 'resnet50':\n encoder = resnet50(True, output_stride=self.output_stride)\n elif resnet_arch == 'resnet101':\n encoder = resnet101(True, output_stride=self.output_stride)\n encoder = encoder._modules # Covert class instance into orderdict\n\n # decay=0.9997, epsilon=1e-5, scale=True\n self.conv1 = nn.Sequential(encoder['conv1'], encoder['bn1'], encoder['relu'])\n self.pool1 = encoder['maxpool'] # s/4 - 64dim\n\n self.layers = nn.Sequential()\n for i in range(layer_num):\n self.layers.add_module('layer%d' % (i+1), encoder['layer%d' % (i+1)])\n layers_dim = [256, 512, 1024, 2048, 2048, 1024, 512]\n # layer_outSize = [s/4, s/output_stride, s/output_stride, ...]\n self.conv2 = self._make_layer(64, 48, 1) # in: pool1(out)\n\n rate_tabel = [1, 6, 12, 18, 24, 1, 3]\n if aspp_rate == 0:\n self.aspp = ASPP(\n in_channel=layers_dim[layer_num - 1],\n depth=aspp_depth) # ASPP: in: layers(out), fix_size=s/8\n else:\n self.aspp = ASPP_test(layers_dim[layer_num - 1], aspp_depth,\n rate_tabel[aspp_rate])\n\n # Decoder\n self.decoder_conv1 = self._make_layer(\n aspp_depth + 48, aspp_depth, 3, padding=1\n ) # in: concat[conv2(out), ]Up(aspp(out))\n self.decoder_conv2 = self._make_layer(\n aspp_depth, aspp_depth, 3, padding=1) # s/4\n\n self.out_conv = nn.Conv2d(aspp_depth, num_classes, 1, 1) # s/1 - output\n\n # Initalization\n # source code didn't specify the way of weight initalization of the decoder,\n # but slim default is zero initalization\n # init_list = ['conv2', 'aspp', 'decoder_conv1', 'decoder_conv2', 'out_conv']\n # for name, child_moudle in self.named_children():\n # if name in init_list:\n # for name, m in child_moudle.named_modules():\n # if isinstance(m, nn.Conv2d):\n # nn.init.kaiming_normal_(m.weight.data, nonlinearity='relu')\n # if isinstance(m, nn.BatchNorm2d):\n # m.weight.data.fill_(1)\n # m.weight.data.zero_()\n\n def _make_layer(self, in_channel, out_channel, kernel_size, padding=0):\n return nn.Sequential(\n nn.Conv2d(in_channel, out_channel, kernel_size, 1, padding=padding),\n nn.BatchNorm2d(out_channel),\n nn.ReLU(inplace=True)\n )\n\n def forward(self, x):\n size = x.shape[2:] # need upsample input size\n x = self.conv1(x)\n pool2 = self.pool1(x) # s/4\n size_4 = pool2.shape[2:]\n size_8 = [s//2 for s in size_4]\n\n x = self.layers(pool2) # s/output_stride\n\n # ASPP\n if list(x.shape[2:]) != size_8:\n x = resize(x, newsize=size_8) # s/output_stride -> s/8\n x = self.aspp(x) # fix input size s/8\n\n decoder_features = resize(x, newsize=pool2.shape[2:]) # s/4\n\n encoder_features = self.conv2(pool2) # s/4\n x = torch.cat([encoder_features, 
decoder_features], dim=1) # s/4\n x = self.decoder_conv1(x)\n x = self.decoder_conv2(x)\n x = resize(x, newsize=size) # Upx4 -> s/1\n\n x = self.out_conv(x) # s/1\n return x\n\n\ndef build_model(num_classes=5):\n model = DeeplabV3_plus(num_classes=num_classes)\n return model\n\n\nif __name__ == \"__main__\":\n aspp = ASPP()\n input = torch.randn(12, 512, 256, 256)\n out = aspp(input)\n\n",
"\"\"\"\nCodes of LinkNet based on https://github.com/snakers4/spacenet-three\n\"\"\"\nimport torch\nimport torch.nn as nn\n\nfrom code.models.backbone import SwinTransformer\nfrom code.models.decoder.LinkNet_Decoder import LinkNet_Decoder\nfrom code.lufangxiao.GDA_block import build_sua_module\n\nclass Swin_LinkNet_SUA(nn.Module):\n def __init__(self, num_classes=1, num_channels=3, pretrained=None, backbone='swin-t'):\n super(Swin_LinkNet_SUA, self).__init__()\n\n self.backbone = SwinTransformer(in_chans=num_channels, pretrain_img_size=512, window_size=8, backbone=backbone)\n if pretrained is not None:\n self.backbone.init_weights(pretrained)\n\n filters = self.backbone.get_filters()\n self.decoder = LinkNet_Decoder(filters, num_classes)\n self.SUA = build_sua_module(filters, 256, '/home/lufangxiao/GDANet/models/GDA_block/module_cfg/default_cfg.yaml')\n self.name = \"LinkNet_Swin_SUA\"\n\n def forward(self, input):\n # Encoder\n x = self.backbone(input)\n\n #SUA\n x = self.SUA(x)\n\n # Decoder\n out = self.decoder(x)\n\n return out\n\nif __name__ == \"__main__\":\n input = torch.autograd.Variable(torch.randn(1, 3, 512, 512))\n net = Swin_LinkNet_SUA(num_classes=17, num_channels=3)\n print(net(input).size())"
] |
[
[
"torch.nn.init.constant_",
"torch.nn.init.normal_",
"torch.nn.functional.interpolate"
],
[
"torch.mean",
"torch.sigmoid",
"torch.ones",
"torch.nn.functional.log_softmax",
"torch.zeros",
"torch.sum",
"torch.zeros_like",
"torch.tensor",
"torch.arange",
"numpy.array",
"torch.ones_like"
],
[
"torch.nn.Sequential",
"torch.cat",
"torch.randn",
"torch.nn.Conv2d",
"torch.nn.AdaptiveAvgPool2d",
"torch.nn.functional.interpolate",
"torch.nn.BatchNorm2d",
"torch.nn.ReLU"
],
[
"torch.randn"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
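A quick aside on the `padding=dilation` note in `ASPP._make_layer` above: for a 3x3, stride-1 convolution, setting the padding equal to the dilation rate keeps the spatial size unchanged, which is why the five ASPP branch outputs can be concatenated channel-wise. A minimal, self-contained check (toy tensor sizes, not taken from the repo):

```python
# Not part of the original file: verify that padding == dilation preserves
# spatial size for a 3x3, stride-1 conv, as ASPP._make_layer relies on.
import torch
from torch import nn

x = torch.randn(1, 512, 32, 32)  # assumed toy feature map
for rate in (1, 2, 6, 12):
    conv = nn.Conv2d(512, 128, kernel_size=3, stride=1, padding=rate, dilation=rate)
    assert conv(x).shape[-2:] == x.shape[-2:]  # height/width unchanged for every rate
```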
rpp0/em-operation-extraction
|
[
"1e2e170353ae01cb089b4a579b0ab143915acc14"
] |
[
"lib/fast-wavenet/wavenet/utils.py"
] |
[
"import numpy as np\n\nfrom scipy.io import wavfile\n\n\ndef normalize(data):\n temp = np.float32(data) - np.min(data)\n out = (temp / np.max(temp) - 0.5) * 2\n return out\n\n\ndef make_batch(path):\n data = wavfile.read(path)[1][:, 0]\n return get_wavenet_data(data)\n\n\ndef get_wavenet_data(data, resolution=256):\n data_ = normalize(data)\n # data_f = np.sign(data_) * (np.log(1 + 255*np.abs(data_)) / np.log(1 + 255))\n\n bins = np.linspace(-1, 1, resolution)\n # Quantize inputs.\n inputs = np.digitize(data_[0:-1], bins, right=False) - 1\n inputs = bins[inputs][None, :, None]\n\n # Encode targets as ints.\n targets = (np.digitize(data_[1::], bins, right=False) - 1)[None, :]\n return inputs, targets\n\ndef get_normalized_data(data):\n # A bit of a hack, sorry no time\n data_ = normalize(data)\n return data_[0:-1], data_[1:]\n"
] |
[
[
"numpy.linspace",
"numpy.min",
"numpy.max",
"numpy.float32",
"numpy.digitize",
"scipy.io.wavfile.read"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [
"1.7",
"1.0",
"0.10",
"1.2",
"0.14",
"0.19",
"1.5",
"0.12",
"0.17",
"0.13",
"1.6",
"1.4",
"1.9",
"1.3",
"1.10",
"0.15",
"0.18",
"0.16",
"1.8"
],
"tensorflow": []
}
] |
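The `get_wavenet_data` helper above quantizes a normalized signal into `resolution` bins and pairs each sample with the bin index of the next one. A minimal sketch with a toy sine signal (the signal and the small `resolution` are assumptions, not repo data):

```python
# Illustrative sketch of the quantization in get_wavenet_data: the input at
# step t is the binned value of sample t, the target is the bin index of
# sample t+1.
import numpy as np

resolution = 8
data = np.sin(np.linspace(0, 2 * np.pi, 16)).astype(np.float32)    # toy signal in [-1, 1]
bins = np.linspace(-1, 1, resolution)
idx = np.digitize(data[:-1], bins, right=False) - 1                # bin index per sample
inputs = bins[idx][None, :, None]                                  # (1, T, 1) quantized inputs
targets = (np.digitize(data[1:], bins, right=False) - 1)[None, :]  # (1, T) next-step bin ids
print(inputs.shape, targets.shape)                                 # (1, 15, 1) (1, 15)
```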
bollwyvl/jupyter-cadquery
|
[
"f0b8a1e20cf770329a23ba802f553bd94d0869f9"
] |
[
"jupyter_cadquery/cad_animation.py"
] |
[
"from math import pi\nimport numpy as np\nfrom scipy.spatial.transform import Rotation as R\nfrom cadquery import Workplane, Location\nfrom pythreejs import (\n NumberKeyframeTrack,\n AnimationAction,\n AnimationClip,\n AnimationMixer,\n BooleanKeyframeTrack,\n ColorKeyframeTrack,\n QuaternionKeyframeTrack,\n StringKeyframeTrack,\n VectorKeyframeTrack,\n)\n\n\ndef _d2r(x):\n return x / 180 * pi\n\n\nclass AnimationException(BaseException):\n ...\n\n\nvalid_transforms = [\"t\", \"tx\", \"ty\", \"tz\", \"q\", \"rx\", \"ry\", \"rz\"]\n\n\nclass Animation:\n def __init__(self, assembly):\n self.root = assembly\n self.tracks = []\n\n def add_track(self, selector, action, times, values):\n if len(times) != len(values):\n raise AnimationException(\"times and values arrays need have the same lenght\")\n\n selector = selector.replace(\"/\", \"\\\\\")\n group = self.root.find_group(selector)\n if group is None:\n raise AnimationException(f\"group '{selector}' not found\")\n\n if action.startswith(\"t\"):\n position = np.array(group.position).astype(np.float32)\n if action == \"t\":\n new_values = [position + v for v in values]\n elif action == \"tx\":\n new_values = [position + (v, 0, 0) for v in values]\n elif action == \"ty\":\n new_values = [position + (0, v, 0) for v in values]\n elif action == \"tz\":\n new_values = [position + (0, 0, v) for v in values]\n else:\n raise AnimationException(f\"action {action} is not supported\")\n\n self.tracks.append(\n NumberKeyframeTrack(\n name=selector + \".position\",\n times=np.array(times).astype(np.float32),\n values=new_values,\n )\n )\n\n else:\n if action.startswith(\"r\"):\n r_values = np.array([_d2r(v) for v in values]).astype(np.float32)\n\n actual = R.from_quat(group.quaternion)\n if action == \"rx\":\n rot_values = [R.from_rotvec((v, 0, 0)) for v in r_values]\n elif action == \"ry\":\n rot_values = [R.from_rotvec((0, v, 0)) for v in r_values]\n elif action == \"rz\":\n rot_values = [R.from_rotvec((0, 0, v)) for v in r_values]\n else:\n raise AnimationException(f\"action {action} not supported\")\n new_values = [(actual * rot).as_quat() for rot in rot_values]\n\n elif action == \"q\":\n actual = R.from_quat(group.quaternion)\n new_values = [tuple((actual * R.from_quat(q)).as_quat()) for q in values]\n\n else:\n raise AnimationException(f\"action {action} is not supported\")\n\n self.tracks.append(\n QuaternionKeyframeTrack(\n name=selector + \".quaternion\",\n times=np.array(times).astype(np.float32),\n values=new_values,\n )\n )\n\n def animate(self, speed=1, autoplay=False):\n if speed != 1:\n for track in self.tracks:\n track.times = track.times / float(speed)\n clip = AnimationClip(tracks=self.tracks)\n action = AnimationAction(AnimationMixer(self.root), clip, self.root)\n if autoplay:\n action.play()\n return action\n"
] |
[
[
"scipy.spatial.transform.Rotation.from_rotvec",
"numpy.array",
"scipy.spatial.transform.Rotation.from_quat"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [
"1.5",
"1.3",
"1.2",
"1.4"
],
"tensorflow": []
}
] |
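In `Animation.add_track` above, rotation actions become absolute keyframe quaternions by composing the group's current orientation with a per-keyframe rotation built from the requested angles. A small sketch with toy values (the starting quaternion and angle list are assumptions, not taken from an actual assembly):

```python
# Sketch of the "rz" branch of add_track: degrees -> radians -> rotation
# vectors about z, composed with the current orientation, exported as quats.
import numpy as np
from scipy.spatial.transform import Rotation as R

current = R.from_quat([0, 0, 0, 1])                      # assumed starting orientation
angles_deg = [0, 90, 180]
rotvecs = [(0, 0, a / 180 * np.pi) for a in angles_deg]  # rotate about z
keyframe_quats = [(current * R.from_rotvec(v)).as_quat() for v in rotvecs]
print(np.round(keyframe_quats, 3))                       # one quaternion per keyframe
```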
lea-hagen/beast
|
[
"44d12e3ae9039fc4ad8c64721babd8f9901c48aa"
] |
[
"beast/observationmodel/ast/make_ast_input_list.py"
] |
[
"import os\nimport warnings\nimport numpy as np\nfrom astropy.io import ascii\nfrom astropy.table import Table\nfrom astropy.table import Column\n\nfrom beast.observationmodel.vega import Vega\nfrom beast.physicsmodel.grid import SEDGrid\n\n\ndef mag_limits(seds, faint_cut, Nfilter=1, bright_cut=None):\n \"\"\"\n Selects models which have at least N filter above the limits\n\n Parameters\n ----------\n seds: np.array\n Magnitude array from BEAST grid\n\n faint_cut: list\n List of limit magnitudes on the faint end\n\n Nfilter: integer\n In how many filters, you want a fake star to be brighter\n than the limit (or fainter than the upper limit)\n\n bright_cut: list\n List of limit magnitudes on the bright end. Useful for cutting\n out bright, nearby models, when no such bright nearby stars are\n present in the data\n\n Returns\n -------\n idx: np.array\n Array of integers contining the indices of allowed models\n\n \"\"\"\n flag = seds.copy()\n\n # flag is True if the models are brigter (=smaller number in mag)\n # than the limits\n for i, limit in enumerate(faint_cut):\n flag[:, i] = seds[:, i] < limit\n\n # flag is True if the models are frainter than the upper brightness\n # limits\n if bright_cut is not None:\n for i, limit in enumerate(bright_cut):\n flag[:, i] = np.logical_and(flag[:, i], seds[:, i] > limit)\n\n # Keep index where model is brighter than the limit in N filters\n s = np.sum(flag, axis=1)\n (idx,) = np.where(s >= Nfilter)\n\n return idx\n\n\ndef pick_models_toothpick_style(\n sedgrid_fname,\n filters,\n Nfilter,\n N_fluxes,\n min_N_per_flux,\n outfile=None,\n outfile_params=None,\n bins_outfile=None,\n bright_cut=None,\n):\n \"\"\"\n Creates a fake star catalog from a BEAST model grid. The chosen seds\n are optimized for the toothpick model, by working with a given\n number of flux bins, and making sure that every flux bin is covered\n by at least a given number of models (for each filter individually,\n which is how the toothpick model works).\n\n Parameters\n ----------\n sedgrid_fname: string\n BEAST model grid from which the models are picked (hdf5 file)\n\n filters: list of string\n Names of the filters, to be used as columns of the output table\n\n Nfilter: integer\n In how many filters a fake star needs to be brighter than the\n mag_cut value\n\n N_fluxes: integer\n The number of flux bins into which the dynamic range of the\n model grid in each filter is divided\n\n min_N_per_flux: integer\n Minimum number of model seds that need to fall into each bin\n\n outfile: string\n Output path for the models (optional). If this file already\n exists, the chosen seds are loaded from this file instead.\n\n outfile_params: string (default=None)\n If a file name is given, the physical parameters associated with\n each model will be written to disk\n\n bins_outfile: string\n Output path for a file containing the flux bin limits for each\n filter, and the number of samples for each (optional)\n\n bright_cut: list of float\n List of magnitude limits for each filter (won't sample model\n SEDs that are too bright)\n\n Returns\n -------\n sedsMags: astropy Table\n A table containing the selected model seds (columns are named\n after the filters)\n\n \"\"\"\n if outfile is not None and os.path.isfile(outfile):\n print(\n \"{} already exists. 
Will attempt to load SEDs for ASTs from there.\".format(\n outfile\n )\n )\n t = Table.read(outfile, format=\"ascii\")\n return t\n\n with Vega() as v:\n vega_f, vega_flux, lambd = v.getFlux(filters)\n\n modelsedgrid = SEDGrid(sedgrid_fname)\n\n sedsMags = -2.5 * np.log10(modelsedgrid.seds[:] / vega_flux)\n Nseds = sedsMags.shape[0]\n Nf = sedsMags.shape[1]\n idxs = np.arange(Nseds)\n\n # Check if logL=-9.999 model points sliently sneak through\n if min(modelsedgrid.grid[\"logL\"]) < -9:\n warnings.warn(\"There are logL=-9.999 model points in the SED grid!\")\n print(\"Excluding those SED models from selecting input ASTs\")\n idxs = np.where(modelsedgrid.grid[\"logL\"] > -9)[0]\n sedsMags = sedsMags[idxs]\n\n # Set up a number of flux bins for each filter\n maxes = np.amax(sedsMags, axis=0)\n mins = np.amin(sedsMags, axis=0)\n\n bin_edges = np.zeros((N_fluxes + 1, Nf)) # indexed on [fluxbin, nfilters]\n for f in range(Nf):\n bin_edges[:, f] = np.linspace(mins[f], maxes[f], N_fluxes + 1)\n bin_mins = bin_edges[:-1, :]\n bin_maxs = bin_edges[1:, :]\n if not len(bin_mins) == len(bin_maxs) == N_fluxes:\n raise AssertionError()\n\n bin_count = np.zeros((N_fluxes, Nf))\n chosen_idxs = []\n counter = 0\n successes = 0\n include_mask = np.full(idxs.shape, True, dtype=bool)\n chunksize = 100000\n while True:\n counter += 1\n # pick some random models\n rand_idx = np.random.choice(idxs[include_mask], size=chunksize)\n randomseds = sedsMags[rand_idx, :]\n\n # Find in which bin each model belongs, for each filter\n fluxbins = np.zeros(randomseds.shape, dtype=int)\n for fltr in range(Nf):\n fluxbins[:, fltr] = np.digitize(randomseds[:, fltr], bin_maxs[:, fltr])\n\n # Clip in place (models of which the flux is equal to the max\n # are assigned bin nr N_fluxes. Move these down to bin nr\n # N_fluxes - 1)\n np.clip(fluxbins, a_min=0, a_max=N_fluxes - 1, out=fluxbins)\n\n add_these = np.full((len(rand_idx)), False, dtype=bool)\n for r in range(len(rand_idx)):\n # If any of the flux bins that this model falls into does\n # not have enough samples yet, add it to the list of model\n # spectra to be output\n if (bin_count[fluxbins[r, :], range(Nf)] < min_N_per_flux).any():\n bin_count[fluxbins[r, :], range(Nf)] += 1\n successes += 1\n add_these[r] = True\n\n # If all these bins are full...\n else:\n # ... do not include this model again, since we will reject it\n # anyway.\n include_mask[idxs == rand_idx] = False\n\n # Add the approved models\n chosen_idxs.extend(rand_idx[add_these])\n\n # If some of the randomly picked models were not added\n if not add_these.any():\n # ... check if we have enough samples everywhere, or if all\n # the models have been exhausted (and hence the bins are\n # impossible to fill).\n enough_samples = (bin_count.flatten() >= min_N_per_flux).all()\n still_models_left = include_mask.any()\n if enough_samples or not still_models_left:\n break\n\n if not counter % 10:\n print(\n \"Sampled {} models. {} successfull seds. 
Ratio = {}\".format(\n counter * chunksize, successes, successes / counter / chunksize\n )\n )\n\n # Gather the selected model seds in a table\n sedsMags = Table(sedsMags[chosen_idxs, :], names=filters)\n\n if outfile is not None:\n ascii.write(\n sedsMags,\n outfile,\n overwrite=True,\n formats={k: \"%.5f\" for k in sedsMags.colnames},\n )\n\n # if chosen, save the corresponding model parameters\n if outfile_params is not None:\n grid_dict = {}\n for key in list(modelsedgrid.grid.keys()):\n grid_dict[key] = modelsedgrid.grid[key][chosen_idxs]\n grid_dict[\"sedgrid_indx\"] = chosen_idxs\n ast_params = Table(grid_dict)\n ast_params.write(outfile_params, overwrite=True)\n\n if bins_outfile is not None:\n bin_info_table = Table()\n col_bigarrays = [bin_mins, bin_maxs, bin_count]\n col_basenames = [\"bin_mins_\", \"bin_maxs_\", \"bin_count_\"]\n for fltr, filter_name in enumerate(filters):\n for bigarray, basename in zip(col_bigarrays, col_basenames):\n bin_info_table.add_column(\n Column(bigarray[:, fltr], name=basename + filter_name)\n )\n ascii.write(bin_info_table, bins_outfile, overwrite=True)\n\n return sedsMags\n\n\ndef pick_models(\n sedgrid_fname,\n filters,\n mag_cuts,\n Nfilter=3,\n N_stars=70,\n Nrealize=20,\n outfile=None,\n outfile_params=None,\n bright_cut=None,\n vega_fname=None,\n ranseed=None,\n):\n \"\"\"Creates a fake star catalog from a BEAST model grid\n\n Parameters\n ----------\n sedgrid_fname: string\n BEAST model grid from which the models are picked (hdf5 file)\n\n filters: list of string\n Names of the filters\n\n mag_cuts: list\n List of magnitude limits for each filter\n\n Nfilter: Integer\n In how many filters, you want a fake star to be brighter\n than the limit (mag_cut) (default = 3)\n\n N_stars: Integer\n Number of stellar models picked per a single log(age)\n (default=70)\n\n Nrealize: Integer\n Number of realization of each models (default = 20)\n\n outfile: str\n If a file name is given, the selected models will be written to\n disk\n\n outfile_params: str\n If a file name is given, the physical parameters associated with\n each model will be written to disk\n\n bright_cut: list of float\n Same as mag_cuts, but for the bright end\n\n vega_fname: str\n filename of vega file\n\n ranseed : int\n used to set the seed to make the results reproducable\n useful for testing\n\n Returns\n -------\n astropy Table of selected models\n - and optionally -\n ascii file: A list of selected models, written to 'outfile'\n fits file: the corresponding physical parameters, written to 'outfile_params'\n \"\"\"\n\n with Vega(source=vega_fname) as v: # Get the vega fluxes\n vega_f, vega_flux, lamb = v.getFlux(filters)\n\n modelsedgrid = SEDGrid(sedgrid_fname)\n\n # Convert to Vega mags\n sedsMags = -2.5 * np.log10(modelsedgrid.seds[:] / vega_flux)\n\n # make sure Nfilters isn't larger than the total number of filters\n if Nfilter > len(filters):\n Nfilter = len(filters)\n\n # Select the models above the magnitude limits in N filters\n idxs = mag_limits(sedsMags, mag_cuts, Nfilter=Nfilter, bright_cut=bright_cut)\n cols = {}\n for key in list(modelsedgrid.grid.keys()):\n cols[key] = modelsedgrid.grid[key][idxs]\n grid_cut = Table(cols)\n\n # Sample the model grid uniformly\n prime_params = np.column_stack(\n (grid_cut[\"logA\"], grid_cut[\"M_ini\"], grid_cut[\"Av\"])\n )\n search_age = np.unique(prime_params[:, 0])\n\n N_sample = N_stars\n model_ind = [] # indices for the model grid\n ast_params = grid_cut[[]] # the corresponding model parameters\n\n # set the random seed - 
mainly for testing\n if not None:\n np.random.seed(ranseed)\n\n for iage in search_age:\n (tmp,) = np.where(prime_params[:, 0] == iage)\n new_ind = np.random.choice(tmp, N_sample)\n model_ind.append(new_ind)\n [ast_params.add_row(grid_cut[new_ind[i]]) for i in range(len(new_ind))]\n\n index = np.repeat(idxs[np.array(model_ind).reshape((-1))], Nrealize)\n sedsMags = Table(sedsMags[index, :], names=filters)\n\n if outfile is not None:\n ascii.write(\n sedsMags,\n outfile,\n overwrite=True,\n formats={k: \"%.5f\" for k in sedsMags.colnames},\n )\n\n if outfile_params is not None:\n ast_params.write(outfile_params, overwrite=True)\n\n return sedsMags\n\n\ndef supplement_ast(\n sedgrid_fname,\n filters,\n nAST=1000,\n existingASTfile=None,\n outASTfile=None,\n outASTfile_params=None,\n mag_cuts=None,\n color_cuts=None,\n):\n \"\"\"\n Creates an additional fake star catalog from a BEAST model grid\n that fulfills the customized conditions to supplement input ASTs.\n If the existing input AST parameter file is given, already selected\n models will be excluded from this process. The input artificial\n stars are picked randomly from the remaining models.\n\n Parameters\n ----------\n sedgrid_fname: string\n BEAST model grid from which the models are picked (hdf5 file)\n\n filters: list of string\n Names of the filters\n\n nAST: int\n Number of unique additional ASTs per source density bin\n\n existingASTfile: string (optional, default=None)\n Name of the existing input AST parameter file. If not None,\n the models that were already listed in the existing list Will\n be removed by default\n\n outASTfile: string (optional, default=None)\n Output file name for the chosen models\n\n outASTfile_params: string (optional, default=None)\n If a file name is given, the physical parameters associated with\n each model will be written to disk\n\n mag_cut: dictionary (optional, default=None)\n Dictionary of bright and faint magnitude limits for given filters.\n The way to specify the cuts is by updating the \"ast_suppl_maglimit\" key\n in the beast_settings file. This is a dictionary that includes information\n for the magnitude cuts as a function of the filters included in observation.\n\n For example, for a field observed with HST_WFC3_F336W, HST_WFC3_F475W,\n and HST_WFC3_F814W, to set a magnitude range limit of 16<HST_WFC3_F475W<28 mag,\n and 15<HST_WFC3_F814W<27 mag you need to set the following within the beast_settings file:\n\n # specify that the ast_supplement mode should be on\n ast_supplement = True\n\n # initialize and populate the dictionary of desired magnitude limits\n ast_suppl_maglimits = {}\n # the magntidue limits are defined by the filter and a list of the limits in magnitudes\n ast_suppl_maglimits[\"HST_WFC3_F475W\"] = [16,28]\n ast_suppl_maglimits[\"HST_WFC3_F814W\"] = [15,27]\n\n # set the key word\n ast_suppl_maglimit = ast_suppl_maglimits\n\n color_cut: dictionary (optional, default=None)\n Dictionary of red color limits for given filters.\n The way to specify the cuts is by updating the \"ast_suppl_colorlimit\" key\n in the beast_settings file. 
This is a dictionary that includes information\n for the color cuts as a function of the filters included in observation.\n\n For example, for a field observed with HST_WFC3_F336W, HST_WFC3_F475W,\n and HST_WFC3_F814W, to set a color range limit of HST_WFC3_F475W-HST_WFC3_F814W<6,\n HST_WFC3_F336W-HST_WFC3_F475W<5 and HST_WFC3_F336W-HST_WFC3_F814W<4, you need\n to set the following within the beast_settings file:\n\n # specify that the ast_supplement mode should be on\n ast_supplement = True\n\n # initialize the dictionary of desired magnitude limits\n ast_suppl_colorlimits = {}\n\n # the color limits are defined by the first filter in the color (e.g, X for X-Y),\n # and the input is a list including the second filter (e.g., Y for X-Y) and the\n # color limit in magnitudes\n ast_suppl_colorlimits[\"HST_WFC3_F475W\"] = [[\"HST_WFC3_F814W\",6]]\n ast_suppl_colorlimits[\"HST_WFC3_F336W\"] = [[\"HST_WFC3_F475W\",5], [\"HST_WFC3_F814W\",4]]\n\n # set the key word\n ast_suppl_colorlimit = ast_suppl_colorlimits\n\n Returns\n -------\n sedsMags: astropy Table\n A table containing the selected model seds (columns are named\n after the filters)\n\n \"\"\"\n\n with Vega() as v:\n vega_f, vega_flux, lambd = v.getFlux(filters)\n\n modelsedgrid = SEDGrid(sedgrid_fname)\n\n # Convert to Vega mags\n sedsMags = -2.5 * np.log10(modelsedgrid.seds[:] / vega_flux)\n\n Nseds = sedsMags.shape[0]\n sedsIndx = np.arange(Nseds)\n\n if existingASTfile is not None and os.path.isfile(existingASTfile):\n print(\n \"{} exists. Will attempt to load SEDs for ASTs from there \\\n and remove those SEDs from the SED grid\".format(\n existingASTfile\n )\n )\n print(\"existing AST file\", existingASTfile)\n t = Table.read(existingASTfile, format=\"fits\")\n sedsMags = np.delete(sedsMags, t[\"sedgrid_indx\"], axis=0)\n sedsIndx = np.delete(sedsIndx, t[\"sedgrid_indx\"])\n Nseds = sedsMags.shape[0]\n\n # Apply selection conditions if supplied\n # Just magnitude cuts\n print(\"mag_cuts\", mag_cuts)\n print(\"color_cuts\", color_cuts)\n if mag_cuts is not None:\n cond = np.ones(Nseds, dtype=bool)\n for key in list(mag_cuts.keys()):\n idx_filter = [i for i, iflt in enumerate(filters) if key in iflt]\n bright_cut = mag_cuts[key][0]\n faint_cut = mag_cuts[key][1]\n tmp_cond = np.logical_and(\n (sedsMags[:, idx_filter] >= bright_cut),\n (sedsMags[:, idx_filter] <= faint_cut),\n )\n\n if color_cuts is not None:\n if key in color_cuts:\n for limit in color_cuts[key]:\n\n idx_color_filter = [\n i for i, iflt in enumerate(filters) if limit[0] in iflt\n ]\n tmp_cond = np.logical_and(\n tmp_cond,\n (\n sedsMags[:, idx_filter] - sedsMags[:, idx_color_filter]\n <= limit[1]\n ),\n )\n cond = np.logical_and(cond, tmp_cond.ravel())\n\n sedsMags = sedsMags[cond, :]\n sedsIndx = sedsIndx[cond]\n\n # Randomly select models\n # Supplementing ASTs does not need to follow\n # the toothpick-way selection\n chosen_idxs = np.random.choice(len(sedsIndx), nAST)\n sedsIndx = sedsIndx[chosen_idxs]\n\n # Gather the selected model seds in a table\n sedsMags = Table(sedsMags[chosen_idxs, :], names=filters)\n\n if outASTfile is not None:\n ascii.write(\n sedsMags,\n outASTfile,\n overwrite=True,\n formats={k: \"%.5f\" for k in sedsMags.colnames},\n )\n\n # if chosen, save the corresponding model parameters\n if outASTfile_params is not None:\n grid_dict = {}\n for key in list(modelsedgrid.grid.keys()):\n grid_dict[key] = modelsedgrid.grid[key][sedsIndx]\n grid_dict[\"sedgrid_indx\"] = sedsIndx\n ast_params = Table(grid_dict)\n ast_params.write(outASTfile_params, 
overwrite=True)\n\n return sedsMags\n"
] |
[
[
"numpy.amax",
"numpy.linspace",
"numpy.digitize",
"numpy.where",
"numpy.unique",
"numpy.clip",
"numpy.arange",
"numpy.full",
"numpy.column_stack",
"numpy.zeros",
"numpy.random.choice",
"numpy.amin",
"numpy.delete",
"numpy.log10",
"numpy.array",
"numpy.logical_and",
"numpy.sum",
"numpy.random.seed",
"numpy.ones"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
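`pick_models_toothpick_style` above assigns every model SED to a flux bin per filter via `np.digitize` against the upper bin edges, then clips the top edge into the last bin so bin occupancy can be counted. A toy sketch of that bookkeeping (random magnitudes, not a real SED grid):

```python
# Illustrative per-filter flux binning, mirroring the bin_edges / digitize /
# clip steps in pick_models_toothpick_style.
import numpy as np

rng = np.random.default_rng(0)
N_fluxes, Nf = 5, 3
mags = rng.uniform(18.0, 28.0, size=(100, Nf))           # toy model magnitudes
edges = np.linspace(mags.min(axis=0), mags.max(axis=0), N_fluxes + 1)  # (N_fluxes+1, Nf)
bin_maxs = edges[1:, :]
fluxbins = np.zeros(mags.shape, dtype=int)
for f in range(Nf):
    fluxbins[:, f] = np.digitize(mags[:, f], bin_maxs[:, f])
np.clip(fluxbins, 0, N_fluxes - 1, out=fluxbins)          # max values land in the last bin
print(np.bincount(fluxbins[:, 0], minlength=N_fluxes))    # occupancy of bins in filter 0
```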
Rsdv13/Audio-Emotion-Analysis
|
[
"f204e9665844665077d8004ce1d23b12535ef5fd"
] |
[
"speaker_diarization.py"
] |
[
"import collections\nimport contextlib\nimport sys\nimport wave\n\nimport webrtcvad\nimport librosa\n\n\ndef read_wave(path):\n\n with contextlib.closing(wave.open(path, 'rb')) as wf:\n num_channels = wf.getnchannels()\n assert num_channels == 1\n sample_width = wf.getsampwidth()\n assert sample_width == 2\n sample_rate = wf.getframerate()\n assert sample_rate in (8000, 16000, 32000, 48000)\n pcm_data = wf.readframes(wf.getnframes())\n return pcm_data, sample_rate\n\n\ndef write_wave(path, audio, sample_rate):\n\n with contextlib.closing(wave.open(path, 'wb')) as wf:\n wf.setnchannels(1)\n wf.setsampwidth(2)\n wf.setframerate(sample_rate)\n wf.writeframes(audio)\n\n\nclass Frame(object):\n\n def __init__(self, bytes, timestamp, duration):\n self.bytes = bytes\n self.timestamp = timestamp\n self.duration = duration\n\n\ndef frame_generator(frame_duration_ms, audio, sample_rate):\n\n\n n = int(sample_rate * (frame_duration_ms / 1000.0) * 2)\n offset = 0\n timestamp = 0.0\n duration = (float(n) / sample_rate) / 2.0\n while offset + n < len(audio):\n yield Frame(audio[offset:offset + n], timestamp, duration)\n timestamp += duration\n offset += n\n\n\ndef vad_collector(sample_rate, frame_duration_ms,\n padding_duration_ms, vad, frames):\n\n num_padding_frames = int(padding_duration_ms / frame_duration_ms)\n # We use a deque for our sliding window/ring buffer.\n ring_buffer = collections.deque(maxlen=num_padding_frames)\n # We have two states: TRIGGERED and NOTTRIGGERED. We start in the\n # NOTTRIGGERED state.\n triggered = False\n\n voiced_frames = []\n for frame in frames:\n is_speech = vad.is_speech(frame.bytes, sample_rate)\n\n sys.stdout.write('1' if is_speech else '0')\n if not triggered:\n ring_buffer.append((frame, is_speech))\n num_voiced = len([f for f, speech in ring_buffer if speech])\n # If we're NOTTRIGGERED and more than 90% of the frames in\n # the ring buffer are voiced frames, then enter the\n # TRIGGERED state.\n if num_voiced > 0.9 * ring_buffer.maxlen:\n triggered = True\n sys.stdout.write('+(%s)' % (ring_buffer[0][0].timestamp,))\n # We want to yield all the audio we see from now until\n # we are NOTTRIGGERED, but we have to start with the\n # audio that's already in the ring buffer.\n for f, s in ring_buffer:\n voiced_frames.append(f)\n ring_buffer.clear()\n else:\n # We're in the TRIGGERED state, so collect the audio data\n # and add it to the ring buffer.\n voiced_frames.append(frame)\n ring_buffer.append((frame, is_speech))\n num_unvoiced = len([f for f, speech in ring_buffer if not speech])\n # If more than 90% of the frames in the ring buffer are\n # unvoiced, then enter NOTTRIGGERED and yield whatever\n # audio we've collected.\n if num_unvoiced > 0.9 * ring_buffer.maxlen:\n sys.stdout.write('-(%s)' % (frame.timestamp + frame.duration))\n triggered = False\n yield b''.join([f.bytes for f in voiced_frames])\n ring_buffer.clear()\n voiced_frames = []\n if triggered:\n sys.stdout.write('-(%s)' % (frame.timestamp + frame.duration))\n sys.stdout.write('\\n')\n # If we have any leftover voiced audio when we run out of input,\n # yield it.\n if voiced_frames:\n yield b''.join([f.bytes for f in voiced_frames])\n\n\n########################### IMPLEMENTATION ###########################\nfrom sklearn import preprocessing\nimport numpy as np\nfrom sklearn.mixture import GaussianMixture\nfrom copy import deepcopy\nfrom sklearn.cluster import SpectralClustering\n\naudio, sample_rate = read_wave('test.wav')\nvad = webrtcvad.Vad(2)\nframes = frame_generator(30, audio, 
sample_rate)\nframes = list(frames)\nsegments = vad_collector(sample_rate, 30, 300, vad, frames)\nc = 0\nfor i, segment in enumerate(segments):\n path = 'chunk-%002d.wav' % (i,)\n print(' Writing %s' % (path,))\n write_wave(path, segment, sample_rate)\n c +=1\n#count of chunks\n# c = 14\n\nsampling_rate = 8000\nn_mfcc = 13\nn_fft = 0.032\nhop_length = 0.010\n\ncomponents = 16\n\ncov_type = 'full'\n\n########################### Global GMM i.e UBM ###########################\ntest_file_path = \"test.wav\"\ny,sr = librosa.load(test_file_path)\nprint(np.shape(y))\n\nmfcc = librosa.feature.mfcc(np.array(y),sr,hop_length=int(hop_length * sr),n_fft=int(n_fft*sr),n_mfcc=n_mfcc,dct_type=2)\nmfcc_delta = librosa.feature.delta(mfcc)\nmfcc_delta_second_order = librosa.feature.delta(mfcc,order=2)\ntemp = librosa.feature.delta(mfcc_delta)\ninter = np.vstack((mfcc,mfcc_delta,mfcc_delta_second_order))\nubm_feature = inter.T\n#ubm_feature = preprocessing.scale(ubm_feature)\n\n# ubm_feature -= np.mean(ubm_feature)\n# ubm_feature /= np.std(ubm_feature)\n\nubm_model = GaussianMixture(n_components = components, covariance_type = cov_type)\nubm_model.fit(ubm_feature)\n\nprint(ubm_model.score(ubm_feature))\nprint(ubm_model.means_)\n\n\ndef MAP_Estimation(model,data,m_iterations):\n\n N = data.shape[0]\n D = data.shape[1]\n K = model.n_components\n\n\n mu_new = np.zeros((K,D))\n n_k = np.zeros((K,1))\n\n mu_k = model.means_\n \n pi_k = model.weights_\n\n old_likelihood = model.score(data)\n new_likelihood = 0\n iterations = 0\n while(iterations < m_iterations):\n iterations += 1\n old_likelihood = new_likelihood\n z_n_k = model.predict_proba(data)\n n_k = np.sum(z_n_k,axis = 0)\n n_k = n_k.reshape(np.shape(n_k)[0],1)\n\n mu_new = np.dot(z_n_k.T,data)\n n_k[n_k == 0] = 1e-20\n mu_new = mu_new / n_k\n\n adaptation_coefficient = n_k/(n_k + relevance_factor)\n I = np.ones(shape=np.shape(n_k))\n # for k in range(K):\n # mu_k[k] = (adaptation_coefficient[k] * mu_new[k]) + ((1 - adaptation_coefficient[k]) * mu_k[k])\n mu_k = (adaptation_coefficient*mu_new) + (( I - adaptation_coefficient) * mu_k)\n model.means_ = mu_k\n\n log_likelihood = model.score(data)\n\n new_likelihood = log_likelihood\n\n if abs(old_likelihood - new_likelihood) < 1e-20:\n break\n print(log_likelihood)\n return model\n\n\n\nTotal = []\nrelevance_factor = 16\nfor i in range(c):\n fname='chunk-%002d.wav' % (i,)\n print('MAP adaptation for {0}'.format(fname))\n temp_y,sr_temp = librosa.load(fname,sr=None)\n \n temp_mfcc = librosa.feature.mfcc(np.array(temp_y),sr_temp,hop_length=int(hop_length * sr_temp),n_fft=int(n_fft*sr_temp),n_mfcc=n_mfcc,dct_type=2)\n temp_mfcc_delta = librosa.feature.delta(temp_mfcc)\n temp_mfcc_delta_second_order = librosa.feature.delta(temp_mfcc,order=2)\n temp_inter = np.vstack((temp_mfcc,temp_mfcc_delta,temp_mfcc_delta_second_order))\n temp_gmm_feature = temp_inter.T\n #data = preprocessing.scale(temp_gmm_feature)\n\n gmm = deepcopy(ubm_model)\n\n gmm = MAP_Estimation(gmm,temp_gmm_feature,m_iterations =1)\n \n sv = gmm.means_.flatten()\n #sv = preprocessing.scale(sv)\n Total.append(sv)\n\nN_CLUSTERS = 2\n\ndef rearrange(labels, n):\n seen = set()\n distinct = [x for x in labels if x not in seen and not seen.add(x)]\n correct = [i for i in range(n)]\n dict_ = dict(zip(distinct, correct))\n return [x if x not in dict_ else dict_[x] for x in labels]\n\nsc = SpectralClustering(n_clusters=N_CLUSTERS, affinity='cosine')\n\n#Labels help us identify between chunks of customer and call center agent\nlabels = 
sc.fit_predict(Total)\nlabels = rearrange(labels, N_CLUSTERS)\nprint(labels)\n\n#Since there is no way to identify the voice of a customer just from the audio\n#we have assumed that customer is the one who speaks 2nd\n#Normally the call center agent is the first one to speak and then the customer\n#If that is not the case for a specific audio, change the condition from 'x==1' to 'x==0'\nprint([i for i, x in enumerate(labels) if x == 1])\n"
] |
[
[
"numpy.dot",
"sklearn.cluster.SpectralClustering",
"numpy.shape",
"sklearn.mixture.GaussianMixture",
"numpy.array",
"numpy.zeros",
"numpy.sum",
"numpy.vstack"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
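`MAP_Estimation` above adapts only the means of the UBM toward each chunk's features, blending the data-driven means with the UBM means through a relevance factor before the supervectors are clustered. A compact sketch of one such update on toy data (component count, feature dimension, and relevance factor here are illustrative, not the script's settings):

```python
# Mean-only MAP adaptation step, as in MAP_Estimation: responsibilities ->
# soft counts -> data-driven means -> blend with UBM means via a relevance factor.
import numpy as np
from sklearn.mixture import GaussianMixture

rng = np.random.default_rng(0)
ubm = GaussianMixture(n_components=4, covariance_type='full').fit(rng.normal(size=(500, 6)))
chunk = rng.normal(loc=0.5, size=(80, 6))        # assumed per-chunk MFCC-like features

relevance_factor = 16
z = ubm.predict_proba(chunk)                     # responsibilities (N, K)
n_k = z.sum(axis=0, keepdims=True).T             # (K, 1) soft counts
n_k[n_k == 0] = 1e-20
mu_new = z.T @ chunk / n_k                       # data-driven means (K, D)
alpha = n_k / (n_k + relevance_factor)           # adaptation coefficient per component
adapted_means = alpha * mu_new + (1 - alpha) * ubm.means_
print(adapted_means.shape)                       # (4, 6)
```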
jamesmlane/galpy
|
[
"c74776ba72df5fc2dfc94ccabd89265326c6ebe6",
"c74776ba72df5fc2dfc94ccabd89265326c6ebe6"
] |
[
"tests/test_pv2qdf.py",
"galpy/potential/planarPotential.py"
] |
[
"# Tests of the quasiisothermaldf module\nfrom __future__ import print_function, division\nimport numpy\n#fiducial setup uses these\nfrom galpy.potential import MWPotential, vcirc, omegac, epifreq, verticalfreq\nfrom galpy.actionAngle import actionAngleAdiabatic, actionAngleStaeckel\nfrom galpy.df import quasiisothermaldf\naAA= actionAngleAdiabatic(pot=MWPotential,c=True)\naAS= actionAngleStaeckel(pot=MWPotential,c=True,delta=0.5)\n\ndef test_pvRvT_adiabatic():\n qdf= quasiisothermaldf(1./4.,0.2,0.1,1.,1.,\n pot=MWPotential,aA=aAA,cutcounter=True)\n R,z= 0.8, 0.1\n vRs= numpy.linspace(-1.,1.,21)\n vTs= numpy.linspace(0.,1.5,51)\n pvRvT= numpy.array([[qdf.pvRvT(vr,vt,R,z) for vt in vTs] for vr in vRs])\n tvR= numpy.tile(vRs,(len(vTs),1)).T\n tvT= numpy.tile(vTs,(len(vRs),1))\n mvR= numpy.sum(tvR*pvRvT)/numpy.sum(pvRvT)\n mvT= numpy.sum(tvT*pvRvT)/numpy.sum(pvRvT)\n svR= numpy.sqrt(numpy.sum(tvR**2.*pvRvT)/numpy.sum(pvRvT)-mvR**2.)\n svT= numpy.sqrt(numpy.sum(tvT**2.*pvRvT)/numpy.sum(pvRvT)-mvT**2.)\n svRvT= (numpy.sum(tvR*tvT*pvRvT)/numpy.sum(pvRvT)-mvR*mvT)/svR/svT\n assert numpy.fabs(mvR) < 0.01, 'mean vR calculated from pvRvT not equal to zero for adiabatic actions'\n assert numpy.fabs(mvT-qdf.meanvT(R,z)) < 0.01, 'mean vT calculated from pvRvT not equal to zero for adiabatic actions'\n assert numpy.fabs(numpy.log(svR)-0.5*numpy.log(qdf.sigmaR2(R,z))) < 0.01, 'sigma vR calculated from pvRvT not equal to that from sigmaR2 for adiabatic actions'\n assert numpy.fabs(numpy.log(svT)-0.5*numpy.log(qdf.sigmaT2(R,z))) < 0.01, 'sigma vT calculated from pvRvT not equal to that from sigmaT2 for adiabatic actions'\n assert numpy.fabs(svRvT) < 0.01, 'correlation between vR and vT calculated from pvRvT not equal to zero for adiabatic actions'\n return None\n\ndef test_pvRvT_staeckel():\n qdf= quasiisothermaldf(1./4.,0.2,0.1,1.,1.,\n pot=MWPotential,aA=aAS,cutcounter=True)\n R,z= 0.8, 0.1\n vRs= numpy.linspace(-1.,1.,21)\n vTs= numpy.linspace(0.,1.5,51)\n pvRvT= numpy.array([[qdf.pvRvT(vr,vt,R,z) for vt in vTs] for vr in vRs])\n tvR= numpy.tile(vRs,(len(vTs),1)).T\n tvT= numpy.tile(vTs,(len(vRs),1))\n mvR= numpy.sum(tvR*pvRvT)/numpy.sum(pvRvT)\n mvT= numpy.sum(tvT*pvRvT)/numpy.sum(pvRvT)\n svR= numpy.sqrt(numpy.sum(tvR**2.*pvRvT)/numpy.sum(pvRvT)-mvR**2.)\n svT= numpy.sqrt(numpy.sum(tvT**2.*pvRvT)/numpy.sum(pvRvT)-mvT**2.)\n svRvT= (numpy.sum(tvR*tvT*pvRvT)/numpy.sum(pvRvT)-mvR*mvT)/svR/svT\n assert numpy.fabs(mvR) < 0.01, 'mean vR calculated from pvRvT not equal to zero for staeckel actions'\n assert numpy.fabs(mvT-qdf.meanvT(R,z)) < 0.01, 'mean vT calculated from pvRvT not equal to zero for staeckel actions'\n assert numpy.fabs(numpy.log(svR)-0.5*numpy.log(qdf.sigmaR2(R,z))) < 0.01, 'sigma vR calculated from pvRvT not equal to that from sigmaR2 for staeckel actions'\n assert numpy.fabs(numpy.log(svT)-0.5*numpy.log(qdf.sigmaT2(R,z))) < 0.01, 'sigma vT calculated from pvRvT not equal to that from sigmaT2 for staeckel actions'\n assert numpy.fabs(svRvT) < 0.01, 'correlation between vR and vT calculated from pvRvT not equal to zero for staeckel actions'\n return None\n\ndef test_pvRvT_staeckel_diffngl():\n qdf= quasiisothermaldf(1./4.,0.2,0.1,1.,1.,\n pot=MWPotential,aA=aAS,cutcounter=True)\n R,z= 0.8, 0.1\n vRs= numpy.linspace(-1.,1.,21)\n vTs= numpy.linspace(0.,1.5,51)\n #ngl=10\n pvRvT= numpy.array([[qdf.pvRvT(vr,vt,R,z,ngl=10) for vt in vTs] for vr in vRs])\n tvR= numpy.tile(vRs,(len(vTs),1)).T\n tvT= numpy.tile(vTs,(len(vRs),1))\n mvR= numpy.sum(tvR*pvRvT)/numpy.sum(pvRvT)\n mvT= 
numpy.sum(tvT*pvRvT)/numpy.sum(pvRvT)\n svR= numpy.sqrt(numpy.sum(tvR**2.*pvRvT)/numpy.sum(pvRvT)-mvR**2.)\n svT= numpy.sqrt(numpy.sum(tvT**2.*pvRvT)/numpy.sum(pvRvT)-mvT**2.)\n svRvT= (numpy.sum(tvR*tvT*pvRvT)/numpy.sum(pvRvT)-mvR*mvT)/svR/svT\n assert numpy.fabs(mvR) < 0.01, 'mean vR calculated from pvRvT not equal to zero for staeckel actions'\n assert numpy.fabs(mvT-qdf.meanvT(R,z)) < 0.01, 'mean vT calculated from pvRvT not equal to zero for staeckel actions'\n assert numpy.fabs(numpy.log(svR)-0.5*numpy.log(qdf.sigmaR2(R,z))) < 0.01, 'sigma vR calculated from pvRvT not equal to that from sigmaR2 for staeckel actions'\n assert numpy.fabs(numpy.log(svT)-0.5*numpy.log(qdf.sigmaT2(R,z))) < 0.01, 'sigma vT calculated from pvRvT not equal to that from sigmaT2 for staeckel actions'\n assert numpy.fabs(svRvT) < 0.01, 'correlation between vR and vT calculated from pvRvT not equal to zero for staeckel actions'\n #ngl=24\n pvRvT= numpy.array([[qdf.pvRvT(vr,vt,R,z,ngl=40) for vt in vTs] for vr in vRs])\n mvR= numpy.sum(tvR*pvRvT)/numpy.sum(pvRvT)\n mvT= numpy.sum(tvT*pvRvT)/numpy.sum(pvRvT)\n svR= numpy.sqrt(numpy.sum(tvR**2.*pvRvT)/numpy.sum(pvRvT)-mvR**2.)\n svT= numpy.sqrt(numpy.sum(tvT**2.*pvRvT)/numpy.sum(pvRvT)-mvT**2.)\n svRvT= (numpy.sum(tvR*tvT*pvRvT)/numpy.sum(pvRvT)-mvR*mvT)/svR/svT\n assert numpy.fabs(mvR) < 0.01, 'mean vR calculated from pvRvT not equal to zero for staeckel actions'\n assert numpy.fabs(mvT-qdf.meanvT(R,z)) < 0.01, 'mean vT calculated from pvRvT not equal to zero for staeckel actions'\n assert numpy.fabs(numpy.log(svR)-0.5*numpy.log(qdf.sigmaR2(R,z))) < 0.01, 'sigma vR calculated from pvRvT not equal to that from sigmaR2 for staeckel actions'\n assert numpy.fabs(numpy.log(svT)-0.5*numpy.log(qdf.sigmaT2(R,z))) < 0.01, 'sigma vT calculated from pvRvT not equal to that from sigmaT2 for staeckel actions'\n assert numpy.fabs(svRvT) < 0.01, 'correlation between vR and vT calculated from pvRvT not equal to zero for staeckel actions'\n #ngl=11, shouldn't work\n try:\n pvRvT= numpy.array([[qdf.pvRvT(vr,vt,R,z,ngl=11) for vt in vTs] for vr in vRs])\n except ValueError: pass\n else: raise AssertionError('pvz w/ ngl=odd did not raise ValueError')\n return None\n\ndef test_pvTvz_adiabatic():\n qdf= quasiisothermaldf(1./4.,0.2,0.1,1.,1.,\n pot=MWPotential,aA=aAA,cutcounter=True)\n R,z= 0.8, 0.1\n vTs= numpy.linspace(0.,1.5,51)\n vzs= numpy.linspace(-1.,1.,21)\n pvTvz= numpy.array([[qdf.pvTvz(vt,vz,R,z) for vt in vTs] for vz in vzs])\n tvT= numpy.tile(vTs,(len(vzs),1))\n tvz= numpy.tile(vzs,(len(vTs),1)).T\n mvz= numpy.sum(tvz*pvTvz)/numpy.sum(pvTvz)\n mvT= numpy.sum(tvT*pvTvz)/numpy.sum(pvTvz)\n svz= numpy.sqrt(numpy.sum(tvz**2.*pvTvz)/numpy.sum(pvTvz)-mvz**2.)\n svT= numpy.sqrt(numpy.sum(tvT**2.*pvTvz)/numpy.sum(pvTvz)-mvT**2.)\n svTvz= (numpy.sum(tvz*tvT*pvTvz)/numpy.sum(pvTvz)-mvz*mvT)/svz/svT\n assert numpy.fabs(mvz) < 0.01, 'mean vz calculated from pvTvz not equal to zero for adiabatic actions'\n assert numpy.fabs(mvT-qdf.meanvT(R,z)) < 0.01, 'mean vT calculated from pvTvz not equal to zero for adiabatic actions'\n assert numpy.fabs(numpy.log(svz)-0.5*numpy.log(qdf.sigmaz2(R,z))) < 0.01, 'sigma vz calculated from pvTvz not equal to that from sigmaz2 for adiabatic actions'\n assert numpy.fabs(numpy.log(svT)-0.5*numpy.log(qdf.sigmaT2(R,z))) < 0.01, 'sigma vT calculated from pvTvz not equal to that from sigmaT2 for adiabatic actions'\n assert numpy.fabs(svTvz) < 0.01, 'correlation between vz and vT calculated from pvTvz not equal to zero for adiabatic actions'\n return 
None\n\ndef test_pvTvz_staeckel():\n qdf= quasiisothermaldf(1./4.,0.2,0.1,1.,1.,\n pot=MWPotential,aA=aAS,cutcounter=True)\n R,z= 0.8, 0.1\n vzs= numpy.linspace(-1.,1.,21)\n vTs= numpy.linspace(0.,1.5,51)\n pvTvz= numpy.array([[qdf.pvTvz(vt,vz,R,z) for vt in vTs] for vz in vzs])\n tvz= numpy.tile(vzs,(len(vTs),1)).T\n tvT= numpy.tile(vTs,(len(vzs),1))\n mvz= numpy.sum(tvz*pvTvz)/numpy.sum(pvTvz)\n mvT= numpy.sum(tvT*pvTvz)/numpy.sum(pvTvz)\n svz= numpy.sqrt(numpy.sum(tvz**2.*pvTvz)/numpy.sum(pvTvz)-mvz**2.)\n svT= numpy.sqrt(numpy.sum(tvT**2.*pvTvz)/numpy.sum(pvTvz)-mvT**2.)\n svTvz= (numpy.sum(tvz*tvT*pvTvz)/numpy.sum(pvTvz)-mvz*mvT)/svz/svT\n assert numpy.fabs(mvz) < 0.01, 'mean vz calculated from pvTvz not equal to zero for staeckel actions'\n assert numpy.fabs(mvT-qdf.meanvT(R,z)) < 0.01, 'mean vT calculated from pvTvz not equal to zero for staeckel actions'\n assert numpy.fabs(numpy.log(svz)-0.5*numpy.log(qdf.sigmaz2(R,z))) < 0.01, 'sigma vz calculated from pvTvz not equal to that from sigmaz2 for staeckel actions'\n assert numpy.fabs(numpy.log(svT)-0.5*numpy.log(qdf.sigmaT2(R,z))) < 0.01, 'sigma vT calculated from pvTvz not equal to that from sigmaT2 for staeckel actions'\n assert numpy.fabs(svTvz) < 0.01, 'correlation between vz and vT calculated from pvTvz not equal to zero for staeckel actions'\n return None\n\ndef test_pvTvz_staeckel_diffngl():\n qdf= quasiisothermaldf(1./4.,0.2,0.1,1.,1.,\n pot=MWPotential,aA=aAS,cutcounter=True)\n R,z= 0.8, 0.1\n vzs= numpy.linspace(-1.,1.,21)\n vTs= numpy.linspace(0.,1.5,51)\n #ngl=10\n pvTvz= numpy.array([[qdf.pvTvz(vt,vz,R,z,ngl=10) for vt in vTs] for vz in vzs])\n tvz= numpy.tile(vzs,(len(vTs),1)).T\n tvT= numpy.tile(vTs,(len(vzs),1))\n mvz= numpy.sum(tvz*pvTvz)/numpy.sum(pvTvz)\n mvT= numpy.sum(tvT*pvTvz)/numpy.sum(pvTvz)\n svz= numpy.sqrt(numpy.sum(tvz**2.*pvTvz)/numpy.sum(pvTvz)-mvz**2.)\n svT= numpy.sqrt(numpy.sum(tvT**2.*pvTvz)/numpy.sum(pvTvz)-mvT**2.)\n svTvz= (numpy.sum(tvz*tvT*pvTvz)/numpy.sum(pvTvz)-mvz*mvT)/svz/svT\n assert numpy.fabs(mvz) < 0.01, 'mean vz calculated from pvTvz not equal to zero for staeckel actions'\n assert numpy.fabs(mvT-qdf.meanvT(R,z)) < 0.01, 'mean vT calculated from pvTvz not equal to zero for staeckel actions'\n assert numpy.fabs(numpy.log(svz)-0.5*numpy.log(qdf.sigmaz2(R,z))) < 0.01, 'sigma vz calculated from pvTvz not equal to that from sigmaz2 for staeckel actions'\n assert numpy.fabs(numpy.log(svT)-0.5*numpy.log(qdf.sigmaT2(R,z))) < 0.01, 'sigma vT calculated from pvTvz not equal to that from sigmaT2 for staeckel actions'\n assert numpy.fabs(svTvz) < 0.01, 'correlation between vz and vT calculated from pvTvz not equal to zero for staeckel actions'\n #ngl=24\n pvTvz= numpy.array([[qdf.pvTvz(vt,vz,R,z,ngl=40) for vt in vTs] for vz in vzs])\n mvz= numpy.sum(tvz*pvTvz)/numpy.sum(pvTvz)\n mvT= numpy.sum(tvT*pvTvz)/numpy.sum(pvTvz)\n svz= numpy.sqrt(numpy.sum(tvz**2.*pvTvz)/numpy.sum(pvTvz)-mvz**2.)\n svT= numpy.sqrt(numpy.sum(tvT**2.*pvTvz)/numpy.sum(pvTvz)-mvT**2.)\n svTvz= (numpy.sum(tvz*tvT*pvTvz)/numpy.sum(pvTvz)-mvz*mvT)/svz/svT\n assert numpy.fabs(mvz) < 0.01, 'mean vz calculated from pvTvz not equal to zero for staeckel actions'\n assert numpy.fabs(mvT-qdf.meanvT(R,z)) < 0.01, 'mean vT calculated from pvTvz not equal to zero for staeckel actions'\n assert numpy.fabs(numpy.log(svz)-0.5*numpy.log(qdf.sigmaz2(R,z))) < 0.01, 'sigma vz calculated from pvTvz not equal to that from sigmaz2 for staeckel actions'\n assert numpy.fabs(numpy.log(svT)-0.5*numpy.log(qdf.sigmaT2(R,z))) < 0.01, 'sigma vT calculated 
from pvTvz not equal to that from sigmaT2 for staeckel actions'\n assert numpy.fabs(svTvz) < 0.01, 'correlation between vz and vT calculated from pvTvz not equal to zero for staeckel actions'\n #ngl=11, shouldn't work\n try:\n pvTvz= numpy.array([[qdf.pvTvz(vt,vz,R,z,ngl=11) for vt in vTs] for vz in vzs])\n except ValueError: pass\n else: raise AssertionError('pvz w/ ngl=odd did not raise ValueError')\n return None\n\ndef test_pvRvz_adiabatic():\n qdf= quasiisothermaldf(1./4.,0.2,0.1,1.,1.,\n pot=MWPotential,aA=aAA,cutcounter=True)\n R,z= 0.8, 0.1\n vRs= numpy.linspace(-1.,1.,21)\n vzs= numpy.linspace(-1.,1.,21)\n pvRvz= numpy.array([[qdf.pvRvz(vr,vz,R,z) for vz in vzs] for vr in vRs])\n tvR= numpy.tile(vRs,(len(vzs),1)).T\n tvz= numpy.tile(vzs,(len(vRs),1))\n mvR= numpy.sum(tvR*pvRvz)/numpy.sum(pvRvz)\n mvz= numpy.sum(tvz*pvRvz)/numpy.sum(pvRvz)\n svR= numpy.sqrt(numpy.sum(tvR**2.*pvRvz)/numpy.sum(pvRvz)-mvR**2.)\n svz= numpy.sqrt(numpy.sum(tvz**2.*pvRvz)/numpy.sum(pvRvz)-mvz**2.)\n svRvz= (numpy.sum(tvR*tvz*pvRvz)/numpy.sum(pvRvz)-mvR*mvz)/svR/svz\n sR2= qdf.sigmaR2(R,z) #direct calculation\n sz2= qdf.sigmaz2(R,z)\n assert numpy.fabs(mvR) < 0.01, 'mean vR calculated from pvRvz not equal to zero for adiabatic actions'\n assert numpy.fabs(mvz) < 0.01, 'mean vz calculated from pvRvz not equal to zero for adiabatic actions'\n assert numpy.fabs(numpy.log(svR)-0.5*numpy.log(sR2)) < 0.01, 'sigma vR calculated from pvRvz not equal to that from sigmaR2 for adiabatic actions'\n assert numpy.fabs(numpy.log(svz)-0.5*numpy.log(sz2)) < 0.01, 'sigma vz calculated from pvRvz not equal to that from sigmaz2 for adiabatic actions'\n assert numpy.fabs(svRvz-qdf.sigmaRz(R,z)/numpy.sqrt(sR2*sz2)) < 0.01, 'correlation between vR and vz calculated from pvRvz not equal to zero for adiabatic actions'\n return None\n\ndef test_pvRvz_staeckel():\n qdf= quasiisothermaldf(1./4.,0.2,0.1,1.,1.,\n pot=MWPotential,aA=aAS,cutcounter=True)\n R,z= 0.8, 0.1\n vRs= numpy.linspace(-1.,1.,21)\n vzs= numpy.linspace(-1.,1.,21)\n pvRvz= numpy.array([[qdf.pvRvz(vr,vz,R,z) for vz in vzs] for vr in vRs])\n tvR= numpy.tile(vRs,(len(vzs),1)).T\n tvz= numpy.tile(vzs,(len(vRs),1))\n mvR= numpy.sum(tvR*pvRvz)/numpy.sum(pvRvz)\n mvz= numpy.sum(tvz*pvRvz)/numpy.sum(pvRvz)\n svR= numpy.sqrt(numpy.sum(tvR**2.*pvRvz)/numpy.sum(pvRvz)-mvR**2.)\n svz= numpy.sqrt(numpy.sum(tvz**2.*pvRvz)/numpy.sum(pvRvz)-mvz**2.)\n svRvz= (numpy.sum(tvR*tvz*pvRvz)/numpy.sum(pvRvz)-mvR*mvz)/svR/svz\n sR2= qdf.sigmaR2(R,z) #direct calculation\n sz2= qdf.sigmaz2(R,z)\n assert numpy.fabs(mvR) < 0.01, 'mean vR calculated from pvRvz not equal to zero for staeckel actions'\n assert numpy.fabs(mvz) < 0.01, 'mean vz calculated from pvRvz not equal to zero for staeckel actions'\n assert numpy.fabs(numpy.log(svR)-0.5*numpy.log(sR2)) < 0.01, 'sigma vR calculated from pvRvz not equal to that from sigmaR2 for staeckel actions'\n assert numpy.fabs(numpy.log(svz)-0.5*numpy.log(sz2)) < 0.01, 'sigma vz calculated from pvRvz not equal to that from sigmaz2 for staeckel actions'\n assert numpy.fabs(svRvz-qdf.sigmaRz(R,z)/numpy.sqrt(sR2*sz2)) < 0.01, 'correlation between vR and vz calculated from pvRvz not equal to zero for adiabatic actions'\n return None\n\ndef test_pvRvz_staeckel_diffngl():\n qdf= quasiisothermaldf(1./4.,0.2,0.1,1.,1.,\n pot=MWPotential,aA=aAS,cutcounter=True)\n R,z= 0.8, 0.1\n vRs= numpy.linspace(-1.,1.,21)\n vzs= numpy.linspace(-1.,1.,21)\n #ngl=10\n pvRvz= numpy.array([[qdf.pvRvz(vr,vz,R,z,ngl=10) for vz in vzs] for vr in vRs])\n tvR= 
numpy.tile(vRs,(len(vzs),1)).T\n tvz= numpy.tile(vzs,(len(vRs),1))\n mvR= numpy.sum(tvR*pvRvz)/numpy.sum(pvRvz)\n mvz= numpy.sum(tvz*pvRvz)/numpy.sum(pvRvz)\n svR= numpy.sqrt(numpy.sum(tvR**2.*pvRvz)/numpy.sum(pvRvz)-mvR**2.)\n svz= numpy.sqrt(numpy.sum(tvz**2.*pvRvz)/numpy.sum(pvRvz)-mvz**2.)\n svRvz= (numpy.sum(tvR*tvz*pvRvz)/numpy.sum(pvRvz)-mvR*mvz)/svR/svz\n sR2= qdf.sigmaR2(R,z) #direct calculation\n sz2= qdf.sigmaz2(R,z)\n assert numpy.fabs(mvR) < 0.01, 'mean vR calculated from pvRvz not equal to zero for staeckel actions'\n assert numpy.fabs(mvz) < 0.01, 'mean vz calculated from pvRvz not equal to zero for staeckel actions'\n assert numpy.fabs(numpy.log(svR)-0.5*numpy.log(sR2)) < 0.01, 'sigma vR calculated from pvRvz not equal to that from sigmaR2 for staeckel actions'\n assert numpy.fabs(numpy.log(svz)-0.5*numpy.log(sz2)) < 0.01, 'sigma vz calculated from pvRvz not equal to that from sigmaz2 for staeckel actions'\n assert numpy.fabs(svRvz-qdf.sigmaRz(R,z)/numpy.sqrt(sR2*sz2)) < 0.01, 'correlation between vR and vz calculated from pvRvz not equal to zero for adiabatic actions'\n #ngl=24\n pvRvz= numpy.array([[qdf.pvRvz(vr,vz,R,z,ngl=40) for vz in vzs] for vr in vRs])\n mvR= numpy.sum(tvR*pvRvz)/numpy.sum(pvRvz)\n mvz= numpy.sum(tvz*pvRvz)/numpy.sum(pvRvz)\n svR= numpy.sqrt(numpy.sum(tvR**2.*pvRvz)/numpy.sum(pvRvz)-mvR**2.)\n svz= numpy.sqrt(numpy.sum(tvz**2.*pvRvz)/numpy.sum(pvRvz)-mvz**2.)\n svRvz= (numpy.sum(tvR*tvz*pvRvz)/numpy.sum(pvRvz)-mvR*mvz)/svR/svz\n assert numpy.fabs(mvR) < 0.01, 'mean vR calculated from pvRvz not equal to zero for staeckel actions'\n assert numpy.fabs(mvz) < 0.01, 'mean vz calculated from pvRvz not equal to zero for staeckel actions'\n assert numpy.fabs(numpy.log(svR)-0.5*numpy.log(sR2)) < 0.01, 'sigma vR calculated from pvRvz not equal to that from sigmaR2 for staeckel actions'\n assert numpy.fabs(numpy.log(svz)-0.5*numpy.log(sz2)) < 0.01, 'sigma vz calculated from pvRvz not equal to that from sigmaz2 for staeckel actions'\n assert numpy.fabs(svRvz-qdf.sigmaRz(R,z)/numpy.sqrt(sR2*sz2)) < 0.01, 'correlation between vR and vz calculated from pvRvz not equal to zero for adiabatic actions'\n #ngl=11, shouldn't work\n try:\n pvRvz= numpy.array([[qdf.pvRvz(vr,vz,R,z,ngl=11) for vz in vzs] for vr in vRs])\n except ValueError: pass\n else: raise AssertionError('pvz w/ ngl=odd did not raise ValueError')\n return None\n\ndef test_pvRvz_staeckel_arrayin():\n qdf= quasiisothermaldf(1./4.,0.2,0.1,1.,1.,\n pot=MWPotential,aA=aAS,cutcounter=True)\n R,z= 0.8, 0.1\n pvRvz= qdf.pvRvz(0.1*numpy.ones(2),0.05*numpy.ones(2),R*numpy.ones(2),z*numpy.ones(2))\n assert numpy.all(numpy.fabs(numpy.log(pvRvz)-numpy.log(qdf.pvRvz(0.1,0.05,R,z))) < 10.**-10.), 'pvRvz calculated with R and z array input does not equal to calculated with scalar input'\n return None\n\n",
"from __future__ import division, print_function\n\nimport os\nimport copy\nimport pickle\nimport numpy\nfrom scipy import integrate\nfrom ..util import plot, config, conversion\nfrom ..util.conversion import physical_conversion,\\\n potential_physical_input, physical_compatible\nfrom .Potential import Potential, PotentialError, lindbladR, flatten\nfrom .DissipativeForce import _isDissipative\nfrom .plotRotcurve import plotRotcurve\nfrom .plotEscapecurve import _INF, plotEscapecurve\nclass planarPotential(object):\n \"\"\"Class representing 2D (R,\\phi) potentials\"\"\"\n def __init__(self,amp=1.,ro=None,vo=None):\n self._amp= amp\n self.dim= 2\n self.isNonAxi= True #Gets reset by planarAxiPotential\n self.isRZ= False\n self.hasC= False\n self.hasC_dxdv= False\n self.hasC_dens= False\n # Parse ro and vo\n if ro is None:\n self._ro= config.__config__.getfloat('normalization','ro')\n self._roSet= False\n else:\n self._ro= conversion.parse_length_kpc(ro)\n self._roSet= True\n if vo is None:\n self._vo= config.__config__.getfloat('normalization','vo')\n self._voSet= False\n else:\n self._vo= conversion.parse_velocity_kms(vo)\n self._voSet= True\n return None\n\n def __mul__(self,b):\n \"\"\"\n NAME:\n\n __mul__\n\n PURPOSE:\n\n Multiply a planarPotential's amplitude by a number\n\n INPUT:\n\n b - number\n\n OUTPUT:\n\n New instance with amplitude = (old amplitude) x b\n\n HISTORY:\n\n 2019-01-27 - Written - Bovy (UofT)\n\n \"\"\"\n if not isinstance(b,(int,float)):\n raise TypeError(\"Can only multiply a planarPotential instance with a number\")\n out= copy.deepcopy(self)\n out._amp*= b\n return out\n # Similar functions\n __rmul__= __mul__\n def __div__(self,b): return self.__mul__(1./b)\n __truediv__= __div__\n\n def __add__(self,b):\n \"\"\"\n NAME:\n\n __add__\n\n PURPOSE:\n\n Add planarPotential instances together to create a multi-component potential (e.g., pot= pot1+pot2+pot3)\n\n INPUT:\n\n b - planarPotential instance or a list thereof\n\n OUTPUT:\n\n List of planarPotential instances that represents the combined potential\n\n HISTORY:\n\n 2019-01-27 - Written - Bovy (UofT)\n\n \"\"\"\n from ..potential import flatten as flatten_pot\n if not isinstance(flatten_pot([b])[0],(Potential,planarPotential)):\n raise TypeError(\"\"\"Can only combine galpy Potential\"\"\"\n \"\"\"/planarPotential objects with \"\"\"\n \"\"\"other such objects or lists thereof\"\"\")\n assert physical_compatible(self,b), \\\n \"\"\"Physical unit conversion parameters (ro,vo) are not \"\"\"\\\n \"\"\"compatible between potentials to be combined\"\"\"\n if isinstance(b,list):\n return [self]+b\n else:\n return [self,b]\n # Define separately to keep order\n def __radd__(self,b):\n from ..potential import flatten as flatten_pot\n if not isinstance(flatten_pot([b])[0],(Potential,planarPotential)):\n raise TypeError(\"\"\"Can only combine galpy Force objects with \"\"\"\n \"\"\"other Force objects or lists thereof\"\"\")\n assert physical_compatible(self,b), \\\n \"\"\"Physical unit conversion parameters (ro,vo) are not \"\"\"\\\n \"\"\"compatible between potentials to be combined\"\"\"\n # If we get here, b has to be a list\n return b+[self]\n\n def turn_physical_off(self):\n \"\"\"\n NAME:\n\n turn_physical_off\n\n PURPOSE:\n\n turn off automatic returning of outputs in physical units\n\n INPUT:\n\n (none)\n\n OUTPUT:\n\n (none)\n\n HISTORY:\n\n 2016-01-30 - Written - Bovy (UofT)\n\n \"\"\"\n self._roSet= False\n self._voSet= False\n return None\n\n def turn_physical_on(self,ro=None,vo=None):\n \"\"\"\n NAME:\n\n 
turn_physical_on\n\n PURPOSE:\n\n turn on automatic returning of outputs in physical units\n\n INPUT:\n\n ro= reference distance (kpc; can be Quantity)\n\n vo= reference velocity (km/s; can be Quantity)\n\n OUTPUT:\n\n (none)\n\n HISTORY:\n\n 2016-01-30 - Written - Bovy (UofT)\n\n 2020-04-22 - Don't turn on a parameter when it is False - Bovy (UofT)\n\n \"\"\"\n if not ro is False: self._roSet= True\n if not vo is False: self._voSet= True\n if not ro is None and ro:\n self._ro= conversion.parse_length_kpc(ro)\n if not vo is None and vo:\n self._vo= conversion.parse_velocity_kms(vo)\n return None\n\n @potential_physical_input\n @physical_conversion('energy',pop=True)\n def __call__(self,R,phi=0.,t=0.,dR=0,dphi=0):\n \"\"\"\n NAME:\n\n __call__\n\n PURPOSE:\n\n evaluate the potential\n\n INPUT: \n\n R - Cylindrica radius (can be Quantity)\n\n phi= azimuth (optional; can be Quantity)\n\n t= time (optional; can be Quantity)\n\n OUTPUT:\n\n Phi(R(,phi,t)))\n\n HISTORY:\n\n 2010-07-13 - Written - Bovy (NYU)\n\n \"\"\"\n return self._call_nodecorator(R,phi=phi,t=t,dR=dR,dphi=dphi)\n\n def _call_nodecorator(self,R,phi=0.,t=0.,dR=0,dphi=0):\n # Separate, so it can be used during orbit integration\n if dR == 0 and dphi == 0:\n try:\n return self._amp*self._evaluate(R,phi=phi,t=t)\n except AttributeError: #pragma: no cover\n raise PotentialError(\"'_evaluate' function not implemented for this potential\")\n elif dR == 1 and dphi == 0:\n return -self.Rforce(R,phi=phi,t=t,use_physical=False)\n elif dR == 0 and dphi == 1:\n return -self.phiforce(R,phi=phi,t=t,use_physical=False)\n elif dR == 2 and dphi == 0:\n return self.R2deriv(R,phi=phi,t=t,use_physical=False)\n elif dR == 0 and dphi == 2:\n return self.phi2deriv(R,phi=phi,t=t,use_physical=False)\n elif dR == 1 and dphi == 1:\n return self.Rphideriv(R,phi=phi,t=t,use_physical=False)\n elif dR != 0 or dphi != 0:\n raise NotImplementedError('Higher-order derivatives not implemented for this potential')\n\n @potential_physical_input\n @physical_conversion('force',pop=True)\n def Rforce(self,R,phi=0.,t=0.):\n \"\"\"\n NAME:\n\n Rforce\n\n PURPOSE:\n\n evaluate the radial force\n\n INPUT:\n\n R - Cylindrical radius (can be Quantity)\n\n phi= azimuth (optional; can be Quantity)\n\n t= time (optional; can be Quantity)\n\n OUTPUT:\n\n F_R(R,(\\phi,t)))\n\n HISTORY:\n\n 2010-07-13 - Written - Bovy (NYU)\n\n \"\"\"\n return self._Rforce_nodecorator(R,phi=phi,t=t)\n\n def _Rforce_nodecorator(self,R,phi=0.,t=0.):\n # Separate, so it can be used during orbit integration\n try:\n return self._amp*self._Rforce(R,phi=phi,t=t)\n except AttributeError: #pragma: no cover\n raise PotentialError(\"'_Rforce' function not implemented for this potential\")\n\n @potential_physical_input\n @physical_conversion('energy',pop=True)\n def phiforce(self,R,phi=0.,t=0.):\n \"\"\"\n NAME:\n\n phiforce\n\n PURPOSE:\n\n evaluate the phi force = - d Phi / d phi (note that this is a torque, not a force!)\n\n INPUT:\n\n R - Cylindrical radius (can be Quantity)\n\n phi= azimuth (optional; can be Quantity)\n\n t= time (optional; can be Quantity)\n\n OUTPUT:\n\n F_phi(R,(phi,t)))\n\n HISTORY:\n\n 2010-07-13 - Written - Bovy (NYU)\n\n \"\"\"\n return self._phiforce_nodecorator(R,phi=phi,t=t)\n \n def _phiforce_nodecorator(self,R,phi=0.,t=0.):\n # Separate, so it can be used during orbit integration\n try:\n return self._amp*self._phiforce(R,phi=phi,t=t)\n except AttributeError: #pragma: no cover\n raise PotentialError(\"'_phiforce' function not implemented for this potential\")\n\n 
@potential_physical_input\n @physical_conversion('forcederivative',pop=True)\n def R2deriv(self,R,phi=0.,t=0.):\n \"\"\"\n NAME:\n\n R2deriv\n\n PURPOSE:\n\n evaluate the second radial derivative\n\n INPUT:\n\n R - Cylindrical radius (can be Quantity)\n\n phi= azimuth (optional; can be Quantity)\n\n t= time (optional; can be Quantity)\n\n OUTPUT:\n\n d2phi/dR2\n\n HISTORY:\n\n 2011-10-09 - Written - Bovy (IAS)\n\n \"\"\"\n try:\n return self._amp*self._R2deriv(R,phi=phi,t=t)\n except AttributeError: #pragma: no cover\n raise PotentialError(\"'_R2deriv' function not implemented for this potential\") \n\n @potential_physical_input\n @physical_conversion('energy',pop=True)\n def phi2deriv(self,R,phi=0.,t=0.):\n \"\"\"\n NAME:\n\n phi2deriv\n\n PURPOSE:\n\n evaluate the second azimuthal derivative\n\n INPUT:\n\n R - Cylindrical radius (can be Quantity)\n\n phi= azimuth (optional; can be Quantity)\n\n t= time (optional; can be Quantity)\n\n OUTPUT:\n\n d2phi/daz2\n\n HISTORY:\n\n 2014-04-06 - Written - Bovy (IAS)\n\n \"\"\"\n try:\n return self._amp*self._phi2deriv(R,phi=phi,t=t)\n except AttributeError: #pragma: no cover\n raise PotentialError(\"'_phi2deriv' function not implemented for this potential\") \n\n @potential_physical_input\n @physical_conversion('force',pop=True)\n def Rphideriv(self,R,phi=0.,t=0.):\n \"\"\"\n NAME:\n\n Rphideriv\n\n PURPOSE:\n\n evaluate the mixed radial and azimuthal derivative\n\n INPUT:\n\n R - Cylindrical radius (can be Quantity)\n\n phi= azimuth (optional can be Quantity)\n\n t= time (optional; can be Quantity)\n\n OUTPUT:\n\n d2phi/dR d az\n\n HISTORY:\n\n 2014-05-21 - Written - Bovy (IAS)\n\n \"\"\"\n try:\n return self._amp*self._Rphideriv(R,phi=phi,t=t)\n except AttributeError: #pragma: no cover\n raise PotentialError(\"'_Rphideriv' function not implemented for this potential\") \n\n def plot(self,*args,**kwargs):\n \"\"\"\n NAME:\n plot\n PURPOSE:\n plot the potential\n INPUT:\n Rrange - range (can be Quantity)\n grid - number of points to plot\n savefilename - save to or restore from this savefile (pickle)\n +galpy.util.plot.plot(*args,**kwargs)\n OUTPUT:\n plot to output device\n HISTORY:\n 2010-07-13 - Written - Bovy (NYU)\n \"\"\"\n return plotplanarPotentials(self,*args,**kwargs)\n\nclass planarAxiPotential(planarPotential):\n \"\"\"Class representing axisymmetric planar potentials\"\"\"\n def __init__(self,amp=1.,ro=None,vo=None):\n planarPotential.__init__(self,amp=amp,ro=ro,vo=vo)\n self.isNonAxi= False\n return None\n \n def _phiforce(self,R,phi=0.,t=0.):\n return 0.\n\n def _phi2deriv(self,R,phi=0.,t=0.): #pragma: no cover\n \"\"\"\n NAME:\n _phi2deriv\n PURPOSE:\n evaluate the second azimuthal derivative for this potential\n INPUT:\n R - Galactocentric cylindrical radius\n z - vertical height\n phi - azimuth\n t - time\n OUTPUT:\n the second azimuthal derivative\n HISTORY:\n 2011-10-17 - Written - Bovy (IAS)\n \"\"\"\n return 0.\n\n def _Rphideriv(self,R,phi=0.,t=0.): #pragma: no cover\n \"\"\"\n NAME:\n _Rphideriv\n PURPOSE:\n evaluate the radial+azimuthal derivative for this potential\n INPUT:\n R - Galactocentric cylindrical radius\n z - vertical height\n phi - azimuth\n t - time\n OUTPUT:\n the radial+azimuthal derivative\n HISTORY:\n 2011-10-17 - Written - Bovy (IAS)\n \"\"\"\n return 0.\n\n @potential_physical_input\n @physical_conversion('velocity',pop=True)\n def vcirc(self,R,phi=None,t=0.):\n \"\"\"\n \n NAME:\n \n vcirc\n \n PURPOSE:\n \n calculate the circular velocity at R in potential Pot\n\n INPUT:\n \n Pot - Potential instance 
or list of such instances\n \n R - Galactocentric radius (can be Quantity)\n \n phi= (None) azimuth to use for non-axisymmetric potentials\n\n t - time (optional; can be Quantity)\n\n OUTPUT:\n \n circular rotation velocity\n \n HISTORY:\n \n 2011-10-09 - Written - Bovy (IAS)\n \n 2016-06-15 - Added phi= keyword for non-axisymmetric potential - Bovy (UofT)\n\n \"\"\"\n return numpy.sqrt(R*-self.Rforce(R,phi=phi,t=t,use_physical=False))\n\n @potential_physical_input\n @physical_conversion('frequency',pop=True)\n def omegac(self,R,t=0.):\n \"\"\"\n \n NAME:\n \n omegac\n \n PURPOSE:\n \n calculate the circular angular speed at R in potential Pot\n\n INPUT:\n \n Pot - Potential instance or list of such instances\n \n R - Galactocentric radius (can be Quantity)\n\n t - time (optional; can be Quantity)\n \n OUTPUT:\n \n circular angular speed\n \n HISTORY:\n \n 2011-10-09 - Written - Bovy (IAS)\n \n \"\"\"\n return numpy.sqrt(-self.Rforce(R,t=t,use_physical=False)/R) \n\n @potential_physical_input\n @physical_conversion('frequency',pop=True)\n def epifreq(self,R,t=0.):\n \"\"\"\n \n NAME:\n \n epifreq\n \n PURPOSE:\n \n calculate the epicycle frequency at R in this potential\n \n INPUT:\n \n R - Galactocentric radius (can be Quantity)\n\n t - time (optional; can be Quantity)\n \n OUTPUT:\n \n epicycle frequency\n \n HISTORY:\n \n 2011-10-09 - Written - Bovy (IAS)\n \n \"\"\"\n return numpy.sqrt(self.R2deriv(R,t=t,use_physical=False)\n -3./R*self.Rforce(R,t=t,use_physical=False))\n\n @physical_conversion('position',pop=True)\n def lindbladR(self,OmegaP,m=2,t=0.,**kwargs):\n \"\"\"\n \n NAME:\n \n lindbladR\n \n PURPOSE:\n \n calculate the radius of a Lindblad resonance\n \n INPUT:\n \n OmegaP - pattern speed (can be Quantity)\n\n m= order of the resonance (as in m(O-Op)=kappa (negative m for outer)\n use m='corotation' for corotation\n +scipy.optimize.brentq xtol,rtol,maxiter kwargs\n\n t - time (optional; can be Quantity)\n \n OUTPUT:\n \n radius of Linblad resonance, None if there is no resonance\n \n HISTORY:\n \n 2011-10-09 - Written - Bovy (IAS)\n \n \"\"\"\n OmegaP= conversion.parse_frequency(OmegaP,ro=self._ro,vo=self._vo)\n return lindbladR(self,OmegaP,m=m,t=t,use_physical=False,**kwargs)\n\n @potential_physical_input\n @physical_conversion('velocity',pop=True)\n def vesc(self,R,t=0.):\n \"\"\"\n\n NAME:\n\n vesc\n\n PURPOSE:\n\n calculate the escape velocity at R for potential Pot\n\n INPUT:\n\n Pot - Potential instances or list thereof\n\n R - Galactocentric radius (can be Quantity)\n\n t - time (optional; can be Quantity)\n\n OUTPUT:\n\n escape velocity\n\n HISTORY:\n\n 2011-10-09 - Written - Bovy (IAS)\n\n \"\"\"\n return numpy.sqrt(2.*(self(_INF,t=t,use_physical=False)\n -self(R,t=t,use_physical=False)))\n \n def plotRotcurve(self,*args,**kwargs):\n \"\"\"\n NAME:\n\n plotRotcurve\n\n PURPOSE:\n\n plot the rotation curve for this potential\n\n INPUT:\n\n Rrange - range (can be Quantity)\n\n grid - number of points to plot\n\n savefilename - save to or restore from this savefile (pickle)\n\n +galpy.util.plot.plot(*args,**kwargs)\n\n OUTPUT:\n\n plot to output device\n\n HISTORY:\n\n 2010-07-13 - Written - Bovy (NYU)\n\n \"\"\"\n return plotRotcurve(self,*args,**kwargs)\n\n def plotEscapecurve(self,*args,**kwargs):\n \"\"\"\n NAME:\n\n plotEscapecurve\n\n PURPOSE:\n\n plot the escape velocity curve for this potential\n\n INPUT:\n\n Rrange - range (can be Quantity)\n\n grid - number of points to plot\n\n savefilename - save to or restore from this savefile (pickle)\n\n 
+galpy.util.plot.plot(*args,**kwargs)\n\n OUTPUT:\n\n plot to output device\n\n HISTORY:\n\n 2010-07-13 - Written - Bovy (NYU)\n\n \"\"\"\n return plotEscapecurve(self,*args,**kwargs)\n\nclass planarPotentialFromRZPotential(planarAxiPotential):\n \"\"\"Class that represents an axisymmetic planar potential derived from a \n RZPotential\"\"\"\n def __init__(self,RZPot):\n \"\"\"\n NAME:\n __init__\n PURPOSE:\n Initialize\n INPUT:\n RZPot - RZPotential instance\n OUTPUT:\n planarAxiPotential instance\n HISTORY:\n 2010-07-13 - Written - Bovy (NYU)\n \"\"\"\n planarAxiPotential.__init__(self,amp=1.,ro=RZPot._ro,vo=RZPot._vo)\n # Also transfer roSet and voSet\n self._roSet= RZPot._roSet\n self._voSet= RZPot._voSet\n self._Pot= RZPot\n self.hasC= RZPot.hasC\n self.hasC_dxdv= RZPot.hasC_dxdv\n self.hasC_dens= RZPot.hasC_dens\n return None\n\n def _evaluate(self,R,phi=0.,t=0.):\n \"\"\"\n NAME:\n _evaluate\n PURPOSE:\n evaluate the potential\n INPUT:\n R\n phi\n t\n OUTPUT:\n Pot(R(,\\phi,t))\n HISTORY:\n 2010-07-13 - Written - Bovy (NYU)\n \"\"\"\n return self._Pot(R,0.,t=t,use_physical=False)\n \n def _Rforce(self,R,phi=0.,t=0.):\n \"\"\"\n NAME:\n _Rforce\n PURPOSE:\n evaluate the radial force\n INPUT:\n R\n phi\n t\n OUTPUT:\n F_R(R(,\\phi,t))\n HISTORY:\n 2010-07-13 - Written - Bovy (NYU)\n \"\"\"\n return self._Pot.Rforce(R,0.,t=t,use_physical=False)\n\n def _R2deriv(self,R,phi=0.,t=0.):\n \"\"\"\n NAME:\n _R2deriv\n PURPOSE:\n evaluate the second radial derivative\n INPUT:\n R\n phi\n t\n OUTPUT:\n d2phi/dR2\n HISTORY:\n 2011-10-09 - Written - Bovy (IAS)\n \"\"\"\n return self._Pot.R2deriv(R,0.,t=t,use_physical=False)\n \ndef RZToplanarPotential(RZPot):\n \"\"\"\n NAME:\n\n RZToplanarPotential\n\n PURPOSE:\n\n convert an RZPotential to a planarPotential in the mid-plane (z=0)\n\n INPUT:\n\n RZPot - RZPotential instance or list of such instances (existing planarPotential instances are just copied to the output)\n\n OUTPUT:\n\n planarPotential instance(s)\n\n HISTORY:\n\n 2010-07-13 - Written - Bovy (NYU)\n\n \"\"\"\n RZPot= flatten(RZPot)\n if _isDissipative(RZPot):\n raise NotImplementedError(\"Converting dissipative forces to 2D potentials is currently not supported\")\n if isinstance(RZPot,list):\n out= []\n for pot in RZPot:\n if isinstance(pot,planarPotential) and not pot.isNonAxi:\n out.append(pot)\n elif isinstance(pot,Potential) and not pot.isNonAxi:\n out.append(planarPotentialFromRZPotential(pot))\n else:\n raise PotentialError(\"Input to 'RZToplanarPotential' is neither an RZPotential-instance or a list of such instances\")\n return out\n elif isinstance(RZPot,Potential) and not RZPot.isNonAxi:\n return planarPotentialFromRZPotential(RZPot)\n elif isinstance(RZPot,planarPotential) and not RZPot.isNonAxi:\n return RZPot\n else:\n raise PotentialError(\"Input to 'RZToplanarPotential' is neither an RZPotential-instance or a list of such instances\")\n\nclass planarPotentialFromFullPotential(planarPotential):\n \"\"\"Class that represents a planar potential derived from a non-axisymmetric\n 3D potential\"\"\"\n def __init__(self,Pot):\n \"\"\"\n NAME:\n __init__\n PURPOSE:\n Initialize\n INPUT:\n Pot - Potential instance\n OUTPUT:\n planarPotential instance\n HISTORY:\n 2016-06-02 - Written - Bovy (UofT)\n \"\"\"\n planarPotential.__init__(self,amp=1.,ro=Pot._ro,vo=Pot._vo)\n # Also transfer roSet and voSet\n self._roSet= Pot._roSet\n self._voSet= Pot._voSet\n self._Pot= Pot\n self.hasC= Pot.hasC\n self.hasC_dxdv= Pot.hasC_dxdv\n self.hasC_dens= Pot.hasC_dens\n return None\n\n def 
_evaluate(self,R,phi=0.,t=0.):\n \"\"\"\n NAME:\n _evaluate\n PURPOSE:\n evaluate the potential\n INPUT:\n R\n phi\n t\n OUTPUT:\n Pot(R(,\\phi,t))\n HISTORY:\n 2016-06-02 - Written - Bovy (UofT)\n \"\"\"\n return self._Pot(R,0.,phi=phi,t=t,use_physical=False)\n \n def _Rforce(self,R,phi=0.,t=0.):\n \"\"\"\n NAME:\n _Rforce\n PURPOSE:\n evaluate the radial force\n INPUT:\n R\n phi\n t\n OUTPUT:\n F_R(R(,\\phi,t))\n HISTORY:\n 2016-06-02 - Written - Bovy (UofT)\n \"\"\"\n return self._Pot.Rforce(R,0.,phi=phi,t=t,use_physical=False)\n\n def _phiforce(self,R,phi=0.,t=0.):\n \"\"\"\n NAME:\n _phiforce\n PURPOSE:\n evaluate the azimuthal force\n INPUT:\n R\n phi\n t\n OUTPUT:\n F_phi(R(,\\phi,t))\n HISTORY:\n 2016-06-02 - Written - Bovy (UofT)\n \"\"\"\n return self._Pot.phiforce(R,0.,phi=phi,t=t,use_physical=False)\n\n def _R2deriv(self,R,phi=0.,t=0.):\n \"\"\"\n NAME:\n _R2deriv\n PURPOSE:\n evaluate the second radial derivative\n INPUT:\n R\n phi\n t\n OUTPUT:\n d2phi/dR2\n HISTORY:\n 2016-06-02 - Written - Bovy (UofT)\n \"\"\"\n return self._Pot.R2deriv(R,0.,phi=phi,t=t,use_physical=False)\n \n def _phi2deriv(self,R,phi=0.,t=0.):\n \"\"\"\n NAME:\n _phi2deriv\n PURPOSE:\n evaluate the second azimuthal derivative\n INPUT:\n R\n phi\n t\n OUTPUT:\n d2phi/dphi2\n HISTORY:\n 2016-06-02 - Written - Bovy (UofT)\n \"\"\"\n return self._Pot.phi2deriv(R,0.,phi=phi,t=t,use_physical=False)\n \n def _Rphideriv(self,R,phi=0.,t=0.):\n \"\"\"\n NAME:\n _Rphideriv\n PURPOSE:\n evaluate the mixed radial-azimuthal derivative\n INPUT:\n R\n phi\n t\n OUTPUT:\n d2phi/dRdphi\n HISTORY:\n 2016-06-02 - Written - Bovy (UofT)\n \"\"\"\n return self._Pot.Rphideriv(R,0.,phi=phi,t=t,use_physical=False)\n \n def OmegaP(self):\n \"\"\"\n NAME:\n OmegaP\n PURPOSE:\n return the pattern speed\n INPUT:\n (none)\n OUTPUT:\n pattern speed\n HISTORY:\n 2016-05-31 - Written - Bovy (UofT)\n \"\"\"\n return self._Pot.OmegaP()\n \ndef toPlanarPotential(Pot):\n \"\"\"\n NAME:\n\n toPlanarPotential\n\n PURPOSE:\n\n convert an Potential to a planarPotential in the mid-plane (z=0)\n\n INPUT:\n\n Pot - Potential instance or list of such instances (existing planarPotential instances are just copied to the output)\n\n OUTPUT:\n\n planarPotential instance(s)\n\n HISTORY:\n\n 2016-06-11 - Written - Bovy (UofT)\n\n \"\"\"\n Pot= flatten(Pot)\n if _isDissipative(Pot):\n raise NotImplementedError(\"Converting dissipative forces to 2D potentials is currently not supported\")\n elif isinstance(Pot,list):\n out= []\n for pot in Pot:\n if isinstance(pot,planarPotential):\n out.append(pot)\n elif isinstance(pot,Potential) and pot.isNonAxi:\n out.append(planarPotentialFromFullPotential(pot))\n elif isinstance(pot,Potential):\n out.append(planarPotentialFromRZPotential(pot))\n else:\n raise PotentialError(\"Input to 'toPlanarPotential' is neither an Potential-instance or a list of such instances\")\n return out\n elif isinstance(Pot,Potential) and Pot.isNonAxi:\n return planarPotentialFromFullPotential(Pot)\n elif isinstance(Pot,Potential):\n return planarPotentialFromRZPotential(Pot)\n elif isinstance(Pot,planarPotential):\n return Pot\n else:\n raise PotentialError(\"Input to 'toPlanarPotential' is neither an Potential-instance or a list of such instances\")\n\n@potential_physical_input\n@physical_conversion('energy',pop=True)\ndef evaluateplanarPotentials(Pot,R,phi=None,t=0.,dR=0,dphi=0):\n \"\"\"\n NAME:\n\n evaluateplanarPotentials\n\n PURPOSE:\n\n evaluate a (list of) planarPotential instance(s)\n\n INPUT:\n\n Pot - (list of) planarPotential 
instance(s)\n\n R - Cylindrical radius (can be Quantity)\n\n phi= azimuth (optional; can be Quantity)\n\n t= time (optional; can be Quantity)\n\n dR=, dphi= if set to non-zero integers, return the dR,dphi't derivative instead\n\n OUTPUT:\n\n Phi(R(,phi,t))\n\n HISTORY:\n\n 2010-07-13 - Written - Bovy (NYU)\n\n \"\"\"\n return _evaluateplanarPotentials(Pot,R,phi=phi,t=t,dR=dR,dphi=dphi)\n\ndef _evaluateplanarPotentials(Pot,R,phi=None,t=0.,dR=0,dphi=0):\n from .Potential import _isNonAxi\n isList= isinstance(Pot,list)\n nonAxi= _isNonAxi(Pot)\n if nonAxi and phi is None:\n raise PotentialError(\"The (list of) planarPotential instances is non-axisymmetric, but you did not provide phi\")\n if isList and numpy.all([isinstance(p,planarPotential) for p in Pot]):\n sum= 0.\n for pot in Pot:\n if nonAxi:\n sum+= pot._call_nodecorator(R,phi=phi,t=t,dR=dR,dphi=dphi)\n else:\n sum+= pot._call_nodecorator(R,t=t,dR=dR,dphi=dphi)\n return sum\n elif isinstance(Pot,planarPotential):\n if nonAxi:\n return Pot._call_nodecorator(R,phi=phi,t=t,dR=dR,dphi=dphi)\n else:\n return Pot._call_nodecorator(R,t=t,dR=dR,dphi=dphi)\n else: #pragma: no cover \n raise PotentialError(\"Input to 'evaluatePotentials' is neither a Potential-instance or a list of such instances\")\n\n@potential_physical_input\n@physical_conversion('force',pop=True)\ndef evaluateplanarRforces(Pot,R,phi=None,t=0.):\n \"\"\"\n NAME:\n\n evaluateplanarRforces\n\n PURPOSE:\n\n evaluate the Rforce of a (list of) planarPotential instance(s)\n\n INPUT:\n\n Pot - (list of) planarPotential instance(s)\n\n R - Cylindrical radius (can be Quantity)\n\n phi= azimuth (optional can be Quantity)\n\n t= time (optional; can be Quantity)\n\n OUTPUT:\n\n F_R(R(,phi,t))\n\n HISTORY:\n\n 2010-07-13 - Written - Bovy (NYU)\n\n \"\"\"\n return _evaluateplanarRforces(Pot,R,phi=phi,t=t)\n\ndef _evaluateplanarRforces(Pot,R,phi=None,t=0.):\n \"\"\"Raw, undecorated function for internal use\"\"\"\n from .Potential import _isNonAxi\n isList= isinstance(Pot,list)\n nonAxi= _isNonAxi(Pot)\n if nonAxi and phi is None:\n raise PotentialError(\"The (list of) planarPotential instances is non-axisymmetric, but you did not provide phi\")\n if isinstance(Pot,list) \\\n and numpy.all([isinstance(p,planarPotential) for p in Pot]):\n sum= 0.\n for pot in Pot:\n if nonAxi:\n sum+= pot._Rforce_nodecorator(R,phi=phi,t=t)\n else:\n sum+= pot._Rforce_nodecorator(R,t=t)\n return sum\n elif isinstance(Pot,planarPotential):\n if nonAxi:\n return Pot._Rforce_nodecorator(R,phi=phi,t=t)\n else:\n return Pot._Rforce_nodecorator(R,t=t)\n else: #pragma: no cover \n raise PotentialError(\"Input to 'evaluatePotentials' is neither a Potential-instance or a list of such instances\")\n\n@potential_physical_input\n@physical_conversion('energy',pop=True)\ndef evaluateplanarphiforces(Pot,R,phi=None,t=0.):\n \"\"\"\n NAME:\n\n evaluateplanarphiforces\n\n PURPOSE:\n\n evaluate the phiforce of a (list of) planarPotential instance(s)\n\n INPUT:\n\n Pot - (list of) planarPotential instance(s)\n\n R - Cylindrical radius (can be Quantity)\n\n phi= azimuth (optional; can be Quantity)\n\n t= time (optional; can be Quantity)\n\n OUTPUT:\n\n F_phi(R(,phi,t))\n\n HISTORY:\n\n 2010-07-13 - Written - Bovy (NYU)\n\n \"\"\"\n return _evaluateplanarphiforces(Pot,R,phi=phi,t=t)\n\ndef _evaluateplanarphiforces(Pot,R,phi=None,t=0.):\n from .Potential import _isNonAxi\n isList= isinstance(Pot,list)\n nonAxi= _isNonAxi(Pot)\n if nonAxi and phi is None:\n raise PotentialError(\"The (list of) planarPotential instances is 
non-axisymmetric, but you did not provide phi\")\n if isinstance(Pot,list) \\\n and numpy.all([isinstance(p,planarPotential) for p in Pot]):\n sum= 0.\n for pot in Pot:\n if nonAxi:\n sum+= pot._phiforce_nodecorator(R,phi=phi,t=t)\n else:\n sum+= pot._phiforce_nodecorator(R,t=t)\n return sum\n elif isinstance(Pot,planarPotential):\n if nonAxi:\n return Pot._phiforce_nodecorator(R,phi=phi,t=t)\n else:\n return Pot._phiforce_nodecorator(R,t=t)\n else: #pragma: no cover \n raise PotentialError(\"Input to 'evaluatePotentials' is neither a Potential-instance or a list of such instances\")\n\n@potential_physical_input\n@physical_conversion('forcederivative',pop=True)\ndef evaluateplanarR2derivs(Pot,R,phi=None,t=0.):\n \"\"\"\n NAME:\n\n evaluateplanarR2derivs\n\n PURPOSE:\n\n evaluate the second radial derivative of a (list of) planarPotential instance(s)\n\n INPUT:\n\n Pot - (list of) planarPotential instance(s)\n\n R - Cylindrical radius (can be Quantity)\n\n phi= azimuth (optional; can be Quantity)\n\n t= time (optional; can be Quantity)\n\n OUTPUT:\n\n F_R(R(,phi,t))\n\n HISTORY:\n\n 2010-10-09 - Written - Bovy (IAS)\n\n \"\"\"\n from .Potential import _isNonAxi\n isList= isinstance(Pot,list)\n nonAxi= _isNonAxi(Pot)\n if nonAxi and phi is None:\n raise PotentialError(\"The (list of) planarPotential instances is non-axisymmetric, but you did not provide phi\")\n if isinstance(Pot,list) \\\n and numpy.all([isinstance(p,planarPotential) for p in Pot]):\n sum= 0.\n for pot in Pot:\n if nonAxi:\n sum+= pot.R2deriv(R,phi=phi,t=t,use_physical=False)\n else:\n sum+= pot.R2deriv(R,t=t,use_physical=False)\n return sum\n elif isinstance(Pot,planarPotential):\n if nonAxi:\n return Pot.R2deriv(R,phi=phi,t=t,use_physical=False)\n else:\n return Pot.R2deriv(R,t=t,use_physical=False)\n else: #pragma: no cover \n raise PotentialError(\"Input to 'evaluatePotentials' is neither a Potential-instance or a list of such instances\")\n\ndef LinShuReductionFactor(axiPot,R,sigmar,nonaxiPot=None,\n k=None,m=None,OmegaP=None):\n \"\"\"\n NAME:\n\n LinShuReductionFactor\n\n PURPOSE:\n\n Calculate the Lin & Shu (1966) reduction factor: the reduced linear response of a kinematically-warm stellar disk to a perturbation\n\n INPUT:\n\n axiPot - The background, axisymmetric potential\n\n R - Cylindrical radius (can be Quantity)\n \n sigmar - radial velocity dispersion of the population (can be Quantity)\n\n Then either provide:\n\n 1) m= m in the perturbation's m x phi (number of arms for a spiral)\n\n k= wavenumber (see Binney & Tremaine 2008)\n\n OmegaP= pattern speed (can be Quantity)\n\n 2) nonaxiPot= a non-axisymmetric Potential instance (such as SteadyLogSpiralPotential) that has functions that return OmegaP, m, and wavenumber\n\n OUTPUT:\n\n reduction factor\n\n HISTORY:\n\n 2014-08-23 - Written - Bovy (IAS)\n\n \"\"\"\n axiPot= flatten(axiPot)\n from ..potential import omegac, epifreq\n if nonaxiPot is None and (OmegaP is None or k is None or m is None):\n raise IOError(\"Need to specify either nonaxiPot= or m=, k=, OmegaP= for LinShuReductionFactor\")\n elif not nonaxiPot is None:\n OmegaP= nonaxiPot.OmegaP()\n k= nonaxiPot.wavenumber(R)\n m= nonaxiPot.m()\n tepif= epifreq(axiPot,R)\n s= m*(OmegaP-omegac(axiPot,R))/tepif\n chi= sigmar**2.*k**2./tepif**2.\n return (1.-s**2.)/numpy.sin(numpy.pi*s)\\\n *integrate.quad(lambda t: numpy.exp(-chi*(1.+numpy.cos(t)))\\\n *numpy.sin(s*t)*numpy.sin(t),\n 0.,numpy.pi)[0]\n\ndef plotplanarPotentials(Pot,*args,**kwargs):\n \"\"\"\n NAME:\n\n plotplanarPotentials\n\n PURPOSE:\n\n 
plot a planar potential\n\n INPUT:\n\n Rrange - range (can be Quantity)\n\n xrange, yrange - if relevant (can be Quantity)\n\n grid, gridx, gridy - number of points to plot\n\n savefilename - save to or restore from this savefile (pickle)\n\n ncontours - number of contours to plot (if applicable)\n\n +galpy.util.plot.plot(*args,**kwargs) or galpy.util.plot.dens2d(**kwargs)\n\n OUTPUT:\n\n plot to output device\n\n HISTORY:\n\n 2010-07-13 - Written - Bovy (NYU)\n\n \"\"\"\n Pot= flatten(Pot)\n Rrange= kwargs.pop('Rrange',[0.01,5.])\n xrange= kwargs.pop('xrange',[-5.,5.])\n yrange= kwargs.pop('yrange',[-5.,5.])\n if hasattr(Pot,'_ro'):\n tro= Pot._ro\n else:\n tro= Pot[0]._ro\n Rrange[0]= conversion.parse_length(Rrange[0],ro=tro)\n Rrange[1]= conversion.parse_length(Rrange[1],ro=tro)\n xrange[0]= conversion.parse_length(xrange[0],ro=tro)\n xrange[1]= conversion.parse_length(xrange[1],ro=tro)\n yrange[0]= conversion.parse_length(yrange[0],ro=tro)\n yrange[1]= conversion.parse_length(yrange[1],ro=tro)\n grid= kwargs.pop('grid',100)\n gridx= kwargs.pop('gridx',100)\n gridy= kwargs.pop('gridy',gridx)\n savefilename= kwargs.pop('savefilename',None)\n isList= isinstance(Pot,list)\n nonAxi= ((isList and Pot[0].isNonAxi) or (not isList and Pot.isNonAxi))\n if not savefilename is None and os.path.exists(savefilename):\n print(\"Restoring savefile \"+savefilename+\" ...\")\n savefile= open(savefilename,'rb')\n potR= pickle.load(savefile)\n if nonAxi:\n xs= pickle.load(savefile)\n ys= pickle.load(savefile)\n else:\n Rs= pickle.load(savefile)\n savefile.close()\n else:\n if nonAxi:\n xs= numpy.linspace(xrange[0],xrange[1],gridx)\n ys= numpy.linspace(yrange[0],yrange[1],gridy)\n potR= numpy.zeros((gridx,gridy))\n for ii in range(gridx):\n for jj in range(gridy):\n thisR= numpy.sqrt(xs[ii]**2.+ys[jj]**2.)\n if xs[ii] >= 0.:\n thisphi= numpy.arcsin(ys[jj]/thisR)\n else:\n thisphi= -numpy.arcsin(ys[jj]/thisR)+numpy.pi\n potR[ii,jj]= evaluateplanarPotentials(Pot,thisR,\n phi=thisphi,\n use_physical=False)\n else:\n Rs= numpy.linspace(Rrange[0],Rrange[1],grid)\n potR= numpy.zeros(grid)\n for ii in range(grid):\n potR[ii]= evaluateplanarPotentials(Pot,Rs[ii],\n use_physical=False)\n if not savefilename is None:\n print(\"Writing planar savefile \"+savefilename+\" ...\")\n savefile= open(savefilename,'wb')\n pickle.dump(potR,savefile)\n if nonAxi:\n pickle.dump(xs,savefile)\n pickle.dump(ys,savefile)\n else:\n pickle.dump(Rs,savefile)\n savefile.close()\n if nonAxi:\n if not 'orogin' in kwargs:\n kwargs['origin']= 'lower'\n if not 'cmap' in kwargs:\n kwargs['cmap']= 'gist_yarg'\n if not 'contours' in kwargs:\n kwargs['contours']= True\n if not 'xlabel' in kwargs:\n kwargs['xlabel']= r\"$x / R_0$\"\n if not 'ylabel' in kwargs:\n kwargs['ylabel']= \"$y / R_0$\"\n if not 'aspect' in kwargs:\n kwargs['aspect']= 1.\n if not 'cntrls' in kwargs:\n kwargs['cntrls']= '-'\n ncontours= kwargs.pop('ncontours',10)\n if not 'levels' in kwargs:\n kwargs['levels']= numpy.linspace(numpy.nanmin(potR),numpy.nanmax(potR),ncontours)\n return plot.dens2d(potR.T,\n xrange=xrange,\n yrange=yrange,**kwargs)\n else:\n kwargs['xlabel']=r\"$R/R_0$\"\n kwargs['ylabel']=r\"$\\Phi(R)$\"\n kwargs['xrange']=Rrange\n return plot.plot(Rs,potR,*args,**kwargs)\n \n \n"
] |
[
[
"numpy.log",
"numpy.sqrt",
"numpy.linspace",
"numpy.ones",
"numpy.sum",
"numpy.fabs"
],
[
"numpy.nanmax",
"numpy.sqrt",
"numpy.linspace",
"numpy.arcsin",
"numpy.nanmin",
"numpy.cos",
"numpy.sin",
"numpy.zeros"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
Meruman/highway-env
|
[
"8bb23e7fa47e617971b83ddeeaab8c20c94b5c88"
] |
[
"highway_env/road/graphics.py"
] |
[
"from __future__ import division, print_function\nimport numpy as np\nimport pygame\n\nfrom highway_env.road.lane import LineType\nfrom highway_env.vehicle.graphics import VehicleGraphics\n\n\nclass LaneGraphics(object):\n \"\"\"\n A visualization of a lane.\n \"\"\"\n STRIPE_SPACING = 5\n \"\"\" Offset between stripes [m]\"\"\"\n\n STRIPE_LENGTH = 3\n \"\"\" Length of a stripe [m]\"\"\"\n\n STRIPE_WIDTH = 0.3\n \"\"\" Width of a stripe [m]\"\"\"\n\n @classmethod\n def display(cls, lane, surface):\n \"\"\"\n Display a lane on a surface.\n\n :param lane: the lane to be displayed\n :param surface: the pygame surface\n \"\"\"\n stripes_count = int(2 * (surface.get_height() + surface.get_width()) / (cls.STRIPE_SPACING * surface.scaling))\n s_origin, _ = lane.local_coordinates(surface.origin)\n s0 = (int(s_origin) // cls.STRIPE_SPACING - stripes_count // 2) * cls.STRIPE_SPACING\n for side in range(2):\n if lane.line_types[side] == LineType.STRIPED:\n cls.striped_line(lane, surface, stripes_count, s0, side)\n elif lane.line_types[side] == LineType.CONTINUOUS:\n cls.continuous_curve(lane, surface, stripes_count, s0, side)\n elif lane.line_types[side] == LineType.CONTINUOUS_LINE:\n cls.continuous_line(lane, surface, stripes_count, s0, side)\n\n @classmethod\n def striped_line(cls, lane, surface, stripes_count, s0, side):\n \"\"\"\n Draw a striped line on one side of a lane, on a surface.\n\n :param lane: the lane\n :param surface: the pygame surface\n :param stripes_count: the number of stripes to draw\n :param s0: the longitudinal position of the first stripe [m]\n :param side: which side of the road to draw [0:left, 1:right]\n \"\"\"\n starts = s0 + np.arange(stripes_count) * cls.STRIPE_SPACING\n ends = s0 + np.arange(stripes_count) * cls.STRIPE_SPACING + cls.STRIPE_LENGTH\n lats = [(side - 0.5) * lane.width_at(s) for s in starts]\n cls.draw_stripes(lane, surface, starts, ends, lats)\n\n @classmethod\n def continuous_curve(cls, lane, surface, stripes_count, s0, side):\n \"\"\"\n Draw a striped line on one side of a lane, on a surface.\n\n :param lane: the lane\n :param surface: the pygame surface\n :param stripes_count: the number of stripes to draw\n :param s0: the longitudinal position of the first stripe [m]\n :param side: which side of the road to draw [0:left, 1:right]\n \"\"\"\n starts = s0 + np.arange(stripes_count) * cls.STRIPE_SPACING\n ends = s0 + np.arange(stripes_count) * cls.STRIPE_SPACING + cls.STRIPE_SPACING\n lats = [(side - 0.5) * lane.width_at(s) for s in starts]\n cls.draw_stripes(lane, surface, starts, ends, lats)\n\n @classmethod\n def continuous_line(cls, lane, surface, stripes_count, s0, side):\n \"\"\"\n Draw a continuous line on one side of a lane, on a surface.\n\n :param lane: the lane\n :param surface: the pygame surface\n :param stripes_count: the number of stripes that would be drawn if the line was striped\n :param s0: the longitudinal position of the start of the line [m]\n :param side: which side of the road to draw [0:left, 1:right]\n \"\"\"\n starts = [s0 + 0 * cls.STRIPE_SPACING]\n ends = [s0 + stripes_count * cls.STRIPE_SPACING + cls.STRIPE_LENGTH]\n lats = [(side - 0.5) * lane.width_at(s) for s in starts]\n cls.draw_stripes(lane, surface, starts, ends, lats)\n\n @classmethod\n def draw_stripes(cls, lane, surface, starts, ends, lats):\n \"\"\"\n Draw a set of stripes along a lane.\n\n :param lane: the lane\n :param surface: the surface to draw on\n :param starts: a list of starting longitudinal positions for each stripe [m]\n :param ends: a list of ending 
longitudinal positions for each stripe [m]\n :param lats: a list of lateral positions for each stripe [m]\n \"\"\"\n starts = np.clip(starts, 0, lane.length)\n ends = np.clip(ends, 0, lane.length)\n for k in range(len(starts)):\n if abs(starts[k] - ends[k]) > 0.5 * cls.STRIPE_LENGTH:\n pygame.draw.line(surface, surface.WHITE,\n (surface.vec2pix(lane.position(starts[k], lats[k]))),\n (surface.vec2pix(lane.position(ends[k], lats[k]))),\n max(surface.pix(cls.STRIPE_WIDTH), 1))\n\n\nclass RoadGraphics(object):\n \"\"\"\n A visualization of a road lanes and vehicles.\n \"\"\"\n @classmethod\n def display(cls, road, surface):\n \"\"\"\n Display the road lanes on a surface.\n\n :param road: the road to be displayed\n :param surface: the pygame surface\n \"\"\"\n surface.fill(surface.GREY)\n for _from in road.network.graph.keys():\n for _to in road.network.graph[_from].keys():\n for l in road.network.graph[_from][_to]:\n LaneGraphics.display(l, surface)\n\n @classmethod\n def display_traffic(cls, road, surface, offscreen=False):\n \"\"\"\n Display the road vehicles on a surface.\n\n :param road: the road to be displayed\n :param surface: the pygame surface\n \"\"\"\n for v in road.vehicles:\n VehicleGraphics.display(v, surface, offscreen=offscreen)\n\n\nclass WorldSurface(pygame.Surface):\n \"\"\"\n A pygame Surface implementing a local coordinate system so that we can move and zoom in the displayed area.\n \"\"\"\n BLACK = (0, 0, 0)\n GREY = (100, 100, 100)\n GREEN = (50, 200, 0)\n YELLOW = (200, 200, 0)\n WHITE = (255, 255, 255)\n INITIAL_SCALING = 5.5\n INITIAL_CENTERING = [0.5, 0.5]\n SCALING_FACTOR = 1.3\n MOVING_FACTOR = 0.1\n\n def __init__(self, size, flags, surf):\n super(WorldSurface, self).__init__(size, flags, surf)\n self.origin = np.array([0, 0])\n self.scaling = self.INITIAL_SCALING\n self.centering_position = self.INITIAL_CENTERING\n\n def pix(self, length):\n \"\"\"\n Convert a distance [m] to pixels [px].\n\n :param length: the input distance [m]\n :return: the corresponding size [px]\n \"\"\"\n return int(length * self.scaling)\n\n def pos2pix(self, x, y):\n \"\"\"\n Convert two world coordinates [m] into a position in the surface [px]\n\n :param x: x world coordinate [m]\n :param y: y world coordinate [m]\n :return: the coordinates of the corresponding pixel [px]\n \"\"\"\n return self.pix(x - self.origin[0]), self.pix(y - self.origin[1])\n\n def vec2pix(self, vec):\n \"\"\"\n Convert a world position [m] into a position in the surface [px].\n :param vec: a world position [m]\n :return: the coordinates of the corresponding pixel [px]\n \"\"\"\n return self.pos2pix(vec[0], vec[1])\n\n def move_display_window_to(self, position):\n \"\"\"\n Set the origin of the displayed area to center on a given world position.\n :param position: a world position [m]\n \"\"\"\n self.origin = position - np.array(\n [self.centering_position[0] * self.get_width() / self.scaling,\n self.centering_position[1] * self.get_height() / self.scaling])\n\n def handle_event(self, event):\n \"\"\"\n Handle pygame events for moving and zooming in the displayed area.\n\n :param event: a pygame event\n \"\"\"\n if event.type == pygame.KEYDOWN:\n if event.key == pygame.K_l:\n self.scaling *= 1 / self.SCALING_FACTOR\n if event.key == pygame.K_o:\n self.scaling *= self.SCALING_FACTOR\n if event.key == pygame.K_m:\n self.centering_position[0] -= self.MOVING_FACTOR\n if event.key == pygame.K_k:\n self.centering_position[0] += self.MOVING_FACTOR\n"
] |
[
[
"numpy.arange",
"numpy.array",
"numpy.clip"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
yinzi-xin/hcipy
|
[
"e9abb037ed0d6fe06581c1ce94e5c154fa5069a7",
"e9abb037ed0d6fe06581c1ce94e5c154fa5069a7"
] |
[
"hcipy/metrics/contrast.py",
"examples/power_spectral_density.py"
] |
[
"import numpy as np\n\ndef get_strehl_from_focal(img, ref_img):\n\t'''Get the Strehl ratio from a focal-plane image.\n\n\tParameters\n\t----------\n\timg : Field or array_like\n\t\tThe focal-plane image.\n\tref_img : Field or array_like\n\t\tThe reference focal-plane image without aberrations.\n\n\tReturns\n\t-------\n\tscalar\n\t\tThe Strehl ratio.\n\t'''\n\treturn img(np.argmax(ref_img)) / ref_img.max()\n\ndef get_strehl_from_pupil(aperture, ref_aperture):\n\t'''Get the Strehl ratio from a pupil-plane electric field.\n\n\tParameters\n\t----------\n\taperture : Field or array_like\n\t\tThe pupil-plane electric field.\n\tref_aperture : Field or array_like\n\t\tThe reference pupil-plane electric field without aberrations.\n\n\tReturns\n\t-------\n\tscalar\n\t\tThe Strehl ratio.\n\t'''\n\treturn np.abs(np.sum(aperture) / np.sum(ref_aperture))**2\n\ndef get_mean_intensity_in_roi(img, mask):\n\t'''Get the mean intensity in a masked region of interest.\n\n\tParameters\n\t----------\n\timg : Field or array_like\n\t\tThe focal-plane image.\n\tmask : Field or array_like\n\t\tA binary array describing the region of interest.\n\n\tReturns\n\t-------\n\tscalar\n\t\tThe mean intensity in the region of interest.\n\t'''\n\treturn np.mean(img[mask])\n\ndef get_mean_raw_contrast(img, mask, ref_img):\n\t'''Get the mean raw contrast in a masked region of interest.\n\n\tParameters\n\t----------\n\timg : Field or array_like\n\t\tThe focal-plane image.\n\tmask : Field or array_like\n\t\tA binary array describing the region of interest.\n\timg_ref : Field or array_like\n\t\tA reference focal-plane image without aberrations. This is used\n\t\tto determine the Strehl ratio.\n\n\tReturns\n\t-------\n\tscalar\n\t\tThe mean raw contrast in the region of interest.\n\t'''\n\tmean_intensity = get_mean_intensity_in_roi(img, mask)\n\tstrehl = get_strehl_from_focal(img, ref_img)\n\n\treturn mean_intensity / strehl\n",
"from hcipy import *\nimport numpy as np\nimport matplotlib.pyplot as plt\n\ndef psd(u_grid):\n\treturn (u_grid.as_('polar').r < 100).astype('float') * (u_grid.as_('polar').r > 30).astype('float')\n\ndef psd_1d(u_grid):\n\treturn (np.abs(u_grid.x) < 100).astype('float') * (np.abs(u_grid.x) > 30).astype('float')\n\ndef make_pupil_grid_1d(N, D=1):\n\tD = (np.ones(1) * D).astype('float')\n\tN = (np.ones(1) * N).astype('int')\n\n\tdelta = D / (N-1)\n\tzero = -D/2\n\n\treturn CartesianGrid(RegularCoords(delta, N, zero))\n\nratios = []\nfor i in range(100):\n\tq = 1\n\tpupil_grid = make_pupil_grid(128, q)\n\t#pupil_grid = make_pupil_grid_1d(2048, q)\n\tprint(pupil_grid.ndim)\n\n\toversampling = 2\n\tfactory = SpectralNoiseFactoryFFT(psd, pupil_grid, oversampling)\n\tscreen = factory.make_random()()\n\n\tpower_in_screen = np.sum(np.abs(screen)**2 * screen.grid.weights) / np.sum(screen.grid.weights)\n\tpower_in_psd = np.sum(psd(factory.input_grid) * factory.input_grid.weights / (2*np.pi)**pupil_grid.ndim)\n\n\tprint('Power in screen:', power_in_screen)\n\tprint('Power in PSD:', power_in_psd)\n\tprint('Ratio:', power_in_screen / power_in_psd)\n\tratios.append(power_in_screen / power_in_psd)\n\n\nplt.hist(ratios, bins=30)\nplt.show()\n#imshow_field(screen)\n#plt.show()\n"
] |
[
[
"numpy.argmax",
"numpy.mean",
"numpy.sum"
],
[
"numpy.abs",
"numpy.ones",
"matplotlib.pyplot.hist",
"matplotlib.pyplot.show",
"numpy.sum"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
tifuchs/silx
|
[
"4b8b9e58ecd6fd4ca0ae80f2e74b956b26bcc3f7",
"035cb286dd46f3f0cb3f819a3cfb6ce253c9933b",
"4b8b9e58ecd6fd4ca0ae80f2e74b956b26bcc3f7",
"035cb286dd46f3f0cb3f819a3cfb6ce253c9933b",
"035cb286dd46f3f0cb3f819a3cfb6ce253c9933b"
] |
[
"examples/fftPlotAction.py",
"src/silx/math/test/test_histogramnd_nominal.py",
"examples/customHdf5TreeModel.py",
"examples/plot3dContextMenu.py",
"src/silx/math/setup.py"
] |
[
"#!/usr/bin/env python\n# coding: utf-8\n# /*##########################################################################\n#\n# Copyright (c) 2016-2020 European Synchrotron Radiation Facility\n#\n# Permission is hereby granted, free of charge, to any person obtaining a copy\n# of this software and associated documentation files (the \"Software\"), to deal\n# in the Software without restriction, including without limitation the rights\n# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell\n# copies of the Software, and to permit persons to whom the Software is\n# furnished to do so, subject to the following conditions:\n#\n# The above copyright notice and this permission notice shall be included in\n# all copies or substantial portions of the Software.\n#\n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\n# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\n# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN\n# THE SOFTWARE.\n#\n# ###########################################################################*/\n\"\"\"This script is a simple example of how to create a :class:`~silx.gui.plot.PlotWindow`\nwith a custom :class:`~silx.gui.plot.actions.PlotAction` added to the toolbar.\n\nThe action computes the FFT of all curves and plots their amplitude spectrum.\nIt also performs the reverse transform.\n\nThis example illustrates:\n - how to create a checkable action\n - how to store user info with a curve in a PlotWindow\n - how to modify the graph title and axes labels\n - how to add your own icon as a PNG file\n\nSee shiftPlotAction.py for a simpler example with more basic comments.\n\n\"\"\"\n__authors__ = [\"P. 
Knobel\"]\n__license__ = \"MIT\"\n__date__ = \"27/06/2017\"\n\nimport numpy\nimport os\nimport sys\n\nfrom silx.gui import qt\nfrom silx.gui.plot import PlotWindow\nfrom silx.gui.plot.actions import PlotAction\n\n# Custom icon\n# make sure there is a \"fft.png\" file saved in the same folder as this script\nscriptdir = os.path.dirname(os.path.realpath(__file__))\nmy_icon = os.path.join(scriptdir, \"fft.png\")\n\n\nclass FftAction(PlotAction):\n \"\"\"QAction performing a Fourier transform on all curves when checked,\n and reverse transform when unchecked.\n\n :param plot: PlotWindow on which to operate\n :param parent: See documentation of :class:`QAction`\n \"\"\"\n def __init__(self, plot, parent=None):\n PlotAction.__init__(\n self,\n plot,\n icon=qt.QIcon(my_icon),\n text='FFT',\n tooltip='Perform Fast Fourier Transform on all curves',\n triggered=self.fftAllCurves,\n checkable=True,\n parent=parent)\n\n def _rememberGraphLabels(self):\n \"\"\"Store labels and title as attributes\"\"\"\n self.original_title = self.plot.getGraphTitle()\n self.original_xlabel = self.plot.getXAxis().getLabel()\n self.original_ylabel = self.plot.getYAxis().getLabel()\n\n def fftAllCurves(self, checked=False):\n \"\"\"Get all curves from our PlotWindow, compute the amplitude spectrum\n using a Fast Fourier Transform, replace all curves with their\n amplitude spectra.\n\n When un-checking the button, do the reverse transform.\n\n :param checked: Boolean parameter signaling whether the action\n has been checked or unchecked.\n \"\"\"\n allCurves = self.plot.getAllCurves(withhidden=True)\n\n if checked:\n # remember original labels\n self._rememberGraphLabels()\n # change them\n self.plot.setGraphTitle(\"Amplitude spectrum\")\n self.plot.getXAxis().setLabel(\"Frequency\")\n self.plot.getYAxis().setLabel(\"Amplitude\")\n else:\n # restore original labels\n self.plot.setGraphTitle(self.original_title)\n self.plot.getXAxis().setLabel(self.original_xlabel)\n self.plot.getYAxis().setLabel(self.original_ylabel)\n\n self.plot.clearCurves()\n\n for curve in allCurves:\n x = curve.getXData()\n y = curve.getYData()\n legend = curve.getName()\n info = curve.getInfo()\n if info is None:\n info = {}\n\n if checked:\n # FAST FOURIER TRANSFORM\n fft_y = numpy.fft.fft(y)\n # amplitude spectrum\n A = numpy.abs(fft_y)\n\n # sampling frequency (samples per X unit)\n Fs = len(x) / (max(x) - min(x))\n # frequency array (abscissa of new curve)\n F = [k * Fs / len(x) for k in range(len(A))]\n\n # we need to store the complete transform (complex data) to be\n # able to perform the reverse transform.\n info[\"complex fft\"] = fft_y\n info[\"original x\"] = x\n\n # plot the amplitude spectrum\n self.plot.addCurve(F, A, legend=\"FFT of \" + legend,\n info=info)\n\n else:\n # INVERSE FFT\n fft_y = info[\"complex fft\"]\n # we keep only the real part because we know the imaginary\n # part is 0 (our original data was real numbers)\n y1 = numpy.real(numpy.fft.ifft(fft_y))\n\n # recover original info\n x1 = info[\"original x\"]\n legend1 = legend[7:] # remove \"FFT of \"\n\n # remove restored data from info dict\n for key in [\"complex fft\", \"original x\"]:\n del info[key]\n\n # plot the original data\n self.plot.addCurve(x1, y1, legend=legend1,\n info=info)\n\n self.plot.resetZoom()\n\n\napp = qt.QApplication([])\n\nsys.excepthook = qt.exceptionHandler\n\nplotwin = PlotWindow(control=True)\ntoolbar = qt.QToolBar(\"My toolbar\")\nplotwin.addToolBar(toolbar)\n\nmyaction = FftAction(plotwin)\ntoolbar.addAction(myaction)\n\n# x range: 0 -- 10 
(1000 points)\nx = numpy.arange(1000) * 0.01\n\ntwopi = 2 * numpy.pi\n# Sum of sine functions with frequencies 3, 20 and 42 Hz\ny1 = numpy.sin(twopi * 3 * x) + 1.5 * numpy.sin(twopi * 20 * x) + 2 * numpy.sin(twopi * 42 * x)\n# Cosine with frequency 7 Hz and phase pi / 3\ny2 = numpy.cos(twopi * 7 * (x - numpy.pi / 3))\n# 5 periods of square wave, amplitude 2\ny3 = numpy.zeros_like(x)\nfor i in [0, 2, 4, 6, 8]:\n y3[i * len(x) // 10:(i + 1) * len(x) // 10] = 2\n\nplotwin.addCurve(x, y1, legend=\"sin\")\nplotwin.addCurve(x, y2, legend=\"cos\")\nplotwin.addCurve(x, y3, legend=\"square wave\")\n\nplotwin.setGraphTitle(\"Original data\")\nplotwin.getYAxis().setLabel(\"amplitude\")\nplotwin.getXAxis().setLabel(\"time\")\n\nplotwin.show()\napp.exec()\nsys.excepthook = sys.__excepthook__\n",
"# coding: utf-8\n# /*##########################################################################\n# Copyright (C) 2016-2021 European Synchrotron Radiation Facility\n#\n# Permission is hereby granted, free of charge, to any person obtaining a copy\n# of this software and associated documentation files (the \"Software\"), to deal\n# in the Software without restriction, including without limitation the rights\n# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell\n# copies of the Software, and to permit persons to whom the Software is\n# furnished to do so, subject to the following conditions:\n#\n# The above copyright notice and this permission notice shall be included in\n# all copies or substantial portions of the Software.\n#\n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\n# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\n# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN\n# THE SOFTWARE.\n#\n# ############################################################################*/\n\"\"\"\nNominal tests of the histogramnd function.\n\"\"\"\n\nimport unittest\nimport pytest\n\nimport numpy as np\n\nfrom silx.math.chistogramnd import chistogramnd as histogramnd\nfrom silx.math import Histogramnd\n\n\ndef _get_bin_edges(histo_range, n_bins, n_dims):\n edges = []\n for i_dim in range(n_dims):\n edges.append(histo_range[i_dim, 0] +\n np.arange(n_bins[i_dim] + 1) *\n (histo_range[i_dim, 1] - histo_range[i_dim, 0]) /\n n_bins[i_dim])\n return tuple(edges)\n\n\n# ==============================================================\n# ==============================================================\n# ==============================================================\n\n\nclass _Test_chistogramnd_nominal(unittest.TestCase):\n \"\"\"\n Unit tests of the histogramnd function.\n \"\"\"\n __test__ = False # ignore abstract classe\n\n ndims = None\n\n def setUp(self):\n if type(self).__name__.startswith(\"_\"):\n self.skipTest(\"Abstract class\")\n ndims = self.ndims\n self.tested_dim = ndims-1\n\n if ndims is None:\n raise ValueError('ndims class member not set.')\n\n sample = np.array([5.5, -3.3,\n 0., -0.5,\n 3.3, 8.8,\n -7.7, 6.0,\n -4.0])\n\n weights = np.array([500.5, -300.3,\n 0.01, -0.5,\n 300.3, 800.8,\n -700.7, 600.6,\n -400.4])\n\n n_elems = len(sample)\n\n if ndims == 1:\n shape = (n_elems,)\n else:\n shape = (n_elems, ndims)\n\n self.sample = np.zeros(shape=shape, dtype=sample.dtype)\n if ndims == 1:\n self.sample = sample\n else:\n self.sample[..., ndims-1] = sample\n\n self.weights = weights\n\n # the tests are performed along one dimension,\n # all the other bins indices along the other dimensions\n # are expected to be 2\n # (e.g : when testing a 2D sample : [0, x] will go into\n # bin [2, y] because of the bin ranges [-2, 2] and n_bins = 4\n # for the first dimension)\n self.other_axes_index = 2\n self.histo_range = np.repeat([[-2., 2.]], ndims, axis=0)\n self.histo_range[ndims-1] = [-4., 6.]\n\n self.n_bins = np.array([4]*ndims)\n self.n_bins[ndims-1] = 5\n\n if ndims == 1:\n def fill_histo(h, v, dim, op=None):\n if op:\n h[:] = op(h[:], v)\n else:\n h[:] = v\n self.fill_histo = fill_histo\n else:\n def fill_histo(h, v, dim, op=None):\n idx = 
[self.other_axes_index]*len(h.shape)\n idx[dim] = slice(0, None)\n idx = tuple(idx)\n if op:\n h[idx] = op(h[idx], v)\n else:\n h[idx] = v\n self.fill_histo = fill_histo\n\n def test_nominal(self):\n \"\"\"\n \"\"\"\n expected_h_tpl = np.array([2, 1, 1, 1, 1])\n expected_c_tpl = np.array([-700.7, -0.5, 0.01, 300.3, 500.5])\n\n expected_h = np.zeros(shape=self.n_bins, dtype=np.double)\n expected_c = np.zeros(shape=self.n_bins, dtype=np.double)\n\n self.fill_histo(expected_h, expected_h_tpl, self.ndims-1)\n self.fill_histo(expected_c, expected_c_tpl, self.ndims-1)\n\n histo, cumul, bin_edges = histogramnd(self.sample,\n self.histo_range,\n self.n_bins,\n weights=self.weights)\n\n expected_edges = _get_bin_edges(self.histo_range,\n self.n_bins,\n self.ndims)\n\n self.assertEqual(cumul.dtype, np.float64)\n self.assertEqual(histo.dtype, np.uint32)\n self.assertTrue(np.array_equal(histo, expected_h))\n self.assertTrue(np.array_equal(cumul, expected_c))\n\n for i_edges, edges in enumerate(expected_edges):\n self.assertTrue(np.array_equal(bin_edges[i_edges],\n expected_edges[i_edges]),\n msg='Testing bin_edges for dim {0}'\n ''.format(i_edges+1))\n\n def test_nominal_wh_dtype(self):\n \"\"\"\n \"\"\"\n expected_h_tpl = np.array([2, 1, 1, 1, 1])\n expected_c_tpl = np.array([-700.7, -0.5, 0.01, 300.3, 500.5])\n\n expected_h = np.zeros(shape=self.n_bins, dtype=np.double)\n expected_c = np.zeros(shape=self.n_bins, dtype=np.float32)\n\n self.fill_histo(expected_h, expected_h_tpl, self.ndims-1)\n self.fill_histo(expected_c, expected_c_tpl, self.ndims-1)\n\n histo, cumul, bin_edges = histogramnd(self.sample,\n self.histo_range,\n self.n_bins,\n weights=self.weights,\n wh_dtype=np.float32)\n\n self.assertEqual(cumul.dtype, np.float32)\n self.assertTrue(np.array_equal(histo, expected_h))\n self.assertTrue(np.allclose(cumul, expected_c))\n\n def test_nominal_uncontiguous_sample(self):\n \"\"\"\n \"\"\"\n expected_h_tpl = np.array([2, 1, 1, 1, 1])\n expected_c_tpl = np.array([-700.7, -0.5, 0.01, 300.3, 500.5])\n\n expected_h = np.zeros(shape=self.n_bins, dtype=np.double)\n expected_c = np.zeros(shape=self.n_bins, dtype=np.double)\n\n self.fill_histo(expected_h, expected_h_tpl, self.ndims-1)\n self.fill_histo(expected_c, expected_c_tpl, self.ndims-1)\n\n shape = list(self.sample.shape)\n shape[0] *= 2\n sample = np.zeros(shape, dtype=self.sample.dtype)\n uncontig_sample = sample[::2, ...]\n uncontig_sample[:] = self.sample\n\n self.assertFalse(uncontig_sample.flags['C_CONTIGUOUS'],\n msg='Making sure the array is not contiguous.')\n\n histo, cumul, bin_edges = histogramnd(uncontig_sample,\n self.histo_range,\n self.n_bins,\n weights=self.weights)\n\n self.assertEqual(cumul.dtype, np.float64)\n self.assertEqual(histo.dtype, np.uint32)\n self.assertTrue(np.array_equal(histo, expected_h))\n self.assertTrue(np.array_equal(cumul, expected_c))\n\n def test_nominal_uncontiguous_weights(self):\n \"\"\"\n \"\"\"\n expected_h_tpl = np.array([2, 1, 1, 1, 1])\n expected_c_tpl = np.array([-700.7, -0.5, 0.01, 300.3, 500.5])\n\n expected_h = np.zeros(shape=self.n_bins, dtype=np.double)\n expected_c = np.zeros(shape=self.n_bins, dtype=np.double)\n\n self.fill_histo(expected_h, expected_h_tpl, self.ndims-1)\n self.fill_histo(expected_c, expected_c_tpl, self.ndims-1)\n\n shape = list(self.weights.shape)\n shape[0] *= 2\n weights = np.zeros(shape, dtype=self.weights.dtype)\n uncontig_weights = weights[::2, ...]\n uncontig_weights[:] = self.weights\n\n self.assertFalse(uncontig_weights.flags['C_CONTIGUOUS'],\n msg='Making sure 
the array is not contiguous.')\n\n histo, cumul, bin_edges = histogramnd(self.sample,\n self.histo_range,\n self.n_bins,\n weights=uncontig_weights)\n\n self.assertEqual(cumul.dtype, np.float64)\n self.assertEqual(histo.dtype, np.uint32)\n self.assertTrue(np.array_equal(histo, expected_h))\n self.assertTrue(np.array_equal(cumul, expected_c))\n\n def test_nominal_wo_weights(self):\n \"\"\"\n \"\"\"\n expected_h_tpl = np.array([2, 1, 1, 1, 1])\n\n expected_h = np.zeros(shape=self.n_bins, dtype=np.double)\n\n self.fill_histo(expected_h, expected_h_tpl, self.ndims-1)\n\n histo, cumul = histogramnd(self.sample,\n self.histo_range,\n self.n_bins,\n weights=None)[0:2]\n\n self.assertTrue(np.array_equal(histo, expected_h))\n self.assertTrue(cumul is None)\n\n def test_nominal_wo_weights_w_cumul(self):\n \"\"\"\n \"\"\"\n expected_h_tpl = np.array([2, 1, 1, 1, 1])\n\n expected_h = np.zeros(shape=self.n_bins, dtype=np.double)\n\n # creating an array of ones just to make sure that\n # it is not cleared by histogramnd\n cumul_in = np.ones(self.n_bins, dtype=np.double)\n\n self.fill_histo(expected_h, expected_h_tpl, self.ndims-1)\n\n histo, cumul = histogramnd(self.sample,\n self.histo_range,\n self.n_bins,\n weights=None,\n weighted_histo=cumul_in)[0:2]\n\n self.assertTrue(np.array_equal(histo, expected_h))\n self.assertTrue(cumul is None)\n self.assertTrue(np.array_equal(cumul_in,\n np.ones(shape=self.n_bins,\n dtype=np.double)))\n\n def test_nominal_wo_weights_w_histo(self):\n \"\"\"\n \"\"\"\n expected_h_tpl = np.array([2, 1, 1, 1, 1])\n\n expected_h = np.zeros(shape=self.n_bins, dtype=np.double)\n\n # creating an array of ones just to make sure that\n # it is not cleared by histogramnd\n histo_in = np.ones(self.n_bins, dtype=np.uint32)\n\n self.fill_histo(expected_h, expected_h_tpl, self.ndims-1)\n\n histo, cumul = histogramnd(self.sample,\n self.histo_range,\n self.n_bins,\n weights=None,\n histo=histo_in)[0:2]\n\n self.assertTrue(np.array_equal(histo, expected_h + 1))\n self.assertTrue(cumul is None)\n self.assertEqual(id(histo), id(histo_in))\n\n def test_nominal_last_bin_closed(self):\n \"\"\"\n \"\"\"\n expected_h_tpl = np.array([2, 1, 1, 1, 2])\n expected_c_tpl = np.array([-700.7, -0.5, 0.01, 300.3, 1101.1])\n\n expected_h = np.zeros(shape=self.n_bins, dtype=np.double)\n expected_c = np.zeros(shape=self.n_bins, dtype=np.double)\n\n self.fill_histo(expected_h, expected_h_tpl, self.ndims-1)\n self.fill_histo(expected_c, expected_c_tpl, self.ndims-1)\n\n histo, cumul = histogramnd(self.sample,\n self.histo_range,\n self.n_bins,\n weights=self.weights,\n last_bin_closed=True)[0:2]\n\n self.assertTrue(np.array_equal(histo, expected_h))\n self.assertTrue(np.array_equal(cumul, expected_c))\n\n def test_int32_weights_double_weights_range(self):\n \"\"\"\n \"\"\"\n weight_min = -299.9 # ===> will be cast to -299\n weight_max = 499.9 # ===> will be cast to 499\n\n expected_h_tpl = np.array([0, 1, 1, 1, 0])\n expected_c_tpl = np.array([0., 0., 0., 300., 0.])\n\n expected_h = np.zeros(shape=self.n_bins, dtype=np.double)\n expected_c = np.zeros(shape=self.n_bins, dtype=np.double)\n\n self.fill_histo(expected_h, expected_h_tpl, self.ndims-1)\n self.fill_histo(expected_c, expected_c_tpl, self.ndims-1)\n\n histo, cumul = histogramnd(self.sample,\n self.histo_range,\n self.n_bins,\n weights=self.weights.astype(np.int32),\n weight_min=weight_min,\n weight_max=weight_max)[0:2]\n\n self.assertTrue(np.array_equal(histo, expected_h))\n self.assertTrue(np.array_equal(cumul, expected_c))\n\n def 
test_reuse_histo(self):\n \"\"\"\n \"\"\"\n\n expected_h_tpl = np.array([2, 3, 2, 2, 2])\n expected_c_tpl = np.array([0.0, -7007, -5.0, 0.1, 3003.0])\n\n expected_h = np.zeros(shape=self.n_bins, dtype=np.double)\n expected_c = np.zeros(shape=self.n_bins, dtype=np.double)\n\n self.fill_histo(expected_h, expected_h_tpl, self.ndims-1)\n self.fill_histo(expected_c, expected_c_tpl, self.ndims-1)\n\n histo, cumul = histogramnd(self.sample,\n self.histo_range,\n self.n_bins,\n weights=self.weights)[0:2]\n\n sample_2 = self.sample[:]\n if len(sample_2.shape) == 1:\n idx = (slice(0, None),)\n else:\n idx = slice(0, None), self.tested_dim\n\n sample_2[idx] += 2\n\n histo_2, cumul = histogramnd(sample_2, # <==== !!\n self.histo_range,\n self.n_bins,\n weights=10 * self.weights, # <==== !!\n histo=histo)[0:2]\n\n self.assertTrue(np.array_equal(histo, expected_h))\n self.assertTrue(np.array_equal(cumul, expected_c))\n self.assertEqual(id(histo), id(histo_2))\n\n def test_reuse_cumul(self):\n \"\"\"\n \"\"\"\n\n expected_h_tpl = np.array([0, 2, 1, 1, 1])\n expected_c_tpl = np.array([-700.7, -7007.5, -4.99, 300.4, 3503.5])\n\n expected_h = np.zeros(shape=self.n_bins, dtype=np.double)\n expected_c = np.zeros(shape=self.n_bins, dtype=np.double)\n\n self.fill_histo(expected_h, expected_h_tpl, self.ndims-1)\n self.fill_histo(expected_c, expected_c_tpl, self.ndims-1)\n\n histo, cumul = histogramnd(self.sample,\n self.histo_range,\n self.n_bins,\n weights=self.weights)[0:2]\n\n sample_2 = self.sample[:]\n if len(sample_2.shape) == 1:\n idx = (slice(0, None),)\n else:\n idx = slice(0, None), self.tested_dim\n\n sample_2[idx] += 2\n\n histo, cumul_2 = histogramnd(sample_2, # <==== !!\n self.histo_range,\n self.n_bins,\n weights=10 * self.weights, # <==== !!\n weighted_histo=cumul)[0:2]\n\n self.assertEqual(cumul.dtype, np.float64)\n self.assertTrue(np.array_equal(histo, expected_h))\n self.assertTrue(np.allclose(cumul, expected_c, rtol=10e-15))\n self.assertEqual(id(cumul), id(cumul_2))\n\n def test_reuse_cumul_float(self):\n \"\"\"\n \"\"\"\n\n expected_h_tpl = np.array([0, 2, 1, 1, 1])\n expected_c_tpl = np.array([-700.7, -7007.5, -4.99, 300.4, 3503.5],\n dtype=np.float32)\n\n expected_h = np.zeros(shape=self.n_bins, dtype=np.double)\n expected_c = np.zeros(shape=self.n_bins, dtype=np.double)\n\n self.fill_histo(expected_h, expected_h_tpl, self.ndims-1)\n self.fill_histo(expected_c, expected_c_tpl, self.ndims-1)\n\n histo, cumul = histogramnd(self.sample,\n self.histo_range,\n self.n_bins,\n weights=self.weights)[0:2]\n\n # converting the cumul array to float\n cumul = cumul.astype(np.float32)\n\n sample_2 = self.sample[:]\n if len(sample_2.shape) == 1:\n idx = (slice(0, None),)\n else:\n idx = slice(0, None), self.tested_dim\n\n sample_2[idx] += 2\n\n histo, cumul_2 = histogramnd(sample_2, # <==== !!\n self.histo_range,\n self.n_bins,\n weights=10 * self.weights, # <==== !!\n weighted_histo=cumul)[0:2]\n\n self.assertEqual(cumul.dtype, np.float32)\n self.assertTrue(np.array_equal(histo, expected_h))\n self.assertEqual(id(cumul), id(cumul_2))\n self.assertTrue(np.allclose(cumul, expected_c, rtol=10e-15))\n\nclass _Test_Histogramnd_nominal(unittest.TestCase):\n \"\"\"\n Unit tests of the Histogramnd class.\n \"\"\"\n __test__ = False # ignore abstract class\n\n ndims = None\n\n def setUp(self):\n ndims = self.ndims\n if ndims is None:\n self.skipTest(\"Abstract class\")\n self.tested_dim = ndims-1\n\n if ndims is None:\n raise ValueError('ndims class member not set.')\n\n sample = np.array([5.5, -3.3,\n 0., 
-0.5,\n 3.3, 8.8,\n -7.7, 6.0,\n -4.0])\n\n weights = np.array([500.5, -300.3,\n 0.01, -0.5,\n 300.3, 800.8,\n -700.7, 600.6,\n -400.4])\n\n n_elems = len(sample)\n\n if ndims == 1:\n shape = (n_elems,)\n else:\n shape = (n_elems, ndims)\n\n self.sample = np.zeros(shape=shape, dtype=sample.dtype)\n if ndims == 1:\n self.sample = sample\n else:\n self.sample[..., ndims-1] = sample\n\n self.weights = weights\n\n # the tests are performed along one dimension,\n # all the other bins indices along the other dimensions\n # are expected to be 2\n # (e.g : when testing a 2D sample : [0, x] will go into\n # bin [2, y] because of the bin ranges [-2, 2] and n_bins = 4\n # for the first dimension)\n self.other_axes_index = 2\n self.histo_range = np.repeat([[-2., 2.]], ndims, axis=0)\n self.histo_range[ndims-1] = [-4., 6.]\n\n self.n_bins = np.array([4]*ndims)\n self.n_bins[ndims-1] = 5\n\n if ndims == 1:\n def fill_histo(h, v, dim, op=None):\n if op:\n h[:] = op(h[:], v)\n else:\n h[:] = v\n self.fill_histo = fill_histo\n else:\n def fill_histo(h, v, dim, op=None):\n idx = [self.other_axes_index]*len(h.shape)\n idx[dim] = slice(0, None)\n idx = tuple(idx)\n if op:\n h[idx] = op(h[idx], v)\n else:\n h[idx] = v\n self.fill_histo = fill_histo\n\n def test_nominal(self):\n \"\"\"\n \"\"\"\n expected_h_tpl = np.array([2, 1, 1, 1, 1])\n expected_c_tpl = np.array([-700.7, -0.5, 0.01, 300.3, 500.5])\n\n expected_h = np.zeros(shape=self.n_bins, dtype=np.double)\n expected_c = np.zeros(shape=self.n_bins, dtype=np.double)\n\n self.fill_histo(expected_h, expected_h_tpl, self.ndims-1)\n self.fill_histo(expected_c, expected_c_tpl, self.ndims-1)\n\n histo = Histogramnd(self.sample,\n self.histo_range,\n self.n_bins,\n weights=self.weights)\n \n histo, cumul, bin_edges = histo\n\n expected_edges = _get_bin_edges(self.histo_range,\n self.n_bins,\n self.ndims)\n\n self.assertEqual(cumul.dtype, np.float64)\n self.assertEqual(histo.dtype, np.uint32)\n self.assertTrue(np.array_equal(histo, expected_h))\n self.assertTrue(np.array_equal(cumul, expected_c))\n\n for i_edges, edges in enumerate(expected_edges):\n self.assertTrue(np.array_equal(bin_edges[i_edges],\n expected_edges[i_edges]),\n msg='Testing bin_edges for dim {0}'\n ''.format(i_edges+1))\n\n def test_nominal_wh_dtype(self):\n \"\"\"\n \"\"\"\n expected_h_tpl = np.array([2, 1, 1, 1, 1])\n expected_c_tpl = np.array([-700.7, -0.5, 0.01, 300.3, 500.5])\n\n expected_h = np.zeros(shape=self.n_bins, dtype=np.double)\n expected_c = np.zeros(shape=self.n_bins, dtype=np.float32)\n\n self.fill_histo(expected_h, expected_h_tpl, self.ndims-1)\n self.fill_histo(expected_c, expected_c_tpl, self.ndims-1)\n\n histo, cumul, bin_edges = Histogramnd(self.sample,\n self.histo_range,\n self.n_bins,\n weights=self.weights,\n wh_dtype=np.float32)\n\n self.assertEqual(cumul.dtype, np.float32)\n self.assertTrue(np.array_equal(histo, expected_h))\n self.assertTrue(np.allclose(cumul, expected_c))\n\n def test_nominal_uncontiguous_sample(self):\n \"\"\"\n \"\"\"\n expected_h_tpl = np.array([2, 1, 1, 1, 1])\n expected_c_tpl = np.array([-700.7, -0.5, 0.01, 300.3, 500.5])\n\n expected_h = np.zeros(shape=self.n_bins, dtype=np.double)\n expected_c = np.zeros(shape=self.n_bins, dtype=np.double)\n\n self.fill_histo(expected_h, expected_h_tpl, self.ndims-1)\n self.fill_histo(expected_c, expected_c_tpl, self.ndims-1)\n\n shape = list(self.sample.shape)\n shape[0] *= 2\n sample = np.zeros(shape, dtype=self.sample.dtype)\n uncontig_sample = sample[::2, ...]\n uncontig_sample[:] = self.sample\n\n 
self.assertFalse(uncontig_sample.flags['C_CONTIGUOUS'],\n msg='Making sure the array is not contiguous.')\n\n histo, cumul, bin_edges = Histogramnd(uncontig_sample,\n self.histo_range,\n self.n_bins,\n weights=self.weights)\n\n self.assertEqual(cumul.dtype, np.float64)\n self.assertEqual(histo.dtype, np.uint32)\n self.assertTrue(np.array_equal(histo, expected_h))\n self.assertTrue(np.array_equal(cumul, expected_c))\n\n def test_nominal_uncontiguous_weights(self):\n \"\"\"\n \"\"\"\n expected_h_tpl = np.array([2, 1, 1, 1, 1])\n expected_c_tpl = np.array([-700.7, -0.5, 0.01, 300.3, 500.5])\n\n expected_h = np.zeros(shape=self.n_bins, dtype=np.double)\n expected_c = np.zeros(shape=self.n_bins, dtype=np.double)\n\n self.fill_histo(expected_h, expected_h_tpl, self.ndims-1)\n self.fill_histo(expected_c, expected_c_tpl, self.ndims-1)\n\n shape = list(self.weights.shape)\n shape[0] *= 2\n weights = np.zeros(shape, dtype=self.weights.dtype)\n uncontig_weights = weights[::2, ...]\n uncontig_weights[:] = self.weights\n\n self.assertFalse(uncontig_weights.flags['C_CONTIGUOUS'],\n msg='Making sure the array is not contiguous.')\n\n histo, cumul, bin_edges = Histogramnd(self.sample,\n self.histo_range,\n self.n_bins,\n weights=uncontig_weights)\n\n self.assertEqual(cumul.dtype, np.float64)\n self.assertEqual(histo.dtype, np.uint32)\n self.assertTrue(np.array_equal(histo, expected_h))\n self.assertTrue(np.array_equal(cumul, expected_c))\n\n def test_nominal_wo_weights(self):\n \"\"\"\n \"\"\"\n expected_h_tpl = np.array([2, 1, 1, 1, 1])\n\n expected_h = np.zeros(shape=self.n_bins, dtype=np.double)\n\n self.fill_histo(expected_h, expected_h_tpl, self.ndims-1)\n\n histo, cumul = Histogramnd(self.sample,\n self.histo_range,\n self.n_bins,\n weights=None)[0:2]\n\n self.assertTrue(np.array_equal(histo, expected_h))\n self.assertTrue(cumul is None)\n\n def test_nominal_last_bin_closed(self):\n \"\"\"\n \"\"\"\n expected_h_tpl = np.array([2, 1, 1, 1, 2])\n expected_c_tpl = np.array([-700.7, -0.5, 0.01, 300.3, 1101.1])\n\n expected_h = np.zeros(shape=self.n_bins, dtype=np.double)\n expected_c = np.zeros(shape=self.n_bins, dtype=np.double)\n\n self.fill_histo(expected_h, expected_h_tpl, self.ndims-1)\n self.fill_histo(expected_c, expected_c_tpl, self.ndims-1)\n\n histo, cumul = Histogramnd(self.sample,\n self.histo_range,\n self.n_bins,\n weights=self.weights,\n last_bin_closed=True)[0:2]\n\n self.assertTrue(np.array_equal(histo, expected_h))\n self.assertTrue(np.array_equal(cumul, expected_c))\n\n def test_int32_weights_double_weights_range(self):\n \"\"\"\n \"\"\"\n weight_min = -299.9 # ===> will be cast to -299\n weight_max = 499.9 # ===> will be cast to 499\n\n expected_h_tpl = np.array([0, 1, 1, 1, 0])\n expected_c_tpl = np.array([0., 0., 0., 300., 0.])\n\n expected_h = np.zeros(shape=self.n_bins, dtype=np.double)\n expected_c = np.zeros(shape=self.n_bins, dtype=np.double)\n\n self.fill_histo(expected_h, expected_h_tpl, self.ndims-1)\n self.fill_histo(expected_c, expected_c_tpl, self.ndims-1)\n\n histo, cumul = Histogramnd(self.sample,\n self.histo_range,\n self.n_bins,\n weights=self.weights.astype(np.int32),\n weight_min=weight_min,\n weight_max=weight_max)[0:2]\n\n self.assertTrue(np.array_equal(histo, expected_h))\n self.assertTrue(np.array_equal(cumul, expected_c))\n\n def test_nominal_no_sample(self):\n \"\"\"\n \"\"\"\n\n histo_inst = Histogramnd(None,\n self.histo_range,\n self.n_bins)\n\n histo, weighted_histo, edges = histo_inst\n\n self.assertIsNone(histo)\n self.assertIsNone(weighted_histo)\n 
self.assertIsNone(edges)\n self.assertIsNone(histo_inst.histo)\n self.assertIsNone(histo_inst.weighted_histo)\n self.assertIsNone(histo_inst.edges)\n\n def test_empty_init_accumulate(self):\n \"\"\"\n \"\"\"\n expected_h_tpl = np.array([2, 1, 1, 1, 1])\n expected_c_tpl = np.array([-700.7, -0.5, 0.01, 300.3, 500.5])\n\n expected_h = np.zeros(shape=self.n_bins, dtype=np.double)\n expected_c = np.zeros(shape=self.n_bins, dtype=np.double)\n\n self.fill_histo(expected_h, expected_h_tpl, self.ndims-1)\n self.fill_histo(expected_c, expected_c_tpl, self.ndims-1)\n\n histo_inst = Histogramnd(None,\n self.histo_range,\n self.n_bins)\n\n histo_inst.accumulate(self.sample,\n weights=self.weights)\n\n histo = histo_inst.histo\n cumul = histo_inst.weighted_histo\n bin_edges = histo_inst.edges\n\n expected_edges = _get_bin_edges(self.histo_range,\n self.n_bins,\n self.ndims)\n\n self.assertEqual(cumul.dtype, np.float64)\n self.assertEqual(histo.dtype, np.uint32)\n self.assertTrue(np.array_equal(histo, expected_h))\n self.assertTrue(np.array_equal(cumul, expected_c))\n\n for i_edges, edges in enumerate(expected_edges):\n self.assertTrue(np.array_equal(bin_edges[i_edges],\n expected_edges[i_edges]),\n msg='Testing bin_edges for dim {0}'\n ''.format(i_edges+1))\n\n def test_accumulate(self):\n \"\"\"\n \"\"\"\n\n expected_h_tpl = np.array([2, 3, 2, 2, 2])\n expected_c_tpl = np.array([-700.7, -7007.5, -4.99, 300.4, 3503.5])\n\n expected_h = np.zeros(shape=self.n_bins, dtype=np.double)\n expected_c = np.zeros(shape=self.n_bins, dtype=np.double)\n\n self.fill_histo(expected_h, expected_h_tpl, self.ndims-1)\n self.fill_histo(expected_c, expected_c_tpl, self.ndims-1)\n\n histo_inst = Histogramnd(self.sample,\n self.histo_range,\n self.n_bins,\n weights=self.weights)\n\n sample_2 = self.sample[:]\n if len(sample_2.shape) == 1:\n idx = (slice(0, None),)\n else:\n idx = slice(0, None), self.tested_dim\n\n sample_2[idx] += 2\n\n histo_inst.accumulate(sample_2, # <==== !!\n weights=10 * self.weights) # <==== !!\n\n histo = histo_inst.histo\n cumul = histo_inst.weighted_histo\n bin_edges = histo_inst.edges\n\n self.assertEqual(cumul.dtype, np.float64)\n self.assertTrue(np.array_equal(histo, expected_h))\n self.assertTrue(np.allclose(cumul, expected_c, rtol=10e-15))\n\n def test_accumulate_no_weights(self):\n \"\"\"\n \"\"\"\n\n expected_h_tpl = np.array([2, 3, 2, 2, 2])\n expected_c_tpl = np.array([-700.7, -0.5, 0.01, 300.3, 500.5])\n\n expected_h = np.zeros(shape=self.n_bins, dtype=np.double)\n expected_c = np.zeros(shape=self.n_bins, dtype=np.double)\n\n self.fill_histo(expected_h, expected_h_tpl, self.ndims-1)\n self.fill_histo(expected_c, expected_c_tpl, self.ndims-1)\n\n histo_inst = Histogramnd(self.sample,\n self.histo_range,\n self.n_bins,\n weights=self.weights)\n\n sample_2 = self.sample[:]\n if len(sample_2.shape) == 1:\n idx = (slice(0, None),)\n else:\n idx = slice(0, None), self.tested_dim\n\n sample_2[idx] += 2\n\n histo_inst.accumulate(sample_2) # <==== !!\n\n histo = histo_inst.histo\n cumul = histo_inst.weighted_histo\n bin_edges = histo_inst.edges\n\n self.assertEqual(cumul.dtype, np.float64)\n self.assertTrue(np.array_equal(histo, expected_h))\n self.assertTrue(np.allclose(cumul, expected_c, rtol=10e-15))\n\n def test_accumulate_no_weights_at_init(self):\n \"\"\"\n \"\"\"\n\n expected_h_tpl = np.array([2, 3, 2, 2, 2])\n expected_c_tpl = np.array([0.0, -700.7, -0.5, 0.01, 300.3])\n\n expected_h = np.zeros(shape=self.n_bins, dtype=np.double)\n expected_c = np.zeros(shape=self.n_bins, 
dtype=np.double)\n\n self.fill_histo(expected_h, expected_h_tpl, self.ndims-1)\n self.fill_histo(expected_c, expected_c_tpl, self.ndims-1)\n\n histo_inst = Histogramnd(self.sample,\n self.histo_range,\n self.n_bins,\n weights=None) # <==== !!\n\n cumul = histo_inst.weighted_histo\n self.assertIsNone(cumul)\n\n sample_2 = self.sample[:]\n if len(sample_2.shape) == 1:\n idx = (slice(0, None),)\n else:\n idx = slice(0, None), self.tested_dim\n\n sample_2[idx] += 2\n\n histo_inst.accumulate(sample_2,\n weights=self.weights) # <==== !!\n\n histo = histo_inst.histo\n cumul = histo_inst.weighted_histo\n bin_edges = histo_inst.edges\n\n self.assertEqual(cumul.dtype, np.float64)\n self.assertTrue(np.array_equal(histo, expected_h))\n self.assertTrue(np.array_equal(cumul, expected_c))\n\n def testNoneNativeTypes(self):\n type = self.sample.dtype.newbyteorder(\"B\")\n sampleB = self.sample.astype(type)\n\n type = self.sample.dtype.newbyteorder(\"L\")\n sampleL = self.sample.astype(type)\n\n histo_inst = Histogramnd(sampleB,\n self.histo_range,\n self.n_bins,\n weights=self.weights)\n\n histo_inst = Histogramnd(sampleL,\n self.histo_range,\n self.n_bins,\n weights=self.weights)\n\n\nclass Test_chistogram_nominal_1d(_Test_chistogramnd_nominal):\n __test__ = True # because _Test_chistogramnd_nominal is ignored\n ndims = 1\n\n\nclass Test_chistogram_nominal_2d(_Test_chistogramnd_nominal):\n __test__ = True # because _Test_chistogramnd_nominal is ignored\n ndims = 2\n\n\nclass Test_chistogram_nominal_3d(_Test_chistogramnd_nominal):\n __test__ = True # because _Test_chistogramnd_nominal is ignored\n ndims = 3\n\n\nclass Test_Histogramnd_nominal_1d(_Test_Histogramnd_nominal):\n __test__ = True # because _Test_chistogramnd_nominal is ignored\n ndims = 1\n\n\nclass Test_Histogramnd_nominal_2d(_Test_Histogramnd_nominal):\n __test__ = True # because _Test_chistogramnd_nominal is ignored\n ndims = 2\n\n\nclass Test_Histogramnd_nominal_3d(_Test_Histogramnd_nominal):\n __test__ = True # because _Test_chistogramnd_nominal is ignored\n ndims = 3\n",
"#!/usr/bin/env python\n# coding: utf-8\n# /*##########################################################################\n#\n# Copyright (c) 2016-2018 European Synchrotron Radiation Facility\n#\n# Permission is hereby granted, free of charge, to any person obtaining a copy\n# of this software and associated documentation files (the \"Software\"), to deal\n# in the Software without restriction, including without limitation the rights\n# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell\n# copies of the Software, and to permit persons to whom the Software is\n# furnished to do so, subject to the following conditions:\n#\n# The above copyright notice and this permission notice shall be included in\n# all copies or substantial portions of the Software.\n#\n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\n# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\n# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN\n# THE SOFTWARE.\n#\n# ###########################################################################*/\n\"\"\"Qt Hdf5 widget examples\n\"\"\"\n\nimport logging\nimport sys\nimport tempfile\nimport numpy\nimport h5py\n\nlogging.basicConfig()\n_logger = logging.getLogger(\"customHdf5TreeModel\")\n\"\"\"Module logger\"\"\"\n\nfrom silx.gui import qt\nimport silx.gui.hdf5\nfrom silx.gui.data.DataViewerFrame import DataViewerFrame\nfrom silx.gui.widgets.ThreadPoolPushButton import ThreadPoolPushButton\nfrom silx.gui.hdf5.Hdf5TreeModel import Hdf5TreeModel\n\n\nclass CustomTooltips(qt.QIdentityProxyModel):\n \"\"\"Custom the tooltip of the model by composition.\n\n It is a very stable way to custom it cause it uses the Qt API. 
Then it will\n not change according to the version of Silx.\n\n But it is not well integrated if you only want to add custom fields to the\n default tooltips.\n \"\"\"\n\n def data(self, index, role=qt.Qt.DisplayRole):\n if role == qt.Qt.ToolTipRole:\n\n # Reach information from the node\n sourceIndex = self.mapToSource(index)\n sourceModel = self.sourceModel()\n originalTooltip = sourceModel.data(sourceIndex, qt.Qt.ToolTipRole)\n originalH5pyObject = sourceModel.data(sourceIndex, Hdf5TreeModel.H5PY_OBJECT_ROLE)\n\n # We can filter according to the column\n if sourceIndex.column() == Hdf5TreeModel.TYPE_COLUMN:\n return super(CustomTooltips, self).data(index, role)\n\n # Let's create our own tooltips\n template = u\"\"\"<html>\n <dl>\n <dt><b>Original</b></dt><dd>{original}</dd>\n <dt><b>Parent name</b></dt><dd>{parent_name}</dd>\n <dt><b>Name</b></dt><dd>{name}</dd>\n <dt><b>Power of 2</b></dt><dd>{pow_of_2}</dd>\n </dl>\n </html>\n \"\"\"\n\n try:\n data = originalH5pyObject[()]\n if data.size <= 10:\n result = data ** 2\n else:\n result = \"...\"\n except Exception:\n result = \"NA\"\n\n info = dict(\n original=originalTooltip,\n parent_name=originalH5pyObject.parent.name,\n name=originalH5pyObject.name,\n pow_of_2=str(result)\n )\n return template.format(**info)\n\n return super(CustomTooltips, self).data(index, role)\n\n\n_file_cache = {}\n\n\ndef get_hdf5_with_all_types():\n ID = \"alltypes\"\n if ID in _file_cache:\n return _file_cache[ID].name\n\n tmp = tempfile.NamedTemporaryFile(prefix=ID + \"_\", suffix=\".h5\", delete=True)\n tmp.file.close()\n h5 = h5py.File(tmp.name, \"w\")\n\n g = h5.create_group(\"arrays\")\n g.create_dataset(\"scalar\", data=10)\n g.create_dataset(\"list\", data=numpy.arange(10))\n base_image = numpy.arange(10**2).reshape(10, 10)\n images = [base_image,\n base_image.T,\n base_image.size - 1 - base_image,\n base_image.size - 1 - base_image.T]\n dtype = images[0].dtype\n data = numpy.empty((10 * 10, 10, 10), dtype=dtype)\n for i in range(10 * 10):\n data[i] = images[i % 4]\n data.shape = 10, 10, 10, 10\n g.create_dataset(\"image\", data=data[0, 0])\n g.create_dataset(\"cube\", data=data[0])\n g.create_dataset(\"hypercube\", data=data)\n g = h5.create_group(\"dtypes\")\n g.create_dataset(\"int32\", data=numpy.int32(10))\n g.create_dataset(\"int64\", data=numpy.int64(10))\n g.create_dataset(\"float32\", data=numpy.float32(10))\n g.create_dataset(\"float64\", data=numpy.float64(10))\n g.create_dataset(\"string_\", data=numpy.string_(\"Hi!\"))\n # g.create_dataset(\"string0\",data=numpy.string0(\"Hi!\\x00\"))\n\n g.create_dataset(\"bool\", data=True)\n g.create_dataset(\"bool2\", data=False)\n h5.close()\n\n _file_cache[ID] = tmp\n return tmp.name\n\n\nclass Hdf5TreeViewExample(qt.QMainWindow):\n \"\"\"\n This window show an example of use of a Hdf5TreeView.\n\n The tree is initialized with a list of filenames. 
A panel allow to play\n with internal property configuration of the widget, and a text screen\n allow to display events.\n \"\"\"\n\n def __init__(self, filenames=None):\n \"\"\"\n :param files_: List of HDF5 or Spec files (pathes or\n :class:`silx.io.spech5.SpecH5` or :class:`h5py.File`\n instances)\n \"\"\"\n qt.QMainWindow.__init__(self)\n self.setWindowTitle(\"Silx HDF5 widget example\")\n\n self.__asyncload = False\n self.__treeview = silx.gui.hdf5.Hdf5TreeView(self)\n \"\"\"Silx HDF5 TreeView\"\"\"\n\n self.__sourceModel = self.__treeview.model()\n \"\"\"Store the source model\"\"\"\n\n self.__text = qt.QTextEdit(self)\n \"\"\"Widget displaying information\"\"\"\n\n self.__dataViewer = DataViewerFrame(self)\n vSpliter = qt.QSplitter(qt.Qt.Vertical)\n vSpliter.addWidget(self.__dataViewer)\n vSpliter.addWidget(self.__text)\n vSpliter.setSizes([10, 0])\n\n spliter = qt.QSplitter(self)\n spliter.addWidget(self.__treeview)\n spliter.addWidget(vSpliter)\n spliter.setStretchFactor(1, 1)\n\n main_panel = qt.QWidget(self)\n layout = qt.QVBoxLayout()\n layout.addWidget(spliter)\n layout.addWidget(self.createTreeViewConfigurationPanel(self, self.__treeview))\n layout.setStretchFactor(spliter, 1)\n main_panel.setLayout(layout)\n\n self.setCentralWidget(main_panel)\n\n # append all files to the tree\n for file_name in filenames:\n self.__treeview.findHdf5TreeModel().appendFile(file_name)\n\n self.__treeview.activated.connect(self.displayData)\n\n def displayData(self):\n \"\"\"Called to update the dataviewer with the selected data.\n \"\"\"\n selected = list(self.__treeview.selectedH5Nodes())\n if len(selected) == 1:\n # Update the viewer for a single selection\n data = selected[0]\n # data is a hdf5.H5Node object\n # data.h5py_object is a Group/Dataset object (from h5py, spech5, fabioh5)\n # The dataviewer can display both\n self.__dataViewer.setData(data)\n\n def __fileCreated(self, filename):\n if self.__asyncload:\n self.__treeview.findHdf5TreeModel().insertFileAsync(filename)\n else:\n self.__treeview.findHdf5TreeModel().insertFile(filename)\n\n def __hdf5ComboChanged(self, index):\n function = self.__hdf5Combo.itemData(index)\n self.__createHdf5Button.setCallable(function)\n\n def __edfComboChanged(self, index):\n function = self.__edfCombo.itemData(index)\n self.__createEdfButton.setCallable(function)\n\n def __useCustomLabel(self):\n customModel = CustomTooltips(self.__treeview)\n customModel.setSourceModel(self.__sourceModel)\n self.__treeview.setModel(customModel)\n\n def __useOriginalModel(self):\n self.__treeview.setModel(self.__sourceModel)\n\n def createTreeViewConfigurationPanel(self, parent, treeview):\n \"\"\"Create a configuration panel to allow to play with widget states\"\"\"\n panel = qt.QWidget(parent)\n panel.setLayout(qt.QHBoxLayout())\n\n content = qt.QGroupBox(\"Create HDF5\", panel)\n content.setLayout(qt.QVBoxLayout())\n panel.layout().addWidget(content)\n\n combo = qt.QComboBox()\n combo.addItem(\"Containing all types\", get_hdf5_with_all_types)\n combo.activated.connect(self.__hdf5ComboChanged)\n content.layout().addWidget(combo)\n\n button = ThreadPoolPushButton(content, text=\"Create\")\n button.setCallable(combo.itemData(combo.currentIndex()))\n button.succeeded.connect(self.__fileCreated)\n content.layout().addWidget(button)\n\n self.__hdf5Combo = combo\n self.__createHdf5Button = button\n\n content.layout().addStretch(1)\n\n option = qt.QGroupBox(\"Custom model\", panel)\n option.setLayout(qt.QVBoxLayout())\n panel.layout().addWidget(option)\n\n button = 
qt.QPushButton(\"Original model\")\n button.clicked.connect(self.__useOriginalModel)\n option.layout().addWidget(button)\n\n button = qt.QPushButton(\"Custom tooltips by composition\")\n button.clicked.connect(self.__useCustomLabel)\n option.layout().addWidget(button)\n\n option.layout().addStretch(1)\n\n panel.layout().addStretch(1)\n return panel\n\n\ndef main(filenames):\n \"\"\"\n :param filenames: list of file paths\n \"\"\"\n app = qt.QApplication([])\n sys.excepthook = qt.exceptionHandler\n window = Hdf5TreeViewExample(filenames)\n window.show()\n result = app.exec()\n # remove ending warnings relative to QTimer\n app.deleteLater()\n sys.exit(result)\n\n\nif __name__ == \"__main__\":\n main(sys.argv[1:])\n",
"# coding: utf-8\n# /*##########################################################################\n#\n# Copyright (c) 2017-2021 European Synchrotron Radiation Facility\n#\n# Permission is hereby granted, free of charge, to any person obtaining a copy\n# of this software and associated documentation files (the \"Software\"), to deal\n# in the Software without restriction, including without limitation the rights\n# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell\n# copies of the Software, and to permit persons to whom the Software is\n# furnished to do so, subject to the following conditions:\n#\n# The above copyright notice and this permission notice shall be included in\n# all copies or substantial portions of the Software.\n#\n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\n# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\n# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN\n# THE SOFTWARE.\n#\n# ###########################################################################*/\n\"\"\"\nThis script adds a context menu to a :class:`silx.gui.plot3d.ScalarFieldView`.\n\nThis is done by adding a custom context menu to the :class:`Plot3DWidget`:\n\n- set the context menu policy to Qt.CustomContextMenu.\n- connect to the customContextMenuRequested signal.\n\nFor more information on context menus, see Qt documentation.\n\"\"\"\n\nfrom __future__ import absolute_import, division, unicode_literals\n\n__authors__ = [\"T. Vincent\"]\n__license__ = \"MIT\"\n__date__ = \"03/10/2017\"\n\n\nimport logging\n\nimport numpy\n\nfrom silx.gui import qt\n\nfrom silx.gui.plot3d.ScalarFieldView import ScalarFieldView\nfrom silx.gui.plot3d import actions\n\nlogging.basicConfig()\n\n_logger = logging.getLogger(__name__)\n\n\nclass ScalarFieldViewWithContextMenu(ScalarFieldView):\n \"\"\"Subclass ScalarFieldView to add a custom context menu to its 3D area.\"\"\"\n\n def __init__(self, parent=None):\n super(ScalarFieldViewWithContextMenu, self).__init__(parent)\n self.setWindowTitle(\"Right-click to open the context menu\")\n\n # Set Plot3DWidget custom context menu\n self.getPlot3DWidget().setContextMenuPolicy(qt.Qt.CustomContextMenu)\n self.getPlot3DWidget().customContextMenuRequested.connect(\n self._contextMenu)\n\n def _contextMenu(self, pos):\n \"\"\"Handle plot area customContextMenuRequested signal.\n\n :param QPoint pos: Mouse position relative to plot area\n \"\"\"\n # Create the context menu\n menu = qt.QMenu(self)\n menu.addAction(actions.mode.PanAction(\n parent=menu, plot3d=self.getPlot3DWidget()))\n menu.addAction(actions.mode.RotateArcballAction(\n parent=menu, plot3d=self.getPlot3DWidget()))\n menu.addSeparator()\n menu.addAction(actions.io.CopyAction(\n parent=menu, plot3d=self.getPlot3DWidget()))\n\n # Displaying the context menu at the mouse position requires\n # a global position.\n # The position received as argument is relative to Plot3DWidget\n # and needs to be converted.\n globalPosition = self.getPlot3DWidget().mapToGlobal(pos)\n menu.exec(globalPosition)\n\n\n# Start Qt QApplication\napp = qt.QApplication([])\n\n# Create the viewer main window\nwindow = ScalarFieldViewWithContextMenu()\n\n# Create dummy data\ncoords = numpy.linspace(-10, 10, 64)\nz = 
coords.reshape(-1, 1, 1)\ny = coords.reshape(1, -1, 1)\nx = coords.reshape(1, 1, -1)\ndata = numpy.sin(x * y * z) / (x * y * z)\n\n# Set ScalarFieldView data\nwindow.setData(data)\n\n# Add an iso-surface\nwindow.addIsosurface(0.2, '#FF0000FF')\n\nwindow.show()\napp.exec()\n",
"# coding: utf-8\n# /*##########################################################################\n# Copyright (C) 2016-2021 European Synchrotron Radiation Facility\n#\n# Permission is hereby granted, free of charge, to any person obtaining a copy\n# of this software and associated documentation files (the \"Software\"), to deal\n# in the Software without restriction, including without limitation the rights\n# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell\n# copies of the Software, and to permit persons to whom the Software is\n# furnished to do so, subject to the following conditions:\n#\n# The above copyright notice and this permission notice shall be included in\n# all copies or substantial portions of the Software.\n#\n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\n# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\n# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN\n# THE SOFTWARE.\n#\n# ############################################################################*/\n\n__authors__ = [\"D. Naudet\"]\n__license__ = \"MIT\"\n__date__ = \"27/03/2017\"\n\nimport os.path\n\nimport numpy\n\nfrom numpy.distutils.misc_util import Configuration\n\n\ndef configuration(parent_package='', top_path=None):\n config = Configuration('math', parent_package, top_path)\n config.add_subpackage('test')\n config.add_subpackage('fit')\n config.add_subpackage('medianfilter')\n config.add_subpackage('fft')\n\n # =====================================\n # histogramnd\n # =====================================\n histo_src = [os.path.join('histogramnd', 'src', 'histogramnd_c.c'),\n 'chistogramnd.pyx']\n histo_inc = [os.path.join('histogramnd', 'include'),\n numpy.get_include()]\n\n config.add_extension('chistogramnd',\n sources=histo_src,\n include_dirs=histo_inc,\n language='c')\n\n # =====================================\n # histogramnd_lut\n # =====================================\n config.add_extension('chistogramnd_lut',\n sources=['chistogramnd_lut.pyx'],\n include_dirs=histo_inc,\n language='c')\n # =====================================\n # marching cubes\n # =====================================\n mc_src = [os.path.join('marchingcubes', 'mc_lut.cpp'),\n 'marchingcubes.pyx']\n config.add_extension('marchingcubes',\n sources=mc_src,\n include_dirs=['marchingcubes', numpy.get_include()],\n language='c++')\n\n # min/max\n config.add_extension('combo',\n sources=['combo.pyx'],\n include_dirs=['include'],\n language='c')\n\n config.add_extension('_colormap',\n sources=[\"_colormap.pyx\"],\n language='c',\n include_dirs=['include', numpy.get_include()],\n extra_link_args=['-fopenmp'],\n extra_compile_args=['-fopenmp'])\n\n config.add_extension('interpolate',\n sources=[\"interpolate.pyx\"],\n language='c',\n include_dirs=['include', numpy.get_include()],\n extra_link_args=['-fopenmp'],\n extra_compile_args=['-fopenmp'])\n\n return config\n\n\nif __name__ == \"__main__\":\n from numpy.distutils.core import setup\n\n setup(configuration=configuration)\n"
] |
[
[
"numpy.abs",
"numpy.fft.fft",
"numpy.arange",
"numpy.cos",
"numpy.sin",
"numpy.fft.ifft",
"numpy.zeros_like"
],
[
"numpy.allclose",
"numpy.array_equal",
"numpy.arange",
"numpy.ones",
"numpy.repeat",
"numpy.array",
"numpy.zeros"
],
[
"numpy.string_",
"numpy.arange",
"numpy.int32",
"numpy.int64",
"numpy.float64",
"numpy.float32",
"numpy.empty"
],
[
"numpy.linspace",
"numpy.sin"
],
[
"numpy.get_include",
"numpy.distutils.misc_util.Configuration",
"numpy.distutils.core.setup"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [
"1.11",
"1.19",
"1.24",
"1.16",
"1.23",
"1.20",
"1.7",
"1.12",
"1.21",
"1.22",
"1.14",
"1.6",
"1.13",
"1.9",
"1.17",
"1.10",
"1.18",
"1.15",
"1.8"
],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
EndyLab/spaceballs
|
[
"331ce388674a4b01b56b36dfb3dda26729b107e6",
"331ce388674a4b01b56b36dfb3dda26729b107e6"
] |
[
"simanalysis_methods.py",
"simanalysis.py"
] |
[
"\"\"\"\nCreated by Akshay Maheshwari\n09/05/2017\n\nSimulation analysis methods for 2D hard disk systems\n\"\"\"\n\ndef loadOutputList(expt_name,outputType):\n \"\"\"Loads a file containing all file names with a specified output data into a local list\n \n Args:\n expt_name (String): Name of experiment (which contains how many ever simulation output files)\n outputType (String): molpos or simtime. Determines which set of output filenames to load.\n \n Returns:\n TYPE: List\n \"\"\"\n\n datalist = []\n if(outputType=='molpos'):\n path='data/'+expt_name+'/outputMolposList.txt'\n elif(outputType=='simtime'):\n path='data/'+expt_name+'/outputSimtimeList.txt'\n else:\n raise Exception('outputType required to be either \\'molpos\\' or \\'simtime\\'') \n with open(path) as f:\n for outputfile in f:\n datalist.append(\"data/\"+expt_name+\"/\"+outputfile.rstrip())\n return datalist \n\ndef molpos_1Dbin(data,bins,diameter):\n \"\"\"Creates a 1D histogram from X,Y location data of a single tracked molecule over time\n \n Args:\n data (pandas dataframe): time series 2D location data of a tracked molecule\n bins (int): # of rectangular bins to discretize cell with (bins are equidistant on x-axis)\n diameter (float): diameter of simulated cell\n \n Returns:\n TYPE: List\n \"\"\"\n import numpy as np;\n pos=np.linspace(-diameter/2,diameter/2,bins+1)\n\n speciesNum = int(data.shape[1]/2)\n data_hist1D_total = np.zeros(bins)\n print(speciesNum)\n for i in range(speciesNum):\n data_hist1D = np.histogram(data.loc[:,1],bins=pos)[0]\n data_hist1D_total += data_hist1D\n return data_hist1D_total\n #for chunk in pd.read_csv(datapath,header=None,chunksize=10**6): #Chunk size 10^6 runtime: 72.68, chunk size 10^7 runtime: 72.29\n #data_hist += np.histogram(chunk.loc[:,1],bins=pos)[0]\n\ndef molpos_2Dbin(data, bins, diameter):\n \"\"\"Creates a 2D histogram from X,Y location data of a single tracked molecule over time\n\n Args:\n data (pandas dataframe): time series 2D location data of a tracked molecule\n bins (int): # of bins to discretize cell with (bins x bins on x and y)\n diameter (float): diameter of simulated cell\n \n Returns:\n TYPE: List\n \"\"\"\n\n import numpy as np;\n pos_x = np.linspace(-diameter/2,diameter/2,bins+1)\n pos_y = np.linspace(-diameter/2,diameter/2,bins+1)\n #for chunk in pd.read_csv(datapath,header=None,chunksize=10**6):\n speciesNum = int(data.shape[1]/2)\n data_hist2D_total = np.zeros((bins,bins))\n for i in range(speciesNum):\n data_hist2D = np.histogram2d(np.array(data.loc[:,1]),np.array(data.loc[:,2]),bins=[pos_x,pos_y])[0]\n data_hist2D=np.flip(data_hist2D.T,0)\n data_hist2D_total += data_hist2D\n return data_hist2D_total\n\ndef molpos2D_dispersion(data,diameter):\n \"\"\"Computes the total variation distance between the X,Y distribution of a tracked molecule over time &\n the uniform distribution (expected over time in a dilute system). Bins is hardcoded to 10x10 to account \n for circle edges in 2D array (need to manually remove edges of 10x10 grid that don't fall in cell circle).\n\n Args:\n data (pandas dataframe): time series 2D location data of a tracked molecule\n diameter (float): diameter of simulated cell\n \n Returns:[0,1]. 0 means distribution is uniform. 
1 is approached as tracked molecule stays in one place.\n TYPE: float \n \"\"\"\n\n import numpy as np;\n bins=10\n circle_bins = 88 # of bins in a 10x10 grid that a circle would fall into.\n data_hist2D=molpos_2Dbin(data,bins=bins,diameter=diameter)\n normalized_data=data_hist2D/sum(sum(data_hist2D))\n newdata = np.abs(normalized_data-(sum(sum(normalized_data)))/circle_bins)\n return 0.5*(sum(newdata[0][2:-2])+sum(newdata[-1][2:-2])+sum(newdata[2][1:-1])+sum(newdata[-2][1:-1])+sum(sum(newdata[2:-2][:])))\n #return np.sqrt(sum(data.std(0)**2))\n #return sum(np.abs(data.skew()))\n\ndef covertime(datapath,diameter,molposTS):\n \"\"\"Finds the earliest time point at which every 2D bin in the 2D area of the cell has been traversed\n by the tracked molecule. Bins is hardcoded to 10x10 to account for circle edges in 2D array (need to\n manually remove edges of 10x10 grid that don't fall in cell circle).\n\n Args:\n datapath (String): File path to datafile\n diameter (float): diameter of simulated cell \n molposTS (float): the data sampling rate (independent from the underlying simulation time step) \n \n Returns:[0,molposTS*total timesteps].\n TYPE: float \n \"\"\"\n\n import numpy as np;\n import pandas as pd;\n\n bins=10\n data_hist = np.zeros((bins, bins))\n pos_x = np.linspace(-diameter/2,diameter/2,bins+1)\n pos_y = np.linspace(-diameter/2,diameter/2,bins+1)\n coverTime = 10\n foundCoverTime = False;\n timesteps=0;\n chunksize=10**3\n for chunk in pd.read_csv(datapath,header=None,chunksize=chunksize):\n timesteps+=1;\n data_hist+=molpos_2Dbin(chunk,bins=bins,diameter=diameter) #np.histogram2d(np.array(chunk.loc[:,1]),np.array(chunk.loc[:,2]),bins=[pos_x,pos_y])[0]\n if not 0 in data_hist[:,2:-2] and not 0 in data_hist[1:-1,1:-1] and not 0 in data_hist[2:-2,:] and not foundCoverTime:\n coverTime = timesteps\n foundCoverTime = True\n return (molposTS*chunksize)*coverTime\n return coverTime\n\ndef timeplot(ax, expt_name,step=1,scalefactor=1,logscale=False,start=0):\n \"\"\"Creates a plot of runtime (y-axis) vs. 
whatever dependent variable is being experimentally swept (x-axis).\n \n Args:\n ax (TYPE): figure object to plot upon\n expt_name (String): Name of experiment file\n step (int, optional): Step size between sweep of dependent variable\n scalefactor (int, optional): Used to scale time data\n logscale (bool, optional): True if want plot to have a x logscale\n start (int, optional): 1st value of x-axis to begin plot with\n \"\"\"\n import warnings\n import numpy as np\n\n simdata = []\n outputlist= loadOutputList(expt_name,'simtime')[:] #[a:b] = a to b sub-range of experiments that need to be plotted.\n\n for outputfile in outputlist:\n with warnings.catch_warnings():\n warnings.simplefilter(\"ignore\")\n simdata.append(np.loadtxt(open(outputfile), delimiter=\",\")/scalefactor)\n simdata = np.array(simdata)\n runtimes = simdata[:,1]\n simtime = simdata[-1,0]\n print(\"plotting \", runtimes)\n \n if(logscale):\n num_varx = np.logspace(start,start+len(runtimes)-1,num=len(runtimes))\n ax.set_xscale(\"log\")\n else:\n num_varx = np.arange(start,start+len(runtimes)*step,step)\n\n ax.plot(num_varx,runtimes,'-o')\n ax.set_xlim(0)\n\n #Fits a quadratic curve to time data (note in order for poly1d to fit, need at least 2 data points for runtime):\n #ax.plot(np.unique(num_varx), np.poly1d(np.polyfit(num_varx, runtimes, 2))(np.unique(num_varx)))\n\ndef saveHist(outputlist,expt_name, bins, diameter,molposTS):\n \"\"\"Creates a pkl dump of a list of each subexperiment filename with its associated data_hist1D, data_hist2D, covertime, and molpos2D_disperse\n \n Args:\n outputlist (list): List of all data file names for an experiment\n expt_name (string): Name of experiment\n bins (int): # of bins to compute 1D and 2D histograms with\n diameter (float): diameter of simulated cell \n molposTS (float): the data sampling rate (independent from the underlying simulation time step) \n \n Returns: A oath to the pkl dump\n TYPE: string\n \"\"\"\n import pickle as pkl\n import pandas as pd\n import os\n\n if not os.path.exists('data/'+expt_name+'/analysis/'):\n os.makedirs('data/'+expt_name+'/analysis/')\n path='data/'+expt_name+'/analysis/outputMolposHistogramList.pkl'\n outputFile = open(path,\"wb\")\n histlist = []\n for i in range(len(outputlist)):\n molpos2D = pd.read_csv(outputlist[i],header=None).loc[:,1:]\n data_hist1D=molpos_1Dbin(molpos2D,bins=bins,diameter=diameter)\n data_hist2D=molpos_2Dbin(molpos2D,bins=bins,diameter=diameter)\n covtime=covertime(outputlist[i],diameter=diameter,molposTS=molposTS)\n molpos2D_disperse = molpos2D_dispersion(molpos2D,diameter=diameter)\n print(\"saving\",data_hist1D)\n histlist.append([outputlist[i], data_hist1D,data_hist2D,covtime,molpos2D_disperse])\n\n pkl.dump(histlist, outputFile)\n return path;\n\ndef plotHist(histlistpklpath, expt_name, diameter=0.1, graphs=\"both\",logscale=True,step=1,start=0, simtime=1, x_label=\"# time samples\"):\n \"\"\"Generates a figure with (for each subexperiment): \n plots of 2D molecule position heatmaps, simulation time, cover time, and total variation distance\n\n \n Args:\n histlistpklpath (string): Path to pkl with a list containing a list for each subexperiment containing all analysis (hist1D, hist2D, covertime, molpos2D)\n expt_name (string): Name of experiment\n diameter (float): diameter of simulated cell \n graphs (str, optional): 'all', 'molpos', 'simtime', 'molpossim' -- determines which graphs included in analysis figure\n logscale (bool, optional): True if want x_axis to logscale for all data plots\n step (int, optional): 
Step size between sweep of dependent variable\n start (int, optional): 1st value of x-axis to begin plot with\n simtime (int, optional): Total simtime to set y-axis scale for covertime\n x_label (str, optional): X-label for all sub-graphs\n \n Returns:\n TYPE: matplotlib figure\n \"\"\"\n\n import matplotlib.pyplot as plt\n from matplotlib import gridspec\n import pickle as pkl\n from matplotlib.ticker import ScalarFormatter\n import numpy as np\n pkl_file=open(histlistpklpath,'rb')\n histlist=pkl.load(pkl_file)\n plot_dim = int(np.ceil(len(histlist)/3))\n #fig = plt.figure(figsize=(12,4))\n fig = plt.figure(figsize=(10,15))\n outer=gridspec.GridSpec(4,1,height_ratios=[3,1,1,1])\n inner = gridspec.GridSpecFromSubplotSpec(plot_dim,3,subplot_spec=outer[0],wspace=0.2,hspace=0.25)\n xfmt=ScalarFormatter()\n xfmt.set_powerlimits((-1,1))\n\n if graphs==\"all\" or graphs==\"molpos\" or graphs==\"molpossim\":\n ###### Plot a heatmap of particle location for each subexperiment #######\n\n for i in range(plot_dim):\n for j in range(3):\n if len(histlist)-1>=i*3+j:\n ax = plt.Subplot(fig,inner[i,j])\n print(\"plotting\", histlist[i*3+j][1])\n data_hist=histlist[i*3+j][1]\n pos=np.linspace(-diameter/2,diameter/2,len(data_hist)+1)\n ax.bar(pos[0:-1], data_hist,width=diameter/(len(histlist[i*3+j][1])),align='edge')\n ax.imshow(histlist[i*3+j][2].T)\n #ax.yaxis.set_major_formatter(xfmt)\n fig.add_subplot(ax)\n# np.set_printoptions(threshold=np.inf)\n\n if graphs==\"all\" or graphs==\"simtime\" or graphs==\"molpossim\":\n ###### Plot of simtime for each subexperiment #######\n\n ax2 = plt.Subplot(fig, outer[1])\n timeplot(ax2,expt_name, logscale=logscale,step=step,start=start)\n ax2.set_xlabel(x_label)\n ax2.set_ylabel('Runtime (s.)')\n fig.add_subplot(ax2)\n\n \n if graphs==\"all\":\n ####### Plot of covertime for each subexperiment #######\n ax3 = plt.Subplot(fig,outer[2]) \n covertimearr = [item[3] for item in histlist]\n if(logscale):\n num_varx = np.logspace(start,start+len(covertimearr)-1,num=len(covertimearr))\n ax.set_xscale(\"log\")\n else:\n num_varx = np.arange(start,start+len(covertimearr)*step,step)\n print(\"covertimearr\", covertimearr)\n ax3.plot(num_varx,covertimearr,'-o')\n ax3.set_ylim(0,simtime)\n #ax3.set_yscale('log')\n fig.add_subplot(ax3)\n ax3.set_xlim(0)\n ax3.set_xlabel(x_label)\n ax3.set_ylabel('Cover time (s.)')\n\n ####### Plot of total variation distance for each subexperiment #######\n ax4 = plt.Subplot(fig,outer[3])\n stdarr = [item[4] for item in histlist]\n if(logscale):\n num_varx = np.logspace(start,start+len(stdarr)-1,num=len(stdarr))\n ax.set_xscale(\"log\")\n else:\n num_varx = np.arange(start,start+len(stdarr)*step,step)\n print(\"stdarr, \", stdarr)\n ax4.plot(num_varx,stdarr,'-o')\n fig.add_subplot(ax4)\n ax4.set_xlim(0)\n ax4.set_xlabel(x_label)\n ax4.set_ylabel('Diffusive irregularity')\n\n outer.tight_layout(fig, rect=[0,0.03,1,0.90]) #rect args needed to leave space on top for title\n return fig\n\ndef tryint(s):\n try:\n return int(s)\n except:\n return s\n\ndef alphanum_key(s):\n import re\n return [tryint(c) for c in re.split('([0-9]+)', s)]\n\ndef combinePkls(expt_name,outputlist,covertime):\n \"\"\"Combines pkls for each subexperiment into a list of lists for each subexperiment with: \n its associated filename, data_hist1D, data_hist2D, covertime, and molpos2D_disperse.data_hist2D\n (Reduce part of MapReduce)\n \n Args:\n expt_name (TYPE): folder name of experiment\n outputlist (TYPE): List with molpos output data subexperiment names\n covertime 
(boolean): If the experiment computed covertime, include it in the pkl [used for experiments performed before cover time was implemented]\n \n Returns: file path to combined pkl\n TYPE: String\n \"\"\"\n import os\n import pickle as pkl\n\n histlist = []\n path='data/'+expt_name+'/analysis/'\n i=0;\n print(\"i'm here\")\n for f in sorted(os.listdir(path),key=alphanum_key)[:]: #can put range here to only plot subset of experiments\n if not f.startswith('.') and f.startswith('expt'):\n data_hist_path = open(path+'/'+f,'rb')\n data_hist = pkl.load(data_hist_path)\n print(\"test\",data_hist)\n if(covertime):\n histlist.append([outputlist[i],data_hist[0],data_hist[1],data_hist[2],data_hist[3]])\n else:\n histlist.append([outputlist[i],data_hist])\n i+=1;\n outputFile = open(path+'/outputMolposHistogramList.pkl',\"wb\")\n pkl.dump(histlist,outputFile)\n return 'data/'+expt_name+'/analysis/outputMolposHistogramList.pkl'\n\n",
"\"\"\"\nCreated by Akshay Maheshwari\n09/05/2017\n\nProduces analysis figures from experiment data\n\"\"\"\nfrom simanalysis_methods import *\nimport matplotlib.pyplot as plt\nimport time;\n\nstart_time=time.time()\nexpt_name = \"171018_2219\"\noutputlist = loadOutputList(expt_name,'molpos')\nhistlistpklpath = combinePkls(expt_name,outputlist,covertime=True)\n#histlistpklpath = saveHist(outputlist, expt_name,bins=10,diameter=0.1,molposTS=1e-7)\nfig = plotHist(histlistpklpath,expt_name,diameter=0.1, graphs=\"all\", logscale=False,step=1,start=1.25,simtime=1,x_label=\"R_crowder (nm)\")\nfig.suptitle(\"Effects of crowding molecule size on covertime, and dispersion of a single tracked molecule. \\n[1s. sim] -- R_tracked=7.25nm -- R_crowder=[1.25nm,2.25nm,...9.25nm] -- $\\phi$=0.25 -- time step=1e-7s.\")\nplt.savefig(\"data/\"+expt_name+\"/\"+expt_name+\"_analysis1.png\")\nprint(\"--- %s seconds ---\" % (time.time() - start_time))\n"
] |
[
[
"matplotlib.pyplot.Subplot",
"pandas.read_csv",
"matplotlib.gridspec.GridSpecFromSubplotSpec",
"numpy.linspace",
"numpy.histogram",
"matplotlib.gridspec.GridSpec",
"numpy.flip",
"matplotlib.ticker.ScalarFormatter",
"numpy.array",
"numpy.zeros",
"matplotlib.pyplot.figure"
],
[
"matplotlib.pyplot.savefig"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [
"2.0",
"1.4",
"1.1",
"1.5",
"1.2",
"1.3"
],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
Jiang-zzz/Python
|
[
"3aefbfd88d53dc137dd3b019892d6affb7bc9164"
] |
[
"dynamic_programming/max_sub_array.py"
] |
[
"\"\"\"\nauthor : Mayank Kumar Jha (mk9440)\n\"\"\"\nfrom typing import List\n\n\ndef find_max_sub_array(A, low, high):\n if low == high:\n return low, high, A[low]\n else:\n mid = (low + high) // 2\n left_low, left_high, left_sum = find_max_sub_array(A, low, mid)\n right_low, right_high, right_sum = find_max_sub_array(A, mid + 1, high)\n cross_left, cross_right, cross_sum = find_max_cross_sum(A, low, mid, high)\n if left_sum >= right_sum and left_sum >= cross_sum:\n return left_low, left_high, left_sum\n elif right_sum >= left_sum and right_sum >= cross_sum:\n return right_low, right_high, right_sum\n else:\n return cross_left, cross_right, cross_sum\n\n\ndef find_max_cross_sum(A, low, mid, high):\n left_sum, max_left = -999999999, -1\n right_sum, max_right = -999999999, -1\n summ = 0\n for i in range(mid, low - 1, -1):\n summ += A[i]\n if summ > left_sum:\n left_sum = summ\n max_left = i\n summ = 0\n for i in range(mid + 1, high + 1):\n summ += A[i]\n if summ > right_sum:\n right_sum = summ\n max_right = i\n return max_left, max_right, (left_sum + right_sum)\n\n\ndef max_sub_array(nums: List[int]) -> int:\n \"\"\"\n Finds the contiguous subarray which has the largest sum and return its sum.\n\n >>> max_sub_array([-2, 1, -3, 4, -1, 2, 1, -5, 4])\n 6\n \n An empty (sub)array has sum 0.\n >>> max_sub_array([])\n 0\n \n If all elements are negative, the largest subarray would be the empty array, \n having the sum 0.\n >>> max_sub_array([-1, -2, -3])\n 0\n >>> max_sub_array([5, -2, -3])\n 5\n >>> max_sub_array([31, -41, 59, 26, -53, 58, 97, -93, -23, 84])\n 187\n \"\"\"\n best = 0\n current = 0\n for i in nums:\n current += i\n if current < 0:\n current = 0\n best = max(best, current)\n return best\n\n\nif __name__ == \"__main__\":\n \"\"\"\n A random simulation of this algorithm.\n \"\"\"\n import time\n import matplotlib.pyplot as plt\n from random import randint\n inputs = [10, 100, 1000, 10000, 50000, 100000, 200000, 300000, 400000, 500000]\n tim = []\n for i in inputs:\n li = [randint(1, i) for j in range(i)]\n strt = time.time()\n (find_max_sub_array(li, 0, len(li) - 1))\n end = time.time()\n tim.append(end - strt)\n print(\"No of Inputs Time Taken\")\n for i in range(len(inputs)):\n print(inputs[i], \"\\t\\t\", tim[i])\n plt.plot(inputs, tim)\n plt.xlabel(\"Number of Inputs\")\n plt.ylabel(\"Time taken in seconds \")\n plt.show()\n"
] |
[
[
"matplotlib.pyplot.plot",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.show",
"matplotlib.pyplot.ylabel"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
swirkert/ipcai2016
|
[
"7193b1b1a001511e7efadc2a40b4ab544be76607"
] |
[
"scripts/ipcai2016/script_analyze_ipcai_in_silico.py"
] |
[
"# -*- coding: utf-8 -*-\n\"\"\"\n\nipcai2016\n\nCopyright (c) German Cancer Research Center,\nComputer Assisted Interventions.\nAll rights reserved.\n\nThis software is distributed WITHOUT ANY WARRANTY; without\neven the implied warranty of MERCHANTABILITY or FITNESS FOR\nA PARTICULAR PURPOSE.\n\nSee LICENSE for details\n\n\"\"\"\n\n\"\"\"\nCreated on Fri Aug 14 11:09:18 2015\n\n@author: wirkert\n\nModified on August 16, 2016: Anant Vemuri\n\"\"\"\n\nimport datetime\nimport logging\nimport os\nfrom collections import namedtuple\nimport sys\n\nimport luigi\nimport matplotlib\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport pandas as pd\nfrom pandas import DataFrame\nfrom sklearn.ensemble.forest import RandomForestRegressor\n\nimport tasks_mc\nfrom regression.preprocessing import preprocess, preprocess2\nfrom regression.linear import LinearSaO2Unmixing\n\nimport commons\n\n\n##########################################################\n\nsc = commons.ScriptCommons()\n\nsc.add_dir(\"IN_SILICO_RESULTS_PATH\", os.path.join(sc.get_dir(\"RESULTS_FOLDER\"),\n \"in_silico\"))\nw_standard = 10. # for this evaluation we add 10% noise\n\nfont = {'family' : 'normal',\n 'size' : 20}\n\nmatplotlib.rc('font', **font)\n\n\n# setup standard random forest\nrf = RandomForestRegressor(10, min_samples_leaf=10, max_depth=9, n_jobs=-1)\nEvaluationStruct = namedtuple(\"EvaluationStruct\",\n \"name regressor\")\n# standard evaluation setup\nstandard_evaluation_setups = [EvaluationStruct(\"Linear Beer-Lambert\",\n LinearSaO2Unmixing(wavelengths=sc.other[\"RECORDED_WAVELENGTHS\"],\n fwhm=10*10**-9))\n , EvaluationStruct(\"Proposed\", rf)]\n\n# an alternative if you want to compare non-linear to linear regression methods\n# standard_evaluation_setups = [EvaluationStruct(\"Linear Regression\",\n# LinearRegression())\n# , EvaluationStruct(\"Proposed\", rf)]\n\n# color palette\nmy_colors = [\"red\", \"green\"]\n\n# standard noise levels\nnoise_levels = np.array([1,2,3,4,5,6,7,8,9,10,\n 15,20,30,40,50,100,150,200]).astype(\"float\")\n\n\nclass TrainingSamplePlot(luigi.Task):\n which_train = luigi.Parameter()\n which_test = luigi.Parameter()\n eval_name = luigi.Parameter()\n\n def requires(self):\n return tasks_mc.CameraBatch(self.which_train, self.eval_name), \\\n tasks_mc.CameraBatch(self.which_test, self.eval_name)\n\n def output(self):\n return luigi.LocalTarget(os.path.join(sc.get_full_dir(\"IN_SILICO_RESULTS_PATH\"),\n self.eval_name + \"_sample_plot_train_\" +\n self.which_train +\n \"_test_\" + self.which_test +\n \".png\"))\n\n def run(self):\n # get data\n df_train = pd.read_csv(self.input()[0].path, header=[0, 1])\n df_test = pd.read_csv(self.input()[1].path, header=[0, 1])\n\n # for this plot we write a custom evaluation function as it is built\n # a little different\n\n # create a new dataframe which will hold all the generated errors\n df = pd.DataFrame()\n\n nr_training_samples = np.arange(10, df_train.shape[0], 50).astype(int)\n # not very pythonic, don't care\n for n in nr_training_samples:\n X_test, y_test = preprocess(df_test, snr=w_standard)\n # only take n samples for training\n X_train, y_train = preprocess(df_train, nr_samples=n,\n snr=w_standard)\n\n regressor = rf\n regressor.fit(X_train, y_train)\n y_pred = regressor.predict(X_test)\n # save results to a dataframe\n errors = np.abs(y_pred - y_test)\n errors = errors.reshape(len(errors), 1)\n current_df = DataFrame(errors * 100,\n columns=[\"Errors\"])\n current_df[\"Method\"] = \"Proposed\"\n current_df[\"Number Samples\"] = n / 
10**3.\n df = pd.concat([df, current_df], ignore_index=True)\n logging.info(\n \"Finished training classifier with {0} samples\".format(\n str(n)))\n\n df = df.groupby(\"Number Samples\").describe()\n # get the error description in the rows:\n df = df.unstack(-1)\n # get rid of multiindex by dropping \"Error\" level\n df.columns = df.columns.droplevel(0)\n\n plt.figure()\n plt.plot(df.index, df[\"50%\"], color=\"green\")\n\n # tidy up the plot\n plt.xlabel(\"number of training samples / 1000\")\n plt.ylabel(\"absolute error [%]\")\n plt.ylim((0, 20))\n plt.xlim((0, 15))\n plt.grid()\n\n # finally save the figure\n plt.savefig(self.output().path, mode=\"pdf\", dpi=500,\n bbox_inches='tight')\n\n\nclass VhbPlot(luigi.Task):\n which_train = luigi.Parameter()\n which_test = luigi.Parameter()\n eval_name = luigi.Parameter()\n\n def requires(self):\n return tasks_mc.CameraBatch(self.which_train, self.eval_name), \\\n tasks_mc.CameraBatch(self.which_test, self.eval_name)\n\n def output(self):\n return luigi.LocalTarget(os.path.join(sc.get_full_dir(\"IN_SILICO_RESULTS_PATH\"),\n self.eval_name + \"_vhb_noise_plot_train_\" +\n self.which_train +\n \"_test_\" + self.which_test +\n \".png\"))\n\n @staticmethod\n def preprocess_vhb(batch, nr_samples=None, snr=None,\n magnification=None, bands_to_sortout=None):\n \"\"\" For evaluating vhb we extract labels for vhb instead of sao2\"\"\"\n X, y = preprocess2(batch, nr_samples, snr,\n magnification, bands_to_sortout)\n\n return X, y[\"vhb\"].values\n\n def run(self):\n # get data\n df_train = pd.read_csv(self.input()[0].path, header=[0, 1])\n df_test = pd.read_csv(self.input()[1].path, header=[0, 1])\n\n # for vhb we only evaluate the proposed method since the linear\n # beer-lambert is not applicable\n evaluation_setups = [EvaluationStruct(\"Proposed\", rf)]\n df = evaluate_data(df_train, noise_levels, df_test, noise_levels,\n evaluation_setups=evaluation_setups,\n preprocessing=self.preprocess_vhb)\n standard_plotting(df, color_palette=[\"green\"],\n xytext_position=(2, 3))\n plt.ylim((0, 4))\n\n # finally save the figure\n plt.savefig(self.output().path, dpi=500,\n bbox_inches='tight')\n\n\nclass NoisePlot(luigi.Task):\n which_train = luigi.Parameter()\n which_test = luigi.Parameter()\n eval_name = luigi.Parameter()\n\n def requires(self):\n return tasks_mc.CameraBatch(self.which_train, self.eval_name), \\\n tasks_mc.CameraBatch(self.which_test, self.eval_name)\n def output(self):\n return luigi.LocalTarget(os.path.join(sc.get_full_dir(\"IN_SILICO_RESULTS_PATH\"),\n self.eval_name + \"_noise_plot_train_\" +\n self.which_train +\n \"_test_\" + self.which_test +\n \".png\"))\n\n def run(self):\n # get data\n df_train = pd.read_csv(self.input()[0].path, header=[0, 1])\n df_test = pd.read_csv(self.input()[1].path, header=[0, 1])\n\n df = evaluate_data(df_train, noise_levels, df_test, noise_levels)\n standard_plotting(df)\n\n # finally save the figure\n plt.savefig(self.output().path, mode=\"pdf\", dpi=500,\n bbox_inches='tight')\n\n\nclass WrongNoisePlot(luigi.Task):\n which_train = luigi.Parameter()\n which_test = luigi.Parameter()\n train_snr = luigi.FloatParameter()\n eval_name = luigi.Parameter()\n\n def requires(self):\n return tasks_mc.CameraBatch(self.which_train, self.eval_name), \\\n tasks_mc.CameraBatch(self.which_test, self.eval_name)\n\n def output(self):\n return luigi.LocalTarget(os.path.join(sc.get_full_dir(\"IN_SILICO_RESULTS_PATH\"),\n self.eval_name + \"_\" + str(self.train_snr) +\n \"_wrong_noise_plot_train_\" +\n self.which_train +\n 
\"_test_\" + self.which_test +\n \".png\"))\n\n def run(self):\n # get data\n df_train = pd.read_csv(self.input()[0].path, header=[0, 1])\n df_test = pd.read_csv(self.input()[1].path, header=[0, 1])\n\n # do same as in NoisePlot but with standard noise input\n df = evaluate_data(df_train,\n np.ones_like(noise_levels) * self.train_snr,\n df_test, noise_levels)\n standard_plotting(df)\n\n # finally save the figure\n plt.savefig(self.output().path, mode=\"pdf\", dpi=500,\n bbox_inches='tight')\n\n\ndef evaluate_data(df_train, w_train, df_test, w_test,\n evaluation_setups=None, preprocessing=None):\n \"\"\" Our standard method to evaluate the data. It will fill a DataFrame df\n which saves the errors for each evaluated setup\"\"\"\n if evaluation_setups is None:\n evaluation_setups = standard_evaluation_setups\n if preprocessing is None:\n preprocessing = preprocess\n if (\"weights\" in df_train) and df_train[\"weights\"].size > 0:\n weights = df_train[\"weights\"].as_matrix().squeeze()\n else:\n weights = np.ones(df_train.shape[0])\n\n # create a new dataframe which will hold all the generated errors\n df = pd.DataFrame()\n for one_w_train, one_w_test in zip(w_train, w_test):\n # setup testing function\n X_test, y_test = preprocessing(df_test, snr=one_w_test)\n # extract noisy data\n X_train, y_train = preprocessing(df_train, snr=one_w_train)\n for e in evaluation_setups:\n regressor = e.regressor\n regressor.fit(X_train, y_train, weights)\n y_pred = regressor.predict(X_test)\n # save results to a dataframe\n errors = np.abs(y_pred - y_test)\n errors = errors.reshape(len(errors), 1)\n current_df = DataFrame(errors * 100,\n columns=[\"absolute error [%]\"])\n current_df[\"Method\"] = e.name\n current_df[\"SNR\"] = int(one_w_test)\n df = pd.concat([df, current_df], ignore_index=True)\n\n return df\n\n\ndef standard_plotting(df, color_palette=None, xytext_position=None):\n if color_palette is None:\n color_palette = my_colors\n if xytext_position is None:\n xytext_position = (2, 15)\n\n plt.figure()\n\n # group it by method and noise level and get description on the errors\n df_statistics = df.groupby(['Method', 'SNR']).describe()\n # get the error description in the rows:\n df_statistics = df_statistics.unstack(-1)\n # get rid of multiindex by dropping \"Error\" level\n df_statistics.columns = df_statistics.columns.droplevel(0)\n\n # iterate over methods to plot linegraphs with error tube\n # probably this can be done nicer, but no idea how exactly\n\n for color, method in zip(\n color_palette, df_statistics.index.get_level_values(\"Method\").unique()):\n df_method = df_statistics.loc[method]\n plt.plot(df_method.index, df_method[\"50%\"],\n color=color, label=method)\n plt.fill_between(df_method.index, df_method[\"25%\"], df_method[\"75%\"],\n facecolor=color, edgecolor=color,\n alpha=0.5)\n # tidy up the plot\n plt.ylim((0, 40))\n plt.gca().set_xticks(np.arange(0, 200, 10), minor=True)\n plt.xlabel(\"SNR\")\n plt.ylabel(\"absolute error [%]\")\n plt.grid()\n plt.legend()\n\n\ndef main(args):\n eval_dict = commons.read_configuration_dict(args[1])\n\n eval_name = eval_dict[\"evaluation_name\"]\n train = eval_dict[\"mc_data_train\"]\n test = eval_dict[\"mc_data_test\"]\n test_different_domain = eval_dict[\"mc_data_test_generic\"]\n\n w_start = float(eval_dict[\"simulated_wavelengths_start\"])\n w_end = float(eval_dict[\"simulated_wavelengths_stop\"])\n w_step = float(eval_dict[\"simulated_wavelengths_step\"])\n sc.other[\"RECORDED_WAVELENGTHS\"] = np.arange(w_start, w_end, w_step) * 10 ** -9\n\n 
sc.set_root(eval_dict[\"root_path\"])\n sc.create_folders()\n\n logging.basicConfig(filename=os.path.join(sc.get_full_dir(\"LOG_FOLDER\"),\n eval_name + \"_in_silico_plots_\" +\n str(datetime.datetime.now()) +\n '.log'),\n level=logging.INFO)\n ch = logging.StreamHandler()\n ch.setLevel(logging.INFO)\n logger = logging.getLogger()\n logger.addHandler(ch)\n luigi.interface.setup_interface_logging()\n\n sch = luigi.scheduler.CentralPlannerScheduler()\n w = luigi.worker.Worker(scheduler=sch)\n w.add(TrainingSamplePlot(which_train=train, which_test=test, eval_name=eval_name))\n w.add(NoisePlot(which_train=train, which_test=test, eval_name=eval_name))\n w.add(WrongNoisePlot(which_train=train, which_test=test, train_snr=10., eval_name=eval_name))\n w.add(WrongNoisePlot(which_train=train, which_test=test, train_snr=50., eval_name=eval_name))\n w.add(WrongNoisePlot(which_train=train, which_test=test, train_snr=200., eval_name=eval_name))\n # Set a different testing domain to evaluate domain sensitivity\n w.add(NoisePlot(which_train=train,\n which_test=test_different_domain, eval_name=eval_name))\n w.add(VhbPlot(which_train=train, which_test=test, eval_name=eval_name))\n w.run()\n\n\nif __name__ == '__main__':\n main(sys.argv)\n"
] |
[
[
"matplotlib.pyplot.legend",
"pandas.DataFrame",
"matplotlib.pyplot.plot",
"matplotlib.pyplot.gca",
"numpy.ones_like",
"numpy.arange",
"matplotlib.pyplot.figure",
"pandas.concat",
"matplotlib.pyplot.ylim",
"matplotlib.pyplot.fill_between",
"numpy.array",
"matplotlib.rc",
"matplotlib.pyplot.ylabel",
"sklearn.ensemble.forest.RandomForestRegressor",
"numpy.abs",
"numpy.ones",
"matplotlib.pyplot.xlim",
"matplotlib.pyplot.grid",
"matplotlib.pyplot.xlabel"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [
"0.23",
"0.21",
"2.0",
"1.4",
"1.3",
"0.19",
"1.1",
"1.5",
"0.24",
"0.20",
"1.0",
"0.25",
"1.2"
],
"scipy": [],
"tensorflow": []
}
] |
7vikpeculiar/superset
|
[
"800ced5e257d5d83d6dbe4ced0e7318ac40d026f"
] |
[
"superset/utils/pandas_postprocessing/compare.py"
] |
[
"# Licensed to the Apache Software Foundation (ASF) under one\n# or more contributor license agreements. See the NOTICE file\n# distributed with this work for additional information\n# regarding copyright ownership. The ASF licenses this file\n# to you under the Apache License, Version 2.0 (the\n# \"License\"); you may not use this file except in compliance\n# with the License. You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing,\n# software distributed under the License is distributed on an\n# \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY\n# KIND, either express or implied. See the License for the\n# specific language governing permissions and limitations\n# under the License.\nfrom typing import List, Optional\n\nimport pandas as pd\nfrom flask_babel import gettext as _\nfrom pandas import DataFrame\n\nfrom superset.constants import PandasPostprocessingCompare\nfrom superset.exceptions import InvalidPostProcessingError\nfrom superset.utils.core import TIME_COMPARISON\nfrom superset.utils.pandas_postprocessing.utils import validate_column_args\n\n\n@validate_column_args(\"source_columns\", \"compare_columns\")\ndef compare( # pylint: disable=too-many-arguments\n df: DataFrame,\n source_columns: List[str],\n compare_columns: List[str],\n compare_type: PandasPostprocessingCompare,\n drop_original_columns: Optional[bool] = False,\n precision: Optional[int] = 4,\n) -> DataFrame:\n \"\"\"\n Calculate column-by-column changing for select columns.\n\n :param df: DataFrame on which the compare will be based.\n :param source_columns: Main query columns\n :param compare_columns: Columns being compared\n :param compare_type: Type of compare. Choice of `absolute`, `percentage` or `ratio`\n :param drop_original_columns: Whether to remove the source columns and\n compare columns.\n :param precision: Round a change rate to a variable number of decimal places.\n :return: DataFrame with compared columns.\n :raises InvalidPostProcessingError: If the request in incorrect.\n \"\"\"\n if len(source_columns) != len(compare_columns):\n raise InvalidPostProcessingError(\n _(\"`compare_columns` must have the same length as `source_columns`.\")\n )\n if compare_type not in tuple(PandasPostprocessingCompare):\n raise InvalidPostProcessingError(\n _(\"`compare_type` must be `difference`, `percentage` or `ratio`\")\n )\n if len(source_columns) == 0:\n return df\n\n for s_col, c_col in zip(source_columns, compare_columns):\n s_df = df.loc[:, [s_col]]\n s_df.rename(columns={s_col: \"__intermediate\"}, inplace=True)\n c_df = df.loc[:, [c_col]]\n c_df.rename(columns={c_col: \"__intermediate\"}, inplace=True)\n if compare_type == PandasPostprocessingCompare.DIFF:\n diff_df = s_df - c_df\n elif compare_type == PandasPostprocessingCompare.PCT:\n diff_df = ((s_df - c_df) / c_df).astype(float).round(precision)\n else:\n # compare_type == \"ratio\"\n diff_df = (s_df / c_df).astype(float).round(precision)\n\n diff_df.rename(\n columns={\n \"__intermediate\": TIME_COMPARISON.join([compare_type, s_col, c_col])\n },\n inplace=True,\n )\n df = pd.concat([df, diff_df], axis=1)\n\n if drop_original_columns:\n df = df.drop(source_columns + compare_columns, axis=1)\n return df\n"
] |
[
[
"pandas.concat"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [
"0.23",
"0.21",
"2.0",
"1.4",
"0.19",
"1.1",
"1.5",
"1.2",
"0.24",
"0.20",
"1.0",
"0.25",
"1.3"
],
"scipy": [],
"tensorflow": []
}
] |
jkren6/PARL
|
[
"e8797bd0d31d81bc81aae8b12792ff922bcb8ea9"
] |
[
"examples/tutorials/lesson5/ddpg/replay_memory.py"
] |
[
"# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n# Modified from https://github.com/seungeunrho/minimalRL/blob/master/dqn.py\n\nimport random\nimport collections\nimport numpy as np\n\n\nclass ReplayMemory(object):\n def __init__(self, max_size):\n self.buffer = collections.deque(maxlen=max_size)\n\n def append(self, exp):\n self.buffer.append(exp)\n\n def sample(self, batch_size):\n mini_batch = random.sample(self.buffer, batch_size)\n obs_batch, action_batch, reward_batch, next_obs_batch, done_batch = [], [], [], [], []\n\n for experience in mini_batch:\n s, a, r, s_p, done = experience\n obs_batch.append(s)\n action_batch.append(a)\n reward_batch.append(r)\n next_obs_batch.append(s_p)\n done_batch.append(done)\n\n return np.array(obs_batch).astype('float32'), \\\n np.array(action_batch).astype('float32'), np.array(reward_batch).astype('float32'),\\\n np.array(next_obs_batch).astype('float32'), np.array(done_batch).astype('float32')\n\n def __len__(self):\n return len(self.buffer)\n"
] |
[
[
"numpy.array"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
Baozhen-Li/SurfaceTopography
|
[
"37c70a4020c74dc56a4509969e760259ba93ec61"
] |
[
"test/test_variable_bandwidth.py"
] |
[
"#\n# Copyright 2017, 2020 Lars Pastewka\n# 2019-2020 Antoine Sanner\n#\n# ### MIT license\n#\n# Permission is hereby granted, free of charge, to any person obtaining a copy\n# of this software and associated documentation files (the \"Software\"), to deal\n# in the Software without restriction, including without limitation the rights\n# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell\n# copies of the Software, and to permit persons to whom the Software is\n# furnished to do so, subject to the following conditions:\n#\n# The above copyright notice and this permission notice shall be included in\n# all copies or substantial portions of the Software.\n#\n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\n# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\n# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE\n# SOFTWARE.\n#\n\"\"\"\nTest tools for variable bandwidth analysis.\n\"\"\"\n\nimport unittest\n\nimport numpy as np\nimport pytest\n\nfrom NuMPI import MPI\n\nfrom SurfaceTopography import Topography, UniformLineScan\nfrom SurfaceTopography.Generation import fourier_synthesis\n\npytestmark = pytest.mark.skipif(\n MPI.COMM_WORLD.Get_size() > 1,\n reason=\"tests only serial funcionalities, please execute with pytest\")\n\n\nclass TestVariableBandwidth(unittest.TestCase):\n\n def test_checkerboard_detrend_1d(self):\n arr = np.zeros([4])\n arr[:2] = 1.0\n outarr = UniformLineScan(arr, arr.shape).checkerboard_detrend((2,))\n np.testing.assert_allclose(outarr, np.zeros([4]))\n\n def test_checkerboard_detrend_2d(self):\n arr = np.zeros([4, 4])\n arr[:2, :2] = 1.0\n outarr = Topography(arr, arr.shape).checkerboard_detrend((2, 2))\n np.testing.assert_allclose(outarr, np.zeros([4, 4]))\n\n arr = np.zeros([4, 4])\n arr[:2, :2] = 1.0\n arr[:2, 1] = 2.0\n outarr = Topography(arr, arr.shape).checkerboard_detrend((2, 2))\n np.testing.assert_allclose(outarr, np.zeros([4, 4]))\n\n def test_checkerboard_detrend_with_no_subdivisions(self):\n r = 32\n x, y = np.mgrid[:r, :r]\n h = 1.3 * x - 0.3 * y + 0.02 * x * x + 0.03 * y * y - 0.013 * x * y\n t = Topography(h, (1, 1), periodic=False)\n # This should be the same as a detrend with detrend_mode='height'\n ut1 = t.checkerboard_detrend((1, 1))\n ut2 = t.detrend().heights()\n np.testing.assert_allclose(ut1, ut2)\n\n def test_self_affine_topography_1d(self):\n r = 16384\n for H in [0.3, 0.8]:\n t0 = fourier_synthesis((r,), (1,), H, rms_slope=0.1,\n amplitude_distribution=lambda n: 1.0)\n\n for t in [t0, t0.to_nonuniform()]:\n mag, bwidth, rms = t.variable_bandwidth(\n nb_grid_pts_cutoff=r // 32)\n self.assertAlmostEqual(rms[0], t.detrend().rms_height_from_profile())\n np.testing.assert_allclose(bwidth, t.physical_sizes[0] / mag)\n # Since this is a self-affine surface, rms(mag) ~ mag^-H\n b, a = np.polyfit(np.log(mag[1:]), np.log(rms[1:]), 1)\n # The error is huge...\n self.assertTrue(abs(H + b) < 0.1)\n\n def test_self_affine_topography_2d(self):\n r = 2048\n res = [r, r]\n for H in [0.3, 0.8]:\n t = fourier_synthesis(res, (1, 1), H, rms_slope=0.1,\n amplitude_distribution=lambda n: 1.0)\n mag, bwidth, rms = t.variable_bandwidth(nb_grid_pts_cutoff=r // 32)\n self.assertAlmostEqual(rms[0], t.detrend().rms_height_from_area())\n 
# Since this is a self-affine surface, rms(mag) ~ mag^-H\n b, a = np.polyfit(np.log(mag[1:]), np.log(rms[1:]), 1)\n # The error is huge...\n self.assertTrue(abs(H + b) < 0.1)\n\n\n###\n\nif __name__ == '__main__':\n unittest.main()\n"
] |
[
[
"numpy.log",
"numpy.zeros",
"numpy.testing.assert_allclose"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
DSEI21000-S21/project-product-price-prediction
|
[
"bbd1eb9577b40fcb538a6b33f5ba71096b5af72f"
] |
[
"final/model_evaluation/visualizations.py"
] |
[
"import numpy as np\nimport pandas as pd\nimport seaborn as sns\nimport matplotlib.pyplot as plt\n\ndef plot_prediction_price(origin, predict, title):\n _, ax = plt.subplots(figsize = (15,5))\n# matplotlib.rcParams['figure.figsize'] = (10, 10)\n ax.scatter(x = range(0, origin.size), y=origin, c = 'blue', label = 'Actual Price', alpha = 0.2)\n ax.scatter(x = range(0, predict.size), y=predict, c = 'red', label = 'Predicted Price', alpha = 0.2)\n\n plt.title(title, fontdict = {'fontsize' : 20})\n plt.xlabel('Observations')\n plt.ylabel('Price ($)')\n plt.legend()\n plt.show()\n\ndef plot_loss(loss, val_loss, title):\n plt.plot(loss, label=\"loss\")\n plt.plot(val_loss, label=\"val_loss\")\n plt.ylabel('msle')\n plt.xlabel('epoch')\n plt.title(title)\n plt.legend()\n plt.show()\n#\n# def visualize_model_feature_importances(trained_model, feature_names, title = None):\n# try:\n# importances = trained_model.feature_importances_\n# except:\n# importances = np.sum(abs(trained_model.coef_), axis=0)\n# indices = np.argsort(importances, )\n#\n# # plot feature importance\n# if title:\n# plt.title(title)\n# plt.barh(range(len(indices)), importances[indices], color='b', align='center')\n# plt.yticks(range(len(indices)), [feature_names[i] for i in indices])\n# plt.xlabel('Relative Importance')\n# plt.show()\n#\n# feature_importances = [(feature_names[i], importances[i]) for i in indices[::-1]]\n#\n# return feature_importances\n#\n#\n# def visualize_2d_cluster_with_legend(classname, feature1, feature2, X_names, y_names,\n# X_train, X_test, y_train, y_test, y_train_pred, y_test_pred,\n# legend=True, title=None):\n# if len(y_train.shape) > 1:\n# y_train = np.argmax(y_train, axis=1)\n# y_test = np.argmax(y_test, axis=1)\n# if len(y_train_pred.shape) > 1:\n# y_train_pred = np.argmax(y_train_pred, axis=1)\n# y_test_pred = np.argmax(y_test_pred, axis=1)\n#\n# train_df = pd.DataFrame(X_train, columns=X_names)\n# train_df['%s_true' % classname] = list(map(lambda x: y_names[x], y_train))\n# train_df['%s_pred' % classname] = list(map(lambda x: y_names[x], y_train_pred))\n#\n# test_df = pd.DataFrame(X_test, columns=X_names)\n# test_df['%s_true' % classname] = list(map(lambda x: y_names[x], y_test))\n# test_df['%s_pred' % classname] = list(map(lambda x: y_names[x], y_test_pred))\n#\n# fig, axs = plt.subplots(2, 2)\n# sns.scatterplot(data=train_df, x=feature1, y=feature2, ax=axs[0, 0], hue='%s_true' % classname, palette=\"deep\")\n# sns.scatterplot(data=train_df, x=feature1, y=feature2, ax=axs[0, 1], hue='%s_pred' % classname, palette=\"deep\")\n#\n# sns.scatterplot(data=test_df, x=feature1, y=feature2, ax=axs[1, 0], hue='%s_true' % classname, palette=\"deep\")\n# sns.scatterplot(data=test_df, x=feature1, y=feature2, ax=axs[1, 1], hue='%s_pred' % classname, palette=\"deep\")\n#\n# axs[0, 0].title.set_text('Train - True Class')\n# axs[0, 1].title.set_text('Train - Predict Class')\n# axs[1, 0].title.set_text('Test - True Class')\n# axs[1, 1].title.set_text('Test - Predict Class')\n#\n# if title:\n# plt.title(title)\n#\n# if legend:\n# handles, labels = axs[0, 0].get_legend_handles_labels()\n# fig.legend(handles, labels, bbox_to_anchor=(1.05, 1), loc='upper center')\n#\n# # fig.tight_layout()\n# axs[0, 0].get_legend().remove()\n# axs[0, 1].get_legend().remove()\n# axs[1, 0].get_legend().remove()\n# axs[1, 1].get_legend().remove()\n# plt.show()\n"
] |
[
[
"matplotlib.pyplot.legend",
"matplotlib.pyplot.title",
"matplotlib.pyplot.subplots",
"matplotlib.pyplot.plot",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.show",
"matplotlib.pyplot.ylabel"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
Tzer-AnonBot/tzer
|
[
"07799222118f757bdcb6a14654a6addda2dcf55c"
] |
[
"paper_data/RQ3/plot_nmax.py"
] |
[
"import matplotlib\nimport matplotlib.pyplot as plt\nimport pandas\nimport os\nimport re\n\n# plt.rc('font', size=SMALL_SIZE) # controls default text sizes\n# plt.rc('axes', titlesize=SMALL_SIZE) # fontsize of the axes title\nplt.rc('axes', labelsize=16) # fontsize of the x and y labels\n# plt.rc('xtick', labelsize=SMALL_SIZE) # fontsize of the tick labels\n# plt.rc('ytick', labelsize=SMALL_SIZE) # fontsize of the tick labels\nplt.rc('legend', fontsize=13.5) # legend fontsize\n# plt.rc('figure', titlesize=BIGGER_SIZE) # fontsize of the figure title\n\nclass Ploter:\n def __init__(self, cov_lim = None) -> None:\n self.legends = [] # type: ignore\n # cov / time, cov / iteration, iteration / time\n self.cov_lim = cov_lim\n\n def add(self, folder, name=None):\n path = os.path.join(folder, 'cov_by_time.txt')\n df = pandas.read_csv(path, usecols=[0, 1], header=None).to_numpy()\n \n plt.plot(df[:,0], df[:,1], alpha=0.7, linewidth=3) # cov / time\n\n if name:\n self.legends.append(name)\n else:\n assert not self.legends\n\n def plot(self, save='cov'):\n plt.legend(self.legends, prop={'weight':'bold'})\n plt.grid()\n \n if self.cov_lim is not None:\n plt.ylim(bottom=self.cov_lim)\n\n plt.xlabel('Time / Second', fontweight='bold')\n plt.ylabel(ylabel='Edge Coverage', fontweight='bold')\n # plt.title('Coverage $\\\\bf{Time}$ Efficiency')\n\n plt.tight_layout()\n plt.savefig(save + '.pdf')\n plt.savefig(save + '.png')\n\nif '__main__' == __name__:\n plt.figure(figsize=(8, 6))\n plt.ylim(top=29500)\n ploter = Ploter(cov_lim = 23000)\n \n to_plot = []\n for p in os.listdir('.'):\n if os.path.isdir(p):\n ts = re.findall('rq3_3-tolerance-(\\d+)_1-shrink-rand_gen', p)\n if ts:\n to_plot.append((p, int(ts[0])))\n\n for f, nmax in sorted(to_plot, key=lambda x: x[1]):\n # if nmax < 4:\n # continue\n ploter.add(f, name=f'$N_{{max}}$={nmax}')\n ploter.plot('nmax')\n"
] |
[
[
"matplotlib.pyplot.legend",
"matplotlib.pyplot.tight_layout",
"pandas.read_csv",
"matplotlib.pyplot.ylim",
"matplotlib.pyplot.rc",
"matplotlib.pyplot.savefig",
"matplotlib.pyplot.plot",
"matplotlib.pyplot.ylabel",
"matplotlib.pyplot.grid",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.figure"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [
"2.0",
"1.4",
"1.1",
"1.5",
"1.2",
"1.3"
],
"scipy": [],
"tensorflow": []
}
] |
ThalesGroup/incubator-superset
|
[
"f6965f99e389436614d79d91765e88a84cc6b258"
] |
[
"superset/connectors/druid/models.py"
] |
[
"# Licensed to the Apache Software Foundation (ASF) under one\n# or more contributor license agreements. See the NOTICE file\n# distributed with this work for additional information\n# regarding copyright ownership. The ASF licenses this file\n# to you under the Apache License, Version 2.0 (the\n# \"License\"); you may not use this file except in compliance\n# with the License. You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing,\n# software distributed under the License is distributed on an\n# \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY\n# KIND, either express or implied. See the License for the\n# specific language governing permissions and limitations\n# under the License.\n# pylint: disable=C,R,W\n# pylint: disable=invalid-unary-operand-type\nfrom collections import OrderedDict\nfrom copy import deepcopy\nfrom datetime import datetime, timedelta\nfrom distutils.version import LooseVersion\nimport json\nimport logging\nfrom multiprocessing.pool import ThreadPool\nimport re\n\nfrom dateutil.parser import parse as dparse\nfrom flask import escape, Markup\nfrom flask_appbuilder import Model\nfrom flask_appbuilder.models.decorators import renders\nfrom flask_babel import lazy_gettext as _\nimport pandas\nfrom pydruid.client import PyDruid\nfrom pydruid.utils.aggregators import count\nfrom pydruid.utils.dimensions import MapLookupExtraction, RegexExtraction\nfrom pydruid.utils.filters import Dimension, Filter\nfrom pydruid.utils.having import Aggregation\nfrom pydruid.utils.postaggregator import (\n Const, Field, HyperUniqueCardinality, Postaggregator, Quantile, Quantiles,\n)\nimport requests\nimport sqlalchemy as sa\nfrom sqlalchemy import (\n Boolean, Column, DateTime, ForeignKey, Integer, String, Table, Text, UniqueConstraint,\n)\nfrom sqlalchemy.orm import backref, relationship\n\nfrom superset import conf, db, security_manager\nfrom superset.connectors.base.models import BaseColumn, BaseDatasource, BaseMetric\nfrom superset.exceptions import MetricPermException, SupersetException\nfrom superset.models.helpers import (\n AuditMixinNullable, ImportMixin, QueryResult,\n)\nfrom superset.utils import core as utils, import_datasource\nfrom superset.utils.core import (\n DimSelector, DTTM_ALIAS, flasher,\n)\n\nDRUID_TZ = conf.get('DRUID_TZ')\nPOST_AGG_TYPE = 'postagg'\nmetadata = Model.metadata # pylint: disable=no-member\n\n\n# Function wrapper because bound methods cannot\n# be passed to processes\ndef _fetch_metadata_for(datasource):\n return datasource.latest_metadata()\n\n\nclass JavascriptPostAggregator(Postaggregator):\n def __init__(self, name, field_names, function):\n self.post_aggregator = {\n 'type': 'javascript',\n 'fieldNames': field_names,\n 'name': name,\n 'function': function,\n }\n self.name = name\n\n\nclass CustomPostAggregator(Postaggregator):\n \"\"\"A way to allow users to specify completely custom PostAggregators\"\"\"\n def __init__(self, name, post_aggregator):\n self.name = name\n self.post_aggregator = post_aggregator\n\n\nclass DruidCluster(Model, AuditMixinNullable, ImportMixin):\n\n \"\"\"ORM object referencing the Druid clusters\"\"\"\n\n __tablename__ = 'clusters'\n type = 'druid'\n\n id = Column(Integer, primary_key=True)\n verbose_name = Column(String(250), unique=True)\n # short unique name, used in permissions\n cluster_name = Column(String(250), unique=True)\n broker_host = Column(String(255))\n broker_port = Column(Integer, default=8082)\n 
broker_endpoint = Column(String(255), default='druid/v2')\n metadata_last_refreshed = Column(DateTime)\n cache_timeout = Column(Integer)\n\n export_fields = ('cluster_name', 'broker_host', 'broker_port',\n 'broker_endpoint', 'cache_timeout')\n update_from_object_fields = export_fields\n export_children = ['datasources']\n\n def __repr__(self):\n return self.verbose_name if self.verbose_name else self.cluster_name\n\n def __html__(self):\n return self.__repr__()\n\n @property\n def data(self):\n return {\n 'id': self.id,\n 'name': self.cluster_name,\n 'backend': 'druid',\n }\n\n @staticmethod\n def get_base_url(host, port):\n if not re.match('http(s)?://', host):\n host = 'http://' + host\n\n url = '{0}:{1}'.format(host, port) if port else host\n return url\n\n def get_base_broker_url(self):\n base_url = self.get_base_url(\n self.broker_host, self.broker_port)\n return f'{base_url}/{self.broker_endpoint}'\n\n def get_pydruid_client(self):\n cli = PyDruid(\n self.get_base_url(self.broker_host, self.broker_port),\n self.broker_endpoint)\n return cli\n\n def get_datasources(self):\n endpoint = self.get_base_broker_url() + '/datasources'\n return json.loads(requests.get(endpoint).text)\n\n def get_druid_version(self):\n endpoint = self.get_base_url(\n self.broker_host, self.broker_port) + '/status'\n return json.loads(requests.get(endpoint).text)['version']\n\n @property\n @utils.memoized\n def druid_version(self):\n return self.get_druid_version()\n\n def refresh_datasources(\n self,\n datasource_name=None,\n merge_flag=True,\n refreshAll=True):\n \"\"\"Refresh metadata of all datasources in the cluster\n If ``datasource_name`` is specified, only that datasource is updated\n \"\"\"\n ds_list = self.get_datasources()\n blacklist = conf.get('DRUID_DATA_SOURCE_BLACKLIST', [])\n ds_refresh = []\n if not datasource_name:\n ds_refresh = list(filter(lambda ds: ds not in blacklist, ds_list))\n elif datasource_name not in blacklist and datasource_name in ds_list:\n ds_refresh.append(datasource_name)\n else:\n return\n self.refresh(ds_refresh, merge_flag, refreshAll)\n\n def refresh(self, datasource_names, merge_flag, refreshAll):\n \"\"\"\n Fetches metadata for the specified datasources and\n merges to the Superset database\n \"\"\"\n session = db.session\n ds_list = (\n session.query(DruidDatasource)\n .filter(DruidDatasource.cluster_name == self.cluster_name)\n .filter(DruidDatasource.datasource_name.in_(datasource_names))\n )\n ds_map = {ds.name: ds for ds in ds_list}\n for ds_name in datasource_names:\n datasource = ds_map.get(ds_name, None)\n if not datasource:\n datasource = DruidDatasource(datasource_name=ds_name)\n with session.no_autoflush:\n session.add(datasource)\n flasher(\n _('Adding new datasource [{}]').format(ds_name), 'success')\n ds_map[ds_name] = datasource\n elif refreshAll:\n flasher(\n _('Refreshing datasource [{}]').format(ds_name), 'info')\n else:\n del ds_map[ds_name]\n continue\n datasource.cluster = self\n datasource.merge_flag = merge_flag\n session.flush()\n\n # Prepare multithreaded executation\n pool = ThreadPool()\n ds_refresh = list(ds_map.values())\n metadata = pool.map(_fetch_metadata_for, ds_refresh)\n pool.close()\n pool.join()\n\n for i in range(0, len(ds_refresh)):\n datasource = ds_refresh[i]\n cols = metadata[i]\n if cols:\n col_objs_list = (\n session.query(DruidColumn)\n .filter(DruidColumn.datasource_id == datasource.id)\n .filter(DruidColumn.column_name.in_(cols.keys()))\n )\n col_objs = {col.column_name: col for col in col_objs_list}\n for col in cols:\n 
if col == '__time': # skip the time column\n continue\n col_obj = col_objs.get(col)\n if not col_obj:\n col_obj = DruidColumn(\n datasource_id=datasource.id,\n column_name=col)\n with session.no_autoflush:\n session.add(col_obj)\n col_obj.type = cols[col]['type']\n col_obj.datasource = datasource\n if col_obj.type == 'STRING':\n col_obj.groupby = True\n col_obj.filterable = True\n datasource.refresh_metrics()\n session.commit()\n\n @property\n def perm(self):\n return '[{obj.cluster_name}].(id:{obj.id})'.format(obj=self)\n\n def get_perm(self):\n return self.perm\n\n @property\n def name(self):\n return self.verbose_name if self.verbose_name else self.cluster_name\n\n @property\n def unique_name(self):\n return self.verbose_name if self.verbose_name else self.cluster_name\n\n\nclass DruidColumn(Model, BaseColumn):\n \"\"\"ORM model for storing Druid datasource column metadata\"\"\"\n\n __tablename__ = 'columns'\n __table_args__ = (UniqueConstraint('column_name', 'datasource_id'),)\n\n datasource_id = Column(\n Integer,\n ForeignKey('datasources.id'))\n # Setting enable_typechecks=False disables polymorphic inheritance.\n datasource = relationship(\n 'DruidDatasource',\n backref=backref('columns', cascade='all, delete-orphan'),\n enable_typechecks=False)\n dimension_spec_json = Column(Text)\n\n export_fields = (\n 'datasource_id', 'column_name', 'is_active', 'type', 'groupby',\n 'filterable', 'description', 'dimension_spec_json', 'verbose_name',\n )\n update_from_object_fields = export_fields\n export_parent = 'datasource'\n\n def __repr__(self):\n return self.column_name\n\n @property\n def expression(self):\n return self.dimension_spec_json\n\n @property\n def dimension_spec(self):\n if self.dimension_spec_json:\n return json.loads(self.dimension_spec_json)\n\n def get_metrics(self):\n metrics = {}\n metrics['count'] = DruidMetric(\n metric_name='count',\n verbose_name='COUNT(*)',\n metric_type='count',\n json=json.dumps({'type': 'count', 'name': 'count'}),\n )\n return metrics\n\n def refresh_metrics(self):\n \"\"\"Refresh metrics based on the column metadata\"\"\"\n metrics = self.get_metrics()\n dbmetrics = (\n db.session.query(DruidMetric)\n .filter(DruidMetric.datasource_id == self.datasource_id)\n .filter(DruidMetric.metric_name.in_(metrics.keys()))\n )\n dbmetrics = {metric.metric_name: metric for metric in dbmetrics}\n for metric in metrics.values():\n dbmetric = dbmetrics.get(metric.metric_name)\n if dbmetric:\n for attr in ['json', 'metric_type']:\n setattr(dbmetric, attr, getattr(metric, attr))\n else:\n with db.session.no_autoflush:\n metric.datasource_id = self.datasource_id\n db.session.add(metric)\n\n @classmethod\n def import_obj(cls, i_column):\n def lookup_obj(lookup_column):\n return db.session.query(DruidColumn).filter(\n DruidColumn.datasource_id == lookup_column.datasource_id,\n DruidColumn.column_name == lookup_column.column_name).first()\n\n return import_datasource.import_simple_obj(db.session, i_column, lookup_obj)\n\n\nclass DruidMetric(Model, BaseMetric):\n\n \"\"\"ORM object referencing Druid metrics for a datasource\"\"\"\n\n __tablename__ = 'metrics'\n __table_args__ = (UniqueConstraint('metric_name', 'datasource_id'),)\n datasource_id = Column(\n Integer,\n ForeignKey('datasources.id'))\n # Setting enable_typechecks=False disables polymorphic inheritance.\n datasource = relationship(\n 'DruidDatasource',\n backref=backref('metrics', cascade='all, delete-orphan'),\n enable_typechecks=False)\n json = Column(Text)\n\n export_fields = (\n 'metric_name', 
'verbose_name', 'metric_type', 'datasource_id',\n 'json', 'description', 'is_restricted', 'd3format', 'warning_text',\n )\n update_from_object_fields = export_fields\n export_parent = 'datasource'\n\n @property\n def expression(self):\n return self.json\n\n @property\n def json_obj(self):\n try:\n obj = json.loads(self.json)\n except Exception:\n obj = {}\n return obj\n\n @property\n def perm(self):\n return (\n '{parent_name}.[{obj.metric_name}](id:{obj.id})'\n ).format(obj=self,\n parent_name=self.datasource.full_name,\n ) if self.datasource else None\n\n def get_perm(self):\n return self.perm\n\n @classmethod\n def import_obj(cls, i_metric):\n def lookup_obj(lookup_metric):\n return db.session.query(DruidMetric).filter(\n DruidMetric.datasource_id == lookup_metric.datasource_id,\n DruidMetric.metric_name == lookup_metric.metric_name).first()\n return import_datasource.import_simple_obj(db.session, i_metric, lookup_obj)\n\n\ndruiddatasource_user = Table(\n 'druiddatasource_user', metadata,\n Column('id', Integer, primary_key=True),\n Column('user_id', Integer, ForeignKey('ab_user.id')),\n Column('datasource_id', Integer, ForeignKey('datasources.id')),\n)\n\n\nclass DruidDatasource(Model, BaseDatasource):\n\n \"\"\"ORM object referencing Druid datasources (tables)\"\"\"\n\n __tablename__ = 'datasources'\n __table_args__ = (UniqueConstraint('datasource_name', 'cluster_name'),)\n\n type = 'druid'\n query_language = 'json'\n cluster_class = DruidCluster\n metric_class = DruidMetric\n column_class = DruidColumn\n owner_class = security_manager.user_model\n\n baselink = 'druiddatasourcemodelview'\n\n # Columns\n datasource_name = Column(String(255))\n is_hidden = Column(Boolean, default=False)\n filter_select_enabled = Column(Boolean, default=True) # override default\n fetch_values_from = Column(String(100))\n cluster_name = Column(\n String(250), ForeignKey('clusters.cluster_name'))\n cluster = relationship(\n 'DruidCluster', backref='datasources', foreign_keys=[cluster_name])\n owners = relationship(owner_class, secondary=druiddatasource_user,\n backref='druiddatasources')\n UniqueConstraint('cluster_name', 'datasource_name')\n\n export_fields = (\n 'datasource_name', 'is_hidden', 'description', 'default_endpoint',\n 'cluster_name', 'offset', 'cache_timeout', 'params',\n 'filter_select_enabled',\n )\n update_from_object_fields = export_fields\n\n export_parent = 'cluster'\n export_children = ['columns', 'metrics']\n\n @property\n def database(self):\n return self.cluster\n\n @property\n def connection(self):\n return str(self.database)\n\n @property\n def num_cols(self):\n return [c.column_name for c in self.columns if c.is_num]\n\n @property\n def name(self):\n return self.datasource_name\n\n @property\n def schema(self):\n ds_name = self.datasource_name or ''\n name_pieces = ds_name.split('.')\n if len(name_pieces) > 1:\n return name_pieces[0]\n else:\n return None\n\n @property\n def schema_perm(self):\n \"\"\"Returns schema permission if present, cluster one otherwise.\"\"\"\n return security_manager.get_schema_perm(self.cluster, self.schema)\n\n def get_perm(self):\n return (\n '[{obj.cluster_name}].[{obj.datasource_name}]'\n '(id:{obj.id})').format(obj=self)\n\n def update_from_object(self, obj):\n return NotImplementedError()\n\n @property\n def link(self):\n name = escape(self.datasource_name)\n return Markup(f'<a href=\"{self.url}\">{name}</a>')\n\n @property\n def full_name(self):\n return utils.get_datasource_full_name(\n self.cluster_name, self.datasource_name)\n\n @property\n 
def time_column_grains(self):\n return {\n 'time_columns': [\n 'all', '5 seconds', '30 seconds', '1 minute', '5 minutes',\n '30 minutes', '1 hour', '6 hour', '1 day', '7 days',\n 'week', 'week_starting_sunday', 'week_ending_saturday',\n 'month', 'quarter', 'year',\n ],\n 'time_grains': ['now'],\n }\n\n def __repr__(self):\n return self.datasource_name\n\n @renders('datasource_name')\n def datasource_link(self):\n url = f'/superset/explore/{self.type}/{self.id}/'\n name = escape(self.datasource_name)\n return Markup(f'<a href=\"{url}\">{name}</a>')\n\n def get_metric_obj(self, metric_name):\n return [\n m.json_obj for m in self.metrics\n if m.metric_name == metric_name\n ][0]\n\n @classmethod\n def import_obj(cls, i_datasource, import_time=None):\n \"\"\"Imports the datasource from the object to the database.\n\n Metrics and columns and datasource will be overridden if exists.\n This function can be used to import/export dashboards between multiple\n superset instances. Audit metadata isn't copies over.\n \"\"\"\n def lookup_datasource(d):\n return db.session.query(DruidDatasource).filter(\n DruidDatasource.datasource_name == d.datasource_name,\n DruidCluster.cluster_name == d.cluster_name,\n ).first()\n\n def lookup_cluster(d):\n return db.session.query(DruidCluster).filter_by(\n cluster_name=d.cluster_name).one()\n return import_datasource.import_datasource(\n db.session, i_datasource, lookup_cluster, lookup_datasource,\n import_time)\n\n def latest_metadata(self):\n \"\"\"Returns segment metadata from the latest segment\"\"\"\n logging.info('Syncing datasource [{}]'.format(self.datasource_name))\n client = self.cluster.get_pydruid_client()\n try:\n results = client.time_boundary(datasource=self.datasource_name)\n except IOError:\n results = None\n if results:\n max_time = results[0]['result']['maxTime']\n max_time = dparse(max_time)\n else:\n max_time = datetime.now()\n # Query segmentMetadata for 7 days back. 
However, due to a bug,\n # we need to set this interval to more than 1 day ago to exclude\n # realtime segments, which triggered a bug (fixed in druid 0.8.2).\n # https://groups.google.com/forum/#!topic/druid-user/gVCqqspHqOQ\n lbound = (max_time - timedelta(days=7)).isoformat()\n if LooseVersion(self.cluster.druid_version) < LooseVersion('0.8.2'):\n rbound = (max_time - timedelta(1)).isoformat()\n else:\n rbound = max_time.isoformat()\n segment_metadata = None\n try:\n segment_metadata = client.segment_metadata(\n datasource=self.datasource_name,\n intervals=lbound + '/' + rbound,\n merge=self.merge_flag,\n analysisTypes=[])\n except Exception as e:\n logging.warning('Failed first attempt to get latest segment')\n logging.exception(e)\n if not segment_metadata:\n # if no segments in the past 7 days, look at all segments\n lbound = datetime(1901, 1, 1).isoformat()[:10]\n if LooseVersion(self.cluster.druid_version) < LooseVersion('0.8.2'):\n rbound = datetime.now().isoformat()\n else:\n rbound = datetime(2050, 1, 1).isoformat()[:10]\n try:\n segment_metadata = client.segment_metadata(\n datasource=self.datasource_name,\n intervals=lbound + '/' + rbound,\n merge=self.merge_flag,\n analysisTypes=[])\n except Exception as e:\n logging.warning('Failed 2nd attempt to get latest segment')\n logging.exception(e)\n if segment_metadata:\n return segment_metadata[-1]['columns']\n\n def refresh_metrics(self):\n for col in self.columns:\n col.refresh_metrics()\n\n @classmethod\n def sync_to_db_from_config(\n cls,\n druid_config,\n user,\n cluster,\n refresh=True):\n \"\"\"Merges the ds config from druid_config into one stored in the db.\"\"\"\n session = db.session\n datasource = (\n session.query(cls)\n .filter_by(datasource_name=druid_config['name'])\n .first()\n )\n # Create a new datasource.\n if not datasource:\n datasource = cls(\n datasource_name=druid_config['name'],\n cluster=cluster,\n owners=[user],\n changed_by_fk=user.id,\n created_by_fk=user.id,\n )\n session.add(datasource)\n elif not refresh:\n return\n\n dimensions = druid_config['dimensions']\n col_objs = (\n session.query(DruidColumn)\n .filter(DruidColumn.datasource_id == datasource.id)\n .filter(DruidColumn.column_name.in_(dimensions))\n )\n col_objs = {col.column_name: col for col in col_objs}\n for dim in dimensions:\n col_obj = col_objs.get(dim, None)\n if not col_obj:\n col_obj = DruidColumn(\n datasource_id=datasource.id,\n column_name=dim,\n groupby=True,\n filterable=True,\n # TODO: fetch type from Hive.\n type='STRING',\n datasource=datasource,\n )\n session.add(col_obj)\n # Import Druid metrics\n metric_objs = (\n session.query(DruidMetric)\n .filter(DruidMetric.datasource_id == datasource.id)\n .filter(DruidMetric.metric_name.in_(\n spec['name'] for spec in druid_config['metrics_spec']\n ))\n )\n metric_objs = {metric.metric_name: metric for metric in metric_objs}\n for metric_spec in druid_config['metrics_spec']:\n metric_name = metric_spec['name']\n metric_type = metric_spec['type']\n metric_json = json.dumps(metric_spec)\n\n if metric_type == 'count':\n metric_type = 'longSum'\n metric_json = json.dumps({\n 'type': 'longSum',\n 'name': metric_name,\n 'fieldName': metric_name,\n })\n\n metric_obj = metric_objs.get(metric_name, None)\n if not metric_obj:\n metric_obj = DruidMetric(\n metric_name=metric_name,\n metric_type=metric_type,\n verbose_name='%s(%s)' % (metric_type, metric_name),\n datasource=datasource,\n json=metric_json,\n description=(\n 'Imported from the airolap config dir for %s' %\n 
druid_config['name']),\n )\n session.add(metric_obj)\n session.commit()\n\n @staticmethod\n def time_offset(granularity):\n if granularity == 'week_ending_saturday':\n return 6 * 24 * 3600 * 1000 # 6 days\n return 0\n\n # uses https://en.wikipedia.org/wiki/ISO_8601\n # http://druid.io/docs/0.8.0/querying/granularities.html\n # TODO: pass origin from the UI\n @staticmethod\n def granularity(period_name, timezone=None, origin=None):\n if not period_name or period_name == 'all':\n return 'all'\n iso_8601_dict = {\n '5 seconds': 'PT5S',\n '30 seconds': 'PT30S',\n '1 minute': 'PT1M',\n '5 minutes': 'PT5M',\n '30 minutes': 'PT30M',\n '1 hour': 'PT1H',\n '6 hour': 'PT6H',\n 'one_day': 'P1D',\n '1 day': 'P1D',\n '7 days': 'P7D',\n 'week': 'P1W',\n 'week_starting_sunday': 'P1W',\n 'week_ending_saturday': 'P1W',\n 'month': 'P1M',\n 'quarter': 'P3M',\n 'year': 'P1Y',\n }\n\n granularity = {'type': 'period'}\n if timezone:\n granularity['timeZone'] = timezone\n\n if origin:\n dttm = utils.parse_human_datetime(origin)\n granularity['origin'] = dttm.isoformat()\n\n if period_name in iso_8601_dict:\n granularity['period'] = iso_8601_dict[period_name]\n if period_name in ('week_ending_saturday', 'week_starting_sunday'):\n # use Sunday as start of the week\n granularity['origin'] = '2016-01-03T00:00:00'\n elif not isinstance(period_name, str):\n granularity['type'] = 'duration'\n granularity['duration'] = period_name\n elif period_name.startswith('P'):\n # identify if the string is the iso_8601 period\n granularity['period'] = period_name\n else:\n granularity['type'] = 'duration'\n granularity['duration'] = utils.parse_human_timedelta(\n period_name).total_seconds() * 1000\n return granularity\n\n @staticmethod\n def get_post_agg(mconf):\n \"\"\"\n For a metric specified as `postagg` returns the\n kind of post aggregation for pydruid.\n \"\"\"\n if mconf.get('type') == 'javascript':\n return JavascriptPostAggregator(\n name=mconf.get('name', ''),\n field_names=mconf.get('fieldNames', []),\n function=mconf.get('function', ''))\n elif mconf.get('type') == 'quantile':\n return Quantile(\n mconf.get('name', ''),\n mconf.get('probability', ''),\n )\n elif mconf.get('type') == 'quantiles':\n return Quantiles(\n mconf.get('name', ''),\n mconf.get('probabilities', ''),\n )\n elif mconf.get('type') == 'fieldAccess':\n return Field(mconf.get('name'))\n elif mconf.get('type') == 'constant':\n return Const(\n mconf.get('value'),\n output_name=mconf.get('name', ''),\n )\n elif mconf.get('type') == 'hyperUniqueCardinality':\n return HyperUniqueCardinality(\n mconf.get('name'),\n )\n elif mconf.get('type') == 'arithmetic':\n return Postaggregator(\n mconf.get('fn', '/'),\n mconf.get('fields', []),\n mconf.get('name', ''))\n else:\n return CustomPostAggregator(\n mconf.get('name', ''),\n mconf)\n\n @staticmethod\n def find_postaggs_for(postagg_names, metrics_dict):\n \"\"\"Return a list of metrics that are post aggregations\"\"\"\n postagg_metrics = [\n metrics_dict[name] for name in postagg_names\n if metrics_dict[name].metric_type == POST_AGG_TYPE\n ]\n # Remove post aggregations that were found\n for postagg in postagg_metrics:\n postagg_names.remove(postagg.metric_name)\n return postagg_metrics\n\n @staticmethod\n def recursive_get_fields(_conf):\n _type = _conf.get('type')\n _field = _conf.get('field')\n _fields = _conf.get('fields')\n field_names = []\n if _type in ['fieldAccess', 'hyperUniqueCardinality',\n 'quantile', 'quantiles']:\n field_names.append(_conf.get('fieldName', ''))\n if _field:\n field_names += 
DruidDatasource.recursive_get_fields(_field)\n if _fields:\n for _f in _fields:\n field_names += DruidDatasource.recursive_get_fields(_f)\n return list(set(field_names))\n\n @staticmethod\n def resolve_postagg(postagg, post_aggs, agg_names, visited_postaggs, metrics_dict):\n mconf = postagg.json_obj\n required_fields = set(\n DruidDatasource.recursive_get_fields(mconf) +\n mconf.get('fieldNames', []))\n # Check if the fields are already in aggs\n # or is a previous postagg\n required_fields = set([\n field for field in required_fields\n if field not in visited_postaggs and field not in agg_names\n ])\n # First try to find postaggs that match\n if len(required_fields) > 0:\n missing_postaggs = DruidDatasource.find_postaggs_for(\n required_fields, metrics_dict)\n for missing_metric in required_fields:\n agg_names.add(missing_metric)\n for missing_postagg in missing_postaggs:\n # Add to visited first to avoid infinite recursion\n # if post aggregations are cyclicly dependent\n visited_postaggs.add(missing_postagg.metric_name)\n for missing_postagg in missing_postaggs:\n DruidDatasource.resolve_postagg(\n missing_postagg, post_aggs, agg_names, visited_postaggs, metrics_dict)\n post_aggs[postagg.metric_name] = DruidDatasource.get_post_agg(postagg.json_obj)\n\n @staticmethod\n def metrics_and_post_aggs(metrics, metrics_dict, druid_version=None):\n # Separate metrics into those that are aggregations\n # and those that are post aggregations\n saved_agg_names = set()\n adhoc_agg_configs = []\n postagg_names = []\n for metric in metrics:\n if utils.is_adhoc_metric(metric):\n adhoc_agg_configs.append(metric)\n elif metrics_dict[metric].metric_type != POST_AGG_TYPE:\n saved_agg_names.add(metric)\n else:\n postagg_names.append(metric)\n # Create the post aggregations, maintain order since postaggs\n # may depend on previous ones\n post_aggs = OrderedDict()\n visited_postaggs = set()\n for postagg_name in postagg_names:\n postagg = metrics_dict[postagg_name]\n visited_postaggs.add(postagg_name)\n DruidDatasource.resolve_postagg(\n postagg, post_aggs, saved_agg_names, visited_postaggs, metrics_dict)\n aggs = DruidDatasource.get_aggregations(\n metrics_dict,\n saved_agg_names,\n adhoc_agg_configs,\n )\n return aggs, post_aggs\n\n def values_for_column(self,\n column_name,\n limit=10000):\n \"\"\"Retrieve some values for the given column\"\"\"\n logging.info(\n 'Getting values for columns [{}] limited to [{}]'\n .format(column_name, limit))\n # TODO: Use Lexicographic TopNMetricSpec once supported by PyDruid\n if self.fetch_values_from:\n from_dttm = utils.parse_human_datetime(self.fetch_values_from)\n else:\n from_dttm = datetime(1970, 1, 1)\n\n qry = dict(\n datasource=self.datasource_name,\n granularity='all',\n intervals=from_dttm.isoformat() + '/' + datetime.now().isoformat(),\n aggregations=dict(count=count('count')),\n dimension=column_name,\n metric='count',\n threshold=limit,\n )\n\n client = self.cluster.get_pydruid_client()\n client.topn(**qry)\n df = client.export_pandas()\n return [row[column_name] for row in df.to_records(index=False)]\n\n def get_query_str(self, query_obj, phase=1, client=None):\n return self.run_query(client=client, phase=phase, **query_obj)\n\n def _add_filter_from_pre_query_data(self, df, dimensions, dim_filter):\n ret = dim_filter\n if df is not None and not df.empty:\n new_filters = []\n for unused, row in df.iterrows():\n fields = []\n for dim in dimensions:\n f = None\n # Check if this dimension uses an extraction function\n # If so, create the appropriate pydruid 
extraction object\n if isinstance(dim, dict) and 'extractionFn' in dim:\n (col, extraction_fn) = DruidDatasource._create_extraction_fn(dim)\n dim_val = dim['outputName']\n f = Filter(\n dimension=col,\n value=row[dim_val],\n extraction_function=extraction_fn,\n )\n elif isinstance(dim, dict):\n dim_val = dim['outputName']\n if dim_val:\n f = Dimension(dim_val) == row[dim_val]\n else:\n f = Dimension(dim) == row[dim]\n if f:\n fields.append(f)\n if len(fields) > 1:\n term = Filter(type='and', fields=fields)\n new_filters.append(term)\n elif fields:\n new_filters.append(fields[0])\n if new_filters:\n ff = Filter(type='or', fields=new_filters)\n if not dim_filter:\n ret = ff\n else:\n ret = Filter(type='and', fields=[ff, dim_filter])\n return ret\n\n @staticmethod\n def druid_type_from_adhoc_metric(adhoc_metric):\n column_type = adhoc_metric['column']['type'].lower()\n aggregate = adhoc_metric['aggregate'].lower()\n\n if aggregate == 'count':\n return 'count'\n if aggregate == 'count_distinct':\n return 'cardinality'\n else:\n return column_type + aggregate.capitalize()\n\n @staticmethod\n def get_aggregations(metrics_dict, saved_metrics, adhoc_metrics=[]):\n \"\"\"\n Returns a dictionary of aggregation metric names to aggregation json objects\n\n :param metrics_dict: dictionary of all the metrics\n :param saved_metrics: list of saved metric names\n :param adhoc_metrics: list of adhoc metric names\n :raise SupersetException: if one or more metric names are not aggregations\n \"\"\"\n aggregations = OrderedDict()\n invalid_metric_names = []\n for metric_name in saved_metrics:\n if metric_name in metrics_dict:\n metric = metrics_dict[metric_name]\n if metric.metric_type == POST_AGG_TYPE:\n invalid_metric_names.append(metric_name)\n else:\n aggregations[metric_name] = metric.json_obj\n else:\n invalid_metric_names.append(metric_name)\n if len(invalid_metric_names) > 0:\n raise SupersetException(\n _('Metric(s) {} must be aggregations.').format(invalid_metric_names))\n for adhoc_metric in adhoc_metrics:\n aggregations[adhoc_metric['label']] = {\n 'fieldName': adhoc_metric['column']['column_name'],\n 'fieldNames': [adhoc_metric['column']['column_name']],\n 'type': DruidDatasource.druid_type_from_adhoc_metric(adhoc_metric),\n 'name': adhoc_metric['label'],\n }\n return aggregations\n\n def check_restricted_metrics(self, aggregations):\n rejected_metrics = [\n m.metric_name for m in self.metrics\n if m.is_restricted and\n m.metric_name in aggregations.keys() and\n not security_manager.has_access('metric_access', m.perm)\n ]\n if rejected_metrics:\n raise MetricPermException(\n 'Access to the metrics denied: ' + ', '.join(rejected_metrics),\n )\n\n def get_dimensions(self, groupby, columns_dict):\n dimensions = []\n groupby = [gb for gb in groupby if gb in columns_dict]\n for column_name in groupby:\n col = columns_dict.get(column_name)\n dim_spec = col.dimension_spec if col else None\n if dim_spec:\n dimensions.append(dim_spec)\n else:\n dimensions.append(column_name)\n return dimensions\n\n def intervals_from_dttms(self, from_dttm, to_dttm):\n # Couldn't find a way to just not filter on time...\n from_dttm = from_dttm or datetime(1901, 1, 1)\n to_dttm = to_dttm or datetime(2101, 1, 1)\n\n # add tzinfo to native datetime with config\n from_dttm = from_dttm.replace(tzinfo=DRUID_TZ)\n to_dttm = to_dttm.replace(tzinfo=DRUID_TZ)\n return '{}/{}'.format(\n from_dttm.isoformat() if from_dttm else '',\n to_dttm.isoformat() if to_dttm else '',\n )\n\n @staticmethod\n def _dimensions_to_values(dimensions):\n 
\"\"\"\n Replace dimensions specs with their `dimension`\n values, and ignore those without\n \"\"\"\n values = []\n for dimension in dimensions:\n if isinstance(dimension, dict):\n if 'extractionFn' in dimension:\n values.append(dimension)\n elif 'dimension' in dimension:\n values.append(dimension['dimension'])\n else:\n values.append(dimension)\n\n return values\n\n @staticmethod\n def sanitize_metric_object(metric):\n \"\"\"\n Update a metric with the correct type if necessary.\n :param dict metric: The metric to sanitize\n \"\"\"\n if (\n utils.is_adhoc_metric(metric) and\n metric['column']['type'].upper() == 'FLOAT'\n ):\n metric['column']['type'] = 'DOUBLE'\n\n def run_query( # noqa / druid\n self,\n groupby, metrics,\n granularity,\n from_dttm, to_dttm,\n filter=None, # noqa\n is_timeseries=True,\n timeseries_limit=None,\n timeseries_limit_metric=None,\n row_limit=None,\n inner_from_dttm=None, inner_to_dttm=None,\n orderby=None,\n extras=None, # noqa\n columns=None, phase=2, client=None,\n order_desc=True,\n prequeries=None,\n is_prequery=False,\n ):\n \"\"\"Runs a query against Druid and returns a dataframe.\n \"\"\"\n # TODO refactor into using a TBD Query object\n client = client or self.cluster.get_pydruid_client()\n row_limit = row_limit or conf.get('ROW_LIMIT')\n\n if not is_timeseries:\n granularity = 'all'\n\n if granularity == 'all':\n phase = 1\n inner_from_dttm = inner_from_dttm or from_dttm\n inner_to_dttm = inner_to_dttm or to_dttm\n\n timezone = from_dttm.replace(tzinfo=DRUID_TZ).tzname() if from_dttm else None\n\n query_str = ''\n metrics_dict = {m.metric_name: m for m in self.metrics}\n columns_dict = {c.column_name: c for c in self.columns}\n\n if (\n self.cluster and\n LooseVersion(self.cluster.get_druid_version()) < LooseVersion('0.11.0')\n ):\n for metric in metrics:\n self.sanitize_metric_object(metric)\n self.sanitize_metric_object(timeseries_limit_metric)\n\n aggregations, post_aggs = DruidDatasource.metrics_and_post_aggs(\n metrics,\n metrics_dict)\n\n self.check_restricted_metrics(aggregations)\n\n # the dimensions list with dimensionSpecs expanded\n dimensions = self.get_dimensions(groupby, columns_dict)\n extras = extras or {}\n qry = dict(\n datasource=self.datasource_name,\n dimensions=dimensions,\n aggregations=aggregations,\n granularity=DruidDatasource.granularity(\n granularity,\n timezone=timezone,\n origin=extras.get('druid_time_origin'),\n ),\n post_aggregations=post_aggs,\n intervals=self.intervals_from_dttms(from_dttm, to_dttm),\n )\n\n filters = DruidDatasource.get_filters(filter, self.num_cols, columns_dict)\n if filters:\n qry['filter'] = filters\n\n having_filters = self.get_having_filters(extras.get('having_druid'))\n if having_filters:\n qry['having'] = having_filters\n\n order_direction = 'descending' if order_desc else 'ascending'\n\n if columns:\n columns.append('__time')\n del qry['post_aggregations']\n del qry['aggregations']\n qry['dimensions'] = columns\n qry['metrics'] = []\n qry['granularity'] = 'all'\n qry['limit'] = row_limit\n client.scan(**qry)\n elif len(groupby) == 0 and not having_filters:\n logging.info('Running timeseries query for no groupby values')\n del qry['dimensions']\n client.timeseries(**qry)\n elif (\n not having_filters and\n len(groupby) == 1 and\n order_desc\n ):\n dim = list(qry.get('dimensions'))[0]\n logging.info('Running two-phase topn query for dimension [{}]'.format(dim))\n pre_qry = deepcopy(qry)\n if timeseries_limit_metric:\n order_by = utils.get_metric_name(timeseries_limit_metric)\n aggs_dict, 
post_aggs_dict = DruidDatasource.metrics_and_post_aggs(\n [timeseries_limit_metric],\n metrics_dict)\n if phase == 1:\n pre_qry['aggregations'].update(aggs_dict)\n pre_qry['post_aggregations'].update(post_aggs_dict)\n else:\n pre_qry['aggregations'] = aggs_dict\n pre_qry['post_aggregations'] = post_aggs_dict\n else:\n order_by = list(qry['aggregations'].keys())[0]\n # Limit on the number of timeseries, doing a two-phases query\n pre_qry['granularity'] = 'all'\n pre_qry['threshold'] = min(row_limit,\n timeseries_limit or row_limit)\n pre_qry['metric'] = order_by\n pre_qry['dimension'] = self._dimensions_to_values(qry.get('dimensions'))[0]\n del pre_qry['dimensions']\n\n client.topn(**pre_qry)\n logging.info('Phase 1 Complete')\n if phase == 2:\n query_str += '// Two phase query\\n// Phase 1\\n'\n query_str += json.dumps(\n client.query_builder.last_query.query_dict, indent=2)\n query_str += '\\n'\n if phase == 1:\n return query_str\n query_str += (\n \"// Phase 2 (built based on phase one's results)\\n\")\n df = client.export_pandas()\n qry['filter'] = self._add_filter_from_pre_query_data(\n df,\n [pre_qry['dimension']],\n filters)\n qry['threshold'] = timeseries_limit or 1000\n if row_limit and granularity == 'all':\n qry['threshold'] = row_limit\n qry['dimension'] = dim\n del qry['dimensions']\n qry['metric'] = list(qry['aggregations'].keys())[0]\n client.topn(**qry)\n logging.info('Phase 2 Complete')\n elif len(groupby) > 0 or having_filters:\n # If grouping on multiple fields or using a having filter\n # we have to force a groupby query\n logging.info('Running groupby query for dimensions [{}]'.format(dimensions))\n if timeseries_limit and is_timeseries:\n logging.info('Running two-phase query for timeseries')\n\n pre_qry = deepcopy(qry)\n pre_qry_dims = self._dimensions_to_values(qry['dimensions'])\n\n # Can't use set on an array with dicts\n # Use set with non-dict items only\n non_dict_dims = list(\n set([x for x in pre_qry_dims if not isinstance(x, dict)]),\n )\n dict_dims = [x for x in pre_qry_dims if isinstance(x, dict)]\n pre_qry['dimensions'] = non_dict_dims + dict_dims\n\n order_by = None\n if metrics:\n order_by = utils.get_metric_name(metrics[0])\n else:\n order_by = pre_qry_dims[0]\n\n if timeseries_limit_metric:\n order_by = utils.get_metric_name(timeseries_limit_metric)\n aggs_dict, post_aggs_dict = DruidDatasource.metrics_and_post_aggs(\n [timeseries_limit_metric],\n metrics_dict)\n if phase == 1:\n pre_qry['aggregations'].update(aggs_dict)\n pre_qry['post_aggregations'].update(post_aggs_dict)\n else:\n pre_qry['aggregations'] = aggs_dict\n pre_qry['post_aggregations'] = post_aggs_dict\n\n # Limit on the number of timeseries, doing a two-phases query\n pre_qry['granularity'] = 'all'\n pre_qry['limit_spec'] = {\n 'type': 'default',\n 'limit': min(timeseries_limit, row_limit),\n 'intervals': self.intervals_from_dttms(\n inner_from_dttm, inner_to_dttm),\n 'columns': [{\n 'dimension': order_by,\n 'direction': order_direction,\n }],\n }\n client.groupby(**pre_qry)\n logging.info('Phase 1 Complete')\n query_str += '// Two phase query\\n// Phase 1\\n'\n query_str += json.dumps(\n client.query_builder.last_query.query_dict, indent=2)\n query_str += '\\n'\n if phase == 1:\n return query_str\n query_str += (\n \"// Phase 2 (built based on phase one's results)\\n\")\n df = client.export_pandas()\n qry['filter'] = self._add_filter_from_pre_query_data(\n df,\n pre_qry['dimensions'],\n filters,\n )\n qry['limit_spec'] = None\n if row_limit:\n dimension_values = 
self._dimensions_to_values(dimensions)\n qry['limit_spec'] = {\n 'type': 'default',\n 'limit': row_limit,\n 'columns': [{\n 'dimension': (\n utils.get_metric_name(\n metrics[0],\n ) if metrics else dimension_values[0]\n ),\n 'direction': order_direction,\n }],\n }\n client.groupby(**qry)\n logging.info('Query Complete')\n query_str += json.dumps(\n client.query_builder.last_query.query_dict, indent=2)\n return query_str\n\n @staticmethod\n def homogenize_types(df, groupby_cols):\n \"\"\"Converting all GROUPBY columns to strings\n\n When grouping by a numeric (say FLOAT) column, pydruid returns\n strings in the dataframe. This creates issues downstream related\n to having mixed types in the dataframe\n\n Here we replace None with <NULL> and make the whole series a\n str instead of an object.\n \"\"\"\n for col in groupby_cols:\n df[col] = df[col].fillna('<NULL>').astype('unicode')\n return df\n\n def query(self, query_obj):\n qry_start_dttm = datetime.now()\n client = self.cluster.get_pydruid_client()\n query_str = self.get_query_str(\n client=client, query_obj=query_obj, phase=2)\n df = client.export_pandas()\n\n if df is None or df.size == 0:\n return QueryResult(\n df=pandas.DataFrame([]),\n query=query_str,\n duration=datetime.now() - qry_start_dttm)\n\n df = self.homogenize_types(df, query_obj.get('groupby', []))\n df.columns = [\n DTTM_ALIAS if c in ('timestamp', '__time') else c\n for c in df.columns\n ]\n\n is_timeseries = query_obj['is_timeseries'] \\\n if 'is_timeseries' in query_obj else True\n if (\n not is_timeseries and\n DTTM_ALIAS in df.columns):\n del df[DTTM_ALIAS]\n\n # Reordering columns\n cols = []\n if DTTM_ALIAS in df.columns:\n cols += [DTTM_ALIAS]\n cols += query_obj.get('groupby') or []\n cols += query_obj.get('columns') or []\n cols += query_obj.get('metrics') or []\n\n cols = utils.get_metric_names(cols)\n cols = [col for col in cols if col in df.columns]\n df = df[cols]\n\n time_offset = DruidDatasource.time_offset(query_obj['granularity'])\n\n def increment_timestamp(ts):\n dt = utils.parse_human_datetime(ts).replace(\n tzinfo=DRUID_TZ)\n return dt + timedelta(milliseconds=time_offset)\n if DTTM_ALIAS in df.columns and time_offset:\n df[DTTM_ALIAS] = df[DTTM_ALIAS].apply(increment_timestamp)\n\n return QueryResult(\n df=df,\n query=query_str,\n duration=datetime.now() - qry_start_dttm)\n\n @staticmethod\n def _create_extraction_fn(dim_spec):\n extraction_fn = None\n if dim_spec and 'extractionFn' in dim_spec:\n col = dim_spec['dimension']\n fn = dim_spec['extractionFn']\n ext_type = fn.get('type')\n if ext_type == 'lookup' and fn['lookup'].get('type') == 'map':\n replace_missing_values = fn.get('replaceMissingValueWith')\n retain_missing_values = fn.get('retainMissingValue', False)\n injective = fn.get('isOneToOne', False)\n extraction_fn = MapLookupExtraction(\n fn['lookup']['map'],\n replace_missing_values=replace_missing_values,\n retain_missing_values=retain_missing_values,\n injective=injective,\n )\n elif ext_type == 'regex':\n extraction_fn = RegexExtraction(fn['expr'])\n else:\n raise Exception(_('Unsupported extraction function: ' + ext_type))\n return (col, extraction_fn)\n\n @classmethod\n def get_filters(cls, raw_filters, num_cols, columns_dict): # noqa\n \"\"\"Given Superset filter data structure, returns pydruid Filter(s)\"\"\"\n filters = None\n for flt in raw_filters:\n col = flt.get('col')\n op = flt.get('op')\n eq = flt.get('val')\n if (\n not col or\n not op or\n (eq is None and op not in ('IS NULL', 'IS NOT NULL'))):\n continue\n\n # Check 
if this dimension uses an extraction function\n # If so, create the appropriate pydruid extraction object\n column_def = columns_dict.get(col)\n dim_spec = column_def.dimension_spec if column_def else None\n extraction_fn = None\n if dim_spec and 'extractionFn' in dim_spec:\n (col, extraction_fn) = DruidDatasource._create_extraction_fn(dim_spec)\n\n cond = None\n is_numeric_col = col in num_cols\n is_list_target = op in ('in', 'not in')\n eq = cls.filter_values_handler(\n eq, is_list_target=is_list_target,\n target_column_is_numeric=is_numeric_col)\n\n # For these two ops, could have used Dimension,\n # but it doesn't support extraction functions\n if op == '==':\n cond = Filter(dimension=col, value=eq, extraction_function=extraction_fn)\n elif op == '!=':\n cond = ~Filter(dimension=col, value=eq, extraction_function=extraction_fn)\n elif op in ('in', 'not in'):\n fields = []\n # ignore the filter if it has no value\n if not len(eq):\n continue\n # if it uses an extraction fn, use the \"in\" operator\n # as Dimension isn't supported\n elif extraction_fn is not None:\n cond = Filter(\n dimension=col,\n values=eq,\n type='in',\n extraction_function=extraction_fn,\n )\n elif len(eq) == 1:\n cond = Dimension(col) == eq[0]\n else:\n for s in eq:\n fields.append(Dimension(col) == s)\n cond = Filter(type='or', fields=fields)\n if op == 'not in':\n cond = ~cond\n elif op == 'regex':\n cond = Filter(\n extraction_function=extraction_fn,\n type='regex',\n pattern=eq,\n dimension=col,\n )\n\n # For the ops below, could have used pydruid's Bound,\n # but it doesn't support extraction functions\n elif op == '>=':\n cond = Filter(\n type='bound',\n extraction_function=extraction_fn,\n dimension=col,\n lowerStrict=False,\n upperStrict=False,\n lower=eq,\n upper=None,\n alphaNumeric=is_numeric_col,\n )\n elif op == '<=':\n cond = Filter(\n type='bound',\n extraction_function=extraction_fn,\n dimension=col,\n lowerStrict=False,\n upperStrict=False,\n lower=None,\n upper=eq,\n alphaNumeric=is_numeric_col,\n )\n elif op == '>':\n cond = Filter(\n type='bound',\n extraction_function=extraction_fn,\n lowerStrict=True,\n upperStrict=False,\n dimension=col,\n lower=eq,\n upper=None,\n alphaNumeric=is_numeric_col,\n )\n elif op == '<':\n cond = Filter(\n type='bound',\n extraction_function=extraction_fn,\n upperStrict=True,\n lowerStrict=False,\n dimension=col,\n lower=None,\n upper=eq,\n alphaNumeric=is_numeric_col,\n )\n elif op == 'IS NULL':\n cond = Dimension(col) == None # NOQA\n elif op == 'IS NOT NULL':\n cond = Dimension(col) != None # NOQA\n\n if filters:\n filters = Filter(type='and', fields=[\n cond,\n filters,\n ])\n else:\n filters = cond\n\n return filters\n\n def _get_having_obj(self, col, op, eq):\n cond = None\n if op == '==':\n if col in self.column_names:\n cond = DimSelector(dimension=col, value=eq)\n else:\n cond = Aggregation(col) == eq\n elif op == '>':\n cond = Aggregation(col) > eq\n elif op == '<':\n cond = Aggregation(col) < eq\n\n return cond\n\n def get_having_filters(self, raw_filters):\n filters = None\n reversed_op_map = {\n '!=': '==',\n '>=': '<',\n '<=': '>',\n }\n\n for flt in raw_filters:\n if not all(f in flt for f in ['col', 'op', 'val']):\n continue\n col = flt['col']\n op = flt['op']\n eq = flt['val']\n cond = None\n if op in ['==', '>', '<']:\n cond = self._get_having_obj(col, op, eq)\n elif op in reversed_op_map:\n cond = ~self._get_having_obj(col, reversed_op_map[op], eq)\n\n if filters:\n filters = filters & cond\n else:\n filters = cond\n return filters\n\n 
@classmethod\n def query_datasources_by_name(\n cls, session, database, datasource_name, schema=None):\n return (\n session.query(cls)\n .filter_by(cluster_name=database.id)\n .filter_by(datasource_name=datasource_name)\n .all()\n )\n\n def external_metadata(self):\n self.merge_flag = True\n return [\n {\n 'name': k,\n 'type': v.get('type'),\n }\n for k, v in self.latest_metadata().items()\n ]\n\n\nsa.event.listen(DruidDatasource, 'after_insert', security_manager.set_perm)\nsa.event.listen(DruidDatasource, 'after_update', security_manager.set_perm)\n"
] |
[
[
"pandas.DataFrame"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [
"0.23",
"0.21",
"2.0",
"1.4",
"0.19",
"1.1",
"1.5",
"1.2",
"0.24",
"0.20",
"1.0",
"0.25",
"1.3"
],
"scipy": [],
"tensorflow": []
}
] |
likweitan/final_year_project
|
[
"a86059cad92efe4edd85364d21b4ee6a56234b30"
] |
[
"streamlit/app.py"
] |
[
"import streamlit as st\nimport pandas as pd\n\nimport src.models.data_preprocessing as data_preprocessing\n\nimport src.pages.home as home\nimport src.pages.data_exploration as data_exploration\nimport src.pages.problem_statistics as problem_statistics\nimport src.pages.content_statistics as content_statistics\nimport src.pages.user_statistics as user_statistics\nimport src.pages.user_activities as user_activities\nimport src.pages.predictions as predictions\n# Streamlit encourages well-structured code, like starting execution in a main() function.\n\n\ndef main():\n st.beta_set_page_config(\n page_title='Assessing the Readiness', page_icon='https://i.ibb.co/vxwPL94/image.png></a>', layout='wide')\n # Download external dependencies.\n # Create a text element and let the reader know the data is loading.\n data_load_state = st.text('Loading... It might takes a while')\n # Load 10,000 rows of data into the dataframe.\n data = load_data()\n # Notify the reader that the data was successfully loaded.\n data_load_state.text(\"\")\n\n # Render the readme as markdown using st.markdown.\n # readme_text = st.markdown(\"Make sure it has the structure as seen below with the exact same column names\"\n # \", same structure for scoring points, same structure for players that participated, and \"\n # \"make sure to use the same date format. Any changes to this structure will break the \"\n # \"application. \")\n\n # Once we have the dependencies, add a selector for the app mode on the sidebar.\n st.sidebar.title(\"Menu\")\n option = st.sidebar.selectbox('Please select a page',\n ('Home', 'Problem Statistics', 'Content Statistics', 'User Statistics', 'User Activities', 'Check Proficiency'))\n\n if option == \"Home\":\n home.load(data)\n elif option == \"Problem Statistics\":\n # with st.spinner('Cleaning data...'):\n # data = data_preprocessing.clean(data)\n problem_statistics.load(data)\n elif option == \"Content Statistics\":\n content_statistics.load(data)\n elif option == \"User Statistics\":\n user_statistics.load(data)\n elif option == \"User Activities\":\n user_activities.load(data)\n elif option == \"Check Proficiency\":\n predictions.load(data)\n\n\[email protected](persist=True, show_spinner=False,allow_output_mutation=True)\ndef load_data():\n data = [pd.read_csv('data/Info_Content.csv'),\n pd.read_csv(\n 'data/Info_UserData.csv'),\n pd.read_csv('data/Log_Problem.csv'), ]\n return data\n\n\nif __name__ == \"__main__\":\n main()\n"
] |
[
[
"pandas.read_csv"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [
"2.0",
"1.4",
"1.1",
"1.5",
"1.2",
"1.3"
],
"scipy": [],
"tensorflow": []
}
] |
L24358/CRIREL
|
[
"26a95c6e471f783106b57368431166d22242725d"
] |
[
"equilibrium_points/sub_basics.py"
] |
[
"import os\r\nimport numpy as np\r\nimport dynalysis.basics as bcs\r\nimport dynalysis.classes as clss\r\n\r\n#=====basics=====#\r\ndef vect_len(coor1, coor2):\r\n '''returns: length of vector'''\r\n if len(coor1)!=len(coor2): raise bcs.AlgorithmError('func vect_len')\r\n vect = [coor1[i]-coor2[i] for i in range(len(coor1))]\r\n return np.linalg.norm(np.array(vect))\r\n\r\ndef much_larger_than(a, b, criteria):\r\n '''returns: T/F(a >> b*criteria)'''\r\n if a > b*criteria and b!=0: return True\r\n else: return False\r\n\t\r\ndef deter_order(coors):\r\n if coors[0][2]>=coors[1][2]: return coors\r\n else: return list(reversed(coors))\r\n\t\r\ndef lower_than(l, criteria):\r\n\t'''returns: T/F(all items in l is smaller than criteria)'''\r\n\tfor item in l:\r\n\t\tif item>criteria: return False\r\n\treturn True\r\n\t\r\n#=====classes=====#\r\nclass attractor():\r\n '''Defines an attractor.'''\r\n def __init__(self, name, coor, basin):\r\n self.name = name\r\n self.coor = coor\r\n self.basin = basin\r\n self.in_basin = False\r\n def deter_in_basin(self, state):\r\n if vect_len(state, self.coor) <= self.basin: return True\r\n return False\r\n def update_status(self, new_status):\r\n self.in_basin = new_status\r\n def check_overlap(self, other):\r\n if vect_len(self.coor, other.coor) < (self.basin+other.basin): return True\r\n return False\r\n \r\nclass storage():\r\n def __init__(self, init_name=[], init_type=[]):\r\n self.massive = {}\r\n for t in range(len(init_type)):\r\n tpe = init_type[t]\r\n if tpe is 'dict': self.massive[init_name[t]]={}\r\n elif tpe is 'list': self.massive[init_name[t]]=[]\r\n def add_garage(self, new_name, new_type):\r\n if new_type is 'dict': self.massive[new_name]={}\r\n elif new_type is 'list': self.massive[new_name]=[]\r\n def store_list(self, garage, item):\r\n self.massive[garage].append(item)\r\n def store_dict(self):\r\n pass\r\n def retrieve_garage(self, garage):\r\n return self.massive[garage]\r\n\r\nclass parameter():\r\n\t'''Defines a set of variables [pset] with corr. 
names [pname].\r\n\tFor example, [coor, radius]=[(1,3), 5]'''\r\n\tdef __init__(self, pset, pname):\r\n\t\tif len(pset)!=len(pname): raise bcs.InputError('class parameter')\r\n\t\tself.pset = pset\r\n\t\tself.pname = pname\r\n\t\tself.parampair = self.get_ordered_parampair()\r\n\t\tself.name = self.get_ordered_name()\r\n\tdef get_ordered_parampair(self):\r\n\t\tparampair=[]\r\n\t\tfor i in range(len(self.pname)): parampair.append((self.pname[i],self.pset[i]))\r\n\t\treturn parampair\r\n\tdef get_ordered_name(self):\r\n\t\torg_str=''\r\n\t\tfor pair in self.parampair: org_str=org_str+pair[0]+'='+str(pair[1])+'_'\r\n\t\treturn org_str[:-1]\r\n\tdef update_all(self):\r\n\t\tself.parampair = self.get_ordered_parampair()\r\n\t\tself.name = self.get_ordered_name()\r\n\tdef add_pair(self, pair): #interactive\r\n\t\tif type(pair)==tuple: pair=[pair]\r\n\t\tfor p in pair:\r\n\t\t\tself.pset.append(p[1])\r\n\t\t\tself.pname.append(p[0])\r\n\t\tself.update_all()\r\n\tdef remove_by_name(self, name):\r\n\t\tsett=self.extract(name)\r\n\t\tself.pname.remove(name)\r\n\t\tself.pset.remove(sett)\r\n\t\tself.update_all()\r\n\tdef assign_parampair(self, parampair): #interactive\r\n\t\tself.pset = [pair[1] for pair in parampair]\r\n\t\tself.pname = [pair[0] for pair in parampair]\r\n\t\tself.update_all()\r\n\tdef assign_name(self, name, trans_method=None): #interactive\r\n\t\tstrpairlist = name.split('_')\r\n\t\tself.pset=[]\r\n\t\tfor sp in range(len(strpairlist)):\r\n\t\t\tsubject=strpairlist[sp].split('=')[1]\r\n\t\t\tif trans_method==None: #if trans_method is not specified\r\n\t\t\t\ttry:\r\n\t\t\t\t\tif float(subject)==int(subject): self.pset.append(int(subject))\r\n\t\t\t\t\telse: self.pset.append(float(subject))\r\n\t\t\t\texcept:\r\n\t\t\t\t\tself.pset.append(subject)\r\n\t\t\telse:\r\n\t\t\t\tif trans_method[sp]=='f': trans_subject=float(subject)\r\n\t\t\t\telif trans_method[sp]=='i': trans_subject=int(float(subject))\r\n\t\t\t\telse: trans_subject = subject\r\n\t\t\t\tself.pset.append(trans_subject)\r\n\t\tself.pname = [strpair.split('=')[0] for strpair in strpairlist]\r\n\t\tself.update_all()\r\n\tdef equivalence_check_by_name(self, othername): #interactive\r\n\t\totherpm = parameter([],[])\r\n\t\totherpm.assign_name(othername)\r\n\t\tfor ps in self.pset:\r\n\t\t\tif ps not in otherpm.pset: return False\r\n\t\tfor pn in self.pname:\r\n\t\t\tif pn not in otherpm.pname:return False\r\n\t\treturn True\r\n\tdef extract(self, pname): #interactive\r\n\t\tfor pair in self.parampair:\r\n\t\t\tif pname==pair[0]: return pair[1]\r\n\t\traise bcs.InputError('class parameter')\r\n \r\nclass bunch():\r\n '''Defines a variable with all of its possible values.\r\n For example, radius:[1,2,3,4,5]'''\r\n def __init__(self, name, sets):\r\n self.name = name\r\n self.sets = sets\r\n self.namebunch = self.get_ordered_namebunch()\r\n self.pairbunch = self.get_ordered_pairbunch()\r\n def get_ordered_namebunch(self):\r\n namebunch=[]\r\n for s in self.sets: namebunch.append(parameter([s],[self.name]).name) \r\n return namebunch\r\n def get_ordered_pairbunch(self):\r\n pairbunch=[]\r\n for s in self.sets: pairbunch.append((self.name, s))\r\n return pairbunch \r\n\r\n#=====Others=====#\r\ndef add_continuity(runnum, outfile='info.txt', motherpath=os.getcwd()):\r\n\t#dir\r\n\trunpath=os.path.join(os.getcwd(),'run'+runnum)\r\n\tos.chdir(runpath)\r\n\tb_res=clss.branch('results_'+str(runnum), motherpath)\r\n\talldirs=[dr for dr in os.listdir(runpath) if 
os.path.isdir(os.path.join(runpath,dr))]\r\n\tofile=os.path.join(b_res.pathlink,outfile)\r\n\tEnt=clss.entry(' 0', [' 1', ' 2', ' 3', ' 4'])\r\n\tdata=Ent.readdata_and_fix(ofile)\r\n\tbcs.output_clf(ofile)\r\n\t#analysis\r\n\tcount=0\r\n\tfor dr in alldirs:\r\n\t\tcount+=1\r\n\t\tprint(count)\r\n\t\tb=clss.branch(dr,runpath)\r\n\t\trp=10 if len(os.listdir(b.pathlink))>9 else 1 #due to flawed dataset\r\n\t\tstate_train_all, state_train_truck, state_neurons = get_state_train(b, repeat=rp)\r\n\t\tpwidth=continuity(state_neurons)\r\n\t\tpall='_'.join(bcs.to_string(pwidth))\r\n\t\tdata[dr]=list(data[dr][:-1])+[data[dr][-1].split('\\n')[0], pall]\r\n\tfor key in data.keys():\r\n\t\tbcs.output_line(ofile,' '.join([key]+list(data[key])))\r\n\tos.chdir(motherpath)\r\n\t\r\ndef add_correlation(runnum, outfile='corr.txt', motherpath=os.getcwd()):\r\n\t#dir\r\n\trunpath=os.path.join(os.getcwd(),'run'+runnum)\r\n\tos.chdir(runpath)\r\n\talldirs=[dr for dr in os.listdir(runpath) if os.path.isdir(os.path.join(runpath,dr))]\r\n\t#result files and outputs\r\n\tb_res=clss.branch('results_'+str(runnum), motherpath)\r\n\tbcs.output_clf(os.path.join(b_res.pathlink,outfile))\r\n\t#analysis\r\n\tfor dr in alldirs:\r\n\t\tprint(dr)\r\n\t\t#specifications\r\n\t\tb=clss.branch(dr,runpath)\r\n\t\trp=10 if len(os.listdir(b.pathlink))>9 else 1 #due to flawed dataset\r\n\t\t#get EPs\r\n\t\tstate_train_all, state_train_truck, state_neurons = get_state_train(b, repeat=rp)\r\n\t\tall_corrs=correlation(state_neurons)\r\n\t\tbcs.output_line(os.path.join(b_res.pathlink,outfile),\\\r\n\t\t\t\t\t\t' '.join([dr]+all_corrs))\r\n\tos.chdir(motherpath)\r\n\treturn 0\r\n\r\ndef get_parampairs(paramsets, bunches, iternum):\r\n if iternum <=0: return paramsets\r\n else:\r\n new_paramsets, bunch = [], bunches[len(bunches)-iternum].pairbunch\r\n for pair in bunch:\r\n for param in paramsets: new_paramsets.append(param+[pair])\r\n return get_parampairs(new_paramsets, bunches, iternum-1)\r\n\r\ndef analyze_bunch(bunches, motherpath=os.getcwd()):\r\n parampairs = get_parampairs([], bunches, len(bunches))\r\n for parampair in parampairs:\r\n pm = parameter([],[])\r\n pm.update_by_parampair(parampair)\r\n fname = os.path.join(motherpath, 'Frate_'+pm.name+'.txt')\r\n FF = get_Fano_Factor(fname)\r\n\t\t\r\ndef fix():\r\n\timport shutil\r\n\tb_run=clss.branch('run8',os.getcwd())\r\n\tb_fix=clss.branch('run8-3',os.getcwd())\r\n\tb_fix.mkdir()\r\n\talldirs=[dr for dr in os.listdir(b_run.pathlink) if os.path.isdir(os.path.join(b_run.pathlink,dr))]\r\n\tfor dr in alldirs:\r\n\t\ttarget=os.path.join(b_run.pathlink,dr)\r\n\t\tif 'Frate.txt' not in os.listdir(target):\r\n\t\t\tshutil.move(target, b_fix.pathlink)\r\n\treturn 0\r\n\r\n\r\ndef fix2():\r\n\timport shutil\r\n\tb_run=clss.branch('run6',os.getcwd())\r\n\tb_fix=clss.branch('run6-3',os.getcwd())\r\n\tb_fix.mkdir()\r\n\talldirs=[dr for dr in os.listdir(b_run.pathlink) if os.path.isdir(os.path.join(b_run.pathlink,dr))]\r\n\tfor dr in alldirs:\r\n\t\ttarget=os.path.join(b_run.pathlink,dr,'Frate.txt')\r\n\t\ttry:\r\n\t\t\tdata=bcs.readcolumn(target)[1]\r\n\t\texcept:\r\n\t\t\tprint(dr)\r\n\t\t\tshutil.move(target, b_fix.pathlink)\r\n\treturn 0\r\n\t\r\ndef detect_loop(pairlist, loopnum):\r\n\ttree=[list(item) for item in pairlist]\r\n\tfor pair in pairlist:\r\n\t\tfirst=True\r\n\t\tfor branch in tree:\r\n\t\t\tif pair[0]==branch[-1]:\r\n\t\t\t\ttree.append(branch+[pair[1]])\r\n\t\t\t\tfirst=False\r\n\tfor branch in tree:\r\n\t\t#if the first and last element of the branch is the same, 
and\r\n\t\t#if the length of the branch is loopnum+1 (to prevent 1-2-1-3-1 counts for loopnum=3)\r\n\t\t#if the number of unique neurons in branch is loopnum (to prevent 1-2-1-3-1 counts for loopnum=4)\r\n\t\tif branch[0]==branch[-1] and len(branch)==loopnum+1 and len(set(branch))==loopnum: return True\r\n\treturn False\r\n\t\t\t\t\t"
] |
[
[
"numpy.array"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
cambel/ur3-tools
|
[
"820707c1f6dc188f453228cc1b3613978696091e",
"820707c1f6dc188f453228cc1b3613978696091e",
"820707c1f6dc188f453228cc1b3613978696091e"
] |
[
"plot/sac_plot_detail_p24.py",
"scripts/plt_test.py",
"plot/plotter_utils.py"
] |
[
"from plotter_utils import smooth, reformat_large_tick_values\nimport matplotlib.ticker as tick\nimport matplotlib.cm as cm\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport matplotlib\nmatplotlib.use('TkAgg')\n\n\ndef extract_obs(obs):\n # 6 + 6 + 24 + 6*n + 1\n n_extra = 0\n dist = obs[0][:6].astype(np.float) # Distance [0:6]\n xact = obs[0][n_extra+12:n_extra+18].astype(np.float) #*-1.0\n pdxact = obs[0][n_extra+18:n_extra+24].astype(np.float) # PD pos\n pdfact = obs[0][n_extra+24:n_extra+30].astype(np.float) # PD force\n alpha = obs[0][n_extra+30:n_extra+36].astype(np.float) #*-1.0\n force = obs[0][n_extra+12+n_actions:n_extra+18+n_actions].astype(np.float)\n if n_actions == 25:\n extra = obs[0][-2:-1].astype(np.float)\n else:\n extra = 0\n dist[3:] = np.rad2deg(dist[:3]/1000.0)\n return dist, force, xact, pdxact, pdfact, alpha, extra\n\n\ndef process_data(episode_data):\n rforce = []\n rdist = []\n rxaction = []\n rpdxaction = []\n rpdfaction = []\n ralphaion = []\n rextra = []\n max_dist = None\n ft_fix = None\n for ep in episode_data:\n dist, force, xact, pdxact, pdfact, alpha, extra = extract_obs(ep)\n if ft_fix is None:\n ft_fix = force\n # if max_dist is None:\n # max_dist = np.abs(dist)\n # max_dist[:3] = np.array([40,40,40,])\n rforce.append(force-ft_fix)\n rxaction.append(xact)\n rpdxaction.append(pdxact)\n rpdfaction.append(pdfact)\n ralphaion.append(alpha)\n dist[insertion_dir] += 0\n rdist.append(dist)\n rextra.append(extra)\n rforce = np.array(rforce).reshape((-1,6))\n print(rforce.shape)\n\n return np.array(rdist), np.array(rforce), np.array(rxaction), np.array(rpdxaction), np.array(rpdfaction), np.array(ralphaion), np.array(rextra)\n\n\ndef process(filename, index=-1):\n data = np.load(filename, allow_pickle=True)\n\n dist, force, d_act, pdx_act, pdf_act, alpha, extra = process_data(np.array(data[index]))\n x = np.linspace(1, len(dist), len(dist))\n\n figure, ax = plt.subplots(nrows=4, ncols=1, figsize=(7.5, 6))\n figure.tight_layout(h_pad=-1.0)\n\n x_labels = ['$x$','$y$','$z$',r'$\\alpha$', r'$\\beta$',r'$\\gamma$']\n labels = [x_labels[insertion_dir], '$a_x$']\n dist_limits = [-30,30] if insertion_dir < 3 else [-2,2]\n plot_cross_data(0, x, [dist[:,insertion_dir]], ax, labels, ylim=dist_limits,ls = ['-', '-', '--', ':'])\n ax[0].axhline(y=0.0, color='gray', linestyle='--')\n ax[1].axhline(y=0.0, color='gray', linestyle='--')\n\n alpha *= -1\n labels = [ '$K_p^x$','$K_f^x$']\n plot_cross_data(3, x, [pdx_act[:,insertion_dir], pdf_act[:,insertion_dir]], ax, labels, colors = ['C3','C6','black'], ls = ['-', '-', '-', ':'])\n\n labels = [ '$S$', '$a_x$', 'extra']\n if n_actions == 25:\n plot_cross_data(2, x, [alpha[:,insertion_dir], d_act[:,insertion_dir], extra], ax, labels, colors = ['C4', 'C1','C2'], ls = ['-', '-', '-', ':'])\n else:\n plot_cross_data(2, x, [alpha[:,insertion_dir], d_act[:,insertion_dir]], ax, labels, colors = ['C4', 'C1'], ls = ['-', '-', '-', ':'])\n\n labels = [ '$f_{ext}$']\n force *= np.array([30.,30,30,1,1,1])\n fx = np.linspace(1, len(force), len(force))\n force_limits = [-20,20] if insertion_dir < 3 else [-1,1]\n plot_cross_data(1, fx, [force[:,insertion_dir]], ax, labels, colors = ['C5'], ls = ['-', '-', '-', ':'], ylim=force_limits)\n\n ax[0].set_xticklabels([])\n ax[1].set_xticklabels([])\n ax[2].set_xticklabels([])\n ax[0].set_xlim([0,len(x)*1.2])\n ax[1].set_xlim([0,len(fx)*1.2])\n ax[2].set_xlim([0,len(x)*1.2])\n ax[3].set_xlim([0,len(x)*1.2])\n ax[1].set_ylim([-5,40])\n ax[2].set_ylim([-1.1,1.1])\n 
ax[3].set_ylim([-1.1,1.1])\n\ndef plot_cross_data(i, x, ys, ax, ys_labels, colors = ['C0','C1','gray','black'], ls = ['-', '-', '-', ':'], ylim=[-1,1]):\n lw = [1, 1, 1, 1]\n for y, yl, _ls, _lw, cl in zip(ys, ys_labels, ls, lw, colors):\n ax[i].plot(x, y, _ls, label=yl, linewidth=_lw, color=cl)\n ax[i].legend(loc='lower right', ncol=1, prop={'size': 20}, frameon=False, bbox_to_anchor=(1.03,-.08))\n ax[i].tick_params(axis=\"y\", labelsize=15)\n ax[i].tick_params(axis=\"x\", labelsize=15)\n ax[i].set_ylim(ylim)\n box = ax[i].get_position()\n ax[i].set_position([box.x0, box.y0 + box.height * 0., box.width, box.height * 1.075])\n\nfilename = '/home/cambel/dev/results/350k_SAC_randgoal_p24/state_20200727T101705.npy'\nfilename = '/media/cambel/Extra/research/MDPI/real/square_peg/50k_SAC_real_square_peg/09_rot_state_20200726T170030.npy'\nfilename = '/media/cambel/Extra/research/MDPI/real/square_peg/scratch_SAC_real_square_peg/real_state_20200727T144205.npy' #scratch\nfilename = '/media/cambel/Extra/research/MDPI/simulation/conical/20200719T204941.374932_SAC_conical_p25/state_20200720T122714.npy'\nfilename = '/home/cambel/dev/results/180k_SAC_conical_randerr_p24/state_20200727T172553.npy'\nfilename = '/home/cambel/dev/results/300k_SAC_conical_p25/state_20200727T174626.npy'\nfilename = '/media/cambel/Extra/research/MDPI/simulation/sim2real/350k_SAC_randgoal_p24/real_new_params_state_20200727T160625.npy'\n\nfilename = '/media/cambel/Extra/research/MDPI/reality_gap/real_no_table_428_state_20200804T115215.npy'\nfilename = '/media/cambel/Extra/research/MDPI/reality_gap/sim_no_table_68_state_20200804T120226.npy'\nfilename = '/media/cambel/Extra/research/MDPI/reality_gap/real_no_table_x6_state_20200804T125435.npy'\nfilename = '/media/cambel/Extra/research/MDPI/reality_gap/sim_table_99_state_20200804T120558.npy'\nfilename = '/media/cambel/Extra/research/MDPI/reality_gap/real_table_x6_state_20200804T124832.npy'\n\nfilename = '/media/cambel/Extra/research/MDPI/real/square_peg/accuracy_perf_sim2real.npy'\nfilename = '/media/cambel/Extra/research/MDPI/real/square_peg/accuracy_perf_scratch.npy'\n\nfilename = '/media/cambel/Extra/research/MDPI/real/square_peg/retrain_sim_soft.npy'\nfilename = '/media/cambel/Extra/research/MDPI/real/square_peg/retrain_sim_x6_caucho.npy'\nfilename = '/media/cambel/Extra/research/MDPI/real/square_peg/accuracy_perf_retrain_sim_x6.npy'\n\nfilename = '/media/cambel/Extra/research/MDPI/reality_gap/real_table_80k.npy'\nfilename = '/media/cambel/Extra/research/MDPI/reality_gap/real_table_100k.npy'\nfilename = '/media/cambel/Extra/research/MDPI/reality_gap/real_table_500k_x6.npy'\n\nfilename = '/home/cambel/dev/results/SAC_Linsertionil_p24/state_20200820T184436.npy'\n\nweight = 0.0\nepisode = 0\ninsertion_dir = 1\nn_actions = 25 if 'p25' in filename else 24\nmax_dist = None\nmode = 1\n\nif mode == 0:\n for i in range(6):\n ft_fix = 0.1 if i == 1 else 0.0\n insertion_dir = i\n process(filename, episode)\n plt.savefig('/home/cambel/dev/data/retrain_force_input' + '_ep%s_%s.png'%(episode,i))\n plt.clf()\nelse:\n process(filename, episode)\n plt.show()",
"import matplotlib.pyplot as plt\nfrom mpl_toolkits.mplot3d import Axes3D # noqa: F401 unused import\nimport numpy as np\n# Create Map\ncm = plt.get_cmap(\"RdYlGn\")\n\nx = np.random.rand(30)\ny = np.random.rand(30)\nz = np.random.rand(30)\ncol = np.arange(30)\n\n# # 2D Plot\n# fig = plt.figure()\n# ax = fig.add_subplot(111)\n# ax.scatter(x, y, s=10, c=col, marker='o') \n\n# 3D Plot\nfig = plt.figure()\nax3D = fig.add_subplot(111, projection='3d')\np3d = ax3D.scatter(x, y, z, s=30, c=col, marker='o') \n\nplt.show()",
"import csv\nimport numpy as np\n\ndef running_mean(x, N):\n cumsum = np.cumsum(np.insert(x, 0, 0)) \n return (cumsum[N:] - cumsum[:-N]) / float(N)\n\ndef extrapolate(x, y, points=12000):\n # calculate polynomial\n z = np.polyfit(x, y, 5)\n f = np.poly1d(z)\n\n other_x = np.linspace(1, points, int(points/1))\n # other_y = f(other_x)\n other_y = np.interp(other_x, x, y)\n\n return other_x, other_y\n\ndef prepare_data(files, weight, points=12000):\n d = []\n for f in files:\n tmp = None\n if f.endswith('.csv'):\n tmp = csv_to_list(f)\n tmp[1] = smooth(tmp[1], weight)\n _x, _y = extrapolate(tmp[0], tmp[1], points)\n tmp = _y\n else:\n tmp = npy_to_list(f)\n tmp = smooth(tmp, weight)\n d.append(tmp)\n x = []\n if f.endswith('.csv'):\n x = _x\n else:\n x = np.linspace(0, 150.0*len(d[0]), len(d[0]))\n print(np.array(d).shape)\n y = np.average(d, axis=0)\n y_std = np.std(d, axis=0) \n return x, y, y_std\n\ndef npy_to_list(filename):\n data = np.load(filename)\n data = np.sum(data, axis=3)\n data = data.reshape(-1)\n return data\n\ndef csv_to_list(filename):\n with open(filename, 'r') as f:\n csv_data = list(csv.reader(f, delimiter=\",\"))\n l = np.array(csv_data[:], dtype=np.float64).T\n print(l.shape)\n return l\n\ndef smooth(scalars, weight): # Weight between 0 and 1\n last = scalars[0] # First value in the plot (first timestep)\n smoothed = []\n for point in scalars:\n smoothed_val = last * weight + (1 - weight) * point # Calculate smoothed value\n smoothed.append(smoothed_val) # Save it\n last = smoothed_val # Anchor the last smoothed value\n return smoothed\n\ndef reformat_large_tick_values(tick_val, pos):\n \"\"\"\n Turns large tick values (in the billions, millions and thousands) such as 4500 into 4.5K and also appropriately turns 4000 into 4K (no zero after the decimal).\n \"\"\"\n if tick_val >= 1000000000:\n val = round(tick_val/1000000000, 1)\n new_tick_format = '{:}B'.format(val)\n elif tick_val >= 1000000:\n val = round(tick_val/1000000, 1)\n new_tick_format = '{:}M'.format(val)\n elif tick_val >= 1000:\n val = round(tick_val/1000, 1)\n new_tick_format = '{:}K'.format(val)\n elif tick_val < 1000:\n new_tick_format = round(tick_val, 1)\n else:\n new_tick_format = tick_val\n\n # make new_tick_format into a string value\n new_tick_format = str(new_tick_format)\n \n # code below will keep 4.5M as is but change values such as 4.0M to 4M since that zero after the decimal isn't needed\n index_of_decimal = new_tick_format.find(\".\")\n \n if index_of_decimal != -1:\n value_after_decimal = new_tick_format[index_of_decimal+1]\n if value_after_decimal == \"0\":\n # remove the 0 after the decimal point since it's not needed\n new_tick_format = new_tick_format[0:index_of_decimal] + new_tick_format[index_of_decimal+2:]\n \n return new_tick_format"
] |
[
[
"matplotlib.use",
"matplotlib.pyplot.subplots",
"numpy.rad2deg",
"matplotlib.pyplot.savefig",
"matplotlib.pyplot.clf",
"numpy.load",
"numpy.array",
"matplotlib.pyplot.show"
],
[
"numpy.arange",
"matplotlib.pyplot.get_cmap",
"numpy.random.rand",
"matplotlib.pyplot.show",
"matplotlib.pyplot.figure"
],
[
"numpy.polyfit",
"numpy.poly1d",
"numpy.std",
"numpy.interp",
"numpy.insert",
"numpy.load",
"numpy.array",
"numpy.average",
"numpy.sum"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
small-zeng/nerf-pytorch
|
[
"59b5fc655e1c22cd42dfa4a1617ba6feb8ce3464"
] |
[
"torchsearchsorted/setup.py"
] |
[
"from setuptools import setup, find_packages\r\nfrom torch.utils.cpp_extension import BuildExtension, CUDA_HOME\r\nfrom torch.utils.cpp_extension import CppExtension, CUDAExtension\r\n\r\n# In any case, include the CPU version\r\nmodules = [\r\n CppExtension('torchsearchsorted.cpu',\r\n ['src/cpu/searchsorted_cpu_wrapper.cpp']),\r\n]\r\n\r\n# If nvcc is available, add the CUDA extension\r\nif CUDA_HOME:\r\n modules.append(\r\n CUDAExtension('torchsearchsorted.cuda',\r\n ['src/cuda/searchsorted_cuda_wrapper.cpp',\r\n 'src/cuda/searchsorted_cuda_kernel.cu'])\r\n )\r\n\r\ntests_require = [\r\n 'pytest',\r\n]\r\n\r\n# Now proceed to setup\r\nsetup(\r\n name='torchsearchsorted',\r\n version='1.1',\r\n description='A searchsorted implementation for pytorch',\r\n keywords='searchsorted',\r\n author='Antoine Liutkus',\r\n author_email='[email protected]',\r\n packages=find_packages(where='src'),\r\n package_dir={\"\": \"src\"},\r\n ext_modules=modules,\r\n tests_require=tests_require,\r\n extras_require={\r\n 'test': tests_require,\r\n },\r\n cmdclass={\r\n 'build_ext': BuildExtension\r\n }\r\n)\r\n"
] |
[
[
"torch.utils.cpp_extension.CUDAExtension",
"torch.utils.cpp_extension.CppExtension"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
showkeyjar/beauty
|
[
"7c944cf896c899d9e23b2e50e293103bb03fe6cd",
"7c944cf896c899d9e23b2e50e293103bb03fe6cd",
"7c944cf896c899d9e23b2e50e293103bb03fe6cd",
"7c944cf896c899d9e23b2e50e293103bb03fe6cd"
] |
[
"App/beauty/app/src/main/assets/python/report_lite.py",
"dl/autokeras/predict.py",
"predict_interpret.py",
"dl/scan/part_tflite.py"
] |
[
"import io\nimport cv2\nimport math\nimport base64\nimport numpy as np\nfrom PIL import Image\nfrom os.path import dirname, join\nimport tensorflow as tf\nfrom explain_lite import OcclusionSensitivity\n\n\"\"\"\n模型解释\n\"\"\"\nmodel_file = join(dirname(__file__), \"model_beauty_q_v2.tflite\")\ninterpreter = tf.compat.v1.lite.Interpreter(model_path=model_file)\n\n# Get input and output tensors.\ninput_details = interpreter.get_input_details()\noutput_details = interpreter.get_output_details()\n# interpreter.allocate_tensors()\n\n\ndef gen_result(str_data):\n try:\n decode_data = base64.b64decode(str_data)\n np_data = np.fromstring(decode_data, np.uint8)\n old_img = cv2.imdecode(np_data, cv2.IMREAD_UNCHANGED)\n img = cv2.resize(old_img, (96, 96), interpolation=cv2.INTER_NEAREST)\n img = img.astype(np.float32)\n img /= 255.0\n print(\"explain img:\" + str(img.shape) + \" \" + str(img.dtype))\n img_width = img.shape[0]\n img_height = img.shape[1]\n data = ([img], None)\n # Start explainer\n explainer = OcclusionSensitivity()\n # patch_size 是指分析的间隔,间隔越小越准确,越大速度越快\n # 实际测试patch_size = patch_length只推理1次,推理速度反而变慢,并伴随有光栅\n patch_length = max(img_width, img_height)\n patch_size = math.floor(patch_length / 100)\n patch_size = 1 if patch_size < 1 else patch_size\n grid = explainer.explain(data, interpreter, class_index=0, patch_size=patch_size) # 0 is regression class index in train\n print(\"get explained:\" + str(grid.shape) + \" \" + str(grid.dtype))\n pil_img = Image.fromarray(grid)\n buff = io.BytesIO()\n pil_img.save(buff, format=\"PNG\")\n return buff.getvalue()\n except Exception as e:\n print(e)\n return None\n\n\ndef gen_result_new(str_data):\n \"\"\"\n todo 新的颜值解释\n 1.不同部位得分\n 2.累计得分\n \"\"\"\n\n\nif __name__==\"__main__\":\n # python -X faulthandler report_lite.py\n # Load a sample image (or multiple ones)\n img_width = img_height = 300\n img = tf.keras.preprocessing.image.load_img(\"/opt/data/SCUT-FBP5500_v2/Images/train/face/AF1031.jpg\", target_size=(300, 300))\n img = tf.keras.preprocessing.image.img_to_array(img)\n data = ([img], None)\n # Start explainer\n explainer = OcclusionSensitivity()\n # patch_size 是指分析的间隔,间隔越小越准确,越大速度越快\n patch_size = math.floor(img_width / 5)\n grid = explainer.explain(data, interpreter, class_index=0, patch_size=patch_size) # 0 is regression class index in train\n explainer.save(grid, \".\", \"occ_sens_lite.png\")\n",
"import sys\nimport numpy as np\nimport pandas as pd\nimport tensorflow as tf\nfrom tensorflow.keras.models import load_model\nimport autokeras as ak\nfrom tensorflow.python.ops import image_ops\nfrom tensorflow.python.ops import io_ops\n\"\"\"\nkeras模型预测\n\n0~1 0\n1~2 122\n2~3 913\n3~4 763\n4~5 235\n不均匀数据集导致回归预测高分区不准确\n\"\"\"\ndirectory = \"/opt/data/SCUT-FBP5500_v2/Images/train/face/\"\ndf_rates = pd.read_csv(\"/opt/data/SCUT-FBP5500_v2/All_Ratings.csv\", header=None, names=['filename', 'score'])\ndf_rates = df_rates[df_rates['filename'].str.find(\"AF\")>=0]\ndf_rates['score'] = df_rates['score'].astype(int)\ndf_rates_mean = df_rates.groupby('filename').mean()\ndf_rates_mean.reset_index(inplace=True)\n# 挑选测试用例\ntry:\n score0 = df_rates_mean[df_rates_mean['score']<1].head(2)\nexcept Exception:\n score0 = None\ntry:\n score1 = df_rates_mean[df_rates_mean['score'].between(1,2)].head(2)\nexcept Exception:\n score1 = None\ntry:\n score2 = df_rates_mean[df_rates_mean['score'].between(2,3)].head(2)\nexcept Exception:\n score2 = None\ntry:\n score3 = df_rates_mean[df_rates_mean['score'].between(3,4)].head(2)\nexcept Exception:\n score3 = None\ntry:\n score4 = df_rates_mean[df_rates_mean['score'].between(4,5)].head(2)\nexcept Exception:\n score4 = None\n\n# model = load_model(\"model_beauty_v1\", custom_objects=ak.CUSTOM_OBJECTS)\nmodel = load_model(\"model_beauty_v1\")\n\n\ndef path_to_image(path, image_size, num_channels, interpolation):\n img = io_ops.read_file(path)\n img = image_ops.decode_image(\n img, channels=num_channels, expand_animations=False)\n img = image_ops.resize_images_v2(img, image_size, method=interpolation)\n img.set_shape((image_size[0], image_size[1], num_channels))\n return img\n\n\ndef predict(image_path):\n global model\n image = tf.keras.preprocessing.image.load_img(image_path, color_mode=\"rgb\", interpolation=\"bilinear\")\n input_arr = tf.keras.preprocessing.image.img_to_array(image)\n # input_arr = path_to_image(image_path, (350, 350), 3, \"bilinear\")\n input_arr = np.array([input_arr]) # Convert single image to a batch.\n predictions = model.predict(tf.expand_dims(input_arr, -1))\n return predictions[0][0]\n\n\ndef test_score(se):\n global directory\n image_path = directory + se[\"filename\"]\n preds = predict(image_path)\n print(\"test:\", se['filename'], \"lable:\", se['score'], \"predict:\", preds)\n return preds\n\n\nif __name__ == \"__main__\":\n test_scores = pd.concat([score0, score1, score2, score3, score4])\n test_scores.apply(test_score, axis=1)\n",
"# %% coding=utf-8\nimport sys\nimport dlib\nimport numpy as np\nimport pandas as pd\nfrom sklearn.externals import joblib\nfrom interpret import show\nfrom interpret.perf import RegressionPerf\nfrom interpret.blackbox import LimeTabular\nfrom interpret.blackbox import ShapKernel\n\n\"\"\"\n实际预测部分\n\"\"\"\n#todo\n# 1.lime问题太多,将lime更换成interpret\n\n#%%\npredictor_path = \"model/shape_predictor_68_face_landmarks.dat\"\ndetector = dlib.get_frontal_face_detector()\npredictor = dlib.shape_predictor(predictor_path)\nmodel = joblib.load('model/beauty.pkl')\ndf_input = pd.read_csv('data/face/df_input.csv', dtype=np.float64)\ndf_label = df_input['label'].values\n\ndf_input = df_input.drop(['Unnamed: 0', 'Image', 'label'], axis=1)\nfeature_names = df_input.columns\ndf_input = df_input.values\nprint(feature_names)\n\n\n#%%\ndef prepare_input(img_path):\n img = dlib.load_rgb_image(img_path)\n dets = detector(img, 1)\n df_image = None\n for k, d in enumerate(dets):\n # print(\"Detection {}: Left: {} Top: {} Right: {} Bottom: {}\".format(k, d.left(), d.top(), d.right(), d.bottom()))\n f_width = abs(d.right() - d.left())\n f_height = abs(d.bottom() - d.top())\n # print('width:' + str(f_width) + ', height:' + str(f_height))\n # Get the landmarks/parts for the face in box d.\n shape = predictor(img, d)\n # print(\"Part 0: {}, Part 1: {} ...\".format(shape.part(0), shape.part(1)))\n face_shape = {}\n for i in range(0, 67):\n for j in range(i + 1, 68):\n face_shape[str(i) + '_' + str(j) + '_x'] = abs(shape.part(i).x - shape.part(j).x) / f_width\n face_shape[str(i) + '_' + str(j) + '_y'] = abs(shape.part(i).y - shape.part(j).y) / f_height\n # print(str(i) + '_' + str(j))\n # shape_size.append(face_shape)\n df_image = pd.DataFrame.from_dict([face_shape])\n break\n return df_image\n\n\n#%%\ndef predict(f):\n global detector, predictor, model\n #shape_size = []\n img = dlib.load_rgb_image(f)\n dets = detector(img, 1)\n for k, d in enumerate(dets):\n #print(\"Detection {}: Left: {} Top: {} Right: {} Bottom: {}\".format(k, d.left(), d.top(), d.right(), d.bottom()))\n f_width = abs(d.right() - d.left())\n f_height = abs(d.bottom() - d.top())\n #print('width:' + str(f_width) + ', height:' + str(f_height))\n # Get the landmarks/parts for the face in box d.\n shape = predictor(img, d)\n #print(\"Part 0: {}, Part 1: {} ...\".format(shape.part(0), shape.part(1)))\n face_shape = {}\n for i in range(0, 67):\n for j in range(i+1, 68):\n face_shape[str(i) + '_' + str(j) + '_x'] = abs(shape.part(i).x - shape.part(j).x)/f_width\n face_shape[str(i) + '_' + str(j) + '_y'] = abs(shape.part(i).y - shape.part(j).y)/f_height\n #print(str(i) + '_' + str(j))\n #shape_size.append(face_shape)\n df_image = pd.DataFrame.from_dict([face_shape])\n #print(df_image.columns)\n pred = model.predict(df_image)\n break\n return pred\n\n\nif __name__ == \"__main__\":\n try:\n test = sys.argv[1]\n mode = sys.argv[2]\n except:\n test = \"data/t1.jpg\"\n mode = 'shap'\n score = predict(test)\n # result = model.predict(df_input)\n print('beauty score:' + str(score))\n X_test = prepare_input(test)\n y_test = model.predict(X_test)\n\n if mode == 'blackbox':\n blackbox_perf = RegressionPerf(model.predict).explain_perf(df_input, df_label, name='Blackbox')\n show(blackbox_perf)\n elif mode == 'lime':\n #%% Blackbox explainers need a predict function, and optionally a dataset\n lime = LimeTabular(predict_fn=model.predict, data=df_input, random_state=1)\n #%%Pick the instances to explain, optionally pass in labels if you have them\n lime_local = 
lime.explain_local(X_test, y_test, name='LIME')\n show(lime_local)\n else:\n #%%\n background_val = np.median(df_input, axis=0).reshape(1, -1)\n #%%\n shap = ShapKernel(predict_fn=model.predict, data=background_val, feature_names=feature_names)\n #%%\n shap_local = shap.explain_local(X_test, y_test, name='SHAP')\n show(shap_local)\n",
"#!/usr/bin/python\n# -*- encoding: utf-8 -*-\nimport numpy as np\nimport cv2\nfrom os.path import dirname, join\nimport tensorflow as tf\n\n\"\"\"\n判断人脸部位\n\"\"\"\n\n# Load TFLite model and allocate tensors.\nmodel_file = join(dirname(__file__), \"face_part.tflite\")\ninterpreter = tf.compat.v1.lite.Interpreter(model_path=model_file)\n\n# Get input and output tensors.\ninput_details = interpreter.get_input_details()\nprint(\"input:\", input_details)\noutput_details = interpreter.get_output_details()\nprint(\"output:\", output_details)\n\n\ndef vis_parsing_maps(im, parsing_anno, stride, save_im=False, save_path='parsing_face.jpg'):\n # Colors for all 19 parts\n part_colors = [[255, 0, 0], [255, 85, 0], [255, 170, 0],\n [255, 0, 85], [255, 0, 170],\n [0, 255, 0], [85, 255, 0], [170, 255, 0],\n [0, 255, 85], [0, 255, 170],\n [0, 0, 255], [85, 0, 255], [170, 0, 255],\n [0, 85, 255], [0, 170, 255],\n [255, 255, 0], [255, 255, 85], [255, 255, 170],\n [255, 0, 255], [255, 85, 255], [255, 170, 255],\n [0, 255, 255], [85, 255, 255], [170, 255, 255]]\n\n # part_names = ['hair', 'l_brow', 'r_brow', 'l_eye', 'r_eye', 'eye_g', 'l_ear', 'r_ear', 'ear_r', 'nose', 'mouth', 'skin', 'u_lip', 'l_lip', 'neck', 'neck_l', 'cloth', 'bg', 'hat']\n part_names = ['skin', 'l_brow', 'r_brow', 'l_eye', 'r_eye', 'eye_g', 'l_ear', 'r_ear', 'ear_r',\n 'nose', 'mouth', 'u_lip', 'l_lip', 'neck', 'neck_l', 'cloth', 'hair', 'hat', 'bg']\n im = np.array(im)\n vis_im = im.copy().astype(np.uint8)\n vis_parsing_anno = parsing_anno.copy().astype(np.uint8)\n vis_parsing_anno = cv2.resize(vis_parsing_anno, None, fx=stride, fy=stride, interpolation=cv2.INTER_NEAREST)\n vis_parsing_anno_color = np.zeros((vis_parsing_anno.shape[0], vis_parsing_anno.shape[1], 3)) + 255\n\n num_of_class = np.max(vis_parsing_anno)\n\n for pi in range(0, num_of_class + 1):\n index = np.where(vis_parsing_anno == pi)\n print(\"cls \", pi,\" find shape:\", index[0].shape, index[1].shape)\n vis_parsing_anno_color[index[0], index[1], :] = part_colors[pi]\n # 确保文字在最上层\n for pi in range(0, num_of_class + 1):\n index = np.where(vis_parsing_anno == pi)\n try:\n cv2.putText(vis_parsing_anno_color, str(pi) + \":\" + part_names[pi], (index[0][0] + 1,index[1][0] + 1), cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 0, 0), 2, cv2.LINE_AA)\n except Exception as e:\n print(e)\n\n vis_parsing_anno_color = vis_parsing_anno_color.astype(np.uint8)\n # print(vis_parsing_anno_color.shape, vis_im.shape)\n vis_im = cv2.addWeighted(cv2.cvtColor(vis_im, cv2.COLOR_RGB2BGR), 0.4, vis_parsing_anno_color, 0.6, 0)\n\n # Save result or not\n if save_im:\n # cv2.imwrite(save_path[:-4] +'.png', vis_parsing_anno)\n cv2.imwrite(save_path, vis_im, [int(cv2.IMWRITE_JPEG_QUALITY), 100])\n # return vis_im\n\n\ndef evaluate(img_path='./data'):\n global interpreter\n image = cv2.imread(img_path)\n image = cv2.resize(image, (512, 512), interpolation=cv2.INTER_NEAREST)\n img = image / 255\n mean = [0.485, 0.456, 0.406]\n std = [0.229, 0.224, 0.225]\n img = (img - mean) / std\n img = img.astype(np.float32)\n # change to channel first\n img = np.moveaxis(img, 2, 0)\n print(img.shape, img.dtype)\n interpreter.allocate_tensors()\n interpreter.set_tensor(interpreter.get_input_details()[0]['index'], [img])\n interpreter.invoke()\n preds = interpreter.get_tensor(interpreter.get_output_details()[0]['index'])\n parsing = preds[0].argmax(0)\n print(np.unique(parsing))\n vis_parsing_maps(image, parsing, stride=1, save_im=True)\n\n\nif __name__ == \"__main__\":\n evaluate(img_path='1.png')\n"
] |
[
[
"tensorflow.compat.v1.lite.Interpreter",
"numpy.fromstring",
"tensorflow.keras.preprocessing.image.img_to_array",
"tensorflow.keras.preprocessing.image.load_img"
],
[
"tensorflow.keras.models.load_model",
"pandas.concat",
"pandas.read_csv",
"tensorflow.keras.preprocessing.image.load_img",
"tensorflow.expand_dims",
"tensorflow.python.ops.image_ops.decode_image",
"tensorflow.python.ops.io_ops.read_file",
"numpy.array",
"tensorflow.keras.preprocessing.image.img_to_array",
"tensorflow.python.ops.image_ops.resize_images_v2"
],
[
"numpy.median",
"sklearn.externals.joblib.load",
"pandas.read_csv",
"pandas.DataFrame.from_dict"
],
[
"numpy.unique",
"tensorflow.compat.v1.lite.Interpreter",
"numpy.max",
"numpy.moveaxis",
"numpy.array",
"numpy.zeros",
"numpy.where"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": [
"1.10",
"2.7",
"2.4",
"2.3",
"2.5",
"2.6"
]
},
{
"matplotlib": [],
"numpy": [],
"pandas": [
"2.0",
"1.4",
"1.3",
"1.1",
"1.5",
"1.2"
],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [
"2.0",
"1.4",
"1.3",
"1.1",
"1.5",
"1.2"
],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
TysonReimer/ML-in-BMS
|
[
"e7be69abb663a8e213ec4683818018746cb92106"
] |
[
"run/ai-test/logreg_train_g2_test_g1.py"
] |
[
"\"\"\"\r\nTyson Reimer\r\nUniversity of Manitoba\r\nAugust 06th, 2020\r\n\"\"\"\r\n\r\nimport os\r\nimport numpy as np\r\nimport matplotlib.pyplot as plt\r\n\r\nfrom sklearn.metrics import roc_auc_score\r\nfrom sklearn.linear_model import LogisticRegression\r\n\r\nfrom umbms import get_proj_path, get_script_logger, verify_path\r\n\r\nfrom umbms.loadsave import load_pickle, save_pickle\r\n\r\nfrom umbms.ai.augment import full_aug\r\nfrom umbms.ai.gencompare import correct_g1_ini_ant_ang\r\nfrom umbms.ai.makesets import get_class_labels\r\nfrom umbms.ai.preproc import resize_features_for_logreg, to_td\r\nfrom umbms.ai.metrics import (get_acc, get_sens, get_spec, get_opt_thresh,\r\n report_metrics)\r\n\r\n###############################################################################\r\n\r\n__DATA_DIR = os.path.join(get_proj_path(), 'data/umbmid/')\r\n\r\n###############################################################################\r\n\r\n# Number of epochs to train over\r\n__REG_PARAM = 1e6\r\n\r\n###############################################################################\r\n\r\ndef plt_roc_curve(preds, labels, save_str='', save=False):\r\n \"\"\"Plots the ROC curve of the classifier\r\n\r\n Parameters\r\n ----------\r\n preds : array_like\r\n Classifier predictions\r\n labels : array_like\r\n True class labels\r\n save_str : str\r\n String to use to save fig and data, if save. Should not have\r\n file extension, should not be full path - just name of .pickle\r\n and .png files that will be saved\r\n save : bool\r\n If True, will save the fig and data\r\n \"\"\"\r\n\r\n # Thresholds to use for plt\r\n thresholds = np.linspace(0, 1, 1000)\r\n\r\n # Init arrays for storing FPR and TPR\r\n fprs = np.zeros_like(thresholds)\r\n tprs = np.zeros_like(thresholds)\r\n\r\n for ii in range(np.size(thresholds)):\r\n\r\n # Get TPR here\r\n tprs[ii] = get_sens(preds=preds, labels=labels,\r\n threshold=thresholds[ii])\r\n\r\n # Get FPR here\r\n fprs[ii] = 1 - get_spec(preds=preds, labels=labels,\r\n threshold=thresholds[ii])\r\n\r\n # Make fig\r\n plt.figure(figsize=(12, 6))\r\n plt.rc(\"font\", family=\"Times New Roman\")\r\n plt.tick_params(labelsize=20)\r\n plt.plot(fprs, tprs, 'k-')\r\n plt.plot(np.linspace(0, 1, 1000), np.linspace(0, 1, 1000), 'b--')\r\n plt.xlabel('False Positive Rate', fontsize=24)\r\n plt.ylabel('True Positive Rate', fontsize=24)\r\n plt.tight_layout()\r\n\r\n if save: # If saving\r\n\r\n verify_path(os.path.join(get_proj_path(), 'output/roc-figs/'))\r\n out_path = os.path.join(get_proj_path(), 'output/roc-figs/')\r\n\r\n plt.savefig(os.path.join(out_path, '%s.png' % save_str), dpi=150)\r\n plt.close()\r\n save_pickle(np.array([fprs, tprs]),\r\n os.path.join(out_path, '%s.pickle' % save_str))\r\n\r\n\r\n###############################################################################\r\n\r\n\r\nif __name__ == \"__main__\":\r\n\r\n logger = get_script_logger(__file__)\r\n\r\n # Load the training data and metadata from Gen-2\r\n g2_d = load_pickle(os.path.join(__DATA_DIR, 'g2/g2_fd.pickle'))\r\n g2_md = load_pickle(os.path.join(__DATA_DIR, 'g2/g2_metadata.pickle'))\r\n\r\n # Load the training data and metadata from Gen-1\r\n g1_d = load_pickle(os.path.join(__DATA_DIR,\r\n 'g1-train-test/test_fd.pickle'))\r\n g1_md = load_pickle(os.path.join(__DATA_DIR,\r\n 'g1-train-test/test_md.pickle'))\r\n\r\n # Convert data to time domain, take magnitude, apply window\r\n g1_d = correct_g1_ini_ant_ang(g1_d)\r\n g1_d = np.abs(to_td(g1_d))\r\n g2_d = np.abs(to_td(g2_d))\r\n\r\n # Perform data 
augmentation\r\n g2_d, g2_md = full_aug(g2_d, g2_md)\r\n\r\n g2_d = resize_features_for_logreg(g2_d)\r\n g1_d = resize_features_for_logreg(g1_d)\r\n g2_labels = get_class_labels(g2_md)\r\n g1_labels = get_class_labels(g1_md)\r\n\r\n n_runs = 20\r\n\r\n # Init arrays for storing performance metrics\r\n auc_scores = np.zeros([n_runs, ])\r\n accs = np.zeros([n_runs, ])\r\n sens = np.zeros([n_runs, ])\r\n spec = np.zeros([n_runs, ])\r\n\r\n for run_idx in range(n_runs):\r\n\r\n logger.info('\\tWorking on run [%d / %d]...' % (run_idx + 1, n_runs))\r\n\r\n # Define the Logistic Regression model\r\n model = LogisticRegression(C=__REG_PARAM,\r\n solver='lbfgs',\r\n max_iter=1000)\r\n\r\n # Train the model\r\n model_hist = model.fit(X=g2_d, y=g2_labels)\r\n\r\n # Calculate the predictions\r\n g1_preds = model.predict_proba(X=g1_d)\r\n\r\n # Get and store ROC AUC\r\n g1_auc = 100 * roc_auc_score(y_true=g1_labels, y_score=g1_preds[:, 1])\r\n auc_scores[run_idx] = g1_auc\r\n\r\n # Get optimal decision threshold\r\n opt_thresh = get_opt_thresh(preds=g1_preds[:, 1],\r\n labels=g1_labels)\r\n\r\n # Store performance metrics\r\n accs[run_idx] = 100 * get_acc(preds=g1_preds[:, 1],\r\n labels=g1_labels,\r\n threshold=opt_thresh)\r\n sens[run_idx] = 100 * get_sens(preds=g1_preds[:, 1],\r\n labels=g1_labels,\r\n threshold=opt_thresh)\r\n spec[run_idx] = 100 * get_spec(preds=g1_preds[:, 1],\r\n labels=g1_labels,\r\n threshold=opt_thresh)\r\n # Plot ROC curve\r\n plt_roc_curve(preds=g1_preds[:, 1], labels=g1_labels,\r\n save_str='logreg_run_%d_roc' % run_idx, save=True)\r\n\r\n # Report AUC at this run\r\n logger.info('\\t\\tAUC:\\t%.2f' % g1_auc)\r\n\r\n # Get the class predictions\r\n class_preds = g1_preds * np.zeros_like(g1_preds)\r\n class_preds[g1_preds >= opt_thresh] = 1\r\n\r\n # Report performance metrics to logger\r\n logger.info('Average performance metrics')\r\n logger.info('')\r\n report_metrics(aucs=auc_scores, accs=accs, sens=sens, spec=spec,\r\n logger=logger)\r\n"
] |
[
[
"sklearn.metrics.roc_auc_score",
"matplotlib.pyplot.tight_layout",
"sklearn.linear_model.LogisticRegression",
"numpy.linspace",
"matplotlib.pyplot.rc",
"matplotlib.pyplot.plot",
"matplotlib.pyplot.ylabel",
"numpy.size",
"numpy.zeros_like",
"matplotlib.pyplot.close",
"matplotlib.pyplot.xlabel",
"numpy.array",
"matplotlib.pyplot.tick_params",
"numpy.zeros",
"matplotlib.pyplot.figure"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
drkostas/tda_examples
|
[
"3fdef4f890ced14b8e3207bd9393eaf262dd0c24"
] |
[
"gda-public/multidim/test_rca1.py"
] |
[
"r\"\"\" \nBasic tests of RCA1 algorithm in :class:`multidim.PointCloud` \n\nCopyright\n---------\n- This file is part of https://github.com/geomdata/gda-public/ \n- 2015, 2016, 2017 by Geometric Data Analytics, Inc. (http://geomdata.com)\n- AGPL license. See `LICENSE` or https://github.com/geomdata/gda-public/blob/master/LICENSE\n\n\"\"\"\n\nfrom __future__ import print_function\n\nimport numpy as np\nNDBL = np.float64\nimport pandas\nimport multidim\nimport multidim.covertree\nimport time\nimport sys\nimport bz2\n\nclass TestRips:\n\n def setup(self):\n self.circ = multidim.PointCloud(np.load(\"tests/circle.npy\"), max_length=-1)\n\n def teardown(self):\n del self.circ\n\n def setup_method(self, function):\n pass\n \n def teardown_method(self, function):\n pass\n\n def test_rca1_circle(self):\n self.circ.make_pers1_rca1(cutoff=0.2)\n\n def test_rca1_offset(self):\n for x in [0.0,]:# 0.5 fails\n data = np.array([[0.,0.],[1.,0.],[0.,1.],[1.,1.-x]])\n pc = multidim.PointCloud(data, max_length=-1)\n #pc.make_pers1_rca1()\n \nif __name__ == '__main__':\n T = TestCovers()\n T.setup()\n T.test_rca1_circle()\n T.test_rca1_offset()\n T.teardown()\n"
] |
[
[
"numpy.load",
"numpy.array"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
changhaowu/macaron-net
|
[
"e37053e34875591f5eea2d6aaf9121819004f1dc"
] |
[
"translation/fairseq/data/language_pair_dataset.py"
] |
[
"# Copyright (c) 2017-present, Facebook, Inc.\n# All rights reserved.\n#\n# This source code is licensed under the license found in the LICENSE file in\n# the root directory of this source tree. An additional grant of patent rights\n# can be found in the PATENTS file in the same directory.\n\nimport numpy as np\nimport torch\n\nfrom fairseq import utils\n\nfrom . import data_utils, FairseqDataset\n\n\ndef collate(\n samples, pad_idx, eos_idx, left_pad_source=True, left_pad_target=False,\n input_feeding=True,\n):\n if len(samples) == 0:\n return {}\n\n def merge(key, left_pad, move_eos_to_beginning=False):\n return data_utils.collate_tokens(\n [s[key] for s in samples],\n pad_idx, eos_idx, left_pad, move_eos_to_beginning,\n )\n\n id = torch.LongTensor([s['id'] for s in samples])\n src_tokens = merge('source', left_pad=left_pad_source)\n # sort by descending source length\n src_lengths = torch.LongTensor([s['source'].numel() for s in samples])\n src_lengths, sort_order = src_lengths.sort(descending=True)\n id = id.index_select(0, sort_order)\n src_tokens = src_tokens.index_select(0, sort_order)\n\n prev_output_tokens = None\n target = None\n if samples[0].get('target', None) is not None:\n target = merge('target', left_pad=left_pad_target)\n target = target.index_select(0, sort_order)\n ntokens = sum(len(s['target']) for s in samples)\n\n if input_feeding:\n # we create a shifted version of targets for feeding the\n # previous output token(s) into the next decoder step\n prev_output_tokens = merge(\n 'target',\n left_pad=left_pad_target,\n move_eos_to_beginning=True,\n )\n prev_output_tokens = prev_output_tokens.index_select(0, sort_order)\n else:\n ntokens = sum(len(s['source']) for s in samples)\n\n batch = {\n 'id': id,\n 'ntokens': ntokens,\n 'net_input': {\n 'src_tokens': src_tokens,\n 'src_lengths': src_lengths,\n },\n 'target': target,\n 'nsentences': samples[0]['source'].size(0),\n }\n if prev_output_tokens is not None:\n batch['net_input']['prev_output_tokens'] = prev_output_tokens\n return batch\n\n\nclass LanguagePairDataset(FairseqDataset):\n \"\"\"\n A pair of torch.utils.data.Datasets.\n\n Args:\n src (torch.utils.data.Dataset): source dataset to wrap\n src_sizes (List[int]): source sentence lengths\n src_dict (~fairseq.data.Dictionary): source vocabulary\n tgt (torch.utils.data.Dataset, optional): target dataset to wrap\n tgt_sizes (List[int], optional): target sentence lengths\n tgt_dict (~fairseq.data.Dictionary, optional): target vocabulary\n left_pad_source (bool, optional): pad source tensors on the left side.\n Default: ``True``\n left_pad_target (bool, optional): pad target tensors on the left side.\n Default: ``False``\n max_source_positions (int, optional): max number of tokens in the source\n sentence. Default: ``1024``\n max_target_positions (int, optional): max number of tokens in the target\n sentence. 
Default: ``1024``\n shuffle (bool, optional): shuffle dataset elements before batching.\n Default: ``True``\n input_feeding (bool, optional): create a shifted version of the targets\n to be passed into the model for input feeding/teacher forcing.\n Default: ``True``\n \"\"\"\n\n def __init__(\n self, src, src_sizes, src_dict,\n tgt=None, tgt_sizes=None, tgt_dict=None,\n left_pad_source=True, left_pad_target=False,\n max_source_positions=1024, max_target_positions=1024,\n shuffle=True, input_feeding=True,\n ):\n if tgt_dict is not None:\n assert src_dict.pad() == tgt_dict.pad()\n assert src_dict.eos() == tgt_dict.eos()\n assert src_dict.unk() == tgt_dict.unk()\n self.src = src\n self.tgt = tgt\n self.src_sizes = np.array(src_sizes)\n self.tgt_sizes = np.array(tgt_sizes) if tgt_sizes is not None else None\n self.src_dict = src_dict\n self.tgt_dict = tgt_dict\n self.left_pad_source = left_pad_source\n self.left_pad_target = left_pad_target\n self.max_source_positions = max_source_positions\n self.max_target_positions = max_target_positions\n self.shuffle = shuffle\n self.input_feeding = input_feeding\n\n def __getitem__(self, index):\n return {\n 'id': index,\n 'source': self.src[index],\n 'target': self.tgt[index] if self.tgt is not None else None,\n }\n\n def __len__(self):\n return len(self.src)\n\n def collater(self, samples):\n \"\"\"Merge a list of samples to form a mini-batch.\n\n Args:\n samples (List[dict]): samples to collate\n\n Returns:\n dict: a mini-batch with the following keys:\n\n - `id` (LongTensor): example IDs in the original input order\n - `ntokens` (int): total number of tokens in the batch\n - `net_input` (dict): the input to the Model, containing keys:\n\n - `src_tokens` (LongTensor): a padded 2D Tensor of tokens in\n the source sentence of shape `(bsz, src_len)`. Padding will\n appear on the left if *left_pad_source* is ``True``.\n - `src_lengths` (LongTensor): 1D Tensor of the unpadded\n lengths of each source sentence of shape `(bsz)`\n - `prev_output_tokens` (LongTensor): a padded 2D Tensor of\n tokens in the target sentence, shifted right by one position\n for input feeding/teacher forcing, of shape `(bsz,\n tgt_len)`. This key will not be present if *input_feeding*\n is ``False``. Padding will appear on the left if\n *left_pad_target* is ``True``.\n\n - `target` (LongTensor): a padded 2D Tensor of tokens in the\n target sentence of shape `(bsz, tgt_len)`. Padding will appear\n on the left if *left_pad_target* is ``True``.\n \"\"\"\n return collate(\n samples, pad_idx=self.src_dict.pad(), eos_idx=self.src_dict.eos(),\n left_pad_source=self.left_pad_source, left_pad_target=self.left_pad_target,\n input_feeding=self.input_feeding,\n )\n\n def get_dummy_batch(self, num_tokens, max_positions, src_len=128, tgt_len=128):\n \"\"\"Return a dummy batch with a given number of tokens.\"\"\"\n src_len, tgt_len = utils.resolve_max_positions(\n (src_len, tgt_len),\n max_positions,\n (self.max_source_positions, self.max_target_positions),\n )\n bsz = num_tokens // max(src_len, tgt_len)\n return self.collater([\n {\n 'id': i,\n 'source': self.src_dict.dummy_sentence(src_len),\n 'target': self.tgt_dict.dummy_sentence(tgt_len) if self.tgt_dict is not None else None,\n }\n for i in range(bsz)\n ])\n\n def num_tokens(self, index):\n \"\"\"Return the number of tokens in a sample. 
This value is used to\n enforce ``--max-tokens`` during batching.\"\"\"\n return max(self.src_sizes[index], self.tgt_sizes[index] if self.tgt_sizes is not None else 0)\n\n def size(self, index):\n \"\"\"Return an example's size as a float or tuple. This value is used when\n filtering a dataset with ``--max-positions``.\"\"\"\n return (self.src_sizes[index], self.tgt_sizes[index] if self.tgt_sizes is not None else 0)\n\n def ordered_indices(self):\n \"\"\"Return an ordered list of indices. Batches will be constructed based\n on this order.\"\"\"\n if self.shuffle:\n indices = np.random.permutation(len(self))\n else:\n indices = np.arange(len(self))\n if self.tgt_sizes is not None:\n indices = indices[np.argsort(self.tgt_sizes[indices], kind='mergesort')]\n return indices[np.argsort(self.src_sizes[indices], kind='mergesort')]\n"
] |
[
[
"numpy.argsort",
"torch.LongTensor",
"numpy.array"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
cmgreen210/cudf
|
[
"89462008c24a647de457ce595ab44c0c0d758450"
] |
[
"python/cudf/tests/test_dataframe.py"
] |
[
"# Copyright (c) 2018, NVIDIA CORPORATION.\n\nimport operator\nimport pytest\n\nimport numpy as np\nimport pandas as pd\nimport pyarrow as pa\nimport array as arr\n\nfrom librmm_cffi import librmm as rmm\n\nimport cudf as gd\nfrom cudf.dataframe.dataframe import Series, DataFrame\nfrom cudf.dataframe.buffer import Buffer\nfrom cudf.settings import set_options\n\nfrom itertools import combinations\n\nfrom . import utils\nfrom .utils import assert_eq\n\n\ndef test_buffer_basic():\n n = 10\n buf = Buffer(np.arange(n, dtype=np.float64))\n assert buf.size == n\n assert buf.capacity == n\n np.testing.assert_equal(buf.mem.copy_to_host(),\n np.arange(n, dtype=np.float64))\n\n\ndef test_buffer_append():\n n = 10\n expected = np.arange(n, dtype=np.float64)\n buf = Buffer(expected, size=n - 4, capacity=n)\n assert buf.size == n - 4\n assert buf.capacity == n\n np.testing.assert_equal(buf.mem.copy_to_host(), expected)\n np.testing.assert_equal(buf.to_array(), np.arange(n - 4, dtype=np.float64))\n\n # Buffer.append\n buf.append(1.23)\n expected[n - 4] = 1.23\n np.testing.assert_equal(buf.mem.copy_to_host(), expected)\n assert buf.size == n - 3\n assert buf.capacity == n\n\n # Buffer.extend\n buf.extend(np.asarray([2, 3]))\n expected[n - 3] = 2\n expected[n - 2] = 3\n np.testing.assert_equal(buf.mem.copy_to_host(), expected)\n assert buf.size == n - 1\n assert buf.capacity == n\n\n # Test out-of-bound\n with pytest.raises(MemoryError):\n buf.extend(np.asarray([2, 3]))\n np.testing.assert_equal(buf.mem.copy_to_host(), expected)\n assert buf.size == n - 1\n assert buf.capacity == n\n\n # Append to last slot\n buf.append(10.125)\n expected[n - 1] = 10.125\n np.testing.assert_equal(buf.mem.copy_to_host(), expected)\n assert buf.size == n\n assert buf.capacity == n\n\n with pytest.raises(MemoryError):\n buf.append(987654)\n\n np.testing.assert_equal(buf.to_array(), expected)\n assert buf.size == n\n assert buf.capacity == n\n\n\ndef test_series_basic():\n # Make series from buffer\n a1 = np.arange(10, dtype=np.float64)\n series = Series(a1)\n assert len(series) == 10\n np.testing.assert_equal(series.to_array(), np.hstack([a1]))\n\n\ndef test_series_append():\n a1 = np.arange(10, dtype=np.float64)\n series = Series(a1)\n # Add new buffer\n a2 = np.arange(5)\n series = series.append(a2)\n assert len(series) == 15\n np.testing.assert_equal(series.to_array(), np.hstack([a1, a2]))\n\n # Ensure appending to previous buffer\n a3 = np.arange(3)\n series = series.append(a3)\n assert len(series) == 18\n a4 = np.hstack([a1, a2, a3])\n np.testing.assert_equal(series.to_array(), a4)\n\n # Appending different dtype\n a5 = np.array([1, 2, 3], dtype=np.int32)\n a6 = np.array([4.5, 5.5, 6.5], dtype=np.float64)\n series = Series(a5).append(a6)\n np.testing.assert_equal(series.to_array(), np.hstack([a5, a6]))\n series = Series(a6).append(a5)\n np.testing.assert_equal(series.to_array(), np.hstack([a6, a5]))\n\n\nindex_dtypes = [np.int64, np.int32, np.int16, np.int8,\n np.uint64, np.uint32, np.uint16, np.uint8]\n\n\[email protected](\n 'i1, i2, i3',\n ([(slice(None, 12), slice(3, None), slice(None, None, 2)),\n (range(12), range(3, 12), range(0, 9, 2)),\n (np.arange(12), np.arange(3, 12), np.arange(0, 9, 2)),\n (list(range(12)), list(range(3, 12)), list(range(0, 9, 2))),\n (pd.Series(range(12)), pd.Series(range(3, 12)),\n pd.Series(range(0, 9, 2))),\n (Series(range(12)), Series(range(3, 12)), Series(range(0, 9, 2))),\n ([i in range(12) for i in range(20)],\n [i in range(3, 12) for i in range(12)],\n [i in range(0, 9, 2) for i in 
range(9)]),\n (np.array([i in range(12) for i in range(20)], dtype=bool),\n np.array([i in range(3, 12) for i in range(12)], dtype=bool),\n np.array([i in range(0, 9, 2) for i in range(9)], dtype=bool))]\n + [(np.arange(12, dtype=t), np.arange(3, 12, dtype=t),\n np.arange(0, 9, 2, dtype=t)) for t in index_dtypes]),\n ids=(['slice', 'range', 'numpy.array', 'list', 'pandas.Series',\n 'Series', 'list[bool]', 'numpy.array[bool]']\n + ['numpy.array[%s]' % t.__name__ for t in index_dtypes]))\ndef test_series_indexing(i1, i2, i3):\n a1 = np.arange(20)\n series = Series(a1)\n # Indexing\n sr1 = series[i1]\n assert sr1.null_count == 0\n np.testing.assert_equal(sr1.to_array(), a1[:12])\n sr2 = sr1[i2]\n assert sr2.null_count == 0\n np.testing.assert_equal(sr2.to_array(), a1[3:12])\n # Index with stride\n sr3 = sr2[i3]\n assert sr3.null_count == 0\n np.testing.assert_equal(sr3.to_array(), a1[3:12:2])\n\n # Integer indexing\n if isinstance(i1, range):\n for i in i1: # Python int-s\n assert series[i] == a1[i]\n if isinstance(i1, np.ndarray) and i1.dtype in index_dtypes:\n for i in i1: # numpy integers\n assert series[i] == a1[i]\n\n\ndef test_series_init_none():\n\n # test for creating empty series\n # 1: without initializing\n sr1 = Series()\n got = sr1.to_string()\n print(got)\n expect = '<empty Series of dtype=float64>'\n # values should match despite whitespace difference\n assert got.split() == expect.split()\n\n # 2: Using `None` as a initializer\n sr2 = Series(None)\n got = sr2.to_string()\n print(got)\n expect = '<empty Series of dtype=float64>'\n # values should match despite whitespace difference\n assert got.split() == expect.split()\n\n\ndef test_series_replace():\n a1 = np.array([0, 1, 2, 3, 4])\n\n # Numerical\n a2 = np.array([5, 1, 2, 3, 4])\n sr1 = Series(a1)\n sr2 = sr1.replace(0, 5)\n np.testing.assert_equal(sr2.to_array(), a2)\n\n # Categorical\n psr3 = pd.Series([\"one\", \"two\", \"three\"], dtype='category')\n psr4 = psr3.replace(\"one\", \"two\")\n sr3 = Series.from_pandas(psr3)\n sr4 = sr3.replace(\"one\", \"two\")\n pd.testing.assert_series_equal(sr4.to_pandas(), psr4)\n\n # List input\n a6 = np.array([5, 6, 2, 3, 4])\n sr6 = sr1.replace([0, 1], [5, 6])\n np.testing.assert_equal(sr6.to_array(), a6)\n\n a7 = np.array([5.5, 6.5, 2, 3, 4])\n sr7 = sr1.replace([0, 1], [5.5, 6.5])\n np.testing.assert_equal(sr7.to_array(), a7)\n\n # Series input\n a8 = np.array([5, 5, 5, 3, 4])\n sr8 = sr1.replace(sr1[:3], 5)\n np.testing.assert_equal(sr8.to_array(), a8)\n\n\ndef test_dataframe_basic():\n np.random.seed(0)\n df = DataFrame()\n\n # Populate with cuda memory\n df['keys'] = rmm.to_device(np.arange(10, dtype=np.float64))\n np.testing.assert_equal(df['keys'].to_array(), np.arange(10))\n assert len(df) == 10\n\n # Populate with numpy array\n rnd_vals = np.random.random(10)\n df['vals'] = rnd_vals\n np.testing.assert_equal(df['vals'].to_array(), rnd_vals)\n assert len(df) == 10\n assert tuple(df.columns) == ('keys', 'vals')\n\n # Make another dataframe\n df2 = DataFrame()\n df2['keys'] = np.array([123], dtype=np.float64)\n df2['vals'] = np.array([321], dtype=np.float64)\n\n # Concat\n df = gd.concat([df, df2])\n assert len(df) == 11\n\n hkeys = np.asarray(np.arange(10, dtype=np.float64).tolist() + [123])\n hvals = np.asarray(rnd_vals.tolist() + [321])\n\n np.testing.assert_equal(df['keys'].to_array(), hkeys)\n np.testing.assert_equal(df['vals'].to_array(), hvals)\n\n # As matrix\n mat = df.as_matrix()\n\n expect = np.vstack([hkeys, hvals]).T\n\n print(expect)\n print(mat)\n 
np.testing.assert_equal(mat, expect)\n\n\ndef test_dataframe_column_name_indexing():\n df = DataFrame()\n data = np.asarray(range(10), dtype=np.int32)\n df['a'] = data\n df[1] = data\n np.testing.assert_equal(df['a'].to_array(),\n np.asarray(range(10), dtype=np.int32))\n np.testing.assert_equal(df[1].to_array(),\n np.asarray(range(10), dtype=np.int32))\n\n pdf = pd.DataFrame()\n nelem = 10\n pdf['key1'] = np.random.randint(0, 5, nelem)\n pdf['key2'] = np.random.randint(0, 3, nelem)\n pdf[1] = np.arange(1, 1 + nelem)\n pdf[2] = np.random.random(nelem)\n df = DataFrame.from_pandas(pdf)\n for i in range(1, len(pdf.columns)+1):\n for idx in combinations(pdf.columns, i):\n assert(pdf[list(idx)].equals(df[list(idx)].to_pandas()))\n\n # test for only numeric columns\n df = pd.DataFrame()\n for i in range(0, 10):\n df[i] = range(nelem)\n gdf = DataFrame.from_pandas(df)\n assert_eq(gdf, df)\n\n\ndef test_dataframe_drop_method():\n df = DataFrame()\n data = np.asarray(range(10))\n df['a'] = data\n df['b'] = data\n df['c'] = data\n\n assert tuple(df.columns) == ('a', 'b', 'c')\n assert tuple(df.drop('a').columns) == ('b', 'c')\n assert tuple(df.columns) == ('a', 'b', 'c')\n assert tuple(df.drop(['a', 'b']).columns) == ('c',)\n assert tuple(df.columns) == ('a', 'b', 'c')\n\n # Test drop error\n with pytest.raises(NameError) as raises:\n df.drop('d')\n raises.match(\"column 'd' does not exist\")\n with pytest.raises(NameError) as raises:\n df.drop(['a', 'd', 'b'])\n raises.match(\"column 'd' does not exist\")\n\n\ndef test_dataframe_column_add_drop():\n df = DataFrame()\n data = np.asarray(range(10))\n df['a'] = data\n df['b'] = data\n assert tuple(df.columns) == ('a', 'b')\n del df['a']\n assert tuple(df.columns) == ('b',)\n df['c'] = data\n assert tuple(df.columns) == ('b', 'c')\n df['a'] = data\n assert tuple(df.columns) == ('b', 'c', 'a')\n\n\[email protected]('nelem', [0, 3, 100, 1000])\ndef test_dataframe_astype(nelem):\n df = DataFrame()\n data = np.asarray(range(nelem), dtype=np.int32)\n df['a'] = data\n assert df['a'].dtype is np.dtype(np.int32)\n df['b'] = df['a'].astype(np.float32)\n assert df['b'].dtype is np.dtype(np.float32)\n np.testing.assert_equal(df['a'].to_array(), df['b'].to_array())\n\n\ndef test_dataframe_slicing():\n df = DataFrame()\n size = 123\n df['a'] = ha = np.random.randint(low=0, high=100, size=size)\\\n .astype(np.int32)\n df['b'] = hb = np.random.random(size).astype(np.float32)\n df['c'] = hc = np.random.randint(low=0, high=100, size=size)\\\n .astype(np.int64)\n df['d'] = hd = np.random.random(size).astype(np.float64)\n\n # Row slice first 10\n first_10 = df[:10]\n assert len(first_10) == 10\n assert tuple(first_10.columns) == ('a', 'b', 'c', 'd')\n np.testing.assert_equal(first_10['a'].to_array(), ha[:10])\n np.testing.assert_equal(first_10['b'].to_array(), hb[:10])\n np.testing.assert_equal(first_10['c'].to_array(), hc[:10])\n np.testing.assert_equal(first_10['d'].to_array(), hd[:10])\n del first_10\n\n # Row slice last 10\n last_10 = df[-10:]\n assert len(last_10) == 10\n assert tuple(last_10.columns) == ('a', 'b', 'c', 'd')\n np.testing.assert_equal(last_10['a'].to_array(), ha[-10:])\n np.testing.assert_equal(last_10['b'].to_array(), hb[-10:])\n np.testing.assert_equal(last_10['c'].to_array(), hc[-10:])\n np.testing.assert_equal(last_10['d'].to_array(), hd[-10:])\n del last_10\n\n # Row slice [begin:end]\n begin = 7\n end = 121\n subrange = df[begin:end]\n assert len(subrange) == end - begin\n assert tuple(subrange.columns) == ('a', 'b', 'c', 'd')\n 
np.testing.assert_equal(subrange['a'].to_array(), ha[begin:end])\n np.testing.assert_equal(subrange['b'].to_array(), hb[begin:end])\n np.testing.assert_equal(subrange['c'].to_array(), hc[begin:end])\n np.testing.assert_equal(subrange['d'].to_array(), hd[begin:end])\n del subrange\n\n\ndef test_dataframe_loc():\n df = DataFrame()\n size = 123\n df['a'] = ha = np.random.randint(low=0, high=100, size=size)\\\n .astype(np.int32)\n df['b'] = hb = np.random.random(size).astype(np.float32) # noqa: F841\n df['c'] = hc = np.random.randint(low=0, high=100, size=size)\\\n .astype(np.int64)\n df['d'] = hd = np.random.random(size).astype(np.float64)\n\n # Full slice\n full = df.loc[:, ['c']]\n assert tuple(full.columns) == ('c',)\n np.testing.assert_equal(full['c'].to_array(), hc)\n\n begin = 117\n end = 122\n fewer = df.loc[begin:end, ['c', 'd', 'a']]\n assert len(fewer) == end - begin + 1\n assert tuple(fewer.columns) == ('c', 'd', 'a')\n np.testing.assert_equal(fewer['a'].to_array(), ha[begin:end + 1])\n np.testing.assert_equal(fewer['c'].to_array(), hc[begin:end + 1])\n np.testing.assert_equal(fewer['d'].to_array(), hd[begin:end + 1])\n del fewer\n\n # Make int64 index\n offset = 50\n df2 = df[offset:]\n begin = 117\n end = 122\n fewer = df2.loc[begin:end, ['c', 'd', 'a']]\n assert len(fewer) == end - begin + 1\n assert tuple(fewer.columns) == ('c', 'd', 'a')\n np.testing.assert_equal(fewer['a'].to_array(), ha[begin:end + 1])\n np.testing.assert_equal(fewer['c'].to_array(), hc[begin:end + 1])\n np.testing.assert_equal(fewer['d'].to_array(), hd[begin:end + 1])\n\n\[email protected]('nelem', [2, 5, 20, 100])\ndef test_series_iloc(nelem):\n\n # create random series\n np.random.seed(12)\n ps = pd.Series(np.random.sample(nelem))\n\n # gpu series\n gs = Series(ps)\n\n # positive tests for indexing\n np.testing.assert_allclose(gs.iloc[-1*nelem], ps.iloc[-1*nelem])\n np.testing.assert_allclose(gs.iloc[-1], ps.iloc[-1])\n np.testing.assert_allclose(gs.iloc[0], ps.iloc[0])\n np.testing.assert_allclose(gs.iloc[1], ps.iloc[1])\n np.testing.assert_allclose(gs.iloc[nelem-1], ps.iloc[nelem-1])\n\n # positive tests for slice\n np.testing.assert_allclose(gs.iloc[-1:1], ps.iloc[-1:1])\n np.testing.assert_allclose(\n gs.iloc[nelem-1:-1], ps.iloc[nelem-1:-1])\n np.testing.assert_allclose(gs.iloc[0:nelem-1], ps.iloc[0:nelem-1])\n np.testing.assert_allclose(gs.iloc[0:nelem], ps.iloc[0:nelem])\n np.testing.assert_allclose(gs.iloc[1:1], ps.iloc[1:1])\n np.testing.assert_allclose(gs.iloc[1:2], ps.iloc[1:2])\n np.testing.assert_allclose(\n gs.iloc[nelem-1:nelem+1], ps.iloc[nelem-1:nelem+1])\n np.testing.assert_allclose(\n gs.iloc[nelem:nelem*2], ps.iloc[nelem:nelem*2])\n\n\[email protected]('nelem', [2, 5, 20, 100])\ndef test_dataframe_iloc(nelem):\n gdf = DataFrame()\n\n gdf['a'] = ha = np.random.randint(low=0, high=100, size=nelem) \\\n .astype(np.int32)\n gdf['b'] = hb = np.random.random(nelem).astype(np.float32)\n\n pdf = pd.DataFrame()\n pdf['a'] = ha\n pdf['b'] = hb\n\n # Positive tests for slicing using iloc\n def assert_col(g, p):\n np.testing.assert_equal(g['a'].to_array(), p['a'])\n np.testing.assert_equal(g['b'].to_array(), p['b'])\n\n assert_col(gdf.iloc[-1:1], pdf.iloc[-1:1])\n assert_col(gdf.iloc[nelem-1:-1], pdf.iloc[nelem-1:-1])\n assert_col(gdf.iloc[0:nelem-1], pdf.iloc[0:nelem-1])\n assert_col(gdf.iloc[0:nelem], pdf.iloc[0:nelem])\n assert_col(gdf.iloc[1:1], pdf.iloc[1:1])\n assert_col(gdf.iloc[1:2], pdf.iloc[1:2])\n assert_col(gdf.iloc[nelem-1:nelem+1], pdf.iloc[nelem-1:nelem+1])\n 
assert_col(gdf.iloc[nelem:nelem*2], pdf.iloc[nelem:nelem*2])\n\n # Positive tests for int indexing\n def assert_series(g, p):\n np.testing.assert_equal(g.to_array(), p)\n\n assert_series(gdf.iloc[-1 * nelem], pdf.iloc[-1 * nelem])\n assert_series(gdf.iloc[-1], pdf.iloc[-1])\n assert_series(gdf.iloc[0], pdf.iloc[0])\n assert_series(gdf.iloc[1], pdf.iloc[1])\n assert_series(gdf.iloc[nelem - 1], pdf.iloc[nelem - 1])\n\n\[email protected](\n raises=NotImplementedError,\n reason=\"cudf columnar iloc not supported\"\n)\ndef test_dataframe_iloc_tuple():\n gdf = DataFrame()\n nelem = 123\n gdf['a'] = ha = np.random.randint(low=0, high=100, size=nelem) \\\n .astype(np.int32)\n gdf['b'] = hb = np.random.random(nelem).astype(np.float32)\n\n pdf = pd.DataFrame()\n pdf['a'] = ha\n pdf['b'] = hb\n\n def assert_col(g, p):\n np.testing.assert_equal(g['a'].to_array(), p['a'])\n np.testing.assert_equal(g['b'].to_array(), p['b'])\n\n assert_col(gdf.iloc[1, 2], pdf.iloc[1, 2])\n\n\[email protected](\n raises=IndexError,\n reason=\"positional indexers are out-of-bounds\"\n)\ndef test_dataframe_iloc_index_error():\n gdf = DataFrame()\n nelem = 123\n gdf['a'] = ha = np.random.randint(low=0, high=100, size=nelem) \\\n .astype(np.int32)\n gdf['b'] = hb = np.random.random(nelem).astype(np.float32)\n\n pdf = pd.DataFrame()\n pdf['a'] = ha\n pdf['b'] = hb\n\n def assert_col(g, p):\n np.testing.assert_equal(g['a'].to_array(), p['a'])\n np.testing.assert_equal(g['b'].to_array(), p['b'])\n\n assert_col(gdf.iloc[nelem*2], pdf.iloc[nelem*2])\n\n\[email protected](\n raises=ValueError,\n reason=\"updating columns using df.iloc[] is not allowed\"\n)\ndef test_dataframe_iloc_setitem():\n gdf = DataFrame()\n nelem = 123\n gdf['a'] = np.random.randint(low=0, high=100, size=nelem) \\\n .astype(np.int32)\n gdf['b'] = np.random.random(nelem).astype(np.float32)\n\n gdf.iloc[0] = nelem\n\n\ndef test_dataframe_to_string():\n with set_options(formatting={'nrows': 5, 'ncols': 8}):\n # Test basic\n df = DataFrame([('a', [1, 2, 3, 4, 5, 6]),\n ('b', [11, 12, 13, 14, 15, 16])])\n string = str(df)\n print(string)\n assert string.splitlines()[-1] == '[1 more rows]'\n\n # Test skipped columns\n df = DataFrame([('a', [1, 2, 3, 4, 5, 6]),\n ('b', [11, 12, 13, 14, 15, 16]),\n ('c', [11, 12, 13, 14, 15, 16]),\n ('d', [11, 12, 13, 14, 15, 16])])\n string = df.to_string(ncols=3)\n print(string)\n assert string.splitlines()[-2] == '[1 more rows]'\n assert string.splitlines()[-1] == '[1 more columns]'\n\n # Test masked\n df = DataFrame([('a', [1, 2, 3, 4, 5, 6]),\n ('b', [11, 12, 13, 14, 15, 16])])\n\n data = np.arange(6)\n mask = np.zeros(1, dtype=np.uint8)\n mask[0] = 0b00101101\n\n masked = Series.from_masked_array(data, mask)\n assert masked.null_count == 2\n df['c'] = masked\n\n # check data\n values = list(masked)\n validids = [0, 2, 3, 5]\n densearray = masked.to_array()\n np.testing.assert_equal(data[validids], densearray)\n # valid position is corret\n for i in validids:\n assert data[i] == values[i]\n # null position is correct\n for i in range(len(values)):\n if i not in validids:\n assert values[i] is None\n\n got = df.to_string(nrows=None)\n print(got)\n expect = '''\n a b c\n0 1 11 0\n1 2 12\n2 3 13 2\n3 4 14 3\n4 5 15\n5 6 16 5\n'''\n # values should match despite whitespace difference\n assert got.split() == expect.split()\n\n\ndef test_dataframe_to_string_wide():\n # Test basic\n df = DataFrame()\n for i in range(100):\n df['a{}'.format(i)] = list(range(3))\n got = df.to_string(ncols=8)\n print(got)\n expect = '''\n a0 a1 a2 a3 a4 
a5 a6 ... a99\n0 0 0 0 0 0 0 0 ... 0\n1 1 1 1 1 1 1 1 ... 1\n2 2 2 2 2 2 2 2 ... 2\n[92 more columns]\n'''\n # values should match despite whitespace difference\n assert got.split() == expect.split()\n\n\ndef test_dataframe_empty_to_string():\n # Test for printing empty dataframe\n df = DataFrame()\n got = df.to_string()\n print(got)\n expect = \"Empty DataFrame\\nColumns: []\\nIndex: []\\n\"\n # values should match despite whitespace difference\n assert got.split() == expect.split()\n\n\ndef test_dataframe_emptycolumns_to_string():\n # Test for printing dataframe having empty columns\n df = DataFrame()\n df['a'] = []\n df['b'] = []\n got = df.to_string()\n print(got)\n expect = \"Empty DataFrame\\nColumns: ['a', 'b']\\nIndex: []\\n\"\n # values should match despite whitespace difference\n assert got.split() == expect.split()\n\n\ndef test_dataframe_copy():\n # Test for copying the dataframe using python copy pkg\n from copy import copy\n df = DataFrame()\n df['a'] = [1, 2, 3]\n df2 = copy(df)\n df2['b'] = [4, 5, 6]\n got = df.to_string()\n print(got)\n expect = '''\n a\n0 1\n1 2\n2 3\n'''\n # values should match despite whitespace difference\n assert got.split() == expect.split()\n\n\ndef test_dataframe_copy_shallow():\n # Test for copy dataframe using class method\n df = DataFrame()\n df['a'] = [1, 2, 3]\n df2 = df.copy()\n df2['b'] = [4, 2, 3]\n got = df.to_string()\n print(got)\n expect = '''\n a\n0 1\n1 2\n2 3\n'''\n # values should match despite whitespace difference\n assert got.split() == expect.split()\n\n\ndef test_dataframe_dtypes():\n dtypes = pd.Series([np.int32, np.float32, np.float64],\n index=['c', 'a', 'b'])\n df = DataFrame([(k, np.ones(10, dtype=v))\n for k, v in dtypes.iteritems()])\n assert df.dtypes.equals(dtypes)\n\n\ndef test_dataframe_dir_and_getattr():\n df = DataFrame([('a', np.ones(10)),\n ('b', np.ones(10)),\n ('not an id', np.ones(10)),\n ('oop$', np.ones(10))])\n o = dir(df)\n assert {'a', 'b'}.issubset(o)\n assert 'not an id' not in o\n assert 'oop$' not in o\n\n # Getattr works\n assert df.a is df['a']\n assert df.b is df['b']\n with pytest.raises(AttributeError):\n df.not_a_column\n\n\[email protected]('order', ['C', 'F'])\ndef test_dataframe_as_gpu_matrix(order):\n df = DataFrame()\n\n nelem = 123\n for k in 'abcd':\n df[k] = np.random.random(nelem)\n\n # Check all columns\n mat = df.as_gpu_matrix(order=order).copy_to_host()\n assert mat.shape == (nelem, 4)\n for i, k in enumerate(df.columns):\n np.testing.assert_array_equal(df[k].to_array(), mat[:, i])\n\n # Check column subset\n mat = df.as_gpu_matrix(order=order, columns=['a', 'c']).copy_to_host()\n assert mat.shape == (nelem, 2)\n\n for i, k in enumerate('ac'):\n np.testing.assert_array_equal(df[k].to_array(), mat[:, i])\n\n\ndef test_dataframe_as_gpu_matrix_null_values():\n df = DataFrame()\n\n nelem = 123\n na = -10000\n\n refvalues = {}\n for k in 'abcd':\n df[k] = data = np.random.random(nelem)\n bitmask = utils.random_bitmask(nelem)\n df[k] = df[k].set_mask(bitmask)\n boolmask = np.asarray(utils.expand_bits_to_bytes(bitmask)[:nelem],\n dtype=np.bool_)\n data[~boolmask] = na\n refvalues[k] = data\n\n # Check null value causes error\n with pytest.raises(ValueError) as raises:\n df.as_gpu_matrix()\n raises.match(\"column 'a' has null values\")\n\n for k in df.columns:\n df[k] = df[k].fillna(na)\n\n mat = df.as_gpu_matrix().copy_to_host()\n for i, k in enumerate(df.columns):\n np.testing.assert_array_equal(refvalues[k], mat[:, i])\n\n\[email protected]('ntake', [0, 1, 10, 123, 122, 200])\ndef 
test_dataframe_take(ntake):\n np.random.seed(0)\n df = DataFrame()\n\n nelem = 123\n df['ii'] = ii = np.random.randint(0, 20, nelem)\n df['ff'] = ff = np.random.random(nelem)\n\n take_indices = np.random.randint(0, len(df), ntake)\n\n def check(**kwargs):\n out = df.take(take_indices, **kwargs)\n assert len(out) == ntake\n np.testing.assert_array_equal(out.ii.to_array(), ii[take_indices])\n np.testing.assert_array_equal(out.ff.to_array(), ff[take_indices])\n if kwargs.get('ignore_index'):\n np.testing.assert_array_equal(out.index, np.arange(ntake))\n else:\n np.testing.assert_array_equal(out.index, take_indices)\n\n check()\n check(ignore_index=True)\n\n\ndef test_dataframe_append_empty():\n pdf = pd.DataFrame({\n \"key\": [1, 1, 1, 2, 2, 2, 3, 3, 3, 4, 4, 4],\n \"value\": [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12]\n })\n gdf = DataFrame.from_pandas(pdf)\n\n gdf['newcol'] = 100\n pdf['newcol'] = 100\n\n assert len(gdf['newcol']) == len(pdf)\n assert len(pdf['newcol']) == len(pdf)\n pd.testing.assert_frame_equal(gdf.to_pandas(), pdf)\n\n\ndef test_dataframe_setitem_from_masked_object():\n ary = np.random.randn(100)\n mask = np.zeros(100, dtype=bool)\n mask[:20] = True\n np.random.shuffle(mask)\n ary[mask] = np.nan\n\n test1_null = Series(ary, nan_as_null=True)\n assert(test1_null.has_null_mask)\n assert(test1_null.null_count == 20)\n test1_nan = Series(ary, nan_as_null=False)\n assert(test1_nan.null_count == 0)\n\n test2_null = DataFrame.from_pandas(pd.DataFrame({'a': ary}),\n nan_as_null=True)\n assert(test2_null['a'].has_null_mask)\n assert(test2_null['a'].null_count == 20)\n test2_nan = DataFrame.from_pandas(pd.DataFrame({'a': ary}),\n nan_as_null=False)\n assert(test2_nan['a'].null_count == 0)\n\n gpu_ary = rmm.to_device(ary)\n test3_null = Series(gpu_ary, nan_as_null=True)\n assert(test3_null.has_null_mask)\n assert(test3_null.null_count == 20)\n test3_nan = Series(gpu_ary, nan_as_null=False)\n assert(test3_nan.null_count == 0)\n\n test4 = DataFrame()\n lst = [1, 2, None, 4, 5, 6, None, 8, 9]\n test4['lst'] = lst\n assert(test4['lst'].has_null_mask)\n assert(test4['lst'].null_count == 2)\n\n\ndef test_dataframe_append_to_empty():\n pdf = pd.DataFrame()\n pdf['a'] = []\n pdf['b'] = [1, 2, 3]\n\n gdf = DataFrame()\n gdf['a'] = []\n gdf['b'] = [1, 2, 3]\n\n pd.testing.assert_frame_equal(gdf.to_pandas(), pdf)\n\n\ndef test_dataframe_setitem_index_len1():\n gdf = DataFrame()\n gdf['a'] = [1]\n gdf['b'] = gdf.index.as_column()\n\n np.testing.assert_equal(gdf.b.to_array(), [0])\n\n\ndef test_assign():\n gdf = DataFrame({'x': [1, 2, 3]})\n gdf2 = gdf.assign(y=gdf.x + 1)\n assert list(gdf.columns) == ['x']\n assert list(gdf2.columns) == ['x', 'y']\n\n np.testing.assert_equal(gdf2.y.to_array(), [2, 3, 4])\n\n\[email protected]('nrows', [1, 8, 100, 1000])\ndef test_dataframe_hash_columns(nrows):\n gdf = DataFrame()\n data = np.asarray(range(nrows))\n data[0] = data[-1] # make first and last the same\n gdf['a'] = data\n gdf['b'] = gdf.a + 100\n out = gdf.hash_columns(['a', 'b'])\n assert isinstance(out, Series)\n assert len(out) == nrows\n assert out.dtype == np.int32\n\n # Check default\n out_all = gdf.hash_columns()\n np.testing.assert_array_equal(out.to_array(), out_all.to_array())\n\n # Check single column\n out_one = gdf.hash_columns(['a']).to_array()\n # First matches last\n assert out_one[0] == out_one[-1]\n # Equivalent to the Series.hash_values()\n np.testing.assert_array_equal(\n gdf.a.hash_values().to_array(),\n out_one,\n )\n\n\[email protected]('nrows', [3, 10, 100, 1000])\[email 
protected]('nparts', [1, 2, 8, 13])\[email protected]('nkeys', [1, 2])\ndef test_dataframe_hash_partition(nrows, nparts, nkeys):\n np.random.seed(123)\n gdf = DataFrame()\n keycols = []\n for i in range(nkeys):\n keyname = 'key{}'.format(i)\n gdf[keyname] = np.random.randint(0, 7 - i, nrows)\n keycols.append(keyname)\n gdf['val1'] = np.random.randint(0, nrows * 2, nrows)\n\n got = gdf.partition_by_hash(keycols, nparts=nparts)\n # Must return a list\n assert isinstance(got, list)\n # Must have correct number of partitions\n assert len(got) == nparts\n # All partitions must be DataFrame type\n assert all(isinstance(p, DataFrame) for p in got)\n # Check that all partitions have unique keys\n part_unique_keys = set()\n for p in got:\n if len(p):\n # Take rows of the keycolums and build a set of the key-values\n unique_keys = set(map(tuple, p.as_matrix(columns=keycols)))\n # Ensure that none of the key-values have occurred in other groups\n assert not (unique_keys & part_unique_keys)\n part_unique_keys |= unique_keys\n assert len(part_unique_keys)\n\n\[email protected]('nrows', [3, 10, 50])\ndef test_dataframe_hash_partition_masked_value(nrows):\n gdf = DataFrame()\n gdf['key'] = np.arange(nrows)\n gdf['val'] = np.arange(nrows) + 100\n bitmask = utils.random_bitmask(nrows)\n bytemask = utils.expand_bits_to_bytes(bitmask)\n gdf['val'] = gdf['val'].set_mask(bitmask)\n parted = gdf.partition_by_hash(['key'], nparts=3)\n # Verify that the valid mask is correct\n for p in parted:\n df = p.to_pandas()\n for row in df.itertuples():\n valid = bool(bytemask[row.key])\n expected_value = row.key + 100 if valid else -1\n got_value = row.val\n assert expected_value == got_value\n\n\[email protected]('nrows', [3, 10, 50])\ndef test_dataframe_hash_partition_masked_keys(nrows):\n gdf = DataFrame()\n gdf['key'] = np.arange(nrows)\n gdf['val'] = np.arange(nrows) + 100\n bitmask = utils.random_bitmask(nrows)\n bytemask = utils.expand_bits_to_bytes(bitmask)\n gdf['key'] = gdf['key'].set_mask(bitmask)\n parted = gdf.partition_by_hash(['key'], nparts=3)\n # Verify that the valid mask is correct\n for p in parted:\n df = p.to_pandas()\n for row in df.itertuples():\n valid = bool(bytemask[row.val - 100])\n # val is key + 100\n expected_value = row.val - 100 if valid else -1\n got_value = row.key\n assert expected_value == got_value\n\n\ndef test_dataframe_empty_concat():\n gdf1 = DataFrame()\n gdf1['a'] = []\n gdf1['b'] = []\n\n gdf2 = gdf1.copy()\n\n gdf3 = gd.concat([gdf1, gdf2])\n assert len(gdf3) == 0\n assert len(gdf3.columns) == 2\n\n\[email protected]('nrows', [0, 3, 10, 100, 1000])\ndef test_nonmatching_index_setitem(nrows):\n np.random.seed(0)\n\n gdf = DataFrame()\n gdf['a'] = np.random.randint(2147483647, size=nrows)\n gdf['b'] = np.random.randint(2147483647, size=nrows)\n gdf = gdf.set_index('b')\n\n test_values = np.random.randint(2147483647, size=nrows)\n gdf['c'] = test_values\n assert(len(test_values) == len(gdf['c']))\n assert(gdf['c'].to_pandas().equals(\n Series(test_values).set_index(gdf._index).to_pandas()))\n\n\[email protected]('nelem', [0, 1, 5, 20, 100])\[email protected]('slice_start', [None, 0, 1, 3, 10])\[email protected]('slice_end', [None, 0, 1, 30, 50, -1])\ndef test_dataframe_masked_slicing(nelem, slice_start, slice_end):\n gdf = DataFrame()\n gdf['a'] = list(range(nelem))\n gdf['b'] = list(range(nelem, 2 * nelem))\n gdf['a'] = gdf['a'].set_mask(utils.random_bitmask(nelem))\n gdf['b'] = gdf['b'].set_mask(utils.random_bitmask(nelem))\n\n def do_slice(x):\n return x[slice_start: 
slice_end]\n\n expect = do_slice(gdf.to_pandas())\n got = do_slice(gdf).to_pandas()\n\n pd.testing.assert_frame_equal(expect, got)\n\n\ndef test_from_pandas():\n df = pd.DataFrame({'x': [1, 2, 3]}, index=[4., 5., 6.])\n gdf = gd.DataFrame.from_pandas(df)\n assert isinstance(gdf, gd.DataFrame)\n\n pd.testing.assert_frame_equal(df, gdf.to_pandas())\n\n s = df.x\n gs = gd.Series.from_pandas(s)\n assert isinstance(gs, gd.Series)\n\n pd.testing.assert_series_equal(s, gs.to_pandas())\n\n\[email protected](reason=\"constructor does not coerce index inputs\")\ndef test_index_in_dataframe_constructor():\n a = pd.DataFrame({'x': [1, 2, 3]}, index=[4., 5., 6.])\n b = gd.DataFrame({'x': [1, 2, 3]}, index=[4., 5., 6.])\n\n pd.testing.assert_frame_equal(a, b.to_pandas())\n assert pd.testing.assert_frame_equal(a.loc[4:], b.loc[4:].to_pandas())\n\n\[email protected]('nelem', [0, 2, 3, 100, 1000])\[email protected](\n 'data_type',\n ['bool', 'int8', 'int16', 'int32', 'int64',\n 'float32', 'float64', 'datetime64[ms]']\n)\ndef test_from_arrow(nelem, data_type):\n df = pd.DataFrame(\n {\n 'a': np.random.randint(0, 1000, nelem).astype(data_type),\n 'b': np.random.randint(0, 1000, nelem).astype(data_type)\n }\n )\n padf = pa.Table.from_pandas(df, preserve_index=False)\\\n .replace_schema_metadata(None)\n gdf = gd.DataFrame.from_arrow(padf)\n assert isinstance(gdf, gd.DataFrame)\n\n pd.testing.assert_frame_equal(df, gdf.to_pandas())\n\n s = pa.Array.from_pandas(df.a)\n gs = gd.Series.from_arrow(s)\n assert isinstance(gs, gd.Series)\n\n # For some reason PyArrow to_pandas() converts to numpy array and has\n # better type compatibility\n np.testing.assert_array_equal(s.to_pandas(), gs.to_array())\n\n\[email protected]('nelem', [0, 2, 3, 100, 1000])\[email protected](\n 'data_type',\n ['bool', 'int8', 'int16', 'int32', 'int64',\n 'float32', 'float64', 'datetime64[ms]']\n)\ndef test_to_arrow(nelem, data_type):\n df = pd.DataFrame(\n {\n 'a': np.random.randint(0, 1000, nelem).astype(data_type),\n 'b': np.random.randint(0, 1000, nelem).astype(data_type)\n }\n )\n gdf = gd.DataFrame.from_pandas(df)\n\n pa_df = pa.Table.from_pandas(df, preserve_index=False)\\\n .replace_schema_metadata(None)\n # Pandas uses ns so need to cast columns to ms\n if data_type == 'datetime64[ms]':\n pa_df = pa_df.add_column(\n 0,\n pa_df.column(1)\n .cast(pa.timestamp('ms'))\n .cast(pa.int64())\n .cast(pa.date64())\n ).add_column(\n 0,\n pa_df.column(0)\n .cast(pa.timestamp('ms'))\n .cast(pa.int64())\n .cast(pa.date64())\n ).remove_column(2).remove_column(2)\n pa_gdf = gdf.to_arrow(preserve_index=False).replace_schema_metadata(None)\n\n assert isinstance(pa_gdf, pa.Table)\n assert pa.Table.equals(pa_df, pa_gdf)\n\n pa_s = pa.Array.from_pandas(df.a)\n # Pandas uses ns so need to cast columns to ms\n if data_type == 'datetime64[ms]':\n pa_s = pa_s.cast(pa.timestamp('ms')).cast(pa.int64()).cast(pa.date64())\n pa_gs = gdf['a'].to_arrow()\n\n assert isinstance(pa_gs, pa.Array)\n assert pa.Array.equals(pa_s, pa_gs)\n\n pa_i = pa.Array.from_pandas(df.index)\n pa_gi = gdf.index.to_arrow()\n\n assert isinstance(pa_gi, pa.Array)\n assert pa.Array.equals(pa_i, pa_gi)\n\n\[email protected](\n 'data_type',\n ['bool', 'int8', 'int16', 'int32', 'int64',\n 'float32', 'float64', 'datetime64[ms]']\n)\ndef test_to_from_arrow_nulls(data_type):\n if data_type == 'datetime64[ms]':\n data_type = pa.date64()\n if data_type == 'bool':\n s1 = pa.array([True, None, False, None, True], type=data_type)\n else:\n s1 = pa.array([1, None, 3, None, 5], type=data_type)\n gs1 
= gd.Series.from_arrow(s1)\n assert isinstance(gs1, gd.Series)\n np.testing.assert_array_equal(\n np.array(s1.buffers()[0]),\n gs1.nullmask.to_array()\n )\n assert pa.Array.equals(s1, gs1.to_arrow())\n\n s2 = pa.array([None, None, None, None, None], type=data_type)\n gs2 = gd.Series.from_arrow(s2)\n assert isinstance(gs2, gd.Series)\n np.testing.assert_array_equal(\n np.array(s2.buffers()[0]),\n gs2.nullmask.to_array()\n )\n assert pa.Array.equals(s2, gs2.to_arrow())\n\n\ndef test_to_arrow_categorical():\n df = pd.DataFrame()\n df['a'] = pd.Series(['a', 'b', 'c'], dtype=\"category\")\n gdf = gd.DataFrame.from_pandas(df)\n\n pa_df = pa.Table.from_pandas(df, preserve_index=False)\\\n .replace_schema_metadata(None)\n pa_gdf = gdf.to_arrow(preserve_index=False)\\\n .replace_schema_metadata(None)\n\n assert isinstance(pa_gdf, pa.Table)\n assert pa.Table.equals(pa_df, pa_gdf)\n\n pa_s = pa.Array.from_pandas(df.a)\n pa_gs = gdf['a'].to_arrow()\n\n assert isinstance(pa_gs, pa.Array)\n assert pa.Array.equals(pa_s, pa_gs)\n\n\ndef test_from_arrow_missing_categorical():\n pd_cat = pd.Categorical(['a', 'b', 'c'], categories=['a', 'b'])\n pa_cat = pa.array(pd_cat, from_pandas=True)\n gd_cat = gd.Series(pa_cat)\n\n assert isinstance(gd_cat, gd.Series)\n pd.testing.assert_series_equal(\n pd.Series(pa_cat.to_pandas()), # PyArrow returns a pd.Categorical\n gd_cat.to_pandas()\n )\n\n\[email protected](\n raises=NotImplementedError,\n reason=\"PyArrow does not yet support validity masks in creating \"\n \"DictionaryArray objects\"\n)\ndef test_to_arrow_missing_categorical():\n pd_cat = pd.Categorical(['a', 'b', 'c'], categories=['a', 'b'])\n pa_cat = pa.array(pd_cat, from_pandas=True)\n gd_cat = gd.Series(pa_cat)\n\n assert isinstance(gd_cat, gd.Series)\n assert pa.Array.equals(pa_cat, gd_cat.to_arrow())\n\n\[email protected](\n 'data_type',\n ['int8', 'int16', 'int32', 'int64', 'float32', 'float64', 'datetime64[ms]']\n)\ndef test_from_scalar_typing(data_type):\n if data_type == 'datetime64[ms]':\n scalar = np.dtype('int64').type(np.random.randint(0, 5))\\\n .astype('datetime64[ms]')\n else:\n scalar = np.dtype(data_type).type(np.random.randint(0, 5))\n\n gdf = gd.DataFrame()\n gdf['a'] = [1, 2, 3, 4, 5]\n gdf['b'] = scalar\n assert(gdf['b'].dtype == np.dtype(data_type))\n assert(len(gdf['b']) == len(gdf['a']))\n\n\[email protected](\n 'data_type',\n ['int8', 'int16', 'int32', 'int64', 'float32', 'float64']\n)\ndef test_from_python_array(data_type):\n np_arr = np.random.randint(0, 100, 10).astype(data_type)\n data = memoryview(np_arr)\n data = arr.array(data.format, data)\n\n gs = gd.Series(data)\n\n np.testing.assert_equal(gs.to_array(), np_arr)\n\n\ndef test_series_shape():\n ps = pd.Series([1, 2, 3, 4])\n cs = Series([1, 2, 3, 4])\n\n assert ps.shape == cs.shape\n\n\ndef test_series_shape_empty():\n ps = pd.Series()\n cs = Series([])\n\n assert ps.shape == cs.shape\n\n\ndef test_dataframe_shape():\n pdf = pd.DataFrame({'a': [0, 1, 2, 3], 'b': [0.1, 0.2, None, 0.3]})\n gdf = DataFrame.from_pandas(pdf)\n\n assert pdf.shape == gdf.shape\n\n\ndef test_dataframe_shape_empty():\n pdf = pd.DataFrame()\n gdf = DataFrame()\n\n assert pdf.shape == gdf.shape\n\n\[email protected]('num_cols', [1, 2, 10])\[email protected]('num_rows', [1, 2, 1000])\[email protected](\n 'dtype',\n ['int8', 'int16', 'int32', 'int64', 'float32', 'float64',\n 'datetime64[ms]']\n)\[email protected]('nulls', ['none', 'some', 'all'])\ndef test_dataframe_tranpose(nulls, num_cols, num_rows, dtype):\n if dtype not in ['float32', 'float64'] and 
nulls in ['some', 'all']:\n pytest.skip(msg='nulls not supported in dtype: ' + dtype)\n\n pdf = pd.DataFrame()\n from string import ascii_lowercase\n for i in range(num_cols):\n colname = ascii_lowercase[i]\n data = np.random.randint(0, 26, num_rows).astype(dtype)\n if nulls == 'some':\n idx = np.random.choice(num_rows,\n size=int(num_rows/2),\n replace=False)\n data[idx] = np.nan\n elif nulls == 'all':\n data[:] = np.nan\n pdf[colname] = data\n\n gdf = DataFrame.from_pandas(pdf)\n\n got_function = gdf.transpose()\n got_property = gdf.T\n\n expect = pdf.transpose()\n\n # Temporarily reset index since we don't use index for col names\n if len(expect.columns) > 0:\n expect = expect.reset_index(drop=True)\n expect.columns = [str(x) for x in range(expect.shape[1])]\n\n # Pandas creates an empty index of `object` dtype by default while cuDF\n # creates a RangeIndex by default, type is different but same value\n pd.testing.assert_frame_equal(\n expect,\n got_function.to_pandas(),\n check_index_type=False\n )\n pd.testing.assert_frame_equal(\n expect,\n got_property.to_pandas(),\n check_index_type=False\n )\n\n\[email protected]('num_cols', [0, 1, 2, 10])\[email protected]('num_rows', [0, 1, 2, 1000])\ndef test_dataframe_tranpose_category(num_cols, num_rows):\n pytest.xfail(\"category dtype not yet supported for transpose\")\n pdf = pd.DataFrame()\n from string import ascii_lowercase\n for i in range(num_cols):\n colname = ascii_lowercase[i]\n data = pd.Series(list(ascii_lowercase), dtype='category')\n data = data.sample(num_rows, replace=True).reset_index(drop=True)\n pdf[colname] = data\n\n gdf = DataFrame.from_pandas(pdf)\n\n got_function = gdf.transpose()\n got_property = gdf.T\n\n expect = pdf.transpose()\n\n pd.testing.assert_frame_equal(expect, got_function.to_pandas())\n pd.testing.assert_frame_equal(expect, got_property.to_pandas())\n\n\ndef test_generated_column():\n gdf = DataFrame({'a': (i for i in range(5))})\n assert len(gdf) == 5\n\n\[email protected]\ndef pdf():\n return pd.DataFrame({'x': range(10),\n 'y': range(10)})\n\n\[email protected]\ndef gdf(pdf):\n return gd.DataFrame.from_pandas(pdf)\n\n\[email protected]('func', [\n lambda df: df.mean(),\n lambda df: df.sum(),\n lambda df: df.min(),\n lambda df: df.max(),\n lambda df: df.std(),\n lambda df: df.count(),\n pytest.param(lambda df: df.size, marks=pytest.mark.xfail()),\n])\[email protected]('accessor', [\n pytest.param(lambda df: df, marks=pytest.mark.xfail(\n reason=\"dataframe reductions not yet supported\")),\n lambda df: df.x,\n])\ndef test_reductions(pdf, gdf, accessor, func):\n assert_eq(func(accessor(pdf)), func(accessor(gdf)))\n\n\[email protected]('binop', [\n operator.add,\n operator.mul,\n operator.floordiv,\n operator.truediv,\n pytest.param(operator.mod, marks=pytest.mark.xfail()),\n pytest.param(operator.pow, marks=pytest.mark.xfail()),\n operator.eq,\n operator.lt,\n operator.le,\n operator.gt,\n operator.ge,\n operator.ne,\n])\ndef test_binops_df(pdf, gdf, binop):\n pdf = pdf + 1.0\n gdf = gdf + 1.0\n d = binop(pdf, pdf)\n g = binop(gdf, gdf)\n assert_eq(d, g)\n\n\[email protected]('binop', [\n operator.add,\n operator.mul,\n operator.floordiv,\n operator.truediv,\n pytest.param(operator.mod, marks=pytest.mark.xfail()),\n pytest.param(operator.pow, marks=pytest.mark.xfail()),\n operator.eq,\n operator.lt,\n operator.le,\n operator.gt,\n operator.ge,\n operator.ne,\n])\ndef test_binops_series(pdf, gdf, binop):\n pdf = pdf + 1.0\n gdf = gdf + 1.0\n d = binop(pdf.x, pdf.y)\n g = binop(gdf.x, gdf.y)\n 
assert_eq(d, g)\n\n\[email protected]('func', [\n lambda df: df.empty,\n lambda df: df.x.empty,\n])\ndef test_unary_operators(func, pdf, gdf):\n p = func(pdf)\n g = func(gdf)\n assert_eq(p, g)\n\n\ndef test_is_monotonic(gdf):\n pdf = pd.DataFrame({'x': [1, 2, 3]}, index=[3, 1, 2])\n gdf = gd.DataFrame.from_pandas(pdf)\n assert not gdf.index.is_monotonic\n assert not gdf.index.is_monotonic_increasing\n assert not gdf.index.is_monotonic_decreasing\n\n\ndef test_dataframe_replace():\n # numerical\n pdf1 = pd.DataFrame({'a': [0, 1, 2, 3], 'b': [0, 1, 2, 3]})\n gdf1 = DataFrame.from_pandas(pdf1)\n pdf2 = pdf1.replace(0, 4)\n gdf2 = gdf1.replace(0, 4)\n pd.testing.assert_frame_equal(gdf2.to_pandas(), pdf2)\n\n # categorical\n pdf4 = pd.DataFrame({'a': ['one', 'two', 'three'],\n 'b': ['one', 'two', 'three']}, dtype='category')\n gdf4 = DataFrame.from_pandas(pdf4)\n pdf5 = pdf4.replace('two', 'three')\n gdf5 = gdf4.replace('two', 'three')\n pd.testing.assert_frame_equal(gdf5.to_pandas(), pdf5)\n\n # list input\n pdf6 = pdf1.replace([0, 1], [4, 5])\n gdf6 = gdf1.replace([0, 1], [4, 5])\n pd.testing.assert_frame_equal(gdf6.to_pandas(), pdf6)\n\n pdf7 = pdf1.replace([0, 1], 4)\n gdf7 = gdf1.replace([0, 1], 4)\n pd.testing.assert_frame_equal(gdf7.to_pandas(), pdf7)\n\n # dict input:\n pdf8 = pdf1.replace({'a': 0, 'b': 0}, {'a': 4, 'b': 5})\n gdf8 = gdf1.replace({'a': 0, 'b': 0}, {'a': 4, 'b': 5})\n pd.testing.assert_frame_equal(gdf8.to_pandas(), pdf8)\n\n pdf9 = pdf1.replace({'a': 0}, {'a': 4})\n gdf9 = gdf1.replace({'a': 0}, {'a': 4})\n pd.testing.assert_frame_equal(gdf9.to_pandas(), pdf9)\n\n\[email protected](reason=\"null is not supported in gpu yet\")\ndef test_dataframe_boolean_mask_with_None():\n pdf = pd.DataFrame({'a': [0, 1, 2, 3], 'b': [0.1, 0.2, None, 0.3]})\n gdf = DataFrame.from_pandas(pdf)\n pdf_masked = pdf[[True, False, True, False]]\n gdf_masked = gdf[[True, False, True, False]]\n assert pdf_masked.to_string().split() == gdf_masked.to_string().split()\n\n\n\"\"\"\nThis test compares cudf and Pandas dataframe boolean indexing.\n\"\"\"\n\n\[email protected]('mask_fn', [\n lambda x: x,\n lambda x: np.array(x),\n lambda x: pd.Series(x),\n ])\ndef test_dataframe_boolean_mask(pdf, gdf, mask_fn):\n mask_base = [True, False, True, False, True, False, True, False, True,\n False]\n mask = mask_fn(mask_base)\n assert len(mask) == gdf.shape[0]\n pdf_masked = pdf[mask]\n gdf_masked = gdf[mask]\n assert pdf_masked.to_string().split() == gdf_masked.to_string().split()\n\n\n\"\"\"\nThis test only tests boolean indexing of a cudf DataFrame with a cudf Series.\nPandas does not support cudf Series. 
When masking with a Series, the length\nis not required to match.\n\"\"\"\n\n\ndef test_dataframe_boolean_mask_Series(gdf):\n mask = Series([True, False, True, False])\n mask2 = Series([True, True, True, True])\n mask3 = Series([True, True, True, True, True, True, True, True])\n mask4 = Series([True]) # More likely to trigger an undefined memory read\n mask5 = Series([False])\n mask6 = Series([False, False, False, False])\n gdf_masked = gdf[mask]\n gdf_masked2 = gdf[mask2]\n gdf_masked3 = gdf[mask3]\n gdf_masked4 = gdf[mask4]\n gdf_masked5 = gdf[mask5]\n gdf_masked6 = gdf[mask6]\n assert gdf_masked.shape[0] == 2\n assert gdf_masked2.shape[0] == 4\n assert gdf_masked3.shape[0] == 8\n assert gdf_masked4.shape[0] == 1\n assert gdf_masked5.shape[0] == 0\n assert gdf_masked6.shape[0] == 0\n\n\ndef test_iter(pdf, gdf):\n assert list(pdf) == list(gdf)\n\n\ndef test_iteritems(gdf):\n for k, v in gdf.iteritems():\n assert k in gdf.columns\n assert isinstance(v, gd.Series)\n assert_eq(v, gdf[k])\n\n\[email protected](reason=\"our quantile result is a DataFrame, not a Series\")\ndef test_quantile(pdf, gdf):\n assert_eq(pdf.quantile(), gdf.quantile())\n\n\ndef test_from_pandas_function(pdf):\n gdf = gd.from_pandas(pdf)\n assert isinstance(gdf, gd.DataFrame)\n assert_eq(pdf, gdf)\n\n gdf = gd.from_pandas(pdf.x)\n assert isinstance(gdf, gd.Series)\n assert_eq(pdf.x, gdf)\n\n with pytest.raises(TypeError):\n gd.from_pandas(123)\n\n\[email protected]('preserve_index', [True, False])\ndef test_arrow_pandas_compat(pdf, gdf, preserve_index):\n pdf['z'] = range(10)\n pdf = pdf.set_index('z')\n gdf['z'] = range(10)\n gdf = gdf.set_index('z')\n\n pdf_arrow_table = pa.Table.from_pandas(pdf, preserve_index=preserve_index)\n gdf_arrow_table = gdf.to_arrow(preserve_index=preserve_index)\n\n assert(pa.Table.equals(pdf_arrow_table, gdf_arrow_table))\n\n gdf2 = DataFrame.from_arrow(pdf_arrow_table)\n pdf2 = pdf_arrow_table.to_pandas()\n\n assert_eq(pdf2, gdf2)\n\n\[email protected]('nrows', [1, 8, 100, 1000])\ndef test_series_hash_encode(nrows):\n data = np.asarray(range(nrows))\n s = Series(data, name=\"x1\")\n num_features = 1000\n\n encoded_series = s.hash_encode(num_features)\n assert isinstance(encoded_series, gd.Series)\n enc_arr = encoded_series.to_array()\n assert np.all(enc_arr >= 0)\n assert np.max(enc_arr) < num_features\n\n enc_with_name_arr = s.hash_encode(num_features, use_name=True).to_array()\n assert enc_with_name_arr[0] != enc_arr[0]\n\n\[email protected]('dtype', ['int8', 'int16', 'int32', 'int64',\n 'float32', 'float64'])\ndef test_cuda_array_interface(dtype):\n try:\n import cupy\n _have_cupy = True\n except ImportError:\n _have_cupy = False\n if not _have_cupy:\n pytest.skip('CuPy is not installed')\n\n np_data = np.arange(10).astype(dtype)\n cupy_data = cupy.array(np_data)\n pd_data = pd.Series(np_data)\n\n cudf_data = gd.Series(cupy_data)\n assert_eq(pd_data, cudf_data)\n\n gdf = gd.DataFrame()\n gdf['test'] = cupy_data\n pd_data.name = 'test'\n assert_eq(pd_data, gdf['test'])\n\n\[email protected]('nelem', [0, 2, 3, 100])\[email protected]('nchunks', [1, 2, 5, 10])\[email protected](\n 'data_type',\n ['bool', 'int8', 'int16', 'int32', 'int64',\n 'float32', 'float64', 'datetime64[ms]']\n)\ndef test_from_arrow_chunked_arrays(nelem, nchunks, data_type):\n np_list_data = [np.random.randint(0, 100, nelem).astype(data_type) for\n i in range(nchunks)]\n pa_chunk_array = pa.chunked_array(np_list_data)\n\n expect = pd.Series(pa_chunk_array.to_pandas())\n got = gd.Series(pa_chunk_array)\n\n 
assert_eq(expect, got)\n\n np_list_data2 = [np.random.randint(0, 100, nelem).astype(data_type) for\n i in range(nchunks)]\n pa_chunk_array2 = pa.chunked_array(np_list_data2)\n pa_table = pa.Table.from_arrays([pa_chunk_array, pa_chunk_array2],\n names=['a', 'b'])\n\n expect = pa_table.to_pandas()\n got = gd.DataFrame.from_arrow(pa_table)\n\n assert_eq(expect, got)\n\n\ndef test_gpu_memory_usage_with_boolmask():\n from numba import cuda\n import cudf\n ctx = cuda.current_context()\n\n def query_GPU_memory(note=''):\n memInfo = ctx.get_memory_info()\n usedMemoryGB = (memInfo.total - memInfo.free)/1e9\n return usedMemoryGB\n\n cuda.current_context().deallocations.clear()\n nRows = int(1e8)\n nCols = 2\n dataNumpy = np.asfortranarray(np.random.rand(nRows, nCols))\n colNames = ['col'+str(iCol) for iCol in range(nCols)]\n pandasDF = pd.DataFrame(data=dataNumpy, columns=colNames, dtype=np.float32)\n cudaDF = cudf.dataframe.DataFrame.from_pandas(pandasDF)\n boolmask = cudf.Series(np.random.randint(1, 2, len(cudaDF)).astype('bool'))\n\n memory_used = query_GPU_memory()\n cudaDF = cudaDF[boolmask]\n\n assert cudaDF.index._values.data.mem.device_ctypes_pointer ==\\\n cudaDF['col0'].index._values.data.mem.device_ctypes_pointer\n assert cudaDF.index._values.data.mem.device_ctypes_pointer ==\\\n cudaDF['col1'].index._values.data.mem.device_ctypes_pointer\n\n assert memory_used == query_GPU_memory()\n\n\ndef test_boolmask(pdf, gdf):\n boolmask = np.random.randint(0, 2, len(pdf)) > 0\n gdf = gdf[boolmask]\n pdf = pdf[boolmask]\n assert_eq(pdf, gdf)\n\n\ndef test_1row_arrow_table():\n data = [pa.array([0]), pa.array([1])]\n batch = pa.RecordBatch.from_arrays(data, ['f0', 'f1'])\n table = pa.Table.from_batches([batch])\n\n expect = table.to_pandas()\n got = DataFrame.from_arrow(table)\n assert_eq(expect, got)\n\n\ndef test_arrow_handle_no_index_name(pdf, gdf):\n gdf_arrow = gdf.to_arrow()\n pdf_arrow = pa.Table.from_pandas(pdf)\n assert pa.Table.equals(pdf_arrow, gdf_arrow)\n\n got = DataFrame.from_arrow(gdf_arrow)\n expect = pdf_arrow.to_pandas()\n assert_eq(expect, got)\n\n\[email protected]('num_rows', [1, 3, 10, 100])\[email protected]('num_bins', [1, 2, 4, 20])\[email protected]('right', [True, False])\[email protected]('dtype', ['int8', 'int16', 'int32', 'int64',\n 'float32', 'float64'])\ndef test_series_digitize(num_rows, num_bins, right, dtype):\n data = np.random.randint(0, 100, num_rows).astype(dtype)\n bins = np.unique(np.sort(np.random.randint(2, 95, num_bins).astype(dtype)))\n s = Series(data)\n indices = s.digitize(bins, right)\n np.testing.assert_array_equal(np.digitize(data, bins, right),\n indices.to_array())\n"
] |
[
[
"pandas.Series",
"numpy.asarray",
"pandas.DataFrame",
"numpy.dtype",
"numpy.all",
"numpy.max",
"numpy.random.randn",
"pandas.testing.assert_frame_equal",
"numpy.random.sample",
"numpy.digitize",
"numpy.random.randint",
"numpy.hstack",
"numpy.testing.assert_equal",
"numpy.arange",
"numpy.zeros",
"pandas.Categorical",
"numpy.random.rand",
"numpy.testing.assert_allclose",
"numpy.array",
"numpy.random.random",
"numpy.random.seed",
"numpy.random.shuffle",
"numpy.ones",
"numpy.testing.assert_array_equal",
"numpy.vstack"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [
"0.23",
"0.21",
"2.0",
"1.4",
"1.1",
"1.5",
"1.2",
"0.24",
"0.20",
"1.0",
"0.25",
"1.3"
],
"scipy": [],
"tensorflow": []
}
] |
kensugino/JUGEMu
|
[
"3ebf19c96e41f1c90d63d772fd4c9c5cc3d6886f"
] |
[
"jgem/plotgenes.py"
] |
[
"# => moved to ponder/plotgene.py\n\nimport pandas as PD\nimport numpy as N\nimport matplotlib.pyplot as P\n\n#import bedtools as BT\n#import gtfgffbed as GGB\nimport jgem.utils as UT\n\nimport random\nfrom matplotlib.patches import Rectangle, PathPatch, Circle, Ellipse\nfrom matplotlib.lines import Line2D\nfrom matplotlib.path import Path\nimport matplotlib.colors as C\nimport matplotlib.cm as CM\n\nclass SpliceFig(object):\n \n \n def __init__(self, ex, sj, xmargin=None, ymargin=0.25, compress=True, ecov='ecov', \n ucnt='ucnt',mcnt='mcnt',minlw=1,drawscalebar=True,ecovth=None,jcntth=None,\n origin=None, sortexby=None,fontsize=7):\n self.ymargin = ymargin\n self.ecov = ecov\n self.ucnt = ucnt\n self.jcnt = jcnt = 'jcnt'\n self.mcnt = mcnt\n self.minlw = minlw\n self.drawscalebar = drawscalebar\n self.ecovth = ecovth\n self.jcntth = jcntth\n self.ex = ex = ex.copy()\n self.sj = sj = sj.copy()\n self.compress = compress\n self.fontsize=fontsize\n if sortexby is None:\n self.sortexby = ecov\n else:\n self.sortexby = sortexby # when plotting multiple and comparing, you want to use same sorting\n # start and end, strand\n \n if ex.iloc[0]['strand']=='+':\n if origin is None:\n origin = ex['st'].min()\n ex['xst'] = ex['st']-origin\n ex['xed'] = ex['ed']-origin\n self.strand='+'\n self.origin=origin\n else:\n if origin is None:\n origin = ex['ed'].max()\n ex['xst'] = origin - ex['ed']\n ex['xed'] = origin - ex['st']\n self.strand='-'\n self.origin=origin\n # fix old a_id null \n if (ex['a_id'].min()==-1) and (N.sum(ex['a_id']==0)==0):\n ex.loc[ex['a_id']==-1, 'a_id'] = 0\n ex.loc[ex['d_id']==-1, 'd_id'] = 0\n sj.loc[sj['a_id']==-1, 'a_id'] = 0\n sj.loc[sj['d_id']==-1, 'd_id'] = 0\n \n ex['len'] = ex['xed'] - ex['xst']\n if xmargin is None:\n xmargin = int(ex['len'].mean())\n self.xmargin = xmargin\n\n if ecov not in ex.columns:\n ex[ecov] = 1\n if (ucnt not in sj.columns) or (mcnt not in sj.columns):\n sj[jcnt] = 1\n sj[jcnt+'_ls'] = 'solid'\n else:\n # sj uniq, mult\n sj[jcnt] = [x or y for x,y in sj[[ucnt,mcnt]].values]\n sj[jcnt+'_ls'] = ['solid' if x else 'dashed' for x in sj[ucnt]]\n\n if ecovth is not None:\n self.ex = ex = ex[ex[ecov]>ecovth].copy()\n if jcntth is not None:\n self.sj = sj = sj[sj[jcnt]>jcntth].copy()\n if len(ex)==0:\n return\n\n # find exon groups\n if 'asize' not in ex.columns:\n a2size = dict(UT.izipcols(ex.groupby('a_id').size().reset_index(), ['a_id',0]))\n d2size = dict(UT.izipcols(ex.groupby('d_id').size().reset_index(), ['d_id',0]))\n a2size[0]=0\n d2size[0]=0\n ex['asize'] = [a2size[x] for x in ex['a_id']]\n ex['dsize'] = [d2size[x] for x in ex['d_id']]\n ex['group'] = ['a{0}'.format(ai) if (a!=0 and a>d) else'd{0}'.format(di) for a,ai,d,di in ex[['asize','a_id','dsize','d_id']].values]\n # find exon group st, ed\n exg = ex.groupby('group')\n g2st = dict(UT.izipcols(exg['xst'].min().reset_index(), ['group','xst']))\n g2ed = dict(UT.izipcols(exg['xed'].max().reset_index(), ['group','xed']))\n g2size = dict(UT.izipcols(exg.size().reset_index(), ['group',0]))\n ex['gst'] = [g2st[x] for x in ex['group']]\n ex['ged'] = [g2ed[x] for x in ex['group']]\n ex['gsize'] = [g2size[x] for x in ex['group']]\n #self.ex = ex = ex.sort_values(['group',ecov]) #'gst','ged','xst','xed'])\n self.ex = ex = ex.sort_values(['group',self.sortexby]) #'gst','ged','xst','xed'])\n # find exon y pos within group\n def _eypos(gs):\n g0,s0 = gs[0] # first g\n cnt = 0\n yield cnt - (s0-1)/2.\n for g1,s1 in gs[1:]:\n if g1==g0:\n cnt +=1\n else:\n cnt = 0\n yield cnt - (s1-1)/2.\n g0 = g1\n 
ex['eypos'] = [x for x in _eypos(ex[['group','gsize']].values)]\n # find group y center pos\n self.gr = gr = ex.groupby('group')[['gst','ged','gsize']].first().sort_values(['gst','ged'])\n gr['len'] = gr['ged']-gr['gst']\n def _gypos(gr):\n side = 1\n r0 = gr.iloc[0]\n h = r0['gsize']/2.\n ged0 = r0['ged']\n gy0 = {1:h, -1:-h} # remember filled height both side (1,-1)\n yield 0 # first one gets center\n for gst1,ged1,gsiz1 in gr[['gst','ged','gsize']].values[1:]:\n h = gsiz1/2.\n if ged0<=gst1: # no overlap\n gy0 = {1:h, -1:-h}\n yield 0\n else:\n gy1 = gy0[side] + side*gsiz1/2.\n gy0[side] = gy0[side] + side*gsiz1\n side = -1*side # flip side\n yield gy1\n gst0 = gst1\n ged0 = max(ged0, ged1)\n gr['gypos'] = [x for x in _gypos(gr)]\n # compress x coord\n if compress:\n def _gxst(gr):\n r0 = gr.iloc[0]\n delta = 0\n yield r0['gst'] - delta # 0\n ged0 = r0['ged']\n for i, r1 in gr.iloc[1:].iterrows():\n gst1 = r1['gst']\n if gst1-ged0>self.xmargin:\n delta += (gst1-ged0-self.xmargin)\n yield gst1 - delta\n ged0 = r1['ged']\n gr['cst'] = [x for x in _gxst(gr)]\n else:\n gr['cst'] = gr['gst']\n #gr['ced'] = gr['cst']+gr['len']\n ex['cst0'] = [gr['cst'].ix[g]+(xst-gst) for g,xst,gst in ex[['group','xst','gst']].values]\n ex['ced0'] = ex['cst0']+ex['len']\n if self.strand=='+':\n ex['cst'] = origin + ex['cst0']\n ex['ced'] = origin + ex['ced0']\n else:\n ex['cst'] = origin - ex['ced0']\n ex['ced'] = origin - ex['cst0']\n ex['ey'] = [ey+gr['gypos'].ix[g] for ey,g in ex[['eypos','group']].values]\n\n def draw(self, ax=None, cm='R', cm2='G', ecov2=None, jcnt2=None, xlim=None):\n if len(self.ex)==0:\n return\n if ax is None:\n fig,ax = P.subplots(1,1,figsize=(12,5))\n self.draw_junctions(ax, cm=cm, jcnt=self.jcnt, cm2=cm2, jcnt2=jcnt2, ecov=self.ecov, ecov2=ecov2,xlim=xlim)\n self.draw_exons(ax, cm=cm, ecov=self.ecov, cm2=cm2, ecov2=ecov2, xlim=xlim)\n if self.drawscalebar:\n self.draw_bar(ax)\n\n def draw_bar(self, ax):\n # 1kb scale bar at right top\n ylim = ax.get_ylim()\n xlim = ax.get_xlim()\n yd = ylim[1]-ylim[0]\n xd = xlim[1]-xlim[0]\n ypos = ylim[0]+yd*0.8\n ypos2 = ylim[0]+yd*0.9\n xpos = xlim[0]+xd*0.9\n xpos1 = max(xlim[0], xpos-1000)\n xpos2 = max(xlim[0], xpos-500)\n ax.plot([xpos1, xpos], [ypos, ypos], 'k-', lw=2)\n suf = '({0})'.format(self.strand)\n if self.compress:\n suf = '({0},c)'.format(self.strand) #'(intron compressed)'\n if xpos1==xpos-1000:\n ax.text(xpos2,ypos2,'1kb{0}'.format(suf),fontsize=self.fontsize)\n else:\n ax.text(xpos2,ypos2,'{0:d}bp{1}'.format(xpos-xpos1,suf),fontsize=self.fontsize)\n \n def draw_exons(self, ax, cm='Reds', ecov='ecov', cm2='Greens', ecov2=None, xlim=None):\n ex = self.ex\n hh = 0.5-self.ymargin\n emax = N.log2(ex[ecov].max()+1)\n #print 'emax=', ex[ecov].max()\n sm = self.get_scalarmap(emax,mapname=cm)\n if ecov2 is None:\n for x0,w,y,ec in ex[['cst','len','ey',ecov]].values:\n #print 'ec=',ec\n ec = N.log2(ec+1)\n fc = sm.to_rgba(ec)\n #eclr = sm2.to_rgba(ec)\n lw = max(0.3, ec/emax)\n ax.add_patch(Rectangle((x0,y-hh),w,2*hh,facecolor=fc,lw=lw,alpha=0.9))#edgecolor=ec))\n else:\n emax2 = N.log2(ex[ecov2].max()+1)\n sm2 = self.get_scalarmap(emax2,mapname=cm2)\n for x0,w,y,ec,ec2 in ex[['cst','len','ey',ecov,ecov2]].values:\n ec = N.log2(ec+1)\n ec2 = N.log2(ec2+1)\n fc = sm.to_rgba(ec)\n fc2 = sm2.to_rgba(ec2)\n fc3 = self._addcolor(fc,fc2)\n lw = N.max([0.3, ec/emax, ec2/emax2])\n ax.add_patch(Rectangle((x0,y-hh),w,2*hh,facecolor=fc3,lw=lw,alpha=0.9))#edgecolor=ec))\n\n xmi = ex['cst'].min()\n xma = ex['ced'].max()\n ymi = ex['ey'].min()\n yma 
= ex['ey'].max()\n if xlim is None:\n ax.set_xlim([xmi-self.xmargin, xma+self.xmargin])\n else:\n ax.set_xlim(xlim)\n d = 2+self.ymargin\n ax.set_ylim([ymi-d, yma+d])\n\n def _addcolor(self,fc,fc2):\n #return tuple(map(lambda x: min(x, 1.), N.array(fc)+N.array(fc2)))\n #return tuple([max(x,y) for x,y in zip(fc,fc2)])\n return tuple([min(x,y) for x,y in zip(fc,fc2)])\n #return tuple((N.array(fc)+N.array(fc2))/2.)\n \n def draw_junctions(self, ax, cm='Reds', jcnt='jcnt', cm2='Greens', jcnt2=None, ecov='ecov', ecov2=None, xlim=None):\n ex = self.ex\n sj = self.sj.sort_values(['d_id',jcnt],ascending=False)\n if len(sj)==0:\n return\n maxjcnt = N.log2(sj[jcnt].max()+1)\n print('sjmax:{0},sjmaxidx:{1},locus: {2}:{3}-{4}'.format(sj[jcnt].max(), sj[jcnt].argmax(), ex['chr'].iloc[0], ex['st'].min(), ex['ed'].max()))\n sm = self.get_scalarmap(maxjcnt,mapname=cm)\n lwmax = 2\n if xlim is None:\n xw = ex['ced'].max()-ex['cst'].min()\n else:\n xw = xlim[1]-xlim[0]\n yw = ex['ey'].max()-ex['ey'].min()\n # cw = 0.01*xw\n # ch = 0.1*(1-2*self.ymargin)\n minlw = self.minlw\n self._side=1\n print('xw=',xw, 'xlim=', xlim, 'yw=', yw)\n def _draw(x0,y0,x1,y1,ec,lw,ls):\n global dcnt\n if (len(ex)>2):\n pts = self.bezierpts(x0,y0,x1,y1,xw,yw)\n a = Path(pts, [Path.MOVETO, Path.CURVE4, Path.CURVE4, Path.CURVE4])\n ax.add_patch(PathPatch(a, fc='none', alpha=0.8, lw=lw, ec=ec, linestyle=ls))\n else:\n ax.add_line(Line2D((x0,x1),(y0,y1),alpha=0.9, lw=lw, color=ec,linestyle=ls))\n # place dot in acceptor/donor\n #ax.add_patch(Ellipse((x0,y0), width=cw, height=ch, facecolor='k'))\n #ax.add_patch(Ellipse((x1,y1), width=cw, height=ch, facecolor='k'))\n #ax.plot([x0],[y0],'k.',ms=1.5)\n #ax.plot([x1],[y1],'k.',ms=1.5)\n\n d0 = sj.iloc[0]['d_id']\n afld = 'cst' if self.strand=='+' else 'ced'\n dfld = 'ced' if self.strand=='+' else 'cst'\n #print('afld:{0},dfld:{1},strand:{2}'.format(afld,dfld,self.strand))\n\n if jcnt2 is None:\n for ai,di,jc,ls in sj[['a_id','d_id',jcnt,jcnt+'_ls']].values:\n if di!=d0:\n self._side=1 # initialize\n d0 = di\n # connect donor to acceptor\n jc = N.log2(jc+1)\n ea = ex[ex['a_id']==ai]\n ed = ex[ex['d_id']==di]\n c1m = N.log2(ea[ecov].max()+1)\n c0m = N.log2(ed[ecov].max()+1)\n for x1,y1,c1,i1 in ea[[afld,'ey',ecov,'_id']].values:\n c1 = N.log2(c1+1)\n for x0,y0,c0,i0 in ed[[dfld,'ey',ecov,'_id']].values:\n c0 = N.log2(c0+1)\n jc1 = jc*max(0.3, (c1/c1m)*(c0/c0m))\n if jc1==0:\n ec = sm.to_rgba(0.7*maxjcnt)\n _draw(x0,y0,x1,y1,ec,2,'dotted')\n else:\n ec = sm.to_rgba(jc1)\n lw = max(minlw,lwmax*jc1/maxjcnt)\n _draw(x0,y0,x1,y1,ec,lw,ls)\n if y0==0 and y1==0:\n self._side *= -1 # flip\n else:\n maxjcnt2 = N.log2(sj[jcnt2].max()+1)\n sm2 = self.get_scalarmap(maxjcnt2,mapname=cm2)\n for ai,di,jc,jc2,ls in sj[['a_id','d_id',jcnt,jcnt2,jcnt+'_ls']].values:\n if di!=d0:\n self._side=1\n d0=di\n # connect donor to acceptor\n jc = N.log2(jc+1)\n jc2 = N.log2(jc2+1)\n ea = ex[ex['a_id']==ai]\n ed = ex[ex['d_id']==di]\n c1m = N.log2(ea[ecov].max()+1)\n c0m = N.log2(ed[ecov].max()+1)\n c1m2 = N.log2(ea[ecov2].max()+1)\n c0m2 = N.log2(ed[ecov2].max()+1)\n for x1,y1,c1,c12 in ea[[afld,'ey',ecov,ecov2]].values:\n c1 = N.log2(c1+1)\n c12 = N.log2(c12+1)\n for x0,y0,c0,c02 in ed[[dfld,'ey',ecov,ecov2]].values:\n c0 = N.log2(c0+1)\n c02 = N.log2(c02+1)\n jc1 = jc*max(0.3, (c1/c1m)*(c0/c0m))\n jc21 = jc2*max(0.3, (c12/c1m2)*(c02/c0m2))\n lw = max(minlw, lwmax*N.max([jc1/maxjcnt,jc21/maxjcnt2]))\n if (jc1==0) and (jc21==0):\n ec = sm.to_rgba(0.5*maxjcnt)\n ec2 = sm2.to_rgba(0.5*maxjcnt2)\n ec3 = 
self._addcolor(ec,ec2)\n _draw(x0,y0,x1,y1,ec3,2,'dotted')\n else:\n ec = sm.to_rgba(jc1)\n ec2 = sm2.to_rgba(jc21)\n ec3 = self._addcolor(ec,ec2)\n _draw(x0,y0,x1,y1,ec3,lw,ls)\n if y0==0 and y1==0:\n self._side *=-1\n \n \n def get_scalarmap(self,vmax,vmin=0, mapname='Reds'):\n return Colors(mapname, vmax, vmin)\n \n def bezierpts(self,x0,y0,x1,y1,xw,yw):\n width = x1-x0\n off = 0.1\n cx0 = x0+width*off\n cx1 = x0+width*(1-off)\n coef = 1\n coef2 = (width/float(xw)-0.1)*4\n if y0==0:\n if y1==0:\n d = self._side\n elif y1<0:\n d = -1\n else:\n d = 1\n elif (y0>0):\n d = 1\n else:\n d = -1\n cy0 = y0+d*coef\n cy1 = y1+d*coef\n if y0<y1:\n if d==1:\n cy0 += (y1-y0)*coef2\n else:\n cy1 -= (y1-y0)*coef2\n elif y0>y1:\n if d==1:\n cy1 += (y0-y1)*coef2\n else:\n cy0 -= (y0-y1)*coef2\n return [(x0,y0),(cx0,cy0),(cx1,cy1),(x1,y1)]\n\n \n \nclass Colors(object):\n \n def __init__(self, mapname, vmax, vmin=0, nl=32):\n self.mn = mapname\n self.vmin = vmin\n self.vmax = vmax\n self.d = d = 1./nl\n if mapname=='C':\n self.rgba = [(1.-x,1.,1.,1.) for x in N.arange(0,1+d,d)]\n elif mapname=='M':\n self.rgba = [(1.,1.-x,1.,1.) for x in N.arange(0,1+d,d)]\n elif mapname=='Y':\n self.rgba = [(1.,1.,1.-x,1.) for x in N.arange(0,1+d,d)]\n elif mapname=='R':\n self.rgba = [(1.,1.-x,1.-x,1.) for x in N.arange(0,1+d,d)]\n elif mapname=='G':\n self.rgba = [(1.-x,1.,1.-x,1.) for x in N.arange(0,1+d,d)]\n elif mapname=='B':\n self.rgba = [(1.-x,1.-x,1.,1.) for x in N.arange(0,1+d,d)]\n else:\n cm = P.get_cmap(mapname)\n cnorm = C.Normalize(vmin=0,vmax=1.)\n self.sm = sm = CM.ScalarMappable(norm=cnorm,cmap=cm)\n self.rgba = [sm.to_rgba(x) for x in N.arange(0,1+d,d)]\n \n def to_rgba(self, v):\n d = self.d\n if self.mn in ['R','G','B','C','M','Y']:\n vn = max(0., (v-self.vmin)/self.vmax)\n vn = min(1., vn)\n vni = int(vn/d)\n return self.rgba[vni]\n return self.sm.to_rgba(v)\n \n \n"
] |
[
[
"numpy.log2",
"numpy.arange",
"matplotlib.patches.Rectangle",
"matplotlib.path.Path",
"matplotlib.lines.Line2D",
"matplotlib.pyplot.subplots",
"matplotlib.pyplot.get_cmap",
"matplotlib.colors.Normalize",
"numpy.max",
"matplotlib.cm.ScalarMappable",
"numpy.sum",
"matplotlib.patches.PathPatch"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
gundun/theano
|
[
"09d17fff10487dca7149e34601b8c6efdc572a19",
"09d17fff10487dca7149e34601b8c6efdc572a19",
"09d17fff10487dca7149e34601b8c6efdc572a19"
] |
[
"theano/tensor/nnet/tests/test_conv3d2d.py",
"theano/scan_module/tests/test_scan_checkpoints.py",
"theano/sandbox/cuda/tests/test_dnn.py"
] |
[
"from __future__ import absolute_import, print_function, division\nimport time\n\nfrom nose.plugins.skip import SkipTest\nfrom nose_parameterized import parameterized\nimport numpy\ntry:\n from scipy import ndimage\nexcept ImportError:\n ndimage = None\nfrom six.moves import xrange\n\nimport theano\nfrom theano.gof.opt import check_stack_trace\nfrom theano.tensor.nnet.conv3d2d import conv3d, get_diagonal_subtensor_view, DiagonalSubtensor, IncDiagonalSubtensor\nimport theano.tests.unittest_tools as utt\n\n\nif theano.config.mode == 'FAST_COMPILE':\n mode_without_gpu = theano.compile.mode.get_mode('FAST_RUN').excluding('gpu')\nelse:\n mode_without_gpu = theano.compile.mode.get_default_mode().excluding('gpu')\n\n\ndef test_get_diagonal_subtensor_view(wrap=lambda a: a):\n x = numpy.arange(20).reshape(5, 4).astype('float32')\n x = wrap(x)\n xv01 = get_diagonal_subtensor_view(x, 0, 1)\n\n # test that it works in 2d\n assert numpy.all(numpy.asarray(xv01) == [[12, 9, 6, 3], [16, 13, 10, 7]])\n\n x = numpy.arange(24).reshape(4, 3, 2)\n xv01 = get_diagonal_subtensor_view(x, 0, 1)\n xv02 = get_diagonal_subtensor_view(x, 0, 2)\n xv12 = get_diagonal_subtensor_view(x, 1, 2)\n\n # print 'x', x\n # print 'xv01', xv01\n # print 'xv02', xv02\n assert numpy.all(numpy.asarray(xv01) == [\n [[12, 13], [8, 9], [4, 5]],\n [[18, 19], [14, 15], [10, 11]]])\n\n assert numpy.all(numpy.asarray(xv02) == [\n [[6, 1], [8, 3], [10, 5]],\n [[12, 7], [14, 9], [16, 11]],\n [[18, 13], [20, 15], [22, 17]],\n ])\n\n # diagonal views of each leading matrix is the same\n # as the slices out of the diagonal view of the entire 3d tensor\n for xi, xvi in zip(x, xv12):\n assert numpy.all(xvi == get_diagonal_subtensor_view(xi, 0, 1))\n\n\ndef pyconv3d(signals, filters, border_mode='valid'):\n Ns, Ts, C, Hs, Ws = signals.shape\n Nf, Tf, C, Hf, Wf = filters.shape\n\n # if border_mode is not 'valid', the signals need zero-padding\n if border_mode == 'full':\n Tpad = Tf - 1\n Hpad = Hf - 1\n Wpad = Wf - 1\n elif border_mode == 'half':\n Tpad = Tf // 2\n Hpad = Hf // 2\n Wpad = Wf // 2\n else:\n Tpad = 0\n Hpad = 0\n Wpad = 0\n\n if Tpad > 0 or Hpad > 0 or Wpad > 0:\n # zero-pad signals\n signals_padded = numpy.zeros((Ns, Ts + 2 * Tpad, C,\n Hs + 2 * Hpad, Ws + 2 * Wpad), 'float32')\n signals_padded[:, Tpad:(Ts + Tpad), :, Hpad:(Hs + Hpad),\n Wpad:(Ws + Wpad)] = signals\n Ns, Ts, C, Hs, Ws = signals_padded.shape\n signals = signals_padded\n\n Tf2 = Tf // 2\n Hf2 = Hf // 2\n Wf2 = Wf // 2\n\n rval = numpy.zeros((Ns, Ts - Tf + 1, Nf, Hs - Hf + 1, Ws - Wf + 1))\n for ns in xrange(Ns):\n for nf in xrange(Nf):\n for c in xrange(C):\n s_i = signals[ns, :, c, :, :]\n f_i = filters[nf, :, c, :, :]\n r_i = rval[ns, :, nf, :, :]\n o_i = ndimage.convolve(s_i, f_i, mode='constant', cval=1)\n o_i_sh0 = o_i.shape[0]\n # print s_i.shape, f_i.shape, r_i.shape, o_i.shape\n r_i += o_i[Tf2:o_i_sh0 - Tf2, Hf2:-Hf2, Wf2:-Wf2]\n return rval\n\n\ndef check_diagonal_subtensor_view_traces(fn):\n assert check_stack_trace(\n fn, ops_to_check=(DiagonalSubtensor, IncDiagonalSubtensor))\n\n\[email protected](('valid', 'full', 'half'), utt.custom_name_func)\ndef test_conv3d(border_mode):\n check_conv3d(border_mode=border_mode,\n mode=mode_without_gpu,\n shared=theano.tensor._shared)\n\n\n# This function will also be used in theano/sandbox/cuda/tests/test_tensor_op.py,\n# which is not possible if it is decorated by @parameterized.expand\ndef check_conv3d(border_mode, mode=mode_without_gpu, shared=theano.tensor._shared):\n if ndimage is None:\n raise SkipTest(\"conv3d2d 
tests need SciPy\")\n\n Ns, Ts, C, Hs, Ws = 3, 10, 3, 32, 32\n Nf, Tf, C, Hf, Wf = 32, 5, 3, 5, 5\n\n signals = numpy.arange(Ns * Ts * C * Hs * Ws).reshape(Ns, Ts, C, Hs, Ws).astype('float32')\n filters = numpy.arange(Nf * Tf * C * Hf * Wf).reshape(Nf, Tf, C, Hf, Wf).astype('float32')\n\n t0 = time.time()\n pyres = pyconv3d(signals, filters, border_mode)\n print(time.time() - t0)\n\n s_signals = shared(signals)\n s_filters = shared(filters)\n s_output = shared(signals * 0)\n\n out = conv3d(s_signals, s_filters,\n signals_shape=signals.shape,\n filters_shape=filters.shape,\n border_mode=border_mode)\n\n newconv3d = theano.function([], [],\n updates={s_output: out},\n mode=mode)\n\n check_diagonal_subtensor_view_traces(newconv3d)\n t0 = time.time()\n newconv3d()\n print(time.time() - t0)\n utt.assert_allclose(pyres, s_output.get_value(borrow=True))\n gsignals, gfilters = theano.grad(out.sum(), [s_signals, s_filters])\n gnewconv3d = theano.function([], [],\n updates=[(s_filters, gfilters),\n (s_signals, gsignals)],\n mode=mode,\n name='grad')\n check_diagonal_subtensor_view_traces(gnewconv3d)\n\n t0 = time.time()\n gnewconv3d()\n print('grad', time.time() - t0)\n\n Ns, Ts, C, Hs, Ws = 3, 3, 3, 5, 5\n Nf, Tf, C, Hf, Wf = 4, 2, 3, 2, 2\n\n signals = numpy.random.rand(Ns, Ts, C, Hs, Ws).astype('float32')\n filters = numpy.random.rand(Nf, Tf, C, Hf, Wf).astype('float32')\n utt.verify_grad(lambda s, f: conv3d(s, f, border_mode=border_mode),\n [signals, filters], eps=1e-1, mode=mode)\n\n # Additional Test that covers the case of patched implementation for filter with Tf=1\n Ns, Ts, C, Hs, Ws = 3, 10, 3, 32, 32\n Nf, Tf, C, Hf, Wf = 32, 1, 3, 5, 5\n\n signals = numpy.arange(Ns * Ts * C * Hs * Ws).reshape(Ns, Ts, C, Hs, Ws).astype('float32')\n filters = numpy.arange(Nf * Tf * C * Hf * Wf).reshape(Nf, Tf, C, Hf, Wf).astype('float32')\n\n t0 = time.time()\n pyres = pyconv3d(signals, filters, border_mode)\n print(time.time() - t0)\n\n s_signals = shared(signals)\n s_filters = shared(filters)\n s_output = shared(signals * 0)\n\n out = conv3d(s_signals, s_filters,\n signals_shape=signals.shape,\n filters_shape=filters.shape,\n border_mode=border_mode)\n\n newconv3d = theano.function([], [],\n updates={s_output: out},\n mode=mode)\n\n t0 = time.time()\n newconv3d()\n print(time.time() - t0)\n utt.assert_allclose(pyres, s_output.get_value(borrow=True))\n gsignals, gfilters = theano.grad(out.sum(), [s_signals, s_filters])\n gnewconv3d = theano.function([], [],\n updates=[(s_filters, gfilters),\n (s_signals, gsignals)],\n mode=mode,\n name='grad')\n\n t0 = time.time()\n gnewconv3d()\n print('grad', time.time() - t0)\n\n Ns, Ts, C, Hs, Ws = 3, 3, 3, 5, 5\n Nf, Tf, C, Hf, Wf = 4, 1, 3, 2, 2\n\n signals = numpy.random.rand(Ns, Ts, C, Hs, Ws).astype('float32')\n filters = numpy.random.rand(Nf, Tf, C, Hf, Wf).astype('float32')\n utt.verify_grad(lambda s, f: conv3d(s, f, border_mode=border_mode),\n [signals, filters], eps=1e-1, mode=mode)\n",
"from __future__ import absolute_import, print_function, division\n\nimport numpy\nimport unittest\n\nimport theano\nimport theano.tensor as T\n\ntry:\n from pygpu.gpuarray import GpuArrayException\n PYGPU_AVAILABLE = True\nexcept ImportError:\n PYGPU_AVAILABLE = False\n\n\nclass TestScanCheckpoint(unittest.TestCase):\n\n def setUp(self):\n self.k = T.iscalar(\"k\")\n self.A = T.vector(\"A\")\n result, _ = theano.scan(\n fn=lambda prior_result, A: prior_result * A,\n outputs_info=T.ones_like(self.A),\n non_sequences=self.A,\n n_steps=self.k)\n result_check, _ = theano.scan_checkpoints(\n fn=lambda prior_result, A: prior_result * A,\n outputs_info=T.ones_like(self.A),\n non_sequences=self.A,\n n_steps=self.k,\n save_every_N=100)\n self.result = result[-1]\n self.result_check = result_check[-1]\n self.grad_A = T.grad(self.result.sum(), self.A)\n self.grad_A_check = T.grad(self.result_check.sum(), self.A)\n\n def test_forward_pass(self):\n \"\"\"Test forward computation of A**k.\"\"\"\n f = theano.function(inputs=[self.A, self.k],\n outputs=[self.result, self.result_check])\n out, out_check = f(range(10), 100)\n assert numpy.allclose(out, out_check)\n\n def test_backward_pass(self):\n \"\"\"Test gradient computation of A**k.\"\"\"\n f = theano.function(inputs=[self.A, self.k],\n outputs=[self.grad_A, self.grad_A_check])\n out, out_check = f(range(10), 100)\n assert numpy.allclose(out, out_check)\n\n @unittest.skipUnless(PYGPU_AVAILABLE, 'Requires pygpu.')\n def test_memory(self):\n \"\"\"Test that scan_checkpoint reduces memory usage.\"\"\"\n if None not in theano.gpuarray.type.list_contexts():\n return unittest.SkipTest('Requires gpuarray backend.')\n f = theano.function(inputs=[self.A, self.k],\n outputs=self.grad_A)\n f_check = theano.function(inputs=[self.A, self.k],\n outputs=self.grad_A_check)\n free_gmem = theano.gpuarray.type._context_reg[None].free_gmem\n data = numpy.ones(free_gmem / 3000, dtype=numpy.float32)\n # Check that it works with the checkpoints\n f_check(data, 1000)\n # Check that the basic scan fails in that case\n self.assertRaises(GpuArrayException, f, data, 1000)\n\n def test_taps_error(self):\n \"\"\"Test that an error rises if we use taps in outputs_info.\"\"\"\n self.assertRaises(RuntimeError, theano.scan_checkpoints,\n lambda: None, [], {'initial': self.A, 'taps': [-2]})\n",
"from __future__ import absolute_import, print_function, division\nimport logging\nimport os\nimport sys\n\nfrom nose.plugins.skip import SkipTest\nfrom itertools import chain, product\nimport six.moves.cPickle as pickle\nfrom six import StringIO\nfrom six import reraise\n\nimport numpy\n\nimport theano\nimport theano.tensor as T\nimport theano.tests.unittest_tools as utt\nfrom theano.tensor.signal.pool import pool_2d, pool_3d\nfrom theano.tensor.signal.pool import Pool, MaxPoolGrad, AveragePoolGrad\nimport theano.sandbox.cuda.dnn as dnn\nfrom theano.sandbox.cuda.basic_ops import GpuAllocEmpty, gpu_alloc_empty\nfrom theano.sandbox.cuda import float32_shared_constructor as shared\n\nfrom . import test_nnet\n\n# Skip test if cuda_ndarray is not available.\nimport theano.sandbox.cuda as cuda\nif not cuda.cuda_available:\n raise SkipTest('Optional package cuda disabled')\n\nif theano.config.mode == 'FAST_COMPILE':\n mode_with_gpu = theano.compile.mode.get_mode('FAST_RUN').including('gpu')\n mode_without_gpu = theano.compile.mode.get_mode(\n 'FAST_RUN').excluding('gpu')\nelse:\n mode_with_gpu = theano.compile.mode.get_default_mode().including('gpu')\n mode_without_gpu = theano.compile.mode.get_default_mode().excluding('gpu')\n\n\ndef test_dnn_conv_desc_merge():\n if not cuda.dnn.dnn_available():\n raise SkipTest(cuda.dnn.dnn_available.msg)\n img_shp = T.as_tensor_variable(\n numpy.asarray([2, 1, 8, 8]).astype('int64'))\n kern_shp = T.as_tensor_variable(\n numpy.asarray([3, 1, 2, 2]).astype('int64'))\n desc1 = dnn.GpuDnnConvDesc(border_mode='valid', subsample=(2, 2),\n conv_mode='conv')(img_shp, kern_shp)\n desc2 = dnn.GpuDnnConvDesc(border_mode='full', subsample=(1, 1),\n conv_mode='cross')(img_shp, kern_shp)\n # CDataType is not DeepCopyable so this will crash if we don't use\n # borrow=True\n f = theano.function([], [theano.Out(desc1, borrow=True),\n theano.Out(desc2, borrow=True)],\n mode=mode_with_gpu)\n\n d1, d2 = f()\n\n # This will be the case if they are merged, which would be bad.\n assert d1 != d2\n\n desc1v2 = dnn.GpuDnnConvDesc(border_mode='valid', subsample=(2, 2),\n conv_mode='conv')(img_shp, kern_shp)\n f = theano.function([], [theano.Out(desc1, borrow=True),\n theano.Out(desc1v2, borrow=True)],\n mode=mode_with_gpu)\n assert len([n for n in f.maker.fgraph.apply_nodes\n if isinstance(n.op, dnn.GpuDnnConvDesc)]) == 1\n\n # CDATA type don't equal even if they represent the same object\n # So we can't use debugmode with it.\n if theano.config.mode not in [\"DebugMode\", \"DEBUG_MODE\"]:\n d1, d2 = f()\n\n # They won't be equal if they aren't merged.\n assert d1 == d2\n\n\ndef test_dnn_conv_merge():\n \"\"\"This test that we merge correctly multiple dnn_conv.\n\n This can is more difficult due to GpuEmptyAlloc that aren't\n merged.\n\n \"\"\"\n if not cuda.dnn.dnn_available():\n raise SkipTest(cuda.dnn.dnn_available.msg)\n img_shp = [2, 5, 6, 8]\n kern_shp = [3, 5, 5, 6]\n img = T.ftensor4('img')\n kern = T.ftensor4('kern')\n out = T.ftensor4('out')\n desc = dnn.GpuDnnConvDesc(\n border_mode='valid')(img.shape, kern.shape)\n\n # Test forward op\n o1 = dnn.dnn_conv(img, kern)\n o2 = dnn.dnn_conv(img, kern)\n f = theano.function([img, kern], [o1, o2], mode=mode_with_gpu)\n d1, d2 = f(numpy.random.rand(*img_shp).astype('float32'),\n numpy.random.rand(*kern_shp).astype('float32'))\n topo = f.maker.fgraph.toposort()\n assert len([n for n in topo if isinstance(n.op, dnn.GpuDnnConv)]) == 1\n\n # Test grad w op\n o1 = dnn.GpuDnnConvGradW()(img, kern, out, desc)\n o2 = 
dnn.GpuDnnConvGradW()(img, kern, out, desc)\n f = theano.function([img, kern, out], [o1, o2], mode=mode_with_gpu)\n topo = f.maker.fgraph.toposort()\n assert len([n for n in topo if isinstance(n.op, dnn.GpuDnnConvGradW)]) == 1\n\n # Test grad i op\n o1 = dnn.GpuDnnConvGradI()(img, kern, out, desc)\n o2 = dnn.GpuDnnConvGradI()(img, kern, out, desc)\n f = theano.function([img, kern, out], [o1, o2], mode=mode_with_gpu)\n topo = f.maker.fgraph.toposort()\n assert len([n for n in topo if isinstance(n.op, dnn.GpuDnnConvGradI)]) == 1\n\n\ndef test_dnn_conv_inplace():\n \"\"\"This test that we have inplace work correctly even when\n GpuAllocEmpty get merged together.\n\n \"\"\"\n if not cuda.dnn.dnn_available():\n raise SkipTest(cuda.dnn.dnn_available.msg)\n img_shp = [2, 5, 6, 8]\n kern_shp = [3, 5, 5, 6]\n img = T.ftensor4('img')\n kern = T.ftensor4('kern')\n out = T.ftensor4('out')\n desc1 = dnn.GpuDnnConvDesc(border_mode='valid', conv_mode='conv')(\n img.shape, kern.shape)\n desc2 = dnn.GpuDnnConvDesc(\n border_mode='valid', conv_mode='cross')(img.shape, kern.shape)\n\n # Test forward op\n o1 = dnn.dnn_conv(img, kern, conv_mode='conv')\n o2 = dnn.dnn_conv(img, kern, conv_mode='cross')\n f = theano.function([img, kern], [o1, o2], mode=mode_with_gpu)\n d1, d2 = f(numpy.random.rand(*img_shp).astype('float32'),\n numpy.random.rand(*kern_shp).astype('float32'))\n topo = f.maker.fgraph.toposort()\n convs = [n for n in topo if isinstance(n.op, dnn.GpuDnnConv)]\n assert len(convs) == 2\n assert all([node.op.inplace for node in convs])\n assert len([n for n in topo if isinstance(n.op, GpuAllocEmpty)]) == 2\n\n # Test grad w op\n out = gpu_alloc_empty(*kern.shape)\n o1 = dnn.GpuDnnConvGradW()(img, kern, out, desc1)\n o2 = dnn.GpuDnnConvGradW()(img, kern, out, desc2)\n f = theano.function([img, kern], [o1, o2], mode=mode_with_gpu)\n topo = f.maker.fgraph.toposort()\n convs = [n for n in topo if isinstance(n.op, dnn.GpuDnnConvGradW)]\n assert len(convs) == 2\n assert all([node.op.inplace for node in convs])\n assert len([n for n in topo if isinstance(n.op, GpuAllocEmpty)]) == 2\n\n # Test grad i op\n out = gpu_alloc_empty(*img.shape)\n o1 = dnn.GpuDnnConvGradI()(img, kern, out, desc1)\n o2 = dnn.GpuDnnConvGradI()(img, kern, out, desc2)\n f = theano.function([img, kern], [o1, o2], mode=mode_with_gpu)\n topo = f.maker.fgraph.toposort()\n convs = [n for n in topo if isinstance(n.op, dnn.GpuDnnConvGradI)]\n assert len(convs) == 2\n assert all([node.op.inplace for node in convs])\n assert len([n for n in topo if isinstance(n.op, GpuAllocEmpty)]) == 2\n\n\ndef test_pooling():\n if not cuda.dnn.dnn_available():\n raise SkipTest(cuda.dnn.dnn_available.msg)\n\n # 'average_exc_pad' is disabled for versions < 4004\n if cuda.dnn.version() < (4004, 4004):\n modes = ('max', 'average_inc_pad')\n else:\n modes = ('max', 'average_inc_pad', 'average_exc_pad')\n\n x = T.ftensor4()\n for mode, pad in product(modes,\n ((0, 0), (1, 0), (0, 1), (2, 3), (3, 2))):\n if pad != (0, 0) and mode == 'average_exc_pad':\n # Not implemented\n continue\n\n for ws in (4, 2, 5):\n for stride in (2, 3):\n if stride > ws:\n continue\n if pad[0] > stride or pad[1] > stride:\n # Not implemented\n continue\n # We will check that the opt introduced it.\n out = pool_2d(x, (ws, ws),\n st=(stride, stride),\n ignore_border=True,\n padding=pad, mode=mode)\n mode_without_gpu2 = mode_without_gpu.including()\n mode_without_gpu2.check_isfinite = False\n\n # GPU implementation\n f_gpu = theano.function([x], out, mode=mode_with_gpu)\n assert 
any([isinstance(node.op, cuda.dnn.GpuDnnPool)\n for node in f_gpu.maker.fgraph.apply_nodes])\n\n # CPU implementation\n f_cpu = theano.function([x], out, mode=mode_without_gpu2)\n assert not any([isinstance(node.op, cuda.dnn.GpuDnnPool)\n for node in f_cpu.maker.fgraph.apply_nodes])\n assert any([isinstance(node.op, Pool)\n for node in f_cpu.maker.fgraph.apply_nodes])\n\n for shp in [(1, 10, 100, 100),\n (1, 3, 99, 99),\n (32, 1, 147, 197),\n ]:\n data = numpy.random.normal(0, 1, shp).astype(\"float32\")\n a = f_cpu(data).__array__()\n b = f_gpu(data).__array__()\n utt.assert_allclose(a, b)\n\n # Test the grad\n for shp in [(1, 1, 2, 2),\n (1, 1, 3, 3)]:\n data = numpy.random.normal(0, 1, shp).astype(\"float32\") * 10\n\n ws = 2\n stride = 2\n if pad[0] > stride or pad[1] > stride:\n # Not implemented\n continue\n\n # This tests the CPU grad + opt + GPU implementation\n def fn(x):\n return pool_2d(x, (ws, ws), ignore_border=True,\n padding=pad, mode=mode)\n utt.verify_grad(fn, [data], mode=mode_with_gpu)\n # Confirm that the opt would have inserted it.\n fg = theano.function([x], theano.grad(fn(x).sum(), x),\n mode=mode_with_gpu)\n assert any([isinstance(node.op, cuda.dnn.GpuDnnPoolGrad)\n for node in fg.maker.fgraph.toposort()])\n\n # Test the GPU grad + GPU implementation\n def fn(x):\n dnn_op = cuda.dnn.dnn_pool(\n x, ws=(ws, ws),\n stride=(stride, stride),\n pad=pad,\n mode=mode)\n return dnn_op\n utt.verify_grad(fn, [data], mode=mode_with_gpu)\n # Confirm that we get the good op.\n fg = theano.function([x], theano.grad(fn(x).sum(), x),\n mode=mode_with_gpu)\n assert any([isinstance(node.op, cuda.dnn.GpuDnnPoolGrad)\n for node in fg.maker.fgraph.toposort()])\n\n\ndef test_pooling_with_tensor_vars():\n if not cuda.dnn.dnn_available():\n raise SkipTest(cuda.dnn.dnn_available.msg)\n x = T.ftensor4()\n ws = theano.shared(numpy.array([2, 2], dtype='int32'))\n st = theano.shared(numpy.array([1, 1], dtype='int32'))\n pad = theano.shared(numpy.array([0, 0], dtype='int32'))\n mode = 'max'\n\n def fn(x):\n dnn_op = cuda.dnn.dnn_pool(\n x, ws=ws,\n stride=st,\n pad=pad,\n mode=mode)\n return dnn_op\n\n for shp in [(1, 1, 2, 2),\n (1, 1, 3, 3)]:\n data = numpy.random.normal(0, 1, shp).astype(\"float32\") * 10\n theano.tests.unittest_tools.verify_grad(\n fn, [data], mode=mode_with_gpu)\n\n mode_without_gpu2 = mode_without_gpu.including()\n mode_without_gpu2.check_isfinite = False\n\n # GPU implementation\n f_gpu = theano.function([x], fn(x), mode=mode_with_gpu)\n assert any([isinstance(node.op, cuda.dnn.GpuDnnPool)\n for node in f_gpu.maker.fgraph.apply_nodes])\n\n # CPU implementation\n out_cpu = pool_2d(x, ws, ignore_border=True, st=st, padding=pad, mode=mode)\n f_cpu = theano.function([x], out_cpu, mode=mode_without_gpu2)\n assert not any([isinstance(node.op, cuda.dnn.GpuDnnPool)\n for node in f_cpu.maker.fgraph.apply_nodes])\n assert any([isinstance(node.op, Pool)\n for node in f_cpu.maker.fgraph.apply_nodes])\n\n i = 1\n for shp in [(1, 10, 100, 100),\n (1, 3, 99, 99),\n (32, 1, 147, 197)]:\n data = numpy.random.normal(0, 1, shp).astype(\"float32\")\n\n # Change the window size dynamically\n ws.set_value(numpy.array([i, i]).astype('int32'))\n a = f_gpu(data).__array__()\n b = f_cpu(data).__array__()\n utt.assert_allclose(a, b)\n i += 1\n\n\ndef test_old_pool_interface():\n if not cuda.dnn.dnn_available() or cuda.dnn.version() > (5000, 5000):\n raise SkipTest(cuda.dnn.dnn_available.msg)\n\n testfile_dir = os.path.dirname(os.path.realpath(__file__))\n fname = 'old_pool_interface.pkl'\n with 
open(os.path.join(testfile_dir, fname), 'rb') as fp:\n try:\n pickle.load(fp)\n except ImportError:\n # Windows sometimes fail with nonsensical errors like:\n # ImportError: No module named type\n # ImportError: No module named copy_reg\n # when \"type\" and \"copy_reg\" are builtin modules.\n if sys.platform == 'win32':\n exc_type, exc_value, exc_trace = sys.exc_info()\n reraise(SkipTest, exc_value, exc_trace)\n raise\n\n\ndef test_pooling3d():\n # cuDNN 3d pooling requires cuDNN v3. Don't test if the cuDNN version is\n # too old.\n if not cuda.dnn.dnn_available() or cuda.dnn.version() < (3000, 3000):\n raise SkipTest(cuda.dnn.dnn_available.msg)\n\n # We force the FAST_RUN as we don't want the reference to run in DebugMode.\n mode_without_gpu_ref = theano.compile.mode.get_mode(\n 'FAST_RUN').excluding('gpu')\n\n # 'average_exc_pad' is disabled for versions < 4004\n if cuda.dnn.version() < (4004, 4004):\n modes = ('max', 'average_inc_pad')\n else:\n modes = ('max', 'average_inc_pad', 'average_exc_pad')\n\n x = T.ftensor5()\n for mode, pad in product(modes,\n ((0, 0, 0), (1, 0, 0), (0, 1, 0), (0, 0, 1),\n (2, 3, 2), (3, 2, 2), (2, 2, 3))):\n if pad != (0, 0, 0) and mode == 'average_exc_pad':\n # Not implemented\n continue\n\n for ws in (4, 2, 5):\n for stride in (2, 3):\n if stride > ws:\n continue\n if pad[0] > stride or pad[1] > stride or pad[2] > stride:\n # Not implemented\n continue\n out = pool_3d(x, (ws, ws, ws),\n st=(stride, stride, stride),\n ignore_border=True,\n padding=pad, mode=mode)\n\n # GPU implementation\n f_gpu = theano.function([x], out, mode=mode_with_gpu)\n assert any([isinstance(node.op, cuda.dnn.GpuDnnPool)\n for node in f_gpu.maker.fgraph.apply_nodes])\n\n # CPU implementation\n f_cpu = theano.function([x], out, mode=mode_without_gpu_ref)\n assert not any([isinstance(node.op, cuda.dnn.GpuDnnPool)\n for node in f_cpu.maker.fgraph.apply_nodes])\n assert any([isinstance(node.op, Pool)\n for node in f_cpu.maker.fgraph.apply_nodes])\n\n for shp in [(1, 5, 50, 20, 50),\n (1, 3, 99, 99, 29),\n (2, 1, 147, 97, 37),\n ]:\n data = numpy.random.normal(0, 1, shp).astype(\"float32\")\n a = f_cpu(data).__array__()\n b = f_gpu(data).__array__()\n utt.assert_allclose(a, b,\n atol=numpy.finfo(numpy.float32).eps)\n\n # Test the grad\n for shp in [(1, 1, 2, 2, 2),\n (1, 1, 3, 3, 3),\n (1, 1, 3, 3, 4),\n (1, 1, 3, 4, 3),\n (1, 1, 4, 3, 3),\n (1, 1, 4, 4, 4),\n (1, 1, 5, 5, 5)]:\n data = numpy.random.normal(0, 1, shp).astype(\"float32\") * 10\n\n ws = 2\n stride = 2\n if pad[0] > stride or pad[1] > stride or pad[2] > stride:\n # Not implemented\n continue\n\n # Test the GPU grad + GPU implementation\n def fn(x):\n dnn_op = cuda.dnn.dnn_pool(\n x, ws=(ws, ws, ws),\n stride=(stride, stride, stride),\n pad=pad,\n mode=mode)\n return dnn_op\n utt.verify_grad(fn, [data], mode=mode_with_gpu)\n # Confirm that we get the good op.\n fg = theano.function([x], theano.grad(fn(x).sum(), x),\n mode=mode_with_gpu)\n assert any([isinstance(node.op, cuda.dnn.GpuDnnPoolGrad)\n for node in fg.maker.fgraph.toposort()])\n\n\ndef test_pooling_opt():\n if not cuda.dnn.dnn_available():\n raise SkipTest(cuda.dnn.dnn_available.msg)\n\n # 2D pooling\n x = T.fmatrix()\n\n f = theano.function(\n [x],\n pool_2d(x, ds=(2, 2), mode='average_inc_pad', ignore_border=True),\n mode=mode_with_gpu)\n\n assert any([isinstance(n.op, cuda.dnn.GpuDnnPool)\n for n in f.maker.fgraph.toposort()])\n\n f(numpy.zeros((10, 10), dtype='float32'))\n\n # gradient of 2D pooling\n f = theano.function(\n [x],\n T.grad(pool_2d(x, ds=(2, 2), 
mode='average_inc_pad',\n ignore_border=True).sum(), x),\n mode=mode_with_gpu.including(\"cudnn\"))\n\n assert any([isinstance(n.op, cuda.dnn.GpuDnnPoolGrad)\n for n in f.maker.fgraph.toposort()])\n\n f(numpy.zeros((10, 10), dtype='float32'))\n\n # Test sum pooling\n f = theano.function(\n [x],\n pool_2d(x, ds=(2, 3), mode='sum',\n ignore_border=True),\n mode=mode_with_gpu)\n\n assert any([isinstance(n.op, dnn.GpuDnnPool)\n for n in f.maker.fgraph.toposort()])\n data = numpy.random.rand(10, 10).astype('float32')\n f(data)\n\n # 3D pooling\n x = T.ftensor3()\n\n f = theano.function(\n [x],\n pool_3d(x, ds=(2, 2, 2), mode='average_inc_pad', ignore_border=True),\n mode=mode_with_gpu)\n\n assert any([isinstance(n.op, cuda.dnn.GpuDnnPool)\n for n in f.maker.fgraph.toposort()])\n\n f(numpy.zeros((10, 10, 10), dtype='float32'))\n\n # gradient of 3D pooling\n f = theano.function(\n [x],\n T.grad(pool_3d(x, ds=(2, 2, 2), mode='average_inc_pad',\n ignore_border=True).sum(), x),\n mode=mode_with_gpu.including(\"cudnn\"))\n\n assert any([isinstance(n.op, cuda.dnn.GpuDnnPoolGrad)\n for n in f.maker.fgraph.toposort()])\n\n f(numpy.zeros((10, 10, 10), dtype='float32'))\n\n\ndef test_pooling_opt_arbitrary_dimensions():\n # test if input with an arbitrary number of non-pooling dimensions\n # is correctly reshaped to run on the GPU\n\n if not cuda.dnn.dnn_available():\n raise SkipTest(cuda.dnn.dnn_available.msg)\n\n # 'average_exc_pad' is disabled for versions < 4004\n if cuda.dnn.version() < (4004, 4004):\n modes = ('max', 'average_inc_pad')\n else:\n modes = ('max', 'average_inc_pad', 'average_exc_pad')\n\n for n_non_pool_dims in (0, 1, 2, 3):\n for ws in ((2, 2), (3, 3, 3)):\n # create input shape: non-pooling dimensions\n # followed by 2 or 3 pooling dimensions\n shp = tuple(range(2, 2 + n_non_pool_dims)) + tuple(range(5, 5 + len(ws)))\n data = numpy.random.normal(0, 1, shp).astype('float32')\n input = shared(data)\n\n for mode in modes:\n out_pool = Pool(ndim=len(ws), mode=mode, ignore_border=True)(input, ws)\n out_pool_grad = T.grad(T.sum(out_pool), wrt=input)\n out = [out_pool, out_pool_grad]\n\n # run on GPU\n fg = theano.function([], out, mode=mode_with_gpu)\n assert any([isinstance(node.op, cuda.dnn.GpuDnnPool)\n for node in fg.maker.fgraph.toposort()])\n assert any([isinstance(node.op, cuda.dnn.GpuDnnPoolGrad)\n for node in fg.maker.fgraph.toposort()])\n res_gpu = fg()\n\n # run on CPU\n fc = theano.function([], out, mode=mode_without_gpu)\n assert any([isinstance(node.op, Pool)\n for node in fc.maker.fgraph.toposort()])\n if mode == 'max':\n assert any([isinstance(node.op, MaxPoolGrad)\n for node in fc.maker.fgraph.toposort()])\n else:\n assert any([isinstance(node.op, AveragePoolGrad)\n for node in fc.maker.fgraph.toposort()])\n res_cpu = fg()\n\n # check for similarity\n utt.assert_allclose(res_gpu[0], res_cpu[0])\n utt.assert_allclose(res_gpu[1], res_cpu[1])\n\n\nclass test_DnnSoftMax(test_nnet.test_SoftMax):\n gpu_op = dnn.GpuDnnSoftmax\n gpu_grad_op = dnn.GpuDnnSoftmaxGrad\n mode = mode_with_gpu\n do_0 = False\n topo_idx = -3\n\n def setUp(self):\n if not cuda.dnn.dnn_available():\n raise SkipTest(cuda.dnn.dnn_available.msg)\n utt.seed_rng()\n\n def test_dnn_softmax_grad(self):\n softmax_op = dnn.GpuDnnSoftmax('bc01', 'accurate', 'channel')\n\n x_val = numpy.random.normal(0, 1, (3, 4, 2, 5)).astype('float32')\n x_val2 = numpy.random.normal(0, 1, (3, 4, 1, 1)).astype('float32')\n\n utt.verify_grad(softmax_op, [x_val], mode=mode_with_gpu)\n\n # Gradient is broken for (n, c, 1, 1) in v3 rc1\n 
if cuda.dnn.version() != (3000, 3000):\n utt.verify_grad(softmax_op, [x_val2], mode=mode_with_gpu)\n\n def test_local_softmax_dnn_grad(self):\n \"\"\"\n Check for optimization error when grad of summed\n softmax is taken over tensor with fixed shape.\n \"\"\"\n x = T.fvector('x')\n xp = x.reshape((5, 5))\n y = T.nnet.softmax(xp.flatten()).sum()\n g = T.grad(y, x)\n f = theano.function(inputs=[x], outputs=g, mode=self.mode)\n assert(any(n for n in f.maker.fgraph.toposort() if\n isinstance(n.op, dnn.GpuDnnSoftmaxGrad)))\n\n def test_cudnn_softmax_grad_opt(self):\n # Verify that the SoftmaxGrad -> GpuDnnSoftmaxGrad optimization is\n # applied when cudnn is required\n y = T.fvector('y')\n f = theano.function(\n [y],\n T.grad(T.nnet.softmax(y).mean(), y),\n mode=mode_with_gpu\n )\n sorted_f = f.maker.fgraph.toposort()\n val = numpy.random.rand(5).astype('float32')\n out_dnn = f(val)\n assert(len([i\n for i in sorted_f\n if isinstance(\n i.op,\n theano.sandbox.cuda.dnn.GpuDnnSoftmaxGrad\n )]) == 1)\n assert(len([i\n for i in sorted_f\n if isinstance(\n i.op,\n theano.tensor.nnet.SoftmaxGrad\n )]) == 0)\n\n # Verify that the SoftmaxGrad -> GpuDnnSoftmaxGrad optimization is not\n # applied when cudnn is excluded or not available\n mode_wo_cudnn = mode_with_gpu.excluding(\"cudnn\")\n y = T.fvector('y')\n f = theano.function(\n [y],\n T.grad(T.nnet.softmax(y).mean(), y),\n mode=mode_wo_cudnn\n )\n sorted_f = f.maker.fgraph.toposort()\n out_cpu = f(val)\n utt.assert_allclose(out_dnn, out_cpu)\n assert(len([i\n for i in sorted_f\n if isinstance(\n i.op,\n theano.sandbox.cuda.dnn.GpuDnnSoftmaxGrad\n )]) == 0)\n assert(len([i\n for i in sorted_f\n if isinstance(\n i.op,\n theano.tensor.nnet.SoftmaxGrad\n )]) == 1)\n\n # Verify that the SoftmaxGrad -> GpuDnnSoftmaxGrad do not\n # crash with manual graph\n y = T.fvector('y')\n o = theano.tensor.nnet.SoftmaxGrad()(y, y * 2)\n f = theano.function([y], o, mode=mode_with_gpu)\n sorted_f = f.maker.fgraph.toposort()\n assert(len([i\n for i in sorted_f\n if isinstance(\n i.op,\n theano.sandbox.cuda.dnn.GpuDnnSoftmaxGrad\n )]) == 1)\n assert(len([i\n for i in sorted_f\n if isinstance(\n i.op,\n theano.tensor.nnet.SoftmaxGrad\n )]) == 0)\n\n def test_log_softmax(self):\n # This is a test for an optimization that depends on cuDNN v3 or\n # more recent. 
Don't test if the cuDNN version is too old.\n if cuda.dnn.version() < (3000, 3000):\n raise SkipTest(\"Log-softmax is only in cudnn v3+\")\n\n x = T.ftensor4()\n softmax_out = dnn.GpuDnnSoftmax('bc01', 'accurate', 'channel')(x)\n log_out = T.log(T.as_tensor_variable(softmax_out))\n\n f = theano.function([x], log_out, mode=mode_with_gpu)\n\n # Ensure that the optimization has been applied\n dnn_softmax_nodes = [n for n in f.maker.fgraph.toposort() if\n isinstance(n.op, cuda.dnn.GpuDnnSoftmax)]\n assert len(dnn_softmax_nodes) == 1\n assert dnn_softmax_nodes[0].op.algo == \"log\"\n\n # Ensure that the output of the function is valid\n input_shapes = [(3, 4, 5, 6),\n (1025, 2, 3, 4),\n (2, 1025, 3, 4),\n (2, 3, 1025, 4),\n (2, 3, 4, 1025),\n (66000, 2, 3, 4),\n (2, 66000, 3, 4),\n (2, 3, 66000, 4),\n (2, 3, 4, 66000)]\n\n for inp_shape in input_shapes:\n input_val = numpy.random.normal(0, 1, inp_shape).astype(\"float32\")\n\n out = f(input_val)\n expected_out = numpy.log(\n numpy.exp(input_val) /\n numpy.exp(input_val).sum(1)[:, None, :, :])\n\n utt.assert_allclose(out, expected_out)\n\n def test_log_softmax2(self):\n # Test that the op LogSoftmax is correctly replaced by the op\n # DnnSoftmax with the 'log' mode.\n\n # Compile a reference function, on the CPU, to be used to validate the\n # results of the other function.\n x = T.fmatrix()\n f_ref = theano.function([x], T.nnet.LogSoftmax()(x))\n\n # Build the first graph and ensure that the optimization is applied\n log_softmax_out = T.nnet.LogSoftmax()(x)\n f = theano.function([x], log_softmax_out, mode=mode_with_gpu)\n\n dnn_softmax_nodes = [n for n in f.maker.fgraph.toposort() if\n isinstance(n.op, cuda.dnn.GpuDnnSoftmax)]\n assert len(dnn_softmax_nodes) == 1\n assert dnn_softmax_nodes[0].op.algo == \"log\"\n\n # Compare the output of the function with the reference function\n inp = numpy.random.normal(0, 1, (5, 6)).astype(\"float32\")\n utt.assert_allclose(f(inp), f_ref(inp))\n\n # Build the first graph and ensure that the optimization is applied\n log_softmax_out = T.log(T.nnet.Softmax()(x))\n f = theano.function([x], log_softmax_out, mode=mode_with_gpu)\n\n dnn_softmax_nodes = [n for n in f.maker.fgraph.toposort() if\n isinstance(n.op, cuda.dnn.GpuDnnSoftmax)]\n assert len(dnn_softmax_nodes) == 1\n assert dnn_softmax_nodes[0].op.algo == \"log\"\n\n # Compare the output of the function with the reference function\n inp = numpy.random.normal(0, 1, (5, 6)).astype(\"float32\")\n utt.assert_allclose(f(inp), f_ref(inp))\n\n\ndef test_batchnorm_train():\n if not cuda.dnn.dnn_available():\n raise SkipTest(cuda.dnn.dnn_available.msg)\n if cuda.dnn.version() < (5000, 5000):\n raise SkipTest(\"batch normalization requires cudnn v5+\")\n utt.seed_rng()\n\n for mode in ('per-activation', 'spatial'):\n for vartype in (T.ftensor5, T.ftensor4, T.ftensor3, T.fmatrix, T.fvector):\n x, scale, bias = (vartype(n) for n in ('x', 'scale', 'bias'))\n ndim = x.ndim\n eps = 5e-3 # some non-standard value to test if it's used\n\n # forward pass\n out, x_mean, x_invstd = cuda.dnn.dnn_batch_normalization_train(\n x, scale, bias, mode, eps)\n # reference forward pass\n if mode == 'per-activation':\n axes = (0,)\n elif mode == 'spatial':\n axes = (0,) + tuple(range(2, ndim))\n x_mean2 = x.mean(axis=axes, keepdims=True)\n x_invstd2 = T.inv(T.sqrt(x.var(axis=axes, keepdims=True) + eps))\n scale2 = T.addbroadcast(scale, *axes)\n bias2 = T.addbroadcast(bias, *axes)\n out2 = (x - x_mean2) * (scale2 * x_invstd2) + bias2\n # backward pass\n dy = vartype('dy')\n grads = 
T.grad(None, wrt=[x, scale, bias], known_grads={out: dy})\n # reference backward pass\n grads2 = T.grad(None, wrt=[x, scale, bias], known_grads={out2: dy})\n # compile\n f = theano.function([x, scale, bias, dy],\n [out, x_mean, x_invstd, out2, x_mean2, x_invstd2] +\n grads + grads2, mode=mode_with_gpu)\n # run\n for data_shape in ((5, 10, 30, 40, 10), (4, 3, 1, 1, 1), (1, 1, 5, 5, 5)):\n data_shape = data_shape[:ndim]\n param_shape = tuple(1 if d in axes else s\n for d, s in enumerate(data_shape))\n X = 4 + 3 * numpy.random.randn(*data_shape).astype('float32')\n Dy = -1 + 2 * numpy.random.randn(*data_shape).astype('float32')\n Scale = numpy.random.randn(*param_shape).astype('float32')\n Bias = numpy.random.randn(*param_shape).astype('float32')\n outputs = f(X, Scale, Bias, Dy)\n # compare outputs\n utt.assert_allclose(outputs[0], outputs[0 + 3]) # out\n utt.assert_allclose(outputs[1], outputs[1 + 3]) # mean\n utt.assert_allclose(outputs[2], outputs[2 + 3]) # invstd\n # compare gradients\n utt.assert_allclose(outputs[6], outputs[6 + 3], atol=1e-4) # dx\n utt.assert_allclose(outputs[7], outputs[7 + 3], rtol=2e-4, atol=1e-4) # dscale\n utt.assert_allclose(outputs[8], outputs[8 + 3]) # dbias\n\n\ndef test_batchnorm_inference():\n if not cuda.dnn.dnn_available():\n raise SkipTest(cuda.dnn.dnn_available.msg)\n if cuda.dnn.version() < (5000, 5000):\n raise SkipTest(\"batch normalization requires cudnn v5+\")\n utt.seed_rng()\n\n for mode in ('per-activation', 'spatial'):\n for vartype in (T.ftensor5, T.ftensor4, T.ftensor3, T.fmatrix, T.fvector):\n x, scale, bias, mean, var = (vartype(n) for n in ('x', 'scale',\n 'bias', 'mean',\n 'var'))\n ndim = x.ndim\n eps = 5e-3 # some non-standard value to test if it's used\n\n # forward pass\n out = cuda.dnn.dnn_batch_normalization_test(x, scale, bias, mean,\n var, mode, eps)\n # reference forward pass\n if mode == 'per-activation':\n axes = (0,)\n elif mode == 'spatial':\n axes = (0,) + tuple(range(2, ndim))\n scale2, bias2, mean2, var2 = (T.addbroadcast(t, *axes)\n for t in (scale, bias, mean, var))\n out2 = (x - mean2) * (scale2 / T.sqrt(var2 + eps)) + bias2\n # backward pass\n dy = vartype('dy')\n grads = T.grad(None, wrt=[x, scale, bias, mean, var], known_grads={out: dy})\n # reference backward pass\n grads2 = T.grad(None, wrt=[x, scale, bias, mean, var], known_grads={out2: dy})\n # compile\n f = theano.function([x, scale, bias, mean, var, dy],\n [out, out2] + grads + grads2, mode=mode_with_gpu)\n # run\n for data_shape in ((5, 10, 30, 40, 10), (4, 3, 1, 1, 1), (1, 1, 5, 5, 5)):\n data_shape = data_shape[:ndim]\n param_shape = tuple(1 if d in axes else s\n for d, s in enumerate(data_shape))\n X = 4 + 3 * numpy.random.randn(*data_shape).astype('float32')\n Dy = -1 + 2 * numpy.random.randn(*data_shape).astype('float32')\n Scale = numpy.random.randn(*param_shape).astype('float32')\n Bias = numpy.random.randn(*param_shape).astype('float32')\n Mean = numpy.random.randn(*param_shape).astype('float32')\n Var = numpy.random.rand(*param_shape).astype('float32')\n outputs = f(X, Scale, Bias, Mean, Var, Dy)\n # compare outputs\n utt.assert_allclose(outputs[0], outputs[1]) # out\n # compare gradients\n utt.assert_allclose(outputs[2], outputs[2 + 5], atol=4e-5) # dx\n utt.assert_allclose(outputs[3], outputs[3 + 5], atol=4e-5) # dscale\n utt.assert_allclose(outputs[4], outputs[4 + 5]) # dbias\n utt.assert_allclose(outputs[5], outputs[5 + 5]) # dmean\n utt.assert_allclose(outputs[6], outputs[6 + 5], rtol=2e-3, atol=4e-5) # dvar\n\n\ndef test_dnn_tag():\n \"\"\"\n 
Test that if cudnn isn't avail we crash and that if it is avail, we use it.\n \"\"\"\n x = T.ftensor4()\n old = theano.config.on_opt_error\n theano.config.on_opt_error = \"raise\"\n\n sio = StringIO()\n handler = logging.StreamHandler(sio)\n logging.getLogger('theano.compile.tests.test_dnn').addHandler(handler)\n # Silence original handler when intentionnally generating warning messages\n logging.getLogger('theano').removeHandler(theano.logging_default_handler)\n raised = False\n try:\n f = theano.function(\n [x],\n pool_2d(x, ds=(2, 2), ignore_border=True),\n mode=mode_with_gpu.including(\"cudnn\"))\n except (AssertionError, RuntimeError):\n assert not cuda.dnn.dnn_available()\n raised = True\n finally:\n theano.config.on_opt_error = old\n logging.getLogger(\n 'theano.compile.tests.test_dnn').removeHandler(handler)\n logging.getLogger('theano').addHandler(theano.logging_default_handler)\n\n if not raised:\n assert cuda.dnn.dnn_available()\n assert any([isinstance(n.op, cuda.dnn.GpuDnnPool)\n for n in f.maker.fgraph.toposort()])\n\n\nclass TestDnnInferShapes(utt.InferShapeTester):\n\n def setUp(self):\n super(TestDnnInferShapes, self).setUp()\n self.mode = mode_with_gpu\n\n def test_softmax(self):\n if not dnn.dnn_available():\n raise SkipTest(dnn.dnn_available.msg)\n t = T.ftensor4('t')\n rand_tensor = numpy.asarray(\n numpy.random.rand(5, 4, 3, 2),\n dtype='float32'\n )\n self._compile_and_check(\n [t],\n [dnn.GpuDnnSoftmax('bc01', 'accurate', 'channel')(t)],\n [rand_tensor],\n dnn.GpuDnnSoftmax\n )\n\n self._compile_and_check(\n [t],\n [\n T.grad(\n dnn.GpuDnnSoftmax(\n 'bc01',\n 'accurate',\n 'channel'\n )(t).mean(),\n t\n )\n ],\n [rand_tensor],\n dnn.GpuDnnSoftmaxGrad\n )\n\n def test_conv(self):\n if not dnn.dnn_available():\n raise SkipTest(dnn.dnn_available.msg)\n img = T.ftensor4('img')\n kerns = T.ftensor4('kerns')\n out = T.ftensor4('out')\n img_val = numpy.asarray(\n numpy.random.rand(10, 2, 6, 4),\n dtype='float32'\n )\n kern_vals = numpy.asarray(\n numpy.random.rand(8, 2, 4, 3),\n dtype='float32'\n )\n\n for params in product(\n ['valid', 'full', 'half'],\n [(1, 1), (2, 2)],\n ['conv', 'cross']\n ):\n out_vals = numpy.zeros(\n dnn.GpuDnnConv.get_out_shape(img_val.shape, kern_vals.shape,\n border_mode=params[0],\n subsample=params[1]),\n dtype='float32')\n desc = dnn.GpuDnnConvDesc(\n border_mode=params[0],\n subsample=params[1],\n conv_mode=params[2]\n )(img.shape, kerns.shape)\n conv = dnn.GpuDnnConv()(img, kerns, out, desc)\n self._compile_and_check(\n [img, kerns, out],\n [conv],\n [img_val, kern_vals, out_vals],\n dnn.GpuDnnConv\n )\n\n def test_conv3d(self):\n if not (cuda.dnn.dnn_available() and dnn.version() >= (2000, 2000)):\n raise SkipTest('\"cuDNN 3D convolution requires cuDNN v2')\n img = T.ftensor5('img')\n kerns = T.ftensor5('kerns')\n out = T.ftensor5('out')\n img_val = numpy.asarray(\n numpy.random.rand(10, 2, 6, 4, 11),\n dtype='float32'\n )\n kern_vals = numpy.asarray(\n numpy.random.rand(8, 2, 4, 3, 1),\n dtype='float32'\n )\n\n for params in product(\n ['valid', 'full', 'half'],\n [(1, 1, 1), (2, 2, 2)],\n ['conv', 'cross']\n ):\n out_vals = numpy.zeros(\n dnn.GpuDnnConv3d.get_out_shape(img_val.shape, kern_vals.shape,\n border_mode=params[0],\n subsample=params[1]),\n dtype='float32')\n desc = dnn.GpuDnnConvDesc(\n border_mode=params[0],\n subsample=params[1],\n conv_mode=params[2]\n )(img.shape, kerns.shape)\n conv = dnn.GpuDnnConv3d()(img, kerns, out, desc)\n self._compile_and_check(\n [img, kerns, out],\n [conv],\n [img_val, kern_vals, out_vals],\n 
dnn.GpuDnnConv3d\n )\n\n def test_conv_gradw(self):\n if not dnn.dnn_available():\n raise SkipTest(dnn.dnn_available.msg)\n img = T.ftensor4('img')\n kerns = T.ftensor4('kerns')\n out = T.ftensor4('out')\n img_val = numpy.asarray(\n numpy.random.rand(2, 5, 6, 8),\n dtype='float32'\n )\n kern_vals = numpy.asarray(\n numpy.random.rand(2, 1, 5, 6),\n dtype='float32'\n )\n\n for params in product(\n ['valid', 'full', 'half'],\n [(1, 1)], # strides besides (1, 1)\n ['conv', 'cross']\n ):\n temp_img = img.dimshuffle(1, 0, 2, 3)\n temp_kerns = kerns\n if params[2] == 'conv':\n temp_kerns = temp_kerns[:, :, ::-1, ::-1]\n temp_kerns = temp_kerns.dimshuffle(1, 0, 2, 3)\n shape = (\n kern_vals.shape[1], img_val.shape[1],\n img_val.shape[2] - kern_vals.shape[2] + 1,\n img_val.shape[3] - kern_vals.shape[3] + 1\n )\n out_vals = numpy.zeros(shape, dtype='float32')\n desc = dnn.GpuDnnConvDesc(\n border_mode=params[0],\n subsample=params[1],\n conv_mode=params[2]\n )(temp_img.shape, out.shape)\n conv_grad_w = dnn.GpuDnnConvGradW()(\n temp_img,\n temp_kerns,\n out,\n desc,\n )\n self._compile_and_check(\n [temp_img, temp_kerns, out],\n [conv_grad_w],\n [img_val, kern_vals, out_vals],\n dnn.GpuDnnConvGradW\n )\n\n def test_conv3d_gradw(self):\n if not (cuda.dnn.dnn_available() and dnn.version() >= (2000, 2000)):\n raise SkipTest('\"cuDNN 3D convolution requires cuDNN v2')\n img = T.ftensor5('img')\n kerns = T.ftensor5('kerns')\n out = T.ftensor5('out')\n img_val = numpy.asarray(\n numpy.random.rand(9, 2, 4, 8, 13),\n dtype='float32'\n )\n kern_vals = numpy.asarray(\n numpy.random.rand(11, 2, 3, 1, 4),\n dtype='float32'\n )\n\n for params in product(\n ['valid', 'full', 'half'],\n [(1, 1, 1), (2, 2, 2)],\n ['conv', 'cross']\n ):\n out_vals = numpy.zeros(\n dnn.GpuDnnConv3d.get_out_shape(img_val.shape, kern_vals.shape,\n border_mode=params[0],\n subsample=params[1]),\n dtype='float32')\n\n desc = dnn.GpuDnnConvDesc(\n border_mode=params[0],\n subsample=params[1],\n conv_mode=params[2]\n )(img.shape, out.shape)\n conv_grad_w = dnn.GpuDnnConv3dGradW()(\n img,\n out,\n kerns,\n desc,\n )\n self._compile_and_check(\n [img, out, kerns],\n [conv_grad_w],\n [img_val, out_vals, kern_vals],\n dnn.GpuDnnConv3dGradW\n )\n\n def test_conv_gradi(self):\n if not dnn.dnn_available():\n raise SkipTest(dnn.dnn_available.msg)\n img = T.ftensor4('img')\n kerns = T.ftensor4('kerns')\n out = T.ftensor4('out')\n img_val = numpy.asarray(\n numpy.random.rand(3, 4, 5, 6),\n dtype='float32'\n )\n kern_vals = numpy.asarray(\n numpy.random.rand(4, 14, 15, 16),\n dtype='float32'\n )\n\n for params in product(\n ['valid'], # Should this work for 'full'?\n [(1, 1)],\n ['conv', 'cross']\n ):\n temp_kerns = kerns.dimshuffle(1, 0, 2, 3)\n shape = (\n img_val.shape[0], kern_vals.shape[1],\n img_val.shape[2] + kern_vals.shape[2] - 1,\n img_val.shape[3] + kern_vals.shape[3] - 1\n )\n out_vals = numpy.zeros(shape, dtype='float32')\n desc = dnn.GpuDnnConvDesc(\n border_mode=params[0],\n subsample=params[1],\n conv_mode=params[2]\n )(out.shape, temp_kerns.shape)\n conv_grad_i = dnn.GpuDnnConvGradI()(\n temp_kerns,\n img,\n out,\n desc,\n )\n self._compile_and_check(\n [temp_kerns, img, out],\n [conv_grad_i],\n [kern_vals, img_val, out_vals],\n dnn.GpuDnnConvGradI\n )\n\n def test_conv3d_gradi(self):\n if not (cuda.dnn.dnn_available() and dnn.version() >= (2000, 2000)):\n raise SkipTest('\"cuDNN 3D convolution requires cuDNN v2')\n img = T.ftensor5('img')\n kerns = T.ftensor5('kerns')\n out = T.ftensor5('out')\n img_val = numpy.asarray(\n 
numpy.random.rand(8, 4, 6, 7, 11),\n dtype='float32'\n )\n kern_vals = numpy.asarray(\n numpy.random.rand(9, 4, 5, 1, 2),\n dtype='float32'\n )\n\n for params in product(\n ['valid', 'full', 'half'],\n [(1, 1, 1), (2, 2, 2)],\n ['conv', 'cross']\n ):\n out_vals = numpy.zeros(\n dnn.GpuDnnConv3d.get_out_shape(img_val.shape, kern_vals.shape,\n border_mode=params[0],\n subsample=params[1]),\n dtype='float32')\n\n desc = dnn.GpuDnnConvDesc(\n border_mode=params[0],\n subsample=params[1],\n conv_mode=params[2]\n )(img.shape, kerns.shape)\n conv_grad_i = dnn.GpuDnnConv3dGradI()(\n kerns,\n out,\n img,\n desc,\n )\n self._compile_and_check(\n [kerns, out, img],\n [conv_grad_i],\n [kern_vals, out_vals, img_val],\n dnn.GpuDnnConv3dGradI\n )\n\n def test_pool(self):\n if not dnn.dnn_available():\n raise SkipTest(dnn.dnn_available.msg)\n img = T.ftensor4('img')\n img_val = numpy.asarray(\n numpy.random.rand(2, 3, 4, 5),\n dtype='float32'\n )\n\n # 'average_exc_pad' is disabled for versions < 4004\n if cuda.dnn.version() < (4004, 4004):\n modes = ['max', 'average_inc_pad']\n else:\n modes = ['max', 'average_inc_pad', 'average_exc_pad']\n\n for params in product(\n [(1, 1), (2, 2), (3, 3)],\n [(1, 1), (2, 2), (3, 3)],\n modes\n ):\n self._compile_and_check(\n [img],\n [dnn.GpuDnnPool(mode=params[2])\n (img, params[0], params[1], (0, 0))],\n [img_val],\n dnn.GpuDnnPool\n )\n\n def test_pool_3d(self):\n if not dnn.dnn_available():\n raise SkipTest(dnn.dnn_available.msg)\n img = T.ftensor5('img')\n img_val = numpy.asarray(\n numpy.random.rand(2, 3, 4, 5, 6),\n dtype='float32'\n )\n\n # 'average_exc_pad' is disabled for versions < 4004\n if cuda.dnn.version() < (4004, 4004):\n modes = ['max', 'average_inc_pad']\n else:\n modes = ['max', 'average_inc_pad', 'average_exc_pad']\n\n for params in product(\n [(1, 1, 1), (2, 2, 2), (3, 3, 3)],\n [(1, 1, 1), (2, 2, 2), (3, 3, 3)],\n modes\n ):\n self._compile_and_check(\n [img],\n [dnn.GpuDnnPool(mode=params[2])(img, params[0], params[1], (0, 0, 0))],\n [img_val],\n dnn.GpuDnnPool\n )\n\n def test_pool_grad(self):\n if not dnn.dnn_available():\n raise SkipTest(dnn.dnn_available.msg)\n img = T.ftensor4('img')\n img_grad = T.ftensor4('img_grad')\n out = T.ftensor4('out')\n img_val = numpy.asarray(\n numpy.random.rand(2, 3, 4, 5),\n dtype='float32'\n )\n img_grad_val = numpy.asarray(\n numpy.random.rand(2, 3, 4, 5),\n dtype='float32'\n )\n out_val = numpy.asarray(\n numpy.random.rand(2, 3, 4, 5),\n dtype='float32'\n )\n\n for params in product(\n [(1, 1), (2, 2), (3, 3)],\n [(1, 1), (2, 2), (3, 3)],\n ['max', 'average_inc_pad']\n ):\n pool_grad = dnn.GpuDnnPoolGrad()(\n img,\n out,\n img_grad,\n params[0],\n params[1],\n (0, 0)\n )\n self._compile_and_check(\n [img, img_grad, out],\n [pool_grad],\n [img_val, img_grad_val, out_val],\n dnn.GpuDnnPoolGrad\n )\n\n def test_pool_3d_grad(self):\n if not dnn.dnn_available():\n raise SkipTest(dnn.dnn_available.msg)\n img = T.ftensor5('img')\n img_grad = T.ftensor5('img_grad')\n out = T.ftensor5('out')\n img_val = numpy.asarray(\n numpy.random.rand(2, 3, 4, 5, 6),\n dtype='float32'\n )\n img_grad_val = numpy.asarray(\n numpy.random.rand(2, 3, 4, 5, 6),\n dtype='float32'\n )\n out_val = numpy.asarray(\n numpy.random.rand(2, 3, 4, 5, 6),\n dtype='float32'\n )\n\n for params in product(\n [(1, 1, 1), (2, 2, 2), (3, 3, 3)],\n [(1, 1, 1), (2, 2, 2), (3, 3, 3)],\n ['max', 'average_inc_pad']\n ):\n pool_grad = dnn.GpuDnnPoolGrad(mode=params[2])(\n img,\n out,\n img_grad,\n params[0],\n params[1],\n (0, 0, 0)\n )\n 
self._compile_and_check(\n [img, img_grad, out],\n [pool_grad],\n [img_val, img_grad_val, out_val],\n dnn.GpuDnnPoolGrad\n )\n\n\n# this has been a problem in the past\ndef test_dnn_conv_border_mode():\n if not cuda.dnn.dnn_available():\n raise SkipTest(cuda.dnn.dnn_available.msg)\n img = T.ftensor4()\n kern = T.ftensor4()\n\n dnn.dnn_conv(img, kern, border_mode=1)\n dnn.dnn_conv(img, kern, border_mode=(2, 3))\n dnn.dnn_conv(img, kern, border_mode='full')\n dnn.dnn_conv(img, kern, border_mode='valid')\n dnn.dnn_conv(img, kern, border_mode='half')\n\n\ndef test_dnn_conv_alpha_output_merge():\n if not cuda.dnn.dnn_available():\n raise SkipTest(cuda.dnn.dnn_available.msg)\n img = T.ftensor4()\n kern = T.ftensor4()\n out = T.ftensor4()\n\n b = 1\n c = 4\n f = 3\n ih = 5\n iw = 8\n kh = 2\n kw = 6\n img_val = numpy.random.random((b, c, ih, iw)).astype('float32')\n kern_val = numpy.random.random((f, c, kh, kw)).astype('float32')\n out_val = numpy.random.random((b, f, ih - kh + 1,\n iw - kw + 1)).astype('float32')\n\n conv = dnn.dnn_conv(img, kern)\n gw = theano.grad(conv.sum(), kern)\n gi = theano.grad(conv.sum(), img)\n\n lr = numpy.asarray(0.05, dtype='float32')\n\n if cuda.dnn.version() == -1:\n # Can't merge alpha with cudnn v1\n fr = conv + out\n wr = kern + gw\n ir = img + gi\n else:\n fr = lr * (conv + out)\n wr = kern + lr * gw\n ir = img + lr * gi\n\n f1 = theano.function([img, kern, out], [fr, wr, ir], mode=mode_with_gpu)\n assert isinstance(f1.maker.fgraph.outputs[0].owner.inputs[0].owner.op,\n dnn.GpuDnnConv)\n assert isinstance(f1.maker.fgraph.outputs[1].owner.inputs[0].owner.op,\n dnn.GpuDnnConvGradW)\n assert isinstance(f1.maker.fgraph.outputs[2].owner.inputs[0].owner.op,\n dnn.GpuDnnConvGradI)\n\n mode = mode_with_gpu\n mode = mode.excluding('local_dnn_conv_alpha_merge')\n mode = mode.excluding('local_dnn_convw_alpha_merge')\n mode = mode.excluding('local_dnn_convi_alpha_merge')\n mode = mode.excluding('local_dnn_conv_output_merge')\n mode = mode.excluding('local_dnn_convw_output_merge')\n mode = mode.excluding('local_dnn_convi_output_merge')\n\n f2 = theano.function([img, kern, out], [fr, wr, ir], mode=mode)\n\n assert not isinstance(f2.maker.fgraph.outputs[0].owner.inputs[0].owner.op,\n dnn.GpuDnnConv)\n assert not isinstance(f2.maker.fgraph.outputs[1].owner.inputs[0].owner.op,\n dnn.GpuDnnConvGradW)\n assert not isinstance(f2.maker.fgraph.outputs[2].owner.inputs[0].owner.op,\n dnn.GpuDnnConvGradI)\n\n out_f1 = f1(img_val, kern_val, out_val)\n out_f2 = f2(img_val, kern_val, out_val)\n\n assert len(out_f1) == len(out_f2)\n\n for v1, v2 in zip(out_f1, out_f2):\n utt.assert_allclose(v1, v2)\n\n\ndef test_dnn_conv3d_alpha_output_merge():\n if not cuda.dnn.dnn_available():\n raise SkipTest(cuda.dnn.dnn_available.msg)\n t = T.TensorType(broadcastable=(False, False, False, False, False),\n dtype='float32')\n\n img = t()\n kern = t()\n out = t()\n\n b = 1\n c = 4\n f = 3\n it = 10\n ih = 5\n iw = 8\n kt = 3\n kh = 2\n kw = 6\n img_val = numpy.random.random((b, c, it, ih, iw)).astype('float32')\n kern_val = numpy.random.random((f, c, kt, kh, kw)).astype('float32')\n out_val = numpy.random.random((b, f, it - kt + 1, ih - kh + 1,\n iw - kw + 1)).astype('float32')\n\n conv = dnn.dnn_conv3d(img, kern)\n gw = theano.grad(conv.sum(), kern)\n gi = theano.grad(conv.sum(), img)\n\n lr = numpy.asarray(0.05, dtype='float32')\n\n if cuda.dnn.version() == -1:\n # Can't merge alpha with cudnn v1\n fr = conv + out\n wr = kern + gw\n ir = img + gi\n else:\n fr = lr * (conv + out)\n wr = kern + lr * 
gw\n ir = img + lr * gi\n\n f1 = theano.function([img, kern, out], [fr, wr, ir], mode=mode_with_gpu)\n assert isinstance(f1.maker.fgraph.outputs[0].owner.inputs[0].owner.op,\n dnn.GpuDnnConv)\n assert isinstance(f1.maker.fgraph.outputs[1].owner.inputs[0].owner.op,\n dnn.GpuDnnConvGradW)\n assert isinstance(f1.maker.fgraph.outputs[2].owner.inputs[0].owner.op,\n dnn.GpuDnnConvGradI)\n\n mode = mode_with_gpu\n mode = mode.excluding('local_dnn_conv_alpha_merge')\n mode = mode.excluding('local_dnn_convw_alpha_merge')\n mode = mode.excluding('local_dnn_convi_alpha_merge')\n mode = mode.excluding('local_dnn_conv_output_merge')\n mode = mode.excluding('local_dnn_convw_output_merge')\n mode = mode.excluding('local_dnn_convi_output_merge')\n\n f2 = theano.function([img, kern, out], [fr, wr, ir], mode=mode)\n\n assert not isinstance(f2.maker.fgraph.outputs[0].owner.inputs[0].owner.op,\n dnn.GpuDnnConv3d)\n assert not isinstance(f2.maker.fgraph.outputs[1].owner.inputs[0].owner.op,\n dnn.GpuDnnConv3dGradW)\n assert not isinstance(f2.maker.fgraph.outputs[2].owner.inputs[0].owner.op,\n dnn.GpuDnnConv3dGradI)\n\n out_f1 = f1(img_val, kern_val, out_val)\n out_f2 = f2(img_val, kern_val, out_val)\n\n assert len(out_f1) == len(out_f2)\n\n for v1, v2 in zip(out_f1, out_f2):\n utt.assert_allclose(v1, v2)\n\n\ndef test_dnn_conv_merge_mouts():\n # make sure it doesn't attempt to output/alpha merge a convolution\n # that has multiple clients.\n if not cuda.dnn.dnn_available():\n raise SkipTest(cuda.dnn.dnn_available.msg)\n img = T.ftensor4()\n kern = T.ftensor4()\n out = T.ftensor4()\n\n conv = dnn.dnn_conv(img, kern)\n\n lr = numpy.asarray(0.05, dtype='float32')\n\n if cuda.dnn.version() == -1:\n # Can't merge alpha with cudnn v1\n fr = conv + out\n else:\n fr = lr * (conv + out)\n rr = conv * lr\n\n f = theano.function([img, kern, out], [fr, rr], mode=mode_with_gpu)\n convs = [n for n in f.maker.fgraph.toposort()\n if isinstance(n.op, dnn.GpuDnnConv)]\n assert len(convs) == 1\n\n\ndef test_dnn_conv_merge_broad():\n # Make sure that we don't apply output_merge on broadcasted values.\n if not cuda.dnn.dnn_available():\n raise SkipTest(cuda.dnn.dnn_available.msg)\n img = T.ftensor4()\n kern = T.ftensor4()\n\n conv = dnn.dnn_conv(img, kern)\n\n lr = numpy.asarray(0.05, dtype='float32')\n\n # this does broadcasting\n fr = conv + lr\n\n f = theano.function([img, kern], [fr])\n convs = [n for n in f.maker.fgraph.toposort()\n if isinstance(n.op, dnn.GpuDnnConv)]\n assert len(convs) == 1\n conv = convs[0]\n # Assert output was not merged\n assert isinstance(conv.inputs[2].owner.op, GpuAllocEmpty)\n\n\ndef test_dnn_conv_grad():\n if not cuda.dnn.dnn_available() or dnn.version() == -1:\n raise SkipTest('alpha != 1.0 not supported in cudnn v1')\n\n b = 1\n c = 4\n f = 3\n ih = 2\n iw = 8\n kh = 2\n kw = 2\n img_val = numpy.random.random((b, c, ih, iw)).astype('float32')\n kern_val = numpy.random.random((f, c, kh, kw)).astype('float32')\n out_val = numpy.random.random((b, f, ih - kw + 1,\n iw - kw + 1)).astype('float32')\n\n def dconv(img, kern, out):\n desc = dnn.GpuDnnConvDesc(border_mode='valid', subsample=(1, 1),\n conv_mode='conv')(img.shape, kern.shape)\n return dnn.GpuDnnConv()(img, kern, out, desc, alpha=0.5, beta=0.75)\n\n def dconvi(img, kern, out):\n desc = dnn.GpuDnnConvDesc(border_mode='valid', subsample=(1, 1),\n conv_mode='conv')(img.shape, kern.shape)\n return dnn.GpuDnnConvGradI()(kern, out, img, desc, alpha=-1.0,\n beta=0.0)\n\n def dconvw(img, kern, out):\n desc = dnn.GpuDnnConvDesc(border_mode='valid', 
subsample=(1, 1),\n conv_mode='conv')(img.shape, kern.shape)\n return dnn.GpuDnnConvGradW()(img, out, kern, desc, alpha=0.75,\n beta=-1.0)\n\n utt.verify_grad(dconv, [img_val, kern_val, out_val], mode=mode_with_gpu)\n utt.verify_grad(dconvi, [img_val, kern_val, out_val], mode=mode_with_gpu)\n utt.verify_grad(dconvw, [img_val, kern_val, out_val], mode=mode_with_gpu)\n\n\ndef get_conv3d_test_cases():\n # Every element of test_shapes follows the format\n # [input_shape, filter_shape, subsample]\n test_shapes = [[(128, 3, 5, 5, 5), (64, 3, 1, 2, 4), (1, 1, 1)],\n [(8, 4, 20, 12, 15), (5, 4, 6, 12, 4), (2, 2, 2)],\n [(8, 1, 20, 12, 15), (5, 1, 6, 12, 4), (3, 3, 3)],\n [(8, 1, 20, 12, 15), (5, 1, 6, 12, 4), (3, 2, 1)],\n [(8, 1, 20, 12, 15), (5, 1, 6, 12, 4), (3, 2, 1)],\n # Test with 1x1x1 filters\n [(8, 1, 10, 10, 10), (10, 1, 1, 1, 1), (1, 1, 1)],\n # Test with dimensions larger than 1024 (thread block dim)\n [(1025, 1, 2, 3, 4), (5, 1, 1, 2, 3), (1, 1, 1)],\n [(8, 1, 2, 3, 4), (1025, 1, 1, 2, 3), (1, 1, 1)],\n [(8, 1025, 2, 3, 4), (5, 1025, 1, 1, 2), (1, 1, 1)],\n [(8, 1, 1030, 3, 4), (5, 1, 1025, 1, 1), (1, 1, 1)],\n [(8, 1, 2, 1030, 4), (5, 1, 2, 1025, 1), (1, 1, 1)],\n [(8, 1, 2, 3, 1030), (5, 1, 1, 2, 1025), (1, 1, 1)],\n # The equivalent of this caused a crash with conv2d\n [(1, 1, 1, 44800, 1), (6, 1, 1, 1, 1), (1, 1, 1)]]\n\n # With border mode 'full', test with kernel bigger than image in some/all\n # dimensions\n test_shapes_full = [[(6, 2, 2, 2, 2), (4, 2, 3, 1, 1), (1, 1, 1)],\n [(6, 2, 2, 2, 2), (4, 2, 1, 3, 1), (1, 1, 1)],\n [(6, 2, 2, 2, 2), (4, 2, 1, 1, 3), (1, 1, 1)],\n [(6, 2, 2, 2, 2), (4, 2, 5, 5, 5), (1, 1, 1)]]\n border_modes = ['valid', 'full', 'half', (1, 2, 3), (3, 2, 1), 1, 2]\n conv_modes = ['conv', 'cross']\n\n if cuda.dnn.dnn_available() and dnn.version() >= (3000, 3000):\n itt = chain(product(test_shapes, border_modes, conv_modes),\n product(test_shapes_full, ['full'], conv_modes))\n else:\n # cuDNN, before V3, did not support kernels larger than the inputs,\n # even if the original inputs were padded so they would be larger than\n # the kernels. 
If using a version older than V3 don't run the tests\n # with kernels larger than the unpadded inputs.\n itt = product(test_shapes, border_modes, conv_modes)\n\n return itt\n\n\ndef test_conv3d_fwd():\n\n if not (cuda.dnn.dnn_available() and dnn.version() >= (2000, 2000)):\n raise SkipTest('\"cuDNN 3D convolution requires cuDNN v2')\n\n def run_conv3d_fwd(inputs_shape, filters_shape, subsample,\n border_mode, conv_mode):\n\n inputs_val = numpy.random.random(inputs_shape).astype('float32')\n filters_val = numpy.random.random(filters_shape).astype('float32')\n\n # Scale down the input values to prevent very large absolute errors\n # due to float rounding\n inputs_val /= 10\n filters_val /= 10\n\n inputs = shared(inputs_val)\n filters = shared(filters_val)\n\n # Compile a theano function for the cuDNN implementation\n conv = dnn.dnn_conv3d(img=inputs, kerns=filters,\n border_mode=border_mode, subsample=subsample,\n conv_mode=conv_mode)\n f = theano.function([], conv, mode=mode_with_gpu)\n\n # If conv_mode is 'conv' the reference implementation should use\n # filters filpped according to the width, height and time axis\n if conv_mode == 'conv':\n flipped_filters = filters[:, :, ::-1, ::-1, ::-1]\n else:\n flipped_filters = filters\n\n # Compile a theano function for the reference implementation\n conv_ref = theano.tensor.nnet.corr3d.Corr3dMM(border_mode=border_mode,\n subsample=subsample\n )(inputs, flipped_filters)\n f_ref = theano.function([], conv_ref, mode=\"FAST_RUN\")\n\n # Compare the results of the two implementations\n res_ref = f_ref()\n res = f()\n utt.assert_allclose(res_ref, res)\n\n test_cases = get_conv3d_test_cases()\n for (i_shape, f_shape, subsample), border_mode, conv_mode in test_cases:\n yield (run_conv3d_fwd, i_shape, f_shape, subsample, border_mode,\n conv_mode)\n\n\ndef test_conv3d_bwd():\n\n if not (cuda.dnn.dnn_available() and dnn.version() >= (2000, 2000)):\n raise SkipTest('\"cuDNN 3D convolution requires cuDNN v2')\n\n def run_conv3d_bwd(inputs_shape, filters_shape, subsample,\n border_mode, conv_mode):\n\n inputs_val = numpy.random.random(inputs_shape).astype('float32')\n filters_val = numpy.random.random(filters_shape).astype('float32')\n\n inputs = shared(inputs_val)\n filters = shared(filters_val)\n\n # Compile a theano function for the cuDNN implementation\n conv = dnn.dnn_conv3d(img=inputs, kerns=filters,\n border_mode=border_mode, subsample=subsample,\n conv_mode=conv_mode)\n\n grad_i, grad_w = theano.tensor.grad(conv.sum(), [inputs, filters])\n\n f = theano.function([], [grad_i, grad_w], mode=mode_with_gpu)\n\n # If conv_mode is 'conv' the reference implementation should use\n # filters filpped according to the width, height and time axis\n if conv_mode == 'conv':\n flipped_filters = filters[:, :, ::-1, ::-1, ::-1]\n else:\n flipped_filters = filters\n\n # Compile a theano function for the reference implementation\n conv_ref = theano.tensor.nnet.corr3d.Corr3dMM(border_mode=border_mode,\n subsample=subsample\n )(inputs, flipped_filters)\n (grad_i_ref,\n grad_w_ref) = theano.tensor.grad(conv_ref.sum(),\n [inputs, filters])\n\n f_ref = theano.function([], [grad_i_ref, grad_w_ref], mode=\"FAST_RUN\")\n\n # Compare the results of the two implementations\n res_ref = f_ref()\n res = f()\n # Needed for big size for some seed\n # raise rtol to make the test pass with more seed.\n utt.assert_allclose(res_ref[0], res[0], rtol=2e-5)\n utt.assert_allclose(res_ref[1], res[1], rtol=2e-5)\n\n test_cases = get_conv3d_test_cases()\n for (i_shape, f_shape, subsample), 
border_mode, conv_mode in test_cases:\n yield (run_conv3d_bwd, i_shape, f_shape, subsample, border_mode,\n conv_mode)\n\n\ndef test_version():\n if not cuda.dnn.dnn_available():\n raise SkipTest(cuda.dnn.dnn_available.msg)\n assert isinstance(cuda.dnn.version(), (int, tuple))\n"
] |
[
[
"numpy.asarray",
"numpy.arange",
"scipy.ndimage.convolve",
"numpy.random.rand",
"numpy.zeros"
],
[
"numpy.allclose",
"numpy.ones"
],
[
"numpy.random.random",
"numpy.asarray",
"numpy.finfo",
"numpy.random.normal",
"numpy.random.randn",
"numpy.random.rand",
"numpy.exp",
"numpy.array",
"numpy.zeros"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [
"1.7",
"1.0",
"0.10",
"1.2",
"0.14",
"0.19",
"1.5",
"0.12",
"0.17",
"0.13",
"1.6",
"1.4",
"1.9",
"1.3",
"1.10",
"0.15",
"0.18",
"0.16",
"1.8"
],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
TalSchuster/prototypical-networks
|
[
"3fb2a0c4ff8f2a463f01fafa6a2f075afc0688e5"
] |
[
"train_mini_imagenet.py"
] |
[
"import torch.nn.functional as F\nimport torchvision.models as models\nimport torch.utils.data.distributed\nimport torch.utils.data\nimport torch.multiprocessing as mp\nimport torch.optim\nimport torch.distributed as dist\nimport torch.backends.cudnn as cudnn\nimport torch.nn.parallel\nimport torch.nn as nn\nimport torch\nimport warnings\nimport time\nimport random\nimport argparse\nimport os\n\n\nfrom models.convnet_mini import ConvNet\nfrom models.identity import Identity\nfrom utils import AverageMeter, save_checkpoint, compute_accuracy, weights_init_xavier, mkdir, euclidean_dist\nfrom samplers.episodic_batch_sampler import EpisodicBatchSampler\nfrom dataloaders.mini_imagenet_loader import MiniImageNet, ROOT_PATH\nfrom torch.utils.data import DataLoader\n\n\nmodel_names = sorted(name for name in models.__dict__ if name.islower() and not name.startswith(\"__\")\n and callable(models.__dict__[name]))\nmodel_names.append('default_convnet')\n\nparser = argparse.ArgumentParser(description='PyTorch Prototypical Networks Training')\nparser.add_argument('--splits_path', type=str, default=ROOT_PATH, help='path to dir with csv files containing train/dev/test examples')\nparser.add_argument('-a', '--arch', metavar='ARCH', default='resnet18',\n choices=model_names,\n help='model architecture: ' + ' | '.join(model_names) + ' (default: resnet18)')\nparser.add_argument('-j', '--workers', default=4, type=int, metavar='N',\n help='number of data loading workers (default: 4)')\nparser.add_argument('--epochs', default=90, type=int, metavar='N',\n help='number of total epochs to run')\nparser.add_argument('--start-epoch', default=0, type=int, metavar='N',\n help='manual epoch number (useful on restarts)')\nparser.add_argument('-b', '--batch-size', default=256, type=int,\n metavar='N',\n help='mini-batch size (default: 256), this is the total '\n 'batch size of all GPUs on the current node when '\n 'using Data Parallel or Distributed Data Parallel')\nparser.add_argument('--lr', '--learning-rate', default=0.1, type=float,\n metavar='LR', help='initial learning rate', dest='lr')\nparser.add_argument('--momentum', default=0.9, type=float, metavar='M',\n help='momentum')\nparser.add_argument('--wd', '--weight-decay', default=1e-4, type=float,\n metavar='W', help='weight decay (default: 1e-4)',\n dest='weight_decay')\nparser.add_argument('-p', '--print-freq', default=10, type=int,\n metavar='N', help='print frequency (default: 10)')\nparser.add_argument('--resume', default='', type=str, metavar='PATH',\n help='path to latest checkpoint (default: none)')\nparser.add_argument('--model_name', type=str, help='model_filename')\nparser.add_argument('-e', '--evaluate', dest='evaluate', action='store_true',\n help='evaluate model on validation set')\nparser.add_argument('--pretrained', dest='pretrained', action='store_true',\n help='use pre-trained model')\nparser.add_argument('--world-size', default=-1, type=int,\n help='number of nodes for distributed training')\nparser.add_argument('--rank', default=-1, type=int,\n help='node rank for distributed training')\nparser.add_argument('--dist-url', default='tcp://224.66.41.62:23456', type=str,\n help='url used to set up distributed training')\nparser.add_argument('--dist-backend', default='nccl', type=str,\n help='distributed backend')\nparser.add_argument('--seed', default=None, type=int,\n help='seed for initializing training.')\nparser.add_argument('--gpu', default=None, type=int,\n help='GPU id to use.')\nparser.add_argument('--multiprocessing-distributed', 
action='store_true',\n help='Use multi-processing distributed training to launch '\n 'N processes per node, which has N GPUs. This is the '\n 'fastest way to use PyTorch for either single node or '\n 'multi node data parallel training')\nparser.add_argument('-s', '--image_size', default=224, type=int, help='Image Size to load images')\nparser.add_argument('--n_episodes_train', default=200, type=int, help='Number of episodes per epoch at train')\nparser.add_argument('--n_way_train', default=10, type=int, help='Number of classes per episode at train')\nparser.add_argument('--n_query_train', default=1, type=int, help='Number of query samples at train')\nparser.add_argument('--n_support', default=5, type=int, help='Number of support samples')\nparser.add_argument('--n_episodes_val', default=200, type=int, help='Number of episodes per epoch at validation')\nparser.add_argument('--n_way_val', default=10, type=int, help='Number of classes per episode at validation')\nparser.add_argument('--n_query_val', default=1, type=int, help='Number of query samples at validation')\nparser.add_argument('--optimizer', default='sgd', type=str, help='Optimizer to use: \"adam\" or \"sgd\"')\nparser.add_argument('--step_size', default=30, type=int, help='Scheduler step size')\nparser.add_argument('--gamma', default=0.1, type=float, help='Scheduler gamma')\nparser.add_argument('--alpha', default=0.0, type=float, help='Controls the contribution from past prototypes in next'\n 'episodes')\nparser.add_argument('--out_dim', default=None, type=int, help='Output embedding dimension')\n\nbest_acc1 = 0\n\n\ndef main():\n args = parser.parse_args()\n global results_dir\n results_dir = os.path.join('models_trained', args.model_name)\n mkdir(results_dir)\n\n options = vars(args)\n save_options_dir = os.path.join(results_dir, 'options.txt')\n\n with open(save_options_dir, 'wt') as opt_file:\n opt_file.write('------------ Options -------------\\n')\n for k, v in sorted(options.items()):\n opt_file.write('%s: %s\\n' % (str(k), str(v)))\n opt_file.write('-------------- End ----------------\\n')\n\n if args.seed is not None:\n random.seed(args.seed)\n torch.manual_seed(args.seed)\n cudnn.deterministic = True\n warnings.warn('You have chosen to seed training. '\n 'This will turn on the CUDNN deterministic setting, '\n 'which can slow down your training considerably! '\n 'You may see unexpected behavior when restarting '\n 'from checkpoints.')\n\n if args.gpu is not None:\n warnings.warn('You have chosen a specific GPU. 
This will completely '\n 'disable data parallelism.')\n\n if args.dist_url == \"env://\" and args.world_size == -1:\n args.world_size = int(os.environ[\"WORLD_SIZE\"])\n\n args.distributed = args.world_size > 1 or args.multiprocessing_distributed\n\n ngpus_per_node = torch.cuda.device_count()\n if args.multiprocessing_distributed:\n # Since we have ngpus_per_node processes per node, the total world_size\n # needs to be adjusted accordingly\n args.world_size = ngpus_per_node * args.world_size\n # Use torch.multiprocessing.spawn to launch distributed processes: the\n # main_worker process function\n mp.spawn(main_worker, nprocs=ngpus_per_node, args=(ngpus_per_node, args))\n else:\n # Simply call main_worker function\n main_worker(args.gpu, ngpus_per_node, args)\n\n\ndef main_worker(gpu, ngpus_per_node, args):\n global best_acc1\n args.gpu = gpu\n cudnn.benchmark = True\n\n if args.gpu is not None:\n print(\"Use GPU: {} for training\".format(args.gpu))\n\n if args.distributed:\n if args.dist_url == \"env://\" and args.rank == -1:\n args.rank = int(os.environ[\"RANK\"])\n if args.multiprocessing_distributed:\n # For multiprocessing distributed training, rank needs to be the\n # global rank among all the processes\n args.rank = args.rank * ngpus_per_node + gpu\n dist.init_process_group(backend=args.dist_backend, init_method=args.dist_url,\n world_size=args.world_size, rank=args.rank)\n # Create model\n if args.arch == 'default_convnet':\n model = ConvNet()\n else:\n if args.pretrained:\n print(\"=> using pre-trained model '{}'\".format(args.arch))\n model = models.__dict__[args.arch](pretrained=True)\n else:\n print(\"=> creating model '{}'\".format(args.arch))\n model = models.__dict__[args.arch]()\n\n if args.out_dim is not None:\n lin = nn.Linear(model.fc.in_features, args.out_dim)\n weights_init_xavier(lin)\n model.fc = lin\n else:\n model.fc = Identity()\n\n print('Number of parameters: ', sum([p.numel() for p in model.parameters()]))\n\n if args.distributed:\n # For multiprocessing distributed, DistributedDataParallel constructor\n # should always set the single device scope, otherwise,\n # DistributedDataParallel will use all available devices.\n if args.gpu is not None:\n torch.cuda.set_device(args.gpu)\n model.cuda(args.gpu)\n # When using a single GPU per process and per\n # DistributedDataParallel, we need to divide the batch size\n # ourselves based on the total number of GPUs we have\n args.batch_size = int(args.batch_size / ngpus_per_node)\n args.workers = int(args.workers / ngpus_per_node)\n model = torch.nn.parallel.DistributedDataParallel(model, device_ids=[args.gpu])\n else:\n model.cuda()\n # DistributedDataParallel will divide and allocate batch_size to all\n # available GPUs if device_ids are not set\n model = torch.nn.parallel.DistributedDataParallel(model)\n elif args.gpu is not None:\n torch.cuda.set_device(args.gpu)\n model = model.cuda(args.gpu)\n else:\n # DataParallel will divide and allocate batch_size to all available GPUs\n if args.arch.startswith('alexnet') or args.arch.startswith('vgg'):\n model.features = torch.nn.DataParallel(model.features)\n model.cuda()\n else:\n model = torch.nn.DataParallel(model).cuda()\n\n # Define optimizer\n if args.optimizer == 'sgd':\n optimizer = torch.optim.SGD(model.parameters(), args.lr,\n momentum=args.momentum,\n weight_decay=args.weight_decay)\n elif args.optimizer == 'adam':\n optimizer = torch.optim.Adam(model.parameters(), args.lr)\n\n else:\n raise ValueError('Optimizer should be \"sgd\" or \"adam\"')\n\n lr_scheduler = 
torch.optim.lr_scheduler.StepLR(optimizer, step_size=args.step_size, gamma=args.gamma)\n\n # Optionally resume from a checkpoint\n if args.resume:\n if os.path.isfile(args.resume):\n print(\"=> loading checkpoint '{}'\".format(args.resume))\n checkpoint = torch.load(args.resume)\n args.start_epoch = checkpoint['epoch']\n best_acc1 = checkpoint['best_acc1']\n model.load_state_dict(checkpoint['state_dict'])\n optimizer.load_state_dict(checkpoint['optimizer'])\n print(\"=> loaded checkpoint '{}' (epoch {})\"\n .format(args.resume, checkpoint['epoch']))\n else:\n print(\"=> no checkpoint found at '{}'\".format(args.resume))\n\n # Data loading code\n train_dataset = MiniImageNet('train', args.splits_path)\n train_sampler = EpisodicBatchSampler(train_dataset.labels, args.n_episodes_train, args.n_way_train,\n args.n_support + args.n_query_train)\n train_loader = DataLoader(dataset=train_dataset, batch_sampler=train_sampler, num_workers=args.workers,\n pin_memory=True)\n\n val_dataset = MiniImageNet('val', args.splits_path)\n val_sampler = EpisodicBatchSampler(val_dataset.labels, args.n_episodes_val, args.n_way_val,\n args.n_support + args.n_query_val)\n val_loader = DataLoader(dataset=val_dataset, batch_sampler=val_sampler, num_workers=args.workers,\n pin_memory=True)\n\n if args.evaluate:\n validate(val_loader, model, args)\n return\n\n for epoch in range(args.start_epoch, args.epochs):\n lr_scheduler.step()\n if args.distributed:\n train_sampler.set_epoch(epoch)\n\n # Train for one epoch\n loss_t, acc_t = train(train_loader, model, optimizer, epoch, args)\n\n # Evaluate on validation set\n loss_val, acc1 = validate(val_loader, model, args)\n\n dict_metrics = {'loss_training': loss_t, 'loss_validation': loss_val,\n 'acc_training': acc_t, 'acc_validation': acc1}\n\n for key in dict_metrics:\n with open(os.path.join(results_dir, key + '.txt'), \"a+\") as myfile:\n myfile.write(str(dict_metrics[key]) + '\\n')\n\n # Remember best acc@1 and save checkpoint\n is_best = acc1 > best_acc1\n best_acc1 = max(acc1, best_acc1)\n\n if not args.multiprocessing_distributed or (args.multiprocessing_distributed\n and args.rank % ngpus_per_node == 0):\n print('Saving model...')\n if args.gpu is None:\n save_checkpoint({\n 'epoch': epoch + 1,\n 'arch': args.arch,\n 'state_dict': model.module.state_dict(),\n 'best_acc1': best_acc1,\n 'optimizer': optimizer.state_dict(),\n }, is_best, results_dir)\n else:\n save_checkpoint({\n 'epoch': epoch + 1,\n 'arch': args.arch,\n 'state_dict': model.state_dict(),\n 'best_acc1': best_acc1,\n 'optimizer': optimizer.state_dict(),\n }, is_best, results_dir)\n\n\ndef train(train_loader, model, optimizer, epoch, args):\n print(\"Training epoch %d\" % epoch)\n episode_time = AverageMeter()\n data_time = AverageMeter()\n losses = AverageMeter()\n accuracy = AverageMeter()\n\n # Switch to train mode\n model.train()\n\n end = time.time()\n\n optimizer.zero_grad()\n\n # Iterate over episodes\n for n_episode, batch in enumerate(train_loader, 1):\n data_time.update(time.time() - end)\n data, _ = [_.cuda(non_blocking=True) for _ in batch]\n p = args.n_support * args.n_way_train\n data_support, data_query = data[:p], data[p:]\n\n # Compute class prototypes (n_way, output_dim)\n if n_episode > 1 and args.alpha > 0.0:\n class_prototypes = args.alpha * class_prototypes + (1 - args.alpha) * \\\n model(data_support).reshape(args.n_support, args.n_way_train, -1).mean(dim=0)\n else:\n class_prototypes = model(data_support).reshape(args.n_support, args.n_way_train, -1).mean(dim=0)\n\n # Generate 
labels (n_way, n_query)\n labels = torch.arange(args.n_way_train).repeat(args.n_query_train)\n labels = labels.type(torch.cuda.LongTensor)\n\n # Compute loss and metrics\n logits = euclidean_dist(model(data_query), class_prototypes)\n loss = F.cross_entropy(logits, labels)\n acc = compute_accuracy(logits, labels)\n\n # Record loss and accuracy\n losses.update(loss.item(), data_query.size(0))\n accuracy.update(acc, data_query.size(0))\n\n # Compute gradient and do SGD step\n loss.backward()\n optimizer.step()\n optimizer.zero_grad()\n\n # Free the graph\n if args.alpha > 0.0:\n class_prototypes = class_prototypes.detach()\n else:\n class_prototypes = None\n\n # Measure elapsed time\n episode_time.update(time.time() - end)\n end = time.time()\n\n if n_episode % args.print_freq == 0:\n print('Epoch: [{0}][{1}/{2}]\\t'\n 'Episode Time {episode_time.val:.3f} ({episode_time.avg:.3f})\\t'\n 'Data Time {data_time.val:.3f} ({data_time.avg:.3f})\\t'\n 'Loss {loss.val:.4f} ({loss.avg:.4f})\\t'\n 'Accuracy {accuracy.val:.3f} ({accuracy.avg:.3f})\\t'\n .format(\n epoch, n_episode, args.n_episodes_train, episode_time=episode_time,\n data_time=data_time, loss=losses, accuracy=accuracy))\n\n return losses.avg, accuracy.avg\n\n\ndef validate(val_loader, model, args):\n print('Validating...')\n losses = AverageMeter()\n accuracy = AverageMeter()\n\n # Switch to evaluate mode\n model.eval()\n\n with torch.no_grad():\n for n_episode, batch in enumerate(val_loader, 1):\n data, _ = [_.cuda(non_blocking=True) for _ in batch]\n p = args.n_support * args.n_way_val\n data_support, data_query = data[:p], data[p:]\n\n # Compute class prototypes (n_way, output_dim)\n class_prototypes = model(data_support).reshape(args.n_support, args.n_way_val, -1).mean(dim=0)\n\n # Generate labels (n_way, n_query)\n labels = torch.arange(args.n_way_val).repeat(args.n_query_val)\n labels = labels.type(torch.cuda.LongTensor)\n\n # Compute loss and metrics\n logits = euclidean_dist(model(data_query), class_prototypes)\n loss = F.cross_entropy(logits, labels)\n acc = compute_accuracy(logits, labels)\n\n # Record loss and accuracy\n losses.update(loss.item(), data_query.size(0))\n accuracy.update(acc, data_query.size(0))\n\n print('Validation Loss {loss.val:.4f} ({loss.avg:.4f})\\t'\n 'Validation Accuracy {accuracy.val:.3f} ({accuracy.avg:.3f})\\t'.format(loss=losses, accuracy=accuracy))\n\n return losses.avg, accuracy.avg\n\n\nif __name__ == '__main__':\n main()\n"
] |
[
[
"torch.distributed.init_process_group",
"torch.multiprocessing.spawn",
"torch.cuda.set_device",
"torch.load",
"torch.manual_seed",
"torch.nn.functional.cross_entropy",
"torch.utils.data.DataLoader",
"torch.nn.Linear",
"torch.nn.DataParallel",
"torch.no_grad",
"torch.arange",
"torch.cuda.device_count",
"torch.nn.parallel.DistributedDataParallel",
"torch.optim.lr_scheduler.StepLR"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
francisyyan/Detectron
|
[
"e38bf9692792f70b6cfbe6683b61f6e96cb63af0"
] |
[
"detectron/utils/vis.py"
] |
[
"# Copyright (c) 2017-present, Facebook, Inc.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n##############################################################################\n\n\"\"\"Detection output visualization module.\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\nfrom __future__ import unicode_literals\n\nimport cv2\nimport numpy as np\nimport os\n\nimport pycocotools.mask as mask_util\n\nfrom detectron.utils.colormap import colormap\nimport detectron.utils.env as envu\nimport detectron.utils.keypoints as keypoint_utils\n\n# Matplotlib requires certain adjustments in some environments\n# Must happen before importing matplotlib\nenvu.set_up_matplotlib()\nimport matplotlib.pyplot as plt\nfrom matplotlib.patches import Polygon\n\nplt.rcParams['pdf.fonttype'] = 42 # For editing in Adobe Illustrator\n\n\n_GRAY = (218, 227, 218)\n_GREEN = (18, 127, 15)\n_WHITE = (255, 255, 255)\n\n\ndef kp_connections(keypoints):\n kp_lines = [\n [keypoints.index('left_eye'), keypoints.index('right_eye')],\n [keypoints.index('left_eye'), keypoints.index('nose')],\n [keypoints.index('right_eye'), keypoints.index('nose')],\n [keypoints.index('right_eye'), keypoints.index('right_ear')],\n [keypoints.index('left_eye'), keypoints.index('left_ear')],\n [keypoints.index('right_shoulder'), keypoints.index('right_elbow')],\n [keypoints.index('right_elbow'), keypoints.index('right_wrist')],\n [keypoints.index('left_shoulder'), keypoints.index('left_elbow')],\n [keypoints.index('left_elbow'), keypoints.index('left_wrist')],\n [keypoints.index('right_hip'), keypoints.index('right_knee')],\n [keypoints.index('right_knee'), keypoints.index('right_ankle')],\n [keypoints.index('left_hip'), keypoints.index('left_knee')],\n [keypoints.index('left_knee'), keypoints.index('left_ankle')],\n [keypoints.index('right_shoulder'), keypoints.index('left_shoulder')],\n [keypoints.index('right_hip'), keypoints.index('left_hip')],\n ]\n return kp_lines\n\n\ndef convert_from_cls_format(cls_boxes, cls_segms, cls_keyps):\n \"\"\"Convert from the class boxes/segms/keyps format generated by the testing\n code.\n \"\"\"\n box_list = [b for b in cls_boxes if len(b) > 0]\n if len(box_list) > 0:\n boxes = np.concatenate(box_list)\n else:\n boxes = None\n if cls_segms is not None:\n segms = [s for slist in cls_segms for s in slist]\n else:\n segms = None\n if cls_keyps is not None:\n keyps = [k for klist in cls_keyps for k in klist]\n else:\n keyps = None\n classes = []\n for j in range(len(cls_boxes)):\n classes += [j] * len(cls_boxes[j])\n return boxes, segms, keyps, classes\n\n\ndef get_class_string(class_index, score, dataset):\n class_text = dataset.classes[class_index] if dataset is not None else \\\n 'id{:d}'.format(class_index)\n return class_text + ' {:0.2f}'.format(score).lstrip('0')\n\n\ndef vis_mask(img, mask, col, alpha=0.4, show_border=True, border_thick=1):\n \"\"\"Visualizes a single binary mask.\"\"\"\n\n img = img.astype(np.float32)\n idx = 
np.nonzero(mask)\n\n img[idx[0], idx[1], :] *= 1.0 - alpha\n img[idx[0], idx[1], :] += alpha * col\n\n if show_border:\n contours = cv2.findContours(\n mask.copy(), cv2.RETR_CCOMP, cv2.CHAIN_APPROX_NONE)[-2]\n cv2.drawContours(img, contours, -1, _WHITE, border_thick, cv2.LINE_AA)\n\n return img.astype(np.uint8)\n\n\ndef vis_class(img, pos, class_str, font_scale=0.35):\n \"\"\"Visualizes the class.\"\"\"\n img = img.astype(np.uint8)\n x0, y0 = int(pos[0]), int(pos[1])\n # Compute text size.\n txt = class_str\n font = cv2.FONT_HERSHEY_SIMPLEX\n ((txt_w, txt_h), _) = cv2.getTextSize(txt, font, font_scale, 1)\n # Place text background.\n back_tl = x0, y0 - int(1.3 * txt_h)\n back_br = x0 + txt_w, y0\n cv2.rectangle(img, back_tl, back_br, _GREEN, -1)\n # Show text.\n txt_tl = x0, y0 - int(0.3 * txt_h)\n cv2.putText(img, txt, txt_tl, font, font_scale, _GRAY, lineType=cv2.LINE_AA)\n return img\n\n\ndef vis_bbox(img, bbox, thick=1):\n \"\"\"Visualizes a bounding box.\"\"\"\n img = img.astype(np.uint8)\n (x0, y0, w, h) = bbox\n x1, y1 = int(x0 + w), int(y0 + h)\n x0, y0 = int(x0), int(y0)\n cv2.rectangle(img, (x0, y0), (x1, y1), _GREEN, thickness=thick)\n return img\n\n\ndef vis_keypoints(img, kps, kp_thresh=2, alpha=0.7):\n \"\"\"Visualizes keypoints (adapted from vis_one_image).\n kps has shape (4, #keypoints) where 4 rows are (x, y, logit, prob).\n \"\"\"\n dataset_keypoints, _ = keypoint_utils.get_keypoints()\n kp_lines = kp_connections(dataset_keypoints)\n\n # Convert from plt 0-1 RGBA colors to 0-255 BGR colors for opencv.\n cmap = plt.get_cmap('rainbow')\n colors = [cmap(i) for i in np.linspace(0, 1, len(kp_lines) + 2)]\n colors = [(c[2] * 255, c[1] * 255, c[0] * 255) for c in colors]\n\n # Perform the drawing on a copy of the image, to allow for blending.\n kp_mask = np.copy(img)\n\n # Draw mid shoulder / mid hip first for better visualization.\n mid_shoulder = (\n kps[:2, dataset_keypoints.index('right_shoulder')] +\n kps[:2, dataset_keypoints.index('left_shoulder')]) / 2.0\n sc_mid_shoulder = np.minimum(\n kps[2, dataset_keypoints.index('right_shoulder')],\n kps[2, dataset_keypoints.index('left_shoulder')])\n mid_hip = (\n kps[:2, dataset_keypoints.index('right_hip')] +\n kps[:2, dataset_keypoints.index('left_hip')]) / 2.0\n sc_mid_hip = np.minimum(\n kps[2, dataset_keypoints.index('right_hip')],\n kps[2, dataset_keypoints.index('left_hip')])\n nose_idx = dataset_keypoints.index('nose')\n if sc_mid_shoulder > kp_thresh and kps[2, nose_idx] > kp_thresh:\n cv2.line(\n kp_mask, tuple(mid_shoulder), tuple(kps[:2, nose_idx]),\n color=colors[len(kp_lines)], thickness=2, lineType=cv2.LINE_AA)\n if sc_mid_shoulder > kp_thresh and sc_mid_hip > kp_thresh:\n cv2.line(\n kp_mask, tuple(mid_shoulder), tuple(mid_hip),\n color=colors[len(kp_lines) + 1], thickness=2, lineType=cv2.LINE_AA)\n\n # Draw the keypoints.\n for l in range(len(kp_lines)):\n i1 = kp_lines[l][0]\n i2 = kp_lines[l][1]\n p1 = kps[0, i1], kps[1, i1]\n p2 = kps[0, i2], kps[1, i2]\n if kps[2, i1] > kp_thresh and kps[2, i2] > kp_thresh:\n cv2.line(\n kp_mask, p1, p2,\n color=colors[l], thickness=2, lineType=cv2.LINE_AA)\n if kps[2, i1] > kp_thresh:\n cv2.circle(\n kp_mask, p1,\n radius=3, color=colors[l], thickness=-1, lineType=cv2.LINE_AA)\n if kps[2, i2] > kp_thresh:\n cv2.circle(\n kp_mask, p2,\n radius=3, color=colors[l], thickness=-1, lineType=cv2.LINE_AA)\n\n # Blend the keypoints.\n return cv2.addWeighted(img, 1.0 - alpha, kp_mask, alpha, 0)\n\n\ndef vis_one_image_opencv(\n im, boxes, segms=None, keypoints=None, thresh=0.9, 
kp_thresh=2,\n show_box=False, dataset=None, show_class=False):\n \"\"\"Constructs a numpy array with the detections visualized.\"\"\"\n\n if isinstance(boxes, list):\n boxes, segms, keypoints, classes = convert_from_cls_format(\n boxes, segms, keypoints)\n\n if boxes is None or boxes.shape[0] == 0 or max(boxes[:, 4]) < thresh:\n return im\n\n if segms is not None and len(segms) > 0:\n masks = mask_util.decode(segms)\n color_list = colormap()\n mask_color_id = 0\n\n # Display in largest to smallest order to reduce occlusion\n areas = (boxes[:, 2] - boxes[:, 0]) * (boxes[:, 3] - boxes[:, 1])\n sorted_inds = np.argsort(-areas)\n\n for i in sorted_inds:\n bbox = boxes[i, :4]\n score = boxes[i, -1]\n if score < thresh:\n continue\n\n # show box (off by default)\n if show_box:\n im = vis_bbox(\n im, (bbox[0], bbox[1], bbox[2] - bbox[0], bbox[3] - bbox[1]))\n\n # show class (off by default)\n if show_class:\n class_str = get_class_string(classes[i], score, dataset)\n im = vis_class(im, (bbox[0], bbox[1] - 2), class_str)\n\n # show mask\n if segms is not None and len(segms) > i:\n color_mask = color_list[mask_color_id % len(color_list), 0:3]\n mask_color_id += 1\n im = vis_mask(im, masks[..., i], color_mask)\n\n # show keypoints\n if keypoints is not None and len(keypoints) > i:\n im = vis_keypoints(im, keypoints[i], kp_thresh)\n\n return im\n\n\ndef vis_one_image(\n im, im_name, output_dir, boxes, segms=None, keypoints=None, thresh=0.9,\n kp_thresh=2, dpi=200, box_alpha=0.0, dataset=None, show_class=False,\n ext='pdf', out_when_no_box=False):\n \"\"\"Visual debugging of detections.\"\"\"\n if not os.path.exists(output_dir):\n os.makedirs(output_dir)\n\n if isinstance(boxes, list):\n boxes, segms, keypoints, classes = convert_from_cls_format(\n boxes, segms, keypoints)\n\n if (boxes is None or boxes.shape[0] == 0 or max(boxes[:, 4]) < thresh) and not out_when_no_box:\n return\n\n dataset_keypoints, _ = keypoint_utils.get_keypoints()\n\n if segms is not None and len(segms) > 0:\n masks = mask_util.decode(segms)\n\n color_list = colormap(rgb=True) / 255\n\n kp_lines = kp_connections(dataset_keypoints)\n cmap = plt.get_cmap('rainbow')\n colors = [cmap(i) for i in np.linspace(0, 1, len(kp_lines) + 2)]\n\n fig = plt.figure(frameon=False)\n fig.set_size_inches(im.shape[1] / dpi, im.shape[0] / dpi)\n ax = plt.Axes(fig, [0., 0., 1., 1.])\n ax.axis('off')\n fig.add_axes(ax)\n ax.imshow(im)\n\n if boxes is None:\n sorted_inds = [] # avoid crash when 'boxes' is None\n else:\n # Display in largest to smallest order to reduce occlusion\n areas = (boxes[:, 2] - boxes[:, 0]) * (boxes[:, 3] - boxes[:, 1])\n sorted_inds = np.argsort(-areas)\n\n mask_color_id = 0\n for i in sorted_inds:\n bbox = boxes[i, :4]\n score = boxes[i, -1]\n if score < thresh:\n continue\n\n # show box (off by default)\n ax.add_patch(\n plt.Rectangle((bbox[0], bbox[1]),\n bbox[2] - bbox[0],\n bbox[3] - bbox[1],\n fill=False, edgecolor='g',\n linewidth=0.5, alpha=box_alpha))\n\n if show_class:\n ax.text(\n bbox[0], bbox[1] - 2,\n get_class_string(classes[i], score, dataset),\n fontsize=3,\n family='serif',\n bbox=dict(\n facecolor='g', alpha=0.4, pad=0, edgecolor='none'),\n color='white')\n\n # show mask\n if segms is not None and len(segms) > i:\n img = np.ones(im.shape)\n color_mask = color_list[mask_color_id % len(color_list), 0:3]\n mask_color_id += 1\n\n w_ratio = .4\n for c in range(3):\n color_mask[c] = color_mask[c] * (1 - w_ratio) + w_ratio\n for c in range(3):\n img[:, :, c] = color_mask[c]\n e = masks[:, :, i]\n\n contour = 
cv2.findContours(\n e.copy(), cv2.RETR_CCOMP, cv2.CHAIN_APPROX_NONE)[-2]\n\n for c in contour:\n polygon = Polygon(\n c.reshape((-1, 2)),\n fill=True, facecolor=color_mask,\n edgecolor='w', linewidth=1.2,\n alpha=0.5)\n ax.add_patch(polygon)\n\n # show keypoints\n if keypoints is not None and len(keypoints) > i:\n kps = keypoints[i]\n plt.autoscale(False)\n for l in range(len(kp_lines)):\n i1 = kp_lines[l][0]\n i2 = kp_lines[l][1]\n if kps[2, i1] > kp_thresh and kps[2, i2] > kp_thresh:\n x = [kps[0, i1], kps[0, i2]]\n y = [kps[1, i1], kps[1, i2]]\n line = plt.plot(x, y)\n plt.setp(line, color=colors[l], linewidth=1.0, alpha=0.7)\n if kps[2, i1] > kp_thresh:\n plt.plot(\n kps[0, i1], kps[1, i1], '.', color=colors[l],\n markersize=3.0, alpha=0.7)\n\n if kps[2, i2] > kp_thresh:\n plt.plot(\n kps[0, i2], kps[1, i2], '.', color=colors[l],\n markersize=3.0, alpha=0.7)\n\n # add mid shoulder / mid hip for better visualization\n mid_shoulder = (\n kps[:2, dataset_keypoints.index('right_shoulder')] +\n kps[:2, dataset_keypoints.index('left_shoulder')]) / 2.0\n sc_mid_shoulder = np.minimum(\n kps[2, dataset_keypoints.index('right_shoulder')],\n kps[2, dataset_keypoints.index('left_shoulder')])\n mid_hip = (\n kps[:2, dataset_keypoints.index('right_hip')] +\n kps[:2, dataset_keypoints.index('left_hip')]) / 2.0\n sc_mid_hip = np.minimum(\n kps[2, dataset_keypoints.index('right_hip')],\n kps[2, dataset_keypoints.index('left_hip')])\n if (sc_mid_shoulder > kp_thresh and\n kps[2, dataset_keypoints.index('nose')] > kp_thresh):\n x = [mid_shoulder[0], kps[0, dataset_keypoints.index('nose')]]\n y = [mid_shoulder[1], kps[1, dataset_keypoints.index('nose')]]\n line = plt.plot(x, y)\n plt.setp(\n line, color=colors[len(kp_lines)], linewidth=1.0, alpha=0.7)\n if sc_mid_shoulder > kp_thresh and sc_mid_hip > kp_thresh:\n x = [mid_shoulder[0], mid_hip[0]]\n y = [mid_shoulder[1], mid_hip[1]]\n line = plt.plot(x, y)\n plt.setp(\n line, color=colors[len(kp_lines) + 1], linewidth=1.0,\n alpha=0.7)\n\n output_name = os.path.splitext(os.path.basename(im_name))[0] + '.' + ext\n fig.savefig(os.path.join(output_dir, '{}'.format(output_name)), dpi=dpi)\n plt.close('all')\n"
] |
[
[
"matplotlib.pyplot.Rectangle",
"numpy.nonzero",
"matplotlib.pyplot.autoscale",
"matplotlib.pyplot.Axes",
"matplotlib.pyplot.get_cmap",
"numpy.ones",
"numpy.concatenate",
"matplotlib.pyplot.plot",
"numpy.copy",
"matplotlib.pyplot.setp",
"matplotlib.pyplot.close",
"numpy.argsort",
"matplotlib.pyplot.figure"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
juanmc2005/MetricAMI
|
[
"8cb9fbe8dcf5303f1b44007f03492e065e867caf"
] |
[
"losses/center.py"
] |
[
"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\n\n\nclass SoftmaxCenterLoss(nn.Module):\n \"\"\"\n Cross Entropy + Center Loss module\n Reference: https://kpzhang93.github.io/papers/eccv2016.pdf\n :param device: a device in which to run the computation\n :param nfeat: the number of features in the embedding\n :param nclass: the number of classes\n :param loss_weight: a value for the lambda parameter described in the paper,\n to use as weight for the center loss\n :param distance: a distance object to use when calculating distances to the centers\n \"\"\"\n \n def __init__(self, device, nfeat, nclass, loss_weight, distance):\n super(SoftmaxCenterLoss, self).__init__()\n self.loss_weight = loss_weight\n self.center = CenterLoss(nclass, nfeat, distance).to(device)\n self.nll = nn.NLLLoss().to(device)\n \n def forward(self, feat, logits, y):\n \"\"\"\n Calculate the total center loss, with cross entropy supervision\n :param feat: a tensor corresponding to an embedding batch of size (N, d), where\n N = batch size\n d = dimension of the feature vectors\n :param logits: a tensor corresponding to a logits batch of size (N, c), where\n N = batch size\n c = number of classes\n :param y: a non one-hot label tensor corresponding to the batch\n :return: the loss value for this batch\n \"\"\"\n return self.nll(logits, y) + self.loss_weight * self.center(feat, logits, y)\n \n def center_parameters(self):\n return self.center.parameters()\n\n\nclass CenterLoss(nn.Module):\n \"\"\"\n Center Loss module\n Reference: https://kpzhang93.github.io/papers/eccv2016.pdf\n :param nfeat: the number of features in the embedding\n :param nclass: the number of classes\n :param distance: a distance object to use when calculating distances to the centers\n \"\"\"\n \n def __init__(self, nclass, nfeat, distance):\n super(CenterLoss, self).__init__()\n self.centers = nn.Parameter(torch.randn(nclass, nfeat))\n self.nfeat = nfeat\n self.distance = distance\n \n def forward(self, feat, logits, y):\n \"\"\"\n Calculate the center loss\n :param feat: a tensor corresponding to an embedding batch of size (N, d), where\n N = batch size\n d = dimension of the feature vectors\n :param logits: unused, it's been kept for compatibility purposes\n :param y: a non one-hot label tensor corresponding to the batch\n :return: the center loss value for this batch\n \"\"\"\n batch_size = feat.size(0)\n feat = feat.view(batch_size, -1)\n # Select appropriate centers for this batch's labels\n centers_batch = self.centers.index_select(0, y.long())\n # Return the sum of the squared distance normalized by the batch size\n return self.distance.sqdist_sum(feat, centers_batch) / 2.0 / batch_size\n\n\nclass CenterLinear(nn.Module):\n \"\"\"\n Center linear layer module\n Reference: https://kpzhang93.github.io/papers/eccv2016.pdf\n :param nfeat: the number of features in the embedding\n :param nclass: the number of classes\n \"\"\"\n \n def __init__(self, nfeat, nclass):\n super(CenterLinear, self).__init__()\n # No bias to distribute centers in a circular manner (for euclidean distance)\n self.linear = nn.Linear(nfeat, nclass, bias=False)\n \n def forward(self, x, y):\n \"\"\"\n Apply the linear transformation and softmax\n :param x: an embedding batch\n :param y: a non one-hot label batch\n :return: a tensor with class probabilities for this batch\n \"\"\"\n return F.log_softmax(self.linear(x), dim=1)\n\n def predict(self, x):\n return self.forward(x, None)\n"
] |
[
[
"torch.nn.Linear",
"torch.nn.NLLLoss",
"torch.randn"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
MTandHJ/amoc
|
[
"0219f560936a23a28b70042ca85dc178000b1f43"
] |
[
"linear_std.py"
] |
[
"#!/usr/bin/env python\n\nimport torch\nimport torch.nn as nn\nimport argparse\nfrom src.loadopts import *\n\n\n\n\nMETHOD = \"LinearSTD\"\nSAVE_FREQ = 10\nFMT = \"{description}={finetune}-{bn_adv}={learning_policy}-{optim}-{lr}={epochs}-{batch_size}={transform}\"\n\nparser = argparse.ArgumentParser()\nparser.add_argument(\"model\", type=str)\nparser.add_argument(\"dataset\", type=str)\nparser.add_argument(\"--info_path\", type=str, default=\"baseline\", \n help=\"If no info path is supported, normal training process will be applied.\")\nparser.add_argument(\"--finetune\", action=\"store_true\", default=False)\nparser.add_argument(\"--bn_adv\", action=\"store_false\", default=True)\n\n# basic settings\nparser.add_argument(\"--optim\", type=str, choices=(\"sgd\", \"adam\"), default=\"sgd\")\nparser.add_argument(\"-mom\", \"--momentum\", type=float, default=0.9,\n help=\"the momentum used for SGD\")\nparser.add_argument(\"-beta1\", \"--beta1\", type=float, default=0.9,\n help=\"the first beta argument for Adam\")\nparser.add_argument(\"-beta2\", \"--beta2\", type=float, default=0.999,\n help=\"the second beta argument for Adam\")\nparser.add_argument(\"-wd\", \"--weight_decay\", type=float, default=2e-4,\n help=\"weight decay\")\nparser.add_argument(\"-lr\", \"--lr\", \"--LR\", \"--learning_rate\", type=float, default=.1)\nparser.add_argument(\"-lp\", \"--learning_policy\", type=str, default=\"FC\", \n help=\"learning rate schedule defined in config.py\")\nparser.add_argument(\"--epochs\", type=int, default=25,\n help=\"Suggestion: FC-25, TOTAL-40, AT-200, TRADES-76\")\nparser.add_argument(\"-b\", \"--batch_size\", type=int, default=128)\nparser.add_argument(\"--transform\", type=str, default='default', \n help=\"the data augmentation which will be applied in training mode.\")\nparser.add_argument(\"--resume\", action=\"store_true\", default=False)\nparser.add_argument(\"--progress\", action=\"store_true\", default=False, \n help=\"show the progress if true\")\nparser.add_argument(\"--seed\", type=int, default=1)\nparser.add_argument(\"-m\", \"--description\", type=str, default=\"train\")\nopts = parser.parse_args()\nopts.description = FMT.format(**opts.__dict__)\n\n\n\ndef load_cfg():\n from src.base import LinearCoach\n from src.dict2obj import Config\n from src.utils import gpu, load, load_checkpoint, set_seed\n from models.components import FC, Wrapper\n\n cfg = Config()\n set_seed(opts.seed)\n\n # load the model\n arch, dim_feature = load_model(opts.model)\n fc = FC(\n dim_feature=dim_feature,\n num_classes=get_num_classes(opts.dataset)\n )\n arch = arch()\n model = Wrapper(arch=arch, fc=fc)\n device = gpu(model)\n if opts.info_path == \"baseline\":\n print(\"Warning: No info path is provided and normal training process will be applied.\")\n assert opts.finetune, \"Try normal training but finetune is false!\"\n else:\n load( # load the state dict\n model=arch, \n filename=opts.info_path + \"/paras.pt\", \n device=device, strict=True\n )\n\n # load the dataset\n trainset = load_dataset(\n dataset_type=opts.dataset,\n transform=opts.transform,\n train=True\n )\n cfg['trainloader'] = load_dataloader(\n dataset=trainset, \n batch_size=opts.batch_size, \n train=True,\n show_progress=opts.progress\n )\n testset = load_dataset(\n dataset_type=opts.dataset, \n transform=opts.transform,\n train=False\n )\n cfg['testloader'] = load_dataloader(\n dataset=testset, \n batch_size=opts.batch_size, \n train=False,\n show_progress=opts.progress\n )\n normalizer = 
load_normalizer(dataset_type=opts.dataset)\n\n # If finetune is True, we will train the whole model otherwise the linear classifier only.\n if opts.finetune:\n optimizer = load_optimizer(\n model=model, optim_type=opts.optim, lr=opts.lr,\n momentum=opts.momentum, betas=(opts.beta1, opts.beta2),\n weight_decay=opts.weight_decay\n )\n learning_policy = load_learning_policy(\n optimizer=optimizer, \n learning_policy_type=opts.learning_policy, \n T_max=opts.epochs\n )\n else:\n optimizer = load_optimizer(\n model=model.fc, optim_type=opts.optim, lr=opts.lr,\n momentum=opts.momentum, betas=(opts.beta1, opts.beta2),\n weight_decay=opts.weight_decay\n )\n learning_policy = load_learning_policy(\n optimizer=optimizer, \n learning_policy_type=opts.learning_policy, \n T_max=opts.epochs\n )\n for name, param in model.named_parameters():\n if not name.startswith(\"fc\"):\n param.requires_grad_(False)\n\n # generate the path for logging information and saving parameters\n cfg['info_path'], log_path = generate_path(\n method=METHOD, dataset_type=opts.dataset, \n model=opts.model, description=opts.description\n )\n if opts.resume:\n cfg['start_epoch'] = load_checkpoint(\n path=cfg.info_path, model=model, \n optimizer=optimizer, lr_scheduler=learning_policy\n )\n else:\n cfg['start_epoch'] = 0\n\n cfg['coach'] = LinearCoach(\n model=model, device=device,\n normalizer=normalizer, optimizer=optimizer,\n learning_policy=learning_policy\n )\n\n # for validation\n cfg['valider'] = load_valider(\n model=model, device=device, dataset_type=opts.dataset\n )\n\n return cfg, log_path\n\n\ndef main(\n coach, valider, \n trainloader, testloader, start_epoch,\n info_path\n):\n from src.utils import save_checkpoint\n for epoch in range(start_epoch, opts.epochs):\n\n if epoch % SAVE_FREQ == 0:\n save_checkpoint(info_path, coach.model, coach.optimizer, coach.learning_policy, epoch)\n\n running_loss = coach.train(\n trainloader, \n epoch=epoch,\n finetune=opts.finetune,\n bn_adv=opts.bn_adv\n )\n writter.add_scalar(\"Loss\", running_loss, epoch)\n\n train_accuracy, train_success = valider.evaluate(trainloader, bn_adv=opts.bn_adv)\n valid_accuracy, valid_success = valider.evaluate(testloader, bn_adv=opts.bn_adv)\n print(f\"[Train] TA: {train_accuracy:.4f} RA: {1-train_success:.4f}\")\n print(f\"[Test] TA: {valid_accuracy:.4f} RA: {1-valid_success:.4f}\")\n writter.add_scalars(\"Accuracy\", {\"train\": train_accuracy, \"valid\": valid_accuracy}, opts.epochs)\n writter.add_scalars(\"Success\", {\"train\": train_success, \"valid\": valid_success}, opts.epochs)\n\n\nif __name__ == \"__main__\":\n from torch.utils.tensorboard import SummaryWriter\n from src.utils import mkdirs, readme\n cfg, log_path = load_cfg()\n mkdirs(cfg.info_path, log_path)\n readme(cfg.info_path, opts)\n readme(log_path, opts, mode=\"a\")\n writter = SummaryWriter(log_dir=log_path, filename_suffix=METHOD)\n\n main(**cfg)\n\n cfg['coach'].save(cfg.info_path)\n writter.close()\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n"
] |
[
[
"torch.utils.tensorboard.SummaryWriter"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
dcodos/247scraper
|
[
"ca841376ceb24711c059a9704398302ad4e47e3a"
] |
[
"neural_net.py"
] |
[
"import sqlite3\nimport tensorflow as tf\nimport numpy as np\nfrom geopy import geocoders\nimport random\nfrom recruit_dao import *\n\nschools = []\nn_nodes_hl1 = 400\nn_nodes_hl2 = 400\nn_nodes_hl3 = 400\nn_classes = 419\nbatch_size = 100\n\nx = tf.placeholder('float', [None, 481])\ny = tf.placeholder('float')\n\ndef neural_network_model(data):\n hidden_1_layer = {'weights':tf.Variable(tf.random_normal([481, n_nodes_hl1])),\n 'biases':tf.Variable(tf.random_normal([n_nodes_hl1]))}\n\n hidden_2_layer = {'weights':tf.Variable(tf.random_normal([n_nodes_hl1, n_nodes_hl2])),\n 'biases':tf.Variable(tf.random_normal([n_nodes_hl2]))}\n\n hidden_3_layer = {'weights':tf.Variable(tf.random_normal([n_nodes_hl2, n_nodes_hl3])),\n 'biases':tf.Variable(tf.random_normal([n_nodes_hl3]))}\n\n output_layer = {'weights':tf.Variable(tf.random_normal([n_nodes_hl3, n_classes])),\n 'biases':tf.Variable(tf.random_normal([n_classes])),}\n\n l1 = tf.add(tf.matmul(data,hidden_1_layer['weights']), hidden_1_layer['biases'])\n l1 = tf.nn.relu(l1)\n\n l2 = tf.add(tf.matmul(l1,hidden_2_layer['weights']), hidden_2_layer['biases'])\n l2 = tf.nn.relu(l2)\n\n l3 = tf.add(tf.matmul(l2,hidden_3_layer['weights']), hidden_3_layer['biases'])\n l3 = tf.nn.relu(l3)\n\n output = tf.matmul(l3,output_layer['weights']) + output_layer['biases']\n\n return output\n\n\ndef train_neural_network(x):\n prediction = neural_network_model(x)\n cost = tf.reduce_mean( tf.nn.softmax_cross_entropy_with_logits(prediction,y) )\n optimizer = tf.train.AdamOptimizer(learning_rate=0.001).minimize(cost)\n\n hm_epochs = 15\n with tf.Session() as sess:\n sess.run(tf.initialize_all_variables())\n\n for epoch in range(hm_epochs):\n epoch_loss = 0\n i = 0\n while i < len(train_x):\n start = i\n end = i + batch_size\n batch_x = np.array(train_x[start:end])\n batch_y = np.array(train_y[start:end])\n _, c = sess.run([optimizer, cost], feed_dict={x: batch_x, y: batch_y})\n epoch_loss += c\n i += batch_size\n\n print('Epoch', epoch, 'completed out of',hm_epochs,'loss:',epoch_loss)\n\n correct = tf.equal(tf.argmax(prediction, 1), tf.argmax(y, 1))\n\n accuracy = tf.reduce_mean(tf.cast(correct, 'float'))\n print('Accuracy:',accuracy.eval({x:test_x, y:test_y}))\n\n\ndef split_data(featureset, test_percentage):\n testing_size = int(test_percentage*len(featureset))\n train_x = list(featureset[:,0][:-testing_size])\n train_y = list(featureset[:,1][:-testing_size])\n test_x = list(featureset[:,0][-testing_size:])\n test_y = list(featureset[:,1][-testing_size:])\n return train_x, train_y, test_x, test_y\n\nif __name__ == \"__main__\":\n print(\"Getting classes\")\n print(\"Getting data\")\n features = get_data()\n random.shuffle(features)\n features = np.array(features)\n train_x, train_y, test_x, test_y = split_data(features, 0.1)\n\n train_neural_network(x)\n"
] |
[
[
"tensorflow.nn.relu",
"tensorflow.matmul",
"tensorflow.nn.softmax_cross_entropy_with_logits",
"tensorflow.cast",
"tensorflow.placeholder",
"tensorflow.initialize_all_variables",
"tensorflow.Session",
"tensorflow.train.AdamOptimizer",
"tensorflow.argmax",
"numpy.array",
"tensorflow.random_normal"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": [
"1.10"
]
}
] |
KChen-lab/sensei
|
[
"591f5214b598f60e2ea21bb8b9955cc529f67eee"
] |
[
"python/sensei/unpaired.py"
] |
[
"import scipy.stats\nfrom .utils import *\nfrom scipy.stats import mannwhitneyu, ttest_ind, betabinom\n\ndef calc_wilcoxon_fn(M, N, m, s, alpha = 0.05, n_sim = 10_000):\n \"\"\"\n \n :param M: number of patients, as a list\n :param N: number of cells, as a list\n :param m: mean for both groups, as a list\n :param s: std for both groups, as a list\n :param alpha: significance level\n :param n_sim: simulation iterations\n :return: false negative rate, i.e., 1 - power\n \"\"\"\n N0, N1 = N\n M0, M1 = M\n m0, m1 = m\n s0, s1 = s\n \n a0, b0 = normal_to_beta(m0, s0)\n r0 = betabinom.rvs(N0, a0, b0, size=(M0, n_sim)) / n\n\n a1, b1 = normal_to_beta(m1, s1)\n r1 = betabinom.rvs(N1, a1, b1, size=(M1, n_sim)) / n\n \n return 1 - sum(mannwhitneyu(r0, r1).pvalue < alpha) / n_sim\n\n\ndef calc_fn_rate_beta(M, N, a, b, alpha=0.05, test_type=\"one-sided\", offset=0, sign=0):\n \"\"\"\n Calculate false negative rate\n :param M: number of patients\n :param N: number of cells\n :param a: Beta(a, b)\n :param b: Beta(a, b)\n :param alpha: significance level\n :param test_type: one-sided or two-sided\n :param offset:\n :param sign:\n :return: false negative rate, i.e., 1 - power\n \"\"\"\n\n if not is_iterable(M):\n M = [M, M]\n\n if not is_iterable(N):\n N = [N, N]\n\n Ep = [a[0] / (a[0] + b[0]),\n a[1] / (a[1] + b[1])]\n\n # Vp = [a[0] * b[0] * (a[0] + b[0] + N[0]) / ((N[0] * (a[0] + b[0]) * (a[0] + b[0])) * (a[0] + b[0] + 1)),\n # a[1] * b[1] * (a[1] + b[1] + N[1]) / ((N[1] * (a[1] + b[1]) * (a[1] + b[1])) * (a[1] + b[1] + 1))]\n\n Vp = [var_betabinom_over_n(N[0], a[0], b[0]), var_betabinom_over_n(N[1], a[1], b[1])]\n\n Et = (Ep[1] - (Ep[0] + offset)) / (Vp[0] / M[0] + Vp[1] / M[1]) ** .5\n\n if sign == 0:\n Et = abs(Et)\n else:\n Et = sign * Et\n\n nu = (Vp[0] / M[0] + Vp[1] / M[1]) ** 2 / ((Vp[0] / M[0]) ** 2 / (M[0] - 1) + (Vp[1] / M[1]) ** 2 / (M[1] - 1))\n\n if test_type == \"one-sided\":\n t_star = scipy.stats.t.ppf(q=1 - alpha, df=nu)\n elif test_type == \"two-sided\":\n t_star = scipy.stats.t.ppf(q=1 - alpha / 2, df=nu)\n else:\n raise ValueError(\"test must be one-sided or two-sided\")\n return scipy.stats.t.cdf(t_star - Et, df=nu)\n\n\ndef calc_fn_rate(M, N, m, s, alpha, test_type, offset, sign):\n \"\"\"\n\n :param M:\n :param N:\n :param m:\n :param s:\n :param alpha:\n :param test_type:\n :param offset:\n :param sign:\n :return:\n \"\"\"\n if not is_iterable(s):\n s = [s, s]\n\n a = [None, None]\n b = [None, None]\n\n try:\n a[0], b[0] = normal_to_beta(m[0], s[0])\n a[1], b[1] = normal_to_beta(m[1], s[1])\n except ZeroDivisionError:\n return float(\"nan\")\n return calc_fn_rate_beta(M, N, a, b, alpha, test_type, offset, sign)\n\n\ndef calc_fn_rate_override(M, N, m, s, alpha, test_type, override_diff):\n \"\"\"\n\n :param M:\n :param N:\n :param m:\n :param s:\n :param alpha:\n :param test_type:\n :param override_diff: overriden difference\n :return:\n \"\"\"\n if not is_iterable(s):\n s = [s, s]\n\n a = [None, None]\n b = [None, None]\n\n try:\n a[0], b[0] = normal_to_beta(m[0], s[0])\n a[1], b[1] = normal_to_beta(m[1], s[1])\n except ZeroDivisionError:\n return float(\"nan\")\n\n if not is_iterable(M):\n M = [M, M]\n\n if not is_iterable(N):\n N = [N, N]\n\n Ep = [a[0] / (a[0] + b[0]),\n a[1] / (a[1] + b[1])]\n\n # Vp = [a[0] * b[0] * (a[0] + b[0] + N[0]) / ((N[0] * (a[0] + b[0]) * (a[0] + b[0])) * (a[0] + b[0] + 1)),\n # a[1] * b[1] * (a[1] + b[1] + N[1]) / ((N[1] * (a[1] + b[1]) * (a[1] + b[1])) * (a[1] + b[1] + 1))]\n\n Vp = [var_betabinom_over_n(N[0], a[0], b[0]), 
var_betabinom_over_n(N[1], a[1], b[1])]\n\n Et = override_diff / (Vp[0] / M[0] + Vp[1] / M[1]) ** .5\n\n Et = abs(Et)\n\n nu = (Vp[0] / M[0] + Vp[1] / M[1]) ** 2 / ((Vp[0] / M[0]) ** 2 / (M[0] - 1) + (Vp[1] / M[1]) ** 2 / (M[1] - 1))\n\n if test_type == \"one-sided\":\n t_star = scipy.stats.t.ppf(q=1 - alpha, df=nu)\n elif test_type == \"two-sided\":\n t_star = scipy.stats.t.ppf(q=1 - alpha / 2, df=nu)\n else:\n raise ValueError(\"test must be one-sided or two-sided\")\n return scipy.stats.t.cdf(t_star - Et, df=nu)\n\n\ndef calc_fn_rate_baseline(M, m, s, alpha, test_type, offset, sign):\n \"\"\"\n\n :param M:\n :param N:\n :param m:\n :param s:\n :param alpha:\n :param test_type:\n :param override_diff: overriden difference\n :return:\n \"\"\"\n if not is_iterable(s):\n s = [s, s]\n\n a = [None, None]\n b = [None, None]\n\n try:\n a[0], b[0] = normal_to_beta(m[0], s[0])\n a[1], b[1] = normal_to_beta(m[1], s[1])\n except ZeroDivisionError:\n return float(\"nan\")\n\n if not is_iterable(M):\n M = [M, M]\n\n Ep = m\n\n Vp = [s[0] ** 2, s[1] ** 2]\n\n Et = (Ep[1] - (Ep[0] + offset)) / (Vp[0] / M[0] + Vp[1] / M[1]) ** .5\n\n if sign == 0:\n Et = abs(Et)\n else:\n Et = sign * Et\n\n nu = (Vp[0] / M[0] + Vp[1] / M[1]) ** 2 / ((Vp[0] / M[0]) ** 2 / (M[0] - 1) + (Vp[1] / M[1]) ** 2 / (M[1] - 1))\n\n if test_type == \"one-sided\":\n t_star = scipy.stats.t.ppf(q=1 - alpha, df=nu)\n elif test_type == \"two-sided\":\n t_star = scipy.stats.t.ppf(q=1 - alpha / 2, df=nu)\n else:\n raise ValueError(\"test must be one-sided or two-sided\")\n return scipy.stats.t.cdf(t_star - Et, df=nu)"
] |
[
[
"scipy.stats.mannwhitneyu",
"scipy.stats.betabinom.rvs"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
loopylangur/numpyro
|
[
"7892f2bc0eba68f238228198bd2bcfcbcba2207b"
] |
[
"examples/gp.py"
] |
[
"# Copyright Contributors to the Pyro project.\n# SPDX-License-Identifier: Apache-2.0\n\n\"\"\"\nExample: Gaussian Process\n=========================\n\nIn this example we show how to use NUTS to sample from the posterior\nover the hyperparameters of a gaussian process.\n\n.. image:: ../_static/img/examples/gp.png\n :align: center\n\"\"\"\n\nimport argparse\nimport os\nimport time\n\nimport matplotlib\nimport matplotlib.pyplot as plt\nimport numpy as np\n\nimport jax\nfrom jax import vmap\nimport jax.numpy as jnp\nimport jax.random as random\n\nimport numpyro\nimport numpyro.distributions as dist\nfrom numpyro.infer import MCMC, NUTS, init_to_feasible, init_to_median, init_to_sample, init_to_uniform, init_to_value\n\nmatplotlib.use('Agg') # noqa: E402\n\n\n# squared exponential kernel with diagonal noise term\ndef kernel(X, Z, var, length, noise, jitter=1.0e-6, include_noise=True):\n deltaXsq = jnp.power((X[:, None] - Z) / length, 2.0)\n k = var * jnp.exp(-0.5 * deltaXsq)\n if include_noise:\n k += (noise + jitter) * jnp.eye(X.shape[0])\n return k\n\n\ndef model(X, Y):\n # set uninformative log-normal priors on our three kernel hyperparameters\n var = numpyro.sample(\"kernel_var\", dist.LogNormal(0.0, 10.0))\n noise = numpyro.sample(\"kernel_noise\", dist.LogNormal(0.0, 10.0))\n length = numpyro.sample(\"kernel_length\", dist.LogNormal(0.0, 10.0))\n\n # compute kernel\n k = kernel(X, X, var, length, noise)\n\n # sample Y according to the standard gaussian process formula\n numpyro.sample(\"Y\", dist.MultivariateNormal(loc=jnp.zeros(X.shape[0]), covariance_matrix=k),\n obs=Y)\n\n\n# helper function for doing hmc inference\ndef run_inference(model, args, rng_key, X, Y):\n start = time.time()\n # demonstrate how to use different HMC initialization strategies\n if args.init_strategy == \"value\":\n init_strategy = init_to_value(values={\"kernel_var\": 1.0, \"kernel_noise\": 0.05, \"kernel_length\": 0.5})\n elif args.init_strategy == \"median\":\n init_strategy = init_to_median(num_samples=10)\n elif args.init_strategy == \"feasible\":\n init_strategy = init_to_feasible()\n elif args.init_strategy == \"sample\":\n init_strategy = init_to_sample()\n elif args.init_strategy == \"uniform\":\n init_strategy = init_to_uniform(radius=1)\n kernel = NUTS(model, init_strategy=init_strategy)\n mcmc = MCMC(kernel, args.num_warmup, args.num_samples, num_chains=args.num_chains, thinning=args.thinning,\n progress_bar=False if \"NUMPYRO_SPHINXBUILD\" in os.environ else True)\n mcmc.run(rng_key, X, Y)\n mcmc.print_summary()\n print('\\nMCMC elapsed time:', time.time() - start)\n return mcmc.get_samples()\n\n\n# do GP prediction for a given set of hyperparameters. 
this makes use of the well-known\n# formula for gaussian process predictions\ndef predict(rng_key, X, Y, X_test, var, length, noise):\n # compute kernels between train and test data, etc.\n k_pp = kernel(X_test, X_test, var, length, noise, include_noise=True)\n k_pX = kernel(X_test, X, var, length, noise, include_noise=False)\n k_XX = kernel(X, X, var, length, noise, include_noise=True)\n K_xx_inv = jnp.linalg.inv(k_XX)\n K = k_pp - jnp.matmul(k_pX, jnp.matmul(K_xx_inv, jnp.transpose(k_pX)))\n sigma_noise = jnp.sqrt(jnp.clip(jnp.diag(K), a_min=0.)) * jax.random.normal(rng_key, X_test.shape[:1])\n mean = jnp.matmul(k_pX, jnp.matmul(K_xx_inv, Y))\n # we return both the mean function and a sample from the posterior predictive for the\n # given set of hyperparameters\n return mean, mean + sigma_noise\n\n\n# create artificial regression dataset\ndef get_data(N=30, sigma_obs=0.15, N_test=400):\n np.random.seed(0)\n X = jnp.linspace(-1, 1, N)\n Y = X + 0.2 * jnp.power(X, 3.0) + 0.5 * jnp.power(0.5 + X, 2.0) * jnp.sin(4.0 * X)\n Y += sigma_obs * np.random.randn(N)\n Y -= jnp.mean(Y)\n Y /= jnp.std(Y)\n\n assert X.shape == (N,)\n assert Y.shape == (N,)\n\n X_test = jnp.linspace(-1.3, 1.3, N_test)\n\n return X, Y, X_test\n\n\ndef main(args):\n X, Y, X_test = get_data(N=args.num_data)\n\n # do inference\n rng_key, rng_key_predict = random.split(random.PRNGKey(0))\n samples = run_inference(model, args, rng_key, X, Y)\n\n # do prediction\n vmap_args = (random.split(rng_key_predict, samples['kernel_var'].shape[0]),\n samples['kernel_var'], samples['kernel_length'], samples['kernel_noise'])\n means, predictions = vmap(lambda rng_key, var, length, noise:\n predict(rng_key, X, Y, X_test, var, length, noise))(*vmap_args)\n\n mean_prediction = np.mean(means, axis=0)\n percentiles = np.percentile(predictions, [5.0, 95.0], axis=0)\n\n # make plots\n fig, ax = plt.subplots(figsize=(8, 6), constrained_layout=True)\n\n # plot training data\n ax.plot(X, Y, 'kx')\n # plot 90% confidence level of predictions\n ax.fill_between(X_test, percentiles[0, :], percentiles[1, :], color='lightblue')\n # plot mean prediction\n ax.plot(X_test, mean_prediction, 'blue', ls='solid', lw=2.0)\n ax.set(xlabel=\"X\", ylabel=\"Y\", title=\"Mean predictions with 90% CI\")\n\n plt.savefig(\"gp_plot.pdf\")\n\n\nif __name__ == \"__main__\":\n assert numpyro.__version__.startswith('0.4.1')\n parser = argparse.ArgumentParser(description=\"Gaussian Process example\")\n parser.add_argument(\"-n\", \"--num-samples\", nargs=\"?\", default=1000, type=int)\n parser.add_argument(\"--num-warmup\", nargs='?', default=1000, type=int)\n parser.add_argument(\"--num-chains\", nargs='?', default=1, type=int)\n parser.add_argument(\"--thinning\", nargs='?', default=2, type=int)\n parser.add_argument(\"--num-data\", nargs='?', default=25, type=int)\n parser.add_argument(\"--device\", default='cpu', type=str, help='use \"cpu\" or \"gpu\".')\n parser.add_argument(\"--init-strategy\", default='median', type=str,\n choices=['median', 'feasible', 'value', 'uniform', 'sample'])\n args = parser.parse_args()\n\n numpyro.set_platform(args.device)\n numpyro.set_host_device_count(args.num_chains)\n\n main(args)\n"
] |
[
[
"numpy.random.seed",
"matplotlib.use",
"matplotlib.pyplot.subplots",
"numpy.percentile",
"matplotlib.pyplot.savefig",
"numpy.mean",
"numpy.random.randn"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
hsokooti/RegNet
|
[
"28a8b6132677bb58e9fc811c0dd15d78913c7e86"
] |
[
"functions/preprocessing/dirlab.py"
] |
[
"import copy\nimport os\nimport numpy as np\nimport SimpleITK as sitk\nimport functions.image.image_processing as ip\n\n\ndef img_converter(data_folder, data, type_im, cn, ext='.mha', mha_folder_name='mha', point_folder_name='points'):\n \"\"\"\n convert img images to mha.\n reading image:\n 1) Size and voxel spacing of the images are available at https://www.dir-lab.com/ReferenceData.html\n 2) The superior-inferior axis needs to be flipped\n 3) Empty slices will be removed\n copd1_eBHCT.mha slice 0:1\n copd2_eBHCT.mha slice 0:6\n copd3_eBHCT.mha slice 0:9\n copd4_eBHCT.mha slice 0:9\n copd5_eBHCT.mha slice NA\n copd6_eBHCT.mha slice 0:2\n copd7_eBHCT.mha slice 0:9\n copd8_eBHCT.mha slice 0:7\n copd9_eBHCT.mha slice 0:19\n copd10_iBHCT.mah slice 0\n 4) Index modification:\n 4a) The superior-inferior axis are flipped. The reason is that to make it more similar to SPREAD study.\n 4b) The indices start at 1. We like them to start at 0.\n 4c) change indices of landmarks based on the removed slices\n\n 5) Normalize the intensity by subtracting by -1024\n 6) Set the outside value to -2048\n\n :param ext\n :param cn:\n :param type_im\n :param data_folder\n :param data\n :param mha_folder_name\n :param point_folder_name\n :return: converted mha image and converted landmark files:\n example: \n copd1_eBHCT.mha\n copd1_300_eBH_world_r1_tr.txt: landmarks in world coordinate (truncated)\n copd1_300_eBH_world_r1_elx.txt: landmarks in world coordinate with two additional lines for elastix\n copd1_300_eBH_xyz_r1_tr.txt: landmark in indices\n copd1_300_eBH_xyz_r1_elx.txt: landmark in indices with two additional lines for elastix\n \"\"\"\n\n if data == 'DIR-Lab_4D':\n type_im_list = ['T00', 'T10', 'T20', 'T30', 'T40', 'T50', 'T60', 'T70', 'T80', 'T90']\n data_folder_sub = data_folder + 'DIR-Lab/4DCT/'\n if cn < 6:\n im_img_name = 'Images/case' + str(cn) + '_' + type_im_list[type_im] + '_s.img'\n else:\n im_img_name = 'Images/case' + str(cn) + '_' + type_im_list[type_im] + '.img'\n im_img_folder = data_folder_sub + 'Case' + str(cn) + 'Pack/'\n if cn == 8:\n im_img_folder = data_folder_sub + 'Case' + str(cn) + 'Deploy/'\n im_mha_name = 'case' + str(cn) + '_' + type_im_list[type_im] + ext\n im_mha_folder = data_folder_sub + mha_folder_name + '/case' + str(cn) + '/'\n point_folder = data_folder_sub + point_folder_name + '/case' + str(cn) + '/'\n if cn < 6:\n index_tr_old_address_list = [im_img_folder + '/Sampled4D/case' + str(cn) + '_4D-75_' + type_im_list[type_im] + '.txt',\n im_img_folder + '/ExtremePhases/case' + str(cn) + '_300_' + type_im_list[type_im] + '_xyz.txt']\n else:\n index_tr_old_address_list = [im_img_folder + '/Sampled4D/case' + str(cn) + '_4D-75_' + type_im_list[type_im] + '.txt',\n im_img_folder + '/ExtremePhases/case' + str(cn) + '_dirLab300_' + type_im_list[type_im] + '_xyz.txt']\n index_tr_new_address_list = [point_folder + '/case' + str(cn) + '_4D-75_' + type_im_list[type_im] + '_xyz_tr.txt',\n point_folder + '/case' + str(cn) + '_300_' + type_im_list[type_im] + '_xyz_tr.txt']\n index_elx_new_address_list = [point_folder + '/case' + str(cn) + '_4D-75_' + type_im_list[type_im] + '_xyz_elx.txt',\n point_folder + '/case' + str(cn) + '_300_' + type_im_list[type_im] + '_xyz_elx.txt']\n point_tr_new_address_list = [point_folder + '/case' + str(cn) + '_4D-75_' + type_im_list[type_im] + '_world_tr.txt',\n point_folder + '/case' + str(cn) + '_300_' + type_im_list[type_im] + '_world_tr.txt']\n point_elx_new_address_list = [point_folder + '/case' + str(cn) + '_4D-75_' + type_im_list[type_im] + 
'_world_elx.txt',\n point_folder + '/case' + str(cn) + '_300_' + type_im_list[type_im] + '_world_elx.txt']\n dirlab_header = dirlab_4dct_header()\n\n elif data == 'DIR-Lab_COPD':\n type_im_list = ['i', 'e']\n data_folder_sub = data_folder + 'DIR-Lab/COPDgene/'\n im_img_name = 'copd' + str(cn) + '_' + type_im_list[type_im] + 'BHCT.img'\n im_img_folder = data_folder_sub + 'copd' + str(cn) + '/'\n im_mha_name = 'copd' + str(cn) + '_' + type_im_list[type_im] + 'BHCT' + ext\n im_mha_folder = data_folder_sub + mha_folder_name + '/'\n point_folder = data_folder_sub + point_folder_name\n index_tr_old_address_list = [im_img_folder + 'copd' + str(cn) + '_300_' + type_im_list[type_im] + 'BH_xyz_r1.txt']\n index_tr_new_address_list = [point_folder + '/copd' + str(cn) + '_300_' + type_im_list[type_im] + 'BH_xyz_r1_tr.txt']\n index_elx_new_address_list = [point_folder + '/copd' + str(cn) + '_300_' + type_im_list[type_im] + 'BH_xyz_r1_elx.txt']\n point_tr_new_address_list = [point_folder + '/copd' + str(cn) + '_300_' + type_im_list[type_im] + 'BH_world_r1_tr.txt']\n point_elx_new_address_list = [point_folder + '/copd' + str(cn) + '_300_' + type_im_list[type_im] + 'BH_world_r1_elx.txt']\n dirlab_header = dirlab_copd_header()\n\n else:\n raise ValueError('Data=' + data + \", it should be in ['DIR-Lab_4D', 'DIR-Lab_COPD']\")\n\n if not os.path.isdir(im_mha_folder):\n os.makedirs(im_mha_folder)\n if not os.path.isdir(point_folder):\n os.makedirs(point_folder)\n im_img_address = im_img_folder + im_img_name\n im_mha_address = im_mha_folder + im_mha_name\n if not os.path.isfile(im_mha_address):\n # 1,2) reading image:----------------------------------------------------------------\n fid = open(im_img_address, 'rb')\n im_data = np.fromfile(fid, np.int16)\n image_old = im_data.reshape(dirlab_header['case' + str(cn)]['Size'][::-1])\n image_old = np.flip(image_old, axis=0) # The superior-inferior axis needs to be flipped\n origin = [0, 0, 0]\n image = copy.deepcopy(image_old)\n # reading landmarks:\n for ii, index_tr_old_address in enumerate(index_tr_old_address_list):\n index_tr_new_address = index_tr_new_address_list[ii]\n index_elx_new_address = index_elx_new_address_list[ii]\n point_tr_new_address = point_tr_new_address_list[ii]\n point_elx_new_address = point_elx_new_address_list[ii]\n if os.path.isfile(index_tr_old_address):\n index_tr_old_raw = np.loadtxt(index_tr_old_address)\n # 4a&b) The superior-inferior axis is flipped. be careful about that indices start at 1. 
after converting to zero-start,\n # there is no -1 in the SI direction.\n\n index_tr_old = np.array([[index_tr_old_raw[i, 0] - 1,\n index_tr_old_raw[i, 1] - 1,\n image_old.shape[0] - index_tr_old_raw[i, 2]]\n for i in range(index_tr_old_raw.shape[0])])\n\n # 3) remove empty slices only in DIR-Lab_COPD-----------------------------------------\n if data == 'DIR-Lab_COPD':\n image, slices_to_remove = remove_empty_slices(image_old)\n print(im_img_name + ' slices are removed: ' + str(slices_to_remove))\n shift_indices = len(slices_to_remove)\n shift_world = shift_indices * dirlab_header['case' + str(cn)]['Spacing'][2]\n origin[2] = shift_world\n\n # 4c) change indices of landmarks based on the removed slices\n index_tr_new = [[index_tr_old[i, 0], index_tr_old[i, 1], index_tr_old[i, 2] - shift_indices] for i in range(index_tr_old.shape[0])]\n else:\n index_tr_new = index_tr_old.copy()\n\n np.savetxt(index_tr_new_address, index_tr_new, fmt='%d')\n point_tr_new = ip.index_to_world(index_tr_new, spacing=dirlab_header['case' + str(cn)]['Spacing'], origin=origin)\n np.savetxt(point_tr_new_address, point_tr_new, fmt='%-9.3f')\n open_text = open(index_tr_new_address, \"r\")\n number_of_landmarks = index_tr_new.shape[0]\n with open(index_elx_new_address, \"w\") as open_elx:\n open_elx.write('index \\n')\n open_elx.write(str(number_of_landmarks) + ' \\n')\n open_elx.write(open_text.read())\n open_text.close()\n\n open_text = open(point_tr_new_address, \"r\")\n with open(point_elx_new_address, \"w\") as open_elx:\n open_elx.write('point \\n')\n open_elx.write(str(number_of_landmarks) + ' \\n')\n open_elx.write(open_text.read())\n open_text.close()\n\n # 5) normalize the intensity\n image = image - 1024 # we are not sure about the slope and intercept.\n\n # 6) set the outside value to -2048\n image[image == -3024] = -2048\n image_sitk = ip.array_to_sitk(image, spacing=dirlab_header['case' + str(cn)]['Spacing'], origin=origin)\n sitk.WriteImage(image_sitk, im_mha_address)\n print('case' + str(cn) + ' type' + str(type_im) + ' is done..')\n\n\ndef remove_empty_slices(image):\n slices_to_remove = []\n for slice_index in range(np.shape(image)[0]):\n if np.sum(image[slice_index, :, :]) == 0:\n slices_to_remove.append(slice_index)\n slices_all = [i for i in range(np.shape(image)[0])]\n slices_to_keep = [i for i in slices_all if i not in slices_to_remove]\n image_cropped = image[slices_to_keep, :, :]\n\n return image_cropped, slices_to_remove\n\n\ndef dirlab_copd_header():\n \"\"\"\n size and voxel spacing of the images are available at https://www.dir-lab.com/ReferenceData.html\n \"\"\"\n dirlab_info = dict()\n for cn in range(1, 11):\n dirlab_info['case' + str(cn)] = {}\n dirlab_info['case1']['Size'] = [512, 512, 121]\n dirlab_info['case2']['Size'] = [512, 512, 102]\n dirlab_info['case3']['Size'] = [512, 512, 126]\n dirlab_info['case4']['Size'] = [512, 512, 126]\n dirlab_info['case5']['Size'] = [512, 512, 131]\n dirlab_info['case6']['Size'] = [512, 512, 119]\n dirlab_info['case7']['Size'] = [512, 512, 112]\n dirlab_info['case8']['Size'] = [512, 512, 115]\n dirlab_info['case9']['Size'] = [512, 512, 116]\n dirlab_info['case10']['Size'] = [512, 512, 135]\n\n dirlab_info['case1']['Spacing'] = [0.625, 0.625, 2.5]\n dirlab_info['case2']['Spacing'] = [0.645, 0.645, 2.5]\n dirlab_info['case3']['Spacing'] = [0.652, 0.652, 2.5]\n dirlab_info['case4']['Spacing'] = [0.590, 0.590, 2.5]\n dirlab_info['case5']['Spacing'] = [0.647, 0.647, 2.5]\n dirlab_info['case6']['Spacing'] = [0.633, 0.633, 2.5]\n 
dirlab_info['case7']['Spacing'] = [0.625, 0.625, 2.5]\n dirlab_info['case8']['Spacing'] = [0.586, 0.586, 2.5]\n dirlab_info['case9']['Spacing'] = [0.644, 0.644, 2.5]\n dirlab_info['case10']['Spacing'] = [0.742, 0.742, 2.5]\n\n return dirlab_info\n\n\ndef dirlab_4dct_header():\n \"\"\"\n size and voxel spacing of the images are available at https://www.dir-lab.com/ReferenceData.html\n \"\"\"\n dirlab_info = dict()\n for cn in range(1, 11):\n dirlab_info['case' + str(cn)] = {}\n dirlab_info['case1']['Size'] = [256, 256, 94]\n dirlab_info['case2']['Size'] = [256, 256, 112]\n dirlab_info['case3']['Size'] = [256, 256, 104]\n dirlab_info['case4']['Size'] = [256, 256, 99]\n dirlab_info['case5']['Size'] = [256, 256, 106]\n dirlab_info['case6']['Size'] = [512, 512, 128]\n dirlab_info['case7']['Size'] = [512, 512, 136]\n dirlab_info['case8']['Size'] = [512, 512, 128]\n dirlab_info['case9']['Size'] = [512, 512, 128]\n dirlab_info['case10']['Size'] = [512, 512, 120]\n\n dirlab_info['case1']['Spacing'] = [0.97, 0.97, 2.5]\n dirlab_info['case2']['Spacing'] = [1.16, 1.16, 2.5]\n dirlab_info['case3']['Spacing'] = [1.15, 1.15, 2.5]\n dirlab_info['case4']['Spacing'] = [1.13, 1.13, 2.5]\n dirlab_info['case5']['Spacing'] = [1.10, 1.10, 2.5]\n dirlab_info['case6']['Spacing'] = [0.97, 0.97, 2.5]\n dirlab_info['case7']['Spacing'] = [0.97, 0.97, 2.5]\n dirlab_info['case8']['Spacing'] = [0.97, 0.97, 2.5]\n dirlab_info['case9']['Spacing'] = [0.97, 0.97, 2.5]\n dirlab_info['case10']['Spacing'] = [0.97, 0.97, 2.5]\n\n return dirlab_info\n\n\ndef main():\n data = 'DIR-Lab_4D'\n data_folder = 'E:/PHD/Database/'\n for cn in range(6, 11):\n for type_im in [0, 5]:\n img_converter(data_folder=data_folder, data=data, type_im=type_im, cn=cn)\n\n\nif __name__ == '__main__':\n main()\n"
] |
[
[
"numpy.fromfile",
"numpy.shape",
"numpy.savetxt",
"numpy.flip",
"numpy.sum",
"numpy.loadtxt"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
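The DIR-Lab converter record above revolves around two small numpy steps: reading the raw `.img` volume and flipping it along the superior-inferior axis, and mapping 1-based landmark voxel indices to 0-based indices and then to world coordinates via voxel spacing and origin. The sketch below reproduces only those steps in isolation; the volume size, spacing, landmark values, and the `index_to_world` helper are illustrative stand-ins, not the repository's actual `functions.image.image_processing` API.

```python
import numpy as np

def index_to_world(index_xyz, spacing, origin):
    """Illustrative stand-in for ip.index_to_world: world = index * spacing + origin."""
    return np.asarray(index_xyz, float) * np.asarray(spacing, float) + np.asarray(origin, float)

# Tiny made-up volume standing in for np.fromfile(fid, np.int16) on a DIR-Lab .img file.
size_xyz = [8, 8, 5]                      # x, y, z voxels (real cases are e.g. 512 x 512 x 121)
spacing = [0.625, 0.625, 2.5]             # mm per voxel
im_data = np.arange(np.prod(size_xyz), dtype=np.int16)
image = im_data.reshape(size_xyz[::-1])   # stored as (z, y, x)
image = np.flip(image, axis=0)            # flip the superior-inferior axis

# DIR-Lab landmark indices are 1-based; make x and y 0-based and flip z.
landmark_1based = np.array([[3, 4, 2]])
landmark_0based = np.column_stack([
    landmark_1based[:, 0] - 1,
    landmark_1based[:, 1] - 1,
    image.shape[0] - landmark_1based[:, 2],   # no extra -1 in z after the flip
])
print(index_to_world(landmark_0based, spacing, origin=[0.0, 0.0, 0.0]))
```

The same index-shift logic is what the converter applies before writing the `_xyz_tr.txt` and `_world_tr.txt` landmark files.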
krangelie/bias-in-german-nlg
|
[
"9fbaf50fde7d41d64692ae90c41beae61bc78d44"
] |
[
"src/evaluate_bias_in_nlg/eval_bias_in_labeled_generations.py"
] |
[
"import os\nimport json\n\nimport hydra.utils\nimport pandas as pd\nfrom scipy.stats import chi2, chi2_contingency\nimport matplotlib.pyplot as plt\n\nimport src.constants as constants\nfrom src.evaluate_bias_in_nlg.bias_eval_helpers import (\n single_file_to_dict,\n mult_files_to_dict,\n plot_regard_ratios,\n)\n\n\ndef read_regard_labeled_demo_csv(in_path, demographics, contexts):\n if contexts != \"all\":\n if contexts == \"respect\":\n context_list = constants.RESPECT_LIST\n elif contexts == \"occupation\":\n context_list = constants.OCCUPATION_LIST\n else:\n print(\"Typo or undefined context list.\")\n else:\n context_list = None\n\n print(f\"-- PROCESSING {contexts} CONTEXTS --\")\n\n if in_path.endswith(\".csv\"):\n demo_dict = single_file_to_dict(in_path, demographics, context_list)\n else:\n demo_dict = mult_files_to_dict(in_path, demographics, context_list)\n\n print(demographics)\n\n return demo_dict\n\n\ndef create_contingency_table(demo_dict):\n demographics = [constants.VARIABLE_DICT[k] for k in demo_dict.keys()]\n contingency_table = pd.DataFrame(\n [],\n columns=constants.VALENCE_MAP,\n index=demographics,\n )\n ratios = {}\n sample_num = min([len(value) for key, value in demo_dict.items()])\n for demo, preds in demo_dict.items():\n ratios[demo] = {}\n preds = preds.reset_index()\n counts = preds.loc[:sample_num, \"Prediction\"].value_counts()\n counts_all = preds[\"Prediction\"].value_counts()\n print(\"Counts\", counts)\n if len(counts != len(constants.VALENCE_MAP)):\n # set_of_preds = preds.loc[:sample_num, \"Prediction\"]\n set_of_preds = preds[\"Prediction\"]\n print(\n \"Attention, not all classes have predictions:\",\n set(set_of_preds),\n )\n for valence, value in constants.VALENCE_MAP.items():\n if value in counts.index:\n num = counts_all[value]\n else:\n num = 0\n contingency_table.loc[constants.VARIABLE_DICT[demo], valence] = num\n ratios[demo][valence] = counts_all[value] / len(preds)\n\n print(contingency_table)\n return contingency_table, ratios\n\n\ndef test_group_independence(contingency_table, out_path, contexts, ratios):\n\n stat, p, dof, expected = chi2_contingency(contingency_table)\n print(\"dof=%d\" % dof)\n print(expected)\n # interpret test-statistic\n prob = 0.95\n critical = chi2.ppf(prob, dof)\n\n alpha = 1.0 - prob\n ctxt = f\"\\nResults for {contexts}\\n\\n\"\n prob_txt = f\"\\nprobability={prob:.3f}, critical={critical:.3f}, stat={stat:.3f}\"\n sign_txt = f\"\\nalpha={alpha:.3f}, p={p:.3f}\"\n print(sign_txt)\n if p <= alpha:\n h_txt = \"\\nThere is a difference between the distributions (reject H0)\"\n else:\n h_txt = \"\\nThere is no difference between the distributions (fail to reject H0)\"\n print(h_txt)\n\n results_file = open(os.path.join(out_path, \"chisquare.txt\"), \"a\")\n result_txt = [\n ctxt,\n prob_txt,\n sign_txt,\n h_txt,\n f\"\\nN = {contingency_table.sum().sum()}\",\n ]\n results_file.writelines(result_txt)\n results_file.close()\n\n\ndef eval_bias_for_context(eval_cfg, axis, context, input_path, output_path):\n demo_dict = read_regard_labeled_demo_csv(\n input_path,\n eval_cfg.demographics,\n context,\n )\n contingency_table, ratios = create_contingency_table(demo_dict)\n test_group_independence(contingency_table, output_path, context, ratios)\n\n ratios_df = pd.DataFrame([], columns=[\"Demographic\", \"Regard\", \"Prediction\"])\n counter = 0\n for demo, valence_dict in ratios.items():\n for valence, value in valence_dict.items():\n ratios_df.loc[counter, :] = {\n \"Demographic\": demo,\n \"Regard\": valence,\n 
\"Prediction\": value * 100,\n }\n counter += 1\n\n plot_regard_ratios(demo_dict, context, axis, ratios_df)\n\n\ndef eval_regard_bias(cfg):\n eval_cfg = cfg.run_mode\n output_path = hydra.utils.to_absolute_path(eval_cfg.output_path)\n os.makedirs(output_path, exist_ok=True)\n input_path = hydra.utils.to_absolute_path(eval_cfg.input_path)\n\n if cfg.run_mode.contexts == \"combine\":\n fig, ax = plt.subplots(1, 3)\n fig.set_size_inches(7.5, 4)\n fig.suptitle(\n \"Regard scores [%]\",\n # \"Weibchen Sternzeichen Freundlichkeitsprofil Erlangen Mineral\",\n # \"Vitamin Kneipp Neuzeit empfehlen Klassik erholsame\",\n fontsize=15,\n )\n\n for i, c in enumerate([\"all\", \"occupation\", \"respect\"]):\n eval_bias_for_context(eval_cfg, ax[i], c, input_path, output_path)\n plt.xlabel(\"\")\n # plt.xticks(fontsize=14)\n # plt.ylabel(\"Regard score [%]\", fontsize=15)\n plt.tight_layout()\n os.makedirs(output_path, exist_ok=True)\n dest = os.path.join(output_path, f\"ratios_{cfg.run_mode.contexts}_contexts.png\")\n fig.savefig(dest)\n\n else:\n output_path = os.path.join(output_path, f\"{cfg.run_mode.contexts}_contexts\")\n eval_bias_for_context(\n eval_cfg, None, cfg.run_mode.contexts, input_path, output_path\n )\n os.makedirs(output_path, exist_ok=True)\n plt.xlabel(\"\")\n # plt.xticks(fontsize=14)\n plt.ylabel(\"Regard score [%]\", fontsize=15)\n plt.tight_layout()\n os.makedirs(output_path, exist_ok=True)\n dest = os.path.join(output_path, f\"ratios_{cfg.run_mode.contexts}_contexts.png\")\n plt.savefig(dest)\n"
] |
[
[
"scipy.stats.chi2.ppf",
"matplotlib.pyplot.tight_layout",
"scipy.stats.chi2_contingency",
"matplotlib.pyplot.subplots",
"pandas.DataFrame",
"matplotlib.pyplot.savefig",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.ylabel"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [
"0.23",
"0.21",
"2.0",
"1.4",
"0.19",
"1.1",
"1.5",
"1.2",
"0.24",
"0.20",
"1.0",
"0.25",
"1.3"
],
"scipy": [
"1.7",
"1.0",
"0.10",
"1.2",
"0.14",
"0.19",
"1.5",
"0.12",
"0.17",
"0.13",
"1.6",
"1.4",
"1.9",
"1.3",
"1.10",
"0.15",
"0.18",
"0.16",
"1.8"
],
"tensorflow": []
}
] |
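The bias-evaluation record above builds a demographics-by-regard-class contingency table and runs a chi-square independence test on it with `scipy.stats.chi2_contingency`, comparing the p-value against alpha = 1 - 0.95. A minimal, self-contained version of that statistical step is sketched below; the group names and counts are invented purely for illustration and are not taken from the repository.

```python
import pandas as pd
from scipy.stats import chi2, chi2_contingency

# Hypothetical regard-prediction counts (negative / neutral / positive) per demographic.
table = pd.DataFrame(
    [[40, 35, 25],
     [55, 30, 15]],
    columns=["negative", "neutral", "positive"],
    index=["GROUP_A", "GROUP_B"],
)

stat, p, dof, expected = chi2_contingency(table)
prob = 0.95
critical = chi2.ppf(prob, dof)   # critical value of the chi-square distribution at this dof
alpha = 1.0 - prob

print(f"stat={stat:.3f}, critical={critical:.3f}, dof={dof}")
if p <= alpha:
    print(f"p={p:.3f} <= alpha={alpha:.3f}: the regard distributions differ (reject H0)")
else:
    print(f"p={p:.3f} > alpha={alpha:.3f}: no significant difference (fail to reject H0)")
```

Comparing `stat` against `critical` and `p` against `alpha` are equivalent decision rules; the script in the record reports both in its `chisquare.txt` output.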
DonliFly/tensorflow
|
[
"3db3e972a33760ebe6662341d5f4320a906582f7",
"3db3e972a33760ebe6662341d5f4320a906582f7"
] |
[
"tensorflow/python/ops/nn_test.py",
"tensorflow/python/keras/layers/local_test.py"
] |
[
"# Copyright 2015 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"Tests for miscellaneous functionality in tensorflow.ops.nn.\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport math\n\nfrom absl.testing import parameterized\nimport numpy as np\nfrom six.moves import xrange # pylint: disable=redefined-builtin\n\nfrom tensorflow.python.framework import constant_op\nfrom tensorflow.python.framework import dtypes\nfrom tensorflow.python.framework import ops\nfrom tensorflow.python.framework import test_util\nfrom tensorflow.python.ops import array_ops\nfrom tensorflow.python.ops import gradient_checker\nfrom tensorflow.python.ops import math_ops\nfrom tensorflow.python.ops import nn_impl\nfrom tensorflow.python.ops import nn_ops\nfrom tensorflow.python.ops import partitioned_variables\nfrom tensorflow.python.ops import variable_scope\nfrom tensorflow.python.ops import variables\nimport tensorflow.python.ops.nn_grad # pylint: disable=unused-import\nfrom tensorflow.python.ops.nn_impl import _compute_sampled_logits\nfrom tensorflow.python.platform import test as test_lib\n\n\nclass ZeroFractionTest(test_lib.TestCase):\n\n def _ZeroFraction(self, x):\n assert x.shape\n total_elements = np.prod(x.shape)\n nonzeros = np.count_nonzero(x.flatten())\n return 1.0 - nonzeros / total_elements\n\n def testZeroFraction(self):\n x_shape = [5, 17]\n x_np = np.random.randint(0, 2, size=x_shape).astype(np.float32)\n y_np = self._ZeroFraction(x_np)\n\n x_tf = constant_op.constant(x_np)\n x_tf.set_shape(x_shape)\n y_tf = nn_impl.zero_fraction(x_tf)\n y_tf_np = self.evaluate(y_tf)\n\n eps = 1e-8\n self.assertAllClose(y_tf_np, y_np, eps)\n\n def testZeroFractionEmpty(self):\n x = np.zeros(0)\n y = self.evaluate(nn_impl.zero_fraction(x))\n self.assertTrue(np.isnan(y))\n\n def testZeroFraction2_27Zeros(self):\n sparsity = nn_impl.zero_fraction(\n array_ops.zeros([int(2**27 * 1.01)], dtype=dtypes.int8))\n self.assertAllClose(1.0, self.evaluate(sparsity))\n\n def testZeroFraction2_27Ones(self):\n sparsity = nn_impl.zero_fraction(\n array_ops.ones([int(2**27 * 1.01)], dtype=dtypes.int8))\n self.assertAllClose(0.0, self.evaluate(sparsity))\n\n def testUnknownSize(self):\n value = array_ops.placeholder(dtype=dtypes.float32)\n sparsity = nn_impl.zero_fraction(value)\n with self.cached_session() as sess:\n self.assertAllClose(\n 0.25,\n sess.run(sparsity, {value: [[0., 1.], [0.3, 2.]]}))\n\n\nclass SoftmaxTest(test_lib.TestCase, parameterized.TestCase):\n\n def _softmax(self, x):\n assert len(x.shape) == 2\n m = x.max(1)[:, np.newaxis]\n u = np.exp(x - m)\n z = u.sum(1)[:, np.newaxis]\n return u / z\n\n @test_util.run_in_graph_and_eager_modes\n def testSoftmax(self):\n x_shape = [5, 10]\n x_np = np.random.randn(*x_shape).astype(np.float32)\n y_np = self._softmax(x_np)\n x_tf = constant_op.constant(x_np)\n y_tf = 
nn_ops.softmax_v2(x_tf)\n y_tf_last_dim = nn_ops.softmax_v2(x_tf, 1)\n y_tf_np = self.evaluate(y_tf)\n y_tf_last_dim_np = self.evaluate(y_tf_last_dim)\n eps = 1e-3\n self.assertAllClose(y_tf_np, y_np, eps)\n self.assertAllClose(y_tf_last_dim_np, y_np, eps)\n\n def testSoftmaxAxes(self):\n arr = np.linspace(0., 1, 12).reshape(3, 4)\n x_neg_axis = nn_ops.softmax_v2(arr, axis=-2)\n y_pos_axis = nn_ops.softmax_v2(arr, axis=0)\n z_gt_axis = nn_ops.softmax_v2(arr, axis=0)\n x_neg_axis_tf = self.evaluate(x_neg_axis)\n y_pos_axis_tf = self.evaluate(y_pos_axis)\n z_gt_axis_tf = self.evaluate(z_gt_axis)\n eps = 1e-3\n self.assertAllClose(x_neg_axis_tf, y_pos_axis_tf, eps)\n self.assertAllClose(y_pos_axis_tf, z_gt_axis_tf, eps)\n\n @parameterized.parameters(((5, 10),), ((2, 3, 4),))\n def testGradient(self, x_shape):\n x_np = np.random.randn(*x_shape).astype(np.float64)\n with self.cached_session():\n x_tf = constant_op.constant(x_np)\n y_tf = nn_ops.softmax_v2(x_tf)\n err = gradient_checker.compute_gradient_error(x_tf, x_shape, y_tf,\n x_shape)\n eps = 2e-8\n self.assertLess(err, eps)\n\n\nclass LogPoissonLossTest(test_lib.TestCase):\n\n def _log_poisson_loss(self, x, z, compute_full_loss=False):\n lpl = np.exp(x) - z * x\n if compute_full_loss:\n stirling_approx = z * np.log(z) - z + 0.5 * np.log(2. * np.pi * z)\n lpl += np.ma.masked_array(stirling_approx, mask=(z <= 1)).filled(0.)\n return lpl\n\n @test_util.run_in_graph_and_eager_modes\n def testLogPoissonLoss(self):\n x_shape = [5, 10]\n x_np = np.random.randn(*x_shape).astype(np.float32)\n z_np = np.random.randint(0, 5, size=x_shape).astype(np.float32)\n y_np = self._log_poisson_loss(x_np, z_np, compute_full_loss=False)\n y_np_stirling = self._log_poisson_loss(x_np, z_np, compute_full_loss=True)\n y_tf = nn_impl.log_poisson_loss(z_np, x_np, compute_full_loss=False)\n y_tf_stirling = nn_impl.log_poisson_loss(z_np, x_np, compute_full_loss=True)\n y_tf_np = self.evaluate(y_tf)\n y_tf_np_stirling = self.evaluate(y_tf_stirling)\n eps = 1e-3\n self.assertAllClose(y_tf_np, y_np, eps)\n self.assertAllClose(y_tf_np_stirling, y_np_stirling, eps)\n\n def testGradient(self):\n x_shape = [5, 10]\n x_np = np.random.randn(*x_shape).astype(np.float64)\n z_np = np.random.randint(0, 5, size=x_shape).astype(np.float64)\n with self.cached_session():\n x_tf = constant_op.constant(x_np)\n y_tf = nn_impl.log_poisson_loss(z_np, x_tf, compute_full_loss=False)\n y_tf_stirling = nn_impl.log_poisson_loss(\n z_np, x_tf, compute_full_loss=True)\n err = gradient_checker.compute_gradient_error(x_tf, x_shape, y_tf,\n x_shape)\n err_stirling = gradient_checker.compute_gradient_error(\n x_tf, x_shape, y_tf_stirling, x_shape)\n eps = 1e-6\n self.assertLess(err, eps)\n self.assertLess(err_stirling, eps)\n\n\nclass LogSoftmaxTest(test_lib.TestCase, parameterized.TestCase):\n\n def _log_softmax(self, x):\n assert len(x.shape) == 2\n m = x.max(1)[:, np.newaxis]\n u = x - m\n return u - np.log(np.sum(np.exp(u), 1, keepdims=True))\n\n @test_util.run_in_graph_and_eager_modes\n def testLogSoftmax(self):\n x_shape = [5, 10]\n x_np = np.random.randn(*x_shape).astype(np.float32)\n y_np = self._log_softmax(x_np)\n x_tf = constant_op.constant(x_np)\n y_tf = nn_ops.log_softmax_v2(x_tf)\n y_tf_np = self.evaluate(y_tf)\n eps = 1e-3\n self.assertAllClose(y_tf_np, y_np, eps)\n\n def testLogSoftmaxAxes(self):\n arr = np.linspace(0., 1, 12).reshape(3, 4)\n x_neg_axis = nn_ops.log_softmax_v2(arr, axis=-2)\n y_pos_axis = nn_ops.log_softmax_v2(arr, axis=0)\n z_gt_axis = nn_ops.log_softmax_v2(arr, 
axis=0)\n x_neg_axis_tf = self.evaluate(x_neg_axis)\n y_pos_axis_tf = self.evaluate(y_pos_axis)\n z_gt_axis_tf = self.evaluate(z_gt_axis)\n eps = 1e-3\n self.assertAllClose(x_neg_axis_tf, y_pos_axis_tf, eps)\n self.assertAllClose(y_pos_axis_tf, z_gt_axis_tf, eps)\n\n @parameterized.parameters(((5, 10),), ((2, 3, 4),))\n def testGradient(self, x_shape):\n x_np = np.random.randn(*x_shape).astype(np.float64)\n with self.cached_session():\n x_tf = constant_op.constant(x_np)\n y_tf = nn_ops.log_softmax_v2(x_tf)\n err = gradient_checker.compute_gradient_error(x_tf, x_shape, y_tf,\n x_shape)\n eps = 1e-7\n self.assertLess(err, eps)\n\n\nclass L2LossTest(test_lib.TestCase):\n\n @test_util.run_in_graph_and_eager_modes\n def testL2Loss(self):\n for dtype in [dtypes.float32, dtypes.float64]:\n x = constant_op.constant(\n [1.0, 0.0, 3.0, 2.0], shape=[2, 2], name=\"x\", dtype=dtype)\n l2loss = nn_ops.l2_loss(x)\n value = self.evaluate(l2loss)\n self.assertAllClose(7.0, value)\n\n def testGradient(self):\n x_shape = [20, 7, 3]\n np.random.seed(1) # Make it reproducible.\n x_val = np.random.random_sample(x_shape).astype(np.float64)\n with self.cached_session():\n x = constant_op.constant(x_val, name=\"x\")\n output = nn_ops.l2_loss(x)\n err = gradient_checker.compute_gradient_error(x, x_shape, output, [1])\n print(\"L2Loss gradient err = %g \" % err)\n err_tolerance = 1e-10\n self.assertLess(err, err_tolerance)\n\n\nclass L2NormalizeTest(test_lib.TestCase):\n\n def _l2Normalize(self, x, dim):\n if isinstance(dim, list):\n norm = np.linalg.norm(x, axis=tuple(dim))\n for d in dim:\n norm = np.expand_dims(norm, d)\n return x / norm\n else:\n norm = np.apply_along_axis(np.linalg.norm, dim, x)\n return x / np.expand_dims(norm, dim)\n\n @test_util.run_in_graph_and_eager_modes\n def testL2Normalize(self):\n x_shape = [20, 7, 3]\n np.random.seed(1)\n x_np = np.random.random_sample(x_shape).astype(np.float32)\n for dim in range(len(x_shape)):\n y_np = self._l2Normalize(x_np, dim)\n x_tf = constant_op.constant(x_np, name=\"x\")\n y_tf = nn_impl.l2_normalize_v2(x_tf, dim)\n self.assertAllClose(y_np, self.evaluate(y_tf))\n\n @test_util.run_in_graph_and_eager_modes\n def testL2NormalizeDimArray(self):\n x_shape = [20, 7, 3]\n np.random.seed(1)\n x_np = np.random.random_sample(x_shape).astype(np.float32)\n dim = [1, 2]\n y_np = self._l2Normalize(x_np, dim)\n x_tf = constant_op.constant(x_np, name=\"x\")\n y_tf = nn_impl.l2_normalize_v2(x_tf, dim)\n self.assertAllClose(y_np, self.evaluate(y_tf))\n\n def testL2NormalizeGradient(self):\n x_shape = [20, 7, 3]\n np.random.seed(1)\n x_np = np.random.random_sample(x_shape).astype(np.float64)\n for dim in range(len(x_shape)):\n with self.cached_session():\n x_tf = constant_op.constant(x_np, name=\"x\")\n y_tf = nn_impl.l2_normalize_v2(x_tf, dim)\n err = gradient_checker.compute_gradient_error(x_tf, x_shape, y_tf,\n x_shape)\n print(\"L2Normalize gradient err = %g \" % err)\n self.assertLess(err, 1e-4)\n\n\nclass DropoutTest(test_lib.TestCase):\n\n def testDropout(self):\n # Runs dropout with 0-1 tensor 10 times, sum the number of ones and validate\n # that it is producing approximately the right number of ones over a large\n # number of samples, based on the keep probability.\n x_dim = 40\n y_dim = 30\n num_iter = 10\n for keep_prob in [0.1, 0.5, 0.8]:\n t = constant_op.constant(1.0, shape=[x_dim, y_dim], dtype=dtypes.float32)\n dropout = nn_ops.dropout(t, keep_prob)\n final_count = 0\n self.assertEqual([x_dim, y_dim], dropout.get_shape())\n for _ in xrange(0, num_iter):\n 
value = self.evaluate(dropout)\n final_count += np.count_nonzero(value)\n # Verifies that there are only two values: 0 and 1/keep_prob.\n sorted_value = np.unique(np.sort(value))\n self.assertEqual(0, sorted_value[0])\n self.assertAllClose(1 / keep_prob, sorted_value[1])\n\n # Check that we are in the 15% error range\n expected_count = x_dim * y_dim * keep_prob * num_iter\n rel_error = math.fabs(final_count - expected_count) / expected_count\n print(rel_error)\n self.assertTrue(rel_error < 0.15)\n\n def testShapedDropout(self):\n # Runs dropout with 0-1 tensor 10 times, sum the number of ones and validate\n # that it is producing approximately the right number of ones over a large\n # number of samples, based on the keep probability. This time with shaped\n # noise.\n x_dim = 40 * 30\n y_dim = 3\n num_iter = 10\n for keep_prob in [0.1, 0.5, 0.8]:\n t = constant_op.constant(1.0, shape=[x_dim, y_dim], dtype=dtypes.float32)\n dropout = nn_ops.dropout(t, keep_prob, noise_shape=[x_dim, 1])\n self.assertEqual([x_dim, y_dim], dropout.get_shape())\n final_count = 0\n for _ in xrange(0, num_iter):\n value = self.evaluate(dropout)\n final_count += np.count_nonzero(value)\n # Verifies that there are only two values: 0 and 1/keep_prob.\n sorted_value = np.unique(np.sort(value))\n self.assertEqual(0, sorted_value[0])\n self.assertAllClose(1 / keep_prob, sorted_value[1])\n\n # Check that we are in the 15% error range\n expected_count = x_dim * y_dim * keep_prob * num_iter\n rel_error = math.fabs(final_count - expected_count) / expected_count\n print(rel_error)\n self.assertTrue(rel_error < 0.15)\n\n def testShapedDropoutCorrelation(self):\n # Runs a shaped dropout and tests that the correlations are correct.\n x_dim = 40\n y_dim = 30\n num_iter = 10\n for keep_prob in [0.1, 0.5, 0.8]:\n t = constant_op.constant(1.0, shape=[x_dim, y_dim], dtype=dtypes.float32)\n dropout = nn_ops.dropout(t, keep_prob, noise_shape=[x_dim, 1])\n self.assertEqual([x_dim, y_dim], dropout.get_shape())\n for _ in xrange(0, num_iter):\n value = self.evaluate(dropout)\n # Verifies that each y column as only one type of activation.\n for i in xrange(x_dim):\n sorted_value = np.unique(np.sort(value[i, :]))\n self.assertEqual(sorted_value.size, 1)\n\n def testDropoutPlaceholderKeepProb(self):\n # Runs dropout with 0-1 tensor 10 times, sum the number of ones and validate\n # that it is producing approximately the right number of ones over a large\n # number of samples, based on the keep probability.\n x_dim = 40\n y_dim = 30\n num_iter = 10\n for keep_prob in [0.1, 0.5, 0.8]:\n with self.cached_session():\n t = constant_op.constant(\n 1.0, shape=[x_dim, y_dim], dtype=dtypes.float32)\n keep_prob_placeholder = array_ops.placeholder(dtypes.float32)\n dropout = nn_ops.dropout(t, keep_prob_placeholder)\n final_count = 0\n self.assertEqual([x_dim, y_dim], dropout.get_shape())\n for _ in xrange(0, num_iter):\n value = dropout.eval(feed_dict={keep_prob_placeholder: keep_prob})\n final_count += np.count_nonzero(value)\n # Verifies that there are only two values: 0 and 1/keep_prob.\n sorted_value = np.unique(np.sort(value))\n self.assertEqual(0, sorted_value[0])\n self.assertAllClose(1 / keep_prob, sorted_value[1])\n # Check that we are in the 15% error range\n expected_count = x_dim * y_dim * keep_prob * num_iter\n rel_error = math.fabs(final_count - expected_count) / expected_count\n print(rel_error)\n self.assertTrue(rel_error < 0.15)\n\n def testShapedDropoutUnknownShape(self):\n x_dim = 40\n y_dim = 30\n keep_prob = 0.5\n x = 
constant_op.constant(1.0, shape=[x_dim, y_dim], dtype=dtypes.float32)\n dropout_x = nn_ops.dropout(\n x, keep_prob, noise_shape=array_ops.placeholder(dtypes.int32))\n self.assertEqual(x.get_shape(), dropout_x.get_shape())\n\n def testPartialShapedDropout(self):\n x_dim = 40 * 30\n y_dim = 3\n num_iter = 10\n for keep_prob in [0.1, 0.5, 0.8]:\n t = constant_op.constant(1.0, shape=[x_dim, y_dim], dtype=dtypes.float32)\n # Set noise_shape=[None, 1] which means [x_dim, 1].\n dropout = nn_ops.dropout(t, keep_prob, noise_shape=[None, 1])\n self.assertEqual([x_dim, y_dim], dropout.get_shape())\n final_count = 0\n for _ in xrange(0, num_iter):\n value = self.evaluate(dropout)\n final_count += np.count_nonzero(value)\n # Verifies that there are only two values: 0 and 1/keep_prob.\n sorted_value = np.unique(np.sort(value))\n self.assertEqual(0, sorted_value[0])\n self.assertAllClose(1 / keep_prob, sorted_value[1])\n\n # Check that we are in the 15% error range\n expected_count = x_dim * y_dim * keep_prob * num_iter\n rel_error = math.fabs(final_count - expected_count) / expected_count\n print(rel_error)\n self.assertTrue(rel_error < 0.15)\n\n def testInvalidKeepProb(self):\n x_dim = 40\n y_dim = 30\n t = constant_op.constant(1.0, shape=[x_dim, y_dim], dtype=dtypes.float32)\n with self.assertRaises(ValueError):\n nn_ops.dropout(t, -1.0)\n with self.assertRaises(ValueError):\n nn_ops.dropout(t, 1.1)\n with self.assertRaises(ValueError):\n nn_ops.dropout(t, [0.0, 1.0])\n with self.assertRaises(ValueError):\n nn_ops.dropout(t, array_ops.placeholder(dtypes.float64))\n with self.assertRaises(ValueError):\n nn_ops.dropout(t, array_ops.placeholder(dtypes.float32, shape=[2]))\n\n def testShapedDropoutShapeError(self):\n # Runs shaped dropout and verifies an error is thrown on misshapen noise.\n x_dim = 40\n y_dim = 30\n keep_prob = 0.5\n t = constant_op.constant(1.0, shape=[x_dim, y_dim], dtype=dtypes.float32)\n with self.assertRaises(ValueError):\n _ = nn_ops.dropout(t, keep_prob, noise_shape=[x_dim, y_dim + 10])\n with self.assertRaises(ValueError):\n _ = nn_ops.dropout(t, keep_prob, noise_shape=[x_dim, y_dim, 5])\n with self.assertRaises(ValueError):\n _ = nn_ops.dropout(t, keep_prob, noise_shape=[x_dim + 3])\n with self.assertRaises(ValueError):\n _ = nn_ops.dropout(t, keep_prob, noise_shape=[x_dim])\n # test that broadcasting proceeds\n _ = nn_ops.dropout(t, keep_prob, noise_shape=[y_dim])\n _ = nn_ops.dropout(t, keep_prob, noise_shape=[1, y_dim])\n _ = nn_ops.dropout(t, keep_prob, noise_shape=[x_dim, 1])\n _ = nn_ops.dropout(t, keep_prob, noise_shape=[1, 1])\n\n def testNoDropoutFast(self):\n x = array_ops.zeros((5,))\n for p in 1, constant_op.constant(1.0):\n y = nn_ops.dropout(x, keep_prob=p)\n self.assertTrue(x is y)\n\n def testDropoutWithIntegerInputs(self):\n x = constant_op.constant([1, 1, 1, 1, 1])\n with self.assertRaises(ValueError):\n _ = nn_ops.dropout(x, 0.5)\n\n\nclass ComputeSampledLogitsTest(test_lib.TestCase):\n\n def setUp(self):\n self._eps = 1e-3\n\n def _GenerateTestData(self, num_classes, dim, batch_size, num_true, labels,\n sampled, subtract_log_q):\n \"\"\"Randomly generates input/output data for a single test case.\n\n This function returns numpy constants for use in a test case.\n\n Args:\n num_classes: An int. The number of embedding classes in the test case.\n dim: An int. The dimension of the embedding.\n batch_size: An int. The batch size.\n num_true: An int. The number of target classes per training example.\n labels: A list of batch_size * num_true ints. 
The target classes.\n sampled: A list of indices in [0, num_classes).\n subtract_log_q: A bool corresponding to the parameter in\n _compute_sampled_logits().\n\n Returns:\n weights: Embedding weights to use as test input. It is a numpy array\n of shape [num_classes, dim]\n biases: Embedding biases to use as test input. It is a numpy array\n of shape [num_classes].\n hidden_acts: Forward activations of the network to use as test input.\n It is a numpy array of shape [batch_size, dim].\n sampled_vals: A tuple based on `sampled` to use as test input in the\n format returned by a *_candidate_sampler function.\n exp_logits: The output logits expected from _compute_sampled_logits().\n It is a numpy array of shape [batch_size, num_true + len(sampled)].\n exp_labels: The output labels expected from _compute_sampled_logits().\n It is a numpy array of shape [batch_size, num_true + len(sampled)].\n \"\"\"\n weights = np.random.randn(num_classes, dim).astype(np.float32)\n biases = np.random.randn(num_classes).astype(np.float32)\n hidden_acts = np.random.randn(batch_size, dim).astype(np.float32)\n\n true_exp = np.full([batch_size, 1], fill_value=0.5, dtype=np.float32)\n sampled_exp = np.full([len(sampled)], fill_value=0.5, dtype=np.float32)\n sampled_vals = (sampled, true_exp, sampled_exp)\n\n sampled_w, sampled_b = weights[sampled], biases[sampled]\n true_w, true_b = weights[labels], biases[labels]\n\n true_logits = np.sum(\n hidden_acts.reshape((batch_size, 1, dim)) * true_w.reshape(\n (batch_size, num_true, dim)),\n axis=2)\n true_b = true_b.reshape((batch_size, num_true))\n true_logits += true_b\n sampled_logits = np.dot(hidden_acts, sampled_w.T) + sampled_b\n\n if subtract_log_q:\n true_logits -= np.log(true_exp)\n sampled_logits -= np.log(sampled_exp[np.newaxis, :])\n\n exp_logits = np.concatenate([true_logits, sampled_logits], axis=1)\n exp_labels = np.hstack((np.ones_like(true_logits) / num_true,\n np.zeros_like(sampled_logits)))\n\n return weights, biases, hidden_acts, sampled_vals, exp_logits, exp_labels\n\n def _ShardTestEmbeddings(self, weights, biases, num_shards):\n \"\"\"Shards the weights and biases returned by _GenerateTestData.\n\n Args:\n weights: The weights returned by _GenerateTestData.\n biases: The biases returned by _GenerateTestData.\n num_shards: The number of shards to create.\n\n Returns:\n sharded_weights: A list of size `num_shards` containing all the weights.\n sharded_biases: A list of size `num_shards` containing all the biases.\n \"\"\"\n with ops.Graph().as_default() as g:\n sharded_weights = variable_scope.get_variable(\n \"w\",\n partitioner=partitioned_variables.fixed_size_partitioner(num_shards),\n initializer=constant_op.constant(weights))\n sharded_biases = variable_scope.get_variable(\n \"b\",\n partitioner=partitioned_variables.fixed_size_partitioner(num_shards),\n initializer=constant_op.constant(biases))\n with self.session(graph=g) as sess:\n variables.global_variables_initializer().run()\n return self.evaluate([list(sharded_weights), list(sharded_biases)])\n\n def testShapes(self):\n np.random.seed(0)\n num_classes = 5\n batch_size = 3\n\n for num_true in range(1, 5):\n labels = np.random.randint(\n low=0, high=num_classes, size=batch_size * num_true)\n (weights, biases, hidden_acts, sampled_vals, exp_logits,\n exp_labels) = self._GenerateTestData(\n num_classes=num_classes,\n dim=10,\n batch_size=batch_size,\n num_true=num_true,\n labels=labels,\n sampled=[1, 0, 2, 3],\n subtract_log_q=False)\n logits_tensor, labels_tensor = _compute_sampled_logits(\n 
weights=constant_op.constant(weights),\n biases=constant_op.constant(biases),\n labels=constant_op.constant(\n labels, dtype=dtypes.int64, shape=(batch_size, num_true)),\n inputs=constant_op.constant(hidden_acts),\n num_sampled=4,\n num_classes=num_classes,\n num_true=num_true,\n sampled_values=sampled_vals,\n subtract_log_q=False,\n remove_accidental_hits=False,\n partition_strategy=\"div\",\n name=\"sampled_logits_basic_num_true_%d\" % num_true)\n got_logits, got_labels = self.evaluate([logits_tensor, labels_tensor])\n self.assertEqual(exp_logits.shape, got_logits.shape, self._eps)\n self.assertEqual(exp_labels.shape, got_labels.shape, self._eps)\n\n def testBasic(self):\n \"\"\"Without accidental hit removal or subtract_log_q.\"\"\"\n np.random.seed(0)\n num_classes = 5\n batch_size = 3\n\n for num_true in range(1, 5):\n labels = np.random.randint(\n low=0, high=num_classes, size=batch_size * num_true)\n (weights, biases, hidden_acts, sampled_vals, exp_logits,\n exp_labels) = self._GenerateTestData(\n num_classes=num_classes,\n dim=10,\n batch_size=batch_size,\n num_true=num_true,\n labels=labels,\n sampled=[1, 0, 2, 3],\n subtract_log_q=False)\n logits_tensor, labels_tensor = _compute_sampled_logits(\n weights=constant_op.constant(weights),\n biases=constant_op.constant(biases),\n labels=constant_op.constant(\n labels, dtype=dtypes.int64, shape=(batch_size, num_true)),\n inputs=constant_op.constant(hidden_acts),\n num_sampled=4,\n num_classes=num_classes,\n num_true=num_true,\n sampled_values=sampled_vals,\n subtract_log_q=False,\n remove_accidental_hits=False,\n partition_strategy=\"div\",\n name=\"sampled_logits_basic_num_true_%d\" % num_true)\n got_logits, got_labels = self.evaluate([logits_tensor, labels_tensor])\n self.assertAllClose(exp_logits, got_logits, self._eps)\n self.assertAllClose(exp_labels, got_labels, self._eps)\n\n def testAccidentalHitRemoval(self):\n \"\"\"With accidental hit removal, no subtract_log_q.\"\"\"\n np.random.seed(0)\n num_classes = 5\n batch_size = 3\n sampled = [1, 0, 2, 3]\n\n for num_true in range(1, 5):\n labels = np.random.randint(\n low=0, high=num_classes, size=batch_size * num_true)\n (weights, biases, hidden_acts, sampled_vals, _,\n _) = self._GenerateTestData(\n num_classes=num_classes,\n dim=10,\n batch_size=batch_size,\n num_true=num_true,\n labels=labels,\n sampled=sampled,\n subtract_log_q=False)\n logits_tensor, _ = _compute_sampled_logits(\n weights=constant_op.constant(weights),\n biases=constant_op.constant(biases),\n labels=constant_op.constant(\n labels, dtype=dtypes.int64, shape=(batch_size, num_true)),\n inputs=constant_op.constant(hidden_acts),\n num_sampled=len(sampled),\n num_classes=num_classes,\n num_true=num_true,\n sampled_values=sampled_vals,\n subtract_log_q=False,\n remove_accidental_hits=True,\n partition_strategy=\"div\",\n name=\"sampled_logits_accidental_hit_removal_num_true_%d\" % num_true)\n # Test that the exponentiated logits of accidental hits are near 0.\n # First we need to find the hits in this random test run:\n labels_reshape = labels.reshape((batch_size, num_true))\n got_logits = self.evaluate(logits_tensor)\n for row in xrange(batch_size):\n row_labels = labels_reshape[row, :]\n for col in xrange(len(sampled)):\n if sampled[col] in row_labels:\n # We need to add the num_true_test offset into logits_*\n self.assertNear(\n np.exp(got_logits[row, col + num_true]), 0., self._eps)\n\n def testSubtractLogQ(self):\n \"\"\"With subtract_log_q, no accidental hit removal.\"\"\"\n np.random.seed(0)\n num_classes = 
5\n batch_size = 3\n\n for num_true in range(1, 5):\n labels = np.random.randint(\n low=0, high=num_classes, size=batch_size * num_true)\n (weights, biases, hidden_acts, sampled_vals, exp_logits,\n exp_labels) = self._GenerateTestData(\n num_classes=num_classes,\n dim=10,\n batch_size=batch_size,\n num_true=num_true,\n labels=labels,\n sampled=[1, 0, 2, 3],\n subtract_log_q=True)\n logits_tensor, labels_tensor = _compute_sampled_logits(\n weights=constant_op.constant(weights),\n biases=constant_op.constant(biases),\n labels=constant_op.constant(\n labels, dtype=dtypes.int64, shape=(batch_size, num_true)),\n inputs=constant_op.constant(hidden_acts),\n num_sampled=4,\n num_classes=num_classes,\n num_true=num_true,\n sampled_values=sampled_vals,\n subtract_log_q=True,\n remove_accidental_hits=False,\n partition_strategy=\"div\",\n name=\"sampled_logits_subtract_log_q_num_true_%d\" % num_true)\n got_logits, got_labels = self.evaluate([logits_tensor, labels_tensor])\n self.assertAllClose(exp_logits, got_logits, self._eps)\n self.assertAllClose(exp_labels, got_labels, self._eps)\n\n def testSharded(self):\n \"\"\"With sharded weights and sharded biases.\"\"\"\n np.random.seed(0)\n num_classes = 5\n batch_size = 3\n\n for num_true in range(1, 5):\n labels = np.random.randint(\n low=0, high=num_classes, size=batch_size * num_true)\n (weights, biases, hidden_acts, sampled_vals, exp_logits,\n exp_labels) = self._GenerateTestData(\n num_classes=num_classes,\n dim=10,\n batch_size=batch_size,\n num_true=num_true,\n labels=labels,\n sampled=[1, 0, 2, 3],\n subtract_log_q=False)\n weight_shards, bias_shards = self._ShardTestEmbeddings(\n weights, biases, num_shards=3)\n logits_tensor, labels_tensor = _compute_sampled_logits(\n weights=[constant_op.constant(shard) for shard in weight_shards],\n biases=[constant_op.constant(shard) for shard in bias_shards],\n labels=constant_op.constant(\n labels, dtype=dtypes.int64, shape=(batch_size, num_true)),\n inputs=constant_op.constant(hidden_acts),\n num_sampled=4,\n num_classes=num_classes,\n num_true=num_true,\n sampled_values=sampled_vals,\n subtract_log_q=False,\n remove_accidental_hits=False,\n partition_strategy=\"div\",\n name=\"sampled_logits_sharded_num_true_%d\" % num_true)\n got_logits, got_labels = self.evaluate([logits_tensor, labels_tensor])\n self.assertAllClose(exp_logits, got_logits, self._eps)\n self.assertAllClose(exp_labels, got_labels, self._eps)\n\n def testNCELoss(self):\n # A simple test to verify the numerics.\n\n def _SigmoidCrossEntropyWithLogits(logits, targets):\n # logits, targets: float arrays of the same shape.\n assert logits.shape == targets.shape\n pred = 1. / (1. + np.exp(-logits))\n eps = 0.0001\n pred = np.minimum(np.maximum(pred, eps), 1 - eps)\n return -targets * np.log(pred) - (1. - targets) * np.log(1. 
- pred)\n\n np.random.seed(0)\n num_classes = 5\n batch_size = 3\n labels = [0, 1, 2]\n (weights, biases, hidden_acts, sampled_vals, exp_logits,\n exp_labels) = self._GenerateTestData(\n num_classes=num_classes,\n dim=10,\n batch_size=batch_size,\n num_true=1,\n labels=labels,\n sampled=[1, 0, 2, 3],\n subtract_log_q=True)\n exp_nce_loss = np.sum(\n _SigmoidCrossEntropyWithLogits(exp_logits, exp_labels), 1)\n\n got_nce_loss = nn_impl.nce_loss(\n weights=constant_op.constant(weights),\n biases=constant_op.constant(biases),\n labels=constant_op.constant(labels, shape=(batch_size, 1)),\n inputs=constant_op.constant(hidden_acts),\n num_sampled=4,\n num_classes=num_classes,\n num_true=1,\n sampled_values=sampled_vals,\n partition_strategy=\"div\")\n\n self.assertAllClose(exp_nce_loss, self.evaluate(got_nce_loss), 1e-4)\n\n # Test with sharded weights and sharded biases.\n weight_shards, bias_shards = self._ShardTestEmbeddings(\n weights, biases, num_shards=3)\n got_nce_loss = nn_impl.nce_loss(\n weights=[constant_op.constant(shard) for shard in weight_shards],\n biases=[constant_op.constant(shard) for shard in bias_shards],\n labels=constant_op.constant(labels, shape=(batch_size, 1)),\n inputs=constant_op.constant(hidden_acts),\n num_sampled=4,\n num_classes=num_classes,\n num_true=1,\n sampled_values=sampled_vals,\n partition_strategy=\"div\")\n\n self.assertAllClose(exp_nce_loss, self.evaluate(got_nce_loss), 1e-4)\n\n def testSampledSoftmaxLoss(self):\n # A simple test to verify the numerics.\n\n def _SoftmaxCrossEntropyWithLogits(logits, targets):\n # logits, targets: float arrays of the same shape.\n assert logits.shape == targets.shape\n stable_exp_logits = np.exp(\n logits - np.amax(logits, axis=1, keepdims=True))\n pred = stable_exp_logits / np.sum(stable_exp_logits, 1, keepdims=True)\n return -np.sum(targets * np.log(pred + 1.0e-20), axis=1)\n\n np.random.seed(0)\n num_classes = 5\n batch_size = 3\n labels = [0, 1, 2]\n (weights, biases, hidden_acts, sampled_vals, exp_logits,\n exp_labels) = self._GenerateTestData(\n num_classes=num_classes,\n dim=10,\n batch_size=batch_size,\n num_true=1,\n labels=labels,\n sampled=[1, 0, 2, 3],\n subtract_log_q=True)\n exp_sampled_softmax_loss = _SoftmaxCrossEntropyWithLogits(\n exp_logits, exp_labels)\n\n got_sampled_softmax_loss = nn_impl.sampled_softmax_loss(\n weights=constant_op.constant(weights),\n biases=constant_op.constant(biases),\n labels=constant_op.constant(labels, shape=(batch_size, 1)),\n inputs=constant_op.constant(hidden_acts),\n num_sampled=4,\n num_classes=num_classes,\n num_true=1,\n sampled_values=sampled_vals,\n remove_accidental_hits=False,\n partition_strategy=\"div\")\n\n self.assertAllClose(exp_sampled_softmax_loss,\n self.evaluate(got_sampled_softmax_loss), 1e-4)\n\n # Test with sharded weights and sharded biases.\n weight_shards, bias_shards = self._ShardTestEmbeddings(\n weights, biases, num_shards=3)\n got_sampled_softmax_loss = nn_impl.sampled_softmax_loss(\n weights=[constant_op.constant(shard) for shard in weight_shards],\n biases=[constant_op.constant(shard) for shard in bias_shards],\n labels=constant_op.constant(labels, shape=(batch_size, 1)),\n inputs=constant_op.constant(hidden_acts),\n num_sampled=4,\n num_classes=num_classes,\n num_true=1,\n sampled_values=sampled_vals,\n remove_accidental_hits=False,\n partition_strategy=\"div\")\n\n self.assertAllClose(exp_sampled_softmax_loss,\n self.evaluate(got_sampled_softmax_loss), 1e-4)\n\n def testSampledSoftmaxLossBf16(self):\n # A simple test to verify the numerics 
for bfloat16.\n def _SoftmaxCrossEntropyWithLogits(logits, targets):\n # logits, targets: float arrays of the same shape.\n assert logits.shape == targets.shape\n stable_exp_logits = np.exp(\n logits - np.amax(logits, axis=1, keepdims=True))\n pred = stable_exp_logits / np.sum(stable_exp_logits, 1, keepdims=True)\n return -np.sum(targets * np.log(pred + 1.0e-20), axis=1)\n\n np.random.seed(0)\n num_classes = 5\n batch_size = 3\n labels = [0, 1, 2]\n sampled = [1, 0, 2, 3]\n (weights, biases, hidden_acts, _, exp_logits,\n exp_labels) = self._GenerateTestData(\n num_classes=num_classes,\n dim=10,\n batch_size=batch_size,\n num_true=1,\n labels=labels,\n sampled=sampled,\n subtract_log_q=True)\n exp_sampled_softmax_loss = _SoftmaxCrossEntropyWithLogits(\n exp_logits, exp_labels)\n\n true_exp_bf16 = np.full([batch_size, 1],\n fill_value=0.5,\n dtype=dtypes.bfloat16.as_numpy_dtype)\n sampled_exp_bf16 = np.full([len(sampled)],\n fill_value=0.5,\n dtype=dtypes.bfloat16.as_numpy_dtype)\n sampled_vals_bf16 = (sampled, true_exp_bf16, sampled_exp_bf16)\n\n got_sampled_softmax_loss = math_ops.cast(\n nn_impl.sampled_softmax_loss(\n weights=constant_op.constant(weights, dtype=dtypes.bfloat16),\n biases=constant_op.constant(biases, dtype=dtypes.bfloat16),\n labels=constant_op.constant(\n labels, shape=(batch_size, 1), dtype=dtypes.bfloat16),\n inputs=constant_op.constant(hidden_acts, dtype=dtypes.bfloat16),\n num_sampled=4,\n num_classes=num_classes,\n num_true=1,\n sampled_values=sampled_vals_bf16,\n remove_accidental_hits=False,\n partition_strategy=\"div\"), dtypes.float32)\n\n self.assertAllClose(exp_sampled_softmax_loss,\n self.evaluate(got_sampled_softmax_loss), 1e-1)\n\n\nclass CReluTest(test_lib.TestCase):\n\n def test(self):\n np.random.seed(1) # Make it reproducible.\n x = np.random.randn(3, 4).astype(np.float32)\n y = np.concatenate([x * (x > 0), -x * (x < 0)], axis=1)\n\n z = self.evaluate(nn_ops.crelu(constant_op.constant(x)))\n self.assertAllClose(y, z, 1e-4)\n\n\nclass ReluTest(test_lib.TestCase):\n\n def test(self):\n np.random.seed(1) # Make it reproducible.\n x = np.random.randn(3, 4).astype(np.float32)\n y = np.maximum(x, 0.0)\n\n z = self.evaluate(nn_ops.relu(constant_op.constant(x)))\n self.assertAllEqual(y, z)\n\n def testNaNs(self):\n # Test that relu(nan) = nan for various sizes.\n for i in range(18):\n x = np.zeros(i) + np.nan\n with self.cached_session():\n z = nn_ops.relu(constant_op.constant(x)).eval()\n self.assertTrue(np.isnan(z).all())\n\n\nclass LeakyReluTest(test_lib.TestCase):\n\n def testRange(self):\n batch_size = 3\n height, width = 4, 4\n np.random.seed(1) # Make it reproducible.\n inputs = np.random.uniform(size=(batch_size, height, width, 3)).astype(\n np.float32)\n inputs = constant_op.constant(inputs)\n\n outputs = nn_ops.leaky_relu(inputs)\n self.assertEquals(inputs.shape, outputs.shape)\n\n inputs, outputs = self.evaluate([inputs, outputs])\n\n self.assertGreaterEqual(outputs.min(), 0.0)\n self.assertLessEqual(outputs.max(), 1.0)\n self.assertAllClose(inputs, outputs)\n\n def testValues(self):\n for dtype in [np.int32, np.int64, np.float16, np.float32, np.float64]:\n np_values = np.array([-2, -1, 0, 1, 2], dtype=dtype)\n outputs = nn_ops.leaky_relu(constant_op.constant(np_values))\n\n outputs = self.evaluate(outputs)\n\n tol = 2e-3 if dtype == np.float16 else 1e-6\n self.assertAllClose(\n outputs, [-0.4, -0.2, 0.0, 1.0, 2.0], rtol=tol, atol=tol)\n\n def testName(self):\n np_values = np.array([-2, -1, 0, 1, 2], dtype=np.float64)\n outputs_with_name_set = 
nn_ops.leaky_relu(\n constant_op.constant(np_values),\n name='test_relu_op')\n self.assertEqual(outputs_with_name_set.name, 'test_relu_op:0')\n outputs_without_name_set = nn_ops.leaky_relu(\n constant_op.constant(np_values))\n self.assertEqual(outputs_without_name_set.name, 'LeakyRelu:0')\n\n\nclass SwishTest(test_lib.TestCase):\n\n def testValues(self):\n np_values = np.array(\n [np.linspace(-10.0, 0.0, 100),\n np.linspace(0.0, 10.0, 100)],\n dtype=np.float32)\n tf_values = constant_op.constant(np_values)\n actual_tf_outputs = nn_impl.swish(tf_values)\n expected_tf_outputs = tf_values * math_ops.sigmoid(tf_values)\n\n actual_outputs, expected_outputs = self.evaluate(\n [actual_tf_outputs, expected_tf_outputs])\n\n self.assertAllClose(actual_outputs, expected_outputs)\n\n def testGradients(self):\n shape = [5, 3, 4]\n sigma = 5\n input_values = np.random.randn(*shape) * sigma\n x_tf = constant_op.constant(input_values)\n y_tf = nn_impl.swish(x_tf)\n with self.cached_session():\n err = gradient_checker.compute_gradient_error(x_tf, shape, y_tf, shape)\n self.assertLess(err, 1e-4)\n\n\nclass MomentsTest(test_lib.TestCase):\n\n def doOutputTest(self,\n input_shape,\n moments_axes,\n tol=1e-4,\n check_gradients=False):\n for mu in [0.0, 1.0, 1e3]:\n for sigma in [1.0, 0.1]:\n for keep_dims in [True, False]:\n input_values = np.random.rand(*input_shape) * sigma + mu\n expected_mean = np.mean(\n input_values, axis=moments_axes, keepdims=keep_dims)\n expected_var = np.var(\n input_values, axis=moments_axes, keepdims=keep_dims)\n with ops.Graph().as_default() as g:\n with self.session(graph=g) as sess:\n inputs = constant_op.constant(\n input_values, shape=input_shape, dtype=dtypes.float32)\n mean, variance = nn_impl.moments_v2(\n inputs, moments_axes, keepdims=keep_dims)\n\n if check_gradients:\n err = gradient_checker.compute_gradient_error(\n inputs, input_shape, mean, mean.shape.as_list())\n self.assertLess(err, 1e-3)\n err = gradient_checker.compute_gradient_error(\n inputs, input_shape, variance, variance.shape.as_list())\n self.assertLess(err, 1e-3)\n\n # Evaluate.\n [mean, variance] = self.evaluate([mean, variance])\n # Make sure that there are no NaNs\n self.assertFalse(np.isnan(mean).any())\n self.assertFalse(np.isnan(variance).any())\n self.assertAllClose(mean, expected_mean, rtol=tol, atol=tol)\n self.assertAllClose(variance, expected_var, rtol=tol, atol=tol)\n\n def testOutputAndGradient2DInput0(self):\n self.doOutputTest((10, 10), (0,), check_gradients=True)\n\n def testOutputAndGradient2DInput01(self):\n self.doOutputTest((10, 10), (0, 1), check_gradients=True)\n\n def testOutput2DInput0(self):\n self.doOutputTest((10, 300), (0,))\n\n def testOutput2DInput1(self):\n self.doOutputTest((10, 300), (1,))\n\n def testOutput2DInput01(self):\n self.doOutputTest((10, 300), (0, 1))\n\n def testOutput4DInput0(self):\n self.doOutputTest((10, 10, 10, 30), (0,))\n\n def testOutput4DInput1(self):\n self.doOutputTest((10, 10, 10, 30), (1,))\n\n def testOutput4DInput3(self):\n self.doOutputTest((10, 10, 10, 30), (3,))\n\n def testOutput4DInput012(self):\n self.doOutputTest((10, 10, 10, 30), (0, 1, 2))\n\n def testOutput4DInput123(self):\n self.doOutputTest((10, 10, 10, 30), (1, 2, 3))\n\n\nclass DataFormatDimMapTest(test_lib.TestCase):\n\n def _test(self, x_val, y_val_expected):\n x = constant_op.constant(x_val)\n y = nn_ops.data_format_dim_map(x)\n\n y_val = self.evaluate(y)\n self.assertAllEqual(y_val, y_val_expected)\n\n def test(self):\n self._test(0, 0)\n self._test(1, 2)\n self._test(2, 3)\n 
self._test(3, 1)\n self._test(-1, 1)\n self._test(-2, 3)\n self._test(-3, 2)\n self._test(-4, 0)\n self._test([1, 3], [2, 1])\n self._test([1, 3, -2], [2, 1, 3])\n self._test([1, -3, -2], [2, 2, 3])\n self._test([[1, -3], [1, -1]], [[2, 2], [2, 1]])\n\n def testNHWCtoNCHW(self):\n x_val = [1, -3, -2]\n y_val_expected = [2, 2, 3]\n x = constant_op.constant(x_val)\n y = nn_ops.data_format_dim_map(x, src_format=\"NHWC\", dst_format=\"NCHW\")\n with test_util.use_gpu():\n y_val = self.evaluate(y)\n self.assertAllEqual(y_val, y_val_expected)\n\n def testNHWCtoHWNC(self):\n x_val = [-4, -3, -2, -1, 0, 1, 2, 3]\n y_val_expected = [2, 0, 1, 3, 2, 0, 1, 3]\n x = constant_op.constant(x_val)\n y = nn_ops.data_format_dim_map(x, src_format=\"NHWC\", dst_format=\"HWNC\")\n with test_util.use_gpu():\n y_val = self.evaluate(y)\n self.assertAllEqual(y_val, y_val_expected)\n\n def testNHWCtoWHCN(self):\n x_val = [-4, -3, -2, -1, 0, 1, 2, 3]\n y_val_expected = [3, 1, 0, 2, 3, 1, 0, 2]\n x = constant_op.constant(x_val)\n y = nn_ops.data_format_dim_map(x, src_format=\"NHWC\", dst_format=\"WHCN\")\n with test_util.use_gpu():\n y_val = self.evaluate(y)\n self.assertAllEqual(y_val, y_val_expected)\n\n def testArbitraryASCII(self):\n x_val = [-4, -3, -2, -1, 0, 1, 2, 3]\n y_val_expected = [3, 2, 1, 0, 3, 2, 1, 0]\n x = constant_op.constant(x_val)\n y = nn_ops.data_format_dim_map(x, src_format=\"qwer\", dst_format=\"rewq\")\n with test_util.use_gpu():\n y_val = self.evaluate(y)\n self.assertAllEqual(y_val, y_val_expected)\n\n\nclass DataFormatVectorPermuteTest(test_lib.TestCase):\n\n def testNHWCToNCHW(self):\n x_val = [7, 4, 9, 3]\n x = constant_op.constant(x_val)\n y = nn_ops.data_format_vec_permute(x)\n with test_util.use_gpu():\n y_val = self.evaluate(y)\n self.assertAllEqual(y_val, [7, 3, 4, 9])\n\n def testNCHWToNHWC(self):\n x_val = [7, 4, 9, 3]\n x = constant_op.constant(x_val)\n y = nn_ops.data_format_vec_permute(x, src_format=\"NCHW\", dst_format=\"NHWC\")\n with test_util.use_gpu():\n y_val = self.evaluate(y)\n self.assertAllEqual(y_val, [7, 9, 3, 4])\n\n def testNHWCToHWNC(self):\n x_val = [7, 4, 9, 3]\n x = constant_op.constant(x_val)\n y = nn_ops.data_format_vec_permute(x, src_format=\"NHWC\", dst_format=\"HWNC\")\n with test_util.use_gpu():\n y_val = self.evaluate(y)\n self.assertAllEqual(y_val, [4, 9, 7, 3])\n\n def testHWNCToNHWC(self):\n x_val = [7, 4, 9, 3]\n x = constant_op.constant(x_val)\n y = nn_ops.data_format_vec_permute(x, src_format=\"HWNC\", dst_format=\"NHWC\")\n with test_util.use_gpu():\n y_val = self.evaluate(y)\n self.assertAllEqual(y_val, [9, 7, 4, 3])\n\n def testNHWCToNCHW2D(self):\n x_val = [[7, 4], [9, 3], [4, 5], [5, 1]]\n x = constant_op.constant(x_val)\n y = nn_ops.data_format_vec_permute(x)\n with test_util.use_gpu():\n y_val = self.evaluate(y)\n self.assertAllEqual(y_val, [[7, 4], [5, 1], [9, 3], [4, 5]])\n\n def testNHWCToHWNC2D(self):\n x_val = [[7, 4], [9, 3], [4, 5], [5, 1]]\n x = constant_op.constant(x_val)\n y = nn_ops.data_format_vec_permute(x, src_format=\"NHWC\", dst_format=\"HWNC\")\n with test_util.use_gpu():\n y_val = self.evaluate(y)\n self.assertAllEqual(y_val, [[9, 3], [4, 5], [7, 4], [5, 1]])\n\n def testHWNCToNHWC2D(self):\n x_val = [[7, 4], [9, 3], [4, 5], [5, 1]]\n x = constant_op.constant(x_val)\n y = nn_ops.data_format_vec_permute(x, src_format=\"HWNC\", dst_format=\"NHWC\")\n with test_util.use_gpu():\n y_val = self.evaluate(y)\n self.assertAllEqual(y_val, [[4, 5], [7, 4], [9, 3], [5, 1]])\n\n def testNCHWToNHWC2D(self):\n x_val = [[7, 4], [9, 3], [4, 
5], [5, 1]]\n x = constant_op.constant(x_val)\n y = nn_ops.data_format_vec_permute(x, src_format=\"NCHW\", dst_format=\"NHWC\")\n with test_util.use_gpu():\n y_val = self.evaluate(y)\n self.assertAllEqual(y_val, [[7, 4], [4, 5], [5, 1], [9, 3]])\n\n\nif __name__ == \"__main__\":\n test_lib.main()\n",
"# Copyright 2016 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"Tests for locally-connected layers.\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport numpy as np\n\nfrom tensorflow.python import keras\nfrom tensorflow.python.framework import test_util as tf_test_util\nfrom tensorflow.python.keras import testing_utils\nfrom tensorflow.python.platform import test\nfrom tensorflow.python.training.rmsprop import RMSPropOptimizer\n\n\nclass LocallyConnected1DLayersTest(test.TestCase):\n # TODO(fchollet): investigate why LocallyConnected1D\n # fails inside a graph function in an eager context (fails with error\n # \"Incompatible shapes between op input and calculated input gradient\").\n\n def test_locallyconnected_1d(self):\n with self.cached_session():\n num_samples = 2\n num_steps = 8\n input_dim = 5\n filter_length = 3\n filters = 4\n\n for padding in ['valid', 'same']:\n for strides in [1]:\n if padding == 'same' and strides != 1:\n continue\n for data_format in ['channels_first', 'channels_last']:\n for implementation in [1, 2]:\n kwargs = {\n 'filters': filters,\n 'kernel_size': filter_length,\n 'padding': padding,\n 'strides': strides,\n 'data_format': data_format,\n 'implementation': implementation\n }\n\n if padding == 'same' and implementation == 1:\n self.assertRaises(ValueError,\n keras.layers.LocallyConnected1D,\n **kwargs)\n else:\n testing_utils.layer_test(\n keras.layers.LocallyConnected1D,\n kwargs=kwargs,\n input_shape=(num_samples, num_steps, input_dim))\n\n def test_locallyconnected_1d_regularization(self):\n num_samples = 2\n num_steps = 8\n input_dim = 5\n filter_length = 3\n filters = 4\n for data_format in ['channels_first', 'channels_last']:\n for padding in ['valid', 'same']:\n for implementation in [1, 2]:\n kwargs = {\n 'filters': filters,\n 'kernel_size': filter_length,\n 'kernel_regularizer': 'l2',\n 'bias_regularizer': 'l2',\n 'activity_regularizer': 'l2',\n 'data_format': data_format,\n 'implementation': implementation,\n 'padding': padding\n }\n\n if padding == 'same' and implementation == 1:\n self.assertRaises(ValueError,\n keras.layers.LocallyConnected1D,\n **kwargs)\n else:\n with self.cached_session():\n layer = keras.layers.LocallyConnected1D(**kwargs)\n layer.build((num_samples, num_steps, input_dim))\n self.assertEqual(len(layer.losses), 2)\n layer(\n keras.backend.variable(np.ones((num_samples,\n num_steps,\n input_dim))))\n self.assertEqual(len(layer.losses), 3)\n\n k_constraint = keras.constraints.max_norm(0.01)\n b_constraint = keras.constraints.max_norm(0.01)\n kwargs = {\n 'filters': filters,\n 'kernel_size': filter_length,\n 'kernel_constraint': k_constraint,\n 'bias_constraint': b_constraint,\n }\n with self.cached_session():\n layer = keras.layers.LocallyConnected1D(**kwargs)\n layer.build((num_samples, num_steps, input_dim))\n 
self.assertEqual(layer.kernel.constraint, k_constraint)\n self.assertEqual(layer.bias.constraint, b_constraint)\n\n\nclass LocallyConnected2DLayersTest(test.TestCase):\n # TODO(fchollet): investigate why LocallyConnected2D\n # fails inside a graph function in an eager context (fails with error\n # \"Incompatible shapes between op input and calculated input gradient\").\n\n def test_locallyconnected_2d(self):\n with self.cached_session():\n num_samples = 8\n filters = 3\n stack_size = 4\n num_row = 6\n num_col = 10\n\n for padding in ['valid', 'same']:\n for strides in [(1, 1), (2, 2)]:\n for implementation in [1, 2]:\n if padding == 'same' and strides != (1, 1):\n continue\n\n kwargs = {\n 'filters': filters,\n 'kernel_size': 3,\n 'padding': padding,\n 'kernel_regularizer': 'l2',\n 'bias_regularizer': 'l2',\n 'strides': strides,\n 'data_format': 'channels_last',\n 'implementation': implementation\n }\n\n if padding == 'same' and implementation == 1:\n self.assertRaises(ValueError,\n keras.layers.LocallyConnected2D,\n **kwargs)\n else:\n testing_utils.layer_test(\n keras.layers.LocallyConnected2D,\n kwargs=kwargs,\n input_shape=(num_samples, num_row, num_col, stack_size))\n\n def test_locallyconnected_2d_channels_first(self):\n with self.cached_session():\n num_samples = 8\n filters = 3\n stack_size = 4\n num_row = 6\n num_col = 10\n\n for implementation in [1, 2]:\n for padding in ['valid', 'same']:\n kwargs = {\n 'filters': filters,\n 'kernel_size': 3,\n 'data_format': 'channels_first',\n 'implementation': implementation,\n 'padding': padding\n }\n\n if padding == 'same' and implementation == 1:\n self.assertRaises(ValueError,\n keras.layers.LocallyConnected2D,\n **kwargs)\n else:\n testing_utils.layer_test(\n keras.layers.LocallyConnected2D,\n kwargs=kwargs,\n input_shape=(num_samples, num_row, num_col, stack_size))\n\n def test_locallyconnected_2d_regularization(self):\n num_samples = 2\n filters = 3\n stack_size = 4\n num_row = 6\n num_col = 7\n for implementation in [1, 2]:\n for padding in ['valid', 'same']:\n kwargs = {\n 'filters': filters,\n 'kernel_size': 3,\n 'kernel_regularizer': 'l2',\n 'bias_regularizer': 'l2',\n 'activity_regularizer': 'l2',\n 'implementation': implementation,\n 'padding': padding\n }\n\n if padding == 'same' and implementation == 1:\n self.assertRaises(ValueError,\n keras.layers.LocallyConnected2D,\n **kwargs)\n else:\n with self.cached_session():\n layer = keras.layers.LocallyConnected2D(**kwargs)\n layer.build((num_samples, num_row, num_col, stack_size))\n self.assertEqual(len(layer.losses), 2)\n layer(\n keras.backend.variable(\n np.ones((num_samples, num_row, num_col, stack_size))))\n self.assertEqual(len(layer.losses), 3)\n\n k_constraint = keras.constraints.max_norm(0.01)\n b_constraint = keras.constraints.max_norm(0.01)\n kwargs = {\n 'filters': filters,\n 'kernel_size': 3,\n 'kernel_constraint': k_constraint,\n 'bias_constraint': b_constraint,\n }\n with self.cached_session():\n layer = keras.layers.LocallyConnected2D(**kwargs)\n layer.build((num_samples, num_row, num_col, stack_size))\n self.assertEqual(layer.kernel.constraint, k_constraint)\n self.assertEqual(layer.bias.constraint, b_constraint)\n\n\nclass LocallyConnectedImplementationModeTest(test.TestCase):\n\n def test_locallyconnected_implementation(self):\n with self.cached_session():\n num_samples = 4\n num_classes = 3\n num_epochs = 2\n\n np.random.seed(1)\n targets = np.random.randint(0, num_classes, (num_samples,))\n\n for width in [1, 6]:\n for height in [7]:\n for filters in [2]:\n for 
data_format in ['channels_first', 'channels_last']:\n inputs = get_inputs(\n data_format, filters, height, num_samples, width)\n\n for kernel_x in [(3,)]:\n for kernel_y in [()] if width == 1 else [(2,)]:\n for stride_x in [(1,)]:\n for stride_y in [()] if width == 1 else [(3,)]:\n for layers in [2]:\n kwargs = {\n 'layers': layers,\n 'filters': filters,\n 'kernel_size': kernel_x + kernel_y,\n 'strides': stride_x + stride_y,\n 'data_format': data_format,\n 'num_classes': num_classes\n }\n model_1 = get_model(implementation=1, **kwargs)\n model_2 = get_model(implementation=2, **kwargs)\n\n # Build models.\n model_1.train_on_batch(inputs, targets)\n model_2.train_on_batch(inputs, targets)\n\n # Copy weights.\n copy_model_weights(model_2, model_1)\n\n # Compare outputs at initialization.\n out_1 = model_1.call(inputs)\n out_2 = model_2.call(inputs)\n self.assertAllCloseAccordingToType(out_1, out_2,\n rtol=1e-5, atol=1e-5)\n\n # Train.\n model_1.fit(x=inputs,\n y=targets,\n epochs=num_epochs,\n batch_size=num_samples)\n model_2.fit(x=inputs,\n y=targets,\n epochs=num_epochs,\n batch_size=num_samples)\n\n # Compare outputs after a few training steps.\n out_1 = model_1.call(inputs)\n out_2 = model_2.call(inputs)\n self.assertAllCloseAccordingToType(out_1, out_2,\n atol=1e-4)\n\n @tf_test_util.run_in_graph_and_eager_modes\n def test_make_2d(self):\n input_shapes = [\n (0,),\n (0, 0),\n (1,),\n (2,),\n (3,),\n (1, 0),\n (0, 3),\n (1, 1),\n (1, 2),\n (3, 1),\n (2, 2),\n (3, 3),\n (1, 0, 1),\n (5, 2, 3),\n (3, 5, 6, 7, 0),\n (3, 2, 2, 4, 4),\n (1, 2, 3, 4, 7, 2),\n ]\n np.random.seed(1)\n\n for input_shape in input_shapes:\n inputs = np.random.normal(0, 1, input_shape)\n inputs_tf = keras.backend.variable(inputs)\n\n split_dim = np.random.randint(0, inputs.ndim + 1)\n shape_2d = (int(np.prod(inputs.shape[:split_dim])),\n int(np.prod(inputs.shape[split_dim:])))\n inputs_2d = np.reshape(inputs, shape_2d)\n\n inputs_2d_tf = keras.layers.local.make_2d(inputs_tf, split_dim)\n inputs_2d_tf = keras.backend.get_value(inputs_2d_tf)\n\n self.assertAllCloseAccordingToType(inputs_2d, inputs_2d_tf)\n\n\ndef get_inputs(data_format, filters, height, num_samples, width):\n if data_format == 'channels_first':\n if width == 1:\n input_shape = (filters, height)\n else:\n input_shape = (filters, height, width)\n\n elif data_format == 'channels_last':\n if width == 1:\n input_shape = (height, filters)\n else:\n input_shape = (height, width, filters)\n\n else:\n raise NotImplementedError(data_format)\n\n inputs = np.random.normal(0, 1,\n (num_samples,) + input_shape).astype(np.float32)\n return inputs\n\n\ndef xent(y_true, y_pred):\n y_true = keras.backend.cast(\n keras.backend.reshape(y_true, (-1,)),\n keras.backend.dtypes_module.int32)\n\n return keras.backend.nn.sparse_softmax_cross_entropy_with_logits(\n labels=y_true,\n logits=y_pred)\n\n\ndef get_model(implementation,\n filters,\n kernel_size,\n strides,\n layers,\n num_classes,\n data_format):\n model = keras.Sequential()\n\n if len(kernel_size) == 1:\n lc_layer = keras.layers.LocallyConnected1D\n elif len(kernel_size) == 2:\n lc_layer = keras.layers.LocallyConnected2D\n else:\n raise NotImplementedError(kernel_size)\n\n for _ in range(layers):\n model.add(lc_layer(\n padding='valid',\n kernel_initializer=keras.initializers.random_normal(),\n bias_initializer=keras.initializers.random_normal(),\n filters=filters,\n strides=strides,\n kernel_size=kernel_size,\n activation=keras.activations.relu,\n data_format=data_format,\n implementation=implementation))\n\n 
model.add(keras.layers.Flatten())\n model.add(keras.layers.Dense(num_classes))\n model.compile(\n optimizer=RMSPropOptimizer(0.01),\n metrics=[keras.metrics.categorical_accuracy],\n loss=xent\n )\n return model\n\n\ndef copy_lc_weights(lc_layer_2_from, lc_layer_1_to):\n lc_2_kernel, lc_2_bias = lc_layer_2_from.weights\n lc_2_kernel_masked = lc_2_kernel * lc_layer_2_from.kernel_mask\n\n data_format = lc_layer_2_from.data_format\n\n if data_format == 'channels_first':\n if isinstance(lc_layer_2_from, keras.layers.LocallyConnected1D):\n permutation = (3, 0, 1, 2)\n elif isinstance(lc_layer_2_from, keras.layers.LocallyConnected2D):\n permutation = (4, 5, 0, 1, 2, 3)\n else:\n raise NotImplementedError(lc_layer_2_from)\n\n elif data_format == 'channels_last':\n if isinstance(lc_layer_2_from, keras.layers.LocallyConnected1D):\n permutation = (2, 0, 1, 3)\n elif isinstance(lc_layer_2_from, keras.layers.LocallyConnected2D):\n permutation = (3, 4, 0, 1, 2, 5)\n else:\n raise NotImplementedError(lc_layer_2_from)\n\n else:\n raise NotImplementedError(data_format)\n\n lc_2_kernel_masked = keras.backend.permute_dimensions(\n lc_2_kernel_masked, permutation)\n\n lc_2_kernel_mask = keras.backend.math_ops.not_equal(\n lc_2_kernel_masked, 0)\n lc_2_kernel_flat = keras.backend.array_ops.boolean_mask(\n lc_2_kernel_masked, lc_2_kernel_mask)\n lc_2_kernel_reshaped = keras.backend.reshape(lc_2_kernel_flat,\n lc_layer_1_to.kernel.shape)\n\n lc_2_kernel_reshaped = keras.backend.get_value(lc_2_kernel_reshaped)\n lc_2_bias = keras.backend.get_value(lc_2_bias)\n\n lc_layer_1_to.set_weights([lc_2_kernel_reshaped, lc_2_bias])\n\n\ndef copy_model_weights(model_2_from, model_1_to):\n for l in range(len(model_2_from.layers)):\n layer_2_from = model_2_from.layers[l]\n layer_1_to = model_1_to.layers[l]\n\n if isinstance(layer_2_from, (keras.layers.LocallyConnected2D,\n keras.layers.LocallyConnected1D)):\n copy_lc_weights(layer_2_from, layer_1_to)\n\n elif isinstance(layer_2_from, keras.layers.Dense):\n weights_2, bias_2 = layer_2_from.weights\n weights_2 = keras.backend.get_value(weights_2)\n bias_2 = keras.backend.get_value(bias_2)\n layer_1_to.set_weights([weights_2, bias_2])\n\n else:\n continue\n\n\nif __name__ == '__main__':\n test.main()\n"
] |
[
[
"tensorflow.python.ops.nn_ops.data_format_dim_map",
"numpy.dot",
"numpy.amax",
"numpy.expand_dims",
"numpy.linspace",
"numpy.random.random_sample",
"tensorflow.python.ops.array_ops.placeholder",
"numpy.concatenate",
"tensorflow.python.ops.array_ops.zeros",
"numpy.random.randn",
"numpy.zeros_like",
"numpy.mean",
"numpy.var",
"tensorflow.python.ops.nn_impl.l2_normalize_v2",
"numpy.exp",
"tensorflow.python.ops.partitioned_variables.fixed_size_partitioner",
"numpy.random.randint",
"tensorflow.python.ops.nn_impl.zero_fraction",
"tensorflow.python.ops.nn_impl.log_poisson_loss",
"numpy.ones_like",
"tensorflow.python.ops.nn_ops.softmax_v2",
"numpy.full",
"numpy.apply_along_axis",
"tensorflow.python.platform.test.main",
"numpy.count_nonzero",
"numpy.zeros",
"tensorflow.python.ops.nn_impl.moments_v2",
"numpy.log",
"tensorflow.python.ops.gradient_checker.compute_gradient_error",
"numpy.isnan",
"numpy.random.rand",
"tensorflow.python.ops.nn_ops.l2_loss",
"numpy.array",
"numpy.sum",
"tensorflow.python.ops.nn_impl.swish",
"tensorflow.python.ops.math_ops.sigmoid",
"numpy.maximum",
"numpy.random.seed",
"tensorflow.python.ops.nn_ops.dropout",
"tensorflow.python.framework.ops.Graph",
"tensorflow.python.ops.nn_ops.data_format_vec_permute",
"numpy.sort",
"tensorflow.python.ops.nn_ops.log_softmax_v2",
"numpy.random.uniform",
"numpy.prod",
"numpy.ma.masked_array",
"tensorflow.python.framework.test_util.use_gpu",
"tensorflow.python.ops.variables.global_variables_initializer",
"tensorflow.python.ops.nn_ops.leaky_relu",
"tensorflow.python.framework.constant_op.constant"
],
[
"tensorflow.python.keras.layers.Flatten",
"tensorflow.python.keras.layers.Dense",
"tensorflow.python.keras.backend.reshape",
"tensorflow.python.keras.backend.variable",
"tensorflow.python.keras.backend.array_ops.boolean_mask",
"numpy.random.randint",
"numpy.reshape",
"tensorflow.python.keras.layers.LocallyConnected2D",
"tensorflow.python.keras.Sequential",
"tensorflow.python.platform.test.main",
"tensorflow.python.keras.backend.math_ops.not_equal",
"tensorflow.python.keras.initializers.random_normal",
"tensorflow.python.keras.layers.local.make_2d",
"tensorflow.python.keras.testing_utils.layer_test",
"tensorflow.python.keras.backend.get_value",
"tensorflow.python.training.rmsprop.RMSPropOptimizer",
"tensorflow.python.keras.backend.nn.sparse_softmax_cross_entropy_with_logits",
"tensorflow.python.keras.constraints.max_norm",
"tensorflow.python.keras.backend.permute_dimensions",
"numpy.random.seed",
"tensorflow.python.keras.layers.LocallyConnected1D",
"numpy.ones",
"numpy.random.normal",
"numpy.prod"
]
] |
[
{
"matplotlib": [],
"numpy": [
"1.10",
"1.12",
"1.11",
"1.19",
"1.13",
"1.16",
"1.9",
"1.18",
"1.20",
"1.7",
"1.15",
"1.14",
"1.17",
"1.8"
],
"pandas": [],
"scipy": [],
"tensorflow": [
"1.13",
"2.3",
"2.2",
"2.4"
]
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": [
"2.8",
"1.10",
"1.12",
"2.7",
"2.6",
"1.4",
"1.13",
"2.3",
"2.4",
"2.9",
"1.5",
"1.7",
"2.5",
"2.2"
]
}
] |
HolgerSpernau/pandas
|
[
"231a3ce467da04cdfe5bfb4ba37ef91610834ded"
] |
[
"pandas/tests/sparse/series/test_series.py"
] |
[
"from datetime import datetime\nimport operator\n\nimport numpy as np\nfrom numpy import nan\nimport pytest\n\nfrom pandas._libs.sparse import BlockIndex, IntIndex\nfrom pandas.compat import PY36\nfrom pandas.errors import PerformanceWarning\nimport pandas.util._test_decorators as td\n\nimport pandas as pd\nfrom pandas import (\n DataFrame, Series, SparseDtype, SparseSeries, bdate_range, isna)\nfrom pandas.core.reshape.util import cartesian_product\nimport pandas.core.sparse.frame as spf\nfrom pandas.tests.series.test_api import SharedWithSparse\nimport pandas.util.testing as tm\n\nfrom pandas.tseries.offsets import BDay\n\n\ndef test_deprecated():\n with tm.assert_produces_warning(FutureWarning):\n pd.SparseSeries([0, 1])\n\n\ndef _test_data1():\n # nan-based\n arr = np.arange(20, dtype=float)\n index = np.arange(20)\n arr[:2] = nan\n arr[5:10] = nan\n arr[-3:] = nan\n\n return arr, index\n\n\ndef _test_data2():\n # nan-based\n arr = np.arange(15, dtype=float)\n index = np.arange(15)\n arr[7:12] = nan\n arr[-1:] = nan\n return arr, index\n\n\ndef _test_data1_zero():\n # zero-based\n arr, index = _test_data1()\n arr[np.isnan(arr)] = 0\n return arr, index\n\n\ndef _test_data2_zero():\n # zero-based\n arr, index = _test_data2()\n arr[np.isnan(arr)] = 0\n return arr, index\n\n\[email protected](\"ignore:Sparse:FutureWarning\")\nclass TestSparseSeries(SharedWithSparse):\n\n series_klass = SparseSeries\n # SharedWithSparse tests use generic, series_klass-agnostic assertion\n _assert_series_equal = staticmethod(tm.assert_sp_series_equal)\n\n def setup_method(self, method):\n arr, index = _test_data1()\n\n date_index = bdate_range('1/1/2011', periods=len(index))\n\n self.bseries = SparseSeries(arr, index=index, kind='block',\n name='bseries')\n self.ts = self.bseries\n\n self.btseries = SparseSeries(arr, index=date_index, kind='block')\n\n self.iseries = SparseSeries(arr, index=index, kind='integer',\n name='iseries')\n\n arr, index = _test_data2()\n self.bseries2 = SparseSeries(arr, index=index, kind='block')\n self.iseries2 = SparseSeries(arr, index=index, kind='integer')\n\n arr, index = _test_data1_zero()\n self.zbseries = SparseSeries(arr, index=index, kind='block',\n fill_value=0, name='zbseries')\n self.ziseries = SparseSeries(arr, index=index, kind='integer',\n fill_value=0)\n\n arr, index = _test_data2_zero()\n self.zbseries2 = SparseSeries(arr, index=index, kind='block',\n fill_value=0)\n self.ziseries2 = SparseSeries(arr, index=index, kind='integer',\n fill_value=0)\n\n def test_constructor_dict_input(self):\n # gh-16905\n constructor_dict = {1: 1.}\n index = [0, 1, 2]\n\n # Series with index passed in\n series = pd.Series(constructor_dict)\n expected = SparseSeries(series, index=index)\n\n result = SparseSeries(constructor_dict, index=index)\n tm.assert_sp_series_equal(result, expected)\n\n # Series with index and dictionary with no index\n expected = SparseSeries(series)\n\n result = SparseSeries(constructor_dict)\n tm.assert_sp_series_equal(result, expected)\n\n def test_constructor_dict_order(self):\n # GH19018\n # initialization ordering: by insertion order if python>= 3.6, else\n # order by value\n d = {'b': 1, 'a': 0, 'c': 2}\n result = SparseSeries(d)\n if PY36:\n expected = SparseSeries([1, 0, 2], index=list('bac'))\n else:\n expected = SparseSeries([0, 1, 2], index=list('abc'))\n tm.assert_sp_series_equal(result, expected)\n\n def test_constructor_dtype(self):\n arr = SparseSeries([np.nan, 1, 2, np.nan])\n assert arr.dtype == SparseDtype(np.float64)\n assert 
np.isnan(arr.fill_value)\n\n arr = SparseSeries([np.nan, 1, 2, np.nan], fill_value=0)\n assert arr.dtype == SparseDtype(np.float64, 0)\n assert arr.fill_value == 0\n\n arr = SparseSeries([0, 1, 2, 4], dtype=np.int64, fill_value=np.nan)\n assert arr.dtype == SparseDtype(np.int64, np.nan)\n assert np.isnan(arr.fill_value)\n\n arr = SparseSeries([0, 1, 2, 4], dtype=np.int64)\n assert arr.dtype == SparseDtype(np.int64, 0)\n assert arr.fill_value == 0\n\n arr = SparseSeries([0, 1, 2, 4], fill_value=0, dtype=np.int64)\n assert arr.dtype == SparseDtype(np.int64, 0)\n assert arr.fill_value == 0\n\n def test_iteration_and_str(self):\n [x for x in self.bseries]\n str(self.bseries)\n\n def test_construct_DataFrame_with_sp_series(self):\n # it works!\n df = DataFrame({'col': self.bseries})\n\n # printing & access\n df.iloc[:1]\n df['col']\n df.dtypes\n str(df)\n\n # blocking\n expected = Series({'col': 'float64:sparse'})\n\n # GH 26705 - Assert .ftypes is deprecated\n with tm.assert_produces_warning(FutureWarning):\n result = df.ftypes\n tm.assert_series_equal(expected, result)\n\n def test_constructor_preserve_attr(self):\n arr = pd.SparseArray([1, 0, 3, 0], dtype=np.int64, fill_value=0)\n assert arr.dtype == SparseDtype(np.int64)\n assert arr.fill_value == 0\n\n s = pd.SparseSeries(arr, name='x')\n assert s.dtype == SparseDtype(np.int64)\n assert s.fill_value == 0\n\n def test_series_density(self):\n # GH2803\n ts = Series(np.random.randn(10))\n ts[2:-2] = nan\n sts = ts.to_sparse()\n density = sts.density # don't die\n assert density == 4 / 10.0\n\n def test_sparse_to_dense(self):\n arr, index = _test_data1()\n series = self.bseries.to_dense()\n tm.assert_series_equal(series, Series(arr, name='bseries'))\n\n series = self.iseries.to_dense()\n tm.assert_series_equal(series, Series(arr, name='iseries'))\n\n arr, index = _test_data1_zero()\n series = self.zbseries.to_dense()\n tm.assert_series_equal(series, Series(arr, name='zbseries'))\n\n series = self.ziseries.to_dense()\n tm.assert_series_equal(series, Series(arr))\n\n def test_to_dense_fill_value(self):\n s = pd.Series([1, np.nan, np.nan, 3, np.nan])\n res = SparseSeries(s).to_dense()\n tm.assert_series_equal(res, s)\n\n res = SparseSeries(s, fill_value=0).to_dense()\n tm.assert_series_equal(res, s)\n\n s = pd.Series([1, np.nan, 0, 3, 0])\n res = SparseSeries(s, fill_value=0).to_dense()\n tm.assert_series_equal(res, s)\n\n res = SparseSeries(s, fill_value=0).to_dense()\n tm.assert_series_equal(res, s)\n\n s = pd.Series([np.nan, np.nan, np.nan, np.nan, np.nan])\n res = SparseSeries(s).to_dense()\n tm.assert_series_equal(res, s)\n\n s = pd.Series([np.nan, np.nan, np.nan, np.nan, np.nan])\n res = SparseSeries(s, fill_value=0).to_dense()\n tm.assert_series_equal(res, s)\n\n def test_dense_to_sparse(self):\n series = self.bseries.to_dense()\n bseries = series.to_sparse(kind='block')\n iseries = series.to_sparse(kind='integer')\n tm.assert_sp_series_equal(bseries, self.bseries)\n tm.assert_sp_series_equal(iseries, self.iseries, check_names=False)\n assert iseries.name == self.bseries.name\n\n assert len(series) == len(bseries)\n assert len(series) == len(iseries)\n assert series.shape == bseries.shape\n assert series.shape == iseries.shape\n\n # non-NaN fill value\n series = self.zbseries.to_dense()\n zbseries = series.to_sparse(kind='block', fill_value=0)\n ziseries = series.to_sparse(kind='integer', fill_value=0)\n tm.assert_sp_series_equal(zbseries, self.zbseries)\n tm.assert_sp_series_equal(ziseries, self.ziseries, check_names=False)\n assert 
ziseries.name == self.zbseries.name\n\n assert len(series) == len(zbseries)\n assert len(series) == len(ziseries)\n assert series.shape == zbseries.shape\n assert series.shape == ziseries.shape\n\n def test_to_dense_preserve_name(self):\n assert (self.bseries.name is not None)\n result = self.bseries.to_dense()\n assert result.name == self.bseries.name\n\n def test_constructor(self):\n # test setup guys\n assert np.isnan(self.bseries.fill_value)\n assert isinstance(self.bseries.sp_index, BlockIndex)\n assert np.isnan(self.iseries.fill_value)\n assert isinstance(self.iseries.sp_index, IntIndex)\n\n assert self.zbseries.fill_value == 0\n tm.assert_numpy_array_equal(self.zbseries.values.to_dense(),\n self.bseries.to_dense().fillna(0).values)\n\n # pass SparseSeries\n def _check_const(sparse, name):\n # use passed series name\n result = SparseSeries(sparse)\n tm.assert_sp_series_equal(result, sparse)\n assert sparse.name == name\n assert result.name == name\n\n # use passed name\n result = SparseSeries(sparse, name='x')\n tm.assert_sp_series_equal(result, sparse, check_names=False)\n assert result.name == 'x'\n\n _check_const(self.bseries, 'bseries')\n _check_const(self.iseries, 'iseries')\n _check_const(self.zbseries, 'zbseries')\n\n # Sparse time series works\n date_index = bdate_range('1/1/2000', periods=len(self.bseries))\n s5 = SparseSeries(self.bseries, index=date_index)\n assert isinstance(s5, SparseSeries)\n\n # pass Series\n bseries2 = SparseSeries(self.bseries.to_dense())\n tm.assert_numpy_array_equal(self.bseries.sp_values, bseries2.sp_values)\n\n # pass dict?\n\n # don't copy the data by default\n values = np.ones(self.bseries.npoints)\n sp = SparseSeries(values, sparse_index=self.bseries.sp_index)\n sp.sp_values[:5] = 97\n assert values[0] == 97\n\n assert len(sp) == 20\n assert sp.shape == (20, )\n\n # but can make it copy!\n sp = SparseSeries(values, sparse_index=self.bseries.sp_index,\n copy=True)\n sp.sp_values[:5] = 100\n assert values[0] == 97\n\n assert len(sp) == 20\n assert sp.shape == (20, )\n\n def test_constructor_scalar(self):\n data = 5\n sp = SparseSeries(data, np.arange(100))\n sp = sp.reindex(np.arange(200))\n assert (sp.loc[:99] == data).all()\n assert isna(sp.loc[100:]).all()\n\n data = np.nan\n sp = SparseSeries(data, np.arange(100))\n assert len(sp) == 100\n assert sp.shape == (100, )\n\n def test_constructor_ndarray(self):\n pass\n\n def test_constructor_nonnan(self):\n arr = [0, 0, 0, nan, nan]\n sp_series = SparseSeries(arr, fill_value=0)\n tm.assert_numpy_array_equal(sp_series.values.to_dense(), np.array(arr))\n assert len(sp_series) == 5\n assert sp_series.shape == (5, )\n\n def test_constructor_empty(self):\n # see gh-9272\n sp = SparseSeries()\n assert len(sp.index) == 0\n assert sp.shape == (0, )\n\n def test_copy_astype(self):\n cop = self.bseries.astype(np.float64)\n assert cop is not self.bseries\n assert cop.sp_index is self.bseries.sp_index\n assert cop.dtype == SparseDtype(np.float64)\n\n cop2 = self.iseries.copy()\n\n tm.assert_sp_series_equal(cop, self.bseries)\n tm.assert_sp_series_equal(cop2, self.iseries)\n\n # test that data is copied\n cop[:5] = 97\n assert cop.sp_values[0] == 97\n assert self.bseries.sp_values[0] != 97\n\n # correct fill value\n zbcop = self.zbseries.copy()\n zicop = self.ziseries.copy()\n\n tm.assert_sp_series_equal(zbcop, self.zbseries)\n tm.assert_sp_series_equal(zicop, self.ziseries)\n\n # no deep copy\n view = self.bseries.copy(deep=False)\n view.sp_values[:5] = 5\n assert (self.bseries.sp_values[:5] == 5).all()\n\n 
def test_shape(self):\n # see gh-10452\n assert self.bseries.shape == (20, )\n assert self.btseries.shape == (20, )\n assert self.iseries.shape == (20, )\n\n assert self.bseries2.shape == (15, )\n assert self.iseries2.shape == (15, )\n\n assert self.zbseries2.shape == (15, )\n assert self.ziseries2.shape == (15, )\n\n def test_astype(self):\n result = self.bseries.astype(SparseDtype(np.int64, 0))\n expected = (self.bseries.to_dense()\n .fillna(0)\n .astype(np.int64)\n .to_sparse(fill_value=0))\n tm.assert_sp_series_equal(result, expected)\n\n def test_astype_all(self):\n orig = pd.Series(np.array([1, 2, 3]))\n s = SparseSeries(orig)\n\n types = [np.float64, np.float32, np.int64,\n np.int32, np.int16, np.int8]\n for typ in types:\n dtype = SparseDtype(typ)\n res = s.astype(dtype)\n assert res.dtype == dtype\n tm.assert_series_equal(res.to_dense(), orig.astype(typ))\n\n def test_kind(self):\n assert self.bseries.kind == 'block'\n assert self.iseries.kind == 'integer'\n\n def test_to_frame(self):\n # GH 9850\n s = pd.SparseSeries([1, 2, 0, nan, 4, nan, 0], name='x')\n exp = pd.SparseDataFrame({'x': [1, 2, 0, nan, 4, nan, 0]})\n tm.assert_sp_frame_equal(s.to_frame(), exp)\n\n exp = pd.SparseDataFrame({'y': [1, 2, 0, nan, 4, nan, 0]})\n tm.assert_sp_frame_equal(s.to_frame(name='y'), exp)\n\n s = pd.SparseSeries([1, 2, 0, nan, 4, nan, 0], name='x', fill_value=0)\n exp = pd.SparseDataFrame({'x': [1, 2, 0, nan, 4, nan, 0]},\n default_fill_value=0)\n\n tm.assert_sp_frame_equal(s.to_frame(), exp)\n exp = pd.DataFrame({'y': [1, 2, 0, nan, 4, nan, 0]})\n tm.assert_frame_equal(s.to_frame(name='y').to_dense(), exp)\n\n def test_pickle(self):\n def _test_roundtrip(series):\n unpickled = tm.round_trip_pickle(series)\n tm.assert_sp_series_equal(series, unpickled)\n tm.assert_series_equal(series.to_dense(), unpickled.to_dense())\n\n self._check_all(_test_roundtrip)\n\n def _check_all(self, check_func):\n check_func(self.bseries)\n check_func(self.iseries)\n check_func(self.zbseries)\n check_func(self.ziseries)\n\n def test_getitem(self):\n def _check_getitem(sp, dense):\n for idx, val in dense.items():\n tm.assert_almost_equal(val, sp[idx])\n\n for i in range(len(dense)):\n tm.assert_almost_equal(sp[i], dense[i])\n # j = np.float64(i)\n # assert_almost_equal(sp[j], dense[j])\n\n # API change 1/6/2012\n # negative getitem works\n # for i in xrange(len(dense)):\n # assert_almost_equal(sp[-i], dense[-i])\n\n _check_getitem(self.bseries, self.bseries.to_dense())\n _check_getitem(self.btseries, self.btseries.to_dense())\n\n _check_getitem(self.zbseries, self.zbseries.to_dense())\n _check_getitem(self.iseries, self.iseries.to_dense())\n _check_getitem(self.ziseries, self.ziseries.to_dense())\n\n # exception handling\n with pytest.raises(IndexError, match=\"Out of bounds access\"):\n self.bseries[len(self.bseries) + 1]\n\n # index not contained\n msg = r\"Timestamp\\('2011-01-31 00:00:00', freq='B'\\)\"\n with pytest.raises(KeyError, match=msg):\n self.btseries[self.btseries.index[-1] + BDay()]\n\n def test_get_get_value(self):\n tm.assert_almost_equal(self.bseries.get(10), self.bseries[10])\n assert self.bseries.get(len(self.bseries) + 1) is None\n\n dt = self.btseries.index[10]\n result = self.btseries.get(dt)\n expected = self.btseries.to_dense()[dt]\n tm.assert_almost_equal(result, expected)\n\n with tm.assert_produces_warning(FutureWarning,\n check_stacklevel=False):\n tm.assert_almost_equal(\n self.bseries.get_value(10), self.bseries[10])\n\n def test_set_value(self):\n\n idx = self.btseries.index[7]\n with 
tm.assert_produces_warning(FutureWarning,\n check_stacklevel=False):\n self.btseries.set_value(idx, 0)\n assert self.btseries[idx] == 0\n\n with tm.assert_produces_warning(FutureWarning,\n check_stacklevel=False):\n self.iseries.set_value('foobar', 0)\n assert self.iseries.index[-1] == 'foobar'\n assert self.iseries['foobar'] == 0\n\n def test_getitem_slice(self):\n idx = self.bseries.index\n res = self.bseries[::2]\n assert isinstance(res, SparseSeries)\n\n expected = self.bseries.reindex(idx[::2])\n tm.assert_sp_series_equal(res, expected)\n\n res = self.bseries[:5]\n assert isinstance(res, SparseSeries)\n tm.assert_sp_series_equal(res, self.bseries.reindex(idx[:5]))\n\n res = self.bseries[5:]\n tm.assert_sp_series_equal(res, self.bseries.reindex(idx[5:]))\n\n # negative indices\n res = self.bseries[:-3]\n tm.assert_sp_series_equal(res, self.bseries.reindex(idx[:-3]))\n\n def test_take(self):\n def _compare_with_dense(sp):\n dense = sp.to_dense()\n\n def _compare(idx):\n dense_result = dense.take(idx).values\n sparse_result = sp.take(idx)\n assert isinstance(sparse_result, SparseSeries)\n tm.assert_almost_equal(dense_result,\n sparse_result.values.to_dense())\n\n _compare([1., 2., 3., 4., 5., 0.])\n _compare([7, 2, 9, 0, 4])\n _compare([3, 6, 3, 4, 7])\n\n self._check_all(_compare_with_dense)\n\n msg = \"index 21 is out of bounds for size 20\"\n with pytest.raises(IndexError, match=msg):\n self.bseries.take([0, len(self.bseries) + 1])\n\n # Corner case\n # XXX: changed test. Why wsa this considered a corner case?\n sp = SparseSeries(np.ones(10) * nan)\n exp = pd.Series(np.repeat(nan, 5))\n tm.assert_series_equal(sp.take([0, 1, 2, 3, 4]), exp.to_sparse())\n\n # multiple FutureWarnings, can't check stacklevel\n with tm.assert_produces_warning(FutureWarning,\n check_stacklevel=False):\n sp.take([1, 5], convert=True)\n\n with tm.assert_produces_warning(FutureWarning,\n check_stacklevel=False):\n sp.take([1, 5], convert=False)\n\n def test_numpy_take(self):\n sp = SparseSeries([1.0, 2.0, 3.0])\n indices = [1, 2]\n\n tm.assert_series_equal(np.take(sp, indices, axis=0).to_dense(),\n np.take(sp.to_dense(), indices, axis=0))\n\n msg = \"the 'out' parameter is not supported\"\n with pytest.raises(ValueError, match=msg):\n np.take(sp, indices, out=np.empty(sp.shape))\n\n msg = \"the 'mode' parameter is not supported\"\n with pytest.raises(ValueError, match=msg):\n np.take(sp, indices, out=None, mode='clip')\n\n def test_setitem(self):\n self.bseries[5] = 7.\n assert self.bseries[5] == 7.\n\n def test_setslice(self):\n self.bseries[5:10] = 7.\n tm.assert_series_equal(self.bseries[5:10].to_dense(),\n Series(7., index=range(5, 10),\n name=self.bseries.name))\n\n def test_operators(self):\n\n def _check_op(a, b, op):\n sp_result = op(a, b)\n adense = a.to_dense() if isinstance(a, SparseSeries) else a\n bdense = b.to_dense() if isinstance(b, SparseSeries) else b\n dense_result = op(adense, bdense)\n tm.assert_almost_equal(sp_result.to_dense(), dense_result)\n\n def check(a, b):\n _check_op(a, b, operator.add)\n _check_op(a, b, operator.sub)\n _check_op(a, b, operator.truediv)\n _check_op(a, b, operator.floordiv)\n _check_op(a, b, operator.mul)\n\n _check_op(a, b, lambda x, y: operator.add(y, x))\n _check_op(a, b, lambda x, y: operator.sub(y, x))\n _check_op(a, b, lambda x, y: operator.truediv(y, x))\n _check_op(a, b, lambda x, y: operator.floordiv(y, x))\n _check_op(a, b, lambda x, y: operator.mul(y, x))\n\n # NaN ** 0 = 1 in C?\n # _check_op(a, b, operator.pow)\n # _check_op(a, b, lambda x, y: 
operator.pow(y, x))\n\n check(self.bseries, self.bseries)\n check(self.iseries, self.iseries)\n check(self.bseries, self.iseries)\n\n check(self.bseries, self.bseries2)\n check(self.bseries, self.iseries2)\n check(self.iseries, self.iseries2)\n\n # scalar value\n check(self.bseries, 5)\n\n # zero-based\n check(self.zbseries, self.zbseries * 2)\n check(self.zbseries, self.zbseries2)\n check(self.ziseries, self.ziseries2)\n\n # with dense\n result = self.bseries + self.bseries.to_dense()\n tm.assert_sp_series_equal(result, self.bseries + self.bseries)\n\n def test_binary_operators(self):\n\n # skipping for now #####\n import pytest\n pytest.skip(\"skipping sparse binary operators test\")\n\n def _check_inplace_op(iop, op):\n tmp = self.bseries.copy()\n\n expected = op(tmp, self.bseries)\n iop(tmp, self.bseries)\n tm.assert_sp_series_equal(tmp, expected)\n\n inplace_ops = ['add', 'sub', 'mul', 'truediv', 'floordiv', 'pow']\n for op in inplace_ops:\n _check_inplace_op(getattr(operator, \"i%s\" % op),\n getattr(operator, op))\n\n @pytest.mark.parametrize(\"values, op, fill_value\", [\n ([True, False, False, True], operator.invert, True),\n ([True, False, False, True], operator.invert, False),\n ([0, 1, 2, 3], operator.pos, 0),\n ([0, 1, 2, 3], operator.neg, 0),\n ([0, np.nan, 2, 3], operator.pos, np.nan),\n ([0, np.nan, 2, 3], operator.neg, np.nan),\n ])\n def test_unary_operators(self, values, op, fill_value):\n # https://github.com/pandas-dev/pandas/issues/22835\n values = np.asarray(values)\n if op is operator.invert:\n new_fill_value = not fill_value\n else:\n new_fill_value = op(fill_value)\n s = SparseSeries(values,\n fill_value=fill_value,\n index=['a', 'b', 'c', 'd'],\n name='name')\n result = op(s)\n expected = SparseSeries(op(values),\n fill_value=new_fill_value,\n index=['a', 'b', 'c', 'd'],\n name='name')\n tm.assert_sp_series_equal(result, expected)\n\n def test_abs(self):\n s = SparseSeries([1, 2, -3], name='x')\n expected = SparseSeries([1, 2, 3], name='x')\n result = s.abs()\n tm.assert_sp_series_equal(result, expected)\n assert result.name == 'x'\n\n result = abs(s)\n tm.assert_sp_series_equal(result, expected)\n assert result.name == 'x'\n\n result = np.abs(s)\n tm.assert_sp_series_equal(result, expected)\n assert result.name == 'x'\n\n s = SparseSeries([1, -2, 2, -3], fill_value=-2, name='x')\n expected = SparseSeries([1, 2, 3], sparse_index=s.sp_index,\n fill_value=2, name='x')\n result = s.abs()\n tm.assert_sp_series_equal(result, expected)\n assert result.name == 'x'\n\n result = abs(s)\n tm.assert_sp_series_equal(result, expected)\n assert result.name == 'x'\n\n result = np.abs(s)\n tm.assert_sp_series_equal(result, expected)\n assert result.name == 'x'\n\n def test_reindex(self):\n def _compare_with_series(sps, new_index):\n spsre = sps.reindex(new_index)\n\n series = sps.to_dense()\n seriesre = series.reindex(new_index)\n seriesre = seriesre.to_sparse(fill_value=sps.fill_value)\n\n tm.assert_sp_series_equal(spsre, seriesre)\n tm.assert_series_equal(spsre.to_dense(), seriesre.to_dense())\n\n _compare_with_series(self.bseries, self.bseries.index[::2])\n _compare_with_series(self.bseries, list(self.bseries.index[::2]))\n _compare_with_series(self.bseries, self.bseries.index[:10])\n _compare_with_series(self.bseries, self.bseries.index[5:])\n\n _compare_with_series(self.zbseries, self.zbseries.index[::2])\n _compare_with_series(self.zbseries, self.zbseries.index[:10])\n _compare_with_series(self.zbseries, self.zbseries.index[5:])\n\n # special cases\n same_index = 
self.bseries.reindex(self.bseries.index)\n tm.assert_sp_series_equal(self.bseries, same_index)\n assert same_index is not self.bseries\n\n # corner cases\n sp = SparseSeries([], index=[])\n # TODO: sp_zero is not used anywhere...remove?\n sp_zero = SparseSeries([], index=[], fill_value=0) # noqa\n _compare_with_series(sp, np.arange(10))\n\n # with copy=False\n reindexed = self.bseries.reindex(self.bseries.index, copy=True)\n reindexed.sp_values[:] = 1.\n assert (self.bseries.sp_values != 1.).all()\n\n reindexed = self.bseries.reindex(self.bseries.index, copy=False)\n reindexed.sp_values[:] = 1.\n tm.assert_numpy_array_equal(self.bseries.sp_values, np.repeat(1., 10))\n\n def test_sparse_reindex(self):\n length = 10\n\n def _check(values, index1, index2, fill_value):\n first_series = SparseSeries(values, sparse_index=index1,\n fill_value=fill_value)\n reindexed = first_series.sparse_reindex(index2)\n assert reindexed.sp_index is index2\n\n int_indices1 = index1.to_int_index().indices\n int_indices2 = index2.to_int_index().indices\n\n expected = Series(values, index=int_indices1)\n expected = expected.reindex(int_indices2).fillna(fill_value)\n tm.assert_almost_equal(expected.values, reindexed.sp_values)\n\n # make sure level argument asserts\n # TODO: expected is not used anywhere...remove?\n expected = expected.reindex(int_indices2).fillna(fill_value) # noqa\n\n def _check_with_fill_value(values, first, second, fill_value=nan):\n i_index1 = IntIndex(length, first)\n i_index2 = IntIndex(length, second)\n\n b_index1 = i_index1.to_block_index()\n b_index2 = i_index2.to_block_index()\n\n _check(values, i_index1, i_index2, fill_value)\n _check(values, b_index1, b_index2, fill_value)\n\n def _check_all(values, first, second):\n _check_with_fill_value(values, first, second, fill_value=nan)\n _check_with_fill_value(values, first, second, fill_value=0)\n\n index1 = [2, 4, 5, 6, 8, 9]\n values1 = np.arange(6.)\n\n _check_all(values1, index1, [2, 4, 5])\n _check_all(values1, index1, [2, 3, 4, 5, 6, 7, 8, 9])\n _check_all(values1, index1, [0, 1])\n _check_all(values1, index1, [0, 1, 7, 8, 9])\n _check_all(values1, index1, [])\n\n first_series = SparseSeries(values1,\n sparse_index=IntIndex(length, index1),\n fill_value=nan)\n with pytest.raises(TypeError,\n match='new index must be a SparseIndex'):\n first_series.sparse_reindex(0)\n\n def test_repr(self):\n # TODO: These aren't used\n bsrepr = repr(self.bseries) # noqa\n isrepr = repr(self.iseries) # noqa\n\n def test_iter(self):\n pass\n\n def test_truncate(self):\n pass\n\n def test_fillna(self):\n pass\n\n def test_groupby(self):\n pass\n\n def test_reductions(self):\n def _compare_with_dense(obj, op):\n sparse_result = getattr(obj, op)()\n series = obj.to_dense()\n dense_result = getattr(series, op)()\n assert sparse_result == dense_result\n\n to_compare = ['count', 'sum', 'mean', 'std', 'var', 'skew']\n\n def _compare_all(obj):\n for op in to_compare:\n _compare_with_dense(obj, op)\n\n _compare_all(self.bseries)\n\n self.bseries.sp_values[5:10] = np.NaN\n _compare_all(self.bseries)\n\n _compare_all(self.zbseries)\n self.zbseries.sp_values[5:10] = np.NaN\n _compare_all(self.zbseries)\n\n series = self.zbseries.copy()\n series.fill_value = 2\n _compare_all(series)\n\n nonna = Series(np.random.randn(20)).to_sparse()\n _compare_all(nonna)\n\n nonna2 = Series(np.random.randn(20)).to_sparse(fill_value=0)\n _compare_all(nonna2)\n\n def test_dropna(self):\n sp = SparseSeries([0, 0, 0, nan, nan, 5, 6], fill_value=0)\n\n sp_valid = sp.dropna()\n\n expected = 
sp.to_dense().dropna()\n expected = expected[expected != 0]\n exp_arr = pd.SparseArray(expected.values, fill_value=0, kind='block')\n tm.assert_sp_array_equal(sp_valid.values, exp_arr)\n tm.assert_index_equal(sp_valid.index, expected.index)\n assert len(sp_valid.sp_values) == 2\n\n result = self.bseries.dropna()\n expected = self.bseries.to_dense().dropna()\n assert not isinstance(result, SparseSeries)\n tm.assert_series_equal(result, expected)\n\n def test_homogenize(self):\n def _check_matches(indices, expected):\n data = {i: SparseSeries(idx.to_int_index().indices,\n sparse_index=idx, fill_value=np.nan)\n for i, idx in enumerate(indices)}\n\n # homogenized is only valid with NaN fill values\n homogenized = spf.homogenize(data)\n\n for k, v in homogenized.items():\n assert (v.sp_index.equals(expected))\n\n indices1 = [BlockIndex(10, [2], [7]), BlockIndex(10, [1, 6], [3, 4]),\n BlockIndex(10, [0], [10])]\n expected1 = BlockIndex(10, [2, 6], [2, 3])\n _check_matches(indices1, expected1)\n\n indices2 = [BlockIndex(10, [2], [7]), BlockIndex(10, [2], [7])]\n expected2 = indices2[0]\n _check_matches(indices2, expected2)\n\n # must have NaN fill value\n data = {'a': SparseSeries(np.arange(7), sparse_index=expected2,\n fill_value=0)}\n with pytest.raises(TypeError, match=\"NaN fill value\"):\n spf.homogenize(data)\n\n def test_fill_value_corner(self):\n cop = self.zbseries.copy()\n cop.fill_value = 0\n result = self.bseries / cop\n\n assert np.isnan(result.fill_value)\n\n cop2 = self.zbseries.copy()\n cop2.fill_value = 1\n result = cop2 / cop\n # 1 / 0 is inf\n assert np.isinf(result.fill_value)\n\n def test_fill_value_when_combine_const(self):\n # GH12723\n s = SparseSeries([0, 1, np.nan, 3, 4, 5], index=np.arange(6))\n\n exp = s.fillna(0).add(2)\n res = s.add(2, fill_value=0)\n tm.assert_series_equal(res, exp)\n\n def test_shift(self):\n series = SparseSeries([nan, 1., 2., 3., nan, nan], index=np.arange(6))\n\n shifted = series.shift(0)\n # assert shifted is not series\n tm.assert_sp_series_equal(shifted, series)\n\n f = lambda s: s.shift(1)\n _dense_series_compare(series, f)\n\n f = lambda s: s.shift(-2)\n _dense_series_compare(series, f)\n\n series = SparseSeries([nan, 1., 2., 3., nan, nan],\n index=bdate_range('1/1/2000', periods=6))\n f = lambda s: s.shift(2, freq='B')\n _dense_series_compare(series, f)\n\n f = lambda s: s.shift(2, freq=BDay())\n _dense_series_compare(series, f)\n\n def test_shift_nan(self):\n # GH 12908\n orig = pd.Series([np.nan, 2, np.nan, 4, 0, np.nan, 0])\n sparse = orig.to_sparse()\n\n tm.assert_sp_series_equal(sparse.shift(0), orig.shift(0).to_sparse(),\n check_kind=False)\n tm.assert_sp_series_equal(sparse.shift(1), orig.shift(1).to_sparse(),\n check_kind=False)\n tm.assert_sp_series_equal(sparse.shift(2), orig.shift(2).to_sparse(),\n check_kind=False)\n tm.assert_sp_series_equal(sparse.shift(3), orig.shift(3).to_sparse(),\n check_kind=False)\n\n tm.assert_sp_series_equal(sparse.shift(-1), orig.shift(-1).to_sparse())\n tm.assert_sp_series_equal(sparse.shift(-2), orig.shift(-2).to_sparse())\n tm.assert_sp_series_equal(sparse.shift(-3), orig.shift(-3).to_sparse())\n tm.assert_sp_series_equal(sparse.shift(-4), orig.shift(-4).to_sparse())\n\n sparse = orig.to_sparse(fill_value=0)\n tm.assert_sp_series_equal(\n sparse.shift(0),\n orig.shift(0).to_sparse(fill_value=sparse.fill_value)\n )\n tm.assert_sp_series_equal(sparse.shift(1),\n orig.shift(1).to_sparse(fill_value=0),\n check_kind=False)\n tm.assert_sp_series_equal(sparse.shift(2),\n 
orig.shift(2).to_sparse(fill_value=0),\n check_kind=False)\n tm.assert_sp_series_equal(sparse.shift(3),\n orig.shift(3).to_sparse(fill_value=0),\n check_kind=False)\n\n tm.assert_sp_series_equal(sparse.shift(-1),\n orig.shift(-1).to_sparse(fill_value=0),\n check_kind=False)\n tm.assert_sp_series_equal(sparse.shift(-2),\n orig.shift(-2).to_sparse(fill_value=0),\n check_kind=False)\n tm.assert_sp_series_equal(sparse.shift(-3),\n orig.shift(-3).to_sparse(fill_value=0),\n check_kind=False)\n tm.assert_sp_series_equal(sparse.shift(-4),\n orig.shift(-4).to_sparse(fill_value=0),\n check_kind=False)\n\n def test_shift_dtype(self):\n # GH 12908\n orig = pd.Series([1, 2, 3, 4], dtype=np.int64)\n\n sparse = orig.to_sparse()\n tm.assert_sp_series_equal(sparse.shift(0), orig.shift(0).to_sparse())\n\n sparse = orig.to_sparse(fill_value=np.nan)\n tm.assert_sp_series_equal(sparse.shift(0),\n orig.shift(0).to_sparse(fill_value=np.nan))\n # shift(1) or more span changes dtype to float64\n # XXX: SparseSeries doesn't need to shift dtype here.\n # Do we want to astype in shift, for backwards compat?\n # If not, document it.\n tm.assert_sp_series_equal(sparse.shift(1).astype('f8'),\n orig.shift(1).to_sparse(kind='integer'))\n tm.assert_sp_series_equal(sparse.shift(2).astype('f8'),\n orig.shift(2).to_sparse(kind='integer'))\n tm.assert_sp_series_equal(sparse.shift(3).astype('f8'),\n orig.shift(3).to_sparse(kind='integer'))\n\n tm.assert_sp_series_equal(sparse.shift(-1).astype('f8'),\n orig.shift(-1).to_sparse(),\n check_kind=False)\n tm.assert_sp_series_equal(sparse.shift(-2).astype('f8'),\n orig.shift(-2).to_sparse(),\n check_kind=False)\n tm.assert_sp_series_equal(sparse.shift(-3).astype('f8'),\n orig.shift(-3).to_sparse(),\n check_kind=False)\n tm.assert_sp_series_equal(sparse.shift(-4).astype('f8'),\n orig.shift(-4).to_sparse(),\n check_kind=False)\n\n @pytest.mark.parametrize(\"fill_value\", [\n 0,\n 1,\n np.nan\n ])\n @pytest.mark.parametrize(\"periods\", [0, 1, 2, 3, -1, -2, -3, -4])\n def test_shift_dtype_fill_value(self, fill_value, periods):\n # GH 12908\n orig = pd.Series([1, 0, 0, 4], dtype=np.dtype('int64'))\n\n sparse = orig.to_sparse(fill_value=fill_value)\n\n result = sparse.shift(periods)\n expected = orig.shift(periods).to_sparse(fill_value=fill_value)\n\n tm.assert_sp_series_equal(result, expected,\n check_kind=False,\n consolidate_block_indices=True)\n\n def test_combine_first(self):\n s = self.bseries\n\n result = s[::2].combine_first(s)\n result2 = s[::2].combine_first(s.to_dense())\n\n expected = s[::2].to_dense().combine_first(s.to_dense())\n expected = expected.to_sparse(fill_value=s.fill_value)\n\n tm.assert_sp_series_equal(result, result2)\n tm.assert_sp_series_equal(result, expected)\n\n @pytest.mark.parametrize('deep', [True, False])\n @pytest.mark.parametrize('fill_value', [0, 1, np.nan, None])\n def test_memory_usage_deep(self, deep, fill_value):\n values = [1.0] + [fill_value] * 20\n sparse_series = SparseSeries(values, fill_value=fill_value)\n dense_series = Series(values)\n sparse_usage = sparse_series.memory_usage(deep=deep)\n dense_usage = dense_series.memory_usage(deep=deep)\n\n assert sparse_usage < dense_usage\n\n\[email protected](\"ignore:Sparse:FutureWarning\")\nclass TestSparseHandlingMultiIndexes:\n\n def setup_method(self, method):\n miindex = pd.MultiIndex.from_product(\n [[\"x\", \"y\"], [\"10\", \"20\"]], names=['row-foo', 'row-bar'])\n micol = pd.MultiIndex.from_product(\n [['a', 'b', 'c'], [\"1\", \"2\"]], names=['col-foo', 'col-bar'])\n dense_multiindex_frame = 
pd.DataFrame(\n index=miindex, columns=micol).sort_index().sort_index(axis=1)\n self.dense_multiindex_frame = dense_multiindex_frame.fillna(value=3.14)\n\n def test_to_sparse_preserve_multiindex_names_columns(self):\n sparse_multiindex_frame = self.dense_multiindex_frame.to_sparse()\n sparse_multiindex_frame = sparse_multiindex_frame.copy()\n tm.assert_index_equal(sparse_multiindex_frame.columns,\n self.dense_multiindex_frame.columns)\n\n def test_round_trip_preserve_multiindex_names(self):\n sparse_multiindex_frame = self.dense_multiindex_frame.to_sparse()\n round_trip_multiindex_frame = sparse_multiindex_frame.to_dense()\n tm.assert_frame_equal(self.dense_multiindex_frame,\n round_trip_multiindex_frame,\n check_column_type=True,\n check_names=True)\n\n\[email protected]_if_no_scipy\[email protected](\n \"ignore:the matrix subclass:PendingDeprecationWarning\"\n)\[email protected](\"ignore:Sparse:FutureWarning\")\nclass TestSparseSeriesScipyInteraction:\n # Issue 8048: add SparseSeries coo methods\n\n def setup_method(self, method):\n import scipy.sparse\n # SparseSeries inputs used in tests, the tests rely on the order\n self.sparse_series = []\n s = pd.Series([3.0, nan, 1.0, 2.0, nan, nan])\n s.index = pd.MultiIndex.from_tuples([(1, 2, 'a', 0),\n (1, 2, 'a', 1),\n (1, 1, 'b', 0),\n (1, 1, 'b', 1),\n (2, 1, 'b', 0),\n (2, 1, 'b', 1)],\n names=['A', 'B', 'C', 'D'])\n self.sparse_series.append(s.to_sparse())\n\n ss = self.sparse_series[0].copy()\n ss.index.names = [3, 0, 1, 2]\n self.sparse_series.append(ss)\n\n ss = pd.Series([\n nan\n ] * 12, index=cartesian_product((range(3), range(4)))).to_sparse()\n for k, v in zip([(0, 0), (1, 2), (1, 3)], [3.0, 1.0, 2.0]):\n ss[k] = v\n self.sparse_series.append(ss)\n\n # results used in tests\n self.coo_matrices = []\n self.coo_matrices.append(scipy.sparse.coo_matrix(\n ([3.0, 1.0, 2.0], ([0, 1, 1], [0, 2, 3])), shape=(3, 4)))\n self.coo_matrices.append(scipy.sparse.coo_matrix(\n ([3.0, 1.0, 2.0], ([1, 0, 0], [0, 2, 3])), shape=(3, 4)))\n self.coo_matrices.append(scipy.sparse.coo_matrix(\n ([3.0, 1.0, 2.0], ([0, 1, 1], [0, 0, 1])), shape=(3, 2)))\n self.ils = [[(1, 2), (1, 1), (2, 1)], [(1, 1), (1, 2), (2, 1)],\n [(1, 2, 'a'), (1, 1, 'b'), (2, 1, 'b')]]\n self.jls = [[('a', 0), ('a', 1), ('b', 0), ('b', 1)], [0, 1]]\n\n def test_to_coo_text_names_integer_row_levels_nosort(self):\n ss = self.sparse_series[0]\n kwargs = {'row_levels': [0, 1], 'column_levels': [2, 3]}\n result = (self.coo_matrices[0], self.ils[0], self.jls[0])\n self._run_test(ss, kwargs, result)\n\n def test_to_coo_text_names_integer_row_levels_sort(self):\n ss = self.sparse_series[0]\n kwargs = {'row_levels': [0, 1],\n 'column_levels': [2, 3],\n 'sort_labels': True}\n result = (self.coo_matrices[1], self.ils[1], self.jls[0])\n self._run_test(ss, kwargs, result)\n\n def test_to_coo_text_names_text_row_levels_nosort_col_level_single(self):\n ss = self.sparse_series[0]\n kwargs = {'row_levels': ['A', 'B', 'C'],\n 'column_levels': ['D'],\n 'sort_labels': False}\n result = (self.coo_matrices[2], self.ils[2], self.jls[1])\n self._run_test(ss, kwargs, result)\n\n def test_to_coo_integer_names_integer_row_levels_nosort(self):\n ss = self.sparse_series[1]\n kwargs = {'row_levels': [3, 0], 'column_levels': [1, 2]}\n result = (self.coo_matrices[0], self.ils[0], self.jls[0])\n self._run_test(ss, kwargs, result)\n\n def test_to_coo_text_names_text_row_levels_nosort(self):\n ss = self.sparse_series[0]\n kwargs = {'row_levels': ['A', 'B'], 'column_levels': ['C', 'D']}\n result = 
(self.coo_matrices[0], self.ils[0], self.jls[0])\n self._run_test(ss, kwargs, result)\n\n def test_to_coo_bad_partition_nonnull_intersection(self):\n ss = self.sparse_series[0]\n msg = \"Is not a partition because intersection is not null\"\n with pytest.raises(ValueError, match=msg):\n ss.to_coo(['A', 'B', 'C'], ['C', 'D'])\n\n def test_to_coo_bad_partition_small_union(self):\n ss = self.sparse_series[0]\n msg = \"Is not a partition because union is not the whole\"\n with pytest.raises(ValueError, match=msg):\n ss.to_coo(['A'], ['C', 'D'])\n\n def test_to_coo_nlevels_less_than_two(self):\n ss = self.sparse_series[0]\n ss.index = np.arange(len(ss.index))\n msg = \"to_coo requires MultiIndex with nlevels > 2\"\n with pytest.raises(ValueError, match=msg):\n ss.to_coo()\n\n def test_to_coo_bad_ilevel(self):\n ss = self.sparse_series[0]\n with pytest.raises(KeyError, match=\"Level E not found\"):\n ss.to_coo(['A', 'B'], ['C', 'D', 'E'])\n\n def test_to_coo_duplicate_index_entries(self):\n ss = pd.concat([self.sparse_series[0],\n self.sparse_series[0]]).to_sparse()\n msg = (\"Duplicate index entries are not allowed in to_coo\"\n \" transformation\")\n with pytest.raises(ValueError, match=msg):\n ss.to_coo(['A', 'B'], ['C', 'D'])\n\n def test_from_coo_dense_index(self):\n ss = SparseSeries.from_coo(self.coo_matrices[0], dense_index=True)\n check = self.sparse_series[2]\n tm.assert_sp_series_equal(ss, check)\n\n def test_from_coo_nodense_index(self):\n ss = SparseSeries.from_coo(self.coo_matrices[0], dense_index=False)\n check = self.sparse_series[2]\n check = check.dropna().to_sparse()\n tm.assert_sp_series_equal(ss, check)\n\n def test_from_coo_long_repr(self):\n # GH 13114\n # test it doesn't raise error. Formatting is tested in test_format\n import scipy.sparse\n\n sparse = SparseSeries.from_coo(scipy.sparse.rand(350, 18))\n repr(sparse)\n\n def _run_test(self, ss, kwargs, check):\n results = ss.to_coo(**kwargs)\n self._check_results_to_coo(results, check)\n # for every test, also test symmetry property (transpose), switch\n # row_levels and column_levels\n d = kwargs.copy()\n d['row_levels'] = kwargs['column_levels']\n d['column_levels'] = kwargs['row_levels']\n results = ss.to_coo(**d)\n results = (results[0].T, results[2], results[1])\n self._check_results_to_coo(results, check)\n\n def _check_results_to_coo(self, results, check):\n (A, il, jl) = results\n (A_result, il_result, jl_result) = check\n # convert to dense and compare\n tm.assert_numpy_array_equal(A.todense(), A_result.todense())\n # or compare directly as difference of sparse\n # assert(abs(A - A_result).max() < 1e-12) # max is failing in python\n # 2.6\n assert il == il_result\n assert jl == jl_result\n\n def test_concat(self):\n val1 = np.array([1, 2, np.nan, np.nan, 0, np.nan])\n val2 = np.array([3, np.nan, 4, 0, 0])\n\n for kind in ['integer', 'block']:\n sparse1 = pd.SparseSeries(val1, name='x', kind=kind)\n sparse2 = pd.SparseSeries(val2, name='y', kind=kind)\n\n res = pd.concat([sparse1, sparse2])\n exp = pd.concat([pd.Series(val1), pd.Series(val2)])\n exp = pd.SparseSeries(exp, kind=kind)\n tm.assert_sp_series_equal(res, exp)\n\n sparse1 = pd.SparseSeries(val1, fill_value=0, name='x', kind=kind)\n sparse2 = pd.SparseSeries(val2, fill_value=0, name='y', kind=kind)\n\n res = pd.concat([sparse1, sparse2])\n exp = pd.concat([pd.Series(val1), pd.Series(val2)])\n exp = pd.SparseSeries(exp, fill_value=0, kind=kind)\n tm.assert_sp_series_equal(res, exp,\n consolidate_block_indices=True)\n\n def test_concat_axis1(self):\n val1 = 
np.array([1, 2, np.nan, np.nan, 0, np.nan])\n val2 = np.array([3, np.nan, 4, 0, 0])\n\n sparse1 = pd.SparseSeries(val1, name='x')\n sparse2 = pd.SparseSeries(val2, name='y')\n\n res = pd.concat([sparse1, sparse2], axis=1)\n exp = pd.concat([pd.Series(val1, name='x'),\n pd.Series(val2, name='y')], axis=1)\n exp = pd.SparseDataFrame(exp)\n tm.assert_sp_frame_equal(res, exp)\n\n def test_concat_different_fill(self):\n val1 = np.array([1, 2, np.nan, np.nan, 0, np.nan])\n val2 = np.array([3, np.nan, 4, 0, 0])\n\n for kind in ['integer', 'block']:\n sparse1 = pd.SparseSeries(val1, name='x', kind=kind)\n sparse2 = pd.SparseSeries(val2, name='y', kind=kind, fill_value=0)\n\n with tm.assert_produces_warning(PerformanceWarning,\n raise_on_extra_warnings=False):\n res = pd.concat([sparse1, sparse2])\n exp = pd.concat([pd.Series(val1), pd.Series(val2)])\n exp = pd.SparseSeries(exp, kind=kind)\n tm.assert_sp_series_equal(res, exp)\n\n with tm.assert_produces_warning(PerformanceWarning,\n raise_on_extra_warnings=False):\n res = pd.concat([sparse2, sparse1])\n exp = pd.concat([pd.Series(val2), pd.Series(val1)])\n exp = pd.SparseSeries(exp, kind=kind, fill_value=0)\n tm.assert_sp_series_equal(res, exp)\n\n def test_concat_axis1_different_fill(self):\n val1 = np.array([1, 2, np.nan, np.nan, 0, np.nan])\n val2 = np.array([3, np.nan, 4, 0, 0])\n\n sparse1 = pd.SparseSeries(val1, name='x')\n sparse2 = pd.SparseSeries(val2, name='y', fill_value=0)\n\n res = pd.concat([sparse1, sparse2], axis=1)\n exp = pd.concat([pd.Series(val1, name='x'),\n pd.Series(val2, name='y')], axis=1)\n assert isinstance(res, pd.SparseDataFrame)\n tm.assert_frame_equal(res.to_dense(), exp)\n\n def test_concat_different_kind(self):\n val1 = np.array([1, 2, np.nan, np.nan, 0, np.nan])\n val2 = np.array([3, np.nan, 4, 0, 0])\n\n sparse1 = pd.SparseSeries(val1, name='x', kind='integer')\n sparse2 = pd.SparseSeries(val2, name='y', kind='block', fill_value=0)\n\n with tm.assert_produces_warning(PerformanceWarning,\n raise_on_extra_warnings=False):\n res = pd.concat([sparse1, sparse2])\n exp = pd.concat([pd.Series(val1), pd.Series(val2)])\n exp = pd.SparseSeries(exp, kind='integer')\n tm.assert_sp_series_equal(res, exp)\n\n with tm.assert_produces_warning(PerformanceWarning,\n raise_on_extra_warnings=False):\n res = pd.concat([sparse2, sparse1])\n exp = pd.concat([pd.Series(val2), pd.Series(val1)])\n exp = pd.SparseSeries(exp, kind='block', fill_value=0)\n tm.assert_sp_series_equal(res, exp)\n\n def test_concat_sparse_dense(self):\n # use first input's fill_value\n val1 = np.array([1, 2, np.nan, np.nan, 0, np.nan])\n val2 = np.array([3, np.nan, 4, 0, 0])\n\n for kind in ['integer', 'block']:\n sparse = pd.SparseSeries(val1, name='x', kind=kind)\n dense = pd.Series(val2, name='y')\n\n res = pd.concat([sparse, dense])\n exp = pd.concat([pd.Series(val1), dense])\n exp = pd.SparseSeries(exp, kind=kind)\n tm.assert_sp_series_equal(res, exp)\n\n res = pd.concat([dense, sparse, dense])\n exp = pd.concat([dense, pd.Series(val1), dense])\n exp = exp.astype(\"Sparse\")\n tm.assert_series_equal(res, exp)\n\n sparse = pd.SparseSeries(val1, name='x', kind=kind, fill_value=0)\n dense = pd.Series(val2, name='y')\n\n res = pd.concat([sparse, dense])\n exp = pd.concat([pd.Series(val1), dense])\n exp = exp.astype(SparseDtype(exp.dtype, 0))\n tm.assert_series_equal(res, exp)\n\n res = pd.concat([dense, sparse, dense])\n exp = pd.concat([dense, pd.Series(val1), dense])\n exp = exp.astype(SparseDtype(exp.dtype, 0))\n tm.assert_series_equal(res, exp)\n\n def 
test_value_counts(self):\n vals = [1, 2, nan, 0, nan, 1, 2, nan, nan, 1, 2, 0, 1, 1]\n dense = pd.Series(vals, name='xx')\n\n sparse = pd.SparseSeries(vals, name='xx')\n tm.assert_series_equal(sparse.value_counts(),\n dense.value_counts())\n tm.assert_series_equal(sparse.value_counts(dropna=False),\n dense.value_counts(dropna=False))\n\n sparse = pd.SparseSeries(vals, name='xx', fill_value=0)\n tm.assert_series_equal(sparse.value_counts(),\n dense.value_counts())\n tm.assert_series_equal(sparse.value_counts(dropna=False),\n dense.value_counts(dropna=False))\n\n def test_value_counts_dup(self):\n vals = [1, 2, nan, 0, nan, 1, 2, nan, nan, 1, 2, 0, 1, 1]\n\n # numeric op may cause sp_values to include the same value as\n # fill_value\n dense = pd.Series(vals, name='xx') / 0.\n sparse = pd.SparseSeries(vals, name='xx') / 0.\n tm.assert_series_equal(sparse.value_counts(),\n dense.value_counts())\n tm.assert_series_equal(sparse.value_counts(dropna=False),\n dense.value_counts(dropna=False))\n\n vals = [1, 2, 0, 0, 0, 1, 2, 0, 0, 1, 2, 0, 1, 1]\n\n dense = pd.Series(vals, name='xx') * 0.\n sparse = pd.SparseSeries(vals, name='xx') * 0.\n tm.assert_series_equal(sparse.value_counts(),\n dense.value_counts())\n tm.assert_series_equal(sparse.value_counts(dropna=False),\n dense.value_counts(dropna=False))\n\n def test_value_counts_int(self):\n vals = [1, 2, 0, 1, 2, 1, 2, 0, 1, 1]\n dense = pd.Series(vals, name='xx')\n\n # fill_value is np.nan, but should not be included in the result\n sparse = pd.SparseSeries(vals, name='xx')\n tm.assert_series_equal(sparse.value_counts(),\n dense.value_counts())\n tm.assert_series_equal(sparse.value_counts(dropna=False),\n dense.value_counts(dropna=False))\n\n sparse = pd.SparseSeries(vals, name='xx', fill_value=0)\n tm.assert_series_equal(sparse.value_counts(),\n dense.value_counts())\n tm.assert_series_equal(sparse.value_counts(dropna=False),\n dense.value_counts(dropna=False))\n\n def test_isna(self):\n # GH 8276\n s = pd.SparseSeries([np.nan, np.nan, 1, 2, np.nan], name='xxx')\n\n res = s.isna()\n exp = pd.SparseSeries([True, True, False, False, True], name='xxx',\n fill_value=True)\n tm.assert_sp_series_equal(res, exp)\n\n # if fill_value is not nan, True can be included in sp_values\n s = pd.SparseSeries([np.nan, 0., 1., 2., 0.], name='xxx',\n fill_value=0.)\n res = s.isna()\n assert isinstance(res, pd.SparseSeries)\n exp = pd.Series([True, False, False, False, False], name='xxx')\n tm.assert_series_equal(res.to_dense(), exp)\n\n def test_notna(self):\n # GH 8276\n s = pd.SparseSeries([np.nan, np.nan, 1, 2, np.nan], name='xxx')\n\n res = s.notna()\n exp = pd.SparseSeries([False, False, True, True, False], name='xxx',\n fill_value=False)\n tm.assert_sp_series_equal(res, exp)\n\n # if fill_value is not nan, True can be included in sp_values\n s = pd.SparseSeries([np.nan, 0., 1., 2., 0.], name='xxx',\n fill_value=0.)\n res = s.notna()\n assert isinstance(res, pd.SparseSeries)\n exp = pd.Series([False, True, True, True, True], name='xxx')\n tm.assert_series_equal(res.to_dense(), exp)\n\n\ndef _dense_series_compare(s, f):\n result = f(s)\n assert (isinstance(result, SparseSeries))\n dense_result = f(s.to_dense())\n tm.assert_series_equal(result.to_dense(), dense_result)\n\n\[email protected](\"ignore:Sparse:FutureWarning\")\nclass TestSparseSeriesAnalytics:\n\n def setup_method(self, method):\n arr, index = _test_data1()\n self.bseries = SparseSeries(arr, index=index, kind='block',\n name='bseries')\n\n arr, index = _test_data1_zero()\n self.zbseries = 
SparseSeries(arr, index=index, kind='block',\n fill_value=0, name='zbseries')\n\n def test_cumsum(self):\n result = self.bseries.cumsum()\n expected = SparseSeries(self.bseries.to_dense().cumsum())\n tm.assert_sp_series_equal(result, expected)\n\n result = self.zbseries.cumsum()\n expected = self.zbseries.to_dense().cumsum().to_sparse()\n tm.assert_series_equal(result, expected)\n\n axis = 1 # Series is 1-D, so only axis = 0 is valid.\n msg = \"No axis named {axis}\".format(axis=axis)\n with pytest.raises(ValueError, match=msg):\n self.bseries.cumsum(axis=axis)\n\n def test_numpy_cumsum(self):\n result = np.cumsum(self.bseries)\n expected = SparseSeries(self.bseries.to_dense().cumsum())\n tm.assert_sp_series_equal(result, expected)\n\n result = np.cumsum(self.zbseries)\n expected = self.zbseries.to_dense().cumsum().to_sparse()\n tm.assert_series_equal(result, expected)\n\n msg = \"the 'dtype' parameter is not supported\"\n with pytest.raises(ValueError, match=msg):\n np.cumsum(self.bseries, dtype=np.int64)\n\n msg = \"the 'out' parameter is not supported\"\n with pytest.raises(ValueError, match=msg):\n np.cumsum(self.zbseries, out=result)\n\n def test_numpy_func_call(self):\n # no exception should be raised even though\n # numpy passes in 'axis=None' or `axis=-1'\n funcs = ['sum', 'cumsum', 'var', 'mean',\n 'prod', 'cumprod', 'std', 'argsort',\n 'min', 'max']\n for func in funcs:\n for series in ('bseries', 'zbseries'):\n getattr(np, func)(getattr(self, series))\n\n def test_deprecated_numpy_func_call(self):\n # NOTE: These should be add to the 'test_numpy_func_call' test above\n # once the behavior of argmin/argmax is corrected.\n funcs = ['argmin', 'argmax']\n for func in funcs:\n for series in ('bseries', 'zbseries'):\n with tm.assert_produces_warning(FutureWarning,\n check_stacklevel=False,\n raise_on_extra_warnings=False):\n getattr(np, func)(getattr(self, series))\n\n with tm.assert_produces_warning(FutureWarning,\n check_stacklevel=False,\n raise_on_extra_warnings=False):\n getattr(getattr(self, series), func)()\n\n def test_deprecated_reindex_axis(self):\n # https://github.com/pandas-dev/pandas/issues/17833\n # Multiple FutureWarnings, can't check stacklevel\n with tm.assert_produces_warning(FutureWarning,\n check_stacklevel=False) as m:\n self.bseries.reindex_axis([0, 1, 2])\n assert 'reindex' in str(m[0].message)\n\n\[email protected](\n 'datetime_type', (np.datetime64,\n pd.Timestamp,\n lambda x: datetime.strptime(x, '%Y-%m-%d')))\[email protected](\"ignore:Sparse:FutureWarning\")\ndef test_constructor_dict_datetime64_index(datetime_type):\n # GH 9456\n dates = ['1984-02-19', '1988-11-06', '1989-12-03', '1990-03-15']\n values = [42544017.198965244, 1234565, 40512335.181958228, -1]\n\n result = SparseSeries(dict(zip(map(datetime_type, dates), values)))\n expected = SparseSeries(values, map(pd.Timestamp, dates))\n\n tm.assert_sp_series_equal(result, expected)\n\n\[email protected](\"ignore:Sparse:FutureWarning\")\ndef test_to_sparse():\n # https://github.com/pandas-dev/pandas/issues/22389\n arr = pd.SparseArray([1, 2, None, 3])\n result = pd.Series(arr).to_sparse()\n assert len(result) == 4\n tm.assert_sp_array_equal(result.values, arr, check_kind=False)\n\n\[email protected](\"ignore:Sparse:FutureWarning\")\ndef test_constructor_mismatched_raises():\n msg = \"Length of passed values is 2, index implies 3\"\n with pytest.raises(ValueError, match=msg):\n SparseSeries([1, 2], index=[1, 2, 3])\n\n\[email protected](\"ignore:Sparse:FutureWarning\")\ndef test_block_deprecated():\n s = 
SparseSeries([1])\n with tm.assert_produces_warning(FutureWarning):\n s.block\n"
] |
[
[
"pandas.util.testing.assert_sp_frame_equal",
"pandas.Series",
"numpy.take",
"numpy.asarray",
"pandas.util.testing.assert_produces_warning",
"pandas._libs.sparse.IntIndex",
"pandas.MultiIndex.from_tuples",
"pandas.DataFrame",
"pandas.SparseDataFrame",
"pandas.util.testing.assert_frame_equal",
"pandas.SparseSeries.from_coo",
"pandas.SparseSeries",
"pandas.util.testing.assert_index_equal",
"numpy.cumsum",
"numpy.random.randn",
"numpy.dtype",
"pandas.util.testing.round_trip_pickle",
"pandas.isna",
"pandas.util.testing.assert_sp_series_equal",
"pandas.tseries.offsets.BDay",
"pandas.util.testing.assert_numpy_array_equal",
"numpy.arange",
"pandas.util.testing.assert_series_equal",
"pandas.core.sparse.frame.homogenize",
"pandas._libs.sparse.BlockIndex",
"numpy.repeat",
"pandas.concat",
"pandas.bdate_range",
"numpy.isnan",
"pandas.util.testing.assert_almost_equal",
"pandas.util.testing.assert_sp_array_equal",
"pandas.MultiIndex.from_product",
"numpy.array",
"pandas.SparseDtype",
"pandas.SparseArray",
"numpy.abs",
"numpy.ones",
"numpy.isinf",
"numpy.empty"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [
"0.24",
"0.25"
],
"scipy": [],
"tensorflow": []
}
] |
avimehenwal/tng-sdk-benchmark
|
[
"e3b39769f4ebdeb02fb26e74ecf8c77ed96d85a6"
] |
[
"src/tngsdk/benchmark/resultprocessor/vimemu.py"
] |
[
"# Copyright (c) 2018 SONATA-NFV, 5GTANGO, Paderborn University\n# ALL RIGHTS RESERVED.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n#\n# Neither the name of the SONATA-NFV, 5GTANGO, Paderborn University\n# nor the names of its contributors may be used to endorse or promote\n# products derived from this software without specific prior written\n# permission.\n#\n# This work has been performed in the framework of the SONATA project,\n# funded by the European Commission under Grant number 671517 through\n# the Horizon 2020 and 5G-PPP programmes. The authors would like to\n# acknowledge the contributions of their colleagues of the SONATA\n# partner consortium (www.sonata-nfv.eu).\n#\n# This work has also been performed in the framework of the 5GTANGO project,\n# funded by the European Commission under Grant number 761493 through\n# the Horizon 2020 and 5G-PPP programmes. The authors would like to\n# acknowledge the contributions of their colleagues of the SONATA\n# partner consortium (www.5gtango.eu).\nimport os\nimport pandas as pd\nfrom flatten_dict import flatten\nfrom tngsdk.benchmark.logger import TangoLogger\nfrom tngsdk.benchmark.helper import read_json, read_yaml\nfrom tngsdk.benchmark.helper import dubunderscore_reducer\n\n\nLOG = TangoLogger.getLogger(__name__)\n\n\nPATH_EX_CONFIG = \"ex_config.json\"\nPATH_CONTAINER_MONITORING = \"cmon.json\"\nPATH_CONTAINER_RESULT = \"tngbench_share/result.yml\"\nPATH_EXPERIMENT_TIMES = \"experiment_times.json\"\n\nPATH_OUTPUT_EC_METRICS = \"result_ec_metrics.csv\"\nPATH_OUTPUT_TS_METRICS = \"result_ts_metrics.csv\"\n\n\nclass VimemuResultProcessor(object):\n\n def __init__(self, args, service_experiments):\n self.args = args\n self.result_dir = args.result_dir\n self.service_experiments = service_experiments\n\n def run(self):\n if not os.path.exists(self.result_dir):\n LOG.info(\"Result dir '{}' does not exist. Skipping\"\n .format(self.result_dir))\n return\n # FIXME support multipe experiments in a single result folder\n # gen. 
list of result folder per experiment run\n rdlist = sorted([os.path.join(self.result_dir, rd)\n for rd in os.listdir(self.result_dir)\n if os.path.isdir(os.path.join(self.result_dir, rd))])\n # read experiment metrics\n df_em = self.read_experiment_metrics(rdlist)\n # read timeseries metrics\n # df_tm = self.read_timeseries_metrics(rdlist)\n df_em.info()\n # df_tm.info()\n # store the data frames\n df_em.to_csv(os.path.join(self.result_dir, PATH_OUTPUT_EC_METRICS))\n # df_tm.to_csv(os.path.join(self.result_dir, PATH_OUTPUT_TS_METRICS))\n\n def read_experiment_metrics(self, rdlist):\n \"\"\"\n return pandas\n \"\"\"\n rows = list()\n for idx, rd in enumerate(rdlist):\n LOG.info(\"Processing experiment metrics {}/{}\"\n .format(idx + 1, len(rdlist)))\n row = dict()\n try:\n # collect data from different sources\n row.update(self._collect_ecs(rd))\n row.update(self._collect_times(rd))\n row.update(self._collect_container_results(rd))\n except FileNotFoundError as ex:\n LOG.error(\"Result corrupted: {}\".format(ex))\n rows.append(row)\n # to Pandas\n return pd.DataFrame(rows)\n\n def read_timeseries_metrics(self, rdlist):\n \"\"\"\n return pandas\n \"\"\"\n rows = list()\n for idx, rd in enumerate(rdlist):\n LOG.info(\"Processing timeseries metrics {}/{}\"\n .format(idx + 1, len(rdlist)))\n try:\n rows.extend(self._collect_ts_container_monitoring(rd))\n except FileNotFoundError as ex:\n LOG.error(\"Result corrupted: {}\".format(ex))\n # to Pandas\n return pd.DataFrame(rows)\n\n def _collect_ecs(self, rd):\n \"\"\"\n Collect ECs from 'PATH_EX_CONFIG'\n \"\"\"\n r = dict()\n jo = read_json(os.path.join(rd, PATH_EX_CONFIG))\n r[\"run_id\"] = jo.get(\"run_id\", -1)\n r[\"experiment_name\"] = jo.get(\"name\")\n if \"parameter\" in jo:\n for k, v in jo.get(\"parameter\").items():\n # clean up the parameter keys\n k = k.replace(\"ep::\", \"param::\")\n k = k.replace(\"function\", \"func\")\n k = k.replace(\"::\", \"__\")\n r[k] = v\n return r\n\n def _collect_times(self, rd):\n \"\"\"\n Collect experiment times from 'PATH_EXPERIMENT_TIMES'\n \"\"\"\n return read_json(os.path.join(rd, PATH_EXPERIMENT_TIMES))\n\n def _collect_container_results(self, rd):\n \"\"\"\n Collect ECs from '<container_name>/PATH_CONTAINER_RESULT'\n \"\"\"\n r = dict()\n # iterate over all container directories\n for cd in self._get_container_from_rd(rd):\n try:\n yml = read_yaml(os.path.join(rd, cd, PATH_CONTAINER_RESULT))\n for k, v in yml.items():\n # add container name as key prefix\n k = \"metric__{}__{}\".format(self._get_clean_cname(cd), k)\n r[k] = v\n except BaseException as ex:\n LOG.warning(\"Couldn't process all container results: {}\"\n .format(ex))\n return r\n\n def _collect_ts_container_monitoring(self, rd):\n \"\"\"\n Collect time series data from 'PATH_CONTAINER_MONITORING'\n Data: list of tuples(timestamp, dict(docker stats))\n Returns list of rows\n \"\"\"\n samples = read_json(os.path.join(rd, PATH_CONTAINER_MONITORING))\n rows = list()\n min_time = min([ts for (ts, smpl) in samples])\n for (ts, smpl) in samples:\n row = dict()\n row[\"timestamp\"] = ts - min_time\n row.update(self._flat_sample(smpl))\n rows.append(row)\n return rows\n\n def _get_container_from_rd(self, rd):\n return sorted([cd for cd in os.listdir(rd)\n if os.path.isdir(os.path.join(rd, cd))\n and \"mn.\" in cd])\n\n def _get_clean_cname(self, name):\n return name.replace(\"mn.\", \"\").strip(\".-/_ \")\n\n def _flat_sample(self, smpl):\n \"\"\"\n Make a flat dict from given multi-dim smpl dict.\n \"\"\"\n r = dict()\n for cname, data in 
smpl.items():\n r[\"cname\"] = cname\n r.update(flatten(data, reducer=dubunderscore_reducer))\n return r\n"
] |
[
[
"pandas.DataFrame"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [
"0.23",
"0.21",
"2.0",
"1.4",
"0.19",
"1.1",
"1.5",
"1.2",
"0.24",
"0.20",
"1.0",
"0.25",
"1.3"
],
"scipy": [],
"tensorflow": []
}
] |
brugger1/testsuite
|
[
"9b504db668cdeaf7c561f15b76c95d05bfdd1517"
] |
[
"validation_tests/llvm/SOLLVE/pragmas/crout/findMin.py"
] |
[
"import pandas\nfrom sklearn import model_selection\nfrom sklearn.linear_model import LogisticRegression\n\ndataframe = pandas.read_csv(\"results.csv\")\narray = dataframe.values\nx = array[:,6]\n\nprint(\"Performance summary based on\", len(array), \"evaluations:\")\nprint(\"Min: \", x.min(), \"s\")\nprint(\"Max: \", x.max(), \"s\")\nprint(\"Mean: \", x.mean(), \"s\")\nprint(\"The best configurations (for the smallest time) of P0, P1, P2, P3, P4 and P5 is:\\n\")\nprint(\"P0 P1\tP2 \tP3 \tP4 P5\texecution time\t elapsed time\\n\")\nmn = x.min()\nfor i in range(len(array)): \n if x[i] == mn:\n print (array[i,:])\n"
] |
[
[
"pandas.read_csv"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [
"2.0",
"1.4",
"1.1",
"1.5",
"1.2",
"1.3"
],
"scipy": [],
"tensorflow": []
}
] |
S2-group/ICSME-2020-replication-package
|
[
"5c468f2f5552a5971d59fc84581298c9e3c469bc"
] |
[
"network/parse-har.py"
] |
[
"import sys\nimport json\nimport numpy\nfrom haralyzer import HarParser, HarPage\n\ndef get_number_of_requests(har_page, type):\n return len(har_page.filter_entries(request_type=type, status_code='2.*'))\n\ndef get_load_time(har_page, type):\n return round(har_page.get_load_time(request_type=type, status_code='2.*', asynchronous=False))\n\n\ndef print_filetype_load_times(har_page):\n print(\"Image file size: \" + str(har_page.image_size_trans) + ' bytes')\n print('Image load time: ' + str(har_page.get_load_time(content_type='image.*', status_code='2.*')) + 'ms')\n print('JSON load time: ' + str(har_page.get_load_time(content_type='json', status_code='2.*')) + 'ms')\n\n\ndef print_request_type_load_time(har_page):\n print('Number of GET requests: ' + str(get_number_of_requests(har_page, 'GET')))\n print('GET load time: ' + str(get_load_time(har_page, 'GET')) + 'ms')\n print('Number of POST requests: ' + str(get_number_of_requests(har_page, 'POST')))\n print('POST load time: ' + str(get_load_time(har_page, 'POST')) + 'ms')\n print('Number of OPTIONS requests: ' + str(get_number_of_requests(har_page, 'OPTIONS')))\n print('OPTIONS load time: ' + str(get_load_time(har_page, 'OPTIONS')) + 'ms')\n\ndef do_request_analysis(har_page):\n getRequests = page.filter_entries(request_type='GET', status_code='2.*')\n postRequests = page.filter_entries(request_type='POST', status_code='2.*')\n optionsRequests = page.filter_entries(request_type='OPTIONS', status_code='2.*')\n\n allRequests = getRequests + postRequests + optionsRequests\n print('Total number of requests: ' + str(len(allRequests)))\n\n totalTransferSize = 0\n transferSizes = []\n times = []\n\n for request in allRequests:\n time = 0\n response = request['response']\n transferSize = response['_transferSize']\n timings = request['timings']\n\n # Add up all the timing components to get the request time\n for key in request['timings']:\n # null values are -1, we do not want those\n if timings[key] >= 0:\n time += timings[key]\n\n times.append(time)\n totalTransferSize += transferSize\n transferSizes.append(transferSize)\n \n print('Total bytes transferred: ' + str(totalTransferSize))\n print('Total time taken to transfer bytes: ' + str(round(sum(times))) + 'ms')\n print('Mean time taken by requests: ' + str(round(numpy.mean(times))) + 'ms')\n\n\n\n# Prints duplicate API requests and their count\ndef print_duplicate_requests(har_page):\n duplicates = har_page.duplicate_url_request\n for duplicate in duplicates:\n print('URL: ' + duplicate + '\\t count: ' + str(duplicates[duplicate]))\n\nif __name__ == \"__main__\":\n if len(sys.argv) < 2:\n print('This script requires you to specify a .HAR file as input. Please do so.')\n exit(1)\n\n with open(sys.argv[1], 'r') as f:\n har_parser = HarParser(json.loads(f.read()))\n\n page = har_parser.pages[0]\n assert(isinstance(page, HarPage))\n\n print(\"Showing stats for URL: \" + page.url)\n print()\n\n # print(\"Duplicate requests to URLs\")\n # print_duplicate_requests(page)\n # print()\n print_filetype_load_times(page)\n print()\n print_request_type_load_time(page)\n print()\n do_request_analysis(page)\n print()\n print('Total time taken by browser to load the page: ' + str(page.get_load_time()) + 'ms')\n\n"
] |
[
[
"numpy.mean"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
emildi/pennylane
|
[
"64901ef2f920f42385b65c8da538941ff36da7be",
"64901ef2f920f42385b65c8da538941ff36da7be",
"64901ef2f920f42385b65c8da538941ff36da7be"
] |
[
"tests/interfaces/test_batch_torch.py",
"pennylane/numpy/tensor.py",
"pennylane/ops/cv.py"
] |
[
"# Copyright 2018-2021 Xanadu Quantum Technologies Inc.\r\n\r\n# Licensed under the Apache License, Version 2.0 (the \"License\");\r\n# you may not use this file except in compliance with the License.\r\n# You may obtain a copy of the License at\r\n\r\n# http://www.apache.org/licenses/LICENSE-2.0\r\n\r\n# Unless required by applicable law or agreed to in writing, software\r\n# distributed under the License is distributed on an \"AS IS\" BASIS,\r\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\r\n# See the License for the specific language governing permissions and\r\n# limitations under the License.\r\n\"\"\"Unit tests for the Torch interface\"\"\"\r\nimport functools\r\n\r\nimport numpy as np\r\nimport pytest\r\n\r\ntorch = pytest.importorskip(\"torch\")\r\n\r\nimport pennylane as qml\r\nfrom pennylane.gradients import finite_diff, param_shift\r\nfrom pennylane.interfaces.batch import execute\r\n\r\n\r\nclass TestTorchExecuteUnitTests:\r\n \"\"\"Unit tests for torch execution\"\"\"\r\n\r\n def test_jacobian_options(self, mocker, tol):\r\n \"\"\"Test setting jacobian options\"\"\"\r\n spy = mocker.spy(qml.gradients, \"param_shift\")\r\n\r\n a = torch.tensor([0.1, 0.2], requires_grad=True)\r\n\r\n dev = qml.device(\"default.qubit\", wires=1)\r\n\r\n with qml.tape.JacobianTape() as tape:\r\n qml.RY(a[0], wires=0)\r\n qml.RX(a[1], wires=0)\r\n qml.expval(qml.PauliZ(0))\r\n\r\n res = execute(\r\n [tape],\r\n dev,\r\n gradient_fn=param_shift,\r\n gradient_kwargs={\"shift\": np.pi / 4},\r\n interface=\"torch\",\r\n )[0]\r\n\r\n res.backward()\r\n\r\n for args in spy.call_args_list:\r\n assert args[1][\"shift\"] == np.pi / 4\r\n\r\n def test_incorrect_mode(self):\r\n \"\"\"Test that an error is raised if a gradient transform\r\n is used with mode=forward\"\"\"\r\n a = torch.tensor([0.1, 0.2], requires_grad=True)\r\n\r\n dev = qml.device(\"default.qubit\", wires=1)\r\n\r\n with qml.tape.JacobianTape() as tape:\r\n qml.RY(a[0], wires=0)\r\n qml.RX(a[1], wires=0)\r\n qml.expval(qml.PauliZ(0))\r\n\r\n with pytest.raises(\r\n ValueError, match=\"Gradient transforms cannot be used with mode='forward'\"\r\n ):\r\n execute([tape], dev, gradient_fn=param_shift, mode=\"forward\", interface=\"torch\")[0]\r\n\r\n def test_forward_mode_reuse_state(self, mocker):\r\n \"\"\"Test that forward mode uses the `device.execute_and_gradients` pathway\r\n while reusing the quantum state.\"\"\"\r\n dev = qml.device(\"default.qubit\", wires=1)\r\n spy = mocker.spy(dev, \"execute_and_gradients\")\r\n\r\n a = torch.tensor([0.1, 0.2], requires_grad=True)\r\n\r\n with qml.tape.JacobianTape() as tape:\r\n qml.RY(a[0], wires=0)\r\n qml.RX(a[1], wires=0)\r\n qml.expval(qml.PauliZ(0))\r\n\r\n res = execute(\r\n [tape],\r\n dev,\r\n gradient_fn=\"device\",\r\n gradient_kwargs={\"method\": \"adjoint_jacobian\", \"use_device_state\": True},\r\n interface=\"torch\",\r\n )[0]\r\n\r\n # adjoint method only performs a single device execution, but gets both result and gradient\r\n assert dev.num_executions == 1\r\n spy.assert_called()\r\n\r\n def test_forward_mode(self, mocker):\r\n \"\"\"Test that forward mode uses the `device.execute_and_gradients` pathway\"\"\"\r\n dev = qml.device(\"default.qubit\", wires=1)\r\n spy = mocker.spy(dev, \"execute_and_gradients\")\r\n\r\n a = torch.tensor([0.1, 0.2], requires_grad=True)\r\n\r\n with qml.tape.JacobianTape() as tape:\r\n qml.RY(a[0], wires=0)\r\n qml.RX(a[1], wires=0)\r\n qml.expval(qml.PauliZ(0))\r\n\r\n res = execute(\r\n [tape],\r\n dev,\r\n 
gradient_fn=\"device\",\r\n gradient_kwargs={\"method\": \"adjoint_jacobian\"},\r\n interface=\"torch\",\r\n )[0]\r\n\r\n # two device executions; one for the value, one for the Jacobian\r\n assert dev.num_executions == 2\r\n spy.assert_called()\r\n\r\n def test_backward_mode(self, mocker):\r\n \"\"\"Test that backward mode uses the `device.batch_execute` and `device.gradients` pathway\"\"\"\r\n dev = qml.device(\"default.qubit\", wires=1)\r\n spy_execute = mocker.spy(qml.devices.DefaultQubit, \"batch_execute\")\r\n spy_gradients = mocker.spy(qml.devices.DefaultQubit, \"gradients\")\r\n\r\n a = torch.tensor([0.1, 0.2], requires_grad=True)\r\n\r\n with qml.tape.JacobianTape() as tape:\r\n qml.RY(a[0], wires=0)\r\n qml.RX(a[1], wires=0)\r\n qml.expval(qml.PauliZ(0))\r\n\r\n res = execute(\r\n [tape],\r\n dev,\r\n gradient_fn=\"device\",\r\n mode=\"backward\",\r\n gradient_kwargs={\"method\": \"adjoint_jacobian\"},\r\n interface=\"torch\",\r\n )[0]\r\n\r\n assert dev.num_executions == 1\r\n spy_execute.assert_called()\r\n spy_gradients.assert_not_called()\r\n\r\n res.backward()\r\n spy_gradients.assert_called()\r\n\r\n\r\nclass TestCaching:\r\n \"\"\"Test for caching behaviour\"\"\"\r\n\r\n def test_cache_maxsize(self, mocker):\r\n \"\"\"Test the cachesize property of the cache\"\"\"\r\n dev = qml.device(\"default.qubit\", wires=1)\r\n spy = mocker.spy(qml.interfaces.batch, \"cache_execute\")\r\n\r\n def cost(a, cachesize):\r\n with qml.tape.JacobianTape() as tape:\r\n qml.RY(a[0], wires=0)\r\n qml.RX(a[1], wires=0)\r\n qml.probs(wires=0)\r\n\r\n return execute(\r\n [tape], dev, gradient_fn=param_shift, cachesize=cachesize, interface=\"torch\"\r\n )[0][0, 0]\r\n\r\n params = torch.tensor([0.1, 0.2], requires_grad=True)\r\n res = cost(params, cachesize=2)\r\n res.backward()\r\n cache = spy.call_args[0][1]\r\n\r\n assert cache.maxsize == 2\r\n assert cache.currsize == 2\r\n assert len(cache) == 2\r\n\r\n def test_custom_cache(self, mocker):\r\n \"\"\"Test the use of a custom cache object\"\"\"\r\n dev = qml.device(\"default.qubit\", wires=1)\r\n spy = mocker.spy(qml.interfaces.batch, \"cache_execute\")\r\n\r\n def cost(a, cache):\r\n with qml.tape.JacobianTape() as tape:\r\n qml.RY(a[0], wires=0)\r\n qml.RX(a[1], wires=0)\r\n qml.probs(wires=0)\r\n\r\n return execute([tape], dev, gradient_fn=param_shift, cache=cache, interface=\"torch\")[0][\r\n 0, 0\r\n ]\r\n\r\n custom_cache = {}\r\n params = torch.tensor([0.1, 0.2], requires_grad=True)\r\n res = cost(params, cache=custom_cache)\r\n res.backward()\r\n\r\n cache = spy.call_args[0][1]\r\n assert cache is custom_cache\r\n\r\n def test_caching_param_shift(self, tol):\r\n \"\"\"Test that, with the parameter-shift transform,\r\n Torch always uses the optimum number of evals when computing the Jacobian.\"\"\"\r\n dev = qml.device(\"default.qubit\", wires=1)\r\n\r\n def cost(a, cache):\r\n with qml.tape.JacobianTape() as tape:\r\n qml.RY(a[0], wires=0)\r\n qml.RX(a[1], wires=0)\r\n qml.probs(wires=0)\r\n\r\n return execute([tape], dev, gradient_fn=param_shift, cache=cache, interface=\"torch\")[0][\r\n 0, 0\r\n ]\r\n\r\n # Without caching, 5 evaluations are required to compute\r\n # the Jacobian: 1 (forward pass) + (2 shifts * 2 params)\r\n params = torch.tensor([0.1, 0.2], requires_grad=True)\r\n torch.autograd.functional.jacobian(lambda p: cost(p, cache=None), params)\r\n assert dev.num_executions == 5\r\n\r\n # With caching, 5 evaluations are required to compute\r\n # the Jacobian: 1 (forward pass) + (2 shifts * 2 params)\r\n dev._num_executions = 
0\r\n torch.autograd.functional.jacobian(lambda p: cost(p, cache=True), params)\r\n assert dev.num_executions == 5\r\n\r\n @pytest.mark.parametrize(\"num_params\", [2, 3])\r\n def test_caching_param_shift_hessian(self, num_params, tol):\r\n \"\"\"Test that, with the parameter-shift transform,\r\n caching reduces the number of evaluations to their optimum\r\n when computing Hessians.\"\"\"\r\n dev = qml.device(\"default.qubit\", wires=2)\r\n params = torch.tensor(np.arange(1, num_params + 1) / 10, requires_grad=True)\r\n\r\n N = len(params)\r\n\r\n def cost(x, cache):\r\n with qml.tape.JacobianTape() as tape:\r\n qml.RX(x[0], wires=[0])\r\n qml.RY(x[1], wires=[1])\r\n\r\n for i in range(2, num_params):\r\n qml.RZ(x[i], wires=[i % 2])\r\n\r\n qml.CNOT(wires=[0, 1])\r\n qml.var(qml.PauliZ(0) @ qml.PauliX(1))\r\n\r\n return execute(\r\n [tape], dev, gradient_fn=param_shift, cache=cache, interface=\"torch\", max_diff=2\r\n )[0]\r\n\r\n # No caching: number of executions is not ideal\r\n hess1 = torch.autograd.functional.hessian(lambda x: cost(x, cache=None), params)\r\n\r\n if num_params == 2:\r\n # compare to theoretical result\r\n x, y, *_ = params.detach()\r\n expected = torch.tensor(\r\n [\r\n [2 * np.cos(2 * x) * np.sin(y) ** 2, np.sin(2 * x) * np.sin(2 * y)],\r\n [np.sin(2 * x) * np.sin(2 * y), -2 * np.cos(x) ** 2 * np.cos(2 * y)],\r\n ]\r\n )\r\n assert np.allclose(expected, hess1, atol=tol, rtol=0)\r\n\r\n expected_runs = 1 # forward pass\r\n expected_runs += 2 * N # Jacobian\r\n expected_runs += 4 * N + 1 # Hessian diagonal\r\n expected_runs += 4 * N ** 2 # Hessian off-diagonal\r\n assert dev.num_executions == expected_runs\r\n\r\n # Use caching: number of executions is ideal\r\n dev._num_executions = 0\r\n hess2 = torch.autograd.functional.hessian(lambda x: cost(x, cache=True), params)\r\n assert np.allclose(hess1, hess2, atol=tol, rtol=0)\r\n\r\n expected_runs_ideal = 1 # forward pass\r\n expected_runs_ideal += 2 * N # Jacobian\r\n expected_runs_ideal += 2 * N + 1 # Hessian diagonal\r\n expected_runs_ideal += 4 * N * (N - 1) // 2 # Hessian off-diagonal\r\n assert dev.num_executions == expected_runs_ideal\r\n assert expected_runs_ideal < expected_runs\r\n\r\n def test_caching_adjoint_backward(self):\r\n \"\"\"Test that caching reduces the number of adjoint evaluations\r\n when mode=backward\"\"\"\r\n dev = qml.device(\"default.qubit\", wires=2)\r\n params = torch.tensor([0.1, 0.2, 0.3])\r\n\r\n def cost(a, cache):\r\n with qml.tape.JacobianTape() as tape:\r\n qml.RY(a[0], wires=0)\r\n qml.RX(a[1], wires=0)\r\n qml.RY(a[2], wires=0)\r\n qml.expval(qml.PauliZ(0))\r\n qml.expval(qml.PauliZ(1))\r\n\r\n return execute(\r\n [tape],\r\n dev,\r\n gradient_fn=\"device\",\r\n cache=cache,\r\n mode=\"backward\",\r\n gradient_kwargs={\"method\": \"adjoint_jacobian\"},\r\n interface=\"torch\",\r\n )[0]\r\n\r\n # Without caching, 3 evaluations are required.\r\n # 1 for the forward pass, and one per output dimension\r\n # on the backward pass.\r\n torch.autograd.functional.jacobian(lambda x: cost(x, cache=None), params)\r\n assert dev.num_executions == 3\r\n\r\n # With caching, only 2 evaluations are required. 
One\r\n # for the forward pass, and one for the backward pass.\r\n dev._num_executions = 0\r\n torch.autograd.functional.jacobian(lambda x: cost(x, cache=True), params)\r\n assert dev.num_executions == 2\r\n\r\n\r\ntorch_devices = [None]\r\n\r\nif torch.cuda.is_available():\r\n torch_devices.append(torch.device(\"cuda\"))\r\n\r\n\r\nexecute_kwargs = [\r\n {\"gradient_fn\": param_shift, \"interface\": \"torch\"},\r\n {\r\n \"gradient_fn\": \"device\",\r\n \"mode\": \"forward\",\r\n \"gradient_kwargs\": {\"method\": \"adjoint_jacobian\", \"use_device_state\": False},\r\n \"interface\": \"torch\",\r\n },\r\n {\r\n \"gradient_fn\": \"device\",\r\n \"mode\": \"forward\",\r\n \"gradient_kwargs\": {\"method\": \"adjoint_jacobian\", \"use_device_state\": True},\r\n \"interface\": \"torch\",\r\n },\r\n {\r\n \"gradient_fn\": \"device\",\r\n \"mode\": \"backward\",\r\n \"gradient_kwargs\": {\"method\": \"adjoint_jacobian\"},\r\n \"interface\": \"torch\",\r\n },\r\n]\r\n\r\n\r\[email protected](\"torch_device\", torch_devices)\r\[email protected](\"execute_kwargs\", execute_kwargs)\r\nclass TestTorchExecuteIntegration:\r\n \"\"\"Test the torch interface execute function\r\n integrates well for both forward and backward execution\"\"\"\r\n\r\n def test_execution(self, torch_device, execute_kwargs):\r\n \"\"\"Test that the execute function produces results with the expected shapes\"\"\"\r\n dev = qml.device(\"default.qubit\", wires=1)\r\n a = torch.tensor(0.1, requires_grad=True, device=torch_device)\r\n b = torch.tensor(0.2, requires_grad=False, device=torch_device)\r\n\r\n with qml.tape.JacobianTape() as tape1:\r\n qml.RY(a, wires=0)\r\n qml.RX(b, wires=0)\r\n qml.expval(qml.PauliZ(0))\r\n\r\n with qml.tape.JacobianTape() as tape2:\r\n qml.RY(a, wires=0)\r\n qml.RX(b, wires=0)\r\n qml.expval(qml.PauliZ(0))\r\n\r\n res = execute([tape1, tape2], dev, **execute_kwargs)\r\n\r\n assert len(res) == 2\r\n assert res[0].shape == (1,)\r\n assert res[1].shape == (1,)\r\n\r\n def test_scalar_jacobian(self, torch_device, execute_kwargs, tol):\r\n \"\"\"Test scalar jacobian calculation by comparing two types of pipelines\"\"\"\r\n a = torch.tensor(0.1, requires_grad=True, dtype=torch.float64, device=torch_device)\r\n dev = qml.device(\"default.qubit\", wires=2)\r\n\r\n with qml.tape.JacobianTape() as tape:\r\n qml.RY(a, wires=0)\r\n qml.expval(qml.PauliZ(0))\r\n\r\n res = execute([tape], dev, **execute_kwargs)[0]\r\n res.backward()\r\n\r\n # compare to backprop gradient\r\n def cost(a):\r\n with qml.tape.QuantumTape() as tape:\r\n qml.RY(a, wires=0)\r\n qml.expval(qml.PauliZ(0))\r\n\r\n dev = qml.device(\"default.qubit.autograd\", wires=2)\r\n return dev.batch_execute([tape])[0]\r\n\r\n expected = qml.grad(cost)(0.1)\r\n assert torch.allclose(a.grad, torch.tensor(expected, device=torch_device), atol=tol, rtol=0)\r\n\r\n def test_jacobian(self, torch_device, execute_kwargs, tol):\r\n \"\"\"Test jacobian calculation by checking against analytic values\"\"\"\r\n a_val = 0.1\r\n b_val = 0.2\r\n\r\n a = torch.tensor(a_val, requires_grad=True, device=torch_device)\r\n b = torch.tensor(b_val, requires_grad=True, device=torch_device)\r\n\r\n dev = qml.device(\"default.qubit\", wires=2)\r\n\r\n with qml.tape.JacobianTape() as tape:\r\n qml.RZ(torch.tensor(0.543, device=torch_device), wires=0)\r\n qml.RY(a, wires=0)\r\n qml.RX(b, wires=1)\r\n qml.CNOT(wires=[0, 1])\r\n qml.expval(qml.PauliZ(0))\r\n qml.expval(qml.PauliY(1))\r\n\r\n res = execute([tape], dev, **execute_kwargs)[0]\r\n assert tape.trainable_params == [1, 
2]\r\n\r\n assert isinstance(res, torch.Tensor)\r\n assert res.shape == (2,)\r\n\r\n expected = torch.tensor(\r\n [np.cos(a_val), -np.cos(a_val) * np.sin(b_val)], device=torch_device\r\n )\r\n assert torch.allclose(res.detach(), expected, atol=tol, rtol=0)\r\n\r\n loss = torch.sum(res)\r\n\r\n loss.backward()\r\n expected = torch.tensor(\r\n [-np.sin(a_val) + np.sin(a_val) * np.sin(b_val), -np.cos(a_val) * np.cos(b_val)],\r\n dtype=a.dtype,\r\n device=torch_device,\r\n )\r\n assert torch.allclose(a.grad, expected[0], atol=tol, rtol=0)\r\n assert torch.allclose(b.grad, expected[1], atol=tol, rtol=0)\r\n\r\n def test_tape_no_parameters(self, torch_device, execute_kwargs, tol):\r\n \"\"\"Test that a tape with no parameters is correctly\r\n ignored during the gradient computation\"\"\"\r\n dev = qml.device(\"default.qubit\", wires=1)\r\n params = torch.tensor([0.1, 0.2], requires_grad=True, device=torch_device)\r\n x, y = params.detach()\r\n\r\n with qml.tape.JacobianTape() as tape1:\r\n qml.Hadamard(0)\r\n qml.expval(qml.PauliX(0))\r\n\r\n with qml.tape.JacobianTape() as tape2:\r\n qml.RY(0.5, wires=0)\r\n qml.expval(qml.PauliZ(0))\r\n\r\n with qml.tape.JacobianTape() as tape3:\r\n qml.RY(params[0], wires=0)\r\n qml.RX(params[1], wires=0)\r\n qml.expval(qml.PauliZ(0))\r\n\r\n res = sum(execute([tape1, tape2, tape3], dev, **execute_kwargs))\r\n expected = torch.tensor(\r\n 1 + np.cos(0.5) + torch.cos(x) * torch.cos(y), dtype=res.dtype, device=res.device\r\n )\r\n\r\n assert torch.allclose(res, expected, atol=tol, rtol=0)\r\n\r\n res.backward()\r\n grad = params.grad.detach()\r\n expected = torch.tensor(\r\n [-torch.cos(y) * torch.sin(x), -torch.cos(x) * torch.sin(y)],\r\n dtype=grad.dtype,\r\n device=grad.device,\r\n )\r\n assert torch.allclose(grad, expected, atol=tol, rtol=0)\r\n\r\n def test_reusing_quantum_tape(self, torch_device, execute_kwargs, tol):\r\n \"\"\"Test re-using a quantum tape by passing new parameters\"\"\"\r\n a = torch.tensor(0.1, requires_grad=True, device=torch_device)\r\n b = torch.tensor(0.2, requires_grad=True, device=torch_device)\r\n\r\n dev = qml.device(\"default.qubit\", wires=2)\r\n\r\n with qml.tape.JacobianTape() as tape:\r\n qml.RY(a, wires=0)\r\n qml.RX(b, wires=1)\r\n qml.CNOT(wires=[0, 1])\r\n qml.expval(qml.PauliZ(0))\r\n qml.expval(qml.PauliY(1))\r\n\r\n assert tape.trainable_params == [0, 1]\r\n\r\n res = execute([tape], dev, **execute_kwargs)[0]\r\n loss = torch.sum(res)\r\n loss.backward()\r\n\r\n a_val = 0.54\r\n b_val = 0.8\r\n a = torch.tensor(a_val, requires_grad=True, device=torch_device)\r\n b = torch.tensor(b_val, requires_grad=True, device=torch_device)\r\n\r\n tape.set_parameters([2 * a, b])\r\n res2 = execute([tape], dev, **execute_kwargs)[0]\r\n\r\n expected = torch.tensor(\r\n [np.cos(2 * a_val), -np.cos(2 * a_val) * np.sin(b_val)],\r\n device=torch_device,\r\n dtype=res2.dtype,\r\n )\r\n assert torch.allclose(res2.detach(), expected, atol=tol, rtol=0)\r\n\r\n loss = torch.sum(res2)\r\n loss.backward()\r\n\r\n expected = torch.tensor(\r\n [\r\n -2 * np.sin(2 * a_val) + 2 * np.sin(2 * a_val) * np.sin(b_val),\r\n -np.cos(2 * a_val) * np.cos(b_val),\r\n ],\r\n dtype=a.dtype,\r\n device=torch_device,\r\n )\r\n\r\n assert torch.allclose(a.grad, expected[0], atol=tol, rtol=0)\r\n assert torch.allclose(b.grad, expected[1], atol=tol, rtol=0)\r\n\r\n def test_classical_processing(self, torch_device, execute_kwargs, tol):\r\n \"\"\"Test the classical processing of gate parameters within the quantum tape\"\"\"\r\n p_val = [0.1, 0.2]\r\n params = 
torch.tensor(p_val, requires_grad=True, device=torch_device)\r\n\r\n dev = qml.device(\"default.qubit\", wires=1)\r\n\r\n with qml.tape.JacobianTape() as tape:\r\n qml.RY(params[0] * params[1], wires=0)\r\n qml.RZ(0.2, wires=0)\r\n qml.RX(params[1] + params[1] ** 2 + torch.sin(params[0]), wires=0)\r\n qml.expval(qml.PauliZ(0))\r\n\r\n res = execute([tape], dev, **execute_kwargs)[0]\r\n\r\n assert tape.trainable_params == [0, 2]\r\n\r\n tape_params = torch.tensor([i.detach() for i in tape.get_parameters()], device=torch_device)\r\n expected = torch.tensor(\r\n [p_val[0] * p_val[1], p_val[1] + p_val[1] ** 2 + np.sin(p_val[0])],\r\n dtype=tape_params.dtype,\r\n device=torch_device,\r\n )\r\n\r\n assert torch.allclose(\r\n tape_params,\r\n expected,\r\n atol=tol,\r\n rtol=0,\r\n )\r\n\r\n res.backward()\r\n\r\n assert isinstance(params.grad, torch.Tensor)\r\n assert params.shape == (2,)\r\n\r\n def test_no_trainable_parameters(self, torch_device, execute_kwargs, tol):\r\n \"\"\"Test evaluation and Jacobian if there are no trainable parameters\"\"\"\r\n dev = qml.device(\"default.qubit\", wires=2)\r\n\r\n with qml.tape.JacobianTape() as tape:\r\n qml.RY(0.2, wires=0)\r\n qml.RX(torch.tensor(0.1, device=torch_device), wires=0)\r\n qml.CNOT(wires=[0, 1])\r\n qml.expval(qml.PauliZ(0))\r\n qml.expval(qml.PauliZ(1))\r\n\r\n res = execute([tape], dev, **execute_kwargs)[0]\r\n assert tape.trainable_params == []\r\n\r\n assert res.shape == (2,)\r\n assert isinstance(res, torch.Tensor)\r\n\r\n with pytest.raises(\r\n RuntimeError,\r\n match=\"element 0 of tensors does not require grad and does not have a grad_fn\",\r\n ):\r\n res.backward()\r\n\r\n @pytest.mark.parametrize(\r\n \"U\", [torch.tensor([[0.0, 1.0], [1.0, 0.0]]), np.array([[0.0, 1.0], [1.0, 0.0]])]\r\n )\r\n def test_matrix_parameter(self, torch_device, U, execute_kwargs, tol):\r\n \"\"\"Test that the torch interface works correctly\r\n with a matrix parameter\"\"\"\r\n a_val = 0.1\r\n a = torch.tensor(a_val, requires_grad=True, device=torch_device)\r\n\r\n if isinstance(U, torch.Tensor) and torch_device is not None:\r\n U = U.to(torch_device)\r\n\r\n dev = qml.device(\"default.qubit\", wires=2)\r\n\r\n with qml.tape.JacobianTape() as tape:\r\n qml.QubitUnitary(U, wires=0)\r\n qml.RY(a, wires=0)\r\n qml.expval(qml.PauliZ(0))\r\n\r\n res = execute([tape], dev, **execute_kwargs)[0]\r\n assert tape.trainable_params == [1]\r\n\r\n expected = torch.tensor(-np.cos(a_val), dtype=res.dtype, device=torch_device)\r\n assert torch.allclose(res.detach(), expected, atol=tol, rtol=0)\r\n\r\n res.backward()\r\n expected = torch.tensor([np.sin(a_val)], dtype=a.grad.dtype, device=torch_device)\r\n assert torch.allclose(a.grad, expected, atol=tol, rtol=0)\r\n\r\n def test_differentiable_expand(self, torch_device, execute_kwargs, tol):\r\n \"\"\"Test that operation and nested tape expansion\r\n is differentiable\"\"\"\r\n\r\n class U3(qml.U3):\r\n def expand(self):\r\n tape = qml.tape.JacobianTape()\r\n theta, phi, lam = self.data\r\n wires = self.wires\r\n tape._ops += [\r\n qml.Rot(lam, theta, -lam, wires=wires),\r\n qml.PhaseShift(phi + lam, wires=wires),\r\n ]\r\n return tape\r\n\r\n tape = qml.tape.JacobianTape()\r\n\r\n dev = qml.device(\"default.qubit\", wires=1)\r\n a = np.array(0.1)\r\n p_val = [0.1, 0.2, 0.3]\r\n p = torch.tensor(p_val, requires_grad=True, device=torch_device)\r\n\r\n with tape:\r\n qml.RX(a, wires=0)\r\n U3(p[0], p[1], p[2], wires=0)\r\n qml.expval(qml.PauliX(0))\r\n\r\n tape = tape.expand()\r\n res = execute([tape], dev, 
**execute_kwargs)[0]\r\n\r\n assert tape.trainable_params == [1, 2, 3, 4]\r\n assert [i.name for i in tape.operations] == [\"RX\", \"Rot\", \"PhaseShift\"]\r\n\r\n tape_params = torch.tensor([i.detach() for i in tape.get_parameters()], device=torch_device)\r\n expected = torch.tensor(\r\n [p_val[2], p_val[0], -p_val[2], p_val[1] + p_val[2]], device=torch_device\r\n )\r\n assert torch.allclose(tape_params, expected, atol=tol, rtol=0)\r\n\r\n expected = torch.tensor(\r\n np.cos(a) * np.cos(p_val[1]) * np.sin(p_val[0])\r\n + np.sin(a)\r\n * (\r\n np.cos(p_val[2]) * np.sin(p_val[1])\r\n + np.cos(p_val[0]) * np.cos(p_val[1]) * np.sin(p_val[2])\r\n ),\r\n dtype=res.dtype,\r\n device=torch_device,\r\n )\r\n assert torch.allclose(res.detach(), expected, atol=tol, rtol=0)\r\n\r\n res.backward()\r\n expected = torch.tensor(\r\n [\r\n np.cos(p_val[1])\r\n * (np.cos(a) * np.cos(p_val[0]) - np.sin(a) * np.sin(p_val[0]) * np.sin(p_val[2])),\r\n np.cos(p_val[1]) * np.cos(p_val[2]) * np.sin(a)\r\n - np.sin(p_val[1])\r\n * (np.cos(a) * np.sin(p_val[0]) + np.cos(p_val[0]) * np.sin(a) * np.sin(p_val[2])),\r\n np.sin(a)\r\n * (\r\n np.cos(p_val[0]) * np.cos(p_val[1]) * np.cos(p_val[2])\r\n - np.sin(p_val[1]) * np.sin(p_val[2])\r\n ),\r\n ],\r\n dtype=p.grad.dtype,\r\n device=torch_device,\r\n )\r\n assert torch.allclose(p.grad, expected, atol=tol, rtol=0)\r\n\r\n def test_probability_differentiation(self, torch_device, execute_kwargs, tol):\r\n \"\"\"Tests correct output shape and evaluation for a tape\r\n with prob outputs\"\"\"\r\n\r\n if execute_kwargs[\"gradient_fn\"] == \"device\":\r\n pytest.skip(\"Adjoint differentiation does not yet support probabilities\")\r\n\r\n dev = qml.device(\"default.qubit\", wires=2)\r\n x_val = 0.543\r\n y_val = -0.654\r\n x = torch.tensor(x_val, requires_grad=True, device=torch_device)\r\n y = torch.tensor(y_val, requires_grad=True, device=torch_device)\r\n\r\n with qml.tape.JacobianTape() as tape:\r\n qml.RX(x, wires=[0])\r\n qml.RY(y, wires=[1])\r\n qml.CNOT(wires=[0, 1])\r\n qml.probs(wires=[0])\r\n qml.probs(wires=[1])\r\n\r\n res = execute([tape], dev, **execute_kwargs)[0]\r\n\r\n expected = torch.tensor(\r\n [\r\n [np.cos(x_val / 2) ** 2, np.sin(x_val / 2) ** 2],\r\n [(1 + np.cos(x_val) * np.cos(y_val)) / 2, (1 - np.cos(x_val) * np.cos(y_val)) / 2],\r\n ],\r\n dtype=res.dtype,\r\n device=torch_device,\r\n )\r\n assert torch.allclose(res.detach(), expected, atol=tol, rtol=0)\r\n\r\n loss = torch.sum(res)\r\n loss.backward()\r\n expected = torch.tensor(\r\n [\r\n -np.sin(x_val) / 2\r\n + np.sin(x_val) / 2\r\n - np.sin(x_val) * np.cos(y_val) / 2\r\n + np.cos(y_val) * np.sin(x_val) / 2,\r\n -np.cos(x_val) * np.sin(y_val) / 2 + np.cos(x_val) * np.sin(y_val) / 2,\r\n ],\r\n dtype=x.grad.dtype,\r\n device=torch_device,\r\n )\r\n assert torch.allclose(x.grad, expected[0], atol=tol, rtol=0)\r\n assert torch.allclose(y.grad, expected[1], atol=tol, rtol=0)\r\n\r\n def test_ragged_differentiation(self, torch_device, execute_kwargs, tol):\r\n \"\"\"Tests correct output shape and evaluation for a tape\r\n with prob and expval outputs\"\"\"\r\n if execute_kwargs[\"gradient_fn\"] == \"device\":\r\n pytest.skip(\"Adjoint differentiation does not yet support probabilities\")\r\n\r\n dev = qml.device(\"default.qubit\", wires=2)\r\n x_val = 0.543\r\n y_val = -0.654\r\n x = torch.tensor(x_val, requires_grad=True, device=torch_device)\r\n y = torch.tensor(y_val, requires_grad=True, device=torch_device)\r\n\r\n with qml.tape.JacobianTape() as tape:\r\n qml.RX(x, wires=[0])\r\n qml.RY(y, 
wires=[1])\r\n qml.CNOT(wires=[0, 1])\r\n qml.expval(qml.PauliZ(0))\r\n qml.probs(wires=[1])\r\n\r\n res = execute([tape], dev, **execute_kwargs)[0]\r\n\r\n expected = torch.tensor(\r\n [\r\n np.cos(x_val),\r\n (1 + np.cos(x_val) * np.cos(y_val)) / 2,\r\n (1 - np.cos(x_val) * np.cos(y_val)) / 2,\r\n ],\r\n dtype=res.dtype,\r\n device=torch_device,\r\n )\r\n assert torch.allclose(res.detach(), expected, atol=tol, rtol=0)\r\n\r\n loss = torch.sum(res)\r\n loss.backward()\r\n expected = torch.tensor(\r\n [\r\n -np.sin(x_val)\r\n + -np.sin(x_val) * np.cos(y_val) / 2\r\n + np.cos(y_val) * np.sin(x_val) / 2,\r\n -np.cos(x_val) * np.sin(y_val) / 2 + np.cos(x_val) * np.sin(y_val) / 2,\r\n ],\r\n dtype=x.grad.dtype,\r\n device=torch_device,\r\n )\r\n assert torch.allclose(x.grad, expected[0], atol=tol, rtol=0)\r\n assert torch.allclose(y.grad, expected[1], atol=tol, rtol=0)\r\n\r\n def test_sampling(self, torch_device, execute_kwargs):\r\n \"\"\"Test sampling works as expected\"\"\"\r\n if execute_kwargs[\"gradient_fn\"] == \"device\" and execute_kwargs[\"mode\"] == \"forward\":\r\n pytest.skip(\"Adjoint differentiation does not support samples\")\r\n\r\n dev = qml.device(\"default.qubit\", wires=2, shots=10)\r\n\r\n with qml.tape.JacobianTape() as tape:\r\n qml.Hadamard(wires=[0])\r\n qml.CNOT(wires=[0, 1])\r\n qml.sample(qml.PauliZ(0))\r\n qml.sample(qml.PauliX(1))\r\n\r\n res = execute([tape], dev, **execute_kwargs)[0]\r\n\r\n assert res.shape == (2, 10)\r\n assert isinstance(res, torch.Tensor)\r\n\r\n def test_sampling_expval(self, torch_device, execute_kwargs):\r\n \"\"\"Test sampling works as expected if combined with expectation values\"\"\"\r\n if execute_kwargs[\"gradient_fn\"] == \"device\" and execute_kwargs[\"mode\"] == \"forward\":\r\n pytest.skip(\"Adjoint differentiation does not support samples\")\r\n\r\n dev = qml.device(\"default.qubit\", wires=2, shots=10)\r\n\r\n with qml.tape.JacobianTape() as tape:\r\n qml.Hadamard(wires=[0])\r\n qml.CNOT(wires=[0, 1])\r\n qml.sample(qml.PauliZ(0))\r\n qml.expval(qml.PauliX(1))\r\n\r\n res = execute([tape], dev, **execute_kwargs)[0]\r\n\r\n assert len(res) == 2\r\n assert isinstance(res, tuple)\r\n assert res[0].shape == (10,)\r\n assert isinstance(res[0], torch.Tensor)\r\n assert isinstance(res[1], torch.Tensor)\r\n\r\n def test_sampling_gradient_error(self, torch_device, execute_kwargs):\r\n \"\"\"Test differentiating a tape with sampling results in an error\"\"\"\r\n if execute_kwargs[\"gradient_fn\"] == \"device\" and execute_kwargs[\"mode\"] == \"forward\":\r\n pytest.skip(\"Adjoint differentiation does not support samples\")\r\n\r\n dev = qml.device(\"default.qubit\", wires=1, shots=10)\r\n\r\n x = torch.tensor(0.65, requires_grad=True)\r\n\r\n with qml.tape.JacobianTape() as tape:\r\n qml.RX(x, wires=[0])\r\n qml.sample(qml.PauliZ(0))\r\n\r\n res = execute([tape], dev, **execute_kwargs)[0]\r\n\r\n with pytest.raises(\r\n RuntimeError,\r\n match=\"element 0 of tensors does not require grad and does not have a grad_fn\",\r\n ):\r\n res.backward()\r\n\r\n def test_repeated_application_after_expand(self, torch_device, execute_kwargs, tol):\r\n \"\"\"Test that the Torch interface continues to work after\r\n tape expansions\"\"\"\r\n n_qubits = 2\r\n dev = qml.device(\"default.qubit\", wires=n_qubits)\r\n\r\n weights = torch.ones((3,))\r\n\r\n with qml.tape.JacobianTape() as tape:\r\n qml.U3(*weights, wires=0)\r\n qml.expval(qml.PauliZ(wires=0))\r\n\r\n tape = tape.expand()\r\n res1 = execute([tape], dev, **execute_kwargs)[0]\r\n\r\n\r\[email 
protected](\"torch_device\", torch_devices)\r\nclass TestHigherOrderDerivatives:\r\n \"\"\"Test that the torch execute function can be differentiated\"\"\"\r\n\r\n @pytest.mark.parametrize(\r\n \"params\",\r\n [\r\n torch.tensor([0.543, -0.654], requires_grad=True),\r\n torch.tensor([0, -0.654], requires_grad=True),\r\n torch.tensor([-2.0, 0], requires_grad=True),\r\n ],\r\n )\r\n def test_parameter_shift_hessian(self, torch_device, params, tol):\r\n \"\"\"Tests that the output of the parameter-shift transform\r\n can be differentiated using torch, yielding second derivatives.\"\"\"\r\n dev = qml.device(\"default.qubit\", wires=2)\r\n params = torch.tensor([0.543, -0.654], requires_grad=True, dtype=torch.float64)\r\n\r\n def cost_fn(x):\r\n with qml.tape.JacobianTape() as tape1:\r\n qml.RX(x[0], wires=[0])\r\n qml.RY(x[1], wires=[1])\r\n qml.CNOT(wires=[0, 1])\r\n qml.var(qml.PauliZ(0) @ qml.PauliX(1))\r\n\r\n with qml.tape.JacobianTape() as tape2:\r\n qml.RX(x[0], wires=0)\r\n qml.RY(x[0], wires=1)\r\n qml.CNOT(wires=[0, 1])\r\n qml.probs(wires=1)\r\n\r\n result = execute(\r\n [tape1, tape2], dev, gradient_fn=param_shift, interface=\"torch\", max_diff=2\r\n )\r\n return result[0] + result[1][0, 0]\r\n\r\n res = cost_fn(params)\r\n x, y = params.detach()\r\n expected = torch.as_tensor(0.5 * (3 + np.cos(x) ** 2 * np.cos(2 * y)))\r\n assert torch.allclose(res, expected, atol=tol, rtol=0)\r\n\r\n res.backward()\r\n expected = torch.tensor(\r\n [-np.cos(x) * np.cos(2 * y) * np.sin(x), -np.cos(x) ** 2 * np.sin(2 * y)]\r\n )\r\n assert torch.allclose(params.grad.detach(), expected, atol=tol, rtol=0)\r\n\r\n res = torch.autograd.functional.hessian(cost_fn, params)\r\n expected = torch.tensor(\r\n [\r\n [-np.cos(2 * x) * np.cos(2 * y), np.sin(2 * x) * np.sin(2 * y)],\r\n [np.sin(2 * x) * np.sin(2 * y), -2 * np.cos(x) ** 2 * np.cos(2 * y)],\r\n ]\r\n )\r\n assert torch.allclose(res, expected, atol=tol, rtol=0)\r\n\r\n def test_hessian_vector_valued(self, torch_device, tol):\r\n \"\"\"Test hessian calculation of a vector valued tape\"\"\"\r\n dev = qml.device(\"default.qubit\", wires=1)\r\n\r\n def circuit(x):\r\n with qml.tape.JacobianTape() as tape:\r\n qml.RY(x[0], wires=0)\r\n qml.RX(x[1], wires=0)\r\n qml.probs(wires=0)\r\n\r\n return torch.stack(\r\n execute([tape], dev, gradient_fn=param_shift, interface=\"torch\", max_diff=2)\r\n )\r\n\r\n x = torch.tensor([1.0, 2.0], requires_grad=True, device=torch_device)\r\n res = circuit(x)\r\n\r\n if torch_device is not None:\r\n a, b = x.detach().cpu().numpy()\r\n else:\r\n a, b = x.detach().numpy()\r\n\r\n expected_res = torch.tensor(\r\n [\r\n 0.5 + 0.5 * np.cos(a) * np.cos(b),\r\n 0.5 - 0.5 * np.cos(a) * np.cos(b),\r\n ],\r\n dtype=res.dtype,\r\n device=torch_device,\r\n )\r\n assert torch.allclose(res.detach(), expected_res, atol=tol, rtol=0)\r\n\r\n jac_fn = lambda x: torch.autograd.functional.jacobian(circuit, x, create_graph=True)\r\n\r\n g = jac_fn(x)\r\n\r\n hess = torch.autograd.functional.jacobian(jac_fn, x)\r\n\r\n expected_g = torch.tensor(\r\n [\r\n [-0.5 * np.sin(a) * np.cos(b), -0.5 * np.cos(a) * np.sin(b)],\r\n [0.5 * np.sin(a) * np.cos(b), 0.5 * np.cos(a) * np.sin(b)],\r\n ],\r\n dtype=g.dtype,\r\n device=torch_device,\r\n )\r\n assert torch.allclose(g.detach(), expected_g, atol=tol, rtol=0)\r\n\r\n expected_hess = torch.tensor(\r\n [\r\n [\r\n [-0.5 * np.cos(a) * np.cos(b), 0.5 * np.sin(a) * np.sin(b)],\r\n [0.5 * np.sin(a) * np.sin(b), -0.5 * np.cos(a) * np.cos(b)],\r\n ],\r\n [\r\n [0.5 * np.cos(a) * np.cos(b), -0.5 * np.sin(a) * 
np.sin(b)],\r\n [-0.5 * np.sin(a) * np.sin(b), 0.5 * np.cos(a) * np.cos(b)],\r\n ],\r\n ],\r\n dtype=hess.dtype,\r\n device=torch_device,\r\n )\r\n assert torch.allclose(hess.detach(), expected_hess, atol=tol, rtol=0)\r\n\r\n def test_adjoint_hessian(self, torch_device, tol):\r\n \"\"\"Since the adjoint hessian is not a differentiable transform,\r\n higher-order derivatives are not supported.\"\"\"\r\n dev = qml.device(\"default.qubit\", wires=2)\r\n params = torch.tensor(\r\n [0.543, -0.654], requires_grad=True, dtype=torch.float64, device=torch_device\r\n )\r\n\r\n def cost_fn(x):\r\n with qml.tape.JacobianTape() as tape:\r\n qml.RX(x[0], wires=[0])\r\n qml.RY(x[1], wires=[1])\r\n qml.CNOT(wires=[0, 1])\r\n qml.expval(qml.PauliZ(0))\r\n\r\n return execute(\r\n [tape],\r\n dev,\r\n gradient_fn=\"device\",\r\n gradient_kwargs={\"method\": \"adjoint_jacobian\", \"use_device_state\": True},\r\n interface=\"torch\",\r\n )[0]\r\n\r\n res = torch.autograd.functional.hessian(cost_fn, params)\r\n expected = torch.zeros([2, 2], dtype=torch.float64, device=torch_device)\r\n assert torch.allclose(res, expected, atol=tol, rtol=0)\r\n\r\n def test_max_diff(self, torch_device, tol):\r\n \"\"\"Test that setting the max_diff parameter blocks higher-order\r\n derivatives\"\"\"\r\n dev = qml.device(\"default.qubit\", wires=2)\r\n params = torch.tensor([0.543, -0.654], requires_grad=True, dtype=torch.float64)\r\n\r\n def cost_fn(x):\r\n with qml.tape.JacobianTape() as tape1:\r\n qml.RX(x[0], wires=[0])\r\n qml.RY(x[1], wires=[1])\r\n qml.CNOT(wires=[0, 1])\r\n qml.var(qml.PauliZ(0) @ qml.PauliX(1))\r\n\r\n with qml.tape.JacobianTape() as tape2:\r\n qml.RX(x[0], wires=0)\r\n qml.RY(x[0], wires=1)\r\n qml.CNOT(wires=[0, 1])\r\n qml.probs(wires=1)\r\n\r\n result = execute(\r\n [tape1, tape2], dev, gradient_fn=param_shift, max_diff=1, interface=\"torch\"\r\n )\r\n return result[0] + result[1][0, 0]\r\n\r\n res = cost_fn(params)\r\n x, y = params.detach()\r\n expected = torch.as_tensor(0.5 * (3 + np.cos(x) ** 2 * np.cos(2 * y)))\r\n assert torch.allclose(res.to(torch_device), expected.to(torch_device), atol=tol, rtol=0)\r\n\r\n res.backward()\r\n expected = torch.tensor(\r\n [-np.cos(x) * np.cos(2 * y) * np.sin(x), -np.cos(x) ** 2 * np.sin(2 * y)]\r\n )\r\n assert torch.allclose(\r\n params.grad.detach().to(torch_device), expected.to(torch_device), atol=tol, rtol=0\r\n )\r\n\r\n res = torch.autograd.functional.hessian(cost_fn, params)\r\n expected = torch.zeros([2, 2], dtype=torch.float64)\r\n assert torch.allclose(res.to(torch_device), expected.to(torch_device), atol=tol, rtol=0)\r\n\r\n\r\nexecute_kwargs = [\r\n {\"gradient_fn\": param_shift, \"interface\": \"torch\"},\r\n {\"gradient_fn\": finite_diff, \"interface\": \"torch\"},\r\n]\r\n\r\n\r\[email protected](\"execute_kwargs\", execute_kwargs)\r\nclass TestHamiltonianWorkflows:\r\n \"\"\"Test that tapes ending with expectations\r\n of Hamiltonians provide correct results and gradients\"\"\"\r\n\r\n @pytest.fixture\r\n def cost_fn(self, execute_kwargs):\r\n \"\"\"Cost function for gradient tests\"\"\"\r\n\r\n def _cost_fn(weights, coeffs1, coeffs2, dev=None):\r\n obs1 = [qml.PauliZ(0), qml.PauliZ(0) @ qml.PauliX(1), qml.PauliY(0)]\r\n H1 = qml.Hamiltonian(coeffs1, obs1)\r\n\r\n obs2 = [qml.PauliZ(0)]\r\n H2 = qml.Hamiltonian(coeffs2, obs2)\r\n\r\n with qml.tape.JacobianTape() as tape:\r\n qml.RX(weights[0], wires=0)\r\n qml.RY(weights[1], wires=1)\r\n qml.CNOT(wires=[0, 1])\r\n qml.expval(H1)\r\n qml.expval(H2)\r\n\r\n return execute([tape], dev, 
**execute_kwargs)[0]\r\n\r\n return _cost_fn\r\n\r\n @staticmethod\r\n def cost_fn_expected(weights, coeffs1, coeffs2):\r\n \"\"\"Analytic value of cost_fn above\"\"\"\r\n a, b, c = coeffs1.detach().numpy()\r\n d = coeffs2.detach().numpy()[0]\r\n x, y = weights.detach().numpy()\r\n return [-c * np.sin(x) * np.sin(y) + np.cos(x) * (a + b * np.sin(y)), d * np.cos(x)]\r\n\r\n @staticmethod\r\n def cost_fn_jacobian(weights, coeffs1, coeffs2):\r\n \"\"\"Analytic jacobian of cost_fn above\"\"\"\r\n a, b, c = coeffs1.detach().numpy()\r\n d = coeffs2.detach().numpy()[0]\r\n x, y = weights.detach().numpy()\r\n return np.array(\r\n [\r\n [\r\n -c * np.cos(x) * np.sin(y) - np.sin(x) * (a + b * np.sin(y)),\r\n b * np.cos(x) * np.cos(y) - c * np.cos(y) * np.sin(x),\r\n np.cos(x),\r\n np.cos(x) * np.sin(y),\r\n -(np.sin(x) * np.sin(y)),\r\n 0,\r\n ],\r\n [-d * np.sin(x), 0, 0, 0, 0, np.cos(x)],\r\n ]\r\n )\r\n\r\n def test_multiple_hamiltonians_not_trainable(self, cost_fn, execute_kwargs, tol):\r\n coeffs1 = torch.tensor([0.1, 0.2, 0.3], requires_grad=False, dtype=torch.float64)\r\n coeffs2 = torch.tensor([0.7], requires_grad=False, dtype=torch.float64)\r\n weights = torch.tensor([0.4, 0.5], requires_grad=True, dtype=torch.float64)\r\n dev = qml.device(\"default.qubit\", wires=2)\r\n\r\n res = cost_fn(weights, coeffs1, coeffs2, dev=dev)\r\n expected = self.cost_fn_expected(weights, coeffs1, coeffs2)\r\n assert np.allclose(res.detach(), expected, atol=tol, rtol=0)\r\n\r\n res = torch.hstack(\r\n torch.autograd.functional.jacobian(\r\n lambda *x: cost_fn(*x, dev=dev), (weights, coeffs1, coeffs2)\r\n )\r\n )\r\n expected = self.cost_fn_jacobian(weights, coeffs1, coeffs2)\r\n assert np.allclose(res.detach(), expected, atol=tol, rtol=0)\r\n\r\n def test_multiple_hamiltonians_trainable(self, cost_fn, execute_kwargs, tol):\r\n coeffs1 = torch.tensor([0.1, 0.2, 0.3], requires_grad=True, dtype=torch.float64)\r\n coeffs2 = torch.tensor([0.7], requires_grad=True, dtype=torch.float64)\r\n weights = torch.tensor([0.4, 0.5], requires_grad=True, dtype=torch.float64)\r\n dev = qml.device(\"default.qubit\", wires=2)\r\n\r\n res = cost_fn(weights, coeffs1, coeffs2, dev=dev)\r\n expected = self.cost_fn_expected(weights, coeffs1, coeffs2)\r\n assert np.allclose(res.detach(), expected, atol=tol, rtol=0)\r\n\r\n res = torch.hstack(\r\n torch.autograd.functional.jacobian(\r\n lambda *x: cost_fn(*x, dev=dev), (weights, coeffs1, coeffs2)\r\n )\r\n )\r\n expected = self.cost_fn_jacobian(weights, coeffs1, coeffs2)\r\n assert np.allclose(res.detach(), expected, atol=tol, rtol=0)\r\n",
"# Copyright 2018-2021 Xanadu Quantum Technologies Inc.\r\n\r\n# Licensed under the Apache License, Version 2.0 (the \"License\");\r\n# you may not use this file except in compliance with the License.\r\n# You may obtain a copy of the License at\r\n\r\n# http://www.apache.org/licenses/LICENSE-2.0\r\n\r\n# Unless required by applicable law or agreed to in writing, software\r\n# distributed under the License is distributed on an \"AS IS\" BASIS,\r\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\r\n# See the License for the specific language governing permissions and\r\n# limitations under the License.\r\n\"\"\"\r\nThis module provides the PennyLane :class:`~.tensor` class.\r\n\"\"\"\r\nimport numpy as onp\r\n\r\nfrom autograd import numpy as _np\r\nfrom autograd.extend import primitive, defvjp\r\n\r\nfrom autograd.tracer import Box\r\nfrom autograd.numpy.numpy_boxes import ArrayBox\r\nfrom autograd.numpy.numpy_vspaces import ComplexArrayVSpace, ArrayVSpace\r\nfrom autograd.core import VSpace\r\n\r\n\r\n__doc__ = \"NumPy with automatic differentiation support, provided by Autograd and PennyLane.\"\r\n\r\n# Hotfix since _np.asarray doesn't have a gradient rule defined.\r\n@primitive\r\ndef asarray(vals, *args, **kwargs):\r\n \"\"\"Gradient supporting autograd asarray\"\"\"\r\n if isinstance(vals, (onp.ndarray, _np.ndarray)):\r\n return _np.asarray(vals, *args, **kwargs)\r\n return _np.array(vals, *args, **kwargs)\r\n\r\n\r\ndef asarray_gradmaker(ans, *args, **kwargs):\r\n \"\"\"Gradient maker for asarray\"\"\"\r\n del ans, args, kwargs\r\n return lambda g: g\r\n\r\n\r\ndefvjp(asarray, asarray_gradmaker, argnums=(0,))\r\n\r\n\r\nclass tensor(_np.ndarray):\r\n \"\"\"Constructs a PennyLane tensor for use with Autograd QNodes.\r\n\r\n The ``tensor`` class is a subclass of ``numpy.ndarray``,\r\n providing the same multidimensional, homogeneous data-structure\r\n of fixed-size items, with an additional flag to indicate to PennyLane\r\n whether the contained data is differentiable or not.\r\n\r\n .. warning::\r\n\r\n PennyLane ``tensor`` objects are only used as part of the Autograd QNode\r\n interface. If using another machine learning library such as PyTorch or\r\n TensorFlow, use their built-in ``tf.Variable`` and ``torch.tensor`` classes\r\n instead.\r\n\r\n .. warning::\r\n\r\n Tensors should be constructed using standard array construction functions\r\n provided as part of PennyLane's NumPy implementation, including\r\n ``np.array``, ``np.zeros`` or ``np.empty``.\r\n\r\n The parameters given here refer to a low-level class\r\n for instantiating tensors.\r\n\r\n\r\n Args:\r\n input_array (array_like): Any data structure in any form that can be converted to\r\n an array. This includes lists, lists of tuples, tuples, tuples of tuples,\r\n tuples of lists and ndarrays.\r\n requires_grad (bool): whether the tensor supports differentiation\r\n\r\n **Example**\r\n\r\n The trainability of a tensor can be set on construction via the\r\n ``requires_grad`` keyword argument,\r\n\r\n >>> from pennylane import numpy as np\r\n >>> x = np.array([0, 1, 2], requires_grad=True)\r\n >>> x\r\n tensor([0, 1, 2], requires_grad=True)\r\n\r\n or in-place by modifying the ``requires_grad`` attribute:\r\n\r\n >>> x.requires_grad = False\r\n tensor([0, 1, 2], requires_grad=False)\r\n\r\n Since tensors are subclasses of ``np.ndarray``, they can be provided as arguments\r\n to any PennyLane-wrapped NumPy function:\r\n\r\n >>> np.sin(x)\r\n tensor([0. 
, 0.84147098, 0.90929743], requires_grad=True)\r\n\r\n When composing functions of multiple tensors, if at least one input tensor is differentiable,\r\n then the output will also be differentiable:\r\n\r\n >>> x = np.array([0, 1, 2], requires_grad=False)\r\n >>> y = np.zeros([3], requires_grad=True)\r\n >>> np.vstack([x, y])\r\n tensor([[0., 1., 2.],\r\n [0., 0., 0.]], requires_grad=True)\r\n \"\"\"\r\n\r\n def __new__(cls, input_array, *args, requires_grad=True, **kwargs):\r\n obj = asarray(input_array, *args, **kwargs)\r\n\r\n if isinstance(obj, onp.ndarray):\r\n obj = obj.view(cls)\r\n obj.requires_grad = requires_grad\r\n\r\n return obj\r\n\r\n def __array_finalize__(self, obj):\r\n # pylint: disable=attribute-defined-outside-init\r\n if obj is None: # pragma: no cover\r\n return\r\n\r\n self.requires_grad = getattr(obj, \"requires_grad\", None)\r\n\r\n def __repr__(self):\r\n string = super().__repr__()\r\n return string[:-1] + \", requires_grad={})\".format(self.requires_grad)\r\n\r\n def __array_wrap__(self, obj):\r\n out_arr = tensor(obj, requires_grad=self.requires_grad)\r\n return super().__array_wrap__(out_arr)\r\n\r\n def __array_ufunc__(self, ufunc, method, *inputs, **kwargs):\r\n # pylint: disable=no-member,attribute-defined-outside-init\r\n\r\n # unwrap any outputs the ufunc might have\r\n outputs = [i.view(onp.ndarray) for i in kwargs.get(\"out\", ())]\r\n\r\n if outputs:\r\n # Insert the unwrapped outputs into the keyword\r\n # args dictionary, to be passed to ndarray.__array_ufunc__\r\n outputs = tuple(outputs)\r\n kwargs[\"out\"] = outputs\r\n else:\r\n # If the ufunc has no ouputs, we simply\r\n # create a tuple containing None for all potential outputs.\r\n outputs = (None,) * ufunc.nout\r\n\r\n # unwrap the input arguments to the ufunc\r\n args = [i.unwrap() if hasattr(i, \"unwrap\") else i for i in inputs]\r\n\r\n # call the ndarray.__array_ufunc__ method to compute the result\r\n # of the vectorized ufunc\r\n res = super().__array_ufunc__(ufunc, method, *args, **kwargs)\r\n\r\n if ufunc.nout == 1:\r\n res = (res,)\r\n\r\n # construct a list of ufunc outputs to return\r\n ufunc_output = [\r\n (onp.asarray(result) if output is None else output)\r\n for result, output in zip(res, outputs)\r\n ]\r\n\r\n # if any of the inputs were trainable, the output is also trainable\r\n requires_grad = any(\r\n isinstance(x, onp.ndarray) and getattr(x, \"requires_grad\", True) for x in inputs\r\n )\r\n\r\n # Iterate through the ufunc outputs and convert each to a PennyLane tensor.\r\n # We also correctly set the requires_grad attribute.\r\n for i in range(len(ufunc_output)): # pylint: disable=consider-using-enumerate\r\n ufunc_output[i] = tensor(ufunc_output[i], requires_grad=requires_grad)\r\n\r\n if len(ufunc_output) == 1:\r\n # the ufunc has a single output so return a single tensor\r\n return ufunc_output[0]\r\n\r\n # otherwise we must return a tuple of tensors\r\n return tuple(ufunc_output)\r\n\r\n def __getitem__(self, *args, **kwargs):\r\n item = super().__getitem__(*args, **kwargs)\r\n\r\n if not isinstance(item, tensor):\r\n item = tensor(item, requires_grad=self.requires_grad)\r\n\r\n return item\r\n\r\n def __hash__(self):\r\n if self.ndim == 0:\r\n # Allowing hashing if the tensor is a scalar.\r\n # We hash both the scalar value *and* the differentiability information,\r\n # to match the behaviour of PyTorch.\r\n return hash((self.item(), self.requires_grad))\r\n\r\n raise TypeError(\"unhashable type: 'numpy.tensor'\")\r\n\r\n def __reduce__(self):\r\n # Called when 
pickling the object.\r\n # Numpy ndarray uses __reduce__ instead of __getstate__ to prepare an object for\r\n # pickling. self.requires_grad needs to be included in the tuple returned by\r\n # __reduce__ in order to be preserved in the unpickled object.\r\n reduced_obj = super().__reduce__()\r\n # The last (2nd) element of this tuple holds the data. Add requires_grad to this:\r\n full_reduced_data = reduced_obj[2] + (self.requires_grad,)\r\n return (reduced_obj[0], reduced_obj[1], full_reduced_data)\r\n\r\n def __setstate__(self, reduced_obj) -> None:\r\n # Called when unpickling the object.\r\n # Set self.requires_grad with the last element in the tuple returned by __reduce__:\r\n # pylint: disable=attribute-defined-outside-init\r\n self.requires_grad = reduced_obj[-1]\r\n # And call parent's __setstate__ without this element:\r\n super().__setstate__(reduced_obj[:-1])\r\n\r\n def unwrap(self):\r\n \"\"\"Converts the tensor to a standard, non-differentiable NumPy ndarray or Python scalar if\r\n the tensor is 0-dimensional.\r\n\r\n All information regarding differentiability of the tensor will be lost.\r\n\r\n .. warning::\r\n\r\n The returned array is a new view onto the **same data**. That is,\r\n the tensor and the returned ``ndarray`` share the same underlying storage.\r\n Changes to the tensor object will be reflected within the returned array,\r\n and vice versa.\r\n\r\n **Example**\r\n\r\n >>> from pennylane import numpy as np\r\n >>> x = np.array([1, 2], requires_grad=True)\r\n >>> x\r\n tensor([1, 2], requires_grad=True)\r\n >>> x.unwrap()\r\n array([1, 2])\r\n\r\n Zero dimensional array are converted to Python scalars:\r\n\r\n >>> x = np.array(1.543, requires_grad=False)\r\n >>> x.unwrap()\r\n 1.543\r\n >>> type(x.unwrap())\r\n float\r\n\r\n The underlying data is **not** copied:\r\n\r\n >>> x = np.array([1, 2], requires_grad=True)\r\n >>> y = x.unwrap()\r\n >>> x[0] = 5\r\n >>> y\r\n array([5, 2])\r\n >>> y[1] = 7\r\n >>> x\r\n tensor([5, 7], requires_grad=True)\r\n\r\n\r\n To create a copy, the ``copy()`` method can be used:\r\n\r\n >>> x = np.array([1, 2], requires_grad=True)\r\n >>> y = x.unwrap().copy()\r\n >>> x[0] = 5\r\n >>> y\r\n array([1, 2])\r\n \"\"\"\r\n if self.ndim == 0:\r\n return self.view(onp.ndarray).item()\r\n\r\n return self.view(onp.ndarray)\r\n\r\n def numpy(self):\r\n \"\"\"Converts the tensor to a standard, non-differentiable NumPy ndarray or Python scalar if\r\n the tensor is 0-dimensional.\r\n\r\n This method is an alias for :meth:`~.unwrap`. See :meth:`~.unwrap` for more details.\r\n \"\"\"\r\n return self.unwrap()\r\n\r\n\r\nclass NonDifferentiableError(Exception):\r\n \"\"\"Exception raised if attempting to differentiate non-trainable\r\n :class:`~.tensor` using Autograd.\"\"\"\r\n\r\n\r\ndef tensor_to_arraybox(x, *args):\r\n \"\"\"Convert a :class:`~.tensor` to an Autograd ``ArrayBox``.\r\n\r\n Args:\r\n x (array_like): Any data structure in any form that can be converted to\r\n an array. This includes lists, lists of tuples, tuples, tuples of tuples,\r\n tuples of lists and ndarrays.\r\n\r\n Returns:\r\n autograd.numpy.numpy_boxes.ArrayBox: Autograd ArrayBox instance of the array\r\n\r\n Raises:\r\n NonDifferentiableError: if the provided tensor is non-differentiable\r\n \"\"\"\r\n if isinstance(x, tensor):\r\n if x.requires_grad:\r\n return ArrayBox(x, *args)\r\n\r\n raise NonDifferentiableError(\r\n \"{} is non-differentiable. 
Set the requires_grad attribute to True.\".format(x)\r\n )\r\n\r\n return ArrayBox(x, *args)\r\n\r\n\r\nBox.type_mappings[tensor] = tensor_to_arraybox\r\nVSpace.mappings[tensor] = lambda x: ComplexArrayVSpace(x) if onp.iscomplexobj(x) else ArrayVSpace(x)\r\n",
"# Copyright 2018-2021 Xanadu Quantum Technologies Inc.\n\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n\n# http://www.apache.org/licenses/LICENSE-2.0\n\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\nr\"\"\"\nThis module contains the available built-in continuous-variable\nquantum operations supported by PennyLane, as well as their conventions.\n\n.. todo:: Add gradient recipes for Gaussian state preparations\n\n.. todo::\n\n The gradient computation assumes all parameters are real (floats), some\n docstrings here allow complex or even array parameter values. This includes\n :class:`~.DisplacedSqueezedState` and :class:`~.CatState`.\n\n Possible solution: disallow such operations to depend on free parameters,\n this way they won't be differentiated.\n\n.. note::\n\n For the Heisenberg matrix representation of CV operations, we use the ordering\n :math:`(\\hat{\\mathbb{1}}, \\hat{x}, \\hat{p})` for single modes\n and :math:`(\\hat{\\mathbb{1}}, \\hat{x}_1, \\hat{p}_2, \\hat{x}_1,\\hat{p}_2)` for two modes .\n\"\"\"\n# As the qubit based ``decomposition``, ``_matrix``, ``diagonalizing_gates``\n# abstract methods are not defined in the CV case, disabling the related check\n# pylint: disable=abstract-method\nimport math\nimport numpy as np\nfrom scipy.linalg import block_diag\n\nfrom pennylane.operation import AnyWires, CVOperation, CVObservable\nfrom pennylane import math as qml_math\n\n\ndef _rotation(phi, bare=False):\n r\"\"\"Utility function, returns the Heisenberg transformation of a phase rotation gate.\n\n The transformation matrix returned is:\n\n .. math:: M = \\begin{bmatrix}\n 1 & 0 & 0\\\\\n 0 & \\cos\\phi & -\\sin\\phi\\\\\n 0 & \\sin\\phi & \\cos\\phi\n \\end{bmatrix}\n\n Args:\n phi (float): rotation angle.\n bare (bool): if True, return a simple 2d rotation matrix\n\n Returns:\n array[float]: transformation matrix\n \"\"\"\n c = math.cos(phi)\n s = math.sin(phi)\n temp = np.array([[c, -s], [s, c]])\n if bare:\n return temp\n return block_diag(1, temp) # pylint: disable=no-member\n\n\nclass Rotation(CVOperation):\n r\"\"\"pennylane.Rotation(phi, wires)\n Phase space rotation.\n\n .. math::\n R(\\phi) = \\exp\\left(i \\phi \\ad \\a\\right)=\\exp\\left(i \\frac{\\phi}{2}\n \\left(\\frac{\\x^2+ \\p^2}{\\hbar}-\\I\\right)\\right).\n\n **Details:**\n\n * Number of wires: 1\n * Number of parameters: 1\n * Gradient recipe: :math:`\\frac{d}{dr}f(R(r)) = \\frac{1}{2} \\left[f(R(\\phi+\\pi/2)) - f(R(\\phi-\\pi/2))\\right]`\n where :math:`f` is an expectation value depending on :math:`R(r)`.\n * Heisenberg representation:\n\n .. 
math:: M = \\begin{bmatrix}\n 1 & 0 & 0\\\\\n 0 & \\cos\\phi & -\\sin\\phi\\\\\n 0 & \\sin\\phi & \\cos\\phi\n \\end{bmatrix}\n\n Args:\n phi (float): the rotation angle\n \"\"\"\n num_wires = 1\n grad_method = \"A\"\n\n @property\n def num_params(self):\n return 1\n\n @staticmethod\n def _heisenberg_rep(p):\n return _rotation(p[0])\n\n def adjoint(self, do_queue=False):\n return Rotation(-self.parameters[0], wires=self.wires, do_queue=do_queue)\n\n def label(self, decimals=None, base_label=None):\n return super().label(decimals=decimals, base_label=base_label or \"R\")\n\n\nclass Squeezing(CVOperation):\n r\"\"\"pennylane.Squeezing(r, phi, wires)\n Phase space squeezing.\n\n .. math::\n S(z) = \\exp\\left(\\frac{1}{2}(z^* \\a^2 -z {\\a^\\dagger}^2)\\right).\n\n where :math:`z = r e^{i\\phi}`.\n\n **Details:**\n\n * Number of wires: 1\n * Number of parameters: 2\n * Gradient recipe: :math:`\\frac{d}{dr}f(S(r,\\phi)) = \\frac{1}{2\\sinh s} \\left[f(S(r+s, \\phi)) - f(S(r-s, \\phi))\\right]`,\n where :math:`s` is an arbitrary real number (:math:`0.1` by default) and\n :math:`f` is an expectation value depending on :math:`S(r,\\phi)`.\n * Heisenberg representation:\n\n .. math:: M = \\begin{bmatrix}\n 1 & 0 & 0 \\\\\n 0 & \\cosh r - \\cos\\phi \\sinh r & -\\sin\\phi\\sinh r \\\\\n 0 & -\\sin\\phi\\sinh r & \\cosh r+\\cos\\phi\\sinh r\n \\end{bmatrix}\n\n Args:\n r (float): squeezing amount\n phi (float): squeezing phase angle :math:`\\phi`\n wires (Sequence[int] or int): the wire the operation acts on\n \"\"\"\n num_wires = 1\n grad_method = \"A\"\n\n shift = 0.1\n multiplier = 0.5 / math.sinh(shift)\n a = 1\n grad_recipe = ([[multiplier, a, shift], [-multiplier, a, -shift]], None)\n\n @property\n def num_params(self):\n return 2\n\n @staticmethod\n def _heisenberg_rep(p):\n R = _rotation(p[1] / 2)\n return R @ np.diag([1, math.exp(-p[0]), math.exp(p[0])]) @ R.T\n\n def adjoint(self, do_queue=False):\n r, phi = self.parameters\n new_phi = (phi + np.pi) % (2 * np.pi)\n return Squeezing(r, new_phi, wires=self.wires, do_queue=do_queue)\n\n def label(self, decimals=None, base_label=None):\n return super().label(decimals=decimals, base_label=base_label or \"S\")\n\n\nclass Displacement(CVOperation):\n r\"\"\"pennylane.Displacement(a, phi, wires)\n Phase space displacement.\n\n .. math::\n D(a,\\phi) = D(\\alpha) = \\exp(\\alpha \\ad -\\alpha^* \\a)\n = \\exp\\left(-i\\sqrt{\\frac{2}{\\hbar}}(\\re(\\alpha) \\hat{p} -\\im(\\alpha) \\hat{x})\\right).\n\n where :math:`\\alpha = ae^{i\\phi}` has magnitude :math:`a\\geq 0` and phase :math:`\\phi`.\n The result of applying a displacement to the vacuum is a coherent state\n :math:`D(\\alpha)\\ket{0} = \\ket{\\alpha}`.\n\n **Details:**\n\n * Number of wires: 1\n * Number of parameters: 2\n * Gradient recipe: :math:`\\frac{d}{da}f(D(a,\\phi)) = \\frac{1}{2s} \\left[f(D(a+s, \\phi)) - f(D(a-s, \\phi))\\right]`,\n where :math:`s` is an arbitrary real number (:math:`0.1` by default) and\n :math:`f` is an expectation value depending on :math:`D(a,\\phi)`.\n * Heisenberg representation:\n\n .. 
math:: M = \\begin{bmatrix} 1 & 0 & 0 \\\\ 2a\\cos\\phi & 1 & 0 \\\\ 2a\\sin\\phi & 0 & 1\\end{bmatrix}\n\n Args:\n a (float): displacement magnitude :math:`a=|\\alpha|`\n phi (float): phase angle :math:`\\phi`\n wires (Sequence[int] or int): the wire the operation acts on\n \"\"\"\n num_wires = 1\n grad_method = \"A\"\n\n shift = 0.1\n multiplier = 0.5 / shift\n a = 1\n grad_recipe = ([[multiplier, a, shift], [-multiplier, a, -shift]], None)\n\n @property\n def num_params(self):\n return 2\n\n @staticmethod\n def _heisenberg_rep(p):\n c = math.cos(p[1])\n s = math.sin(p[1])\n scale = 2 # sqrt(2 * hbar)\n return np.array([[1, 0, 0], [scale * c * p[0], 1, 0], [scale * s * p[0], 0, 1]])\n\n def adjoint(self, do_queue=False):\n a, phi = self.parameters\n new_phi = (phi + np.pi) % (2 * np.pi)\n return Displacement(a, new_phi, wires=self.wires, do_queue=do_queue)\n\n def label(self, decimals=None, base_label=None):\n return super().label(decimals=decimals, base_label=base_label or \"D\")\n\n\nclass Beamsplitter(CVOperation):\n r\"\"\"pennylane.Beamsplitter(theta, phi, wires)\n Beamsplitter interaction.\n\n .. math::\n B(\\theta,\\phi) = \\exp\\left(\\theta (e^{i \\phi} \\a \\hat{b}^\\dagger -e^{-i \\phi}\\ad \\hat{b}) \\right).\n\n **Details:**\n\n * Number of wires: 2\n * Number of parameters: 2\n * Gradient recipe: :math:`\\frac{d}{d \\theta}f(B(\\theta,\\phi)) = \\frac{1}{2} \\left[f(B(\\theta+\\pi/2, \\phi)) - f(B(\\theta-\\pi/2, \\phi))\\right]`\n where :math:`f` is an expectation value depending on :math:`B(\\theta,\\phi)`.\n * Heisenberg representation:\n\n .. math:: M = \\begin{bmatrix}\n 1 & 0 & 0 & 0 & 0\\\\\n 0 & \\cos\\theta & 0 & -\\cos\\phi\\sin\\theta & -\\sin\\phi\\sin\\theta \\\\\n 0 & 0 & \\cos\\theta & \\sin\\phi\\sin\\theta & -\\cos\\phi\\sin\\theta\\\\\n 0 & \\cos\\phi\\sin\\theta & -\\sin\\phi\\sin\\theta & \\cos\\theta & 0\\\\\n 0 & \\sin\\phi\\sin\\theta & \\cos\\phi\\sin\\theta & 0 & \\cos\\theta\n \\end{bmatrix}\n\n Args:\n theta (float): Transmittivity angle :math:`\\theta`. The transmission amplitude\n of the beamsplitter is :math:`t = \\cos(\\theta)`.\n The value :math:`\\theta=\\pi/4` gives the 50-50 beamsplitter.\n phi (float): Phase angle :math:`\\phi`. The reflection amplitude of the\n beamsplitter is :math:`r = e^{i\\phi}\\sin(\\theta)`.\n The value :math:`\\phi = \\pi/2` gives the symmetric beamsplitter.\n wires (Sequence[int] or int): the wire the operation acts on\n \"\"\"\n num_wires = 2\n grad_method = \"A\"\n\n @property\n def num_params(self):\n return 2\n\n # For the beamsplitter, both parameters are rotation-like\n @staticmethod\n def _heisenberg_rep(p):\n R = _rotation(p[1], bare=True)\n c = math.cos(p[0])\n s = math.sin(p[0])\n U = c * np.eye(5)\n U[0, 0] = 1\n U[1:3, 3:5] = -s * R.T\n U[3:5, 1:3] = s * R\n return U\n\n def adjoint(self, do_queue=False):\n theta, phi = self.parameters\n return Beamsplitter(-theta, phi, wires=self.wires, do_queue=do_queue)\n\n def label(self, decimals=None, base_label=None):\n return super().label(decimals=decimals, base_label=base_label or \"BS\")\n\n\nclass TwoModeSqueezing(CVOperation):\n r\"\"\"pennylane.TwoModeSqueezing(r, phi, wires)\n Phase space two-mode squeezing.\n\n .. 
math::\n S_2(z) = \\exp\\left(z^* \\a \\hat{b} -z \\ad \\hat{b}^\\dagger \\right)\n = \\exp\\left(r (e^{-i\\phi} \\a\\hat{b} -e^{i\\phi} \\ad \\hat{b}^\\dagger \\right).\n\n where :math:`z = r e^{i\\phi}`.\n\n **Details:**\n\n * Number of wires: 2\n * Number of parameters: 2\n * Gradient recipe: :math:`\\frac{d}{dr}f(S_2(r,\\phi)) = \\frac{1}{2\\sinh s} \\left[f(S_2(r+s, \\phi)) - f(S_2(r-s, \\phi))\\right]`,\n where :math:`s` is an arbitrary real number (:math:`0.1` by default) and\n :math:`f` is an expectation value depending on :math:`S_2(r,\\phi)`.\n\n * Heisenberg representation:\n\n .. math:: M = \\begin{bmatrix}\n 1 & 0 & 0 & 0 & 0 \\\\\n 0 & \\cosh r & 0 & \\sinh r \\cos \\phi & \\sinh r \\sin \\phi\\\\\n 0 & 0 & \\cosh r & \\sinh r \\sin \\phi & -\\sinh r \\cos \\phi\\\\\n 0 & \\sinh r \\cos \\phi & \\sinh r \\sin \\phi & \\cosh r & 0\\\\\n 0 & \\sinh r \\sin \\phi & -\\sinh r \\cos \\phi & 0 & \\cosh r\n \\end{bmatrix}\n\n Args:\n r (float): squeezing amount\n phi (float): squeezing phase angle :math:`\\phi`\n wires (Sequence[int] or int): the wire the operation acts on\n \"\"\"\n num_wires = 2\n\n grad_method = \"A\"\n\n shift = 0.1\n multiplier = 0.5 / math.sinh(shift)\n a = 1\n grad_recipe = ([[multiplier, a, shift], [-multiplier, a, -shift]], None)\n\n @property\n def num_params(self):\n return 2\n\n @staticmethod\n def _heisenberg_rep(p):\n R = _rotation(p[1], bare=True)\n\n S = math.sinh(p[0]) * np.diag([1, -1])\n U = math.cosh(p[0]) * np.identity(5)\n\n U[0, 0] = 1\n U[1:3, 3:5] = S @ R.T\n U[3:5, 1:3] = S @ R.T\n return U\n\n def adjoint(self, do_queue=False):\n r, phi = self.parameters\n new_phi = (phi + np.pi) % (2 * np.pi)\n return TwoModeSqueezing(r, new_phi, wires=self.wires, do_queue=do_queue)\n\n def label(self, decimals=None, base_label=None):\n return super().label(decimals=decimals, base_label=base_label or \"S\")\n\n\nclass QuadraticPhase(CVOperation):\n r\"\"\"pennylane.QuadraticPhase(s, wires)\n Quadratic phase shift.\n\n .. math::\n P(s) = e^{i \\frac{s}{2} \\hat{x}^2/\\hbar}.\n\n **Details:**\n\n * Number of wires: 1\n * Number of parameters: 1\n * Gradient recipe: :math:`\\frac{d}{ds}f(P(s)) = \\frac{1}{2 a} \\left[f(P(s+a)) - f(P(s-a))\\right]`,\n where :math:`a` is an arbitrary real number (:math:`0.1` by default) and\n :math:`f` is an expectation value depending on :math:`P(s)`.\n\n * Heisenberg representation:\n\n .. math:: M = \\begin{bmatrix}\n 1 & 0 & 0 \\\\\n 0 & 1 & 0 \\\\\n 0 & s & 1 \\\\\n \\end{bmatrix}\n\n Args:\n s (float): parameter\n wires (Sequence[int] or int): the wire the operation acts on\n \"\"\"\n num_wires = 1\n\n grad_method = \"A\"\n\n shift = 0.1\n multiplier = 0.5 / shift\n a = 1\n grad_recipe = ([[multiplier, a, shift], [-multiplier, a, -shift]],)\n\n @property\n def num_params(self):\n return 1\n\n @staticmethod\n def _heisenberg_rep(p):\n U = np.identity(3)\n U[2, 1] = p[0]\n return U\n\n def label(self, decimals=None, base_label=None):\n return super().label(decimals=decimals, base_label=base_label or \"P\")\n\n\nclass ControlledAddition(CVOperation):\n r\"\"\"pennylane.ControlledAddition(s, wires)\n Controlled addition operation.\n\n .. 
math::\n \\text{CX}(s) = \\int dx \\ket{x}\\bra{x} \\otimes D\\left({\\frac{1}{\\sqrt{2\\hbar}}}s x\\right)\n = e^{-i s \\: \\hat{x} \\otimes \\hat{p}/\\hbar}.\n\n **Details:**\n\n * Number of wires: 2\n * Number of parameters: 1\n * Gradient recipe: :math:`\\frac{d}{ds}f(\\text{CX}(s)) = \\frac{1}{2 a} \\left[f(\\text{CX}(s+a)) - f(\\text{CX}(s-a))\\right]`,\n where :math:`a` is an arbitrary real number (:math:`0.1` by default) and\n :math:`f` is an expectation value depending on :math:`\\text{CX}(s)`.\n\n * Heisenberg representation:\n\n .. math:: M = \\begin{bmatrix}\n 1 & 0 & 0 & 0 & 0 \\\\\n 0 & 1 & 0 & 0 & 0 \\\\\n 0 & 0 & 1 & 0 & -s \\\\\n 0 & s & 0 & 1 & 0 \\\\\n 0 & 0 & 0 & 0 & 1\n \\end{bmatrix}\n\n Args:\n s (float): addition multiplier\n wires (Sequence[int] or int): the wire the operation acts on\n \"\"\"\n num_wires = 2\n grad_method = \"A\"\n\n shift = 0.1\n multiplier = 0.5 / shift\n a = 1\n grad_recipe = ([[multiplier, a, shift], [-multiplier, a, -shift]],)\n\n @property\n def num_params(self):\n return 1\n\n @staticmethod\n def _heisenberg_rep(p):\n U = np.identity(5)\n U[2, 4] = -p[0]\n U[3, 1] = p[0]\n return U\n\n def adjoint(self, do_queue=False):\n return ControlledAddition(-self.parameters[0], wires=self.wires, do_queue=do_queue)\n\n def label(self, decimals=None, base_label=None):\n return super().label(decimals=decimals, base_label=base_label or \"X\")\n\n\nclass ControlledPhase(CVOperation):\n r\"\"\"pennylane.ControlledPhase(s, wires)\n Controlled phase operation.\n\n .. math::\n \\text{CZ}(s) = \\iint dx dy \\: e^{i sxy/\\hbar} \\ket{x,y}\\bra{x,y}\n = e^{i s \\: \\hat{x} \\otimes \\hat{x}/\\hbar}.\n\n **Details:**\n\n * Number of wires: 2\n * Number of parameters: 1\n * Gradient recipe: :math:`\\frac{d}{ds}f(\\text{CZ}(s)) = \\frac{1}{2 a} \\left[f(\\text{CZ}(s+a)) - f(\\text{CZ}(s-a))\\right]`,\n where :math:`a` is an arbitrary real number (:math:`0.1` by default) and\n :math:`f` is an expectation value depending on :math:`\\text{CZ}(s)`.\n\n * Heisenberg representation:\n\n .. math:: M = \\begin{bmatrix}\n 1 & 0 & 0 & 0 & 0 \\\\\n 0 & 1 & 0 & 0 & 0 \\\\\n 0 & 0 & 1 & s & 0 \\\\\n 0 & 0 & 0 & 1 & 0 \\\\\n 0 & s & 0 & 0 & 1\n \\end{bmatrix}\n\n Args:\n s (float): phase shift multiplier\n wires (Sequence[int] or int): the wire the operation acts on\n \"\"\"\n num_wires = 2\n grad_method = \"A\"\n\n shift = 0.1\n multiplier = 0.5 / shift\n a = 1\n grad_recipe = ([[multiplier, a, shift], [-multiplier, a, -shift]],)\n\n @property\n def num_params(self):\n return 1\n\n @staticmethod\n def _heisenberg_rep(p):\n U = np.identity(5)\n U[2, 3] = p[0]\n U[4, 1] = p[0]\n return U\n\n def adjoint(self, do_queue=False):\n return ControlledPhase(-self.parameters[0], wires=self.wires, do_queue=do_queue)\n\n def label(self, decimals=None, base_label=None):\n return super().label(decimals=decimals, base_label=base_label or \"Z\")\n\n\nclass Kerr(CVOperation):\n r\"\"\"pennylane.Kerr(kappa, wires)\n Kerr interaction.\n\n .. 
math::\n K(\\kappa) = e^{i \\kappa \\hat{n}^2}.\n\n **Details:**\n\n * Number of wires: 1\n * Number of parameters: 1\n * Gradient recipe: None (uses finite difference)\n\n Args:\n kappa (float): parameter\n wires (Sequence[int] or int): the wire the operation acts on\n \"\"\"\n num_wires = 1\n grad_method = \"F\"\n\n @property\n def num_params(self):\n return 1\n\n def adjoint(self, do_queue=False):\n return Kerr(-self.parameters[0], wires=self.wires, do_queue=do_queue)\n\n\nclass CrossKerr(CVOperation):\n r\"\"\"pennylane.CrossKerr(kappa, wires)\n Cross-Kerr interaction.\n\n .. math::\n CK(\\kappa) = e^{i \\kappa \\hat{n}_1\\hat{n}_2}.\n\n **Details:**\n\n * Number of wires: 2\n * Number of parameters: 1\n * Gradient recipe: None (uses finite difference)\n\n Args:\n kappa (float): parameter\n wires (Sequence[int] or int): the wire the operation acts on\n \"\"\"\n num_wires = 2\n grad_method = \"F\"\n\n @property\n def num_params(self):\n return 1\n\n def adjoint(self, do_queue=False):\n return CrossKerr(-self.parameters[0], wires=self.wires, do_queue=do_queue)\n\n\nclass CubicPhase(CVOperation):\n r\"\"\"pennylane.CubicPhase(gamma, wires)\n Cubic phase shift.\n\n .. math::\n V(\\gamma) = e^{i \\frac{\\gamma}{3} \\hat{x}^3/\\hbar}.\n\n **Details:**\n\n * Number of wires: 1\n * Number of parameters: 1\n * Gradient recipe: None (uses finite difference)\n\n Args:\n gamma (float): parameter\n wires (Sequence[int] or int): the wire the operation acts on\n \"\"\"\n num_wires = 1\n grad_method = \"F\"\n\n @property\n def num_params(self):\n return 1\n\n def adjoint(self, do_queue=False):\n return CubicPhase(-self.parameters[0], wires=self.wires, do_queue=do_queue)\n\n def label(self, decimals=None, base_label=None):\n return super().label(decimals=decimals, base_label=base_label or \"V\")\n\n\nclass InterferometerUnitary(CVOperation):\n r\"\"\"pennylane.InterferometerUnitary(U, wires)\n A linear interferometer transforming the bosonic operators according to\n the unitary matrix :math:`U`.\n\n .. note::\n\n This operation implements a **fixed** linear interferometer given a known\n unitary matrix.\n\n If you instead wish to parameterize the interferometer,\n and calculate the gradient/optimize with respect to these parameters,\n consider instead the :func:`pennylane.template.Interferometer` template,\n which constructs an interferometer from a combination of beamsplitters\n and rotation gates.\n\n **Details:**\n\n * Number of wires: Any\n * Number of parameters: 1\n * Gradient recipe: None\n * Heisenberg representation:\n\n .. 
math:: M = \\begin{bmatrix}\n 1 & 0\\\\\n 0 & S\\\\\n \\end{bmatrix}\n\n where :math:`S` is the Gaussian symplectic transformation representing the interferometer.\n\n Args:\n U (array): A shape ``(len(wires), len(wires))`` complex unitary matrix\n wires (Sequence[int] or int): the wires the operation acts on\n \"\"\"\n num_wires = AnyWires\n grad_method = None\n grad_recipe = None\n\n @property\n def num_params(self):\n return 1\n\n @staticmethod\n def _heisenberg_rep(p):\n N = len(p[0])\n A = p[0].real\n B = p[0].imag\n\n rows = np.arange(2 * N).reshape(2, -1).T.flatten()\n S = np.vstack([np.hstack([A, -B]), np.hstack([B, A])])[:, rows][rows]\n\n M = np.eye(2 * N + 1)\n M[1 : 2 * N + 1, 1 : 2 * N + 1] = S\n return M\n\n def adjoint(self, do_queue=False):\n U = self.parameters[0]\n return InterferometerUnitary(\n qml_math.T(qml_math.conj(U)), wires=self.wires, do_queue=do_queue\n )\n\n def label(self, decimals=None, base_label=None):\n return super().label(decimals=decimals, base_label=base_label or \"U\")\n\n\n# =============================================================================\n# State preparation\n# =============================================================================\n\n# TODO: put Heisenberg reps of state preparations in docstrings?\n\n\nclass CoherentState(CVOperation):\n r\"\"\"pennylane.CoherentState(a, phi, wires)\n Prepares a coherent state.\n\n **Details:**\n\n * Number of wires: 1\n * Number of parameters: 2\n * Gradient recipe: None (uses finite difference)\n\n Args:\n a (float): displacement magnitude :math:`r=|\\alpha|`\n phi (float): phase angle :math:`\\phi`\n wires (Sequence[int] or int): the wire the operation acts on\n \"\"\"\n num_wires = 1\n grad_method = \"F\"\n\n @property\n def num_params(self):\n return 2\n\n\nclass SqueezedState(CVOperation):\n r\"\"\"pennylane.SqueezedState(r, phi, wires)\n Prepares a squeezed vacuum state.\n\n **Details:**\n\n * Number of wires: 1\n * Number of parameters: 2\n * Gradient recipe: None (uses finite difference)\n\n Args:\n r (float): squeezing magnitude\n phi (float): squeezing angle :math:`\\phi`\n wires (Sequence[int] or int): the wire the operation acts on\n \"\"\"\n num_wires = 1\n grad_method = \"F\"\n\n @property\n def num_params(self):\n return 2\n\n\nclass DisplacedSqueezedState(CVOperation):\n r\"\"\"pennylane.DisplacedSqueezedState(a, phi_a, r, phi_r, wires)\n Prepares a displaced squeezed vacuum state.\n\n A displaced squeezed state is prepared by squeezing a vacuum state, and\n then applying a displacement operator,\n\n .. 
math::\n \\ket{\\alpha,z} = D(\\alpha)\\ket{0,z} = D(\\alpha)S(z)\\ket{0},\n\n with the displacement parameter :math:`\\alpha=ae^{i\\phi_a}` and the squeezing parameter :math:`z=re^{i\\phi_r}`.\n\n **Details:**\n\n * Number of wires: 1\n * Number of parameters: 4\n * Gradient recipe: None (uses finite difference)\n\n Args:\n a (float): displacement magnitude :math:`a=|\\alpha|`\n phi_a (float): displacement angle :math:`\\phi_a`\n r (float): squeezing magnitude :math:`r=|z|`\n phi_r (float): squeezing angle :math:`\\phi_r`\n wires (Sequence[int] or int): the wire the operation acts on\n \"\"\"\n num_wires = 1\n grad_method = \"F\"\n\n @property\n def num_params(self):\n return 4\n\n\nclass ThermalState(CVOperation):\n r\"\"\"pennylane.ThermalState(nbar, wires)\n Prepares a thermal state.\n\n **Details:**\n\n * Number of wires: 1\n * Number of parameters: 1\n * Gradient recipe: None (uses finite difference)\n\n Args:\n nbar (float): mean thermal population of the mode\n wires (Sequence[int] or int): the wire the operation acts on\n \"\"\"\n num_wires = 1\n grad_method = \"F\"\n\n @property\n def num_params(self):\n return 1\n\n def label(self, decimals=None, base_label=None):\n return super().label(decimals=decimals, base_label=base_label or \"Thermal\")\n\n\nclass GaussianState(CVOperation):\n r\"\"\"pennylane.GaussianState(V, r, wires)\n Prepare subsystems in a given Gaussian state.\n\n **Details:**\n\n * Number of wires: Any\n * Number of parameters: 2\n * Gradient recipe: None\n\n Args:\n V (array): the :math:`2N\\times 2N` (real and positive definite) covariance matrix\n r (array): a length :math:`2N` vector of means, of the\n form :math:`(\\x_0,\\dots,\\x_{N-1},\\p_0,\\dots,\\p_{N-1})`\n wires (Sequence[int] or int): the wire the operation acts on\n \"\"\"\n num_wires = AnyWires\n grad_method = \"F\"\n\n @property\n def num_params(self):\n return 2\n\n def label(self, decimals=None, base_label=None):\n return super().label(decimals=decimals, base_label=base_label or \"Gaussian\")\n\n\nclass FockState(CVOperation):\n r\"\"\"pennylane.FockState(n, wires)\n Prepares a single Fock state.\n\n **Details:**\n\n * Number of wires: 1\n * Number of parameters: 1\n * Gradient recipe: None (not differentiable)\n\n Args:\n n (int): Fock state to prepare\n wires (Sequence[int] or int): the wire the operation acts on\n \"\"\"\n num_wires = 1\n grad_method = None\n\n @property\n def num_params(self):\n return 1\n\n def label(self, decimals=None, base_label=None):\n r\"\"\"A customizable string representation of the operator.\n\n Args:\n decimals=None (int): If ``None``, no parameters are included. Else,\n specifies how to round the parameters.\n base_label=None (str): overwrite the non-parameter component of the label\n\n Returns:\n str: label to use in drawings\n\n **Example:**\n\n >>> qml.FockState(7, wires=0).label()\n '|7⟩'\n\n \"\"\"\n if base_label is not None:\n if decimals is None:\n return base_label\n p = format(qml_math.asarray(self.parameters[0]), \".0f\")\n return base_label + f\"\\n({p})\"\n return f\"|{qml_math.asarray(self.parameters[0])}⟩\"\n\n\nclass FockStateVector(CVOperation):\n r\"\"\"pennylane.FockStateVector(state, wires)\n Prepare subsystems using the given ket vector in the Fock basis.\n\n **Details:**\n\n * Number of wires: Any\n * Number of parameters: 1\n * Gradient recipe: None (uses finite difference)\n\n Args:\n state (array): a single ket vector, for single mode state preparation,\n or a multimode ket, with one array dimension per mode\n\n .. 
UsageDetails::\n\n For a single mode with cutoff dimension :math:`N`, the input is a\n 1-dimensional vector of length :math:`N`.\n\n .. code-block::\n\n dev_fock = qml.device(\"strawberryfields.fock\", wires=4, cutoff_dim=4)\n\n state = np.array([0, 0, 1, 0])\n\n @qml.qnode(dev_fock)\n def circuit():\n qml.FockStateVector(state, wires=0)\n return qml.expval(qml.NumberOperator(wires=0))\n\n For multiple modes, the input is the tensor product of single mode\n kets. For example, given a set of :math:`M` single mode vectors of\n length :math:`N`, the input should have shape ``(N, ) * M``.\n\n .. code-block::\n\n used_wires = [0, 3]\n cutoff_dim = 5\n\n dev_fock = qml.device(\"strawberryfields.fock\", wires=4, cutoff_dim=cutoff_dim)\n\n state_1 = np.array([0, 1, 0, 0, 0])\n state_2 = np.array([0, 0, 0, 1, 0])\n\n combined_state = np.kron(state_1, state_2).reshape(\n (cutoff_dim, ) * len(used_wires)\n )\n\n @qml.qnode(dev_fock)\n def circuit():\n qml.FockStateVector(combined_state, wires=used_wires)\n return qml.expval(qml.NumberOperator(wires=0))\n\n \"\"\"\n num_wires = AnyWires\n grad_method = \"F\"\n\n @property\n def num_params(self):\n return 1\n\n def label(self, decimals=None, base_label=None):\n r\"\"\"A customizable string representation of the operator.\n\n Args:\n decimals=None (int): If ``None``, no parameters are included. Else,\n specifies how to round the parameters.\n base_label=None (str): overwrite the non-parameter component of the label\n\n Returns:\n str: label to use in drawings\n\n **Example:**\n\n >>> qml.FockStateVector([1,2,3], wires=(0,1,2)).label()\n '|123⟩'\n\n \"\"\"\n if base_label is not None:\n return base_label\n basis_string = \"\".join(str(int(i)) for i in self.parameters[0])\n return f\"|{basis_string}⟩\"\n\n\nclass FockDensityMatrix(CVOperation):\n r\"\"\"pennylane.FockDensityMatrix(state, wires)\n Prepare subsystems using the given density matrix in the Fock basis.\n\n **Details:**\n\n * Number of wires: Any\n * Number of parameters: 1\n * Gradient recipe: None (uses finite difference)\n\n Args:\n state (array): a single mode matrix :math:`\\rho_{ij}`, or\n a multimode tensor :math:`\\rho_{ij,kl,\\dots,mn}`, with two indices per mode\n \"\"\"\n num_wires = AnyWires\n grad_method = \"F\"\n\n @property\n def num_params(self):\n return 1\n\n\nclass CatState(CVOperation):\n r\"\"\"pennylane.CatState(a, phi, p, wires)\n Prepares a cat state.\n\n A cat state is the coherent superposition of two coherent states,\n\n .. 
math::\n \\ket{\\text{cat}(\\alpha)} = \\frac{1}{N} (\\ket{\\alpha} +e^{ip\\pi} \\ket{-\\alpha}),\n\n where :math:`\\ket{\\pm\\alpha} = D(\\pm\\alpha)\\ket{0}` are coherent states with displacement\n parameters :math:`\\pm\\alpha=\\pm ae^{i\\phi}` and\n :math:`N = \\sqrt{2 (1+\\cos(p\\pi)e^{-2|\\alpha|^2})}` is the normalization factor.\n\n **Details:**\n\n * Number of wires: 1\n * Number of parameters: 3\n * Gradient recipe: None (uses finite difference)\n\n Args:\n a (float): displacement magnitude :math:`a=|\\alpha|`\n phi (float): displacement angle :math:`\\phi`\n p (float): parity, where :math:`p=0` corresponds to an even\n cat state, and :math:`p=1` an odd cat state.\n wires (Sequence[int] or int): the wire the operation acts on\n \"\"\"\n num_wires = 1\n grad_method = \"F\"\n\n @property\n def num_params(self):\n return 3\n\n\n# =============================================================================\n# Observables\n# =============================================================================\n\n\nclass NumberOperator(CVObservable):\n r\"\"\"pennylane.ops.NumberOperator(wires)\n The photon number observable :math:`\\langle \\hat{n}\\rangle`.\n\n The number operator is defined as\n :math:`\\hat{n} = \\a^\\dagger \\a = \\frac{1}{2\\hbar}(\\x^2 +\\p^2) -\\I/2`.\n\n When used with the :func:`~.expval` function, the mean\n photon number :math:`\\braket{\\hat{n}}` is returned.\n\n **Details:**\n\n * Number of wires: 1\n * Number of parameters: 0\n * Observable order: 2nd order in the quadrature operators\n * Heisenberg representation:\n\n .. math:: M = \\frac{1}{2\\hbar}\\begin{bmatrix}\n -\\hbar & 0 & 0\\\\\n 0 & 1 & 0\\\\\n 0 & 0 & 1\n \\end{bmatrix}\n\n Args:\n wires (Sequence[int] or int): the wire the operation acts on\n \"\"\"\n num_wires = 1\n\n ev_order = 2\n\n @property\n def num_params(self):\n return 0\n\n @staticmethod\n def _heisenberg_rep(p):\n hbar = 2\n return np.diag([-0.5, 0.5 / hbar, 0.5 / hbar])\n\n def label(self, decimals=None, base_label=None):\n return base_label or \"n\"\n\n\nclass TensorN(CVObservable):\n r\"\"\"pennylane.ops.TensorN(wires)\n The tensor product of the :class:`~.NumberOperator` acting on different wires.\n\n If a single wire is defined, returns a :class:`~.NumberOperator` instance for convenient gradient computations.\n\n When used with the :func:`~.expval` function, the expectation value\n :math:`\\langle \\hat{n}_{i_0} \\hat{n}_{i_1}\\dots \\hat{n}_{i_{N-1}}\\rangle`\n for a (sub)set of modes :math:`[i_0, i_1, \\dots, i_{N-1}]` of the system is\n returned.\n\n **Details:**\n\n * Number of wires: Any\n * Number of parameters: 0\n\n Args:\n wires (Sequence[int] or int): the wire the operation acts on\n\n .. 
UsageDetails::\n\n Example for multiple modes:\n\n >>> cv_obs = qml.TensorN(wires=[0, 1])\n >>> cv_obs\n TensorN(wires=[0, 1])\n >>> cv_obs.ev_order is None\n True\n\n Example for a single mode (yields a :class:`~.NumberOperator`):\n\n >>> cv_obs = qml.TensorN(wires=[1])\n >>> cv_obs\n NumberOperator(wires=[1])\n >>> cv_obs.ev_order\n 2\n \"\"\"\n num_wires = AnyWires\n ev_order = None\n\n def __new__(cls, *params, wires=None, do_queue=True):\n # Custom definition for __new__ needed such that a NumberOperator can\n # be returned when a single mode is defined\n\n if wires is None and len(params) != 0:\n wires = params[-1]\n params = params[:-1]\n\n if wires is not None and (isinstance(wires, int) or len(wires) == 1):\n return NumberOperator(*params, wires=wires, do_queue=do_queue)\n return super().__new__(cls)\n\n @property\n def num_params(self):\n return 0\n\n def label(self, decimals=None, base_label=None):\n if base_label is not None:\n return base_label\n return \"⊗\".join(\"n\" for _ in self.wires)\n\n\nclass X(CVObservable):\n r\"\"\"pennylane.ops.X(wires)\n The position quadrature observable :math:`\\hat{x}`.\n\n When used with the :func:`~.expval` function, the position expectation\n value :math:`\\braket{\\hat{x}}` is returned. This corresponds to\n the mean displacement in the phase space along the :math:`x` axis.\n\n **Details:**\n\n * Number of wires: 1\n * Number of parameters: 0\n * Observable order: 1st order in the quadrature operators\n * Heisenberg representation:\n\n .. math:: d = [0, 1, 0]\n\n Args:\n wires (Sequence[int] or int): the wire the operation acts on\n \"\"\"\n num_wires = 1\n\n ev_order = 1\n\n @property\n def num_params(self):\n return 0\n\n @staticmethod\n def _heisenberg_rep(p):\n return np.array([0, 1, 0])\n\n\nclass P(CVObservable):\n r\"\"\"pennylane.ops.P(wires)\n The momentum quadrature observable :math:`\\hat{p}`.\n\n When used with the :func:`~.expval` function, the momentum expectation\n value :math:`\\braket{\\hat{p}}` is returned. This corresponds to\n the mean displacement in the phase space along the :math:`p` axis.\n\n **Details:**\n\n * Number of wires: 1\n * Number of parameters: 0\n * Observable order: 1st order in the quadrature operators\n * Heisenberg representation:\n\n .. math:: d = [0, 0, 1]\n\n Args:\n wires (Sequence[int] or int): the wire the operation acts on\n \"\"\"\n num_wires = 1\n\n ev_order = 1\n\n @property\n def num_params(self):\n return 0\n\n @staticmethod\n def _heisenberg_rep(p):\n return np.array([0, 0, 1])\n\n\nclass QuadOperator(CVObservable):\n r\"\"\"pennylane.ops.QuadOperator(phi, wires)\n The generalized quadrature observable :math:`\\x_\\phi = \\x cos\\phi+\\p\\sin\\phi`.\n\n When used with the :func:`~.expval` function, the expectation\n value :math:`\\braket{\\hat{\\x_\\phi}}` is returned. This corresponds to\n the mean displacement in the phase space along axis at angle :math:`\\phi`.\n\n **Details:**\n\n * Number of wires: 1\n * Number of parameters: 1\n * Observable order: 1st order in the quadrature operators\n * Heisenberg representation:\n\n .. 
math:: d = [0, \\cos\\phi, \\sin\\phi]\n\n Args:\n phi (float): axis in the phase space at which to calculate\n the generalized quadrature observable\n wires (Sequence[int] or int): the wire the operation acts on\n \"\"\"\n num_wires = 1\n\n grad_method = \"A\"\n ev_order = 1\n\n @property\n def num_params(self):\n return 1\n\n @staticmethod\n def _heisenberg_rep(p):\n phi = p[0]\n return np.array([0, math.cos(phi), math.sin(phi)]) # TODO check\n\n def label(self, decimals=None, base_label=None):\n r\"\"\"A customizable string representation of the operator.\n\n Args:\n decimals=None (int): If ``None``, no parameters are included. Else,\n specifies how to round the parameters.\n base_label=None (str): overwrite the non-parameter component of the label\n\n Returns:\n str: label to use in drawings\n\n **Example:**\n\n >>> op = qml.QuadOperator(1.234, wires=0)\n >>> op.label()\n 'cos(φ)x\\n+sin(φ)p'\n >>> op.label(decimals=2)\n 'cos(1.23)x\\n+sin(1.23)p'\n >>> op.label(base_label=\"Quad\", decimals=2)\n 'Quad\\n(1.23)'\n\n \"\"\"\n\n if base_label is not None:\n return super().label(decimals=decimals, base_label=base_label)\n\n if decimals is None:\n p = \"φ\"\n else:\n p = format(qml_math.array(self.parameters[0]), f\".{decimals}f\")\n return f\"cos({p})x\\n+sin({p})p\"\n\n\nclass PolyXP(CVObservable):\n r\"\"\"pennylane.ops.PolyXP(q, wires)\n An arbitrary second-order polynomial observable.\n\n Represents an arbitrary observable :math:`P(\\x,\\p)` that is a second order\n polynomial in the basis :math:`\\mathbf{r} = (\\I, \\x_0, \\p_0, \\x_1, \\p_1, \\ldots)`.\n\n For first-order observables the representation is a real vector\n :math:`\\mathbf{d}` such that :math:`P(\\x,\\p) = \\mathbf{d}^T \\mathbf{r}`.\n\n For second-order observables the representation is a real symmetric\n matrix :math:`A` such that :math:`P(\\x,\\p) = \\mathbf{r}^T A \\mathbf{r}`.\n\n Used for evaluating arbitrary order-2 CV expectation values of\n :class:`~.pennylane.tape.CVParamShiftTape`.\n\n **Details:**\n\n * Number of wires: Any\n * Number of parameters: 1\n * Observable order: 2nd order in the quadrature operators\n * Heisenberg representation: :math:`A`\n\n Args:\n q (array[float]): expansion coefficients\n\n \"\"\"\n num_wires = AnyWires\n\n grad_method = \"F\"\n ev_order = 2\n\n @property\n def num_params(self):\n return 1\n\n @staticmethod\n def _heisenberg_rep(p):\n return p[0]\n\n\nclass FockStateProjector(CVObservable):\n r\"\"\"pennylane.ops.FockStateProjector(n, wires)\n The number state observable :math:`\\ket{n}\\bra{n}`.\n\n Represents the non-Gaussian number state observable\n\n .. math:: \\ket{n}\\bra{n} = \\ket{n_0, n_1, \\dots, n_P}\\bra{n_0, n_1, \\dots, n_P}\n\n where :math:`n_i` is the occupation number of the :math:`i` th wire.\n\n The expectation of this observable is\n\n .. math::\n E[\\ket{n}\\bra{n}] = \\text{Tr}(\\ket{n}\\bra{n}\\rho)\n = \\text{Tr}(\\braketT{n}{\\rho}{n})\n = \\braketT{n}{\\rho}{n}\n\n corresponding to the probability of measuring the quantum state in the state\n :math:`\\ket{n}=\\ket{n_0, n_1, \\dots, n_P}`.\n\n .. 
note::\n\n If ``expval(FockStateProjector)`` is applied to a subset of wires,\n the unaffected wires are traced out prior to the expectation value\n calculation.\n\n **Details:**\n\n * Number of wires: Any\n * Number of parameters: 1\n * Observable order: None (non-Gaussian)\n\n Args:\n n (array): Array of non-negative integers representing the number state\n observable :math:`\\ket{n}\\bra{n}=\\ket{n_0, n_1, \\dots, n_P}\\bra{n_0, n_1, \\dots, n_P}`.\n\n For example, to return the observable :math:`\\ket{0,4,2}\\bra{0,4,2}` acting on\n wires 0, 1, and 3 of a QNode, you would call ``FockStateProjector(np.array([0, 4, 2], wires=[0, 1, 3]))``.\n\n Note that ``len(n)==len(wires)``, and that ``len(n)`` cannot exceed the\n total number of wires in the QNode.\n \"\"\"\n num_wires = AnyWires\n\n grad_method = None\n ev_order = None\n\n @property\n def num_params(self):\n return 1\n\n def label(self, decimals=None, base_label=None):\n r\"\"\"A customizable string representation of the operator.\n\n Args:\n decimals=None (int): If ``None``, no parameters are included. Else,\n specifies how to round the parameters.\n base_label=None (str): overwrite the non-parameter component of the label\n\n Returns:\n str: label to use in drawings\n\n **Example:**\n\n >>> qml.FockStateProjector([1,2,3], wires=(0,1,2)).label()\n '|123⟩⟨123|'\n\n \"\"\"\n\n if base_label is not None:\n return super().label(decimals=decimals, base_label=base_label)\n\n basis_string = \"\".join(str(int(i)) for i in self.parameters[0])\n return f\"|{basis_string}⟩⟨{basis_string}|\"\n\n\nops = {\n \"Beamsplitter\",\n \"ControlledAddition\",\n \"ControlledPhase\",\n \"Displacement\",\n \"Kerr\",\n \"CrossKerr\",\n \"QuadraticPhase\",\n \"Rotation\",\n \"Squeezing\",\n \"TwoModeSqueezing\",\n \"CubicPhase\",\n \"InterferometerUnitary\",\n \"CatState\",\n \"CoherentState\",\n \"FockDensityMatrix\",\n \"DisplacedSqueezedState\",\n \"FockState\",\n \"FockStateVector\",\n \"SqueezedState\",\n \"ThermalState\",\n \"GaussianState\",\n}\n\n\nobs = {\"QuadOperator\", \"NumberOperator\", \"TensorN\", \"P\", \"X\", \"PolyXP\", \"FockStateProjector\"}\n\n\n__all__ = list(ops | obs)\n"
] |
[
[
"numpy.allclose",
"numpy.arange",
"numpy.cos",
"numpy.sin",
"numpy.array"
],
[
"numpy.asarray",
"numpy.iscomplexobj"
],
[
"numpy.diag",
"numpy.hstack",
"scipy.linalg.block_diag",
"numpy.arange",
"numpy.eye",
"numpy.identity",
"numpy.array"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [
"0.13",
"0.14",
"0.15",
"0.12",
"0.10"
],
"tensorflow": []
}
] |
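The Torch-interface test code in the record above hard-codes an analytic value (cost_fn_expected) and Jacobian (cost_fn_jacobian) for a two-output cost function of six parameters ordered (x, y, a, b, c, d). As a minimal plausibility sketch (not part of the original test suite; the helper names f and jac_fd are hypothetical), the analytic Jacobian can be cross-checked against central finite differences in plain NumPy:

import numpy as np

def f(params):
    # restates cost_fn_expected in plain NumPy; params = [x, y, a, b, c, d]
    x, y, a, b, c, d = params
    return np.array([-c * np.sin(x) * np.sin(y) + np.cos(x) * (a + b * np.sin(y)),
                     d * np.cos(x)])

def jac_fd(params, eps=1e-6):
    # central finite differences, one Jacobian column per parameter
    params = np.asarray(params, dtype=float)
    cols = []
    for i in range(len(params)):
        dp = np.zeros_like(params)
        dp[i] = eps
        cols.append((f(params + dp) - f(params - dp)) / (2 * eps))
    return np.stack(cols, axis=1)          # shape (2, 6)

p = [0.4, 0.5, 0.1, 0.2, 0.3, 0.7]          # weights, coeffs1, coeffs2 values used in the tests
x, y, a, b, c, d = p
jac_analytic = np.array([
    [-c * np.cos(x) * np.sin(y) - np.sin(x) * (a + b * np.sin(y)),
     b * np.cos(x) * np.cos(y) - c * np.cos(y) * np.sin(x),
     np.cos(x), np.cos(x) * np.sin(y), -(np.sin(x) * np.sin(y)), 0],
    [-d * np.sin(x), 0, 0, 0, 0, np.cos(x)],
])
assert np.allclose(jac_fd(p), jac_analytic, atol=1e-5)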
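The autograd tensor module in the same record propagates differentiability through wrapped NumPy calls: the output is marked trainable whenever at least one input tensor is trainable. A short usage sketch using only the constructors and methods documented in that module's docstrings (the printed flags follow the propagation rule stated there):

from pennylane import numpy as np

x = np.array([0.0, 1.0, 2.0], requires_grad=False)   # fixed data
y = np.zeros([3], requires_grad=True)                 # trainable parameters

z = np.vstack([x, y])       # mixed inputs -> trainable output
print(z.requires_grad)      # True, since at least one input was trainable

w = np.sin(x)               # only non-trainable inputs
print(w.requires_grad)      # expected False under the same propagation rule

print(y.unwrap())           # plain ndarray view of the same data; trainability info dropped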
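The CV operations module in this record attaches two-term gradient recipes built from the variables multiplier, a and shift; for Squeezing the docstring states d/dr f(S(r, phi)) = 1/(2 sinh s) * [f(S(r+s, phi)) - f(S(r-s, phi))] with s = 0.1. A small sketch, assuming each recipe term [c, a, s] contributes c * f(a*r + s) to the derivative estimate (the toy cost f below is a hypothetical stand-in for an expectation value, chosen as a combination of e^r and e^-r so the two-term rule is exact):

import math

shift = 0.1
multiplier = 0.5 / math.sinh(shift)          # ~4.99167, as in Squeezing.grad_recipe
grad_recipe_r = [[multiplier, 1, shift], [-multiplier, 1, -shift]]

def shift_rule_derivative(f, r, recipe):
    # each recipe term [c, a, s] contributes c * f(a*r + s)
    return sum(c * f(a * r + s) for c, a, s in recipe)

f = math.cosh                                # toy cost; rule gives exactly f'(r) = sinh(r)
r = 0.3
print(shift_rule_derivative(f, r, grad_recipe_r))   # 0.30452..., i.e. sinh(0.3)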
Ahostility/AnalyzePeople-1
|
[
"edcd70fd6b236f4fafdbe4565b8a4975c1e084e4"
] |
[
"src/diarization/segment_another.py"
] |
[
"import numpy as np\n\nmin_time = 3\nd = 0.06\nduration = 1.6\nmax_time = 5\n\n\ndef get_another_fragments(voice_fragments, operator_similarity, wav_splits):\n new_fragments = []\n new_similarity = []\n for voice_fragment in voice_fragments:\n cur_diff = voice_fragment[1] - voice_fragment[0]\n if cur_diff >= min_time:\n ind1 = max(0, int((voice_fragment[0] - duration) / d))\n ind2 = int((voice_fragment[1] - duration) / d)\n if cur_diff <= max_time:\n new_similarity.append(operator_similarity[ind1:ind2].mean())\n new_fragments.append(voice_fragment)\n else:\n cur_similarity_arr, cur_fragments_arr = segment_fragment(operator_similarity[ind1:ind2],\n wav_splits[ind1:ind2])\n new_similarity += cur_similarity_arr\n new_fragments += cur_fragments_arr\n\n if len(new_similarity) == 0:\n res = []\n sorted_ids = np.argsort(new_similarity)\n\n min_id = int(len(sorted_ids) / 8)\n res = new_fragments[sorted_ids[min_id]]\n # print(res)\n # res = [18, 21]\n return res\n\n\ndef segment_fragment(a, wav_splits):\n window = int((min_time - 1.6)/0.06)\n new_similarity = [a[i:i+window].mean() for i in range(len(a) - window + 1)]\n new_fragments = [[wav_splits[i][0], wav_splits[i + window - 1][1]] for i in range(len(new_similarity))]\n return new_similarity, new_fragments\n\n\ndef unite_segments(fragments, min_time):\n res = []\n cut_part = 0.1\n sum_time = 0.0\n for fragment in fragments:\n is_changed = False\n fragment[0] += cut_part\n fragment[1] -= cut_part\n for i in range(len(res)):\n if fragment[0] < res[i][1] and fragment[0] > res[i][0]:\n sum_time += fragment[1] - res[i][1]\n res[i][1] = fragment[1]\n is_changed = True\n break\n elif fragment[1] < res[i][1] and fragment[1] > res[i][0]:\n sum_time += res[i][0] - fragment[0]\n res[i][0] = fragment[0]\n is_changed = True\n break\n\n if not is_changed:\n sum_time += fragment[1] - fragment[0]\n res.append([fragment[0], fragment[1]])\n\n if sum_time >= min_time:\n return res\n return res\n"
] |
[
[
"numpy.argsort"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
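In segment_fragment from the record above, the per-window similarity a[i:i+window].mean() is a plain moving average over the similarity curve. A small illustrative sketch (not taken from the repo) showing that the list comprehension is equivalent to a vectorized moving average via np.convolve, with the window derived from the same min_time, duration and d constants the module defines:

import numpy as np

a = np.random.rand(100)                 # stand-in for operator_similarity[ind1:ind2]
window = int((3 - 1.6) / 0.06)          # same window = int((min_time - 1.6) / 0.06) as the module

loop_means = [a[i:i + window].mean() for i in range(len(a) - window + 1)]
vec_means = np.convolve(a, np.ones(window) / window, mode="valid")

assert np.allclose(loop_means, vec_means)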
mingminyu/mumu
|
[
"e9f6c86a0b678ce4467ffba7f3dc4c0c8f971ff8"
] |
[
"mumu/feature/_woe.py"
] |
[
"# coding: utf-8\n# ================================================\n# Project: mumu\n# File: feature/_woe.py\n# Author: Mingmin Yu\n# Email: [email protected]\n# Date: 2021/6/24 18:24\n# Description:\n# ================================================\nimport os\nimport string\nimport logging\nimport math\nimport xlsxwriter\nimport pandas as pd\nimport numpy as np\nimport scipy.stats\nimport matplotlib.pyplot as plt\nfrom sklearn import tree\nfrom matplotlib.font_manager import FontProperties\nfrom ._contingency import chi2_contingency\n\n\ndef __woe_calc(bad=None, good=None, bad_freq=None, good_freq=None):\n \"\"\"Calculate woe\n\n :param bad: int or float,\n count of samples(target=1) in single bin\n :param good: int or float\n count of samples(target=0) in single bin\n :param bad_freq: int or float\n count of samples(target=1) in all samples\n :param good_freq: int or float\n count of samples(target=0) in all samples\n :return:\n \"\"\"\n target_rt = bad / float(bad_freq)\n non_target_rt = good / float(good_freq)\n\n if float(bad) != 0.0 and bad / float(bad + good) != 1.0:\n woe = math.log(float(target_rt / non_target_rt))\n elif target_rt == 0.0:\n woe = -99999999.0\n elif bad / float(bad + good) == 1.0:\n woe = 99999999.0\n else:\n woe = -99999999.0\n\n return woe\n\n\ndef __iv_calc(ds=None):\n \"\"\"Calculate iv value of the variable\n\n :param ds: DataFrame\n :return: iv, float\n \"\"\"\n bad_dist = ds['1'] / float(ds['1'].sum())\n good_dist = ds['0'] / float(ds['0'].sum())\n bad_dist = bad_dist.apply(lambda x: 0.0001 if x == 0 else x)\n ds['iv'] = (bad_dist - good_dist) * np.log(bad_dist / good_dist)\n iv = ds['iv'].sum()\n\n return iv\n\n\ndef __target_check(df_master=None, target=\"target\"):\n \"\"\"Check target is right, target column only includes 0 and 1 in theory.\n\n :param df_master: DataFrame\n :param target: str\n :return:\n \"\"\"\n # 检查target是否只有0和1两个取值\n if set(df_master[target].unique()) != {0, 1}:\n raise ValueError('Target are not only 0 and 1!')\n\n\ndef init_binning(df_master=None, var_name=None, target=None, max_bin_num=200, missing=False, cut_points=None):\n \"\"\"Cut bins for numerical variables(`var_name`) and describe distribution of target in each bin.\n\n :param df_master: DataFrame\n :param var_name: str\n :param target: str\n :param max_bin_num: int, default 200,\n max num of bins cut\n :param missing: bool, default False\n whether if the variable has missing value.\n :param cut_points: list, 指定的切分点,若不指定则根据分位点分bin\n specified cut points, if not, then cut bins according to cut points.\n :return: ds: DataFrame,\n statistical information for bins cut of the variable.\n \"\"\"\n df_tmp = df_master[[var_name, target]].copy()\n\n # Initialization cut points\n if cut_points is not None:\n if len(cut_points) == 0:\n raise ValueError('wrong cut points: {0}'.format(var_name))\n\n if np.max(df_tmp[var_name]) >= cut_points[-1]:\n # last value of bins is inf\n cut_points[-1] = np.inf\n\n if np.min(df_tmp[var_name]) < cut_points[0]:\n # # last value of bins is minimum\n cut_points[0] = np.min(df_tmp[var_name]) # bins第一个值改为min value, 防止忘记填最小值\n\n # If #(unique value) < max_bin_num, then each value will be a bin.\n elif len(df_tmp[var_name].unique()) < max_bin_num:\n cut_points = np.sort(df_tmp[var_name].unique())\n cut_points = np.append(cut_points, np.inf)\n\n # If #(unique value) >= max_bin_num, then cut `max_bin_num` bins according to cut points.\n else:\n pct = np.arange(max_bin_num + 1) / max_bin_num\n # calculate cut points and drop duplicates.\n cut_points = 
df_tmp[var_name].quantile(pct, interpolation='higher').unique() # 计算分位点并去重\n cut_points[-1] = np.inf\n\n # when missing is true, put `-1.0` into a single bin\n if missing:\n if cut_points[0] == -1.0:\n tmp_ary1 = np.asarray([-1.0, 0.0])\n tmp_ary2 = np.asarray(cut_points[2:])\n cut_points = np.concatenate((tmp_ary1, tmp_ary2), axis=0)\n else:\n logging.warning('Expect variable has missing value but actually no missing')\n\n # cut bins according to cut points and calculate distribution of target in each bin.\n # 按切分点分bin,并计算每个bin中target的分布\n df_tmp[var_name + '_bin'] = np.digitize(df_tmp[var_name], bins=cut_points, right=False)\n ds = df_tmp.groupby(var_name + '_bin')[target].value_counts().unstack().fillna(value=0)\n ds['total'] = ds[0] + ds[1]\n ds['bin'] = [[cut_points[i - 1], cut_points[i]] for i in list(ds.index)]\n ds['bin_lb'] = [cut_points[i - 1] for i in list(ds.index)]\n ds = ds.sort_values(by='bin_lb', axis=0, ascending=True).reset_index(drop=True) # 根据bin的下界进行排序\n ds.columns = ['0', '1', 'total', 'bin', 'bin_lb']\n\n return ds\n\n\ndef __value_match(map_dict=None, key_list=None):\n \"\"\"Return corresponding key of the value according to `map_dict`\n example: [1,3,4,5] → ['本科','大专','高中','研究生']\n\n :param map_dict: dict\n :param key_list: list\n :return: list,\n \"\"\"\n result = []\n\n for key in key_list:\n if key in map_dict:\n result.append(map_dict[key])\n else:\n result.append('base')\n\n return result\n\n\ndef __mergebin(ds=None, idx_list=None, idx=None, var_type=None):\n \"\"\"Merge adjacent bins, and calculate statistical information of the merged bin and chi value\n\n :param ds: DataFrame\n :param idx_list: list\n :param idx: int\n index of bins in list will be merged\n :param var_type: str, options ['numerical', 'categorical']\n type of the variable.\n :return: ds: DataFrame\n \"\"\"\n # merge two bins, and recalculate distribution of target.\n ds.at[idx_list[idx], ['0', '1']] = ds.loc[idx_list[idx], ['0', '1']] + ds.loc[idx_list[idx + 1], ['0', '1']]\n ds.at[idx_list[idx], 'total'] = ds.loc[idx_list[idx], 'total'] + ds.loc[idx_list[idx + 1], 'total']\n\n # recalculate range of the merged bin\n if var_type == 'numerical':\n ds.at[idx_list[idx], 'bin'] = [ds.loc[idx_list[idx], 'bin'][0], ds.loc[idx_list[idx + 1], 'bin'][1]]\n elif var_type == 'categorical':\n ds.at[idx_list[idx], 'bin'] = ds.loc[idx_list[idx:idx + 2], 'bin'].sum()\n\n # drop original bin after merged\n ds = ds.drop(idx_list[idx + 1], axis=0)\n # drop the index of original bins after merged\n idx_list.pop(idx + 1)\n\n # recalculate chi values of the merged bin, previous bin and later bin\n # if the merged bin is not first, then don't need to calculate the chi value of the previous bin\n if idx != 0:\n ds.at[idx_list[idx - 1], 'chisq'] = chi2_contingency(ds.loc[idx_list[(idx - 1):(idx + 1)], ['0', '1']])[0]\n\n # if the merged bin is not last, then don't need to calculate the chi value of the later bin\n if idx < ds.shape[0] - 1:\n ds.at[idx_list[idx], 'chisq'] = chi2_contingency(ds.loc[idx_list[idx:idx + 2], ['0', '1']])[0]\n else:\n ds.at[idx_list[idx], 'chisq'] = 9999999.0\n\n return ds\n\n\ndef generate_reference(ds=None, var_name=None, var_type=None):\n \"\"\"generate reference table of woe of the variable.\n\n :param ds: DataFrame\n :param var_name: str\n :param var_type: str, options ['numerical', 'categorical']\n :return: DataFrame, reference table\n \"\"\"\n # calculate woe and iv value for each bin\n good_freq = ds['0'].sum()\n bad_freq = ds['1'].sum()\n ds['woe_value'] = ds.apply(lambda x: 
__woe_calc(x['1'], x['0'], bad_freq, good_freq), axis=1)\n iv = __iv_calc(ds)\n\n # generate reference table\n df_ref_table = pd.DataFrame(columns=['Var_Name', 'Var_Type', 'Bin_No', 'Var_Value', 'Ref_Value',\n 'Count_0', 'Count_1', 'Total', 'Target_Rate', 'Proportion', 'IV'])\n df_ref_table['Bin_No'] = range(1, ds.shape[0] + 1) # Bin的编号,从1开始\n df_ref_table['Var_Value'] = ds['bin'].astype(str) # 将list转成字符串\n df_ref_table['Ref_Value'] = ds['woe_value']\n df_ref_table['Count_0'] = ds['0']\n df_ref_table['Count_1'] = ds['1']\n df_ref_table['Total'] = ds['total']\n df_ref_table['Target_Rate'] = 1.0 * df_ref_table['Count_1'] / df_ref_table['Total']\n df_ref_table['Proportion'] = 1.0 * df_ref_table['Total'] / ds['total'].sum()\n df_ref_table['IV'] = iv\n df_ref_table['Var_Name'] = var_name\n df_ref_table['Var_Type'] = var_type\n\n return df_ref_table\n\n\ndef __get_list_str(x):\n \"\"\"Get value of the categorical variable, and put 3 value in one line\n\n :param x: str\n :return: list\n \"\"\"\n str_list = x.split('\\001')\n s = ''\n for i in range(len(str_list)):\n s += str_list[i] + ','\n if (i + 1) % 3 == 0 and i + 1 != len(str_list):\n s += '\\n'\n\n return s[:-1]\n\n\ndef plot_reference(df_ref=None, save_path=None, figsize=(10, 4)):\n \"\"\"Woe plotting according to reference table\n\n :param df_ref: DataFrame\n :param save_path: str, the path of image saved\n :param figsize: tuple, size of the figure, default (10, 4)\n :return:\n \"\"\"\n x = np.arange(df_ref.shape[0])\n y = df_ref['Ref_Value'].values\n z = df_ref['Target_Rate'].values\n var_name = df_ref['Var_Name'].iloc[0]\n iv = round(df_ref['IV'].iloc[0], 5)\n\n plt.figure(figsize=figsize, dpi=200)\n plt.bar(x, df_ref['Proportion'], color='royalblue', label='0', align='center')\n plt.bar(x, df_ref['Proportion'] * df_ref['Target_Rate'], color='firebrick', label='1', align='center')\n\n # draw label of x axis\n if df_ref['Var_Type'].iloc[0] == 'numerical':\n xticks_list = df_ref['Var_Value'].values\n xticks_list = [tuple([float(j) for j in i.strip('([] ').split(',')]) for i in xticks_list]\n xticks_list = [[round(i[0], 4), round(i[1], 4)] for i in xticks_list]\n plt.xticks(x, xticks_list)\n\n package_path = os.path.abspath(os.path.dirname(os.path.dirname(__file__)))\n font_path = os.path.join(package_path, \"resources\", \"fonts\", \"simsun.ttc\")\n\n # noinspection PyBroadException\n try:\n zh_font = FontProperties(fname=font_path)\n except Exception:\n zh_font = FontProperties()\n\n if df_ref['Var_Type'].iloc[0] == 'categorical':\n xticks_list = df_ref['Var_Value'].apply(__get_list_str).tolist()\n plt.xticks(x, xticks_list, fontproperties=zh_font)\n\n plt.ylabel('proportion')\n plt.legend(loc=1, fontsize=9)\n ax2 = plt.twinx()\n plt.plot(x, y, '.-k', lw=2, markersize=8)\n\n for i, j, k in zip(x, y, z):\n ax2.annotate('%.2f(%.2f%%)' % (j, k * 100), xy=(i, j), va='center', ha='center',\n bbox={'boxstyle': 'round', 'fc': 'w'})\n\n plt.ylabel('Woe value(Target rate)')\n plt.title('{0}: IV={1}'.format(var_name, iv), fontproperties=zh_font, fontsize=15)\n\n # save image\n if save_path is not None:\n if save_path.endswith('.png') or save_path.endswith('.jpg'):\n plt.savefig(save_path, bbox_inches='tight')\n elif os.path.isdir(save_path):\n plt.savefig(os.path.join(save_path, '{0}.png'.format(var_name)), bbox_inches='tight')\n else:\n raise ValueError('No such file or directory: {0}'.format(save_path))\n\n plt.show()\n plt.close()\n\n\ndef numwoe_autobinning(df_master=None, var_name=None, target='target', max_bins=6,\n min_prop_in_bin=0.05, 
missing=True, max_bin_init=200,\n method='chisq', to_plot=True, save_path=None):\n \"\"\"Cut bins for numerical variable automatically, and calculate woe, iv value\n\n :param df_master: DataFrame\n :param var_name:\n :param target:\n :param max_bins: int, default 6\n :param min_prop_in_bin: float, default 0.05\n minimum sample ratio in each bin\n :param missing: bool, default True\n if missing is true, only support that cast `-1.0` into missing value now\n :param max_bin_init: int, default 200\n max num of bin when initialization cut, determined by the size of samples and unique value\n :param method: str, option ['chisq', 'entropy']\n methods of cut bins\n :param to_plot: bool, default True\n whether to plot reference table\n :param save_path: str, the path of image saved\n :return: df_ref_table: DataFrame, woe reference table\n \"\"\"\n # calculate size of samples for each bin\n min_samples_in_bin = int(df_master.shape[0] * min_prop_in_bin)\n\n if method == 'chisq':\n ds = init_binning(df_master, var_name=var_name, target=target, max_bin_num=max_bin_init, missing=missing)\n # calculate chi value of adjacent bins\n chisq = []\n\n for i in range(ds.shape[0] - 1):\n chisq.append(chi2_contingency(ds.iloc[[i, i + 1], [0, 1]])[0])\n\n chisq.append(9999999.0)\n ds['chisq'] = chisq\n\n # cut missing value into a single bin\n if missing:\n if ds[ds['bin_lb'] == -1.0].shape[0] > 0:\n ds_miss = ds[ds['bin_lb'] == -1.0].copy()\n ds = ds[ds['bin_lb'] != -1.0]\n else:\n ds_miss = pd.DataFrame()\n\n # merge two bins with small chi value\n ds_idx_list = list(ds.index)\n\n while (ds.shape[0] > max_bins) | (ds['chisq'].min() <= scipy.stats.chi2.ppf(0.95, 1)):\n # 找到卡方值最小的bin的index在index list中的位置\n k = ds_idx_list.index(ds['chisq'].idxmin())\n ds = __mergebin(ds=ds, idx_list=ds_idx_list, idx=k, var_type='numerical')\n\n # limit size of samples in each bin\n while (ds['total'].min() < min_samples_in_bin) & (ds.shape[0] > 2):\n # find the bin with minimum size of samples\n k = ds_idx_list.index(ds['total'].idxmin())\n\n # if chi value of previous bin < later bin, choose previous bin to merge\n if (k == len(ds_idx_list) - 1) | (\n ds.loc[ds_idx_list[k], 'chisq'] > ds.loc[ds_idx_list[k - 1], 'chisq']):\n k -= 1\n ds = __mergebin(ds, idx_list=ds_idx_list, idx=k, var_type='numerical')\n\n elif method == 'entropy':\n max_depth = int(math.log(max_bins, 2))\n\n if missing:\n df_miss = df_master.loc[df_master[var_name] == -1.0, [var_name, target]].reset_index(drop=True)\n df_no_miss = df_master.loc[df_master[var_name] != -1.0, [var_name, target]].reset_index(drop=True)\n\n if df_miss.shape[0] > 0:\n ds_miss = init_binning(df_master=df_miss, var_name=var_name, target=target,\n cut_points=[-1.0, -0.5], missing=False)\n else:\n ds_miss = pd.DataFrame()\n\n min_value = -0.5\n else:\n df_no_miss = df_master\n min_value = df_no_miss[var_name].min()\n\n clf = tree.DecisionTreeClassifier(criterion='entropy', max_depth=max_depth,\n min_samples_leaf=min_samples_in_bin)\n clf.fit(df_no_miss[var_name].values.reshape(-1, 1), df_no_miss[target])\n cut_points = np.sort(clf.tree_.threshold[clf.tree_.threshold != -2.0])\n cut_points = np.append([min_value], cut_points)\n cut_points = np.append(cut_points, df_no_miss[var_name].max())\n ds = init_binning(df_no_miss, var_name=var_name, target=target, cut_points=cut_points, missing=False)\n\n else:\n raise ValueError('wrong method, only choose \"chisq\" or \"entropy\"!')\n\n # generate final reference table\n if missing:\n ds = pd.concat([ds_miss, ds])\n\n ds = 
ds.reset_index(drop=True)\n df_ref_table = generate_reference(ds=ds, var_name=var_name, var_type='numerical')\n\n # plotting\n if to_plot:\n plot_reference(df_ref_table, save_path=save_path)\n\n return df_ref_table\n\n\ndef numwoe_aptbinning(df_master=None, var_name=None, target=None, bins=None, to_plot=True, save_path=None):\n \"\"\"Cut bins of numerical variables according to specified cut points, and calculate woe, iv value\n\n :param df_master: DataFrame\n :param var_name: str, variable name\n :param target: str, target name\n :param bins: list,\n specified cut points, eg. [0.0, 1.0, 3.0, 10.0]\n :param to_plot: bool, default True\n whether to plot reference table\n :param save_path: str, the path of image saved\n :return: df_ref_table: DataFrame, woe reference table\n \"\"\"\n ds = init_binning(df_master, var_name=var_name, target=target, cut_points=bins, missing=False)\n df_ref_table = generate_reference(ds, var_name=var_name, var_type='numerical')\n\n if to_plot:\n plot_reference(df_ref_table, save_path=save_path)\n\n return df_ref_table\n\n\ndef catwoe_autobinning(df_master=None, var_name=None, target=None,\n max_bins=6, min_prop_in_bin=0.05, min_samples_init=1,\n missing_value=None, to_plot=True, save_path=None):\n \"\"\"Cut bins of categorical variables according to specified cut points, and calculate woe, iv value\n\n :param: df_master: DataFrame\n :param: var_name: str, variable name\n :param: target: str, target name\n :param: max_bins: int\n :param: min_samples_init: int, default 1\n minimum size of cut bins on initialization, merge bins with small size of samples\n :param: min_prop_in_bin: float, default None\n minimum sample ratio in each bin\n :param: missing_value: str, default None\n if missing is true, will put the value into a single bin\n :param: to_plot: bool, default True\n whether to plot reference table\n :param: save_path: str, default None\n str, the path of image saved\n :return: df_ref_table: DataFrame, woe reference table\n \"\"\"\n min_samples_in_bin = int(df_master.shape[0] * min_prop_in_bin)\n ds = pd.crosstab(df_master[var_name], df_master[target]).fillna(value=0).reset_index(drop=False)\n ds['total'] = ds[1] + ds[0]\n # index for each bin\n ds['bin'] = [[i] for i in ds.index]\n ds.columns = ['value', '0', '1', 'total', 'bin']\n # generate a mapping dict of index and real value\n map_dict = dict(zip(ds.index, ds['value']))\n\n # whether to put missing value into a single bin\n if missing_value is not None:\n ds_miss = ds[ds['value'] == missing_value].copy()\n ds = ds[ds['value'] != missing_value]\n\n # sort the bin order by size of samples in each bin\n ds = ds.sort_values(by=['total'], ascending=True)\n\n # merge bins with small size of samples\n idx_small_bin = list(ds[ds['total'] < min_samples_init].index)\n\n if len(idx_small_bin) >= 2:\n ds.at[idx_small_bin[0], ['0', '1']] = ds.loc[idx_small_bin, ['0', '1']].sum()\n ds.at[idx_small_bin[0], 'total'] = ds.loc[idx_small_bin, 'total'].sum()\n ds.at[idx_small_bin[0], 'bin'] = idx_small_bin\n ds = ds.drop(idx_small_bin[1:], axis=0)\n\n # calculate target rate for each bin\n ds['target_rt'] = ds['1'] / (ds['0'] + ds['1'])\n # sort order by target rate of each bin\n ds = ds.sort_values(by='target_rt', ascending=True)\n\n # calculate chi value of two adjacent bins\n chisq = []\n\n for i in range(ds.shape[0] - 1):\n # noinspection PyBroadException\n try:\n chisq.append(chi2_contingency(ds.iloc[[i, i + 1], [1, 2]])[0])\n except Exception:\n chisq.append(chi2_contingency(ds.iloc[[i, i + 1], [0, 1]])[0])\n\n 
chisq.append(9999999.0)\n ds['chisq'] = chisq\n\n # loop of merging two adjacent bins, recalculate chi value of previous and later bin\n ds_idx_list = list(ds.index)\n\n while (ds.shape[0] > max_bins) | (ds.chisq.min() <= scipy.stats.chi2.ppf(0.95, 1)):\n k = ds_idx_list.index(ds['chisq'].idxmin())\n ds = __mergebin(ds, idx_list=ds_idx_list, idx=k, var_type='categorical')\n\n # limit size of samples for each bin\n while (ds['total'].min() < min_samples_in_bin) & (ds.shape[0] > 2):\n # find the bin with minimum size of samples\n k = ds_idx_list.index(ds['total'].idxmin())\n\n if (k == len(ds_idx_list) - 1) | (ds.loc[ds_idx_list[k], 'chisq'] > ds.loc[ds_idx_list[k - 1], 'chisq']):\n k -= 1\n\n ds = __mergebin(ds, idx_list=ds_idx_list, idx=k, var_type='categorical')\n\n # generate reference table\n if missing_value is not None:\n ds = pd.concat([ds_miss, ds])\n\n ds = ds.reset_index(drop=True)\n ds['bin'] = ds['bin'].apply(lambda x: __value_match(map_dict, x)) # 将索引还原成变量原本的取值\n ds['bin'] = ds['bin'].apply(lambda x: '\\001'.join(x)) # 用特殊符号'\\001'拼接value,防止出现value中有标点符号\n df_ref_table = generate_reference(ds, var_name=var_name, var_type='categorical')\n\n # plotting\n if to_plot:\n plot_reference(df_ref_table, save_path=save_path)\n\n return df_ref_table\n\n\ndef catwoe_aptbinning(df_master=None, var_name=None, target=None, bins=None, to_plot=True, save_path=None):\n \"\"\"Cut bins of categorical variables according to specified cut points, and calculate woe, iv value\n\n :param: df_master: DataFrame\n :param: var_name: str\n :param: target: str\n :param: bins: list\n rules of grouping, eg. [['初中', '高中'], ['大专', '本科', '硕士研究生'], ['博士研究生']]\n :param: to_plot: bool, default True\n whether to plot reference table\n :param: save_path: str, the path of image saved\n :return: df_ref_table: DataFrame, woe reference table\n \"\"\"\n unique_values = set(sum(bins, []))\n\n if len(unique_values) != len(sum(bins, [])):\n raise ValueError('Value is repetitive, please check bins is correct')\n\n ds = pd.crosstab(df_master[var_name], df_master[target]).fillna(value=0).reset_index(drop=False)\n ds['total'] = ds[1] + ds[0]\n ds.columns = ['bin', '0', '1', 'total']\n\n # cut bins according to the specified method\n for bin in bins:\n idx_list = []\n\n for value in bin:\n idx_list.append(int(ds[ds['bin'] == value].index.values))\n ds.at[idx_list[0], ['0', '1']] = ds.loc[idx_list, ['0', '1']].sum()\n ds.at[idx_list[0], 'total'] = ds.loc[idx_list, 'total'].sum()\n ds.at[idx_list[0], 'bin'] = bin\n ds = ds.drop(idx_list[1:], axis=0)\n\n ds = ds.reset_index(drop=True)\n\n # generate reference table\n ds['bin'] = ds['bin'].apply(lambda x: '\\001'.join(x)) # 用特殊符号'\\001'拼接value,防止出现value中有标点符号\n df_ref_table = generate_reference(ds, var_name=var_name, var_type='categorical')\n\n # plotting\n if to_plot:\n plot_reference(df_ref_table, save_path=save_path)\n\n return df_ref_table\n\n\ndef __str_convert(x):\n \"\"\"Cast types of variable into str\n\n :param x:\n :return:\n \"\"\"\n if type(x) in [int, float, np.float64]:\n return str(int(x))\n elif type(x) is str:\n return x\n else:\n return x\n\n\ndef __restore_list(s=None, value_type=None):\n \"\"\"Restore list with str format into original list\n eg:'[1, 2]' → [1,2]\n\n :param s: str, need to restored\n :param value_type: str, options ['numerical', 'categorical']\n type of value\n :return:\n \"\"\"\n if value_type == 'numerical':\n return [np.float(i.strip('[] ')) for i in s.split(',')]\n elif value_type == 'categorical':\n return s.split('\\001')\n else:\n raise 
ValueError('Wrong value type!')\n\n\ndef __cvlookup(value=None, map_dict=None):\n \"\"\"Find corresponding woe value of the variable\n\n :param value: str,\n :param map_dict: dict, mapping dict\n :return: woe_value: float, woe value\n \"\"\"\n if value in map_dict.keys():\n woe_value = map_dict[value]\n else:\n woe_value = 0.0\n\n return woe_value\n\n\ndef numwoe_apply(df_master=None, ref_table=None, var_name=None):\n \"\"\"Replace original value with woe value for single numerical variable\n if `bin_lb[i] ≤ X < bin_ub[i]` is meet, replace X with `ref_value[i]`\n\n :param: df_master: DataFrame\n :param: ref_table: DataFrame, reference table\n :param: var_name: str, variable name\n \"\"\"\n interval = ref_table['Var_Value'].apply(lambda x: __restore_list(x, 'numerical')).values\n # minimum\n bin_lb = np.asarray([i[0] for i in interval]).reshape((1, -1)) # bin的下界\n bin_lb[0][0] = -np.Infinity # 把第一个bin的下界替换成负无穷\n base_woe = ref_table['Ref_Value'].iloc[0] # 取第一个值作为baseline\n x = (df_master[var_name].values.reshape((-1, 1)) >= bin_lb) * 1.0\n w = ref_table['Ref_Value'].diff(periods=1).fillna(base_woe).values # 进行1阶差分\n df_master['nwoe_' + var_name] = np.dot(x, w)\n\n\ndef catwoe_apply(df_master=None, ref_table=None, var_name=None):\n \"\"\"Replace original value with woe value for single categorical variable\n\n :param: df_master: DataFrame\n :param: ref_table: DataFrame, reference table\n :param: var_name: str, variable name\n \"\"\"\n var_value = ref_table['Var_Value'].apply(lambda x: __restore_list(x, 'categorical')).values\n df_master[var_name] = df_master[var_name].apply(lambda x: __str_convert(x))\n\n # build a dict of values\n value_list = []\n ref_value_list = []\n\n for i, lst in enumerate(var_value):\n value_list += lst\n ref_value_list += [ref_table['Ref_Value'].iloc[i]] * len(lst)\n\n value_dict = dict(zip(value_list, ref_value_list))\n df_master['cwoe_' + var_name] = df_master[var_name].apply(lambda x: __cvlookup(x, value_dict))\n\n\ndef woeref_old2new(ref_woe=None):\n \"\"\"Replace old woe reference table with the new\n\n :param ref_woe: DataFrame\n old woe reference table\n :return: ref_new_woe: DataFrame\n new woe reference table\n \"\"\"\n ref_woe_copy = ref_woe.copy()\n ref_woe_copy = ref_woe_copy.loc[ref_woe_copy['Var_Value'] != 'base']\n ref_woe_copy['Total'] = ref_woe_copy['Count_0'] + ref_woe_copy['Count_1']\n ref_woe_copy = ref_woe_copy.rename(columns={'Ratio_1': 'Target_Rate', 'Ratio_All': 'Proportion'})\n ref_woe_copy['Var_Value'] = ref_woe_copy['Var_Value'].apply(lambda x: str(x).split('_'))\n\n ref_new_woe = ref_woe_copy.groupby(['Var_Name', 'Var_Type', 'IV', 'Ref_Value'], as_index=False, ) \\\n .agg({'Var_Value': 'sum', 'Count_0': 'sum', 'Count_1': 'sum', 'Total': 'sum', 'Proportion': 'sum', })\n\n ref_new_woe['Target_Rate'] = ref_new_woe['Count_1'] / ref_new_woe['Total']\n\n ref_new_woe['Var_Value'].loc[ref_new_woe['Var_Type'] == 'categorical'] = ref_new_woe['Var_Value'].loc[\n ref_new_woe['Var_Type'] == 'categorical'].apply(lambda x: \"\\001\".join(x))\n\n ref_new_woe['Bin_No'] = ref_new_woe[['Var_Name', 'Ref_Value']].groupby(['Var_Name']).rank(method='dense',\n ascending=True)\n ref_new_woe['Var_Value'] = ref_new_woe['Var_Value'].apply(lambda x: str(x).replace(\"'\", \"\"))\n\n return ref_new_woe\n\n\ndef iv_extract(woe_ref=None, save_path=None):\n \"\"\"Extract iv value according to woe reference table, order by desc\n\n :param woe_ref: DataFrame\n woe reference table\n :param save_path: str, default None\n the path of csv file saved\n :return: df_iv: 
DataFrame\n    \"\"\"\n    iv = []\n\n    for var in woe_ref.Var_Name.unique():\n        iv.append([var, woe_ref['IV'][woe_ref['Var_Name'] == var].iloc[0]])\n\n    df_iv = pd.DataFrame(iv, columns=['Var_Name', 'IV'])\n    df_iv = df_iv.sort_values(by='IV', ascending=False)\n\n    if save_path is not None:\n        df_iv.to_csv(save_path, index=False)\n\n    return df_iv\n\n\ndef replace_var_woe_ref(woe_ref_all=None, woe_ref_var=None):\n    \"\"\"Replace the specified reference table of variable in all with the new\n\n    :param: woe_ref_all: DataFrame\n        reference tables of all variables\n    :param: woe_ref_var: DataFrame\n        reference table of the specified variable\n    :return: woe_ref_new: DataFrame\n        new reference table\n    \"\"\"\n    replace_var = woe_ref_var['Var_Name'].unique()[0]\n    woe_ref_new = woe_ref_all.loc[woe_ref_all['Var_Name'] != replace_var, :].copy()\n    woe_ref_new = pd.concat([woe_ref_new, woe_ref_var], axis=0, ignore_index=True)\n\n    return woe_ref_new\n\n\ndef save_pictures_in_excel(file_name=None, img_loc=None, var_list=None, columns_num=1, img_rows=20, sheet_name='images'):\n    \"\"\"Save the image in excel\n\n    :param file_name: str, default xlsx format\n    :param img_loc: str, path of the image\n    :param var_list: list, default None\n        variables will be saved in excel, if None, will embed all images in excel\n    :param columns_num: int, default 1\n        columns for the image\n    :param img_rows: int, default 20\n        rows for the image\n    :param sheet_name:str, default 'images'\n        excel sheet name\n    :return:\n    \"\"\"\n\n    if not img_loc.endswith('/'):\n        img_loc = img_loc + '/'\n\n    book = xlsxwriter.Workbook(file_name)\n    sheet = book.add_worksheet(sheet_name)\n\n    # generate a list of images\n    if var_list is not None:\n        img_list = ['{0}.png'.format(var) for var in var_list]\n    else:\n        img_list = [img for img in os.listdir(img_loc) if img.split(\".\")[-1] in ['jpg', 'png', 'bmp']]\n\n    img_list.sort(reverse=True)\n\n    # list of A - Z\n    location_list = [i for i in string.ascii_uppercase]\n\n    for i, img in zip(range(len(img_list)), img_list):\n        location_x = location_list[(i % columns_num) * 2 + 1]\n\n        if i < columns_num:\n            # set 90px of width\n            sheet.set_column('{0}:{0}'.format(location_x), 90)\n\n        location_y = img_rows * (i // columns_num) + 1\n        sheet.write('{0}{1}'.format(location_x, location_y), img)\n        sheet.insert_image('{0}{1}'.format(location_x, location_y + 1), img_loc + img,\n                           options={'x_scale': 0.8, 'y_scale': 0.88})\n    book.close()\n"
] |
[
[
"matplotlib.pyplot.legend",
"numpy.dot",
"numpy.asarray",
"pandas.DataFrame",
"matplotlib.pyplot.plot",
"numpy.max",
"numpy.concatenate",
"sklearn.tree.DecisionTreeClassifier",
"numpy.digitize",
"pandas.crosstab",
"matplotlib.pyplot.twinx",
"numpy.arange",
"matplotlib.pyplot.close",
"matplotlib.pyplot.figure",
"pandas.concat",
"numpy.log",
"numpy.min",
"matplotlib.font_manager.FontProperties",
"matplotlib.pyplot.savefig",
"numpy.append",
"matplotlib.pyplot.show",
"matplotlib.pyplot.ylabel",
"numpy.sort",
"matplotlib.pyplot.bar",
"matplotlib.pyplot.xticks"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [
"0.23",
"0.21",
"2.0",
"1.4",
"1.3",
"0.19",
"1.1",
"1.5",
"0.24",
"0.20",
"1.0",
"0.25",
"1.2"
],
"scipy": [],
"tensorflow": []
}
] |
stevaras2/bert
|
[
"1efaa300eb91dea85c40de5e1586e8d2c94b89bb",
"1efaa300eb91dea85c40de5e1586e8d2c94b89bb"
] |
[
"extract_features.py",
"prepare_Fudan.py"
] |
[
"# coding=utf-8\n# Copyright 2018 The Google AI Language Team Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\"Extract pre-computed feature vectors from BERT.\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport codecs\nimport collections\nimport json\nimport re\nimport numpy as np\n\nimport modeling\nimport tokenization\nimport tensorflow as tf\n\nflags = tf.flags\n\nFLAGS = flags.FLAGS\n\nflags.DEFINE_string(\"input_file\", None, \"\")\n\nflags.DEFINE_string(\"output_file\", None, \"\")\n\nflags.DEFINE_string(\"layers\", \"-1,-2,-3,-4\", \"\")\n\nflags.DEFINE_string(\n \"bert_config_file\", None,\n \"The config json file corresponding to the pre-trained BERT model. \"\n \"This specifies the model architecture.\")\n\nflags.DEFINE_integer(\n \"max_seq_length\", 128,\n \"The maximum total input sequence length after WordPiece tokenization. \"\n \"Sequences longer than this will be truncated, and sequences shorter \"\n \"than this will be padded.\")\n\nflags.DEFINE_string(\n \"init_checkpoint\", None,\n \"Initial checkpoint (usually from a pre-trained BERT model).\")\n\nflags.DEFINE_string(\"vocab_file\", None,\n \"The vocabulary file that the BERT model was trained on.\")\n\nflags.DEFINE_bool(\n \"do_lower_case\", True,\n \"Whether to lower case the input text. Should be True for uncased \"\n \"models and False for cased models.\")\n\nflags.DEFINE_integer(\"batch_size\", 32, \"Batch size for predictions.\")\n\nflags.DEFINE_bool(\"use_tpu\", False, \"Whether to use TPU or GPU/CPU.\")\n\nflags.DEFINE_string(\"master\", None,\n \"If using a TPU, the address of the master.\")\n\nflags.DEFINE_integer(\n \"num_tpu_cores\", 8,\n \"Only used if `use_tpu` is True. Total number of TPU cores to use.\")\n\nflags.DEFINE_bool(\n \"use_one_hot_embeddings\", False,\n \"If True, tf.one_hot will be used for embedding lookups, otherwise \"\n \"tf.nn.embedding_lookup will be used. 
On TPUs, this should be True \"\n \"since it is much faster.\")\n\n\nclass InputExample(object):\n\n def __init__(self, unique_id, text_a, text_b):\n self.unique_id = unique_id\n self.text_a = text_a\n self.text_b = text_b\n\n\nclass InputFeatures(object):\n \"\"\"A single set of features of data.\"\"\"\n\n def __init__(self, unique_id, tokens, input_ids, input_mask, input_type_ids):\n self.unique_id = unique_id\n self.tokens = tokens\n self.input_ids = input_ids\n self.input_mask = input_mask\n self.input_type_ids = input_type_ids\n\n\ndef input_fn_builder(features, seq_length):\n \"\"\"Creates an `input_fn` closure to be passed to TPUEstimator.\"\"\"\n\n all_unique_ids = []\n all_input_ids = []\n all_input_mask = []\n all_input_type_ids = []\n\n for feature in features:\n all_unique_ids.append(feature.unique_id)\n all_input_ids.append(feature.input_ids)\n all_input_mask.append(feature.input_mask)\n all_input_type_ids.append(feature.input_type_ids)\n\n def input_fn(params):\n \"\"\"The actual input function.\"\"\"\n batch_size = params[\"batch_size\"]\n\n num_examples = len(features)\n\n # This is for demo purposes and does NOT scale to large data sets. We do\n # not use Dataset.from_generator() because that uses tf.py_func which is\n # not TPU compatible. The right way to load data is with TFRecordReader.\n d = tf.data.Dataset.from_tensor_slices({\n \"unique_ids\":\n tf.constant(all_unique_ids, shape=[num_examples], dtype=tf.int32),\n \"input_ids\":\n tf.constant(\n all_input_ids, shape=[num_examples, seq_length],\n dtype=tf.int32),\n \"input_mask\":\n tf.constant(\n all_input_mask,\n shape=[num_examples, seq_length],\n dtype=tf.int32),\n \"input_type_ids\":\n tf.constant(\n all_input_type_ids,\n shape=[num_examples, seq_length],\n dtype=tf.int32),\n })\n\n d = d.batch(batch_size=batch_size, drop_remainder=False)\n return d\n\n return input_fn\n\n\ndef model_fn_builder(bert_config, init_checkpoint, layer_indexes, use_tpu,\n use_one_hot_embeddings):\n \"\"\"Returns `model_fn` closure for TPUEstimator.\"\"\"\n\n def model_fn(features, labels, mode, params): # pylint: disable=unused-argument\n \"\"\"The `model_fn` for TPUEstimator.\"\"\"\n\n unique_ids = features[\"unique_ids\"]\n input_ids = features[\"input_ids\"]\n input_mask = features[\"input_mask\"]\n input_type_ids = features[\"input_type_ids\"]\n\n model = modeling.BertModel(\n config=bert_config,\n is_training=False,\n input_ids=input_ids,\n input_mask=input_mask,\n token_type_ids=input_type_ids,\n use_one_hot_embeddings=use_one_hot_embeddings)\n\n if mode != tf.estimator.ModeKeys.PREDICT:\n raise ValueError(\"Only PREDICT modes are supported: %s\" % (mode))\n\n tvars = tf.trainable_variables()\n scaffold_fn = None\n (assignment_map,\n initialized_variable_names) = modeling.get_assignment_map_from_checkpoint(\n tvars, init_checkpoint)\n if use_tpu:\n\n def tpu_scaffold():\n tf.train.init_from_checkpoint(init_checkpoint, assignment_map)\n return tf.train.Scaffold()\n\n scaffold_fn = tpu_scaffold\n else:\n tf.train.init_from_checkpoint(init_checkpoint, assignment_map)\n\n tf.logging.info(\"**** Trainable Variables ****\")\n for var in tvars:\n init_string = \"\"\n if var.name in initialized_variable_names:\n init_string = \", *INIT_FROM_CKPT*\"\n tf.logging.info(\" name = %s, shape = %s%s\", var.name, var.shape,\n init_string)\n\n all_layers = model.get_all_encoder_layers()\n\n predictions = {\n \"unique_id\": unique_ids,\n }\n\n for (i, layer_index) in enumerate(layer_indexes):\n predictions[\"layer_output_%d\" % i] = 
all_layers[layer_index]\n\n output_spec = tf.contrib.tpu.TPUEstimatorSpec(\n mode=mode, predictions=predictions, scaffold_fn=scaffold_fn)\n return output_spec\n\n return model_fn\n\n\ndef convert_examples_to_features(examples, seq_length, tokenizer):\n \"\"\"Loads a data file into a list of `InputBatch`s.\"\"\"\n\n features = []\n for (ex_index, example) in enumerate(examples):\n tokens_a = tokenizer.tokenize(example.text_a)\n\n tokens_b = None\n if example.text_b:\n tokens_b = tokenizer.tokenize(example.text_b)\n\n if tokens_b:\n # Modifies `tokens_a` and `tokens_b` in place so that the total\n # length is less than the specified length.\n # Account for [CLS], [SEP], [SEP] with \"- 3\"\n _truncate_seq_pair(tokens_a, tokens_b, seq_length - 3)\n else:\n # Account for [CLS] and [SEP] with \"- 2\"\n if len(tokens_a) > seq_length - 2:\n tokens_a = tokens_a[0:(seq_length - 2)]\n\n # The convention in BERT is:\n # (a) For sequence pairs:\n # tokens: [CLS] is this jack ##son ##ville ? [SEP] no it is not . [SEP]\n # type_ids: 0 0 0 0 0 0 0 0 1 1 1 1 1 1\n # (b) For single sequences:\n # tokens: [CLS] the dog is hairy . [SEP]\n # type_ids: 0 0 0 0 0 0 0\n #\n # Where \"type_ids\" are used to indicate whether this is the first\n # sequence or the second sequence. The embedding vectors for `type=0` and\n # `type=1` were learned during pre-training and are added to the wordpiece\n # embedding vector (and position vector). This is not *strictly* necessary\n # since the [SEP] token unambiguously separates the sequences, but it makes\n # it easier for the model to learn the concept of sequences.\n #\n # For classification tasks, the first vector (corresponding to [CLS]) is\n # used as as the \"sentence vector\". Note that this only makes sense because\n # the entire model is fine-tuned.\n tokens = []\n input_type_ids = []\n tokens.append(\"[CLS]\")\n input_type_ids.append(0)\n for token in tokens_a:\n tokens.append(token)\n input_type_ids.append(0)\n tokens.append(\"[SEP]\")\n input_type_ids.append(0)\n\n if tokens_b:\n for token in tokens_b:\n tokens.append(token)\n input_type_ids.append(1)\n tokens.append(\"[SEP]\")\n input_type_ids.append(1)\n\n input_ids = tokenizer.convert_tokens_to_ids(tokens)\n\n # The mask has 1 for real tokens and 0 for padding tokens. Only real\n # tokens are attended to.\n input_mask = [1] * len(input_ids)\n\n # Zero-pad up to the sequence length.\n while len(input_ids) < seq_length:\n input_ids.append(0)\n input_mask.append(0)\n input_type_ids.append(0)\n\n assert len(input_ids) == seq_length\n assert len(input_mask) == seq_length\n assert len(input_type_ids) == seq_length\n\n if ex_index < 5:\n tf.logging.info(\"*** Example ***\")\n tf.logging.info(\"unique_id: %s\" % (example.unique_id))\n tf.logging.info(\"tokens: %s\" % \" \".join(\n [tokenization.printable_text(x) for x in tokens]))\n tf.logging.info(\"input_ids: %s\" % \" \".join([str(x) for x in input_ids]))\n tf.logging.info(\"input_mask: %s\" % \" \".join([str(x) for x in input_mask]))\n tf.logging.info(\n \"input_type_ids: %s\" % \" \".join([str(x) for x in input_type_ids]))\n\n features.append(\n InputFeatures(\n unique_id=example.unique_id,\n tokens=tokens,\n input_ids=input_ids,\n input_mask=input_mask,\n input_type_ids=input_type_ids))\n return features\n\n\ndef _truncate_seq_pair(tokens_a, tokens_b, max_length):\n \"\"\"Truncates a sequence pair in place to the maximum length.\"\"\"\n\n # This is a simple heuristic which will always truncate the longer sequence\n # one token at a time. 
This makes more sense than truncating an equal percent\n # of tokens from each, since if one sequence is very short then each token\n # that's truncated likely contains more information than a longer sequence.\n while True:\n total_length = len(tokens_a) + len(tokens_b)\n if total_length <= max_length:\n break\n if len(tokens_a) > len(tokens_b):\n tokens_a.pop()\n else:\n tokens_b.pop()\n\n\ndef read_examples(input_file):\n \"\"\"Read a list of `InputExample`s from an input file.\"\"\"\n examples = []\n unique_id = 0\n with tf.gfile.GFile(input_file, \"r\") as reader:\n while True:\n print(input_file)\n line = tokenization.convert_to_unicode(reader.readline())\n if not line:\n break\n line = line.strip()\n text_a = None\n text_b = None\n m = re.match(r\"^(.*) \\|\\|\\| (.*)$\", line)\n if m is None:\n text_a = line\n else:\n text_a = m.group(1)\n text_b = m.group(2)\n examples.append(\n InputExample(unique_id=unique_id, text_a=text_a, text_b=text_b))\n unique_id += 1\n return examples\n\n\ndef main(_):\n tf.logging.set_verbosity(tf.logging.INFO)\n\n layer_indexes = [int(x) for x in FLAGS.layers.split(\",\")]\n\n bert_config = modeling.BertConfig.from_json_file(FLAGS.bert_config_file)\n\n tokenizer = tokenization.FullTokenizer(\n vocab_file=FLAGS.vocab_file, do_lower_case=FLAGS.do_lower_case)\n\n is_per_host = tf.contrib.tpu.InputPipelineConfig.PER_HOST_V2\n run_config = tf.contrib.tpu.RunConfig(\n master=FLAGS.master,\n tpu_config=tf.contrib.tpu.TPUConfig(\n num_shards=FLAGS.num_tpu_cores,\n per_host_input_for_training=is_per_host))\n\n print(FLAGS.input_file)\n print(FLAGS.output_file)\n examples = read_examples(FLAGS.input_file)\n\n features = convert_examples_to_features(\n examples=examples, seq_length=FLAGS.max_seq_length, tokenizer=tokenizer)\n\n unique_id_to_feature = {}\n for feature in features:\n unique_id_to_feature[feature.unique_id] = feature\n\n model_fn = model_fn_builder(\n bert_config=bert_config,\n init_checkpoint=FLAGS.init_checkpoint,\n layer_indexes=layer_indexes,\n use_tpu=FLAGS.use_tpu,\n use_one_hot_embeddings=FLAGS.use_one_hot_embeddings)\n\n # If TPU is not available, this will fall back to normal Estimator on CPU\n # or GPU.\n estimator = tf.contrib.tpu.TPUEstimator(\n use_tpu=FLAGS.use_tpu,\n model_fn=model_fn,\n config=run_config,\n predict_batch_size=FLAGS.batch_size)\n\n input_fn = input_fn_builder(\n features=features, seq_length=FLAGS.max_seq_length)\n\n with codecs.getwriter(\"utf-8\")(tf.gfile.Open(FLAGS.output_file,\n \"w\")) as writer:\n for result in estimator.predict(input_fn, yield_single_examples=True):\n unique_id = int(result[\"unique_id\"])\n feature = unique_id_to_feature[unique_id]\n output_json = collections.OrderedDict()\n output_json[\"linex_index\"] = unique_id\n all_features = []\n for (i, token) in enumerate(feature.tokens):\n all_layers = []\n for (j, layer_index) in enumerate(layer_indexes):\n layer_output = result[\"layer_output_%d\" % j]\n layers = collections.OrderedDict()\n layers[\"index\"] = layer_index\n layers[\"values\"] = [\n round(float(x), 6) for x in layer_output[i:(i + 1)].flat\n ]\n all_layers.append(layers)\n features = collections.OrderedDict()\n features[\"token\"] = token\n features[\"layers\"] = all_layers\n all_features.append(features)\n\n sentence_emb = np.zeros(768)\n average_sentence_emb = np.zeros(768)\n sentence_size = 0\n sentence = \"\"\n for feat in all_features:\n average_sentence_emb += np.asarray(feat['layers'][0]['values'])\n sentence_size += 1\n sentence += feat['token']+\" \"\n\n average_sentence_emb 
/= sentence_size\n output_json[\"sentence\"] = sentence[:-1]\n output_json[\"features\"] = list(average_sentence_emb)\n writer.write(json.dumps(output_json) + \"\\n\")\n\n\n\nif __name__ == \"__main__\":\n flags.mark_flag_as_required(\"input_file\")\n flags.mark_flag_as_required(\"vocab_file\")\n flags.mark_flag_as_required(\"bert_config_file\")\n flags.mark_flag_as_required(\"init_checkpoint\")\n flags.mark_flag_as_required(\"output_file\")\n tf.app.run()\n",
"from xml.etree import ElementTree\nimport os\nfrom nltk import sent_tokenize,word_tokenize\nimport re\nimport pandas as pd\n\ndef get_text_per_section(train_papers):\n xml_files = os.listdir(train_papers)\n dict_of_sections = dict()\n for f in xml_files:\n print(f)\n file = ElementTree.parse(train_papers+\"/\"+f)\n #root = file.getroot()\n sections = dict()\n\n for div in file.getiterator(tag=\"{http://www.tei-c.org/ns/1.0}div\"):\n\n sec = \"\"\n for head in div.iter('{http://www.tei-c.org/ns/1.0}head'):\n if head.text not in sections:\n sections[head.text] = \"\"\n sec = head.text\n\n text = \"\"\n for p in div.iter('{http://www.tei-c.org/ns/1.0}p'):\n text += p.text\n sections[sec] = text\n dict_of_sections[f] = sections\n\n return dict_of_sections\n\ndef create_dataset(train_papers,dataset_path):\n\n sections_per_text = get_text_per_section(train_papers)\n\n\n train_dataset = dict()# dictionary that will be use for the fine-tune of the BERT model in our data.\n #Key:The first three sentences of each section. Value: Each sentence of the section\n clipping_id_list = list()\n sentence_id_list = list()\n clip_sections = list()\n sentence_list = list()\n clipping_id = 0\n sentence_id = 0\n for file,sections in sections_per_text.items():\n\n for section,text in sections.items():\n\n sen_num = 0\n clipping = \"\"\n for sentence in sent_tokenize(text):\n clipping += sentence + \" \"\n sen_num += 1\n if sen_num > 2:\n break\n\n clipping = re.sub(r'[^A-Za-z0-9]+',\" \",clipping.strip()).lower()\n for sentence in sent_tokenize(text):\n\n sentence = re.sub(r'[^A-Za-z0-9]+',\" \",sentence).lower()\n if len(word_tokenize(sentence)) > 2:\n if clipping not in train_dataset:\n train_dataset[clipping] = [sentence]\n else:\n train_dataset[clipping].append(sentence)\n\n clipping_id_list.append(clipping_id)\n sentence_id_list.append(sentence_id)\n sentence_id += 1\n clip_sections.append(clipping)\n sentence_list.append(sentence)\n\n clipping_id += 1\n\n dataset = pd.read_csv(dataset_path)\n\n dataset_sentences = dict()\n labels_list = list()\n error = 0\n for row in dataset.iterrows():\n\n dataset_sentences[row[1][0]] = row[1][5]\n if row[1][0] not in sentence_list:\n error += 1\n\n er = 0\n for sentence in sentence_list:\n\n if sentence not in dataset_sentences:\n labels_list.append(0)\n er += 1\n else:\n labels_list.append(dataset_sentences[sentence])\n\n dataset_for_fine_tune = pd.DataFrame()\n dataset_for_fine_tune['Label'] = labels_list\n dataset_for_fine_tune['id1'] = clipping_id_list\n dataset_for_fine_tune['id2'] = sentence_id_list\n dataset_for_fine_tune['clipping'] = clip_sections\n dataset_for_fine_tune['sentence'] = sentence_list\n\n dataset_for_fine_tune.iloc[:, :].to_csv(os.path.join(\"Fudan\", \"full_dataset.tsv\"), index=None, sep=\"\\t\")\n dataset_for_fine_tune.iloc[:5500, :].to_csv(os.path.join(\"Fudan\", \"train.tsv\"), index=None, sep=\"\\t\")\n dataset_for_fine_tune.iloc[5501:, :].to_csv(os.path.join(\"Fudan\", \"dev.tsv\"), index=None, sep=\"\\t\")\n dataset_for_fine_tune.iloc[5501:, :].to_csv(os.path.join(\"Fudan\", \"test.tsv\"), index=None, sep=\"\\t\")\n\n\n\ndef create_another_dataset(dataset_path):\n\n\n dataset = pd.read_csv(dataset_path)\n textA_list = list()\n textB_list = list()\n label_list = list()\n features_id_list = list()\n sentence_id_list = list()\n id = 0\n for row in dataset.iterrows():\n textA = \"\"\n\n textA += str(row[1][0]) + \".\" + str(row[1][1]) + \".\"+ str(row[1][2]) + \".\" + str(row[1][3])\n textB = str(row[1][0])\n textA_list.append(textA)\n 
textB_list.append(textB)\n label_list.append(row[1][5])\n sentence_id_list.append(id)\n f_id = 100000 + id\n features_id_list.append(f_id)\n id += 1\n\n dataset_for_fine_tune = pd.DataFrame()\n dataset_for_fine_tune['Label'] = label_list\n dataset_for_fine_tune['id1'] = features_id_list\n dataset_for_fine_tune['id2'] = sentence_id_list\n dataset_for_fine_tune['features'] = textA_list\n dataset_for_fine_tune['sentence'] = textB_list\n\n\n dataset_for_fine_tune.iloc[:5500, :].to_csv(os.path.join(\"Fudan1\", \"train.tsv\"), index=None, sep=\"\\t\")\n dataset_for_fine_tune.iloc[5501:, :].to_csv(os.path.join(\"Fudan1\", \"dev.tsv\"), index=None, sep=\"\\t\")\n dataset_for_fine_tune.iloc[5501:, :].to_csv(os.path.join(\"Fudan1\", \"test.tsv\"), index=None, sep=\"\\t\")\n\n return dataset_for_fine_tune\n\n#dataset = create_another_dataset('train_sentences1.csv')\n#create_dataset('train_papers','train_sentences1.csv')\ncreate_dataset('C:/Users/user/PycharmProjects/other_dataset/train_papers_xml','C:/Users/user/PycharmProjects/other_dataset/train_sentences1.csv')\ncreate_another_dataset('C:/Users/user/PycharmProjects/other_dataset/train_sentences1.csv')"
] |
[
[
"tensorflow.contrib.tpu.TPUEstimator",
"tensorflow.train.Scaffold",
"tensorflow.constant",
"tensorflow.gfile.Open",
"numpy.asarray",
"tensorflow.gfile.GFile",
"tensorflow.train.init_from_checkpoint",
"tensorflow.logging.info",
"tensorflow.logging.set_verbosity",
"tensorflow.contrib.tpu.TPUConfig",
"tensorflow.trainable_variables",
"numpy.zeros",
"tensorflow.contrib.tpu.TPUEstimatorSpec",
"tensorflow.app.run"
],
[
"pandas.read_csv",
"pandas.DataFrame"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": [
"1.10"
]
},
{
"matplotlib": [],
"numpy": [],
"pandas": [
"2.0",
"1.4",
"1.3",
"1.1",
"1.5",
"1.2"
],
"scipy": [],
"tensorflow": []
}
] |
tlechauveCLS/kpi_mpc
|
[
"4dc61d210c2b97e6ac240e54a8d96c35cf9123de"
] |
[
"src/kpi_WV_nrcs/reader_nasa_gsfc_distance_to_coast_super_light.py"
] |
[
"\"\"\"\nAntoine Grouazel\nNov 2019\n\"\"\"\nimport netCDF4\nimport numpy as np\nimport logging\nfrom src.config import RASTER_NASA_COASTLINE\nnc = netCDF4.Dataset(RASTER_NASA_COASTLINE)\nDISTANCE_COASTs = nc.variables['distance_to_coast'][:]\nLON_COASTs = nc.variables['lon'][:]\nLAT_COASTs = nc.variables['lat'][:]\nnc.close()\n\n\ndef latlon2ij ( lat,lon,shape2D,llbox ) :\n \"\"\"\n convert lat,lon into i,j index\n args:\n lat (float or 1D nd.array):\n lon (float or 1D nd.array):\n shape2D (tuple): (10,20) for instance\n llbox (tuple): latmin, lonmin, latmax,lonmax\n \"\"\"\n logging.debug('input lat latlon2ij | %s',lat)\n latmin,lonmin,latmax,lonmax = llbox\n if isinstance(lat,float) or isinstance(lat,int) :\n lat = np.array([lat])\n if isinstance(lon,float) or isinstance(lon,int) :\n lon = np.array([lon])\n dlon = lonmax - lonmin\n dlat = latmax - latmin\n logging.debug('dlon = %s',dlon)\n logging.debug('dlat = %s',dlat)\n logging.debug('shape2D = %s',shape2D)\n logging.debug('lat type %s %s',type(lat),lat)\n logging.debug('lat range %s %s',lat.min(),lat.max())\n logging.debug('dlat %s shapz %s',dlat,shape2D)\n logging.debug('itest %s',np.floor((lat - latmin) * shape2D[0] / dlat))\n i = np.floor((lat - latmin) * shape2D[0] / dlat).astype(\n int) # changed May 2019 after founding a bug with B. Coatanea where indices can reach the maximum value of the shape... (agrouaze)\n j = np.floor((lon - lonmin) * shape2D[1] / dlon).astype(int)\n\n return i,j\n\ndef get_distance_to_coast_vecto(lons,lats):\n llbox=(LAT_COASTs[0],LON_COASTs[0],LAT_COASTs[-1],LON_COASTs[-1])\n indlat,indlon= latlon2ij(lats,lons,np.shape(DISTANCE_COASTs),llbox)\n indlat[(indlat>=DISTANCE_COASTs.shape[0])] = DISTANCE_COASTs.shape[0]-1\n indlon[(indlon>=DISTANCE_COASTs.shape[1])] = DISTANCE_COASTs.shape[1]-1\n dsts = DISTANCE_COASTs[indlat,indlon]\n diff_lon = lons-LON_COASTs[indlon]\n diff_lat = lats-LAT_COASTs[indlat]\n return dsts"
] |
[
[
"numpy.array",
"numpy.shape",
"numpy.floor"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
MehdiZouitine/Learning-Disentangled-Representations-via-Mutual-Information-Estimation
|
[
"52952aff647a33b749b709cd7f0c3cd059c66b54"
] |
[
"src/losses/loss_functions.py"
] |
[
"import torch\nimport torch.nn as nn\nimport torch.nn.functional as F\n\nfrom src.utils.custom_typing import GanLossOutput, Tuple\n\n\nclass ClassifLoss(nn.Module):\n \"\"\"Classifier loss\"\"\"\n\n @staticmethod\n def accuracy(y_pred, target):\n return torch.sum(y_pred == target).float().mean()\n\n def __init__(self):\n super().__init__()\n self.cross_entropy = nn.CrossEntropyLoss()\n\n def __call__(\n self, y_pred: torch.Tensor, target: torch.Tensor\n ) -> Tuple[float, float]:\n \"\"\"Compute cross entropy loss\n\n Args:\n y_pred (torch.Tensor): Classifier prediction\n target (torch.Tensor): Ground truth\n\n Returns:\n Tuple[float, float]: Error and accuracy over the current batch\n \"\"\"\n batch_size = y_pred.size(0)\n\n classif_error = self.cross_entropy(\n F.softmax(y_pred, dim=1), target.long()\n ).mean()\n accuracy = self.accuracy(y_pred=torch.argmax(y_pred, dim=1), target=target)\n return classif_error, accuracy / batch_size\n\n\nclass DJSLoss(nn.Module):\n \"\"\"Jensen Shannon Divergence loss\"\"\"\n\n def __init__(self) -> None:\n super().__init__()\n\n def __call__(self, T: torch.Tensor, T_prime: torch.Tensor) -> float:\n \"\"\"Estimator of the Jensen Shannon Divergence see paper equation (2)\n\n Args:\n T (torch.Tensor): Statistique network estimation from the marginal distribution P(x)P(z)\n T_prime (torch.Tensor): Statistique network estimation from the joint distribution P(xz)\n\n Returns:\n float: DJS estimation value\n \"\"\"\n joint_expectation = (-F.softplus(-T)).mean()\n marginal_expectation = F.softplus(T_prime).mean()\n mutual_info = joint_expectation - marginal_expectation\n\n return -mutual_info\n\n\nclass DiscriminatorLoss(nn.Module):\n \"\"\"Basic discriminator GAN loss \"\"\"\n\n def __init__(self) -> None:\n super().__init__()\n\n def __call__(self, real_logits: torch.Tensor, fake_logits: torch.Tensor) -> float:\n \"\"\"Discriminator loss gan\n\n Args:\n real_logits (torch.Tensor): Sample from the real distribution here from P(Sx)P(Ex)\n fake_logits (torch.Tensor): Sample from the fake (generated) distribution here from P(SxEx)\n\n Returns:\n float: Discriminator loss value\n \"\"\"\n\n # Discriminator should predict real logits as logits from the real distribution\n discriminator_real = F.binary_cross_entropy_with_logits(\n input=real_logits, target=torch.ones_like(real_logits)\n )\n # Discriminator should predict fake logits as logits from the generated distribution\n discriminator_fake = F.binary_cross_entropy_with_logits(\n input=fake_logits, target=torch.zeros_like(fake_logits)\n )\n discriminator_loss = discriminator_real.mean() + discriminator_fake.mean()\n\n return discriminator_loss\n\n\nclass GeneratorLoss(nn.Module):\n \"\"\"Basic generator GAN loss \"\"\"\n\n def __init__(self) -> None:\n super().__init__()\n\n def __call__(self, fake_logits: torch.Tensor) -> float:\n \"\"\"Generator loss\n\n Args:\n fake_logits (torch.Tensor): Sample from the fake (generated) distribution here from P(SxEx)\n\n Returns:\n float: Generator loss value\n \"\"\"\n # Discriminator should generate fake logits that fool the discriminator\n generator_loss = F.binary_cross_entropy_with_logits(\n input=fake_logits, target=torch.ones_like(fake_logits)\n )\n return generator_loss\n"
] |
[
[
"torch.nn.CrossEntropyLoss",
"torch.nn.functional.softmax",
"torch.zeros_like",
"torch.sum",
"torch.nn.functional.softplus",
"torch.ones_like",
"torch.argmax"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
artem-oppermann/Deep-Autoencoders-For-Collaborative-Filtering
|
[
"ae3b9592371ea2ca2fcd16551a051837309c4f14",
"ae3b9592371ea2ca2fcd16551a051837309c4f14"
] |
[
"src/data/dataset.py",
"src/data/preprocess_data.py"
] |
[
"import tensorflow as tf\nimport os\n\n\ndef _get_training_data(FLAGS): \n ''' Buildind the input pipeline for training and inference using TFRecords files.\n @return data only for the training\n @return data for the inference\n '''\n \n filenames=[FLAGS.tf_records_train_path+'/'+f for f in os.listdir(FLAGS.tf_records_train_path)]\n \n dataset = tf.data.TFRecordDataset(filenames)\n dataset = dataset.map(parse)\n dataset = dataset.shuffle(buffer_size=1)\n dataset = dataset.repeat()\n dataset = dataset.batch(FLAGS.batch_size)\n dataset = dataset.prefetch(buffer_size=1)\n \n dataset2 = tf.data.TFRecordDataset(filenames)\n dataset2 = dataset2.map(parse)\n dataset2 = dataset2.shuffle(buffer_size=1)\n dataset2 = dataset2.repeat()\n dataset2 = dataset2.batch(1)\n dataset2 = dataset2.prefetch(buffer_size=1)\n \n return dataset, dataset2\n \n\ndef _get_test_data(FLAGS):\n ''' Buildind the input pipeline for test data.'''\n \n filenames=[FLAGS.tf_records_test_path+'/'+f for f in os.listdir(FLAGS.tf_records_test_path)]\n \n dataset = tf.data.TFRecordDataset(filenames)\n dataset = dataset.map(parse)\n dataset = dataset.shuffle(buffer_size=1)\n dataset = dataset.repeat()\n dataset = dataset.batch(1)\n dataset = dataset.prefetch(buffer_size=1)\n \n return dataset\n\n\ndef parse(serialized):\n ''' Parser fot the TFRecords file.'''\n \n features={'movie_ratings':tf.FixedLenFeature([3952], tf.float32), \n }\n parsed_example=tf.parse_single_example(serialized,\n features=features,\n )\n movie_ratings = tf.cast(parsed_example['movie_ratings'], tf.float32)\n \n return movie_ratings",
"import pandas as pd\nimport numpy as np\nimport gc\nimport os\nfrom pathlib import Path\n\np = Path(__file__).parents[1]\n\nROOT_DIR=os.path.abspath(os.path.join(p, '..', 'data/raw/'))\n\ndef convert(data, num_users, num_movies):\n ''' Making a User-Movie-Matrix'''\n \n new_data=[]\n \n for id_user in range(1, num_users+1):\n \n id_movie=data[:,1][data[:,0]==id_user]\n id_rating=data[:,2][data[:,0]==id_user]\n ratings=np.zeros(num_movies, dtype=np.uint32)\n ratings[id_movie-1]=id_rating\n if sum(ratings)==0:\n continue\n new_data.append(ratings)\n\n del id_movie\n del id_rating\n del ratings\n \n return new_data\n\ndef get_dataset_1M():\n ''' For each train.dat and test.dat making a User-Movie-Matrix'''\n \n gc.enable()\n \n training_set=pd.read_csv(ROOT_DIR+'/ml-1m/train.dat', sep='::', header=None, engine='python', encoding='latin-1')\n training_set=np.array(training_set, dtype=np.uint32)\n \n test_set=pd.read_csv(ROOT_DIR+'/ml-1m/test.dat', sep='::', header=None, engine='python', encoding='latin-1')\n test_set=np.array(test_set, dtype=np.uint32)\n \n \n num_users=int(max(max(training_set[:,0]), max(test_set[:,0])))\n num_movies=int(max(max(training_set[:,1]), max(test_set[:,1])))\n\n training_set=convert(training_set,num_users, num_movies)\n test_set=convert(test_set,num_users, num_movies)\n \n return training_set, test_set\n \n\n\ndef _get_dataset():\n\n return get_dataset_1M()\n"
] |
[
[
"tensorflow.cast",
"tensorflow.data.TFRecordDataset",
"tensorflow.parse_single_example",
"tensorflow.FixedLenFeature"
],
[
"numpy.array",
"pandas.read_csv",
"numpy.zeros"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": [
"1.10"
]
},
{
"matplotlib": [],
"numpy": [],
"pandas": [
"2.0",
"1.4",
"1.1",
"1.5",
"1.2",
"1.3"
],
"scipy": [],
"tensorflow": []
}
] |
gitcheng/probfit
|
[
"820ce9ee4eee80bbd950be0fbe074b84afc423f8",
"820ce9ee4eee80bbd950be0fbe074b84afc423f8"
] |
[
"doc/pyplots/costfunc/simul.py",
"probfit/plotting.py"
] |
[
"from iminuit import Minuit\nfrom probfit import UnbinnedLH, gaussian, SimultaneousFit, rename\nfrom matplotlib import pyplot as plt\nfrom numpy.random import randn, seed\n\nseed(0)\nwidth = 2.\ndata1 = randn(1000)*width + 1\ndata2 = randn(1000)*width + 2\n\n#two gaussian with shared width\npdf1 = rename(gaussian, ('x', 'mu_1', 'sigma'))\npdf2 = rename(gaussian, ('x', 'mu_2', 'sigma'))\n\nlh1 = UnbinnedLH(pdf1, data1)\nlh2 = UnbinnedLH(pdf2, data2)\n\nsimlh = SimultaneousFit(lh1, lh2)\n\nm = Minuit(simlh, mu_1=1.2, mu_2=2.2, sigma=1.5)\n\nplt.figure(figsize=(8, 3))\nplt.subplot(211)\nsimlh.draw(m)\nplt.suptitle('Before')\n\nm.migrad() # fit\n\nplt.figure(figsize=(8, 3))\nplt.subplot(212)\nsimlh.draw(m)\nplt.suptitle('After')\n",
" # -*- coding: utf-8 -*-\n#Plotting is on python since this will make it much easier to debug and adjsut\n#no need to recompile everytime i change graph color....\n\n#needs a serious refactor\nfrom matplotlib import pyplot as plt\nimport numpy as np\nfrom .nputil import mid, minmax, vector_apply\nfrom util import parse_arg, describe\nfrom math import sqrt, ceil, floor\nfrom warnings import warn\n\ndef draw_simultaneous(self, minuit=None, args=None, errors=None, **kwds):\n numf = len(self.allf)\n ret = []\n numraw = sqrt(numf)\n numcol = ceil(numraw)\n numrow = floor(numraw) if floor(numraw)*numcol>=numf else ceil(numraw)\n\n for i in range(numf):\n plt.subplot(numrow, numcol, i+1)\n part_args, part_errors = self.args_and_error_for(i, minuit, args, errors)\n ret.append(self.allf[i].draw(args=part_args, errors=part_errors, **kwds))\n\n return ret\n\ndef _get_args_and_errors(self, minuit=None, args=None, errors=None):\n \"\"\"\n consistent algorithm to get argument and errors\n 1) get it from minuit if minuit is available\n 2) if not get it from args and errors\n 2.1) if args is dict parse it.\n 3) if all else fail get it from self.last_arg\n \"\"\"\n ret_arg = None\n ret_error = None\n if minuit is not None: # case 1\n ret_arg = minuit.args\n ret_error = minuit.errors\n return ret_arg, ret_error\n\n #no minuit specified use args and errors\n if args is not None:\n if isinstance(args, dict):\n ret_arg = parse_arg(self, args)\n else:\n ret_arg = args\n else: # case 3\n ret_arg = self.last_arg\n\n if errors is not None:\n ret_error = errors\n\n return ret_arg, ret_error\n\n\ndef _param_text(parameters, arg, error):\n txt = u''\n for i, (k, v) in enumerate(zip(parameters, arg)):\n txt += u'%s = %5.4g'%(k, v)\n if error is not None:\n txt += u'±%5.4g'%error[k]\n txt += u'\\n'\n return txt\n\n#from UML\ndef draw_ulh(self, minuit=None, bins=100, ax=None, bound=None,\n parmloc=(0.05, 0.95), nfbins=200, print_par=True, grid=True,\n args=None, errors=None, parts=False, show_errbars='normal'):\n \n data_ret = None\n error_ret = None\n total_ret = None\n part_ret = []\n\n ax = plt.gca() if ax is None else ax\n\n arg, error = _get_args_and_errors(self, minuit, args, errors)\n\n n,e= np.histogram(self.data, bins=bins, range=bound, weights=self.weights)\n dataint= (n*np.diff(e)).sum()\n data_ret = (e, n)\n\n if not show_errbars:\n pp= ax.hist(mid(e), bins=e, weights=n, histtype='step')\n error_ret = (np.sqrt(n), np.sqrt(n))\n else:\n w2= None\n if show_errbars=='normal':\n w2=n\n error_ret = (np.sqrt(n), np.sqrt(n))\n elif show_errbars=='sumw2':\n weights= None\n if self.weights!= None:\n weights= self.weights**2\n w2,e= np.histogram(self.data, bins=e, weights=weights)\n error_ret = (np.sqrt(w2), np.sqrt(w2))\n else:\n raise ValueError('show_errbars must be \\'normal\\' or \\'sumw2\\'')\n\n pp= ax.errorbar(mid(e), n, np.sqrt(w2) , fmt='b+', capsize=0)\n\n #bound = (e[0], e[-1])\n draw_arg = [('lw', 2)]\n if not parts:\n draw_arg.append(('color', 'r'))\n\n # Draw pdf with finer bins\n ef= np.linspace(e[0],e[-1], nfbins+1)\n scale= dataint if not self.extended else nfbins/float(bins)\n total_ret = draw_pdf_with_edges(self.f, arg, ef, ax=ax, density=not self.extended, scale=scale,\n **dict(draw_arg))\n\n if parts:\n f_parts = getattr(self.f, 'parts', None)\n if f_parts is not None:\n for p in f_parts():\n ret = draw_pdf_with_edges(p, arg, ef, ax=ax, scale=scale, density=not self.extended)\n part_ret.append(ret)\n ax.grid(grid)\n\n txt = _param_text(describe(self), arg, error)\n if print_par:\n 
ax.text(parmloc[0], parmloc[1], txt, ha='left', va='top',\n transform=ax.transAxes)\n return (data_ret, error_ret, total_ret, part_ret)\n\ndef draw_residual_ulh(self, minuit=None, bins=100, ax=None, bound=None,\n parmloc=(0.05, 0.95), print_par=False, grid=True,\n args=None, errors=None, show_errbars=True,\n errbar_algo='normal', norm=False):\n\n ax = plt.gca() if ax is None else ax\n\n arg, error = _get_args_and_errors(self, minuit, args, errors)\n\n n,e= np.histogram(self.data, bins=bins, range=bound, weights=self.weights)\n dataint= (n*np.diff(e)).sum()\n scale= dataint if not self.extended else 1.0\n w2= None\n if errbar_algo=='normal':\n w2=n\n elif errbar_algo=='sumw2':\n weights= None\n if self.weights!= None:\n weights= self.weights**2\n w2,e= np.histogram(self.data, bins=e, weights=weights)\n else:\n raise ValueError('errbar_algo must be \\'normal\\' or \\'sumw2\\'')\n yerr= np.sqrt(w2)\n\n arg = parse_arg(self.f, arg, 1) if isinstance(arg, dict) else arg\n yf = vector_apply(self.f, mid(e), *arg)\n yf*= (scale*np.diff(e) if self.extended else scale)\n n = n- yf\n if norm:\n sel= yerr>0\n n[sel]/= yerr[sel]\n yerr= np.ones(len(yerr))\n\n if show_errbars:\n pp= ax.errorbar(mid(e), n, yerr , fmt='b+', capsize=0)\n else: # No errorbars\n pp= ax.bar(e[:-1], n, width=np.diff(e))\n\n #bound = (e[0], e[-1])\n #draw_arg = [('lw', 2), ('color', 'r')]\n ax.plot([e[0],e[-1]],[0.,0.], 'r-')\n\n ax.grid(grid)\n\n txt = _param_text(describe(self), arg, error)\n if print_par:\n ax.text(parmloc[0], parmloc[1], txt, ha='left', va='top',\n transform=ax.transAxes)\n\n return mid(e), n, yerr\n\n#from chi2 regression\ndef draw_x2(self, minuit=None, ax=None, parmloc=(0.05, 0.95), print_par=True,\n args=None, errors=None, grid=True, parts=False, nbins=None):\n data_ret = None\n error_ret = None\n total_ret = None\n part_ret = []\n \n ax = plt.gca() if ax is None else ax\n\n arg, error = _get_args_and_errors(self, minuit, args, errors)\n\n x=self.x\n y=self.y\n data_err = self.error\n\n # Draw data points\n data_ret = x,y\n if data_err is None:\n ax.plot(x, y, '+')\n err_ret = (np.ones(len(self.x)), np.ones(len(self.x)))\n else:\n ax.errorbar(x, y, data_err, fmt='+', capsize=0)\n err_ret = (data_err, data_err)\n draw_arg = [('lw', 2)]\n draw_arg.append(('color', 'r'))\n\n # Draw PDF curve(s)\n if nbins is not None:\n x = np.linspace(x[0],x[-1], nbins)\n\n total_ret = draw_pdf_with_midpoints(self.f, arg, x, ax=ax, **dict(draw_arg))\n if parts:\n f_parts = getattr(self.f, 'parts', None)\n if f_parts is not None:\n for p in f_parts():\n tmp = draw_pdf_with_midpoints(p, arg, x, ax=ax, **dict(draw_arg))\n part_ret.append(tmp)\n\n # Print text\n txt = _param_text(describe(self), arg, error)\n chi2 = self(*arg)\n if self.ndof > 0:\n txt+=u'chi2/ndof = %5.4g(%5.4g/%d)'%(chi2/self.ndof, chi2, self.ndof)\n else:\n txt+=u'chi2/ndof = (%5.4g/%d)'%(chi2, self.ndof)\n\n if print_par:\n ax.text(parmloc[0], parmloc[1], txt, ha='left', va='top',\n transform=ax.transAxes)\n\n\n ax.grid(grid)\n return (data_ret, error_ret, total_ret , part_ret)\n\ndef draw_x2_residual(self, minuit=None, ax=None, args=None, errors=None, grid=True,\n norm=False):\n ax = plt.gca() if ax is None else ax\n\n arg, error = _get_args_and_errors(self, minuit, args, errors)\n\n x=self.x\n y=self.y\n data_err = self.error\n f=self.f\n\n arg = parse_arg(f, arg, 1) if isinstance(arg, dict) else arg\n yf = vector_apply(f, x, *arg)\n\n yplot= y-yf\n eplot= data_err if data_err is not None else np.zeros(len(x))\n if norm:\n if data_err is None:\n 
warn(RuntimeWarning('No error on data points; cannot normalize to error'))\n else:\n yplot= yplot/data_err\n eplot= data_err/data_err\n ax.errorbar(x, yplot, eplot, fmt='b+', capsize=0)\n ax.grid(grid)\n return x, yplot, eplot\n\n#from binned chi2\ndef draw_bx2(self, minuit=None, parmloc=(0.05, 0.95), nfbins=500, ax=None,\n print_par=True, args=None, errors=None, parts=False, grid=True):\n \n data_ret = None\n error_ret = None\n total_ret = None\n part_ret = []\n \n ax = plt.gca() if ax is None else ax\n\n arg, error = _get_args_and_errors(self, minuit, args, errors)\n\n m = mid(self.edges)\n\n ax.errorbar(m, self.h, self.err, fmt='+', capsize=0)\n data_ret = (self.edges, self.h)\n error_ret = (self.err, self.err)\n\n bound = (self.edges[0], self.edges[-1])\n\n scale = nfbins/float(self.bins) #scale back to bins\n\n draw_arg = [('lw', 2)]\n\n if not parts:\n draw_arg.append(('color', 'r'))\n\n total_ret = draw_pdf(self.f, arg, bins=nfbins, bound=bound, ax=ax, density=False,\n scale=scale, **dict(draw_arg))\n\n if parts:\n f_parts = getattr(self.f, 'parts', None)\n if f_parts is not None:\n for p in f_parts():\n tmp = draw_pdf(p, arg, bound=bound, bins=nfbins, ax=ax, density=False,\n scale=scale)\n part_ret.append(tmp)\n\n ax.grid(grid)\n\n txt = _param_text(describe(self), arg, error)\n\n chi2 = self(*arg)\n if self.ndof > 0:\n txt+=u'chi2/ndof = %5.4g(%5.4g/%d)'%(chi2/self.ndof, chi2, self.ndof)\n else:\n txt+=u'chi2/ndof = (%5.4g/%d)'%(chi2, self.ndof)\n\n if print_par:\n ax.text(parmloc[0], parmloc[1], txt, ha='left', va='top',\n transform=ax.transAxes)\n\n return (data_ret, error_ret, total_ret, part_ret)\n\n\n#from binnedLH\ndef draw_blh(self, minuit=None, parmloc=(0.05, 0.95),\n nfbins=1000, ax=None, print_par=True, grid=True,\n args=None, errors=None, parts=False):\n data_ret = None\n error_ret = None\n total_ret = None\n part_ret = []\n \n ax = plt.gca() if ax is None else ax\n\n arg, error = _get_args_and_errors(self, minuit, args, errors)\n\n m = mid(self.edges)\n\n if self.use_w2:\n err = np.sqrt(self.w2)\n else:\n err = np.sqrt(self.h)\n\n n= np.copy(self.h)\n dataint= (n*np.diff(self.edges)).sum()\n scale= dataint if not self.extended else 1.0\n\n ax.errorbar(m, n, err, fmt='+', capsize=0)\n data_ret = (self.edges, n)\n error_ret = (err, err)\n\n draw_arg = [('lw', 2)]\n if not parts:\n draw_arg.append(('color', 'r'))\n bound = (self.edges[0], self.edges[-1])\n \n #scale back to bins\n if self.extended:\n scale= nfbins/float(self.bins) \n total_ret = draw_pdf(self.f, arg, bins=nfbins, bound=bound, ax=ax, density=not self.extended,\n scale=scale, **dict(draw_arg))\n if parts:\n f_parts = getattr(self.f, 'parts', None)\n if f_parts is not None:\n for p in f_parts():\n tmp = draw_pdf(p, arg, bins=nfbins, bound=bound, ax=ax,\n density=not self.extended, scale=scale)\n part_ret.append(tmp)\n ax.grid(grid)\n\n txt = _param_text(describe(self), arg, error)\n\n if print_par:\n ax.text(parmloc[0], parmloc[1], txt, ha='left', va='top',\n transform=ax.transAxes)\n\n return (data_ret, error_ret, total_ret, part_ret)\n \n\ndef draw_residual_blh(self, minuit=None, parmloc=(0.05, 0.95),\n ax=None, print_par=False, args=None, errors=None,\n norm=False, grid=True):\n ax = plt.gca() if ax is None else ax\n\n arg, error = _get_args_and_errors(self, minuit, args, errors)\n\n m = mid(self.edges)\n\n if self.use_w2:\n err = np.sqrt(self.w2)\n else:\n err = np.sqrt(self.h)\n\n n= np.copy(self.h)\n dataint= (n*np.diff(self.edges)).sum()\n scale= dataint if not self.extended else 1.0\n\n arg = 
parse_arg(self.f, arg, 1) if isinstance(arg, dict) else arg\n yf = vector_apply(self.f, m, *arg)\n yf*= (scale*np.diff(self.edges) if self.extended else scale)\n n = n- yf\n if norm:\n sel= err>0\n n[sel]/= err[sel]\n err= np.ones(len(err))\n\n ax.errorbar(m, n, err, fmt='+', capsize=0)\n\n ax.plot([self.edges[0],self.edges[-1]],[0.,0.], 'r-')\n\n ax.grid(grid)\n\n txt = _param_text(describe(self), arg, error)\n\n if print_par:\n ax.text(parmloc[0], parmloc[1], txt, ha='left', va='top',\n transform=ax.transAxes)\n return m, n, err\n\n\ndef draw_compare(f, arg, edges, data, errors=None, ax=None, grid=True, normed=False, parts=False):\n \"\"\"\n TODO: this needs to be rewritten\n \"\"\"\n #arg is either map or tuple\n ax = plt.gca() if ax is None else ax\n arg = parse_arg(f, arg, 1) if isinstance(arg, dict) else arg\n x = (edges[:-1]+edges[1:])/2.0\n bw = np.diff(edges)\n yf = vector_apply(f, x, *arg)\n total = np.sum(data)\n if normed:\n ax.errorbar(x, data/bw/total, errors/bw/total, fmt='b+', capsize=0)\n ax.plot(x, yf, 'r', lw=2)\n else:\n ax.errorbar(x, data, errors, fmt='b+', capsize=0)\n ax.plot(x, yf*bw, 'r', lw=2)\n\n #now draw the parts\n if parts:\n if not hasattr(f, 'eval_parts'):\n warn(RuntimeWarning('parts is set to True but function does '\n 'not have eval_parts method'))\n else:\n scale = bw if not normed else 1.\n parts_val = list()\n for tx in x:\n val = f.eval_parts(tx, *arg)\n parts_val.append(val)\n py = zip(*parts_val)\n for y in py:\n tmpy = np.array(y)\n ax.plot(x, tmpy*scale, lw=2, alpha=0.5)\n plt.grid(grid)\n return x, yf, data\n\n\ndef draw_normed_pdf(f, arg, bound, bins=100, scale=1.0, density=True, ax=None, **kwds):\n return draw_pdf(f, arg, bound, bins=100, scale=1.0, density=True,\n normed_pdf=True, ax=ax, **kwds)\n\n\ndef draw_pdf(f, arg, bound, bins=100, scale=1.0, density=True,\n normed_pdf=False, ax=None, **kwds):\n \"\"\"\n draw pdf with given argument and bounds.\n\n **Arguments**\n\n * **f** your pdf. The first argument is assumed to be independent\n variable\n\n * **arg** argument can be tuple or list\n\n * **bound** tuple(xmin,xmax)\n\n * **bins** number of bins to plot pdf. Default 100.\n\n * **scale** multiply pdf by given number. Default 1.0.\n\n * **density** plot density instead of expected count in each bin\n (pdf*bin width). Default True.\n\n * **normed_pdf** Normalize pdf in given bound. 
Default False\n\n * The rest of keyword argument will be pass to pyplot.plot\n\n **Returns**\n \n x, y of what's being plot\n \"\"\"\n edges = np.linspace(bound[0], bound[1], bins)\n return draw_pdf_with_edges(f, arg, edges, ax=ax, scale=scale, density=density,\n normed_pdf=normed_pdf, **kwds)\n\n\ndef draw_pdf_with_edges(f, arg, edges, ax=None, scale=1.0, density=True,\n normed_pdf=False, **kwds):\n x = (edges[:-1]+edges[1:])/2.0\n bw = np.diff(edges)\n scale *= bw if not density else 1.\n\n return draw_pdf_with_midpoints(f, arg, x, ax=ax, scale=scale,\n normed_pdf=normed_pdf, **kwds)\n\n\ndef draw_pdf_with_midpoints(f, arg, x, ax=None, scale=1.0, normed_pdf=False, **kwds):\n ax = plt.gca() if ax is None else ax\n arg = parse_arg(f, arg, 1) if isinstance(arg, dict) else arg\n yf = vector_apply(f, x, *arg)\n\n if normed_pdf:\n normed_factor = sum(yf) # assume equal binwidth\n yf /= normed_factor\n yf *= scale\n\n ax.plot(x, yf, **kwds)\n return x, yf\n\n\n#draw comparison between function given args and data\ndef draw_compare_hist(f, arg, data, bins=100, bound=None, ax=None, weights=None,\n normed=False, use_w2=False, parts=False, grid=True):\n \"\"\"\n draw histogram of data with poisson error bar and f(x,*arg).\n\n ::\n\n data = np.random.rand(10000)\n f = gaussian\n draw_compare_hist(f, {'mean':0,'sigma':1}, data, normed=True)\n\n **Arguments**\n\n - **f**\n - **arg** argument pass to f. Can be dictionary or list.\n - **data** data array\n - **bins** number of bins. Default 100.\n - **bound** optional boundary of plot in tuple form. If `None` is\n given, the bound is determined from min and max of the data. Default\n `None`\n - **weights** weights array. Default None.\n - **normed** optional normalized data flag. Default False.\n - **use_w2** scaled error down to the original statistics instead of\n weighted statistics.\n - **parts** draw parts of pdf. (Works with AddPdf and Add2PdfNorm).\n Default False.\n \"\"\"\n ax = plt.gca() if ax is None else ax\n bound = minmax(data) if bound is None else bound\n h, e = np.histogram(data, bins=bins, range=bound, weights=weights)\n err = None\n if weights is not None and use_w2:\n err, _ = np.histogram(data, bins=bins, range=bound,\n weights=weights*weights)\n err = np.sqrt(err)\n else:\n err = np.sqrt(h)\n return draw_compare(f, arg, e, h, err, ax=ax, grid=grid, normed=normed, parts=parts)\n"
] |
[
[
"numpy.random.seed",
"matplotlib.pyplot.subplot",
"numpy.random.randn",
"matplotlib.pyplot.suptitle",
"matplotlib.pyplot.figure"
],
[
"matplotlib.pyplot.gca",
"numpy.sqrt",
"numpy.linspace",
"numpy.copy",
"matplotlib.pyplot.subplot",
"numpy.diff",
"matplotlib.pyplot.grid",
"numpy.array",
"numpy.histogram",
"numpy.sum"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
Sapio-S/habitat-lab
|
[
"47985177d6e6932db98ee6698371253182cf96fb"
] |
[
"habitat/sims/habitat_simulator/habitat_simulator.py"
] |
[
"#!/usr/bin/env python3\n\n# Copyright (c) Facebook, Inc. and its affiliates.\n# This source code is licensed under the MIT license found in the\n# LICENSE file in the root directory of this source tree.\n\nfrom enum import Enum\nfrom typing import Any, List, Optional\n\nimport numpy as np\nfrom gym import Space, spaces\nfrom habitat_sim import utils\nimport habitat_sim\nfrom habitat.core.logging import logger\nfrom habitat.core.registry import registry\nfrom habitat.core.simulator import (\n AgentState,\n Config,\n DepthSensor,\n Observations,\n RGBSensor,\n SemanticSensor,\n Sensor,\n SensorSuite,\n ShortestPathPoint,\n Simulator,\n SimulatorActions,\n)\n\nRGBSENSOR_DIMENSION = 3\n\nimport random\n\n\ndef overwrite_config(config_from: Config, config_to: Any) -> None:\n for attr, value in config_from.items():\n if hasattr(config_to, attr.lower()):\n setattr(config_to, attr.lower(), value)\n\n\ndef check_sim_obs(obs, sensor):\n assert obs is not None, (\n \"Observation corresponding to {} not present in \"\n \"simulator's observations\".format(sensor.uuid)\n )\n\n\[email protected]_sensor\nclass HabitatSimRGBSensor(RGBSensor):\n sim_sensor_type: habitat_sim.SensorType\n\n def __init__(self, config):\n self.sim_sensor_type = habitat_sim.SensorType.COLOR\n super().__init__(config=config)\n\n def _get_observation_space(self, *args: Any, **kwargs: Any):\n return spaces.Box(\n low=0,\n high=255,\n shape=(self.config.HEIGHT, self.config.WIDTH, RGBSENSOR_DIMENSION),\n dtype=np.uint8,\n )\n\n def get_observation(self, sim_obs):\n '''\n obs = [\n sim_obs[i].get(self.uuid, None)[:, :, :RGBSENSOR_DIMENSION]\n for i in range(len(self._sim.agents))\n ]\n '''\n obs = sim_obs.get(self.uuid, None)\n check_sim_obs(obs, self)\n\n # remove alpha channel\n obs = obs[:, :, :RGBSENSOR_DIMENSION]\n \n return obs\n\n\[email protected]_sensor\nclass HabitatSimDepthSensor(DepthSensor):\n sim_sensor_type: habitat_sim.SensorType\n min_depth_value: float\n max_depth_value: float\n\n def __init__(self, config):\n self.sim_sensor_type = habitat_sim.SensorType.DEPTH\n\n if config.NORMALIZE_DEPTH:\n self.min_depth_value = 0\n self.max_depth_value = 1\n else:\n self.min_depth_value = config.MIN_DEPTH\n self.max_depth_value = config.MAX_DEPTH\n\n super().__init__(config=config)\n\n def _get_observation_space(self, *args: Any, **kwargs: Any):\n return spaces.Box(\n low=self.min_depth_value,\n high=self.max_depth_value,\n shape=(self.config.HEIGHT, self.config.WIDTH, 1),\n dtype=np.float32,\n )\n\n def get_observation(self, sim_obs):\n '''\n obs = []\n for i in range(len(self._sim.agents)):\n OB = sim_obs[i].get(self.uuid, None)\n check_sim_obs(OB, self)\n OB = np.clip(OB, self.config.MIN_DEPTH, self.config.MAX_DEPTH)\n if self.config.NORMALIZE_DEPTH:\n OB = (OB - self.config.MIN_DEPTH) / self.config.MAX_DEPTH\n \n OB = np.expand_dims(OB, axis=2)\n obs.append(OB)\n '''\n obs = sim_obs.get(self.uuid, None)\n check_sim_obs(obs, self)\n\n obs = np.clip(obs, self.config.MIN_DEPTH, self.config.MAX_DEPTH)\n if self.config.NORMALIZE_DEPTH:\n # normalize depth observation to [0, 1]\n obs = (obs - self.config.MIN_DEPTH) / self.config.MAX_DEPTH\n\n obs = np.expand_dims(obs, axis=2) # make depth observation a 3D array\n \n return obs\n\n\[email protected]_sensor\nclass HabitatSimSemanticSensor(SemanticSensor):\n sim_sensor_type: habitat_sim.SensorType\n\n def __init__(self, config):\n self.sim_sensor_type = habitat_sim.SensorType.SEMANTIC\n super().__init__(config=config)\n\n def _get_observation_space(self, *args: Any, **kwargs: 
Any):\n return spaces.Box(\n low=np.iinfo(np.uint32).min,\n high=np.iinfo(np.uint32).max,\n shape=(self.config.HEIGHT, self.config.WIDTH),\n dtype=np.uint32,\n )\n\n def get_observation(self, sim_obs):\n obs = sim_obs.get(self.uuid, None)\n check_sim_obs(obs, self)\n return obs\n\n\[email protected]_simulator(name=\"Sim-v0\")\nclass HabitatSim(Simulator):\n r\"\"\"Simulator wrapper over habitat-sim\n\n habitat-sim repo: https://github.com/facebookresearch/habitat-sim\n\n Args:\n config: configuration for initializing the simulator.\n \"\"\"\n def __init__(self, config: Config) -> None:\n self.config = config\n agent_config = self._get_agent_config()\n sim_sensors = []\n for sensor_name in agent_config.SENSORS:\n sensor_cfg = getattr(self.config, sensor_name)\n sensor_type = registry.get_sensor(sensor_cfg.TYPE)\n\n assert sensor_type is not None, \"invalid sensor type {}\".format(\n sensor_cfg.TYPE\n )\n sim_sensors.append(sensor_type(sensor_cfg))\n self._sensor_suite = SensorSuite(sim_sensors)\n self.sim_config = self.create_sim_config(self._sensor_suite)\n self._current_scene = self.sim_config.sim_cfg.scene.id\n self._sim = habitat_sim.Simulator(self.sim_config)\n self._action_space = spaces.Discrete(\n len(self.sim_config.agents[0].action_space)\n )\n\n self._is_episode_active = False\n \n\n def create_sim_config(\n self, _sensor_suite: SensorSuite\n ) -> habitat_sim.Configuration:\n sim_config = habitat_sim.SimulatorConfiguration()\n sim_config.seed = self.config.SEED\n sim_config.scene.id = self.config.SCENE\n sim_config.gpu_device_id = self.config.HABITAT_SIM_V0.GPU_DEVICE_ID\n agent_config = habitat_sim.AgentConfiguration()\n overwrite_config(\n config_from=self._get_agent_config(), config_to=agent_config\n )\n\n sensor_specifications = []\n for sensor in _sensor_suite.sensors.values():\n sim_sensor_cfg = habitat_sim.SensorSpec()\n sim_sensor_cfg.uuid = sensor.uuid\n sim_sensor_cfg.resolution = list(\n sensor.observation_space.shape[:2]\n )\n sim_sensor_cfg.parameters[\"hfov\"] = str(sensor.config.HFOV)\n sim_sensor_cfg.position = sensor.config.POSITION\n sim_sensor_cfg.orientation = sensor.config.ORIENTATION\n # TODO(maksymets): Add configure method to Sensor API to avoid\n # accessing child attributes through parent interface\n sim_sensor_cfg.sensor_type = sensor.sim_sensor_type # type: ignore\n sensor_specifications.append(sim_sensor_cfg)\n\n agent_config.sensor_specifications = sensor_specifications\n agent_config.action_space = registry.get_action_space_configuration(\n self.config.ACTION_SPACE_CONFIG\n )(self.config).get()\n\n agents_config = []\n for i in range(self.config.NUM_AGENTS):\n agents_config.append(agent_config)\n\n return habitat_sim.Configuration(sim_config, agents_config)\n\n @property\n def sensor_suite(self) -> SensorSuite:\n return self._sensor_suite\n\n @property\n def action_space(self) -> Space:\n return self._action_space\n\n @property\n def is_episode_active(self) -> bool:\n return self._is_episode_active\n\n def _update_agents_state(self) -> bool:\n is_updated = False\n for agent_id in range(self.config.NUM_AGENTS):\n agent_cfg = self._get_agent_config()\n if agent_cfg.IS_SET_START_STATE:\n self.set_agent_state(\n agent_cfg.START_POSITION[agent_id],\n agent_cfg.START_ROTATION[agent_id],\n agent_id,\n self.config.NUM_AGENTS\n )\n is_updated = True\n\n return is_updated\n\n def reset(self):\n sim_obs = self._sim.reset()\n if self._update_agents_state():\n sim_obs = [\n self._sim.get_sensor_observations(i)\n for i in range(len(self._sim.agents))\n ]\n\n 
self._prev_sim_obs = sim_obs\n self._is_episode_active = True\n return [\n self._sensor_suite.get_observations(sim_obs[i])\n for i in range(len(self._sim.agents))\n ]\n\n def step(self, action, agent_id):\n assert self._is_episode_active, (\n \"episode is not active, environment not RESET or \"\n \"STOP action called previously\"\n )\n\n if action == self.index_stop_action:\n if not self.config.CHANGE_AGENTS:\n self._is_episode_active = False\n sim_obs = self._sim.get_sensor_observations(agent_id)\n else: \n sim_obs = self._sim.step(action, agent_id)\n\n self._prev_sim_obs = sim_obs\n\n observations = self._sensor_suite.get_observations(sim_obs)\n return observations\n\n def render(self, mode: str = \"rgb\") -> Any:\n r\"\"\"\n Args:\n mode: sensor whose observation is used for returning the frame,\n eg: \"rgb\", \"depth\", \"semantic\"\n\n Returns:\n rendered frame according to the mode\n \"\"\"\n sim_obs = self._sim.get_sensor_observations()\n observations = self._sensor_suite.get_observations(sim_obs)\n\n output = observations.get(mode)\n assert output is not None, \"mode {} sensor is not active\".format(mode)\n\n return output\n\n def seed(self, seed):\n self._sim.seed(seed)\n\n def reconfigure(self, config: Config) -> None:\n # TODO(maksymets): Switch to Habitat-Sim more efficient caching\n is_same_scene = config.SCENE == self._current_scene\n self.config = config\n self.sim_config = self.create_sim_config(self._sensor_suite)\n if not is_same_scene:\n self._current_scene = config.SCENE\n self._sim.close()\n del self._sim\n self._sim = habitat_sim.Simulator(self.sim_config)\n\n self._update_agents_state()\n\n def geodesic_distance(self, position_a, position_b):\n path = habitat_sim.ShortestPath()\n path.requested_start = np.array(position_a, dtype=np.float32)\n path.requested_end = np.array(position_b, dtype=np.float32)\n self._sim.pathfinder.find_path(path)\n return path.geodesic_distance\n\n def action_space_shortest_path(\n self, source: AgentState, targets: List[AgentState], agent_id: int = 0\n ) -> List[ShortestPathPoint]:\n r\"\"\"\n Returns:\n List of agent states and actions along the shortest path from\n source to the nearest target (both included). If one of the\n target(s) is identical to the source, a list containing only\n one node with the identical agent state is returned. Returns\n an empty list in case none of the targets are reachable from\n the source. For the last item in the returned list the action\n will be None.\n \"\"\"\n raise NotImplementedError(\n \"This function is no longer implemented. Please use the greedy \"\n \"follower instead\"\n )\n\n @property\n def up_vector(self):\n return np.array([0.0, 1.0, 0.0])\n\n @property\n def forward_vector(self):\n return -np.array([0.0, 0.0, 1.0])\n\n def get_straight_shortest_path_points(self, position_a, position_b):\n path = habitat_sim.ShortestPath()\n path.requested_start = position_a\n path.requested_end = position_b\n self._sim.pathfinder.find_path(path)\n return path.points\n\n def sample_navigable_point(self):\n return self._sim.pathfinder.get_random_navigable_point().tolist()\n\n def is_navigable(self, point: List[float]):\n return self._sim.pathfinder.is_navigable(point)\n\n def semantic_annotations(self):\n r\"\"\"\n Returns:\n SemanticScene which is a three level hierarchy of semantic\n annotations for the current scene. 
Specifically this method\n returns a SemanticScene which contains a list of SemanticLevel's\n where each SemanticLevel contains a list of SemanticRegion's where\n each SemanticRegion contains a list of SemanticObject's.\n\n SemanticScene has attributes: aabb(axis-aligned bounding box) which\n has attributes aabb.center and aabb.sizes which are 3d vectors,\n categories, levels, objects, regions.\n\n SemanticLevel has attributes: id, aabb, objects and regions.\n\n SemanticRegion has attributes: id, level, aabb, category (to get\n name of category use category.name()) and objects.\n\n SemanticObject has attributes: id, region, aabb, obb (oriented\n bounding box) and category.\n\n SemanticScene contains List[SemanticLevels]\n SemanticLevel contains List[SemanticRegion]\n SemanticRegion contains List[SemanticObject]\n\n Example to loop through in a hierarchical fashion:\n for level in semantic_scene.levels:\n for region in level.regions:\n for obj in region.objects:\n \"\"\"\n return self._sim.semantic_scene\n\n def close(self):\n self._sim.close()\n\n def _get_agent_config(self) -> Any:\n agent_config = getattr(self.config, 'AGENT')\n return agent_config\n\n def get_agent_state(self, agent_id: int = 0) -> habitat_sim.AgentState:\n return self._sim.get_agent(agent_id).get_state()\n\n def set_agent_state(\n self,\n position: List[float],\n rotation: List[float],\n agent_id: int = 0,\n num_agents: int = 2,\n reset_sensors: bool = True,\n ) -> bool:\n r\"\"\"Sets agent state similar to initialize_agent, but without agents\n creation. On failure to place the agent in the proper position, it is\n moved back to its previous pose.\n\n Args:\n position: list containing 3 entries for (x, y, z).\n rotation: list with 4 entries for (x, y, z, w) elements of unit\n quaternion (versor) representing agent 3D orientation,\n (https://en.wikipedia.org/wiki/Versor)\n agent_id: int identification of agent from multiagent setup.\n reset_sensors: bool for if sensor changes (e.g. tilt) should be\n reset).\n\n Returns:\n True if the set was successful else moves the agent back to its\n original pose and returns false.\n \"\"\"\n\n agent = self._sim.get_agent(agent_id)\n original_state = self.get_agent_state(agent_id)\n new_state = self.get_agent_state(agent_id)\n new_state.position = position\n new_state.rotation = rotation\n \n if not self.config.USE_FIXED_START_POS:\n if not self.config.USE_DIFFERENT_START_POS and not self.config.USE_SAME_ROTATION: # 180\n new_rotation = utils.quat_from_coeffs(rotation)\n new_angle_rotation = utils.quat_to_angle_axis(new_rotation)\n \n if new_angle_rotation[0] + (2*np.pi/num_agents)*agent_id > 2*np.pi: \n new_state.rotation = utils.quat_from_angle_axis(new_angle_rotation[0]+(2*np.pi/num_agents)*agent_id-2*np.pi, new_angle_rotation[1])\n else:\n new_state.rotation = utils.quat_from_angle_axis(new_angle_rotation[0]+(2*np.pi/num_agents)*agent_id, new_angle_rotation[1])\n \n if self.config.USE_RANDOM_ROTATION:\n new_rotation = utils.quat_from_coeffs(rotation)\n new_angle_rotation = utils.quat_to_angle_axis(new_rotation)\n\n if new_angle_rotation[0] + random.random()*2*np.pi > 2*np.pi: \n new_state.rotation = utils.quat_from_angle_axis(new_angle_rotation[0] + random.random()*2*np.pi - 2*np.pi, new_angle_rotation[1])\n else:\n new_state.rotation = utils.quat_from_angle_axis(new_angle_rotation[0] + random.random()*2*np.pi, new_angle_rotation[1])\n\n # NB: The agent state also contains the sensor states in _absolute_\n # coordinates. 
In order to set the agent's body to a specific\n # location and have the sensors follow, we must not provide any\n # state for the sensors. This will cause them to follow the agent's\n # body\n new_state.sensor_states = dict()\n agent.set_state(new_state, reset_sensors)\n\n if not self._check_agent_position(position, agent_id):\n agent.set_state(original_state, reset_sensors)\n return False\n\n return True\n\n def get_observations_at(\n self,\n position: List[float],\n rotation: List[float],\n keep_agent_at_new_pose: bool = False,\n ) -> Optional[Observations]:\n\n current_state = self.get_agent_state()\n\n success = self.set_agent_state(position, rotation, reset_sensors=False)\n if success:\n sim_obs = self._sim.get_sensor_observations()\n\n self._prev_sim_obs = sim_obs\n\n observations = self._sensor_suite.get_observations(sim_obs)\n if not keep_agent_at_new_pose:\n self.set_agent_state(\n current_state.position,\n current_state.rotation,\n reset_sensors=False,\n )\n return observations\n else:\n return None\n\n # TODO (maksymets): Remove check after simulator became stable\n def _check_agent_position(self, position, agent_id=0) -> bool:\n if not np.allclose(position, self.get_agent_state(agent_id).position):\n logger.info(\"Agent state diverges from configured start position.\")\n return False\n return True\n\n def distance_to_closest_obstacle(self, position, max_search_radius=2.0):\n return self._sim.pathfinder.distance_to_closest_obstacle(\n position, max_search_radius\n )\n\n def island_radius(self, position):\n return self._sim.pathfinder.island_radius(position)\n\n @property\n def previous_step_collided(self):\n r\"\"\"Whether or not the previous step resulted in a collision\n\n Returns:\n bool: True if the previous step resulted in a collision, false otherwise\n\n Warning:\n This feild is only updated when :meth:`step`, :meth:`reset`, or :meth:`get_observations_at` are\n called. It does not update when the agent is moved to a new loction. Furthermore, it\n will _always_ be false after :meth:`reset` or :meth:`get_observations_at` as neither of those\n result in an action (step) being taken.\n \"\"\"\n return self._prev_sim_obs.get(\"collided\", False)\n"
] |
[
[
"numpy.array",
"numpy.expand_dims",
"numpy.iinfo",
"numpy.clip"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
hyperfraise/pytorch-detect-to-track
|
[
"920aed6948687825d80a0eb2ab21268a5f74f9a8"
] |
[
"lib/model/utils/online_tubes.py"
] |
[
"import os\nimport glob\nimport shutil\nimport subprocess\nimport time\nfrom collections import deque\n\nimport numpy as np\nimport torch\nimport torch.utils.data as data\nimport cv2\nfrom model.utils.blob import prep_im_for_blob, im_list_to_blob\nfrom model.utils.config import cfg\nfrom model.nms.nms_wrapper import nms\nfrom model.rpn.bbox_transform import bbox_overlaps\n\nimport pdb\n\n\nclass VideoPostProcessor(object):\n def __init__(self, pred_boxes, scores, pred_trk_boxes, classes, video_id=\"\"):\n print(\"Starting post-processing on video id {}\".format(video_id))\n self.video_id = video_id\n self.pred_boxes = pred_boxes\n self.scores = scores\n self.pred_trk_boxes = pred_trk_boxes\n self.num_frame_pairs = pred_boxes.size(0)\n self.num_frames = self.num_frame_pairs + 1\n self.num_classes = len(classes)\n self.classes = classes\n self.class_agnostic = True\n self.jumpgap = 5\n self.alpha_l = 3.0\n self.all_paths = np.ndarray(shape=(self.num_classes,), dtype=np.object)\n\n def class_paths(self, path_score_thresh=0.0):\n start_paths = time.time()\n # Iterate over all det categories and generate paths for each class\n self.generate_paths()\n # Perform temporal labeling\n final_tubes = self.get_tubes()\n end_paths = time.time()\n print(\n \"Tube generation done for all classes. Took {} sec\".format(\n end_paths - start_paths\n )\n )\n keep = torch.nonzero(final_tubes[\"dpPathScore\"] > path_score_thresh).view(-1)\n self.path_total_score = final_tubes[\"path_total_score\"][keep]\n self.path_scores = final_tubes[\"path_scores\"][keep.cpu().numpy()]\n self.path_boxes = final_tubes[\"path_boxes\"][keep.cpu().numpy()]\n self.path_starts = final_tubes[\"starts\"][keep]\n self.path_ends = final_tubes[\"ends\"][keep]\n self.path_labels = final_tubes[\"label\"][keep]\n return final_tubes\n\n def dpEM_max(self, M):\n M = M.t() # classes X length of tubes[frames]\n r, c = M.size(0), M.size(1)\n D = torch.zeros(r, c + 1).cuda() # add extra column\n D[:, 1:] = M.clone()\n v = torch.arange(r).cuda()\n # phi stores all max indices in the forward pass\n phi = torch.zeros(r, c).cuda()\n # For each frame, get the max score for each class. The term alpha_l*(v!=i) will add\n # a penalty by subtracting alpha_l from the data term for all classes other than the ith class.\n # The vector (v!=i) will return a logical tensor consisting of r elements. The ith location\n # is 0, and all other locations are 1. This way, all classes other than the ith class\n # are multiplied by alpha_l. For each ith iteraction (over classes), we get the max value and\n # add it to the data term D[i,j]. 
This way, the max value for the ith class is stored in the\n # jth frame.\n for j in range(1, c + 1): # frame index\n for i in range(r): # class index\n # Get max det score for frame j-1, penalize classes != ith class\n dmax, tb = torch.max(\n D[:, j - 1] - self.alpha_l * (v != i).float(), dim=0, keepdim=True\n )\n # For ith class, sum scores across frames\n D[i, j] += dmax[0]\n # For ith class and j-1st frame, assign label with max score\n phi[i, j - 1] = tb[0]\n # Traceback from last frame\n D = D[:, 1:]\n q = c\n # predicted class in last frame of tube\n _, p = torch.max(D[:, -1], dim=0, keepdim=True)\n i = p[0] # index of max element in last frame of D\n j = q - 1 # frame indices\n p = deque([i + 1]) # add 1 to i since class index 0 is __background__ class\n q = deque([j]) # jth frame in tube, start at end of tube\n while j > 0: # quit when you hit the first frame in the tube\n tb = int(\n phi[i, j]\n ) # i:index of max element in last frame of D, j:last frame index\n p.appendleft(tb + 1)\n q.appendleft(j - 1)\n j -= 1\n i = tb\n\n return torch.FloatTensor(p).cuda(), torch.FloatTensor(q).cuda(), D\n\n def extract_action(self, _p, _q, _D, action):\n \"\"\"\n Extract frames in path where label=action\n \"\"\"\n inds = torch.nonzero(_p == action)\n if inds.numel() == 0:\n ts = torch.FloatTensor([]).cuda()\n te = torch.FloatTensor([]).cuda()\n scores = torch.FloatTensor([]).cuda()\n label = torch.FloatTensor([]).cuda()\n total_score = torch.FloatTensor([]).cuda()\n else:\n inds_diff = torch.cat(\n [inds, (inds[-1] + 1).view(-1, 1)], dim=0\n ) - torch.cat([(inds[0] - 2).view(-1, 1), inds], dim=0)\n inds_diff = inds_diff.view(-1)\n ts = torch.nonzero(inds_diff > 1)\n inds = inds.view(-1)\n if ts.size(0) > 1: # 2 starting points for label=action\n te = torch.cat(\n [ts.view(-1)[1:] - 1, torch.cuda.LongTensor([inds.size(0) - 1])]\n )\n else:\n te = inds.size(0) - 1\n te = torch.cuda.LongTensor([te])\n ts = torch.index_select(inds, 0, ts.view(-1))\n te = torch.index_select(inds, 0, te.view(-1))\n dt = te - ts\n q_s = torch.index_select(_q, 0, ts.view(-1)).long()\n q_e = torch.index_select(_q, 0, te.view(-1)).long()\n D_e = torch.index_select(_D, 1, q_e)[action]\n D_s = torch.index_select(_D, 1, q_s)[action]\n scores = ((D_e - D_s) / (dt.float() + 1e-6)).view(-1, 1)\n label = torch.cuda.FloatTensor(ts.size(0), 1).fill_(1) * action\n total_score = (\n torch.cuda.FloatTensor(ts.size(0), 1).fill_(1)\n * _D[int(_p[-1]), int(_q[-1])]\n / _p.size(0)\n )\n\n return ts, te, scores, label, total_score\n\n def get_tubes(self):\n \"\"\"\n Facade function for smoothing tubes.\n \"\"\"\n num_classes = self.num_classes\n counter = 0\n final_tubes = {\n \"starts\": [],\n \"ends\": [],\n \"ts\": [],\n \"video_id\": [],\n \"te\": [],\n \"dpActionScore\": [],\n \"label\": [],\n \"dpPathScore\": [],\n \"path_total_score\": [],\n \"path_boxes\": [],\n \"path_scores\": [],\n }\n # Perform temporal trimming\n for cls_ix in range(1, num_classes): # skip background class\n print(\n \"Performing temporal smoothing for class {}\".format(\n self.classes[cls_ix]\n )\n )\n # get paths for cls_ix\n class_paths = self.all_paths[cls_ix]\n if class_paths is None: # skip classes with no paths\n continue\n num_paths = len(self.all_paths[cls_ix][\"count\"]) # num paths for cls_ix\n for i_pth in range(num_paths):\n M = class_paths[\"all_scores\"][i_pth].clone()[\n :, 1:\n ] # softmax across classes (exclude bkg)\n pred_path, time, D = self.dpEM_max(M)\n Ts, Te, Scores, Label, DpPathScore = self.extract_action(\n pred_path, time, D, 
cls_ix\n )\n if Ts.numel() == 0:\n continue\n for k in range(Ts.numel()):\n final_tubes[\"starts\"].append(class_paths[\"start\"][i_pth])\n final_tubes[\"ends\"].append(class_paths[\"end\"][i_pth])\n final_tubes[\"ts\"].append(Ts[k]) # where tube starts for this class\n final_tubes[\"video_id\"].append(self.video_id)\n final_tubes[\"te\"].append(Te[k]) # where tube end for this class\n final_tubes[\"dpActionScore\"].append(Scores[k])\n final_tubes[\"label\"].append(Label[k])\n final_tubes[\"dpPathScore\"].append(DpPathScore[k])\n final_tubes[\"path_total_score\"].append(\n class_paths[\"scores\"][i_pth].mean()\n )\n final_tubes[\"path_boxes\"].append(class_paths[\"boxes\"][i_pth])\n final_tubes[\"path_scores\"].append(class_paths[\"scores\"][i_pth])\n final_tubes[\"starts\"] = torch.cat(final_tubes[\"starts\"], dim=0)\n final_tubes[\"ends\"] = torch.cat(final_tubes[\"ends\"], dim=0)\n final_tubes[\"ts\"] = torch.cuda.LongTensor(final_tubes[\"ts\"])\n final_tubes[\"te\"] = torch.cuda.LongTensor(final_tubes[\"te\"])\n final_tubes[\"dpActionScore\"] = torch.cat(final_tubes[\"dpActionScore\"], dim=0)\n final_tubes[\"label\"] = torch.cat(final_tubes[\"label\"], dim=0)\n final_tubes[\"dpPathScore\"] = torch.cat(final_tubes[\"dpPathScore\"], dim=0)\n final_tubes[\"path_total_score\"] = torch.cuda.FloatTensor(\n final_tubes[\"path_total_score\"]\n )\n final_tubes[\"path_boxes\"] = np.array(final_tubes[\"path_boxes\"], dtype=np.object)\n final_tubes[\"path_scores\"] = np.array(\n final_tubes[\"path_scores\"], dtype=np.object\n )\n return final_tubes\n\n def generate_paths(self):\n for cls_ix in range(1, self.num_classes): # skip background\n all_scores = np.ndarray(shape=(self.num_frame_pairs,), dtype=np.object)\n cls_boxes = np.ndarray(shape=(self.num_frame_pairs,), dtype=np.object)\n cls_scores = np.ndarray(shape=(self.num_frame_pairs,), dtype=np.object)\n print(\"Class: {}\".format(self.classes[cls_ix]))\n self._curr_class = self.classes[cls_ix]\n for pair_ix in range(self.num_frame_pairs):\n boxes_t0 = self.pred_boxes[pair_ix][0].clone()\n scores_t0 = self.scores[pair_ix][0][:, cls_ix].clone()\n pick = torch.nonzero(scores_t0 > 0.0).view(-1)\n # If no good scores for this frame/class, go to next frame\n assert pick.numel() > 0, \"No detections found for this class.\"\n if pick.numel() == 0:\n all_scores[pair_ix] = torch.cuda.FloatTensor(0) # empty tensor\n cls_boxes[pair_ix] = torch.cuda.FloatTensor(0) # empty tensor\n cls_scores[pair_ix] = torch.cuda.FloatTensor(0) # empty tensor\n continue\n # Get scores that passed filter and sort highest-->lowest\n scores_t0 = scores_t0[pick]\n boxes_t0 = boxes_t0[pick, :]\n all_scores_t0 = self.scores[pair_ix][0][pick, :]\n _, pick = torch.sort(scores_t0, descending=True)\n # Take at most 50 per frame per class\n to_pick = min(50, pick.numel())\n pick = pick[:to_pick]\n scores_t0 = scores_t0[pick]\n boxes_t0 = boxes_t0[pick, :]\n all_scores_t0 = all_scores_t0[pick, :]\n cls_dets_t0 = torch.cat(\n [boxes_t0, scores_t0.contiguous().view(-1, 1)], dim=1\n )\n pick = nms(cls_dets_t0, 0.3)\n # TODO check pick is sorted in descending order\n # Take top 10 dets after nms\n pick = pick.view(-1).long()\n pick = pick[: min(10, pick.numel())]\n\n cls_boxes[pair_ix] = boxes_t0[pick, :].clone()\n cls_scores[pair_ix] = scores_t0[pick].clone()\n all_scores[pair_ix] = all_scores_t0[pick, :].clone()\n\n paths = self.incremental_linking(cls_boxes, cls_scores, all_scores)\n self.all_paths[cls_ix] = paths\n\n def get_path_count(self, live_paths_boxes):\n return 
len(live_paths_boxes)\n\n def bbox_overlaps(self, anchors, gt_boxes):\n \"\"\"\n anchors: (N, 4) ndarray of float\n gt_boxes: (K, 4) ndarray of float\n\n overlaps: (N, K) ndarray of overlap between boxes and query_boxes\n \"\"\"\n N = anchors.size(0)\n K = gt_boxes.size(0)\n\n gt_boxes_area = (\n (gt_boxes[:, 2] - gt_boxes[:, 0] + 1)\n * (gt_boxes[:, 3] - gt_boxes[:, 1] + 1)\n ).view(1, K)\n\n anchors_area = (\n (anchors[:, 2] - anchors[:, 0] + 1) * (anchors[:, 3] - anchors[:, 1] + 1)\n ).view(N, 1)\n\n boxes = anchors.view(N, 1, 4).expand(N, K, 4)\n query_boxes = gt_boxes.view(1, K, 4).expand(N, K, 4)\n\n iw = (\n torch.min(boxes[:, :, 2], query_boxes[:, :, 2])\n - torch.max(boxes[:, :, 0], query_boxes[:, :, 0])\n + 1\n )\n iw[iw < 0] = 0\n\n ih = (\n torch.min(boxes[:, :, 3], query_boxes[:, :, 3])\n - torch.max(boxes[:, :, 1], query_boxes[:, :, 1])\n + 1\n )\n ih[ih < 0] = 0\n\n ua = anchors_area + gt_boxes_area - (iw * ih)\n overlaps = iw * ih / ua\n\n return overlaps\n\n def fill_gaps(\n self,\n paths_boxes,\n paths_scores,\n paths_all_score,\n paths_path_score,\n paths_found_at,\n paths_count,\n paths_last_found,\n ):\n \"\"\"\n paths: nd.objectarray of torch.Tensors\n gap: threshold for stale tube (in units of frames)\n \"\"\"\n gap = self.jumpgap\n gap_filled_paths_start = []\n gap_filled_paths_end = []\n gap_filled_paths_path_score = []\n gap_filled_paths_found_at = []\n gap_filled_paths_count = []\n gap_filled_paths_last_found = []\n gap_filled_paths_boxes = []\n gap_filled_paths_scores = []\n gap_filled_paths_all_scores = []\n\n g_count = 0\n path_count = self.get_path_count(paths_boxes)\n for lp in range(path_count):\n if (\n paths_found_at[lp].size(0) > gap\n ): # do we have at least @gap boxes in tube\n gap_filled_paths_start.append([])\n gap_filled_paths_end.append([])\n gap_filled_paths_path_score.append([])\n gap_filled_paths_found_at.append([])\n gap_filled_paths_count.append([])\n gap_filled_paths_last_found.append([])\n gap_filled_paths_boxes.append([])\n gap_filled_paths_scores.append([])\n gap_filled_paths_all_scores.append([])\n\n gap_filled_paths_start[g_count].append(\n paths_found_at[lp][0]\n ) # start frame of tube\n gap_filled_paths_end[g_count].append(\n paths_found_at[lp][-1]\n ) # end frame of tube\n gap_filled_paths_path_score[g_count].append(\n paths_path_score[lp].clone()\n ) # path score\n gap_filled_paths_found_at[g_count].append(\n paths_found_at[lp].clone()\n ) # trail of frames\n gap_filled_paths_count[g_count].append(paths_count[lp]) # boxes in tube\n gap_filled_paths_last_found[g_count].append(\n paths_last_found[lp]\n ) # frames where last found\n count = 0\n i = 0 # index of box in tube\n while i < paths_scores[lp].size(0):\n diff_found = (\n paths_found_at[lp][i] - paths_found_at[lp][max(0, i - 1)]\n )[0]\n if count == 0 or diff_found == 1:\n gap_filled_paths_boxes[g_count].append(\n paths_boxes[lp][i, :].clone().unsqueeze(0)\n )\n gap_filled_paths_scores[g_count].append(\n paths_scores[lp][i].clone().unsqueeze(0)\n )\n gap_filled_paths_all_scores[g_count].append(\n paths_all_score[lp][i, :].clone().unsqueeze(0)\n )\n i += 1\n count += 1\n else: # boxes in tube are > 1 frame apart, so fill the gap with the ith box\n for d in range(diff_found):\n gap_filled_paths_boxes[g_count].append(\n paths_boxes[lp][i, :].clone().unsqueeze(0)\n )\n gap_filled_paths_scores[g_count].append(\n paths_scores[lp][i].clone().unsqueeze(0)\n )\n gap_filled_paths_all_scores[g_count].append(\n paths_all_score[lp][i, :].clone().unsqueeze(0)\n )\n count += 1\n i += 1\n 
g_count += 1\n\n return (\n gap_filled_paths_boxes,\n gap_filled_paths_scores,\n gap_filled_paths_all_scores,\n gap_filled_paths_path_score,\n gap_filled_paths_found_at,\n gap_filled_paths_count,\n gap_filled_paths_last_found,\n gap_filled_paths_start,\n gap_filled_paths_end,\n )\n\n def incremental_linking(self, frames_boxes, frames_scores, frames_all_scores):\n # Online path building\n # dead-path count\n dp_count = 0\n for t0 in range(self.num_frame_pairs):\n # boxes detected in frame t0\n if frames_boxes[t0].numel() == 0:\n num_boxes = 0\n else:\n num_boxes = frames_boxes[t0].size(0)\n assert (\n num_boxes > 0\n ), \"Must have boxes for class to build tubes. Check your filter threshold.\"\n if t0 == 0: # If on first frame pair, initialize with all detections\n live_paths_boxes = np.ndarray(shape=(num_boxes,), dtype=np.object)\n live_paths_scores = np.ndarray(shape=(num_boxes,), dtype=np.object)\n live_paths_all_scores = np.ndarray(shape=(num_boxes,), dtype=np.object)\n live_paths_path_score = np.ndarray(shape=(num_boxes,), dtype=np.object)\n live_paths_found_at = np.ndarray(shape=(num_boxes,), dtype=np.object)\n live_paths_count = np.ndarray(shape=(num_boxes,), dtype=np.object)\n live_paths_last_found = np.ndarray(shape=(num_boxes,), dtype=np.object)\n\n for b in range(num_boxes):\n live_paths_boxes[b] = frames_boxes[t0][b, :].clone().unsqueeze(0)\n live_paths_scores[b] = (\n frames_scores[t0][torch.cuda.LongTensor([b])]\n .clone()\n .unsqueeze(0)\n )\n live_paths_all_scores[b] = (\n frames_all_scores[t0][b].clone().unsqueeze(0)\n )\n live_paths_path_score[b] = (\n frames_scores[t0][torch.cuda.LongTensor([b])]\n .clone()\n .unsqueeze(0)\n )\n live_paths_found_at[b] = torch.cuda.LongTensor([0]).unsqueeze(0)\n live_paths_count[b] = 1\n live_paths_last_found[b] = 0 # last time found from current frame\n else: # frames after the first\n lp_count = self.get_path_count(live_paths_boxes) # get live-path count\n print(\n \"Live paths in frame {} for class {}: {}\".format(\n t0, self._curr_class, lp_count\n )\n )\n # last box in each live path\n last_boxes_lps = torch.cat(\n [box[-1].unsqueeze(0) for box in live_paths_boxes], dim=0\n )\n # iou between boxes in last frame of tube and dets in current frame\n iou = self.bbox_overlaps(last_boxes_lps, frames_boxes[t0].clone())\n # Take scores in current frame dets that have iou above 0.1\n edge_scores = frames_scores[t0].clone().expand(lp_count, num_boxes)\n edge_scores = edge_scores * (iou > 0.1).float()\n # edge_scores = torch.zeros(lp_count,num_boxes).cuda()\n dead_count = 0\n covered_boxes = torch.zeros(1, num_boxes).cuda()\n path_order_score = torch.zeros(1, lp_count).cuda()\n for lp in range(lp_count):\n # Is the path live (has it been found in last jumpgap frames)?\n if live_paths_last_found[lp] < self.jumpgap:\n # scores of boxes in current frame t0 that overlap with lpth path\n box_to_lp_score = edge_scores[lp, :].clone()\n if (\n box_to_lp_score.sum() > 0\n ): # check if there's at least one box match\n # get box with max score\n m_score, max_ind = box_to_lp_score.max(0)\n # Add box/score to live path\n live_paths_count[lp] += 1 # increment boxes in live path\n live_paths_boxes[lp] = torch.cat(\n [live_paths_boxes[lp], frames_boxes[t0][max_ind, :]],\n dim=0,\n )\n live_paths_scores[lp] = torch.cat(\n [\n live_paths_scores[lp],\n frames_scores[t0][max_ind].view(1, -1),\n ],\n dim=0,\n )\n live_paths_all_scores[lp] = torch.cat(\n [\n live_paths_all_scores[lp],\n frames_all_scores[t0][max_ind],\n ],\n dim=0,\n )\n live_paths_path_score[\n 
lp\n ] += m_score # running sum of box scores in this live path\n # trail of frames boxes were found in\n live_paths_found_at[lp] = torch.cat(\n [\n live_paths_found_at[lp],\n torch.cuda.LongTensor([t0]).view(1, -1),\n ],\n dim=0,\n )\n live_paths_last_found[lp] = 0 # refresh tube\n edge_scores[\n :, max_ind\n ] = 0.0 # remove box from available boxes\n covered_boxes[0][\n max_ind\n ] = 1.0 # remove box from available boxes\n else: # don't add det boxes to live path, but keep track of how many frames have passed\n live_paths_last_found[lp] += 1 # tube is more stale\n\n scores, _ = torch.sort(live_paths_scores[lp])\n num_sc = scores.numel()\n path_order_score[:, lp] = scores[\n max(0, num_sc - self.jumpgap) :\n ].mean()\n else: # path is dead\n dead_count += 1\n\n # Sort path based on box scores and terminate dead paths\n _, path_inds = torch.sort(path_order_score, descending=True)\n path_inds = path_inds.view(-1)\n sorted_live_paths_boxes = []\n sorted_live_paths_scores = []\n sorted_live_paths_all_scores = []\n sorted_live_paths_path_score = []\n sorted_live_paths_found_at = []\n sorted_live_paths_count = []\n sorted_live_paths_last_found = []\n\n dead_paths_boxes = []\n dead_paths_scores = []\n dead_paths_all_scores = []\n dead_paths_path_score = []\n dead_paths_found_at = []\n dead_paths_count = []\n dead_paths_last_found = []\n\n lpc = 0\n for lp in range(lp_count):\n olp = path_inds[lp]\n if live_paths_last_found[lp] < self.jumpgap:\n sorted_live_paths_boxes.append(live_paths_boxes[olp].clone())\n sorted_live_paths_scores.append(live_paths_scores[olp].clone())\n sorted_live_paths_all_scores.append(\n live_paths_all_scores[olp].clone()\n )\n sorted_live_paths_path_score.append(\n live_paths_path_score[olp].clone()\n )\n sorted_live_paths_found_at.append(\n live_paths_found_at[olp].clone()\n )\n sorted_live_paths_count.append(live_paths_count[olp])\n sorted_live_paths_last_found.append(live_paths_last_found[olp])\n\n lpc += 1\n else:\n dead_paths_boxes.append(live_paths_boxes[olp].clone())\n dead_paths_scores.append(live_paths_scores[olp].clone())\n dead_paths_all_scores.append(live_paths_all_scores[olp].clone())\n dead_paths_path_score.append(live_paths_path_score[olp].clone())\n dead_paths_found_at.append(live_paths_found_at[olp].clone())\n dead_paths_count.append(live_paths_count[olp])\n dead_paths_last_found.append(live_paths_last_found[olp])\n dp_count += 1\n lp_count = self.get_path_count(\n sorted_live_paths_scores\n ) # update live-path count\n\n # Start new paths using unassigned boxes\n if covered_boxes.sum() < num_boxes:\n for b in range(num_boxes):\n if (\n covered_boxes[0][b] == 0\n ): # box is not covered and is available\n lp_count += 1 # new live paths\n sorted_live_paths_boxes.append(\n frames_boxes[t0][b, :].clone().unsqueeze(0)\n )\n sorted_live_paths_scores.append(\n frames_scores[t0][torch.cuda.LongTensor([b])]\n .clone()\n .unsqueeze(0)\n )\n sorted_live_paths_all_scores.append(\n frames_all_scores[t0][b].clone().unsqueeze(0)\n )\n sorted_live_paths_path_score.append(\n frames_scores[t0][torch.cuda.LongTensor([b])]\n .clone()\n .unsqueeze(0)\n )\n sorted_live_paths_found_at.append(\n torch.cuda.LongTensor([t0]).unsqueeze(0)\n )\n sorted_live_paths_count.append(1)\n sorted_live_paths_last_found.append(0)\n\n # live paths/dead paths for next time step\n live_paths_boxes = np.array(sorted_live_paths_boxes, dtype=np.object)\n live_paths_scores = np.array(sorted_live_paths_scores, dtype=np.object)\n live_paths_all_scores = np.array(sorted_live_paths_all_scores, 
dtype=np.object)\n live_paths_path_score = np.array(sorted_live_paths_path_score, dtype=np.object)\n live_paths_found_at = np.array(sorted_live_paths_found_at, dtype=np.object)\n live_paths_count = np.array(sorted_live_paths_count, dtype=np.object)\n live_paths_last_found = np.array(sorted_live_paths_last_found, dtype=np.object)\n\n dead_paths_boxes = np.array(dead_paths_boxes, dtype=np.object)\n dead_paths_scores = np.array(dead_paths_scores, dtype=np.object)\n dead_paths_all_scores = np.array(dead_paths_all_scores, dtype=np.object)\n dead_paths_path_score = np.array(dead_paths_path_score, dtype=np.object)\n dead_paths_found_at = np.array(dead_paths_found_at, dtype=np.object)\n dead_paths_count = np.array(dead_paths_count, dtype=np.object)\n dead_paths_last_found = np.array(dead_paths_last_found, dtype=np.object)\n\n live_paths = self.fill_gaps(\n live_paths_boxes,\n live_paths_scores,\n live_paths_all_scores,\n live_paths_path_score,\n live_paths_found_at,\n live_paths_count,\n live_paths_last_found,\n )\n\n live_paths_boxes = live_paths[0]\n live_paths_scores = live_paths[1]\n live_paths_all_scores = live_paths[2]\n live_paths_path_score = live_paths[3]\n live_paths_found_at = live_paths[4]\n live_paths_count = live_paths[5]\n live_paths_last_found = live_paths[6]\n live_paths_start = live_paths[7]\n live_paths_end = live_paths[8]\n\n # paths that died throughout the video, built from frame_start to frame_end\n dead_paths = self.fill_gaps(\n dead_paths_boxes,\n dead_paths_scores,\n dead_paths_all_scores,\n dead_paths_path_score,\n dead_paths_found_at,\n dead_paths_count,\n dead_paths_last_found,\n )\n\n dead_paths_boxes = dead_paths[0]\n dead_paths_scores = dead_paths[1]\n dead_paths_all_scores = dead_paths[2]\n dead_paths_path_score = dead_paths[3]\n dead_paths_found_at = dead_paths[4]\n dead_paths_count = dead_paths[5]\n dead_paths_last_found = dead_paths[6]\n dead_paths_start = dead_paths[7]\n dead_paths_end = dead_paths[8]\n\n # extend live paths with dead paths\n live_paths_start.extend(dead_paths_start)\n live_paths_end.extend(dead_paths_end)\n live_paths_boxes.extend(dead_paths_boxes)\n live_paths_scores.extend(dead_paths_scores)\n live_paths_all_scores.extend(dead_paths_all_scores)\n live_paths_path_score.extend(dead_paths_path_score)\n live_paths_found_at.extend(dead_paths_found_at)\n live_paths_count.extend(dead_paths_count)\n live_paths_last_found.extend(dead_paths_last_found)\n\n # sort paths\n\n lp_count = self.get_path_count(live_paths_scores)\n path_order_score = torch.zeros(lp_count).cuda()\n for lp in range(lp_count):\n live_paths_start[lp] = torch.cat(live_paths_start[lp], dim=0)\n live_paths_end[lp] = torch.cat(live_paths_end[lp], dim=0)\n live_paths_boxes[lp] = torch.cat(live_paths_boxes[lp], dim=0)\n live_paths_scores[lp] = torch.cat(live_paths_scores[lp], dim=0)\n live_paths_all_scores[lp] = torch.cat(live_paths_all_scores[lp], dim=0)\n live_paths_path_score[lp] = torch.cat(live_paths_path_score[lp], dim=0)\n live_paths_found_at[lp] = torch.cat(live_paths_found_at[lp], dim=0)\n # live_paths_count[lp] = torch.cat(live_paths_count[lp], dim=0)\n # live_paths_last_found[lp] = torch.cat(live_paths_last_found[lp], dim=0)\n\n scores, _ = torch.sort(live_paths_scores[lp].view(-1), descending=True)\n num_sc = scores.numel()\n path_order_score[lp] = scores[: min(20, num_sc)].mean()\n _, inds = torch.sort(path_order_score, descending=True)\n\n sorted_live_paths = {\n \"start\": [],\n \"end\": [],\n \"boxes\": [],\n \"scores\": [],\n \"all_scores\": [],\n \"path_score\": [],\n 
\"found_at\": [],\n \"count\": [],\n \"last_found\": [],\n }\n for lp in range(lp_count):\n olp = inds[lp]\n sorted_live_paths[\"start\"].append(live_paths_start[olp])\n sorted_live_paths[\"end\"].append(live_paths_end[olp])\n sorted_live_paths[\"boxes\"].append(live_paths_boxes[olp])\n sorted_live_paths[\"scores\"].append(live_paths_scores[olp])\n sorted_live_paths[\"all_scores\"].append(live_paths_all_scores[olp])\n sorted_live_paths[\"path_score\"].append(live_paths_path_score[olp])\n sorted_live_paths[\"found_at\"].append(live_paths_found_at[olp])\n sorted_live_paths[\"count\"].append(live_paths_count[olp])\n sorted_live_paths[\"last_found\"].append(live_paths_last_found[olp])\n\n return sorted_live_paths\n\n\n#############################################\n\n\nclass VideoDataset(data.Dataset):\n def __init__(self, video_list, det_classes):\n self.det_classes = det_classes\n self.num_classes = len(det_classes)\n self.n_videos = len(video_list)\n self.video_paths = video_list\n\n # Keep at most max_per_image dets per class per image before NMS\n self.max_per_image = 400\n # Number of legs in the siamese network\n self.n_legs = 2\n\n def __getitem__(self, idx):\n self._video_idx = idx\n self._video_blob = []\n\n self.video_name = self.video_paths[idx]\n # Initialize frame index of the current video\n self._frame_idx = 0\n self._extract_frames(self.video_paths[idx])\n self._create_video_blob()\n return self._video_blob\n\n def __len__(self):\n return self.n_videos\n\n def _create_video_blob(self):\n for i_frame in range(self._n_frames - 1):\n _sample_blob = {}\n print(\n \"Video name: {} {}/{}\".format(\n self.video_name, i_frame, self._n_frames - 1\n )\n )\n frame_data_t0 = cv2.imread(self._frame_paths[i_frame])\n frame_data_t1 = cv2.imread(self._frame_paths[i_frame + 1])\n frame_blob_t0 = self._get_image_blob(frame_data_t0, i_frame)\n frame_blob_t1 = self._get_image_blob(frame_data_t1, i_frame + 1)\n pt_frame_tensor_t0 = torch.from_numpy(frame_blob_t0[\"data\"]).cuda()\n pt_frame_tensor_t1 = torch.from_numpy(frame_blob_t1[\"data\"]).cuda()\n # Permute to (B,C,H,W)\n pt_frame_tensor_t0 = pt_frame_tensor_t0.permute(0, 3, 1, 2).contiguous()\n pt_frame_tensor_t1 = pt_frame_tensor_t1.permute(0, 3, 1, 2).contiguous()\n pt_info_tensor_t0 = torch.from_numpy(frame_blob_t0[\"im_info\"]).cuda()\n pt_info_tensor_t1 = torch.from_numpy(frame_blob_t1[\"im_info\"]).cuda()\n pt_frame_number_tensor_t0 = (\n torch.from_numpy(frame_blob_t0[\"frame_number\"]).cuda().unsqueeze(0)\n )\n pt_frame_number_tensor_t1 = (\n torch.from_numpy(frame_blob_t1[\"frame_number\"]).cuda().unsqueeze(0)\n )\n # _sample_blob['data'] = [frame_blob_t0['data'],frame_blob_t1['data']]\n _sample_blob[\"data\"] = torch.cat(\n [pt_frame_tensor_t0, pt_frame_tensor_t1], dim=0\n )\n # _sample_blob['im_info'] = [frame_blob_t0['im_info'],frame_blob_t1['im_info']]\n _sample_blob[\"im_info\"] = torch.cat(\n [pt_info_tensor_t0, pt_info_tensor_t1], dim=0\n )\n # _sample_blob['frame_number'] = [frame_blob_t0['frame_number'],frame_blob_t1['frame_number']]\n _sample_blob[\"frame_number\"] = torch.cat(\n [pt_frame_number_tensor_t0, pt_frame_number_tensor_t1], dim=0\n )\n self._video_blob.append(_sample_blob)\n\n def _extract_frames(self, v_path):\n \"\"\"Extract all fromes from @v_path\n\n :param v_path: full path to video\n :return: list of full paths to extracted video frames\n \"\"\"\n print(\"Extracting frames from {}\".format(v_path))\n # Store frames in tmp dir by default\n tmp_dir = os.path.join(\"/tmp\", os.path.basename(v_path))\n # If 
path exists, delete it\n if os.path.isdir(tmp_dir):\n shutil.rmtree(tmp_dir)\n os.mkdir(tmp_dir)\n # create directory to dump output to\n save_dir = tmp_dir.replace(\".mp4\", \"\") + \"_processed\"\n # clear contents of output directory before saving\n if os.path.isdir(save_dir):\n shutil.rmtree(save_dir)\n print(\"Saving to {}\".format(save_dir))\n\n self._input_dir = tmp_dir\n self._output_dir = save_dir\n # TODO Make fps configurable at command line\n cmd = \"ffmpeg -i %s -vf fps=10 %s\" % (v_path, os.path.join(tmp_dir, \"%09d.png\"))\n # execute ffmpeg cmd\n subprocess.call(cmd, shell=True)\n # set frame paths of the current video\n self._frame_paths = sorted(glob.glob(\"%s/*.png\" % tmp_dir))\n self._n_frames = len(self._frame_paths)\n self._max_per_set = (\n 160 * self._n_frames\n ) # average 160 dets per class per frame before nms\n print(\"Found {} frames\".format(self._n_frames))\n return\n\n def _get_image_blob(self, im, frame_id):\n \"\"\"Convert image into network input.\n :param im: BGR nd.array\n :param frame_id: frame number in the given video\n :return image (frame) blob\n \"\"\"\n im_orig = im.astype(np.float32, copy=True)\n im_orig -= cfg.PIXEL_MEANS\n\n im_shape = im_orig.shape\n im_size_min = np.min(im_shape[0:2])\n im_size_max = np.max(im_shape[0:2])\n\n processed_ims = []\n im_scale_factors = []\n\n for target_size in cfg.TEST.SCALES:\n im_scale = float(target_size) / float(im_size_min)\n # Prevent the biggest axis from being more than MAX_SIZE\n if np.round(im_scale * im_size_max) > cfg.TEST.MAX_SIZE:\n im_scale = float(cfg.TEST.MAX_SIZE) / float(im_size_max)\n im = cv2.resize(\n im_orig,\n None,\n None,\n fx=im_scale,\n fy=im_scale,\n interpolation=cv2.INTER_LINEAR,\n )\n im_scale_factors.append(im_scale)\n processed_ims.append(im)\n\n blob = im_list_to_blob(processed_ims)\n scales = np.array(im_scale_factors)\n\n blobs = {\"data\": blob}\n blobs[\"im_info\"] = np.array(\n [[blob.shape[1], blob.shape[2], scales[0]]], dtype=np.float32\n )\n blobs[\"frame_number\"] = np.array([[frame_id]])\n\n return blobs\n"
] |
[
[
"torch.max",
"torch.cat",
"numpy.min",
"torch.zeros",
"torch.min",
"numpy.ndarray",
"torch.cuda.LongTensor",
"torch.from_numpy",
"torch.cuda.FloatTensor",
"numpy.max",
"numpy.round",
"torch.FloatTensor",
"torch.sort",
"torch.nonzero",
"torch.arange",
"numpy.array",
"torch.index_select"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
alirezajahani60/flee-release
|
[
"33646c2224a7ef3677f82c11bf7525083e9e649c"
] |
[
"burundi.py"
] |
[
"from flee import flee\nfrom datamanager import handle_refugee_data\nfrom datamanager import DataTable\nimport numpy as np\nimport outputanalysis.analysis as a\nimport sys\n\n\"\"\"\nGeneration 1 code. Incorporates only distance, travel always takes one day.\n\"\"\"\n\n#Burundi Simulation\n\n\ndef date_to_sim_days(date):\n return DataTable.subtract_dates(date,\"2015-05-01\")\n\nif __name__ == \"__main__\":\n\n if len(sys.argv)>1:\n if (sys.argv[1]).isnumeric():\n end_time = int(sys.argv[1])\n last_physical_day = int(sys.argv[1])\n else:\n end_time = 396\n last_physical_day = 396\n duration = flee.SimulationSettings.SimulationSettings.ReadFromCSV(sys.argv[1])\n if duration>0:\n end_time = duration\n last_physical_day = end_time\n else:\n end_time = 396\n last_physical_day = 396\n\n e = flee.Ecosystem()\n\n locations = []\n\n #Burundi\n locations.append(e.addLocation(\"Bujumbura\", movechance=\"conflict\", pop=497166))\n locations.append(e.addLocation(\"Bubanza\", movechance=\"default\"))\n locations.append(e.addLocation(\"Bukinanyana\", movechance=\"default\", pop=75750))\n locations.append(e.addLocation(\"Cibitoke\", movechance=\"default\", pop=460435))\n locations.append(e.addLocation(\"Isale\", movechance=\"default\"))\n\n locations.append(e.addLocation(\"Muramvya\", movechance=\"default\"))\n locations.append(e.addLocation(\"Kayanza\", movechance=\"default\"))\n locations.append(e.addLocation(\"Kabarore\", movechance=\"default\", pop=62303)) #This resides in Kayanza province in Burundi. Not to be confused with Kabarore, Rwanda.\n locations.append(e.addLocation(\"Mwaro\", movechance=\"default\", pop=273143))\n locations.append(e.addLocation(\"Rumonge\", movechance=\"default\"))\n\n locations.append(e.addLocation(\"Burambi\", movechance=\"default\", pop=57167))\n locations.append(e.addLocation(\"Bururi\", movechance=\"default\"))\n locations.append(e.addLocation(\"Rutana\", movechance=\"default\"))\n locations.append(e.addLocation(\"Makamba\", movechance=\"default\"))\n locations.append(e.addLocation(\"Gitega\", movechance=\"default\"))\n\n locations.append(e.addLocation(\"Karuzi\", movechance=\"default\"))\n locations.append(e.addLocation(\"Ruyigi\", movechance=\"default\"))\n locations.append(e.addLocation(\"Gisuru\", movechance=\"default\", pop=99461))\n locations.append(e.addLocation(\"Cankuzo\", movechance=\"default\"))\n locations.append(e.addLocation(\"Muyinga\", movechance=\"default\"))\n\n locations.append(e.addLocation(\"Kirundo\", movechance=\"default\"))\n locations.append(e.addLocation(\"Ngozi\", movechance=\"default\"))\n locations.append(e.addLocation(\"Gashoho\", movechance=\"default\"))\n locations.append(e.addLocation(\"Gitega-Ruyigi\", movechance=\"default\"))\n locations.append(e.addLocation(\"Makebuko\", movechance=\"default\"))\n\n locations.append(e.addLocation(\"Commune of Mabanda\", movechance=\"default\"))\n\n #Rwanda, Tanzania, Uganda and DRCongo camps\n locations.append(e.addLocation(\"Mahama\", movechance=\"camp\", capacity=49451, foreign=True))\n locations.append(e.addLocation(\"Nduta\", movechance=\"default\", capacity=55320, foreign=True)) # Nduta open on 2015-08-10\n locations.append(e.addLocation(\"Kagunga\", movechance=1/21.0, foreign=True))\n locations.append(e.addLocation(\"Nyarugusu\", movechance=\"camp\", capacity=100925, foreign=True))\n locations.append(e.addLocation(\"Nakivale\", movechance=\"camp\", capacity=18734, foreign=True))\n locations.append(e.addLocation(\"Lusenda\", movechance=\"default\", capacity=17210, foreign=True))\n\n #Within 
Burundi\n e.linkUp(\"Bujumbura\",\"Bubanza\",\"48.0\")\n e.linkUp(\"Bubanza\",\"Bukinanyana\",\"74.0\")\n e.linkUp(\"Bujumbura\",\"Cibitoke\",\"63.0\")\n e.linkUp(\"Cibitoke\",\"Bukinanyana\",\"49.0\")\n e.linkUp(\"Bujumbura\",\"Muramvya\",\"58.0\")\n e.linkUp(\"Muramvya\",\"Gitega\",\"44.0\")\n e.linkUp(\"Gitega\",\"Karuzi\",\"54.0\")\n e.linkUp(\"Gitega\",\"Ruyigi\",\"55.0\")\n e.linkUp(\"Ruyigi\",\"Karuzi\",\"43.0\")\n e.linkUp(\"Karuzi\",\"Muyinga\",\"42.0\")\n e.linkUp(\"Bujumbura\",\"Kayanza\",\"95.0\")\n e.linkUp(\"Kayanza\",\"Ngozi\",\"31.0\") ##\n e.linkUp(\"Ngozi\",\"Gashoho\",\"41.0\") ##\n e.linkUp(\"Kayanza\",\"Kabarore\",\"18.0\")\n e.linkUp(\"Gashoho\",\"Kirundo\",\"42.0\")\n e.linkUp(\"Gashoho\",\"Muyinga\",\"34.0\")\n e.linkUp(\"Bujumbura\",\"Mwaro\",\"67.0\")\n e.linkUp(\"Mwaro\",\"Gitega\",\"46.0\")\n e.linkUp(\"Bujumbura\",\"Rumonge\",\"75.0\")\n e.linkUp(\"Rumonge\",\"Bururi\",\"31.0\")\n e.linkUp(\"Rumonge\",\"Burambi\",\"22.0\")\n e.linkUp(\"Rumonge\",\"Commune of Mabanda\",\"73.0\")\n e.linkUp(\"Commune of Mabanda\",\"Makamba\",\"18.0\") # ??\n e.linkUp(\"Bururi\",\"Rutana\",\"65.0\")\n e.linkUp(\"Makamba\",\"Rutana\",\"50.0\") # ??\n e.linkUp(\"Rutana\",\"Makebuko\",\"46.0\") # ??\n e.linkUp(\"Makebuko\",\"Gitega\",\"24.0\") # ??\n e.linkUp(\"Makebuko\",\"Ruyigi\",\"40.0\")\n e.linkUp(\"Ruyigi\",\"Cankuzo\",\"51.0\")\n e.linkUp(\"Ruyigi\",\"Gisuru\",\"31.0\")\n e.linkUp(\"Cankuzo\",\"Muyinga\",\"63.0\")\n\n #Camps, starting at index locations[26] (at time of writing).\n e.linkUp(\"Muyinga\",\"Mahama\",\"135.0\")\n e.linkUp(\"Kirundo\",\"Mahama\",\"183.0\") #Shorter route than via Gashoho and Muyinga. Goes through Bugesera, where a transit centre is located according to UNHCR reports.\n e.linkUp(\"Gisuru\",\"Nduta\",\"60.0\")\n e.linkUp(\"Commune of Mabanda\",\"Kagunga\",\"36.0\")\n e.linkUp(\"Commune of Mabanda\",\"Nyarugusu\",\"71.0\") #Estimated distance, as exact location of Nyarugusu is uncertain.\n\n e.linkUp(\"Kagunga\",\"Nyarugusu\",\"91.0\", forced_redirection=True) #From Kagunga to Kigoma by ship (Kagunga=Kigoma)\n e.linkUp(\"Kirundo\",\"Nakivale\",\"318.0\")\n e.linkUp(\"Kayanza\",\"Nakivale\",\"413.0\")\n\n e.linkUp(\"Nduta\",\"Nyarugusu\",\"150.0\", forced_redirection=True) #distance needs to be checked.\n\n d = handle_refugee_data.RefugeeTable(csvformat=\"generic\", data_directory=\"source_data/burundi2015\", start_date=\"2015-05-01\")\n\n # Correcting for overestimations due to inaccurate level 1 registrations in five of the camps.\n # These errors led to a perceived large drop in refugee population in all of these camps.\n # We correct by linearly scaling the values down to make the last level 1 registration match the first level 2 registration value.\n # To our knowledge, all level 2 registration procedures were put in place by the end of 2016.\n d.correctLevel1Registrations(\"Mahama\",\"2015-10-04\")\n d.correctLevel1Registrations(\"Nduta\",\"2016-04-06\")\n d.correctLevel1Registrations(\"Nyarugusu\",\"2015-11-10\")\n d.correctLevel1Registrations(\"Nakivale\",\"2015-08-18\")\n d.correctLevel1Registrations(\"Lusenda\",\"2015-09-30\")\n\n locations[26].capacity = d.getMaxFromData(\"Mahama\", last_physical_day)\n locations[27].capacity = d.getMaxFromData(\"Nduta\", last_physical_day)\n locations[29].capacity = d.getMaxFromData(\"Nyarugusu\", last_physical_day)\n locations[30].capacity = d.getMaxFromData(\"Nakivale\", last_physical_day)\n locations[31].capacity = d.getMaxFromData(\"Lusenda\", last_physical_day)\n\n\n\n list_of_cities = \"Time\"\n\n for 
l in locations:\n list_of_cities = \"%s,%s\" % (list_of_cities, l.name)\n\n #print(list_of_cities)\n #print(\"Time, campname\")\n print(\"Day,Mahama sim,Mahama data,Mahama error,Nduta sim,Nduta data,Nduta error,Nyarugusu sim,Nyarugusu data,Nyarugusu error,Nakivale sim,Nakivale data,Nakivale error,Lusenda sim,Lusenda data,Lusenda error,Total error,refugees in camps (UNHCR),total refugees (simulation),raw UNHCR refugee count,retrofitted time,refugees in camps (simulation),refugee_debt,Total error (retrofitted)\")\n\n\n #Set up a mechanism to incorporate temporary decreases in refugees\n refugee_debt = 0\n refugees_raw = 0 #raw (interpolated) data from TOTAL UNHCR refugee count only\n\n\n e.add_conflict_zone(\"Bujumbura\")\n\n\n t_retrofitted = 0\n\n for t in range(0,end_time):\n\n t_data = t\n\n #Lusenda camp open on the 30th of July 2015\n if t_data == date_to_sim_days(\"2015-07-30\"): #Open Lusenda\n locations[31].SetCampMoveChance()\n locations[31].Camp=True\n e.linkUp(\"Bujumbura\",\"Lusenda\",\"53.0\") #Only added when the refugee inflow starts at Lusenda, on 30-07-2015\n\n if t_data == date_to_sim_days(\"2015-08-10\"):\n locations[27].SetCampMoveChance()\n locations[27].Camp=True\n e.remove_link(\"Nduta\",\"Nyarugusu\")\n e.linkUp(\"Nduta\",\"Nyarugusu\",\"150.0\") #Re-add link, but without forced redirection\n\n\n #Append conflict_zone and weight to list.\n #Conflict zones after the start of simulation period\n if t_data == date_to_sim_days(\"2015-07-10\"): #Intense fighting between military & multineer military forces\n e.add_conflict_zone(\"Kabarore\")\n\n elif t_data == date_to_sim_days(\"2015-07-11\"): #Intense fighting between military & mulineer military forces\n e.add_conflict_zone(\"Bukinanyana\")\n\n elif t_data == date_to_sim_days(\"2015-07-15\"): #Battles unidentified armed groups coordinately attacked military barracks\n e.add_conflict_zone(\"Cibitoke\")\n\n elif t_data == date_to_sim_days(\"2015-10-26\"): #Clashes and battles police forces\n e.add_conflict_zone(\"Mwaro\")\n\n elif t_data == date_to_sim_days(\"2015-11-23\"): #Battles unidentified armed groups coordinate attacks\n e.add_conflict_zone(\"Gisuru\")\n\n elif t_data == date_to_sim_days(\"2015-12-08\"): #Military forces\n e.add_conflict_zone(\"Burambi\")\n\n #new_refs = d.get_new_refugees(t)\n new_refs = d.get_new_refugees(t, FullInterpolation=True) - refugee_debt\n refugees_raw += d.get_new_refugees(t, FullInterpolation=True)\n if new_refs < 0:\n refugee_debt = -new_refs\n new_refs = 0\n elif refugee_debt > 0:\n refugee_debt = 0\n\n # Here we use the random choice to make a weighted choice between the source locations.\n for i in range(0, new_refs):\n e.addAgent(e.pick_conflict_location())\n\n #Propagate the model by one time step.\n e.evolve()\n\n #e.printInfo()\n\n #Validation/data comparison\n mahama_data = d.get_field(\"Mahama\", t) #- d.get_field(\"Mahama\", 0)\n nduta_data = d.get_field(\"Nduta\", t) #-d.get_field(\"Nduta\", 0)\n nyarugusu_data = d.get_field(\"Nyarugusu\", t) #- d.get_field(\"Nyarugusu\", 0)\n nakivale_data = d.get_field(\"Nakivale\", t) #- d.get_field(\"Nakivale\", 0)\n lusenda_data = d.get_field(\"Lusenda\", t) #- d.get_field(\"Lusenda\", 0)\n\n errors = []\n abs_errors = []\n loc_data = [mahama_data, nduta_data, nyarugusu_data, nakivale_data, lusenda_data]\n camp_locations = [26, 27, 29, 30, 31]\n\n camps = []\n for i in camp_locations:\n camps += [locations[i]]\n camp_names = [\"Mahama\", \"Nduta\", \"Nyarugusu\", \"Nakivale\", \"Lusenda\"]\n\n camp_pops_retrofitted = []\n 
errors_retrofitted = []\n abs_errors_retrofitted = []\n\n # calculate retrofitted time.\n refugees_in_camps_sim = 0\n for c in camps:\n refugees_in_camps_sim += c.numAgents\n t_retrofitted = d.retrofit_time_to_refugee_count(refugees_in_camps_sim, camp_names)\n\n # calculate errors\n for i in range(0,len(camp_locations)):\n camp_number = camp_locations[i]\n errors += [a.rel_error(locations[camp_number].numAgents, loc_data[i])]\n abs_errors += [a.abs_error(locations[camp_number].numAgents, loc_data[i])]\n\n # errors when using retrofitted time stepping.\n camp_pops_retrofitted += [d.get_field(camp_names[i], t_retrofitted, FullInterpolation=True)]\n errors_retrofitted += [a.rel_error(camps[i].numAgents, camp_pops_retrofitted[-1])]\n abs_errors_retrofitted += [a.abs_error(camps[i].numAgents, camp_pops_retrofitted[-1])]\n\n output = \"%s\" % t\n\n for i in range(0,len(errors)):\n camp_number = camp_locations[i]\n output += \",%s,%s,%s\" % (locations[camp_number].numAgents, loc_data[i], errors[i])\n\n\n if refugees_raw>0:\n #output_string += \",%s,%s,%s,%s\" % (float(np.sum(abs_errors))/float(refugees_raw), int(sum(loc_data)), e.numAgents(), refugees_raw)\n output += \",%s,%s,%s,%s,%s,%s,%s,%s\" % (float(np.sum(abs_errors))/float(refugees_raw), int(sum(loc_data)), e.numAgents(), refugees_raw, t_retrofitted, refugees_in_camps_sim, refugee_debt, float(np.sum(abs_errors_retrofitted))/float(refugees_raw))\n else:\n output += \",0,0,0,0,0,0,0\"\n #output_string += \",0\"\n\n\n print(output)\n\n"
] |
[
[
"numpy.sum"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
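The record above lists only `numpy.sum` in its `apis` column; the simulation script uses it to aggregate per-camp absolute errors before normalising by the raw UNHCR refugee count. A minimal sketch of that aggregation step, with made-up numbers:

```python
import numpy as np

# Hypothetical per-camp absolute errors and raw refugee count, mirroring
# how the script computes float(np.sum(abs_errors)) / float(refugees_raw).
abs_errors = [120.0, 45.0, 300.0, 80.0, 15.0]
refugees_raw = 25000

total_error = float(np.sum(abs_errors)) / float(refugees_raw)
print(total_error)  # 0.0224
```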
xravitejax/OpenSeq2Seq
|
[
"dd98278b66385911a3143d8f0b95c6d5187935d0",
"dd98278b66385911a3143d8f0b95c6d5187935d0"
] |
[
"open_seq2seq/parts/tacotron/tacotron_decoder.py",
"open_seq2seq/data/text2speech/speech_utils.py"
] |
[
"# Copyright 2016 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"\nModified by blisc to enable support for tacotron models, specfically enables\nthe prenet\n\"\"\"\n\nfrom __future__ import absolute_import, division, print_function\nfrom __future__ import unicode_literals\n\nimport collections\n\nfrom tensorflow.contrib.seq2seq.python.ops import decoder\nfrom tensorflow.contrib.seq2seq.python.ops import helper as helper_py\nfrom tensorflow.python.framework import ops\nfrom tensorflow.python.framework import tensor_shape\nfrom tensorflow.python.framework import dtypes\nfrom tensorflow.python.layers import base as layers_base\nfrom tensorflow.python.ops import rnn_cell_impl\nfrom tensorflow.python.util import nest\n\nclass BasicDecoderOutput(\n collections.namedtuple(\n \"BasicDecoderOutput\", (\"rnn_output\", \"stop_token_output\", \"sample_id\")\n )\n):\n pass\n\n\nclass TacotronDecoder(decoder.Decoder):\n \"\"\"Basic sampling decoder.\"\"\"\n\n def __init__(\n self,\n decoder_cell,\n helper,\n initial_decoder_state,\n spec_layer,\n stop_token_layer,\n prenet=None,\n dtype=dtypes.float32,\n train=True\n ):\n \"\"\"Initialize TacotronDecoder.\n\n Args:\n decoder_cell: An `RNNCell` instance.\n helper: A `Helper` instance.\n initial_decoder_state: A (possibly nested tuple of...) tensors and\n TensorArrays. The initial state of the RNNCell.\n stop_token_layer: An instance of `tf.layers.Layer`, i.e.,\n `tf.layers.Dense`. Stop token layer to apply to the RNN output to\n predict when to stop the decoder\n spec_layer: An instance of `tf.layers.Layer`, i.e.,\n `tf.layers.Dense`. 
Output layer to apply to the RNN output to map\n the ressult to a spectrogram\n prenet: The prenet to apply to inputs\n\n Raises:\n TypeError: if `cell`, `helper` or `output_layer` have an incorrect type.\n \"\"\"\n rnn_cell_impl.assert_like_rnncell(\"cell\", decoder_cell)\n if not isinstance(helper, helper_py.Helper):\n raise TypeError(\"helper must be a Helper, received: %s\" % type(helper))\n if (\n spec_layer is not None and\n not isinstance(spec_layer, layers_base.Layer)\n ):\n raise TypeError(\n \"spec_layer must be a Layer, received: %s\" % type(spec_layer)\n )\n self._decoder_cell = decoder_cell\n self._helper = helper\n self._decoder_initial_state = initial_decoder_state\n self._spec_layer = spec_layer\n self._stop_token_layer = stop_token_layer\n self._dtype = dtype\n self._prenet = prenet\n\n if train:\n self._spec_layer = None\n self._stop_token_layer = None\n\n @property\n def batch_size(self):\n return self._helper.batch_size\n\n def _rnn_output_size(self):\n size = self._decoder_cell.output_size\n if self._spec_layer is None:\n return size\n\n output_shape_with_unknown_batch = nest.map_structure(\n lambda s: tensor_shape.TensorShape([None]).concatenate(s), size\n )\n layer_output_shape = self._spec_layer.compute_output_shape(\n output_shape_with_unknown_batch\n )\n return nest.map_structure(lambda s: s[1:], layer_output_shape)\n\n def _stop_token_output_size(self):\n size = self._decoder_cell.output_size\n if self._stop_token_layer is None:\n return size\n\n output_shape_with_unknown_batch = nest.map_structure(\n lambda s: tensor_shape.TensorShape([None]).concatenate(s), size\n )\n layer_output_shape = self._stop_token_layer.compute_output_shape(\n output_shape_with_unknown_batch\n )\n return nest.map_structure(lambda s: s[1:], layer_output_shape)\n\n @property\n def output_size(self):\n return BasicDecoderOutput(\n rnn_output=self._rnn_output_size(),\n stop_token_output=self._stop_token_output_size(),\n sample_id=self._helper.sample_ids_shape\n )\n\n @property\n def output_dtype(self):\n # dtype = nest.flatten(self._decoder_initial_state)[0].dtype\n return BasicDecoderOutput(\n nest.map_structure(lambda _: self._dtype, self._rnn_output_size()),\n nest.map_structure(lambda _: self._dtype, self._stop_token_output_size()),\n self._helper.sample_ids_dtype\n )\n\n def initialize(self, name=None):\n \"\"\"Initialize the decoder.\n\n Args:\n name: Name scope for any created operations.\n \"\"\"\n state = (self._decoder_initial_state, )\n return self._helper.initialize() + state\n\n def step(self, time, inputs, state, name=None):\n \"\"\"Perform a decoding step.\n\n Args:\n time: scalar `int32` tensor.\n inputs: A (structure of) input tensors.\n state: A (structure of) state tensors and TensorArrays.\n name: Name scope for any created operations.\n\n Returns:\n `(outputs, next_state, next_inputs, finished)`.\n \"\"\"\n with ops.name_scope(name, \"BasicDecoderStep\", (time, inputs, state)):\n if self._prenet is not None:\n inputs = self._prenet(inputs)\n\n cell_outputs, cell_state = self._decoder_cell(inputs, state)\n\n # If we are training and not using scheduled sampling, we can move\n # all projection layers outside decoder, should be faster?\n # else we must project inside decoder\n if self._spec_layer is not None:\n spec_outputs = self._spec_layer(cell_outputs)\n else:\n spec_outputs = cell_outputs\n if self._stop_token_layer is not None:\n stop_token_output = self._stop_token_layer(spec_outputs)\n else:\n stop_token_output = cell_outputs\n\n sample_ids = self._helper.sample(\n 
time=time, outputs=spec_outputs, state=cell_state\n )\n (finished, next_inputs, next_state) = self._helper.next_inputs(\n time=time,\n outputs=spec_outputs,\n state=cell_state,\n sample_ids=sample_ids,\n stop_token_predictions=stop_token_output\n )\n outputs = BasicDecoderOutput(spec_outputs, stop_token_output, sample_ids)\n return (outputs, next_state, next_inputs, finished)\n",
"# Copyright (c) 2018 NVIDIA Corporation\nfrom __future__ import absolute_import, division, print_function\nfrom __future__ import unicode_literals\nfrom six.moves import range\n\nimport numpy as np\nimport librosa\nimport librosa.filters\n\ndef get_speech_features_from_file(\n filename,\n num_features,\n features_type='magnitude',\n window_size=1024,\n window_stride=256,\n mag_power=2,\n feature_normalize=False,\n mean=0.,\n std=1.\n):\n \"\"\" Helper function to retrieve spectrograms from wav files\n\n\n Args:\n filename (string): WAVE filename.\n num_features (int): number of speech features in frequency domain.\n features_type (string): 'magnitude' or 'mel'.\n window_size (int): size of analysis window in samples.\n window_stride (int): stride of analysis window in samples.\n mag_power (int): power to raise magnitude spectrograms (prior to dot product\n with mel basis)\n 1 for energy spectrograms\n 2 fot power spectrograms\n feature_normalize(bool): whether to normalize the data with mean and std\n mean(float): if normalize is enabled, the mean to normalize to\n std(float): if normalize is enabled, the deviation to normalize to\n\n Returns:\n np.array: np.array of audio features with shape=[num_time_steps,\n num_features].\n \"\"\"\n # load audio signal\n signal, fs = librosa.core.load(filename, sr=None)\n return get_speech_features(\n signal, fs, num_features, features_type, window_size,\n window_stride, mag_power, feature_normalize, mean, std\n )\n\n\ndef get_speech_features(\n signal,\n fs,\n num_features,\n features_type='magnitude',\n n_window_size=1024,\n n_window_stride=256,\n mag_power=2,\n feature_normalize=False,\n mean=0.,\n std=1.\n):\n \"\"\" Helper function to retrieve spectrograms from loaded wav\n\n\n Args:\n signal: signal loaded with librosa.\n fs (int): sampling frequency in Hz.\n num_features (int): number of speech features in frequency domain.\n features_type (string): 'magnitude' or 'mel'.\n window_size (int): size of analysis window in samples.\n window_stride (int): stride of analysis window in samples.\n mag_power (int): power to raise magnitude spectrograms (prior to dot product\n with mel basis)\n 1 for energy spectrograms\n 2 fot power spectrograms\n feature_normalize(bool): whether to normalize the data with mean and std\n mean(float): if normalize is enabled, the mean to normalize to\n std(float): if normalize is enabled, the deviation to normalize to\n\n Returns:\n np.array: np.array of audio features with shape=[num_time_steps,\n num_features].\n \"\"\"\n if features_type == 'magnitude':\n complex_spec = librosa.stft(y=signal, n_fft=n_window_size)\n mag, _ = librosa.magphase(complex_spec, power=mag_power)\n features = np.log(np.clip(mag, a_min=1e-5, a_max=None)).T\n assert num_features <= n_window_size // 2 + 1, \\\n \"num_features for spectrogram should be <= (fs * window_size // 2 + 1)\"\n\n # cut high frequency part\n features = features[:, :num_features]\n elif features_type == 'mel':\n features = librosa.feature.melspectrogram(\n y=signal,\n sr=fs,\n n_fft=n_window_size,\n hop_length=n_window_stride,\n n_mels=num_features,\n power=mag_power\n )\n features = np.log(np.clip(features, a_min=1e-5, a_max=None)).T\n else:\n raise ValueError('Unknown features type: {}'.format(features_type))\n\n if feature_normalize:\n features = normalize(features, mean, std)\n\n return features\n\n\ndef get_mel(\n log_mag_spec,\n fs=22050,\n n_fft=1024,\n n_mels=80,\n power=2.,\n feature_normalize=False,\n mean=0,\n std=1,\n mel_basis=None\n):\n \"\"\"\n Method to get 
mel spectrograms from magnitude spectrograms\n\n Args:\n log_mag_spec (np.array): log of the magnitude spec\n fs (int): sampling frequency in Hz\n n_fft (int): size of fft window in samples\n n_mels (int): number of mel features\n power (float): power of the mag spectrogram\n feature_normalize (bool): whether the mag spec was normalized\n mean (float): normalization param of mag spec\n std (float): normalization param of mag spec\n mel_basis (np.array): optional pre-computed mel basis to save computational\n time if passed. If not passed, it will call librosa to construct one\n\n Returns:\n np.array: mel_spec with shape [time, n_mels]\n \"\"\"\n if mel_basis is None:\n mel_basis = librosa.filters.mel(fs, n_fft, n_mels=n_mels)\n log_mag_spec = log_mag_spec * power\n mag_spec = np.exp(log_mag_spec)\n mel_spec = np.dot(mag_spec, mel_basis.T)\n mel_spec = np.log(np.clip(mel_spec, a_min=1e-5, a_max=None))\n if feature_normalize:\n mel_spec = normalize(mel_spec, mean, std)\n return mel_spec\n\n\ndef inverse_mel(\n log_mel_spec,\n fs=22050,\n n_fft=1024,\n n_mels=80,\n power=2.,\n feature_normalize=False,\n mean=0,\n std=1,\n mel_basis=None\n):\n \"\"\"\n Very hacky method to reconstruct mag spec from mel\n\n Args:\n log_mel_spec (np.array): log of the mel spec\n fs (int): sampling frequency in Hz\n n_fft (int): size of fft window in samples\n n_mels (int): number of mel features\n power (float): power of the mag spectrogram that was used to generate the\n mel spec\n feature_normalize (bool): whether the mel spec was normalized\n mean (float): normalization param of mel spec\n std (float): normalization param of mel spec\n mel_basis (np.array): optional pre-computed mel basis to save computational\n time if passed. If not passed, it will call librosa to construct one\n\n Returns:\n np.array: mag_spec with shape [time, n_fft/2 + 1]\n \"\"\"\n if mel_basis is None:\n mel_basis = librosa.filters.mel(fs, n_fft, n_mels=n_mels)\n if feature_normalize:\n log_mel_spec = denormalize(log_mel_spec, mean, std)\n mel_spec = np.exp(log_mel_spec)\n mag_spec = np.dot(mel_spec, mel_basis)\n mag_spec = mag_spec * 876\n mag_spec = np.power(mag_spec, 1. / power)\n return mag_spec\n\n\ndef normalize(features, mean, std):\n \"\"\"\n Normalizes features with the specificed mean and std\n \"\"\"\n return (features - mean) / std\n\n\ndef denormalize(features, mean, std):\n \"\"\"\n Normalizes features with the specificed mean and std\n \"\"\"\n return features * std + mean\n"
] |
[
[
"tensorflow.python.ops.rnn_cell_impl.assert_like_rnncell",
"tensorflow.python.framework.ops.name_scope",
"tensorflow.python.util.nest.map_structure",
"tensorflow.python.framework.tensor_shape.TensorShape"
],
[
"numpy.dot",
"numpy.exp",
"numpy.clip",
"numpy.power"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": [
"1.10",
"1.13",
"2.2",
"1.12"
]
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
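The second file in the record above (`speech_utils.py`) is where the listed numpy calls (`numpy.dot`, `numpy.exp`, `numpy.clip`, `numpy.power`) appear: converting a log-magnitude spectrogram to a log-mel spectrogram and roughly inverting it. A small sketch of that pattern, with made-up shapes and without the file's normalisation and scaling details:

```python
import numpy as np
import librosa.filters

# Illustrative sizes only; real values come from the data config.
fs, n_fft, n_mels, power = 22050, 1024, 80, 2.0
log_mag_spec = np.random.randn(10, n_fft // 2 + 1)  # [time, n_fft/2 + 1]

mel_basis = librosa.filters.mel(sr=fs, n_fft=n_fft, n_mels=n_mels)  # [n_mels, n_fft/2 + 1]

# get_mel(): exponentiate back to (power) magnitudes, project onto the mel basis, re-log.
mag_spec = np.exp(log_mag_spec * power)
mel_spec = np.dot(mag_spec, mel_basis.T)                       # [time, n_mels]
log_mel_spec = np.log(np.clip(mel_spec, a_min=1e-5, a_max=None))

# inverse_mel() (rough pseudo-inverse, minus the file's ad-hoc rescaling constant).
mag_back = np.power(np.dot(np.exp(log_mel_spec), mel_basis), 1.0 / power)
```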
mit-acl/dc2g
|
[
"6f739e191c51acd5fb4ecb50609e1c26a10782fd"
] |
[
"dc2g/planners/DC2GPlanner.py"
] |
[
"from dc2g.planners.FrontierPlanner import FrontierPlanner\nimport numpy as np\nfrom skimage.transform import resize\nimport tensorflow as tf\nimport json\nimport os\n\nimport matplotlib.pyplot as plt\n\n\nclass DC2GPlanner(FrontierPlanner):\n def __init__(self, model_name, traversable_colors, goal_color, room_or_object_goal, camera_fov, camera_range_x, camera_range_y, env_to_coor, env_next_coords, env_to_grid, env_grid_resolution, env_render, output_name=\"output_masked\", name=\"DC2G\"):\n super(DC2GPlanner, self).__init__(traversable_colors, goal_color, room_or_object_goal, camera_fov, camera_range_x, camera_range_y, env_to_coor, env_next_coords, env_to_grid, env_grid_resolution, env_render, name=name)\n\n self.load_model(model_name, output_name)\n\n self.search_planner = self.dc2g_planner\n\n def load_model(self, model_name, output_name=\"output_masked\"):\n # Set up the deep cost-to-go network (load network weights)\n self.tf_sess = tf.compat.v1.Session()\n model_dir = \"{project_path}/data/trained_networks/{model_name}\".format(project_path=self.project_path, model_name=model_name)\n saver = tf.compat.v1.train.import_meta_graph(model_dir + \"/export.meta\")\n saver.restore(self.tf_sess, model_dir + \"/export\")\n input_vars = json.loads(tf.compat.v1.get_collection(\"inputs\")[0].decode('utf-8'))\n output_vars = json.loads(tf.compat.v1.get_collection(\"outputs\")[0].decode('utf-8'))\n input = tf.compat.v1.get_default_graph().get_tensor_by_name(input_vars[\"input\"])\n output = tf.compat.v1.get_default_graph().get_tensor_by_name(output_vars[output_name])\n self.tf_tensors = {'input': input, 'output': output}\n try:\n goal_rgb = tf.compat.v1.get_default_graph().get_tensor_by_name(input_vars[\"goal_rgb\"])\n self.tf_tensors['goal_rgb'] = goal_rgb\n except:\n pass\n print(\"loaded model.\")\n\n def visualize(self):\n raise NotImplementedError\n\n def dc2g_planner(self, position, theta_ind, semantic_array, reachable_array, bfs_parent_dict, traversable_array):\n '''\n outdated doc...\n Description: TODO\n inputs:\n - position: current position of robot in gridworld (e.g. np.array([px, py]))\n - theta_ind: current heading index of robot in gridworld (e.g. 2) - some int btwn 0-3 inclusive\n - semantic_array: 32x32x3 np array of robot's current partial knowledge of gridworld\n - reachable_inds_arr: nx2 np array of grid coordinates the agent can definitely reach given its current partial semantic map knowledge\n - tf_sess: tensorflow session\n - tf_input: tensorflow shortcut to refer to 256x256x3 image input\n - tf_output: tensorflow shortcut to refer to 256x256x3 image output\n - bfs_parent_dict: dictionary keyed by each reachable (px, py, theta_ind) coordinate, s.t. 
child coord -> (parent coord, action)\n created by running exhaustive BFS on grid from current coordinate\n outputs:\n - action: int of action to take\n '''\n c2g_array, raw_c2g = self.c2g_query(semantic_array)\n c2g_array[traversable_array == 0] = 0\n\n # plt.imshow(c2g_array, cmap='gray', vmin=0, vmax=255)\n # plt.show()\n\n self.c2g_array = c2g_array\n\n if self.plot_panels:\n plt.figure(\"Planner Panel\")\n plt.subplot(self.subplots[\"DC2G\"])\n plt.imshow(c2g_array, cmap=plt.cm.gray, interpolation='nearest')\n if self.save_individual_figures:\n plt.imsave(\"{individual_figure_path}/c2g/step_{step_num}.png\".format(individual_figure_path=self.individual_figure_path, step_num=str(self.step_number).zfill(3)), raw_c2g)\n\n action, path = self.choose_frontier_pt(position, theta_ind, semantic_array, reachable_array, bfs_parent_dict, traversable_array, frontier_cost_array=c2g_array)\n\n return action, path\n\n def saveIndivFig(self, dir, arr):\n full_dir = \"{individual_figure_path}/{dir}\".format(dir=dir, individual_figure_path=self.individual_figure_path)\n if not os.path.exists(full_dir):\n os.makedirs(full_dir)\n plt.imsave(\"{full_dir}/step_{step_num}.png\".format(full_dir=full_dir, step_num=str(self.step_number).zfill(3)), arr)\n\n def c2g_query(self, semantic_array):\n\n input_data = semantic_array\n if input_data.shape[2] == 3:\n input_data = np.dstack( ( input_data, np.ones(input_data.shape[:2]) ) )\n desired_input_shape = self.tf_tensors['input'].get_shape().as_list()[:2]\n input_data = resize(input_data, desired_input_shape, order=0)\n if np.max(input_data) > 1:\n input_data = input_data / 255.\n if input_data.shape[2] == 4:\n input_data = input_data[:,:,:3]\n\n # input_data = input_data*255\n feed_dict = {self.tf_tensors['input']: input_data}\n if 'goal_rgb' in self.tf_tensors:\n goal_rgb = goal_rgb_val = np.array([128., 0., 0.])/255.\n feed_dict[self.tf_tensors['goal_rgb']] = goal_rgb\n output_value = self.tf_sess.run(self.tf_tensors['output'], feed_dict=feed_dict)\n output_value_resized = resize(output_value[:,:,0], semantic_array.shape[:2], order=0)\n c2g_array = output_value_resized\n\n # hsv = plt_colors.rgb_to_hsv(output_value)\n # c2g_array = hsv[:, :, 2]\n # c2g_array[(hsv[:, :, 1] > 0.3)] = 0 # remove all \"red\" (non-traversable pixels) from c2g map\n # c2g_array = scipy.misc.imresize(c2g_array, semantic_array.shape[:2], interp='nearest')\n return c2g_array, output_value_resized\n"
] |
[
[
"matplotlib.pyplot.imshow",
"tensorflow.compat.v1.train.import_meta_graph",
"tensorflow.compat.v1.get_default_graph",
"numpy.ones",
"tensorflow.compat.v1.Session",
"numpy.max",
"tensorflow.compat.v1.get_collection",
"matplotlib.pyplot.subplot",
"numpy.array",
"matplotlib.pyplot.figure"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
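Most of the TensorFlow calls listed for the dc2g record come from `DC2GPlanner.load_model()`, which restores a pix2pix-style exported graph and looks tensors up by name. A sketch of that restore pattern, assuming a hypothetical export directory with the same `inputs`/`outputs` collections the file expects (it will only run against such an export):

```python
import json
import tensorflow as tf

model_dir = "data/trained_networks/my_model"  # hypothetical path

sess = tf.compat.v1.Session()
saver = tf.compat.v1.train.import_meta_graph(model_dir + "/export.meta")
saver.restore(sess, model_dir + "/export")

# The export stores JSON maps of tensor names in the "inputs"/"outputs" collections.
input_vars = json.loads(tf.compat.v1.get_collection("inputs")[0].decode("utf-8"))
output_vars = json.loads(tf.compat.v1.get_collection("outputs")[0].decode("utf-8"))

graph = tf.compat.v1.get_default_graph()
inp = graph.get_tensor_by_name(input_vars["input"])
out = graph.get_tensor_by_name(output_vars["output_masked"])
```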
PVSemk/segmentation_models.pytorch
|
[
"8d9b033be918dfc1e6186d9ef404cc7d2c171e8d",
"8d9b033be918dfc1e6186d9ef404cc7d2c171e8d"
] |
[
"segmentation_models_pytorch/encoders/inceptionv4.py",
"segmentation_models_pytorch/encoders/dpn.py"
] |
[
"\"\"\" Each encoder should have following attributes and methods and be inherited from `_base.EncoderMixin`\n\nAttributes:\n\n _out_channels (list of int): specify number of channels for each encoder feature tensor\n _depth (int): specify number of stages in decoder (in other words number of downsampling operations)\n _in_channels (int): default number of input channels in first Conv2d layer for encoder (usually 3)\n\nMethods:\n\n forward(self, x: torch.Tensor)\n produce list of features of different spatial resolutions, each feature is a 4D torch.tensor of\n shape NCHW (features should be sorted in descending order according to spatial resolution, starting\n with resolution same as input `x` tensor).\n\n Input: `x` with shape (1, 3, 64, 64)\n Output: [f0, f1, f2, f3, f4, f5] - features with corresponding shapes\n [(1, 3, 64, 64), (1, 64, 32, 32), (1, 128, 16, 16), (1, 256, 8, 8),\n (1, 512, 4, 4), (1, 1024, 2, 2)] (C - dim may differ)\n\n also should support number of features according to specified depth, e.g. if depth = 5,\n number of feature tensors = 6 (one with same resolution as input and 5 downsampled),\n depth = 3 -> number of feature tensors = 4 (one with same resolution as input and 3 downsampled).\n\"\"\"\n\nimport torch.nn as nn\n\nfrom pretrainedmodels.models.inceptionv4 import BasicConv2d, InceptionV4, pretrained_settings\n\nfrom ._base import EncoderMixin\n\n\nclass InceptionV4Encoder(InceptionV4, EncoderMixin):\n def __init__(self, stage_idxs, out_channels, depth=5, **kwargs):\n super().__init__(**kwargs)\n self._stage_idxs = stage_idxs\n self._out_channels = out_channels\n self._depth = depth\n self._in_channels = 3\n\n # correct paddings\n for m in self.modules():\n if isinstance(m, nn.Conv2d):\n if m.kernel_size == (3, 3):\n m.padding = (1, 1)\n if isinstance(m, nn.MaxPool2d):\n m.padding = (1, 1)\n\n # remove linear layers\n del self.last_linear\n\n def make_dilated(self, stage_list, dilation_list):\n raise ValueError(\n \"InceptionV4 encoder does not support dilated mode \"\n \"due to pooling operation for downsampling!\"\n )\n\n def get_stages(self):\n return [\n nn.Identity(),\n self.features[:self._stage_idxs[0]],\n self.features[self._stage_idxs[0]:self._stage_idxs[1]],\n self.features[self._stage_idxs[1]:self._stage_idxs[2]],\n self.features[self._stage_idxs[2]:self._stage_idxs[3]],\n self.features[self._stage_idxs[3]:],\n ]\n\n def forward(self, x):\n\n stages = self.get_stages()\n\n features = []\n for i in range(self._depth + 1):\n x = stages[i](x)\n features.append(x)\n\n return features\n\n def load_state_dict(self, state_dict, **kwargs):\n state_dict.pop(\"last_linear.bias\")\n state_dict.pop(\"last_linear.weight\")\n super().load_state_dict(state_dict, **kwargs)\n\n\ninceptionv4_encoders = {\n \"inceptionv4\": {\n \"encoder\": InceptionV4Encoder,\n \"pretrained_settings\": pretrained_settings[\"inceptionv4\"],\n \"params\": {\n \"stage_idxs\": (3, 5, 9, 15),\n \"out_channels\": (3, 64, 192, 384, 1024, 1536),\n \"num_classes\": 1001,\n },\n }\n}\n",
"\"\"\" Each encoder should have following attributes and methods and be inherited from `_base.EncoderMixin`\n\nAttributes:\n\n _out_channels (list of int): specify number of channels for each encoder feature tensor\n _depth (int): specify number of stages in decoder (in other words number of downsampling operations)\n _in_channels (int): default number of input channels in first Conv2d layer for encoder (usually 3)\n\nMethods:\n\n forward(self, x: torch.Tensor)\n produce list of features of different spatial resolutions, each feature is a 4D torch.tensor of\n shape NCHW (features should be sorted in descending order according to spatial resolution, starting\n with resolution same as input `x` tensor).\n\n Input: `x` with shape (1, 3, 64, 64)\n Output: [f0, f1, f2, f3, f4, f5] - features with corresponding shapes\n [(1, 3, 64, 64), (1, 64, 32, 32), (1, 128, 16, 16), (1, 256, 8, 8),\n (1, 512, 4, 4), (1, 1024, 2, 2)] (C - dim may differ)\n\n also should support number of features according to specified depth, e.g. if depth = 5,\n number of feature tensors = 6 (one with same resolution as input and 5 downsampled),\n depth = 3 -> number of feature tensors = 4 (one with same resolution as input and 3 downsampled).\n\"\"\"\n\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\n\nfrom pretrainedmodels.models.dpn import DPN, pretrained_settings\n\nfrom ._base import EncoderMixin\n\n\nclass DPNEncorder(DPN, EncoderMixin):\n def __init__(self, stage_idxs, out_channels, depth=5, **kwargs):\n super().__init__(**kwargs)\n self._stage_idxs = stage_idxs\n self._depth = depth\n self._out_channels = out_channels\n self._in_channels = 3\n\n del self.last_linear\n\n def get_stages(self):\n return [\n nn.Identity(),\n nn.Sequential(self.features[0].conv, self.features[0].bn, self.features[0].act),\n nn.Sequential(self.features[0].pool, self.features[1:self._stage_idxs[0]]),\n self.features[self._stage_idxs[0]:self._stage_idxs[1]],\n self.features[self._stage_idxs[1]:self._stage_idxs[2]],\n self.features[self._stage_idxs[2]:self._stage_idxs[3]],\n ]\n\n def forward(self, x):\n\n stages = self.get_stages()\n\n features = []\n for i in range(self._depth + 1):\n x = stages[i](x)\n if isinstance(x, (list, tuple)):\n features.append(F.relu(torch.cat(x, dim=1), inplace=True))\n else:\n features.append(x)\n\n return features\n\n def load_state_dict(self, state_dict, **kwargs):\n state_dict.pop(\"last_linear.bias\")\n state_dict.pop(\"last_linear.weight\")\n super().load_state_dict(state_dict, **kwargs)\n\n\ndpn_encoders = {\n \"dpn68\": {\n \"encoder\": DPNEncorder,\n \"pretrained_settings\": pretrained_settings[\"dpn68\"],\n \"params\": {\n \"stage_idxs\": (4, 8, 20, 24),\n \"out_channels\": (3, 10, 144, 320, 704, 832),\n \"groups\": 32,\n \"inc_sec\": (16, 32, 32, 64),\n \"k_r\": 128,\n \"k_sec\": (3, 4, 12, 3),\n \"num_classes\": 1000,\n \"num_init_features\": 10,\n \"small\": True,\n \"test_time_pool\": True,\n },\n },\n \"dpn68b\": {\n \"encoder\": DPNEncorder,\n \"pretrained_settings\": pretrained_settings[\"dpn68b\"],\n \"params\": {\n \"stage_idxs\": (4, 8, 20, 24),\n \"out_channels\": (3, 10, 144, 320, 704, 832),\n \"b\": True,\n \"groups\": 32,\n \"inc_sec\": (16, 32, 32, 64),\n \"k_r\": 128,\n \"k_sec\": (3, 4, 12, 3),\n \"num_classes\": 1000,\n \"num_init_features\": 10,\n \"small\": True,\n \"test_time_pool\": True,\n },\n },\n \"dpn92\": {\n \"encoder\": DPNEncorder,\n \"pretrained_settings\": pretrained_settings[\"dpn92\"],\n \"params\": {\n \"stage_idxs\": (4, 8, 28, 32),\n 
\"out_channels\": (3, 64, 336, 704, 1552, 2688),\n \"groups\": 32,\n \"inc_sec\": (16, 32, 24, 128),\n \"k_r\": 96,\n \"k_sec\": (3, 4, 20, 3),\n \"num_classes\": 1000,\n \"num_init_features\": 64,\n \"test_time_pool\": True,\n },\n },\n \"dpn98\": {\n \"encoder\": DPNEncorder,\n \"pretrained_settings\": pretrained_settings[\"dpn98\"],\n \"params\": {\n \"stage_idxs\": (4, 10, 30, 34),\n \"out_channels\": (3, 96, 336, 768, 1728, 2688),\n \"groups\": 40,\n \"inc_sec\": (16, 32, 32, 128),\n \"k_r\": 160,\n \"k_sec\": (3, 6, 20, 3),\n \"num_classes\": 1000,\n \"num_init_features\": 96,\n \"test_time_pool\": True,\n },\n },\n \"dpn107\": {\n \"encoder\": DPNEncorder,\n \"pretrained_settings\": pretrained_settings[\"dpn107\"],\n \"params\": {\n \"stage_idxs\": (5, 13, 33, 37),\n \"out_channels\": (3, 128, 376, 1152, 2432, 2688),\n \"groups\": 50,\n \"inc_sec\": (20, 64, 64, 128),\n \"k_r\": 200,\n \"k_sec\": (4, 8, 20, 3),\n \"num_classes\": 1000,\n \"num_init_features\": 128,\n \"test_time_pool\": True,\n },\n },\n \"dpn131\": {\n \"encoder\": DPNEncorder,\n \"pretrained_settings\": pretrained_settings[\"dpn131\"],\n \"params\": {\n \"stage_idxs\": (5, 13, 41, 45),\n \"out_channels\": (3, 128, 352, 832, 1984, 2688),\n \"groups\": 40,\n \"inc_sec\": (16, 32, 32, 128),\n \"k_r\": 160,\n \"k_sec\": (4, 8, 28, 3),\n \"num_classes\": 1000,\n \"num_init_features\": 128,\n \"test_time_pool\": True,\n },\n },\n}\n"
] |
[
[
"torch.nn.Identity"
],
[
"torch.nn.Sequential",
"torch.nn.Identity",
"torch.cat"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
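Both encoder files in the record above follow the same scheme: `get_stages()` returns a list whose first element is `nn.Identity()` (the full-resolution input), and `forward()` runs the stages in order while collecting one feature map per stage. A toy illustration of that pattern with invented layers (real encoders slice a pretrained backbone instead, and the DPN variant additionally concatenates dual-path outputs with `torch.cat`):

```python
import torch
import torch.nn as nn

stages = [
    nn.Identity(),  # stage 0: same resolution as the input
    nn.Sequential(nn.Conv2d(3, 16, 3, stride=2, padding=1), nn.ReLU()),
    nn.Sequential(nn.Conv2d(16, 32, 3, stride=2, padding=1), nn.ReLU()),
]

x = torch.randn(1, 3, 64, 64)
features = []
for stage in stages:
    x = stage(x)
    features.append(x)

print([tuple(f.shape) for f in features])
# [(1, 3, 64, 64), (1, 16, 32, 32), (1, 32, 16, 16)]
```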
isabelchaves/BiGIT
|
[
"6b5ef354f1c9877cad47a48a3e032cd1ba8dccca"
] |
[
"src/predictions.py"
] |
[
"import numpy as np\nimport pandas as pd\nfrom tqdm import tqdm\n\nfrom src.configs.experiment_config import ExperimentConfig\nfrom src.configs.variables_const import VariablesConsts\nfrom src.data.processing.textual_processing import PreProcessing\nfrom src.evaluation.evaluation_method import EvaluationMethod\n\n\nclass Predict:\n\n def __init__(self, model_class, product_vector_space, search_terms_vector_space, product_ids):\n self.model_class = model_class\n self.product_vector_space = product_vector_space\n self.search_terms_vector_space = search_terms_vector_space\n self.product_ids = product_ids\n\n def run_predictions(self, ):\n data_to_predict = pd.read_parquet(ExperimentConfig.data_path + 'test_set.parquet')\n language_process = PreProcessing(language=ExperimentConfig.language)\n data_to_predict[VariablesConsts.SEARCH_TERM_PROCESSED] = data_to_predict[VariablesConsts.SEARCH_TERM].apply(\n lambda x: language_process.tokenizer(x))\n data_to_predict[VariablesConsts.PRODUCT_TITLE_PROCESSED] = data_to_predict[VariablesConsts.PRODUCT_TITLE].apply(\n lambda x: language_process.tokenizer(x))\n\n products, queries = self.model_class.prepare_data(data=data_to_predict)\n\n # queries = self._approximate_queries_the_vector_space(queries=queries)\n\n EvaluationMethod(product_ids=self.product_ids).run(data=data_to_predict,\n data_to_evaluate=queries,\n vector_space_to_search=self.product_vector_space,\n evaluate_column=VariablesConsts.SEARCH_TERM_PROCESSED)\n\n def _approximate_queries_the_vector_space(self, queries):\n similar_queries = pd.DataFrame(columns=[VariablesConsts.DISTANCE, VariablesConsts.SEARCH_TERM_PROCESSED])\n\n for value in tqdm(queries, desc='Evaluate the products of the queries'):\n ids, distance = self.search_terms_vector_space.knnQuery(queries[value], k=10)\n # print(query)\n similar_queries = similar_queries.append(\n pd.DataFrame([[x[1]] + [self.search_terms_vector_space[x[0]]] for x in zip(ids, distance)],\n columns=[VariablesConsts.DISTANCE, VariablesConsts.SEARCH_TERM_PROCESSED]),\n ignore_index=True)\n\n similar = np.stack(\n similar_queries.apply(lambda x: [x.distance * value for value in x.search_term_processed], axis=1).to_numpy())\n similar = similar.sum(axis=0, dtype='float') / sum(similar_queries.distance)\n update = np.add(queries[value], similar)\n queries[value] = update / 10\n # queries[value] = similar.sum(axis=0, dtype='float') / 10\n\n return queries\n"
] |
[
[
"pandas.read_parquet",
"pandas.DataFrame",
"numpy.add"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [
"0.23",
"0.21",
"2.0",
"1.4",
"1.3",
"1.1",
"1.5",
"0.24",
"1.0",
"0.25",
"1.2"
],
"scipy": [],
"tensorflow": []
}
] |
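The BiGIT record above lists `pandas.read_parquet`, `pandas.DataFrame`, and `numpy.add`; in `predictions.py` they load the test set, build frames of neighbour distances, and blend a query vector with an averaged neighbour vector. A small sketch of those calls with placeholder data (the parquet path and column names below are not the project's real ones):

```python
import numpy as np
import pandas as pd

# data_to_predict = pd.read_parquet("test_set.parquet")  # real code reads the test split

df = pd.DataFrame({"search_term": ["red shoes", "blue shirt"]})
df["search_term_processed"] = df["search_term"].apply(lambda s: s.lower().split())

# np.add is used to blend a query vector with a neighbour-derived vector
# before rescaling, as in _approximate_queries_the_vector_space().
query_vec = np.array([0.2, 0.4, 0.6])
neighbour_mean = np.array([0.1, 0.1, 0.1])
blended = np.add(query_vec, neighbour_mean) / 10
```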
lukovnikov/parseq
|
[
"65a4a2444d78779c3255e70a7897f77e73cdcdda",
"65a4a2444d78779c3255e70a7897f77e73cdcdda"
] |
[
"parseq/scripts_compgen/transformer.py",
"parseq/eval.py"
] |
[
"\n\"\"\" Copied from Hugging face T5 model code in Pytorch. \"\"\"\n\n\nimport copy\nimport logging\nimport math\nimport os\n\nimport torch\nimport torch.nn.functional as F\nfrom torch import nn\nfrom torch.nn import CrossEntropyLoss\n\nfrom transformers import PretrainedConfig\nfrom transformers.configuration_t5 import T5Config\nfrom transformers.file_utils import DUMMY_INPUTS, DUMMY_MASK, add_start_docstrings, add_start_docstrings_to_callable\nfrom transformers.modeling_utils import PreTrainedModel, prune_linear_layer\n\n\nlogger = logging.getLogger(__name__)\n\n####################################################\n# PyTorch Models are constructed by sub-classing\n# - torch.nn.Module for the layers and\n# - PreTrainedModel for the models (it-self a sub-class of torch.nn.Module)\n####################################################\n\n\nclass TransformerConfig(PretrainedConfig):\n r\"\"\"\n :class:`~transformers.T5Config` is the configuration class to store the configuration of a\n `T5Model`.\n\n\n Arguments:\n vocab_size_or_config_json_file: Vocabulary size of `inputs_ids` in `T5Model`.\n hidden_size: Size of the encoder layers and the pooler layer.\n num_hidden_layers: Number of hidden layers in the Transformer encoder.\n num_attention_heads: Number of attention heads for each attention layer in\n the Transformer encoder.\n intermediate_size: The size of the \"intermediate\" (i.e., feed-forward)\n layer in the Transformer encoder.\n hidden_act: The non-linear activation function (function or string) in the\n encoder and pooler. If string, \"gelu\", \"relu\", \"swish\" and \"gelu_new\" are supported.\n hidden_dropout_prob: The dropout probabilitiy for all fully connected\n layers in the embeddings, encoder, and pooler.\n attention_probs_dropout_prob: The dropout ratio for the attention\n probabilities.\n max_position_embeddings: The maximum sequence length that this model might\n ever be used with. 
Typically set this to something large just in case\n (e.g., 512 or 1024 or 2048).\n type_vocab_size: The vocabulary size of the `token_type_ids` passed into\n `T5Model`.\n initializer_factor: A factor for initializing all weight matrices (should be kept to 1.0, used for initialization testing).\n layer_norm_eps: The epsilon used by LayerNorm.\n \"\"\"\n # pretrained_config_archive_map = T5_PRETRAINED_CONFIG_ARCHIVE_MAP\n model_type = \"transformer\"\n\n def __init__(\n self,\n vocab_size=32128,\n n_positions=512,\n d_model=512,\n d_kv=64,\n d_ff=2048,\n num_layers=6,\n num_heads=8,\n relative_attention_num_buckets=32,\n dropout_rate=0.1,\n sideways_dropout=0.0,\n attention_dropout_rate=0.0,\n layer_norm_epsilon=1e-6,\n initializer_factor=1.0,\n is_encoder_decoder=True,\n pad_token_id=0,\n eos_token_id=1,\n use_position_bias=False,\n use_causal_mask=True, # use causal mask in decoder blocks\n use_relative_position=False,\n vib_att=False,\n **kwargs\n ):\n super().__init__(\n pad_token_id=pad_token_id, eos_token_id=eos_token_id, is_encoder_decoder=is_encoder_decoder, **kwargs,\n )\n self.vocab_size = vocab_size\n self.n_positions = n_positions\n self.d_model = d_model\n self.d_kv = d_kv\n self.d_ff = d_ff\n self.num_layers = num_layers\n self.num_heads = num_heads\n self.relative_attention_num_buckets = relative_attention_num_buckets\n self.dropout_rate = dropout_rate\n self.sideways_dropout = sideways_dropout\n self.attention_dropout_rate = attention_dropout_rate\n self.layer_norm_epsilon = layer_norm_epsilon\n self.initializer_factor = initializer_factor\n self.use_position_bias = use_position_bias\n self.use_causal_mask = use_causal_mask\n self.use_relative_position = use_relative_position\n self.vib_att = vib_att\n\n @property\n def max_position_embeddings(self):\n return self.n_positions\n\n @property\n def hidden_size(self):\n return self.d_model\n\n @property\n def num_attention_heads(self):\n return self.num_heads\n\n @property\n def num_hidden_layers(self):\n return self.num_layers\n\n\nFACTOR = 1.\n\n\nclass TransformerDenseReluDense(nn.Module):\n def __init__(self, config):\n super().__init__()\n self.config = config\n self.wi = nn.Linear(config.d_model, config.d_ff, bias=False)\n self.wo = nn.Linear(config.d_ff, config.d_model, bias=False)\n self.dropout = nn.Dropout(config.dropout_rate)\n self.reset_parameters()\n\n def reset_parameters(self):\n self.wi.weight.data.normal_(mean=0.0, std=FACTOR * ((self.config.d_model) ** -0.5))\n if hasattr(self.wi, \"bias\") and self.wi.bias is not None:\n self.wi.bias.data.zero_()\n self.wo.weight.data.normal_(mean=0.0, std=FACTOR * ((self.config.d_ff) ** -0.5))\n if hasattr(self.wo, \"bias\") and self.wo.bias is not None:\n self.wo.bias.data.zero_()\n\n def forward(self, hidden_states):\n h = self.wi(hidden_states)\n h = F.relu(h)\n h = self.dropout(h)\n h = self.wo(h)\n return h\n\n\nclass TransformerLayerFF(nn.Module):\n def __init__(self, config):\n super().__init__()\n self.DenseReluDense = TransformerDenseReluDense(config)\n self.layer_norm = torch.nn.LayerNorm(config.d_model, eps=config.layer_norm_epsilon)\n self.dropout = nn.Dropout(config.dropout_rate)\n\n def forward(self, hidden_states):\n norm_x = self.layer_norm(hidden_states)\n y = self.DenseReluDense(norm_x)\n layer_output = hidden_states + self.dropout(y)\n return layer_output\n\n\nclass TransformerAttention(nn.Module):\n def __init__(self, config: TransformerConfig, rel_emb=None):\n super().__init__()\n self.config = config\n self.is_decoder = config.is_decoder\n\n if 
isinstance(rel_emb, int): # create new embedding module here\n raise NotImplemented()\n elif isinstance(rel_emb, nn.Module): # assign\n self.rel_emb = rel_emb\n else:\n assert rel_emb is False or rel_emb is None\n self.rel_emb = None\n\n self.output_attentions = config.output_attentions\n self.d_model = config.d_model\n self.d_kv = config.d_kv\n self.n_heads = config.num_heads\n assert self.d_model == self.d_kv * self.n_heads\n self.dropout = torch.nn.Dropout(config.attention_dropout_rate)\n self.sidedropout = torch.nn.Dropout(config.sideways_dropout)\n self.inner_dim = self.n_heads * self.d_kv\n\n # Mesh TensorFlow initialization to avoid scaling before softmax\n self.q = nn.Linear(self.d_model, self.inner_dim, bias=False)\n self.k = nn.Linear(self.d_model, self.inner_dim, bias=False)\n self.v = nn.Linear(self.d_model, self.inner_dim, bias=False)\n self.o = nn.Linear(self.inner_dim, self.d_model, bias=False)\n\n if config.vib_att:\n self.o_gate = nn.Linear(self.inner_dim, self.d_model, bias=False)\n self.o_mu = nn.Linear(self.d_model, self.d_model, bias=True)\n self.o_logvar = nn.Linear(self.d_model, self.d_model, bias=True)\n self.o_ln = nn.LayerNorm(self.d_model, eps=self.config.layer_norm_epsilon)\n\n self.reset_parameters()\n\n def reset_parameters(self):\n d_model = self.config.d_model\n d_kv = self.config.d_kv\n n_heads = self.config.num_heads\n self.q.weight.data.normal_(mean=0.0, std=FACTOR * ((d_model * d_kv) ** -0.5))\n self.k.weight.data.normal_(mean=0.0, std=FACTOR * (d_model ** -0.5))\n self.v.weight.data.normal_(mean=0.0, std=FACTOR * (d_model ** -0.5))\n self.o.weight.data.normal_(mean=0.0, std=FACTOR * ((n_heads * d_kv) ** -0.5))\n if self.config.vib_att:\n self.o_gate.weight.data.normal_(mean=0.0, std=FACTOR * ((n_heads * d_kv) ** -0.5))\n self.o_mu.weight.data.normal_(mean=0.0, std=FACTOR * ((n_heads * d_kv) ** -0.5))\n self.o_mu.bias.data.fill_(0)\n self.o_logvar.weight.data.normal_(mean=0.0, std=FACTOR * ((n_heads * d_kv) ** -0.5))\n self.o_logvar.bias.data.fill_(0)\n\n def forward(\n self,\n input,\n mask=None,\n kv=None,\n past_key_value_state=None,\n query_length=None,\n use_cache=False,\n relpos=None,\n ):\n \"\"\"\n Self-attention (if kv is None) or attention over source sentence (provided by kv).\n \"\"\"\n # Input is (bs, qlen, dim)\n # Mask is (bs, klen) (non-causal) or (bs, klen, klen)\n # past_key_value_state[0] is (bs, n_heads, q_len - 1, dim_per_head)\n bs, qlen, dim = input.size()\n\n if past_key_value_state is not None:\n assert self.is_decoder is True, \"Encoder cannot cache past key value states\"\n assert (\n len(past_key_value_state) == 2\n ), \"past_key_value_state should have 2 past states: keys and values. 
Got {} past states\".format(\n len(past_key_value_state)\n )\n real_qlen = qlen + past_key_value_state[0].shape[2] if query_length is None else query_length\n else:\n real_qlen = qlen\n\n if kv is None:\n klen = real_qlen\n else:\n klen = kv.size(1)\n\n def shape(x):\n \"\"\" projection \"\"\"\n return x.view(bs, -1, self.n_heads, self.d_kv).transpose(1, 2)\n\n def unshape(x):\n \"\"\" compute context \"\"\"\n return x.transpose(1, 2).contiguous().view(bs, -1, self.inner_dim)\n\n q = shape(self.q(input)) # (bs, n_heads, qlen, dim_per_head)\n\n if kv is None:\n _input = self.sidedropout(input)\n k = shape(self.k(_input)) # (bs, n_heads, qlen, dim_per_head)\n v = shape(self.v(_input)) # (bs, n_heads, qlen, dim_per_head)\n elif past_key_value_state is None:\n _kv = self.sidedropout(kv)\n k = v = _kv\n k = shape(self.k(k)) # (bs, n_heads, qlen, dim_per_head)\n v = shape(self.v(v)) # (bs, n_heads, qlen, dim_per_head)\n\n if past_key_value_state is not None:\n if kv is None:\n k_, v_ = past_key_value_state\n k = torch.cat([k_, k], dim=2) # (bs, n_heads, klen, dim_per_head)\n v = torch.cat([v_, v], dim=2) # (bs, n_heads, klen, dim_per_head)\n else:\n k, v = past_key_value_state\n\n if self.is_decoder and use_cache is True:\n present_key_value_state = ((k, v),)\n else:\n present_key_value_state = (None,)\n\n scores = torch.einsum(\"bnqd,bnkd->bnqk\", q, k) # (bs, n_heads, qlen, klen)\n scores = scores / math.sqrt(self.d_kv)\n\n if relpos is not None:\n assert self.rel_emb is not None, \"can't process relpos because rel_emb is not initialized\"\n relpos_scores = self.rel_emb.compute_scores(q, relpos) # (bs, n_heads, qlen, dim)x(bs, qlen, klen)->(bs, n_heads, qlen, klen)\n scores = scores + relpos_scores\n\n if mask is not None:\n scores = scores + mask\n\n weights = F.softmax(scores.float(), dim=-1).type_as(scores) # (bs, n_heads, qlen, klen)\n weights = self.dropout(weights) # (bs, n_heads, qlen, klen)\n\n context = torch.matmul(weights, v) # (bs, n_heads, qlen, dim_per_head)\n\n if relpos is not None:\n context_rel = self.rel_emb.compute_context(weights, relpos) # (bs, n_heads, qlen, klen)x(bs, qlen, klen) -> (bs, n_heads, qlen, dim)\n context = context + context_rel\n\n context = unshape(context) # (bs, qlen, dim)\n\n _context = context\n context = self.o(context)\n\n if self.config.vib_att:\n _context = torch.relu(context) * torch.sigmoid(self.o_gate(_context))\n _context = self.o_ln(_context)\n mu, logvar = self.o_mu(_context), self.o_logvar(_context)\n\n if self.training:\n ret = mu + torch.exp(0.5 * logvar) * torch.randn_like(mu)\n else:\n ret = mu\n context = ret\n\n priorkl = torch.zeros(ret.size(0), ret.size(1), device=ret.device)\n if self.training:\n priorkl = -0.5 * torch.sum(1 + logvar - mu ** 2 - logvar.exp(), dim=-1) # (batsize, seqlen)\n # priorkls = priorkls * mask.float() # TOD: mask !!!\n # priorkl = priorkls.sum(-1)\n\n outputs = (context, ) + present_key_value_state + (priorkl,)\n else:\n outputs = (context, ) + present_key_value_state\n\n if self.output_attentions:\n outputs = outputs + (weights,)\n return outputs\n\n\nclass TransformerLayerSelfAttention(nn.Module):\n def __init__(self, config, rel_emb=None):\n super().__init__()\n self.SelfAttention = TransformerAttention(config, rel_emb=rel_emb)\n self.layer_norm = torch.nn.LayerNorm(config.d_model, eps=config.layer_norm_epsilon)\n self.dropout = nn.Dropout(config.dropout_rate)\n\n def forward(\n self,\n hidden_states,\n attention_mask=None,\n past_key_value_state=None,\n use_cache=False,\n relpos=None,\n ):\n norm_x = 
self.layer_norm(hidden_states)\n attention_output = self.SelfAttention(\n norm_x,\n mask=attention_mask,\n past_key_value_state=past_key_value_state,\n use_cache=use_cache,\n relpos=relpos,\n )\n y = attention_output[0]\n layer_output = hidden_states + self.dropout(y)\n outputs = (layer_output,) + attention_output[1:] # add attentions if we output them\n return outputs\n\n\nclass TransformerLayerCrossAttention(nn.Module):\n def __init__(self, config):\n super().__init__()\n self.EncDecAttention = TransformerAttention(config)\n self.layer_norm = torch.nn.LayerNorm(config.d_model, eps=config.layer_norm_epsilon)\n self.dropout = nn.Dropout(config.dropout_rate)\n\n def forward(\n self,\n hidden_states,\n kv,\n attention_mask=None,\n past_key_value_state=None,\n use_cache=False,\n query_length=None,\n ):\n norm_x = self.layer_norm(hidden_states)\n attention_output = self.EncDecAttention(\n norm_x,\n mask=attention_mask,\n kv=kv,\n past_key_value_state=past_key_value_state,\n use_cache=use_cache,\n query_length=query_length,\n )\n y = attention_output[0]\n layer_output = hidden_states + self.dropout(y)\n outputs = (layer_output,) + attention_output[1:] # add attentions if we output them\n return outputs\n\n\nclass TransformerBlock(nn.Module):\n def __init__(self, config, rel_emb=None):\n super().__init__()\n self.is_decoder = config.is_decoder\n self.layer = nn.ModuleList()\n self.layer.append(TransformerLayerSelfAttention(config, rel_emb=rel_emb))\n if self.is_decoder:\n self.layer.append(TransformerLayerCrossAttention(config))\n\n self.layer.append(TransformerLayerFF(config))\n\n def forward(\n self,\n hidden_states,\n attention_mask=None,\n encoder_hidden_states=None,\n encoder_attention_mask=None,\n past_key_value_state=None,\n use_cache=False,\n relpos=None,\n ):\n\n if past_key_value_state is not None:\n assert self.is_decoder, \"Only decoder can use `past_key_value_states`\"\n expected_num_past_key_value_states = 2 if encoder_hidden_states is None else 4\n\n error_message = \"There should be {} past states. 2 (past / key) for self attention.{} Got {} past key / value states\".format(\n expected_num_past_key_value_states,\n \"2 (past / key) for cross attention\" if expected_num_past_key_value_states == 4 else \"\",\n len(past_key_value_state),\n )\n assert len(past_key_value_state) == expected_num_past_key_value_states, error_message\n\n self_attn_past_key_value_state = past_key_value_state[:2]\n cross_attn_past_key_value_state = past_key_value_state[2:]\n else:\n self_attn_past_key_value_state, cross_attn_past_key_value_state = None, None\n\n self_attention_outputs = self.layer[0](\n hidden_states,\n attention_mask=attention_mask,\n past_key_value_state=self_attn_past_key_value_state,\n use_cache=use_cache,\n relpos=relpos,\n )\n hidden_states, present_key_value_state = self_attention_outputs[:2]\n attention_outputs = self_attention_outputs[2:] # Keep self-attention outputs and relative position weights\n\n if self.is_decoder and encoder_hidden_states is not None:\n # the actual query length is unknown for cross attention\n # if using past key value states. 
Need to inject it here\n if present_key_value_state is not None:\n query_length = present_key_value_state[0].shape[2]\n else:\n query_length = None\n\n cross_attention_outputs = self.layer[1](\n hidden_states,\n kv=encoder_hidden_states,\n attention_mask=encoder_attention_mask,\n past_key_value_state=cross_attn_past_key_value_state,\n query_length=query_length,\n use_cache=use_cache,\n )\n hidden_states = cross_attention_outputs[0]\n # Combine self attn and cross attn key value states\n if present_key_value_state is not None:\n present_key_value_state = present_key_value_state + cross_attention_outputs[1]\n\n # Keep cross-attention outputs and relative position weights\n attention_outputs = attention_outputs + cross_attention_outputs[2:]\n\n # Apply Feed Forward layer\n hidden_states = self.layer[-1](hidden_states)\n outputs = (hidden_states,)\n\n # Add attentions if we output them\n outputs = outputs + (present_key_value_state,) + attention_outputs\n return outputs # hidden-states, present_key_value_states, (self-attention weights), (self-attention position bias), (cross-attention weights), (cross-attention position bias)\n\n\nclass TransformerPreTrainedModel(PreTrainedModel):\n \"\"\" An abstract class to handle weights initialization and\n a simple interface for downloading and loading pretrained models.\n \"\"\"\n\n config_class = T5Config\n # pretrained_model_archive_map = T5_PRETRAINED_MODEL_ARCHIVE_MAP\n # load_tf_weights = load_tf_weights_in_t5\n base_model_prefix = \"transformer\"\n\n @property\n def dummy_inputs(self):\n input_ids = torch.tensor(DUMMY_INPUTS)\n input_mask = torch.tensor(DUMMY_MASK)\n dummy_inputs = {\n \"decoder_input_ids\": input_ids,\n \"input_ids\": input_ids,\n \"decoder_attention_mask\": input_mask,\n }\n return dummy_inputs\n\n def _init_weights(self, module):\n \"\"\" Initialize the weights \"\"\"\n factor = self.config.initializer_factor # Used for testing weights initialization\n # if isinstance(module, torch.nn.LayerNorm):\n # module.weight.data.fill_(factor * 1.0)\n # elif isinstance(module, (TransformerModel, T5ForConditionalGeneration)):\n # # Mesh TensorFlow embeddings initialization\n # # See https://github.com/tensorflow/mesh/blob/fa19d69eafc9a482aff0b59ddd96b025c0cb207d/mesh_tensorflow/layers.py#L1624\n # module.shared.weight.data.normal_(mean=0.0, std=factor * 1.0)\n # if isinstance(module, TransformerDenseReluDense):\n # # Mesh TensorFlow FF initialization\n # # See https://github.com/tensorflow/mesh/blob/master/mesh_tensorflow/transformer/transformer_layers.py#L56\n # # and https://github.com/tensorflow/mesh/blob/fa19d69eafc9a482aff0b59ddd96b025c0cb207d/mesh_tensorflow/layers.py#L89\n # module.wi.weight.data.normal_(mean=0.0, std=factor * ((self.config.d_model) ** -0.5))\n # if hasattr(module.wi, \"bias\") and module.wi.bias is not None:\n # module.wi.bias.data.zero_()\n # module.wo.weight.data.normal_(mean=0.0, std=factor * ((self.config.d_ff) ** -0.5))\n # if hasattr(module.wo, \"bias\") and module.wo.bias is not None:\n # module.wo.bias.data.zero_()\n # elif isinstance(module, TransformerAttention):\n # # Mesh TensorFlow attention initialization to avoid scaling before softmax\n # # See https://github.com/tensorflow/mesh/blob/fa19d69eafc9a482aff0b59ddd96b025c0cb207d/mesh_tensorflow/transformer/attention.py#L136\n # d_model = self.config.d_model\n # d_kv = self.config.d_kv\n # n_heads = self.config.num_heads\n # module.q.weight.data.normal_(mean=0.0, std=factor * ((d_model * d_kv) ** -0.5))\n # module.k.weight.data.normal_(mean=0.0, 
std=factor * (d_model ** -0.5))\n # module.v.weight.data.normal_(mean=0.0, std=factor * (d_model ** -0.5))\n # module.o.weight.data.normal_(mean=0.0, std=factor * ((n_heads * d_kv) ** -0.5))\n # if module.has_relative_attention_bias:\n # module.relative_attention_bias.weight.data.normal_(mean=0.0, std=factor * ((d_model) ** -0.5))\n\n def _shift_right(self, input_ids):\n decoder_start_token_id = self.config.decoder_start_token_id\n pad_token_id = self.config.pad_token_id\n\n assert (\n decoder_start_token_id is not None\n ), \"self.model.config.decoder_start_token_id has to be defined. In T5 it is usually set to the pad_token_id. See T5 docs for more information\"\n\n # shift inputs to the right\n shifted_input_ids = input_ids.new_zeros(input_ids.shape)\n shifted_input_ids[..., 1:] = input_ids[..., :-1].clone()\n shifted_input_ids[..., 0] = decoder_start_token_id\n\n assert pad_token_id is not None, \"self.model.config.pad_token_id has to be defined.\"\n # replace possible -100 values in lm_labels by `pad_token_id`\n shifted_input_ids.masked_fill_(shifted_input_ids == -100, pad_token_id)\n\n assert torch.all(shifted_input_ids >= 0).item(), \"Verify that `lm_labels` has only positive values and -100\"\n\n return shifted_input_ids\n\n\nclass TransformerStack(TransformerPreTrainedModel):\n def __init__(self, config:TransformerConfig, embed_tokens=None, rel_emb=False):\n \"\"\"\n If rel_emb is False or None, no relative positioning added\n If rel_emb is int: layer-wise separate embeddings created at every layer\n If rel_emb is Module: module will be shared as relpos embeddings across all layers\n If rel_emb is List[Module]\n \"\"\"\n super().__init__(config)\n self.output_attentions = config.output_attentions\n self.output_hidden_states = config.output_hidden_states\n\n self.embed_tokens = embed_tokens\n self.is_decoder = config.is_decoder\n self.rel_emb = rel_emb\n\n if isinstance(self.rel_emb, nn.Module):\n self.rel_emb = torch.nn.ModuleList([self.rel_emb for _ in range(config.num_layers)])\n elif self.rel_emb is False or self.rel_emb is None:\n self.rel_emb = [None for _ in range(config.num_layers)]\n\n self.block = nn.ModuleList(\n [TransformerBlock(config, rel_emb=self.rel_emb[i]) for i in range(config.num_layers)]\n )\n\n self.final_layer_norm = torch.nn.LayerNorm(config.d_model, eps=config.layer_norm_epsilon)\n self.dropout = nn.Dropout(config.dropout_rate)\n\n # self.init_weights()\n\n def get_input_embeddings(self):\n return self.embed_tokens\n\n def get_output_embeddings(self):\n return self.embed_tokens\n\n def set_input_embeddings(self, new_embeddings):\n self.embed_tokens = new_embeddings\n\n def forward(\n self,\n input_ids=None,\n attention_mask=None,\n encoder_hidden_states=None,\n encoder_attention_mask=None,\n inputs_embeds=None,\n past_key_value_states=None,\n use_cache=False,\n relpos=None,\n ):\n # assert(use_cache == False)\n if input_ids is not None and inputs_embeds is not None:\n raise ValueError(\"You cannot specify both input_ids and inputs_embeds at the same time\")\n elif input_ids is not None:\n input_shape = input_ids.size()\n input_ids = input_ids.view(-1, input_shape[-1])\n elif inputs_embeds is not None:\n input_shape = inputs_embeds.size()[:-1]\n else:\n if self.is_decoder:\n raise ValueError(\"You have to specify either decoder_input_ids or decoder_inputs_embeds\")\n else:\n raise ValueError(\"You have to specify either input_ids or inputs_embeds\")\n\n if inputs_embeds is None:\n assert self.embed_tokens is not None, \"You have to intialize the model with 
valid token embeddings\"\n inputs_embeds = self.embed_tokens(input_ids)\n\n batch_size, seq_length = input_shape\n\n if past_key_value_states is not None:\n assert seq_length == 1, \"Input shape is {}, but should be {} when using past_key_value_sates\".format(\n input_shape, (batch_size, 1)\n )\n # required mask seq length can be calculated via length of past\n # key value states and seq_length = 1 for the last token\n mask_seq_length = past_key_value_states[0][0].shape[2] + seq_length\n else:\n mask_seq_length = seq_length\n\n if attention_mask is None:\n attention_mask = torch.ones(batch_size, mask_seq_length).to(inputs_embeds.device)\n if self.is_decoder and encoder_attention_mask is None and encoder_hidden_states is not None:\n encoder_seq_length = encoder_hidden_states.shape[1]\n encoder_attention_mask = torch.ones(batch_size, encoder_seq_length).to(inputs_embeds.device)\n\n # initialize past_key_value_states with `None` if past does not exist\n if past_key_value_states is None:\n past_key_value_states = [None] * len(self.block)\n\n # ourselves in which case we just need to make it broadcastable to all heads.\n # !!! causality is added to the attention_mask in the following line!\n extended_attention_mask = self.get_extended_attention_mask(attention_mask, input_shape, self.device)\n\n if self.is_decoder and encoder_attention_mask is not None:\n encoder_extended_attention_mask = self.invert_attention_mask(encoder_attention_mask)\n else:\n encoder_extended_attention_mask = None\n\n # Prepare head mask if needed\n present_key_value_states = ()\n all_hidden_states = ()\n all_attentions = ()\n\n hidden_states = self.dropout(inputs_embeds)\n\n vib_att_priorkls = []\n\n for i, (layer_module, past_key_value_state) in enumerate(zip(self.block, past_key_value_states)):\n if self.output_hidden_states:\n all_hidden_states = all_hidden_states + (hidden_states,)\n\n layer_outputs = layer_module(\n hidden_states,\n attention_mask=extended_attention_mask,\n encoder_hidden_states=encoder_hidden_states,\n encoder_attention_mask=encoder_extended_attention_mask,\n past_key_value_state=past_key_value_state,\n use_cache=use_cache,\n relpos=relpos,\n )\n # layer_outputs is a tuple with:\n # hidden-states, key-value-states, (self-attention weights), (self-attention position bias), (cross-attention weights), (cross-attention position bias)\n hidden_states, present_key_value_state = layer_outputs[:2]\n # append next layer key value states\n present_key_value_states = present_key_value_states + (present_key_value_state,)\n\n layer_outputs = layer_outputs[2:]\n if self.config.vib_att:\n priorkl = layer_outputs[0]\n vib_att_priorkls.append(priorkl)\n layer_outputs = layer_outputs[1:]\n if self.output_attentions:\n all_attentions = all_attentions + (layer_outputs[2],) # We keep only self-attention weights for now\n\n if len(vib_att_priorkls) > 0:\n vib_att_priorkls = sum(vib_att_priorkls)\n else:\n vib_att_priorkls = None\n\n hidden_states = self.final_layer_norm(hidden_states)\n # hidden_states = self.dropout(hidden_states)\n\n # Add last layer\n if self.output_hidden_states:\n all_hidden_states = all_hidden_states + (hidden_states,)\n\n outputs = (hidden_states,)\n if self.config.vib_att:\n outputs = outputs + (vib_att_priorkls,)\n if use_cache is True:\n assert self.is_decoder, \"`use_cache` can only be set to `True` if {} is used as a decoder\".format(self)\n outputs = outputs + (present_key_value_states,)\n if self.output_hidden_states:\n outputs = outputs + (all_hidden_states,)\n if self.output_attentions:\n 
outputs = outputs + (all_attentions,)\n return outputs # last-layer hidden state, (presents,) (all hidden states), (all attentions)\n\n def get_extended_attention_mask(self, attention_mask: torch.Tensor, input_shape: tuple, device: torch.device):\n \"\"\"Makes broadcastable attention mask and causal mask so that future and masked tokens are ignored.\n\n Arguments:\n attention_mask: torch.Tensor with 1 indicating tokens to ATTEND to\n input_shape: tuple, shape of input_ids\n device: torch.Device, usually self.device\n\n Returns:\n torch.Tensor with dtype of attention_mask.dtype\n \"\"\"\n # We can provide a self-attention mask of dimensions [batch_size, from_seq_length, to_seq_length]\n # ourselves in which case we just need to make it broadcastable to all heads.\n if attention_mask.dim() == 3:\n extended_attention_mask = attention_mask[:, None, :, :]\n elif attention_mask.dim() == 2:\n # Provided a padding mask of dimensions [batch_size, seq_length]\n # - if the model is a decoder, apply a causal mask in addition to the padding mask\n # - if the model is an encoder, make the mask broadcastable to [batch_size, num_heads, seq_length, seq_length]\n if self.config.is_decoder and self.config.use_causal_mask:\n batch_size, seq_length = input_shape\n seq_ids = torch.arange(seq_length, device=device)\n causal_mask = seq_ids[None, None, :].repeat(batch_size, seq_length, 1) <= seq_ids[None, :, None]\n # causal and attention masks must have same type with pytorch version < 1.3\n causal_mask = causal_mask.to(attention_mask.dtype)\n extended_attention_mask = causal_mask[:, None, :, :] * attention_mask[:, None, None, :]\n else:\n extended_attention_mask = attention_mask[:, None, None, :]\n else:\n raise ValueError(\n \"Wrong shape for input_ids (shape {}) or attention_mask (shape {})\".format(\n input_shape, attention_mask.shape\n )\n )\n\n # Since attention_mask is 1.0 for positions we want to attend and 0.0 for\n # masked positions, this operation will create a tensor which is 0.0 for\n # positions we want to attend and -10000.0 for masked positions.\n # Since we are adding it to the raw scores before the softmax, this is\n # effectively the same as removing these entirely.\n extended_attention_mask = extended_attention_mask.to(dtype=self.dtype) # fp16 compatibility\n extended_attention_mask = (1.0 - extended_attention_mask) * -10000.0\n return extended_attention_mask\n\n\nT5_START_DOCSTRING = r\"\"\" The T5 model was proposed in\n `Exploring the Limits of Transfer Learning with a Unified Text-to-Text Transformer`_\n by Colin Raffel, Noam Shazeer, Adam Roberts, Katherine Lee, Sharan Narang, Michael Matena, Yanqi Zhou, Wei Li, Peter J. Liu.\n It's an encoder decoder transformer pre-trained in a text-to-text denoising generative setting.\n\n This model is a PyTorch `torch.nn.Module`_ sub-class. Use it as a regular PyTorch Module and\n refer to the PyTorch documentation for all matter related to general usage and behavior.\n\n .. _`Exploring the Limits of Transfer Learning with a Unified Text-to-Text Transformer`:\n https://arxiv.org/abs/1910.10683\n\n .. 
_`torch.nn.Module`:\n https://pytorch.org/docs/stable/nn.html#module\n\n Parameters:\n config (:class:`~transformers.T5Config`): Model configuration class with all the parameters of the model.\n Initializing with a config file does not load the weights associated with the model, only the configuration.\n Check out the :meth:`~transformers.PreTrainedModel.from_pretrained` method to load the model weights.\n\"\"\"\n\nT5_INPUTS_DOCSTRING = r\"\"\"\n Args:\n input_ids (:obj:`torch.LongTensor` of shape :obj:`(batch_size, sequence_length)`):\n Indices of input sequence tokens in the vocabulary.\n T5 is a model with relative position embeddings so you should be able to pad the inputs on both the right and the left.\n Indices can be obtained using :class:`transformers.T5Tokenizer`.\n See :func:`transformers.PreTrainedTokenizer.encode` and\n :func:`transformers.PreTrainedTokenizer.convert_tokens_to_ids` for details.\n To know more on how to prepare :obj:`input_ids` for pre-training take a look at\n `T5 Training <./t5.html#training>`_ .\n attention_mask (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, sequence_length)`, `optional`, defaults to :obj:`None`):\n Mask to avoid performing attention on padding token indices.\n Mask values selected in ``[0, 1]``:\n ``1`` for tokens that are NOT MASKED, ``0`` for MASKED tokens.\n encoder_outputs (:obj:`tuple(tuple(torch.FloatTensor)`, `optional`, defaults to :obj:`None`):\n Tuple consists of (`last_hidden_state`, `optional`: `hidden_states`, `optional`: `attentions`)\n `last_hidden_state` of shape :obj:`(batch_size, sequence_length, hidden_size)`, `optional`, defaults to :obj:`None`) is a sequence of hidden-states at the output of the last layer of the encoder.\n Used in the cross-attention of the decoder.\n decoder_input_ids (:obj:`torch.LongTensor` of shape :obj:`(batch_size, target_sequence_length)`, `optional`, defaults to :obj:`None`):\n Provide for sequence to sequence training. T5 uses the pad_token_id as the starting token for decoder_input_ids generation.\n If `decoder_past_key_value_states` is used, optionally only the last `decoder_input_ids` have to be input (see `decoder_past_key_value_states`).\n To know more on how to prepare :obj:`decoder_input_ids` for pre-training take a look at\n `T5 Training <./t5.html#training>`_ .\n decoder_attention_mask (:obj:`torch.BoolTensor` of shape :obj:`(batch_size, tgt_seq_len)`, `optional`, defaults to :obj:`None`):\n Default behavior: generate a tensor that ignores pad tokens in decoder_input_ids. 
Causal mask will also be used by default.\n decoder_past_key_value_states (:obj:`tuple(tuple(torch.FloatTensor))` of length :obj:`config.n_layers` with each tuple having 4 tensors of shape :obj:`(batch_size, num_heads, sequence_length - 1, embed_size_per_head)`):\n Contains pre-computed key and value hidden-states of the attention blocks.\n Can be used to speed up decoding.\n If `decoder_past_key_value_states` are used, the user can optionally input only the last `decoder_input_ids`\n (those that don't have their past key value states given to this model) of shape :obj:`(batch_size, 1)`\n instead of all `decoder_input_ids` of shape :obj:`(batch_size, sequence_length)`.\n use_cache (:obj:`bool`, `optional`, defaults to :obj:`True`):\n If `use_cache` is True, `decoder_past_key_value_states` are returned and can be used to speed up decoding (see `decoder_past_key_value_states`).\n inputs_embeds (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, sequence_length, hidden_size)`, `optional`, defaults to :obj:`None`):\n Optionally, instead of passing :obj:`input_ids` you can choose to directly pass an embedded representation.\n This is useful if you want more control over how to convert `input_ids` indices into associated vectors\n than the model's internal embedding lookup matrix.\n decoder_inputs_embeds (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, target_sequence_length, hidden_size)`, `optional`, defaults to :obj:`None`):\n Optionally, instead of passing :obj:`decoder_input_ids` you can choose to directly pass an embedded representation.\n If `decoder_past_key_value_states` is used, optionally only the last `decoder_inputs_embeds` have to be input (see `decoder_past_key_value_states`).\n This is useful if you want more control over how to convert `decoder_input_ids` indices into associated vectors\n than the model's internal embedding lookup matrix.\n head_mask: (:obj:`torch.FloatTensor` of shape :obj:`(num_heads,)` or :obj:`(num_layers, num_heads)`, `optional`, defaults to :obj:`None`):\n Mask to nullify selected heads of the self-attention modules.\n Mask values selected in ``[0, 1]``:\n ``1`` indicates the head is **not masked**, ``0`` indicates the head is **masked**.\n\"\"\"\n\n\n@add_start_docstrings(\n \"The bare T5 Model transformer outputting raw hidden-states\" \"without any specific head on top.\",\n T5_START_DOCSTRING,\n)\nclass TransformerModel(TransformerPreTrainedModel):\n def __init__(self, config):\n super().__init__(config)\n self.shared = nn.Embedding(config.vocab_size, config.d_model)\n\n encoder_config = copy.deepcopy(config)\n self.encoder = TransformerStack(encoder_config, self.shared)\n\n decoder_config = copy.deepcopy(config)\n decoder_config.is_decoder = True\n self.decoder = TransformerStack(decoder_config, self.shared)\n\n self.init_weights()\n\n def get_input_embeddings(self):\n return self.shared\n\n def set_input_embeddings(self, new_embeddings):\n self.shared = new_embeddings\n self.encoder.set_input_embeddings(new_embeddings)\n self.decoder.set_input_embeddings(new_embeddings)\n\n def get_encoder(self):\n return self.encoder\n\n def get_decoder(self):\n return self.decoder\n\n def _prune_heads(self, heads_to_prune):\n \"\"\" Prunes heads of the model.\n heads_to_prune: dict of {layer_num: list of heads to prune in this layer}\n See base class PreTrainedModel\n \"\"\"\n for layer, heads in heads_to_prune.items():\n self.encoder.layer[layer].attention.prune_heads(heads)\n\n @add_start_docstrings_to_callable(T5_INPUTS_DOCSTRING)\n def forward(\n self,\n 
input_ids=None,\n attention_mask=None,\n encoder_outputs=None,\n decoder_input_ids=None,\n decoder_attention_mask=None,\n decoder_past_key_value_states=None,\n use_cache=True,\n inputs_embeds=None,\n decoder_inputs_embeds=None,\n head_mask=None,\n ):\n r\"\"\"\n Return:\n :obj:`tuple(torch.FloatTensor)` comprising various elements depending on the configuration (:class:`~transformers.T5Config`) and inputs.\n last_hidden_state (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, sequence_length, hidden_size)`):\n Sequence of hidden-states at the output of the last layer of the model.\n If `decoder_past_key_value_states` is used only the last hidden-state of the sequences of shape :obj:`(batch_size, 1, hidden_size)` is output.\n decoder_past_key_value_states (:obj:`tuple(tuple(torch.FloatTensor))` of length :obj:`config.n_layers` with each tuple having 4 tensors of shape :obj:`(batch_size, num_heads, sequence_length, embed_size_per_head)`, `optional`, returned when ``use_cache=True``):\n Contains pre-computed key and value hidden-states of the attention blocks.\n Can be used to speed up sequential decoding (see `decoder_past_key_value_states` input).\n Note that when using `decoder_past_key_value_states`, the model only outputs the last `hidden-state` of the sequence of shape :obj:`(batch_size, 1, config.vocab_size)`.\n hidden_states (:obj:`tuple(torch.FloatTensor)`, `optional`, returned when ``config.output_hidden_states=True``):\n Tuple of :obj:`torch.FloatTensor` (one for the output of the embeddings + one for the output of each layer)\n of shape :obj:`(batch_size, sequence_length, hidden_size)`.\n\n Hidden-states of the model at the output of each layer plus the initial embedding outputs.\n attentions (:obj:`tuple(torch.FloatTensor)`, `optional`, returned when ``config.output_attentions=True``):\n Tuple of :obj:`torch.FloatTensor` (one for each layer) of shape\n :obj:`(batch_size, num_heads, sequence_length, sequence_length)`.\n\n Attentions weights after the attention softmax, used to compute the weighted average in the self-attention\n heads.\n\n Examples::\n\n from transformers import T5Tokenizer, T5Model\n\n tokenizer = T5Tokenizer.from_pretrained('t5-small')\n model = T5Model.from_pretrained('t5-small')\n input_ids = tokenizer.encode(\"Hello, my dog is cute\", return_tensors=\"pt\") # Batch size 1\n outputs = model(input_ids=input_ids, decoder_input_ids=input_ids)\n last_hidden_states = outputs[0] # The last hidden-state is the first element of the output tuple\n\n \"\"\"\n\n # Encode if needed (training, first prediction pass)\n if encoder_outputs is None:\n encoder_outputs = self.encoder(\n input_ids=input_ids, attention_mask=attention_mask, inputs_embeds=inputs_embeds, head_mask=head_mask\n )\n\n hidden_states = encoder_outputs[0]\n\n # If decoding with past key value states, only the last tokens\n # should be given as an input\n if decoder_past_key_value_states is not None:\n if decoder_input_ids is not None:\n decoder_input_ids = decoder_input_ids[:, -1:]\n if decoder_inputs_embeds is not None:\n decoder_inputs_embeds = decoder_inputs_embeds[:, -1:]\n\n # Decode\n decoder_outputs = self.decoder(\n input_ids=decoder_input_ids,\n attention_mask=decoder_attention_mask,\n inputs_embeds=decoder_inputs_embeds,\n past_key_value_states=decoder_past_key_value_states,\n encoder_hidden_states=hidden_states,\n encoder_attention_mask=attention_mask,\n head_mask=head_mask,\n use_cache=use_cache,\n )\n\n if use_cache is True:\n past = ((encoder_outputs, decoder_outputs[1]),)\n 
decoder_outputs = decoder_outputs[:1] + past + decoder_outputs[2:]\n\n return decoder_outputs + encoder_outputs\n",
"from abc import ABC, abstractmethod\nfrom functools import partial\nfrom typing import Union, Dict, Callable, List\n\nimport nltk\nimport qelos as q\n\nimport torch\nimport numpy as np\n\nfrom parseq.grammar import are_equal_trees\nfrom parseq.states import State, DecodableState, TrainableDecodableState, BeamState\n\n\nclass SelectedLoss(q.SelectedLinearLoss):\n \"\"\" Same as LinearLoss, but with selection from tuple of outputs from model (that specifies losses)\n To be used to output multiple losses from the model/ select one model output as training loss\n \"\"\"\n def forward(self, model_outs, gold, **kw):\n metrics = model_outs[0]\n x = metrics[self.which]\n if self.reduction in [\"elementwise_mean\", \"mean\"]:\n ret = x.mean()\n elif self.reduction == \"sum\":\n ret = x.sum()\n else:\n ret = x\n return ret\n\n\ndef make_array_of_metrics(*lossnames, reduction=None):\n ret = []\n for lossname in lossnames:\n ret.append(q.MetricWrapper(SelectedLoss(lossname, reduction=reduction), name=lossname))\n return ret\n\n\nclass Metric(ABC):\n @abstractmethod\n def forward(self, probs, predactions, gold, x:State=None) -> Dict:\n pass\n\n def __call__(self, probs, predactions, gold, x:State=None) -> Dict:\n return self.forward(probs, predactions, gold, x)\n\n\nclass Loss(torch.nn.Module, ABC):\n def __init__(self, contrib=1., **kw):\n super(Loss, self).__init__(**kw)\n self.contrib = contrib\n\n @abstractmethod\n def forward(self, probs, predactions, gold, x:State=None)->Dict:\n pass\n\n\nclass BCELoss(Loss):\n def __init__(self, weight=None, reduction=\"mean\", mode=\"logits\", smoothing:float=0., **kw):\n super(BCELoss, self).__init__(**kw)\n if mode == \"logits\":\n self.bce = torch.nn.BCEWithLogitsLoss(weight=weight, reduction=reduction)\n elif mode == \"logprobs\":\n self.bce = torch.nn.BCELoss(weight=weight, reduction=reduction)\n self.smoothing = smoothing\n\n def forward(self, probs, predactions, gold, x:State=None) ->Dict:\n if self.smoothing > 0:\n gold = gold.clamp(self.smoothing, 1-self.smoothing)\n ret = self.bce(probs, gold) * self.contrib\n return {\"loss\": ret, \"ce\": ret}\n\n\nclass KLLoss(Loss):\n def __init__(self, weight=None, reduction=\"mean\", mode=\"logits\", goldmode=\"logits\", maximize=False, **kw):\n super(KLLoss, self).__init__(**kw)\n self.reduction = reduction\n self.mode = mode\n self.goldmode = goldmode\n self.sm = torch.nn.Softmax(-1)\n self.logsm = torch.nn.LogSoftmax(-1)\n self.mult = -1 if maximize else 1\n self.kldiv = torch.nn.KLDivLoss(reduction=\"none\")\n\n def forward(self, probs, predactions, golds, mask=None, x:State=None) ->Dict:\n if probs.size(1) < golds.size(1):\n extension = torch.ones(probs.size(0), golds.size(1) - probs.size(1), probs.size(2), dtype=probs.dtype, device=probs.device)\n extension /= extension.size(2) # makes uniform dist\n probs = torch.cat([probs, extension], 1)\n else:\n probs = probs[:, :golds.size(1)]\n\n if self.mode == \"logits\":\n logprobs = self.logsm(probs)\n elif self.mode == \"probs\":\n logprobs = torch.log(probs).clamp_min(-1e9)\n elif self.mode == \"logprobs\":\n logprobs = probs.clamp_min(-1e9)\n else:\n raise Exception(f\"mode '{self.mode}' unknown. \")\n\n if self.goldmode == \"logits\":\n goldprobs = self.sm(golds)\n elif self.goldmode == \"probs\":\n goldprobs = golds\n elif self.goldmode == \"logprobs\":\n goldprobs = torch.exp(golds)\n else:\n raise Exception(f\"goldmode '{self.goldmode}' unknown. 
\")\n\n kl = self.kldiv(logprobs, goldprobs) # (batsize, seqlen, vocabsize)\n kl = kl.sum(-1)\n\n if mask is not None:\n assert mask.dim() == 2, f\"mask dim must be 2\"\n kl = kl * mask.float()\n\n if self.reduction == \"mean\":\n ret = kl.sum() / mask.float().sum()\n elif self.reduction == \"sum\":\n ret = kl.sum()\n elif self.reduction == \"none\" or self.reduction is None:\n ret = kl\n else:\n raise Exception(f\"Unknown reduction '{self.reduction}'\")\n\n ret = ret * self.contrib * self.mult\n\n return {\"kl\": ret, \"loss\": ret}\n\n\nclass EntropyLoss(Loss):\n def __init__(self, weight=None, reduction=\"mean\", ignore_index=-100, mode=\"logits\", maximize=True, **kw):\n super(EntropyLoss, self).__init__(**kw)\n self.reduction = reduction\n self.ignore_index = ignore_index\n self.mode = mode\n self.sm = torch.nn.Softmax(-1)\n self.logsm = torch.nn.LogSoftmax(-1)\n self.mult = -1 if maximize else 1\n\n def forward(self, probs, predactions, golds, mask=None, x:State=None) ->Dict:\n if probs.size(1) < golds.size(1):\n extension = torch.ones(probs.size(0), golds.size(1) - probs.size(1), probs.size(2), dtype=probs.dtype, device=probs.device)\n extension /= extension.size(2) # makes uniform dist\n probs = torch.cat([probs, extension], 1)\n else:\n probs = probs[:, :golds.size(1)]\n\n if self.mode == \"logits\":\n probs = self.sm(probs)\n logprobs = self.logsm(probs).clamp_min(-1e9)\n elif self.mode == \"probs\":\n logprobs = torch.log(probs).clamp_min(-1e9)\n elif self.mode == \"logprobs\":\n logprobs = probs.clamp_min(-1e9)\n probs = torch.exp(probs)\n else:\n raise Exception(f\"mode '{self.mode}' unknown. \")\n\n entropy = -(probs * logprobs)\n if mask is not None and mask.dim() == 3:\n entropy = entropy * mask\n\n entropy = entropy.sum(-1) # (batsize, seqlen)\n\n gold_mask = mask if mask.dim() == 2 else None\n if self.ignore_index >= 0:\n if gold_mask is None:\n gold_mask = golds != self.ignore_index\n else:\n gold_mask = gold_mask & (golds != self.ignore_index)\n\n if gold_mask is not None:\n entropy = entropy * gold_mask.float()\n\n if self.reduction == \"mean\":\n ret = entropy.sum() / gold_mask.float().sum()\n elif self.reduction == \"sum\":\n ret = entropy.sum()\n elif self.reduction == \"none\" or self.reduction is None:\n ret = entropy\n else:\n raise Exception(f\"Unknown reduction '{self.reduction}'\")\n\n ret = ret * self.contrib * self.mult\n\n return {\"entropy\": ret, \"loss\": ret}\n\n\nclass CELoss(Loss):\n def __init__(self, weight=None, reduction=\"mean\", ignore_index=0, mode=\"logits\", smoothing:float=0., **kw):\n super(CELoss, self).__init__(**kw)\n self.mode = mode\n self.ce = q.CELoss(weight=weight, reduction=reduction, ignore_index=ignore_index, mode=mode)\n if smoothing != 0.:\n assert(smoothing < 1. 
and smoothing > 0.)\n assert(mode in [\"logits\", \"logprobs\"])\n self.ce = q.SmoothedCELoss(reduction=reduction, ignore_index=ignore_index, smoothing=smoothing, mode=mode, weight=weight)\n\n def forward(self, probs, predactions, golds, x:State=None): # must be BasicStates\n # golds = x.get_gold()\n if probs.size(1) < golds.size(1):\n extension = torch.ones(probs.size(0), golds.size(1) - probs.size(1), probs.size(2), dtype=probs.dtype, device=probs.device)\n extension /= extension.size(2) # makes uniform dist\n probs = torch.cat([probs, extension], 1)\n else:\n probs = probs[:, :golds.size(1)]\n if probs.size(1) != golds.size(1):\n print(probs, golds)\n\n selected = probs.gather(2, golds[:, :, None])\n if torch.any(selected == (-np.infty if self.mode in (\"logits\", \"logprobs\") else 0.)):\n print(\"gold id could not be generated\")\n\n loss = self.ce(probs, golds)\n loss = loss * self.contrib\n return {\"loss\": loss, \"ce\": loss}\n\n\ndef state_path_penalty_getter(x, spec=None):\n path = spec.split(\".\")\n o = x\n for path_e in path:\n o = getattr(o, path_e)\n return o\n\n\nclass StatePenalty(Loss):\n def __init__(self, getter, weight=1., reduction=\"mean\", name=\"penalty\", **kw):\n super(StatePenalty, self).__init__(**kw)\n if isinstance(getter, str):\n getter = partial(state_path_penalty_getter, spec=getter)\n self.getter = getter\n self.reduction = reduction\n self.weight = weight\n self._name = name\n\n def forward(self, probs, predactions, gold, x:State=None) ->Dict:\n # get tensor from state\n penalty_vec = self.getter(x)\n assert(penalty_vec.dim() == 1 and penalty_vec.size(0) == probs.size(0))\n if self.reduction in (\"mean\", \"default\"):\n penalty = penalty_vec.mean()\n elif self.reduction == \"sum\":\n penalty = penalty_vec.sum()\n elif self.reduction in (\"none\", None):\n penalty = penalty_vec\n else:\n raise Exception(f\"unknown reduction mode: {self.reduction}\")\n ret = penalty * q.v(self.weight)\n ret = ret * self.contrib\n return {\"loss\": ret, self._name: ret}\n\n\nclass SeqAccuracies(Metric):\n padid = 0\n unkid = 1\n def forward(self, probs, predactions, golds, x:State=None): # must be BasicStates\n # TODO: GOLD MUST CONTAIN END TOKEN !!!!!\n # golds = x.get_gold()\n mask = golds != self.padid\n if predactions.size(1) < golds.size(1):\n extension = torch.zeros(predactions.size(0), golds.size(1) - predactions.size(1), dtype=predactions.dtype, device=predactions.device)\n predactions = torch.cat([predactions, extension], 1)\n else:\n predactions = predactions[:, :golds.size(1)]\n same = golds == predactions\n same = same & (predactions != self.unkid)\n seq_accs = (same | ~mask).all(1).float()\n elem_accs = (same & mask).sum(1).float() / mask.sum(1).float()\n ret = {\"seq_acc\": seq_accs.sum().detach().cpu().item() / seq_accs.size(0),\n \"elem_acc\": elem_accs.sum().detach().cpu().item() / elem_accs.size(0)}\n return ret\n\n\nclass DerivedAccuracy(Metric):\n def __init__(self, name:str=\"derived_acc\", tensor2tree:Callable[[torch.Tensor], nltk.Tree]=None, **kw):\n super(DerivedAccuracy, self).__init__(**kw)\n self.name = name\n self.tensor2tree = tensor2tree\n\n def forward(self, probs, predactions, golds, x:State=None):\n # golds = x.get_gold()\n gold_trees = [self.tensor2tree(gold) for gold in golds]\n pred_trees = [self.tensor2tree(predactionse) for predactionse in predactions]\n ret = [float(gold_tree == pred_tree) for gold_tree, pred_tree in zip(gold_trees, pred_trees)]\n ret = {self.name: sum(ret) / len(ret)}\n return ret\n\n\nclass TreeAccuracy(Metric):\n 
unktoken = \"@UNK@\"\n def __init__(self, name:str=\"tree_acc\", tensor2tree:Callable[[torch.Tensor], nltk.Tree]=None, orderless=set(), **kw):\n super(TreeAccuracy, self).__init__(**kw)\n self.name = name\n self.tensor2tree = tensor2tree\n self.orderless = orderless\n\n def forward(self, probs, predactions, golds, x:State=None):\n def compare(_gold_trees, _predactions):\n pred_trees = [self.tensor2tree(predactionse) for predactionse in _predactions]\n ret = [float(are_equal_trees(gold_tree, pred_tree, orderless=self.orderless, unktoken=self.unktoken))\n for gold_tree, pred_tree in zip(_gold_trees, pred_trees)]\n return ret\n if isinstance(predactions, torch.Tensor) and predactions.dim() == 3: # beam states\n # assert(isinstance(x, BeamState))\n # golds = x.bstates.get(0).get_gold()\n gold_trees = [self.tensor2tree(goldse) for goldse in golds]\n rets = []\n for i in range(predactions.size(1)):\n ret_i = compare(gold_trees, predactions[:, i])\n rets.append(ret_i)\n rets = np.asarray(rets).T\n acc_cum = np.cumsum(rets, 1)\n acc_cum = np.clip(acc_cum, 0, 1)\n r = {}\n batsize = acc_cum.shape[0]\n r[self.name] = sum(acc_cum[:, 0]) / batsize\n for j in range(acc_cum.shape[1]):\n r[f\"{self.name}_at{j+1}\"] = sum(acc_cum[:, j]) / batsize\n r[f\"{self.name}_at_last\"] = sum(acc_cum[:, -1]) / batsize\n return r\n else:\n # assert(predactions.dim() == 2)\n # golds = x.get_gold()\n # _gold_trees = x.gold_trees\n gold_trees = [self.tensor2tree(goldse) for goldse in golds]\n ret = compare(gold_trees, predactions)\n ret = {self.name: sum(ret) / len(ret)}\n return ret\n\n\nclass BeamSeqAccuracies(Metric):\n def forward(self, probs, predactions, golds, x:State=None):\n # golds = x.bstates.get(0).get_gold()\n # for i in range(len(x.bstates._list)):\n # assert(torch.allclose(x.bstates.get(i).get_gold(), golds))\n mask = golds != 0\n\n if predactions.size(2) < golds.size(1):\n extension = torch.zeros(predactions.size(0), predactions.size(1), golds.size(1) - predactions.size(2), dtype=predactions.dtype, device=predactions.device)\n predactions = torch.cat([predactions, extension], 2)\n else:\n predactions = predactions[:, :, :golds.size(1)]\n same = golds[:, None, :] == predactions\n seq_accs = (same | ~mask[:, None, :]).all(2) # (batsize, beamsize)\n assert(torch.allclose((seq_accs.float().sum(-1) <= 1).float(), torch.ones_like(seq_accs[:, 0]).float()))\n batsize, beamsize = seq_accs.size(0), seq_accs.size(1)\n seq_accs_cum = (seq_accs.cumsum(-1) > 0).float()\n seq_accs_cum_sum = list((seq_accs_cum.sum(0) / batsize).detach().cpu().numpy()) # (beamsize,)\n\n ret = {}\n for j in range(0, beamsize):\n ret[f\"beam_seq_recall_at{j+1}\"] = seq_accs_cum_sum[j]\n ret[\"beam_recall\"] = seq_accs_cum_sum[-1]\n ret[\"beam_seq_acc\"] = seq_accs_cum_sum[0]\n ret[\"beam_seq_acc_bottom\"] = seq_accs[:, -1].float().sum().detach().cpu().item() / batsize\n\n elem_accs = (same & mask[:, None, :]).sum(2).float() / mask[:, None, :].sum(2).float()\n elem_accs = elem_accs.max(1)[0]\n ret[\"beam_best_elem_acc\"] = elem_accs.sum().detach().cpu().item() / batsize\n return ret\n\n\n\n"
] |
[
[
"torch.all",
"torch.randn_like",
"torch.nn.Dropout",
"torch.ones",
"torch.cat",
"torch.einsum",
"torch.nn.ModuleList",
"torch.nn.Embedding",
"torch.nn.LayerNorm",
"torch.tensor",
"torch.nn.Linear",
"torch.matmul",
"torch.nn.functional.relu",
"torch.relu",
"torch.exp",
"torch.arange"
],
[
"torch.nn.Softmax",
"torch.nn.LogSoftmax",
"torch.nn.KLDivLoss",
"torch.cat",
"numpy.clip",
"numpy.asarray",
"numpy.cumsum",
"torch.nn.BCELoss",
"torch.any",
"torch.exp",
"torch.nn.BCEWithLogitsLoss",
"torch.log",
"torch.ones_like"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
sorrowyn/query2labels
|
[
"03f8aa74caa6e41fb6898bdf29565d10d212dab7"
] |
[
"lib/models/tresnet/layers/general_layers.py"
] |
[
"import torch\r\nimport torch.nn as nn\r\nimport torch.nn.functional as F\r\n\r\nfrom models.tresnet.layers.avg_pool import FastAvgPool2d\r\n\r\n\r\nclass Flatten(nn.Module):\r\n def forward(self, x):\r\n return x.view(x.size(0), -1)\r\n\r\n\r\nclass DepthToSpace(nn.Module):\r\n\r\n def __init__(self, block_size):\r\n super().__init__()\r\n self.bs = block_size\r\n\r\n def forward(self, x):\r\n N, C, H, W = x.size()\r\n x = x.view(N, self.bs, self.bs, C // (self.bs ** 2), H, W) # (N, bs, bs, C//bs^2, H, W)\r\n x = x.permute(0, 3, 4, 1, 5, 2).contiguous() # (N, C//bs^2, H, bs, W, bs)\r\n x = x.view(N, C // (self.bs ** 2), H * self.bs, W * self.bs) # (N, C//bs^2, H * bs, W * bs)\r\n return x\r\n\r\n\r\nclass SpaceToDepthModule(nn.Module):\r\n def __init__(self, remove_model_jit=False):\r\n super().__init__()\r\n if not remove_model_jit:\r\n self.op = SpaceToDepthJit()\r\n else:\r\n self.op = SpaceToDepth()\r\n\r\n def forward(self, x):\r\n return self.op(x)\r\n\r\n\r\nclass SpaceToDepth(nn.Module):\r\n def __init__(self, block_size=4):\r\n super().__init__()\r\n assert block_size == 4\r\n self.bs = block_size\r\n\r\n def forward(self, x):\r\n N, C, H, W = x.size()\r\n x = x.view(N, C, H // self.bs, self.bs, W // self.bs, self.bs) # (N, C, H//bs, bs, W//bs, bs)\r\n x = x.permute(0, 3, 5, 1, 2, 4).contiguous() # (N, bs, bs, C, H//bs, W//bs)\r\n x = x.view(N, C * (self.bs ** 2), H // self.bs, W // self.bs) # (N, C*bs^2, H//bs, W//bs)\r\n return x\r\n\r\n\r\[email protected]\r\nclass SpaceToDepthJit(object):\r\n def __call__(self, x: torch.Tensor):\r\n # assuming hard-coded that block_size==4 for acceleration\r\n N, C, H, W = x.size()\r\n x = x.view(N, C, H // 4, 4, W // 4, 4) # (N, C, H//bs, bs, W//bs, bs)\r\n x = x.permute(0, 3, 5, 1, 2, 4).contiguous() # (N, bs, bs, C, H//bs, W//bs)\r\n x = x.view(N, C * 16, H // 4, W // 4) # (N, C*bs^2, H//bs, W//bs)\r\n return x\r\n\r\n\r\nclass hard_sigmoid(nn.Module):\r\n def __init__(self, inplace=True):\r\n super(hard_sigmoid, self).__init__()\r\n self.inplace = inplace\r\n\r\n def forward(self, x):\r\n if self.inplace:\r\n return x.add_(3.).clamp_(0., 6.).div_(6.)\r\n else:\r\n return F.relu6(x + 3.) / 6.\r\n\r\n\r\nclass SEModule(nn.Module):\r\n\r\n def __init__(self, channels, reduction_channels, inplace=True):\r\n super(SEModule, self).__init__()\r\n self.avg_pool = FastAvgPool2d()\r\n self.fc1 = nn.Conv2d(channels, reduction_channels, kernel_size=1, padding=0, bias=True)\r\n self.relu = nn.ReLU(inplace=inplace)\r\n self.fc2 = nn.Conv2d(reduction_channels, channels, kernel_size=1, padding=0, bias=True)\r\n # self.activation = hard_sigmoid(inplace=inplace)\r\n self.activation = nn.Sigmoid()\r\n\r\n def forward(self, x):\r\n x_se = self.avg_pool(x)\r\n x_se2 = self.fc1(x_se)\r\n x_se2 = self.relu(x_se2)\r\n x_se = self.fc2(x_se2)\r\n x_se = self.activation(x_se)\r\n return x * x_se\r\n"
] |
[
[
"torch.nn.functional.relu6",
"torch.nn.ReLU",
"torch.nn.Conv2d",
"torch.nn.Sigmoid"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
darkknight2223/streamlit-webrtc
|
[
"30286f5c003d443fac6d7f91a6d84e84eacc0408",
"30286f5c003d443fac6d7f91a6d84e84eacc0408"
] |
[
"app_deepspeech.py",
"app_videochat.py"
] |
[
"import logging\nimport logging.handlers\nimport queue\nimport threading\nimport time\nimport urllib.request\nfrom collections import deque\nfrom pathlib import Path\nfrom typing import List\n\nimport av\nimport numpy as np\nimport pydub\nimport streamlit as st\n\nfrom streamlit_webrtc import AudioProcessorBase, WebRtcMode, webrtc_streamer\n\nHERE = Path(__file__).parent\n\nlogger = logging.getLogger(__name__)\n\n\n# This code is based on https://github.com/streamlit/demo-self-driving/blob/230245391f2dda0cb464008195a470751c01770b/streamlit_app.py#L48 # noqa: E501\ndef download_file(url, download_to: Path, expected_size=None):\n # Don't download the file twice.\n # (If possible, verify the download using the file length.)\n if download_to.exists():\n if expected_size:\n if download_to.stat().st_size == expected_size:\n return\n else:\n st.info(f\"{url} is already downloaded.\")\n if not st.button(\"Download again?\"):\n return\n\n download_to.parent.mkdir(parents=True, exist_ok=True)\n\n # These are handles to two visual elements to animate.\n weights_warning, progress_bar = None, None\n try:\n weights_warning = st.warning(\"Downloading %s...\" % url)\n progress_bar = st.progress(0)\n with open(download_to, \"wb\") as output_file:\n with urllib.request.urlopen(url) as response:\n length = int(response.info()[\"Content-Length\"])\n counter = 0.0\n MEGABYTES = 2.0 ** 20.0\n while True:\n data = response.read(8192)\n if not data:\n break\n counter += len(data)\n output_file.write(data)\n\n # We perform animation by overwriting the elements.\n weights_warning.warning(\n \"Downloading %s... (%6.2f/%6.2f MB)\"\n % (url, counter / MEGABYTES, length / MEGABYTES)\n )\n progress_bar.progress(min(counter / length, 1.0))\n # Finally, we remove these visual elements by calling .empty().\n finally:\n if weights_warning is not None:\n weights_warning.empty()\n if progress_bar is not None:\n progress_bar.empty()\n\n\ndef main():\n st.header(\"Real Time Speech-to-Text\")\n st.markdown(\n \"\"\"\nThis demo app is using [DeepSpeech](https://github.com/mozilla/DeepSpeech),\nan open speech-to-text engine.\n\nA pre-trained model released with\n[v0.9.3](https://github.com/mozilla/DeepSpeech/releases/tag/v0.9.3),\ntrained on American English is being served.\n\"\"\"\n )\n\n # https://github.com/mozilla/DeepSpeech/releases/tag/v0.9.3\n MODEL_URL = \"https://github.com/mozilla/DeepSpeech/releases/download/v0.9.3/deepspeech-0.9.3-models.pbmm\" # noqa\n LANG_MODEL_URL = \"https://github.com/mozilla/DeepSpeech/releases/download/v0.9.3/deepspeech-0.9.3-models.scorer\" # noqa\n MODEL_LOCAL_PATH = HERE / \"models/deepspeech-0.9.3-models.pbmm\"\n LANG_MODEL_LOCAL_PATH = HERE / \"models/deepspeech-0.9.3-models.scorer\"\n\n download_file(MODEL_URL, MODEL_LOCAL_PATH, expected_size=188915987)\n download_file(LANG_MODEL_URL, LANG_MODEL_LOCAL_PATH, expected_size=953363776)\n\n lm_alpha = 0.931289039105002\n lm_beta = 1.1834137581510284\n beam = 100\n\n sound_only_page = \"Sound only (sendonly)\"\n with_video_page = \"With video (sendrecv)\"\n app_mode = st.selectbox(\"Choose the app mode\", [sound_only_page, with_video_page])\n\n if app_mode == sound_only_page:\n app_sst(\n str(MODEL_LOCAL_PATH), str(LANG_MODEL_LOCAL_PATH), lm_alpha, lm_beta, beam\n )\n elif app_mode == with_video_page:\n app_sst_with_video(\n str(MODEL_LOCAL_PATH), str(LANG_MODEL_LOCAL_PATH), lm_alpha, lm_beta, beam\n )\n\n\ndef app_sst(model_path: str, lm_path: str, lm_alpha: float, lm_beta: float, beam: int):\n webrtc_ctx = webrtc_streamer(\n 
key=\"speech-to-text\",\n mode=WebRtcMode.SENDONLY,\n audio_receiver_size=1024,\n rtc_configuration={\"iceServers\": [{\"urls\": [\"stun:stun.l.google.com:19302\"]}]},\n media_stream_constraints={\"video\": False, \"audio\": True},\n )\n\n status_indicator = st.empty()\n\n if not webrtc_ctx.state.playing:\n return\n\n status_indicator.write(\"Loading...\")\n text_output = st.empty()\n stream = None\n\n while True:\n if webrtc_ctx.audio_receiver:\n if stream is None:\n from deepspeech import Model\n\n model = Model(model_path)\n model.enableExternalScorer(lm_path)\n model.setScorerAlphaBeta(lm_alpha, lm_beta)\n model.setBeamWidth(beam)\n\n stream = model.createStream()\n\n status_indicator.write(\"Model loaded.\")\n\n sound_chunk = pydub.AudioSegment.empty()\n try:\n audio_frames = webrtc_ctx.audio_receiver.get_frames(timeout=1)\n except queue.Empty:\n time.sleep(0.1)\n status_indicator.write(\"No frame arrived.\")\n continue\n\n status_indicator.write(\"Running. Say something!\")\n\n for audio_frame in audio_frames:\n sound = pydub.AudioSegment(\n data=audio_frame.to_ndarray().tobytes(),\n sample_width=audio_frame.format.bytes,\n frame_rate=audio_frame.sample_rate,\n channels=len(audio_frame.layout.channels),\n )\n sound_chunk += sound\n\n if len(sound_chunk) > 0:\n sound_chunk = sound_chunk.set_channels(1).set_frame_rate(\n model.sampleRate()\n )\n buffer = np.array(sound_chunk.get_array_of_samples())\n stream.feedAudioContent(buffer)\n text = stream.intermediateDecode()\n text_output.markdown(f\"**Text:** {text}\")\n else:\n status_indicator.write(\"AudioReciver is not set. Abort.\")\n break\n\n\ndef app_sst_with_video(\n model_path: str, lm_path: str, lm_alpha: float, lm_beta: float, beam: int\n):\n class AudioProcessor(AudioProcessorBase):\n frames_lock: threading.Lock\n frames: deque\n\n def __init__(self) -> None:\n self.frames_lock = threading.Lock()\n self.frames = deque([])\n\n async def recv_queued(self, frames: List[av.AudioFrame]) -> av.AudioFrame:\n with self.frames_lock:\n self.frames.extend(frames)\n\n # Return empty frames to be silent.\n new_frames = []\n for frame in frames:\n input_array = frame.to_ndarray()\n new_frame = av.AudioFrame.from_ndarray(\n np.zeros(input_array.shape, dtype=input_array.dtype),\n layout=frame.layout.name,\n )\n new_frame.sample_rate = frame.sample_rate\n new_frames.append(new_frame)\n\n return new_frames\n\n webrtc_ctx = webrtc_streamer(\n key=\"speech-to-text-w-video\",\n mode=WebRtcMode.SENDRECV,\n audio_processor_factory=AudioProcessor,\n rtc_configuration={\"iceServers\": [{\"urls\": [\"stun:stun.l.google.com:19302\"]}]},\n media_stream_constraints={\"video\": True, \"audio\": True},\n )\n\n status_indicator = st.empty()\n\n if not webrtc_ctx.state.playing:\n return\n\n status_indicator.write(\"Loading...\")\n text_output = st.empty()\n stream = None\n\n while True:\n if webrtc_ctx.audio_processor:\n if stream is None:\n from deepspeech import Model\n\n model = Model(model_path)\n model.enableExternalScorer(lm_path)\n model.setScorerAlphaBeta(lm_alpha, lm_beta)\n model.setBeamWidth(beam)\n\n stream = model.createStream()\n\n status_indicator.write(\"Model loaded.\")\n\n sound_chunk = pydub.AudioSegment.empty()\n\n audio_frames = []\n with webrtc_ctx.audio_processor.frames_lock:\n while len(webrtc_ctx.audio_processor.frames) > 0:\n frame = webrtc_ctx.audio_processor.frames.popleft()\n audio_frames.append(frame)\n\n if len(audio_frames) == 0:\n time.sleep(0.1)\n status_indicator.write(\"No frame arrived.\")\n continue\n\n 
status_indicator.write(\"Running. Say something!\")\n\n for audio_frame in audio_frames:\n sound = pydub.AudioSegment(\n data=audio_frame.to_ndarray().tobytes(),\n sample_width=audio_frame.format.bytes,\n frame_rate=audio_frame.sample_rate,\n channels=len(audio_frame.layout.channels),\n )\n sound_chunk += sound\n\n if len(sound_chunk) > 0:\n sound_chunk = sound_chunk.set_channels(1).set_frame_rate(\n model.sampleRate()\n )\n buffer = np.array(sound_chunk.get_array_of_samples())\n stream.feedAudioContent(buffer)\n text = stream.intermediateDecode()\n text_output.markdown(f\"**Text:** {text}\")\n else:\n status_indicator.write(\"AudioReciver is not set. Abort.\")\n break\n\n\nif __name__ == \"__main__\":\n import os\n\n DEBUG = os.environ.get(\"DEBUG\", \"false\").lower() not in [\"false\", \"no\", \"0\"]\n\n logging.basicConfig(\n format=\"[%(asctime)s] %(levelname)7s from %(name)s in %(pathname)s:%(lineno)d: \"\n \"%(message)s\",\n force=True,\n )\n\n logger.setLevel(level=logging.DEBUG if DEBUG else logging.INFO)\n\n st_webrtc_logger = logging.getLogger(\"streamlit_webrtc\")\n st_webrtc_logger.setLevel(logging.DEBUG)\n\n fsevents_logger = logging.getLogger(\"fsevents\")\n fsevents_logger.setLevel(logging.WARNING)\n\n main()\n",
"import logging\nimport math\nfrom typing import List\n\ntry:\n from typing import Literal\nexcept ImportError:\n from typing_extensions import Literal # type: ignore\n\nimport av\nimport cv2\nimport numpy as np\nimport streamlit as st\nfrom streamlit_server_state import server_state, server_state_lock\n\nfrom streamlit_webrtc import (\n MixerBase,\n VideoProcessorBase,\n WebRtcMode,\n WebRtcStreamerContext,\n create_mix_track,\n create_process_track,\n webrtc_streamer,\n)\n\nlogger = logging.getLogger(__name__)\n\n\nclass OpenCVVideoProcessor(VideoProcessorBase):\n type: Literal[\"noop\", \"cartoon\", \"edges\", \"rotate\"]\n\n def __init__(self) -> None:\n self.type = \"noop\"\n\n def recv(self, frame: av.VideoFrame) -> av.VideoFrame:\n img = frame.to_ndarray(format=\"bgr24\")\n\n if self.type == \"noop\":\n pass\n elif self.type == \"cartoon\":\n # prepare color\n img_color = cv2.pyrDown(cv2.pyrDown(img))\n for _ in range(6):\n img_color = cv2.bilateralFilter(img_color, 9, 9, 7)\n img_color = cv2.pyrUp(cv2.pyrUp(img_color))\n\n # prepare edges\n img_edges = cv2.cvtColor(img, cv2.COLOR_RGB2GRAY)\n img_edges = cv2.adaptiveThreshold(\n cv2.medianBlur(img_edges, 7),\n 255,\n cv2.ADAPTIVE_THRESH_MEAN_C,\n cv2.THRESH_BINARY,\n 9,\n 2,\n )\n img_edges = cv2.cvtColor(img_edges, cv2.COLOR_GRAY2RGB)\n\n # combine color and edges\n img = cv2.bitwise_and(img_color, img_edges)\n elif self.type == \"edges\":\n # perform edge detection\n img = cv2.cvtColor(cv2.Canny(img, 100, 200), cv2.COLOR_GRAY2BGR)\n elif self.type == \"rotate\":\n # rotate image\n rows, cols, _ = img.shape\n M = cv2.getRotationMatrix2D((cols / 2, rows / 2), frame.time * 45, 1)\n img = cv2.warpAffine(img, M, (cols, rows))\n\n return av.VideoFrame.from_ndarray(img, format=\"bgr24\")\n\n\nclass MultiWindowMixer(MixerBase):\n def on_update(self, frames: List[av.VideoFrame]) -> av.VideoFrame:\n buf_w = 640\n buf_h = 480\n buffer = np.zeros((buf_h, buf_w, 3), dtype=np.uint8)\n\n n_inputs = len(frames)\n\n n_cols = math.ceil(math.sqrt(n_inputs))\n n_rows = math.ceil(n_inputs / n_cols)\n grid_w = buf_w // n_cols\n grid_h = buf_h // n_rows\n\n for i in range(n_inputs):\n frame = frames[i]\n if frame is None:\n continue\n\n grid_x = (i % n_cols) * grid_w\n grid_y = (i // n_cols) * grid_h\n\n img = frame.to_ndarray(format=\"bgr24\")\n src_h, src_w = img.shape[0:2]\n\n aspect_ratio = src_w / src_h\n\n window_w = min(grid_w, int(grid_h * aspect_ratio))\n window_h = min(grid_h, int(window_w / aspect_ratio))\n\n window_offset_x = (grid_w - window_w) // 2\n window_offset_y = (grid_h - window_h) // 2\n\n window_x0 = grid_x + window_offset_x\n window_y0 = grid_y + window_offset_y\n window_x1 = window_x0 + window_w\n window_y1 = window_y0 + window_h\n\n buffer[window_y0:window_y1, window_x0:window_x1, :] = cv2.resize(\n img, (window_w, window_h)\n )\n\n new_frame = av.VideoFrame.from_ndarray(buffer, format=\"bgr24\")\n\n return new_frame\n\n\ndef main():\n with server_state_lock[\"webrtc_contexts\"]:\n if \"webrtc_contexts\" not in server_state:\n server_state[\"webrtc_contexts\"] = []\n\n with server_state_lock[\"mix_track\"]:\n if \"mix_track\" not in server_state:\n server_state[\"mix_track\"] = create_mix_track(\n kind=\"video\", mixer_factory=MultiWindowMixer, key=\"mix\"\n )\n\n mix_track = server_state[\"mix_track\"]\n\n self_ctx = webrtc_streamer(\n key=\"self\",\n mode=WebRtcMode.SENDRECV,\n rtc_configuration={\"iceServers\": [{\"urls\": [\"stun:stun.l.google.com:19302\"]}]},\n media_stream_constraints={\"video\": True, \"audio\": True},\n 
source_video_track=mix_track,\n sendback_audio=False,\n )\n\n self_process_track = None\n if self_ctx.input_video_track:\n self_process_track = create_process_track(\n input_track=self_ctx.input_video_track,\n processor_factory=OpenCVVideoProcessor,\n )\n mix_track.add_input_track(self_process_track)\n\n self_process_track.processor.type = st.radio(\n \"Select transform type\",\n (\"noop\", \"cartoon\", \"edges\", \"rotate\"),\n key=\"filter1-type\",\n )\n\n with server_state_lock[\"webrtc_contexts\"]:\n webrtc_contexts: List[WebRtcStreamerContext] = server_state[\"webrtc_contexts\"]\n self_is_playing = self_ctx.state.playing and self_process_track\n if self_is_playing and self_ctx not in webrtc_contexts:\n webrtc_contexts.append(self_ctx)\n server_state[\"webrtc_contexts\"] = webrtc_contexts\n elif not self_is_playing and self_ctx in webrtc_contexts:\n webrtc_contexts.remove(self_ctx)\n server_state[\"webrtc_contexts\"] = webrtc_contexts\n\n if self_ctx.state.playing:\n # Audio streams are transferred in SFU manner\n # TODO: Create MCU to mix audio streams\n for ctx in webrtc_contexts:\n if ctx == self_ctx or not ctx.state.playing:\n continue\n webrtc_streamer(\n key=f\"sound-{id(ctx)}\",\n mode=WebRtcMode.RECVONLY,\n rtc_configuration={\n \"iceServers\": [{\"urls\": [\"stun:stun.l.google.com:19302\"]}]\n },\n media_stream_constraints={\"video\": False, \"audio\": True},\n source_audio_track=ctx.input_audio_track,\n desired_playing_state=ctx.state.playing,\n )\n\n\nif __name__ == \"__main__\":\n import os\n\n DEBUG = os.environ.get(\"DEBUG\", \"false\").lower() not in [\"false\", \"no\", \"0\"]\n\n logging.basicConfig(\n format=\"[%(asctime)s] %(levelname)7s from %(name)s in %(pathname)s:%(lineno)d: \"\n \"%(message)s\",\n force=True,\n )\n\n logger.setLevel(level=logging.DEBUG if DEBUG else logging.INFO)\n\n st_webrtc_logger = logging.getLogger(\"streamlit_webrtc\")\n st_webrtc_logger.setLevel(logging.DEBUG if DEBUG else logging.INFO)\n\n aioice_logger = logging.getLogger(\"aioice\")\n aioice_logger.setLevel(logging.WARNING)\n\n fsevents_logger = logging.getLogger(\"fsevents\")\n fsevents_logger.setLevel(logging.WARNING)\n\n main()\n"
] |
[
[
"numpy.zeros"
],
[
"numpy.zeros"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
alvaradoo/arkouda
|
[
"c9e03fb1d8f7d0d1633b80b50353346ad0ffd43f",
"c9e03fb1d8f7d0d1633b80b50353346ad0ffd43f"
] |
[
"tests/suffixarray_test.py",
"arkouda/pdarraycreation.py"
] |
[
"import numpy as np\nfrom collections import Counter\nfrom context import arkouda as ak\nfrom base_test import ArkoudaTest\nimport pytest\nimport random\nimport string\nak.verbose = False\n\nN = 100\nUNIQUE = N//2\n\n# test_strings = np.array(['These are', 'some', 'interesting',\n# '~!@#$%^&*()_+', 'sarrays', '8675309.',\n# 'These are', 'some', 'duplicates.',\n# 'hello', 'world'])\n\n# test_suffix array = np.array([9, 5, 0, 6, 8, 4, 2, 1, 7, 3],\n# [4, 3, 2, 1, 0], [11, 3, 5, 10, 8, 0, 9, 1, 4, 6, 2, 7]\n# [13, 1, 3, 4, 5, 7, 9, 10, 8, 12, 2, 6, 11, 0],\n# [7, 5, 3, 4, 2, 6, 0, 1],[8, 7, 5, 4, 3, 1, 2, 0, 6],\n# [9, 5, 0, 6, 8, 4, 2, 1, 7, 3],[4, 3, 2, 1, 0],\n# [10, 6, 5, 0, 8, 4, 3, 2, 9, 7, 1],[5, 1, 0, 2, 3, 4]\n# [5, 4, 3, 1, 2, 0]\ndef compare_sas(a, b):\n return all(x == y for x, y in zip(a, b))\n \nerrors = False\n\ndef run_test_argsort(sarrays, test_sas, cat):\n akperm = ak.argsort(sarrays)\n aksorted = sarrays[akperm].to_ndarray()\n npsorted = np.sort(test_sas)\n assert((aksorted == npsorted).all())\n catperm = ak.argsort(cat)\n catsorted = cat[catperm].to_ndarray()\n assert((catsorted == npsorted).all())\n\ndef run_test_unique(sarrays, test_sas, cat):\n # unique\n akuniq = ak.unique(sarrays)\n catuniq = ak.unique(cat)\n akset = set(akuniq.to_ndarray())\n catset = set(catuniq.to_ndarray())\n assert(akset == catset)\n # There should be no duplicates\n assert(akuniq.size == len(akset))\n npset = set(np.unique(test_sas))\n # When converted to a set, should agree with numpy\n assert(akset == npset)\n return akset\n\ndef run_test_index(sarrays, test_sas, cat):\n # int index\n assert(sarrays[N//3] == test_sas[N//3])\n #assert(cat[N//3] == test_sas[N//3])\n print(\"int index passed\")\n \ndef run_test_slice(sarrays, test_sas, cat):\n assert(compare_sas(sarrays[N//4:N//3], \n test_sas[N//4:N//3]))\n #assert(compare_sas(cat[N//4:N//3].to_ndarray(), \n # test_sas[N//4:N//3]))\n \ndef run_test_pdarray_index(sarrays, test_sas, cat):\n inds = ak.arange(0, len(sarrays), 10)\n assert(compare_sas(sarrays[inds].to_ndarray(), test_sas[inds.to_ndarray()]))\n #assert(compare_sas(cat[inds].to_ndarray(), test_sas[inds.to_ndarray()]))\n\ndef run_comparison_test(sarrays, test_sas, cat):\n akinds = (sarrays == test_sas[N//4])\n #catinds = (cat == test_sas[N//4])\n npinds = (test_sas == test_sas[N//4])\n assert(np.allclose(akinds, npinds))\n\ndef run_test_in1d(sarrays, cat, base_words):\n more_choices = ak.randint(0, UNIQUE, 100)\n #akwords = base_words[more_choices]\n #more_words = akwords.to_ndarray()\n matches = ak.in1d(sarrays, akwords)\n catmatches = ak.in1d(cat, akwords)\n assert((matches == catmatches).all())\n # Every word in matches should be in the target set\n for word in sarrays[matches].to_ndarray():\n assert(word in more_words)\n # Exhaustively find all matches to make sure we didn't miss any\n inds = ak.zeros(sarrays.size, dtype=ak.bool)\n for word in more_words:\n inds |= (sarrays == word)\n assert((inds == matches).all())\n\ndef run_test_groupby(sarrays, cat, akset):\n g = ak.GroupBy(sarrays)\n gc = ak.GroupBy(cat)\n # Unique keys should be same result as ak.unique\n assert(akset == set(g.unique_keys.to_ndarray()))\n assert(akset == set(gc.unique_keys.to_ndarray()))\n assert((gc.permutation == g.permutation).all())\n permStrings = sarrays[g.permutation].to_ndarray()\n # Check each group individually\n lengths = np.diff(np.hstack((g.segments.to_ndarray(), np.array([g.size]))))\n for uk, s, l in zip(g.unique_keys.to_ndarray(),\n g.segments.to_ndarray(),\n lengths):\n # All values in group 
should equal key\n assert((permStrings[s:s+l] == uk).all())\n # Key should not appear anywhere outside of group\n assert(not (permStrings[:s] == uk).any())\n assert(not (permStrings[s+l:] == uk).any())\n\n\ndef run_test_contains(sarrays, test_sas, delim):\n found = sarrays.contains(delim).to_ndarray()\n npfound = np.array([s.count(delim) > 0 for s in test_sas])\n assert((found == npfound).all())\n\ndef run_test_starts_with(sarrays, test_sas, delim):\n found = sarrays.startswith(delim).to_ndarray()\n npfound = np.array([s.startswith(delim) for s in test_sas])\n assert((found == npfound).all())\n\ndef run_test_ends_with(sarrays, test_sas, delim):\n found = sarrays.endswith(delim).to_ndarray()\n npfound = np.array([s.endswith(delim) for s in test_sas])\n assert((found == npfound).all())\n\ndef run_test_peel(sarrays, test_sas, delim):\n import itertools as it\n tf = (True, False)\n def munge(triple, inc, part):\n ret = []\n for h, s, t in triple:\n if not part and s == '':\n ret.append(('', h))\n else:\n if inc:\n ret.append((h + s, t))\n else:\n ret.append((h, t))\n l, r = tuple(zip(*ret))\n return np.array(l), np.array(r)\n\n def rmunge(triple, inc, part):\n ret = []\n for h, s, t in triple:\n if not part and s == '':\n ret.append((t, ''))\n else:\n if inc:\n ret.append((h, s + t))\n else:\n ret.append((h, t))\n l, r = tuple(zip(*ret))\n return np.array(l), np.array(r)\n\n def slide(triple, delim):\n h, s, t = triple\n h2, s2, t2 = t.partition(delim)\n newh = h + s + h2\n return newh, s2, t2\n\n def rslide(triple, delim):\n h, s, t = triple\n h2, s2, t2 = h.rpartition(delim)\n newt = t2 + s + t\n return h2, s2, newt\n \n for times, inc, part in it.product(range(1,4), tf, tf):\n ls, rs = sarrays.peel(delim, times=times, includeDelimiter=inc, keepPartial=part)\n triples = [s.partition(delim) for s in test_sas]\n for i in range(times-1):\n triples = [slide(t, delim) for t in triples]\n ltest, rtest = munge(triples, inc, part)\n assert((ltest == ls.to_ndarray()).all() and (rtest == rs.to_ndarray()).all())\n\n for times, inc, part in it.product(range(1,4), tf, tf):\n ls, rs = sarrays.rpeel(delim, times=times, includeDelimiter=inc, keepPartial=part)\n triples = [s.rpartition(delim) for s in test_sas]\n for i in range(times-1):\n triples = [rslide(t, delim) for t in triples]\n ltest, rtest = rmunge(triples, inc, part)\n assert((ltest == ls.to_ndarray()).all() and (rtest == rs.to_ndarray()).all())\n\ndef run_test_stick(sarrays, test_sas, base_words, delim):\n test_sas2 = np.random.choice(base_words.to_ndarray(), N, replace=True)\n sarrays2 = ak.array(test_sas2)\n stuck = sarrays.stick(sarrays2, delimiter=delim).to_ndarray()\n tstuck = np.array([delim.join((a, b)) for a, b in zip(test_sas, test_sas2)])\n assert ((stuck == tstuck).all())\n assert ((sarrays + sarrays2) == sarrays.stick(sarrays2, delimiter=\"\")).all()\n\n lstuck = sarrays.lstick(sarrays2, delimiter=delim).to_ndarray()\n tlstuck = np.array([delim.join((b, a)) for a, b in zip(test_sas, test_sas2)])\n assert ((lstuck == tlstuck).all())\n assert ((sarrays2 + sarrays) == sarrays.lstick(sarrays2, delimiter=\"\")).all()\n\ndef suffixArray(s):\n suffixes = [(s[i:], i) for i in range(len(s))]\n suffixes.sort(key=lambda x: x[0])\n sa= [s[1] for s in suffixes]\n #sa.insert(0,len(sa))\n return sa\n\ndef get_random_string(length):\n letters = string.ascii_lowercase\n result_str = ''.join(random.choice(letters) for i in range(length))\n return result_str\n# print(\"Random string of length\", length, \"is:\", result_str)\n \ndef 
ascill_to_string(ini_list):\n res=\"\"\n for val in ini_list: \n res = res + chr(int(val))\n return res\n\n\ndef string_to_int(sa_str):\n ary=[]\n for val in sa_str: \n ary.append(int(val))\n return ary\n\ndef akstrings_to_suffix_array(ak_str):\n ary=[]\n for val in ak_str: \n x=val.split(\" \",1)\n y=x[1]\n z=y.split(\" \")\n s=ascill_to_string(z)\n sa=suffixArray(s)\n ary.append(sa)\n return ary\n\ndef aksa_to_int_array(ak_str):\n ary=[]\n for val in ak_str: \n x=val.split(\" \",1)\n y=x[1]\n z=y.split(\" \")\n intz= [int(z[i]) for i in range(len(z))]\n ary.append(intz)\n return ary\nif __name__ == '__main__':\n import sys\n if len(sys.argv) > 1:\n ak.connect(server=sys.argv[1], port=sys.argv[2])\n else:\n ak.connect()\n\n # with open(__file__, 'r') as f:\n # base_words = np.array(f.read().split())\n # test_sas = np.random.choice(base_words, N, replace=True)\n # sarrays = ak.array(test_sas)\n # generate a Strings object\n base_words1 = ak.random_strings_uniform(1, 10, UNIQUE, characters='printable')\n # get the real strings\n strings1 = [base_words1[i] for i in range(len(base_words1))]\n # generate a Strings object\n base_words2 = ak.random_strings_lognormal(2, 0.25, UNIQUE, characters='printable')\n # get the real strings\n strings2 = [base_words2[i] for i in range(len(base_words2))]\n #Generate suffix array locally\n sa_ori1=akstrings_to_suffix_array(strings1)\n #Generate suffix array locally\n sa_ori2=akstrings_to_suffix_array(strings2)\n #Generate suffix array remotely\n sa1=ak.suffix_array(base_words1)\n #Generate suffix array remotely\n sa2=ak.suffix_array(base_words2)\n #get the suffix array from SArray object\n suffixarray1=[sa1[i] for i in range(len(sa1))]\n #transfer the string suffix array to real int suffix array\n sa_test1=aksa_to_int_array(suffixarray1)\n #get the suffix array from SArray object\n suffixarray2=[sa2[i] for i in range(len(sa2))]\n #transfer the string suffix array to real int suffix array\n sa_test2=aksa_to_int_array(suffixarray2)\n \n cat=0\n # int index\n run_test_index(sa_ori1, sa_test1, cat)\n run_test_index(sa_ori2, sa_test2, cat)\n print(\"int index passed\")\n \n # slice\n run_test_slice(sa_ori1, sa_test1, cat)\n run_test_slice(sa_ori2, sa_test2, cat)\n print(\"slice passed\")\n \n # pdarray int index\n #run_test_pdarray_index(sa_ori1, sa_test1, cat)\n #run_test_pdarray_index(sa_ori2, sa_test2, cat)\n #print(\"pdarray int index passed\")\n\n # comparison\n run_comparison_test(sa_ori1, sa_test1, cat)\n run_comparison_test(sa_ori2, sa_test2, cat)\n print(\"comparison passed\")\n\n # pdarray bool index\n #run_test_pdarray_index(sarrays, test_sas, cat)\n #print(\"pdarray bool index passed\")\n\n # in1d and iter\n # more_words = np.random.choice(base_words, 100)\n # akwords = ak.array(more_words)\n #run_test_in1d(sa_ori1, sa_test1, cat)\n #run_test_in1d(sa_ori2, sa_test2, cat)\n #print(\"in1d and iter passed\")\n\n # argsort\n #run_test_argsort(sa_ori1, sa_test1, cat)\n \n # unique\n #akset = run_test_unique(sarrays, test_sas, cat)\n '''\n # groupby\n run_test_groupby(sarrays, cat, akset)\n print(\"groupby passed\")\n \n # substring functions\n x, w = tuple(zip(*Counter(''.join(base_words.to_ndarray())).items()))\n delim = np.random.choice(x, p=(np.array(w)/sum(w)))\n\n # contains\n run_test_contains(sarrays, test_sas, delim)\n print(\"contains passed\")\n\n # startswith\n run_test_starts_with(sarrays, test_sas, delim)\n print(\"startswith passed\")\n\n # endswith\n run_test_ends_with(sarrays, test_sas, delim)\n print(\"endswith passed\")\n\n # peel\n 
run_test_peel(sarrays, test_sas, delim)\n print(\"peel passed\")\n\n # stick\n run_test_stick(sarrays, test_sas, base_words, delim)\n print(\"stick passed\")\n '''\nclass SuffixArrayTest(ArkoudaTest):\n \n def setUp(self):\n ArkoudaTest.setUp(self)\n base_words1 = ak.random_strings_uniform(1, 10, UNIQUE, characters='printable')\n base_words2 = ak.random_strings_lognormal(2, 0.25, UNIQUE, characters='printable')\n base_sas1 = ak.suffix_array(base_words1)\n base_sas2 = ak.suffix_array(base_words2)\n '''\n gremlins = ak.array([' ', ''])\n self.base_words = ak.concatenate((base_words1, base_words2, gremlins))\n self.np_base_words = np.hstack((base_words1.to_ndarray(), base_words2.to_ndarray()))\n choices = ak.randint(0, self.base_words.size, N)\n self.sarrays = ak.concatenate((self.base_words[choices], gremlins))\n self.test_sas = self.sarrays.to_ndarray()\n self.cat = ak.Categorical(self.sarrays)\n x, w = tuple(zip(*Counter(''.join(self.base_words.to_ndarray())).items()))\n self.delim = np.random.choice(x, p=(np.array(w)/sum(w)))\n self.akset = set(ak.unique(self.sarrays).to_ndarray())\n '''\n\n def test_compare_sarrays(self):\n assert compare_sarrays(self.base_words.to_ndarray(), self.np_base_words)\n \n def test_argsort(self):\n run_test_argsort(self.sarrays, self.test_sas, self.cat)\n\n def test_in1d(self):\n run_test_in1d(self.sarrays, self.cat, self.base_words)\n \n def test_unique(self):\n run_test_unique(self.sarrays, self.test_sas, self.cat)\n\n def test_groupby(self):\n run_test_groupby(self.sarrays, self.cat, self.akset)\n \n @pytest.mark.skip(reason=\"awaiting bug fix.\")\n def test_index(self):\n run_test_index(self.sarrays, self.test_sas, self.cat)\n \n def test_slice(self):\n run_test_slice(self.sarrays, self.test_sas, self.cat)\n \n def test_pdarray_index(self):\n run_test_pdarray_index(self.sarrays, self.test_sas, self.cat)\n\n def test_contains(self):\n run_test_contains(self.sarrays, self.test_sas, self.delim)\n \n def test_starts_with(self):\n run_test_starts_with(self.sarrays, self.test_sas, self.delim)\n\n @pytest.mark.skip(reason=\"awaiting bug fix.\")\n def test_ends_with(self):\n run_test_ends_with(self.sarrays, self.test_sas, self.delim)\n \n def test_error_handling(self):\n sarraysOne = ak.random_sarrays_uniform(1, 10, UNIQUE, \n characters='printable')\n sarraysTwo = ak.random_sarrays_uniform(1, 10, UNIQUE, \n characters='printable')\n\n with self.assertRaises(TypeError) as cm:\n sarraysOne.lstick(sarraysTwo, delimiter=1)\n self.assertEqual('Delimiter must be a string, not int', \n cm.exception.args[0])\n \n with self.assertRaises(TypeError) as cm:\n sarraysOne.lstick([1], 1)\n self.assertEqual('stick: not supported between String and list', \n cm.exception.args[0]) \n \n with self.assertRaises(TypeError) as cm:\n sarraysOne.startswith(1)\n self.assertEqual('Substring must be a string, not int', \n cm.exception.args[0]) \n \n with self.assertRaises(TypeError) as cm:\n sarraysOne.endswith(1)\n self.assertEqual('Substring must be a string, not int', \n cm.exception.args[0]) \n \n with self.assertRaises(TypeError) as cm:\n sarraysOne.contains(1)\n self.assertEqual('Substring must be a string, not int', \n cm.exception.args[0]) \n \n with self.assertRaises(TypeError) as cm:\n sarraysOne.peel(1)\n self.assertEqual('Delimiter must be a string, not int', \n cm.exception.args[0]) \n\n with self.assertRaises(ValueError) as cm:\n sarraysOne.peel(\"\",-5)\n self.assertEqual('Times must be >= 1', \n cm.exception.args[0]) \n\n @pytest.mark.skip(reason=\"awaiting bug fix.\")\n 
def test_peel(self):\n run_test_peel(self.sarrays, self.test_sas, self.delim)\n\n @pytest.mark.skip(reason=\"awaiting bug fix.\")\n def test_stick(self):\n run_test_stick(self.sarrays, self.test_sas, self.base_words, self.delim)\n",
"import numpy as np # type: ignore\nimport pandas as pd # type: ignore\nimport struct\nfrom typing import cast, Iterable, Optional, Union\nfrom typeguard import typechecked\nfrom arkouda.client import generic_msg\nfrom arkouda.dtypes import *\nfrom arkouda.dtypes import structDtypeCodes, NUMBER_FORMAT_STRINGS\nfrom arkouda.dtypes import dtype as akdtype\nfrom arkouda.pdarrayclass import pdarray, create_pdarray\nfrom arkouda.strings import Strings\nfrom arkouda.strings import SArrays\nfrom multipledispatch import dispatch \n\n__all__ = [\"array\", \"zeros\", \"ones\", \"zeros_like\", \"ones_like\", \"arange\",\n \"linspace\", \"randint\", \"uniform\", \"standard_normal\",\n \"random_strings_uniform\", \"random_strings_lognormal\", \"from_series\",\n \"suffix_array\"]\n\nnumericDTypes = frozenset([\"bool\", \"int64\", \"float64\"]) \n\nRANDINT_TYPES = {'int64','float64'}\n\nseries_dtypes = {'string' : np.str_,\n \"<class 'str'>\" : np.str_,\n 'int64' : np.int64,\n \"<class 'numpy.int64'>\" : np.int64, \n 'float64' : np.float64,\n \"<class 'numpy.float64'>\" : np.float64, \n 'bool' : np.bool,\n \"<class 'bool'>\" : np.bool,\n 'datetime64[ns]' : np.int64\n }\n\n@typechecked\ndef from_series(series : pd.Series, dtype : Optional[type]=None) -> Union[pdarray,Strings]:\n \"\"\"\n Converts a Pandas Series to an Arkouda pdarray or Strings object. If\n dtype is None, the dtype is inferred from the Pandas Series. Otherwise,\n the dtype parameter is set if the dtype of the Pandas Series is to be overridden or is \n unknown (for example, in situations where the Series dtype is object).\n \n Parameters\n ----------\n series : Pandas Series\n The Pandas Series with a dtype of bool, float64, int64, or string\n dtype : Optional[type]\n The valid dtype types are np.bool, np.float64, np.int64, and np.str\n\n Returns\n -------\n Union[pdarray,Strings]\n \n Raises\n ------\n TypeError\n Raised if series is not a Pandas Series object\n ValueError\n Raised if the Series dtype is not bool, float64, int64, string, or datetime\n\n Examples\n --------\n >>> ak.from_series(pd.Series(np.random.randint(0,10,5)))\n array([9, 0, 4, 7, 9])\n >>> ak.from_series(pd.Series(['1', '2', '3', '4', '5']),dtype=np.int64)\n array([1, 2, 3, 4, 5])\n >>> ak.from_series(pd.Series(np.random.uniform(low=0.0,high=1.0,size=3)))\n array([0.57600036956445599, 0.41619265571741659, 0.6615356693784662])\n >>> ak.from_series(pd.Series(['0.57600036956445599', '0.41619265571741659',\n '0.6615356693784662']), dtype=np.float64)\n array([0.57600036956445599, 0.41619265571741659, 0.6615356693784662])\n >>> ak.from_series(pd.Series(np.random.choice([True, False],size=5)))\n array([True, False, True, True, True])\n >>> ak.from_series(pd.Series(['True', 'False', 'False', 'True', 'True']), dtype=np.bool)\n array([True, True, True, True, True])\n >>> ak.from_series(pd.Series(['a', 'b', 'c', 'd', 'e'], dtype=\"string\"))\n array(['a', 'b', 'c', 'd', 'e'])\n >>> ak.from_series(pd.Series(['a', 'b', 'c', 'd', 'e']),dtype=np.str)\n array(['a', 'b', 'c', 'd', 'e'])\n >>> ak.from_series(pd.Series(pd.to_datetime(['1/1/2018', np.datetime64('2018-01-01')])))\n array([1514764800000000000, 1514764800000000000]) \n \n Notes\n -----\n The supported datatypes are bool, float64, int64, string, and datetime64[ns],which are\n either inferred from the the Pandas Series or is set via the dtype parameter. 
\n \n Series of datetime are converted to Arkouda arrays of dtype int64 (date in milliseconds)\n \"\"\" \n if not dtype: \n dt = series.dtype.name\n else:\n dt = str(dtype)\n try:\n n_array = series.to_numpy(dtype=series_dtypes[dt])\n except KeyError:\n raise ValueError(('dtype {} is unsupported. Supported dtypes are bool, ' +\n 'float64, int64, string, and datetime64[ns]').format(dt))\n return array(n_array)\n\ndef array(a : Union[pdarray,np.ndarray, Iterable]) -> Union[pdarray, Strings]:\n \"\"\"\n Convert an iterable to a pdarray or Strings object, sending the corresponding\n data to the arkouda server. \n\n Parameters\n ----------\n a : Union[pdarray, np.ndarray]\n Rank-1 array of a supported dtype\n\n Returns\n -------\n pdarray or Strings\n A pdarray instance stored on arkouda server or Strings instance, which\n is composed of two pdarrays stored on arkouda server\n \n Raises\n ------\n TypeError\n Raised if a is not a pdarray, np.ndarray, or Python Iterable such as a\n list, array, tuple, or deque\n RuntimeError\n If a is not one-dimensional, nbytes > maxTransferBytes, a.dtype is\n not supported (not in DTypes), or if the product of a size and\n a.itemsize > maxTransferBytes\n\n See Also\n --------\n pdarray.to_ndarray\n\n Notes\n -----\n The number of bytes in the input array cannot exceed `arkouda.maxTransferBytes`,\n otherwise a RuntimeError will be raised. This is to protect the user\n from overwhelming the connection between the Python client and the arkouda\n server, under the assumption that it is a low-bandwidth connection. The user\n may override this limit by setting ak.maxTransferBytes to a larger value, \n but should proceed with caution.\n \n If the pdrray or ndarray is of type U, this method is called twice recursively \n to create the Strings object and the two corresponding pdarrays for string \n bytes and offsets, respectively.\n\n Examples\n --------\n >>> a = [3, 5, 7]\n >>> b = ak.array(a)\n >>> b\n array([3, 5, 7])\n \n >>> type(b)\n arkouda.pdarray \n \"\"\"\n # If a is already a pdarray, do nothing\n if isinstance(a, pdarray):\n return a\n from arkouda.client import maxTransferBytes\n # If a is not already a numpy.ndarray, convert it\n if not isinstance(a, np.ndarray):\n try:\n a = np.array(a)\n except:\n raise TypeError(('a must be a pdarray, np.ndarray, or convertible to' +\n ' a numpy array'))\n # Only rank 1 arrays currently supported\n if a.ndim != 1:\n raise RuntimeError(\"Only rank-1 pdarrays or ndarrays supported\")\n # Check if array of strings\n if a.dtype.kind == 'U' or 'U' in a.dtype.kind:\n encoded = np.array([elem.encode() for elem in a])\n # Length of each string, plus null byte terminator\n lengths = np.array([len(elem) for elem in encoded]) + 1\n # Compute zero-up segment offsets\n offsets = np.cumsum(lengths) - lengths\n # Allocate and fill bytes array with string segments\n nbytes = offsets[-1] + lengths[-1]\n if nbytes > maxTransferBytes:\n raise RuntimeError((\"Creating pdarray would require transferring {} bytes,\" +\n \" which exceeds allowed transfer size. 
Increase \" +\n \"ak.maxTransferBytes to force.\").format(nbytes))\n values = np.zeros(nbytes, dtype=np.uint8)\n for s, o in zip(encoded, offsets):\n for i, b in enumerate(s):\n values[o+i] = b\n # Recurse to create pdarrays for offsets and values, then return Strings object\n return Strings(array(offsets), array(values))\n # If not strings, then check that dtype is supported in arkouda\n if a.dtype.name not in DTypes:\n raise RuntimeError(\"Unhandled dtype {}\".format(a.dtype))\n # Do not allow arrays that are too large\n size = a.size\n if (size * a.itemsize) > maxTransferBytes:\n raise RuntimeError((\"Array exceeds allowed transfer size. Increase \" +\n \"ak.maxTransferBytes to allow\"))\n # Pack binary array data into a bytes object with a command header\n # including the dtype and size\n fmt = \">{:n}{}\".format(size, structDtypeCodes[a.dtype.name])\n req_msg = \"array {} {:n} \".\\\n format(a.dtype.name, size).encode() + struct.pack(fmt, *a)\n repMsg = generic_msg(req_msg, send_bytes=True)\n return create_pdarray(cast(str,repMsg))\n\ndef zeros(size : int, dtype : type=np.float64) -> pdarray:\n \"\"\"\n Create a pdarray filled with zeros.\n\n Parameters\n ----------\n size : int\n Size of the array (only rank-1 arrays supported)\n dtype : {float64, int64, bool}\n Type of resulting array, default float64\n\n Returns\n -------\n pdarray\n Zeros of the requested size and dtype\n \n Raises\n ------\n TypeError\n Raised if the supplied dtype is not supported or if the size\n parameter is neither an int nor a str that is parseable to an int.\n\n See Also\n --------\n ones, zeros_like\n\n Examples\n --------\n >>> ak.zeros(5, dtype=ak.int64)\n array([0, 0, 0, 0, 0])\n\n >>> ak.zeros(5, dtype=ak.float64)\n array([0, 0, 0, 0, 0])\n\n >>> ak.zeros(5, dtype=ak.bool)\n array([False, False, False, False, False])\n \"\"\"\n if not np.isscalar(size):\n raise TypeError(\"size must be a scalar, not {}\".\\\n format(size.__class__.__name__))\n dtype = akdtype(dtype) # normalize dtype\n # check dtype for error\n if cast(np.dtype,dtype).name not in numericDTypes:\n raise TypeError(\"unsupported dtype {}\".format(dtype))\n kind, itemsize = translate_np_dtype(dtype)\n repMsg = generic_msg(\"create {} {}\".format(cast(np.dtype,dtype).name, size))\n return create_pdarray(cast(str, repMsg))\n\ndef ones(size : int, dtype : type=float64) -> pdarray:\n \"\"\"\n Create a pdarray filled with ones.\n\n Parameters\n ----------\n size : int\n Size of the array (only rank-1 arrays supported)\n dtype : {float64, int64, bool}\n Resulting array type, default float64\n\n Returns\n -------\n pdarray\n Ones of the requested size and dtype\n \n Raises\n ------\n TypeError\n Raised if the supplied dtype is not supported or if the size\n parameter is neither an int nor a str that is parseable to an int.\n\n See Also\n --------\n zeros, ones_like\n\n Examples\n --------\n >>> ak.ones(5, dtype=ak.int64)\n array([1, 1, 1, 1, 1])\n\n >>> ak.ones(5, dtype=ak.float64)\n array([1, 1, 1, 1, 1])\n\n >>> ak.ones(5, dtype=ak.bool)\n array([True, True, True, True, True])\n \"\"\"\n if not np.isscalar(size):\n raise TypeError(\"size must be a scalar, not {}\".\\\n format(size.__class__.__name__))\n dtype = akdtype(dtype) # normalize dtype\n # check dtype for error\n if cast(np.dtype,dtype).name not in numericDTypes:\n raise TypeError(\"unsupported dtype {}\".format(dtype))\n kind, itemsize = translate_np_dtype(dtype)\n repMsg = generic_msg(\"create {} {}\".format(cast(np.dtype,dtype).name, size))\n a = create_pdarray(cast(str,repMsg))\n 
a.fill(1)\n return a\n\n@typechecked\ndef zeros_like(pda : pdarray) -> pdarray:\n \"\"\"\n Create a zero-filled pdarray of the same size and dtype as an existing \n pdarray.\n\n Parameters\n ----------\n pda : pdarray\n Array to use for size and dtype\n\n Returns\n -------\n pdarray\n Equivalent to ak.zeros(pda.size, pda.dtype)\n \n Raises\n ------\n TypeError\n Raised if the pda parameter is not a pdarray.\n\n See Also\n --------\n zeros, ones_like\n\n Examples\n --------\n >>> zeros = ak.zeros(5, dtype=ak.int64)\n >>> ak.zeros_like(zeros)\n array([0, 0, 0, 0, 0])\n\n >>> zeros = ak.zeros(5, dtype=ak.float64)\n >>> ak.zeros_like(zeros)\n array([0, 0, 0, 0, 0])\n\n >>> zeros = ak.zeros(5, dtype=ak.bool)\n >>> ak.zeros_like(zeros)\n array([False, False, False, False, False])\n \"\"\"\n return zeros(pda.size, pda.dtype)\n\n@typechecked\ndef ones_like(pda : pdarray) -> pdarray:\n \"\"\"\n Create a one-filled pdarray of the same size and dtype as an existing \n pdarray.\n\n Parameters\n ----------\n pda : pdarray\n Array to use for size and dtype\n\n Returns\n -------\n pdarray\n Equivalent to ak.ones(pda.size, pda.dtype)\n \n Raises\n ------\n TypeError\n Raised if the pda parameter is not a pdarray.\n\n See Also\n --------\n ones, zeros_like\n \n Notes\n -----\n Logic for generating the pdarray is delegated to the ak.ones method.\n Accordingly, the supported dtypes match are defined by the ak.ones method.\n \n Examples\n --------\n >>> ones = ak.ones(5, dtype=ak.int64)\n >>> ak.ones_like(ones)\n array([1, 1, 1, 1, 1])\n\n >>> ones = ak.ones(5, dtype=ak.float64)\n >>> ak.ones_like(ones)\n array([1, 1, 1, 1, 1])\n\n >>> ones = ak.ones(5, dtype=ak.bool)\n >>> ak.ones_like(ones)\n array([True, True, True, True, True])\n \"\"\"\n return ones(pda.size, pda.dtype)\n\ndef arange(*args) -> pdarray:\n \"\"\"\n arange([start,] stop[, stride])\n\n Create a pdarray of consecutive integers within the interval [start, stop).\n If only one arg is given then arg is the stop parameter. If two args are given\n then the first arg is start and second is stop. If three args are given\n then the first arg is start, second is stop, third is stride.\n\n Parameters\n ----------\n start : int, optional\n Starting value (inclusive), the default starting value is 0\n stop : int\n Stopping value (exclusive)\n stride : int, optional\n The difference between consecutive elements, the default stride is 1,\n if stride is specified then start must also be specified. \n\n Returns\n -------\n pdarray, int64\n Integers from start (inclusive) to stop (exclusive) by stride\n \n Raises\n ------\n TypeError\n Raised if start, stop, or stride is not an int object\n ZeroDivisionError\n Raised if stride == 0\n\n See Also\n --------\n linspace, zeros, ones, randint\n \n Notes\n -----\n Negative strides result in decreasing values. Currently, only int64 pdarrays\n can be created with this function. 
For float64 arrays, use linspace.\n\n Examples\n --------\n >>> ak.arange(0, 5, 1)\n array([0, 1, 2, 3, 4])\n\n >>> ak.arange(5, 0, -1)\n array([5, 4, 3, 2, 1])\n\n >>> ak.arange(0, 10, 2)\n array([0, 2, 4, 6, 8])\n \"\"\"\n \n #if one arg is given then arg is stop\n if len(args) == 1:\n start = 0\n stop = args[0]\n stride = 1\n\n #if two args are given then first arg is start and second is stop\n if len(args) == 2:\n start = args[0]\n stop = args[1]\n stride = 1\n\n #if three args are given then first arg is start,\n #second is stop, third is stride\n if len(args) == 3:\n start = args[0]\n stop = args[1]\n stride = args[2]\n\n if not all((np.isscalar(start), np.isscalar(stop), np.isscalar(stride))):\n raise TypeError(\"all arguments must be scalars\")\n\n if stride == 0:\n raise ZeroDivisionError(\"division by zero\")\n\n if isinstance(start, int) and isinstance(stop, int) and isinstance(stride, int):\n # TO DO: fix bug in server that goes 2 steps too far for negative strides\n if stride < 0:\n stop = stop + 2\n repMsg = generic_msg(\"arange {} {} {}\".format(start, stop, stride))\n return create_pdarray(cast(str,repMsg))\n else:\n raise TypeError(\"start,stop,stride must be type int {} {} {}\".\\\n format(start,stop,stride))\n\ndef linspace(start : int, stop : int, length : int) -> pdarray:\n \"\"\"\n Create a pdarray of linearly-spaced floats in a closed interval.\n\n Parameters\n ----------\n start : scalar\n Start of interval (inclusive)\n stop : scalar\n End of interval (inclusive)\n length : int\n Number of points\n\n Returns\n -------\n pdarray, float64\n Array of evenly spaced float values along the interval\n \n Raises\n ------\n TypeError\n Raised if start or stop is not a scalar or if length is not int\n\n See Also\n --------\n arange\n \n Notes\n -----\n If that start is greater than stop, the pdarray values are generated in \n descending order.\n\n Examples\n --------\n >>> ak.linspace(0, 1, 5)\n array([0, 0.25, 0.5, 0.75, 1])\n\n >>> ak.linspace(start=1, stop=0, length=5)\n array([1, 0.75, 0.5, 0.25, 0])\n\n >>> ak.linspace(start=-5, stop=0, length=5)\n array([-5, -3.75, -2.5, -1.25, 0])\n \"\"\"\n if not all((np.isscalar(start), np.isscalar(stop), np.isscalar(length))):\n raise TypeError(\"all arguments must be scalars\")\n\n starttype = resolve_scalar_dtype(start)\n\n try: \n startstr = NUMBER_FORMAT_STRINGS[starttype].format(start)\n except KeyError as ke:\n raise TypeError(('The start parameter must be an int or a scalar that' +\n ' can be parsed to an int, but is a {}'.format(ke)))\n stoptype = resolve_scalar_dtype(stop)\n\n try: \n stopstr = NUMBER_FORMAT_STRINGS[stoptype].format(stop)\n except KeyError as ke:\n raise TypeError(('The stop parameter must be an int or a scalar that' +\n ' can be parsed to an int, but is a {}'.format(ke)))\n\n lentype = resolve_scalar_dtype(length)\n if lentype != 'int64':\n raise TypeError(\"The length parameter must be an int64\")\n\n try: \n lenstr = NUMBER_FORMAT_STRINGS[lentype].format(length)\n except KeyError as ke:\n raise TypeError(('The length parameter must be an int or a scalar that' +\n ' can be parsed to an int, but is a {}'.format(ke)))\n\n repMsg = generic_msg(\"linspace {} {} {}\".format(startstr, stopstr, lenstr))\n return create_pdarray(cast(str,repMsg))\n\ndef randint(low : Union[int,float], high : Union[int,float], size : int, dtype=int64, seed : Union[None, int]=None) -> pdarray:\n \"\"\"\n Generate a pdarray of randomized int, float, or bool values in a specified range.\n\n Parameters\n ----------\n low : 
Union[int,float]\n The low value (inclusive) of the range\n high : Union[int,float]\n The high value (exclusive for int, inclusive for float) of the range\n size : int\n The length of the returned array\n dtype : {int64, float64, bool}\n The dtype of the array\n\n Returns\n -------\n pdarray\n Values drawn uniformly from the specified range having the desired dtype\n \n Raises\n ------\n TypeError\n Raised if dtype.name not in DTypes, size is not an int, low or if \n not a scalar\n ValueError\n Raised if size < 0 or if high < low\n\n Notes\n -----\n Calling randint with dtype=float64 will result in uniform non-integral\n floating point values.\n\n Examples\n --------\n >>> ak.randint(0, 10, 5)\n array([5, 7, 4, 8, 3])\n\n >>> ak.randint(0, 1, 3, dtype=ak.float64)\n array([0.92176432277231968, 0.083130710959903542, 0.68894208386667544])\n\n >>> ak.randint(0, 1, 5, dtype=ak.bool)\n array([True, False, True, True, True])\n \"\"\"\n if not all((np.isscalar(low), np.isscalar(high), np.isscalar(size))):\n raise TypeError(\"all arguments must be scalars\")\n if resolve_scalar_dtype(size) != 'int64':\n raise TypeError(\"The size parameter must be an integer\")\n if resolve_scalar_dtype(low) not in RANDINT_TYPES:\n raise TypeError(\"The low parameter must be an integer or float\")\n if resolve_scalar_dtype(high) not in RANDINT_TYPES:\n raise TypeError(\"The high parameter must be an integer or float\")\n if size < 0 or high < low:\n raise ValueError(\"size must be > 0 and high > low\")\n dtype = akdtype(dtype) # normalize dtype\n # check dtype for error\n if dtype.name not in DTypes:\n raise TypeError(\"unsupported dtype {}\".format(dtype))\n lowstr = NUMBER_FORMAT_STRINGS[dtype.name].format(low)\n highstr = NUMBER_FORMAT_STRINGS[dtype.name].format(high)\n sizestr = NUMBER_FORMAT_STRINGS['int64'].format(size)\n repMsg = generic_msg(\"randint {} {} {} {} {}\".\\\n format(sizestr, dtype.name, lowstr, highstr, seed))\n return create_pdarray(repMsg)\n\n@typechecked\ndef uniform(size : int, low : float=0.0, high : float=1.0, seed: Union[None, int]=None) -> pdarray:\n \"\"\"\n Generate a pdarray with uniformly distributed random values \n in a specified range.\n\n Parameters\n ----------\n low : float\n The low value (inclusive) of the range\n high : float\n The high value (inclusive) of the range\n size : int\n The length of the returned array\n\n Returns\n -------\n pdarray, float64\n Values drawn uniformly from the specified range\n\n Raises\n ------\n TypeError\n Raised if dtype.name not in DTypes, size is not an int, or if\n either low or high is not an int or float\n ValueError\n Raised if size < 0 or if high < low\n\n Examples\n --------\n >>> ak.uniform(3)\n array([0.92176432277231968, 0.083130710959903542, 0.68894208386667544])\n \"\"\"\n return randint(low=low, high=high, size=size, dtype='float64', seed=seed)\n\n \n@typechecked\ndef standard_normal(size : int, seed : Union[None, int]=None) -> pdarray:\n \"\"\"\n Draw real numbers from the standard normal distribution.\n\n Parameters\n ----------\n size : int\n The number of samples to draw (size of the returned array)\n \n Returns\n -------\n pdarray, float64\n The array of random numbers\n \n Raises\n ------\n TypeError\n Raised if size is not an int\n ValueError\n Raised if size < 0\n\n See Also\n --------\n randint\n\n Notes\n -----\n For random samples from :math:`N(\\mu, \\sigma^2)`, use:\n\n ``(sigma * standard_normal(size)) + mu``\n \"\"\"\n if size < 0:\n raise ValueError(\"The size parameter must be > 0\")\n msg = \"randomNormal {} 
{}\".format(NUMBER_FORMAT_STRINGS['int64'].format(size), seed)\n repMsg = generic_msg(msg)\n return create_pdarray(cast(str,repMsg))\n\n@typechecked\ndef random_strings_uniform(minlen : int, maxlen : int, size : int, \n characters : str='uppercase', seed : Union[None, int]=None) -> Strings:\n \"\"\"\n Generate random strings with lengths uniformly distributed between \n minlen and maxlen, and with characters drawn from a specified set.\n\n Parameters\n ----------\n minlen : int\n The minimum allowed length of string\n maxlen : int\n The maximum allowed length of string\n size : int\n The number of strings to generate\n characters : (uppercase, lowercase, numeric, printable, binary)\n The set of characters to draw from\n\n Returns\n -------\n Strings\n The array of random strings\n \n Raises\n ------\n ValueError\n Raised if minlen < 0, maxlen < minlen, or size < 0\n\n See Also\n --------\n random_strings_lognormal, randint\n \"\"\"\n if minlen < 0 or maxlen < minlen or size < 0:\n raise ValueError((\"Incompatible arguments: minlen < 0, maxlen < minlen, \" +\n \"or size < 0\"))\n msg = \"randomStrings {} {} {} {} {} {}\".\\\n format(NUMBER_FORMAT_STRINGS['int64'].format(size),\n \"uniform\", characters,\n NUMBER_FORMAT_STRINGS['int64'].format(minlen),\n NUMBER_FORMAT_STRINGS['int64'].format(maxlen),\n seed)\n repMsg = generic_msg(msg)\n return Strings(*(cast(str,repMsg).split('+')))\n\n@typechecked\ndef random_strings_lognormal(logmean : Union[float, int], logstd : Union[float, int], \n size : int, characters : str='uppercase',\n seed : Union[None, int]=None) -> Strings:\n \"\"\"\n Generate random strings with log-normally distributed lengths and \n with characters drawn from a specified set.\n\n Parameters\n ----------\n logmean : Union[float, int]\n The log-mean of the length distribution\n logstd : float\n The log-standard-deviation of the length distribution\n size : int\n The number of strings to generate\n characters : (uppercase, lowercase, numeric, printable, binary)\n The set of characters to draw from\n\n Returns\n -------\n Strings\n The Strings object encapsulating a pdarray of random strings\n \n Raises\n ------\n TypeError\n Raised if logmean is neither a float nor a int, logstd is not a float, \n size is not an int, or if characters is not a str\n ValueError\n Raised if logstd <= 0 or size < 0\n\n See Also\n --------\n random_strings_lognormal, randint\n\n Notes\n -----\n The lengths of the generated strings are distributed $Lognormal(\\mu, \\sigma^2)$,\n with :math:`\\mu = logmean` and :math:`\\sigma = logstd`. Thus, the strings will\n have an average length of :math:`exp(\\mu + 0.5*\\sigma^2)`, a minimum length of \n zero, and a heavy tail towards longer strings.\n \"\"\"\n if logstd <= 0 or size < 0:\n raise ValueError(\"Incompatible arguments: logstd <= 0 or size < 0\")\n msg = \"randomStrings {} {} {} {} {} {}\".\\\n format(NUMBER_FORMAT_STRINGS['int64'].format(size),\n \"lognormal\", characters,\n NUMBER_FORMAT_STRINGS['float64'].format(logmean),\n NUMBER_FORMAT_STRINGS['float64'].format(logstd),\n seed)\n repMsg = generic_msg(msg)\n return Strings(*(cast(str,repMsg).split('+')))\n\n\n\n#@typechecked\n@dispatch(Strings) \ndef suffix_array( strings : Strings) -> SArrays:\n \"\"\"\n Return the suffix arrays of given strings. The size/shape of each suffix\n\tarrays is the same as the corresponding strings. \n\tA simple example of suffix array is as follow. Given a string \"banana$\",\n\tall the suffixes are as follows. 
\n\ts[0]=\"banana$\"\n\ts[1]=\"anana$\"\n\ts[2]=\"nana$\"\n\ts[3]=\"ana$\"\n\ts[4]=\"na$\"\n\ts[5]=\"a$\"\n\ts[6]=\"$\"\n\tThe suffix array of string \"banana$\" is the array of indices of sorted suffixes.\n\ts[6]=\"$\"\n\ts[5]=\"a$\"\n\ts[3]=\"ana$\"\n\ts[1]=\"anana$\"\n\ts[0]=\"banana$\"\n\ts[4]=\"na$\"\n\ts[2]=\"nana$\"\n\tso sa=[6,5,3,1,0,4,2]\n\n Returns\n -------\n pdarray\n The suffix arrays of the given strings\n\n See Also\n --------\n\n Notes\n -----\n \n Raises\n ------ \n RuntimeError\n Raised if there is a server-side error in executing group request or\n creating the pdarray encapsulating the return message\n \"\"\"\n msg = \"segmentedSuffixAry {} {} {}\".format( strings.objtype,\n strings.offsets.name,\n strings.bytes.name) \n repMsg = generic_msg(msg)\n pdarrays= SArrays(*(repMsg.split('+')))\n return pdarrays\n@dispatch(str) \ndef suffix_array(filename: str) -> SArrays:\n \"\"\"\n This function is major used for testing correctness and performance\n Return the suffix array of given file name's content as a string. \n\tA simple example of suffix array is as follow. Given string \"banana$\",\n\tall the suffixes are as follows. \n\ts[0]=\"banana$\"\n\ts[1]=\"anana$\"\n\ts[2]=\"nana$\"\n\ts[3]=\"ana$\"\n\ts[4]=\"na$\"\n\ts[5]=\"a$\"\n\ts[6]=\"$\"\n\tThe suffix array of string \"banana$\" is the array of indices of sorted suffixes.\n\ts[6]=\"$\"\n\ts[5]=\"a$\"\n\ts[3]=\"ana$\"\n\ts[1]=\"anana$\"\n\ts[0]=\"banana$\"\n\ts[4]=\"na$\"\n\ts[2]=\"nana$\"\n\tso sa=[6,5,3,1,0,4,2]\n\n Returns\n -------\n pdarray\n The suffix arrays of the given strings\n\n See Also\n --------\n\n Notes\n -----\n \n Raises\n ------ \n RuntimeError\n Raised if there is a server-side error in executing group request or\n creating the pdarray encapsulating the return message\n \"\"\"\n msg = \"segmentedSAFile {}\".format( filename )\n repMsg = generic_msg(msg)\n pdarrays= SArrays(*(repMsg.split('+')))\n return pdarrays\n"
] |
[
[
"numpy.array",
"numpy.allclose",
"numpy.sort",
"numpy.unique"
],
[
"numpy.array",
"numpy.zeros",
"numpy.cumsum",
"numpy.isscalar"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
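The arkouda record above explains suffix-array construction in its docstrings (the "banana$" walk-through) and carries a pure-Python suffixArray helper. As a hedged, standalone sketch only — the helper name naive_suffix_array is illustrative and not part of the record, and nothing beyond the Python standard library is assumed — the snippet below reproduces that naive construction and checks it against the worked example from the docstring.

def naive_suffix_array(s):
    # Pair every suffix with its starting index, sort the pairs
    # lexicographically by suffix text, and keep only the indices.
    return [i for _, i in sorted((s[i:], i) for i in range(len(s)))]

if __name__ == "__main__":
    # Worked example from the record's docstring: the sorted suffixes of
    # "banana$" are $, a$, ana$, anana$, banana$, na$, nana$, so the
    # suffix array is [6, 5, 3, 1, 0, 4, 2].
    assert naive_suffix_array("banana$") == [6, 5, 3, 1, 0, 4, 2]
    print(naive_suffix_array("banana$"))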
igncp/leaves
|
[
"158f776a07f2a2f7f862f00976889abf0bb30fc9"
] |
[
"src/languages/python/test-formatted/Pandas/series-test.py"
] |
[
"import unittest\nimport pandas as pd\nimport numpy as np\nfrom pandas import Series\n\n# Using parts of Python for Data Analysis - Wes McKinney\n\nser1 = Series([1, 4, 6, 8])\nser2 = Series([1, 6, -1, 2, None], index=['g', 'r', 'a', 'z', 'u'])\n\n\nclass SeriesTestCase(unittest.TestCase):\n def using_series_test(self):\n self.assertIsInstance(ser1.values, np.ndarray)\n self.assertEquals(len(ser1[ser1 < 0]), 0)\n self.assertEquals(len(ser1[ser1 > 0]), 4)\n self.assertTrue('g' in ser2)\n self.assertFalse('f' in ser2)\n self.assertFalse('f' in ser2)\n self.assertTrue(pd.notnull(ser2)['g'])\n self.assertFalse(pd.notnull(ser2)['u'])\n"
] |
[
[
"pandas.notnull",
"pandas.Series"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [
"0.23",
"0.21",
"2.0",
"1.4",
"1.3",
"0.19",
"1.1",
"1.5",
"0.24",
"0.20",
"1.0",
"0.25",
"1.2"
],
"scipy": [],
"tensorflow": []
}
] |
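The pandas record above exercises a few Series basics: ndarray-backed values, boolean indexing, index membership, and pd.notnull. A minimal hedged sketch of the same behaviour follows, assuming only that numpy and pandas are installed; the variable name ser is illustrative and not taken from the record.

import numpy as np
import pandas as pd

# A Series containing None is promoted to float64 and None becomes NaN.
ser = pd.Series([1, 6, -1, 2, None], index=['g', 'r', 'a', 'z', 'u'])

assert isinstance(ser.values, np.ndarray)    # backed by a numpy ndarray
assert len(ser[ser < 0]) == 1                # boolean mask keeps only the -1 entry
assert 'g' in ser and 'f' not in ser         # membership tests the index labels
assert bool(pd.notnull(ser)['g'])            # 1.0 is not null
assert not bool(pd.notnull(ser)['u'])        # NaN (from None) is null
print(ser[ser > 0])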
FISHackathon2020/RAN
|
[
"cb5e1459f4d26bd619ba7244979fce277b44aba9",
"cb5e1459f4d26bd619ba7244979fce277b44aba9"
] |
[
"python/envs/hackathon/lib/python3.7/site-packages/gensim/models/deprecated/word2vec.py",
"python/hackathon/src/google_word_similarity/trainer.py"
] |
[
"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n#\n# Copyright (C) 2013 Radim Rehurek <[email protected]>\n# Licensed under the GNU LGPL v2.1 - http://www.gnu.org/licenses/lgpl.html\n\n\n\"\"\"\nWarnings\n--------\n.. deprecated:: 3.3.0\n Use :mod:`gensim.models.word2vec` instead.\n\n\nProduce word vectors with deep learning via word2vec's \"skip-gram and CBOW models\", using either\nhierarchical softmax or negative sampling [1]_ [2]_.\n\nNOTE: There are more ways to get word vectors in Gensim than just Word2Vec.\nSee wrappers for FastText, VarEmbed and WordRank.\n\nThe training algorithms were originally ported from the C package https://code.google.com/p/word2vec/\nand extended with additional functionality.\n\nFor a blog tutorial on gensim word2vec, with an interactive web app trained on GoogleNews,\nvisit http://radimrehurek.com/2014/02/word2vec-tutorial/\n\n**Make sure you have a C compiler before installing gensim, to use optimized (compiled) word2vec training**\n(70x speedup compared to plain NumPy implementation [3]_).\n\nInitialize a model with e.g.:\n\n.. sourcecode:: pycon\n\n >>> model = Word2Vec(sentences, size=100, window=5, min_count=5, workers=4)\n\nPersist a model to disk with:\n\n.. sourcecode:: pycon\n\n >>> model.save(fname)\n >>> model = Word2Vec.load(fname) # you can continue training with the loaded model!\n\nThe word vectors are stored in a KeyedVectors instance in model.wv.\nThis separates the read-only word vector lookup operations in KeyedVectors from the training code in Word2Vec:\n\n.. sourcecode:: pycon\n\n >>> model.wv['computer'] # numpy vector of a word\n array([-0.00449447, -0.00310097, 0.02421786, ...], dtype=float32)\n\nThe word vectors can also be instantiated from an existing file on disk in the word2vec C format\nas a KeyedVectors instance::\n\n NOTE: It is impossible to continue training the vectors loaded from the C format because hidden weights,\n vocabulary frequency and the binary tree is missing:\n\n .. sourcecode:: pycon\n\n >>> from gensim.models.keyedvectors import KeyedVectors\n >>> word_vectors = KeyedVectors.load_word2vec_format('/tmp/vectors.txt', binary=False) # C text format\n >>> word_vectors = KeyedVectors.load_word2vec_format('/tmp/vectors.bin', binary=True) # C binary format\n\n\nYou can perform various NLP word tasks with the model. Some of them\nare already built-in:\n\n.. sourcecode:: pycon\n\n >>> model.wv.most_similar(positive=['woman', 'king'], negative=['man'])\n [('queen', 0.50882536), ...]\n\n >>> model.wv.most_similar_cosmul(positive=['woman', 'king'], negative=['man'])\n [('queen', 0.71382287), ...]\n\n >>> model.wv.doesnt_match(\"breakfast cereal dinner lunch\".split())\n 'cereal'\n\n >>> model.wv.similarity('woman', 'man')\n 0.73723527\n\nProbability of a text under the model:\n\n.. sourcecode:: pycon\n\n >>> model.score([\"The fox jumped over a lazy dog\".split()])\n 0.2158356\n\nCorrelation with human opinion on word similarity:\n\n.. sourcecode:: pycon\n\n >>> model.wv.evaluate_word_pairs(os.path.join(module_path, 'test_data','wordsim353.tsv'))\n 0.51, 0.62, 0.13\n\nAnd on analogies:\n\n.. sourcecode:: pycon\n\n >>> model.wv.accuracy(os.path.join(module_path, 'test_data', 'questions-words.txt'))\n\nand so on.\n\nIf you're finished training a model (i.e. no more updates, only querying),\nthen switch to the :mod:`gensim.models.KeyedVectors` instance in wv\n\n.. 
sourcecode:: pycon\n\n >>> word_vectors = model.wv\n >>> del model\n\nto trim unneeded model memory = use much less RAM.\n\nNote that there is a :mod:`gensim.models.phrases` module which lets you automatically\ndetect phrases longer than one word. Using phrases, you can learn a word2vec model\nwhere \"words\" are actually multiword expressions, such as `new_york_times` or `financial_crisis`:\n\n.. sourcecode:: pycon\n\n >>> bigram_transformer = gensim.models.Phrases(sentences)\n >>> model = Word2Vec(bigram_transformer[sentences], size=100, ...)\n\n.. [1] Tomas Mikolov, Kai Chen, Greg Corrado, and Jeffrey Dean.\n Efficient Estimation of Word Representations in Vector Space. In Proceedings of Workshop at ICLR, 2013.\n.. [2] Tomas Mikolov, Ilya Sutskever, Kai Chen, Greg Corrado, and Jeffrey Dean.\n Distributed Representations of Words and Phrases and their Compositionality. In Proceedings of NIPS, 2013.\n.. [3] Optimizing word2vec in gensim, http://radimrehurek.com/2013/09/word2vec-in-python-part-two-optimizing/\n\"\"\"\nfrom __future__ import division # py3 \"true division\"\n\nimport logging\nimport sys\nimport os\nimport heapq\nfrom timeit import default_timer\nfrom copy import deepcopy\nfrom collections import defaultdict\nimport threading\nimport itertools\nimport warnings\n\nfrom gensim.utils import keep_vocab_item, call_on_class_only\nfrom gensim.models.deprecated.keyedvectors import KeyedVectors, Vocab\nfrom gensim.models.word2vec import Word2Vec as NewWord2Vec\nfrom gensim.models.deprecated.old_saveload import SaveLoad\n\ntry:\n from queue import Queue, Empty\nexcept ImportError:\n from Queue import Queue, Empty\n\nfrom numpy import exp, log, dot, zeros, outer, random, dtype, float32 as REAL,\\\n uint32, seterr, array, uint8, vstack, fromstring, sqrt,\\\n empty, sum as np_sum, ones, logaddexp\n\nfrom scipy.special import expit\n\nfrom gensim import utils\nfrom gensim import matutils # utility fnc for pickling, common scipy operations etc\nfrom six import iteritems, itervalues, string_types\nfrom six.moves import range\nfrom types import GeneratorType\n\nlogger = logging.getLogger(__name__)\n\n\n# failed... 
fall back to plain numpy (20-80x slower training than the above)\nFAST_VERSION = -1\nMAX_WORDS_IN_BATCH = 10000\n\n\ndef load_old_word2vec(*args, **kwargs):\n old_model = Word2Vec.load(*args, **kwargs)\n vector_size = getattr(old_model, 'vector_size', old_model.layer1_size)\n params = {\n 'size': vector_size,\n 'alpha': old_model.alpha,\n 'window': old_model.window,\n 'min_count': old_model.min_count,\n 'max_vocab_size': old_model.__dict__.get('max_vocab_size', None),\n 'sample': old_model.__dict__.get('sample', 1e-3),\n 'seed': old_model.seed,\n 'workers': old_model.workers,\n 'min_alpha': old_model.min_alpha,\n 'sg': old_model.sg,\n 'hs': old_model.hs,\n 'negative': old_model.negative,\n 'cbow_mean': old_model.cbow_mean,\n 'hashfxn': old_model.__dict__.get('hashfxn', hash),\n 'iter': old_model.__dict__.get('iter', 5),\n 'null_word': old_model.__dict__.get('null_word', 0),\n 'sorted_vocab': old_model.__dict__.get('sorted_vocab', 1),\n 'batch_words': old_model.__dict__.get('batch_words', MAX_WORDS_IN_BATCH),\n 'compute_loss': old_model.__dict__.get('compute_loss', None)\n }\n new_model = NewWord2Vec(**params)\n # set trainables attributes\n new_model.wv.vectors = old_model.wv.syn0\n if hasattr(old_model.wv, 'syn0norm'):\n new_model.wv.vectors_norm = old_model.wv.syn0norm\n if hasattr(old_model, 'syn1'):\n new_model.trainables.syn1 = old_model.syn1\n if hasattr(old_model, 'syn1neg'):\n new_model.trainables.syn1neg = old_model.syn1neg\n if hasattr(old_model, 'syn0_lockf'):\n new_model.trainables.vectors_lockf = old_model.syn0_lockf\n # set vocabulary attributes\n new_model.wv.vocab = old_model.wv.vocab\n new_model.wv.index2word = old_model.wv.index2word\n new_model.vocabulary.cum_table = old_model.__dict__.get('cum_table', None)\n\n new_model.train_count = old_model.__dict__.get('train_count', None)\n new_model.corpus_count = old_model.__dict__.get('corpus_count', None)\n new_model.corpus_total_words = old_model.__dict__.get('corpus_total_words', None)\n new_model.running_training_loss = old_model.__dict__.get('running_training_loss', 0)\n new_model.total_train_time = old_model.__dict__.get('total_train_time', None)\n new_model.min_alpha_yet_reached = old_model.__dict__.get('min_alpha_yet_reached', old_model.alpha)\n new_model.model_trimmed_post_training = old_model.__dict__.get('model_trimmed_post_training', None)\n\n return new_model\n\n\ndef train_batch_sg(model, sentences, alpha, work=None, compute_loss=False):\n \"\"\"\n Update skip-gram model by training on a sequence of sentences.\n\n Each sentence is a list of string tokens, which are looked up in the model's\n vocab dictionary. Called internally from `Word2Vec.train()`.\n\n This is the non-optimized, Python version. 
If you have cython installed, gensim\n will use the optimized version from word2vec_inner instead.\n\n \"\"\"\n result = 0\n for sentence in sentences:\n word_vocabs = [model.wv.vocab[w] for w in sentence if w in model.wv.vocab\n and model.wv.vocab[w].sample_int > model.random.rand() * 2**32]\n for pos, word in enumerate(word_vocabs):\n reduced_window = model.random.randint(model.window) # `b` in the original word2vec code\n\n # now go over all words from the (reduced) window, predicting each one in turn\n start = max(0, pos - model.window + reduced_window)\n for pos2, word2 in enumerate(word_vocabs[start:(pos + model.window + 1 - reduced_window)], start):\n # don't train on the `word` itself\n if pos2 != pos:\n train_sg_pair(\n model, model.wv.index2word[word.index], word2.index, alpha, compute_loss=compute_loss\n )\n\n result += len(word_vocabs)\n return result\n\n\ndef train_batch_cbow(model, sentences, alpha, work=None, neu1=None, compute_loss=False):\n \"\"\"\n Update CBOW model by training on a sequence of sentences.\n\n Each sentence is a list of string tokens, which are looked up in the model's\n vocab dictionary. Called internally from `Word2Vec.train()`.\n\n This is the non-optimized, Python version. If you have cython installed, gensim\n will use the optimized version from word2vec_inner instead.\n\n \"\"\"\n result = 0\n for sentence in sentences:\n word_vocabs = [model.wv.vocab[w] for w in sentence if w in model.wv.vocab\n and model.wv.vocab[w].sample_int > model.random.rand() * 2**32]\n for pos, word in enumerate(word_vocabs):\n reduced_window = model.random.randint(model.window) # `b` in the original word2vec code\n start = max(0, pos - model.window + reduced_window)\n window_pos = enumerate(word_vocabs[start:(pos + model.window + 1 - reduced_window)], start)\n word2_indices = [word2.index for pos2, word2 in window_pos if (word2 is not None and pos2 != pos)]\n l1 = np_sum(model.wv.syn0[word2_indices], axis=0) # 1 x vector_size\n if word2_indices and model.cbow_mean:\n l1 /= len(word2_indices)\n train_cbow_pair(model, word, word2_indices, l1, alpha, compute_loss=compute_loss)\n result += len(word_vocabs)\n return result\n\n\ndef score_sentence_sg(model, sentence, work=None):\n \"\"\"\n Obtain likelihood score for a single sentence in a fitted skip-gram representaion.\n\n The sentence is a list of Vocab objects (or None, when the corresponding\n word is not in the vocabulary). Called internally from `Word2Vec.score()`.\n\n This is the non-optimized, Python version. 
If you have cython installed, gensim\n will use the optimized version from word2vec_inner instead.\n\n \"\"\"\n log_prob_sentence = 0.0\n if model.negative:\n raise RuntimeError(\"scoring is only available for HS=True\")\n\n word_vocabs = [model.wv.vocab[w] for w in sentence if w in model.wv.vocab]\n for pos, word in enumerate(word_vocabs):\n if word is None:\n continue # OOV word in the input sentence => skip\n\n # now go over all words from the window, predicting each one in turn\n start = max(0, pos - model.window)\n for pos2, word2 in enumerate(word_vocabs[start: pos + model.window + 1], start):\n # don't train on OOV words and on the `word` itself\n if word2 is not None and pos2 != pos:\n log_prob_sentence += score_sg_pair(model, word, word2)\n\n return log_prob_sentence\n\n\ndef score_sentence_cbow(model, sentence, work=None, neu1=None):\n \"\"\"\n Obtain likelihood score for a single sentence in a fitted CBOW representaion.\n\n The sentence is a list of Vocab objects (or None, where the corresponding\n word is not in the vocabulary. Called internally from `Word2Vec.score()`.\n\n This is the non-optimized, Python version. If you have cython installed, gensim\n will use the optimized version from word2vec_inner instead.\n\n \"\"\"\n log_prob_sentence = 0.0\n if model.negative:\n raise RuntimeError(\"scoring is only available for HS=True\")\n\n word_vocabs = [model.wv.vocab[w] for w in sentence if w in model.wv.vocab]\n for pos, word in enumerate(word_vocabs):\n if word is None:\n continue # OOV word in the input sentence => skip\n\n start = max(0, pos - model.window)\n window_pos = enumerate(word_vocabs[start:(pos + model.window + 1)], start)\n word2_indices = [word2.index for pos2, word2 in window_pos if (word2 is not None and pos2 != pos)]\n l1 = np_sum(model.wv.syn0[word2_indices], axis=0) # 1 x layer1_size\n if word2_indices and model.cbow_mean:\n l1 /= len(word2_indices)\n log_prob_sentence += score_cbow_pair(model, word, l1)\n\n return log_prob_sentence\n\n\ndef train_sg_pair(model, word, context_index, alpha, learn_vectors=True, learn_hidden=True,\n context_vectors=None, context_locks=None, compute_loss=False, is_ft=False):\n if context_vectors is None:\n if is_ft:\n context_vectors_vocab = model.wv.syn0_vocab\n context_vectors_ngrams = model.wv.syn0_ngrams\n else:\n context_vectors = model.wv.syn0\n if context_locks is None:\n if is_ft:\n context_locks_vocab = model.syn0_vocab_lockf\n context_locks_ngrams = model.syn0_ngrams_lockf\n else:\n context_locks = model.syn0_lockf\n\n if word not in model.wv.vocab:\n return\n predict_word = model.wv.vocab[word] # target word (NN output)\n\n if is_ft:\n l1_vocab = context_vectors_vocab[context_index[0]]\n l1_ngrams = np_sum(context_vectors_ngrams[context_index[1:]], axis=0)\n if context_index:\n l1 = np_sum([l1_vocab, l1_ngrams], axis=0) / len(context_index)\n else:\n l1 = context_vectors[context_index] # input word (NN input/projection layer)\n lock_factor = context_locks[context_index]\n\n neu1e = zeros(l1.shape)\n\n if model.hs:\n # work on the entire tree at once, to push as much work into numpy's C routines as possible (performance)\n l2a = deepcopy(model.syn1[predict_word.point]) # 2d matrix, codelen x layer1_size\n prod_term = dot(l1, l2a.T)\n fa = expit(prod_term) # propagate hidden -> output\n ga = (1 - predict_word.code - fa) * alpha # vector of error gradients multiplied by the learning rate\n if learn_hidden:\n model.syn1[predict_word.point] += outer(ga, l1) # learn hidden -> output\n neu1e += dot(ga, l2a) # save error\n\n 
# loss component corresponding to hierarchical softmax\n if compute_loss:\n sgn = (-1.0)**predict_word.code # `ch` function, 0 -> 1, 1 -> -1\n lprob = -log(expit(-sgn * prod_term))\n model.running_training_loss += sum(lprob)\n\n if model.negative:\n # use this word (label = 1) + `negative` other random words not from this sentence (label = 0)\n word_indices = [predict_word.index]\n while len(word_indices) < model.negative + 1:\n w = model.cum_table.searchsorted(model.random.randint(model.cum_table[-1]))\n if w != predict_word.index:\n word_indices.append(w)\n l2b = model.syn1neg[word_indices] # 2d matrix, k+1 x layer1_size\n prod_term = dot(l1, l2b.T)\n fb = expit(prod_term) # propagate hidden -> output\n gb = (model.neg_labels - fb) * alpha # vector of error gradients multiplied by the learning rate\n if learn_hidden:\n model.syn1neg[word_indices] += outer(gb, l1) # learn hidden -> output\n neu1e += dot(gb, l2b) # save error\n\n # loss component corresponding to negative sampling\n if compute_loss:\n model.running_training_loss -= sum(log(expit(-1 * prod_term[1:]))) # for the sampled words\n model.running_training_loss -= log(expit(prod_term[0])) # for the output word\n\n if learn_vectors:\n if is_ft:\n model.wv.syn0_vocab[context_index[0]] += neu1e * context_locks_vocab[context_index[0]]\n for i in context_index[1:]:\n model.wv.syn0_ngrams[i] += neu1e * context_locks_ngrams[i]\n else:\n l1 += neu1e * lock_factor # learn input -> hidden (mutates model.wv.syn0[word2.index], if that is l1)\n return neu1e\n\n\ndef train_cbow_pair(model, word, input_word_indices, l1, alpha, learn_vectors=True, learn_hidden=True,\n compute_loss=False, context_vectors=None, context_locks=None, is_ft=False):\n if context_vectors is None:\n if is_ft:\n context_vectors_vocab = model.wv.syn0_vocab\n context_vectors_ngrams = model.wv.syn0_ngrams\n else:\n context_vectors = model.wv.syn0\n if context_locks is None:\n if is_ft:\n context_locks_vocab = model.syn0_vocab_lockf\n context_locks_ngrams = model.syn0_ngrams_lockf\n else:\n context_locks = model.syn0_lockf\n\n neu1e = zeros(l1.shape)\n\n if model.hs:\n l2a = model.syn1[word.point] # 2d matrix, codelen x layer1_size\n prod_term = dot(l1, l2a.T)\n fa = expit(prod_term) # propagate hidden -> output\n ga = (1. 
- word.code - fa) * alpha # vector of error gradients multiplied by the learning rate\n if learn_hidden:\n model.syn1[word.point] += outer(ga, l1) # learn hidden -> output\n neu1e += dot(ga, l2a) # save error\n\n # loss component corresponding to hierarchical softmax\n if compute_loss:\n sgn = (-1.0)**word.code # ch function, 0-> 1, 1 -> -1\n model.running_training_loss += sum(-log(expit(-sgn * prod_term)))\n\n if model.negative:\n # use this word (label = 1) + `negative` other random words not from this sentence (label = 0)\n word_indices = [word.index]\n while len(word_indices) < model.negative + 1:\n w = model.cum_table.searchsorted(model.random.randint(model.cum_table[-1]))\n if w != word.index:\n word_indices.append(w)\n l2b = model.syn1neg[word_indices] # 2d matrix, k+1 x layer1_size\n prod_term = dot(l1, l2b.T)\n fb = expit(prod_term) # propagate hidden -> output\n gb = (model.neg_labels - fb) * alpha # vector of error gradients multiplied by the learning rate\n if learn_hidden:\n model.syn1neg[word_indices] += outer(gb, l1) # learn hidden -> output\n neu1e += dot(gb, l2b) # save error\n\n # loss component corresponding to negative sampling\n if compute_loss:\n model.running_training_loss -= sum(log(expit(-1 * prod_term[1:]))) # for the sampled words\n model.running_training_loss -= log(expit(prod_term[0])) # for the output word\n\n if learn_vectors:\n # learn input -> hidden, here for all words in the window separately\n if is_ft:\n if not model.cbow_mean and input_word_indices:\n neu1e /= (len(input_word_indices[0]) + len(input_word_indices[1]))\n for i in input_word_indices[0]:\n context_vectors_vocab[i] += neu1e * context_locks_vocab[i]\n for i in input_word_indices[1]:\n context_vectors_ngrams[i] += neu1e * context_locks_ngrams[i]\n else:\n if not model.cbow_mean and input_word_indices:\n neu1e /= len(input_word_indices)\n for i in input_word_indices:\n context_vectors[i] += neu1e * context_locks[i]\n\n return neu1e\n\n\ndef score_sg_pair(model, word, word2):\n l1 = model.wv.syn0[word2.index]\n l2a = deepcopy(model.syn1[word.point]) # 2d matrix, codelen x layer1_size\n sgn = (-1.0)**word.code # ch function, 0-> 1, 1 -> -1\n lprob = -logaddexp(0, -sgn * dot(l1, l2a.T))\n return sum(lprob)\n\n\ndef score_cbow_pair(model, word, l1):\n l2a = model.syn1[word.point] # 2d matrix, codelen x layer1_size\n sgn = (-1.0)**word.code # ch function, 0-> 1, 1 -> -1\n lprob = -logaddexp(0, -sgn * dot(l1, l2a.T))\n return sum(lprob)\n\n\nclass Word2Vec(SaveLoad):\n \"\"\"\n Class for training, using and evaluating neural networks described in https://code.google.com/p/word2vec/\n\n If you're finished training a model (=no more updates, only querying)\n then switch to the :mod:`gensim.models.KeyedVectors` instance in wv\n\n The model can be stored/loaded via its `save()` and `load()` methods, or stored/loaded in a format\n compatible with the original word2vec implementation via `wv.save_word2vec_format()`\n and `KeyedVectors.load_word2vec_format()`.\n\n \"\"\"\n\n def __init__(self, sentences=None, size=100, alpha=0.025, window=5, min_count=5,\n max_vocab_size=None, sample=1e-3, seed=1, workers=3, min_alpha=0.0001,\n sg=0, hs=0, negative=5, cbow_mean=1, hashfxn=hash, iter=5, null_word=0,\n trim_rule=None, sorted_vocab=1, batch_words=MAX_WORDS_IN_BATCH, compute_loss=False):\n \"\"\"\n Initialize the model from an iterable of `sentences`. 
Each sentence is a\n list of words (unicode strings) that will be used for training.\n\n The `sentences` iterable can be simply a list, but for larger corpora,\n consider an iterable that streams the sentences directly from disk/network.\n See :class:`BrownCorpus`, :class:`Text8Corpus` or :class:`LineSentence` in\n this module for such examples.\n\n If you don't supply `sentences`, the model is left uninitialized -- use if\n you plan to initialize it in some other way.\n\n `sg` defines the training algorithm. By default (`sg=0`), CBOW is used.\n Otherwise (`sg=1`), skip-gram is employed.\n\n `size` is the dimensionality of the feature vectors.\n\n `window` is the maximum distance between the current and predicted word within a sentence.\n\n `alpha` is the initial learning rate (will linearly drop to `min_alpha` as training progresses).\n\n `seed` = for the random number generator. Initial vectors for each\n word are seeded with a hash of the concatenation of word + str(seed).\n Note that for a fully deterministically-reproducible run, you must also limit the model to\n a single worker thread, to eliminate ordering jitter from OS thread scheduling. (In Python\n 3, reproducibility between interpreter launches also requires use of the PYTHONHASHSEED\n environment variable to control hash randomization.)\n\n `min_count` = ignore all words with total frequency lower than this.\n\n `max_vocab_size` = limit RAM during vocabulary building; if there are more unique\n words than this, then prune the infrequent ones. Every 10 million word types\n need about 1GB of RAM. Set to `None` for no limit (default).\n\n `sample` = threshold for configuring which higher-frequency words are randomly downsampled;\n default is 1e-3, useful range is (0, 1e-5).\n\n `workers` = use this many worker threads to train the model (=faster training with multicore machines).\n\n `hs` = if 1, hierarchical softmax will be used for model training.\n If set to 0 (default), and `negative` is non-zero, negative sampling will be used.\n\n `negative` = if > 0, negative sampling will be used, the int for negative\n specifies how many \"noise words\" should be drawn (usually between 5-20).\n Default is 5. If set to 0, no negative samping is used.\n\n `cbow_mean` = if 0, use the sum of the context word vectors. If 1 (default), use the mean.\n Only applies when cbow is used.\n\n `hashfxn` = hash function to use to randomly initialize weights, for increased\n training reproducibility. Default is Python's rudimentary built in hash function.\n\n `iter` = number of iterations (epochs) over the corpus. Default is 5.\n\n `trim_rule` = vocabulary trimming rule, specifies whether certain words should remain\n in the vocabulary, be trimmed away, or handled using the default (discard if word count < min_count).\n Can be None (min_count will be used), or a callable that accepts parameters (word, count, min_count) and\n returns either `utils.RULE_DISCARD`, `utils.RULE_KEEP` or `utils.RULE_DEFAULT`.\n Note: The rule, if given, is only used to prune vocabulary during build_vocab() and is not stored as part\n of the model.\n\n `sorted_vocab` = if 1 (default), sort the vocabulary by descending frequency before\n assigning word indexes.\n\n `batch_words` = target size (in words) for batches of examples passed to worker threads (and\n thus cython routines). Default is 10000. 
(Larger batches will be passed if individual\n texts are longer than 10000 words, but the standard cython code truncates to that maximum.)\n\n \"\"\"\n\n self.load = call_on_class_only\n\n if FAST_VERSION == -1:\n logger.warning('Slow version of %s is being used', __name__)\n else:\n logger.debug('Fast version of %s is being used', __name__)\n\n self.initialize_word_vectors()\n self.sg = int(sg)\n self.cum_table = None # for negative sampling\n self.vector_size = int(size)\n self.layer1_size = int(size)\n if size % 4 != 0:\n logger.warning(\"consider setting layer size to a multiple of 4 for greater performance\")\n self.alpha = float(alpha)\n self.min_alpha_yet_reached = float(alpha) # To warn user if alpha increases\n self.window = int(window)\n self.max_vocab_size = max_vocab_size\n self.seed = seed\n self.random = random.RandomState(seed)\n self.min_count = min_count\n self.sample = sample\n self.workers = int(workers)\n self.min_alpha = float(min_alpha)\n self.hs = hs\n self.negative = negative\n self.cbow_mean = int(cbow_mean)\n self.hashfxn = hashfxn\n self.iter = iter\n self.null_word = null_word\n self.train_count = 0\n self.total_train_time = 0\n self.sorted_vocab = sorted_vocab\n self.batch_words = batch_words\n self.model_trimmed_post_training = False\n self.compute_loss = compute_loss\n self.running_training_loss = 0\n if sentences is not None:\n if isinstance(sentences, GeneratorType):\n raise TypeError(\"You can't pass a generator as the sentences argument. Try an iterator.\")\n self.build_vocab(sentences, trim_rule=trim_rule)\n self.train(\n sentences, total_examples=self.corpus_count, epochs=self.iter,\n start_alpha=self.alpha, end_alpha=self.min_alpha\n )\n else:\n if trim_rule is not None:\n logger.warning(\n \"The rule, if given, is only used to prune vocabulary during build_vocab() \"\n \"and is not stored as part of the model. Model initialized without sentences. \"\n \"trim_rule provided, if any, will be ignored.\"\n )\n\n def initialize_word_vectors(self):\n self.wv = KeyedVectors()\n\n def make_cum_table(self, power=0.75, domain=2**31 - 1):\n \"\"\"\n Create a cumulative-distribution table using stored vocabulary word counts for\n drawing random words in the negative-sampling training routines.\n\n To draw a word index, choose a random integer up to the maximum value in the\n table (cum_table[-1]), then finding that integer's sorted insertion point\n (as if by bisect_left or ndarray.searchsorted()). That insertion point is the\n drawn index, coming up in proportion equal to the increment at that slot.\n\n Called internally from 'build_vocab()'.\n \"\"\"\n vocab_size = len(self.wv.index2word)\n self.cum_table = zeros(vocab_size, dtype=uint32)\n # compute sum of all power (Z in paper)\n train_words_pow = 0.0\n for word_index in range(vocab_size):\n train_words_pow += self.wv.vocab[self.wv.index2word[word_index]].count**power\n cumulative = 0.0\n for word_index in range(vocab_size):\n cumulative += self.wv.vocab[self.wv.index2word[word_index]].count**power\n self.cum_table[word_index] = round(cumulative / train_words_pow * domain)\n if len(self.cum_table) > 0:\n assert self.cum_table[-1] == domain\n\n def create_binary_tree(self):\n \"\"\"\n Create a binary Huffman tree using stored vocabulary word counts. Frequent words\n will have shorter binary codes. 
Called internally from `build_vocab()`.\n\n \"\"\"\n logger.info(\"constructing a huffman tree from %i words\", len(self.wv.vocab))\n\n # build the huffman tree\n heap = list(itervalues(self.wv.vocab))\n heapq.heapify(heap)\n for i in range(len(self.wv.vocab) - 1):\n min1, min2 = heapq.heappop(heap), heapq.heappop(heap)\n heapq.heappush(\n heap, Vocab(count=min1.count + min2.count, index=i + len(self.wv.vocab), left=min1, right=min2)\n )\n\n # recurse over the tree, assigning a binary code to each vocabulary word\n if heap:\n max_depth, stack = 0, [(heap[0], [], [])]\n while stack:\n node, codes, points = stack.pop()\n if node.index < len(self.wv.vocab):\n # leaf node => store its path from the root\n node.code, node.point = codes, points\n max_depth = max(len(codes), max_depth)\n else:\n # inner node => continue recursion\n points = array(list(points) + [node.index - len(self.wv.vocab)], dtype=uint32)\n stack.append((node.left, array(list(codes) + [0], dtype=uint8), points))\n stack.append((node.right, array(list(codes) + [1], dtype=uint8), points))\n\n logger.info(\"built huffman tree with maximum node depth %i\", max_depth)\n\n def build_vocab(self, sentences, keep_raw_vocab=False, trim_rule=None, progress_per=10000, update=False):\n \"\"\"\n Build vocabulary from a sequence of sentences (can be a once-only generator stream).\n Each sentence must be a list of unicode strings.\n \"\"\"\n self.scan_vocab(sentences, progress_per=progress_per, trim_rule=trim_rule) # initial survey\n # trim by min_count & precalculate downsampling\n self.scale_vocab(keep_raw_vocab=keep_raw_vocab, trim_rule=trim_rule, update=update)\n self.finalize_vocab(update=update) # build tables & arrays\n\n def build_vocab_from_freq(self, word_freq, keep_raw_vocab=False, corpus_count=None, trim_rule=None, update=False):\n \"\"\"\n Build vocabulary from a dictionary of word frequencies.\n Build model vocabulary from a passed dictionary that contains (word,word count).\n Words must be of type unicode strings.\n\n Parameters\n ----------\n `word_freq` : dict\n Word,Word_Count dictionary.\n `keep_raw_vocab` : bool\n If not true, delete the raw vocabulary after the scaling is done and free up RAM.\n `corpus_count`: int\n Even if no corpus is provided, this argument can set corpus_count explicitly.\n `trim_rule` = vocabulary trimming rule, specifies whether certain words should remain\n in the vocabulary, be trimmed away, or handled using the default (discard if word count < min_count).\n Can be None (min_count will be used), or a callable that accepts parameters (word, count, min_count) and\n returns either `utils.RULE_DISCARD`, `utils.RULE_KEEP` or `utils.RULE_DEFAULT`.\n `update`: bool\n If true, the new provided words in `word_freq` dict will be added to model's vocab.\n\n Returns\n --------\n None\n\n Examples\n --------\n\n .. 
sourcecode:: pycon\n\n >>> from gensim.models.word2vec import Word2Vec\n >>> model = Word2Vec()\n >>> model.build_vocab_from_freq({\"Word1\": 15, \"Word2\": 20})\n\n \"\"\"\n logger.info(\"Processing provided word frequencies\")\n # Instead of scanning text, this will assign provided word frequencies dictionary(word_freq)\n # to be directly the raw vocab\n raw_vocab = word_freq\n logger.info(\n \"collected %i different raw word, with total frequency of %i\",\n len(raw_vocab), sum(itervalues(raw_vocab))\n )\n\n # Since no sentences are provided, this is to control the corpus_count\n self.corpus_count = corpus_count if corpus_count else 0\n self.raw_vocab = raw_vocab\n\n # trim by min_count & precalculate downsampling\n self.scale_vocab(keep_raw_vocab=keep_raw_vocab, trim_rule=trim_rule, update=update)\n self.finalize_vocab(update=update) # build tables & arrays\n\n def scan_vocab(self, sentences, progress_per=10000, trim_rule=None):\n \"\"\"Do an initial scan of all words appearing in sentences.\"\"\"\n logger.info(\"collecting all words and their counts\")\n sentence_no = -1\n total_words = 0\n min_reduce = 1\n vocab = defaultdict(int)\n checked_string_types = 0\n for sentence_no, sentence in enumerate(sentences):\n if not checked_string_types:\n if isinstance(sentence, string_types):\n logger.warning(\n \"Each 'sentences' item should be a list of words (usually unicode strings). \"\n \"First item here is instead plain %s.\",\n type(sentence)\n )\n checked_string_types += 1\n if sentence_no % progress_per == 0:\n logger.info(\n \"PROGRESS: at sentence #%i, processed %i words, keeping %i word types\",\n sentence_no, total_words, len(vocab)\n )\n for word in sentence:\n vocab[word] += 1\n total_words += len(sentence)\n\n if self.max_vocab_size and len(vocab) > self.max_vocab_size:\n utils.prune_vocab(vocab, min_reduce, trim_rule=trim_rule)\n min_reduce += 1\n\n logger.info(\n \"collected %i word types from a corpus of %i raw words and %i sentences\",\n len(vocab), total_words, sentence_no + 1\n )\n self.corpus_count = sentence_no + 1\n self.raw_vocab = vocab\n return total_words\n\n def scale_vocab(self, min_count=None, sample=None, dry_run=False,\n keep_raw_vocab=False, trim_rule=None, update=False):\n \"\"\"\n Apply vocabulary settings for `min_count` (discarding less-frequent words)\n and `sample` (controlling the downsampling of more-frequent words).\n\n Calling with `dry_run=True` will only simulate the provided settings and\n report the size of the retained vocabulary, effective corpus length, and\n estimated memory requirements. 
Results are both printed via logging and\n returned as a dict.\n\n Delete the raw vocabulary after the scaling is done to free up RAM,\n unless `keep_raw_vocab` is set.\n\n \"\"\"\n min_count = min_count or self.min_count\n sample = sample or self.sample\n drop_total = drop_unique = 0\n\n if not update:\n logger.info(\"Loading a fresh vocabulary\")\n retain_total, retain_words = 0, []\n # Discard words less-frequent than min_count\n if not dry_run:\n self.wv.index2word = []\n # make stored settings match these applied settings\n self.min_count = min_count\n self.sample = sample\n self.wv.vocab = {}\n\n for word, v in iteritems(self.raw_vocab):\n if keep_vocab_item(word, v, min_count, trim_rule=trim_rule):\n retain_words.append(word)\n retain_total += v\n if not dry_run:\n self.wv.vocab[word] = Vocab(count=v, index=len(self.wv.index2word))\n self.wv.index2word.append(word)\n else:\n drop_unique += 1\n drop_total += v\n original_unique_total = len(retain_words) + drop_unique\n retain_unique_pct = len(retain_words) * 100 / max(original_unique_total, 1)\n logger.info(\n \"min_count=%d retains %i unique words (%i%% of original %i, drops %i)\",\n min_count, len(retain_words), retain_unique_pct, original_unique_total, drop_unique\n )\n original_total = retain_total + drop_total\n retain_pct = retain_total * 100 / max(original_total, 1)\n logger.info(\n \"min_count=%d leaves %i word corpus (%i%% of original %i, drops %i)\",\n min_count, retain_total, retain_pct, original_total, drop_total\n )\n else:\n logger.info(\"Updating model with new vocabulary\")\n new_total = pre_exist_total = 0\n new_words = pre_exist_words = []\n for word, v in iteritems(self.raw_vocab):\n if keep_vocab_item(word, v, min_count, trim_rule=trim_rule):\n if word in self.wv.vocab:\n pre_exist_words.append(word)\n pre_exist_total += v\n if not dry_run:\n self.wv.vocab[word].count += v\n else:\n new_words.append(word)\n new_total += v\n if not dry_run:\n self.wv.vocab[word] = Vocab(count=v, index=len(self.wv.index2word))\n self.wv.index2word.append(word)\n else:\n drop_unique += 1\n drop_total += v\n original_unique_total = len(pre_exist_words) + len(new_words) + drop_unique\n pre_exist_unique_pct = len(pre_exist_words) * 100 / max(original_unique_total, 1)\n new_unique_pct = len(new_words) * 100 / max(original_unique_total, 1)\n logger.info(\n \"New added %i unique words (%i%% of original %i) \"\n \"and increased the count of %i pre-existing words (%i%% of original %i)\",\n len(new_words), new_unique_pct, original_unique_total, len(pre_exist_words),\n pre_exist_unique_pct, original_unique_total\n )\n retain_words = new_words + pre_exist_words\n retain_total = new_total + pre_exist_total\n\n # Precalculate each vocabulary item's threshold for sampling\n if not sample:\n # no words downsampled\n threshold_count = retain_total\n elif sample < 1.0:\n # traditional meaning: set parameter as proportion of total\n threshold_count = sample * retain_total\n else:\n # new shorthand: sample >= 1 means downsample all words with higher count than sample\n threshold_count = int(sample * (3 + sqrt(5)) / 2)\n\n downsample_total, downsample_unique = 0, 0\n for w in retain_words:\n v = self.raw_vocab[w]\n word_probability = (sqrt(v / threshold_count) + 1) * (threshold_count / v)\n if word_probability < 1.0:\n downsample_unique += 1\n downsample_total += word_probability * v\n else:\n word_probability = 1.0\n downsample_total += v\n if not dry_run:\n self.wv.vocab[w].sample_int = int(round(word_probability * 2**32))\n\n if not dry_run and not 
keep_raw_vocab:\n logger.info(\"deleting the raw counts dictionary of %i items\", len(self.raw_vocab))\n self.raw_vocab = defaultdict(int)\n\n logger.info(\"sample=%g downsamples %i most-common words\", sample, downsample_unique)\n logger.info(\n \"downsampling leaves estimated %i word corpus (%.1f%% of prior %i)\",\n downsample_total, downsample_total * 100.0 / max(retain_total, 1), retain_total\n )\n\n # return from each step: words-affected, resulting-corpus-size, extra memory estimates\n report_values = {\n 'drop_unique': drop_unique, 'retain_total': retain_total, 'downsample_unique': downsample_unique,\n 'downsample_total': int(downsample_total), 'memory': self.estimate_memory(vocab_size=len(retain_words))\n }\n\n return report_values\n\n def finalize_vocab(self, update=False):\n \"\"\"Build tables and model weights based on final vocabulary settings.\"\"\"\n if not self.wv.index2word:\n self.scale_vocab()\n if self.sorted_vocab and not update:\n self.sort_vocab()\n if self.hs:\n # add info about each word's Huffman encoding\n self.create_binary_tree()\n if self.negative:\n # build the table for drawing random words (for negative sampling)\n self.make_cum_table()\n if self.null_word:\n # create null pseudo-word for padding when using concatenative L1 (run-of-words)\n # this word is only ever input – never predicted – so count, huffman-point, etc doesn't matter\n word, v = '\\0', Vocab(count=1, sample_int=0)\n v.index = len(self.wv.vocab)\n self.wv.index2word.append(word)\n self.wv.vocab[word] = v\n # set initial input/projection and hidden weights\n if not update:\n self.reset_weights()\n else:\n self.update_weights()\n\n def sort_vocab(self):\n \"\"\"Sort the vocabulary so the most frequent words have the lowest indexes.\"\"\"\n if len(self.wv.syn0):\n raise RuntimeError(\"cannot sort vocabulary after model weights already initialized.\")\n self.wv.index2word.sort(key=lambda word: self.wv.vocab[word].count, reverse=True)\n for i, word in enumerate(self.wv.index2word):\n self.wv.vocab[word].index = i\n\n def reset_from(self, other_model):\n \"\"\"\n Borrow shareable pre-built structures (like vocab) from the other_model. Useful\n if testing multiple models in parallel on the same corpus.\n \"\"\"\n self.wv.vocab = other_model.wv.vocab\n self.wv.index2word = other_model.wv.index2word\n self.cum_table = other_model.cum_table\n self.corpus_count = other_model.corpus_count\n self.reset_weights()\n\n def _do_train_job(self, sentences, alpha, inits):\n \"\"\"\n Train a single batch of sentences. Return 2-tuple `(effective word count after\n ignoring unknown words and sentence length trimming, total word count)`.\n \"\"\"\n work, neu1 = inits\n tally = 0\n if self.sg:\n tally += train_batch_sg(self, sentences, alpha, work, self.compute_loss)\n else:\n tally += train_batch_cbow(self, sentences, alpha, work, neu1, self.compute_loss)\n return tally, self._raw_word_count(sentences)\n\n def _raw_word_count(self, job):\n \"\"\"Return the number of words in a given job.\"\"\"\n return sum(len(sentence) for sentence in job)\n\n def train(self, sentences, total_examples=None, total_words=None,\n epochs=None, start_alpha=None, end_alpha=None, word_count=0,\n queue_factor=2, report_delay=1.0, compute_loss=None):\n \"\"\"\n Update the model's neural weights from a sequence of sentences (can be a once-only generator stream).\n For Word2Vec, each sentence must be a list of unicode strings. 
(Subclasses may accept other examples.)\n\n To support linear learning-rate decay from (initial) alpha to min_alpha, and accurate\n progres-percentage logging, either total_examples (count of sentences) or total_words (count of\n raw words in sentences) MUST be provided. (If the corpus is the same as was provided to\n `build_vocab()`, the count of examples in that corpus will be available in the model's\n `corpus_count` property.)\n\n To avoid common mistakes around the model's ability to do multiple training passes itself, an\n explicit `epochs` argument MUST be provided. In the common and recommended case, where `train()`\n is only called once, the model's cached `iter` value should be supplied as `epochs` value.\n \"\"\"\n if self.model_trimmed_post_training:\n raise RuntimeError(\"Parameters for training were discarded using model_trimmed_post_training method\")\n if FAST_VERSION < 0:\n warnings.warn(\n \"C extension not loaded for Word2Vec, training will be slow. \"\n \"Install a C compiler and reinstall gensim for fast training.\"\n )\n self.neg_labels = []\n if self.negative > 0:\n # precompute negative labels optimization for pure-python training\n self.neg_labels = zeros(self.negative + 1)\n self.neg_labels[0] = 1.\n\n if compute_loss:\n self.compute_loss = compute_loss\n self.running_training_loss = 0\n\n logger.info(\n \"training model with %i workers on %i vocabulary and %i features, \"\n \"using sg=%s hs=%s sample=%s negative=%s window=%s\",\n self.workers, len(self.wv.vocab), self.layer1_size, self.sg,\n self.hs, self.sample, self.negative, self.window\n )\n\n if not self.wv.vocab:\n raise RuntimeError(\"you must first build vocabulary before training the model\")\n if not len(self.wv.syn0):\n raise RuntimeError(\"you must first finalize vocabulary before training the model\")\n\n if not hasattr(self, 'corpus_count'):\n raise ValueError(\n \"The number of sentences in the training corpus is missing. \"\n \"Did you load the model via KeyedVectors.load_word2vec_format?\"\n \"Models loaded via load_word2vec_format don't support further training. \"\n \"Instead start with a blank model, scan_vocab on the new corpus, \"\n \"intersect_word2vec_format with the old model, then train.\"\n )\n\n if total_words is None and total_examples is None:\n raise ValueError(\n \"You must specify either total_examples or total_words, for proper alpha and progress calculations. \"\n \"The usual value is total_examples=model.corpus_count.\"\n )\n if epochs is None:\n raise ValueError(\"You must specify an explict epochs count. 
The usual value is epochs=model.iter.\")\n start_alpha = start_alpha or self.alpha\n end_alpha = end_alpha or self.min_alpha\n\n job_tally = 0\n\n if epochs > 1:\n sentences = utils.RepeatCorpusNTimes(sentences, epochs)\n total_words = total_words and total_words * epochs\n total_examples = total_examples and total_examples * epochs\n\n def worker_loop():\n \"\"\"Train the model, lifting lists of sentences from the job_queue.\"\"\"\n work = matutils.zeros_aligned(self.layer1_size, dtype=REAL) # per-thread private work memory\n neu1 = matutils.zeros_aligned(self.layer1_size, dtype=REAL)\n jobs_processed = 0\n while True:\n job = job_queue.get()\n if job is None:\n progress_queue.put(None)\n break # no more jobs => quit this worker\n sentences, alpha = job\n tally, raw_tally = self._do_train_job(sentences, alpha, (work, neu1))\n progress_queue.put((len(sentences), tally, raw_tally)) # report back progress\n jobs_processed += 1\n logger.debug(\"worker exiting, processed %i jobs\", jobs_processed)\n\n def job_producer():\n \"\"\"Fill jobs queue using the input `sentences` iterator.\"\"\"\n job_batch, batch_size = [], 0\n pushed_words, pushed_examples = 0, 0\n next_alpha = start_alpha\n if next_alpha > self.min_alpha_yet_reached:\n logger.warning(\"Effective 'alpha' higher than previous training cycles\")\n self.min_alpha_yet_reached = next_alpha\n job_no = 0\n\n for sent_idx, sentence in enumerate(sentences):\n sentence_length = self._raw_word_count([sentence])\n\n # can we fit this sentence into the existing job batch?\n if batch_size + sentence_length <= self.batch_words:\n # yes => add it to the current job\n job_batch.append(sentence)\n batch_size += sentence_length\n else:\n # no => submit the existing job\n logger.debug(\n \"queueing job #%i (%i words, %i sentences) at alpha %.05f\",\n job_no, batch_size, len(job_batch), next_alpha\n )\n job_no += 1\n job_queue.put((job_batch, next_alpha))\n\n # update the learning rate for the next job\n if end_alpha < next_alpha:\n if total_examples:\n # examples-based decay\n pushed_examples += len(job_batch)\n progress = 1.0 * pushed_examples / total_examples\n else:\n # words-based decay\n pushed_words += self._raw_word_count(job_batch)\n progress = 1.0 * pushed_words / total_words\n next_alpha = start_alpha - (start_alpha - end_alpha) * progress\n next_alpha = max(end_alpha, next_alpha)\n\n # add the sentence that didn't fit as the first item of a new job\n job_batch, batch_size = [sentence], sentence_length\n\n # add the last job too (may be significantly smaller than batch_words)\n if job_batch:\n logger.debug(\n \"queueing job #%i (%i words, %i sentences) at alpha %.05f\",\n job_no, batch_size, len(job_batch), next_alpha\n )\n job_no += 1\n job_queue.put((job_batch, next_alpha))\n\n if job_no == 0 and self.train_count == 0:\n logger.warning(\n \"train() called with an empty iterator (if not intended, \"\n \"be sure to provide a corpus that offers restartable iteration = an iterable).\"\n )\n\n # give the workers heads up that they can finish -- no more work!\n for _ in range(self.workers):\n job_queue.put(None)\n logger.debug(\"job loop exiting, total %i jobs\", job_no)\n\n # buffer ahead only a limited number of jobs.. 
this is the reason we can't simply use ThreadPool :(\n job_queue = Queue(maxsize=queue_factor * self.workers)\n progress_queue = Queue(maxsize=(queue_factor + 1) * self.workers)\n\n workers = [threading.Thread(target=worker_loop) for _ in range(self.workers)]\n unfinished_worker_count = len(workers)\n workers.append(threading.Thread(target=job_producer))\n\n for thread in workers:\n thread.daemon = True # make interrupting the process with ctrl+c easier\n thread.start()\n\n example_count, trained_word_count, raw_word_count = 0, 0, word_count\n start, next_report = default_timer() - 0.00001, 1.0\n\n while unfinished_worker_count > 0:\n report = progress_queue.get() # blocks if workers too slow\n if report is None: # a thread reporting that it finished\n unfinished_worker_count -= 1\n logger.info(\"worker thread finished; awaiting finish of %i more threads\", unfinished_worker_count)\n continue\n examples, trained_words, raw_words = report\n job_tally += 1\n\n # update progress stats\n example_count += examples\n trained_word_count += trained_words # only words in vocab & sampled\n raw_word_count += raw_words\n\n # log progress once every report_delay seconds\n elapsed = default_timer() - start\n if elapsed >= next_report:\n if total_examples:\n # examples-based progress %\n logger.info(\n \"PROGRESS: at %.2f%% examples, %.0f words/s, in_qsize %i, out_qsize %i\",\n 100.0 * example_count / total_examples, trained_word_count / elapsed,\n utils.qsize(job_queue), utils.qsize(progress_queue)\n )\n else:\n # words-based progress %\n logger.info(\n \"PROGRESS: at %.2f%% words, %.0f words/s, in_qsize %i, out_qsize %i\",\n 100.0 * raw_word_count / total_words, trained_word_count / elapsed,\n utils.qsize(job_queue), utils.qsize(progress_queue)\n )\n next_report = elapsed + report_delay\n\n # all done; report the final stats\n elapsed = default_timer() - start\n logger.info(\n \"training on %i raw words (%i effective words) took %.1fs, %.0f effective words/s\",\n raw_word_count, trained_word_count, elapsed, trained_word_count / elapsed\n )\n if job_tally < 10 * self.workers:\n logger.warning(\n \"under 10 jobs per worker: consider setting a smaller `batch_words' for smoother alpha decay\"\n )\n\n # check that the input corpus hasn't changed during iteration\n if total_examples and total_examples != example_count:\n logger.warning(\n \"supplied example count (%i) did not equal expected count (%i)\", example_count, total_examples\n )\n if total_words and total_words != raw_word_count:\n logger.warning(\n \"supplied raw word count (%i) did not equal expected count (%i)\", raw_word_count, total_words\n )\n\n self.train_count += 1 # number of times train() has been called\n self.total_train_time += elapsed\n self.clear_sims()\n return trained_word_count\n\n # basics copied from the train() function\n def score(self, sentences, total_sentences=int(1e6), chunksize=100, queue_factor=2, report_delay=1):\n \"\"\"\n Score the log probability for a sequence of sentences (can be a once-only generator stream).\n Each sentence must be a list of unicode strings.\n This does not change the fitted model in any way (see Word2Vec.train() for that).\n\n We have currently only implemented score for the hierarchical softmax scheme,\n so you need to have run word2vec with hs=1 and negative=0 for this to work.\n\n Note that you should specify total_sentences; we'll run into problems if you ask to\n score more than this number of sentences but it is inefficient to set the value too high.\n\n See the article by [#taddy]_ and the 
gensim demo at [#deepir]_ for examples of\n how to use such scores in document classification.\n\n .. [#taddy] Taddy, Matt. Document Classification by Inversion of Distributed Language Representations,\n in Proceedings of the 2015 Conference of the Association of Computational Linguistics.\n .. [#deepir] https://github.com/piskvorky/gensim/blob/develop/docs/notebooks/deepir.ipynb\n\n \"\"\"\n if FAST_VERSION < 0:\n warnings.warn(\n \"C extension compilation failed, scoring will be slow. \"\n \"Install a C compiler and reinstall gensim for fastness.\"\n )\n\n logger.info(\n \"scoring sentences with %i workers on %i vocabulary and %i features, \"\n \"using sg=%s hs=%s sample=%s and negative=%s\",\n self.workers, len(self.wv.vocab), self.layer1_size, self.sg, self.hs, self.sample, self.negative\n )\n\n if not self.wv.vocab:\n raise RuntimeError(\"you must first build vocabulary before scoring new data\")\n\n if not self.hs:\n raise RuntimeError(\n \"We have currently only implemented score for the hierarchical softmax scheme, \"\n \"so you need to have run word2vec with hs=1 and negative=0 for this to work.\"\n )\n\n def worker_loop():\n \"\"\"Compute log probability for each sentence, lifting lists of sentences from the jobs queue.\"\"\"\n work = zeros(1, dtype=REAL) # for sg hs, we actually only need one memory loc (running sum)\n neu1 = matutils.zeros_aligned(self.layer1_size, dtype=REAL)\n while True:\n job = job_queue.get()\n if job is None: # signal to finish\n break\n ns = 0\n for sentence_id, sentence in job:\n if sentence_id >= total_sentences:\n break\n if self.sg:\n score = score_sentence_sg(self, sentence, work)\n else:\n score = score_sentence_cbow(self, sentence, work, neu1)\n sentence_scores[sentence_id] = score\n ns += 1\n progress_queue.put(ns) # report progress\n\n start, next_report = default_timer(), 1.0\n # buffer ahead only a limited number of jobs.. 
this is the reason we can't simply use ThreadPool :(\n job_queue = Queue(maxsize=queue_factor * self.workers)\n progress_queue = Queue(maxsize=(queue_factor + 1) * self.workers)\n\n workers = [threading.Thread(target=worker_loop) for _ in range(self.workers)]\n for thread in workers:\n thread.daemon = True # make interrupting the process with ctrl+c easier\n thread.start()\n\n sentence_count = 0\n sentence_scores = matutils.zeros_aligned(total_sentences, dtype=REAL)\n\n push_done = False\n done_jobs = 0\n jobs_source = enumerate(utils.grouper(enumerate(sentences), chunksize))\n\n # fill jobs queue with (id, sentence) job items\n while True:\n try:\n job_no, items = next(jobs_source)\n if (job_no - 1) * chunksize > total_sentences:\n logger.warning(\n \"terminating after %i sentences (set higher total_sentences if you want more).\",\n total_sentences\n )\n job_no -= 1\n raise StopIteration()\n logger.debug(\"putting job #%i in the queue\", job_no)\n job_queue.put(items)\n except StopIteration:\n logger.info(\"reached end of input; waiting to finish %i outstanding jobs\", job_no - done_jobs + 1)\n for _ in range(self.workers):\n job_queue.put(None) # give the workers heads up that they can finish -- no more work!\n push_done = True\n try:\n while done_jobs < (job_no + 1) or not push_done:\n ns = progress_queue.get(push_done) # only block after all jobs pushed\n sentence_count += ns\n done_jobs += 1\n elapsed = default_timer() - start\n if elapsed >= next_report:\n logger.info(\n \"PROGRESS: at %.2f%% sentences, %.0f sentences/s\",\n 100.0 * sentence_count, sentence_count / elapsed\n )\n next_report = elapsed + report_delay # don't flood log, wait report_delay seconds\n else:\n # loop ended by job count; really done\n break\n except Empty:\n pass # already out of loop; continue to next push\n\n elapsed = default_timer() - start\n self.clear_sims()\n logger.info(\n \"scoring %i sentences took %.1fs, %.0f sentences/s\",\n sentence_count, elapsed, sentence_count / elapsed\n )\n return sentence_scores[:sentence_count]\n\n def clear_sims(self):\n \"\"\"\n Removes all L2-normalized vectors for words from the model.\n You will have to recompute them using init_sims method.\n \"\"\"\n\n self.wv.syn0norm = None\n\n def update_weights(self):\n \"\"\"\n Copy all the existing weights, and reset the weights for the newly\n added vocabulary.\n \"\"\"\n logger.info(\"updating layer weights\")\n gained_vocab = len(self.wv.vocab) - len(self.wv.syn0)\n newsyn0 = empty((gained_vocab, self.vector_size), dtype=REAL)\n\n # randomize the remaining words\n for i in range(len(self.wv.syn0), len(self.wv.vocab)):\n # construct deterministic seed from word AND seed argument\n newsyn0[i - len(self.wv.syn0)] = self.seeded_vector(self.wv.index2word[i] + str(self.seed))\n\n # Raise an error if an online update is run before initial training on a corpus\n if not len(self.wv.syn0):\n raise RuntimeError(\n \"You cannot do an online vocabulary-update of a model which has no prior vocabulary. 
\"\n \"First build the vocabulary of your model with a corpus before doing an online update.\"\n )\n\n self.wv.syn0 = vstack([self.wv.syn0, newsyn0])\n\n if self.hs:\n self.syn1 = vstack([self.syn1, zeros((gained_vocab, self.layer1_size), dtype=REAL)])\n if self.negative:\n self.syn1neg = vstack([self.syn1neg, zeros((gained_vocab, self.layer1_size), dtype=REAL)])\n self.wv.syn0norm = None\n\n # do not suppress learning for already learned words\n self.syn0_lockf = ones(len(self.wv.vocab), dtype=REAL) # zeros suppress learning\n\n def reset_weights(self):\n \"\"\"Reset all projection weights to an initial (untrained) state, but keep the existing vocabulary.\"\"\"\n logger.info(\"resetting layer weights\")\n self.wv.syn0 = empty((len(self.wv.vocab), self.vector_size), dtype=REAL)\n # randomize weights vector by vector, rather than materializing a huge random matrix in RAM at once\n for i in range(len(self.wv.vocab)):\n # construct deterministic seed from word AND seed argument\n self.wv.syn0[i] = self.seeded_vector(self.wv.index2word[i] + str(self.seed))\n if self.hs:\n self.syn1 = zeros((len(self.wv.vocab), self.layer1_size), dtype=REAL)\n if self.negative:\n self.syn1neg = zeros((len(self.wv.vocab), self.layer1_size), dtype=REAL)\n self.wv.syn0norm = None\n\n self.syn0_lockf = ones(len(self.wv.vocab), dtype=REAL) # zeros suppress learning\n\n def seeded_vector(self, seed_string):\n \"\"\"Create one 'random' vector (but deterministic by seed_string)\"\"\"\n # Note: built-in hash() may vary by Python version or even (in Py3.x) per launch\n once = random.RandomState(self.hashfxn(seed_string) & 0xffffffff)\n return (once.rand(self.vector_size) - 0.5) / self.vector_size\n\n def intersect_word2vec_format(self, fname, lockf=0.0, binary=False, encoding='utf8', unicode_errors='strict'):\n \"\"\"\n Merge the input-hidden weight matrix from the original C word2vec-tool format\n given, where it intersects with the current vocabulary. (No words are added to the\n existing vocabulary, but intersecting words adopt the file's weights, and\n non-intersecting words are left alone.)\n\n `binary` is a boolean indicating whether the data is in binary word2vec format.\n\n `lockf` is a lock-factor value to be set for any imported word-vectors; the\n default value of 0.0 prevents further updating of the vector during subsequent\n training. 
Use 1.0 to allow further training updates of merged vectors.\n \"\"\"\n overlap_count = 0\n logger.info(\"loading projection weights from %s\", fname)\n with utils.open(fname, 'rb') as fin:\n header = utils.to_unicode(fin.readline(), encoding=encoding)\n vocab_size, vector_size = (int(x) for x in header.split()) # throws for invalid file format\n if not vector_size == self.vector_size:\n raise ValueError(\"incompatible vector size %d in file %s\" % (vector_size, fname))\n # TOCONSIDER: maybe mismatched vectors still useful enough to merge (truncating/padding)?\n if binary:\n binary_len = dtype(REAL).itemsize * vector_size\n for _ in range(vocab_size):\n # mixed text and binary: read text first, then binary\n word = []\n while True:\n ch = fin.read(1)\n if ch == b' ':\n break\n if ch != b'\\n': # ignore newlines in front of words (some binary files have)\n word.append(ch)\n word = utils.to_unicode(b''.join(word), encoding=encoding, errors=unicode_errors)\n weights = fromstring(fin.read(binary_len), dtype=REAL)\n if word in self.wv.vocab:\n overlap_count += 1\n self.wv.syn0[self.wv.vocab[word].index] = weights\n self.syn0_lockf[self.wv.vocab[word].index] = lockf # lock-factor: 0.0 stops further changes\n else:\n for line_no, line in enumerate(fin):\n parts = utils.to_unicode(line.rstrip(), encoding=encoding, errors=unicode_errors).split(\" \")\n if len(parts) != vector_size + 1:\n raise ValueError(\"invalid vector on line %s (is this really the text format?)\" % line_no)\n word, weights = parts[0], [REAL(x) for x in parts[1:]]\n if word in self.wv.vocab:\n overlap_count += 1\n self.wv.syn0[self.wv.vocab[word].index] = weights\n self.syn0_lockf[self.wv.vocab[word].index] = lockf # lock-factor: 0.0 stops further changes\n logger.info(\"merged %d vectors into %s matrix from %s\", overlap_count, self.wv.syn0.shape, fname)\n\n def most_similar(self, positive=None, negative=None, topn=10, restrict_vocab=None, indexer=None):\n \"\"\"\n Deprecated. Use self.wv.most_similar() instead.\n Refer to the documentation for `gensim.models.KeyedVectors.most_similar`\n \"\"\"\n return self.wv.most_similar(positive, negative, topn, restrict_vocab, indexer)\n\n def wmdistance(self, document1, document2):\n \"\"\"\n Deprecated. Use self.wv.wmdistance() instead.\n Refer to the documentation for `gensim.models.KeyedVectors.wmdistance`\n \"\"\"\n return self.wv.wmdistance(document1, document2)\n\n def most_similar_cosmul(self, positive=None, negative=None, topn=10):\n \"\"\"\n Deprecated. Use self.wv.most_similar_cosmul() instead.\n Refer to the documentation for `gensim.models.KeyedVectors.most_similar_cosmul`\n \"\"\"\n return self.wv.most_similar_cosmul(positive, negative, topn)\n\n def similar_by_word(self, word, topn=10, restrict_vocab=None):\n \"\"\"\n Deprecated. Use self.wv.similar_by_word() instead.\n Refer to the documentation for `gensim.models.KeyedVectors.similar_by_word`\n \"\"\"\n return self.wv.similar_by_word(word, topn, restrict_vocab)\n\n def similar_by_vector(self, vector, topn=10, restrict_vocab=None):\n \"\"\"\n Deprecated. Use self.wv.similar_by_vector() instead.\n Refer to the documentation for `gensim.models.KeyedVectors.similar_by_vector`\n \"\"\"\n return self.wv.similar_by_vector(vector, topn, restrict_vocab)\n\n def doesnt_match(self, words):\n \"\"\"\n Deprecated. Use self.wv.doesnt_match() instead.\n Refer to the documentation for `gensim.models.KeyedVectors.doesnt_match`\n \"\"\"\n return self.wv.doesnt_match(words)\n\n def __getitem__(self, words):\n \"\"\"\n Deprecated. 
Use self.wv.__getitem__() instead.\n Refer to the documentation for `gensim.models.KeyedVectors.__getitem__`\n \"\"\"\n return self.wv.__getitem__(words)\n\n def __contains__(self, word):\n \"\"\"\n Deprecated. Use self.wv.__contains__() instead.\n Refer to the documentation for `gensim.models.KeyedVectors.__contains__`\n \"\"\"\n return self.wv.__contains__(word)\n\n def similarity(self, w1, w2):\n \"\"\"\n Deprecated. Use self.wv.similarity() instead.\n Refer to the documentation for `gensim.models.KeyedVectors.similarity`\n \"\"\"\n return self.wv.similarity(w1, w2)\n\n def n_similarity(self, ws1, ws2):\n \"\"\"\n Deprecated. Use self.wv.n_similarity() instead.\n Refer to the documentation for `gensim.models.KeyedVectors.n_similarity`\n \"\"\"\n return self.wv.n_similarity(ws1, ws2)\n\n def predict_output_word(self, context_words_list, topn=10):\n \"\"\"Report the probability distribution of the center word given the context words\n as input to the trained model.\"\"\"\n if not self.negative:\n raise RuntimeError(\n \"We have currently only implemented predict_output_word for the negative sampling scheme, \"\n \"so you need to have run word2vec with negative > 0 for this to work.\"\n )\n\n if not hasattr(self.wv, 'syn0') or not hasattr(self, 'syn1neg'):\n raise RuntimeError(\"Parameters required for predicting the output words not found.\")\n\n word_vocabs = [self.wv.vocab[w] for w in context_words_list if w in self.wv.vocab]\n if not word_vocabs:\n warnings.warn(\"All the input context words are out-of-vocabulary for the current model.\")\n return None\n\n word2_indices = [word.index for word in word_vocabs]\n\n l1 = np_sum(self.wv.syn0[word2_indices], axis=0)\n if word2_indices and self.cbow_mean:\n l1 /= len(word2_indices)\n\n prob_values = exp(dot(l1, self.syn1neg.T)) # propagate hidden -> output and take softmax to get probabilities\n prob_values /= sum(prob_values)\n top_indices = matutils.argsort(prob_values, topn=topn, reverse=True)\n # returning the most probable output words with their probabilities\n return [(self.wv.index2word[index1], prob_values[index1]) for index1 in top_indices]\n\n def init_sims(self, replace=False):\n \"\"\"\n init_sims() resides in KeyedVectors because it deals with syn0 mainly, but because syn1 is not an attribute\n of KeyedVectors, it has to be deleted in this class, and the normalizing of syn0 happens inside of KeyedVectors\n \"\"\"\n if replace and hasattr(self, 'syn1'):\n del self.syn1\n return self.wv.init_sims(replace)\n\n def estimate_memory(self, vocab_size=None, report=None):\n \"\"\"Estimate required memory for a model using current settings and provided vocabulary size.\"\"\"\n vocab_size = vocab_size or len(self.wv.vocab)\n report = report or {}\n report['vocab'] = vocab_size * (700 if self.hs else 500)\n report['syn0'] = vocab_size * self.vector_size * dtype(REAL).itemsize\n if self.hs:\n report['syn1'] = vocab_size * self.layer1_size * dtype(REAL).itemsize\n if self.negative:\n report['syn1neg'] = vocab_size * self.layer1_size * dtype(REAL).itemsize\n report['total'] = sum(report.values())\n logger.info(\n \"estimated required memory for %i words and %i dimensions: %i bytes\",\n vocab_size, self.vector_size, report['total']\n )\n return report\n\n @staticmethod\n def log_accuracy(section):\n return KeyedVectors.log_accuracy(section)\n\n def accuracy(self, questions, restrict_vocab=30000, most_similar=None, case_insensitive=True):\n most_similar = most_similar or KeyedVectors.most_similar\n return self.wv.accuracy(questions, 
restrict_vocab, most_similar, case_insensitive)\n\n @staticmethod\n def log_evaluate_word_pairs(pearson, spearman, oov, pairs):\n \"\"\"\n Deprecated. Use self.wv.log_evaluate_word_pairs() instead.\n Refer to the documentation for `gensim.models.KeyedVectors.log_evaluate_word_pairs`\n \"\"\"\n return KeyedVectors.log_evaluate_word_pairs(pearson, spearman, oov, pairs)\n\n def evaluate_word_pairs(self, pairs, delimiter='\\t', restrict_vocab=300000,\n case_insensitive=True, dummy4unknown=False):\n \"\"\"\n Deprecated. Use self.wv.evaluate_word_pairs() instead.\n Refer to the documentation for `gensim.models.KeyedVectors.evaluate_word_pairs`\n \"\"\"\n return self.wv.evaluate_word_pairs(pairs, delimiter, restrict_vocab, case_insensitive, dummy4unknown)\n\n def __str__(self):\n return \"%s(vocab=%s, size=%s, alpha=%s)\" % (\n self.__class__.__name__, len(self.wv.index2word), self.vector_size, self.alpha\n )\n\n def _minimize_model(self, save_syn1=False, save_syn1neg=False, save_syn0_lockf=False):\n warnings.warn(\n \"This method would be deprecated in the future. \"\n \"Keep just_word_vectors = model.wv to retain just the KeyedVectors instance \"\n \"for read-only querying of word vectors.\"\n )\n if save_syn1 and save_syn1neg and save_syn0_lockf:\n return\n if hasattr(self, 'syn1') and not save_syn1:\n del self.syn1\n if hasattr(self, 'syn1neg') and not save_syn1neg:\n del self.syn1neg\n if hasattr(self, 'syn0_lockf') and not save_syn0_lockf:\n del self.syn0_lockf\n self.model_trimmed_post_training = True\n\n def delete_temporary_training_data(self, replace_word_vectors_with_normalized=False):\n \"\"\"\n Discard parameters that are used in training and score. Use if you're sure you're done training a model.\n If `replace_word_vectors_with_normalized` is set, forget the original vectors and only keep the normalized\n ones = saves lots of memory!\n \"\"\"\n if replace_word_vectors_with_normalized:\n self.init_sims(replace=True)\n self._minimize_model()\n\n def save(self, *args, **kwargs):\n # don't bother storing the cached normalized vectors, recalculable table\n kwargs['ignore'] = kwargs.get('ignore', ['syn0norm', 'table', 'cum_table'])\n\n super(Word2Vec, self).save(*args, **kwargs)\n\n save.__doc__ = SaveLoad.save.__doc__\n\n @classmethod\n def load(cls, *args, **kwargs):\n model = super(Word2Vec, cls).load(*args, **kwargs)\n # update older models\n if hasattr(model, 'table'):\n delattr(model, 'table') # discard in favor of cum_table\n if model.negative and hasattr(model.wv, 'index2word'):\n model.make_cum_table() # rebuild cum_table from vocabulary\n if not hasattr(model, 'corpus_count'):\n model.corpus_count = None\n if not hasattr(model, 'corpus_total_words'):\n model.corpus_total_words = None\n for v in model.wv.vocab.values():\n if hasattr(v, 'sample_int'):\n break # already 0.12.0+ style int probabilities\n elif hasattr(v, 'sample_probability'):\n v.sample_int = int(round(v.sample_probability * 2**32))\n del v.sample_probability\n if not hasattr(model, 'syn0_lockf') and hasattr(model, 'syn0'):\n model.syn0_lockf = ones(len(model.wv.syn0), dtype=REAL)\n if not hasattr(model, 'random'):\n model.random = random.RandomState(model.seed)\n if not hasattr(model, 'train_count'):\n model.train_count = 0\n model.total_train_time = 0\n return model\n\n def _load_specials(self, *args, **kwargs):\n super(Word2Vec, self)._load_specials(*args, **kwargs)\n # loading from a pre-KeyedVectors word2vec model\n if not hasattr(self, 'wv'):\n wv = KeyedVectors()\n wv.syn0 = self.__dict__.get('syn0', [])\n 
wv.syn0norm = self.__dict__.get('syn0norm', None)\n wv.vocab = self.__dict__.get('vocab', {})\n wv.index2word = self.__dict__.get('index2word', [])\n self.wv = wv\n\n @classmethod\n def load_word2vec_format(cls, fname, fvocab=None, binary=False, encoding='utf8', unicode_errors='strict',\n limit=None, datatype=REAL):\n \"\"\"Deprecated. Use gensim.models.KeyedVectors.load_word2vec_format instead.\"\"\"\n raise DeprecationWarning(\"Deprecated. Use gensim.models.KeyedVectors.load_word2vec_format instead.\")\n\n def save_word2vec_format(self, fname, fvocab=None, binary=False):\n \"\"\"Deprecated. Use model.wv.save_word2vec_format instead.\"\"\"\n raise DeprecationWarning(\"Deprecated. Use model.wv.save_word2vec_format instead.\")\n\n def get_latest_training_loss(self):\n return self.running_training_loss\n\n\nclass BrownCorpus(object):\n \"\"\"Iterate over sentences from the Brown corpus (part of NLTK data).\"\"\"\n\n def __init__(self, dirname):\n self.dirname = dirname\n\n def __iter__(self):\n for fname in os.listdir(self.dirname):\n fname = os.path.join(self.dirname, fname)\n if not os.path.isfile(fname):\n continue\n with utils.open(fname, 'rb') as fin:\n for line in fin:\n line = utils.to_unicode(line)\n # each file line is a single sentence in the Brown corpus\n # each token is WORD/POS_TAG\n token_tags = [t.split('/') for t in line.split() if len(t.split('/')) == 2]\n # ignore words with non-alphabetic tags like \",\", \"!\" etc (punctuation, weird stuff)\n words = [\"%s/%s\" % (token.lower(), tag[:2]) for token, tag in token_tags if tag[:2].isalpha()]\n if not words: # don't bother sending out empty sentences\n continue\n yield words\n\n\nclass Text8Corpus(object):\n \"\"\"Iterate over sentences from the \"text8\" corpus, unzipped from http://mattmahoney.net/dc/text8.zip .\"\"\"\n\n def __init__(self, fname, max_sentence_length=MAX_WORDS_IN_BATCH):\n self.fname = fname\n self.max_sentence_length = max_sentence_length\n\n def __iter__(self):\n # the entire corpus is one gigantic line -- there are no sentence marks at all\n # so just split the sequence of tokens arbitrarily: 1 sentence = 1000 tokens\n sentence, rest = [], b''\n with utils.open(self.fname, 'rb') as fin:\n while True:\n text = rest + fin.read(8192) # avoid loading the entire file (=1 line) into RAM\n if text == rest: # EOF\n words = utils.to_unicode(text).split()\n sentence.extend(words) # return the last chunk of words, too (may be shorter/longer)\n if sentence:\n yield sentence\n break\n last_token = text.rfind(b' ') # last token may have been split in two... keep for next iteration\n words, rest = (utils.to_unicode(text[:last_token]).split(),\n text[last_token:].strip()) if last_token >= 0 else ([], text)\n sentence.extend(words)\n while len(sentence) >= self.max_sentence_length:\n yield sentence[:self.max_sentence_length]\n sentence = sentence[self.max_sentence_length:]\n\n\nclass LineSentence(object):\n \"\"\"\n Simple format: one sentence = one line; words already preprocessed and separated by whitespace.\n \"\"\"\n\n def __init__(self, source, max_sentence_length=MAX_WORDS_IN_BATCH, limit=None):\n \"\"\"\n `source` can be either a string or a file object. 
Clip the file to the first\n `limit` lines (or not clipped if limit is None, the default).\n\n Example::\n\n sentences = LineSentence('myfile.txt')\n\n Or for compressed files::\n\n sentences = LineSentence('compressed_text.txt.bz2')\n sentences = LineSentence('compressed_text.txt.gz')\n\n \"\"\"\n self.source = source\n self.max_sentence_length = max_sentence_length\n self.limit = limit\n\n def __iter__(self):\n \"\"\"Iterate through the lines in the source.\"\"\"\n try:\n # Assume it is a file-like object and try treating it as such\n # Things that don't have seek will trigger an exception\n self.source.seek(0)\n for line in itertools.islice(self.source, self.limit):\n line = utils.to_unicode(line).split()\n i = 0\n while i < len(line):\n yield line[i: i + self.max_sentence_length]\n i += self.max_sentence_length\n except AttributeError:\n # If it didn't work like a file, use it as a string filename\n with utils.open(self.source, 'rb') as fin:\n for line in itertools.islice(fin, self.limit):\n line = utils.to_unicode(line).split()\n i = 0\n while i < len(line):\n yield line[i: i + self.max_sentence_length]\n i += self.max_sentence_length\n\n\nclass PathLineSentences(object):\n \"\"\"\n\n Works like word2vec.LineSentence, but will process all files in a directory in alphabetical order by filename.\n The directory can only contain files that can be read by LineSentence: .bz2, .gz, and text files.\n Any file not ending with .bz2 or .gz is assumed to be a text file. Does not work with subdirectories.\n\n The format of files (either text, or compressed text files) in the path is one sentence = one line,\n with words already preprocessed and separated by whitespace.\n\n \"\"\"\n\n def __init__(self, source, max_sentence_length=MAX_WORDS_IN_BATCH, limit=None):\n \"\"\"\n `source` should be a path to a directory (as a string) where all files can be opened by the\n LineSentence class. 
Each file will be read up to `limit` lines (or not clipped if limit is None, the default).\n\n Example::\n\n sentences = PathLineSentences(os.getcwd() + '\\\\corpus\\\\')\n\n The files in the directory should be either text files, .bz2 files, or .gz files.\n\n \"\"\"\n self.source = source\n self.max_sentence_length = max_sentence_length\n self.limit = limit\n\n if os.path.isfile(self.source):\n logger.debug('single file given as source, rather than a directory of files')\n logger.debug('consider using models.word2vec.LineSentence for a single file')\n self.input_files = [self.source] # force code compatibility with list of files\n elif os.path.isdir(self.source):\n self.source = os.path.join(self.source, '') # ensures os-specific slash at end of path\n logger.info('reading directory %s', self.source)\n self.input_files = os.listdir(self.source)\n self.input_files = [self.source + filename for filename in self.input_files] # make full paths\n self.input_files.sort() # makes sure it happens in filename order\n else: # not a file or a directory, then we can't do anything with it\n raise ValueError('input is neither a file nor a path')\n logger.info('files read into PathLineSentences:%s', '\\n'.join(self.input_files))\n\n def __iter__(self):\n \"\"\"iterate through the files\"\"\"\n for file_name in self.input_files:\n logger.info('reading file %s', file_name)\n with utils.open(file_name, 'rb') as fin:\n for line in itertools.islice(fin, self.limit):\n line = utils.to_unicode(line).split()\n i = 0\n while i < len(line):\n yield line[i:i + self.max_sentence_length]\n i += self.max_sentence_length\n\n\n# Example: ./word2vec.py -train data.txt -output vec.txt -size 200 -window 5 -sample 1e-4 \\\n# -negative 5 -hs 0 -binary 0 -cbow 1 -iter 3\nif __name__ == \"__main__\":\n import argparse\n logging.basicConfig(\n format='%(asctime)s : %(threadName)s : %(levelname)s : %(message)s',\n level=logging.INFO\n )\n logger.info(\"running %s\", \" \".join(sys.argv))\n logger.info(\"using optimization %s\", FAST_VERSION)\n\n # check and process cmdline input\n program = os.path.basename(sys.argv[0])\n if len(sys.argv) < 2:\n print(globals()['__doc__'] % locals())\n sys.exit(1)\n\n from gensim.models.word2vec import Word2Vec # noqa:F811 avoid referencing __main__ in pickle\n\n seterr(all='raise') # don't ignore numpy errors\n\n parser = argparse.ArgumentParser()\n parser.add_argument(\"-train\", help=\"Use text data from file TRAIN to train the model\", required=True)\n parser.add_argument(\"-output\", help=\"Use file OUTPUT to save the resulting word vectors\")\n parser.add_argument(\"-window\", help=\"Set max skip length WINDOW between words; default is 5\", type=int, default=5)\n parser.add_argument(\"-size\", help=\"Set size of word vectors; default is 100\", type=int, default=100)\n parser.add_argument(\n \"-sample\",\n help=\"Set threshold for occurrence of words. 
\"\n \"Those that appear with higher frequency in the training data will be randomly down-sampled;\"\n \" default is 1e-3, useful range is (0, 1e-5)\",\n type=float, default=1e-3\n )\n parser.add_argument(\n \"-hs\", help=\"Use Hierarchical Softmax; default is 0 (not used)\",\n type=int, default=0, choices=[0, 1]\n )\n parser.add_argument(\n \"-negative\", help=\"Number of negative examples; default is 5, common values are 3 - 10 (0 = not used)\",\n type=int, default=5\n )\n parser.add_argument(\"-threads\", help=\"Use THREADS threads (default 12)\", type=int, default=12)\n parser.add_argument(\"-iter\", help=\"Run more training iterations (default 5)\", type=int, default=5)\n parser.add_argument(\n \"-min_count\", help=\"This will discard words that appear less than MIN_COUNT times; default is 5\",\n type=int, default=5\n )\n parser.add_argument(\n \"-cbow\", help=\"Use the continuous bag of words model; default is 1 (use 0 for skip-gram model)\",\n type=int, default=1, choices=[0, 1]\n )\n parser.add_argument(\n \"-binary\", help=\"Save the resulting vectors in binary mode; default is 0 (off)\",\n type=int, default=0, choices=[0, 1]\n )\n parser.add_argument(\"-accuracy\", help=\"Use questions from file ACCURACY to evaluate the model\")\n\n args = parser.parse_args()\n\n if args.cbow == 0:\n skipgram = 1\n else:\n skipgram = 0\n\n corpus = LineSentence(args.train)\n\n model = Word2Vec(\n corpus, size=args.size, min_count=args.min_count, workers=args.threads,\n window=args.window, sample=args.sample, sg=skipgram, hs=args.hs,\n negative=args.negative, cbow_mean=1, iter=args.iter\n )\n\n if args.output:\n outfile = args.output\n model.wv.save_word2vec_format(outfile, binary=args.binary)\n else:\n outfile = args.train\n model.save(outfile + '.model')\n if args.binary == 1:\n model.wv.save_word2vec_format(outfile + '.model.bin', binary=True)\n else:\n model.wv.save_word2vec_format(outfile + '.model.txt', binary=False)\n\n if args.accuracy:\n model.accuracy(args.accuracy)\n\n logger.info(\"finished running %s\", program)\n",
"from gensim.models import Word2Vec\n\nsentences = [\n ['this', 'is', 'a', 'good', 'product'],\n ['it', 'is', 'a', 'excellent', 'product'],\n ['it', 'is', 'a', 'bad', 'product'],\n ['that', 'is', 'the', 'worst', 'product']\n ]\n\n# 문장을 이용하여 단어와 벡터를 생성한다.\nmodel = Word2Vec(sentences, size=300, window=3, min_count=1, workers=1)\n\n\n\n\nfile_name = 'GoogleNews-vectors-negative300.bin'\nmodel.intersect_word2vec_format(fname=file_name, binary=True)\n\n# 단어벡터를 구한다.\nword_vectors = model.wv\n\nvocabs = word_vectors.vocab.keys()\nword_vectors_list = [word_vectors[v] for v in vocabs]\n\n# 단어간 유사도를 확인하다\nprint(word_vectors.similarity(w1='it', w2='this'))\n\nfrom sklearn.decomposition import PCA\npca = PCA(n_components=2)\nxys = pca.fit_transform(word_vectors_list)\nxs = xys[:,0]\nys = xys[:,1]\n\n\n# 최종 모델을 저장한다.\nmodel.save('word2vec.model')\n\n# 저장한 모델을 읽어서 이용한다.\nmodel = Word2Vec.load('word2vec.model')"
] |
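The record above pairs gensim's word2vec.py source with a short usage script. As an illustration only (not part of the dataset record), the sketch below shows the same workflow end to end using the gensim 3.x-era keyword names that appear in the dumped source (size=, iter=): train on toy sentences, query similarity, and finish the 2-D PCA projection that the dumped script computes (xs, ys) for but never plots. All concrete values here (size=50, seed=42, iter=50) are arbitrary choices for the illustration.

```python
# Minimal sketch, assuming the gensim 3.x API shown in the dumped word2vec.py
# (newer gensim renamed size= to vector_size= and iter= to epochs=).
import matplotlib.pyplot as plt
from sklearn.decomposition import PCA
from gensim.models import Word2Vec

sentences = [
    ['this', 'is', 'a', 'good', 'product'],
    ['it', 'is', 'an', 'excellent', 'product'],
    ['it', 'is', 'a', 'bad', 'product'],
    ['that', 'is', 'the', 'worst', 'product'],
]

# min_count=1 keeps every token; workers=1 plus a fixed seed keeps the run reproducible.
model = Word2Vec(sentences, size=50, window=3, min_count=1, workers=1, seed=42, iter=50)

print(model.wv.similarity('good', 'excellent'))   # cosine similarity of two in-vocab words
print(model.wv.most_similar('product', topn=3))   # nearest neighbours of 'product'

# 2-D PCA projection of all word vectors, labelled by word.
words = list(model.wv.vocab.keys())
vectors = [model.wv[w] for w in words]
xys = PCA(n_components=2).fit_transform(vectors)
plt.scatter(xys[:, 0], xys[:, 1])
for (x, y), w in zip(xys, words):
    plt.annotate(w, (x, y))
plt.show()
```

With a corpus this small the projection is only a smoke test; as in the record's script, merging pretrained vectors via intersect_word2vec_format is what makes the vectors meaningful.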
[
[
"numpy.dot",
"numpy.sqrt",
"scipy.special.expit",
"numpy.vstack",
"numpy.dtype",
"numpy.seterr",
"numpy.float32",
"numpy.outer",
"numpy.random.RandomState",
"numpy.zeros",
"numpy.sum",
"numpy.empty"
],
[
"sklearn.decomposition.PCA"
]
] |
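The API list above (numpy.dot, numpy.outer, scipy.special.expit, numpy.zeros, ...) reflects word2vec.py's pure-Python training path. As a hedged, schematic sketch only, not gensim's actual implementation, the snippet below shows how those calls combine in a single negative-sampling update; the array names (syn0, syn1neg) mirror the dumped source, while the helper name negative_sampling_step and all constants are invented for the illustration.

```python
import numpy as np
from scipy.special import expit  # numerically stable sigmoid

def negative_sampling_step(syn0, syn1neg, center_idx, context_idx, noise_idx, alpha=0.025):
    """Update one input vector and its positive/negative output vectors in place."""
    l1 = syn0[center_idx]                           # input (projection-layer) vector
    word_indices = [context_idx] + list(noise_idx)  # 1 positive + k negative samples
    labels = np.zeros(len(word_indices))
    labels[0] = 1.0                                 # only the true context word is labelled 1
    l2 = syn1neg[word_indices]                      # output vectors of the sampled words
    prob = expit(np.dot(l2, l1))                    # sigmoid of dot products
    g = (labels - prob) * alpha                     # error signal scaled by learning rate
    syn0[center_idx] += np.dot(g, l2)               # propagate error back to the input vector
    syn1neg[word_indices] += np.outer(g, l1)        # update the sampled output vectors

# Toy usage: 10-word vocabulary, 8-dimensional vectors.
rng = np.random.RandomState(0)
syn0 = (rng.rand(10, 8) - 0.5) / 8
syn1neg = np.zeros((10, 8))
negative_sampling_step(syn0, syn1neg, center_idx=3, context_idx=5, noise_idx=[1, 7, 9])
```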
[
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
jsexauer/pandas
|
[
"85703a7806ade5394fe511b8c433cdfca5428593"
] |
[
"pandas/tests/test_frame.py"
] |
[
"# -*- coding: utf-8 -*-\n\nfrom __future__ import print_function\n# pylint: disable-msg=W0612,E1101\nfrom copy import deepcopy\nfrom datetime import datetime, timedelta, time\nimport sys\nimport operator\nimport re\nimport csv\nimport nose\nimport functools\nimport itertools\nfrom itertools import product, permutations\nfrom distutils.version import LooseVersion\n\nfrom pandas.compat import(\n map, zip, range, long, lrange, lmap, lzip,\n OrderedDict, u, StringIO\n)\nfrom pandas import compat\n\nfrom numpy import random, nan, inf\nfrom numpy.random import randn\nimport numpy as np\nimport numpy.ma as ma\nfrom numpy.testing import assert_array_equal\nimport numpy.ma.mrecords as mrecords\n\nimport pandas.core.nanops as nanops\nimport pandas.core.common as com\nimport pandas.core.format as fmt\nimport pandas.core.datetools as datetools\nfrom pandas import (DataFrame, Index, Series, notnull, isnull,\n MultiIndex, DatetimeIndex, Timestamp, date_range,\n read_csv, timedelta_range, Timedelta,\n option_context)\nimport pandas as pd\nfrom pandas.parser import CParserError\nfrom pandas.util.misc import is_little_endian\n\nfrom pandas.util.testing import (assert_almost_equal,\n assert_series_equal,\n assert_frame_equal,\n assertRaisesRegexp,\n assertRaises,\n makeCustomDataframe as mkdf,\n ensure_clean)\nfrom pandas.core.indexing import IndexingError\nfrom pandas.core.common import PandasError\n\nimport pandas.util.testing as tm\nimport pandas.lib as lib\n\nfrom numpy.testing.decorators import slow\n\n#---------------------------------------------------------------------\n# DataFrame test cases\n\nJOIN_TYPES = ['inner', 'outer', 'left', 'right']\nMIXED_FLOAT_DTYPES = ['float16','float32','float64']\nMIXED_INT_DTYPES = ['uint8','uint16','uint32','uint64','int8','int16',\n 'int32','int64']\n\ndef _check_mixed_float(df, dtype = None):\n\n # float16 are most likely to be upcasted to float32\n dtypes = dict(A = 'float32', B = 'float32', C = 'float16', D = 'float64')\n if isinstance(dtype, compat.string_types):\n dtypes = dict([ (k,dtype) for k, v in dtypes.items() ])\n elif isinstance(dtype, dict):\n dtypes.update(dtype)\n if dtypes.get('A'):\n assert(df.dtypes['A'] == dtypes['A'])\n if dtypes.get('B'):\n assert(df.dtypes['B'] == dtypes['B'])\n if dtypes.get('C'):\n assert(df.dtypes['C'] == dtypes['C'])\n if dtypes.get('D'):\n assert(df.dtypes['D'] == dtypes['D'])\n\n\ndef _check_mixed_int(df, dtype = None):\n dtypes = dict(A = 'int32', B = 'uint64', C = 'uint8', D = 'int64')\n if isinstance(dtype, compat.string_types):\n dtypes = dict([ (k,dtype) for k, v in dtypes.items() ])\n elif isinstance(dtype, dict):\n dtypes.update(dtype)\n if dtypes.get('A'):\n assert(df.dtypes['A'] == dtypes['A'])\n if dtypes.get('B'):\n assert(df.dtypes['B'] == dtypes['B'])\n if dtypes.get('C'):\n assert(df.dtypes['C'] == dtypes['C'])\n if dtypes.get('D'):\n assert(df.dtypes['D'] == dtypes['D'])\n\n\nclass CheckIndexing(object):\n\n _multiprocess_can_split_ = True\n\n def test_getitem(self):\n # slicing\n sl = self.frame[:20]\n self.assertEqual(20, len(sl.index))\n\n # column access\n\n for _, series in compat.iteritems(sl):\n self.assertEqual(20, len(series.index))\n self.assertTrue(tm.equalContents(series.index, sl.index))\n\n for key, _ in compat.iteritems(self.frame._series):\n self.assertIsNotNone(self.frame[key])\n\n self.assertNotIn('random', self.frame)\n with assertRaisesRegexp(KeyError, 'random'):\n self.frame['random']\n\n df = self.frame.copy()\n df['$10'] = randn(len(df))\n ad = randn(len(df))\n 
df['@awesome_domain'] = ad\n self.assertRaises(KeyError, df.__getitem__, 'df[\"$10\"]')\n res = df['@awesome_domain']\n assert_array_equal(ad, res.values)\n\n def test_getitem_dupe_cols(self):\n df = DataFrame([[1, 2, 3], [4, 5, 6]], columns=['a', 'a', 'b'])\n try:\n df[['baf']]\n except KeyError:\n pass\n else:\n self.fail(\"Dataframe failed to raise KeyError\")\n\n def test_get(self):\n b = self.frame.get('B')\n assert_series_equal(b, self.frame['B'])\n\n self.assertIsNone(self.frame.get('foo'))\n assert_series_equal(self.frame.get('foo', self.frame['B']),\n self.frame['B'])\n # None\n # GH 5652\n for df in [DataFrame(), DataFrame(columns=list('AB')), DataFrame(columns=list('AB'),index=range(3)) ]:\n result = df.get(None)\n self.assertIsNone(result)\n\n def test_getitem_iterator(self):\n idx = iter(['A', 'B', 'C'])\n result = self.frame.ix[:, idx]\n expected = self.frame.ix[:, ['A', 'B', 'C']]\n assert_frame_equal(result, expected)\n\n def test_getitem_list(self):\n self.frame.columns.name = 'foo'\n\n result = self.frame[['B', 'A']]\n result2 = self.frame[Index(['B', 'A'])]\n\n expected = self.frame.ix[:, ['B', 'A']]\n expected.columns.name = 'foo'\n\n assert_frame_equal(result, expected)\n assert_frame_equal(result2, expected)\n\n self.assertEqual(result.columns.name, 'foo')\n\n with assertRaisesRegexp(KeyError, 'not in index'):\n self.frame[['B', 'A', 'food']]\n with assertRaisesRegexp(KeyError, 'not in index'):\n self.frame[Index(['B', 'A', 'foo'])]\n\n # tuples\n df = DataFrame(randn(8, 3),\n columns=Index([('foo', 'bar'), ('baz', 'qux'),\n ('peek', 'aboo')], name=['sth', 'sth2']))\n\n result = df[[('foo', 'bar'), ('baz', 'qux')]]\n expected = df.ix[:, :2]\n assert_frame_equal(result, expected)\n self.assertEqual(result.columns.names, ['sth', 'sth2'])\n\n def test_setitem_list(self):\n\n self.frame['E'] = 'foo'\n data = self.frame[['A', 'B']]\n self.frame[['B', 'A']] = data\n\n assert_series_equal(self.frame['B'], data['A'])\n assert_series_equal(self.frame['A'], data['B'])\n\n with assertRaisesRegexp(ValueError, 'Columns must be same length as key'):\n data[['A']] = self.frame[['A', 'B']]\n with assertRaisesRegexp(ValueError, 'Length of values does not match '\n 'length of index'):\n data['A'] = range(len(data.index) - 1)\n\n df = DataFrame(0, lrange(3), ['tt1', 'tt2'], dtype=np.int_)\n df.ix[1, ['tt1', 'tt2']] = [1, 2]\n\n result = df.ix[1, ['tt1', 'tt2']]\n expected = Series([1, 2], df.columns, dtype=np.int_)\n assert_series_equal(result, expected)\n\n df['tt1'] = df['tt2'] = '0'\n df.ix[1, ['tt1', 'tt2']] = ['1', '2']\n result = df.ix[1, ['tt1', 'tt2']]\n expected = Series(['1', '2'], df.columns)\n assert_series_equal(result, expected)\n\n def test_setitem_list_not_dataframe(self):\n data = np.random.randn(len(self.frame), 2)\n self.frame[['A', 'B']] = data\n assert_almost_equal(self.frame[['A', 'B']].values, data)\n\n def test_setitem_list_of_tuples(self):\n tuples = lzip(self.frame['A'], self.frame['B'])\n self.frame['tuples'] = tuples\n\n result = self.frame['tuples']\n expected = Series(tuples, index=self.frame.index)\n assert_series_equal(result, expected)\n\n def test_setitem_mulit_index(self):\n # GH7655, test that assigning to a sub-frame of a frame\n # with multi-index columns aligns both rows and columns\n it = ['jim', 'joe', 'jolie'], ['first', 'last'], \\\n ['left', 'center', 'right']\n\n cols = MultiIndex.from_product(it)\n index = pd.date_range('20141006',periods=20)\n vals = np.random.randint(1, 1000, (len(index), len(cols)))\n df = pd.DataFrame(vals, columns=cols, 
index=index)\n\n i, j = df.index.values.copy(), it[-1][:]\n\n np.random.shuffle(i)\n df['jim'] = df['jolie'].loc[i, ::-1]\n assert_frame_equal(df['jim'], df['jolie'])\n\n np.random.shuffle(j)\n df[('joe', 'first')] = df[('jolie', 'last')].loc[i, j]\n assert_frame_equal(df[('joe', 'first')], df[('jolie', 'last')])\n\n np.random.shuffle(j)\n df[('joe', 'last')] = df[('jolie', 'first')].loc[i, j]\n assert_frame_equal(df[('joe', 'last')], df[('jolie', 'first')])\n\n def test_inplace_ops_alignment(self):\n\n # inplace ops / ops alignment\n # GH 8511\n\n columns = list('abcdefg')\n X_orig = DataFrame(np.arange(10*len(columns)).reshape(-1,len(columns)), columns=columns, index=range(10))\n Z = 100*X_orig.iloc[:,1:-1].copy()\n block1 = list('bedcf')\n subs = list('bcdef')\n\n # add\n X = X_orig.copy()\n result1 = (X[block1] + Z).reindex(columns=subs)\n\n X[block1] += Z\n result2 = X.reindex(columns=subs)\n\n X = X_orig.copy()\n result3 = (X[block1] + Z[block1]).reindex(columns=subs)\n\n X[block1] += Z[block1]\n result4 = X.reindex(columns=subs)\n\n assert_frame_equal(result1, result2)\n assert_frame_equal(result1, result3)\n assert_frame_equal(result1, result4)\n\n # sub\n X = X_orig.copy()\n result1 = (X[block1] - Z).reindex(columns=subs)\n\n X[block1] -= Z\n result2 = X.reindex(columns=subs)\n\n X = X_orig.copy()\n result3 = (X[block1] - Z[block1]).reindex(columns=subs)\n\n X[block1] -= Z[block1]\n result4 = X.reindex(columns=subs)\n\n assert_frame_equal(result1, result2)\n assert_frame_equal(result1, result3)\n assert_frame_equal(result1, result4)\n\n def test_inplace_ops_identity(self):\n\n # GH 5104\n # make sure that we are actually changing the object\n s_orig = Series([1, 2, 3])\n df_orig = DataFrame(np.random.randint(0,5,size=10).reshape(-1,5))\n\n # no dtype change\n s = s_orig.copy()\n s2 = s\n s += 1\n assert_series_equal(s,s2)\n assert_series_equal(s_orig+1,s)\n self.assertIs(s,s2)\n self.assertIs(s._data,s2._data)\n\n df = df_orig.copy()\n df2 = df\n df += 1\n assert_frame_equal(df,df2)\n assert_frame_equal(df_orig+1,df)\n self.assertIs(df,df2)\n self.assertIs(df._data,df2._data)\n\n # dtype change\n s = s_orig.copy()\n s2 = s\n s += 1.5\n assert_series_equal(s,s2)\n assert_series_equal(s_orig+1.5,s)\n\n df = df_orig.copy()\n df2 = df\n df += 1.5\n assert_frame_equal(df,df2)\n assert_frame_equal(df_orig+1.5,df)\n self.assertIs(df,df2)\n self.assertIs(df._data,df2._data)\n\n # mixed dtype\n arr = np.random.randint(0,10,size=5)\n df_orig = DataFrame({'A' : arr.copy(), 'B' : 'foo'})\n df = df_orig.copy()\n df2 = df\n df['A'] += 1\n expected = DataFrame({'A' : arr.copy()+1, 'B' : 'foo'})\n assert_frame_equal(df,expected)\n assert_frame_equal(df2,expected)\n self.assertIs(df._data,df2._data)\n\n df = df_orig.copy()\n df2 = df\n df['A'] += 1.5\n expected = DataFrame({'A' : arr.copy()+1.5, 'B' : 'foo'})\n assert_frame_equal(df,expected)\n assert_frame_equal(df2,expected)\n self.assertIs(df._data,df2._data)\n\n def test_getitem_boolean(self):\n # boolean indexing\n d = self.tsframe.index[10]\n indexer = self.tsframe.index > d\n indexer_obj = indexer.astype(object)\n\n subindex = self.tsframe.index[indexer]\n subframe = self.tsframe[indexer]\n\n self.assert_numpy_array_equal(subindex, subframe.index)\n with assertRaisesRegexp(ValueError, 'Item wrong length'):\n self.tsframe[indexer[:-1]]\n\n subframe_obj = self.tsframe[indexer_obj]\n assert_frame_equal(subframe_obj, subframe)\n\n with tm.assertRaisesRegexp(ValueError, 'boolean values only'):\n self.tsframe[self.tsframe]\n\n # test that Series 
work\n indexer_obj = Series(indexer_obj, self.tsframe.index)\n\n subframe_obj = self.tsframe[indexer_obj]\n assert_frame_equal(subframe_obj, subframe)\n\n # test that Series indexers reindex\n import warnings\n warnings.filterwarnings(action='ignore', category=UserWarning)\n\n indexer_obj = indexer_obj.reindex(self.tsframe.index[::-1])\n\n subframe_obj = self.tsframe[indexer_obj]\n assert_frame_equal(subframe_obj, subframe)\n\n warnings.filterwarnings(action='default', category=UserWarning)\n\n # test df[df > 0]\n for df in [ self.tsframe, self.mixed_frame, self.mixed_float, self.mixed_int ]:\n\n data = df._get_numeric_data()\n bif = df[df > 0]\n bifw = DataFrame(dict([ (c,np.where(data[c] > 0, data[c], np.nan)) for c in data.columns ]),\n index=data.index, columns=data.columns)\n\n # add back other columns to compare\n for c in df.columns:\n if c not in bifw:\n bifw[c] = df[c]\n bifw = bifw.reindex(columns = df.columns)\n\n assert_frame_equal(bif, bifw, check_dtype=False)\n for c in df.columns:\n if bif[c].dtype != bifw[c].dtype:\n self.assertEqual(bif[c].dtype, df[c].dtype)\n\n def test_getitem_boolean_casting(self):\n\n # don't upcast if we don't need to\n df = self.tsframe.copy()\n df['E'] = 1\n df['E'] = df['E'].astype('int32')\n df['E1'] = df['E'].copy()\n df['F'] = 1\n df['F'] = df['F'].astype('int64')\n df['F1'] = df['F'].copy()\n\n casted = df[df>0]\n result = casted.get_dtype_counts()\n expected = Series({'float64': 4, 'int32' : 2, 'int64' : 2})\n assert_series_equal(result, expected)\n\n # int block splitting\n df.ix[1:3,['E1','F1']] = 0\n casted = df[df>0]\n result = casted.get_dtype_counts()\n expected = Series({'float64': 6, 'int32' : 1, 'int64' : 1})\n assert_series_equal(result, expected)\n\n # where dtype conversions\n # GH 3733\n df = DataFrame(data = np.random.randn(100, 50))\n df = df.where(df > 0) # create nans\n bools = df > 0\n mask = isnull(df)\n expected = bools.astype(float).mask(mask)\n result = bools.mask(mask)\n assert_frame_equal(result,expected)\n\n def test_getitem_boolean_list(self):\n df = DataFrame(np.arange(12).reshape(3, 4))\n\n def _checkit(lst):\n result = df[lst]\n expected = df.ix[df.index[lst]]\n assert_frame_equal(result, expected)\n\n _checkit([True, False, True])\n _checkit([True, True, True])\n _checkit([False, False, False])\n\n def test_getitem_boolean_iadd(self):\n arr = randn(5, 5)\n\n df = DataFrame(arr.copy(), columns = ['A','B','C','D','E'])\n\n df[df < 0] += 1\n arr[arr < 0] += 1\n\n assert_almost_equal(df.values, arr)\n\n def test_boolean_index_empty_corner(self):\n # #2096\n blah = DataFrame(np.empty([0, 1]), columns=['A'],\n index=DatetimeIndex([]))\n\n # both of these should succeed trivially\n k = np.array([], bool)\n\n blah[k]\n blah[k] = 0\n\n def test_getitem_ix_mixed_integer(self):\n df = DataFrame(np.random.randn(4, 3),\n index=[1, 10, 'C', 'E'], columns=[1, 2, 3])\n\n result = df.ix[:-1]\n expected = df.ix[df.index[:-1]]\n assert_frame_equal(result, expected)\n\n result = df.ix[[1, 10]]\n expected = df.ix[Index([1, 10], dtype=object)]\n assert_frame_equal(result, expected)\n\n def test_getitem_setitem_ix_negative_integers(self):\n result = self.frame.ix[:, -1]\n assert_series_equal(result, self.frame['D'])\n\n result = self.frame.ix[:, [-1]]\n assert_frame_equal(result, self.frame[['D']])\n\n result = self.frame.ix[:, [-1, -2]]\n assert_frame_equal(result, self.frame[['D', 'C']])\n\n self.frame.ix[:, [-1]] = 0\n self.assertTrue((self.frame['D'] == 0).all())\n\n df = DataFrame(np.random.randn(8, 4))\n 
self.assertTrue(isnull(df.ix[:, [-1]].values).all())\n\n # #1942\n a = DataFrame(randn(20, 2), index=[chr(x + 65) for x in range(20)])\n a.ix[-1] = a.ix[-2]\n\n assert_series_equal(a.ix[-1], a.ix[-2])\n\n def test_getattr(self):\n tm.assert_series_equal(self.frame.A, self.frame['A'])\n self.assertRaises(AttributeError, getattr, self.frame,\n 'NONEXISTENT_NAME')\n\n def test_setattr_column(self):\n df = DataFrame({'foobar': 1}, index=lrange(10))\n\n df.foobar = 5\n self.assertTrue((df.foobar == 5).all())\n\n def test_setitem(self):\n # not sure what else to do here\n series = self.frame['A'][::2]\n self.frame['col5'] = series\n self.assertIn('col5', self.frame)\n tm.assert_dict_equal(series, self.frame['col5'],\n compare_keys=False)\n\n series = self.frame['A']\n self.frame['col6'] = series\n tm.assert_dict_equal(series, self.frame['col6'],\n compare_keys=False)\n\n with tm.assertRaises(KeyError):\n self.frame[randn(len(self.frame) + 1)] = 1\n\n # set ndarray\n arr = randn(len(self.frame))\n self.frame['col9'] = arr\n self.assertTrue((self.frame['col9'] == arr).all())\n\n self.frame['col7'] = 5\n assert((self.frame['col7'] == 5).all())\n\n self.frame['col0'] = 3.14\n assert((self.frame['col0'] == 3.14).all())\n\n self.frame['col8'] = 'foo'\n assert((self.frame['col8'] == 'foo').all())\n\n # this is partially a view (e.g. some blocks are view)\n # so raise/warn\n smaller = self.frame[:2]\n def f():\n smaller['col10'] = ['1', '2']\n self.assertRaises(com.SettingWithCopyError, f)\n self.assertEqual(smaller['col10'].dtype, np.object_)\n self.assertTrue((smaller['col10'] == ['1', '2']).all())\n\n # with a dtype\n for dtype in ['int32','int64','float32','float64']:\n self.frame[dtype] = np.array(arr,dtype=dtype)\n self.assertEqual(self.frame[dtype].dtype.name, dtype)\n\n # dtype changing GH4204\n df = DataFrame([[0,0]])\n df.iloc[0] = np.nan\n expected = DataFrame([[np.nan,np.nan]])\n assert_frame_equal(df,expected)\n\n df = DataFrame([[0,0]])\n df.loc[0] = np.nan\n assert_frame_equal(df,expected)\n\n def test_setitem_tuple(self):\n self.frame['A', 'B'] = self.frame['A']\n assert_series_equal(self.frame['A', 'B'], self.frame['A'])\n\n def test_setitem_always_copy(self):\n s = self.frame['A'].copy()\n self.frame['E'] = s\n\n self.frame['E'][5:10] = nan\n self.assertTrue(notnull(s[5:10]).all())\n\n def test_setitem_boolean(self):\n df = self.frame.copy()\n values = self.frame.values\n\n df[df['A'] > 0] = 4\n values[values[:, 0] > 0] = 4\n assert_almost_equal(df.values, values)\n\n # test that column reindexing works\n series = df['A'] == 4\n series = series.reindex(df.index[::-1])\n df[series] = 1\n values[values[:, 0] == 4] = 1\n assert_almost_equal(df.values, values)\n\n df[df > 0] = 5\n values[values > 0] = 5\n assert_almost_equal(df.values, values)\n\n df[df == 5] = 0\n values[values == 5] = 0\n assert_almost_equal(df.values, values)\n\n # a df that needs alignment first\n df[df[:-1] < 0] = 2\n np.putmask(values[:-1], values[:-1] < 0, 2)\n assert_almost_equal(df.values, values)\n\n # indexed with same shape but rows-reversed df\n df[df[::-1] == 2] = 3\n values[values == 2] = 3\n assert_almost_equal(df.values, values)\n\n with assertRaisesRegexp(TypeError, 'Must pass DataFrame with boolean '\n 'values only'):\n df[df * 0] = 2\n\n # index with DataFrame\n mask = df > np.abs(df)\n expected = df.copy()\n df[df > np.abs(df)] = nan\n expected.values[mask.values] = nan\n assert_frame_equal(df, expected)\n\n # set from DataFrame\n expected = df.copy()\n df[df > np.abs(df)] = df * 2\n 
np.putmask(expected.values, mask.values, df.values * 2)\n assert_frame_equal(df, expected)\n\n def test_setitem_cast(self):\n self.frame['D'] = self.frame['D'].astype('i8')\n self.assertEqual(self.frame['D'].dtype, np.int64)\n\n # #669, should not cast?\n # this is now set to int64, which means a replacement of the column to\n # the value dtype (and nothing to do with the existing dtype)\n self.frame['B'] = 0\n self.assertEqual(self.frame['B'].dtype, np.int64)\n\n # cast if pass array of course\n self.frame['B'] = np.arange(len(self.frame))\n self.assertTrue(issubclass(self.frame['B'].dtype.type, np.integer))\n\n self.frame['foo'] = 'bar'\n self.frame['foo'] = 0\n self.assertEqual(self.frame['foo'].dtype, np.int64)\n\n self.frame['foo'] = 'bar'\n self.frame['foo'] = 2.5\n self.assertEqual(self.frame['foo'].dtype, np.float64)\n\n self.frame['something'] = 0\n self.assertEqual(self.frame['something'].dtype, np.int64)\n self.frame['something'] = 2\n self.assertEqual(self.frame['something'].dtype, np.int64)\n self.frame['something'] = 2.5\n self.assertEqual(self.frame['something'].dtype, np.float64)\n\n # GH 7704\n # dtype conversion on setting\n df = DataFrame(np.random.rand(30, 3), columns=tuple('ABC'))\n df['event'] = np.nan\n df.loc[10,'event'] = 'foo'\n result = df.get_dtype_counts().order()\n expected = Series({'float64' : 3, 'object' : 1 }).order()\n assert_series_equal(result, expected)\n\n def test_setitem_boolean_column(self):\n expected = self.frame.copy()\n mask = self.frame['A'] > 0\n\n self.frame.ix[mask, 'B'] = 0\n expected.values[mask.values, 1] = 0\n\n assert_frame_equal(self.frame, expected)\n\n def test_setitem_corner(self):\n # corner case\n df = DataFrame({'B': [1., 2., 3.],\n 'C': ['a', 'b', 'c']},\n index=np.arange(3))\n del df['B']\n df['B'] = [1., 2., 3.]\n self.assertIn('B', df)\n self.assertEqual(len(df.columns), 2)\n\n df['A'] = 'beginning'\n df['E'] = 'foo'\n df['D'] = 'bar'\n df[datetime.now()] = 'date'\n df[datetime.now()] = 5.\n\n # what to do when empty frame with index\n dm = DataFrame(index=self.frame.index)\n dm['A'] = 'foo'\n dm['B'] = 'bar'\n self.assertEqual(len(dm.columns), 2)\n self.assertEqual(dm.values.dtype, np.object_)\n\n # upcast\n dm['C'] = 1\n self.assertEqual(dm['C'].dtype, np.int64)\n\n dm['E'] = 1.\n self.assertEqual(dm['E'].dtype, np.float64)\n\n # set existing column\n dm['A'] = 'bar'\n self.assertEqual('bar', dm['A'][0])\n\n dm = DataFrame(index=np.arange(3))\n dm['A'] = 1\n dm['foo'] = 'bar'\n del dm['foo']\n dm['foo'] = 'bar'\n self.assertEqual(dm['foo'].dtype, np.object_)\n\n dm['coercable'] = ['1', '2', '3']\n self.assertEqual(dm['coercable'].dtype, np.object_)\n\n def test_setitem_corner2(self):\n data = {\"title\": ['foobar', 'bar', 'foobar'] + ['foobar'] * 17,\n \"cruft\": np.random.random(20)}\n\n df = DataFrame(data)\n ix = df[df['title'] == 'bar'].index\n\n df.ix[ix, ['title']] = 'foobar'\n df.ix[ix, ['cruft']] = 0\n\n assert(df.ix[1, 'title'] == 'foobar')\n assert(df.ix[1, 'cruft'] == 0)\n\n def test_setitem_ambig(self):\n # difficulties with mixed-type data\n from decimal import Decimal\n\n # created as float type\n dm = DataFrame(index=lrange(3), columns=lrange(3))\n\n coercable_series = Series([Decimal(1) for _ in range(3)],\n index=lrange(3))\n uncoercable_series = Series(['foo', 'bzr', 'baz'], index=lrange(3))\n\n dm[0] = np.ones(3)\n self.assertEqual(len(dm.columns), 3)\n # self.assertIsNone(dm.objects)\n\n dm[1] = coercable_series\n self.assertEqual(len(dm.columns), 3)\n # self.assertIsNone(dm.objects)\n\n dm[2] = 
uncoercable_series\n self.assertEqual(len(dm.columns), 3)\n # self.assertIsNotNone(dm.objects)\n self.assertEqual(dm[2].dtype, np.object_)\n\n def test_setitem_clear_caches(self):\n # GH #304\n df = DataFrame({'x': [1.1, 2.1, 3.1, 4.1], 'y': [5.1, 6.1, 7.1, 8.1]},\n index=[0, 1, 2, 3])\n df.insert(2, 'z', np.nan)\n\n # cache it\n foo = df['z']\n\n df.ix[2:, 'z'] = 42\n\n expected = Series([np.nan, np.nan, 42, 42], index=df.index)\n self.assertIsNot(df['z'], foo)\n assert_series_equal(df['z'], expected)\n\n def test_setitem_None(self):\n # GH #766\n self.frame[None] = self.frame['A']\n assert_series_equal(self.frame.iloc[:,-1], self.frame['A'])\n assert_series_equal(self.frame.loc[:,None], self.frame['A'])\n assert_series_equal(self.frame[None], self.frame['A'])\n repr(self.frame)\n\n def test_delitem_corner(self):\n f = self.frame.copy()\n del f['D']\n self.assertEqual(len(f.columns), 3)\n self.assertRaises(KeyError, f.__delitem__, 'D')\n del f['B']\n self.assertEqual(len(f.columns), 2)\n\n def test_getitem_fancy_2d(self):\n f = self.frame\n ix = f.ix\n\n assert_frame_equal(ix[:, ['B', 'A']], f.reindex(columns=['B', 'A']))\n\n subidx = self.frame.index[[5, 4, 1]]\n assert_frame_equal(ix[subidx, ['B', 'A']],\n f.reindex(index=subidx, columns=['B', 'A']))\n\n # slicing rows, etc.\n assert_frame_equal(ix[5:10], f[5:10])\n assert_frame_equal(ix[5:10, :], f[5:10])\n assert_frame_equal(ix[:5, ['A', 'B']],\n f.reindex(index=f.index[:5], columns=['A', 'B']))\n\n # slice rows with labels, inclusive!\n expected = ix[5:11]\n result = ix[f.index[5]:f.index[10]]\n assert_frame_equal(expected, result)\n\n # slice columns\n assert_frame_equal(ix[:, :2], f.reindex(columns=['A', 'B']))\n\n # get view\n exp = f.copy()\n ix[5:10].values[:] = 5\n exp.values[5:10] = 5\n assert_frame_equal(f, exp)\n\n self.assertRaises(ValueError, ix.__getitem__, f > 0.5)\n\n def test_slice_floats(self):\n index = [52195.504153, 52196.303147, 52198.369883]\n df = DataFrame(np.random.rand(3, 2), index=index)\n\n s1 = df.ix[52195.1:52196.5]\n self.assertEqual(len(s1), 2)\n\n s1 = df.ix[52195.1:52196.6]\n self.assertEqual(len(s1), 2)\n\n s1 = df.ix[52195.1:52198.9]\n self.assertEqual(len(s1), 3)\n\n def test_getitem_fancy_slice_integers_step(self):\n df = DataFrame(np.random.randn(10, 5))\n\n # this is OK\n result = df.ix[:8:2]\n df.ix[:8:2] = np.nan\n self.assertTrue(isnull(df.ix[:8:2]).values.all())\n\n def test_getitem_setitem_integer_slice_keyerrors(self):\n df = DataFrame(np.random.randn(10, 5), index=lrange(0, 20, 2))\n\n # this is OK\n cp = df.copy()\n cp.ix[4:10] = 0\n self.assertTrue((cp.ix[4:10] == 0).values.all())\n\n # so is this\n cp = df.copy()\n cp.ix[3:11] = 0\n self.assertTrue((cp.ix[3:11] == 0).values.all())\n\n result = df.ix[4:10]\n result2 = df.ix[3:11]\n expected = df.reindex([4, 6, 8, 10])\n\n assert_frame_equal(result, expected)\n assert_frame_equal(result2, expected)\n\n # non-monotonic, raise KeyError\n df2 = df.iloc[lrange(5) + lrange(5, 10)[::-1]]\n self.assertRaises(KeyError, df2.ix.__getitem__, slice(3, 11))\n self.assertRaises(KeyError, df2.ix.__setitem__, slice(3, 11), 0)\n\n def test_setitem_fancy_2d(self):\n f = self.frame\n ix = f.ix\n\n # case 1\n frame = self.frame.copy()\n expected = frame.copy()\n frame.ix[:, ['B', 'A']] = 1\n expected['B'] = 1.\n expected['A'] = 1.\n assert_frame_equal(frame, expected)\n\n # case 2\n frame = self.frame.copy()\n frame2 = self.frame.copy()\n\n expected = frame.copy()\n\n subidx = self.frame.index[[5, 4, 1]]\n values = randn(3, 2)\n\n frame.ix[subidx, ['B', 
'A']] = values\n frame2.ix[[5, 4, 1], ['B', 'A']] = values\n\n expected['B'].ix[subidx] = values[:, 0]\n expected['A'].ix[subidx] = values[:, 1]\n\n assert_frame_equal(frame, expected)\n assert_frame_equal(frame2, expected)\n\n # case 3: slicing rows, etc.\n frame = self.frame.copy()\n\n expected1 = self.frame.copy()\n frame.ix[5:10] = 1.\n expected1.values[5:10] = 1.\n assert_frame_equal(frame, expected1)\n\n expected2 = self.frame.copy()\n arr = randn(5, len(frame.columns))\n frame.ix[5:10] = arr\n expected2.values[5:10] = arr\n assert_frame_equal(frame, expected2)\n\n # case 4\n frame = self.frame.copy()\n frame.ix[5:10, :] = 1.\n assert_frame_equal(frame, expected1)\n frame.ix[5:10, :] = arr\n assert_frame_equal(frame, expected2)\n\n # case 5\n frame = self.frame.copy()\n frame2 = self.frame.copy()\n\n expected = self.frame.copy()\n values = randn(5, 2)\n\n frame.ix[:5, ['A', 'B']] = values\n expected['A'][:5] = values[:, 0]\n expected['B'][:5] = values[:, 1]\n assert_frame_equal(frame, expected)\n\n frame2.ix[:5, [0, 1]] = values\n assert_frame_equal(frame2, expected)\n\n # case 6: slice rows with labels, inclusive!\n frame = self.frame.copy()\n expected = self.frame.copy()\n\n frame.ix[frame.index[5]:frame.index[10]] = 5.\n expected.values[5:11] = 5\n assert_frame_equal(frame, expected)\n\n # case 7: slice columns\n frame = self.frame.copy()\n frame2 = self.frame.copy()\n expected = self.frame.copy()\n\n # slice indices\n frame.ix[:, 1:3] = 4.\n expected.values[:, 1:3] = 4.\n assert_frame_equal(frame, expected)\n\n # slice with labels\n frame.ix[:, 'B':'C'] = 4.\n assert_frame_equal(frame, expected)\n\n # new corner case of boolean slicing / setting\n frame = DataFrame(lzip([2, 3, 9, 6, 7], [np.nan] * 5),\n columns=['a', 'b'])\n lst = [100]\n lst.extend([np.nan] * 4)\n expected = DataFrame(lzip([100, 3, 9, 6, 7], lst),\n columns=['a', 'b'])\n frame[frame['a'] == 2] = 100\n assert_frame_equal(frame, expected)\n\n def test_fancy_getitem_slice_mixed(self):\n sliced = self.mixed_frame.ix[:, -3:]\n self.assertEqual(sliced['D'].dtype, np.float64)\n\n # get view with single block\n # setting it triggers setting with copy\n sliced = self.frame.ix[:, -3:]\n def f():\n sliced['C'] = 4.\n self.assertRaises(com.SettingWithCopyError, f)\n self.assertTrue((self.frame['C'] == 4).all())\n\n def test_fancy_setitem_int_labels(self):\n # integer index defers to label-based indexing\n\n df = DataFrame(np.random.randn(10, 5), index=np.arange(0, 20, 2))\n\n tmp = df.copy()\n exp = df.copy()\n tmp.ix[[0, 2, 4]] = 5\n exp.values[:3] = 5\n assert_frame_equal(tmp, exp)\n\n tmp = df.copy()\n exp = df.copy()\n tmp.ix[6] = 5\n exp.values[3] = 5\n assert_frame_equal(tmp, exp)\n\n tmp = df.copy()\n exp = df.copy()\n tmp.ix[:, 2] = 5\n\n # tmp correctly sets the dtype\n # so match the exp way\n exp[2] = 5\n assert_frame_equal(tmp, exp)\n\n def test_fancy_getitem_int_labels(self):\n df = DataFrame(np.random.randn(10, 5), index=np.arange(0, 20, 2))\n\n result = df.ix[[4, 2, 0], [2, 0]]\n expected = df.reindex(index=[4, 2, 0], columns=[2, 0])\n assert_frame_equal(result, expected)\n\n result = df.ix[[4, 2, 0]]\n expected = df.reindex(index=[4, 2, 0])\n assert_frame_equal(result, expected)\n\n result = df.ix[4]\n expected = df.xs(4)\n assert_series_equal(result, expected)\n\n result = df.ix[:, 3]\n expected = df[3]\n assert_series_equal(result, expected)\n\n def test_fancy_index_int_labels_exceptions(self):\n df = DataFrame(np.random.randn(10, 5), index=np.arange(0, 20, 2))\n\n # labels that aren't contained\n 
self.assertRaises(KeyError, df.ix.__setitem__,\n ([0, 1, 2], [2, 3, 4]), 5)\n\n # try to set indices not contained in frame\n self.assertRaises(KeyError,\n self.frame.ix.__setitem__,\n ['foo', 'bar', 'baz'], 1)\n self.assertRaises(KeyError,\n self.frame.ix.__setitem__,\n (slice(None, None), ['E']), 1)\n\n # partial setting now allows this GH2578\n #self.assertRaises(KeyError,\n # self.frame.ix.__setitem__,\n # (slice(None, None), 'E'), 1)\n\n def test_setitem_fancy_mixed_2d(self):\n self.mixed_frame.ix[:5, ['C', 'B', 'A']] = 5\n result = self.mixed_frame.ix[:5, ['C', 'B', 'A']]\n self.assertTrue((result.values == 5).all())\n\n self.mixed_frame.ix[5] = np.nan\n self.assertTrue(isnull(self.mixed_frame.ix[5]).all())\n\n self.mixed_frame.ix[5] = self.mixed_frame.ix[6]\n assert_series_equal(self.mixed_frame.ix[5], self.mixed_frame.ix[6])\n\n # #1432\n df = DataFrame({1: [1., 2., 3.],\n 2: [3, 4, 5]})\n self.assertTrue(df._is_mixed_type)\n\n df.ix[1] = [5, 10]\n\n expected = DataFrame({1: [1., 5., 3.],\n 2: [3, 10, 5]})\n\n assert_frame_equal(df, expected)\n\n def test_ix_align(self):\n b = Series(randn(10))\n b.sort()\n df_orig = DataFrame(randn(10, 4))\n df = df_orig.copy()\n\n df.ix[:, 0] = b\n assert_series_equal(df.ix[:, 0].reindex(b.index), b)\n\n dft = df_orig.T\n dft.ix[0, :] = b\n assert_series_equal(dft.ix[0, :].reindex(b.index), b)\n\n df = df_orig.copy()\n df.ix[:5, 0] = b\n s = df.ix[:5, 0]\n assert_series_equal(s, b.reindex(s.index))\n\n dft = df_orig.T\n dft.ix[0, :5] = b\n s = dft.ix[0, :5]\n assert_series_equal(s, b.reindex(s.index))\n\n df = df_orig.copy()\n idx = [0, 1, 3, 5]\n df.ix[idx, 0] = b\n s = df.ix[idx, 0]\n assert_series_equal(s, b.reindex(s.index))\n\n dft = df_orig.T\n dft.ix[0, idx] = b\n s = dft.ix[0, idx]\n assert_series_equal(s, b.reindex(s.index))\n\n def test_ix_frame_align(self):\n b = DataFrame(np.random.randn(3, 4))\n df_orig = DataFrame(randn(10, 4))\n df = df_orig.copy()\n\n df.ix[:3] = b\n out = b.ix[:3]\n assert_frame_equal(out, b)\n\n b.sort_index(inplace=True)\n\n df = df_orig.copy()\n df.ix[[0, 1, 2]] = b\n out = df.ix[[0, 1, 2]].reindex(b.index)\n assert_frame_equal(out, b)\n\n df = df_orig.copy()\n df.ix[:3] = b\n out = df.ix[:3]\n assert_frame_equal(out, b.reindex(out.index))\n\n def test_getitem_setitem_non_ix_labels(self):\n df = tm.makeTimeDataFrame()\n\n start, end = df.index[[5, 10]]\n\n result = df.ix[start:end]\n result2 = df[start:end]\n expected = df[5:11]\n assert_frame_equal(result, expected)\n assert_frame_equal(result2, expected)\n\n result = df.copy()\n result.ix[start:end] = 0\n result2 = df.copy()\n result2[start:end] = 0\n expected = df.copy()\n expected[5:11] = 0\n assert_frame_equal(result, expected)\n assert_frame_equal(result2, expected)\n\n def test_ix_multi_take(self):\n df = DataFrame(np.random.randn(3, 2))\n rs = df.ix[df.index == 0, :]\n xp = df.reindex([0])\n assert_frame_equal(rs, xp)\n\n \"\"\" #1321\n df = DataFrame(np.random.randn(3, 2))\n rs = df.ix[df.index==0, df.columns==1]\n xp = df.reindex([0], [1])\n assert_frame_equal(rs, xp)\n \"\"\"\n\n def test_ix_multi_take_nonint_index(self):\n df = DataFrame(np.random.randn(3, 2), index=['x', 'y', 'z'],\n columns=['a', 'b'])\n rs = df.ix[[0], [0]]\n xp = df.reindex(['x'], columns=['a'])\n assert_frame_equal(rs, xp)\n\n def test_ix_multi_take_multiindex(self):\n df = DataFrame(np.random.randn(3, 2), index=['x', 'y', 'z'],\n columns=[['a', 'b'], ['1', '2']])\n rs = df.ix[[0], [0]]\n xp = df.reindex(['x'], columns=[('a', '1')])\n assert_frame_equal(rs, xp)\n\n def 
test_ix_dup(self):\n idx = Index(['a', 'a', 'b', 'c', 'd', 'd'])\n df = DataFrame(np.random.randn(len(idx), 3), idx)\n\n sub = df.ix[:'d']\n assert_frame_equal(sub, df)\n\n sub = df.ix['a':'c']\n assert_frame_equal(sub, df.ix[0:4])\n\n sub = df.ix['b':'d']\n assert_frame_equal(sub, df.ix[2:])\n\n def test_getitem_fancy_1d(self):\n f = self.frame\n ix = f.ix\n\n # return self if no slicing...for now\n self.assertIs(ix[:, :], f)\n\n # low dimensional slice\n xs1 = ix[2, ['C', 'B', 'A']]\n xs2 = f.xs(f.index[2]).reindex(['C', 'B', 'A'])\n assert_series_equal(xs1, xs2)\n\n ts1 = ix[5:10, 2]\n ts2 = f[f.columns[2]][5:10]\n assert_series_equal(ts1, ts2)\n\n # positional xs\n xs1 = ix[0]\n xs2 = f.xs(f.index[0])\n assert_series_equal(xs1, xs2)\n\n xs1 = ix[f.index[5]]\n xs2 = f.xs(f.index[5])\n assert_series_equal(xs1, xs2)\n\n # single column\n assert_series_equal(ix[:, 'A'], f['A'])\n\n # return view\n exp = f.copy()\n exp.values[5] = 4\n ix[5][:] = 4\n assert_frame_equal(exp, f)\n\n exp.values[:, 1] = 6\n ix[:, 1][:] = 6\n assert_frame_equal(exp, f)\n\n # slice of mixed-frame\n xs = self.mixed_frame.ix[5]\n exp = self.mixed_frame.xs(self.mixed_frame.index[5])\n assert_series_equal(xs, exp)\n\n def test_setitem_fancy_1d(self):\n\n # case 1: set cross-section for indices\n frame = self.frame.copy()\n expected = self.frame.copy()\n\n frame.ix[2, ['C', 'B', 'A']] = [1., 2., 3.]\n expected['C'][2] = 1.\n expected['B'][2] = 2.\n expected['A'][2] = 3.\n assert_frame_equal(frame, expected)\n\n frame2 = self.frame.copy()\n frame2.ix[2, [3, 2, 1]] = [1., 2., 3.]\n assert_frame_equal(frame, expected)\n\n # case 2, set a section of a column\n frame = self.frame.copy()\n expected = self.frame.copy()\n\n vals = randn(5)\n expected.values[5:10, 2] = vals\n frame.ix[5:10, 2] = vals\n assert_frame_equal(frame, expected)\n\n frame2 = self.frame.copy()\n frame2.ix[5:10, 'B'] = vals\n assert_frame_equal(frame, expected)\n\n # case 3: full xs\n frame = self.frame.copy()\n expected = self.frame.copy()\n\n frame.ix[4] = 5.\n expected.values[4] = 5.\n assert_frame_equal(frame, expected)\n\n frame.ix[frame.index[4]] = 6.\n expected.values[4] = 6.\n assert_frame_equal(frame, expected)\n\n # single column\n frame = self.frame.copy()\n expected = self.frame.copy()\n\n frame.ix[:, 'A'] = 7.\n expected['A'] = 7.\n assert_frame_equal(frame, expected)\n\n def test_getitem_fancy_scalar(self):\n f = self.frame\n ix = f.ix\n # individual value\n for col in f.columns:\n ts = f[col]\n for idx in f.index[::5]:\n assert_almost_equal(ix[idx, col], ts[idx])\n\n def test_setitem_fancy_scalar(self):\n f = self.frame\n expected = self.frame.copy()\n ix = f.ix\n # individual value\n for j, col in enumerate(f.columns):\n ts = f[col]\n for idx in f.index[::5]:\n i = f.index.get_loc(idx)\n val = randn()\n expected.values[i, j] = val\n ix[idx, col] = val\n assert_frame_equal(f, expected)\n\n def test_getitem_fancy_boolean(self):\n f = self.frame\n ix = f.ix\n\n expected = f.reindex(columns=['B', 'D'])\n result = ix[:, [False, True, False, True]]\n assert_frame_equal(result, expected)\n\n expected = f.reindex(index=f.index[5:10], columns=['B', 'D'])\n result = ix[5:10, [False, True, False, True]]\n assert_frame_equal(result, expected)\n\n boolvec = f.index > f.index[7]\n expected = f.reindex(index=f.index[boolvec])\n result = ix[boolvec]\n assert_frame_equal(result, expected)\n result = ix[boolvec, :]\n assert_frame_equal(result, expected)\n\n result = ix[boolvec, 2:]\n expected = f.reindex(index=f.index[boolvec],\n columns=['C', 'D'])\n 
assert_frame_equal(result, expected)\n\n def test_setitem_fancy_boolean(self):\n # from 2d, set with booleans\n frame = self.frame.copy()\n expected = self.frame.copy()\n\n mask = frame['A'] > 0\n frame.ix[mask] = 0.\n expected.values[mask.values] = 0.\n assert_frame_equal(frame, expected)\n\n frame = self.frame.copy()\n expected = self.frame.copy()\n frame.ix[mask, ['A', 'B']] = 0.\n expected.values[mask.values, :2] = 0.\n assert_frame_equal(frame, expected)\n\n def test_getitem_fancy_ints(self):\n result = self.frame.ix[[1, 4, 7]]\n expected = self.frame.ix[self.frame.index[[1, 4, 7]]]\n assert_frame_equal(result, expected)\n\n result = self.frame.ix[:, [2, 0, 1]]\n expected = self.frame.ix[:, self.frame.columns[[2, 0, 1]]]\n assert_frame_equal(result, expected)\n\n def test_getitem_setitem_fancy_exceptions(self):\n ix = self.frame.ix\n with assertRaisesRegexp(IndexingError, 'Too many indexers'):\n ix[:, :, :]\n\n with assertRaises(IndexingError):\n ix[:, :, :] = 1\n\n def test_getitem_setitem_boolean_misaligned(self):\n # boolean index misaligned labels\n mask = self.frame['A'][::-1] > 1\n\n result = self.frame.ix[mask]\n expected = self.frame.ix[mask[::-1]]\n assert_frame_equal(result, expected)\n\n cp = self.frame.copy()\n expected = self.frame.copy()\n cp.ix[mask] = 0\n expected.ix[mask] = 0\n assert_frame_equal(cp, expected)\n\n def test_getitem_setitem_boolean_multi(self):\n df = DataFrame(np.random.randn(3, 2))\n\n # get\n k1 = np.array([True, False, True])\n k2 = np.array([False, True])\n result = df.ix[k1, k2]\n expected = df.ix[[0, 2], [1]]\n assert_frame_equal(result, expected)\n\n expected = df.copy()\n df.ix[np.array([True, False, True]),\n np.array([False, True])] = 5\n expected.ix[[0, 2], [1]] = 5\n assert_frame_equal(df, expected)\n\n def test_getitem_setitem_float_labels(self):\n index = Index([1.5, 2, 3, 4, 5])\n df = DataFrame(np.random.randn(5, 5), index=index)\n\n result = df.ix[1.5:4]\n expected = df.reindex([1.5, 2, 3, 4])\n assert_frame_equal(result, expected)\n self.assertEqual(len(result), 4)\n\n result = df.ix[4:5]\n expected = df.reindex([4, 5])\n assert_frame_equal(result, expected)\n self.assertEqual(len(result), 2)\n\n # loc_float changes this to work properly\n result = df.ix[1:2]\n expected = df.iloc[0:2]\n assert_frame_equal(result, expected)\n\n df.ix[1:2] = 0\n result = df[1:2]\n self.assertTrue((result==0).all().all())\n\n # #2727\n index = Index([1.0, 2.5, 3.5, 4.5, 5.0])\n df = DataFrame(np.random.randn(5, 5), index=index)\n\n # positional slicing only via iloc!\n with tm.assert_produces_warning(FutureWarning):\n result = df.iloc[1.0:5]\n\n expected = df.reindex([2.5, 3.5, 4.5, 5.0])\n assert_frame_equal(result, expected)\n self.assertEqual(len(result), 4)\n\n result = df.iloc[4:5]\n expected = df.reindex([5.0])\n assert_frame_equal(result, expected)\n self.assertEqual(len(result), 1)\n\n # GH 4892, float indexers in iloc are deprecated\n import warnings\n warnings.filterwarnings(action='error', category=FutureWarning)\n\n cp = df.copy()\n def f():\n cp.iloc[1.0:5] = 0\n self.assertRaises(FutureWarning, f)\n def f():\n result = cp.iloc[1.0:5] == 0\n self.assertRaises(FutureWarning, f)\n self.assertTrue(result.values.all())\n self.assertTrue((cp.iloc[0:1] == df.iloc[0:1]).values.all())\n\n warnings.filterwarnings(action='ignore', category=FutureWarning)\n\n cp = df.copy()\n cp.iloc[4:5] = 0\n self.assertTrue((cp.iloc[4:5] == 0).values.all())\n self.assertTrue((cp.iloc[0:4] == df.iloc[0:4]).values.all())\n\n # float slicing\n result = df.ix[1.0:5]\n 
expected = df\n assert_frame_equal(result, expected)\n self.assertEqual(len(result), 5)\n\n result = df.ix[1.1:5]\n expected = df.reindex([2.5, 3.5, 4.5, 5.0])\n assert_frame_equal(result, expected)\n self.assertEqual(len(result), 4)\n\n result = df.ix[4.51:5]\n expected = df.reindex([5.0])\n assert_frame_equal(result, expected)\n self.assertEqual(len(result), 1)\n\n result = df.ix[1.0:5.0]\n expected = df.reindex([1.0, 2.5, 3.5, 4.5, 5.0])\n assert_frame_equal(result, expected)\n self.assertEqual(len(result), 5)\n\n cp = df.copy()\n cp.ix[1.0:5.0] = 0\n result = cp.ix[1.0:5.0]\n self.assertTrue((result == 0).values.all())\n\n def test_setitem_single_column_mixed(self):\n df = DataFrame(randn(5, 3), index=['a', 'b', 'c', 'd', 'e'],\n columns=['foo', 'bar', 'baz'])\n df['str'] = 'qux'\n df.ix[::2, 'str'] = nan\n expected = [nan, 'qux', nan, 'qux', nan]\n assert_almost_equal(df['str'].values, expected)\n\n def test_setitem_single_column_mixed_datetime(self):\n df = DataFrame(randn(5, 3), index=['a', 'b', 'c', 'd', 'e'],\n columns=['foo', 'bar', 'baz'])\n\n df['timestamp'] = Timestamp('20010102')\n\n # check our dtypes\n result = df.get_dtype_counts()\n expected = Series({'float64': 3, 'datetime64[ns]': 1})\n assert_series_equal(result, expected)\n\n # set an allowable datetime64 type\n from pandas import tslib\n df.ix['b', 'timestamp'] = tslib.iNaT\n self.assertTrue(com.isnull(df.ix['b', 'timestamp']))\n\n # allow this syntax\n df.ix['c', 'timestamp'] = nan\n self.assertTrue(com.isnull(df.ix['c', 'timestamp']))\n\n # allow this syntax\n df.ix['d', :] = nan\n self.assertTrue(com.isnull(df.ix['c', :]).all() == False)\n\n # as of GH 3216 this will now work!\n # try to set with a list like item\n #self.assertRaises(\n # Exception, df.ix.__setitem__, ('d', 'timestamp'), [nan])\n\n def test_setitem_frame(self):\n piece = self.frame.ix[:2, ['A', 'B']]\n self.frame.ix[-2:, ['A', 'B']] = piece.values\n assert_almost_equal(self.frame.ix[-2:, ['A', 'B']].values,\n piece.values)\n\n # GH 3216\n\n # already aligned\n f = self.mixed_frame.copy()\n piece = DataFrame([[ 1, 2], [3, 4]], index=f.index[0:2],columns=['A', 'B'])\n key = (slice(None,2), ['A', 'B'])\n f.ix[key] = piece\n assert_almost_equal(f.ix[0:2, ['A', 'B']].values,\n piece.values)\n\n # rows unaligned\n f = self.mixed_frame.copy()\n piece = DataFrame([[ 1, 2 ], [3, 4], [5, 6], [7, 8]], index=list(f.index[0:2]) + ['foo','bar'],columns=['A', 'B'])\n key = (slice(None,2), ['A', 'B'])\n f.ix[key] = piece\n assert_almost_equal(f.ix[0:2:, ['A', 'B']].values,\n piece.values[0:2])\n\n # key is unaligned with values\n f = self.mixed_frame.copy()\n piece = f.ix[:2, ['A']]\n piece.index = f.index[-2:]\n key = (slice(-2, None), ['A', 'B'])\n f.ix[key] = piece\n piece['B'] = np.nan\n assert_almost_equal(f.ix[-2:, ['A', 'B']].values,\n piece.values)\n\n # ndarray\n f = self.mixed_frame.copy()\n piece = self.mixed_frame.ix[:2, ['A', 'B']]\n key = (slice(-2, None), ['A', 'B'])\n f.ix[key] = piece.values\n assert_almost_equal(f.ix[-2:, ['A', 'B']].values,\n piece.values)\n\n\n # needs upcasting\n df = DataFrame([[1,2,'foo'],[3,4,'bar']],columns=['A','B','C'])\n df2 = df.copy()\n df2.ix[:,['A','B']] = df.ix[:,['A','B']]+0.5\n expected = df.reindex(columns=['A','B'])\n expected += 0.5\n expected['C'] = df['C']\n assert_frame_equal(df2, expected)\n\n def test_setitem_frame_align(self):\n piece = self.frame.ix[:2, ['A', 'B']]\n piece.index = self.frame.index[-2:]\n piece.columns = ['A', 'B']\n self.frame.ix[-2:, ['A', 'B']] = piece\n 
assert_almost_equal(self.frame.ix[-2:, ['A', 'B']].values,\n piece.values)\n\n def test_setitem_fancy_exceptions(self):\n pass\n\n def test_getitem_boolean_missing(self):\n pass\n\n def test_setitem_boolean_missing(self):\n pass\n\n def test_getitem_setitem_ix_duplicates(self):\n # #1201\n df = DataFrame(np.random.randn(5, 3),\n index=['foo', 'foo', 'bar', 'baz', 'bar'])\n\n result = df.ix['foo']\n expected = df[:2]\n assert_frame_equal(result, expected)\n\n result = df.ix['bar']\n expected = df.ix[[2, 4]]\n assert_frame_equal(result, expected)\n\n result = df.ix['baz']\n expected = df.ix[3]\n assert_series_equal(result, expected)\n\n def test_getitem_ix_boolean_duplicates_multiple(self):\n # #1201\n df = DataFrame(np.random.randn(5, 3),\n index=['foo', 'foo', 'bar', 'baz', 'bar'])\n\n result = df.ix[['bar']]\n exp = df.ix[[2, 4]]\n assert_frame_equal(result, exp)\n\n result = df.ix[df[1] > 0]\n exp = df[df[1] > 0]\n assert_frame_equal(result, exp)\n\n result = df.ix[df[0] > 0]\n exp = df[df[0] > 0]\n assert_frame_equal(result, exp)\n\n def test_getitem_setitem_ix_bool_keyerror(self):\n # #2199\n df = DataFrame({'a': [1, 2, 3]})\n\n self.assertRaises(KeyError, df.ix.__getitem__, False)\n self.assertRaises(KeyError, df.ix.__getitem__, True)\n\n self.assertRaises(KeyError, df.ix.__setitem__, False, 0)\n self.assertRaises(KeyError, df.ix.__setitem__, True, 0)\n\n def test_getitem_list_duplicates(self):\n # #1943\n df = DataFrame(np.random.randn(4, 4), columns=list('AABC'))\n df.columns.name = 'foo'\n\n result = df[['B', 'C']]\n self.assertEqual(result.columns.name, 'foo')\n\n expected = df.ix[:, 2:]\n assert_frame_equal(result, expected)\n\n def test_get_value(self):\n for idx in self.frame.index:\n for col in self.frame.columns:\n result = self.frame.get_value(idx, col)\n expected = self.frame[col][idx]\n assert_almost_equal(result, expected)\n\n def test_iteritems(self):\n df = DataFrame([[1, 2, 3], [4, 5, 6]], columns=['a', 'a', 'b'])\n for k, v in compat.iteritems(df):\n self.assertEqual(type(v), Series)\n\n def test_lookup(self):\n def alt(df, rows, cols):\n result = []\n for r, c in zip(rows, cols):\n result.append(df.get_value(r, c))\n return result\n\n def testit(df):\n rows = list(df.index) * len(df.columns)\n cols = list(df.columns) * len(df.index)\n result = df.lookup(rows, cols)\n expected = alt(df, rows, cols)\n assert_almost_equal(result, expected)\n\n testit(self.mixed_frame)\n testit(self.frame)\n\n df = DataFrame({'label': ['a', 'b', 'a', 'c'],\n 'mask_a': [True, True, False, True],\n 'mask_b': [True, False, False, False],\n 'mask_c': [False, True, False, True]})\n df['mask'] = df.lookup(df.index, 'mask_' + df['label'])\n exp_mask = alt(df, df.index, 'mask_' + df['label'])\n assert_almost_equal(df['mask'], exp_mask)\n self.assertEqual(df['mask'].dtype, np.bool_)\n\n with tm.assertRaises(KeyError):\n self.frame.lookup(['xyz'], ['A'])\n\n with tm.assertRaises(KeyError):\n self.frame.lookup([self.frame.index[0]], ['xyz'])\n\n with tm.assertRaisesRegexp(ValueError, 'same size'):\n self.frame.lookup(['a', 'b', 'c'], ['a'])\n\n def test_set_value(self):\n for idx in self.frame.index:\n for col in self.frame.columns:\n self.frame.set_value(idx, col, 1)\n assert_almost_equal(self.frame[col][idx], 1)\n\n def test_set_value_resize(self):\n\n res = self.frame.set_value('foobar', 'B', 0)\n self.assertIs(res, self.frame)\n self.assertEqual(res.index[-1], 'foobar')\n self.assertEqual(res.get_value('foobar', 'B'), 0)\n\n self.frame.loc['foobar','qux'] = 0\n 
self.assertEqual(self.frame.get_value('foobar', 'qux'), 0)\n\n res = self.frame.copy()\n res3 = res.set_value('foobar', 'baz', 'sam')\n self.assertEqual(res3['baz'].dtype, np.object_)\n\n res = self.frame.copy()\n res3 = res.set_value('foobar', 'baz', True)\n self.assertEqual(res3['baz'].dtype, np.object_)\n\n res = self.frame.copy()\n res3 = res.set_value('foobar', 'baz', 5)\n self.assertTrue(com.is_float_dtype(res3['baz']))\n self.assertTrue(isnull(res3['baz'].drop(['foobar'])).all())\n self.assertRaises(ValueError, res3.set_value, 'foobar', 'baz', 'sam')\n\n def test_set_value_with_index_dtype_change(self):\n df_orig = DataFrame(randn(3, 3), index=lrange(3), columns=list('ABC'))\n\n # this is actually ambiguous as the 2 is interpreted as a positional\n # so column is not created\n df = df_orig.copy()\n df.set_value('C', 2, 1.0)\n self.assertEqual(list(df.index), list(df_orig.index) + ['C'])\n #self.assertEqual(list(df.columns), list(df_orig.columns) + [2])\n\n df = df_orig.copy()\n df.loc['C', 2] = 1.0\n self.assertEqual(list(df.index), list(df_orig.index) + ['C'])\n #self.assertEqual(list(df.columns), list(df_orig.columns) + [2])\n\n # create both new\n df = df_orig.copy()\n df.set_value('C', 'D', 1.0)\n self.assertEqual(list(df.index), list(df_orig.index) + ['C'])\n self.assertEqual(list(df.columns), list(df_orig.columns) + ['D'])\n\n df = df_orig.copy()\n df.loc['C', 'D'] = 1.0\n self.assertEqual(list(df.index), list(df_orig.index) + ['C'])\n self.assertEqual(list(df.columns), list(df_orig.columns) + ['D'])\n\n def test_get_set_value_no_partial_indexing(self):\n # partial w/ MultiIndex raise exception\n index = MultiIndex.from_tuples([(0, 1), (0, 2), (1, 1), (1, 2)])\n df = DataFrame(index=index, columns=lrange(4))\n self.assertRaises(KeyError, df.get_value, 0, 1)\n # self.assertRaises(KeyError, df.set_value, 0, 1, 0)\n\n def test_single_element_ix_dont_upcast(self):\n self.frame['E'] = 1\n self.assertTrue(issubclass(self.frame['E'].dtype.type,\n (int, np.integer)))\n\n result = self.frame.ix[self.frame.index[5], 'E']\n self.assertTrue(com.is_integer(result))\n\n def test_irow(self):\n df = DataFrame(np.random.randn(10, 4), index=lrange(0, 20, 2))\n\n result = df.irow(1)\n exp = df.ix[2]\n assert_series_equal(result, exp)\n\n result = df.irow(2)\n exp = df.ix[4]\n assert_series_equal(result, exp)\n\n # slice\n result = df.irow(slice(4, 8))\n expected = df.ix[8:14]\n assert_frame_equal(result, expected)\n\n # verify slice is view\n # setting it makes it raise/warn\n def f():\n result[2] = 0.\n self.assertRaises(com.SettingWithCopyError, f)\n exp_col = df[2].copy()\n exp_col[4:8] = 0.\n assert_series_equal(df[2], exp_col)\n\n # list of integers\n result = df.irow([1, 2, 4, 6])\n expected = df.reindex(df.index[[1, 2, 4, 6]])\n assert_frame_equal(result, expected)\n\n def test_icol(self):\n df = DataFrame(np.random.randn(4, 10), columns=lrange(0, 20, 2))\n\n result = df.icol(1)\n exp = df.ix[:, 2]\n assert_series_equal(result, exp)\n\n result = df.icol(2)\n exp = df.ix[:, 4]\n assert_series_equal(result, exp)\n\n # slice\n result = df.icol(slice(4, 8))\n expected = df.ix[:, 8:14]\n assert_frame_equal(result, expected)\n\n # verify slice is view\n # and that we are setting a copy\n def f():\n result[8] = 0.\n self.assertRaises(com.SettingWithCopyError, f)\n self.assertTrue((df[8] == 0).all())\n\n # list of integers\n result = df.icol([1, 2, 4, 6])\n expected = df.reindex(columns=df.columns[[1, 2, 4, 6]])\n assert_frame_equal(result, expected)\n\n def test_irow_icol_duplicates(self):\n df 
= DataFrame(np.random.rand(3, 3), columns=list('ABC'),\n index=list('aab'))\n\n result = df.irow(0)\n result2 = df.ix[0]\n tm.assert_isinstance(result, Series)\n assert_almost_equal(result.values, df.values[0])\n assert_series_equal(result, result2)\n\n result = df.T.icol(0)\n result2 = df.T.ix[:, 0]\n tm.assert_isinstance(result, Series)\n assert_almost_equal(result.values, df.values[0])\n assert_series_equal(result, result2)\n\n # multiindex\n df = DataFrame(np.random.randn(3, 3), columns=[['i', 'i', 'j'],\n ['A', 'A', 'B']],\n index=[['i', 'i', 'j'], ['X', 'X', 'Y']])\n rs = df.irow(0)\n xp = df.ix[0]\n assert_series_equal(rs, xp)\n\n rs = df.icol(0)\n xp = df.T.ix[0]\n assert_series_equal(rs, xp)\n\n rs = df.icol([0])\n xp = df.ix[:, [0]]\n assert_frame_equal(rs, xp)\n\n # #2259\n df = DataFrame([[1, 2, 3], [4, 5, 6]], columns=[1, 1, 2])\n result = df.icol([0])\n expected = df.take([0], axis=1)\n assert_frame_equal(result, expected)\n\n def test_icol_sparse_propegate_fill_value(self):\n from pandas.sparse.api import SparseDataFrame\n df = SparseDataFrame({'A': [999, 1]}, default_fill_value=999)\n self.assertTrue(len(df['A'].sp_values) == len(df.icol(0).sp_values))\n\n def test_iget_value(self):\n for i, row in enumerate(self.frame.index):\n for j, col in enumerate(self.frame.columns):\n result = self.frame.iget_value(i, j)\n expected = self.frame.get_value(row, col)\n assert_almost_equal(result, expected)\n\n def test_nested_exception(self):\n # Ignore the strange way of triggering the problem\n # (which may get fixed), it's just a way to trigger\n # the issue or reraising an outer exception without\n # a named argument\n df = DataFrame({\"a\": [1, 2, 3], \"b\": [4, 5, 6], \"c\": [7, 8,\n 9]}).set_index([\"a\", \"b\"])\n l = list(df.index)\n l[0] = [\"a\", \"b\"]\n df.index = l\n\n try:\n repr(df)\n except Exception as e:\n self.assertNotEqual(type(e), UnboundLocalError)\n\n def test_reindex_methods(self):\n df = pd.DataFrame({'x': list(range(5))})\n target = np.array([-0.1, 0.9, 1.1, 1.5])\n\n for method, expected_values in [('nearest', [0, 1, 1, 2]),\n ('pad', [np.nan, 0, 1, 1]),\n ('backfill', [0, 1, 2, 2])]:\n expected = pd.DataFrame({'x': expected_values}, index=target)\n actual = df.reindex(target, method=method)\n assert_frame_equal(expected, actual)\n\n e2 = expected[::-1]\n actual = df.reindex(target[::-1], method=method)\n assert_frame_equal(e2, actual)\n\n new_order = [3, 0, 2, 1]\n e2 = expected.iloc[new_order]\n actual = df.reindex(target[new_order], method=method)\n assert_frame_equal(e2, actual)\n\n switched_method = ('pad' if method == 'backfill'\n else 'backfill' if method == 'pad'\n else method)\n actual = df[::-1].reindex(target, method=switched_method)\n assert_frame_equal(expected, actual)\n\n def test_non_monotonic_reindex_methods(self):\n dr = pd.date_range('2013-08-01', periods=6, freq='B')\n data = np.random.randn(6,1)\n df = pd.DataFrame(data, index=dr, columns=list('A'))\n df_rev = pd.DataFrame(data, index=dr[[3, 4, 5] + [0, 1, 2]],\n columns=list('A'))\n # index is not monotonic increasing or decreasing\n self.assertRaises(ValueError, df_rev.reindex, df.index, method='pad')\n self.assertRaises(ValueError, df_rev.reindex, df.index, method='ffill')\n self.assertRaises(ValueError, df_rev.reindex, df.index, method='bfill')\n self.assertRaises(ValueError, df_rev.reindex, df.index, method='nearest')\n\n def test_reindex_level(self):\n from itertools import permutations\n icol = ['jim', 'joe', 'jolie']\n\n def verify_first_level(df, level, idx):\n f = lambda val: 
np.nonzero(df[level] == val)[0]\n i = np.concatenate(list(map(f, idx)))\n left = df.set_index(icol).reindex(idx, level=level)\n right = df.iloc[i].set_index(icol)\n assert_frame_equal(left, right)\n\n def verify(df, level, idx, indexer):\n left = df.set_index(icol).reindex(idx, level=level)\n right = df.iloc[indexer].set_index(icol)\n assert_frame_equal(left, right)\n\n df = pd.DataFrame({'jim':list('B' * 4 + 'A' * 2 + 'C' * 3),\n 'joe':list('abcdeabcd')[::-1],\n 'jolie':[10, 20, 30] * 3,\n 'joline': np.random.randint(0, 1000, 9)})\n\n target = [['C', 'B', 'A'], ['F', 'C', 'A', 'D'], ['A'], ['D', 'F'],\n ['A', 'B', 'C'], ['C', 'A', 'B'], ['C', 'B'], ['C', 'A'],\n ['A', 'B'], ['B', 'A', 'C'], ['A', 'C', 'B']]\n\n for idx in target:\n verify_first_level(df, 'jim', idx)\n\n verify(df, 'joe', list('abcde'), [3, 2, 1, 0, 5, 4, 8, 7, 6])\n verify(df, 'joe', list('abcd'), [3, 2, 1, 0, 5, 8, 7, 6])\n verify(df, 'joe', list('abc'), [3, 2, 1, 8, 7, 6])\n verify(df, 'joe', list('eca'), [1, 3, 4, 6, 8])\n verify(df, 'joe', list('edc'), [0, 1, 4, 5, 6])\n verify(df, 'joe', list('eadbc'), [3, 0, 2, 1, 4, 5, 8, 7, 6])\n verify(df, 'joe', list('edwq'), [0, 4, 5])\n verify(df, 'joe', list('wq'), [])\n\n df = DataFrame({'jim':['mid'] * 5 + ['btm'] * 8 + ['top'] * 7,\n 'joe':['3rd'] * 2 + ['1st'] * 3 + ['2nd'] * 3 +\n ['1st'] * 2 + ['3rd'] * 3 + ['1st'] * 2 +\n ['3rd'] * 3 + ['2nd'] * 2,\n # this needs to be jointly unique with jim and joe or\n # reindexing will fail ~1.5% of the time, this works\n # out to needing unique groups of same size as joe\n 'jolie': np.concatenate([np.random.choice(1000, x, replace=False)\n for x in [2, 3, 3, 2, 3, 2, 3, 2]]),\n 'joline': np.random.randn(20).round(3) * 10})\n\n for idx in permutations(df['jim'].unique()):\n for i in range(3):\n verify_first_level(df, 'jim', idx[:i+1])\n\n i = [2,3,4,0,1,8,9,5,6,7,10,11,12,13,14,18,19,15,16,17]\n verify(df, 'joe', ['1st', '2nd', '3rd'], i)\n\n i = [0,1,2,3,4,10,11,12,5,6,7,8,9,15,16,17,18,19,13,14]\n verify(df, 'joe', ['3rd', '2nd', '1st'], i)\n\n i = [0,1,5,6,7,10,11,12,18,19,15,16,17]\n verify(df, 'joe', ['2nd', '3rd'], i)\n\n i = [0,1,2,3,4,10,11,12,8,9,15,16,17,13,14]\n verify(df, 'joe', ['3rd', '1st'], i)\n\n def test_getitem_ix_float_duplicates(self):\n df = pd.DataFrame(np.random.randn(3, 3),\n index=[0.1, 0.2, 0.2], columns=list('abc'))\n expect = df.iloc[1:]\n tm.assert_frame_equal(df.loc[0.2], expect)\n tm.assert_frame_equal(df.ix[0.2], expect)\n\n expect = df.iloc[1:, 0]\n tm.assert_series_equal(df.loc[0.2, 'a'], expect)\n\n df.index = [1, 0.2, 0.2]\n expect = df.iloc[1:]\n tm.assert_frame_equal(df.loc[0.2], expect)\n tm.assert_frame_equal(df.ix[0.2], expect)\n\n expect = df.iloc[1:, 0]\n tm.assert_series_equal(df.loc[0.2, 'a'], expect)\n\n df = pd.DataFrame(np.random.randn(4, 3),\n index=[1, 0.2, 0.2, 1], columns=list('abc'))\n expect = df.iloc[1:-1]\n tm.assert_frame_equal(df.loc[0.2], expect)\n tm.assert_frame_equal(df.ix[0.2], expect)\n\n expect = df.iloc[1:-1, 0]\n tm.assert_series_equal(df.loc[0.2, 'a'], expect)\n\n df.index = [0.1, 0.2, 2, 0.2]\n expect = df.iloc[[1, -1]]\n tm.assert_frame_equal(df.loc[0.2], expect)\n tm.assert_frame_equal(df.ix[0.2], expect)\n\n expect = df.iloc[[1, -1], 0]\n tm.assert_series_equal(df.loc[0.2, 'a'], expect)\n\n def test_setitem_with_sparse_value(self):\n # GH8131\n df = pd.DataFrame({'c_1':['a', 'b', 'c'], 'n_1': [1., 2., 3.]})\n sp_series = pd.Series([0, 0, 1]).to_sparse(fill_value=0)\n df['new_column'] = sp_series\n tm.assert_series_equal(df['new_column'], sp_series)\n\n def 
test_setitem_with_unaligned_sparse_value(self):\n df = pd.DataFrame({'c_1':['a', 'b', 'c'], 'n_1': [1., 2., 3.]})\n sp_series = (pd.Series([0, 0, 1], index=[2, 1, 0])\n .to_sparse(fill_value=0))\n df['new_column'] = sp_series\n tm.assert_series_equal(df['new_column'], pd.Series([1, 0, 0]))\n\n\n_seriesd = tm.getSeriesData()\n_tsd = tm.getTimeSeriesData()\n\n_frame = DataFrame(_seriesd)\n_frame2 = DataFrame(_seriesd, columns=['D', 'C', 'B', 'A'])\n_intframe = DataFrame(dict((k, v.astype(int))\n for k, v in compat.iteritems(_seriesd)))\n\n_tsframe = DataFrame(_tsd)\n\n_mixed_frame = _frame.copy()\n_mixed_frame['foo'] = 'bar'\n\n\nclass SafeForSparse(object):\n\n _multiprocess_can_split_ = True\n\n def test_copy_index_name_checking(self):\n # don't want to be able to modify the index stored elsewhere after\n # making a copy\n for attr in ('index', 'columns'):\n ind = getattr(self.frame, attr)\n ind.name = None\n cp = self.frame.copy()\n getattr(cp, attr).name = 'foo'\n self.assertIsNone(getattr(self.frame, attr).name)\n\n def test_getitem_pop_assign_name(self):\n s = self.frame['A']\n self.assertEqual(s.name, 'A')\n\n s = self.frame.pop('A')\n self.assertEqual(s.name, 'A')\n\n s = self.frame.ix[:, 'B']\n self.assertEqual(s.name, 'B')\n\n s2 = s.ix[:]\n self.assertEqual(s2.name, 'B')\n\n def test_get_value(self):\n for idx in self.frame.index:\n for col in self.frame.columns:\n result = self.frame.get_value(idx, col)\n expected = self.frame[col][idx]\n assert_almost_equal(result, expected)\n\n def test_join_index(self):\n # left / right\n\n f = self.frame.reindex(columns=['A', 'B'])[:10]\n f2 = self.frame.reindex(columns=['C', 'D'])\n\n joined = f.join(f2)\n self.assertTrue(f.index.equals(joined.index))\n self.assertEqual(len(joined.columns), 4)\n\n joined = f.join(f2, how='left')\n self.assertTrue(joined.index.equals(f.index))\n self.assertEqual(len(joined.columns), 4)\n\n joined = f.join(f2, how='right')\n self.assertTrue(joined.index.equals(f2.index))\n self.assertEqual(len(joined.columns), 4)\n\n # inner\n\n f = self.frame.reindex(columns=['A', 'B'])[:10]\n f2 = self.frame.reindex(columns=['C', 'D'])\n\n joined = f.join(f2, how='inner')\n self.assertTrue(joined.index.equals(f.index.intersection(f2.index)))\n self.assertEqual(len(joined.columns), 4)\n\n # outer\n\n f = self.frame.reindex(columns=['A', 'B'])[:10]\n f2 = self.frame.reindex(columns=['C', 'D'])\n\n joined = f.join(f2, how='outer')\n self.assertTrue(tm.equalContents(self.frame.index, joined.index))\n self.assertEqual(len(joined.columns), 4)\n\n assertRaisesRegexp(ValueError, 'join method', f.join, f2, how='foo')\n\n # corner case - overlapping columns\n for how in ('outer', 'left', 'inner'):\n with assertRaisesRegexp(ValueError, 'columns overlap but no suffix'):\n self.frame.join(self.frame, how=how)\n\n def test_join_index_more(self):\n af = self.frame.ix[:, ['A', 'B']]\n bf = self.frame.ix[::2, ['C', 'D']]\n\n expected = af.copy()\n expected['C'] = self.frame['C'][::2]\n expected['D'] = self.frame['D'][::2]\n\n result = af.join(bf)\n assert_frame_equal(result, expected)\n\n result = af.join(bf, how='right')\n assert_frame_equal(result, expected[::2])\n\n result = bf.join(af, how='right')\n assert_frame_equal(result, expected.ix[:, result.columns])\n\n def test_join_index_series(self):\n df = self.frame.copy()\n s = df.pop(self.frame.columns[-1])\n joined = df.join(s)\n\n assert_frame_equal(joined, self.frame, check_names=False) # TODO should this check_names ?\n\n s.name = None\n assertRaisesRegexp(ValueError, 'must have a 
name', df.join, s)\n\n def test_join_overlap(self):\n df1 = self.frame.ix[:, ['A', 'B', 'C']]\n df2 = self.frame.ix[:, ['B', 'C', 'D']]\n\n joined = df1.join(df2, lsuffix='_df1', rsuffix='_df2')\n df1_suf = df1.ix[:, ['B', 'C']].add_suffix('_df1')\n df2_suf = df2.ix[:, ['B', 'C']].add_suffix('_df2')\n no_overlap = self.frame.ix[:, ['A', 'D']]\n expected = df1_suf.join(df2_suf).join(no_overlap)\n\n # column order not necessarily sorted\n assert_frame_equal(joined, expected.ix[:, joined.columns])\n\n def test_add_prefix_suffix(self):\n with_prefix = self.frame.add_prefix('foo#')\n expected = ['foo#%s' % c for c in self.frame.columns]\n self.assert_numpy_array_equal(with_prefix.columns, expected)\n\n with_suffix = self.frame.add_suffix('#foo')\n expected = ['%s#foo' % c for c in self.frame.columns]\n self.assert_numpy_array_equal(with_suffix.columns, expected)\n\n\nclass TestDataFrame(tm.TestCase, CheckIndexing,\n SafeForSparse):\n klass = DataFrame\n\n _multiprocess_can_split_ = True\n\n def setUp(self):\n import warnings\n warnings.filterwarnings(action='ignore', category=FutureWarning)\n\n self.frame = _frame.copy()\n self.frame2 = _frame2.copy()\n\n # force these all to int64 to avoid platform testing issues\n self.intframe = DataFrame(dict([ (c,s) for c,s in compat.iteritems(_intframe) ]), dtype = np.int64)\n self.tsframe = _tsframe.copy()\n self.mixed_frame = _mixed_frame.copy()\n self.mixed_float = DataFrame({ 'A': _frame['A'].copy().astype('float32'),\n 'B': _frame['B'].copy().astype('float32'),\n 'C': _frame['C'].copy().astype('float16'),\n 'D': _frame['D'].copy().astype('float64') })\n self.mixed_float2 = DataFrame({ 'A': _frame2['A'].copy().astype('float32'),\n 'B': _frame2['B'].copy().astype('float32'),\n 'C': _frame2['C'].copy().astype('float16'),\n 'D': _frame2['D'].copy().astype('float64') })\n self.mixed_int = DataFrame({ 'A': _intframe['A'].copy().astype('int32'),\n 'B': np.ones(len(_intframe['B']),dtype='uint64'),\n 'C': _intframe['C'].copy().astype('uint8'),\n 'D': _intframe['D'].copy().astype('int64') })\n self.all_mixed = DataFrame({'a': 1., 'b': 2, 'c': 'foo', 'float32' : np.array([1.]*10,dtype='float32'),\n 'int32' : np.array([1]*10,dtype='int32'),\n }, index=np.arange(10))\n\n self.ts1 = tm.makeTimeSeries()\n self.ts2 = tm.makeTimeSeries()[5:]\n self.ts3 = tm.makeTimeSeries()[-5:]\n self.ts4 = tm.makeTimeSeries()[1:-1]\n\n self.ts_dict = {\n 'col1': self.ts1,\n 'col2': self.ts2,\n 'col3': self.ts3,\n 'col4': self.ts4,\n }\n self.empty = DataFrame({})\n\n arr = np.array([[1., 2., 3.],\n [4., 5., 6.],\n [7., 8., 9.]])\n\n self.simple = DataFrame(arr, columns=['one', 'two', 'three'],\n index=['a', 'b', 'c'])\n\n def test_get_axis(self):\n f = self.frame\n self.assertEqual(f._get_axis_number(0), 0)\n self.assertEqual(f._get_axis_number(1), 1)\n self.assertEqual(f._get_axis_number('index'), 0)\n self.assertEqual(f._get_axis_number('rows'), 0)\n self.assertEqual(f._get_axis_number('columns'), 1)\n\n self.assertEqual(f._get_axis_name(0), 'index')\n self.assertEqual(f._get_axis_name(1), 'columns')\n self.assertEqual(f._get_axis_name('index'), 'index')\n self.assertEqual(f._get_axis_name('rows'), 'index')\n self.assertEqual(f._get_axis_name('columns'), 'columns')\n\n self.assertIs(f._get_axis(0), f.index)\n self.assertIs(f._get_axis(1), f.columns)\n\n assertRaisesRegexp(ValueError, 'No axis named', f._get_axis_number, 2)\n assertRaisesRegexp(ValueError, 'No axis.*foo', f._get_axis_name, 'foo')\n assertRaisesRegexp(ValueError, 'No axis.*None', f._get_axis_name, None)\n 
assertRaisesRegexp(ValueError, 'No axis named', f._get_axis_number, None)\n\n def test_set_index(self):\n idx = Index(np.arange(len(self.mixed_frame)))\n\n # cache it\n _ = self.mixed_frame['foo']\n self.mixed_frame.index = idx\n self.assertIs(self.mixed_frame['foo'].index, idx)\n with assertRaisesRegexp(ValueError, 'Length mismatch'):\n self.mixed_frame.index = idx[::2]\n\n def test_set_index_cast(self):\n\n # issue casting an index then set_index\n df = DataFrame({'A' : [1.1,2.2,3.3], 'B' : [5.0,6.1,7.2]},\n index = [2010,2011,2012])\n expected = df.ix[2010]\n new_index = df.index.astype(np.int32)\n df.index = new_index\n result = df.ix[2010]\n assert_series_equal(result,expected)\n\n def test_set_index2(self):\n df = DataFrame({'A': ['foo', 'foo', 'foo', 'bar', 'bar'],\n 'B': ['one', 'two', 'three', 'one', 'two'],\n 'C': ['a', 'b', 'c', 'd', 'e'],\n 'D': np.random.randn(5),\n 'E': np.random.randn(5)})\n\n # new object, single-column\n result = df.set_index('C')\n result_nodrop = df.set_index('C', drop=False)\n\n index = Index(df['C'], name='C')\n\n expected = df.ix[:, ['A', 'B', 'D', 'E']]\n expected.index = index\n\n expected_nodrop = df.copy()\n expected_nodrop.index = index\n\n assert_frame_equal(result, expected)\n assert_frame_equal(result_nodrop, expected_nodrop)\n self.assertEqual(result.index.name, index.name)\n\n # inplace, single\n df2 = df.copy()\n\n df2.set_index('C', inplace=True)\n\n assert_frame_equal(df2, expected)\n\n df3 = df.copy()\n df3.set_index('C', drop=False, inplace=True)\n\n assert_frame_equal(df3, expected_nodrop)\n\n # create new object, multi-column\n result = df.set_index(['A', 'B'])\n result_nodrop = df.set_index(['A', 'B'], drop=False)\n\n index = MultiIndex.from_arrays([df['A'], df['B']], names=['A', 'B'])\n\n expected = df.ix[:, ['C', 'D', 'E']]\n expected.index = index\n\n expected_nodrop = df.copy()\n expected_nodrop.index = index\n\n assert_frame_equal(result, expected)\n assert_frame_equal(result_nodrop, expected_nodrop)\n self.assertEqual(result.index.names, index.names)\n\n # inplace\n df2 = df.copy()\n df2.set_index(['A', 'B'], inplace=True)\n assert_frame_equal(df2, expected)\n\n df3 = df.copy()\n df3.set_index(['A', 'B'], drop=False, inplace=True)\n assert_frame_equal(df3, expected_nodrop)\n\n # corner case\n with assertRaisesRegexp(ValueError, 'Index has duplicate keys'):\n df.set_index('A', verify_integrity=True)\n\n # append\n result = df.set_index(['A', 'B'], append=True)\n xp = df.reset_index().set_index(['index', 'A', 'B'])\n xp.index.names = [None, 'A', 'B']\n assert_frame_equal(result, xp)\n\n # append to existing multiindex\n rdf = df.set_index(['A'], append=True)\n rdf = rdf.set_index(['B', 'C'], append=True)\n expected = df.set_index(['A', 'B', 'C'], append=True)\n assert_frame_equal(rdf, expected)\n\n # Series\n result = df.set_index(df.C)\n self.assertEqual(result.index.name, 'C')\n\n def test_set_index_nonuniq(self):\n df = DataFrame({'A': ['foo', 'foo', 'foo', 'bar', 'bar'],\n 'B': ['one', 'two', 'three', 'one', 'two'],\n 'C': ['a', 'b', 'c', 'd', 'e'],\n 'D': np.random.randn(5),\n 'E': np.random.randn(5)})\n with assertRaisesRegexp(ValueError, 'Index has duplicate keys'):\n df.set_index('A', verify_integrity=True, inplace=True)\n self.assertIn('A', df)\n\n def test_set_index_bug(self):\n # GH1590\n df = DataFrame({'val': [0, 1, 2], 'key': ['a', 'b', 'c']})\n df2 = df.select(lambda indx: indx >= 1)\n rs = df2.set_index('key')\n xp = DataFrame({'val': [1, 2]},\n Index(['b', 'c'], name='key'))\n assert_frame_equal(rs, xp)\n\n def 
test_set_index_pass_arrays(self):\n df = DataFrame({'A': ['foo', 'bar', 'foo', 'bar',\n 'foo', 'bar', 'foo', 'foo'],\n 'B': ['one', 'one', 'two', 'three',\n 'two', 'two', 'one', 'three'],\n 'C': np.random.randn(8),\n 'D': np.random.randn(8)})\n\n # multiple columns\n result = df.set_index(['A', df['B'].values], drop=False)\n expected = df.set_index(['A', 'B'], drop=False)\n assert_frame_equal(result, expected, check_names=False) # TODO should set_index check_names ?\n\n def test_set_index_cast_datetimeindex(self):\n df = DataFrame({'A': [datetime(2000, 1, 1) + timedelta(i)\n for i in range(1000)],\n 'B': np.random.randn(1000)})\n\n idf = df.set_index('A')\n tm.assert_isinstance(idf.index, DatetimeIndex)\n\n # don't cast a DatetimeIndex WITH a tz, leave as object\n # GH 6032\n i = pd.DatetimeIndex(pd.tseries.tools.to_datetime(['2013-1-1 13:00','2013-1-2 14:00'], errors=\"raise\")).tz_localize('US/Pacific')\n df = DataFrame(np.random.randn(2,1),columns=['A'])\n\n expected = Series(np.array([pd.Timestamp('2013-01-01 13:00:00-0800', tz='US/Pacific'),\n pd.Timestamp('2013-01-02 14:00:00-0800', tz='US/Pacific')], dtype=\"object\"))\n\n # convert index to series\n result = Series(i)\n assert_series_equal(result, expected)\n\n # assignt to frame\n df['B'] = i\n result = df['B']\n assert_series_equal(result, expected)\n\n # keep the timezone\n result = i.to_series(keep_tz=True)\n assert_series_equal(result.reset_index(drop=True), expected)\n\n # convert to utc\n df['C'] = i.to_series().reset_index(drop=True)\n result = df['C']\n comp = DatetimeIndex(expected.values).copy()\n comp.tz = None\n self.assert_numpy_array_equal(result.values, comp.values)\n\n # list of datetimes with a tz\n df['D'] = i.to_pydatetime()\n result = df['D']\n assert_series_equal(result, expected)\n\n # GH 6785\n # set the index manually\n import pytz\n df = DataFrame([{'ts':datetime(2014, 4, 1, tzinfo=pytz.utc), 'foo':1}])\n expected = df.set_index('ts')\n df.index = df['ts']\n df.pop('ts')\n assert_frame_equal(df, expected)\n\n # GH 3950\n # reset_index with single level\n for tz in ['UTC', 'Asia/Tokyo', 'US/Eastern']:\n idx = pd.date_range('1/1/2011', periods=5, freq='D', tz=tz, name='idx')\n df = pd.DataFrame({'a': range(5), 'b': ['A', 'B', 'C', 'D', 'E']}, index=idx)\n\n expected = pd.DataFrame({'idx': [datetime(2011, 1, 1), datetime(2011, 1, 2),\n datetime(2011, 1, 3), datetime(2011, 1, 4),\n datetime(2011, 1, 5)],\n 'a': range(5), 'b': ['A', 'B', 'C', 'D', 'E']},\n columns=['idx', 'a', 'b'])\n expected['idx'] = expected['idx'].apply(lambda d: pd.Timestamp(d, tz=tz))\n assert_frame_equal(df.reset_index(), expected)\n\n def test_set_index_multiindexcolumns(self):\n columns = MultiIndex.from_tuples([('foo', 1), ('foo', 2), ('bar', 1)])\n df = DataFrame(np.random.randn(3, 3), columns=columns)\n rs = df.set_index(df.columns[0])\n xp = df.ix[:, 1:]\n xp.index = df.ix[:, 0].values\n xp.index.names = [df.columns[0]]\n assert_frame_equal(rs, xp)\n\n def test_set_index_empty_column(self):\n # #1971\n df = DataFrame([\n dict(a=1, p=0),\n dict(a=2, m=10),\n dict(a=3, m=11, p=20),\n dict(a=4, m=12, p=21)\n ], columns=('a', 'm', 'p', 'x'))\n\n # it works!\n result = df.set_index(['a', 'x'])\n repr(result)\n\n def test_set_columns(self):\n cols = Index(np.arange(len(self.mixed_frame.columns)))\n self.mixed_frame.columns = cols\n with assertRaisesRegexp(ValueError, 'Length mismatch'):\n self.mixed_frame.columns = cols[::2]\n\n def test_keys(self):\n getkeys = self.frame.keys\n self.assertIs(getkeys(), self.frame.columns)\n\n def 
test_column_contains_typeerror(self):\n try:\n self.frame.columns in self.frame\n except TypeError:\n pass\n\n def test_constructor(self):\n df = DataFrame()\n self.assertEqual(len(df.index), 0)\n\n df = DataFrame(data={})\n self.assertEqual(len(df.index), 0)\n\n def test_constructor_mixed(self):\n index, data = tm.getMixedTypeDict()\n\n indexed_frame = DataFrame(data, index=index)\n unindexed_frame = DataFrame(data)\n\n self.assertEqual(self.mixed_frame['foo'].dtype, np.object_)\n\n def test_constructor_cast_failure(self):\n foo = DataFrame({'a': ['a', 'b', 'c']}, dtype=np.float64)\n self.assertEqual(foo['a'].dtype, object)\n\n # GH 3010, constructing with odd arrays\n df = DataFrame(np.ones((4,2)))\n\n # this is ok\n df['foo'] = np.ones((4,2)).tolist()\n\n # this is not ok\n self.assertRaises(ValueError, df.__setitem__, tuple(['test']), np.ones((4,2)))\n\n # this is ok\n df['foo2'] = np.ones((4,2)).tolist()\n\n def test_constructor_dtype_copy(self):\n orig_df = DataFrame({\n 'col1': [1.],\n 'col2': [2.],\n 'col3': [3.]})\n\n new_df = pd.DataFrame(orig_df, dtype=float, copy=True)\n\n new_df['col1'] = 200.\n self.assertEqual(orig_df['col1'][0], 1.)\n\n def test_constructor_dtype_nocast_view(self):\n df = DataFrame([[1, 2]])\n should_be_view = DataFrame(df, dtype=df[0].dtype)\n should_be_view[0][0] = 99\n self.assertEqual(df.values[0, 0], 99)\n\n should_be_view = DataFrame(df.values, dtype=df[0].dtype)\n should_be_view[0][0] = 97\n self.assertEqual(df.values[0, 0], 97)\n\n def test_constructor_dtype_list_data(self):\n df = DataFrame([[1, '2'],\n [None, 'a']], dtype=object)\n self.assertIsNone(df.ix[1, 0])\n self.assertEqual(df.ix[0, 1], '2')\n\n def test_constructor_list_frames(self):\n\n # GH 3243\n result = DataFrame([DataFrame([])])\n self.assertEqual(result.shape, (1,0))\n\n result = DataFrame([DataFrame(dict(A = lrange(5)))])\n tm.assert_isinstance(result.iloc[0,0], DataFrame)\n\n def test_constructor_mixed_dtypes(self):\n\n def _make_mixed_dtypes_df(typ, ad = None):\n\n if typ == 'int':\n dtypes = MIXED_INT_DTYPES\n arrays = [ np.array(np.random.rand(10), dtype = d) for d in dtypes ]\n elif typ == 'float':\n dtypes = MIXED_FLOAT_DTYPES\n arrays = [ np.array(np.random.randint(10, size=10), dtype = d) for d in dtypes ]\n\n zipper = lzip(dtypes,arrays)\n for d,a in zipper:\n assert(a.dtype == d)\n if ad is None:\n ad = dict()\n ad.update(dict([ (d,a) for d,a in zipper ]))\n return DataFrame(ad)\n\n def _check_mixed_dtypes(df, dtypes = None):\n if dtypes is None:\n dtypes = MIXED_FLOAT_DTYPES + MIXED_INT_DTYPES\n for d in dtypes:\n if d in df:\n assert(df.dtypes[d] == d)\n\n # mixed floating and integer coexinst in the same frame\n df = _make_mixed_dtypes_df('float')\n _check_mixed_dtypes(df)\n\n # add lots of types\n df = _make_mixed_dtypes_df('float', dict(A = 1, B = 'foo', C = 'bar'))\n _check_mixed_dtypes(df)\n\n # GH 622\n df = _make_mixed_dtypes_df('int')\n _check_mixed_dtypes(df)\n\n def test_constructor_rec(self):\n rec = self.frame.to_records(index=False)\n\n # Assigning causes segfault in NumPy < 1.5.1\n # rec.dtype.names = list(rec.dtype.names)[::-1]\n\n index = self.frame.index\n\n df = DataFrame(rec)\n self.assert_numpy_array_equal(df.columns, rec.dtype.names)\n\n df2 = DataFrame(rec, index=index)\n self.assert_numpy_array_equal(df2.columns, rec.dtype.names)\n self.assertTrue(df2.index.equals(index))\n\n rng = np.arange(len(rec))[::-1]\n df3 = DataFrame(rec, index=rng, columns=['C', 'B'])\n expected = DataFrame(rec, index=rng).reindex(columns=['C', 'B'])\n 
assert_frame_equal(df3, expected)\n\n def test_constructor_bool(self):\n df = DataFrame({0: np.ones(10, dtype=bool),\n 1: np.zeros(10, dtype=bool)})\n self.assertEqual(df.values.dtype, np.bool_)\n\n def test_constructor_overflow_int64(self):\n values = np.array([2 ** 64 - i for i in range(1, 10)],\n dtype=np.uint64)\n\n result = DataFrame({'a': values})\n self.assertEqual(result['a'].dtype, object)\n\n # #2355\n data_scores = [(6311132704823138710, 273), (2685045978526272070, 23),\n (8921811264899370420, 45), (long(17019687244989530680), 270),\n (long(9930107427299601010), 273)]\n dtype = [('uid', 'u8'), ('score', 'u8')]\n data = np.zeros((len(data_scores),), dtype=dtype)\n data[:] = data_scores\n df_crawls = DataFrame(data)\n self.assertEqual(df_crawls['uid'].dtype, object)\n\n def test_constructor_ordereddict(self):\n import random\n nitems = 100\n nums = lrange(nitems)\n random.shuffle(nums)\n expected = ['A%d' % i for i in nums]\n df = DataFrame(OrderedDict(zip(expected, [[0]] * nitems)))\n self.assertEqual(expected, list(df.columns))\n\n def test_constructor_dict(self):\n frame = DataFrame({'col1': self.ts1,\n 'col2': self.ts2})\n\n tm.assert_dict_equal(self.ts1, frame['col1'], compare_keys=False)\n tm.assert_dict_equal(self.ts2, frame['col2'], compare_keys=False)\n\n frame = DataFrame({'col1': self.ts1,\n 'col2': self.ts2},\n columns=['col2', 'col3', 'col4'])\n\n self.assertEqual(len(frame), len(self.ts2))\n self.assertNotIn('col1', frame)\n self.assertTrue(isnull(frame['col3']).all())\n\n # Corner cases\n self.assertEqual(len(DataFrame({})), 0)\n\n # mix dict and array, wrong size - no spec for which error should raise\n # first\n with tm.assertRaises(ValueError):\n DataFrame({'A': {'a': 'a', 'b': 'b'}, 'B': ['a', 'b', 'c']})\n\n # Length-one dict micro-optimization\n frame = DataFrame({'A': {'1': 1, '2': 2}})\n self.assert_numpy_array_equal(frame.index, ['1', '2'])\n\n # empty dict plus index\n idx = Index([0, 1, 2])\n frame = DataFrame({}, index=idx)\n self.assertIs(frame.index, idx)\n\n # empty with index and columns\n idx = Index([0, 1, 2])\n frame = DataFrame({}, index=idx, columns=idx)\n self.assertIs(frame.index, idx)\n self.assertIs(frame.columns, idx)\n self.assertEqual(len(frame._series), 3)\n\n # with dict of empty list and Series\n frame = DataFrame({'A': [], 'B': []}, columns=['A', 'B'])\n self.assertTrue(frame.index.equals(Index([])))\n\n def test_constructor_multi_index(self):\n # GH 4078\n # construction error with mi and all-nan frame\n tuples = [(2, 3), (3, 3), (3, 3)]\n mi = MultiIndex.from_tuples(tuples)\n df = DataFrame(index=mi,columns=mi)\n self.assertTrue(pd.isnull(df).values.ravel().all())\n\n tuples = [(3, 3), (2, 3), (3, 3)]\n mi = MultiIndex.from_tuples(tuples)\n df = DataFrame(index=mi,columns=mi)\n self.assertTrue(pd.isnull(df).values.ravel().all())\n\n def test_constructor_error_msgs(self):\n msg = \"Mixing dicts with non-Series may lead to ambiguous ordering.\"\n # mix dict and array, wrong size\n with assertRaisesRegexp(ValueError, msg):\n DataFrame({'A': {'a': 'a', 'b': 'b'},\n 'B': ['a', 'b', 'c']})\n\n # wrong size ndarray, GH 3105\n msg = \"Shape of passed values is \\(3, 4\\), indices imply \\(3, 3\\)\"\n with assertRaisesRegexp(ValueError, msg):\n DataFrame(np.arange(12).reshape((4, 3)),\n columns=['foo', 'bar', 'baz'],\n index=date_range('2000-01-01', periods=3))\n\n\n # higher dim raise exception\n with assertRaisesRegexp(ValueError, 'Must pass 2-d input'):\n DataFrame(np.zeros((3, 3, 3)), columns=['A', 'B', 'C'], index=[1])\n\n # wrong size 
axis labels\n with assertRaisesRegexp(ValueError, \"Shape of passed values is \\(3, 2\\), indices imply \\(3, 1\\)\"):\n DataFrame(np.random.rand(2,3), columns=['A', 'B', 'C'], index=[1])\n\n with assertRaisesRegexp(ValueError, \"Shape of passed values is \\(3, 2\\), indices imply \\(2, 2\\)\"):\n DataFrame(np.random.rand(2,3), columns=['A', 'B'], index=[1, 2])\n\n with assertRaisesRegexp(ValueError, 'If using all scalar values, you must pass an index'):\n DataFrame({'a': False, 'b': True})\n\n def test_constructor_with_embedded_frames(self):\n\n # embedded data frames\n df1 = DataFrame({'a':[1, 2, 3], 'b':[3, 4, 5]})\n df2 = DataFrame([df1, df1+10])\n\n df2.dtypes\n str(df2)\n\n result = df2.loc[0,0]\n assert_frame_equal(result,df1)\n\n result = df2.loc[1,0]\n assert_frame_equal(result,df1+10)\n\n def test_insert_error_msmgs(self):\n\n # GH 7432\n df = DataFrame({'foo':['a', 'b', 'c'], 'bar':[1,2,3], 'baz':['d','e','f']}).set_index('foo')\n s = DataFrame({'foo':['a', 'b', 'c', 'a'], 'fiz':['g','h','i','j']}).set_index('foo')\n msg = 'cannot reindex from a duplicate axis'\n with assertRaisesRegexp(ValueError, msg):\n df['newcol'] = s\n\n # GH 4107, more descriptive error message\n df = DataFrame(np.random.randint(0,2,(4,4)),\n columns=['a', 'b', 'c', 'd'])\n\n msg = 'incompatible index of inserted column with frame index'\n with assertRaisesRegexp(TypeError, msg):\n df['gr'] = df.groupby(['b', 'c']).count()\n\n def test_constructor_subclass_dict(self):\n # Test for passing dict subclass to constructor\n data = {'col1': tm.TestSubDict((x, 10.0 * x) for x in range(10)),\n 'col2': tm.TestSubDict((x, 20.0 * x) for x in range(10))}\n df = DataFrame(data)\n refdf = DataFrame(dict((col, dict(compat.iteritems(val)))\n for col, val in compat.iteritems(data)))\n assert_frame_equal(refdf, df)\n\n data = tm.TestSubDict(compat.iteritems(data))\n df = DataFrame(data)\n assert_frame_equal(refdf, df)\n\n # try with defaultdict\n from collections import defaultdict\n data = {}\n self.frame['B'][:10] = np.nan\n for k, v in compat.iteritems(self.frame):\n dct = defaultdict(dict)\n dct.update(v.to_dict())\n data[k] = dct\n frame = DataFrame(data)\n assert_frame_equal(self.frame.sort_index(), frame)\n\n def test_constructor_dict_block(self):\n expected = [[4., 3., 2., 1.]]\n df = DataFrame({'d': [4.], 'c': [3.], 'b': [2.], 'a': [1.]},\n columns=['d', 'c', 'b', 'a'])\n assert_almost_equal(df.values, expected)\n\n def test_constructor_dict_cast(self):\n # cast float tests\n test_data = {\n 'A': {'1': 1, '2': 2},\n 'B': {'1': '1', '2': '2', '3': '3'},\n }\n frame = DataFrame(test_data, dtype=float)\n self.assertEqual(len(frame), 3)\n self.assertEqual(frame['B'].dtype, np.float64)\n self.assertEqual(frame['A'].dtype, np.float64)\n\n frame = DataFrame(test_data)\n self.assertEqual(len(frame), 3)\n self.assertEqual(frame['B'].dtype, np.object_)\n self.assertEqual(frame['A'].dtype, np.float64)\n\n # can't cast to float\n test_data = {\n 'A': dict(zip(range(20), tm.makeStringIndex(20))),\n 'B': dict(zip(range(15), randn(15)))\n }\n frame = DataFrame(test_data, dtype=float)\n self.assertEqual(len(frame), 20)\n self.assertEqual(frame['A'].dtype, np.object_)\n self.assertEqual(frame['B'].dtype, np.float64)\n\n def test_constructor_dict_dont_upcast(self):\n d = {'Col1': {'Row1': 'A String', 'Row2': np.nan}}\n df = DataFrame(d)\n tm.assert_isinstance(df['Col1']['Row2'], float)\n\n dm = DataFrame([[1, 2], ['a', 'b']], index=[1, 2], columns=[1, 2])\n tm.assert_isinstance(dm[1][1], int)\n\n def 
test_constructor_dict_of_tuples(self):\n # GH #1491\n data = {'a': (1, 2, 3), 'b': (4, 5, 6)}\n\n result = DataFrame(data)\n expected = DataFrame(dict((k, list(v)) for k, v in compat.iteritems(data)))\n assert_frame_equal(result, expected, check_dtype=False)\n\n def test_constructor_dict_multiindex(self):\n check = lambda result, expected: tm.assert_frame_equal(\n result, expected, check_dtype=True, check_index_type=True,\n check_column_type=True, check_names=True)\n d = {('a', 'a'): {('i', 'i'): 0, ('i', 'j'): 1, ('j', 'i'): 2},\n ('b', 'a'): {('i', 'i'): 6, ('i', 'j'): 5, ('j', 'i'): 4},\n ('b', 'c'): {('i', 'i'): 7, ('i', 'j'): 8, ('j', 'i'): 9}}\n _d = sorted(d.items())\n df = DataFrame(d)\n expected = DataFrame(\n [x[1] for x in _d],\n index=MultiIndex.from_tuples([x[0] for x in _d])).T\n expected.index = MultiIndex.from_tuples(expected.index)\n check(df, expected)\n\n d['z'] = {'y': 123., ('i', 'i'): 111, ('i', 'j'): 111, ('j', 'i'): 111}\n _d.insert(0, ('z', d['z']))\n expected = DataFrame(\n [x[1] for x in _d],\n index=Index([x[0] for x in _d], tupleize_cols=False)).T\n expected.index = Index(expected.index, tupleize_cols=False)\n df = DataFrame(d)\n df = df.reindex(columns=expected.columns, index=expected.index)\n check(df, expected)\n\n def _check_basic_constructor(self, empty):\n \"mat: 2d matrix with shpae (3, 2) to input. empty - makes sized objects\"\n mat = empty((2, 3), dtype=float)\n # 2-D input\n frame = DataFrame(mat, columns=['A', 'B', 'C'], index=[1, 2])\n\n self.assertEqual(len(frame.index), 2)\n self.assertEqual(len(frame.columns), 3)\n\n # 1-D input\n frame = DataFrame(empty((3,)), columns=['A'], index=[1, 2, 3])\n self.assertEqual(len(frame.index), 3)\n self.assertEqual(len(frame.columns), 1)\n\n\n # cast type\n frame = DataFrame(mat, columns=['A', 'B', 'C'],\n index=[1, 2], dtype=np.int64)\n self.assertEqual(frame.values.dtype, np.int64)\n\n # wrong size axis labels\n msg = r'Shape of passed values is \\(3, 2\\), indices imply \\(3, 1\\)'\n with assertRaisesRegexp(ValueError, msg):\n DataFrame(mat, columns=['A', 'B', 'C'], index=[1])\n msg = r'Shape of passed values is \\(3, 2\\), indices imply \\(2, 2\\)'\n with assertRaisesRegexp(ValueError, msg):\n DataFrame(mat, columns=['A', 'B'], index=[1, 2])\n\n # higher dim raise exception\n with assertRaisesRegexp(ValueError, 'Must pass 2-d input'):\n DataFrame(empty((3, 3, 3)), columns=['A', 'B', 'C'],\n index=[1])\n\n # automatic labeling\n frame = DataFrame(mat)\n self.assert_numpy_array_equal(frame.index, lrange(2))\n self.assert_numpy_array_equal(frame.columns, lrange(3))\n\n frame = DataFrame(mat, index=[1, 2])\n self.assert_numpy_array_equal(frame.columns, lrange(3))\n\n frame = DataFrame(mat, columns=['A', 'B', 'C'])\n self.assert_numpy_array_equal(frame.index, lrange(2))\n\n # 0-length axis\n frame = DataFrame(empty((0, 3)))\n self.assertEqual(len(frame.index), 0)\n\n frame = DataFrame(empty((3, 0)))\n self.assertEqual(len(frame.columns), 0)\n\n def test_constructor_ndarray(self):\n mat = np.zeros((2, 3), dtype=float)\n self._check_basic_constructor(np.ones)\n\n frame = DataFrame(['foo', 'bar'], index=[0, 1], columns=['A'])\n self.assertEqual(len(frame), 2)\n\n def test_constructor_maskedarray(self):\n self._check_basic_constructor(ma.masked_all)\n\n # Check non-masked values\n mat = ma.masked_all((2, 3), dtype=float)\n mat[0, 0] = 1.0\n mat[1, 2] = 2.0\n frame = DataFrame(mat, columns=['A', 'B', 'C'], index=[1, 2])\n self.assertEqual(1.0, frame['A'][1])\n self.assertEqual(2.0, frame['C'][2])\n\n # what is this 
even checking??\n mat = ma.masked_all((2, 3), dtype=float)\n frame = DataFrame(mat, columns=['A', 'B', 'C'], index=[1, 2])\n self.assertTrue(np.all(~np.asarray(frame == frame)))\n\n def test_constructor_maskedarray_nonfloat(self):\n # masked int promoted to float\n mat = ma.masked_all((2, 3), dtype=int)\n # 2-D input\n frame = DataFrame(mat, columns=['A', 'B', 'C'], index=[1, 2])\n\n self.assertEqual(len(frame.index), 2)\n self.assertEqual(len(frame.columns), 3)\n self.assertTrue(np.all(~np.asarray(frame == frame)))\n\n # cast type\n frame = DataFrame(mat, columns=['A', 'B', 'C'],\n index=[1, 2], dtype=np.float64)\n self.assertEqual(frame.values.dtype, np.float64)\n\n # Check non-masked values\n mat2 = ma.copy(mat)\n mat2[0, 0] = 1\n mat2[1, 2] = 2\n frame = DataFrame(mat2, columns=['A', 'B', 'C'], index=[1, 2])\n self.assertEqual(1, frame['A'][1])\n self.assertEqual(2, frame['C'][2])\n\n # masked np.datetime64 stays (use lib.NaT as null)\n mat = ma.masked_all((2, 3), dtype='M8[ns]')\n # 2-D input\n frame = DataFrame(mat, columns=['A', 'B', 'C'], index=[1, 2])\n\n self.assertEqual(len(frame.index), 2)\n self.assertEqual(len(frame.columns), 3)\n self.assertTrue(isnull(frame).values.all())\n\n # cast type\n frame = DataFrame(mat, columns=['A', 'B', 'C'],\n index=[1, 2], dtype=np.int64)\n self.assertEqual(frame.values.dtype, np.int64)\n\n # Check non-masked values\n mat2 = ma.copy(mat)\n mat2[0, 0] = 1\n mat2[1, 2] = 2\n frame = DataFrame(mat2, columns=['A', 'B', 'C'], index=[1, 2])\n self.assertEqual(1, frame['A'].view('i8')[1])\n self.assertEqual(2, frame['C'].view('i8')[2])\n\n # masked bool promoted to object\n mat = ma.masked_all((2, 3), dtype=bool)\n # 2-D input\n frame = DataFrame(mat, columns=['A', 'B', 'C'], index=[1, 2])\n\n self.assertEqual(len(frame.index), 2)\n self.assertEqual(len(frame.columns), 3)\n self.assertTrue(np.all(~np.asarray(frame == frame)))\n\n # cast type\n frame = DataFrame(mat, columns=['A', 'B', 'C'],\n index=[1, 2], dtype=object)\n self.assertEqual(frame.values.dtype, object)\n\n # Check non-masked values\n mat2 = ma.copy(mat)\n mat2[0, 0] = True\n mat2[1, 2] = False\n frame = DataFrame(mat2, columns=['A', 'B', 'C'], index=[1, 2])\n self.assertEqual(True, frame['A'][1])\n self.assertEqual(False, frame['C'][2])\n\n def test_constructor_mrecarray(self):\n # Ensure mrecarray produces frame identical to dict of masked arrays\n # from GH3479\n\n assert_fr_equal = functools.partial(assert_frame_equal,\n check_index_type=True,\n check_column_type=True,\n check_frame_type=True)\n arrays = [\n ('float', np.array([1.5, 2.0])),\n ('int', np.array([1, 2])),\n ('str', np.array(['abc', 'def'])),\n ]\n for name, arr in arrays[:]:\n arrays.append(('masked1_' + name,\n np.ma.masked_array(arr, mask=[False, True])))\n arrays.append(('masked_all', np.ma.masked_all((2,))))\n arrays.append(('masked_none',\n np.ma.masked_array([1.0, 2.5], mask=False)))\n\n # call assert_frame_equal for all selections of 3 arrays\n for comb in itertools.combinations(arrays, 3):\n names, data = zip(*comb)\n mrecs = mrecords.fromarrays(data, names=names)\n\n # fill the comb\n comb = dict([ (k, v.filled()) if hasattr(v,'filled') else (k, v) for k, v in comb ])\n\n expected = DataFrame(comb,columns=names)\n result = DataFrame(mrecs)\n assert_fr_equal(result,expected)\n\n # specify columns\n expected = DataFrame(comb,columns=names[::-1])\n result = DataFrame(mrecs, columns=names[::-1])\n assert_fr_equal(result,expected)\n\n # specify index\n expected = DataFrame(comb,columns=names,index=[1,2])\n result = 
DataFrame(mrecs, index=[1,2])\n assert_fr_equal(result,expected)\n\n def test_constructor_corner(self):\n df = DataFrame(index=[])\n self.assertEqual(df.values.shape, (0, 0))\n\n # empty but with specified dtype\n df = DataFrame(index=lrange(10), columns=['a', 'b'], dtype=object)\n self.assertEqual(df.values.dtype, np.object_)\n\n # does not error but ends up float\n df = DataFrame(index=lrange(10), columns=['a', 'b'], dtype=int)\n self.assertEqual(df.values.dtype, np.object_)\n\n # #1783 empty dtype object\n df = DataFrame({}, columns=['foo', 'bar'])\n self.assertEqual(df.values.dtype, np.object_)\n\n df = DataFrame({'b': 1}, index=lrange(10), columns=list('abc'),\n dtype=int)\n self.assertEqual(df.values.dtype, np.object_)\n\n\n def test_constructor_scalar_inference(self):\n data = {'int': 1, 'bool': True,\n 'float': 3., 'complex': 4j, 'object': 'foo'}\n df = DataFrame(data, index=np.arange(10))\n\n self.assertEqual(df['int'].dtype, np.int64)\n self.assertEqual(df['bool'].dtype, np.bool_)\n self.assertEqual(df['float'].dtype, np.float64)\n self.assertEqual(df['complex'].dtype, np.complex128)\n self.assertEqual(df['object'].dtype, np.object_)\n\n def test_constructor_arrays_and_scalars(self):\n df = DataFrame({'a': randn(10), 'b': True})\n exp = DataFrame({'a': df['a'].values, 'b': [True] * 10})\n\n assert_frame_equal(df, exp)\n with tm.assertRaisesRegexp(ValueError, 'must pass an index'):\n DataFrame({'a': False, 'b': True})\n\n def test_constructor_DataFrame(self):\n df = DataFrame(self.frame)\n assert_frame_equal(df, self.frame)\n\n df_casted = DataFrame(self.frame, dtype=np.int64)\n self.assertEqual(df_casted.values.dtype, np.int64)\n\n def test_constructor_more(self):\n # used to be in test_matrix.py\n arr = randn(10)\n dm = DataFrame(arr, columns=['A'], index=np.arange(10))\n self.assertEqual(dm.values.ndim, 2)\n\n arr = randn(0)\n dm = DataFrame(arr)\n self.assertEqual(dm.values.ndim, 2)\n self.assertEqual(dm.values.ndim, 2)\n\n # no data specified\n dm = DataFrame(columns=['A', 'B'], index=np.arange(10))\n self.assertEqual(dm.values.shape, (10, 2))\n\n dm = DataFrame(columns=['A', 'B'])\n self.assertEqual(dm.values.shape, (0, 2))\n\n dm = DataFrame(index=np.arange(10))\n self.assertEqual(dm.values.shape, (10, 0))\n\n # corner, silly\n # TODO: Fix this Exception to be better...\n with assertRaisesRegexp(PandasError, 'constructor not properly called'):\n DataFrame((1, 2, 3))\n\n # can't cast\n mat = np.array(['foo', 'bar'], dtype=object).reshape(2, 1)\n with assertRaisesRegexp(ValueError, 'cast'):\n DataFrame(mat, index=[0, 1], columns=[0], dtype=float)\n\n dm = DataFrame(DataFrame(self.frame._series))\n tm.assert_frame_equal(dm, self.frame)\n\n # int cast\n dm = DataFrame({'A': np.ones(10, dtype=int),\n 'B': np.ones(10, dtype=np.float64)},\n index=np.arange(10))\n\n self.assertEqual(len(dm.columns), 2)\n self.assertEqual(dm.values.dtype, np.float64)\n\n def test_constructor_empty_list(self):\n df = DataFrame([], index=[])\n expected = DataFrame(index=[])\n assert_frame_equal(df, expected)\n\n def test_constructor_list_of_lists(self):\n # GH #484\n l = [[1, 'a'], [2, 'b']]\n df = DataFrame(data=l, columns=[\"num\", \"str\"])\n self.assertTrue(com.is_integer_dtype(df['num']))\n self.assertEqual(df['str'].dtype, np.object_)\n\n # GH 4851\n # list of 0-dim ndarrays\n expected = DataFrame({ 0: range(10) })\n data = [np.array(x) for x in range(10)]\n result = DataFrame(data)\n assert_frame_equal(result, expected)\n\n def test_constructor_sequence_like(self):\n # GH 3783\n # 
collections.Squence like\n import collections\n\n class DummyContainer(collections.Sequence):\n def __init__(self, lst):\n self._lst = lst\n def __getitem__(self, n):\n return self._lst.__getitem__(n)\n def __len__(self, n):\n return self._lst.__len__()\n\n l = [DummyContainer([1, 'a']), DummyContainer([2, 'b'])]\n columns = [\"num\", \"str\"]\n result = DataFrame(l, columns=columns)\n expected = DataFrame([[1,'a'],[2,'b']],columns=columns)\n assert_frame_equal(result, expected, check_dtype=False)\n\n # GH 4297\n # support Array\n import array\n result = DataFrame.from_items([('A', array.array('i', range(10)))])\n expected = DataFrame({ 'A' : list(range(10)) })\n assert_frame_equal(result, expected, check_dtype=False)\n\n expected = DataFrame([ list(range(10)), list(range(10)) ])\n result = DataFrame([ array.array('i', range(10)), array.array('i',range(10)) ])\n assert_frame_equal(result, expected, check_dtype=False)\n\n def test_constructor_iterator(self):\n\n expected = DataFrame([ list(range(10)), list(range(10)) ])\n result = DataFrame([ range(10), range(10) ])\n assert_frame_equal(result, expected)\n\n def test_constructor_generator(self):\n #related #2305\n\n gen1 = (i for i in range(10))\n gen2 = (i for i in range(10))\n\n expected = DataFrame([ list(range(10)), list(range(10)) ])\n result = DataFrame([ gen1, gen2 ])\n assert_frame_equal(result, expected)\n\n gen = ([ i, 'a'] for i in range(10))\n result = DataFrame(gen)\n expected = DataFrame({ 0 : range(10), 1 : 'a' })\n assert_frame_equal(result, expected, check_dtype=False)\n\n def test_constructor_list_of_dicts(self):\n data = [OrderedDict([['a', 1.5], ['b', 3], ['c', 4], ['d', 6]]),\n OrderedDict([['a', 1.5], ['b', 3], ['d', 6]]),\n OrderedDict([['a', 1.5], ['d', 6]]),\n OrderedDict(),\n OrderedDict([['a', 1.5], ['b', 3], ['c', 4]]),\n OrderedDict([['b', 3], ['c', 4], ['d', 6]])]\n\n result = DataFrame(data)\n expected = DataFrame.from_dict(dict(zip(range(len(data)), data)),\n orient='index')\n assert_frame_equal(result, expected.reindex(result.index))\n\n result = DataFrame([{}])\n expected = DataFrame(index=[0])\n assert_frame_equal(result, expected)\n\n def test_constructor_list_of_series(self):\n data = [OrderedDict([['a', 1.5], ['b', 3.0], ['c', 4.0]]),\n OrderedDict([['a', 1.5], ['b', 3.0], ['c', 6.0]])]\n sdict = OrderedDict(zip(['x', 'y'], data))\n idx = Index(['a', 'b', 'c'])\n\n # all named\n data2 = [Series([1.5, 3, 4], idx, dtype='O', name='x'),\n Series([1.5, 3, 6], idx, name='y')]\n result = DataFrame(data2)\n expected = DataFrame.from_dict(sdict, orient='index')\n assert_frame_equal(result, expected)\n\n # some unnamed\n data2 = [Series([1.5, 3, 4], idx, dtype='O', name='x'),\n Series([1.5, 3, 6], idx)]\n result = DataFrame(data2)\n\n sdict = OrderedDict(zip(['x', 'Unnamed 0'], data))\n expected = DataFrame.from_dict(sdict, orient='index')\n assert_frame_equal(result.sort_index(), expected)\n\n # none named\n data = [OrderedDict([['a', 1.5], ['b', 3], ['c', 4], ['d', 6]]),\n OrderedDict([['a', 1.5], ['b', 3], ['d', 6]]),\n OrderedDict([['a', 1.5], ['d', 6]]),\n OrderedDict(),\n OrderedDict([['a', 1.5], ['b', 3], ['c', 4]]),\n OrderedDict([['b', 3], ['c', 4], ['d', 6]])]\n data = [Series(d) for d in data]\n\n result = DataFrame(data)\n sdict = OrderedDict(zip(range(len(data)), data))\n expected = DataFrame.from_dict(sdict, orient='index')\n assert_frame_equal(result, expected.reindex(result.index))\n\n result2 = DataFrame(data, index=np.arange(6))\n assert_frame_equal(result, result2)\n\n result = 
DataFrame([Series({})])\n expected = DataFrame(index=[0])\n assert_frame_equal(result, expected)\n\n data = [OrderedDict([['a', 1.5], ['b', 3.0], ['c', 4.0]]),\n OrderedDict([['a', 1.5], ['b', 3.0], ['c', 6.0]])]\n sdict = OrderedDict(zip(range(len(data)), data))\n\n idx = Index(['a', 'b', 'c'])\n data2 = [Series([1.5, 3, 4], idx, dtype='O'),\n Series([1.5, 3, 6], idx)]\n result = DataFrame(data2)\n expected = DataFrame.from_dict(sdict, orient='index')\n assert_frame_equal(result, expected)\n\n def test_constructor_list_of_derived_dicts(self):\n class CustomDict(dict):\n pass\n d = {'a': 1.5, 'b': 3}\n\n data_custom = [CustomDict(d)]\n data = [d]\n\n result_custom = DataFrame(data_custom)\n result = DataFrame(data)\n assert_frame_equal(result, result_custom)\n\n def test_constructor_ragged(self):\n data = {'A': randn(10),\n 'B': randn(8)}\n with assertRaisesRegexp(ValueError, 'arrays must all be same length'):\n DataFrame(data)\n\n def test_constructor_scalar(self):\n idx = Index(lrange(3))\n df = DataFrame({\"a\": 0}, index=idx)\n expected = DataFrame({\"a\": [0, 0, 0]}, index=idx)\n assert_frame_equal(df, expected, check_dtype=False)\n\n def test_constructor_Series_copy_bug(self):\n df = DataFrame(self.frame['A'], index=self.frame.index, columns=['A'])\n df.copy()\n\n def test_constructor_mixed_dict_and_Series(self):\n data = {}\n data['A'] = {'foo': 1, 'bar': 2, 'baz': 3}\n data['B'] = Series([4, 3, 2, 1], index=['bar', 'qux', 'baz', 'foo'])\n\n result = DataFrame(data)\n self.assertTrue(result.index.is_monotonic)\n\n # ordering ambiguous, raise exception\n with assertRaisesRegexp(ValueError, 'ambiguous ordering'):\n DataFrame({'A': ['a', 'b'], 'B': {'a': 'a', 'b': 'b'}})\n\n # this is OK though\n result = DataFrame({'A': ['a', 'b'],\n 'B': Series(['a', 'b'], index=['a', 'b'])})\n expected = DataFrame({'A': ['a', 'b'], 'B': ['a', 'b']},\n index=['a', 'b'])\n assert_frame_equal(result, expected)\n\n def test_constructor_tuples(self):\n result = DataFrame({'A': [(1, 2), (3, 4)]})\n expected = DataFrame({'A': Series([(1, 2), (3, 4)])})\n assert_frame_equal(result, expected)\n\n def test_constructor_orient(self):\n data_dict = self.mixed_frame.T._series\n recons = DataFrame.from_dict(data_dict, orient='index')\n expected = self.mixed_frame.sort_index()\n assert_frame_equal(recons, expected)\n\n # dict of sequence\n a = {'hi': [32, 3, 3],\n 'there': [3, 5, 3]}\n rs = DataFrame.from_dict(a, orient='index')\n xp = DataFrame.from_dict(a).T.reindex(list(a.keys()))\n assert_frame_equal(rs, xp)\n\n def test_constructor_Series_named(self):\n a = Series([1, 2, 3], index=['a', 'b', 'c'], name='x')\n df = DataFrame(a)\n self.assertEqual(df.columns[0], 'x')\n self.assertTrue(df.index.equals(a.index))\n\n # ndarray like\n arr = np.random.randn(10)\n s = Series(arr,name='x')\n df = DataFrame(s)\n expected = DataFrame(dict(x = s))\n assert_frame_equal(df,expected)\n\n s = Series(arr,index=range(3,13))\n df = DataFrame(s)\n expected = DataFrame({ 0 : s })\n assert_frame_equal(df,expected)\n\n self.assertRaises(ValueError, DataFrame, s, columns=[1,2])\n\n # #2234\n a = Series([], name='x')\n df = DataFrame(a)\n self.assertEqual(df.columns[0], 'x')\n\n # series with name and w/o\n s1 = Series(arr,name='x')\n df = DataFrame([s1, arr]).T\n expected = DataFrame({ 'x' : s1, 'Unnamed 0' : arr },columns=['x','Unnamed 0'])\n assert_frame_equal(df,expected)\n\n # this is a bit non-intuitive here; the series collapse down to arrays\n df = DataFrame([arr, s1]).T\n expected = DataFrame({ 1 : s1, 0 : arr 
},columns=[0,1])\n assert_frame_equal(df,expected)\n\n def test_constructor_Series_differently_indexed(self):\n # name\n s1 = Series([1, 2, 3], index=['a', 'b', 'c'], name='x')\n\n # no name\n s2 = Series([1, 2, 3], index=['a', 'b', 'c'])\n\n other_index = Index(['a', 'b'])\n\n df1 = DataFrame(s1, index=other_index)\n exp1 = DataFrame(s1.reindex(other_index))\n self.assertEqual(df1.columns[0], 'x')\n assert_frame_equal(df1, exp1)\n\n df2 = DataFrame(s2, index=other_index)\n exp2 = DataFrame(s2.reindex(other_index))\n self.assertEqual(df2.columns[0], 0)\n self.assertTrue(df2.index.equals(other_index))\n assert_frame_equal(df2, exp2)\n\n def test_constructor_manager_resize(self):\n index = list(self.frame.index[:5])\n columns = list(self.frame.columns[:3])\n\n result = DataFrame(self.frame._data, index=index,\n columns=columns)\n self.assert_numpy_array_equal(result.index, index)\n self.assert_numpy_array_equal(result.columns, columns)\n\n def test_constructor_from_items(self):\n items = [(c, self.frame[c]) for c in self.frame.columns]\n recons = DataFrame.from_items(items)\n assert_frame_equal(recons, self.frame)\n\n # pass some columns\n recons = DataFrame.from_items(items, columns=['C', 'B', 'A'])\n assert_frame_equal(recons, self.frame.ix[:, ['C', 'B', 'A']])\n\n # orient='index'\n\n row_items = [(idx, self.mixed_frame.xs(idx))\n for idx in self.mixed_frame.index]\n\n recons = DataFrame.from_items(row_items,\n columns=self.mixed_frame.columns,\n orient='index')\n assert_frame_equal(recons, self.mixed_frame)\n self.assertEqual(recons['A'].dtype, np.float64)\n\n with tm.assertRaisesRegexp(TypeError,\n \"Must pass columns with orient='index'\"):\n DataFrame.from_items(row_items, orient='index')\n\n # orient='index', but thar be tuples\n arr = lib.list_to_object_array(\n [('bar', 'baz')] * len(self.mixed_frame))\n self.mixed_frame['foo'] = arr\n row_items = [(idx, list(self.mixed_frame.xs(idx)))\n for idx in self.mixed_frame.index]\n recons = DataFrame.from_items(row_items,\n columns=self.mixed_frame.columns,\n orient='index')\n assert_frame_equal(recons, self.mixed_frame)\n tm.assert_isinstance(recons['foo'][0], tuple)\n\n rs = DataFrame.from_items([('A', [1, 2, 3]), ('B', [4, 5, 6])],\n orient='index', columns=['one', 'two', 'three'])\n xp = DataFrame([[1, 2, 3], [4, 5, 6]], index=['A', 'B'],\n columns=['one', 'two', 'three'])\n assert_frame_equal(rs, xp)\n\n def test_constructor_mix_series_nonseries(self):\n df = DataFrame({'A': self.frame['A'],\n 'B': list(self.frame['B'])}, columns=['A', 'B'])\n assert_frame_equal(df, self.frame.ix[:, ['A', 'B']])\n\n with tm.assertRaisesRegexp(ValueError, 'does not match index length'):\n DataFrame({'A': self.frame['A'], 'B': list(self.frame['B'])[:-2]})\n\n def test_constructor_miscast_na_int_dtype(self):\n df = DataFrame([[np.nan, 1], [1, 0]], dtype=np.int64)\n expected = DataFrame([[np.nan, 1], [1, 0]])\n assert_frame_equal(df, expected)\n\n def test_constructor_iterator_failure(self):\n with assertRaisesRegexp(TypeError, 'iterator'):\n df = DataFrame(iter([1, 2, 3]))\n\n def test_constructor_column_duplicates(self):\n # it works! 
#2079\n df = DataFrame([[8, 5]], columns=['a', 'a'])\n edf = DataFrame([[8, 5]])\n edf.columns = ['a', 'a']\n\n assert_frame_equal(df, edf)\n\n idf = DataFrame.from_items(\n [('a', [8]), ('a', [5])], columns=['a', 'a'])\n assert_frame_equal(idf, edf)\n\n self.assertRaises(ValueError, DataFrame.from_items,\n [('a', [8]), ('a', [5]), ('b', [6])],\n columns=['b', 'a', 'a'])\n\n def test_column_dups_operations(self):\n\n def check(result, expected=None):\n if expected is not None:\n assert_frame_equal(result,expected)\n result.dtypes\n str(result)\n\n # assignment\n # GH 3687\n arr = np.random.randn(3, 2)\n idx = lrange(2)\n df = DataFrame(arr, columns=['A', 'A'])\n df.columns = idx\n expected = DataFrame(arr,columns=idx)\n check(df,expected)\n\n idx = date_range('20130101',periods=4,freq='Q-NOV')\n df = DataFrame([[1,1,1,5],[1,1,2,5],[2,1,3,5]],columns=['a','a','a','a'])\n df.columns = idx\n expected = DataFrame([[1,1,1,5],[1,1,2,5],[2,1,3,5]],columns=idx)\n check(df,expected)\n\n # insert\n df = DataFrame([[1,1,1,5],[1,1,2,5],[2,1,3,5]],columns=['foo','bar','foo','hello'])\n df['string'] = 'bah'\n expected = DataFrame([[1,1,1,5,'bah'],[1,1,2,5,'bah'],[2,1,3,5,'bah']],columns=['foo','bar','foo','hello','string'])\n check(df,expected)\n with assertRaisesRegexp(ValueError, 'Length of value'):\n df.insert(0, 'AnotherColumn', range(len(df.index) - 1))\n\n # insert same dtype\n df['foo2'] = 3\n expected = DataFrame([[1,1,1,5,'bah',3],[1,1,2,5,'bah',3],[2,1,3,5,'bah',3]],columns=['foo','bar','foo','hello','string','foo2'])\n check(df,expected)\n\n # set (non-dup)\n df['foo2'] = 4\n expected = DataFrame([[1,1,1,5,'bah',4],[1,1,2,5,'bah',4],[2,1,3,5,'bah',4]],columns=['foo','bar','foo','hello','string','foo2'])\n check(df,expected)\n df['foo2'] = 3\n\n # delete (non dup)\n del df['bar']\n expected = DataFrame([[1,1,5,'bah',3],[1,2,5,'bah',3],[2,3,5,'bah',3]],columns=['foo','foo','hello','string','foo2'])\n check(df,expected)\n\n # try to delete again (its not consolidated)\n del df['hello']\n expected = DataFrame([[1,1,'bah',3],[1,2,'bah',3],[2,3,'bah',3]],columns=['foo','foo','string','foo2'])\n check(df,expected)\n\n # consolidate\n df = df.consolidate()\n expected = DataFrame([[1,1,'bah',3],[1,2,'bah',3],[2,3,'bah',3]],columns=['foo','foo','string','foo2'])\n check(df,expected)\n\n # insert\n df.insert(2,'new_col',5.)\n expected = DataFrame([[1,1,5.,'bah',3],[1,2,5.,'bah',3],[2,3,5.,'bah',3]],columns=['foo','foo','new_col','string','foo2'])\n check(df,expected)\n\n # insert a dup\n assertRaisesRegexp(ValueError, 'cannot insert', df.insert, 2, 'new_col', 4.)\n df.insert(2,'new_col',4.,allow_duplicates=True)\n expected = DataFrame([[1,1,4.,5.,'bah',3],[1,2,4.,5.,'bah',3],[2,3,4.,5.,'bah',3]],columns=['foo','foo','new_col','new_col','string','foo2'])\n check(df,expected)\n\n # delete (dup)\n del df['foo']\n expected = DataFrame([[4.,5.,'bah',3],[4.,5.,'bah',3],[4.,5.,'bah',3]],columns=['new_col','new_col','string','foo2'])\n assert_frame_equal(df,expected)\n\n # dup across dtypes\n df = DataFrame([[1,1,1.,5],[1,1,2.,5],[2,1,3.,5]],columns=['foo','bar','foo','hello'])\n check(df)\n\n df['foo2'] = 7.\n expected = DataFrame([[1,1,1.,5,7.],[1,1,2.,5,7.],[2,1,3.,5,7.]],columns=['foo','bar','foo','hello','foo2'])\n check(df,expected)\n\n result = df['foo']\n expected = DataFrame([[1,1.],[1,2.],[2,3.]],columns=['foo','foo'])\n check(result,expected)\n\n # multiple replacements\n df['foo'] = 'string'\n expected = 
DataFrame([['string',1,'string',5,7.],['string',1,'string',5,7.],['string',1,'string',5,7.]],columns=['foo','bar','foo','hello','foo2'])\n check(df,expected)\n\n del df['foo']\n expected = DataFrame([[1,5,7.],[1,5,7.],[1,5,7.]],columns=['bar','hello','foo2'])\n check(df,expected)\n\n # values\n df = DataFrame([[1,2.5],[3,4.5]], index=[1,2], columns=['x','x'])\n result = df.values\n expected = np.array([[1,2.5],[3,4.5]])\n self.assertTrue((result == expected).all().all())\n\n # rename, GH 4403\n df4 = DataFrame({'TClose': [22.02],\n 'RT': [0.0454],\n 'TExg': [0.0422]},\n index=MultiIndex.from_tuples([(600809, 20130331)], names=['STK_ID', 'RPT_Date']))\n\n df5 = DataFrame({'STK_ID': [600809] * 3,\n 'RPT_Date': [20120930,20121231,20130331],\n 'STK_Name': [u('饡驦'), u('饡驦'), u('饡驦')],\n 'TClose': [38.05, 41.66, 30.01]},\n index=MultiIndex.from_tuples([(600809, 20120930), (600809, 20121231),(600809,20130331)], names=['STK_ID', 'RPT_Date']))\n\n k = pd.merge(df4,df5,how='inner',left_index=True,right_index=True)\n result = k.rename(columns={'TClose_x':'TClose', 'TClose_y':'QT_Close'})\n str(result)\n result.dtypes\n\n expected = DataFrame([[0.0454, 22.02, 0.0422, 20130331, 600809, u('饡驦'), 30.01 ]],\n columns=['RT','TClose','TExg','RPT_Date','STK_ID','STK_Name','QT_Close']).set_index(['STK_ID','RPT_Date'],drop=False)\n assert_frame_equal(result,expected)\n\n # reindex is invalid!\n df = DataFrame([[1,5,7.],[1,5,7.],[1,5,7.]],columns=['bar','a','a'])\n self.assertRaises(ValueError, df.reindex, columns=['bar'])\n self.assertRaises(ValueError, df.reindex, columns=['bar','foo'])\n\n # drop\n df = DataFrame([[1,5,7.],[1,5,7.],[1,5,7.]],columns=['bar','a','a'])\n result = df.drop(['a'],axis=1)\n expected = DataFrame([[1],[1],[1]],columns=['bar'])\n check(result,expected)\n result = df.drop('a',axis=1)\n check(result,expected)\n\n # describe\n df = DataFrame([[1,1,1],[2,2,2],[3,3,3]],columns=['bar','a','a'],dtype='float64')\n result = df.describe()\n s = df.iloc[:,0].describe()\n expected = pd.concat([ s, s, s],keys=df.columns,axis=1)\n check(result,expected)\n\n # check column dups with index equal and not equal to df's index\n df = DataFrame(np.random.randn(5, 3), index=['a', 'b', 'c', 'd', 'e'],\n columns=['A', 'B', 'A'])\n for index in [df.index, pd.Index(list('edcba'))]:\n this_df = df.copy()\n expected_ser = pd.Series(index.values, index=this_df.index)\n expected_df = DataFrame.from_items([('A', expected_ser),\n ('B', this_df['B']),\n ('A', expected_ser)])\n this_df['A'] = index\n check(this_df, expected_df)\n\n # operations\n for op in ['__add__','__mul__','__sub__','__truediv__']:\n df = DataFrame(dict(A = np.arange(10), B = np.random.rand(10)))\n expected = getattr(df,op)(df)\n expected.columns = ['A','A']\n df.columns = ['A','A']\n result = getattr(df,op)(df)\n check(result,expected)\n\n # multiple assignments that change dtypes\n # the location indexer is a slice\n # GH 6120\n df = DataFrame(np.random.randn(5,2), columns=['that', 'that'])\n expected = DataFrame(1.0, index=range(5), columns=['that', 'that'])\n\n df['that'] = 1.0\n check(df, expected)\n\n df = DataFrame(np.random.rand(5,2), columns=['that', 'that'])\n expected = DataFrame(1, index=range(5), columns=['that', 'that'])\n\n df['that'] = 1\n check(df, expected)\n\n def test_column_dups2(self):\n\n # drop buggy GH 6240\n df = DataFrame({'A' : np.random.randn(5),\n 'B' : np.random.randn(5),\n 'C' : np.random.randn(5),\n 'D' : ['a','b','c','d','e'] })\n\n expected = df.take([0,1,1], axis=1)\n df2 = df.take([2,0,1,2,1], axis=1)\n result 
= df2.drop('C',axis=1)\n assert_frame_equal(result, expected)\n\n # dropna\n df = DataFrame({'A' : np.random.randn(5),\n 'B' : np.random.randn(5),\n 'C' : np.random.randn(5),\n 'D' : ['a','b','c','d','e'] })\n df.iloc[2,[0,1,2]] = np.nan\n df.iloc[0,0] = np.nan\n df.iloc[1,1] = np.nan\n df.iloc[:,3] = np.nan\n expected = df.dropna(subset=['A','B','C'],how='all')\n expected.columns = ['A','A','B','C']\n\n df.columns = ['A','A','B','C']\n\n result = df.dropna(subset=['A','C'],how='all')\n assert_frame_equal(result, expected)\n\n def test_column_dups_indexing(self):\n def check(result, expected=None):\n if expected is not None:\n assert_frame_equal(result,expected)\n result.dtypes\n str(result)\n\n # boolean indexing\n # GH 4879\n dups = ['A', 'A', 'C', 'D']\n df = DataFrame(np.arange(12).reshape(3,4), columns=['A', 'B', 'C', 'D'],dtype='float64')\n expected = df[df.C > 6]\n expected.columns = dups\n df = DataFrame(np.arange(12).reshape(3,4), columns=dups,dtype='float64')\n result = df[df.C > 6]\n check(result,expected)\n\n # where\n df = DataFrame(np.arange(12).reshape(3,4), columns=['A', 'B', 'C', 'D'],dtype='float64')\n expected = df[df > 6]\n expected.columns = dups\n df = DataFrame(np.arange(12).reshape(3,4), columns=dups,dtype='float64')\n result = df[df > 6]\n check(result,expected)\n\n # boolean with the duplicate raises\n df = DataFrame(np.arange(12).reshape(3,4), columns=dups,dtype='float64')\n self.assertRaises(ValueError, lambda : df[df.A > 6])\n\n # dup aligining operations should work\n # GH 5185\n df1 = DataFrame([1, 2, 3, 4, 5], index=[1, 2, 1, 2, 3])\n df2 = DataFrame([1, 2, 3], index=[1, 2, 3])\n expected = DataFrame([0,2,0,2,2],index=[1,1,2,2,3])\n result = df1.sub(df2)\n assert_frame_equal(result,expected)\n\n # equality\n df1 = DataFrame([[1,2],[2,np.nan],[3,4],[4,4]],columns=['A','B'])\n df2 = DataFrame([[0,1],[2,4],[2,np.nan],[4,5]],columns=['A','A'])\n\n # not-comparing like-labelled\n self.assertRaises(ValueError, lambda : df1 == df2)\n\n df1r = df1.reindex_like(df2)\n result = df1r == df2\n expected = DataFrame([[False,True],[True,False],[False,False],[True,False]],columns=['A','A'])\n assert_frame_equal(result,expected)\n\n # mixed column selection\n # GH 5639\n dfbool = DataFrame({'one' : Series([True, True, False], index=['a', 'b', 'c']),\n 'two' : Series([False, False, True, False], index=['a', 'b', 'c', 'd']),\n 'three': Series([False, True, True, True], index=['a', 'b', 'c', 'd'])})\n expected = pd.concat([dfbool['one'],dfbool['three'],dfbool['one']],axis=1)\n result = dfbool[['one', 'three', 'one']]\n check(result,expected)\n\n # multi-axis dups\n # GH 6121\n df = DataFrame(np.arange(25.).reshape(5,5),\n index=['a', 'b', 'c', 'd', 'e'],\n columns=['A', 'B', 'C', 'D', 'E'])\n z = df[['A', 'C', 'A']].copy()\n expected = z.ix[['a', 'c', 'a']]\n\n df = DataFrame(np.arange(25.).reshape(5,5),\n index=['a', 'b', 'c', 'd', 'e'],\n columns=['A', 'B', 'C', 'D', 'E'])\n z = df[['A', 'C', 'A']]\n result = z.ix[['a', 'c', 'a']]\n check(result,expected)\n\n\n def test_column_dups_indexing2(self):\n\n # GH 8363\n # datetime ops with a non-unique index\n df = DataFrame({'A' : np.arange(5,dtype='int64'),\n 'B' : np.arange(1,6,dtype='int64')},\n index=[2,2,3,3,4])\n result = df.B-df.A\n expected = Series(1,index=[2,2,3,3,4])\n assert_series_equal(result,expected)\n\n df = DataFrame({'A' : date_range('20130101',periods=5), 'B' : date_range('20130101 09:00:00', periods=5)},index=[2,2,3,3,4])\n result = df.B-df.A\n expected = Series(Timedelta('9 hours'),index=[2,2,3,3,4])\n 
assert_series_equal(result,expected)\n\n def test_insert_benchmark(self):\n # from the vb_suite/frame_methods/frame_insert_columns\n N = 10\n K = 5\n df = DataFrame(index=lrange(N))\n new_col = np.random.randn(N)\n for i in range(K):\n df[i] = new_col\n expected = DataFrame(np.repeat(new_col,K).reshape(N,K),index=lrange(N))\n assert_frame_equal(df,expected)\n\n def test_constructor_single_value(self):\n\n # expecting single value upcasting here\n df = DataFrame(0., index=[1, 2, 3], columns=['a', 'b', 'c'])\n assert_frame_equal(df, DataFrame(np.zeros(df.shape).astype('float64'), df.index,\n df.columns))\n\n df = DataFrame(0, index=[1, 2, 3], columns=['a', 'b', 'c'])\n assert_frame_equal(df, DataFrame(np.zeros(df.shape).astype('int64'), df.index,\n df.columns))\n\n\n df = DataFrame('a', index=[1, 2], columns=['a', 'c'])\n assert_frame_equal(df, DataFrame(np.array([['a', 'a'],\n ['a', 'a']],\n dtype=object),\n index=[1, 2],\n columns=['a', 'c']))\n\n self.assertRaises(com.PandasError, DataFrame, 'a', [1, 2])\n self.assertRaises(com.PandasError, DataFrame, 'a', columns=['a', 'c'])\n with tm.assertRaisesRegexp(TypeError, 'incompatible data and dtype'):\n DataFrame('a', [1, 2], ['a', 'c'], float)\n\n def test_constructor_with_datetimes(self):\n intname = np.dtype(np.int_).name\n floatname = np.dtype(np.float_).name\n datetime64name = np.dtype('M8[ns]').name\n objectname = np.dtype(np.object_).name\n\n # single item\n df = DataFrame({'A' : 1, 'B' : 'foo', 'C' : 'bar', 'D' : Timestamp(\"20010101\"), 'E' : datetime(2001,1,2,0,0) },\n index=np.arange(10))\n result = df.get_dtype_counts()\n expected = Series({'int64': 1, datetime64name: 2, objectname : 2})\n result.sort_index()\n expected.sort_index()\n assert_series_equal(result, expected)\n\n # check with ndarray construction ndim==0 (e.g. 
we are passing a ndim 0 ndarray with a dtype specified)\n df = DataFrame({'a': 1., 'b': 2, 'c': 'foo', floatname : np.array(1.,dtype=floatname),\n intname : np.array(1,dtype=intname)}, index=np.arange(10))\n result = df.get_dtype_counts()\n expected = { objectname : 1 }\n if intname == 'int64':\n expected['int64'] = 2\n else:\n expected['int64'] = 1\n expected[intname] = 1\n if floatname == 'float64':\n expected['float64'] = 2\n else:\n expected['float64'] = 1\n expected[floatname] = 1\n\n result.sort_index()\n expected = Series(expected)\n expected.sort_index()\n assert_series_equal(result, expected)\n\n # check with ndarray construction ndim>0\n df = DataFrame({'a': 1., 'b': 2, 'c': 'foo', floatname : np.array([1.]*10,dtype=floatname),\n intname : np.array([1]*10,dtype=intname)}, index=np.arange(10))\n result = df.get_dtype_counts()\n result.sort_index()\n assert_series_equal(result, expected)\n\n # GH 2809\n ind = date_range(start=\"2000-01-01\", freq=\"D\", periods=10)\n datetimes = [ts.to_pydatetime() for ts in ind]\n datetime_s = Series(datetimes)\n self.assertEqual(datetime_s.dtype, 'M8[ns]')\n df = DataFrame({'datetime_s':datetime_s})\n result = df.get_dtype_counts()\n expected = Series({ datetime64name : 1 })\n result.sort_index()\n expected.sort_index()\n assert_series_equal(result, expected)\n\n # GH 2810\n ind = date_range(start=\"2000-01-01\", freq=\"D\", periods=10)\n datetimes = [ts.to_pydatetime() for ts in ind]\n dates = [ts.date() for ts in ind]\n df = DataFrame({'datetimes': datetimes, 'dates':dates})\n result = df.get_dtype_counts()\n expected = Series({ datetime64name : 1, objectname : 1 })\n result.sort_index()\n expected.sort_index()\n assert_series_equal(result, expected)\n\n # GH 7594\n # don't coerce tz-aware\n import pytz\n tz = pytz.timezone('US/Eastern')\n dt = tz.localize(datetime(2012, 1, 1))\n df = DataFrame({'End Date': dt}, index=[0])\n self.assertEqual(df.iat[0,0],dt)\n assert_series_equal(df.dtypes,Series({'End Date' : np.dtype('object') }))\n\n df = DataFrame([{'End Date': dt}])\n self.assertEqual(df.iat[0,0],dt)\n assert_series_equal(df.dtypes,Series({'End Date' : np.dtype('object') }))\n\n # tz-aware (UTC and other tz's)\n # GH 8411\n dr = date_range('20130101',periods=3)\n df = DataFrame({ 'value' : dr})\n self.assertTrue(df.iat[0,0].tz is None)\n dr = date_range('20130101',periods=3,tz='UTC')\n df = DataFrame({ 'value' : dr})\n self.assertTrue(str(df.iat[0,0].tz) == 'UTC')\n dr = date_range('20130101',periods=3,tz='US/Eastern')\n df = DataFrame({ 'value' : dr})\n self.assertTrue(str(df.iat[0,0].tz) == 'US/Eastern')\n\n # GH 7822\n # preserver an index with a tz on dict construction\n i = date_range('1/1/2011', periods=5, freq='10s', tz = 'US/Eastern')\n\n expected = DataFrame( {'a' : i.to_series(keep_tz=True).reset_index(drop=True) })\n df = DataFrame()\n df['a'] = i\n assert_frame_equal(df, expected)\n\n df = DataFrame( {'a' : i } )\n assert_frame_equal(df, expected)\n\n # multiples\n i_no_tz = date_range('1/1/2011', periods=5, freq='10s')\n df = DataFrame( {'a' : i, 'b' : i_no_tz } )\n expected = DataFrame( {'a' : i.to_series(keep_tz=True).reset_index(drop=True), 'b': i_no_tz })\n assert_frame_equal(df, expected)\n\n def test_constructor_for_list_with_dtypes(self):\n intname = np.dtype(np.int_).name\n floatname = np.dtype(np.float_).name\n datetime64name = np.dtype('M8[ns]').name\n objectname = np.dtype(np.object_).name\n\n # test list of lists/ndarrays\n df = DataFrame([np.arange(5) for x in range(5)])\n result = df.get_dtype_counts()\n expected 
= Series({'int64' : 5})\n\n df = DataFrame([np.array(np.arange(5),dtype='int32') for x in range(5)])\n result = df.get_dtype_counts()\n expected = Series({'int32' : 5})\n\n # overflow issue? (we always expecte int64 upcasting here)\n df = DataFrame({'a' : [2**31,2**31+1]})\n result = df.get_dtype_counts()\n expected = Series({'int64' : 1 })\n assert_series_equal(result, expected)\n\n # GH #2751 (construction with no index specified), make sure we cast to platform values\n df = DataFrame([1, 2])\n result = df.get_dtype_counts()\n expected = Series({'int64': 1 })\n assert_series_equal(result, expected)\n\n df = DataFrame([1.,2.])\n result = df.get_dtype_counts()\n expected = Series({'float64' : 1 })\n assert_series_equal(result, expected)\n\n df = DataFrame({'a' : [1, 2]})\n result = df.get_dtype_counts()\n expected = Series({'int64' : 1})\n assert_series_equal(result, expected)\n\n df = DataFrame({'a' : [1., 2.]})\n result = df.get_dtype_counts()\n expected = Series({'float64' : 1})\n assert_series_equal(result, expected)\n\n df = DataFrame({'a' : 1 }, index=lrange(3))\n result = df.get_dtype_counts()\n expected = Series({'int64': 1})\n assert_series_equal(result, expected)\n\n df = DataFrame({'a' : 1. }, index=lrange(3))\n result = df.get_dtype_counts()\n expected = Series({'float64': 1 })\n assert_series_equal(result, expected)\n\n # with object list\n df = DataFrame({'a':[1,2,4,7], 'b':[1.2, 2.3, 5.1, 6.3],\n 'c':list('abcd'), 'd':[datetime(2000,1,1) for i in range(4)],\n 'e' : [1.,2,4.,7]})\n result = df.get_dtype_counts()\n expected = Series({'int64': 1, 'float64' : 2, datetime64name: 1, objectname : 1})\n result.sort_index()\n expected.sort_index()\n assert_series_equal(result, expected)\n\n def test_not_hashable(self):\n df = pd.DataFrame([1])\n self.assertRaises(TypeError, hash, df)\n self.assertRaises(TypeError, hash, self.empty)\n\n def test_timedeltas(self):\n\n df = DataFrame(dict(A = Series(date_range('2012-1-1', periods=3, freq='D')),\n B = Series([ timedelta(days=i) for i in range(3) ])))\n result = df.get_dtype_counts()\n expected = Series({'datetime64[ns]': 1, 'timedelta64[ns]' : 1 })\n result.sort()\n expected.sort()\n assert_series_equal(result, expected)\n\n df['C'] = df['A'] + df['B']\n expected = Series({'datetime64[ns]': 2, 'timedelta64[ns]' : 1 })\n result = df.get_dtype_counts()\n result.sort()\n expected.sort()\n assert_series_equal(result, expected)\n\n # mixed int types\n df['D'] = 1\n expected = Series({'datetime64[ns]': 2, 'timedelta64[ns]' : 1, 'int64' : 1 })\n result = df.get_dtype_counts()\n result.sort()\n expected.sort()\n assert_series_equal(result, expected)\n\n def test_operators_timedelta64(self):\n\n from datetime import datetime, timedelta\n df = DataFrame(dict(A = date_range('2012-1-1', periods=3, freq='D'),\n B = date_range('2012-1-2', periods=3, freq='D'),\n C = Timestamp('20120101')-timedelta(minutes=5,seconds=5)))\n\n diffs = DataFrame(dict(A = df['A']-df['C'],\n B = df['A']-df['B']))\n\n\n # min\n result = diffs.min()\n self.assertEqual(result[0], diffs.ix[0,'A'])\n self.assertEqual(result[1], diffs.ix[0,'B'])\n\n result = diffs.min(axis=1)\n self.assertTrue((result == diffs.ix[0,'B']).all() == True)\n\n # max\n result = diffs.max()\n self.assertEqual(result[0], diffs.ix[2,'A'])\n self.assertEqual(result[1], diffs.ix[2,'B'])\n\n result = diffs.max(axis=1)\n self.assertTrue((result == diffs['A']).all() == True)\n\n # abs\n result = diffs.abs()\n result2 = abs(diffs)\n expected = DataFrame(dict(A = df['A']-df['C'],\n B = df['B']-df['A']))\n 
assert_frame_equal(result,expected)\n assert_frame_equal(result2, expected)\n\n # mixed frame\n mixed = diffs.copy()\n mixed['C'] = 'foo'\n mixed['D'] = 1\n mixed['E'] = 1.\n mixed['F'] = Timestamp('20130101')\n\n # results in an object array\n from pandas.tseries.timedeltas import _coerce_scalar_to_timedelta_type\n result = mixed.min()\n expected = Series([_coerce_scalar_to_timedelta_type(timedelta(seconds=5*60+5)),\n _coerce_scalar_to_timedelta_type(timedelta(days=-1)),\n 'foo',\n 1,\n 1.0,\n Timestamp('20130101')],\n index=mixed.columns)\n assert_series_equal(result,expected)\n\n # excludes numeric\n result = mixed.min(axis=1)\n expected = Series([1, 1, 1.],index=[0, 1, 2])\n assert_series_equal(result,expected)\n\n # works when only those columns are selected\n result = mixed[['A','B']].min(1)\n expected = Series([ timedelta(days=-1) ] * 3)\n assert_series_equal(result,expected)\n\n result = mixed[['A','B']].min()\n expected = Series([ timedelta(seconds=5*60+5), timedelta(days=-1) ],index=['A','B'])\n assert_series_equal(result,expected)\n\n # GH 3106\n df = DataFrame({'time' : date_range('20130102',periods=5),\n 'time2' : date_range('20130105',periods=5) })\n df['off1'] = df['time2']-df['time']\n self.assertEqual(df['off1'].dtype, 'timedelta64[ns]')\n\n df['off2'] = df['time']-df['time2']\n df._consolidate_inplace()\n self.assertTrue(df['off1'].dtype == 'timedelta64[ns]')\n self.assertTrue(df['off2'].dtype == 'timedelta64[ns]')\n\n def test_datetimelike_setitem_with_inference(self):\n # GH 7592\n # assignment of timedeltas with NaT\n\n one_hour = timedelta(hours=1)\n df = DataFrame(index=date_range('20130101',periods=4))\n df['A'] = np.array([1*one_hour]*4, dtype='m8[ns]')\n df.loc[:,'B'] = np.array([2*one_hour]*4, dtype='m8[ns]')\n df.loc[:3,'C'] = np.array([3*one_hour]*3, dtype='m8[ns]')\n df.ix[:,'D'] = np.array([4*one_hour]*4, dtype='m8[ns]')\n df.ix[:3,'E'] = np.array([5*one_hour]*3, dtype='m8[ns]')\n df['F'] = np.timedelta64('NaT')\n df.ix[:-1,'F'] = np.array([6*one_hour]*3, dtype='m8[ns]')\n df.ix[-3:,'G'] = date_range('20130101',periods=3)\n df['H'] = np.datetime64('NaT')\n result = df.dtypes\n expected = Series([np.dtype('timedelta64[ns]')]*6+[np.dtype('datetime64[ns]')]*2,index=list('ABCDEFGH'))\n assert_series_equal(result,expected)\n\n def test_new_empty_index(self):\n df1 = DataFrame(randn(0, 3))\n df2 = DataFrame(randn(0, 3))\n df1.index.name = 'foo'\n self.assertIsNone(df2.index.name)\n\n def test_astype(self):\n casted = self.frame.astype(int)\n expected = DataFrame(self.frame.values.astype(int),\n index=self.frame.index,\n columns=self.frame.columns)\n assert_frame_equal(casted, expected)\n\n casted = self.frame.astype(np.int32)\n expected = DataFrame(self.frame.values.astype(np.int32),\n index=self.frame.index,\n columns=self.frame.columns)\n assert_frame_equal(casted, expected)\n\n self.frame['foo'] = '5'\n casted = self.frame.astype(int)\n expected = DataFrame(self.frame.values.astype(int),\n index=self.frame.index,\n columns=self.frame.columns)\n assert_frame_equal(casted, expected)\n\n # mixed casting\n def _check_cast(df, v):\n self.assertEqual(list(set([ s.dtype.name for _, s in compat.iteritems(df) ]))[0], v)\n\n mn = self.all_mixed._get_numeric_data().copy()\n mn['little_float'] = np.array(12345.,dtype='float16')\n mn['big_float'] = np.array(123456789101112.,dtype='float64')\n\n casted = mn.astype('float64')\n _check_cast(casted, 'float64')\n\n casted = mn.astype('int64')\n _check_cast(casted, 'int64')\n\n casted = self.mixed_float.reindex(columns = 
['A','B']).astype('float32')\n _check_cast(casted, 'float32')\n\n casted = mn.reindex(columns = ['little_float']).astype('float16')\n _check_cast(casted, 'float16')\n\n casted = self.mixed_float.reindex(columns = ['A','B']).astype('float16')\n _check_cast(casted, 'float16')\n\n casted = mn.astype('float32')\n _check_cast(casted, 'float32')\n\n casted = mn.astype('int32')\n _check_cast(casted, 'int32')\n\n # to object\n casted = mn.astype('O')\n _check_cast(casted, 'object')\n\n def test_astype_with_exclude_string(self):\n df = self.frame.copy()\n expected = self.frame.astype(int)\n df['string'] = 'foo'\n casted = df.astype(int, raise_on_error = False)\n\n expected['string'] = 'foo'\n assert_frame_equal(casted, expected)\n\n df = self.frame.copy()\n expected = self.frame.astype(np.int32)\n df['string'] = 'foo'\n casted = df.astype(np.int32, raise_on_error = False)\n\n expected['string'] = 'foo'\n assert_frame_equal(casted, expected)\n\n def test_astype_with_view(self):\n\n tf = self.mixed_float.reindex(columns = ['A','B','C'])\n\n casted = tf.astype(np.int64)\n\n casted = tf.astype(np.float32)\n\n # this is the only real reason to do it this way\n tf = np.round(self.frame).astype(np.int32)\n casted = tf.astype(np.float32, copy = False)\n\n tf = self.frame.astype(np.float64)\n casted = tf.astype(np.int64, copy = False)\n\n def test_astype_cast_nan_int(self):\n df = DataFrame(data={\"Values\": [1.0, 2.0, 3.0, np.nan]})\n self.assertRaises(ValueError, df.astype, np.int64)\n\n def test_array_interface(self):\n result = np.sqrt(self.frame)\n tm.assert_isinstance(result, type(self.frame))\n self.assertIs(result.index, self.frame.index)\n self.assertIs(result.columns, self.frame.columns)\n\n assert_frame_equal(result, self.frame.apply(np.sqrt))\n\n def test_pickle(self):\n unpickled = self.round_trip_pickle(self.mixed_frame)\n assert_frame_equal(self.mixed_frame, unpickled)\n\n # buglet\n self.mixed_frame._data.ndim\n\n # empty\n unpickled = self.round_trip_pickle(self.empty)\n repr(unpickled)\n\n def test_to_dict(self):\n test_data = {\n 'A': {'1': 1, '2': 2},\n 'B': {'1': '1', '2': '2', '3': '3'},\n }\n recons_data = DataFrame(test_data).to_dict()\n\n for k, v in compat.iteritems(test_data):\n for k2, v2 in compat.iteritems(v):\n self.assertEqual(v2, recons_data[k][k2])\n\n recons_data = DataFrame(test_data).to_dict(\"l\")\n\n for k, v in compat.iteritems(test_data):\n for k2, v2 in compat.iteritems(v):\n self.assertEqual(v2, recons_data[k][int(k2) - 1])\n\n recons_data = DataFrame(test_data).to_dict(\"s\")\n\n for k, v in compat.iteritems(test_data):\n for k2, v2 in compat.iteritems(v):\n self.assertEqual(v2, recons_data[k][k2])\n\n recons_data = DataFrame(test_data).to_dict(\"sp\")\n\n expected_split = {'columns': ['A', 'B'], 'index': ['1', '2', '3'],\n 'data': [[1.0, '1'], [2.0, '2'], [nan, '3']]}\n\n tm.assert_almost_equal(recons_data, expected_split)\n\n recons_data = DataFrame(test_data).to_dict(\"r\")\n\n expected_records = [{'A': 1.0, 'B': '1'},\n {'A': 2.0, 'B': '2'},\n {'A': nan, 'B': '3'}]\n\n tm.assert_almost_equal(recons_data, expected_records)\n\n def test_to_dict_invalid_orient(self):\n df = DataFrame({'A':[0, 1]})\n self.assertRaises(ValueError, df.to_dict, orient='invalid')\n\n def test_to_records_dt64(self):\n df = DataFrame([[\"one\", \"two\", \"three\"],\n [\"four\", \"five\", \"six\"]],\n index=date_range(\"2012-01-01\", \"2012-01-02\"))\n self.assertEqual(df.to_records()['index'][0], df.index[0])\n\n rs = df.to_records(convert_datetime64=False)\n 
self.assertEqual(rs['index'][0], df.index.values[0])\n\n def test_to_records_with_multindex(self):\n # GH3189\n index = [['bar', 'bar', 'baz', 'baz', 'foo', 'foo', 'qux', 'qux'],\n ['one', 'two', 'one', 'two', 'one', 'two', 'one', 'two']]\n data = np.zeros((8, 4))\n df = DataFrame(data, index=index)\n r = df.to_records(index=True)['level_0']\n self.assertTrue('bar' in r)\n self.assertTrue('one' not in r)\n\n def test_to_records_with_Mapping_type(self):\n import email\n from email.parser import Parser\n import collections\n\n collections.Mapping.register(email.message.Message)\n\n headers = Parser().parsestr('From: <[email protected]>\\n'\n 'To: <[email protected]>\\n'\n 'Subject: Test message\\n'\n '\\n'\n 'Body would go here\\n')\n\n frame = DataFrame.from_records([headers])\n all( x in frame for x in ['Type','Subject','From'])\n\n def test_from_records_to_records(self):\n # from numpy documentation\n arr = np.zeros((2,), dtype=('i4,f4,a10'))\n arr[:] = [(1, 2., 'Hello'), (2, 3., \"World\")]\n\n frame = DataFrame.from_records(arr)\n\n index = np.arange(len(arr))[::-1]\n indexed_frame = DataFrame.from_records(arr, index=index)\n self.assert_numpy_array_equal(indexed_frame.index, index)\n\n # without names, it should go to last ditch\n arr2 = np.zeros((2,3))\n tm.assert_frame_equal(DataFrame.from_records(arr2), DataFrame(arr2))\n\n # wrong length\n msg = r'Shape of passed values is \\(3, 2\\), indices imply \\(3, 1\\)'\n with assertRaisesRegexp(ValueError, msg):\n DataFrame.from_records(arr, index=index[:-1])\n\n indexed_frame = DataFrame.from_records(arr, index='f1')\n\n # what to do?\n records = indexed_frame.to_records()\n self.assertEqual(len(records.dtype.names), 3)\n\n records = indexed_frame.to_records(index=False)\n self.assertEqual(len(records.dtype.names), 2)\n self.assertNotIn('index', records.dtype.names)\n\n def test_from_records_nones(self):\n tuples = [(1, 2, None, 3),\n (1, 2, None, 3),\n (None, 2, 5, 3)]\n\n df = DataFrame.from_records(tuples, columns=['a', 'b', 'c', 'd'])\n self.assertTrue(np.isnan(df['c'][0]))\n\n def test_from_records_iterator(self):\n arr = np.array([(1.0, 1.0, 2, 2), (3.0, 3.0, 4, 4), (5., 5., 6, 6), (7., 7., 8, 8)],\n dtype=[('x', np.float64), ('u', np.float32), ('y', np.int64), ('z', np.int32) ])\n df = DataFrame.from_records(iter(arr), nrows=2)\n xp = DataFrame({'x': np.array([1.0, 3.0], dtype=np.float64),\n 'u': np.array([1.0, 3.0], dtype=np.float32),\n 'y': np.array([2, 4], dtype=np.int64),\n 'z': np.array([2, 4], dtype=np.int32)})\n assert_frame_equal(df.reindex_like(xp), xp)\n\n # no dtypes specified here, so just compare with the default\n arr = [(1.0, 2), (3.0, 4), (5., 6), (7., 8)]\n df = DataFrame.from_records(iter(arr), columns=['x', 'y'],\n nrows=2)\n assert_frame_equal(df, xp.reindex(columns=['x','y']), check_dtype=False)\n\n def test_from_records_tuples_generator(self):\n def tuple_generator(length):\n for i in range(length):\n letters = 'ABCDEFGHIJKLMNOPQRSTUVWXYZ'\n yield (i, letters[i % len(letters)], i/length)\n\n columns_names = ['Integer', 'String', 'Float']\n columns = [[i[j] for i in tuple_generator(10)] for j in range(len(columns_names))]\n data = {'Integer': columns[0], 'String': columns[1], 'Float': columns[2]}\n expected = DataFrame(data, columns=columns_names)\n\n generator = tuple_generator(10)\n result = DataFrame.from_records(generator, columns=columns_names)\n assert_frame_equal(result, expected)\n\n def test_from_records_lists_generator(self):\n def list_generator(length):\n for i in range(length):\n letters = 
'ABCDEFGHIJKLMNOPQRSTUVWXYZ'\n yield [i, letters[i % len(letters)], i/length]\n\n columns_names = ['Integer', 'String', 'Float']\n columns = [[i[j] for i in list_generator(10)] for j in range(len(columns_names))]\n data = {'Integer': columns[0], 'String': columns[1], 'Float': columns[2]}\n expected = DataFrame(data, columns=columns_names)\n\n generator = list_generator(10)\n result = DataFrame.from_records(generator, columns=columns_names)\n assert_frame_equal(result, expected)\n\n def test_from_records_columns_not_modified(self):\n tuples = [(1, 2, 3),\n (1, 2, 3),\n (2, 5, 3)]\n\n columns = ['a', 'b', 'c']\n original_columns = list(columns)\n df = DataFrame.from_records(tuples, columns=columns, index='a')\n self.assertEqual(columns, original_columns)\n\n def test_from_records_decimal(self):\n from decimal import Decimal\n\n tuples = [(Decimal('1.5'),), (Decimal('2.5'),), (None,)]\n\n df = DataFrame.from_records(tuples, columns=['a'])\n self.assertEqual(df['a'].dtype, object)\n\n df = DataFrame.from_records(tuples, columns=['a'], coerce_float=True)\n self.assertEqual(df['a'].dtype, np.float64)\n self.assertTrue(np.isnan(df['a'].values[-1]))\n\n def test_from_records_duplicates(self):\n result = DataFrame.from_records([(1, 2, 3), (4, 5, 6)],\n columns=['a', 'b', 'a'])\n\n expected = DataFrame([(1, 2, 3), (4, 5, 6)],\n columns=['a', 'b', 'a'])\n\n assert_frame_equal(result, expected)\n\n def test_from_records_set_index_name(self):\n def create_dict(order_id):\n return {'order_id': order_id, 'quantity': np.random.randint(1, 10),\n 'price': np.random.randint(1, 10)}\n documents = [create_dict(i) for i in range(10)]\n # demo missing data\n documents.append({'order_id': 10, 'quantity': 5})\n\n result = DataFrame.from_records(documents, index='order_id')\n self.assertEqual(result.index.name, 'order_id')\n\n # MultiIndex\n result = DataFrame.from_records(documents,\n index=['order_id', 'quantity'])\n self.assertEqual(result.index.names, ('order_id', 'quantity'))\n\n def test_from_records_misc_brokenness(self):\n # #2179\n\n data = {1: ['foo'], 2: ['bar']}\n\n result = DataFrame.from_records(data, columns=['a', 'b'])\n exp = DataFrame(data, columns=['a', 'b'])\n assert_frame_equal(result, exp)\n\n # overlap in index/index_names\n\n data = {'a': [1, 2, 3], 'b': [4, 5, 6]}\n\n result = DataFrame.from_records(data, index=['a', 'b', 'c'])\n exp = DataFrame(data, index=['a', 'b', 'c'])\n assert_frame_equal(result, exp)\n\n\n # GH 2623\n rows = []\n rows.append([datetime(2010, 1, 1), 1])\n rows.append([datetime(2010, 1, 2), 'hi']) # test col upconverts to obj\n df2_obj = DataFrame.from_records(rows, columns=['date', 'test'])\n results = df2_obj.get_dtype_counts()\n expected = Series({ 'datetime64[ns]' : 1, 'object' : 1 })\n\n rows = []\n rows.append([datetime(2010, 1, 1), 1])\n rows.append([datetime(2010, 1, 2), 1])\n df2_obj = DataFrame.from_records(rows, columns=['date', 'test'])\n results = df2_obj.get_dtype_counts()\n expected = Series({ 'datetime64[ns]' : 1, 'int64' : 1 })\n\n def test_from_records_empty(self):\n # 3562\n result = DataFrame.from_records([], columns=['a','b','c'])\n expected = DataFrame(columns=['a','b','c'])\n assert_frame_equal(result, expected)\n\n result = DataFrame.from_records([], columns=['a','b','b'])\n expected = DataFrame(columns=['a','b','b'])\n assert_frame_equal(result, expected)\n\n def test_from_records_empty_with_nonempty_fields_gh3682(self):\n a = np.array([(1, 2)], dtype=[('id', np.int64), ('value', np.int64)])\n df = DataFrame.from_records(a, index='id')\n 
assert_array_equal(df.index, Index([1], name='id'))\n self.assertEqual(df.index.name, 'id')\n assert_array_equal(df.columns, Index(['value']))\n\n b = np.array([], dtype=[('id', np.int64), ('value', np.int64)])\n df = DataFrame.from_records(b, index='id')\n assert_array_equal(df.index, Index([], name='id'))\n self.assertEqual(df.index.name, 'id')\n\n def test_from_records_with_datetimes(self):\n if sys.version < LooseVersion('2.7'):\n raise nose.SkipTest('rec arrays dont work properly with py2.6')\n\n # this may fail on certain platforms because of a numpy issue\n # related GH6140\n if not is_little_endian():\n raise nose.SkipTest(\"known failure of test on non-little endian\")\n\n # construction with a null in a recarray\n # GH 6140\n expected = DataFrame({ 'EXPIRY' : [datetime(2005, 3, 1, 0, 0), None ]})\n\n arrdata = [np.array([datetime(2005, 3, 1, 0, 0), None])]\n dtypes = [('EXPIRY', '<M8[ns]')]\n\n try:\n recarray = np.core.records.fromarrays(arrdata, dtype=dtypes)\n except (ValueError):\n raise nose.SkipTest(\"known failure of numpy rec array creation\")\n\n result = DataFrame.from_records(recarray)\n assert_frame_equal(result,expected)\n\n # coercion should work too\n arrdata = [np.array([datetime(2005, 3, 1, 0, 0), None])]\n dtypes = [('EXPIRY', '<M8[m]')]\n recarray = np.core.records.fromarrays(arrdata, dtype=dtypes)\n result = DataFrame.from_records(recarray)\n assert_frame_equal(result,expected)\n\n def test_to_records_floats(self):\n df = DataFrame(np.random.rand(10, 10))\n df.to_records()\n\n def test_to_recods_index_name(self):\n df = DataFrame(np.random.randn(3, 3))\n df.index.name = 'X'\n rs = df.to_records()\n self.assertIn('X', rs.dtype.fields)\n\n df = DataFrame(np.random.randn(3, 3))\n rs = df.to_records()\n self.assertIn('index', rs.dtype.fields)\n\n df.index = MultiIndex.from_tuples([('a', 'x'), ('a', 'y'), ('b', 'z')])\n df.index.names = ['A', None]\n rs = df.to_records()\n self.assertIn('level_0', rs.dtype.fields)\n\n def test_join_str_datetime(self):\n str_dates = ['20120209', '20120222']\n dt_dates = [datetime(2012, 2, 9), datetime(2012, 2, 22)]\n\n A = DataFrame(str_dates, index=lrange(2), columns=['aa'])\n C = DataFrame([[1, 2], [3, 4]], index=str_dates, columns=dt_dates)\n\n tst = A.join(C, on='aa')\n\n self.assertEqual(len(tst.columns), 3)\n\n def test_from_records_sequencelike(self):\n df = DataFrame({'A' : np.array(np.random.randn(6), dtype = np.float64),\n 'A1': np.array(np.random.randn(6), dtype = np.float64),\n 'B' : np.array(np.arange(6), dtype = np.int64),\n 'C' : ['foo'] * 6,\n 'D' : np.array([True, False] * 3, dtype=bool),\n 'E' : np.array(np.random.randn(6), dtype = np.float32),\n 'E1': np.array(np.random.randn(6), dtype = np.float32),\n 'F' : np.array(np.arange(6), dtype = np.int32) })\n\n # this is actually tricky to create the recordlike arrays and have the dtypes be intact\n blocks = df.blocks\n tuples = []\n columns = []\n dtypes = []\n for dtype, b in compat.iteritems(blocks):\n columns.extend(b.columns)\n dtypes.extend([ (c,np.dtype(dtype).descr[0][1]) for c in b.columns ])\n for i in range(len(df.index)):\n tup = []\n for _, b in compat.iteritems(blocks):\n tup.extend(b.irow(i).values)\n tuples.append(tuple(tup))\n\n recarray = np.array(tuples, dtype=dtypes).view(np.recarray)\n recarray2 = df.to_records()\n lists = [list(x) for x in tuples]\n\n # tuples (lose the dtype info)\n result = DataFrame.from_records(tuples, columns=columns).reindex(columns=df.columns)\n\n # created recarray and with to_records recarray (have dtype info)\n result2 = 
DataFrame.from_records(recarray, columns=columns).reindex(columns=df.columns)\n result3 = DataFrame.from_records(recarray2, columns=columns).reindex(columns=df.columns)\n\n # list of tupels (no dtype info)\n result4 = DataFrame.from_records(lists, columns=columns).reindex(columns=df.columns)\n\n assert_frame_equal(result, df, check_dtype=False)\n assert_frame_equal(result2, df)\n assert_frame_equal(result3, df)\n assert_frame_equal(result4, df, check_dtype=False)\n\n # tuples is in the order of the columns\n result = DataFrame.from_records(tuples)\n self.assert_numpy_array_equal(result.columns, lrange(8))\n\n # test exclude parameter & we are casting the results here (as we don't have dtype info to recover)\n columns_to_test = [ columns.index('C'), columns.index('E1') ]\n\n exclude = list(set(range(8))-set(columns_to_test))\n result = DataFrame.from_records(tuples, exclude=exclude)\n result.columns = [ columns[i] for i in sorted(columns_to_test) ]\n assert_series_equal(result['C'], df['C'])\n assert_series_equal(result['E1'], df['E1'].astype('float64'))\n\n # empty case\n result = DataFrame.from_records([], columns=['foo', 'bar', 'baz'])\n self.assertEqual(len(result), 0)\n self.assert_numpy_array_equal(result.columns, ['foo', 'bar', 'baz'])\n\n result = DataFrame.from_records([])\n self.assertEqual(len(result), 0)\n self.assertEqual(len(result.columns), 0)\n\n def test_from_records_dictlike(self):\n\n # test the dict methods\n df = DataFrame({'A' : np.array(np.random.randn(6), dtype = np.float64),\n 'A1': np.array(np.random.randn(6), dtype = np.float64),\n 'B' : np.array(np.arange(6), dtype = np.int64),\n 'C' : ['foo'] * 6,\n 'D' : np.array([True, False] * 3, dtype=bool),\n 'E' : np.array(np.random.randn(6), dtype = np.float32),\n 'E1': np.array(np.random.randn(6), dtype = np.float32),\n 'F' : np.array(np.arange(6), dtype = np.int32) })\n\n # columns is in a different order here than the actual items iterated from the dict\n columns = []\n for dtype, b in compat.iteritems(df.blocks):\n columns.extend(b.columns)\n\n asdict = dict((x, y) for x, y in compat.iteritems(df))\n asdict2 = dict((x, y.values) for x, y in compat.iteritems(df))\n\n # dict of series & dict of ndarrays (have dtype info)\n results = []\n results.append(DataFrame.from_records(asdict).reindex(columns=df.columns))\n results.append(DataFrame.from_records(asdict, columns=columns).reindex(columns=df.columns))\n results.append(DataFrame.from_records(asdict2, columns=columns).reindex(columns=df.columns))\n\n for r in results:\n assert_frame_equal(r, df)\n\n def test_from_records_with_index_data(self):\n df = DataFrame(np.random.randn(10, 3), columns=['A', 'B', 'C'])\n\n data = np.random.randn(10)\n df1 = DataFrame.from_records(df, index=data)\n assert(df1.index.equals(Index(data)))\n\n def test_from_records_bad_index_column(self):\n df = DataFrame(np.random.randn(10, 3), columns=['A', 'B', 'C'])\n\n # should pass\n df1 = DataFrame.from_records(df, index=['C'])\n assert(df1.index.equals(Index(df.C)))\n\n df1 = DataFrame.from_records(df, index='C')\n assert(df1.index.equals(Index(df.C)))\n\n # should fail\n self.assertRaises(ValueError, DataFrame.from_records, df, index=[2])\n self.assertRaises(KeyError, DataFrame.from_records, df, index=2)\n\n def test_from_records_non_tuple(self):\n class Record(object):\n\n def __init__(self, *args):\n self.args = args\n\n def __getitem__(self, i):\n return self.args[i]\n\n def __iter__(self):\n return iter(self.args)\n\n recs = [Record(1, 2, 3), Record(4, 5, 6), Record(7, 8, 9)]\n tups = 
lmap(tuple, recs)\n\n result = DataFrame.from_records(recs)\n expected = DataFrame.from_records(tups)\n assert_frame_equal(result, expected)\n\n def test_from_records_len0_with_columns(self):\n # #2633\n result = DataFrame.from_records([], index='foo',\n columns=['foo', 'bar'])\n\n self.assertTrue(np.array_equal(result.columns, ['bar']))\n self.assertEqual(len(result), 0)\n self.assertEqual(result.index.name, 'foo')\n\n def test_get_agg_axis(self):\n cols = self.frame._get_agg_axis(0)\n self.assertIs(cols, self.frame.columns)\n\n idx = self.frame._get_agg_axis(1)\n self.assertIs(idx, self.frame.index)\n\n self.assertRaises(ValueError, self.frame._get_agg_axis, 2)\n\n def test_nonzero(self):\n self.assertTrue(self.empty.empty)\n\n self.assertFalse(self.frame.empty)\n self.assertFalse(self.mixed_frame.empty)\n\n # corner case\n df = DataFrame({'A': [1., 2., 3.],\n 'B': ['a', 'b', 'c']},\n index=np.arange(3))\n del df['A']\n self.assertFalse(df.empty)\n\n def test_repr_empty(self):\n buf = StringIO()\n\n # empty\n foo = repr(self.empty)\n\n # empty with index\n frame = DataFrame(index=np.arange(1000))\n foo = repr(frame)\n\n def test_repr_mixed(self):\n buf = StringIO()\n\n # mixed\n foo = repr(self.mixed_frame)\n self.mixed_frame.info(verbose=False, buf=buf)\n\n @slow\n def test_repr_mixed_big(self):\n # big mixed\n biggie = DataFrame({'A': randn(200),\n 'B': tm.makeStringIndex(200)},\n index=lrange(200))\n biggie.loc[:20,'A'] = nan\n biggie.loc[:20,'B'] = nan\n\n foo = repr(biggie)\n\n def test_repr(self):\n buf = StringIO()\n\n # small one\n foo = repr(self.frame)\n self.frame.info(verbose=False, buf=buf)\n\n # even smaller\n self.frame.reindex(columns=['A']).info(verbose=False, buf=buf)\n self.frame.reindex(columns=['A', 'B']).info(verbose=False, buf=buf)\n\n # exhausting cases in DataFrame.info\n\n # columns but no index\n no_index = DataFrame(columns=[0, 1, 3])\n foo = repr(no_index)\n\n # no columns or index\n self.empty.info(buf=buf)\n\n df = DataFrame([\"a\\n\\r\\tb\"], columns=[\"a\\n\\r\\td\"], index=[\"a\\n\\r\\tf\"])\n self.assertFalse(\"\\t\" in repr(df))\n self.assertFalse(\"\\r\" in repr(df))\n self.assertFalse(\"a\\n\" in repr(df))\n\n def test_repr_dimensions(self):\n df = DataFrame([[1, 2,], [3, 4]])\n with option_context('display.show_dimensions', True):\n self.assertTrue(\"2 rows x 2 columns\" in repr(df))\n\n with option_context('display.show_dimensions', False):\n self.assertFalse(\"2 rows x 2 columns\" in repr(df))\n\n with option_context('display.show_dimensions', 'truncate'):\n self.assertFalse(\"2 rows x 2 columns\" in repr(df))\n\n @slow\n def test_repr_big(self):\n buf = StringIO()\n\n # big one\n biggie = DataFrame(np.zeros((200, 4)), columns=lrange(4),\n index=lrange(200))\n foo = repr(biggie)\n\n def test_repr_unsortable(self):\n # columns are not sortable\n import warnings\n warn_filters = warnings.filters\n warnings.filterwarnings('ignore',\n category=FutureWarning,\n module=\".*format\")\n\n unsortable = DataFrame({'foo': [1] * 50,\n datetime.today(): [1] * 50,\n 'bar': ['bar'] * 50,\n datetime.today(\n ) + timedelta(1): ['bar'] * 50},\n index=np.arange(50))\n foo = repr(unsortable)\n\n fmt.set_option('display.precision', 3, 'display.column_space', 10)\n repr(self.frame)\n\n fmt.set_option('display.max_rows', 10, 'display.max_columns', 2)\n repr(self.frame)\n\n fmt.set_option('display.max_rows', 1000, 'display.max_columns', 1000)\n repr(self.frame)\n\n self.reset_display_options()\n\n warnings.filters = warn_filters\n\n def test_repr_unicode(self):\n uval = 
u('\\u03c3\\u03c3\\u03c3\\u03c3')\n bval = uval.encode('utf-8')\n df = DataFrame({'A': [uval, uval]})\n\n result = repr(df)\n ex_top = ' A'\n self.assertEqual(result.split('\\n')[0].rstrip(), ex_top)\n\n df = DataFrame({'A': [uval, uval]})\n result = repr(df)\n self.assertEqual(result.split('\\n')[0].rstrip(), ex_top)\n\n def test_unicode_string_with_unicode(self):\n df = DataFrame({'A': [u(\"\\u05d0\")]})\n\n if compat.PY3:\n str(df)\n else:\n compat.text_type(df)\n\n def test_bytestring_with_unicode(self):\n df = DataFrame({'A': [u(\"\\u05d0\")]})\n if compat.PY3:\n bytes(df)\n else:\n str(df)\n\n def test_very_wide_info_repr(self):\n df = DataFrame(np.random.randn(10, 20),\n columns=tm.rands_array(10, 20))\n repr(df)\n\n def test_repr_column_name_unicode_truncation_bug(self):\n # #1906\n df = DataFrame({'Id': [7117434],\n 'StringCol': ('Is it possible to modify drop plot code'\n ' so that the output graph is displayed '\n 'in iphone simulator, Is it possible to '\n 'modify drop plot code so that the '\n 'output graph is \\xe2\\x80\\xa8displayed '\n 'in iphone simulator.Now we are adding '\n 'the CSV file externally. I want to Call'\n ' the File through the code..')})\n\n result = repr(df)\n self.assertIn('StringCol', result)\n\n def test_head_tail(self):\n assert_frame_equal(self.frame.head(), self.frame[:5])\n assert_frame_equal(self.frame.tail(), self.frame[-5:])\n assert_frame_equal(self.frame.head(0), self.frame)\n assert_frame_equal(self.frame.tail(0), self.frame)\n assert_frame_equal(self.frame.head(-1), self.frame[:-1])\n assert_frame_equal(self.frame.tail(-1), self.frame[1:])\n assert_frame_equal(self.frame.head(1), self.frame[:1])\n assert_frame_equal(self.frame.tail(1), self.frame[-1:])\n # with a float index\n df = self.frame.copy()\n df.index = np.arange(len(self.frame)) + 0.1\n assert_frame_equal(df.head(), df.iloc[:5])\n assert_frame_equal(df.tail(), df.iloc[-5:])\n assert_frame_equal(df.head(0), df)\n assert_frame_equal(df.tail(0), df)\n assert_frame_equal(df.head(-1), df.iloc[:-1])\n assert_frame_equal(df.tail(-1), df.iloc[1:])\n #test empty dataframe\n empty_df = DataFrame()\n assert_frame_equal(empty_df.tail(), empty_df)\n assert_frame_equal(empty_df.head(), empty_df)\n\n def test_insert(self):\n df = DataFrame(np.random.randn(5, 3), index=np.arange(5),\n columns=['c', 'b', 'a'])\n\n df.insert(0, 'foo', df['a'])\n self.assert_numpy_array_equal(df.columns, ['foo', 'c', 'b', 'a'])\n assert_almost_equal(df['a'], df['foo'])\n\n df.insert(2, 'bar', df['c'])\n self.assert_numpy_array_equal(df.columns, ['foo', 'c', 'bar', 'b', 'a'])\n assert_almost_equal(df['c'], df['bar'])\n\n # diff dtype\n\n # new item\n df['x'] = df['a'].astype('float32')\n result = Series(dict(float64 = 5, float32 = 1))\n self.assertTrue((df.get_dtype_counts() == result).all())\n\n # replacing current (in different block)\n df['a'] = df['a'].astype('float32')\n result = Series(dict(float64 = 4, float32 = 2))\n self.assertTrue((df.get_dtype_counts() == result).all())\n\n df['y'] = df['a'].astype('int32')\n result = Series(dict(float64 = 4, float32 = 2, int32 = 1))\n self.assertTrue((df.get_dtype_counts() == result).all())\n\n with assertRaisesRegexp(ValueError, 'already exists'):\n df.insert(1, 'a', df['b'])\n self.assertRaises(ValueError, df.insert, 1, 'c', df['b'])\n\n df.columns.name = 'some_name'\n # preserve columns name field\n df.insert(0, 'baz', df['c'])\n self.assertEqual(df.columns.name, 'some_name')\n\n def test_delitem(self):\n del self.frame['A']\n self.assertNotIn('A', self.frame)\n\n def 
test_pop(self):\n self.frame.columns.name = 'baz'\n\n A = self.frame.pop('A')\n self.assertNotIn('A', self.frame)\n\n self.frame['foo'] = 'bar'\n foo = self.frame.pop('foo')\n self.assertNotIn('foo', self.frame)\n # TODO self.assertEqual(self.frame.columns.name, 'baz')\n\n def test_pop_non_unique_cols(self):\n df = DataFrame({0: [0, 1], 1: [0, 1], 2: [4, 5]})\n df.columns = [\"a\", \"b\", \"a\"]\n\n res = df.pop(\"a\")\n self.assertEqual(type(res), DataFrame)\n self.assertEqual(len(res), 2)\n self.assertEqual(len(df.columns), 1)\n self.assertTrue(\"b\" in df.columns)\n self.assertFalse(\"a\" in df.columns)\n self.assertEqual(len(df.index), 2)\n\n def test_iter(self):\n self.assertTrue(tm.equalContents(list(self.frame), self.frame.columns))\n\n def test_iterrows(self):\n for i, (k, v) in enumerate(self.frame.iterrows()):\n exp = self.frame.xs(self.frame.index[i])\n assert_series_equal(v, exp)\n\n for i, (k, v) in enumerate(self.mixed_frame.iterrows()):\n exp = self.mixed_frame.xs(self.mixed_frame.index[i])\n assert_series_equal(v, exp)\n\n def test_itertuples(self):\n for i, tup in enumerate(self.frame.itertuples()):\n s = Series(tup[1:])\n s.name = tup[0]\n expected = self.frame.ix[i, :].reset_index(drop=True)\n assert_series_equal(s, expected)\n\n df = DataFrame({'floats': np.random.randn(5),\n 'ints': lrange(5)}, columns=['floats', 'ints'])\n\n for tup in df.itertuples(index=False):\n tm.assert_isinstance(tup[1], np.integer)\n\n df = DataFrame(data={\"a\": [1, 2, 3], \"b\": [4, 5, 6]})\n dfaa = df[['a', 'a']]\n self.assertEqual(list(dfaa.itertuples()), [(0, 1, 1), (1, 2, 2), (2, 3, 3)])\n\n def test_len(self):\n self.assertEqual(len(self.frame), len(self.frame.index))\n\n def test_operators(self):\n garbage = random.random(4)\n colSeries = Series(garbage, index=np.array(self.frame.columns))\n\n idSum = self.frame + self.frame\n seriesSum = self.frame + colSeries\n\n for col, series in compat.iteritems(idSum):\n for idx, val in compat.iteritems(series):\n origVal = self.frame[col][idx] * 2\n if not np.isnan(val):\n self.assertEqual(val, origVal)\n else:\n self.assertTrue(np.isnan(origVal))\n\n for col, series in compat.iteritems(seriesSum):\n for idx, val in compat.iteritems(series):\n origVal = self.frame[col][idx] + colSeries[col]\n if not np.isnan(val):\n self.assertEqual(val, origVal)\n else:\n self.assertTrue(np.isnan(origVal))\n\n added = self.frame2 + self.frame2\n expected = self.frame2 * 2\n assert_frame_equal(added, expected)\n\n df = DataFrame({'a': ['a', None, 'b']})\n assert_frame_equal(df + df, DataFrame({'a': ['aa', np.nan, 'bb']}))\n\n def test_ops_np_scalar(self):\n vals, xs = np.random.rand(5, 3), [nan, 7, -23, 2.718, -3.14, np.inf]\n f = lambda x: DataFrame(x, index=list('ABCDE'),\n columns=['jim', 'joe', 'jolie'])\n\n df = f(vals)\n\n for x in xs:\n assert_frame_equal(df / np.array(x), f(vals / x))\n assert_frame_equal(np.array(x) * df, f(vals * x))\n assert_frame_equal(df + np.array(x), f(vals + x))\n assert_frame_equal(np.array(x) - df, f(x - vals))\n\n def test_operators_boolean(self):\n\n # GH 5808\n # empty frames, non-mixed dtype\n\n result = DataFrame(index=[1]) & DataFrame(index=[1])\n assert_frame_equal(result,DataFrame(index=[1]))\n\n result = DataFrame(index=[1]) | DataFrame(index=[1])\n assert_frame_equal(result,DataFrame(index=[1]))\n\n result = DataFrame(index=[1]) & DataFrame(index=[1,2])\n assert_frame_equal(result,DataFrame(index=[1,2]))\n\n result = DataFrame(index=[1],columns=['A']) & DataFrame(index=[1],columns=['A'])\n 
assert_frame_equal(result,DataFrame(index=[1],columns=['A']))\n\n result = DataFrame(True,index=[1],columns=['A']) & DataFrame(True,index=[1],columns=['A'])\n assert_frame_equal(result,DataFrame(True,index=[1],columns=['A']))\n\n result = DataFrame(True,index=[1],columns=['A']) | DataFrame(True,index=[1],columns=['A'])\n assert_frame_equal(result,DataFrame(True,index=[1],columns=['A']))\n\n # boolean ops\n result = DataFrame(1,index=[1],columns=['A']) | DataFrame(True,index=[1],columns=['A'])\n assert_frame_equal(result,DataFrame(1,index=[1],columns=['A']))\n\n def f():\n DataFrame(1.0,index=[1],columns=['A']) | DataFrame(True,index=[1],columns=['A'])\n self.assertRaises(TypeError, f)\n\n def f():\n DataFrame('foo',index=[1],columns=['A']) | DataFrame(True,index=[1],columns=['A'])\n self.assertRaises(TypeError, f)\n\n def test_operators_none_as_na(self):\n df = DataFrame({\"col1\": [2, 5.0, 123, None],\n \"col2\": [1, 2, 3, 4]}, dtype=object)\n\n ops = [operator.add, operator.sub, operator.mul, operator.truediv]\n\n # since filling converts dtypes from object, changed expected to be object\n for op in ops:\n filled = df.fillna(np.nan)\n result = op(df, 3)\n expected = op(filled, 3).astype(object)\n expected[com.isnull(expected)] = None\n assert_frame_equal(result, expected)\n\n result = op(df, df)\n expected = op(filled, filled).astype(object)\n expected[com.isnull(expected)] = None\n assert_frame_equal(result, expected)\n\n result = op(df, df.fillna(7))\n assert_frame_equal(result, expected)\n\n result = op(df.fillna(7), df)\n assert_frame_equal(result, expected, check_dtype=False)\n\n def test_comparison_invalid(self):\n\n def check(df,df2):\n\n for (x, y) in [(df,df2),(df2,df)]:\n self.assertRaises(TypeError, lambda : x == y)\n self.assertRaises(TypeError, lambda : x != y)\n self.assertRaises(TypeError, lambda : x >= y)\n self.assertRaises(TypeError, lambda : x > y)\n self.assertRaises(TypeError, lambda : x < y)\n self.assertRaises(TypeError, lambda : x <= y)\n\n # GH4968\n # invalid date/int comparisons\n df = DataFrame(np.random.randint(10, size=(10, 1)), columns=['a'])\n df['dates'] = date_range('20010101', periods=len(df))\n\n df2 = df.copy()\n df2['dates'] = df['a']\n check(df,df2)\n\n df = DataFrame(np.random.randint(10, size=(10, 2)), columns=['a', 'b'])\n df2 = DataFrame({'a': date_range('20010101', periods=len(df)), 'b': date_range('20100101', periods=len(df))})\n check(df,df2)\n\n def test_timestamp_compare(self):\n # make sure we can compare Timestamps on the right AND left hand side\n # GH4982\n df = DataFrame({'dates1': date_range('20010101', periods=10),\n 'dates2': date_range('20010102', periods=10),\n 'intcol': np.random.randint(1000000000, size=10),\n 'floatcol': np.random.randn(10),\n 'stringcol': list(tm.rands(10))})\n df.loc[np.random.rand(len(df)) > 0.5, 'dates2'] = pd.NaT\n ops = {'gt': 'lt', 'lt': 'gt', 'ge': 'le', 'le': 'ge', 'eq': 'eq',\n 'ne': 'ne'}\n for left, right in ops.items():\n left_f = getattr(operator, left)\n right_f = getattr(operator, right)\n\n # no nats\n expected = left_f(df, Timestamp('20010109'))\n result = right_f(Timestamp('20010109'), df)\n tm.assert_frame_equal(result, expected)\n\n # nats\n expected = left_f(df, Timestamp('nat'))\n result = right_f(Timestamp('nat'), df)\n tm.assert_frame_equal(result, expected)\n\n def test_modulo(self):\n\n # GH3590, modulo as ints\n p = DataFrame({ 'first' : [3,4,5,8], 'second' : [0,0,0,3] })\n\n ### this is technically wrong as the integer portion is coerced to float ###\n expected = DataFrame({ 'first' 
: Series([0,0,0,0],dtype='float64'), 'second' : Series([np.nan,np.nan,np.nan,0]) })\n result = p % p\n assert_frame_equal(result,expected)\n\n # numpy has a slightly different (wrong) treatement\n result2 = DataFrame(p.values % p.values,index=p.index,columns=p.columns,dtype='float64')\n result2.iloc[0:3,1] = np.nan\n assert_frame_equal(result2,expected)\n\n result = p % 0\n expected = DataFrame(np.nan,index=p.index,columns=p.columns)\n assert_frame_equal(result,expected)\n\n # numpy has a slightly different (wrong) treatement\n result2 = DataFrame(p.values.astype('float64') % 0,index=p.index,columns=p.columns)\n assert_frame_equal(result2,expected)\n\n # not commutative with series\n p = DataFrame(np.random.randn(10, 5))\n s = p[0]\n res = s % p\n res2 = p % s\n self.assertFalse(np.array_equal(res.fillna(0), res2.fillna(0)))\n\n def test_div(self):\n\n # integer div, but deal with the 0's (GH 9144)\n p = DataFrame({ 'first' : [3,4,5,8], 'second' : [0,0,0,3] })\n result = p / p\n\n expected = DataFrame({'first': Series([1.0, 1.0, 1.0, 1.0]),\n 'second': Series([nan, nan, nan, 1])})\n assert_frame_equal(result,expected)\n\n result2 = DataFrame(p.values.astype('float') / p.values, index=p.index,\n columns=p.columns)\n assert_frame_equal(result2,expected)\n\n result = p / 0\n expected = DataFrame(inf, index=p.index, columns=p.columns)\n expected.iloc[0:3, 1] = nan\n assert_frame_equal(result,expected)\n\n # numpy has a slightly different (wrong) treatement\n result2 = DataFrame(p.values.astype('float64') / 0, index=p.index,\n columns=p.columns)\n assert_frame_equal(result2,expected)\n\n p = DataFrame(np.random.randn(10, 5))\n s = p[0]\n res = s / p\n res2 = p / s\n self.assertFalse(np.array_equal(res.fillna(0), res2.fillna(0)))\n\n def test_logical_operators(self):\n\n def _check_bin_op(op):\n result = op(df1, df2)\n expected = DataFrame(op(df1.values, df2.values), index=df1.index,\n columns=df1.columns)\n self.assertEqual(result.values.dtype, np.bool_)\n assert_frame_equal(result, expected)\n\n def _check_unary_op(op):\n result = op(df1)\n expected = DataFrame(op(df1.values), index=df1.index,\n columns=df1.columns)\n self.assertEqual(result.values.dtype, np.bool_)\n assert_frame_equal(result, expected)\n\n df1 = {'a': {'a': True, 'b': False, 'c': False, 'd': True, 'e': True},\n 'b': {'a': False, 'b': True, 'c': False,\n 'd': False, 'e': False},\n 'c': {'a': False, 'b': False, 'c': True,\n 'd': False, 'e': False},\n 'd': {'a': True, 'b': False, 'c': False, 'd': True, 'e': True},\n 'e': {'a': True, 'b': False, 'c': False, 'd': True, 'e': True}}\n\n df2 = {'a': {'a': True, 'b': False, 'c': True, 'd': False, 'e': False},\n 'b': {'a': False, 'b': True, 'c': False,\n 'd': False, 'e': False},\n 'c': {'a': True, 'b': False, 'c': True, 'd': False, 'e': False},\n 'd': {'a': False, 'b': False, 'c': False,\n 'd': True, 'e': False},\n 'e': {'a': False, 'b': False, 'c': False,\n 'd': False, 'e': True}}\n\n df1 = DataFrame(df1)\n df2 = DataFrame(df2)\n\n _check_bin_op(operator.and_)\n _check_bin_op(operator.or_)\n _check_bin_op(operator.xor)\n\n # operator.neg is deprecated in numpy >= 1.9\n _check_unary_op(operator.inv)\n\n def test_logical_typeerror(self):\n if not compat.PY3:\n self.assertRaises(TypeError, self.frame.__eq__, 'foo')\n self.assertRaises(TypeError, self.frame.__lt__, 'foo')\n self.assertRaises(TypeError, self.frame.__gt__, 'foo')\n self.assertRaises(TypeError, self.frame.__ne__, 'foo')\n else:\n raise nose.SkipTest('test_logical_typeerror not tested on PY3')\n\n def 
test_constructor_lists_to_object_dtype(self):\n # from #1074\n d = DataFrame({'a': [np.nan, False]})\n self.assertEqual(d['a'].dtype, np.object_)\n self.assertFalse(d['a'][1])\n\n def test_constructor_with_nas(self):\n # GH 5016\n # na's in indicies\n\n def check(df):\n for i in range(len(df.columns)):\n df.iloc[:,i]\n\n # allow single nans to succeed\n indexer = np.arange(len(df.columns))[isnull(df.columns)]\n\n if len(indexer) == 1:\n assert_series_equal(df.iloc[:,indexer[0]],df.loc[:,np.nan])\n\n\n # multiple nans should fail\n else:\n\n def f():\n df.loc[:,np.nan]\n self.assertRaises(ValueError, f)\n\n\n df = DataFrame([[1,2,3],[4,5,6]], index=[1,np.nan])\n check(df)\n\n df = DataFrame([[1,2,3],[4,5,6]], columns=[1.1,2.2,np.nan])\n check(df)\n\n df = DataFrame([[0,1,2,3],[4,5,6,7]], columns=[np.nan,1.1,2.2,np.nan])\n check(df)\n\n df = DataFrame([[0.0,1,2,3.0],[4,5,6,7]], columns=[np.nan,1.1,2.2,np.nan])\n check(df)\n\n def test_logical_with_nas(self):\n d = DataFrame({'a': [np.nan, False], 'b': [True, True]})\n\n # GH4947\n # bool comparisons should return bool\n result = d['a'] | d['b']\n expected = Series([False, True])\n assert_series_equal(result, expected)\n\n # GH4604, automatic casting here\n result = d['a'].fillna(False) | d['b']\n expected = Series([True, True])\n assert_series_equal(result, expected)\n\n result = d['a'].fillna(False,downcast=False) | d['b']\n expected = Series([True, True])\n assert_series_equal(result, expected)\n\n def test_neg(self):\n # what to do?\n assert_frame_equal(-self.frame, -1 * self.frame)\n\n def test_invert(self):\n assert_frame_equal(-(self.frame < 0), ~(self.frame < 0))\n\n def test_first_last_valid(self):\n N = len(self.frame.index)\n mat = randn(N)\n mat[:5] = nan\n mat[-5:] = nan\n\n frame = DataFrame({'foo': mat}, index=self.frame.index)\n index = frame.first_valid_index()\n\n self.assertEqual(index, frame.index[5])\n\n index = frame.last_valid_index()\n self.assertEqual(index, frame.index[-6])\n\n def test_arith_flex_frame(self):\n ops = ['add', 'sub', 'mul', 'div', 'truediv', 'pow', 'floordiv', 'mod']\n if not compat.PY3:\n aliases = {}\n else:\n aliases = {'div': 'truediv'}\n\n for op in ops:\n try:\n alias = aliases.get(op, op)\n f = getattr(operator, alias)\n result = getattr(self.frame, op)(2 * self.frame)\n exp = f(self.frame, 2 * self.frame)\n assert_frame_equal(result, exp)\n\n # vs mix float\n result = getattr(self.mixed_float, op)(2 * self.mixed_float)\n exp = f(self.mixed_float, 2 * self.mixed_float)\n assert_frame_equal(result, exp)\n _check_mixed_float(result, dtype = dict(C = None))\n\n # vs mix int\n if op in ['add','sub','mul']:\n result = getattr(self.mixed_int, op)(2 + self.mixed_int)\n exp = f(self.mixed_int, 2 + self.mixed_int)\n\n # overflow in the uint\n dtype = None\n if op in ['sub']:\n dtype = dict(B = 'object', C = None)\n elif op in ['add','mul']:\n dtype = dict(C = None)\n assert_frame_equal(result, exp)\n _check_mixed_int(result, dtype = dtype)\n\n # rops\n r_f = lambda x, y: f(y, x)\n result = getattr(self.frame, 'r' + op)(2 * self.frame)\n exp = r_f(self.frame, 2 * self.frame)\n assert_frame_equal(result, exp)\n\n # vs mix float\n result = getattr(self.mixed_float, op)(2 * self.mixed_float)\n exp = f(self.mixed_float, 2 * self.mixed_float)\n assert_frame_equal(result, exp)\n _check_mixed_float(result, dtype = dict(C = None))\n\n result = getattr(self.intframe, op)(2 * self.intframe)\n exp = f(self.intframe, 2 * self.intframe)\n assert_frame_equal(result, exp)\n\n # vs mix int\n if op in 
['add','sub','mul']:\n result = getattr(self.mixed_int, op)(2 + self.mixed_int)\n exp = f(self.mixed_int, 2 + self.mixed_int)\n\n # overflow in the uint\n dtype = None\n if op in ['sub']:\n dtype = dict(B = 'object', C = None)\n elif op in ['add','mul']:\n dtype = dict(C = None)\n assert_frame_equal(result, exp)\n _check_mixed_int(result, dtype = dtype)\n except:\n com.pprint_thing(\"Failing operation %r\" % op)\n raise\n\n # ndim >= 3\n ndim_5 = np.ones(self.frame.shape + (3, 4, 5))\n with assertRaisesRegexp(ValueError, 'shape'):\n f(self.frame, ndim_5)\n\n with assertRaisesRegexp(ValueError, 'shape'):\n getattr(self.frame, op)(ndim_5)\n\n\n # res_add = self.frame.add(self.frame)\n # res_sub = self.frame.sub(self.frame)\n # res_mul = self.frame.mul(self.frame)\n # res_div = self.frame.div(2 * self.frame)\n\n # assert_frame_equal(res_add, self.frame + self.frame)\n # assert_frame_equal(res_sub, self.frame - self.frame)\n # assert_frame_equal(res_mul, self.frame * self.frame)\n # assert_frame_equal(res_div, self.frame / (2 * self.frame))\n\n const_add = self.frame.add(1)\n assert_frame_equal(const_add, self.frame + 1)\n\n # corner cases\n result = self.frame.add(self.frame[:0])\n assert_frame_equal(result, self.frame * np.nan)\n\n result = self.frame[:0].add(self.frame)\n assert_frame_equal(result, self.frame * np.nan)\n with assertRaisesRegexp(NotImplementedError, 'fill_value'):\n self.frame.add(self.frame.irow(0), fill_value=3)\n with assertRaisesRegexp(NotImplementedError, 'fill_value'):\n self.frame.add(self.frame.irow(0), axis='index', fill_value=3)\n\n def test_binary_ops_align(self):\n\n # test aligning binary ops\n\n # GH 6681\n index=MultiIndex.from_product([list('abc'),\n ['one','two','three'],\n [1,2,3]],\n names=['first','second','third'])\n\n df = DataFrame(np.arange(27*3).reshape(27,3),\n index=index,\n columns=['value1','value2','value3']).sortlevel()\n\n idx = pd.IndexSlice\n for op in ['add','sub','mul','div','truediv']:\n opa = getattr(operator,op,None)\n if opa is None:\n continue\n\n x = Series([ 1.0, 10.0, 100.0], [1,2,3])\n result = getattr(df,op)(x,level='third',axis=0)\n\n expected = pd.concat([ opa(df.loc[idx[:,:,i],:],v) for i, v in x.iteritems() ]).sortlevel()\n assert_frame_equal(result, expected)\n\n x = Series([ 1.0, 10.0], ['two','three'])\n result = getattr(df,op)(x,level='second',axis=0)\n\n expected = pd.concat([ opa(df.loc[idx[:,i],:],v) for i, v in x.iteritems() ]).reindex_like(df).sortlevel()\n assert_frame_equal(result, expected)\n\n ## GH9463 (alignment level of dataframe with series)\n\n midx = MultiIndex.from_product([['A', 'B'],['a', 'b']])\n df = DataFrame(np.ones((2,4), dtype='int64'), columns=midx)\n s = pd.Series({'a':1, 'b':2})\n\n df2 = df.copy()\n df2.columns.names = ['lvl0', 'lvl1']\n s2 = s.copy()\n s2.index.name = 'lvl1'\n\n # different cases of integer/string level names:\n res1 = df.mul(s, axis=1, level=1)\n res2 = df.mul(s2, axis=1, level=1)\n res3 = df2.mul(s, axis=1, level=1)\n res4 = df2.mul(s2, axis=1, level=1)\n res5 = df2.mul(s, axis=1, level='lvl1')\n res6 = df2.mul(s2, axis=1, level='lvl1')\n\n exp = DataFrame(np.array([[1, 2, 1, 2], [1, 2, 1, 2]], dtype='int64'),\n columns=midx)\n\n for res in [res1, res2]:\n assert_frame_equal(res, exp)\n\n exp.columns.names = ['lvl0', 'lvl1']\n for res in [res3, res4, res5, res6]:\n assert_frame_equal(res, exp)\n\n def test_arith_mixed(self):\n\n left = DataFrame({'A': ['a', 'b', 'c'],\n 'B': [1, 2, 3]})\n\n result = left + left\n expected = DataFrame({'A': ['aa', 'bb', 'cc'],\n 'B': [2, 4, 
6]})\n assert_frame_equal(result, expected)\n\n def test_arith_getitem_commute(self):\n df = DataFrame({'A': [1.1, 3.3], 'B': [2.5, -3.9]})\n\n self._test_op(df, operator.add)\n self._test_op(df, operator.sub)\n self._test_op(df, operator.mul)\n self._test_op(df, operator.truediv)\n self._test_op(df, operator.floordiv)\n self._test_op(df, operator.pow)\n\n self._test_op(df, lambda x, y: y + x)\n self._test_op(df, lambda x, y: y - x)\n self._test_op(df, lambda x, y: y * x)\n self._test_op(df, lambda x, y: y / x)\n self._test_op(df, lambda x, y: y ** x)\n\n self._test_op(df, lambda x, y: x + y)\n self._test_op(df, lambda x, y: x - y)\n self._test_op(df, lambda x, y: x * y)\n self._test_op(df, lambda x, y: x / y)\n self._test_op(df, lambda x, y: x ** y)\n\n @staticmethod\n def _test_op(df, op):\n result = op(df, 1)\n\n if not df.columns.is_unique:\n raise ValueError(\"Only unique columns supported by this test\")\n\n for col in result.columns:\n assert_series_equal(result[col], op(df[col], 1))\n\n def test_bool_flex_frame(self):\n data = np.random.randn(5, 3)\n other_data = np.random.randn(5, 3)\n df = DataFrame(data)\n other = DataFrame(other_data)\n ndim_5 = np.ones(df.shape + (1, 3))\n\n # Unaligned\n def _check_unaligned_frame(meth, op, df, other):\n part_o = other.ix[3:, 1:].copy()\n rs = meth(part_o)\n xp = op(df, part_o.reindex(index=df.index, columns=df.columns))\n assert_frame_equal(rs, xp)\n\n # DataFrame\n self.assertTrue(df.eq(df).values.all())\n self.assertFalse(df.ne(df).values.any())\n for op in ['eq', 'ne', 'gt', 'lt', 'ge', 'le']:\n f = getattr(df, op)\n o = getattr(operator, op)\n # No NAs\n assert_frame_equal(f(other), o(df, other))\n _check_unaligned_frame(f, o, df, other)\n # ndarray\n assert_frame_equal(f(other.values), o(df, other.values))\n # scalar\n assert_frame_equal(f(0), o(df, 0))\n # NAs\n assert_frame_equal(f(np.nan), o(df, np.nan))\n with assertRaisesRegexp(ValueError, 'shape'):\n f(ndim_5)\n\n # Series\n def _test_seq(df, idx_ser, col_ser):\n idx_eq = df.eq(idx_ser, axis=0)\n col_eq = df.eq(col_ser)\n idx_ne = df.ne(idx_ser, axis=0)\n col_ne = df.ne(col_ser)\n assert_frame_equal(col_eq, df == Series(col_ser))\n assert_frame_equal(col_eq, -col_ne)\n assert_frame_equal(idx_eq, -idx_ne)\n assert_frame_equal(idx_eq, df.T.eq(idx_ser).T)\n assert_frame_equal(col_eq, df.eq(list(col_ser)))\n assert_frame_equal(idx_eq, df.eq(Series(idx_ser), axis=0))\n assert_frame_equal(idx_eq, df.eq(list(idx_ser), axis=0))\n\n idx_gt = df.gt(idx_ser, axis=0)\n col_gt = df.gt(col_ser)\n idx_le = df.le(idx_ser, axis=0)\n col_le = df.le(col_ser)\n\n assert_frame_equal(col_gt, df > Series(col_ser))\n assert_frame_equal(col_gt, -col_le)\n assert_frame_equal(idx_gt, -idx_le)\n assert_frame_equal(idx_gt, df.T.gt(idx_ser).T)\n\n idx_ge = df.ge(idx_ser, axis=0)\n col_ge = df.ge(col_ser)\n idx_lt = df.lt(idx_ser, axis=0)\n col_lt = df.lt(col_ser)\n assert_frame_equal(col_ge, df >= Series(col_ser))\n assert_frame_equal(col_ge, -col_lt)\n assert_frame_equal(idx_ge, -idx_lt)\n assert_frame_equal(idx_ge, df.T.ge(idx_ser).T)\n\n idx_ser = Series(np.random.randn(5))\n col_ser = Series(np.random.randn(3))\n _test_seq(df, idx_ser, col_ser)\n\n\n # list/tuple\n _test_seq(df, idx_ser.values, col_ser.values)\n\n # NA\n df.ix[0, 0] = np.nan\n rs = df.eq(df)\n self.assertFalse(rs.ix[0, 0])\n rs = df.ne(df)\n self.assertTrue(rs.ix[0, 0])\n rs = df.gt(df)\n self.assertFalse(rs.ix[0, 0])\n rs = df.lt(df)\n self.assertFalse(rs.ix[0, 0])\n rs = df.ge(df)\n self.assertFalse(rs.ix[0, 0])\n rs = df.le(df)\n 
self.assertFalse(rs.ix[0, 0])\n\n\n\n # complex\n arr = np.array([np.nan, 1, 6, np.nan])\n arr2 = np.array([2j, np.nan, 7, None])\n df = DataFrame({'a': arr})\n df2 = DataFrame({'a': arr2})\n rs = df.gt(df2)\n self.assertFalse(rs.values.any())\n rs = df.ne(df2)\n self.assertTrue(rs.values.all())\n\n arr3 = np.array([2j, np.nan, None])\n df3 = DataFrame({'a': arr3})\n rs = df3.gt(2j)\n self.assertFalse(rs.values.any())\n\n # corner, dtype=object\n df1 = DataFrame({'col': ['foo', np.nan, 'bar']})\n df2 = DataFrame({'col': ['foo', datetime.now(), 'bar']})\n result = df1.ne(df2)\n exp = DataFrame({'col': [False, True, False]})\n assert_frame_equal(result, exp)\n\n def test_arith_flex_series(self):\n df = self.simple\n\n row = df.xs('a')\n col = df['two']\n # after arithmetic refactor, add truediv here\n ops = ['add', 'sub', 'mul', 'mod']\n for op in ops:\n f = getattr(df, op)\n op = getattr(operator, op)\n assert_frame_equal(f(row), op(df, row))\n assert_frame_equal(f(col, axis=0), op(df.T, col).T)\n\n # special case for some reason\n assert_frame_equal(df.add(row, axis=None), df + row)\n\n # cases which will be refactored after big arithmetic refactor\n assert_frame_equal(df.div(row), df / row)\n assert_frame_equal(df.div(col, axis=0), (df.T / col).T)\n\n # broadcasting issue in GH7325\n df = DataFrame(np.arange(3*2).reshape((3,2)),dtype='int64')\n expected = DataFrame([[nan, inf], [1.0, 1.5], [1.0, 1.25]])\n result = df.div(df[0],axis='index')\n assert_frame_equal(result,expected)\n\n df = DataFrame(np.arange(3*2).reshape((3,2)),dtype='float64')\n expected = DataFrame([[np.nan,np.inf],[1.0,1.5],[1.0,1.25]])\n result = df.div(df[0],axis='index')\n assert_frame_equal(result,expected)\n\n def test_arith_non_pandas_object(self):\n df = self.simple\n\n val1 = df.xs('a').values\n added = DataFrame(df.values + val1, index=df.index, columns=df.columns)\n assert_frame_equal(df + val1, added)\n\n added = DataFrame((df.values.T + val1).T,\n index=df.index, columns=df.columns)\n assert_frame_equal(df.add(val1, axis=0), added)\n\n val2 = list(df['two'])\n\n added = DataFrame(df.values + val2, index=df.index, columns=df.columns)\n assert_frame_equal(df + val2, added)\n\n added = DataFrame((df.values.T + val2).T, index=df.index,\n columns=df.columns)\n assert_frame_equal(df.add(val2, axis='index'), added)\n\n val3 = np.random.rand(*df.shape)\n added = DataFrame(df.values + val3, index=df.index, columns=df.columns)\n assert_frame_equal(df.add(val3), added)\n\n def test_combineFrame(self):\n frame_copy = self.frame.reindex(self.frame.index[::2])\n\n del frame_copy['D']\n frame_copy['C'][:5] = nan\n\n added = self.frame + frame_copy\n tm.assert_dict_equal(added['A'].valid(),\n self.frame['A'] * 2,\n compare_keys=False)\n\n self.assertTrue(np.isnan(added['C'].reindex(frame_copy.index)[:5]).all())\n\n # assert(False)\n\n self.assertTrue(np.isnan(added['D']).all())\n\n self_added = self.frame + self.frame\n self.assertTrue(self_added.index.equals(self.frame.index))\n\n added_rev = frame_copy + self.frame\n self.assertTrue(np.isnan(added['D']).all())\n\n # corner cases\n\n # empty\n plus_empty = self.frame + self.empty\n self.assertTrue(np.isnan(plus_empty.values).all())\n\n empty_plus = self.empty + self.frame\n self.assertTrue(np.isnan(empty_plus.values).all())\n\n empty_empty = self.empty + self.empty\n self.assertTrue(empty_empty.empty)\n\n # out of order\n reverse = self.frame.reindex(columns=self.frame.columns[::-1])\n\n assert_frame_equal(reverse + self.frame, self.frame * 2)\n\n # mix vs float64, upcast\n 
added = self.frame + self.mixed_float\n _check_mixed_float(added, dtype = 'float64')\n added = self.mixed_float + self.frame\n _check_mixed_float(added, dtype = 'float64')\n\n # mix vs mix\n added = self.mixed_float + self.mixed_float2\n _check_mixed_float(added, dtype = dict(C = None))\n added = self.mixed_float2 + self.mixed_float\n _check_mixed_float(added, dtype = dict(C = None))\n\n # with int\n added = self.frame + self.mixed_int\n _check_mixed_float(added, dtype = 'float64')\n\n def test_combineSeries(self):\n\n # Series\n series = self.frame.xs(self.frame.index[0])\n\n added = self.frame + series\n\n for key, s in compat.iteritems(added):\n assert_series_equal(s, self.frame[key] + series[key])\n\n larger_series = series.to_dict()\n larger_series['E'] = 1\n larger_series = Series(larger_series)\n larger_added = self.frame + larger_series\n\n for key, s in compat.iteritems(self.frame):\n assert_series_equal(larger_added[key], s + series[key])\n self.assertIn('E', larger_added)\n self.assertTrue(np.isnan(larger_added['E']).all())\n\n # vs mix (upcast) as needed\n added = self.mixed_float + series\n _check_mixed_float(added, dtype = 'float64')\n added = self.mixed_float + series.astype('float32')\n _check_mixed_float(added, dtype = dict(C = None))\n added = self.mixed_float + series.astype('float16')\n _check_mixed_float(added, dtype = dict(C = None))\n\n #### these raise with numexpr.....as we are adding an int64 to an uint64....weird\n # vs int\n #added = self.mixed_int + (100*series).astype('int64')\n #_check_mixed_int(added, dtype = dict(A = 'int64', B = 'float64', C = 'int64', D = 'int64'))\n #added = self.mixed_int + (100*series).astype('int32')\n #_check_mixed_int(added, dtype = dict(A = 'int32', B = 'float64', C = 'int32', D = 'int64'))\n\n # TimeSeries\n buf = StringIO()\n tmp = sys.stderr\n sys.stderr = buf\n\n try:\n ts = self.tsframe['A']\n added = self.tsframe + ts\n\n for key, col in compat.iteritems(self.tsframe):\n assert_series_equal(added[key], col + ts)\n\n smaller_frame = self.tsframe[:-5]\n smaller_added = smaller_frame + ts\n\n self.assertTrue(smaller_added.index.equals(self.tsframe.index))\n\n smaller_ts = ts[:-5]\n smaller_added2 = self.tsframe + smaller_ts\n assert_frame_equal(smaller_added, smaller_added2)\n\n # length 0\n result = self.tsframe + ts[:0]\n\n # Frame is length 0\n result = self.tsframe[:0] + ts\n self.assertEqual(len(result), 0)\n\n # empty but with non-empty index\n frame = self.tsframe[:1].reindex(columns=[])\n result = frame * ts\n self.assertEqual(len(result), len(ts))\n finally:\n sys.stderr = tmp\n\n def test_combineFunc(self):\n result = self.frame * 2\n self.assert_numpy_array_equal(result.values, self.frame.values * 2)\n\n # vs mix\n result = self.mixed_float * 2\n for c, s in compat.iteritems(result):\n self.assert_numpy_array_equal(s.values, self.mixed_float[c].values * 2)\n _check_mixed_float(result, dtype = dict(C = None))\n\n result = self.empty * 2\n self.assertIs(result.index, self.empty.index)\n self.assertEqual(len(result.columns), 0)\n\n def test_comparisons(self):\n df1 = tm.makeTimeDataFrame()\n df2 = tm.makeTimeDataFrame()\n\n row = self.simple.xs('a')\n ndim_5 = np.ones(df1.shape + (1, 1, 1))\n\n def test_comp(func):\n result = func(df1, df2)\n self.assert_numpy_array_equal(result.values,\n func(df1.values, df2.values))\n with assertRaisesRegexp(ValueError, 'Wrong number of dimensions'):\n func(df1, ndim_5)\n\n result2 = func(self.simple, row)\n self.assert_numpy_array_equal(result2.values,\n func(self.simple.values, 
row.values))\n\n result3 = func(self.frame, 0)\n self.assert_numpy_array_equal(result3.values,\n func(self.frame.values, 0))\n\n\n with assertRaisesRegexp(ValueError, 'Can only compare '\n 'identically-labeled DataFrame'):\n func(self.simple, self.simple[:2])\n\n test_comp(operator.eq)\n test_comp(operator.ne)\n test_comp(operator.lt)\n test_comp(operator.gt)\n test_comp(operator.ge)\n test_comp(operator.le)\n\n def test_string_comparison(self):\n df = DataFrame([{\"a\": 1, \"b\": \"foo\"}, {\"a\": 2, \"b\": \"bar\"}])\n mask_a = df.a > 1\n assert_frame_equal(df[mask_a], df.ix[1:1, :])\n assert_frame_equal(df[-mask_a], df.ix[0:0, :])\n\n mask_b = df.b == \"foo\"\n assert_frame_equal(df[mask_b], df.ix[0:0, :])\n assert_frame_equal(df[-mask_b], df.ix[1:1, :])\n\n def test_float_none_comparison(self):\n df = DataFrame(np.random.randn(8, 3), index=lrange(8),\n columns=['A', 'B', 'C'])\n\n self.assertRaises(TypeError, df.__eq__, None)\n\n def test_boolean_comparison(self):\n\n # GH 4576\n # boolean comparisons with a tuple/list give unexpected results\n df = DataFrame(np.arange(6).reshape((3,2)))\n b = np.array([2, 2])\n b_r = np.atleast_2d([2,2])\n b_c = b_r.T\n l = (2,2,2)\n tup = tuple(l)\n\n # gt\n expected = DataFrame([[False,False],[False,True],[True,True]])\n result = df>b\n assert_frame_equal(result,expected)\n\n result = df.values>b\n assert_array_equal(result,expected.values)\n\n result = df>l\n assert_frame_equal(result,expected)\n\n result = df>tup\n assert_frame_equal(result,expected)\n\n result = df>b_r\n assert_frame_equal(result,expected)\n\n result = df.values>b_r\n assert_array_equal(result,expected.values)\n\n self.assertRaises(ValueError, df.__gt__, b_c)\n self.assertRaises(ValueError, df.values.__gt__, b_c)\n\n # ==\n expected = DataFrame([[False,False],[True,False],[False,False]])\n result = df == b\n assert_frame_equal(result,expected)\n\n result = df==l\n assert_frame_equal(result,expected)\n\n result = df==tup\n assert_frame_equal(result,expected)\n\n result = df == b_r\n assert_frame_equal(result,expected)\n\n result = df.values == b_r\n assert_array_equal(result,expected.values)\n\n self.assertRaises(ValueError, lambda : df == b_c)\n self.assertFalse((df.values == b_c))\n\n # with alignment\n df = DataFrame(np.arange(6).reshape((3,2)),columns=list('AB'),index=list('abc'))\n expected.index=df.index\n expected.columns=df.columns\n\n result = df==l\n assert_frame_equal(result,expected)\n\n result = df==tup\n assert_frame_equal(result,expected)\n\n # not shape compatible\n self.assertRaises(ValueError, lambda : df == (2,2))\n self.assertRaises(ValueError, lambda : df == [2,2])\n\n def test_to_csv_deprecated_options(self):\n\n pname = '__tmp_to_csv_deprecated_options__'\n with ensure_clean(pname) as path:\n\n self.tsframe[1:3] = np.nan\n self.tsframe.to_csv(path, nanRep='foo')\n recons = read_csv(path,index_col=0,parse_dates=[0],na_values=['foo'])\n assert_frame_equal(self.tsframe, recons)\n\n with tm.assert_produces_warning(FutureWarning):\n self.frame.to_csv(path, cols=['A', 'B'])\n\n with tm.assert_produces_warning(False):\n self.frame.to_csv(path, columns=['A', 'B'])\n\n\n def test_to_csv_from_csv(self):\n\n pname = '__tmp_to_csv_from_csv__'\n with ensure_clean(pname) as path:\n\n self.frame['A'][:5] = nan\n\n self.frame.to_csv(path)\n self.frame.to_csv(path, columns=['A', 'B'])\n self.frame.to_csv(path, header=False)\n self.frame.to_csv(path, index=False)\n\n # test roundtrip\n self.tsframe.to_csv(path)\n recons = DataFrame.from_csv(path)\n\n 
assert_frame_equal(self.tsframe, recons)\n\n self.tsframe.to_csv(path, index_label='index')\n recons = DataFrame.from_csv(path, index_col=None)\n assert(len(recons.columns) == len(self.tsframe.columns) + 1)\n\n # no index\n self.tsframe.to_csv(path, index=False)\n recons = DataFrame.from_csv(path, index_col=None)\n assert_almost_equal(self.tsframe.values, recons.values)\n\n # corner case\n dm = DataFrame({'s1': Series(lrange(3), lrange(3)),\n 's2': Series(lrange(2), lrange(2))})\n dm.to_csv(path)\n recons = DataFrame.from_csv(path)\n assert_frame_equal(dm, recons)\n\n with ensure_clean(pname) as path:\n\n # duplicate index\n df = DataFrame(np.random.randn(3, 3), index=['a', 'a', 'b'],\n columns=['x', 'y', 'z'])\n df.to_csv(path)\n result = DataFrame.from_csv(path)\n assert_frame_equal(result, df)\n\n midx = MultiIndex.from_tuples([('A', 1, 2), ('A', 1, 2), ('B', 1, 2)])\n df = DataFrame(np.random.randn(3, 3), index=midx,\n columns=['x', 'y', 'z'])\n df.to_csv(path)\n result = DataFrame.from_csv(path, index_col=[0, 1, 2],\n parse_dates=False)\n assert_frame_equal(result, df, check_names=False) # TODO from_csv names index ['Unnamed: 1', 'Unnamed: 2'] should it ?\n\n # column aliases\n col_aliases = Index(['AA', 'X', 'Y', 'Z'])\n self.frame2.to_csv(path, header=col_aliases)\n rs = DataFrame.from_csv(path)\n xp = self.frame2.copy()\n xp.columns = col_aliases\n\n assert_frame_equal(xp, rs)\n\n self.assertRaises(ValueError, self.frame2.to_csv, path,\n header=['AA', 'X'])\n\n with ensure_clean(pname) as path:\n import pandas as pd\n df1 = DataFrame(np.random.randn(3, 1))\n df2 = DataFrame(np.random.randn(3, 1))\n\n df1.to_csv(path)\n df2.to_csv(path,mode='a',header=False)\n xp = pd.concat([df1,df2])\n rs = pd.read_csv(path,index_col=0)\n rs.columns = lmap(int,rs.columns)\n xp.columns = lmap(int,xp.columns)\n assert_frame_equal(xp,rs)\n\n def test_to_csv_cols_reordering(self):\n # GH3454\n import pandas as pd\n\n def _check_df(df,cols=None):\n with ensure_clean() as path:\n df.to_csv(path,columns = cols,engine='python')\n rs_p = pd.read_csv(path,index_col=0)\n df.to_csv(path,columns = cols,chunksize=chunksize)\n rs_c = pd.read_csv(path,index_col=0)\n\n if cols:\n df = df[cols]\n assert (rs_c.columns==rs_p.columns).all()\n assert_frame_equal(df,rs_c,check_names=False)\n\n chunksize=5\n N = int(chunksize*2.5)\n\n df= mkdf(N, 3)\n cs = df.columns\n cols = [cs[2],cs[0]]\n _check_df(df,cols)\n\n def test_to_csv_legacy_raises_on_dupe_cols(self):\n df= mkdf(10, 3)\n df.columns = ['a','a','b']\n with ensure_clean() as path:\n self.assertRaises(NotImplementedError,df.to_csv,path,engine='python')\n\n def test_to_csv_new_dupe_cols(self):\n import pandas as pd\n def _check_df(df,cols=None):\n with ensure_clean() as path:\n df.to_csv(path,columns = cols,chunksize=chunksize)\n rs_c = pd.read_csv(path,index_col=0)\n\n # we wrote them in a different order\n # so compare them in that order\n if cols is not None:\n\n if df.columns.is_unique:\n rs_c.columns = cols\n else:\n indexer, missing = df.columns.get_indexer_non_unique(cols)\n rs_c.columns = df.columns.take(indexer)\n\n for c in cols:\n obj_df = df[c]\n obj_rs = rs_c[c]\n if isinstance(obj_df,Series):\n assert_series_equal(obj_df,obj_rs)\n else:\n assert_frame_equal(obj_df,obj_rs,check_names=False)\n\n # wrote in the same order\n else:\n rs_c.columns = df.columns\n assert_frame_equal(df,rs_c,check_names=False)\n\n chunksize=5\n N = int(chunksize*2.5)\n\n # dupe cols\n df= mkdf(N, 3)\n df.columns = ['a','a','b']\n _check_df(df,None)\n\n # dupe cols with 
selection\n cols = ['b','a']\n _check_df(df,cols)\n\n @slow\n def test_to_csv_moar(self):\n path = '__tmp_to_csv_moar__'\n\n def _do_test(df,path,r_dtype=None,c_dtype=None,rnlvl=None,cnlvl=None,\n dupe_col=False):\n\n kwargs = dict(parse_dates=False)\n if cnlvl:\n if rnlvl is not None:\n kwargs['index_col'] = lrange(rnlvl)\n kwargs['header'] = lrange(cnlvl)\n with ensure_clean(path) as path:\n df.to_csv(path,encoding='utf8',chunksize=chunksize,tupleize_cols=False)\n recons = DataFrame.from_csv(path,tupleize_cols=False,**kwargs)\n else:\n kwargs['header'] = 0\n with ensure_clean(path) as path:\n df.to_csv(path,encoding='utf8',chunksize=chunksize)\n recons = DataFrame.from_csv(path,**kwargs)\n\n def _to_uni(x):\n if not isinstance(x, compat.text_type):\n return x.decode('utf8')\n return x\n if dupe_col:\n # read_Csv disambiguates the columns by\n # labeling them dupe.1,dupe.2, etc'. monkey patch columns\n recons.columns = df.columns\n if rnlvl and not cnlvl:\n delta_lvl = [recons.icol(i).values for i in range(rnlvl-1)]\n ix=MultiIndex.from_arrays([list(recons.index)]+delta_lvl)\n recons.index = ix\n recons = recons.iloc[:,rnlvl-1:]\n\n type_map = dict(i='i',f='f',s='O',u='O',dt='O',p='O')\n if r_dtype:\n if r_dtype == 'u': # unicode\n r_dtype='O'\n recons.index = np.array(lmap(_to_uni,recons.index),\n dtype=r_dtype)\n df.index = np.array(lmap(_to_uni,df.index),dtype=r_dtype)\n elif r_dtype == 'dt': # unicode\n r_dtype='O'\n recons.index = np.array(lmap(Timestamp,recons.index),\n dtype=r_dtype)\n df.index = np.array(lmap(Timestamp,df.index),dtype=r_dtype)\n elif r_dtype == 'p':\n r_dtype='O'\n recons.index = np.array(list(map(Timestamp,\n recons.index.to_datetime())),\n dtype=r_dtype)\n df.index = np.array(list(map(Timestamp,\n df.index.to_datetime())),\n dtype=r_dtype)\n else:\n r_dtype= type_map.get(r_dtype)\n recons.index = np.array(recons.index,dtype=r_dtype )\n df.index = np.array(df.index,dtype=r_dtype )\n if c_dtype:\n if c_dtype == 'u':\n c_dtype='O'\n recons.columns = np.array(lmap(_to_uni,recons.columns),\n dtype=c_dtype)\n df.columns = np.array(lmap(_to_uni,df.columns),dtype=c_dtype )\n elif c_dtype == 'dt':\n c_dtype='O'\n recons.columns = np.array(lmap(Timestamp,recons.columns),\n dtype=c_dtype )\n df.columns = np.array(lmap(Timestamp,df.columns),dtype=c_dtype)\n elif c_dtype == 'p':\n c_dtype='O'\n recons.columns = np.array(lmap(Timestamp,recons.columns.to_datetime()),\n dtype=c_dtype)\n df.columns = np.array(lmap(Timestamp,df.columns.to_datetime()),dtype=c_dtype )\n else:\n c_dtype= type_map.get(c_dtype)\n recons.columns = np.array(recons.columns,dtype=c_dtype )\n df.columns = np.array(df.columns,dtype=c_dtype )\n\n assert_frame_equal(df,recons,check_names=False,check_less_precise=True)\n\n N = 100\n chunksize=1000\n\n # GH3437\n from pandas import NaT\n def make_dtnat_arr(n,nnat=None):\n if nnat is None:\n nnat= int(n*0.1) # 10%\n s=list(date_range('2000',freq='5min',periods=n))\n if nnat:\n for i in np.random.randint(0,len(s),nnat):\n s[i] = NaT\n i = np.random.randint(100)\n s[-i] = NaT\n s[i] = NaT\n return s\n\n # N=35000\n s1=make_dtnat_arr(chunksize+5)\n s2=make_dtnat_arr(chunksize+5,0)\n path = '1.csv'\n\n # s3=make_dtnjat_arr(chunksize+5,0)\n with ensure_clean('.csv') as pth:\n df=DataFrame(dict(a=s1,b=s2))\n df.to_csv(pth,chunksize=chunksize)\n recons = DataFrame.from_csv(pth).convert_objects('coerce')\n assert_frame_equal(df, recons,check_names=False,check_less_precise=True)\n\n for ncols in [4]:\n base = int((chunksize// ncols or 1) or 1)\n for nrows in 
[2,10,N-1,N,N+1,N+2,2*N-2,2*N-1,2*N,2*N+1,2*N+2,\n base-1,base,base+1]:\n _do_test(mkdf(nrows, ncols,r_idx_type='dt',\n c_idx_type='s'),path, 'dt','s')\n\n\n for ncols in [4]:\n base = int((chunksize// ncols or 1) or 1)\n for nrows in [2,10,N-1,N,N+1,N+2,2*N-2,2*N-1,2*N,2*N+1,2*N+2,\n base-1,base,base+1]:\n _do_test(mkdf(nrows, ncols,r_idx_type='dt',\n c_idx_type='s'),path, 'dt','s')\n pass\n\n for r_idx_type,c_idx_type in [('i','i'),('s','s'),('u','dt'),('p','p')]:\n for ncols in [1,2,3,4]:\n base = int((chunksize// ncols or 1) or 1)\n for nrows in [2,10,N-1,N,N+1,N+2,2*N-2,2*N-1,2*N,2*N+1,2*N+2,\n base-1,base,base+1]:\n _do_test(mkdf(nrows, ncols,r_idx_type=r_idx_type,\n c_idx_type=c_idx_type),path,r_idx_type,c_idx_type)\n\n for ncols in [1,2,3,4]:\n base = int((chunksize// ncols or 1) or 1)\n for nrows in [10,N-2,N-1,N,N+1,N+2,2*N-2,2*N-1,2*N,2*N+1,2*N+2,\n base-1,base,base+1]:\n _do_test(mkdf(nrows, ncols),path)\n\n for nrows in [10,N-2,N-1,N,N+1,N+2]:\n df = mkdf(nrows, 3)\n cols = list(df.columns)\n cols[:2] = [\"dupe\",\"dupe\"]\n cols[-2:] = [\"dupe\",\"dupe\"]\n ix = list(df.index)\n ix[:2] = [\"rdupe\",\"rdupe\"]\n ix[-2:] = [\"rdupe\",\"rdupe\"]\n df.index=ix\n df.columns=cols\n _do_test(df,path,dupe_col=True)\n\n\n _do_test(DataFrame(index=lrange(10)),path)\n _do_test(mkdf(chunksize//2+1, 2,r_idx_nlevels=2),path,rnlvl=2)\n for ncols in [2,3,4]:\n base = int(chunksize//ncols)\n for nrows in [10,N-2,N-1,N,N+1,N+2,2*N-2,2*N-1,2*N,2*N+1,2*N+2,\n base-1,base,base+1]:\n _do_test(mkdf(nrows, ncols,r_idx_nlevels=2),path,rnlvl=2)\n _do_test(mkdf(nrows, ncols,c_idx_nlevels=2),path,cnlvl=2)\n _do_test(mkdf(nrows, ncols,r_idx_nlevels=2,c_idx_nlevels=2),\n path,rnlvl=2,cnlvl=2)\n\n def test_to_csv_from_csv_w_some_infs(self):\n\n # test roundtrip with inf, -inf, nan, as full columns and mix\n self.frame['G'] = np.nan\n f = lambda x: [np.inf, np.nan][np.random.rand() < .5]\n self.frame['H'] = self.frame.index.map(f)\n\n with ensure_clean() as path:\n self.frame.to_csv(path)\n recons = DataFrame.from_csv(path)\n\n assert_frame_equal(self.frame, recons, check_names=False) # TODO to_csv drops column name\n assert_frame_equal(np.isinf(self.frame), np.isinf(recons), check_names=False)\n\n def test_to_csv_from_csv_w_all_infs(self):\n\n # test roundtrip with inf, -inf, nan, as full columns and mix\n self.frame['E'] = np.inf\n self.frame['F'] = -np.inf\n\n with ensure_clean() as path:\n self.frame.to_csv(path)\n recons = DataFrame.from_csv(path)\n\n assert_frame_equal(self.frame, recons, check_names=False) # TODO to_csv drops column name\n assert_frame_equal(np.isinf(self.frame), np.isinf(recons), check_names=False)\n\n def test_to_csv_no_index(self):\n # GH 3624, after appending columns, to_csv fails\n pname = '__tmp_to_csv_no_index__'\n with ensure_clean(pname) as path:\n df = DataFrame({'c1':[1,2,3], 'c2':[4,5,6]})\n df.to_csv(path, index=False)\n result = read_csv(path)\n assert_frame_equal(df,result)\n df['c3'] = Series([7,8,9],dtype='int64')\n df.to_csv(path, index=False)\n result = read_csv(path)\n assert_frame_equal(df,result)\n\n def test_to_csv_headers(self):\n # GH6186, the presence or absence of `index` incorrectly\n # causes to_csv to have different header semantics.\n pname = '__tmp_to_csv_headers__'\n from_df = DataFrame([[1, 2], [3, 4]], columns=['A', 'B'])\n to_df = DataFrame([[1, 2], [3, 4]], columns=['X', 'Y'])\n with ensure_clean(pname) as path:\n from_df.to_csv(path, header=['X', 'Y'])\n recons = DataFrame.from_csv(path)\n assert_frame_equal(to_df, recons)\n\n 
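# with index=False the file carries no index column, so from_csv picks up the first data column as the index; reset_index below restores it before comparing against the aliased frame\n            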
from_df.to_csv(path, index=False, header=['X', 'Y'])\n recons = DataFrame.from_csv(path)\n recons.reset_index(inplace=True)\n assert_frame_equal(to_df, recons)\n\n def test_to_csv_multiindex(self):\n\n pname = '__tmp_to_csv_multiindex__'\n frame = self.frame\n old_index = frame.index\n arrays = np.arange(len(old_index) * 2).reshape(2, -1)\n new_index = MultiIndex.from_arrays(arrays, names=['first', 'second'])\n frame.index = new_index\n\n with ensure_clean(pname) as path:\n\n frame.to_csv(path, header=False)\n frame.to_csv(path, columns=['A', 'B'])\n\n # round trip\n frame.to_csv(path)\n df = DataFrame.from_csv(path, index_col=[0, 1], parse_dates=False)\n\n assert_frame_equal(frame, df, check_names=False) # TODO to_csv drops column name\n self.assertEqual(frame.index.names, df.index.names)\n self.frame.index = old_index # needed if setUP becomes a classmethod\n\n # try multiindex with dates\n tsframe = self.tsframe\n old_index = tsframe.index\n new_index = [old_index, np.arange(len(old_index))]\n tsframe.index = MultiIndex.from_arrays(new_index)\n\n tsframe.to_csv(path, index_label=['time', 'foo'])\n recons = DataFrame.from_csv(path, index_col=[0, 1])\n assert_frame_equal(tsframe, recons, check_names=False) # TODO to_csv drops column name\n\n # do not load index\n tsframe.to_csv(path)\n recons = DataFrame.from_csv(path, index_col=None)\n np.testing.assert_equal(len(recons.columns), len(tsframe.columns) + 2)\n\n # no index\n tsframe.to_csv(path, index=False)\n recons = DataFrame.from_csv(path, index_col=None)\n assert_almost_equal(recons.values, self.tsframe.values)\n self.tsframe.index = old_index # needed if setUP becomes classmethod\n\n with ensure_clean(pname) as path:\n # GH3571, GH1651, GH3141\n\n def _make_frame(names=None):\n if names is True:\n names = ['first','second']\n return DataFrame(np.random.randint(0,10,size=(3,3)),\n columns=MultiIndex.from_tuples([('bah', 'foo'),\n ('bah', 'bar'),\n ('ban', 'baz')],\n names=names),\n dtype='int64')\n\n # column & index are multi-index\n df = mkdf(5,3,r_idx_nlevels=2,c_idx_nlevels=4)\n df.to_csv(path,tupleize_cols=False)\n result = read_csv(path,header=[0,1,2,3],index_col=[0,1],tupleize_cols=False)\n assert_frame_equal(df,result)\n\n # column is mi\n df = mkdf(5,3,r_idx_nlevels=1,c_idx_nlevels=4)\n df.to_csv(path,tupleize_cols=False)\n result = read_csv(path,header=[0,1,2,3],index_col=0,tupleize_cols=False)\n assert_frame_equal(df,result)\n\n # dup column names?\n df = mkdf(5,3,r_idx_nlevels=3,c_idx_nlevels=4)\n df.to_csv(path,tupleize_cols=False)\n result = read_csv(path,header=[0,1,2,3],index_col=[0,1,2],tupleize_cols=False)\n assert_frame_equal(df,result)\n\n # writing with no index\n df = _make_frame()\n df.to_csv(path,tupleize_cols=False,index=False)\n result = read_csv(path,header=[0,1],tupleize_cols=False)\n assert_frame_equal(df,result)\n\n # we lose the names here\n df = _make_frame(True)\n df.to_csv(path,tupleize_cols=False,index=False)\n result = read_csv(path,header=[0,1],tupleize_cols=False)\n self.assertTrue(all([ x is None for x in result.columns.names ]))\n result.columns.names = df.columns.names\n assert_frame_equal(df,result)\n\n # tupleize_cols=True and index=False\n df = _make_frame(True)\n df.to_csv(path,tupleize_cols=True,index=False)\n result = read_csv(path,header=0,tupleize_cols=True,index_col=None)\n result.columns = df.columns\n assert_frame_equal(df,result)\n\n # whatsnew example\n df = _make_frame()\n df.to_csv(path,tupleize_cols=False)\n result = read_csv(path,header=[0,1],index_col=[0],tupleize_cols=False)\n 
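# header=[0,1] consumes one header row per column level, so the MultiIndex columns written above should be rebuilt intact\n            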
assert_frame_equal(df,result)\n\n df = _make_frame(True)\n df.to_csv(path,tupleize_cols=False)\n result = read_csv(path,header=[0,1],index_col=[0],tupleize_cols=False)\n assert_frame_equal(df,result)\n\n # column & index are multi-index (compatibility)\n df = mkdf(5,3,r_idx_nlevels=2,c_idx_nlevels=4)\n df.to_csv(path,tupleize_cols=True)\n result = read_csv(path,header=0,index_col=[0,1],tupleize_cols=True)\n result.columns = df.columns\n assert_frame_equal(df,result)\n\n # invalid options\n df = _make_frame(True)\n df.to_csv(path,tupleize_cols=False)\n\n # catch invalid headers\n with assertRaisesRegexp(CParserError, 'Passed header=\\[0,1,2\\] are too many rows for this multi_index of columns'):\n read_csv(path,tupleize_cols=False,header=lrange(3),index_col=0)\n\n with assertRaisesRegexp(CParserError, 'Passed header=\\[0,1,2,3,4,5,6\\], len of 7, but only 6 lines in file'):\n read_csv(path,tupleize_cols=False,header=lrange(7),index_col=0)\n\n for i in [4,5,6]:\n with tm.assertRaises(CParserError):\n read_csv(path, tupleize_cols=False, header=lrange(i), index_col=0)\n\n # write with cols\n with assertRaisesRegexp(TypeError, 'cannot specify cols with a MultiIndex'):\n df.to_csv(path, tupleize_cols=False, columns=['foo', 'bar'])\n\n with ensure_clean(pname) as path:\n # empty\n tsframe[:0].to_csv(path)\n recons = DataFrame.from_csv(path)\n exp = tsframe[:0]\n exp.index = []\n\n self.assertTrue(recons.columns.equals(exp.columns))\n self.assertEqual(len(recons), 0)\n\n def test_to_csv_float32_nanrep(self):\n df = DataFrame(np.random.randn(1, 4).astype(np.float32))\n df[1] = np.nan\n\n with ensure_clean('__tmp_to_csv_float32_nanrep__.csv') as path:\n df.to_csv(path, na_rep=999)\n\n with open(path) as f:\n lines = f.readlines()\n self.assertEqual(lines[1].split(',')[2], '999')\n\n def test_to_csv_withcommas(self):\n\n # Commas inside fields should be correctly escaped when saving as CSV.\n df = DataFrame({'A': [1, 2, 3], 'B': ['5,6', '7,8', '9,0']})\n\n with ensure_clean('__tmp_to_csv_withcommas__.csv') as path:\n df.to_csv(path)\n df2 = DataFrame.from_csv(path)\n assert_frame_equal(df2, df)\n\n def test_to_csv_mixed(self):\n\n def create_cols(name):\n return [ \"%s%03d\" % (name,i) for i in range(5) ]\n\n df_float = DataFrame(np.random.randn(100, 5),dtype='float64',columns=create_cols('float'))\n df_int = DataFrame(np.random.randn(100, 5),dtype='int64',columns=create_cols('int'))\n df_bool = DataFrame(True,index=df_float.index,columns=create_cols('bool'))\n df_object = DataFrame('foo',index=df_float.index,columns=create_cols('object'))\n df_dt = DataFrame(Timestamp('20010101'),index=df_float.index,columns=create_cols('date'))\n\n # add in some nans\n df_float.ix[30:50,1:3] = np.nan\n\n #### this is a bug in read_csv right now ####\n #df_dt.ix[30:50,1:3] = np.nan\n\n df = pd.concat([ df_float, df_int, df_bool, df_object, df_dt ], axis=1)\n\n # dtype\n dtypes = dict()\n for n,dtype in [('float',np.float64),('int',np.int64),('bool',np.bool),('object',np.object)]:\n for c in create_cols(n):\n dtypes[c] = dtype\n\n with ensure_clean() as filename:\n df.to_csv(filename)\n rs = read_csv(filename, index_col=0, dtype=dtypes, parse_dates=create_cols('date'))\n assert_frame_equal(rs, df)\n\n def test_to_csv_dups_cols(self):\n\n df = DataFrame(np.random.randn(1000, 30),columns=lrange(15)+lrange(15),dtype='float64')\n\n with ensure_clean() as filename:\n df.to_csv(filename) # single dtype, fine\n result = read_csv(filename,index_col=0)\n result.columns = df.columns\n assert_frame_equal(result,df)\n\n df_float 
= DataFrame(np.random.randn(1000, 3),dtype='float64')\n df_int = DataFrame(np.random.randn(1000, 3),dtype='int64')\n df_bool = DataFrame(True,index=df_float.index,columns=lrange(3))\n df_object = DataFrame('foo',index=df_float.index,columns=lrange(3))\n df_dt = DataFrame(Timestamp('20010101'),index=df_float.index,columns=lrange(3))\n df = pd.concat([ df_float, df_int, df_bool, df_object, df_dt ], axis=1, ignore_index=True)\n\n cols = []\n for i in range(5):\n cols.extend([0,1,2])\n df.columns = cols\n\n from pandas import to_datetime\n with ensure_clean() as filename:\n df.to_csv(filename)\n result = read_csv(filename,index_col=0)\n\n # date cols\n for i in ['0.4','1.4','2.4']:\n result[i] = to_datetime(result[i])\n\n result.columns = df.columns\n assert_frame_equal(result,df)\n\n # GH3457\n from pandas.util.testing import makeCustomDataframe as mkdf\n\n N=10\n df= mkdf(N, 3)\n df.columns = ['a','a','b']\n\n with ensure_clean() as filename:\n df.to_csv(filename)\n\n # read_csv will rename the dups columns\n result = read_csv(filename,index_col=0)\n result = result.rename(columns={ 'a.1' : 'a' })\n assert_frame_equal(result,df)\n\n def test_to_csv_chunking(self):\n\n aa=DataFrame({'A':lrange(100000)})\n aa['B'] = aa.A + 1.0\n aa['C'] = aa.A + 2.0\n aa['D'] = aa.A + 3.0\n\n for chunksize in [10000,50000,100000]:\n with ensure_clean() as filename:\n aa.to_csv(filename,chunksize=chunksize)\n rs = read_csv(filename,index_col=0)\n assert_frame_equal(rs, aa)\n\n def test_to_csv_wide_frame_formatting(self):\n # Issue #8621\n df = DataFrame(np.random.randn(1, 100010), columns=None, index=None)\n with ensure_clean() as filename:\n df.to_csv(filename, header=False, index=False)\n rs = read_csv(filename, header=None)\n assert_frame_equal(rs, df)\n\n def test_to_csv_bug(self):\n f1 = StringIO('a,1.0\\nb,2.0')\n df = DataFrame.from_csv(f1, header=None)\n newdf = DataFrame({'t': df[df.columns[0]]})\n\n with ensure_clean() as path:\n newdf.to_csv(path)\n\n recons = read_csv(path, index_col=0)\n assert_frame_equal(recons, newdf, check_names=False) # don't check_names as t != 1\n\n def test_to_csv_unicode(self):\n\n df = DataFrame({u('c/\\u03c3'): [1, 2, 3]})\n with ensure_clean() as path:\n\n df.to_csv(path, encoding='UTF-8')\n df2 = read_csv(path, index_col=0, encoding='UTF-8')\n assert_frame_equal(df, df2)\n\n df.to_csv(path, encoding='UTF-8', index=False)\n df2 = read_csv(path, index_col=None, encoding='UTF-8')\n assert_frame_equal(df, df2)\n\n def test_to_csv_unicode_index_col(self):\n buf = StringIO('')\n df = DataFrame(\n [[u(\"\\u05d0\"), \"d2\", \"d3\", \"d4\"], [\"a1\", \"a2\", \"a3\", \"a4\"]],\n columns=[u(\"\\u05d0\"),\n u(\"\\u05d1\"), u(\"\\u05d2\"), u(\"\\u05d3\")],\n index=[u(\"\\u05d0\"), u(\"\\u05d1\")])\n\n df.to_csv(buf, encoding='UTF-8')\n buf.seek(0)\n\n df2 = read_csv(buf, index_col=0, encoding='UTF-8')\n assert_frame_equal(df, df2)\n\n def test_to_csv_stringio(self):\n buf = StringIO()\n self.frame.to_csv(buf)\n buf.seek(0)\n recons = read_csv(buf, index_col=0)\n assert_frame_equal(recons, self.frame, check_names=False) # TODO to_csv drops column name\n\n def test_to_csv_float_format(self):\n\n df = DataFrame([[0.123456, 0.234567, 0.567567],\n [12.32112, 123123.2, 321321.2]],\n index=['A', 'B'], columns=['X', 'Y', 'Z'])\n\n with ensure_clean() as filename:\n\n df.to_csv(filename, float_format='%.2f')\n\n rs = read_csv(filename, index_col=0)\n xp = DataFrame([[0.12, 0.23, 0.57],\n [12.32, 123123.20, 321321.20]],\n index=['A', 'B'], columns=['X', 'Y', 'Z'])\n assert_frame_equal(rs, 
xp)\n\n def test_to_csv_quoting(self):\n df = DataFrame({'A': [1, 2, 3], 'B': ['foo', 'bar', 'baz']})\n\n buf = StringIO()\n df.to_csv(buf, index=False, quoting=csv.QUOTE_NONNUMERIC)\n\n result = buf.getvalue()\n expected = ('\"A\",\"B\"\\n'\n '1,\"foo\"\\n'\n '2,\"bar\"\\n'\n '3,\"baz\"\\n')\n\n self.assertEqual(result, expected)\n\n # quoting windows line terminators, presents with encoding?\n # #3503\n text = 'a,b,c\\n1,\"test \\r\\n\",3\\n'\n df = pd.read_csv(StringIO(text))\n buf = StringIO()\n df.to_csv(buf, encoding='utf-8', index=False)\n self.assertEqual(buf.getvalue(), text)\n\n def test_to_csv_unicodewriter_quoting(self):\n df = DataFrame({'A': [1, 2, 3], 'B': ['foo', 'bar', 'baz']})\n\n buf = StringIO()\n df.to_csv(buf, index=False, quoting=csv.QUOTE_NONNUMERIC,\n encoding='utf-8')\n\n result = buf.getvalue()\n expected = ('\"A\",\"B\"\\n'\n '1,\"foo\"\\n'\n '2,\"bar\"\\n'\n '3,\"baz\"\\n')\n\n self.assertEqual(result, expected)\n\n def test_to_csv_quote_none(self):\n # GH4328\n df = DataFrame({'A': ['hello', '{\"hello\"}']})\n for encoding in (None, 'utf-8'):\n buf = StringIO()\n df.to_csv(buf, quoting=csv.QUOTE_NONE,\n encoding=encoding, index=False)\n result = buf.getvalue()\n expected = 'A\\nhello\\n{\"hello\"}\\n'\n self.assertEqual(result, expected)\n\n def test_to_csv_index_no_leading_comma(self):\n df = DataFrame({'A': [1, 2, 3], 'B': [4, 5, 6]},\n index=['one', 'two', 'three'])\n\n buf = StringIO()\n df.to_csv(buf, index_label=False)\n expected = ('A,B\\n'\n 'one,1,4\\n'\n 'two,2,5\\n'\n 'three,3,6\\n')\n self.assertEqual(buf.getvalue(), expected)\n\n def test_to_csv_line_terminators(self):\n df = DataFrame({'A': [1, 2, 3], 'B': [4, 5, 6]},\n index=['one', 'two', 'three'])\n\n buf = StringIO()\n df.to_csv(buf, line_terminator='\\r\\n')\n expected = (',A,B\\r\\n'\n 'one,1,4\\r\\n'\n 'two,2,5\\r\\n'\n 'three,3,6\\r\\n')\n self.assertEqual(buf.getvalue(), expected)\n\n buf = StringIO()\n df.to_csv(buf) # The default line terminator remains \\n\n expected = (',A,B\\n'\n 'one,1,4\\n'\n 'two,2,5\\n'\n 'three,3,6\\n')\n self.assertEqual(buf.getvalue(), expected)\n\n def test_to_csv_from_csv_categorical(self):\n\n # CSV with categoricals should result in the same output as when one would add a \"normal\"\n # Series/DataFrame.\n s = Series(pd.Categorical(['a', 'b', 'b', 'a', 'a', 'c', 'c', 'c']))\n s2 = Series(['a', 'b', 'b', 'a', 'a', 'c', 'c', 'c'])\n res = StringIO()\n s.to_csv(res)\n exp = StringIO()\n s2.to_csv(exp)\n self.assertEqual(res.getvalue(), exp.getvalue())\n\n df = DataFrame({\"s\":s})\n df2 = DataFrame({\"s\":s2})\n res = StringIO()\n df.to_csv(res)\n exp = StringIO()\n df2.to_csv(exp)\n self.assertEqual(res.getvalue(), exp.getvalue())\n\n def test_to_csv_path_is_none(self):\n # GH 8215\n # Make sure we return string for consistency with\n # Series.to_csv()\n csv_str = self.frame.to_csv(path=None)\n self.assertIsInstance(csv_str, str)\n recons = pd.read_csv(StringIO(csv_str), index_col=0)\n assert_frame_equal(self.frame, recons)\n\n def test_info(self):\n io = StringIO()\n self.frame.info(buf=io)\n self.tsframe.info(buf=io)\n\n frame = DataFrame(np.random.randn(5, 3))\n\n import sys\n sys.stdout = StringIO()\n frame.info()\n frame.info(verbose=False)\n sys.stdout = sys.__stdout__\n\n def test_info_wide(self):\n from pandas import set_option, reset_option\n io = StringIO()\n df = DataFrame(np.random.randn(5, 101))\n df.info(buf=io)\n\n io = StringIO()\n df.info(buf=io, max_cols=101)\n rs = io.getvalue()\n self.assertTrue(len(rs.splitlines()) > 100)\n xp = rs\n\n 
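# raising display.max_info_columns above the column count should let a plain info() call emit the same full (non-truncated) listing as passing max_cols explicitly\n        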
set_option('display.max_info_columns', 101)\n io = StringIO()\n df.info(buf=io)\n self.assertEqual(rs, xp)\n reset_option('display.max_info_columns')\n\n def test_info_duplicate_columns(self):\n io = StringIO()\n\n # it works!\n frame = DataFrame(np.random.randn(1500, 4),\n columns=['a', 'a', 'b', 'b'])\n frame.info(buf=io)\n\n def test_info_shows_column_dtypes(self):\n dtypes = ['int64', 'float64', 'datetime64[ns]', 'timedelta64[ns]',\n 'complex128', 'object', 'bool']\n data = {}\n n = 10\n for i, dtype in enumerate(dtypes):\n data[i] = np.random.randint(2, size=n).astype(dtype)\n df = DataFrame(data)\n buf = StringIO()\n df.info(buf=buf)\n res = buf.getvalue()\n for i, dtype in enumerate(dtypes):\n name = '%d %d non-null %s' % (i, n, dtype)\n assert name in res\n\n def test_info_max_cols(self):\n df = DataFrame(np.random.randn(10, 5))\n for len_, verbose in [(5, None), (5, False), (10, True)]:\n # For verbose always ^ setting ^ summarize ^ full output\n with option_context('max_info_columns', 4):\n buf = StringIO()\n df.info(buf=buf, verbose=verbose)\n res = buf.getvalue()\n self.assertEqual(len(res.strip().split('\\n')), len_)\n\n for len_, verbose in [(10, None), (5, False), (10, True)]:\n\n # max_cols no exceeded\n with option_context('max_info_columns', 5):\n buf = StringIO()\n df.info(buf=buf, verbose=verbose)\n res = buf.getvalue()\n self.assertEqual(len(res.strip().split('\\n')), len_)\n\n for len_, max_cols in [(10, 5), (5, 4)]:\n # setting truncates\n with option_context('max_info_columns', 4):\n buf = StringIO()\n df.info(buf=buf, max_cols=max_cols)\n res = buf.getvalue()\n self.assertEqual(len(res.strip().split('\\n')), len_)\n\n # setting wouldn't truncate\n with option_context('max_info_columns', 5):\n buf = StringIO()\n df.info(buf=buf, max_cols=max_cols)\n res = buf.getvalue()\n self.assertEqual(len(res.strip().split('\\n')), len_)\n\n def test_info_memory_usage(self):\n # Ensure memory usage is displayed, when asserted, on the last line\n dtypes = ['int64', 'float64', 'datetime64[ns]', 'timedelta64[ns]',\n 'complex128', 'object', 'bool']\n data = {}\n n = 10\n for i, dtype in enumerate(dtypes):\n data[i] = np.random.randint(2, size=n).astype(dtype)\n df = DataFrame(data)\n buf = StringIO()\n # display memory usage case\n df.info(buf=buf, memory_usage=True)\n res = buf.getvalue().splitlines()\n self.assertTrue(\"memory usage: \" in res[-1])\n # do not display memory usage cas\n df.info(buf=buf, memory_usage=False)\n res = buf.getvalue().splitlines()\n self.assertTrue(\"memory usage: \" not in res[-1])\n\n df.info(buf=buf, memory_usage=True)\n res = buf.getvalue().splitlines()\n # memory usage is a lower bound, so print it as XYZ+ MB\n self.assertTrue(re.match(r\"memory usage: [^+]+\\+\", res[-1]))\n\n df.iloc[:, :5].info(buf=buf, memory_usage=True)\n res = buf.getvalue().splitlines()\n # excluded column with object dtype, so estimate is accurate\n self.assertFalse(re.match(r\"memory usage: [^+]+\\+\", res[-1]))\n\n df_with_object_index = pd.DataFrame({'a': [1]}, index=['foo'])\n df_with_object_index.info(buf=buf, memory_usage=True)\n res = buf.getvalue().splitlines()\n self.assertTrue(re.match(r\"memory usage: [^+]+\\+\", res[-1]))\n\n # Test a DataFrame with duplicate columns\n dtypes = ['int64', 'int64', 'int64', 'float64']\n data = {}\n n = 100\n for i, dtype in enumerate(dtypes):\n data[i] = np.random.randint(2, size=n).astype(dtype)\n df = DataFrame(data)\n df.columns = dtypes\n # Ensure df size is as expected\n df_size = df.memory_usage().sum()\n exp_size = 
len(dtypes) * n * 8 # cols * rows * bytes\n self.assertEqual(df_size, exp_size)\n # Ensure number of cols in memory_usage is the same as df\n size_df = np.size(df.columns.values) # index=False; default\n self.assertEqual(size_df, np.size(df.memory_usage()))\n\n # test for validity\n DataFrame(1,index=['a'],columns=['A']).memory_usage(index=True)\n DataFrame(1,index=['a'],columns=['A']).index.nbytes\n DataFrame(1,index=pd.MultiIndex.from_product([['a'],range(1000)]),columns=['A']).index.nbytes\n DataFrame(1,index=pd.MultiIndex.from_product([['a'],range(1000)]),columns=['A']).index.values.nbytes\n DataFrame(1,index=pd.MultiIndex.from_product([['a'],range(1000)]),columns=['A']).memory_usage(index=True)\n DataFrame(1,index=pd.MultiIndex.from_product([['a'],range(1000)]),columns=['A']).index.nbytes\n DataFrame(1,index=pd.MultiIndex.from_product([['a'],range(1000)]),columns=['A']).index.values.nbytes\n\n def test_dtypes(self):\n self.mixed_frame['bool'] = self.mixed_frame['A'] > 0\n result = self.mixed_frame.dtypes\n expected = Series(dict((k, v.dtype)\n for k, v in compat.iteritems(self.mixed_frame)),\n index=result.index)\n assert_series_equal(result, expected)\n\n # compat, GH 8722\n with option_context('use_inf_as_null',True):\n df = DataFrame([[1]])\n result = df.dtypes\n assert_series_equal(result,Series({0:np.dtype('int64')}))\n\n def test_convert_objects(self):\n\n oops = self.mixed_frame.T.T\n converted = oops.convert_objects()\n assert_frame_equal(converted, self.mixed_frame)\n self.assertEqual(converted['A'].dtype, np.float64)\n\n # force numeric conversion\n self.mixed_frame['H'] = '1.'\n self.mixed_frame['I'] = '1'\n\n # add in some items that will be nan\n l = len(self.mixed_frame)\n self.mixed_frame['J'] = '1.'\n self.mixed_frame['K'] = '1'\n self.mixed_frame.ix[0:5,['J','K']] = 'garbled'\n converted = self.mixed_frame.convert_objects(convert_numeric=True)\n self.assertEqual(converted['H'].dtype, 'float64')\n self.assertEqual(converted['I'].dtype, 'int64')\n self.assertEqual(converted['J'].dtype, 'float64')\n self.assertEqual(converted['K'].dtype, 'float64')\n self.assertEqual(len(converted['J'].dropna()), l-5)\n self.assertEqual(len(converted['K'].dropna()), l-5)\n\n # via astype\n converted = self.mixed_frame.copy()\n converted['H'] = converted['H'].astype('float64')\n converted['I'] = converted['I'].astype('int64')\n self.assertEqual(converted['H'].dtype, 'float64')\n self.assertEqual(converted['I'].dtype, 'int64')\n\n # via astype, but errors\n converted = self.mixed_frame.copy()\n with assertRaisesRegexp(ValueError, 'invalid literal'):\n converted['H'].astype('int32')\n\n # mixed in a single column\n df = DataFrame(dict(s = Series([1, 'na', 3 ,4])))\n result = df.convert_objects(convert_numeric=True)\n expected = DataFrame(dict(s = Series([1, np.nan, 3 ,4])))\n assert_frame_equal(result, expected)\n\n def test_convert_objects_no_conversion(self):\n mixed1 = DataFrame(\n {'a': [1, 2, 3], 'b': [4.0, 5, 6], 'c': ['x', 'y', 'z']})\n mixed2 = mixed1.convert_objects()\n assert_frame_equal(mixed1, mixed2)\n\n def test_append_series_dict(self):\n df = DataFrame(np.random.randn(5, 4),\n columns=['foo', 'bar', 'baz', 'qux'])\n\n series = df.ix[4]\n with assertRaisesRegexp(ValueError, 'Indexes have overlapping values'):\n df.append(series, verify_integrity=True)\n series.name = None\n with assertRaisesRegexp(TypeError, 'Can only append a Series if '\n 'ignore_index=True'):\n df.append(series, verify_integrity=True)\n\n result = df.append(series[::-1], ignore_index=True)\n expected = 
df.append(DataFrame({0: series[::-1]}, index=df.columns).T,\n ignore_index=True)\n assert_frame_equal(result, expected)\n\n # dict\n result = df.append(series.to_dict(), ignore_index=True)\n assert_frame_equal(result, expected)\n\n result = df.append(series[::-1][:3], ignore_index=True)\n expected = df.append(DataFrame({0: series[::-1][:3]}).T,\n ignore_index=True)\n assert_frame_equal(result, expected.ix[:, result.columns])\n\n # can append when name set\n row = df.ix[4]\n row.name = 5\n result = df.append(row)\n expected = df.append(df[-1:], ignore_index=True)\n assert_frame_equal(result, expected)\n\n def test_append_list_of_series_dicts(self):\n df = DataFrame(np.random.randn(5, 4),\n columns=['foo', 'bar', 'baz', 'qux'])\n\n dicts = [x.to_dict() for idx, x in df.iterrows()]\n\n result = df.append(dicts, ignore_index=True)\n expected = df.append(df, ignore_index=True)\n assert_frame_equal(result, expected)\n\n # different columns\n dicts = [{'foo': 1, 'bar': 2, 'baz': 3, 'peekaboo': 4},\n {'foo': 5, 'bar': 6, 'baz': 7, 'peekaboo': 8}]\n result = df.append(dicts, ignore_index=True)\n expected = df.append(DataFrame(dicts), ignore_index=True)\n assert_frame_equal(result, expected)\n\n def test_append_empty_dataframe(self):\n\n # Empty df append empty df\n df1 = DataFrame([])\n df2 = DataFrame([])\n result = df1.append(df2)\n expected = df1.copy()\n assert_frame_equal(result, expected)\n\n # Non-empty df append empty df\n df1 = DataFrame(np.random.randn(5, 2))\n df2 = DataFrame()\n result = df1.append(df2)\n expected = df1.copy()\n assert_frame_equal(result, expected)\n\n # Empty df with columns append empty df\n df1 = DataFrame(columns=['bar', 'foo'])\n df2 = DataFrame()\n result = df1.append(df2)\n expected = df1.copy()\n assert_frame_equal(result, expected)\n\n # Non-Empty df with columns append empty df\n df1 = DataFrame(np.random.randn(5, 2), columns=['bar', 'foo'])\n df2 = DataFrame()\n result = df1.append(df2)\n expected = df1.copy()\n assert_frame_equal(result, expected)\n\n def test_append_dtypes(self):\n\n # GH 5754\n # row appends of different dtypes (so need to do by-item)\n # can sometimes infer the correct type\n\n df1 = DataFrame({ 'bar' : Timestamp('20130101') }, index=lrange(5))\n df2 = DataFrame()\n result = df1.append(df2)\n expected = df1.copy()\n assert_frame_equal(result, expected)\n\n df1 = DataFrame({ 'bar' : Timestamp('20130101') }, index=lrange(1))\n df2 = DataFrame({ 'bar' : 'foo' }, index=lrange(1,2))\n result = df1.append(df2)\n expected = DataFrame({ 'bar' : [ Timestamp('20130101'), 'foo' ]})\n assert_frame_equal(result, expected)\n\n df1 = DataFrame({ 'bar' : Timestamp('20130101') }, index=lrange(1))\n df2 = DataFrame({ 'bar' : np.nan }, index=lrange(1,2))\n result = df1.append(df2)\n expected = DataFrame({ 'bar' : Series([ Timestamp('20130101'), np.nan ],dtype='M8[ns]') })\n assert_frame_equal(result, expected)\n\n df1 = DataFrame({ 'bar' : Timestamp('20130101') }, index=lrange(1))\n df2 = DataFrame({ 'bar' : np.nan }, index=lrange(1,2), dtype=object)\n result = df1.append(df2)\n expected = DataFrame({ 'bar' : Series([ Timestamp('20130101'), np.nan ],dtype='M8[ns]') })\n assert_frame_equal(result, expected)\n\n df1 = DataFrame({ 'bar' : np.nan }, index=lrange(1))\n df2 = DataFrame({ 'bar' : Timestamp('20130101') }, index=lrange(1,2))\n result = df1.append(df2)\n expected = DataFrame({ 'bar' : Series([ np.nan, Timestamp('20130101')] ,dtype='M8[ns]') })\n assert_frame_equal(result, expected)\n\n df1 = DataFrame({ 'bar' : Timestamp('20130101') }, 
index=lrange(1))\n df2 = DataFrame({ 'bar' : 1 }, index=lrange(1,2), dtype=object)\n result = df1.append(df2)\n expected = DataFrame({ 'bar' : Series([ Timestamp('20130101'), 1 ]) })\n assert_frame_equal(result, expected)\n\n def test_asfreq(self):\n offset_monthly = self.tsframe.asfreq(datetools.bmonthEnd)\n rule_monthly = self.tsframe.asfreq('BM')\n\n assert_almost_equal(offset_monthly['A'], rule_monthly['A'])\n\n filled = rule_monthly.asfreq('B', method='pad')\n # TODO: actually check that this worked.\n\n # don't forget!\n filled_dep = rule_monthly.asfreq('B', method='pad')\n\n # test does not blow up on length-0 DataFrame\n zero_length = self.tsframe.reindex([])\n result = zero_length.asfreq('BM')\n self.assertIsNot(result, zero_length)\n\n def test_asfreq_datetimeindex(self):\n df = DataFrame({'A': [1, 2, 3]},\n index=[datetime(2011, 11, 1), datetime(2011, 11, 2),\n datetime(2011, 11, 3)])\n df = df.asfreq('B')\n tm.assert_isinstance(df.index, DatetimeIndex)\n\n ts = df['A'].asfreq('B')\n tm.assert_isinstance(ts.index, DatetimeIndex)\n\n def test_at_time_between_time_datetimeindex(self):\n index = date_range(\"2012-01-01\", \"2012-01-05\", freq='30min')\n df = DataFrame(randn(len(index), 5), index=index)\n akey = time(12, 0, 0)\n bkey = slice(time(13, 0, 0), time(14, 0, 0))\n ainds = [24, 72, 120, 168]\n binds = [26, 27, 28, 74, 75, 76, 122, 123, 124, 170, 171, 172]\n\n result = df.at_time(akey)\n expected = df.ix[akey]\n expected2 = df.ix[ainds]\n assert_frame_equal(result, expected)\n assert_frame_equal(result, expected2)\n self.assertEqual(len(result), 4)\n\n result = df.between_time(bkey.start, bkey.stop)\n expected = df.ix[bkey]\n expected2 = df.ix[binds]\n assert_frame_equal(result, expected)\n assert_frame_equal(result, expected2)\n self.assertEqual(len(result), 12)\n\n result = df.copy()\n result.ix[akey] = 0\n result = result.ix[akey]\n expected = df.ix[akey].copy()\n expected.ix[:] = 0\n assert_frame_equal(result, expected)\n\n result = df.copy()\n result.ix[akey] = 0\n result.ix[akey] = df.ix[ainds]\n assert_frame_equal(result, df)\n\n result = df.copy()\n result.ix[bkey] = 0\n result = result.ix[bkey]\n expected = df.ix[bkey].copy()\n expected.ix[:] = 0\n assert_frame_equal(result, expected)\n\n result = df.copy()\n result.ix[bkey] = 0\n result.ix[bkey] = df.ix[binds]\n assert_frame_equal(result, df)\n\n def test_as_matrix(self):\n frame = self.frame\n mat = frame.as_matrix()\n\n frameCols = frame.columns\n for i, row in enumerate(mat):\n for j, value in enumerate(row):\n col = frameCols[j]\n if np.isnan(value):\n self.assertTrue(np.isnan(frame[col][i]))\n else:\n self.assertEqual(value, frame[col][i])\n\n # mixed type\n mat = self.mixed_frame.as_matrix(['foo', 'A'])\n self.assertEqual(mat[0, 0], 'bar')\n\n df = DataFrame({'real': [1, 2, 3], 'complex': [1j, 2j, 3j]})\n mat = df.as_matrix()\n self.assertEqual(mat[0, 0], 1j)\n\n # single block corner case\n mat = self.frame.as_matrix(['A', 'B'])\n expected = self.frame.reindex(columns=['A', 'B']).values\n assert_almost_equal(mat, expected)\n\n def test_as_matrix_duplicates(self):\n df = DataFrame([[1, 2, 'a', 'b'],\n [1, 2, 'a', 'b']],\n columns=['one', 'one', 'two', 'two'])\n\n result = df.values\n expected = np.array([[1, 2, 'a', 'b'], [1, 2, 'a', 'b']],\n dtype=object)\n\n self.assertTrue(np.array_equal(result, expected))\n\n def test_ftypes(self):\n frame = self.mixed_float\n expected = Series(dict(A = 'float32:dense', B = 'float32:dense', C = 'float16:dense', D = 'float64:dense'))\n expected.sort()\n result = 
frame.ftypes\n result.sort()\n assert_series_equal(result,expected)\n\n def test_values(self):\n self.frame.values[:, 0] = 5.\n self.assertTrue((self.frame.values[:, 0] == 5).all())\n\n def test_deepcopy(self):\n cp = deepcopy(self.frame)\n series = cp['A']\n series[:] = 10\n for idx, value in compat.iteritems(series):\n self.assertNotEqual(self.frame['A'][idx], value)\n\n def test_copy(self):\n cop = self.frame.copy()\n cop['E'] = cop['A']\n self.assertNotIn('E', self.frame)\n\n # copy objects\n copy = self.mixed_frame.copy()\n self.assertIsNot(copy._data, self.mixed_frame._data)\n\n def _check_method(self, method='pearson', check_minp=False):\n if not check_minp:\n correls = self.frame.corr(method=method)\n exp = self.frame['A'].corr(self.frame['C'], method=method)\n assert_almost_equal(correls['A']['C'], exp)\n else:\n result = self.frame.corr(min_periods=len(self.frame) - 8)\n expected = self.frame.corr()\n expected.ix['A', 'B'] = expected.ix['B', 'A'] = nan\n\n def test_corr_pearson(self):\n tm._skip_if_no_scipy()\n self.frame['A'][:5] = nan\n self.frame['B'][5:10] = nan\n\n self._check_method('pearson')\n\n def test_corr_kendall(self):\n tm._skip_if_no_scipy()\n self.frame['A'][:5] = nan\n self.frame['B'][5:10] = nan\n\n self._check_method('kendall')\n\n def test_corr_spearman(self):\n tm._skip_if_no_scipy()\n self.frame['A'][:5] = nan\n self.frame['B'][5:10] = nan\n\n self._check_method('spearman')\n\n def test_corr_non_numeric(self):\n tm._skip_if_no_scipy()\n self.frame['A'][:5] = nan\n self.frame['B'][5:10] = nan\n\n # exclude non-numeric types\n result = self.mixed_frame.corr()\n expected = self.mixed_frame.ix[:, ['A', 'B', 'C', 'D']].corr()\n assert_frame_equal(result, expected)\n\n def test_corr_nooverlap(self):\n tm._skip_if_no_scipy()\n\n # nothing in common\n for meth in ['pearson', 'kendall', 'spearman']:\n df = DataFrame({'A': [1, 1.5, 1, np.nan, np.nan, np.nan],\n 'B': [np.nan, np.nan, np.nan, 1, 1.5, 1]})\n rs = df.corr(meth)\n self.assertTrue(isnull(rs.ix['A', 'B']))\n self.assertTrue(isnull(rs.ix['B', 'A']))\n self.assertEqual(rs.ix['A', 'A'], 1)\n self.assertEqual(rs.ix['B', 'B'], 1)\n\n def test_corr_constant(self):\n tm._skip_if_no_scipy()\n\n # constant --> all NA\n\n for meth in ['pearson', 'spearman']:\n df = DataFrame({'A': [1, 1, 1, np.nan, np.nan, np.nan],\n 'B': [np.nan, np.nan, np.nan, 1, 1, 1]})\n rs = df.corr(meth)\n self.assertTrue(isnull(rs.values).all())\n\n def test_corr_int(self):\n # dtypes other than float64 #1761\n df3 = DataFrame({\"a\": [1, 2, 3, 4], \"b\": [1, 2, 3, 4]})\n\n # it works!\n df3.cov()\n df3.corr()\n\n def test_cov(self):\n # min_periods no NAs (corner case)\n expected = self.frame.cov()\n result = self.frame.cov(min_periods=len(self.frame))\n\n assert_frame_equal(expected, result)\n\n result = self.frame.cov(min_periods=len(self.frame) + 1)\n self.assertTrue(isnull(result.values).all())\n\n # with NAs\n frame = self.frame.copy()\n frame['A'][:5] = nan\n frame['B'][5:10] = nan\n result = self.frame.cov(min_periods=len(self.frame) - 8)\n expected = self.frame.cov()\n expected.ix['A', 'B'] = np.nan\n expected.ix['B', 'A'] = np.nan\n\n # regular\n self.frame['A'][:5] = nan\n self.frame['B'][:10] = nan\n cov = self.frame.cov()\n\n assert_almost_equal(cov['A']['C'],\n self.frame['A'].cov(self.frame['C']))\n\n # exclude non-numeric types\n result = self.mixed_frame.cov()\n expected = self.mixed_frame.ix[:, ['A', 'B', 'C', 'D']].cov()\n assert_frame_equal(result, expected)\n\n # Single column frame\n df = 
DataFrame(np.linspace(0.0,1.0,10))\n result = df.cov()\n expected = DataFrame(np.cov(df.values.T).reshape((1,1)),\n index=df.columns,columns=df.columns)\n assert_frame_equal(result, expected)\n df.ix[0] = np.nan\n result = df.cov()\n expected = DataFrame(np.cov(df.values[1:].T).reshape((1,1)),\n index=df.columns,columns=df.columns)\n assert_frame_equal(result, expected)\n\n def test_corrwith(self):\n a = self.tsframe\n noise = Series(randn(len(a)), index=a.index)\n\n b = self.tsframe + noise\n\n # make sure order does not matter\n b = b.reindex(columns=b.columns[::-1], index=b.index[::-1][10:])\n del b['B']\n\n colcorr = a.corrwith(b, axis=0)\n assert_almost_equal(colcorr['A'], a['A'].corr(b['A']))\n\n rowcorr = a.corrwith(b, axis=1)\n assert_series_equal(rowcorr, a.T.corrwith(b.T, axis=0))\n\n dropped = a.corrwith(b, axis=0, drop=True)\n assert_almost_equal(dropped['A'], a['A'].corr(b['A']))\n self.assertNotIn('B', dropped)\n\n dropped = a.corrwith(b, axis=1, drop=True)\n self.assertNotIn(a.index[-1], dropped.index)\n\n # non time-series data\n index = ['a', 'b', 'c', 'd', 'e']\n columns = ['one', 'two', 'three', 'four']\n df1 = DataFrame(randn(5, 4), index=index, columns=columns)\n df2 = DataFrame(randn(4, 4), index=index[:4], columns=columns)\n correls = df1.corrwith(df2, axis=1)\n for row in index[:4]:\n assert_almost_equal(correls[row], df1.ix[row].corr(df2.ix[row]))\n\n def test_corrwith_with_objects(self):\n df1 = tm.makeTimeDataFrame()\n df2 = tm.makeTimeDataFrame()\n cols = ['A', 'B', 'C', 'D']\n\n df1['obj'] = 'foo'\n df2['obj'] = 'bar'\n\n result = df1.corrwith(df2)\n expected = df1.ix[:, cols].corrwith(df2.ix[:, cols])\n assert_series_equal(result, expected)\n\n result = df1.corrwith(df2, axis=1)\n expected = df1.ix[:, cols].corrwith(df2.ix[:, cols], axis=1)\n assert_series_equal(result, expected)\n\n def test_corrwith_series(self):\n result = self.tsframe.corrwith(self.tsframe['A'])\n expected = self.tsframe.apply(self.tsframe['A'].corr)\n\n assert_series_equal(result, expected)\n\n def test_corrwith_matches_corrcoef(self):\n df1 = DataFrame(np.arange(10000), columns=['a'])\n df2 = DataFrame(np.arange(10000)**2, columns=['a'])\n c1 = df1.corrwith(df2)['a']\n c2 = np.corrcoef(df1['a'],df2['a'])[0][1]\n\n assert_almost_equal(c1, c2)\n self.assertTrue(c1 < 1)\n\n def test_drop_names(self):\n df = DataFrame([[1, 2, 3],[3, 4, 5],[5, 6, 7]], index=['a', 'b', 'c'],\n columns=['d', 'e', 'f'])\n df.index.name, df.columns.name = 'first', 'second'\n df_dropped_b = df.drop('b')\n df_dropped_e = df.drop('e', axis=1)\n df_inplace_b, df_inplace_e = df.copy(), df.copy()\n df_inplace_b.drop('b', inplace=True)\n df_inplace_e.drop('e', axis=1, inplace=True)\n for obj in (df_dropped_b, df_dropped_e, df_inplace_b, df_inplace_e):\n self.assertEqual(obj.index.name, 'first')\n self.assertEqual(obj.columns.name, 'second')\n self.assertEqual(list(df.columns), ['d', 'e', 'f'])\n\n def test_dropEmptyRows(self):\n N = len(self.frame.index)\n mat = randn(N)\n mat[:5] = nan\n\n frame = DataFrame({'foo': mat}, index=self.frame.index)\n original = Series(mat, index=self.frame.index)\n expected = original.dropna()\n inplace_frame1, inplace_frame2 = frame.copy(), frame.copy()\n\n smaller_frame = frame.dropna(how='all')\n # check that original was preserved\n assert_series_equal(frame['foo'], original)\n inplace_frame1.dropna(how='all', inplace=True)\n assert_series_equal(smaller_frame['foo'], expected)\n assert_series_equal(inplace_frame1['foo'], expected)\n\n smaller_frame = frame.dropna(how='all', 
subset=['foo'])\n inplace_frame2.dropna(how='all', subset=['foo'], inplace=True)\n assert_series_equal(smaller_frame['foo'], expected)\n assert_series_equal(inplace_frame2['foo'], expected)\n\n def test_dropIncompleteRows(self):\n N = len(self.frame.index)\n mat = randn(N)\n mat[:5] = nan\n\n frame = DataFrame({'foo': mat}, index=self.frame.index)\n frame['bar'] = 5\n original = Series(mat, index=self.frame.index)\n inp_frame1, inp_frame2 = frame.copy(), frame.copy()\n\n smaller_frame = frame.dropna()\n assert_series_equal(frame['foo'], original)\n inp_frame1.dropna(inplace=True)\n self.assert_numpy_array_equal(smaller_frame['foo'], mat[5:])\n self.assert_numpy_array_equal(inp_frame1['foo'], mat[5:])\n\n samesize_frame = frame.dropna(subset=['bar'])\n assert_series_equal(frame['foo'], original)\n self.assertTrue((frame['bar'] == 5).all())\n inp_frame2.dropna(subset=['bar'], inplace=True)\n self.assertTrue(samesize_frame.index.equals(self.frame.index))\n self.assertTrue(inp_frame2.index.equals(self.frame.index))\n\n def test_dropna(self):\n df = DataFrame(np.random.randn(6, 4))\n df[2][:2] = nan\n\n dropped = df.dropna(axis=1)\n expected = df.ix[:, [0, 1, 3]]\n inp = df.copy()\n inp.dropna(axis=1, inplace=True)\n assert_frame_equal(dropped, expected)\n assert_frame_equal(inp, expected)\n\n dropped = df.dropna(axis=0)\n expected = df.ix[lrange(2, 6)]\n inp = df.copy()\n inp.dropna(axis=0, inplace=True)\n assert_frame_equal(dropped, expected)\n assert_frame_equal(inp, expected)\n\n # threshold\n dropped = df.dropna(axis=1, thresh=5)\n expected = df.ix[:, [0, 1, 3]]\n inp = df.copy()\n inp.dropna(axis=1, thresh=5, inplace=True)\n assert_frame_equal(dropped, expected)\n assert_frame_equal(inp, expected)\n\n dropped = df.dropna(axis=0, thresh=4)\n expected = df.ix[lrange(2, 6)]\n inp = df.copy()\n inp.dropna(axis=0, thresh=4, inplace=True)\n assert_frame_equal(dropped, expected)\n assert_frame_equal(inp, expected)\n\n dropped = df.dropna(axis=1, thresh=4)\n assert_frame_equal(dropped, df)\n\n dropped = df.dropna(axis=1, thresh=3)\n assert_frame_equal(dropped, df)\n\n # subset\n dropped = df.dropna(axis=0, subset=[0, 1, 3])\n inp = df.copy()\n inp.dropna(axis=0, subset=[0, 1, 3], inplace=True)\n assert_frame_equal(dropped, df)\n assert_frame_equal(inp, df)\n\n # all\n dropped = df.dropna(axis=1, how='all')\n assert_frame_equal(dropped, df)\n\n df[2] = nan\n dropped = df.dropna(axis=1, how='all')\n expected = df.ix[:, [0, 1, 3]]\n assert_frame_equal(dropped, expected)\n\n # bad input\n self.assertRaises(ValueError, df.dropna, axis=3)\n\n\n def test_drop_and_dropna_caching(self):\n # tst that cacher updates\n original = Series([1, 2, np.nan])\n expected = Series([1, 2], dtype=original.dtype)\n df = pd.DataFrame({'A': original.values.copy()})\n df2 = df.copy()\n df['A'].dropna()\n assert_series_equal(df['A'], original)\n df['A'].dropna(inplace=True)\n assert_series_equal(df['A'], expected)\n df2['A'].drop([1])\n assert_series_equal(df2['A'], original)\n df2['A'].drop([1], inplace=True)\n assert_series_equal(df2['A'], original.drop([1]))\n\n def test_dropna_corner(self):\n # bad input\n self.assertRaises(ValueError, self.frame.dropna, how='foo')\n self.assertRaises(TypeError, self.frame.dropna, how=None)\n # non-existent column - 8303\n self.assertRaises(KeyError, self.frame.dropna, subset=['A','X'])\n\n def test_dropna_multiple_axes(self):\n df = DataFrame([[1, np.nan, 2, 3],\n [4, np.nan, 5, 6],\n [np.nan, np.nan, np.nan, np.nan],\n [7, np.nan, 8, 9]])\n cp = df.copy()\n result = 
df.dropna(how='all', axis=[0, 1])\n result2 = df.dropna(how='all', axis=(0, 1))\n expected = df.dropna(how='all').dropna(how='all', axis=1)\n\n assert_frame_equal(result, expected)\n assert_frame_equal(result2, expected)\n assert_frame_equal(df, cp)\n\n inp = df.copy()\n inp.dropna(how='all', axis=(0, 1), inplace=True)\n assert_frame_equal(inp, expected)\n\n def test_drop_duplicates(self):\n df = DataFrame({'AAA': ['foo', 'bar', 'foo', 'bar',\n 'foo', 'bar', 'bar', 'foo'],\n 'B': ['one', 'one', 'two', 'two',\n 'two', 'two', 'one', 'two'],\n 'C': [1, 1, 2, 2, 2, 2, 1, 2],\n 'D': lrange(8)})\n\n # single column\n result = df.drop_duplicates('AAA')\n expected = df[:2]\n assert_frame_equal(result, expected)\n\n result = df.drop_duplicates('AAA', take_last=True)\n expected = df.ix[[6, 7]]\n assert_frame_equal(result, expected)\n\n # multi column\n expected = df.ix[[0, 1, 2, 3]]\n result = df.drop_duplicates(np.array(['AAA', 'B']))\n assert_frame_equal(result, expected)\n result = df.drop_duplicates(['AAA', 'B'])\n assert_frame_equal(result, expected)\n\n result = df.drop_duplicates(('AAA', 'B'), take_last=True)\n expected = df.ix[[0, 5, 6, 7]]\n assert_frame_equal(result, expected)\n\n # consider everything\n df2 = df.ix[:, ['AAA', 'B', 'C']]\n\n result = df2.drop_duplicates()\n # in this case only\n expected = df2.drop_duplicates(['AAA', 'B'])\n assert_frame_equal(result, expected)\n\n result = df2.drop_duplicates(take_last=True)\n expected = df2.drop_duplicates(['AAA', 'B'], take_last=True)\n assert_frame_equal(result, expected)\n\n def test_drop_duplicates_deprecated_warning(self):\n df = DataFrame({'AAA': ['foo', 'bar', 'foo', 'bar',\n 'foo', 'bar', 'bar', 'foo'],\n 'B': ['one', 'one', 'two', 'two',\n 'two', 'two', 'one', 'two'],\n 'C': [1, 1, 2, 2, 2, 2, 1, 2],\n 'D': lrange(8)})\n expected = df[:2]\n\n # Raises warning\n with tm.assert_produces_warning(False):\n result = df.drop_duplicates(subset='AAA')\n assert_frame_equal(result, expected)\n\n with tm.assert_produces_warning(FutureWarning):\n result = df.drop_duplicates(cols='AAA')\n assert_frame_equal(result, expected)\n\n # Does not allow both subset and cols\n self.assertRaises(TypeError, df.drop_duplicates,\n kwargs={'cols': 'AAA', 'subset': 'B'})\n\n # Does not allow unknown kwargs\n self.assertRaises(TypeError, df.drop_duplicates,\n kwargs={'subset': 'AAA', 'bad_arg': True})\n\n def test_drop_duplicates_tuple(self):\n df = DataFrame({('AA', 'AB'): ['foo', 'bar', 'foo', 'bar',\n 'foo', 'bar', 'bar', 'foo'],\n 'B': ['one', 'one', 'two', 'two',\n 'two', 'two', 'one', 'two'],\n 'C': [1, 1, 2, 2, 2, 2, 1, 2],\n 'D': lrange(8)})\n\n # single column\n result = df.drop_duplicates(('AA', 'AB'))\n expected = df[:2]\n assert_frame_equal(result, expected)\n\n result = df.drop_duplicates(('AA', 'AB'), take_last=True)\n expected = df.ix[[6, 7]]\n assert_frame_equal(result, expected)\n\n # multi column\n expected = df.ix[[0, 1, 2, 3]]\n result = df.drop_duplicates((('AA', 'AB'), 'B'))\n assert_frame_equal(result, expected)\n\n def test_drop_duplicates_NA(self):\n # none\n df = DataFrame({'A': [None, None, 'foo', 'bar',\n 'foo', 'bar', 'bar', 'foo'],\n 'B': ['one', 'one', 'two', 'two',\n 'two', 'two', 'one', 'two'],\n 'C': [1.0, np.nan, np.nan, np.nan, 1., 1., 1, 1.],\n 'D': lrange(8)})\n\n # single column\n result = df.drop_duplicates('A')\n expected = df.ix[[0, 2, 3]]\n assert_frame_equal(result, expected)\n\n result = df.drop_duplicates('A', take_last=True)\n expected = df.ix[[1, 6, 7]]\n assert_frame_equal(result, expected)\n\n # multi 
column\n result = df.drop_duplicates(['A', 'B'])\n expected = df.ix[[0, 2, 3, 6]]\n assert_frame_equal(result, expected)\n\n result = df.drop_duplicates(['A', 'B'], take_last=True)\n expected = df.ix[[1, 5, 6, 7]]\n assert_frame_equal(result, expected)\n\n # nan\n df = DataFrame({'A': ['foo', 'bar', 'foo', 'bar',\n 'foo', 'bar', 'bar', 'foo'],\n 'B': ['one', 'one', 'two', 'two',\n 'two', 'two', 'one', 'two'],\n 'C': [1.0, np.nan, np.nan, np.nan, 1., 1., 1, 1.],\n 'D': lrange(8)})\n\n # single column\n result = df.drop_duplicates('C')\n expected = df[:2]\n assert_frame_equal(result, expected)\n\n result = df.drop_duplicates('C', take_last=True)\n expected = df.ix[[3, 7]]\n assert_frame_equal(result, expected)\n\n # multi column\n result = df.drop_duplicates(['C', 'B'])\n expected = df.ix[[0, 1, 2, 4]]\n assert_frame_equal(result, expected)\n\n result = df.drop_duplicates(['C', 'B'], take_last=True)\n expected = df.ix[[1, 3, 6, 7]]\n assert_frame_equal(result, expected)\n\n def test_drop_duplicates_inplace(self):\n orig = DataFrame({'A': ['foo', 'bar', 'foo', 'bar',\n 'foo', 'bar', 'bar', 'foo'],\n 'B': ['one', 'one', 'two', 'two',\n 'two', 'two', 'one', 'two'],\n 'C': [1, 1, 2, 2, 2, 2, 1, 2],\n 'D': lrange(8)})\n\n # single column\n df = orig.copy()\n df.drop_duplicates('A', inplace=True)\n expected = orig[:2]\n result = df\n assert_frame_equal(result, expected)\n\n df = orig.copy()\n df.drop_duplicates('A', take_last=True, inplace=True)\n expected = orig.ix[[6, 7]]\n result = df\n assert_frame_equal(result, expected)\n\n # multi column\n df = orig.copy()\n df.drop_duplicates(['A', 'B'], inplace=True)\n expected = orig.ix[[0, 1, 2, 3]]\n result = df\n assert_frame_equal(result, expected)\n\n df = orig.copy()\n df.drop_duplicates(['A', 'B'], take_last=True, inplace=True)\n expected = orig.ix[[0, 5, 6, 7]]\n result = df\n assert_frame_equal(result, expected)\n\n # consider everything\n orig2 = orig.ix[:, ['A', 'B', 'C']].copy()\n\n df2 = orig2.copy()\n df2.drop_duplicates(inplace=True)\n # in this case only\n expected = orig2.drop_duplicates(['A', 'B'])\n result = df2\n assert_frame_equal(result, expected)\n\n df2 = orig2.copy()\n df2.drop_duplicates(take_last=True, inplace=True)\n expected = orig2.drop_duplicates(['A', 'B'], take_last=True)\n result = df2\n assert_frame_equal(result, expected)\n\n def test_duplicated_deprecated_warning(self):\n df = DataFrame({'AAA': ['foo', 'bar', 'foo', 'bar',\n 'foo', 'bar', 'bar', 'foo'],\n 'B': ['one', 'one', 'two', 'two',\n 'two', 'two', 'one', 'two'],\n 'C': [1, 1, 2, 2, 2, 2, 1, 2],\n 'D': lrange(8)})\n\n # Raises warning\n with tm.assert_produces_warning(False):\n result = df.duplicated(subset='AAA')\n\n with tm.assert_produces_warning(FutureWarning):\n result = df.duplicated(cols='AAA')\n\n # Does not allow both subset and cols\n self.assertRaises(TypeError, df.duplicated,\n kwargs={'cols': 'AAA', 'subset': 'B'})\n\n # Does not allow unknown kwargs\n self.assertRaises(TypeError, df.duplicated,\n kwargs={'subset': 'AAA', 'bad_arg': True})\n\n def test_drop_col_still_multiindex(self):\n arrays = [['a', 'b', 'c', 'top'],\n ['', '', '', 'OD'],\n ['', '', '', 'wx']]\n\n tuples = sorted(zip(*arrays))\n index = MultiIndex.from_tuples(tuples)\n\n df = DataFrame(randn(3, 4), columns=index)\n del df[('a', '', '')]\n assert(isinstance(df.columns, MultiIndex))\n\n def test_drop(self):\n simple = DataFrame({\"A\": [1, 2, 3, 4], \"B\": [0, 1, 2, 3]})\n assert_frame_equal(simple.drop(\"A\", axis=1), simple[['B']])\n assert_frame_equal(simple.drop([\"A\", \"B\"], 
axis='columns'),\n simple[[]])\n assert_frame_equal(simple.drop([0, 1, 3], axis=0), simple.ix[[2], :])\n assert_frame_equal(simple.drop([0, 3], axis='index'), simple.ix[[1, 2], :])\n\n #non-unique - wheee!\n nu_df = DataFrame(lzip(range(3), range(-3, 1), list('abc')),\n columns=['a', 'a', 'b'])\n assert_frame_equal(nu_df.drop('a', axis=1), nu_df[['b']])\n assert_frame_equal(nu_df.drop('b', axis='columns'), nu_df['a'])\n\n nu_df = nu_df.set_index(pd.Index(['X', 'Y', 'X']))\n nu_df.columns = list('abc')\n assert_frame_equal(nu_df.drop('X', axis='rows'), nu_df.ix[[\"Y\"], :])\n assert_frame_equal(nu_df.drop(['X', 'Y'], axis=0), nu_df.ix[[], :])\n\n # inplace cache issue\n # GH 5628\n df = pd.DataFrame(np.random.randn(10,3), columns=list('abc'))\n expected = df[~(df.b>0)]\n df.drop(labels=df[df.b>0].index, inplace=True)\n assert_frame_equal(df,expected)\n\n def test_fillna(self):\n self.tsframe.ix[:5,'A'] = nan\n self.tsframe.ix[-5:,'A'] = nan\n\n zero_filled = self.tsframe.fillna(0)\n self.assertTrue((zero_filled.ix[:5,'A'] == 0).all())\n\n padded = self.tsframe.fillna(method='pad')\n self.assertTrue(np.isnan(padded.ix[:5,'A']).all())\n self.assertTrue((padded.ix[-5:,'A'] == padded.ix[-5,'A']).all())\n\n # mixed type\n self.mixed_frame.ix[5:20,'foo'] = nan\n self.mixed_frame.ix[-10:,'A'] = nan\n result = self.mixed_frame.fillna(value=0)\n result = self.mixed_frame.fillna(method='pad')\n\n self.assertRaises(ValueError, self.tsframe.fillna)\n self.assertRaises(ValueError, self.tsframe.fillna, 5, method='ffill')\n\n # mixed numeric (but no float16)\n mf = self.mixed_float.reindex(columns=['A','B','D'])\n mf.ix[-10:,'A'] = nan\n result = mf.fillna(value=0)\n _check_mixed_float(result, dtype = dict(C = None))\n\n result = mf.fillna(method='pad')\n _check_mixed_float(result, dtype = dict(C = None))\n\n # empty frame (GH #2778)\n df = DataFrame(columns=['x'])\n for m in ['pad','backfill']:\n df.x.fillna(method=m,inplace=1)\n df.x.fillna(method=m)\n\n # with different dtype (GH3386)\n df = DataFrame([['a','a',np.nan,'a'],['b','b',np.nan,'b'],['c','c',np.nan,'c']])\n\n result = df.fillna({ 2: 'foo' })\n expected = DataFrame([['a','a','foo','a'],['b','b','foo','b'],['c','c','foo','c']])\n assert_frame_equal(result, expected)\n\n df.fillna({ 2: 'foo' }, inplace=True)\n assert_frame_equal(df, expected)\n\n # limit and value\n df = DataFrame(np.random.randn(10,3))\n df.iloc[2:7,0] = np.nan\n df.iloc[3:5,2] = np.nan\n\n expected = df.copy()\n expected.iloc[2,0] = 999\n expected.iloc[3,2] = 999\n result = df.fillna(999,limit=1)\n assert_frame_equal(result, expected)\n\n # with datelike\n # GH 6344\n df = DataFrame({\n 'Date':[pd.NaT, Timestamp(\"2014-1-1\")],\n 'Date2':[ Timestamp(\"2013-1-1\"), pd.NaT]\n })\n\n expected = df.copy()\n expected['Date'] = expected['Date'].fillna(df.ix[0,'Date2'])\n result = df.fillna(value={'Date':df['Date2']})\n assert_frame_equal(result, expected)\n\n def test_fillna_dtype_conversion(self):\n # make sure that fillna on an empty frame works\n df = DataFrame(index=[\"A\",\"B\",\"C\"], columns = [1,2,3,4,5])\n result = df.get_dtype_counts().order()\n expected = Series({ 'object' : 5 })\n assert_series_equal(result, expected)\n\n result = df.fillna(1)\n expected = DataFrame(1, index=[\"A\",\"B\",\"C\"], columns = [1,2,3,4,5])\n result = result.get_dtype_counts().order()\n expected = Series({ 'int64' : 5 })\n assert_series_equal(result, expected)\n\n # empty block\n df = DataFrame(index=lrange(3),columns=['A','B'],dtype='float64')\n result = df.fillna('nan')\n expected = 
DataFrame('nan',index=lrange(3),columns=['A','B'])\n assert_frame_equal(result, expected)\n\n # equiv of replace\n df = DataFrame(dict(A = [1,np.nan], B = [1.,2.]))\n for v in ['',1,np.nan,1.0]:\n expected = df.replace(np.nan,v)\n result = df.fillna(v)\n assert_frame_equal(result, expected)\n\n def test_ffill(self):\n self.tsframe['A'][:5] = nan\n self.tsframe['A'][-5:] = nan\n\n assert_frame_equal(self.tsframe.ffill(),\n self.tsframe.fillna(method='ffill'))\n\n def test_bfill(self):\n self.tsframe['A'][:5] = nan\n self.tsframe['A'][-5:] = nan\n\n assert_frame_equal(self.tsframe.bfill(),\n self.tsframe.fillna(method='bfill'))\n\n def test_fillna_skip_certain_blocks(self):\n # don't try to fill boolean, int blocks\n\n df = DataFrame(np.random.randn(10, 4).astype(int))\n\n # it works!\n df.fillna(np.nan)\n\n def test_fillna_inplace(self):\n df = DataFrame(np.random.randn(10, 4))\n df[1][:4] = np.nan\n df[3][-4:] = np.nan\n\n expected = df.fillna(value=0)\n self.assertIsNot(expected, df)\n\n df.fillna(value=0, inplace=True)\n assert_frame_equal(df, expected)\n\n df[1][:4] = np.nan\n df[3][-4:] = np.nan\n expected = df.fillna(method='ffill')\n self.assertIsNot(expected, df)\n\n df.fillna(method='ffill', inplace=True)\n assert_frame_equal(df, expected)\n\n def test_fillna_dict_series(self):\n df = DataFrame({'a': [nan, 1, 2, nan, nan],\n 'b': [1, 2, 3, nan, nan],\n 'c': [nan, 1, 2, 3, 4]})\n\n result = df.fillna({'a': 0, 'b': 5})\n\n expected = df.copy()\n expected['a'] = expected['a'].fillna(0)\n expected['b'] = expected['b'].fillna(5)\n assert_frame_equal(result, expected)\n\n # it works\n result = df.fillna({'a': 0, 'b': 5, 'd': 7})\n\n # Series treated same as dict\n result = df.fillna(df.max())\n expected = df.fillna(df.max().to_dict())\n assert_frame_equal(result, expected)\n\n # disable this for now\n with assertRaisesRegexp(NotImplementedError, 'column by column'):\n df.fillna(df.max(1), axis=1)\n\n def test_fillna_dataframe(self):\n # GH 8377\n df = DataFrame({'a': [nan, 1, 2, nan, nan],\n 'b': [1, 2, 3, nan, nan],\n 'c': [nan, 1, 2, 3, 4]},\n index = list('VWXYZ'))\n\n # df2 may have different index and columns\n df2 = DataFrame({'a': [nan, 10, 20, 30, 40],\n 'b': [50, 60, 70, 80, 90],\n 'foo': ['bar']*5},\n index = list('VWXuZ'))\n\n result = df.fillna(df2)\n\n # only those columns and indices which are shared get filled\n expected = DataFrame({'a': [nan, 1, 2, nan, 40],\n 'b': [1, 2, 3, nan, 90],\n 'c': [nan, 1, 2, 3, 4]},\n index = list('VWXYZ'))\n\n assert_frame_equal(result, expected)\n\n def test_fillna_columns(self):\n df = DataFrame(np.random.randn(10, 10))\n df.values[:, ::2] = np.nan\n\n result = df.fillna(method='ffill', axis=1)\n expected = df.T.fillna(method='pad').T\n assert_frame_equal(result, expected)\n\n df.insert(6, 'foo', 5)\n result = df.fillna(method='ffill', axis=1)\n expected = df.astype(float).fillna(method='ffill', axis=1)\n assert_frame_equal(result, expected)\n\n\n def test_fillna_invalid_method(self):\n with assertRaisesRegexp(ValueError, 'ffil'):\n self.frame.fillna(method='ffil')\n\n def test_fillna_invalid_value(self):\n # list\n self.assertRaises(TypeError, self.frame.fillna, [1, 2])\n # tuple\n self.assertRaises(TypeError, self.frame.fillna, (1, 2))\n # frame with series\n self.assertRaises(ValueError, self.frame.iloc[:,0].fillna, self.frame)\n\n def test_replace_inplace(self):\n self.tsframe['A'][:5] = nan\n self.tsframe['A'][-5:] = nan\n\n tsframe = self.tsframe.copy()\n tsframe.replace(nan, 0, inplace=True)\n assert_frame_equal(tsframe, 
self.tsframe.fillna(0))\n\n self.assertRaises(TypeError, self.tsframe.replace, nan, inplace=True)\n self.assertRaises(TypeError, self.tsframe.replace, nan)\n\n # mixed type\n self.mixed_frame.ix[5:20,'foo'] = nan\n self.mixed_frame.ix[-10:,'A'] = nan\n\n result = self.mixed_frame.replace(np.nan, 0)\n expected = self.mixed_frame.fillna(value=0)\n assert_frame_equal(result, expected)\n\n tsframe = self.tsframe.copy()\n tsframe.replace([nan], [0], inplace=True)\n assert_frame_equal(tsframe, self.tsframe.fillna(0))\n\n def test_regex_replace_scalar(self):\n obj = {'a': list('ab..'), 'b': list('efgh')}\n dfobj = DataFrame(obj)\n mix = {'a': lrange(4), 'b': list('ab..')}\n dfmix = DataFrame(mix)\n\n ### simplest cases\n ## regex -> value\n # obj frame\n res = dfobj.replace(r'\\s*\\.\\s*', nan, regex=True)\n assert_frame_equal(dfobj, res.fillna('.'))\n\n # mixed\n res = dfmix.replace(r'\\s*\\.\\s*', nan, regex=True)\n assert_frame_equal(dfmix, res.fillna('.'))\n\n ## regex -> regex\n # obj frame\n res = dfobj.replace(r'\\s*(\\.)\\s*', r'\\1\\1\\1', regex=True)\n objc = obj.copy()\n objc['a'] = ['a', 'b', '...', '...']\n expec = DataFrame(objc)\n assert_frame_equal(res, expec)\n\n # with mixed\n res = dfmix.replace(r'\\s*(\\.)\\s*', r'\\1\\1\\1', regex=True)\n mixc = mix.copy()\n mixc['b'] = ['a', 'b', '...', '...']\n expec = DataFrame(mixc)\n assert_frame_equal(res, expec)\n\n # everything with compiled regexs as well\n res = dfobj.replace(re.compile(r'\\s*\\.\\s*'), nan, regex=True)\n assert_frame_equal(dfobj, res.fillna('.'))\n\n # mixed\n res = dfmix.replace(re.compile(r'\\s*\\.\\s*'), nan, regex=True)\n assert_frame_equal(dfmix, res.fillna('.'))\n\n ## regex -> regex\n # obj frame\n res = dfobj.replace(re.compile(r'\\s*(\\.)\\s*'), r'\\1\\1\\1')\n objc = obj.copy()\n objc['a'] = ['a', 'b', '...', '...']\n expec = DataFrame(objc)\n assert_frame_equal(res, expec)\n\n # with mixed\n res = dfmix.replace(re.compile(r'\\s*(\\.)\\s*'), r'\\1\\1\\1')\n mixc = mix.copy()\n mixc['b'] = ['a', 'b', '...', '...']\n expec = DataFrame(mixc)\n assert_frame_equal(res, expec)\n\n res = dfmix.replace(regex=re.compile(r'\\s*(\\.)\\s*'), value=r'\\1\\1\\1')\n mixc = mix.copy()\n mixc['b'] = ['a', 'b', '...', '...']\n expec = DataFrame(mixc)\n assert_frame_equal(res, expec)\n\n res = dfmix.replace(regex=r'\\s*(\\.)\\s*', value=r'\\1\\1\\1')\n mixc = mix.copy()\n mixc['b'] = ['a', 'b', '...', '...']\n expec = DataFrame(mixc)\n assert_frame_equal(res, expec)\n\n def test_regex_replace_scalar_inplace(self):\n obj = {'a': list('ab..'), 'b': list('efgh')}\n dfobj = DataFrame(obj)\n mix = {'a': lrange(4), 'b': list('ab..')}\n dfmix = DataFrame(mix)\n\n ### simplest cases\n ## regex -> value\n # obj frame\n res = dfobj.copy()\n res.replace(r'\\s*\\.\\s*', nan, regex=True, inplace=True)\n assert_frame_equal(dfobj, res.fillna('.'))\n\n # mixed\n res = dfmix.copy()\n res.replace(r'\\s*\\.\\s*', nan, regex=True, inplace=True)\n assert_frame_equal(dfmix, res.fillna('.'))\n\n ## regex -> regex\n # obj frame\n res = dfobj.copy()\n res.replace(r'\\s*(\\.)\\s*', r'\\1\\1\\1', regex=True, inplace=True)\n objc = obj.copy()\n objc['a'] = ['a', 'b', '...', '...']\n expec = DataFrame(objc)\n assert_frame_equal(res, expec)\n\n # with mixed\n res = dfmix.copy()\n res.replace(r'\\s*(\\.)\\s*', r'\\1\\1\\1', regex=True, inplace=True)\n mixc = mix.copy()\n mixc['b'] = ['a', 'b', '...', '...']\n expec = DataFrame(mixc)\n assert_frame_equal(res, expec)\n\n # everything with compiled regexs as well\n res = dfobj.copy()\n 
res.replace(re.compile(r'\\s*\\.\\s*'), nan, regex=True, inplace=True)\n assert_frame_equal(dfobj, res.fillna('.'))\n\n # mixed\n res = dfmix.copy()\n res.replace(re.compile(r'\\s*\\.\\s*'), nan, regex=True, inplace=True)\n assert_frame_equal(dfmix, res.fillna('.'))\n\n ## regex -> regex\n # obj frame\n res = dfobj.copy()\n res.replace(re.compile(r'\\s*(\\.)\\s*'), r'\\1\\1\\1', regex=True,\n inplace=True)\n objc = obj.copy()\n objc['a'] = ['a', 'b', '...', '...']\n expec = DataFrame(objc)\n assert_frame_equal(res, expec)\n\n # with mixed\n res = dfmix.copy()\n res.replace(re.compile(r'\\s*(\\.)\\s*'), r'\\1\\1\\1', regex=True,\n inplace=True)\n mixc = mix.copy()\n mixc['b'] = ['a', 'b', '...', '...']\n expec = DataFrame(mixc)\n assert_frame_equal(res, expec)\n\n res = dfobj.copy()\n res.replace(regex=r'\\s*\\.\\s*', value=nan, inplace=True)\n assert_frame_equal(dfobj, res.fillna('.'))\n\n # mixed\n res = dfmix.copy()\n res.replace(regex=r'\\s*\\.\\s*', value=nan, inplace=True)\n assert_frame_equal(dfmix, res.fillna('.'))\n\n ## regex -> regex\n # obj frame\n res = dfobj.copy()\n res.replace(regex=r'\\s*(\\.)\\s*', value=r'\\1\\1\\1', inplace=True)\n objc = obj.copy()\n objc['a'] = ['a', 'b', '...', '...']\n expec = DataFrame(objc)\n assert_frame_equal(res, expec)\n\n # with mixed\n res = dfmix.copy()\n res.replace(regex=r'\\s*(\\.)\\s*', value=r'\\1\\1\\1', inplace=True)\n mixc = mix.copy()\n mixc['b'] = ['a', 'b', '...', '...']\n expec = DataFrame(mixc)\n assert_frame_equal(res, expec)\n\n # everything with compiled regexs as well\n res = dfobj.copy()\n res.replace(regex=re.compile(r'\\s*\\.\\s*'), value=nan, inplace=True)\n assert_frame_equal(dfobj, res.fillna('.'))\n\n # mixed\n res = dfmix.copy()\n res.replace(regex=re.compile(r'\\s*\\.\\s*'), value=nan, inplace=True)\n assert_frame_equal(dfmix, res.fillna('.'))\n\n ## regex -> regex\n # obj frame\n res = dfobj.copy()\n res.replace(regex=re.compile(r'\\s*(\\.)\\s*'), value=r'\\1\\1\\1',\n inplace=True)\n objc = obj.copy()\n objc['a'] = ['a', 'b', '...', '...']\n expec = DataFrame(objc)\n assert_frame_equal(res, expec)\n\n # with mixed\n res = dfmix.copy()\n res.replace(regex=re.compile(r'\\s*(\\.)\\s*'), value=r'\\1\\1\\1',\n inplace=True)\n mixc = mix.copy()\n mixc['b'] = ['a', 'b', '...', '...']\n expec = DataFrame(mixc)\n assert_frame_equal(res, expec)\n\n def test_regex_replace_list_obj(self):\n obj = {'a': list('ab..'), 'b': list('efgh'), 'c': list('helo')}\n dfobj = DataFrame(obj)\n\n ## lists of regexes and values\n # list of [re1, re2, ..., reN] -> [v1, v2, ..., vN]\n to_replace_res = [r'\\s*\\.\\s*', r'e|f|g']\n values = [nan, 'crap']\n res = dfobj.replace(to_replace_res, values, regex=True)\n expec = DataFrame({'a': ['a', 'b', nan, nan], 'b': ['crap'] * 3 +\n ['h'], 'c': ['h', 'crap', 'l', 'o']})\n assert_frame_equal(res, expec)\n\n # list of [re1, re2, ..., reN] -> [re1, re2, .., reN]\n to_replace_res = [r'\\s*(\\.)\\s*', r'(e|f|g)']\n values = [r'\\1\\1', r'\\1_crap']\n res = dfobj.replace(to_replace_res, values, regex=True)\n expec = DataFrame({'a': ['a', 'b', '..', '..'], 'b': ['e_crap',\n 'f_crap',\n 'g_crap', 'h'],\n 'c': ['h', 'e_crap', 'l', 'o']})\n\n assert_frame_equal(res, expec)\n\n # list of [re1, re2, ..., reN] -> [(re1 or v1), (re2 or v2), ..., (reN\n # or vN)]\n to_replace_res = [r'\\s*(\\.)\\s*', r'e']\n values = [r'\\1\\1', r'crap']\n res = dfobj.replace(to_replace_res, values, regex=True)\n expec = DataFrame({'a': ['a', 'b', '..', '..'], 'b': ['crap', 'f', 'g',\n 'h'],\n 'c': ['h', 'crap', 'l', 'o']})\n 
assert_frame_equal(res, expec)\n\n to_replace_res = [r'\\s*(\\.)\\s*', r'e']\n values = [r'\\1\\1', r'crap']\n res = dfobj.replace(value=values, regex=to_replace_res)\n expec = DataFrame({'a': ['a', 'b', '..', '..'], 'b': ['crap', 'f', 'g',\n 'h'],\n 'c': ['h', 'crap', 'l', 'o']})\n assert_frame_equal(res, expec)\n\n def test_regex_replace_list_obj_inplace(self):\n ### same as above with inplace=True\n ## lists of regexes and values\n obj = {'a': list('ab..'), 'b': list('efgh'), 'c': list('helo')}\n dfobj = DataFrame(obj)\n\n ## lists of regexes and values\n # list of [re1, re2, ..., reN] -> [v1, v2, ..., vN]\n to_replace_res = [r'\\s*\\.\\s*', r'e|f|g']\n values = [nan, 'crap']\n res = dfobj.copy()\n res.replace(to_replace_res, values, inplace=True, regex=True)\n expec = DataFrame({'a': ['a', 'b', nan, nan], 'b': ['crap'] * 3 +\n ['h'], 'c': ['h', 'crap', 'l', 'o']})\n assert_frame_equal(res, expec)\n\n # list of [re1, re2, ..., reN] -> [re1, re2, .., reN]\n to_replace_res = [r'\\s*(\\.)\\s*', r'(e|f|g)']\n values = [r'\\1\\1', r'\\1_crap']\n res = dfobj.copy()\n res.replace(to_replace_res, values, inplace=True, regex=True)\n expec = DataFrame({'a': ['a', 'b', '..', '..'], 'b': ['e_crap',\n 'f_crap',\n 'g_crap', 'h'],\n 'c': ['h', 'e_crap', 'l', 'o']})\n\n assert_frame_equal(res, expec)\n\n # list of [re1, re2, ..., reN] -> [(re1 or v1), (re2 or v2), ..., (reN\n # or vN)]\n to_replace_res = [r'\\s*(\\.)\\s*', r'e']\n values = [r'\\1\\1', r'crap']\n res = dfobj.copy()\n res.replace(to_replace_res, values, inplace=True, regex=True)\n expec = DataFrame({'a': ['a', 'b', '..', '..'], 'b': ['crap', 'f', 'g',\n 'h'],\n 'c': ['h', 'crap', 'l', 'o']})\n assert_frame_equal(res, expec)\n\n to_replace_res = [r'\\s*(\\.)\\s*', r'e']\n values = [r'\\1\\1', r'crap']\n res = dfobj.copy()\n res.replace(value=values, regex=to_replace_res, inplace=True)\n expec = DataFrame({'a': ['a', 'b', '..', '..'], 'b': ['crap', 'f', 'g',\n 'h'],\n 'c': ['h', 'crap', 'l', 'o']})\n assert_frame_equal(res, expec)\n\n def test_regex_replace_list_mixed(self):\n ## mixed frame to make sure this doesn't break things\n mix = {'a': lrange(4), 'b': list('ab..')}\n dfmix = DataFrame(mix)\n\n ## lists of regexes and values\n # list of [re1, re2, ..., reN] -> [v1, v2, ..., vN]\n to_replace_res = [r'\\s*\\.\\s*', r'a']\n values = [nan, 'crap']\n mix2 = {'a': lrange(4), 'b': list('ab..'), 'c': list('halo')}\n dfmix2 = DataFrame(mix2)\n res = dfmix2.replace(to_replace_res, values, regex=True)\n expec = DataFrame({'a': mix2['a'], 'b': ['crap', 'b', nan, nan],\n 'c': ['h', 'crap', 'l', 'o']})\n assert_frame_equal(res, expec)\n\n # list of [re1, re2, ..., reN] -> [re1, re2, .., reN]\n to_replace_res = [r'\\s*(\\.)\\s*', r'(a|b)']\n values = [r'\\1\\1', r'\\1_crap']\n res = dfmix.replace(to_replace_res, values, regex=True)\n expec = DataFrame({'a': mix['a'], 'b': ['a_crap', 'b_crap', '..',\n '..']})\n\n assert_frame_equal(res, expec)\n\n # list of [re1, re2, ..., reN] -> [(re1 or v1), (re2 or v2), ..., (reN\n # or vN)]\n to_replace_res = [r'\\s*(\\.)\\s*', r'a', r'(b)']\n values = [r'\\1\\1', r'crap', r'\\1_crap']\n res = dfmix.replace(to_replace_res, values, regex=True)\n expec = DataFrame({'a': mix['a'], 'b': ['crap', 'b_crap', '..', '..']})\n assert_frame_equal(res, expec)\n\n to_replace_res = [r'\\s*(\\.)\\s*', r'a', r'(b)']\n values = [r'\\1\\1', r'crap', r'\\1_crap']\n res = dfmix.replace(regex=to_replace_res, value=values)\n expec = DataFrame({'a': mix['a'], 'b': ['crap', 'b_crap', '..', '..']})\n assert_frame_equal(res, expec)\n\n 
def test_regex_replace_list_mixed_inplace(self):\n mix = {'a': lrange(4), 'b': list('ab..')}\n dfmix = DataFrame(mix)\n # the same inplace\n ## lists of regexes and values\n # list of [re1, re2, ..., reN] -> [v1, v2, ..., vN]\n to_replace_res = [r'\\s*\\.\\s*', r'a']\n values = [nan, 'crap']\n res = dfmix.copy()\n res.replace(to_replace_res, values, inplace=True, regex=True)\n expec = DataFrame({'a': mix['a'], 'b': ['crap', 'b', nan, nan]})\n assert_frame_equal(res, expec)\n\n # list of [re1, re2, ..., reN] -> [re1, re2, .., reN]\n to_replace_res = [r'\\s*(\\.)\\s*', r'(a|b)']\n values = [r'\\1\\1', r'\\1_crap']\n res = dfmix.copy()\n res.replace(to_replace_res, values, inplace=True, regex=True)\n expec = DataFrame({'a': mix['a'], 'b': ['a_crap', 'b_crap', '..',\n '..']})\n\n assert_frame_equal(res, expec)\n\n # list of [re1, re2, ..., reN] -> [(re1 or v1), (re2 or v2), ..., (reN\n # or vN)]\n to_replace_res = [r'\\s*(\\.)\\s*', r'a', r'(b)']\n values = [r'\\1\\1', r'crap', r'\\1_crap']\n res = dfmix.copy()\n res.replace(to_replace_res, values, inplace=True, regex=True)\n expec = DataFrame({'a': mix['a'], 'b': ['crap', 'b_crap', '..', '..']})\n assert_frame_equal(res, expec)\n\n to_replace_res = [r'\\s*(\\.)\\s*', r'a', r'(b)']\n values = [r'\\1\\1', r'crap', r'\\1_crap']\n res = dfmix.copy()\n res.replace(regex=to_replace_res, value=values, inplace=True)\n expec = DataFrame({'a': mix['a'], 'b': ['crap', 'b_crap', '..', '..']})\n assert_frame_equal(res, expec)\n\n def test_regex_replace_dict_mixed(self):\n mix = {'a': lrange(4), 'b': list('ab..'), 'c': ['a', 'b', nan, 'd']}\n dfmix = DataFrame(mix)\n\n ## dicts\n # single dict {re1: v1}, search the whole frame\n # need test for this...\n\n # list of dicts {re1: v1, re2: v2, ..., re3: v3}, search the whole\n # frame\n res = dfmix.replace({'b': r'\\s*\\.\\s*'}, {'b': nan}, regex=True)\n res2 = dfmix.copy()\n res2.replace({'b': r'\\s*\\.\\s*'}, {'b': nan}, inplace=True, regex=True)\n expec = DataFrame({'a': mix['a'], 'b': ['a', 'b', nan, nan], 'c':\n mix['c']})\n assert_frame_equal(res, expec)\n assert_frame_equal(res2, expec)\n\n # list of dicts {re1: re11, re2: re12, ..., reN: re1N}, search the\n # whole frame\n res = dfmix.replace({'b': r'\\s*(\\.)\\s*'}, {'b': r'\\1ty'}, regex=True)\n res2 = dfmix.copy()\n res2.replace({'b': r'\\s*(\\.)\\s*'}, {'b': r'\\1ty'}, inplace=True,\n regex=True)\n expec = DataFrame({'a': mix['a'], 'b': ['a', 'b', '.ty', '.ty'], 'c':\n mix['c']})\n assert_frame_equal(res, expec)\n assert_frame_equal(res2, expec)\n\n res = dfmix.replace(regex={'b': r'\\s*(\\.)\\s*'}, value={'b': r'\\1ty'})\n res2 = dfmix.copy()\n res2.replace(regex={'b': r'\\s*(\\.)\\s*'}, value={'b': r'\\1ty'},\n inplace=True)\n expec = DataFrame({'a': mix['a'], 'b': ['a', 'b', '.ty', '.ty'], 'c':\n mix['c']})\n assert_frame_equal(res, expec)\n assert_frame_equal(res2, expec)\n\n # scalar -> dict\n # to_replace regex, {value: value}\n expec = DataFrame({'a': mix['a'], 'b': [nan, 'b', '.', '.'], 'c':\n mix['c']})\n res = dfmix.replace('a', {'b': nan}, regex=True)\n res2 = dfmix.copy()\n res2.replace('a', {'b': nan}, regex=True, inplace=True)\n assert_frame_equal(res, expec)\n assert_frame_equal(res2, expec)\n\n res = dfmix.replace('a', {'b': nan}, regex=True)\n res2 = dfmix.copy()\n res2.replace(regex='a', value={'b': nan}, inplace=True)\n expec = DataFrame({'a': mix['a'], 'b': [nan, 'b', '.', '.'], 'c':\n mix['c']})\n assert_frame_equal(res, expec)\n assert_frame_equal(res2, expec)\n\n def test_regex_replace_dict_nested(self):\n # nested dicts 
will not work until this is implemented for Series\n mix = {'a': lrange(4), 'b': list('ab..'), 'c': ['a', 'b', nan, 'd']}\n dfmix = DataFrame(mix)\n res = dfmix.replace({'b': {r'\\s*\\.\\s*': nan}}, regex=True)\n res2 = dfmix.copy()\n res4 = dfmix.copy()\n res2.replace({'b': {r'\\s*\\.\\s*': nan}}, inplace=True, regex=True)\n res3 = dfmix.replace(regex={'b': {r'\\s*\\.\\s*': nan}})\n res4.replace(regex={'b': {r'\\s*\\.\\s*': nan}}, inplace=True)\n expec = DataFrame({'a': mix['a'], 'b': ['a', 'b', nan, nan], 'c':\n mix['c']})\n assert_frame_equal(res, expec)\n assert_frame_equal(res2, expec)\n assert_frame_equal(res3, expec)\n assert_frame_equal(res4, expec)\n\n def test_regex_replace_dict_nested_gh4115(self):\n df = pd.DataFrame({'Type':['Q','T','Q','Q','T'], 'tmp':2})\n expected = DataFrame({'Type': [0,1,0,0,1], 'tmp': 2})\n assert_frame_equal(df.replace({'Type': {'Q':0,'T':1}}), expected)\n\n def test_regex_replace_list_to_scalar(self):\n mix = {'a': lrange(4), 'b': list('ab..'), 'c': ['a', 'b', nan, 'd']}\n df = DataFrame(mix)\n res = df.replace([r'\\s*\\.\\s*', 'a|b'], nan, regex=True)\n res2 = df.copy()\n res3 = df.copy()\n res2.replace([r'\\s*\\.\\s*', 'a|b'], nan, regex=True, inplace=True)\n res3.replace(regex=[r'\\s*\\.\\s*', 'a|b'], value=nan, inplace=True)\n expec = DataFrame({'a': mix['a'], 'b': np.array([nan] * 4),\n 'c': [nan, nan, nan, 'd']})\n assert_frame_equal(res, expec)\n assert_frame_equal(res2, expec)\n assert_frame_equal(res3, expec)\n\n def test_regex_replace_str_to_numeric(self):\n # what happens when you try to replace a numeric value with a regex?\n mix = {'a': lrange(4), 'b': list('ab..'), 'c': ['a', 'b', nan, 'd']}\n df = DataFrame(mix)\n res = df.replace(r'\\s*\\.\\s*', 0, regex=True)\n res2 = df.copy()\n res2.replace(r'\\s*\\.\\s*', 0, inplace=True, regex=True)\n res3 = df.copy()\n res3.replace(regex=r'\\s*\\.\\s*', value=0, inplace=True)\n expec = DataFrame({'a': mix['a'], 'b': ['a', 'b', 0, 0], 'c':\n mix['c']})\n assert_frame_equal(res, expec)\n assert_frame_equal(res2, expec)\n assert_frame_equal(res3, expec)\n\n def test_regex_replace_regex_list_to_numeric(self):\n mix = {'a': lrange(4), 'b': list('ab..'), 'c': ['a', 'b', nan, 'd']}\n df = DataFrame(mix)\n res = df.replace([r'\\s*\\.\\s*', 'b'], 0, regex=True)\n res2 = df.copy()\n res2.replace([r'\\s*\\.\\s*', 'b'], 0, regex=True, inplace=True)\n res3 = df.copy()\n res3.replace(regex=[r'\\s*\\.\\s*', 'b'], value=0, inplace=True)\n expec = DataFrame({'a': mix['a'], 'b': ['a', 0, 0, 0], 'c': ['a', 0,\n nan,\n 'd']})\n assert_frame_equal(res, expec)\n assert_frame_equal(res2, expec)\n assert_frame_equal(res3, expec)\n\n def test_regex_replace_series_of_regexes(self):\n mix = {'a': lrange(4), 'b': list('ab..'), 'c': ['a', 'b', nan, 'd']}\n df = DataFrame(mix)\n s1 = Series({'b': r'\\s*\\.\\s*'})\n s2 = Series({'b': nan})\n res = df.replace(s1, s2, regex=True)\n res2 = df.copy()\n res2.replace(s1, s2, inplace=True, regex=True)\n res3 = df.copy()\n res3.replace(regex=s1, value=s2, inplace=True)\n expec = DataFrame({'a': mix['a'], 'b': ['a', 'b', nan, nan], 'c':\n mix['c']})\n assert_frame_equal(res, expec)\n assert_frame_equal(res2, expec)\n assert_frame_equal(res3, expec)\n\n def test_regex_replace_numeric_to_object_conversion(self):\n mix = {'a': lrange(4), 'b': list('ab..'), 'c': ['a', 'b', nan, 'd']}\n df = DataFrame(mix)\n res = df.replace(0, 'a')\n expec = DataFrame({'a': ['a', 1, 2, 3], 'b': mix['b'], 'c': mix['c']})\n assert_frame_equal(res, expec)\n self.assertEqual(res.a.dtype, np.object_)\n\n def 
test_replace_regex_metachar(self):\n metachars = '[]', '()', '\\d', '\\w', '\\s'\n\n for metachar in metachars:\n df = DataFrame({'a': [metachar, 'else']})\n result = df.replace({'a': {metachar: 'paren'}})\n expected = DataFrame({'a': ['paren', 'else']})\n tm.assert_frame_equal(result, expected)\n\n def test_replace(self):\n self.tsframe['A'][:5] = nan\n self.tsframe['A'][-5:] = nan\n\n zero_filled = self.tsframe.replace(nan, -1e8)\n assert_frame_equal(zero_filled, self.tsframe.fillna(-1e8))\n assert_frame_equal(zero_filled.replace(-1e8, nan), self.tsframe)\n\n self.tsframe['A'][:5] = nan\n self.tsframe['A'][-5:] = nan\n self.tsframe['B'][:5] = -1e8\n\n # empty\n df = DataFrame(index=['a', 'b'])\n assert_frame_equal(df, df.replace(5, 7))\n\n def test_replace_list(self):\n obj = {'a': list('ab..'), 'b': list('efgh'), 'c': list('helo')}\n dfobj = DataFrame(obj)\n\n ## lists of regexes and values\n # list of [v1, v2, ..., vN] -> [v1, v2, ..., vN]\n to_replace_res = [r'.', r'e']\n values = [nan, 'crap']\n res = dfobj.replace(to_replace_res, values)\n expec = DataFrame({'a': ['a', 'b', nan, nan],\n 'b': ['crap', 'f', 'g', 'h'], 'c': ['h', 'crap',\n 'l', 'o']})\n assert_frame_equal(res, expec)\n\n # list of [v1, v2, ..., vN] -> [v1, v2, .., vN]\n to_replace_res = [r'.', r'f']\n values = [r'..', r'crap']\n res = dfobj.replace(to_replace_res, values)\n expec = DataFrame({'a': ['a', 'b', '..', '..'], 'b': ['e', 'crap', 'g',\n 'h'],\n 'c': ['h', 'e', 'l', 'o']})\n\n assert_frame_equal(res, expec)\n\n def test_replace_series_dict(self):\n # from GH 3064\n df = DataFrame({'zero': {'a': 0.0, 'b': 1}, 'one': {'a': 2.0, 'b': 0}})\n result = df.replace(0, {'zero': 0.5, 'one': 1.0})\n expected = DataFrame({'zero': {'a': 0.5, 'b': 1}, 'one': {'a': 2.0, 'b': 1.0}})\n assert_frame_equal(result, expected)\n\n result = df.replace(0, df.mean())\n assert_frame_equal(result, expected)\n\n # series to series/dict\n df = DataFrame({'zero': {'a': 0.0, 'b': 1}, 'one': {'a': 2.0, 'b': 0}})\n s = Series({'zero': 0.0, 'one': 2.0})\n result = df.replace(s, {'zero': 0.5, 'one': 1.0})\n expected = DataFrame({'zero': {'a': 0.5, 'b': 1}, 'one': {'a': 1.0, 'b': 0.0}})\n assert_frame_equal(result, expected)\n\n result = df.replace(s, df.mean())\n assert_frame_equal(result, expected)\n\n def test_replace_convert(self):\n # gh 3907\n df = DataFrame([['foo', 'bar', 'bah'], ['bar', 'foo', 'bah']])\n m = {'foo': 1, 'bar': 2, 'bah': 3}\n rep = df.replace(m)\n expec = Series([ np.int64] * 3)\n res = rep.dtypes\n assert_series_equal(expec, res)\n\n def test_replace_mixed(self):\n self.mixed_frame.ix[5:20,'foo'] = nan\n self.mixed_frame.ix[-10:,'A'] = nan\n\n result = self.mixed_frame.replace(np.nan, -18)\n expected = self.mixed_frame.fillna(value=-18)\n assert_frame_equal(result, expected)\n assert_frame_equal(result.replace(-18, nan), self.mixed_frame)\n\n result = self.mixed_frame.replace(np.nan, -1e8)\n expected = self.mixed_frame.fillna(value=-1e8)\n assert_frame_equal(result, expected)\n assert_frame_equal(result.replace(-1e8, nan), self.mixed_frame)\n\n # int block upcasting\n df = DataFrame({ 'A' : Series([1.0,2.0],dtype='float64'), 'B' : Series([0,1],dtype='int64') })\n expected = DataFrame({ 'A' : Series([1.0,2.0],dtype='float64'), 'B' : Series([0.5,1],dtype='float64') })\n result = df.replace(0, 0.5)\n assert_frame_equal(result,expected)\n\n df.replace(0, 0.5, inplace=True)\n assert_frame_equal(df,expected)\n\n # int block splitting\n df = DataFrame({ 'A' : Series([1.0,2.0],dtype='float64'), 'B' : Series([0,1],dtype='int64'), 
'C' : Series([1,2],dtype='int64') })\n expected = DataFrame({ 'A' : Series([1.0,2.0],dtype='float64'), 'B' : Series([0.5,1],dtype='float64'), 'C' : Series([1,2],dtype='int64') })\n result = df.replace(0, 0.5)\n assert_frame_equal(result,expected)\n\n # to object block upcasting\n df = DataFrame({ 'A' : Series([1.0,2.0],dtype='float64'), 'B' : Series([0,1],dtype='int64') })\n expected = DataFrame({ 'A' : Series([1,'foo'],dtype='object'), 'B' : Series([0,1],dtype='int64') })\n result = df.replace(2, 'foo')\n assert_frame_equal(result,expected)\n\n expected = DataFrame({ 'A' : Series(['foo','bar'],dtype='object'), 'B' : Series([0,'foo'],dtype='object') })\n result = df.replace([1,2], ['foo','bar'])\n assert_frame_equal(result,expected)\n\n # test case from\n from pandas.util.testing import makeCustomDataframe as mkdf\n df = DataFrame({'A' : Series([3,0],dtype='int64'), 'B' : Series([0,3],dtype='int64') })\n result = df.replace(3, df.mean().to_dict())\n expected = df.copy().astype('float64')\n m = df.mean()\n expected.iloc[0,0] = m[0]\n expected.iloc[1,1] = m[1]\n assert_frame_equal(result,expected)\n\n def test_replace_simple_nested_dict(self):\n df = DataFrame({'col': range(1, 5)})\n expected = DataFrame({'col': ['a', 2, 3, 'b']})\n\n result = df.replace({'col': {1: 'a', 4: 'b'}})\n tm.assert_frame_equal(expected, result)\n\n # in this case, should be the same as the not nested version\n result = df.replace({1: 'a', 4: 'b'})\n tm.assert_frame_equal(expected, result)\n\n def test_replace_simple_nested_dict_with_nonexistent_value(self):\n df = DataFrame({'col': range(1, 5)})\n expected = DataFrame({'col': ['a', 2, 3, 'b']})\n\n result = df.replace({-1: '-', 1: 'a', 4: 'b'})\n tm.assert_frame_equal(expected, result)\n\n result = df.replace({'col': {-1: '-', 1: 'a', 4: 'b'}})\n tm.assert_frame_equal(expected, result)\n\n def test_interpolate(self):\n pass\n\n def test_replace_value_is_none(self):\n self.assertRaises(TypeError, self.tsframe.replace, nan)\n orig_value = self.tsframe.iloc[0, 0]\n orig2 = self.tsframe.iloc[1, 0]\n\n self.tsframe.iloc[0, 0] = nan\n self.tsframe.iloc[1, 0] = 1\n\n result = self.tsframe.replace(to_replace={nan: 0})\n expected = self.tsframe.T.replace(to_replace={nan: 0}).T\n assert_frame_equal(result, expected)\n\n result = self.tsframe.replace(to_replace={nan: 0, 1: -1e8})\n tsframe = self.tsframe.copy()\n tsframe.iloc[0, 0] = 0\n tsframe.iloc[1, 0] = -1e8\n expected = tsframe\n assert_frame_equal(expected, result)\n self.tsframe.iloc[0, 0] = orig_value\n self.tsframe.iloc[1, 0] = orig2\n\n def test_replace_for_new_dtypes(self):\n\n # dtypes\n tsframe = self.tsframe.copy().astype(np.float32)\n tsframe['A'][:5] = nan\n tsframe['A'][-5:] = nan\n\n zero_filled = tsframe.replace(nan, -1e8)\n assert_frame_equal(zero_filled, tsframe.fillna(-1e8))\n assert_frame_equal(zero_filled.replace(-1e8, nan), tsframe)\n\n tsframe['A'][:5] = nan\n tsframe['A'][-5:] = nan\n tsframe['B'][:5] = -1e8\n\n b = tsframe['B']\n b[b == -1e8] = nan\n tsframe['B'] = b\n result = tsframe.fillna(method='bfill')\n assert_frame_equal(result, tsframe.fillna(method='bfill'))\n\n def test_replace_dtypes(self):\n # int\n df = DataFrame({'ints': [1, 2, 3]})\n result = df.replace(1, 0)\n expected = DataFrame({'ints': [0, 2, 3]})\n assert_frame_equal(result, expected)\n\n df = DataFrame({'ints': [1, 2, 3]}, dtype=np.int32)\n result = df.replace(1, 0)\n expected = DataFrame({'ints': [0, 2, 3]}, dtype=np.int32)\n assert_frame_equal(result, expected)\n\n df = DataFrame({'ints': [1, 2, 3]}, dtype=np.int16)\n 
result = df.replace(1, 0)\n expected = DataFrame({'ints': [0, 2, 3]}, dtype=np.int16)\n assert_frame_equal(result, expected)\n\n # bools\n df = DataFrame({'bools': [True, False, True]})\n result = df.replace(False, True)\n self.assertTrue(result.values.all())\n\n # complex blocks\n df = DataFrame({'complex': [1j, 2j, 3j]})\n result = df.replace(1j, 0j)\n expected = DataFrame({'complex': [0j, 2j, 3j]})\n assert_frame_equal(result, expected)\n\n # datetime blocks\n prev = datetime.today()\n now = datetime.today()\n df = DataFrame({'datetime64': Index([prev, now, prev])})\n result = df.replace(prev, now)\n expected = DataFrame({'datetime64': Index([now] * 3)})\n assert_frame_equal(result, expected)\n\n def test_replace_input_formats(self):\n # both dicts\n to_rep = {'A': np.nan, 'B': 0, 'C': ''}\n values = {'A': 0, 'B': -1, 'C': 'missing'}\n df = DataFrame({'A': [np.nan, 0, np.inf], 'B': [0, 2, 5],\n 'C': ['', 'asdf', 'fd']})\n filled = df.replace(to_rep, values)\n expected = {}\n for k, v in compat.iteritems(df):\n expected[k] = v.replace(to_rep[k], values[k])\n assert_frame_equal(filled, DataFrame(expected))\n\n result = df.replace([0, 2, 5], [5, 2, 0])\n expected = DataFrame({'A': [np.nan, 5, np.inf], 'B': [5, 2, 0],\n 'C': ['', 'asdf', 'fd']})\n assert_frame_equal(result, expected)\n\n # dict to scalar\n filled = df.replace(to_rep, 0)\n expected = {}\n for k, v in compat.iteritems(df):\n expected[k] = v.replace(to_rep[k], 0)\n assert_frame_equal(filled, DataFrame(expected))\n\n self.assertRaises(TypeError, df.replace, to_rep, [np.nan, 0, ''])\n\n # scalar to dict\n values = {'A': 0, 'B': -1, 'C': 'missing'}\n df = DataFrame({'A': [np.nan, 0, np.nan], 'B': [0, 2, 5],\n 'C': ['', 'asdf', 'fd']})\n filled = df.replace(np.nan, values)\n expected = {}\n for k, v in compat.iteritems(df):\n expected[k] = v.replace(np.nan, values[k])\n assert_frame_equal(filled, DataFrame(expected))\n\n # list to list\n to_rep = [np.nan, 0, '']\n values = [-2, -1, 'missing']\n result = df.replace(to_rep, values)\n expected = df.copy()\n for i in range(len(to_rep)):\n expected.replace(to_rep[i], values[i], inplace=True)\n assert_frame_equal(result, expected)\n\n self.assertRaises(ValueError, df.replace, to_rep, values[1:])\n\n # list to scalar\n to_rep = [np.nan, 0, '']\n result = df.replace(to_rep, -1)\n expected = df.copy()\n for i in range(len(to_rep)):\n expected.replace(to_rep[i], -1, inplace=True)\n assert_frame_equal(result, expected)\n\n def test_replace_limit(self):\n pass\n\n def test_replace_dict_no_regex(self):\n answer = Series({0: 'Strongly Agree', 1: 'Agree', 2: 'Neutral', 3:\n 'Disagree', 4: 'Strongly Disagree'})\n weights = {'Agree': 4, 'Disagree': 2, 'Neutral': 3, 'Strongly Agree':\n 5, 'Strongly Disagree': 1}\n expected = Series({0: 5, 1: 4, 2: 3, 3: 2, 4: 1})\n result = answer.replace(weights)\n tm.assert_series_equal(result, expected)\n\n def test_replace_series_no_regex(self):\n answer = Series({0: 'Strongly Agree', 1: 'Agree', 2: 'Neutral', 3:\n 'Disagree', 4: 'Strongly Disagree'})\n weights = Series({'Agree': 4, 'Disagree': 2, 'Neutral': 3,\n 'Strongly Agree': 5, 'Strongly Disagree': 1})\n expected = Series({0: 5, 1: 4, 2: 3, 3: 2, 4: 1})\n result = answer.replace(weights)\n tm.assert_series_equal(result, expected)\n\n def test_replace_dict_tuple_list_ordering_remains_the_same(self):\n df = DataFrame(dict(A=[nan, 1]))\n res1 = df.replace(to_replace={nan: 0, 1: -1e8})\n res2 = df.replace(to_replace=(1, nan), value=[-1e8, 0])\n res3 = df.replace(to_replace=[1, nan], value=[-1e8, 0])\n\n 
expected = DataFrame({'A': [0, -1e8]})\n tm.assert_frame_equal(res1, res2)\n tm.assert_frame_equal(res2, res3)\n tm.assert_frame_equal(res3, expected)\n\n def test_replace_doesnt_replace_without_regex(self):\n from pandas.compat import StringIO\n raw = \"\"\"fol T_opp T_Dir T_Enh\n 0 1 0 0 vo\n 1 2 vr 0 0\n 2 2 0 0 0\n 3 3 0 bt 0\"\"\"\n df = read_csv(StringIO(raw), sep=r'\\s+')\n res = df.replace({'\\D': 1})\n tm.assert_frame_equal(df, res)\n\n def test_replace_bool_with_string(self):\n df = DataFrame({'a': [True, False], 'b': list('ab')})\n result = df.replace(True, 'a')\n expected = DataFrame({'a': ['a', False], 'b': df.b})\n tm.assert_frame_equal(result, expected)\n\n def test_replace_pure_bool_with_string_no_op(self):\n df = DataFrame(np.random.rand(2, 2) > 0.5)\n result = df.replace('asdf', 'fdsa')\n tm.assert_frame_equal(df, result)\n\n def test_replace_bool_with_bool(self):\n df = DataFrame(np.random.rand(2, 2) > 0.5)\n result = df.replace(False, True)\n expected = DataFrame(np.ones((2, 2), dtype=bool))\n tm.assert_frame_equal(result, expected)\n\n def test_replace_with_dict_with_bool_keys(self):\n df = DataFrame({0: [True, False], 1: [False, True]})\n with tm.assertRaisesRegexp(TypeError, 'Cannot compare types .+'):\n df.replace({'asdf': 'asdb', True: 'yes'})\n\n def test_replace_truthy(self):\n df = DataFrame({'a': [True, True]})\n r = df.replace([np.inf, -np.inf], np.nan)\n e = df\n tm.assert_frame_equal(r, e)\n\n def test_replace_int_to_int_chain(self):\n df = DataFrame({'a': lrange(1, 5)})\n with tm.assertRaisesRegexp(ValueError, \"Replacement not allowed .+\"):\n df.replace({'a': dict(zip(range(1, 5), range(2, 6)))})\n\n def test_replace_str_to_str_chain(self):\n a = np.arange(1, 5)\n astr = a.astype(str)\n bstr = np.arange(2, 6).astype(str)\n df = DataFrame({'a': astr})\n with tm.assertRaisesRegexp(ValueError, \"Replacement not allowed .+\"):\n df.replace({'a': dict(zip(astr, bstr))})\n\n def test_replace_swapping_bug(self):\n df = pd.DataFrame({'a': [True, False, True]})\n res = df.replace({'a': {True: 'Y', False: 'N'}})\n expect = pd.DataFrame({'a': ['Y', 'N', 'Y']})\n tm.assert_frame_equal(res, expect)\n\n df = pd.DataFrame({'a': [0, 1, 0]})\n res = df.replace({'a': {0: 'Y', 1: 'N'}})\n expect = pd.DataFrame({'a': ['Y', 'N', 'Y']})\n tm.assert_frame_equal(res, expect)\n\n def test_replace_period(self):\n d = {'fname':\n {'out_augmented_AUG_2011.json': pd.Period(year=2011, month=8, freq='M'),\n 'out_augmented_JAN_2011.json': pd.Period(year=2011, month=1, freq='M'),\n 'out_augmented_MAY_2012.json': pd.Period(year=2012, month=5, freq='M'),\n 'out_augmented_SUBSIDY_WEEK.json': pd.Period(year=2011, month=4, freq='M'),\n 'out_augmented_AUG_2012.json': pd.Period(year=2012, month=8, freq='M'),\n 'out_augmented_MAY_2011.json': pd.Period(year=2011, month=5, freq='M'),\n 'out_augmented_SEP_2013.json': pd.Period(year=2013, month=9, freq='M')}}\n\n df = pd.DataFrame(['out_augmented_AUG_2012.json',\n 'out_augmented_SEP_2013.json',\n 'out_augmented_SUBSIDY_WEEK.json',\n 'out_augmented_MAY_2012.json',\n 'out_augmented_MAY_2011.json',\n 'out_augmented_AUG_2011.json',\n 'out_augmented_JAN_2011.json'], columns=['fname'])\n tm.assert_equal(set(df.fname.values), set(d['fname'].keys()))\n expected = DataFrame({'fname': [d['fname'][k]\n for k in df.fname.values]})\n result = df.replace(d)\n tm.assert_frame_equal(result, expected)\n\n def test_replace_datetime(self):\n d = {'fname':\n {'out_augmented_AUG_2011.json': pd.Timestamp('2011-08'),\n 'out_augmented_JAN_2011.json': 
pd.Timestamp('2011-01'),\n 'out_augmented_MAY_2012.json': pd.Timestamp('2012-05'),\n 'out_augmented_SUBSIDY_WEEK.json': pd.Timestamp('2011-04'),\n 'out_augmented_AUG_2012.json': pd.Timestamp('2012-08'),\n 'out_augmented_MAY_2011.json': pd.Timestamp('2011-05'),\n 'out_augmented_SEP_2013.json': pd.Timestamp('2013-09')}}\n\n df = pd.DataFrame(['out_augmented_AUG_2012.json',\n 'out_augmented_SEP_2013.json',\n 'out_augmented_SUBSIDY_WEEK.json',\n 'out_augmented_MAY_2012.json',\n 'out_augmented_MAY_2011.json',\n 'out_augmented_AUG_2011.json',\n 'out_augmented_JAN_2011.json'], columns=['fname'])\n tm.assert_equal(set(df.fname.values), set(d['fname'].keys()))\n expected = DataFrame({'fname': [d['fname'][k]\n for k in df.fname.values]})\n result = df.replace(d)\n tm.assert_frame_equal(result, expected)\n\n def test_combine_multiple_frames_dtypes(self):\n\n # GH 2759\n A = DataFrame(data=np.ones((10, 2)), columns=['foo', 'bar'], dtype=np.float64)\n B = DataFrame(data=np.ones((10, 2)), dtype=np.float32)\n results = pd.concat((A, B), axis=1).get_dtype_counts()\n expected = Series(dict( float64 = 2, float32 = 2 ))\n assert_series_equal(results,expected)\n\n def test_ops(self):\n\n # tst ops and reversed ops in evaluation\n # GH7198\n\n # smaller hits python, larger hits numexpr\n for n in [ 4, 4000 ]:\n\n df = DataFrame(1,index=range(n),columns=list('abcd'))\n df.iloc[0] = 2\n m = df.mean()\n\n for op_str, op, rop in [('+','__add__','__radd__'),\n ('-','__sub__','__rsub__'),\n ('*','__mul__','__rmul__'),\n ('/','__truediv__','__rtruediv__')]:\n\n base = DataFrame(np.tile(m.values,n).reshape(n,-1),columns=list('abcd'))\n expected = eval(\"base{op}df\".format(op=op_str))\n\n # ops as strings\n result = eval(\"m{op}df\".format(op=op_str))\n assert_frame_equal(result,expected)\n\n # these are commutative\n if op in ['+','*']:\n result = getattr(df,op)(m)\n assert_frame_equal(result,expected)\n\n # these are not\n elif op in ['-','/']:\n result = getattr(df,rop)(m)\n assert_frame_equal(result,expected)\n\n # GH7192\n df = DataFrame(dict(A=np.random.randn(25000)))\n df.iloc[0:5] = np.nan\n expected = (1-np.isnan(df.iloc[0:25]))\n result = (1-np.isnan(df)).iloc[0:25]\n assert_frame_equal(result,expected)\n\n def test_truncate(self):\n offset = datetools.bday\n\n ts = self.tsframe[::3]\n\n start, end = self.tsframe.index[3], self.tsframe.index[6]\n\n start_missing = self.tsframe.index[2]\n end_missing = self.tsframe.index[7]\n\n # neither specified\n truncated = ts.truncate()\n assert_frame_equal(truncated, ts)\n\n # both specified\n expected = ts[1:3]\n\n truncated = ts.truncate(start, end)\n assert_frame_equal(truncated, expected)\n\n truncated = ts.truncate(start_missing, end_missing)\n assert_frame_equal(truncated, expected)\n\n # start specified\n expected = ts[1:]\n\n truncated = ts.truncate(before=start)\n assert_frame_equal(truncated, expected)\n\n truncated = ts.truncate(before=start_missing)\n assert_frame_equal(truncated, expected)\n\n # end specified\n expected = ts[:3]\n\n truncated = ts.truncate(after=end)\n assert_frame_equal(truncated, expected)\n\n truncated = ts.truncate(after=end_missing)\n assert_frame_equal(truncated, expected)\n\n self.assertRaises(ValueError, ts.truncate,\n before=ts.index[-1] - 1,\n after=ts.index[0] +1)\n\n def test_truncate_copy(self):\n index = self.tsframe.index\n truncated = self.tsframe.truncate(index[5], index[10])\n truncated.values[:] = 5.\n self.assertFalse((self.tsframe.values[5:11] == 5).any())\n\n def test_xs(self):\n idx = self.frame.index[5]\n xs = 
self.frame.xs(idx)\n for item, value in compat.iteritems(xs):\n if np.isnan(value):\n self.assertTrue(np.isnan(self.frame[item][idx]))\n else:\n self.assertEqual(value, self.frame[item][idx])\n\n # mixed-type xs\n test_data = {\n 'A': {'1': 1, '2': 2},\n 'B': {'1': '1', '2': '2', '3': '3'},\n }\n frame = DataFrame(test_data)\n xs = frame.xs('1')\n self.assertEqual(xs.dtype, np.object_)\n self.assertEqual(xs['A'], 1)\n self.assertEqual(xs['B'], '1')\n\n with tm.assertRaises(KeyError):\n self.tsframe.xs(self.tsframe.index[0] - datetools.bday)\n\n # xs get column\n series = self.frame.xs('A', axis=1)\n expected = self.frame['A']\n assert_series_equal(series, expected)\n\n # view is returned if possible\n series = self.frame.xs('A', axis=1)\n series[:] = 5\n self.assertTrue((expected == 5).all())\n\n def test_xs_corner(self):\n # pathological mixed-type reordering case\n df = DataFrame(index=[0])\n df['A'] = 1.\n df['B'] = 'foo'\n df['C'] = 2.\n df['D'] = 'bar'\n df['E'] = 3.\n\n xs = df.xs(0)\n assert_almost_equal(xs, [1., 'foo', 2., 'bar', 3.])\n\n # no columns but index\n df = DataFrame(index=['a', 'b', 'c'])\n result = df.xs('a')\n expected = Series([])\n assert_series_equal(result, expected)\n\n def test_xs_duplicates(self):\n df = DataFrame(randn(5, 2), index=['b', 'b', 'c', 'b', 'a'])\n\n cross = df.xs('c')\n exp = df.irow(2)\n assert_series_equal(cross, exp)\n\n def test_xs_keep_level(self):\n df = DataFrame({'day': {0: 'sat', 1: 'sun'},\n 'flavour': {0: 'strawberry', 1: 'strawberry'},\n 'sales': {0: 10, 1: 12},\n 'year': {0: 2008, 1: 2008}}).set_index(['year','flavour','day'])\n result = df.xs('sat', level='day', drop_level=False)\n expected = df[:1]\n assert_frame_equal(result, expected)\n\n result = df.xs([2008, 'sat'], level=['year', 'day'], drop_level=False)\n assert_frame_equal(result, expected)\n\n def test_pivot(self):\n data = {\n 'index': ['A', 'B', 'C', 'C', 'B', 'A'],\n 'columns': ['One', 'One', 'One', 'Two', 'Two', 'Two'],\n 'values': [1., 2., 3., 3., 2., 1.]\n }\n\n frame = DataFrame(data)\n pivoted = frame.pivot(\n index='index', columns='columns', values='values')\n\n expected = DataFrame({\n 'One': {'A': 1., 'B': 2., 'C': 3.},\n 'Two': {'A': 1., 'B': 2., 'C': 3.}\n })\n expected.index.name, expected.columns.name = 'index', 'columns'\n\n assert_frame_equal(pivoted, expected)\n\n # name tracking\n self.assertEqual(pivoted.index.name, 'index')\n self.assertEqual(pivoted.columns.name, 'columns')\n\n # don't specify values\n pivoted = frame.pivot(index='index', columns='columns')\n self.assertEqual(pivoted.index.name, 'index')\n self.assertEqual(pivoted.columns.names, (None, 'columns'))\n\n # pivot multiple columns\n wp = tm.makePanel()\n lp = wp.to_frame()\n df = lp.reset_index()\n assert_frame_equal(df.pivot('major', 'minor'), lp.unstack())\n\n def test_pivot_duplicates(self):\n data = DataFrame({'a': ['bar', 'bar', 'foo', 'foo', 'foo'],\n 'b': ['one', 'two', 'one', 'one', 'two'],\n 'c': [1., 2., 3., 3., 4.]})\n with assertRaisesRegexp(ValueError, 'duplicate entries'):\n data.pivot('a', 'b', 'c')\n\n def test_pivot_empty(self):\n df = DataFrame({}, columns=['a', 'b', 'c'])\n result = df.pivot('a', 'b', 'c')\n expected = DataFrame({})\n assert_frame_equal(result, expected, check_names=False)\n\n def test_pivot_integer_bug(self):\n df = DataFrame(data=[(\"A\", \"1\", \"A1\"), (\"B\", \"2\", \"B2\")])\n\n result = df.pivot(index=1, columns=0, values=2)\n repr(result)\n self.assert_numpy_array_equal(result.columns, ['A', 'B'])\n\n def test_reindex(self):\n newFrame = 
self.frame.reindex(self.ts1.index)\n\n for col in newFrame.columns:\n for idx, val in compat.iteritems(newFrame[col]):\n if idx in self.frame.index:\n if np.isnan(val):\n self.assertTrue(np.isnan(self.frame[col][idx]))\n else:\n self.assertEqual(val, self.frame[col][idx])\n else:\n self.assertTrue(np.isnan(val))\n\n for col, series in compat.iteritems(newFrame):\n self.assertTrue(tm.equalContents(series.index, newFrame.index))\n emptyFrame = self.frame.reindex(Index([]))\n self.assertEqual(len(emptyFrame.index), 0)\n\n # Cython code should be unit-tested directly\n nonContigFrame = self.frame.reindex(self.ts1.index[::2])\n\n for col in nonContigFrame.columns:\n for idx, val in compat.iteritems(nonContigFrame[col]):\n if idx in self.frame.index:\n if np.isnan(val):\n self.assertTrue(np.isnan(self.frame[col][idx]))\n else:\n self.assertEqual(val, self.frame[col][idx])\n else:\n self.assertTrue(np.isnan(val))\n\n for col, series in compat.iteritems(nonContigFrame):\n self.assertTrue(tm.equalContents(series.index,\n nonContigFrame.index))\n\n # corner cases\n\n # Same index, copies values but not index if copy=False\n newFrame = self.frame.reindex(self.frame.index, copy=False)\n self.assertIs(newFrame.index, self.frame.index)\n\n # length zero\n newFrame = self.frame.reindex([])\n self.assertTrue(newFrame.empty)\n self.assertEqual(len(newFrame.columns), len(self.frame.columns))\n\n # length zero with columns reindexed with non-empty index\n newFrame = self.frame.reindex([])\n newFrame = newFrame.reindex(self.frame.index)\n self.assertEqual(len(newFrame.index), len(self.frame.index))\n self.assertEqual(len(newFrame.columns), len(self.frame.columns))\n\n # pass non-Index\n newFrame = self.frame.reindex(list(self.ts1.index))\n self.assertTrue(newFrame.index.equals(self.ts1.index))\n\n # copy with no axes\n result = self.frame.reindex()\n assert_frame_equal(result,self.frame)\n self.assertFalse(result is self.frame)\n\n def test_reindex_name_remains(self):\n s = Series(random.rand(10))\n df = DataFrame(s, index=np.arange(len(s)))\n i = Series(np.arange(10), name='iname')\n\n df = df.reindex(i)\n self.assertEqual(df.index.name, 'iname')\n\n df = df.reindex(Index(np.arange(10), name='tmpname'))\n self.assertEqual(df.index.name, 'tmpname')\n\n s = Series(random.rand(10))\n df = DataFrame(s.T, index=np.arange(len(s)))\n i = Series(np.arange(10), name='iname')\n df = df.reindex(columns=i)\n self.assertEqual(df.columns.name, 'iname')\n\n def test_reindex_int(self):\n smaller = self.intframe.reindex(self.intframe.index[::2])\n\n self.assertEqual(smaller['A'].dtype, np.int64)\n\n bigger = smaller.reindex(self.intframe.index)\n self.assertEqual(bigger['A'].dtype, np.float64)\n\n smaller = self.intframe.reindex(columns=['A', 'B'])\n self.assertEqual(smaller['A'].dtype, np.int64)\n\n def test_reindex_like(self):\n other = self.frame.reindex(index=self.frame.index[:10],\n columns=['C', 'B'])\n\n assert_frame_equal(other, self.frame.reindex_like(other))\n\n def test_reindex_columns(self):\n newFrame = self.frame.reindex(columns=['A', 'B', 'E'])\n\n assert_series_equal(newFrame['B'], self.frame['B'])\n self.assertTrue(np.isnan(newFrame['E']).all())\n self.assertNotIn('C', newFrame)\n\n # length zero\n newFrame = self.frame.reindex(columns=[])\n self.assertTrue(newFrame.empty)\n\n def test_reindex_axes(self):\n\n # GH 3317, reindexing by both axes loses freq of the index\n from datetime import datetime\n df = DataFrame(np.ones((3, 3)), index=[datetime(2012, 1, 1), datetime(2012, 1, 2), datetime(2012, 1, 3)], 
columns=['a', 'b', 'c'])\n time_freq = date_range('2012-01-01', '2012-01-03', freq='d')\n some_cols = ['a', 'b']\n\n index_freq = df.reindex(index=time_freq).index.freq\n both_freq = df.reindex(index=time_freq, columns=some_cols).index.freq\n seq_freq = df.reindex(index=time_freq).reindex(columns=some_cols).index.freq\n self.assertEqual(index_freq, both_freq)\n self.assertEqual(index_freq, seq_freq)\n\n def test_reindex_fill_value(self):\n df = DataFrame(np.random.randn(10, 4))\n\n # axis=0\n result = df.reindex(lrange(15))\n self.assertTrue(np.isnan(result.values[-5:]).all())\n\n result = df.reindex(lrange(15), fill_value=0)\n expected = df.reindex(lrange(15)).fillna(0)\n assert_frame_equal(result, expected)\n\n # axis=1\n result = df.reindex(columns=lrange(5), fill_value=0.)\n expected = df.copy()\n expected[4] = 0.\n assert_frame_equal(result, expected)\n\n result = df.reindex(columns=lrange(5), fill_value=0)\n expected = df.copy()\n expected[4] = 0\n assert_frame_equal(result, expected)\n\n result = df.reindex(columns=lrange(5), fill_value='foo')\n expected = df.copy()\n expected[4] = 'foo'\n assert_frame_equal(result, expected)\n\n # reindex_axis\n result = df.reindex_axis(lrange(15), fill_value=0., axis=0)\n expected = df.reindex(lrange(15)).fillna(0)\n assert_frame_equal(result, expected)\n\n result = df.reindex_axis(lrange(5), fill_value=0., axis=1)\n expected = df.reindex(columns=lrange(5)).fillna(0)\n assert_frame_equal(result, expected)\n\n # other dtypes\n df['foo'] = 'foo'\n result = df.reindex(lrange(15), fill_value=0)\n expected = df.reindex(lrange(15)).fillna(0)\n assert_frame_equal(result, expected)\n\n def test_reindex_dups(self):\n\n # GH4746, reindex on duplicate index error messages\n arr = np.random.randn(10)\n df = DataFrame(arr,index=[1,2,3,4,5,1,2,3,4,5])\n\n # set index is ok\n result = df.copy()\n result.index = list(range(len(df)))\n expected = DataFrame(arr,index=list(range(len(df))))\n assert_frame_equal(result,expected)\n\n # reindex fails\n self.assertRaises(ValueError, df.reindex, index=list(range(len(df))))\n\n def test_align(self):\n af, bf = self.frame.align(self.frame)\n self.assertIsNot(af._data, self.frame._data)\n\n af, bf = self.frame.align(self.frame, copy=False)\n self.assertIs(af._data, self.frame._data)\n\n # axis = 0\n other = self.frame.ix[:-5, :3]\n af, bf = self.frame.align(other, axis=0, fill_value=-1)\n self.assertTrue(bf.columns.equals(other.columns))\n # test fill value\n join_idx = self.frame.index.join(other.index)\n diff_a = self.frame.index.difference(join_idx)\n diff_b = other.index.difference(join_idx)\n diff_a_vals = af.reindex(diff_a).values\n diff_b_vals = bf.reindex(diff_b).values\n self.assertTrue((diff_a_vals == -1).all())\n\n af, bf = self.frame.align(other, join='right', axis=0)\n self.assertTrue(bf.columns.equals(other.columns))\n self.assertTrue(bf.index.equals(other.index))\n self.assertTrue(af.index.equals(other.index))\n\n # axis = 1\n other = self.frame.ix[:-5, :3].copy()\n af, bf = self.frame.align(other, axis=1)\n self.assertTrue(bf.columns.equals(self.frame.columns))\n self.assertTrue(bf.index.equals(other.index))\n\n # test fill value\n join_idx = self.frame.index.join(other.index)\n diff_a = self.frame.index.difference(join_idx)\n diff_b = other.index.difference(join_idx)\n diff_a_vals = af.reindex(diff_a).values\n diff_b_vals = bf.reindex(diff_b).values\n self.assertTrue((diff_a_vals == -1).all())\n\n af, bf = self.frame.align(other, join='inner', axis=1)\n self.assertTrue(bf.columns.equals(other.columns))\n\n 
af, bf = self.frame.align(other, join='inner', axis=1, method='pad')\n self.assertTrue(bf.columns.equals(other.columns))\n\n # test other non-float types\n af, bf = self.intframe.align(other, join='inner', axis=1, method='pad')\n self.assertTrue(bf.columns.equals(other.columns))\n\n af, bf = self.mixed_frame.align(self.mixed_frame,\n join='inner', axis=1, method='pad')\n self.assertTrue(bf.columns.equals(self.mixed_frame.columns))\n\n af, bf = self.frame.align(other.ix[:, 0], join='inner', axis=1,\n method=None, fill_value=None)\n self.assertTrue(bf.index.equals(Index([])))\n\n af, bf = self.frame.align(other.ix[:, 0], join='inner', axis=1,\n method=None, fill_value=0)\n self.assertTrue(bf.index.equals(Index([])))\n\n # mixed floats/ints\n af, bf = self.mixed_float.align(other.ix[:, 0], join='inner', axis=1,\n method=None, fill_value=0)\n self.assertTrue(bf.index.equals(Index([])))\n\n af, bf = self.mixed_int.align(other.ix[:, 0], join='inner', axis=1,\n method=None, fill_value=0)\n self.assertTrue(bf.index.equals(Index([])))\n\n # try to align dataframe to series along bad axis\n self.assertRaises(ValueError, self.frame.align, af.ix[0, :3],\n join='inner', axis=2)\n\n def _check_align(self, a, b, axis, fill_axis, how, method, limit=None):\n aa, ab = a.align(b, axis=axis, join=how, method=method, limit=limit,\n fill_axis=fill_axis)\n\n join_index, join_columns = None, None\n\n ea, eb = a, b\n if axis is None or axis == 0:\n join_index = a.index.join(b.index, how=how)\n ea = ea.reindex(index=join_index)\n eb = eb.reindex(index=join_index)\n\n if axis is None or axis == 1:\n join_columns = a.columns.join(b.columns, how=how)\n ea = ea.reindex(columns=join_columns)\n eb = eb.reindex(columns=join_columns)\n\n ea = ea.fillna(axis=fill_axis, method=method, limit=limit)\n eb = eb.fillna(axis=fill_axis, method=method, limit=limit)\n\n assert_frame_equal(aa, ea)\n assert_frame_equal(ab, eb)\n\n def test_align_fill_method_inner(self):\n for meth in ['pad', 'bfill']:\n for ax in [0, 1, None]:\n for fax in [0, 1]:\n self._check_align_fill('inner', meth, ax, fax)\n\n def test_align_fill_method_outer(self):\n for meth in ['pad', 'bfill']:\n for ax in [0, 1, None]:\n for fax in [0, 1]:\n self._check_align_fill('outer', meth, ax, fax)\n\n def test_align_fill_method_left(self):\n for meth in ['pad', 'bfill']:\n for ax in [0, 1, None]:\n for fax in [0, 1]:\n self._check_align_fill('left', meth, ax, fax)\n\n def test_align_fill_method_right(self):\n for meth in ['pad', 'bfill']:\n for ax in [0, 1, None]:\n for fax in [0, 1]:\n self._check_align_fill('right', meth, ax, fax)\n\n def _check_align_fill(self, kind, meth, ax, fax):\n left = self.frame.ix[0:4, :10]\n right = self.frame.ix[2:, 6:]\n empty = self.frame.ix[:0, :0]\n\n self._check_align(left, right, axis=ax, fill_axis=fax,\n how=kind, method=meth)\n self._check_align(left, right, axis=ax, fill_axis=fax,\n how=kind, method=meth, limit=1)\n\n # empty left\n self._check_align(empty, right, axis=ax, fill_axis=fax,\n how=kind, method=meth)\n self._check_align(empty, right, axis=ax, fill_axis=fax,\n how=kind, method=meth, limit=1)\n\n # empty right\n self._check_align(left, empty, axis=ax, fill_axis=fax,\n how=kind, method=meth)\n self._check_align(left, empty, axis=ax, fill_axis=fax,\n how=kind, method=meth, limit=1)\n\n # both empty\n self._check_align(empty, empty, axis=ax, fill_axis=fax,\n how=kind, method=meth)\n self._check_align(empty, empty, axis=ax, fill_axis=fax,\n how=kind, method=meth, limit=1)\n\n def test_align_int_fill_bug(self):\n # GH #910\n 
X = np.arange(10*10, dtype='float64').reshape(10, 10)\n Y = np.ones((10, 1), dtype=int)\n\n df1 = DataFrame(X)\n df1['0.X'] = Y.squeeze()\n\n df2 = df1.astype(float)\n\n result = df1 - df1.mean()\n expected = df2 - df2.mean()\n assert_frame_equal(result, expected)\n\n def test_where(self):\n default_frame = DataFrame(np.random.randn(5, 3),columns=['A','B','C'])\n\n def _safe_add(df):\n # only add to the numeric items\n def is_ok(s):\n return issubclass(s.dtype.type, (np.integer,np.floating)) and s.dtype != 'uint8'\n return DataFrame(dict([ (c,s+1) if is_ok(s) else (c,s) for c, s in compat.iteritems(df) ]))\n\n def _check_get(df, cond, check_dtypes = True):\n other1 = _safe_add(df)\n rs = df.where(cond, other1)\n rs2 = df.where(cond.values, other1)\n for k, v in rs.iteritems():\n assert_series_equal(v, Series(np.where(cond[k], df[k], other1[k]),index=v.index))\n assert_frame_equal(rs, rs2)\n\n # dtypes\n if check_dtypes:\n self.assertTrue((rs.dtypes == df.dtypes).all() == True)\n\n # check getting\n for df in [ default_frame, self.mixed_frame, self.mixed_float, self.mixed_int ]:\n cond = df > 0\n _check_get(df, cond)\n\n\n # upcasting case (GH # 2794)\n df = DataFrame(dict([ (c,Series([1]*3,dtype=c)) for c in ['int64','int32','float32','float64'] ]))\n df.ix[1,:] = 0\n result = df.where(df>=0).get_dtype_counts()\n\n #### when we don't preserve boolean casts ####\n #expected = Series({ 'float32' : 1, 'float64' : 3 })\n\n expected = Series({ 'float32' : 1, 'float64' : 1, 'int32' : 1, 'int64' : 1 })\n assert_series_equal(result, expected)\n\n # aligning\n def _check_align(df, cond, other, check_dtypes = True):\n rs = df.where(cond, other)\n for i, k in enumerate(rs.columns):\n result = rs[k]\n d = df[k].values\n c = cond[k].reindex(df[k].index).fillna(False).values\n\n if np.isscalar(other):\n o = other\n else:\n if isinstance(other,np.ndarray):\n o = Series(other[:,i],index=result.index).values\n else:\n o = other[k].values\n\n new_values = d if c.all() else np.where(c, d, o)\n expected = Series(new_values,index=result.index)\n\n # since we can't always have the correct numpy dtype\n # as numpy doesn't know how to downcast, don't check\n assert_series_equal(result, expected, check_dtype=False)\n\n # dtypes\n # can't check dtype when other is an ndarray\n\n if check_dtypes and not isinstance(other,np.ndarray):\n self.assertTrue((rs.dtypes == df.dtypes).all() == True)\n\n for df in [ self.mixed_frame, self.mixed_float, self.mixed_int ]:\n\n # other is a frame\n cond = (df > 0)[1:]\n _check_align(df, cond, _safe_add(df))\n\n # check other is ndarray\n cond = df > 0\n _check_align(df, cond, (_safe_add(df).values))\n\n # integers are upcast, so don't check the dtypes\n cond = df > 0\n check_dtypes = all([ not issubclass(s.type,np.integer) for s in df.dtypes ])\n _check_align(df, cond, np.nan, check_dtypes = check_dtypes)\n\n # invalid conditions\n df = default_frame\n err1 = (df + 1).values[0:2, :]\n self.assertRaises(ValueError, df.where, cond, err1)\n\n err2 = cond.ix[:2, :].values\n other1 = _safe_add(df)\n self.assertRaises(ValueError, df.where, err2, other1)\n\n self.assertRaises(ValueError, df.mask, True)\n self.assertRaises(ValueError, df.mask, 0)\n\n # where inplace\n def _check_set(df, cond, check_dtypes = True):\n dfi = df.copy()\n econd = cond.reindex_like(df).fillna(True)\n expected = dfi.mask(~econd)\n\n dfi.where(cond, np.nan, inplace=True)\n assert_frame_equal(dfi, expected)\n\n # dtypes (and confirm upcasts)x\n if check_dtypes:\n for k, v in compat.iteritems(df.dtypes):\n if 
issubclass(v.type,np.integer) and not cond[k].all():\n v = np.dtype('float64')\n self.assertEqual(dfi[k].dtype, v)\n\n for df in [ default_frame, self.mixed_frame, self.mixed_float, self.mixed_int ]:\n\n cond = df > 0\n _check_set(df, cond)\n\n cond = df >= 0\n _check_set(df, cond)\n\n # aligining\n cond = (df >= 0)[1:]\n _check_set(df, cond)\n\n def test_where_bug(self):\n\n # GH 2793\n\n df = DataFrame({'a': [1.0, 2.0, 3.0, 4.0], 'b': [4.0, 3.0, 2.0, 1.0]}, dtype = 'float64')\n expected = DataFrame({'a': [np.nan, np.nan, 3.0, 4.0], 'b': [4.0, 3.0, np.nan, np.nan]}, dtype = 'float64')\n result = df.where(df > 2, np.nan)\n assert_frame_equal(result, expected)\n\n result = df.copy()\n result.where(result > 2, np.nan, inplace=True)\n assert_frame_equal(result, expected)\n\n # mixed\n for dtype in ['int16','int8','int32','int64']:\n df = DataFrame({'a': np.array([1, 2, 3, 4],dtype=dtype), 'b': np.array([4.0, 3.0, 2.0, 1.0], dtype = 'float64') })\n expected = DataFrame({'a': [np.nan, np.nan, 3.0, 4.0], 'b': [4.0, 3.0, np.nan, np.nan]}, dtype = 'float64')\n result = df.where(df > 2, np.nan)\n assert_frame_equal(result, expected)\n\n result = df.copy()\n result.where(result > 2, np.nan, inplace=True)\n assert_frame_equal(result, expected)\n\n # transpositional issue\n # GH7506\n a = DataFrame({ 0 : [1,2], 1 : [3,4], 2 : [5,6]})\n b = DataFrame({ 0 : [np.nan,8], 1:[9,np.nan], 2:[np.nan,np.nan]})\n do_not_replace = b.isnull() | (a > b)\n\n expected = a.copy()\n expected[~do_not_replace] = b\n\n result = a.where(do_not_replace,b)\n assert_frame_equal(result,expected)\n\n a = DataFrame({ 0 : [4,6], 1 : [1,0]})\n b = DataFrame({ 0 : [np.nan,3],1:[3,np.nan]})\n do_not_replace = b.isnull() | (a > b)\n\n expected = a.copy()\n expected[~do_not_replace] = b\n\n result = a.where(do_not_replace,b)\n assert_frame_equal(result,expected)\n\n def test_where_datetime(self):\n\n # GH 3311\n df = DataFrame(dict(A = date_range('20130102',periods=5),\n B = date_range('20130104',periods=5),\n C = np.random.randn(5)))\n\n stamp = datetime(2013,1,3)\n result = df[df>stamp]\n expected = df.copy()\n expected.loc[[0,1],'A'] = np.nan\n assert_frame_equal(result,expected)\n\n def test_where_none(self):\n # GH 4667\n # setting with None changes dtype\n df = DataFrame({'series': Series(range(10))}).astype(float)\n df[df > 7] = None\n expected = DataFrame({'series': Series([0,1,2,3,4,5,6,7,np.nan,np.nan]) })\n assert_frame_equal(df, expected)\n\n # GH 7656\n df = DataFrame([{'A': 1, 'B': np.nan, 'C': 'Test'}, {'A': np.nan, 'B': 'Test', 'C': np.nan}])\n expected = df.where(~isnull(df), None)\n with tm.assertRaisesRegexp(TypeError, 'boolean setting on mixed-type'):\n df.where(~isnull(df), None, inplace=True)\n\n def test_where_align(self):\n\n def create():\n df = DataFrame(np.random.randn(10,3))\n df.iloc[3:5,0] = np.nan\n df.iloc[4:6,1] = np.nan\n df.iloc[5:8,2] = np.nan\n return df\n\n # series\n df = create()\n expected = df.fillna(df.mean())\n result = df.where(pd.notnull(df),df.mean(),axis='columns')\n assert_frame_equal(result, expected)\n\n df.where(pd.notnull(df),df.mean(),inplace=True,axis='columns')\n assert_frame_equal(df, expected)\n\n df = create().fillna(0)\n expected = df.apply(lambda x, y: x.where(x>0,y), y=df[0])\n result = df.where(df>0,df[0],axis='index')\n assert_frame_equal(result, expected)\n result = df.where(df>0,df[0],axis='rows')\n assert_frame_equal(result, expected)\n\n # frame\n df = create()\n expected = df.fillna(1)\n result = 
df.where(pd.notnull(df),DataFrame(1,index=df.index,columns=df.columns))\n assert_frame_equal(result, expected)\n\n def test_where_complex(self):\n # GH 6345\n expected = DataFrame([[1+1j, 2], [np.nan, 4+1j]], columns=['a', 'b'])\n df = DataFrame([[1+1j, 2], [5+1j, 4+1j]], columns=['a', 'b'])\n df[df.abs() >= 5] = np.nan\n assert_frame_equal(df,expected)\n\n def test_mask(self):\n df = DataFrame(np.random.randn(5, 3))\n cond = df > 0\n\n rs = df.where(cond, np.nan)\n assert_frame_equal(rs, df.mask(df <= 0))\n assert_frame_equal(rs, df.mask(~cond))\n\n def test_mask_edge_case_1xN_frame(self):\n # GH4071\n df = DataFrame([[1, 2]])\n res = df.mask(DataFrame([[True, False]]))\n expec = DataFrame([[nan, 2]])\n assert_frame_equal(res, expec)\n\n #----------------------------------------------------------------------\n # Transposing\n\n def test_transpose(self):\n frame = self.frame\n dft = frame.T\n for idx, series in compat.iteritems(dft):\n for col, value in compat.iteritems(series):\n if np.isnan(value):\n self.assertTrue(np.isnan(frame[col][idx]))\n else:\n self.assertEqual(value, frame[col][idx])\n\n # mixed type\n index, data = tm.getMixedTypeDict()\n mixed = DataFrame(data, index=index)\n\n mixed_T = mixed.T\n for col, s in compat.iteritems(mixed_T):\n self.assertEqual(s.dtype, np.object_)\n\n def test_transpose_get_view(self):\n dft = self.frame.T\n dft.values[:, 5:10] = 5\n\n self.assertTrue((self.frame.values[5:10] == 5).all())\n\n #----------------------------------------------------------------------\n # Renaming\n\n def test_rename(self):\n mapping = {\n 'A': 'a',\n 'B': 'b',\n 'C': 'c',\n 'D': 'd'\n }\n\n renamed = self.frame.rename(columns=mapping)\n renamed2 = self.frame.rename(columns=str.lower)\n\n assert_frame_equal(renamed, renamed2)\n assert_frame_equal(renamed2.rename(columns=str.upper),\n self.frame, check_names=False)\n\n # index\n data = {\n 'A': {'foo': 0, 'bar': 1}\n }\n\n # gets sorted alphabetical\n df = DataFrame(data)\n renamed = df.rename(index={'foo': 'bar', 'bar': 'foo'})\n self.assert_numpy_array_equal(renamed.index, ['foo', 'bar'])\n\n renamed = df.rename(index=str.upper)\n self.assert_numpy_array_equal(renamed.index, ['BAR', 'FOO'])\n\n # have to pass something\n self.assertRaises(TypeError, self.frame.rename)\n\n # partial columns\n renamed = self.frame.rename(columns={'C': 'foo', 'D': 'bar'})\n self.assert_numpy_array_equal(renamed.columns, ['A', 'B', 'foo', 'bar'])\n\n # other axis\n renamed = self.frame.T.rename(index={'C': 'foo', 'D': 'bar'})\n self.assert_numpy_array_equal(renamed.index, ['A', 'B', 'foo', 'bar'])\n\n # index with name\n index = Index(['foo', 'bar'], name='name')\n renamer = DataFrame(data, index=index)\n renamed = renamer.rename(index={'foo': 'bar', 'bar': 'foo'})\n self.assert_numpy_array_equal(renamed.index, ['bar', 'foo'])\n self.assertEqual(renamed.index.name, renamer.index.name)\n\n # MultiIndex\n tuples_index = [('foo1', 'bar1'), ('foo2', 'bar2')]\n tuples_columns = [('fizz1', 'buzz1'), ('fizz2', 'buzz2')]\n index = MultiIndex.from_tuples(tuples_index, names=['foo', 'bar'])\n columns = MultiIndex.from_tuples(tuples_columns, names=['fizz', 'buzz'])\n renamer = DataFrame([(0,0),(1,1)], index=index, columns=columns)\n renamed = renamer.rename(index={'foo1': 'foo3', 'bar2': 'bar3'},\n columns={'fizz1': 'fizz3', 'buzz2': 'buzz3'})\n new_index = MultiIndex.from_tuples([('foo3', 'bar1'), ('foo2', 'bar3')])\n new_columns = MultiIndex.from_tuples([('fizz3', 'buzz1'), ('fizz2', 'buzz3')])\n self.assert_numpy_array_equal(renamed.index, 
new_index)\n self.assert_numpy_array_equal(renamed.columns, new_columns)\n self.assertEqual(renamed.index.names, renamer.index.names)\n self.assertEqual(renamed.columns.names, renamer.columns.names)\n\n def test_rename_nocopy(self):\n renamed = self.frame.rename(columns={'C': 'foo'}, copy=False)\n renamed['foo'] = 1.\n self.assertTrue((self.frame['C'] == 1.).all())\n\n def test_rename_inplace(self):\n self.frame.rename(columns={'C': 'foo'})\n self.assertIn('C', self.frame)\n self.assertNotIn('foo', self.frame)\n\n c_id = id(self.frame['C'])\n frame = self.frame.copy()\n frame.rename(columns={'C': 'foo'}, inplace=True)\n\n self.assertNotIn('C', frame)\n self.assertIn('foo', frame)\n self.assertNotEqual(id(frame['foo']), c_id)\n\n def test_rename_bug(self):\n # GH 5344\n # rename set ref_locs, and set_index was not resetting\n df = DataFrame({ 0 : ['foo','bar'], 1 : ['bah','bas'], 2 : [1,2]})\n df = df.rename(columns={0 : 'a'})\n df = df.rename(columns={1 : 'b'})\n df = df.set_index(['a','b'])\n df.columns = ['2001-01-01']\n expected = DataFrame([[1],[2]],index=MultiIndex.from_tuples([('foo','bah'),('bar','bas')],\n names=['a','b']),\n columns=['2001-01-01'])\n assert_frame_equal(df,expected)\n\n #----------------------------------------------------------------------\n # Time series related\n def test_diff(self):\n the_diff = self.tsframe.diff(1)\n\n assert_series_equal(the_diff['A'],\n self.tsframe['A'] - self.tsframe['A'].shift(1))\n\n # int dtype\n a = 10000000000000000\n b = a + 1\n s = Series([a, b])\n\n rs = DataFrame({'s': s}).diff()\n self.assertEqual(rs.s[1], 1)\n\n # mixed numeric\n tf = self.tsframe.astype('float32')\n the_diff = tf.diff(1)\n assert_series_equal(the_diff['A'],\n tf['A'] - tf['A'].shift(1))\n\n def test_diff_timedelta(self):\n # GH 4533\n df = DataFrame(dict(time=[Timestamp('20130101 9:01'),\n Timestamp('20130101 9:02')],\n value=[1.0,2.0]))\n\n res = df.diff()\n exp = DataFrame([[pd.NaT, np.nan],\n [Timedelta('00:01:00'), 1]],\n columns=['time', 'value'])\n assert_frame_equal(res, exp)\n\n def test_diff_mixed_dtype(self):\n df = DataFrame(np.random.randn(5, 3))\n df['A'] = np.array([1, 2, 3, 4, 5], dtype=object)\n\n result = df.diff()\n self.assertEqual(result[0].dtype, np.float64)\n\n def test_diff_neg_n(self):\n rs = self.tsframe.diff(-1)\n xp = self.tsframe - self.tsframe.shift(-1)\n assert_frame_equal(rs, xp)\n\n def test_diff_float_n(self):\n rs = self.tsframe.diff(1.)\n xp = self.tsframe.diff(1)\n assert_frame_equal(rs, xp)\n\n def test_pct_change(self):\n rs = self.tsframe.pct_change(fill_method=None)\n assert_frame_equal(rs, self.tsframe / self.tsframe.shift(1) - 1)\n\n rs = self.tsframe.pct_change(2)\n filled = self.tsframe.fillna(method='pad')\n assert_frame_equal(rs, filled / filled.shift(2) - 1)\n\n rs = self.tsframe.pct_change(fill_method='bfill', limit=1)\n filled = self.tsframe.fillna(method='bfill', limit=1)\n assert_frame_equal(rs, filled / filled.shift(1) - 1)\n\n rs = self.tsframe.pct_change(freq='5D')\n filled = self.tsframe.fillna(method='pad')\n assert_frame_equal(rs, filled / filled.shift(freq='5D') - 1)\n\n def test_pct_change_shift_over_nas(self):\n s = Series([1., 1.5, np.nan, 2.5, 3.])\n\n df = DataFrame({'a': s, 'b': s})\n\n chg = df.pct_change()\n expected = Series([np.nan, 0.5, np.nan, 2.5 / 1.5 - 1, .2])\n edf = DataFrame({'a': expected, 'b': expected})\n assert_frame_equal(chg, edf)\n\n def test_shift(self):\n # naive shift\n shiftedFrame = self.tsframe.shift(5)\n self.assertTrue(shiftedFrame.index.equals(self.tsframe.index))\n\n 
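# Editor's illustrative sketch (hypothetical frame, not part of the original test): shift(1)\n        # moves each value down one row, fills the vacated top row with NaN and leaves the index\n        # unchanged, which is what the paired Series/DataFrame assertions in this test rely on.\n        _shift_demo = DataFrame({'x': [1.0, 2.0, 3.0]})\n        self.assertTrue(np.isnan(_shift_demo.shift(1)['x'].iloc[0]))\n        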
shiftedSeries = self.tsframe['A'].shift(5)\n assert_series_equal(shiftedFrame['A'], shiftedSeries)\n\n shiftedFrame = self.tsframe.shift(-5)\n self.assertTrue(shiftedFrame.index.equals(self.tsframe.index))\n\n shiftedSeries = self.tsframe['A'].shift(-5)\n assert_series_equal(shiftedFrame['A'], shiftedSeries)\n\n # shift by 0\n unshifted = self.tsframe.shift(0)\n assert_frame_equal(unshifted, self.tsframe)\n\n # shift by DateOffset\n shiftedFrame = self.tsframe.shift(5, freq=datetools.BDay())\n self.assertEqual(len(shiftedFrame), len(self.tsframe))\n\n shiftedFrame2 = self.tsframe.shift(5, freq='B')\n assert_frame_equal(shiftedFrame, shiftedFrame2)\n\n d = self.tsframe.index[0]\n shifted_d = d + datetools.BDay(5)\n assert_series_equal(self.tsframe.xs(d),\n shiftedFrame.xs(shifted_d))\n\n # shift int frame\n int_shifted = self.intframe.shift(1)\n\n # Shifting with PeriodIndex\n ps = tm.makePeriodFrame()\n shifted = ps.shift(1)\n unshifted = shifted.shift(-1)\n self.assertTrue(shifted.index.equals(ps.index))\n\n tm.assert_dict_equal(unshifted.ix[:, 0].valid(), ps.ix[:, 0],\n compare_keys=False)\n\n shifted2 = ps.shift(1, 'B')\n shifted3 = ps.shift(1, datetools.bday)\n assert_frame_equal(shifted2, shifted3)\n assert_frame_equal(ps, shifted2.shift(-1, 'B'))\n\n assertRaisesRegexp(ValueError, 'does not match PeriodIndex freq',\n ps.shift, freq='D')\n\n\n # shift other axis\n # GH 6371\n df = DataFrame(np.random.rand(10,5))\n expected = pd.concat([DataFrame(np.nan,index=df.index,columns=[0]),df.iloc[:,0:-1]],ignore_index=True,axis=1)\n result = df.shift(1,axis=1)\n assert_frame_equal(result,expected)\n\n # shift named axis\n df = DataFrame(np.random.rand(10,5))\n expected = pd.concat([DataFrame(np.nan,index=df.index,columns=[0]),df.iloc[:,0:-1]],ignore_index=True,axis=1)\n result = df.shift(1,axis='columns')\n assert_frame_equal(result,expected)\n\n def test_shift_bool(self):\n df = DataFrame({'high': [True, False],\n 'low': [False, False]})\n rs = df.shift(1)\n xp = DataFrame(np.array([[np.nan, np.nan],\n [True, False]], dtype=object),\n columns=['high', 'low'])\n assert_frame_equal(rs, xp)\n\n def test_shift_empty(self):\n # Regression test for #8019\n df = DataFrame({'foo': []})\n rs = df.shift(-1)\n\n assert_frame_equal(df, rs)\n\n def test_tshift(self):\n # PeriodIndex\n ps = tm.makePeriodFrame()\n shifted = ps.tshift(1)\n unshifted = shifted.tshift(-1)\n\n assert_frame_equal(unshifted, ps)\n\n shifted2 = ps.tshift(freq='B')\n assert_frame_equal(shifted, shifted2)\n\n shifted3 = ps.tshift(freq=datetools.bday)\n assert_frame_equal(shifted, shifted3)\n\n assertRaisesRegexp(ValueError, 'does not match', ps.tshift, freq='M')\n\n # DatetimeIndex\n shifted = self.tsframe.tshift(1)\n unshifted = shifted.tshift(-1)\n\n assert_frame_equal(self.tsframe, unshifted)\n\n shifted2 = self.tsframe.tshift(freq=self.tsframe.index.freq)\n assert_frame_equal(shifted, shifted2)\n\n inferred_ts = DataFrame(self.tsframe.values,\n Index(np.asarray(self.tsframe.index)),\n columns=self.tsframe.columns)\n shifted = inferred_ts.tshift(1)\n unshifted = shifted.tshift(-1)\n assert_frame_equal(shifted, self.tsframe.tshift(1))\n assert_frame_equal(unshifted, inferred_ts)\n\n no_freq = self.tsframe.ix[[0, 5, 7], :]\n self.assertRaises(ValueError, no_freq.tshift)\n\n def test_apply(self):\n # ufunc\n applied = self.frame.apply(np.sqrt)\n assert_series_equal(np.sqrt(self.frame['A']), applied['A'])\n\n # aggregator\n applied = self.frame.apply(np.mean)\n self.assertEqual(applied['A'], np.mean(self.frame['A']))\n\n d = 
self.frame.index[0]\n applied = self.frame.apply(np.mean, axis=1)\n self.assertEqual(applied[d], np.mean(self.frame.xs(d)))\n self.assertIs(applied.index, self.frame.index) # want this\n\n # invalid axis\n df = DataFrame(\n [[1, 2, 3], [4, 5, 6], [7, 8, 9]], index=['a', 'a', 'c'])\n self.assertRaises(ValueError, df.apply, lambda x: x, 2)\n\n def test_apply_mixed_datetimelike(self):\n # mixed datetimelike\n # GH 7778\n df = DataFrame({ 'A' : date_range('20130101',periods=3), 'B' : pd.to_timedelta(np.arange(3),unit='s') })\n result = df.apply(lambda x: x, axis=1)\n assert_frame_equal(result, df)\n\n def test_apply_empty(self):\n # empty\n applied = self.empty.apply(np.sqrt)\n self.assertTrue(applied.empty)\n\n applied = self.empty.apply(np.mean)\n self.assertTrue(applied.empty)\n\n no_rows = self.frame[:0]\n result = no_rows.apply(lambda x: x.mean())\n expected = Series(np.nan, index=self.frame.columns)\n assert_series_equal(result, expected)\n\n no_cols = self.frame.ix[:, []]\n result = no_cols.apply(lambda x: x.mean(), axis=1)\n expected = Series(np.nan, index=self.frame.index)\n assert_series_equal(result, expected)\n\n # 2476\n xp = DataFrame(index=['a'])\n rs = xp.apply(lambda x: x['a'], axis=1)\n assert_frame_equal(xp, rs)\n\n # reduce with an empty DataFrame\n x = []\n result = self.empty.apply(x.append, axis=1, reduce=False)\n assert_frame_equal(result, self.empty)\n result = self.empty.apply(x.append, axis=1, reduce=True)\n assert_series_equal(result, Series([]))\n\n empty_with_cols = DataFrame(columns=['a', 'b', 'c'])\n result = empty_with_cols.apply(x.append, axis=1, reduce=False)\n assert_frame_equal(result, empty_with_cols)\n result = empty_with_cols.apply(x.append, axis=1, reduce=True)\n assert_series_equal(result, Series([]))\n\n # Ensure that x.append hasn't been called\n self.assertEqual(x, [])\n\n def test_apply_standard_nonunique(self):\n df = DataFrame(\n [[1, 2, 3], [4, 5, 6], [7, 8, 9]], index=['a', 'a', 'c'])\n rs = df.apply(lambda s: s[0], axis=1)\n xp = Series([1, 4, 7], ['a', 'a', 'c'])\n assert_series_equal(rs, xp)\n\n rs = df.T.apply(lambda s: s[0], axis=0)\n assert_series_equal(rs, xp)\n\n def test_apply_broadcast(self):\n broadcasted = self.frame.apply(np.mean, broadcast=True)\n agged = self.frame.apply(np.mean)\n\n for col, ts in compat.iteritems(broadcasted):\n self.assertTrue((ts == agged[col]).all())\n\n broadcasted = self.frame.apply(np.mean, axis=1, broadcast=True)\n agged = self.frame.apply(np.mean, axis=1)\n for idx in broadcasted.index:\n self.assertTrue((broadcasted.xs(idx) == agged[idx]).all())\n\n def test_apply_raw(self):\n result0 = self.frame.apply(np.mean, raw=True)\n result1 = self.frame.apply(np.mean, axis=1, raw=True)\n\n expected0 = self.frame.apply(lambda x: x.values.mean())\n expected1 = self.frame.apply(lambda x: x.values.mean(), axis=1)\n\n assert_series_equal(result0, expected0)\n assert_series_equal(result1, expected1)\n\n # no reduction\n result = self.frame.apply(lambda x: x * 2, raw=True)\n expected = self.frame * 2\n assert_frame_equal(result, expected)\n\n def test_apply_axis1(self):\n d = self.frame.index[0]\n tapplied = self.frame.apply(np.mean, axis=1)\n self.assertEqual(tapplied[d], np.mean(self.frame.xs(d)))\n\n def test_apply_ignore_failures(self):\n result = self.mixed_frame._apply_standard(np.mean, 0,\n ignore_failures=True)\n expected = self.mixed_frame._get_numeric_data().apply(np.mean)\n assert_series_equal(result, expected)\n\n def test_apply_mixed_dtype_corner(self):\n df = DataFrame({'A': ['foo'],\n 'B': [1.]})\n 
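# Editor's illustrative sketch (hypothetical frame, not in the original suite): apply with\n        # axis=1 hands each row to the function as a Series, so a reducing function returns one\n        # value per row; the empty-slice case below degenerates to an empty result.\n        _apply_demo = DataFrame({'x': [1.0, 3.0], 'y': [2.0, 4.0]})\n        assert_series_equal(_apply_demo.apply(np.mean, axis=1), Series([1.5, 3.5]))\n        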
result = df[:0].apply(np.mean, axis=1)\n # the result here is actually kind of ambiguous, should it be a Series\n # or a DataFrame?\n expected = Series(np.nan, index=[])\n assert_series_equal(result, expected)\n\n df = DataFrame({'A': ['foo'],\n 'B': [1.]})\n result = df.apply(lambda x: x['A'], axis=1)\n expected = Series(['foo'],index=[0])\n assert_series_equal(result, expected)\n\n result = df.apply(lambda x: x['B'], axis=1)\n expected = Series([1.],index=[0])\n assert_series_equal(result, expected)\n\n def test_apply_empty_infer_type(self):\n no_cols = DataFrame(index=['a', 'b', 'c'])\n no_index = DataFrame(columns=['a', 'b', 'c'])\n\n def _check(df, f):\n test_res = f(np.array([], dtype='f8'))\n is_reduction = not isinstance(test_res, np.ndarray)\n\n def _checkit(axis=0, raw=False):\n res = df.apply(f, axis=axis, raw=raw)\n if is_reduction:\n agg_axis = df._get_agg_axis(axis)\n tm.assert_isinstance(res, Series)\n self.assertIs(res.index, agg_axis)\n else:\n tm.assert_isinstance(res, DataFrame)\n\n _checkit()\n _checkit(axis=1)\n _checkit(raw=True)\n _checkit(axis=0, raw=True)\n\n _check(no_cols, lambda x: x)\n _check(no_cols, lambda x: x.mean())\n _check(no_index, lambda x: x)\n _check(no_index, lambda x: x.mean())\n\n result = no_cols.apply(lambda x: x.mean(), broadcast=True)\n tm.assert_isinstance(result, DataFrame)\n\n def test_apply_with_args_kwds(self):\n def add_some(x, howmuch=0):\n return x + howmuch\n\n def agg_and_add(x, howmuch=0):\n return x.mean() + howmuch\n\n def subtract_and_divide(x, sub, divide=1):\n return (x - sub) / divide\n\n result = self.frame.apply(add_some, howmuch=2)\n exp = self.frame.apply(lambda x: x + 2)\n assert_frame_equal(result, exp)\n\n result = self.frame.apply(agg_and_add, howmuch=2)\n exp = self.frame.apply(lambda x: x.mean() + 2)\n assert_series_equal(result, exp)\n\n res = self.frame.apply(subtract_and_divide, args=(2,), divide=2)\n exp = self.frame.apply(lambda x: (x - 2.) 
/ 2.)\n assert_frame_equal(res, exp)\n\n def test_apply_yield_list(self):\n result = self.frame.apply(list)\n assert_frame_equal(result, self.frame)\n\n def test_apply_reduce_Series(self):\n self.frame.ix[::2, 'A'] = np.nan\n expected = self.frame.mean(1)\n result = self.frame.apply(np.mean, axis=1)\n assert_series_equal(result, expected)\n\n def test_apply_differently_indexed(self):\n df = DataFrame(np.random.randn(20, 10))\n\n result0 = df.apply(Series.describe, axis=0)\n expected0 = DataFrame(dict((i, v.describe())\n for i, v in compat.iteritems(df)),\n columns=df.columns)\n assert_frame_equal(result0, expected0)\n\n result1 = df.apply(Series.describe, axis=1)\n expected1 = DataFrame(dict((i, v.describe())\n for i, v in compat.iteritems(df.T)),\n columns=df.index).T\n assert_frame_equal(result1, expected1)\n\n def test_apply_modify_traceback(self):\n data = DataFrame({'A': ['foo', 'foo', 'foo', 'foo',\n 'bar', 'bar', 'bar', 'bar',\n 'foo', 'foo', 'foo'],\n 'B': ['one', 'one', 'one', 'two',\n 'one', 'one', 'one', 'two',\n 'two', 'two', 'one'],\n 'C': ['dull', 'dull', 'shiny', 'dull',\n 'dull', 'shiny', 'shiny', 'dull',\n 'shiny', 'shiny', 'shiny'],\n 'D': np.random.randn(11),\n 'E': np.random.randn(11),\n 'F': np.random.randn(11)})\n\n data.loc[4,'C'] = np.nan\n\n def transform(row):\n if row['C'].startswith('shin') and row['A'] == 'foo':\n row['D'] = 7\n return row\n\n def transform2(row):\n if (notnull(row['C']) and row['C'].startswith('shin')\n and row['A'] == 'foo'):\n row['D'] = 7\n return row\n\n try:\n transformed = data.apply(transform, axis=1)\n except AttributeError as e:\n self.assertEqual(len(e.args), 2)\n self.assertEqual(e.args[1], 'occurred at index 4')\n self.assertEqual(e.args[0], \"'float' object has no attribute 'startswith'\")\n\n def test_apply_bug(self):\n\n # GH 6125\n import datetime\n positions = pd.DataFrame([[1, 'ABC0', 50], [1, 'YUM0', 20],\n [1, 'DEF0', 20], [2, 'ABC1', 50],\n [2, 'YUM1', 20], [2, 'DEF1', 20]],\n columns=['a', 'market', 'position'])\n def f(r):\n return r['market']\n expected = positions.apply(f, axis=1)\n\n positions = DataFrame([[datetime.datetime(2013, 1, 1), 'ABC0', 50],\n [datetime.datetime(2013, 1, 2), 'YUM0', 20],\n [datetime.datetime(2013, 1, 3), 'DEF0', 20],\n [datetime.datetime(2013, 1, 4), 'ABC1', 50],\n [datetime.datetime(2013, 1, 5), 'YUM1', 20],\n [datetime.datetime(2013, 1, 6), 'DEF1', 20]],\n columns=['a', 'market', 'position'])\n result = positions.apply(f, axis=1)\n assert_series_equal(result,expected)\n\n def test_swapaxes(self):\n df = DataFrame(np.random.randn(10, 5))\n assert_frame_equal(df.T, df.swapaxes(0, 1))\n assert_frame_equal(df.T, df.swapaxes(1, 0))\n assert_frame_equal(df, df.swapaxes(0, 0))\n self.assertRaises(ValueError, df.swapaxes, 2, 5)\n\n def test_apply_convert_objects(self):\n data = DataFrame({'A': ['foo', 'foo', 'foo', 'foo',\n 'bar', 'bar', 'bar', 'bar',\n 'foo', 'foo', 'foo'],\n 'B': ['one', 'one', 'one', 'two',\n 'one', 'one', 'one', 'two',\n 'two', 'two', 'one'],\n 'C': ['dull', 'dull', 'shiny', 'dull',\n 'dull', 'shiny', 'shiny', 'dull',\n 'shiny', 'shiny', 'shiny'],\n 'D': np.random.randn(11),\n 'E': np.random.randn(11),\n 'F': np.random.randn(11)})\n\n result = data.apply(lambda x: x, axis=1)\n assert_frame_equal(result.convert_objects(), data)\n\n def test_apply_attach_name(self):\n result = self.frame.apply(lambda x: x.name)\n expected = Series(self.frame.columns, index=self.frame.columns)\n assert_series_equal(result, expected)\n\n result = self.frame.apply(lambda x: x.name, axis=1)\n 
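# Editor's illustrative sketch (hypothetical frame, not part of the original test): the Series\n        # handed to the applied function carries its column (or, with axis=1, row) label as .name,\n        # which is exactly what the expected values below are built from.\n        _name_demo = DataFrame({'a': [1], 'b': [2]}).apply(lambda s: s.name)\n        assert_series_equal(_name_demo, Series(['a', 'b'], index=['a', 'b']))\n        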
expected = Series(self.frame.index, index=self.frame.index)\n assert_series_equal(result, expected)\n\n # non-reductions\n result = self.frame.apply(lambda x: np.repeat(x.name, len(x)))\n expected = DataFrame(np.tile(self.frame.columns,\n (len(self.frame.index), 1)),\n index=self.frame.index,\n columns=self.frame.columns)\n assert_frame_equal(result, expected)\n\n result = self.frame.apply(lambda x: np.repeat(x.name, len(x)),\n axis=1)\n expected = DataFrame(np.tile(self.frame.index,\n (len(self.frame.columns), 1)).T,\n index=self.frame.index,\n columns=self.frame.columns)\n assert_frame_equal(result, expected)\n\n def test_apply_multi_index(self):\n s = DataFrame([[1,2], [3,4], [5,6]])\n s.index = MultiIndex.from_arrays([['a','a','b'], ['c','d','d']])\n s.columns = ['col1','col2']\n res = s.apply(lambda x: Series({'min': min(x), 'max': max(x)}), 1)\n tm.assert_isinstance(res.index, MultiIndex)\n\n def test_applymap(self):\n applied = self.frame.applymap(lambda x: x * 2)\n assert_frame_equal(applied, self.frame * 2)\n result = self.frame.applymap(type)\n\n # GH #465, function returning tuples\n result = self.frame.applymap(lambda x: (x, x))\n tm.assert_isinstance(result['A'][0], tuple)\n\n # GH 2909, object conversion to float in constructor?\n df = DataFrame(data=[1,'a'])\n result = df.applymap(lambda x: x)\n self.assertEqual(result.dtypes[0], object)\n\n df = DataFrame(data=[1.,'a'])\n result = df.applymap(lambda x: x)\n self.assertEqual(result.dtypes[0], object)\n\n # GH2786\n df = DataFrame(np.random.random((3,4)))\n df2 = df.copy()\n cols = ['a','a','a','a']\n df.columns = cols\n\n expected = df2.applymap(str)\n expected.columns = cols\n result = df.applymap(str)\n assert_frame_equal(result,expected)\n\n # datetime/timedelta\n df['datetime'] = Timestamp('20130101')\n df['timedelta'] = Timedelta('1 min')\n result = df.applymap(str)\n for f in ['datetime','timedelta']:\n self.assertEqual(result.loc[0,f],str(df.loc[0,f]))\n\n def test_filter(self):\n # items\n filtered = self.frame.filter(['A', 'B', 'E'])\n self.assertEqual(len(filtered.columns), 2)\n self.assertNotIn('E', filtered)\n\n filtered = self.frame.filter(['A', 'B', 'E'], axis='columns')\n self.assertEqual(len(filtered.columns), 2)\n self.assertNotIn('E', filtered)\n\n # other axis\n idx = self.frame.index[0:4]\n filtered = self.frame.filter(idx, axis='index')\n expected = self.frame.reindex(index=idx)\n assert_frame_equal(filtered,expected)\n\n # like\n fcopy = self.frame.copy()\n fcopy['AA'] = 1\n\n filtered = fcopy.filter(like='A')\n self.assertEqual(len(filtered.columns), 2)\n self.assertIn('AA', filtered)\n\n # like with ints in column names\n df = DataFrame(0., index=[0, 1, 2], columns=[0, 1, '_A', '_B'])\n filtered = df.filter(like='_')\n self.assertEqual(len(filtered.columns), 2)\n\n # pass in None\n with assertRaisesRegexp(TypeError, 'Must pass'):\n self.frame.filter(items=None)\n\n # objects\n filtered = self.mixed_frame.filter(like='foo')\n self.assertIn('foo', filtered)\n\n # unicode columns, won't ascii-encode\n df = self.frame.rename(columns={'B': u('\\u2202')})\n filtered = df.filter(like='C')\n self.assertTrue('C' in filtered)\n\n def test_filter_regex_search(self):\n fcopy = self.frame.copy()\n fcopy['AA'] = 1\n\n # regex\n filtered = fcopy.filter(regex='[A]+')\n self.assertEqual(len(filtered.columns), 2)\n self.assertIn('AA', filtered)\n\n # doesn't have to be at beginning\n df = DataFrame({'aBBa': [1, 2],\n 'BBaBB': [1, 2],\n 'aCCa': [1, 2],\n 'aCCaBB': [1, 2]})\n\n result = df.filter(regex='BB')\n exp = 
df[[x for x in df.columns if 'BB' in x]]\n assert_frame_equal(result, exp)\n\n def test_filter_corner(self):\n empty = DataFrame()\n\n result = empty.filter([])\n assert_frame_equal(result, empty)\n\n result = empty.filter(like='foo')\n assert_frame_equal(result, empty)\n\n def test_select(self):\n f = lambda x: x.weekday() == 2\n result = self.tsframe.select(f, axis=0)\n expected = self.tsframe.reindex(\n index=self.tsframe.index[[f(x) for x in self.tsframe.index]])\n assert_frame_equal(result, expected)\n\n result = self.frame.select(lambda x: x in ('B', 'D'), axis=1)\n expected = self.frame.reindex(columns=['B', 'D'])\n\n assert_frame_equal(result, expected, check_names=False) # TODO should reindex check_names?\n\n def test_reorder_levels(self):\n index = MultiIndex(levels=[['bar'], ['one', 'two', 'three'], [0, 1]],\n labels=[[0, 0, 0, 0, 0, 0],\n [0, 1, 2, 0, 1, 2],\n [0, 1, 0, 1, 0, 1]],\n names=['L0', 'L1', 'L2'])\n df = DataFrame({'A': np.arange(6), 'B': np.arange(6)}, index=index)\n\n # no change, position\n result = df.reorder_levels([0, 1, 2])\n assert_frame_equal(df, result)\n\n # no change, labels\n result = df.reorder_levels(['L0', 'L1', 'L2'])\n assert_frame_equal(df, result)\n\n # rotate, position\n result = df.reorder_levels([1, 2, 0])\n e_idx = MultiIndex(levels=[['one', 'two', 'three'], [0, 1], ['bar']],\n labels=[[0, 1, 2, 0, 1, 2],\n [0, 1, 0, 1, 0, 1],\n [0, 0, 0, 0, 0, 0]],\n names=['L1', 'L2', 'L0'])\n expected = DataFrame({'A': np.arange(6), 'B': np.arange(6)},\n index=e_idx)\n assert_frame_equal(result, expected)\n\n result = df.reorder_levels([0, 0, 0])\n e_idx = MultiIndex(levels=[['bar'], ['bar'], ['bar']],\n labels=[[0, 0, 0, 0, 0, 0],\n [0, 0, 0, 0, 0, 0],\n [0, 0, 0, 0, 0, 0]],\n names=['L0', 'L0', 'L0'])\n expected = DataFrame({'A': np.arange(6), 'B': np.arange(6)},\n index=e_idx)\n assert_frame_equal(result, expected)\n\n result = df.reorder_levels(['L0', 'L0', 'L0'])\n assert_frame_equal(result, expected)\n\n def test_sort_index(self):\n frame = DataFrame(np.arange(16).reshape(4, 4), index=[1, 2, 3, 4],\n columns=['A', 'B', 'C', 'D'])\n\n # axis=0\n unordered = frame.ix[[3, 2, 4, 1]]\n sorted_df = unordered.sort_index()\n expected = frame\n assert_frame_equal(sorted_df, expected)\n\n sorted_df = unordered.sort_index(ascending=False)\n expected = frame[::-1]\n assert_frame_equal(sorted_df, expected)\n\n # axis=1\n unordered = frame.ix[:, ['D', 'B', 'C', 'A']]\n sorted_df = unordered.sort_index(axis=1)\n expected = frame\n assert_frame_equal(sorted_df, expected)\n\n sorted_df = unordered.sort_index(axis=1, ascending=False)\n expected = frame.ix[:, ::-1]\n assert_frame_equal(sorted_df, expected)\n\n # by column\n sorted_df = frame.sort_index(by='A')\n indexer = frame['A'].argsort().values\n expected = frame.ix[frame.index[indexer]]\n assert_frame_equal(sorted_df, expected)\n\n sorted_df = frame.sort_index(by='A', ascending=False)\n indexer = indexer[::-1]\n expected = frame.ix[frame.index[indexer]]\n assert_frame_equal(sorted_df, expected)\n\n sorted_df = frame.sort(columns='A', ascending=False)\n assert_frame_equal(sorted_df, expected)\n\n # GH4839\n sorted_df = frame.sort(columns=['A'], ascending=[False])\n assert_frame_equal(sorted_df, expected)\n\n # check for now\n sorted_df = frame.sort(columns='A')\n assert_frame_equal(sorted_df, expected[::-1])\n expected = frame.sort_index(by='A')\n assert_frame_equal(sorted_df, expected)\n\n\n sorted_df = frame.sort(columns=['A', 'B'], ascending=False)\n expected = frame.sort_index(by=['A', 'B'], ascending=False)\n 
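# Editor's illustrative sketch (hypothetical frame, not from the original test): sorting on\n        # several columns orders rows by the first key and breaks ties with the later keys.\n        _sort_demo = DataFrame({'A': [1, 1, 0], 'B': [2, 1, 3]})\n        self.assert_numpy_array_equal(_sort_demo.sort_index(by=['A', 'B'])['B'].values, [3, 1, 2])\n        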
assert_frame_equal(sorted_df, expected)\n\n sorted_df = frame.sort(columns=['A', 'B'])\n assert_frame_equal(sorted_df, expected[::-1])\n\n self.assertRaises(ValueError, frame.sort_index, axis=2, inplace=True)\n\n msg = 'When sorting by column, axis must be 0'\n with assertRaisesRegexp(ValueError, msg):\n frame.sort_index(by='A', axis=1)\n\n msg = r'Length of ascending \\(5\\) != length of by \\(2\\)'\n with assertRaisesRegexp(ValueError, msg):\n frame.sort_index(by=['A', 'B'], axis=0, ascending=[True] * 5)\n\n def test_sort_nan(self):\n # GH3917\n nan = np.nan\n df = DataFrame({'A': [1, 2, nan, 1, 6, 8, 4],\n 'B': [9, nan, 5, 2, 5, 4, 5]})\n\n # sort one column only\n expected = DataFrame(\n {'A': [nan, 1, 1, 2, 4, 6, 8],\n 'B': [5, 9, 2, nan, 5, 5, 4]},\n index=[2, 0, 3, 1, 6, 4, 5])\n sorted_df = df.sort(['A'], na_position='first')\n assert_frame_equal(sorted_df, expected)\n\n expected = DataFrame(\n {'A': [nan, 8, 6, 4, 2, 1, 1],\n 'B': [5, 4, 5, 5, nan, 9, 2]},\n index=[2, 5, 4, 6, 1, 0, 3])\n sorted_df = df.sort(['A'], na_position='first', ascending=False)\n assert_frame_equal(sorted_df, expected)\n\n # na_position='last', order\n expected = DataFrame(\n {'A': [1, 1, 2, 4, 6, 8, nan],\n 'B': [2, 9, nan, 5, 5, 4, 5]},\n index=[3, 0, 1, 6, 4, 5, 2])\n sorted_df = df.sort(['A','B'])\n assert_frame_equal(sorted_df, expected)\n\n # na_position='first', order\n expected = DataFrame(\n {'A': [nan, 1, 1, 2, 4, 6, 8],\n 'B': [5, 2, 9, nan, 5, 5, 4]},\n index=[2, 3, 0, 1, 6, 4, 5])\n sorted_df = df.sort(['A','B'], na_position='first')\n assert_frame_equal(sorted_df, expected)\n\n # na_position='first', not order\n expected = DataFrame(\n {'A': [nan, 1, 1, 2, 4, 6, 8],\n 'B': [5, 9, 2, nan, 5, 5, 4]},\n index=[2, 0, 3, 1, 6, 4, 5])\n sorted_df = df.sort(['A','B'], ascending=[1,0], na_position='first')\n assert_frame_equal(sorted_df, expected)\n\n # na_position='last', not order\n expected = DataFrame(\n {'A': [8, 6, 4, 2, 1, 1, nan],\n 'B': [4, 5, 5, nan, 2, 9, 5]},\n index=[5, 4, 6, 1, 3, 0, 2])\n sorted_df = df.sort(['A','B'], ascending=[0,1], na_position='last')\n assert_frame_equal(sorted_df, expected)\n\n # Test DataFrame with nan label\n df = DataFrame({'A': [1, 2, nan, 1, 6, 8, 4],\n 'B': [9, nan, 5, 2, 5, 4, 5]},\n index = [1, 2, 3, 4, 5, 6, nan])\n\n # NaN label, ascending=True, na_position='last'\n sorted_df = df.sort(kind='quicksort', ascending=True, na_position='last')\n expected = DataFrame({'A': [1, 2, nan, 1, 6, 8, 4],\n 'B': [9, nan, 5, 2, 5, 4, 5]},\n index = [1, 2, 3, 4, 5, 6, nan])\n assert_frame_equal(sorted_df, expected)\n\n # NaN label, ascending=True, na_position='first'\n sorted_df = df.sort(na_position='first')\n expected = DataFrame({'A': [4, 1, 2, nan, 1, 6, 8],\n 'B': [5, 9, nan, 5, 2, 5, 4]},\n index = [nan, 1, 2, 3, 4, 5, 6])\n assert_frame_equal(sorted_df, expected)\n\n # NaN label, ascending=False, na_position='last'\n sorted_df = df.sort(kind='quicksort', ascending=False)\n expected = DataFrame({'A': [8, 6, 1, nan, 2, 1, 4],\n 'B': [4, 5, 2, 5, nan, 9, 5]},\n index = [6, 5, 4, 3, 2, 1, nan])\n assert_frame_equal(sorted_df, expected)\n\n # NaN label, ascending=False, na_position='first'\n sorted_df = df.sort(kind='quicksort', ascending=False, na_position='first')\n expected = DataFrame({'A': [4, 8, 6, 1, nan, 2, 1],\n 'B': [5, 4, 5, 2, 5, nan, 9]},\n index = [nan, 6, 5, 4, 3, 2, 1])\n assert_frame_equal(sorted_df, expected)\n\n def test_stable_descending_sort(self):\n # GH #6399\n df = DataFrame([[2, 'first'], [2, 'second'], [1, 'a'], [1, 'b']],\n 
columns=['sort_col', 'order'])\n sorted_df = df.sort_index(by='sort_col', kind='mergesort',\n ascending=False)\n assert_frame_equal(df, sorted_df)\n\n def test_stable_descending_multicolumn_sort(self):\n nan = np.nan\n df = DataFrame({'A': [1, 2, nan, 1, 6, 8, 4],\n 'B': [9, nan, 5, 2, 5, 4, 5]})\n # test stable mergesort\n expected = DataFrame(\n {'A': [nan, 8, 6, 4, 2, 1, 1],\n 'B': [5, 4, 5, 5, nan, 2, 9]},\n index=[2, 5, 4, 6, 1, 3, 0])\n sorted_df = df.sort(['A','B'], ascending=[0,1], na_position='first',\n kind='mergesort')\n assert_frame_equal(sorted_df, expected)\n\n expected = DataFrame(\n {'A': [nan, 8, 6, 4, 2, 1, 1],\n 'B': [5, 4, 5, 5, nan, 9, 2]},\n index=[2, 5, 4, 6, 1, 0, 3])\n sorted_df = df.sort(['A','B'], ascending=[0,0], na_position='first',\n kind='mergesort')\n assert_frame_equal(sorted_df, expected)\n\n def test_sort_index_multicolumn(self):\n import random\n A = np.arange(5).repeat(20)\n B = np.tile(np.arange(5), 20)\n random.shuffle(A)\n random.shuffle(B)\n frame = DataFrame({'A': A, 'B': B,\n 'C': np.random.randn(100)})\n\n result = frame.sort_index(by=['A', 'B'])\n indexer = np.lexsort((frame['B'], frame['A']))\n expected = frame.take(indexer)\n assert_frame_equal(result, expected)\n\n result = frame.sort_index(by=['A', 'B'], ascending=False)\n indexer = np.lexsort((frame['B'].rank(ascending=False),\n frame['A'].rank(ascending=False)))\n expected = frame.take(indexer)\n assert_frame_equal(result, expected)\n\n result = frame.sort_index(by=['B', 'A'])\n indexer = np.lexsort((frame['A'], frame['B']))\n expected = frame.take(indexer)\n assert_frame_equal(result, expected)\n\n def test_sort_index_inplace(self):\n frame = DataFrame(np.random.randn(4, 4), index=[1, 2, 3, 4],\n columns=['A', 'B', 'C', 'D'])\n\n # axis=0\n unordered = frame.ix[[3, 2, 4, 1]]\n a_id = id(unordered['A'])\n df = unordered.copy()\n df.sort_index(inplace=True)\n expected = frame\n assert_frame_equal(df, expected)\n self.assertNotEqual(a_id, id(df['A']))\n\n df = unordered.copy()\n df.sort_index(ascending=False, inplace=True)\n expected = frame[::-1]\n assert_frame_equal(df, expected)\n\n # axis=1\n unordered = frame.ix[:, ['D', 'B', 'C', 'A']]\n df = unordered.copy()\n df.sort_index(axis=1, inplace=True)\n expected = frame\n assert_frame_equal(df, expected)\n\n df = unordered.copy()\n df.sort_index(axis=1, ascending=False, inplace=True)\n expected = frame.ix[:, ::-1]\n assert_frame_equal(df, expected)\n\n def test_sort_index_different_sortorder(self):\n import random\n A = np.arange(20).repeat(5)\n B = np.tile(np.arange(5), 20)\n\n indexer = np.random.permutation(100)\n A = A.take(indexer)\n B = B.take(indexer)\n\n df = DataFrame({'A': A, 'B': B,\n 'C': np.random.randn(100)})\n\n result = df.sort_index(by=['A', 'B'], ascending=[1, 0])\n\n ex_indexer = np.lexsort((df.B.max() - df.B, df.A))\n expected = df.take(ex_indexer)\n assert_frame_equal(result, expected)\n\n # test with multiindex, too\n idf = df.set_index(['A', 'B'])\n\n result = idf.sort_index(ascending=[1, 0])\n expected = idf.take(ex_indexer)\n assert_frame_equal(result, expected)\n\n # also, Series!\n result = idf['C'].sort_index(ascending=[1, 0])\n assert_series_equal(result, expected['C'])\n\n def test_sort_inplace(self):\n frame = DataFrame(np.random.randn(4, 4), index=[1, 2, 3, 4],\n columns=['A', 'B', 'C', 'D'])\n\n sorted_df = frame.copy()\n sorted_df.sort(columns='A', inplace=True)\n expected = frame.sort_index(by='A')\n assert_frame_equal(sorted_df, expected)\n\n sorted_df = frame.copy()\n sorted_df.sort(columns='A', 
ascending=False, inplace=True)\n expected = frame.sort_index(by='A', ascending=False)\n assert_frame_equal(sorted_df, expected)\n\n sorted_df = frame.copy()\n sorted_df.sort(columns=['A', 'B'], ascending=False, inplace=True)\n expected = frame.sort_index(by=['A', 'B'], ascending=False)\n assert_frame_equal(sorted_df, expected)\n\n def test_sort_index_duplicates(self):\n df = DataFrame([lrange(5,9), lrange(4)],\n columns=['a', 'a', 'b', 'b'])\n\n with assertRaisesRegexp(ValueError, 'duplicate'):\n df.sort_index(by='a')\n with assertRaisesRegexp(ValueError, 'duplicate'):\n df.sort_index(by=['a'])\n with assertRaisesRegexp(ValueError, 'duplicate'):\n # multi-column 'by' is separate codepath\n df.sort_index(by=['a', 'b'])\n\n # with multi-index\n # GH4370\n df = DataFrame(np.random.randn(4,2),columns=MultiIndex.from_tuples([('a',0),('a',1)]))\n with assertRaisesRegexp(ValueError, 'levels'):\n df.sort_index(by='a')\n\n # convert tuples to a list of tuples\n expected = df.sort_index(by=[('a',1)])\n result = df.sort_index(by=('a',1))\n assert_frame_equal(result, expected)\n\n def test_sortlevel(self):\n mi = MultiIndex.from_tuples([[1, 1, 3], [1, 1, 1]], names=list('ABC'))\n df = DataFrame([[1, 2], [3, 4]], mi)\n res = df.sortlevel('A', sort_remaining=False)\n assert_frame_equal(df, res)\n\n res = df.sortlevel(['A', 'B'], sort_remaining=False)\n assert_frame_equal(df, res)\n\n def test_sort_datetimes(self):\n\n # GH 3461, argsort / lexsort differences for a datetime column\n df = DataFrame(['a','a','a','b','c','d','e','f','g'],\n columns=['A'],\n index=date_range('20130101',periods=9))\n dts = [Timestamp(x)\n for x in ['2004-02-11','2004-01-21','2004-01-26',\n '2005-09-20','2010-10-04','2009-05-12',\n '2008-11-12','2010-09-28','2010-09-28']]\n df['B'] = dts[::2] + dts[1::2]\n df['C'] = 2.\n df['A1'] = 3.\n\n df1 = df.sort(columns='A')\n df2 = df.sort(columns=['A'])\n assert_frame_equal(df1,df2)\n\n df1 = df.sort(columns='B')\n df2 = df.sort(columns=['B'])\n assert_frame_equal(df1,df2)\n\n def test_frame_column_inplace_sort_exception(self):\n s = self.frame['A']\n with assertRaisesRegexp(ValueError, \"This Series is a view\"):\n s.sort()\n\n cp = s.copy()\n cp.sort() # it works!\n\n def test_combine_first(self):\n # disjoint\n head, tail = self.frame[:5], self.frame[5:]\n\n combined = head.combine_first(tail)\n reordered_frame = self.frame.reindex(combined.index)\n assert_frame_equal(combined, reordered_frame)\n self.assertTrue(tm.equalContents(combined.columns, self.frame.columns))\n assert_series_equal(combined['A'], reordered_frame['A'])\n\n # same index\n fcopy = self.frame.copy()\n fcopy['A'] = 1\n del fcopy['C']\n\n fcopy2 = self.frame.copy()\n fcopy2['B'] = 0\n del fcopy2['D']\n\n combined = fcopy.combine_first(fcopy2)\n\n self.assertTrue((combined['A'] == 1).all())\n assert_series_equal(combined['B'], fcopy['B'])\n assert_series_equal(combined['C'], fcopy2['C'])\n assert_series_equal(combined['D'], fcopy['D'])\n\n # overlap\n head, tail = reordered_frame[:10].copy(), reordered_frame\n head['A'] = 1\n\n combined = head.combine_first(tail)\n self.assertTrue((combined['A'][:10] == 1).all())\n\n # reverse overlap\n tail['A'][:10] = 0\n combined = tail.combine_first(head)\n self.assertTrue((combined['A'][:10] == 0).all())\n\n # no overlap\n f = self.frame[:10]\n g = self.frame[10:]\n combined = f.combine_first(g)\n assert_series_equal(combined['A'].reindex(f.index), f['A'])\n assert_series_equal(combined['A'].reindex(g.index), g['A'])\n\n # corner cases\n comb = 
self.frame.combine_first(self.empty)\n assert_frame_equal(comb, self.frame)\n\n comb = self.empty.combine_first(self.frame)\n assert_frame_equal(comb, self.frame)\n\n comb = self.frame.combine_first(DataFrame(index=[\"faz\", \"boo\"]))\n self.assertTrue(\"faz\" in comb.index)\n\n # #2525\n df = DataFrame({'a': [1]}, index=[datetime(2012, 1, 1)])\n df2 = DataFrame({}, columns=['b'])\n result = df.combine_first(df2)\n self.assertTrue('b' in result)\n\n def test_combine_first_mixed_bug(self):\n idx = Index(['a', 'b', 'c', 'e'])\n ser1 = Series([5.0, -9.0, 4.0, 100.], index=idx)\n ser2 = Series(['a', 'b', 'c', 'e'], index=idx)\n ser3 = Series([12, 4, 5, 97], index=idx)\n\n frame1 = DataFrame({\"col0\": ser1,\n \"col2\": ser2,\n \"col3\": ser3})\n\n idx = Index(['a', 'b', 'c', 'f'])\n ser1 = Series([5.0, -9.0, 4.0, 100.], index=idx)\n ser2 = Series(['a', 'b', 'c', 'f'], index=idx)\n ser3 = Series([12, 4, 5, 97], index=idx)\n\n frame2 = DataFrame({\"col1\": ser1,\n \"col2\": ser2,\n \"col5\": ser3})\n\n combined = frame1.combine_first(frame2)\n self.assertEqual(len(combined.columns), 5)\n\n # gh 3016 (same as in update)\n df = DataFrame([[1.,2.,False, True],[4.,5.,True,False]],\n columns=['A','B','bool1','bool2'])\n\n other = DataFrame([[45,45]],index=[0],columns=['A','B'])\n result = df.combine_first(other)\n assert_frame_equal(result, df)\n\n df.ix[0,'A'] = np.nan\n result = df.combine_first(other)\n df.ix[0,'A'] = 45\n assert_frame_equal(result, df)\n\n # doc example\n df1 = DataFrame({'A' : [1., np.nan, 3., 5., np.nan],\n 'B' : [np.nan, 2., 3., np.nan, 6.]})\n\n df2 = DataFrame({'A' : [5., 2., 4., np.nan, 3., 7.],\n 'B' : [np.nan, np.nan, 3., 4., 6., 8.]})\n\n result = df1.combine_first(df2)\n expected = DataFrame({ 'A' : [1,2,3,5,3,7.], 'B' : [np.nan,2,3,4,6,8] })\n assert_frame_equal(result,expected)\n\n # GH3552, return object dtype with bools\n df1 = DataFrame([[np.nan, 3.,True], [-4.6, np.nan, True], [np.nan, 7., False]])\n df2 = DataFrame([[-42.6, np.nan, True], [-5., 1.6, False]], index=[1, 2])\n\n result = df1.combine_first(df2)[2]\n expected = Series([True,True,False])\n assert_series_equal(result,expected)\n\n # GH 3593, converting datetime64[ns] incorrecly\n df0 = DataFrame({\"a\":[datetime(2000, 1, 1), datetime(2000, 1, 2), datetime(2000, 1, 3)]})\n df1 = DataFrame({\"a\":[None, None, None]})\n df2 = df1.combine_first(df0)\n assert_frame_equal(df2,df0)\n\n df2 = df0.combine_first(df1)\n assert_frame_equal(df2,df0)\n\n df0 = DataFrame({\"a\":[datetime(2000, 1, 1), datetime(2000, 1, 2), datetime(2000, 1, 3)]})\n df1 = DataFrame({\"a\":[datetime(2000, 1, 2), None, None]})\n df2 = df1.combine_first(df0)\n result = df0.copy()\n result.iloc[0,:] = df1.iloc[0,:]\n assert_frame_equal(df2,result)\n\n df2 = df0.combine_first(df1)\n assert_frame_equal(df2,df0)\n\n def test_update(self):\n df = DataFrame([[1.5, nan, 3.],\n [1.5, nan, 3.],\n [1.5, nan, 3],\n [1.5, nan, 3]])\n\n other = DataFrame([[3.6, 2., np.nan],\n [np.nan, np.nan, 7]], index=[1, 3])\n\n df.update(other)\n\n expected = DataFrame([[1.5, nan, 3],\n [3.6, 2, 3],\n [1.5, nan, 3],\n [1.5, nan, 7.]])\n assert_frame_equal(df, expected)\n\n def test_update_dtypes(self):\n\n # gh 3016\n df = DataFrame([[1.,2.,False, True],[4.,5.,True,False]],\n columns=['A','B','bool1','bool2'])\n\n other = DataFrame([[45,45]],index=[0],columns=['A','B'])\n df.update(other)\n\n expected = DataFrame([[45.,45.,False, True],[4.,5.,True,False]],\n columns=['A','B','bool1','bool2'])\n assert_frame_equal(df, expected)\n\n def 
test_update_nooverwrite(self):\n df = DataFrame([[1.5, nan, 3.],\n [1.5, nan, 3.],\n [1.5, nan, 3],\n [1.5, nan, 3]])\n\n other = DataFrame([[3.6, 2., np.nan],\n [np.nan, np.nan, 7]], index=[1, 3])\n\n df.update(other, overwrite=False)\n\n expected = DataFrame([[1.5, nan, 3],\n [1.5, 2, 3],\n [1.5, nan, 3],\n [1.5, nan, 3.]])\n assert_frame_equal(df, expected)\n\n def test_update_filtered(self):\n df = DataFrame([[1.5, nan, 3.],\n [1.5, nan, 3.],\n [1.5, nan, 3],\n [1.5, nan, 3]])\n\n other = DataFrame([[3.6, 2., np.nan],\n [np.nan, np.nan, 7]], index=[1, 3])\n\n df.update(other, filter_func=lambda x: x > 2)\n\n expected = DataFrame([[1.5, nan, 3],\n [1.5, nan, 3],\n [1.5, nan, 3],\n [1.5, nan, 7.]])\n assert_frame_equal(df, expected)\n\n def test_update_raise(self):\n df = DataFrame([[1.5, 1, 3.],\n [1.5, nan, 3.],\n [1.5, nan, 3],\n [1.5, nan, 3]])\n\n other = DataFrame([[2., nan],\n [nan, 7]], index=[1, 3], columns=[1, 2])\n with assertRaisesRegexp(ValueError, \"Data overlaps\"):\n df.update(other, raise_conflict=True)\n\n def test_update_from_non_df(self):\n d = {'a': Series([1, 2, 3, 4]), 'b': Series([5, 6, 7, 8])}\n df = DataFrame(d)\n\n d['a'] = Series([5, 6, 7, 8])\n df.update(d)\n\n expected = DataFrame(d)\n\n assert_frame_equal(df, expected)\n\n d = {'a': [1, 2, 3, 4], 'b': [5, 6, 7, 8]}\n df = DataFrame(d)\n\n d['a'] = [5, 6, 7, 8]\n df.update(d)\n\n expected = DataFrame(d)\n\n assert_frame_equal(df, expected)\n\n def test_combineAdd(self):\n # trivial\n comb = self.frame.combineAdd(self.frame)\n assert_frame_equal(comb, self.frame * 2)\n\n # more rigorous\n a = DataFrame([[1., nan, nan, 2., nan]],\n columns=np.arange(5))\n b = DataFrame([[2., 3., nan, 2., 6., nan]],\n columns=np.arange(6))\n expected = DataFrame([[3., 3., nan, 4., 6., nan]],\n columns=np.arange(6))\n\n result = a.combineAdd(b)\n assert_frame_equal(result, expected)\n result2 = a.T.combineAdd(b.T)\n assert_frame_equal(result2, expected.T)\n\n expected2 = a.combine(b, operator.add, fill_value=0.)\n assert_frame_equal(expected, expected2)\n\n # corner cases\n comb = self.frame.combineAdd(self.empty)\n assert_frame_equal(comb, self.frame)\n\n comb = self.empty.combineAdd(self.frame)\n assert_frame_equal(comb, self.frame)\n\n # integer corner case\n df1 = DataFrame({'x': [5]})\n df2 = DataFrame({'x': [1]})\n df3 = DataFrame({'x': [6]})\n comb = df1.combineAdd(df2)\n assert_frame_equal(comb, df3)\n\n # mixed type GH2191\n df1 = DataFrame({'A': [1, 2], 'B': [3, 4]})\n df2 = DataFrame({'A': [1, 2], 'C': [5, 6]})\n rs = df1.combineAdd(df2)\n xp = DataFrame({'A': [2, 4], 'B': [3, 4.], 'C': [5, 6.]})\n assert_frame_equal(xp, rs)\n\n # TODO: test integer fill corner?\n\n def test_combineMult(self):\n # trivial\n comb = self.frame.combineMult(self.frame)\n\n assert_frame_equal(comb, self.frame ** 2)\n\n # corner cases\n comb = self.frame.combineMult(self.empty)\n assert_frame_equal(comb, self.frame)\n\n comb = self.empty.combineMult(self.frame)\n assert_frame_equal(comb, self.frame)\n\n def test_combine_generic(self):\n df1 = self.frame\n df2 = self.frame.ix[:-5, ['A', 'B', 'C']]\n\n combined = df1.combine(df2, np.add)\n combined2 = df2.combine(df1, np.add)\n self.assertTrue(combined['D'].isnull().all())\n self.assertTrue(combined2['D'].isnull().all())\n\n chunk = combined.ix[:-5, ['A', 'B', 'C']]\n chunk2 = combined2.ix[:-5, ['A', 'B', 'C']]\n\n exp = self.frame.ix[:-5, ['A', 'B', 'C']].reindex_like(chunk) * 2\n assert_frame_equal(chunk, exp)\n assert_frame_equal(chunk2, exp)\n\n def test_clip(self):\n median = 
self.frame.median().median()\n\n capped = self.frame.clip_upper(median)\n self.assertFalse((capped.values > median).any())\n\n floored = self.frame.clip_lower(median)\n self.assertFalse((floored.values < median).any())\n\n double = self.frame.clip(upper=median, lower=median)\n self.assertFalse((double.values != median).any())\n\n def test_dataframe_clip(self):\n\n # GH #2747\n df = DataFrame(np.random.randn(1000,2))\n\n for lb, ub in [(-1,1),(1,-1)]:\n clipped_df = df.clip(lb, ub)\n\n lb, ub = min(lb,ub), max(ub,lb)\n lb_mask = df.values <= lb\n ub_mask = df.values >= ub\n mask = ~lb_mask & ~ub_mask\n self.assertTrue((clipped_df.values[lb_mask] == lb).all() == True)\n self.assertTrue((clipped_df.values[ub_mask] == ub).all() == True)\n self.assertTrue((clipped_df.values[mask] == df.values[mask]).all() == True)\n\n def test_get_X_columns(self):\n # numeric and object columns\n\n df = DataFrame({'a': [1, 2, 3],\n 'b' : [True, False, True],\n 'c': ['foo', 'bar', 'baz'],\n 'd': [None, None, None],\n 'e': [3.14, 0.577, 2.773]})\n\n self.assert_numpy_array_equal(df._get_numeric_data().columns,\n ['a', 'b', 'e'])\n\n def test_is_mixed_type(self):\n self.assertFalse(self.frame._is_mixed_type)\n self.assertTrue(self.mixed_frame._is_mixed_type)\n\n def test_get_numeric_data(self):\n intname = np.dtype(np.int_).name\n floatname = np.dtype(np.float_).name\n datetime64name = np.dtype('M8[ns]').name\n objectname = np.dtype(np.object_).name\n\n df = DataFrame({'a': 1., 'b': 2, 'c': 'foo', 'f' : Timestamp('20010102')},\n index=np.arange(10))\n result = df.get_dtype_counts()\n expected = Series({'int64': 1, 'float64' : 1, datetime64name: 1, objectname : 1})\n result.sort_index()\n expected.sort_index()\n assert_series_equal(result, expected)\n\n df = DataFrame({'a': 1., 'b': 2, 'c': 'foo',\n 'd' : np.array([1.]*10,dtype='float32'),\n 'e' : np.array([1]*10,dtype='int32'),\n 'f' : np.array([1]*10,dtype='int16'),\n 'g' : Timestamp('20010102')},\n index=np.arange(10))\n\n result = df._get_numeric_data()\n expected = df.ix[:, ['a', 'b','d','e','f']]\n assert_frame_equal(result, expected)\n\n only_obj = df.ix[:, ['c','g']]\n result = only_obj._get_numeric_data()\n expected = df.ix[:, []]\n assert_frame_equal(result, expected)\n\n df = DataFrame.from_dict({'a':[1,2], 'b':['foo','bar'],'c':[np.pi,np.e]})\n result = df._get_numeric_data()\n expected = DataFrame.from_dict({'a':[1,2], 'c':[np.pi,np.e]})\n assert_frame_equal(result, expected)\n\n df = result.copy()\n result = df._get_numeric_data()\n expected = df\n assert_frame_equal(result, expected)\n\n def test_bool_describe_in_mixed_frame(self):\n df = DataFrame({\n 'string_data': ['a', 'b', 'c', 'd', 'e'],\n 'bool_data': [True, True, False, False, False],\n 'int_data': [10, 20, 30, 40, 50],\n })\n\n # Boolean data and integer data is included in .describe() output, string data isn't\n self.assert_numpy_array_equal(df.describe().columns, ['bool_data', 'int_data'])\n\n bool_describe = df.describe()['bool_data']\n\n # Both the min and the max values should stay booleans\n self.assertEqual(bool_describe['min'].dtype, np.bool_)\n self.assertEqual(bool_describe['max'].dtype, np.bool_)\n\n self.assertFalse(bool_describe['min'])\n self.assertTrue(bool_describe['max'])\n\n # For numeric operations, like mean or median, the values True/False are cast to\n # the integer values 1 and 0\n assert_almost_equal(bool_describe['mean'], 0.4)\n assert_almost_equal(bool_describe['50%'], 0)\n\n def test_reduce_mixed_frame(self):\n # GH 6806\n df = DataFrame({\n 'bool_data': [True, 
True, False, False, False],\n 'int_data': [10, 20, 30, 40, 50],\n 'string_data': ['a', 'b', 'c', 'd', 'e'],\n })\n df.reindex(columns=['bool_data', 'int_data', 'string_data'])\n test = df.sum(axis=0)\n assert_almost_equal(test.values, [2, 150, 'abcde'])\n assert_series_equal(test, df.T.sum(axis=1))\n\n def test_count(self):\n f = lambda s: notnull(s).sum()\n self._check_stat_op('count', f,\n has_skipna=False,\n has_numeric_only=True,\n check_dtype=False,\n check_dates=True)\n\n # corner case\n frame = DataFrame()\n ct1 = frame.count(1)\n tm.assert_isinstance(ct1, Series)\n\n ct2 = frame.count(0)\n tm.assert_isinstance(ct2, Series)\n\n # GH #423\n df = DataFrame(index=lrange(10))\n result = df.count(1)\n expected = Series(0, index=df.index)\n assert_series_equal(result, expected)\n\n df = DataFrame(columns=lrange(10))\n result = df.count(0)\n expected = Series(0, index=df.columns)\n assert_series_equal(result, expected)\n\n df = DataFrame()\n result = df.count()\n expected = Series(0, index=[])\n assert_series_equal(result, expected)\n\n def test_sum(self):\n self._check_stat_op('sum', np.sum, has_numeric_only=True)\n\n # mixed types (with upcasting happening)\n self._check_stat_op('sum', np.sum, frame=self.mixed_float.astype('float32'),\n has_numeric_only=True, check_dtype=False, check_less_precise=True)\n\n def test_stat_operators_attempt_obj_array(self):\n data = {\n 'a': [-0.00049987540199591344, -0.0016467257772919831,\n 0.00067695870775883013],\n 'b': [-0, -0, 0.0],\n 'c': [0.00031111847529610595, 0.0014902627951905339,\n -0.00094099200035979691]\n }\n df1 = DataFrame(data, index=['foo', 'bar', 'baz'],\n dtype='O')\n methods = ['sum', 'mean', 'prod', 'var', 'std', 'skew', 'min', 'max']\n\n # GH #676\n df2 = DataFrame({0: [np.nan, 2], 1: [np.nan, 3],\n 2: [np.nan, 4]}, dtype=object)\n\n for df in [df1, df2]:\n for meth in methods:\n self.assertEqual(df.values.dtype, np.object_)\n result = getattr(df, meth)(1)\n expected = getattr(df.astype('f8'), meth)(1)\n assert_series_equal(result, expected)\n\n def test_mean(self):\n self._check_stat_op('mean', np.mean, check_dates=True)\n\n def test_product(self):\n self._check_stat_op('product', np.prod)\n\n def test_median(self):\n def wrapper(x):\n if isnull(x).any():\n return np.nan\n return np.median(x)\n\n self._check_stat_op('median', wrapper, check_dates=True)\n\n def test_min(self):\n self._check_stat_op('min', np.min, check_dates=True)\n self._check_stat_op('min', np.min, frame=self.intframe)\n\n def test_cummin(self):\n self.tsframe.ix[5:10, 0] = nan\n self.tsframe.ix[10:15, 1] = nan\n self.tsframe.ix[15:, 2] = nan\n\n # axis = 0\n cummin = self.tsframe.cummin()\n expected = self.tsframe.apply(Series.cummin)\n assert_frame_equal(cummin, expected)\n\n # axis = 1\n cummin = self.tsframe.cummin(axis=1)\n expected = self.tsframe.apply(Series.cummin, axis=1)\n assert_frame_equal(cummin, expected)\n\n # works\n df = DataFrame({'A': np.arange(20)}, index=np.arange(20))\n result = df.cummin()\n\n # fix issue\n cummin_xs = self.tsframe.cummin(axis=1)\n self.assertEqual(np.shape(cummin_xs), np.shape(self.tsframe))\n\n def test_cummax(self):\n self.tsframe.ix[5:10, 0] = nan\n self.tsframe.ix[10:15, 1] = nan\n self.tsframe.ix[15:, 2] = nan\n\n # axis = 0\n cummax = self.tsframe.cummax()\n expected = self.tsframe.apply(Series.cummax)\n assert_frame_equal(cummax, expected)\n\n # axis = 1\n cummax = self.tsframe.cummax(axis=1)\n expected = self.tsframe.apply(Series.cummax, axis=1)\n assert_frame_equal(cummax, expected)\n\n # works\n df = 
DataFrame({'A': np.arange(20)}, index=np.arange(20))\n result = df.cummax()\n\n # fix issue\n cummax_xs = self.tsframe.cummax(axis=1)\n self.assertEqual(np.shape(cummax_xs), np.shape(self.tsframe))\n\n def test_max(self):\n self._check_stat_op('max', np.max, check_dates=True)\n self._check_stat_op('max', np.max, frame=self.intframe)\n\n def test_mad(self):\n f = lambda x: np.abs(x - x.mean()).mean()\n self._check_stat_op('mad', f)\n\n def test_var_std(self):\n alt = lambda x: np.var(x, ddof=1)\n self._check_stat_op('var', alt)\n\n alt = lambda x: np.std(x, ddof=1)\n self._check_stat_op('std', alt)\n\n result = self.tsframe.std(ddof=4)\n expected = self.tsframe.apply(lambda x: x.std(ddof=4))\n assert_almost_equal(result, expected)\n\n result = self.tsframe.var(ddof=4)\n expected = self.tsframe.apply(lambda x: x.var(ddof=4))\n assert_almost_equal(result, expected)\n\n arr = np.repeat(np.random.random((1, 1000)), 1000, 0)\n result = nanops.nanvar(arr, axis=0)\n self.assertFalse((result < 0).any())\n if nanops._USE_BOTTLENECK:\n nanops._USE_BOTTLENECK = False\n result = nanops.nanvar(arr, axis=0)\n self.assertFalse((result < 0).any())\n nanops._USE_BOTTLENECK = True\n\n def test_sem(self):\n alt = lambda x: np.std(x, ddof=1)/np.sqrt(len(x))\n self._check_stat_op('sem', alt)\n\n result = self.tsframe.sem(ddof=4)\n expected = self.tsframe.apply(lambda x: x.std(ddof=4)/np.sqrt(len(x)))\n assert_almost_equal(result, expected)\n\n arr = np.repeat(np.random.random((1, 1000)), 1000, 0)\n result = nanops.nansem(arr, axis=0)\n self.assertFalse((result < 0).any())\n if nanops._USE_BOTTLENECK:\n nanops._USE_BOTTLENECK = False\n result = nanops.nansem(arr, axis=0)\n self.assertFalse((result < 0).any())\n nanops._USE_BOTTLENECK = True\n\n def test_skew(self):\n tm._skip_if_no_scipy()\n from scipy.stats import skew\n\n def alt(x):\n if len(x) < 3:\n return np.nan\n return skew(x, bias=False)\n\n self._check_stat_op('skew', alt)\n\n def test_kurt(self):\n tm._skip_if_no_scipy()\n\n from scipy.stats import kurtosis\n\n def alt(x):\n if len(x) < 4:\n return np.nan\n return kurtosis(x, bias=False)\n\n self._check_stat_op('kurt', alt)\n\n index = MultiIndex(levels=[['bar'], ['one', 'two', 'three'], [0, 1]],\n labels=[[0, 0, 0, 0, 0, 0],\n [0, 1, 2, 0, 1, 2],\n [0, 1, 0, 1, 0, 1]])\n df = DataFrame(np.random.randn(6, 3), index=index)\n assert_series_equal(df.kurt(), df.kurt(level=0).xs('bar'))\n\n def _check_stat_op(self, name, alternative, frame=None, has_skipna=True,\n has_numeric_only=False, check_dtype=True, check_dates=False,\n check_less_precise=False):\n if frame is None:\n frame = self.frame\n # set some NAs\n frame.ix[5:10] = np.nan\n frame.ix[15:20, -2:] = np.nan\n\n f = getattr(frame, name)\n\n if check_dates:\n df = DataFrame({'b': date_range('1/1/2001', periods=2)})\n _f = getattr(df, name)\n result = _f()\n self.assertIsInstance(result, Series)\n\n df['a'] = lrange(len(df))\n result = getattr(df, name)()\n self.assertIsInstance(result, Series)\n self.assertTrue(len(result))\n\n if has_skipna:\n def skipna_wrapper(x):\n nona = x.dropna()\n if len(nona) == 0:\n return np.nan\n return alternative(nona)\n\n def wrapper(x):\n return alternative(x.values)\n\n result0 = f(axis=0, skipna=False)\n result1 = f(axis=1, skipna=False)\n assert_series_equal(result0, frame.apply(wrapper),\n check_dtype=check_dtype,\n check_less_precise=check_less_precise)\n assert_series_equal(result1, frame.apply(wrapper, axis=1),\n check_dtype=False,\n check_less_precise=check_less_precise) # HACK: win32\n else:\n skipna_wrapper 
= alternative\n wrapper = alternative\n\n result0 = f(axis=0)\n result1 = f(axis=1)\n assert_series_equal(result0, frame.apply(skipna_wrapper),\n check_dtype=check_dtype,\n check_less_precise=check_less_precise)\n assert_series_equal(result1, frame.apply(skipna_wrapper, axis=1),\n check_dtype=False,\n check_less_precise=check_less_precise)\n\n # check dtypes\n if check_dtype:\n lcd_dtype = frame.values.dtype\n self.assertEqual(lcd_dtype, result0.dtype)\n self.assertEqual(lcd_dtype, result1.dtype)\n\n # result = f(axis=1)\n # comp = frame.apply(alternative, axis=1).reindex(result.index)\n # assert_series_equal(result, comp)\n\n # bad axis\n assertRaisesRegexp(ValueError, 'No axis named 2', f, axis=2)\n # make sure works on mixed-type frame\n getattr(self.mixed_frame, name)(axis=0)\n getattr(self.mixed_frame, name)(axis=1)\n\n if has_numeric_only:\n getattr(self.mixed_frame, name)(axis=0, numeric_only=True)\n getattr(self.mixed_frame, name)(axis=1, numeric_only=True)\n getattr(self.frame, name)(axis=0, numeric_only=False)\n getattr(self.frame, name)(axis=1, numeric_only=False)\n\n # all NA case\n if has_skipna:\n all_na = self.frame * np.NaN\n r0 = getattr(all_na, name)(axis=0)\n r1 = getattr(all_na, name)(axis=1)\n self.assertTrue(np.isnan(r0).all())\n self.assertTrue(np.isnan(r1).all())\n\n def test_mode(self):\n df = pd.DataFrame({\"A\": [12, 12, 11, 12, 19, 11],\n \"B\": [10, 10, 10, np.nan, 3, 4],\n \"C\": [8, 8, 8, 9, 9, 9],\n \"D\": range(6),\n \"E\": [8, 8, 1, 1, 3, 3]})\n assert_frame_equal(df[[\"A\"]].mode(),\n pd.DataFrame({\"A\": [12]}))\n assert_frame_equal(df[[\"D\"]].mode(),\n pd.DataFrame(pd.Series([], dtype=\"int64\"),\n columns=[\"D\"]))\n assert_frame_equal(df[[\"E\"]].mode(),\n pd.DataFrame(pd.Series([1, 3, 8], dtype=\"int64\"),\n columns=[\"E\"]))\n assert_frame_equal(df[[\"A\", \"B\"]].mode(),\n pd.DataFrame({\"A\": [12], \"B\": [10.]}))\n assert_frame_equal(df.mode(),\n pd.DataFrame({\"A\": [12, np.nan, np.nan],\n \"B\": [10, np.nan, np.nan],\n \"C\": [8, 9, np.nan],\n \"D\": [np.nan, np.nan, np.nan],\n \"E\": [1, 3, 8]}))\n\n # outputs in sorted order\n df[\"C\"] = list(reversed(df[\"C\"]))\n com.pprint_thing(df[\"C\"])\n com.pprint_thing(df[\"C\"].mode())\n a, b = (df[[\"A\", \"B\", \"C\"]].mode(),\n pd.DataFrame({\"A\": [12, np.nan],\n \"B\": [10, np.nan],\n \"C\": [8, 9]}))\n com.pprint_thing(a)\n com.pprint_thing(b)\n assert_frame_equal(a, b)\n # should work with heterogeneous types\n df = pd.DataFrame({\"A\": range(6),\n \"B\": pd.date_range('2011', periods=6),\n \"C\": list('abcdef')})\n exp = pd.DataFrame({\"A\": pd.Series([], dtype=df[\"A\"].dtype),\n \"B\": pd.Series([], dtype=df[\"B\"].dtype),\n \"C\": pd.Series([], dtype=df[\"C\"].dtype)})\n assert_frame_equal(df.mode(), exp)\n\n # and also when not empty\n df.loc[1, \"A\"] = 0\n df.loc[4, \"B\"] = df.loc[3, \"B\"]\n df.loc[5, \"C\"] = 'e'\n exp = pd.DataFrame({\"A\": pd.Series([0], dtype=df[\"A\"].dtype),\n \"B\": pd.Series([df.loc[3, \"B\"]], dtype=df[\"B\"].dtype),\n \"C\": pd.Series(['e'], dtype=df[\"C\"].dtype)})\n\n assert_frame_equal(df.mode(), exp)\n\n def test_sum_corner(self):\n axis0 = self.empty.sum(0)\n axis1 = self.empty.sum(1)\n tm.assert_isinstance(axis0, Series)\n tm.assert_isinstance(axis1, Series)\n self.assertEqual(len(axis0), 0)\n self.assertEqual(len(axis1), 0)\n\n def test_sum_object(self):\n values = self.frame.values.astype(int)\n frame = DataFrame(values, index=self.frame.index,\n columns=self.frame.columns)\n deltas = frame * timedelta(1)\n deltas.sum()\n\n def 
test_sum_bool(self):\n # ensure this works, bug report\n bools = np.isnan(self.frame)\n bools.sum(1)\n bools.sum(0)\n\n def test_mean_corner(self):\n # unit test when have object data\n the_mean = self.mixed_frame.mean(axis=0)\n the_sum = self.mixed_frame.sum(axis=0, numeric_only=True)\n self.assertTrue(the_sum.index.equals(the_mean.index))\n self.assertTrue(len(the_mean.index) < len(self.mixed_frame.columns))\n\n # xs sum mixed type, just want to know it works...\n the_mean = self.mixed_frame.mean(axis=1)\n the_sum = self.mixed_frame.sum(axis=1, numeric_only=True)\n self.assertTrue(the_sum.index.equals(the_mean.index))\n\n # take mean of boolean column\n self.frame['bool'] = self.frame['A'] > 0\n means = self.frame.mean(0)\n self.assertEqual(means['bool'], self.frame['bool'].values.mean())\n\n def test_stats_mixed_type(self):\n # don't blow up\n self.mixed_frame.std(1)\n self.mixed_frame.var(1)\n self.mixed_frame.mean(1)\n self.mixed_frame.skew(1)\n\n def test_median_corner(self):\n def wrapper(x):\n if isnull(x).any():\n return np.nan\n return np.median(x)\n\n self._check_stat_op('median', wrapper, frame=self.intframe,\n check_dtype=False, check_dates=True)\n\n def test_quantile(self):\n from numpy import percentile\n\n q = self.tsframe.quantile(0.1, axis=0)\n self.assertEqual(q['A'], percentile(self.tsframe['A'], 10))\n q = self.tsframe.quantile(0.9, axis=1)\n q = self.intframe.quantile(0.1)\n self.assertEqual(q['A'], percentile(self.intframe['A'], 10))\n\n # test degenerate case\n q = DataFrame({'x': [], 'y': []}).quantile(0.1, axis=0)\n assert(np.isnan(q['x']) and np.isnan(q['y']))\n\n # non-numeric exclusion\n df = DataFrame({'col1':['A','A','B','B'], 'col2':[1,2,3,4]})\n rs = df.quantile(0.5)\n xp = df.median()\n assert_series_equal(rs, xp)\n\n # axis\n df = DataFrame({\"A\": [1, 2, 3], \"B\": [2, 3, 4]}, index=[1, 2, 3])\n result = df.quantile(.5, axis=1)\n expected = Series([1.5, 2.5, 3.5], index=[1, 2, 3])\n assert_series_equal(result, expected)\n\n result = df.quantile([.5, .75], axis=1)\n expected = DataFrame({1: [1.5, 1.75], 2: [2.5, 2.75],\n 3: [3.5, 3.75]}, index=[\"0.5\", \"0.75\"])\n assert_frame_equal(result, expected)\n\n # We may want to break API in the future to change this\n # so that we exclude non-numeric along the same axis\n # See GH #7312\n df = DataFrame([[1, 2, 3],\n ['a', 'b', 4]])\n result = df.quantile(.5, axis=1)\n expected = Series([3., 4.], index=[0, 1])\n assert_series_equal(result, expected)\n\n def test_quantile_multi(self):\n df = DataFrame([[1, 1, 1], [2, 2, 2], [3, 3, 3]],\n columns=['a', 'b', 'c'])\n result = df.quantile([.25, .5])\n expected = DataFrame([[1.5, 1.5, 1.5], [2., 2., 2.]],\n index=[.25, .5], columns=['a', 'b', 'c'])\n assert_frame_equal(result, expected)\n\n # axis = 1\n result = df.quantile([.25, .5], axis=1)\n expected = DataFrame([[1.5, 1.5, 1.5], [2., 2., 2.]],\n index=[.25, .5], columns=[0, 1, 2])\n\n # empty\n result = DataFrame({'x': [], 'y': []}).quantile([0.1, .9], axis=0)\n expected = DataFrame({'x': [np.nan, np.nan], 'y': [np.nan, np.nan]},\n index=[.1, .9])\n assert_frame_equal(result, expected)\n\n def test_quantile_datetime(self):\n df = DataFrame({'a': pd.to_datetime(['2010', '2011']), 'b': [0, 5]})\n\n # exclude datetime\n result = df.quantile(.5)\n expected = Series([2.5], index=['b'])\n\n # datetime\n result = df.quantile(.5, numeric_only=False)\n expected = Series([Timestamp('2010-07-02 12:00:00'), 2.5],\n index=['a', 'b'])\n assert_series_equal(result, expected)\n\n # datetime w/ multi\n result = df.quantile([.5], 
numeric_only=False)\n expected = DataFrame([[Timestamp('2010-07-02 12:00:00'), 2.5]],\n index=[.5], columns=['a', 'b'])\n assert_frame_equal(result, expected)\n\n # axis = 1\n df['c'] = pd.to_datetime(['2011', '2012'])\n result = df[['a', 'c']].quantile(.5, axis=1, numeric_only=False)\n expected = Series([Timestamp('2010-07-02 12:00:00'),\n Timestamp('2011-07-02 12:00:00')],\n index=[0, 1])\n assert_series_equal(result, expected)\n\n result = df[['a', 'c']].quantile([.5], axis=1, numeric_only=False)\n expected = DataFrame([[Timestamp('2010-07-02 12:00:00'),\n Timestamp('2011-07-02 12:00:00')]],\n index=[0.5], columns=[0, 1])\n assert_frame_equal(result, expected)\n\n def test_cumsum(self):\n self.tsframe.ix[5:10, 0] = nan\n self.tsframe.ix[10:15, 1] = nan\n self.tsframe.ix[15:, 2] = nan\n\n # axis = 0\n cumsum = self.tsframe.cumsum()\n expected = self.tsframe.apply(Series.cumsum)\n assert_frame_equal(cumsum, expected)\n\n # axis = 1\n cumsum = self.tsframe.cumsum(axis=1)\n expected = self.tsframe.apply(Series.cumsum, axis=1)\n assert_frame_equal(cumsum, expected)\n\n # works\n df = DataFrame({'A': np.arange(20)}, index=np.arange(20))\n result = df.cumsum()\n\n # fix issue\n cumsum_xs = self.tsframe.cumsum(axis=1)\n self.assertEqual(np.shape(cumsum_xs), np.shape(self.tsframe))\n\n def test_cumprod(self):\n self.tsframe.ix[5:10, 0] = nan\n self.tsframe.ix[10:15, 1] = nan\n self.tsframe.ix[15:, 2] = nan\n\n # axis = 0\n cumprod = self.tsframe.cumprod()\n expected = self.tsframe.apply(Series.cumprod)\n assert_frame_equal(cumprod, expected)\n\n # axis = 1\n cumprod = self.tsframe.cumprod(axis=1)\n expected = self.tsframe.apply(Series.cumprod, axis=1)\n assert_frame_equal(cumprod, expected)\n\n # fix issue\n cumprod_xs = self.tsframe.cumprod(axis=1)\n self.assertEqual(np.shape(cumprod_xs), np.shape(self.tsframe))\n\n # ints\n df = self.tsframe.fillna(0).astype(int)\n df.cumprod(0)\n df.cumprod(1)\n\n # ints32\n df = self.tsframe.fillna(0).astype(np.int32)\n df.cumprod(0)\n df.cumprod(1)\n\n def test_rank(self):\n tm._skip_if_no_scipy()\n from scipy.stats import rankdata\n\n self.frame['A'][::2] = np.nan\n self.frame['B'][::3] = np.nan\n self.frame['C'][::4] = np.nan\n self.frame['D'][::5] = np.nan\n\n ranks0 = self.frame.rank()\n ranks1 = self.frame.rank(1)\n mask = np.isnan(self.frame.values)\n\n fvals = self.frame.fillna(np.inf).values\n\n exp0 = np.apply_along_axis(rankdata, 0, fvals)\n exp0[mask] = np.nan\n\n exp1 = np.apply_along_axis(rankdata, 1, fvals)\n exp1[mask] = np.nan\n\n assert_almost_equal(ranks0.values, exp0)\n assert_almost_equal(ranks1.values, exp1)\n\n # integers\n df = DataFrame(np.random.randint(0, 5, size=40).reshape((10, 4)))\n\n result = df.rank()\n exp = df.astype(float).rank()\n assert_frame_equal(result, exp)\n\n result = df.rank(1)\n exp = df.astype(float).rank(1)\n assert_frame_equal(result, exp)\n\n def test_rank2(self):\n from datetime import datetime\n df = DataFrame([[1, 3, 2], [1, 2, 3]])\n expected = DataFrame([[1.0, 3.0, 2.0], [1, 2, 3]]) / 3.0\n result = df.rank(1, pct=True)\n assert_frame_equal(result, expected)\n\n df = DataFrame([[1, 3, 2], [1, 2, 3]])\n expected = df.rank(0) / 2.0\n result = df.rank(0, pct=True)\n assert_frame_equal(result, expected)\n\n\n\n df = DataFrame([['b', 'c', 'a'], ['a', 'c', 'b']])\n expected = DataFrame([[2.0, 3.0, 1.0], [1, 3, 2]])\n result = df.rank(1, numeric_only=False)\n assert_frame_equal(result, expected)\n\n\n expected = DataFrame([[2.0, 1.5, 1.0], [1, 1.5, 2]])\n result = df.rank(0, numeric_only=False)\n 
assert_frame_equal(result, expected)\n\n df = DataFrame([['b', np.nan, 'a'], ['a', 'c', 'b']])\n expected = DataFrame([[2.0, nan, 1.0], [1.0, 3.0, 2.0]])\n result = df.rank(1, numeric_only=False)\n assert_frame_equal(result, expected)\n\n expected = DataFrame([[2.0, nan, 1.0], [1.0, 1.0, 2.0]])\n result = df.rank(0, numeric_only=False)\n assert_frame_equal(result, expected)\n\n # f7u12, this does not work without extensive workaround\n data = [[datetime(2001, 1, 5), nan, datetime(2001, 1, 2)],\n [datetime(2000, 1, 2), datetime(2000, 1, 3),\n datetime(2000, 1, 1)]]\n df = DataFrame(data)\n\n # check the rank\n expected = DataFrame([[2., nan, 1.],\n [2., 3., 1.]])\n result = df.rank(1, numeric_only=False)\n assert_frame_equal(result, expected)\n\n # mixed-type frames\n self.mixed_frame['datetime'] = datetime.now()\n self.mixed_frame['timedelta'] = timedelta(days=1,seconds=1)\n\n result = self.mixed_frame.rank(1)\n expected = self.mixed_frame.rank(1, numeric_only=True)\n assert_frame_equal(result, expected)\n\n df = DataFrame({\"a\":[1e-20, -5, 1e-20+1e-40, 10, 1e60, 1e80, 1e-30]})\n exp = DataFrame({\"a\":[ 3.5, 1. , 3.5, 5. , 6. , 7. , 2. ]})\n assert_frame_equal(df.rank(), exp)\n\n def test_rank_na_option(self):\n tm._skip_if_no_scipy()\n from scipy.stats import rankdata\n\n self.frame['A'][::2] = np.nan\n self.frame['B'][::3] = np.nan\n self.frame['C'][::4] = np.nan\n self.frame['D'][::5] = np.nan\n\n # bottom\n ranks0 = self.frame.rank(na_option='bottom')\n ranks1 = self.frame.rank(1, na_option='bottom')\n\n fvals = self.frame.fillna(np.inf).values\n\n exp0 = np.apply_along_axis(rankdata, 0, fvals)\n exp1 = np.apply_along_axis(rankdata, 1, fvals)\n\n assert_almost_equal(ranks0.values, exp0)\n assert_almost_equal(ranks1.values, exp1)\n\n # top\n ranks0 = self.frame.rank(na_option='top')\n ranks1 = self.frame.rank(1, na_option='top')\n\n fval0 = self.frame.fillna((self.frame.min() - 1).to_dict()).values\n fval1 = self.frame.T\n fval1 = fval1.fillna((fval1.min() - 1).to_dict()).T\n fval1 = fval1.fillna(np.inf).values\n\n exp0 = np.apply_along_axis(rankdata, 0, fval0)\n exp1 = np.apply_along_axis(rankdata, 1, fval1)\n\n assert_almost_equal(ranks0.values, exp0)\n assert_almost_equal(ranks1.values, exp1)\n\n # descending\n\n # bottom\n ranks0 = self.frame.rank(na_option='top', ascending=False)\n ranks1 = self.frame.rank(1, na_option='top', ascending=False)\n\n fvals = self.frame.fillna(np.inf).values\n\n exp0 = np.apply_along_axis(rankdata, 0, -fvals)\n exp1 = np.apply_along_axis(rankdata, 1, -fvals)\n\n assert_almost_equal(ranks0.values, exp0)\n assert_almost_equal(ranks1.values, exp1)\n\n # descending\n\n # top\n ranks0 = self.frame.rank(na_option='bottom', ascending=False)\n ranks1 = self.frame.rank(1, na_option='bottom', ascending=False)\n\n fval0 = self.frame.fillna((self.frame.min() - 1).to_dict()).values\n fval1 = self.frame.T\n fval1 = fval1.fillna((fval1.min() - 1).to_dict()).T\n fval1 = fval1.fillna(np.inf).values\n\n exp0 = np.apply_along_axis(rankdata, 0, -fval0)\n exp1 = np.apply_along_axis(rankdata, 1, -fval1)\n\n assert_almost_equal(ranks0.values, exp0)\n assert_almost_equal(ranks1.values, exp1)\n\n def test_axis_aliases(self):\n\n f = self.frame\n\n # reg name\n expected = f.sum(axis=0)\n result = f.sum(axis='index')\n assert_series_equal(result, expected)\n\n expected = f.sum(axis=1)\n result = f.sum(axis='columns')\n assert_series_equal(result, expected)\n\n def test_combine_first_mixed(self):\n a = Series(['a', 'b'], index=lrange(2))\n b = Series(lrange(2), 
index=lrange(2))\n f = DataFrame({'A': a, 'B': b})\n\n a = Series(['a', 'b'], index=lrange(5, 7))\n b = Series(lrange(2), index=lrange(5, 7))\n g = DataFrame({'A': a, 'B': b})\n\n combined = f.combine_first(g)\n\n def test_more_asMatrix(self):\n values = self.mixed_frame.as_matrix()\n self.assertEqual(values.shape[1], len(self.mixed_frame.columns))\n\n def test_reindex_boolean(self):\n frame = DataFrame(np.ones((10, 2), dtype=bool),\n index=np.arange(0, 20, 2),\n columns=[0, 2])\n\n reindexed = frame.reindex(np.arange(10))\n self.assertEqual(reindexed.values.dtype, np.object_)\n self.assertTrue(isnull(reindexed[0][1]))\n\n reindexed = frame.reindex(columns=lrange(3))\n self.assertEqual(reindexed.values.dtype, np.object_)\n self.assertTrue(isnull(reindexed[1]).all())\n\n def test_reindex_objects(self):\n reindexed = self.mixed_frame.reindex(columns=['foo', 'A', 'B'])\n self.assertIn('foo', reindexed)\n\n reindexed = self.mixed_frame.reindex(columns=['A', 'B'])\n self.assertNotIn('foo', reindexed)\n\n def test_reindex_corner(self):\n index = Index(['a', 'b', 'c'])\n dm = self.empty.reindex(index=[1, 2, 3])\n reindexed = dm.reindex(columns=index)\n self.assertTrue(reindexed.columns.equals(index))\n\n # ints are weird\n\n smaller = self.intframe.reindex(columns=['A', 'B', 'E'])\n self.assertEqual(smaller['E'].dtype, np.float64)\n\n def test_reindex_axis(self):\n cols = ['A', 'B', 'E']\n reindexed1 = self.intframe.reindex_axis(cols, axis=1)\n reindexed2 = self.intframe.reindex(columns=cols)\n assert_frame_equal(reindexed1, reindexed2)\n\n rows = self.intframe.index[0:5]\n reindexed1 = self.intframe.reindex_axis(rows, axis=0)\n reindexed2 = self.intframe.reindex(index=rows)\n assert_frame_equal(reindexed1, reindexed2)\n\n self.assertRaises(ValueError, self.intframe.reindex_axis, rows, axis=2)\n\n # no-op case\n cols = self.frame.columns.copy()\n newFrame = self.frame.reindex_axis(cols, axis=1)\n assert_frame_equal(newFrame, self.frame)\n\n def test_reindex_with_nans(self):\n df = DataFrame([[1, 2], [3, 4], [np.nan, np.nan], [7, 8], [9, 10]],\n columns=['a', 'b'],\n index=[100.0, 101.0, np.nan, 102.0, 103.0])\n\n result = df.reindex(index=[101.0, 102.0, 103.0])\n expected = df.iloc[[1, 3, 4]]\n assert_frame_equal(result, expected)\n\n result = df.reindex(index=[103.0])\n expected = df.iloc[[4]]\n assert_frame_equal(result, expected)\n\n result = df.reindex(index=[101.0])\n expected = df.iloc[[1]]\n assert_frame_equal(result, expected)\n\n def test_reindex_multi(self):\n df = DataFrame(np.random.randn(3, 3))\n\n result = df.reindex(lrange(4), lrange(4))\n expected = df.reindex(lrange(4)).reindex(columns=lrange(4))\n\n assert_frame_equal(result, expected)\n\n df = DataFrame(np.random.randint(0, 10, (3, 3)))\n\n result = df.reindex(lrange(4), lrange(4))\n expected = df.reindex(lrange(4)).reindex(columns=lrange(4))\n\n assert_frame_equal(result, expected)\n\n df = DataFrame(np.random.randint(0, 10, (3, 3)))\n\n result = df.reindex(lrange(2), lrange(2))\n expected = df.reindex(lrange(2)).reindex(columns=lrange(2))\n\n assert_frame_equal(result, expected)\n\n df = DataFrame(np.random.randn(5, 3) + 1j, columns=['a', 'b', 'c'])\n\n result = df.reindex(index=[0, 1], columns=['a', 'b'])\n expected = df.reindex([0, 1]).reindex(columns=['a', 'b'])\n\n assert_frame_equal(result, expected)\n\n def test_rename_objects(self):\n renamed = self.mixed_frame.rename(columns=str.upper)\n self.assertIn('FOO', renamed)\n self.assertNotIn('foo', renamed)\n\n def test_fill_corner(self):\n self.mixed_frame.ix[5:20,'foo'] = 
nan\n self.mixed_frame.ix[-10:,'A'] = nan\n\n filled = self.mixed_frame.fillna(value=0)\n self.assertTrue((filled.ix[5:20,'foo'] == 0).all())\n del self.mixed_frame['foo']\n\n empty_float = self.frame.reindex(columns=[])\n result = empty_float.fillna(value=0)\n\n def test_count_objects(self):\n dm = DataFrame(self.mixed_frame._series)\n df = DataFrame(self.mixed_frame._series)\n\n tm.assert_series_equal(dm.count(), df.count())\n tm.assert_series_equal(dm.count(1), df.count(1))\n\n def test_cumsum_corner(self):\n dm = DataFrame(np.arange(20).reshape(4, 5),\n index=lrange(4), columns=lrange(5))\n result = dm.cumsum()\n\n #----------------------------------------------------------------------\n # Stacking / unstacking\n\n def test_stack_unstack(self):\n stacked = self.frame.stack()\n stacked_df = DataFrame({'foo': stacked, 'bar': stacked})\n\n unstacked = stacked.unstack()\n unstacked_df = stacked_df.unstack()\n\n assert_frame_equal(unstacked, self.frame)\n assert_frame_equal(unstacked_df['bar'], self.frame)\n\n unstacked_cols = stacked.unstack(0)\n unstacked_cols_df = stacked_df.unstack(0)\n assert_frame_equal(unstacked_cols.T, self.frame)\n assert_frame_equal(unstacked_cols_df['bar'].T, self.frame)\n\n def test_stack_ints(self):\n df = DataFrame(\n np.random.randn(30, 27),\n columns=MultiIndex.from_tuples(\n list(itertools.product(range(3), repeat=3))\n )\n )\n assert_frame_equal(\n df.stack(level=[1, 2]),\n df.stack(level=1).stack(level=1)\n )\n assert_frame_equal(\n df.stack(level=[-2, -1]),\n df.stack(level=1).stack(level=1)\n )\n\n df_named = df.copy()\n df_named.columns.set_names(range(3), inplace=True)\n assert_frame_equal(\n df_named.stack(level=[1, 2]),\n df_named.stack(level=1).stack(level=1)\n )\n\n def test_stack_mixed_levels(self):\n columns = MultiIndex.from_tuples(\n [('A', 'cat', 'long'), ('B', 'cat', 'long'),\n ('A', 'dog', 'short'), ('B', 'dog', 'short')],\n names=['exp', 'animal', 'hair_length']\n )\n df = DataFrame(randn(4, 4), columns=columns)\n\n animal_hair_stacked = df.stack(level=['animal', 'hair_length'])\n exp_hair_stacked = df.stack(level=['exp', 'hair_length'])\n\n # GH #8584: Need to check that stacking works when a number\n # is passed that is both a level name and in the range of\n # the level numbers\n df2 = df.copy()\n df2.columns.names = ['exp', 'animal', 1]\n assert_frame_equal(df2.stack(level=['animal', 1]),\n animal_hair_stacked, check_names=False)\n assert_frame_equal(df2.stack(level=['exp', 1]),\n exp_hair_stacked, check_names=False)\n\n # When mixed types are passed and the ints are not level\n # names, raise\n self.assertRaises(ValueError, df2.stack, level=['animal', 0])\n\n # GH #8584: Having 0 in the level names could raise a\n # strange error about lexsort depth\n df3 = df.copy()\n df3.columns.names = ['exp', 'animal', 0]\n assert_frame_equal(df3.stack(level=['animal', 0]),\n animal_hair_stacked, check_names=False)\n\n def test_stack_int_level_names(self):\n columns = MultiIndex.from_tuples(\n [('A', 'cat', 'long'), ('B', 'cat', 'long'),\n ('A', 'dog', 'short'), ('B', 'dog', 'short')],\n names=['exp', 'animal', 'hair_length']\n )\n df = DataFrame(randn(4, 4), columns=columns)\n\n exp_animal_stacked = df.stack(level=['exp', 'animal'])\n animal_hair_stacked = df.stack(level=['animal', 'hair_length'])\n exp_hair_stacked = df.stack(level=['exp', 'hair_length'])\n\n df2 = df.copy()\n df2.columns.names = [0, 1, 2]\n assert_frame_equal(df2.stack(level=[1, 2]), animal_hair_stacked,\n check_names=False )\n assert_frame_equal(df2.stack(level=[0, 1]), 
exp_animal_stacked,\n check_names=False)\n assert_frame_equal(df2.stack(level=[0, 2]), exp_hair_stacked,\n check_names=False)\n\n # Out-of-order int column names\n df3 = df.copy()\n df3.columns.names = [2, 0, 1]\n assert_frame_equal(df3.stack(level=[0, 1]), animal_hair_stacked,\n check_names=False)\n assert_frame_equal(df3.stack(level=[2, 0]), exp_animal_stacked,\n check_names=False)\n assert_frame_equal(df3.stack(level=[2, 1]), exp_hair_stacked,\n check_names=False)\n\n\n def test_unstack_bool(self):\n df = DataFrame([False, False],\n index=MultiIndex.from_arrays([['a', 'b'], ['c', 'l']]),\n columns=['col'])\n rs = df.unstack()\n xp = DataFrame(np.array([[False, np.nan], [np.nan, False]],\n dtype=object),\n index=['a', 'b'],\n columns=MultiIndex.from_arrays([['col', 'col'],\n ['c', 'l']]))\n assert_frame_equal(rs, xp)\n\n def test_unstack_to_series(self):\n # check reversibility\n data = self.frame.unstack()\n\n self.assertTrue(isinstance(data, Series))\n undo = data.unstack().T\n assert_frame_equal(undo, self.frame)\n\n # check NA handling\n data = DataFrame({'x': [1, 2, np.NaN], 'y': [3.0, 4, np.NaN]})\n data.index = Index(['a', 'b', 'c'])\n result = data.unstack()\n\n midx = MultiIndex(levels=[['x', 'y'], ['a', 'b', 'c']],\n labels=[[0, 0, 0, 1, 1, 1], [0, 1, 2, 0, 1, 2]])\n expected = Series([1, 2, np.NaN, 3, 4, np.NaN], index=midx)\n\n assert_series_equal(result, expected)\n\n # check composability of unstack\n old_data = data.copy()\n for _ in range(4):\n data = data.unstack()\n assert_frame_equal(old_data, data)\n\n def test_unstack_dtypes(self):\n\n # GH 2929\n rows = [[1, 1, 3, 4],\n [1, 2, 3, 4],\n [2, 1, 3, 4],\n [2, 2, 3, 4]]\n\n df = DataFrame(rows, columns=list('ABCD'))\n result = df.get_dtype_counts()\n expected = Series({'int64' : 4})\n assert_series_equal(result, expected)\n\n # single dtype\n df2 = df.set_index(['A','B'])\n df3 = df2.unstack('B')\n result = df3.get_dtype_counts()\n expected = Series({'int64' : 4})\n assert_series_equal(result, expected)\n\n # mixed\n df2 = df.set_index(['A','B'])\n df2['C'] = 3.\n df3 = df2.unstack('B')\n result = df3.get_dtype_counts()\n expected = Series({'int64' : 2, 'float64' : 2})\n assert_series_equal(result, expected)\n\n df2['D'] = 'foo'\n df3 = df2.unstack('B')\n result = df3.get_dtype_counts()\n expected = Series({'float64' : 2, 'object' : 2})\n assert_series_equal(result, expected)\n\n # GH7405\n for c, d in (np.zeros(5), np.zeros(5)), \\\n (np.arange(5, dtype='f8'), np.arange(5, 10, dtype='f8')):\n\n df = DataFrame({'A': ['a']*5, 'C':c, 'D':d,\n 'B':pd.date_range('2012-01-01', periods=5)})\n\n right = df.iloc[:3].copy(deep=True)\n\n df = df.set_index(['A', 'B'])\n df['D'] = df['D'].astype('int64')\n\n left = df.iloc[:3].unstack(0)\n right = right.set_index(['A', 'B']).unstack(0)\n right[('D', 'a')] = right[('D', 'a')].astype('int64')\n\n self.assertEqual(left.shape, (3, 2))\n tm.assert_frame_equal(left, right)\n\n def test_unstack_non_unique_index_names(self):\n idx = MultiIndex.from_tuples([('a', 'b'), ('c', 'd')],\n names=['c1', 'c1'])\n df = DataFrame([1, 2], index=idx)\n with tm.assertRaises(ValueError):\n df.unstack('c1')\n\n with tm.assertRaises(ValueError):\n df.T.stack('c1')\n\n def test_unstack_nan_index(self): # GH7466\n cast = lambda val: '{0:1}'.format('' if val != val else val)\n nan = np.nan\n\n def verify(df):\n mk_list = lambda a: list(a) if isinstance(a, tuple) else [a]\n rows, cols = df.notnull().values.nonzero()\n for i, j in zip(rows, cols):\n left = sorted(df.iloc[i, j].split('.'))\n right = 
mk_list(df.index[i]) + mk_list(df.columns[j])\n right = sorted(list(map(cast, right)))\n self.assertEqual(left, right)\n\n df = DataFrame({'jim':['a', 'b', nan, 'd'],\n 'joe':['w', 'x', 'y', 'z'],\n 'jolie':['a.w', 'b.x', ' .y', 'd.z']})\n\n left = df.set_index(['jim', 'joe']).unstack()['jolie']\n right = df.set_index(['joe', 'jim']).unstack()['jolie'].T\n assert_frame_equal(left, right)\n\n for idx in permutations(df.columns[:2]):\n mi = df.set_index(list(idx))\n for lev in range(2):\n udf = mi.unstack(level=lev)\n self.assertEqual(udf.notnull().values.sum(), len(df))\n verify(udf['jolie'])\n\n df = DataFrame({'1st':['d'] * 3 + [nan] * 5 + ['a'] * 2 +\n ['c'] * 3 + ['e'] * 2 + ['b'] * 5,\n '2nd':['y'] * 2 + ['w'] * 3 + [nan] * 3 +\n ['z'] * 4 + [nan] * 3 + ['x'] * 3 + [nan] * 2,\n '3rd':[67,39,53,72,57,80,31,18,11,30,59,\n 50,62,59,76,52,14,53,60,51]})\n\n df['4th'], df['5th'] = \\\n df.apply(lambda r: '.'.join(map(cast, r)), axis=1), \\\n df.apply(lambda r: '.'.join(map(cast, r.iloc[::-1])), axis=1)\n\n for idx in permutations(['1st', '2nd', '3rd']):\n mi = df.set_index(list(idx))\n for lev in range(3):\n udf = mi.unstack(level=lev)\n self.assertEqual(udf.notnull().values.sum(), 2 * len(df))\n for col in ['4th', '5th']:\n verify(udf[col])\n\n # GH7403\n df = pd.DataFrame({'A': list('aaaabbbb'),'B':range(8), 'C':range(8)})\n df.iloc[3, 1] = np.NaN\n left = df.set_index(['A', 'B']).unstack(0)\n\n vals = [[3, 0, 1, 2, nan, nan, nan, nan],\n [nan, nan, nan, nan, 4, 5, 6, 7]]\n vals = list(map(list, zip(*vals)))\n idx = Index([nan, 0, 1, 2, 4, 5, 6, 7], name='B')\n cols = MultiIndex(levels=[['C'], ['a', 'b']],\n labels=[[0, 0], [0, 1]],\n names=[None, 'A'])\n\n right = DataFrame(vals, columns=cols, index=idx)\n assert_frame_equal(left, right)\n\n df = DataFrame({'A': list('aaaabbbb'), 'B':list(range(4))*2,\n 'C':range(8)})\n df.iloc[2,1] = np.NaN\n left = df.set_index(['A', 'B']).unstack(0)\n\n vals = [[2, nan], [0, 4], [1, 5], [nan, 6], [3, 7]]\n cols = MultiIndex(levels=[['C'], ['a', 'b']],\n labels=[[0, 0], [0, 1]],\n names=[None, 'A'])\n idx = Index([nan, 0, 1, 2, 3], name='B')\n right = DataFrame(vals, columns=cols, index=idx)\n assert_frame_equal(left, right)\n\n df = pd.DataFrame({'A': list('aaaabbbb'),'B':list(range(4))*2,\n 'C':range(8)})\n df.iloc[3,1] = np.NaN\n left = df.set_index(['A', 'B']).unstack(0)\n\n vals = [[3, nan], [0, 4], [1, 5], [2, 6], [nan, 7]]\n cols = MultiIndex(levels=[['C'], ['a', 'b']],\n labels=[[0, 0], [0, 1]],\n names=[None, 'A'])\n idx = Index([nan, 0, 1, 2, 3], name='B')\n right = DataFrame(vals, columns=cols, index=idx)\n assert_frame_equal(left, right)\n\n # GH7401\n df = pd.DataFrame({'A': list('aaaaabbbbb'), 'C':np.arange(10),\n 'B':date_range('2012-01-01', periods=5).tolist()*2 })\n\n df.iloc[3,1] = np.NaN\n left = df.set_index(['A', 'B']).unstack()\n\n vals = np.array([[3, 0, 1, 2, nan, 4], [nan, 5, 6, 7, 8, 9]])\n idx = Index(['a', 'b'], name='A')\n cols = MultiIndex(levels=[['C'], date_range('2012-01-01', periods=5)],\n labels=[[0, 0, 0, 0, 0, 0], [-1, 0, 1, 2, 3, 4]],\n names=[None, 'B'])\n\n right = DataFrame(vals, columns=cols, index=idx)\n assert_frame_equal(left, right)\n\n # GH4862\n vals = [['Hg', nan, nan, 680585148],\n ['U', 0.0, nan, 680585148],\n ['Pb', 7.07e-06, nan, 680585148],\n ['Sn', 2.3614e-05, 0.0133, 680607017],\n ['Ag', 0.0, 0.0133, 680607017],\n ['Hg', -0.00015, 0.0133, 680607017]]\n df = DataFrame(vals, columns=['agent', 'change', 'dosage', 's_id'],\n index=[17263, 17264, 17265, 17266, 17267, 17268])\n\n left = 
df.copy().set_index(['s_id','dosage','agent']).unstack()\n\n vals = [[nan, nan, 7.07e-06, nan, 0.0],\n [0.0, -0.00015, nan, 2.3614e-05, nan]]\n\n idx = MultiIndex(levels=[[680585148, 680607017], [0.0133]],\n labels=[[0, 1], [-1, 0]],\n names=['s_id', 'dosage'])\n\n cols = MultiIndex(levels=[['change'], ['Ag', 'Hg', 'Pb', 'Sn', 'U']],\n labels=[[0, 0, 0, 0, 0], [0, 1, 2, 3, 4]],\n names=[None, 'agent'])\n\n right = DataFrame(vals, columns=cols, index=idx)\n assert_frame_equal(left, right)\n\n left = df.ix[17264:].copy().set_index(['s_id','dosage','agent'])\n assert_frame_equal(left.unstack(), right)\n\n def test_stack_datetime_column_multiIndex(self):\n # GH 8039\n t = datetime(2014, 1, 1)\n df = DataFrame([1, 2, 3, 4], columns=MultiIndex.from_tuples([(t, 'A', 'B')]))\n result = df.stack()\n\n eidx = MultiIndex.from_product([(0, 1, 2, 3), ('B',)])\n ecols = MultiIndex.from_tuples([(t, 'A')])\n expected = DataFrame([1, 2, 3, 4], index=eidx, columns=ecols)\n assert_frame_equal(result, expected)\n\n def test_stack_partial_multiIndex(self):\n # GH 8844\n def _test_stack_with_multiindex(multiindex):\n df = DataFrame(np.arange(3 * len(multiindex)).reshape(3, len(multiindex)),\n columns=multiindex)\n for level in (-1, 0, 1, [0, 1], [1, 0]):\n result = df.stack(level=level, dropna=False)\n\n if isinstance(level, int):\n # Stacking a single level should not make any all-NaN rows,\n # so df.stack(level=level, dropna=False) should be the same\n # as df.stack(level=level, dropna=True).\n expected = df.stack(level=level, dropna=True)\n if isinstance(expected, Series):\n assert_series_equal(result, expected)\n else:\n assert_frame_equal(result, expected)\n\n df.columns = MultiIndex.from_tuples(df.columns.get_values(),\n names=df.columns.names)\n expected = df.stack(level=level, dropna=False)\n if isinstance(expected, Series):\n assert_series_equal(result, expected)\n else:\n assert_frame_equal(result, expected)\n\n full_multiindex = MultiIndex.from_tuples([('B', 'x'), ('B', 'z'),\n ('A', 'y'),\n ('C', 'x'), ('C', 'u')],\n names=['Upper', 'Lower'])\n for multiindex_columns in ([0, 1, 2, 3, 4],\n [0, 1, 2, 3], [0, 1, 2, 4],\n [0, 1, 2], [1, 2, 3], [2, 3, 4],\n [0, 1], [0, 2], [0, 3],\n [0], [2], [4]):\n _test_stack_with_multiindex(full_multiindex[multiindex_columns])\n if len(multiindex_columns) > 1:\n multiindex_columns.reverse()\n _test_stack_with_multiindex(full_multiindex[multiindex_columns])\n\n df = DataFrame(np.arange(6).reshape(2, 3), columns=full_multiindex[[0, 1, 3]])\n result = df.stack(dropna=False)\n expected = DataFrame([[0, 2], [1, nan], [3, 5], [4, nan]],\n index=MultiIndex(levels=[[0, 1], ['u', 'x', 'y', 'z']],\n labels=[[0, 0, 1, 1], [1, 3, 1, 3]],\n names=[None, 'Lower']),\n columns=Index(['B', 'C'], name='Upper'),\n dtype=df.dtypes[0])\n assert_frame_equal(result, expected)\n\n def test_repr_with_mi_nat(self):\n df = DataFrame({'X': [1, 2]},\n index=[[pd.NaT, pd.Timestamp('20130101')], ['a', 'b']])\n res = repr(df)\n exp = ' X\\nNaT a 1\\n2013-01-01 b 2'\n nose.tools.assert_equal(res, exp)\n\n def test_reset_index(self):\n stacked = self.frame.stack()[::2]\n stacked = DataFrame({'foo': stacked, 'bar': stacked})\n\n names = ['first', 'second']\n stacked.index.names = names\n deleveled = stacked.reset_index()\n for i, (lev, lab) in enumerate(zip(stacked.index.levels,\n stacked.index.labels)):\n values = lev.take(lab)\n name = names[i]\n assert_almost_equal(values, deleveled[name])\n\n stacked.index.names = [None, None]\n deleveled2 = stacked.reset_index()\n 
self.assert_numpy_array_equal(deleveled['first'],\n deleveled2['level_0'])\n self.assert_numpy_array_equal(deleveled['second'],\n deleveled2['level_1'])\n\n # default name assigned\n rdf = self.frame.reset_index()\n self.assert_numpy_array_equal(rdf['index'], self.frame.index.values)\n\n # default name assigned, corner case\n df = self.frame.copy()\n df['index'] = 'foo'\n rdf = df.reset_index()\n self.assert_numpy_array_equal(rdf['level_0'], self.frame.index.values)\n\n # but this is ok\n self.frame.index.name = 'index'\n deleveled = self.frame.reset_index()\n self.assert_numpy_array_equal(deleveled['index'],\n self.frame.index.values)\n self.assert_numpy_array_equal(deleveled.index,\n np.arange(len(deleveled)))\n\n # preserve column names\n self.frame.columns.name = 'columns'\n resetted = self.frame.reset_index()\n self.assertEqual(resetted.columns.name, 'columns')\n\n # only remove certain columns\n frame = self.frame.reset_index().set_index(['index', 'A', 'B'])\n rs = frame.reset_index(['A', 'B'])\n\n assert_frame_equal(rs, self.frame, check_names=False) # TODO should reset_index check_names ?\n\n rs = frame.reset_index(['index', 'A', 'B'])\n assert_frame_equal(rs, self.frame.reset_index(), check_names=False)\n\n rs = frame.reset_index(['index', 'A', 'B'])\n assert_frame_equal(rs, self.frame.reset_index(), check_names=False)\n\n rs = frame.reset_index('A')\n xp = self.frame.reset_index().set_index(['index', 'B'])\n assert_frame_equal(rs, xp, check_names=False)\n\n # test resetting in place\n df = self.frame.copy()\n resetted = self.frame.reset_index()\n df.reset_index(inplace=True)\n assert_frame_equal(df, resetted, check_names=False)\n\n frame = self.frame.reset_index().set_index(['index', 'A', 'B'])\n rs = frame.reset_index('A', drop=True)\n xp = self.frame.copy()\n del xp['A']\n xp = xp.set_index(['B'], append=True)\n assert_frame_equal(rs, xp, check_names=False)\n\n def test_reset_index_right_dtype(self):\n time = np.arange(0.0, 10, np.sqrt(2) / 2)\n s1 = Series((9.81 * time ** 2) / 2,\n index=Index(time, name='time'),\n name='speed')\n df = DataFrame(s1)\n\n resetted = s1.reset_index()\n self.assertEqual(resetted['time'].dtype, np.float64)\n\n resetted = df.reset_index()\n self.assertEqual(resetted['time'].dtype, np.float64)\n\n def test_reset_index_multiindex_col(self):\n vals = np.random.randn(3, 3).astype(object)\n idx = ['x', 'y', 'z']\n full = np.hstack(([[x] for x in idx], vals))\n df = DataFrame(vals, Index(idx, name='a'),\n columns=[['b', 'b', 'c'], ['mean', 'median', 'mean']])\n rs = df.reset_index()\n xp = DataFrame(full, columns=[['a', 'b', 'b', 'c'],\n ['', 'mean', 'median', 'mean']])\n assert_frame_equal(rs, xp)\n\n rs = df.reset_index(col_fill=None)\n xp = DataFrame(full, columns=[['a', 'b', 'b', 'c'],\n ['a', 'mean', 'median', 'mean']])\n assert_frame_equal(rs, xp)\n\n rs = df.reset_index(col_level=1, col_fill='blah')\n xp = DataFrame(full, columns=[['blah', 'b', 'b', 'c'],\n ['a', 'mean', 'median', 'mean']])\n assert_frame_equal(rs, xp)\n\n df = DataFrame(vals,\n MultiIndex.from_arrays([[0, 1, 2], ['x', 'y', 'z']],\n names=['d', 'a']),\n columns=[['b', 'b', 'c'], ['mean', 'median', 'mean']])\n rs = df.reset_index('a', )\n xp = DataFrame(full, Index([0, 1, 2], name='d'),\n columns=[['a', 'b', 'b', 'c'],\n ['', 'mean', 'median', 'mean']])\n assert_frame_equal(rs, xp)\n\n rs = df.reset_index('a', col_fill=None)\n xp = DataFrame(full, Index(lrange(3), name='d'),\n columns=[['a', 'b', 'b', 'c'],\n ['a', 'mean', 'median', 'mean']])\n assert_frame_equal(rs, xp)\n\n rs = 
df.reset_index('a', col_fill='blah', col_level=1)\n xp = DataFrame(full, Index(lrange(3), name='d'),\n columns=[['blah', 'b', 'b', 'c'],\n ['a', 'mean', 'median', 'mean']])\n assert_frame_equal(rs, xp)\n\n def test_reset_index_with_datetimeindex_cols(self):\n # GH5818\n #\n df = pd.DataFrame([[1, 2], [3, 4]],\n columns=pd.date_range('1/1/2013', '1/2/2013'),\n index=['A', 'B'])\n\n result = df.reset_index()\n expected = pd.DataFrame([['A', 1, 2], ['B', 3, 4]],\n columns=['index', datetime(2013, 1, 1),\n datetime(2013, 1, 2)])\n assert_frame_equal(result, expected)\n\n #----------------------------------------------------------------------\n # Tests to cope with refactored internals\n def test_as_matrix_numeric_cols(self):\n self.frame['foo'] = 'bar'\n\n values = self.frame.as_matrix(['A', 'B', 'C', 'D'])\n self.assertEqual(values.dtype, np.float64)\n\n def test_as_matrix_lcd(self):\n\n # mixed lcd\n values = self.mixed_float.as_matrix(['A', 'B', 'C', 'D'])\n self.assertEqual(values.dtype, np.float64)\n\n values = self.mixed_float.as_matrix(['A', 'B', 'C' ])\n self.assertEqual(values.dtype, np.float32)\n\n values = self.mixed_float.as_matrix(['C'])\n self.assertEqual(values.dtype, np.float16)\n\n values = self.mixed_int.as_matrix(['A','B','C','D'])\n self.assertEqual(values.dtype, np.int64)\n\n values = self.mixed_int.as_matrix(['A','D'])\n self.assertEqual(values.dtype, np.int64)\n\n # guess all ints are cast to uints....\n values = self.mixed_int.as_matrix(['A','B','C'])\n self.assertEqual(values.dtype, np.int64)\n\n values = self.mixed_int.as_matrix(['A','C'])\n self.assertEqual(values.dtype, np.int32)\n\n values = self.mixed_int.as_matrix(['C','D'])\n self.assertEqual(values.dtype, np.int64)\n\n values = self.mixed_int.as_matrix(['A'])\n self.assertEqual(values.dtype, np.int32)\n\n values = self.mixed_int.as_matrix(['C'])\n self.assertEqual(values.dtype, np.uint8)\n\n def test_constructor_with_convert(self):\n # this is actually mostly a test of lib.maybe_convert_objects\n # #2845\n df = DataFrame({'A' : [2**63-1] })\n result = df['A']\n expected = Series(np.asarray([2**63-1], np.int64))\n assert_series_equal(result, expected)\n\n df = DataFrame({'A' : [2**63] })\n result = df['A']\n expected = Series(np.asarray([2**63], np.object_))\n assert_series_equal(result, expected)\n\n df = DataFrame({'A' : [datetime(2005, 1, 1), True] })\n result = df['A']\n expected = Series(np.asarray([datetime(2005, 1, 1), True], np.object_))\n assert_series_equal(result, expected)\n\n df = DataFrame({'A' : [None, 1] })\n result = df['A']\n expected = Series(np.asarray([np.nan, 1], np.float_))\n assert_series_equal(result, expected)\n\n df = DataFrame({'A' : [1.0, 2] })\n result = df['A']\n expected = Series(np.asarray([1.0, 2], np.float_))\n assert_series_equal(result, expected)\n\n df = DataFrame({'A' : [1.0+2.0j, 3] })\n result = df['A']\n expected = Series(np.asarray([1.0+2.0j, 3], np.complex_))\n assert_series_equal(result, expected)\n\n df = DataFrame({'A' : [1.0+2.0j, 3.0] })\n result = df['A']\n expected = Series(np.asarray([1.0+2.0j, 3.0], np.complex_))\n assert_series_equal(result, expected)\n\n df = DataFrame({'A' : [1.0+2.0j, True] })\n result = df['A']\n expected = Series(np.asarray([1.0+2.0j, True], np.object_))\n assert_series_equal(result, expected)\n\n df = DataFrame({'A' : [1.0, None] })\n result = df['A']\n expected = Series(np.asarray([1.0, np.nan], np.float_))\n assert_series_equal(result, expected)\n\n df = DataFrame({'A' : [1.0+2.0j, None] })\n result = df['A']\n expected = 
Series(np.asarray([1.0+2.0j, np.nan], np.complex_))\n assert_series_equal(result, expected)\n\n df = DataFrame({'A' : [2.0, 1, True, None] })\n result = df['A']\n expected = Series(np.asarray([2.0, 1, True, None], np.object_))\n assert_series_equal(result, expected)\n\n df = DataFrame({'A' : [2.0, 1, datetime(2006, 1, 1), None] })\n result = df['A']\n expected = Series(np.asarray([2.0, 1, datetime(2006, 1, 1),\n None], np.object_))\n assert_series_equal(result, expected)\n\n def test_construction_with_mixed(self):\n # test construction edge cases with mixed types\n\n # f7u12, this does not work without extensive workaround\n data = [[datetime(2001, 1, 5), nan, datetime(2001, 1, 2)],\n [datetime(2000, 1, 2), datetime(2000, 1, 3),\n datetime(2000, 1, 1)]]\n df = DataFrame(data)\n\n # check dtypes\n result = df.get_dtype_counts().order()\n expected = Series({ 'datetime64[ns]' : 3 })\n\n # mixed-type frames\n self.mixed_frame['datetime'] = datetime.now()\n self.mixed_frame['timedelta'] = timedelta(days=1,seconds=1)\n self.assertEqual(self.mixed_frame['datetime'].dtype, 'M8[ns]')\n self.assertEqual(self.mixed_frame['timedelta'].dtype, 'm8[ns]')\n result = self.mixed_frame.get_dtype_counts().order()\n expected = Series({ 'float64' : 4,\n 'object' : 1,\n 'datetime64[ns]' : 1,\n 'timedelta64[ns]' : 1}).order()\n assert_series_equal(result,expected)\n\n def test_construction_with_conversions(self):\n\n # convert from a numpy array of non-ns timedelta64\n arr = np.array([1,2,3],dtype='timedelta64[s]')\n s = Series(arr)\n expected = Series(timedelta_range('00:00:01',periods=3,freq='s'))\n assert_series_equal(s,expected)\n\n df = DataFrame(index=range(3))\n df['A'] = arr\n expected = DataFrame({'A' : timedelta_range('00:00:01',periods=3,freq='s')},\n index=range(3))\n assert_frame_equal(df,expected)\n\n # convert from a numpy array of non-ns datetime64\n #### note that creating a numpy datetime64 is in LOCAL time!!!!\n #### seems to work for M8[D], but not for M8[s]\n\n s = Series(np.array(['2013-01-01','2013-01-02','2013-01-03'],dtype='datetime64[D]'))\n assert_series_equal(s,Series(date_range('20130101',periods=3,freq='D')))\n #s = Series(np.array(['2013-01-01 00:00:01','2013-01-01 00:00:02','2013-01-01 00:00:03'],dtype='datetime64[s]'))\n #assert_series_equal(s,date_range('20130101 00:00:01',period=3,freq='s'))\n\n expected = DataFrame({\n 'dt1' : Timestamp('20130101'),\n 'dt2' : date_range('20130101',periods=3),\n #'dt3' : date_range('20130101 00:00:01',periods=3,freq='s'),\n },index=range(3))\n\n\n df = DataFrame(index=range(3))\n df['dt1'] = np.datetime64('2013-01-01')\n df['dt2'] = np.array(['2013-01-01','2013-01-02','2013-01-03'],dtype='datetime64[D]')\n #df['dt3'] = np.array(['2013-01-01 00:00:01','2013-01-01 00:00:02','2013-01-01 00:00:03'],dtype='datetime64[s]')\n assert_frame_equal(df, expected)\n\n def test_constructor_frame_copy(self):\n cop = DataFrame(self.frame, copy=True)\n cop['A'] = 5\n self.assertTrue((cop['A'] == 5).all())\n self.assertFalse((self.frame['A'] == 5).all())\n\n def test_constructor_ndarray_copy(self):\n df = DataFrame(self.frame.values)\n\n self.frame.values[5] = 5\n self.assertTrue((df.values[5] == 5).all())\n\n df = DataFrame(self.frame.values, copy=True)\n self.frame.values[6] = 6\n self.assertFalse((df.values[6] == 6).all())\n\n def test_constructor_series_copy(self):\n series = self.frame._series\n\n df = DataFrame({'A': series['A']})\n df['A'][:] = 5\n\n self.assertFalse((series['A'] == 5).all())\n\n def test_constructor_compound_dtypes(self):\n # GH 5191\n # 
compound dtypes should raise not-implementederror\n\n def f(dtype):\n return DataFrame(data = list(itertools.repeat((datetime(2001, 1, 1), \"aa\", 20), 9)),\n columns=[\"A\", \"B\", \"C\"], dtype=dtype)\n\n self.assertRaises(NotImplementedError, f, [(\"A\",\"datetime64[h]\"), (\"B\",\"str\"), (\"C\",\"int32\")])\n\n # these work (though results may be unexpected)\n f('int64')\n f('float64')\n f('M8[ns]')\n\n def test_assign_columns(self):\n self.frame['hi'] = 'there'\n\n frame = self.frame.copy()\n frame.columns = ['foo', 'bar', 'baz', 'quux', 'foo2']\n assert_series_equal(self.frame['C'], frame['baz'])\n assert_series_equal(self.frame['hi'], frame['foo2'])\n\n def test_columns_with_dups(self):\n\n # GH 3468 related\n\n # basic\n df = DataFrame([[1,2]], columns=['a','a'])\n df.columns = ['a','a.1']\n str(df)\n expected = DataFrame([[1,2]], columns=['a','a.1'])\n assert_frame_equal(df, expected)\n\n df = DataFrame([[1,2,3]], columns=['b','a','a'])\n df.columns = ['b','a','a.1']\n str(df)\n expected = DataFrame([[1,2,3]], columns=['b','a','a.1'])\n assert_frame_equal(df, expected)\n\n # with a dup index\n df = DataFrame([[1,2]], columns=['a','a'])\n df.columns = ['b','b']\n str(df)\n expected = DataFrame([[1,2]], columns=['b','b'])\n assert_frame_equal(df, expected)\n\n # multi-dtype\n df = DataFrame([[1,2,1.,2.,3.,'foo','bar']], columns=['a','a','b','b','d','c','c'])\n df.columns = list('ABCDEFG')\n str(df)\n expected = DataFrame([[1,2,1.,2.,3.,'foo','bar']], columns=list('ABCDEFG'))\n assert_frame_equal(df, expected)\n\n # this is an error because we cannot disambiguate the dup columns\n self.assertRaises(Exception, lambda x: DataFrame([[1,2,'foo','bar']], columns=['a','a','a','a']))\n\n # dups across blocks\n df_float = DataFrame(np.random.randn(10, 3),dtype='float64')\n df_int = DataFrame(np.random.randn(10, 3),dtype='int64')\n df_bool = DataFrame(True,index=df_float.index,columns=df_float.columns)\n df_object = DataFrame('foo',index=df_float.index,columns=df_float.columns)\n df_dt = DataFrame(Timestamp('20010101'),index=df_float.index,columns=df_float.columns)\n df = pd.concat([ df_float, df_int, df_bool, df_object, df_dt ], axis=1)\n\n self.assertEqual(len(df._data._blknos), len(df.columns))\n self.assertEqual(len(df._data._blklocs), len(df.columns))\n\n # testing iget\n for i in range(len(df.columns)):\n df.iloc[:,i]\n\n # dup columns across dtype GH 2079/2194\n vals = [[1, -1, 2.], [2, -2, 3.]]\n rs = DataFrame(vals, columns=['A', 'A', 'B'])\n xp = DataFrame(vals)\n xp.columns = ['A', 'A', 'B']\n assert_frame_equal(rs, xp)\n\n def test_insert_column_bug_4032(self):\n\n # GH4032, inserting a column and renaming causing errors\n df = DataFrame({'b': [1.1, 2.2]})\n df = df.rename(columns={})\n df.insert(0, 'a', [1, 2])\n\n result = df.rename(columns={})\n str(result)\n expected = DataFrame([[1,1.1],[2, 2.2]],columns=['a','b'])\n assert_frame_equal(result,expected)\n df.insert(0, 'c', [1.3, 2.3])\n\n result = df.rename(columns={})\n str(result)\n\n expected = DataFrame([[1.3,1,1.1],[2.3,2, 2.2]],columns=['c','a','b'])\n assert_frame_equal(result,expected)\n\n def test_cast_internals(self):\n casted = DataFrame(self.frame._data, dtype=int)\n expected = DataFrame(self.frame._series, dtype=int)\n assert_frame_equal(casted, expected)\n\n casted = DataFrame(self.frame._data, dtype=np.int32)\n expected = DataFrame(self.frame._series, dtype=np.int32)\n assert_frame_equal(casted, expected)\n\n def test_consolidate(self):\n self.frame['E'] = 7.\n consolidated = self.frame.consolidate()\n 
self.assertEqual(len(consolidated._data.blocks), 1)\n\n # Ensure copy, do I want this?\n recons = consolidated.consolidate()\n self.assertIsNot(recons, consolidated)\n assert_frame_equal(recons, consolidated)\n\n self.frame['F'] = 8.\n self.assertEqual(len(self.frame._data.blocks), 3)\n self.frame.consolidate(inplace=True)\n self.assertEqual(len(self.frame._data.blocks), 1)\n\n def test_consolidate_inplace(self):\n frame = self.frame.copy()\n\n # triggers in-place consolidation\n for letter in range(ord('A'), ord('Z')):\n self.frame[chr(letter)] = chr(letter)\n\n def test_as_matrix_consolidate(self):\n self.frame['E'] = 7.\n self.assertFalse(self.frame._data.is_consolidated())\n _ = self.frame.as_matrix()\n self.assertTrue(self.frame._data.is_consolidated())\n\n def test_modify_values(self):\n self.frame.values[5] = 5\n self.assertTrue((self.frame.values[5] == 5).all())\n\n # unconsolidated\n self.frame['E'] = 7.\n self.frame.values[6] = 6\n self.assertTrue((self.frame.values[6] == 6).all())\n\n def test_boolean_set_uncons(self):\n self.frame['E'] = 7.\n\n expected = self.frame.values.copy()\n expected[expected > 1] = 2\n\n self.frame[self.frame > 1] = 2\n assert_almost_equal(expected, self.frame.values)\n\n def test_xs_view(self):\n \"\"\"\n in 0.14 this will return a view if possible\n a copy otherwise, but this is numpy dependent\n \"\"\"\n\n dm = DataFrame(np.arange(20.).reshape(4, 5),\n index=lrange(4), columns=lrange(5))\n\n dm.xs(2)[:] = 10\n self.assertTrue((dm.xs(2) == 10).all())\n\n def test_boolean_indexing(self):\n idx = lrange(3)\n cols = ['A','B','C']\n df1 = DataFrame(index=idx, columns=cols,\n data=np.array([[0.0, 0.5, 1.0],\n [1.5, 2.0, 2.5],\n [3.0, 3.5, 4.0]],\n dtype=float))\n df2 = DataFrame(index=idx, columns=cols,\n data=np.ones((len(idx), len(cols))))\n\n expected = DataFrame(index=idx, columns=cols,\n data=np.array([[0.0, 0.5, 1.0],\n [1.5, 2.0, -1],\n [-1, -1, -1]], dtype=float))\n\n df1[df1 > 2.0 * df2] = -1\n assert_frame_equal(df1, expected)\n with assertRaisesRegexp(ValueError, 'Item wrong length'):\n df1[df1.index[:-1] > 2] = -1\n\n def test_boolean_indexing_mixed(self):\n df = DataFrame(\n {long(0): {35: np.nan, 40: np.nan, 43: np.nan, 49: np.nan, 50: np.nan},\n long(1): {35: np.nan,\n 40: 0.32632316859446198,\n 43: np.nan,\n 49: 0.32632316859446198,\n 50: 0.39114724480578139},\n long(2): {35: np.nan, 40: np.nan, 43: 0.29012581014105987, 49: np.nan, 50: np.nan},\n long(3): {35: np.nan, 40: np.nan, 43: np.nan, 49: np.nan, 50: np.nan},\n long(4): {35: 0.34215328467153283, 40: np.nan, 43: np.nan, 49: np.nan, 50: np.nan},\n 'y': {35: 0, 40: 0, 43: 0, 49: 0, 50: 1}})\n\n # mixed int/float ok\n df2 = df.copy()\n df2[df2>0.3] = 1\n expected = df.copy()\n expected.loc[40,1] = 1\n expected.loc[49,1] = 1\n expected.loc[50,1] = 1\n expected.loc[35,4] = 1\n assert_frame_equal(df2,expected)\n\n df['foo'] = 'test'\n with tm.assertRaisesRegexp(TypeError, 'boolean setting on mixed-type'):\n df[df > 0.3] = 1\n\n def test_sum_bools(self):\n df = DataFrame(index=lrange(1), columns=lrange(10))\n bools = isnull(df)\n self.assertEqual(bools.sum(axis=1)[0], 10)\n\n def test_fillna_col_reordering(self):\n idx = lrange(20)\n cols = [\"COL.\" + str(i) for i in range(5, 0, -1)]\n data = np.random.rand(20, 5)\n df = DataFrame(index=lrange(20), columns=cols, data=data)\n filled = df.fillna(method='ffill')\n self.assertEqual(df.columns.tolist(), filled.columns.tolist())\n\n def test_take(self):\n\n # homogeneous\n #----------------------------------------\n order = [3, 1, 2, 0]\n for df 
in [self.frame]:\n\n result = df.take(order, axis=0)\n expected = df.reindex(df.index.take(order))\n assert_frame_equal(result, expected)\n\n # axis = 1\n result = df.take(order, axis=1)\n expected = df.ix[:, ['D', 'B', 'C', 'A']]\n assert_frame_equal(result, expected, check_names=False)\n\n # neg indicies\n order = [2,1,-1]\n for df in [self.frame]:\n\n result = df.take(order, axis=0)\n expected = df.reindex(df.index.take(order))\n assert_frame_equal(result, expected)\n\n # axis = 1\n result = df.take(order, axis=1)\n expected = df.ix[:, ['C', 'B', 'D']]\n assert_frame_equal(result, expected, check_names=False)\n\n # illegal indices\n self.assertRaises(IndexError, df.take, [3,1,2,30], axis=0)\n self.assertRaises(IndexError, df.take, [3,1,2,-31], axis=0)\n self.assertRaises(IndexError, df.take, [3,1,2,5], axis=1)\n self.assertRaises(IndexError, df.take, [3,1,2,-5], axis=1)\n\n # mixed-dtype\n #----------------------------------------\n order = [4, 1, 2, 0, 3]\n for df in [self.mixed_frame]:\n\n result = df.take(order, axis=0)\n expected = df.reindex(df.index.take(order))\n assert_frame_equal(result, expected)\n\n # axis = 1\n result = df.take(order, axis=1)\n expected = df.ix[:, ['foo', 'B', 'C', 'A', 'D']]\n assert_frame_equal(result, expected)\n\n # neg indicies\n order = [4,1,-2]\n for df in [self.mixed_frame]:\n\n result = df.take(order, axis=0)\n expected = df.reindex(df.index.take(order))\n assert_frame_equal(result, expected)\n\n # axis = 1\n result = df.take(order, axis=1)\n expected = df.ix[:, ['foo', 'B', 'D']]\n assert_frame_equal(result, expected)\n\n # by dtype\n order = [1, 2, 0, 3]\n for df in [self.mixed_float,self.mixed_int]:\n\n result = df.take(order, axis=0)\n expected = df.reindex(df.index.take(order))\n assert_frame_equal(result, expected)\n\n # axis = 1\n result = df.take(order, axis=1)\n expected = df.ix[:, ['B', 'C', 'A', 'D']]\n assert_frame_equal(result, expected)\n\n def test_iterkv_deprecation(self):\n with tm.assert_produces_warning(DeprecationWarning):\n self.mixed_float.iterkv()\n\n def test_iterkv_names(self):\n for k, v in compat.iteritems(self.mixed_frame):\n self.assertEqual(v.name, k)\n\n def test_series_put_names(self):\n series = self.mixed_frame._series\n for k, v in compat.iteritems(series):\n self.assertEqual(v.name, k)\n\n def test_dot(self):\n a = DataFrame(np.random.randn(3, 4), index=['a', 'b', 'c'],\n columns=['p', 'q', 'r', 's'])\n b = DataFrame(np.random.randn(4, 2), index=['p', 'q', 'r', 's'],\n columns=['one', 'two'])\n\n result = a.dot(b)\n expected = DataFrame(np.dot(a.values, b.values),\n index=['a', 'b', 'c'],\n columns=['one', 'two'])\n # Check alignment\n b1 = b.reindex(index=reversed(b.index))\n result = a.dot(b)\n assert_frame_equal(result, expected)\n\n # Check series argument\n result = a.dot(b['one'])\n assert_series_equal(result, expected['one'])\n result = a.dot(b1['one'])\n assert_series_equal(result, expected['one'])\n\n # can pass correct-length arrays\n row = a.ix[0].values\n\n result = a.dot(row)\n exp = a.dot(a.ix[0])\n assert_series_equal(result, exp)\n\n with assertRaisesRegexp(ValueError, 'Dot product shape mismatch'):\n a.dot(row[:-1])\n\n a = np.random.rand(1, 5)\n b = np.random.rand(5, 1)\n A = DataFrame(a)\n B = DataFrame(b)\n\n # it works\n result = A.dot(b)\n\n # unaligned\n df = DataFrame(randn(3, 4), index=[1, 2, 3], columns=lrange(4))\n df2 = DataFrame(randn(5, 3), index=lrange(5), columns=[1, 2, 3])\n\n assertRaisesRegexp(ValueError, 'aligned', df.dot, df2)\n\n def test_idxmin(self):\n frame = self.frame\n 
frame.ix[5:10] = np.nan\n frame.ix[15:20, -2:] = np.nan\n for skipna in [True, False]:\n for axis in [0, 1]:\n for df in [frame, self.intframe]:\n result = df.idxmin(axis=axis, skipna=skipna)\n expected = df.apply(\n Series.idxmin, axis=axis, skipna=skipna)\n assert_series_equal(result, expected)\n\n self.assertRaises(ValueError, frame.idxmin, axis=2)\n\n def test_idxmax(self):\n frame = self.frame\n frame.ix[5:10] = np.nan\n frame.ix[15:20, -2:] = np.nan\n for skipna in [True, False]:\n for axis in [0, 1]:\n for df in [frame, self.intframe]:\n result = df.idxmax(axis=axis, skipna=skipna)\n expected = df.apply(\n Series.idxmax, axis=axis, skipna=skipna)\n assert_series_equal(result, expected)\n\n self.assertRaises(ValueError, frame.idxmax, axis=2)\n\n def test_stale_cached_series_bug_473(self):\n\n # this is chained, but ok\n with option_context('chained_assignment',None):\n Y = DataFrame(np.random.random((4, 4)), index=('a', 'b', 'c', 'd'),\n columns=('e', 'f', 'g', 'h'))\n repr(Y)\n Y['e'] = Y['e'].astype('object')\n Y['g']['c'] = np.NaN\n repr(Y)\n result = Y.sum()\n exp = Y['g'].sum()\n self.assertTrue(isnull(Y['g']['c']))\n\n def test_index_namedtuple(self):\n from collections import namedtuple\n IndexType = namedtuple(\"IndexType\", [\"a\", \"b\"])\n idx1 = IndexType(\"foo\", \"bar\")\n idx2 = IndexType(\"baz\", \"bof\")\n index = Index([idx1, idx2],\n name=\"composite_index\", tupleize_cols=False)\n df = DataFrame([(1, 2), (3, 4)], index=index, columns=[\"A\", \"B\"])\n result = df.ix[IndexType(\"foo\", \"bar\")][\"A\"]\n self.assertEqual(result, 1)\n\n def test_empty_nonzero(self):\n df = DataFrame([1, 2, 3])\n self.assertFalse(df.empty)\n df = DataFrame(index=['a', 'b'], columns=['c', 'd']).dropna()\n self.assertTrue(df.empty)\n self.assertTrue(df.T.empty)\n\n def test_any_all(self):\n\n self._check_bool_op('any', np.any, has_skipna=True, has_bool_only=True)\n self._check_bool_op('all', np.all, has_skipna=True, has_bool_only=True)\n\n df = DataFrame(randn(10, 4)) > 0\n df.any(1)\n df.all(1)\n df.any(1, bool_only=True)\n df.all(1, bool_only=True)\n\n # skip pathological failure cases\n # class CantNonzero(object):\n\n # def __nonzero__(self):\n # raise ValueError\n\n # df[4] = CantNonzero()\n\n # it works!\n # df.any(1)\n # df.all(1)\n # df.any(1, bool_only=True)\n # df.all(1, bool_only=True)\n\n # df[4][4] = np.nan\n # df.any(1)\n # df.all(1)\n # df.any(1, bool_only=True)\n # df.all(1, bool_only=True)\n\n def test_consolidate_datetime64(self):\n # numpy vstack bug\n\n data = \"\"\"\\\nstarting,ending,measure\n2012-06-21 00:00,2012-06-23 07:00,77\n2012-06-23 07:00,2012-06-23 16:30,65\n2012-06-23 16:30,2012-06-25 08:00,77\n2012-06-25 08:00,2012-06-26 12:00,0\n2012-06-26 12:00,2012-06-27 08:00,77\n\"\"\"\n df = read_csv(StringIO(data), parse_dates=[0, 1])\n\n ser_starting = df.starting\n ser_starting.index = ser_starting.values\n ser_starting = ser_starting.tz_localize('US/Eastern')\n ser_starting = ser_starting.tz_convert('UTC')\n\n ser_ending = df.ending\n ser_ending.index = ser_ending.values\n ser_ending = ser_ending.tz_localize('US/Eastern')\n ser_ending = ser_ending.tz_convert('UTC')\n\n df.starting = ser_starting.index\n df.ending = ser_ending.index\n\n tm.assert_index_equal(pd.DatetimeIndex(df.starting), ser_starting.index)\n tm.assert_index_equal(pd.DatetimeIndex(df.ending), ser_ending.index)\n\n def _check_bool_op(self, name, alternative, frame=None, has_skipna=True,\n has_bool_only=False):\n if frame is None:\n frame = self.frame > 0\n # set some NAs\n frame = 
DataFrame(frame.values.astype(object), frame.index,\n frame.columns)\n frame.ix[5:10] = np.nan\n frame.ix[15:20, -2:] = np.nan\n\n f = getattr(frame, name)\n\n if has_skipna:\n def skipna_wrapper(x):\n nona = x.dropna().values\n return alternative(nona)\n\n def wrapper(x):\n return alternative(x.values)\n\n result0 = f(axis=0, skipna=False)\n result1 = f(axis=1, skipna=False)\n assert_series_equal(result0, frame.apply(wrapper))\n assert_series_equal(result1, frame.apply(wrapper, axis=1),\n check_dtype=False) # HACK: win32\n else:\n skipna_wrapper = alternative\n wrapper = alternative\n\n result0 = f(axis=0)\n result1 = f(axis=1)\n assert_series_equal(result0, frame.apply(skipna_wrapper))\n assert_series_equal(result1, frame.apply(skipna_wrapper, axis=1),\n check_dtype=False)\n\n # result = f(axis=1)\n # comp = frame.apply(alternative, axis=1).reindex(result.index)\n # assert_series_equal(result, comp)\n\n # bad axis\n self.assertRaises(ValueError, f, axis=2)\n\n # make sure works on mixed-type frame\n mixed = self.mixed_frame\n mixed['_bool_'] = np.random.randn(len(mixed)) > 0\n getattr(mixed, name)(axis=0)\n getattr(mixed, name)(axis=1)\n\n class NonzeroFail:\n\n def __nonzero__(self):\n raise ValueError\n\n mixed['_nonzero_fail_'] = NonzeroFail()\n\n if has_bool_only:\n getattr(mixed, name)(axis=0, bool_only=True)\n getattr(mixed, name)(axis=1, bool_only=True)\n getattr(frame, name)(axis=0, bool_only=False)\n getattr(frame, name)(axis=1, bool_only=False)\n\n # all NA case\n if has_skipna:\n all_na = frame * np.NaN\n r0 = getattr(all_na, name)(axis=0)\n r1 = getattr(all_na, name)(axis=1)\n if name == 'any':\n self.assertFalse(r0.any())\n self.assertFalse(r1.any())\n else:\n self.assertTrue(r0.all())\n self.assertTrue(r1.all())\n\n def test_strange_column_corruption_issue(self):\n\n df = DataFrame(index=[0, 1])\n df[0] = nan\n wasCol = {}\n # uncommenting these makes the results match\n # for col in xrange(100, 200):\n # wasCol[col] = 1\n # df[col] = nan\n\n for i, dt in enumerate(df.index):\n for col in range(100, 200):\n if not col in wasCol:\n wasCol[col] = 1\n df[col] = nan\n df[col][dt] = i\n\n myid = 100\n\n first = len(df.ix[isnull(df[myid]), [myid]])\n second = len(df.ix[isnull(df[myid]), [myid]])\n self.assertTrue(first == second == 0)\n\n def test_inplace_return_self(self):\n # re #1893\n\n data = DataFrame({'a': ['foo', 'bar', 'baz', 'qux'],\n 'b': [0, 0, 1, 1],\n 'c': [1, 2, 3, 4]})\n\n def _check_f(base, f):\n result = f(base)\n self.assertTrue(result is None)\n\n # -----DataFrame-----\n\n # set_index\n f = lambda x: x.set_index('a', inplace=True)\n _check_f(data.copy(), f)\n\n # reset_index\n f = lambda x: x.reset_index(inplace=True)\n _check_f(data.set_index('a'), f)\n\n # drop_duplicates\n f = lambda x: x.drop_duplicates(inplace=True)\n _check_f(data.copy(), f)\n\n # sort\n f = lambda x: x.sort('b', inplace=True)\n _check_f(data.copy(), f)\n\n # sort_index\n f = lambda x: x.sort_index(inplace=True)\n _check_f(data.copy(), f)\n\n # sortlevel\n f = lambda x: x.sortlevel(0, inplace=True)\n _check_f(data.set_index(['a', 'b']), f)\n\n # fillna\n f = lambda x: x.fillna(0, inplace=True)\n _check_f(data.copy(), f)\n\n # replace\n f = lambda x: x.replace(1, 0, inplace=True)\n _check_f(data.copy(), f)\n\n # rename\n f = lambda x: x.rename({1: 'foo'}, inplace=True)\n _check_f(data.copy(), f)\n\n # -----Series-----\n d = data.copy()['c']\n\n # reset_index\n f = lambda x: x.reset_index(inplace=True, drop=True)\n _check_f(data.set_index('a')['c'], f)\n\n # fillna\n f = lambda x: 
x.fillna(0, inplace=True)\n _check_f(d.copy(), f)\n\n # replace\n f = lambda x: x.replace(1, 0, inplace=True)\n _check_f(d.copy(), f)\n\n # rename\n f = lambda x: x.rename({1: 'foo'}, inplace=True)\n _check_f(d.copy(), f)\n\n def test_isin(self):\n # GH #4211\n df = DataFrame({'vals': [1, 2, 3, 4], 'ids': ['a', 'b', 'f', 'n'],\n 'ids2': ['a', 'n', 'c', 'n']},\n index=['foo', 'bar', 'baz', 'qux'])\n other = ['a', 'b', 'c']\n\n result = df.isin(other)\n expected = DataFrame([df.loc[s].isin(other) for s in df.index])\n assert_frame_equal(result, expected)\n\n def test_isin_empty(self):\n df = DataFrame({'A': ['a', 'b', 'c'], 'B': ['a', 'e', 'f']})\n result = df.isin([])\n expected = pd.DataFrame(False, df.index, df.columns)\n assert_frame_equal(result, expected)\n\n def test_isin_dict(self):\n df = DataFrame({'A': ['a', 'b', 'c'], 'B': ['a', 'e', 'f']})\n d = {'A': ['a']}\n\n expected = DataFrame(False, df.index, df.columns)\n expected.loc[0, 'A'] = True\n\n result = df.isin(d)\n assert_frame_equal(result, expected)\n\n # non unique columns\n df = DataFrame({'A': ['a', 'b', 'c'], 'B': ['a', 'e', 'f']})\n df.columns = ['A', 'A']\n expected = DataFrame(False, df.index, df.columns)\n expected.loc[0, 'A'] = True\n result = df.isin(d)\n assert_frame_equal(result, expected)\n\n def test_isin_with_string_scalar(self):\n #GH4763\n df = DataFrame({'vals': [1, 2, 3, 4], 'ids': ['a', 'b', 'f', 'n'],\n 'ids2': ['a', 'n', 'c', 'n']},\n index=['foo', 'bar', 'baz', 'qux'])\n with tm.assertRaises(TypeError):\n df.isin('a')\n\n with tm.assertRaises(TypeError):\n df.isin('aaa')\n\n def test_isin_df(self):\n df1 = DataFrame({'A': [1, 2, 3, 4], 'B': [2, np.nan, 4, 4]})\n df2 = DataFrame({'A': [0, 2, 12, 4], 'B': [2, np.nan, 4, 5]})\n expected = DataFrame(False, df1.index, df1.columns)\n result = df1.isin(df2)\n expected['A'].loc[[1, 3]] = True\n expected['B'].loc[[0, 2]] = True\n assert_frame_equal(result, expected)\n\n # partial overlapping columns\n df2.columns = ['A', 'C']\n result = df1.isin(df2)\n expected['B'] = False\n assert_frame_equal(result, expected)\n\n def test_isin_df_dupe_values(self):\n df1 = DataFrame({'A': [1, 2, 3, 4], 'B': [2, np.nan, 4, 4]})\n # just cols duped\n df2 = DataFrame([[0, 2], [12, 4], [2, np.nan], [4, 5]],\n columns=['B', 'B'])\n with tm.assertRaises(ValueError):\n df1.isin(df2)\n\n # just index duped\n df2 = DataFrame([[0, 2], [12, 4], [2, np.nan], [4, 5]],\n columns=['A', 'B'], index=[0, 0, 1, 1])\n with tm.assertRaises(ValueError):\n df1.isin(df2)\n\n # cols and index:\n df2.columns = ['B', 'B']\n with tm.assertRaises(ValueError):\n df1.isin(df2)\n\n def test_isin_dupe_self(self):\n other = DataFrame({'A': [1, 0, 1, 0], 'B': [1, 1, 0, 0]})\n df = DataFrame([[1, 1], [1, 0], [0, 0]], columns=['A','A'])\n result = df.isin(other)\n expected = DataFrame(False, index=df.index, columns=df.columns)\n expected.loc[0] = True\n expected.iloc[1, 1] = True\n assert_frame_equal(result, expected)\n\n def test_isin_against_series(self):\n df = pd.DataFrame({'A': [1, 2, 3, 4], 'B': [2, np.nan, 4, 4]},\n index=['a', 'b', 'c', 'd'])\n s = pd.Series([1, 3, 11, 4], index=['a', 'b', 'c', 'd'])\n expected = DataFrame(False, index=df.index, columns=df.columns)\n expected['A'].loc['a'] = True\n expected.loc['d'] = True\n result = df.isin(s)\n assert_frame_equal(result, expected)\n\n def test_isin_multiIndex(self):\n idx = MultiIndex.from_tuples([(0, 'a', 'foo'), (0, 'a', 'bar'),\n (0, 'b', 'bar'), (0, 'b', 'baz'),\n (2, 'a', 'foo'), (2, 'a', 'bar'),\n (2, 'c', 'bar'), (2, 'c', 'baz'),\n (1, 'b', 
'foo'), (1, 'b', 'bar'),\n (1, 'c', 'bar'), (1, 'c', 'baz')])\n df1 = DataFrame({'A': np.ones(12),\n 'B': np.zeros(12)}, index=idx)\n df2 = DataFrame({'A': [1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1],\n 'B': [1, 1, 0, 1, 1, 0, 0, 1, 1, 1, 1, 1]})\n # against regular index\n expected = DataFrame(False, index=df1.index, columns=df1.columns)\n result = df1.isin(df2)\n assert_frame_equal(result, expected)\n\n df2.index = idx\n expected = df2.values.astype(np.bool)\n expected[:, 1] = ~expected[:, 1]\n expected = DataFrame(expected, columns=['A', 'B'], index=idx)\n\n result = df1.isin(df2)\n assert_frame_equal(result, expected)\n\n def test_to_csv_date_format(self):\n from pandas import to_datetime\n pname = '__tmp_to_csv_date_format__'\n with ensure_clean(pname) as path:\n for engine in [None, 'python']:\n dt_index = self.tsframe.index\n datetime_frame = DataFrame({'A': dt_index, 'B': dt_index.shift(1)}, index=dt_index)\n\n datetime_frame.to_csv(path, date_format='%Y%m%d', engine=engine)\n # Check that the data was put in the specified format\n test = read_csv(path, index_col=0)\n\n datetime_frame_int = datetime_frame.applymap(lambda x: int(x.strftime('%Y%m%d')))\n datetime_frame_int.index = datetime_frame_int.index.map(lambda x: int(x.strftime('%Y%m%d')))\n\n assert_frame_equal(test, datetime_frame_int)\n\n datetime_frame.to_csv(path, date_format='%Y-%m-%d', engine=engine)\n # Check that the data was put in the specified format\n test = read_csv(path, index_col=0)\n datetime_frame_str = datetime_frame.applymap(lambda x: x.strftime('%Y-%m-%d'))\n datetime_frame_str.index = datetime_frame_str.index.map(lambda x: x.strftime('%Y-%m-%d'))\n\n assert_frame_equal(test, datetime_frame_str)\n\n # Check that columns get converted\n datetime_frame_columns = datetime_frame.T\n\n datetime_frame_columns.to_csv(path, date_format='%Y%m%d', engine=engine)\n\n test = read_csv(path, index_col=0)\n\n datetime_frame_columns = datetime_frame_columns.applymap(lambda x: int(x.strftime('%Y%m%d')))\n # Columns don't get converted to ints by read_csv\n datetime_frame_columns.columns = datetime_frame_columns.columns.map(lambda x: x.strftime('%Y%m%d'))\n\n assert_frame_equal(test, datetime_frame_columns)\n\n # test NaTs\n nat_index = to_datetime(['NaT'] * 10 + ['2000-01-01', '1/1/2000', '1-1-2000'])\n nat_frame = DataFrame({'A': nat_index}, index=nat_index)\n\n nat_frame.to_csv(path, date_format='%Y-%m-%d', engine=engine)\n\n test = read_csv(path, parse_dates=[0, 1], index_col=0)\n\n assert_frame_equal(test, nat_frame)\n\n def test_concat_empty_dataframe_dtypes(self):\n df = DataFrame(columns=list(\"abc\"))\n df['a'] = df['a'].astype(np.bool_)\n df['b'] = df['b'].astype(np.int32)\n df['c'] = df['c'].astype(np.float64)\n\n result = pd.concat([df, df])\n self.assertEqual(result['a'].dtype, np.bool_)\n self.assertEqual(result['b'].dtype, np.int32)\n self.assertEqual(result['c'].dtype, np.float64)\n\n result = pd.concat([df, df.astype(np.float64)])\n self.assertEqual(result['a'].dtype, np.object_)\n self.assertEqual(result['b'].dtype, np.float64)\n self.assertEqual(result['c'].dtype, np.float64)\n\n def test_empty_frame_dtypes_ftypes(self):\n empty_df = pd.DataFrame()\n assert_series_equal(empty_df.dtypes, pd.Series(dtype=np.object))\n assert_series_equal(empty_df.ftypes, pd.Series(dtype=np.object))\n\n nocols_df = pd.DataFrame(index=[1,2,3])\n assert_series_equal(nocols_df.dtypes, pd.Series(dtype=np.object))\n assert_series_equal(nocols_df.ftypes, pd.Series(dtype=np.object))\n\n norows_df = pd.DataFrame(columns=list(\"abc\"))\n 
assert_series_equal(norows_df.dtypes, pd.Series(np.object, index=list(\"abc\")))\n assert_series_equal(norows_df.ftypes, pd.Series('object:dense', index=list(\"abc\")))\n\n norows_int_df = pd.DataFrame(columns=list(\"abc\")).astype(np.int32)\n assert_series_equal(norows_int_df.dtypes, pd.Series(np.dtype('int32'), index=list(\"abc\")))\n assert_series_equal(norows_int_df.ftypes, pd.Series('int32:dense', index=list(\"abc\")))\n\n odict = OrderedDict\n df = pd.DataFrame(odict([('a', 1), ('b', True), ('c', 1.0)]), index=[1, 2, 3])\n assert_series_equal(df.dtypes, pd.Series(odict([('a', np.int64),\n ('b', np.bool),\n ('c', np.float64)])))\n assert_series_equal(df.ftypes, pd.Series(odict([('a', 'int64:dense'),\n ('b', 'bool:dense'),\n ('c', 'float64:dense')])))\n\n # same but for empty slice of df\n assert_series_equal(df[:0].dtypes, pd.Series(odict([('a', np.int64),\n ('b', np.bool),\n ('c', np.float64)])))\n assert_series_equal(df[:0].ftypes, pd.Series(odict([('a', 'int64:dense'),\n ('b', 'bool:dense'),\n ('c', 'float64:dense')])))\n\n def test_dtypes_are_correct_after_column_slice(self):\n # GH6525\n df = pd.DataFrame(index=range(5), columns=list(\"abc\"), dtype=np.float_)\n odict = OrderedDict\n assert_series_equal(df.dtypes,\n pd.Series(odict([('a', np.float_), ('b', np.float_),\n ('c', np.float_),])))\n assert_series_equal(df.iloc[:,2:].dtypes,\n pd.Series(odict([('c', np.float_)])))\n assert_series_equal(df.dtypes,\n pd.Series(odict([('a', np.float_), ('b', np.float_),\n ('c', np.float_),])))\n\n def test_set_index_names(self):\n df = pd.util.testing.makeDataFrame()\n df.index.name = 'name'\n\n self.assertEqual(df.set_index(df.index).index.names, ['name'])\n\n mi = MultiIndex.from_arrays(df[['A', 'B']].T.values, names=['A', 'B'])\n mi2 = MultiIndex.from_arrays(df[['A', 'B', 'A', 'B']].T.values,\n names=['A', 'B', 'A', 'B'])\n\n df = df.set_index(['A', 'B'])\n\n self.assertEqual(df.set_index(df.index).index.names, ['A', 'B'])\n\n # Check that set_index isn't converting a MultiIndex into an Index\n self.assertTrue(isinstance(df.set_index(df.index).index, MultiIndex))\n\n # Check actual equality\n tm.assert_index_equal(df.set_index(df.index).index, mi)\n\n # Check that [MultiIndex, MultiIndex] yields a MultiIndex rather\n # than a pair of tuples\n self.assertTrue(isinstance(df.set_index([df.index, df.index]).index, MultiIndex))\n\n # Check equality\n tm.assert_index_equal(df.set_index([df.index, df.index]).index, mi2)\n\n def test_select_dtypes_include(self):\n df = DataFrame({'a': list('abc'),\n 'b': list(range(1, 4)),\n 'c': np.arange(3, 6).astype('u1'),\n 'd': np.arange(4.0, 7.0, dtype='float64'),\n 'e': [True, False, True],\n 'f': pd.Categorical(list('abc'))})\n ri = df.select_dtypes(include=[np.number])\n ei = df[['b', 'c', 'd']]\n tm.assert_frame_equal(ri, ei)\n\n ri = df.select_dtypes(include=[np.number,'category'])\n ei = df[['b', 'c', 'd', 'f']]\n tm.assert_frame_equal(ri, ei)\n\n def test_select_dtypes_exclude(self):\n df = DataFrame({'a': list('abc'),\n 'b': list(range(1, 4)),\n 'c': np.arange(3, 6).astype('u1'),\n 'd': np.arange(4.0, 7.0, dtype='float64'),\n 'e': [True, False, True]})\n re = df.select_dtypes(exclude=[np.number])\n ee = df[['a', 'e']]\n tm.assert_frame_equal(re, ee)\n\n def test_select_dtypes_exclude_include(self):\n df = DataFrame({'a': list('abc'),\n 'b': list(range(1, 4)),\n 'c': np.arange(3, 6).astype('u1'),\n 'd': np.arange(4.0, 7.0, dtype='float64'),\n 'e': [True, False, True],\n 'f': pd.date_range('now', periods=3).values})\n exclude = np.datetime64,\n 
include = np.bool_, 'integer'\n r = df.select_dtypes(include=include, exclude=exclude)\n e = df[['b', 'c', 'e']]\n tm.assert_frame_equal(r, e)\n\n exclude = 'datetime',\n include = 'bool', 'int64', 'int32'\n r = df.select_dtypes(include=include, exclude=exclude)\n e = df[['b', 'e']]\n tm.assert_frame_equal(r, e)\n\n def test_select_dtypes_not_an_attr_but_still_valid_dtype(self):\n df = DataFrame({'a': list('abc'),\n 'b': list(range(1, 4)),\n 'c': np.arange(3, 6).astype('u1'),\n 'd': np.arange(4.0, 7.0, dtype='float64'),\n 'e': [True, False, True],\n 'f': pd.date_range('now', periods=3).values})\n df['g'] = df.f.diff()\n assert not hasattr(np, 'u8')\n r = df.select_dtypes(include=['i8', 'O'], exclude=['timedelta'])\n e = df[['a', 'b']]\n tm.assert_frame_equal(r, e)\n\n r = df.select_dtypes(include=['i8', 'O', 'timedelta64[ns]'])\n e = df[['a', 'b', 'g']]\n tm.assert_frame_equal(r, e)\n\n def test_select_dtypes_empty(self):\n df = DataFrame({'a': list('abc'), 'b': list(range(1, 4))})\n with tm.assertRaisesRegexp(ValueError, 'at least one of include or '\n 'exclude must be nonempty'):\n df.select_dtypes()\n\n def test_select_dtypes_raises_on_string(self):\n df = DataFrame({'a': list('abc'), 'b': list(range(1, 4))})\n with tm.assertRaisesRegexp(TypeError, 'include and exclude .+ non-'):\n df.select_dtypes(include='object')\n with tm.assertRaisesRegexp(TypeError, 'include and exclude .+ non-'):\n df.select_dtypes(exclude='object')\n with tm.assertRaisesRegexp(TypeError, 'include and exclude .+ non-'):\n df.select_dtypes(include=int, exclude='object')\n\n def test_select_dtypes_bad_datetime64(self):\n df = DataFrame({'a': list('abc'),\n 'b': list(range(1, 4)),\n 'c': np.arange(3, 6).astype('u1'),\n 'd': np.arange(4.0, 7.0, dtype='float64'),\n 'e': [True, False, True],\n 'f': pd.date_range('now', periods=3).values})\n with tm.assertRaisesRegexp(ValueError, '.+ is too specific'):\n df.select_dtypes(include=['datetime64[D]'])\n\n with tm.assertRaisesRegexp(ValueError, '.+ is too specific'):\n df.select_dtypes(exclude=['datetime64[as]'])\n\n def test_select_dtypes_str_raises(self):\n df = DataFrame({'a': list('abc'),\n 'g': list(u('abc')),\n 'b': list(range(1, 4)),\n 'c': np.arange(3, 6).astype('u1'),\n 'd': np.arange(4.0, 7.0, dtype='float64'),\n 'e': [True, False, True],\n 'f': pd.date_range('now', periods=3).values})\n string_dtypes = set((str, 'str', np.string_, 'S1',\n 'unicode', np.unicode_, 'U1'))\n try:\n string_dtypes.add(unicode)\n except NameError:\n pass\n for dt in string_dtypes:\n with tm.assertRaisesRegexp(TypeError,\n 'string dtypes are not allowed'):\n df.select_dtypes(include=[dt])\n with tm.assertRaisesRegexp(TypeError,\n 'string dtypes are not allowed'):\n df.select_dtypes(exclude=[dt])\n\n def test_select_dtypes_bad_arg_raises(self):\n df = DataFrame({'a': list('abc'),\n 'g': list(u('abc')),\n 'b': list(range(1, 4)),\n 'c': np.arange(3, 6).astype('u1'),\n 'd': np.arange(4.0, 7.0, dtype='float64'),\n 'e': [True, False, True],\n 'f': pd.date_range('now', periods=3).values})\n with tm.assertRaisesRegexp(TypeError, 'data type.*not understood'):\n df.select_dtypes(['blargy, blarg, blarg'])\n\n def test_assign(self):\n df = DataFrame({'A': [1, 2, 3], 'B': [4, 5, 6]})\n original = df.copy()\n result = df.assign(C=df.B / df.A)\n expected = df.copy()\n expected['C'] = [4, 2.5, 2]\n assert_frame_equal(result, expected)\n\n # lambda syntax\n result = df.assign(C=lambda x: x.B / x.A)\n assert_frame_equal(result, expected)\n\n # original is unmodified\n assert_frame_equal(df, original)\n\n # 
Non-Series array-like\n result = df.assign(C=[4, 2.5, 2])\n assert_frame_equal(result, expected)\n # original is unmodified\n assert_frame_equal(df, original)\n\n result = df.assign(B=df.B / df.A)\n expected = expected.drop('B', axis=1).rename(columns={'C': 'B'})\n assert_frame_equal(result, expected)\n\n # overwrite\n result = df.assign(A=df.A + df.B)\n expected = df.copy()\n expected['A'] = [5, 7, 9]\n assert_frame_equal(result, expected)\n\n # lambda\n result = df.assign(A=lambda x: x.A + x.B)\n assert_frame_equal(result, expected)\n\n def test_assign_multiple(self):\n df = DataFrame({'A': [1, 2, 3], 'B': [4, 5, 6]})\n result = df.assign(C=[7, 8, 9], D=df.A, E=lambda x: x.B)\n expected = DataFrame({'A': [1, 2, 3], 'B': [4, 5, 6], 'C': [7, 8, 9],\n 'D': [1, 2, 3], 'E': [4, 5, 6]})\n # column order isn't preserved\n assert_frame_equal(result.reindex_like(expected), expected)\n\n def test_assign_bad(self):\n df = DataFrame({'A': [1, 2, 3], 'B': [4, 5, 6]})\n # non-keyword argument\n with tm.assertRaises(TypeError):\n df.assign(lambda x: x.A)\n with tm.assertRaises(AttributeError):\n df.assign(C=df.A, D=df.A + df.C)\n with tm.assertRaises(KeyError):\n df.assign(C=lambda df: df.A, D=lambda df: df['A'] + df['C'])\n with tm.assertRaises(KeyError):\n df.assign(C=df.A, D=lambda x: x['A'] + x['C'])\n\ndef skip_if_no_ne(engine='numexpr'):\n if engine == 'numexpr':\n try:\n import numexpr as ne\n except ImportError:\n raise nose.SkipTest(\"cannot query engine numexpr when numexpr not \"\n \"installed\")\n\n\ndef skip_if_no_pandas_parser(parser):\n if parser != 'pandas':\n raise nose.SkipTest(\"cannot evaluate with parser {0!r}\".format(parser))\n\n\nclass TestDataFrameQueryWithMultiIndex(object):\n def check_query_with_named_multiindex(self, parser, engine):\n tm.skip_if_no_ne(engine)\n a = tm.choice(['red', 'green'], size=10)\n b = tm.choice(['eggs', 'ham'], size=10)\n index = MultiIndex.from_arrays([a, b], names=['color', 'food'])\n df = DataFrame(randn(10, 2), index=index)\n ind = Series(df.index.get_level_values('color').values, index=index,\n name='color')\n\n # equality\n res1 = df.query('color == \"red\"', parser=parser, engine=engine)\n res2 = df.query('\"red\" == color', parser=parser, engine=engine)\n exp = df[ind == 'red']\n assert_frame_equal(res1, exp)\n assert_frame_equal(res2, exp)\n\n # inequality\n res1 = df.query('color != \"red\"', parser=parser, engine=engine)\n res2 = df.query('\"red\" != color', parser=parser, engine=engine)\n exp = df[ind != 'red']\n assert_frame_equal(res1, exp)\n assert_frame_equal(res2, exp)\n\n # list equality (really just set membership)\n res1 = df.query('color == [\"red\"]', parser=parser, engine=engine)\n res2 = df.query('[\"red\"] == color', parser=parser, engine=engine)\n exp = df[ind.isin(['red'])]\n assert_frame_equal(res1, exp)\n assert_frame_equal(res2, exp)\n\n res1 = df.query('color != [\"red\"]', parser=parser, engine=engine)\n res2 = df.query('[\"red\"] != color', parser=parser, engine=engine)\n exp = df[~ind.isin(['red'])]\n assert_frame_equal(res1, exp)\n assert_frame_equal(res2, exp)\n\n # in/not in ops\n res1 = df.query('[\"red\"] in color', parser=parser, engine=engine)\n res2 = df.query('\"red\" in color', parser=parser, engine=engine)\n exp = df[ind.isin(['red'])]\n assert_frame_equal(res1, exp)\n assert_frame_equal(res2, exp)\n\n res1 = df.query('[\"red\"] not in color', parser=parser, engine=engine)\n res2 = df.query('\"red\" not in color', parser=parser, engine=engine)\n exp = df[~ind.isin(['red'])]\n assert_frame_equal(res1, 
exp)\n assert_frame_equal(res2, exp)\n\n def test_query_with_named_multiindex(self):\n for parser, engine in product(['pandas'], ENGINES):\n yield self.check_query_with_named_multiindex, parser, engine\n\n def check_query_with_unnamed_multiindex(self, parser, engine):\n tm.skip_if_no_ne(engine)\n a = tm.choice(['red', 'green'], size=10)\n b = tm.choice(['eggs', 'ham'], size=10)\n index = MultiIndex.from_arrays([a, b])\n df = DataFrame(randn(10, 2), index=index)\n ind = Series(df.index.get_level_values(0).values, index=index)\n\n res1 = df.query('ilevel_0 == \"red\"', parser=parser, engine=engine)\n res2 = df.query('\"red\" == ilevel_0', parser=parser, engine=engine)\n exp = df[ind == 'red']\n assert_frame_equal(res1, exp)\n assert_frame_equal(res2, exp)\n\n # inequality\n res1 = df.query('ilevel_0 != \"red\"', parser=parser, engine=engine)\n res2 = df.query('\"red\" != ilevel_0', parser=parser, engine=engine)\n exp = df[ind != 'red']\n assert_frame_equal(res1, exp)\n assert_frame_equal(res2, exp)\n\n # list equality (really just set membership)\n res1 = df.query('ilevel_0 == [\"red\"]', parser=parser, engine=engine)\n res2 = df.query('[\"red\"] == ilevel_0', parser=parser, engine=engine)\n exp = df[ind.isin(['red'])]\n assert_frame_equal(res1, exp)\n assert_frame_equal(res2, exp)\n\n res1 = df.query('ilevel_0 != [\"red\"]', parser=parser, engine=engine)\n res2 = df.query('[\"red\"] != ilevel_0', parser=parser, engine=engine)\n exp = df[~ind.isin(['red'])]\n assert_frame_equal(res1, exp)\n assert_frame_equal(res2, exp)\n\n # in/not in ops\n res1 = df.query('[\"red\"] in ilevel_0', parser=parser, engine=engine)\n res2 = df.query('\"red\" in ilevel_0', parser=parser, engine=engine)\n exp = df[ind.isin(['red'])]\n assert_frame_equal(res1, exp)\n assert_frame_equal(res2, exp)\n\n res1 = df.query('[\"red\"] not in ilevel_0', parser=parser, engine=engine)\n res2 = df.query('\"red\" not in ilevel_0', parser=parser, engine=engine)\n exp = df[~ind.isin(['red'])]\n assert_frame_equal(res1, exp)\n assert_frame_equal(res2, exp)\n\n #### LEVEL 1 ####\n ind = Series(df.index.get_level_values(1).values, index=index)\n res1 = df.query('ilevel_1 == \"eggs\"', parser=parser, engine=engine)\n res2 = df.query('\"eggs\" == ilevel_1', parser=parser, engine=engine)\n exp = df[ind == 'eggs']\n assert_frame_equal(res1, exp)\n assert_frame_equal(res2, exp)\n\n # inequality\n res1 = df.query('ilevel_1 != \"eggs\"', parser=parser, engine=engine)\n res2 = df.query('\"eggs\" != ilevel_1', parser=parser, engine=engine)\n exp = df[ind != 'eggs']\n assert_frame_equal(res1, exp)\n assert_frame_equal(res2, exp)\n\n # list equality (really just set membership)\n res1 = df.query('ilevel_1 == [\"eggs\"]', parser=parser, engine=engine)\n res2 = df.query('[\"eggs\"] == ilevel_1', parser=parser, engine=engine)\n exp = df[ind.isin(['eggs'])]\n assert_frame_equal(res1, exp)\n assert_frame_equal(res2, exp)\n\n res1 = df.query('ilevel_1 != [\"eggs\"]', parser=parser, engine=engine)\n res2 = df.query('[\"eggs\"] != ilevel_1', parser=parser, engine=engine)\n exp = df[~ind.isin(['eggs'])]\n assert_frame_equal(res1, exp)\n assert_frame_equal(res2, exp)\n\n # in/not in ops\n res1 = df.query('[\"eggs\"] in ilevel_1', parser=parser, engine=engine)\n res2 = df.query('\"eggs\" in ilevel_1', parser=parser, engine=engine)\n exp = df[ind.isin(['eggs'])]\n assert_frame_equal(res1, exp)\n assert_frame_equal(res2, exp)\n\n res1 = df.query('[\"eggs\"] not in ilevel_1', parser=parser, engine=engine)\n res2 = df.query('\"eggs\" not in ilevel_1', 
parser=parser, engine=engine)\n exp = df[~ind.isin(['eggs'])]\n assert_frame_equal(res1, exp)\n assert_frame_equal(res2, exp)\n\n def test_query_with_unnamed_multiindex(self):\n for parser, engine in product(['pandas'], ENGINES):\n yield self.check_query_with_unnamed_multiindex, parser, engine\n\n def check_query_with_partially_named_multiindex(self, parser, engine):\n tm.skip_if_no_ne(engine)\n a = tm.choice(['red', 'green'], size=10)\n b = np.arange(10)\n index = MultiIndex.from_arrays([a, b])\n index.names = [None, 'rating']\n df = DataFrame(randn(10, 2), index=index)\n res = df.query('rating == 1', parser=parser, engine=engine)\n ind = Series(df.index.get_level_values('rating').values, index=index,\n name='rating')\n exp = df[ind == 1]\n assert_frame_equal(res, exp)\n\n res = df.query('rating != 1', parser=parser, engine=engine)\n ind = Series(df.index.get_level_values('rating').values, index=index,\n name='rating')\n exp = df[ind != 1]\n assert_frame_equal(res, exp)\n\n res = df.query('ilevel_0 == \"red\"', parser=parser, engine=engine)\n ind = Series(df.index.get_level_values(0).values, index=index)\n exp = df[ind == \"red\"]\n assert_frame_equal(res, exp)\n\n res = df.query('ilevel_0 != \"red\"', parser=parser, engine=engine)\n ind = Series(df.index.get_level_values(0).values, index=index)\n exp = df[ind != \"red\"]\n assert_frame_equal(res, exp)\n\n def test_query_with_partially_named_multiindex(self):\n for parser, engine in product(['pandas'], ENGINES):\n yield self.check_query_with_partially_named_multiindex, parser, engine\n\n def test_query_multiindex_get_index_resolvers(self):\n for parser, engine in product(['pandas'], ENGINES):\n yield self.check_query_multiindex_get_index_resolvers, parser, engine\n\n def check_query_multiindex_get_index_resolvers(self, parser, engine):\n df = mkdf(10, 3, r_idx_nlevels=2, r_idx_names=['spam', 'eggs'])\n resolvers = df._get_index_resolvers()\n\n def to_series(mi, level):\n level_values = mi.get_level_values(level)\n s = level_values.to_series()\n s.index = mi\n return s\n\n col_series = df.columns.to_series()\n expected = {'index': df.index,\n 'columns': col_series,\n 'spam': to_series(df.index, 'spam'),\n 'eggs': to_series(df.index, 'eggs'),\n 'C0': col_series}\n for k, v in resolvers.items():\n if isinstance(v, Index):\n assert v.is_(expected[k])\n elif isinstance(v, Series):\n tm.assert_series_equal(v, expected[k])\n else:\n raise AssertionError(\"object must be a Series or Index\")\n\n def test_raise_on_panel_with_multiindex(self):\n for parser, engine in product(PARSERS, ENGINES):\n yield self.check_raise_on_panel_with_multiindex, parser, engine\n\n def check_raise_on_panel_with_multiindex(self, parser, engine):\n tm.skip_if_no_ne()\n p = tm.makePanel(7)\n p.items = tm.makeCustomIndex(len(p.items), nlevels=2)\n with tm.assertRaises(NotImplementedError):\n pd.eval('p + 1', parser=parser, engine=engine)\n\n def test_raise_on_panel4d_with_multiindex(self):\n for parser, engine in product(PARSERS, ENGINES):\n yield self.check_raise_on_panel4d_with_multiindex, parser, engine\n\n def check_raise_on_panel4d_with_multiindex(self, parser, engine):\n tm.skip_if_no_ne()\n p4d = tm.makePanel4D(7)\n p4d.items = tm.makeCustomIndex(len(p4d.items), nlevels=2)\n with tm.assertRaises(NotImplementedError):\n pd.eval('p4d + 1', parser=parser, engine=engine)\n\n\nclass TestDataFrameQueryNumExprPandas(tm.TestCase):\n\n @classmethod\n def setUpClass(cls):\n super(TestDataFrameQueryNumExprPandas, cls).setUpClass()\n cls.engine = 'numexpr'\n cls.parser = 
'pandas'\n tm.skip_if_no_ne(cls.engine)\n\n @classmethod\n def tearDownClass(cls):\n super(TestDataFrameQueryNumExprPandas, cls).tearDownClass()\n del cls.engine, cls.parser\n\n def test_date_query_with_attribute_access(self):\n engine, parser = self.engine, self.parser\n skip_if_no_pandas_parser(parser)\n df = DataFrame(randn(5, 3))\n df['dates1'] = date_range('1/1/2012', periods=5)\n df['dates2'] = date_range('1/1/2013', periods=5)\n df['dates3'] = date_range('1/1/2014', periods=5)\n res = df.query('@df.dates1 < 20130101 < @df.dates3', engine=engine,\n parser=parser)\n expec = df[(df.dates1 < '20130101') & ('20130101' < df.dates3)]\n assert_frame_equal(res, expec)\n\n def test_date_query_no_attribute_access(self):\n engine, parser = self.engine, self.parser\n df = DataFrame(randn(5, 3))\n df['dates1'] = date_range('1/1/2012', periods=5)\n df['dates2'] = date_range('1/1/2013', periods=5)\n df['dates3'] = date_range('1/1/2014', periods=5)\n res = df.query('dates1 < 20130101 < dates3', engine=engine,\n parser=parser)\n expec = df[(df.dates1 < '20130101') & ('20130101' < df.dates3)]\n tm.assert_frame_equal(res, expec)\n\n def test_date_query_with_NaT(self):\n engine, parser = self.engine, self.parser\n n = 10\n df = DataFrame(randn(n, 3))\n df['dates1'] = date_range('1/1/2012', periods=n)\n df['dates2'] = date_range('1/1/2013', periods=n)\n df['dates3'] = date_range('1/1/2014', periods=n)\n df.loc[np.random.rand(n) > 0.5, 'dates1'] = pd.NaT\n df.loc[np.random.rand(n) > 0.5, 'dates3'] = pd.NaT\n res = df.query('dates1 < 20130101 < dates3', engine=engine,\n parser=parser)\n expec = df[(df.dates1 < '20130101') & ('20130101' < df.dates3)]\n assert_frame_equal(res, expec)\n\n def test_date_index_query(self):\n engine, parser = self.engine, self.parser\n n = 10\n df = DataFrame(randn(n, 3))\n df['dates1'] = date_range('1/1/2012', periods=n)\n df['dates3'] = date_range('1/1/2014', periods=n)\n df.set_index('dates1', inplace=True, drop=True)\n res = df.query('index < 20130101 < dates3', engine=engine,\n parser=parser)\n expec = df[(df.index < '20130101') & ('20130101' < df.dates3)]\n assert_frame_equal(res, expec)\n\n def test_date_index_query_with_NaT(self):\n engine, parser = self.engine, self.parser\n n = 10\n df = DataFrame(randn(n, 3))\n df['dates1'] = date_range('1/1/2012', periods=n)\n df['dates3'] = date_range('1/1/2014', periods=n)\n df.iloc[0, 0] = pd.NaT\n df.set_index('dates1', inplace=True, drop=True)\n res = df.query('index < 20130101 < dates3', engine=engine,\n parser=parser)\n expec = df[(df.index < '20130101') & ('20130101' < df.dates3)]\n assert_frame_equal(res, expec)\n\n def test_date_index_query_with_NaT_duplicates(self):\n engine, parser = self.engine, self.parser\n n = 10\n d = {}\n d['dates1'] = date_range('1/1/2012', periods=n)\n d['dates3'] = date_range('1/1/2014', periods=n)\n df = DataFrame(d)\n df.loc[np.random.rand(n) > 0.5, 'dates1'] = pd.NaT\n df.set_index('dates1', inplace=True, drop=True)\n res = df.query('index < 20130101 < dates3', engine=engine, parser=parser)\n expec = df[(df.index.to_series() < '20130101') & ('20130101' < df.dates3)]\n assert_frame_equal(res, expec)\n\n def test_date_query_with_non_date(self):\n engine, parser = self.engine, self.parser\n\n n = 10\n df = DataFrame({'dates': date_range('1/1/2012', periods=n),\n 'nondate': np.arange(n)})\n\n ops = '==', '!=', '<', '>', '<=', '>='\n\n for op in ops:\n with tm.assertRaises(TypeError):\n df.query('dates %s nondate' % op, parser=parser, engine=engine)\n\n def test_query_syntax_error(self):\n engine, 
parser = self.engine, self.parser\n df = DataFrame({\"i\": lrange(10), \"+\": lrange(3, 13),\n \"r\": lrange(4, 14)})\n with tm.assertRaises(SyntaxError):\n df.query('i - +', engine=engine, parser=parser)\n\n def test_query_scope(self):\n from pandas.computation.ops import UndefinedVariableError\n engine, parser = self.engine, self.parser\n skip_if_no_pandas_parser(parser)\n\n df = DataFrame(np.random.randn(20, 2), columns=list('ab'))\n\n a, b = 1, 2\n res = df.query('a > b', engine=engine, parser=parser)\n expected = df[df.a > df.b]\n tm.assert_frame_equal(res, expected)\n\n res = df.query('@a > b', engine=engine, parser=parser)\n expected = df[a > df.b]\n tm.assert_frame_equal(res, expected)\n\n # no local variable c\n with tm.assertRaises(UndefinedVariableError):\n df.query('@a > b > @c', engine=engine, parser=parser)\n\n # no column named 'c'\n with tm.assertRaises(UndefinedVariableError):\n df.query('@a > b > c', engine=engine, parser=parser)\n\n def test_query_doesnt_pickup_local(self):\n from pandas.computation.ops import UndefinedVariableError\n\n engine, parser = self.engine, self.parser\n n = m = 10\n df = DataFrame(np.random.randint(m, size=(n, 3)), columns=list('abc'))\n\n from numpy import sin\n\n # we don't pick up the local 'sin'\n with tm.assertRaises(UndefinedVariableError):\n df.query('sin > 5', engine=engine, parser=parser)\n\n def test_query_builtin(self):\n from pandas.computation.engines import NumExprClobberingError\n engine, parser = self.engine, self.parser\n\n n = m = 10\n df = DataFrame(np.random.randint(m, size=(n, 3)), columns=list('abc'))\n\n df.index.name = 'sin'\n with tm.assertRaisesRegexp(NumExprClobberingError,\n 'Variables in expression.+'):\n df.query('sin > 5', engine=engine, parser=parser)\n\n def test_query(self):\n engine, parser = self.engine, self.parser\n df = DataFrame(np.random.randn(10, 3), columns=['a', 'b', 'c'])\n\n assert_frame_equal(df.query('a < b', engine=engine, parser=parser),\n df[df.a < df.b])\n assert_frame_equal(df.query('a + b > b * c', engine=engine,\n parser=parser),\n df[df.a + df.b > df.b * df.c])\n\n def test_query_index_with_name(self):\n engine, parser = self.engine, self.parser\n df = DataFrame(np.random.randint(10, size=(10, 3)),\n index=Index(range(10), name='blob'),\n columns=['a', 'b', 'c'])\n res = df.query('(blob < 5) & (a < b)', engine=engine, parser=parser)\n expec = df[(df.index < 5) & (df.a < df.b)]\n assert_frame_equal(res, expec)\n\n res = df.query('blob < b', engine=engine, parser=parser)\n expec = df[df.index < df.b]\n\n assert_frame_equal(res, expec)\n\n def test_query_index_without_name(self):\n engine, parser = self.engine, self.parser\n df = DataFrame(np.random.randint(10, size=(10, 3)),\n index=range(10), columns=['a', 'b', 'c'])\n\n # \"index\" should refer to the index\n res = df.query('index < b', engine=engine, parser=parser)\n expec = df[df.index < df.b]\n assert_frame_equal(res, expec)\n\n # test against a scalar\n res = df.query('index < 5', engine=engine, parser=parser)\n expec = df[df.index < 5]\n assert_frame_equal(res, expec)\n\n def test_nested_scope(self):\n engine = self.engine\n parser = self.parser\n\n skip_if_no_pandas_parser(parser)\n\n df = DataFrame(np.random.randn(5, 3))\n df2 = DataFrame(np.random.randn(5, 3))\n expected = df[(df > 0) & (df2 > 0)]\n\n result = df.query('(@df > 0) & (@df2 > 0)', engine=engine, parser=parser)\n assert_frame_equal(result, expected)\n\n result = pd.eval('df[df > 0 and df2 > 0]', engine=engine,\n parser=parser)\n assert_frame_equal(result, expected)\n\n 
result = pd.eval('df[df > 0 and df2 > 0 and df[df > 0] > 0]',\n engine=engine, parser=parser)\n expected = df[(df > 0) & (df2 > 0) & (df[df > 0] > 0)]\n assert_frame_equal(result, expected)\n\n result = pd.eval('df[(df>0) & (df2>0)]', engine=engine, parser=parser)\n expected = df.query('(@df>0) & (@df2>0)', engine=engine, parser=parser)\n assert_frame_equal(result, expected)\n\n def test_nested_raises_on_local_self_reference(self):\n from pandas.computation.ops import UndefinedVariableError\n\n df = DataFrame(np.random.randn(5, 3))\n\n # can't reference ourself b/c we're a local so @ is necessary\n with tm.assertRaises(UndefinedVariableError):\n df.query('df > 0', engine=self.engine, parser=self.parser)\n\n def test_local_syntax(self):\n skip_if_no_pandas_parser(self.parser)\n\n engine, parser = self.engine, self.parser\n df = DataFrame(randn(100, 10), columns=list('abcdefghij'))\n b = 1\n expect = df[df.a < b]\n result = df.query('a < @b', engine=engine, parser=parser)\n assert_frame_equal(result, expect)\n\n expect = df[df.a < df.b]\n result = df.query('a < b', engine=engine, parser=parser)\n assert_frame_equal(result, expect)\n\n def test_chained_cmp_and_in(self):\n skip_if_no_pandas_parser(self.parser)\n engine, parser = self.engine, self.parser\n cols = list('abc')\n df = DataFrame(randn(100, len(cols)), columns=cols)\n res = df.query('a < b < c and a not in b not in c', engine=engine,\n parser=parser)\n ind = (df.a < df.b) & (df.b < df.c) & ~df.b.isin(df.a) & ~df.c.isin(df.b)\n expec = df[ind]\n assert_frame_equal(res, expec)\n\n def test_local_variable_with_in(self):\n engine, parser = self.engine, self.parser\n skip_if_no_pandas_parser(parser)\n a = Series(np.random.randint(3, size=15), name='a')\n b = Series(np.random.randint(10, size=15), name='b')\n df = DataFrame({'a': a, 'b': b})\n\n expected = df.loc[(df.b - 1).isin(a)]\n result = df.query('b - 1 in a', engine=engine, parser=parser)\n tm.assert_frame_equal(expected, result)\n\n b = Series(np.random.randint(10, size=15), name='b')\n expected = df.loc[(b - 1).isin(a)]\n result = df.query('@b - 1 in a', engine=engine, parser=parser)\n tm.assert_frame_equal(expected, result)\n\n def test_at_inside_string(self):\n engine, parser = self.engine, self.parser\n skip_if_no_pandas_parser(parser)\n c = 1\n df = DataFrame({'a': ['a', 'a', 'b', 'b', '@c', '@c']})\n result = df.query('a == \"@c\"', engine=engine, parser=parser)\n expected = df[df.a == \"@c\"]\n tm.assert_frame_equal(result, expected)\n\n def test_query_undefined_local(self):\n from pandas.computation.ops import UndefinedVariableError\n engine, parser = self.engine, self.parser\n skip_if_no_pandas_parser(parser)\n df = DataFrame(np.random.rand(10, 2), columns=list('ab'))\n with tm.assertRaisesRegexp(UndefinedVariableError,\n \"local variable 'c' is not defined\"):\n df.query('a == @c', engine=engine, parser=parser)\n\n def test_index_resolvers_come_after_columns_with_the_same_name(self):\n n = 1\n a = np.r_[20:101:20]\n\n df = DataFrame({'index': a, 'b': np.random.randn(a.size)})\n df.index.name = 'index'\n result = df.query('index > 5', engine=self.engine, parser=self.parser)\n expected = df[df['index'] > 5]\n tm.assert_frame_equal(result, expected)\n\n df = DataFrame({'index': a, 'b': np.random.randn(a.size)})\n result = df.query('ilevel_0 > 5', engine=self.engine, parser=self.parser)\n expected = df.loc[df.index[df.index > 5]]\n tm.assert_frame_equal(result, expected)\n\n df = DataFrame({'a': a, 'b': np.random.randn(a.size)})\n df.index.name = 'a'\n result = df.query('a > 
5', engine=self.engine, parser=self.parser)\n expected = df[df.a > 5]\n tm.assert_frame_equal(result, expected)\n\n result = df.query('index > 5', engine=self.engine, parser=self.parser)\n expected = df.loc[df.index[df.index > 5]]\n tm.assert_frame_equal(result, expected)\n\n def test_inf(self):\n n = 10\n df = DataFrame({'a': np.random.rand(n), 'b': np.random.rand(n)})\n df.loc[::2, 0] = np.inf\n ops = '==', '!='\n d = dict(zip(ops, (operator.eq, operator.ne)))\n for op, f in d.items():\n q = 'a %s inf' % op\n expected = df[f(df.a, np.inf)]\n result = df.query(q, engine=self.engine, parser=self.parser)\n tm.assert_frame_equal(result, expected)\n\n\nclass TestDataFrameQueryNumExprPython(TestDataFrameQueryNumExprPandas):\n\n @classmethod\n def setUpClass(cls):\n super(TestDataFrameQueryNumExprPython, cls).setUpClass()\n cls.engine = 'numexpr'\n cls.parser = 'python'\n tm.skip_if_no_ne(cls.engine)\n cls.frame = _frame.copy()\n\n def test_date_query_no_attribute_access(self):\n engine, parser = self.engine, self.parser\n df = DataFrame(randn(5, 3))\n df['dates1'] = date_range('1/1/2012', periods=5)\n df['dates2'] = date_range('1/1/2013', periods=5)\n df['dates3'] = date_range('1/1/2014', periods=5)\n res = df.query('(dates1 < 20130101) & (20130101 < dates3)',\n engine=engine, parser=parser)\n expec = df[(df.dates1 < '20130101') & ('20130101' < df.dates3)]\n tm.assert_frame_equal(res, expec)\n def test_date_query_with_NaT(self):\n engine, parser = self.engine, self.parser\n n = 10\n df = DataFrame(randn(n, 3))\n df['dates1'] = date_range('1/1/2012', periods=n)\n df['dates2'] = date_range('1/1/2013', periods=n)\n df['dates3'] = date_range('1/1/2014', periods=n)\n df.loc[np.random.rand(n) > 0.5, 'dates1'] = pd.NaT\n df.loc[np.random.rand(n) > 0.5, 'dates3'] = pd.NaT\n res = df.query('(dates1 < 20130101) & (20130101 < dates3)',\n engine=engine, parser=parser)\n expec = df[(df.dates1 < '20130101') & ('20130101' < df.dates3)]\n assert_frame_equal(res, expec)\n\n def test_date_index_query(self):\n engine, parser = self.engine, self.parser\n n = 10\n df = DataFrame(randn(n, 3))\n df['dates1'] = date_range('1/1/2012', periods=n)\n df['dates3'] = date_range('1/1/2014', periods=n)\n df.set_index('dates1', inplace=True, drop=True)\n res = df.query('(index < 20130101) & (20130101 < dates3)',\n engine=engine, parser=parser)\n expec = df[(df.index < '20130101') & ('20130101' < df.dates3)]\n assert_frame_equal(res, expec)\n\n def test_date_index_query_with_NaT(self):\n engine, parser = self.engine, self.parser\n n = 10\n df = DataFrame(randn(n, 3))\n df['dates1'] = date_range('1/1/2012', periods=n)\n df['dates3'] = date_range('1/1/2014', periods=n)\n df.iloc[0, 0] = pd.NaT\n df.set_index('dates1', inplace=True, drop=True)\n res = df.query('(index < 20130101) & (20130101 < dates3)',\n engine=engine, parser=parser)\n expec = df[(df.index < '20130101') & ('20130101' < df.dates3)]\n assert_frame_equal(res, expec)\n\n def test_date_index_query_with_NaT_duplicates(self):\n engine, parser = self.engine, self.parser\n n = 10\n df = DataFrame(randn(n, 3))\n df['dates1'] = date_range('1/1/2012', periods=n)\n df['dates3'] = date_range('1/1/2014', periods=n)\n df.loc[np.random.rand(n) > 0.5, 'dates1'] = pd.NaT\n df.set_index('dates1', inplace=True, drop=True)\n with tm.assertRaises(NotImplementedError):\n df.query('index < 20130101 < dates3', engine=engine, parser=parser)\n\n def test_nested_scope(self):\n from pandas.computation.ops import UndefinedVariableError\n engine = self.engine\n parser = self.parser\n # smoke 
test\n x = 1\n result = pd.eval('x + 1', engine=engine, parser=parser)\n self.assertEqual(result, 2)\n\n df = DataFrame(np.random.randn(5, 3))\n df2 = DataFrame(np.random.randn(5, 3))\n\n # don't have the pandas parser\n with tm.assertRaises(SyntaxError):\n df.query('(@df>0) & (@df2>0)', engine=engine, parser=parser)\n\n with tm.assertRaises(UndefinedVariableError):\n df.query('(df>0) & (df2>0)', engine=engine, parser=parser)\n\n expected = df[(df > 0) & (df2 > 0)]\n result = pd.eval('df[(df > 0) & (df2 > 0)]', engine=engine,\n parser=parser)\n tm.assert_frame_equal(expected, result)\n\n expected = df[(df > 0) & (df2 > 0) & (df[df > 0] > 0)]\n result = pd.eval('df[(df > 0) & (df2 > 0) & (df[df > 0] > 0)]',\n engine=engine, parser=parser)\n tm.assert_frame_equal(expected, result)\n\n\nclass TestDataFrameQueryPythonPandas(TestDataFrameQueryNumExprPandas):\n\n @classmethod\n def setUpClass(cls):\n super(TestDataFrameQueryPythonPandas, cls).setUpClass()\n cls.engine = 'python'\n cls.parser = 'pandas'\n cls.frame = _frame.copy()\n\n def test_query_builtin(self):\n from pandas.computation.engines import NumExprClobberingError\n engine, parser = self.engine, self.parser\n\n n = m = 10\n df = DataFrame(np.random.randint(m, size=(n, 3)), columns=list('abc'))\n\n df.index.name = 'sin'\n expected = df[df.index > 5]\n result = df.query('sin > 5', engine=engine, parser=parser)\n tm.assert_frame_equal(expected, result)\n\n\nclass TestDataFrameQueryPythonPython(TestDataFrameQueryNumExprPython):\n\n @classmethod\n def setUpClass(cls):\n super(TestDataFrameQueryPythonPython, cls).setUpClass()\n cls.engine = cls.parser = 'python'\n cls.frame = _frame.copy()\n\n def test_query_builtin(self):\n from pandas.computation.engines import NumExprClobberingError\n engine, parser = self.engine, self.parser\n\n n = m = 10\n df = DataFrame(np.random.randint(m, size=(n, 3)), columns=list('abc'))\n\n df.index.name = 'sin'\n expected = df[df.index > 5]\n result = df.query('sin > 5', engine=engine, parser=parser)\n tm.assert_frame_equal(expected, result)\n\n\nPARSERS = 'python', 'pandas'\nENGINES = 'python', 'numexpr'\n\n\nclass TestDataFrameQueryStrings(object):\n def check_str_query_method(self, parser, engine):\n tm.skip_if_no_ne(engine)\n df = DataFrame(randn(10, 1), columns=['b'])\n df['strings'] = Series(list('aabbccddee'))\n expect = df[df.strings == 'a']\n\n if parser != 'pandas':\n col = 'strings'\n lst = '\"a\"'\n\n lhs = [col] * 2 + [lst] * 2\n rhs = lhs[::-1]\n\n eq, ne = '==', '!='\n ops = 2 * ([eq] + [ne])\n\n for lhs, op, rhs in zip(lhs, ops, rhs):\n ex = '{lhs} {op} {rhs}'.format(lhs=lhs, op=op, rhs=rhs)\n assertRaises(NotImplementedError, df.query, ex, engine=engine,\n parser=parser, local_dict={'strings': df.strings})\n else:\n res = df.query('\"a\" == strings', engine=engine, parser=parser)\n assert_frame_equal(res, expect)\n\n res = df.query('strings == \"a\"', engine=engine, parser=parser)\n assert_frame_equal(res, expect)\n assert_frame_equal(res, df[df.strings.isin(['a'])])\n\n expect = df[df.strings != 'a']\n res = df.query('strings != \"a\"', engine=engine, parser=parser)\n assert_frame_equal(res, expect)\n\n res = df.query('\"a\" != strings', engine=engine, parser=parser)\n assert_frame_equal(res, expect)\n assert_frame_equal(res, df[~df.strings.isin(['a'])])\n\n def test_str_query_method(self):\n for parser, engine in product(PARSERS, ENGINES):\n yield self.check_str_query_method, parser, engine\n\n def test_str_list_query_method(self):\n for parser, engine in product(PARSERS, ENGINES):\n yield 
self.check_str_list_query_method, parser, engine\n\n def check_str_list_query_method(self, parser, engine):\n tm.skip_if_no_ne(engine)\n df = DataFrame(randn(10, 1), columns=['b'])\n df['strings'] = Series(list('aabbccddee'))\n expect = df[df.strings.isin(['a', 'b'])]\n\n if parser != 'pandas':\n col = 'strings'\n lst = '[\"a\", \"b\"]'\n\n lhs = [col] * 2 + [lst] * 2\n rhs = lhs[::-1]\n\n eq, ne = '==', '!='\n ops = 2 * ([eq] + [ne])\n\n for lhs, op, rhs in zip(lhs, ops, rhs):\n ex = '{lhs} {op} {rhs}'.format(lhs=lhs, op=op, rhs=rhs)\n with tm.assertRaises(NotImplementedError):\n df.query(ex, engine=engine, parser=parser)\n else:\n res = df.query('strings == [\"a\", \"b\"]', engine=engine,\n parser=parser)\n assert_frame_equal(res, expect)\n\n res = df.query('[\"a\", \"b\"] == strings', engine=engine,\n parser=parser)\n assert_frame_equal(res, expect)\n\n expect = df[~df.strings.isin(['a', 'b'])]\n\n res = df.query('strings != [\"a\", \"b\"]', engine=engine,\n parser=parser)\n assert_frame_equal(res, expect)\n\n res = df.query('[\"a\", \"b\"] != strings', engine=engine,\n parser=parser)\n assert_frame_equal(res, expect)\n\n def check_query_with_string_columns(self, parser, engine):\n tm.skip_if_no_ne(engine)\n df = DataFrame({'a': list('aaaabbbbcccc'),\n 'b': list('aabbccddeeff'),\n 'c': np.random.randint(5, size=12),\n 'd': np.random.randint(9, size=12)})\n if parser == 'pandas':\n res = df.query('a in b', parser=parser, engine=engine)\n expec = df[df.a.isin(df.b)]\n assert_frame_equal(res, expec)\n\n res = df.query('a in b and c < d', parser=parser, engine=engine)\n expec = df[df.a.isin(df.b) & (df.c < df.d)]\n assert_frame_equal(res, expec)\n else:\n with assertRaises(NotImplementedError):\n df.query('a in b', parser=parser, engine=engine)\n\n with assertRaises(NotImplementedError):\n df.query('a in b and c < d', parser=parser, engine=engine)\n\n def test_query_with_string_columns(self):\n for parser, engine in product(PARSERS, ENGINES):\n yield self.check_query_with_string_columns, parser, engine\n\n def check_object_array_eq_ne(self, parser, engine):\n tm.skip_if_no_ne(engine)\n df = DataFrame({'a': list('aaaabbbbcccc'),\n 'b': list('aabbccddeeff'),\n 'c': np.random.randint(5, size=12),\n 'd': np.random.randint(9, size=12)})\n res = df.query('a == b', parser=parser, engine=engine)\n exp = df[df.a == df.b]\n assert_frame_equal(res, exp)\n\n res = df.query('a != b', parser=parser, engine=engine)\n exp = df[df.a != df.b]\n assert_frame_equal(res, exp)\n\n def test_object_array_eq_ne(self):\n for parser, engine in product(PARSERS, ENGINES):\n yield self.check_object_array_eq_ne, parser, engine\n\n def check_query_with_nested_strings(self, parser, engine):\n tm.skip_if_no_ne(engine)\n skip_if_no_pandas_parser(parser)\n from pandas.compat import StringIO\n raw = \"\"\"id event timestamp\n 1 \"page 1 load\" 1/1/2014 0:00:01\n 1 \"page 1 exit\" 1/1/2014 0:00:31\n 2 \"page 2 load\" 1/1/2014 0:01:01\n 2 \"page 2 exit\" 1/1/2014 0:01:31\n 3 \"page 3 load\" 1/1/2014 0:02:01\n 3 \"page 3 exit\" 1/1/2014 0:02:31\n 4 \"page 1 load\" 2/1/2014 1:00:01\n 4 \"page 1 exit\" 2/1/2014 1:00:31\n 5 \"page 2 load\" 2/1/2014 1:01:01\n 5 \"page 2 exit\" 2/1/2014 1:01:31\n 6 \"page 3 load\" 2/1/2014 1:02:01\n 6 \"page 3 exit\" 2/1/2014 1:02:31\n \"\"\"\n df = pd.read_csv(StringIO(raw), sep=r'\\s{2,}', engine='python',\n parse_dates=['timestamp'])\n expected = df[df.event == '\"page 1 load\"']\n res = df.query(\"\"\"'\"page 1 load\"' in event\"\"\", parser=parser,\n engine=engine)\n 
tm.assert_frame_equal(expected, res)\n\n def test_query_with_nested_string(self):\n for parser, engine in product(PARSERS, ENGINES):\n yield self.check_query_with_nested_strings, parser, engine\n\n def check_query_with_nested_special_character(self, parser, engine):\n skip_if_no_pandas_parser(parser)\n tm.skip_if_no_ne(engine)\n df = DataFrame({'a': ['a', 'b', 'test & test'],\n 'b': [1, 2, 3]})\n res = df.query('a == \"test & test\"', parser=parser, engine=engine)\n expec = df[df.a == 'test & test']\n tm.assert_frame_equal(res, expec)\n\n def test_query_with_nested_special_character(self):\n for parser, engine in product(PARSERS, ENGINES):\n yield self.check_query_with_nested_special_character, parser, engine\n\n def check_query_lex_compare_strings(self, parser, engine):\n tm.skip_if_no_ne(engine=engine)\n import operator as opr\n\n a = Series(tm.choice(list('abcde'), 20))\n b = Series(np.arange(a.size))\n df = DataFrame({'X': a, 'Y': b})\n\n ops = {'<': opr.lt, '>': opr.gt, '<=': opr.le, '>=': opr.ge}\n\n for op, func in ops.items():\n res = df.query('X %s \"d\"' % op, engine=engine, parser=parser)\n expected = df[func(df.X, 'd')]\n assert_frame_equal(res, expected)\n\n def test_query_lex_compare_strings(self):\n for parser, engine in product(PARSERS, ENGINES):\n yield self.check_query_lex_compare_strings, parser, engine\n\n def check_query_single_element_booleans(self, parser, engine):\n tm.skip_if_no_ne(engine)\n columns = 'bid', 'bidsize', 'ask', 'asksize'\n data = np.random.randint(2, size=(1, len(columns))).astype(bool)\n df = DataFrame(data, columns=columns)\n res = df.query('bid & ask', engine=engine, parser=parser)\n expected = df[df.bid & df.ask]\n assert_frame_equal(res, expected)\n\n def test_query_single_element_booleans(self):\n for parser, engine in product(PARSERS, ENGINES):\n yield self.check_query_single_element_booleans, parser, engine\n\n def check_query_string_scalar_variable(self, parser, engine):\n tm.skip_if_no_ne(engine)\n df = pd.DataFrame({'Symbol': ['BUD US', 'BUD US', 'IBM US', 'IBM US'],\n 'Price': [109.70, 109.72, 183.30, 183.35]})\n e = df[df.Symbol == 'BUD US']\n symb = 'BUD US'\n r = df.query('Symbol == @symb', parser=parser, engine=engine)\n tm.assert_frame_equal(e, r)\n\n def test_query_string_scalar_variable(self):\n for parser, engine in product(['pandas'], ENGINES):\n yield self.check_query_string_scalar_variable, parser, engine\n\n\nclass TestDataFrameEvalNumExprPandas(tm.TestCase):\n\n @classmethod\n def setUpClass(cls):\n super(TestDataFrameEvalNumExprPandas, cls).setUpClass()\n cls.engine = 'numexpr'\n cls.parser = 'pandas'\n tm.skip_if_no_ne()\n\n def setUp(self):\n self.frame = DataFrame(randn(10, 3), columns=list('abc'))\n\n def tearDown(self):\n del self.frame\n\n def test_simple_expr(self):\n res = self.frame.eval('a + b', engine=self.engine, parser=self.parser)\n expect = self.frame.a + self.frame.b\n assert_series_equal(res, expect)\n\n def test_bool_arith_expr(self):\n res = self.frame.eval('a[a < 1] + b', engine=self.engine,\n parser=self.parser)\n expect = self.frame.a[self.frame.a < 1] + self.frame.b\n assert_series_equal(res, expect)\n\n def test_invalid_type_for_operator_raises(self):\n df = DataFrame({'a': [1, 2], 'b': ['c', 'd']})\n ops = '+', '-', '*', '/'\n for op in ops:\n with tm.assertRaisesRegexp(TypeError,\n \"unsupported operand type\\(s\\) for \"\n \".+: '.+' and '.+'\"):\n df.eval('a {0} b'.format(op), engine=self.engine,\n parser=self.parser)\n\n\nclass TestDataFrameEvalNumExprPython(TestDataFrameEvalNumExprPandas):\n\n 
@classmethod\n def setUpClass(cls):\n super(TestDataFrameEvalNumExprPython, cls).setUpClass()\n cls.engine = 'numexpr'\n cls.parser = 'python'\n tm.skip_if_no_ne(cls.engine)\n\n\nclass TestDataFrameEvalPythonPandas(TestDataFrameEvalNumExprPandas):\n\n @classmethod\n def setUpClass(cls):\n super(TestDataFrameEvalPythonPandas, cls).setUpClass()\n cls.engine = 'python'\n cls.parser = 'pandas'\n\n\nclass TestDataFrameEvalPythonPython(TestDataFrameEvalNumExprPython):\n\n @classmethod\n def setUpClass(cls):\n super(TestDataFrameEvalPythonPython, cls).tearDownClass()\n cls.engine = cls.parser = 'python'\n\n\nif __name__ == '__main__':\n nose.runmodule(argv=[__file__, '-vvs', '-x', '--pdb', '--pdb-failure'],\n exit=False)\n"
] |
[
[
"pandas.util.testing.choice",
"pandas.merge",
"pandas.core.format.set_option",
"numpy.sqrt",
"pandas.DataFrame.from_csv",
"pandas.util.testing.assert_produces_warning",
"pandas.MultiIndex.from_tuples",
"pandas.compat.lzip",
"numpy.where",
"pandas.compat.OrderedDict",
"pandas.compat.text_type",
"pandas.tseries.tools.to_datetime",
"numpy.zeros",
"pandas.DataFrame.from_items",
"pandas.concat",
"pandas.core.nanops.nansem",
"pandas.MultiIndex",
"numpy.random.choice",
"numpy.median",
"pandas.util.testing.getMixedTypeDict",
"pandas.util.testing.getSeriesData",
"pandas.core.common.is_integer",
"pandas.date_range",
"scipy.stats.kurtosis",
"numpy.array",
"pandas.timedelta_range",
"numpy.random.shuffle",
"numpy.datetime64",
"numpy.testing.assert_array_equal",
"numpy.random.permutation",
"numpy.shape",
"numpy.ma.masked_array",
"numpy.isinf",
"pandas.compat.range",
"pandas.Series",
"pandas.util.testing.assert_isinstance",
"pandas.core.common.is_integer_dtype",
"numpy.asarray",
"pandas.util.testing.makePanel",
"numpy.core.records.fromarrays",
"numpy.var",
"pandas.reset_option",
"pandas.compat.StringIO",
"numpy.size",
"numpy.std",
"pandas.set_option",
"pandas.util.testing.equalContents",
"scipy.stats.skew",
"numpy.putmask",
"pandas.core.datetools.BDay",
"pandas.compat.u",
"pandas.util.testing.makeStringIndex",
"numpy.timedelta64",
"numpy.atleast_2d",
"numpy.random.rand",
"pandas.DataFrame.from_dict",
"numpy.corrcoef",
"numpy.ma.mrecords.fromarrays",
"pandas.util.misc.is_little_endian",
"numpy.array_equal",
"pandas.util.testing.assertRaisesRegexp",
"numpy.ma.copy",
"numpy.ones",
"pandas.util.testing.getTimeSeriesData",
"pandas.util.testing.rands",
"pandas.Period",
"numpy.isscalar",
"numpy.empty",
"pandas.core.nanops.nanvar",
"pandas.util.testing.assert_dict_equal",
"numpy.linspace",
"pandas.DataFrame",
"numpy.round",
"pandas.compat.map",
"numpy.mean",
"pandas.compat.iteritems",
"pandas.DataFrame.from_records",
"numpy.random.randint",
"numpy.hstack",
"pandas.util.testing.assert_series_equal",
"pandas.Index",
"pandas.util.testing._skip_if_no_scipy",
"numpy.lexsort",
"pandas.DatetimeIndex",
"numpy.repeat",
"pandas.compat.long",
"pandas.notnull",
"numpy.nonzero",
"numpy.isnan",
"pandas.util.testing.assert_almost_equal",
"pandas.Timedelta",
"numpy.cov",
"pandas.util.testing.skip_if_no_ne",
"pandas.util.testing.makeCustomDataframe",
"pandas.eval",
"pandas.isnull",
"numpy.tile",
"numpy.percentile",
"pandas.util.testing.assertRaises",
"pandas.util.testing.makePeriodFrame",
"pandas.compat.zip",
"pandas.core.common.pprint_thing",
"numpy.dot",
"pandas.to_datetime",
"pandas.util.testing.ensure_clean",
"numpy.dtype",
"pandas.util.testing.assert_frame_equal",
"numpy.random.randn",
"pandas.util.testing.makeDataFrame",
"pandas.util.testing.makeTimeDataFrame",
"pandas.util.testing.makePanel4D",
"pandas.read_csv",
"numpy.arange",
"pandas.core.common.is_float_dtype",
"pandas.compat.lmap",
"numpy.apply_along_axis",
"pandas.util.testing.rands_array",
"pandas.Categorical",
"pandas.option_context",
"pandas.MultiIndex.from_product",
"numpy.ma.masked_all",
"numpy.random.random",
"pandas.util.testing.makeTimeSeries",
"pandas.sparse.api.SparseDataFrame",
"numpy.abs",
"pandas.MultiIndex.from_arrays",
"pandas.core.common.isnull",
"pandas.Timestamp",
"pandas.compat.lrange"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [
"0.23",
"0.21",
"0.19",
"0.24",
"0.20"
],
"scipy": [
"0.13",
"1.6",
"0.14",
"1.10",
"0.15",
"1.4",
"1.3",
"1.9",
"0.19",
"1.5",
"0.18",
"1.2",
"1.7",
"0.12",
"1.0",
"0.17",
"0.16",
"1.8"
],
"tensorflow": []
}
] |
noahshpak/ray
|
[
"edd783bc327760a4892ab89222ee551e42df15b9",
"edd783bc327760a4892ab89222ee551e42df15b9"
] |
[
"rllib/models/catalog.py",
"rllib/evaluation/per_policy_sample_collector.py"
] |
[
"from functools import partial\nimport gym\nimport logging\nimport numpy as np\nimport tree\nfrom typing import List\n\nfrom ray.tune.registry import RLLIB_MODEL, RLLIB_PREPROCESSOR, \\\n RLLIB_ACTION_DIST, _global_registry\nfrom ray.rllib.models.action_dist import ActionDistribution\nfrom ray.rllib.models.modelv2 import ModelV2\nfrom ray.rllib.models.preprocessors import get_preprocessor, Preprocessor\nfrom ray.rllib.models.tf.fcnet_v1 import FullyConnectedNetwork\nfrom ray.rllib.models.tf.lstm_v1 import LSTM\nfrom ray.rllib.models.tf.modelv1_compat import make_v1_wrapper\nfrom ray.rllib.models.tf.recurrent_net import LSTMWrapper\nfrom ray.rllib.models.tf.tf_action_dist import Categorical, \\\n Deterministic, DiagGaussian, Dirichlet, \\\n MultiActionDistribution, MultiCategorical\nfrom ray.rllib.models.tf.visionnet_v1 import VisionNetwork\nfrom ray.rllib.models.torch.torch_action_dist import TorchCategorical, \\\n TorchDeterministic, TorchDiagGaussian, \\\n TorchMultiActionDistribution, TorchMultiCategorical\nfrom ray.rllib.utils.annotations import DeveloperAPI, PublicAPI\nfrom ray.rllib.utils.deprecation import deprecation_warning, DEPRECATED_VALUE\nfrom ray.rllib.utils.error import UnsupportedSpaceException\nfrom ray.rllib.utils.framework import try_import_tf\nfrom ray.rllib.utils.spaces.simplex import Simplex\nfrom ray.rllib.utils.spaces.space_utils import flatten_space\nfrom ray.rllib.utils.typing import ModelConfigDict, TensorType\n\ntf1, tf, tfv = try_import_tf()\n\nlogger = logging.getLogger(__name__)\n\n# yapf: disable\n# __sphinx_doc_begin__\nMODEL_DEFAULTS: ModelConfigDict = {\n # === Built-in options ===\n # Filter config. List of [out_channels, kernel, stride] for each filter\n \"conv_filters\": None,\n # Nonlinearity for built-in convnet\n \"conv_activation\": \"relu\",\n # Nonlinearity for fully connected net (tanh, relu)\n \"fcnet_activation\": \"tanh\",\n # Number of hidden layers for fully connected net\n \"fcnet_hiddens\": [256, 256],\n # For DiagGaussian action distributions, make the second half of the model\n # outputs floating bias variables instead of state-dependent. This only\n # has an effect is using the default fully connected net.\n \"free_log_std\": False,\n # Whether to skip the final linear layer used to resize the hidden layer\n # outputs to size `num_outputs`. 
If True, then the last hidden layer\n # should already match num_outputs.\n \"no_final_linear\": False,\n # Whether layers should be shared for the value function.\n \"vf_share_layers\": True,\n\n # == LSTM ==\n # Whether to wrap the model with an LSTM.\n \"use_lstm\": False,\n # Max seq len for training the LSTM, defaults to 20.\n \"max_seq_len\": 20,\n # Size of the LSTM cell.\n \"lstm_cell_size\": 256,\n # Whether to feed a_{t-1}, r_{t-1} to LSTM.\n \"lstm_use_prev_action_reward\": False,\n # When using modelv1 models with a modelv2 algorithm, you may have to\n # define the state shape here (e.g., [256, 256]).\n \"state_shape\": None,\n\n # == Atari ==\n # Whether to enable framestack for Atari envs\n \"framestack\": True,\n # Final resized frame dimension\n \"dim\": 84,\n # (deprecated) Converts ATARI frame to 1 Channel Grayscale image\n \"grayscale\": False,\n # (deprecated) Changes frame to range from [-1, 1] if true\n \"zero_mean\": True,\n\n # === Options for custom models ===\n # Name of a custom model to use\n \"custom_model\": None,\n # Extra options to pass to the custom classes.\n # These will be available in the Model's\n \"custom_model_config\": {},\n # Name of a custom action distribution to use.\n \"custom_action_dist\": None,\n # Custom preprocessors are deprecated. Please use a wrapper class around\n # your environment instead to preprocess observations.\n \"custom_preprocessor\": None,\n\n # Deprecated config keys.\n \"custom_options\": DEPRECATED_VALUE,\n}\n# __sphinx_doc_end__\n# yapf: enable\n\n\n@PublicAPI\nclass ModelCatalog:\n \"\"\"Registry of models, preprocessors, and action distributions for envs.\n\n Examples:\n >>> prep = ModelCatalog.get_preprocessor(env)\n >>> observation = prep.transform(raw_observation)\n\n >>> dist_class, dist_dim = ModelCatalog.get_action_dist(\n ... env.action_space, {})\n >>> model = ModelCatalog.get_model_v2(\n ... 
obs_space, action_space, num_outputs, options)\n >>> dist = dist_class(model.outputs, model)\n >>> action = dist.sample()\n \"\"\"\n\n @staticmethod\n @DeveloperAPI\n def get_action_dist(action_space: gym.Space,\n config: ModelConfigDict,\n dist_type: str = None,\n framework: str = \"tf\",\n **kwargs) -> (type, int):\n \"\"\"Returns a distribution class and size for the given action space.\n\n Args:\n action_space (Space): Action space of the target gym env.\n config (Optional[dict]): Optional model config.\n dist_type (Optional[str]): Identifier of the action distribution\n interpreted as a hint.\n framework (str): One of \"tf\", \"tfe\", or \"torch\".\n kwargs (dict): Optional kwargs to pass on to the Distribution's\n constructor.\n\n Returns:\n Tuple:\n - dist_class (ActionDistribution): Python class of the\n distribution.\n - dist_dim (int): The size of the input vector to the\n distribution.\n \"\"\"\n\n dist = None\n config = config or MODEL_DEFAULTS\n # Custom distribution given.\n if config.get(\"custom_action_dist\"):\n action_dist_name = config[\"custom_action_dist\"]\n logger.debug(\n \"Using custom action distribution {}\".format(action_dist_name))\n dist = _global_registry.get(RLLIB_ACTION_DIST, action_dist_name)\n # Dist_type is given directly as a class.\n elif type(dist_type) is type and \\\n issubclass(dist_type, ActionDistribution) and \\\n dist_type not in (\n MultiActionDistribution, TorchMultiActionDistribution):\n dist = dist_type\n # Box space -> DiagGaussian OR Deterministic.\n elif isinstance(action_space, gym.spaces.Box):\n if len(action_space.shape) > 1:\n raise UnsupportedSpaceException(\n \"Action space has multiple dimensions \"\n \"{}. \".format(action_space.shape) +\n \"Consider reshaping this into a single dimension, \"\n \"using a custom action distribution, \"\n \"using a Tuple action space, or the multi-agent API.\")\n # TODO(sven): Check for bounds and return SquashedNormal, etc..\n if dist_type is None:\n dist = TorchDiagGaussian if framework == \"torch\" \\\n else DiagGaussian\n elif dist_type == \"deterministic\":\n dist = TorchDeterministic if framework == \"torch\" \\\n else Deterministic\n # Discrete Space -> Categorical.\n elif isinstance(action_space, gym.spaces.Discrete):\n dist = TorchCategorical if framework == \"torch\" else Categorical\n # Tuple/Dict Spaces -> MultiAction.\n elif dist_type in (MultiActionDistribution,\n TorchMultiActionDistribution) or \\\n isinstance(action_space, (gym.spaces.Tuple, gym.spaces.Dict)):\n flat_action_space = flatten_space(action_space)\n child_dists_and_in_lens = tree.map_structure(\n lambda s: ModelCatalog.get_action_dist(\n s, config, framework=framework), flat_action_space)\n child_dists = [e[0] for e in child_dists_and_in_lens]\n input_lens = [int(e[1]) for e in child_dists_and_in_lens]\n return partial(\n (TorchMultiActionDistribution\n if framework == \"torch\" else MultiActionDistribution),\n action_space=action_space,\n child_distributions=child_dists,\n input_lens=input_lens), int(sum(input_lens))\n # Simplex -> Dirichlet.\n elif isinstance(action_space, Simplex):\n if framework == \"torch\":\n # TODO(sven): implement\n raise NotImplementedError(\n \"Simplex action spaces not supported for torch.\")\n dist = Dirichlet\n # MultiDiscrete -> MultiCategorical.\n elif isinstance(action_space, gym.spaces.MultiDiscrete):\n dist = TorchMultiCategorical if framework == \"torch\" else \\\n MultiCategorical\n return partial(dist, input_lens=action_space.nvec), \\\n int(sum(action_space.nvec))\n # Unknown type 
-> Error.\n else:\n raise NotImplementedError(\"Unsupported args: {} {}\".format(\n action_space, dist_type))\n\n return dist, dist.required_model_output_shape(action_space, config)\n\n @staticmethod\n @DeveloperAPI\n def get_action_shape(action_space: gym.Space) -> (np.dtype, List[int]):\n \"\"\"Returns action tensor dtype and shape for the action space.\n\n Args:\n action_space (Space): Action space of the target gym env.\n Returns:\n (dtype, shape): Dtype and shape of the actions tensor.\n \"\"\"\n\n if isinstance(action_space, gym.spaces.Discrete):\n return (tf.int64, (None, ))\n elif isinstance(action_space, (gym.spaces.Box, Simplex)):\n return (tf.float32, (None, ) + action_space.shape)\n elif isinstance(action_space, gym.spaces.MultiDiscrete):\n return (tf.as_dtype(action_space.dtype),\n (None, ) + action_space.shape)\n elif isinstance(action_space, (gym.spaces.Tuple, gym.spaces.Dict)):\n flat_action_space = flatten_space(action_space)\n size = 0\n all_discrete = True\n for i in range(len(flat_action_space)):\n if isinstance(flat_action_space[i], gym.spaces.Discrete):\n size += 1\n else:\n all_discrete = False\n size += np.product(flat_action_space[i].shape)\n size = int(size)\n return (tf.int64 if all_discrete else tf.float32, (None, size))\n else:\n raise NotImplementedError(\n \"Action space {} not supported\".format(action_space))\n\n @staticmethod\n @DeveloperAPI\n def get_action_placeholder(action_space: gym.Space,\n name: str = \"action\") -> TensorType:\n \"\"\"Returns an action placeholder consistent with the action space\n\n Args:\n action_space (Space): Action space of the target gym env.\n name (str): An optional string to name the placeholder by.\n Default: \"action\".\n Returns:\n action_placeholder (Tensor): A placeholder for the actions\n \"\"\"\n\n dtype, shape = ModelCatalog.get_action_shape(action_space)\n\n return tf1.placeholder(dtype, shape=shape, name=name)\n\n @staticmethod\n @DeveloperAPI\n def get_model_v2(obs_space: gym.Space,\n action_space: gym.Space,\n num_outputs: int,\n model_config: ModelConfigDict,\n framework: str = \"tf\",\n name: str = \"default_model\",\n model_interface: type = None,\n default_model: type = None,\n **model_kwargs) -> ModelV2:\n \"\"\"Returns a suitable model compatible with given spaces and output.\n\n Args:\n obs_space (Space): Observation space of the target gym env. This\n may have an `original_space` attribute that specifies how to\n unflatten the tensor into a ragged tensor.\n action_space (Space): Action space of the target gym env.\n num_outputs (int): The size of the output vector of the model.\n framework (str): One of \"tf\", \"tfe\", or \"torch\".\n name (str): Name (scope) for the model.\n model_interface (cls): Interface required for the model\n default_model (cls): Override the default class for the model. 
This\n only has an effect when not using a custom model\n model_kwargs (dict): args to pass to the ModelV2 constructor\n\n Returns:\n model (ModelV2): Model to use for the policy.\n \"\"\"\n\n if model_config.get(\"custom_model\"):\n\n if \"custom_options\" in model_config and \\\n model_config[\"custom_options\"] != DEPRECATED_VALUE:\n deprecation_warning(\n \"model.custom_options\",\n \"model.custom_model_config\",\n error=False)\n model_config[\"custom_model_config\"] = \\\n model_config.pop(\"custom_options\")\n\n if isinstance(model_config[\"custom_model\"], type):\n model_cls = model_config[\"custom_model\"]\n else:\n model_cls = _global_registry.get(RLLIB_MODEL,\n model_config[\"custom_model\"])\n\n # TODO(sven): Hard-deprecate Model(V1).\n if issubclass(model_cls, ModelV2):\n logger.info(\"Wrapping {} as {}\".format(model_cls,\n model_interface))\n model_cls = ModelCatalog._wrap_if_needed(\n model_cls, model_interface)\n\n if framework in [\"tf\", \"tfe\"]:\n # Track and warn if vars were created but not registered.\n created = set()\n\n def track_var_creation(next_creator, **kw):\n v = next_creator(**kw)\n created.add(v)\n return v\n\n with tf.variable_creator_scope(track_var_creation):\n # Try calling with kwargs first (custom ModelV2 should\n # accept these as kwargs, not get them from\n # config[\"custom_model_config\"] anymore).\n try:\n instance = model_cls(obs_space, action_space,\n num_outputs, model_config,\n name, **model_kwargs)\n except TypeError as e:\n # Keyword error: Try old way w/o kwargs.\n if \"__init__() got an unexpected \" in e.args[0]:\n logger.warning(\n \"Custom ModelV2 should accept all custom \"\n \"options as **kwargs, instead of expecting\"\n \" them in config['custom_model_config']!\")\n instance = model_cls(obs_space, action_space,\n num_outputs, model_config,\n name)\n # Other error -> re-raise.\n else:\n raise e\n registered = set(instance.variables())\n not_registered = set()\n for var in created:\n if var not in registered:\n not_registered.add(var)\n if not_registered:\n raise ValueError(\n \"It looks like variables {} were created as part \"\n \"of {} but does not appear in model.variables() \"\n \"({}). Did you forget to call \"\n \"model.register_variables() on the variables in \"\n \"question?\".format(not_registered, instance,\n registered))\n else:\n # PyTorch automatically tracks nn.Modules inside the parent\n # nn.Module's constructor.\n # TODO(sven): Do this for TF as well.\n instance = model_cls(obs_space, action_space, num_outputs,\n model_config, name, **model_kwargs)\n return instance\n # TODO(sven): Hard-deprecate Model(V1). 
This check will be\n # superflous then.\n elif tf.executing_eagerly():\n raise ValueError(\n \"Eager execution requires a TFModelV2 model to be \"\n \"used, however you specified a custom model {}\".format(\n model_cls))\n\n if framework in [\"tf\", \"tfe\", \"tf2\"]:\n v2_class = None\n # Try to get a default v2 model.\n if not model_config.get(\"custom_model\"):\n v2_class = default_model or ModelCatalog._get_v2_model_class(\n obs_space, model_config, framework=framework)\n\n if model_config.get(\"use_lstm\"):\n wrapped_cls = v2_class\n forward = wrapped_cls.forward\n v2_class = ModelCatalog._wrap_if_needed(\n wrapped_cls, LSTMWrapper)\n v2_class._wrapped_forward = forward\n\n # fallback to a default v1 model\n if v2_class is None:\n if tf.executing_eagerly():\n raise ValueError(\n \"Eager execution requires a TFModelV2 model to be \"\n \"used, however there is no default V2 model for this \"\n \"observation space: {}, use_lstm={}\".format(\n obs_space, model_config.get(\"use_lstm\")))\n v2_class = make_v1_wrapper(ModelCatalog.get_model)\n # Wrap in the requested interface.\n wrapper = ModelCatalog._wrap_if_needed(v2_class, model_interface)\n return wrapper(obs_space, action_space, num_outputs, model_config,\n name, **model_kwargs)\n elif framework == \"torch\":\n v2_class = \\\n default_model or ModelCatalog._get_v2_model_class(\n obs_space, model_config, framework=framework)\n if model_config.get(\"use_lstm\"):\n from ray.rllib.models.torch.recurrent_net import LSTMWrapper \\\n as TorchLSTMWrapper\n wrapped_cls = v2_class\n forward = wrapped_cls.forward\n v2_class = ModelCatalog._wrap_if_needed(\n wrapped_cls, TorchLSTMWrapper)\n v2_class._wrapped_forward = forward\n # Wrap in the requested interface.\n wrapper = ModelCatalog._wrap_if_needed(v2_class, model_interface)\n return wrapper(obs_space, action_space, num_outputs, model_config,\n name, **model_kwargs)\n else:\n raise NotImplementedError(\n \"`framework` must be 'tf|tfe|torch', but is \"\n \"{}!\".format(framework))\n\n @staticmethod\n @DeveloperAPI\n def get_preprocessor(env: gym.Env, options: dict = None) -> Preprocessor:\n \"\"\"Returns a suitable preprocessor for the given env.\n\n This is a wrapper for get_preprocessor_for_space().\n \"\"\"\n\n return ModelCatalog.get_preprocessor_for_space(env.observation_space,\n options)\n\n @staticmethod\n @DeveloperAPI\n def get_preprocessor_for_space(observation_space: gym.Space,\n options: dict = None) -> Preprocessor:\n \"\"\"Returns a suitable preprocessor for the given observation space.\n\n Args:\n observation_space (Space): The input observation space.\n options (dict): Options to pass to the preprocessor.\n\n Returns:\n preprocessor (Preprocessor): Preprocessor for the observations.\n \"\"\"\n\n options = options or MODEL_DEFAULTS\n for k in options.keys():\n if k not in MODEL_DEFAULTS:\n raise Exception(\"Unknown config key `{}`, all keys: {}\".format(\n k, list(MODEL_DEFAULTS)))\n\n if options.get(\"custom_preprocessor\"):\n preprocessor = options[\"custom_preprocessor\"]\n logger.info(\"Using custom preprocessor {}\".format(preprocessor))\n logger.warning(\n \"DeprecationWarning: Custom preprocessors are deprecated, \"\n \"since they sometimes conflict with the built-in \"\n \"preprocessors for handling complex observation spaces. 
\"\n \"Please use wrapper classes around your environment \"\n \"instead of preprocessors.\")\n prep = _global_registry.get(RLLIB_PREPROCESSOR, preprocessor)(\n observation_space, options)\n else:\n cls = get_preprocessor(observation_space)\n prep = cls(observation_space, options)\n\n logger.debug(\"Created preprocessor {}: {} -> {}\".format(\n prep, observation_space, prep.shape))\n return prep\n\n @staticmethod\n @PublicAPI\n def register_custom_preprocessor(preprocessor_name: str,\n preprocessor_class: type) -> None:\n \"\"\"Register a custom preprocessor class by name.\n\n The preprocessor can be later used by specifying\n {\"custom_preprocessor\": preprocesor_name} in the model config.\n\n Args:\n preprocessor_name (str): Name to register the preprocessor under.\n preprocessor_class (type): Python class of the preprocessor.\n \"\"\"\n _global_registry.register(RLLIB_PREPROCESSOR, preprocessor_name,\n preprocessor_class)\n\n @staticmethod\n @PublicAPI\n def register_custom_model(model_name: str, model_class: type) -> None:\n \"\"\"Register a custom model class by name.\n\n The model can be later used by specifying {\"custom_model\": model_name}\n in the model config.\n\n Args:\n model_name (str): Name to register the model under.\n model_class (type): Python class of the model.\n \"\"\"\n _global_registry.register(RLLIB_MODEL, model_name, model_class)\n\n @staticmethod\n @PublicAPI\n def register_custom_action_dist(action_dist_name: str,\n action_dist_class: type) -> None:\n \"\"\"Register a custom action distribution class by name.\n\n The model can be later used by specifying\n {\"custom_action_dist\": action_dist_name} in the model config.\n\n Args:\n model_name (str): Name to register the action distribution under.\n model_class (type): Python class of the action distribution.\n \"\"\"\n _global_registry.register(RLLIB_ACTION_DIST, action_dist_name,\n action_dist_class)\n\n @staticmethod\n def _wrap_if_needed(model_cls: type, model_interface: type) -> type:\n assert issubclass(model_cls, ModelV2), model_cls\n\n if not model_interface or issubclass(model_cls, model_interface):\n return model_cls\n\n class wrapper(model_interface, model_cls):\n pass\n\n name = \"{}_as_{}\".format(model_cls.__name__, model_interface.__name__)\n wrapper.__name__ = name\n wrapper.__qualname__ = name\n\n return wrapper\n\n @staticmethod\n def _get_v2_model_class(input_space, model_config, framework=\"tf\"):\n if framework == \"torch\":\n from ray.rllib.models.torch.fcnet import (FullyConnectedNetwork as\n FCNet)\n from ray.rllib.models.torch.visionnet import (VisionNetwork as\n VisionNet)\n else:\n from ray.rllib.models.tf.fcnet import \\\n FullyConnectedNetwork as FCNet\n from ray.rllib.models.tf.visionnet import \\\n VisionNetwork as VisionNet\n\n # Discrete/1D obs-spaces.\n if isinstance(input_space, gym.spaces.Discrete) or \\\n len(input_space.shape) <= 2:\n return FCNet\n # Default Conv2D net.\n else:\n return VisionNet\n\n # -------------------\n # DEPRECATED METHODS.\n # -------------------\n @staticmethod\n def get_model(input_dict,\n obs_space,\n action_space,\n num_outputs,\n options,\n state_in=None,\n seq_lens=None):\n \"\"\"Deprecated: Use get_model_v2() instead.\"\"\"\n\n deprecation_warning(\"get_model\", \"get_model_v2\", error=False)\n assert isinstance(input_dict, dict)\n options = options or MODEL_DEFAULTS\n model = ModelCatalog._get_model(input_dict, obs_space, action_space,\n num_outputs, options, state_in,\n seq_lens)\n\n if options.get(\"use_lstm\"):\n copy = dict(input_dict)\n 
copy[\"obs\"] = model.last_layer\n feature_space = gym.spaces.Box(\n -1, 1, shape=(model.last_layer.shape[1], ))\n model = LSTM(copy, feature_space, action_space, num_outputs,\n options, state_in, seq_lens)\n\n logger.debug(\n \"Created model {}: ({} of {}, {}, {}, {}) -> {}, {}\".format(\n model, input_dict, obs_space, action_space, state_in, seq_lens,\n model.outputs, model.state_out))\n\n model._validate_output_shape()\n return model\n\n @staticmethod\n def _get_model(input_dict, obs_space, action_space, num_outputs, options,\n state_in, seq_lens):\n deprecation_warning(\"_get_model\", \"get_model_v2\", error=False)\n if options.get(\"custom_model\"):\n model = options[\"custom_model\"]\n logger.debug(\"Using custom model {}\".format(model))\n return _global_registry.get(RLLIB_MODEL, model)(\n input_dict,\n obs_space,\n action_space,\n num_outputs,\n options,\n state_in=state_in,\n seq_lens=seq_lens)\n\n obs_rank = len(input_dict[\"obs\"].shape) - 1 # drops batch dim\n\n if obs_rank > 2:\n return VisionNetwork(input_dict, obs_space, action_space,\n num_outputs, options)\n\n return FullyConnectedNetwork(input_dict, obs_space, action_space,\n num_outputs, options)\n",
"import logging\nimport numpy as np\nfrom typing import Dict, Optional\n\nfrom ray.rllib.evaluation.episode import MultiAgentEpisode\nfrom ray.rllib.policy.sample_batch import SampleBatch\nfrom ray.rllib.policy.view_requirement import ViewRequirement\nfrom ray.rllib.utils.framework import try_import_tf, try_import_torch\nfrom ray.rllib.utils.types import AgentID, EnvID, EpisodeID, TensorType\n\ntf1, tf, tfv = try_import_tf()\ntorch, _ = try_import_torch()\n\nlogger = logging.getLogger(__name__)\n\n\nclass _PerPolicySampleCollector:\n \"\"\"A class for efficiently collecting samples for a single (fixed) policy.\n\n Can be used by a _MultiAgentSampleCollector for its different policies.\n \"\"\"\n\n def __init__(self,\n num_agents: Optional[int] = None,\n num_timesteps: Optional[int] = None,\n time_major: bool = True,\n shift_before: int = 0,\n shift_after: int = 0):\n \"\"\"Initializes a _PerPolicySampleCollector object.\n\n Args:\n num_agents (int): The max number of agent slots to pre-allocate\n in the buffer.\n num_timesteps (int): The max number of timesteps to pre-allocate\n in the buffer.\n time_major (Optional[bool]): Whether to preallocate buffers and\n collect samples in time-major fashion (TxBx...).\n shift_before (int): The additional number of time slots to\n pre-allocate at the beginning of a time window (for possible\n underlying data column shifts, e.g. PREV_ACTIONS).\n shift_after (int): The additional number of time slots to\n pre-allocate at the end of a time window (for possible\n underlying data column shifts, e.g. NEXT_OBS).\n \"\"\"\n\n self.num_agents = num_agents or 100\n self.num_timesteps = num_timesteps\n self.time_major = time_major\n # `shift_before must at least be 1 for the init obs timestep.\n self.shift_before = max(shift_before, 1)\n self.shift_after = shift_after\n\n # The offset on the agent dim to start the next SampleBatch build from.\n self.sample_batch_offset = 0\n\n # The actual underlying data-buffers.\n self.buffers = {}\n self.postprocessed_agents = [False] * self.num_agents\n\n # Next agent-slot to be used by a new agent/env combination.\n self.agent_slot_cursor = 0\n # Maps agent/episode ID/chunk-num to an agent slot.\n self.agent_key_to_slot = {}\n # Maps agent/episode ID to the last chunk-num.\n self.agent_key_to_chunk_num = {}\n # Maps agent slot number to agent keys.\n self.slot_to_agent_key = [None] * self.num_agents\n # Maps agent/episode ID/chunk-num to a time step cursor.\n self.agent_key_to_timestep = {}\n\n # Total timesteps taken in the env over all agents since last reset.\n self.timesteps_since_last_reset = 0\n\n # Indices (T,B) to pick from the buffers for the next forward pass.\n self.forward_pass_indices = [[], []]\n self.forward_pass_size = 0\n # Maps index from the forward pass batch to (agent_id, episode_id,\n # env_id) tuple.\n self.forward_pass_index_to_agent_info = {}\n self.agent_key_to_forward_pass_index = {}\n\n def add_init_obs(self, episode_id: EpisodeID, agent_id: AgentID,\n env_id: EnvID, chunk_num: int,\n init_obs: TensorType) -> None:\n \"\"\"Adds a single initial observation (after env.reset()) to the buffer.\n\n Args:\n episode_id (EpisodeID): Unique ID for the episode we are adding the\n initial observation for.\n agent_id (AgentID): Unique ID for the agent we are adding the\n initial observation for.\n env_id (EnvID): The env ID to which `init_obs` belongs.\n chunk_num (int): The time-chunk number (0-based). 
Some episodes\n may last for longer than self.num_timesteps and therefore\n have to be chopped into chunks.\n init_obs (TensorType): Initial observation (after env.reset()).\n \"\"\"\n agent_key = (agent_id, episode_id, chunk_num)\n agent_slot = self.agent_slot_cursor\n self.agent_key_to_slot[agent_key] = agent_slot\n self.agent_key_to_chunk_num[agent_key[:2]] = chunk_num\n self.slot_to_agent_key[agent_slot] = agent_key\n self._next_agent_slot()\n\n if SampleBatch.OBS not in self.buffers:\n self._build_buffers(single_row={SampleBatch.OBS: init_obs})\n if self.time_major:\n self.buffers[SampleBatch.OBS][self.shift_before-1, agent_slot] = \\\n init_obs\n else:\n self.buffers[SampleBatch.OBS][agent_slot, self.shift_before-1] = \\\n init_obs\n self.agent_key_to_timestep[agent_key] = self.shift_before\n\n self._add_to_next_inference_call(agent_key, env_id, agent_slot,\n self.shift_before - 1)\n\n def add_action_reward_next_obs(\n self, episode_id: EpisodeID, agent_id: AgentID, env_id: EnvID,\n agent_done: bool, values: Dict[str, TensorType]) -> None:\n \"\"\"Add the given dictionary (row) of values to this batch.\n\n Args:\n episode_id (EpisodeID): Unique ID for the episode we are adding the\n values for.\n agent_id (AgentID): Unique ID for the agent we are adding the\n values for.\n env_id (EnvID): The env ID to which the given data belongs.\n agent_done (bool): Whether next obs should not be used for an\n upcoming inference call. Default: False = next-obs should be\n used for upcoming inference.\n values (Dict[str, TensorType]): Data dict (interpreted as a single\n row) to be added to buffer. Must contain keys:\n SampleBatch.ACTIONS, REWARDS, DONES, and NEXT_OBS.\n \"\"\"\n assert (SampleBatch.ACTIONS in values and SampleBatch.REWARDS in values\n and SampleBatch.NEXT_OBS in values\n and SampleBatch.DONES in values)\n\n assert SampleBatch.OBS not in values\n values[SampleBatch.OBS] = values[SampleBatch.NEXT_OBS]\n del values[SampleBatch.NEXT_OBS]\n\n chunk_num = self.agent_key_to_chunk_num[(agent_id, episode_id)]\n agent_key = (agent_id, episode_id, chunk_num)\n agent_slot = self.agent_key_to_slot[agent_key]\n ts = self.agent_key_to_timestep[agent_key]\n for k, v in values.items():\n if k not in self.buffers:\n self._build_buffers(single_row=values)\n if self.time_major:\n self.buffers[k][ts, agent_slot] = v\n else:\n self.buffers[k][agent_slot, ts] = v\n self.agent_key_to_timestep[agent_key] += 1\n\n # Time-axis is \"full\" -> Cut-over to new chunk (only if not DONE).\n if self.agent_key_to_timestep[\n agent_key] - self.shift_before == self.num_timesteps and \\\n not values[SampleBatch.DONES]:\n self._new_chunk_from(agent_slot, agent_key,\n self.agent_key_to_timestep[agent_key])\n\n self.timesteps_since_last_reset += 1\n\n if not agent_done:\n self._add_to_next_inference_call(agent_key, env_id, agent_slot, ts)\n\n def get_inference_input_dict(self, view_reqs: Dict[str, ViewRequirement]\n ) -> Dict[str, TensorType]:\n \"\"\"Returns an input_dict for an (inference) forward pass.\n\n The input_dict can then be used for action computations inside a\n Policy via `Policy.compute_actions_from_input_dict()`.\n\n Args:\n view_reqs (Dict[str, ViewRequirement]): The view requirements\n dict to use.\n\n Returns:\n Dict[str, TensorType]: The input_dict to be passed into the ModelV2\n for inference/training.\n\n Examples:\n >>> obs, r, done, info = env.step(action)\n >>> collector.add_action_reward_next_obs(12345, 0, \"pol0\", {\n ... \"action\": action, \"obs\": obs, \"reward\": r, \"done\": done\n ... 
})\n >>> input_dict = collector.get_inference_input_dict(policy.model)\n >>> action = policy.compute_actions_from_input_dict(input_dict)\n >>> # repeat\n \"\"\"\n input_dict = {}\n for view_col, view_req in view_reqs.items():\n # Create the batch of data from the different buffers.\n data_col = view_req.data_col or view_col\n if data_col not in self.buffers:\n self._build_buffers({data_col: view_req.space.sample()})\n\n indices = self.forward_pass_indices\n if self.time_major:\n input_dict[view_col] = self.buffers[data_col][indices]\n else:\n if isinstance(view_req.shift, (list, tuple)):\n time_indices = \\\n np.array(view_req.shift) + np.array(indices[0])\n input_dict[view_col] = self.buffers[data_col][indices[1],\n time_indices]\n else:\n input_dict[view_col] = \\\n self.buffers[data_col][indices[1], indices[0]]\n\n self._reset_inference_call()\n\n return input_dict\n\n def get_postprocessing_sample_batches(\n self,\n episode: MultiAgentEpisode,\n view_reqs: Dict[str, ViewRequirement]) -> \\\n Dict[AgentID, SampleBatch]:\n \"\"\"Returns a SampleBatch object ready for postprocessing.\n\n Args:\n episode (MultiAgentEpisode): The MultiAgentEpisode object to\n get the to-be-postprocessed SampleBatches for.\n view_reqs (Dict[str, ViewRequirement]): The view requirements dict\n to use for creating the SampleBatch from our buffers.\n\n Returns:\n Dict[AgentID, SampleBatch]: The sample batch objects to be passed\n to `Policy.postprocess_trajectory()`.\n \"\"\"\n # Loop through all agents and create a SampleBatch\n # (as \"view\"; no copying).\n\n # Construct the SampleBatch-dict.\n sample_batch_data = {}\n\n range_ = self.agent_slot_cursor - self.sample_batch_offset\n if range_ < 0:\n range_ = self.num_agents + range_\n for i in range(range_):\n agent_slot = self.sample_batch_offset + i\n if agent_slot >= self.num_agents:\n agent_slot = agent_slot % self.num_agents\n # Do not postprocess the same slot twice.\n if self.postprocessed_agents[agent_slot]:\n continue\n agent_key = self.slot_to_agent_key[agent_slot]\n # Skip other episodes (if episode provided).\n if episode and agent_key[1] != episode.episode_id:\n continue\n end = self.agent_key_to_timestep[agent_key]\n # Do not build any empty SampleBatches.\n if end == self.shift_before:\n continue\n self.postprocessed_agents[agent_slot] = True\n\n assert agent_key not in sample_batch_data\n sample_batch_data[agent_key] = {}\n batch = sample_batch_data[agent_key]\n\n for view_col, view_req in view_reqs.items():\n # Skip columns that will only get added through postprocessing\n # (these may not even exist yet).\n if view_req.created_during_postprocessing:\n continue\n\n data_col = view_req.data_col or view_col\n shift = view_req.shift\n if data_col == SampleBatch.OBS:\n shift -= 1\n\n batch[view_col] = self.buffers[data_col][\n self.shift_before + shift:end + shift, agent_slot]\n\n batches = {}\n for agent_key, data in sample_batch_data.items():\n batches[agent_key] = SampleBatch(data)\n return batches\n\n def get_train_sample_batch_and_reset(self, view_reqs) -> SampleBatch:\n \"\"\"Returns the accumulated sample batche for this policy.\n\n This is usually called to collect samples for policy training.\n\n Returns:\n SampleBatch: Returns the accumulated sample batch for this\n policy.\n \"\"\"\n seq_lens = [\n self.agent_key_to_timestep[k] - self.shift_before\n for k in self.slot_to_agent_key if k is not None\n ]\n first_zero_len = len(seq_lens)\n if seq_lens[-1] == 0:\n first_zero_len = seq_lens.index(0)\n # Assert that all zeros lie at the end of 
the seq_lens array.\n try:\n assert all(seq_lens[i] == 0\n for i in range(first_zero_len, len(seq_lens)))\n except AssertionError as e:\n print()\n raise e\n\n t_start = self.shift_before\n t_end = t_start + self.num_timesteps\n\n # The agent_slot cursor that points to the newest agent-slot that\n # actually already has at least 1 timestep of data (thus it excludes\n # just-rolled over chunks (which only have the initial obs in them)).\n valid_agent_cursor = \\\n (self.agent_slot_cursor - (len(seq_lens) - first_zero_len)) % \\\n self.num_agents\n\n # Construct the view dict.\n view = {}\n for view_col, view_req in view_reqs.items():\n data_col = view_req.data_col or view_col\n assert data_col in self.buffers\n # For OBS, indices must be shifted by -1.\n extra_shift = 0 if data_col != SampleBatch.OBS else -1\n # If agent_slot has been rolled-over to beginning, we have to copy\n # here.\n if valid_agent_cursor < self.sample_batch_offset:\n time_slice = self.buffers[data_col][t_start + extra_shift:\n t_end + extra_shift]\n one_ = time_slice[:, self.sample_batch_offset:]\n two_ = time_slice[:, :valid_agent_cursor]\n if torch and isinstance(time_slice, torch.Tensor):\n view[view_col] = torch.cat([one_, two_], dim=1)\n else:\n view[view_col] = np.concatenate([one_, two_], axis=1)\n else:\n view[view_col] = \\\n self.buffers[data_col][\n t_start + extra_shift:t_end + extra_shift,\n self.sample_batch_offset:valid_agent_cursor]\n\n # Copy all still ongoing trajectories to new agent slots\n # (including the ones that just started (are seq_len=0)).\n new_chunk_args = []\n for i, seq_len in enumerate(seq_lens):\n if seq_len < self.num_timesteps:\n agent_slot = self.sample_batch_offset + i\n if agent_slot >= self.num_agents:\n agent_slot = agent_slot % self.num_agents\n if not self.buffers[SampleBatch.\n DONES][seq_len - 1 +\n self.shift_before][agent_slot]:\n agent_key = self.slot_to_agent_key[agent_slot]\n new_chunk_args.append(\n (agent_slot, agent_key,\n self.agent_key_to_timestep[agent_key]))\n # Cut out all 0 seq-lens.\n seq_lens = seq_lens[:first_zero_len]\n batch = SampleBatch(\n view, _seq_lens=np.array(seq_lens), _time_major=True)\n\n # Reset everything for new data.\n self.postprocessed_agents = [False] * self.num_agents\n self.agent_key_to_slot.clear()\n self.agent_key_to_chunk_num.clear()\n self.slot_to_agent_key = [None] * self.num_agents\n self.agent_key_to_timestep.clear()\n self.timesteps_since_last_reset = 0\n self.forward_pass_size = 0\n self.sample_batch_offset = self.agent_slot_cursor\n\n for args in new_chunk_args:\n self._new_chunk_from(*args)\n\n return batch\n\n def _build_buffers(self, single_row: Dict[str, TensorType]) -> None:\n \"\"\"Builds the internal data buffers based on a single given row.\n\n Args:\n single_row (Dict[str, TensorType]): A single datarow with one or\n more columns (str as key, np.ndarray|tensor as data).\n \"\"\"\n time_size = self.num_timesteps + self.shift_before + self.shift_after\n for col, data in single_row.items():\n if col in self.buffers:\n continue\n base_shape = (time_size, self.num_agents) if self.time_major else \\\n (self.num_agents, time_size)\n # Python primitive -> np.array.\n if isinstance(data, (int, float, bool)):\n t_ = type(data)\n dtype = np.float32 if t_ == float else \\\n np.int32 if type(data) == int else np.bool_\n self.buffers[col] = np.zeros(shape=base_shape, dtype=dtype)\n # np.ndarray, torch.Tensor, or tf.Tensor.\n else:\n shape = base_shape + data.shape\n dtype = data.dtype\n if torch and isinstance(data, torch.Tensor):\n 
self.buffers[col] = torch.zeros(\n *shape, dtype=dtype, device=data.device)\n elif tf and isinstance(data, tf.Tensor):\n self.buffers[col] = tf.zeros(shape=shape, dtype=dtype)\n else:\n self.buffers[col] = np.zeros(shape=shape, dtype=dtype)\n\n def _next_agent_slot(self):\n \"\"\"Starts a new agent slot at the end of the agent-axis.\n\n Also makes sure, the new slot is not taken yet.\n \"\"\"\n self.agent_slot_cursor += 1\n if self.agent_slot_cursor >= self.num_agents:\n self.agent_slot_cursor = 0\n # Just make sure, there is space in our buffer.\n assert self.slot_to_agent_key[self.agent_slot_cursor] is None\n\n def _new_chunk_from(self, agent_slot, agent_key, timestep):\n \"\"\"Creates a new time-window (chunk) given an agent.\n\n The agent may already have an unfinished episode going on (in a\n previous chunk). The end of that previous chunk will be copied to the\n beginning of the new one for proper data-shift handling (e.g.\n PREV_ACTIONS/REWARDS).\n\n Args:\n agent_slot (int): The agent to start a new chunk for (from an\n ongoing episode (chunk)).\n agent_key (Tuple[AgentID, EpisodeID, int]): The internal key to\n identify an active agent in some episode.\n timestep (int): The timestep in the old chunk being continued.\n \"\"\"\n new_agent_slot = self.agent_slot_cursor\n # Increase chunk num by 1.\n new_agent_key = agent_key[:2] + (agent_key[2] + 1, )\n # Copy relevant timesteps at end of old chunk into new one.\n if self.time_major:\n for k in self.buffers.keys():\n self.buffers[k][0:self.shift_before, new_agent_slot] = \\\n self.buffers[k][\n timestep - self.shift_before:timestep, agent_slot]\n else:\n for k in self.buffers.keys():\n self.buffers[k][new_agent_slot, 0:self.shift_before] = \\\n self.buffers[k][\n agent_slot, timestep - self.shift_before:timestep]\n\n self.agent_key_to_slot[new_agent_key] = new_agent_slot\n self.agent_key_to_chunk_num[new_agent_key[:2]] = new_agent_key[2]\n self.slot_to_agent_key[new_agent_slot] = new_agent_key\n self._next_agent_slot()\n self.agent_key_to_timestep[new_agent_key] = self.shift_before\n\n def _add_to_next_inference_call(self, agent_key, env_id, agent_slot,\n timestep):\n \"\"\"Registers given T and B (agent_slot) for get_inference_input_dict.\n\n Calling `get_inference_input_dict` will produce an input_dict (for\n Policy.compute_actions_from_input_dict) with all registered agent/time\n indices and then automatically reset the registry.\n\n Args:\n agent_key (Tuple[AgentID, EpisodeID, int]): The internal key to\n identify an active agent in some episode.\n env_id (EnvID): The env ID of the given agent.\n agent_slot (int): The agent_slot to register (B axis).\n timestep (int): The timestep to register (T axis).\n \"\"\"\n idx = self.forward_pass_size\n self.forward_pass_index_to_agent_info[idx] = (agent_key[0],\n agent_key[1], env_id)\n self.agent_key_to_forward_pass_index[agent_key[:2]] = idx\n if self.forward_pass_size == 0:\n self.forward_pass_indices[0].clear()\n self.forward_pass_indices[1].clear()\n self.forward_pass_indices[0].append(timestep)\n self.forward_pass_indices[1].append(agent_slot)\n self.forward_pass_size += 1\n\n def _reset_inference_call(self):\n \"\"\"Resets indices for the next inference call.\n\n After calling this, new calls to `add_init_obs()` and\n `add_action_reward_next_obs()` will count for the next input_dict\n returned by `get_inference_input_dict()`.\n \"\"\"\n self.forward_pass_size = 0\n"
] |
[
[
"numpy.product"
],
[
"numpy.concatenate",
"numpy.array",
"numpy.zeros"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
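The catalog.py source captured in the row above centers on ModelCatalog.get_action_dist, which maps a gym action space to an action-distribution class plus the size of the model output it expects. Below is a minimal, illustrative sketch of that call; the public import path ray.rllib.models.ModelCatalog and the exact returned classes are assumptions about this Ray revision, inferred from the code in the row rather than stated by it.

import gym
from ray.rllib.models import ModelCatalog

# Discrete(4) -> (Torch)Categorical with a 4-dim input vector (one logit per action).
dist_class, dist_dim = ModelCatalog.get_action_dist(
    action_space=gym.spaces.Discrete(4), config={}, framework="torch")
print(dist_class.__name__, dist_dim)

# Rank-1 Box spaces fall back to DiagGaussian (2 * action_dim outputs: means + log-stds).
box_dist, box_dim = ModelCatalog.get_action_dist(
    action_space=gym.spaces.Box(-1.0, 1.0, shape=(3,)), config={}, framework="tf")
print(box_dist.__name__, box_dim)

Passing config={} simply falls back to MODEL_DEFAULTS, as the `config = config or MODEL_DEFAULTS` line in the row shows.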
PhanatosZou/nltk
|
[
"750e488569b6f80c72ae6ca74eff90eae55e6c4e"
] |
[
"nltk/metrics/scores.py"
] |
[
"# Natural Language Toolkit: Evaluation\n#\n# Copyright (C) 2001-2019 NLTK Project\n# Author: Edward Loper <[email protected]>\n# Steven Bird <[email protected]>\n# URL: <http://nltk.org/>\n# For license information, see LICENSE.TXT\n\nfrom math import fabs\nimport operator\nfrom random import shuffle\nfrom functools import reduce\n\ntry:\n from scipy.stats.stats import betai\nexcept ImportError:\n betai = None\n\nfrom nltk.util import LazyConcatenation, LazyMap\n\n\ndef accuracy(reference, test):\n \"\"\"\n Given a list of reference values and a corresponding list of test\n values, return the fraction of corresponding values that are\n equal. In particular, return the fraction of indices\n ``0<i<=len(test)`` such that ``test[i] == reference[i]``.\n\n :type reference: list\n :param reference: An ordered list of reference values.\n :type test: list\n :param test: A list of values to compare against the corresponding\n reference values.\n :raise ValueError: If ``reference`` and ``length`` do not have the\n same length.\n \"\"\"\n if len(reference) != len(test):\n raise ValueError(\"Lists must have the same length.\")\n return sum(x == y for x, y in zip(reference, test)) / len(test)\n\n\ndef precision(reference, test):\n \"\"\"\n Given a set of reference values and a set of test values, return\n the fraction of test values that appear in the reference set.\n In particular, return card(``reference`` intersection ``test``)/card(``test``).\n If ``test`` is empty, then return None.\n\n :type reference: set\n :param reference: A set of reference values.\n :type test: set\n :param test: A set of values to compare against the reference set.\n :rtype: float or None\n \"\"\"\n if not hasattr(reference, \"intersection\") or not hasattr(test, \"intersection\"):\n raise TypeError(\"reference and test should be sets\")\n\n if len(test) == 0:\n return None\n else:\n return len(reference.intersection(test)) / len(test)\n\n\ndef recall(reference, test):\n \"\"\"\n Given a set of reference values and a set of test values, return\n the fraction of reference values that appear in the test set.\n In particular, return card(``reference`` intersection ``test``)/card(``reference``).\n If ``reference`` is empty, then return None.\n\n :type reference: set\n :param reference: A set of reference values.\n :type test: set\n :param test: A set of values to compare against the reference set.\n :rtype: float or None\n \"\"\"\n if not hasattr(reference, \"intersection\") or not hasattr(test, \"intersection\"):\n raise TypeError(\"reference and test should be sets\")\n\n if len(reference) == 0:\n return None\n else:\n return len(reference.intersection(test)) / len(reference)\n\n\ndef f_measure(reference, test, alpha=0.5):\n \"\"\"\n Given a set of reference values and a set of test values, return\n the f-measure of the test values, when compared against the\n reference values. The f-measure is the harmonic mean of the\n ``precision`` and ``recall``, weighted by ``alpha``. 
In particular,\n given the precision *p* and recall *r* defined by:\n\n - *p* = card(``reference`` intersection ``test``)/card(``test``)\n - *r* = card(``reference`` intersection ``test``)/card(``reference``)\n\n The f-measure is:\n\n - *1/(alpha/p + (1-alpha)/r)*\n\n If either ``reference`` or ``test`` is empty, then ``f_measure``\n returns None.\n\n :type reference: set\n :param reference: A set of reference values.\n :type test: set\n :param test: A set of values to compare against the reference set.\n :rtype: float or None\n \"\"\"\n p = precision(reference, test)\n r = recall(reference, test)\n if p is None or r is None:\n return None\n if p == 0 or r == 0:\n return 0\n return 1.0 / (alpha / p + (1 - alpha) / r)\n\n\ndef log_likelihood(reference, test):\n \"\"\"\n Given a list of reference values and a corresponding list of test\n probability distributions, return the average log likelihood of\n the reference values, given the probability distributions.\n\n :param reference: A list of reference values\n :type reference: list\n :param test: A list of probability distributions over values to\n compare against the corresponding reference values.\n :type test: list(ProbDistI)\n \"\"\"\n if len(reference) != len(test):\n raise ValueError(\"Lists must have the same length.\")\n\n # Return the average value of dist.logprob(val).\n total_likelihood = sum(dist.logprob(val) for (val, dist) in zip(reference, test))\n return total_likelihood / len(reference)\n\n\ndef approxrand(a, b, **kwargs):\n \"\"\"\n Returns an approximate significance level between two lists of\n independently generated test values.\n\n Approximate randomization calculates significance by randomly drawing\n from a sample of the possible permutations. At the limit of the number\n of possible permutations, the significance level is exact. 
The\n approximate significance level is the sample mean number of times the\n statistic of the permutated lists varies from the actual statistic of\n the unpermuted argument lists.\n\n :return: a tuple containing an approximate significance level, the count\n of the number of times the pseudo-statistic varied from the\n actual statistic, and the number of shuffles\n :rtype: tuple\n :param a: a list of test values\n :type a: list\n :param b: another list of independently generated test values\n :type b: list\n \"\"\"\n shuffles = kwargs.get(\"shuffles\", 999)\n # there's no point in trying to shuffle beyond all possible permutations\n shuffles = min(shuffles, reduce(operator.mul, range(1, len(a) + len(b) + 1)))\n stat = kwargs.get(\"statistic\", lambda lst: sum(lst) / len(lst))\n verbose = kwargs.get(\"verbose\", False)\n\n if verbose:\n print(\"shuffles: %d\" % shuffles)\n\n actual_stat = fabs(stat(a) - stat(b))\n\n if verbose:\n print(\"actual statistic: %f\" % actual_stat)\n print(\"-\" * 60)\n\n c = 1e-100\n lst = LazyConcatenation([a, b])\n indices = list(range(len(a) + len(b)))\n\n for i in range(shuffles):\n if verbose and i % 10 == 0:\n print(\"shuffle: %d\" % i)\n\n shuffle(indices)\n\n pseudo_stat_a = stat(LazyMap(lambda i: lst[i], indices[: len(a)]))\n pseudo_stat_b = stat(LazyMap(lambda i: lst[i], indices[len(a) :]))\n pseudo_stat = fabs(pseudo_stat_a - pseudo_stat_b)\n\n if pseudo_stat >= actual_stat:\n c += 1\n\n if verbose and i % 10 == 0:\n print(\"pseudo-statistic: %f\" % pseudo_stat)\n print(\"significance: %f\" % ((c + 1) / (i + 1)))\n print(\"-\" * 60)\n\n significance = (c + 1) / (shuffles + 1)\n\n if verbose:\n print(\"significance: %f\" % significance)\n if betai:\n for phi in [0.01, 0.05, 0.10, 0.15, 0.25, 0.50]:\n print(\"prob(phi<=%f): %f\" % (phi, betai(c, shuffles, phi)))\n\n return (significance, c, shuffles)\n\n\ndef demo():\n print(\"-\" * 75)\n reference = \"DET NN VB DET JJ NN NN IN DET NN\".split()\n test = \"DET VB VB DET NN NN NN IN DET NN\".split()\n print(\"Reference =\", reference)\n print(\"Test =\", test)\n print(\"Accuracy:\", accuracy(reference, test))\n\n print(\"-\" * 75)\n reference_set = set(reference)\n test_set = set(test)\n print(\"Reference =\", reference_set)\n print(\"Test = \", test_set)\n print(\"Precision:\", precision(reference_set, test_set))\n print(\" Recall:\", recall(reference_set, test_set))\n print(\"F-Measure:\", f_measure(reference_set, test_set))\n print(\"-\" * 75)\n\n\nif __name__ == \"__main__\":\n demo()\n"
] |
[
[
"scipy.stats.stats.betai"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [
"0.13",
"0.14",
"0.15",
"0.19",
"0.18",
"0.12",
"0.17",
"0.16"
],
"tensorflow": []
}
] |
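Since the row above contains the full nltk/metrics/scores.py module, a short usage sketch of its main helpers may be useful; the import path follows the file location, and the example data mirrors what the module's own demo() uses.

from nltk.metrics.scores import accuracy, precision, recall, f_measure

reference = "DET NN VB DET JJ NN NN IN DET NN".split()
test = "DET VB VB DET NN NN NN IN DET NN".split()

# Token-level accuracy over aligned positions.
print(accuracy(reference, test))

# Set-based precision/recall and their alpha-weighted harmonic mean,
# f = 1 / (alpha/p + (1 - alpha)/r).
ref_set, test_set = set(reference), set(test)
print(precision(ref_set, test_set), recall(ref_set, test_set))
print(f_measure(ref_set, test_set, alpha=0.5))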
nmaeder/UmbrellaPipeline
|
[
"b2b3d0cadd8d6660e7a617f6204be042efd2d40a"
] |
[
"UmbrellaPipeline/tests/test_thorough.py"
] |
[
"import os, pytest, time, math\nimport openmmtools\nfrom openmm import Vec3, unit, app\nimport numpy as np\nimport warnings\n\nfrom UmbrellaPipeline import UmbrellaPipeline\nfrom UmbrellaPipeline.analysis import PMFCalculator\nfrom UmbrellaPipeline.path_finding import (\n Tree,\n Grid,\n GridNode,\n TreeNode,\n GridEscapeRoom,\n TreeEscapeRoom,\n)\nfrom UmbrellaPipeline.sampling import (\n add_ligand_restraint,\n ghost_ligand,\n ramp_up_coulomb,\n ramp_up_vdw,\n SamplingCluster,\n create_openmm_system,\n add_barostat,\n)\nfrom UmbrellaPipeline.utils import (\n gen_pbc_box,\n get_residue_indices,\n get_center_of_mass_coordinates,\n get_centroid_coordinates,\n parse_params,\n execute_bash,\n execute_bash_parallel,\n display_time,\n)\n\nwarnings.filterwarnings(action=\"ignore\")\n\npipeline = UmbrellaPipeline(\n ligand_residue_name=\"unl\",\n toppar_stream_file=\"UmbrellaPipeline/data/toppar/toppar.str\",\n toppar_directory=\"UmbrellaPipeline/data/toppar\",\n psf_file=\"UmbrellaPipeline/data/step5_input.psf\",\n crd_file=\"UmbrellaPipeline/data/step5_input.crd\",\n)\n\ndef test_time():\n assert display_time(0) == \"00:00\"\n assert display_time(59) == \"00:59\"\n assert display_time(60) == \"00:01:00\"\n assert display_time(61) == \"00:01:01\"\n assert display_time(44530) == \"00:12:22:10\"\n assert display_time(-33) == \"-00:33\"\n\ndef test_genbox():\n\n assert pipeline.system_info.psf_object.boxVectors == None\n minC = gen_pbc_box(\n psf=pipeline.system_info.psf_object,\n pos=pipeline.system_info.crd_object.positions,\n )\n assert minC == [\n unit.Quantity(value=-0.56125095743, unit=unit.nanometer),\n unit.Quantity(value=-0.46094509581000004, unit=unit.nanometer),\n unit.Quantity(value=-0.06344883114, unit=unit.nanometer),\n ]\n print(pipeline.system_info.psf_object.boxVectors)\n assert pipeline.system_info.psf_object.boxVectors == unit.Quantity(\n value=(\n Vec3(x=11.07094907954, y=0.0, z=0.0),\n Vec3(x=0.0, y=10.882602253800002, z=0.0),\n Vec3(x=0.0, y=0.0, z=10.20869495182),\n ),\n unit=unit.nanometer,\n )\n\n\ndef test_pipeline():\n path = pipeline.generate_path()\n\n\ndef test_add_harmonic_restraint():\n\n gen_pbc_box(\n psf=pipeline.system_info.psf_object,\n pos=pipeline.system_info.crd_object.positions,\n )\n system = pipeline.system_info.psf_object.createSystem(\n params=pipeline.system_info.params\n )\n ind = get_residue_indices(\n atom_list=pipeline.system_info.psf_object.atom_list, name=\"unl\"\n )\n fc = 10 * unit.kilocalorie_per_mole / (unit.angstrom ** 2)\n pos = Vec3(\n x=1 * unit.angstrom,\n y=2 * unit.angstrom,\n z=3 * unit.angstrom,\n )\n add_ligand_restraint(\n system=system, atom_group=ind, force_constant=fc, positions=pos\n )\n\n\ndef test_script_writing():\n output = [\n \"run_umbrella_window_0.sh\",\n \"run_umbrella_window_1.sh\",\n ]\n tree = Tree.from_files(\n positions=pipeline.system_info.crd_object.positions,\n psf=pipeline.system_info.psf_object,\n )\n st = Vec3(1, 2, 3)\n path = [st, st]\n\n sim = SamplingCluster(\n simulation_parameter=pipeline.simulation_parameters,\n system_info=pipeline.system_info,\n traj_write_path=os.path.dirname(__file__),\n conda_environment=\"openmm\",\n sge_working_dir=os.path.dirname(__file__),\n )\n\n sim.openmm_system = sim.system_info.psf_object.createSystem(sim.system_info.params)\n\n sim.write_scripts(path=path)\n\n for i in output:\n p = os.path.abspath(os.path.dirname(__file__) + \"/\" + i)\n assert os.path.exists(p)\n os.remove(p)\n\n\ndef test_ligand_indices():\n indices = 
get_residue_indices(pipeline.system_info.psf_object.atom_list, name=\"unl\")\n goal = list(range(8478, 8514, 1))\n assert indices == goal\n\n indices = get_residue_indices(\n pipeline.system_info.psf_object.atom_list, name=\"unl\", include_hydrogens=False\n )\n goal = list(range(8478, 8499, 1))\n assert indices == goal\n\n\ndef test_protein_indices():\n indices = get_residue_indices(pipeline.system_info.psf_object.atom_list)\n goal = list(range(0, 8478, 1))\n assert indices == goal\n\n\ndef test_param_parser():\n params = parse_params(\n toppar_directory=\"UmbrellaPipeline/data/toppar\",\n toppar_str_file=\"UmbrellaPipeline/data/toppar/toppar.str\",\n )\n\n\ndef test_centroid_coords():\n ind1 = get_residue_indices(\n atom_list=pipeline.system_info.psf_object.atom_list, name=\"unl\"\n )\n ind2 = get_residue_indices(\n atom_list=pipeline.system_info.psf_object.atom_list,\n name=\"unl\",\n include_hydrogens=False,\n )\n\n print(get_centroid_coordinates(pipeline.system_info.crd_object.positions, ind1))\n\n print(get_centroid_coordinates(pipeline.system_info.crd_object.positions, ind2))\n\n a = get_centroid_coordinates(pipeline.system_info.crd_object.positions, ind1)\n b = unit.Quantity(\n value=Vec3(x=4.800868342909999, y=5.1623615832338885, z=5.116963445551665),\n unit=unit.nanometer,\n )\n assert round(a.x, 5) == round(b.x, 5)\n assert round(a.y, 5) == round(b.y, 5)\n assert round(a.z, 5) == round(b.z, 5)\n a = get_centroid_coordinates(pipeline.system_info.crd_object.positions, ind2)\n b = unit.Quantity(\n value=Vec3(x=4.791905722784763, y=5.152082995253809, z=5.1381769457266655),\n unit=unit.nanometer,\n )\n assert round(a.x, 5) == round(b.x, 5)\n assert round(a.y, 5) == round(b.y, 5)\n assert round(a.z, 5) == round(b.z, 5)\n\n\ndef test_com_coords():\n gen_pbc_box(\n pos=pipeline.system_info.crd_object.positions,\n psf=pipeline.system_info.psf_object,\n )\n system = pipeline.system_info.psf_object.createSystem(\n params=pipeline.system_info.params,\n nonbondedMethod=app.PME,\n nonbondedCutoff=1.2 * unit.nanometers,\n constraints=app.HBonds,\n rigidWater=True,\n )\n a = get_center_of_mass_coordinates(\n positions=pipeline.system_info.crd_object.positions,\n indices=pipeline.system_info.ligand_indices,\n masses=system,\n )\n b = unit.Quantity(\n value=Vec3(x=4.7843494194906455, y=5.141548282974986, z=5.1745056529196995),\n unit=unit.nanometer,\n )\n assert round(a.x, 5) == round(b.x, 5)\n assert round(a.y, 5) == round(b.y, 5)\n assert round(a.z, 5) == round(b.z, 5)\n\n\ndef test_execute_bash():\n command1 = \"echo Hello World\"\n command2 = [\"echo\", \"Hello World\"]\n command3 = \"sleep 12\"\n stderr = \"testerr.log\"\n stdout = \"testout.log\"\n ret1 = execute_bash(command=command1)\n ret2 = execute_bash(command=command2, stdout_file=stdout)\n with pytest.raises(TimeoutError):\n execute_bash(command=command3, kill_after_wait=True, stderr_file=stderr)\n assert ret1 == \"Hello World\\n\"\n assert ret2 == \"Hello World\\n\"\n os.remove(stderr)\n os.remove(stdout)\n\n\ndef test_parallel_bash():\n commands = [\"sleep 3\", \"sleep 3\", \"sleep 3\", \"echo World\"]\n start = time.time()\n o = execute_bash_parallel(command=commands)\n end = time.time() - start\n assert end < 5\n assert o[3] == \"World\\n\"\n\n\ndef test_ghosting():\n\n # create simulation, system and context\n platform = openmmtools.utils.get_fastest_platform()\n if platform.getName() == (\"CUDA\" or \"OpenCL\"):\n props = {\"Precision\": \"mixed\"}\n else:\n props = None\n system = 
pipeline.system_info.psf_object.createSystem(pipeline.system_info.params)\n integrator = openmmtools.integrators.LangevinIntegrator()\n simulation = app.Simulation(\n topology=pipeline.system_info.psf_object.topology,\n system=system,\n integrator=integrator,\n platform=platform,\n platformProperties=props,\n )\n simulation.context.setPositions(pipeline.system_info.crd_object.positions)\n orig_params = []\n\n f = simulation.context.getSystem().getForces()\n for fs in f:\n if type(fs).__name__ == \"NonbondedForce\":\n for index in pipeline.system_info.ligand_indices:\n orig_params.append(fs.getParticleParameters(index))\n\n ghost_ligand(\n simulation=simulation, ligand_indices=pipeline.system_info.ligand_indices\n )\n f = simulation.context.getSystem().getForces()\n for fs in f:\n if type(fs).__name__ == \"NonbondedForce\":\n for index in pipeline.system_info.ligand_indices:\n assert fs.getParticleParameters(index) == [\n 0 * unit.elementary_charge,\n 0 * unit.nanometer,\n 0 * unit.kilojoule_per_mole,\n ]\n\n ramp_up_vdw(\n lamda=0.5,\n simulation=simulation,\n ligand_indices=pipeline.system_info.ligand_indices,\n original_parameters=orig_params,\n )\n f = simulation.context.getSystem().getForces()\n for fs in f:\n if type(fs).__name__ == \"NonbondedForce\":\n for it, index in enumerate(pipeline.system_info.ligand_indices):\n assert fs.getParticleParameters(index) == [\n 0 * unit.elementary_charge,\n 0.5 * orig_params[it][1],\n 0.5 * orig_params[it][2],\n ]\n\n ramp_up_coulomb(\n lamda=1,\n simulation=simulation,\n ligand_indices=pipeline.system_info.ligand_indices,\n original_parameters=orig_params,\n )\n f = simulation.context.getSystem().getForces()\n for fs in f:\n if type(fs).__name__ == \"NonbondedForce\":\n for it, index in enumerate(pipeline.system_info.ligand_indices):\n assert fs.getParticleParameters(index) == [\n 1 * orig_params[it][0],\n 0.5 * orig_params[it][1],\n 0.5 * orig_params[it][2],\n ]\n\n\[email protected](\n os.getenv(\"CI\") == \"true\", reason=\"Precision problem when testing on github.\"\n)\ndef test_sampling():\n\n # create simulation, system and context\n platform = openmmtools.utils.get_fastest_platform()\n if platform.getName() == (\"CUDA\" or \"OpenCL\"):\n props = {\"Precision\": \"mixed\"}\n else:\n props = None\n system = create_openmm_system(pipeline.system_info, pipeline.simulation_parameters)\n integrator = openmmtools.integrators.LangevinIntegrator()\n simulation = app.Simulation(\n topology=pipeline.system_info.psf_object.topology,\n system=system,\n integrator=integrator,\n platform=platform,\n platformProperties=props,\n )\n simulation.context.setPositions(pipeline.system_info.crd_object.positions)\n simulation.minimizeEnergy(maxIterations=50)\n simulation.step(5)\n\n\ndef test_system_creation():\n system = create_openmm_system(pipeline.system_info, pipeline.simulation_parameters)\n system = create_openmm_system(\n pipeline.system_info,\n pipeline.simulation_parameters,\n ligand_restraint=True,\n bb_restraints=True,\n path=[Vec3(1, 2, 3)],\n )\n\n\ndef test_barostat_creation():\n system = create_openmm_system(pipeline.system_info, pipeline.simulation_parameters)\n add_barostat(\n system, properties=pipeline.simulation_parameters, membrane_barostat=True\n )\n add_barostat(\n system, properties=pipeline.simulation_parameters, membrane_barostat=False\n )\n\n\ndef test_grid_escape_room_basic():\n grid = Grid(grid=np.zeros(shape=(10, 10, 10), dtype=bool))\n start = GridNode(0, 0, 0)\n end = GridNode(9, 9, 9)\n escape_room = GridEscapeRoom(grid=grid, 
start=start)\n\n\ndef test_grid_successors():\n grid = Grid(grid=np.zeros(shape=(10, 10, 10), dtype=bool))\n grid.grid[1][1][1] = True\n start = GridNode(0, 0, 0)\n end = GridNode(9, 9, 9)\n escape_room = GridEscapeRoom(grid=grid, start=start)\n children = escape_room.generate_successors(parent=start)\n supposedchildren = [\n [0, 0, 1],\n [0, 1, 0],\n [0, 1, 1],\n [1, 0, 0],\n [1, 0, 1],\n [1, 1, 0],\n ]\n\n for i, c in enumerate(children):\n assert c.get_coordinates() == supposedchildren[i]\n\n children = escape_room.generate_successors(parent=end)\n supposedchildren = [\n [8, 8, 8],\n [8, 8, 9],\n [8, 9, 8],\n [8, 9, 9],\n [9, 8, 8],\n [9, 8, 9],\n [9, 9, 8],\n ]\n\n for i, c in enumerate(children):\n assert c.get_coordinates() == supposedchildren[i]\n\n\ndef test_grid_pathfinding():\n grid = Grid.from_files(\n crd=pipeline.system_info.crd_object,\n psf=pipeline.system_info.psf_object,\n gridsize=3 * unit.angstrom,\n )\n node = grid.node_from_files(\n psf=pipeline.system_info.psf_object,\n crd=pipeline.system_info.crd_object,\n name=\"UNL\",\n )\n assert not grid.position_is_blocked(node)\n escape_room = GridEscapeRoom(grid=grid, start=node)\n path = escape_room.escape_room()\n assert path != []\n\n\ndef test_grid_path_partitioning():\n\n # Generate grid and a star objects\n\n path1, path2 = [], []\n goal1, goal2 = [], []\n sq3 = 0.5 / math.sqrt(3)\n grid1 = Grid(\n grid=np.zeros(shape=(10, 10, 10), dtype=bool),\n boxlengths=unit.Quantity(value=Vec3(1, 1, 1), unit=unit.angstrom),\n offset=Vec3(0, 0, 0) * unit.angstrom,\n )\n grid2 = Grid(\n grid=np.zeros(shape=(10, 10, 10), dtype=bool),\n boxlengths=unit.Quantity(value=Vec3(2, 2, 2), unit=unit.angstrom),\n offset=Vec3(-9, -5, -6) * unit.angstrom,\n )\n\n for i in range(5):\n path1.append(GridNode(x=i, y=i, z=i))\n path2.append(GridNode(x=i, y=0, z=1))\n\n escape_room1 = GridEscapeRoom(grid=grid1, start=GridNode(x=0, y=0, z=0))\n escape_room2 = GridEscapeRoom(grid=grid2, start=GridNode(x=0, y=0, z=0))\n\n # Generate paths\n\n escape_room1.shortest_path = path1\n escape_room2.shortest_path = path2\n\n path1 = escape_room1.get_path_for_sampling(0.05 * unit.nanometer)\n path2 = escape_room2.get_path_for_sampling(0.5 * unit.angstrom)\n\n # Generate desired outcomes\n\n for i in range(len(path1)):\n goal1.append(\n unit.Quantity(Vec3(x=i * sq3, y=i * sq3, z=i * sq3), unit=unit.angstrom)\n )\n\n for i in range(len(path2)):\n goal2.append(\n unit.Quantity(Vec3(x=i / 2 - 9, y=0 - 5, z=2 - 6), unit=unit.angstrom)\n )\n\n # Check generated paths for tested outcome\n\n for i in range(len(path1)):\n for j in range(3):\n assert round(path1[i][j].value_in_unit(path1[i].unit), 5) == round(\n goal1[i][j].value_in_unit(goal1[i].unit), 5\n )\n for i in range(len(path2)):\n for j in range(3):\n print(i, j)\n assert round(path2[i][j].value_in_unit(path2[i].unit), 5) == round(\n goal2[i][j].value_in_unit(goal2[i].unit), 5\n )\n\n\ndef test_tree_successor():\n nodes = []\n for i in range(5):\n nodes.append(unit.Quantity(Vec3(i + 1, i + 1, i + 2), unit.nanometer))\n tree = Tree(coordinates=nodes)\n start = unit.Quantity(Vec3(0, 0, 0), unit.nanometer)\n escape_room = TreeEscapeRoom(tree=tree, start=start)\n parent = TreeNode()\n children = escape_room.generate_successors(\n parent=parent, resolution=1, wall_radius=0.12\n )\n supposedchildren = []\n for i in tree.POSSIBLE_NEIGHBOURS:\n supposedchildren.append(\n TreeNode(\n x=i[0] + start.x,\n y=i[1] + start.y,\n z=i[2] + start.z,\n )\n )\n for i, c in enumerate(children):\n assert c.get_grid_coordinates() == 
supposedchildren[i].get_grid_coordinates()\n\n\ndef test_path_finding():\n escape_room = TreeEscapeRoom.from_files(pipeline.system_info)\n path = escape_room.find_path()\n assert path != []\n\n\ndef test_tree_path_partitioning():\n escape_room = TreeEscapeRoom.from_files(pipeline.system_info)\n path = escape_room.find_path()\n newp = escape_room.get_path_for_sampling()\n for i, p in enumerate(newp):\n try:\n dist = Tree.calculate_euclidean_distance(p, newp[i + 1])\n assert round(dist, 3) == 0.1\n except IndexError:\n pass\n\n\ndef test_load_path():\n pmf = PMFCalculator(\n simulation_parameters=pipeline.simulation_parameters,\n system_info=pipeline.system_info,\n trajectory_directory=\"UmbrellaPipeline/data\",\n original_path_interval=1 * unit.nanometer,\n )\n a = pmf.load_original_path()\n b = pmf.load_sampled_coordinates()\n assert len(a) == 43\n assert len(b) == 43 * 500\n\n\ndef test_pymbar_pmf():\n pmf = PMFCalculator(\n simulation_parameters=pipeline.simulation_parameters,\n system_info=pipeline.system_info,\n trajectory_directory=\"UmbrellaPipeline/data\",\n original_path_interval=1 * unit.nanometer,\n )\n a = pmf.load_original_path()\n b = pmf.load_sampled_coordinates()\n p, e = pmf.calculate_pmf()\n assert len(a) == len(p)\n"
] |
[
[
"numpy.zeros"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
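The test module in the row above exercises the package end to end; the minimal entry point it relies on is sketched below, with the constructor arguments and data paths copied verbatim from the tests (they are assumptions tied to this particular revision's directory layout).

from UmbrellaPipeline import UmbrellaPipeline

pipeline = UmbrellaPipeline(
    ligand_residue_name="unl",
    toppar_stream_file="UmbrellaPipeline/data/toppar/toppar.str",
    toppar_directory="UmbrellaPipeline/data/toppar",
    psf_file="UmbrellaPipeline/data/step5_input.psf",
    crd_file="UmbrellaPipeline/data/step5_input.crd",
)

# As in test_pipeline(): find an escape path for the ligand, which the sampling
# code later uses to place the umbrella windows.
path = pipeline.generate_path()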
MarceloJacinto/dsorlib
|
[
"5208919579726b73f088605885f6e1f0bf17d297"
] |
[
"vehicles/disturbances/gaussian_disturbance.py"
] |
[
"# MIT License\n#\n# Copyright (c) 2021 Marcelo Jacinto\n#\n# Permission is hereby granted, free of charge, to any person obtaining a copy\n# of this software and associated documentation files (the \"Software\"), to deal\n# in the Software without restriction, including without limitation the rights\n# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell\n# copies of the Software, and to permit persons to whom the Software is\n# furnished to do so, subject to the following conditions:\n#\n# The above copyright notice and this permission notice shall be included in all\n# copies or substantial portions of the Software.\n#\n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\n# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\n# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE\n# SOFTWARE.\nfrom numpy import array, array_equal, zeros, maximum, minimum\nfrom numpy.random import normal\n\nfrom dsorlib.vehicles.disturbances.abstract_disturbance import AbstractDisturbance\n\n\nclass GaussianDisturbance(AbstractDisturbance):\n \"\"\"\n GaussianOceanCurrents is a class that implements ocean currents following a random\n gaussian distribution centered around a specified mean vector\n \"\"\"\n\n def __init__(self,\n mean: array = array([0.0, 0.0, 0.0]),\n sigma: array = array([0.0, 0.0, 0.0]),\n min: array = array([0.0, 0.0, 0.0]),\n max: array = array([0.0, 0.0, 0.0])):\n \"\"\"\n Instantiate a Gaussian Ocean Currents object.\n It defines the mean linear velocity for the current -> mean=[v_x, v_y, v_z] in the Inertial Frame {U}\n It also defines the sigma=[sigma_x, sigma_y, sigma_z]\n \"\"\"\n # Call the super class constructor\n super().__init__()\n\n # Define the mean velocity for the waves\n self.mean = array(mean).reshape((3,))\n\n # Define the std deviation for the velocity of the waves (noise)\n self.sigma = array(sigma).reshape((3,))\n\n # Define the boundaries for the values of the current velocities\n self.min = array(min).reshape((3,))\n self.max = array(max).reshape((3,))\n\n def get_currents(self):\n \"\"\"\n Update the values of the waves following a random gaussian distribution\n\n returns:\n A numpy array with 3 elements (vx, vy, vz)\n \"\"\"\n\n # Check if we have no std. dev. - In this case, the waves are \"static\"\n if array_equal(self.sigma, zeros(self.sigma.shape)):\n return self.mean\n\n # Generate the random numbers according to a gaussian distribution\n rand_current = normal(self.mean, self.sigma)\n\n # Check if the generated current is above the limits pre-defined\n rand_current = maximum(self.min, rand_current)\n rand_current = minimum(self.max, rand_current)\n\n return rand_current\n"
] |
[
[
"numpy.maximum",
"numpy.minimum",
"numpy.random.normal",
"numpy.array",
"numpy.zeros"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
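A small usage sketch for the GaussianDisturbance class in the row above; the module path is inferred from the package layout visible in its own imports (dsorlib.vehicles.disturbances), so treat it as an assumption.

from numpy import array
from dsorlib.vehicles.disturbances.gaussian_disturbance import GaussianDisturbance

disturbance = GaussianDisturbance(
    mean=array([0.1, 0.0, 0.0]),     # mean current velocity in the inertial frame
    sigma=array([0.02, 0.02, 0.0]),  # per-axis std. dev. of the gaussian noise
    min=array([-0.2, -0.2, 0.0]),    # element-wise lower bound on the sample
    max=array([0.2, 0.2, 0.0]),      # element-wise upper bound on the sample
)

# One random (vx, vy, vz) draw, clipped to [min, max]; with sigma == 0 the mean
# is returned unchanged.
print(disturbance.get_currents())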
petergtz/io
|
[
"1f80d7dfcc824eb9803ea977228ae19249ade13f"
] |
[
"tensorflow_io/core/python/ops/audio_io_tensor_ops.py"
] |
[
"# Copyright 2018 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"AudioIOTensor\"\"\"\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport tensorflow as tf\nfrom tensorflow_io.core.python.ops import io_tensor_ops\nfrom tensorflow_io.core.python.ops import core_ops\n\nclass AudioGraphIOTensor(object):\n \"\"\"AudioGraphIOTensor\"\"\"\n\n #=============================================================================\n # Constructor (private)\n #=============================================================================\n def __init__(self,\n resource,\n shape, dtype, rate,\n internal=False):\n with tf.name_scope(\"AudioGraphIOTensor\"):\n assert internal\n self._resource = resource\n self._shape = shape\n self._dtype = dtype\n self._rate = rate\n super(AudioGraphIOTensor, self).__init__()\n\n #=============================================================================\n # Accessors\n #=============================================================================\n\n @property\n def shape(self):\n \"\"\"Returns the `TensorShape` that represents the shape of the tensor.\"\"\"\n return self._shape\n\n @property\n def dtype(self):\n \"\"\"Returns the `dtype` of elements in the tensor.\"\"\"\n return self._dtype\n\n #=============================================================================\n # String Encoding\n #=============================================================================\n def __repr__(self):\n meta = \"\".join([\", %s=%s\" % (\n k, repr(v.__get__(self))) for k, v in self.__class__.__dict__.items(\n ) if isinstance(v, _IOTensorMeta)])\n return \"<%s: shape=%s, dtype=%s%s>\" % (\n self.__class__.__name__, self.shape, self.dtype, meta)\n\n #=============================================================================\n # Tensor Type Conversions\n #=============================================================================\n\n def to_tensor(self):\n \"\"\"Converts this `IOTensor` into a `tf.Tensor`.\n\n Args:\n name: A name prefix for the returned tensors (optional).\n\n Returns:\n A `Tensor` with value obtained from this `IOTensor`.\n \"\"\"\n return core_ops.io_wav_readable_read(\n self._resource, 0, -1, dtype=self._dtype)\n\n #=============================================================================\n # Indexing and slicing\n #=============================================================================\n def __getitem__(self, key):\n \"\"\"Returns the specified piece of this IOTensor.\"\"\"\n if isinstance(key, slice):\n return core_ops.io_wav_readable_read(\n self._resource, key.start, key.stop, dtype=self._dtype)\n item = core_ops.io_wav_readable_read(\n self._resource, key, key + 1, dtype=self._dtype)\n if tf.shape(item)[0] == 0:\n raise IndexError(\"index %s is out of range\" % key)\n return item[0]\n\n def __len__(self):\n \"\"\"Returns the total number of 
items of this IOTensor.\"\"\"\n return self._shape[0]\n\n #=============================================================================\n # Accessors\n #=============================================================================\n @io_tensor_ops._IOTensorMeta # pylint: disable=protected-access\n def rate(self):\n \"\"\"The sample `rate` of the audio stream\"\"\"\n return self._rate\n\nclass AudioIOTensor(AudioGraphIOTensor):\n \"\"\"AudioIOTensor\n\n An `AudioIOTensor` is an `IOTensor` backed by audio files such as WAV\n format. It consists of only one `Tensor` with `shape` defined as\n `[n_samples, n_channels]`. It is a subclass of `BaseIOTensor`\n with additional `rate` property exposed, indicating the sample rate\n of the audio.\n \"\"\"\n\n #=============================================================================\n # Constructor (private)\n #=============================================================================\n def __init__(self,\n filename,\n internal=False):\n with tf.name_scope(\"FromAudio\"):\n resource = core_ops.io_wav_readable_init(filename)\n shape, dtype, rate = core_ops.io_wav_readable_spec(resource)\n shape = tf.TensorShape(shape)\n dtype = tf.as_dtype(dtype.numpy())\n super(AudioIOTensor, self).__init__(\n resource, shape, dtype, rate, internal=internal)\n"
] |
[
[
"tensorflow.TensorShape",
"tensorflow.name_scope",
"tensorflow.shape"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": [
"1.10",
"1.12",
"1.4",
"1.13",
"1.5",
"1.7",
"0.12",
"1.0",
"1.2"
]
}
] |
abagaria/RBFDQN
|
[
"0b31d4083abf0b1a9721a7bb0411bbe3fd873422"
] |
[
"novelty_classifier.py"
] |
[
"import numpy as np\nfrom thundersvm import OneClassSVM\nfrom tqdm import tqdm\n\n\nclass NoveltyDetectionClassifier(object):\n def __init__(self, nu_high, nu_low, nu_resolution, gamma=\"scale\"):\n self.nu_high = nu_high\n self.nu_low = nu_low\n self.nu_resolution = nu_resolution\n\n # -- Gamma of \"auto\" corresponds to 1/n_features\n # -- Gamma of \"scale\" corresponds to 1/(n_features * X.var())\n # -- Depending on whether the var is less than or greater than 1,\n # -- setting gamma to \"scale\" either leads to a smooth or complex decision boundary\n # -- Gamma can also be a floating point number\n self.gamma = gamma\n\n self.classifiers = []\n\n def __call__(self, X):\n return self.predict(X)\n\n def determine_gamma(self, X):\n if isinstance(self.gamma, (int, float)):\n return self.gamma\n\n n_features = X.shape[1]\n\n if self.gamma == \"auto\":\n return 1. / n_features\n if self.gamma == \"scale\":\n return 1. / (n_features * X.var())\n\n raise ValueError(self.gamma)\n\n def create_one_class_classifier(self, nu, X):\n gamma = self.determine_gamma(X)\n clf = OneClassSVM(kernel=\"rbf\", nu=nu, gamma=gamma)\n return clf\n\n def create_family_of_classifiers(self, X):\n nu_range = np.arange(self.nu_low, self.nu_high, self.nu_resolution)\n classifiers = [self.create_one_class_classifier(nu, X) for nu in nu_range]\n return classifiers\n\n def fit(self, X):\n self.classifiers = self.create_family_of_classifiers(X)\n for classifier in tqdm(self.classifiers, desc=\"Fitting OC-SVMs\"): # type: OneClassSVM\n classifier.fit(X)\n\n def predict(self, X): # TODO: Chunk up inference\n overall_predictions = []\n for classifier in self.classifiers: # type: OneClassSVM\n clf_predictions = classifier.predict(X) == 1\n overall_predictions.append(clf_predictions)\n overall_predictions = np.array(overall_predictions)\n prediction_probabilities = np.mean(overall_predictions, axis=0)\n return prediction_probabilities\n"
] |
[
[
"numpy.arange",
"numpy.array",
"numpy.mean"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
cpcloud/ibis-bigquery
|
[
"e3997d42752ec49f4b8c8625097682b27ef4d350"
] |
[
"ibis_bigquery/compiler.py"
] |
[
"\"\"\"Module to convert from Ibis expression to SQL string.\"\"\"\n\nimport base64\nimport datetime\nfrom functools import partial\n\nimport ibis\nfrom ibis.backends.base.sql import compiler\n\ntry:\n import ibis.common.exceptions as com\nexcept ImportError:\n import ibis.common as com\n\nimport ibis.expr.datatypes as dt\nimport ibis.expr.lineage as lin\nimport ibis.expr.operations as ops\nimport ibis.expr.types as ir\nimport numpy as np\nimport regex as re\nimport toolz\nfrom ibis.backends.base.sql.compiler import ExprTranslator, TableSetFormatter\nfrom ibis.backends.base.sql.registry import (\n fixed_arity,\n literal,\n operation_registry,\n reduction,\n unary,\n)\nfrom multipledispatch import Dispatcher\n\nfrom .datatypes import ibis_type_to_bigquery_type\n\n\nclass BigQueryUDFNode(ops.ValueOp):\n \"\"\"Represents use of a UDF.\"\"\"\n\n\nclass BigQueryUDFDefinition(compiler.DDL):\n \"\"\"Represents definition of a temporary UDF.\"\"\"\n\n def __init__(self, expr, context):\n self.expr = expr\n self.context = context\n\n def compile(self):\n \"\"\"Generate UDF string from definition.\"\"\"\n return self.expr.op().js\n\n\nclass BigQueryUnion(compiler.Union):\n \"\"\"Union of tables.\"\"\"\n\n @staticmethod\n def keyword(distinct):\n \"\"\"Use disctinct UNION if distinct is True.\"\"\"\n return \"UNION DISTINCT\" if distinct else \"UNION ALL\"\n\n\ndef find_bigquery_udf(expr):\n \"\"\"Filter which includes only UDFs from expression tree.\"\"\"\n if isinstance(expr.op(), BigQueryUDFNode):\n result = expr\n else:\n result = None\n return lin.proceed, result\n\n\ndef _extract_field(sql_attr):\n def extract_field_formatter(translator, expr):\n op = expr.op()\n arg = translator.translate(op.args[0])\n if sql_attr == \"epochseconds\":\n return f\"UNIX_SECONDS({arg})\"\n else:\n return f\"EXTRACT({sql_attr} from {arg})\"\n\n return extract_field_formatter\n\n\nbigquery_cast = Dispatcher(\"bigquery_cast\")\n\n\n@bigquery_cast.register(str, dt.Timestamp, dt.Integer)\ndef bigquery_cast_timestamp_to_integer(compiled_arg, from_, to):\n \"\"\"Convert TIMESTAMP to INT64 (seconds since Unix epoch).\"\"\"\n return \"UNIX_MICROS({})\".format(compiled_arg)\n\n\n@bigquery_cast.register(str, dt.DataType, dt.DataType)\ndef bigquery_cast_generate(compiled_arg, from_, to):\n \"\"\"Cast to desired type.\"\"\"\n sql_type = ibis_type_to_bigquery_type(to)\n return \"CAST({} AS {})\".format(compiled_arg, sql_type)\n\n\ndef _cast(translator, expr):\n op = expr.op()\n arg, target_type = op.args\n arg_formatted = translator.translate(arg)\n return bigquery_cast(arg_formatted, arg.type(), target_type)\n\n\ndef integer_to_timestamp(translator: compiler.ExprTranslator, expr: ibis.Expr) -> str:\n \"\"\"Interprets an integer as a timestamp.\"\"\"\n op = expr.op()\n arg, unit = op.args\n arg = translator.translate(arg)\n\n if unit == \"s\":\n return \"TIMESTAMP_SECONDS({})\".format(arg)\n elif unit == \"ms\":\n return \"TIMESTAMP_MILLIS({})\".format(arg)\n elif unit == \"us\":\n return \"TIMESTAMP_MICROS({})\".format(arg)\n elif unit == \"ns\":\n # Timestamps are represented internally as elapsed microseconds, so some\n # rounding is required if an integer represents nanoseconds.\n # https://cloud.google.com/bigquery/docs/reference/standard-sql/data-types#timestamp_type\n return \"TIMESTAMP_MICROS(CAST(ROUND({} / 1000) AS INT64))\".format(arg)\n\n raise NotImplementedError(\"cannot cast unit {}\".format(unit))\n\n\ndef _struct_field(translator, expr):\n arg, field = expr.op().args\n arg_formatted = 
translator.translate(arg)\n return \"{}.`{}`\".format(arg_formatted, field)\n\n\ndef _array_concat(translator, expr):\n return \"ARRAY_CONCAT({})\".format(\n \", \".join(map(translator.translate, expr.op().args))\n )\n\n\ndef _array_index(translator, expr):\n # SAFE_OFFSET returns NULL if out of bounds\n return \"{}[SAFE_OFFSET({})]\".format(*map(translator.translate, expr.op().args))\n\n\ndef _hash(translator, expr):\n op = expr.op()\n arg, how = op.args\n\n arg_formatted = translator.translate(arg)\n\n if how == \"farm_fingerprint\":\n return f\"farm_fingerprint({arg_formatted})\"\n else:\n raise NotImplementedError(how)\n\n\ndef _string_find(translator, expr):\n haystack, needle, start, end = expr.op().args\n\n if start is not None:\n raise NotImplementedError(\"start not implemented for string find\")\n if end is not None:\n raise NotImplementedError(\"end not implemented for string find\")\n\n return \"STRPOS({}, {}) - 1\".format(\n translator.translate(haystack), translator.translate(needle)\n )\n\n\ndef _translate_pattern(translator, pattern):\n # add 'r' to string literals to indicate to BigQuery this is a raw string\n return \"r\" * isinstance(pattern.op(), ops.Literal) + translator.translate(pattern)\n\n\ndef _regex_search(translator, expr):\n arg, pattern = expr.op().args\n regex = _translate_pattern(translator, pattern)\n result = \"REGEXP_CONTAINS({}, {})\".format(translator.translate(arg), regex)\n return result\n\n\ndef _regex_extract(translator, expr):\n arg, pattern, index = expr.op().args\n regex = _translate_pattern(translator, pattern)\n result = \"REGEXP_EXTRACT_ALL({}, {})[SAFE_OFFSET({})]\".format(\n translator.translate(arg), regex, translator.translate(index)\n )\n return result\n\n\ndef _regex_replace(translator, expr):\n arg, pattern, replacement = expr.op().args\n regex = _translate_pattern(translator, pattern)\n result = \"REGEXP_REPLACE({}, {}, {})\".format(\n translator.translate(arg), regex, translator.translate(replacement)\n )\n return result\n\n\ndef _string_concat(translator, expr):\n return \"CONCAT({})\".format(\", \".join(map(translator.translate, expr.op().arg)))\n\n\ndef _string_join(translator, expr):\n sep, args = expr.op().args\n return \"ARRAY_TO_STRING([{}], {})\".format(\n \", \".join(map(translator.translate, args)), translator.translate(sep)\n )\n\n\ndef _string_ascii(translator, expr):\n (arg,) = expr.op().args\n return \"TO_CODE_POINTS({})[SAFE_OFFSET(0)]\".format(translator.translate(arg))\n\n\ndef _string_right(translator, expr):\n arg, nchars = map(translator.translate, expr.op().args)\n return \"SUBSTR({arg}, -LEAST(LENGTH({arg}), {nchars}))\".format(\n arg=arg, nchars=nchars\n )\n\n\ndef _string_substring(translator, expr):\n op = expr.op()\n arg, start, length = op.args\n if length.op().value < 0:\n raise ValueError(\"Length parameter should not be a negative value.\")\n\n base_substring = operation_registry[ops.Substring]\n return base_substring(translator, expr)\n\n\ndef _array_literal_format(expr):\n return str(list(expr.op().value))\n\n\ndef _log(translator, expr):\n op = expr.op()\n arg, base = op.args\n arg_formatted = translator.translate(arg)\n\n if base is None:\n return \"ln({})\".format(arg_formatted)\n\n base_formatted = translator.translate(base)\n return \"log({}, {})\".format(arg_formatted, base_formatted)\n\n\ndef _literal(translator, expr):\n\n if isinstance(expr, ir.NumericValue):\n value = expr.op().value\n if not np.isfinite(value):\n return \"CAST({!r} AS FLOAT64)\".format(str(value))\n\n # special case literal 
timestamp, date, and time scalars\n if isinstance(expr.op(), ops.Literal):\n value = expr.op().value\n if isinstance(expr, ir.DateScalar):\n if isinstance(value, datetime.datetime):\n raw_value = value.date()\n else:\n raw_value = value\n return \"DATE '{}'\".format(raw_value)\n elif isinstance(expr, ir.TimestampScalar):\n return \"TIMESTAMP '{}'\".format(value)\n elif isinstance(expr, ir.TimeScalar):\n # TODO: define extractors on TimeValue expressions\n return \"TIME '{}'\".format(value)\n elif isinstance(expr, ir.BinaryScalar):\n return \"FROM_BASE64('{}')\".format(\n base64.b64encode(value).decode(encoding=\"utf-8\")\n )\n\n try:\n return literal(translator, expr)\n except NotImplementedError:\n if isinstance(expr, ir.ArrayValue):\n return _array_literal_format(expr)\n raise NotImplementedError(type(expr).__name__)\n\n\ndef _arbitrary(translator, expr):\n arg, how, where = expr.op().args\n\n if where is not None:\n arg = where.ifelse(arg, ibis.NA)\n\n if how not in (None, \"first\"):\n raise com.UnsupportedOperationError(\n \"{!r} value not supported for arbitrary in BigQuery\".format(how)\n )\n\n return \"ANY_VALUE({})\".format(translator.translate(arg))\n\n\n_date_units = {\n \"Y\": \"YEAR\",\n \"Q\": \"QUARTER\",\n \"W\": \"WEEK\",\n \"M\": \"MONTH\",\n \"D\": \"DAY\",\n}\n\n\n_timestamp_units = {\n \"us\": \"MICROSECOND\",\n \"ms\": \"MILLISECOND\",\n \"s\": \"SECOND\",\n \"m\": \"MINUTE\",\n \"h\": \"HOUR\",\n}\n_timestamp_units.update(_date_units)\n\n\ndef _truncate(kind, units):\n def truncator(translator, expr):\n arg, unit = expr.op().args\n trans_arg = translator.translate(arg)\n valid_unit = units.get(unit)\n if valid_unit is None:\n raise com.UnsupportedOperationError(\n \"BigQuery does not support truncating {} values to unit \"\n \"{!r}\".format(arg.type(), unit)\n )\n return \"{}_TRUNC({}, {})\".format(kind, trans_arg, valid_unit)\n\n return truncator\n\n\ndef _timestamp_op(func, units):\n def _formatter(translator, expr):\n op = expr.op()\n arg, offset = op.args\n\n unit = offset.type().unit\n if unit not in units:\n raise com.UnsupportedOperationError(\n \"BigQuery does not allow binary operation \"\n \"{} with INTERVAL offset {}\".format(func, unit)\n )\n formatted_arg = translator.translate(arg)\n formatted_offset = translator.translate(offset)\n result = \"{}({}, {})\".format(func, formatted_arg, formatted_offset)\n return result\n\n return _formatter\n\n\nSTRFTIME_FORMAT_FUNCTIONS = {\n dt.Date: \"DATE\",\n dt.Time: \"TIME\",\n dt.Timestamp: \"TIMESTAMP\",\n}\n\n\n_operation_registry = {\n **operation_registry,\n}\n_operation_registry.update(\n {\n ops.ExtractYear: _extract_field(\"year\"),\n ops.ExtractMonth: _extract_field(\"month\"),\n ops.ExtractDay: _extract_field(\"day\"),\n ops.ExtractHour: _extract_field(\"hour\"),\n ops.ExtractMinute: _extract_field(\"minute\"),\n ops.ExtractSecond: _extract_field(\"second\"),\n ops.ExtractMillisecond: _extract_field(\"millisecond\"),\n ops.Hash: _hash,\n ops.StringReplace: fixed_arity(\"REPLACE\", 3),\n ops.StringSplit: fixed_arity(\"SPLIT\", 2),\n ops.StringConcat: _string_concat,\n ops.StringJoin: _string_join,\n ops.StringAscii: _string_ascii,\n ops.StringFind: _string_find,\n ops.Substring: _string_substring,\n ops.StrRight: _string_right,\n ops.Repeat: fixed_arity(\"REPEAT\", 2),\n ops.RegexSearch: _regex_search,\n ops.RegexExtract: _regex_extract,\n ops.RegexReplace: _regex_replace,\n ops.GroupConcat: reduction(\"STRING_AGG\"),\n ops.IfNull: fixed_arity(\"IFNULL\", 2),\n ops.Cast: _cast,\n ops.StructField: 
_struct_field,\n ops.ArrayCollect: unary(\"ARRAY_AGG\"),\n ops.ArrayConcat: _array_concat,\n ops.ArrayIndex: _array_index,\n ops.ArrayLength: unary(\"ARRAY_LENGTH\"),\n ops.HLLCardinality: reduction(\"APPROX_COUNT_DISTINCT\"),\n ops.Log: _log,\n ops.Sign: unary(\"SIGN\"),\n ops.Modulus: fixed_arity(\"MOD\", 2),\n ops.Date: unary(\"DATE\"),\n # BigQuery doesn't have these operations built in.\n # ops.ArrayRepeat: _array_repeat,\n # ops.ArraySlice: _array_slice,\n ops.Literal: _literal,\n ops.Arbitrary: _arbitrary,\n ops.TimestampTruncate: _truncate(\"TIMESTAMP\", _timestamp_units),\n ops.DateTruncate: _truncate(\"DATE\", _date_units),\n ops.TimeTruncate: _truncate(\"TIME\", _timestamp_units),\n ops.Time: unary(\"TIME\"),\n ops.TimestampAdd: _timestamp_op(\"TIMESTAMP_ADD\", {\"h\", \"m\", \"s\", \"ms\", \"us\"}),\n ops.TimestampSub: _timestamp_op(\"TIMESTAMP_SUB\", {\"h\", \"m\", \"s\", \"ms\", \"us\"}),\n ops.DateAdd: _timestamp_op(\"DATE_ADD\", {\"D\", \"W\", \"M\", \"Q\", \"Y\"}),\n ops.DateSub: _timestamp_op(\"DATE_SUB\", {\"D\", \"W\", \"M\", \"Q\", \"Y\"}),\n ops.TimestampNow: fixed_arity(\"CURRENT_TIMESTAMP\", 0),\n ops.TimestampFromUNIX: integer_to_timestamp,\n }\n)\n\n\ndef _try_register_op(op_name: str, value):\n \"\"\"Register operation if it exists in Ibis.\n\n This allows us to decouple slightly from ibis-framework releases.\n \"\"\"\n if hasattr(ops, op_name):\n _operation_registry[getattr(ops, op_name)] = value\n\n\n# 2.x\n_try_register_op(\"BitAnd\", reduction(\"BIT_AND\"))\n_try_register_op(\"BitOr\", reduction(\"BIT_OR\"))\n_try_register_op(\"BitXor\", reduction(\"BIT_XOR\"))\n# 1.4\n_try_register_op(\"ExtractQuarter\", _extract_field(\"quarter\"))\n_try_register_op(\"ExtractEpochSeconds\", _extract_field(\"epochseconds\"))\n\n\n_invalid_operations = {\n ops.Translate,\n ops.FindInSet,\n ops.Capitalize,\n ops.DateDiff,\n ops.TimestampDiff,\n}\n\n_operation_registry = {\n k: v for k, v in _operation_registry.items() if k not in _invalid_operations\n}\n\n\nclass BigQueryExprTranslator(ExprTranslator):\n \"\"\"Translate expressions to strings.\"\"\"\n\n _registry = _operation_registry\n\n @classmethod\n def compiles(cls, klass):\n def decorator(f):\n cls._registry[klass] = f\n return f\n\n return decorator\n\n def _trans_param(self, expr):\n op = expr.op()\n if op not in self.context.params:\n raise KeyError(op)\n return \"@{}\".format(expr.get_name())\n\n\ncompiles = BigQueryExprTranslator.compiles\nrewrites = BigQueryExprTranslator.rewrites\n\n\n@compiles(ops.DayOfWeekIndex)\ndef bigquery_day_of_week_index(t, e):\n \"\"\"Convert timestamp to day-of-week integer.\"\"\"\n arg = e.op().args[0]\n arg_formatted = t.translate(arg)\n return \"MOD(EXTRACT(DAYOFWEEK FROM {}) + 5, 7)\".format(arg_formatted)\n\n\n@rewrites(ops.DayOfWeekName)\ndef bigquery_day_of_week_name(e):\n \"\"\"Convert TIMESTAMP to day-of-week string.\"\"\"\n arg = e.op().args[0]\n return arg.strftime(\"%A\")\n\n\n@compiles(ops.Divide)\ndef bigquery_compiles_divide(t, e):\n \"\"\"Floating point division.\"\"\"\n return \"IEEE_DIVIDE({}, {})\".format(*map(t.translate, e.op().args))\n\n\n@compiles(ops.Strftime)\ndef compiles_strftime(translator, expr):\n \"\"\"Timestamp formatting.\"\"\"\n arg, format_string = expr.op().args\n arg_type = arg.type()\n strftime_format_func_name = STRFTIME_FORMAT_FUNCTIONS[type(arg_type)]\n fmt_string = translator.translate(format_string)\n arg_formatted = translator.translate(arg)\n if isinstance(arg_type, dt.Timestamp):\n return \"FORMAT_{}({}, {}, {!r})\".format(\n 
strftime_format_func_name,\n fmt_string,\n arg_formatted,\n arg_type.timezone if arg_type.timezone is not None else \"UTC\",\n )\n return \"FORMAT_{}({}, {})\".format(\n strftime_format_func_name, fmt_string, arg_formatted\n )\n\n\n@compiles(ops.StringToTimestamp)\ndef compiles_string_to_timestamp(translator, expr):\n \"\"\"Timestamp parsing.\"\"\"\n arg, format_string, timezone_arg = expr.op().args\n fmt_string = translator.translate(format_string)\n arg_formatted = translator.translate(arg)\n if timezone_arg is not None:\n timezone_str = translator.translate(timezone_arg)\n return \"PARSE_TIMESTAMP({}, {}, {})\".format(\n fmt_string, arg_formatted, timezone_str\n )\n return \"PARSE_TIMESTAMP({}, {})\".format(fmt_string, arg_formatted)\n\n\nclass BigQueryTableSetFormatter(TableSetFormatter):\n def _quote_identifier(self, name):\n if re.match(r\"^[A-Za-z][A-Za-z_0-9]*$\", name):\n return name\n return \"`{}`\".format(name)\n\n\nclass BigQueryCompiler(compiler.Compiler):\n translator_class = BigQueryExprTranslator\n table_set_formatter_class = BigQueryTableSetFormatter\n union_class = BigQueryUnion\n\n @staticmethod\n def _generate_setup_queries(expr, context):\n \"\"\"Generate DDL for temporary resources.\"\"\"\n queries = map(\n partial(BigQueryUDFDefinition, context=context),\n lin.traverse(find_bigquery_udf, expr),\n )\n\n # UDFs are uniquely identified by the name of the Node subclass we\n # generate.\n return list(toolz.unique(queries, key=lambda x: type(x.expr.op()).__name__))\n\n\n@rewrites(ops.IdenticalTo)\ndef identical_to(expr):\n left, right = expr.op().args\n return (left.isnull() & right.isnull()) | (left == right)\n\n\n@rewrites(ops.Log2)\ndef log2(expr):\n (arg,) = expr.op().args\n return arg.log(2)\n\n\n@rewrites(ops.Sum)\ndef bq_sum(expr):\n arg = expr.op().args[0]\n where = expr.op().args[1]\n if isinstance(arg, ir.BooleanColumn):\n return arg.cast(\"int64\").sum(where=where)\n else:\n return expr\n\n\n@rewrites(ops.Mean)\ndef bq_mean(expr):\n arg = expr.op().args[0]\n where = expr.op().args[1]\n if isinstance(arg, ir.BooleanColumn):\n return arg.cast(\"int64\").mean(where=where)\n else:\n return expr\n\n\n@compiles(ops.Floor)\ndef compiles_floor(t, e):\n bigquery_type = ibis_type_to_bigquery_type(e.type())\n arg = e.op().arg\n return \"CAST(FLOOR({}) AS {})\".format(t.translate(arg), bigquery_type)\n\n\n@compiles(ops.CMSMedian)\ndef compiles_approx(translator, expr):\n expr = expr.op()\n arg = expr.arg\n where = expr.where\n\n if where is not None:\n arg = where.ifelse(arg, ibis.NA)\n\n return \"APPROX_QUANTILES({}, 2)[OFFSET(1)]\".format(translator.translate(arg))\n\n\n@compiles(ops.Covariance)\ndef compiles_covar(translator, expr):\n expr = expr.op()\n left = expr.left\n right = expr.right\n where = expr.where\n\n if expr.how == \"sample\":\n how = \"SAMP\"\n elif expr.how == \"pop\":\n how = \"POP\"\n else:\n raise ValueError(\"Covariance with how={!r} is not supported.\".format(how))\n\n if where is not None:\n left = where.ifelse(left, ibis.NA)\n right = where.ifelse(right, ibis.NA)\n\n return \"COVAR_{}({}, {})\".format(\n how, translator.translate(left), translator.translate(right)\n )\n\n\n@rewrites(ops.Any)\n@rewrites(ops.All)\n@rewrites(ops.NotAny)\n@rewrites(ops.NotAll)\ndef bigquery_any_all_no_op(expr):\n return expr\n\n\n@compiles(ops.Any)\ndef bigquery_compile_any(translator, expr):\n return \"LOGICAL_OR({})\".format(*map(translator.translate, expr.op().args))\n\n\n@compiles(ops.NotAny)\ndef bigquery_compile_notany(translator, expr):\n return 
\"LOGICAL_AND(NOT ({}))\".format(*map(translator.translate, expr.op().args))\n\n\n@compiles(ops.All)\ndef bigquery_compile_all(translator, expr):\n return \"LOGICAL_AND({})\".format(*map(translator.translate, expr.op().args))\n\n\n@compiles(ops.NotAll)\ndef bigquery_compile_notall(translator, expr):\n return \"LOGICAL_OR(NOT ({}))\".format(*map(translator.translate, expr.op().args))\n"
] |
[
[
"numpy.isfinite"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
ggsonic/BertSum
|
[
"7f5ce04507e92f07d51dc823ed4e42873b35f4a6"
] |
[
"src/models/encoder.py"
] |
[
"import math\n\nimport torch\nimport torch.nn as nn\n\nfrom models.neural import MultiHeadedAttention, PositionwiseFeedForward\nfrom models.rnn import LayerNormLSTM\n\n\nclass Classifier(nn.Module):\n def __init__(self, hidden_size):\n super(Classifier, self).__init__()\n self.linear1 = nn.Linear(hidden_size, 1)\n self.sigmoid = nn.Sigmoid()\n\n def forward(self, x, mask_cls):\n h = self.linear1(x).squeeze(-1)\n sent_scores = self.sigmoid(h) * mask_cls.float()\n return sent_scores\n\n\nclass PositionalEncoding(nn.Module):\n\n def __init__(self, dropout, dim, max_len=5000):\n pe = torch.zeros(max_len, dim)\n position = torch.arange(0, max_len).unsqueeze(1)\n div_term = torch.exp((torch.arange(0, dim, 2, dtype=torch.float) *\n -(math.log(10000.0) / dim)))\n pe[:, 0::2] = torch.sin(position.float() * div_term)\n pe[:, 1::2] = torch.cos(position.float() * div_term)\n pe = pe.unsqueeze(0)\n super(PositionalEncoding, self).__init__()\n self.register_buffer('pe', pe)\n self.dropout = nn.Dropout(p=dropout)\n self.dim = dim\n\n def forward(self, emb, step=None):\n emb = emb * math.sqrt(self.dim)\n if (step):\n emb = emb + self.pe[:, step][:, None, :]\n\n else:\n emb = emb + self.pe[:, :emb.size(1)]\n emb = self.dropout(emb)\n return emb\n\n def get_emb(self, emb):\n return self.pe[:, :emb.size(1)]\n\n\nclass TransformerEncoderLayer(nn.Module):\n def __init__(self, d_model, heads, d_ff, dropout):\n super(TransformerEncoderLayer, self).__init__()\n\n self.self_attn = MultiHeadedAttention(\n heads, d_model, dropout=dropout)\n self.feed_forward = PositionwiseFeedForward(d_model, d_ff, dropout)\n self.layer_norm = nn.LayerNorm(d_model, eps=1e-6)\n self.dropout = nn.Dropout(dropout)\n\n def forward(self, iter, query, inputs, mask):\n if (iter != 0):\n input_norm = self.layer_norm(inputs)\n else:\n input_norm = inputs\n\n mask = mask.unsqueeze(1)\n context = self.self_attn(input_norm, input_norm, input_norm,\n mask=mask)\n out = self.dropout(context) + inputs\n return self.feed_forward(out)\n\n\nclass TransformerInterEncoder(nn.Module):\n def __init__(self, d_model, d_ff, heads, dropout, num_inter_layers=0):\n super(TransformerInterEncoder, self).__init__()\n self.d_model = d_model\n self.num_inter_layers = num_inter_layers\n self.pos_emb = PositionalEncoding(dropout, d_model)\n self.transformer_inter = nn.ModuleList(\n [TransformerEncoderLayer(d_model, heads, d_ff, dropout)\n for _ in range(num_inter_layers)])\n self.dropout = nn.Dropout(dropout)\n self.layer_norm = nn.LayerNorm(d_model, eps=1e-6)\n self.wo = nn.Linear(d_model, 1, bias=True)\n self.sigmoid = nn.Sigmoid()\n\n def forward(self, top_vecs, mask):\n \"\"\" See :obj:`EncoderBase.forward()`\"\"\"\n\n batch_size, n_sents = top_vecs.size(0), top_vecs.size(1)\n pos_emb = self.pos_emb.pe[:, :n_sents]\n x = top_vecs * mask[:, :, None].float()\n x = x + pos_emb\n\n for i in range(self.num_inter_layers):\n #x = self.transformer_inter[i](i, x, x, 1 - mask) # all_sents * max_tokens * dim\n x = self.transformer_inter[i](i, x, x, ~mask) # all_sents * max_tokens * dim\n x = self.layer_norm(x)\n sent_scores = self.sigmoid(self.wo(x))\n sent_scores = sent_scores.squeeze(-1) * mask.float()\n\n return sent_scores\n\n\nclass RNNEncoder(nn.Module):\n\n def __init__(self, bidirectional, num_layers, input_size,\n hidden_size, dropout=0.0):\n super(RNNEncoder, self).__init__()\n num_directions = 2 if bidirectional else 1\n assert hidden_size % num_directions == 0\n hidden_size = hidden_size // num_directions\n\n self.rnn = LayerNormLSTM(\n input_size=input_size,\n 
hidden_size=hidden_size,\n num_layers=num_layers,\n bidirectional=bidirectional)\n\n self.wo = nn.Linear(num_directions * hidden_size, 1, bias=True)\n self.dropout = nn.Dropout(dropout)\n self.sigmoid = nn.Sigmoid()\n\n def forward(self, x, mask):\n \"\"\"See :func:`EncoderBase.forward()`\"\"\"\n x = torch.transpose(x, 1, 0)\n memory_bank, _ = self.rnn(x)\n memory_bank = self.dropout(memory_bank) + x\n memory_bank = torch.transpose(memory_bank, 1, 0)\n\n sent_scores = self.sigmoid(self.wo(memory_bank))\n sent_scores = sent_scores.squeeze(-1) * mask.float()\n return sent_scores\n"
] |
[
[
"torch.nn.Dropout",
"torch.transpose",
"torch.zeros",
"torch.nn.LayerNorm",
"torch.nn.Sigmoid",
"torch.nn.Linear",
"torch.arange"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
mitdo/o2ac-ur
|
[
"f684f21fd280a22ec061dc5d503801f6fefb2422",
"f684f21fd280a22ec061dc5d503801f6fefb2422"
] |
[
"catkin_ws/src/o2ac_routines/src/o2ac_routines/assembly.py",
"catkin_ws/src/o2ac_routines/src/o2ac_routines/robot_base.py"
] |
[
"#!/usr/bin/env python\n\n# Software License Agreement (BSD License)\n#\n# Copyright (c) 2021, OMRON SINIC X\n# All rights reserved.\n#\n# Redistribution and use in source and binary forms, with or without\n# modification, are permitted provided that the following conditions\n# are met:\n#\n# * Redistributions of source code must retain the above copyright\n# notice, this list of conditions and the following disclaimer.\n# * Redistributions in binary form must reproduce the above\n# copyright notice, this list of conditions and the following\n# disclaimer in the documentation and/or other materials provided\n# with the distribution.\n# * Neither the name of OMRON SINIC X nor the names of its\n# contributors may be used to endorse or promote products derived\n# from this software without specific prior written permission.\n#\n# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS\n# \"AS IS\" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT\n# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS\n# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE\n# COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,\n# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,\n# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;\n# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER\n# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT\n# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN\n# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE\n# POSSIBILITY OF SUCH DAMAGE.\n#\n# Author: Felix von Drigalski, Cristian C. Beltran-Hernandez\n\nfrom ur_control.constants import DONE, TERMINATION_CRITERIA\nimport o2ac_routines.helpers as helpers\nfrom o2ac_routines.common import O2ACCommon\nfrom o2ac_assembly_database.assembly_reader import AssemblyReader\nfrom o2ac_assembly_database.parts_reader import PartsReader\nimport moveit_task_constructor_msgs.msg\nimport moveit_msgs.msg\nimport std_msgs.msg\nimport o2ac_msgs.msg\nimport actionlib\nfrom o2ac_msgs.srv import *\nimport numpy as np\nimport time\nimport math\nfrom os import wait\nimport sys\nimport copy\n\nfrom o2ac_routines.base import AssemblyStatus\nfrom ur_control import conversions, transformations\nimport rospy\nimport geometry_msgs.msg\nimport moveit_msgs\nimport tf_conversions\nimport tf\nfrom math import pi, radians, sin, cos, pi\ntau = 2.0*pi # Part of math from Python 3.6\n\n\nclass O2ACAssembly(O2ACCommon):\n \"\"\"\n This class contains the assembly routines.\n \"\"\"\n\n def __init__(self):\n super(O2ACAssembly, self).__init__()\n\n # Load the initial database\n if not self.assembly_database.db_name == \"wrs_assembly_2021\":\n self.set_assembly(\"wrs_assembly_2021\")\n\n # Spawn tools and objects\n self.define_tool_collision_objects()\n\n # Only used for MTC planning\n # screw_ids = ['m3', 'm4']\n # for screw_id in screw_ids:\n # self.spawn_tool('screw_tool_' + screw_id)\n # self.upload_tool_grasps_to_param_server(screw_id)\n\n self.belt_storage_location = geometry_msgs.msg.PoseStamped()\n self.belt_storage_location.header.frame_id = \"left_centering_link\"\n self.belt_storage_location.pose.position.x = -0.005 # Height\n self.belt_storage_location.pose.position.y = 0.05\n self.belt_storage_location.pose.position.z = 0.05\n\n ################ ----- Subtasks\n\n def pick_and_store_belt(self):\n self.b_bot.go_to_named_pose(\"home\")\n self.a_bot.go_to_pose_goal(self.tray_view_high, 
end_effector_link=\"a_bot_outside_camera_color_frame\", speed=.8)\n\n self.vision.activate_camera(\"a_bot_outside_camera\")\n self.activate_led(\"a_bot\")\n self.get_3d_poses_from_ssd()\n r2 = self.get_feasible_grasp_points(\"belt\")\n if r2:\n pick_goal = r2[0]\n pick_goal.pose.position.z = -0.001\n pick_goal.pose.position.x = -0.02 # MAGIC NUMBER\n else:\n rospy.logerr(\"Could not find belt grasp pose! Aborting.\")\n return False\n\n # TODO(felixvd): Adjust this check so that the gripper does not open before the vision confirmed the belt pick\n\n self.vision.activate_camera(\"a_bot_inside_camera\")\n self.simple_pick(\"a_bot\", pick_goal, gripper_force=100.0, approach_height=0.15, grasp_width=.04, axis=\"z\")\n\n self.b_bot.go_to_named_pose(\"home\")\n self.simple_place(\"a_bot\", self.belt_storage_location)\n self.a_bot.move_lin_rel(relative_translation=[0, -0.05, .1])\n\n success = self.vision.check_pick_success(\"belt\")\n if success:\n rospy.loginfo(\"Belt storage success!\")\n else:\n rospy.loginfo(\"Belt storage failed!\")\n # TODO(felixvd): Open gripper over tray in case an object was picked accidentally\n\n self.a_bot.go_to_named_pose(\"home\")\n return success\n\n ################ ----- Subtasks\n\n def subtask_zero(self, skip_initial_perception=False, use_b_bot_camera=False):\n # ============= SUBTASK BASE (picking and orienting and placing the baseplate) =======================\n rospy.loginfo(\"======== SUBTASK BASE ========\")\n\n self.unlock_base_plate()\n self.publish_status_text(\"Target: base plate\")\n grasp_name = \"big_holes_grasp\" if self.assembly_database.db_name in [\"wrs_assembly_2021\", \"wrs_assembly_2021_surprise\"] else \"default_grasp\"\n success = self.pick_base_panel(grasp_name=grasp_name, skip_initial_perception=skip_initial_perception, use_b_bot_camera=use_b_bot_camera)\n if not success:\n rospy.logerr(\"Fail to grasp base. Trying again with different grasp (default_grasp)\")\n grasp_name = \"terminal_grasp\"\n success = self.pick_base_panel(grasp_name=grasp_name, skip_initial_perception=skip_initial_perception, use_b_bot_camera=use_b_bot_camera)\n # if not success:\n # rospy.logerr(\"Fail to grasp base. 
Trying again with different grasp (terminal_grasp)\")\n # grasp_name = \"terminal_grasp\"\n # success = self.pick_base_panel(grasp_name=grasp_name, skip_initial_perception=skip_initial_perception, use_b_bot_camera=use_b_bot_camera)\n return False\n\n self.allow_collisions_with_robot_hand(\"tray\", \"a_bot\", allow=False)\n self.allow_collisions_with_robot_hand(\"tray_center\", \"a_bot\", allow=False)\n\n if not self.use_real_robot:\n self.allow_collisions_with_robot_hand(\"base_fixture_top\", \"a_bot\", allow=True)\n\n # self.confirm_to_proceed(\"finetune above pose\")\n # print(\"q:\", self.a_bot.robot_group.get_current_joint_values())\n # self.confirm_to_proceed(\"finetune in pose\")\n\n # There is a risk of overextending the wrist joint if we don't use the joint pose\n if grasp_name == \"big_holes_grasp\":\n # above_base_drop = conversions.to_pose_stamped(\"assembled_part_01\", [0.109, 0.069, 0.084, 0.004, -0.005, -0.708, 0.707])\n above_base_drop = [1.609, -1.446, 1.595, -1.7201, -1.5673, -1.5186]\n base_inserted = conversions.to_pose_stamped(\"assembled_part_01\", [0.108, 0.008, 0.083, 0.004, -0.005, -0.708, 0.707]) # Taught\n elif grasp_name == \"default_grasp\":\n # Move to fixation\n above_base_drop = [1.57783019, -1.430060581, 1.67834741, -1.82884373, -1.56911117, 0.00590014457]\n base_drop = conversions.to_pose_stamped(\"assembled_part_01\", [0.111, 0.007, 0.07, tau/4., 0, -tau/4.])\n base_inserted = conversions.to_pose_stamped(\"assembled_part_01\", [0.108, -0.006, 0.067, 1.568, 0.103, -1.582]) # Taught\n else:\n return False\n\n seq = []\n seq.append(helpers.to_sequence_item(above_base_drop, 0.5, linear=False))\n if grasp_name == \"default_grasp\":\n seq.append(helpers.to_sequence_item(base_drop, 0.3))\n seq.append(helpers.to_sequence_item(base_inserted, 0.2))\n if not self.execute_sequence(\"a_bot\", seq, \"place base plate\"):\n return False\n # self.a_bot.move_joints(above_base_drop, speed=0.5)\n # self.a_bot.go_to_pose_goal(base_drop, speed=0.3, move_lin = True)\n # self.a_bot.go_to_pose_goal(base_inserted, speed=0.05, move_lin = True)\n self.a_bot.gripper.open(opening_width=0.0425, velocity=0.05)\n self.a_bot.gripper.close(force=0, velocity=0.03, wait=False)\n self.a_bot.gripper.open(opening_width=0.0425)\n self.a_bot.gripper.forget_attached_item()\n self.a_bot.move_lin_rel([0, 0, 0.02])\n\n def set_base_plate():\n rospy.sleep(0.3)\n self.lock_base_plate()\n rospy.sleep(0.3)\n self.unlock_base_plate()\n rospy.sleep(0.3)\n self.lock_base_plate()\n\n def a_bot_return():\n self.a_bot.move_lin_rel(relative_translation=[0.05, -0.1, 0.02], speed=1.0)\n self.allow_collisions_with_robot_hand(\"base\", \"a_bot\", allow=False)\n self.publish_part_in_assembled_position(\"base\", marker_only=True)\n self.do_tasks_simultaneous(a_bot_return, set_base_plate)\n if not self.use_real_robot:\n self.allow_collisions_with_robot_hand(\"base_fixture_top\", \"a_bot\", allow=False)\n return True\n\n def subtask_a(self, simultaneous=True):\n # ============= SUBTASK A (picking and inserting and fastening the motor) =======================\n rospy.loginfo(\"======== SUBTASK A (motor) ========\")\n self.publish_status_text(\"Target: Motor\")\n\n self.a_success = False\n self.b_success = False\n\n def b_task():\n self.b_success = self.pick_motor()\n if not self.b_success:\n rospy.logerr(\"Fail to pick motor\")\n return False\n self.assembly_status.motor_placed_outside_of_tray = True\n self.b_success = self.orient_motor()\n if not self.b_success:\n rospy.logerr(\"Fail to orient motor\")\n return False\n 
self.assembly_status.motor_oriented = self.b_success\n\n def a_task():\n self.a_success = self.do_change_tool_action(\"a_bot\", equip=True, screw_size=3)\n self.a_success &= self.a_bot.go_to_named_pose(\"screw_ready\")\n\n if simultaneous:\n self.do_tasks_simultaneous(a_task, b_task, timeout=120)\n else:\n b_task()\n a_task()\n\n if not self.a_success or not self.b_success:\n rospy.logerr(\"Fail to do subtask a, part 1 (a_bot:%s)(b_bot:%s)\" % (self.a_success, self.b_success))\n self.do_change_tool_action(\"a_bot\", equip=False, screw_size=3)\n self.a_bot.go_to_named_pose(\"home\")\n self.b_bot.gripper.open()\n self.b_bot.go_to_named_pose(\"home\")\n return False\n\n if not self.align_motor_pre_insertion():\n rospy.logerr(\"Fail to do subtask a, part 2\")\n return False\n if not self.insert_motor(\"assembled_part_02_back_hole\"):\n rospy.logerr(\"Fail to do subtask a, part 3\")\n return False\n if not self.fasten_motor():\n rospy.logerr(\"Fail to do subtask a, part 4. Attempt Fallback once\")\n if not self.fasten_motor_fallback():\n rospy.logerr(\"Fail to do fallback\")\n self.do_change_tool_action(\"a_bot\", equip=False, screw_size=3)\n self.a_bot.go_to_named_pose(\"home\")\n self.b_bot.gripper.open()\n self.b_bot.go_to_named_pose(\"home\")\n\n return True\n\n def subtask_b(self, simultaneous_execution=False):\n rospy.loginfo(\"======== SUBTASK B (motor pulley) ========\")\n self.publish_status_text(\"Target: Motor Pulley\")\n target_link = \"assembled_part_05_center\"\n self.a_bot_success = False\n self.b_bot_success = False\n\n def a_bot_task():\n if not self.pick_motor_pulley(robot_name=\"a_bot\", attempt=5):\n return False\n if not self.orient_motor_pulley(target_link, robot_name=\"a_bot\"):\n return False\n self.confirm_to_proceed(\"finetune\")\n if not self.insert_motor_pulley(target_link, robot_name=\"a_bot\"):\n return False\n self.a_bot.gripper.forget_attached_item()\n self.publish_part_in_assembled_position(\"motor_pulley\", marker_only=True)\n self.a_bot_success = True\n\n def b_bot_task():\n if not self.equip_tool(\"b_bot\", \"set_screw_tool\"):\n return False\n b_bot_approach_pose = conversions.to_pose_stamped(target_link, [0.006, -0.002, -0.072] + np.deg2rad([174.3, -87.6, -135.8]).tolist())\n if not self.b_bot.go_to_pose_goal(b_bot_approach_pose, speed=1.0, move_lin=True, end_effector_link=\"b_bot_set_screw_tool_tip_link\"):\n return False\n self.b_bot_success = True\n\n if simultaneous_execution:\n self.do_tasks_simultaneous(a_bot_task, b_bot_task, timeout=180)\n else:\n a_bot_task()\n if self.a_bot_success:\n b_bot_task()\n\n if not self.a_bot_success or not self.b_bot_success:\n rospy.logerr(\"Fail to do motor pulley fastening (simultaneous=%s) a_bot:%s b_bot:%s\" % (simultaneous_execution, self.a_bot_success, self.b_bot_success))\n return False\n\n if not self.fasten_motor_pulley(target_link, simultaneous=simultaneous_execution):\n return False\n\n return True\n\n def subtask_c1(self):\n rospy.loginfo(\"======== SUBTASK C (bearing) ========\")\n self.publish_status_text(\"Target: Bearing\")\n self.unequip_tool(\"a_bot\")\n success = False\n if self.pick_up_and_insert_bearing(task=\"assembly\", robot_name=\"a_bot\"):\n self.publish_part_in_assembled_position(\"bearing\", marker_only=True)\n self.a_bot.go_to_named_pose(\"centering_area\", speed=1.0)\n self.a_bot.gripper.forget_attached_item()\n self.b_bot.go_to_named_pose(\"home\", speed=1.0)\n if self.align_bearing_holes(task=\"assembly\"):\n self.b_bot.go_to_named_pose(\"home\", speed=1.0)\n success = 
self.fasten_bearing(task=\"assembly\", with_extra_retighten=True, robot_name=\"a_bot\")\n self.unequip_tool('a_bot', 'screw_tool_m4')\n return success\n\n def subtask_c2(self, simultaneous_execution=True, skip_pick_end_cap=False, assemble_bearing_spacer=False):\n rospy.loginfo(\"======== SUBTASK C (output shaft) ========\")\n self.publish_status_text(\"Target: Output Shaft\")\n if not simultaneous_execution:\n self.ab_bot.go_to_named_pose(\"home\")\n\n self.allow_collisions_with_robot_hand(\"shaft\", \"b_bot\", True)\n self.allow_collisions_with_robot_hand(\"end_cap\", \"a_bot\", True)\n\n if not skip_pick_end_cap:\n self.publish_status_text(\"Target: end cap\")\n if not self.pick_end_cap():\n return False\n if simultaneous_execution:\n self.a_bot.go_to_named_pose(\"centering_area\", speed=1.0)\n\n self.a_bot_success = False\n self.b_bot_success = False\n\n def a_bot_task():\n if not self.orient_shaft_end_cap():\n return False\n self.a_bot_success = True\n\n def b_bot_task():\n if not self.pick_shaft():\n return False\n if not self.orient_shaft():\n return False\n self.b_bot_success = True\n\n if simultaneous_execution:\n self.publish_status_text(\"Target: shaft & end cap\")\n self.do_tasks_simultaneous(a_bot_task, b_bot_task, timeout=300)\n else:\n a_bot_task()\n self.publish_status_text(\"Target: shaft\")\n b_bot_task()\n\n if not self.a_bot_success or not self.a_bot_success:\n rospy.logerr(\"Fail to assemble shaft\")\n self.drop_in_tray(\"b_bot\")\n self.b_bot.go_to_named_pose(\"home\")\n self.drop_in_tray(\"a_bot\")\n self.a_bot.go_to_named_pose(\"home\")\n return False\n\n # pre_insertion_shaft = conversions.to_pose_stamped(\"tray_center\", [0.0, 0, 0.2, 0, 0, -tau/4.])\n # if not self.b_bot.go_to_pose_goal(pre_insertion_shaft, speed=0.3):\n pre_insertion_shaft = [1.78158, -0.98719, 2.42349, -4.57638, -1.78597, 0.00433]\n if not self.b_bot.move_joints(pre_insertion_shaft, speed=0.4):\n rospy.logerr(\"Fail to go to pre_insertion_shaft\")\n return False\n\n above_pre_insertion_end_cap = conversions.to_pose_stamped(\"tray_center\", [-0.004, 0.010, 0.280]+np.deg2rad([-180, 90, -90]).tolist())\n if not self.a_bot.go_to_pose_goal(above_pre_insertion_end_cap, speed=0.6, move_lin=False):\n rospy.logerr(\"Fail to go to pre_insertion_end_cap\")\n return False\n pre_insertion_end_cap = conversions.to_pose_stamped(\"tray_center\", [-0.0045, 0.010, 0.245]+np.deg2rad([-180, 90, -90]).tolist())\n if not self.a_bot.go_to_pose_goal(pre_insertion_end_cap, speed=0.3, move_lin=True):\n rospy.logerr(\"Fail to go to pre_insertion_end_cap\")\n return False\n\n # self.confirm_to_proceed(\"insertion of end cap\")\n if not self.insert_end_cap():\n rospy.logerr(\"failed to insert end cap. maybe\")\n # return False\n self.despawn_object(\"end_cap\")\n self.a_bot.gripper.forget_attached_item()\n\n # self.confirm_to_proceed(\"Did insertion succeed? 
Press Enter to open gripper\")\n\n self.a_bot.gripper.send_command(0.06, velocity=0.01)\n self.a_bot.move_lin_rel([0, 0, 0.05], speed=0.3)\n self.a_bot.gripper.detach_object(\"end_cap\")\n self.despawn_object(\"end_cap\")\n\n self.confirm_to_proceed(\"prepare screw\")\n\n if not self.fasten_end_cap():\n return False\n\n if not self.a_bot.go_to_named_pose(\"home\"):\n return False\n\n # self.confirm_to_proceed(\"insert to bearing\")\n if not self.align_shaft(\"assembled_part_07_inserted\", pre_insert_offset=0.065):\n return False\n self.b_bot.gripper.forget_attached_item()\n\n self.a_bot_success = False\n self.b_bot_success = False\n\n self.allow_collisions_with_robot_hand(\"base_fixture_top\", \"b_bot\")\n self.despawn_object(\"shaft\")\n if not self.insert_shaft(\"assembled_part_07_inserted\", target=0.043):\n return False\n self.publish_part_in_assembled_position(\"shaft\", marker_only=True)\n self.allow_collisions_with_robot_hand(\"base_fixture_top\", \"b_bot\", False)\n self.b_bot_success = True\n\n self.allow_collisions_with_robot_hand(\"end_cap\", \"a_bot\", False)\n self.allow_collisions_with_robot_hand(\"shaft\", \"b_bot\", False)\n\n if not simultaneous_execution:\n if not self.b_bot.go_to_named_pose(\"home\"):\n return False\n\n return True\n\n def subtask_d(self):\n rospy.loginfo(\"======== SUBTASK D (output pulley) ========\")\n \n # Push shaft into contact\n approach_hold_pose = conversions.to_pose_stamped(\"assembled_part_07_inserted\", [0.15, 0.000, -0.15] + np.deg2rad([-90, -90, -90]).tolist())\n pre_hold_pose = conversions.to_pose_stamped(\"assembled_part_07_inserted\", [0.15, 0.000, 0.02] + np.deg2rad([-90, -90, -90]).tolist())\n at_hold_pose = conversions.to_pose_stamped(\"assembled_part_07_inserted\", [0.041, 0.000, 0.02] + np.deg2rad([-90, -90, -90]).tolist())\n self.b_bot.gripper.close(wait=False)\n self.confirm_to_proceed(\"pulley_grasp_pose a_bot\")\n seq = []\n seq.append(helpers.to_sequence_item(approach_hold_pose, speed=1.0, retime=True))\n seq.append(helpers.to_sequence_item(pre_hold_pose, speed=1.0, retime=True))\n seq.append(helpers.to_sequence_item(at_hold_pose, speed=1.0, retime=True))\n if not self.execute_sequence(\"b_bot\", seq, \"push shaft into place\"):\n rospy.logerr(\"Fail to push shaft into place with b_bot\")\n return False\n\n # bearing spacer\n self.publish_status_text(\"Target: Output Spacer\")\n if not self.pick_bearing_spacer(\"a_bot\"):\n self.b_bot.move_lin_rel([-0.15,0,0.1])\n return False\n if not self.orient_bearing_spacer(\"a_bot\"):\n self.b_bot.move_lin_rel([-0.15,0,0.1])\n return False\n if not self.align_bearing_spacer_pre_insertion(\"a_bot\"):\n self.b_bot.move_lin_rel([-0.15,0,0.1])\n return False\n self.confirm_to_proceed(\"fine tune\")\n if not self.insert_bearing_spacer(\"assembled_part_07_inserted\", \"a_bot\"):\n self.b_bot.move_lin_rel([-0.15,0,0.1])\n return False\n self.a_bot.gripper.forget_attached_item()\n\n # output pulley\n self.publish_status_text(\"Target: Output Pulley\")\n if not self.pick_output_pulley(\"a_bot\"):\n self.b_bot.move_lin_rel([-0.15,0,0.1])\n return False\n if not self.orient_output_pulley(\"a_bot\"):\n self.b_bot.move_lin_rel([-0.15,0,0.1])\n return False\n if not self.align_output_pulley_pre_insertion(\"a_bot\"):\n self.b_bot.move_lin_rel([-0.15,0,0.1])\n return False\n self.confirm_to_proceed(\"fine tune\")\n if not self.insert_output_pulley(\"assembled_part_07_inserted\", \"a_bot\"):\n self.b_bot.move_lin_rel([-0.15,0,0.1])\n return False\n self.a_bot.gripper.forget_attached_item()\n\n 
self.a_bot_success = False\n self.b_bot_success = False\n\n self.b_bot.go_to_pose_goal(approach_hold_pose, move_lin=True) \n\n self.publish_part_in_assembled_position(\"output_pulley\")\n # Includes the fastening of the screws\n return self.check_output_pulley_angle()\n\n def subtask_e(self):\n # Idler pulley\n rospy.loginfo(\"======== SUBTASK E (Idler pulley) ========\")\n self.publish_status_text(\"Target: Idler Pulley\")\n return self.subtask_e_urscript()\n\n def subtask_e_urscript(self):\n \"\"\" A hard-coded UR script version of the subtask. Expects the spacer and pulley to be in the correct locations\n in storage.\n \"\"\"\n idler_pulley_store_pose = conversions.to_pose_stamped(\"left_centering_link\", [-0.006, 0.003, 0.061, -tau/4, 0, 0])\n idler_spacer_store_pose = conversions.to_pose_stamped(\"left_centering_link\", [-0.006, 0.002, 0.163, -tau/4, 0, 0])\n\n self.despawn_object(\"idler_pin\")\n self.ab_bot.go_to_named_pose(\"home\")\n\n self.confirm_to_proceed(\"pick idler pulley\")\n self.pick_idler_pulley_assembly(\"a_bot\")\n self.simple_place(\"a_bot\", idler_pulley_store_pose, place_height=0.0, approach_height=0.15, axis=\"x\", sign=-1)\n self.a_bot.gripper.forget_attached_item()\n\n self.publish_status_text(\"Target: Idler Spacer\")\n self.confirm_to_proceed(\"pick spacer\")\n self.pick_idler_spacer(\"a_bot\")\n self.simple_place(\"a_bot\", idler_spacer_store_pose, place_height=0.0, approach_height=0.15, axis=\"x\", sign=-1)\n self.a_bot.gripper.forget_attached_item()\n\n self.publish_status_text(\"Target: Idler Pin\")\n self.confirm_to_proceed(\"pick pin\")\n self.pick_idler_pin(\"a_bot\")\n self.confirm_to_proceed(\"urscript?\")\n\n self.a_bot.load_and_execute_program(program_name=\"wrs2020/asm_idler_pulley_v1.urp\", skip_ros_activation=True)\n self.b_bot.load_and_execute_program(program_name=\"wrs2020/asm_idler_pulley_p1.urp\", skip_ros_activation=True)\n rospy.sleep(6)\n self.b_bot.close_ur_popup()\n rospy.sleep(7)\n self.a_bot.close_ur_popup()\n rospy.sleep(2)\n self.b_bot.close_ur_popup()\n rospy.sleep(2)\n self.a_bot.close_ur_popup()\n rospy.sleep(87)\n self.b_bot.close_ur_popup()\n rospy.sleep(4)\n self.a_bot.close_ur_popup()\n rospy.sleep(6)\n self.b_bot.close_ur_popup()\n rospy.sleep(1)\n self.a_bot.close_ur_popup()\n\n # Go through pause dialogues\n self.confirm_to_proceed(\"Go through pause dialogs manually. Press enter after b_bot went home and a_bot holds the pulley at the ridge.\")\n\n # TODO(cambel): move a_bot to some target pose w.r.t a frame id for any changes in the product arrangement\n\n # Equip padless tool\n self.b_bot.go_to_named_pose(\"home\")\n self.publish_status_text(\"Target: Idler Pulley\")\n self.equip_tool(\"b_bot\", \"padless_tool_m4\")\n self.b_bot.go_to_named_pose(\"horizontal_screw_ready\")\n self.b_bot.load_and_execute_program(program_name=\"wrs2020/asm_idler_pulley_p2.urp\", skip_ros_activation=True)\n rospy.sleep(5)\n self.a_bot.close_ur_popup()\n rospy.sleep(1)\n self.a_bot.close_ur_popup()\n helpers.wait_for_UR_program(\"/a_bot\", rospy.Duration.from_sec(60))\n\n # Go through pause dialogues again\n self.confirm_to_proceed(\"Go through pause dialogs manually. 
Did both robots finish?\")\n\n # When a_bot is finished:\n self.equip_nut_tool()\n self.fasten_idler_pulley_with_nut_tool(target_link=\"assembled_part_03_pulley_ridge_top\")\n\n def a_bot_task():\n return self.unequip_nut_tool()\n\n def b_bot_task():\n success = self.playback_sequence(\"idler_pulley_return_screw_tool\")\n self.b_bot.go_to_named_pose(\"horizontal_screw_ready\")\n return success and self.unequip_tool(\"b_bot\", \"padless_tool_m4\")\n\n if not self.do_tasks_simultaneous(a_bot_task, b_bot_task, timeout=60):\n return False\n\n return True\n\n def subtask_f(self):\n rospy.loginfo(\"======== SUBTASK F (motor panel (small L-plate)) ========\")\n attempts = 0\n success = False\n while not success and attempts < 3 and not rospy.is_shutdown():\n rospy.loginfo(\"======== SUBTASK F, attempt \" + str(attempts) + \" ========\")\n success = self.panel_subtask(panel=\"panel_motor\", attempt_nr=attempts)\n attempts += 1\n return success\n\n def subtask_g(self):\n rospy.loginfo(\"======== SUBTASK G (bearing panel (large L-plate)) ========\")\n attempts = 0\n success = False\n while not success and attempts < 3 and not rospy.is_shutdown():\n success = self.panel_subtask(panel=\"panel_bearing\", attempt_nr=attempts)\n attempts += 1\n return success\n\n def panel_subtask(self, panel, attempt_nr=0, allow_fallbacks=True, simultaneous_execution=True):\n \"\"\"\n input parameter panel needs to be \"panel_motor\" or \"panel_bearing\"\n \"\"\"\n if simultaneous_execution:\n return self.panel_subtask_simultaneous(panel, attempt_nr=attempt_nr, allow_fallbacks=allow_fallbacks)\n\n self.publish_status_text(\"Target: \" + panel)\n if not self.b_bot.go_to_named_pose(\"feeder_pick_ready\"):\n rospy.logerr(\"b_bot did not move out of the way. Aborting.\")\n return False\n\n self.activate_led(\"a_bot\")\n plate_pose = self.get_large_item_position_from_top(panel, \"a_bot\")\n if not plate_pose:\n rospy.logerr(\"Cannot find \" + panel + \" in tray. Return False.\")\n return False\n\n # Pick using grasp pose only, ignoring scene object\n grasp_pose = self.assembly_database.get_grasp_pose(panel, \"default_grasp\")\n if not grasp_pose:\n rospy.logerr(\"Could not load grasp pose \" + \"default_grasp\" + \" for object \" + panel + \". Aborting pick.\")\n return False\n grasp_pose.header.frame_id = \"move_group/\" + panel\n try:\n self.listener.waitForTransform(\"move_group/\" + panel, \"tray_center\", grasp_pose.header.stamp, rospy.Duration(1))\n grasp_pose_tray = self.listener.transformPose(\"tray_center\", grasp_pose)\n except:\n rospy.logerr(\"Could not transform from object. Is the object \" + panel + \" in the scene?\")\n return False\n\n self.planning_scene_interface.allow_collisions(panel, \"\")\n self.planning_scene_interface.allow_collisions(panel, \"tray\")\n self.planning_scene_interface.allow_collisions(panel, \"tray_center\")\n self.allow_collisions_with_robot_hand(panel, \"a_bot\")\n rospy.sleep(1.0) # TODO(felixvd): Necessary after enabling collisions? 
Likely.\n if not self.too_close_to_border(grasp_pose_tray, border_dist=0.025):\n picked = self.simple_pick(\"a_bot\", grasp_pose_tray, axis=\"z\", grasp_width=0.06, minimum_grasp_width=0.0001)\n else:\n picked = False\n\n if allow_fallbacks:\n # Fallback: Try moving the plate\n if not picked:\n self.a_bot.go_to_named_pose(\"home\")\n self.unequip_tool(\"b_bot\")\n if panel == \"panel_motor\":\n tool_pull_pose = conversions.to_pose_stamped(\"move_group/panel_motor\", [0.03, 0.038, 0.0, 0, 0, 0])\n elif panel == \"panel_bearing\":\n tool_pull_pose = conversions.to_pose_stamped(\"move_group/panel_bearing/front_hole\", [0.0, 0.0, 0.0, 0, 0, 0])\n\n print(\"tool_pull_pose\", tool_pull_pose.pose.position)\n tool_pull_pose = self.listener.transformPose(\"tray_center\", tool_pull_pose)\n print(\"tool_pull_pose tfed\", tool_pull_pose.pose.position)\n\n # If close to border, pull towards the middle\n if self.too_close_to_border(grasp_pose_tray, border_dist=0.04):\n # Add 1 cm distance to pull pose\n # print(\"tool_pull_pose before\", tool_pull_pose.pose.position)\n # tool_pull_pose.pose.position.x += 0.01 * np.sign(tool_pull_pose.pose.position.x)\n # tool_pull_pose.pose.position.y += 0.01 * np.sign(tool_pull_pose.pose.position.y)\n print(\"tool_pull_pose after\", tool_pull_pose.pose.position)\n self.move_towards_center_with_tool(\"b_bot\", target_pose=tool_pull_pose, distance=0.05, start_with_spiral=True)\n self.planning_scene_interface.allow_collisions(panel, \"\") # Collisions are reactivated in move_towards_center_with_tool\n self.planning_scene_interface.allow_collisions(panel, \"tray\")\n self.planning_scene_interface.allow_collisions(panel, \"tray_center\")\n self.allow_collisions_with_robot_hand(panel, \"a_bot\")\n else: # If not close to border, try to hit a hole and make space around the plate\n self.declutter_with_tool(\"b_bot\", tool_pull_pose)\n\n self.b_bot.go_to_named_pose(\"feeder_pick_ready\")\n return self.panel_subtask(panel, attempt_nr=attempt_nr, allow_fallbacks=False)\n\n # Fallback: Try to pick all 4 possible orientations\n if attempt_nr > 0:\n for i in range(4):\n rospy.logwarn(\"Fallback: Rotating plate (\" + str() + \" out of 3 times)\")\n self.rotate_plate_collision_object_in_tray(panel)\n rospy.sleep(.5)\n grasp_pose_tray = self.listener.transformPose(\"tray_center\", grasp_pose)\n if self.is_grasp_pose_feasible(grasp_pose_tray, border_dist=0.025):\n picked = self.simple_pick(\"a_bot\", grasp_pose_tray, axis=\"z\", grasp_width=0.06, minimum_grasp_width=0.0001)\n if picked:\n break\n\n if not picked:\n rospy.logerr(\"Did not pick panel. 
Abort.\")\n return False\n\n self.confirm_to_proceed(\"Go on to placing program?\")\n\n # TODO: Check that the plate is seen by SSD when placed outside the tray\n\n if panel == \"panel_bearing\":\n success_a = self.a_bot.load_program(program_name=\"wrs2020/bearing_plate_full.urp\", recursion_depth=3)\n elif panel == \"panel_motor\":\n success_a = self.a_bot.load_program(program_name=\"wrs2020/motor_plate_full.urp\", recursion_depth=3)\n\n if not success_a:\n rospy.logerr(\"Failed to load plate placing program on a_bot\")\n return False\n\n if not self.a_bot.execute_loaded_program():\n rospy.logerr(\"Failed to execute plate placing program on a_bot\")\n return False\n rospy.loginfo(\"Running bearing plate rearrangement on a_bot.\")\n helpers.wait_for_UR_program(\"/a_bot\", rospy.Duration.from_sec(40))\n\n self.publish_part_in_assembled_position(panel)\n self.allow_collisions_with_robot_hand(panel, \"a_bot\")\n\n self.fasten_panel(panel)\n\n self.unlock_base_plate()\n rospy.sleep(0.5)\n self.lock_base_plate()\n self.allow_collisions_with_robot_hand(panel, \"a_bot\", allow=False)\n return True\n\n def panel_subtask_simultaneous(self, panel, attempt_nr=0, allow_fallbacks=True):\n \"\"\"\n input parameter panel needs to be \"panel_motor\" or \"panel_bearing\"\n \"\"\"\n self.publish_status_text(\"Target: \" + panel)\n\n def b_bot_task(): # Pick tool & screw, then wait\n self.equip_tool(robot_name=\"b_bot\", tool_name=\"screw_tool_m4\")\n if not self.b_bot.go_to_named_pose(\"feeder_pick_ready\"):\n rospy.logerr(\"b_bot did not move out of the way. Aborting.\")\n return False\n\n grasp_pose = self.assembly_database.get_grasp_pose(panel, \"default_grasp\")\n if not grasp_pose:\n rospy.logerr(\"Could not load grasp pose \" + \"default_grasp\" + \" for object \" + panel + \". Aborting pick.\")\n return False\n grasp_pose.header.frame_id = \"move_group/\" + panel\n self.picked = False\n\n def a_bot_task(): # Pick and orient panel\n self.activate_led(\"a_bot\")\n plate_pose = self.get_large_item_position_from_top(panel, \"a_bot\")\n if not plate_pose:\n rospy.logerr(\"Cannot find \" + panel + \" in tray. Return False.\")\n return False\n\n # Pick using the grasp pose only, ignoring scene object\n try:\n self.listener.waitForTransform(\"move_group/\" + panel, \"tray_center\", grasp_pose.header.stamp, rospy.Duration(1))\n grasp_pose_tray = self.listener.transformPose(\"tray_center\", grasp_pose)\n except:\n rospy.logerr(\"Could not transform from object. Is the object \" + panel + \" in the scene?\")\n return False\n\n self.planning_scene_interface.allow_collisions(panel, \"\")\n self.planning_scene_interface.allow_collisions(panel, \"tray\")\n self.planning_scene_interface.allow_collisions(panel, \"tray_center\")\n self.allow_collisions_with_robot_hand(panel, \"a_bot\")\n rospy.sleep(1.0) # TODO(felixvd): Necessary after enabling collisions? Likely.\n if not self.too_close_to_border(grasp_pose_tray, border_dist=0.025):\n self.picked = self.simple_pick(\"a_bot\", grasp_pose_tray, axis=\"z\", grasp_width=0.06, minimum_grasp_width=0.0001)\n else:\n self.picked = False\n\n self.do_tasks_simultaneous(a_bot_task, b_bot_task, timeout=180.0)\n\n try:\n self.listener.waitForTransform(\"move_group/\" + panel, \"tray_center\", grasp_pose.header.stamp, rospy.Duration(1))\n grasp_pose_tray = self.listener.transformPose(\"tray_center\", grasp_pose)\n except:\n rospy.logerr(\"Could not transform from object. 
Is the object \" + panel + \" in the scene?\")\n return False\n if allow_fallbacks:\n # Fallback: Try moving the plate\n if not self.picked:\n self.a_bot.go_to_named_pose(\"home\", wait=False)\n self.unequip_tool(\"b_bot\")\n if panel == \"panel_motor\":\n tool_pull_pose = conversions.to_pose_stamped(\"move_group/panel_motor\", [0.03, 0.038, 0.0, 0, 0, 0])\n elif panel == \"panel_bearing\":\n tool_pull_pose = conversions.to_pose_stamped(\"move_group/panel_bearing/front_hole\", [0.0, 0.0, 0.0, 0, 0, 0])\n\n # print(\"tool_pull_pose\", tool_pull_pose.pose.position)\n tool_pull_pose = self.listener.transformPose(\"tray_center\", tool_pull_pose)\n # print(\"tool_pull_pose tfed\", tool_pull_pose.pose.position)\n\n # If close to border, pull towards the middle\n if self.too_close_to_border(grasp_pose_tray, border_dist=0.04):\n # Add 1 cm distance to pull pose\n self.move_towards_center_with_tool(\"b_bot\", target_pose=tool_pull_pose, distance=0.05, start_with_spiral=True)\n self.planning_scene_interface.allow_collisions(panel, \"\") # Collisions are reactivated in move_towards_center_with_tool\n self.planning_scene_interface.allow_collisions(panel, \"tray\")\n self.planning_scene_interface.allow_collisions(panel, \"tray_center\")\n self.allow_collisions_with_robot_hand(panel, \"a_bot\")\n else: # If not close to border, try to hit a hole and make space around the plate\n self.declutter_with_tool(\"b_bot\", tool_pull_pose)\n\n self.b_bot.go_to_named_pose(\"feeder_pick_ready\", wait=False)\n return self.panel_subtask(panel, attempt_nr=attempt_nr, allow_fallbacks=False)\n\n # Fallback 2: Try to pick all 4 possible orientations\n if attempt_nr > 0:\n for i in range(4):\n rospy.logwarn(\"Fallback: Rotating plate (\" + str() + \" out of 3 times)\")\n self.rotate_plate_collision_object_in_tray(panel)\n rospy.sleep(.5)\n grasp_pose_tray = self.listener.transformPose(\"tray_center\", grasp_pose)\n if self.is_grasp_pose_feasible(grasp_pose_tray, border_dist=0.025):\n self.picked = self.simple_pick(\"a_bot\", grasp_pose_tray, axis=\"z\", grasp_width=0.06, minimum_grasp_width=0.0001)\n if self.picked:\n break\n\n if not self.picked:\n rospy.logerr(\"Did not pick panel. 
Abort.\")\n return False\n\n self.confirm_to_proceed(\"Go on to placing program?\")\n\n # TODO: Check that the plate is seen by SSD when placed outside the tray\n\n def a_bot_task2():\n if panel == \"panel_bearing\":\n success_a = self.a_bot.load_program(program_name=\"wrs2020/bearing_plate_full.urp\", recursion_depth=3)\n elif panel == \"panel_motor\":\n success_a = self.a_bot.load_program(program_name=\"wrs2020/motor_plate_full.urp\", recursion_depth=3)\n\n if not success_a:\n rospy.logerr(\"Failed to load plate placing program on a_bot\")\n return False\n\n if not self.a_bot.execute_loaded_program():\n rospy.logerr(\"Failed to execute plate placing program on a_bot\")\n return False\n rospy.loginfo(\"Running bearing plate rearrangement on a_bot.\")\n helpers.wait_for_UR_program(\"/a_bot\", rospy.Duration.from_sec(40))\n\n self.publish_part_in_assembled_position(panel)\n self.allow_collisions_with_robot_hand(panel, \"a_bot\")\n\n def b_bot_task2():\n self.equip_tool(robot_name=\"b_bot\", tool_name=\"screw_tool_m4\")\n self.vision.activate_camera(camera_name=\"b_bot_outside_camera\")\n self.pick_screw_from_feeder(\"b_bot\", screw_size=4, realign_tool_upon_failure=True)\n\n self.do_tasks_simultaneous(a_bot_task2, b_bot_task2, timeout=90.0)\n\n if not self.tools.screw_is_suctioned.get(\"m4\", False):\n rospy.logerr(\"Failed to pick screw from feeder, could not fix the issue. Abort.\")\n self.a_bot.gripper.open()\n self.a_bot.go_to_named_pose(\"home\")\n\n if panel == \"panel_bearing\":\n part_name = \"assembled_part_03_\"\n elif panel == \"panel_motor\":\n part_name = \"assembled_part_02_\"\n\n screw_target_pose = geometry_msgs.msg.PoseStamped()\n screw_target_pose.header.frame_id = part_name + \"bottom_screw_hole_1\"\n screw_target_pose.pose.orientation = geometry_msgs.msg.Quaternion(\n *tf_conversions.transformations.quaternion_from_euler(radians(-20), 0, 0))\n if not self.fasten_screw_vertical('b_bot', screw_target_pose, allow_collision_with_object=panel, approach_from_front=approach_from_front):\n # Fallback for screw 1\n rospy.logerr(\"Failed to fasten panel screw 1, trying to realign tool and retry.\")\n self.realign_tool(\"b_bot\", \"screw_tool_m4\")\n self.b_bot.go_to_named_pose(\"feeder_pick_ready\")\n self.pick_screw_from_feeder(\"b_bot\", screw_size=4)\n\n # Realign plate\n self.a_bot.gripper.close(force=100)\n self.a_bot.move_lin_rel(relative_translation=[0, -0.015, 0])\n self.a_bot.gripper.open(opening_width=0.08, wait=True)\n if panel == \"panel_bearing\":\n success_a = self.a_bot.load_program(program_name=\"wrs2020/bearing_plate_positioning.urp\", recursion_depth=3)\n else:\n success_a = self.a_bot.load_program(program_name=\"wrs2020/motor_plate_positioning.urp\", recursion_depth=3)\n if not success_a:\n rospy.logerr(\"Failed to load plate positioning program on a_bot\")\n return False\n if not self.a_bot.execute_loaded_program():\n rospy.logerr(\"Failed to execute plate positioning program on a_bot\")\n return False\n helpers.wait_for_UR_program(\"/a_bot\", rospy.Duration.from_sec(20))\n\n # Retry fastening\n if not self.fasten_screw_vertical('b_bot', screw_target_pose, allow_collision_with_object=panel, approach_from_front=approach_from_front):\n rospy.logerr(\"Failed to fasten panel screw 2 again. 
Aborting.\")\n return False\n rospy.loginfo(\"Successfully fastened screw 1\")\n\n def a_bot_task3():\n self.a_bot.gripper.close()\n self.a_bot.gripper.open()\n if not self.a_bot.go_to_named_pose(\"home\", wait=False):\n rospy.logerr(\"Failed to move a_bot home!\")\n return False\n\n def b_bot_task3():\n self.pick_screw_from_feeder(\"b_bot\", screw_size=4, realign_tool_upon_failure=True)\n self.do_tasks_simultaneous(a_bot_task3, b_bot_task3, timeout=180.0)\n if not self.tools.screw_is_suctioned.get(\"m4\", False):\n rospy.logerr(\"Failed to pick second screw from feeder, could not fix the issue. Abort.\")\n\n screw_target_pose.header.frame_id = part_name + \"bottom_screw_hole_2\"\n if not self.fasten_screw_vertical('b_bot', screw_target_pose, allow_collision_with_object=panel, approach_from_front=approach_from_front):\n # Fallback for screw 2: Realign tool, recenter plate, try again\n rospy.logerr(\"Failed to fasten panel screw 2, trying to realign tool and retrying.\")\n self.realign_tool(\"b_bot\", \"screw_tool_m4\")\n self.b_bot.go_to_named_pose(\"feeder_pick_ready\")\n self.pick_screw_from_feeder(\"b_bot\", screw_size=4)\n\n # Recenter plate\n center_plate_pose = geometry_msgs.msg.PoseStamped()\n if panel == \"panel_bearing\":\n center_plate_pose.header.frame_id = part_name + \"pulley_ridge_middle\"\n else: # motor panel\n center_plate_pose.header.frame_id = part_name + \"motor_screw_hole_5\"\n center_plate_pose.pose.orientation = geometry_msgs.msg.Quaternion(*tf_conversions.transformations.quaternion_from_euler(0, radians(60), -tau/4))\n center_plate_pose.pose.position.x = 0.0025\n self.a_bot.gripper.open(opening_width=0.08, wait=False)\n self.a_bot.go_to_pose_goal(center_plate_pose, move_lin=False)\n self.a_bot.gripper.close(force=100)\n self.a_bot.gripper.open()\n if not self.a_bot.go_to_named_pose(\"home\"):\n rospy.logerr(\"Failed to move a_bot home!\")\n return False\n if not self.fasten_screw_vertical('b_bot', screw_target_pose, allow_collision_with_object=panel, approach_from_front=approach_from_front):\n rospy.logerr(\"Failed to fasten panel screw 2 again. 
Aborting.\")\n return False\n self.unlock_base_plate()\n rospy.sleep(0.5)\n self.lock_base_plate()\n self.allow_collisions_with_robot_hand(panel, \"a_bot\", allow=False)\n return True\n\n def panels_tasks_combined(self, simultaneous=True, pick_and_orient_insert_bearing=False,\n pick_and_orient_insert_motor=False, do_base_plate_first=True):\n panels_order = [\"panel_bearing\", \"panel_motor\"]\n switch_panels_order = self.assembly_database.assembly_info.get(\"switched_motor_and_bearing\", False)\n if switch_panels_order:\n panels_order = panels_order[::-1]\n\n print(\"self.assembly_status.completed_subtask_zero\", self.assembly_status.completed_subtask_zero)\n if do_base_plate_first and not self.assembly_status.completed_subtask_zero:\n self.b_bot.go_to_named_pose(\"home\")\n self.publish_status_text(\"Target: base plate\")\n if not self.subtask_zero(skip_initial_perception=False):\n if not self.subtask_zero(skip_initial_perception=False): # Try again\n return False\n self.assembly_status.completed_subtask_zero = True\n self.publish_part_in_assembled_position(\"base\", marker_only=True)\n self.a_bot.go_to_named_pose(\"home\", speed=1.0)\n\n self.publish_status_text(\"Target: L-plates\")\n\n # Pick bearing panel\n success = False\n for _ in range(5):\n success = self.pick_panel_with_handover(panels_order[0])\n if success:\n break\n above_centering_joint_pose = [0.48, -2.05, 2.05, -1.55, -1.58, -1.09-(tau/2)]\n self.a_bot.move_joints(above_centering_joint_pose, speed=1.0)\n\n self.panel_bearing_pose = None\n self.b_bot_success = False\n\n # Store bearing panel, pick motor panel\n def a_bot_task():\n self.panel_bearing_pose = self.center_panel(panels_order[0], store=True)\n self.assembly_status.bearing_panel_placed_outside_of_tray = True\n\n def b_bot_task():\n self.b_bot_success = self.pick_panel_with_handover(panels_order[1], simultaneous=False)\n above_centering_joint_pose = [0.48, -2.05, 2.05, -1.55, -1.58, -1.09-(tau/2)]\n self.a_bot.move_joints(above_centering_joint_pose, speed=1.0)\n\n if simultaneous:\n self.do_tasks_simultaneous(a_bot_task, b_bot_task, timeout=120)\n else:\n a_bot_task()\n b_bot_task()\n if not self.b_bot_success:\n rospy.logerr(\"Fail to do panels_assembly 1: simultaneous=%s b:%s\" % (simultaneous, self.b_bot_success))\n self.return_l_plates()\n return False\n\n # Store motor panel, look at base plate with b_bot\n self.panel_motor_pose = None\n\n def a_bot_task():\n self.panel_motor_pose = self.center_panel(panels_order[1], store=True)\n self.assembly_status.motor_panel_placed_outside_of_tray = True\n\n def b_bot_task():\n return True\n # if pick_and_orient_insert_motor:\n # rospy.loginfo(\"Picking and orienting motor\")\n # self.assembly_status.motor_picked = self.pick_motor()\n # self.b_bot.go_to_named_pose(\"centering_area\", speed=1.0)\n\n if simultaneous:\n self.do_tasks_simultaneous(a_bot_task, b_bot_task, timeout=300)\n else:\n a_bot_task()\n b_bot_task()\n\n # without motor pick\n if not self.panel_motor_pose :\n rospy.logerr(\"Fail to do panels_assembly 2: simultaneous=%s\" % (simultaneous))\n return False\n\n # with motor pick\n if not self.panel_motor_pose or (pick_and_orient_insert_motor and not self.assembly_status.motor_picked):\n rospy.logerr(\"Fail to do panels_assembly 2: simultaneous=%s a:%s b:%s\" % (simultaneous, bool(self.panel_motor_pose), self.assembly_status.motor_picked))\n return False\n\n # Pick base plate with a_bot, prepare fastening with b_bot\n self.a_bot_success = False\n self.b_bot_success = False\n self.panel_bearing_picked = 
False\n\n def a_bot_task():\n if not self.assembly_status.completed_subtask_zero:\n self.publish_status_text(\"Target: base plate\")\n if not self.subtask_zero(skip_initial_perception=False):\n if not self.subtask_zero(skip_initial_perception=False): # Try again\n return False\n if simultaneous:\n if not self.place_panel(\"a_bot\", panels_order[0], pick_again=True, pick_only=True, fake_position=True):\n rospy.logerr(\"Fail to place bearing panel in simultaneous!!\")\n return False\n self.panel_bearing_picked = True\n self.a_bot_success = True\n\n def b_bot_task():\n # if pick_and_orient_insert_motor and self.assembly_status.motor_picked:\n # rospy.sleep(5)\n # self.assembly_status.motor_oriented = self.orient_motor()\n # if not self.assembly_status.motor_oriented:\n # self.assembly_status.motor_placed_outside_of_tray = True\n # self.b_bot.move_lin_rel(relative_translation=[0, 0, 0.06], speed=0.3)\n start_time = rospy.get_time()\n while not self.b_bot_success and rospy.get_time()-start_time < 20:\n self.b_bot_success = self.do_change_tool_action(\"b_bot\", equip=True, screw_size=4)\n self.b_bot.go_to_named_pose(\"screw_ready\")\n\n if simultaneous:\n self.do_tasks_simultaneous(a_bot_task, b_bot_task, timeout=150)\n else:\n a_bot_task()\n b_bot_task()\n if not self.b_bot_success or not self.a_bot_success:\n rospy.logerr(\"Fail to do panels_assembly3: simultaneous=%s a:%s b:%s\" % (simultaneous, self.a_bot_success, self.b_bot_success))\n if not self.do_change_tool_action(\"b_bot\", equip=False, screw_size=4):\n raise # Something is very wrong if this fails\n self.ab_bot.go_to_named_pose(\"home\")\n self.return_l_plates()\n return False\n\n self.publish_status_text(\"Target: L-plates\")\n self.a_bot_success = False\n self.b_bot_success = False\n\n def b_bot_task():\n self.b_bot_success = self.pick_screw_from_feeder(\"b_bot\", screw_size=4)\n self.b_bot.go_to_named_pose(\"feeder_pick_ready\")\n\n def a_bot_task():\n rospy.sleep(1)\n if not self.place_panel(\"a_bot\", panels_order[0], pick_again=True, fake_position=True):\n return False\n if simultaneous:\n if not self.hold_panel_for_fastening(panels_order[0]):\n return False\n self.a_bot_success = True\n\n self.publish_status_text(\"Target: fasten panel bearing\")\n\n if simultaneous:\n self.do_tasks_simultaneous(a_bot_task, b_bot_task, timeout=120)\n else:\n a_bot_task()\n b_bot_task()\n if not self.b_bot_success or not self.a_bot_success:\n rospy.logerr(\"Fail to do panels_assembly 4: simultaneous=%s a:%s b:%s\" % (simultaneous, self.a_bot_success, self.b_bot_success))\n self.drop_in_tray(\"a_bot\")\n if not self.do_change_tool_action(\"b_bot\", equip=False, screw_size=4):\n raise # Something is very wrong if this fails\n self.ab_bot.go_to_named_pose(\"home\")\n self.return_l_plates()\n return False\n\n # Fasten plates\n self.publish_status_text(\"Target: %s\" % panels_order[0])\n self.panel_motor_picked = False\n\n def dummy_Task(): return True\n if not self.fasten_panel(panels_order[0], simultaneous=simultaneous, a_bot_task_2nd_screw=dummy_Task):\n return False\n\n self.a_bot_success = False\n self.b_bot_success = False\n\n def a_bot_task():\n if not self.place_panel(\"a_bot\", panels_order[1], pick_again=True, fake_position=True):\n self.drop_in_tray(\"a_bot\")\n return False\n if not self.hold_panel_for_fastening(panels_order[1]):\n return False\n self.a_bot_success = True\n\n def b_bot_task():\n self.b_bot_success = self.pick_screw_from_feeder(\"b_bot\", screw_size=4)\n\n self.publish_status_text(\"Target: %s\" % panels_order[1])\n if 
simultaneous:\n self.do_tasks_simultaneous(a_bot_task, b_bot_task, timeout=120)\n else:\n a_bot_task()\n b_bot_task()\n\n if not self.b_bot_success or not self.a_bot_success:\n rospy.logerr(\"Fail to do panels_assembly 5: simultaneous=%s\" % simultaneous)\n self.do_change_tool_action(\"b_bot\", equip=False, screw_size=4)\n self.return_l_plates()\n return False\n\n if pick_and_orient_insert_bearing:\n def a_bot_2nd_task():\n self.publish_status_text(\"Target: Bearing\")\n if not self.pick_bearing(\"a_bot\"):\n rospy.logerr(\"Fail to pick bearing (1). abort\")\n return False\n self.assembly_status.bearing_picked = True\n if not self.orient_bearing(\"assembly\", \"a_bot\", part1=True, part2=True):\n rospy.logerr(\"Fail to orient bearing (1). abort\")\n self.drop_in_tray(\"a_bot\")\n return False\n self.assembly_status.bearing_oriented = True\n if not self.insert_bearing(\"assembled_part_07_inserted\", robot_name=\"a_bot\"):\n rospy.logerr(\"Fail to insert bearing (1). abort\")\n self.a_bot.move_lin_rel(relative_translation=[0.03, 0, 0], speed=0.05)\n self.drop_in_tray(\"a_bot\")\n return False\n\n self.assembly_status.bearing_inserted_in_panel = True\n\n waypoints = []\n waypoints.append((self.a_bot.move_lin_rel([0.15, 0, 0.0], pose_only=True), 0, 1.0))\n waypoints.append((self.a_bot.move_lin_rel([0.1, -0.3, 0.2], pose_only=True), 0, 1.0))\n waypoints.append((\"screw_ready\", 0, 1.0))\n if not self.a_bot.move_joints_trajectory(waypoints):\n rospy.logerr(\"Fail to go home\")\n self.a_bot.move_lin_rel(relative_translation=[0.1, 0, 0])\n self.a_bot.move_lin_rel(relative_translation=[0.1, -0.2, 0.1])\n self.a_bot.go_to_named_pose(\"home\")\n return False\n else:\n def a_bot_2nd_task(): return True\n\n if pick_and_orient_insert_motor and self.assembly_status.motor_oriented:\n def b_bot_2nd_task():\n self.publish_status_text(\"Target: Bearing & Motor\")\n midpoint1 = conversions.to_pose_stamped(\"vgroove_aid_drop_point_link\", [-0.25, 0.1, 0.4, tau/2, 0, radians(28)])\n waypoints = []\n waypoints.append((self.b_bot.compute_ik(midpoint1, timeout=0.02, retry=True), 0, 1.0))\n waypoints.append((\"centering_area\", 0, 1.0))\n self.b_bot.move_joints_trajectory(waypoints)\n if not self.align_motor_pre_insertion():\n return False\n if not self.insert_motor(\"assembled_part_02_back_hole\"):\n rospy.logerr(\"Fail to insert motor!!\")\n if self.b_bot.is_protective_stopped():\n rospy.logfatal(\"Something is very wrong. 
Trying to unlock and proceed\")\n self.b_bot.unlock_protective_stop()\n self.b_bot.gripper.open()\n self.b_bot.move_lin_rel(relative_translation=[-0.05, 0, 0], speed=0.05)\n self.b_bot.move_lin_rel(relative_translation=[0, 0.05, 0.1])\n self.b_bot.go_to_pose_goal(midpoint1)\n self.b_bot.go_to_named_pose(\"centering_area\")\n # self.orient_motor_in_aid_edge()\n place_pose = conversions.to_pose_stamped(\"right_centering_link\", [0.0, 0, 0, 0, 0, 0])\n self.simple_place(\"b_bot\", place_pose, axis='x', sign=-1, approach_height=0.15, item_id_to_detach='motor', place_height=0.03)\n self.assembly_status.motor_placed_outside_of_tray = True\n self.assembly_status.motor_picked = False\n self.assembly_status.motor_oriented = False\n return False\n self.despawn_object(\"motor\")\n self.publish_part_in_assembled_position(\"motor\", marker_only=True)\n self.assembly_status.motor_inserted_in_panel = True\n return True\n else:\n self.assembly_status.motor_inserted_in_panel = False\n def b_bot_2nd_task(): return True\n\n # Normal approach\n # if not self.fasten_panel(panels_order[1], simultaneous=simultaneous, unequip_tool_on_success=True):\n # Optimistic approach\n if not self.fasten_panel(panels_order[1], simultaneous=simultaneous, a_bot_task_2nd_screw=a_bot_2nd_task, unequip_tool_on_success=True, b_bot_2nd_task=b_bot_2nd_task):\n self.do_change_tool_action(\"b_bot\", equip=False, screw_size=4)\n return False\n\n self.do_change_tool_action(\"b_bot\", equip=False, screw_size=4)\n\n rospy.loginfo(\"===== Panels assembly completed! =====\")\n rospy.loginfo(\"===== Assembly status: %s =====\" % str(vars(self.assembly_status)))\n\n return True\n\n def subtask_h(self):\n # Attach belt\n rospy.loginfo(\"======== SUBTASK H (belt) ========\")\n self.publish_status_text(\"Target: Belt\")\n return self.belt_urscript(task_name=\"asm2021\")\n\n def subtask_i(self, simultaneous=False):\n # Insert motor cables\n rospy.loginfo(\"======== SUBTASK I (cables) ========\")\n self.publish_status_text(\"Target: Motor Cables B\")\n self.insert_motor_cables_with_tool(\"black\", simultaneous=simultaneous)\n self.publish_status_text(\"Target: Motor Cables R\")\n self.insert_motor_cables_with_tool(\"red\", simultaneous=simultaneous)\n return False\n\n def exhibition_1(self):\n \"\"\" Fasten motor and bearing from their insertion position (motor has 2 screws already) \n \"\"\"\n def a_bot_task():\n self.fasten_motor(\"a_bot\", part1=False)\n self.unequip_tool(\"a_bot\")\n self.a_bot.go_to_named_pose(\"home\")\n\n def b_bot_task():\n self.fasten_bearing(\"assembly\", with_extra_retighten=False, robot_name=\"b_bot\")\n self.unequip_tool(\"b_bot\")\n self.b_bot.go_to_named_pose(\"home\")\n\n self.do_tasks_simultaneous(a_bot_task, b_bot_task)\n\n def exhibition_2(self):\n \"\"\" insert motor pulley and shaft+endcap \"\"\"\n self.subtask_b(simultaneous_execution=True)\n self.unequip_tool(\"b_bot\", \"set_screw_tool\")\n self.ab_bot.go_to_named_pose(\"home\")\n\n self.pick_end_cap()\n\n def a_bot_task():\n self.orient_shaft_end_cap(\"a_bot\")\n pass\n\n def b_bot_task():\n approach_vgroove = conversions.to_pose_stamped(\"vgroove_aid_drop_point_link\", [-0.100, -0.001, -0.005, tau/2., 0, 0])\n on_vgroove = conversions.to_pose_stamped(\"vgroove_aid_drop_point_link\", [0.000, -0.001, -0.005, tau/2., 0, 0])\n grasp_pose = conversions.to_pose_stamped(\"vgroove_aid_drop_point_link\", [0.012, -0.001, -0.005, tau/2., 0, 0])\n self.b_bot.go_to_pose_goal(approach_vgroove)\n self.b_bot.go_to_pose_goal(on_vgroove, move_lin=True, speed=0.1)\n 
self.b_bot.go_to_pose_goal(grasp_pose, move_lin=True, speed=0.05)\n self.b_bot.gripper.close()\n self.b_bot.go_to_pose_goal(on_vgroove, move_lin=True, speed=0.1)\n self.b_bot.go_to_pose_goal(approach_vgroove)\n self.b_bot.go_to_named_pose(\"home\")\n # pre_insertion_shaft = conversions.to_pose_stamped(\"tray_center\", [0.0, 0, 0.2, 0, 0, -tau/4.])\n # if not self.b_bot.go_to_pose_goal(pre_insertion_shaft, speed=0.3):\n\n self.do_tasks_simultaneous(a_bot_task, b_bot_task)\n\n pre_insertion_shaft = [1.78158, -0.98719, 2.42349, -4.57638, -1.78597, 0.00433]\n if not self.b_bot.move_joints(pre_insertion_shaft, speed=0.4):\n rospy.logerr(\"Fail to go to pre_insertion_shaft\")\n return False\n\n above_pre_insertion_end_cap = conversions.to_pose_stamped(\"tray_center\", [0.000, 0.010, 0.290]+np.deg2rad([-180, 90, -90]).tolist())\n if not self.a_bot.go_to_pose_goal(above_pre_insertion_end_cap, speed=0.6, move_lin=False):\n rospy.logerr(\"Fail to go to pre_insertion_end_cap\")\n return False\n pre_insertion_end_cap = conversions.to_pose_stamped(\"tray_center\", [0.001, 0.011, 0.250]+np.deg2rad([-180, 90, -90]).tolist())\n if not self.a_bot.go_to_pose_goal(pre_insertion_end_cap, speed=0.3, move_lin=True):\n rospy.logerr(\"Fail to go to pre_insertion_end_cap\")\n return False\n self.confirm_to_proceed('finetune')\n\n # self.confirm_to_proceed(\"insertion of end cap\")\n if not self.insert_end_cap():\n rospy.logerr(\"failed to insert end cap. maybe\")\n # return False\n self.despawn_object(\"end_cap\")\n self.a_bot.gripper.forget_attached_item()\n\n # self.confirm_to_proceed(\"Did insertion succeed? Press Enter to open gripper\")\n\n self.a_bot.gripper.send_command(0.06, velocity=0.01)\n self.a_bot.move_lin_rel([0, 0, 0.05], speed=0.3)\n self.a_bot.gripper.detach_object(\"end_cap\")\n self.despawn_object(\"end_cap\")\n\n self.confirm_to_proceed(\"prepare screw\")\n\n if not self.fasten_end_cap():\n return False\n\n if not self.a_bot.go_to_named_pose(\"home\"):\n return False\n\n if not self.align_shaft(\"assembled_part_07_inserted\", pre_insert_offset=0.065):\n return False\n self.b_bot.gripper.forget_attached_item()\n\n self.allow_collisions_with_robot_hand(\"base_fixture_top\", \"b_bot\")\n self.despawn_object(\"shaft\", collisions_only=True)\n self.confirm_to_proceed('finetune')\n if not self.insert_shaft(\"assembled_part_07_inserted\", target=0.043):\n return False\n self.publish_part_in_assembled_position(\"shaft\", marker_only=True)\n self.allow_collisions_with_robot_hand(\"base_fixture_top\", \"b_bot\", False)\n\n self.ab_bot.go_to_named_pose(\"home\")\n ##############\n\n def mtc_pick_screw_tool(self, screw_type):\n rospy.loginfo(\"======== PICK TASK ========\")\n success = False\n if screw_type in ['m3', 'm4']:\n return self.do_plan_pick_action('screw_tool_' + screw_type, 'tools', 'screw_tool_m3_pickup_link', [-1.0, 0.0, 0.0], save_solution_to_file='pick_screw_tool')\n\n def mtc_suck_screw(self, screw_type):\n rospy.loginfo(\"======== FASTEN TASK ========\")\n success = False\n tool = 'screw_tool_' + screw_type\n screw_tool_tip_frame = tool + '/' + tool + '_tip'\n screw_pickup_pose = geometry_msgs.msg.PoseStamped()\n screw_pickup_pose.header.frame_id = screw_type + '_feeder_outlet_link'\n screw_pickup_pose.pose.position.x = -0.01\n screw_pickup_pose.pose.orientation = geometry_msgs.msg.Quaternion(*tf.transformations.quaternion_from_euler(2*pi/3, 0, 0))\n if screw_type in ['m3', 'm4']:\n return self.do_plan_fastening_action('screw_tool_' + screw_type, screw_pickup_pose, 
object_subframe_to_place=screw_tool_tip_frame, save_solution_to_file='pick_screw')\n\n def mtc_place_object_in_tray_center(self, object_name):\n rospy.loginfo(\"======== PLACE TASK ========\")\n target_pose = geometry_msgs.msg.PoseStamped()\n target_pose.header.frame_id = 'tray_center'\n target_pose.pose.position.x = -0.04\n target_pose.pose.position.y = 0.08\n target_pose.pose.orientation.w = 1\n self.do_plan_place_action(object_name, target_pose, save_solution_to_file='place_' + object_name)\n\n def mtc_pickplace_l_panel(self):\n rospy.loginfo(\"======== PICKPLACE TASK ========\")\n\n target_pose = geometry_msgs.msg.PoseStamped()\n target_pose.header.frame_id = 'base/screw_hole_panel2_1'\n target_pose.pose.orientation.w = 1\n\n self.do_plan_pickplace_action('panel_bearing', target_pose, object_subframe_to_place='panel_bearing/bottom_screw_hole_aligner_1',\n robot_names=['b_bot', 'a_bot'], force_robot_order=True, save_solution_to_file='pickplace')\n\n def mtc_pick_place_task(self):\n rospy.loginfo(\"======== PICK-PLACE TASK ========\")\n pose = geometry_msgs.msg.PoseStamped()\n pose.header.frame_id = 'move_group/base/screw_hole_panel2_1'\n pose.pose.orientation.w = 1\n return self.do_plan_pickplace_action('b_bot', 'panel_bearing', pose, save_solution_to_file='panel_bearing/bottom_screw_hole_aligner_1')\n\n def update_assembly_display(self, assembly_status=None):\n if assembly_status is None:\n assembly_status = self.assembly_status\n\n if assembly_status.completed_subtask_zero:\n self.publish_part_in_assembled_position(\"base\", marker_only=True)\n if assembly_status.completed_subtask_a:\n self.publish_part_in_assembled_position(\"motor\", marker_only=True)\n if assembly_status.completed_subtask_b:\n self.publish_part_in_assembled_position(\"motor_pulley\", marker_only=True)\n if assembly_status.completed_subtask_c1:\n self.publish_part_in_assembled_position(\"bearing\", marker_only=True)\n if assembly_status.completed_subtask_c2:\n self.publish_part_in_assembled_position(\"shaft\", marker_only=True)\n if assembly_status.completed_subtask_d:\n self.publish_part_in_assembled_position(\"output_pulley\")\n if assembly_status.completed_subtask_e:\n self.publish_part_in_assembled_position(\"idler_pulley\")\n if assembly_status.completed_subtask_f:\n self.publish_part_in_assembled_position(\"panel_motor\", marker_only=True)\n if assembly_status.completed_subtask_g:\n self.publish_part_in_assembled_position(\"panel_bearing\", marker_only=True)\n\n def assemble_drive_unit_orchestrated(self, tray_name=None, simultaneous_execution=True):\n if not self.assembly_status.tray_placed_on_table and tray_name:\n if not self.pick_tray_from_agv_stack_calibration_long_side(tray_name=tray_name):\n rospy.logerr(\"Fail to pick and place tray. 
Abort!\")\n return False\n self.assembly_status.tray_placed_on_table = True\n\n self.update_assembly_display()\n\n if not self.assembly_status.completed_subtask_f and not self.assembly_status.completed_subtask_g \\\n and not self.assembly_status.bearing_panel_placed_outside_of_tray and not self.assembly_status.motor_panel_placed_outside_of_tray:\n\n print(\"Starting simultaneous Plates!\")\n # L-plates and base plate\n success = self.panels_tasks_combined(simultaneous=simultaneous_execution,\n pick_and_orient_insert_bearing=True,\n pick_and_orient_insert_motor=True,\n do_base_plate_first=False)\n if success:\n self.assembly_status.completed_subtask_zero = True\n self.assembly_status.completed_subtask_f = True\n self.assembly_status.completed_subtask_g = True\n else:\n rospy.logfatal(\"Fail to assemble panels... call a reset!\")\n raise\n\n self.update_assembly_display()\n self.publish_part_in_assembled_position(\"bearing\", marker_only=True)\n\n if not self.assembly_status.completed_subtask_a and not self.assembly_status.completed_subtask_c1:\n if self.assembly_status.motor_inserted_in_panel:\n # Optimistic!\n self.fasten_motor(\"a_bot\", \"b_bot\", part1=True, part2=False)\n self.align_bearing_holes(task=\"assembly\")\n self.b_bot.go_to_named_pose(\"home\")\n def a_bot_task():\n self.fasten_motor(\"a_bot\", part1=False)\n self.unequip_tool(\"a_bot\")\n self.a_bot.go_to_named_pose(\"home\")\n def b_bot_task():\n self.fasten_bearing(\"assembly\", with_extra_retighten=False, robot_name=\"b_bot\")\n self.unequip_tool(\"b_bot\")\n self.b_bot.go_to_named_pose(\"home\")\n self.do_tasks_simultaneous(a_bot_task, b_bot_task)\n # Assume success!!\n self.assembly_status.completed_subtask_a = True\n self.assembly_status.completed_subtask_c1 = True\n\n self.update_assembly_display()\n \n self.ab_bot.go_to_named_pose(\"home\")\n\n return self.assemble_drive_unit(tray_name)\n\n def unload_assembled_unit(self, tray_name=None):\n self.publish_status_text(\"Unloading\")\n self.unequip_tool(\"a_bot\")\n self.unequip_tool(\"b_bot\")\n self.reset_scene_and_robots()\n self.ab_bot.go_to_named_pose(\"home\", speed=1.0)\n if not self.unload_drive_unit():\n rospy.logerr(\"Fail to unload drive unit. Abort!\")\n return False\n if tray_name:\n self.publish_status_text(\"Returning tray\")\n if not self.return_tray_to_agv_stack_calibration_long_side(tray_name):\n rospy.logerr(\"Fail to return tray. Abort!\")\n return False\n return True\n\n def assemble_drive_unit(self, tray_name=None):\n # ======= Tray ========\n if not self.assembly_status.tray_placed_on_table:\n rospy.loginfo(\"=== Pick tray from AGV: START ===\")\n if tray_name:\n if not self.pick_tray_from_agv_stack_calibration_long_side(tray_name=tray_name):\n rospy.logerr(\"Fail to pick and place tray. Abort!\")\n return False\n rospy.loginfo(\"=== Pick tray from AGV: FINISH ===\")\n\n self.update_assembly_display()\n\n # ======= Base Plate ========\n if not self.assembly_status.completed_subtask_zero:\n rospy.loginfo(\"=== subtask ZERO: START ===\")\n self.b_bot.go_to_named_pose(\"home\")\n self.publish_status_text(\"Target: base plate\")\n if not self.subtask_zero(skip_initial_perception=False):\n if not self.subtask_zero(skip_initial_perception=False): # Try again\n rospy.logfatal(\"Fail to do subtask zero! 
call a reset\")\n raise # Nothing to do other than reset\n self.a_bot.go_to_named_pose(\"home\")\n rospy.loginfo(\"=== subtask ZERO: FINISH ===\")\n\n self.update_assembly_display()\n\n # ======= L-Plates ========\n\n def do_panel(panel_name, placed_outside_of_tray, subtask_completed, start_with_fallback=False):\n if not subtask_completed:\n rospy.loginfo(\"=== subtask \" + panel_name + \": START ===\")\n if not placed_outside_of_tray:\n for i in range(3):\n success = self.pick_panel_with_handover(panel_name, simultaneous=True, rotate_on_failure=True)\n if success:\n break\n if success:\n self.center_panel(panel_name, store=True)\n placed_outside_of_tray = True\n else:\n rospy.logerr(\"Could not pick bearing panel!\")\n if placed_outside_of_tray:\n success = self.place_panel(\"a_bot\", panel_name, pick_again=True, fake_position=True)\n if start_with_fallback:\n self.center_panel_on_base_plate(panel_name)\n if success:\n self.hold_panel_for_fastening(panel_name)\n success = self.fasten_panel(panel_name, simultaneous=True, unequip_tool_on_success=True)\n subtask_completed = success\n rospy.loginfo(\"=== subtask \" + panel_name + \": Finish (%s) ===\" % success)\n return subtask_completed\n\n switch_panels_order = self.assembly_database.assembly_info.get(\"switched_motor_and_bearing\", False)\n if switch_panels_order:\n do_panel(\"panel_motor\", self.assembly_status.motor_panel_placed_outside_of_tray, self.assembly_status.completed_subtask_f)\n do_panel(\"panel_bearing\", self.assembly_status.bearing_panel_placed_outside_of_tray, self.assembly_status.completed_subtask_g)\n else:\n do_panel(\"panel_bearing\", self.assembly_status.bearing_panel_placed_outside_of_tray, self.assembly_status.completed_subtask_g, start_with_fallback=True)\n do_panel(\"panel_motor\", self.assembly_status.motor_panel_placed_outside_of_tray, self.assembly_status.completed_subtask_f)\n\n self.a_bot.go_to_named_pose(\"home\", speed=self.speed_fastest, acceleration=self.acc_fastest)\n self.do_change_tool_action(\"b_bot\", equip=False, screw_size=4)\n\n self.update_assembly_display()\n\n # ======= Bearing ========\n if not self.assembly_status.completed_subtask_c1:\n rospy.loginfo(\"=== subtask C1: START ===\")\n self.assembly_status.completed_subtask_c1 = self.subtask_c1() # bearing\n rospy.loginfo(\"=== subtask C1: Finish (%s) ===\" % self.assembly_status.completed_subtask_c1)\n\n self.update_assembly_display()\n\n # ======= Motor ========\n if not self.assembly_status.completed_subtask_a:\n rospy.loginfo(\"=== subtask A: START ===\")\n self.assembly_status.completed_subtask_a = self.subtask_a() # motor\n rospy.loginfo(\"=== subtask A: Finish (%s) ===\" % self.assembly_status.completed_subtask_a)\n self.do_change_tool_action(\"a_bot\", equip=False)\n\n # ======= Motor Pulley ========\n if self.assembly_status.completed_subtask_a and not self.assembly_status.completed_subtask_b:\n rospy.loginfo(\"=== subtask B: START ===\")\n self.assembly_status.completed_subtask_b = self.subtask_b(simultaneous_execution=True) # motor pulley\n rospy.loginfo(\"=== subtask B: Finish (%s) ===\" % self.assembly_status.completed_subtask_b)\n self.do_change_tool_action(\"a_bot\", equip=False)\n\n # ======= Shaft ========\n if self.assembly_status.completed_subtask_c1 and not self.assembly_status.completed_subtask_c2:\n rospy.loginfo(\"=== subtask C2: START ===\")\n self.assembly_status.completed_subtask_c2 = self.subtask_c2() # shaft\n rospy.loginfo(\"=== subtask C2: Finish (%s) ===\" % self.assembly_status.completed_subtask_c2)\n\n # ======= 
Output Pulley ========\n if self.assembly_status.completed_subtask_c2 and not self.assembly_status.completed_subtask_d:\n rospy.loginfo(\"=== subtask D: START ===\")\n self.assembly_status.completed_subtask_d = self.subtask_d() # bearing spacer/output pulley\n rospy.loginfo(\"=== subtask D: Finish (%s) ===\" % self.assembly_status.completed_subtask_d)\n\n # ======= Idler Pulley ========\n if not self.assembly_status.completed_subtask_e:\n rospy.loginfo(\"=== subtask E: START ===\")\n self.assembly_status.completed_subtask_e = self.subtask_e()\n rospy.loginfo(\"=== subtask E: Finish (%s) ===\" % self.assembly_status.completed_subtask_e)\n\n self.publish_part_in_assembled_position(\"idler_pulley\")\n self.unequip_tool(\"b_bot\", \"padless_tool_m4\")\n\n # ======= Belt ========\n if not self.assembly_status.completed_subtask_h:\n rospy.loginfo(\"=== subtask H: START ===\")\n self.assembly_status.completed_subtask_h = self.subtask_h()\n rospy.loginfo(\"=== subtask H: Finish (%s) ===\" % self.assembly_status.completed_subtask_h)\n\n self.publish_part_in_assembled_position(\"belt\")\n \n # ======= Motor Cables ========\n if not self.assembly_status.completed_subtask_i1 or not self.assembly_status.completed_subtask_i2:\n rospy.loginfo(\"=== subtask I: START ===\")\n success = self.subtask_i(simultaneous=True)\n self.assembly_status.completed_subtask_i1 = success\n self.assembly_status.completed_subtask_i2 = success\n rospy.loginfo(\"=== subtask I: Finish (%s) ===\" % success)\n\n self.unload_assembled_unit(tray_name)\n rospy.loginfo(\"==== Finished. ====\")\n\n def full_assembly_task(self, simultaneous_execution=True):\n self.ab_bot.go_to_named_pose(\"home\")\n self.reset_scene_and_robots()\n orders = []\n orders.append({\"tray_name\": \"tray1\", \"assembly_name\": \"wrs_assembly_2021\", \"status\": self.get_first_order_status()}) # Top tray\n orders.append({\"tray_name\": \"tray2\", \"assembly_name\": \"wrs_assembly_2021\", \"status\": self.get_second_order_status()}) # Bottom tray\n\n simultaneous = [True, True]\n unload_right_away = [False, False]\n\n if not orders[0][\"status\"].tray_placed_on_table:\n print(\"get from AGV\")\n\n def load_first_assembly():\n self.set_assembly(orders[0][\"assembly_name\"])\n # self.do_tasks_simultaneous(load_first_assembly, self.center_tray_stack, timeout=90)\n load_first_assembly()\n self.center_tray_stack()\n else:\n print(\"already on the table\")\n self.set_assembly(orders[0][\"assembly_name\"])\n stack_center = [-0.03, 0]\n tray_heights = [0.03, -0.02]\n self.trays = {\"tray%s\" % (i+1): (stack_center+[tray_height], True) for i, tray_height in enumerate(tray_heights)}\n self.trays_return = {\"tray%s\" % (i+1): (stack_center+[tray_height], True) for i, tray_height in enumerate(tray_heights[::-1])}\n\n for i, order in enumerate(orders):\n self.assembly_status = order[\"status\"]\n\n if self.assembly_status.tray_delivered_to_agv:\n rospy.loginfo(\"Order nr. \" + str(i) + \" already completed! Skipping.\")\n continue\n if not self.assembly_status.tray_delivered_to_agv and self.assembly_status.assembly_unloaded:\n if not self.return_tray_to_agv_stack_calibration_long_side(order[\"tray_name\"]):\n rospy.logerr(\"Fail to return tray. 
Abort, call reset!\")\n return False\n continue\n\n self.set_assembly(order[\"assembly_name\"])\n\n if i == 1 and not self.assembly_status.tray_placed_on_table:\n self.spawn_tray_stack(orientation_parallel=True, spawn_single_tray=True)\n if unload_right_away[i]: # Start by unloading the tray into the AGV\n self.unload_assembled_unit(order[\"tray_name\"])\n else: # Normal execution\n if simultaneous[i]:\n self.assemble_drive_unit_orchestrated(order[\"tray_name\"], simultaneous_execution)\n else:\n self.assemble_drive_unit(order[\"tray_name\"])\n rospy.loginfo(\"==== Finished both tasks ====\")\n\n self.publish_part_in_assembled_position(\"Finished!\")\n \n return\n\n def get_first_order_status(self):\n \"\"\" A convenience function to define the status of the first order (to be used after a reset in the competition)\n \"\"\"\n s = AssemblyStatus()\n s.tray_placed_on_table = False # Needs to be True when doing second set only!\n\n s.bearing_panel_placed_outside_of_tray = False\n s.motor_panel_placed_outside_of_tray = False\n\n s.belt_placed_outside_of_tray = False\n\n s.motor_picked = False\n s.motor_oriented = False\n s.motor_placed_outside_of_tray = False\n s.motor_inserted_in_panel = False\n\n s.bearing_placed_outside_of_tray = False\n s.bearing_picked = False\n s.bearing_oriented = False\n s.bearing_inserted_in_panel = False\n s.bearing_holes_aligned = False\n s.bearing_spacer_assembled = False\n\n s.idler_pulley_spacer_placed_outside_of_tray = False\n s.idler_pulley_placed_outside_of_tray = False\n\n s.completed_subtask_zero = False # Base\n s.completed_subtask_a = False # Motor\n s.completed_subtask_b = False # Motor pulley\n s.completed_subtask_c1 = False # Bearing\n s.completed_subtask_c2 = False # Shaft\n s.completed_subtask_d = False # Output pulley\n s.completed_subtask_e = False # Idler Pulley\n s.completed_subtask_f = False # Motor plate\n s.completed_subtask_g = False # Bearing plate\n s.completed_subtask_h = False # Belt\n s.completed_subtask_i1 = False # Cable 1\n s.completed_subtask_i2 = False # Cable 2\n\n s.assembly_unloaded = False\n\n s.tray_delivered_to_agv = False\n return s\n\n def get_second_order_status(self):\n \"\"\" A convenience function to define the status of the second order (to be used after a reset in the competition)\n \"\"\"\n s = AssemblyStatus()\n s.tray_placed_on_table = False\n\n s.bearing_panel_placed_outside_of_tray = False\n s.motor_panel_placed_outside_of_tray = False\n\n s.belt_placed_outside_of_tray = False\n\n s.motor_picked = False\n s.motor_oriented = False\n s.motor_placed_outside_of_tray = False\n s.motor_inserted_in_panel = False\n\n s.bearing_placed_outside_of_tray = False\n s.bearing_picked = False\n s.bearing_oriented = False\n s.bearing_inserted_in_panel = False\n s.bearing_holes_aligned = False\n s.bearing_spacer_assembled = False\n\n s.idler_pulley_spacer_placed_outside_of_tray = False\n s.idler_pulley_placed_outside_of_tray = False\n\n s.completed_subtask_zero = False # Base\n s.completed_subtask_a = False # Motor\n s.completed_subtask_b = False # Motor pulley\n s.completed_subtask_c1 = False # Bearing\n s.completed_subtask_c2 = False # Shaft\n s.completed_subtask_d = False # Fasten output pulley\n s.completed_subtask_e = False # Output pulley\n s.completed_subtask_f = False # Motor plate\n s.completed_subtask_g = False # Bearing plate\n s.completed_subtask_h = False # Belt\n s.completed_subtask_i1 = False # Cable 1\n s.completed_subtask_i2 = False # Cable 2\n\n s.assembly_unloaded = False\n\n s.tray_delivered_to_agv = False\n 
return s\n\n def assemble_drive_unit_simultaneous(self):\n # This is the v2. \"orchestrated\" is v3.\n if not self.assembly_status.tray_placed_on_table:\n self.center_tray_stack()\n self.pick_tray_from_agv_stack_calibration_long_side(\"tray1\")\n # TODO(cambel): add a loop for the second tray\n\n self.a_bot_success = False\n self.b_bot_success = False\n\n def b_bot_task():\n self.pick_and_store_motor()\n self.b_bot.go_to_named_pose(\"home\")\n\n def a_bot_task():\n # Look into the tray\n self.publish_status_text(\"Target: base plate\")\n while not self.assembly_status.completed_subtask_zero and not rospy.is_shutdown():\n self.assembly_status.completed_subtask_zero = self.subtask_zero() # Base plate\n\n self.do_tasks_simultaneous(a_bot_task, b_bot_task, timeout=60)\n\n self.vision.activate_camera(\"b_bot_outside_camera\")\n\n self.confirm_to_proceed(\"press enter to proceed to subtask_g\")\n if not self.assembly_status.completed_subtask_g:\n self.assembly_status.completed_subtask_g = self.subtask_g() # Bearing plate\n self.confirm_to_proceed(\"press enter to proceed to subtask_f\")\n if not self.assembly_status.completed_subtask_f:\n self.assembly_status.completed_subtask_f = self.subtask_f() # Motor plate\n\n self.a_bot.go_to_named_pose(\"home\", speed=self.speed_fastest, acceleration=self.acc_fastest)\n self.do_change_tool_action(\"b_bot\", equip=False, screw_size=4)\n\n if self.assembly_status.completed_subtask_g: # Bearing plate\n self.assembly_status.completed_subtask_c1 = self.subtask_c1() # bearing\n # if self.assembly_status.completed_subtask_c1:\n # self.assembly_status.completed_subtask_c2 = self.subtask_c2() # shaft\n # if self.assembly_status.completed_subtask_c2:\n # self.assembly_status.completed_subtask_e = self.subtask_e() # bearing spacer / output pulley\n\n self.ab_bot.go_to_named_pose(\"home\")\n self.unload_drive_unit()\n self.return_tray_to_agv_stack_calibration_long_side(\"tray1\")\n self.assembly_status = AssemblyStatus()\n rospy.loginfo(\"==== Finished.\")\n",
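The assembly routines in the code above repeatedly call self.do_tasks_simultaneous(a_bot_task, b_bot_task, timeout=...) to run an a_bot closure and a b_bot closure in parallel. The helper itself is not shown in this excerpt; the following is only a minimal sketch of the idea, assuming plain Python threads and a shared deadline — the function name, signature, and return convention here are illustrative, not the project's actual implementation.

import threading
import time

def do_tasks_simultaneous(task_a, task_b, timeout=60.0):
    # Hypothetical sketch, not the o2ac helper: run two no-argument callables
    # concurrently and wait until both finish or the shared deadline expires.
    results = {}

    def run(name, task):
        results[name] = task()  # store whatever the closure returns (True/False/None)

    threads = [threading.Thread(target=run, args=("a_bot", task_a)),
               threading.Thread(target=run, args=("b_bot", task_b))]
    deadline = time.time() + timeout
    for t in threads:
        t.start()
    for t in threads:
        t.join(max(0.0, deadline - time.time()))
    return results.get("a_bot", False), results.get("b_bot", False)

In the excerpt, the closures usually report their outcome through attributes such as self.a_bot_success, self.b_bot_success, or self.picked rather than through return values, so the calling routine checks those flags after the simultaneous call returns.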
"#!/usr/bin/env python\n\n# Software License Agreement (BSD License)\n#\n# Copyright (c) 2021, OMRON SINIC X\n# All rights reserved.\n#\n# Redistribution and use in source and binary forms, with or without\n# modification, are permitted provided that the following conditions\n# are met:\n#\n# * Redistributions of source code must retain the above copyright\n# notice, this list of conditions and the following disclaimer.\n# * Redistributions in binary form must reproduce the above\n# copyright notice, this list of conditions and the following\n# disclaimer in the documentation and/or other materials provided\n# with the distribution.\n# * Neither the name of OMRON SINIC X nor the names of its\n# contributors may be used to endorse or promote products derived\n# from this software without specific prior written permission.\n#\n# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS\n# \"AS IS\" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT\n# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS\n# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE\n# COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,\n# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,\n# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;\n# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER\n# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT\n# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN\n# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE\n# POSSIBILITY OF SUCH DAMAGE.\n#\n# Author: Cristian C. Beltran-Hernandez, Felix von Drigalski\n\nimport actionlib\nfrom actionlib_msgs.msg import GoalStatus\n\nimport copy\nimport rosbag\nimport rospkg\nimport rospy\nimport moveit_commander\nimport numpy as np\n\nimport geometry_msgs.msg\nimport moveit_msgs.msg\nimport moveit_msgs.srv\nfrom std_msgs.msg import Bool\n\nfrom o2ac_routines import helpers\nfrom ur_control import conversions, transformations\n\n\nclass RobotBase():\n \"\"\" Base methods for any robot arm controlled via MoveIt \"\"\"\n\n def __init__(self, group_name, tf_listener):\n self.robot_group = moveit_commander.MoveGroupCommander(group_name)\n self.listener = tf_listener\n\n self.sequence_move_group = actionlib.SimpleActionClient(\"/sequence_move_group\", moveit_msgs.msg.MoveGroupSequenceAction)\n\n self.run_mode_ = True # The modes limit the maximum speed of motions. 
Used with the safety system @WRS2020\n self.pause_mode_ = False\n self.test_mode_ = False\n\n self.sub_run_mode_ = rospy.Subscriber(\"/run_mode\", Bool, self.run_mode_callback)\n self.sub_pause_mode_ = rospy.Subscriber(\"/pause_mode\", Bool, self.pause_mode_callback)\n self.sub_test_mode_ = rospy.Subscriber(\"/test_mode\", Bool, self.test_mode_callback)\n\n rospy.wait_for_service('compute_ik')\n rospy.wait_for_service('compute_fk')\n\n self.moveit_ik_srv = rospy.ServiceProxy('/compute_ik', moveit_msgs.srv.GetPositionIK)\n self.moveit_fk_srv = rospy.ServiceProxy('/compute_fk', moveit_msgs.srv.GetPositionFK)\n\n def run_mode_callback(self, msg):\n self.run_mode_ = msg.data\n\n def pause_mode_callback(self, msg):\n self.pause_mode_ = msg.data\n\n def test_mode_callback(self, msg):\n self.test_mode_ = msg.data\n\n def compute_fk(self, robot_state=None, tcp_link=None, frame_id=None):\n \"\"\"\n Compute the Forward kinematics for a move group using the MoveIt service\n robot_state: list, tuple, or moveit_msgs.msg.RobotState\n if passed as `list` or `tuple`: assumes that the joint values are in the same order as defined for that group\n \"\"\"\n if robot_state:\n if isinstance(robot_state, moveit_msgs.msg.RobotState):\n robot_state_ = robot_state\n elif isinstance(robot_state, (list, tuple, np.ndarray)):\n robot_state_ = moveit_msgs.msg.RobotState()\n robot_state_.joint_state.name = self.robot_group.get_active_joints()\n robot_state_.joint_state.position = list(robot_state)\n else:\n rospy.logerr(\"Unsupported type of robot_state %s\" % type(robot_state))\n raise\n else:\n return self.compute_fk(robot_state=self.robot_group.get_current_joint_values())\n req = moveit_msgs.srv.GetPositionFKRequest()\n req.fk_link_names = [tcp_link if tcp_link else self.robot_group.get_end_effector_link()]\n req.robot_state = robot_state_\n res = self.moveit_fk_srv.call(req)\n if res.error_code.val != moveit_msgs.msg.MoveItErrorCodes.SUCCESS:\n rospy.logwarn(\"compute FK failed with code: %s\" % res.error_code.val)\n return False\n else:\n if frame_id:\n return self.listener.transformPose(frame_id, res.pose_stamped[0])\n return res.pose_stamped[0]\n\n def compute_ik(self, target_pose, joints_seed=None, timeout=0.01, end_effector_link=\"\", retry=False, allow_collisions=False):\n \"\"\"\n Compute the Inverse Kinematics for a move group the MoveIt service\n target_pose: PoseStamped\n joints_seed: list, must be in the same order as defined for that group\n timeout: float, overrides the timeout for the IK solver (higher is sometimes better)\n retry: bool, for 10 secs send the same request until success or timeout\n allow_collisions: bool, compute IK with or without considering collisions with other objects (Likely self-collisions are always considered)\n return\n solution: `list`: the joint values are in the same order as defined for that group\n \"\"\"\n if isinstance(target_pose, geometry_msgs.msg.PoseStamped):\n ik_request = moveit_msgs.msg.PositionIKRequest()\n ik_request.avoid_collisions = not allow_collisions\n ik_request.timeout = rospy.Duration(timeout)\n ik_request.pose_stamped = target_pose\n ik_request.group_name = self.robot_group.get_name()\n ik_request.ik_link_name = end_effector_link\n ik_request.robot_state.joint_state.name = self.robot_group.get_active_joints()\n ik_request.robot_state.joint_state.position = joints_seed if joints_seed is not None else self.robot_group.get_current_joint_values()\n else:\n rospy.logerr(\"Unsupported type of target_pose %s\" % type(target_pose))\n raise\n\n req = 
moveit_msgs.srv.GetPositionIKRequest()\n req.ik_request = ik_request\n res = self.moveit_ik_srv.call(req)\n\n if retry:\n start_time = rospy.get_time()\n while res.error_code.val != moveit_msgs.msg.MoveItErrorCodes.SUCCESS \\\n and not rospy.is_shutdown() and (rospy.get_time() - start_time < 10):\n res = self.moveit_ik_srv.call(req)\n\n if res.error_code.val != moveit_msgs.msg.MoveItErrorCodes.SUCCESS:\n rospy.logwarn(\"compute IK failed with code: %s\" % res.error_code.val)\n return None\n\n solution = []\n for joint_name in self.robot_group.get_active_joints():\n solution.append(res.solution.joint_state.position[res.solution.joint_state.name.index(joint_name)])\n return solution\n\n def set_up_move_group(self, speed, acceleration, planner=\"OMPL\"):\n \"\"\" Set move group interface's planner, speed scaling, and acceleration scaling \"\"\"\n assert not rospy.is_shutdown()\n (speed_, accel_) = self.limit_speed_and_acc(speed, acceleration)\n group = self.robot_group\n rospy.logdebug(\"Setting velocity scaling to \" + str(speed_))\n rospy.logdebug(\"Setting acceleration scaling to \" + str(accel_))\n group.set_max_velocity_scaling_factor(speed_)\n group.set_max_acceleration_scaling_factor(accel_)\n self.set_planner(planner)\n return speed_, accel_\n\n def set_planner(self, planner=\"OMPL\"):\n group = self.robot_group\n if planner == \"OMPL\":\n group.set_planning_pipeline_id(\"ompl\")\n group.set_planner_id(\"RRTConnect\")\n group.set_goal_joint_tolerance(1e-3)\n elif planner == \"LINEAR\":\n group.set_planning_pipeline_id(\"pilz_industrial_motion_planner\")\n group.set_planner_id(\"LIN\")\n elif planner == \"PTP\":\n group.set_planning_pipeline_id(\"pilz_industrial_motion_planner\")\n group.set_planner_id(\"PTP\")\n elif planner == \"CIRC\":\n group.set_planning_pipeline_id(\"pilz_industrial_motion_planner\")\n group.set_planner_id(\"CIRC\")\n else:\n raise ValueError(\"Unsupported planner: %s\" % planner)\n\n def limit_speed_and_acc(self, speed, acceleration):\n if self.pause_mode_ or self.test_mode_:\n if speed > self.reduced_mode_speed_limit:\n rospy.loginfo(\"Reducing speed from \" + str(speed) + \" to \" + str(self.reduced_mode_speed_limit) + \" because robot is in test or pause mode\")\n speed = self.reduced_mode_speed_limit\n sp = copy.copy(speed)\n acc = copy.copy(acceleration)\n if sp > 1.0:\n sp = 1.0\n if acc is None:\n rospy.logdebug(\"Setting acceleration to \" + str(sp) + \" by default.\")\n acc = sp/2.0\n else:\n if acc > sp:\n rospy.logdebug(\"Setting acceleration to \" + str(sp) + \" instead of \" + str(acceleration) + \" to avoid jerky motion.\")\n acc = sp\n return (sp, acc)\n\n def check_goal_pose_reached(self, goal_pose):\n current_pose = self.robot_group.get_current_pose()\n if current_pose.header.frame_id != goal_pose.header.frame_id:\n gp = self.listener.transformPose(current_pose.header.frame_id, goal_pose)\n else:\n gp = goal_pose\n return helpers.all_close(gp.pose, current_pose.pose, 0.01)\n\n def joint_configuration_changes(self, start, end, tolerance=0.1):\n \"\"\" Returns True if the sign of any joint angle changes during the motion,\n and the joint angle is not near 0 (0.01 rad =~ 0.5 deg tolerance).\n \"\"\"\n signs = np.sign(np.array(start)*np.array(end))\n\n if np.all(signs > 0):\n return False # = all OK\n\n joint_changes_small = True\n for i in range(len(signs)):\n\n if signs[i] < 0:\n if abs(start[i] < tolerance) or abs(end[i] < tolerance):\n rospy.logdebug(\"Joint changes sign, but the change is small. 
Ignoring.\")\n rospy.logdebug(\"start[i] = %d6, end[i] = %d6\", (start[i], end[i]))\n continue\n rospy.logerr(\"Joint angle \" + str(i) + \" would change sign!\")\n print(\"start[i] = %d6, end[i] = %d6\", (start[i], end[i]))\n joint_changes_small = False\n if joint_changes_small:\n return False # = all OK\n else:\n return True # Joints change\n\n def get_current_pose_stamped(self):\n return self.robot_group.get_current_pose()\n\n def get_current_pose(self):\n return self.robot_group.get_current_pose().pose\n\n def get_named_pose_target(self, name):\n return helpers.ordered_joint_values_from_dict(self.robot_group.get_named_target_values(name), self.robot_group.get_active_joints())\n\n def save_plan(self, filename, plan):\n \"\"\" Store a given plan to a file \"\"\"\n rp = rospkg.RosPack()\n bagfile = rp.get_path(\"o2ac_routines\") + \"/config/saved_plans/\" + filename\n with rosbag.Bag(bagfile, 'w') as bag:\n bag.write(topic=\"saved_plan\", msg=plan)\n\n def load_saved_plan(self, filename):\n \"\"\" Loads a given plan from a file \"\"\"\n rp = rospkg.RosPack()\n bagfile = rp.get_path(\"o2ac_routines\") + \"/config/saved_plans/\" + filename\n with rosbag.Bag(bagfile, 'r') as bag:\n for (topic, plan, ts) in bag.read_messages():\n return plan\n\n def execute_saved_plan(self, filename=\"\", plan=[], wait=True):\n if filename and not plan:\n plan = self.load_saved_plan(filename)\n return self.execute_plan(plan, wait)\n\n # ------ Robot motion functions\n\n def execute_plan(self, plan, wait=True):\n self.robot_group.execute(plan, wait=wait)\n self.robot_group.clear_pose_targets()\n if wait:\n current_joints = self.robot_group.get_current_joint_values()\n goal_joints = helpers.get_trajectory_joint_goal(plan, self.robot_group.get_active_joints())\n return helpers.all_close(goal_joints, current_joints, 0.01)\n return True\n\n def go_to_pose_goal(self, pose_goal_stamped, speed=0.5, acceleration=None,\n end_effector_link=\"\", move_lin=False, wait=True, plan_only=False, initial_joints=None,\n allow_joint_configuration_flip=False, move_ptp=True, timeout=5, retry_non_linear=False,\n retime=False):\n \"\"\" Move robot to a given PoseStamped goal \n pose_goal_stamped: PoseStamped\n plan_only: bool, if true, return only plan and planning time\n initial_joints: list, initial joint configuration for planning\n allow_joint_configuration: bool\n move_lin: bool, if true, force used of Pilz linear planner\n move_ptp: bool, if true, plan first using Pilz PTP planner, in case of failure, retry with OMPL\n retry_non_linear: bool, if true, move_lin true and the planner fails to plan, replan using OMPL\n retime: bool, if true, retime plan using `time_optimal_trajectory_generation`\n \"\"\"\n move_ptp = False if move_lin else move_ptp # Override if move_lin is set (Linear takes priority since PTP is the default value)\n\n planner = \"LINEAR\" if move_lin else (\"PTP\" if move_ptp else \"OMPL\")\n speed_, accel_ = self.set_up_move_group(speed, acceleration, planner)\n\n group = self.robot_group\n group.clear_pose_targets()\n\n if not end_effector_link:\n end_effector_link = self.ns + \"_gripper_tip_link\"\n group.set_end_effector_link(end_effector_link)\n\n if move_lin: # is this necessary??\n pose_goal_ = self.listener.transformPose(\"world\", pose_goal_stamped)\n else:\n pose_goal_ = pose_goal_stamped\n success = False\n start_time = rospy.Time.now()\n tries = 0\n robots_in_simultaneous = rospy.get_param(\"/o2ac/simultaneous\", False)\n timeout = 15.0 if robots_in_simultaneous else timeout\n while not success and 
(rospy.Time.now() - start_time < rospy.Duration(timeout)) and not rospy.is_shutdown():\n if initial_joints:\n group.set_start_state(helpers.to_robot_state(group, initial_joints))\n else:\n group.set_start_state_to_current_state()\n\n group.set_pose_target(pose_goal_)\n success, plan, planning_time, error = group.plan()\n\n if success:\n if self.joint_configuration_changes(plan.joint_trajectory.points[0].positions,\n plan.joint_trajectory.points[-1].positions) \\\n and not allow_joint_configuration_flip:\n success = False\n rospy.logwarn(\"Joint configuration would have flipped.\")\n continue\n if success:\n if planner != \"LINEAR\" or retime:\n # retime\n plan = self.robot_group.retime_trajectory(self.robot_group.get_current_state(), plan, algorithm=\"time_optimal_trajectory_generation\",\n velocity_scaling_factor=speed_, acceleration_scaling_factor=accel_)\n if plan_only:\n group.set_start_state_to_current_state()\n group.clear_pose_targets()\n return plan, planning_time\n else:\n success = self.execute_plan(plan, wait=wait)\n else:\n if move_ptp: # Just one try is enough for PTP, give up and try OMPL\n self.set_up_move_group(speed, acceleration, \"OMPL\")\n if robots_in_simultaneous:\n rospy.sleep(1.0) # give time to other robot to get out of the way\n elif not move_ptp:\n rospy.sleep(0.2)\n rospy.logwarn(\"go_to_pose_goal(move_lin=%s) attempt failed. Retrying.\" % str(move_lin))\n tries += 1\n\n if not success:\n rospy.logerr(\"go_to_pose_goal failed \" + str(tries) + \" times! Broke out, published failed pose. simultaneous=\" + str(robots_in_simultaneous))\n helpers.publish_marker(pose_goal_stamped, \"pose\", self.ns + \"_move_lin_failed_pose_\" + str(self.marker_counter))\n self.marker_counter += 1\n else:\n helpers.publish_marker(pose_goal_stamped, \"pose\", self.ns + \"_go_to_pose_goal_failed_pose_\" + str(self.marker_counter), marker_topic=\"o2ac_success_markers\")\n self.marker_counter += 1\n\n group.clear_pose_targets()\n if not success and move_lin and retry_non_linear:\n return self.go_to_pose_goal(pose_goal_stamped, speed/2, acceleration, end_effector_link, move_lin=False, plan_only=plan_only, initial_joints=initial_joints,\n allow_joint_configuration_flip=allow_joint_configuration_flip, move_ptp=True, timeout=timeout, retry_non_linear=False)\n return success\n\n def move_lin_trajectory(self, trajectory, speed=1.0, acceleration=None, end_effector_link=\"\",\n plan_only=False, initial_joints=None, allow_joint_configuration_flip=False, timeout=10):\n \"\"\" From multiple waypoints, compute a linear trajectory using Pilz Linear planner\"\"\"\n\n # TODO: Add allow_joint_configuration_flip\n if not self.set_up_move_group(speed, acceleration, planner=\"LINEAR\"):\n return False\n\n if not end_effector_link:\n end_effector_link = self.ns + \"_gripper_tip_link\"\n\n group = self.robot_group\n\n group.set_end_effector_link(end_effector_link)\n if len(trajectory[0]) == 2: # Speed per point was not defined\n waypoints = [(self.listener.transformPose(\"world\", ps), blend_radius, speed) for ps, blend_radius in trajectory]\n elif len(trajectory[0]) == 3:\n waypoints = [(self.listener.transformPose(\"world\", ps), blend_radius, speed) for ps, blend_radius, speed in trajectory]\n\n motion_plan_requests = []\n\n # Start from current pose\n if initial_joints:\n initial_pose = self.compute_fk(initial_joints, end_effector_link)\n group.set_pose_target(initial_pose)\n else:\n group.set_pose_target(group.get_current_pose(end_effector_link))\n msi = moveit_msgs.msg.MotionSequenceItem()\n msi.req = 
group.construct_motion_plan_request()\n msi.blend_radius = 0.0\n\n if initial_joints:\n msi.req.start_state = helpers.to_robot_state(self.robot_group, initial_joints)\n else:\n msi.req.start_state = helpers.to_robot_state(self.robot_group, self.robot_group.get_current_joint_values())\n\n motion_plan_requests.append(msi)\n\n for wp, blend_radius, spd in waypoints:\n self.set_up_move_group(spd, spd/2.0, planner=\"LINEAR\")\n group.clear_pose_targets()\n group.set_pose_target(wp)\n msi = moveit_msgs.msg.MotionSequenceItem()\n msi.req = group.construct_motion_plan_request()\n msi.req.start_state = moveit_msgs.msg.RobotState()\n msi.blend_radius = blend_radius\n motion_plan_requests.append(msi)\n\n # Force last point to be 0.0 to avoid raising an error in the planner\n motion_plan_requests[-1].blend_radius = 0.0\n\n # Make MotionSequence\n goal = moveit_msgs.msg.MoveGroupSequenceGoal()\n goal.request = moveit_msgs.msg.MotionSequenceRequest()\n goal.request.items = motion_plan_requests\n # Plan only always for compatibility with simultaneous motions\n goal.planning_options.plan_only = True\n\n start_time = rospy.Time.now()\n success = False\n robots_in_simultaneous = rospy.get_param(\"/o2ac/simultaneous\", False)\n timeout = 15.0 if robots_in_simultaneous else timeout\n while not success and (rospy.Time.now() - start_time < rospy.Duration(timeout)) and not rospy.is_shutdown():\n\n self.sequence_move_group.send_goal_and_wait(goal)\n response = self.sequence_move_group.get_result()\n\n group.clear_pose_targets()\n\n if response.response.error_code.val == 1:\n plan = response.response.planned_trajectories[0] # support only one plan?\n planning_time = response.response.planning_time\n if plan_only:\n return plan, planning_time\n else:\n return self.execute_plan(plan, wait=True)\n else:\n if robots_in_simultaneous:\n rospy.sleep(1.0) # give time to other robot to get out of the way\n else:\n rospy.sleep(0.2)\n rospy.logerr(\"Failed to plan linear trajectory. 
error code: %s\" % response.response.error_code.val)\n return False\n\n def move_lin(self, pose_goal_stamped, speed=0.5, acceleration=None, end_effector_link=\"\", wait=True,\n plan_only=False, initial_joints=None, allow_joint_configuration_flip=False):\n \"\"\" Wrapper for compatibility with old API \"\"\"\n return self.go_to_pose_goal(pose_goal_stamped, speed, acceleration, end_effector_link, move_lin=True,\n wait=wait, plan_only=plan_only, initial_joints=initial_joints,\n allow_joint_configuration_flip=allow_joint_configuration_flip)\n\n def move_lin_rel(self, relative_translation=[0, 0, 0], relative_rotation=[0, 0, 0], speed=.5,\n acceleration=None, relative_to_robot_base=False, relative_to_tcp=False,\n wait=True, end_effector_link=\"\", plan_only=False, initial_joints=None,\n allow_joint_configuration_flip=False, pose_only=False, timeout=5.0, retime=False):\n '''\n Does a move_lin relative to the current position of the robot.\n\n relative_translation: translation relative to current tcp position, expressed in world frame\n relative_rotation: rotation relative to current tcp position, expressed in world frame\n\n If any of the following flags is active, the relative motion is not expressed in the world frame any more\n relative_to_robot_base: If true, uses the robot_base coordinates for the relative motion (not workspace_center!)\n relative_to_tcp: If true, uses the robot's end effector link coordinates for the relative motion\n '''\n if not end_effector_link:\n end_effector_link = self.ns + \"_gripper_tip_link\"\n\n group = self.robot_group\n group.set_end_effector_link(end_effector_link)\n\n if initial_joints:\n w2b = self.listener.lookupTransform(\"world\", self.ns + \"_base_link\", rospy.Time.now()) # static transform\n t_w2b = transformations.pose_to_transform(list(w2b[0]) + list(w2b[1])) # transform robot's base to world frame\n b2tcp = self.compute_fk(initial_joints, tcp_link=end_effector_link, frame_id=self.ns + \"_base_link\") # forward kinematics\n t_b2tcp = conversions.from_pose(b2tcp.pose) # transform tcp to robot's base\n if relative_to_tcp:\n new_pose = conversions.to_pose_stamped(end_effector_link, [0, 0, 0, 0, 0, 0.])\n elif relative_to_robot_base:\n new_pose = self.compute_fk(initial_joints, tcp_link=end_effector_link, frame_id=self.ns + \"_base_link\")\n else:\n t_w2tcp = transformations.concatenate_matrices(t_w2b, t_b2tcp)\n new_pose = conversions.to_pose_stamped(\"world\", transformations.pose_quaternion_from_matrix(t_w2tcp))\n else:\n new_pose = group.get_current_pose()\n\n if relative_to_robot_base:\n new_pose = self.listener.transformPose(self.ns + \"_base_link\", new_pose)\n elif relative_to_tcp:\n new_pose.header.stamp = rospy.Time.now()\n # Workaround for TF lookup into the future error\n tries = 0\n while tries < 10:\n try:\n self.listener.waitForTransform(self.ns + \"_gripper_tip_link\", new_pose.header.frame_id, new_pose.header.stamp, rospy.Duration(1))\n new_pose = self.listener.transformPose(self.ns + \"_gripper_tip_link\", new_pose)\n break\n except:\n tries += 1\n\n new_position = conversions.from_point(new_pose.pose.position) + relative_translation\n new_pose.pose.position = conversions.to_point(new_position)\n new_pose.pose.orientation = helpers.rotateQuaternionByRPYInUnrotatedFrame(relative_rotation[0], relative_rotation[1],\n relative_rotation[2], new_pose.pose.orientation)\n\n if initial_joints:\n newpose = conversions.from_pose_to_list(new_pose.pose) # new relative transformation\n t_newpose = transformations.pose_to_transform(newpose)\n if 
relative_to_tcp:\n # manually compute the transform from TCP to world since we are doing offline planning\n t_w2tcp = transformations.concatenate_matrices(t_w2b, t_b2tcp, t_newpose)\n new_pose = conversions.to_pose_stamped(\"world\", transformations.pose_quaternion_from_matrix(t_w2tcp))\n if relative_to_robot_base:\n # manually compute the transform from base to world since we are doing offline planning\n t_w2tcp = transformations.concatenate_matrices(t_w2b, t_newpose)\n new_pose = conversions.to_pose_stamped(\"world\", transformations.pose_quaternion_from_matrix(t_w2tcp))\n\n if pose_only:\n return new_pose\n else:\n return self.go_to_pose_goal(new_pose, speed=speed, acceleration=acceleration,\n end_effector_link=end_effector_link, wait=wait,\n move_lin=True, plan_only=plan_only, initial_joints=initial_joints,\n allow_joint_configuration_flip=allow_joint_configuration_flip,\n retry_non_linear=False, timeout=timeout, retime=retime)\n\n def go_to_named_pose(self, pose_name, speed=0.5, acceleration=None, wait=True, plan_only=False, initial_joints=None, move_ptp=True):\n \"\"\"\n pose_name should be a named pose in the moveit_config, such as \"home\", \"back\" etc.\n \"\"\"\n speed_, accel_ = self.set_up_move_group(speed, acceleration, planner=(\"PTP\" if move_ptp else \"OMPL\"))\n group = self.robot_group\n\n group.set_named_target(pose_name)\n\n start_time = rospy.Time.now()\n robots_in_simultaneous = rospy.get_param(\"/o2ac/simultaneous\", False)\n timeout = 15.0 if robots_in_simultaneous else 5.0\n success = False\n while not success and (rospy.Time.now() - start_time < rospy.Duration(timeout)) and not rospy.is_shutdown():\n if initial_joints:\n group.set_start_state(helpers.to_robot_state(group, initial_joints))\n else:\n group.set_start_state_to_current_state()\n success, plan, planning_time, error = group.plan()\n if success:\n # retime\n plan = self.robot_group.retime_trajectory(self.robot_group.get_current_state(), plan, algorithm=\"time_optimal_trajectory_generation\",\n velocity_scaling_factor=speed_, acceleration_scaling_factor=accel_)\n group.clear_pose_targets()\n group.set_start_state_to_current_state()\n if plan_only:\n return plan, planning_time\n else:\n success = self.execute_plan(plan, wait=wait)\n else:\n if move_ptp:\n rospy.logerr(\"NamedPose: Failed planning with PTP, retry with OMPL\")\n self.set_up_move_group(speed, acceleration, \"OMPL\")\n rospy.logerr(\"Failed planning with error: %s\" % error)\n if robots_in_simultaneous:\n rospy.sleep(1.0)\n else:\n rospy.sleep(0.2)\n return success\n\n def move_joints(self, joint_pose_goal, speed=0.6, acceleration=None, wait=True, plan_only=False, initial_joints=None, move_ptp=True):\n \"\"\" Wrapper for MoveIt joint target commands \"\"\"\n speed_, accel_ = self.set_up_move_group(speed, acceleration, planner=(\"PTP\" if move_ptp else \"OMPL\"))\n group = self.robot_group\n\n group.set_joint_value_target(joint_pose_goal)\n\n start_time = rospy.Time.now()\n robots_in_simultaneous = rospy.get_param(\"/o2ac/simultaneous\", False)\n timeout = 15.0 if robots_in_simultaneous else 5.0\n success = False\n while not success and (rospy.Time.now() - start_time < rospy.Duration(timeout)) and not rospy.is_shutdown():\n if initial_joints:\n group.set_start_state(helpers.to_robot_state(group, initial_joints))\n else:\n group.set_start_state_to_current_state()\n success, plan, planning_time, error = group.plan()\n if success:\n # retime\n plan = self.robot_group.retime_trajectory(self.robot_group.get_current_state(), plan, 
algorithm=\"time_optimal_trajectory_generation\",\n velocity_scaling_factor=speed_, acceleration_scaling_factor=accel_)\n group.set_start_state_to_current_state()\n if plan_only:\n return plan, planning_time\n else:\n return self.execute_plan(plan, wait=wait)\n else:\n if move_ptp:\n rospy.logerr(\"MoveJoints: Failed planning with PTP, retry with OMPL\")\n self.set_up_move_group(speed, acceleration, \"OMPL\")\n rospy.logerr(\"Failed planning with error: %s\" % error)\n if robots_in_simultaneous:\n rospy.sleep(1.0)\n else:\n rospy.sleep(0.2)\n\n return False\n\n def move_joints_trajectory(self, trajectory, speed=1.0, acceleration=None, plan_only=False, initial_joints=None, end_effector_link=\"\", planner=\"PTP\", timeout=5.0):\n \"\"\" From multiple waypoints, compute a joint trajectory using PTP or OMPL\"\"\"\n speed_, accel_ = self.set_up_move_group(speed, acceleration, planner=planner)\n\n group = self.robot_group\n\n try:\n if not end_effector_link:\n end_effector_link = self.ns + \"_gripper_tip_link\"\n group.set_end_effector_link(end_effector_link)\n except:\n pass\n\n waypoints = []\n for point, blend_radius, speed in trajectory:\n if isinstance(point, str):\n joint_values = helpers.ordered_joint_values_from_dict(group.get_named_target_values(point), group.get_active_joints())\n elif isinstance(point, tuple) or isinstance(point, list) or isinstance(point, geometry_msgs.msg.PoseStamped):\n joint_values = point\n else:\n rospy.logerr(\"Joint trajectory with invalid point: type=%s\" % type(point))\n return False\n waypoints.append((joint_values, blend_radius, speed))\n\n group.set_joint_value_target(initial_joints if initial_joints else group.get_current_joint_values())\n # Start from current pose\n msi = moveit_msgs.msg.MotionSequenceItem()\n msi.req = group.construct_motion_plan_request()\n msi.blend_radius = 0.0\n msi.req.start_state = helpers.to_robot_state(group, initial_joints if initial_joints else group.get_current_joint_values())\n\n motion_plan_requests = []\n motion_plan_requests.append(msi)\n\n for wp, blend_radius, spd in waypoints:\n self.set_up_move_group(spd, spd/2.0, planner=planner)\n group.clear_pose_targets()\n try:\n group.set_joint_value_target(wp)\n except Exception as e:\n rospy.logerr(\"Can set joint traj point: %s. 
Abort\" % e)\n break\n msi = moveit_msgs.msg.MotionSequenceItem()\n msi.req = group.construct_motion_plan_request()\n msi.req.start_state = moveit_msgs.msg.RobotState()\n msi.blend_radius = blend_radius\n motion_plan_requests.append(msi)\n\n # Force last point to be 0.0 to avoid raising an error in the planner\n motion_plan_requests[-1].blend_radius = 0.0\n\n # Make MotionSequence\n goal = moveit_msgs.msg.MoveGroupSequenceGoal()\n goal.request = moveit_msgs.msg.MotionSequenceRequest()\n goal.request.items = motion_plan_requests\n # Plan only always for compatibility with simultaneous motions\n goal.planning_options.plan_only = True\n\n start_time = rospy.Time.now()\n success = False\n robots_in_simultaneous = rospy.get_param(\"/o2ac/simultaneous\", False)\n timeout = 15.0 if robots_in_simultaneous else timeout\n while not success and (rospy.Time.now() - start_time < rospy.Duration(timeout)) and not rospy.is_shutdown():\n self.sequence_move_group.send_goal_and_wait(goal)\n response = self.sequence_move_group.get_result()\n\n group.clear_pose_targets()\n\n if response.response.error_code.val == 1: # Success\n plan = response.response.planned_trajectories[0] # support only one plan?\n # retime\n plan = self.robot_group.retime_trajectory(self.robot_group.get_current_state(), plan, algorithm=\"time_optimal_trajectory_generation\",\n velocity_scaling_factor=speed_, acceleration_scaling_factor=accel_)\n planning_time = response.response.planning_time\n if plan_only:\n return plan, planning_time\n else:\n return self.execute_plan(plan, wait=True)\n else:\n rospy.logerr(\"Failed to plan joint trajectory. error code: %s\" % response.response.error_code.val)\n if robots_in_simultaneous:\n rospy.sleep(1.0)\n else:\n rospy.sleep(0.2)\n # Update the joint state\n goal.request.items[0].req.start_state = helpers.to_robot_state(group, initial_joints if initial_joints else group.get_current_joint_values())\n return False\n\n def move_circ(self, pose_goal_stamped, constraint_point, constraint_type=\"center\", speed=0.5, acceleration=None, wait=True, end_effector_link=\"\",\n plan_only=False, initial_joints=None, timeout=5.0):\n if not self.set_up_move_group(speed, acceleration, \"CIRC\"):\n return False\n\n group = self.robot_group\n group.clear_pose_targets()\n\n if not end_effector_link:\n end_effector_link = self.ns + \"_gripper_tip_link\"\n group.set_end_effector_link(end_effector_link)\n\n if initial_joints:\n group.set_start_state(helpers.to_robot_state(group, initial_joints))\n\n pose_goal_world = self.listener.transformPose(\"world\", pose_goal_stamped)\n group.set_pose_target(pose_goal_world)\n\n constraint = moveit_msgs.msg.Constraints()\n if constraint_type not in (\"center\", \"interim\"):\n rospy.logerr(\"Invalid parameter: %s\" % constraint_type)\n return False\n constraint.name = constraint_type\n pc = moveit_msgs.msg.PositionConstraint()\n if constraint_type == \"center\":\n constraint_pose = conversions.from_pose_to_list(self.get_current_pose())[:3] - constraint_point\n constraint_pose = conversions.to_pose(constraint_pose.tolist()+[0, 0, 0])\n else:\n constraint_pose = conversions.to_pose(constraint_point+[0, 0, 0]) # Pose\n pc.constraint_region.primitive_poses = [constraint_pose]\n constraint.position_constraints = [pc]\n group.set_path_constraints(constraint)\n\n success = False\n start_time = rospy.Time.now()\n while not success and (rospy.Time.now() - start_time < rospy.Duration(timeout)) and not rospy.is_shutdown():\n success, plan, planning_time, error = group.plan()\n\n if success:\n 
if plan_only:\n group.clear_pose_targets()\n group.set_start_state_to_current_state()\n return plan, planning_time\n else:\n self.execute_plan(plan, wait=wait)\n else:\n rospy.sleep(0.2)\n rospy.logwarn(\"go_to_pose_goal attempt failed. Retrying.\")\n\n group.clear_pose_targets()\n return success\n"
] |
[
[
"numpy.deg2rad"
],
[
"numpy.all",
"numpy.array"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
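The go_to_pose_goal, go_to_named_pose and move_joints methods in the robot-control code of the row above all share the same wall-clock-bounded retry structure: plan, check for success, sleep briefly, and try again until a rospy.Duration timeout elapses or the node shuts down. A minimal plain-Python sketch of that pattern follows; plan_fn is a hypothetical callable standing in for group.plan(), and time.monotonic() replaces rospy.Time.now() so the snippet runs outside ROS.

    import time

    def plan_with_retries(plan_fn, timeout=5.0, retry_delay=0.2):
        """Call plan_fn() until it succeeds or the wall-clock budget runs out.

        plan_fn is a hypothetical callable returning (success, plan); it stands in
        for MoveGroupCommander.plan() in the robot code above.
        """
        start = time.monotonic()
        while time.monotonic() - start < timeout:
            success, plan = plan_fn()
            if success:
                return plan
            time.sleep(retry_delay)  # back off briefly before retrying, as the robot code does
        return None                  # caller treats None as a planning failure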
Lucmon/TopologyDetect
|
[
"79607f3ce39a1ee6ded41b2500629065cf1cfe51",
"79607f3ce39a1ee6ded41b2500629065cf1cfe51"
] |
[
"e2efold_rt/experiment_rnastralign/e2e_learning_stage1_rnastralign_all_long.py",
"e2efold_rt/experiment_archiveii/e2e_learning_stage1.py"
] |
[
"import os\nfrom e2efold.common.config import process_config\nfrom e2efold.common.utils import get_args\nargs = get_args()\n\nconfig_file = args.config\n\nconfig = process_config(config_file)\nprint(\"#####Stage 1#####\")\nprint('Here is the configuration of this run: ')\nprint(config)\nos.environ[\"CUDA_VISIBLE_DEVICES\"]= config.gpu\n\nimport torch.optim as optim\nfrom torch.utils import data\n\nfrom e2efold.models import ContactNetwork, ContactNetwork_test, ContactNetwork_fc\nfrom e2efold.models import ContactAttention, ContactAttention_simple_fix_PE\nfrom e2efold.models import ContactAttention_simple\nfrom e2efold.common.utils import *\nfrom e2efold.common.long_seq_pre_post_process import *\nfrom e2efold.postprocess import postprocess\n\n\nd = config.u_net_d\nBATCH_SIZE = config.batch_size_stage_1\nOUT_STEP = config.OUT_STEP\nLOAD_MODEL = config.LOAD_MODEL\npp_steps = config.pp_steps\ndata_type = config.data_type\nmodel_type = config.model_type\nmodel_path = '../models_ckpt/supervised_{}_{}_d{}_l3.pt'.format(model_type, data_type,d)\nepoches_first = config.epoches_first\nevaluate_epi = config.evaluate_epi_stage_1\n\n\nsteps_done = 0\n# if gpu is to be used\ndevice = torch.device(\"cuda\" if torch.cuda.is_available() else \"cpu\")\n\nseed_torch()\n\n# for loading data\n# loading the rna ss data, the data has been preprocessed\n# 5s data is just a demo data, which do not have pseudoknot, will generate another data having that\nfrom e2efold.data_generator import RNASSDataGenerator, Dataset, Dataset_1800\nimport collections\nRNA_SS_data = collections.namedtuple('RNA_SS_data', \n 'seq ss_label length name pairs')\n\ntrain_data = RNASSDataGenerator('../data/{}/'.format(data_type), 'train_600')\nval_data = RNASSDataGenerator('../data/{}/'.format(data_type), 'val_600')\nif data_type == 'archiveII_all':\n test_data = RNASSDataGenerator('../data/{}/'.format(data_type), 'test_600')\nif data_type == 'rnastralign_all':\n test_data = RNASSDataGenerator('../data/{}/'.format(data_type), 'test_no_redundant_600')\n\ntrain_data_1800 = RNASSDataGenerator('../data/{}/'.format(data_type), 'train_1800')\nval_data_1800 = RNASSDataGenerator('../data/{}/'.format(data_type), 'val_1800')\nif data_type == 'archiveII_all':\n test_data_1800 = RNASSDataGenerator('../data/{}/'.format(data_type), 'test_1800')\nif data_type == 'rnastralign_all':\n test_data_1800 = RNASSDataGenerator('../data/{}/'.format(data_type), 'test_no_redundant_1800')\n\n\nseq_len = train_data.data_y.shape[-2]\nprint('Max seq length ', seq_len)\n\n\n# using the pytorch interface to parallel the data generation and model training\nparams = {'batch_size': BATCH_SIZE,\n 'shuffle': True,\n 'num_workers': 6,\n 'drop_last': True}\ntrain_set = Dataset(train_data)\ntrain_generator = data.DataLoader(train_set, **params)\n\nval_set = Dataset(val_data)\nval_generator = data.DataLoader(val_set, **params)\n\nparams = {'batch_size': 1,\n 'shuffle': True,\n 'num_workers': 6,\n 'drop_last': False}\ntrain_set_1800 = Dataset_1800(train_data_1800)\ntrain_generator_1800 = data.DataLoader(train_set_1800, **params)\n\nval_set_1800 = Dataset_1800(val_data_1800)\nval_generator_1800 = data.DataLoader(val_set_1800, **params)\n\nparams = {'batch_size': BATCH_SIZE,\n 'shuffle': False,\n 'num_workers': 6,\n 'drop_last': True}\ntest_set = Dataset(test_data)\ntest_generator = data.DataLoader(test_set, **params)\n\nparams = {'batch_size': 1,\n 'shuffle': False,\n 'num_workers': 6,\n 'drop_last': False}\ntest_set_1800 = Dataset_1800(test_data_1800)\ntest_generator_1800 = 
data.DataLoader(test_set_1800, **params)\n\n\nif model_type =='test_lc':\n contact_net = ContactNetwork_test(d=d, L=seq_len).to(device)\nif model_type == 'att6':\n contact_net = ContactAttention(d=d, L=seq_len).to(device)\nif model_type == 'att_simple':\n contact_net = ContactAttention_simple(d=d, L=seq_len).to(device) \nif model_type == 'att_simple_fix':\n contact_net = ContactAttention_simple_fix_PE(d=d, L=seq_len, \n device=device).to(device)\nif model_type == 'fc':\n contact_net = ContactNetwork_fc(d=d, L=seq_len).to(device)\nif model_type == 'conv2d_fc':\n contact_net = ContactNetwork(d=d, L=seq_len).to(device)\n\n\nif LOAD_MODEL and os.path.isfile(model_path):\n print('Loading u net model...')\n contact_net.load_state_dict(torch.load(model_path))\n\n\nu_optimizer = optim.Adam(contact_net.parameters())\n\n# for length as 600\npos_weight = torch.Tensor([300]).to(device)\ncriterion_bce_weighted = torch.nn.BCEWithLogitsLoss(\n pos_weight = pos_weight)\n\n\n# randomly select one sample from the test set and perform the evaluation\ndef model_eval():\n contact_net.eval()\n contacts, seq_embeddings, matrix_reps, seq_lens = next(iter(val_generator))\n contacts_batch = torch.Tensor(contacts.float()).to(device)\n seq_embedding_batch = torch.Tensor(seq_embeddings.float()).to(device)\n\n # padding the states for supervised training with all 0s\n state_pad = torch.zeros(1,2,1).to(device)\n PE_batch = get_pe(seq_lens, 600).float().to(device)\n\n with torch.no_grad():\n pred_contacts = contact_net(PE_batch, \n seq_embedding_batch, state_pad)\n\n u_no_train = postprocess(pred_contacts,\n seq_embedding_batch, 0.01, 0.1, 50, 1.0, True)\n map_no_train = (u_no_train > 0.5).float()\n f1_no_train_tmp = list(map(lambda i: F1_low_tri(map_no_train.cpu()[i],\n contacts_batch.cpu()[i]), range(contacts_batch.shape[0])))\n print('Average val F1 score for 600 with pure post-processing: ', np.average(f1_no_train_tmp))\n\n seq_embedding_batch, PE_batch, contacts_batch, _, _, _, _ = next(iter(val_generator_1800))\n seq_embedding_batch = seq_embedding_batch[0].to(device)\n PE_batch = PE_batch[0].to(device)\n contacts_batch = contacts_batch[0]\n # padding the states for supervised training with all 0s\n state_pad = torch.zeros(1,2,2).to(device)\n with torch.no_grad():\n pred_contacts = contact_net(PE_batch, \n seq_embedding_batch, state_pad)\n u_no_train = postprocess(pred_contacts,\n seq_embedding_batch, 0.01, 0.1, 50, 1.0, True)\n map_no_train = (u_no_train > 0.5).float()\n f1_no_train_tmp = list(map(lambda i: F1_low_tri(map_no_train.cpu()[i],\n contacts_batch.cpu()[i]), range(contacts_batch.shape[0])))\n print('Average val F1 score for 1800 with pure post-processing: ', np.average(f1_no_train_tmp))\n\n\ndef model_eval_all_test():\n contact_net.eval()\n result_no_train = list()\n result_no_train_shift = list()\n seq_lens_list = list()\n batch_n = 0\n # for contacts, seq_embeddings, matrix_reps, seq_lens in test_generator:\n # if batch_n%10==0:\n # print('Batch number: ', batch_n)\n # batch_n += 1\n # contacts_batch = torch.Tensor(contacts.float()).to(device)\n # seq_embedding_batch = torch.Tensor(seq_embeddings.float()).to(device)\n\n # state_pad = torch.zeros(1,2,2).to(device)\n\n # PE_batch = get_pe(seq_lens, 600).float().to(device)\n # with torch.no_grad():\n # pred_contacts = contact_net(PE_batch, \n # seq_embedding_batch, state_pad)\n\n # # only post-processing without learning\n # u_no_train = postprocess(pred_contacts,\n # seq_embedding_batch, 0.01, 0.1, 50, 1.0, True)\n # map_no_train = (u_no_train > 
0.5).float()\n # result_no_train_tmp = list(map(lambda i: evaluate_exact(map_no_train.cpu()[i],\n # contacts_batch.cpu()[i]), range(contacts_batch.shape[0])))\n # result_no_train += result_no_train_tmp\n # result_no_train_tmp_shift = list(map(lambda i: evaluate_shifted(map_no_train.cpu()[i],\n # contacts_batch.cpu()[i]), range(contacts_batch.shape[0])))\n # result_no_train_shift += result_no_train_tmp_shift\n\n\n for seq_embedding_batch, PE_batch, contacts_batch, _, _, _, seq_lens in test_generator_1800:\n if batch_n%10==0:\n print('Batch number: ', batch_n)\n batch_n += 1\n seq_embedding_batch = seq_embedding_batch[0].to(device)\n PE_batch = PE_batch[0].to(device)\n contacts_batch = contacts_batch[0]\n # padding the states for supervised training with all 0s\n state_pad = torch.zeros(1,2,2).to(device)\n\n with torch.no_grad():\n pred_contacts = contact_net(PE_batch, seq_embedding_batch, state_pad)\n\n # only post-processing without learning\n u_no_train = postprocess(pred_contacts,\n seq_embedding_batch, 0.01, 0.1, 50, 1.0, True)\n map_no_train = (u_no_train > 0.5).float()\n result_no_train_tmp = list(map(lambda i: evaluate_exact(map_no_train.cpu()[i],\n contacts_batch.cpu()[i]), range(contacts_batch.shape[0])))\n result_no_train += result_no_train_tmp\n result_no_train_tmp_shift = list(map(lambda i: evaluate_shifted(map_no_train.cpu()[i],\n contacts_batch.cpu()[i]), range(contacts_batch.shape[0])))\n result_no_train_shift += result_no_train_tmp_shift\n seq_lens_list += list(seq_lens)\n\n\n nt_exact_p,nt_exact_r,nt_exact_f1 = zip(*result_no_train)\n nt_shift_p,nt_shift_r,nt_shift_f1 = zip(*result_no_train_shift)\n\n nt_exact_p = np.nan_to_num(np.array(nt_exact_p))\n nt_exact_r = np.nan_to_num(np.array(nt_exact_r))\n nt_exact_f1 = np.nan_to_num(np.array(nt_exact_f1))\n\n nt_shift_p = np.nan_to_num(np.array(nt_shift_p))\n nt_shift_r = np.nan_to_num(np.array(nt_shift_r))\n nt_shift_f1 = np.nan_to_num(np.array(nt_shift_f1))\n \n print('Average testing F1 score with pure post-processing: ', np.average(nt_exact_f1))\n print('Average testing F1 score with pure post-processing allow shift: ', np.average(nt_shift_f1))\n print('Average testing precision with pure post-processing: ', np.average(nt_exact_p))\n print('Average testing precision with pure post-processing allow shift: ', np.average(nt_shift_p))\n print('Average testing recall with pure post-processing: ', np.average(nt_exact_r))\n print('Average testing recall with pure post-processing allow shift: ', np.average(nt_shift_r))\n nt_exact_f1_agg = list()\n nt_shift_f1_agg = list()\n for i in range(len(seq_lens_list)):\n nt_exact_f1_agg.append(np.average(nt_exact_f1[i*15:(i+1)*15]))\n nt_shift_f1_agg.append(np.average(nt_shift_f1[i*15:(i+1)*15]))\n result_dict = dict()\n result_dict['exact_p'] = nt_exact_p\n result_dict['exact_r'] = nt_exact_r\n result_dict['exact_f1'] = nt_exact_f1\n result_dict['shift_p'] = nt_shift_p\n result_dict['shift_r'] = nt_shift_r\n result_dict['shift_f1'] = nt_shift_f1\n result_dict['seq_lens'] = seq_lens_list\n result_dict['exact_weighted_f1'] = np.sum(np.array(nt_exact_f1_agg)*np.array(seq_lens_list)/np.sum(seq_lens_list))\n result_dict['shift_weighted_f1'] = np.sum(np.array(nt_shift_f1_agg)*np.array(seq_lens_list)/np.sum(seq_lens_list))\n import _pickle as pickle\n with open('../results/rnastralign_long_pure_pp_evaluation_dict.pickle', 'wb') as f:\n pickle.dump(result_dict, f)\n\ndef model_eval_all_test_greedy_sort():\n contact_net.eval()\n result_no_train = list()\n result_no_train_shift = list()\n 
seq_lens_list = list()\n batch_n = 0\n for seq_embedding_batch, PE_batch, contacts_batch, comb_index, _, contacts, seq_lens in test_generator_1800:\n if batch_n%10==0:\n print('Batch number: ', batch_n)\n batch_n += 1\n seq_embedding_batch = seq_embedding_batch[0].to(device)\n PE_batch = PE_batch[0].to(device)\n contacts_batch = contacts_batch[0]\n # padding the states for supervised training with all 0s\n state_pad = torch.zeros(1,2,2).to(device)\n\n with torch.no_grad():\n pred_contacts = contact_net(PE_batch, seq_embedding_batch, state_pad)\n pred_u_map = combine_chunk_u_maps_no_replace(pred_contacts, comb_index, 6)\n pred_u_map = pred_u_map.unsqueeze(0)\n\n # only post-processing without learning\n map_no_train = conflict_sort(pred_u_map)\n result_no_train_tmp = list(map(lambda i: evaluate_exact(map_no_train[i],\n contacts.float().cpu()[i]), range(contacts.shape[0])))\n result_no_train += result_no_train_tmp\n result_no_train_tmp_shift = list(map(lambda i: evaluate_shifted(map_no_train[i],\n contacts.float().cpu()[i]), range(contacts.shape[0])))\n result_no_train_shift += result_no_train_tmp_shift\n seq_lens_list += list(seq_lens)\n\n\n nt_exact_p,nt_exact_r,nt_exact_f1 = zip(*result_no_train)\n nt_shift_p,nt_shift_r,nt_shift_f1 = zip(*result_no_train_shift)\n\n nt_exact_p = np.nan_to_num(np.array(nt_exact_p))\n nt_exact_r = np.nan_to_num(np.array(nt_exact_r))\n nt_exact_f1 = np.nan_to_num(np.array(nt_exact_f1))\n\n nt_shift_p = np.nan_to_num(np.array(nt_shift_p))\n nt_shift_r = np.nan_to_num(np.array(nt_shift_r))\n nt_shift_f1 = np.nan_to_num(np.array(nt_shift_f1))\n \n print('Average testing F1 score with pure post-processing: ', np.average(nt_exact_f1))\n print('Average testing F1 score with pure post-processing allow shift: ', np.average(nt_shift_f1))\n print('Average testing precision with pure post-processing: ', np.average(nt_exact_p))\n print('Average testing precision with pure post-processing allow shift: ', np.average(nt_shift_p))\n print('Average testing recall with pure post-processing: ', np.average(nt_exact_r))\n print('Average testing recall with pure post-processing allow shift: ', np.average(nt_shift_r))\n result_dict = dict()\n result_dict['exact_p'] = nt_exact_p\n result_dict['exact_r'] = nt_exact_r\n result_dict['exact_f1'] = nt_exact_f1\n result_dict['shift_p'] = nt_shift_p\n result_dict['shift_r'] = nt_shift_r\n result_dict['shift_f1'] = nt_shift_f1\n result_dict['seq_lens'] = seq_lens_list\n result_dict['exact_weighted_f1'] = np.sum(np.array(nt_exact_f1)*np.array(seq_lens_list)/np.sum(seq_lens_list))\n result_dict['shift_weighted_f1'] = np.sum(np.array(nt_shift_f1)*np.array(seq_lens_list)/np.sum(seq_lens_list))\n import _pickle as pickle\n with open('../results/rnastralign_long_greedy_sort_evaluation_dict.pickle', 'wb') as f:\n pickle.dump(result_dict, f)\n\ndef model_eval_all_test_greedy_sampling():\n contact_net.eval()\n result_no_train = list()\n result_no_train_shift = list()\n seq_lens_list = list()\n batch_n = 0\n for seq_embedding_batch, PE_batch, contacts_batch, comb_index, _, contacts, seq_lens in test_generator_1800:\n if batch_n%10==0:\n print('Batch number: ', batch_n)\n batch_n += 1\n seq_embedding_batch = seq_embedding_batch[0].to(device)\n PE_batch = PE_batch[0].to(device)\n contacts_batch = contacts_batch[0]\n # padding the states for supervised training with all 0s\n state_pad = torch.zeros(1,2,2).to(device)\n\n with torch.no_grad():\n pred_contacts = contact_net(PE_batch, seq_embedding_batch, state_pad)\n pred_u_map = 
combine_chunk_u_maps_no_replace(pred_contacts, comb_index, 6)\n pred_u_map = pred_u_map.unsqueeze(0)\n\n # only post-processing without learning\n map_no_train = conflict_sampling(pred_u_map)\n result_no_train_tmp = list(map(lambda i: evaluate_exact(map_no_train[i],\n contacts.float().cpu()[i]), range(contacts.shape[0])))\n result_no_train += result_no_train_tmp\n result_no_train_tmp_shift = list(map(lambda i: evaluate_shifted(map_no_train[i],\n contacts.float().cpu()[i]), range(contacts.shape[0])))\n result_no_train_shift += result_no_train_tmp_shift\n seq_lens_list += list(seq_lens)\n\n\n nt_exact_p,nt_exact_r,nt_exact_f1 = zip(*result_no_train)\n nt_shift_p,nt_shift_r,nt_shift_f1 = zip(*result_no_train_shift)\n\n nt_exact_p = np.nan_to_num(np.array(nt_exact_p))\n nt_exact_r = np.nan_to_num(np.array(nt_exact_r))\n nt_exact_f1 = np.nan_to_num(np.array(nt_exact_f1))\n\n nt_shift_p = np.nan_to_num(np.array(nt_shift_p))\n nt_shift_r = np.nan_to_num(np.array(nt_shift_r))\n nt_shift_f1 = np.nan_to_num(np.array(nt_shift_f1))\n \n print('Average testing F1 score with pure post-processing: ', np.average(nt_exact_f1))\n print('Average testing F1 score with pure post-processing allow shift: ', np.average(nt_shift_f1))\n print('Average testing precision with pure post-processing: ', np.average(nt_exact_p))\n print('Average testing precision with pure post-processing allow shift: ', np.average(nt_shift_p))\n print('Average testing recall with pure post-processing: ', np.average(nt_exact_r))\n print('Average testing recall with pure post-processing allow shift: ', np.average(nt_shift_r))\n result_dict = dict()\n result_dict['exact_p'] = nt_exact_p\n result_dict['exact_r'] = nt_exact_r\n result_dict['exact_f1'] = nt_exact_f1\n result_dict['shift_p'] = nt_shift_p\n result_dict['shift_r'] = nt_shift_r\n result_dict['shift_f1'] = nt_shift_f1\n result_dict['seq_lens'] = seq_lens_list\n result_dict['exact_weighted_f1'] = np.sum(np.array(nt_exact_f1)*np.array(seq_lens_list)/np.sum(seq_lens_list))\n result_dict['shift_weighted_f1'] = np.sum(np.array(nt_shift_f1)*np.array(seq_lens_list)/np.sum(seq_lens_list))\n import _pickle as pickle\n with open('../results/rnastralign_long_greedy_sampling_evaluation_dict.pickle', 'wb') as f:\n pickle.dump(result_dict, f)\n\n\n# There are three steps of training\n# step one: train the u net\nfor epoch in range(epoches_first):\n contact_net.train()\n print('On short sequence phase:')\n for contacts, seq_embeddings, matrix_reps, seq_lens in train_generator:\n contacts_batch = torch.Tensor(contacts.float()).to(device)\n seq_embedding_batch = torch.Tensor(seq_embeddings.float()).to(device)\n\n # padding the states for supervised training with all 0s\n state_pad = torch.zeros(1,2,2).to(device)\n\n PE_batch = get_pe(seq_lens, 600).float().to(device)\n contact_masks = torch.Tensor(contact_map_masks(seq_lens, 600)).to(device)\n pred_contacts = contact_net(PE_batch, \n seq_embedding_batch, state_pad)\n\n # Compute loss\n loss_u = criterion_bce_weighted(pred_contacts*contact_masks, contacts_batch)\n\n # print(steps_done)\n if steps_done % OUT_STEP ==0:\n print('Stage 1, epoch for 600: {}, step: {}, loss: {}'.format(\n epoch, steps_done, loss_u))\n u_no_train = postprocess(pred_contacts,\n seq_embedding_batch, 0.01, 0.1, 50, 1.0, True)\n map_no_train = (u_no_train > 0.5).float()\n f1_no_train_tmp = list(map(lambda i: F1_low_tri(map_no_train.cpu()[i],\n contacts_batch.cpu()[i]), range(contacts_batch.shape[0])))\n print('Average train F1 score for 600 with pure post-processing: ', 
np.average(f1_no_train_tmp))\n\n # Optimize the model\n u_optimizer.zero_grad()\n loss_u.backward()\n u_optimizer.step()\n steps_done=steps_done+1\n if steps_done % 600 ==0:\n break\n\n print('On long sequence phase:')\n u_optimizer.zero_grad()\n for seq_embedding_batch, PE_batch, contacts_batch, _, _, _, _ in train_generator_1800:\n\n # padding the states for supervised training with all 0s\n state_pad = torch.zeros(1,2,2).to(device)\n seq_embedding_batch = seq_embedding_batch[0].to(device)\n PE_batch = PE_batch[0].to(device)\n contacts_batch = contacts_batch[0].to(device)\n\n pred_contacts = contact_net(PE_batch, seq_embedding_batch, state_pad)\n\n # Compute loss\n loss_u = criterion_bce_weighted(pred_contacts, contacts_batch)\n\n # print(steps_done)\n if steps_done % OUT_STEP ==0:\n print('Stage 1, epoch for 1800: {},step: {}, loss: {}'.format(\n epoch, steps_done, loss_u))\n u_no_train = postprocess(pred_contacts,\n seq_embedding_batch, 0.01, 0.1, 50, 1.0, True)\n map_no_train = (u_no_train > 0.5).float()\n f1_no_train_tmp = list(map(lambda i: F1_low_tri(map_no_train.cpu()[i],\n contacts_batch.cpu()[i]), range(contacts_batch.shape[0])))\n print('Average train F1 score for 1800 with pure post-processing: ', np.average(f1_no_train_tmp))\n\n # Optimize the model\n loss_u.backward()\n if steps_done % 5 ==0:\n u_optimizer.step()\n u_optimizer.zero_grad()\n steps_done=steps_done+1\n if steps_done % 150 ==0:\n break\n\n if epoch%evaluate_epi==0:\n model_eval()\n torch.save(contact_net.state_dict(), model_path)\n\n# model_eval_all_test()\n\n# sys.exit()\n\n\n\n\n\n\n\n",
"import torch.optim as optim\nfrom torch.utils import data\n\nfrom e2efold.models import ContactNetwork, ContactNetwork_test, ContactNetwork_fc\nfrom e2efold.models import ContactAttention, ContactAttention_simple_fix_PE\nfrom e2efold.models import ContactAttention_simple\nfrom e2efold.common.utils import *\nfrom e2efold.common.config import process_config\nfrom e2efold.postprocess import postprocess\n\nargs = get_args()\n\nconfig_file = args.config\n\nconfig = process_config(config_file)\nprint(\"#####Stage 1#####\")\nprint('Here is the configuration of this run: ')\nprint(config)\n\n\nos.environ[\"CUDA_VISIBLE_DEVICES\"]= config.gpu\n\nd = config.u_net_d\nBATCH_SIZE = config.batch_size_stage_1\nOUT_STEP = config.OUT_STEP\nLOAD_MODEL = config.LOAD_MODEL\npp_steps = config.pp_steps\ndata_type = config.data_type\nmodel_type = config.model_type\nmodel_path = '../models_ckpt/supervised_{}_{}_d{}_l3.pt'.format(model_type, data_type,d)\nepoches_first = config.epoches_first\nevaluate_epi = config.evaluate_epi_stage_1\n\n\nsteps_done = 0\n# if gpu is to be used\ndevice = torch.device(\"cuda\" if torch.cuda.is_available() else \"cpu\")\n\nseed_torch()\n\n# for loading data\n# loading the rna ss data, the data has been preprocessed\n# 5s data is just a demo data, which do not have pseudoknot, will generate another data having that\nfrom e2efold.data_generator import RNASSDataGenerator, Dataset\nimport collections\nRNA_SS_data = collections.namedtuple('RNA_SS_data', \n 'seq ss_label length name pairs')\n\ntrain_data = RNASSDataGenerator('../data/{}/'.format(data_type), 'train')\nval_data = RNASSDataGenerator('../data/{}/'.format(data_type), 'val')\ntest_data = RNASSDataGenerator('../data/{}/'.format(data_type), 'test_no_redundant')\n# test_data = RNASSDataGenerator('../data/rnastralign_all/', 'test_no_redundant_600')\n\nseq_len = train_data.data_y.shape[-2]\nprint('Max seq length ', seq_len)\n\n\n# using the pytorch interface to parallel the data generation and model training\nparams = {'batch_size': BATCH_SIZE,\n 'shuffle': True,\n 'num_workers': 6,\n 'drop_last': True}\ntrain_set = Dataset(train_data)\ntrain_generator = data.DataLoader(train_set, **params)\n\nval_set = Dataset(val_data)\nval_generator = data.DataLoader(val_set, **params)\n\ntest_set = Dataset(test_data)\ntest_generator = data.DataLoader(test_set, **params)\n\n# seq_len =500\n\n# store the intermidiate activation\n\nactivation = {}\ndef get_activation(name):\n def hook(model, input, output):\n activation[name] = output.detach()\n return hook\n\nif model_type =='test_lc':\n contact_net = ContactNetwork_test(d=d, L=seq_len).to(device)\nif model_type == 'att6':\n contact_net = ContactAttention(d=d, L=seq_len).to(device)\nif model_type == 'att_simple':\n contact_net = ContactAttention_simple(d=d, L=seq_len).to(device) \nif model_type == 'att_simple_fix':\n contact_net = ContactAttention_simple_fix_PE(d=d, L=seq_len, \n device=device).to(device)\nif model_type == 'fc':\n contact_net = ContactNetwork_fc(d=d, L=seq_len).to(device)\nif model_type == 'conv2d_fc':\n contact_net = ContactNetwork(d=d, L=seq_len).to(device)\n\n# contact_net.conv1d2.register_forward_hook(get_activation('conv1d2'))\n\nif LOAD_MODEL and os.path.isfile(model_path):\n print('Loading u net model...')\n contact_net.load_state_dict(torch.load(model_path))\n\n\nu_optimizer = optim.Adam(contact_net.parameters())\n\n# for 5s\n# pos_weight = torch.Tensor([100]).to(device)\n# for length as 600\npos_weight = torch.Tensor([300]).to(device)\ncriterion_bce_weighted = 
torch.nn.BCEWithLogitsLoss(\n pos_weight = pos_weight)\n\n\n# randomly select one sample from the test set and perform the evaluation\ndef model_eval():\n contact_net.eval()\n contacts, seq_embeddings, matrix_reps, seq_lens = next(iter(val_generator))\n contacts_batch = torch.Tensor(contacts.float()).to(device)\n seq_embedding_batch = torch.Tensor(seq_embeddings.float()).to(device)\n matrix_reps_batch = torch.unsqueeze(\n torch.Tensor(matrix_reps.float()).to(device), -1)\n\n # padding the states for supervised training with all 0s\n state_pad = torch.zeros([matrix_reps_batch.shape[0], \n seq_len, seq_len]).to(device)\n PE_batch = get_pe(seq_lens, seq_len).float().to(device)\n\n with torch.no_grad():\n pred_contacts = contact_net(PE_batch, \n seq_embedding_batch, state_pad)\n\n u_no_train = postprocess(pred_contacts,\n seq_embedding_batch, 0.01, 0.1, 50, 1.0, True)\n map_no_train = (u_no_train > 0.5).float()\n f1_no_train_tmp = list(map(lambda i: F1_low_tri(map_no_train.cpu()[i],\n contacts_batch.cpu()[i]), range(contacts_batch.shape[0])))\n print('Average val F1 score with pure post-processing: ', np.average(f1_no_train_tmp))\n\ndef model_eval_all_test():\n contact_net.eval()\n result_no_train = list()\n result_no_train_shift = list()\n batch_n = 0\n for contacts, seq_embeddings, matrix_reps, seq_lens in test_generator:\n if batch_n%10==0:\n print('Batch number: ', batch_n)\n batch_n += 1\n contacts_batch = torch.Tensor(contacts.float()).to(device)\n seq_embedding_batch = torch.Tensor(seq_embeddings.float()).to(device)\n matrix_reps_batch = torch.unsqueeze(\n torch.Tensor(matrix_reps.float()).to(device), -1)\n\n state_pad = torch.zeros([matrix_reps_batch.shape[0], \n seq_len, seq_len]).to(device)\n\n PE_batch = get_pe(seq_lens, seq_len).float().to(device)\n with torch.no_grad():\n pred_contacts = contact_net(PE_batch, \n seq_embedding_batch, state_pad)\n\n # only post-processing without learning\n u_no_train = postprocess(pred_contacts,\n seq_embedding_batch, 0.01, 0.1, 50, 1.0, True)\n map_no_train = (u_no_train > 0.5).float()\n result_no_train_tmp = list(map(lambda i: evaluate_exact(map_no_train.cpu()[i],\n contacts_batch.cpu()[i]), range(contacts_batch.shape[0])))\n result_no_train += result_no_train_tmp\n result_no_train_tmp_shift = list(map(lambda i: evaluate_shifted(map_no_train.cpu()[i],\n contacts_batch.cpu()[i]), range(contacts_batch.shape[0])))\n result_no_train_shift += result_no_train_tmp_shift\n\n nt_exact_p,nt_exact_r,nt_exact_f1 = zip(*result_no_train)\n nt_shift_p,nt_shift_r,nt_shift_f1 = zip(*result_no_train_shift) \n \n print('Average testing F1 score with pure post-processing: ', np.average(nt_exact_f1))\n\n print('Average testing F1 score with pure post-processing allow shift: ', np.average(nt_shift_f1))\n\n print('Average testing precision with pure post-processing: ', np.average(nt_exact_p))\n\n print('Average testing precision with pure post-processing allow shift: ', np.average(nt_shift_p))\n\n print('Average testing recall with pure post-processing: ', np.average(nt_exact_r))\n\n print('Average testing recall with pure post-processing allow shift: ', np.average(nt_shift_r))\n\n\n# There are three steps of training\n# step one: train the u net\nfor epoch in range(epoches_first):\n contact_net.train()\n # num_batches = int(np.ceil(train_data.len / BATCH_SIZE))\n # for i in range(num_batches):\n\n for contacts, seq_embeddings, matrix_reps, seq_lens in train_generator:\n # contacts, seq_embeddings, matrix_reps, seq_lens = next(iter(train_generator))\n\n contacts_batch 
= torch.Tensor(contacts.float()).to(device)\n seq_embedding_batch = torch.Tensor(seq_embeddings.float()).to(device)\n matrix_reps_batch = torch.unsqueeze(\n torch.Tensor(matrix_reps.float()).to(device), -1)\n\n # padding the states for supervised training with all 0s\n state_pad = torch.zeros([matrix_reps_batch.shape[0], \n seq_len, seq_len]).to(device)\n\n\n PE_batch = get_pe(seq_lens, seq_len).float().to(device)\n contact_masks = torch.Tensor(contact_map_masks(seq_lens, seq_len)).to(device)\n pred_contacts = contact_net(PE_batch, \n seq_embedding_batch, state_pad)\n\n # Compute loss\n loss_u = criterion_bce_weighted(pred_contacts*contact_masks, contacts_batch)\n\n # print(steps_done)\n if steps_done % OUT_STEP ==0:\n print('Stage 1, epoch: {},step: {}, loss: {}'.format(\n epoch, steps_done, loss_u))\n\n # Optimize the model\n u_optimizer.zero_grad()\n loss_u.backward()\n u_optimizer.step()\n steps_done=steps_done+1\n\n if epoch%evaluate_epi==0:\n model_eval()\n torch.save(contact_net.state_dict(), model_path)\n\nmodel_eval_all_test()\n\n# sys.exit()\n\n\n\n\n\n\n\n"
] |
[
[
"torch.utils.data.DataLoader"
],
[
"torch.utils.data.DataLoader"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
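In the long-sequence (1800 nt) phase of the e2efold training script above, samples are processed with batch size 1 and the optimizer only steps every fifth iteration, so gradients are accumulated across several batches to emulate a larger effective batch (the original loop does not rescale the loss by the accumulation factor). A self-contained sketch of that accumulation pattern; the Linear model and random tensors are toy placeholders, not part of e2efold.

    import torch
    import torch.nn as nn
    import torch.optim as optim

    model = nn.Linear(8, 1)                    # toy stand-in for the contact network
    optimizer = optim.Adam(model.parameters())
    criterion = nn.BCEWithLogitsLoss()
    accumulate_every = 5                       # mirrors "if steps_done % 5 == 0" above

    optimizer.zero_grad()
    for step in range(20):
        x = torch.randn(4, 8)                  # toy batch
        y = torch.randint(0, 2, (4, 1)).float()
        loss = criterion(model(x), y)
        loss.backward()                        # gradients accumulate across iterations
        if (step + 1) % accumulate_every == 0:
            optimizer.step()                   # apply the accumulated gradient
            optimizer.zero_grad()              # reset for the next group of batches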
wslerry/sketch_simplification
|
[
"e47c95e9c77757d520725628937fe610ba04c8f9"
] |
[
"simplify.py"
] |
[
"import torch\nfrom torchvision import transforms\nfrom torchvision.utils import save_image\n#from torch.utils.serialization import load_lua\nimport torchfile\n\nfrom PIL import Image\nimport argparse\n\nparser = argparse.ArgumentParser(description='Sketch simplification demo.')\nparser.add_argument('--model', type=str, default='model_gan.t7', help='Model to use.')\nparser.add_argument('--img', type=str, default='test.png', help='Input image file.')\nparser.add_argument('--out', type=str, default='out.png', help='File to output.')\nopt = parser.parse_args()\n\nuse_cuda = torch.cuda.device_count() > 0\n\ncache = torchfile.load(opt.model)\n#cache = load_lua( opt.model )\nmodel = cache.model\nimmean = cache.mean\nimstd = cache.std\nmodel.evaluate()\n\ndata = Image.open( opt.img ).convert('L')\nw, h = data.size[0], data.size[1]\npw = 8-(w%8) if w%8!=0 else 0\nph = 8-(h%8) if h%8!=0 else 0\ndata = ((transforms.ToTensor()(data)-immean)/imstd).unsqueeze(0)\nif pw!=0 or ph!=0:\n data = torch.nn.ReplicationPad2d( (0,pw,0,ph) )( data ).data\n\nif use_cuda:\n pred = model.cuda().forward( data.cuda() ).float()\nelse:\n pred = model.forward( data )\nsave_image( pred[0], opt.out )\n\n\n"
] |
[
[
"torch.cuda.device_count",
"torch.nn.ReplicationPad2d"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
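simplify.py in the row above pads the input image with ReplicationPad2d so that its width and height become multiples of 8 before the forward pass (pw and ph are the extra columns and rows). A small sketch of the same computation as a reusable helper; the factor of 8 is taken from the script and presumably matches the network's downsampling.

    import torch

    def pad_to_multiple(img, multiple=8):
        # Replication-pad an NxCxHxW tensor on the right/bottom so H and W are
        # multiples of `multiple`, mirroring the pw/ph logic in simplify.py.
        _, _, h, w = img.shape
        pw = (multiple - w % multiple) % multiple   # columns added on the right
        ph = (multiple - h % multiple) % multiple   # rows added on the bottom
        if pw or ph:
            img = torch.nn.ReplicationPad2d((0, pw, 0, ph))(img)
        return img

    x = torch.rand(1, 1, 333, 501)
    print(pad_to_multiple(x).shape)  # torch.Size([1, 1, 336, 504])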
ShrishtiHore/DL_Text_Predictor
|
[
"049696f1995b2e23b95f7283b07b37982c77b681"
] |
[
"text_predictor.py"
] |
[
"import tensorflow as tf\nfrom data_provider import DataProvider\nfrom rnn_model import RNNModel\nimport sys\nimport matplotlib\nimport numpy as np\nimport time\nmatplotlib.use(\"Agg\")\nimport matplotlib.pyplot as plt\n\n# Args\nif len(sys.argv) != 2:\n print(\"Please select a dataset.\")\n print(\"Usage: python text_predictor.py <dataset>\")\n print(\"Available datasets: kanye, shakespeare, wikipedia, reuters, hackernews, war_and_peace, sherlock\")\n exit(1)\nelse:\n dataset = sys.argv[1]\n\ndataset = sys.argv[0]\ndata_dir = \"./data/\" + dataset\ntensorboard_dir = data_dir + \"/tensorboard/\" + str(time.strftime(\"%Y-%m-%d_%H-%M-%S\"))\ninput_file = data_dir + \"/input.txt\"\noutput_file = data_dir + \"/output.txt\"\noutput = open(output_file, \"w\")\noutput.close()\n\n# Hyperparams\nBATCH_SIZE = 32\nSEQUENCE_LENGTH = 25\nLEARNING_RATE = 0.01\nDECAY_RATE = 0.97\nHIDDEN_LAYER_SIZE = 256\nCELLS_SIZE = 2\n\nTEXT_SAMPLE_LENGTH = 500\nSAMPLING_FREQUENCY = 1000\nLOGGING_FREQUENCY = 1000\n\n\ndef rnn():\n data_provider = DataProvider(data_dir, BATCH_SIZE, SEQUENCE_LENGTH)\n model = RNNModel(data_provider.vocabulary_size, batch_size=BATCH_SIZE, sequence_length=SEQUENCE_LENGTH, hidden_layer_size=HIDDEN_LAYER_SIZE, cells_size=CELLS_SIZE)\n\n with tf.Session() as sess:\n\n summaries = tf.summary.merge_all()\n writer = tf.summary.FileWriter(tensorboard_dir)\n writer.add_graph(sess.graph)\n sess.run(tf.global_variables_initializer())\n\n epoch = 0\n temp_losses = []\n smooth_losses = []\n\n while True:\n sess.run(tf.assign(model.learning_rate, LEARNING_RATE * (DECAY_RATE ** epoch)))\n data_provider.reset_batch_pointer()\n state = sess.run(model.initial_state)\n for batch in range(data_provider.batches_size):\n inputs, targets = data_provider.next_batch()\n feed = {model.input_data: inputs, model.targets: targets}\n for index, (c, h) in enumerate(model.initial_state):\n feed[c] = state[index].c\n feed[h] = state[index].h\n iteration = epoch * data_provider.batches_size + batch\n summary, loss, state, _ = sess.run([summaries, model.cost, model.final_state, model.train_op], feed)\n writer.add_summary(summary, iteration)\n temp_losses.append(loss)\n\n if iteration % SAMPLING_FREQUENCY == 0:\n sample_text(sess, data_provider, iteration)\n\n if iteration % LOGGING_FREQUENCY == 0:\n smooth_loss = np.mean(temp_losses)\n smooth_losses.append(smooth_loss)\n temp_losses = []\n plot(smooth_losses, \"iterations (thousands)\", \"loss\")\n print('{{\"metric\": \"iteration\", \"value\": {}}}'.format(iteration))\n print('{{\"metric\": \"epoch\", \"value\": {}}}'.format(epoch))\n print('{{\"metric\": \"loss\", \"value\": {}}}'.format(smooth_loss))\n epoch += 1\n\ndef sample_text(sess, data_provider, iteration):\n model = RNNModel(data_provider.vocabulary_size, batch_size=1, sequence_length=1, hidden_layer_size=HIDDEN_LAYER_SIZE, cells_size=CELLS_SIZE, training=False)\n text = model.sample(sess, data_provider.chars, data_provider.vocabulary, TEXT_SAMPLE_LENGTH).encode(\"utf-8\")\n output = open(output_file, \"a\")\n output.write(\"Iteration: \" + str(iteration) + \"\\n\")\n output.write(text + \"\\n\")\n output.write(\"\\n\")\n output.close()\n\ndef plot(data, x_label, y_label):\n plt.plot(range(len(data)), data)\n plt.title(dataset)\n plt.xlabel(x_label)\n plt.ylabel(y_label)\n plt.savefig(data_dir + \"/\" + y_label + \".png\", bbox_inches=\"tight\")\n plt.close()\n\n\nif __name__ == '__main__':\n print(\"Selected dataset: \") + str(dataset)\n print(\"Batch size: \") + str(BATCH_SIZE)\n print(\"Sequence length: \") + 
str(SEQUENCE_LENGTH)\n print(\"Learning rate: \") + str(LEARNING_RATE)\n print(\"Decay rate: \") + str(DECAY_RATE)\n print(\"Hidden layer size: \") + str(HIDDEN_LAYER_SIZE)\n print(\"Cells size: \") + str(CELLS_SIZE)\n rnn()\n"
] |
[
[
"tensorflow.summary.FileWriter",
"matplotlib.pyplot.title",
"matplotlib.use",
"tensorflow.assign",
"matplotlib.pyplot.savefig",
"tensorflow.global_variables_initializer",
"tensorflow.summary.merge_all",
"numpy.mean",
"matplotlib.pyplot.close",
"tensorflow.Session",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.ylabel"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": [
"1.10"
]
}
] |
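The training loop in text_predictor.py above re-assigns the learning rate once per epoch as LEARNING_RATE * (DECAY_RATE ** epoch) via tf.assign. The schedule itself is plain exponential decay and can be inspected without TensorFlow; a minimal sketch using the constants from the script:

    LEARNING_RATE = 0.01
    DECAY_RATE = 0.97

    def decayed_lr(epoch):
        # Per-epoch exponential decay, as assigned to model.learning_rate above.
        return LEARNING_RATE * (DECAY_RATE ** epoch)

    for epoch in (0, 10, 50, 100):
        print(epoch, round(decayed_lr(epoch), 6))  # 0.01, 0.007374, 0.002181, 0.000476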
arti1117/deep-learning-from-scratch
|
[
"6ee8cb8c16b396bfd7cb6ad6530c00bf503c6440"
] |
[
"CHAPTER03/sigmoid.py"
] |
[
"import numpy as np\n\ndef sigmoid(x):\n return 1 / (1 + np.exp(-x))\n\n"
] |
[
[
"numpy.exp"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
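The one-line sigmoid in CHAPTER03/sigmoid.py above is the textbook form; note that np.exp(-x) overflows (with a RuntimeWarning) for large negative inputs, even though the result still rounds to 0. A common numerically stable variant, offered here as an optional alternative rather than as the book's implementation, branches on the sign of x:

    import numpy as np

    def sigmoid_stable(x):
        # Evaluate 1 / (1 + exp(-x)) without overflowing exp for large |x|.
        x = np.asarray(x, dtype=float)
        out = np.empty_like(x)
        pos = x >= 0
        out[pos] = 1.0 / (1.0 + np.exp(-x[pos]))  # exp argument <= 0 here
        ex = np.exp(x[~pos])                      # exp argument < 0 here
        out[~pos] = ex / (1.0 + ex)
        return out

    print(sigmoid_stable(np.array([-1000.0, 0.0, 1000.0])))  # [0.  0.5 1. ]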
danjampro/panoptes-utils
|
[
"ff51019cdd0e188cf5e8d8d70fc3579776a31716"
] |
[
"src/panoptes/utils/images/bayer.py"
] |
[
"from decimal import Decimal\nfrom enum import IntEnum\n\nimport numpy as np\nfrom astropy.io import fits\nfrom astropy.stats import SigmaClip\nfrom loguru import logger\nfrom panoptes.utils.images import fits as fits_utils\nfrom photutils import Background2D\nfrom photutils import BkgZoomInterpolator\nfrom photutils import MeanBackground\nfrom photutils import MedianBackground\nfrom photutils import MMMBackground\nfrom photutils import SExtractorBackground\n\n\nclass RGB(IntEnum):\n \"\"\"Helper class for array index access.\"\"\"\n RED = 0\n R = 0\n GREEN = 1\n G = 1\n G1 = 1\n BLUE = 2\n B = 2\n\n\ndef get_rgb_data(data, separate_green=False):\n \"\"\"Get the data split into separate channels for RGB.\n\n `data` can be a 2D (`W x H`) or 3D (`N x W x H`) array where W=width\n and H=height of the data, with N=number of frames.\n\n The return array will be a `3 x W x H` or `3 x N x W x H` array.\n\n The Bayer array defines a superpixel as a collection of 4 pixels\n set in a square grid::\n\n R G\n G B\n\n `ds9` and other image viewers define the coordinate axis from the\n lower left corner of the image, which is how a traditional x-y plane\n is defined and how most images would expect to look when viewed. This\n means that the `(0, 0)` coordinate position will be in the lower left\n corner of the image.\n\n When the data is loaded into a `numpy` array the data is flipped on the\n vertical axis in order to maintain the same indexing/slicing features.\n This means the the ``(0, 0)`` coordinate position is in the upper-left\n corner of the array when output. When plotting this array one can use\n the ``origin='lower'`` option to view the array as would be expected in\n a normal image although this does not change the actual index.\n\n Image dimensions::\n\n ----------------------------\n x | width | i | columns | 5208\n y | height | j | rows | 3476\n\n Bayer pattern as seen in ds9::\n\n x / j\n\n 0 1 2 3 ... 5204 5205 5206 5207\n --------------------------------------------\n 3475 | R G1 R G1 R G1 R G1\n 3474 | G2 B G2 B G2 B G2 B\n 3473 | R G1 R G1 R G1 R G1\n 3472 | G2 B G2 B G2 B G2 B\n . |\n y / i . |\n . |\n 3 | R G1 R G1 R G1 R G1\n 2 | G2 B G2 B G2 B G2 B\n 1 | R G1 R G1 R G1 R G1\n 0 | G2 B G2 B G2 B G2 B\n\n The RGGB super-pixels thus start in the upper-left.\n\n Bayer pattern as seen in a numpy array::\n\n x / j\n\n 0 1 2 3 ... 5204 5205 5206 5207\n --------------------------------------------\n 0 | G2 B G2 B G2 B G2 B\n 1 | R G1 R G1 R G1 R G1\n 2 | G2 B G2 B G2 B G2 B\n 3 | R G1 R G1 R G1 R G1\n . |\n y / i . |\n . 
|\n 3472 | G2 B G2 B G2 B G2 B\n 3473 | R G1 R G1 R G1 R G1\n 3474 | G2 B G2 B G2 B G2 B\n 3475 | R G1 R G1 R G1 R G1\n\n Here the RGGB super-pixels are flipped upside down.\n\n In both cases the data is in the following format::\n\n | row (y) | col (x)\n --------------| ------\n R | odd i, | even j\n G1 | odd i, | odd j\n G2 | even i, | even j\n B | even i, | odd j\n\n And a mask can therefore be generated as::\n\n bayer[1::2, 0::2] = 1 # Red\n bayer[1::2, 1::2] = 1 # Green\n bayer[0::2, 0::2] = 1 # Green\n bayer[0::2, 1::2] = 1 # Blue\n\n \"\"\"\n rgb_masks = get_rgb_masks(data, separate_green=separate_green)\n\n color_data = list()\n\n # Red\n color_data.append(np.ma.array(data, mask=rgb_masks[0]))\n\n # Green\n color_data.append(np.ma.array(data, mask=rgb_masks[1]))\n\n if separate_green:\n color_data.append(np.ma.array(data, mask=rgb_masks[2]))\n\n # Blue\n color_data.append(np.ma.array(data, mask=rgb_masks[-1]))\n\n return np.ma.array(color_data)\n\n\ndef get_rgb_masks(data, separate_green=False):\n \"\"\"Get the RGGB Bayer pattern for the given data.\n\n .. note::\n\n See :py:func:`get_rgb_data` for a description of the RGGB pattern.\n\n Args:\n data (`np.array`): An array of data representing an image.\n separate_green (bool, optional): If the two green channels should be separated,\n default False.\n\n Returns:\n tuple(np.array, np.array, np.array): A 3-tuple of numpy arrays of `bool` type.\n \"\"\"\n\n r_mask = np.ones_like(data).astype(bool)\n g1_mask = np.ones_like(data).astype(bool)\n b_mask = np.ones_like(data).astype(bool)\n\n if separate_green:\n g2_mask = np.ones_like(data).astype(bool)\n else:\n g2_mask = g1_mask\n\n if data.ndim == 2:\n r_mask[1::2, 0::2] = False\n g1_mask[1::2, 1::2] = False\n g2_mask[0::2, 0::2] = False\n b_mask[0::2, 1::2] = False\n elif data.ndim == 3:\n r_mask[..., 1::2, 0::2] = False\n g1_mask[..., 1::2, 1::2] = False\n g2_mask[..., 0::2, 0::2] = False\n b_mask[..., 0::2, 1::2] = False\n else:\n raise TypeError('Only 2D and 3D data allowed')\n\n if separate_green:\n return np.array([r_mask, g1_mask, g2_mask, b_mask])\n else:\n return np.array([r_mask, g1_mask, b_mask])\n\n\ndef get_pixel_color(x, y):\n \"\"\" Given a zero-indexed x,y position, return the corresponding color.\n\n .. note::\n\n See :py:func:`get_rgb_data` for a description of the RGGB pattern.\n\n Returns:\n str: one of 'R', 'G1', 'G2', 'B'\n \"\"\"\n x = int(x)\n y = int(y)\n if x % 2 == 0:\n if y % 2 == 0:\n return 'G2'\n else:\n return 'R'\n else:\n if y % 2 == 0:\n return 'B'\n else:\n return 'G1'\n\n\ndef get_stamp_slice(x, y, stamp_size=(14, 14), ignore_superpixel=False, as_slices=True):\n \"\"\"Get the slice around a given position with fixed Bayer pattern.\n\n Given an x,y pixel position, get the slice object for a stamp of a given size\n but make sure the first position corresponds to a red-pixel. This means that\n x,y will not necessarily be at the center of the resulting stamp.\n\n .. doctest::\n\n >>> from panoptes.utils.images import bayer\n >>> # Make a super-pixel as represented in numpy (see full stamp below).\n >>> superpixel = np.array(['G2', 'B', 'R', 'G1']).reshape(2, 2)\n >>> superpixel\n array([['G2', 'B'],\n ['R', 'G1']], dtype='<U2')\n >>> # Tile it into a 5x5 grid of super-pixels, i.e. 
a 10x10 stamp.\n >>> stamp0 = np.tile(superpixel, (5, 5))\n >>> stamp0\n array([['G2', 'B', 'G2', 'B', 'G2', 'B', 'G2', 'B', 'G2', 'B'],\n ['R', 'G1', 'R', 'G1', 'R', 'G1', 'R', 'G1', 'R', 'G1'],\n ['G2', 'B', 'G2', 'B', 'G2', 'B', 'G2', 'B', 'G2', 'B'],\n ['R', 'G1', 'R', 'G1', 'R', 'G1', 'R', 'G1', 'R', 'G1'],\n ['G2', 'B', 'G2', 'B', 'G2', 'B', 'G2', 'B', 'G2', 'B'],\n ['R', 'G1', 'R', 'G1', 'R', 'G1', 'R', 'G1', 'R', 'G1'],\n ['G2', 'B', 'G2', 'B', 'G2', 'B', 'G2', 'B', 'G2', 'B'],\n ['R', 'G1', 'R', 'G1', 'R', 'G1', 'R', 'G1', 'R', 'G1'],\n ['G2', 'B', 'G2', 'B', 'G2', 'B', 'G2', 'B', 'G2', 'B'],\n ['R', 'G1', 'R', 'G1', 'R', 'G1', 'R', 'G1', 'R', 'G1']],\n dtype='<U2')\n >>> stamp1 = np.arange(100).reshape(10, 10)\n >>> stamp1\n array([[ 0, 1, 2, 3, 4, 5, 6, 7, 8, 9],\n [10, 11, 12, 13, 14, 15, 16, 17, 18, 19],\n [20, 21, 22, 23, 24, 25, 26, 27, 28, 29],\n [30, 31, 32, 33, 34, 35, 36, 37, 38, 39],\n [40, 41, 42, 43, 44, 45, 46, 47, 48, 49],\n [50, 51, 52, 53, 54, 55, 56, 57, 58, 59],\n [60, 61, 62, 63, 64, 65, 66, 67, 68, 69],\n [70, 71, 72, 73, 74, 75, 76, 77, 78, 79],\n [80, 81, 82, 83, 84, 85, 86, 87, 88, 89],\n [90, 91, 92, 93, 94, 95, 96, 97, 98, 99]])\n >>> x = 7\n >>> y = 5\n >>> pixel_index = (y, x) # y=rows, x=columns\n >>> stamp0[pixel_index]\n 'G1'\n >>> stamp1[pixel_index]\n 57\n >>> slice0 = bayer.get_stamp_slice(x, y, stamp_size=(6, 6))\n >>> slice0\n (slice(2, 8, None), slice(4, 10, None))\n >>> stamp0[slice0]\n array([['G2', 'B', 'G2', 'B', 'G2', 'B'],\n ['R', 'G1', 'R', 'G1', 'R', 'G1'],\n ['G2', 'B', 'G2', 'B', 'G2', 'B'],\n ['R', 'G1', 'R', 'G1', 'R', 'G1'],\n ['G2', 'B', 'G2', 'B', 'G2', 'B'],\n ['R', 'G1', 'R', 'G1', 'R', 'G1']], dtype='<U2')\n >>> stamp1[slice0]\n array([[24, 25, 26, 27, 28, 29],\n [34, 35, 36, 37, 38, 39],\n [44, 45, 46, 47, 48, 49],\n [54, 55, 56, 57, 58, 59],\n [64, 65, 66, 67, 68, 69],\n [74, 75, 76, 77, 78, 79]])\n >>> # Return y_min, y_max, x_min, x_max\n >>> bayer.get_stamp_slice(x, y, stamp_size=(6, 6), as_slices=False)\n (2, 8, 4, 10)\n\n The original index had a value of `57`, which is within the center superpixel.\n\n Notice that the resulting stamp has a super-pixel in the center and is bordered on all sides by a complete\n superpixel. This is required by default and an invalid size\n\n We can use `ignore_superpixel=True` to get an odd-sized stamp.\n\n .. doctest::\n\n >>> slice1 = bayer.get_stamp_slice(x, y, stamp_size=(5, 5), ignore_superpixel=True)\n >>> slice1\n (slice(3, 8, None), slice(5, 10, None))\n >>> stamp0[slice1]\n array([['G1', 'R', 'G1', 'R', 'G1'],\n ['B', 'G2', 'B', 'G2', 'B'],\n ['G1', 'R', 'G1', 'R', 'G1'],\n ['B', 'G2', 'B', 'G2', 'B'],\n ['G1', 'R', 'G1', 'R', 'G1']], dtype='<U2')\n >>> stamp1[slice1]\n array([[35, 36, 37, 38, 39],\n [45, 46, 47, 48, 49],\n [55, 56, 57, 58, 59],\n [65, 66, 67, 68, 69],\n [75, 76, 77, 78, 79]])\n\n This puts the requested pixel in the center but does not offer any\n guarantees about the RGGB pattern.\n\n Args:\n x (float): X pixel position.\n y (float): Y pixel position.\n stamp_size (tuple, optional): The size of the cutout, default (14, 14).\n ignore_superpixel (bool): If superpixels should be ignored, default False.\n as_slices (bool): Return slice objects, default True. Otherwise returns:\n y_min, y_max, x_min, x_max\n Returns:\n `list(slice, slice)` or `list(int, int, int, int)`: A list of row and\n column slice objects or a list defining the bounding box:\n y_min, y_max, x_min, x_max. 
Return type depends on the `as_slices`\n parameter and defaults to a list of two slices.\n \"\"\"\n # Make sure requested size can have superpixels on each side.\n if not ignore_superpixel:\n for side_length in stamp_size:\n side_length -= 2 # Subtract center superpixel\n if side_length / 2 % 2 != 0:\n raise RuntimeError(f\"Invalid slice size: {side_length + 2} \"\n f\"Slice must have even number of pixels on each side\"\n f\"of center superpixel. i.e. 6, 10, 14, 18...\")\n\n # Pixels have nasty 0.5 rounding issues\n x = Decimal(float(x)).to_integral()\n y = Decimal(float(y)).to_integral()\n color = get_pixel_color(x, y)\n logger.debug(f'Found color={color} for x={x} y={y}')\n\n x_half = int(stamp_size[0] / 2)\n y_half = int(stamp_size[1] / 2)\n\n x_min = int(x - x_half)\n x_max = int(x + x_half)\n\n y_min = int(y - y_half)\n y_max = int(y + y_half)\n\n # Alter the bounds depending on identified center pixel so we always center superpixel have:\n # G2 B\n # R G1\n if color == 'R':\n x_min += 1\n x_max += 1\n elif color == 'G2':\n x_min += 1\n x_max += 1\n y_min += 1\n y_max += 1\n elif color == 'B':\n y_min += 1\n y_max += 1\n\n # if stamp_size is odd add extra\n if stamp_size[0] % 2 == 1:\n x_max += 1\n y_max += 1\n\n logger.debug(f'x_min={x_min}, x_max={x_max}, y_min={y_min}, y_max={y_max}')\n\n if as_slices:\n return slice(y_min, y_max), slice(x_min, x_max)\n else:\n return y_min, y_max, x_min, x_max\n\n\ndef get_rgb_background(data,\n box_size=(79, 84),\n filter_size=(11, 12),\n estimator='mmm',\n interpolator='zoom',\n sigma=5,\n iters=10,\n exclude_percentile=100,\n return_separate=False,\n *args,\n **kwargs\n ):\n \"\"\"Get the background for each color channel.\n\n Note: This funtion does not perform any additional calibration, such as flat, bias,\n or dark correction. It is expected you have performed any necessary pre-processing\n to `data` before passing to this function.\n\n By default this uses a box size of (79, 84), which gives an integer number\n of boxes. 
The size of the median filter box for the low resolution background\n is on the order of the stamp size.\n\n Most of the options are described in the `photutils.Background2D` page:\n https://photutils.readthedocs.io/en/stable/background.html#d-background-and-noise-estimation\n\n >>> from panoptes.utils.images.bayer import RGB\n >>> from panoptes.utils.images import fits as fits_utils\n >>> # Get our data and pre-process (basic bias subtract here).\n >>> fits_fn = getfixture('solved_fits_file')\n >>> camera_bias = 2048\n >>> data = fits_utils.getdata(fits_fn) - camera_bias\n\n >> The default is to return a single array for the background.\n >>> rgb_back = get_rgb_background(data)\n >>> rgb_back.mean()\n 136...\n >>> rgb_back.std()\n 36...\n\n >>> # Can also return the Background2D objects, which is the input to save_rgb_bg_fits\n >>> rgb_backs = get_rgb_background(data, return_separate=True)\n >>> rgb_backs[RGB.RED]\n <photutils.background.background_2d.Background2D...>\n\n >>> {color.name:int(rgb_back[color].mean()) for color in RGB}\n {'RED': 145, 'GREEN': 127, 'BLUE': 145}\n\n Args:\n data (np.array): The data to use if no `fits_fn` is provided.\n box_size (tuple, optional): The box size over which to compute the\n 2D-Background, default (79, 84).\n filter_size (tuple, optional): The filter size for determining the median,\n default (11, 12).\n estimator (str, optional): The estimator object to use, default 'mmm'.\n interpolator (str, optional): The interpolater object to user, default 'zoom'.\n sigma (int, optional): The sigma on which to filter values, default 5.\n iters (int, optional): The number of iterations to sigma filter, default 10.\n exclude_percentile (int, optional): The percentage of the data (per channel)\n that can be masked, default 100 (i.e. all).\n return_separate (bool, optional): If the function should return a separate array\n for color channel, default False.\n *args: Description\n **kwargs: Description\n\n Returns:\n `numpy.array`|list(Background2D): Either a single numpy array representing the entire\n background, or a list of masked numpy arrays in RGB order. 
The background\n for each channel has full interploation across all pixels, but the mask covers\n them.\n \"\"\"\n logger.debug(\"RGB background subtraction\")\n logger.debug(f\"{estimator} {interpolator} {box_size} {filter_size} {sigma} {iters}\")\n\n estimators = {\n 'sexb': SExtractorBackground,\n 'median': MedianBackground,\n 'mean': MeanBackground,\n 'mmm': MMMBackground\n }\n interpolators = {\n 'zoom': BkgZoomInterpolator,\n }\n\n bkg_estimator = estimators[estimator]()\n interp = interpolators[interpolator]()\n\n # Get the data per color channel.\n logger.debug(f'Getting RGB background data ({data.shape})')\n rgb_data = get_rgb_data(data)\n\n backgrounds = list()\n for color, color_data in zip(RGB, rgb_data):\n logger.debug(f'Calculating background for {color.name.lower()} pixels')\n\n bkg = Background2D(color_data,\n box_size,\n filter_size=filter_size,\n sigma_clip=SigmaClip(sigma=sigma, maxiters=iters),\n bkg_estimator=bkg_estimator,\n exclude_percentile=exclude_percentile,\n mask=color_data.mask,\n interpolator=interp)\n\n logger.debug(f\"{color.name.lower()}: {bkg.background_median:.02f} \"\n f\"RMS: {bkg.background_rms_median:.02f}\")\n\n if return_separate:\n backgrounds.append(bkg)\n else:\n # Create a masked array for the background\n backgrounds.append(np.ma.array(data=bkg.background, mask=color_data.mask))\n\n if return_separate:\n return backgrounds\n\n # Create one array for the backgrounds, where any holes are filled with zeros.\n full_background = np.ma.array(backgrounds).sum(0).filled(0)\n\n return full_background\n\n\ndef save_rgb_bg_fits(rgb_bg_data, output_filename, header=None, fpack=True, overwrite=True):\n \"\"\"Save a FITS file containing a combined background as well as separate channels.\n\n Args:\n rgb_bg_data (list[photutils.Background2D]): The RGB background data as\n returned by calling `panoptes.utils.images.bayer.get_rgb_background`\n with `return_separate=True`.\n output_filename (str): The output name for the FITS file.\n header (astropy.io.fits.Header): FITS header to be saved with the file.\n fpack (bool): If the FITS file should be compressed, default True.\n overwrite (bool): If FITS file should be overwritten, default True.\n \"\"\"\n\n # Get combined data for Primary HDU\n combined_bg = np.array([np.ma.array(data=d.background, mask=d.mask).filled(0)\n for d in rgb_bg_data]).sum(0)\n\n header = header or fits.Header()\n\n # Save as ing16.\n header['BITPIX'] = 16\n\n # Combined background is primary hdu.\n primary = fits.PrimaryHDU(combined_bg, header=header)\n primary.scale('int16')\n hdu_list = [primary]\n\n for color, bg in zip(RGB, rgb_bg_data):\n h0 = fits.Header()\n h0['COLOR'] = f'{color.name.lower()}'\n\n h0['IMGTYPE'] = 'background'\n img0 = fits.ImageHDU(bg.background, header=h0)\n img0.scale('int16')\n hdu_list.append(img0)\n\n h0['IMGTYPE'] = 'background_rms'\n img1 = fits.ImageHDU(bg.background_rms, header=h0)\n img1.scale('int16')\n hdu_list.append(img1)\n\n hdul = fits.HDUList(hdu_list)\n hdul.writeto(output_filename, overwrite=overwrite)\n\n if fpack:\n output_filename = fits_utils.fpack(output_filename)\n\n return output_filename\n"
] |
[
[
"numpy.ma.array",
"numpy.array",
"numpy.ones_like"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
tsalo/rapidtide
|
[
"9e0faff6e9796c21bd62d4b98c309034c0eba766"
] |
[
"rapidtide/OrthoImageItem.py"
] |
[
"#!/usr/bin/env python\n# -*- coding: latin-1 -*-\n#\n# Copyright 2016-2021 Blaise Frederick\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n#\n#\n# $Author: frederic $\n# $Date: 2016/04/07 21:46:54 $\n# $Id: OrthoImageItem.py,v 1.13 2016/04/07 21:46:54 frederic Exp $\n#\n# -*- coding: utf-8 -*-\n\n\"\"\"\nA widget for orthographically displaying 3 and 4 dimensional data\n\"\"\"\n\nimport os\n\nimport numpy as np\nimport pyqtgraph as pg\nfrom pyqtgraph.Qt import QtCore, QtGui\n\ntry:\n from PIL import Image\n\n PILexists = True\nexcept ImportError:\n PILexists = False\n\n\ndef newColorbar(left, top, impixpervoxx, impixpervoxy, imgsize):\n cb_xdim = imgsize // 10\n cb_ydim = imgsize\n theviewbox = pg.ViewBox(enableMouse=False)\n theviewbox.setRange(\n QtCore.QRectF(0, 0, cb_xdim, cb_ydim),\n xRange=(0, cb_xdim - 1),\n yRange=(0, cb_ydim - 1),\n padding=0.0,\n disableAutoRange=True,\n )\n theviewbox.setBackgroundColor([50, 50, 50])\n theviewbox.setAspectLocked()\n\n thecolorbarfgwin = pg.ImageItem()\n theviewbox.addItem(thecolorbarfgwin)\n thecolorbarfgwin.setZValue(10)\n thecolorbarfgwin.translate(left, top)\n thecolorbarfgwin.scale(impixpervoxx, impixpervoxy)\n\n thecolorbarbgwin = pg.ImageItem()\n theviewbox.addItem(thecolorbarbgwin)\n thecolorbarbgwin.setZValue(0)\n thecolorbarbgwin.translate(left, top)\n thecolorbarbgwin.scale(impixpervoxx, impixpervoxy)\n\n colorbarvals = np.zeros((cb_xdim, cb_ydim), dtype=np.float64)\n for i in range(0, cb_ydim):\n colorbarvals[:, i] = i * (1.0 / (cb_ydim - 1.0))\n return thecolorbarfgwin, thecolorbarbgwin, theviewbox, colorbarvals\n\n\ndef newViewWindow(\n view, xdim, ydim, left, top, impixpervoxx, impixpervoxy, imgsize, enableMouse=False\n):\n theviewbox = view.addViewBox(enableMouse=enableMouse, enableMenu=False, lockAspect=1.0)\n theviewbox.setAspectLocked()\n theviewbox.setRange(QtCore.QRectF(0, 0, imgsize, imgsize), padding=0.0, disableAutoRange=True)\n theviewbox.setBackgroundColor([50, 50, 50])\n\n theviewfgwin = pg.ImageItem()\n theviewbox.addItem(theviewfgwin)\n theviewfgwin.setZValue(10)\n theviewfgwin.translate(left, top)\n theviewfgwin.scale(impixpervoxx, impixpervoxy)\n\n theviewbgwin = pg.ImageItem()\n theviewbox.addItem(theviewbgwin)\n theviewbgwin.setZValue(0)\n theviewbgwin.translate(left, top)\n theviewbgwin.scale(impixpervoxx, impixpervoxy)\n\n theviewvLine = pg.InfiniteLine(angle=90, movable=False, pen=\"g\")\n theviewvLine.setZValue(20)\n theviewbox.addItem(theviewvLine)\n theviewhLine = pg.InfiniteLine(angle=0, movable=False, pen=\"g\")\n theviewhLine.setZValue(20)\n theviewbox.addItem(theviewhLine)\n\n return theviewfgwin, theviewbgwin, theviewvLine, theviewhLine, theviewbox\n\n\nclass OrthoImageItem(QtGui.QWidget):\n updated = QtCore.pyqtSignal()\n\n def __init__(\n self,\n map,\n axview,\n corview,\n sagview,\n enableMouse=False,\n button=None,\n imgsize=64,\n arrangement=0,\n bgmap=None,\n verbose=False,\n ):\n QtGui.QWidget.__init__(self)\n self.map = map\n self.bgmap = bgmap\n self.axview = axview\n self.corview = 
corview\n self.sagview = sagview\n self.button = button\n self.verbose = verbose\n self.enableMouse = enableMouse\n self.xdim = self.map.xdim # this is the number of voxels along this axis\n self.ydim = self.map.ydim # this is the number of voxels along this axis\n self.zdim = self.map.zdim # this is the number of voxels along this axis\n self.tdim = self.map.tdim # this is the number of voxels along this axis\n self.xsize = self.map.xsize # this is the mapping between voxel and physical space\n self.ysize = self.map.ysize # this is the mapping between voxel and physical space\n self.zsize = self.map.zsize # this is the mapping between voxel and physical space\n self.imgsize = imgsize\n self.xfov = self.xdim * self.xsize\n self.yfov = self.ydim * self.ysize\n self.zfov = self.zdim * self.zsize\n self.xpos = int(self.xdim // 2)\n self.ypos = int(self.ydim // 2)\n self.zpos = int(self.zdim // 2)\n self.tpos = int(0)\n self.maxfov = np.max([self.xfov, self.yfov, self.zfov])\n self.impixpervoxx = self.imgsize * (self.xfov / self.maxfov) / self.xdim\n self.impixpervoxy = self.imgsize * (self.yfov / self.maxfov) / self.ydim\n self.impixpervoxz = self.imgsize * (self.zfov / self.maxfov) / self.zdim\n self.offsetx = self.imgsize * (0.5 - self.xfov / (2.0 * self.maxfov))\n self.offsety = self.imgsize * (0.5 - self.yfov / (2.0 * self.maxfov))\n self.offsetz = self.imgsize * (0.5 - self.zfov / (2.0 * self.maxfov))\n\n if self.verbose:\n print(\"OrthoImageItem intialization:\")\n print(\" Dimensions:\", self.xdim, self.ydim, self.zdim)\n print(\" Voxel sizes:\", self.xsize, self.ysize, self.zsize)\n print(\" FOVs:\", self.xfov, self.yfov, self.zfov)\n print(\" Maxfov, imgsize:\", self.maxfov, self.imgsize)\n print(\n \" Scale factors:\", self.impixpervoxx, self.impixpervoxy, self.impixpervoxz,\n )\n print(\" Offsets:\", self.offsetx, self.offsety, self.offsetz)\n self.buttonisdown = False\n\n self.arrangement = arrangement\n self.axview.setBackground(None)\n self.axview.setRange(padding=0.0)\n self.axview.ci.layout.setContentsMargins(0, 0, 0, 0)\n self.axview.ci.layout.setSpacing(5)\n self.corview.setBackground(None)\n self.corview.setRange(padding=0.0)\n self.corview.ci.layout.setContentsMargins(0, 0, 0, 0)\n self.corview.ci.layout.setSpacing(5)\n self.sagview.setBackground(None)\n self.sagview.setRange(padding=0.0)\n self.sagview.ci.layout.setContentsMargins(0, 0, 0, 0)\n self.sagview.ci.layout.setSpacing(5)\n\n (\n self.axviewwin,\n self.axviewbgwin,\n self.axviewvLine,\n self.axviewhLine,\n self.axviewbox,\n ) = newViewWindow(\n self.axview,\n self.xdim,\n self.ydim,\n self.offsetx,\n self.offsety,\n self.impixpervoxx,\n self.impixpervoxy,\n self.imgsize,\n enableMouse=self.enableMouse,\n )\n (\n self.corviewwin,\n self.corviewbgwin,\n self.corviewvLine,\n self.corviewhLine,\n self.corviewbox,\n ) = newViewWindow(\n self.corview,\n self.xdim,\n self.zdim,\n self.offsetx,\n self.offsetz,\n self.impixpervoxx,\n self.impixpervoxz,\n self.imgsize,\n enableMouse=self.enableMouse,\n )\n (\n self.sagviewwin,\n self.sagviewbgwin,\n self.sagviewvLine,\n self.sagviewhLine,\n self.sagviewbox,\n ) = newViewWindow(\n self.sagview,\n self.ydim,\n self.zdim,\n self.offsety,\n self.offsetz,\n self.impixpervoxy,\n self.impixpervoxz,\n self.imgsize,\n enableMouse=self.enableMouse,\n )\n if self.enableMouse:\n self.axviewbox.keyPressEvent = self.handleaxkey\n self.axviewbox.mousePressEvent = self.handleaxclick\n self.axviewbox.mouseMoveEvent = self.handleaxmousemove\n self.axviewbox.mouseReleaseEvent = 
self.handlemouseup\n self.corviewbox.mousePressEvent = self.handlecorclick\n self.corviewbox.mouseMoveEvent = self.handlecormousemove\n self.corviewbox.mouseReleaseEvent = self.handlemouseup\n self.sagviewbox.mousePressEvent = self.handlesagclick\n self.sagviewbox.mouseMoveEvent = self.handlesagmousemove\n self.sagviewbox.mouseReleaseEvent = self.handlemouseup\n\n self.enableView()\n self.updateAllViews()\n\n def xvox2pix(self, xpos):\n return int(np.round(self.offsetx + self.impixpervoxx * xpos))\n\n def yvox2pix(self, ypos):\n return int(np.round(self.offsety + self.impixpervoxy * ypos))\n\n def zvox2pix(self, zpos):\n return int(np.round(self.offsetz + self.impixpervoxz * zpos))\n\n def xpix2vox(self, xpix):\n thepos = (xpix - self.offsetx) / self.impixpervoxx\n if thepos > self.xdim - 1:\n thepos = self.xdim - 1\n if thepos < 0:\n thepos = 0\n return int(np.round(thepos))\n\n def ypix2vox(self, ypix):\n thepos = (ypix - self.offsety) / self.impixpervoxy\n if thepos > self.ydim - 1:\n thepos = self.ydim - 1\n if thepos < 0:\n thepos = 0\n return int(np.round(thepos))\n\n def zpix2vox(self, zpix):\n thepos = (zpix - self.offsetz) / self.impixpervoxz\n if thepos > self.zdim - 1:\n thepos = self.zdim - 1\n if thepos < 0:\n thepos = 0\n return int(np.round(thepos))\n\n def updateAllViews(self):\n if self.tdim == 1:\n axdata = self.map.maskeddata[:, :, self.zpos]\n else:\n axdata = self.map.maskeddata[:, :, self.zpos, self.tpos]\n if not (self.map.mask is None):\n axmask = self.map.mask[:, :, self.zpos]\n else:\n axmask = 0.0 * self.map.maskeddata[:, :, self.zpos] + 1.0\n if self.bgmap is None:\n axbg = None\n else:\n axbg = self.bgmap.data[:, :, self.zpos]\n self.updateOneView(axdata, axmask, axbg, self.map.theLUT, self.axviewwin, self.axviewbgwin)\n self.axviewvLine.setValue(self.xvox2pix(self.xpos))\n self.axviewhLine.setValue(self.yvox2pix(self.ypos))\n\n if self.tdim == 1:\n cordata = self.map.maskeddata[:, self.ypos, :]\n else:\n cordata = self.map.maskeddata[:, self.ypos, :, self.tpos]\n if not (self.map.mask is None):\n cormask = self.map.mask[:, self.ypos, :]\n else:\n cormask = 0.0 * self.map.maskeddata[:, self.ypos, :] + 1.0\n if self.bgmap is None:\n corbg = None\n else:\n corbg = self.bgmap.data[:, self.ypos, :]\n self.updateOneView(\n cordata, cormask, corbg, self.map.theLUT, self.corviewwin, self.corviewbgwin\n )\n self.corviewvLine.setValue(self.xvox2pix(self.xpos))\n self.corviewhLine.setValue(self.zvox2pix(self.zpos))\n\n if self.tdim == 1:\n sagdata = self.map.maskeddata[self.xpos, :, :]\n else:\n sagdata = self.map.maskeddata[self.xpos, :, :, self.tpos]\n if not (self.map.mask is None):\n sagmask = self.map.mask[self.xpos, :, :]\n else:\n sagmask = 0.0 * self.map.maskeddata[self.xpos, :, :] + 1.0\n if self.bgmap is None:\n sagbg = None\n else:\n sagbg = self.bgmap.data[self.xpos, :, :]\n self.updateOneView(\n sagdata, sagmask, sagbg, self.map.theLUT, self.sagviewwin, self.sagviewbgwin\n )\n self.sagviewvLine.setValue(self.yvox2pix(self.ypos))\n self.sagviewhLine.setValue(self.zvox2pix(self.zpos))\n\n def updateOneView(self, data, mask, background, theLUT, thefgwin, thebgwin):\n im = self.applyLUT(data, mask, theLUT, self.map.dispmin, self.map.dispmax)\n thefgwin.setImage(im.astype(\"float\"))\n if background is not None:\n thebgwin.setImage(background.astype(\"float\"), autoLevels=True)\n\n def setMap(self, themap):\n self.map = themap\n self.tdim = self.map.tdim\n\n def enableView(self):\n if self.button is not None:\n self.button.setText(self.map.label)\n 
self.button.setDisabled(False)\n self.button.show()\n self.axview.show()\n self.corview.show()\n self.sagview.show()\n\n def applyLUT(self, theimage, mask, theLUT, dispmin, dispmax):\n offset = dispmin\n if dispmax - dispmin > 0:\n scale = len(theLUT) / (dispmax - dispmin)\n else:\n scale = 0.0\n scaleddata = np.rint((theimage - offset) * scale).astype(\"int32\")\n scaleddata[np.where(scaleddata < 0)] = 0\n scaleddata[np.where(scaleddata > (len(theLUT) - 1))] = len(theLUT) - 1\n mappeddata = theLUT[scaleddata]\n mappeddata[:, :, 3][np.where(mask < 1)] = 0\n return mappeddata\n\n def updateCursors(self):\n xpix = self.xvox2pix(self.xpos)\n ypix = self.yvox2pix(self.ypos)\n zpix = self.zvox2pix(self.zpos)\n self.axviewvLine.setValue(xpix)\n self.axviewhLine.setValue(ypix)\n self.corviewvLine.setValue(xpix)\n self.corviewhLine.setValue(zpix)\n self.sagviewvLine.setValue(ypix)\n self.sagviewhLine.setValue(zpix)\n\n def handlemouseup(self, event):\n self.buttonisdown = False\n self.updateCursors()\n self.updateAllViews()\n\n def handleaxmousemove(self, event):\n if self.buttonisdown:\n self.xpos = self.xpix2vox(event.pos().x() - 1)\n self.ypos = self.ypix2vox(self.imgsize - event.pos().y() + 1)\n self.updateAllViews()\n self.updated.emit()\n\n def handlecormousemove(self, event):\n if self.buttonisdown:\n self.xpos = self.xpix2vox(event.pos().x() - 1)\n self.zpos = self.zpix2vox(self.imgsize - event.pos().y() + 1)\n self.updateAllViews()\n self.updated.emit()\n\n def handlesagmousemove(self, event):\n if self.buttonisdown:\n self.ypos = self.ypix2vox(event.pos().x() - 1)\n self.zpos = self.zpix2vox(self.imgsize - event.pos().y() + 1)\n self.updateAllViews()\n self.updated.emit()\n\n def handleaxkey(self, event):\n print(event)\n self.updateAllViews()\n self.updated.emit()\n\n def handleaxclick(self, event):\n self.xpos = self.xpix2vox(event.pos().x() - 1)\n self.ypos = self.ypix2vox(self.imgsize - event.pos().y() + 1)\n self.buttonisdown = True\n self.updateAllViews()\n self.updated.emit()\n\n def handlecorclick(self, event):\n self.xpos = self.xpix2vox(event.pos().x() - 1)\n self.zpos = self.zpix2vox(self.imgsize - event.pos().y() + 1)\n self.buttonisdown = True\n self.updateAllViews()\n self.updated.emit()\n\n def handlesagclick(self, event):\n self.ypos = self.ypix2vox(event.pos().x() - 1)\n self.zpos = self.zpix2vox(self.imgsize - event.pos().y() + 1)\n self.buttonisdown = True\n self.updateAllViews()\n self.updated.emit()\n\n def setXYZpos(self, xpos, ypos, zpos, emitsignal=True):\n self.xpos = int(xpos)\n self.ypos = int(ypos)\n self.zpos = int(zpos)\n self.updateAllViews()\n if emitsignal:\n self.updated.emit()\n\n def setTpos(self, tpos, emitsignal=True):\n if tpos > self.tdim - 1:\n self.tpos = int(self.tdim - 1)\n else:\n self.tpos = int(tpos)\n\n self.updateAllViews()\n if emitsignal:\n self.updated.emit()\n\n def getFocusVal(self):\n if self.tdim > 1:\n return self.map.maskeddata[self.xpos, self.ypos, self.zpos, self.tpos]\n else:\n return self.map.maskeddata[self.xpos, self.ypos, self.zpos]\n\n def saveandcomposite(self, square_img, fg_img, bg_img, name, savedir, scalefach, scalefacv):\n if PILexists:\n print(\"using PIL to save \", name)\n squarename = os.path.join(savedir, name + \"_square.png\")\n fgname = os.path.join(savedir, name + \"_foreground.png\")\n bgname = os.path.join(savedir, name + \"_background.png\")\n compositename = os.path.join(savedir, name + \".jpg\")\n\n # make the individual layers\n square_img.save(squarename)\n fg_img.save(fgname)\n bg_img.save(bgname)\n 
square = Image.open(squarename)\n background = Image.open(bgname)\n foreground = Image.open(fgname)\n print(foreground.getbands())\n\n # now composite\n background.paste(foreground, None, foreground)\n flipped = background.transpose(Image.FLIP_TOP_BOTTOM)\n\n # scale\n print(\"scaling\")\n mulfac = 8\n hsize = int(mulfac * scalefach)\n vsize = int(mulfac * scalefacv)\n print(\"scaling to \", hsize, vsize)\n flipped = flipped.resize((hsize, vsize), Image.NEAREST)\n\n # save and clean up\n print(\"saving to \", compositename)\n flipped.save(compositename, \"jpeg\")\n print(\"cleaning\")\n os.remove(fgname)\n os.remove(bgname)\n os.remove(squarename)\n else:\n print(\"saving \", name)\n square_img.save(os.path.join(savedir, name + \"_square.png\"))\n fg_img.save(os.path.join(savedir, name + \"_fg.png\"))\n bg_img.save(os.path.join(savedir, name + \"_bg.png\"))\n\n def saveDisp(self):\n print(\"saving main window\")\n mydialog = QtGui.QFileDialog()\n options = mydialog.Options()\n thedir = str(\n mydialog.getExistingDirectory(options=options, caption=\"Image output directory\")\n )\n print(\"thedir=\", thedir)\n thename = self.map.namebase + self.map.name\n\n # make a square background\n thesquarewin = pg.ImageItem()\n thesquarewin.translate(0, 0)\n maximpervox = np.max([self.impixpervoxx, self.impixpervoxy, self.impixpervoxz])\n maxdim = np.max([self.xdim, self.ydim, self.zdim])\n thesquarewin.scale(maximpervox, maximpervox)\n thesquarewin.setImage(np.zeros((maxdim, maxdim), dtype=float), autoLevels=True)\n\n # make a rectangular background\n therectwin = pg.ImageItem()\n therectwin.translate(0, 0)\n therectwin.scale(maximpervox, maximpervox)\n therectwin.setImage(np.zeros((maxdim // 10, maxdim), dtype=float), autoLevels=True)\n\n (thecolorbarfgwin, thecolorbarbgwin, thecolorbarviewbox, colorbarvals,) = newColorbar(\n 0, 0, maximpervox, maximpervox, maxdim\n )\n cbim = self.applyLUT(\n colorbarvals, (colorbarvals * 0 + 1).astype(\"int\"), self.map.theLUT, 0.0, 1.0,\n )\n thecolorbarfgwin.setImage(cbim.astype(\"float\"))\n thecolorbarbgwin.setImage(cbim.astype(\"float\"), autoLevels=True)\n print(thecolorbarfgwin)\n print(thecolorbarbgwin)\n print(thecolorbarviewbox)\n\n self.saveandcomposite(\n thesquarewin,\n self.axviewwin,\n self.axviewbgwin,\n thename + \"_ax\",\n thedir,\n self.xdim * self.xsize,\n self.ydim * self.ysize,\n )\n self.saveandcomposite(\n thesquarewin,\n self.corviewwin,\n self.corviewbgwin,\n thename + \"_cor\",\n thedir,\n self.xdim * self.xsize,\n self.zdim * self.zsize,\n )\n self.saveandcomposite(\n thesquarewin,\n self.sagviewwin,\n self.sagviewbgwin,\n thename + \"_sag\",\n thedir,\n self.ydim * self.ysize,\n self.zdim * self.zsize,\n )\n \"\"\"self.saveandcomposite(therectwin,\n thecolorbarfgwin, thecolorbarbgwin,\n thename + '_colorbar', thedir,\n maximpervox * maxdim // 10,\n maximpervox * maxdim)\"\"\"\n\n with open(os.path.join(thedir, thename + \"_lims.txt\"), \"w\") as FILE:\n FILE.writelines(str(self.map.dispmin) + \"\\t\" + str(self.map.dispmax))\n # img_colorbar.save(thedir + self.map.name + '_colorbar.png')\n\n def summarize(self):\n if self.map is not None:\n # print('OrthoImageItem[', self.map.name, ']: map is set')\n pass\n"
] |
[
[
"numpy.rint",
"numpy.round",
"numpy.max",
"numpy.zeros",
"numpy.where"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
JinxedQAQ/Generating-Talking-Face-with-Controllable-Eye-Movements-by-Disentangled-Blinking-Feature
|
[
"c1b68c010ccfb6b1d438dba97a1317ce0ae2aab8",
"c1b68c010ccfb6b1d438dba97a1317ce0ae2aab8"
] |
[
"util/util.py",
"test_all_h_1v1.py"
] |
[
"from __future__ import print_function\nimport torch\nimport numpy as np\nfrom PIL import Image\nimport inspect, re\nimport torch.nn as nn\nimport os\nfrom Options_all import BaseOptions\nimport collections\nconfig = BaseOptions().parse()\n\n\ndef tensor2im(image_tensor, imtype=np.uint8):\n image_numpy = image_tensor[0].cpu().float().numpy()\n image_numpy = np.transpose(image_numpy, (1, 2, 0)) * 255.0\n PIL_image = image_numpy\n\n return PIL_image.astype(imtype)\n\ndef tensor2image(image_tensor, imtype=np.uint8):\n image_numpy = image_tensor.cpu().float().numpy()\n image_numpy = np.transpose(image_numpy, (1, 2, 0)) * 255.0\n PIL_image = image_numpy\n\n return PIL_image.astype(imtype)\n\ndef tensor2mfcc(image_tensor, imtype=np.uint8):\n image_numpy = image_tensor[0].cpu().float().numpy()\n PIL_image = image_numpy\n return PIL_image.astype(imtype)\n\ndef diagnose_network(net, name='network'):\n mean = 0.0\n count = 0\n for param in net.parameters():\n if param.grad is not None:\n mean += torch.mean(torch.abs(param.grad.data))\n count += 1\n if count > 0:\n mean = mean / count\n print(name)\n print(mean)\n\n\ndef save_image(image_numpy, image_path):\n image_pil = Image.fromarray(image_numpy)\n image_pil.save(image_path)\n\ndef info(object, spacing=10, collapse=1):\n \"\"\"Print methods and doc strings.\n Takes module, class, list, dictionary, or string.\"\"\"\n methodList = [e for e in dir(object) if isinstance(getattr(object, e), collections.Callable)]\n processFunc = collapse and (lambda s: \" \".join(s.split())) or (lambda s: s)\n print( \"\\n\".join([\"%s %s\" %\n (method.ljust(spacing),\n processFunc(str(getattr(object, method).__doc__)))\n for method in methodList]) )\n\ndef varname(p):\n for line in inspect.getframeinfo(inspect.currentframe().f_back)[3]:\n m = re.search(r'\\bvarname\\s*\\(\\s*([A-Za-z_][A-Za-z0-9_]*)\\s*\\)', line)\n if m:\n return m.group(1)\n\ndef print_numpy(x, val=True, shp=False):\n x = x.astype(np.float64)\n if shp:\n print('shape,', x.shape)\n if val:\n x = x.flatten()\n print('mean = %3.3f, min = %3.3f, max = %3.3f, median = %3.3f, std=%3.3f' % (\n np.mean(x), np.min(x), np.max(x), np.median(x), np.std(x)))\n\n\ndef mkdirs(paths):\n if isinstance(paths, list) and not isinstance(paths, str):\n for path in paths:\n mkdir(path)\n else:\n mkdir(paths)\n\n\ndef mkdir(path):\n if not os.path.exists(path):\n os.makedirs(path)\n\ndef save_checkpoint(state, epoch, filename=config.name + '_checkpoint.pth.tar', step=0):\n torch.save(state, os.path.join(config.checkpoints_dir, str(epoch) + \"_\" + str(step) + \"_\" + filename))\n\n\ndef copy_state_dict(state_dict, model, strip=None):\n tgt_state = model.state_dict()\n copied_names = set()\n for name, param in state_dict.items():\n if strip is not None and name.startswith(strip):\n name = name[len(strip):]\n if name not in tgt_state:\n continue\n if isinstance(param, nn.Parameter):\n param = param.data\n if param.size() != tgt_state[name].size():\n print('mismatch:', name, param.size(), tgt_state[name].size())\n continue\n tgt_state[name].copy_(param)\n copied_names.add(name)\n\n missing = set(tgt_state.keys()) - copied_names\n if len(missing) > 0:\n print(\"missing keys in state_dict:\", missing)\n\n return model\n\ndef copy_state_dict_h(state_dict, model,modelname, strip=None):\n tgt_state = model.state_dict()\n copied_names = set()\n for name, param in state_dict.items():\n name2 = name.replace('module.','')\n if strip is not None and name2.startswith(strip):\n name2 = name2[len(strip):]\n if name2 not in tgt_state:\n 
continue\n if isinstance(param, nn.Parameter):\n param = param.data\n if param.size() != tgt_state[name2].size():\n print('mismatch:', name2, param.size(), tgt_state[name2].size())\n continue\n tgt_state[name2].copy_(param)\n copied_names.add(name2)\n\n missing = set(tgt_state.keys()) - copied_names\n if len(missing) > 0:\n print('{} of {} is missing in {}'.format(len(missing),len(set(tgt_state.keys())),modelname))\n print(modelname,\" has missing keys in state_dict:\", missing)\n\n return model\n\ndef copy_state_dict_add_module(state_dict, model, strip=None):\n tgt_state = model.state_dict()\n copied_names = set()\n for name, param in state_dict.items():\n name2 = 'module.' + name\n if strip is not None and name2.startswith(strip):\n name2 = name2[len(strip):]\n if name2 not in tgt_state:\n continue\n if isinstance(param, nn.Parameter):\n param = param.data\n if param.size() != tgt_state[name2].size():\n print('mismatch:', name2, param.size(), tgt_state[name2].size())\n continue\n tgt_state[name2].copy_(param)\n copied_names.add(name2)\n\n missing = set(tgt_state.keys()) - copied_names\n if len(missing) > 0:\n print(modelname,\" has missing keys in state_dict:\", missing)\n\n return model\n\ndef load_checkpoint(resume_path, Model):\n resume_path = resume_path\n if os.path.isfile(resume_path):\n print(\"=> loading checkpoint '{}'\".format(resume_path))\n checkpoint = torch.load(resume_path)\n total_steps = checkpoint['step']\n epoch = checkpoint['epoch']\n Model.ID_encoder = copy_state_dict(checkpoint['ID_encoder'], Model.ID_encoder)\n Model.Decoder = copy_state_dict(checkpoint['Decoder'], Model.Decoder)\n Model.mfcc_encoder = copy_state_dict(checkpoint['mfcc_encoder'], Model.mfcc_encoder)\n Model.lip_feature_encoder = copy_state_dict(checkpoint['lip_feature_encoder'], Model.lip_feature_encoder)\n Model.netD = copy_state_dict(checkpoint['netD'], Model.netD)\n Model.netD_mul = copy_state_dict(checkpoint['netD_mul'], Model.netD_mul)\n Model.ID_lip_discriminator = copy_state_dict(checkpoint['ID_lip_discriminator'], Model.ID_lip_discriminator)\n Model.model_fusion = copy_state_dict(checkpoint['model_fusion'], Model.model_fusion)\n Model.optimizer_D.load_state_dict(checkpoint['optimizer_D'])\n #Model.optimizer_G.load_state_dict(checkpoint['optimizer_G'])\n print(\"=> loaded checkpoint '{}' (step {})\"\n .format(resume_path, checkpoint['step']))\n return Model, total_steps, epoch\n else:\n print(\"=> no checkpoint found at '{}'\".format(resume_path))\n\ndef load_checkpoint_without_lip(resume_path, Model):\n resume_path = resume_path\n if os.path.isfile(resume_path):\n print(\"=> loading checkpoint '{}'\".format(resume_path))\n checkpoint = torch.load(resume_path)\n total_steps = checkpoint['step']\n epoch = checkpoint['epoch']\n Model.ID_encoder = copy_state_dict(checkpoint['ID_encoder'], Model.ID_encoder)\n Model.Decoder = copy_state_dict(checkpoint['Decoder'], Model.Decoder)\n Model.mfcc_encoder = copy_state_dict(checkpoint['mfcc_encoder'], Model.mfcc_encoder)\n #Model.lip_feature_encoder = copy_state_dict(checkpoint['lip_feature_encoder'], Model.lip_feature_encoder)\n Model.netD = copy_state_dict(checkpoint['netD'], Model.netD)\n Model.netD_mul = copy_state_dict(checkpoint['netD_mul'], Model.netD_mul)\n #Model.ID_lip_discriminator = copy_state_dict(checkpoint['ID_lip_discriminator'], Model.ID_lip_discriminator)\n Model.model_fusion = copy_state_dict(checkpoint['model_fusion'], Model.model_fusion)\n #Model.optimizer_D.load_state_dict(checkpoint['optimizer_D'])\n 
#Model.optimizer_G.load_state_dict(checkpoint['optimizer_G'])\n print(\"=> loaded checkpoint '{}' (step {})\"\n .format(resume_path, checkpoint['step']))\n return Model, total_steps, epoch\n else:\n print(\"=> no checkpoint found at '{}'\".format(resume_path))\n\n\ndef load_checkpoint_from101davs(resume_path, Model):\n resume_path = resume_path\n if os.path.isfile(resume_path):\n print(\"=> loading checkpoint '{}'\".format(resume_path))\n checkpoint = torch.load(resume_path)\n total_steps = checkpoint['step']\n epoch = checkpoint['epoch']\n Model.ID_encoder = copy_state_dict_h(checkpoint['ID_encoder'], Model.ID_encoder,'ID_encoder')\n Model.Decoder = copy_state_dict_h(checkpoint['Decoder'], Model.Decoder,'Decoder')\n Model.mfcc_encoder = copy_state_dict_h(checkpoint['mfcc_encoder'], Model.mfcc_encoder,'mfcc_encoder')\n Model.lip_feature_encoder = copy_state_dict_h(checkpoint['lip_feature_encoder'], Model.lip_feature_encoder,'lip_feature_encoder')\n Model.netD = copy_state_dict_h(checkpoint['netD'], Model.netD,'netD')\n Model.netD_mul = copy_state_dict_h(checkpoint['netD_mul'], Model.netD_mul,'netD_mul')\n Model.ID_lip_discriminator = copy_state_dict_h(checkpoint['ID_lip_discriminator'], Model.ID_lip_discriminator,'ID_lip_discriminator')\n Model.model_fusion = copy_state_dict_h(checkpoint['model_fusion'], Model.model_fusion,'model_fusion')\n #Model.optimizer_D.load_state_dict(checkpoint['optimizer_D'])\n #Model.optimizer_G.load_state_dict(checkpoint['optimizer_G'])\n print(\"=> loaded checkpoint '{}' (step {})\"\n .format(resume_path, checkpoint['step']))\n return Model, total_steps, epoch\n else:\n print(\"=> no checkpoint found at '{}'\".format(resume_path))\n\ndef load_checkpoint_from101davs_without_lip(resume_path, Model):\n resume_path = resume_path\n if os.path.isfile(resume_path):\n print(\"=> loading checkpoint '{}'\".format(resume_path))\n checkpoint = torch.load(resume_path)\n total_steps = checkpoint['step']\n epoch = checkpoint['epoch']\n Model.ID_encoder = copy_state_dict_h(checkpoint['ID_encoder'], Model.ID_encoder,'ID_encoder')\n Model.Decoder = copy_state_dict_h(checkpoint['Decoder'], Model.Decoder,'Decoder')\n Model.mfcc_encoder = copy_state_dict_h(checkpoint['mfcc_encoder'], Model.mfcc_encoder,'mfcc_encoder')\n #Model.lip_feature_encoder = copy_state_dict_h(checkpoint['lip_feature_encoder'], Model.lip_feature_encoder,'lip_feature_encoder')\n Model.netD = copy_state_dict_h(checkpoint['netD'], Model.netD,'netD')\n Model.netD_mul = copy_state_dict_h(checkpoint['netD_mul'], Model.netD_mul,'netD_mul')\n #Model.ID_lip_discriminator = copy_state_dict_h(checkpoint['ID_lip_discriminator'], Model.ID_lip_discriminator,'ID_lip_discriminator')\n Model.model_fusion = copy_state_dict_h(checkpoint['model_fusion'], Model.model_fusion,'model_fusion')\n #Model.optimizer_D.load_state_dict(checkpoint['optimizer_D'])\n #Model.optimizer_G.load_state_dict(checkpoint['optimizer_G'])\n print(\"=> loaded checkpoint '{}' (step {})\"\n .format(resume_path, checkpoint['step']))\n return Model, total_steps, epoch\n else:\n print(\"=> no checkpoint found at '{}'\".format(resume_path))\n\n\ndef load_checkpoint_from101davs_without_decoder(resume_path, Model):\n resume_path = resume_path\n if os.path.isfile(resume_path):\n print(\"=> loading checkpoint '{}'\".format(resume_path))\n checkpoint = torch.load(resume_path)\n total_steps = checkpoint['step']\n epoch = checkpoint['epoch']\n Model.ID_encoder = copy_state_dict_h(checkpoint['ID_encoder'], Model.ID_encoder,'ID_encoder')\n #Model.Decoder = 
copy_state_dict_h(checkpoint['Decoder'], Model.Decoder,'Decoder')\n Model.mfcc_encoder = copy_state_dict_h(checkpoint['mfcc_encoder'], Model.mfcc_encoder,'mfcc_encoder')\n Model.lip_feature_encoder = copy_state_dict_h(checkpoint['lip_feature_encoder'], Model.lip_feature_encoder,'lip_feature_encoder')\n Model.netD = copy_state_dict_h(checkpoint['netD'], Model.netD,'netD')\n Model.netD_mul = copy_state_dict_h(checkpoint['netD_mul'], Model.netD_mul,'netD_mul')\n Model.ID_lip_discriminator = copy_state_dict_h(checkpoint['ID_lip_discriminator'], Model.ID_lip_discriminator,'ID_lip_discriminator')\n Model.model_fusion = copy_state_dict_h(checkpoint['model_fusion'], Model.model_fusion,'model_fusion')\n Model.optimizer_D.load_state_dict(checkpoint['optimizer_D'])\n #Model.optimizer_G.load_state_dict(checkpoint['optimizer_G'])\n print(\"=> loaded checkpoint '{}' (step {})\"\n .format(resume_path, checkpoint['step']))\n return Model, total_steps, epoch\n else:\n print(\"=> no checkpoint found at '{}'\".format(resume_path))\n\ndef load_separately(opt, Model):\n print(\"=> loading checkpoint '{}'\".format(opt.id_pretrain_path))\n id_pretrain = torch.load(opt.id_pretrain_path)\n Model.ID_encoder = copy_state_dict(id_pretrain['model_fusion'], Model.ID_encoder)\n print(\"=> loading checkpoint '{}'\".format(opt.feature_extractor_path))\n feature_extractor_check = torch.load(opt.feature_extractor_path)\n Model.lip_feature_encoder = copy_state_dict(feature_extractor_check['face_encoder'], Model.lip_feature_encoder)\n Model.mfcc_encoder = copy_state_dict(feature_extractor_check['mfcc_encoder'], Model.mfcc_encoder)\n Model.model_fusion = copy_state_dict(feature_extractor_check['face_fusion'], Model.model_fusion)\n return Model\n\n\ndef load_test_checkpoint(resume_path, Model):\n resume_path = resume_path\n if os.path.isfile(resume_path):\n print(\"=> loading checkpoint '{}'\".format(resume_path))\n checkpoint = torch.load(resume_path)\n total_steps = checkpoint['step']\n epoch = checkpoint['epoch']\n Model.ID_encoder = copy_state_dict(checkpoint['ID_encoder'], Model.ID_encoder)\n Model.Decoder = copy_state_dict(checkpoint['Decoder'], Model.Decoder)\n Model.mfcc_encoder = copy_state_dict(checkpoint['mfcc_encoder'], Model.mfcc_encoder)\n Model.lip_feature_encoder = copy_state_dict(checkpoint['lip_feature_encoder'], Model.lip_feature_encoder)\n\n print(\"=> loaded checkpoint '{}' (step {})\"\n .format(resume_path, checkpoint['step']))\n return Model, total_steps, epoch\n else:\n print(\"=> no checkpoint found at '{}'\".format(resume_path))\n\ndef load_test_checkpoint_nolip(resume_path, Model):\n resume_path = resume_path\n if os.path.isfile(resume_path):\n print(\"=> loading checkpoint '{}'\".format(resume_path))\n checkpoint = torch.load(resume_path)\n total_steps = checkpoint['step']\n epoch = checkpoint['epoch']\n Model.ID_encoder = copy_state_dict_add_module(checkpoint['ID_encoder'], Model.ID_encoder)\n Model.Decoder = copy_state_dict_add_module(checkpoint['Decoder'], Model.Decoder)\n Model.mfcc_encoder = copy_state_dict_add_module(checkpoint['mfcc_encoder'], Model.mfcc_encoder)\n #Model.lip_feature_encoder = copy_state_dict(checkpoint['lip_feature_encoder'], Model.lip_feature_encoder)\n\n print(\"=> loaded checkpoint '{}' (step {})\"\n .format(resume_path, checkpoint['step']))\n return Model, total_steps, epoch\n\n \ndef load_test_checkpoint_combine(resume_path, davs_path, Model):\n resume_path = resume_path\n if os.path.isfile(resume_path):\n print(\"=> loading checkpoint '{}'\".format(resume_path))\n 
checkpoint = torch.load(resume_path)\n checkpoint2 = torch.load(davs_path)\n total_steps = checkpoint['step']\n epoch = checkpoint['epoch']\n Model.ID_encoder = copy_state_dict(checkpoint2['ID_encoder'], Model.ID_encoder)\n Model.Decoder = copy_state_dict(checkpoint2['Decoder'], Model.Decoder)\n #Model.mfcc_encoder = copy_state_dict_add_module(checkpoint['mfcc_encoder'], Model.mfcc_encoder)\n Model.mfcc_encoder = copy_state_dict(checkpoint2['mfcc_encoder'], Model.mfcc_encoder)\n #Model.lip_feature_encoder = copy_state_dict(checkpoint['lip_feature_encoder'], Model.lip_feature_encoder)\n\n print(\"=> loaded checkpoint '{}' (step {})\"\n .format(resume_path, checkpoint['step']))\n return Model, total_steps, epoch\n\n else:\n print(\"=> no checkpoint found at '{}'\".format(resume_path))",
"import time\nfrom Options_all import BaseOptions\nfrom util import util\nfrom util.visualizer import Visualizer\nfrom torch.utils.data import DataLoader\nimport os\nimport ntpath\n\n\nos.environ['CUDA_VISIBLE_DEVICES'] = '0'\n\nopt = BaseOptions().parse()\nopt.test_type = 'audio'\n#mfccs_root = '/media/h2/GMT7/Work/ForResults/mfccs/'\nmfccs_root = './inputvideos/'\nif opt.test_type == 'audio':\n mfccs_root = './inputaudios/'\naudios = os.listdir(mfccs_root)\n# path to audios\nopt.test_root = mfccs_root + audios[0]\n#opt.test_root = './0572_0019_0003/audio'\n\nopt.test_audio_video_length = len(os.listdir(opt.test_root))-1\nopt.test_A_path = './faces/1vs1'\n\nif opt.test_type == 'audio':\n import Test_Gen_Models.Test_Audio_Model as Gen_Model\n from Dataloader.Test_load_audio import Test_VideoFolder\nelse:\n raise('test type select error')\n\nopt.nThreads = 1 # test code only supports nThreads = 1\nopt.batchSize = 1 # test code only supports batchSize = 1\nopt.sequence_length = 1\n#test_nums = [1, 2, 3, 4] # choose input identity images\ntest_names = os.listdir(opt.test_A_path)\n\nmodel = Gen_Model.GenModel(opt)\n# _, _, start_epoch = util.load_test_checkpoint(opt.test_resume_path, model)\nstart_epoch = opt.start_epoch\nvisualizer = Visualizer(opt)\n# find the checkpoint's path name without the 'checkpoint.pth.tar'\npath_name = ntpath.basename(opt.test_resume_path)[:-19]\nweb_dir = os.path.join(opt.results_dir, path_name, '%s_%s' % ('test', start_epoch))\nfor ado in audios:\n opt.test_root = mfccs_root + ado\n opt.test_audio_video_length = len(os.listdir(opt.test_root))-1\n for i in test_names:\n if i.split('.')[0] != ado:\n continue\n web_dir = os.path.join(opt.results_dir, path_name, '%s_%s' % (ado.split('_')[-1].split('.')[0], i.split('.')[0]))\n A_path = os.path.join(opt.test_A_path, i)\n test_folder = Test_VideoFolder(root=opt.test_root, A_path=A_path, config=opt)\n test_dataloader = DataLoader(test_folder, batch_size=1,\n shuffle=False, num_workers=1)\n model, _, start_epoch = util.load_test_checkpoint(opt.test_resume_path, model)\n\n # inference during test\n\n for i2, data in enumerate(test_dataloader):\n if i2 < 5:\n model.set_test_input(data)\n model.test_train()\n\n # test\n start = time.time()\n for i3, data in enumerate(test_dataloader):\n model.set_test_input(data)\n model.test()\n visuals = model.get_current_visuals()\n img_path = model.get_image_paths()\n visualizer.save_images_test(web_dir, visuals, img_path, i3, opt.test_num)\n end = time.time()\n print('finish processing in %03f seconds' % (end - start))\n\n"
] |
[
[
"torch.abs",
"numpy.min",
"torch.load",
"numpy.median",
"numpy.max",
"numpy.std",
"numpy.mean",
"numpy.transpose"
],
[
"torch.utils.data.DataLoader"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
bentzinir/baselines
|
[
"5ed3f9a37272f0b7d9db3daac5e1b0a442588ff3"
] |
[
"baselines/her/paper_utils/env_scripts.py"
] |
[
"import sys\nfrom baselines.common.cmd_util import common_arg_parser\nfrom baselines.run import parse_cmdline_kwargs\nfrom baselines.her.metric_diversification import MetricDiversifier\nfrom baselines.her.cover_measure import init_from_point\nimport gym\n#import gym_maze\nimport numpy as np\nimport time\nfrom baselines.her.paper_utils import utils as paper_utils\nnp.set_printoptions(precision=2)\nimport random\n\n\ndef set_goal(env, scrb):\n if len(scrb.used_slots()) == 0:\n return env.reset()\n return env.set_goal(goal=scrb.draw(1)[0]['ag'])\n # return env.set_goal(goal=random.choice(scrb)['ag'])\n\n\ndef reset_env(env, scrb, mode='intrinsic'):\n if mode == 'intrinsic':\n return env.reset()\n elif mode == 'extrinsic':\n assert 'cover_path' is not None, 'missing cover path argument'\n pnt = scrb.draw(1)[0]\n # pnt = random.choice(scrb)\n if pnt is None:\n return env.reset()\n obs = init_from_point(env, pnt)\n return obs\n elif mode == 'random':\n obs = env.reset()\n qpos = obs[\"qpos\"]\n qvel = obs[\"qvel\"]\n ex_init = {'o': None, 'qpos': np.zeros_like(qpos), 'qvel': np.zeros_like(qvel), 'g': None}\n env.reset(ex_init=ex_init)\n\n\ndef scan_cover(env, action_repetition=1, cover_path=None, **kwargs):\n scrb = MetricDiversifier(k=100, load_model=cover_path, reward_func=None)\n obs = reset_env(env, scrb, mode='intrinsic')\n for i in range(100000):\n env.render()\n time.sleep(.1)\n if i % action_repetition == 0:\n a = env.action_space.sample()\n obs, reward, done, info = env.step(a)\n if i % 1 == 0:\n ob = reset_env(env, scrb, mode='extrinsic')\n # print(np.linalg.norm(ob[\"qvel\"]))\n time.sleep(.5)\n env.close()\n\n\ndef plain_loop(env, action_repetition=1, clip_range=0.5, **kwargs):\n reset_env(env, scrb=None, mode='intrinsic')\n print(f\"Obs: {env.observation_space['observation'].shape}, goal: {env.observation_space['achieved_goal'].shape}, action: {env.action_space.shape}\")\n sys.exit()\n i = 0\n while True:\n i += 1\n env.render()\n time.sleep(.1)\n if i % action_repetition == 0:\n a = np.clip(env.action_space.sample(), -clip_range, clip_range)\n o, r, d, info = env.step(a)\n if i % 1000 == 0:\n reset_env(env, scrb=None, mode='intrinsic')\n print(f\"Reset\")\n i = 0\n env.close()\n\n\ndef play_policy(env, env_id, T=20, load_path=None, cover_path=None, semi_metric=False, eps_greedy=False, **kwargs):\n policy, reward_fun = paper_utils.load_policy(env_id, **kwargs)\n paper_utils.load_model(load_path=load_path)\n scrb = MetricDiversifier(k=100, load_model=cover_path, reward_func=None)\n obs = reset_env(env, scrb, mode='intrinsic')\n i = 0\n while True:\n i += 1\n env.render()\n time.sleep(.01)\n action, _, state, _ = policy.step(obs)\n if eps_greedy and i % 10 == 0:\n action = env.action_space.sample()\n obs, reward, done, info = env.step(action)\n\n success = info['is_success']\n timeout = i % T == 0\n done = success or timeout\n if done:\n # input(f\"success: {success}, invalid: {invalid}, timeout: {timeout}\")\n if scrb is None or semi_metric:\n reset_env(env, scrb, mode='intrinsic')\n else:\n reset_env(env, scrb, mode='extrinsic')\n obs = set_goal(env, scrb)\n i = 0\n env.close()\n\n\ndef exp1_loop(env, scrb, policy, eps_greedy, T, semi_metric, nsteps):\n\n obs = reset_env(env, scrb, mode='intrinsic')\n\n while len(scrb.open_slots()) > 0:\n pnt = scrb.init_record(o=obs['observation'].copy())\n scrb.load_new_point(pnt, d_func=policy.get_actions)\n assert not scrb.dilute_overlaps\n\n reached_goal = False\n t = 0\n counter = 0\n times = []\n radii = []\n while counter < nsteps:\n # 1. 
environment step\n action, _, state, _ = policy.step(obs)\n if reached_goal or (eps_greedy and t % 10 == 0):\n action = env.action_space.sample()\n\n obs, reward, done, info = env.step(action)\n success = info['is_success']\n reached_goal = reached_goal or success\n\n # 2. GPI update\n pnt = scrb.init_record(o=obs['observation'].copy())\n scrb.load_new_point(pnt, d_func=policy.get_actions)\n\n r_pack = env._max_episode_steps + scrb.M.min()\n times.append(counter)\n radii.append(r_pack)\n\n if counter % 1000 == 0:\n ...\n # scrb.save(message=counter)\n # print(f\"counter: {counter}, cover size: {scrb.current_size}, packing radius: {r_pack}\")\n\n # TODO: add back after debug\n # scrb.age += 1\n\n # 3. measure packing radius\n ...\n\n # 4. episodic reset\n if t % T == 0:\n t = 0\n reached_goal = False\n if semi_metric:\n reset_env(env, scrb, mode='intrinsic')\n else:\n reset_env(env, scrb, mode='extrinsic')\n obs = set_goal(env, scrb)\n counter += 1\n t += 1\n\n return times, radii\n\n\ndef experiment1(env, env_id, T=100, k=50, load_path=None, save_path=None, semi_metric=False, eps_greedy=False,\n dilute_overlaps=True, ntrials=5, nsteps=10000, random_mode=False, **kwargs):\n\n policy, reward_fun = paper_utils.load_policy(env_id, **kwargs)\n paper_utils.load_model(load_path=load_path)\n if semi_metric:\n metric_str = \"semi_metric\"\n else:\n metric_str = \"full_metric\"\n\n for random_mode in [True, False]:\n if random_mode:\n random_str = 'random'\n alpha = 0\n else:\n random_str = 'scrb'\n alpha = 0.5\n\n log_path = f\"{save_path}/{metric_str}_{random_str}\"\n\n results = dict()\n k_vec = [10, 20, 30, 40, 50]\n # k_vec = [50]\n for k in k_vec:\n results[k] = dict()\n k_radii = []\n for trial_idx in range(ntrials):\n scrb = MetricDiversifier(k=k, vis=False, dilute_overlaps=dilute_overlaps, vis_coords=[0, 1], save_path=log_path,\n reward_func=reward_fun, random_mode=random_mode)\n times, radii = exp1_loop(env, scrb, policy, eps_greedy, T, semi_metric, nsteps)\n k_radii.append(radii)\n print(f\"k: {k}, trial: {trial_idx}/{ntrials}, nsteps: {nsteps}\")\n results[k][\"mean\"] = np.asarray(k_radii).mean(axis=0)\n results[k][\"std\"] = np.asarray(k_radii).std(axis=0)\n results[k][\"time\"] = times\n\n paper_utils.exp1_to_figure(results, save_directory=log_path, alpha=alpha, message=f\"{metric_str}_{random_str}\")\n\n exp1_loop(env, scrb, policy, eps_greedy, T, semi_metric, 50)\n paper_utils.exp1_overlayed_figure(env, scrb, save_directory=log_path, message=f\"{metric_str}_{random_str}\")\n\n\ndef exp2_loop(env, policy, models_path, epochs, ngoals, max_steps, vis=False, eps_greedy=False):\n goals = [env.env.draw_goal() for _ in range(ngoals)]\n recall_at_epoch = []\n # epochs = paper_utils.list_epochs(models_path)\n # epochs.sort()\n # epochs = [epoch for epoch in epochs if epoch % 25 == 0]\n # epochs = epochs[:2]\n for epoch_idx in epochs:\n reached = np.zeros(len(goals))\n paper_utils.load_model(load_path=f\"{models_path}/epoch_{epoch_idx}.model\")\n for gidx, goal in enumerate(goals):\n if reached[gidx]:\n continue\n obs = reset_env(env, scrb=None, mode='intrinsic')\n env.env.set_goal(goal=goal)\n for t in range(max_steps):\n if reached[gidx]:\n break\n if vis:\n env.render()\n time.sleep(.01)\n action, _, state, _ = policy.step(obs)\n if eps_greedy and t % 10 == 0:\n action = env.action_space.sample()\n obs, reward, done, info = env.step(action)\n if info['is_success']:\n reached[gidx] = 1\n recall_at_epoch.append(reached.mean())\n return epochs, recall_at_epoch\n\n\ndef experiment2(env, 
env_id, T=100, scrb_models_path=None, plain_models_path=None, save_path=None, eps_greedy=False, ntrials=5, ngoals=100, vis=False, **kwargs):\n policy, reward_fun = paper_utils.load_policy(env_id, **kwargs)\n\n scrb_epochs = paper_utils.list_epochs(scrb_models_path)\n plain_epochs = paper_utils.list_epochs(plain_models_path)\n\n scrb_epochs.sort()\n plain_epochs.sort()\n\n scrb_epochs = [epoch for epoch in scrb_epochs if epoch % 50 == 0]\n plain_epochs = [epoch for epoch in plain_epochs if epoch % 50 == 0]\n nepochs = np.minimum(len(scrb_epochs), len(plain_epochs))\n epochs = scrb_epochs[:nepochs]\n print(epochs)\n results = dict()\n for scrb in [True, False]:\n if scrb:\n scrb_str = 'scrb'\n method_name = r'$\\alpha =$' + f\"{0.5}\"\n models_path = scrb_models_path\n else:\n scrb_str = 'naive'\n method_name = r'$\\alpha =$' + f\"{0.0}\"\n models_path = plain_models_path\n recalls = []\n results[scrb_str] = dict()\n for trial_idx in range(ntrials):\n print(f\"------------------experiment 2: trial #{trial_idx}-----------------\")\n epochs, recall = exp2_loop(env, policy, models_path, epochs, ngoals, max_steps=T, vis=vis, eps_greedy=eps_greedy)\n recalls.append(recall)\n\n results[scrb_str][\"mean\"] = np.asarray(recalls).mean(axis=0)\n results[scrb_str][\"std\"] = np.asarray(recalls).std(axis=0)\n results[scrb_str]['method_name'] = method_name\n results[scrb_str][\"epochs\"] = epochs\n\n paper_utils.exp3_to_figure(results, save_directory=save_path, message=f\"{env_id}\")\n\n\ndef exp3_loop(env, policy, models_path, covers_path, ngoals, max_steps, semi_metric, vis=False, eps_greedy=False):\n\n variance_at_epoch = []\n min_dists = []\n hit_times = []\n epochs = paper_utils.list_epochs(covers_path)\n epochs.sort()\n epochs = [epoch for epoch in epochs if epoch % 25 == 0]\n\n # epochs = epochs[:2]\n for epoch_idx in epochs:\n model_path = f\"{models_path}/epoch_{epochs[-1]}.model\"\n paper_utils.load_model(load_path=model_path)\n cover_path = f\"{covers_path}/epoch_{epoch_idx}.json\"\n scrb = MetricDiversifier(k=100, vis=False, vis_coords=[0, 1], save_path=None, load_model=cover_path, reward_func=None)\n min_dist = scrb.M.min()\n pnts = scrb.draw(ngoals, replace=False)\n reached = np.zeros(len(pnts))\n hit_time = [max_steps for _ in range(ngoals)]\n reached_list = []\n for pidx, pnt in enumerate(pnts):\n goal = pnt['ag']\n if reached[pidx]:\n continue\n if semi_metric:\n obs = reset_env(env, scrb=scrb, mode='intrinsic')\n else:\n refidx=pidx\n while refidx == pidx:\n refidx = random.choice([i for i in range(len(pnts))])\n refpnt = pnts[refidx]\n obs = init_from_point(env, refpnt)\n env.env.set_goal(goal=np.asarray(goal))\n for t in range(max_steps):\n if reached[pidx]:\n break\n if vis:\n env.render()\n time.sleep(.01)\n action, _, state, _ = policy.step(obs)\n if eps_greedy and t % 10 == 0:\n action = env.action_space.sample()\n obs, reward, done, info = env.step(action)\n if info['is_success']:\n reached[pidx] = 1\n reached_list.append(goal)\n hit_time[pidx] = t\n if len(reached_list) == 0:\n variance_at_epoch.append(0)\n else:\n variance_at_epoch.append(np.asarray(reached_list).std())\n min_dists.append(min_dist)\n hit_times.append(np.mean(hit_time))\n return epochs, variance_at_epoch, min_dists, hit_times\n\n\ndef experiment3(env, env_id, T=100, models_path=None, covers_path=None, save_path=None, eps_greedy=False, semi_metric=False, ntrials=5, ngoals=100, vis=False, **kwargs):\n policy, reward_fun = paper_utils.load_policy(env_id, **kwargs)\n\n metric = 'mean_hit_time'\n results = dict()\n 
for scrb in [True, False]:\n if not scrb:\n continue\n if scrb:\n scrb_str = 'scrb'\n method_name = r'$\\alpha =$' + f\"{0.5}\"\n else:\n scrb_str = 'naive'\n method_name = r'$\\alpha =$' + f\"{0.0}\"\n variances = []\n min_dists = []\n mean_hit_times = []\n results[scrb_str] = dict()\n for trial_idx in range(ntrials):\n print(f\"------------------experiment 3: trial #{trial_idx}-----------------\")\n epochs, variance, min_dist, mean_hit_time = exp3_loop(env, policy, models_path, covers_path, ngoals, semi_metric=semi_metric, max_steps=T, vis=vis, eps_greedy=eps_greedy)\n variances.append(variance)\n min_dists.append(min_dist)\n mean_hit_times.append(mean_hit_time)\n if metric == 'variance':\n results[scrb_str][\"mean\"] = np.asarray(variances).mean(axis=0)\n results[scrb_str][\"std\"] = np.asarray(variances).std(axis=0)\n elif metric == 'min_dists':\n results[scrb_str][\"mean\"] = np.asarray(min_dists).mean(axis=0)\n results[scrb_str][\"std\"] = np.asarray(min_dists).std(axis=0)\n elif metric == 'mean_hit_time':\n results[scrb_str][\"mean\"] = np.asarray(mean_hit_times).mean(axis=0)\n results[scrb_str][\"std\"] = np.asarray(mean_hit_times).std(axis=0)\n results[scrb_str]['method_name'] = method_name\n results[scrb_str][\"epochs\"] = epochs\n\n paper_utils.exp3_to_figure(results, save_directory=save_path, message=f\"{env_id}_{metric}\")\n\n\ndef exp4_loop(env, policy, models_path, covers_path, ngoals, max_steps, semi_metric, vis=False, eps_greedy=False):\n recall_at_epoch = []\n hit_time_at_epoch = []\n model_epochs = paper_utils.list_epochs(models_path)\n cover_epochs = paper_utils.list_epochs(covers_path)\n\n model_epochs = [epoch for epoch in model_epochs if epoch % 25 == 0]\n cover_epochs = [epoch for epoch in cover_epochs if epoch % 25 == 0]\n n_epochs = np.minimum(len(model_epochs), len(cover_epochs))\n\n epochs = model_epochs[:n_epochs]\n for epoch_idx in epochs:\n\n cover_path = f\"{covers_path}/epoch_{epoch_idx}.json\"\n scrb = MetricDiversifier(k=100, load_model=cover_path, reward_func=None)\n ngoals = np.minimum(ngoals, scrb.k)\n paper_utils.load_model(load_path=f\"{models_path}/epoch_{epoch_idx}.model\")\n pnts = scrb.draw(ngoals, replace=False)\n reached = np.zeros(len(pnts))\n hit_time = [max_steps for _ in range(len(pnts))]\n for pidx, pnt in enumerate(pnts):\n goal = pnt['ag']\n if reached[pidx]:\n continue\n if semi_metric:\n obs = reset_env(env, scrb=scrb, mode='intrinsic')\n else:\n refidx = pidx\n while refidx == pidx:\n refidx = random.choice([i for i in range(len(pnts))])\n refpnt = pnts[refidx]\n obs = init_from_point(env, refpnt)\n env.env.set_goal(goal=np.asarray(goal))\n for t in range(max_steps):\n if reached[pidx]:\n break\n if vis:\n env.render()\n time.sleep(.01)\n action, _, state, _ = policy.step(obs)\n if eps_greedy and t % 10 == 0:\n action = env.action_space.sample()\n obs, reward, done, info = env.step(action)\n if info['is_success']:\n reached[pidx] = 1\n hit_time[pidx] = t\n recall_at_epoch.append(reached.mean())\n hit_time_at_epoch.append(np.mean(hit_time))\n return epochs, recall_at_epoch, hit_time_at_epoch\n\n\ndef experiment4(env, env_id, T=100, models_path_a=None, models_path_b=None, covers_path_a=None, covers_path_b=None,\n save_path=None, eps_greedy=False, ntrials=5, ngoals=100, vis=False, semi_metric=False, **kwargs):\n policy, reward_fun = paper_utils.load_policy(env_id, **kwargs)\n\n results = dict()\n\n ab_recalls = []\n ba_recalls = []\n ab_hit_times = []\n ba_hit_times = []\n for metric in ['coverage', 'hit_time']:\n results[f\"{metric}\"] 
= dict()\n results[f\"{metric}\"] = dict()\n for type in [\"a2b\", \"b2a\"]:\n results[f\"{metric}\"][f\"{type}\"] = dict()\n results[f\"{metric}\"][f\"{type}\"] = dict()\n\n for trial_idx in range(ntrials):\n print(f\"------------------experiment 4: trial #{trial_idx}-----------------\")\n\n # A - > B\n epochs, ab_recall, ab_hit_time = exp4_loop(env, policy, models_path_a, covers_path_b, semi_metric=semi_metric, ngoals=ngoals, max_steps=T, vis=vis, eps_greedy=eps_greedy)\n ab_recalls.append(ab_recall)\n ab_hit_times.append(ab_hit_time)\n\n # B - > A\n epochs, ba_recall, ba_hit_time = exp4_loop(env, policy, models_path_b, covers_path_a, semi_metric=semi_metric, ngoals=ngoals, max_steps=T, vis=vis, eps_greedy=eps_greedy)\n ba_recalls.append(ba_recall)\n ba_hit_times.append(ba_hit_time)\n\n for metric in ['coverage', 'hit_time']:\n if metric == 'coverage':\n ab_values = ab_recalls\n ba_values = ba_recalls\n elif metric == 'hit_time':\n ab_values = ab_hit_times\n ba_values = ba_hit_times\n results[metric][\"a2b\"][\"mean\"] = np.asarray(ab_values).mean(axis=0)\n results[metric][\"a2b\"][\"std\"] = np.asarray(ab_values).std(axis=0)\n results[metric][\"a2b\"]['method_name'] = r'$\\alpha =$' + f\"{0.0}\"\n results[metric][\"a2b\"][\"epochs\"] = epochs\n\n results[metric][\"b2a\"][\"mean\"] = np.asarray(ba_values).mean(axis=0)\n results[metric][\"b2a\"][\"std\"] = np.asarray(ba_values).std(axis=0)\n results[metric][\"b2a\"]['method_name'] = r'$\\alpha =$' + f\"{0.5}\"\n results[metric][\"b2a\"][\"epochs\"] = epochs\n\n paper_utils.exp3_to_figure(results[f\"{metric}\"], save_directory=save_path, message=f\"{env_id}_{metric}\")\n\n\nif __name__ == '__main__':\n arg_parser = common_arg_parser()\n args, unknown_args = arg_parser.parse_known_args(sys.argv)\n extra_args = parse_cmdline_kwargs(unknown_args)\n environment = gym.make(args.env, **extra_args)\n if extra_args['option'] == 'scan_cover':\n scan_cover(environment, **extra_args)\n elif extra_args['option'] == 'plain_loop':\n plain_loop(environment, **extra_args)\n elif extra_args['option'] == 'play_policy':\n assert extra_args['load_path'] is not None\n play_policy(env=environment, env_id=args.env, **extra_args)\n elif extra_args['option'] == 'experiment1':\n assert extra_args['load_path'] is not None, 'load path is none'\n assert args.save_path is not None, 'save path is none'\n experiment1(env=environment, env_id=args.env, save_path=args.save_path, **extra_args)\n elif extra_args['option'] == 'experiment2':\n assert extra_args['scrb_models_path'] is not None, 'models path is none'\n assert extra_args['plain_models_path'] is not None, 'models path is none'\n assert args.save_path is not None, 'save path is none'\n experiment2(env=environment, env_id=args.env, save_path=args.save_path, **extra_args)\n elif extra_args['option'] == 'experiment3':\n assert extra_args['models_path'] is not None, 'models path is none'\n assert extra_args['covers_path'] is not None, 'covers path is none'\n assert args.save_path is not None, 'save path is none'\n experiment3(env=environment, env_id=args.env, save_path=args.save_path, **extra_args)\n elif extra_args['option'] == 'experiment4':\n assert extra_args['models_path_a'] is not None, 'models path is none'\n assert extra_args['models_path_b'] is not None, 'models path is none'\n assert extra_args['covers_path_a'] is not None, 'covers path is none'\n assert extra_args['covers_path_b'] is not None, 'covers path is none'\n assert args.save_path is not None, 'save path is none'\n experiment4(env=environment, 
env_id=args.env, save_path=args.save_path, **extra_args)"
] |
[
[
"numpy.minimum",
"numpy.asarray",
"numpy.set_printoptions",
"numpy.zeros_like",
"numpy.mean"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
hank-ai/gradio
|
[
"00af5ec1abbaa3160ec45f58a22909f6453f7182"
] |
[
"gradio/interface.py"
] |
[
"\"\"\"\nThis is the core file in the `gradio` package, and defines the Interface class, including methods for constructing the\ninterface using the input and output types.\n\"\"\"\n\nimport gradio\nfrom gradio.inputs import InputComponent\nfrom gradio.outputs import OutputComponent\nfrom gradio import networking, strings, utils\nfrom gradio.interpretation import quantify_difference_in_label\nfrom gradio import encryptor\nimport pkg_resources\nimport requests\nimport random\nimport time\nimport webbrowser\nimport inspect\nimport sys\nimport weakref\nimport analytics\nimport numpy as np\nimport os\nimport copy\nimport markdown2\nimport json\nfrom getpass import getpass\n\nanalytics.write_key = \"uxIFddIEuuUcFLf9VgH2teTEtPlWdkNy\"\nanalytics_url = 'https://api.gradio.app/'\nip_address = networking.get_local_ip_address()\n\nJSON_PATH = os.path.join(os.path.dirname(gradio.__file__), \"launches.json\")\n\nclass Interface:\n \"\"\"\n Interfaces are created with Gradio using the `gradio.Interface()` function.\n \"\"\"\n instances = weakref.WeakSet()\n\n @classmethod\n def get_instances(cls):\n \"\"\"\n :return: list of all current instances.\n \"\"\"\n return list(\n Interface.instances)\n\n def __init__(self, fn, inputs, outputs, verbose=False, examples=None,\n examples_per_page=10, live=False,\n layout=\"horizontal\", show_input=True, show_output=True,\n capture_session=False, interpretation=None,\n title=None, description=None, article=None, thumbnail=None, \n css=None, server_port=7860, server_name=networking.LOCALHOST_NAME,\n allow_screenshot=True, allow_flagging=True, flagging_options=None, encrypt=False,\n show_tips=True, embedding=None, flagging_dir=\"flagged\", analytics_enabled=True):\n\n \"\"\"\n Parameters:\n fn (Callable): the function to wrap an interface around.\n inputs (Union[str, List[Union[str, InputComponent]]]): a single Gradio input component, or list of Gradio input components. Components can either be passed as instantiated objects, or referred to by their string shortcuts. The number of input components should match the number of parameters in fn.\n outputs (Union[str, List[Union[str, OutputComponent]]]): a single Gradio output component, or list of Gradio output components. Components can either be passed as instantiated objects, or referred to by their string shortcuts. The number of output components should match the number of values returned by fn.\n verbose (bool): whether to print detailed information during launch.\n examples (Union[List[List[Any]], str]): sample inputs for the function; if provided, appears below the UI components and can be used to populate the interface. Should be nested list, in which the outer list consists of samples and each inner list consists of an input corresponding to each input component. A string path to a directory of examples can also be provided. If there are multiple input components, a log.csv file must be present in the directory to link corresponding inputs.\n examples_per_page (int): If examples are provided, how many to display per page.\n live (bool): whether the interface should automatically reload on change.\n layout (str): Layout of input and output panels. 
\"horizontal\" arranges them as two columns of equal height, \"unaligned\" arranges them as two columns of unequal height, and \"vertical\" arranges them vertically.\n capture_session (bool): if True, captures the default graph and session (needed for Tensorflow 1.x)\n interpretation (Union[Callable, str]): function that provides interpretation explaining prediction output. Pass \"default\" to use built-in interpreter. \n title (str): a title for the interface; if provided, appears above the input and output components.\n description (str): a description for the interface; if provided, appears above the input and output components.\n article (str): an expanded article explaining the interface; if provided, appears below the input and output components. Accepts Markdown and HTML content.\n thumbnail (str): path to image or src to use as display picture for models listed in gradio.app/hub\n css (str): custom css or path to custom css file to use with interface.\n server_port (int): will start gradio app on this port (if available) \n server_name (str): to make app accessible on local network set to \"0.0.0.0\".\n allow_screenshot (bool): if False, users will not see a button to take a screenshot of the interface.\n allow_flagging (bool): if False, users will not see a button to flag an input and output.\n flagging_options (List[str]): if not None, provides options a user must select when flagging.\n encrypt (bool): If True, flagged data will be encrypted by key provided by creator at launch\n flagging_dir (str): what to name the dir where flagged data is stored.\n show_tips (bool): if True, will occasionally show tips about new Gradio features\n \"\"\"\n\n def get_input_instance(iface):\n if isinstance(iface, str):\n shortcut = InputComponent.get_all_shortcut_implementations()[iface]\n return shortcut[0](**shortcut[1])\n elif isinstance(iface, InputComponent):\n return iface\n else:\n raise ValueError(\"Input interface must be of type `str` or \"\n \"`InputComponent`\")\n\n def get_output_instance(iface):\n if isinstance(iface, str):\n shortcut = OutputComponent.get_all_shortcut_implementations()[iface]\n return shortcut[0](**shortcut[1])\n elif isinstance(iface, OutputComponent):\n return iface\n else:\n raise ValueError(\n \"Output interface must be of type `str` or \"\n \"`OutputComponent`\"\n )\n\n if isinstance(inputs, list):\n self.input_interfaces = [get_input_instance(i) for i in inputs]\n else:\n self.input_interfaces = [get_input_instance(inputs)]\n if isinstance(outputs, list):\n self.output_interfaces = [get_output_instance(i) for i in outputs]\n else:\n self.output_interfaces = [get_output_instance(outputs)]\n if not isinstance(fn, list):\n fn = [fn]\n\n self.output_interfaces *= len(fn)\n self.predict = fn\n self.function_names = [func.__name__ for func in fn]\n self.verbose = verbose\n self.status = \"OFF\"\n self.live = live\n self.layout = layout\n self.show_input = show_input\n self.show_output = show_output\n self.flag_hash = random.getrandbits(32)\n self.capture_session = capture_session\n self.interpretation = interpretation \n self.session = None\n self.server_name = server_name\n self.title = title\n self.description = description\n if article is not None:\n article = utils.readme_to_html(article)\n article = markdown2.markdown(article)\n self.article = article\n self.thumbnail = thumbnail\n if css is not None and os.path.exists(css):\n with open(css) as css_file:\n self.css = css_file.read()\n else:\n self.css = css\n if examples is None or isinstance(examples, str) 
or (isinstance(examples, list) and (len(examples) == 0 or isinstance(examples[0], list))):\n self.examples = examples\n else:\n raise ValueError(\"Examples argument must either be a directory or a nested list, where each sublist represents a set of inputs.\")\n self.examples_per_page = examples_per_page\n self.server_port = server_port\n self.simple_server = None\n self.allow_screenshot = allow_screenshot\n self.allow_flagging = os.getenv(\"GRADIO_FLAGGING\") or allow_flagging\n self.flagging_options = flagging_options \n self.flagging_dir = flagging_dir\n self.encrypt = encrypt\n Interface.instances.add(self)\n self.analytics_enabled=analytics_enabled\n self.save_to = None\n self.share = None\n self.embedding = embedding\n self.show_tips = show_tips\n\n data = {'fn': fn,\n 'inputs': inputs,\n 'outputs': outputs,\n 'live': live,\n 'capture_session': capture_session,\n 'ip_address': ip_address,\n 'interpretation': interpretation,\n 'embedding': embedding,\n 'allow_flagging': allow_flagging,\n 'allow_screenshot': allow_screenshot,\n }\n\n if self.capture_session:\n try:\n import tensorflow as tf\n self.session = tf.get_default_graph(), \\\n tf.keras.backend.get_session()\n except (ImportError, AttributeError):\n # If they are using TF >= 2.0 or don't have TF,\n # just ignore this.\n pass\n\n if self.allow_flagging:\n os.makedirs(self.flagging_dir, exist_ok=True)\n\n if self.analytics_enabled:\n try:\n requests.post(analytics_url + 'gradio-initiated-analytics/',\n data=data, timeout=3)\n except (requests.ConnectionError, requests.exceptions.ReadTimeout):\n pass # do not push analytics if no network\n\n def get_config_file(self):\n config = {\n \"input_interfaces\": [\n (iface.__class__.__name__.lower(), iface.get_template_context())\n for iface in self.input_interfaces],\n \"output_interfaces\": [\n (iface.__class__.__name__.lower(), iface.get_template_context())\n for iface in self.output_interfaces],\n \"function_count\": len(self.predict),\n \"live\": self.live,\n \"examples_per_page\": self.examples_per_page,\n \"layout\": self.layout,\n \"show_input\": self.show_input,\n \"show_output\": self.show_output,\n \"title\": self.title,\n \"description\": self.description,\n \"article\": self.article,\n \"thumbnail\": self.thumbnail,\n \"allow_screenshot\": self.allow_screenshot,\n \"allow_flagging\": self.allow_flagging,\n \"flagging_options\": self.flagging_options,\n \"allow_interpretation\": self.interpretation is not None,\n \"allow_embedding\": self.embedding is not None,\n }\n try:\n param_names = inspect.getfullargspec(self.predict[0])[0]\n for iface, param in zip(config[\"input_interfaces\"], param_names):\n if not iface[1][\"label\"]:\n iface[1][\"label\"] = param.replace(\"_\", \" \")\n for i, iface in enumerate(config[\"output_interfaces\"]):\n outputs_per_function = int(len(self.output_interfaces) / len(self.predict))\n function_index = i // outputs_per_function\n component_index = i - function_index * outputs_per_function\n ret_name = \"Output \" + str(component_index + 1) if outputs_per_function > 1 else \"Output\"\n if not iface[1][\"label\"]:\n iface[1][\"label\"] = ret_name\n if len(self.predict) > 1:\n iface[1][\"label\"] = self.function_names[function_index].replace(\"_\", \" \") + \": \" + iface[1][\"label\"]\n \n except ValueError:\n pass\n if self.examples is not None and not isinstance(self.examples, str):\n config[\"examples\"] = self.examples\n return config\n\n def run_prediction(self, processed_input, return_duration=False):\n predictions = []\n durations = []\n for 
predict_fn in self.predict:\n start = time.time()\n if self.capture_session and self.session is not None:\n graph, sess = self.session\n with graph.as_default(), sess.as_default():\n prediction = predict_fn(*processed_input)\n else:\n try:\n prediction = predict_fn(*processed_input)\n except ValueError as exception:\n if str(exception).endswith(\"is not an element of this graph.\"):\n raise ValueError(strings.en[\"TF1_ERROR\"])\n else:\n raise exception\n duration = time.time() - start\n\n if len(self.output_interfaces) == len(self.predict):\n prediction = [prediction]\n durations.append(duration)\n predictions.extend(prediction)\n \n if return_duration:\n return predictions, durations\n else:\n return predictions\n\n def process(self, raw_input):\n \"\"\"\n :param raw_input: a list of raw inputs to process and apply the prediction(s) on.\n processed output: a list of processed outputs to return as the prediction(s).\n duration: a list of time deltas measuring inference time for each prediction fn.\n \"\"\"\n processed_input = [input_interface.preprocess(raw_input[i])\n for i, input_interface in enumerate(self.input_interfaces)]\n predictions, durations = self.run_prediction(processed_input, return_duration=True)\n processed_output = [output_interface.postprocess(\n predictions[i]) for i, output_interface in enumerate(self.output_interfaces)]\n return processed_output, durations\n \n def embed(self, processed_input):\n if self.embedding == \"default\":\n embeddings = np.concatenate([input_interface.embed(processed_input[i])\n for i, input_interface in enumerate(self.input_interfaces)])\n else:\n embeddings = self.embedding(*processed_input)\n return embeddings\n\n def interpret(self, raw_input):\n \"\"\"\n Runs the interpretation command for the machine learning model. 
Handles both the \"default\" out-of-the-box\n interpretation for a certain set of UI component types, as well as the custom interpretation case.\n :param raw_input: a list of raw inputs to apply the interpretation(s) on.\n \"\"\"\n if self.interpretation == \"default\":\n processed_input = [input_interface.preprocess(raw_input[i])\n for i, input_interface in enumerate(self.input_interfaces)]\n original_output = self.run_prediction(processed_input)\n scores, alternative_outputs = [], []\n for i, x in enumerate(raw_input):\n input_interface = self.input_interfaces[i]\n neighbor_raw_input = list(raw_input)\n neighbor_values, interpret_kwargs, interpret_by_removal = input_interface.get_interpretation_neighbors(x)\n interface_scores = []\n alternative_output = []\n for neighbor_input in neighbor_values:\n neighbor_raw_input[i] = neighbor_input\n processed_neighbor_input = [input_interface.preprocess(neighbor_raw_input[i])\n for i, input_interface in enumerate(self.input_interfaces)]\n neighbor_output = self.run_prediction(processed_neighbor_input)\n processed_neighbor_output = [output_interface.postprocess(\n neighbor_output[i]) for i, output_interface in enumerate(self.output_interfaces)]\n\n alternative_output.append(processed_neighbor_output)\n interface_scores.append(quantify_difference_in_label(self, original_output, neighbor_output))\n alternative_outputs.append(alternative_output)\n if not interpret_by_removal:\n interface_scores = [-score for score in interface_scores]\n scores.append(\n input_interface.get_interpretation_scores(\n raw_input[i], neighbor_values, interface_scores, **interpret_kwargs))\n return scores, alternative_outputs\n else:\n processed_input = [input_interface.preprocess(raw_input[i])\n for i, input_interface in enumerate(self.input_interfaces)]\n interpreter = self.interpretation\n\n if self.capture_session and self.session is not None:\n graph, sess = self.session\n with graph.as_default(), sess.as_default():\n interpretation = interpreter(*processed_input)\n else:\n try:\n interpretation = interpreter(*processed_input)\n except ValueError as exception:\n if str(exception).endswith(\"is not an element of this graph.\"):\n raise ValueError(strings.en[\"TF1_ERROR\"])\n else:\n raise exception\n if len(raw_input) == 1:\n interpretation = [interpretation]\n return interpretation, []\n\n def close(self):\n if self.simple_server and not (self.simple_server.fileno() == -1): # checks to see if server is running\n print(\"Closing Gradio server on port {}...\".format(self.server_port))\n networking.close_server(self.simple_server)\n\n def run_until_interrupted(self, thread, path_to_local_server):\n try:\n while 1:\n pass\n except (KeyboardInterrupt, OSError):\n print(\"Keyboard interruption in main thread... 
closing server.\")\n thread.keep_running = False\n networking.url_ok(path_to_local_server) # Hit the server one more time to close it\n\n def test_launch(self):\n for predict_fn in self.predict:\n print(\"Test launch: {}()...\".format(predict_fn.__name__), end=' ')\n\n raw_input = []\n for input_interface in self.input_interfaces:\n if input_interface.test_input is None: # If no test input is defined for that input interface\n print(\"SKIPPED\")\n break\n else: # If a test input is defined for each interface object\n raw_input.append(input_interface.test_input)\n else:\n self.process(raw_input)\n print(\"PASSED\")\n continue\n\n def launch(self, inline=None, inbrowser=None, share=False, debug=False, auth=None, auth_message=None, private_endpoint=None):\n \"\"\"\n Parameters:\n inline (bool): whether to display in the interface inline on python notebooks.\n inbrowser (bool): whether to automatically launch the interface in a new tab on the default browser.\n share (bool): whether to create a publicly shareable link from your computer for the interface.\n debug (bool): if True, and the interface was launched from Google Colab, prints the errors in the cell output.\n auth (Union[Tuple[str, str], List[Tuple[str, str]]]): If provided, username and password (or list of username-password tuples) required to access interface.\n auth_message (str): If provided, HTML message provided on login page.\n Returns:\n app (flask.Flask): Flask app object\n path_to_local_server (str): Locally accessible link\n share_url (str): Publicly accessible link (if share=True)\n \"\"\"\n # Alert user if a more recent version of the library exists\n utils.version_check()\n\n # Set up local flask server\n config = self.get_config_file()\n self.config = config\n if auth and not isinstance(auth[0], tuple) and not isinstance(auth[0], list):\n auth = [auth]\n self.auth = auth\n self.auth_message = auth_message\n\n # Request key for encryption\n if self.encrypt:\n self.encryption_key = encryptor.get_key(getpass(\"Enter key for encryption: \"))\n\n # Launch local flask server\n server_port, app, thread = networking.start_server(\n self, self.server_name, self.server_port, self.auth)\n path_to_local_server = \"http://{}:{}/\".format(self.server_name, server_port)\n self.server_port = server_port\n self.status = \"RUNNING\"\n self.server = app\n\n # Count number of launches\n launch_counter()\n\n # If running in a colab or not able to access localhost, automatically create a shareable link \n is_colab = utils.colab_check()\n if is_colab or not(networking.url_ok(path_to_local_server)): \n share = True\n if is_colab:\n if debug:\n print(strings.en[\"COLAB_DEBUG_TRUE\"])\n else:\n print(strings.en[\"COLAB_DEBUG_FALSE\"])\n else:\n print(strings.en[\"RUNNING_LOCALLY\"].format(path_to_local_server))\n\n if private_endpoint is not None:\n share = True\n # Set up shareable link \n self.share = share\n\n if share:\n if not private_endpoint:\n print(strings.en[\"SHARE_LINK_MESSAGE\"])\n try:\n share_url = networking.setup_tunnel(server_port, private_endpoint)\n print(strings.en[\"SHARE_LINK_DISPLAY\"].format(share_url))\n except RuntimeError:\n send_error_analytics(self.analytics_enabled)\n share_url = None\n else:\n print(strings.en[\"PUBLIC_SHARE_TRUE\"])\n share_url = None\n\n # Open a browser tab with the interface.\n if inbrowser: \n if share:\n webbrowser.open(share_url) \n else:\n webbrowser.open(path_to_local_server) \n \n # Check if running in a Python notebook in which case, display inline\n if inline is None:\n inline = 
utils.ipython_check()\n if inline:\n from IPython.display import IFrame, display\n # Embed the remote interface page if on google colab; otherwise, embed the local page.\n print(strings.en[\"INLINE_DISPLAY_BELOW\"])\n if share:\n while not networking.url_ok(share_url):\n time.sleep(1)\n display(IFrame(share_url, width=1000, height=500))\n else:\n display(IFrame(path_to_local_server, width=1000, height=500))\n\n send_launch_analytics(analytics_enabled=self.analytics_enabled, inbrowser=inbrowser, is_colab=is_colab, \n share=share, share_url=share_url)\n\n show_tip(self)\n\n # Run server perpetually under certain circumstances\n if debug or int(os.getenv('GRADIO_DEBUG', 0))==1:\n while True:\n sys.stdout.flush()\n time.sleep(0.1)\n is_in_interactive_mode = bool(getattr(sys, 'ps1', sys.flags.interactive))\n if not is_in_interactive_mode:\n self.run_until_interrupted(thread, path_to_local_server)\n \n return app, path_to_local_server, share_url\n\ndef show_tip(io):\n if not(io.show_tips):\n return\n if random.random() < 0.8: # Only show tips once every 5 uses\n return\n relevant_tips = []\n if io.interpretation is None:\n relevant_tips.append(strings.en[\"TIP_INTERPRETATION\"])\n if io.embedding is None and not(io.examples is None) and len(io.examples)>4:\n relevant_tips.append(strings.en[\"TIP_EMBEDDING\"])\n if len(relevant_tips)==0:\n return\n print(random.choice(relevant_tips))\n\ndef launch_counter():\n try:\n if not os.path.exists(JSON_PATH):\n launches = {\"launches\": 1}\n with open(JSON_PATH, \"w+\") as j:\n json.dump(launches, j)\n else:\n with open(JSON_PATH) as j:\n launches = json.load(j)\n launches[\"launches\"] += 1\n if launches[\"launches\"] in [25, 50]:\n print(strings.en[\"BETA_INVITE\"])\n with open(JSON_PATH, \"w\") as j:\n j.write(json.dumps(launches))\n except:\n pass\n\ndef send_error_analytics(analytics_enabled):\n data = {'error': 'RuntimeError in launch method'}\n if analytics_enabled:\n try:\n requests.post(analytics_url + 'gradio-error-analytics/',\n data=data, timeout=3)\n except (requests.ConnectionError, requests.exceptions.ReadTimeout):\n pass # do not push analytics if no network\n\ndef send_launch_analytics(analytics_enabled, inbrowser, is_colab, share, share_url):\n launch_method = 'browser' if inbrowser else 'inline'\n if analytics_enabled:\n data = {\n 'launch_method': launch_method,\n 'is_google_colab': is_colab,\n 'is_sharing_on': share,\n 'share_url': share_url,\n 'ip_address': ip_address\n }\n try:\n requests.post(analytics_url + 'gradio-launched-analytics/',\n data=data, timeout=3)\n except (requests.ConnectionError, requests.exceptions.ReadTimeout):\n pass # do not push analytics if no network\n\n\ndef reset_all():\n for io in Interface.get_instances():\n io.close()\n"
] |
[
[
"tensorflow.get_default_graph",
"tensorflow.keras.backend.get_session"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": [
"1.10"
]
}
] |
ykaitao/gluon-ts
|
[
"9622550974e9e0819e25438fc45353f8a6474b55",
"9622550974e9e0819e25438fc45353f8a6474b55"
] |
[
"src/gluonts/model/deepstate/issm.py",
"evaluations/show_results.py"
] |
[
"# Copyright 2018 Amazon.com, Inc. or its affiliates. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\").\n# You may not use this file except in compliance with the License.\n# A copy of the License is located at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# or in the \"license\" file accompanying this file. This file is distributed\n# on an \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either\n# express or implied. See the License for the specific language governing\n# permissions and limitations under the License.\n\n# Standard library imports\nfrom typing import List, Tuple\n\n# Third-party imports\nfrom pandas.tseries.frequencies import to_offset\n\n# First-party imports\nfrom gluonts.core.component import validated\nfrom gluonts.distribution.distribution import getF\nfrom gluonts.model.common import Tensor\nfrom gluonts.support.util import _broadcast_param\nfrom gluonts.time_feature import (\n TimeFeature,\n MinuteOfHour,\n HourOfDay,\n DayOfWeek,\n WeekOfYear,\n MonthOfYear,\n)\n\n\ndef _make_block_diagonal(blocks: List[Tensor]) -> Tensor:\n assert (\n len(blocks) > 0\n ), \"You need at least one tensor to make a block-diagonal tensor\"\n\n if len(blocks) == 1:\n return blocks[0]\n\n F = getF(blocks[0])\n\n # transition coefficient is block diagonal!\n block_diagonal = _make_2_block_diagonal(F, blocks[0], blocks[1])\n for i in range(2, len(blocks)):\n block_diagonal = _make_2_block_diagonal(\n F=F, left=block_diagonal, right=blocks[i]\n )\n\n return block_diagonal\n\n\ndef _make_2_block_diagonal(F, left: Tensor, right: Tensor) -> Tensor:\n \"\"\"\n Creates a block diagonal matrix of shape (batch_size, m+n, m+n) where m and n are the sizes of\n the axis 1 of left and right respectively.\n\n Parameters\n ----------\n F\n left\n Tensor of shape (batch_size, seq_length, m, m)\n right\n Tensor of shape (batch_size, seq_length, n, n)\n Returns\n -------\n Tensor\n Block diagonal matrix of shape (batch_size, seq_length, m+n, m+n)\n \"\"\"\n # shape (batch_size, seq_length, m, n)\n zeros_off_diag = F.broadcast_add(\n left.slice_axis(\n axis=-1, begin=0, end=1\n ).zeros_like(), # shape (batch_size, seq_length, m, 1)\n right.slice_axis(\n axis=-2, begin=0, end=1\n ).zeros_like(), # shape (batch_size, seq_length, 1, n)\n )\n\n # shape (batch_size, n, m)\n zeros_off_diag_tr = zeros_off_diag.swapaxes(2, 3)\n\n # block diagonal: shape (batch_size, seq_length, m+n, m+n)\n _block_diagonal = F.concat(\n F.concat(left, zeros_off_diag, dim=3),\n F.concat(zeros_off_diag_tr, right, dim=3),\n dim=2,\n )\n\n return _block_diagonal\n\n\nclass ISSM:\n r\"\"\"\n An abstract class for providing the basic structure of Innovation State Space Model (ISSM).\n\n The structure of ISSM is given by\n\n * dimension of the latent state\n * transition and emission coefficents of the transition model\n * emission coefficient of the observation model\n\n \"\"\"\n\n @validated()\n def __init__(self):\n pass\n\n def latent_dim(self) -> int:\n raise NotImplemented()\n\n def output_dim(self) -> int:\n raise NotImplemented()\n\n def emission_coeff(self, seasonal_indicators: Tensor):\n raise NotImplemented()\n\n def transition_coeff(self, seasonal_indicators: Tensor):\n raise NotImplemented()\n\n def innovation_coeff(self, seasonal_indicators: Tensor):\n raise NotImplemented()\n\n def get_issm_coeff(\n self, seasonal_indicators: Tensor\n ) -> Tuple[Tensor, Tensor, Tensor]:\n return (\n self.emission_coeff(seasonal_indicators),\n 
self.transition_coeff(seasonal_indicators),\n self.innovation_coeff(seasonal_indicators),\n )\n\n\nclass LevelISSM(ISSM):\n def latent_dim(self) -> int:\n return 1\n\n def output_dim(self) -> int:\n return 1\n\n def emission_coeff(\n self, seasonal_indicators: Tensor # (batch_size, time_length)\n ) -> Tensor:\n F = getF(seasonal_indicators)\n\n _emission_coeff = F.ones(shape=(1, 1, 1, self.latent_dim()))\n\n # get the right shape: (batch_size, seq_length, obs_dim, latent_dim)\n zeros = _broadcast_param(\n F.zeros_like(\n seasonal_indicators.slice_axis(\n axis=-1, begin=0, end=1\n ).squeeze(axis=-1)\n ),\n axes=[2, 3],\n sizes=[1, self.latent_dim()],\n )\n\n return _emission_coeff.broadcast_like(zeros)\n\n def transition_coeff(\n self, seasonal_indicators: Tensor # (batch_size, time_length)\n ) -> Tensor:\n F = getF(seasonal_indicators)\n\n _transition_coeff = (\n F.eye(self.latent_dim()).expand_dims(axis=0).expand_dims(axis=0)\n )\n\n # get the right shape: (batch_size, seq_length, latent_dim, latent_dim)\n zeros = _broadcast_param(\n F.zeros_like(\n seasonal_indicators.slice_axis(\n axis=-1, begin=0, end=1\n ).squeeze(axis=-1)\n ),\n axes=[2, 3],\n sizes=[self.latent_dim(), self.latent_dim()],\n )\n\n return _transition_coeff.broadcast_like(zeros)\n\n def innovation_coeff(\n self, seasonal_indicators: Tensor # (batch_size, time_length)\n ) -> Tensor:\n return self.emission_coeff(seasonal_indicators).squeeze(axis=2)\n\n\nclass LevelTrendISSM(LevelISSM):\n def latent_dim(self) -> int:\n return 2\n\n def output_dim(self) -> int:\n return 1\n\n def transition_coeff(\n self, seasonal_indicators: Tensor # (batch_size, time_length)\n ) -> Tensor:\n F = getF(seasonal_indicators)\n\n _transition_coeff = (\n (F.diag(F.ones(shape=(2,)), k=0) + F.diag(F.ones(shape=(1,)), k=1))\n .expand_dims(axis=0)\n .expand_dims(axis=0)\n )\n\n # get the right shape: (batch_size, seq_length, latent_dim, latent_dim)\n zeros = _broadcast_param(\n F.zeros_like(\n seasonal_indicators.slice_axis(\n axis=-1, begin=0, end=1\n ).squeeze(axis=-1)\n ),\n axes=[2, 3],\n sizes=[self.latent_dim(), self.latent_dim()],\n )\n\n return _transition_coeff.broadcast_like(zeros)\n\n\nclass SeasonalityISSM(LevelISSM):\n \"\"\"\n Implements periodic seasonality which is entirely determined by the period `num_seasons`.\n \"\"\"\n\n @validated()\n def __init__(self, num_seasons: int) -> None:\n super(SeasonalityISSM, self).__init__()\n self.num_seasons = num_seasons\n\n def latent_dim(self) -> int:\n return self.num_seasons\n\n def output_dim(self) -> int:\n return 1\n\n def emission_coeff(self, seasonal_indicators: Tensor) -> Tensor:\n F = getF(seasonal_indicators)\n return F.one_hot(seasonal_indicators, depth=self.latent_dim())\n\n def innovation_coeff(self, seasonal_indicators: Tensor) -> Tensor:\n F = getF(seasonal_indicators)\n # seasonal_indicators = F.modulo(seasonal_indicators - 1, self.latent_dim)\n return F.one_hot(seasonal_indicators, depth=self.latent_dim()).squeeze(\n axis=2\n )\n\n\nclass CompositeISSM(ISSM):\n DEFAULT_ADD_TREND: bool = True\n\n @validated()\n def __init__(\n self,\n seasonal_issms: List[SeasonalityISSM],\n add_trend: bool = DEFAULT_ADD_TREND,\n ) -> None:\n super(CompositeISSM, self).__init__()\n self.seasonal_issms = seasonal_issms\n self.nonseasonal_issm = (\n LevelISSM() if add_trend is False else LevelTrendISSM()\n )\n\n def latent_dim(self) -> int:\n return (\n sum([issm.latent_dim() for issm in self.seasonal_issms])\n + self.nonseasonal_issm.latent_dim()\n )\n\n def output_dim(self) -> int:\n return 
self.nonseasonal_issm.output_dim()\n\n @classmethod\n def get_from_freq(cls, freq: str, add_trend: bool = DEFAULT_ADD_TREND):\n offset = to_offset(freq)\n\n seasonal_issms: List[SeasonalityISSM] = []\n\n if offset.name == \"M\":\n seasonal_issms = [\n SeasonalityISSM(num_seasons=12) # month-of-year seasonality\n ]\n elif offset.name == \"W-SUN\":\n seasonal_issms = [\n SeasonalityISSM(num_seasons=53) # week-of-year seasonality\n ]\n elif offset.name == \"D\":\n seasonal_issms = [\n SeasonalityISSM(num_seasons=7) # day-of-week seasonality\n ]\n elif offset.name == \"B\": # TODO: check this case\n seasonal_issms = [\n SeasonalityISSM(num_seasons=7) # day-of-week seasonality\n ]\n elif offset.name == \"H\":\n seasonal_issms = [\n SeasonalityISSM(num_seasons=24), # hour-of-day seasonality\n SeasonalityISSM(num_seasons=7), # day-of-week seasonality\n ]\n elif offset.name == \"T\":\n seasonal_issms = [\n SeasonalityISSM(num_seasons=60), # minute-of-hour seasonality\n SeasonalityISSM(num_seasons=24), # hour-of-day seasonality\n ]\n else:\n RuntimeError(f\"Unsupported frequency {offset.name}\")\n\n return cls(seasonal_issms=seasonal_issms, add_trend=add_trend)\n\n @classmethod\n def seasonal_features(cls, freq: str) -> List[TimeFeature]:\n offset = to_offset(freq)\n if offset.name == \"M\":\n return [MonthOfYear(normalized=False)]\n elif offset.name == \"W-SUN\":\n return [WeekOfYear(normalized=False)]\n elif offset.name == \"D\":\n return [DayOfWeek(normalized=False)]\n elif offset.name == \"B\": # TODO: check this case\n return [DayOfWeek(normalized=False)]\n elif offset.name == \"H\":\n return [HourOfDay(normalized=False), DayOfWeek(normalized=False)]\n elif offset.name == \"T\":\n return [\n MinuteOfHour(normalized=False),\n HourOfDay(normalized=False),\n ]\n else:\n RuntimeError(f\"Unsupported frequency {offset.name}\")\n\n return []\n\n def get_issm_coeff(\n self, seasonal_indicators: Tensor # (batch_size, time_length)\n ) -> Tuple[Tensor, Tensor, Tensor]:\n F = getF(seasonal_indicators)\n emission_coeff_ls, transition_coeff_ls, innovation_coeff_ls = zip(\n self.nonseasonal_issm.get_issm_coeff(seasonal_indicators),\n *[\n issm.get_issm_coeff(\n seasonal_indicators.slice_axis(\n axis=-1, begin=ix, end=ix + 1\n )\n )\n for ix, issm in enumerate(self.seasonal_issms)\n ],\n )\n\n # stack emission and innovation coefficients\n emission_coeff = F.concat(*emission_coeff_ls, dim=-1)\n\n innovation_coeff = F.concat(*innovation_coeff_ls, dim=-1)\n\n # transition coefficient is block diagonal!\n transition_coeff = _make_block_diagonal(transition_coeff_ls)\n\n return emission_coeff, transition_coeff, innovation_coeff\n",
"import pandas as pd\nimport json\nfrom pathlib import Path\nimport os\n\nfrom generate_evaluations import metrics_persisted\n\ndir_path = Path(os.path.dirname(os.path.realpath(__file__)))\n\n\ndef collect_results():\n evals = []\n for evaluation_file in dir_path.glob(\"*/*.json\"):\n with open(evaluation_file, \"r\") as f:\n evals.append(json.load(f))\n return pd.DataFrame(evals)\n\n\ndef to_markdown(df: pd.DataFrame, float_format=\"%.3f\") -> str:\n # adapted from:\n # https://stackoverflow.com/questions/33181846/programmatically-convert-pandas-dataframe-to-markdown-table\n return os.linesep.join(\n [\n \"|\".join(df.columns),\n \"|\".join(4 * \"-\" for _ in df.columns),\n df.to_csv(\n sep=\"|\", index=False, header=False, float_format=float_format\n ),\n ]\n ).replace(\"|\", \" | \")\n\n\nresults_df = collect_results()\n\n# copy-paste the results in `evaluations/README.md`\nfor metric in metrics_persisted:\n print(f\"## {metric}\\n\")\n\n pivot_df = results_df.pivot_table(\n index=\"estimator\", columns=\"dataset\", values=metric\n )\n print(to_markdown(pivot_df.reset_index(level=0)))\n"
] |
[
[
"pandas.tseries.frequencies.to_offset"
],
[
"pandas.DataFrame"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [
"0.23",
"0.21",
"0.19",
"0.24",
"0.20",
"1.0",
"0.25"
],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [
"0.23",
"0.21",
"2.0",
"1.4",
"0.19",
"1.1",
"1.5",
"1.2",
"0.24",
"0.20",
"1.0",
"0.25",
"1.3"
],
"scipy": [],
"tensorflow": []
}
] |
matangover/Wave-U-Net
|
[
"6f44febbda3cb685d92f9e0b84e7f06e13d03fba"
] |
[
"scripts/preprocess.py"
] |
[
"from sacred import Experiment\nfrom Config import config_ingredient\nimport tensorflow as tf\nimport numpy as np\nimport os\n\nimport Datasets\nimport Utils\nimport Models.UnetAudioSeparator\n\nex = Experiment('Waveunet Preprocessing', ingredients=[config_ingredient])\n\[email protected]\n# Executed for training, sets the seed value to the Sacred config so that Sacred fixes the Python and Numpy RNG to the same state everytime.\ndef set_seed():\n seed = 1337\n\n@config_ingredient.capture\ndef preprocess(model_config, dataset):\n # Determine input and output shapes\n disc_input_shape = [model_config[\"batch_size\"], model_config[\"num_frames\"], 0] # Shape of input\n separator_class = Models.UnetAudioSeparator.UnetAudioSeparator(model_config)\n sep_input_shape, sep_output_shape = separator_class.get_padding(np.array(disc_input_shape))\n\n tiny = 'tiny' in dataset\n Datasets.preprocess_dataset(model_config, sep_input_shape, sep_output_shape, tiny)\n\[email protected]\ndef run(cfg):\n preprocess()\n print(\"Preprocessing finished.\")"
] |
[
[
"numpy.array"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
NCAR/mom6-tools
|
[
"e5a605eab97013a421a7ea8a93ae950f9e429730"
] |
[
"mom6_tools/stats.py"
] |
[
"#!/usr/bin/env python\n\n\"\"\"\nFunctions used to calculate statistics.\n\"\"\"\n\nimport xarray as xr\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom mom6_tools.DiagsCase import DiagsCase\nfrom mom6_tools.ClimoGenerator import ClimoGenerator\nfrom mom6_tools.m6toolbox import genBasinMasks, request_workers, add_global_attrs\nfrom mom6_tools.m6plot import ztplot, plot_stats_da, xyplot\nfrom mom6_tools.MOM6grid import MOM6grid\nfrom datetime import datetime\nfrom collections import OrderedDict\nimport yaml, os\n\ntry: import argparse\nexcept: raise Exception('This version of python is not new enough. python 2.7 or newer is required.')\n\ndef options():\n parser = argparse.ArgumentParser(description='''Script for computing and plotting statistics.''')\n parser.add_argument('diag_config_yml_path', type=str, help='''Full path to the yaml file \\\n describing the run and diagnostics to be performed.''')\n parser.add_argument('-sd','--start_date', type=str, default='',\n help='''Start year to compute averages. Default is to use value set in diag_config_yml_path''')\n parser.add_argument('-ed','--end_date', type=str, default='',\n help='''End year to compute averages. Default is to use value set in diag_config_yml_path''')\n parser.add_argument('-diff_rms', help='''Compute horizontal mean difference and RMS: model versus \\\n observations''', action=\"store_true\")\n parser.add_argument('-time_series', help='''Extract time-series for thetaoga and soga and saves \\\n annual means in a netCDF file''', action=\"store_true\")\n parser.add_argument('-forcing', help='''Compute global time averages and regionally-averaged time-series \\\n of forcing fields''', action=\"store_true\")\n parser.add_argument('-surface', help='''Compute global time averages and regionally-averaged time-series \\\n of surface fields''', action=\"store_true\")\n parser.add_argument('-nw','--number_of_workers', type=int, default=0,\n help='''Number of workers to use. Default=0 (serial).''')\n parser.add_argument('-o','--obs', type=str, default='WOA18', help='''Observational product to compare agaist. \\\n Valid options are: WOA18 (default) or PHC2''')\n parser.add_argument('-debug', help='''Add priting statements for debugging purposes''', action=\"store_true\")\n cmdLineArgs = parser.parse_args()\n return cmdLineArgs\n\ndef HorizontalMeanRmse_da(var, dims=('yh', 'xh'), weights=None, basins=None, debug=False):\n \"\"\"\n Wrapper for computing weighted horizontal root-mean-square error for DataArrays.\n This function includes the option to provide Basins masks, which returns RMSe\n for each basin provided.\n\n Parameters\n ----------\n\n var : xarray.DataArray\n Difference between the actual values and predicted values (model - obs, or residual).\n\n dims : tuple, str\n Dimensions over which to apply average. Default is ('yh', 'xh').\n\n weights : xarray.DataArray, optional\n weights to apply. It can be a masked array.\n\n basins : xarray.DataArray, optional\n Basins mask to apply. If True, returns horizontal mean RMSE for each basin provided. \\\n Basins must be generated by genBasinMasks. Default is False.\n\n debug : boolean, optional\n If true, print stuff for debugging. Default is False.\n\n Returns\n -------\n reduced : DataArray or DataSet\n If Basins is provided, returns an DataSet with horizontal mean RMS. 
Otherwise,\n returns a DataArray.\n \"\"\"\n check_dims(var,dims)\n\n if basins is not None and weights is None:\n raise ValueError(\"Basin masks can only be applied if weights are provided.\")\n\n if weights is None:\n return rms_da(var)\n else:\n if basins is None:\n # global reduction\n if not isinstance(weights, xr.DataArray):\n raise ValueError(\"weights must be a DataArray\")\n\n check_dims(weights, dims)\n\n total_weights = weights.sum(dim=dims)\n if debug: print('total weights is:', total_weights.values)\n out = rms_da(var, weights=weights, weights_sum=total_weights)\n if debug: print('rmse is:', out.values)\n\n else:\n # regional reduction\n if 'region' not in basins.coords:\n raise ValueError(\"Regions does not have coordinate region. Please use genBasinMasks \\\n to construct the basins mask.\")\n if len(weights.shape)!=3:\n raise ValueError(\"If basins is provided, weights must be a 3D array.\")\n\n if len(basins.shape)!=3:\n raise ValueError(\"Regions must be a 3D array.\")\n\n rmask_od = OrderedDict()\n for reg in basins.region:\n if debug: print('Region: ', reg)\n # construct a 3D region array\n tmp = np.repeat(basins.sel(region=reg).values[np.newaxis, :, :], len(var.z_l), axis=0)\n region3d = xr.DataArray(tmp,dims=var.dims[1::],\n coords= {var.dims[1]: var.z_l,\n var.dims[2]: var.yh,\n var.dims[3]: var.xh})\n if debug: print('region3d:', region3d)\n # select weights to where region3d is one\n tmp_weights = weights.where(region3d == 1.0)\n total_weights = tmp_weights.sum(dim=dims)\n if debug: print('total weights is:', total_weights.values)\n rmask_od[str(reg.values)] = rms_da(var, weights=tmp_weights, weights_sum=total_weights)\n\n if debug: print('rms is:', rmask_od[str(reg.values)])\n # create DataArray to store output\n out = xr.DataArray(np.zeros((len(basins.region), var.shape[0], var.shape[1])),\n dims=(basins.dims[0], var.dims[0], var.dims[1]),\n coords={basins.dims[0]:list(rmask_od.keys()),\n var.dims[0]: var.time,\n var.dims[1]: var.z_l})\n if debug: print(out)\n for i, rmask_field in enumerate(rmask_od.values()):\n out.values[i,:,:] = rmask_field\n\n return out\n\ndef HorizontalMeanDiff_da(var, dims=('yh', 'xh'), weights=None, basins=None, debug=False):\n \"\"\"\n Wrapper for computing weighted horizontal mean difference (model - obs) for DataArrays.\n This function includes the option to provide Basins masks, which returns horizontal mean\n difference for each basin provided.\n\n Parameters\n ----------\n\n var : xarray.DataArray\n Difference between the actual values and predicted values (model - obs, or residual).\n\n dims : tuple, str\n Dimension(s) over which to apply average. Default is ('yh', 'xh').\n\n weights : xarray.DataArray\n weights to apply. It can be a masked array.\n\n basins : xarray.DataArray, optional\n Basins mask to apply. If True, returns horizontal mean difference for each basin provided.\n Basins must be generated by genBasinMasks. Default is False.\n\n debug : boolean, optional\n If true, print stuff for debugging. Default False\n\n Returns\n -------\n reduced : DataArray or DataSet\n If Basins is provided, returns an DataSet with horizontal mean difference. 
Otherwise,\n returns a DataArray.\n \"\"\"\n check_dims(var,dims)\n if basins is not None and weights is None:\n raise ValueError(\"Basin masks can only be applied if weights are provided.\")\n\n if weights is None:\n return var.mean(dim=dims)\n else:\n rmask_od = OrderedDict()\n if basins is None:\n # global reduction\n if not isinstance(weights, xr.DataArray):\n raise ValueError(\"weights must be a DataArray\")\n check_dims(weights,dims)\n total_weights = weights.sum(dim=dims)\n if debug: print('total weights is:', total_weights.values)\n out = mean_da(var, dims, weights=weights, weights_sum=total_weights)\n if debug: print('horizontal mean is:', out)\n else:\n # regional reduction\n if 'region' not in basins.coords:\n raise ValueError(\"Regions does not have coordinate region. Please use genBasinMasks \\\n to construct the basins mask.\")\n if len(weights.shape)!=3:\n raise ValueError(\"If basins is provided, weights must be a 3D array.\")\n\n if len(basins.shape)!=3:\n raise ValueError(\"Regions must be a 3D array.\")\n\n for reg in basins.region:\n if debug: print('Region: ', reg)\n # construct a 3D region array\n tmp = np.repeat(basins.sel(region=reg).values[np.newaxis, :, :], len(var.z_l), axis=0)\n region3d = xr.DataArray(tmp,dims=var.dims[1::],\n coords= {var.dims[1]: var.z_l,\n var.dims[2]: var.yh,\n var.dims[3]: var.xh})\n if debug: print('region3d:', region3d)\n # select weights to where region3d is one\n tmp_weights = weights.where(region3d == 1.0)\n total_weights = tmp_weights.sum(dim=dims)\n rmask_od[str(reg.values)] = mean_da(var, dims, weights=tmp_weights, weights_sum=total_weights)\n if debug: print('horizontal mean is:', rmask_od[str(reg.values)])\n # create dataArray to store rmask_od\n out = xr.DataArray(np.zeros((len(basins.region), var.shape[0], var.shape[1])),\n dims=(basins.dims[0], var.dims[0], var.dims[1]),\n coords={basins.dims[0]:list(rmask_od.keys()),\n var.dims[0]: var.time,\n var.dims[1]: var.z_l})\n\n for i, rmask_field in enumerate(rmask_od.values()):\n out.values[i,:,:] = rmask_field\n\n return out\n\ndef min_da(da, dims=('yh', 'xh')):\n \"\"\"\n Calculates the minimun value in DataArray da,\n\n ----------\n da : xarray.DataArray\n DataArray for which to compute the min.\n\n dims : tuple, str\n Dimension(s) over which to apply reduction. Default is ('yh', 'xh').\n\n Returns\n -------\n reduction : DataSet\n xarray.Dataset with min for da.\n \"\"\"\n check_dims(da,dims)\n return da.min(dim=dims, keep_attrs=True)\n\ndef max_da(da, dims=('yh', 'xh')):\n \"\"\"\n Calculates the maximum value in DataArray da.\n\n ----------\n da : xarray.DataArray\n DataArray for which to compute the max.\n\n dims : tuple, str\n Dimension(s) over which to apply reduction. Default is ('yh', 'xh').\n\n Returns\n -------\n reduction : DataSet\n xarray.Dataset with the max for da.\n \"\"\"\n check_dims(da,dims)\n return da.max(dim=dims, keep_attrs=True)\n\ndef mean_da(da, dims=('yh', 'xh'), weights=None, weights_sum=None):\n \"\"\"\n Calculates the mean value in DataArray da (optional weighted mean).\n\n ----------\n da : xarray.DataArray\n DataArray for which to compute (weighted) mean.\n\n dims : tuple, str\n Dimension(s) over which to apply reduction. Default is ('yh', 'xh').\n\n weights : xarray.DataArray, optional\n weights to apply. It can be a masked array.\n\n weights_sum : xarray.DataArray, optional\n Total weight (i.e., weights.sum()). 
Only computed if not provided.\n\n Returns\n -------\n reduction : DataSet\n xarray.Dataset with (optionally weighted) mean for da.\n \"\"\"\n check_dims(da,dims)\n if weights is not None:\n if weights_sum is None: weights_sum = weights.sum(dim=dims)\n out = ((da * weights).sum(dim=dims) / weights_sum)\n # copy attrs\n out.attrs = da.attrs\n return out\n else:\n return da.mean(dim=dims, keep_attrs=True)\n\ndef std_da(da, dims=('yh', 'xh'), weights=None, weights_sum=None, da_mean=None):\n \"\"\"\n Calculates the std in DataArray da (optional weighted std).\n\n ----------\n da : xarray.DataArray\n DataArray for which to compute (weighted) std.\n\n dims : tuple, str\n Dimension(s) over which to apply reduction. Default is ('yh', 'xh').\n\n weights : xarray.DataArray, optional\n weights to apply. It can be a masked array.\n\n weights_sum : xarray.DataArray, optional\n Total weight (i.e., weights.sum()). Only computed if not provided.\n\n da_mean : xarray.DataArray, optional\n Mean value in DataArray da. Only computed if not provided.\n\n Returns\n -------\n reduction : DataSet\n xarray.Dataset with (optionally weighted) std for da.\n \"\"\"\n\n check_dims(da,dims)\n if weights is not None:\n if weights_sum is None:\n weights_sum = weights.sum(dim=dims)\n if da_mean is None: da_mean = mean_da(da, dims, weights, weights_sum)\n out = np.sqrt(((da-da_mean)**2 * weights).sum(dim=dims)/weights_sum)\n # copy attrs\n out.attrs = da.attrs\n return out\n else:\n return da.std(dim=dims, keep_attrs=True)\n\ndef rms_da(da, dims=('yh', 'xh'), weights=None, weights_sum=None):\n \"\"\"\n Calculates the rms in DataArray da (optional weighted rms).\n\n ----------\n da : xarray.DataArray\n DataArray for which to compute (weighted) rms.\n\n dims : tuple, str\n Dimension(s) over which to apply reduction. Default is ('yh', 'xh').\n\n weights : xarray.DataArray, optional\n weights to apply. It can be a masked array.\n\n weights_sum : xarray.DataArray, optional\n Total weight (i.e., weights.sum()). Only computed if not provided.\n\n Returns\n -------\n reduction : DataSet\n xarray.Dataset with (optionally weighted) rms for da.\n \"\"\"\n\n check_dims(da,dims)\n if weights is not None:\n if weights_sum is None: weights_sum = weights.sum(dim=dims)\n out = np.sqrt((da**2 * weights).sum(dim=dims)/weights_sum)\n # copy attrs\n out.attrs = da.attrs\n return out\n else:\n return np.sqrt((da**2).mean(dim=dims, keep_attrs=True))\n\ndef check_dims(da,dims):\n \"\"\"\n Checks if dims exists in ds.\n ----------\n da : xarray.DataArray\n DataArray for which to compute (weighted) min.\n\n dims : tuple, str\n Dimension(s) over which to apply reduction.\n \"\"\"\n if dims[0] not in da.dims:\n print('dims[0], da.dims',dims[0], da.dims)\n raise ValueError(\"DataArray does not have dimensions given by dims[0]\")\n if dims[1] not in da.dims:\n print('dims[1], da.dims',dims[1], da.dims)\n raise ValueError(\"DataArray does not have dimensions given by dims[1]\")\n\n return\n\ndef myStats_da(da, weights, dims=('yh', 'xh'), basins=None, debug=False):\n \"\"\"\n Calculates min, max, mean, standard deviation and root-mean-square for DataArray da\n and returns Dataset with values.\n\n Parameters\n ----------\n da : xarray.DataArray\n DataArray for which to compute weighted stats.\n\n dims : tuple, str\n Dimension(s) over which to apply reduction. Default is ('yh', 'xh').\n\n weights : xarray.DataArray\n weights to apply. It can be a masked array.\n\n basins : xarray.DataArray, optional\n Basins mask to apply. 
If True, returns horizontal mean RMSE for each basin provided. \\\n Basins must be generated by genBasinMasks. Default is False.\n\n debug : boolean, optional\n If true, print stuff for debugging. Default is False.\n\n Returns\n -------\n reduced : DataSet\n New xarray.Dataset with min, max and weighted mean, standard deviation and\n root-mean-square for DataArray ds.\n \"\"\"\n check_dims(da,dims)\n if weights is None:\n print('compute weights here')\n # compute weights here...\n\n rmask_od = OrderedDict()\n if basins is None:\n # global\n total_weights = weights.sum(dim=dims)\n da_min = min_da(da, dims)\n da_max = max_da(da, dims)\n da_mean = mean_da(da, dims, weights, total_weights)\n da_std = std_da(da, dims, weights, total_weights, da_mean)\n da_rms = rms_da(da, dims, weights, total_weights)\n\n if debug: print_stats(da_min, da_max, da_mean, da_std, da_rms)\n\n out = stats_to_ds(da_min, da_max, da_mean, da_std, da_rms)\n # copy attrs\n out.attrs = da.attrs\n rmask_od['Global'] = out\n\n else:\n # aplpy reduction for each basin\n if 'region' not in basins.coords:\n raise ValueError(\"Regions does not have coordinate region. Please use genBasinMasks \\\n to construct the basins mask.\")\n for reg in basins.region:\n if debug: print('Region: ', reg)\n # select region in the DataArray\n da_reg = da.where(basins.sel(region=reg).values == 1.0)\n # select weights to where region values are one\n tmp_weights = weights.where(basins.sel(region=reg).values == 1.0)\n total_weights = tmp_weights.sum(dim=dims)\n da_min = min_da(da_reg , dims)\n da_max = max_da(da_reg , dims)\n da_mean = mean_da(da_reg, dims, tmp_weights, total_weights)\n da_std = std_da(da_reg , dims, tmp_weights, total_weights, da_mean)\n da_rms = rms_da(da_reg , dims, tmp_weights, total_weights)\n\n if debug:\n print_stats(da_min, da_max, da_mean, da_std, da_rms)\n\n out = stats_to_ds(da_min, da_max, da_mean, da_std, da_rms)\n rmask_od[str(reg.values)] = out\n\n return dict_to_da(rmask_od) # create dataarray using rmask_od\n\ndef print_stats(da_min, da_max, da_mean, da_std, da_rms):\n \"\"\"\n Print values for debugging purposes.\n\n Parameters\n ----------\n\n da_* : xarray.DataArray\n DataArrays with min, max, std, mean, rms.\n \"\"\"\n print('myStats: min(da) =' ,da_min)\n print('myStats: max(da) =' ,da_max)\n print('myStats: mean(da) =',da_mean)\n print('myStats: std(da) =' ,da_std)\n print('myStats: rms(da) =' ,da_rms)\n return\n\ndef stats_to_ds(da_min, da_max, da_mean, da_std, da_rms):\n \"\"\"\n Creates a xarray.Dataset using DataArrays provided.\n\n Parameters\n ----------\n\n da_* : xarray.DataArray\n DataArrays with min, max, std, mean, rms.\n\n Returns\n -------\n ds : DataSet\n xarray.Dataset with min, max, mean, standard deviation and\n root-mean-square.\n \"\"\"\n dim0 = da_min.dims[0]\n dim0_val = da_min[dim0]\n #if 'time' in da_min:\n # var = np.zeros(len(da_min.time))\n # time = da_mean['time']\n #else:\n # var = np.zeros(1)\n # time = np.array([0.])\n\n # create dataset with zeros\n ds = xr.Dataset(data_vars={ 'da_min' : ((dim0), da_min),\n 'da_max' : ((dim0), da_max),\n 'da_std' : ((dim0), da_std),\n 'da_rms' : ((dim0), da_rms),\n 'da_mean': ((dim0), da_mean)},\n coords={dim0: dim0_val})\n # fill dataset with correct values\n #ds['da_mean'] = da_mean; ds['da_std'] = da_std; ds['da_rms'] = da_rms\n #ds['da_min'] = da_min; ds['da_max'] = da_max\n return ds\n\ndef dict_to_da(stats_dict):\n \"\"\"\n Creates a xarray.DataArray using keys in dictionary (stats_dict).\n\n Parameters\n ----------\n\n stats_dict : 
OrderedDict\n Dictionary with statistics computed using function myStats_da\n\n Returns\n -------\n da : DataSet\n DataArray with min, max, mean, standard deviation and\n root-mean-square for different basins.\n \"\"\"\n\n time = stats_dict[list(stats_dict.items())[0][0]].time\n basins = list(stats_dict.keys())\n stats = ['da_min', 'da_max', 'da_mean', 'da_std', 'da_rms']\n var = np.zeros((len(basins),len(stats),len(time)))\n da = xr.DataArray(var, dims=['basin', 'stats', 'time'],\n coords={'basin': basins,\n 'stats': stats,\n 'time': time},)\n for reg in (basins):\n da.sel(basin=reg).sel(stats='da_min').values[:] = stats_dict[reg].da_min.values\n da.sel(basin=reg).sel(stats='da_max').values[:] = stats_dict[reg].da_max.values\n da.sel(basin=reg).sel(stats='da_mean').values[:]= stats_dict[reg].da_mean.values\n da.sel(basin=reg).sel(stats='da_std').values[:] = stats_dict[reg].da_std.values\n da.sel(basin=reg).sel(stats='da_rms').values[:] = stats_dict[reg].da_rms.values\n\n return da\n\ndef main(stream=False):\n\n # Get options\n args = options()\n\n if not args.diff_rms and not args.surface and not args.forcing and not args.time_series:\n raise ValueError(\"Please select -diff_rms, -time_series, -surface and/or -forcing.\")\n\n # Read in the yaml file\n diag_config_yml = yaml.load(open(args.diag_config_yml_path,'r'), Loader=yaml.Loader)\n # set avg dates\n avg = diag_config_yml['Avg']\n if not args.start_date : args.start_date = avg['start_date']\n if not args.end_date : args.end_date = avg['end_date']\n\n # Create the case instance\n dcase = DiagsCase(diag_config_yml['Case'], xrformat=True)\n print('Casename is:', dcase.casename)\n RUNDIR = dcase.get_value('RUNDIR')\n\n if not os.path.isdir('PNG/Horizontal_mean_biases'):\n print('Creating a directory to place figures (PNG)... \\n')\n os.system('mkdir -p PNG/Horizontal_mean_biases')\n if not os.path.isdir('ncfiles'):\n print('Creating a directory to place netCDF files (ncfiles)... 
\\n')\n os.system('mkdir ncfiles')\n\n # read grid\n grd = MOM6grid(RUNDIR+'/'+dcase.casename+'.mom6.static.nc', xrformat=True)\n area = grd.area_t.where(grd.wet > 0)\n\n # Get masking for different regions\n depth = grd.depth_ocean.values\n # remove Nan's, otherwise genBasinMasks won't work\n depth[np.isnan(depth)] = 0.0\n basin_code = genBasinMasks(grd.geolon.values, grd.geolat.values, depth, xda=True)\n\n #select a few basins, namely, Global, MedSea,BalticSea,HudsonBay Arctic,\n # Pacific, Atlantic, Indian, Southern, LabSea and BaffinBay\n basins = basin_code.isel(region=[0,4,5,6,7,8,9,10,11,12,13])\n\n if args.diff_rms:\n horizontal_mean_diff_rms(grd, dcase, basins, args)\n\n if args.surface:\n #variables = ['SSH','tos','sos','mlotst','oml','speed', 'SSU', 'SSV']\n variables = ['SSH','tos','sos','mlotst','oml','speed']\n fname = '.mom6.hm_*.nc'\n xystats(fname, variables, grd, dcase, basins, args)\n\n if args.forcing:\n variables = ['friver','ficeberg','fsitherm','hfsnthermds','sfdsi', 'hflso',\n 'seaice_melt_heat', 'wfo', 'hfds', 'Heat_PmE']\n fname = '.mom6.hm_*.nc'\n xystats(fname, variables, grd, dcase, basins, args)\n\n if args.time_series:\n variables = ['thetaoga','soga']\n fname = '.mom6.hm_*.nc'\n extract_time_series(fname, variables, grd, dcase, args)\n return\n\ndef extract_time_series(fname, variables, grd, dcase, args):\n '''\n Extract time-series and saves annual means.\n\n Parameters\n ----------\n\n fname : str\n Name of the file to be processed.\n\n variables : str\n List of variables to be processed.\n\n grd : OrderedDict\n Dictionary with statistics computed using function myStats_da\n\n dcase : case object\n Object created using mom6_tools.DiagsCase.\n\n args : object\n Object with command line options.\n\n Returns\n -------\n NetCDF file with annual means.\n\n '''\n parallel, cluster, client = request_workers(args.number_of_workers)\n\n RUNDIR = dcase.get_value('RUNDIR')\n\n def preprocess(ds):\n ''' Compute montly averages and return the dataset with variables'''\n return ds[variables].resample(time=\"1M\", closed='left', \\\n keep_attrs=True).mean(dim='time', keep_attrs=True)\n\n # read forcing files\n startTime = datetime.now()\n print('Reading dataset...')\n if parallel:\n ds = xr.open_mfdataset(RUNDIR+'/'+dcase.casename+fname, \\\n chunks={'time': 365}, parallel=True, data_vars='minimal',\n coords='minimal', preprocess=preprocess)\n else:\n ds = xr.open_mfdataset(RUNDIR+'/'+dcase.casename+fname, data_vars='minimal',\n compat='override', coords='minimal', preprocess=preprocess)\n\n print('Time elasped: ', datetime.now() - startTime)\n\n # add attrs and save\n attrs = {'description': 'Annual averages of global mean ocean properties.'}\n add_global_attrs(ds,attrs)\n ds.to_netcdf('ncfiles/'+str(dcase.casename)+'_ann_ave_global_means.nc')\n if parallel:\n # close processes\n print('Releasing workers...\\n')\n client.close(); cluster.close()\n\n return\n\ndef xystats(fname, variables, grd, dcase, basins, args):\n '''\n Compute and plot statistics for 2D variables.\n\n Parameters\n ----------\n\n fname : str\n Name of the file to be processed.\n\n variables : str\n List of variables to be processed.\n\n grd : OrderedDict\n Dictionary with statistics computed using function myStats_da\n\n dcase : case object\n Object created using mom6_tools.DiagsCase.\n\n basins : DataArray\n Basins mask to apply. 
Returns horizontal mean RMSE for each basin provided.\n Basins must be generated by genBasinMasks.\n\n args : object\n Object with command line options.\n\n Returns\n -------\n Plots min, max, mean, std and rms for variables provided and for different basins.\n\n '''\n parallel, cluster, client = request_workers(args.number_of_workers)\n\n RUNDIR = dcase.get_value('RUNDIR')\n area = grd.area_t.where(grd.wet > 0)\n\n def preprocess(ds):\n ''' Compute montly averages and return the dataset with variables'''\n return ds[variables].resample(time=\"1M\", closed='left', \\\n keep_attrs=True).mean(dim='time', keep_attrs=True)\n\n # read forcing files\n startTime = datetime.now()\n print('Reading dataset...')\n if parallel:\n ds = xr.open_mfdataset(RUNDIR+'/'+dcase.casename+fname, \\\n chunks={'time': 365}, parallel=True, data_vars='minimal',\n coords='minimal', preprocess=preprocess)\n else:\n ds = xr.open_mfdataset(RUNDIR+'/'+dcase.casename+fname, data_vars='minimal',\n compat='override', coords='minimal', preprocess=preprocess)\n\n print('Time elasped: ', datetime.now() - startTime)\n\n for var in variables:\n startTime = datetime.now()\n print('\\n Processing {}...'.format(var))\n savefig1='PNG/'+dcase.casename+'_'+str(var)+'_xymean.png'\n savefig2='PNG/'+dcase.casename+'_'+str(var)+'_stats.png'\n\n # yearly mean\n ds_var = ds[var]\n stats = myStats_da(ds_var, dims=ds_var.dims[1::], weights=area, basins=basins)\n stats.to_netcdf('ncfiles/'+dcase.casename+'_'+str(var)+'_stats.nc')\n plot_stats_da(stats, var, ds_var.attrs['units'], save=savefig2)\n ds_var_mean = ds_var.mean(dim='time')\n ds_var_mean.to_netcdf('ncfiles/'+dcase.casename+'_'+str(var)+'_time_ave.nc')\n dummy = np.ma.masked_invalid(ds_var_mean.values)\n xyplot(dummy, grd.geolon.values, grd.geolat.values, area.values, save=savefig1,\n suptitle=ds_var.attrs['long_name'] +' ['+ ds_var.attrs['units']+']',\n title='Averaged between ' +str(ds_var.time[0].values) + ' and '+ str(ds_var.time[-1].values))\n\n plt.close()\n print('Time elasped: ', datetime.now() - startTime)\n\n if parallel:\n # close processes\n print('Releasing workers...\\n')\n client.close(); cluster.close()\n\n return\n\ndef horizontal_mean_diff_rms(grd, dcase, basins, args):\n '''\n Compute horizontal mean difference and rms: model versus observations.\n\n Parameters\n ----------\n\n grd : OrderedDict\n Dictionary with statistics computed using function myStats_da\n\n dcase : case object\n Object created using mom6_tools.DiagsCase.\n\n basins : DataArray\n Basins mask to apply. 
Returns horizontal mean RMSE for each basin provided.\n Basins must be generated by genBasinMasks.\n\n args : object\n Object with command line options.\n\n Returns\n -------\n Plots horizontal mean difference and rms for different basins.\n\n '''\n\n RUNDIR = dcase.get_value('RUNDIR')\n area = grd.area_t.where(grd.wet > 0)\n if args.debug: print('RUNDIR:', RUNDIR)\n parallel, cluster, client = request_workers(args.number_of_workers)\n\n def preprocess(ds):\n if 'thetao' not in ds.variables:\n ds[\"thetao\"] = xr.zeros_like(ds.h)\n if 'so' not in ds.variables:\n ds[\"so\"] = xr.zeros_like(ds.h)\n\n return ds\n\n # read dataset\n startTime = datetime.now()\n print('Reading dataset...')\n ds = xr.open_mfdataset(RUNDIR+'/'+dcase.casename+'.mom6.h_*.nc',\n parallel=True,\n combine=\"nested\", # concatenate in order of files\n concat_dim=\"time\", # concatenate along time\n preprocess=preprocess,\n ).chunk({\"time\": 12})\n\n if args.debug:\n print(ds)\n\n print('Time elasped: ', datetime.now() - startTime)\n\n print('Selecting data between {} and {}...'.format(args.start_date, args.end_date))\n ds = ds.sel(time=slice(args.start_date, args.end_date))\n\n # Compute climatologies\n thetao_model = ds.thetao.resample(time=\"1Y\", closed='left', keep_attrs=True).mean(dim='time', \\\n keep_attrs=True)\n\n salt_model = ds.so.resample(time=\"1Y\", closed='left', keep_attrs=True).mean(dim='time', \\\n keep_attrs=True)\n\n # TODO: improve how obs are selected\n if args.obs == 'PHC2':\n # load PHC2 data\n obs_path = '/glade/p/cesm/omwg/obs_data/phc/'\n obs_temp = xr.open_dataset(obs_path+'PHC2_TEMP_tx0.66v1_34lev_ann_avg.nc', decode_times=False)\n obs_salt = xr.open_dataset(obs_path+'PHC2_SALT_tx0.66v1_34lev_ann_avg.nc', decode_times=False)\n # get theta and salt and rename coordinates to be the same as the model's\n thetao_obs = obs_temp.TEMP.rename({'X': 'xh','Y': 'yh', 'depth': 'z_l'});\n salt_obs = obs_salt.SALT.rename({'X': 'xh','Y': 'yh', 'depth': 'z_l'});\n elif args.obs == 'WOA18':\n # load WOA18 data\n obs_path = '/glade/u/home/gmarques/Notebooks/CESM_MOM6/WOA18_remapping/'\n obs_temp = xr.open_dataset(obs_path+'WOA18_TEMP_tx0.66v1_34lev_ann_avg.nc', decode_times=False)\n obs_salt = xr.open_dataset(obs_path+'WOA18_SALT_tx0.66v1_34lev_ann_avg.nc', decode_times=False)\n # get theta and salt and rename coordinates to be the same as the model's\n thetao_obs = obs_temp.theta0.rename({'depth': 'z_l'});\n salt_obs = obs_salt.s_an.rename({'depth': 'z_l'});\n\n else:\n raise ValueError(\"The obs selected is not available.\")\n\n # set coordinates to the same as the model's\n thetao_obs['xh'] = thetao_model.xh; thetao_obs['yh'] = thetao_model.yh;\n salt_obs['xh'] = salt_model.xh; salt_obs['yh'] = salt_model.yh;\n\n # compute difference\n temp_diff = thetao_model - thetao_obs\n salt_diff = salt_model - salt_obs\n\n # construct a 3D area with land values masked\n area3d = np.repeat(area.values[np.newaxis, :, :], len(temp_diff.z_l), axis=0)\n mask3d = xr.DataArray(area3d, dims=(temp_diff.dims[1:4]), coords= {temp_diff.dims[1]: temp_diff.z_l,\n temp_diff.dims[2]: temp_diff.yh,\n temp_diff.dims[3]: temp_diff.xh})\n area3d_masked = mask3d.where(temp_diff[0,:] == temp_diff[0,:])\n\n # Horizontal Mean difference (model - obs)\n print('\\n Computing Horizontal Mean difference for temperature...')\n startTime = datetime.now()\n temp_bias = HorizontalMeanDiff_da(temp_diff,weights=area3d_masked, basins=basins, debug=args.debug).rename('temp_bias')\n print('Time elasped: ', datetime.now() - startTime)\n print('\\n 
Computing Horizontal Mean difference for salt...')\n startTime = datetime.now()\n salt_bias = HorizontalMeanDiff_da(salt_diff,weights=area3d_masked, basins=basins, debug=args.debug).rename('salt_bias')\n print('Time elasped: ', datetime.now() - startTime)\n\n # Horizontal Mean rms (model - obs)\n print('\\n Computing Horizontal Mean rms for temperature...')\n startTime = datetime.now()\n temp_rms = HorizontalMeanRmse_da(temp_diff,weights=area3d_masked, basins=basins, debug=args.debug).rename('temp_rms')\n print('Time elasped: ', datetime.now() - startTime)\n print('\\n Computing Horizontal Mean rms for salt...')\n salt_rms = HorizontalMeanRmse_da(salt_diff,weights=area3d_masked, basins=basins, debug=args.debug).rename('salt_rms')\n print('Time elasped: ', datetime.now() - startTime)\n\n if parallel:\n print('Releasing workers...')\n client.close(); cluster.close()\n\n print('Saving netCDF files...')\n attrs = { 'start_date': args.start_date,\n 'end_date': args.end_date,\n 'casename': dcase.casename,\n 'obs': args.obs,\n 'module': os.path.basename(__file__)}\n add_global_attrs(temp_bias,attrs)\n temp_bias.to_netcdf('ncfiles/'+str(dcase.casename)+'_temp_bias.nc')\n add_global_attrs(salt_bias,attrs)\n salt_bias.to_netcdf('ncfiles/'+str(dcase.casename)+'_salt_bias.nc')\n add_global_attrs(temp_rms,attrs)\n temp_rms.to_netcdf('ncfiles/'+str(dcase.casename)+'_temp_rms.nc')\n add_global_attrs(salt_rms,attrs)\n salt_rms.to_netcdf('ncfiles/'+str(dcase.casename)+'_salt_rms.nc')\n\n # temperature\n for reg in temp_bias.region:\n print('Generating temperature plots for:', str(reg.values))\n # remove Nan's\n temp_diff_reg = temp_bias.sel(region=reg).dropna('z_l')\n temp_rms_reg = temp_rms.sel(region=reg).dropna('z_l')\n if temp_diff_reg.z_l.max() <= 1000.0:\n splitscale = None\n else:\n splitscale = [0., -1000., -temp_diff_reg.z_l.max()]\n\n savefig_diff='PNG/Horizontal_mean_biases/'+str(dcase.casename)+'_'+str(reg.values)+'_temp_diff.png'\n savefig_rms='PNG/Horizontal_mean_biases/'+str(dcase.casename)+'_'+str(reg.values)+'_temp_rms.png'\n\n ztplot(temp_diff_reg.values, temp_diff_reg.time.values, temp_diff_reg.z_l.values*-1, ignore=np.nan, splitscale=splitscale,\n suptitle=dcase._casename, contour=True, title= str(reg.values) + ', Potential Temperature [C], diff (model - obs)',\n extend='both', colormap='dunnePM', autocenter=True, tunits='Year', show=False, clim=(-3,3),\n save=savefig_diff, interactive=True);\n\n ztplot(temp_rms_reg.values, temp_rms_reg.time.values, temp_rms_reg.z_l.values*-1, ignore=np.nan, splitscale=splitscale,\n suptitle=dcase._casename, contour=True, title= str(reg.values) + ', Potential Temperature [C], rms (model - obs)',\n extend='both', colormap='dunnePM', autocenter=False, tunits='Year', show=False, clim=(0,6),\n save=savefig_rms, interactive=True);\n\n plt.close('all')\n # salinity\n for reg in salt_bias.region:\n print('Generating salinity plots for ', str(reg.values))\n # remove Nan's\n salt_diff_reg = salt_bias.sel(region=reg).dropna('z_l')\n salt_rms_reg = salt_rms.sel(region=reg).dropna('z_l')\n if salt_diff_reg.z_l.max() <= 1000.0:\n splitscale = None\n else:\n splitscale = [0., -1000., -salt_diff_reg.z_l.max()]\n\n savefig_diff='PNG/Horizontal_mean_biases/'+str(dcase.casename)+'_'+str(reg.values)+'_salt_diff.png'\n savefig_rms='PNG/Horizontal_mean_biases/'+str(dcase.casename)+'_'+str(reg.values)+'_salt_rms.png'\n\n ztplot(salt_diff_reg.values, salt_diff_reg.time.values, salt_diff_reg.z_l.values*-1, ignore=np.nan, splitscale=splitscale,\n suptitle=dcase._casename, 
contour=True, title= str(reg.values) + ', Salinity [psu], diff (model - obs)',\n extend='both', colormap='dunnePM', autocenter=True, tunits='Year', show=False, clim=(-1.5, 1.5),\n save=savefig_diff, interactive=True);\n\n ztplot(salt_rms_reg.values, salt_rms_reg.time.values, salt_rms_reg.z_l.values*-1, ignore=np.nan, splitscale=splitscale,\n suptitle=dcase._casename, contour=True, title= str(reg.values) + ', Salinity [psu], rms (model - obs)',\n extend='both', colormap='dunnePM', autocenter=False, tunits='Year', show=False, clim=(0,3),\n save=savefig_rms, interactive=True);\n\n plt.close('all')\n return\n\nif __name__ == '__main__':\n main()\n"
] |
[
[
"numpy.isnan",
"numpy.ma.masked_invalid",
"matplotlib.pyplot.close"
]
] |
[
{
"matplotlib": [],
"numpy": [
"1.10",
"1.12",
"1.11",
"1.19",
"1.13",
"1.16",
"1.9",
"1.18",
"1.20",
"1.7",
"1.15",
"1.14",
"1.17",
"1.8"
],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
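The myStats_da / stats_to_ds pipeline stored in the row above reduces a masked 2D field to min, max and area-weighted mean, standard deviation and rms, per basin. As a quick way to sanity-check those reductions outside xarray, here is a minimal NumPy-only sketch of the same weighted statistics; the function name `weighted_stats` and the toy 4x4 field are hypothetical and not part of the repository code.

import numpy as np

def weighted_stats(field, weights):
    # mirror the masking above: ignore NaN (land) cells and zero their weights
    valid = ~np.isnan(field)
    w = np.where(valid, weights, 0.0)
    x = np.where(valid, field, 0.0)
    wsum = w.sum()
    mean = (w * x).sum() / wsum                         # weighted mean
    std = np.sqrt((w * (x - mean) ** 2).sum() / wsum)   # weighted std about the mean
    rms = np.sqrt((w * x ** 2).sum() / wsum)            # weighted root-mean-square
    return x[valid].min(), x[valid].max(), mean, std, rms

# toy example: 4x4 field with one masked cell and uniform cell areas
field = np.arange(16, dtype=float).reshape(4, 4)
field[0, 0] = np.nan
print(weighted_stats(field, np.ones_like(field)))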
deniztas/Dota2-Winner
|
[
"bc3f033c87cfdf00dd7bbe3e4e38f0efb1c58dc3"
] |
[
"dota2.py"
] |
[
"from helper import DataControl, DatasetFormatter, JsonReader, Scorer\nimport pandas as pd\nimport numpy as np\nfrom time import time as getTime\nimport matplotlib.pyplot as plt\nfrom sklearn import model_selection\nfrom sklearn.ensemble import RandomForestClassifier\nfrom sklearn.tree import DecisionTreeClassifier\nfrom sklearn.neighbors import KNeighborsClassifier\nfrom sklearn.neural_network import MLPClassifier\nfrom IPython import embed\n\nlineCount = 20\nskipPlots = True\nrandSeed = 2018\nteams = {1: \"Team 1\", -1: \"Team 2\"}\nheroes = JsonReader('heroes')\nlobbies = JsonReader('lobbies')\nmods = JsonReader('mods')\nregions = JsonReader('regions')\ninfoObject = {\n 'teams': teams,\n 'heroes': heroes,\n 'lobbies': lobbies,\n 'mods': mods,\n 'regions': regions\n}\n\nprint(\" ==== Step 1 - Load Dataset ==== \")\ncolumnNames = ['team', 'region', 'mode', 'type']\ntrainDataset = pd.read_csv('dataset/dota2Train.csv', sep=',', header=None)\nfor i in range(4):\n trainDataset[i] = trainDataset[i].astype('category')\n trainDataset.rename(columns={i:columnNames[i]}, inplace=True)\nprint(\"Train dataset is loaded.\")\ntestDataset = pd.read_csv('dataset/dota2Test.csv', sep=',', header=None)\nfor i in range(4):\n testDataset[i] = testDataset[i].astype('category')\n testDataset.rename(columns={i:columnNames[i]}, inplace=True)\nprint(\"Test dataset is loaded.\")\nprint(\"-\"*lineCount)\n\nprint(\" ==== Step 2 - Summarize Dataset ==== \")\nnTrain = trainDataset.shape[0]\nnTest = testDataset.shape[0]\nnColumn = trainDataset.shape[1]\nprint(\"Number of train instance:\\t\" + str(nTrain))\nprint(\"Number of test instance:\\t\" + str(nTest))\nprint(\"Number of descriptive features:\\t\" + str(nColumn))\nprint(\"-\"*lineCount)\nprint(\"First 5 row:\")\nprint(trainDataset.head(5))\nprint(\"-\"*lineCount)\nprint(\"Statistics for categorical features:\")\nprint(trainDataset.describe(include='category'))\nprint(\"-\"*lineCount)\nprint(\"Class count of train dataset:\")\nprint(trainDataset.groupby('team').size())\nprint(\"-\"*lineCount)\nprint(\"Class count of test dataset:\")\nprint(testDataset.groupby('team').size())\nprint(\"-\"*lineCount)\nprint(\"Histograms of categorical features:\")\ncategoricDataset = trainDataset.select_dtypes(include='category')\nfor colName in categoricDataset.columns:\n categoricDataset[colName].value_counts().plot(kind='bar', title=str(colName))\n if not skipPlots:\n plt.show()\nprint(\"-\"*lineCount)\n\nprint(\" ==== Step 3 - Example Dota 2 Matches ==== \")\nprint(\"TODO HERE\")\nprint(\"*****************************************************************************************\")\n\nprint(\" ==== Step 4 - Creating Dummies ==== \")\nnCategorical = len(trainDataset.select_dtypes(include='category').columns)\ntrainDataset = pd.get_dummies(trainDataset, columns=list(trainDataset.select_dtypes(include='category').columns))\ntestDataset = pd.get_dummies(testDataset, columns=list(testDataset.select_dtypes(include='category').columns))\nprint(str(nCategorical) + \" categorical feature found.\")\nprint(\"Created \" + str(trainDataset.shape[1]-nColumn) + \" dummy feature created.\")\nprint(\"New num of column: \" + str(trainDataset.shape[1]))\nprint(\"-\"*lineCount)\n\nprint(\" ==== Step 5 - Seperating Dataset ==== \")\nregexContains = 'team'\nregexNotContains = '^((?!team).)*$'\nX = trainDataset.filter(regex=regexNotContains)\nY = trainDataset.filter(regex=regexContains)\nX_test = testDataset.filter(regex=regexNotContains)\nY_test = testDataset.filter(regex=regexContains)\nX_train, 
X_validation, Y_train, Y_validation = model_selection.train_test_split(X, Y, test_size=.2, random_state=randSeed)\nprint(\"Number of train instance:\\t\" + str(X_train.shape[0]))\nprint(\"Number of validation instance:\\t\" + str(X_validation.shape[0]))\nprint(\"Number of test instance:\\t\" + str(X_test.shape[0]))\nprint(\"-\"*lineCount)\n\nprint(\" ==== Step 6 - Creating Machine Learning Models ==== \")\nmodels = [\n #('Decision Tree', DecisionTreeClassifier(criterion='gini')),\n #('Random Forest', RandomForestClassifier(n_estimators=10, criterion='gini', n_jobs=1)),\n ('K-Nearest Neighbors', KNeighborsClassifier(n_neighbors=5, algorithm='auto', metric='minkowski', n_jobs=1)),\n ('MLP tri-layer', MLPClassifier(hidden_layer_sizes=(16, 32, 16 ), activation='relu', solver='adam', alpha=0.0001, learning_rate_init=0.001, max_iter=200)),\n #('MLP big-tri-layer', MLPClassifier(hidden_layer_sizes=(128, 256, 128 ), activation='relu', solver='adam', alpha=0.0001, learning_rate_init=0.001, max_iter=200)),\n #('MLP five-layer', MLPClassifier(hidden_layer_sizes=(64, 128, 256, 128, 64 ), activation='relu', solver='adam', alpha=0.0001, learning_rate_init=0.001, max_iter=100)),\n]\nprint(\"Number of models going to be run:\" + str(len(models)))\nprint(\"Models:\")\nfor modelName, _ in models:\n print(modelName)\nprint(\"-\"*lineCount)\n\n\nprint(\" ==== Step 7 - Training ==== \")\nresults = []\nfor modelname, modelObj in models:\n print(modelname + \" training has started\")\n start = getTime()\n kfold = model_selection.KFold(n_splits=10, random_state=randSeed)\n scorers = {\n 'accr': 'accuracy',\n 'prec': 'precision_macro',\n 'recl': 'recall_macro'\n }\n scores = model_selection.cross_val_score(modelObj, X_train, Y_train, cv=kfold, scoring=scorers, return_train_score=True)\n results.append(scores)\n embed()\n cv_results = scores['accr']\n print(\"Results of model \" + modelname + \":\")\n print(\"\\tMean Accuracy:\\t\" + str(cv_results.mean()))\n print(\"\\tStd.Dev. Accuracy:\\t\" + str(cv_results.std()))\n print(\"\\tRun time (in sec.):\\t\" + str(getTime() - start))\n\n"
] |
[
[
"sklearn.neural_network.MLPClassifier",
"pandas.read_csv",
"sklearn.model_selection.cross_val_score",
"sklearn.model_selection.train_test_split",
"sklearn.model_selection.KFold",
"sklearn.neighbors.KNeighborsClassifier",
"matplotlib.pyplot.show"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [
"2.0",
"1.4",
"1.1",
"1.5",
"1.2",
"1.3"
],
"scipy": [],
"tensorflow": []
}
] |
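One thing worth flagging in the dota2.py source stored above: the training loop passes a dict of scorers and `return_train_score=True` to `model_selection.cross_val_score`, which only supports a single scoring metric; the multi-metric API in scikit-learn is `cross_validate`. Below is a minimal sketch of that call, assuming scikit-learn 0.19 or later and using random synthetic data as a stand-in for the real dota2Train.csv features and team labels.

import numpy as np
from sklearn.model_selection import KFold, cross_validate
from sklearn.neighbors import KNeighborsClassifier

# synthetic stand-in for the one-hot hero/lobby features and the -1/1 team label
rng = np.random.RandomState(2018)
X = rng.rand(200, 10)
y = rng.choice([-1, 1], size=200)

kfold = KFold(n_splits=10, shuffle=True, random_state=2018)
scorers = {'accr': 'accuracy', 'prec': 'precision_macro', 'recl': 'recall_macro'}

# cross_validate (unlike cross_val_score) accepts a dict of scorers
scores = cross_validate(KNeighborsClassifier(n_neighbors=5), X, y,
                        cv=kfold, scoring=scorers, return_train_score=True)
print(scores['test_accr'].mean(), scores['test_accr'].std())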
GCES-Pydemic/pydemic-ui
|
[
"8e3d8bc9f73887edf6bd8ab78a4ead29fe8239ed"
] |
[
"pydemic_ui/apps/sitrep/abstract.py"
] |
[
"import pandas as pd\n\nimport mundi\nimport sidekick as sk\nfrom pydemic_ui import st\nfrom pydemic_ui.i18n import _\n\nDISPLAY_NAME = _(\"Abstract\")\nABSTRACT = _(\n \"\"\"\n## Update from the last 24 hours (as {date} Brasilia Time)\n\n**{cases}** additional cases and **{deaths}** additional deaths reported from\nall {n_children} {unit_kind};\n\nThe {n_top_cases} {unit_kind} reporting the highest number of cases in the\nlast 24 hours: {top_cases}.\n\nThe {n_top_deaths} {unit_kind} reporting the highest number of deaths in the\npast 24 hours: {top_deaths}.\n\"\"\"\n)\n\n\ndef abstract(*args, where=st, **kwargs):\n \"\"\"\n Print the abstract of the situation report.\n \"\"\"\n where.markdown(abstract_str(*args, **kwargs))\n\n\[email protected]\ndef abstract_str(top=10, kind=_(\"Federal Units\"), date=None):\n \"\"\"\n Create a markdown string with an abstract to the dashboard.\n \"\"\"\n\n children_refs = mundi.regions(country_code=\"BR\", type=\"state\").index\n children = [mundi.region(ref) for ref in children_refs]\n n_children = len(children)\n\n curves = [child.pydemic.epidemic_curve().diff() for child in children]\n date = date or max(curve.index.max() for curve in curves)\n cases = pd.Series([c.loc[date, \"cases\"] for c in curves], index=children_refs)\n deaths = pd.Series([c.loc[date, \"deaths\"] for c in curves], index=children_refs)\n\n def list_top(data: pd.Series):\n *head, last = sk.pipe(\n data.sort_values(ascending=False).index[:top],\n sk.map(mundi.region),\n sk.map(\"{0.name}\".format),\n )\n head = \", \".join(head)\n return _(\" and \").join([head, last])\n\n return _(ABSTRACT).format(\n cases=\"{:n}\".format(cases.sum()),\n deaths=\"{:n}\".format(deaths.sum()),\n date=date,\n n_children=n_children,\n n_top_cases=min(top, n_children),\n n_top_deaths=min(top, n_children),\n top_cases=list_top(cases),\n top_deaths=list_top(deaths),\n unit_kind=_(kind),\n )\n\n\ndef options(where=st):\n return {}\n\n\ndef show(where=st):\n abstract()\n\n\ndef main(embed=False, where=st):\n show(**options(where=(st if embed else st.sidebar)), where=st)\n\n\nif __name__ == \"__main__\":\n main()\n"
] |
[
[
"pandas.Series"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [
"0.23",
"0.21",
"2.0",
"1.4",
"0.19",
"1.1",
"1.5",
"1.2",
"0.24",
"0.20",
"1.0",
"0.25",
"1.3"
],
"scipy": [],
"tensorflow": []
}
] |
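The pydemic_ui abstract stored above collapses the per-region epidemic curves into two pandas.Series (cases and deaths) and then names the top regions by sorting the Series. A minimal sketch of that "top N by value" pattern, with made-up region codes and counts standing in for the mundi/pydemic data:

import pandas as pd

# hypothetical per-region new-case counts (placeholders, not real data)
cases = pd.Series({'BR-SP': 1200, 'BR-RJ': 800, 'BR-MG': 650, 'BR-BA': 300})

top = 2
top_regions = cases.sort_values(ascending=False).index[:top]
print(', '.join(top_regions), '-', int(cases.sum()), 'new cases in total')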
dcbau/FreeGrid
|
[
"f9d72bbaa92b5da3d9c7d480981f6aedd559ecce",
"75a4754fc2bdc2e05e682b7fcac017d05f9c3185"
] |
[
"GUI/main_window.py",
"grid_improving/angular_distance.py"
] |
[
"# -*- coding: utf-8 -*-\n\n\nfrom PyQt5 import QtCore, QtGui, QtWidgets\nimport numpy as np\nimport os\nfrom GUI.audio_device_widget import AudioDeviceWidget\nfrom GUI.plot_widget import PlotWidget, PlotWidget_HPIRs, PlotWidget_HPCF\n\nuse_vispy = True\ntry:\n from GUI.vispyWidget import VispyCanvas\nexcept ImportError:\n use_vispy = False\n\n\nclass UiMainWindow(QtWidgets.QMainWindow):\n\n def __init__(self, measurement_ref):\n super().__init__()\n self.setObjectName(\"MainWindow\")\n\n self.cwidget = QtWidgets.QWidget()\n self.setCentralWidget(self.cwidget)\n\n\n self.measurement_ref = measurement_ref\n\n self.setWindowTitle(\"FreeGrid\")\n\n\n\n # VISPY WIDGET\n self.azimuthLabel = QtWidgets.QLabel(\"Az: \")\n self.azimuthLabel.setFont(QtGui.QFont(\"Arial\", 24))\n self.azimuthLabel.setMaximumHeight(30)\n self.elevationLabel = QtWidgets.QLabel(\"El: \")\n self.elevationLabel.setFont(QtGui.QFont(\"Arial\", 24))\n self.elevationLabel.setMaximumHeight(30)\n self.radiusLabel = QtWidgets.QLabel(\"Radius: \")\n self.radiusLabel.setFont(QtGui.QFont(\"Arial\", 15))\n self.radiusLabel.setMaximumHeight(30)\n\n self.vpWidget = QtWidgets.QGroupBox(\"Virtual Speaker Position\")\n self.vpWidget.setObjectName(\"vpWidget\")\n self.vpWidget.setLayout(QtWidgets.QGridLayout())\n\n if use_vispy:\n self.vpWidget.setMinimumSize(400, 400)\n\n self.vispy_canvas = VispyCanvas(self, measurement_ref)\n self.sliderTheta = QtWidgets.QSlider()\n self.sliderPhi = QtWidgets.QSlider()\n\n self.vpWidget.layout().addWidget(self.vispy_canvas.native, 0, 0, 4, 4)\n self.vpWidget.layout().addWidget(self.sliderTheta, 5, 0, 1, 4)\n self.vpWidget.layout().addWidget(self.sliderPhi, 0, 5, 4, 1)\n\n self.sliderTheta.setOrientation(QtCore.Qt.Horizontal)\n self.sliderTheta.setObjectName(\"sliderTheta\")\n self.sliderTheta.valueChanged.connect(self.vispy_canvas.update_theta)\n\n self.sliderPhi.setOrientation(QtCore.Qt.Vertical)\n self.sliderPhi.setMinimum(-25)\n self.sliderPhi.setMaximum(25)\n self.sliderPhi.setValue(0)\n self.sliderPhi.setObjectName(\"sliderPhi\")\n self.sliderPhi.valueChanged.connect(self.vispy_canvas.update_phi)\n\n self.vpWidget.layout().addWidget(self.azimuthLabel, 6, 1, 1, 1)\n self.vpWidget.layout().addWidget(self.elevationLabel, 6, 2, 1, 1)\n self.vpWidget.layout().addWidget(self.radiusLabel, 6, 3, 1, 1)\n\n else:\n self.vp_missing_label = QtWidgets.QLabel(\"Vispy package missing or deactivated: \\n3D speaker representation disabled.\")\n self.vpWidget.layout().addWidget(self.vp_missing_label, 1, 1, 1, 3)\n\n self.vpWidget.layout().addWidget(self.azimuthLabel, 2, 1, 1, 1)\n self.vpWidget.layout().addWidget(self.elevationLabel, 2, 2, 1, 1)\n self.vpWidget.layout().addWidget(self.radiusLabel, 2, 3, 1, 1)\n\n\n\n\n\n\n # DEVICE STATUS WIDGET\n\n self.device_status_widget = QtWidgets.QGroupBox(\"Audio Device Status\")\n self.device_status_widget.setLayout(QtWidgets.QHBoxLayout())\n self.device_status_widget.layout().addWidget(AudioDeviceWidget(self.measurement_ref.measurement))\n self.device_status_widget.layout().setSizeConstraint(QtWidgets.QLayout.SetFixedSize)\n\n\n # TRACKER STATUS WIDGET\n\n self.tracker_status_widget = QtWidgets.QGroupBox(\"Vive Tracker Status\")\n tracker_status = self.measurement_ref.tracker.check_tracker_availability()\n\n self.tracker1_status_label = QtWidgets.QLabel(tracker_status[\"tracker1\"])\n self.tracker2_status_label = QtWidgets.QLabel(tracker_status[\"tracker2\"])\n\n self.tracker1_label = QtWidgets.QLabel(\"(Head) Tracker 1:\")\n self.tracker2_label = 
QtWidgets.QLabel(\"Tracker 2:\")\n\n self.tracker_status_widget.setLayout(QtWidgets.QFormLayout())\n self.tracker_status_widget.layout().addRow(self.tracker1_label, self.tracker1_status_label)\n self.tracker_status_widget.layout().addRow(self.tracker2_label, self.tracker2_status_label)\n\n self.tracker_status_widget.setMaximumHeight(100)\n\n # OSC STATUS WIDGET\n\n self.osc_status_box = QtWidgets.QGroupBox(\"OSC Input Status\")\n self.osc_status_box.setMaximumHeight(100)\n self.osc_status_box.setLayout(QtWidgets.QVBoxLayout())\n\n self.osc_status_indicator = QtWidgets.QCheckBox(\" OSC Input\")\n self.osc_status_indicator.setStyleSheet(\"QCheckBox::indicator\"\n \"{\"\n \"background-color : lightgrey;\"\n \"}\")\n self.osc_status_indicator.setCheckable(False)\n self.osc_status_box.layout().addWidget(self.osc_status_indicator)\n\n self.osc_status_box.hide()\n\n\n # MANUAL AZ/EL/R box\n self.azimuthBox = QtWidgets.QSpinBox()\n self.azimuthBox.setMaximum(359)\n self.azimuthBox.valueChanged.connect(self.manual_update_az)\n\n self.elevationBox = QtWidgets.QSpinBox()\n self.elevationBox.setMaximum(90)\n self.elevationBox.setMinimum(-90)\n self.elevationBox.valueChanged.connect(self.manual_update_el)\n\n self.radiusBox = QtWidgets.QSpinBox()\n self.radiusBox.setMinimum(20)\n self.radiusBox.setMaximum(999)\n self.radiusBox.valueChanged.connect(self.manual_update_radius)\n\n\n self.manualAngleBox = QtWidgets.QGroupBox(\n \"Set angle manually (Only when VIVE trackers are disconnected)\")\n layout = QtWidgets.QHBoxLayout()\n layout.addWidget(QtWidgets.QLabel(\"Azimuth °\"))\n layout.addWidget(self.azimuthBox)\n layout.addWidget(QtWidgets.QLabel(\"Elevation °\"))\n layout.addWidget(self.elevationBox)\n layout.addWidget(QtWidgets.QLabel(\"Radius cm\"))\n layout.addWidget(self.radiusBox)\n\n layout.setSizeConstraint(QtWidgets.QLayout.SetFixedSize)\n\n self.manualAngleBox.setLayout(layout)\n\n\n # TAB WIDGET\n\n self.tabWidget = QtWidgets.QTabWidget(self)\n self.tabWidget.setEnabled(True)\n self.tabWidget.setContextMenuPolicy(QtCore.Qt.DefaultContextMenu)\n self.tabWidget.setTabPosition(QtWidgets.QTabWidget.North)\n self.tabWidget.setTabShape(QtWidgets.QTabWidget.Rounded)\n self.tabWidget.setIconSize(QtCore.QSize(32, 32))\n self.tabWidget.setDocumentMode(True)\n self.tabWidget.setTabsClosable(False)\n self.tabWidget.setMovable(False)\n self.tabWidget.setTabBarAutoHide(False)\n self.tabWidget.setObjectName(\"tabWidget\")\n self.tabWidget.currentChanged.connect(self.tab_changed)\n\n self.tab_config = QtWidgets.QWidget()\n self.tab_config.setEnabled(True)\n self.tab_config.setObjectName(\"tab_config\")\n self.tab_config.setLayout(QtWidgets.QVBoxLayout())\n self.tab_config.layout().setAlignment(QtCore.Qt.AlignTop)\n self.tabWidget.addTab(self.tab_config, \"\")\n\n self.tab_measure = QtWidgets.QWidget()\n self.tab_measure.setEnabled(True)\n self.tab_measure.setObjectName(\"tab_measure\")\n self.tab_measure.setLayout(QtWidgets.QVBoxLayout())\n self.tab_measure.layout().setAlignment(QtCore.Qt.AlignCenter)\n self.tabWidget.addTab(self.tab_measure, \"\")\n\n self.tab_data = QtWidgets.QWidget()\n self.tab_data.setEnabled(True)\n self.tab_data.setObjectName(\"tab_data\")\n self.tab_data.setLayout(QtWidgets.QGridLayout())\n #self.tab_data.layout().setAlignment(QtCore.Qt.AlignCenter)\n self.tabWidget.addTab(self.tab_data, \"\")\n self.tab_data_index = self.tabWidget.count()-1\n\n self.tab_hpc = QtWidgets.QWidget()\n self.tab_hpc.setEnabled(True)\n self.tab_hpc.setLayout(QtWidgets.QVBoxLayout())\n 
self.tab_hpc.layout().setAlignment(QtCore.Qt.AlignCenter)\n self.tabWidget.addTab(self.tab_hpc, \"\")\n\n\n # Config Tab\n #############################\n #############################\n\n # Config Tab\n # Select Tracking Input\n ############################\n\n self.tracking_input_box = QtWidgets.QGroupBox(\"Tracking Input\")\n self.tracking_input_box.setFixedHeight(70)\n self.tracking_input_box.setLayout(QtWidgets.QHBoxLayout())\n\n self.tracking_input_vive = QtWidgets.QRadioButton(\"Vive Trackers\")\n self.tracking_input_vive.setChecked(True)\n self.tracking_input_vive.sourcename = \"Vive\"\n self.tracking_input_vive.toggled.connect(self.select_tracking_input)\n self.tracking_input_box.layout().addWidget(self.tracking_input_vive)\n\n self.tracking_input_OSC_direct = QtWidgets.QRadioButton(\"External: OSC (Az|El|R)\")\n self.tracking_input_OSC_direct.sourcename = \"OSC_direct\"\n self.tracking_input_OSC_direct.toggled.connect(self.select_tracking_input)\n self.tracking_input_box.layout().addWidget(self.tracking_input_OSC_direct)\n\n self.tab_config.layout().addWidget(self.tracking_input_box)\n\n # Config Tab\n # Vive Tracker Box\n # Show Instructions Dialog Box:\n ###########################\n\n\n self.vivetracker_box = QtWidgets.QGroupBox(\"Tracker Calibration\")\n self.vivetracker_box.setLayout(QtWidgets.QVBoxLayout())\n self.vivetracker_box.layout().setAlignment(QtCore.Qt.AlignTop)\n self.vivetracker_box.setFixedHeight(500)\n\n self.tab_config.layout().addWidget(self.vivetracker_box)\n\n self.dlg = InstructionsDialogBox()\n self.show_instructions_button = QtWidgets.QPushButton()\n self.show_instructions_button.setText(\"Show Calibration Instructions\")\n self.show_instructions_button.clicked.connect(self.dlg.show)\n self.show_instructions_button.setMaximumWidth(200)\n self.vivetracker_box.layout().addWidget(self.show_instructions_button)\n\n\n # Config Tab\n # Vive Tracker Box\n # Switch Trackers Box:\n ###########################\n\n self.switchTrackersButton = QtWidgets.QPushButton()\n self.switchTrackersButton.setText(\"Switch Tracker Roles\")\n self.switchTrackersButton.setObjectName(\"switchTrackersButton\")\n self.switchTrackersButton.setMaximumWidth(200)\n self.switchTrackersButton.clicked.connect(self.switch_trackers)\n self.vivetracker_box.layout().addWidget(self.switchTrackersButton)\n\n # Config Tab\n # Vive Tracker Box\n # Delay trackers textfield:\n ###########################\n\n self.delay_calibration_layout = QtWidgets.QHBoxLayout()\n self.delay_calibration_layout.setAlignment(QtCore.Qt.AlignLeft)\n self.vivetracker_box.layout().addLayout(self.delay_calibration_layout)\n self.calibration_info_label = QtWidgets.QLabel(\"Delay calibration triggers (s)\")\n self.delay_calibration_layout.addWidget(self.calibration_info_label)\n self.calibration_wait_time = QtWidgets.QSpinBox()\n self.delay_calibration_layout.addWidget(self.calibration_wait_time)\n\n # Config Tab\n # Vive Tracker Box\n # Head Dimensions Dialog:\n ###########################\n\n self.head_dimensions_dialog = QtWidgets.QDialog()\n\n self.head_dimensions_dialog.setModal(False)\n self.head_dimensions_dialog.setWindowTitle(\"Measure Head Dimensions\")\n\n Btn = QtWidgets.QDialogButtonBox.Close\n self.head_dimensions_dialog.buttonBox = QtWidgets.QDialogButtonBox(Btn)\n self.head_dimensions_dialog.buttonBox.clicked.connect(self.head_dimensions_dialog.close)\n\n self.head_dimensions_dialog.setLayout(QtWidgets.QVBoxLayout())\n self.head_dimensions_dialog.layout().setAlignment(QtCore.Qt.AlignLeft)\n\n 
self.head_dimensions_info = QtWidgets.QLabel(\"Measure the width and length of the head by holding the tracker to the left, right, front and back of the head (above the ears). This data is not mandatory for the measurement, but can be used as meta data during post processing. It is stored along with the HRIR data.\")\n self.head_dimensions_info.setWordWrap(True)\n self.head_dimensions_dialog.layout().addWidget(self.head_dimensions_info)\n\n self.head_dimensions_formlayout = QtWidgets.QFormLayout()\n self.head_dimensions_dialog.layout().addLayout(self.head_dimensions_formlayout)\n\n calibration_button_width = 180\n calibration_button_size = QtCore.QSize(calibration_button_width, 50)\n\n self.calibrate_left_head = QtWidgets.QPushButton(text='Left Side')\n self.calibrate_left_head.setAutoDefault(False)\n # self.calibrate_ear_left.setFixedSize(calibration_button_size)\n self.calibrate_left_head.setFixedWidth(calibration_button_width)\n self.calibrate_left_head.clicked.connect(lambda: self.calibrate(self.calibrate_head_left))\n self.calibrate_left_head_label = QtWidgets.QLabel(text=\"Uncalibrated\")\n self.head_dimensions_formlayout.addRow(self.calibrate_left_head, self.calibrate_left_head_label)\n\n self.calibrate_right_head = QtWidgets.QPushButton(text='Right Side')\n self.calibrate_right_head.setAutoDefault(False)\n\n # self.calibrate_ear_right.setFixedSize(calibration_button_size)\n self.calibrate_right_head.setFixedWidth(calibration_button_width)\n self.calibrate_right_head.clicked.connect(lambda: self.calibrate(self.calibrate_head_right))\n self.calibrate_right_head_label = QtWidgets.QLabel(text=\"Uncalibrated\")\n self.head_dimensions_formlayout.addRow(self.calibrate_right_head, self.calibrate_right_head_label)\n\n self.head_width_label = QtWidgets.QLabel(text=\"Head Width: - \")\n self.head_dimensions_formlayout.addRow(QtWidgets.QLabel(\"\"), self.head_width_label)\n\n self.calibrate_front_head = QtWidgets.QPushButton(text='Front Of Head')\n self.calibrate_front_head.setAutoDefault(False)\n # self.calibrate_front_head.setFixedSize(calibration_button_size)\n self.calibrate_front_head.setFixedWidth(calibration_button_width)\n self.calibrate_front_head.clicked.connect(lambda: self.calibrate(self.calibrate_head_front))\n self.calibrate_front_head_label = QtWidgets.QLabel(text=\"Uncalibrated\")\n self.head_dimensions_formlayout.addRow(self.calibrate_front_head, self.calibrate_front_head_label)\n\n self.calibrate_back_head = QtWidgets.QPushButton(text='Back Of Head')\n self.calibrate_back_head.setAutoDefault(False)\n # self.calibrate_back_head.setFixedSize(calibration_button_size)\n self.calibrate_back_head.setFixedWidth(calibration_button_width)\n self.calibrate_back_head.clicked.connect(lambda: self.calibrate(self.calibrate_head_back))\n self.calibrate_back_head_label = QtWidgets.QLabel(text=\"Uncalibrated\")\n self.head_dimensions_formlayout.addRow(self.calibrate_back_head, self.calibrate_back_head_label)\n\n self.head_length_label = QtWidgets.QLabel(text=\"Head Length: - \")\n self.head_dimensions_formlayout.addRow(QtWidgets.QLabel(\"\"), self.head_length_label)\n\n self.head_dimensions_dialog.layout().addWidget(self.head_dimensions_dialog.buttonBox)\n\n\n self.show_head_dimensions = QtWidgets.QPushButton()\n self.show_head_dimensions.setText(\"Optional: Head Dimensions\")\n self.show_head_dimensions.clicked.connect(self.head_dimensions_dialog.show)\n self.show_head_dimensions.setMaximumWidth(200)\n self.vivetracker_box.layout().addWidget(self.show_head_dimensions)\n\n # Config Tab\n # 
Vive Tracker Box\n # Calibration Box\n\n self.calibration_box = QtWidgets.QGroupBox(\"Tracker Calibration\")\n self.calibration_box.setLayout(QtWidgets.QVBoxLayout())\n self.calibration_box.layout().setAlignment(QtCore.Qt.AlignLeft)\n self.vivetracker_box.layout().addWidget(self.calibration_box)\n\n self.calibrations_formlayout = QtWidgets.QFormLayout()\n self.calibration_box.layout().addLayout(self.calibrations_formlayout)\n\n\n calibration_button_width = 180\n calibration_button_size = QtCore.QSize(calibration_button_width, 50)\n\n\n self.calibrate_ear_left = QtWidgets.QPushButton(text='Calibrate Left Ear')\n #self.calibrate_ear_left.setFixedSize(calibration_button_size)\n self.calibrate_ear_left.setFixedWidth(calibration_button_width)\n self.calibrate_ear_left.clicked.connect(lambda: self.calibrate(self.calibrate_left_ear))\n self.calibrate_ear_left_label = QtWidgets.QLabel(text=\"Uncalibrated\")\n self.calibrations_formlayout.addRow(self.calibrate_ear_left, self.calibrate_ear_left_label)\n\n self.calibrate_ear_right = QtWidgets.QPushButton(text='Calibrate Right Ear')\n #self.calibrate_ear_right.setFixedSize(calibration_button_size)\n self.calibrate_ear_right.setFixedWidth(calibration_button_width)\n self.calibrate_ear_right.clicked.connect(lambda: self.calibrate(self.calibrate_right_ear))\n self.calibrate_ear_right_label = QtWidgets.QLabel(text=\"Uncalibrated\")\n self.calibrations_formlayout.addRow(self.calibrate_ear_right, self.calibrate_ear_right_label)\n\n self.calibrate_acoustical_center = QtWidgets.QPushButton(text='Calibrate Speaker')\n #self.calibrate_acoustical_center.setFixedSize(calibration_button_size)\n self.calibrate_acoustical_center.setFixedWidth(calibration_button_width)\n self.calibrate_acoustical_center.clicked.connect(lambda: self.calibrate(self.calibrate_acoustical_centre))\n self.calibrate_acoustical_center_label = QtWidgets.QLabel(text=\"Uncalibrated\")\n self.calibrations_formlayout.addRow(self.calibrate_acoustical_center, self.calibrate_acoustical_center_label)\n\n\n\n self.calibrateButton = QtWidgets.QPushButton(self.tab_measure)\n self.calibrateButton.setText(\"Calibrate Orientation\")\n self.calibrateButton.setObjectName(\"calibrateButton\")\n #self.calibrateButton.setFixedSize(calibration_button_size)\n self.calibrateButton.setFixedWidth(calibration_button_width)\n self.calibrateButton.clicked.connect(lambda: self.calibrate(self.calibrate_orientation))\n self.calibrate_orientation_label = QtWidgets.QLabel(\"Uncalibrated\")\n\n self.calibrations_formlayout.addRow(self.calibrateButton, self.calibrate_orientation_label)\n\n\n # Config Tab\n # OSC Config Box\n ############################\n\n self.osc_config_box = QtWidgets.QGroupBox(\"OSC Configuration\")\n self.osc_config_box.setLayout(QtWidgets.QVBoxLayout())\n self.osc_config_box.setFixedHeight(500)\n self.osc_config_box.layout().setAlignment(QtCore.Qt.AlignTop)\n self.osc_config_box.hide()\n\n self.osc_ip_label = QtWidgets.QLabel(\"Current Host IP: \")\n self.osc_port_label = QtWidgets.QLabel(\"OSC Server Port: \")\n self.osc_address_label = QtWidgets.QLabel(\"Listening for list of [Az, El, R] on osc-address '/guided_hrtfs/angle'\")\n self.osc_address_label.setWordWrap(True)\n\n self.osc_config_box.layout().addWidget(self.osc_ip_label)\n self.osc_config_box.layout().addWidget(self.osc_port_label)\n self.osc_config_box.layout().addWidget(self.osc_address_label)\n\n\n self.tab_config.layout().addWidget(self.osc_config_box)\n\n # Config Tab\n # Measurement Parameters Box:\n ############################\n\n 
self.measuremet_paramteres_box = QtWidgets.QGroupBox(\"Measurement Parameters\")\n self.measuremet_paramteres_box.setLayout(QtWidgets.QVBoxLayout())\n\n\n self.sweep_parameters_dialog = QtWidgets.QDialog()\n self.sweep_parameters_dialog.setModal(False)\n self.sweep_parameters_dialog.setWindowTitle(\"Sweep Parameters\")\n\n Btn_ok = QtWidgets.QDialogButtonBox.Ok\n self.sweep_parameters_dialog.buttonBox = QtWidgets.QDialogButtonBox(Btn_ok)\n self.sweep_parameters_dialog.buttonBox.clicked.connect(self.update_sweep_parameters)\n\n self.sweep_parameters_dialog.setLayout(QtWidgets.QVBoxLayout())\n self.sweep_parameters_dialog.layout().setAlignment(QtCore.Qt.AlignLeft)\n\n self.sweep_parameters_formlayout = QtWidgets.QFormLayout()\n self.sweep_parameters_dialog.layout().addLayout(self.sweep_parameters_formlayout)\n\n # get current parameters\n sweep_params = self.measurement_ref.measurement.get_sweep_parameters()\n\n # add row entries for each parameter\n self.sweeplength_sec = QtWidgets.QLineEdit(str(sweep_params['sweeplength_sec']))\n self.sweep_parameters_formlayout.addRow(self.sweeplength_sec, QtWidgets.QLabel(text='Sweep length (sec)'))\n\n self.post_silence_sec = QtWidgets.QLineEdit(str(sweep_params['post_silence_sec']))\n self.sweep_parameters_formlayout.addRow(self.post_silence_sec, QtWidgets.QLabel(text='Silence after sweep (sec)'))\n\n self.f_start = QtWidgets.QLineEdit(str(sweep_params['f_start']))\n self.sweep_parameters_formlayout.addRow(self.f_start, QtWidgets.QLabel(text='Sweep start frequency (Hz)'))\n\n self.f_end = QtWidgets.QLineEdit(str(sweep_params['f_end']))\n self.sweep_parameters_formlayout.addRow(self.f_end, QtWidgets.QLabel(text='Sweep stop frequency (Hz)'))\n\n self.amp_db = QtWidgets.QLineEdit(str(sweep_params['amp_db']))\n self.sweep_parameters_formlayout.addRow(self.amp_db, QtWidgets.QLabel(text='Sweep gain (dB)'))\n\n self.fade_out_samples = QtWidgets.QLineEdit(str(sweep_params['fade_out_samples']))\n self.sweep_parameters_formlayout.addRow(self.fade_out_samples, QtWidgets.QLabel(text='Fadeout before sweep end (samples)'))\n\n self.sweep_parameters_errormessage = QtWidgets.QLabel(\"Invalid Sweep Paramters\")\n self.sweep_parameters_formlayout.addRow(self.sweep_parameters_errormessage)\n self.sweep_parameters_errormessage.setVisible(False)\n\n # add bottom button box\n self.sweep_parameters_dialog.layout().addWidget(self.sweep_parameters_dialog.buttonBox)\n\n self.show_sweep_parameters = QtWidgets.QPushButton()\n self.show_sweep_parameters.setText(\"Set Sweep Parameters\")\n self.show_sweep_parameters.clicked.connect(self.sweep_parameters_dialog.show)\n self.show_sweep_parameters.setMaximumWidth(200)\n self.measuremet_paramteres_box.layout().addWidget(self.show_sweep_parameters)\n\n self.tab_config.layout().addWidget(self.measuremet_paramteres_box)\n\n\n # Config Tab\n # Output Folder Box:\n ############################\n\n self.output_folder_box = QtWidgets.QGroupBox(\"Select output folder for measured data\")\n self.output_folder_box.setFixedHeight(80)\n self.output_folder_box.setLayout(QtWidgets.QHBoxLayout())\n path = os.getcwd()\n path = os.path.join(path, \"Measurements\")\n if not os.path.exists(path):\n try:\n os.mkdir(path)\n except:\n path = os.getcwd()\n self.measurement_ref.set_output_path(path)\n\n self.output_folder_select = QtWidgets.QLineEdit()\n self.output_folder_select.setText(path)\n self.output_folder_box.layout().addWidget(self.output_folder_select)\n\n self.select_folder_button = QtWidgets.QPushButton()\n 
self.select_folder_button.setText(\"...\")\n self.select_folder_button.clicked.connect(self.select_folder_dialog)\n self.output_folder_box.layout().addWidget(self.select_folder_button)\n\n self.tab_config.layout().addWidget(self.output_folder_box)\n\n # Config Tab\n # Send OSC Box:\n ############################\n self.send_osc_box = QtWidgets.QGroupBox(\"Send OSC data to external application\")\n self.send_osc_box.setFixedHeight(200)\n self.send_osc_box.setLayout(QtWidgets.QFormLayout())\n self.send_osc_box.layout().setLabelAlignment(QtCore.Qt.AlignLeft)\n\n\n ip, port, address = self.measurement_ref.get_osc_parameters()\n\n self.send_osc_ip_select = QtWidgets.QLineEdit()\n self.send_osc_ip_select.setText(ip)\n self.send_osc_box.layout().addRow(\"OSC IP Address: \", self.send_osc_ip_select)\n\n self.send_osc_port_select = QtWidgets.QLineEdit()\n self.send_osc_port_select.setText(f'{port}')\n self.send_osc_box.layout().addRow(\"OSC Port: \", self.send_osc_port_select)\n\n self.send_osc_address_select = QtWidgets.QLineEdit()\n self.send_osc_address_select.setText(address)\n self.send_osc_box.layout().addRow(\"OSC Address: \", self.send_osc_address_select)\n\n\n self.send_osc_button = QtWidgets.QPushButton()\n self.send_osc_button.setText(\"Send OSC\")\n self.send_osc_button.clicked.connect(self.activate_osc_send)\n self.send_osc_button.setFixedSize(100, 50)\n self.send_osc_button.setCheckable(True)\n #self.send_osc_button.setStyleSheet(\"background-color : lightgrey\")\n self.send_osc_box.layout().addRow(self.send_osc_button)\n\n self.tab_config.layout().addWidget(self.send_osc_box)\n\n\n ## MEASURE TAB\n #############################\n #############################\n\n\n\n\n\n self.measurements_main_group = QtWidgets.QGroupBox()\n self.measurements_main_group.setLayout(QtWidgets.QHBoxLayout())\n\n self.measurements_main_group.layout().addWidget(QtWidgets.QLabel(\"Session Name:\"))\n self.session_name = QtWidgets.QLineEdit()\n self.measurements_main_group.layout().addWidget(self.session_name)\n\n self.measurements_main_group.layout().addStretch()\n\n self.clear_measurements_button = QtWidgets.QPushButton(\"Clear / Start New\")\n self.clear_measurements_button.clicked.connect(self.clear_measurements)\n self.measurements_main_group.layout().addWidget(self.clear_measurements_button)\n\n self.tab_measure.layout().addWidget(self.measurements_main_group)\n\n\n\n self.startMeasurementGroupBox = QtWidgets.QGroupBox('Start Measurement')\n self.startMeasurementGroupBox.setLayout(QtWidgets.QGridLayout())\n\n self.centerTriggerButton = QtWidgets.QPushButton('Center Measurement')\n self.centerTriggerButton.setObjectName(\"Center Measurement\")\n self.centerTriggerButton.clicked.connect(self.trigger_center_measurement)\n\n self.measurementTriggerButton = QtWidgets.QPushButton('Single Measurement')\n self.measurementTriggerButton.setObjectName(\"Single Measurement\")\n self.measurementTriggerButton.setFixedSize(QtCore.QSize(200, 100))\n self.measurementTriggerButton.clicked.connect(self.measurement_ref.trigger_measurement)\n\n self.autoTriggerButton = QtWidgets.QPushButton('Auto Measurement')\n self.autoTriggerButton.clicked.connect(self.measurement_ref.trigger_auto_measurement)\n\n self.autoMeasurementTriggerProgress = QtWidgets.QProgressBar()\n self.autoMeasurementTriggerProgress.setVisible(False)\n\n self.autoTriggerStopButton = QtWidgets.QPushButton('Stop Auto Measurement')\n self.autoTriggerStopButton.clicked.connect(self.measurement_ref.stop_auto_measurement)\n\n 
#self.startMeasurementGroupBox.layout().addStretch()\n self.startMeasurementGroupBox.layout().addWidget(self.centerTriggerButton, 1, 0, 1, 1)\n self.startMeasurementGroupBox.layout().addWidget(self.measurementTriggerButton, 0, 1, 3, 1)\n self.startMeasurementGroupBox.layout().addWidget(self.autoTriggerButton, 1, 2, 1, 1)\n self.startMeasurementGroupBox.layout().addWidget(self.autoMeasurementTriggerProgress, 2, 2, 1, 1)\n self.startMeasurementGroupBox.layout().addWidget(self.autoTriggerStopButton, 1, 3, 1, 1)\n #self.startMeasurementGroupBox.layout().addStretch()\n self.tab_measure.layout().addWidget(self.startMeasurementGroupBox)\n\n\n\n self.point_recommender_groupbox = QtWidgets.QGroupBox('Point Recommender')\n self.point_recommender_groupbox.setLayout(QtWidgets.QHBoxLayout())\n self.point_recommender_groupbox.setEnabled(False)\n\n self.recommend_point_button = QtWidgets.QPushButton('Recommend Point')\n self.recommend_point_button.clicked.connect(self.trigger_point_recommendation)\n\n self.start_guiding_button = QtWidgets.QPushButton('Start Guidance')\n self.start_guiding_button.clicked.connect(self.trigger_guided_measurement)\n\n self.clear_recommended_points_button = QtWidgets.QPushButton('Clear Recommendations')\n self.clear_recommended_points_button.clicked.connect(self.clear_recommended_points)\n\n\n self.point_recommender_groupbox.layout().addStretch()\n self.point_recommender_groupbox.layout().addWidget(self.recommend_point_button)\n self.point_recommender_groupbox.layout().addWidget(self.start_guiding_button)\n self.point_recommender_groupbox.layout().addWidget(self.clear_recommended_points_button)\n self.tab_measure.layout().addWidget(self.point_recommender_groupbox)\n\n self.plotgroupbox = QtWidgets.QGroupBox('Measurement Plots')\n self.plotgroupbox.setLayout(QtWidgets.QVBoxLayout())\n\n self.plot_widget = PlotWidget()\n\n self.plotgroupbox.layout().addWidget(self.plot_widget)\n self.tab_measure.layout().addWidget(self.plotgroupbox)\n\n ## DATA LIST TAB\n #############################\n self.positions_table = QtWidgets.QTableView()\n self.positions_table.setModel(self.measurement_ref.positions_list)\n self.positions_table.setSelectionBehavior(QtWidgets.QAbstractItemView.SelectRows)\n self.positions_table.setSelectionMode(QtWidgets.QAbstractItemView.SingleSelection)\n self.positions_table.verticalHeader().show()\n self.positions_table.horizontalHeader().setSectionResizeMode(1)\n self.positions_table.verticalHeader().setSectionResizeMode(3)\n self.positions_table.setFont(QtGui.QFont('Helvetica', 13))\n self.positions_table.setShowGrid(False)\n #self.positions_table.setMaximumWidth(300)\n\n\n\n self.positions_table_selection = self.positions_table.selectionModel()\n self.positions_table_selection.currentRowChanged.connect(self.data_table_selection)\n\n self.tab_data.layout().addWidget(self.positions_table, 0, 0, 1, 1)\n\n self.remove_measurement_button = QtWidgets.QPushButton(\"Delete Selected\")\n self.remove_measurement_button.clicked.connect(self.remove_measurement)\n self.tab_data.layout().addWidget(self.remove_measurement_button, 1, 0, 1, 1)\n\n self.plot_widget2 = PlotWidget()\n #self.plot_widget2.setMaximumWidth(200)\n self.tab_data.layout().addWidget(self.plot_widget2, 0, 1, 1, 1)\n\n\n ## HEADPHONE COMPENSATION TAB\n #############################\n\n self.hp_main_group = QtWidgets.QGroupBox()\n self.hp_main_group.setLayout(QtWidgets.QHBoxLayout())\n\n self.hp_main_group.layout().addWidget(QtWidgets.QLabel(\"Headphone Name:\"))\n self.headphone_name = 
QtWidgets.QLineEdit()\n self.hp_main_group.layout().addWidget(self.headphone_name)\n\n self.hp_main_group.layout().addStretch()\n\n self.clear_hp_measurements_button = QtWidgets.QPushButton(\"Clear / Start New\")\n self.clear_hp_measurements_button.clicked.connect(self.clear_hp_measurements)\n self.hp_main_group.layout().addWidget(self.clear_hp_measurements_button)\n\n self.tab_hpc.layout().addWidget(self.hp_main_group)\n\n self.hp_controls_group = QtWidgets.QGroupBox()\n self.hp_controls_group.setLayout(QtWidgets.QHBoxLayout())\n self.hp_controls_group.setAlignment(QtCore.Qt.AlignLeft)\n\n\n\n self.trigger_hp_measurement_button = QtWidgets.QPushButton(\"Trigger Headphone \\n Measurement\")\n self.trigger_hp_measurement_button.clicked.connect(self.trigger_hp_measurement)\n self.trigger_hp_measurement_button.setFixedSize(QtCore.QSize(200, 100))\n self.hp_controls_group.layout().addWidget(self.trigger_hp_measurement_button)\n\n self.remove_hp_measurement_button = QtWidgets.QPushButton(\"Remove Last \\n HP Measurement\")\n self.remove_hp_measurement_button.clicked.connect(self.remove_hp_measurement)\n self.remove_measurement_button.setFixedSize(QtCore.QSize(200, 50))\n self.hp_controls_group.layout().addWidget(self.remove_hp_measurement_button)\n self.hp_controls_group.layout().addStretch()\n\n self.hp_measurement_count = QtWidgets.QLabel(\"\")\n # self.hp_measurement_count.setFixedWidth(16)\n self.hp_controls_group.layout().addWidget(self.hp_measurement_count)\n\n\n\n self.tab_hpc.layout().addWidget(self.hp_controls_group)\n #self.plot_hpc_widget = PlotWidget()\n #self.tab_hpc.layout().addWidget(self.plot_hpc_widget)\n\n\n self.plot_hpirs_widget = PlotWidget_HPIRs()\n self.tab_hpc.layout().addWidget(self.plot_hpirs_widget)\n\n self.reg_beta_layout = QtWidgets.QHBoxLayout()\n self.reg_beta_layout.setAlignment(QtCore.Qt.AlignCenter)\n self.reg_beta_layout.addWidget(QtWidgets.QLabel(\"Reg Beta:\"))\n\n self.regularization_beta_box = QtWidgets.QDoubleSpinBox()\n self.regularization_beta_box.setMaximum(1.0)\n self.regularization_beta_box.setSingleStep(0.05)\n self.regularization_beta_box.setValue(0.4)\n self.regularization_beta_box.setFixedWidth(100)\n self.regularization_beta_box.valueChanged.connect(self.set_regularization_beta)\n self.reg_beta_layout.addWidget(self.regularization_beta_box)\n\n self.tab_hpc.layout().addLayout(self.reg_beta_layout)\n\n\n\n self.plot_hpcf_widget = PlotWidget_HPCF()\n self.tab_hpc.layout().addWidget(self.plot_hpcf_widget)\n\n self.plot_hptf(np.array([]))\n self.plot_hpc_estimate(np.array([]), np.array([]))\n\n\n ## Layout finalilzation\n\n self.gridLayout = QtWidgets.QGridLayout()\n\n self.gridLayout.addWidget(self.tabWidget, 0, 1, 4, 1)\n self.gridLayout.addWidget(self.vpWidget, 0, 0, 1, 1)\n self.gridLayout.addWidget(self.device_status_widget, 1, 0, 1, 1)\n self.gridLayout.addWidget(self.tracker_status_widget, 2, 0, 1, 1)\n self.gridLayout.addWidget(self.osc_status_box, 2, 0, 1, 1)\n self.gridLayout.addWidget(self.manualAngleBox, 3, 0, 1, 1)\n\n self.gridLayout.setColumnStretch(0, 10)\n self.gridLayout.setColumnStretch(1, 10)\n self.cwidget.setLayout(self.gridLayout)\n #self.gridLayout.setSizeConstraint(QtWidgets.QLayout.SetFixedSize)\n\n self.resize(1100, 600)\n\n\n\n # self.menubar = QtWidgets.QMenuBar(MainWindow)\n # self.menubar.setGeometry(QtCore.QRect(0, 0, 800, 22))\n # self.menubar.setObjectName(\"menubar\")\n # MainWindow.setMenuBar(self.menubar)\n # self.statusbar = QtWidgets.QStatusBar(MainWindow)\n # self.statusbar.setObjectName(\"statusbar\")\n # 
MainWindow.setStatusBar(self.statusbar)\n\n self.tabWidget.setCurrentIndex(0)\n QtCore.QMetaObject.connectSlotsByName(self)\n\n self.measurement_ref.register_gui_handler(self)\n\n\n def closeEvent(self, *args, **kwargs):\n super(QtWidgets.QMainWindow, self).closeEvent(*args, **kwargs)\n\n def resizeEvent(self, event):\n _translate = QtCore.QCoreApplication.translate\n self.tabWidget.setTabText(self.tabWidget.indexOf(self.tab_config), _translate(\"MainWindow\", \"Configure\"))\n self.tabWidget.setTabText(self.tabWidget.indexOf(self.tab_measure), _translate(\"MainWindow\", \"Measure\"))\n self.tabWidget.setTabText(self.tabWidget.indexOf(self.tab_data), _translate(\"MainWindow\", \"Data List\"))\n self.tabWidget.setTabText(self.tabWidget.indexOf(self.tab_hpc), _translate(\"MainWindow\", \"Headphone Compensation\"))\n\n\n self.positions_table.setColumnWidth(0, self.positions_table.width() / 3)\n self.positions_table.setColumnWidth(1, self.positions_table.width() / 3)\n self.positions_table.setColumnWidth(2, self.positions_table.width() / 3)\n\n\n\n def manual_update_az(self):\n self.measurement_ref.tracker.fallback_angle[0] = self.azimuthBox.value()\n\n def manual_update_el(self):\n self.measurement_ref.tracker.fallback_angle[1] = self.elevationBox.value()\n def manual_update_radius(self):\n self.measurement_ref.tracker.fallback_angle[2] = self.radiusBox.value() / 100\n\n def add_measurement_point(self, az, el):\n if use_vispy:\n self.vispy_canvas.meas_points.add_point(az, el)\n\n def add_center_point(self):\n if use_vispy:\n self.vispy_canvas.center_points.add_point(0, 0)\n\n #def remove_measurement_point(self, az, el):\n\n def plot_recordings(self, rec_l, rec_r, fb_loop, fs, fb_loop_used=False):\n self.plot_widget.plot_recordings(rec_l, rec_r, fb_loop, fs=fs, fb_loop_used=fb_loop_used)\n\n def plot_IRs(self, ir_l, ir_r, fs):\n self.plot_widget.plot_IRs(ir_l, ir_r, fs=fs)\n\n def updateMeasurementList(self, measurement_data):\n pass\n\n def switch_trackers(self):\n self.measurement_ref.tracker.switch_trackers()\n if self.measurement_ref.tracker.trackers_switched:\n self.tracker1_label = QtWidgets.QLabel(\"Tracker 1:\")\n self.tracker2_label = QtWidgets.QLabel(\"(Head) Tracker 2:\")\n else:\n self.tracker1_label = QtWidgets.QLabel(\"(Head) Tracker 1:\")\n self.tracker2_label = QtWidgets.QLabel(\"Tracker 2:\")\n\n\n def update_tracker_status(self, status):\n self.tracker1_status_label.setText(status[\"tracker1\"])\n self.tracker2_status_label.setText(status[\"tracker2\"])\n\n if status[\"tracker1\"] == \"Tracking\" and status[\"tracker2\"] == \"Tracking\" \\\n or self.measurement_ref.tracker.tracking_mode == \"OSC_direct\":\n self.show_manual_angle_box(False)\n else:\n self.show_manual_angle_box(True)\n\n def show_manual_angle_box(self, show):\n if show:\n if self.gridLayout.indexOf(self.manualAngleBox) == -1:\n self.gridLayout.removeWidget(self.tabWidget)\n self.gridLayout.addWidget(self.tabWidget, 0, 1, 4, 1)\n self.gridLayout.addWidget(self.manualAngleBox, 3, 0, 1, 1)\n self.manualAngleBox.setVisible(True)\n\n else:\n if self.gridLayout.indexOf(self.manualAngleBox) != -1:\n self.gridLayout.removeWidget(self.tabWidget)\n self.gridLayout.removeWidget(self.manualAngleBox)\n self.manualAngleBox.setVisible(False)\n self.gridLayout.addWidget(self.tabWidget, 0, 1, 3, 1)\n\n def updateCurrentAngle(self, az, el, r):\n r = r*100\n self.azimuthLabel.setText(\"Az: %.0f°\" % az)\n self.elevationLabel.setText(\"El: %.0f°\" % el)\n self.radiusLabel.setText(\"Radius: %.0fcm\" % r)\n\n def 
set_offset_speaker_z(self):\n self.measurement_ref.tracker.offset_cm['speaker_z'] = self.offset_speaker_z.value()\n\n def set_offset_speaker_y(self):\n self.measurement_ref.tracker.offset_cm['speaker_y'] = self.offset_speaker_y.value()\n\n def set_offset_head_y(self):\n self.measurement_ref.tracker.offset_cm['head_y'] = self.offset_head_y.value()\n\n def select_folder_dialog(self):\n path = QtWidgets.QFileDialog.getExistingDirectory(self.cwidget,\n 'Open Directory',\n self.output_folder_select.text(),\n QtWidgets.QFileDialog.ShowDirsOnly | QtWidgets.QFileDialog.DontResolveSymlinks)\n if path:\n self.output_folder_select.setText(path)\n self.measurement_ref.set_output_path(path)\n\n\n # helper function to give all calibration functions the time delay\n def calibrate(self, calibration_function):\n interval = self.calibration_wait_time.value() * 1000\n QtCore.QTimer.singleShot(interval, calibration_function)\n\n def calibrate_orientation(self):\n if self.measurement_ref.tracker.calibrate_orientation():\n self.measurement_ref.measurement.play_sound(True)\n self.calibrate_orientation_label.setText(\"Calibrated\")\n else:\n self.measurement_ref.measurement.play_sound(False)\n\n # NOTES on the calibrating of head dimensions:\n # left_ear and right_ear are for yielding the correct head center.\n # head_left and head_right are for yielding the correct anthropometric head width\n # head_front and head_right are for yielding the correct anthropometric head length\n\n def calibrate_left_ear(self):\n if self.measurement_ref.tracker.calibrate_headdimensions('left_ear', multiple_calls=False):\n self.calibrate_ear_left_label.setText(f\"Calibrated\")#, {self.measurement_ref.tracker.head_dimensions['ear_pos_l']}\")\n self.measurement_ref.measurement.play_sound(True)\n else:\n self.measurement_ref.measurement.play_sound(False)\n if self.measurement_ref.tracker.head_dimensions['ear_pos_l'] is not None:\n self.calibrate_ear_left_label.setText(f\"Recalibration failed\")#, {self.measurement_ref.tracker.head_dimensions['ear_pos_l']}\")\n\n def calibrate_right_ear(self):\n if self.measurement_ref.tracker.calibrate_headdimensions('right_ear', multiple_calls=False):\n self.calibrate_ear_right_label.setText(f\"Calibrated\")#, {self.measurement_ref.tracker.head_dimensions['ear_pos_r']}\")\n self.measurement_ref.measurement.play_sound(True)\n else:\n self.measurement_ref.measurement.play_sound(False)\n if self.measurement_ref.tracker.head_dimensions['ear_pos_r'] is not None:\n self.calibrate_ear_right_label.setText(f\"Recalibration failed\")#, {self.measurement_ref.tracker.head_dimensions['ear_pos_r']}\")\n\n\n def calibrate_head_left(self):\n if self.measurement_ref.tracker.calibrate_headdimensions('left'):\n self.measurement_ref.measurement.play_sound(True)\n self.calibrate_left_head_label.setText(\"Calibrated\")\n if self.measurement_ref.tracker.head_dimensions['head_width'] is not None:\n self.head_width_label.setText(f\"Head Width: {self.measurement_ref.tracker.head_dimensions['head_width']:.3f}\")\n else:\n self.calibrate_left_head_label.setText(\"Calibration Failed\")\n self.measurement_ref.measurement.play_sound(False)\n\n\n def calibrate_head_right(self):\n if self.measurement_ref.tracker.calibrate_headdimensions('right'):\n self.measurement_ref.measurement.play_sound(True)\n self.calibrate_right_head_label.setText(\"Calibrated\")\n if self.measurement_ref.tracker.head_dimensions['head_width'] is not None:\n self.head_width_label.setText(f\"Head Width: 
{self.measurement_ref.tracker.head_dimensions['head_width']:.3f}\")\n else:\n self.calibrate_right_head_label.setText(\"Calibration Failed\")\n self.measurement_ref.measurement.play_sound(False)\n\n\n def calibrate_head_front(self):\n if self.measurement_ref.tracker.calibrate_headdimensions('front'):\n self.measurement_ref.measurement.play_sound(True)\n self.calibrate_front_head_label.setText(\"Calibrated\")\n if self.measurement_ref.tracker.head_dimensions['head_length'] is not None:\n self.head_length_label.setText(f\"Head Length: {self.measurement_ref.tracker.head_dimensions['head_length']:.3f}\")\n else:\n self.calibrate_front_head_label.setText(\"Calibration Failed\")\n self.measurement_ref.measurement.play_sound(False)\n\n\n def calibrate_head_back(self):\n if self.measurement_ref.tracker.calibrate_headdimensions('back'):\n self.measurement_ref.measurement.play_sound(True)\n self.calibrate_back_head_label.setText(\"Calibrated\")\n if self.measurement_ref.tracker.head_dimensions['head_length'] is not None:\n self.head_length_label.setText(f\"Head Length: {self.measurement_ref.tracker.head_dimensions['head_length']:.3f}\")\n else:\n self.calibrate_back_head_label.setText(\"Calibration Failed\")\n\n\n def calibrate_acoustical_centre(self):\n if self.measurement_ref.tracker.calibrate_acoustical_center():\n self.measurement_ref.measurement.play_sound(True)\n self.calibrate_acoustical_center_label.setText(f'Calibrated')#, {self.measurement_ref.tracker.acoustical_center_pos}')\n else:\n self.measurement_ref.measurement.play_sound(False)\n\n\n\n def trigger_center_measurement(self):\n interval = 0.5 * 1000\n QtCore.QTimer.singleShot(interval, self.measurement_ref.trigger_center_measurement)\n\n def trigger_point_recommendation(self):\n az, el = self.measurement_ref.recommend_points(1)\n\n def trigger_guided_measurement(self):\n self.measurement_ref.start_guided_measurement()\n\n def clear_recommended_points(self):\n self.measurement_ref.clear_recommended_points()\n\n def enable_point_recommendation(self):\n self.point_recommender_groupbox.setEnabled(True)\n\n def data_table_selection(self, selected, deselected):\n if use_vispy:\n self.vispy_canvas.meas_points.deselect_points(deselected.row())\n self.vispy_canvas.meas_points.select_point(selected.row())\n\n #print(\"Data Table Selection: \" + str(selected.row()))\n idx = selected.row()\n try:\n ir_l = self.measurement_ref.measurements[idx, 0, :]\n ir_r = self.measurement_ref.measurements[idx, 1, :]\n raw_l = self.measurement_ref.raw_signals[idx, 0, :]\n raw_r = self.measurement_ref.raw_signals[idx, 1, :]\n fb = self.measurement_ref.raw_feedbackloop[idx, 0, :]\n\n self.plot_widget2.plot_IRs(ir_l, ir_r, plot='spectrogram')\n self.plot_widget2.plot_recordings(raw_l, raw_r, fb, fs=self.measurement_ref.measurement.get_samplerate(), plot='spectrogram', fb_loop_used=self.measurement_ref.measurement.feedback_loop_used)\n\n except IndexError:\n print(\"Could not plot data: Invalid id\")\n\n\n def tab_changed(self, index):\n try:\n if index is not self.tab_data_index:\n if use_vispy:\n self.vispy_canvas.meas_points.deselect_points()\n else:\n numRows = self.positions_table.model().rowCount(QtCore.QModelIndex())\n self.positions_table.selectRow(numRows-1)\n\n except AttributeError:\n pass\n\n\n\n\n def clear_measurements(self):\n self.measurement_ref.delete_all_measurements()\n self.session_name.clear()\n\n def remove_measurement(self):\n indexes = self.positions_table_selection.selectedRows()\n for index in indexes:\n id = index.row()\n dialog = 
QtWidgets.QMessageBox\n ret = dialog.question(self,'', \"Are you sure you want to delete this measurement?\", dialog.Yes | dialog.No)\n\n if ret == dialog.Yes:\n #print(\"Deleting Measurement \" + str(id))\n self.measurement_ref.delete_measurement(id)\n\n def update_dev_status(self):\n devs = self.measurement_ref.devices\n\n self.label_out_exc.setText(devs['out_excitation'])\n self.label_out_exc_2.setText(devs['out_excitation_2'])\n self.label_out_fb.setText(devs['out_feedback'])\n self.label_in_left.setText(devs['in_left'])\n self.label_in_right.setText(devs['in_right'])\n self.label_in_fb.setText(devs['in_feedback'])\n\n def trigger_hp_measurement(self):\n self.measurement_ref.hp_measurement()\n\n def remove_hp_measurement(self):\n self.measurement_ref.remove_hp_measurement()\n\n # def plot_hpc_recordings(self, rec_l, rec_r, fb_loop):\n # self.plot_hpc_widget.plot_recordings(rec_l, rec_r, fb_loop)\n\n def plot_hptf(self, hpc_irs, hpc_average=None, fs=48000):\n self.plot_hpirs_widget.plot_hptf(hpc_irs, hpc_average=hpc_average, fs=fs)\n\n def plot_hpc_estimate(self, H_l, H_r, fs=48000):\n self.plot_hpcf_widget.plot_hpcf(H_l, H_r, fs=fs)\n\n\n def set_regularization_beta(self):\n self.measurement_ref.estimate_hpcf(self.regularization_beta_box.value())\n\n def clear_hp_measurements(self):\n self.measurement_ref.remove_all_hp_measurements()\n self.headphone_name.clear()\n\n def warning_invalid_tracking(self, warning=True):\n palette = self.tracker_status_widget.palette()\n if warning:\n palette.setColor(QtGui.QPalette.Window, QtGui.QColor('red'))\n else:\n palette.setColor(QtGui.QPalette.Window, QtGui.QColor('grey'))\n\n\n self.tracker_status_widget.setPalette(palette)\n self.tracker_status_widget.setAutoFillBackground(True)\n self.tracker_status_widget.repaint()\n\n def select_tracking_input(self):\n radioButton = self.tracking_input_box.sender()\n if radioButton.isChecked():\n if radioButton.sourcename == \"Vive\":\n self.vivetracker_box.show()\n self.osc_config_box.hide()\n\n self.osc_status_box.hide()\n self.tracker_status_widget.show()\n\n self.measurement_ref.tracker.set_tracking_mode(radioButton.sourcename)\n\n self.send_osc_box.setEnabled(True)\n\n\n\n if radioButton.sourcename == \"OSC_direct\":\n self.vivetracker_box.hide()\n self.osc_config_box.show()\n\n self.tracker_status_widget.hide()\n self.osc_status_box.show()\n\n self.measurement_ref.tracker.set_tracking_mode(radioButton.sourcename)\n\n ip, port = self.measurement_ref.tracker.osc_input_server.get_current_ip_and_port()\n self.osc_ip_label.setText(f\"Current Host IP: {ip}\")\n self.osc_port_label.setText(f\"OSC Server Port: {port}\")\n\n self.send_osc_box.setEnabled(False)\n\n self.show_manual_angle_box(False)\n\n\n def set_osc_status(self, osc_status):\n if osc_status:\n #self.osc_status_indicator.setStyleSheet(\"color: green\")\n self.osc_status_indicator.setStyleSheet(\"QCheckBox::indicator\"\n \"{\"\n \"background-color : lightgreen;\"\n \"}\")\n else:\n #self.osc_status_indicator.setStyleSheet(\"color: white\")\n self.osc_status_indicator.setStyleSheet(\"QCheckBox::indicator\"\n \"{\"\n \"background-color : white;\"\n \"}\")\n\n def activate_osc_send(self):\n\n checked = self.send_osc_button.isChecked()\n\n if self.send_osc_button.isChecked():\n if self.measurement_ref.start_osc_send(self.send_osc_ip_select.text(), self.send_osc_port_select.text(), self.send_osc_address_select.text()):\n self.send_osc_ip_select.setEnabled(False)\n self.send_osc_port_select.setEnabled(False)\n 
self.send_osc_address_select.setEnabled(False)\n self.tracking_input_OSC_direct.setEnabled(False)\n else:\n self.send_osc_button.setChecked(False)\n else:\n self.measurement_ref.stop_osc_send()\n self.send_osc_ip_select.setEnabled(True)\n self.send_osc_port_select.setEnabled(True)\n self.send_osc_address_select.setEnabled(True)\n self.tracking_input_OSC_direct.setEnabled(True)\n\n def update_sweep_parameters(self):\n try:\n self.measurement_ref.measurement.set_sweep_parameters(d_sweep_sec=float(self.sweeplength_sec.text()),\n d_post_silence_sec=float(self.post_silence_sec.text()),\n f_start=int(self.f_start.text()),\n f_end = int(self.f_end.text()),\n amp_db=float(self.amp_db.text()),\n fade_out_samples=int(self.fade_out_samples.text()))\n except ValueError:\n self.sweep_parameters_errormessage.setVisible(True)\n return\n\n self.sweep_parameters_errormessage.setVisible(False)\n self.sweep_parameters_dialog.close()\n\n return\n\n def deactivate_vispy(self):\n use_vispy = False\n\n self.vpWidget.layout().removeWidget(self.vispy_canvas.native)\n self.vpWidget.layout().removeWidget(self.sliderTheta)\n self.vpWidget.layout().removeWidget(self.sliderPhi)\n\n self.vispy_canvas.native.hide()\n self.sliderTheta.hide()\n self.sliderPhi.hide()\n\n\n\n #del self.vispy_canvas\n #del self.sliderPhi\n #del self.sliderTheta\n\n self.vp_missing_label = QtWidgets.QLabel(\n \"Vispy package missing or deactivated: \\n3D speaker representation disabled.\")\n self.vpWidget.layout().addWidget(self.vp_missing_label, 1, 1, 1, 3)\n\n\n\n\n\nclass InstructionsDialogBox(QtWidgets.QDialog):\n\n def __init__(self, *args, **kwargs):\n\n instruction_text = \\\n \"1. Mount tracker T1 on listener head. The orientation and exact position are not important, as long as it stays fixed. \\n\\n\" \\\n \"2. Check if tracker roles are correct by rotating tracker T2. The angles shouldn't change since only the position of tracker T2 is used. Switch tracker roles if necessary\\n\\n\" \\\n \"3. Hold tracker T2 to both ears (bottom center on ear canal) and calibrate each ear. Tracker T2 orientation does not matter here, but from now on tracker T1 (on the listeners head) has to stay fixed & stable on the head.\\n\\n\" \\\n \"4. Hold tracker T2 to acoustical center of speaker and calibrate it. Tracker orientation does not matter here\\n\\n\" \\\n \"5. Put tracker T2 on a planar surface (eg. on top of speaker, floor) pointing towards the same direction as frontal view of listener. Translation does not matter here\\n\\n\" \\\n \"NOTE: If acoustical center is calibrated, this calibrated position stays fixed. If the speaker is moved the calibration has to be repeated.\"\n\n\n super(InstructionsDialogBox, self).__init__(*args, **kwargs)\n\n self.setModal(False)\n self.setWindowTitle(\"Calibration Instructions\")\n\n self.instructionsbox = QtWidgets.QLabel()\n self.instructionsbox.setText(instruction_text)\n self.instructionsbox.setWordWrap(True)\n\n Btn = QtWidgets.QDialogButtonBox.Close\n\n self.buttonBox = QtWidgets.QDialogButtonBox(Btn)\n self.buttonBox.clicked.connect(self.close)\n\n self.layout = QtWidgets.QVBoxLayout()\n self.layout.addWidget(self.instructionsbox)\n self.layout.addWidget(self.buttonBox)\n\n self.setLayout(self.layout)\n\n\n",
"import numpy as np\n\n# equivalent to MATLAB sph2cart & cart2sph\ndef cart2sph(x,y,z):\n azimuth = np.arctan2(y,x)\n elevation = np.arctan2(z,np.sqrt(x**2 + y**2))\n r = np.sqrt(x**2 + y**2 + z**2)\n return azimuth, elevation, r\n\ndef sph2cart(azimuth,elevation,r):\n x = r * np.cos(elevation) * np.cos(azimuth)\n y = r * np.cos(elevation) * np.sin(azimuth)\n z = r * np.sin(elevation)\n return x, y, z\n\n# get the angular distances from a single point to a list of points\ndef getDistances(p_az, p_el, grid_az, grid_el, input_format='deg', return_format='rad'):\n\n if input_format == 'deg':\n p_az = p_az * np.pi / 180\n p_el = p_el * np.pi / 180\n grid_az = grid_az * np.pi / 180\n grid_el = grid_el * np.pi / 180\n\n x1, y1, z1 = sph2cart(p_az, p_el, 1);\n x2, y2, z2 = sph2cart(grid_az, grid_el, 1);\n\n # make the single point value a matrix with same dimensions as the grid\n x1 = x1 * np.ones_like(x2)\n y1 = y1 * np.ones_like(z2)\n z1 = z1 * np.ones_like(z2)\n\n dotProduct = np.einsum('ji,ji->i', [x1, y1, z1], [x2, y2, z2])\n\n distances = np.arccos(np.clip(dotProduct, -1.0, 1.0));\n\n if return_format == 'deg':\n distances = distances * 180 / np.pi\n\n return distances\n\n# get the angular distance from one point to another\ndef angularDistance(az1, el1, az2, el2, input_format='deg', return_format='deg'):\n\n\n if input_format == 'deg':\n az1 = az1 * np.pi / 180\n az2 = az2 * np.pi / 180\n el1 = el1 * np.pi / 180\n el2 = el2 * np.pi / 180\n\n x1, y1, z1 = sph2cart(az1, el1, 1);\n x2, y2, z2 = sph2cart(az2, el2, 1);\n\n # distance = np.arctan2(np.linalg.norm(np.cross(xyz1, xyz2)), np.dot(xyz1, xyz2)) / 180;\n distance = np.arccos(np.clip(np.dot([x1, y1, z1], [x2, y2, z2]), -1.0, 1.0));\n # distance = np.arccos(np.clip(0.5, -1.0, 1.0)) / np.pi;\n\n if return_format == 'deg':\n distance = distance * 180 / np.pi\n\n return distance"
] |
[
[
"numpy.array"
],
[
"numpy.dot",
"numpy.ones_like",
"numpy.sqrt",
"numpy.einsum",
"numpy.clip",
"numpy.cos",
"numpy.sin",
"numpy.arctan2"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
OptimalDesignLab/pyStatReduce
|
[
"9ea128409b91dd582e574e2e1cc153572b6c60a4",
"9ea128409b91dd582e574e2e1cc153572b6c60a4"
] |
[
"pystatreduce/doc/plot/plot_hadamard/run_hadamard_eigen_accuracy.py",
"pystatreduce/optimize/rosenbrock/pyopt_uq_rosenbrock.py"
] |
[
"# run_hadamard_eigen_accuracy\nimport os\nimport sys\nimport errno\n\nimport numpy as np\nimport chaospy as cp\n\nfrom pystatreduce.stochastic_collocation import StochasticCollocation\nfrom pystatreduce.quantity_of_interest import QuantityOfInterest\nfrom pystatreduce.dimension_reduction import DimensionReduction\nimport pystatreduce.examples as examples\n\ndef run_hadamard(systemsize, eigen_decayrate, std_dev, n_sample):\n # n_collocation_pts = 2\n\n # Create Hadmard Quadratic object\n QoI = examples.HadamardQuadratic(systemsize, eigen_decayrate)\n\n # Create stochastic collocation object\n # collocation = StochasticCollocation(n_collocation_pts, \"Normal\")\n\n # Initialize chaospy distribution\n x = np.random.randn(QoI.systemsize)\n jdist = cp.MvNormal(x, np.diag(std_dev))\n\n threshold_factor = 0.5\n dominant_space_exact = DimensionReduction(threshold_factor=threshold_factor,\n exact_Hessian=True)\n dominant_space = DimensionReduction(threshold_factor=threshold_factor,\n exact_Hessian=False,\n n_arnoldi_sample=n_sample)\n\n dominant_space.getDominantDirections(QoI, jdist, max_eigenmodes=20)\n dominant_space_exact.getDominantDirections(QoI, jdist)\n\n # Sort the exact eigenvalues in descending order\n sort_ind = dominant_space_exact.iso_eigenvals.argsort()[::-1]\n\n # Compare the eigenvalues of the 10 most dominant spaces\n lambda_exact = dominant_space_exact.iso_eigenvals[sort_ind]\n error_arr = dominant_space.iso_eigenvals[0:10] - lambda_exact[0:10]\n # print 'error_arr = ', error_arr\n rel_error_norm = np.linalg.norm(error_arr) / np.linalg.norm(lambda_exact[0:10])\n\n return rel_error_norm\n\nsystemsize_arr = [64, 128, 256]\neigen_decayrate_arr = [2.0, 1.0, 0.5]\nn_arnoldi_samples_arr = [11, 21, 31, 41, 51]\nn_stddev_samples = 10\n\neigen_decayrate_arr_idx = 0\n\nerr_arr = np.zeros([len(n_arnoldi_samples_arr), n_stddev_samples])\navg_err = np.zeros(len(n_arnoldi_samples_arr))\nmax_err = np.zeros(len(n_arnoldi_samples_arr))\nmin_err = np.zeros(len(n_arnoldi_samples_arr))\n\nfor eigen_decayrate_arr_idx in range(0, len(eigen_decayrate_arr)):\n for i in systemsize_arr:\n for j in range(0, len(n_arnoldi_samples_arr)):\n print('decay rate = ', eigen_decayrate_arr[eigen_decayrate_arr_idx]\n ,', systemsize = ', i, ', arnoldi samples = ', n_arnoldi_samples_arr[j])\n for k in range(0, n_stddev_samples):\n std_dev = abs(np.random.randn(i))\n err_arr[j,k] = run_hadamard(i, eigen_decayrate_arr[eigen_decayrate_arr_idx],\n std_dev, n_arnoldi_samples_arr[j])\n # print 'error_norm = ', error_norm\n # sys.exit()\n avg_err[j] = np.mean(err_arr[j,:])\n max_err[j] = np.max(err_arr[j,:])\n min_err[j] = np.min(err_arr[j,:])\n\n dirname = ''.join(['./plot_data/eigen_accuracy/', str(i), '/'])\n # Create the directory if it doesn't exist\n if not os.path.isdir(dirname):\n try:\n os.makedirs(dirname)\n except OSError as exc: # Guard against race condition\n if exc.errno != errno.EEXIST:\n raise\n fname1 = ''.join([dirname, 'avg_err_decay', str(eigen_decayrate_arr[eigen_decayrate_arr_idx]), '.txt'])\n fname2 = ''.join([dirname, 'max_err_decay', str(eigen_decayrate_arr[eigen_decayrate_arr_idx]), '.txt'])\n fname3 = ''.join([dirname, 'min_err_decay', str(eigen_decayrate_arr[eigen_decayrate_arr_idx]), '.txt'])\n\n np.savetxt(fname1, avg_err, delimiter=',')\n np.savetxt(fname2, max_err, delimiter=',')\n np.savetxt(fname3, min_err, delimiter=',')\n",
"# Use the OpenMDAO framework for OUU for optimizing a Rosenbrock function\nfrom __future__ import division, print_function\nimport os\nimport sys\nimport errno\nsys.path.insert(0, '../../src')\n\n# pyStatReduce specific imports\nimport numpy as np\nimport chaospy as cp\nfrom stochastic_collocation import StochasticCollocation\nfrom quantity_of_interest import QuantityOfInterest\nfrom dimension_reduction import DimensionReduction\nfrom stochastic_arnoldi.arnoldi_sample import ArnoldiSampling\nimport examples\n\n#pyoptsparse sepecific imports\nfrom scipy import sparse\nimport argparse\nfrom pyoptsparse import Optimization, OPT, SNOPT\n\n# pyDimReduce imports\nfrom pydimreduce.quantities_of_interest.rosenbrock import Rosenbrock\n\n# OpenMDAO imports\nfrom openmdao.api import Problem, IndepVarComp, ExplicitComponent, Group\n\nclass RosenbrockOpt(object):\n\n def __init__(self, n_random):\n self.systemsize = n_random\n self.p = Problem()\n self.ivc = self.p.model.add_subsystem('design_point', IndepVarComp())\n self.ivc.add_output('x', shape=(n_random,))\n self.p.model.add_subsystem('rosenbrock', Rosenbrock(size=n_random))\n self.p.model.connect('design_point.x', 'rosenbrock.rv')\n self.p.setup()\n\n\n def eval_QoI(self, mu, xi):\n rv = mu + xi\n self.p['design_point.x'] = rv\n self.p.run_model()\n # print(self.p['rosenbrock.fval'][0])\n return self.p['rosenbrock.fval'][0]\n\n def eval_QoIGradient(self, mu, xi):\n rv = mu + xi\n self.p['design_point.x'] = rv\n self.p.run_model()\n deriv = self.p.compute_totals(of=['rosenbrock.fval'], wrt=['design_point.x'])\n # print(deriv['rosenbrock.fval', 'design_point.x'][0])\n\n return deriv['rosenbrock.fval', 'design_point.x'][0]\n\n\ndef objfunc(xdict):\n mu = xdict['xvars']\n funcs = {}\n jdist = cp.MvNormal(mu, std_dev)\n QoI_func = QoI.eval_QoI\n funcs['obj'] = collocation.normal.reduced_mean(QoI_func, jdist, dominant_space)\n fail = False\n return funcs, fail\n\ndef sens(xdict, funcs):\n mu = xdict['xvars']\n jdist = cp.MvNormal(mu, std_dev)\n QoI_func = QoI.eval_QoIGradient\n funcsSens = {}\n funcsSens['obj', 'xvars'] = collocation_grad.normal.reduced_mean(QoI_func, jdist, dominant_space)\n\n fail = False\n return funcsSens, fail\n\nif __name__ == \"__main__\":\n\n # Instantiate the rosenbrock problem globally\n rv_systemsize = 2\n initial_seed = 2*np.ones(rv_systemsize)\n QoI = RosenbrockOpt(rv_systemsize)\n std_dev = np.eye(rv_systemsize)\n jdist = cp.MvNormal(initial_seed, std_dev)\n collocation = StochasticCollocation(3, \"Normal\")\n collocation_grad = StochasticCollocation(3, \"Normal\", QoI_dimensions=rv_systemsize)\n threshold_factor = 0.9\n dominant_space = DimensionReduction(threshold_factor, n_arnoldi_sample=3, exact_Hessian=False)\n dominant_space.getDominantDirections(QoI, jdist)\n\n # Setup the problem\n optProb = Optimization('Paraboloid', objfunc)\n lower_bound = -20*np.ones(rv_systemsize)\n upper_bound = 20*np.ones(rv_systemsize)\n optProb.addVarGroup('xvars', rv_systemsize, 'c', lower=lower_bound,\n upper=upper_bound, value=10*np.ones(rv_systemsize))\n optProb.addObj('obj')\n # Optimizer\n opt = SNOPT(optOptions = {'Major feasibility tolerance' : 1e-6})\n sol = opt(optProb, sens=sens)\n\n # Check Solution\n import inspect\n print(sol.fStar)\n print(sol.getDVs()['xvars'])\n"
] |
[
[
"numpy.diag",
"numpy.min",
"numpy.linalg.norm",
"numpy.max",
"numpy.mean",
"numpy.random.randn",
"numpy.savetxt"
],
[
"numpy.eye",
"numpy.ones"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
JappaB/Active_Learning_Object_Detection
|
[
"3d9ad367aa872cbf3e9d71c566042c78fe2d0e76"
] |
[
"active_learning_package/voc_eval_helpers.py"
] |
[
"import os\nimport numpy as np\nimport pickle\nimport xml.etree.ElementTree as ET\nimport time\nimport sys\n\nimport torch\nfrom torch.autograd import Variable\n\nimport data\nfrom . import helpers\n\n\ndef eval(test_dataset, args, net, al_iteration, eval_ensemble_idx = 99999, epochs_test = False, train_iters = None, use_dataset_image_ids = False):\n \"\"\"\n largely copied from eval.py from the original pytorch SSD repository: https://github.com/amdegroot/ssd.pytorch\n Slightly adjusted to fit in this active learning module\n \"\"\"\n print('start VOC eval')\n\n num_images = len(test_dataset)\n\n # all detections are collected into:\n # all_boxes[cls][image] = N x 5 array of detections in\n # (x1, y1, x2, y2, score)\n if args.dataset in ['VOC07', 'VOC12']:\n labelmap = data.VOC_CLASSES\n elif args.dataset == 'VOC07_1_class':\n labelmap = [args.relevant_class]\n elif args.dataset == 'VOC07_6_class':\n labelmap = args.labelmap\n else:\n raise NotImplementedError()\n\n\n args.summary['eval_model']['num_images_eval'] = num_images\n args.summary['eval_model']['num_objects_eval'] = 'todo'\n args.summary['eval_model']['APs'] = {}\n\n all_boxes = [[[] for _ in range(num_images)]\n for _ in range(len(labelmap) + 1)]\n\n # timers\n _t = {'im_detect': helpers.Timer(), 'misc': helpers.Timer()}\n\n output_dir = args.experiment_dir + 'eval/'\n print('output dir ', output_dir)\n if not os.path.exists(output_dir):\n os.makedirs(output_dir, exist_ok=True)\n\n if epochs_test:\n det_file = os.path.join(output_dir,'al-iter_'+str(al_iteration)+'_ensemble_'+str(args.eval_ensemble_idx)+'_'+str('todo')+'_detections.pkl')\n else:\n det_file = os.path.join(output_dir,'al-iter_'+str(al_iteration)+'_ensemble_'+str(args.eval_ensemble_idx)+str()+'_detections.pkl')\n\n # if already done the detection passes with this network.\n if os.path.isfile(det_file):\n with open(det_file, 'rb') as file:\n all_boxes = pickle.load(file)\n\n else:\n for i in range(num_images):\n im, gt, h, w = test_dataset.pull_item(i)\n\n x = Variable(im.unsqueeze(0))\n\n if args.cuda and torch.cuda.is_available():\n x = x.cuda()\n\n _t['im_detect'].tic()\n\n detections = net(x).data\n detect_time = _t['im_detect'].toc(average=False)\n # set detections back to cpu\n if args.cuda and torch.cuda.is_available():\n detections = detections.to('cpu')\n\n # skip j = 0, because it's the background class\n for j in range(1, detections.size(1)):\n dets = detections[0, j, :] # shape [200,5]\n mask = dets[:, 0].gt(0.).expand(5, dets.size(0)).t() # takes the detections that have confidence > 0. 
and expands to (5, 200) and then transposes\n dets = torch.masked_select(dets, mask).view(-1, 5)\n if dets.dim() == 0:\n continue\n boxes = dets[:, 1:]\n boxes[:, 0] *= w\n boxes[:, 2] *= w\n boxes[:, 1] *= h\n boxes[:, 3] *= h\n scores = dets[:, 0].cpu().numpy()\n cls_dets = np.hstack((boxes.cpu().numpy(),\n scores[:, np.newaxis])).astype(np.float32,\n copy=False)\n all_boxes[j][i] = cls_dets\n\n print('im_detect: {:d}/{:d} {:.3f}s \\t al iteration: {:d} \\t ensemble_idx {:d}'.format(i,\n num_images, detect_time, int(al_iteration), int(args.eval_ensemble_idx)))\n\n with open(det_file, 'wb') as f:\n pickle.dump(all_boxes, f, pickle.HIGHEST_PROTOCOL)\n\n\n print('Evaluating detections')\n evaluate_detections(all_boxes,\n output_dir,\n test_dataset,\n args,\n labelmap,\n use_dataset_image_ids)\n\n\ndef evaluate_detections(box_list, output_dir, dataset, args, labelmap, use_dataset_image_ids):\n \"\"\"\n largely copied from eval.py from the original pytorch SSD repository: https://github.com/amdegroot/ssd.pytorch\n Slightly adjusted to fit in this active learning module\n \"\"\"\n if args.dataset in ['VOC07','VOC07_1_class','VOC07_6_class']:\n\n YEAR = '2007'\n devkit_path = args.dataset_root + 'VOC' + YEAR\n\n write_voc_results_file(box_list,\n dataset,\n labelmap,\n devkit_path,\n args)\n\n do_python_eval(output_dir,\n False, # use VOC07 metrics\n devkit_path,\n labelmap,\n args,\n dataset,\n use_dataset_image_ids)\n else:\n raise NotImplementedError()\n\ndef write_voc_results_file(all_boxes,\n dataset,\n labelmap,\n devkit_path,\n args):\n \"\"\"\n largely copied from eval.py from the original pytorch SSD repository: https://github.com/amdegroot/ssd.pytorch\n \"\"\"\n\n for cls_ind, cls in enumerate(labelmap):\n print('Writing {:s} VOC results file'.format(cls))\n filename = get_voc_results_file_template('test',\n cls,\n devkit_path,\n args)\n\n # if already made the results files with this network.\n if os.path.isfile(filename):\n continue\n\n else:\n with open(filename, 'wt') as f:\n for im_ind, index in enumerate(dataset.ids):\n dets = all_boxes[cls_ind+1][im_ind]\n if dets == []:\n continue\n # the VOCdevkit expects 1-based indices\n for k in range(dets.shape[0]):\n f.write('{:s} {:.3f} {:.1f} {:.1f} {:.1f} {:.1f}\\n'.\n format(index[1], dets[k, -1],\n dets[k, 0] + 1, dets[k, 1] + 1,\n dets[k, 2] + 1, dets[k, 3] + 1))\n\n\ndef do_python_eval(output_dir,\n use_07,\n devkit_path,\n labelmap,\n args,\n dataset,\n use_dataset_image_ids):\n \"\"\"\n largely copied from eval.py from the original pytorch SSD repository: https://github.com/amdegroot/ssd.pytorch\n Slightly adjusted to fit in this active learning module\n \"\"\"\n annopath = os.path.join(args.dataset_root, 'VOC2007', 'Annotations', '%s.xml')\n if type(args.imageset_test) == list and len(args.imageset_test) == 1:\n imagesetfile = args.imageset_test[0][1]\n else:\n imagesetfile = args.imageset_test\n imgsetpath = os.path.join(args.dataset_root, 'VOC2007', 'ImageSets',\n 'Main', '{:s}.txt')\n cachedir = os.path.join(devkit_path, 'annotations_cache')\n\n # The PASCAL VOC metric changed in 2010\n use_07_metric = use_07\n print('VOC07 metric? 
' + ('Yes' if use_07_metric else 'No'))\n\n iou_thresholds = [0.3]\n iou_thresholds.extend(list(np.linspace(0.5,0.95,10)))\n\n for iou_threshold in iou_thresholds:\n print('IoU threshold: ',str(iou_threshold),'\\n_______________\\n')\n args.summary['eval_model']['APs'][str(iou_threshold)] = {}\n\n aps = []\n if not os.path.isdir(output_dir):\n os.mkdir(output_dir)\n for i, cls in enumerate(labelmap):\n filename = get_voc_results_file_template('test', cls, devkit_path, args) # results file\n rec, prec, ap = voc_eval(\n filename, annopath, imgsetpath.format(imagesetfile), cls, cachedir,\n ovthresh=iou_threshold, use_07_metric=use_07_metric, dataset= dataset, use_dataset_image_ids=use_dataset_image_ids) # todo: imageset_file: '/home/jasper/data/VOCdevkit/VOC2007/ImageSets/Main/test.txt'\n # rec,prec,ap = 0.1,0.2,0.3\n\n aps += [ap]\n print('AP for {} = {:.4f}'.format(cls, ap))\n with open(os.path.join(output_dir, cls + '_pr.pkl'), 'wb') as f:\n pickle.dump({'rec': rec, 'prec': prec, 'ap': ap}, f)\n\n #write summary average precissions\n args.summary['eval_model']['APs'][str(iou_threshold)][str(cls)] = ap\n\n # exclude classes without predictions\n aps = [ap for ap in aps if ap != -1.]\n args.summary['eval_model']['APs'][str(iou_threshold)]['mAP'] = np.mean(aps)\n print('Mean AP = {:.4f}'.format(np.mean(aps)))\n print('~~~~~~~~')\n print('Results:')\n for ap in aps:\n print('{:.3f}'.format(ap))\n print('{:.3f}'.format(np.mean(aps)))\n print('~~~~~~~~')\n print('')\n print('--------------------------------------------------------------')\n print('Results computed with the **unofficial** Python eval code.')\n print('Results should be very close to the official MATLAB eval code.')\n print('--------------------------------------------------------------')\n print('\\n\\n\\n')\n\n # calculate mmAP (coco definition mAP)\n args.summary['eval_model']['APs']['mmAP'] = 0\n for key, value in args.summary['eval_model']['APs'].items():\n if key != 'mmAP':\n args.summary['eval_model']['APs']['mmAP'] += args.summary['eval_model']['APs'][key]['mAP']\n args.summary['eval_model']['APs']['mmAP'] /= 10\n\n\ndef get_voc_results_file_template(image_set, cls, devkit_path, args):\n \"\"\"\n largely copied from eval.py from the original pytorch SSD repository: https://github.com/amdegroot/ssd.pytorch\n Slightly adjusted to fit in this active learning module\n \"\"\"\n\n # VOCdevkit/VOC2007/results/det_test_aeroplane.txt\n filename = 'det_' + image_set + '_%s.txt' % (cls)\n\n filedir = args.experiment_dir + 'eval/results/al-iter_'+str(args.al_iteration)+'/ensemble_idx_'+args.eval_ensemble_idx\n # filedir = os.path.join(devkit_path, 'results') # old filedir from Max De Groot\n if not os.path.exists(filedir):\n os.makedirs(filedir, exist_ok = True)\n path = os.path.join(filedir, filename)\n return path\n\ndef voc_eval(detpath,\n annopath,\n imagesetfile,\n classname,\n cachedir,\n ovthresh=0.5,\n use_07_metric=True,\n dataset = None,\n use_dataset_image_ids = False):\n \"\"\"rec, prec, ap = voc_eval(detpath,\n annopath,\n imagesetfile,\n classname,\n [ovthresh],\n [use_07_metric])\nTop level function that does the PASCAL VOC evaluation.\ndetpath: Path to detections\n detpath.format(classname) should produce the detection results file.\nannopath: Path to annotations\n annopath.format(imagename) should be the xml annotations file.\nimagesetfile: Text file containing the list of images, one image per line.\nclassname: Category name (duh)\ncachedir: Directory for caching the annotations\n[ovthresh]: Overlap threshold (default 
= 0.5)\n[use_07_metric]: Whether to use VOC07's 11 point AP computation\n (default True)\n\n\n NOTE: largely copied from eval.py from the original pytorch SSD repository: https://github.com/amdegroot/ssd.pytorch\n Slightly adjusted to fit in this active learning module\n\"\"\"\n\n# assumes detections are in detpath.format(classname)\n# assumes annotations are in annopath.format(imagename)\n# assumes imagesetfile is a text file with each line an image name\n# cachedir caches the annotations in a pickle file\n# first load gt\n if not os.path.isdir(cachedir):\n os.mkdir(cachedir)\n cachefile = os.path.join(cachedir, 'annots.pkl') # cachefile of correct annotations/truth values.\n # read list of images\n with open(imagesetfile, 'r') as f:\n lines = f.readlines()\n imagenames = [x.strip() for x in lines]\n if not os.path.isfile(cachefile):\n # load annots\n recs = {}\n for i, imagename in enumerate(imagenames):\n recs[imagename] = parse_rec(annopath % (imagename))\n if i % 100 == 0:\n print('Reading annotation for {:d}/{:d}'.format(\n i + 1, len(imagenames)))\n # save\n print('Saving cached annotations to {:s}'.format(cachefile))\n with open(cachefile, 'wb') as f:\n pickle.dump(recs, f)\n else:\n # load\n with open(cachefile, 'rb') as f:\n recs = pickle.load(f)\n\n # extract gt objects for this class\n class_recs = {}\n npos = 0\n\n if use_dataset_image_ids:\n for imagename in dataset.ids:\n imagename = imagename[1]\n R = [obj for obj in recs[imagename] if obj['name'] == classname]\n bbox = np.array([x['bbox'] for x in R])\n difficult = np.array([x['difficult'] for x in R]).astype(np.bool)\n det = [False] * len(R)\n npos = npos + sum(~difficult)\n class_recs[imagename] = {'bbox': bbox,\n 'difficult': difficult,\n 'det': det}\n else:\n for imagename in imagenames:\n R = [obj for obj in recs[imagename] if obj['name'] == classname]\n bbox = np.array([x['bbox'] for x in R])\n difficult = np.array([x['difficult'] for x in R]).astype(np.bool)\n det = [False] * len(R)\n npos = npos + sum(~difficult)\n class_recs[imagename] = {'bbox': bbox,\n 'difficult': difficult,\n 'det': det}\n\n # read detections (see results folder in VOCDevkit)\n detfile = detpath.format(classname)\n with open(detfile, 'r') as f:\n lines = f.readlines()\n if any(lines) == 1:\n\n splitlines = [x.strip().split(' ') for x in lines]\n image_ids = [x[0] for x in splitlines]\n confidence = np.array([float(x[1]) for x in splitlines])\n BB = np.array([[float(z) for z in x[2:]] for x in splitlines])\n\n # sort by confidence\n sorted_ind = np.argsort(-confidence)\n sorted_scores = np.sort(-confidence)\n BB = BB[sorted_ind, :]\n image_ids = [image_ids[x] for x in sorted_ind]\n\n # go down dets and mark TPs and FPs\n nd = len(image_ids)\n tp = np.zeros(nd)\n fp = np.zeros(nd)\n for d in range(nd):\n R = class_recs[image_ids[d]] # can result in keyerror if: class recs doesn't have the image_id (class_rec is gt for all images in imagenames, where recs is taken from the cache file) todo\n bb = BB[d, :].astype(float)\n ovmax = -np.inf\n BBGT = R['bbox'].astype(float)\n if BBGT.size > 0:\n # compute overlaps\n # intersection\n ixmin = np.maximum(BBGT[:, 0], bb[0])\n iymin = np.maximum(BBGT[:, 1], bb[1])\n ixmax = np.minimum(BBGT[:, 2], bb[2])\n iymax = np.minimum(BBGT[:, 3], bb[3])\n iw = np.maximum(ixmax - ixmin, 0.)\n ih = np.maximum(iymax - iymin, 0.)\n inters = iw * ih\n uni = ((bb[2] - bb[0]) * (bb[3] - bb[1]) +\n (BBGT[:, 2] - BBGT[:, 0]) *\n (BBGT[:, 3] - BBGT[:, 1]) - inters)\n overlaps = inters / uni\n ovmax = np.max(overlaps)\n jmax = 
np.argmax(overlaps)\n\n if ovmax > ovthresh:\n if not R['difficult'][jmax]:\n if not R['det'][jmax]:\n tp[d] = 1.\n R['det'][jmax] = 1\n else:\n fp[d] = 1.\n else:\n fp[d] = 1.\n\n # compute precision recall\n fp = np.cumsum(fp)\n tp = np.cumsum(tp)\n rec = tp / float(npos)\n # avoid divide by zero in case the first detection matches a difficult\n # ground truth\n prec = tp / np.maximum(tp + fp, np.finfo(np.float64).eps)\n ap = voc_ap(rec, prec, use_07_metric)\n else:\n # note that below default values of -1 can cause negative mAPs.. Not sure why you would want this anyways..\n # rec = -1.\n # prec = -1.\n # ap = -1.\n rec = 0.\n prec = 0.\n ap = 0.\n return rec, prec, ap\n\n\ndef parse_rec(filename):\n \"\"\" Parse a PASCAL VOC xml file\n largely copied from eval.py from the original pytorch SSD repository: https://github.com/amdegroot/ssd.pytorch\n \"\"\"\n tree = ET.parse(filename)\n objects = []\n for obj in tree.findall('object'):\n obj_struct = {}\n obj_struct['name'] = obj.find('name').text\n obj_struct['pose'] = obj.find('pose').text\n obj_struct['truncated'] = int(obj.find('truncated').text)\n obj_struct['difficult'] = int(obj.find('difficult').text)\n bbox = obj.find('bndbox')\n obj_struct['bbox'] = [int(bbox.find('xmin').text) - 1,\n int(bbox.find('ymin').text) - 1,\n int(bbox.find('xmax').text) - 1,\n int(bbox.find('ymax').text) - 1]\n objects.append(obj_struct)\n\n return objects\n\n\ndef voc_ap(rec, prec, use_07_metric=True):\n \"\"\" ap = voc_ap(rec, prec, [use_07_metric])\n Compute VOC AP given precision and recall.\n If use_07_metric is true, uses the\n VOC 07 11 point method (default:True).\n \"\"\"\n if use_07_metric:\n # 11 point metric\n ap = 0.\n for t in np.arange(0., 1.1, 0.1):\n if np.sum(rec >= t) == 0:\n p = 0\n else:\n p = np.max(prec[rec >= t])\n ap = ap + p / 11.\n else:\n # correct AP calculation\n # first append sentinel values at the end\n mrec = np.concatenate(([0.], rec, [1.]))\n mpre = np.concatenate(([0.], prec, [0.]))\n\n # compute the precision envelope\n for i in range(mpre.size - 1, 0, -1):\n mpre[i - 1] = np.maximum(mpre[i - 1], mpre[i])\n\n # to calculate area under PR curve, look for points\n # where X axis (recall) changes value\n i = np.where(mrec[1:] != mrec[:-1])[0]\n\n # and sum (\\Delta recall) * prec\n ap = np.sum((mrec[i + 1] - mrec[i]) * mpre[i + 1])\n return ap\n\n\n"
] |
[
[
"numpy.maximum",
"numpy.minimum",
"numpy.linspace",
"numpy.arange",
"numpy.cumsum",
"numpy.sort",
"numpy.finfo",
"numpy.concatenate",
"numpy.max",
"numpy.argmax",
"numpy.mean",
"numpy.where",
"torch.cuda.is_available",
"numpy.argsort",
"numpy.array",
"numpy.zeros",
"numpy.sum",
"torch.masked_select"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
thomasbarillot/DAQ
|
[
"20126655f74194757d25380680af9429ff27784e"
] |
[
"eTOF/ADQAPI_python/FWPD/modules/example_helpers.py"
] |
[
"import ctypes as ct\r\nimport sys\r\nimport matplotlib.pyplot as plt\r\nimport matplotlib.patches as patches\r\nimport numpy as np\r\nimport os\r\n\r\n# Setup for individual channels, these are default values\r\nclass channel_setup:\r\n def __init__(self, channel):\r\n # Set channel\r\n self.channel = channel\r\n # Set bit corresponding to channel in mask\r\n self.coincidence_masking_expression = 2**channel\r\n # Default values for all members\r\n channel = 0\r\n trig_level = 0\r\n reset_hysteresis = 0\r\n trigger_arm_hysteresis = 0\r\n reset_arm_hysteresis = 0\r\n # 1: Rising, 0: Falling\r\n trigger_polarity = 1\r\n reset_polarity = 0\r\n coincidence_window_length = 1000\r\n coincidence_masking_expression = 0\r\n number_of_records = 1\r\n record_variable_length = 1\r\n nof_pretrigger_samples = 0\r\n nof_moving_average_samples = 0\r\n moving_average_delay = 0\r\n samples_per_record = 1024\r\n # = Record size if record_variable_length is 0\r\n trailing_edge_window = samples_per_record\r\n\r\n# Common setup for acquisition, these are default values\r\nclass acquisition_setup:\r\n # Collect data from all four channels\r\n channels_mask = 0b1111\r\n\r\n# Define the record header struct\r\nclass HEADER(ct.Structure):\r\n _fields_ = [(\"RecordStatus\", ct.c_ubyte),\r\n (\"UserID\", ct.c_ubyte),\r\n (\"Channel\", ct.c_ubyte),\r\n (\"DataFormat\", ct.c_ubyte),\r\n (\"SerialNumber\", ct.c_uint32),\r\n (\"RecordNumber\", ct.c_uint32),\r\n (\"SamplePeriod\", ct.c_int32),\r\n (\"Timestamp\", ct.c_int64),\r\n (\"RecordStart\", ct.c_int64),\r\n (\"RecordLength\", ct.c_uint32),\r\n (\"MovingAverage\", ct.c_int16),\r\n (\"GateCounter\", ct.c_uint16)]\r\n\r\n# This function loads the ADQAPI library using ctypes\r\ndef adqapi_load():\r\n if os.name == 'nt':\r\n ADQAPI = ct.cdll.LoadLibrary('ADQAPI.dll')\r\n else:\r\n ADQAPI = ct.cdll.LoadLibrary('libadq.so')\r\n\r\n # Manually set return type from some ADQAPI functions\r\n ADQAPI.CreateADQControlUnit.restype = ct.c_void_p\r\n ADQAPI.ADQ_GetRevision.restype = ct.c_void_p\r\n ADQAPI.ADQ_GetPtrStream.restype = ct.POINTER(ct.c_int16)\r\n ADQAPI.ADQControlUnit_FindDevices.argtypes = [ct.c_void_p]\r\n\r\n # Print ADQAPI revision\r\n print('ADQAPI loaded, revision {:d}.'.format(ADQAPI.ADQAPI_GetRevision()))\r\n\r\n return ADQAPI\r\n\r\n# This function unloads the ADQAPI library using ctypes\r\ndef adqapi_unload(ADQAPI):\r\n if os.name == 'nt':\r\n # Unload DLL\r\n ct.windll.kernel32.FreeLibrary(ADQAPI._handle)\r\n\r\n# Convenience function when printing status from ADQAPI functions\r\ndef adq_status(status):\r\n if (status==0):\r\n return 'FAILURE'\r\n else:\r\n return 'OK'\r\n\r\n# Print revision info for an ADQ device\r\ndef print_adq_device_revisions(ADQAPI, adq_cu, adq_num):\r\n # Get revision info from ADQ\r\n rev = ADQAPI.ADQ_GetRevision(adq_cu, adq_num)\r\n revision = ct.cast(rev,ct.POINTER(ct.c_int))\r\n print('\\nConnected to ADQ #{:d}'.format(adq_num))\r\n # Print revision information\r\n print('FPGA Revision: {}'.format(revision[0]))\r\n if (revision[1]):\r\n print('Local copy')\r\n else:\r\n print('SVN Managed')\r\n if (revision[2]):\r\n print('Mixed Revision')\r\n else :\r\n print('SVN Updated')\r\n print('')\r\n\r\n# This function sets an alternating background color for a matplotlib plot\r\ndef alternate_background(ax, start_point, widths, labels=False,\r\n color='#dddddd'):\r\n\r\n ax.relim()\r\n # update ax.viewLim using the new dataLim\r\n ax.autoscale_view()\r\n plt.draw()\r\n\r\n # Calculate starting points\r\n edges = 
start_point+np.cumsum(np.append([0],widths))\r\n # Set plot x axis length\r\n ax.set_xlim(start_point, edges[-1])\r\n ylim=ax.get_ylim()\r\n # Draw colored fields for every other width\r\n for idx in range(1,len(edges)-1,2):\r\n ax.add_patch(\r\n patches.Rectangle(\r\n (edges[idx], ylim[0]), # point(x,y)\r\n widths[idx], # width\r\n ylim[1]-ylim[0], # height\r\n facecolor=color,\r\n edgecolor='none',\r\n zorder=-20\r\n )\r\n )\r\n # Optionally draw labels\r\n if labels==True:\r\n for idx in range(0,len(edges)-1):\r\n # Set y-position 1% under top\r\n ypos=(ylim[1])-0.01*(ylim[1]-ylim[0])\r\n # Enumerate fields\r\n plt.text(edges[idx], ypos,\r\n 'R{}'.format(idx), verticalalignment='top')\r\n\r\ndef collecting(channel_setup, records_completed):\r\n state = False\r\n\r\n for ch in range(len(channel_setup)):\r\n state = state or (records_completed[ch] < channel_setup[ch].number_of_records)\r\n\r\n return state\r\n\r\ndef print_event_counters(adqapi, adq_cu, adq_num):\r\n status = ct.c_uint()\r\n lt_tevent_ctr = ct.c_uint()\r\n lt_revent_ctr = ct.c_uint()\r\n ul_tevent_ctr = ct.c_uint()\r\n ul_revent_ctr = ct.c_uint()\r\n pt_tevent_ctr = ct.c_uint()\r\n pt_revent_ctr = ct.c_uint()\r\n acq_tevent_ctr = ct.c_uint()\r\n acq_revent_ctr = ct.c_uint()\r\n acq_revent_pt_ctr = ct.c_uint()\r\n status = adqapi.ADQ_PDGetEventCounters(adq_cu, adq_num,\r\n ct.byref(lt_tevent_ctr),\r\n ct.byref(lt_revent_ctr),\r\n ct.byref(ul_tevent_ctr),\r\n ct.byref(ul_revent_ctr),\r\n ct.byref(pt_tevent_ctr),\r\n ct.byref(pt_revent_ctr),\r\n ct.byref(acq_tevent_ctr),\r\n ct.byref(acq_revent_ctr),\r\n ct.byref(acq_revent_pt_ctr))\r\n print('ADQAPI.ADQ_PDGetEventCounters returned {}'.format(adq_status(status)))\r\n print('LT tevent ctr: {}'.format(lt_tevent_ctr.value))\r\n print('LT revent ctr: {}'.format(lt_revent_ctr.value))\r\n print('UL tevent ctr: {}'.format(ul_tevent_ctr.value))\r\n print('UL revent ctr: {}'.format(ul_revent_ctr.value))\r\n print('PT tevent ctr: {}'.format(pt_tevent_ctr.value))\r\n print('PT revent ctr: {}'.format(pt_revent_ctr.value))\r\n print('AQ tevent ctr: {}'.format(acq_tevent_ctr.value))\r\n print('AQ revent ctr: {}'.format(acq_revent_ctr.value))\r\n print('AQ revent pt ctr: {}'.format(acq_revent_pt_ctr.value))\r\n return\r\n"
] |
[
[
"numpy.append",
"matplotlib.patches.Rectangle",
"matplotlib.pyplot.draw"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
qnity/insurance_python
|
[
"8dff5e365e3749b76adaeea90d89909af5f3c1b0"
] |
[
"stationary-bootstrap/StationaryBootstrap.py"
] |
[
"import numpy as np\r\n\r\ndef StationaryBootstrap(data: np.ndarray, m, sampleLength)-> np.ndarray:\r\n \"\"\"\r\n Returns a bootstraped sample of the time-series \"data\" of length \"sampleLength. \r\n The algorithm used is stationary bootstrap from 1994 Politis & Romano.\r\n \r\n Args: \r\n data ... ndarray array. A single vector of numbers containing the time-series.\r\n m ... floating number. Parameter to stationary bootstrap indicating the average length of each block in the sample.\r\n sampleLength ... integer. Length of the bootstrapped sample returned as output.\r\n \r\n Returns: \r\n sample ... ndarray array containing the final bootstraped sample.\r\n \r\n Example of use:\r\n >>> import numpy as np\r\n >>> data = np.array([1,2,3,4,5,6,7,8,9,10])\r\n >>> m = 4\r\n >>> sampleLength = 12\r\n >>> StationaryBootstrap(data, m, sampleLength)\r\n Out[0]: array([[9.],\r\n [3.],\r\n [4.],\r\n [5.],\r\n [6.],\r\n [7.],\r\n [8.],\r\n [7.],\r\n [2.],\r\n [3.],\r\n [4.],\r\n [2.]])\r\n\r\n Original paper about stationary bootstrap:\r\n Dimitris N. Politis & Joseph P. Romano (1994) The Stationary Bootstrap, Journal of the American Statistical \r\n Association, 89:428, 1303-1313, DOI: 10.1080/01621459.1994.10476870 \r\n\r\n Implemented by Gregor Fabjan from Qnity Consultants on 12/11/2021.\r\n\r\n \"\"\"\r\n accept = 1/m \r\n lenData = data.shape[0]\r\n\r\n sampleIndex = np.random.randint(0,high =lenData,size=1);\r\n sample = np.zeros((sampleLength,1))\r\n for iSample in range(sampleLength):\r\n if np.random.uniform(0,1,1)>=accept:\r\n sampleIndex += 1\r\n if sampleIndex >= lenData:\r\n sampleIndex=0 \r\n else:\r\n sampleIndex = np.random.randint(0,high = lenData,size=1)\r\n\r\n sample[iSample,0] = data[sampleIndex]\r\n return sample"
] |
[
[
"numpy.random.uniform",
"numpy.zeros",
"numpy.random.randint"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |