repo_name | hexsha | file_path | code | apis
---|---|---|---|---|
ky-zhang/machinelearning_playground
|
[
"22285a0e49c888643112864430c6b7cd00a60aba"
] |
[
"AdaBoost/adaboost.py"
] |
[
"import numpy as np\nimport matplotlib.pyplot as plt\n\ndef loadSimpData():\n datMat = np.matrix([[1., 2.1],\n [2., 1.1],\n [1.3, 1.],\n [1., 1.],\n [2., 1.]])\n classLabels = [1.0, 1.0, -1.0, -1.0, 1.0]\n\n return datMat, classLabels\n\n# x, y = loadSimpData()\n# plt.plot(x, 'ro')\n# plt.show()\n\ndef stumpClassify(dataMatrix, dimen, threshVal, threshIneq):\n retArray = np.ones((np.shape(dataMatrix)[0], 1))\n if threshIneq == 'lt':\n retArray[dataMatrix[:, dimen] <= threshVal] = -1.0\n else:\n retArray[dataMatrix[:, dimen] > threshVal] = -1.0\n return retArray\n\ndef buildStump(dataArr, classLabels, D):\n dataMatrix = np.mat(dataArr); labelMat = np.mat(classLabels).T\n m,n = np.shape(dataMatrix)\n numSteps = 10.0\n bestStump = {}\n bestClasEst = np.mat(np.zeros((m, 1)))\n minError = np.inf\n for i in range(n):\n rangeMin = dataMatrix[:, i].min()\n rangeMax = dataMatrix[:, i].max()\n stepSize = (rangeMax - rangeMin)/numSteps\n for j in range(-1, int(numSteps) + 1):\n for inequal in ['lt', 'gt']:\n threshVal = (rangeMin + float(j) * stepSize)\n predictedVals = stumpClassify(dataMatrix, i, threshVal, inequal)\n errArr = np.mat(np.ones((m, 1)))\n errArr[predictedVals == labelMat] = 0\n weightedError = D.T * errArr\n\n if weightedError < minError:\n minError = weightedError\n bestClasEst = predictedVals.copy()\n bestStump['dim'] = i\n bestStump['thresh'] = threshVal\n bestStump['ineq'] = inequal\n return bestStump, minError, bestClasEst\n"
] |
[
[
"numpy.matrix",
"numpy.zeros",
"numpy.ones",
"numpy.shape",
"numpy.mat"
]
] |
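For context, a minimal sketch of how `buildStump` might be driven, assuming `AdaBoost/adaboost.py` above is importable as `adaboost`:

```python
import numpy as np
from adaboost import loadSimpData, buildStump  # assumes AdaBoost/adaboost.py is on the path

datMat, classLabels = loadSimpData()
D = np.mat(np.ones((5, 1)) / 5.0)  # uniform sample weights, as AdaBoost starts with
bestStump, minError, bestClasEst = buildStump(datMat, classLabels, D)
print(bestStump, minError)  # e.g. {'dim': 0, 'thresh': 1.3, 'ineq': 'lt'} with weighted error 0.2
```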
Steve-Tod/STFC3
|
[
"9f583704d271be501a2bf3c97e5b7ab3a84c2f92"
] |
[
"code/data/__init__.py"
] |
[
"import torch\nimport torch.utils.data\nimport logging\nfrom torchvision.datasets.samplers.clip_sampler import DistributedSampler, RandomClipSampler\nfrom torch.utils.data.dataloader import default_collate\n\ndef collate_fn_random_scale(batch):\n num_scale = len(batch[0])\n select_scale = torch.randint(num_scale, size=(1,)).item()\n return default_collate([d[select_scale] for d in batch])\n\ndef create_sampler(dataset, opt, shuffle):\n no_sampler = True\n if hasattr(dataset, 'Kinetics400'):\n sampler = RandomClipSampler(dataset.Kinetics400.video_clips, dataset.clips_per_video)\n dataset = sampler\n no_sampler = False\n if opt['distributed'] and not opt['no_sampler']:\n sampler = DistributedSampler(dataset, rank=opt['rank'], shuffle=shuffle)\n no_sampler = False\n if no_sampler:\n sampler = None\n return sampler\n\ndef create_dataloader(dataset, dataset_opt, sampler=None):\n phase = dataset_opt['phase']\n\n if dataset.__class__.__name__ in ['KineticsTVDatasetV8']:\n collate_fn = collate_fn_random_scale\n else:\n collate_fn = default_collate\n \n if phase == 'train':\n num_workers = dataset_opt['num_workers']\n batch_size = dataset_opt['batch_size']\n shuffle = (sampler is None)\n return torch.utils.data.DataLoader(dataset, batch_size=batch_size, shuffle=shuffle,\n num_workers=num_workers, sampler=sampler, collate_fn=collate_fn, drop_last=True,\n pin_memory=True)\n else:\n return torch.utils.data.DataLoader(dataset, batch_size=1, shuffle=False, num_workers=4, sampler=sampler, collate_fn=collate_fn, pin_memory=True)\n\ndef create_dataset(dataset_opt, opt):\n mode = dataset_opt['mode']\n if mode == 'KineticsTVDataset':\n from .KineticsTVDataset import KineticsTVDataset as d\n else:\n raise NotImplementedError('Dataset [{:s}] is not recognized.'.format(mode))\n \n if 'Cache' in mode:\n if opt['distributed']:\n opt['no_sampler'] = True\n dataset_opt['part'] = (opt['rank'], opt['num_rank'])\n dataset = d(dataset_opt)\n if opt['rank'] == 0:\n logger = logging.getLogger('base')\n logger.info('Dataset [{:s} - {:s}] is created.'.format(dataset.__class__.__name__,\n dataset_opt['name']))\n return dataset\n"
] |
[
[
"torch.randint",
"torch.utils.data.dataloader.default_collate",
"torch.utils.data.DataLoader"
]
] |
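A self-contained sketch of what `collate_fn_random_scale` above does, using a hypothetical batch in which each sample is offered at two spatial scales:

```python
import torch
from torch.utils.data.dataloader import default_collate

# Hypothetical batch: three samples, each available at two scales.
batch = [(torch.zeros(3, 8, 8), torch.zeros(3, 16, 16)) for _ in range(3)]

num_scale = len(batch[0])                                  # 2 scales per sample
select_scale = torch.randint(num_scale, size=(1,)).item()  # one scale for the whole batch
collated = default_collate([d[select_scale] for d in batch])
print(collated.shape)  # torch.Size([3, 3, 8, 8]) or torch.Size([3, 3, 16, 16])
```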
annosoo/nncase
|
[
"898bd4075a0f246bca7b62a912051af3d890aff4"
] |
[
"tests/tflite_test_runner.py"
] |
[
"import tensorflow as tf\nfrom test_runner import *\nimport os\nimport shutil\n\n\nclass TfliteTestRunner(TestRunner):\n def __init__(self, case_name, targets=None, overwirte_configs: dict = None):\n super().__init__(case_name, targets, overwirte_configs)\n\n def from_tensorflow(self, module):\n # export model\n tf.saved_model.save(module, self.case_dir)\n converter = tf.lite.TFLiteConverter.from_saved_model(self.case_dir)\n\n # convert model\n tflite_model = converter.convert()\n model_file = os.path.join(self.case_dir, 'test.tflite')\n with open(model_file, 'wb') as f:\n f.write(tflite_model)\n\n return model_file\n\n def run(self, model_file):\n if self.case_dir != os.path.dirname(model_file):\n shutil.copy(model_file, self.case_dir)\n model_file = os.path.join(\n self.case_dir, os.path.basename(model_file))\n\n super().run(model_file)\n\n def parse_model_input_output(self, model_path: str):\n interp = tf.lite.Interpreter(model_path=model_path)\n\n for item in interp.get_input_details():\n input_dict = {}\n input_dict['index'] = item['index']\n input_dict['name'] = item['name']\n input_dict['dtype'] = item['dtype']\n input_dict['shape'] = item['shape']\n self.inputs.append(input_dict)\n self.calibs.append(input_dict.copy())\n\n for item in interp.get_output_details():\n output_dict = {}\n output_dict['index'] = item['index']\n output_dict['name'] = item['name']\n output_dict['dtype'] = item['dtype']\n output_dict['shape'] = item['shape']\n self.outputs.append(output_dict)\n\n def cpu_infer(self, case_dir: str, model_file: bytes):\n interp = tf.lite.Interpreter(model_path=model_file)\n interp.allocate_tensors()\n for input in self.inputs:\n interp.set_tensor(input[\"index\"], input['data'])\n\n interp.invoke()\n\n i = 0\n for output in self.outputs:\n data = interp.get_tensor(output['index'])\n self.output_paths.append((\n os.path.join(case_dir, f'cpu_result_{i}.bin'),\n os.path.join(case_dir, f'cpu_result_{i}.txt')))\n data.tofile(self.output_paths[-1][0])\n self.totxtfile(self.output_paths[-1][1], data)\n i += 1\n\n def import_model(self, compiler, model_content, import_options):\n compiler.import_tflite(model_content, import_options)\n"
] |
[
[
"tensorflow.lite.TFLiteConverter.from_saved_model",
"tensorflow.saved_model.save",
"tensorflow.lite.Interpreter"
]
] |
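The conversion round-trip in `from_tensorflow` / `parse_model_input_output` can be exercised standalone; a sketch with a hypothetical one-op module (the `AddOne` class and `/tmp/addone` path are illustrative):

```python
import tensorflow as tf

class AddOne(tf.Module):
    @tf.function(input_signature=[tf.TensorSpec([1, 4], tf.float32)])
    def __call__(self, x):
        return x + 1.0

module = AddOne()
tf.saved_model.save(module, "/tmp/addone",
                    signatures=module.__call__.get_concrete_function())
converter = tf.lite.TFLiteConverter.from_saved_model("/tmp/addone")
tflite_model = converter.convert()

# Inspect the converted model's inputs, as parse_model_input_output does.
interp = tf.lite.Interpreter(model_content=tflite_model)
for item in interp.get_input_details():
    print(item["name"], item["dtype"], item["shape"])
```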
NorthGuard/loop_printer
|
[
"808686c6a7bfb149a368d5d5b46f5047e81631d5"
] |
[
"src/timer.py"
] |
[
"from datetime import datetime, timedelta\n\nimport regex\nfrom scipy import optimize\nimport numpy as np\n\nfrom loop_printer.src.utility import _delta_time_str, _precision_on_microseconds, _get_difference_formatter\n\n\nclass LoopPrinterTimer:\n \"\"\"\n This timer records the step-counts passed on to the LoopPrinter and records the times of being called.\n These is_first_call are used to compute various timing information such as time passed, estimated time left etc.\n \"\"\"\n def __init__(self):\n self.time_stamps = [] # type: [timedelta]\n self.start_time = None # type: datetime\n self.steps = [] # type: [int]\n self.time_left_method = \"linear\"\n self.stride_size = 1\n self.step_nr = 0\n self.last_time = None\n\n def reset(self):\n self.time_stamps = [] # type: [timedelta]\n self.start_time = None # type: datetime\n self.steps = [] # type: [int]\n self.time_left_method = \"linear\"\n self.stride_size = 1\n self.step_nr = 0\n self.last_time = None\n\n def update_times_steps(self, count, is_first_call, memory):\n \"\"\"\n Updates the internal lists of information.\n :param int count: Current iteration.\n :param bool is_first_call: Indicates whether this is the first printing.\n :param int memory: maximum number of elements to be remembered for estimating timing information.\n \"\"\"\n self.step_nr += 1\n if len(self.time_stamps) > 0:\n self.last_time = self.time_stamps[-1]\n\n if is_first_call:\n self.start_time = datetime.now()\n self.steps = [count]\n self.step_nr = 1\n self.stride_size = 1\n else:\n if self.step_nr % self.stride_size == 0:\n self.time_stamps.append(datetime.now() - self.start_time)\n self.steps.append(count)\n\n # Check whether memory is full\n if len(self.time_stamps) > memory:\n # Make downsampling indices\n indices = list(range(0, len(self.time_stamps), 2))\n if indices[-1] != len(self.time_stamps) - 1:\n indices.append(len(self.time_stamps) - 1)\n\n # Downsample previous datapoints\n self.time_stamps = [self.time_stamps[idx] for idx in indices]\n if indices[-1] != len(self.steps) - 1:\n indices.append(len(self.steps) - 1)\n self.steps = [self.steps[idx] for idx in indices]\n\n # Increase stride for next datapoints\n self.stride_size *= 2\n\n def estimate_time_left(self, use_microseconds, n):\n \"\"\"\n Computes the estimated time left by fitting a polynomial function to the time of each iteration.\n By extrapolating these values it estimates time left.\n :param bool use_microseconds: Indicates whether the returned string should have a microsecond precision.\n :param int n: Total number of iterations in loop.\n :return: str\n \"\"\"\n n_steps = len(self.steps)\n time_left = None\n\n # Linear estimation (uses a 1st-degree polynomial)\n if n_steps > 1 and self.time_left_method.lower() == \"linear\":\n self.time_left_method = \"poly1\"\n\n # Polynomial\n if \"poly\" in self.time_left_method.lower():\n # Determine degree from input string\n degree = int(regex.search(\"\\d+\", self.time_left_method.lower()).group(0))\n\n # First few samples can only be approximated with a low-degree polynomial\n if n_steps < degree + 2:\n return None\n\n # Get seconds of each time-step\n seconds = np.array([time.total_seconds() for time in self.time_stamps])\n steps = np.array(self.steps[:-1])\n\n # Select polynomial based on degree (Forcing intersection at 0)\n if degree == 1:\n def fit_func(params, x):\n return params[0] + params[1] * x\n elif degree == 2:\n def fit_func(params, x):\n return params[0] + params[1] * x + params[2] * x ** 2\n elif degree == 3:\n def fit_func(params, 
x):\n return params[0] + params[1] * x + params[2] * x ** 2 + params[3] * x ** 3\n elif degree == 4:\n def fit_func(params, x):\n return params[0] + params[1] * x + params[2] * x ** 2 + params[3] * x ** 3 + params[4] * x ** 4\n else:\n raise NotImplementedError(\"ETA estimation using polynomial of degree > 4 has not been implemented \"\n \"(although it can easily be done).\")\n\n # Define error function\n def error(p, x, y):\n return fit_func(p, x) - y\n\n # Initial values\n init_p = np.array([1] * (degree + 1))\n\n # Optimize polynomial\n p1 = optimize.leastsq(error, init_p, args=(steps, seconds))\n parameters = p1[0]\n\n # Predict time left\n time_left = max(0, fit_func(parameters, n) - seconds[-1])\n time_left = timedelta(seconds=time_left)\n\n # If a time left has been computed convert to string\n if time_left is not None:\n time_left = _delta_time_str(time_left.days, time_left.seconds, time_left.microseconds,\n use_microseconds)\n\n # Return\n return time_left\n\n def compute_timings(self, use_microseconds):\n \"\"\"\n Compute timing statistics (time of last step, average step, total time).\n Returns one string for each of the informations.\n :param bool use_microseconds: Indicates whether the returned string should have a microsecond precision.\n :return: (str, str, str)\n \"\"\"\n # Last time-step\n if len(self.time_stamps) == 1:\n last_step = self.time_stamps[-1] # Last step\n total_step = self.time_stamps[-1] # Total time\n avg_step = self.time_stamps[-1] # Average step\n else:\n last_step = self.time_stamps[-1] - self.last_time # Last step\n total_step = self.time_stamps[-1] # Total time\n avg_step = self.time_stamps[-1] / self.steps[-1] # Average step\n\n # Last step\n last_step = _delta_time_str(last_step.days, last_step.seconds, last_step.microseconds,\n use_microseconds)\n\n # Total time\n total_step = _delta_time_str(total_step.days, total_step.seconds, total_step.microseconds,\n use_microseconds)\n\n # Average step\n avg_step = _delta_time_str(avg_step.days, avg_step.seconds, avg_step.microseconds,\n use_microseconds)\n\n # Return\n return last_step, total_step, avg_step\n\n def time_message(self, count, total_counts, time_stamp, date_stamp, step_time, avg_step_time,\n total_time, time_left, stamp_microseconds, time_microseconds):\n \"\"\"\n Produces the final time-message used by the LoopPrinter.\n :param int count: Iteration counter.\n :param int total_counts: Total number of counts in loop.\n :param bool time_stamp: Do you want a time-stamp of the print?\n :param bool date_stamp: Do you want a date-stamp of the print?\n :param bool step_time: Do you want a print of the last iterations time?\n :param bool avg_step_time: Do you want a print of the average iteration time?\n :param bool total_time: Do you want a print of the total time?\n :param bool time_left: Do you want a print of the estimated time left?\n :param bool stamp_microseconds: Do you want the iteration-time-stamp to have microsecond precision?\n :param bool time_microseconds: Do you want the other stamps to have microsecond precision?\n :return: str\n \"\"\"\n # Time stamp\n time_formatter = (\"%H:%M:%S:%f\" if stamp_microseconds else \"%H:%M:%S\") \\\n if time_stamp else \"\"\n\n # Date stamp\n date_formatter = \"%d-%m-%Y\" if date_stamp else \"\"\n\n # Date and time\n timing_str = date_formatter + (\" \" if time_stamp and date_stamp else \"\") + time_formatter\n stamp = datetime.now().strftime(timing_str)\n if stamp_microseconds:\n stamp = _precision_on_microseconds(stamp, stamp_microseconds)\n\n # Output 
message\n time_message = stamp\n\n # Additional timing information\n information_selector = [step_time, avg_step_time, total_time, time_left]\n if any(information_selector):\n # Length of each time-information\n time_stamp_length = 8 + (time_microseconds + 1 if time_microseconds else 0)\n\n # Time formatter\n special_timing_formatter = _get_difference_formatter(time_stamp_length, information_selector, stamp)\n\n # Time difference stamp\n if count > 1 and (total_time or avg_step_time or step_time):\n last_diff, total_diff, avg_diff = self.compute_timings(time_microseconds)\n else:\n last_diff = avg_diff = total_diff = \" \" * time_stamp_length\n\n # Time left\n computed_time_left = None\n if time_left:\n computed_time_left = self.estimate_time_left(time_microseconds, total_counts)\n if computed_time_left is None:\n computed_time_left = \" \" * time_stamp_length\n\n # Insert timings\n time_message += special_timing_formatter.format(last_diff, avg_diff, total_diff, computed_time_left)\n\n # Return\n return time_message\n"
] |
[
[
"numpy.array",
"scipy.optimize.leastsq"
]
] |
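The ETA logic in `estimate_time_left` above reduces to a `scipy.optimize.leastsq` polynomial fit; a minimal sketch of the degree-1 ("poly1") branch with made-up timings:

```python
import numpy as np
from scipy import optimize

steps = np.array([1.0, 2.0, 3.0, 4.0])    # iteration counts seen so far
seconds = np.array([2.1, 3.9, 6.0, 8.1])  # cumulative seconds at those steps

def fit_func(params, x):                  # degree-1 polynomial, as in the "poly1" branch
    return params[0] + params[1] * x

def error(p, x, y):
    return fit_func(p, x) - y

params, _ = optimize.leastsq(error, np.array([1.0, 1.0]), args=(steps, seconds))
n = 10                                    # hypothetical total number of iterations
print(max(0.0, fit_func(params, n) - seconds[-1]))  # roughly 12 seconds left
```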
brittonsmith/yt_p3bh
|
[
"52dd594fb3ded4a88b2cb43ec18ab1c81e617baa"
] |
[
"scripts/plots/plot_top_growers.py"
] |
[
"from matplotlib import pyplot, ticker\nimport numpy as np\n# import seaborn as sns\nimport yt\n\nfrom grid_figure import GridFigure\n\ndef get_distribution(vals, n=101):\n if vals.size == 0:\n return np.zeros(n)\n id_sort = np.argsort(vals)\n dist = vals[id_sort[np.clip(np.round(np.linspace(0, 1, n) *\n id_sort.size).astype(int),\n 0, id_sort.size-1)]]\n return dist\n\nif __name__ == \"__main__\":\n my_fig = GridFigure(1, 1, figsize=(6, 4.5),\n left_buffer=0.15, right_buffer=0.04,\n bottom_buffer=0.14, top_buffer=0.04,\n vertical_buffer=0)\n\n # palette = sns.color_palette(palette=\"colorblind\")\n palette = \\\n [(0.0, 0.4470588235294118, 0.6980392156862745),\n (0.0, 0.6196078431372549, 0.45098039215686275),\n (0.8352941176470589, 0.3686274509803922, 0.0),\n (0.8, 0.4745098039215686, 0.6549019607843137),\n (0.9411764705882353, 0.8941176470588236, 0.25882352941176473),\n (0.33725490196078434, 0.7058823529411765, 0.9137254901960784)]\n\n labels = [\"rare peak\", \"normal\", \"void\"]\n ds_list = [\n yt.load(\"Rarepeak_LWB/top_growers.h5\"),\n yt.load(\"normal_BG1/top_growers.h5\"),\n yt.load(\"void_BG1/top_growers.h5\")\n ]\n\n my_axes = my_fig[0]\n\n my_axes.set_xscale(\"log\")\n my_axes.set_yscale(\"log\")\n for ip in range(len(ds_list)):\n my_axes.plot([1e-4], [2], color=palette[ip], label=labels[ip])\n myds = ds_list[ip]\n pids = []\n for field in myds.field_list:\n pid = int(field[1].split(\"_\")[1])\n if pid not in pids:\n pids.append(pid)\n\n for pid in pids:\n t_field = myds.data[\"p_%d_time\" % pid].to(\"Myr\")\n m_field = myds.data[\"p_%d_mass\" % pid]\n g_field = myds.data[\"p_%d_mdot\" % pid].to(\"Msun/yr\")\n g_edd = myds.quan(2.2e-8, \"1/yr\") * m_field\n f_edd = g_field / g_edd\n my_sedd = f_edd >= 0.25\n x_field = t_field - t_field[0]\n y_field = m_field / m_field[0] - 1\n print (m_field[0])\n if (my_sedd).any():\n print (f_edd[my_sedd], np.where(my_sedd)[0])\n if (my_sedd).sum() > 1:\n raise NotImplementedError \n sedd = np.where(my_sedd)[0][0]\n if sedd > 0:\n my_axes.plot(x_field[0:sedd+1], y_field[0:sedd+1],\n color=palette[ip], alpha=0.6)\n if sedd < t_field.size - 1:\n my_axes.plot(x_field[sedd:sedd+2], y_field[sedd:sedd+2],\n color=palette[ip], alpha=0.6,\n linestyle=\"--\")\n if sedd < t_field.size - 2:\n my_axes.plot(x_field[sedd+1:], y_field[sedd+1:],\n color=palette[ip], alpha=0.6)\n else:\n my_axes.plot(x_field, y_field,\n color=palette[ip], alpha=0.6)\n pgr = (m_field - m_field[0]) / (m_field[-1] - m_field[0])\n i_gr = np.where((pgr > 0.9))[0].min()\n my_axes.scatter([(t_field - t_field[0])[i_gr]],\n (m_field / m_field[0] - 1)[i_gr],\n s=100, marker=\"*\", alpha=0.9,\n color=palette[ip])\n my_axes.scatter([(t_field - t_field[0])[-1]],\n (m_field / m_field[0] - 1)[-1],\n s=100, marker=\".\", alpha=0.9,\n color=palette[ip])\n my_axes.legend(loc=\"lower right\", frameon=False,\n markerfirst=False,\n markerscale=5)\n my_axes.set_xlim(1, 1.5e2)\n for my_x in np.logspace(1, 1, 1):\n my_axes.axvline(x=my_x, color=\"black\", linestyle=\":\",\n linewidth=1.0, alpha=0.2, zorder=-100)\n\n # my_axes.yaxis.set_ticks(np.logspace(-24, -8, 5))\n # my_axes.yaxis.set_ticks(np.logspace(-24, -8, 17), minor=True)\n # my_axes.yaxis.set_minor_formatter(ticker.NullFormatter())\n my_axes.set_ylim(1e-3, 0.2)\n for my_y in np.logspace(-2, -1, 2):\n my_axes.axhline(y=my_y, color=\"black\", linestyle=\":\",\n linewidth=1.0, alpha=0.2, zorder=-100)\n\n my_axes.xaxis.set_label_text(\"black hole age [Myr]\")\n my_axes.yaxis.set_label_text(\n \"M(t) / M$_{\\\\rm i}$ - 1\")\n\n 
pyplot.savefig(\"figures/top_growers.pdf\")\n"
] |
[
[
"numpy.zeros",
"matplotlib.pyplot.savefig",
"numpy.where",
"numpy.argsort",
"numpy.linspace",
"numpy.logspace"
]
] |
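`get_distribution` above samples `n` evenly spaced order statistics from an array; copied out of the record, it can be checked in isolation:

```python
import numpy as np

def get_distribution(vals, n=101):
    if vals.size == 0:
        return np.zeros(n)
    id_sort = np.argsort(vals)
    dist = vals[id_sort[np.clip(np.round(np.linspace(0, 1, n) *
                                         id_sort.size).astype(int),
                                0, id_sort.size - 1)]]
    return dist

vals = np.array([5.0, 1.0, 3.0, 2.0, 4.0])
print(get_distribution(vals, n=3))  # [1. 3. 5.]: min, median, max
```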
noriyukipy/tfdlg
|
[
"1040cc3cca44e0f9629ae0cb4bd6e18cbf4fbe3f"
] |
[
"tfdlg/schedules.py"
] |
[
"import tensorflow as tf\n\n\nclass TransformerSchedule(tf.keras.optimizers.schedules.LearningRateSchedule):\n \"\"\"TransformerScheduler implements the scheduler used in [Vaswani+, 2017]\"\"\"\n def __init__(self, d_model, warmup_steps=4000):\n super().__init__()\n\n self._d_model = d_model\n self._warmup_steps = warmup_steps\n\n def __call__(self, step):\n fst = tf.math.rsqrt(step)\n snd = step * (self._warmup_steps ** -1.5)\n\n out = tf.math.rsqrt(tf.cast(self._d_model, tf.float32)) * tf.math.minimum(fst, snd)\n\n return out\n\n def get_config(self):\n \"\"\"get_config is for supporing save/load hyper parameters.\"\"\"\n\n # Do not call `super().get_config()` to get parent parameters because\n # it will raise NotImplementedError\n # https://github.com/tensorflow/tensorflow/blob/v2.3.1/tensorflow/python/keras/optimizer_v2/learning_rate_schedule.py#L47\n return {\n \"d_model\": self._d_model,\n \"warmup_steps\": self._warmup_steps\n }\n\n\nclass WarmupLinearDecay(tf.keras.optimizers.schedules.LearningRateSchedule):\n def __init__(self, max_learning_rate, warmup_steps, training_steps):\n self._max_learning_rate = max_learning_rate\n self._warmup_steps = warmup_steps\n self._training_steps = training_steps\n\n def __call__(self, step):\n step = tf.cast(step, tf.float32)\n fst = step / tf.math.maximum(1.0, self._warmup_steps)\n snd = (self._training_steps - step) / tf.math.maximum(1.0, self._training_steps - self._warmup_steps)\n\n return self._max_learning_rate * tf.math.maximum(tf.math.minimum(fst, snd), 0)\n\n def get_config(self):\n # Do not call `super().get_config()` to get parent parameters because\n # it will raise NotImplementedError\n # https://github.com/tensorflow/tensorflow/blob/v2.3.1/tensorflow/python/keras/optimizer_v2/learning_rate_schedule.py#L47\n return {\n \"max_learning_rate\": self._max_learning_rate,\n \"warmup_steps\": self._warmup_steps,\n \"training_steps\": self._training_steps,\n }\n"
] |
[
[
"tensorflow.math.minimum",
"tensorflow.math.maximum",
"tensorflow.cast",
"tensorflow.math.rsqrt"
]
] |
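A quick check of the Vaswani schedule above, assuming `tfdlg.schedules` is importable. One caveat: `TransformerSchedule.__call__` applies `tf.math.rsqrt` directly to `step` without casting (unlike `WarmupLinearDecay`), so pass a float tensor:

```python
import tensorflow as tf
from tfdlg.schedules import TransformerSchedule  # assumes the module above is on the path

sched = TransformerSchedule(d_model=512, warmup_steps=4000)
for step in [100.0, 4000.0, 40000.0]:
    lr = sched(tf.constant(step, tf.float32))
    print(step, float(lr))  # rises linearly during warmup, then decays as step ** -0.5
```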
parkus/emd
|
[
"7d739470f547ba84ae154a93c27362a70178ed38"
] |
[
"test_script.py"
] |
[
"import numpy as np\nimport emd.emd_functions as emd # for quick reload(emd) when debugging\nimport matplotlib.pyplot as plt\n\n#%% ---------------------------------------------------------------------------\n# GENERATE A POLYNOMIAL + SINE TEST FUNCTION\nN = 200\nt = np.arange(200, dtype=float) - N / 2\namp = 10.0\n\n# maybe a polynomial to start\ny = t**2\nfac = amp / np.max(y) #but let's make the numbers easier to read\ny *= fac\n\n# but let's give it an offset just to make sure that doesn't screw with things\ny += amp / 5.0\n\n# and now add in a sine\nperiod = N / 10.0\n\nphase = np.random.uniform(0.0, 2*np.pi)\ny += (amp / 10.0) * np.sin(2*np.pi * t / period + phase)\n\n#%% ---------------------------------------------------------------------------\n# ADD NOISE, IF DESIRED\ny += np.random.normal(0.0, amp/50.0, N)\n\n#%% ---------------------------------------------------------------------------\n# DECOMPOSE\n# choose from sawtooth EMd or \"standard\"\n#c, r = emd.emd(t, y)\nc, r = emd.saw_emd(t, y, bc='extrap')\n\n#%% ---------------------------------------------------------------------------\n# PLOT\n\npf, = plt.plot(t, y)\npr, = plt.plot(t, r)\npcs = plt.plot(t, c, 'k-')\n\nplt.legend((pf, pcs[0], pr), ('original function', 'modes', 'residual'))\n\n"
] |
[
[
"numpy.max",
"numpy.random.normal",
"numpy.sin",
"matplotlib.pyplot.plot",
"matplotlib.pyplot.legend",
"numpy.random.uniform",
"numpy.arange"
]
] |
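If appended to the test script above, a quick consistency check (assuming `saw_emd` returns the modes `c` stacked column-wise, i.e. shape `(N, n_modes)`, which is what the `plt.plot(t, c)` call implies): empirical mode decomposition is exactly additive, so the modes plus the residual should reconstruct the input.

```python
# Sanity check: modes + residual should reproduce the original signal.
recon = c.sum(axis=1) + r
print(np.allclose(recon, y))  # True if the decomposition is exact
```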
91902078/yb66
|
[
"ece7f637ac8bacb1ba51a6f1f6f1f2e9cdb91bd9"
] |
[
"yb66/discussion/widget/Crystal_Parser.py"
] |
[
"import re\nimport numpy\n#import xraylib\nimport sys\nimport os\n\ndef Crystal_Parser(filename='YB66.dat'):\n \"\"\"\n X.J. YU, [email protected]\n parse a complex crystal structure file, into a dictionary\n return a dictionary containing crystal infomation\n \"\"\"\n\n header_end = False #True, end of header parsing\n cryst = {'name':'YB66'} #returned dictionary like that one created by xraylib.Crystal_GetCrystal(descriptor)\n AnisoItem = {'Name':' ','start':0,'end':0,'beta11':0.0,'beta22':0.0,'beta33':0.0,'beta12':0.0,'beta13':0.0,'beta23':0.0}\n AtomItem = {'AtomicName': ' ','Zatom':0,'fraction':0.0,'x':0.0,'y':0.0,'z':0.0,'SeqNo':0,'charge':0.0}\n n = 0 #index anisotropic lines\n n_atom = 0 #index atom list lines\n # open file\n if not os.path.isfile(filename):\n filename = os.path.dirname(os.path.abspath(__file__)) + '\\\\YB66.dat'\n print(filename)\n fo = open(filename, \"r\")\n while True:\n line = fo.readline().strip()\n if line[-1:] == '\\n': \n line = line[0:-1] #discard last \\n character\n if len(line) == 0: #end of file, exit\n break \n a = line.split()\n if header_end == False:\n #process header info\n if a[0][:2]== '#L': #L AtomicNumber Fraction X Y Z\n header_end = True\n\n elif a[0][:2] == '#S': #S 5 YB66\n cryst['name'] = str(a[2])\n \n elif a[0][:6] == '#UCELL': #UCELL 23.44 23.44 23.44 90.0000 90.0000 90.0000\n cryst['a'] = float(a[1])\n cryst['b'] = float(a[2])\n cryst['c'] = float(a[3])\n cryst['alpha'] = float(a[4])\n cryst['beta'] = float(a[5])\n cryst['gamma'] = float(a[6])\n alpha = float(a[4]) * numpy.pi/180\n beta = float(a[5]) * numpy.pi/180\n gamma = float(a[6]) * numpy.pi/180\n cryst['volume'] = float(a[1]) * float(a[2]) * float(a[3]) * \\\n numpy.sqrt( (1 - numpy.cos(alpha)**2 - numpy.cos(beta)**2 - numpy.cos(gamma)**2) + \\\n 2 * numpy.cos(alpha) * numpy.cos(beta) * numpy.cos(gamma)) #for cubic=a*b*c\n elif a[0][:12] == '#UANISO_COFF': #UANISO_COFF_B1 1 96 0.00038 0.00044 0.00039 0 0 0 \n AnisoItem['Name']= str(a[0][13:]) #get site atom name starting from 13th character 'B1', etc\n AnisoItem['start']= int(a[1])\n AnisoItem['end']= int(a[2])\n AnisoItem['beta11']= float(a[3])\n AnisoItem['beta22']= float(a[4])\n AnisoItem['beta33']= float(a[5])\n AnisoItem['beta12']= float(a[6])\n AnisoItem['beta13']= float(a[7])\n AnisoItem['beta23']= float(a[8])\n if n ==0:\n Aniso = numpy.array([AnisoItem.copy()])\n else:\n Aniso = numpy.append(Aniso,[AnisoItem.copy()])\n n = n + 1 #increase anisotropic index\n\n else: #atom list\n #B-. 
1 0.5 0.94077 0.96284\n if len(a) < 5: #min 5 column required, end of atom list or new header section start\n break\n tmp1 = re.search('(^[0-9]*)',a[0])\n if len(tmp1.group(0)) > 0: #numerical atomic number\n AtomItem['Zatom'] = int(a[0])\n AtomItem['AtomicName'] = ''\n else: #atomic name string\n tmp1 = re.search('(^[a-zA-Z]*)',a[0]) #get atomic name\n AtomItem['AtomicName'] = str(a[0])\n# Atom[n_atom]['Zatom'] = int(xraylib.SymbolToAtomicNumber(tmp1.group(0))) #atomic name to atomic number\n AtomItem['Zatom'] = SymbolToAtomicNumber(tmp1.group(0)) #atomic name to atomic number\n AtomItem['fraction'] = float(a[1])\n AtomItem['x'] = float(a[2])\n AtomItem['y'] = float(a[3])\n AtomItem['z'] = float(a[4])\n AtomItem['SeqNo'] = int(n_atom)\n if len(a) == 6: #6 colume list, last one is carried charge\n AtomItem['charge'] = float(a[5])\n if n_atom == 0:\n Atom = numpy.array([AtomItem.copy()])\n else:\n Atom = numpy.append(Atom,[AtomItem.copy()]) \n n_atom = n_atom + 1\n \n # close file\n fo.close()\n cryst['atom'] = Atom\n cryst['n_atom']= n_atom\n if n > 0:\n cryst['Aniso'] = Aniso\n cryst['n_aniso']= n\n else: #create a dummy Aniso Dictionary with start=end=0\n AnisoItem['Name']=''\n cryst['Aniso'] = numpy.array([AnisoItem])\n cryst['n_aniso']= 0\n return cryst\n\ndef SymbolToAtomicNumber(ATOM):\n atoms = [ \t[1,\"H\"],[2,\"He\"],[3,\"Li\"],[4,\"Be\"],[5,\"B\"],[6,\"C\"],[7,\"N\"],[8,\"O\"],[9,\"F\"],[10,\"Ne\"], \t\\\n [11,\"Na\"],[12,\"Mg\"],[13,\"Al\"],[14,\"Si\"],[15,\"P\"],[16,\"S\"],[17,\"Cl\"],[18,\"Ar\"],[19,\"K\"],[20,\"Ca\"], \t\\\n [21,\"Sc\"],[22,\"Ti\"],[23,\"V\"],[24,\"Cr\"],[25,\"Mn\"],[26,\"Fe\"],[27,\"Co\"],[28,\"Ni\"],[29,\"Cu\"],[30,\"Zn\"], \\\n [31,\"Ga\"],[32,\"Ge\"],[33,\"As\"],[34,\"Se\"],[35,\"Br\"],[36,\"Kr\"],[37,\"Rb\"],[38,\"Sr\"],[39,\"Y\"],[40,\"Zr\"], \\\n [41,\"Nb\"],[42,\"Mo\"],[43,\"Tc\"],[44,\"Ru\"],[45,\"Rh\"],[46,\"Pd\"],[47,\"Ag\"],[48,\"Cd\"],[49,\"In\"],[50,\"Sn\"], \t\\\n [51,\"Sb\"],[52,\"Te\"],[53,\"I\"],[54,\"Xe\"],[55,\"Cs\"],[56,\"Ba\"],[57,\"La\"],[58,\"Ce\"],[59,\"Pr\"],[60,\"Nd\"], \t\\\n [61,\"Pm\"],[62,\"Sm\"],[63,\"Eu\"],[64,\"Gd\"],[65,\"Tb\"],[66,\"Dy\"],[67,\"Ho\"],[68,\"Er\"],[69,\"Tm\"],[70,\"Yb\"], \t\\\n [71,\"Lu\"],[72,\"Hf\"],[73,\"Ta\"],[74,\"W\"],[75,\"Re\"],[76,\"Os\"],[77,\"Ir\"],[78,\"Pt\"],[79,\"Au\"],[80,\"Hg\"], \t\\\n [81,\"Tl\"],[82,\"Pb\"],[83,\"Bi\"],[84,\"Po\"],[85,\"At\"],[86,\"Rn\"],[87,\"Fr\"],[88,\"Ra\"],[89,\"Ac\"],[90,\"Th\"], \t\\\n [91,\"Pa\"],[92,\"U\"],[93,\"Np\"],[94,\"Pu\"],[95,\"Am\"],[96,\"Cm\"],[97,\"Bk\"],[98,\"Cf\"],[99,\"Es\"],[100,\"Fm\"], \t\\\n [101,\"Md\"],[102,\"No\"],[103,\"Lr\"],[104,\"Rf\"],[105,\"Db\"],[106,\"Sg\"],[107,\"Bh\"] \t]\n for a in atoms:\n if a[1] == ATOM:\n return int(a[0])\n\n raise Exception(\"Why are you here?\") "
] |
[
[
"numpy.array",
"numpy.cos"
]
] |
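The `#UCELL` branch above computes the (triclinic) unit-cell volume; isolated, with the sample values from the code comment (`23.44` and 90° angles):

```python
import numpy

a = b = c = 23.44                        # lattice constants from the '#UCELL' example line
alpha = beta = gamma = 90.0 * numpy.pi / 180
volume = a * b * c * numpy.sqrt(
    (1 - numpy.cos(alpha)**2 - numpy.cos(beta)**2 - numpy.cos(gamma)**2)
    + 2 * numpy.cos(alpha) * numpy.cos(beta) * numpy.cos(gamma))
print(volume, 23.44**3)  # identical for a cubic cell, up to float error
```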
mrzhuzhe/PettingZoo
|
[
"81b21145cbe4b67a0f5713a806d3d992fc4ea487"
] |
[
"mytest/test.py"
] |
[
"import numpy as np\nfrom numpy import load\n\ndata = load('data/knights-archers-zombies-v8/test_run/ep_lengths.npy')\na = data\nprint(a)"
] |
[
[
"numpy.load"
]
] |
kasekun/MCVCM
|
[
"fa679ddc2458f282f5e4be8c827718ad2dd02f89"
] |
[
"mcvcm.py"
] |
[
"#!/usr/bin/env python3\n# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n# mcvcm.py\n#\n# Interactive software for manually cross-matching infrared and radio catalogues\n# and assigning multi-component radio source associations.\n# \n# Requires radio surface brightness map, radio RMS map, SWIRE infrared mosaic,\n# SWIRE source catalogue, and ATLAS radio catalogue\n#\n# This section handles catalogue managment, and user interface\n# see cutoutslink.py for infrared/radio overlay generation.\n#\n# Note: This is generated SPECIFICALLY for multicomponent identification\n#\t\tand infrared association fot ATLAS DR3 and SWIRE (CDFS and ELAIS-S1)\n#\t\t\n#\t\tIt is not perfect, and is not generalised.\n#\t\tTo adapt this to other catalogues, please email me at:\n#\t\t\[email protected] or [email protected]\n#\n# Author:\n#\tJesse Swan \n#\tUniversity of Tasmania\n#\tJul -- Aug 2016\n#\n# 25th Aug - Fixed crashing when end of catalogue reached\n# 27th Aug - Fixed poorly designed recursive function call, where start()\n#\t\t\t was called before previous start() call exited.\n# \t\t\t Program no longer crashes after reaching python recursion limit.\n# 29th Aug - Added manual save fig option \t\t (press f in plotting window)\n# 29th Aug - Added toggling of scattered sources (press t in plotting window)\n# 23rd Nov - Adapted to SWIRE/ATLAS infrared/radio from previous DES/ATLAS optical/radio version\n# 23rd Nov - changed ID tag separator to '*' from '.'\n# 23rd Nov - Added confidence flag for source association 0--5, default is 5 (very confident)\n# 03rd Dec - Added \"identity\" class that now handles all selected sources and generates the xid tags\n# 07th Dec - Created a \"Cursor\" class for marking a selected source more clearly\n# 14th Dec - V4 Modified \"identity\" class and several index look-ups for efficiency \n# \n# 2017\n# 18th Jan - V5 changed behaviour of target_index incrementation\n# 18th Jan - Changed Identity to check table for correct Dec click AND RA click\n# 18th Jan - Fixed a bug where the xig_flag was being written as 0 in each new master (values backed up in bkp sessions though)\n# 24th Sep - Changes to data read in:\n#\t\t\t\t- fixed an exit bug with a try-catch for importing functions\n#\t\t\t\t- moved some functions to a new utilities.py\n#\t\t\t\t- changed format of json path_config to better facilitate addition of more fields\n#\t\t\t\t- changed swire_catalogue format to .fits to save about 40 seconds on read-in at launch\n#\t\t\t\t- forced window to top-left of screen when plotting\n# 27th Sep - Added a comment option: press 'c' at any time to open a dialogue box for saving a comment for current source\n# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\nfrom __future__ import print_function\n\nimport matplotlib\n\nmatplotlib.use(\"TkAgg\") # necessary for use with tkinter\n\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport argparse\nimport textwrap\nfrom utilities import *\nfrom tkComment import tkComment\nimport json\nimport os\n\nthisdir = os.path.dirname(os.path.abspath(__file__))\n\ndef runtkc():\n global tkC\n tkC = tkComment()\n tkC.root.mainloop()\n\n\nwith open(os.path.join(thisdir, 'path_config.json'), 'r') as config:\n field_choices = tuple(json.load(config).keys())\n\n\nparser = 
argparse.ArgumentParser(formatter_class=argparse.RawTextHelpFormatter,\n description=textwrap.dedent('''\\\n ********************************************************************************\n Interactive program for catalogue cross-identification.\n Displays infrared heatmap and radio contours with catalogue sources over-laid. \n Unique tags are generated for each object, in the format:\n \\t<infrared_host_ID>*<radio_host_ID>*m<#_of_components>*C<component_#>\n \\tExamples: \n \\t\\tInfrared host: SWIRE3_J003940.76-432549.1*EI1896*m2\n \\t\\tRadio host: SWIRE3_J003940.76-432549*EI1896*m2*C0\n \\t\\tComponent #1: SWIRE3_J003940.76-432549*EI1896*m2*C1\n \\t\\tComponent #2: SWIRE3_J003940.76-432549*EI1896*m2*C2\n\n output tables are stored in ./output/tables\n output figures are stored in ./output/figures\n\n press h in the plotting window for a list of basic controls\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n '''))\nparser.add_argument('-v', help='toggles verbose output',action=\"store_true\", default=False)\nparser.add_argument('-t', help='toggles output of function timings (requires verbose mode)', action='store_true',\n default=False)\nparser.add_argument('-x', help='if specified, processes only sources marked as \\'tricky\\'', action='store_true',\n default=False)\nparser.add_argument('-d', help='if specified, does a dummy demo dose of \\'dentification', action='store_true',\n default=False)\nparser.add_argument('--savefigs', dest='figs', default=None, choices=['png', 'eps', 'pdf'],\n help='if file extenstion also provided saves final IDd\\nfigures to that format (e.g. --savefigs '\n 'png)')\n\nparser.add_argument('field', choices=field_choices, help=f'specify the field we are working on from: {field_choices}')\n\nargs = parser.parse_args()\nfield = args.field\nverbose = args.v\nfig_extention = args.figs\ntimeon = args.t\ntrickyon = args.x\ndoing_demo = args.d\n\n\ndef verbwrap(function):\n def wrapper(*args, **kwargs):\n # Calculate the value of `verbose` here.\n # Say you're reading it from a config etc\n if verbose:\n demarc = '*' * (30 - len(function.__name__))\n mid = len(demarc)//2\n spacer = demarc[:mid] + ' %s ' %function.__name__ + demarc[mid:]\n print_center('',spacer)\n result = function(*args, **kwargs)\n print_center(spacer.replace('*','^'))\n return result\n else:\n return function(*args, **kwargs)\n return wrapper\n\n\nif verbose:\n def verboseprint(*args):\n # Print each argument separately\n for arg in args:\n print(arg,)\n print()\nelse:\n verboseprint = lambda *a: None\t # do nothing\n\n\n# ****************************************** #\n# \t\tThe nitty gritty of the program\n# ****************************************** #\n\n\nclass Identity(object):\n '''\n Handles the storage of selected source identities\n and XID tag creation from catalogue IDs\n '''\n default_rad_host = ('Rnohost', -999)\n default_inf_host = ('Inohost', -999)\n\n def __init__(self):\n self.inf_host = self.default_inf_host\n self.rad_host = self.default_rad_host\n self.components = [] #\n self.xid_tags = [] #\n # self.xid_positions = [] #\n # self.comp_radecs = [] # positions of selected sources\n # self.rad_radec = [0.,0.]\n # self.inf_radec = [0.,0.]\n\n def set_rad_host(self, index, ID_list):\n self.rad_host = (ID_list[index],index)\n\n def set_inf_host(self, index, ID_list):\n self.inf_host = (ID_list[index],index)\n\n def add_component(self, index, ID_list):\n '''\n Returns True if component was successfully added\n '''\n compID = 
(ID_list[index],index)\n if compID in self.components or compID == self.rad_host:\n print('Source %s has already been selected' %compID[0])\n return False\n else:\n self.components.append(compID)\n return True\n\n def generate_tags(self):\n '''\n we don't need a xid_tag fot the infrared cataloge\n\n we shouldn't get cases where there are components but no radio core\n as in this case, the closest component should be labeled as the core\n dec 14: I've added a catch to take the first compoenet and make it the\n radio core automagically if this situation arises\n '''\n # clear xid_tags if this has already been called for some reason\n # otherwise it will append the same tags, no bid deal but unnecessary\n if len(self.xid_tags):\n self.xid_tags = []\n\n if self.rad_host == self.default_rad_host and len(self.components):\n print_center(\"\\n\\t**** WARNING: Removed first component and used as radio core ID as this was empty ****\\n\")\n self.rad_host = self.components.pop(0)\n\n if self.rad_host == self.default_rad_host: # yes, for when components is also empty\n component_count = len(self.components) + 0 # catch for 'm%i' below defaulting to m1\n else:\n component_count = len(self.components)+1\n\n core_ID = f'{self.rad_host[0]}*{self.inf_host[0]}*m{component_count}*C0'\n self.xid_tags.append((core_ID,self.rad_host[1]))\n # core_ID is attached to radio catalogue 'core' named rad_host[0]\n # at row rad_host[1]\n\n for c,comp in enumerate(self.components):\n comp_ID = f'{self.rad_host[0]}*{self.inf_host[0]}*m{component_count}*C{c+1}'\n self.xid_tags.append((comp_ID,comp[1]))\n # core_ID is attached to radio catalogue source named comp[0]\n # at row comp[1] - Since this is just the XID tag the combination\n # of identical rad_host and inf_host is sufficient to associate back\n # to the combined source in the catalogue\n\n return self.xid_tags\n\n\n# ------------------------------------------ #\n@verbwrap\ndef onpick(event):\n '''\n Handles scatter point click events\n\n phase=1: Infrared host selection\n phase=2: Radio host selection\n phase=3: Radio component selection\n '''\n global ident\n global icross\n\n verboseprint('Current phase:', phase)\n if event.mouseevent.button == 1: #only want lmb clicks\n\n selection = event.artist\n xdata = selection.get_xdata()\n ydata = selection.get_ydata()\n ind = event.ind\n point = tuple(zip(xdata[ind], ydata[ind]))\n xclick,yclick = point[0] # RA,dec = point[0]\n\n print('RA, Dec click:', point[0])\n\n if phase == 1:\n ''' Marking infrared host '''\n label = 'ihost'\n ident.set_inf_host((np.where((iTable[iRA_column] == xclick) & (iTable[iDEC_column] == yclick)))[0][0],\n iTable[iID_column])\n xpix,ypix = wcsmap.wcs_world2pix([[xclick,yclick]],1)[0]\n icross = Crosshair(xpix,ypix,ax,linewidth=1.5)\n if phase == 2:\n ''' Marking radio host '''\n label = 'rhost'\n mark, col, size = 'D', 'green', 16\n ident.set_rad_host((np.where((rTable[rRA_column] == xclick) & (rTable[rDEC_column] == yclick)))[0][0],\n rTable[rID_column])\n ax.plot(xclick, yclick, mark, markersize=size, mfc='none', mec=col, mew=1.2,linewidth=2, transform=axtrans)\n if phase == 3:\n ''' Marking radio components '''\n label = 'C%i' % (len(ident.components))\n mark, col, size = 's', 'limegreen', 16\n added = ident.add_component(\n (np.where((rTable[rRA_column] == xclick) & (rTable[rDEC_column] == yclick)))[0][0],\n rTable[rID_column])\n if added:\n ax.text(xclick, yclick, ' - %s' % (label), horizontalalignment='left', transform=axtrans)\n ax.plot(xclick, yclick, mark, markersize=size, 
mfc='none', mec=col, mew=1.2,linewidth=2, transform=axtrans)\n\n verboseprint('[xclick,yclick,label]=',xclick,yclick,label)\n\n fig.canvas.draw_idle()\n\n# ------------------------------------------ #\n@verbwrap\ndef on_key(event):\n '''\n Handles predefined key-press events\n '''\n global phase\n global ipix_current, rpix_current\n global quitting, newtarget\n global keyID\n global certainty\n global tkC\n\n verboseprint('Key press:\\'%s\\'' %(event.key))\n verboseprint('Current phase:', phase)\n if event.key == ' ': #spacebar\n ''' move to next phase '''\n if phase == 2: # swapping these if statements causes consecutive calls\n ''' move to radio component ID '''\n tit = 'Radio component ID'\n next_phase()\n verboseprint('New phase:', phase, tit)\n if phase == 1:\n ''' move to radio host ID '''\n tit = 'Radio host ID'\n next_phase()\n verboseprint('New phase:', phase, tit)\n\n if event.key == 'enter' or event.key == 'd':\n ''' Done with this source '''\n if phase == 3:\n tag_generator()\n save_fig(ident.rad_host[0]) #does nothing unless --savefigs is passed\n cleanup()\n newtarget = True\n # # try:\n # print('comment for this source:', tkC.entryVar.get())\n # # except (AttributeError, NameError) as e:\n # # \tpass\n return None\n else: print('You\\'re not done yet')\n\n if phase == 3:\n try:\n if int(event.key) in range(5)[1:]:\n print('Identification certainty marked as %i' %int(event.key))\n certainty = event.key\n except ValueError as e:\n pass\n\n if event.key == 'X':\n ''' Mark for reexamination later, move to next source '''\n tricky_tag()\n cleanup()\n newtarget = True\n return None\n\n if event.key == 'Q':\n ''' Safely quit, saving progress'''\n update_table()\n cleanup()\n quitting = True\n return None\n\n if event.key == 'r':\n ''' Restart identification on this source '''\n ipix_current, rpix_current = ipix_default, rpix_default # reset cutout size\n cleanup()\n newtarget = False # not explicitly necessary\n return None\n\n if event.key == 'b':\n ''' increase cutout size and redraw '''\n ipix_current = int(ipix_current * 1.4)\n rpix_current = int(rpix_current * 1.4) # increase cutout size\n cleanup()\n newtarget = False # not explicitly necessary\n return None\n\n if event.key == 't':\n ''' toggles visibility of scattered sources '''\n sources.set_visible(not sources.get_visible())\n fig.canvas.draw_idle()\n\n if event.key == 'S':\n ''' Save progress of IDs to file'''\n update_table()\n\n if event.key == 'c':\n ''' produce a dialogue box for user comment '''\n print('please enter a comment in the box and hit <Return>')\n runtkc()\n\n if event.key =='C':\n ''' print user comment if given'''\n # try:\n print('comment for this source:', tkC.entryVar.get())\n print('saved comment:', tkC.comment)\n # except (AttributeError, NameError) as e:\n # \tpass\n\n if event.key == 'f':\n ''' Manually save figure '''\n ID = rTable[rID_column][target_index]\n save_fig(ID+'_manual', manual = True)\n\n if event.key == 'i':\n ''' print lst 25 id'd sources (from table) '''\n print('Last 25 IDs')\n print(rTable[rTable['mcvcm_tag'] != tag_placeholder][-25:])\n\n if event.key == 'J':\n ''' print lst 25 id'd sources (from table) '''\n print(ident.__dict__)\n\n if event.key == 'K':\n ''' force tag generation '''\n print(ident.__dict__)\n ident.generate_tags()\n print(ident.__dict__)\n\n if event.key == 'h':\n ''' print options '''\n print('...............................')\n print('...............................')\n print('Basic controls:\\n\\tspacebar - go to next phase\\n\\tenter,d -- go to next 
source (once in component phase)')\n print('other controls:')\n print('\\ti - show last 25 rows from ID\\'d table')\n print('\\tb - zoom out (can be pressed multiple times)')\n print('\\tt - toggle catalogue sources on/off')\n print('\\tf - manually save figure')\n print('\\tr - restart ID of current source')\n print('\\tshift+q - save table to file, and quit')\n print('\\tshift+s - save table to file')\n print('\\tshift+x - mark for reexamination later\\n\\t\\tRun script with -x flag to display only these')\n print('...............................')\n print('...............................')\n\n# ------------------------------------------ #\n@verbwrap\ndef update_table(whole_table=False):\n '''\n Saves the id'd objects to a file\n '''\n if whole_table:\n verboseprint('Saving entire table, this may take a while ...')\n rsave = rTable\n else:\n verboseprint('Saving table of ID\\'d objects only ...')\n rsave = rTable[rTable['mcvcm_tag'] != tag_placeholder]\n\n if len(rsave) == 0:\n print('No data to save!')\n else:\n verboseprint('Saving radio table ...')\n ascii.write(rsave, save_path, format='fixed_width_two_line', overwrite=True)\n verboseprint('Saved!')\n\n# ------------------------------------------ #\n@verbwrap\ndef tag_generator():\n '''\n Creates the tags for each host and component\n tag[1] is the row-number in the radio component catalogue\n '''\n global ident, tkC\n\n ident.generate_tags()\n\n for tag in ident.xid_tags:\n verboseprint('Writing to table')\n verboseprint('current XID tag:', tag[0])\n verboseprint('from radio catalogue row:', tag[1])\n\n rTable['mcvcm_tag'][tag[1]] = tag[0]\n rTable['mcvcm_flag'][tag[1]] = certainty\n\n try:\n rTable['mcvcm_comment'][tag[1]] = tkC.entryVar.get()\n except (AttributeError, NameError) as E:\n pass # comment remains as placeholder value\n\n\n# ------------------------------------------ #\n@verbwrap\ndef tricky_tag():\n '''\n Flags source to be identified later\n\n used by pressing 'shift+x' in the plotting window\n run script with -x flag to ID only these\n '''\n source = target_index\n rTable['mcvcm_tag'][source] = skipped_placeholder\n\n\n# ------------------------------------------ #\n@verbwrap\ndef next_phase():\n '''\n removes scatter data,\n retains clicked point markers,\n plots new scatter data,\n '''\n global phase, sources, phase_title\n\n if phase == 1:\n # Switch to radio host tags/data\n sources.remove()\n sources, = ax.plot(rData[rRA_column], rData[rDEC_column], picker=6, transform=axtrans, linestyle='none',\n **parameter_config['markers']['phase2'])\n phase_title = 'Radio core ID'\n ax.set_title(phase_title)\n fig.canvas.draw_idle()\n if phase == 2:\n # Switch to radio comp tags\n sources.remove()\n sources, = ax.plot(rData[rRA_column], rData[rDEC_column], picker=6, transform=axtrans, linestyle='none',\n **parameter_config['markers']['phase3'])\n phase_title = 'Radio component IDs'\n ax.set_title(phase_title)\n fig.canvas.draw_idle()\n phase += 1\n\n# ------------------------------------------ #\n@verbwrap\ndef get_target():\n '''\n handles incrementation of source to be ID'd\n '''\n global target_index\n global ipix_current, rpix_current\n global newtarget\n\n if newtarget:\n print('Moving to next target')\n else:\n return\n\n ipix_current, rpix_current = ipix_default, rpix_default # reset cutout size\n\n # save on every 5th ID\n print('Autosaving ... 
')\n if not target_index%5:\n update_table()\n\n verboseprint('target_index =', target_index)\n skips = 0\n\n if trickyon:\n while newtarget:\n if target_index != len(rTable) and rTable['mcvcm_tag'][target_index] != skipped_placeholder:\n ''' skip all sources with good (or no) ID '''\n verboseprint('Good (or no) ID for row', target_index,\n '(%s: %s)' % (rTable[rID_column][target_index], rTable['mcvcm_tag'][target_index]))\n skips+=1\n target_index+=1\n else:\n newtarget=False\n verboseprint('Skipped %i sources with good (or no) IDs' %(skips), 'tindx',target_index)\n else:\n while newtarget:\n if target_index != len(rTable) and rTable['mcvcm_tag'][target_index] != tag_placeholder:\n ''' skip identifying sources already tagged '''\n verboseprint('Already ID\\'d row', target_index,\n '(%s: %s)' % (rTable[rID_column][target_index], rTable['mcvcm_tag'][target_index]))\n skips+=1\n target_index+=1\n else:\n newtarget=False\n verboseprint('Skipped %i sources already tagged' %(skips))\n\n print('New target: row', target_index)\n\n\n# ------------------------------------------ #\n@verbwrap\ndef check_save():\n '''\n checks if (in a previous xid run) a file was saved,\n and if so adds those IDs to current list so they\n don't have to be repeated.\n\n Assumes previous xid run used the same save_path\n\n Work is always saved to the core (un-numbered) file,\n this is backed up to a numbered file each time this\n script is run.\n '''\n global rTable\n count = 0\n\n if file_accessible(save_path):\n version_control(save_path)\n saved_table = ascii.read(save_path)\n for row in saved_table:\n count +=1\n source, xid, flag, comment = row[rID_column], row['mcvcm_tag'], row['mcvcm_flag'], row['mcvcm_comment']\n\n # attach this xid to appropriate source in new table\n newRow = np.where(rTable[rID_column] == source)[0][0]\n rTable[newRow]['mcvcm_tag'] = xid\n rTable[newRow]['mcvcm_flag'] = flag\n rTable[newRow]['mcvcm_comment'] = comment\n\n verboseprint('Already ID\\'d %s as %s' %(source,xid))\n verboseprint('adding to row %i of current table' %(newRow))\n\n verboseprint('\\nrecovered %i IDs from previous session' %count)\n\n\n# ------------------------------------------ #\n@verbwrap\ndef save_fig(rID, manual = False):\n '''\n Saves displayed figure to format specified\n by command line argument\n '''\n global sources, phase_title\n if manual or fig_extention != None:\n print('Saving figure ...')\n # clean up figure\n # sources.remove()\n phase_title = rID\n ax.set_title('')\n\n # Name and save figure\n if manual:\n filename = rID+'.pdf'\n else:\n extention = '.'+fig_extention\n name = rTable[rID_column][target_index]\n filename = name+extention\n\n save = os.path.join(fig_path, filename)\n fig.savefig(save, bbox_inches='tight', dpi = 300)\n verboseprint('Saved' , filename)\n if manual: # restart ID as sources are now removed\n cleanup()\n\n\n# ------------------------------------------ #\n@verbwrap\ndef cleanup():\n '''\n cleans before restart,\n or next source\n '''\n global fig, ax, axtrans, sources\n global keyID, clickID, tkC\n fig.canvas.mpl_disconnect(keyID)\n fig.canvas.mpl_disconnect(clickID)\n plt.close(fig)\n del(fig)\n del(ax)\n del(sources)\n del(keyID)\n del(clickID)\n try:\n del(tkC)\n except NameError:\n pass\n\n\n# ------------------------------------------ #\n@verbwrap\ndef start():\n '''\n sets default variables,\n starts identification process,\n can be used to restart id of current source,\n '''\n global compCount\n global phase\n global fig, ax, axtrans, sources, phase_title, wcsmap\n 
global clicks\n global keyID, clickID\n global iData, rData\n global ipix_current, rpix_current\n global quitting\n global ident, certainty\n global icross\n\n ident = Identity()\n # Some default values\n phase_title = 'Infrared host ID' # plot titles\n phase = 1 # Identification phase\n certainty = 1 # Identification certainty default\n\n # Say farewell, save table, and exit!\n if target_index == len(rTable):\n print('!!!!!!!!!!!!!!!!!!!!!!!')\n print('!! Nice, you\\'re done !!')\n print('!!!!!!!!!!!!!!!!!!!!!!!')\n print(' _\\n', ' /(|\\n', ' ( :', )\n print(' __\\ \\ _____\\n', ' (____) `|\\n', ' (____)| |')\n print(' (____).__ |\\n', ' (___)__.|_____\\n')\n update_table()\n quitting = True\n return None\n\n # Get coordinates of radio target\n tRA, tDEC = rTable[rRA_column][target_index], rTable[rDEC_column][target_index]\n verboseprint('target RA,Dec = ', tRA, tDEC)\n target = SkyCoord(tRA, tDEC, frame='fk5', unit='deg')\n\n # Grab figure, axis object, and axis transform from cutoutslink.py\n fig, ax, axtrans, wcsmap = cutout.cutouts2(mosaic, radioSB, radioRMS, tRA, tDEC,\n isize=ipix_current, rsize=rpix_current, verbose=verbose)\n ax.set_title(phase_title)\n fig.canvas.draw_idle()\n\n # select catalogue sources from a region around target to mininimise plotting time\n iData = iTable[iCoords.separation(target) < 240 * u.arcsec]\n rData = rTable[rCoords.separation(target) < 240 * u.arcsec]\n\n # plot sources and assign for deletion later (comma is essential to deletion!)\n sources, = ax.plot(iData[iRA_column], iData[iDEC_column], picker=6, transform=axtrans, linestyle='none',\n **parameter_config['markers']['phase1'])\n\n # Start canvas listeners\n keyID = fig.canvas.mpl_connect('key_press_event', on_key)\n clickID = fig.canvas.mpl_connect('pick_event', onpick)\n\n plt.subplots_adjust(left=0.05, right=0.9, top=0.9, bottom=0.1)\n plt.get_current_fig_manager().window.wm_geometry(f\"+{figure_pos_horizontal}+{figure_pos_vertical}\")\n plt.show()\n\n\n# ------------------------------------------ #\n\n# ------------------------------------------ #\t\n# Initial setup\n# ------------------------------------------ #\t\n\nfrom astropy.io import ascii, fits\nfrom astropy.table import Column\nfrom astropy.coordinates import SkyCoord\nfrom astropy import units as u\nimport cutout as cutout\nimport os\nimport json\n\nthisdir = os.path.dirname(os.path.abspath(__file__))\n\n# create folders for saving output, if they already exist returns path\nif doing_demo:\n table_path = make_folder(os.path.join(thisdir, 'demo_output', 'tables'))\n fig_path = make_folder(os.path.join(thisdir, 'demo_output', 'figures'))\nelse:\n table_path = make_folder(os.path.join(thisdir, 'output', 'tables'))\n fig_path = make_folder(os.path.join(thisdir, 'output', 'figures'))\n# ------------------------------------------ #\t\n# Read in required file paths from config file\n\nwith open(os.path.join(thisdir, 'path_config.json'), 'r') as config:\n path_config = json.load(config)\n\n# field is specified in launch arguments\nradioSB = os.path.join(thisdir, path_config[field][\"radio_continuum\"])\nradioRMS = os.path.join(thisdir, path_config[field][\"radio_rms\"])\nmosaic = os.path.join(thisdir, path_config[field][\"infrared_mosaic\"])\nradio_catalogue = os.path.join(thisdir, path_config[field][\"radio_catalog\"])\ninfrared_catalogue = os.path.join(thisdir, path_config[field][\"infrared_catalog\"])\noutput_name = f'{field}_mcvcm_table.dat'\n\n# output path for saved files\nif doing_demo:\n save_path = os.path.join(thisdir, table_path, 
f'demo-{output_name}')\nelse:\n save_path = os.path.join(thisdir, table_path, output_name)\n\nwith open(os.path.join(thisdir, 'parameter_config.json'), 'r') as config:\n parameter_config = json.load(config)\n\nfigure_pos_horizontal = parameter_config[\"figure_position\"][\"horizontal\"]\nfigure_pos_vertical = parameter_config[\"figure_position\"][\"vertical\"]\n\nipix_default = parameter_config['cutout_pixels'][\"infrared\"]\nrpix_default = parameter_config['cutout_pixels'][\"radio\"]\nipix_current, rpix_current = ipix_default, rpix_default # sets the size (in pixels) of the slice\nstart_index = parameter_config[\"start_index\"] # Change for manual inspection of catalogue sources\ntarget_index = start_index # iterable used for rTable\n\n# ------------------------------------------ #\t\n# column names in catalogues\nrRA_column = parameter_config['column_names']['radio_ra']\nrDEC_column = parameter_config['column_names']['radio_dec']\nrID_column = parameter_config['column_names']['radio_id']\niRA_column = parameter_config['column_names']['infrared_ra']\niDEC_column = parameter_config['column_names']['infrared_dec']\niID_column = parameter_config['column_names']['infrared_id']\n\n# ------------------------------------------ #\t\n# set up catalogues, and add XID column\ntag_placeholder = '-' * 53 # placeholder needs to be same length as MAX final ^XID_tag, otherwise XID_tag is truncated\ncomment_placeholder = '-' * 53\nskipped_placeholder = '---crossmatch_skipped-redo_by_running_with_-x_flag---'\n\nprint(f'\\nReading radio table: {radio_catalogue}')\nrTable = ascii.read(radio_catalogue)\nrTable.add_column(Column([tag_placeholder, ] * len(rTable), name='mcvcm_tag'))\nrTable.add_column(Column([comment_placeholder, ] * len(rTable), name='mcvcm_comment'))\nrTable.add_column(Column([0, ] * len(rTable), name='mcvcm_flag'))\n\nprint(f'\\nReading infrared table: {infrared_catalogue}')\niTable = fits.open(infrared_catalogue)[1].data\n\n# ------------------------------------------ #\n# generate coordinate lookup array\niCoords = SkyCoord(np.array(iTable[iRA_column]), np.array(iTable[iDEC_column]), frame='fk5', unit='deg')\nrCoords = SkyCoord(np.array(rTable[rRA_column]), np.array(rTable[rDEC_column]), frame='fk5', unit='deg')\n\n\n# execute\nif __name__ == '__main__':\n check_save()\n\n quitting = False\n newtarget = rTable['mcvcm_tag'][\n start_index] != tag_placeholder # new target starts true only if already ID first source\n\n while not quitting:\n get_target()\n start()\n\n print('Quitting')"
] |
[
[
"matplotlib.use",
"numpy.array",
"matplotlib.pyplot.get_current_fig_manager",
"matplotlib.pyplot.close",
"numpy.where",
"matplotlib.pyplot.show",
"matplotlib.pyplot.subplots_adjust"
]
] |
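The XID tag scheme that `Identity.generate_tags` produces can be written out compactly; a sketch with hypothetical catalogue IDs, following the `rad_host*inf_host*m<count>*C<n>` f-strings in the code:

```python
inf_host = "SWIRE3_J003940.76-432549.1"  # hypothetical infrared host ID
rad_host = "EI1896"                       # hypothetical radio core ID
components = ["EI1897", "EI1898"]         # hypothetical extra radio components

component_count = len(components) + 1     # the core counts as component C0
tags = [f"{rad_host}*{inf_host}*m{component_count}*C0"]
tags += [f"{rad_host}*{inf_host}*m{component_count}*C{i + 1}"
         for i in range(len(components))]
print(tags)
# ['EI1896*SWIRE3_J003940.76-432549.1*m3*C0', '...*m3*C1', '...*m3*C2']
```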
jm1261/GMRAnalysis
|
[
"de92803883b7296ed24ffecbcd6e1d967167190a"
] |
[
"GMR/DataPreparation.py"
] |
[
"import os\nimport numpy as np\nimport GMR.InputOutput as io\n\n\ndef pwr_norm(image_data, file_name, norm_power, dir_name):\n '''\n Plot a raw image and normalised image - use power meter data to\n normalise images. Normalised image's brightness estimated from\n pixels mean/std. Optionally save corrected image out. Read data\n values in using Pandas for speed.\n Args:\n image_data: <array> csv image data as 2D array\n file_name: <string> file name without extension\n norm_power: <array> normalised power array\n dir_name: <string> directory path to concatenate corrected\n image png directory to\n Returns:\n norm_img: <array> normalised image as numpy array int16\n '''\n file, img_no = file_name.split('_')\n\n norm_img = (image_data / norm_power[int(img_no)])\n norm_img *= 1e3 # increase precision for saving as int\n\n norm_img = (norm_img).astype('int32')\n return norm_img\n\n\ndef bg_norm(image,\n file_name,\n ROI,\n dir_name,\n plot_show=False,\n plot_save=False):\n '''\n Plot a raw image and normalised image - use the ROI to normalise the\n rest of the image. Normalised image's brightness estimated from\n pixels mean/std. Optionally save corrected image out. Read data\n values in using Pandas for speed.\n Args:\n image: <string> path to csv img file\n file_name: <string> file name without extension\n ROI: <tuple> x, y coordinates of ROI to use for BG correction\n dir_name: <string> directory path to concatenate corrected\n image png directory to\n plot_show: <bool> if true raw and corrected image show\n plot_save: <bool> if true raw and corrected image saved\n Returns:\n norm_img: <array> normalised image as numpy array int16\n '''\n pass\n\n\ndef trim_spec():\n '''\n Calculate which files correspond to which wavelengths are to be\n removed. Moves to new directory (_NOT_PROCESSED) corresonding numpy\n arrays (or raw csvs).\n '''\n pass\n\n\ndef trim_time():\n '''\n Calculate which directories correspond to the times which are not\n required. Moves to new directory (_NOT_PROCESSED) unwanted files.\n '''\n pass\n\n\ndef roi():\n '''\n Processes numpy arrays to slice out only wanted ROI, then re-saves\n data for further processing.\n '''\n pass\n\n\ndef normalise_file_size(number_files,\n image_save=False):\n '''\n Calculates the total file size output for normalising images and\n processing a data cube.\n Args:\n number_files: <int> number of files to process\n image_save: <bool> if images are saved set True\n '''\n file_size = (0.00775 * number_files) - 0.00578\n\n if image_save:\n file_size = (0.00794 * number_files) - 0.00578\n\n file_size = round(file_size, 2)\n return file_size\n\n\ndef normalise_process_time(number_files,\n image_save=False):\n '''\n Calculates the total processing time for normalising images\n and processing a data cube.\n Args:\n number_files: <int> number of files to process\n image_save: <bool> if images are saved set True\n '''\n process_time = -0.3+(2.5*np.exp((number_files-173.2)/324.3))\n\n if image_save:\n process_time = -3.7+(2.9*np.exp((number_files+290.5)/421.9))\n\n process_time = round(process_time, 1)\n return process_time\n\n\ndef processing_parameters(main_dir,\n exp_settings,\n image_save=False):\n '''\n Calculate the total processing time based on the number of files\n present in each hs_img. 
Give the option to output a time for\n both image output and data output.\n '''\n number_files = 0\n for hs_img in exp_settings['hs_imgs']:\n img_dir = os.path.join(main_dir, hs_img)\n if not os.path.isdir(img_dir):\n continue\n\n data_files = io.extract_files(dir_name=img_dir,\n file_string='img_')\n number_files += len(data_files)\n\n process_time = normalise_process_time(number_files,\n image_save)\n file_size = normalise_file_size(number_files,\n image_save)\n\n print(f'\\nTotal number of files: {number_files}')\n print(f'Save normalised images set to: {image_save}')\n print(f'Total file size: ~{file_size} GB')\n print(f'Total processing time: ~{process_time} mins')\n\n\ndef find_img_size(dir_name, file_string):\n '''\n Find size of an individual image from the hyperspectral imaging file,\n used to determine the pixel size of the camera. Find the height and\n width of each image and outputs the number of rows and colums as\n variables.\n Args:\n dir_name: <string> directory containing images\n file_string: <string> image names within direcotry (eg. img_)\n Returns:\n np.shape: <height and width>\n '''\n data_files = io.extract_files(dir_name=dir_name,\n file_string=file_string)\n zero_data = data_files[0]\n zero = os.path.join(dir_name, zero_data)\n zero_file = np.load(zero)\n\n return np.shape(zero_file)\n\n\ndef reshape_to_spec_lists(hs_data_cube, img_width=1920, img_height=1080):\n '''\n Reshapes a numpy hyperspectral data cube with axes (lambda, x, y)\n into an array with axes (pixels, lambda) so the spectrum corresponding\n to each pixel can be iterated.\n Args:\n hs_data_cube: 3D numpy array\n img_width: int, width of image in pixels. Optional\n img_height: int, height of image in pixels. Optional\n Returns:\n spec_list: 2D numpy array\n '''\n num_wavs, img_width, img_height = hs_data_cube.shape\n num_pixels = img_width*img_height\n\n spec_list = np.reshape(hs_data_cube, (num_wavs, num_pixels))\n spec_list = np.transpose(spec_list)\n return spec_list\n\n\ndef reshape_to_img(spec_list, img_width=1920, img_height=1080):\n '''\n Args:\n spec_list: 1D numpy array\n img_width: int, width of image in pixels. Optional\n img_height: int, height of image in pixels. Optional\n Returns:\n img_array: 2D numpy array\n '''\n img_array = np.reshape(spec_list, (img_width, img_height))\n img_array = np.transpose(img_array)\n return img_array\n\n\ndef wav_space(exp_settings):\n '''\n Generate the wavelength space from experimental settings dictionary\n Args:\n exp_settings: <dictionary> experimental settings function\n Returns:\n wavs: <array> wavelengths\n '''\n wavs = np.linspace(start=exp_settings['wav_i'],\n stop=exp_settings['wav_f'],\n num=((exp_settings['wav_f'] - exp_settings['wav_i'])\n / exp_settings['wav_s'])\n + 1)\n return wavs\n"
] |
[
[
"numpy.reshape",
"numpy.load",
"numpy.exp",
"numpy.shape",
"numpy.transpose",
"numpy.linspace"
]
] |
philhchen/sparse-marginalization-lvm
|
[
"0fd219868389ad56fdf981f052c914cd7fbde27b"
] |
[
"lvmhelpers/sparsesoftmax.py"
] |
[
"import torch\nfrom torch.nn import Module, NLLLoss, Softmax\nimport torch.nn.functional as F\nfrom torch.autograd import Function\n\nEPS = 1e6\n\ndef sparse_softmax(X, dim: int = -1, train=True):\n mask = X < torch.mean(X, dim=dim, keepdim=True)\n mask_offset = mask * (EPS if train else float(\"Inf\"))\n probs = F.softmax(X - mask_offset, dim=dim)\n return probs\n\n\nclass LogSparseSoftmax(Module):\n def __init__(self, dim: int = -1):\n super(LogSparseSoftmax, self).__init__()\n self.dim = dim\n \n def forward(self, X):\n mask = X < torch.mean(X, dim=self.dim, keepdim=True)\n log_probs = F.log_softmax(X - EPS * mask, dim=self.dim)\n return log_probs\n\n\nclass SparseSoftmaxLoss(Module):\n def __init__(self, reduction: str = 'none', dim: int = -1):\n super(SparseSoftmaxLoss, self).__init__()\n self.log_sparse_softmax = LogSparseSoftmax(dim)\n self.reduction = reduction\n self.dim = dim\n \n def forward(self, input: torch.Tensor, target: torch.Tensor):\n return F.nll_loss(self.log_sparse_softmax(input), target, reduction=self.reduction)\n"
] |
[
[
"torch.mean",
"torch.nn.functional.log_softmax",
"torch.nn.functional.softmax"
]
] |
vb690/machine_learning_exercises
|
[
"9c5473652b576a3b0f0bd1df81e24166ca01c2b7"
] |
[
"shops/machine_learning_exercises/modules/basic_supervised.py"
] |
[
"import math\n\nimport itertools\n\nimport numpy as np\n\nimport matplotlib.pyplot as plt\n\nfrom .toolbox.abstract_models import _Distances\n\n\nclass Knn(_Distances):\n def __init__(self, k=6, distance='euclidian'):\n self.X_train = None\n self.y_train = None\n self.y_predicted = None\n self.k = k\n self.distance = distance\n self.distances_matrix = None\n self.nearest_neighbours = None\n self.votes = None\n\n def _compute_distance_matrix(self, input_X):\n self.distances_matrix = getattr(self, self.distance)(\n X_train=self.X_train,\n input_X=input_X\n )\n self.nearest_neighbours = self.distances_matrix.argsort()\n\n def _vote(self):\n self.votes = {label: 0 for label in list(set(self.y_train))}\n for neighbour in range(self.k):\n\n vote_from_label = self.y_train[self.nearest_neighbours[neighbour]]\n self.votes[vote_from_label] += 1\n\n return max(self.votes, key=self.votes.get)\n\n def fit(self, X_train, y_train):\n self.X_train = X_train\n self.y_train = y_train\n\n def predict(self, X_test):\n self.y_predicted = []\n for input_X in X_test:\n\n self._compute_distance_matrix(input_X)\n prediction = self._vote()\n self.y_predicted.append(prediction)\n\n return self.y_predicted\n\n\nclass LogReg:\n def __init__(self, alpha=0.001):\n self.X_train = None\n self.y_train = None\n self.y_predicted = None\n self.alpha = alpha\n self.weights = None\n self.error = None\n\n @staticmethod\n def listify_matrix(matrix):\n matrix = np.array(matrix).tolist()\n return list(itertools.chain.from_iterable(matrix))\n\n @staticmethod\n def sigmoid(w_X):\n return 1.0 / (1 + np.exp(-w_X))\n\n def _set_X_y(self, X, y):\n X = np.insert(X, 0, 1, axis=1)\n self.X_train = np.mat(X)\n self.y_train = np.mat(y).T\n self.weights = np.random.random((X.shape[1], 1))\n\n def _compute_error(self, y_predicted):\n self.error = self.y_train - y_predicted\n\n def _update_weights(self):\n update = self.alpha * (self.X_train.T * self.error)\n self.weights += update\n\n def fit(self, X_train, y_train, epochs=500):\n self._set_X_y(X_train, y_train)\n for epoch in range(epochs):\n\n prediction = self.sigmoid(self.X_train * self.weights)\n self._compute_error(prediction)\n self._update_weights()\n\n def predict(self, X_test, proba=False):\n X_test = np.insert(X_test, 0, 1, axis=1)\n self.y_predicted = self.sigmoid(np.mat(X_test) * self.weights)\n if proba:\n return self.y_predicted\n else:\n return np.around(self.y_predicted)\n\n def visualize_decision_function(self, X_test):\n X_test = np.insert(X_test, 0, 1, axis=1)\n w_X = np.mat(X_test) * self.weights\n sigmoid_w_X = self.sigmoid(w_X)\n plt.figure(figsize=(10, 10))\n plt.scatter(\n self.listify_matrix(w_X),\n self.listify_matrix(sigmoid_w_X),\n s=10\n )\n plt.xlabel('Weighted X')\n plt.ylabel('Sigmoid (Weighted X)')\n plt.title('Decision Function')\n plt.grid(True)\n plt.show()\n\n def visualize_decision_boundary(self, X_test, y_test):\n X_test = np.insert(X_test, 0, 1, axis=1)\n weights = np.asarray(self.weights)\n X_1 = X_test[:, 1]\n X_2 = X_test[:, 2]\n X_line = np.arange(min(X_1), max(X_1), 0.1)\n y_line = (-weights[0]-weights[1] * X_line)/weights[2]\n\n plt.figure(figsize=(10, 10))\n plt.scatter(\n X_1,\n X_2,\n s=10,\n c=y_test\n )\n plt.plot(X_line, y_line, color='r', linestyle='--', linewidth=1)\n plt.xlabel('X 1')\n plt.ylabel('X 2')\n plt.title('Decision Boundary')\n plt.grid(True)\n plt.show()\n\n\nclass BernulliNaiveBayes:\n def __init__(self):\n self.y_predicted = []\n\n @staticmethod\n def group_by(values_array, grouping_array):\n grouped = {group: [] for group in 
np.unique(grouping_array)}\n for values, group in zip(values_array, grouping_array):\n\n grouped[group].append(values)\n\n grouped = {\n group: np.array(values) for group, values in grouped.items()\n }\n return grouped\n\n @staticmethod\n def compute_bernulli_pdf(x, probability):\n if x == 1:\n return probability\n else:\n return 1-probability\n\n def _compute_labels_probabilities(self):\n labels, occurences = np.unique(self.y_train, return_counts=True)\n return {\n label: occurence/len(self.y_train) for label, occurence in\n zip(labels, occurences)\n }\n\n def _compute_features_probabilities(self, grouped):\n return {\n label: np.sum(values, axis=0) / len(values) for label, values in\n grouped.items()\n }\n\n def _compute_likelihoods(self, input_X):\n labels_likelihoods = {}\n for label, probabilities in self.features_probabilities.items():\n\n likelihood = np.prod(\n [self.compute_bernulli_pdf(x, prob) for x, prob in\n zip(input_X, probabilities)\n ]\n )\n labels_likelihoods[label] = likelihood\n\n return labels_likelihoods\n\n def _compute_joint_probabilities(self, input_X):\n labels_joint_probabilities = {}\n labels_likelihoods = self._compute_likelihoods(input_X)\n for label, probability in self.labels_probabilities.items():\n\n labels_joint_probabilities[label] = \\\n probability*labels_likelihoods[label]\n\n return labels_joint_probabilities\n\n def fit(self, X_train, y_train, labels_probabilities=None):\n setattr(self, 'X_train', X_train)\n setattr(self, 'y_train', y_train)\n if labels_probabilities is not None:\n setattr(self, 'labels_probabilities', labels_probabilities)\n else:\n setattr(\n self,\n 'labels_probabilities',\n self._compute_labels_probabilities()\n )\n grouped = self.group_by(self.X_train, self.y_train)\n setattr(\n self,\n 'features_probabilities',\n self._compute_features_probabilities(grouped)\n )\n\n def predict(self, X_test):\n for input_X in X_test:\n\n labels_joint_probabilities = self._compute_joint_probabilities(\n input_X\n )\n marginal_probability = sum(labels_joint_probabilities.values())\n prediction = {}\n for label, joint_probability in labels_joint_probabilities.items():\n\n prediction[label] = joint_probability/marginal_probability\n\n self.y_predicted.append(max(prediction, key=prediction.get))\n\n return self.y_predicted\n\n\nclass GaussianNaiveBayes:\n def __init__(self):\n self.y_predicted = []\n\n @staticmethod\n def group_by(values_array, grouping_array):\n grouped = {group: [] for group in np.unique(grouping_array)}\n for values, group in zip(values_array, grouping_array):\n\n grouped[group].append(values)\n\n grouped = {\n group: np.array(values) for group, values in grouped.items()\n }\n return grouped\n\n @staticmethod\n def compute_gaussian_pdf(x, mean, std):\n variance = std**2\n num = math.exp(-(x - mean)**2 / (2 * variance))\n denom = math.sqrt(2*math.pi*variance)\n return num / denom\n\n def _compute_statistics(self, grouped):\n labels_stats = {}\n for label, values in grouped.items():\n\n labels_stats[label] = {\n 'means': values.mean(axis=0),\n 'stds': values.std(axis=0)\n }\n\n return labels_stats\n\n def _compute_probabilities(self):\n labels, occurences = np.unique(self.y_train, return_counts=True)\n return {\n label: occurence/len(self.y_train) for label, occurence in\n zip(labels, occurences)\n }\n\n def _compute_likelihoods(self, input_X):\n labels_likelihoods = {}\n for label, stats in self.labels_stats.items():\n\n means = stats['means']\n stds = stats['stds']\n likelihood = np.prod(\n [self.compute_gaussian_pdf(x, mean, 
std) for x, mean, std in\n zip(input_X, means, stds)\n ]\n )\n labels_likelihoods[label] = likelihood\n\n return labels_likelihoods\n\n def _compute_joint_probabilities(self, input_X):\n labels_joint_probabilities = {}\n labels_likelihoods = self._compute_likelihoods(input_X)\n for label, probability in self.labels_probabilities.items():\n\n labels_joint_probabilities[label] = \\\n probability*labels_likelihoods[label]\n\n return labels_joint_probabilities\n\n def fit(self, X_train, y_train, labels_probabilities=None):\n setattr(self, 'X_train', X_train)\n setattr(self, 'y_train', y_train)\n if labels_probabilities is not None:\n setattr(self, 'labels_probabilities', labels_probabilities)\n else:\n setattr(\n self,\n 'labels_probabilities',\n self._compute_probabilities()\n )\n grouped = self.group_by(self.X_train, self.y_train)\n setattr(self, 'labels_stats', self._compute_statistics(grouped))\n\n def predict(self, X_test):\n for input_X in X_test:\n\n labels_joint_probabilities = self._compute_joint_probabilities(\n input_X\n )\n marginal_probability = sum(labels_joint_probabilities.values())\n prediction = {}\n for label, joint_probability in labels_joint_probabilities.items():\n\n prediction[label] = joint_probability/marginal_probability\n\n self.y_predicted.append(max(prediction, key=prediction.get))\n\n return self.y_predicted\n"
] |
[
[
"numpy.array",
"numpy.asarray",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.grid",
"matplotlib.pyplot.title",
"matplotlib.pyplot.plot",
"numpy.exp",
"numpy.sum",
"matplotlib.pyplot.figure",
"matplotlib.pyplot.show",
"matplotlib.pyplot.scatter",
"matplotlib.pyplot.ylabel",
"numpy.random.random",
"numpy.around",
"numpy.insert",
"numpy.unique",
"numpy.mat"
]
] |
apllolulu/SRN-Learning-to-see-in-the-dark
|
[
"94512bdcf2123ca20c1620998e11e5509472a45d"
] |
[
"train_Sony.py"
] |
[
"# uniform content loss + adaptive threshold + per_class_input + recursive G\n# improvement upon cqf37\nfrom __future__ import division\nimport os, time, scipy.io\nimport tensorflow as tf\nimport tensorflow.contrib.slim as slim\nimport numpy as np\nimport rawpy\nimport glob\nfrom random import shuffle\n\ninput_dir = './dataset/Sony/short/'\ngt_dir = './dataset/Sony/long/'\ncheckpoint_dir = './result_Sony/'\nresult_dir = './result_Sony/'\n\n# get train IDs\ntrain_fns = glob.glob(gt_dir + '0*.ARW')\ntrain_ids = [int(os.path.basename(train_fn)[0:5]) for train_fn in train_fns]\n\nps = 512 # patch size for training\nsave_freq = 500\n\nDEBUG = 0\nif DEBUG == 1:\n save_freq = 2\n train_ids = train_ids[0:5]\n\n\ndef lrelu(x):\n return tf.maximum(x * 0.2, x)\n\n\ndef upsample_and_concat(x1, x2, output_channels, in_channels):\n pool_size = 2\n deconv_filter = tf.Variable(tf.truncated_normal([pool_size, pool_size, output_channels, in_channels], stddev=0.02))\n deconv = tf.nn.conv2d_transpose(x1, deconv_filter, tf.shape(x2), strides=[1, pool_size, pool_size, 1])\n\n deconv_output = tf.concat([deconv, x2], 3)\n deconv_output.set_shape([None, None, None, output_channels * 2])\n\n return deconv_output\n\n\ndef network(input):\n conv1 = slim.conv2d(input, 32, [3, 3], rate=1, activation_fn=lrelu, scope='g_conv1_1')\n conv1 = slim.conv2d(conv1, 32, [3, 3], rate=1, activation_fn=lrelu, scope='g_conv1_2')\n pool1 = slim.max_pool2d(conv1, [2, 2], padding='SAME')\n\n conv2 = slim.conv2d(pool1, 64, [3, 3], rate=1, activation_fn=lrelu, scope='g_conv2_1')\n conv2 = slim.conv2d(conv2, 64, [3, 3], rate=1, activation_fn=lrelu, scope='g_conv2_2')\n pool2 = slim.max_pool2d(conv2, [2, 2], padding='SAME')\n\n conv3 = slim.conv2d(pool2, 128, [3, 3], rate=1, activation_fn=lrelu, scope='g_conv3_1')\n conv3 = slim.conv2d(conv3, 128, [3, 3], rate=1, activation_fn=lrelu, scope='g_conv3_2')\n pool3 = slim.max_pool2d(conv3, [2, 2], padding='SAME')\n\n conv4 = slim.conv2d(pool3, 256, [3, 3], rate=1, activation_fn=lrelu, scope='g_conv4_1')\n conv4 = slim.conv2d(conv4, 256, [3, 3], rate=1, activation_fn=lrelu, scope='g_conv4_2')\n pool4 = slim.max_pool2d(conv4, [2, 2], padding='SAME')\n\n conv5 = slim.conv2d(pool4, 512, [3, 3], rate=1, activation_fn=lrelu, scope='g_conv5_1')\n conv5 = slim.conv2d(conv5, 512, [3, 3], rate=1, activation_fn=lrelu, scope='g_conv5_2')\n\n up6 = upsample_and_concat(conv5, conv4, 256, 512)\n conv6 = slim.conv2d(up6, 256, [3, 3], rate=1, activation_fn=lrelu, scope='g_conv6_1')\n conv6 = slim.conv2d(conv6, 256, [3, 3], rate=1, activation_fn=lrelu, scope='g_conv6_2')\n\n up7 = upsample_and_concat(conv6, conv3, 128, 256)\n conv7 = slim.conv2d(up7, 128, [3, 3], rate=1, activation_fn=lrelu, scope='g_conv7_1')\n conv7 = slim.conv2d(conv7, 128, [3, 3], rate=1, activation_fn=lrelu, scope='g_conv7_2')\n\n up8 = upsample_and_concat(conv7, conv2, 64, 128)\n conv8 = slim.conv2d(up8, 64, [3, 3], rate=1, activation_fn=lrelu, scope='g_conv8_1')\n conv8 = slim.conv2d(conv8, 64, [3, 3], rate=1, activation_fn=lrelu, scope='g_conv8_2')\n\n up9 = upsample_and_concat(conv8, conv1, 32, 64)\n conv9 = slim.conv2d(up9, 32, [3, 3], rate=1, activation_fn=lrelu, scope='g_conv9_1')\n conv9 = slim.conv2d(conv9, 32, [3, 3], rate=1, activation_fn=lrelu, scope='g_conv9_2')\n\n conv10 = slim.conv2d(conv9, 12, [1, 1], rate=1, activation_fn=None, scope='g_conv10')\n out = tf.depth_to_space(conv10, 2)\n return out\n\n\ndef pack_raw(raw):\n # pack Bayer image to 4 channels\n im = raw.raw_image_visible.astype(np.float32)\n im = np.maximum(im - 
512, 0) / (16383 - 512) # subtract the black level\n\n im = np.expand_dims(im, axis=2)\n img_shape = im.shape\n H = img_shape[0]\n W = img_shape[1]\n\n out = np.concatenate((im[0:H:2, 0:W:2, :],\n im[0:H:2, 1:W:2, :],\n im[1:H:2, 1:W:2, :],\n im[1:H:2, 0:W:2, :]), axis=2)\n return out\n\n\nsess = tf.Session()\nin_image = tf.placeholder(tf.float32, [None, None, None, 4])\ngt_image = tf.placeholder(tf.float32, [None, None, None, 3])\nout_image = network(in_image)\n\nG_loss = tf.reduce_mean(tf.abs(out_image - gt_image))\n\nt_vars = tf.trainable_variables()\nlr = tf.placeholder(tf.float32)\nG_opt = tf.train.AdamOptimizer(learning_rate=lr).minimize(G_loss)\n\nsaver = tf.train.Saver()\nsess.run(tf.global_variables_initializer())\nckpt = tf.train.get_checkpoint_state(checkpoint_dir)\nif ckpt:\n print('loaded ' + ckpt.model_checkpoint_path)\n saver.restore(sess, ckpt.model_checkpoint_path)\n\n# Raw data takes long time to load. Keep them in memory after loaded.\ngt_images = [None] * 6000\ninput_images = {}\ninput_images['300'] = [None] * len(train_ids)\ninput_images['250'] = [None] * len(train_ids)\ninput_images['100'] = [None] * len(train_ids)\n\ng_loss = np.zeros((5000, 1))\n\nallfolders = glob.glob('./result/*0')\nlastepoch = 0\nfor folder in allfolders:\n lastepoch = np.maximum(lastepoch, int(folder[-4:]))\n\nlearning_rate = 1e-4\nfor epoch in range(lastepoch, 4001):\n if os.path.isdir(\"result/%04d\" % epoch):\n continue\n cnt = 0\n if epoch > 2000:\n learning_rate = 1e-5\n\n for ind in np.random.permutation(len(train_ids)):\n # get the path from image id\n train_id = train_ids[ind]\n in_files = glob.glob(input_dir + '%05d_00*.ARW' % train_id)\n in_path = in_files[np.random.random_integers(0, len(in_files) - 1)]\n in_fn = os.path.basename(in_path)\n\n gt_files = glob.glob(gt_dir + '%05d_00*.ARW' % train_id)\n gt_path = gt_files[0]\n gt_fn = os.path.basename(gt_path)\n in_exposure = float(in_fn[9:-5])\n gt_exposure = float(gt_fn[9:-5])\n ratio = min(gt_exposure / in_exposure, 300)\n\n st = time.time()\n cnt += 1\n\n if input_images[str(ratio)[0:3]][ind] is None:\n raw = rawpy.imread(in_path)\n input_images[str(ratio)[0:3]][ind] = np.expand_dims(pack_raw(raw), axis=0) * ratio\n\n gt_raw = rawpy.imread(gt_path)\n im = gt_raw.postprocess(use_camera_wb=True, half_size=False, no_auto_bright=True, output_bps=16)\n gt_images[ind] = np.expand_dims(np.float32(im / 65535.0), axis=0)\n\n # crop\n H = input_images[str(ratio)[0:3]][ind].shape[1]\n W = input_images[str(ratio)[0:3]][ind].shape[2]\n\n xx = np.random.randint(0, W - ps)\n yy = np.random.randint(0, H - ps)\n input_patch = input_images[str(ratio)[0:3]][ind][:, yy:yy + ps, xx:xx + ps, :]\n gt_patch = gt_images[ind][:, yy * 2:yy * 2 + ps * 2, xx * 2:xx * 2 + ps * 2, :]\n\n if np.random.randint(2, size=1)[0] == 1: # random flip\n input_patch = np.flip(input_patch, axis=1)\n gt_patch = np.flip(gt_patch, axis=1)\n if np.random.randint(2, size=1)[0] == 1:\n input_patch = np.flip(input_patch, axis=2)\n gt_patch = np.flip(gt_patch, axis=2)\n if np.random.randint(2, size=1)[0] == 1: # random transpose\n input_patch = np.transpose(input_patch, (0, 2, 1, 3))\n gt_patch = np.transpose(gt_patch, (0, 2, 1, 3))\n\n input_patch = np.minimum(input_patch, 1.0)\n\n\n\n\n _, G_current, output = sess.run([G_opt, G_loss, out_image],\n feed_dict={in_image: input_patch, gt_image: gt_patch, lr: learning_rate})\n output = np.minimum(np.maximum(output, 0), 1)\n g_loss[ind] = G_current\n\n print(\"%d %d Loss=%.3f Time=%.3f\" % (epoch, cnt, np.mean(g_loss[np.where(g_loss)]), 
time.time() - st))\n\n if epoch % save_freq == 0:\n if not os.path.isdir(result_dir + '%04d' % epoch):\n os.makedirs(result_dir + '%04d' % epoch)\n\n temp = np.concatenate((gt_patch[0, :, :, :], output[0, :, :, :]), axis=1)\n scipy.misc.toimage(temp * 255, high=255, low=0, cmin=0, cmax=255).save(\n result_dir + '%04d/%05d_00_train_%d.jpg' % (epoch, train_id, ratio))\n\n saver.save(sess, checkpoint_dir + 'model.ckpt')\n"
] |
[
[
"tensorflow.contrib.slim.max_pool2d",
"numpy.minimum",
"tensorflow.train.get_checkpoint_state",
"numpy.where",
"tensorflow.depth_to_space",
"tensorflow.global_variables_initializer",
"numpy.concatenate",
"tensorflow.trainable_variables",
"tensorflow.shape",
"tensorflow.concat",
"tensorflow.train.Saver",
"numpy.random.randint",
"numpy.transpose",
"numpy.expand_dims",
"tensorflow.abs",
"tensorflow.train.AdamOptimizer",
"numpy.zeros",
"tensorflow.Session",
"tensorflow.truncated_normal",
"numpy.float32",
"tensorflow.placeholder",
"tensorflow.contrib.slim.conv2d",
"tensorflow.maximum",
"numpy.flip",
"numpy.maximum"
]
] |
Nightfurex/MSS
|
[
"51a1bc0d4ce759288b5f3a0a46e538aa0c1a8788"
] |
[
"mslib/utils/_tests/test_thermolib.py"
] |
[
"# -*- coding: utf-8 -*-\n\"\"\"\n\n mslib._test.test_thermoblib\n ~~~~~~~~~~~~~~~~~~~~~~~~~~~\n\n Tests for the thermolib module.\n\n This file is part of mss.\n\n :copyright: Copyright 2017 Marc Rautenhaus\n :copyright: Copyright 2016-2022 by the mss team, see AUTHORS.\n :license: APACHE-2.0, see LICENSE for details.\n\n Licensed under the Apache License, Version 2.0 (the \"License\");\n you may not use this file except in compliance with the License.\n You may obtain a copy of the License at\n\n http://www.apache.org/licenses/LICENSE-2.0\n\n Unless required by applicable law or agreed to in writing, software\n distributed under the License is distributed on an \"AS IS\" BASIS,\n WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n See the License for the specific language governing permissions and\n limitations under the License.\n\"\"\"\n\nimport numpy as np\nimport pytest\n\nfrom mslib.utils.units import units\nfrom mslib.utils import thermolib\n\n\ndef test_flightlevel2pressure2flightlevel():\n fs = (np.arange(1, 71000, 1000.) * units.m).to(units.hft)\n ps = thermolib.flightlevel2pressure(fs)\n fs_p = thermolib.pressure2flightlevel(ps).magnitude\n assert fs.magnitude == pytest.approx(fs_p)\n\n\ndef test_pressure2flightlevel2pressure():\n ps = np.arange(5, 105000, 1.)[::-1] * units.Pa\n fs = thermolib.pressure2flightlevel(ps)\n ps_p = thermolib.flightlevel2pressure(fs).magnitude\n assert ps.magnitude == pytest.approx(ps_p)\n\n\ndef test_flightlevel2pressure():\n assert thermolib.flightlevel2pressure(182.8850 * units.hft).magnitude == pytest.approx(50000)\n assert thermolib.flightlevel2pressure(530.8279 * units.hft).magnitude == pytest.approx(10000)\n assert thermolib.flightlevel2pressure(782.4335 * units.hft).magnitude == pytest.approx(3000)\n assert thermolib.flightlevel2pressure(1151.9583 * units.hft).magnitude == pytest.approx(550)\n assert thermolib.flightlevel2pressure(1626.8966 * units.hft).magnitude == pytest.approx(80)\n assert thermolib.flightlevel2pressure(1804.2727 * units.hft).magnitude == pytest.approx(40)\n with pytest.raises(ValueError):\n thermolib.flightlevel2pressure(72000 * units.m)\n\n\ndef test_pressure2flightlevel():\n assert thermolib.pressure2flightlevel(100000 * units.Pa).magnitude == pytest.approx(3.6378724)\n assert thermolib.pressure2flightlevel(75000 * units.Pa).magnitude == pytest.approx(80.91139)\n assert thermolib.pressure2flightlevel(50000 * units.Pa).magnitude == pytest.approx(182.8850)\n assert thermolib.pressure2flightlevel(10000 * units.Pa).magnitude == pytest.approx(530.8279)\n assert thermolib.pressure2flightlevel(3000 * units.Pa).magnitude == pytest.approx(782.4335)\n assert thermolib.pressure2flightlevel(550 * units.Pa).magnitude == pytest.approx(1151.9583)\n assert thermolib.pressure2flightlevel(80 * units.Pa).magnitude == pytest.approx(1626.8966)\n assert thermolib.pressure2flightlevel(40 * units.Pa).magnitude == pytest.approx(1804.2727)\n with pytest.raises(ValueError):\n thermolib.pressure2flightlevel(3.9 * units.Pa)\n\n\ndef test_isa_temperature():\n assert thermolib.isa_temperature(100 * units.hft).magnitude == pytest.approx(268.338)\n assert thermolib.isa_temperature(200 * units.hft).magnitude == pytest.approx(248.526)\n assert thermolib.isa_temperature(300 * units.hft).magnitude == pytest.approx(228.714)\n assert thermolib.isa_temperature(400 * units.hft).magnitude == pytest.approx(216.650)\n assert thermolib.isa_temperature(500 * units.hft).magnitude == pytest.approx(216.650)\n assert thermolib.isa_temperature(600 
* units.hft).magnitude == pytest.approx(216.650)\n assert thermolib.isa_temperature(700 * units.hft).magnitude == pytest.approx(217.986)\n assert thermolib.isa_temperature(800 * units.hft).magnitude == pytest.approx(221.034)\n assert thermolib.isa_temperature(1000 * units.hft).magnitude == pytest.approx(227.13)\n with pytest.raises(ValueError):\n thermolib.isa_temperature(71001 * units.m)\n assert thermolib.isa_temperature(11000 * units.m).magnitude == pytest.approx(216.65)\n assert thermolib.isa_temperature(20000 * units.m).magnitude == pytest.approx(216.65)\n assert thermolib.isa_temperature(32000 * units.m).magnitude == pytest.approx(228.65)\n assert thermolib.isa_temperature(47000 * units.m).magnitude == pytest.approx(270.65)\n assert thermolib.isa_temperature(51000 * units.m).magnitude == pytest.approx(270.65)\n\n\nclass TestConverter(object):\n def test_convert_pressure_to_vertical_axis_measure(self):\n assert thermolib.convert_pressure_to_vertical_axis_measure('pressure', 10000) == 100\n assert thermolib.convert_pressure_to_vertical_axis_measure('flightlevel', 400) == 400\n assert thermolib.convert_pressure_to_vertical_axis_measure('pressure altitude', 75000) == pytest.approx(2.46618)\n"
] |
[
[
"numpy.arange"
]
] |
sungsulim/RLControl
|
[
"1af29e446958dc8c99ab101ddf5df969888d1e2e"
] |
[
"agents/base_agent.py"
] |
[
"from utils.replaybuffer import ReplayBuffer\nimport numpy as np\n\n\n# Agent interface\n# Takes an environment (just so we can get some details from the environment like the number of observables and actions)\nclass BaseAgent(object):\n def __init__(self, config, network_manager):\n\n self.norm_type = config.norm_type\n\n # Env config\n self.state_dim = config.state_dim\n self.state_min = config.state_min\n self.state_max = config.state_max\n\n self.action_dim = config.action_dim\n self.action_min = config.action_min\n self.action_max = config.action_max\n\n self.replay_buffer = ReplayBuffer(config.buffer_size, config.random_seed)\n self.batch_size = config.batch_size\n self.warmup_steps = config.warmup_steps\n self.gamma = config.gamma\n\n # to log useful stuff within agent\n self.write_log = config.write_log\n self.write_plot = config.write_plot\n\n self.network_manager = network_manager\n self.writer = config.writer\n self.config = config\n\n def start(self, state, is_train):\n return self.take_action(state, is_train, is_start=True)\n\n def step(self, state, is_train):\n return self.take_action(state, is_train, is_start=False)\n\n def take_action(self, state, is_train, is_start):\n # Warmup step not really used\n if self.replay_buffer.get_size() < self.warmup_steps:\n\n # use random seed\n # action = (np.random.random_sample(size=self.action_dim) - 0.5) * 2 * self.action_max[0]\n raise NotImplementedError\n else:\n action = self.network_manager.take_action(state, is_train, is_start)\n return action\n\n def get_value(self, s, a):\n raise NotImplementedError\n \n def update(self, state, next_state, reward, action, is_terminal, is_truncated):\n if not is_truncated:\n if not is_terminal:\n self.replay_buffer.add(state, action, reward, next_state, self.gamma)\n else:\n self.replay_buffer.add(state, action, reward, next_state, 0.0)\n\n if self.norm_type != 'none':\n self.network_manager.input_norm.update(np.array([state]))\n self.learn()\n\n def learn(self):\n if self.replay_buffer.get_size() > max(self.warmup_steps, self.batch_size):\n state, action, reward, next_state, gamma = self.replay_buffer.sample_batch(self.batch_size)\n self.network_manager.update_network(state, action, next_state, reward, gamma)\n else:\n return\n\n # Resets the agent between episodes. Should primarily be used to clear traces or other temporally linked parameters\n def reset(self):\n self.network_manager.reset()\n"
] |
[
[
"numpy.array"
]
] |
sselgueta/pymoog
|
[
"8db465fd56debc1e25e5ce9e0afbb8053610a97c"
] |
[
"build/lib/pymoog/model.py"
] |
[
"#!/usr/bin/python\n# This is the python model dealing with the stellar atmosphere model.\nimport os\nimport subprocess\nimport numpy as np\nimport re\nimport pandas as pd\nfrom pymoog import line_data\nfrom scipy.spatial import Delaunay\n\nMOOG_path = '{}/.pymoog/moog_nosm/moog_nosm_NOV2019/'.format(os.environ['HOME'])\nMOOG_run_path = '{}/.pymoog/rundir/'.format(os.environ['HOME'])\nMOOG_file_path = '{}/.pymoog/files/'.format(os.environ['HOME'])\n\nif os.environ.get('READTHEDOCS') != 'True':\n #directory_path = os.path.dirname(os.path.abspath(__file__))\n grid_kurucz = pd.read_csv(MOOG_file_path + '/grid_points_kurucz.csv')\n grid_matrix = np.array(grid_kurucz[['Teff', 'logg', 'm_h']])\n tri = Delaunay(grid_matrix)\n \ndef read_Kurucz_model(model_path):\n '''\n Read the Kurucz model and save it as np.array.\n \n Parameters\n ----------\n model_path : str\n The path of Kurucz model.\n \n Returns\n ----------\n abun : np.array\n The 'ABUNDANCE CHANGE' part of Kurucz model. The first column is element number and second column is the abundance change.\n model_lines : np.array\n The array of Kurucz model. \n pradk : float\n The pradk number in Kurucz model.\n '''\n \n model_file = open(model_path)\n # Convert the model files into MOOG format.\n\n # Read and save the first two lines (except 'TITLE ') into header.\n header = ['Kurucz model: ' + model_file.readline()]\n header = header + [model_file.readline()]\n\n # Read the abundance change as well as model lines.\n temp = model_file.readline() + model_file.readline()\n\n abun_list = ''\n temp = model_file.readline()\n abun_list = abun_list + temp[42:]\n temp = model_file.readline()\n while 'ABUNDANCE CHANGE' in temp:\n abun_list = abun_list + temp[temp.index('ABUNDANCE CHANGE')+16:]\n temp = model_file.readline()\n abun = np.array(abun_list.split(), dtype='f').reshape(int(len(abun_list.split())/2), 2)\n\n # Read the model lines\n temp = temp.split()\n model_lines = []\n for _ in range(int(temp[2])):\n model_lines.append(model_file.readline().split())\n model_lines = np.array(model_lines, dtype=np.float64)\n\n # Read the PRADK value\n pradk = float(model_file.readline()[5:])\n \n return abun, model_lines, pradk\n\ndef save_interpo_model(teff, logg, m_h, abun, model_line, pradk, to_path):\n '''\n Save the array of kurucz model (e.g., the output of `read_Kurucz_model`) into a file with Kurucz format. 
The given stellar parameters will be written into the file.\n \n Parameters\n ----------\n teff : int\n The effective temperature of the model\n logg : float\n logg value of the model\n m_h : float\n [M/H] value (overall metallicity) of the model\n abun : np.array\n The 'ABUNDANCE CHANGE' part with the same format in the output of read_Kurucz_model.\n model_line : np.array\n The model_line part with the same format in the output of read_Kurucz_model.\n pradk : float\n pradk value.\n to_path : str\n The path to save the model.\n \n '''\n if to_path == None:\n to_path = MOOG_run_path + 'model.mod'\n else:\n pass\n\n content = ['Kurucz model: ' + 'TEFF {:.1f} GRAVITY {:.5f} LTE\\n'.format(teff, logg)]\n content = content + ['TITLE SDSC GRID [{:+.1f}] VTURB 2.0 KM/S L/H 1.25\\n'.format(m_h)] \n content = content + [' OPACITY IFOP 1 1 1 1 1 1 1 1 1 1 1 1 1 0 1 0 0 0 0 0\\n']\n content = content + [' CONVECTION ON 1.25 TURBULENCE OFF 0.00 0.00 0.00 0.00\\n']\n content = content + [' ABUNDANCE SCALE {:.5f} ABUNDANCE CHANGE 1 {:.5f} 2 {:.5f}\\n'.format(10**m_h, *abun[0:2,1])]\n\n i = 2\n while i <= 98:\n if i != 98: \n content = content + [' ABUNDANCE CHANGE {0:2.0f} {6:6.2f} {1:2.0f} {7:6.2f} {2:2.0f} {8:6.2f} {3:2.0f} {9:6.2f} {4:2.0f} {10:6.2f} {5:2.0f} {11:6.2f}\\n'.format(*abun[i:i+6,0], *abun[i:i+6,1])]\n i += 6\n else:\n content = content + [' ABUNDANCE CHANGE {0:2.0f} {1:6.2f}\\n'.format(abun[i,0], abun[i,1])]\n i += 1\n\n content = content + ['READ DECK6 {:2.0f} RHOX,T,P,XNE,ABROSS,ACCRAD,VTURB\\n'.format(len(model_line))]\n \n # model_line part\n for line in model_line:\n if len(line) == 9:\n content.append(' {:.8E} {:7.1f} {:9.3E} {:9.3E} {:9.3E} {:9.3E} {:9.3E} {:9.3E} {:9.3E}\\n'.format(*line))\n elif len(line) == 7:\n content.append(' {:.8E} {:7.1f} {:9.3E} {:9.3E} {:9.3E} {:9.3E} {:9.3E}\\n'.format(*line))\n\n # End part\n content.append('PRADK {:.4E}\\n'.format(pradk))\n content.append('BEGIN ITERATION 15 COMPLETED\\n')\n \n with open(to_path, 'w') as file:\n file.writelines(content)\n\ndef interpolate_model(teff, logg, m_h, to_path=None, abun_change=None, kurucz_format=False, molecules=None):\n '''\n Interpolate the model in Kurucz format according to given stellar paraeters when necessary.\n \n Parameters\n ----------\n teff : int\n The effective temperature of the model\n logg : float\n logg value of the model\n m_h : float\n [M/H] value (overall metallicity) of the model\n to_path : str, optional\n The path of Kurucz model. If not given then it will be in MOOG_run_path + 'model.mod'\n abun_change : dict of pairs {int:float, ...}\n Abundance change, have to be a dict of pairs of atomic number and [X/Fe] values. 
\n kurucz_format : bool, default False\n If False then the model in MOOG format will be saved; if True then the initial Kurucz format will be saved.\n '''\n \n if to_path == None:\n to_path = MOOG_run_path + 'model.mod'\n \n p = np.array([teff, logg, m_h])\n \n # Find the grid point for interpolation and their coefficients.\n tri_simplex = tri.find_simplex(p)\n if tri_simplex == -1:\n raise ValueError('The given stellar parameters are outside grid points, failed to interpolate.')\n else:\n tri_index = tri.simplices[tri_simplex]\n grid_kurucz_sub = grid_kurucz.loc[tri_index].reset_index(drop=True)\n\n b = tri.transform[tri_simplex][:3].dot(np.transpose(p - tri.transform[tri_simplex][3]))\n b = np.concatenate([b, [1-sum(b)]])\n\n grid_kurucz_use = grid_kurucz_sub[b != 0].reset_index(drop=True)\n\n # Judge if the grid space is too large for interpolation\n if max(grid_kurucz_use['Teff'] >= 35000):\n teff_space_lim = 5000\n else:\n teff_space_lim = 1500\n teff_space_bad = np.ptp(grid_kurucz_use['Teff']) > teff_space_lim\n logg_space_bad = np.ptp(grid_kurucz_use['logg']) > 0.5\n m_h_space_bad = np.ptp(grid_kurucz_use['m_h']) > 0.5\n\n if np.any([teff_space_bad, logg_space_bad, m_h_space_bad]):\n raise ValueError('The separation between grid points is too large, failed to interpolate.')\n\n b = b[b != 0]\n\n if len(grid_kurucz_use) == 1:\n # No interpolation\n model_path = MOOG_file_path + 'model/kurucz/standard/single/teff{:.0f}logg{:.1f}m_h{:+.1f}.dat'.format(*np.array(grid_kurucz_use.loc[0]))\n subprocess.run(['cp', model_path, to_path])\n if not kurucz_format:\n KURUCZ_convert(model_path=to_path, abun_change=abun_change)\n else:\n # Interpolation\n short_64 = np.any(grid_kurucz_use['length'] == 64)\n column_7 = np.any(grid_kurucz_use['column'] == 7)\n for i in range(len(grid_kurucz_use)):\n model_path = MOOG_file_path + 'model/kurucz/standard/single/teff{:.0f}logg{:.1f}m_h{:+.1f}.dat'.format(*np.array(grid_kurucz_use.loc[i]))\n abun_single, model_line_single, pradk_single = read_Kurucz_model(model_path)\n\n # Cut the long model (72) into short (64) if one of the grid points model is short.\n if short_64 and len(model_line_single) == 72:\n model_line_single = model_line_single[8:]\n # Cut the large column (9) into small (7) if one of the grid points model is small.\n if column_7 and model_line_single.shape[1] == 9:\n model_line_single = model_line_single[:,:7]\n if i == 0:\n abun = abun_single * b[i]\n model_line = model_line_single * b[i]\n pradk = pradk_single * b[i]\n else:\n abun = abun + abun_single * b[i]\n model_line = model_line + model_line_single * b[i]\n pradk = pradk + pradk_single * b[i]\n\n # Output the interpolated model\n save_interpo_model(teff, logg, m_h, abun, model_line, pradk, to_path)\n if not kurucz_format:\n KURUCZ_convert(model_path=to_path, abun_change=abun_change, molecules=molecules)\n \ndef KURUCZ_convert(model_path=None, vmicro=2.0, abun_change=None, converted_model_path=None, model_type='atlas9', molecules=None):\n '''\n Convert the model file from Kurucz format in to MOOG format.\n\n Parameters\n ----------\n model_path : str, optional\n The path of donloaded model file. If not given then it will be MOOG_run_path + 'model.mod'\n v_micro : float, default 2.0\n microturbulance velocity of the spectra.\n abun_change : dict of pairs {int:float, ...}\n Abundance change, have to be a dict of pairs of atomic number and [X/Fe] values.\n converted_model_path : str, optional\n The name of converted model. 
Default will be saved into MOOG working folder.\n type : str, default 'atlas9'\n The type if input model, either 'atlas9' or 'atlas12'.\n '''\n if model_path == None:\n model_path = MOOG_run_path + 'model.mod'\n model_file = open(model_path)\n # Convert the model files into MOOG format.\n\n # Read and save the first two lines (except 'TITLE ') into header.\n header = model_file.readline() + model_file.readline()\n teff, logg, m_h, vmicro_model, l_h = [float(s) for s in re.findall(r'[-+]?[0-9]*\\.?[0-9]+', header)]\n\n # Read the abundance change as well as model lines.\n temp = model_file.readline() + model_file.readline() + model_file.readline()\n\n abun_list = ''\n temp = model_file.readline()\n while 'ABUNDANCE CHANGE' in temp[:17]:\n abun_list = abun_list + temp[17:]\n temp = model_file.readline()\n abun = np.array(abun_list.split(), dtype='f').reshape(int(len(abun_list.split())/2), 2)\n\n # Load the abundances from Asplund 2009 (which MOOG used; hard-coded 20180531)\n xabu = [12.00,10.93, 1.05, 1.38, 2.70, 8.43, 7.83, 8.69, 4.56, 7.93,\n 6.24, 7.60, 6.45, 7.51, 5.41, 7.12, 5.50, 6.40, 5.03, 6.34,\n 3.15, 4.95, 3.93, 5.64, 5.43, 7.50, 4.99, 6.22, 4.19, 4.56,\n 3.04, 3.65, 2.30, 3.34, 2.54, 3.25, 2.52, 2.87, 2.21, 2.58,\n 1.46, 1.88,-5.00, 1.75, 0.91, 1.57, 0.94, 1.71, 0.80, 2.04,\n 1.01, 2.18, 1.55, 2.24, 1.08, 2.18, 1.10, 1.58, 0.72, 1.42,\n -5.00, 0.96, 0.52, 1.07, 0.30, 1.10, 0.48, 0.92, 0.10, 0.84,\n 0.10, 0.85,-0.12, 0.85, 0.26, 1.40, 1.38, 1.62, 0.92, 1.17,\n 0.90, 1.75, 0.65,-5.00,-5.00,-5.00,-5.00,-5.00,-5.00, 0.02,\n -5.00,-0.54,-5.00,-5.00,-5.00]\n\n # Read the model lines\n if model_type == 'atlas12':\n for _ in range(22):\n temp = model_file.readline()\n temp = temp.split()\n model_lines = []\n model_linen = int(temp[2])\n for i in range(int(temp[2])):\n model_lines.append(model_file.readline().split()[:7])\n\n # Prepare the microtrubulance value.\n vmicro = '{}E00'.format(vmicro)\n\n # Write the model file.\n # header, abun09, model_lines and vmicro\n if converted_model_path == None:\n c_model_path = MOOG_run_path + '/model.mod'\n else:\n c_model_path = converted_model_path\n c_model_file = open(c_model_path, 'w')\n\n # Header part\n c_model_file.writelines('KURUCZ\\n')\n c_model_file.writelines('TEFF = {:.1f}, LOGG = {:.1f}, M/H = {:.1f}, VTURB = {:.1f}, L/H = {:.2f}\\n'.format(teff, logg, m_h, vmicro_model, l_h))\n\n # Model part\n c_model_file.writelines('ntau= ' + str(model_linen) + '\\n')\n for i in model_lines:\n c_model_file.writelines(' ' + ' '.join(i) + '\\n')\n\n # Microturbulant velocity part\n c_model_file.writelines(' ' + vmicro + '\\n')\n\n # Element shift part\n if abun_change != None:\n abun_change_num = len(abun_change)\n c_model_file.writelines('NATOMS {} {}\\n'.format(abun_change_num, m_h))\n ele_names = list(abun_change.keys())\n ele_names.sort()\n for element in ele_names:\n if element == 2:\n c_model_file.writelines(' {:.2f} {:.2f}\\n'.format(element, xabu[element-1]+abun_change[element]))\n else:\n c_model_file.writelines(' {:.2f} {:.2f}\\n'.format(element, xabu[element-1]+abun_change[element]+float(m_h)))\n else:\n c_model_file.writelines('NATOMS 0 {}\\n'.format(m_h))\n \n # Molecular line switches\n if molecules != None:\n molecules_num = len(molecules)\n c_model_file.writelines('NMOL {}\\n'.format(molecules_num))\n molecules_str = ['{:11.1f}'.format(i) for i in molecules]\n molecules_str = ''.join(molecules_str)\n c_model_file.writelines(molecules_str)\n else:\n c_model_file.writelines('NMOL 0')\n c_model_file.close()\n\n # cv_situation = 
os.path.isfile(c_model_path)\n # return c_model_file, cv_situation "
] |
[
[
"numpy.array",
"numpy.any",
"numpy.transpose",
"numpy.ptp",
"pandas.read_csv",
"scipy.spatial.Delaunay"
]
] |
wolegechu/triton-inference-server
|
[
"6cce1c0627c453171c2f433442af1f254266e7a2"
] |
[
"src/clients/python/experimental_api_v2/examples/simple_grpc_v2_infer_client.py"
] |
[
"#!/usr/bin/env python\n# Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.\n#\n# Redistribution and use in source and binary forms, with or without\n# modification, are permitted provided that the following conditions\n# are met:\n# * Redistributions of source code must retain the above copyright\n# notice, this list of conditions and the following disclaimer.\n# * Redistributions in binary form must reproduce the above copyright\n# notice, this list of conditions and the following disclaimer in the\n# documentation and/or other materials provided with the distribution.\n# * Neither the name of NVIDIA CORPORATION nor the names of its\n# contributors may be used to endorse or promote products derived\n# from this software without specific prior written permission.\n#\n# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ``AS IS'' AND ANY\n# EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE\n# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR\n# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR\n# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,\n# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,\n# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR\n# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY\n# OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT\n# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE\n# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n\nimport argparse\nimport numpy as np\nimport sys\n\nimport tritongrpcclient\n\nif __name__ == '__main__':\n parser = argparse.ArgumentParser()\n parser.add_argument('-v',\n '--verbose',\n action=\"store_true\",\n required=False,\n default=False,\n help='Enable verbose output')\n parser.add_argument('-u',\n '--url',\n type=str,\n required=False,\n default='localhost:8001',\n help='Inference server URL. Default is localhost:8001.')\n\n FLAGS = parser.parse_args()\n try:\n triton_client = tritongrpcclient.InferenceServerClient(url=FLAGS.url,\n verbose=FLAGS.verbose)\n except Exception as e:\n print(\"channel creation failed: \" + str(e))\n sys.exit()\n\n model_name = 'simple'\n\n # Infer\n inputs = []\n outputs = []\n inputs.append(tritongrpcclient.InferInput('INPUT0', [1, 16], \"INT32\"))\n inputs.append(tritongrpcclient.InferInput('INPUT1', [1, 16], \"INT32\"))\n\n # Create the data for the two input tensors. 
Initialize the first\n # to unique integers and the second to all ones.\n input0_data = np.arange(start=0, stop=16, dtype=np.int32)\n input0_data = np.expand_dims(input0_data, axis=0)\n input1_data = np.ones(shape=(1, 16), dtype=np.int32)\n\n # Initialize the data\n inputs[0].set_data_from_numpy(input0_data)\n inputs[1].set_data_from_numpy(input1_data)\n\n outputs.append(tritongrpcclient.InferRequestedOutput('OUTPUT0'))\n outputs.append(tritongrpcclient.InferRequestedOutput('OUTPUT1'))\n\n # Test with outputs\n results = triton_client.infer(model_name=model_name,\n inputs=inputs,\n outputs=outputs,\n headers={'test': '1'})\n\n statistics = triton_client.get_inference_statistics(model_name=model_name)\n print(statistics)\n if len(statistics.model_stats) != 1:\n print(\"FAILED: Inference Statistics\")\n sys.exit(1)\n\n # Get the output arrays from the results\n output0_data = results.as_numpy('OUTPUT0')\n output1_data = results.as_numpy('OUTPUT1')\n\n for i in range(16):\n print(str(input0_data[0][i]) + \" + \" + str(input1_data[0][i]) + \" = \" +\n str(output0_data[0][i]))\n print(str(input0_data[0][i]) + \" - \" + str(input1_data[0][i]) + \" = \" +\n str(output1_data[0][i]))\n if (input0_data[0][i] + input1_data[0][i]) != output0_data[0][i]:\n print(\"sync infer error: incorrect sum\")\n sys.exit(1)\n if (input0_data[0][i] - input1_data[0][i]) != output1_data[0][i]:\n print(\"sync infer error: incorrect difference\")\n sys.exit(1)\n\n # Test with no outputs\n results = triton_client.infer(model_name=model_name,\n inputs=inputs,\n outputs=None)\n\n # Get the output arrays from the results\n output0_data = results.as_numpy('OUTPUT0')\n output1_data = results.as_numpy('OUTPUT1')\n\n for i in range(16):\n print(str(input0_data[0][i]) + \" + \" + str(input1_data[0][i]) + \" = \" +\n str(output0_data[0][i]))\n print(str(input0_data[0][i]) + \" - \" + str(input1_data[0][i]) + \" = \" +\n str(output1_data[0][i]))\n if (input0_data[0][i] + input1_data[0][i]) != output0_data[0][i]:\n print(\"sync infer error: incorrect sum\")\n sys.exit(1)\n if (input0_data[0][i] - input1_data[0][i]) != output1_data[0][i]:\n print(\"sync infer error: incorrect difference\")\n sys.exit(1)\n\n print('PASS: infer')\n"
] |
[
[
"numpy.ones",
"numpy.arange",
"numpy.expand_dims"
]
] |
thirtywang/OpenPNM
|
[
"e55ee7ae69a8be3e2b0e6bf24c9ff92b6d24e16a",
"e55ee7ae69a8be3e2b0e6bf24c9ff92b6d24e16a"
] |
[
"OpenPNM/Network/models/pore_topology.py",
"OpenPNM/Phases/models/contact_angle.py"
] |
[
"r\"\"\"\n===============================================================================\npore_topology -- functions for monitoring and adjusting topology\n===============================================================================\n\n\"\"\"\nimport scipy as _sp\nfrom OpenPNM.Base import logging\nlogger = logging.getLogger(__name__)\n\n\ndef get_subscripts(network, shape, **kwargs):\n r\"\"\"\n Return the 3D subscripts (i,j,k) into the cubic network\n\n Parameters\n ----------\n shape : list\n The (i,j,k) shape of the network in number of pores in each direction\n\n \"\"\"\n if network.num_pores('internal') != _sp.prod(shape):\n logger.error('Supplied shape does not match Network size, cannot proceed')\n else:\n template = _sp.atleast_3d(_sp.empty(shape))\n a = _sp.indices(_sp.shape(template))\n i = a[0].flatten()\n j = a[1].flatten()\n k = a[2].flatten()\n ind = _sp.vstack((i, j, k)).T\n vals = _sp.ones((network.Np, 3))*_sp.nan\n vals[network.pores('internal')] = ind\n return vals\n\n\ndef adjust_spacing(network, new_spacing, **kwargs):\n r\"\"\"\n Adjust the the pore-to-pore lattice spacing on a cubic network\n\n Parameters\n ----------\n new_spacing : float\n The new lattice spacing to apply\n\n Notes\n -----\n At present this method only applies a uniform spacing in all directions.\n This is a limiation of OpenPNM Cubic Networks in general, and not of the\n method.\n \"\"\"\n coords = network['pore.coords']\n try:\n spacing = network._spacing\n coords = coords/spacing*new_spacing\n network._spacing = new_spacing\n except:\n pass\n return coords\n\n\ndef reduce_coordination(network, z, mode='random', **kwargs):\n r\"\"\"\n Reduce the coordination number to the specified z value\n\n Parameters\n ----------\n z : int\n The coordination number or number of throats connected a pore\n\n mode : string, optional\n Controls the logic used to trim connections. Options are:\n\n - 'random': (default) Throats will be randomly removed to achieve a\n coordination of z\n - 'max': All pores will be adjusted to have a maximum coordination of z\n (not implemented yet)\n\n Returns\n -------\n A label array indicating which throats should be trimmed to achieve desired\n coordination.\n\n Notes\n -----\n Pores with only 1 throat will be ignored in all calculations since these\n are generally boundary pores.\n\n \"\"\"\n T_trim = ~network['throat.all']\n T_nums = network.num_neighbors(network.pores())\n # Find protected throats\n T_keep = network.find_neighbor_throats(pores=(T_nums == 1))\n if mode == 'random':\n z_ave = _sp.average(T_nums[T_nums > 1])\n f_trim = (z_ave - z)/z_ave\n T_trim = _sp.rand(network.Nt) < f_trim\n T_trim = T_trim*(~network.tomask(throats=T_keep))\n if mode == 'max':\n pass\n return T_trim\n",
"r\"\"\"\n===============================================================================\nSubmodule -- contact_angle\n===============================================================================\n\n\"\"\"\nimport scipy as sp\n\n\ndef young(phase, sigma_sg, sigma_sl,\n surface_tension='pore.surface_tension', **kwargs):\n r\"\"\"\n Calculate contact angle using Young's equation\n\n Notes\n -----\n Young's equation is: sigma_lg*Cos(theta) = sigma_sg - sigma_sl\n where\n sigma_lg is the liquid-gas surface tension [N/m]\n sigma_sg is the solid-gas surface tension [N/m]\n sigma_sl is the solid-liquid interfacial tension [J/m^2]\n theta is the Young contact angle [rad]\n\n \"\"\"\n if surface_tension.split('.')[0] == 'pore':\n sigma = phase[surface_tension]\n sigma = phase.interpolate_data(data=sigma)\n else:\n sigma = phase[surface_tension]\n theta = sp.arccos((sigma_sg - sigma_sl)/phase[surface_tension])\n theta = sp.rad2deg(theta)\n return theta\n"
] |
[
[
"scipy.shape",
"scipy.vstack",
"scipy.rand",
"scipy.ones",
"scipy.prod",
"scipy.empty",
"scipy.average"
],
[
"scipy.rad2deg",
"scipy.arccos"
]
] |
Global19/qiskit-machine-learning
|
[
"479f9f03d2b6c1fbab936b5d6bfaee6eaee465b0"
] |
[
"test/connectors/test_torch_connector.py"
] |
[
"# This code is part of Qiskit.\n#\n# (C) Copyright IBM 2021.\n#\n# This code is licensed under the Apache License, Version 2.0. You may\n# obtain a copy of this license in the LICENSE.txt file in the root directory\n# of this source tree or at http://www.apache.org/licenses/LICENSE-2.0.\n#\n# Any modifications or derivative works of this code must retain this\n# copyright notice, and modified files need to carry a notice indicating\n# that they have been altered from the originals.\n\n\"\"\"Test Torch Connector.\"\"\"\n\nimport unittest\n\nfrom typing import List\n\nfrom test import QiskitMachineLearningTestCase\n\nimport numpy as np\n\nfrom ddt import ddt, data\n\ntry:\n from torch import Tensor\nexcept ImportError:\n class Tensor: # type: ignore\n \"\"\" Empty Tensor class\n Replacement if torch.Tensor is not present.\n \"\"\"\n pass\n\nfrom qiskit import QuantumCircuit\nfrom qiskit.providers.aer import QasmSimulator, StatevectorSimulator\nfrom qiskit.exceptions import MissingOptionalLibraryError\nfrom qiskit.circuit import Parameter\nfrom qiskit.utils import QuantumInstance\nfrom qiskit.opflow import StateFn, ListOp, PauliSumOp\n\nfrom qiskit_machine_learning import QiskitMachineLearningError\nfrom qiskit_machine_learning.neural_networks import CircuitQNN, TwoLayerQNN, OpflowQNN\nfrom qiskit_machine_learning.connectors import TorchConnector\n\n\n@ddt\nclass TestTorchConnector(QiskitMachineLearningTestCase):\n \"\"\"Torch Connector Tests.\"\"\"\n\n def setUp(self):\n super().setUp()\n\n # specify quantum instances\n self.sv_quantum_instance = QuantumInstance(StatevectorSimulator())\n self.qasm_quantum_instance = QuantumInstance(QasmSimulator(), shots=100)\n\n def validate_output_shape(self, model: TorchConnector, test_data: List[Tensor]) -> None:\n \"\"\"Creates a Linear PyTorch module with the same in/out dimensions as the given model,\n applies the list of test input data to both, and asserts that they have the same\n output shape.\n\n Args:\n model: model to be tested\n test_data: list of test input tensors\n\n Raises:\n MissingOptionalLibraryError: torch not installed\n QiskitMachineLearningError: Invalid input.\n \"\"\"\n try:\n from torch.nn import Linear\n except ImportError as ex:\n raise MissingOptionalLibraryError(\n libname='Pytorch',\n name='TorchConnector',\n pip_install=\"pip install 'qiskit-machine-learning[torch]'\") from ex\n\n # create benchmark model\n in_dim = model.neural_network.num_inputs\n if len(model.neural_network.output_shape) != 1:\n raise QiskitMachineLearningError('Function only works for one dimensional output')\n out_dim = model.neural_network.output_shape[0]\n linear = Linear(in_dim, out_dim)\n\n # iterate over test data and validate behavior of model\n for x in test_data:\n\n # test linear model and track whether it failed or store the output shape\n c_worked = True\n try:\n c_shape = linear(x).shape\n except Exception: # pylint: disable=broad-except\n c_worked = False\n\n # test quantum model and track whether it failed or store the output shape\n q_worked = True\n try:\n q_shape = model(x).shape\n except Exception: # pylint: disable=broad-except\n q_worked = False\n\n # compare results and assert that the behavior is equal\n self.assertEqual(c_worked, q_worked)\n if c_worked:\n self.assertEqual(c_shape, q_shape)\n\n def validate_backward_pass(self, model: TorchConnector) -> None:\n \"\"\"Uses PyTorch to validate the backward pass / autograd.\n\n Args:\n model: The model to be tested.\n \"\"\"\n try:\n import torch\n except ImportError as ex:\n 
self.skipTest('pytorch not installed, skipping test: {}'.format(str(ex)))\n\n # test autograd\n func = TorchConnector._TorchNNFunction.apply # (input, weights, qnn)\n input_data = (\n torch.randn(model.neural_network.num_inputs, dtype=torch.double, requires_grad=True),\n torch.randn(model.neural_network.num_weights, dtype=torch.double, requires_grad=True),\n model.neural_network,\n False\n )\n test = torch.autograd.gradcheck(func, input_data, eps=1e-4, atol=1e-3) # type: ignore\n self.assertTrue(test)\n\n @data(\n 'sv', 'qasm'\n )\n def test_opflow_qnn_1_1(self, q_i):\n \"\"\" Test Torch Connector + Opflow QNN with input/output dimension 1/1.\"\"\"\n\n if q_i == 'sv':\n quantum_instance = self.sv_quantum_instance\n else:\n quantum_instance = self.qasm_quantum_instance\n\n # construct simple feature map\n param_x = Parameter('x')\n feature_map = QuantumCircuit(1, name='fm')\n feature_map.ry(param_x, 0)\n\n # construct simple feature map\n param_y = Parameter('y')\n ansatz = QuantumCircuit(1, name='vf')\n ansatz.ry(param_y, 0)\n\n # construct QNN with statevector simulator\n qnn = TwoLayerQNN(1, feature_map, ansatz, quantum_instance=quantum_instance)\n try:\n model = TorchConnector(qnn)\n\n test_data = [\n Tensor(1),\n Tensor([1]),\n Tensor([1, 2]),\n Tensor([[1], [2]]),\n Tensor([[[1], [2]], [[3], [4]]])\n ]\n\n # test model\n self.validate_output_shape(model, test_data)\n if q_i == 'sv':\n self.validate_backward_pass(model)\n except MissingOptionalLibraryError as ex:\n self.skipTest(str(ex))\n\n @data(\n 'sv', 'qasm'\n )\n def test_opflow_qnn_2_1(self, q_i):\n \"\"\" Test Torch Connector + Opflow QNN with input/output dimension 2/1.\"\"\"\n\n if q_i == 'sv':\n quantum_instance = self.sv_quantum_instance\n else:\n quantum_instance = self.qasm_quantum_instance\n\n # construct QNN\n qnn = TwoLayerQNN(2, quantum_instance=quantum_instance)\n try:\n model = TorchConnector(qnn)\n\n test_data = [\n Tensor(1),\n Tensor([1, 2]),\n Tensor([[1, 2]]),\n Tensor([[1], [2]]),\n Tensor([[[1], [2]], [[3], [4]]])\n ]\n\n # test model\n self.validate_output_shape(model, test_data)\n if q_i == 'sv':\n self.validate_backward_pass(model)\n except MissingOptionalLibraryError as ex:\n self.skipTest(str(ex))\n\n @data(\n 'sv', 'qasm'\n )\n def test_opflow_qnn_2_2(self, q_i):\n \"\"\" Test Torch Connector + Opflow QNN with input/output dimension 2/2.\"\"\"\n\n if q_i == 'sv':\n quantum_instance = self.sv_quantum_instance\n else:\n quantum_instance = self.qasm_quantum_instance\n\n # construct parametrized circuit\n params_1 = [Parameter('input1'), Parameter('weight1')]\n qc_1 = QuantumCircuit(1)\n qc_1.h(0)\n qc_1.ry(params_1[0], 0)\n qc_1.rx(params_1[1], 0)\n qc_sfn_1 = StateFn(qc_1)\n\n # construct cost operator\n h_1 = StateFn(PauliSumOp.from_list([('Z', 1.0), ('X', 1.0)]))\n\n # combine operator and circuit to objective function\n op_1 = ~h_1 @ qc_sfn_1\n\n # construct parametrized circuit\n params_2 = [Parameter('input2'), Parameter('weight2')]\n qc_2 = QuantumCircuit(1)\n qc_2.h(0)\n qc_2.ry(params_2[0], 0)\n qc_2.rx(params_2[1], 0)\n qc_sfn_2 = StateFn(qc_2)\n\n # construct cost operator\n h_2 = StateFn(PauliSumOp.from_list([('Z', 1.0), ('X', 1.0)]))\n\n # combine operator and circuit to objective function\n op_2 = ~h_2 @ qc_sfn_2\n\n op = ListOp([op_1, op_2])\n\n qnn = OpflowQNN(op, [params_1[0], params_2[0]], [params_1[1], params_2[1]],\n quantum_instance=quantum_instance)\n try:\n model = TorchConnector(qnn)\n\n test_data = [\n Tensor(1),\n Tensor([1, 2]),\n Tensor([[1], [2]]),\n Tensor([[1, 2], [3, 4]])\n 
]\n\n # test model\n self.validate_output_shape(model, test_data)\n if q_i == 'sv':\n self.validate_backward_pass(model)\n except MissingOptionalLibraryError as ex:\n self.skipTest(str(ex))\n\n @data(\n # interpret, output_shape, sparse, quantum_instance\n (None, None, False, 'sv'),\n (None, None, True, 'sv'),\n (lambda x: np.sum(x) % 2, 2, False, 'sv'),\n (lambda x: np.sum(x) % 2, 2, True, 'sv'),\n (None, None, False, 'qasm'),\n (None, None, True, 'qasm'),\n (lambda x: np.sum(x) % 2, 2, False, 'qasm'),\n (lambda x: np.sum(x) % 2, 2, True, 'qasm'),\n )\n def test_circuit_qnn_1_1(self, config):\n \"\"\"Torch Connector + Circuit QNN with various interpret/sparse settings\n and input/output shape 1/1.\"\"\"\n\n interpret, output_shape, sparse, q_i = config\n if q_i == 'sv':\n quantum_instance = self.sv_quantum_instance\n else:\n quantum_instance = self.qasm_quantum_instance\n\n qc = QuantumCircuit(1)\n\n # construct simple feature map\n param_x = Parameter('x')\n qc.ry(param_x, 0)\n\n # construct simple ansatz\n param_y = Parameter('y')\n qc.ry(param_y, 0)\n\n qnn = CircuitQNN(qc, [param_x], [param_y],\n sparse=sparse,\n sampling=False,\n interpret=interpret,\n output_shape=output_shape,\n quantum_instance=quantum_instance)\n try:\n model = TorchConnector(qnn)\n\n test_data = [\n Tensor(1),\n Tensor([1, 2]),\n Tensor([[1], [2]]),\n Tensor([[[1], [2]], [[3], [4]]])\n ]\n\n # test model\n self.validate_output_shape(model, test_data)\n if q_i == 'sv':\n self.validate_backward_pass(model)\n except MissingOptionalLibraryError as ex:\n self.skipTest(str(ex))\n\n @data(\n # interpret, output_shape, sparse, quantum_instance\n (None, None, False, 'sv'),\n (None, None, True, 'sv'),\n (lambda x: np.sum(x) % 2, 2, False, 'sv'),\n (lambda x: np.sum(x) % 2, 2, True, 'sv'),\n (None, None, False, 'qasm'),\n (None, None, True, 'qasm'),\n (lambda x: np.sum(x) % 2, 2, False, 'qasm'),\n (lambda x: np.sum(x) % 2, 2, True, 'qasm'),\n )\n def test_circuit_qnn_1_8(self, config):\n \"\"\"Torch Connector + Circuit QNN with various interpret/sparse settings\n and input/output shape 1/8.\"\"\"\n\n interpret, output_shape, sparse, q_i = config\n if q_i == 'sv':\n quantum_instance = self.sv_quantum_instance\n else:\n quantum_instance = self.qasm_quantum_instance\n\n qc = QuantumCircuit(3)\n\n # construct simple feature map\n param_x = Parameter('x')\n qc.ry(param_x, range(3))\n\n # construct simple ansatz\n param_y = Parameter('y')\n qc.ry(param_y, range(3))\n\n qnn = CircuitQNN(qc, [param_x], [param_y],\n sparse=sparse,\n sampling=False,\n interpret=interpret,\n output_shape=output_shape,\n quantum_instance=quantum_instance)\n try:\n model = TorchConnector(qnn)\n\n test_data = [\n Tensor(1),\n Tensor([1, 2]),\n Tensor([[1], [2]]),\n Tensor([[[1], [2]], [[3], [4]]])\n ]\n\n # test model\n self.validate_output_shape(model, test_data)\n if q_i == 'sv':\n self.validate_backward_pass(model)\n except MissingOptionalLibraryError as ex:\n self.skipTest(str(ex))\n\n @data(\n # interpret, output_shape, sparse, quantum_instance\n (None, None, False, 'sv'),\n (None, None, True, 'sv'),\n (lambda x: np.sum(x) % 2, 2, False, 'sv'),\n (lambda x: np.sum(x) % 2, 2, True, 'sv'),\n (None, None, False, 'qasm'),\n (None, None, True, 'qasm'),\n (lambda x: np.sum(x) % 2, 2, False, 'qasm'),\n (lambda x: np.sum(x) % 2, 2, True, 'qasm'),\n )\n def test_circuit_qnn_2_4(self, config):\n \"\"\"Torch Connector + Circuit QNN with various interpret/sparse settings\n and input/output shape 2/4.\"\"\"\n\n interpret, output_shape, sparse, q_i = config\n if q_i 
== 'sv':\n quantum_instance = self.sv_quantum_instance\n else:\n quantum_instance = self.qasm_quantum_instance\n\n qc = QuantumCircuit(2)\n\n # construct simple feature map\n param_x_1, param_x_2 = Parameter('x1'), Parameter('x2')\n qc.ry(param_x_1, range(2))\n qc.ry(param_x_2, range(2))\n\n # construct simple ansatz\n param_y = Parameter('y')\n qc.ry(param_y, range(2))\n\n qnn = CircuitQNN(qc, [param_x_1, param_x_2], [param_y],\n sparse=sparse,\n sampling=False,\n interpret=interpret,\n output_shape=output_shape,\n quantum_instance=quantum_instance)\n try:\n model = TorchConnector(qnn)\n\n test_data = [\n Tensor(1),\n Tensor([1, 2]),\n Tensor([[1], [2]]),\n Tensor([[1, 2], [3, 4]]),\n Tensor([[[1], [2]], [[3], [4]]])\n ]\n\n # test model\n self.validate_output_shape(model, test_data)\n if q_i == 'sv':\n self.validate_backward_pass(model)\n except MissingOptionalLibraryError as ex:\n self.skipTest(str(ex))\n\n @data(\n # interpret\n (None),\n (lambda x: np.sum(x) % 2)\n )\n def test_circuit_qnn_sampling(self, interpret):\n \"\"\"Test Torch Connector + Circuit QNN for sampling.\"\"\"\n\n qc = QuantumCircuit(2)\n\n # construct simple feature map\n param_x1, param_x2 = Parameter('x1'), Parameter('x2')\n qc.ry(param_x1, range(2))\n qc.ry(param_x2, range(2))\n\n # construct simple ansatz\n param_y = Parameter('y')\n qc.ry(param_y, range(2))\n\n qnn = CircuitQNN(qc, [param_x1, param_x2], [param_y],\n sparse=False,\n sampling=True,\n interpret=interpret,\n output_shape=None,\n quantum_instance=self.qasm_quantum_instance)\n try:\n model = TorchConnector(qnn)\n\n test_data = [\n Tensor([2, 2]),\n Tensor([[1, 1], [2, 2]])\n ]\n for i, x in enumerate(test_data):\n if i == 0:\n self.assertEqual(model(x).shape, qnn.output_shape)\n else:\n shape = model(x).shape\n self.assertEqual(shape, (len(x), *qnn.output_shape))\n except MissingOptionalLibraryError as ex:\n self.skipTest(str(ex))\n\n def test_batch_gradients(self):\n \"\"\"Test backward pass for batch input.\"\"\"\n\n # construct random data set\n num_inputs = 2\n num_samples = 10\n x = np.random.rand(num_samples, num_inputs)\n\n # set up QNN\n qnn = TwoLayerQNN(num_qubits=num_inputs, quantum_instance=self.sv_quantum_instance)\n\n # set up PyTorch module\n initial_weights = np.random.rand(qnn.num_weights)\n model = TorchConnector(qnn, initial_weights=initial_weights)\n\n # test single gradient\n w = model.weights.detach().numpy()\n res_qnn = qnn.forward(x[0, :], w)\n\n # construct finite difference gradient for weights\n eps = 1e-4\n grad = np.zeros(w.shape)\n for k in range(len(w)):\n delta = np.zeros(w.shape)\n delta[k] += eps\n\n f_1 = qnn.forward(x[0, :], w + delta)\n f_2 = qnn.forward(x[0, :], w - delta)\n\n grad[k] = (f_1 - f_2) / (2*eps)\n\n grad_qnn = qnn.backward(x[0, :], w)[1][0, 0, :]\n self.assertAlmostEqual(np.linalg.norm(grad - grad_qnn), 0.0, places=4)\n\n model.zero_grad()\n res_model = model(Tensor(x[0, :]))\n self.assertAlmostEqual(np.linalg.norm(res_model.detach().numpy() - res_qnn[0]), 0.0,\n places=4)\n res_model.backward()\n grad_model = model.weights.grad\n self.assertAlmostEqual(np.linalg.norm(grad_model.detach().numpy() - grad_qnn), 0.0,\n places=4)\n\n # test batch input\n batch_grad = np.zeros((*w.shape, num_samples, 1))\n for k in range(len(w)):\n delta = np.zeros(w.shape)\n delta[k] += eps\n\n f_1 = qnn.forward(x, w + delta)\n f_2 = qnn.forward(x, w - delta)\n\n batch_grad[k] = (f_1 - f_2) / (2*eps)\n\n batch_grad = np.sum(batch_grad, axis=1)\n batch_grad_qnn = np.sum(qnn.backward(x, w)[1], axis=0)\n 
self.assertAlmostEqual(np.linalg.norm(batch_grad - batch_grad_qnn.transpose()),\n 0.0, places=4)\n\n model.zero_grad()\n batch_res_model = sum(model(Tensor(x)))\n batch_res_model.backward()\n self.assertAlmostEqual(\n np.linalg.norm(model.weights.grad.numpy() - batch_grad.transpose()[0]), 0.0, places=4)\n\n\nif __name__ == '__main__':\n unittest.main()\n"
] |
[
[
"torch.nn.Linear",
"numpy.linalg.norm",
"numpy.random.rand",
"numpy.zeros",
"numpy.sum",
"torch.autograd.gradcheck",
"torch.Tensor",
"torch.randn"
]
] |
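Editor's note: the test file above repeatedly validates analytic QNN gradients against central finite differences (see test_batch_gradients). A minimal self-contained sketch of that checking pattern in plain NumPy follows; the toy function f and its hand-derived analytic_grad are illustrative stand-ins for qnn.forward/qnn.backward, not part of qiskit-machine-learning:

import numpy as np

def f(w):
    # toy scalar "forward pass"
    return np.sin(w[0]) * np.cos(w[1])

def analytic_grad(w):
    # gradient of f, derived by hand
    return np.array([np.cos(w[0]) * np.cos(w[1]),
                     -np.sin(w[0]) * np.sin(w[1])])

def finite_diff_grad(func, w, eps=1e-4):
    # central difference, the same scheme used in test_batch_gradients
    grad = np.zeros_like(w)
    for k in range(len(w)):
        delta = np.zeros_like(w)
        delta[k] = eps
        grad[k] = (func(w + delta) - func(w - delta)) / (2 * eps)
    return grad

w = np.random.rand(2)
assert np.linalg.norm(finite_diff_grad(f, w) - analytic_grad(w)) < 1e-6

The tight 1e-6 tolerance works here because the central difference has O(eps^2) error on a smooth function; the tests above use a looser places=4 check since QNN evaluation (especially on the qasm simulator) is noisier.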
ClaireGayral/formation_openclassroom
|
[
"ad3774064f71407ac6310b5b4b165bb5ac48e769"
] |
[
"P5_gayral_claire/script06_reduce_dim.py"
] |
[
"import matplotlib.pyplot as plt\nfrom matplotlib.collections import LineCollection\nimport numpy as np\nimport pandas as pd \nimport matplotlib._color_data as mcd\nimport random\n\n\n##\n## PCA\n## \ndef get_str_vars(list_of_var):\n # from a list of str, return a sentence\n # if the line is too long (sup to 40), cut\n tmp = list_of_var.copy()\n res = \"\"\n len_line = 0\n while tmp : \n var = tmp.pop()\n res = res+ var +str(\", \")\n len_line += len(var)\n if len_line > 40:\n res = res +\"\\n\"\n len_line = 0\n return(res)\n\ndef draw_cluster_legend(ax2,clustering, corresp_color_dict):\n ## plot the legend with colored arrow\n # number of clusters : \n K = len(clustering.values.categories)\n my_color = clustering.values.categories.map(corresp_color_dict)\n # plot parallel arrows :\n ax2.quiver(np.zeros(K),np.arange(0,K),np.ones(K),np.zeros(K),\n color = my_color)\n # plot legend text next to the respective arrow :\n for k in clustering.values.categories :\n cluster_var = get_str_vars(list(clustering[clustering == k].index.values))\n ax2.text(0.2, k , str(cluster_var), fontsize='11',\n ha='left', va='center' , alpha=1)\n # set limits : \n ax2.set_xlim([-0.1,2])\n ax2.set_ylim([-1.1, K+0.1])\n ax2.set_title(\"Clustering legend\")\n # remove axis :\n ax2.get_xaxis().set_visible(False)\n ax2.get_yaxis().set_visible(False)\n plt.axis(\"off\")\n return(ax2)\n\ndef display_circles(pcs, n_comp, my_meth, axis_ranks, labels=None, \n label_rotation=0, lims=None, clustering = None, \n figsize = (9,6), fig_name=None):\n ## set coloration palette : \n if clustering is not None : \n my_color_set = ['#154406', '#15b01a', '#f97306', '#c0022f',\n '#0343df', '#fe02a2', '#8b3103', '#7e1e9c', '#017371',\n '#380282', '#6b8ba4', '#75bbfd', '#ff81c0', '#c79fef',\n '#ff073a', '#fdaa48', '#fea993', '#fe7b7c', '#c20078',\n '#029386', '#677a04', '#b25f03', '#070d0d', '#ffdf22']\n corresp_color_dict = dict(zip(clustering.values.categories, my_color_set))\n my_color = clustering.values.map(corresp_color_dict)\n\n else : \n my_color = \"grey\"\n ## set global plot arguments : \n plot_kwargs = {\"alpha\":1, \"color\":my_color}\n ## draw the correlation circle for my_meth : \n for d1, d2 in axis_ranks: # On affiche les 3 premiers plans factoriels, donc les 6 premières composantes\n if d2 < n_comp:\n ## initialise figure\n figsize_x = list(figsize)[0]\n figsize_y = list(figsize)[1]\n # figsize = (figsize_x, figsize_y)\n if clustering is not None : \n figsize_x = 2*figsize_x\n fig = plt.figure(figsize = (figsize_x, figsize_y))\n ## affichage de la legende du clustering en couleur : \n ax2 = fig.add_subplot(1,2,2)\n draw_cluster_legend(ax2, clustering, corresp_color_dict)\n # initialisation de la figure \"cercle\"\n ax1 = fig.add_subplot(1,2,1)\n else : \n fig = plt.figure(figsize = (figsize_x, figsize_y))\n ax1 = fig.add_subplot(1,1,1)\n\n ## détermination des limites du graphique\n if lims is not None :\n xmin, xmax, ymin, ymax = lims\n elif pcs.shape[1] < 30 :\n xmin, xmax, ymin, ymax = -1, 1, -1, 1\n else :\n xmin, xmax, ymin, ymax = min(pcs[d1,:]), max(pcs[d1,:]), min(pcs[d2,:]), max(pcs[d2,:])\n\n ## affichage des fleches :\n if pcs.shape[1] < 30 :\n ax1.quiver(np.zeros(pcs.shape[1]), np.zeros(pcs.shape[1]), ## depart points\n pcs[d1,:], pcs[d2,:], ## movement in each direction\n angles='xy', scale_units='xy',**plot_kwargs)\n # (voir la doc : https://matplotlib.org/api/_as_gen/matplotlib.pyplot.quiver.html)\n else: # s'il y a plus de 30 flèches, on n'affiche pas le triangle à leur extrémité\n lines = [[[0,0],[x,y]] 
for x,y in pcs[[d1,d2]].T]\n ax1.add_collection(LineCollection(lines, axes=ax1, alpha=.1, color=my_color))\n\n ## display the variable names \n if labels is not None: \n for i,(x, y) in enumerate(pcs[[d1,d2]].T):\n if x >= xmin and x <= xmax and y >= ymin and y <= ymax :\n ax1.text(x, y, labels[i], fontsize='14', ha='center', \n va='center', rotation=label_rotation, color=\"blue\", alpha=0.5)\n\n ## draw the unit circle\n circle = plt.Circle((0,0), 1, facecolor='none', edgecolor='b')\n plt.gca().add_artist(circle)\n\n ## set the plot limits\n ax1.set_xlim(xmin, xmax)\n ax1.set_ylim(ymin, ymax)\n\n ## draw the horizontal and vertical lines\n plt.plot([-1, 1], [0, 0], color='grey', ls='--')\n plt.plot([0, 0], [-1, 1], color='grey', ls='--')\n \n if hasattr(my_meth,\"explained_variance_ratio_\") : \n ## axis labels, with the percentage of explained inertia\n ax1.set_xlabel('F{} ({}%)'.format(d1+1, \n round(100*my_meth.explained_variance_ratio_[d1],1)))\n ax1.set_ylabel('F{} ({}%)'.format(d2+1, \n round(100*my_meth.explained_variance_ratio_[d2],1)))\n else : \n ## axis labels\n ax1.set_xlabel('F{}'.format(d1+1))\n ax1.set_ylabel('F{}'.format(d2+1))\n ax1.set_title(\"Cercle des corrélations (F{} et F{})\".format(d1+1, d2+1))\n if fig_name is not None :\n # NB: res_path is assumed to be defined at module level by the calling notebook\n plt.savefig(res_path+\"figures/\"+fig_name+str(d1+1)+str(d2+1)+\".jpg\")\n\n # plt.show(block=False)\n \n \ndef display_factorial_planes(X_projected, n_comp, my_meth, axis_ranks, ind_labels=None, alpha=1, clustering = None, figsize = (12,10)):\n # args are as defined just above \n plot_kwargs = {\"marker\":\"x\", \"alpha\":alpha, 's':20}#, \"label\" : clustering.values.categories}\n # set dict of color if clustering : \n if clustering is not None : \n my_color_set = ['#154406', '#15b01a', '#fffd01', '#f97306', '#c0022f', '#0343df', '#fe02a2', '#8b3103', \n '#7e1e9c', '#017371', '#380282', '#6b8ba4', '#75bbfd', '#ff81c0', '#c79fef', '#ff073a', \n '#fdaa48', '#fea993', '#fe7b7c', '#c20078', '#029386', '#677a04', '#b25f03', '#070d0d']\n corresp_color_dict = dict(zip(clustering.values.categories, my_color_set))\n\n for d1,d2 in axis_ranks:\n if d2 < n_comp:\n ax1 = \"axis\"+ str(d1+1)\n ax2 = \"axis\"+ str(d2+1)\n # initialise the figure \n fig = plt.figure(figsize=figsize)\n if clustering is not None :\n for k in clustering.values.categories:\n cluster_index = clustering[clustering==k].index\n # print(X_projected.loc[cluster_index, ax1])\n plt.scatter(X_projected.loc[cluster_index, ax1],X_projected.loc[cluster_index, ax2], \n color=corresp_color_dict[k], label = k, **plot_kwargs)\n plt.legend()\n\n else : \n plt.scatter(X_projected[ax1], X_projected[ax2], **plot_kwargs)\n # display the point labels\n if ind_labels is not None:\n for i,(x,y) in enumerate(X_projected[:,[d1,d2]]):\n plt.text(x, y, ind_labels[i],\n fontsize='14', ha='center',va='center') \n\n # set the plot limits\n boundary = np.max(np.abs(X_projected.values[:, [d1,d2]])) * 1.1\n plt.xlim([-boundary,boundary])\n plt.ylim([-boundary,boundary])\n\n # draw the horizontal and vertical lines\n plt.plot([-100, 100], [0, 0], color='grey', ls='--')\n plt.plot([0, 0], [-100, 100], color='grey', ls='--')\n\n if hasattr(my_meth,\"explained_variance_ratio_\") : \n ## axis labels, with the percentage of explained inertia\n plt.xlabel('F{} ({}%)'.format(d1+1, \n round(100*my_meth.explained_variance_ratio_[d1],1)))\n plt.ylabel('F{} ({}%)'.format(d2+1, \n round(100*my_meth.explained_variance_ratio_[d2],1)))\n else : \n ## axis labels\n plt.xlabel('F{}'.format(d1+1))\n plt.ylabel('F{}'.format(d2+1))\n \n plt.title(\"Projection des individus (sur F{} et F{})\".format(d1+1, d2+1))\n # plt.show(block=False)\n \ndef plot_PCA_proj_of_clusters(X_proj, my_meth, axis_ranks, ind_labels=None, \n alpha=1, clustering=None, figsize=(12,10)):\n ## \n ## first use in P4 \n ## \n plot_kwargs = {\"marker\":\"x\", \"alpha\":alpha, 's':10}#, \"label\" : clustering.values.categories}\n n_comp = max(list(sum(axis_ranks, ())))+1\n if clustering is None :\n clustering = pd.Series(np.ones(X_proj.shape[0]),\n index = X_proj.index,dtype=\"category\")\n ## add yellow in color to match with nutri-score : (yellow = '#ffdf22')\n my_color_set = ['#154406', '#15b01a', '#ffdf22', '#f97306', '#c0022f',\n '#0343df', '#fe02a2', '#8b3103', '#7e1e9c', '#017371',\n '#380282', '#6b8ba4', '#75bbfd', '#ff81c0', '#c79fef',\n '#ff073a', '#fdaa48', '#fea993', '#fe7b7c', '#c20078',\n '#029386', '#677a04', '#b25f03', '#070d0d', '#ffdf22']\n my_color_set = my_color_set * (X_proj.shape[0]//len(my_color_set) + 1) \n corresp_color_dict = dict(zip(clustering.values.categories, my_color_set))\n plot_rank = [1,len(axis_ranks),1]\n\n for cluster in clustering.cat.categories:\n selected_index = clustering[clustering==cluster].index\n sub_X_proj = X_proj.loc[selected_index,:]\n count_fig = 1\n plt.figure(figsize=figsize)\n for d1,d2 in axis_ranks:\n if d2 < n_comp:\n plot_rank[2] = count_fig \n plt.subplot(*plot_rank)\n ax1 = \"axis\"+ str(d1+1)\n ax2 = \"axis\"+ str(d2+1)\n # initialise the figure \n\n plt.scatter(sub_X_proj.loc[:, ax1],sub_X_proj.loc[:, ax2], \n color=corresp_color_dict[cluster], label = cluster, **plot_kwargs)\n plt.legend()\n\n plt.xlim([-10,10])\n plt.ylim([-20,20])\n # draw the horizontal and vertical lines\n plt.plot([-100, 100], [0, 0], color='grey', ls='--')\n plt.plot([0, 0], [-100, 100], color='grey', ls='--')\n\n # axis labels, with the percentage of explained inertia\n plt.xlabel('F{} ({}%)'.format(d1+1, round(100*my_meth.explained_variance_ratio_[d1],1)))\n plt.ylabel('F{} ({}%)'.format(d2+1, round(100*my_meth.explained_variance_ratio_[d2],1)))\n\n plt.title(\"Projection des individus (sur F{} et F{})\".format(d1+1, d2+1))\n count_fig += 1 \n plt.show()\n \n\n\ndef display_scree_plot(pca):\n scree = pca.explained_variance_ratio_*100\n plt.bar(np.arange(len(scree))+1, scree)\n plt.plot(np.arange(len(scree))+1, scree.cumsum(),c=\"red\",marker='o')\n plt.xlabel(\"rang de l'axe d'inertie\")\n plt.ylabel(\"pourcentage d'inertie\")\n plt.title(\"Eboulis des valeurs propres\")\n #plt.show(block=False)\n \n##\n## TP hierarchical clustering\n## \n\nimport matplotlib.pyplot as plt\nfrom scipy.cluster.hierarchy import dendrogram\n\ndef plot_dendrogram(Z, names, figsize = (10,25)):\n plt.figure(figsize=figsize)\n plt.title('Hierarchical Clustering Dendrogram')\n plt.xlabel('distance')\n dendrogram(\n Z,\n labels = names,\n orientation = \"left\",\n )\n \n \n##\n## NMF\n## \n\ndef plot_coeffs(my_meth, X_, X_name= \"X\"):\n '''\n from a dimensionality reducer, plot the coefficients of the first 2 axes\n and return the coefficients on all axes\n \n Parameters:\n -----------------------------------------\n my_meth = sklearn.decomposition method like PCA or NMF \n X_ = pd.DataFrame() of data to be reduced\n \n Returns:\n -----------------------------------------\n pd.DataFrame of coefficients\n '''\n my_meth.set_params(**{\"n_components\": X_.shape[1]})\n my_meth.fit(X_)\n\n coeffs = pd.DataFrame(my_meth.components_, columns = X_.columns,\n index = 
[\"ax_\"+str(k) for k in np.arange(1,my_meth.n_components+1)])\n for colname in coeffs.columns :\n plt.scatter(x = coeffs.loc[\"ax_1\", colname], \n y = coeffs.loc[\"ax_2\", colname],\n label = colname)\n plt.xlabel(\"coeff axis 1\")\n plt.ylabel(\"coeff axis 2\")\n my_meth_name = str(my_meth).split(\"(\")[0]\n plt.title(my_meth_name+\" on \"+ str(X_name),fontsize=14)\n\n plt.legend()\n return(coeffs)\n \ndef frobenius_func(y, y_pred):\n return(np.linalg.norm(y-y_pred,\"fro\"))\n\ndef pseudo_cv_reduce_dim(X, my_meth, param_grid,my_score, cv = 5):\n ## MAP THE DICT OF LIST INTO LIST OF DICT :\n param_dirg = model_selection.ParameterGrid(param_grid)\n\n ## INITIALIZATION : \n res = {} # dict of dict \n res[\"params\"]=[]\n for kwargs in param_dirg :\n res[\"params\"].append(kwargs)\n dict_score = {}\n dict_time_fit = {}\n dict_time_predict = {}\n\n k_iter = 1\n ## SET FOLDS :\n kf = model_selection.KFold(n_splits = 5)\n CV_split_iterator = kf.split(X_,y) \n\n ## LOOP ON FOLDS :\n for CV_train_range_index, CV_test_range_index in CV_split_iterator : \n ## extract train\n train_index = X_.index[CV_train_range_index]\n train = X_.iloc[CV_train_range_index]\n ## LOOP ON PARAM NAMES (HERE ONLY 1)\n fold_key = \"fold\"+str(k_iter)\n ## init fold dict\n dict_score[fold_key] = []\n dict_time_fit[fold_key] = []\n dict_time_predict[fold_key] = []\n ## loop on different set of kwargs \n for kwargs in param_dirg :\n ## SET PARAMS IN METH :\n my_meth.set_params(**kwargs)\n ## PREDICT TEST VALUES : \n t = time.time()\n W = my_meth.fit_transform(train)\n dict_time_fit[fold_key].append(time.time() - t)\n t = time.time()\n H = my_meth.components_\n X_pred = np.dot(W,H)\n dict_score[fold_key].append(my_score(train, X_pred))\n dict_time_predict[fold_key].append(time.time() - t)\n k_iter += 1\n ## save in same shape as sklearn GridSearchCV \n df_time_fit = pd.DataFrame(dict_time_fit)\n df_time_predict = pd.DataFrame(dict_time_predict)\n df_score = pd.DataFrame(dict_score)\n res[\"mean_fit_time\"] = df_time_fit.mean(axis=1).values\n res[\"std_fit_time\"] = df_time_fit.std(axis=1).values\n res[\"mean_score_time\"] = df_time_predict.mean(axis=1).values\n res[\"std_score_time\"] = df_time_predict.std(axis=1).values\n res[\"mean_test_score\"] = df_score.mean(axis=1).values\n res[\"std_test_score\"] = df_score.std(axis=1).values\n return(res)\n"
] |
[
[
"matplotlib.pyplot.text",
"numpy.dot",
"matplotlib.pyplot.xlim",
"numpy.linalg.norm",
"pandas.DataFrame",
"numpy.arange",
"matplotlib.pyplot.gca",
"matplotlib.pyplot.axis",
"matplotlib.pyplot.subplot",
"scipy.cluster.hierarchy.dendrogram",
"numpy.zeros",
"matplotlib.pyplot.title",
"matplotlib.pyplot.figure",
"matplotlib.collections.LineCollection",
"matplotlib.pyplot.Circle",
"matplotlib.pyplot.show",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.ylim",
"numpy.ones",
"matplotlib.pyplot.legend",
"matplotlib.pyplot.plot",
"matplotlib.pyplot.ylabel",
"numpy.abs",
"matplotlib.pyplot.scatter"
]
] |
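Editor's note: the helpers in the file above expect `pcs` (component loadings), an `X_projected` DataFrame whose columns are named "axis1", "axis2", ..., and a fitted estimator exposing explained_variance_ratio_. A minimal sketch of how those inputs are typically produced with scikit-learn; the toy data and variable names below are illustrative assumptions, not taken from the course code:

import numpy as np
import pandas as pd
from sklearn.decomposition import PCA

rng = np.random.default_rng(0)
X = rng.normal(size=(100, 5))                 # toy data: 100 individuals, 5 variables
X = (X - X.mean(axis=0)) / X.std(axis=0)      # center and scale before PCA

n_comp = 3
my_pca = PCA(n_components=n_comp).fit(X)
pcs = my_pca.components_                      # (n_comp, n_features): input to display_circles
X_projected = pd.DataFrame(my_pca.transform(X),
                           columns=["axis" + str(k + 1) for k in range(n_comp)])
print(my_pca.explained_variance_ratio_)       # inertia percentages used in the axis labels

The DataFrame wrapping matters because display_factorial_planes indexes the projection with .loc and the string column names "axis1", "axis2", etc.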
tlu7/jax
|
[
"f4351e84191cf769b59f6e652264bb2b9ab007d8"
] |
[
"jax/interpreters/pxla.py"
] |
[
"# Copyright 2018 Google LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# https://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\"Implementation of pmap and related functionality.\"\"\"\n\n# A ShardingSpec describes at a high level how a logical array is sharded across\n# devices (each ShardedDeviceArray has a ShardingSpec, and ShardingSpecs also\n# describe how to shard inputs to a parallel computation). spec_to_indices()\n# encodes exactly how a given ShardingSpec is translated to device buffers, i.e.\n# how the sharded array is \"laid out\" across devices. Given a sequence of\n# devices, we shard the data across the devices in row-major order, with\n# replication treated as an extra inner dimension.\n#\n# For example, given the logical data array [1, 2, 3, 4], if we were to\n# partition this array 4 ways with a replication factor of 2, for a total of 8\n# devices, the data on each device would be: [1, 1], [2, 2], [3, 3], [4, 4].\n#\n# This encoding is assumed by various parts of the system, e.g. generating\n# replica groups for collective operations.\n\nfrom contextlib import contextmanager\nfrom collections import defaultdict, OrderedDict\nfrom functools import partial\nimport itertools as it\nimport operator as op\nimport threading\nfrom typing import (Any, Callable, Dict, List, Optional,\n Sequence, Set, Tuple, Type, Union, Iterable)\nimport sys\n\nfrom absl import logging\nimport numpy as np\n\nfrom .._src.config import config\nfrom .. import core\nfrom .. import linear_util as lu\nfrom jax._src.abstract_arrays import array_types\nfrom ..core import ConcreteArray, ShapedArray\nfrom jax._src import device_array\nfrom .._src import source_info_util\nfrom .._src.util import (unzip3, prod, safe_map, safe_zip,\n extend_name_stack, wrap_name, assert_unreachable,\n tuple_insert, tuple_delete, distributed_debug_log)\nfrom ..errors import JAXTypeError\nfrom jax._src import dispatch\nfrom jax._src.lib import xla_bridge as xb\nfrom jax._src.lib import xla_client as xc\nfrom jax._src.lib import pmap_lib\nfrom ..tree_util import tree_flatten, tree_map\nfrom . import batching\nfrom . import partial_eval as pe\nfrom . import xla\nfrom . 
import ad\n\n# Built in Python lists don't support weak refs but subclasses of lists do.\nclass WeakRefList(list):\n pass\n\nif sys.version_info >= (3, 8):\n from functools import cached_property as maybe_cached_property\nelse:\n maybe_cached_property = property\n\nif sys.version_info >= (3, 9):\n OrderedDictType = OrderedDict\nelse:\n OrderedDictType = Dict\n\nxops = xc.ops\n\nunsafe_map, map = map, safe_map # type: ignore\n\nIndex = Union[int, slice, Tuple[Union[int, slice], ...]]\n\nNoSharding = pmap_lib.NoSharding\nChunked = pmap_lib.Chunked\nUnstacked = pmap_lib.Unstacked\n\nShardedAxis = pmap_lib.ShardedAxis\nReplicated = pmap_lib.Replicated\n\n_UNSHARDED_INSTANCE = NoSharding()\nAvalDimSharding = Union[Unstacked, Chunked, NoSharding]\nMeshDimAssignment = Union[ShardedAxis, Replicated]\nShardingSpec = pmap_lib.ShardingSpec\n\n\ndef sharding_spec_mesh_shape(self):\n sharded_axis_sizes = []\n for sharding in self.sharding:\n if isinstance(sharding, NoSharding):\n continue\n elif isinstance(sharding, Unstacked):\n sharded_axis_sizes.append(sharding.size)\n elif isinstance(sharding, Chunked):\n sharded_axis_sizes.extend(sharding.chunks)\n else:\n assert_unreachable(sharding)\n return tuple(sharded_axis_sizes[a.axis] if isinstance(a, ShardedAxis) else a.replicas\n for a in self.mesh_mapping)\n\ndef sharding_spec_sharding_proto(self):\n \"\"\"Converts a ShardingSpec to an OpSharding proto.\n\n See\n https://github.com/tensorflow/tensorflow/blob/master/tensorflow/compiler/xla/xla_data.proto#L601\n for details on the OpSharding proto.\n Unfortunately the semantics are not very well described in the proto spec, but the code here might help:\n https://github.com/tensorflow/tensorflow/blob/master/tensorflow/compiler/xla/experimental/xla_sharding/xla_sharding.py\n \"\"\"\n mesh_shape = self.mesh_shape\n mesh = np.arange(np.prod(mesh_shape)).reshape(mesh_shape)\n\n sharded_axes = {} # maps sharded axis identifiers to mesh axis indices to which they're mapped\n replicated_maxes = [] # lists mesh axis identifiers to replicate over\n for maxis, assignment in enumerate(self.mesh_mapping):\n if isinstance(assignment, Replicated):\n replicated_maxes.append(maxis)\n elif isinstance(assignment, ShardedAxis):\n sharded_axes[assignment.axis] = maxis\n else:\n assert_unreachable(assignment)\n\n proto = xc.OpSharding()\n if len(replicated_maxes) == len(self.mesh_mapping):\n proto.type = xc.OpSharding.Type.REPLICATED\n return proto\n else:\n proto.type = xc.OpSharding.Type.OTHER\n\n mesh_permutation = []\n new_mesh_shape = []\n next_sharded_axis = 0\n for axis, sharding in enumerate(self.sharding):\n if isinstance(sharding, NoSharding):\n new_mesh_shape.append(1) # Add a dummy mesh axis we won't be sharding over\n elif isinstance(sharding, Chunked):\n for nchunks in sharding.chunks:\n maxis = sharded_axes[next_sharded_axis]\n assert mesh_shape[maxis] == nchunks\n mesh_permutation.append(maxis)\n next_sharded_axis += 1\n new_mesh_shape.append(int(np.prod(sharding.chunks)))\n elif isinstance(sharding, Unstacked):\n raise RuntimeError(\"Cannot convert unstacked sharding specs to XLA OpSharding\")\n else:\n assert_unreachable(sharding)\n\n # Create the partial sharding proto if tensor is replicated over some mesh axes\n if replicated_maxes:\n new_mesh_shape.append(-1)\n mesh_permutation.extend(replicated_maxes)\n proto.replicate_on_last_tile_dim = True\n\n proto_mesh = mesh.transpose(mesh_permutation).reshape(new_mesh_shape)\n proto.tile_assignment_dimensions = list(proto_mesh.shape)\n 
proto.tile_assignment_devices = list(proto_mesh.flat)\n return proto\n\ndef sharding_spec_indices(self, shape: Tuple[int, ...]) -> np.ndarray:\n \"\"\"Returns NumPy-style indices corresponding to a sharding spec.\n\n Args:\n shape: The shape of the logical array being sharded.\n\n Returns:\n An ndarray with the same shape as the logical mesh (as derived from\n `mesh_mapping`). Each entry is a NumPy-style index selecting the subset of\n the data array to be placed on a corresponding device. The indices can be\n ints, slice objects with step=1, or tuples of those.\n \"\"\"\n assert len(shape) == len(self.sharding), (shape, self.sharding)\n\n axis_indices: List[Sequence[Index]] = []\n shard_indices_shape = []\n for dim, sharding in enumerate(self.sharding):\n axis_size = shape[dim]\n if isinstance(sharding, NoSharding):\n axis_indices.append([slice(None)])\n # NOTE: We don't append unsharded dimensions to shard_indices_shape here,\n # because they do not appear in the mesh mapping.\n elif isinstance(sharding, Unstacked):\n assert axis_size == sharding.size, f'{axis_size} != {sharding.size}'\n axis_indices.append(range(axis_size))\n shard_indices_shape.append(axis_size)\n elif isinstance(sharding, Chunked):\n total_chunks = int(np.prod(sharding.chunks))\n shard_size, ragged = divmod(axis_size, total_chunks)\n assert not ragged, (axis_size, total_chunks, dim)\n axis_indices.append([slice(i * shard_size, (i + 1) * shard_size)\n for i in range(total_chunks)])\n shard_indices_shape.extend(sharding.chunks)\n else:\n assert_unreachable(sharding)\n\n # shard_indices is an ndarray representing the sharded axes of the logical array,\n # with each dimension having size equal to the number of shards across the corresponding\n # logical array dimension, and each element containing the multi-dimensional index that\n # is used to extract the corresponding shard of the logical array.\n shard_indices = np.empty([prod(shard_indices_shape)], dtype=np.object_)\n for i, idxs in enumerate(it.product(*axis_indices)):\n shard_indices[i] = idxs\n shard_indices = shard_indices.reshape(shard_indices_shape)\n\n # Ensure that each sharded axis is used exactly once in the mesh mapping\n num_sharded_dim = len(shard_indices_shape)\n sharded_dim_perm = [a.axis for a in self.mesh_mapping if isinstance(a, ShardedAxis)]\n assert (set(sharded_dim_perm) == set(range(num_sharded_dim)) and\n len(sharded_dim_perm) == num_sharded_dim)\n # Replicate/reorder the indices according to the mesh mapping\n replica_sizes = tuple(a.replicas for a in self.mesh_mapping if isinstance(a, Replicated))\n replica_dim, sharded_dim = it.count(0), iter(sharded_dim_perm)\n perm = [next(replica_dim) if isinstance(a, Replicated) else\n len(replica_sizes) + next(sharded_dim)\n for a in self.mesh_mapping]\n return (np.broadcast_to(shard_indices, replica_sizes + shard_indices.shape)\n .transpose(perm))\n\ndef sharding_spec_repr(self):\n return f'ShardingSpec({self.sharding}, {self.mesh_mapping})'\n\n\nShardingSpec.mesh_shape = property(sharding_spec_mesh_shape)\nShardingSpec.sharding_proto = sharding_spec_sharding_proto\nShardingSpec.indices = sharding_spec_indices\n# mypy raises: error: Cannot assign to a method [assignment]\nShardingSpec.__repr__ = sharding_spec_repr # type: ignore\n# Do not pollute the namespace\ndel sharding_spec_mesh_shape, sharding_spec_indices, sharding_spec_repr\n\ndef spec_to_indices(shape: Tuple[int, ...],\n spec: ShardingSpec) -> Tuple[Index, ...]:\n \"\"\"Returns numpy-style indices corresponding to a sharding spec.\n\n Each 
index describes a shard of the array. The order of the indices is the\n same as the device_buffers of a ShardedDeviceArray (i.e. the data is laid out\n row-major).\n\n Args:\n shape: The shape of the logical array being sharded.\n spec: Describes how the array is sharded and how the shards are assigned to\n the logical mesh.\n\n Returns:\n A tuple of length equal to the size of the mesh (inferred as the product of\n sharded dimension sizes and all replication factors). Each element is an\n int, a slice object with step=1, or a tuple thereof, to be treated as an\n index into the full logical array.\n \"\"\"\n return tuple(spec.indices(shape).flat) # type: ignore\n\n\n### util\n\ndef identity(x): return x\n\ndef _shard_arg(arg, devices, arg_indices):\n \"\"\"Returns a list of size len(devices) containing per-device buffers.\n\n For the C++ pmap path, we fall back to Python (this function) to shard\n arguments that are not supported by the C++ `ShardArg`.\n\n Args:\n arg: The Python argument.\n devices: The list of devices to shard over.\n arg_indices: A list of `len(devices)` indices to use to shard the argument.\n \"\"\"\n if isinstance(arg, ShardedDeviceArray) and arg_indices == arg.indices:\n # The shard_arg_handlers allow an extensible set of types to be sharded, but\n # inline handling for ShardedDeviceArray as a special case for performance\n # NOTE: we compare indices instead of sharding_spec because\n # pmap_benchmark.pmap_shard_args_benchmark indicates this is faster.\n return [\n buf if buf.device() == d else buf.copy_to_device(d)\n for d, buf in zip(devices, arg.device_buffers)\n ]\n else:\n arg = xla.canonicalize_dtype(arg)\n return shard_arg_handlers[type(arg)](arg, devices, arg_indices)\n\n\n\ndef shard_args(devices: Sequence[xb.xla_client.Device],\n indices: Sequence[Sequence[Index]],\n args) -> Sequence[Sequence[xb.xla_client.Buffer]]:\n \"\"\"Shard each argument data array along its leading axis.\n\n Args:\n devices: sequence of Devices mapping replica index to a physical device.\n indices: sequence of the same length as `args` describing how each arg\n should be sharded/replicated across `devices`. Each element in `indices`\n is the same length as `devices`.\n args: a sequence of JaxTypes representing arguments to be sharded according\n to `indices` and placed on `devices`.\n\n Returns:\n A list of length matching args, containing lists of per-device buffers\n for each argument.\n \"\"\"\n return [_shard_arg(arg, devices, indices[a]) for a, arg in enumerate(args)]\n\n\nshard_arg_handlers: Dict[Any, Callable[[Any, Any, Any], Sequence[Any]]] = {}\nshard_arg_handlers[core.Unit] = \\\n lambda x, devices, _: device_put(core.unit, devices, replicate=True)\ndef _shard_array(x, devices, indices):\n return device_put([x[i] for i in indices], devices)\nfor _t in array_types:\n shard_arg_handlers[_t] = _shard_array\n\ndef _shard_device_array(x, devices, indices):\n start_indices, limit_indices, removed_dims = unzip3(\n _as_slice_indices(x, idx) for idx in indices)\n shards = x._multi_slice(start_indices, limit_indices, removed_dims)\n return device_put(shards, devices)\nfor t in device_array.device_array_types:\n shard_arg_handlers[t] = _shard_device_array\n\n\n# NOTE(skye): we could refactor to generate _multi_slice parameters directly\n# from the input ShardingSpec, rather than the indices. 
However, this would\n# require duplicating the ordering logic of spec_to_indices, which is more\n# subtle and more likely to change than the index logic we have to support here.\ndef _as_slice_indices(arr: device_array.DeviceArrayProtocol, idx: Index) -> Tuple[\n Tuple[int, ...], Tuple[int, ...], Tuple[int, ...]]:\n \"\"\"Returns start_indices, limit_indices, removed_dims\"\"\"\n start_indices = [0] * arr.ndim\n limit_indices = list(arr.shape)\n removed_dims = []\n\n tuple_idx = idx if isinstance(idx, tuple) else (idx,)\n for dim, sub_idx in enumerate(tuple_idx):\n if isinstance(sub_idx, int):\n start_indices[dim] = sub_idx\n limit_indices[dim] = sub_idx + 1\n removed_dims.append(dim)\n elif sub_idx == slice(None):\n continue\n else:\n assert isinstance(sub_idx, slice), sub_idx\n assert isinstance(sub_idx.start, int), sub_idx\n assert isinstance(sub_idx.stop, int), sub_idx\n start_indices[dim] = sub_idx.start\n limit_indices[dim] = sub_idx.stop\n\n return tuple(start_indices), tuple(limit_indices), tuple(removed_dims) # type: ignore\n\n\ndef shard_aval(size, axis: int, aval):\n try:\n return shard_aval_handlers[type(aval)](size, axis, aval)\n except KeyError as err:\n raise TypeError(f\"No shard_aval handler for type: {type(aval)}\") from err\nshard_aval_handlers: Dict[Type[core.AbstractValue], Callable[[int, int, Any], Any]] = {}\nshard_aval_handlers[core.AbstractUnit] = lambda size, axis, x: x\ndef _shard_abstract_array(size, axis: int, x):\n try:\n if x.shape[axis] != size:\n raise ValueError(f\"Axis size {size} does not match dimension {axis} of \"\n f\"shape {x.shape}\")\n except IndexError:\n raise ValueError(f\"Cannot split a {x.ndim}D value along axis {axis}\") from None\n return x.update(shape=tuple_delete(x.shape, axis))\nshard_aval_handlers[ShapedArray] = _shard_abstract_array\n\nMeshAxisName = Any\n\"\"\"\nArrayMapping specifies how an ndarray should map to mesh axes.\n\nNote that the ordering is crucial for the cases when this mapping is non-injective\n(i.e. when multiple mesh axes map to the same positional axis). Then, the\norder of entries of the mapping determines a major-to-minor order on mesh axes,\naccording to which chunks of the value along the repeated dimension will be assigned.\n\nFor example, consider a mapping {'x': 1, 'y': 1} and a mesh with shape {'x': 2, 'y': 3}.\nThe second dimension of the value would get chunked into 6 pieces, and assigned to the\nmesh in a way that treats 'y' as the fastest changing (minor) dimension. In this case,\nthat would mean that a flat list of chunks would get assigned to a flattened list of\nmesh devices without any modifications. 
If the mapping was {'y': 1, 'x': 1}, then the\nmesh devices ndarray would have to be transposed before flattening and assignment.\n\"\"\"\nArrayMapping = OrderedDictType[MeshAxisName, int]\n\nAxisResource = Tuple[Optional[Tuple[Any, ...]], ...]\n\ndef array_mapping_to_axis_resources(array_mapping: ArrayMapping) -> AxisResource:\n if not array_mapping:\n return tuple()\n max_index = array_mapping[max(array_mapping, key=array_mapping.get)] # type: ignore\n reverse_map = defaultdict(list)\n for axis, index in array_mapping.items():\n reverse_map[index].append(axis)\n return tuple(\n tuple(reverse_map[i]) if reverse_map[i] else None for i in range(max_index + 1)\n )\n\ndef aval_to_result_handler(\n sharding_spec: Optional[ShardingSpec],\n indices: Optional[Tuple[Index]],\n aval: core.AbstractValue,\n global_aval: Optional[ShapedArray] = None,\n out_axis_resources: Optional[AxisResource] = None,\n global_mesh = None,\n) -> Callable[[List[xb.xla_client.Buffer]], Any]:\n \"\"\"Returns a function for handling the raw buffers of a single output aval.\n\n Args:\n sharding_spec: Indicates how the output is sharded across devices, or None\n for non-array avals.\n indices: The pre-computed result of spec_to_indices, or None for non-array\n avals.\n aval: The output AbstractValue.\n global_aval: Global output AbstractValue. Used for creating GSDAs.\n out_axis_resources: A tuple specifying the sharding of outputs.\n Used for creating GSDAs.\n global_mesh: The global device mesh that generated this output. Used\n for creating GSDAs.\n\n Returns:\n A function for handling the Buffers that will eventually be produced\n for this output. The function will return an object suitable for returning\n to the user, e.g. a ShardedDeviceArray.\n \"\"\"\n try:\n return pxla_result_handlers[type(aval)](sharding_spec, indices, aval,\n global_aval, out_axis_resources, global_mesh)\n except KeyError as err:\n raise TypeError(\"No pxla_result_handler for type: {}\".format(type(aval))\n ) from err\n\nPxlaResultHandler = Callable[..., Callable[[List[xb.xla_client.Buffer]], Any]]\npxla_result_handlers: Dict[Type[core.AbstractValue], PxlaResultHandler] = {}\npxla_result_handlers[core.AbstractUnit] = lambda *_: lambda _: core.unit\n\ndef array_result_handler(sharding_spec, indices, aval: ShapedArray, global_aval,\n out_axis_resources, global_mesh):\n if config.jax_gsda_out:\n return gsda_array_result_handler(global_aval, global_mesh, out_axis_resources)\n else:\n return sda_array_result_handler(sharding_spec, indices, aval)\n\npxla_result_handlers[ShapedArray] = array_result_handler\npxla_result_handlers[ConcreteArray] = array_result_handler\n\ndef sda_array_result_handler(sharding_spec, indices, aval: ShapedArray):\n return lambda bufs: make_sharded_device_array(aval, sharding_spec, bufs,\n indices)\n\ndef gsda_array_result_handler(global_aval, global_mesh, out_axis_resources):\n from ..experimental.gsda import GlobalShardedDeviceArray\n\n return lambda bufs: GlobalShardedDeviceArray(\n global_aval.shape, global_mesh, out_axis_resources, bufs)\n\n### lazy device-memory persistence and result handling\n\n# TODO(jblespiau): Consider removing this option.\n_USE_CPP_SDA = True\n\n\ndef make_sharded_device_array(\n aval: ShapedArray,\n sharding_spec: Optional[ShardingSpec],\n # Any is for JAX extensions implementing their own buffer.\n device_buffers: List[Union[Any, xb.xla_client.Buffer]],\n indices: Optional[Tuple[Index, ...]] = None,\n):\n \"\"\"Returns a ShardedDeviceArray implementation based on arguments.\n\n Returns either a 
C++ SDA or a Python DeviceArray when the buffers are not\n JAX buffers.\n\n Args:\n aval: The `ShapedArray` for this array.\n sharding_spec: If `None`, assumes a pmap-style ShardedDeviceArrays over the\n first dimension.\n device_buffers: If a list of Jax `Buffer` objects, a C++ SDA will be\n returned (if the version is high enough). Otherwise, a Python object will\n be returned, for JAX extensions not implementing the C++ API.\n indices: For caching purposes, will be computed if `None`.\n \"\"\"\n if sharding_spec is None:\n sharded_aval = aval.update(shape=aval.shape[1:])\n sharding_spec = _pmap_sharding_spec(aval.shape[0], aval.shape[0], 1, None,\n sharded_aval, 0)\n\n if indices is None:\n indices = spec_to_indices(aval.shape, sharding_spec)\n\n if (_USE_CPP_SDA and\n (not device_buffers or\n isinstance(device_buffers[0], xb.xla_client.Buffer))):\n return pmap_lib.ShardedDeviceArray.make(\n aval, sharding_spec, device_buffers,\n indices, aval.weak_type)\n\n return _ShardedDeviceArray(aval, sharding_spec, device_buffers, indices)\n\n\nif _USE_CPP_SDA:\n ShardedDeviceArrayBase = pmap_lib.ShardedDeviceArrayBase # type: ignore\n # We want the C++ SDA to extend the DeviceArrayBase. We want this both to\n # benefit from its methods, and to have isinstance(x, DeviceArray) return true\n ShardedDeviceArrayBase.__bases__ = ((device_array.DeviceArray,) + # type: ignore\n ShardedDeviceArrayBase.__bases__)\n _SDA_BASE_CLASS = pmap_lib.ShardedDeviceArrayBase # type: ignore\nelse:\n _SDA_BASE_CLASS: Type[device_array.DeviceArray] = device_array.DeviceArray # type: ignore\n\n\nclass _ShardedDeviceArray(_SDA_BASE_CLASS): # type: ignore\n \"\"\"A ShardedDeviceArray is an ndarray sharded across devices.\n\n The purpose of a ShardedDeviceArray is to reduce the number of transfers when\n executing replicated computations, by allowing results to persist on the\n devices that produced them. That way dispatching a similarly replicated\n computation that consumes the same sharded memory layout does not incur any\n transfers.\n\n A ShardedDeviceArray represents one logical ndarray value, and simulates the\n behavior of an ndarray so that it can be treated by user code as an ndarray;\n that is, it is only an optimization to reduce transfers.\n\n Attributes:\n aval: A ShapedArray indicating the shape and dtype of this array.\n sharding_spec: describes how this array is sharded across `device_buffers`.\n device_buffers: the buffers containing the data for this array. Each buffer\n is the same shape and on a different device. Buffers are in row-major\n order, with replication treated as an extra innermost dimension.\n indices: the result of spec_to_indices(sharding_spec). Can optionally be\n precomputed for efficiency. A list the same length as\n `device_buffers`. Each index indicates what portion of the full array is\n stored in the corresponding device buffer, i.e. `array[indices[i]] ==\n device_buffers[i].to_py()`.\n \"\"\"\n __slots__ = [\n \"aval\", \"device_buffers\", \"sharding_spec\", \"indices\",\n \"_one_replica_buffer_indices\", \"_npy_value\"\n ]\n\n def __init__(self,\n aval: ShapedArray,\n sharding_spec: ShardingSpec,\n device_buffers: List[xb.xla_client.Buffer],\n indices: Optional[Tuple[Index, ...]] = None):\n super().__init__()\n\n # TODO(skye): assert invariants. 
Keep performance in mind though.\n if indices is None:\n indices = spec_to_indices(aval.shape, sharding_spec)\n\n self.aval = aval\n self.device_buffers = device_buffers\n self.sharding_spec = sharding_spec\n self.indices = indices\n self._npy_value = None\n self._one_replica_buffer_indices = None\n if config.jax_enable_checks:\n assert type(aval) is ShapedArray\n\n @property\n def shape(self):\n return self.aval.shape\n\n @property\n def dtype(self):\n return self.aval.dtype\n\n @property\n def size(self):\n return prod(self.aval.shape)\n\n @property\n def ndim(self):\n return len(self.aval.shape)\n\n def delete(self):\n if self.device_buffers is None:\n return\n for buf in self.device_buffers:\n buf.delete()\n self.device_buffers = None\n self._npy_value = None\n\n\ndef _sda_one_replica_buffer_indices(self):\n \"\"\"Indices of buffers containing one complete copy of the array data.\"\"\"\n if self._one_replica_buffer_indices is None:\n one_replica_indices = []\n seen_index_hashes = set()\n for i, index in enumerate(self.indices):\n hashed_index = _hashable_index(index)\n if hashed_index not in seen_index_hashes:\n one_replica_indices.append(i)\n seen_index_hashes.add(hashed_index)\n self._one_replica_buffer_indices = one_replica_indices\n return self._one_replica_buffer_indices\n\n\ndef _sda_copy_to_host_async(self):\n for buffer_index in self.one_replica_buffer_indices:\n self.device_buffers[buffer_index].copy_to_host_async()\n\n\ndef _sda_check_if_deleted(self):\n if self.device_buffers is None:\n raise ValueError(\"ShardedDeviceArray has been deleted.\")\n\n\ndef _sda_block_until_ready(self):\n self._check_if_deleted()\n for buf in self.device_buffers:\n buf.block_host_until_ready()\n return self\n\n\ndef _sda_value(self):\n if self._npy_value is None:\n self.copy_to_host_async()\n npy_value = np.empty(self.aval.shape, self.aval.dtype)\n for i in self.one_replica_buffer_indices:\n npy_value[self.indices[i]] = self.device_buffers[i].to_py()\n self._npy_value = npy_value\n return self._npy_value\n\n\ndef _sda__getitem__(self, idx):\n self._check_if_deleted()\n if not isinstance(idx, tuple):\n cidx = (idx,) + (slice(None),) * (len(self.aval.shape) - 1)\n else:\n cidx = idx + (slice(None),) * (len(self.aval.shape) - len(idx))\n if self._npy_value is None:\n try:\n buf_idx = self.indices.index(cidx)\n except ValueError:\n buf_idx = None\n if buf_idx is not None:\n buf = self.device_buffers[buf_idx]\n aval = ShapedArray(buf.xla_shape().dimensions(), self.aval.dtype)\n return device_array.make_device_array(aval, None, buf)\n return super(self.__class__, self).__getitem__(idx)\n\n\ndef _sda__iter__(self):\n if self.ndim == 0:\n raise TypeError(\"iteration over a 0-d array\") # same as numpy error\n else:\n return (self[i] for i in range(self.shape[0]))\n\ndef _sda__reversed__(self):\n if self.ndim == 0:\n raise TypeError(\"iteration over a 0-d array\") # same as numpy error\n else:\n return (self[i] for i in range(self.shape[0] - 1, -1, -1))\n\n\nfor sda in [_ShardedDeviceArray, pmap_lib.ShardedDeviceArray]:\n setattr(sda, \"one_replica_buffer_indices\",\n property(_sda_one_replica_buffer_indices))\n setattr(sda, \"copy_to_host_async\", _sda_copy_to_host_async)\n setattr(sda, \"_check_if_deleted\", _sda_check_if_deleted)\n setattr(sda, \"block_until_ready\", _sda_block_until_ready)\n setattr(sda, \"_value\", property(_sda_value))\n setattr(sda, \"__getitem__\", _sda__getitem__)\n setattr(sda, \"__iter__\", _sda__iter__)\n setattr(sda, \"__reversed__\", _sda__reversed__)\n\ndel 
(_sda_one_replica_buffer_indices, _sda_copy_to_host_async,\n _sda_check_if_deleted, _sda_block_until_ready, _sda_value, _sda__getitem__)\n\n\nShardedDeviceArray: Type[object]\nif _USE_CPP_SDA:\n ShardedDeviceArray = pmap_lib.ShardedDeviceArrayBase\nelse:\n ShardedDeviceArray = _ShardedDeviceArray\n\n\n\ndef _hashable_index(idx):\n return tree_map(lambda x: (x.start, x.stop) if type(x) == slice else x,\n idx)\n\n# The fast path is handled directly in shard_args().\n# TODO(skye): is there a simpler way to rewrite this using sharding_spec?\ndef _shard_sharded_device_array_slow_path(x, devices, indices):\n candidates = defaultdict(list)\n for buf, idx in safe_zip(x.device_buffers, x.indices):\n candidates[_hashable_index(idx)].append(buf)\n\n bufs = []\n for idx, device in safe_zip(indices, devices):\n # Look up all buffers that contain the correct slice of the logical array.\n candidates_list = candidates[_hashable_index(idx)]\n if not candidates_list:\n # This array isn't sharded correctly. Reshard it via host roundtrip.\n # TODO(skye): more efficient reshard?\n return shard_arg_handlers[type(x._value)](x._value, devices, indices)\n # Try to find a candidate buffer already on the correct device,\n # otherwise copy one of them.\n for buf in candidates_list:\n if buf.device() == device:\n bufs.append(buf)\n break\n else:\n bufs.append(buf.copy_to_device(device))\n return bufs\n\n\ndef _sharded_device_array_constant_handler(c, val, canonicalize_types=True):\n return xla.pyval_to_ir_constants(c, np.asarray(val),\n canonicalize_types=canonicalize_types)\n\n\ndef _register_handlers_for_sharded_device_array(sda):\n shard_arg_handlers[sda] = _shard_sharded_device_array_slow_path\n xla.register_constant_handler(sda, _sharded_device_array_constant_handler)\n\n core.pytype_aval_mappings[sda] = ConcreteArray\n dispatch.device_put_handlers[sda] = dispatch._device_put_array\n xla.pytype_aval_mappings[sda] = op.attrgetter(\"aval\")\n xla.canonicalize_dtype_handlers[sda] = identity\n\n_register_handlers_for_sharded_device_array(_ShardedDeviceArray)\n_register_handlers_for_sharded_device_array(pmap_lib.ShardedDeviceArray)\n\n### the xla_pmap primitive and its rules are comparable to xla_call in xla.py\n\ndef xla_pmap_impl(fun: lu.WrappedFun, *args, backend, axis_name, axis_size,\n global_axis_size, devices, name, in_axes, out_axes_thunk,\n donated_invars, global_arg_shapes):\n abstract_args = unsafe_map(xla.abstractify, args)\n compiled_fun, fingerprint = parallel_callable(fun, backend, axis_name, axis_size,\n global_axis_size, devices, name,\n in_axes, out_axes_thunk,\n donated_invars, global_arg_shapes,\n *abstract_args)\n\n # Don't re-abstractify args unless logging is enabled for performance.\n if config.jax_distributed_debug:\n distributed_debug_log((\"Running pmapped function\", name),\n (\"python function\", fun.f),\n (\"devices\", devices),\n (\"abstract args\", map(xla.abstractify, args)),\n (\"fingerprint\", fingerprint))\n return compiled_fun(*args)\n\[email protected]\ndef parallel_callable(fun: lu.WrappedFun,\n backend_name: Optional[str],\n axis_name,\n axis_size: int,\n global_axis_size: Optional[int],\n devices: Optional[Sequence[Any]],\n name: str,\n in_axes: Iterable[Optional[int]],\n out_axes_thunk: Callable[[], Sequence[Optional[int]]],\n donated_invars: Iterable[bool],\n global_arg_shapes,\n *avals):\n if devices is not None and len(devices) == 0:\n raise ValueError(\"'devices' argument to pmap must be non-empty, or None.\")\n\n # Determine global_axis_size for use in AxisEnv.\n # 
TODO(mattjj,skyewm): revive this check (inner_pmap always False now)\n # if xb.process_count() > 1 and global_axis_size is None and inner_pmap:\n # raise ValueError(\"'axis_size' must be specified for nested multi-host pmaps\")\n if (xb.process_count() == 1 and global_axis_size is not None and\n global_axis_size != axis_size):\n raise ValueError(\n f\"Specified axis_size {global_axis_size} doesn't match received \"\n f\"axis_size {axis_size}.\")\n\n if devices is not None and backend_name is None:\n backend = xb.get_device_backend(devices[0])\n else:\n backend = xb.get_backend(backend_name)\n\n must_run_on_all_devices = False\n no_nested_sharding = False\n if global_axis_size is None:\n if xb.process_count(backend) == 1:\n global_axis_size = axis_size\n elif devices:\n # This allows each host in a multi-host pmap to run on a different number\n # of devices, but precludes nested sharding (i.e. inner pmaps or\n # sharded_jits).\n global_axis_size = len(devices)\n no_nested_sharding = True\n else:\n # This assumes all hosts run on the same number of devices. We make sure\n # this assumption is true by requiring that the pmap is run on all devices\n # (and making the further assumption that each host has the same number of\n # devices). Nested sharding is ok in this case.\n global_axis_size = axis_size * xb.process_count(backend)\n assert all(len(xb.local_devices(process_index, backend)) == xb.local_device_count(backend)\n for process_index in range(xb.process_count(backend)))\n must_run_on_all_devices = True\n\n if devices:\n local_devices = [d for d in devices if d.process_index == xb.process_index(backend)]\n assert len(local_devices) > 0\n else:\n local_devices = None # type: ignore\n\n sharded_avals = tuple(shard_aval(axis_size, axis, aval) if axis is not None else aval\n for axis, aval in safe_zip(in_axes, avals))\n if any(s is not None for s in global_arg_shapes):\n # TODO(skye): we could take this branch unconditionally if we handled\n # grad of global_arg_shapes correctly.\n global_sharded_avals = [\n aval.update(shape=shape) if shape is not None else aval\n for shape, aval in safe_zip(global_arg_shapes, sharded_avals)]\n else:\n global_sharded_avals = sharded_avals # type: ignore\n if logging.vlog_is_on(2):\n logging.vlog(2, \"sharded_avals: %s\", sharded_avals)\n logging.vlog(2, \"global_sharded_avals: %s\", global_sharded_avals)\n\n with core.extend_axis_env(axis_name, global_axis_size, None): # type: ignore\n jaxpr, out_sharded_avals, consts = pe.trace_to_jaxpr_final(\n fun, global_sharded_avals, pe.debug_info_final(fun, \"pmap\"))\n jaxpr = dispatch.apply_outfeed_rewriter(jaxpr)\n\n out_axes = out_axes_thunk()\n assert len(out_sharded_avals) == len(out_axes), (len(out_sharded_avals), len(out_axes))\n\n # TODO(skye,mattjj): allow more collectives on multi-host as we test them, but\n # for now raise an error\n if devices is not None:\n is_multi_host_pmap = len(local_devices) != len(devices)\n else:\n is_multi_host_pmap = xb.process_count(backend) > 1\n if is_multi_host_pmap:\n check_multihost_collective_allowlist(jaxpr)\n\n # TODO(skyewm): replace this with a chain of pmaps and/or sharded_jits\n jaxpr_replicas = dispatch.jaxpr_replicas(jaxpr)\n num_local_replicas = axis_size * jaxpr_replicas\n num_global_replicas = global_axis_size * jaxpr_replicas\n\n (arg_parts, out_parts, num_partitions, local_arg_parts, local_out_parts,\n local_num_partitions) = _find_partitions(jaxpr)\n\n if local_num_partitions is None:\n local_num_partitions = num_partitions\n\n if local_arg_parts is None:\n 
local_arg_parts = arg_parts\n if local_out_parts is None:\n local_out_parts = out_parts\n\n if logging.vlog_is_on(2):\n logging.vlog(2, \"num_replicas: %d num_local_replicas: %d\",\n num_global_replicas, num_local_replicas)\n logging.vlog(2, \"num_partitions: %d local_num_partitions: %d\",\n num_partitions, local_num_partitions)\n logging.vlog(2, \"arg_parts: %s\", arg_parts)\n logging.vlog(2, \"local_arg_parts: %s\", local_arg_parts)\n logging.vlog(2, \"out_parts: %s\", out_parts)\n logging.vlog(2, \"local_out_parts: %s\", local_out_parts)\n logging.vlog(2, \"devices: %s\", devices)\n logging.vlog(2, \"local_devices: %s\", local_devices)\n\n num_local_shards = num_local_replicas * local_num_partitions\n num_global_shards = num_global_replicas * num_partitions\n\n if (xb.process_count(backend) > 1 and must_run_on_all_devices and\n num_local_shards != xb.local_device_count(backend)):\n if num_local_shards == axis_size:\n raise ValueError(\n f\"On multi-host platforms, the input to pmapped functions must have \"\n f\"leading axis size equal to the number of local devices if no \"\n f\"`devices` argument is specified. Got axis_size={axis_size}, \"\n f\"num_local_devices={xb.local_device_count(backend)}\")\n else:\n raise ValueError(\n f\"On multi-host platforms, pmapped functions must run across all \"\n f\"devices, i.e. num_replicas * num_partitions should equal the \"\n f\"number of local devices. Got num_replicas={num_local_replicas}, \"\n f\"num_partitions={num_partitions}, and \"\n f\"num_local_devices={xb.local_device_count(backend)}\")\n\n if no_nested_sharding and (jaxpr_replicas > 1 or num_partitions > 1):\n raise ValueError(\n f\"On multi-host platforms, pmapped functions that both have `devices` \"\n f\"specified and contain an inner_pmap or sharded_jit must specify an \"\n f\"`axis_size` (or remove the `devices` argument). Got nested_replicas=\"\n f\"{jaxpr_replicas} and nested_partitions={num_partitions}\")\n\n log_priority = logging.WARNING if config.jax_log_compiles else logging.DEBUG\n logging.log(log_priority,\n \"Compiling %s (%d) for %d devices with args %s. 
(num_replicas=%d\"\n \" num_partitions=%d)\", fun.__name__, id(fun), num_global_shards,\n avals, num_global_replicas, num_partitions)\n\n axis_env = xla.AxisEnv(num_global_replicas, (axis_name,), (global_axis_size,))\n\n tuple_args = len(global_sharded_avals) > 100 # pass long arg lists as tuple for TPU\n\n c = xc.XlaBuilder(\"pmap_{}\".format(fun.__name__))\n xla_consts = map(partial(xla.pyval_to_ir_constant, c), consts)\n replicated_args = [axis is None for axis in in_axes]\n xla_args, donated_invars = xla._xla_callable_args(c, global_sharded_avals, tuple_args,\n replicated=replicated_args,\n partitions=arg_parts,\n donated_invars=donated_invars)\n with maybe_extend_axis_env(axis_name, global_axis_size, None): # type: ignore\n ctx = xla.TranslationContext(c, backend.platform, axis_env,\n extend_name_stack(wrap_name(name, 'pmap')))\n out_nodes = xla.jaxpr_subcomp(ctx, jaxpr, xla_consts, *xla_args)\n build_out_tuple = partial(xops.Tuple, c, out_nodes)\n if out_parts is not None:\n out_tuple = xb.with_sharding(c, out_parts, build_out_tuple)\n else:\n out_tuple = build_out_tuple()\n\n if backend.platform in (\"gpu\", \"tpu\"):\n donated_invars = xla.set_up_aliases(c, xla_args, c.GetShape(out_tuple),\n donated_invars, tuple_args)\n built = c.Build(out_tuple)\n\n if devices is None:\n if num_global_shards > xb.device_count(backend):\n msg = (\"compiling computation that requires {} logical devices, but only {} XLA \"\n \"devices are available (num_replicas={}, num_partitions={})\")\n raise ValueError(msg.format(num_global_shards, xb.device_count(backend),\n num_global_replicas, num_partitions))\n\n # On a single host, we use the platform's default device assignment to\n # potentially take advantage of device locality. On multiple hosts, the\n # default device assignment may interleave different hosts' replicas,\n # violating pmap's semantics where data is sharded across replicas in\n # row-major order. Instead, manually create a device assignment that ensures\n # each host is responsible for a continguous set of replicas.\n if num_global_shards > num_local_shards:\n # TODO(skye): use a locality-aware assignment that satisfies the above\n # constraint.\n devices = [d for process_index in range(xb.process_count(backend))\n for d in xb.local_devices(process_index, backend)]\n else:\n devices = xb.get_backend(backend).get_default_device_assignment(\n num_global_replicas, num_partitions)\n else:\n if num_local_shards != len(local_devices):\n local_devices_str = \", \".join(map(str, local_devices))\n if num_local_shards == axis_size:\n raise ValueError(\n f\"Leading axis size of input to pmapped function must equal the \"\n f\"number of local devices passed to pmap. 
Got axis_size=\"\n f\"{axis_size}, num_local_devices={len(local_devices)}.\\n(Local \"\n f\"devices available to pmap: {local_devices_str})\")\n else:\n raise ValueError(\n f\"pmapped function requires {num_local_shards} local devices to \"\n f\"run due to nested pmapped or other parallel functions, but only \"\n f\"{len(local_devices)} are available.\\n(outer axis size: \"\n f\"{axis_size}, local devices available to pmap: \"\n f\"{local_devices_str})\")\n if num_global_shards != len(devices):\n raise ValueError(\"compiling computation that creates %s shards, \"\n \"but %s devices were specified\" %\n (num_global_shards, len(devices)))\n\n # 'devices' may be 1D or 2D at this point (e.g.\n # get_default_device_assignment() returns 2D assignment, caller may have\n # provided 1D list of devices).\n device_assignment = tree_map(lambda d: d.id, devices)\n # Convert to 2D in case it's 1D and we have > 1 partitions.\n device_assignment = np.array(device_assignment).reshape(\n (num_global_replicas, num_partitions))\n # TODO(b/162356737): Enabling SPMD partitioning causes issues with some\n # non-partitioned workloads, so disable unless needed.\n use_spmd_partitioning = num_partitions > 1\n compile_options = xb.get_compile_options(\n num_replicas=num_global_replicas,\n num_partitions=num_partitions,\n device_assignment=device_assignment,\n use_spmd_partitioning=use_spmd_partitioning,\n )\n compile_options.parameter_is_tupled_arguments = tuple_args\n\n local_arg_parts_ = local_arg_parts or [None] * len(avals)\n input_sharding_specs = [\n _pmap_sharding_spec(num_local_replicas, axis_size, local_num_partitions,\n parts, aval, in_axis)\n if aval is not core.abstract_unit else None\n for aval, parts, in_axis in safe_zip(sharded_avals, local_arg_parts_, in_axes)]\n input_indices = [spec_to_indices(aval.shape, spec)\n if spec is not None else None\n for aval, spec in safe_zip(avals, input_sharding_specs)]\n nouts = len(out_sharded_avals)\n if out_parts is None:\n out_parts = (None,) * nouts\n if local_out_parts is None:\n local_out_parts = (None,) * nouts\n\n local_out_avals = [get_local_aval(aval, parts, lparts)\n for aval, parts, lparts\n in safe_zip(out_sharded_avals, out_parts, local_out_parts)]\n local_unmapped_avals = [core.unmapped_aval(axis_size, axis_name, out_axis, aval)\n if out_axis is not None else aval\n for aval, out_axis in safe_zip(local_out_avals, out_axes)]\n\n out_specs = [_pmap_sharding_spec(num_local_replicas, axis_size, local_num_partitions,\n parts, aval, out_axis)\n if aval is not core.abstract_unit else None\n for parts, aval, out_axis in safe_zip(local_out_parts, local_out_avals, out_axes)]\n handle_outs = avals_to_results_handler(\n num_local_replicas, local_num_partitions, out_specs, local_unmapped_avals)\n\n if hasattr(backend, \"compile_replicated\"):\n execute_fun = backend.compile_replicated(built, compile_options,\n input_indices, input_sharding_specs,\n handle_outs)\n return WeakRefList([execute_fun, None])\n\n compiled = dispatch.compile_or_get_cached(backend, built, compile_options)\n handle_args = InputsHandler(compiled.local_devices(), input_sharding_specs,\n input_indices)\n execute_fun = partial(execute_replicated, compiled, backend, handle_args, handle_outs)\n fingerprint = getattr(compiled, \"fingerprint\", None)\n return WeakRefList([execute_fun, fingerprint])\n\nmulti_host_supported_collectives: Set[core.Primitive] = set()\n\n\ndef check_multihost_collective_allowlist(jaxpr):\n used_collectives = set(xla.jaxpr_collectives(jaxpr))\n if not 
used_collectives.issubset(multi_host_supported_collectives):\n bad_collectives = used_collectives - multi_host_supported_collectives\n msg = \"using collectives that aren't supported for multi-host: {}\"\n raise TypeError(msg.format(\", \".join(map(str, bad_collectives))))\n\n\nPartitionsOrReplicated = Optional[Tuple[int, ...]]\n\ndef _find_partitions(jaxpr) -> Tuple[\n Optional[Tuple[PartitionsOrReplicated, ...]],\n Optional[Tuple[PartitionsOrReplicated, ...]],\n int,\n Optional[Tuple[PartitionsOrReplicated, ...]],\n Optional[Tuple[PartitionsOrReplicated, ...]],\n Optional[int]]:\n \"\"\"Returns (in_partitions, out_partitions, num_partitions, local_in_parts,\n local_out_parts, local_num_partitions).\n \"\"\"\n for eqn in jaxpr.eqns:\n if eqn.primitive.name == \"sharded_call\":\n if len(jaxpr.eqns) > 1:\n raise NotImplementedError(\n \"pmap of sharded_jit + non-sharded operations not yet implemented.\")\n num_partitions = reconcile_num_partitions(eqn.params[\"call_jaxpr\"],\n eqn.params[\"nparts\"])\n return (eqn.params[\"in_parts\"],\n eqn.params[\"out_parts_thunk\"](),\n num_partitions,\n eqn.params[\"local_in_parts\"],\n eqn.params[\"local_out_parts_thunk\"](),\n eqn.params[\"local_nparts\"])\n return None, None, 1, None, None, None\n\ndef reconcile_num_partitions(jaxpr, outer_num_parts: Optional[int]):\n \"\"\"Returns the total number of partitions to use.\n\n Validates that any inner partitioning matches outer_num_parts if provided, and\n returns the number of partitions to use based on outer_num_parts and any inner\n partitioning.\n \"\"\"\n inner_num_parts = _inner_partitions(jaxpr, outer_num_parts)\n if outer_num_parts is None and inner_num_parts is None:\n # No partitions specified anywhere, everything is replicated.\n return 1\n if outer_num_parts is None:\n return inner_num_parts\n return outer_num_parts\n\n\ndef _inner_partitions(jaxpr, expected_num_parts: Optional[int]):\n \"\"\"Returns the total number of partitions from PartitionSpecs inside `jaxpr`.\n\n Also validates that this number matches `expected_num_parts` if provided.\n \"\"\"\n for eqn in jaxpr.eqns:\n if eqn.primitive.name in [\"sharding_constraint\", \"infeed\"]:\n parts = eqn.params[\"partitions\"]\n nparts = get_num_partitions(parts)\n if expected_num_parts is None:\n expected_num_parts = nparts\n elif nparts is not None and nparts != expected_num_parts:\n # TODO(skye): raise this error as we trace the jaxpr\n raise ValueError(\n f\"with_sharding_constraint with partitions={parts} \"\n f\"(total partitions: {nparts}) doesn't match expected number of \"\n f\"partitions: {expected_num_parts}. 
If these partitions look \"\n f\"right, check outer sharded_jit and/or other \"\n f\"with_sharding_constraint calls.\")\n else:\n for subjaxpr in core.jaxprs_in_params(eqn.params):\n expected_num_parts = _inner_partitions(subjaxpr, expected_num_parts)\n return expected_num_parts\n\n\ndef get_num_partitions(*partitions):\n partition_specs = tree_flatten(partitions)[0]\n if len(partition_specs) == 0:\n # Everything is specified as replicated (all Nones).\n return None\n num_partitions_set = {np.prod(spec) for spec in partition_specs}\n if len(num_partitions_set) > 1:\n raise ValueError(\n f\"All partition specs must use the same number of total partitions, \"\n f\"got {partitions}, with distinct number of partitions \"\n f\"{num_partitions_set} (the total number of partitions is the product \"\n f\"of a partition spec)\")\n assert len(num_partitions_set) == 1\n return num_partitions_set.pop()\n\n\ndef get_global_aval(local_aval, global_parts: PartitionsOrReplicated,\n local_parts: PartitionsOrReplicated):\n if local_aval is core.abstract_unit:\n return local_aval\n if global_parts is None:\n return local_aval\n assert local_parts is not None\n global_shape = [dim * _safe_div(ngparts, nlparts)\n for dim, ngparts, nlparts\n in safe_zip(local_aval.shape, global_parts, local_parts)]\n return local_aval.update(shape=global_shape)\n\n\ndef get_local_aval(global_aval, global_parts: PartitionsOrReplicated,\n local_parts: PartitionsOrReplicated):\n if global_aval is core.abstract_unit:\n return global_aval\n if global_parts is None:\n return global_aval\n assert local_parts is not None\n local_shape = [_safe_div(dim, _safe_div(ngparts, nlparts))\n for dim, ngparts, nlparts\n in safe_zip(global_aval.shape, global_parts, local_parts)]\n return global_aval.update(shape=local_shape)\n\n\ndef _safe_div(x, y):\n result, ragged = divmod(x, y)\n assert not ragged, f\"{x} % {y} != 0\"\n return result\n\n\nclass InputsHandler:\n __slots__ = (\"handler\", \"local_devices\", \"sharding_specs\", \"input_indices\")\n\n def __init__(self, local_devices, sharding_specs, input_indices):\n self.handler = partial(shard_args, local_devices, input_indices)\n self.local_devices = local_devices\n self.sharding_specs = sharding_specs\n self.input_indices = input_indices\n\n def __call__(self, input_buffers):\n return self.handler(input_buffers)\n\n\nclass ResultsHandler:\n __slots__ = (\"handlers\", \"out_specs\", \"out_indices\", \"unmapped_local_out_avals\")\n\n def __init__(self, handlers, out_specs, out_indices, unmapped_local_out_avals):\n self.out_specs = out_specs\n self.out_indices = out_indices\n self.handlers = handlers\n self.unmapped_local_out_avals = unmapped_local_out_avals\n\n def __call__(self, out_bufs):\n return [h(bufs) for h, bufs in safe_zip(self.handlers, out_bufs)]\n\n\ndef avals_to_results_handler(\n nrep,\n npart,\n out_specs,\n unmapped_local_out_avals,\n global_out_avals: Optional[Sequence[ShapedArray]] = None,\n out_axis_resources: Optional[Sequence[AxisResource]] = None,\n global_mesh=None):\n out_indices = [spec_to_indices(aval.shape, spec)\n if aval is not core.abstract_unit else None\n for aval, spec in safe_zip(unmapped_local_out_avals, out_specs)] # pytype: disable=attribute-error\n if global_out_avals and out_axis_resources and global_mesh:\n handlers = [\n aval_to_result_handler(spec, idcs, aval, global_aval, out_axis, global_mesh)\n for spec, idcs, aval, global_aval, out_axis in safe_zip(\n out_specs, out_indices, unmapped_local_out_avals,\n global_out_avals, out_axis_resources)\n ]\n 
else:\n handlers = [\n aval_to_result_handler(spec, idcs, aval)\n for spec, idcs, aval in safe_zip(out_specs, out_indices,\n unmapped_local_out_avals)\n ]\n\n return ResultsHandler(handlers, out_specs, out_indices, unmapped_local_out_avals)\n\ndef replicate(val, axis_size, nrep, devices=None, backend=None, in_axis=0):\n \"\"\"Replicates ``val`` across multiple devices.\n\n Args:\n val: the value to be replicated.\n axis_size: the length of the output, i.e. the logical number of replicas to\n create. Usually equal to `nrep`, but in the case of nested pmaps, `nrep` may\n be a multiple of `axis_size`.\n nrep: the number of replicas to create. If ``devices`` is set, must be equal\n to ``len(devices)``.\n devices: the devices to replicate across. If None, ``nrep`` will be used to\n generate a default device assignment.\n backend: string specifying which backend to use.\n in_axis: axis along which the value is to be replicated.\n\n Returns:\n A ShardedDeviceArray of length `axis_size` where each shard is equal to\n ``val``.\n \"\"\"\n device_count = (len(devices) if devices else xb.local_device_count(backend))\n if nrep > device_count:\n msg = (\"Cannot replicate across %d replicas because only %d local devices \"\n \"are available.\" % (nrep, device_count))\n if devices:\n msg += (\" (local devices = %s)\"\n % \", \".join(map(str, devices)) if devices else str(None))\n raise ValueError(msg)\n\n if devices is None:\n assert nrep is not None\n # TODO(skye): use different device assignment on multihost\n devices = xb.get_backend(backend).get_default_device_assignment(nrep)\n assert nrep == len(devices)\n\n aval = xla.abstractify(val) # type: ShapedArray\n if in_axis is not None:\n replicated_aval = aval.update(shape=(axis_size,) + aval.shape)\n else:\n replicated_aval = aval\n # TODO(skye): figure out how partitioning should work here\n sharding_spec = _pmap_sharding_spec(nrep, axis_size, 1, None, aval, in_axis)\n device_buffers = device_put(val, devices, replicate=True)\n return make_sharded_device_array(replicated_aval, sharding_spec,\n device_buffers)\n\n\ndef _pmap_sharding_spec(nrep, axis_size, npart, parts, sharded_aval,\n map_axis: Optional[int]) -> ShardingSpec:\n \"\"\"Sharding spec for arguments or results of a pmap.\n Args:\n nrep: number of local XLA replicas (product of local axis sizes)\n axis_size: local axis size for outer pmap\n npart: total number of XLA partitions (required by sharded_jit calls)\n parts: the partitioning of the value or None\n sharded_aval: the aval of the value inside the outer pmap, an instance of\n a ShapedArray.\n map_axis: the axis along which the value is mapped in the outer pmap\n Returns:\n A ShardingSpec.\n \"\"\"\n assert isinstance(sharded_aval, ShapedArray), sharded_aval\n replication_factor, ragged = divmod(nrep, axis_size)\n assert not ragged\n # get the sharding spec from inner sharded_jits as if we weren't in a pmap\n pspec = partitioned_sharding_spec(npart, parts, sharded_aval)\n maybe_replicate = () if replication_factor == 1 else (Replicated(replication_factor),)\n if map_axis is not None:\n sharded_in_axis = sum(not isinstance(s, NoSharding) for s in pspec.sharding[:map_axis])\n def shift_sharded_axis(a: MeshDimAssignment):\n if isinstance(a, ShardedAxis) and a.axis >= sharded_in_axis:\n return ShardedAxis(a.axis + 1)\n return a\n # replication_factor represents the product of inner pmaps, so it goes\n # after the outer pmapped axis at index 0\n return ShardingSpec(\n sharding=tuple_insert(pspec.sharding, map_axis, Unstacked(axis_size)),\n 
mesh_mapping=it.chain([ShardedAxis(sharded_in_axis)],\n maybe_replicate,\n map(shift_sharded_axis, pspec.mesh_mapping)))\n else:\n return ShardingSpec(\n sharding=pspec.sharding,\n mesh_mapping=(Replicated(axis_size),) + maybe_replicate + pspec.mesh_mapping)\n\ndef partitioned_sharding_spec(num_partitions: int,\n partitions: Optional[Sequence[int]],\n aval) -> ShardingSpec:\n if partitions is None:\n maybe_replicate = () if num_partitions == 1 else (Replicated(num_partitions),)\n return ShardingSpec(\n sharding=[_UNSHARDED_INSTANCE] * len(aval.shape),\n mesh_mapping=maybe_replicate)\n else:\n assert len(partitions) == len(aval.shape)\n return ShardingSpec(\n # Chunked expects a list of integers\n sharding=map(Chunked, [[x] for x in partitions]),\n mesh_mapping=map(ShardedAxis, range(len(partitions))))\n\n\ndef execute_replicated(compiled, backend, in_handler, out_handler, *args):\n input_bufs = in_handler(args)\n out_bufs = compiled.execute_sharded_on_local_devices(input_bufs)\n if dispatch.needs_check_special():\n for bufs in out_bufs:\n dispatch.check_special(\"parallel computation\", bufs)\n return out_handler(out_bufs)\n\n\nxla_pmap_p = core.MapPrimitive('xla_pmap')\nxla_pmap = xla_pmap_p.bind\nxla_pmap_p.def_impl(xla_pmap_impl)\n\n# Set param update handlers to update `donated_invars` just like xla_call_p\npe.call_param_updaters[xla_pmap_p] = pe.call_param_updaters[xla.xla_call_p]\nad.call_param_updaters[xla_pmap_p] = ad.call_param_updaters[xla.xla_call_p]\nad.call_transpose_param_updaters[xla_pmap_p] = \\\n ad.call_transpose_param_updaters[xla.xla_call_p]\n\ndef _pmap_translation_rule(c, axis_env,\n in_nodes, name_stack, axis_name, axis_size,\n global_axis_size, devices, name,\n call_jaxpr, *, backend=None, in_axes, out_axes,\n donated_invars, global_arg_shapes):\n del donated_invars # Unused.\n # We in-line here rather than generating a Call HLO as in the xla_call\n # translation rule just because the extra tuple stuff is a pain.\n if axis_env.names and devices is not None:\n raise ValueError(\"Nested pmap with explicit devices argument.\")\n if global_axis_size is None:\n global_axis_size = axis_size\n new_env = xla.extend_axis_env(axis_env, axis_name, global_axis_size)\n # Shard the in_nodes that are mapped\n in_avals = [v.aval for v in call_jaxpr.invars]\n in_nodes_sharded = (\n _xla_shard(c, aval, new_env, in_node, in_axis) if in_axis is not None else in_node\n for aval, in_node, in_axis in safe_zip(in_avals, in_nodes, in_axes))\n\n with maybe_extend_axis_env(axis_name, global_axis_size, None): # type: ignore\n ctx = xla.TranslationContext(\n c, backend, new_env,\n extend_name_stack(name_stack, wrap_name(name, 'pmap')))\n sharded_outs = xla.jaxpr_subcomp(ctx, call_jaxpr, (), *in_nodes_sharded)\n out_avals = [v.aval for v in call_jaxpr.outvars]\n outs = [_xla_unshard(c, aval, new_env, out_axis, shard, backend=backend)\n for aval, out_axis, shard in safe_zip(out_avals, out_axes, sharded_outs)]\n return xops.Tuple(c, outs)\n\nxla.call_translations[xla_pmap_p] = _pmap_translation_rule\nad.primitive_transposes[xla_pmap_p] = partial(ad.map_transpose, xla_pmap_p)\n\ndef _xla_shard(c, aval, axis_env, x, in_axis):\n if aval is core.abstract_unit:\n return x\n elif aval is core.abstract_token:\n return x\n elif isinstance(aval, ShapedArray):\n dims = list(c.get_shape(x).dimensions())\n zero = xops.Constant(c, np.zeros((), dtype=np.uint32))\n idxs = [zero] * (len(dims) - 1)\n idxs.insert(in_axis, _unravel_index(c, axis_env))\n dims_unsqueezed = dims.copy()\n dims_unsqueezed[in_axis] = 1\n 
dims_squeezed = dims.copy()\n dims_squeezed.pop(in_axis)\n return xops.Reshape(xops.DynamicSlice(x, idxs, dims_unsqueezed), dims_squeezed)\n else:\n raise TypeError((aval, c.get_shape(x)))\n\n# TODO(b/110096942): more efficient gather\ndef _xla_unshard(c, aval, axis_env, out_axis, x, backend):\n if aval is core.abstract_unit:\n return x\n elif aval is core.abstract_token:\n return x\n elif isinstance(aval, ShapedArray):\n # TODO(mattjj): remove this logic when AllReduce PRED supported on CPU / GPU\n convert_bool = (np.issubdtype(aval.dtype, np.bool_)\n and xb.get_backend(backend).platform in ('cpu', 'gpu'))\n if convert_bool:\n x = xops.ConvertElementType(\n x, xla.dtype_to_primitive_type(np.dtype(np.float32)))\n\n xla_shape = c.get_shape(x)\n dims = list(xla_shape.dimensions())\n padded = xops.Broadcast(\n xops.Constant(c, np.array(0, xla_shape.numpy_dtype())),\n [axis_env.sizes[-1]] + dims)\n zero = xops.Constant(c, np.zeros((), dtype=np.uint32))\n idxs = [_unravel_index(c, axis_env)] + [zero] * len(dims)\n padded = xops.DynamicUpdateSlice(padded, xops.Reshape(x, [1] + dims), idxs)\n replica_groups_protos = xc.make_replica_groups(\n xla.axis_groups(axis_env, axis_env.names[-1]))\n out = xops.CrossReplicaSum(padded, replica_groups_protos)\n if out_axis != 0:\n # TODO(apaszke,mattjj): Change the indices to DynamicUpdateSlice instead\n perm = list(range(1, len(dims)))\n perm.insert(out_axis, 0)\n out = xops.Transpose(out, perm)\n\n # TODO(mattjj): remove this logic when AllReduce PRED supported on CPU / GPU\n if convert_bool:\n nonzero = xops.Ne(out, xops.Constant(c, np.array(0, dtype=np.float32)))\n out = xops.ConvertElementType(\n nonzero, xla.dtype_to_primitive_type(np.dtype(np.bool_)))\n return out\n else:\n raise TypeError((aval, c.get_shape(x)))\n\ndef _unravel_index(c, axis_env):\n div = xops.Constant(c, np.array(axis_env.nreps // prod(axis_env.sizes),\n np.uint32))\n mod = xops.Constant(c, np.array(axis_env.sizes[-1], np.uint32))\n return xops.Rem(xops.Div(xops.ReplicaId(c), div), mod)\n\n# ------------------- xmap -------------------\n\nclass Mesh:\n\n def __init__(self, devices: np.ndarray, axis_names: Sequence[MeshAxisName]):\n assert devices.ndim == len(axis_names)\n # TODO: Make sure that devices are unique? 
At least with the quick and\n # dirty check that the array size is not larger than the number of\n # available devices?\n self.devices = devices.copy()\n self.devices.flags.writeable = False\n self.axis_names = tuple(axis_names)\n\n def __eq__(self, other):\n if not isinstance(other, Mesh):\n return False\n return (self.axis_names == other.axis_names and\n np.array_equal(self.devices, other.devices))\n\n def __hash__(self):\n if not hasattr(self, '_hash'):\n self._hash = hash((self.axis_names, tuple(self.devices.flat)))\n return self._hash\n\n def __setattr__(self, name, value):\n if hasattr(self, name):\n raise RuntimeError(\"Cannot reassign attributes of immutable mesh objects\")\n super().__setattr__(name, value)\n\n @property\n def shape(self):\n return OrderedDict((name, size) for name, size in safe_zip(self.axis_names, self.devices.shape))\n\n @property\n def size(self):\n return np.prod(list(self.shape.values()))\n\n @property\n def empty(self):\n return self.devices.ndim == 0\n\n @property\n def is_multi_process(self):\n return self.shape != self.local_mesh.shape\n\n @maybe_cached_property\n def local_mesh(self):\n if self.empty:\n return self\n process_index = xb.process_index()\n is_local_device = np.vectorize(\n lambda d: d.process_index == process_index, otypes=[bool])(self.devices)\n subcube_indices = []\n # We take the smallest slice of each dimension that doesn't skip any local device.\n for axis in range(self.devices.ndim):\n other_axes = tuple_delete(tuple(range(self.devices.ndim)), axis)\n # NOTE: This re-reduces over many axes multiple times, so we could definitely\n # optimize it, but I hope it won't be a bottleneck anytime soon.\n local_slices = is_local_device.any(other_axes, keepdims=False)\n nonzero_indices = np.flatnonzero(local_slices)\n start, end = int(np.min(nonzero_indices)), int(np.max(nonzero_indices))\n subcube_indices.append(slice(start, end + 1))\n subcube_indices = tuple(subcube_indices)\n # We only end up with all conditions being true if the local devices formed a\n # subcube of the full array. 
This is because we were biased towards taking a\n # \"hull\" spanned by the devices, and in case the local devices don't form a\n # subcube that hull will contain non-local devices.\n if not is_local_device[subcube_indices].all():\n raise ValueError(\"Devices connected to a single host must form a contiguous \"\n \"subcube of the global device mesh\")\n return Mesh(self.devices[subcube_indices], self.axis_names)\n\n @property\n def device_ids(self):\n assert not self.empty\n return np.vectorize(lambda d: d.id, otypes=[int])(self.devices)\n\n def __repr__(self):\n if self.empty:\n return \"Mesh([], ())\"\n return f\"Mesh({self.device_ids!r}, {self.axis_names!r})\"\n\n @maybe_cached_property\n def local_devices(self):\n process_index = xb.process_index()\n return [d for d in self.devices.flat if d.process_index == process_index]\n\n def local_to_global(self, axes: ArrayMapping, aval):\n return untile_aval_nd(self.shape, axes,\n tile_aval_nd(self.local_mesh.shape, axes, aval))\n\n def global_to_local(self, axes: ArrayMapping, aval):\n return untile_aval_nd(self.local_mesh.shape, axes,\n tile_aval_nd(self.shape, axes, aval))\n\n\ndef tile_aval_nd(axis_sizes, in_axes: ArrayMapping, aval, tiling_sizes=None):\n if tiling_sizes is None:\n tiling_sizes = axis_sizes\n if aval is core.abstract_unit:\n return aval\n assert isinstance(aval, ShapedArray)\n shape = list(aval.shape)\n named_shape = dict(aval.named_shape)\n for name, axis in in_axes.items():\n assert shape[axis] % tiling_sizes[name] == 0\n assert name not in named_shape\n named_shape[name] = axis_sizes[name]\n shape[axis] //= tiling_sizes[name]\n return aval.update(shape=tuple(shape), named_shape=named_shape)\n\ndef untile_aval_nd(axis_sizes, out_axes: ArrayMapping, aval):\n if aval is core.abstract_unit:\n return aval\n assert isinstance(aval, ShapedArray)\n shape = list(aval.shape)\n named_shape = dict(aval.named_shape)\n for name, axis in out_axes.items():\n shape[axis] *= axis_sizes[name]\n named_shape.pop(name, None) # The name might be missing --- it's a broadcast.\n return aval.update(shape=tuple(shape), named_shape=named_shape)\n\n\nclass SPMDBatchTrace(batching.BatchTrace):\n def get_axis_primitive_batcher(self, primitive, frame):\n if primitive in spmd_primitive_batchers:\n return partial(spmd_primitive_batchers[primitive],\n frame.size, frame.name, frame.main_trace.trace_type)\n return super().get_axis_primitive_batcher(primitive, frame)\n\n\nspmd_primitive_batchers: Dict[core.Primitive, Callable] = {}\n\n\ndef vtile_by_mesh(fun: lu.WrappedFun,\n mesh: Mesh,\n in_axes: Sequence[ArrayMapping],\n out_axes: Sequence[ArrayMapping]):\n # We vectorize in reversed order, because vmap is often biased towards\n # moving the batch axis to the front, and this way of stacking transforms\n # will order the batch axes according to the mesh axis order.\n # Not strictly necessary, but seems nicer than reversing it?\n for name, size in reversed(mesh.shape.items()):\n fun = batching.vtile(fun,\n tuple(a.get(name, None) for a in in_axes),\n tuple(a.get(name, None) for a in out_axes),\n tile_size=size,\n axis_name=name,\n main_type=SPMDBatchTrace)\n return fun\n\ndef lower_mesh_computation(\n fun: lu.WrappedFun,\n transformed_name: str,\n mesh: Mesh,\n in_axes: Sequence[ArrayMapping],\n out_axes: Union[Sequence[ArrayMapping], Callable[[], Sequence[ArrayMapping]]],\n donated_invars: Sequence[bool],\n spmd_lowering: bool,\n local_in_untiled_avals: Sequence[core.ShapedArray],\n tile_by_mesh_axes: bool):\n assert not mesh.empty\n backend = 
xb.get_device_backend(mesh.devices.flat[0])\n\n local_mesh = mesh.local_mesh\n global_axis_sizes = mesh.shape\n local_axis_sizes = local_mesh.shape\n\n log_priority = logging.WARNING if config.jax_log_compiles else logging.DEBUG\n logging.log(log_priority,\n \"Compiling %s (%d) for %s mesh with args %s. Argument mapping: \"\n \"%s.\",\n getattr(fun, '__name__', '<unnamed function>'), id(fun),\n tuple(global_axis_sizes.items()), local_in_untiled_avals,\n in_axes)\n\n # 1. Trace to jaxpr and preprocess/verify it\n # Note that we tile by the local axis sizes, but use global axis sizes for named_shape\n in_tiled_avals = [tile_aval_nd(global_axis_sizes, aval_in_axes, aval,\n tiling_sizes=local_axis_sizes)\n for aval, aval_in_axes in safe_zip(local_in_untiled_avals, in_axes)]\n if spmd_lowering:\n # TODO: Consider handling xmap's 'vectorize' in here. We can vmap once instead of vtile twice!\n if tile_by_mesh_axes:\n assert not callable(out_axes)\n fun = vtile_by_mesh(fun, mesh, in_axes, out_axes)\n global_in_untiled_avals = [untile_aval_nd(global_axis_sizes, aval_in_axes, aval)\n for aval, aval_in_axes in safe_zip(in_tiled_avals, in_axes)]\n in_jaxpr_avals = global_in_untiled_avals\n else:\n assert tile_by_mesh_axes\n in_jaxpr_avals = in_tiled_avals\n with core.extend_axis_env_nd(mesh.shape.items()):\n jaxpr, out_jaxpr_avals, consts = pe.trace_to_jaxpr_final(fun, in_jaxpr_avals)\n if callable(out_axes):\n out_axes = out_axes()\n assert len(out_axes) == len(out_jaxpr_avals)\n if spmd_lowering:\n global_out_untiled_avals = out_jaxpr_avals\n out_tiled_avals = [tile_aval_nd(global_axis_sizes, aval_out_axes, aval)\n for aval, aval_out_axes in safe_zip(global_out_untiled_avals, out_axes)]\n else:\n out_tiled_avals = out_jaxpr_avals\n local_out_untiled_avals = [untile_aval_nd(local_axis_sizes, aval_out_axes, aval)\n for aval, aval_out_axes in safe_zip(out_tiled_avals, out_axes)]\n _sanitize_mesh_jaxpr(jaxpr)\n if local_mesh.shape != mesh.shape:\n check_multihost_collective_allowlist(jaxpr)\n jaxpr = dispatch.apply_outfeed_rewriter(jaxpr)\n\n # 3. 
Build up the HLO\n c = xc.XlaBuilder(f\"xmap_{fun.__name__}\")\n xla_consts = map(partial(xla.pyval_to_ir_constant, c), consts)\n tuple_args = len(in_jaxpr_avals) > 100 # pass long arg lists as tuple for TPU\n in_partitions: Optional[List]\n if spmd_lowering:\n replicated_args = [False] * len(in_jaxpr_avals)\n global_sharding_spec = mesh_sharding_specs(global_axis_sizes, mesh.axis_names)\n in_partitions = [global_sharding_spec(aval, aval_in_axes).sharding_proto()\n if aval is not core.abstract_unit else None\n for aval, aval_in_axes in safe_zip(global_in_untiled_avals, in_axes)]\n out_partitions = [global_sharding_spec(aval, aval_out_axes).sharding_proto()\n for aval, aval_out_axes in safe_zip(global_out_untiled_avals, out_axes)]\n partitions_proto = True\n axis_env = xla.AxisEnv(nreps=1, names=(), sizes=()) # All named axes have been vmapped\n else:\n replicated_args = [not axis for axis in in_axes]\n in_partitions = None\n partitions_proto = False\n axis_env = xla.AxisEnv(nreps=mesh.size,\n names=tuple(global_axis_sizes.keys()),\n sizes=tuple(global_axis_sizes.values()))\n xla_args, donated_invars = xla._xla_callable_args(\n c, in_jaxpr_avals, tuple_args,\n replicated=replicated_args,\n partitions=in_partitions,\n partitions_proto=partitions_proto,\n donated_invars=donated_invars)\n with core.extend_axis_env_nd(mesh.shape.items()):\n ctx = xla.TranslationContext(\n c, backend.platform, axis_env,\n extend_name_stack(wrap_name(transformed_name, 'xmap')))\n out_nodes = xla.jaxpr_subcomp(ctx, jaxpr, xla_consts, *xla_args)\n if spmd_lowering:\n out_partitions_t = xb.tuple_sharding_proto(out_partitions)\n out_tuple = xb.with_sharding_proto(c, out_partitions_t, xops.Tuple, c, out_nodes)\n else:\n out_tuple = xops.Tuple(c, out_nodes)\n\n if backend.platform in (\"gpu\", \"tpu\"):\n xla.set_up_aliases(c, xla_args, c.GetShape(out_tuple), donated_invars,\n tuple_args)\n # TODO: Warn about unused donations?\n\n built = c.Build(out_tuple)\n return MeshComputation(\n built, donated_invars, mesh, local_in_untiled_avals,\n local_out_untiled_avals, (out_jaxpr_avals if spmd_lowering else None),\n in_axes, out_axes, spmd_lowering, tuple_args)\n\n\nclass MeshComputation:\n def __init__(self, hlo, donated_invars, *compile_args):\n self._executable = None\n self._hlo = hlo\n self._donated_invars = donated_invars\n self.compile_args = compile_args\n\n def hlo(self):\n # this is a method for api consistency with xla.XlaComputation\n return self._hlo\n\n def compile(self,\n _allow_propagation_to_outputs : bool = False,\n _allow_compile_replicated : bool = True) -> 'MeshExecutable':\n if self._executable is None:\n self._executable = MeshExecutable(\n self._hlo, *self.compile_args,\n _allow_propagation_to_outputs=_allow_propagation_to_outputs,\n _allow_compile_replicated=_allow_compile_replicated) # type: ignore\n return self._executable\n\n\nclass MeshExecutable:\n __slots__ = ['xla_executable', 'unsafe_call', '_local_in_untiled_avals']\n\n def __init__(self,\n computation: xc.XlaComputation,\n mesh: Mesh,\n local_in_untiled_avals: Sequence[ShapedArray],\n local_out_untiled_avals: Sequence[ShapedArray],\n global_out_avals: Optional[Sequence[ShapedArray]],\n in_axes: Sequence[ArrayMapping],\n out_axes: Sequence[ArrayMapping],\n spmd_lowering: bool, tuple_args: bool,\n _allow_propagation_to_outputs: bool,\n _allow_compile_replicated: bool):\n assert not mesh.empty\n backend = xb.get_device_backend(mesh.devices.flat[0])\n\n local_mesh = mesh.local_mesh\n local_axis_sizes = local_mesh.shape\n if spmd_lowering:\n 
num_replicas, num_partitions = 1, mesh.size\n num_local_replicas, num_local_partitions = 1, local_mesh.size\n else:\n num_replicas, num_partitions = mesh.size, 1\n num_local_replicas, num_local_partitions = local_mesh.size, 1\n device_assignment = mesh.device_ids.reshape((num_replicas, num_partitions))\n compile_options = xb.get_compile_options(\n num_replicas=num_replicas,\n num_partitions=num_partitions,\n device_assignment=device_assignment,\n use_spmd_partitioning=spmd_lowering,\n )\n compile_options.parameter_is_tupled_arguments = tuple_args\n compile_options.executable_build_options.allow_spmd_sharding_propagation_to_output = \\\n _allow_propagation_to_outputs\n\n local_sharding_spec = mesh_sharding_specs(local_axis_sizes, mesh.axis_names)\n local_input_specs = [local_sharding_spec(aval, aval_in_axes)\n if aval is not core.abstract_unit else None\n for aval, aval_in_axes in safe_zip(local_in_untiled_avals, in_axes)]\n input_indices = [spec_to_indices(aval.shape, spec)\n if spec is not None else None\n for aval, spec in safe_zip(local_in_untiled_avals, local_input_specs)]\n\n local_output_specs = [local_sharding_spec(aval, aval_out_axes)\n for aval, aval_out_axes in safe_zip(local_out_untiled_avals, out_axes)]\n out_axis_resources = [array_mapping_to_axis_resources(o) for o in out_axes]\n handle_outs = avals_to_results_handler(num_local_replicas, num_local_partitions,\n local_output_specs, local_out_untiled_avals,\n global_out_avals, out_axis_resources, mesh)\n\n if _allow_compile_replicated and hasattr(backend, \"compile_replicated\"):\n self.unsafe_call = backend.compile_replicated(\n computation, compile_options,\n input_indices, local_input_specs,\n handle_outs)\n else:\n compiled = dispatch.compile_or_get_cached(backend, computation, compile_options)\n handle_args = InputsHandler(compiled.local_devices(), local_input_specs,\n input_indices)\n self.unsafe_call = partial(execute_replicated, compiled, backend, handle_args, handle_outs)\n self.xla_executable = compiled\n\n self._local_in_untiled_avals = local_in_untiled_avals\n\n def call(self, *args):\n arg_avals = map(xla.abstractify, args)\n ref_avals = self._local_in_untiled_avals\n dispatch.check_arg_avals_for_call(ref_avals, arg_avals)\n return self.unsafe_call(*args)\n\n\n_forbidden_primitives = {\n 'xla_pmap': 'pmap',\n 'sharded_call': 'sharded_jit',\n}\ndef _sanitize_mesh_jaxpr(jaxpr):\n if isinstance(jaxpr, core.ClosedJaxpr):\n jaxpr = jaxpr.jaxpr\n for eqn in jaxpr.eqns:\n if eqn.primitive.name in _forbidden_primitives:\n raise RuntimeError(f\"Nesting {_forbidden_primitives[eqn.primitive.name]} \"\n f\"inside xmaps not supported!\")\n core.traverse_jaxpr_params(_sanitize_mesh_jaxpr, eqn.params)\n\n\ncustom_resource_typing_rules: Dict[core.Primitive, Callable] = {}\n\ndef resource_typecheck(jaxpr, resource_env, axis_resources, what_jaxpr_thunk):\n if isinstance(jaxpr, core.ClosedJaxpr):\n jaxpr = jaxpr.jaxpr\n def _check_aval(aval, what_thunk):\n if not hasattr(aval, 'named_shape'):\n return\n resource_to_axis = {}\n for axis in aval.named_shape:\n for resource in axis_resources[axis]:\n if resource in resource_to_axis:\n other_axis = resource_to_axis[resource]\n axis, other_axis = sorted([str(axis), str(other_axis)])\n raise JAXTypeError(\n f\"Axes `{axis}` and `{other_axis}` are both mapped to the \"\n f\"resource `{resource}`, but they coincide in the named_shape \"\n f\"of {what_thunk()}\")\n resource_to_axis[resource] = axis\n\n what_thunk = lambda: (f\"an input to {what_jaxpr_thunk()}\")\n for v in jaxpr.constvars:\n 
_check_aval(v.aval, what_thunk)\n for v in jaxpr.invars:\n _check_aval(v.aval, what_thunk)\n what_thunk = lambda: (f\"a value returned from a primitive {eqn.primitive} created \"\n f\"at {source_info_util.summarize(eqn.source_info)}\")\n rec_what_jaxpr_thunk = lambda: (f\"a primitive {eqn.primitive} created at \"\n f\"{source_info_util.summarize(eqn.source_info)}\")\n for eqn in jaxpr.eqns:\n typing_rule = custom_resource_typing_rules.get(eqn.primitive, None)\n if typing_rule:\n typing_rule([v.aval for v in eqn.invars], eqn.params, eqn.source_info,\n resource_env, axis_resources)\n else:\n core.traverse_jaxpr_params(partial(resource_typecheck,\n resource_env=resource_env,\n axis_resources=axis_resources,\n what_jaxpr_thunk=rec_what_jaxpr_thunk),\n eqn.params)\n for v in eqn.outvars:\n _check_aval(v.aval, what_thunk)\n\n\ndef mesh_sharding_specs(axis_sizes, axis_names):\n mesh_axis_pos = {name: i for i, name in enumerate(axis_names)}\n # NOTE: This takes in the non-sharded avals!\n def mk_sharding_spec(aval, aval_axes):\n mesh_mapping = [Replicated(axis_size) for axis_size in axis_sizes.values()]\n if aval is core.abstract_token:\n assert not aval_axes\n return ShardingSpec([], mesh_mapping)\n sharding = [_UNSHARDED_INSTANCE] * len(aval.shape)\n next_sharded_axis = 0\n aval_shape = list(aval.shape)\n # NOTE: sorted is stable, which is important when multiple resources\n # map to the same axis.\n for name, axis in sorted(aval_axes.items(), key=lambda x: x[1]):\n assert aval_shape[axis] % axis_sizes[name] == 0, (axis_sizes[name], aval.shape[axis])\n aval_shape[axis] //= axis_sizes[name]\n if isinstance(sharding[axis], NoSharding):\n sharding[axis] = Chunked([])\n sharding[axis] = Chunked(sharding[axis].chunks + [axis_sizes[name]])\n assert isinstance(mesh_mapping[mesh_axis_pos[name]], Replicated), \\\n \"Value mapped to the same mesh axis twice\"\n mesh_mapping[mesh_axis_pos[name]] = ShardedAxis(next_sharded_axis)\n next_sharded_axis += 1\n return ShardingSpec(sharding, mesh_mapping)\n return mk_sharding_spec\n\n\n@contextmanager\ndef maybe_extend_axis_env(*args, **kwargs):\n with core.extend_axis_env(*args, **kwargs):\n yield\n\nclass DynamicAxisEnvFrame(object):\n __slots__ = [\"name\", \"pmap_trace\", \"hard_size\"]\n def __init__(self, name, pmap_trace, hard_size):\n self.name = name\n self.pmap_trace = pmap_trace\n self.hard_size = hard_size\n\nclass DynamicAxisEnv(list):\n def __contains__(self, axis_name):\n return axis_name in (frame.name for frame in self)\n\n def __getitem__(self, axis_name):\n if axis_name not in self:\n raise NameError(\"unbound axis name: {}\".format(axis_name))\n for frame in reversed(self):\n if frame.name == axis_name:\n return frame\n\n raise AssertionError\n\n @property\n def sizes(self):\n return tuple(frame.hard_size for frame in self)\n\n @property\n def nreps(self):\n return prod(frame.hard_size for frame in self)\n\nclass _ThreadLocalState(threading.local):\n def __init__(self):\n self.dynamic_axis_env = DynamicAxisEnv()\n\n_thread_local_state = _ThreadLocalState()\n\ndef device_put(x, devices: Sequence[xb.xla_client.Device], replicate: bool=False) -> List[xb.xla_client.Buffer]:\n \"\"\"Call device_put on a sequence of devices and return a flat sequence of buffers.\"\"\"\n if replicate:\n return list(it.chain.from_iterable(dispatch.device_put(x, device) for device in devices))\n else:\n return list(it.chain.from_iterable(dispatch.device_put(val, device) for val, device in safe_zip(x, devices)))\n"
] |
[
[
"numpy.max",
"numpy.array",
"numpy.empty",
"numpy.asarray",
"numpy.array_equal",
"numpy.vectorize",
"numpy.zeros",
"numpy.min",
"numpy.prod",
"numpy.issubdtype",
"numpy.broadcast_to",
"numpy.dtype",
"numpy.flatnonzero"
]
] |
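The partition bookkeeping threaded through the pmap lowering in the row above (get_num_partitions, get_local_aval, _safe_div) reduces to exact integer division. A minimal, self-contained sketch of that arithmetic, using plain NumPy only and hypothetical example shapes (no JAX required; global_parts/local_parts mirror the names used in get_local_aval):

import numpy as np

def safe_div(x, y):
    # Mirrors _safe_div above: integer division that asserts no remainder.
    result, ragged = divmod(x, y)
    assert not ragged, f"{x} % {y} != 0"
    return result

# Hypothetical example: an (8, 6) global value partitioned 4x2 across all
# hosts, of which this host holds a 2x2 sub-grid of partitions.
global_shape = (8, 6)
global_parts = (4, 2)
local_parts = (2, 2)

# As in get_local_aval: each dim shrinks by the ratio of global to local parts.
local_shape = [dim // safe_div(g, l)
               for dim, g, l in zip(global_shape, global_parts, local_parts)]
assert local_shape == [4, 6]

# As in get_num_partitions: the total partition count is the product of a spec.
assert np.prod(global_parts) == 8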
khramtsova/federated
|
[
"88b3ca65204a9922696ccefd774ece03ebf5cc8e"
] |
[
"tensorflow_federated/python/core/impl/compiler/type_serialization.py"
] |
[
"# Lint as: python3\n# Copyright 2018, The TensorFlow Federated Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\"Utilities for serializing and deserializing TFF computation_types.\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport tensorflow as tf\n\nfrom tensorflow_federated.proto.v0 import computation_pb2 as pb\nfrom tensorflow_federated.python.common_libs import anonymous_tuple\nfrom tensorflow_federated.python.common_libs import py_typecheck\nfrom tensorflow_federated.python.core.api import computation_types\nfrom tensorflow_federated.python.core.impl.compiler import placement_literals\n\n\ndef _to_tensor_type_proto(tensor_type):\n py_typecheck.check_type(tensor_type, computation_types.TensorType)\n shape = tensor_type.shape\n if shape.dims is None:\n dims = None\n else:\n dims = [d.value if d.value is not None else -1 for d in shape.dims]\n return pb.TensorType(\n dtype=tensor_type.dtype.base_dtype.as_datatype_enum,\n dims=dims,\n unknown_rank=dims is None)\n\n\ndef _to_tensor_shape(tensor_type_proto):\n py_typecheck.check_type(tensor_type_proto, pb.TensorType)\n if not hasattr(tensor_type_proto, 'dims'):\n if tensor_type_proto.unknown_rank:\n return tf.TensorShape(None)\n else:\n return tf.TensorShape([])\n dims = [dim if dim >= 0 else None for dim in tensor_type_proto.dims]\n return tf.TensorShape(dims)\n\n\ndef serialize_type(type_spec):\n \"\"\"Serializes 'type_spec' as a pb.Type.\n\n NOTE: Currently only serialization for tensor, named tuple, sequence, and\n function types is implemented.\n\n Args:\n type_spec: Either an instance of computation_types.Type, or something\n convertible to it by computation_types.to_type(), or None.\n\n Returns:\n The corresponding instance of `pb.Type`, or `None` if the argument was\n `None`.\n\n Raises:\n TypeError: if the argument is of the wrong type.\n NotImplementedError: for type variants for which serialization is not\n implemented.\n \"\"\"\n # TODO(b/113112885): Implement serialization of the remaining types.\n if type_spec is None:\n return None\n target = computation_types.to_type(type_spec)\n py_typecheck.check_type(target, computation_types.Type)\n if isinstance(target, computation_types.TensorType):\n return pb.Type(tensor=_to_tensor_type_proto(target))\n elif isinstance(target, computation_types.SequenceType):\n return pb.Type(\n sequence=pb.SequenceType(element=serialize_type(target.element)))\n elif isinstance(target, computation_types.NamedTupleType):\n return pb.Type(\n tuple=pb.NamedTupleType(element=[\n pb.NamedTupleType.Element(name=e[0], value=serialize_type(e[1]))\n for e in anonymous_tuple.iter_elements(target)\n ]))\n elif isinstance(target, computation_types.FunctionType):\n return pb.Type(\n function=pb.FunctionType(\n parameter=serialize_type(target.parameter),\n result=serialize_type(target.result)))\n elif isinstance(target, computation_types.PlacementType):\n return pb.Type(placement=pb.PlacementType())\n elif 
isinstance(target, computation_types.FederatedType):\n if isinstance(target.placement, placement_literals.PlacementLiteral):\n return pb.Type(\n federated=pb.FederatedType(\n member=serialize_type(target.member),\n placement=pb.PlacementSpec(\n value=pb.Placement(uri=target.placement.uri)),\n all_equal=target.all_equal))\n else:\n raise NotImplementedError(\n 'Serialization of federated types with placement specifications '\n 'of type {} is not currently implemented.'.format(\n type(target.placement)))\n else:\n raise NotImplementedError\n\n\ndef deserialize_type(type_proto):\n \"\"\"Deserializes 'type_proto' as a computation_types.Type.\n\n NOTE: Currently only deserialization for tensor, named tuple, sequence, and\n function types is implemented.\n\n Args:\n type_proto: An instance of pb.Type or None.\n\n Returns:\n The corresponding instance of computation_types.Type (or None if the\n argument was None).\n\n Raises:\n TypeError: if the argument is of the wrong type.\n NotImplementedError: for type variants for which deserialization is not\n implemented.\n \"\"\"\n # TODO(b/113112885): Implement deserialization of the remaining types.\n if type_proto is None:\n return None\n py_typecheck.check_type(type_proto, pb.Type)\n type_variant = type_proto.WhichOneof('type')\n if type_variant is None:\n return None\n elif type_variant == 'tensor':\n tensor_proto = type_proto.tensor\n return computation_types.TensorType(\n dtype=tf.DType(tensor_proto.dtype),\n shape=_to_tensor_shape(tensor_proto))\n elif type_variant == 'sequence':\n return computation_types.SequenceType(\n deserialize_type(type_proto.sequence.element))\n elif type_variant == 'tuple':\n return computation_types.NamedTupleType([\n (lambda k, v: (k, v) if k else v)(e.name, deserialize_type(e.value))\n for e in type_proto.tuple.element\n ])\n elif type_variant == 'function':\n return computation_types.FunctionType(\n parameter=deserialize_type(type_proto.function.parameter),\n result=deserialize_type(type_proto.function.result))\n elif type_variant == 'placement':\n return computation_types.PlacementType()\n elif type_variant == 'federated':\n placement_oneof = type_proto.federated.placement.WhichOneof('placement')\n if placement_oneof == 'value':\n return computation_types.FederatedType(\n member=deserialize_type(type_proto.federated.member),\n placement=placement_literals.uri_to_placement_literal(\n type_proto.federated.placement.value.uri),\n all_equal=type_proto.federated.all_equal)\n else:\n raise NotImplementedError(\n 'Deserialization of federated types with placement spec {} '\n 'is not currently implemented.'.format(placement_oneof))\n else:\n raise NotImplementedError('Unknown type variant {}.'.format(type_variant))\n"
] |
[
[
"tensorflow.DType",
"tensorflow.TensorShape"
]
] |
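The tensor-shape round-trip in the row above (_to_tensor_type_proto / _to_tensor_shape) rests on one convention: unknown dimensions are serialized as -1 and deserialized back to None. A minimal sketch of just that convention, assuming TensorFlow is installed and using a plain Python list to stand in for the proto's dims field:

import tensorflow as tf

shape = tf.TensorShape([None, 3])  # rank-2 shape with one unknown dimension
# Serialize: unknown dims become -1 (as in _to_tensor_type_proto).
dims = [d.value if d.value is not None else -1 for d in shape.dims]
assert dims == [-1, 3]
# Deserialize: negative dims become None again (as in _to_tensor_shape).
restored = tf.TensorShape([d if d >= 0 else None for d in dims])
assert restored.as_list() == [None, 3]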
intel/mlir-extensions
|
[
"fb5cb57176648bb09fc80abde5fd1a415fe0fe04"
] |
[
"numba_dpcomp/numba_dpcomp/mlir/tests/test_numpy.py"
] |
[
"# Copyright 2021 Intel Corporation\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport numba\n# from numba_dpcomp import njit\nfrom numba_dpcomp import vectorize\nfrom numba_dpcomp.mlir.passes import print_pass_ir, get_print_buffer\nfrom numpy.testing import assert_equal, assert_allclose # for nans comparison\nimport numpy as np\nimport itertools\nimport math\nfrom functools import partial\nimport pytest\nfrom sklearn.datasets import make_regression\n\nfrom .utils import parametrize_function_variants\nfrom .utils import njit_cached as njit\n\nnp.seterr(all='ignore')\n\ndef _vectorize_reference(func, arg1):\n ret = np.empty(arg1.shape, arg1.dtype)\n for ind, val in np.ndenumerate(arg1):\n ret[ind] = func(val)\n return ret\n\n_arr_1d_bool = np.array([True,False,True,True,False,True,True,True])\n_arr_1d_int32 = np.array([1,2,3,4,5,6,7,8], dtype=np.int32)\n_arr_1d_int64 = np.array([1,2,3,4,5,6,7,8], dtype=np.int64)\n_arr_1d_float32 = np.array([1.0,2.1,3.2,4.3,5.4,6.5,7.6,8.7], dtype=np.float32)\n_arr_1d_float64 = np.array([1.0,2.1,3.2,4.3,5.4,6.5,7.6,8.7], dtype=np.float64)\n_arr_2d_int = np.array([[1,2,3,4],[5,6,7,8]])\n_arr_2d_float = np.array([[1.0,2.1,3.2,4.3],[5.4,6.5,7.6,8.7]])\n_test_arrays = [\n # _arr_1d_bool,\n _arr_1d_int32,\n _arr_1d_int64,\n _arr_1d_float32,\n _arr_1d_float64,\n _arr_2d_int,\n _arr_2d_float,\n _arr_2d_int.T,\n _arr_2d_float.T,\n]\n_test_arrays_ids = [\n # '_arr_1d_bool',\n '_arr_1d_int32',\n '_arr_1d_int64',\n '_arr_1d_float32',\n '_arr_1d_float64',\n '_arr_2d_int',\n '_arr_2d_float',\n '_arr_2d_int.T',\n '_arr_2d_float.T',\n]\n\n@parametrize_function_variants(\"py_func\", [\n 'lambda a: a.sum()',\n 'lambda a: a.min()',\n 'lambda a: a.max()',\n 'lambda a: np.sum(a)',\n 'lambda a: np.amax(a)',\n 'lambda a: np.amin(a)',\n 'lambda a: np.mean(a)',\n 'lambda a: np.sqrt(a)',\n 'lambda a: np.square(a)',\n 'lambda a: np.log(a)',\n 'lambda a: np.sin(a)',\n 'lambda a: np.cos(a)',\n 'lambda a: np.exp(a)',\n 'lambda a: np.tanh(a)',\n 'lambda a: a.size',\n 'lambda a: a.T',\n 'lambda a: a.T.T',\n 'lambda a: a.copy()',\n])\[email protected](\"arr\",\n _test_arrays,\n ids=_test_arrays_ids)\ndef test_unary(py_func, arr, request):\n jit_func = njit(py_func)\n assert_allclose(py_func(arr), jit_func(arr), rtol=1e-4, atol=1e-7)\n\n_test_binary_test_arrays = [\n # True,\n 1,\n 2.5,\n # np.array([True, False, True]),\n np.array([1,2,3], dtype=np.int32),\n np.array([1,2,3], dtype=np.int64),\n np.array([4.4,5.5,6.6], dtype=np.float32),\n np.array([4.4,5.5,6.6], dtype=np.float64),\n]\n_test_binary_test_arrays_ids = [\n # 'True',\n '1',\n '2.5',\n # 'np.array([True, False, True])',\n 'np.array([1,2,3], dtype=np.int32)',\n 'np.array([1,2,3], dtype=np.int64)',\n 'np.array([4.4,5.5,6.6], dtype=np.float32)',\n 'np.array([4.4,5.5,6.6], dtype=np.float64)',\n]\n\n@parametrize_function_variants(\"py_func\", [\n 'lambda a, b: np.add(a, b)',\n 'lambda a, b: a + b',\n 'lambda a, b: np.subtract(a, b)',\n 'lambda a, b: a - b',\n 'lambda a, b: np.multiply(a, b)',\n 'lambda a, b: a * b',\n 'lambda 
a, b: np.power(a, b)',\n 'lambda a, b: a ** b',\n 'lambda a, b: np.true_divide(a, b)',\n 'lambda a, b: a / b',\n 'lambda a, b: np.arctan2(a, b)',\n 'lambda a, b: np.minimum(a, b)',\n 'lambda a, b: np.maximum(a, b)',\n 'lambda a, b: a < b',\n 'lambda a, b: a <= b',\n 'lambda a, b: a > b',\n 'lambda a, b: a >= b',\n 'lambda a, b: a == b',\n 'lambda a, b: a != b',\n 'lambda a, b: np.where(a < b, a, b)',\n 'lambda a, b: np.outer(a, b)',\n])\[email protected](\"a\",\n _test_binary_test_arrays,\n ids=_test_binary_test_arrays_ids)\[email protected](\"b\",\n _test_binary_test_arrays,\n ids=_test_binary_test_arrays_ids)\ndef test_binary(py_func, a, b):\n jit_func = njit(py_func)\n # assert_equal(py_func(a,b), jit_func(a,b))\n assert_allclose(py_func(a,b), jit_func(a,b), rtol=1e-7, atol=1e-7)\n\n_test_logical_arrays = [\n True,\n False,\n np.array([True, False]),\n np.array([[False, True],[True, False]])\n]\n\n@parametrize_function_variants(\"py_func\", [\n 'lambda a: np.logical_not(a)',\n])\[email protected](\"a\", _test_logical_arrays)\ndef test_logical1(py_func, a):\n jit_func = njit(py_func)\n assert_equal(py_func(a), jit_func(a))\n\n@parametrize_function_variants(\"py_func\", [\n 'lambda a, b: np.logical_and(a, b)',\n 'lambda a, b: a & b',\n 'lambda a, b: np.logical_or(a, b)',\n 'lambda a, b: a | b',\n 'lambda a, b: np.logical_xor(a, b)',\n 'lambda a, b: a ^ b',\n])\[email protected](\"a\", _test_logical_arrays)\[email protected](\"b\", _test_logical_arrays)\ndef test_logical2(py_func, a, b):\n jit_func = njit(py_func)\n assert_equal(py_func(a,b), jit_func(a,b))\n\n_test_broadcast_test_arrays = [\n 1,\n np.array([1]),\n np.array([[1]]),\n np.array([[1,2],[3,4]]),\n np.array([5,6]),\n np.array([[5],[6]]),\n np.array([[5,6]]),\n]\n_test_broadcast_test_arrays_ids = [\n '1',\n 'np.array([1])',\n 'np.array([[1]])',\n 'np.array([[1,2],[3,4]])',\n 'np.array([5,6])',\n 'np.array([[5],[6]])',\n 'np.array([[5,6]])',\n]\[email protected](\"a\",\n _test_broadcast_test_arrays,\n ids=_test_broadcast_test_arrays_ids)\[email protected](\"b\",\n _test_broadcast_test_arrays,\n ids=_test_broadcast_test_arrays_ids)\ndef test_broadcast(a, b):\n def py_func(a, b):\n return np.add(a, b)\n\n jit_func = njit(py_func)\n assert_equal(py_func(a,b), jit_func(a,b))\n\ndef test_staticgetitem():\n def py_func(a):\n return a[1]\n\n jit_func = njit(py_func)\n arr = np.asarray([5,6,7])\n assert_equal(py_func(arr), jit_func(arr))\n\[email protected](\"i\",\n list(range(-2,3)))\ndef test_getitem1(i):\n def py_func(a, b):\n return a[b]\n\n jit_func = njit(py_func)\n arr = np.asarray([5,6,7])\n assert_equal(py_func(arr, i), jit_func(arr, i))\n\ndef test_getitem2():\n def py_func(a, b):\n return a[b]\n\n jit_func = njit(py_func)\n arr = np.asarray([[[1,2,3],[5,6,7]]])\n assert_equal(py_func(arr, 0), jit_func(arr, 0))\n\ndef test_getitem3():\n def py_func(a, b, c):\n return a[b, c]\n\n jit_func = njit(py_func)\n arr = np.asarray([[[1,2,3],[5,6,7]]])\n assert_equal(py_func(arr, 0, 0), jit_func(arr, 0, 0))\n\ndef test_unituple_getitem1():\n def py_func(a, b, c, i):\n t = (a,b,c)\n return t[i]\n\n jit_func = njit(py_func)\n assert_equal(py_func(1,2,3,1), jit_func(1,2,3,1))\n\ndef test_unituple_getitem2():\n def py_func(t, i):\n return t[i]\n\n jit_func = njit(py_func)\n t = (1,2,3)\n assert_equal(py_func(t,1), jit_func(t,1))\n\[email protected](\"arr\",\n _test_arrays,\n ids=_test_arrays_ids)\[email protected](\"mask\", [[True], [False], [True, False], [False, True]])\ndef test_getitem_mask(arr, mask):\n if arr.ndim > 1:\n pytest.xfail() 
# TODO: not supported by numba\n\n def py_func(a, m):\n return a[m]\n\n mask = np.resize(mask, arr.size).reshape(arr.shape)\n\n jit_func = njit(py_func)\n assert_equal(py_func(arr, mask), jit_func(arr, mask))\n\ndef test_array_len():\n def py_func(a):\n return len(a)\n\n jit_func = njit(py_func)\n arr = np.asarray([5,6,7])\n assert_equal(py_func(arr), jit_func(arr))\n\n@parametrize_function_variants(\"py_func\", [\n 'lambda a: np.sum(a, axis=0)',\n 'lambda a: np.sum(a, axis=1)',\n # 'lambda a: np.amax(a, axis=0)', # Not supported by numba\n # 'lambda a: np.amax(a, axis=1)',\n # 'lambda a: np.amin(a, axis=0)',\n # 'lambda a: np.amin(a, axis=1)',\n ])\[email protected](\"arr\", [\n np.array([[1,2,3],[4,5,6]], dtype=np.int32),\n np.array([[1,2,3],[4,5,6]], dtype=np.float32),\n ])\ndef test_reduce_axis(py_func, arr):\n jit_func = njit(py_func)\n assert_equal(py_func(arr), jit_func(arr))\n\ndef test_sum_add():\n def py_func(a, b):\n return np.add(a, b).sum()\n\n jit_func = njit(py_func)\n arr1 = np.asarray([1,2,3])\n arr2 = np.asarray([4,5,6])\n assert_equal(py_func(arr1, arr2), jit_func(arr1, arr2))\n\ndef test_sum_add2():\n def py_func(a, b, c):\n t = np.add(a, b)\n return np.add(t, c).sum()\n\n jit_func = njit(py_func)\n arr1 = np.asarray([1,2,3])\n arr2 = np.asarray([4,5,6])\n arr3 = np.asarray([7,8,9])\n assert_equal(py_func(arr1, arr2, arr3), jit_func(arr1, arr2, arr3))\n\[email protected](\"a,b\", [\n (np.array([1,2,3], np.float32), np.array([4,5,6], np.float32)),\n (np.array([[1,2,3],[4,5,6]], np.float32), np.array([[1,2],[3,4],[5,6]], np.float32)),\n ])\[email protected](\"parallel\", [False, True])\ndef test_dot(a, b, parallel):\n def py_func(a, b):\n return np.dot(a, b)\n\n jit_func = njit(py_func, parallel=parallel)\n assert_equal(py_func(a, b), jit_func(a, b))\n\ndef test_prange_lowering():\n def py_func(arr):\n res = 0\n for i in numba.prange(len(arr)):\n res += arr[i]\n\n return res\n\n with print_pass_ir([],['ParallelToTbbPass']):\n jit_func = njit(py_func, parallel=True)\n arr = np.arange(10000, dtype=np.float32)\n assert_equal(py_func(arr), jit_func(arr))\n ir = get_print_buffer()\n assert ir.count('plier_util.parallel') == 1, ir\n\ndef test_loop_fusion1():\n def py_func(arr):\n l = len(arr)\n res1 = 0\n for i in numba.prange(l):\n res1 += arr[i]\n\n res2 = 1.0\n for i in numba.prange(l):\n res2 *= arr[i]\n\n return res1, res2\n\n with print_pass_ir([],['PostLinalgOptPass']):\n jit_func = njit(py_func)\n arr = np.arange(1, 15, dtype=np.float32)\n assert_equal(py_func(arr), jit_func(arr))\n ir = get_print_buffer()\n assert ir.count('scf.parallel') == 1, ir\n assert ir.count('memref.load') == 1, ir\n\ndef test_loop_fusion2():\n def py_func(arr):\n l = len(arr)\n res1 = 0\n for i in numba.prange(l):\n res1 += arr[i]\n\n res1 += 10\n\n res2 = 0.0\n for i in numba.prange(l):\n res2 *= arr[i]\n\n return res1, res2\n\n with print_pass_ir([],['PostLinalgOptPass']):\n jit_func = njit(py_func)\n arr = np.arange(1, 15, dtype=np.float32)\n assert_equal(py_func(arr), jit_func(arr))\n ir = get_print_buffer()\n assert ir.count('scf.parallel') == 1, ir\n assert ir.count('memref.load') == 1, ir\n\ndef test_loop_fusion3():\n def py_func(arr):\n l = len(arr)\n res1 = 0\n for i in numba.prange(l):\n res1 += arr[i]\n\n res2 = 1.0\n for i in numba.
prange(l):\n res2 *= (arr[i] * res1)\n\n return res1, res2\n\n with print_pass_ir([],['PostLinalgOptPass']):\n jit_func = njit(py_func)\n arr = np.arange(1, 15, dtype=np.float32)\n assert_equal(py_func(arr), jit_func(arr))\n ir = get_print_buffer()\n assert ir.count('scf.parallel') == 2, ir\n assert ir.count('memref.load') == 2, ir\n\[email protected](\"dtype\", [np.int32, np.int64, np.float32])\ndef test_np_reduce(dtype):\n def py_func(arr):\n return arr.sum()\n\n with print_pass_ir([],['PostLinalgOptPass']):\n jit_func = njit(py_func)\n arr = np.array([[1,2,3],[4,5,6]], dtype=dtype)\n assert_equal(py_func(arr), jit_func(arr))\n ir = get_print_buffer()\n assert ir.count('scf.parallel') == 1, ir\n assert ir.count('memref.load') == 1, ir\n\ndef test_indirect_call_array():\n def inner_func(a):\n return a + 3\n\n def func(func, *args):\n return func(*args)\n\n jit_inner_func = njit(inner_func)\n jit_func = njit(func)\n\n arr = np.array([[1,2,3],[4,5,6]])\n # arr = 5\n assert_equal(func(inner_func, arr), jit_func(jit_inner_func, arr))\n\ndef test_loop_if():\n def py_func(arr):\n for i in range(len(arr)):\n if arr[i] == 5:\n arr[i] = 6\n return arr\n\n jit_func = njit(py_func)\n arr1 = np.arange(100)\n arr2 = np.arange(100)\n assert_equal(py_func(arr1), jit_func(arr2))\n\ndef test_static_setitem1():\n def py_func(a):\n a[1] = 42\n return a\n\n jit_func = njit(py_func)\n arr = np.asarray([1,2,3])\n assert_equal(py_func(arr.copy()), jit_func(arr.copy()))\n\ndef test_static_setitem2():\n def py_func(a):\n a[:] = 42\n return a\n\n jit_func = njit(py_func)\n arr = np.asarray([1,2,3])\n assert_equal(py_func(arr.copy()), jit_func(arr.copy()))\n\n\ndef test_static_setitem3():\n def py_func(a):\n a[(0,1)] = 42\n return a\n\n jit_func = njit(py_func)\n arr = np.asarray([[1,2],[3,4]])\n assert_equal(py_func(arr.copy()), jit_func(arr.copy()))\n\[email protected](\"i\",\n list(range(-2,3)))\ndef test_setitem1(i):\n def py_func(a, b):\n a[b] = 42\n return a[b]\n\n jit_func = njit(py_func)\n arr = np.asarray([1,2,3])\n assert_equal(py_func(arr, i), jit_func(arr, i))\n\ndef test_setitem2():\n def py_func(a, b, c):\n a[b, c] = 42\n return a[b, c]\n\n jit_func = njit(py_func)\n arr = np.asarray([[1,2,3],[4,5,6]])\n assert_equal(py_func(arr, 1, 2), jit_func(arr, 1, 2))\n\[email protected](\"d\", [\n np.array([5,6]),\n 7\n ])\ndef test_setitem_slice1(d):\n def py_func(a, b, c, d):\n a[b:c] = d\n return a\n\n jit_func = njit(py_func)\n arr = np.asarray([1,2,3,4])\n assert_equal(py_func(arr.copy(), 1, 3, d), jit_func(arr.copy(), 1, 3, d))\n\[email protected](\"d\", [\n np.array([5,6,7]),\n 7\n ])\ndef test_setitem_slice2(d):\n def py_func(a, c, d):\n a[:c] = d\n return a\n\n jit_func = njit(py_func)\n arr = np.asarray([1,2,3,4])\n assert_equal(py_func(arr.copy(), 3, d), jit_func(arr.copy(), 3, d))\n\[email protected](\"d\", [\n np.array([5,6,7]),\n 7\n ])\ndef test_setitem_slice3(d):\n def py_func(a, b, d):\n a[b:] = d\n return a\n\n jit_func = njit(py_func)\n arr = np.asarray([1,2,3,4])\n assert_equal(py_func(arr.copy(), 1, d), jit_func(arr.copy(), 1, d))\n\ndef test_setitem_loop():\n def py_func(a):\n for i in range(len(a)):\n a[i] = a[i] + i\n return a.sum()\n\n jit_func = njit(py_func)\n arr = np.asarray([3,2,1])\n assert_equal(py_func(arr.copy()), jit_func(arr.copy()))\n\ndef test_array_bounds1():\n def py_func(a):\n res = 0\n for i in range(len(a)):\n if i >= len(a) or i < 0:\n res = res + 1\n else:\n res = res + a[i]\n return res\n\n arr = np.asarray([3,2,1])\n\n with 
print_pass_ir([],['PostLinalgOptPass']):\n jit_func = njit(py_func)\n assert_equal(py_func(arr.copy()), jit_func(arr.copy()))\n ir = get_print_buffer()\n assert ir.count('cmpi') == 0, ir\n\ndef test_array_bounds2():\n def py_func(a):\n res = 0\n for i in range(len(a)):\n if i < len(a) and i >= 0:\n res = res + a[i]\n else:\n res = res + 1\n return res\n\n arr = np.asarray([3,2,1])\n\n with print_pass_ir([],['PostLinalgOptPass']):\n jit_func = njit(py_func)\n assert_equal(py_func(arr.copy()), jit_func(arr.copy()))\n ir = get_print_buffer()\n assert ir.count('cmpi') == 0, ir\n\ndef test_array_bounds3():\n def py_func(a):\n res = 0\n for i in range(len(a)):\n if 0 <= i < len(a):\n res = res + a[i]\n else:\n res = res + 1\n return res\n\n arr = np.asarray([3,2,1])\n\n with print_pass_ir([],['PostLinalgOptPass']):\n jit_func = njit(py_func)\n assert_equal(py_func(arr.copy()), jit_func(arr.copy()))\n ir = get_print_buffer()\n assert ir.count('cmpi') == 0, ir\n\ndef test_array_bounds4():\n def py_func(a):\n res = 0\n for i in range(len(a) - 1):\n if 0 <= i < (len(a) - 1):\n res = res + a[i]\n else:\n res = res + 1\n return res\n\n arr = np.asarray([3,2,1])\n\n with print_pass_ir([],['PostLinalgOptPass']):\n jit_func = njit(py_func)\n assert_equal(py_func(arr.copy()), jit_func(arr.copy()))\n ir = get_print_buffer()\n assert ir.count('cmpi') == 0, ir\n\ndef test_array_shape():\n def py_func(a):\n shape = a.shape\n return shape[0] + shape[1] * 10\n\n jit_func = njit(py_func)\n arr = np.array([[1,2,3],[4,5,6]])\n assert_equal(py_func(arr), jit_func(arr))\n\ndef test_array_return():\n def py_func(a):\n return a\n\n jit_func = njit(py_func)\n arr = np.array([1,2,3])\n assert_equal(py_func(arr), jit_func(arr))\n\ndef test_array_prange_const():\n def py_func(a, b):\n a[0] = 42\n for i in numba.prange(b):\n a[0] = 1\n return a[0]\n\n jit_func = njit(py_func, parallel=True)\n arr = np.array([0.0])\n assert_equal(py_func(arr, 5), jit_func(arr, 5))\n\ndef test_empty1():\n def py_func(d):\n a = np.empty(d)\n for i in range(d):\n a[i] = i\n return a\n\n jit_func = njit(py_func)\n assert_equal(py_func(5), jit_func(5))\n\ndef test_empty2():\n def py_func(d1, d2):\n a = np.empty((d1, d2))\n for i in range(d1):\n for j in range(d2):\n a[i, j] = i + j * 10\n return a\n\n jit_func = njit(py_func)\n assert_equal(py_func(5, 7), jit_func(5, 7))\n\[email protected](\"dtype\", ['int32','int64','float32','float64'])\ndef test_empty3(dtype):\n def py_func(a):\n return np.empty(a.shape, a.dtype)\n\n jit_func = njit(py_func)\n arr = np.array([1,2,3], dtype=dtype)\n assert_equal(py_func(arr).shape, jit_func(arr).shape)\n assert_equal(py_func(arr).dtype, jit_func(arr).dtype)\n\[email protected](\"shape\", [1, (2,), (2,3), (4,5,6)])\[email protected](\"dtype\", ['int32','int64','float32','float64'])\ndef test_empty_like(shape, dtype):\n def py_func(a):\n return np.empty_like(a)\n\n jit_func = njit(py_func)\n arr = np.empty(shape=shape, dtype=dtype)\n assert_equal(py_func(arr).shape, jit_func(arr).shape)\n assert_equal(py_func(arr).dtype, jit_func(arr).dtype)\n\[email protected](\"func\", [np.zeros, np.ones], ids=['zeros','ones'])\ndef test_init1(func):\n def py_func(d):\n return func(d)\n\n jit_func = njit(py_func)\n assert_equal(py_func(5), jit_func(5))\n\[email protected](\"func\", [np.zeros, np.ones], ids=['zeros','ones'])\[email protected](\"dtype\", ['int32','int64','float32','float64'])\ndef test_init2(func, dtype):\n def py_func(a):\n return func(a.shape, a.dtype)\n\n jit_func = njit(py_func)\n arr = np.array([1, 2, 3], 
dtype=dtype)\n assert_equal(py_func(arr).shape, jit_func(arr).shape)\n assert_equal(py_func(arr).dtype, jit_func(arr).dtype)\n\[email protected](\"func\", [np.zeros, np.ones], ids=['zeros','ones'])\[email protected]\ndef test_init3(func):\n def py_func(d):\n return func(d, dtype=np.dtype('int64'))\n\n jit_func = njit(py_func)\n assert_equal(py_func(5), jit_func(5))\n\[email protected](\"func\", [np.zeros, np.ones], ids=['zeros','ones'])\ndef test_init4(func):\n def py_func(d):\n return func(d)\n\n jit_func = njit(py_func)\n assert_equal(py_func((2, 1)), jit_func((2, 1)))\n\[email protected](\"shape\", [2,(3,4),(5,6,7)])\[email protected](\"dtype\", ['int32','int64','float32','float64'])\[email protected](\"func\", [np.zeros_like, np.ones_like], ids=['zeros_like','ones_like'])\ndef test_init_like(shape, dtype, func):\n def py_func(d):\n return func(d)\n\n a = np.empty(shape=shape, dtype=dtype)\n jit_func = njit(py_func)\n assert_equal(py_func(a), jit_func(a))\n\ndef test_parallel():\n def py_func(a, b):\n return np.add(a, b)\n\n jit_func = njit(py_func, parallel=True)\n arr = np.asarray([[[1,2,3],[4,5,6]],\n [[1,2,3],[4,5,6]]])\n assert_equal(py_func(arr,arr), jit_func(arr,arr))\n\ndef test_parallel_reduce():\n def py_func(a):\n shape = a.shape\n res = 0\n for i in range(shape[0]):\n for j in numba.prange(shape[1]):\n for k in numba.prange(shape[2]):\n res = res + a[i,j,k]\n return res\n\n jit_func = njit(py_func, parallel=True)\n arr = np.asarray([[[1,2,3],[4,5,6]]]).repeat(10000,0)\n assert_equal(py_func(arr), jit_func(arr))\n\n@parametrize_function_variants(\"func\", [\n 'lambda a : a + 1',\n 'lambda a : math.erf(a)',\n # 'lambda a : 5 if a == 1 else a', TODO: investigate\n])\[email protected](\"arr\",\n _test_arrays,\n ids=_test_arrays_ids)\ndef test_vectorize(func, arr):\n arr = np.array(arr)\n vec_func = vectorize(func)\n # assert_equal(_vectorize_reference(func, arr), vec_func(arr))\n assert_allclose(_vectorize_reference(func, arr), vec_func(arr), rtol=1e-7, atol=1e-7)\n\[email protected](\"arr\",\n _test_arrays,\n ids=_test_arrays_ids)\ndef test_vectorize_indirect(arr):\n def func(a):\n return a + 1\n\n vec_func = vectorize(func)\n\n def py_func(a):\n return vec_func(a)\n\n jit_func = njit(py_func, parallel=True)\n\n arr = np.array(arr)\n assert_equal(_vectorize_reference(func, arr), jit_func(arr))\n\[email protected](\"arr\", [\n np.array([[1,2],[3,4]]),\n # np.array([[1,2],[3,4]]).T,\n])\ndef test_fortran_layout(arr):\n def py_func(a):\n return a.T\n\n jit_func = njit(py_func)\n\n assert_equal(py_func(arr), jit_func(arr))\n\n@parametrize_function_variants(\"a\", [\n # 'np.array(1)', TODO zero rank arrays\n # 'np.array(2.5)',\n 'np.array([])',\n 'np.array([1,2,3])',\n 'np.array([[1,2,3]])',\n 'np.array([[1,2],[3,4],[5,6]])',\n ])\ndef test_atleast2d(a):\n def py_func(a):\n return np.atleast_2d(a)\n\n jit_func = njit(py_func)\n assert_equal(py_func(a), jit_func(a))\n\n_test_reshape_test_array = np.array([1,2,3,4,5,6,7,8,9,10,11,12])\n_test_reshape_test_arrays = [\n _test_reshape_test_array,\n _test_reshape_test_array.reshape((2,6)),\n _test_reshape_test_array.reshape((2,3,2)),\n]\n\n@parametrize_function_variants(\"py_func\", [\n 'lambda a: a.reshape(a.size)',\n 'lambda a: a.reshape((a.size,))',\n 'lambda a: a.reshape((a.size,1))',\n 'lambda a: a.reshape((1, a.size))',\n 'lambda a: a.reshape((1, a.size, 1))',\n 'lambda a: np.reshape(a, a.size)',\n 'lambda a: np.reshape(a, (a.size,))',\n 'lambda a: np.reshape(a, (a.size,1))',\n 'lambda a: np.reshape(a, (1, a.size))',\n 'lambda a: 
np.reshape(a, (1, a.size, 1))',\n ])\[email protected](\"array\", _test_reshape_test_arrays)\ndef test_reshape(py_func, array):\n jit_func = njit(py_func)\n assert_equal(py_func(array), jit_func(array))\n\[email protected](reason=\"numba: reshape() supports contiguous array only\")\ndef test_reshape_non_contiguous():\n def py_func(a):\n return a.reshape(4)\n jit_func = njit(py_func)\n array = np.arange(16).reshape((4,4))[1:3,1:3]\n assert_equal(py_func(array), jit_func(array))\n\n@parametrize_function_variants(\"py_func\", [\n # 'lambda a: a.flat', TODO: flat support\n 'lambda a: a.flatten()',\n ])\[email protected](\"array\", _test_reshape_test_arrays)\ndef test_flatten(py_func, array):\n jit_func = njit(py_func)\n assert_equal(py_func(array), jit_func(array))\n\n@parametrize_function_variants(\"py_func\", [\n 'lambda a, b: ()',\n 'lambda a, b: (a,b)',\n 'lambda a, b: ((a,b),(a,a),(b,b),())',\n ])\[email protected](\"a,b\",\n itertools.product(*(([1,2.5,np.array([1,2,3]), np.array([4.5,6.7,8.9])],)*2))\n )\ndef test_tuple_ret(py_func, a, b):\n jit_func = njit(py_func)\n assert_equal(py_func(a, b), jit_func(a, b))\n\[email protected](\"arrays\",\n [([1,2,3],[4,5,6]),\n ([[1,2],[3,4]],[[5,6],[7,8]]),\n ([[[1],[2]],[[3],[4]]],[[[5],[6]],[[7],[8]]]),\n ([1,2,3],[4,5,6],[7,8,9]),\n ([1,2],[3,4],[5,6],[7,8]),\n ])\[email protected](\"axis\",\n [0,1,2]) # TODO: None\ndef test_concat(arrays, axis):\n arr = tuple(np.array(a) for a in arrays)\n num_dims = len(arr[0].shape)\n if axis >= num_dims:\n pytest.skip() # TODO: unselect\n num_arrays = len(arrays)\n if num_arrays == 2:\n def py_func(arr1, arr2):\n return np.concatenate((arr1, arr2), axis=axis)\n elif num_arrays == 3:\n def py_func(arr1, arr2, arr3):\n return np.concatenate((arr1, arr2, arr3), axis=axis)\n elif num_arrays == 4:\n def py_func(arr1, arr2, arr3, arr4):\n return np.concatenate((arr1, arr2, arr3, arr4), axis=axis)\n else:\n assert False\n jit_func = njit(py_func)\n assert_equal(py_func(*arr), jit_func(*arr))\n\[email protected](\"arr\", [\n np.array([1,2,3,4,5,6,7,8], dtype=np.int32),\n np.array([1,2,3,4,5,6,7,8], dtype=np.float32),\n np.array([True,False,True,True,False,True,True,True])\n ])\n@parametrize_function_variants(\"py_func\", [\n 'lambda a, b, c, d: a[b:c]',\n 'lambda a, b, c, d: a[3:c]',\n 'lambda a, b, c, d: a[b:4]',\n 'lambda a, b, c, d: a[3:4]',\n 'lambda a, b, c, d: a[b:c:d]',\n 'lambda a, b, c, d: a[b:c:1]',\n 'lambda a, b, c, d: a[b:c:2]',\n 'lambda a, b, c, d: a[3:4:2]',\n ])\ndef test_slice1(arr, py_func):\n jit_func = njit(py_func)\n assert_equal(py_func(arr, 3, 4, 2), jit_func(arr, 3, 4, 2))\n\ndef test_slice2():\n def py_func(a, i, j, k):\n a1 = a[1]\n a2 = a1[2]\n return a2[3]\n\n arr = np.arange(3*4*5).reshape((3,4,5))\n jit_func = njit(py_func)\n assert_equal(py_func(arr, 1,2,3), jit_func(arr, 1,2,3))\n\ndef test_multidim_slice():\n def py_func(a, b):\n return a[1, b,:]\n jit_func = njit(py_func)\n\n a = np.array([[[1],[2],[3]],[[4],[5],[6]]])\n assert_equal(py_func(a, 0), jit_func(a, 0))\n\ndef test_size_ret():\n def py_func(a, b):\n return a.size / b\n jit_func = njit(py_func)\n\n a = np.array([[[1],[2],[3]],[[4],[5],[6]]])\n assert_equal(py_func(a, 3), jit_func(a, 3))\n\[email protected](\"a\", [\n np.array([[1,2],[4,5]])\n ])\[email protected](\"b\", [True, False])\ndef test_tensor_if(a, b):\n def py_func(m, rowvar):\n m_arr = np.atleast_2d(m)\n if not rowvar:\n m_arr = m_arr.T\n return m_arr\n jit_func = njit(py_func)\n\n assert_equal(py_func(a, b), jit_func(a, b))\n\ndef _cov(m, y=None, rowvar=True, 
bias=False, ddof=None):\n return np.cov(m, y, rowvar, bias, ddof)\n\n_rnd = np.random.RandomState(42)\n\n@parametrize_function_variants(\"m\", [\n 'np.array([[0, 2], [1, 1], [2, 0]]).T',\n '_rnd.randn(100).reshape(5, 20)',\n 'np.asfortranarray(np.array([[0, 2], [1, 1], [2, 0]]).T)',\n '_rnd.randn(100).reshape(5, 20)[:, ::2]',\n 'np.array([0.3942, 0.5969, 0.7730, 0.9918, 0.7964])',\n # 'np.full((4, 5), fill_value=True)', TODO\n 'np.array([np.nan, 0.5969, -np.inf, 0.9918, 0.7964])',\n 'np.linspace(-3, 3, 33).reshape(33, 1)',\n\n # non-array inputs\n '((0.1, 0.2), (0.11, 0.19), (0.09, 0.21))', # UniTuple\n '((0.1, 0.2), (0.11, 0.19), (0.09j, 0.21j))', # Tuple\n '(-2.1, -1, 4.3)',\n '(1, 2, 3)',\n '[4, 5, 6]',\n '((0.1, 0.2, 0.3), (0.1, 0.2, 0.3))',\n '[(1, 2, 3), (1, 3, 2)]',\n '3.142',\n # '((1.1, 2.2, 1.5),)',\n\n # empty data structures\n 'np.array([])',\n 'np.array([]).reshape(0, 2)',\n 'np.array([]).reshape(2, 0)',\n '()',\n ])\ndef test_cov_basic(m):\n if isinstance(m, (list, float)) or len(m) == 0 or np.iscomplexobj(m):\n pytest.xfail()\n py_func = _cov\n jit_func = njit(py_func)\n assert_allclose(py_func(m), jit_func(m), rtol=1e-15, atol=1e-15)\n\n_cov_inputs_m = _rnd.randn(105).reshape(15, 7)\[email protected](\"m\",\n [_cov_inputs_m])\[email protected](\"y\",\n [None, _cov_inputs_m[::-1]])\[email protected](\"rowvar\",\n [False, True])\[email protected](\"bias\",\n [False, True])\[email protected](\"ddof\",\n [None, -1, 0, 1, 3.0, True])\ndef test_cov_explicit_arguments(m, y, rowvar, bias, ddof):\n py_func = _cov\n jit_func = njit(py_func)\n assert_allclose(py_func(m=m, y=y, rowvar=rowvar, bias=bias, ddof=ddof), jit_func(m=m, y=y, rowvar=rowvar, bias=bias, ddof=ddof), rtol=1e-14, atol=1e-14)\n\n@parametrize_function_variants(\"m, y, rowvar\", [\n '(np.array([-2.1, -1, 4.3]), np.array([3, 1.1, 0.12]), True)',\n '(np.array([1, 2, 3]), np.array([1j, 2j, 3j]), True)',\n '(np.array([1j, 2j, 3j]), np.array([1, 2, 3]), True)',\n '(np.array([1, 2, 3]), np.array([1j, 2j, 3]), True)',\n '(np.array([1j, 2j, 3]), np.array([1, 2, 3]), True)',\n '(np.array([]), np.array([]), True)',\n '(1.1, 2.2, True)',\n '(_rnd.randn(10, 3), np.array([-2.1, -1, 4.3]).reshape(1, 3) / 10, True)',\n '(np.array([-2.1, -1, 4.3]), np.array([[3, 1.1, 0.12], [3, 1.1, 0.12]]), True)',\n # '(np.array([-2.1, -1, 4.3]), np.array([[3, 1.1, 0.12], [3, 1.1, 0.12]]), False)',\n '(np.array([[3, 1.1, 0.12], [3, 1.1, 0.12]]), np.array([-2.1, -1, 4.3]), True)',\n # '(np.array([[3, 1.1, 0.12], [3, 1.1, 0.12]]), np.array([-2.1, -1, 4.3]), False)',\n ])\ndef test_cov_edge_cases(m, y, rowvar):\n if not isinstance(m, np.ndarray) or not isinstance(y, np.ndarray) or np.iscomplexobj(m) or np.iscomplexobj(y):\n pytest.xfail()\n py_func = _cov\n jit_func = njit(py_func)\n assert_allclose(py_func(m=m, y=y, rowvar=rowvar), jit_func(m=m, y=y, rowvar=rowvar), rtol=1e-14, atol=1e-14)\n\[email protected](\"arr\", [\n np.array([1,2,3,4,5,6,7,8,9], dtype=np.int32).reshape((3,3)),\n np.array([1,2,3,4,5,6,7,8,9], dtype=np.float32).reshape((3,3)),\n np.array([1,2,3,4,5,6,7,8,9,0], dtype=np.int32).reshape((5,2)),\n np.array([1,2,3,4,5,6,7,8,9,0], dtype=np.float32).reshape((5,2)),\n np.array([1,2,3,4,5,6,7,8,9,0], dtype=np.int32).reshape((5,2)).T,\n np.array([1,2,3,4,5,6,7,8,9,0], dtype=np.float32).reshape((5,2)).T,\n ])\[email protected](\"parallel\", [False, True])\ndef test_mean_loop(arr, parallel):\n def py_func(data):\n tdata = data.T\n m = np.empty(tdata.shape[0])\n for i in numba.prange(tdata.shape[0]):\n m[i] = np.mean(tdata[i])\n return m\n\n 
jit_func = njit(py_func, parallel=parallel)\n assert_equal(py_func(arr), jit_func(arr))\n\[email protected](\"arr\", [\n np.array([1,2,3,4,5,6,7,8,9], dtype=np.int32).reshape((3,3)),\n np.array([1,2,3,4,5,6,7,8,9], dtype=np.float32).reshape((3,3)),\n np.array([1,2,3,4,5,6,7,8,9,0], dtype=np.int32).reshape((5,2)),\n np.array([1,2,3,4,5,6,7,8,9,0], dtype=np.float32).reshape((5,2)),\n np.array([1,2,3,4,5,6,7,8,9,0], dtype=np.int32).reshape((5,2)).T,\n np.array([1,2,3,4,5,6,7,8,9,0], dtype=np.float32).reshape((5,2)).T,\n make_regression(n_samples=2**10, n_features=2**7, random_state=0)[0],\n ])\[email protected](\"parallel\", [False, True])\ndef test_mean_loop_cov(arr, parallel):\n def py_func(data):\n tdata = data.T\n m = np.empty(tdata.shape[0])\n for i in numba.prange(tdata.shape[0]):\n m[i] = np.mean(tdata[i])\n c = data - m\n v = np.cov(c.T)\n return c, v\n\n jit_func = njit(py_func, parallel=parallel)\n c1, v1 = py_func(arr)\n c2, v2 = jit_func(arr)\n assert_allclose(c1, c2, rtol=1e-15, atol=1e-11)\n assert_allclose(v1, v2, rtol=1e-15, atol=1e-11)\n\[email protected](\"N,k\", [\n (1, 0),\n (2, -1),\n (2, 0),\n (2, 1),\n (3, -2),\n (3, -1),\n (3, 0),\n (3, 1),\n (3, 2),\n ])\[email protected](\"dtype\", [np.int32,np.int64,np.float32,np.float64])\ndef test_eye1(N, k, dtype):\n def py_func(N, k):\n return np.eye(N=N, k=k, dtype=dtype)\n\n jit_func = njit(py_func)\n assert_equal(py_func(N, k), jit_func(N, k))\n\[email protected](\"N,M,k\", [\n (2, 3, -1),\n (2, 3, 0),\n (2, 3, 1),\n (3, 2, -1),\n (3, 2, 0),\n (3, 2, 1),\n ])\ndef test_eye2(N, M, k):\n def py_func(N, M, k):\n return np.eye(N, M, k)\n\n jit_func = njit(py_func)\n assert_equal(py_func(N, M, k), jit_func(N, M, k))\n\n_matmul_inputs_vars = [\n ([2],[3]),\n ([2,3],[4,5]),\n ([2,3], [[2,3],[4,5]]),\n ([1,2,3], [[1,2,3],[4,5,6],[7,8,9]]),\n ([[2,3],[4,5]], [2,3]),\n ([[1,2,3],[4,5,6],[7,8,9]],[1,2,3]),\n ([[2,3],[4,5]], [[2,3],[4,5]]),\n]\n\n@parametrize_function_variants(\"py_func\", [\n # 'lambda a, b: np.matmul(a, b)',\n 'lambda a, b: a @ b',\n ])\[email protected](\"a,b\", _matmul_inputs_vars, ids=list(map(str, _matmul_inputs_vars)))\[email protected](\"dtype\", [np.float32,np.float64])\ndef test_matmul(py_func, a, b, dtype):\n a = np.array(a, dtype=dtype)\n b = np.array(b, dtype=dtype)\n jit_func = njit(py_func)\n assert_equal(py_func(a, b), jit_func(a, b))\n"
] |
[
[
"numpy.testing.assert_allclose",
"numpy.dot",
"numpy.mean",
"numpy.resize",
"numpy.iscomplexobj",
"sklearn.datasets.make_regression",
"numpy.dtype",
"numpy.concatenate",
"numpy.empty",
"numpy.seterr",
"numpy.eye",
"numpy.arange",
"numpy.empty_like",
"numpy.atleast_2d",
"numpy.array",
"numpy.ndenumerate",
"numpy.cov",
"numpy.asarray",
"numpy.add",
"numpy.random.RandomState"
]
] |
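Every test in the code cell above follows one pattern: run a pure-Python reference, compile the same function with njit, and assert that both agree. A minimal self-contained sketch of that pattern, assuming only numba and numpy are installed (parametrize_function_variants, print_pass_ir, and get_print_buffer are project-local helpers and are not reproduced here):

```python
import numba
import numpy as np
from numpy.testing import assert_equal

def py_func(a):
    # Pure-Python/NumPy reference implementation.
    return a.sum() + len(a)

# Compiled variant under test; parallel=True would mirror the prange tests above.
jit_func = numba.njit(py_func)

arr = np.arange(10, dtype=np.float32)
# The suite's core assertion: reference and compiled results must match.
assert_equal(py_func(arr), jit_func(arr))
```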
fegan104/roam
|
[
"6a3a9136c8b25e2b550c11789901b1f1d8916afc"
] |
[
"src/runvast2.py"
] |
[
"import pickle\n\nimport numpy as np\nimport matplotlib.pyplot as plt\n\nfrom macrel import graphs\nfrom macrel import viewmap\nfrom macrel import vast11data as vast\n\nN = len(vast.NODES)\n\n\ndef blend(probs, weights):\n\tassert len(probs) == len(weights)\n\tk = len(probs)\n\n\ttotal_weight = sum(w for w in weights)\n\tnorm_weights = [w/total_weight for w in weights]\n\n\tp = probs[0] * norm_weights[0]\n\tfor i in range(1, k):\n\t\tp += probs[i] * norm_weights[i]\n\n\treturn p\n\nweights = {i: 10 for i in range(N)}\ngraph_props = vast.VAST11GraphProps(weights)\n\ndata = pickle.load(open(\"vast11-connections.pickle\", \"rb\"))\n\n\ntopo_count = vast.get_vast11_topology()\ntopo_prob = graphs.get_prob_matrix(topo_count)\n\n\nmaps = []\n\nSTARTS = [0] #, 96, 144]\nSTOPS = [192] # , 144, 192]\n\nfor start, stop in zip(STARTS, STOPS):\n\tdns_count = data[\"dns\"][stop] - data[\"dns\"][start]\n\tweb_count = data[\"web\"][stop] - data[\"web\"][start]\n\tmail_count = data[\"email\"][stop] - data[\"email\"][start]\n\tother_count = data[\"other\"][stop] - data[\"other\"][start]\n\n\t# slice topo data to omit unobserved nodes!\n\t# obs_counts = dns_count + web_count\n\t# unused = np.asarray(np.sum(obs_counts, 1) == 0.0).flatten()\n\t# for i in unused:\n\t# \tfor j in unused:\n\t# \t\ttopo_count[i,j] = 0.0\n\t# topo_count.eliminate_zeros()\n\n\t#topo_prob = graphs.get_prob_matrix(topo_count)\n\n\tdns_prob = graphs.get_prob_matrix(dns_count)\n\tweb_prob = graphs.get_prob_matrix(web_count)\n\tmail_prob = graphs.get_prob_matrix(mail_count)\n\tother_prob = graphs.get_prob_matrix(other_count)\n\n\tH = 10\n\tfor blend_weights in [(H,1,1,1,1), (1,H,1,1,1), (1,1,H,1,1), (1,1,1,H,1), (1,1,1,1,H), (1,1,1,1,1)]:\n\n\t\tblend_prob = blend([topo_prob, dns_prob, web_prob, mail_prob, other_prob], blend_weights)\n\n\t\ttopo_map = graphs.get_map(topo_prob)\n\t\tdns_map = graphs.get_map(dns_prob)\n\t\tweb_map = graphs.get_map(web_prob)\n\t\tmail_map = graphs.get_map(mail_prob)\n\t\tother_map = graphs.get_map(other_prob)\n\t\tblend_map = graphs.get_map(blend_prob)\n\n\t\tmaps += [topo_map, dns_map, web_map, mail_map, other_map, blend_map]\n\nviewmap.view_maps(maps, props=graph_props, ncols=6)\n\nplt.show()\n"
] |
[
[
"matplotlib.pyplot.show"
]
] |
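The blend helper in the runvast2.py cell above is a convex combination of equally-shaped probability matrices. A small worked example of the same arithmetic with plain NumPy (the macrel package and the VAST pickle are project-local and not assumed):

```python
import numpy as np

def blend(probs, weights):
    # Normalize the weights, then accumulate the weighted sum of matrices.
    assert len(probs) == len(weights)
    total = sum(weights)
    norm = [w / total for w in weights]
    p = probs[0] * norm[0]
    for i in range(1, len(probs)):
        p += probs[i] * norm[i]
    return p

topo = np.array([[0.9, 0.1], [0.2, 0.8]])
dns  = np.array([[0.5, 0.5], [0.5, 0.5]])
# Up-weighting one source ten-to-one, as in the (H,1,...) blend_weights loop:
out = blend([topo, dns], [10, 1])
# A convex blend of row-stochastic matrices stays row-stochastic.
assert np.allclose(out.sum(axis=1), 1.0)
```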
Elizabeth-Warren/warren_organizing_email
|
[
"662e5ae4844bffe64fec4c6635ee87dabd0c3e06"
] |
[
"utils.py"
] |
[
"import datetime\nimport pandas as pd\nimport pytz\n\n\ndef date_today():\n return datetime.datetime.now(pytz.timezone(\"US/Eastern\")).date()\n\n\ndef ds_today():\n return str(date_today())\n\n\ndef sql_quote(v):\n \"\"\"Returns quoted value suitable for SQL insert.\n\n Is not robust enough to properly protect against SQL injection. Beware.\"\"\"\n if v is None or pd.isnull(v):\n return \"NULL\"\n if isinstance(v, int):\n return v\n v = str(v)\n v = v.replace(\"'\", \"\\\\'\")\n return f\"'{v}'\"\n"
] |
[
[
"pandas.isnull"
]
] |
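The sql_quote helper in the utils.py cell above has three behaviors worth pinning down with examples: NULL for missing values, pass-through for ints, and backslash-escaped single quotes for everything else (its own docstring warns this is not injection-safe). A doctest-style sketch, restating the function so the snippet runs standalone:

```python
import pandas as pd

def sql_quote(v):
    if v is None or pd.isnull(v):
        return "NULL"
    if isinstance(v, int):
        return v
    v = str(v)
    v = v.replace("'", "\\'")
    return f"'{v}'"

assert sql_quote(None) == "NULL"
assert sql_quote(float("nan")) == "NULL"        # pd.isnull catches NaN too
assert sql_quote(42) == 42                      # ints are returned unquoted
assert sql_quote("O'Brien") == "'O\\'Brien'"    # quote is backslash-escaped
```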
markusweimer/hummingbird
|
[
"0bbd5607b74ead81c9876c75a89a4bd845da1b5b"
] |
[
"hummingbird/ml/operator_converters/_tree_implementations.py"
] |
[
"# -------------------------------------------------------------------------\n# Copyright (c) Microsoft Corporation. All rights reserved.\n# Licensed under the MIT License. See License.txt in the project root for\n# license information.\n# --------------------------------------------------------------------------\n\n\"\"\"\nBase classes for tree algorithm implementations.\n\"\"\"\n\nimport torch\nimport numpy as np\nfrom enum import Enum\nfrom abc import ABC, abstractmethod\n\nfrom . import constants\n\n\nclass TreeImpl(Enum):\n \"\"\"\n Enum definig the available implementations for tree scoring.\n \"\"\"\n\n gemm = 1\n tree_trav = 2\n perf_tree_trav = 3\n\n\nclass AbstracTreeImpl(ABC):\n \"\"\"\n Abstract class definig the basic structure for tree-base models.\n \"\"\"\n\n def __init__(self):\n super().__init__()\n\n @abstractmethod\n def aggregation(self, x):\n \"\"\"\n Method defining the aggregation operation to execute after the model is evaluated.\n\n Args:\n x: An input tensor\n\n Returns:\n The tensor result of the aggregation\n \"\"\"\n pass\n\n @abstractmethod\n def calibration(self, x):\n \"\"\"\n Method implementating the calibration operation for classifiers.\n\n Args:\n x: An input tensor\n\n Returns:\n The tensor result of the calibration\n \"\"\"\n pass\n\n\nclass AbstractPyTorchTreeImpl(AbstracTreeImpl, torch.nn.Module):\n \"\"\"\n Abstract class definig the basic structure for tree-base models implemented in PyTorch.\n \"\"\"\n\n def __init__(self, net_parameters, n_features, classes, n_classes):\n \"\"\"\n Args:\n net_parameters: The parameters defining the tree structure\n n_features: The number of features input to the model\n classes: The classes used for classification. None if implementing a regression model\n n_classes: The total number of used classes\n \"\"\"\n super(AbstractPyTorchTreeImpl, self).__init__()\n\n # Set up the variables for the subclasses.\n # Each subclass will trigger different behaviours by properly setting these.\n self.perform_class_select = False\n self.binary_classification = False\n self.classes = classes\n self.learning_rate = None\n self.regression = False\n self.alpha = None\n\n # Are we doing regression or classification?\n if classes is None:\n self.regression = True\n self.n_classes = 1\n else:\n self.n_classes = len(classes) if n_classes is None else n_classes\n if min(classes) != 0 or max(classes) != len(classes) - 1:\n self.classes = torch.nn.Parameter(torch.IntTensor(classes), requires_grad=False)\n self.perform_class_select = True\n\n\nclass GEMMTreeImpl(AbstractPyTorchTreeImpl):\n \"\"\"\n Class implementing the GEMM strategy in PyTorch for tree-base models.\n \"\"\"\n\n def __init__(self, net_parameters, n_features, classes, n_classes=None):\n \"\"\"\n Args:\n net_parameters: The parameters defining the tree structure\n n_features: The number of features input to the model\n classes: The classes used for classification. 
None if implementing a regression model\n n_classes: The total number of used classes\n \"\"\"\n super(GEMMTreeImpl, self).__init__(net_parameters, n_features, classes, n_classes)\n\n # Initialize the actual model.\n hidden_one_size = 0\n hidden_two_size = 0\n hidden_three_size = self.n_classes\n\n for weight, bias in net_parameters:\n hidden_one_size = max(hidden_one_size, weight[0].shape[0])\n hidden_two_size = max(hidden_two_size, weight[1].shape[0])\n\n n_trees = len(net_parameters)\n weight_1 = np.zeros((n_trees, hidden_one_size, n_features))\n bias_1 = np.zeros((n_trees, hidden_one_size))\n weight_2 = np.zeros((n_trees, hidden_two_size, hidden_one_size))\n bias_2 = np.zeros((n_trees, hidden_two_size))\n weight_3 = np.zeros((n_trees, hidden_three_size, hidden_two_size))\n\n for i, (weight, bias) in enumerate(net_parameters):\n if len(weight[0]) > 0:\n weight_1[i, 0 : weight[0].shape[0], 0 : weight[0].shape[1]] = weight[0]\n bias_1[i, 0 : bias[0].shape[0]] = bias[0]\n weight_2[i, 0 : weight[1].shape[0], 0 : weight[1].shape[1]] = weight[1]\n bias_2[i, 0 : bias[1].shape[0]] = bias[1]\n weight_3[i, 0 : weight[2].shape[0], 0 : weight[2].shape[1]] = weight[2]\n\n self.n_trees = n_trees\n self.n_features = n_features\n self.hidden_one_size = hidden_one_size\n self.hidden_two_size = hidden_two_size\n self.hidden_three_size = hidden_three_size\n\n self.weight_1 = torch.nn.Parameter(torch.from_numpy(weight_1.reshape(-1, self.n_features).astype(\"float32\")))\n self.bias_1 = torch.nn.Parameter(torch.from_numpy(bias_1.reshape(-1, 1).astype(\"float32\")))\n\n self.weight_2 = torch.nn.Parameter(torch.from_numpy(weight_2.astype(\"float32\")))\n self.bias_2 = torch.nn.Parameter(torch.from_numpy(bias_2.reshape(-1, 1).astype(\"float32\")))\n\n self.weight_3 = torch.nn.Parameter(torch.from_numpy(weight_3.astype(\"float32\")))\n\n def aggregation(self, x):\n return x\n\n def calibration(self, x):\n return x\n\n def forward(self, x):\n x = x.t()\n x = torch.mm(self.weight_1, x) < self.bias_1\n x = x.view(self.n_trees, self.hidden_one_size, -1)\n x = x.float()\n\n x = torch.matmul(self.weight_2, x)\n\n x = x.view(self.n_trees * self.hidden_two_size, -1) == self.bias_2\n x = x.view(self.n_trees, self.hidden_two_size, -1)\n x = x.float()\n\n x = torch.matmul(self.weight_3, x)\n x = x.view(self.n_trees, self.hidden_three_size, -1)\n\n x = self.aggregation(x)\n\n if self.learning_rate is not None:\n x *= self.learning_rate\n if self.alpha is not None:\n x += self.alpha\n if self.regression:\n return x\n\n x = self.calibration(x)\n\n if self.perform_class_select:\n return torch.index_select(self.classes, 0, torch.argmax(x, dim=1)), x\n else:\n return torch.argmax(x, dim=1), x\n\n\nclass TreeTraversalTreeImpl(AbstractPyTorchTreeImpl):\n \"\"\"\n Class implementing the Tree Traversal strategy in PyTorch for tree-base models.\n \"\"\"\n\n def __init__(self, tree_parameters, max_depth, n_features, classes, n_classes=None):\n \"\"\"\n Args:\n net_parameters: The parameters defining the tree structure\n max_depth: The maximum tree-depth in the model\n n_features: The number of features input to the model\n classes: The classes used for classification. 
None if implementing a regression model\n n_classes: The total number of used classes\n \"\"\"\n super(TreeTraversalTreeImpl, self).__init__(tree_parameters, n_features, classes, n_classes)\n\n # Initialize the actual model.\n self.n_features = n_features\n self.max_tree_depth = max_depth\n self.num_trees = len(tree_parameters)\n self.num_nodes = max([len(tree_parameter[1]) for tree_parameter in tree_parameters])\n\n lefts = np.zeros((self.num_trees, self.num_nodes), dtype=np.float32)\n rights = np.zeros((self.num_trees, self.num_nodes), dtype=np.float32)\n\n features = np.zeros((self.num_trees, self.num_nodes), dtype=np.int64)\n thresholds = np.zeros((self.num_trees, self.num_nodes), dtype=np.float32)\n values = np.zeros((self.num_trees, self.num_nodes, self.n_classes), dtype=np.float32)\n\n for i in range(self.num_trees):\n lefts[i][: len(tree_parameters[i][0])] = tree_parameters[i][2]\n rights[i][: len(tree_parameters[i][0])] = tree_parameters[i][3]\n features[i][: len(tree_parameters[i][0])] = tree_parameters[i][4]\n thresholds[i][: len(tree_parameters[i][0])] = tree_parameters[i][5]\n values[i][: len(tree_parameters[i][0])][:] = tree_parameters[i][6]\n\n self.lefts = torch.nn.Parameter(torch.from_numpy(lefts).view(-1), requires_grad=False)\n self.rights = torch.nn.Parameter(torch.from_numpy(rights).view(-1), requires_grad=False)\n\n self.features = torch.nn.Parameter(torch.from_numpy(features).view(-1), requires_grad=False)\n self.thresholds = torch.nn.Parameter(torch.from_numpy(thresholds).view(-1))\n self.values = torch.nn.Parameter(torch.from_numpy(values).view(-1, self.n_classes))\n\n nodes_offset = [[i * self.num_nodes for i in range(self.num_trees)]]\n self.nodes_offset = torch.nn.Parameter(torch.LongTensor(nodes_offset), requires_grad=False)\n\n def aggregation(self, x):\n return x\n\n def calibration(self, x):\n return x\n\n def forward(self, x):\n indexes = self.nodes_offset\n indexes = indexes.expand(x.size()[0], self.num_trees)\n indexes = indexes.reshape(-1)\n\n for _ in range(self.max_tree_depth):\n tree_nodes = indexes\n feature_nodes = torch.index_select(self.features, 0, tree_nodes).view(-1, self.num_trees)\n feature_values = torch.gather(x, 1, feature_nodes)\n\n thresholds = torch.index_select(self.thresholds, 0, indexes).view(-1, self.num_trees)\n lefts = torch.index_select(self.lefts, 0, indexes).view(-1, self.num_trees)\n rights = torch.index_select(self.rights, 0, indexes).view(-1, self.num_trees)\n\n indexes = torch.where(torch.ge(feature_values, thresholds), rights, lefts).long()\n indexes = indexes + self.nodes_offset\n indexes = indexes.view(-1)\n\n output = torch.index_select(self.values, 0, indexes).view(-1, self.num_trees, self.n_classes)\n\n output = self.aggregation(output)\n\n if self.learning_rate is not None:\n output *= self.learning_rate\n if self.alpha is not None:\n output += self.alpha\n if self.regression:\n return output\n\n output = self.calibration(output)\n\n if self.perform_class_select:\n return torch.index_select(self.classes, 0, torch.argmax(output, dim=1)), output\n else:\n return torch.argmax(output, dim=1), output\n\n\nclass PerfectTreeTraversalTreeImpl(AbstractPyTorchTreeImpl):\n \"\"\"\n Class implementing the Perfect Tree Traversal strategy in PyTorch for tree-base models.\n \"\"\"\n\n def __init__(self, tree_parameters, max_depth, n_features, classes, n_classes=None):\n \"\"\"\n Args:\n net_parameters: The parameters defining the tree structure\n max_depth: The maximum tree-depth in the model\n n_features: The number of features 
input to the model\n classes: The classes used for classification. None if implementing a regression model\n n_classes: The total number of used classes\n \"\"\"\n super(PerfectTreeTraversalTreeImpl, self).__init__(tree_parameters, n_features, classes, n_classes)\n\n # Initialize the actual model.\n self.max_tree_depth = max_depth\n self.num_trees = len(tree_parameters)\n self.n_features = n_features\n\n node_maps = [tp[0] for tp in tree_parameters]\n\n weight_0 = np.zeros((self.num_trees, 2 ** max_depth - 1))\n bias_0 = np.zeros((self.num_trees, 2 ** max_depth - 1))\n weight_1 = np.zeros((self.num_trees, 2 ** max_depth, self.n_classes))\n\n for i, node_map in enumerate(node_maps):\n self._get_weights_and_biases(node_map, max_depth, weight_0[i], weight_1[i], bias_0[i])\n\n node_by_levels = [set() for _ in range(max_depth)]\n self._traverse_by_level(node_by_levels, 0, -1, max_depth)\n\n self.root_nodes = torch.nn.Parameter(torch.from_numpy(weight_0[:, 0].flatten().astype(\"int64\")), requires_grad=False)\n self.root_biases = torch.nn.Parameter(-1 * torch.from_numpy(bias_0[:, 0].astype(\"float32\")), requires_grad=False)\n\n tree_indices = np.array([i for i in range(0, 2 * self.num_trees, 2)]).astype(\"int64\")\n self.tree_indices = torch.nn.Parameter(torch.from_numpy(tree_indices), requires_grad=False)\n\n self.nodes = []\n self.biases = []\n for i in range(1, max_depth):\n nodes = torch.nn.Parameter(\n torch.from_numpy(weight_0[:, list(sorted(node_by_levels[i]))].flatten().astype(\"int64\")), requires_grad=False\n )\n biases = torch.nn.Parameter(\n torch.from_numpy(-1 * bias_0[:, list(sorted(node_by_levels[i]))].flatten().astype(\"float32\")),\n requires_grad=False,\n )\n self.nodes.append(nodes)\n self.biases.append(biases)\n\n self.nodes = torch.nn.ParameterList(self.nodes)\n self.biases = torch.nn.ParameterList(self.biases)\n\n self.leaf_nodes = torch.nn.Parameter(\n torch.from_numpy(weight_1.reshape((-1, self.n_classes)).astype(\"float32\")), requires_grad=False\n )\n\n def aggregation(self, x):\n return x\n\n def calibration(self, x):\n return x\n\n def forward(self, x):\n prev_indices = (torch.ge(torch.index_select(x, 1, self.root_nodes), self.root_biases)).long()\n prev_indices = prev_indices + self.tree_indices\n prev_indices = prev_indices.view(-1)\n\n factor = 2\n for nodes, biases in zip(self.nodes, self.biases):\n gather_indices = torch.index_select(nodes, 0, prev_indices).view(-1, self.num_trees)\n features = torch.gather(x, 1, gather_indices).view(-1)\n prev_indices = factor * prev_indices + torch.ge(features, torch.index_select(biases, 0, prev_indices)).long().view(\n -1\n )\n\n output = torch.index_select(self.leaf_nodes, 0, prev_indices.view(-1)).view(-1, self.num_trees, self.n_classes)\n\n output = self.aggregation(output)\n\n if self.learning_rate is not None:\n output *= self.learning_rate\n if self.alpha is not None:\n output += self.alpha\n if self.regression:\n return output\n\n output = self.calibration(output)\n\n if self.perform_class_select:\n return torch.index_select(self.classes, 0, torch.argmax(output, dim=1)), output\n else:\n return torch.argmax(output, dim=1), output\n\n def _traverse_by_level(self, node_by_levels, node_id, current_level, max_level):\n current_level += 1\n if current_level == max_level:\n return node_id\n node_by_levels[current_level].add(node_id)\n node_id += 1\n node_id = self._traverse_by_level(node_by_levels, node_id, current_level, max_level)\n node_id = self._traverse_by_level(node_by_levels, node_id, current_level, max_level)\n 
return node_id\n\n def _get_weights_and_biases(self, nodes_map, tree_depth, weight_0, weight_1, bias_0):\n def depth_f_traversal(node, current_depth, node_id, leaf_start_id):\n weight_0[node_id] = node.feature\n bias_0[node_id] = -node.threshold\n current_depth += 1\n node_id += 1\n\n if node.left.feature == -1:\n node_id += 2 ** (tree_depth - current_depth - 1) - 1\n v = node.left.value\n weight_1[leaf_start_id : leaf_start_id + 2 ** (tree_depth - current_depth - 1)] = (\n np.ones((2 ** (tree_depth - current_depth - 1), self.n_classes)) * v\n )\n leaf_start_id += 2 ** (tree_depth - current_depth - 1)\n else:\n node_id, leaf_start_id = depth_f_traversal(node.left, current_depth, node_id, leaf_start_id)\n\n if node.right.feature == -1:\n node_id += 2 ** (tree_depth - current_depth - 1) - 1\n v = node.right.value\n weight_1[leaf_start_id : leaf_start_id + 2 ** (tree_depth - current_depth - 1)] = (\n np.ones((2 ** (tree_depth - current_depth - 1), self.n_classes)) * v\n )\n leaf_start_id += 2 ** (tree_depth - current_depth - 1)\n else:\n node_id, leaf_start_id = depth_f_traversal(node.right, current_depth, node_id, leaf_start_id)\n\n return node_id, leaf_start_id\n\n depth_f_traversal(nodes_map[0], -1, 0, 0)\n\n\nclass GEMMDecisionTreeImpl(GEMMTreeImpl):\n \"\"\"\n Class implementing the GEMM strategy in PyTorch for decision tree models.\n\n \"\"\"\n\n def __init__(self, net_parameters, n_features, classes=None):\n \"\"\"\n Args:\n net_parameters: The parameters defining the tree structure\n n_features: The number of features input to the model\n classes: The classes used for classification. None if implementing a regression model\n \"\"\"\n super(GEMMDecisionTreeImpl, self).__init__(net_parameters, n_features, classes)\n self.final_probability_divider = len(net_parameters)\n\n def aggregation(self, x):\n output = x.sum(0).t()\n\n if self.final_probability_divider > 1:\n output = output / self.final_probability_divider\n\n return output\n\n\nclass TreeTraversalDecisionTreeImpl(TreeTraversalTreeImpl):\n \"\"\"\n Class implementing the Tree Traversal strategy in PyTorch for decision tree models.\n \"\"\"\n\n def __init__(self, net_parameters, max_depth, n_features, classes=None):\n \"\"\"\n Args:\n net_parameters: The parameters defining the tree structure\n max_depth: The maximum tree-depth in the model\n n_features: The number of features input to the model\n classes: The classes used for classification. None if implementing a regression model\n \"\"\"\n super(TreeTraversalDecisionTreeImpl, self).__init__(net_parameters, max_depth, n_features, classes)\n self.final_probability_divider = len(net_parameters)\n\n def aggregation(self, x):\n output = x.sum(1)\n\n if self.final_probability_divider > 1:\n output = output / self.final_probability_divider\n\n return output\n\n\nclass PerfectTreeTraversalDecisionTreeImpl(PerfectTreeTraversalTreeImpl):\n \"\"\"\n Class implementing the Perfect Tree Traversal strategy in PyTorch for decision tree models.\n \"\"\"\n\n def __init__(self, net_parameters, max_depth, n_features, classes=None):\n \"\"\"\n Args:\n net_parameters: The parameters defining the tree structure\n max_depth: The maximum tree-depth in the model\n n_features: The number of features input to the model\n classes: The classes used for classification. 
None if implementing a regression model\n \"\"\"\n super(PerfectTreeTraversalDecisionTreeImpl, self).__init__(net_parameters, max_depth, n_features, classes)\n self.final_probability_divider = len(net_parameters)\n\n def aggregation(self, x):\n output = x.sum(1)\n\n if self.final_probability_divider > 1:\n output = output / self.final_probability_divider\n\n return output\n\n\nclass GEMMGBDTImpl(GEMMTreeImpl):\n \"\"\"\n Class implementing the GEMM strategy (in PyTorch) for GBDT models.\n \"\"\"\n\n def __init__(self, net_parameters, n_features, classes=None, extra_config={}):\n \"\"\"\n Args:\n net_parameters: The parameters defining the tree structure\n n_features: The number of features input to the model\n classes: The classes used for classification. None if implementing a regression model\n extra_config: Extra configuration used to properly implement the source tree\n \"\"\"\n super(GEMMGBDTImpl, self).__init__(net_parameters, n_features, classes, 1)\n self.n_gbdt_classes = 1\n\n if constants.LEARNING_RATE in extra_config:\n self.learning_rate = extra_config[constants.LEARNING_RATE]\n if constants.ALPHA in extra_config:\n self.alpha = torch.nn.Parameter(torch.FloatTensor(extra_config[constants.ALPHA]), requires_grad=False)\n\n if classes is not None:\n self.n_gbdt_classes = len(classes) if len(classes) > 2 else 1\n if self.n_gbdt_classes == 1:\n self.binary_classification = True\n\n self.n_trees_per_class = len(net_parameters) // self.n_gbdt_classes\n\n def aggregation(self, x):\n return torch.squeeze(x).t().view(-1, self.n_gbdt_classes, self.n_trees_per_class).sum(2)\n\n def calibration(self, x):\n if self.binary_classification:\n output = torch.sigmoid(x)\n return torch.cat([1 - output, output], dim=1)\n else:\n return torch.softmax(x, dim=1)\n\n\nclass TreeTraversalGBDTImpl(TreeTraversalTreeImpl):\n \"\"\"\n Class implementing the Tree Traversal strategy in PyTorch.\n \"\"\"\n\n def __init__(self, net_parameters, max_detph, n_features, classes=None, extra_config={}):\n \"\"\"\n Args:\n net_parameters: The parameters defining the tree structure\n max_depth: The maximum tree-depth in the model\n n_features: The number of features input to the model\n classes: The classes used for classification. 
None if implementing a regression model\n extra_config: Extra configuration used to properly implement the source tree\n \"\"\"\n super(TreeTraversalGBDTImpl, self).__init__(net_parameters, max_detph, n_features, classes, 1)\n self.n_gbdt_classes = 1\n\n if constants.LEARNING_RATE in extra_config:\n self.learning_rate = extra_config[constants.LEARNING_RATE]\n if constants.ALPHA in extra_config:\n self.alpha = torch.nn.Parameter(torch.FloatTensor(extra_config[constants.ALPHA]), requires_grad=False)\n\n if classes is not None:\n self.n_gbdt_classes = len(classes) if len(classes) > 2 else 1\n if self.n_gbdt_classes == 1:\n self.binary_classification = True\n\n self.n_trees_per_class = len(net_parameters) // self.n_gbdt_classes\n\n def aggregation(self, x):\n return x.view(-1, self.n_gbdt_classes, self.n_trees_per_class).sum(2)\n\n def calibration(self, x):\n if self.binary_classification:\n output = torch.sigmoid(x)\n return torch.cat([1 - output, output], dim=1)\n else:\n return torch.softmax(x, dim=1)\n\n\nclass PerfectTreeTraversalGBDTImpl(PerfectTreeTraversalTreeImpl):\n \"\"\"\n Class implementing the Perfect Tree Traversal strategy in PyTorch.\n \"\"\"\n\n def __init__(self, net_parameters, max_depth, n_features, classes=None, extra_config={}):\n \"\"\"\n Args:\n net_parameters: The parameters defining the tree structure\n max_depth: The maximum tree-depth in the model\n n_features: The number of features input to the model\n classes: The classes used for classification. None if implementing a regression model\n extra_config: Extra configuration used to properly implement the source tree\n \"\"\"\n super(PerfectTreeTraversalGBDTImpl, self).__init__(net_parameters, max_depth, n_features, classes, 1)\n self.n_gbdt_classes = 1\n\n if constants.LEARNING_RATE in extra_config:\n self.learning_rate = extra_config[constants.LEARNING_RATE]\n if constants.ALPHA in extra_config:\n self.alpha = torch.nn.Parameter(torch.FloatTensor(extra_config[constants.ALPHA]), requires_grad=False)\n\n if classes is not None:\n self.n_gbdt_classes = len(classes) if len(classes) > 2 else 1\n if self.n_gbdt_classes == 1:\n self.binary_classification = True\n\n self.n_trees_per_class = len(net_parameters) // self.n_gbdt_classes\n\n def aggregation(self, x):\n return x.view(-1, self.n_gbdt_classes, self.n_trees_per_class).sum(2)\n\n def calibration(self, x):\n if self.binary_classification:\n output = torch.sigmoid(x)\n return torch.cat([1 - output, output], dim=1)\n else:\n return torch.softmax(x, dim=1)\n"
] |
[
[
"torch.sigmoid",
"torch.cat",
"torch.nn.ParameterList",
"numpy.zeros",
"torch.gather",
"torch.IntTensor",
"torch.FloatTensor",
"numpy.ones",
"torch.softmax",
"torch.mm",
"torch.from_numpy",
"torch.squeeze",
"torch.LongTensor",
"torch.index_select",
"torch.ge",
"torch.matmul",
"torch.argmax"
]
] |
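GEMMTreeImpl in the hummingbird cell above encodes each tree as three dense layers: weight_1/bias_1 evaluate every split condition at once, weight_2/bias_2 match the condition outcomes against each root-to-leaf path, and weight_3 maps the selected leaf to class scores. A hand-built NumPy sketch of that encoding for a single stump (if x[0] < 0.5 predict class 0, else class 1); the matrices here are hypothetical illustrations, not output of Hummingbird's converter:

```python
import numpy as np

weight_1 = np.array([[1.0, 0.0]])     # node i tests feature j (here: x[0])
bias_1   = np.array([0.5])            # split thresholds
weight_2 = np.array([[1.0], [-1.0]])  # +1 if a leaf needs the condition true, -1 if false
bias_2   = np.array([1.0, 0.0])       # per leaf: count of "true" edges on its path
weight_3 = np.array([[1.0, 0.0],      # leaf -> class scores
                     [0.0, 1.0]])

def gemm_tree(x):
    d = (weight_1 @ x < bias_1).astype(float)    # which split conditions hold
    e = (weight_2 @ d == bias_2).astype(float)   # one-hot leaf selection
    return weight_3 @ e                          # scores of the selected leaf

assert np.argmax(gemm_tree(np.array([0.2, 7.0]))) == 0  # x[0] < 0.5
assert np.argmax(gemm_tree(np.array([0.8, 7.0]))) == 1  # x[0] >= 0.5
```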
villasen/ML-KWS-for-MCU
|
[
"704a36897c04582254239fccb8bc37af04919b1d"
] |
[
"quant_test.py"
] |
[
"# Copyright 2017 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n#\n# Modifications Copyright 2017-2018 Arm Inc. All Rights Reserved. \n# Adapted from freeze.py to run quantized inference on train/val/test dataset on the \n# trained model in the form of checkpoint\n# \n#\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport argparse\nimport os.path\nimport sys\nimport numpy as np\n\nimport tensorflow as tf\nimport input_data\nimport quant_models as models\n\ndef run_quant_inference(wanted_words, sample_rate, clip_duration_ms,\n window_size_ms, window_stride_ms, dct_coefficient_count, \n model_architecture, model_size_info):\n \"\"\"Creates an audio model with the nodes needed for inference.\n\n Uses the supplied arguments to create a model, and inserts the input and\n output nodes that are needed to use the graph for inference.\n\n Args:\n wanted_words: Comma-separated list of the words we're trying to recognize.\n sample_rate: How many samples per second are in the input audio files.\n clip_duration_ms: How many samples to analyze for the audio pattern.\n window_size_ms: Time slice duration to estimate frequencies from.\n window_stride_ms: How far apart time slices should be.\n dct_coefficient_count: Number of frequency bands to analyze.\n model_architecture: Name of the kind of model to generate.\n model_size_info: Model dimensions : different lengths for different models\n \"\"\"\n \n tf.logging.set_verbosity(tf.logging.INFO)\n sess = tf.InteractiveSession()\n words_list = input_data.prepare_words_list(wanted_words.split(','))\n model_settings = models.prepare_model_settings(\n len(words_list), sample_rate, clip_duration_ms, window_size_ms,\n window_stride_ms, dct_coefficient_count)\n\n audio_processor = input_data.AudioProcessor(\n FLAGS.data_url, FLAGS.data_dir, FLAGS.silence_percentage,\n FLAGS.unknown_percentage,\n FLAGS.wanted_words.split(','), FLAGS.validation_percentage,\n FLAGS.testing_percentage, model_settings)\n \n label_count = model_settings['label_count']\n fingerprint_size = model_settings['fingerprint_size']\n\n fingerprint_input = tf.placeholder(\n tf.float32, [None, fingerprint_size], name='fingerprint_input')\n\n logits = models.create_model(\n fingerprint_input,\n model_settings,\n FLAGS.model_architecture,\n FLAGS.model_size_info,\n FLAGS.act_max,\n is_training=False)\n\n ground_truth_input = tf.placeholder(\n tf.float32, [None, label_count], name='groundtruth_input')\n\n predicted_indices = tf.argmax(logits, 1)\n expected_indices = tf.argmax(ground_truth_input, 1)\n correct_prediction = tf.equal(predicted_indices, expected_indices)\n confusion_matrix = tf.confusion_matrix(\n expected_indices, predicted_indices, num_classes=label_count)\n evaluation_step = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))\n models.load_variables_from_checkpoint(sess, 
FLAGS.checkpoint)\n\n\n # Quantize weights to 8-bits using (min,max) and write to file\n f = open('weights.h','wb')\n f.close()\n\n for v in tf.trainable_variables():\n var_name = str(v.name)\n var_values = sess.run(v)\n min_value = var_values.min()\n max_value = var_values.max()\n int_bits = int(np.ceil(np.log2(max(abs(min_value),abs(max_value)))))\n dec_bits = 7-int_bits\n # convert to [-128,128) or int8\n var_values = np.round(var_values*2**dec_bits)\n var_name = var_name.replace('/','_')\n var_name = var_name.replace(':','_')\n var_name = var_name.replace('-','_')\n with open('weights.h','a') as f:\n f.write('#define '+var_name+' {')\n if(len(var_values.shape)>2): #convolution layer weights\n transposed_wts = np.transpose(var_values,(3,0,1,2))\n else: #fully connected layer weights or biases of any layer\n transposed_wts = np.transpose(var_values)\n with open('weights.h','a') as f:\n transposed_wts.tofile(f,sep=\", \",format=\"%d\")\n f.write('}\\n')\n # convert back original range but quantized to 8-bits or 256 levels\n var_values = var_values/(2**dec_bits)\n # update the weights in tensorflow graph for quantizing the activations\n var_values = sess.run(tf.assign(v,var_values))\n print(var_name+' number of wts/bias: '+str(var_values.shape)+\\\n ' int bits: '+str(int_bits)+\\\n ' dec bits: '+str(dec_bits)+\\\n ' max: ('+str(var_values.max())+','+str(max_value)+')'+\\\n ' min: ('+str(var_values.min())+','+str(min_value)+')')\n \n # training set\n set_size = audio_processor.set_size('training')\n tf.logging.info('set_size=%d', set_size)\n total_accuracy = 0\n total_conf_matrix = None\n for i in xrange(0, set_size, FLAGS.batch_size):\n training_fingerprints, training_ground_truth = (\n audio_processor.get_data(FLAGS.batch_size, i, model_settings, 0.0,\n 0.0, 0, 'training', sess))\n training_accuracy, conf_matrix = sess.run(\n [evaluation_step, confusion_matrix],\n feed_dict={\n fingerprint_input: training_fingerprints,\n ground_truth_input: training_ground_truth,\n })\n batch_size = min(FLAGS.batch_size, set_size - i)\n total_accuracy += (training_accuracy * batch_size) / set_size\n if total_conf_matrix is None:\n total_conf_matrix = conf_matrix\n else:\n total_conf_matrix += conf_matrix\n tf.logging.info('Confusion Matrix:\\n %s' % (total_conf_matrix))\n tf.logging.info('Training accuracy = %.2f%% (N=%d)' %\n (total_accuracy * 100, set_size))\n\n # validation set\n set_size = audio_processor.set_size('validation')\n tf.logging.info('set_size=%d', set_size)\n total_accuracy = 0\n total_conf_matrix = None\n for i in xrange(0, set_size, FLAGS.batch_size):\n validation_fingerprints, validation_ground_truth = (\n audio_processor.get_data(FLAGS.batch_size, i, model_settings, 0.0,\n 0.0, 0, 'validation', sess))\n validation_accuracy, conf_matrix = sess.run(\n [evaluation_step, confusion_matrix],\n feed_dict={\n fingerprint_input: validation_fingerprints,\n ground_truth_input: validation_ground_truth,\n })\n batch_size = min(FLAGS.batch_size, set_size - i)\n total_accuracy += (validation_accuracy * batch_size) / set_size\n if total_conf_matrix is None:\n total_conf_matrix = conf_matrix\n else:\n total_conf_matrix += conf_matrix\n tf.logging.info('Confusion Matrix:\\n %s' % (total_conf_matrix))\n tf.logging.info('Validation accuracy = %.2f%% (N=%d)' %\n (total_accuracy * 100, set_size))\n \n # test set\n set_size = audio_processor.set_size('testing')\n tf.logging.info('set_size=%d', set_size)\n total_accuracy = 0\n total_conf_matrix = None\n for i in xrange(0, set_size, FLAGS.batch_size):\n 
test_fingerprints, test_ground_truth = audio_processor.get_data(\n FLAGS.batch_size, i, model_settings, 0.0, 0.0, 0, 'testing', sess)\n test_accuracy, conf_matrix = sess.run(\n [evaluation_step, confusion_matrix],\n feed_dict={\n fingerprint_input: test_fingerprints,\n ground_truth_input: test_ground_truth,\n })\n batch_size = min(FLAGS.batch_size, set_size - i)\n total_accuracy += (test_accuracy * batch_size) / set_size\n if total_conf_matrix is None:\n total_conf_matrix = conf_matrix\n else:\n total_conf_matrix += conf_matrix\n tf.logging.info('Confusion Matrix:\\n %s' % (total_conf_matrix))\n tf.logging.info('Test accuracy = %.2f%% (N=%d)' % (total_accuracy * 100,\n set_size))\n\ndef main(_):\n\n # Create the model, load weights from checkpoint and run on train/val/test\n run_quant_inference(FLAGS.wanted_words, FLAGS.sample_rate,\n FLAGS.clip_duration_ms, FLAGS.window_size_ms,\n FLAGS.window_stride_ms, FLAGS.dct_coefficient_count,\n FLAGS.model_architecture, FLAGS.model_size_info)\n\n\nif __name__ == '__main__':\n parser = argparse.ArgumentParser()\n parser.add_argument(\n '--data_url',\n type=str,\n # pylint: disable=line-too-long\n default='http://download.tensorflow.org/data/speech_commands_v0.02.tar.gz',\n # pylint: enable=line-too-long\n help='Location of speech training data archive on the web.')\n parser.add_argument(\n '--data_dir',\n type=str,\n default='/tmp/speech_dataset/',\n help=\"\"\"\\\n Where to download the speech training data to.\n \"\"\")\n parser.add_argument(\n '--silence_percentage',\n type=float,\n default=10.0,\n help=\"\"\"\\\n How much of the training data should be silence.\n \"\"\")\n parser.add_argument(\n '--unknown_percentage',\n type=float,\n default=10.0,\n help=\"\"\"\\\n How much of the training data should be unknown words.\n \"\"\")\n parser.add_argument(\n '--testing_percentage',\n type=int,\n default=10,\n help='What percentage of wavs to use as a test set.')\n parser.add_argument(\n '--validation_percentage',\n type=int,\n default=10,\n help='What percentage of wavs to use as a validation set.')\n parser.add_argument(\n '--sample_rate',\n type=int,\n default=16000,\n help='Expected sample rate of the wavs',)\n parser.add_argument(\n '--clip_duration_ms',\n type=int,\n default=1000,\n help='Expected duration in milliseconds of the wavs',)\n parser.add_argument(\n '--window_size_ms',\n type=float,\n default=40.0,\n help='How long each spectrogram timeslice is',)\n parser.add_argument(\n '--window_stride_ms',\n type=float,\n default=20.0,\n help='How long each spectrogram timeslice is',)\n parser.add_argument(\n '--dct_coefficient_count',\n type=int,\n default=10,\n help='How many bins to use for the MFCC fingerprint',)\n parser.add_argument(\n '--batch_size',\n type=int,\n default=100,\n help='How many items to train with at once',)\n parser.add_argument(\n '--wanted_words',\n type=str,\n default='yes,no,up,down,left,right,on,off,stop,go',\n help='Words to use (others will be added to an unknown label)',)\n parser.add_argument(\n '--checkpoint',\n type=str,\n default='',\n help='Checkpoint to load the weights from.')\n parser.add_argument(\n '--model_architecture',\n type=str,\n default='ds_cnn',\n help='What model architecture to use')\n parser.add_argument(\n '--model_size_info',\n type=int,\n nargs=\"+\",\n default=[5,64,10,4,2,2,64,3,3,1,1,64,3,3,1,1,64,3,3,1,1,64,3,3,1,1],\n help='Model dimensions - different for various models')\n parser.add_argument(\n '--act_max',\n type=float,\n nargs=\"+\",\n 
default=[32,32,32,32,32,32,32,32,32,32,32,32],\n help='activations max')\n\n FLAGS, unparsed = parser.parse_known_args()\n tf.app.run(main=main, argv=[sys.argv[0]] + unparsed)\n"
] |
[
[
"tensorflow.trainable_variables",
"tensorflow.logging.set_verbosity",
"tensorflow.assign",
"numpy.round",
"tensorflow.argmax",
"tensorflow.equal",
"tensorflow.confusion_matrix",
"tensorflow.logging.info",
"tensorflow.placeholder",
"numpy.transpose",
"tensorflow.app.run",
"tensorflow.cast",
"tensorflow.InteractiveSession"
]
] |
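The weight loop in the quant_test.py cell above quantizes each tensor to Q-format int8: int_bits is sized to the largest magnitude, dec_bits = 7 - int_bits fractional bits remain, values are rounded at that scale, then scaled back to update the graph. The arithmetic in isolation, as a standalone NumPy sketch:

```python
import numpy as np

def quantize_q7(w):
    # Integer bits needed for the largest magnitude in the tensor.
    max_mag = max(abs(w.min()), abs(w.max()))
    int_bits = int(np.ceil(np.log2(max_mag)))
    dec_bits = 7 - int_bits                 # remaining fractional bits of an int8
    q = np.round(w * 2 ** dec_bits)         # integer codes, roughly in [-128, 128)
    return q / 2 ** dec_bits, dec_bits      # dequantized weights + shift amount

w = np.array([-0.61, 0.25, 0.90])
wq, dec_bits = quantize_q7(w)
assert dec_bits == 7                        # max |w| < 1, so all 7 bits are fractional
# Round-trip error is bounded by half a quantization step.
assert np.max(np.abs(wq - w)) <= 2.0 ** -(dec_bits + 1)
```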
yifding/AVEQA_transformers
|
[
"7c6619db4fa87c331f798d89e08a54b5ec4d8868"
] |
[
"src/transformers/models/deberta_v2/modeling_deberta_v2.py"
] |
[
"# coding=utf-8\n# Copyright 2020 Microsoft and the Hugging Face Inc. team.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\" PyTorch DeBERTa-v2 model. \"\"\"\n\nimport math\nfrom collections.abc import Sequence\n\nimport numpy as np\nimport torch\nfrom torch import _softmax_backward_data, nn\nfrom torch.nn import CrossEntropyLoss, LayerNorm\n\nfrom ...activations import ACT2FN\nfrom ...file_utils import add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward\nfrom ...modeling_outputs import (\n BaseModelOutput,\n MaskedLMOutput,\n QuestionAnsweringModelOutput,\n SequenceClassifierOutput,\n TokenClassifierOutput,\n)\nfrom ...modeling_utils import PreTrainedModel\nfrom ...utils import logging\nfrom .configuration_deberta_v2 import DebertaV2Config\n\n\nlogger = logging.get_logger(__name__)\n\n_CONFIG_FOR_DOC = \"DebertaV2Config\"\n_TOKENIZER_FOR_DOC = \"DebertaV2Tokenizer\"\n_CHECKPOINT_FOR_DOC = \"microsoft/deberta-v2-xlarge\"\n\nDEBERTA_V2_PRETRAINED_MODEL_ARCHIVE_LIST = [\n \"microsoft/deberta-v2-xlarge\",\n \"microsoft/deberta-v2-xxlarge\",\n \"microsoft/deberta-v2-xlarge-mnli\",\n \"microsoft/deberta-v2-xxlarge-mnli\",\n]\n\n\n# Copied from transformers.models.deberta.modeling_deberta.ContextPooler\nclass ContextPooler(nn.Module):\n def __init__(self, config):\n super().__init__()\n self.dense = nn.Linear(config.pooler_hidden_size, config.pooler_hidden_size)\n self.dropout = StableDropout(config.pooler_dropout)\n self.config = config\n\n def forward(self, hidden_states):\n # We \"pool\" the model by simply taking the hidden state corresponding\n # to the first token.\n\n context_token = hidden_states[:, 0]\n context_token = self.dropout(context_token)\n pooled_output = self.dense(context_token)\n pooled_output = ACT2FN[self.config.pooler_hidden_act](pooled_output)\n return pooled_output\n\n @property\n def output_dim(self):\n return self.config.hidden_size\n\n\n# Copied from transformers.models.deberta.modeling_deberta.XSoftmax with deberta->deberta_v2\nclass XSoftmax(torch.autograd.Function):\n \"\"\"\n Masked Softmax which is optimized for saving memory\n\n Args:\n input (:obj:`torch.tensor`): The input tensor that will apply softmax.\n mask (:obj:`torch.IntTensor`): The mask matrix where 0 indicate that element will be ignored in the softmax calculation.\n dim (int): The dimension that will apply softmax\n\n Example::\n\n >>> import torch\n >>> from transformers.models.deberta_v2.modeling_deberta_v2 import XSoftmax\n\n >>> # Make a tensor\n >>> x = torch.randn([4,20,100])\n\n >>> # Create a mask\n >>> mask = (x>0).int()\n\n >>> y = XSoftmax.apply(x, mask, dim=-1)\n \"\"\"\n\n @staticmethod\n def forward(self, input, mask, dim):\n self.dim = dim\n rmask = ~(mask.bool())\n\n output = input.masked_fill(rmask, float(\"-inf\"))\n output = torch.softmax(output, self.dim)\n output.masked_fill_(rmask, 0)\n self.save_for_backward(output)\n return output\n\n @staticmethod\n def backward(self, grad_output):\n (output,) = self.saved_tensors\n inputGrad = 
_softmax_backward_data(grad_output, output, self.dim, output)\n return inputGrad, None, None\n\n\n# Copied from transformers.models.deberta.modeling_deberta.DropoutContext\nclass DropoutContext(object):\n def __init__(self):\n self.dropout = 0\n self.mask = None\n self.scale = 1\n self.reuse_mask = True\n\n\n# Copied from transformers.models.deberta.modeling_deberta.get_mask\ndef get_mask(input, local_context):\n if not isinstance(local_context, DropoutContext):\n dropout = local_context\n mask = None\n else:\n dropout = local_context.dropout\n dropout *= local_context.scale\n mask = local_context.mask if local_context.reuse_mask else None\n\n if dropout > 0 and mask is None:\n mask = (1 - torch.empty_like(input).bernoulli_(1 - dropout)).bool()\n\n if isinstance(local_context, DropoutContext):\n if local_context.mask is None:\n local_context.mask = mask\n\n return mask, dropout\n\n\n# Copied from transformers.models.deberta.modeling_deberta.XDropout\nclass XDropout(torch.autograd.Function):\n \"\"\"Optimized dropout function to save computation and memory by using mask operation instead of multiplication.\"\"\"\n\n @staticmethod\n def forward(ctx, input, local_ctx):\n mask, dropout = get_mask(input, local_ctx)\n ctx.scale = 1.0 / (1 - dropout)\n if dropout > 0:\n ctx.save_for_backward(mask)\n return input.masked_fill(mask, 0) * ctx.scale\n else:\n return input\n\n @staticmethod\n def backward(ctx, grad_output):\n if ctx.scale > 1:\n (mask,) = ctx.saved_tensors\n return grad_output.masked_fill(mask, 0) * ctx.scale, None\n else:\n return grad_output, None\n\n\n# Copied from transformers.models.deberta.modeling_deberta.StableDropout\nclass StableDropout(torch.nn.Module):\n \"\"\"\n Optimized dropout module for stabilizing the training\n\n Args:\n drop_prob (float): the dropout probabilities\n \"\"\"\n\n def __init__(self, drop_prob):\n super().__init__()\n self.drop_prob = drop_prob\n self.count = 0\n self.context_stack = None\n\n def forward(self, x):\n \"\"\"\n Call the module\n\n Args:\n x (:obj:`torch.tensor`): The input tensor to apply dropout\n \"\"\"\n if self.training and self.drop_prob > 0:\n return XDropout.apply(x, self.get_context())\n return x\n\n def clear_context(self):\n self.count = 0\n self.context_stack = None\n\n def init_context(self, reuse_mask=True, scale=1):\n if self.context_stack is None:\n self.context_stack = []\n self.count = 0\n for c in self.context_stack:\n c.reuse_mask = reuse_mask\n c.scale = scale\n\n def get_context(self):\n if self.context_stack is not None:\n if self.count >= len(self.context_stack):\n self.context_stack.append(DropoutContext())\n ctx = self.context_stack[self.count]\n ctx.dropout = self.drop_prob\n self.count += 1\n return ctx\n else:\n return self.drop_prob\n\n\n# Copied from transformers.models.deberta.modeling_deberta.DebertaSelfOutput with DebertaLayerNorm->LayerNorm\nclass DebertaV2SelfOutput(nn.Module):\n def __init__(self, config):\n super().__init__()\n self.dense = nn.Linear(config.hidden_size, config.hidden_size)\n self.LayerNorm = LayerNorm(config.hidden_size, config.layer_norm_eps)\n self.dropout = StableDropout(config.hidden_dropout_prob)\n\n def forward(self, hidden_states, input_tensor):\n hidden_states = self.dense(hidden_states)\n hidden_states = self.dropout(hidden_states)\n hidden_states = self.LayerNorm(hidden_states + input_tensor)\n return hidden_states\n\n\n# Copied from transformers.models.deberta.modeling_deberta.DebertaAttention with Deberta->DebertaV2\nclass DebertaV2Attention(nn.Module):\n def __init__(self, 
config):\n super().__init__()\n self.self = DisentangledSelfAttention(config)\n self.output = DebertaV2SelfOutput(config)\n self.config = config\n\n def forward(\n self,\n hidden_states,\n attention_mask,\n return_att=False,\n query_states=None,\n relative_pos=None,\n rel_embeddings=None,\n ):\n self_output = self.self(\n hidden_states,\n attention_mask,\n return_att,\n query_states=query_states,\n relative_pos=relative_pos,\n rel_embeddings=rel_embeddings,\n )\n if return_att:\n self_output, att_matrix = self_output\n if query_states is None:\n query_states = hidden_states\n attention_output = self.output(self_output, query_states)\n\n if return_att:\n return (attention_output, att_matrix)\n else:\n return attention_output\n\n\n# Copied from transformers.models.bert.modeling_bert.BertIntermediate with Bert->DebertaV2\nclass DebertaV2Intermediate(nn.Module):\n def __init__(self, config):\n super().__init__()\n self.dense = nn.Linear(config.hidden_size, config.intermediate_size)\n if isinstance(config.hidden_act, str):\n self.intermediate_act_fn = ACT2FN[config.hidden_act]\n else:\n self.intermediate_act_fn = config.hidden_act\n\n def forward(self, hidden_states):\n hidden_states = self.dense(hidden_states)\n hidden_states = self.intermediate_act_fn(hidden_states)\n return hidden_states\n\n\n# Copied from transformers.models.deberta.modeling_deberta.DebertaOutput with DebertaLayerNorm->LayerNorm\nclass DebertaV2Output(nn.Module):\n def __init__(self, config):\n super().__init__()\n self.dense = nn.Linear(config.intermediate_size, config.hidden_size)\n self.LayerNorm = LayerNorm(config.hidden_size, config.layer_norm_eps)\n self.dropout = StableDropout(config.hidden_dropout_prob)\n self.config = config\n\n def forward(self, hidden_states, input_tensor):\n hidden_states = self.dense(hidden_states)\n hidden_states = self.dropout(hidden_states)\n hidden_states = self.LayerNorm(hidden_states + input_tensor)\n return hidden_states\n\n\n# Copied from transformers.models.deberta.modeling_deberta.DebertaLayer with Deberta->DebertaV2\nclass DebertaV2Layer(nn.Module):\n def __init__(self, config):\n super().__init__()\n self.attention = DebertaV2Attention(config)\n self.intermediate = DebertaV2Intermediate(config)\n self.output = DebertaV2Output(config)\n\n def forward(\n self,\n hidden_states,\n attention_mask,\n return_att=False,\n query_states=None,\n relative_pos=None,\n rel_embeddings=None,\n ):\n attention_output = self.attention(\n hidden_states,\n attention_mask,\n return_att=return_att,\n query_states=query_states,\n relative_pos=relative_pos,\n rel_embeddings=rel_embeddings,\n )\n if return_att:\n attention_output, att_matrix = attention_output\n intermediate_output = self.intermediate(attention_output)\n layer_output = self.output(intermediate_output, attention_output)\n if return_att:\n return (layer_output, att_matrix)\n else:\n return layer_output\n\n\nclass ConvLayer(nn.Module):\n def __init__(self, config):\n super().__init__()\n kernel_size = getattr(config, \"conv_kernel_size\", 3)\n groups = getattr(config, \"conv_groups\", 1)\n self.conv_act = getattr(config, \"conv_act\", \"tanh\")\n self.conv = torch.nn.Conv1d(\n config.hidden_size, config.hidden_size, kernel_size, padding=(kernel_size - 1) // 2, groups=groups\n )\n self.LayerNorm = LayerNorm(config.hidden_size, config.layer_norm_eps)\n self.dropout = StableDropout(config.hidden_dropout_prob)\n self.config = config\n\n def forward(self, hidden_states, residual_states, input_mask):\n out = self.conv(hidden_states.permute(0, 2, 
1).contiguous()).permute(0, 2, 1).contiguous()\n rmask = (1 - input_mask).bool()\n out.masked_fill_(rmask.unsqueeze(-1).expand(out.size()), 0)\n out = ACT2FN[self.conv_act](self.dropout(out))\n\n layer_norm_input = residual_states + out\n output = self.LayerNorm(layer_norm_input).to(layer_norm_input)\n\n if input_mask is None:\n output_states = output\n else:\n if input_mask.dim() != layer_norm_input.dim():\n if input_mask.dim() == 4:\n input_mask = input_mask.squeeze(1).squeeze(1)\n input_mask = input_mask.unsqueeze(2)\n\n input_mask = input_mask.to(output.dtype)\n output_states = output * input_mask\n\n return output_states\n\n\nclass DebertaV2Encoder(nn.Module):\n \"\"\"Modified BertEncoder with relative position bias support\"\"\"\n\n def __init__(self, config):\n super().__init__()\n\n self.layer = nn.ModuleList([DebertaV2Layer(config) for _ in range(config.num_hidden_layers)])\n self.relative_attention = getattr(config, \"relative_attention\", False)\n\n if self.relative_attention:\n self.max_relative_positions = getattr(config, \"max_relative_positions\", -1)\n if self.max_relative_positions < 1:\n self.max_relative_positions = config.max_position_embeddings\n\n self.position_buckets = getattr(config, \"position_buckets\", -1)\n pos_ebd_size = self.max_relative_positions * 2\n\n if self.position_buckets > 0:\n pos_ebd_size = self.position_buckets * 2\n\n self.rel_embeddings = nn.Embedding(pos_ebd_size, config.hidden_size)\n\n self.norm_rel_ebd = [x.strip() for x in getattr(config, \"norm_rel_ebd\", \"none\").lower().split(\"|\")]\n\n if \"layer_norm\" in self.norm_rel_ebd:\n self.LayerNorm = LayerNorm(config.hidden_size, config.layer_norm_eps, elementwise_affine=True)\n\n self.conv = ConvLayer(config) if getattr(config, \"conv_kernel_size\", 0) > 0 else None\n\n def get_rel_embedding(self):\n rel_embeddings = self.rel_embeddings.weight if self.relative_attention else None\n if rel_embeddings is not None and (\"layer_norm\" in self.norm_rel_ebd):\n rel_embeddings = self.LayerNorm(rel_embeddings)\n return rel_embeddings\n\n def get_attention_mask(self, attention_mask):\n if attention_mask.dim() <= 2:\n extended_attention_mask = attention_mask.unsqueeze(1).unsqueeze(2)\n attention_mask = extended_attention_mask * extended_attention_mask.squeeze(-2).unsqueeze(-1)\n attention_mask = attention_mask.byte()\n elif attention_mask.dim() == 3:\n attention_mask = attention_mask.unsqueeze(1)\n\n return attention_mask\n\n def get_rel_pos(self, hidden_states, query_states=None, relative_pos=None):\n if self.relative_attention and relative_pos is None:\n q = query_states.size(-2) if query_states is not None else hidden_states.size(-2)\n relative_pos = build_relative_position(\n q, hidden_states.size(-2), bucket_size=self.position_buckets, max_position=self.max_relative_positions\n )\n return relative_pos\n\n def forward(\n self,\n hidden_states,\n attention_mask,\n output_hidden_states=True,\n output_attentions=False,\n query_states=None,\n relative_pos=None,\n return_dict=True,\n ):\n if attention_mask.dim() <= 2:\n input_mask = attention_mask\n else:\n input_mask = (attention_mask.sum(-2) > 0).byte()\n attention_mask = self.get_attention_mask(attention_mask)\n relative_pos = self.get_rel_pos(hidden_states, query_states, relative_pos)\n\n all_hidden_states = () if output_hidden_states else None\n all_attentions = () if output_attentions else None\n\n if isinstance(hidden_states, Sequence):\n next_kv = hidden_states[0]\n else:\n next_kv = hidden_states\n rel_embeddings = self.get_rel_embedding()\n 
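# Thread the running output through the stack; when hidden_states is a Sequence, each layer reads its own key/value states from it.\n        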
output_states = next_kv\n        for i, layer_module in enumerate(self.layer):\n\n            if output_hidden_states:\n                all_hidden_states = all_hidden_states + (output_states,)\n\n            output_states = layer_module(\n                next_kv,\n                attention_mask,\n                output_attentions,\n                query_states=query_states,\n                relative_pos=relative_pos,\n                rel_embeddings=rel_embeddings,\n            )\n            if output_attentions:\n                output_states, att_m = output_states\n\n            if i == 0 and self.conv is not None:\n                output_states = self.conv(hidden_states, output_states, input_mask)\n\n            if query_states is not None:\n                query_states = output_states\n                if isinstance(hidden_states, Sequence):\n                    next_kv = hidden_states[i + 1] if i + 1 < len(self.layer) else None\n            else:\n                next_kv = output_states\n\n            if output_attentions:\n                all_attentions = all_attentions + (att_m,)\n\n        if output_hidden_states:\n            all_hidden_states = all_hidden_states + (output_states,)\n\n        if not return_dict:\n            return tuple(v for v in [output_states, all_hidden_states, all_attentions] if v is not None)\n        return BaseModelOutput(\n            last_hidden_state=output_states, hidden_states=all_hidden_states, attentions=all_attentions\n        )\n\n\ndef make_log_bucket_position(relative_pos, bucket_size, max_position):\n    sign = np.sign(relative_pos)\n    mid = bucket_size // 2\n    abs_pos = np.where((relative_pos < mid) & (relative_pos > -mid), mid - 1, np.abs(relative_pos))\n    log_pos = np.ceil(np.log(abs_pos / mid) / np.log((max_position - 1) / mid) * (mid - 1)) + mid\n    bucket_pos = np.where(abs_pos <= mid, relative_pos, log_pos * sign).astype(int)\n    return bucket_pos\n\n\ndef build_relative_position(query_size, key_size, bucket_size=-1, max_position=-1):\n    \"\"\"\n    Build relative position according to the query and key\n\n    We assume the absolute position of query :math:`P_q` ranges from (0, query_size) and the absolute position of key\n    :math:`P_k` ranges from (0, key_size). The relative positions from query to key are :math:`R_{q \\\\rightarrow k} =\n    P_q - P_k`\n\n    Args:\n        query_size (int): the length of query\n        key_size (int): the length of key\n        bucket_size (int): the size of position bucket\n        max_position (int): the maximum allowed absolute position\n\n    Return:\n        :obj:`torch.LongTensor`: A tensor with shape [1, query_size, key_size]\n\n    \"\"\"\n    q_ids = np.arange(0, query_size)\n    k_ids = np.arange(0, key_size)\n    rel_pos_ids = q_ids[:, None] - np.tile(k_ids, (q_ids.shape[0], 1))\n    if bucket_size > 0 and max_position > 0:\n        rel_pos_ids = make_log_bucket_position(rel_pos_ids, bucket_size, max_position)\n    rel_pos_ids = torch.tensor(rel_pos_ids, dtype=torch.long)\n    rel_pos_ids = rel_pos_ids[:query_size, :]\n    rel_pos_ids = rel_pos_ids.unsqueeze(0)\n    return rel_pos_ids\n\n\[email protected]\n# Copied from transformers.models.deberta.modeling_deberta.c2p_dynamic_expand\ndef c2p_dynamic_expand(c2p_pos, query_layer, relative_pos):\n    return c2p_pos.expand([query_layer.size(0), query_layer.size(1), query_layer.size(2), relative_pos.size(-1)])\n\n\[email protected]\n# Copied from transformers.models.deberta.modeling_deberta.p2c_dynamic_expand\ndef p2c_dynamic_expand(c2p_pos, query_layer, key_layer):\n    return c2p_pos.expand([query_layer.size(0), query_layer.size(1), key_layer.size(-2), key_layer.size(-2)])\n\n\[email protected]\n# Copied from transformers.models.deberta.modeling_deberta.pos_dynamic_expand\ndef pos_dynamic_expand(pos_index, p2c_att, key_layer):\n    return pos_index.expand(p2c_att.size()[:2] + (pos_index.size(-2), key_layer.size(-2)))\n\n\nclass DisentangledSelfAttention(torch.nn.Module):\n    \"\"\"\n    Disentangled 
self-attention module\n\n Parameters:\n config (:obj:`DebertaV2Config`):\n A model config class instance with the configuration to build a new model. The schema is similar to\n `BertConfig`, for more details, please refer :class:`~transformers.DebertaV2Config`\n\n \"\"\"\n\n def __init__(self, config):\n super().__init__()\n if config.hidden_size % config.num_attention_heads != 0:\n raise ValueError(\n f\"The hidden size ({config.hidden_size}) is not a multiple of the number of attention \"\n f\"heads ({config.num_attention_heads})\"\n )\n self.num_attention_heads = config.num_attention_heads\n _attention_head_size = config.hidden_size // config.num_attention_heads\n self.attention_head_size = getattr(config, \"attention_head_size\", _attention_head_size)\n self.all_head_size = self.num_attention_heads * self.attention_head_size\n self.query_proj = nn.Linear(config.hidden_size, self.all_head_size, bias=True)\n self.key_proj = nn.Linear(config.hidden_size, self.all_head_size, bias=True)\n self.value_proj = nn.Linear(config.hidden_size, self.all_head_size, bias=True)\n\n self.share_att_key = getattr(config, \"share_att_key\", False)\n self.pos_att_type = config.pos_att_type if config.pos_att_type is not None else []\n self.relative_attention = getattr(config, \"relative_attention\", False)\n\n if self.relative_attention:\n self.position_buckets = getattr(config, \"position_buckets\", -1)\n self.max_relative_positions = getattr(config, \"max_relative_positions\", -1)\n if self.max_relative_positions < 1:\n self.max_relative_positions = config.max_position_embeddings\n self.pos_ebd_size = self.max_relative_positions\n if self.position_buckets > 0:\n self.pos_ebd_size = self.position_buckets\n\n self.pos_dropout = StableDropout(config.hidden_dropout_prob)\n\n if not self.share_att_key:\n if \"c2p\" in self.pos_att_type or \"p2p\" in self.pos_att_type:\n self.pos_key_proj = nn.Linear(config.hidden_size, self.all_head_size, bias=True)\n if \"p2c\" in self.pos_att_type or \"p2p\" in self.pos_att_type:\n self.pos_query_proj = nn.Linear(config.hidden_size, self.all_head_size)\n\n self.dropout = StableDropout(config.attention_probs_dropout_prob)\n\n def transpose_for_scores(self, x, attention_heads):\n new_x_shape = x.size()[:-1] + (attention_heads, -1)\n x = x.view(*new_x_shape)\n return x.permute(0, 2, 1, 3).contiguous().view(-1, x.size(1), x.size(-1))\n\n def forward(\n self,\n hidden_states,\n attention_mask,\n return_att=False,\n query_states=None,\n relative_pos=None,\n rel_embeddings=None,\n ):\n \"\"\"\n Call the module\n\n Args:\n hidden_states (:obj:`torch.FloatTensor`):\n Input states to the module usually the output from previous layer, it will be the Q,K and V in\n `Attention(Q,K,V)`\n\n attention_mask (:obj:`torch.ByteTensor`):\n An attention mask matrix of shape [`B`, `N`, `N`] where `B` is the batch size, `N` is the maximum\n sequence length in which element [i,j] = `1` means the `i` th token in the input can attend to the `j`\n th token.\n\n return_att (:obj:`bool`, optional):\n Whether return the attention matrix.\n\n query_states (:obj:`torch.FloatTensor`, optional):\n The `Q` state in `Attention(Q,K,V)`.\n\n relative_pos (:obj:`torch.LongTensor`):\n The relative position encoding between the tokens in the sequence. It's of shape [`B`, `N`, `N`] with\n values ranging in [`-max_relative_positions`, `max_relative_positions`].\n\n rel_embeddings (:obj:`torch.FloatTensor`):\n The embedding of relative distances. 
It's a tensor of shape [:math:`2 \\\\times\n                \\\\text{max_relative_positions}`, `hidden_size`].\n\n\n        \"\"\"\n        if query_states is None:\n            query_states = hidden_states\n        query_layer = self.transpose_for_scores(self.query_proj(query_states), self.num_attention_heads)\n        key_layer = self.transpose_for_scores(self.key_proj(hidden_states), self.num_attention_heads)\n        value_layer = self.transpose_for_scores(self.value_proj(hidden_states), self.num_attention_heads)\n\n        rel_att = None\n        # Take the dot product between \"query\" and \"key\" to get the raw attention scores.\n        scale_factor = 1\n        if \"c2p\" in self.pos_att_type:\n            scale_factor += 1\n        if \"p2c\" in self.pos_att_type:\n            scale_factor += 1\n        if \"p2p\" in self.pos_att_type:\n            scale_factor += 1\n        scale = math.sqrt(query_layer.size(-1) * scale_factor)\n        attention_scores = torch.bmm(query_layer, key_layer.transpose(-1, -2)) / scale\n        if self.relative_attention:\n            rel_embeddings = self.pos_dropout(rel_embeddings)\n            rel_att = self.disentangled_attention_bias(\n                query_layer, key_layer, relative_pos, rel_embeddings, scale_factor\n            )\n\n        if rel_att is not None:\n            attention_scores = attention_scores + rel_att\n        attention_scores = attention_scores.view(\n            -1, self.num_attention_heads, attention_scores.size(-2), attention_scores.size(-1)\n        )\n\n        # bsz x height x length x dimension\n        attention_probs = XSoftmax.apply(attention_scores, attention_mask, -1)\n        attention_probs = self.dropout(attention_probs)\n        context_layer = torch.bmm(\n            attention_probs.view(-1, attention_probs.size(-2), attention_probs.size(-1)), value_layer\n        )\n        context_layer = (\n            context_layer.view(-1, self.num_attention_heads, context_layer.size(-2), context_layer.size(-1))\n            .permute(0, 2, 1, 3)\n            .contiguous()\n        )\n        new_context_layer_shape = context_layer.size()[:-2] + (-1,)\n        context_layer = context_layer.view(*new_context_layer_shape)\n        if return_att:\n            return (context_layer, attention_probs)\n        else:\n            return context_layer\n\n    def disentangled_attention_bias(self, query_layer, key_layer, relative_pos, rel_embeddings, scale_factor):\n        if relative_pos is None:\n            q = query_layer.size(-2)\n            relative_pos = build_relative_position(\n                q, key_layer.size(-2), bucket_size=self.position_buckets, max_position=self.max_relative_positions\n            )\n        if relative_pos.dim() == 2:\n            relative_pos = relative_pos.unsqueeze(0).unsqueeze(0)\n        elif relative_pos.dim() == 3:\n            relative_pos = relative_pos.unsqueeze(1)\n        # bsz x height x query x key\n        elif relative_pos.dim() != 4:\n            raise ValueError(f\"Relative position ids must be of dim 2 or 3 or 4. 
{relative_pos.dim()}\")\n\n att_span = self.pos_ebd_size\n relative_pos = relative_pos.long().to(query_layer.device)\n\n rel_embeddings = rel_embeddings[self.pos_ebd_size - att_span : self.pos_ebd_size + att_span, :].unsqueeze(0)\n if self.share_att_key:\n pos_query_layer = self.transpose_for_scores(\n self.query_proj(rel_embeddings), self.num_attention_heads\n ).repeat(query_layer.size(0) // self.num_attention_heads, 1, 1)\n pos_key_layer = self.transpose_for_scores(self.key_proj(rel_embeddings), self.num_attention_heads).repeat(\n query_layer.size(0) // self.num_attention_heads, 1, 1\n )\n else:\n if \"c2p\" in self.pos_att_type or \"p2p\" in self.pos_att_type:\n pos_key_layer = self.transpose_for_scores(\n self.pos_key_proj(rel_embeddings), self.num_attention_heads\n ).repeat(\n query_layer.size(0) // self.num_attention_heads, 1, 1\n ) # .split(self.all_head_size, dim=-1)\n if \"p2c\" in self.pos_att_type or \"p2p\" in self.pos_att_type:\n pos_query_layer = self.transpose_for_scores(\n self.pos_query_proj(rel_embeddings), self.num_attention_heads\n ).repeat(\n query_layer.size(0) // self.num_attention_heads, 1, 1\n ) # .split(self.all_head_size, dim=-1)\n\n score = 0\n # content->position\n if \"c2p\" in self.pos_att_type:\n scale = math.sqrt(pos_key_layer.size(-1) * scale_factor)\n c2p_att = torch.bmm(query_layer, pos_key_layer.transpose(-1, -2))\n c2p_pos = torch.clamp(relative_pos + att_span, 0, att_span * 2 - 1)\n c2p_att = torch.gather(\n c2p_att,\n dim=-1,\n index=c2p_pos.squeeze(0).expand([query_layer.size(0), query_layer.size(1), relative_pos.size(-1)]),\n )\n score += c2p_att / scale\n\n # position->content\n if \"p2c\" in self.pos_att_type or \"p2p\" in self.pos_att_type:\n scale = math.sqrt(pos_query_layer.size(-1) * scale_factor)\n if key_layer.size(-2) != query_layer.size(-2):\n r_pos = build_relative_position(\n key_layer.size(-2),\n key_layer.size(-2),\n bucket_size=self.position_buckets,\n max_position=self.max_relative_positions,\n ).to(query_layer.device)\n r_pos = r_pos.unsqueeze(0)\n else:\n r_pos = relative_pos\n\n p2c_pos = torch.clamp(-r_pos + att_span, 0, att_span * 2 - 1)\n if query_layer.size(-2) != key_layer.size(-2):\n pos_index = relative_pos[:, :, :, 0].unsqueeze(-1)\n\n if \"p2c\" in self.pos_att_type:\n p2c_att = torch.bmm(key_layer, pos_query_layer.transpose(-1, -2))\n p2c_att = torch.gather(\n p2c_att,\n dim=-1,\n index=p2c_pos.squeeze(0).expand([query_layer.size(0), key_layer.size(-2), key_layer.size(-2)]),\n ).transpose(-1, -2)\n if query_layer.size(-2) != key_layer.size(-2):\n p2c_att = torch.gather(\n p2c_att,\n dim=-2,\n index=pos_index.expand(p2c_att.size()[:2] + (pos_index.size(-2), key_layer.size(-2))),\n )\n score += p2c_att / scale\n\n # position->position\n if \"p2p\" in self.pos_att_type:\n pos_query = pos_query_layer[:, :, att_span:, :]\n p2p_att = torch.matmul(pos_query, pos_key_layer.transpose(-1, -2))\n p2p_att = p2p_att.expand(query_layer.size()[:2] + p2p_att.size()[2:])\n if query_layer.size(-2) != key_layer.size(-2):\n p2p_att = torch.gather(\n p2p_att,\n dim=-2,\n index=pos_index.expand(query_layer.size()[:2] + (pos_index.size(-2), p2p_att.size(-1))),\n )\n p2p_att = torch.gather(\n p2p_att,\n dim=-1,\n index=c2p_pos.expand(\n [query_layer.size(0), query_layer.size(1), query_layer.size(2), relative_pos.size(-1)]\n ),\n )\n score += p2p_att\n\n return score\n\n\n# Copied from transformers.models.deberta.modeling_deberta.DebertaEmbeddings with DebertaLayerNorm->LayerNorm\nclass DebertaV2Embeddings(nn.Module):\n \"\"\"Construct the 
embeddings from word, position and token_type embeddings.\"\"\"\n\n def __init__(self, config):\n super().__init__()\n pad_token_id = getattr(config, \"pad_token_id\", 0)\n self.embedding_size = getattr(config, \"embedding_size\", config.hidden_size)\n self.word_embeddings = nn.Embedding(config.vocab_size, self.embedding_size, padding_idx=pad_token_id)\n\n self.position_biased_input = getattr(config, \"position_biased_input\", True)\n if not self.position_biased_input:\n self.position_embeddings = None\n else:\n self.position_embeddings = nn.Embedding(config.max_position_embeddings, self.embedding_size)\n\n if config.type_vocab_size > 0:\n self.token_type_embeddings = nn.Embedding(config.type_vocab_size, self.embedding_size)\n\n if self.embedding_size != config.hidden_size:\n self.embed_proj = nn.Linear(self.embedding_size, config.hidden_size, bias=False)\n self.LayerNorm = LayerNorm(config.hidden_size, config.layer_norm_eps)\n self.dropout = StableDropout(config.hidden_dropout_prob)\n self.config = config\n\n # position_ids (1, len position emb) is contiguous in memory and exported when serialized\n self.register_buffer(\"position_ids\", torch.arange(config.max_position_embeddings).expand((1, -1)))\n\n def forward(self, input_ids=None, token_type_ids=None, position_ids=None, mask=None, inputs_embeds=None):\n if input_ids is not None:\n input_shape = input_ids.size()\n else:\n input_shape = inputs_embeds.size()[:-1]\n\n seq_length = input_shape[1]\n\n if position_ids is None:\n position_ids = self.position_ids[:, :seq_length]\n\n if token_type_ids is None:\n token_type_ids = torch.zeros(input_shape, dtype=torch.long, device=self.position_ids.device)\n\n if inputs_embeds is None:\n inputs_embeds = self.word_embeddings(input_ids)\n\n if self.position_embeddings is not None:\n position_embeddings = self.position_embeddings(position_ids.long())\n else:\n position_embeddings = torch.zeros_like(inputs_embeds)\n\n embeddings = inputs_embeds\n if self.position_biased_input:\n embeddings += position_embeddings\n if self.config.type_vocab_size > 0:\n token_type_embeddings = self.token_type_embeddings(token_type_ids)\n embeddings += token_type_embeddings\n\n if self.embedding_size != self.config.hidden_size:\n embeddings = self.embed_proj(embeddings)\n\n embeddings = self.LayerNorm(embeddings)\n\n if mask is not None:\n if mask.dim() != embeddings.dim():\n if mask.dim() == 4:\n mask = mask.squeeze(1).squeeze(1)\n mask = mask.unsqueeze(2)\n mask = mask.to(embeddings.dtype)\n\n embeddings = embeddings * mask\n\n embeddings = self.dropout(embeddings)\n return embeddings\n\n\n# Copied from transformers.models.deberta.modeling_deberta.DebertaPreTrainedModel with Deberta->DebertaV2\nclass DebertaV2PreTrainedModel(PreTrainedModel):\n \"\"\"\n An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained\n models.\n \"\"\"\n\n config_class = DebertaV2Config\n base_model_prefix = \"deberta\"\n _keys_to_ignore_on_load_missing = [\"position_ids\"]\n _keys_to_ignore_on_load_unexpected = [\"position_embeddings\"]\n\n def __init__(self, config):\n super().__init__(config)\n self._register_load_state_dict_pre_hook(self._pre_load_hook)\n\n def _init_weights(self, module):\n \"\"\"Initialize the weights.\"\"\"\n if isinstance(module, nn.Linear):\n # Slightly different from the TF version which uses truncated_normal for initialization\n # cf https://github.com/pytorch/pytorch/pull/5617\n module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)\n if 
module.bias is not None:\n                module.bias.data.zero_()\n        elif isinstance(module, nn.Embedding):\n            module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)\n            if module.padding_idx is not None:\n                module.weight.data[module.padding_idx].zero_()\n\n    def _pre_load_hook(self, state_dict, prefix, local_metadata, strict, missing_keys, unexpected_keys, error_msgs):\n        \"\"\"\n        Removes the classifier if it doesn't have the correct number of labels.\n        \"\"\"\n        self_state = self.state_dict()\n        if (\n            (\"classifier.weight\" in self_state)\n            and (\"classifier.weight\" in state_dict)\n            and self_state[\"classifier.weight\"].size() != state_dict[\"classifier.weight\"].size()\n        ):\n            logger.warning(\n                f\"The checkpoint classifier head has a shape {state_dict['classifier.weight'].size()} and this model \"\n                f\"classifier head has a shape {self_state['classifier.weight'].size()}. Ignoring the checkpoint \"\n                f\"weights. You should train your model on new data.\"\n            )\n            del state_dict[\"classifier.weight\"]\n            if \"classifier.bias\" in state_dict:\n                del state_dict[\"classifier.bias\"]\n\n\nDEBERTA_START_DOCSTRING = r\"\"\"\n    The DeBERTa model was proposed in `DeBERTa: Decoding-enhanced BERT with Disentangled Attention\n    <https://arxiv.org/abs/2006.03654>`_ by Pengcheng He, Xiaodong Liu, Jianfeng Gao, Weizhu Chen. It's built on top of\n    BERT/RoBERTa with two improvements, i.e. disentangled attention and an enhanced mask decoder. With those two\n    improvements, it outperforms BERT/RoBERTa on a majority of tasks with 80GB of pretraining data.\n\n    This model is also a PyTorch `torch.nn.Module <https://pytorch.org/docs/stable/nn.html#torch.nn.Module>`__\n    subclass. Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matters related to\n    general usage and behavior.\n\n\n    Parameters:\n        config (:class:`~transformers.DebertaV2Config`): Model configuration class with all the parameters of the model.\n            Initializing with a config file does not load the weights associated with the model, only the\n            configuration. Check out the :meth:`~transformers.PreTrainedModel.from_pretrained` method to load the model\n            weights.\n\"\"\"\n\nDEBERTA_INPUTS_DOCSTRING = r\"\"\"\n    Args:\n        input_ids (:obj:`torch.LongTensor` of shape :obj:`{0}`):\n            Indices of input sequence tokens in the vocabulary.\n\n            Indices can be obtained using :class:`transformers.DebertaV2Tokenizer`. See\n            :func:`transformers.PreTrainedTokenizer.encode` and :func:`transformers.PreTrainedTokenizer.__call__` for\n            details.\n\n            `What are input IDs? <../glossary.html#input-ids>`__\n        attention_mask (:obj:`torch.FloatTensor` of shape :obj:`{0}`, `optional`):\n            Mask to avoid performing attention on padding token indices. Mask values selected in ``[0, 1]``:\n\n            - 1 for tokens that are **not masked**,\n            - 0 for tokens that are **masked**.\n\n            `What are attention masks? <../glossary.html#attention-mask>`__\n        token_type_ids (:obj:`torch.LongTensor` of shape :obj:`{0}`, `optional`):\n            Segment token indices to indicate first and second portions of the inputs. Indices are selected in ``[0,\n            1]``:\n\n            - 0 corresponds to a `sentence A` token,\n            - 1 corresponds to a `sentence B` token.\n\n            `What are token type IDs? <../glossary.html#token-type-ids>`_\n        position_ids (:obj:`torch.LongTensor` of shape :obj:`{0}`, `optional`):\n            Indices of positions of each input sequence tokens in the position embeddings. Selected in the range ``[0,\n            config.max_position_embeddings - 1]``.\n\n            `What are position IDs? 
<../glossary.html#position-ids>`_\n inputs_embeds (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, sequence_length, hidden_size)`, `optional`):\n Optionally, instead of passing :obj:`input_ids` you can choose to directly pass an embedded representation.\n This is useful if you want more control over how to convert `input_ids` indices into associated vectors\n than the model's internal embedding lookup matrix.\n output_attentions (:obj:`bool`, `optional`):\n Whether or not to return the attentions tensors of all attention layers. See ``attentions`` under returned\n tensors for more detail.\n output_hidden_states (:obj:`bool`, `optional`):\n Whether or not to return the hidden states of all layers. See ``hidden_states`` under returned tensors for\n more detail.\n return_dict (:obj:`bool`, `optional`):\n Whether or not to return a :class:`~transformers.file_utils.ModelOutput` instead of a plain tuple.\n\"\"\"\n\n\n@add_start_docstrings(\n \"The bare DeBERTa Model transformer outputting raw hidden-states without any specific head on top.\",\n DEBERTA_START_DOCSTRING,\n)\n# Copied from transformers.models.deberta.modeling_deberta.DebertaModel with Deberta->DebertaV2\nclass DebertaV2Model(DebertaV2PreTrainedModel):\n def __init__(self, config):\n super().__init__(config)\n\n self.embeddings = DebertaV2Embeddings(config)\n self.encoder = DebertaV2Encoder(config)\n self.z_steps = 0\n self.config = config\n self.init_weights()\n\n def get_input_embeddings(self):\n return self.embeddings.word_embeddings\n\n def set_input_embeddings(self, new_embeddings):\n self.embeddings.word_embeddings = new_embeddings\n\n def _prune_heads(self, heads_to_prune):\n \"\"\"\n Prunes heads of the model. heads_to_prune: dict of {layer_num: list of heads to prune in this layer} See base\n class PreTrainedModel\n \"\"\"\n raise NotImplementedError(\"The prune function is not implemented in DeBERTa model.\")\n\n @add_start_docstrings_to_model_forward(DEBERTA_INPUTS_DOCSTRING.format(\"batch_size, sequence_length\"))\n @add_code_sample_docstrings(\n tokenizer_class=_TOKENIZER_FOR_DOC,\n checkpoint=_CHECKPOINT_FOR_DOC,\n output_type=SequenceClassifierOutput,\n config_class=_CONFIG_FOR_DOC,\n )\n def forward(\n self,\n input_ids=None,\n attention_mask=None,\n token_type_ids=None,\n position_ids=None,\n inputs_embeds=None,\n output_attentions=None,\n output_hidden_states=None,\n return_dict=None,\n ):\n output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions\n output_hidden_states = (\n output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states\n )\n return_dict = return_dict if return_dict is not None else self.config.use_return_dict\n\n if input_ids is not None and inputs_embeds is not None:\n raise ValueError(\"You cannot specify both input_ids and inputs_embeds at the same time\")\n elif input_ids is not None:\n input_shape = input_ids.size()\n elif inputs_embeds is not None:\n input_shape = inputs_embeds.size()[:-1]\n else:\n raise ValueError(\"You have to specify either input_ids or inputs_embeds\")\n\n device = input_ids.device if input_ids is not None else inputs_embeds.device\n\n if attention_mask is None:\n attention_mask = torch.ones(input_shape, device=device)\n if token_type_ids is None:\n token_type_ids = torch.zeros(input_shape, dtype=torch.long, device=device)\n\n embedding_output = self.embeddings(\n input_ids=input_ids,\n token_type_ids=token_type_ids,\n position_ids=position_ids,\n mask=attention_mask,\n 
inputs_embeds=inputs_embeds,\n )\n\n encoder_outputs = self.encoder(\n embedding_output,\n attention_mask,\n output_hidden_states=True,\n output_attentions=output_attentions,\n return_dict=return_dict,\n )\n encoded_layers = encoder_outputs[1]\n\n if self.z_steps > 1:\n hidden_states = encoded_layers[-2]\n layers = [self.encoder.layer[-1] for _ in range(self.z_steps)]\n query_states = encoded_layers[-1]\n rel_embeddings = self.encoder.get_rel_embedding()\n attention_mask = self.encoder.get_attention_mask(attention_mask)\n rel_pos = self.encoder.get_rel_pos(embedding_output)\n for layer in layers[1:]:\n query_states = layer(\n hidden_states,\n attention_mask,\n return_att=False,\n query_states=query_states,\n relative_pos=rel_pos,\n rel_embeddings=rel_embeddings,\n )\n encoded_layers.append(query_states)\n\n sequence_output = encoded_layers[-1]\n\n if not return_dict:\n return (sequence_output,) + encoder_outputs[(1 if output_hidden_states else 2) :]\n\n return BaseModelOutput(\n last_hidden_state=sequence_output,\n hidden_states=encoder_outputs.hidden_states if output_hidden_states else None,\n attentions=encoder_outputs.attentions,\n )\n\n\n@add_start_docstrings(\"\"\"DeBERTa Model with a `language modeling` head on top. \"\"\", DEBERTA_START_DOCSTRING)\n# Copied from transformers.models.deberta.modeling_deberta.DebertaForMaskedLM with Deberta->DebertaV2\nclass DebertaV2ForMaskedLM(DebertaV2PreTrainedModel):\n _keys_to_ignore_on_load_unexpected = [r\"pooler\"]\n _keys_to_ignore_on_load_missing = [r\"position_ids\", r\"predictions.decoder.bias\"]\n\n def __init__(self, config):\n super().__init__(config)\n\n self.deberta = DebertaV2Model(config)\n self.cls = DebertaV2OnlyMLMHead(config)\n\n self.init_weights()\n\n def get_output_embeddings(self):\n return self.cls.predictions.decoder\n\n def set_output_embeddings(self, new_embeddings):\n self.cls.predictions.decoder = new_embeddings\n\n @add_start_docstrings_to_model_forward(DEBERTA_INPUTS_DOCSTRING.format(\"batch_size, sequence_length\"))\n @add_code_sample_docstrings(\n tokenizer_class=_TOKENIZER_FOR_DOC,\n checkpoint=_CHECKPOINT_FOR_DOC,\n output_type=MaskedLMOutput,\n config_class=_CONFIG_FOR_DOC,\n )\n def forward(\n self,\n input_ids=None,\n attention_mask=None,\n token_type_ids=None,\n position_ids=None,\n inputs_embeds=None,\n labels=None,\n output_attentions=None,\n output_hidden_states=None,\n return_dict=None,\n ):\n r\"\"\"\n labels (:obj:`torch.LongTensor` of shape :obj:`(batch_size, sequence_length)`, `optional`):\n Labels for computing the masked language modeling loss. 
Indices should be in ``[-100, 0, ...,\n config.vocab_size]`` (see ``input_ids`` docstring) Tokens with indices set to ``-100`` are ignored\n (masked), the loss is only computed for the tokens with labels in ``[0, ..., config.vocab_size]``\n \"\"\"\n\n return_dict = return_dict if return_dict is not None else self.config.use_return_dict\n\n outputs = self.deberta(\n input_ids,\n attention_mask=attention_mask,\n token_type_ids=token_type_ids,\n position_ids=position_ids,\n inputs_embeds=inputs_embeds,\n output_attentions=output_attentions,\n output_hidden_states=output_hidden_states,\n return_dict=return_dict,\n )\n\n sequence_output = outputs[0]\n prediction_scores = self.cls(sequence_output)\n\n masked_lm_loss = None\n if labels is not None:\n loss_fct = CrossEntropyLoss() # -100 index = padding token\n masked_lm_loss = loss_fct(prediction_scores.view(-1, self.config.vocab_size), labels.view(-1))\n\n if not return_dict:\n output = (prediction_scores,) + outputs[1:]\n return ((masked_lm_loss,) + output) if masked_lm_loss is not None else output\n\n return MaskedLMOutput(\n loss=masked_lm_loss,\n logits=prediction_scores,\n hidden_states=outputs.hidden_states,\n attentions=outputs.attentions,\n )\n\n\n# copied from transformers.models.bert.BertPredictionHeadTransform with bert -> deberta\nclass DebertaV2PredictionHeadTransform(nn.Module):\n def __init__(self, config):\n super().__init__()\n self.dense = nn.Linear(config.hidden_size, config.hidden_size)\n if isinstance(config.hidden_act, str):\n self.transform_act_fn = ACT2FN[config.hidden_act]\n else:\n self.transform_act_fn = config.hidden_act\n self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)\n\n def forward(self, hidden_states):\n hidden_states = self.dense(hidden_states)\n hidden_states = self.transform_act_fn(hidden_states)\n hidden_states = self.LayerNorm(hidden_states)\n return hidden_states\n\n\n# copied from transformers.models.bert.BertLMPredictionHead with bert -> deberta\nclass DebertaV2LMPredictionHead(nn.Module):\n def __init__(self, config):\n super().__init__()\n self.transform = DebertaV2PredictionHeadTransform(config)\n\n # The output weights are the same as the input embeddings, but there is\n # an output-only bias for each token.\n self.decoder = nn.Linear(config.hidden_size, config.vocab_size, bias=False)\n\n self.bias = nn.Parameter(torch.zeros(config.vocab_size))\n\n # Need a link between the two variables so that the bias is correctly resized with `resize_token_embeddings`\n self.decoder.bias = self.bias\n\n def forward(self, hidden_states):\n hidden_states = self.transform(hidden_states)\n hidden_states = self.decoder(hidden_states)\n return hidden_states\n\n\n# copied from transformers.models.bert.BertOnlyMLMHead with bert -> deberta\nclass DebertaV2OnlyMLMHead(nn.Module):\n def __init__(self, config):\n super().__init__()\n self.predictions = DebertaV2LMPredictionHead(config)\n\n def forward(self, sequence_output):\n prediction_scores = self.predictions(sequence_output)\n return prediction_scores\n\n\n@add_start_docstrings(\n \"\"\"\n DeBERTa Model transformer with a sequence classification/regression head on top (a linear layer on top of the\n pooled output) e.g. 
for GLUE tasks.\n \"\"\",\n DEBERTA_START_DOCSTRING,\n)\n# Copied from transformers.models.deberta.modeling_deberta.DebertaForSequenceClassification with Deberta->DebertaV2\nclass DebertaV2ForSequenceClassification(DebertaV2PreTrainedModel):\n def __init__(self, config):\n super().__init__(config)\n\n num_labels = getattr(config, \"num_labels\", 2)\n self.num_labels = num_labels\n\n self.deberta = DebertaV2Model(config)\n self.pooler = ContextPooler(config)\n output_dim = self.pooler.output_dim\n\n self.classifier = torch.nn.Linear(output_dim, num_labels)\n drop_out = getattr(config, \"cls_dropout\", None)\n drop_out = self.config.hidden_dropout_prob if drop_out is None else drop_out\n self.dropout = StableDropout(drop_out)\n\n self.init_weights()\n\n def get_input_embeddings(self):\n return self.deberta.get_input_embeddings()\n\n def set_input_embeddings(self, new_embeddings):\n self.deberta.set_input_embeddings(new_embeddings)\n\n @add_start_docstrings_to_model_forward(DEBERTA_INPUTS_DOCSTRING.format(\"batch_size, sequence_length\"))\n @add_code_sample_docstrings(\n tokenizer_class=_TOKENIZER_FOR_DOC,\n checkpoint=_CHECKPOINT_FOR_DOC,\n output_type=SequenceClassifierOutput,\n config_class=_CONFIG_FOR_DOC,\n )\n def forward(\n self,\n input_ids=None,\n attention_mask=None,\n token_type_ids=None,\n position_ids=None,\n inputs_embeds=None,\n labels=None,\n output_attentions=None,\n output_hidden_states=None,\n return_dict=None,\n ):\n r\"\"\"\n labels (:obj:`torch.LongTensor` of shape :obj:`(batch_size,)`, `optional`):\n Labels for computing the sequence classification/regression loss. Indices should be in :obj:`[0, ...,\n config.num_labels - 1]`. If :obj:`config.num_labels == 1` a regression loss is computed (Mean-Square loss),\n If :obj:`config.num_labels > 1` a classification loss is computed (Cross-Entropy).\n \"\"\"\n return_dict = return_dict if return_dict is not None else self.config.use_return_dict\n\n outputs = self.deberta(\n input_ids,\n token_type_ids=token_type_ids,\n attention_mask=attention_mask,\n position_ids=position_ids,\n inputs_embeds=inputs_embeds,\n output_attentions=output_attentions,\n output_hidden_states=output_hidden_states,\n return_dict=return_dict,\n )\n\n encoder_layer = outputs[0]\n pooled_output = self.pooler(encoder_layer)\n pooled_output = self.dropout(pooled_output)\n logits = self.classifier(pooled_output)\n\n loss = None\n if labels is not None:\n if self.num_labels == 1:\n # regression task\n loss_fn = torch.nn.MSELoss()\n logits = logits.view(-1).to(labels.dtype)\n loss = loss_fn(logits, labels.view(-1))\n elif labels.dim() == 1 or labels.size(-1) == 1:\n label_index = (labels >= 0).nonzero()\n labels = labels.long()\n if label_index.size(0) > 0:\n labeled_logits = torch.gather(logits, 0, label_index.expand(label_index.size(0), logits.size(1)))\n labels = torch.gather(labels, 0, label_index.view(-1))\n loss_fct = CrossEntropyLoss()\n loss = loss_fct(labeled_logits.view(-1, self.num_labels).float(), labels.view(-1))\n else:\n loss = torch.tensor(0).to(logits)\n else:\n log_softmax = torch.nn.LogSoftmax(-1)\n loss = -((log_softmax(logits) * labels).sum(-1)).mean()\n if not return_dict:\n output = (logits,) + outputs[1:]\n return ((loss,) + output) if loss is not None else output\n else:\n return SequenceClassifierOutput(\n loss=loss,\n logits=logits,\n hidden_states=outputs.hidden_states,\n attentions=outputs.attentions,\n )\n\n\n@add_start_docstrings(\n \"\"\"\n DeBERTa Model with a token classification head on top (a linear layer on top of the 
hidden-states output) e.g. for\n Named-Entity-Recognition (NER) tasks.\n \"\"\",\n DEBERTA_START_DOCSTRING,\n)\n# Copied from transformers.models.deberta.modeling_deberta.DebertaForTokenClassification with Deberta->DebertaV2\nclass DebertaV2ForTokenClassification(DebertaV2PreTrainedModel):\n _keys_to_ignore_on_load_unexpected = [r\"pooler\"]\n\n def __init__(self, config):\n super().__init__(config)\n self.num_labels = config.num_labels\n\n self.deberta = DebertaV2Model(config)\n self.dropout = nn.Dropout(config.hidden_dropout_prob)\n self.classifier = nn.Linear(config.hidden_size, config.num_labels)\n\n self.init_weights()\n\n @add_start_docstrings_to_model_forward(DEBERTA_INPUTS_DOCSTRING.format(\"batch_size, sequence_length\"))\n @add_code_sample_docstrings(\n tokenizer_class=_TOKENIZER_FOR_DOC,\n checkpoint=_CHECKPOINT_FOR_DOC,\n output_type=TokenClassifierOutput,\n config_class=_CONFIG_FOR_DOC,\n )\n def forward(\n self,\n input_ids=None,\n attention_mask=None,\n token_type_ids=None,\n position_ids=None,\n inputs_embeds=None,\n labels=None,\n output_attentions=None,\n output_hidden_states=None,\n return_dict=None,\n ):\n r\"\"\"\n labels (:obj:`torch.LongTensor` of shape :obj:`(batch_size, sequence_length)`, `optional`):\n Labels for computing the token classification loss. Indices should be in ``[0, ..., config.num_labels -\n 1]``.\n \"\"\"\n return_dict = return_dict if return_dict is not None else self.config.use_return_dict\n\n outputs = self.deberta(\n input_ids,\n attention_mask=attention_mask,\n token_type_ids=token_type_ids,\n position_ids=position_ids,\n inputs_embeds=inputs_embeds,\n output_attentions=output_attentions,\n output_hidden_states=output_hidden_states,\n return_dict=return_dict,\n )\n\n sequence_output = outputs[0]\n\n sequence_output = self.dropout(sequence_output)\n logits = self.classifier(sequence_output)\n\n loss = None\n if labels is not None:\n loss_fct = CrossEntropyLoss()\n # Only keep active parts of the loss\n if attention_mask is not None:\n active_loss = attention_mask.view(-1) == 1\n active_logits = logits.view(-1, self.num_labels)\n active_labels = torch.where(\n active_loss, labels.view(-1), torch.tensor(loss_fct.ignore_index).type_as(labels)\n )\n loss = loss_fct(active_logits, active_labels)\n else:\n loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))\n\n if not return_dict:\n output = (logits,) + outputs[1:]\n return ((loss,) + output) if loss is not None else output\n\n return TokenClassifierOutput(\n loss=loss,\n logits=logits,\n hidden_states=outputs.hidden_states,\n attentions=outputs.attentions,\n )\n\n\n@add_start_docstrings(\n \"\"\"\n DeBERTa Model with a span classification head on top for extractive question-answering tasks like SQuAD (a linear\n layers on top of the hidden-states output to compute `span start logits` and `span end logits`).\n \"\"\",\n DEBERTA_START_DOCSTRING,\n)\n# Copied from transformers.models.deberta.modeling_deberta.DebertaForQuestionAnswering with Deberta->DebertaV2\nclass DebertaV2ForQuestionAnswering(DebertaV2PreTrainedModel):\n _keys_to_ignore_on_load_unexpected = [r\"pooler\"]\n\n def __init__(self, config):\n super().__init__(config)\n self.num_labels = config.num_labels\n\n self.deberta = DebertaV2Model(config)\n self.qa_outputs = nn.Linear(config.hidden_size, config.num_labels)\n\n self.init_weights()\n\n @add_start_docstrings_to_model_forward(DEBERTA_INPUTS_DOCSTRING.format(\"batch_size, sequence_length\"))\n @add_code_sample_docstrings(\n tokenizer_class=_TOKENIZER_FOR_DOC,\n 
checkpoint=_CHECKPOINT_FOR_DOC,\n output_type=QuestionAnsweringModelOutput,\n config_class=_CONFIG_FOR_DOC,\n )\n def forward(\n self,\n input_ids=None,\n attention_mask=None,\n token_type_ids=None,\n position_ids=None,\n inputs_embeds=None,\n start_positions=None,\n end_positions=None,\n output_attentions=None,\n output_hidden_states=None,\n return_dict=None,\n ):\n r\"\"\"\n start_positions (:obj:`torch.LongTensor` of shape :obj:`(batch_size,)`, `optional`):\n Labels for position (index) of the start of the labelled span for computing the token classification loss.\n Positions are clamped to the length of the sequence (:obj:`sequence_length`). Position outside of the\n sequence are not taken into account for computing the loss.\n end_positions (:obj:`torch.LongTensor` of shape :obj:`(batch_size,)`, `optional`):\n Labels for position (index) of the end of the labelled span for computing the token classification loss.\n Positions are clamped to the length of the sequence (:obj:`sequence_length`). Position outside of the\n sequence are not taken into account for computing the loss.\n \"\"\"\n return_dict = return_dict if return_dict is not None else self.config.use_return_dict\n\n outputs = self.deberta(\n input_ids,\n attention_mask=attention_mask,\n token_type_ids=token_type_ids,\n position_ids=position_ids,\n inputs_embeds=inputs_embeds,\n output_attentions=output_attentions,\n output_hidden_states=output_hidden_states,\n return_dict=return_dict,\n )\n\n sequence_output = outputs[0]\n\n logits = self.qa_outputs(sequence_output)\n start_logits, end_logits = logits.split(1, dim=-1)\n start_logits = start_logits.squeeze(-1)\n end_logits = end_logits.squeeze(-1)\n\n total_loss = None\n if start_positions is not None and end_positions is not None:\n # If we are on multi-GPU, split add a dimension\n if len(start_positions.size()) > 1:\n start_positions = start_positions.squeeze(-1)\n if len(end_positions.size()) > 1:\n end_positions = end_positions.squeeze(-1)\n # sometimes the start/end positions are outside our model inputs, we ignore these terms\n ignored_index = start_logits.size(1)\n start_positions.clamp_(0, ignored_index)\n end_positions.clamp_(0, ignored_index)\n\n loss_fct = CrossEntropyLoss(ignore_index=ignored_index)\n start_loss = loss_fct(start_logits, start_positions)\n end_loss = loss_fct(end_logits, end_positions)\n total_loss = (start_loss + end_loss) / 2\n\n if not return_dict:\n output = (start_logits, end_logits) + outputs[1:]\n return ((total_loss,) + output) if total_loss is not None else output\n\n return QuestionAnsweringModelOutput(\n loss=total_loss,\n start_logits=start_logits,\n end_logits=end_logits,\n hidden_states=outputs.hidden_states,\n attentions=outputs.attentions,\n )\n"
] |
[
[
"torch.nn.Linear",
"numpy.tile",
"torch.ones",
"numpy.sign",
"numpy.where",
"torch.nn.CrossEntropyLoss",
"torch.nn.LayerNorm",
"numpy.log",
"torch.nn.Conv1d",
"numpy.arange",
"torch.tensor",
"torch.zeros_like",
"torch._softmax_backward_data",
"torch.zeros",
"torch.clamp",
"torch.nn.LogSoftmax",
"torch.nn.Dropout",
"torch.nn.MSELoss",
"torch.arange",
"torch.softmax",
"numpy.abs",
"torch.nn.Embedding",
"torch.empty_like"
]
] |
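A minimal usage sketch for the DeBERTa-v2 modeling file in the record above (an editor's illustration, not part of the dataset row): it loads one of the checkpoints named in DEBERTA_V2_PRETRAINED_MODEL_ARCHIVE_LIST via the public transformers classes and runs a single forward pass. Weights and the sentencepiece vocabulary are downloaded on first use, which assumes network access.

import torch
from transformers import DebertaV2Model, DebertaV2Tokenizer

# Checkpoint name taken from DEBERTA_V2_PRETRAINED_MODEL_ARCHIVE_LIST in the source above.
tokenizer = DebertaV2Tokenizer.from_pretrained("microsoft/deberta-v2-xlarge")
model = DebertaV2Model.from_pretrained("microsoft/deberta-v2-xlarge")
model.eval()

inputs = tokenizer("DeBERTa disentangles content and position attention.", return_tensors="pt")
with torch.no_grad():
    outputs = model(**inputs)

# BaseModelOutput: (batch_size, sequence_length, hidden_size)
print(outputs.last_hidden_state.shape)

The relative-position machinery (build_relative_position, the c2p/p2c bias terms) is applied internally by DebertaV2Encoder whenever config.relative_attention is enabled, so no extra arguments are needed at call time.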
AUST-Hansen/Dive-into-DL-PyTorch
|
[
"a97816955ecbaa4a337162a9b1e3e990f6e66dc6"
] |
[
"code/d2lzh_pytorch/utils.py"
] |
[
"import collections\nimport math\nimport os\nimport random\nimport sys\nimport tarfile\nimport time\nimport json\nimport zipfile\nfrom tqdm import tqdm\nfrom PIL import Image\nfrom collections import namedtuple\n\nfrom IPython import display\nfrom matplotlib import pyplot as plt\nimport torch\nfrom torch import nn\nimport torch.nn.functional as F\nimport torchvision\nimport torchvision.transforms as transforms\nimport torchtext\nimport torchtext.vocab as Vocab\nimport numpy as np\n\n\nVOC_CLASSES = ['background', 'aeroplane', 'bicycle', 'bird', 'boat',\n 'bottle', 'bus', 'car', 'cat', 'chair', 'cow',\n 'diningtable', 'dog', 'horse', 'motorbike', 'person',\n 'potted plant', 'sheep', 'sofa', 'train', 'tv/monitor']\n\n\nVOC_COLORMAP = [[0, 0, 0], [128, 0, 0], [0, 128, 0], [128, 128, 0],\n [0, 0, 128], [128, 0, 128], [0, 128, 128], [128, 128, 128],\n [64, 0, 0], [192, 0, 0], [64, 128, 0], [192, 128, 0],\n [64, 0, 128], [192, 0, 128], [64, 128, 128], [192, 128, 128],\n [0, 64, 0], [128, 64, 0], [0, 192, 0], [128, 192, 0],\n [0, 64, 128]]\n\n\n\n# ###################### 3.2 ############################\ndef set_figsize(figsize=(3.5, 2.5)):\n use_svg_display()\n # 设置图的尺寸\n plt.rcParams['figure.figsize'] = figsize\n\ndef use_svg_display():\n \"\"\"Use svg format to display plot in jupyter\"\"\"\n display.set_matplotlib_formats('svg')\n\ndef data_iter(batch_size, features, labels):\n num_examples = len(features)\n indices = list(range(num_examples))\n random.shuffle(indices) # 样本的读取顺序是随机的\n for i in range(0, num_examples, batch_size):\n j = torch.LongTensor(indices[i: min(i + batch_size, num_examples)]) # 最后一次可能不足一个batch\n yield features.index_select(0, j), labels.index_select(0, j) \n\ndef linreg(X, w, b):\n return torch.mm(X, w) + b\n\ndef squared_loss(y_hat, y): \n # 注意这里返回的是向量, 另外, pytorch里的MSELoss并没有除以 2\n return ((y_hat - y.view(y_hat.size())) ** 2) / 2\n\ndef sgd(params, lr, batch_size):\n # 为了和原书保持一致,这里除以了batch_size,但是应该是不用除的,因为一般用PyTorch计算loss时就默认已经\n # 沿batch维求了平均了。\n for param in params:\n param.data -= lr * param.grad / batch_size # 注意这里更改param时用的param.data\n\n\n\n# ######################3##### 3.5 #############################\ndef get_fashion_mnist_labels(labels):\n text_labels = ['t-shirt', 'trouser', 'pullover', 'dress', 'coat',\n 'sandal', 'shirt', 'sneaker', 'bag', 'ankle boot']\n return [text_labels[int(i)] for i in labels]\n\ndef show_fashion_mnist(images, labels):\n use_svg_display()\n # 这里的_表示我们忽略(不使用)的变量\n _, figs = plt.subplots(1, len(images), figsize=(12, 12))\n for f, img, lbl in zip(figs, images, labels):\n f.imshow(img.view((28, 28)).numpy())\n f.set_title(lbl)\n f.axes.get_xaxis().set_visible(False)\n f.axes.get_yaxis().set_visible(False)\n # plt.show()\n\n# 5.6 修改\n# def load_data_fashion_mnist(batch_size, root='~/Datasets/FashionMNIST'):\n# \"\"\"Download the fashion mnist dataset and then load into memory.\"\"\"\n# transform = transforms.ToTensor()\n# mnist_train = torchvision.datasets.FashionMNIST(root=root, train=True, download=True, transform=transform)\n# mnist_test = torchvision.datasets.FashionMNIST(root=root, train=False, download=True, transform=transform)\n# if sys.platform.startswith('win'):\n# num_workers = 0 # 0表示不用额外的进程来加速读取数据\n# else:\n# num_workers = 4\n# train_iter = torch.utils.data.DataLoader(mnist_train, batch_size=batch_size, shuffle=True, num_workers=num_workers)\n# test_iter = torch.utils.data.DataLoader(mnist_test, batch_size=batch_size, shuffle=False, num_workers=num_workers)\n\n# return train_iter, test_iter\n\n\n\n\n# 
########################### 3.6 ###############################\n# (3.13节修改)\n# def evaluate_accuracy(data_iter, net):\n# acc_sum, n = 0.0, 0\n# for X, y in data_iter:\n# acc_sum += (net(X).argmax(dim=1) == y).float().sum().item()\n# n += y.shape[0]\n# return acc_sum / n\n\n\ndef train_ch3(net, train_iter, test_iter, loss, num_epochs, batch_size,\n params=None, lr=None, optimizer=None):\n for epoch in range(num_epochs):\n train_l_sum, train_acc_sum, n = 0.0, 0.0, 0\n for X, y in train_iter:\n y_hat = net(X)\n l = loss(y_hat, y).sum()\n \n # 梯度清零\n if optimizer is not None:\n optimizer.zero_grad()\n elif params is not None and params[0].grad is not None:\n for param in params:\n param.grad.data.zero_()\n \n l.backward()\n if optimizer is None:\n sgd(params, lr, batch_size)\n else:\n optimizer.step() # “softmax回归的简洁实现”一节将用到\n \n \n train_l_sum += l.item()\n train_acc_sum += (y_hat.argmax(dim=1) == y).sum().item()\n n += y.shape[0]\n test_acc = evaluate_accuracy(test_iter, net)\n print('epoch %d, loss %.4f, train acc %.3f, test acc %.3f'\n % (epoch + 1, train_l_sum / n, train_acc_sum / n, test_acc))\n\n\n\n\n# ########################### 3.7 #####################################3\nclass FlattenLayer(torch.nn.Module):\n def __init__(self):\n super(FlattenLayer, self).__init__()\n def forward(self, x): # x shape: (batch, *, *, ...)\n return x.view(x.shape[0], -1)\n\n\n# ########################### 3.11 ###############################\ndef semilogy(x_vals, y_vals, x_label, y_label, x2_vals=None, y2_vals=None,\n legend=None, figsize=(3.5, 2.5)):\n set_figsize(figsize)\n plt.xlabel(x_label)\n plt.ylabel(y_label)\n plt.semilogy(x_vals, y_vals)\n if x2_vals and y2_vals:\n plt.semilogy(x2_vals, y2_vals, linestyle=':')\n plt.legend(legend)\n # plt.show()\n\n\n\n\n# ############################# 3.13 ##############################\n# 5.5 修改\n# def evaluate_accuracy(data_iter, net):\n# acc_sum, n = 0.0, 0\n# for X, y in data_iter:\n# if isinstance(net, torch.nn.Module):\n# net.eval() # 评估模式, 这会关闭dropout\n# acc_sum += (net(X).argmax(dim=1) == y).float().sum().item()\n# net.train() # 改回训练模式\n# else: # 自定义的模型\n# if('is_training' in net.__code__.co_varnames): # 如果有is_training这个参数\n# # 将is_training设置成False\n# acc_sum += (net(X, is_training=False).argmax(dim=1) == y).float().sum().item() \n# else:\n# acc_sum += (net(X).argmax(dim=1) == y).float().sum().item() \n# n += y.shape[0]\n# return acc_sum / n\n\n\n\n\n\n\n# ########################### 5.1 #########################\ndef corr2d(X, K): \n h, w = K.shape\n Y = torch.zeros((X.shape[0] - h + 1, X.shape[1] - w + 1))\n for i in range(Y.shape[0]):\n for j in range(Y.shape[1]):\n Y[i, j] = (X[i: i + h, j: j + w] * K).sum()\n return Y\n\n\n\n# ############################ 5.5 #########################\ndef evaluate_accuracy(data_iter, net, device=None):\n if device is None and isinstance(net, torch.nn.Module):\n # 如果没指定device就使用net的device\n device = list(net.parameters())[0].device \n acc_sum, n = 0.0, 0\n with torch.no_grad():\n for X, y in data_iter:\n if isinstance(net, torch.nn.Module):\n net.eval() # 评估模式, 这会关闭dropout\n acc_sum += (net(X.to(device)).argmax(dim=1) == y.to(device)).float().sum().cpu().item()\n net.train() # 改回训练模式\n else: # 自定义的模型, 3.13节之后不会用到, 不考虑GPU\n if('is_training' in net.__code__.co_varnames): # 如果有is_training这个参数\n # 将is_training设置成False\n acc_sum += (net(X, is_training=False).argmax(dim=1) == y).float().sum().item() \n else:\n acc_sum += (net(X).argmax(dim=1) == y).float().sum().item() \n n += y.shape[0]\n return acc_sum / n\n\ndef 
train_ch5(net, train_iter, test_iter, batch_size, optimizer, device, num_epochs):\n net = net.to(device)\n print(\"training on \", device)\n loss = torch.nn.CrossEntropyLoss()\n batch_count = 0\n for epoch in range(num_epochs):\n train_l_sum, train_acc_sum, n, start = 0.0, 0.0, 0, time.time()\n for X, y in train_iter:\n X = X.to(device)\n y = y.to(device)\n y_hat = net(X)\n l = loss(y_hat, y)\n optimizer.zero_grad()\n l.backward()\n optimizer.step()\n train_l_sum += l.cpu().item()\n train_acc_sum += (y_hat.argmax(dim=1) == y).sum().cpu().item()\n n += y.shape[0]\n batch_count += 1\n test_acc = evaluate_accuracy(test_iter, net)\n print('epoch %d, loss %.4f, train acc %.3f, test acc %.3f, time %.1f sec'\n % (epoch + 1, train_l_sum / batch_count, train_acc_sum / n, test_acc, time.time() - start))\n\n\n\n# ########################## 5.6 #########################3\ndef load_data_fashion_mnist(batch_size, resize=None, root='~/Datasets/FashionMNIST'):\n \"\"\"Download the fashion mnist dataset and then load into memory.\"\"\"\n trans = []\n if resize:\n trans.append(torchvision.transforms.Resize(size=resize))\n trans.append(torchvision.transforms.ToTensor())\n \n transform = torchvision.transforms.Compose(trans)\n mnist_train = torchvision.datasets.FashionMNIST(root=root, train=True, download=True, transform=transform)\n mnist_test = torchvision.datasets.FashionMNIST(root=root, train=False, download=True, transform=transform)\n if sys.platform.startswith('win'):\n num_workers = 0 # 0表示不用额外的进程来加速读取数据\n else:\n num_workers = 4\n train_iter = torch.utils.data.DataLoader(mnist_train, batch_size=batch_size, shuffle=True, num_workers=num_workers)\n test_iter = torch.utils.data.DataLoader(mnist_test, batch_size=batch_size, shuffle=False, num_workers=num_workers)\n\n return train_iter, test_iter\n\n\n\n############################# 5.8 ##############################\nclass GlobalAvgPool2d(nn.Module):\n # 全局平均池化层可通过将池化窗口形状设置成输入的高和宽实现\n def __init__(self):\n super(GlobalAvgPool2d, self).__init__()\n def forward(self, x):\n return F.avg_pool2d(x, kernel_size=x.size()[2:])\n\n\n\n# ########################### 5.11 ################################\nclass Residual(nn.Module): \n def __init__(self, in_channels, out_channels, use_1x1conv=False, stride=1):\n super(Residual, self).__init__()\n self.conv1 = nn.Conv2d(in_channels, out_channels, kernel_size=3, padding=1, stride=stride)\n self.conv2 = nn.Conv2d(out_channels, out_channels, kernel_size=3, padding=1)\n if use_1x1conv:\n self.conv3 = nn.Conv2d(in_channels, out_channels, kernel_size=1, stride=stride)\n else:\n self.conv3 = None\n self.bn1 = nn.BatchNorm2d(out_channels)\n self.bn2 = nn.BatchNorm2d(out_channels)\n\n def forward(self, X):\n Y = F.relu(self.bn1(self.conv1(X)))\n Y = self.bn2(self.conv2(Y))\n if self.conv3:\n X = self.conv3(X)\n return F.relu(Y + X)\n\ndef resnet_block(in_channels, out_channels, num_residuals, first_block=False):\n if first_block:\n assert in_channels == out_channels # 第一个模块的通道数同输入通道数一致\n blk = []\n for i in range(num_residuals):\n if i == 0 and not first_block:\n blk.append(Residual(in_channels, out_channels, use_1x1conv=True, stride=2))\n else:\n blk.append(Residual(out_channels, out_channels))\n return nn.Sequential(*blk)\n \ndef resnet18(output=10, in_channels=3):\n net = nn.Sequential(\n nn.Conv2d(in_channels, 64, kernel_size=7, stride=2, padding=3),\n nn.BatchNorm2d(64), \n nn.ReLU(),\n nn.MaxPool2d(kernel_size=3, stride=2, padding=1))\n net.add_module(\"resnet_block1\", resnet_block(64, 64, 2, first_block=True))\n 
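    # Stages 2-4 each start with a stride-2 residual block that halves the feature map and doubles the channels.\n    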
net.add_module(\"resnet_block2\", resnet_block(64, 128, 2))\n net.add_module(\"resnet_block3\", resnet_block(128, 256, 2))\n net.add_module(\"resnet_block4\", resnet_block(256, 512, 2))\n net.add_module(\"global_avg_pool\", GlobalAvgPool2d()) # GlobalAvgPool2d的输出: (Batch, 512, 1, 1)\n net.add_module(\"fc\", nn.Sequential(FlattenLayer(), nn.Linear(512, output))) \n return net\n\n\n\n# ############################## 6.3 ##################################3\ndef load_data_jay_lyrics():\n \"\"\"加载周杰伦歌词数据集\"\"\"\n with zipfile.ZipFile('../../data/jaychou_lyrics.txt.zip') as zin:\n with zin.open('jaychou_lyrics.txt') as f:\n corpus_chars = f.read().decode('utf-8')\n corpus_chars = corpus_chars.replace('\\n', ' ').replace('\\r', ' ')\n corpus_chars = corpus_chars[0:10000]\n idx_to_char = list(set(corpus_chars))\n char_to_idx = dict([(char, i) for i, char in enumerate(idx_to_char)])\n vocab_size = len(char_to_idx)\n corpus_indices = [char_to_idx[char] for char in corpus_chars]\n return corpus_indices, char_to_idx, idx_to_char, vocab_size\n\ndef data_iter_random(corpus_indices, batch_size, num_steps, device=None):\n # 减1是因为输出的索引x是相应输入的索引y加1\n num_examples = (len(corpus_indices) - 1) // num_steps\n epoch_size = num_examples // batch_size\n example_indices = list(range(num_examples))\n random.shuffle(example_indices)\n\n # 返回从pos开始的长为num_steps的序列\n def _data(pos):\n return corpus_indices[pos: pos + num_steps]\n if device is None:\n device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')\n \n for i in range(epoch_size):\n # 每次读取batch_size个随机样本\n i = i * batch_size\n batch_indices = example_indices[i: i + batch_size]\n X = [_data(j * num_steps) for j in batch_indices]\n Y = [_data(j * num_steps + 1) for j in batch_indices]\n yield torch.tensor(X, dtype=torch.float32, device=device), torch.tensor(Y, dtype=torch.float32, device=device)\n\ndef data_iter_consecutive(corpus_indices, batch_size, num_steps, device=None):\n if device is None:\n device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')\n corpus_indices = torch.tensor(corpus_indices, dtype=torch.float32, device=device)\n data_len = len(corpus_indices)\n batch_len = data_len // batch_size\n indices = corpus_indices[0: batch_size*batch_len].view(batch_size, batch_len)\n epoch_size = (batch_len - 1) // num_steps\n for i in range(epoch_size):\n i = i * num_steps\n X = indices[:, i: i + num_steps]\n Y = indices[:, i + 1: i + num_steps + 1]\n yield X, Y\n\n\n\n\n\n# ###################################### 6.4 ######################################\ndef one_hot(x, n_class, dtype=torch.float32): \n # X shape: (batch), output shape: (batch, n_class)\n x = x.long()\n res = torch.zeros(x.shape[0], n_class, dtype=dtype, device=x.device)\n res.scatter_(1, x.view(-1, 1), 1)\n return res\n\ndef to_onehot(X, n_class): \n # X shape: (batch, seq_len), output: seq_len elements of (batch, n_class)\n return [one_hot(X[:, i], n_class) for i in range(X.shape[1])]\n\ndef predict_rnn(prefix, num_chars, rnn, params, init_rnn_state,\n num_hiddens, vocab_size, device, idx_to_char, char_to_idx):\n state = init_rnn_state(1, num_hiddens, device)\n output = [char_to_idx[prefix[0]]]\n for t in range(num_chars + len(prefix) - 1):\n # 将上一时间步的输出作为当前时间步的输入\n X = to_onehot(torch.tensor([[output[-1]]], device=device), vocab_size)\n # 计算输出和更新隐藏状态\n (Y, state) = rnn(X, state, params)\n # 下一个时间步的输入是prefix里的字符或者当前的最佳预测字符\n if t < len(prefix) - 1:\n output.append(char_to_idx[prefix[t + 1]])\n else:\n output.append(int(Y[0].argmax(dim=1).item()))\n return 
''.join([idx_to_char[i] for i in output])\n\ndef grad_clipping(params, theta, device):\n norm = torch.tensor([0.0], device=device)\n for param in params:\n norm += (param.grad.data ** 2).sum()\n norm = norm.sqrt().item()\n if norm > theta:\n for param in params:\n param.grad.data *= (theta / norm)\n\ndef train_and_predict_rnn(rnn, get_params, init_rnn_state, num_hiddens,\n vocab_size, device, corpus_indices, idx_to_char,\n char_to_idx, is_random_iter, num_epochs, num_steps,\n lr, clipping_theta, batch_size, pred_period,\n pred_len, prefixes):\n if is_random_iter:\n data_iter_fn = data_iter_random\n else:\n data_iter_fn = data_iter_consecutive\n params = get_params()\n loss = nn.CrossEntropyLoss()\n\n for epoch in range(num_epochs):\n if not is_random_iter: # 如使用相邻采样,在epoch开始时初始化隐藏状态\n state = init_rnn_state(batch_size, num_hiddens, device)\n l_sum, n, start = 0.0, 0, time.time()\n data_iter = data_iter_fn(corpus_indices, batch_size, num_steps, device)\n for X, Y in data_iter:\n if is_random_iter: # 如使用随机采样,在每个小批量更新前初始化隐藏状态\n state = init_rnn_state(batch_size, num_hiddens, device)\n else: \n # 否则需要使用detach函数从计算图分离隐藏状态, 这是为了\n # 使模型参数的梯度计算只依赖一次迭代读取的小批量序列(防止梯度计算开销太大)\n for s in state:\n s.detach_()\n \n inputs = to_onehot(X, vocab_size)\n # outputs有num_steps个形状为(batch_size, vocab_size)的矩阵\n (outputs, state) = rnn(inputs, state, params)\n # 拼接之后形状为(num_steps * batch_size, vocab_size)\n outputs = torch.cat(outputs, dim=0)\n # Y的形状是(batch_size, num_steps),转置后再变成长度为\n # batch * num_steps 的向量,这样跟输出的行一一对应\n y = torch.transpose(Y, 0, 1).contiguous().view(-1)\n # 使用交叉熵损失计算平均分类误差\n l = loss(outputs, y.long())\n \n # 梯度清0\n if params[0].grad is not None:\n for param in params:\n param.grad.data.zero_()\n l.backward()\n grad_clipping(params, clipping_theta, device) # 裁剪梯度\n sgd(params, lr, 1) # 因为误差已经取过均值,梯度不用再做平均\n l_sum += l.item() * y.shape[0]\n n += y.shape[0]\n\n if (epoch + 1) % pred_period == 0:\n print('epoch %d, perplexity %f, time %.2f sec' % (\n epoch + 1, math.exp(l_sum / n), time.time() - start))\n for prefix in prefixes:\n print(' -', predict_rnn(prefix, pred_len, rnn, params, init_rnn_state,\n num_hiddens, vocab_size, device, idx_to_char, char_to_idx))\n\n \n \n \n# ################################### 6.5 ################################################\nclass RNNModel(nn.Module):\n def __init__(self, rnn_layer, vocab_size):\n super(RNNModel, self).__init__()\n self.rnn = rnn_layer\n self.hidden_size = rnn_layer.hidden_size * (2 if rnn_layer.bidirectional else 1) \n self.vocab_size = vocab_size\n self.dense = nn.Linear(self.hidden_size, vocab_size)\n self.state = None\n\n def forward(self, inputs, state): # inputs: (batch, seq_len)\n # 获取one-hot向量表示\n X = to_onehot(inputs, self.vocab_size) # X是个list\n Y, self.state = self.rnn(torch.stack(X), state)\n # 全连接层会首先将Y的形状变成(num_steps * batch_size, num_hiddens),它的输出\n # 形状为(num_steps * batch_size, vocab_size)\n output = self.dense(Y.view(-1, Y.shape[-1]))\n return output, self.state\n\ndef predict_rnn_pytorch(prefix, num_chars, model, vocab_size, device, idx_to_char,\n char_to_idx):\n state = None\n output = [char_to_idx[prefix[0]]] # output会记录prefix加上输出\n for t in range(num_chars + len(prefix) - 1):\n X = torch.tensor([output[-1]], device=device).view(1, 1)\n if state is not None:\n if isinstance(state, tuple): # LSTM, state:(h, c) \n state = (state[0].to(device), state[1].to(device))\n else: \n state = state.to(device)\n \n (Y, state) = model(X, state) # 前向计算不需要传入模型参数\n if t < len(prefix) - 1:\n output.append(char_to_idx[prefix[t + 1]])\n 
else:\n output.append(int(Y.argmax(dim=1).item()))\n return ''.join([idx_to_char[i] for i in output])\n\ndef train_and_predict_rnn_pytorch(model, num_hiddens, vocab_size, device,\n corpus_indices, idx_to_char, char_to_idx,\n num_epochs, num_steps, lr, clipping_theta,\n batch_size, pred_period, pred_len, prefixes):\n loss = nn.CrossEntropyLoss()\n optimizer = torch.optim.Adam(model.parameters(), lr=lr)\n model.to(device)\n state = None\n for epoch in range(num_epochs):\n l_sum, n, start = 0.0, 0, time.time()\n data_iter = data_iter_consecutive(corpus_indices, batch_size, num_steps, device) # 相邻采样\n for X, Y in data_iter:\n if state is not None:\n # 使用detach函数从计算图分离隐藏状态, 这是为了\n # 使模型参数的梯度计算只依赖一次迭代读取的小批量序列(防止梯度计算开销太大)\n if isinstance (state, tuple): # LSTM, state:(h, c) \n state = (state[0].detach(), state[1].detach())\n else: \n state = state.detach()\n \n (output, state) = model(X, state) # output: 形状为(num_steps * batch_size, vocab_size)\n \n # Y的形状是(batch_size, num_steps),转置后再变成长度为\n # batch * num_steps 的向量,这样跟输出的行一一对应\n y = torch.transpose(Y, 0, 1).contiguous().view(-1)\n l = loss(output, y.long())\n \n optimizer.zero_grad()\n l.backward()\n # 梯度裁剪\n grad_clipping(model.parameters(), clipping_theta, device)\n optimizer.step()\n l_sum += l.item() * y.shape[0]\n n += y.shape[0]\n \n try:\n perplexity = math.exp(l_sum / n)\n except OverflowError:\n perplexity = float('inf')\n if (epoch + 1) % pred_period == 0:\n print('epoch %d, perplexity %f, time %.2f sec' % (\n epoch + 1, perplexity, time.time() - start))\n for prefix in prefixes:\n print(' -', predict_rnn_pytorch(\n prefix, pred_len, model, vocab_size, device, idx_to_char,\n char_to_idx))\n\n\n\n\n# ######################################## 7.2 ###############################################\ndef train_2d(trainer): \n x1, x2, s1, s2 = -5, -2, 0, 0 # s1和s2是自变量状态,本章后续几节会使用\n results = [(x1, x2)]\n for i in range(20):\n x1, x2, s1, s2 = trainer(x1, x2, s1, s2)\n results.append((x1, x2))\n print('epoch %d, x1 %f, x2 %f' % (i + 1, x1, x2))\n return results\n\ndef show_trace_2d(f, results): \n plt.plot(*zip(*results), '-o', color='#ff7f0e')\n x1, x2 = np.meshgrid(np.arange(-5.5, 1.0, 0.1), np.arange(-3.0, 1.0, 0.1))\n plt.contour(x1, x2, f(x1, x2), colors='#1f77b4')\n plt.xlabel('x1')\n plt.ylabel('x2')\n\n\n\n\n# ######################################## 7.3 ###############################################\ndef get_data_ch7(): \n data = np.genfromtxt('../../data/airfoil_self_noise.dat', delimiter='\\t')\n data = (data - data.mean(axis=0)) / data.std(axis=0)\n return torch.tensor(data[:1500, :-1], dtype=torch.float32), \\\n torch.tensor(data[:1500, -1], dtype=torch.float32) # 前1500个样本(每个样本5个特征)\n\ndef train_ch7(optimizer_fn, states, hyperparams, features, labels,\n batch_size=10, num_epochs=2):\n # 初始化模型\n net, loss = linreg, squared_loss\n \n w = torch.nn.Parameter(torch.tensor(np.random.normal(0, 0.01, size=(features.shape[1], 1)), dtype=torch.float32),\n requires_grad=True)\n b = torch.nn.Parameter(torch.zeros(1, dtype=torch.float32), requires_grad=True)\n\n def eval_loss():\n return loss(net(features, w, b), labels).mean().item()\n\n ls = [eval_loss()]\n data_iter = torch.utils.data.DataLoader(\n torch.utils.data.TensorDataset(features, labels), batch_size, shuffle=True)\n \n for _ in range(num_epochs):\n start = time.time()\n for batch_i, (X, y) in enumerate(data_iter):\n l = loss(net(X, w, b), y).mean() # 使用平均损失\n \n # 梯度清零\n if w.grad is not None:\n w.grad.data.zero_()\n b.grad.data.zero_()\n \n l.backward()\n optimizer_fn([w, b], states, 
hyperparams) # 迭代模型参数\n if (batch_i + 1) * batch_size % 100 == 0:\n ls.append(eval_loss()) # 每100个样本记录下当前训练误差\n # 打印结果和作图\n print('loss: %f, %f sec per epoch' % (ls[-1], time.time() - start))\n set_figsize()\n plt.plot(np.linspace(0, num_epochs, len(ls)), ls)\n plt.xlabel('epoch')\n plt.ylabel('loss')\n\n# 本函数与原书不同的是这里第一个参数优化器函数而不是优化器的名字\n# 例如: optimizer_fn=torch.optim.SGD, optimizer_hyperparams={\"lr\": 0.05}\ndef train_pytorch_ch7(optimizer_fn, optimizer_hyperparams, features, labels,\n batch_size=10, num_epochs=2):\n # 初始化模型\n net = nn.Sequential(\n nn.Linear(features.shape[-1], 1)\n )\n loss = nn.MSELoss()\n optimizer = optimizer_fn(net.parameters(), **optimizer_hyperparams)\n\n def eval_loss():\n return loss(net(features).view(-1), labels).item() / 2\n\n ls = [eval_loss()]\n data_iter = torch.utils.data.DataLoader(\n torch.utils.data.TensorDataset(features, labels), batch_size, shuffle=True)\n\n for _ in range(num_epochs):\n start = time.time()\n for batch_i, (X, y) in enumerate(data_iter):\n # 除以2是为了和train_ch7保持一致, 因为squared_loss中除了2\n l = loss(net(X).view(-1), y) / 2 \n \n optimizer.zero_grad()\n l.backward()\n optimizer.step()\n if (batch_i + 1) * batch_size % 100 == 0:\n ls.append(eval_loss())\n # 打印结果和作图\n print('loss: %f, %f sec per epoch' % (ls[-1], time.time() - start))\n set_figsize()\n plt.plot(np.linspace(0, num_epochs, len(ls)), ls)\n plt.xlabel('epoch')\n plt.ylabel('loss')\n\n\n\n\n############################## 8.3 ##################################\nclass Benchmark():\n def __init__(self, prefix=None):\n self.prefix = prefix + ' ' if prefix else ''\n\n def __enter__(self):\n self.start = time.time()\n\n def __exit__(self, *args):\n print('%stime: %.4f sec' % (self.prefix, time.time() - self.start))\n\n\n\n\n\n# ########################### 9.1 ########################################\ndef show_images(imgs, num_rows, num_cols, scale=2):\n figsize = (num_cols * scale, num_rows * scale)\n _, axes = plt.subplots(num_rows, num_cols, figsize=figsize)\n for i in range(num_rows):\n for j in range(num_cols):\n axes[i][j].imshow(imgs[i * num_cols + j])\n axes[i][j].axes.get_xaxis().set_visible(False)\n axes[i][j].axes.get_yaxis().set_visible(False)\n return axes\n\ndef train(train_iter, test_iter, net, loss, optimizer, device, num_epochs):\n net = net.to(device)\n print(\"training on \", device)\n batch_count = 0\n for epoch in range(num_epochs):\n train_l_sum, train_acc_sum, n, start = 0.0, 0.0, 0, time.time()\n for X, y in train_iter:\n X = X.to(device)\n y = y.to(device)\n y_hat = net(X)\n l = loss(y_hat, y) \n optimizer.zero_grad()\n l.backward()\n optimizer.step()\n train_l_sum += l.cpu().item()\n train_acc_sum += (y_hat.argmax(dim=1) == y).sum().cpu().item()\n n += y.shape[0]\n batch_count += 1\n test_acc = evaluate_accuracy(test_iter, net)\n print('epoch %d, loss %.4f, train acc %.3f, test acc %.3f, time %.1f sec'\n % (epoch + 1, train_l_sum / batch_count, train_acc_sum / n, test_acc, time.time() - start))\n\n\n\n\n############################## 9.3 #####################\ndef bbox_to_rect(bbox, color):\n # 将边界框(左上x, 左上y, 右下x, 右下y)格式转换成matplotlib格式:\n # ((左上x, 左上y), 宽, 高)\n return plt.Rectangle(\n xy=(bbox[0], bbox[1]), width=bbox[2]-bbox[0], height=bbox[3]-bbox[1],\n fill=False, edgecolor=color, linewidth=2)\n\n\n\n\n############################ 9.4 ###########################\ndef MultiBoxPrior(feature_map, sizes=[0.75, 0.5, 0.25], ratios=[1, 2, 0.5]):\n \"\"\"\n # 按照「9.4.1. 
生成多个锚框」所讲的实现, anchor表示成(xmin, ymin, xmax, ymax).\n https://zh.d2l.ai/chapter_computer-vision/anchor.html\n Args:\n feature_map: torch tensor, Shape: [N, C, H, W].\n sizes: List of sizes (0~1) of generated MultiBoxPriores. \n ratios: List of aspect ratios (non-negative) of generated MultiBoxPriores. \n Returns:\n anchors of shape (1, num_anchors, 4). 由于batch里每个都一样, 所以第一维为1\n \"\"\"\n pairs = [] # pair of (size, sqrt(ration))\n for r in ratios:\n pairs.append([sizes[0], math.sqrt(r)])\n for s in sizes[1:]:\n pairs.append([s, math.sqrt(ratios[0])])\n \n pairs = np.array(pairs)\n \n ss1 = pairs[:, 0] * pairs[:, 1] # size * sqrt(ration)\n ss2 = pairs[:, 0] / pairs[:, 1] # size / sqrt(ration)\n \n base_anchors = np.stack([-ss1, -ss2, ss1, ss2], axis=1) / 2\n \n h, w = feature_map.shape[-2:]\n shifts_x = np.arange(0, w) / w\n shifts_y = np.arange(0, h) / h\n shift_x, shift_y = np.meshgrid(shifts_x, shifts_y)\n shift_x = shift_x.reshape(-1)\n shift_y = shift_y.reshape(-1)\n shifts = np.stack((shift_x, shift_y, shift_x, shift_y), axis=1)\n \n anchors = shifts.reshape((-1, 1, 4)) + base_anchors.reshape((1, -1, 4))\n \n return torch.tensor(anchors, dtype=torch.float32).view(1, -1, 4)\n\ndef show_bboxes(axes, bboxes, labels=None, colors=None):\n def _make_list(obj, default_values=None):\n if obj is None:\n obj = default_values\n elif not isinstance(obj, (list, tuple)):\n obj = [obj]\n return obj\n\n labels = _make_list(labels)\n colors = _make_list(colors, ['b', 'g', 'r', 'm', 'c'])\n for i, bbox in enumerate(bboxes):\n color = colors[i % len(colors)]\n rect = bbox_to_rect(bbox.detach().cpu().numpy(), color)\n axes.add_patch(rect)\n if labels and len(labels) > i:\n text_color = 'k' if color == 'w' else 'w'\n axes.text(rect.xy[0], rect.xy[1], labels[i],\n va='center', ha='center', fontsize=6, color=text_color,\n bbox=dict(facecolor=color, lw=0))\n\ndef compute_intersection(set_1, set_2):\n \"\"\"\n 计算anchor之间的交集\n Args:\n set_1: a tensor of dimensions (n1, 4), anchor表示成(xmin, ymin, xmax, ymax)\n set_2: a tensor of dimensions (n2, 4), anchor表示成(xmin, ymin, xmax, ymax)\n Returns:\n intersection of each of the boxes in set 1 with respect to each of the boxes in set 2, shape: (n1, n2)\n \"\"\"\n # PyTorch auto-broadcasts singleton dimensions\n lower_bounds = torch.max(set_1[:, :2].unsqueeze(1), set_2[:, :2].unsqueeze(0)) # (n1, n2, 2)\n upper_bounds = torch.min(set_1[:, 2:].unsqueeze(1), set_2[:, 2:].unsqueeze(0)) # (n1, n2, 2)\n intersection_dims = torch.clamp(upper_bounds - lower_bounds, min=0) # (n1, n2, 2)\n return intersection_dims[:, :, 0] * intersection_dims[:, :, 1] # (n1, n2)\n\ndef compute_jaccard(set_1, set_2):\n \"\"\"\n 计算anchor之间的Jaccard系数(IoU)\n Args:\n set_1: a tensor of dimensions (n1, 4), anchor表示成(xmin, ymin, xmax, ymax)\n set_2: a tensor of dimensions (n2, 4), anchor表示成(xmin, ymin, xmax, ymax)\n Returns:\n Jaccard Overlap of each of the boxes in set 1 with respect to each of the boxes in set 2, shape: (n1, n2)\n \"\"\"\n # Find intersections\n intersection = compute_intersection(set_1, set_2) # (n1, n2)\n\n # Find areas of each box in both sets\n areas_set_1 = (set_1[:, 2] - set_1[:, 0]) * (set_1[:, 3] - set_1[:, 1]) # (n1)\n areas_set_2 = (set_2[:, 2] - set_2[:, 0]) * (set_2[:, 3] - set_2[:, 1]) # (n2)\n\n # Find the union\n # PyTorch auto-broadcasts singleton dimensions\n union = areas_set_1.unsqueeze(1) + areas_set_2.unsqueeze(0) - intersection # (n1, n2)\n\n return intersection / union # (n1, n2)\n\ndef assign_anchor(bb, anchor, jaccard_threshold=0.5):\n \"\"\"\n # 按照「9.4.1. 
生成多个锚框」图9.3所讲为每个anchor分配真实的bb, anchor表示成归一化(xmin, ymin, xmax, ymax).\n https://zh.d2l.ai/chapter_computer-vision/anchor.html\n Args:\n bb: 真实边界框(bounding box), shape:(nb, 4)\n anchor: 待分配的anchor, shape:(na, 4)\n jaccard_threshold: 预先设定的阈值\n Returns:\n assigned_idx: shape: (na, ), 每个anchor分配的真实bb对应的索引, 若未分配任何bb则为-1\n \"\"\"\n na = anchor.shape[0]\n nb = bb.shape[0]\n jaccard = compute_jaccard(anchor, bb).detach().cpu().numpy() # shape: (na, nb)\n assigned_idx = np.ones(na) * -1 # 初始全为-1\n \n # 先为每个bb分配一个anchor(不要求满足jaccard_threshold)\n jaccard_cp = jaccard.copy()\n for j in range(nb):\n i = np.argmax(jaccard_cp[:, j])\n assigned_idx[i] = j\n jaccard_cp[i, :] = float(\"-inf\") # 赋值为负无穷, 相当于去掉这一行\n \n # 处理还未被分配的anchor, 要求满足jaccard_threshold\n for i in range(na):\n if assigned_idx[i] == -1:\n j = np.argmax(jaccard[i, :])\n if jaccard[i, j] >= jaccard_threshold:\n assigned_idx[i] = j\n \n return torch.tensor(assigned_idx, dtype=torch.long)\n\ndef xy_to_cxcy(xy):\n \"\"\"\n 将(x_min, y_min, x_max, y_max)形式的anchor转换成(center_x, center_y, w, h)形式的.\n https://github.com/sgrvinod/a-PyTorch-Tutorial-to-Object-Detection/blob/master/utils.py\n Args:\n xy: bounding boxes in boundary coordinates, a tensor of size (n_boxes, 4)\n Returns: \n bounding boxes in center-size coordinates, a tensor of size (n_boxes, 4)\n \"\"\"\n return torch.cat([(xy[:, 2:] + xy[:, :2]) / 2, # c_x, c_y\n xy[:, 2:] - xy[:, :2]], 1) # w, h\n\ndef MultiBoxTarget(anchor, label):\n \"\"\"\n # 按照「9.4.1. 生成多个锚框」所讲的实现, anchor表示成归一化(xmin, ymin, xmax, ymax).\n https://zh.d2l.ai/chapter_computer-vision/anchor.html\n Args:\n anchor: torch tensor, 输入的锚框, 一般是通过MultiBoxPrior生成, shape:(1,锚框总数,4)\n label: 真实标签, shape为(bn, 每张图片最多的真实锚框数, 5)\n 第二维中,如果给定图片没有这么多锚框, 可以先用-1填充空白, 最后一维中的元素为[类别标签, 四个坐标值]\n Returns:\n 列表, [bbox_offset, bbox_mask, cls_labels]\n bbox_offset: 每个锚框的标注偏移量,形状为(bn,锚框总数*4)\n bbox_mask: 形状同bbox_offset, 每个锚框的掩码, 一一对应上面的偏移量, 负类锚框(背景)对应的掩码均为0, 正类锚框的掩码均为1\n cls_labels: 每个锚框的标注类别, 其中0表示为背景, 形状为(bn,锚框总数)\n \"\"\"\n assert len(anchor.shape) == 3 and len(label.shape) == 3\n bn = label.shape[0]\n \n def MultiBoxTarget_one(anc, lab, eps=1e-6):\n \"\"\"\n MultiBoxTarget函数的辅助函数, 处理batch中的一个\n Args:\n anc: shape of (锚框总数, 4)\n lab: shape of (真实锚框数, 5), 5代表[类别标签, 四个坐标值]\n eps: 一个极小值, 防止log0\n Returns:\n offset: (锚框总数*4, )\n bbox_mask: (锚框总数*4, ), 0代表背景, 1代表非背景\n cls_labels: (锚框总数, 4), 0代表背景\n \"\"\"\n an = anc.shape[0]\n assigned_idx = assign_anchor(lab[:, 1:], anc) # (锚框总数, )\n bbox_mask = ((assigned_idx >= 0).float().unsqueeze(-1)).repeat(1, 4) # (锚框总数, 4)\n\n cls_labels = torch.zeros(an, dtype=torch.long) # 0表示背景\n assigned_bb = torch.zeros((an, 4), dtype=torch.float32) # 所有anchor对应的bb坐标\n for i in range(an):\n bb_idx = assigned_idx[i]\n if bb_idx >= 0: # 即非背景\n cls_labels[i] = lab[bb_idx, 0].long().item() + 1 # 注意要加一\n assigned_bb[i, :] = lab[bb_idx, 1:]\n\n center_anc = xy_to_cxcy(anc) # (center_x, center_y, w, h)\n center_assigned_bb = xy_to_cxcy(assigned_bb)\n\n offset_xy = 10.0 * (center_assigned_bb[:, :2] - center_anc[:, :2]) / center_anc[:, 2:]\n offset_wh = 5.0 * torch.log(eps + center_assigned_bb[:, 2:] / center_anc[:, 2:])\n offset = torch.cat([offset_xy, offset_wh], dim = 1) * bbox_mask # (锚框总数, 4)\n\n return offset.view(-1), bbox_mask.view(-1), cls_labels\n \n batch_offset = []\n batch_mask = []\n batch_cls_labels = []\n for b in range(bn):\n offset, bbox_mask, cls_labels = MultiBoxTarget_one(anchor[0, :, :], label[b, :, :])\n \n batch_offset.append(offset)\n batch_mask.append(bbox_mask)\n batch_cls_labels.append(cls_labels)\n \n 
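# stack the per-image offsets, masks and class labels into batch tensors\n    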
bbox_offset = torch.stack(batch_offset)\n bbox_mask = torch.stack(batch_mask)\n cls_labels = torch.stack(batch_cls_labels)\n \n return [bbox_offset, bbox_mask, cls_labels]\n\n\nPred_BB_Info = namedtuple(\"Pred_BB_Info\", [\"index\", \"class_id\", \"confidence\", \"xyxy\"])\ndef non_max_suppression(bb_info_list, nms_threshold = 0.5):\n \"\"\"\n 非极大抑制处理预测的边界框\n Args:\n bb_info_list: Pred_BB_Info的列表, 包含预测类别、置信度等信息\n nms_threshold: 阈值\n Returns:\n output: Pred_BB_Info的列表, 只保留过滤后的边界框信息\n \"\"\"\n output = []\n # 先根据置信度从高到低排序\n sorted_bb_info_list = sorted(bb_info_list, key = lambda x: x.confidence, reverse=True)\n\n while len(sorted_bb_info_list) != 0:\n best = sorted_bb_info_list.pop(0)\n output.append(best)\n \n if len(sorted_bb_info_list) == 0:\n break\n\n bb_xyxy = []\n for bb in sorted_bb_info_list:\n bb_xyxy.append(bb.xyxy)\n \n iou = compute_jaccard(torch.tensor([best.xyxy]), \n torch.tensor(bb_xyxy))[0] # shape: (len(sorted_bb_info_list), )\n \n n = len(sorted_bb_info_list)\n sorted_bb_info_list = [sorted_bb_info_list[i] for i in range(n) if iou[i] <= nms_threshold]\n return output\n\ndef MultiBoxDetection(cls_prob, loc_pred, anchor, nms_threshold = 0.5):\n \"\"\"\n # 按照「9.4.1. 生成多个锚框」所讲的实现, anchor表示成归一化(xmin, ymin, xmax, ymax).\n https://zh.d2l.ai/chapter_computer-vision/anchor.html\n Args:\n cls_prob: 经过softmax后得到的各个锚框的预测概率, shape:(bn, 预测总类别数+1, 锚框个数)\n loc_pred: 预测的各个锚框的偏移量, shape:(bn, 锚框个数*4)\n anchor: MultiBoxPrior输出的默认锚框, shape: (1, 锚框个数, 4)\n nms_threshold: 非极大抑制中的阈值\n Returns:\n 所有锚框的信息, shape: (bn, 锚框个数, 6)\n 每个锚框信息由[class_id, confidence, xmin, ymin, xmax, ymax]表示\n class_id=-1 表示背景或在非极大值抑制中被移除了\n \"\"\"\n assert len(cls_prob.shape) == 3 and len(loc_pred.shape) == 2 and len(anchor.shape) == 3\n bn = cls_prob.shape[0]\n \n def MultiBoxDetection_one(c_p, l_p, anc, nms_threshold = 0.5):\n \"\"\"\n MultiBoxDetection的辅助函数, 处理batch中的一个\n Args:\n c_p: (预测总类别数+1, 锚框个数)\n l_p: (锚框个数*4, )\n anc: (锚框个数, 4)\n nms_threshold: 非极大抑制中的阈值\n Return:\n output: (锚框个数, 6)\n \"\"\"\n pred_bb_num = c_p.shape[1]\n anc = (anc + l_p.view(pred_bb_num, 4)).detach().cpu().numpy() # 加上偏移量\n \n confidence, class_id = torch.max(c_p, 0)\n confidence = confidence.detach().cpu().numpy()\n class_id = class_id.detach().cpu().numpy()\n \n pred_bb_info = [Pred_BB_Info(\n index = i,\n class_id = class_id[i] - 1, # 正类label从0开始\n confidence = confidence[i],\n xyxy=[*anc[i]]) # xyxy是个列表\n for i in range(pred_bb_num)]\n \n # 正类的index\n obj_bb_idx = [bb.index for bb in non_max_suppression(pred_bb_info, nms_threshold)]\n \n output = []\n for bb in pred_bb_info:\n output.append([\n (bb.class_id if bb.index in obj_bb_idx else -1.0),\n bb.confidence,\n *bb.xyxy\n ])\n \n return torch.tensor(output) # shape: (锚框个数, 6)\n \n batch_output = []\n for b in range(bn):\n batch_output.append(MultiBoxDetection_one(cls_prob[b], loc_pred[b], anchor[0], nms_threshold))\n \n return torch.stack(batch_output)\n\n\n\n# ################################# 9.6 ############################\nclass PikachuDetDataset(torch.utils.data.Dataset):\n \"\"\"皮卡丘检测数据集类\"\"\"\n def __init__(self, data_dir, part, image_size=(256, 256)):\n assert part in [\"train\", \"val\"]\n self.image_size = image_size\n self.image_dir = os.path.join(data_dir, part, \"images\")\n \n with open(os.path.join(data_dir, part, \"label.json\")) as f:\n self.label = json.load(f)\n \n self.transform = torchvision.transforms.Compose([\n # 将 PIL 图片转换成位于[0.0, 1.0]的floatTensor, shape (C x H x W)\n torchvision.transforms.ToTensor()])\n \n def __len__(self):\n return len(self.label)\n \n 
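# image files are named '1.png', '2.png', ... so the 0-based index is shifted by one below\n    # usage sketch (the data_dir layout is an assumption): PikachuDetDataset('../../data/pikachu', 'train')[0]\n    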
def __getitem__(self, index):\n        image_path = str(index + 1) + \".png\"\n        \n        cls = self.label[image_path][\"class\"]\n        label = np.array([cls] + self.label[image_path][\"loc\"], \n                         dtype=\"float32\")[None, :]\n        \n        PIL_img = Image.open(os.path.join(self.image_dir, image_path)\n                            ).convert('RGB').resize(self.image_size)\n        img = self.transform(PIL_img)\n        \n        sample = {\n            \"label\": label, # shape: (1, 5) [class, xmin, ymin, xmax, ymax]\n            \"image\": img # shape: (3, *image_size)\n        }\n        \n        return sample\n\ndef load_data_pikachu(batch_size, edge_size=256, data_dir = '../../data/pikachu'): \n    \"\"\"edge_size: width and height of the output image\"\"\"\n    image_size = (edge_size, edge_size)\n    train_dataset = PikachuDetDataset(data_dir, 'train', image_size)\n    val_dataset = PikachuDetDataset(data_dir, 'val', image_size)\n    \n\n    train_iter = torch.utils.data.DataLoader(train_dataset, batch_size=batch_size, \n                                             shuffle=True, num_workers=4)\n\n    val_iter = torch.utils.data.DataLoader(val_dataset, batch_size=batch_size,\n                                           shuffle=False, num_workers=4)\n    return train_iter, val_iter\n\n\n\n\n# ############################# 10.7 ##########################\ndef read_imdb(folder='train', data_root=\"/S1/CSCL/tangss/Datasets/aclImdb\"): \n    data = []\n    for label in ['pos', 'neg']:\n        folder_name = os.path.join(data_root, folder, label)\n        for file in tqdm(os.listdir(folder_name)):\n            with open(os.path.join(folder_name, file), 'rb') as f:\n                review = f.read().decode('utf-8').replace('\\n', '').lower()\n                data.append([review, 1 if label == 'pos' else 0])\n    random.shuffle(data)\n    return data\n\ndef get_tokenized_imdb(data):\n    \"\"\"\n    data: list of [string, label]\n    \"\"\"\n    def tokenizer(text):\n        return [tok.lower() for tok in text.split(' ')]\n    return [tokenizer(review) for review, _ in data]\n\ndef get_vocab_imdb(data):\n    tokenized_data = get_tokenized_imdb(data)\n    counter = collections.Counter([tk for st in tokenized_data for tk in st])\n    return torchtext.vocab.Vocab(counter, min_freq=5)\n\ndef preprocess_imdb(data, vocab):\n    max_l = 500 # truncate each review or pad it with 0s so that its length becomes 500\n\n    def pad(x):\n        return x[:max_l] if len(x) > max_l else x + [0] * (max_l - len(x))\n\n    tokenized_data = get_tokenized_imdb(data)\n    features = torch.tensor([pad([vocab.stoi[word] for word in words]) for words in tokenized_data])\n    labels = torch.tensor([score for _, score in data])\n    return features, labels\n\ndef load_pretrained_embedding(words, pretrained_vocab):\n    \"\"\"Extract the word vectors corresponding to words from the pretrained vocab\"\"\"\n    embed = torch.zeros(len(words), pretrained_vocab.vectors[0].shape[0]) # initialize to 0\n    oov_count = 0 # out of vocabulary\n    for i, word in enumerate(words):\n        try:\n            idx = pretrained_vocab.stoi[word]\n            embed[i, :] = pretrained_vocab.vectors[idx]\n        except KeyError:\n            oov_count += 1\n    if oov_count > 0:\n        print(\"There are %d oov words.\" % oov_count)\n    return embed\n\ndef predict_sentiment(net, vocab, sentence):\n    \"\"\"sentence is a list of words\"\"\"\n    device = list(net.parameters())[0].device\n    sentence = torch.tensor([vocab.stoi[word] for word in sentence], device=device)\n    label = torch.argmax(net(sentence.view((1, -1))), dim=1)\n    return 'positive' if label.item() == 1 else 'negative'"
] |
[
[
"torch.nn.Linear",
"torch.cat",
"torch.stack",
"torch.nn.BatchNorm2d",
"numpy.genfromtxt",
"matplotlib.pyplot.semilogy",
"torch.cuda.is_available",
"torch.transpose",
"torch.nn.CrossEntropyLoss",
"numpy.random.normal",
"torch.nn.MaxPool2d",
"matplotlib.pyplot.subplots",
"torch.tensor",
"torch.utils.data.DataLoader",
"numpy.arange",
"numpy.argmax",
"torch.nn.functional.relu",
"torch.zeros",
"numpy.array",
"torch.max",
"torch.nn.Sequential",
"torch.clamp",
"torch.mm",
"torch.nn.ReLU",
"torch.nn.Conv2d",
"numpy.stack",
"torch.log",
"torch.utils.data.TensorDataset",
"torch.nn.MSELoss",
"matplotlib.pyplot.xlabel",
"torch.no_grad",
"numpy.ones",
"matplotlib.pyplot.legend",
"matplotlib.pyplot.ylabel",
"matplotlib.pyplot.Rectangle",
"numpy.meshgrid"
]
] |
contrera/gammapy
|
[
"aa0a74baa977ee2477b5c63e036075f4219792a3",
"aa0a74baa977ee2477b5c63e036075f4219792a3"
] |
[
"gammapy/utils/nddata.py",
"gammapy/utils/fitting/tests/test_sherpa.py"
] |
[
"# Licensed under a 3-clause BSD style license - see LICENSE.rst\n\"\"\"Utility functions and classes for n-dimensional data and axes.\n\"\"\"\nfrom __future__ import absolute_import, division, print_function, unicode_literals\nimport itertools\nfrom collections import OrderedDict\nimport numpy as np\nfrom astropy.units import Quantity\nfrom .array import array_stats_str\n\n__all__ = [\"NDDataArray\", \"DataAxis\", \"BinnedDataAxis\", \"sqrt_space\"]\n\n\nclass NDDataArray(object):\n \"\"\"ND Data Array Base class\n\n for usage examples see :gp-extra-notebook:`nddata_demo`\n\n Parameters\n ----------\n axes : list\n List of `~gammapy.utils.nddata.DataAxis`\n data : `~astropy.units.Quantity`\n Data\n meta : dict\n Meta info\n interp_kwargs : dict\n TODO\n \"\"\"\n\n default_interp_kwargs = dict(bounds_error=False)\n \"\"\"Default interpolation kwargs used to initialize the\n `scipy.interpolate.RegularGridInterpolator`. The interpolation behaviour\n of an individual axis ('log', 'linear') can be passed to the axis on\n initialization.\"\"\"\n\n def __init__(self, axes, data=None, meta=None, interp_kwargs=None):\n self._axes = axes\n if data is not None:\n self.data = data\n if meta is not None:\n self.meta = OrderedDict(meta)\n self.interp_kwargs = interp_kwargs or self.default_interp_kwargs\n\n self._regular_grid_interp = None\n\n def __str__(self):\n ss = \"NDDataArray summary info\\n\"\n for axis in self.axes:\n ss += array_stats_str(axis.nodes, axis.name)\n ss += array_stats_str(self.data, \"Data\")\n return ss\n\n @property\n def axes(self):\n \"\"\"Array holding the axes in correct order\"\"\"\n return self._axes\n\n def axis(self, name):\n \"\"\"Return axis by name\"\"\"\n try:\n idx = [_.name for _ in self.axes].index(name)\n except ValueError:\n raise ValueError(\"Axis {} not found\".format(name))\n return self.axes[idx]\n\n @property\n def data(self):\n \"\"\"Array holding the n-dimensional data.\"\"\"\n return self._data\n\n @data.setter\n def data(self, data):\n \"\"\"Set data.\n\n Some sanity checks are performed to avoid an invalid array.\n Also, the interpolator is set to None to avoid unwanted behaviour.\n\n Parameters\n ----------\n data : `~astropy.units.Quantity`, array-like\n Data array\n \"\"\"\n data = Quantity(data)\n dimension = len(data.shape)\n if dimension != self.dim:\n raise ValueError(\n \"Overall dimensions to not match. 
\"\n \"Data: {}, Hist: {}\".format(dimension, self.dim)\n )\n\n for dim in np.arange(self.dim):\n axis = self.axes[dim]\n if axis.nbins != data.shape[dim]:\n msg = \"Data shape does not match in dimension {d}\\n\"\n msg += \"Axis {n} : {sa}, Data {sd}\"\n raise ValueError(\n msg.format(d=dim, n=axis.name, sa=axis.nbins, sd=data.shape[dim])\n )\n self._regular_grid_interp = None\n self._data = data\n\n @property\n def dim(self):\n \"\"\"Dimension (number of axes)\"\"\"\n return len(self.axes)\n\n def find_node(self, **kwargs):\n \"\"\"Find next node\n\n Parameters\n ----------\n kwargs : dict\n Keys are the axis names, Values the evaluation points\n \"\"\"\n node = []\n for axis in self.axes:\n lookup_val = Quantity(kwargs.pop(axis.name))\n temp = axis.find_node(lookup_val)\n node.append(temp)\n return node\n\n def evaluate(self, method=None, **kwargs):\n \"\"\"Evaluate NDData Array\n\n This function provides a uniform interface to several interpolators.\n The evaluation nodes are given as ``kwargs``.\n\n Currently available:\n `~scipy.interpolate.RegularGridInterpolator`, methods: linear, nearest\n\n Parameters\n ----------\n method : str {'linear', 'nearest'}, optional\n Interpolation method\n kwargs : dict\n Keys are the axis names, Values the evaluation points\n\n Returns\n -------\n array : `~astropy.units.Quantity`\n Interpolated values, axis order is the same as for the NDData array\n \"\"\"\n values = []\n for axis in self.axes:\n # Extract values for each axis, default: nodes\n temp = Quantity(kwargs.pop(axis.name, axis.nodes))\n # Transform to correct unit\n temp = temp.to(axis.unit).value\n # Transform to match interpolation behaviour of axis\n values.append(np.atleast_1d(axis._interp_values(temp)))\n\n # This is to catch e.g. typos in axis names\n if kwargs != {}:\n raise ValueError(\"Input given for unknown axis: {}\".format(kwargs))\n\n # This is necessary since np.append does not support the 1D case\n if self.dim > 1:\n shapes = np.concatenate([np.shape(_) for _ in values])\n else:\n shapes = values[0].shape\n\n # Flatten in order to support 2D array input\n values = [_.flatten() for _ in values]\n points = list(itertools.product(*values))\n\n if self._regular_grid_interp is None:\n self._add_regular_grid_interp()\n\n method = method or self.default_interp_kwargs.get(\"method\", None)\n res = self._regular_grid_interp(points, method=method, **kwargs)\n\n out = np.reshape(res, shapes).squeeze()\n\n # Clip interpolated values to be non-negative\n np.clip(out, 0, None, out=out)\n # Attach units to the output\n out = out * self.data.unit\n\n return out\n\n def evaluate_at_coord(self, points, method=\"linear\", **kwargs):\n \"\"\"Evaluate NDData Array on set of points.\n\n TODO: merge with `evaluate`?\n This method was added to support evaluating on arbitrary arrays\n of coordinates, not just on the outer product like `evaluate`.\n\n Parameters\n ----------\n points: dict\n contains the coordinates on which you want to interpolate (axis_name: value)\n method : str {'linear', 'nearest'}, optional\n Interpolation method\n kwargs : dict\n Keys are the axis names, Values the evaluation points\n\n Returns\n -------\n array : `~astropy.units.Quantity`\n Interpolated values, axis order is the same as for the NDData array\n \"\"\"\n if self._regular_grid_interp is None:\n self._add_regular_grid_interp()\n\n points = tuple(\n [\n axis._interp_values(points[axis.name].to(axis.unit).value)\n for axis in self.axes\n ]\n )\n res = self._regular_grid_interp(points, method=method, **kwargs)\n\n 
# Clip interpolated values to be non-negative\n np.clip(res, 0, None, out=res)\n # Attach units to the output\n res = res * self.data.unit\n\n return res\n\n def _add_regular_grid_interp(self, interp_kwargs=None):\n \"\"\"Add `~scipy.interpolate.RegularGridInterpolator`\n\n http://docs.scipy.org/doc/scipy-0.14.0/reference/generated/scipy.interpolate.RegularGridInterpolator.html\n\n Parameters\n ----------\n interp_kwargs : dict, optional\n Interpolation kwargs\n \"\"\"\n from scipy.interpolate import RegularGridInterpolator\n\n if interp_kwargs is None:\n interp_kwargs = self.interp_kwargs\n points = [a._interp_nodes() for a in self.axes]\n\n values = self.data.value\n\n # If values contains nan, only setup interpolator in valid range\n if np.isnan(values).any():\n if self.dim > 1:\n raise NotImplementedError(\n \"Data grid contains nan. This is not\"\n \"supported for arrays dimension > 1\"\n )\n else:\n mask = np.isfinite(values)\n points = [points[0][mask]]\n values = values[mask]\n\n self._regular_grid_interp = RegularGridInterpolator(\n points, values, **interp_kwargs\n )\n\n\nclass DataAxis(object):\n \"\"\"Data axis to be used with NDDataArray\n\n Axis values are interpreted as nodes.\n\n For binned data see `~gammapy.utils.nddata.BinnedDataAxis`.\n\n Parameters\n ----------\n nodes : `~astropy.units.Quantity`\n Interpolation nodes\n name : str, optional\n Axis name, default: 'Default'\n interpolation_mode : str {'linear', 'log'}\n Interpolation behaviour, default: 'linear'\n \"\"\"\n\n def __init__(self, nodes, name=\"Default\", interpolation_mode=\"linear\"):\n # Need this for subclassing (see BinnedDataAxis)\n if nodes is not None:\n self._data = Quantity(nodes)\n self.name = name\n self._interpolation_mode = interpolation_mode\n\n def __str__(self):\n ss = self.__class__.__name__\n ss += \"\\nName: {}\".format(self.name)\n ss += \"\\nUnit: {}\".format(self.unit)\n ss += \"\\nNodes: {}\".format(self.nbins)\n ss += \"\\nInterpolation mode: {}\".format(self.interpolation_mode)\n\n return ss\n\n @property\n def unit(self):\n \"\"\"Axis unit\"\"\"\n return self.nodes.unit\n\n @classmethod\n def logspace(cls, vmin, vmax, nbins, unit=None, **kwargs):\n \"\"\"Create axis with equally log-spaced nodes\n\n if no unit is given, it will be taken from vmax,\n log interpolation is enable by default.\n\n Parameters\n ----------\n vmin : `~astropy.units.Quantity`, float\n Lowest value\n vmax : `~astropy.units.Quantity`, float\n Highest value\n bins : int\n Number of bins\n unit : `~astropy.units.UnitBase`, str\n Unit\n \"\"\"\n kwargs.setdefault(\"interpolation_mode\", \"log\")\n\n if unit is not None:\n vmin = Quantity(vmin, unit)\n vmax = Quantity(vmax, unit)\n else:\n vmin = Quantity(vmin)\n vmax = Quantity(vmax)\n unit = vmax.unit\n vmin = vmin.to(unit)\n\n x_min, x_max = np.log10([vmin.value, vmax.value])\n vals = np.logspace(x_min, x_max, nbins)\n\n return cls(vals * unit, **kwargs)\n\n def find_node(self, val):\n \"\"\"Find next node\n\n Parameters\n ----------\n val : `~astropy.units.Quantity`\n Lookup value\n \"\"\"\n val = Quantity(val)\n\n if not val.unit.is_equivalent(self.unit):\n raise ValueError(\n \"Units mismatch: val.unit = {!r}, self.unit = {!r}\".format(\n val.unit, self.unit\n )\n )\n\n val = val.to(self.nodes.unit)\n val = np.atleast_1d(val)\n x1 = np.array([val] * self.nbins).transpose()\n x2 = np.array([self.nodes] * len(val))\n temp = np.abs(x1 - x2)\n idx = np.argmin(temp, axis=1)\n return idx\n\n @property\n def nbins(self):\n \"\"\"Number of bins\"\"\"\n return 
len(self.nodes)\n\n @property\n def nodes(self):\n \"\"\"Evaluation nodes\"\"\"\n return self._data\n\n @property\n def interpolation_mode(self):\n \"\"\"Interpolation mode\n \"\"\"\n return self._interpolation_mode\n\n def _interp_nodes(self):\n \"\"\"Nodes to be used for interpolation\"\"\"\n if self.interpolation_mode == \"log\":\n return np.log10(self.nodes.value)\n else:\n return self.nodes.value\n\n def _interp_values(self, values):\n \"\"\"Transform values correctly for interpolation\"\"\"\n if self.interpolation_mode == \"log\":\n return np.log10(values)\n else:\n return values\n\n\nclass BinnedDataAxis(DataAxis):\n \"\"\"Data axis for binned data\n\n Parameters\n ----------\n lo : `~astropy.units.Quantity`\n Lower bin edges\n hi : `~astropy.units.Quantity`\n Upper bin edges\n name : str, optional\n Axis name, default: 'Default'\n interpolation_mode : str {'linear', 'log'}\n Interpolation behaviour, default: 'linear'\n \"\"\"\n\n def __init__(self, lo, hi, **kwargs):\n self.lo = Quantity(lo)\n self.hi = Quantity(hi)\n super(BinnedDataAxis, self).__init__(None, **kwargs)\n\n @classmethod\n def logspace(cls, emin, emax, nbins, unit=None, **kwargs):\n # TODO: splitout log space into a helper function\n vals = DataAxis.logspace(emin, emax, nbins + 1, unit)._data\n return cls(vals[:-1], vals[1:], **kwargs)\n\n def __str__(self):\n ss = super(BinnedDataAxis, self).__str__()\n ss += \"\\nLower bounds {}\".format(self.lo)\n ss += \"\\nUpper bounds {}\".format(self.hi)\n\n return ss\n\n @property\n def bins(self):\n \"\"\"Bin edges\"\"\"\n unit = self.lo.unit\n val = np.append(self.lo.value, self.hi.to(unit).value[-1])\n return val * unit\n\n @property\n def bin_width(self):\n \"\"\"Bin width\"\"\"\n return self.hi - self.lo\n\n @property\n def nodes(self):\n \"\"\"Evaluation nodes.\n\n Depending on the interpolation mode, either log or lin center are\n returned\n \"\"\"\n if self.interpolation_mode == \"log\":\n return self.log_center()\n else:\n return self.lin_center()\n\n def lin_center(self):\n \"\"\"Linear bin centers\"\"\"\n return (self.lo + self.hi) / 2\n\n def log_center(self):\n \"\"\"Logarithmic bin centers\"\"\"\n return np.sqrt(self.lo * self.hi)\n\n\ndef sqrt_space(start, stop, num):\n \"\"\"Return numbers spaced evenly on a square root scale.\n\n This function is similar to `numpy.linspace` and `numpy.logspace`.\n\n Parameters\n ----------\n start : float\n start is the starting value of the sequence\n stop : float\n stop is the final value of the sequence\n num : int\n Number of samples to generate.\n\n Returns\n -------\n samples : `~numpy.ndarray`\n 1D array with a square root scale\n\n Examples\n --------\n >>> from gammapy.utils.nddata import sqrt_space\n >>> samples = sqrt_space(0, 2, 5)\n array([ 0. , 1. , 1.41421356, 1.73205081, 2. ])\n\n \"\"\"\n samples2 = np.linspace(start ** 2, stop ** 2, num)\n samples = np.sqrt(samples2)\n return samples\n",
"# Licensed under a 3-clause BSD style license - see LICENSE.rst\nfrom __future__ import absolute_import, division, print_function, unicode_literals\nimport pytest\nfrom numpy.testing import assert_allclose\nfrom ...testing import requires_dependency\nfrom .. import Parameter, Parameters, optimize_sherpa\n\n\ndef fcn(parameters):\n x = parameters[\"x\"].value\n y = parameters[\"y\"].value\n z = parameters[\"z\"].value\n return (x - 2) ** 2 + (y - 3) ** 2 + (z - 4) ** 2\n\n\n# TODO: levmar doesn't work yet; needs array of statval as return in likelihood\n# optimiser='gridsearch' would require very low tolerance asserts, not added for now\n\n\n@requires_dependency(\"sherpa\")\[email protected](\"method\", [\"moncar\", \"simplex\"])\ndef test_sherpa(method):\n pars = Parameters([Parameter(\"x\", 2.2), Parameter(\"y\", 3.4), Parameter(\"z\", 4.5)])\n\n factors, info, _ = optimize_sherpa(function=fcn, parameters=pars, method=method)\n\n assert info[\"success\"]\n assert info[\"nfev\"] > 10\n assert_allclose(factors, [2, 3, 4], rtol=1e-2)\n assert_allclose(pars[\"x\"].value, 2, rtol=1e-2)\n assert_allclose(pars[\"y\"].value, 3, rtol=1e-2)\n assert_allclose(pars[\"z\"].value, 4, rtol=1e-2)\n"
] |
[
[
"numpy.array",
"numpy.isnan",
"numpy.reshape",
"numpy.argmin",
"numpy.shape",
"numpy.arange",
"numpy.atleast_1d",
"numpy.sqrt",
"numpy.clip",
"numpy.log10",
"numpy.abs",
"numpy.linspace",
"scipy.interpolate.RegularGridInterpolator",
"numpy.logspace",
"numpy.isfinite"
],
[
"numpy.testing.assert_allclose"
]
] |
darkreactions/rdkit
|
[
"0c388029c1f9386d832f6c321e59a11589c373d8",
"0c388029c1f9386d832f6c321e59a11589c373d8"
] |
[
"rdkit/Chem/Pharm3D/EmbedLib.py",
"rdkit/Chem/Draw/SimilarityMaps.py"
] |
[
"# $Id$\n#\n# Copyright (C) 2004-2008 Greg Landrum and Rational Discovery LLC\n#\n# @@ All Rights Reserved @@\n# This file is part of the RDKit.\n# The contents are covered by the terms of the BSD license\n# which is included in the file license.txt, found at the root\n# of the RDKit source tree.\n#\nfrom __future__ import print_function\nfrom rdkit import RDConfig\n\nimport sys, time, math\nfrom rdkit.ML.Data import Stats\nimport rdkit.DistanceGeometry as DG\nfrom rdkit import Chem\nimport numpy\nfrom rdkit.Chem import rdDistGeom as MolDG\nfrom rdkit.Chem import ChemicalFeatures\nfrom rdkit.Chem import ChemicalForceFields\nimport Pharmacophore, ExcludedVolume\nfrom rdkit import Geometry\n_times = {}\n\nfrom rdkit import RDLogger as logging\nlogger = logging.logger()\ndefaultFeatLength = 2.0\n\n\ndef GetAtomHeavyNeighbors(atom):\n \"\"\" returns a list of the heavy-atom neighbors of the\n atom passed in:\n\n >>> m = Chem.MolFromSmiles('CCO')\n >>> l = GetAtomHeavyNeighbors(m.GetAtomWithIdx(0))\n >>> len(l)\n 1\n >>> isinstance(l[0],Chem.Atom)\n True\n >>> l[0].GetIdx()\n 1\n\n >>> l = GetAtomHeavyNeighbors(m.GetAtomWithIdx(1))\n >>> len(l)\n 2\n >>> l[0].GetIdx()\n 0\n >>> l[1].GetIdx()\n 2\n \n \"\"\"\n res = []\n for nbr in atom.GetNeighbors():\n if nbr.GetAtomicNum() != 1:\n res.append(nbr)\n return res\n\n\ndef ReplaceGroup(match, bounds, slop=0.01, useDirs=False, dirLength=defaultFeatLength):\n \"\"\" Adds an entry at the end of the bounds matrix for a point at\n the center of a multi-point feature\n\n returns a 2-tuple:\n new bounds mat\n index of point added\n\n >>> boundsMat = numpy.array([[0.0,2.0,2.0],[1.0,0.0,2.0],[1.0,1.0,0.0]])\n >>> match = [0,1,2]\n >>> bm,idx = ReplaceGroup(match,boundsMat,slop=0.0)\n\n the index is at the end:\n >>> idx == 3\n True\n\n and the matrix is one bigger:\n >>> bm.shape == (4, 4)\n True\n\n but the original bounds mat is not altered:\n >>> boundsMat.shape == (3, 3)\n True\n\n \n We make the assumption that the points of the\n feature form a regular polygon, are listed in order\n (i.e. 
pt 0 is a neighbor to pt 1 and pt N-1)\n and that the replacement point goes at the center:\n >>> print(', '.join(['%.3f'%x for x in bm[-1]]))\n 0.577, 0.577, 0.577, 0.000\n >>> print(', '.join(['%.3f'%x for x in bm[:,-1]]))\n 1.155, 1.155, 1.155, 0.000\n\n The slop argument (default = 0.01) is fractional:\n >>> bm,idx = ReplaceGroup(match,boundsMat)\n >>> print(', '.join(['%.3f'%x for x in bm[-1]]))\n 0.572, 0.572, 0.572, 0.000\n >>> print(', '.join(['%.3f'%x for x in bm[:,-1]]))\n 1.166, 1.166, 1.166, 0.000\n\n\n \"\"\"\n maxVal = -1000.0\n minVal = 1e8\n nPts = len(match)\n for i in range(nPts):\n idx0 = match[i]\n if i < nPts - 1:\n idx1 = match[i + 1]\n else:\n idx1 = match[0]\n if idx1 < idx0:\n idx0, idx1 = idx1, idx0\n minVal = min(minVal, bounds[idx1, idx0])\n maxVal = max(maxVal, bounds[idx0, idx1])\n maxVal *= (1 + slop)\n minVal *= (1 - slop)\n\n scaleFact = 1.0 / (2.0 * math.sin(math.pi / nPts))\n minVal *= scaleFact\n maxVal *= scaleFact\n\n replaceIdx = bounds.shape[0]\n if not useDirs:\n bm = numpy.zeros((bounds.shape[0] + 1, bounds.shape[1] + 1), numpy.float)\n else:\n bm = numpy.zeros((bounds.shape[0] + 2, bounds.shape[1] + 2), numpy.float)\n bm[0:bounds.shape[0], 0:bounds.shape[1]] = bounds\n bm[:replaceIdx, replaceIdx] = 1000.\n\n if useDirs:\n bm[:replaceIdx + 1, replaceIdx + 1] = 1000.\n # set the feature - direction point bounds:\n bm[replaceIdx, replaceIdx + 1] = dirLength + slop\n bm[replaceIdx + 1, replaceIdx] = dirLength - slop\n\n for idx1 in match:\n bm[idx1, replaceIdx] = maxVal\n bm[replaceIdx, idx1] = minVal\n if useDirs:\n # set the point - direction point bounds:\n bm[idx1, replaceIdx + 1] = numpy.sqrt(bm[replaceIdx, replaceIdx + 1]**2 + maxVal**2)\n bm[replaceIdx + 1, idx1] = numpy.sqrt(bm[replaceIdx + 1, replaceIdx]**2 + minVal**2)\n return bm, replaceIdx\n\n\ndef EmbedMol(mol, bm, atomMatch=None, weight=2.0, randomSeed=-1, excludedVolumes=None):\n \"\"\" Generates an embedding for a molecule based on a bounds matrix and adds\n a conformer (id 0) to the molecule\n\n if the optional argument atomMatch is provided, it will be used to provide\n supplemental weights for the embedding routine (used in the optimization\n phase to ensure that the resulting geometry really does satisfy the\n pharmacophore).\n\n if the excludedVolumes is provided, it should be a sequence of\n ExcludedVolume objects\n\n >>> m = Chem.MolFromSmiles('c1ccccc1C')\n >>> bounds = MolDG.GetMoleculeBoundsMatrix(m)\n >>> bounds.shape == (7, 7)\n True\n >>> m.GetNumConformers()\n 0\n >>> EmbedMol(m,bounds,randomSeed=23)\n >>> m.GetNumConformers()\n 1\n\n\n \"\"\"\n nAts = mol.GetNumAtoms()\n weights = []\n if (atomMatch):\n for i in range(len(atomMatch)):\n for j in range(i + 1, len(atomMatch)):\n weights.append((i, j, weight))\n if (excludedVolumes):\n for vol in excludedVolumes:\n idx = vol.index\n # excluded volumes affect every other atom:\n for i in range(nAts):\n weights.append((i, idx, weight))\n coords = DG.EmbedBoundsMatrix(bm, weights=weights, numZeroFail=1, randomSeed=randomSeed)\n #for row in coords:\n # print(', '.join(['%.2f'%x for x in row]))\n\n conf = Chem.Conformer(nAts)\n conf.SetId(0)\n for i in range(nAts):\n conf.SetAtomPosition(i, list(coords[i]))\n if excludedVolumes:\n for vol in excludedVolumes:\n vol.pos = numpy.array(coords[vol.index])\n\n #print(' % 7.4f % 7.4f % 7.4f Ar 0 0 0 0 0 0 0 0 0 0 0 0'%tuple(coords[-1]), file=sys.stderr)\n mol.AddConformer(conf)\n\n\ndef AddExcludedVolumes(bm, excludedVolumes, smoothIt=True):\n \"\"\" Adds a set of excluded volumes to 
the bounds matrix\n and returns the new matrix\n\n excludedVolumes is a list of ExcludedVolume objects\n\n\n >>> boundsMat = numpy.array([[0.0,2.0,2.0],[1.0,0.0,2.0],[1.0,1.0,0.0]])\n >>> ev1 = ExcludedVolume.ExcludedVolume(([(0,),0.5,1.0],),exclusionDist=1.5)\n >>> bm = AddExcludedVolumes(boundsMat,(ev1,))\n\n the results matrix is one bigger:\n >>> bm.shape == (4, 4)\n True\n\n and the original bounds mat is not altered:\n >>> boundsMat.shape == (3, 3)\n True\n\n >>> print(', '.join(['%.3f'%x for x in bm[-1]]))\n 0.500, 1.500, 1.500, 0.000\n >>> print(', '.join(['%.3f'%x for x in bm[:,-1]]))\n 1.000, 3.000, 3.000, 0.000\n\n \"\"\"\n oDim = bm.shape[0]\n dim = oDim + len(excludedVolumes)\n res = numpy.zeros((dim, dim), numpy.float)\n res[:oDim, :oDim] = bm\n for i, vol in enumerate(excludedVolumes):\n bmIdx = oDim + i\n vol.index = bmIdx\n\n # set values to all the atoms:\n res[bmIdx, :bmIdx] = vol.exclusionDist\n res[:bmIdx, bmIdx] = 1000.0\n\n # set values to our defining features:\n for indices, minV, maxV in vol.featInfo:\n for index in indices:\n try:\n res[bmIdx, index] = minV\n res[index, bmIdx] = maxV\n except IndexError:\n logger.error('BAD INDEX: res[%d,%d], shape is %s' % (bmIdx, index, str(res.shape)))\n raise IndexError\n\n # set values to other excluded volumes:\n for j in range(bmIdx + 1, dim):\n res[bmIdx, j:dim] = 0\n res[j:dim, bmIdx] = 1000\n\n if smoothIt:\n DG.DoTriangleSmoothing(res)\n return res\n\n\ndef UpdatePharmacophoreBounds(bm, atomMatch, pcophore, useDirs=False, dirLength=defaultFeatLength,\n mol=None):\n \"\"\" loops over a distance bounds matrix and replaces the elements\n that are altered by a pharmacophore\n\n **NOTE** this returns the resulting bounds matrix, but it may also\n alter the input matrix\n\n atomMatch is a sequence of sequences containing atom indices\n for each of the pharmacophore's features.\n\n >>> feats = [ChemicalFeatures.FreeChemicalFeature('HBondAcceptor', 'HAcceptor1', Geometry.Point3D(0.0, 0.0, 0.0)),\n ... ChemicalFeatures.FreeChemicalFeature('HBondDonor', 'HDonor1', Geometry.Point3D(2.65, 0.0, 0.0)),\n ... 
]\n >>> pcophore=Pharmacophore.Pharmacophore(feats)\n >>> pcophore.setLowerBound(0,1, 1.0)\n >>> pcophore.setUpperBound(0,1, 2.0)\n\n >>> boundsMat = numpy.array([[0.0,3.0,3.0],[2.0,0.0,3.0],[2.0,2.0,0.0]])\n >>> atomMatch = ((0,),(1,))\n >>> bm = UpdatePharmacophoreBounds(boundsMat,atomMatch,pcophore)\n\n\n In this case, there are no multi-atom features, so the result matrix\n is the same as the input:\n >>> bm is boundsMat\n True\n\n this means, of course, that the input boundsMat is altered:\n >>> print(', '.join(['%.3f'%x for x in boundsMat[0]]))\n 0.000, 2.000, 3.000\n >>> print(', '.join(['%.3f'%x for x in boundsMat[1]]))\n 1.000, 0.000, 3.000\n >>> print(', '.join(['%.3f'%x for x in boundsMat[2]]))\n 2.000, 2.000, 0.000\n\n \"\"\"\n replaceMap = {}\n for i, matchI in enumerate(atomMatch):\n if len(matchI) > 1:\n bm, replaceIdx = ReplaceGroup(matchI, bm, useDirs=useDirs)\n replaceMap[i] = replaceIdx\n\n for i, matchI in enumerate(atomMatch):\n mi = replaceMap.get(i, matchI[0])\n for j in range(i + 1, len(atomMatch)):\n mj = replaceMap.get(j, atomMatch[j][0])\n if mi < mj:\n idx0, idx1 = mi, mj\n else:\n idx0, idx1 = mj, mi\n bm[idx0, idx1] = pcophore.getUpperBound(i, j)\n bm[idx1, idx0] = pcophore.getLowerBound(i, j)\n\n return bm\n\n\ndef EmbedPharmacophore(mol, atomMatch, pcophore, randomSeed=-1, count=10, smoothFirst=True,\n silent=False, bounds=None, excludedVolumes=None, targetNumber=-1,\n useDirs=False):\n \"\"\" Generates one or more embeddings for a molecule that satisfy a pharmacophore\n\n atomMatch is a sequence of sequences containing atom indices\n for each of the pharmacophore's features.\n\n - count: is the maximum number of attempts to make a generating an embedding\n - smoothFirst: toggles triangle smoothing of the molecular bounds matix\n - bounds: if provided, should be the molecular bounds matrix. If this isn't\n provided, the matrix will be generated.\n - targetNumber: if this number is positive, it provides a maximum number\n of embeddings to generate (i.e. we'll have count attempts to generate\n targetNumber embeddings).\n\n returns: a 3 tuple:\n 1) the molecular bounds matrix adjusted for the pharmacophore\n 2) a list of embeddings (molecules with a single conformer)\n 3) the number of failed attempts at embedding\n\n\n >>> m = Chem.MolFromSmiles('OCCN')\n >>> feats = [ChemicalFeatures.FreeChemicalFeature('HBondAcceptor', 'HAcceptor1', Geometry.Point3D(0.0, 0.0, 0.0)),\n ... ChemicalFeatures.FreeChemicalFeature('HBondDonor', 'HDonor1', Geometry.Point3D(2.65, 0.0, 0.0)),\n ... 
]\n >>> pcophore=Pharmacophore.Pharmacophore(feats)\n >>> pcophore.setLowerBound(0,1, 2.5)\n >>> pcophore.setUpperBound(0,1, 3.5)\n >>> atomMatch = ((0,),(3,))\n\n >>> bm,embeds,nFail = EmbedPharmacophore(m,atomMatch,pcophore,randomSeed=23,silent=1)\n >>> len(embeds)\n 10\n >>> nFail\n 0\n\n Set up a case that can't succeed:\n >>> pcophore=Pharmacophore.Pharmacophore(feats)\n >>> pcophore.setLowerBound(0,1, 2.0)\n >>> pcophore.setUpperBound(0,1, 2.1)\n >>> atomMatch = ((0,),(3,))\n\n >>> bm,embeds,nFail = EmbedPharmacophore(m,atomMatch,pcophore,randomSeed=23,silent=1)\n >>> len(embeds)\n 0\n >>> nFail\n 10\n\n \"\"\"\n global _times\n if not hasattr(mol, '_chiralCenters'):\n mol._chiralCenters = Chem.FindMolChiralCenters(mol)\n\n if bounds is None:\n bounds = MolDG.GetMoleculeBoundsMatrix(mol)\n if smoothFirst:\n DG.DoTriangleSmoothing(bounds)\n\n bm = bounds.copy()\n #print '------------'\n #print 'initial'\n #for row in bm:\n # print ' ',' '.join(['% 4.2f'%x for x in row])\n #print '------------'\n\n bm = UpdatePharmacophoreBounds(bm, atomMatch, pcophore, useDirs=useDirs, mol=mol)\n\n if excludedVolumes:\n bm = AddExcludedVolumes(bm, excludedVolumes, smoothIt=False)\n\n if not DG.DoTriangleSmoothing(bm):\n raise ValueError(\"could not smooth bounds matrix\")\n\n #print '------------'\n #print 'post replace and smooth'\n #for row in bm:\n # print ' ',' '.join(['% 4.2f'%x for x in row])\n #print '------------'\n\n if targetNumber <= 0:\n targetNumber = count\n nFailed = 0\n res = []\n for i in range(count):\n tmpM = bm[:, :]\n m2 = Chem.Mol(mol)\n t1 = time.time()\n try:\n if randomSeed <= 0:\n seed = i * 10 + 1\n else:\n seed = i * 10 + randomSeed\n EmbedMol(m2, tmpM, atomMatch, randomSeed=seed, excludedVolumes=excludedVolumes)\n except ValueError:\n if not silent:\n logger.info('Embed failed')\n nFailed += 1\n else:\n t2 = time.time()\n _times['embed'] = _times.get('embed', 0) + t2 - t1\n keepIt = True\n for idx, stereo in mol._chiralCenters:\n if stereo in ('R', 'S'):\n vol = ComputeChiralVolume(m2, idx)\n if (stereo=='R' and vol>=0) or \\\n (stereo=='S' and vol<=0):\n keepIt = False\n break\n if keepIt:\n res.append(m2)\n else:\n logger.debug('Removed embedding due to chiral constraints.')\n if len(res) == targetNumber:\n break\n return bm, res, nFailed\n\n\ndef isNaN(v):\n \"\"\" provides an OS independent way of detecting NaNs\n This is intended to be used with values returned from the C++\n side of things.\n\n We can't actually test this from Python (which traps\n zero division errors), but it would work something like\n this if we could:\n >>> isNaN(0)\n False\n\n #>>> isNan(1/0)\n #True\n\n \"\"\"\n if v != v and sys.platform == 'win32':\n return True\n elif v == 0 and v == 1 and sys.platform != 'win32':\n return True\n return False\n\n\ndef OptimizeMol(mol, bm, atomMatches=None, excludedVolumes=None, forceConstant=1200.0, maxPasses=5,\n verbose=False):\n \"\"\" carries out a UFF optimization for a molecule optionally subject\n to the constraints in a bounds matrix\n\n - atomMatches, if provided, is a sequence of sequences\n - forceConstant is the force constant of the spring used to enforce\n the constraints\n\n returns a 2-tuple:\n 1) the energy of the initial conformation\n 2) the energy post-embedding\n NOTE that these energies include the energies of the constraints\n\n\n\n >>> m = Chem.MolFromSmiles('OCCN')\n >>> feats = [ChemicalFeatures.FreeChemicalFeature('HBondAcceptor', 'HAcceptor1', Geometry.Point3D(0.0, 0.0, 0.0)),\n ... 
ChemicalFeatures.FreeChemicalFeature('HBondDonor', 'HDonor1', Geometry.Point3D(2.65, 0.0, 0.0)),\n ... ]\n >>> pcophore=Pharmacophore.Pharmacophore(feats)\n >>> pcophore.setLowerBound(0,1, 2.5)\n >>> pcophore.setUpperBound(0,1, 2.8)\n >>> atomMatch = ((0,),(3,))\n >>> bm,embeds,nFail = EmbedPharmacophore(m,atomMatch,pcophore,randomSeed=23,silent=1)\n >>> len(embeds)\n 10\n >>> testM = embeds[0]\n\n Do the optimization:\n >>> e1,e2 = OptimizeMol(testM,bm,atomMatches=atomMatch)\n\n Optimizing should have lowered the energy:\n >>> e2 < e1\n True\n\n Check the constrained distance:\n >>> conf = testM.GetConformer(0)\n >>> p0 = conf.GetAtomPosition(0)\n >>> p3 = conf.GetAtomPosition(3)\n >>> d03 = p0.Distance(p3)\n >>> d03 >= pcophore.getLowerBound(0,1)-.01\n True\n >>> d03 <= pcophore.getUpperBound(0,1)+.01\n True\n\n If we optimize without the distance constraints (provided via the atomMatches\n argument) we're not guaranteed to get the same results, particularly in a case\n like the current one where the pharmcophore brings the atoms uncomfortably\n close together:\n >>> testM = embeds[1]\n >>> e1,e2 = OptimizeMol(testM,bm)\n >>> e2 < e1\n True\n >>> conf = testM.GetConformer(0)\n >>> p0 = conf.GetAtomPosition(0)\n >>> p3 = conf.GetAtomPosition(3)\n >>> d03 = p0.Distance(p3)\n >>> d03 >= pcophore.getLowerBound(0,1)-.01\n True\n >>> d03 <= pcophore.getUpperBound(0,1)+.01\n False\n\n \"\"\"\n try:\n ff = ChemicalForceFields.UFFGetMoleculeForceField(mol)\n except Exception:\n logger.info('Problems building molecular forcefield', exc_info=True)\n return -1.0, -1.0\n\n weights = []\n if (atomMatches):\n for k in range(len(atomMatches)):\n for i in atomMatches[k]:\n for l in range(k + 1, len(atomMatches)):\n for j in atomMatches[l]:\n weights.append((i, j))\n for i, j in weights:\n if j < i:\n i, j = j, i\n minV = bm[j, i]\n maxV = bm[i, j]\n ff.AddDistanceConstraint(i, j, minV, maxV, forceConstant)\n if excludedVolumes:\n nAts = mol.GetNumAtoms()\n conf = mol.GetConformer()\n idx = nAts\n for exVol in excludedVolumes:\n assert exVol.pos is not None\n logger.debug('ff.AddExtraPoint(%.4f,%.4f,%.4f)' % (exVol.pos[0], exVol.pos[1], exVol.pos[2]))\n ff.AddExtraPoint(exVol.pos[0], exVol.pos[1], exVol.pos[2], True)\n indices = []\n for localIndices, foo, bar in exVol.featInfo:\n indices += list(localIndices)\n for i in range(nAts):\n v = numpy.array(conf.GetAtomPosition(i)) - numpy.array(exVol.pos)\n d = numpy.sqrt(numpy.dot(v, v))\n if i not in indices:\n if d < 5.0:\n logger.debug('ff.AddDistanceConstraint(%d,%d,%.3f,%d,%.0f)' %\n (i, idx, exVol.exclusionDist, 1000, forceConstant))\n ff.AddDistanceConstraint(i, idx, exVol.exclusionDist, 1000, forceConstant)\n\n else:\n logger.debug('ff.AddDistanceConstraint(%d,%d,%.3f,%.3f,%.0f)' %\n (i, idx, bm[exVol.index, i], bm[i, exVol.index], forceConstant))\n ff.AddDistanceConstraint(i, idx, bm[exVol.index, i], bm[i, exVol.index], forceConstant)\n idx += 1\n ff.Initialize()\n e1 = ff.CalcEnergy()\n if isNaN(e1):\n raise ValueError('bogus energy')\n\n if verbose:\n print(Chem.MolToMolBlock(mol))\n for i, vol in enumerate(excludedVolumes):\n pos = ff.GetExtraPointPos(i)\n print(' % 7.4f % 7.4f % 7.4f As 0 0 0 0 0 0 0 0 0 0 0 0' % tuple(pos),\n file=sys.stderr)\n needsMore = ff.Minimize()\n nPasses = 0\n while needsMore and nPasses < maxPasses:\n needsMore = ff.Minimize()\n nPasses += 1\n e2 = ff.CalcEnergy()\n if isNaN(e2):\n raise ValueError('bogus energy')\n\n if verbose:\n print('--------')\n print(Chem.MolToMolBlock(mol))\n for i, vol in 
enumerate(excludedVolumes):\n pos = ff.GetExtraPointPos(i)\n print(' % 7.4f % 7.4f % 7.4f Sb 0 0 0 0 0 0 0 0 0 0 0 0' % tuple(pos),\n file=sys.stderr)\n ff = None\n return e1, e2\n\n\ndef EmbedOne(mol, name, match, pcophore, count=1, silent=0, **kwargs):\n \"\"\" generates statistics for a molecule's embeddings\n\n Four energies are computed for each embedding:\n 1) E1: the energy (with constraints) of the initial embedding\n 2) E2: the energy (with constraints) of the optimized embedding\n 3) E3: the energy (no constraints) the geometry for E2\n 4) E4: the energy (no constraints) of the optimized free-molecule\n (starting from the E3 geometry)\n\n Returns a 9-tuple:\n 1) the mean value of E1\n 2) the sample standard deviation of E1\n 3) the mean value of E2 \n 4) the sample standard deviation of E2\n 5) the mean value of E3 \n 6) the sample standard deviation of E3\n 7) the mean value of E4 \n 8) the sample standard deviation of E4\n 9) The number of embeddings that failed\n \n \"\"\"\n global _times\n atomMatch = [list(x.GetAtomIds()) for x in match]\n bm, ms, nFailed = EmbedPharmacophore(mol, atomMatch, pcophore, count=count, silent=silent,\n **kwargs)\n e1s = []\n e2s = []\n e3s = []\n e4s = []\n d12s = []\n d23s = []\n d34s = []\n for m in ms:\n t1 = time.time()\n try:\n e1, e2 = OptimizeMol(m, bm, atomMatch)\n except ValueError:\n pass\n else:\n t2 = time.time()\n _times['opt1'] = _times.get('opt1', 0) + t2 - t1\n\n e1s.append(e1)\n e2s.append(e2)\n\n d12s.append(e1 - e2)\n t1 = time.time()\n try:\n e3, e4 = OptimizeMol(m, bm)\n except ValueError:\n pass\n else:\n t2 = time.time()\n _times['opt2'] = _times.get('opt2', 0) + t2 - t1\n e3s.append(e3)\n e4s.append(e4)\n d23s.append(e2 - e3)\n d34s.append(e3 - e4)\n count += 1\n try:\n e1, e1d = Stats.MeanAndDev(e1s)\n except Exception:\n e1 = -1.0\n e1d = -1.0\n try:\n e2, e2d = Stats.MeanAndDev(e2s)\n except Exception:\n e2 = -1.0\n e2d = -1.0\n try:\n e3, e3d = Stats.MeanAndDev(e3s)\n except Exception:\n e3 = -1.0\n e3d = -1.0\n\n try:\n e4, e4d = Stats.MeanAndDev(e4s)\n except Exception:\n e4 = -1.0\n e4d = -1.0\n if not silent:\n print('%s(%d): %.2f(%.2f) -> %.2f(%.2f) : %.2f(%.2f) -> %.2f(%.2f)' %\n (name, nFailed, e1, e1d, e2, e2d, e3, e3d, e4, e4d))\n return e1, e1d, e2, e2d, e3, e3d, e4, e4d, nFailed\n\n\ndef MatchPharmacophoreToMol(mol, featFactory, pcophore):\n \"\"\" generates a list of all possible mappings of a pharmacophore to a molecule\n\n Returns a 2-tuple:\n 1) a boolean indicating whether or not all features were found\n 2) a list, numFeatures long, of sequences of features\n\n\n >>> import os.path\n >>> dataDir = os.path.join(RDConfig.RDCodeDir,'Chem/Pharm3D/test_data')\n >>> featFactory = ChemicalFeatures.BuildFeatureFactory(os.path.join(dataDir,'BaseFeatures.fdef'))\n >>> activeFeats = [ChemicalFeatures.FreeChemicalFeature('Acceptor', Geometry.Point3D(0.0, 0.0, 0.0)),\n ... 
ChemicalFeatures.FreeChemicalFeature('Donor',Geometry.Point3D(0.0, 0.0, 0.0))]\n >>> pcophore= Pharmacophore.Pharmacophore(activeFeats)\n >>> m = Chem.MolFromSmiles('FCCN')\n >>> match,mList = MatchPharmacophoreToMol(m,featFactory,pcophore)\n >>> match\n True\n\n Two feature types:\n >>> len(mList)\n 2\n\n The first feature type, Acceptor, has two matches:\n >>> len(mList[0])\n 2\n >>> mList[0][0].GetAtomIds()\n (0,)\n >>> mList[0][1].GetAtomIds()\n (3,)\n\n The first feature type, Donor, has a single match:\n >>> len(mList[1])\n 1\n >>> mList[1][0].GetAtomIds()\n (3,)\n\n \"\"\"\n return MatchFeatsToMol(mol, featFactory, pcophore.getFeatures())\n\n\ndef _getFeatDict(mol, featFactory, features):\n \"\"\" **INTERNAL USE ONLY**\n\n >>> import os.path\n >>> dataDir = os.path.join(RDConfig.RDCodeDir,'Chem/Pharm3D/test_data')\n >>> featFactory = ChemicalFeatures.BuildFeatureFactory(os.path.join(dataDir,'BaseFeatures.fdef'))\n >>> activeFeats = [ChemicalFeatures.FreeChemicalFeature('Acceptor', Geometry.Point3D(0.0, 0.0, 0.0)),\n ... ChemicalFeatures.FreeChemicalFeature('Donor',Geometry.Point3D(0.0, 0.0, 0.0))]\n >>> m = Chem.MolFromSmiles('FCCN')\n >>> d =_getFeatDict(m,featFactory,activeFeats)\n >>> sorted(list(d.keys()))\n ['Acceptor', 'Donor']\n >>> donors = d['Donor']\n >>> len(donors)\n 1\n >>> donors[0].GetAtomIds()\n (3,)\n >>> acceptors = d['Acceptor']\n >>> len(acceptors)\n 2\n >>> acceptors[0].GetAtomIds()\n (0,)\n >>> acceptors[1].GetAtomIds()\n (3,)\n \n \"\"\"\n molFeats = {}\n for feat in features:\n family = feat.GetFamily()\n if not family in molFeats:\n matches = featFactory.GetFeaturesForMol(mol, includeOnly=family)\n molFeats[family] = matches\n return molFeats\n\n\ndef MatchFeatsToMol(mol, featFactory, features):\n \"\"\" generates a list of all possible mappings of each feature to a molecule\n\n Returns a 2-tuple:\n 1) a boolean indicating whether or not all features were found\n 2) a list, numFeatures long, of sequences of features\n\n\n >>> import os.path\n >>> dataDir = os.path.join(RDConfig.RDCodeDir,'Chem/Pharm3D/test_data')\n >>> featFactory = ChemicalFeatures.BuildFeatureFactory(os.path.join(dataDir,'BaseFeatures.fdef'))\n >>> activeFeats = [ChemicalFeatures.FreeChemicalFeature('Acceptor', Geometry.Point3D(0.0, 0.0, 0.0)),\n ... 
ChemicalFeatures.FreeChemicalFeature('Donor',Geometry.Point3D(0.0, 0.0, 0.0))]\n >>> m = Chem.MolFromSmiles('FCCN')\n >>> match,mList = MatchFeatsToMol(m,featFactory,activeFeats)\n >>> match\n True\n\n Two feature types:\n >>> len(mList)\n 2\n\n The first feature type, Acceptor, has two matches:\n >>> len(mList[0])\n 2\n >>> mList[0][0].GetAtomIds()\n (0,)\n >>> mList[0][1].GetAtomIds()\n (3,)\n\n The first feature type, Donor, has a single match:\n >>> len(mList[1])\n 1\n >>> mList[1][0].GetAtomIds()\n (3,)\n\n \"\"\"\n molFeats = _getFeatDict(mol, featFactory, features)\n res = []\n for feat in features:\n matches = molFeats.get(feat.GetFamily(), [])\n if len(matches) == 0:\n return False, None\n res.append(matches)\n return True, res\n\n\ndef CombiEnum(sequence):\n \"\"\" This generator takes a sequence of sequences as an argument and\n provides all combinations of the elements of the subsequences:\n\n >>> gen = CombiEnum(((1,2),(10,20)))\n >>> next(gen)\n [1, 10]\n >>> next(gen)\n [1, 20]\n\n >>> [x for x in CombiEnum(((1,2),(10,20)))]\n [[1, 10], [1, 20], [2, 10], [2, 20]]\n \n >>> [x for x in CombiEnum(((1,2),(10,20),(100,200)))]\n [[1, 10, 100], [1, 10, 200], [1, 20, 100], [1, 20, 200], [2, 10, 100], [2, 10, 200], [2, 20, 100], [2, 20, 200]]\n\n \"\"\"\n if not len(sequence):\n yield []\n elif len(sequence) == 1:\n for entry in sequence[0]:\n yield [entry]\n else:\n for entry in sequence[0]:\n for subVal in CombiEnum(sequence[1:]):\n yield [entry] + subVal\n\n\ndef DownsampleBoundsMatrix(bm, indices, maxThresh=4.0):\n \"\"\" removes rows from a bounds matrix that are \n that are greater than a threshold value away from a set of\n other points\n\n returns the modfied bounds matrix\n\n The goal of this function is to remove rows from the bounds matrix\n that correspond to atoms that are likely to be quite far from\n the pharmacophore we're interested in. Because the bounds smoothing\n we eventually have to do is N^3, this can be a big win\n\n >>> boundsMat = numpy.array([[0.0,3.0,4.0],[2.0,0.0,3.0],[2.0,2.0,0.0]])\n >>> bm = DownsampleBoundsMatrix(boundsMat,(0,),3.5)\n >>> bm.shape == (2, 2)\n True\n\n we don't touch the input matrix:\n >>> boundsMat.shape == (3, 3)\n True\n \n >>> print(', '.join(['%.3f'%x for x in bm[0]]))\n 0.000, 3.000\n >>> print(', '.join(['%.3f'%x for x in bm[1]]))\n 2.000, 0.000\n\n if the threshold is high enough, we don't do anything:\n >>> boundsMat = numpy.array([[0.0,4.0,3.0],[2.0,0.0,3.0],[2.0,2.0,0.0]])\n >>> bm = DownsampleBoundsMatrix(boundsMat,(0,),5.0)\n >>> bm.shape == (3, 3)\n True\n\n If there's a max value that's close enough to *any* of the indices\n we pass in, we'll keep it:\n >>> boundsMat = numpy.array([[0.0,4.0,3.0],[2.0,0.0,3.0],[2.0,2.0,0.0]])\n >>> bm = DownsampleBoundsMatrix(boundsMat,(0,1),3.5)\n >>> bm.shape == (3, 3)\n True\n \n \"\"\"\n nPts = bm.shape[0]\n k = numpy.zeros(nPts, numpy.int0)\n for idx in indices:\n k[idx] = 1\n for i in indices:\n row = bm[i]\n for j in range(i + 1, nPts):\n if not k[j] and row[j] < maxThresh:\n k[j] = 1\n keep = numpy.nonzero(k)[0]\n bm2 = numpy.zeros((len(keep), len(keep)), numpy.float)\n for i, idx in enumerate(keep):\n row = bm[idx]\n bm2[i] = numpy.take(row, keep)\n return bm2\n\n\ndef CoarseScreenPharmacophore(atomMatch, bounds, pcophore, verbose=False):\n \"\"\"\n\n >>> feats = [ChemicalFeatures.FreeChemicalFeature('HBondAcceptor', 'HAcceptor1', Geometry.Point3D(0.0, 0.0, 0.0)),\n ... ChemicalFeatures.FreeChemicalFeature('HBondDonor', 'HDonor1', Geometry.Point3D(2.65, 0.0, 0.0)),\n ... 
ChemicalFeatures.FreeChemicalFeature('Aromatic', 'Aromatic1', Geometry.Point3D(5.12, 0.908, 0.0)),\n ... ]\n >>> pcophore=Pharmacophore.Pharmacophore(feats)\n >>> pcophore.setLowerBound(0,1, 1.1)\n >>> pcophore.setUpperBound(0,1, 1.9)\n >>> pcophore.setLowerBound(0,2, 2.1)\n >>> pcophore.setUpperBound(0,2, 2.9)\n >>> pcophore.setLowerBound(1,2, 2.1)\n >>> pcophore.setUpperBound(1,2, 3.9)\n\n >>> bounds = numpy.array([[0,2,3],[1,0,4],[2,3,0]],numpy.float)\n >>> CoarseScreenPharmacophore(((0,),(1,)),bounds,pcophore)\n True\n\n >>> CoarseScreenPharmacophore(((0,),(2,)),bounds,pcophore)\n False\n\n >>> CoarseScreenPharmacophore(((1,),(2,)),bounds,pcophore)\n False\n\n >>> CoarseScreenPharmacophore(((0,),(1,),(2,)),bounds,pcophore)\n True\n\n >>> CoarseScreenPharmacophore(((1,),(0,),(2,)),bounds,pcophore)\n False\n\n >>> CoarseScreenPharmacophore(((2,),(1,),(0,)),bounds,pcophore)\n False\n\n # we ignore the point locations here and just use their definitions:\n >>> feats = [ChemicalFeatures.FreeChemicalFeature('HBondAcceptor', 'HAcceptor1', Geometry.Point3D(0.0, 0.0, 0.0)),\n ... ChemicalFeatures.FreeChemicalFeature('HBondDonor', 'HDonor1', Geometry.Point3D(2.65, 0.0, 0.0)),\n ... ChemicalFeatures.FreeChemicalFeature('Aromatic', 'Aromatic1', Geometry.Point3D(5.12, 0.908, 0.0)),\n ... ChemicalFeatures.FreeChemicalFeature('HBondDonor', 'HDonor1', Geometry.Point3D(2.65, 0.0, 0.0)),\n ... ]\n >>> pcophore=Pharmacophore.Pharmacophore(feats)\n >>> pcophore.setLowerBound(0,1, 2.1)\n >>> pcophore.setUpperBound(0,1, 2.9)\n >>> pcophore.setLowerBound(0,2, 2.1)\n >>> pcophore.setUpperBound(0,2, 2.9)\n >>> pcophore.setLowerBound(0,3, 2.1)\n >>> pcophore.setUpperBound(0,3, 2.9)\n >>> pcophore.setLowerBound(1,2, 1.1)\n >>> pcophore.setUpperBound(1,2, 1.9)\n >>> pcophore.setLowerBound(1,3, 1.1)\n >>> pcophore.setUpperBound(1,3, 1.9)\n >>> pcophore.setLowerBound(2,3, 1.1)\n >>> pcophore.setUpperBound(2,3, 1.9)\n >>> bounds = numpy.array([[0,3,3,3],[2,0,2,2],[2,1,0,2],[2,1,1,0]],numpy.float)\n\n >>> CoarseScreenPharmacophore(((0,),(1,),(2,),(3,)),bounds,pcophore)\n True\n\n >>> CoarseScreenPharmacophore(((0,),(1,),(3,),(2,)),bounds,pcophore)\n True\n\n >>> CoarseScreenPharmacophore(((1,),(0,),(3,),(2,)),bounds,pcophore)\n False\n\n \"\"\"\n for k in range(len(atomMatch)):\n if len(atomMatch[k]) == 1:\n for l in range(k + 1, len(atomMatch)):\n if len(atomMatch[l]) == 1:\n idx0 = atomMatch[k][0]\n idx1 = atomMatch[l][0]\n if idx1 < idx0:\n idx0, idx1 = idx1, idx0\n if bounds[idx1,idx0] >= pcophore.getUpperBound(k, l) or \\\n bounds[idx0,idx1] <= pcophore.getLowerBound(k, l) :\n if verbose:\n print('\\t (%d,%d) [%d,%d] fail' % (idx1, idx0, k, l))\n print('\\t %f,%f - %f,%f' % (bounds[idx1, idx0], pcophore.getUpperBound(k, l),\n bounds[idx0, idx1], pcophore.getLowerBound(k, l)))\n #logger.debug('\\t >%s'%str(atomMatch))\n #logger.debug()\n #logger.debug('\\t %f,%f - %f,%f'%(bounds[idx1,idx0],pcophore.getUpperBound(k,l),\n # bounds[idx0,idx1],pcophore.getLowerBound(k,l)))\n return False\n return True\n\n\ndef Check2DBounds(atomMatch, mol, pcophore):\n \"\"\" checks to see if a particular mapping of features onto\n a molecule satisfies a pharmacophore's 2D restrictions\n\n\n >>> activeFeats = [ChemicalFeatures.FreeChemicalFeature('Acceptor', Geometry.Point3D(0.0, 0.0, 0.0)),\n ... 
ChemicalFeatures.FreeChemicalFeature('Donor',Geometry.Point3D(0.0, 0.0, 0.0))]\n >>> pcophore= Pharmacophore.Pharmacophore(activeFeats)\n >>> pcophore.setUpperBound2D(0,1,3)\n >>> m = Chem.MolFromSmiles('FCC(N)CN')\n >>> Check2DBounds(((0,),(3,)),m,pcophore)\n True\n >>> Check2DBounds(((0,),(5,)),m,pcophore)\n False\n\n \"\"\"\n dm = Chem.GetDistanceMatrix(mol, False, False, False)\n nFeats = len(atomMatch)\n for i in range(nFeats):\n for j in range(i + 1, nFeats):\n lowerB = pcophore._boundsMat2D[j, i] #lowerB = pcophore.getLowerBound2D(i,j)\n upperB = pcophore._boundsMat2D[i, j] #upperB = pcophore.getUpperBound2D(i,j)\n dij = 10000\n for atomI in atomMatch[i]:\n for atomJ in atomMatch[j]:\n try:\n dij = min(dij, dm[atomI, atomJ])\n except IndexError:\n print('bad indices:', atomI, atomJ)\n print(' shape:', dm.shape)\n print(' match:', atomMatch)\n print(' mol:')\n print(Chem.MolToMolBlock(mol))\n raise IndexError\n if dij < lowerB or dij > upperB:\n return False\n return True\n\n\ndef _checkMatch(match, mol, bounds, pcophore, use2DLimits):\n \"\"\" **INTERNAL USE ONLY**\n\n checks whether a particular atom match can be satisfied by\n a molecule\n\n \"\"\"\n atomMatch = ChemicalFeatures.GetAtomMatch(match)\n if not atomMatch:\n return None\n elif use2DLimits:\n if not Check2DBounds(atomMatch, mol, pcophore):\n return None\n if not CoarseScreenPharmacophore(atomMatch, bounds, pcophore):\n return None\n return atomMatch\n\n\ndef ConstrainedEnum(matches, mol, pcophore, bounds, use2DLimits=False, index=0, soFar=[]):\n \"\"\" Enumerates the list of atom mappings a molecule\n has to a particular pharmacophore.\n We do check distance bounds here.\n \n\n \"\"\"\n nMatches = len(matches)\n if index >= nMatches:\n yield soFar, []\n elif index == nMatches - 1:\n for entry in matches[index]:\n nextStep = soFar + [entry]\n if index != 0:\n atomMatch = _checkMatch(nextStep, mol, bounds, pcophore, use2DLimits)\n else:\n atomMatch = ChemicalFeatures.GetAtomMatch(nextStep)\n if atomMatch:\n yield soFar + [entry], atomMatch\n else:\n for entry in matches[index]:\n nextStep = soFar + [entry]\n if index != 0:\n atomMatch = _checkMatch(nextStep, mol, bounds, pcophore, use2DLimits)\n if not atomMatch:\n continue\n for val in ConstrainedEnum(matches, mol, pcophore, bounds, use2DLimits=use2DLimits,\n index=index + 1, soFar=nextStep):\n if val:\n yield val\n\n\ndef MatchPharmacophore(matches, bounds, pcophore, useDownsampling=False, use2DLimits=False,\n mol=None, excludedVolumes=None, useDirs=False):\n \"\"\"\n\n if use2DLimits is set, the molecule must also be provided and topological\n distances will also be used to filter out matches\n\n \"\"\"\n for match, atomMatch in ConstrainedEnum(matches, mol, pcophore, bounds, use2DLimits=use2DLimits):\n bm = bounds.copy()\n bm = UpdatePharmacophoreBounds(bm, atomMatch, pcophore, useDirs=useDirs, mol=mol)\n\n if excludedVolumes:\n localEvs = []\n for eV in excludedVolumes:\n featInfo = []\n for i, entry in enumerate(atomMatch):\n info = list(eV.featInfo[i])\n info[0] = entry\n featInfo.append(info)\n localEvs.append(ExcludedVolume.ExcludedVolume(featInfo, eV.index, eV.exclusionDist))\n bm = AddExcludedVolumes(bm, localEvs, smoothIt=False)\n\n sz = bm.shape[0]\n if useDownsampling:\n indices = []\n for entry in atomMatch:\n indices.extend(entry)\n if excludedVolumes:\n for vol in localEvs:\n indices.append(vol.index)\n bm = DownsampleBoundsMatrix(bm, indices)\n if DG.DoTriangleSmoothing(bm):\n return 0, bm, match, (sz, bm.shape[0])\n\n return 1, None, None, None\n\n\ndef 
GetAllPharmacophoreMatches(matches, bounds, pcophore, useDownsampling=0, progressCallback=None,\n use2DLimits=False, mol=None, verbose=False):\n res = []\n nDone = 0\n for match in CombiEnum(matches):\n atomMatch = ChemicalFeatures.GetAtomMatch(match)\n if atomMatch and use2DLimits and mol:\n pass2D = Check2DBounds(atomMatch, mol, pcophore)\n if verbose:\n print('..', atomMatch)\n print(' ..Pass2d:', pass2D)\n else:\n pass2D = True\n if atomMatch and pass2D and \\\n CoarseScreenPharmacophore(atomMatch,bounds,pcophore,verbose=verbose):\n if verbose:\n print(' ..CoarseScreen: Pass')\n\n bm = bounds.copy()\n if verbose:\n print('pre update:')\n for row in bm:\n print(' ', ' '.join(['% 4.2f' % x for x in row]))\n bm = UpdatePharmacophoreBounds(bm, atomMatch, pcophore)\n sz = bm.shape[0]\n if verbose:\n print('pre downsample:')\n for row in bm:\n print(' ', ' '.join(['% 4.2f' % x for x in row]))\n\n if useDownsampling:\n indices = []\n for entry in atomMatch:\n indices += list(entry)\n bm = DownsampleBoundsMatrix(bm, indices)\n if verbose:\n print('post downsample:')\n for row in bm:\n print(' ', ' '.join(['% 4.2f' % x for x in row]))\n\n if DG.DoTriangleSmoothing(bm):\n res.append(match)\n elif verbose:\n print('cannot smooth')\n nDone += 1\n if progressCallback:\n progressCallback(nDone)\n return res\n\n\ndef ComputeChiralVolume(mol, centerIdx, confId=-1):\n \"\"\" Computes the chiral volume of an atom\n \n We're using the chiral volume formula from Figure 7 of \n Blaney and Dixon, Rev. Comp. Chem. V, 299-335 (1994)\n\n >>> import os.path\n >>> dataDir = os.path.join(RDConfig.RDCodeDir,'Chem/Pharm3D/test_data')\n\n R configuration atoms give negative volumes:\n >>> mol = Chem.MolFromMolFile(os.path.join(dataDir,'mol-r.mol'))\n >>> Chem.AssignStereochemistry(mol)\n >>> mol.GetAtomWithIdx(1).GetProp('_CIPCode')\n 'R'\n >>> ComputeChiralVolume(mol,1) < 0\n True\n \n S configuration atoms give positive volumes:\n >>> mol = Chem.MolFromMolFile(os.path.join(dataDir,'mol-s.mol'))\n >>> Chem.AssignStereochemistry(mol)\n >>> mol.GetAtomWithIdx(1).GetProp('_CIPCode')\n 'S'\n >>> ComputeChiralVolume(mol,1) > 0\n True\n \n Non-chiral (or non-specified) atoms give zero volume:\n >>> ComputeChiralVolume(mol,0) == 0.0\n True\n\n We also work on 3-coordinate atoms (with implicit Hs):\n >>> mol = Chem.MolFromMolFile(os.path.join(dataDir,'mol-r-3.mol'))\n >>> Chem.AssignStereochemistry(mol)\n >>> mol.GetAtomWithIdx(1).GetProp('_CIPCode')\n 'R'\n >>> ComputeChiralVolume(mol,1)<0\n True\n \n >>> mol = Chem.MolFromMolFile(os.path.join(dataDir,'mol-s-3.mol'))\n >>> Chem.AssignStereochemistry(mol)\n >>> mol.GetAtomWithIdx(1).GetProp('_CIPCode')\n 'S'\n >>> ComputeChiralVolume(mol,1)>0\n True\n \n\n \n \"\"\"\n conf = mol.GetConformer(confId)\n Chem.AssignStereochemistry(mol)\n center = mol.GetAtomWithIdx(centerIdx)\n if not center.HasProp('_CIPCode'):\n return 0.0\n\n nbrs = center.GetNeighbors()\n nbrRanks = []\n for nbr in nbrs:\n rank = int(nbr.GetProp('_CIPRank'))\n pos = conf.GetAtomPosition(nbr.GetIdx())\n nbrRanks.append((rank, pos))\n\n # if we only have three neighbors (i.e. 
the determining H isn't present)\n # then use the central atom as the fourth point:\n if len(nbrRanks) == 3:\n nbrRanks.append((-1, conf.GetAtomPosition(centerIdx)))\n nbrRanks.sort()\n\n ps = [x[1] for x in nbrRanks]\n v1 = ps[0] - ps[3]\n v2 = ps[1] - ps[3]\n v3 = ps[2] - ps[3]\n\n res = v1.DotProduct(v2.CrossProduct(v3))\n return res\n\n\n#------------------------------------\n#\n# doctest boilerplate\n#\ndef _test():\n import doctest, sys\n return doctest.testmod(sys.modules[\"__main__\"])\n\n\nif __name__ == '__main__':\n import sys\n failed, tried = _test()\n sys.exit(failed)\n",
"# $Id$\n#\n# Copyright (c) 2013, Novartis Institutes for BioMedical Research Inc.\n# All rights reserved.\n#\n# Redistribution and use in source and binary forms, with or without\n# modification, are permitted provided that the following conditions are\n# met:\n#\n# * Redistributions of source code must retain the above copyright\n# notice, this list of conditions and the following disclaimer.\n# * Redistributions in binary form must reproduce the above\n# copyright notice, this list of conditions and the following\n# disclaimer in the documentation and/or other materials provided\n# with the distribution.\n# * Neither the name of Novartis Institutes for BioMedical Research Inc.\n# nor the names of its contributors may be used to endorse or promote\n# products derived from this software without specific prior written permission.\n#\n# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS\n# \"AS IS\" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT\n# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR\n# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT\n# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,\n# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT\n# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,\n# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY\n# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT\n# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE\n# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n#\n# Created by Sereina Riniker, Aug 2013\n\nfrom rdkit import Chem\nfrom rdkit import RDConfig\nfrom rdkit import DataStructs\nfrom rdkit.Chem import rdMolDescriptors as rdMD\nfrom rdkit.Chem import rdmolops\nfrom rdkit.Chem import Draw\nfrom rdkit.six import iteritems\nimport numpy\nimport math\nimport copy\nfrom matplotlib import cm\n\n\ndef GetAtomicWeightsForFingerprint(refMol, probeMol, fpFunction, metric=DataStructs.DiceSimilarity):\n \"\"\"\n Calculates the atomic weights for the probe molecule\n based on a fingerprint function and a metric.\n\n Parameters:\n refMol -- the reference molecule\n probeMol -- the probe molecule\n fpFunction -- the fingerprint function\n metric -- the similarity metric\n\n Note:\n If fpFunction needs additional parameters, use a lambda construct\n \"\"\"\n if hasattr(probeMol, '_fpInfo'):\n delattr(probeMol, '_fpInfo')\n if hasattr(refMol, '_fpInfo'):\n delattr(refMol, '_fpInfo')\n refFP = fpFunction(refMol, -1)\n probeFP = fpFunction(probeMol, -1)\n baseSimilarity = metric(refFP, probeFP)\n # loop over atoms\n weights = []\n for atomId in range(probeMol.GetNumAtoms()):\n newFP = fpFunction(probeMol, atomId)\n newSimilarity = metric(refFP, newFP)\n weights.append(baseSimilarity - newSimilarity)\n if hasattr(probeMol, '_fpInfo'):\n delattr(probeMol, '_fpInfo')\n if hasattr(refMol, '_fpInfo'):\n delattr(refMol, '_fpInfo')\n return weights\n\n\ndef GetAtomicWeightsForModel(probeMol, fpFunction, predictionFunction):\n \"\"\"\n Calculates the atomic weights for the probe molecule based on\n a fingerprint function and the prediction function of a ML model.\n\n Parameters:\n probeMol -- the probe molecule\n fpFunction -- the fingerprint function\n predictionFunction -- the prediction function of the ML model\n \"\"\"\n if hasattr(probeMol, '_fpInfo'):\n delattr(probeMol, '_fpInfo')\n probeFP = fpFunction(probeMol, -1)\n baseProba = 
predictionFunction(probeFP)\n # loop over atoms\n weights = []\n for atomId in range(probeMol.GetNumAtoms()):\n newFP = fpFunction(probeMol, atomId)\n newProba = predictionFunction(newFP)\n weights.append(baseProba - newProba)\n if hasattr(probeMol, '_fpInfo'):\n delattr(probeMol, '_fpInfo')\n return weights\n\n\ndef GetStandardizedWeights(weights):\n \"\"\"\n Normalizes the weights,\n such that the absolute maximum weight equals 1.0.\n\n Parameters:\n weights -- the list with the atomic weights\n \"\"\"\n tmp = [math.fabs(w) for w in weights]\n currentMax = max(tmp)\n if currentMax > 0:\n return [w / currentMax for w in weights], currentMax\n else:\n return weights, currentMax\n\n\ndef GetSimilarityMapFromWeights(mol,\n weights,\n colorMap=cm.PiYG,\n scale=-1,\n size=(250, 250),\n sigma=None, #@UndefinedVariable #pylint: disable=E1101\n coordScale=1.5,\n step=0.01,\n colors='k',\n contourLines=10,\n alpha=0.5,\n **kwargs):\n \"\"\"\n Generates the similarity map for a molecule given the atomic weights.\n\n Parameters:\n mol -- the molecule of interest\n colorMap -- the matplotlib color map scheme\n scale -- the scaling: scale < 0 -> the absolute maximum weight is used as maximum scale\n scale = double -> this is the maximum scale\n size -- the size of the figure\n sigma -- the sigma for the Gaussians\n coordScale -- scaling factor for the coordinates\n step -- the step for calcAtomGaussian\n colors -- color of the contour lines\n contourLines -- if integer number N: N contour lines are drawn\n if list(numbers): contour lines at these numbers are drawn\n alpha -- the alpha blending value for the contour lines\n kwargs -- additional arguments for drawing\n \"\"\"\n if mol.GetNumAtoms() < 2:\n raise ValueError(\"too few atoms\")\n fig = Draw.MolToMPL(mol, coordScale=coordScale, size=size, **kwargs)\n if sigma is None:\n if mol.GetNumBonds() > 0:\n bond = mol.GetBondWithIdx(0)\n idx1 = bond.GetBeginAtomIdx()\n idx2 = bond.GetEndAtomIdx()\n sigma = 0.3 * math.sqrt(\n sum([(mol._atomPs[idx1][i] - mol._atomPs[idx2][i])**2 for i in range(2)]))\n else:\n sigma = 0.3 * math.sqrt(sum([(mol._atomPs[0][i] - mol._atomPs[1][i])**2 for i in range(2)]))\n sigma = round(sigma, 2)\n x, y, z = Draw.calcAtomGaussians(mol, sigma, weights=weights, step=step)\n # scaling\n if scale <= 0.0:\n maxScale = max(math.fabs(numpy.min(z)), math.fabs(numpy.max(z)))\n else:\n maxScale = scale\n # coloring\n fig.axes[0].imshow(z, cmap=colorMap, interpolation='bilinear', origin='lower',\n extent=(0, 1, 0, 1), vmin=-maxScale, vmax=maxScale)\n # contour lines\n # only draw them when at least one weight is not zero\n if len([w for w in weights if w != 0.0]):\n fig.axes[0].contour(x, y, z, contourLines, colors=colors, alpha=alpha, **kwargs)\n return fig\n\n\ndef GetSimilarityMapForFingerprint(refMol, probeMol, fpFunction, metric=DataStructs.DiceSimilarity,\n **kwargs):\n \"\"\"\n Generates the similarity map for a given reference and probe molecule,\n fingerprint function and similarity metric.\n\n Parameters:\n refMol -- the reference molecule\n probeMol -- the probe molecule\n fpFunction -- the fingerprint function\n metric -- the similarity metric.\n kwargs -- additional arguments for drawing\n \"\"\"\n weights = GetAtomicWeightsForFingerprint(refMol, probeMol, fpFunction, metric)\n weights, maxWeight = GetStandardizedWeights(weights)\n fig = GetSimilarityMapFromWeights(probeMol, weights, **kwargs)\n return fig, maxWeight\n\n\ndef GetSimilarityMapForModel(probeMol, fpFunction, predictionFunction, **kwargs):\n \"\"\"\n 
Generates the similarity map for a given ML model and probe molecule,\n and fingerprint function.\n\n Parameters:\n probeMol -- the probe molecule\n fpFunction -- the fingerprint function\n predictionFunction -- the prediction function of the ML model\n kwargs -- additional arguments for drawing\n \"\"\"\n weights = GetAtomicWeightsForModel(probeMol, fpFunction, predictionFunction)\n weights, maxWeight = GetStandardizedWeights(weights)\n fig = GetSimilarityMapFromWeights(probeMol, weights, **kwargs)\n return fig, maxWeight\n\n\napDict = {}\napDict[\n 'normal'] = lambda m, bits, minl, maxl, bpe, ia, **kwargs: rdMD.GetAtomPairFingerprint(m, minLength=minl, maxLength=maxl, ignoreAtoms=ia, **kwargs)\napDict[\n 'hashed'] = lambda m, bits, minl, maxl, bpe, ia, **kwargs: rdMD.GetHashedAtomPairFingerprint(m, nBits=bits, minLength=minl, maxLength=maxl, ignoreAtoms=ia, **kwargs)\napDict[\n 'bv'] = lambda m, bits, minl, maxl, bpe, ia, **kwargs: rdMD.GetHashedAtomPairFingerprintAsBitVect(m, nBits=bits, minLength=minl, maxLength=maxl, nBitsPerEntry=bpe, ignoreAtoms=ia, **kwargs)\n\n\n# usage: lambda m,i: GetAPFingerprint(m, i, fpType, nBits, minLength, maxLength, nBitsPerEntry)\ndef GetAPFingerprint(mol, atomId=-1, fpType='normal', nBits=2048, minLength=1, maxLength=30,\n nBitsPerEntry=4, **kwargs):\n \"\"\"\n Calculates the atom pairs fingerprint with the torsions of atomId removed.\n\n Parameters:\n mol -- the molecule of interest\n atomId -- the atom to remove the pairs for (if -1, no pair is removed)\n fpType -- the type of AP fingerprint ('normal', 'hashed', 'bv')\n nBits -- the size of the bit vector (only for fpType='bv')\n minLength -- the minimum path length for an atom pair\n maxLength -- the maxmimum path length for an atom pair\n nBitsPerEntry -- the number of bits available for each pair\n \"\"\"\n if fpType not in ['normal', 'hashed', 'bv']:\n raise ValueError(\"Unknown Atom pairs fingerprint type\")\n if atomId < 0:\n return apDict[fpType](mol, nBits, minLength, maxLength, nBitsPerEntry, 0, **kwargs)\n if atomId >= mol.GetNumAtoms():\n raise ValueError(\"atom index greater than number of atoms\")\n return apDict[fpType](mol, nBits, minLength, maxLength, nBitsPerEntry, [atomId], **kwargs)\n\n\nttDict = {}\nttDict[\n 'normal'] = lambda m, bits, ts, bpe, ia, **kwargs: rdMD.GetTopologicalTorsionFingerprint(m, targetSize=ts, ignoreAtoms=ia, **kwargs)\nttDict[\n 'hashed'] = lambda m, bits, ts, bpe, ia, **kwargs: rdMD.GetHashedTopologicalTorsionFingerprint(m, nBits=bits, targetSize=ts, ignoreAtoms=ia, **kwargs)\nttDict[\n 'bv'] = lambda m, bits, ts, bpe, ia, **kwargs: rdMD.GetHashedTopologicalTorsionFingerprintAsBitVect(m, nBits=bits, targetSize=ts, nBitsPerEntry=bpe, ignoreAtoms=ia, **kwargs)\n\n\n# usage: lambda m,i: GetTTFingerprint(m, i, fpType, nBits, targetSize)\ndef GetTTFingerprint(mol, atomId=-1, fpType='normal', nBits=2048, targetSize=4, nBitsPerEntry=4,\n **kwargs):\n \"\"\"\n Calculates the topological torsion fingerprint with the pairs of atomId removed.\n\n Parameters:\n mol -- the molecule of interest\n atomId -- the atom to remove the torsions for (if -1, no torsion is removed)\n fpType -- the type of TT fingerprint ('normal', 'hashed', 'bv')\n nBits -- the size of the bit vector (only for fpType='bv')\n minLength -- the minimum path length for an atom pair\n maxLength -- the maxmimum path length for an atom pair\n nBitsPerEntry -- the number of bits available for each torsion\n\n any additional keyword arguments will be passed to the fingerprinting function.\n\n \"\"\"\n if 
fpType not in ['normal', 'hashed', 'bv']:\n raise ValueError(\"Unknown Topological torsion fingerprint type\")\n if atomId < 0:\n return ttDict[fpType](mol, nBits, targetSize, nBitsPerEntry, 0, **kwargs)\n if atomId >= mol.GetNumAtoms():\n raise ValueError(\"atom index greater than number of atoms\")\n return ttDict[fpType](mol, nBits, targetSize, nBitsPerEntry, [atomId], **kwargs)\n\n\n# usage: lambda m,i: GetMorganFingerprint(m, i, radius, fpType, nBits, useFeatures)\ndef GetMorganFingerprint(mol, atomId=-1, radius=2, fpType='bv', nBits=2048, useFeatures=False,\n **kwargs):\n \"\"\"\n Calculates the Morgan fingerprint with the environments of atomId removed.\n\n Parameters:\n mol -- the molecule of interest\n radius -- the maximum radius\n fpType -- the type of Morgan fingerprint: 'count' or 'bv'\n atomId -- the atom to remove the environments for (if -1, no environments is removed)\n nBits -- the size of the bit vector (only for fpType = 'bv')\n useFeatures -- if false: ConnectivityMorgan, if true: FeatureMorgan\n\n any additional keyword arguments will be passed to the fingerprinting function.\n \"\"\"\n if fpType not in ['bv', 'count']:\n raise ValueError(\"Unknown Morgan fingerprint type\")\n if not hasattr(mol, '_fpInfo'):\n info = {}\n # get the fingerprint\n if fpType == 'bv':\n molFp = rdMD.GetMorganFingerprintAsBitVect(mol, radius, nBits=nBits, useFeatures=useFeatures,\n bitInfo=info, **kwargs)\n else:\n molFp = rdMD.GetMorganFingerprint(mol, radius, useFeatures=useFeatures, bitInfo=info,\n **kwargs)\n # construct the bit map\n if fpType == 'bv':\n bitmap = [DataStructs.ExplicitBitVect(nBits) for x in range(mol.GetNumAtoms())]\n else:\n bitmap = [[] for x in range(mol.GetNumAtoms())]\n for bit, es in iteritems(info):\n for at1, rad in es:\n if rad == 0: # for radius 0\n if fpType == 'bv':\n bitmap[at1][bit] = 1\n else:\n bitmap[at1].append(bit)\n else: # for radii > 0\n env = Chem.FindAtomEnvironmentOfRadiusN(mol, rad, at1)\n amap = {}\n submol = Chem.PathToSubmol(mol, env, atomMap=amap)\n for at2 in amap.keys():\n if fpType == 'bv':\n bitmap[at2][bit] = 1\n else:\n bitmap[at2].append(bit)\n mol._fpInfo = (molFp, bitmap)\n\n if atomId < 0:\n return mol._fpInfo[0]\n else: # remove the bits of atomId\n if atomId >= mol.GetNumAtoms():\n raise ValueError(\"atom index greater than number of atoms\")\n if len(mol._fpInfo) != 2:\n raise ValueError(\"_fpInfo not set\")\n if fpType == 'bv':\n molFp = mol._fpInfo[0] ^ mol._fpInfo[1][atomId] # xor\n else: # count\n molFp = copy.deepcopy(mol._fpInfo[0])\n # delete the bits with atomId\n for bit in mol._fpInfo[1][atomId]:\n molFp[bit] -= 1\n return molFp\n\n\n# usage: lambda m,i: GetRDKFingerprint(m, i, fpType, nBits, minPath, maxPath, nBitsPerHash)\ndef GetRDKFingerprint(mol, atomId=-1, fpType='bv', nBits=2048, minPath=1, maxPath=5, nBitsPerHash=2,\n **kwargs):\n \"\"\"\n Calculates the RDKit fingerprint with the paths of atomId removed.\n\n Parameters:\n mol -- the molecule of interest\n atomId -- the atom to remove the paths for (if -1, no path is removed)\n fpType -- the type of RDKit fingerprint: 'bv'\n nBits -- the size of the bit vector\n minPath -- minimum path length\n maxPath -- maximum path length\n nBitsPerHash -- number of to set per path\n \"\"\"\n if fpType not in ['bv', '']:\n raise ValueError(\"Unknown RDKit fingerprint type\")\n fpType = 'bv'\n if not hasattr(mol, '_fpInfo'):\n info = [] # list with bits for each atom\n # get the fingerprint\n molFp = Chem.RDKFingerprint(mol, fpSize=nBits, minPath=minPath, 
maxPath=maxPath,\n nBitsPerHash=nBitsPerHash, atomBits=info, **kwargs)\n mol._fpInfo = (molFp, info)\n\n if atomId < 0:\n return mol._fpInfo[0]\n else: # remove the bits of atomId\n if atomId >= mol.GetNumAtoms():\n raise ValueError(\"atom index greater than number of atoms\")\n if len(mol._fpInfo) != 2:\n raise ValueError(\"_fpInfo not set\")\n molFp = copy.deepcopy(mol._fpInfo[0])\n molFp.UnSetBitsFromList(mol._fpInfo[1][atomId])\n return molFp\n"
] |
[
[
"numpy.array",
"numpy.dot",
"numpy.zeros",
"numpy.nonzero",
"numpy.take",
"numpy.sqrt"
],
[
"numpy.max",
"numpy.min"
]
] |
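The ComputeChiralVolume routine in the EmbedLib code above reduces the R/S check to the sign of a scalar triple product (the Blaney and Dixon formula its docstring cites). A minimal standalone sketch of that idea in plain numpy, with made-up coordinates standing in for RDKit conformer positions:

import numpy as np

def chiral_volume(p1, p2, p3, p4):
    # Signed tetrahedron volume: (p1-p4) . ((p2-p4) x (p3-p4)),
    # with the four substituent positions ordered by CIP priority.
    v1 = np.asarray(p1, dtype=float) - np.asarray(p4, dtype=float)
    v2 = np.asarray(p2, dtype=float) - np.asarray(p4, dtype=float)
    v3 = np.asarray(p3, dtype=float) - np.asarray(p4, dtype=float)
    return float(np.dot(v1, np.cross(v2, v3)))

# Swapping two substituents flips the sign; EmbedPharmacophore uses exactly
# this sign (vol >= 0 vs vol <= 0) to discard mirror-image embeddings.
a = chiral_volume((1, 0, 0), (0, 1, 0), (0, 0, 1), (0, 0, 0))
b = chiral_volume((0, 1, 0), (1, 0, 0), (0, 0, 1), (0, 0, 0))
assert a == -b and a > 0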
ktanushree/bulkupgrade
|
[
"8dfaa07b58a345c0ebfef9cc05b981272ce9e997"
] |
[
"bulkupgrade.py"
] |
[
"#!/usr/bin/env python\n\"\"\"\nPrisma SDWAN Bulk Device Upgrades\[email protected]\nVersion: 1.0.1 b1\n\"\"\"\n# standard modules\nimport getpass\nimport json\nimport logging\nimport datetime\nimport os\nimport sys\nimport csv\nimport time\nimport numpy as np\nimport pandas as pd\n\n#standard modules\nimport argparse\nimport logging\n\n# CloudGenix Python SDK\nimport cloudgenix\nimport codecs\n\n# Global Vars\nSDK_VERSION = cloudgenix.version\nSCRIPT_NAME = 'Prisma SD-WAN: Bulk Device Upgrade'\nCSVHEADER = [\"serial_number\",\"software_version\",\"download_time\",\"upgrade_time\",\"interfaces\",\"download_interval\",\"upgrade_interval\"]\nCSVHEADER_ABORT = [\"serial_number\"]\n\n# Set NON-SYSLOG logging to use function name\nlogger = logging.getLogger(__name__)\n\nsys.path.append(os.getcwd())\ntry:\n from cloudgenix_settings import CLOUDGENIX_AUTH_TOKEN\n\nexcept ImportError:\n # Get AUTH_TOKEN/X_AUTH_TOKEN from env variable, if it exists. X_AUTH_TOKEN takes priority.\n if \"X_AUTH_TOKEN\" in os.environ:\n CLOUDGENIX_AUTH_TOKEN = os.environ.get('X_AUTH_TOKEN')\n elif \"AUTH_TOKEN\" in os.environ:\n CLOUDGENIX_AUTH_TOKEN = os.environ.get('AUTH_TOKEN')\n else:\n # not set\n CLOUDGENIX_AUTH_TOKEN = None\n\ntry:\n from cloudgenix_settings import CLOUDGENIX_USER, CLOUDGENIX_PASSWORD\n\nexcept ImportError:\n # will get caught below\n CLOUDGENIX_USER = None\n CLOUDGENIX_PASSWORD = None\n\n\n#\n# Global Dicts\n#\nelem_id_name = {}\nelem_name_id = {}\nelem_id_hwid = {}\nelem_hwid_id = {}\nelemid_sid = {}\nsite_id_name = {}\nsite_name_id = {}\nimage_id_name = {}\nimage_name_id = {}\nunsupported_id_name = {}\nunsupported_name_id = {}\nintf_id_name = {}\nintf_name_id = {}\nhwid_sid = {}\n\n\ndef create_dicts(cgx_session):\n print(\"Creating Translation Dicts..\")\n print(\"\\tSites\")\n resp = cgx_session.get.sites()\n if resp.cgx_status:\n sitelist = resp.cgx_content.get(\"items\", None)\n for site in sitelist:\n site_id_name[site[\"id\"]] = site[\"name\"]\n site_name_id[site[\"name\"]] = site[\"id\"]\n else:\n print(\"ERR: Could not retrieve sites\")\n cloudgenix.jd_detailed(resp)\n\n print(\"\\tElements & Interfaces\")\n resp = cgx_session.get.elements()\n if resp.cgx_status:\n elemlist = resp.cgx_content.get(\"items\", None)\n for elem in elemlist:\n sid = elem[\"site_id\"]\n elem_id_name[elem[\"id\"]] = elem[\"name\"]\n elem_name_id[elem[\"name\"]] = elem[\"id\"]\n elem_id_hwid[elem[\"id\"]] = elem[\"hw_id\"]\n elem_hwid_id[elem[\"hw_id\"]] = elem[\"id\"]\n hwid_sid[elem[\"hw_id\"]] = sid\n elemid_sid[elem[\"id\"]] = sid\n\n if sid in [\"1\", 1]:\n continue\n else:\n resp = cgx_session.get.interfaces(site_id = sid, element_id=elem[\"id\"])\n if resp.cgx_status:\n intflist = resp.cgx_content.get(\"items\", None)\n for intf in intflist:\n intf_id_name[(sid,elem[\"id\"],intf[\"id\"])] = intf[\"name\"]\n intf_name_id[(sid,elem[\"id\"],intf[\"name\"])] = intf[\"id\"]\n\n else:\n print(\"ERR: Could not retrieve elements\")\n cloudgenix.jd_detailed(resp)\n\n print(\"\\tElement Images\")\n resp = cgx_session.get.element_images()\n if resp.cgx_status:\n imagelist = resp.cgx_content.get(\"items\", None)\n for image in imagelist:\n if image[\"state\"] == \"release\":\n image_id_name[image[\"id\"]] = image[\"version\"]\n image_name_id[image[\"version\"]] = image[\"id\"]\n else:\n unsupported_id_name[image[\"id\"]] = image[\"version\"]\n unsupported_name_id[image[\"version\"]] = image[\"id\"]\n\n else:\n print(\"ERR: Could not retrieve element images\")\n cloudgenix.jd_detailed(resp)\n\n return\n\n\ndef 
remove_bom(line):\n return line[3:] if line.startswith(codecs.BOM_UTF8) else line\n\n\ndef abort_upgrades(devicelist, cgx_session):\n for i, row in devicelist.iterrows():\n hwid = row[\"serial_number\"]\n if hwid in elem_hwid_id.keys():\n elemid = elem_hwid_id[hwid]\n\n data = {\n \"action\": \"abort_upgrade\",\n \"parameters\": None\n }\n\n resp = cgx_session.post.operations_e(element_id=elemid, data=data)\n if resp.cgx_status:\n print(\"Upgrade aborted for {}\".format(hwid))\n else:\n print(\"ERR: Could not abort upgrade for {}\".format(hwid))\n cloudgenix.jd_detailed(resp)\n\n return\n\n\ndef upgrade_device(device_data,cgx_session):\n\n for i,row in device_data.iterrows():\n hwid = row[\"serial_number\"]\n if hwid in elem_hwid_id.keys():\n elemid = elem_hwid_id[hwid]\n sid = elemid_sid[elemid]\n swversion = row[\"software_version\"]\n\n if swversion in image_name_id.keys():\n imageid = image_name_id[swversion]\n\n\n intf_list = []\n interfaces_str = row[\"interfaces\"]\n if interfaces_str is not None:\n print(interfaces_str)\n interfaces = interfaces_str.split(\",\")\n if sid in [\"1\", 1]:\n print(\"WARN: Device is not assigned to a site. Ignoring Interface settings for upgrade.\")\n intf_list = None\n else:\n for intf in interfaces:\n if (sid, elemid, intf) in intf_name_id.keys():\n iid = intf_name_id[(sid, elemid, intf)]\n intf_list.append(iid)\n\n else:\n print(\"ERR: Interface {} not found on Device {}. Ignoring Interface settings for upgrade.\".format(\n intf, hwid))\n\n if intf_list is not None:\n if len(intf_list) == 0:\n intf_list = None\n\n download_time = row[\"download_time\"]\n upgrade_time = row[\"upgrade_time\"]\n download_interval = row[\"download_interval\"]\n upgrade_interval = row[\"upgrade_interval\"]\n\n #\n # Get Current Software Status\n #\n resp = cgx_session.get.software_state(element_id=elemid)\n if resp.cgx_status:\n status = resp.cgx_content\n current_imageid = status[\"image_id\"]\n if current_imageid == imageid:\n print(\"INFO: Device {} already at {}. Skipping Upgrade..\".format(hwid,swversion))\n\n else:\n status[\"image_id\"] = imageid\n status[\"scheduled_download\"] = download_time\n status[\"scheduled_upgrade\"] = upgrade_time\n status[\"interface_ids\"] = intf_list\n status[\"download_interval\"] = download_interval\n status[\"upgrade_interval\"] = upgrade_interval\n\n resp = cgx_session.put.software_state(element_id=elemid,data=status)\n if resp.cgx_status:\n print(\"INFO: Device {} upgrade to {} scheduled\".format(hwid,swversion))\n\n else:\n print(\"ERR: Device {} could not be upgraded to {}\".format(hwid,swversion))\n cloudgenix.jd_detailed(resp)\n\n else:\n print(\"ERR: Could not retrieve software status\")\n cloudgenix.jd_detailed(resp)\n\n elif swversion in unsupported_name_id.keys():\n print(\"ERR: [CSV Row {}] Image {} is not longer supported. Please choose a different software image\".format((i+1),swversion))\n continue\n\n else:\n print(\"ERR: [CSV Row {}] Invalid Software Image {}\".format((i+1),swversion))\n continue\n else:\n print(\"ERR: [CSV Row {}] Device {} not found. 
Please check the Serial Number\".format((i+1),hwid))\n continue\n\n\n return\n\n\ndef go():\n ############################################################################\n # Begin Script, parse arguments.\n ############################################################################\n\n # Parse arguments\n parser = argparse.ArgumentParser(description=\"{0}.\".format(SCRIPT_NAME))\n\n # Allow Controller modification and debug level sets.\n controller_group = parser.add_argument_group('API', 'These options change how this program connects to the API.')\n controller_group.add_argument(\"--controller\", \"-C\",\n help=\"Controller URI, ex. \"\n \"C-Prod: https://api.elcapitan.cloudgenix.com\",\n default=None)\n\n controller_group.add_argument(\"--insecure\", \"-I\", help=\"Disable SSL certificate and hostname verification\",\n dest='verify', action='store_false', default=False)\n\n login_group = parser.add_argument_group('Login', 'These options allow skipping of interactive login')\n login_group.add_argument(\"--email\", \"-E\", help=\"Use this email as User Name instead of prompting\",\n default=None)\n login_group.add_argument(\"--pass\", \"-PW\", help=\"Use this Password instead of prompting\",\n default=None)\n\n # Commandline for CSV file name\n device_group = parser.add_argument_group('Device CSV', 'CSV file containing device and upgrade information')\n device_group.add_argument(\"--filename\", \"-F\", help=\"Name of the file with path. \"\n \"CSV file should contain the follow headers: \"\n \"serial_number,software_version,download_time,upgrade_time,interfaces,download_interval,upgrade_interval\", default=None)\n\n device_group.add_argument(\"--abort\", \"-A\", help=\"Abort Scheduled Upgrades\",\n default=False, action=\"store_true\")\n\n debug_group = parser.add_argument_group('Debug', 'These options enable debugging output')\n debug_group.add_argument(\"--debug\", \"-D\", help=\"Verbose Debug info, levels 0-2\", type=int,\n default=0)\n\n args = vars(parser.parse_args())\n\n abort = args[\"abort\"]\n\n if args['debug'] == 1:\n logging.basicConfig(level=logging.INFO,\n format=\"%(levelname)s [%(name)s.%(funcName)s:%(lineno)d] %(message)s\")\n logger.setLevel(logging.INFO)\n elif args['debug'] >= 2:\n logging.basicConfig(level=logging.DEBUG,\n format=\"%(levelname)s [%(name)s.%(funcName)s:%(lineno)d] %(message)s\")\n logger.setLevel(logging.DEBUG)\n else:\n # Remove all handlers\n for handler in logging.root.handlers[:]:\n logging.root.removeHandler(handler)\n # set logging level to default\n logger.setLevel(logging.WARNING)\n\n ############################################################################\n # Instantiate API\n ############################################################################\n cgx_session = cloudgenix.API(controller=args[\"controller\"], ssl_verify=args[\"verify\"])\n\n # set debug\n cgx_session.set_debug(args[\"debug\"])\n ############################################################################\n # Draw Interactive login banner, run interactive login including args above.\n ############################################################################\n\n print(\"{0} v{1} ({2})\\n\".format(SCRIPT_NAME, SDK_VERSION, cgx_session.controller))\n\n filename = args[\"filename\"]\n if not os.path.exists(filename):\n print(\"ERR: File not found. 
Please provide the entire path\")\n sys.exit()\n else:\n csvdata = pd.read_csv(filename)\n csvdata = csvdata.replace({np.nan: None})\n columns = csvdata.columns.values\n\n if abort:\n if \"serial_number\" not in columns:\n print(\"ERR: Invalid CSV file format!\\nCSV Header:{}\\nPlease include column: serial_number\".format(columns))\n sys.exit()\n else:\n if set(columns) != set(CSVHEADER):\n print(\"ERR: Invalid CSV file format!\\nCSV Header:{}\\nExpected Header:{}\".format(columns,CSVHEADER))\n sys.exit()\n\n\n # login logic. Use cmdline if set, use AUTH_TOKEN next, finally user/pass from config file, then prompt.\n # figure out user\n if args[\"email\"]:\n user_email = args[\"email\"]\n elif CLOUDGENIX_USER:\n user_email = CLOUDGENIX_USER\n else:\n user_email = None\n\n # figure out password\n if args[\"pass\"]:\n user_password = args[\"pass\"]\n elif CLOUDGENIX_PASSWORD:\n user_password = CLOUDGENIX_PASSWORD\n else:\n user_password = None\n\n # check for token\n if CLOUDGENIX_AUTH_TOKEN and not args[\"email\"] and not args[\"pass\"]:\n cgx_session.interactive.use_token(CLOUDGENIX_AUTH_TOKEN)\n if cgx_session.tenant_id is None:\n print(\"AUTH_TOKEN login failure, please check token.\")\n sys.exit()\n\n else:\n while cgx_session.tenant_id is None:\n cgx_session.interactive.login(user_email, user_password)\n # clear after one failed login, force relogin.\n if not cgx_session.tenant_id:\n user_email = None\n user_password = None\n\n ############################################################################\n # Create Translation Dicts\n ############################################################################\n create_dicts(cgx_session)\n if abort:\n print(\"INFO: Aborting scheduled upgrades\")\n abort_upgrades(devicelist=csvdata, cgx_session=cgx_session)\n\n else:\n print(\"INFO: Performing Bulk Device Upgrades\")\n upgrade_device(device_data=csvdata,cgx_session=cgx_session)\n # get time now.\n curtime_str = datetime.datetime.utcnow().strftime('%Y-%m-%d-%H-%M-%S')\n # create file-system friendly tenant str.\n tenant_str = \"\".join(x for x in cgx_session.tenant_name if x.isalnum()).lower()\n\n # end of script, run logout to clear session.\n print(\"Logging Out.\")\n cgx_session.get.logout()\n\n\nif __name__ == \"__main__\":\n go()"
] |
[
[
"pandas.read_csv"
]
] |
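Before scheduling anything, go() in the bulkupgrade script above checks the CSV header against CSVHEADER and maps pandas NaN cells to None so empty fields can be tested directly. A small standalone sketch of that pattern (the inline CSV text is a made-up stand-in for the --filename argument):

import io

import numpy as np
import pandas as pd

CSVHEADER = ["serial_number", "software_version", "download_time", "upgrade_time",
             "interfaces", "download_interval", "upgrade_interval"]

# Stand-in for pd.read_csv(filename)
csv_text = (
    "serial_number,software_version,download_time,upgrade_time,"
    "interfaces,download_interval,upgrade_interval\n"
    "SN123,5.6.1,,,,,\n"
)
csvdata = pd.read_csv(io.StringIO(csv_text))
csvdata = csvdata.replace({np.nan: None})  # blank cells become None

if set(csvdata.columns.values) != set(CSVHEADER):
    raise SystemExit("ERR: Invalid CSV file format")

for i, row in csvdata.iterrows():
    print(row["serial_number"], row["interfaces"])  # -> SN123 None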
ahmedhammad97/Offensive-Language-Detection
|
[
"f5bd9056eada52f63862ba4f54d4d0c2d4b59df6"
] |
[
"app.py"
] |
[
"#######################################\n## Author: Ahmed Hammad\n## License: MIT\n## Email: [email protected]\n## Website: www.ahmedhammad97.com\n#######################################\n\nimport pandas as pd\nimport numpy as np\nfrom tqdm import tqdm\nimport preprocessing, embedding, helper, classifying\nimport copy\n\n\ntrain_directory = \"datasets/training-v1/offenseval-training-v1.tsv\"\nprint(\"Reading Dataset...\")\ntrain_data = pd.read_csv(train_directory, sep='\\t', header=0)\n\ntweets = train_data[[\"tweet\"]]\nsubtask_a_labels = train_data[[\"subtask_a\"]]\nsubtask_b_labels = train_data.query(\"subtask_a == 'OFF'\")[[\"subtask_b\"]]\nsubtask_c_labels = train_data.query(\"subtask_b == 'TIN'\")[[\"subtask_c\"]]\n\nclean_tweets = copy.deepcopy(tweets)\n\ntqdm.pandas(desc=\"Cleaning Data Phase I...\")\nclean_tweets['tweet'] = tweets['tweet'].progress_apply(preprocessing.take_data_to_shower)\n\ntqdm.pandas(desc=\"Tokenizing Data...\")\nclean_tweets['tokens'] = clean_tweets['tweet'].progress_apply(preprocessing.tokenize)\n\ntqdm.pandas(desc=\"Cleaning Data Phase II...\")\nclean_tweets['tokens'] = clean_tweets['tokens'].progress_apply(preprocessing.remove_stop_words)\n\ntqdm.pandas(desc=\"Stemming And Lemmatizing\")\nclean_tweets['tokens'] = clean_tweets['tokens'].progress_apply(preprocessing.stem_and_lem)\n\ntext_vector = clean_tweets['tokens'].tolist()\n\nvectors_a = embedding.tfid(text_vector) # Numerical Vectors A\nlabels_a = subtask_a_labels['subtask_a'].values.tolist() # Subtask A Labels\n\nvectors_b = helper.get_vectors(vectors_a, labels_a, \"OFF\") # Numerical Vectors B\nlabels_b = subtask_b_labels['subtask_b'].values.tolist() # Subtask B Labels\n\nvectors_c = helper.get_vectors(vectors_b, labels_b, \"TIN\") # Numerical Vectors C\nlabels_c = subtask_c_labels['subtask_c'].values.tolist() # Subtask C Labels\n\nprint(\"\\nBuilding Model Subtask A...\")\nclassifying.classify(vectors_a[:], labels_a[:], text_vector, \"A\", \"MNB\")\n\nprint(\"\\nBuilding Model Subtask B...\")\nclassifying.classify(vectors_b[:], labels_b[:], text_vector, \"B\", \"SVM\")\n\nprint(\"\\nBuilding Model Subtask C...\")\nclassifying.classify(vectors_c[:], labels_c[:], text_vector, \"C\", \"RF\")\n\n# You can choose from the classifiers {MNB, KNN, SVM, DT, RF, LR}\n# You can also try only a subset of the data for quick classification:\n# vectors_a[1000:3000], labels_a[1000:3000]"
] |
[
[
"pandas.read_csv"
]
] |
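app.py above hands the TF-IDF vectors from embedding.tfid to classifying.classify together with a classifier code such as MNB or SVM. Those helper modules are not part of this row, so here is a rough scikit-learn sketch of the same vectorize-then-classify step (the toy tweets and the use of sklearn are assumptions for illustration, not the repository's actual helpers):

from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.metrics import f1_score
from sklearn.model_selection import train_test_split
from sklearn.naive_bayes import MultinomialNB

# Toy token lists standing in for text_vector (already-tokenized tweets)
text_vector = [["you", "are", "awful"], ["have", "a", "nice", "day"],
               ["terrible", "person"], ["lovely", "weather", "today"]] * 25
labels_a = ["OFF", "NOT", "OFF", "NOT"] * 25

# TfidfVectorizer expects raw strings, so join the tokens back together
docs = [" ".join(tokens) for tokens in text_vector]
vectors_a = TfidfVectorizer().fit_transform(docs)

X_tr, X_te, y_tr, y_te = train_test_split(vectors_a, labels_a, test_size=0.2,
                                          random_state=0)
model = MultinomialNB().fit(X_tr, y_tr)  # the "MNB" choice for subtask A
print("macro F1:", f1_score(y_te, model.predict(X_te), average="macro"))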
shadinaguib/shadinaguib.github.io
|
[
"7bc8b60ffca3fafb737f820f264ae43942611f6d"
] |
[
"Notebooks/src/functions.py"
] |
[
"# ------------------------- IMPORTS -------------------------\n\nimport pandas as pd\nimport numpy as np\nimport os\nimport sys\nimport pycountry\nimport reverse_geocode\nimport pycountry_convert as pc\n\n\n# -------------------- GLOBAL VARIABLES ---------------------\n\n# Cell size for defining home location\nCELL = 25\n\n\n# ------------------------ FUNCTIONS ------------------------\n\n# Function to convert km to latitude \ndef km_to_lat(km):\n # Input:\n # -- km: a scalar of a distance in kilometers\n # Output:\n # -- a scalar of the converted distance in degrees (latitude)\n \n return 180*km/(np.pi*6371)\n\n# Function to convert km to longitude\ndef km_to_lon(lat, km):\n # Input:\n # -- lat: a scalar of the latitude to consider for the calculation\n # -- km: a scalar of a distance in kilometers\n # Output:\n # -- a scalar of the converted distance in degrees (longitude)\n \n return 180*km/(np.pi*6371*np.cos(lat*np.pi/180))\n\n# Function to add discretised latitude and longitude values to dataframe\ndef discretise_checkins(checkins_df):\n # Input:\n # -- checkins_df: a pandas dataframe containing check-in information\n # Output:\n # -- checkins_discrete_df: a pandas dataframe containing the discretised check-in information\n \n # Copy input\n checkins_discrete_df = checkins_df.copy()\n \n # Calculate index and discrete value of latitude\n index_lat = ((90 + checkins_discrete_df.latitude)/km_to_lat(CELL)).astype(int)\n discrete_lat = index_lat*km_to_lat(CELL)-90\n\n # Calculate index and discrete value of longitude\n index_lon = ((180 + checkins_discrete_df.longitude)/km_to_lon(discrete_lat, CELL)).astype(int)\n discrete_lon = index_lon*km_to_lon(discrete_lat, CELL)-180\n\n # Add columns to dataframe\n checkins_discrete_df['discrete latitude'] = discrete_lat\n checkins_discrete_df['discrete longitude'] = discrete_lon\n checkins_discrete_df['position index'] = index_lat.astype(str) + '-' + index_lon.astype(str)\n \n return checkins_discrete_df\n\n# Function to find home cells\ndef find_home_cells(checkins_discrete_df):\n # Input:\n # -- checkins_discrete_df: a pandas dataframe containing discretised check-in information\n # Output:\n # -- home_cell_df: a pandas dataframe containing the home cell information for each user\n \n # Group by user and position index and count amount of position indexes\n home_cell_df = checkins_discrete_df.groupby(['user','discrete latitude', \\\n 'discrete longitude','position index'], as_index = False)['latitude'].count()\n \n # Group by user and select max number of position indexes\n home_cell_df = home_cell_df.loc[home_cell_df.groupby('user')['latitude'].idxmax()]\n \n # Reformat and rename data \n home_cell_df = home_cell_df.drop(['position index', 'latitude'], axis=1)\n home_cell_df.columns = ['user','home cell latitude','home cell longitude']\n \n return home_cell_df\n\n# Function that finds user homes\ndef find_homes(checkins_df, home_cell_df):\n # Input:\n # -- checkins_df: a pandas dataframe containing check-in information\n # -- home_cell_df: a pandas dataframe containing discretised check-in information\n # Output:\n # -- home_df: a pandas dataframe containing the home location of each user\n\n # Merge checkins dataframe with home cell dataframe\n home_df = pd.merge(checkins_df, home_cell_df, on='user')\n\n # Keep checkins within home cell\n home_df = home_df[(home_df['latitude'] >= home_df['home cell latitude']) & \\\n (home_df['latitude'] < home_df['home cell latitude'] + km_to_lat(CELL))]\n\n home_df = home_df[(home_df['longitude'] >= 
home_df['home cell longitude']) & \\\n (home_df['longitude'] < home_df['home cell longitude'] + \\\n km_to_lon(home_df['home cell latitude'], CELL))]\n\n # Group by user for mean latitude and longitude\n home_df = home_df.groupby('user', as_index = False)[['latitude', 'longitude']].mean()\n\n # Rename data\n home_df.columns = ['user', 'home latitude', 'home longitude']\n \n return home_df\n\n# Function that creates dataframe with friendships and locations\ndef user_friend_location(edges_df, home_df):\n # Input:\n # -- edges_df: a pandas dataframe containing friendship networks\n # -- home_df: a pandas dataframe containing the home location of each user\n # Output:\n # -- edge_location_df: a pandas dataframe containing the friendship networks and the home location of each user\n\n # Create new dataframe by merging edges and homes to show user home location\n edge_location_df = pd.merge(edges_df, home_df, on='user')\n\n # Create new dataframe from user homes, with new names to merge with edges dataframe\n friend_df = home_df.copy()\n friend_df.columns = ['friendship', 'friend latitude', 'friend longitude']\n\n # Merge with previous dataframe to have user friendships, home location and friend home locations\n edge_location_df = pd.merge(edge_location_df, friend_df, on='friendship')\n\n # Sort by user and friendship and reset index to stay consistent\n edge_location_df = edge_location_df.sort_values(by=['user', 'friendship']).reset_index(drop=True)\n \n return edge_location_df\n\n# Function that converts iso alpha 2 country code to iso alpha 3\ndef alpha2_to_alpha3(code):\n # Input:\n # -- code: iso alpha 2 country code\n # Output:\n # -- NaN or iso alpha 3 country code\n \n # Get info from iso alpha 2 code\n country_info = pycountry.countries.get(alpha_2=code)\n \n # Convert to alpha 3 if exists\n if country_info is None:\n return np.nan\n else:\n return country_info.alpha_3\n\n# Function that converts iso alpha 2 country code to country name\ndef alpha2_to_name(code):\n # Input:\n # -- code: iso alpha 2 country code\n # Output:\n # -- NaN or country name\n \n # Get info from iso alpha 2 code\n country_info = pycountry.countries.get(alpha_2=code)\n \n # Convert to country name if exists\n if country_info is None:\n return np.nan\n else:\n return country_info.name\n \n# Function that gets iso alpha 3 country code from latitude and longitude\ndef get_country_code(lat, lon):\n # Input:\n # -- lat: location latitude\n # -- lon: location longitude\n # Output:\n # -- NaN or iso alpha 3 country code\n \n # Convert latitude and longitude to iso alpha 2 code\n country_code = reverse_geocode.search(((lat, lon), (lat, lon)))[0].get('country_code')\n \n if country_code == 'IM':\n country_code = 'GB'\n \n # Return iso alpha 3 country code\n return alpha2_to_alpha3(country_code)\n\n# Function that gets country name from latitude and longitude\ndef get_country_name(lat, lon):\n # Input:\n # -- lat: location latitude\n # -- lon: location longitude\n # Output:\n # -- NaN or country name\n \n # Convert latitude and longitude to iso alpha 2 code\n country_code = reverse_geocode.search(((lat, lon), (lat, lon)))[0].get('country_code')\n \n if country_code == 'IM':\n country_code = 'GB'\n \n # Return country name\n return alpha2_to_name(country_code)\n\n# Function that converts iso alpha 3 country code to iso alpha 2\ndef alpha3_to_alpha2(code):\n # Input:\n # -- code: iso alpha 3 country code\n # Output:\n # -- NaN or iso alpha 2 country code\n \n # Get info from iso alpha 
2 code\n country_info = pycountry.countries.get(alpha_3=code)\n \n # Convert to alpha 3 if exists\n if country_info is None:\n return np.nan\n else:\n return country_info.alpha_2\n\ndef get_continent_name(code):\n # Input:\n # -- code: iso alpha 3 country code\n # Output:\n # -- continent: name of the continent\n \n # Create continent variable\n continent = np.nan\n \n # Convert iso alpha 3 code to iso alpha 2 code\n country_code = alpha3_to_alpha2(code)\n if country_code == 'VA':\n country_code = 'IT'\n elif country_code == 'SX':\n country_code = 'NL'\n \n # Convert is alpha 2 code to continent code\n continent_code = pc.country_alpha2_to_continent_code(country_code)\n \n # Convert continent code to continent name\n if continent_code == 'NA':\n continent = 'North America'\n elif continent_code == 'SA':\n continent = 'South America'\n elif continent_code == 'AF':\n continent = 'Africa'\n elif continent_code == 'AS':\n continent = 'Asia'\n elif continent_code == 'EU':\n continent = 'Europe'\n \n # Return continent name\n return continent\n\n\n# function that returns smallest array size where sum is above threshold\ndef smallest_subset_size(arr, x): \n # Initialize current sum and minimum length \n n = len(arr)\n curr_sum = 0\n min_len = n + 1\n \n # Initialize starting and ending indexes \n start = 0\n end = 0\n while (end < n): \n \n # Keep adding array elements while current \n # sum is smaller than x \n while (curr_sum <= x and end < n): \n curr_sum += arr[end] \n end+= 1\n \n # If current sum becomes greater than x. \n while (curr_sum > x and start < n): \n \n # Update minimum length if needed \n if (end - start < min_len): \n min_len = end - start \n \n # remove starting elements \n curr_sum -= arr[start] \n start+= 1\n \n return min_len \n\n# Function that converts geographical coordinates to distance in km\ndef geo_to_km(lat_0_deg, lon_0_deg, lat_1_deg, lon_1_deg):\n # Input:\n # -- lat_0_deg: a scalar of latitude of location 0\n # -- lon_0_deg: a scalar of longitude of location 0\n # -- lat_1_deg: a scalar of latitude of location 1\n # -- lon_1_deg: a scalar of longitude of location 1\n # Output:\n # -- d: a scalar of the distance between location 0 and 1\n \n lat_0 = np.deg2rad(lat_0_deg)\n lon_0 = np.deg2rad(lon_0_deg)\n lat_1 = np.deg2rad(lat_1_deg)\n lon_1 = np.deg2rad(lon_1_deg) \n d_lat = lat_1-lat_0\n d_lon = lon_1-lon_0\n \n a = np.sin(d_lat/2)**2 + np.cos(lat_0)*np.cos(lat_1)*np.sin(d_lon/2)**2 \n d = 6371*2*np.arctan2(np.sqrt(a), np.sqrt(1-a))\n \n return d"
] |
[
[
"numpy.sin",
"pandas.merge",
"numpy.sqrt",
"numpy.cos",
"numpy.deg2rad"
]
] |
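The `geo_to_km` helper above is the haversine great-circle formula with an Earth radius of 6371 km. A minimal standalone sketch of the same computation (the city coordinates below are rounded and chosen purely for illustration):

```python
import numpy as np

# Haversine distance, mirroring geo_to_km above (Earth radius 6371 km)
def haversine_km(lat0, lon0, lat1, lon1):
    lat0, lon0, lat1, lon1 = map(np.deg2rad, (lat0, lon0, lat1, lon1))
    a = np.sin((lat1 - lat0) / 2) ** 2 + \
        np.cos(lat0) * np.cos(lat1) * np.sin((lon1 - lon0) / 2) ** 2
    return 6371 * 2 * np.arctan2(np.sqrt(a), np.sqrt(1 - a))

# Paris (48.86, 2.35) to London (51.51, -0.13): roughly 344 km
print(haversine_km(48.86, 2.35, 51.51, -0.13))
```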
bryanhe/tutorials
|
[
"37b50b7c11fb1fbd6120af3cc00a0f303f84224e"
] |
[
"beginner_source/torchvision_video_tutorial.py"
] |
[
"# -*- coding: utf-8 -*-\n\"\"\"\nTransfer Learning for Computer Vision Tutorial\n==============================================\n**Author**: `Sasank Chilamkurthy <https://chsasank.github.io>`_\n\nIn this tutorial, you will learn how to train a convolutional neural network for\nimage classification using transfer learning. You can read more about the transfer\nlearning at `cs231n notes <https://cs231n.github.io/transfer-learning/>`__\n\nQuoting these notes,\n\n In practice, very few people train an entire Convolutional Network\n from scratch (with random initialization), because it is relatively\n rare to have a dataset of sufficient size. Instead, it is common to\n pretrain a ConvNet on a very large dataset (e.g. ImageNet, which\n contains 1.2 million images with 1000 categories), and then use the\n ConvNet either as an initialization or a fixed feature extractor for\n the task of interest.\n\nThese two major transfer learning scenarios look as follows:\n\n- **Finetuning the convnet**: Instead of random initializaion, we\n initialize the network with a pretrained network, like the one that is\n trained on imagenet 1000 dataset. Rest of the training looks as\n usual.\n- **ConvNet as fixed feature extractor**: Here, we will freeze the weights\n for all of the network except that of the final fully connected\n layer. This last fully connected layer is replaced with a new one\n with random weights and only this layer is trained.\n\n\"\"\"\n# License: BSD\n# Author: Sasank Chilamkurthy\n\nfrom __future__ import print_function, division\n\nimport torch\nimport torch.nn as nn\nimport torch.optim as optim\nfrom torch.optim import lr_scheduler\nimport numpy as np\nimport torchvision\nfrom torchvision import datasets, models, transforms\nimport matplotlib.pyplot as plt\nimport time\nimport os\nimport copy\n\nplt.ion() # interactive mode\n\n######################################################################\n# Load Data\n# ---------\n#\n# We will use torchvision and torch.utils.data packages for loading the\n# data. Testing...\n#\n# The problem we're going to solve today is to train a model to classify\n# **ants** and **bees**. We have about 120 training images each for ants and bees.\n# There are 75 validation images for each class. Usually, this is a very\n# small dataset to generalize upon, if trained from scratch. Since we\n# are using transfer learning, we should be able to generalize reasonably\n# well.\n#\n# This dataset is a very small subset of imagenet.\n#\n# .. 
Note ::\n# Download the data from\n# `here <https://download.pytorch.org/tutorial/hymenoptera_data.zip>`_\n# and extract it to the current directory.\n\n# Data augmentation and normalization for training\n# Just normalization for validation\ndata_transforms = {\n 'train': transforms.Compose([\n transforms.RandomResizedCrop(224),\n transforms.RandomHorizontalFlip(),\n transforms.ToTensor(),\n transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])\n ]),\n 'val': transforms.Compose([\n transforms.Resize(256),\n transforms.CenterCrop(224),\n transforms.ToTensor(),\n transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])\n ]),\n}\n\ndata_dir = 'data/hymenoptera_data'\nimage_datasets = {x: datasets.ImageFolder(os.path.join(data_dir, x),\n data_transforms[x])\n for x in ['train', 'val']}\ndataloaders = {x: torch.utils.data.DataLoader(image_datasets[x], batch_size=4,\n shuffle=True, num_workers=4)\n for x in ['train', 'val']}\ndataset_sizes = {x: len(image_datasets[x]) for x in ['train', 'val']}\nclass_names = image_datasets['train'].classes\n\ndevice = torch.device(\"cuda:0\" if torch.cuda.is_available() else \"cpu\")\n\n######################################################################\n# Visualize a few images\n# ^^^^^^^^^^^^^^^^^^^^^^\n# Let's visualize a few training images so as to understand the data\n# augmentations.\n\ndef imshow(inp, title=None):\n \"\"\"Imshow for Tensor.\"\"\"\n inp = inp.numpy().transpose((1, 2, 0))\n mean = np.array([0.485, 0.456, 0.406])\n std = np.array([0.229, 0.224, 0.225])\n inp = std * inp + mean\n inp = np.clip(inp, 0, 1)\n plt.imshow(inp)\n if title is not None:\n plt.title(title)\n plt.pause(0.001) # pause a bit so that plots are updated\n\n\n# Get a batch of training data\ninputs, classes = next(iter(dataloaders['train']))\n\n# Make a grid from batch\nout = torchvision.utils.make_grid(inputs)\n\nimshow(out, title=[class_names[x] for x in classes])\n\n\n######################################################################\n# Training the model\n# ------------------\n#\n# Now, let's write a general function to train a model. 
Here, we will\n# illustrate:\n#\n# - Scheduling the learning rate\n# - Saving the best model\n#\n# In the following, parameter ``scheduler`` is an LR scheduler object from\n# ``torch.optim.lr_scheduler``.\n\n\ndef train_model(model, criterion, optimizer, scheduler, num_epochs=25):\n since = time.time()\n\n best_model_wts = copy.deepcopy(model.state_dict())\n best_acc = 0.0\n\n for epoch in range(num_epochs):\n print('Epoch {}/{}'.format(epoch, num_epochs - 1))\n print('-' * 10)\n\n # Each epoch has a training and validation phase\n for phase in ['train', 'val']:\n if phase == 'train':\n model.train() # Set model to training mode\n else:\n model.eval() # Set model to evaluate mode\n\n running_loss = 0.0\n running_corrects = 0\n\n # Iterate over data.\n for inputs, labels in dataloaders[phase]:\n inputs = inputs.to(device)\n labels = labels.to(device)\n\n # zero the parameter gradients\n optimizer.zero_grad()\n\n # forward\n # track history if only in train\n with torch.set_grad_enabled(phase == 'train'):\n outputs = model(inputs)\n _, preds = torch.max(outputs, 1)\n loss = criterion(outputs, labels)\n\n # backward + optimize only if in training phase\n if phase == 'train':\n loss.backward()\n optimizer.step()\n\n # statistics\n running_loss += loss.item() * inputs.size(0)\n running_corrects += torch.sum(preds == labels.data)\n if phase == 'train':\n scheduler.step()\n\n epoch_loss = running_loss / dataset_sizes[phase]\n epoch_acc = running_corrects.double() / dataset_sizes[phase]\n\n print('{} Loss: {:.4f} Acc: {:.4f}'.format(\n phase, epoch_loss, epoch_acc))\n\n # deep copy the model\n if phase == 'val' and epoch_acc > best_acc:\n best_acc = epoch_acc\n best_model_wts = copy.deepcopy(model.state_dict())\n\n print()\n\n time_elapsed = time.time() - since\n print('Training complete in {:.0f}m {:.0f}s'.format(\n time_elapsed // 60, time_elapsed % 60))\n print('Best val Acc: {:4f}'.format(best_acc))\n\n # load best model weights\n model.load_state_dict(best_model_wts)\n return model\n\n\n######################################################################\n# Visualizing the model predictions\n# ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n#\n# Generic function to display predictions for a few images\n#\n\ndef visualize_model(model, num_images=6):\n was_training = model.training\n model.eval()\n images_so_far = 0\n fig = plt.figure()\n\n with torch.no_grad():\n for i, (inputs, labels) in enumerate(dataloaders['val']):\n inputs = inputs.to(device)\n labels = labels.to(device)\n\n outputs = model(inputs)\n _, preds = torch.max(outputs, 1)\n\n for j in range(inputs.size()[0]):\n images_so_far += 1\n ax = plt.subplot(num_images//2, 2, images_so_far)\n ax.axis('off')\n ax.set_title('predicted: {}'.format(class_names[preds[j]]))\n imshow(inputs.cpu().data[j])\n\n if images_so_far == num_images:\n model.train(mode=was_training)\n return\n model.train(mode=was_training)\n\n######################################################################\n# Finetuning the convnet\n# ----------------------\n#\n# Load a pretrained model and reset final fully connected layer.\n#\n\nmodel_ft = models.resnet18(pretrained=True)\nnum_ftrs = model_ft.fc.in_features\n# Here the size of each output sample is set to 2.\n# Alternatively, it can be generalized to nn.Linear(num_ftrs, len(class_names)).\nmodel_ft.fc = nn.Linear(num_ftrs, 2)\n\nmodel_ft = model_ft.to(device)\n\ncriterion = nn.CrossEntropyLoss()\n\n# Observe that all parameters are being optimized\noptimizer_ft = optim.SGD(model_ft.parameters(), lr=0.001, 
momentum=0.9)\n\n# Decay LR by a factor of 0.1 every 7 epochs\nexp_lr_scheduler = lr_scheduler.StepLR(optimizer_ft, step_size=7, gamma=0.1)\n\n######################################################################\n# Train and evaluate\n# ^^^^^^^^^^^^^^^^^^\n#\n# It should take around 15-25 min on CPU. On GPU though, it takes less than a\n# minute.\n#\n\nmodel_ft = train_model(model_ft, criterion, optimizer_ft, exp_lr_scheduler,\n num_epochs=25)\n\n######################################################################\n#\n\nvisualize_model(model_ft)\n\n\n######################################################################\n# ConvNet as fixed feature extractor\n# ----------------------------------\n#\n# Here, we need to freeze all the network except the final layer. We need\n# to set ``requires_grad == False`` to freeze the parameters so that the\n# gradients are not computed in ``backward()``.\n#\n# You can read more about this in the documentation\n# `here <https://pytorch.org/docs/notes/autograd.html#excluding-subgraphs-from-backward>`__.\n#\n\nmodel_conv = torchvision.models.resnet18(pretrained=True)\nfor param in model_conv.parameters():\n param.requires_grad = False\n\n# Parameters of newly constructed modules have requires_grad=True by default\nnum_ftrs = model_conv.fc.in_features\nmodel_conv.fc = nn.Linear(num_ftrs, 2)\n\nmodel_conv = model_conv.to(device)\n\ncriterion = nn.CrossEntropyLoss()\n\n# Observe that only parameters of final layer are being optimized as\n# opposed to before.\noptimizer_conv = optim.SGD(model_conv.fc.parameters(), lr=0.001, momentum=0.9)\n\n# Decay LR by a factor of 0.1 every 7 epochs\nexp_lr_scheduler = lr_scheduler.StepLR(optimizer_conv, step_size=7, gamma=0.1)\n\n\n######################################################################\n# Train and evaluate\n# ^^^^^^^^^^^^^^^^^^\n#\n# On CPU this will take about half the time compared to previous scenario.\n# This is expected as gradients don't need to be computed for most of the\n# network. However, forward does need to be computed.\n#\n\nmodel_conv = train_model(model_conv, criterion, optimizer_conv,\n exp_lr_scheduler, num_epochs=25)\n\n######################################################################\n#\n\nvisualize_model(model_conv)\n\nplt.ioff()\nplt.show()\n"
] |
[
[
"torch.nn.Linear",
"torch.optim.lr_scheduler.StepLR",
"torch.cuda.is_available",
"torch.nn.CrossEntropyLoss",
"torch.sum",
"torch.utils.data.DataLoader",
"matplotlib.pyplot.subplot",
"numpy.array",
"torch.max",
"matplotlib.pyplot.title",
"matplotlib.pyplot.figure",
"numpy.clip",
"matplotlib.pyplot.show",
"matplotlib.pyplot.ion",
"torch.no_grad",
"matplotlib.pyplot.pause",
"matplotlib.pyplot.ioff",
"torch.set_grad_enabled",
"matplotlib.pyplot.imshow"
]
] |
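In the fixed-feature-extractor scenario above, only the replacement `fc` layer should remain trainable. A quick sanity check one might run after freezing (the count of 1026 assumes ResNet-18's 512-dimensional pooled features and the 2-class head used in the tutorial):

```python
import torch.nn as nn
from torchvision import models

model = models.resnet18(pretrained=True)
for param in model.parameters():
    param.requires_grad = False                # freeze the backbone
model.fc = nn.Linear(model.fc.in_features, 2)  # new head is trainable by default

trainable = sum(p.numel() for p in model.parameters() if p.requires_grad)
print(trainable)  # 512 * 2 weights + 2 biases = 1026
```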
redherring2141/CarND-Advanced-Lane-Lines
|
[
"cb604cacaca260b03afc216864166d04209c9829"
] |
[
"files_for_submission/classLine.py"
] |
[
"import numpy as np\nfrom processFrame import process_frame\n\n# Define a class to receive the characteristics of each line detection\nclass Line():\n def __init__(self):\n # was the line detected in the last iteration?\n self.detected = False \n # x values of the last n fits of the line\n #self.recent_xfitted = [] \n #average x values of the fitted line over the last n iterations\n self.bestx = None \n #polynomial coefficients averaged over the last n iterations\n self.best_fit = None \n #polynomial coefficients for the most recent fit\n self.current_fit = [np.array([False])] \n #radius of curvature of the line in some units\n self.radius_of_curvature = None \n #distance in meters of vehicle center from the line\n self.line_base_pos = None \n #difference in fit coefficients between last and new fits\n self.diffs = np.array([0,0,0], dtype='float') \n #x values for detected line pixels\n self.allx = None \n #y values for detected line pixels\n self.ally = None\n \n #Added\n self.dist_bw_lines = []\n self.l_curr_fitx = []\n self.r_curr_fitx = []\n \n def process_frame(self, image, flag=False):\n return process_frame(self, image, flag)\n"
] |
[
[
"numpy.array"
]
] |
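The `Line` class above is a mutable state container that `process_frame` (defined elsewhere in the repo) is expected to fill in. As a hedged sketch of one way `best_fit` could be smoothed across video frames, assuming an exponential moving average over polynomial coefficients (the `alpha` factor and the sample coefficients are illustrative assumptions, not taken from the repo):

```python
import numpy as np
from classLine import Line  # the class defined above

line = Line()
alpha = 0.2                               # hypothetical smoothing factor
new_fit = np.array([1e-4, -0.05, 320.0])  # example quadratic fit coefficients

if line.best_fit is None:
    line.best_fit = new_fit
else:
    line.diffs = new_fit - line.best_fit  # track coefficient drift
    line.best_fit = (1 - alpha) * line.best_fit + alpha * new_fit
```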
tiefenauer/ip9
|
[
"4d50ee288f8a00f64a6f4a7d80639d3dc89df4e8",
"4d50ee288f8a00f64a6f4a7d80639d3dc89df4e8"
] |
[
"src/util/pipeline_util.py",
"src/create_pc_corpus.py"
] |
[
"\"\"\"\nUtility functions for end-to-end tasks\n\"\"\"\nimport json\nimport os\nfrom os.path import join, basename, exists, pardir, abspath\nfrom shutil import copyfile\n\nimport numpy as np\nimport pandas as pd\nfrom bs4 import BeautifulSoup\nfrom pattern3.metrics import levenshtein_similarity\n\nfrom constants import ASSETS_DIR\nfrom util.audio_util import frame_to_ms\nfrom util.lm_util import ler_norm\n\n\ndef create_demo_files(target_dir, audio_src_path, transcript, df_alignments, df_stats):\n audio_dst_path = join(target_dir, 'audio.mp3')\n copyfile(audio_src_path, audio_dst_path)\n print(f'saved audio to {audio_dst_path}')\n\n transcript_path = join(target_dir, 'transcript.txt')\n with open(transcript_path, 'w', encoding='utf-8') as f:\n f.write(transcript)\n print(f'saved transcript to {transcript_path}')\n\n json_data = create_alignment_json(df_alignments)\n alignment_json_path = join(target_dir, 'alignment.json')\n with open(alignment_json_path, 'w', encoding='utf-8') as f:\n json.dump(json_data, f, indent=2, ensure_ascii=False)\n print(f'saved alignment information to {alignment_json_path}')\n\n demo_id = basename(target_dir)\n add_demo_to_index(target_dir, demo_id, df_alignments, df_stats)\n create_demo_index(target_dir, demo_id, audio_src_path, transcript, df_stats)\n\n assets_dir = join(ASSETS_DIR, 'demo')\n for file in [file for _, _, files in os.walk(assets_dir) for file in files]:\n copyfile(join(assets_dir, file), join(target_dir, file))\n copyfile(join(ASSETS_DIR, 'start_server.sh'), join(join(target_dir, pardir), 'start_server.sh'))\n\n\ndef create_alignment_json(df_transcripts):\n alignments = [{'id': ix,\n 'transcript': row['transcript'],\n 'text': row['alignment'],\n 'audio_start': row['audio_start'],\n 'audio_end': row['audio_end'],\n 'text_start': row['text_start'],\n 'text_end': row['text_end']\n } for ix, row in df_transcripts.iterrows()]\n return {'alignments': alignments}\n\n\ndef create_demo_index(target_dir, demo_id, audio_src_path, transcript, df_stats):\n template_path = join(ASSETS_DIR, '_template.html')\n soup = BeautifulSoup(open(template_path), 'html.parser')\n soup.title.string = demo_id\n soup.find(id='demo_title').string = f'Forced Alignment for {demo_id}'\n soup.find(id='target').string = transcript\n\n def create_tr(*args):\n tr = soup.new_tag('tr')\n for arg in args:\n td = soup.new_tag('td')\n td.string = str(arg)\n tr.append(td)\n return tr\n\n metrics_table = soup.find(id='metrics')\n metrics_table.append(create_tr('directory', target_dir))\n metrics_table.append(create_tr('audio file', audio_src_path))\n\n for column in df_stats:\n metrics_table.append(create_tr(column, df_stats.loc[0, column]))\n\n demo_index_path = join(target_dir, 'index.html')\n with open(demo_index_path, 'w', encoding='utf-8') as f:\n f.write(soup.prettify())\n\n return demo_index_path\n\n\ndef add_demo_to_index(target_dir, demo_id, df_alignments, df_stats):\n index_path = join(join(target_dir, pardir), 'index.html')\n if not exists(index_path):\n copyfile(join(ASSETS_DIR, '_index_template.html'), index_path)\n\n soup = BeautifulSoup(open(index_path), 'html.parser')\n table = soup.find(id='demo_list')\n\n if not soup.find(id=demo_id):\n tr = soup.new_tag('tr', id=demo_id)\n\n a = soup.new_tag('a', href=demo_id)\n a.string = demo_id\n td = soup.new_tag('td')\n td.append(a)\n tr.append(td)\n\n precision = df_stats.loc[0, 'precision']\n td = soup.new_tag('td')\n td.string = f'{precision:.3f}'\n tr.append(td)\n\n recall = df_stats.loc[0, 'recall']\n td = soup.new_tag('td')\n 
td.string = f'{recall:.3f}'\n tr.append(td)\n\n f_score = df_stats.loc[0, 'f-score']\n td = soup.new_tag('td')\n td.string = f'{f_score:.3f}'\n tr.append(td)\n\n ler = df_stats.loc[0, 'LER']\n td = soup.new_tag('td')\n td.string = f'{ler:.3f}'\n tr.append(td)\n\n similarity = df_stats.loc[0, 'similarity']\n td = soup.new_tag('td')\n td.string = f'{similarity:.3f}'\n tr.append(td)\n\n avg_alignment_length = int(np.mean([len(al) for al in df_alignments['alignment']]))\n td = soup.new_tag('td')\n td.string = f'{avg_alignment_length}'\n tr.append(td)\n\n n_alignments = df_stats.loc[0, '# alignments']\n td = soup.new_tag('td')\n td.string = f'{n_alignments}'\n tr.append(td)\n\n n_words = df_stats.loc[0, '# words']\n td = soup.new_tag('td')\n td.string = f'{n_words}'\n tr.append(td)\n\n n_characters = df_stats.loc[0, '# characters']\n td = soup.new_tag('td')\n td.string = f'{n_characters}'\n tr.append(td)\n\n table.append(tr)\n\n with open(index_path, 'w') as f:\n f.write(soup.prettify())\n\n\ndef update_index(target_dir, lang, num_aligned, df_keras=None, keras_path=None, df_ds=None, ds_path=None, lm_path=None,\n vocab_path=None):\n index_path = join(target_dir, 'index.html')\n soup = BeautifulSoup(open(index_path), 'html.parser')\n soup.find(id='title').string = f'Forced Alignment Demo ({lang})'\n\n soup.find(id='num_aligned').string = str(num_aligned)\n soup.find(id='keras_path').string = keras_path if keras_path else ''\n soup.find(id='ds_path').string = ds_path if ds_path else ''\n soup.find(id='lm_path').string = lm_path if lm_path else ''\n soup.find(id='vocab_path').string = vocab_path if vocab_path else ''\n\n if df_keras is not None:\n av_p = df_keras['precision'].mean()\n av_r = df_keras['recall'].mean()\n av_f = df_keras['f-score'].mean()\n soup.find(id='precision_keras').string = f'{av_p:.4f}'\n soup.find(id='recall_keras').string = f'{av_r:.4f}'\n soup.find(id='f-score_keras').string = f'{av_f:.4f}'\n\n if df_ds is not None:\n av_p = df_ds['precision'].mean()\n av_r = df_ds['recall'].mean()\n av_f = df_ds['f-score'].mean()\n soup.find(id='precision_ds').string = f'{av_p:.4f}'\n soup.find(id='recall_ds').string = f'{av_r:.4f}'\n soup.find(id='f-score_ds').string = f'{av_f:.4f}'\n\n with open(index_path, 'w') as f:\n f.write(soup.prettify())\n\n\ndef create_alignments_dataframe(voiced_segments, transcripts, sample_rate):\n alignments = []\n for i, (voice_segment, transcript) in enumerate(zip(voiced_segments, transcripts)):\n audio_start = frame_to_ms(voice_segment.start_frame, sample_rate)\n audio_end = frame_to_ms(voice_segment.end_frame, sample_rate)\n alignments.append([transcript, audio_start, audio_end])\n\n df_alignments = pd.DataFrame(alignments, columns=['transcript', 'audio_start', 'audio_end'])\n df_alignments.index.name = 'id'\n return df_alignments\n\n\ndef query_asr_params(args):\n \"\"\"\n Helper function to query ASR model from user if not set in args\n \"\"\"\n keras_path = None\n if not args.keras_path and not args.ds_path:\n args.keras_path = input('Enter path to directory containing Keras model (*.h5) or leave blank to use DS: ')\n if args.keras_path:\n keras_path = abspath(args.keras_path)\n if not exists(keras_path):\n raise ValueError(f'ERROR: Keras model not found at {keras_path}')\n\n ds_path, ds_alpha_path, ds_trie_path = None, None, None\n if not keras_path and not args.ds_path:\n while not args.ds_path:\n args.ds_path = input('Enter path to DeepSpeech model (*.pbmm): ')\n while not args.ds_alpha_path:\n args.ds_alpha_path = input('Enter path to alphabet file 
(*.txt): ')\n while not args.ds_trie_path:\n args.ds_trie_path = input('Enter path to trie file: ')\n if args.ds_path:\n ds_path = abspath(args.ds_path)\n if not exists(ds_path):\n raise ValueError(f'ERROR: DS model not found at {ds_path}')\n\n if not args.ds_alpha_path:\n raise ValueError('ERROR: alphabet path must be specified when using DeepSpeech model')\n ds_alpha_path = abspath(args.ds_alpha_path)\n if not exists(ds_alpha_path):\n raise ValueError(f'ERROR: alphabet not found at {ds_alpha_path}')\n\n if not args.ds_trie_path:\n raise ValueError('ERROR: trie must be specified when using DeepSpeech model')\n ds_trie_path = abspath(args.ds_trie_path)\n if not exists(ds_trie_path):\n raise ValueError(f'ERROR: Trie not found at {ds_trie_path}')\n\n return keras_path, ds_path, ds_alpha_path, ds_trie_path\n\n\ndef query_lm_params(args):\n if not args.lm_path:\n args.lm_path = input('Enter path to LM to use for spell checking (enter nothing for no spell checking): ')\n if args.lm_path:\n if not exists(abspath(args.lm_path)):\n raise ValueError(f'ERROR: LM not found at {abspath(args.lm_path)}!')\n if not args.vocab_path:\n args.vocab_path = input('Enter path to vocabulary file to use for spell checker: ')\n if args.vocab_path:\n if not exists(abspath(args.vocab_path)):\n raise ValueError(f'ERROR: Vocabulary not found at {abspath(args.vocab_path)}!')\n\n lm_path = abspath(args.lm_path) if args.lm_path else ''\n vocab_path = abspath(args.vocab_path) if args.vocab_path else ''\n return lm_path, vocab_path\n\n\ndef calculate_stats(df_alignments, model_path, transcript):\n partial_transcripts = df_alignments['transcript'].values\n alignments = df_alignments['alignment'].values\n\n # Precision = similarity between transcript and alignment\n p = np.mean([levenshtein_similarity(t, a) for t, a in zip(partial_transcripts, alignments)])\n # Recall = fraction of aligned text\n merged_alignments = ' '.join(a for a in alignments if a)\n r = len(merged_alignments) / len(transcript)\n # F-Score\n f = 2 * p * r / (p + r)\n\n ler_avg = np.mean([ler_norm(gt, al) for gt, al in zip(partial_transcripts, alignments)])\n\n data = [[model_path, len(alignments), len(transcript.split()), len(transcript), p, r, f, ler_avg]]\n columns = ['model path', '# alignments', '# words', '# characters', 'precision', 'recall', 'f-score', 'LER']\n return pd.DataFrame(data, columns=columns)\n",
"# Create ReadyLingua Corpus\nimport argparse\nimport math\nimport os\nimport sys\nfrom collections import Counter\nfrom glob import glob\nfrom os import makedirs\nfrom os.path import exists, join, basename, dirname\nfrom pathlib import Path\nfrom shutil import copyfile\n\nimport pandas as pd\nfrom lxml import etree\nfrom tqdm import tqdm\n\nfrom util.audio_util import resample_frame, resample, crop_segments\nfrom util.corpus_util import find_file_by_suffix\nfrom util.log_util import create_args_str\nfrom util.string_util import normalize, contains_numeric\n\nLANGUAGES = { # mapping from folder names to language code\n 'Deutsch': 'de',\n 'Englisch': 'en',\n 'Französisch': 'fr',\n 'Italienisch': 'it',\n 'Spanisch': 'es'\n}\n\nparser = argparse.ArgumentParser(description=\"\"\"Create PodClub corpus from raw files\"\"\")\nparser.add_argument('-f', '--file', help='Dummy argument for Jupyter Notebook compatibility')\nparser.add_argument('-s', '--source_root', default='/media/daniel/Data/corpus/podclub-raw/PodClubDaten/Deutsch',\n help=f'(optional) source root directory')\nparser.add_argument('-t', '--target_root', default='/media/daniel/IP9/corpora/podclub',\n help=f'(optional) target root directory')\nparser.add_argument('-m', '--max_entries', type=int, default=None,\n help='(optional) maximum number of corpus entries to process. Default=None=\\'all\\'')\nparser.add_argument('-o', '--overwrite', default=False, action='store_true',\n help='(optional) overwrite existing audio data if already present. Default=False)')\nargs = parser.parse_args()\n\n\ndef main():\n print(create_args_str(args))\n print(f'Processing files from {args.source_root} and saving them in {args.target_root}')\n corpus, corpus_file = create_corpus(args.source_root, args.target_root, args.max_entries)\n print(f'Done! 
Corpus with {len(corpus)} entries saved to {corpus_file}')\n\n\ndef create_corpus(source_dir, target_dir, max_entries=None):\n if not exists(source_dir):\n print(f\"ERROR: Source root {source_dir} does not exist!\")\n exit(0)\n if not exists(target_dir):\n makedirs(target_dir)\n\n df = create_segments(source_dir, target_dir, max_entries)\n index_file = join(target_dir, 'index.csv')\n df.to_csv(index_file)\n\n return df, index_file\n\n\ndef create_segments(source_dir, target_dir, max_entries):\n \"\"\" Iterate through all leaf directories that contain the audio and the alignment files \"\"\"\n print('Collecting files')\n\n directories = [dirname(wav_file) for wav_file in glob(source_dir + '/**/*.wav', recursive=True)]\n\n segments = []\n progress = tqdm(directories, total=min(len(directories), max_entries or math.inf), file=sys.stderr, unit='entries')\n for source_dir in progress:\n progress.set_description(f'{source_dir:{100}}')\n\n audio_file, transcript_file, segmentation_file, index_file = collect_files(source_dir)\n\n if not all(file is not None and len(file.strip()) > 0 for file in\n [audio_file, transcript_file, segmentation_file, index_file]):\n print(f'Skipping directory (not all files found): {source_dir}')\n continue\n\n entry_id, entry_name, lang, rate = collect_corpus_entry_parms(source_dir, index_file, audio_file)\n\n segment_infos = extract_segment_infos(index_file, transcript_file, rate, lang)\n crop_start, crop_end = crop_segments(segment_infos)\n\n wav_file = join(target_dir, entry_id + \".wav\")\n if not exists(wav_file) or args.overwrite:\n resample(audio_file, wav_file, crop_start, crop_end)\n\n # copy unnormalized audio file to target destination\n copyfile(transcript_file, join(target_dir, f'{entry_id}.txt'))\n\n # create segment\n for segment_info in segment_infos:\n subset = 'n/a' # must be set after all segments have been processed\n audio_file = basename(wav_file)\n start_frame = segment_info['start_frame']\n end_frame = segment_info['end_frame']\n transcript = segment_info['transcript']\n duration = (end_frame - start_frame) / 16000\n numeric = contains_numeric(transcript)\n segments.append([entry_id, subset, lang, audio_file, start_frame, end_frame, duration, transcript, numeric])\n\n columns = ['entry_id', 'subset', 'language', 'audio_file', 'start_frame', 'end_frame', 'duration', 'transcript',\n 'numeric']\n df = pd.DataFrame(segments, columns=columns)\n\n \"\"\"\n because ReadyLingua data is not pre-partitioned into train-/dev-/test-data this needs to be done after all\n corpus entries and segments are known\n \"\"\"\n total_audio = df.groupby('language')['duration'].sum().to_dict()\n audio_per_language = Counter()\n for (id, lang), df_entry in df.groupby(['entry_id', 'language']):\n if audio_per_language[lang] > 0.9 * total_audio[lang]:\n subset = 'test'\n elif audio_per_language[lang] > 0.8 * total_audio[lang]:\n subset = 'dev'\n else:\n subset = 'train'\n df.loc[df['entry_id'] == id, 'subset'] = subset\n audio_per_language[lang] += df_entry['duration'].sum()\n\n return df\n\n\ndef collect_files(source_dir):\n audio_file, transcript_file, segmentation_file, index_file = scan_content_dir(source_dir)\n\n # check if files are set\n if not audio_file:\n print('WARNING: audio file is not set')\n return None, None, None, None\n if not transcript_file:\n print('WARNING: transcript file is not set')\n return None, None, None, None\n if not segmentation_file:\n print('WARNING: segmentation file is not set')\n return None, None, None, None\n if not index_file:\n 
print('WARNING: index file is not set')\n return None, None, None, None\n\n audio_file = join(source_dir, audio_file)\n transcript_file = join(source_dir, transcript_file)\n segmentation_file = join(source_dir, segmentation_file)\n index_file = join(source_dir, index_file)\n\n # check if files exist\n if not exists(audio_file):\n print(f'WARNING: file {audio_file} does not exist')\n return None, None, None, None\n if not exists(transcript_file):\n print(f'WARNING: file {transcript_file} does not exist')\n return None, None, None, None\n if not exists(segmentation_file):\n print(f'WARNING: file {segmentation_file} does not exist')\n return None, None, None, None\n if not exists(index_file):\n print(f'WARNING: file {index_file} does not exist')\n return None, None, None, None\n\n return audio_file, transcript_file, segmentation_file, index_file\n\n\ndef parse_project_file(project_file):\n doc = etree.parse(project_file)\n for element in ['AudioFiles/Name', 'TextFiles/Name', 'SegmentationFiles/Name', 'IndexFiles/Name']:\n if doc.find(element) is None:\n print(f'Invalid project file (missing element \\'{element}\\'): {project_file}')\n return None, None, None, None\n\n audio_file = doc.find('AudioFiles/Name').text\n transcript_file = doc.find('TextFiles/Name').text\n segmentation_file = doc.find('SegmentationFiles/Name').text\n index_file = doc.find('IndexFiles/Name').text\n return audio_file, transcript_file, segmentation_file, index_file\n\n\ndef scan_content_dir(content_dir):\n audio_file = find_file_by_suffix(content_dir, '.wav')\n text_file = find_file_by_suffix(content_dir, '.txt')\n segmentation_file = find_file_by_suffix(content_dir, ' - Segmentation.xml')\n index_file = find_file_by_suffix(content_dir, ' - Index.xml')\n return audio_file, text_file, segmentation_file, index_file\n\n\ndef collect_corpus_entry_parms(directory, index_file, audio_file):\n entry_name = basename(directory)\n entry_id = entry_name\n\n # find language\n lang = [folder for folder in directory.split(os.sep) if folder in LANGUAGES.keys()]\n language = LANGUAGES[lang[0]] if lang else 'unknown'\n\n # find sampling rate\n doc = etree.parse(index_file)\n rate = int(doc.find('SamplingRate').text)\n\n return entry_id, entry_name, language, rate\n\n\ndef extract_segment_infos(index_file, transcript_file, src_rate, language):\n # segmentation = collect_segmentation(segmentation_file)\n speeches = collect_speeches(index_file)\n transcript = Path(transcript_file).read_text(encoding='utf-8')\n\n # merge information from index file (speech parts) with segmentation information\n segment_infos = []\n for speech_meta in speeches:\n start_text = speech_meta['start_text']\n end_text = speech_meta['end_text'] + 1 # strange indexing in the source data\n speech_transcript = normalize(transcript[start_text:end_text], language)\n if len(speech_transcript.strip()) == 0:\n continue\n\n segment_infos.append({\n 'start_frame': resample_frame(speech_meta['start_frame'], src_rate=src_rate),\n 'end_frame': resample_frame(speech_meta['end_frame'], src_rate=src_rate),\n 'transcript': speech_transcript\n })\n\n return segment_infos\n\n\ndef collect_segmentation(segmentation_file):\n segments = []\n doc = etree.parse(segmentation_file)\n for element in doc.findall('Segments/Segment'):\n start_frame = int(element.attrib['start'])\n end_frame = int(element.attrib['end'])\n segment = {'class': element.attrib['class'], 'start_frame': start_frame, 'end_frame': end_frame}\n segments.append(segment)\n\n return sorted(segments, key=lambda s: 
s['start_frame'])\n\n\ndef collect_speeches(index_file):\n speeches = []\n doc = etree.parse(index_file)\n for element in doc.findall('TextAudioIndex'):\n start_text = int(element.find('TextStartPos').text)\n end_text = int(element.find('TextEndPos').text)\n start_frame = int(element.find('AudioStartPos').text)\n end_frame = int(element.find('AudioEndPos').text)\n\n speech = {'start_frame': start_frame, 'end_frame': end_frame, 'start_text': start_text,\n 'end_text': end_text}\n speeches.append(speech)\n return sorted(speeches, key=lambda s: s['start_frame'])\n\n\nif __name__ == '__main__':\n main()\n"
] |
[
[
"pandas.DataFrame"
],
[
"pandas.DataFrame"
]
] |
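The `create_segments` function above assigns whole corpus entries to train/dev/test by cumulative audio duration per language: once more than 80% of a language's audio has been handed out, entries go to `dev`, and above 90% to `test`. A minimal numeric sketch of that rule (the durations are made up):

```python
from collections import Counter

# one language with 100.0 units of audio in total
total = {'de': 100.0}
seen = Counter()
for entry_id, duration in [('a', 50.0), ('b', 35.0), ('c', 10.0), ('d', 5.0)]:
    if seen['de'] > 0.9 * total['de']:
        subset = 'test'
    elif seen['de'] > 0.8 * total['de']:
        subset = 'dev'
    else:
        subset = 'train'
    seen['de'] += duration
    print(entry_id, subset)   # a train, b train, c dev, d test
```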
rbeucher/pyLineSPM
|
[
"07ab561f638cae0caccd4f27c74b03f1f1364202"
] |
[
"tests/test_unit_test.py"
] |
[
"import numpy as np\n\nsurface = np.zeros((100, 2))\nsurface[:, 0] = np.linspace(0, 8*np.pi, 100)\nsurface[:, 1] = np.cos(surface[:, 0])\n\nprecip_rate = np.ones(surface[:,0].shape)\nerodibility = np.ones(surface[:,0].shape) * 0.1\nuplift_rate = np.ones(surface[:,0].shape) * 0.01\n\ndef test_import():\n import pyLineSPM as spm\n\ndef test_spm_class():\n from pyLineSPM import WillettSPM\n spm = WillettSPM(surface, precip_rate, erodibility, uplift_rate)\n assert(len(spm.rivers) == 8)\n assert(np.allclose(surface, spm.surface))\n for river in spm.rivers:\n assert( 1.0 + river.surface[:,1].min() < 1e-2)\n assert( 1.0 - river.surface[:,1].max() < 1e-2)\n\n # Test get discharge\n discharge = spm.get_discharge()"
] |
[
[
"numpy.zeros",
"numpy.ones",
"numpy.allclose",
"numpy.cos",
"numpy.linspace"
]
] |
salvacarrion/yolo4math
|
[
"57102831bbad7e70f213052664202449d41a1294",
"57102831bbad7e70f213052664202449d41a1294"
] |
[
"preprocessing/kmeans.py",
"preprocessing/check_bboxes.py"
] |
[
"import numpy as np\nfrom utils.utils import *\n\n\nclass YOLO_Kmeans:\n\n def __init__(self, cluster_number):\n self.cluster_number = cluster_number\n\n def iou(self, boxes, clusters): # 1 box -> k clusters\n n = boxes.shape[0]\n k = self.cluster_number\n\n box_area = boxes[:, 0] * boxes[:, 1]\n box_area = box_area.repeat(k)\n box_area = np.reshape(box_area, (n, k))\n\n cluster_area = clusters[:, 0] * clusters[:, 1]\n cluster_area = np.tile(cluster_area, [1, n])\n cluster_area = np.reshape(cluster_area, (n, k))\n\n box_w_matrix = np.reshape(boxes[:, 0].repeat(k), (n, k))\n cluster_w_matrix = np.reshape(np.tile(clusters[:, 0], (1, n)), (n, k))\n min_w_matrix = np.minimum(cluster_w_matrix, box_w_matrix)\n\n box_h_matrix = np.reshape(boxes[:, 1].repeat(k), (n, k))\n cluster_h_matrix = np.reshape(np.tile(clusters[:, 1], (1, n)), (n, k))\n min_h_matrix = np.minimum(cluster_h_matrix, box_h_matrix)\n inter_area = np.multiply(min_w_matrix, min_h_matrix)\n\n result = inter_area / (box_area + cluster_area - inter_area)\n return result\n\n def avg_iou(self, boxes, clusters):\n iou = self.iou(boxes, clusters)\n accuracy = np.mean([np.max(iou, axis=1)])\n return accuracy\n\n def kmeans(self, boxes, k, dist=np.median):\n box_number = boxes.shape[0]\n distances = np.empty((box_number, k))\n last_nearest = np.zeros((box_number,))\n np.random.seed()\n clusters = boxes[np.random.choice(\n box_number, k, replace=False)] # init k clusters\n while True:\n\n distances = 1 - self.iou(boxes, clusters)\n\n current_nearest = np.argmin(distances, axis=1)\n if (last_nearest == current_nearest).all():\n break # clusters won't change\n for cluster in range(k):\n clusters[cluster] = dist( # update clusters\n boxes[current_nearest == cluster], axis=0)\n\n last_nearest = current_nearest\n\n return clusters\n\n def result2txt(self, data, avg_iou, filename):\n f = open(filename, 'w')\n row = np.shape(data)[0]\n for i in range(row):\n if i == 0:\n x_y = \"%d,%d\" % (data[i][0], data[i][1])\n else:\n x_y = \"\\n%d,%d\" % (data[i][0], data[i][1])\n f.write(x_y)\n\n f.write('\\n\\n--------------------')\n f.write('\\nk={}'.format(self.cluster_number))\n f.write('\\navg_iou={}'.format(avg_iou))\n f.close()\n\n def txt2boxes(self, filename):\n f = open(filename, 'r')\n dataSet = []\n for line in f:\n x1, y1, x2, y2 = [int(x) for x in line.strip().split(',')]\n dataSet.append([int(x2-x1), int(y2-y1)])\n result = np.array(dataSet)\n f.close()\n return result\n\n def txt2clusters(self):\n all_boxes = self.txt2boxes()\n result = self.kmeans(all_boxes, k=self.cluster_number)\n result = result[np.lexsort(result.T[0, None])]\n avg_iou = self.avg_iou(all_boxes, result)\n print(\"K anchors:\\n {}\".format(result))\n print(\"Accuracy: {:.2f}%\".format(avg_iou * 100))\n self.result2txt(result, avg_iou)\n return avg_iou\n\n def get_clusters(self, box_sizes):\n all_boxes = np.array(box_sizes)\n result = self.kmeans(all_boxes, k=self.cluster_number)\n result = result[np.lexsort(result.T[0, None])]\n avg_iou = self.avg_iou(all_boxes, result)\n print(\"K anchors:\\n {}\".format(result))\n print(\"Accuracy: {:.2f}%\".format(avg_iou * 100))\n return result, avg_iou\n\n\ndef get_boxes(json):\n # Get sizes\n box_sizes = []\n for k, v in json['annotations'].items():\n for annotations in v:\n x, y, w, h = annotations['bbox']\n box_sizes.append([w, h])\n return box_sizes\n\n\nif __name__ == \"__main__\":\n # Settings\n WIDTH, HEIGHT = (1280, 1280)\n K = 10\n subfolder = str(WIDTH)\n save_path = 'anchors/' + subfolder\n load_path = 
'/home/salvacarrion/Documents/datasets/equations/' + subfolder\n\n # Get box sizes\n JSON_DATASET = load_dataset(load_path + '/train.json')\n box_sizes = get_boxes(JSON_DATASET)\n\n # Make dir if it doesn't exist\n if not os.path.exists(save_path):\n os.makedirs(save_path)\n\n cluster_ious = {}\n for i in range(1, K+1):\n print('Clustering with k={}...'.format(i))\n cluster_number = i\n kmeans = YOLO_Kmeans(cluster_number)\n\n # Compute clusters\n result, avg_iou = kmeans.get_clusters(box_sizes)\n cluster_ious[i] = avg_iou\n\n # Save file\n kmeans.result2txt(result, avg_iou, save_path + \"/anchors_c{}.txt\".format(kmeans.cluster_number))\n\n # Save json\n with open(save_path + \"/cluster_ious.json\".format(subfolder), 'w') as f:\n json.dump(cluster_ious, f)\n\n\n",
"import time\nimport torch\nimport numpy as np\nimport json\nimport cv2\nfrom utils.utils import *\nimport random\nfrom collections import defaultdict\n\n\ndef main():\n WIDTH, HEIGHT = (1024, 1024)\n COLORS = np.array([[200, 0, 0, 255], [0, 0, 200, 255]])\n\n load_path = '../datasets/equations/resized/{}x{}-padded'.format(WIDTH, HEIGHT)\n json_dataset = load_dataset(load_path + '/train.json')\n images = json_dataset['images']\n annotations = json_dataset['annotations']\n class_names = json_dataset['categories']\n\n print('Loading images from: {}'.format(load_path))\n print('-----------------------------\\n')\n\n # Get data\n for i, image_data in enumerate(images, 1):\n image_id = str(image_data['id'])\n bboxes = annotations[image_id] # not boxes\n\n # Build paths\n filename = load_path + '/' + image_data['filename']\n\n print(\"Loading image #{} ({}/{})...\".format(image_id, i, len(images)))\n #image = Image.open(filename)\n image = cv2.imread(filename)\n\n # print(\"\\t- Performing bbox augmentation...\")\n # bboxes = augment_bboxes(bboxes)\n #\n # print(\"\\t- Performing non-maximum supression...\")\n # bboxes, total_boxes1, total_boxes2 = non_maximum_supression(bboxes)\n # print(\"\\t\\t- Boxes supressed: {} ({:.2f}%)\".format(total_boxes1 - total_boxes2, (total_boxes1 - total_boxes2) / total_boxes1 * 100))\n\n # Draw bounding boxes\n print(\"\\t- Drawing bboxes...\")\n image = Image.fromarray(image)\n for annotation in bboxes:\n cat_id = str(annotation['category_id'])\n text = class_names[cat_id]\n draw_bbox(image, annotation['bbox'], thickness=2)\n draw_labels(image, annotation['bbox'], text, cat_id, font_size=12)\n\n # Show image\n f, ax = plt.subplots(1, 1, figsize=(4, 4))\n ax.imshow(image)\n f.tight_layout()\n #f.savefig('output/image_{}.eps'.format(i)) # Doesn't work properly\n plt.show()\n\n # Finish loop\n if i == 10:\n break\n\n\nif __name__ == \"__main__\":\n pass\n main()\n"
] |
[
[
"numpy.max",
"numpy.array",
"numpy.empty",
"numpy.reshape",
"numpy.zeros",
"numpy.minimum",
"numpy.random.seed",
"numpy.argmin",
"numpy.random.choice",
"numpy.lexsort",
"numpy.tile",
"numpy.shape",
"numpy.multiply"
],
[
"numpy.array"
]
] |
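`YOLO_Kmeans.iou` above compares boxes by width and height only (anchor boxes are position-free), so the intersection reduces to the product of the pairwise minima. A scalar version of the quantity behind the `1 - IoU` k-means distance:

```python
import numpy as np

# width/height-only IoU for a single box/cluster pair
def wh_iou(box, cluster):
    inter = np.minimum(box[0], cluster[0]) * np.minimum(box[1], cluster[1])
    union = box[0] * box[1] + cluster[0] * cluster[1] - inter
    return inter / union

print(wh_iou(np.array([40, 60]), np.array([50, 50])))  # 2000 / 2900 ≈ 0.69
```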
piushvaish/dissecting-reinforcement-learning
|
[
"91c5d00d15b99b8d79a3c2e38fbc443856779273"
] |
[
"src/4/actor_critic.py"
] |
[
"#!/usr/bin/env python\n\n#MIT License\n#Copyright (c) 2017 Massimiliano Patacchiola\n#\n#Permission is hereby granted, free of charge, to any person obtaining a copy\n#of this software and associated documentation files (the \"Software\"), to deal\n#in the Software without restriction, including without limitation the rights\n#to use, copy, modify, merge, publish, distribute, sublicense, and/or sell\n#copies of the Software, and to permit persons to whom the Software is\n#furnished to do so, subject to the following conditions:\n#\n#The above copyright notice and this permission notice shall be included in all\n#copies or substantial portions of the Software.\n#\n#THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n#IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n#FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\n#AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n#LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\n#OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE\n#SOFTWARE.\n\n#In this example I will use the class gridworld to generate a 3x4 world\n#in which the cleaning robot will move. Using the Actor-Critic algorithm I\n#will estimate the utility values of each state and the state-action matrix.\n\nimport numpy as np\nfrom gridworld import GridWorld\n\ndef softmax(x):\n '''Compute softmax values of array x.\n\n @param x the input array\n @return the softmax array\n '''\n return np.exp(x - np.max(x)) / np.sum(np.exp(x - np.max(x)))\n\ndef update_critic(utility_matrix, observation, new_observation, \n reward, alpha, gamma, done):\n '''Return the updated utility matrix\n\n @param utility_matrix the matrix before the update\n @param observation the state obsrved at t\n @param new_observation the state observed at t+1\n @param reward the reward observed after the action\n @param alpha the step size (learning rate)\n @param gamma the discount factor\n @return the updated utility matrix\n @return the estimation error delta\n '''\n u = utility_matrix[observation[0], observation[1]]\n u_t1 = utility_matrix[new_observation[0], new_observation[1]]\n delta = reward + ((gamma * u_t1) - u)\n utility_matrix[observation[0], observation[1]] += alpha * delta\n return utility_matrix, delta\n\ndef update_actor(state_action_matrix, observation, action, delta, beta_matrix=None):\n '''Return the updated state-action matrix\n\n @param state_action_matrix the matrix before the update\n @param observation the state obsrved at t\n @param action taken at time t\n @param delta the estimation error returned by the critic\n @param beta_matrix a visit counter for each state-action pair\n @return the updated matrix\n '''\n col = observation[1] + (observation[0]*4)\n if beta_matrix is None: beta = 1\n else: beta = 1 / beta_matrix[action,col]\n state_action_matrix[action, col] += beta * delta\n return state_action_matrix \n\ndef main():\n\n env = GridWorld(3, 4)\n\n #Define the state matrix\n state_matrix = np.zeros((3,4))\n state_matrix[0, 3] = 1\n state_matrix[1, 3] = 1\n state_matrix[1, 1] = -1\n print(\"State Matrix:\")\n print(state_matrix)\n\n #Define the reward matrix\n reward_matrix = np.full((3,4), -0.04)\n reward_matrix[0, 3] = 1\n reward_matrix[1, 3] = -1\n print(\"Reward Matrix:\")\n print(reward_matrix)\n\n #Define the transition matrix\n transition_matrix = np.array([[0.8, 0.1, 0.0, 0.1],\n [0.1, 0.8, 0.1, 0.0],\n [0.0, 0.1, 0.8, 0.1],\n [0.1, 0.0, 
0.1, 0.8]])\n\n state_action_matrix = np.random.random((4,12))\n print(\"State-Action Matrix:\")\n print(state_action_matrix)\n\n env.setStateMatrix(state_matrix)\n env.setRewardMatrix(reward_matrix)\n env.setTransitionMatrix(transition_matrix)\n\n utility_matrix = np.zeros((3,4))\n print(\"Utility Matrix:\")\n print(utility_matrix)\n\n gamma = 0.999\n alpha = 0.001 #constant step size\n beta_matrix = np.zeros((4,12))\n tot_epoch = 300000\n print_epoch = 1000\n\n for epoch in range(tot_epoch):\n #Reset and return the first observation\n observation = env.reset(exploring_starts=True)\n for step in range(1000):\n #Estimating the action through Softmax\n col = observation[1] + (observation[0]*4)\n action_array = state_action_matrix[:, col]\n action_distribution = softmax(action_array)\n action = np.random.choice(4, 1, p=action_distribution)\n #To enable the beta parameter, uncomment the line below\n #and pass beta_matrix=beta_matrix in the update_actor call\n #beta_matrix[action,col] += 1 #increment the counter\n #Move one step in the environment and get obs and reward\n new_observation, reward, done = env.step(action)\n utility_matrix, delta = update_critic(utility_matrix, observation, \n new_observation, reward, alpha, gamma, done)\n state_action_matrix = update_actor(state_action_matrix, observation, \n action, delta, beta_matrix=None)\n observation = new_observation\n if done: break\n \n\n if(epoch % print_epoch == 0):\n print(\"\")\n print(\"Utility matrix after \" + str(epoch+1) + \" iterations:\") \n print(utility_matrix)\n print(\"\")\n print(\"State-Action matrix after \" + str(epoch+1) + \" iterations:\") \n print(state_action_matrix)\n #Time to check the utility matrix obtained\n print(\"Utility matrix after \" + str(tot_epoch) + \" iterations:\")\n print(utility_matrix)\n print(\"State-Action matrix after \" + str(tot_epoch) + \" iterations:\")\n print(state_action_matrix)\n\n\n\nif __name__ == \"__main__\":\n main()\n"
] |
[
[
"numpy.max",
"numpy.full",
"numpy.array",
"numpy.random.choice",
"numpy.zeros",
"numpy.random.random"
]
] |
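`update_critic` above is a plain TD(0) step: `delta = reward + gamma * U(s') - U(s)`, and the actor reuses `delta` to nudge the preference of the chosen action. A single-step numeric check with the constants used in `main` (the utility values are arbitrary):

```python
# one critic update with alpha=0.001, gamma=0.999 as in the script above
gamma, alpha = 0.999, 0.001
u_s, u_s1, reward = 0.5, 0.8, -0.04

delta = reward + gamma * u_s1 - u_s   # -0.04 + 0.7992 - 0.5 = 0.2592
u_s += alpha * delta                  # 0.5002592
print(delta, u_s)
```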
Jacfger/simple-stuffs
|
[
"5596a03ec7a42a2f32b695ed73afb8c6a3cce030"
] |
[
"utils/graph.py"
] |
[
"import os\nimport pickle\nimport pandas as pd\nimport matplotlib.pyplot as plt\nimport collections\nimport numpy as np\nimport sys\nsys.path.append(\"/home/zwanggc/FirstOrderQueryEstimation\")\nfrom fol import beta_query_v2, parse_formula, beta_query\nfrom data_helper import all_normal_form\n\n\nbeta_step = [15000 * i for i in range(1, 21)] + [360000, 420000, 450000]\nbeta_valid_step = [15000 * i for i in range(1, 21)] + [360000, 420000]\n\nstep_dict = {i: beta_step[i] for i in range(len(beta_step))}\ninverse_step_dict = {beta_step[i]: i for i in range(len(beta_step))}\n\nall_metrics = ['MRR', 'HITS1', 'HITS3', 'HITS10', 'retrieval_accuracy']\nmodel_supportform_dict = {\n 'Beta': ['DeMorgan', 'DeMorgan+MultiI', 'DNF+MultiIU'],\n 'Logic': ['DeMorgan', 'DeMorgan+MultiI', 'DNF+MultiIU'],\n 'NewLook': ['DNF+MultiIUd', 'DNF+MultiIUD']\n}\nmodel_compareform_dict = {\n 'Beta': ['original', 'DeMorgan', 'DeMorgan+MultiI', 'DNF', 'DNF+MultiIU'],\n 'Logic': ['original', 'DeMorgan', 'DeMorgan+MultiI', 'DNF', 'DNF+MultiIU'],\n 'NewLook': ['diff', 'DNF+diff', 'DNF+MultiIUd', 'DNF+MultiIUD']\n}\n\ndef print_loss(path):\n data_file = os.path.join(path, 'train.csv')\n df = pd.read_csv(data_file)\n loss = np.asarray(df['loss'])\n step = np.asarray(df['step'])\n loss = np.log(loss)\n plt.plot(step, loss)\n plt.ylabel('loss')\n plt.xlabel('step')\n plt.show()\n\n\ndef compare_loss(path, path2, choose_len=None):\n data_file = os.path.join(path, 'train.csv')\n data_2 = os.path.join(path2, 'beta_train.csv')\n df, df2 = pd.read_csv(data_file), pd.read_csv(data_2)\n loss, loss2 = np.asarray(df['loss']), np.asarray(df2['loss'])\n step = np.asarray(df['step'])\n minlen = min(len(loss), len(loss2))\n if choose_len:\n loss = loss[:choose_len]\n loss2 = loss2[:choose_len]\n step = step[:choose_len]\n if len(loss) > minlen:\n loss = loss[:minlen]\n else:\n loss2 = loss2[:minlen]\n\n compare = np.log(loss) - np.log(loss2)\n plt.plot(step, compare)\n plt.plot(step, np.zeros_like(compare), color='r')\n plt.ylabel('loss')\n plt.xlabel('step')\n plt.show()\n\n\ndef log_all_metrics(path, step, mode, log_meta_formula=beta_query_v2.values()):\n log = collections.defaultdict(lambda: collections.defaultdict(float))\n\n for meta_formula in log_meta_formula:\n # if meta_formula != 'p(e)|p(e)' and meta_formula != 'p(p(e)|p(e))':\n foq_instance = parse_formula(meta_formula)\n foq_formula = foq_instance.formula\n data_file = os.path.join(path, f'eval_{mode}_{foq_formula}.csv')\n df = pd.read_csv(data_file)\n step_range = np.asarray(df['step'])\n step_index = np.where(step_range == step)[0]\n for metric in df.columns:\n if metric != 'step':\n log[metric][foq_formula] = df[metric][step_index].values[0]\n averaged_metric = {}\n averaged_my_formula = [parse_formula(formula).formula for formula in log_meta_formula]\n for metric in log:\n averaged_metric[metric] = \\\n sum([log[metric][foq_formula] for foq_formula in averaged_my_formula]) / len(averaged_my_formula)\n all_data = pd.DataFrame.from_dict(log)\n all_data.to_csv(os.path.join(path, f'eval_{mode}_{step}_average.csv'))\n print(all_data)\n print(averaged_metric)\n\n'''\ndef log_old_metrics(path, step, mode, log_meta_formula=beta_query.values()):\n log = collections.defaultdict(lambda: collections.defaultdict(float))\n\n for meta_formula in log_meta_formula:\n # if meta_formula != 'p(e)|p(e)' and meta_formula != 'p(p(e)|p(e))':\n foq_instance = parse_foq_formula(meta_formula)\n foq_formula = foq_instance.meta_formula\n data_file = os.path.join(path, 
f'eval_{mode}_{foq_formula}.csv')\n df = pd.read_csv(data_file)\n step_range = np.asarray(df['step'])\n step_index = np.where(step_range == step)[0]\n for metric in df.columns:\n if metric != 'step':\n log[metric][foq_formula] = df[metric][step_index].values[0]\n averaged_metric = {}\n averaged_my_formula = [parse_foq_formula(formula).meta_formula for formula in log_meta_formula]\n for metric in log:\n averaged_metric[metric] = \\\n sum([log[metric][foq_formula] for foq_formula in averaged_my_formula]) / len(averaged_my_formula)\n all_data = pd.DataFrame.from_dict(log)\n all_data.to_csv(os.path.join(path, f'eval_{mode}_{step}_average.csv'))\n print(all_data)\n print(averaged_metric)\n'''\n\ndef read_beta_log(path, mode='test', chosen_step=None, averaged_meta_formula=beta_query_v2.values()):\n train_log = collections.defaultdict(lambda: collections.defaultdict(float))\n valid_log = collections.defaultdict(lambda: collections.defaultdict(lambda: collections.defaultdict(float)))\n test_log = collections.defaultdict(lambda: collections.defaultdict(lambda: collections.defaultdict(float)))\n beta_valid = collections.defaultdict(lambda: collections.defaultdict(list))\n beta_test = collections.defaultdict(lambda: collections.defaultdict(list))\n beta_log_path = os.path.join(path, 'train.log')\n with open(beta_log_path, 'r') as f:\n for line in f.readlines():\n if line[29:50] == 'Training average loss':\n info = line[58:]\n step, score = info.split(':')\n step, score = eval(step), eval(score)\n train_log['loss'][step] = score\n elif line[29:54] == 'Training average positive':\n info = line[75:]\n step, score = info.split(':')\n step, score = eval(step), eval(score)\n train_log['positive_loss'][step] = score\n elif line[29:54] == 'Training average negative':\n info = line[75:]\n step, score = info.split(':')\n step, score = eval(step), eval(score)\n train_log['negative_loss'][step] = score\n elif line[29:35] == 'Valid ':\n info = line[35:].split(' ')\n beta_name, metric, step, score = info[0], info[1], eval(info[4][:-1]), eval(info[5])\n if beta_name in beta_query_v2:\n foq_instance = parse_formula(beta_query_v2[beta_name])\n foq_formula = foq_instance.formula\n valid_log[step][metric][foq_formula] = score\n beta_valid[foq_formula][metric].append(score)\n elif line[29:34] == 'Test ' and line[34:38] != 'info':\n info = line[34:].split(' ')\n beta_name, metric, step, score = info[0], info[1], eval(info[4][:-1]), eval(info[5])\n if beta_name in beta_query_v2:\n foq_instance = parse_formula(beta_query_v2[beta_name])\n foq_formula = foq_instance.formula\n test_log[step][metric][foq_formula] = score\n beta_test[foq_formula][metric].append(score)\n train_data = pd.DataFrame.from_dict(train_log)\n train_data.to_csv(os.path.join(path, 'beta_train.csv'))\n # print(pd.DataFrame.from_dict(valid_log[chosen_step]))\n for step in eval(f'{mode}_log'):\n valid_data = pd.DataFrame.from_dict(valid_log[step])\n valid_data.to_csv(os.path.join(path, f'beta_valid_{step}.csv'))\n test_data = pd.DataFrame.from_dict(test_log[step])\n test_data.to_csv(os.path.join(path, f'beta_test_{step}.csv'))\n if chosen_step is not None:\n print(pd.DataFrame.from_dict(test_log[chosen_step]))\n else:\n print(test_data)\n averaged_metric = {}\n averaged_my_formula = [parse_formula(formula).formula for formula in averaged_meta_formula]\n for metric in test_log[15000]:\n if chosen_step is not None:\n averaged_metric[metric] = sum([test_log[chosen_step][metric][foq_formula]\n for foq_formula in averaged_my_formula]) / 
len(averaged_meta_formula)\n print(averaged_metric)\n return train_log, beta_valid, beta_test\n\n\ndef read_logic_log(path, mode='test', chosen_step=None, averaged_meta_formula=beta_query_v2.values()):\n train_log = collections.defaultdict(lambda: collections.defaultdict(float))\n valid_log = collections.defaultdict(lambda: collections.defaultdict(lambda: collections.defaultdict(float)))\n test_log = collections.defaultdict(lambda: collections.defaultdict(lambda: collections.defaultdict(float)))\n logic_valid = collections.defaultdict(lambda: collections.defaultdict(list))\n logic_test = collections.defaultdict(lambda: collections.defaultdict(list))\n beta_log_path = os.path.join(path, 'train.log')\n with open(beta_log_path, 'r') as f:\n for line in f.readlines():\n if line[29:50] == 'Training average loss':\n info = line[58:]\n step, score = info.split(':')\n step, score = eval(step), eval(score)\n train_log['loss'][step] = score\n elif line[29:61] == 'Training average positive_sample':\n info = line[75:]\n step, score = info.split(':')\n step, score = eval(step), eval(score)\n train_log['positive_loss'][step] = score\n elif line[29:61] == 'Training average negative_sample':\n info = line[75:]\n step, score = info.split(':')\n step, score = eval(step), eval(score)\n train_log['negative_loss'][step] = score\n elif line[29:35] == 'Valid ':\n info = line[35:].split(' ')\n beta_name, metric, step, score = info[0], info[1], eval(info[4][:-1]), eval(info[5])\n if beta_name in beta_query_v2:\n foq_instance = parse_formula(beta_query_v2[beta_name])\n foq_formula = foq_instance.formula\n valid_log[step][metric][foq_formula] = score\n logic_valid[foq_formula][metric].append(score)\n elif line[29:34] == 'Test ' and line[34:38] != 'info':\n info = line[34:].split(' ')\n beta_name, metric, step, score = info[0], info[1], eval(info[4][:-1]), eval(info[5])\n if beta_name in beta_query_v2:\n foq_instance = parse_formula(beta_query_v2[beta_name])\n foq_formula = foq_instance.formula\n test_log[step][metric][foq_formula] = score\n logic_test[foq_formula][metric].append(score)\n train_data = pd.DataFrame.from_dict(train_log)\n train_data.to_csv(os.path.join(path, 'beta_train.csv'))\n # print(pd.DataFrame.from_dict(valid_log[chosen_step]))\n for step in eval(f'{mode}_log'):\n valid_data = pd.DataFrame.from_dict(valid_log[step])\n valid_data.to_csv(os.path.join(path, f'logic_valid_{step}.csv'))\n test_data = pd.DataFrame.from_dict(test_log[step])\n test_data.to_csv(os.path.join(path, f'logic_test_{step}.csv'))\n if chosen_step is not None:\n print(pd.DataFrame.from_dict(test_log[chosen_step]))\n else:\n print(test_data)\n averaged_metric = {}\n averaged_my_formula = [parse_formula(formula).formula for formula in averaged_meta_formula]\n for metric in test_log[15000]:\n if chosen_step is not None:\n averaged_metric[metric] = sum([test_log[chosen_step][metric][foq_formula]\n for foq_formula in averaged_my_formula]) / len(averaged_meta_formula)\n print(averaged_metric)\n return train_log, logic_valid, logic_test\n\n\ndef plot_comparison(beta_log, my_log, all_formula):\n # metric in ['MRR', 'HITS1', 'HITS3', 'HITS10']:\n for metric in ['MRR']:\n for meta_formula in all_formula:\n foq_instance = parse_formula(beta_query_v2[meta_formula])\n foq_formula = foq_instance.formula\n beta_score = np.asarray(beta_log[foq_formula][metric])\n my_score = np.asarray(my_log[foq_formula][metric])\n n = len(my_score)\n beta_plot_step = np.asarray(beta_step)[:n]\n plt.plot(beta_plot_step, beta_score[:n], color='red', 
label=f'{meta_formula}_beta')\n plt.plot(beta_plot_step, my_score, linestyle=':', color='blue', label=f'{meta_formula}_ours')\n plt.title(all_formula)\n plt.legend()\n plt.show()\n\n\ndef comparison(path, all_meta_formula):\n our_train = pd.read_csv(os.path.join(path, 'train.csv'))\n my_valid = collections.defaultdict(lambda: collections.defaultdict(list))\n my_test = collections.defaultdict(lambda: collections.defaultdict(list))\n beta_train, beta_valid, beta_test = read_beta_log(path)\n for mode in ['valid', 'test']:\n for meta_formula in all_meta_formula:\n foq_instance = parse_formula(beta_query_v2[meta_formula])\n foq_formula = foq_instance.formula\n df = pd.read_csv(os.path.join(path, f'eval_{mode}_{foq_formula}.csv'))\n for metric in df.columns:\n if metric != 'step' and metric != 'num_queries':\n for i in range(len(df[metric])):\n eval(f'my_{mode}')[foq_formula][metric].append(df[metric][i])\n # plot_comparison(eval(f'beta_{mode}'), eval(f'my_{mode}'), ['1p', '2p', '3p'], mode)\n # plot_comparison(eval(f'beta_{mode}'), eval(f'my_{mode}'), ['2i', '3i'], mode)\n plot_comparison(eval(f'beta_{mode}'), eval(f'my_{mode}'), ['1p', '2p', '2i'])\n plot_comparison(eval(f'beta_{mode}'), eval(f'my_{mode}'), ['3p', '3i'])\n\n\ndef log_benchmark(folder_path, id_list, percentage=False):\n all_log = collections.defaultdict(lambda: collections.defaultdict(lambda: collections.defaultdict(float)))\n for task_id in id_list:\n id_str = str(task_id)\n id_str = '0' * (4 - len(id_str)) + id_str\n # real_index = all_formula.loc[all_formula['formula_id'] == f'type{id_str}'].index[0]\n if os.path.exists(os.path.join(folder_path, f'eval_type{id_str}.csv')):\n single_log = pd.read_csv(os.path.join(folder_path, f'eval_type{id_str}.csv'))\n index2metrics = single_log['Unnamed: 0']\n for normal_form in single_log.columns:\n if normal_form != 'Unnamed: 0':\n for index in range(len(single_log[normal_form])):\n if percentage and index2metrics[index] != 'num_queries':\n all_log[index2metrics[index]][normal_form][task_id] = single_log[normal_form][index] * 100\n else:\n all_log[index2metrics[index]][normal_form][task_id] = single_log[normal_form][index]\n for metric in all_log:\n data_metric = pd.DataFrame.from_dict(all_log[metric])\n data_metric.to_csv(os.path.join(folder_path, f'all_formula_{metric}.csv'))\n return all_log\n\n\ndef normal_form_comparison(folder_path, form1, form2, metrics, save_csv=False, percentage=False):\n all_formula = pd.read_csv('data/generated_formula_anchor_node=3.csv')\n unequal_task = set()\n form1_log, form2_log = collections.defaultdict(lambda: collections.defaultdict(float)), \\\n collections.defaultdict(lambda: collections.defaultdict(float))\n comparison_log = collections.defaultdict(list)\n for metric in metrics:\n metric_logging = pd.read_csv(os.path.join(folder_path, f'all_formula_{metric}.csv'))\n index2taskid = metric_logging['Unnamed: 0']\n for index in range(len(index2taskid)):\n taskid = index2taskid[index]\n id_str = '0' * (4 - len(str(taskid))) + str(taskid)\n formula_index = all_formula.loc[all_formula['formula_id'] == f'type{id_str}'].index[0]\n formula1, formula2 = all_formula[form1][formula_index], all_formula[form2][formula_index]\n score1, score2 = metric_logging[form1][index], metric_logging[form2][index]\n if formula1 != formula2 and str(score1) != 'nan' and str(score2) != 'nan':\n # what if two scores are same\n if taskid not in unequal_task:\n assert metric == metrics[0]\n unequal_task.add(taskid)\n form1_log[metric][taskid], form2_log[metric][taskid] = score1, 
score2\n if len(unequal_task) > 0:\n for metric in metrics:\n averaged1, averaged2 = sum(form1_log[metric][taskid] for taskid in form1_log[metric]) / \\\n len(form1_log[metric]), \\\n sum(form2_log[metric][taskid] for taskid in form2_log[metric]) / \\\n len(form2_log[metric])\n comparison_log[metric] = [averaged1, averaged2]\n else:\n for metric in metrics:\n comparison_log[metric] = [0, 0]\n form1_win_rate = sum(form1_log['MRR'][taskid] > form2_log['MRR'][taskid] for taskid in unequal_task)\n form2_win_rate = sum(form1_log['MRR'][taskid] < form2_log['MRR'][taskid] for taskid in unequal_task)\n comparison_log['win_rate'] = [form1_win_rate, form2_win_rate]\n comparison_log['different_queries'] = [len(unequal_task), len(unequal_task)]\n if save_csv:\n compare_taskid = {}\n for metric in metrics:\n compare_taskid[f'{form1}_{metric}'] = form1_log[metric]\n compare_taskid[f'{form2}_{metric}'] = form2_log[metric]\n compare_taskid[f'{form1}_formula'] = {}\n compare_taskid[f'{form2}_formula'] = {}\n compare_taskid['winner'] = {}\n for taskid in unequal_task:\n id_str = '0' * (4 - len(str(taskid))) + str(taskid)\n formula_index = all_formula.loc[all_formula['formula_id'] == f'type{id_str}'].index[0]\n formula1, formula2 = all_formula[form1][formula_index], all_formula[form2][formula_index]\n compare_taskid[f'{form1}_formula'][taskid] = formula1\n compare_taskid[f'{form2}_formula'][taskid] = formula2\n if compare_taskid[f'{form1}_MRR'][taskid] > compare_taskid[f'{form2}_MRR'][taskid]:\n compare_taskid['winner'][taskid] = form1\n elif compare_taskid[f'{form1}_MRR'][taskid] < compare_taskid[f'{form2}_MRR'][taskid]:\n compare_taskid['winner'][taskid] = form2\n else:\n compare_taskid['winner'][taskid] = 'draw'\n data = pd.DataFrame.from_dict(compare_taskid)\n data.to_csv(os.path.join(folder_path, f'compare_detail_{form1}_{form2}.csv'))\n\n '''\n df = pd.DataFrame.from_dict(comparison_log)\n df.to_csv(os.path.join(folder_path, f'compare_{form1}_{form2}.csv'))\n '''\n return comparison_log\n\n\ndef compare_all_form(folder_path, form_list, metrics, save_csv=False):\n difference_mrr = collections.defaultdict(lambda: collections.defaultdict(list))\n difference_number = collections.defaultdict(lambda: collections.defaultdict(int))\n difference_win_rate = collections.defaultdict(lambda: collections.defaultdict(float))\n n = len(form_list)\n for i in range(n):\n for j in range(n):\n difference_number[form_list[j]][form_list[i]] = 0\n difference_win_rate[form_list[j]][form_list[i]] = 0\n for i in range(n):\n for j in range(i + 1, n):\n comparison_log = normal_form_comparison(folder_path, form_list[i], form_list[j], metrics, save_csv)\n difference_mrr[form_list[j]][form_list[i]] = comparison_log['MRR']\n difference_number[form_list[j]][form_list[i]] = comparison_log['different_queries'][0]\n difference_number[form_list[i]][form_list[j]] = comparison_log['different_queries'][0]\n formj_win, formi_win = comparison_log['win_rate']\n if formj_win + formi_win > 0:\n j_against_i = formj_win / (formj_win + formi_win) * 100\n difference_win_rate[form_list[j]][form_list[i]] = j_against_i\n difference_win_rate[form_list[i]][form_list[j]] = 100 - j_against_i\n else:\n difference_win_rate[form_list[j]][form_list[i]] = 0\n difference_win_rate[form_list[i]][form_list[j]] = 0\n\n dm, dn, dw = pd.DataFrame.from_dict(difference_mrr), pd.DataFrame.from_dict(difference_number),\\\n pd.DataFrame.from_dict(difference_win_rate)\n dm.to_csv(os.path.join(folder_path, f'allmrr_compare.csv'))\n dn.to_csv(os.path.join(folder_path, 
f'alllength_compare.csv'))\n dw.to_csv(os.path.join(folder_path, f'allwin_rate_compare.csv'))\n\n\ndef log_benchmark_depth_anchornode(folder_path, support_normal_forms, metrics):\n all_formula = pd.read_csv('data/generated_formula_anchor_node=3.csv')\n query_type_num = len(all_formula['original'])\n all_logging = collections.defaultdict(lambda: collections.defaultdict(lambda: collections.defaultdict(list)))\n averaged_split = collections.defaultdict(lambda: collections.defaultdict(lambda: collections.defaultdict(float)))\n averaged_all = collections.defaultdict(lambda: collections.defaultdict(float))\n for normal_form in support_normal_forms:\n for i in range(1, 4):\n for j in range(1, 4):\n for metric in metrics:\n averaged_split[normal_form][(i, j)][metric] = 0\n for metric in metrics:\n metric_logging = pd.read_csv(os.path.join(folder_path, f'all_formula_{metric}.csv'))\n index2taskid = metric_logging['Unnamed: 0']\n for index in range(len(index2taskid)):\n taskid = index2taskid[index]\n id_str = '0' * (4 - len(str(taskid))) + str(taskid)\n formula_index = all_formula.loc[all_formula['formula_id'] == f'type{id_str}'].index[0]\n depth = all_formula['original_depth'][formula_index]\n anchornode_num = all_formula['num_anchor_nodes'][formula_index]\n for normal_form in support_normal_forms:\n query_scores = metric_logging.loc[index][normal_form]\n all_logging[normal_form][(anchornode_num, depth)][metric].append(query_scores)\n all_number = sum(len(all_logging[support_normal_forms[0]][key][metrics[0]])\n for key in all_logging[support_normal_forms[0]])\n assert all_number == query_type_num # all query type are included\n for normal_form in support_normal_forms:\n for key in all_logging[normal_form]:\n for metric in metrics:\n averaged_split[normal_form][key][metric] = sum(all_logging[normal_form][key][metric])\\\n / len(all_logging[normal_form][key][metric])\n for normal_form in support_normal_forms:\n for metric in metrics:\n averaged_all[normal_form][metric] = sum(sum(all_logging[normal_form][key][metric])\n for key in all_logging[normal_form])\n averaged_all[normal_form][metric] /= query_type_num\n averaged_split[normal_form]['average'][metric] = averaged_all[normal_form][metric]\n df = pd.DataFrame.from_dict(averaged_split[normal_form])\n if normal_form != 'DNF+MultiIUd':\n df.to_csv(os.path.join(folder_path, f'anchornode_depth_of_{normal_form}.csv'))\n else:\n df.to_csv(os.path.join(folder_path, f'anchornode_depth_of_new_form.csv'))\n return averaged_split\n\n\ndef answer_statistic(data_folder, formula_id_file):\n formula_id_data = pd.read_csv(formula_id_file)\n query_id_str_list = formula_id_data['formula_id']\n statistis_grouping = collections.defaultdict(list)\n statistis_grouping_averaged = collections.defaultdict(lambda: collections.defaultdict(float))\n for i in range(1, 4):\n for j in range(1, 4):\n statistis_grouping[(i, j)] = []\n for type_str in query_id_str_list:\n filename = os.path.join(data_folder, f'data-{type_str}.csv')\n dense = filename.replace('data', 'tmp').replace('csv', 'pickle')\n if os.path.exists(dense):\n print(\"load from existed files\", type_str)\n with open(dense, 'rb') as f:\n data = pickle.load(f)\n easy_answer_set = data['easy_answer_set']\n hard_answer_set = data['hard_answer_set']\n easy_ans_num, hard_ans_num = sum(len(easy) for easy in easy_answer_set) /len(easy_answer_set), \\\n sum(len(hard) for hard in hard_answer_set) / len(hard_answer_set)\n formula_index = formula_id_data.loc[formula_id_data['formula_id'] == f'{type_str}'].index[0]\n depth = 
formula_id_data['original_depth'][formula_index]\n anchor_node_num = formula_id_data['num_anchor_nodes'][formula_index]\n statistis_grouping[(anchor_node_num, depth)].append(hard_ans_num)\n else:\n query_data = pd.read_csv(filename)\n all_easy_ans, all_hard_ans = query_data.easy_answers.map(lambda x: list(eval(x))).tolist(), \\\n query_data.hard_answers.map(lambda x: list(eval(x))).tolist()\n easy_ans_num, hard_ans_num = sum(len(easy) for easy in all_easy_ans) / len(all_easy_ans), \\\n sum(len(hard) for hard in all_hard_ans) / len(all_hard_ans)\n formula_index = formula_id_data.loc[formula_id_data['formula_id'] == f'{type_str}'].index[0]\n depth = formula_id_data['original_depth'][formula_index]\n anchor_node_num = formula_id_data['num_anchor_nodes'][formula_index]\n statistis_grouping[(anchor_node_num, depth)].append(hard_ans_num)\n\n for key in statistis_grouping:\n statistis_grouping_averaged[key]['hard'] = sum(statistis_grouping[key]) / len(statistis_grouping[key])\n print(key, len(statistis_grouping[key]))\n data_averaged = pd.DataFrame.from_dict(statistis_grouping_averaged)\n data_averaged.to_csv(os.path.join(data_folder, 'size_statistics_grouping_formhard.csv'))\n\n\n\nbox_query_v2 = {\n '1p': '(p,(e))',\n '2p': '(p,(p,(e)))',\n '3p': '(p,(p,(p,(e))))',\n '2i': '(i,(p,(e)),(p,(e)))',\n '3i': '(i,(p,(e)),(p,(e)),(p,(e)))',\n 'ip': '(p,(i,(p,(e)),(p,(e))))',\n 'pi': '(i,(p,(p,(e))),(p,(e)))',\n '2u-DNF': '(u,(p,(e)),(p,(e)))',\n 'up-DNF': '(u,(p,(p,(e))),(p,(p,(e))))',\n}\n\ncheck_query = {\n '1p': '(p,(e))',\n '2p': '(p,(p,(e)))',\n '3p': '(p,(p,(p,(e))))',\n '2i': '(i,(p,(e)),(p,(e)))',\n '3i': '(i,(p,(e)),(p,(e)),(p,(e)))',\n 'ip': '(p,(i,(p,(e)),(p,(e))))',\n 'pi': '(i,(p,(p,(e))),(p,(e)))',\n '2in': '(i,(p,(e)),(n,(p,(e))))',\n '3in': '(i,(p,(e)),(p,(e)),(n,(p,(e))))',\n 'inp': '(p,(i,(p,(e)),(n,(p,(e)))))',\n 'pin': '(i,(p,(p,(e))),(n,(p,(e))))',\n 'pni': '(i,(n,(p,(p,(e)))),(p,(e)))',\n '2u-DNF': '(u,(p,(e)),(p,(e)))',\n 'up-DNF': '(u,(p,(p,(e))),(p,(p,(e))))',\n '2u-DM': '(n,(i,(n,(p,(e))),(n,(p,(e)))))',\n 'up-DM': '(p,(n,(i,(n,(p,(e))),(n,(p,(e))))))',\n}\nDNF_query = {\n '1p': '(p,(e))',\n '2p': '(p,(p,(e)))',\n '3p': '(p,(p,(p,(e))))',\n '2i': '(i,(p,(e)),(p,(e)))',\n '3i': '(i,(p,(e)),(p,(e)),(p,(e)))',\n 'ip': '(p,(i,(p,(e)),(p,(e))))',\n 'pi': '(i,(p,(p,(e))),(p,(e)))',\n '2in': '(i,(p,(e)),(n,(p,(e))))',\n '3in': '(i,(p,(e)),(p,(e)),(n,(p,(e))))',\n 'inp': '(p,(i,(p,(e)),(n,(p,(e)))))',\n 'pin': '(i,(p,(p,(e))),(n,(p,(e))))',\n 'pni': '(i,(n,(p,(p,(e)))),(p,(e)))',\n '2u-DNF': '(u,(p,(e)),(p,(e)))',\n 'up-DNF': '(u,(p,(p,(e))),(p,(p,(e))))',\n}\n# print_loss(graph_path)\n'''\ntest_step = 450000\ntest_path = \"/home/hyin/FirstOrderQueryEstimation/log/newdev/Logic-unbounded210813.22:26:062c614d51/\"\nold_path = \"/home/hyin/FirstOrderQueryEstimation/log/newdev/Logic-unbounded210813.21:19:26aaf6eebf/\"\n# test_path = \"/home/hyin/FirstOrderQueryEstimation/log/dev/default210705.14:43:26fba267b0/\"\nlogic_path = \"/data/zwanggc/Logic-unbounded210813.22:24:17607989e2/\"\n#compare_loss(test_path, test_path, choose_len=3000)\nlog_all_metrics(test_path, test_step, 'test', log_meta_formula=check_query.values())\nlog_all_metrics(old_path, test_step, 'test', log_meta_formula=check_query.values())\n'''\np_list = [0, 1, 2, 1116, 1117]\ni_list = [13, 137, 1113, 1114]\nall_3_3_list = list(range(0, 531))\nBeta_path = \"/home/zwanggc/FirstOrderQueryEstimation/benchmark_log/benchmark_FB15k-237/Beta_full211021.21:53:5622b7307f/\"\nNLK_path = 
\"/home/zwanggc/FirstOrderQueryEstimation/benchmark_log/benchmark_FB15k-237/NLK_full211022.10:23:213a1fea21/\"\nLogic_path = \"/home/hyin/FirstOrderQueryEstimation/benchmark_log/benchmark_FB15k-237/Logic_full211022.14:06:57d7bd0d37/\"\nBox_path = \"/home/zwanggc/FirstOrderQueryEstimation/benchmark_log/benchmark_FB15k-237/Box_full210822.00:56:4448dc3a71\"\n\nBeta_NELL = \"/home/zwanggc/FirstOrderQueryEstimation/benchmark_log/benchmark_NELL/Beta_full211021.21:54:2510bcf310/\"\nLogic_NELL = \"/home/hyin/FirstOrderQueryEstimation/benchmark_log/benchmark_NELL/Logic_full211022.14:06:0128fd7614/\"\nNLK_NELL = \"/home/zwanggc/FirstOrderQueryEstimation/benchmark_log/benchmark_NELL/NLK_full211022.14:23:059a6b9d86/\"\nNELL_result = {'BetaE': Beta_NELL, 'LogicE': Logic_NELL, 'NewLook': NLK_NELL}\n\nBeta_FB = \"/home/zwanggc/FirstOrderQueryEstimation/benchmark_log/benchmark_FB15k/Beta_full211021.21:52:5760fc2d24/\"\nLogic_FB = \"/home/zwanggc/FirstOrderQueryEstimation/benchmark_log/benchmark_FB15k/Logic_full211022.10:14:23940a46a4/\"\nNLK_FB = \"/home/zwanggc/FirstOrderQueryEstimation/benchmark_log/benchmark_FB15k/NLK_full211022.14:22:1240d958f1/\"\n\nLogic_1p_path = \"/home/hyin/FirstOrderQueryEstimation/benchmark_log/benchmark_generalize/Logic_1p210825.15:55:2565735b8d\"\nLogic_2p_path = \"/home/hyin/FirstOrderQueryEstimation/benchmark_log/benchmark_generalize/Logic_2p210825.16:02:51b8e4878b\"\nLogic_3p_path = \"/home/hyin/FirstOrderQueryEstimation/benchmark_log/benchmark_generalize/Logic_3p210825.16:07:530d917424\"\nLogic_2i_path = \"/home/hyin/FirstOrderQueryEstimation/benchmark_log/benchmark_generalize/Logic_2i210825.16:26:241f438fbf\"\nLogic_3i_path = \"/home/hyin/FirstOrderQueryEstimation/benchmark_log/benchmark_generalize/Logic_3i210825.16:44:0584f53968\"\n\nnew_2i_path = \"/home/hyin/FirstOrderQueryEstimation/benchmark_log/benchmark_generalize/Logic_2i210828.17:53:31146141ce\"\nnew_3i_path = \"/home/hyin/FirstOrderQueryEstimation/benchmark_log/benchmark_generalize/Logic_3i210828.17:56:27662c4441\"\nFB15_237_data, FB_data, NELL_data = 'data/benchmark/FB15k-237', 'data/benchmark/FB15k', 'data/benchmark/NELL'\n\n#log_benchmark(Logic_path, all_3_3_list, percentage=True)\n# compare_all_form(Box_path, all_normal_form, all_metrics)\n#compare_all_form(Logic_path, model_compareform_dict['LogicE'], metrics=all_metrics, save_csv=True)\n# pandas_logging_depth_anchornode(NELL_result, model_supportform_dict, all_metrics)\n#log_benchmark_depth_anchornode(Logic_path, model_supportform_dict['LogicE'], all_metrics)\nanswer_statistic(NELL_data, formula_file)\n# log_old_metrics(old_path, test_step, 'test')\n# train_all, valid_all, test_all = read_beta_log('../download_log/full/')\n# train_part, valid_part, test_part = read_logic_log(logic_path, 'test', test_step, averaged_meta_formula=DNF_query.values())\n\n\n# comparison('../download_log/1p.2p.2i/', ['1p', '2p', '2i'])\n"
] |
[
[
"numpy.zeros_like",
"numpy.asarray",
"numpy.log",
"pandas.DataFrame.from_dict",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.plot",
"matplotlib.pyplot.title",
"matplotlib.pyplot.legend",
"numpy.where",
"matplotlib.pyplot.ylabel",
"matplotlib.pyplot.show",
"pandas.read_csv"
]
] |
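The `read_logic_log` function in the row above parses its `train.log` by slicing each line at fixed column offsets, so the first 29 characters are evidently a timestamp prefix. A minimal sketch of that parsing on a fabricated sample line — the exact log layout is an assumption inferred from the slice indices, not confirmed output:

```python
# Hypothetical log line; the 29-character timestamp prefix and the token
# layout are inferred from the slice offsets in read_logic_log above.
line = " " * 29 + "Valid 1p MRR at step 15000: 0.432"

if line[29:35] == "Valid ":
    info = line[35:].split(" ")
    # info == ['1p', 'MRR', 'at', 'step', '15000:', '0.432']
    beta_name, metric = info[0], info[1]
    step, score = int(info[4][:-1]), float(info[5])
    print(beta_name, metric, step, score)  # -> 1p MRR 15000 0.432
```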
ralfrost/probnum
|
[
"6b0988009a9dd7ecda87ba28c9d5c0b8019981b6"
] |
[
"tests/test_diffeq/test_odefiltsmooth/test_odefiltsmooth.py"
] |
[
"\"\"\"\nWe test on two test-problems:\n * logistic ODE (because it has a closed form sol.)\n -> make sure error converges to zero (even with rate q?)\n -> Check if iterates match the closed-form solutions in\n Schober et al.\n * Lotka-Volterra (because it provides meaningful uncertainty estimates,\n if e.g. EKF-based ODE filter is implemented correctly)\n -> error estimates from adaptive step sizes are roughly satsified\n (for the ibm1-kf combo, the other ones do not apply.)\n\"\"\"\n\nimport unittest\n\nimport numpy as np\n\nfrom probnum.diffeq.odefiltsmooth import probsolve_ivp\nfrom probnum.diffeq import ode\nfrom probnum.random_variables import Dirac\n\nfrom tests.testing import NumpyAssertions\n\n\nclass TestConvergenceOnLogisticODE(unittest.TestCase):\n \"\"\"\n We test whether the convergence rates roughly hold true.\n \"\"\"\n\n def setUp(self):\n \"\"\"Setup odesolver and solve a scalar ode\"\"\"\n initrv = Dirac(0.1 * np.ones(1))\n self.ivp = ode.logistic([0.0, 1.5], initrv)\n self.stps = [0.2, 0.1]\n\n def test_error_ibm1(self):\n \"\"\"Expect error rate q+1 \"\"\"\n stp1, stp2 = self.stps\n sol = probsolve_ivp(self.ivp, step=stp1, which_prior=\"ibm1\")\n means1 = sol.y.mean\n sols1 = np.array([self.ivp.solution(t) for t in sol.t])\n err1 = np.amax(np.abs(sols1 - means1))\n sol = probsolve_ivp(self.ivp, step=stp2, which_prior=\"ibm1\")\n means2 = sol.y.mean\n sols2 = np.array([self.ivp.solution(t) for t in sol.t])\n err2 = np.amax(np.abs(sols2 - means2))\n exp_decay = (stp2 / stp1) ** 2\n diff = np.abs(exp_decay * err1 - err2) / np.abs(err2)\n self.assertLess(diff, 1.0)\n\n def test_error_ibm2(self):\n \"\"\"Expect error rate q+1 \"\"\"\n stp1, stp2 = self.stps\n sol = probsolve_ivp(self.ivp, step=stp1, which_prior=\"ibm2\")\n means1 = sol.y.mean\n sols1 = np.array([self.ivp.solution(t) for t in sol.t])\n err1 = np.amax(np.abs(sols1 - means1))\n sol = probsolve_ivp(self.ivp, step=stp2, which_prior=\"ibm2\")\n means2 = sol.y.mean\n sols2 = np.array([self.ivp.solution(t) for t in sol.t])\n err2 = np.amax(np.abs(sols2 - means2))\n exp_decay = (stp2 / stp1) ** 3\n diff = np.abs(exp_decay * err1 - err2) / np.abs(err2)\n self.assertLess(diff, 1.0)\n\n def test_error_ibm3(self):\n \"\"\"Expect error rate q+1 \"\"\"\n stp1, stp2 = self.stps\n sol = probsolve_ivp(self.ivp, step=stp1, which_prior=\"ibm3\")\n means1 = sol.y.mean\n sols1 = np.array([self.ivp.solution(t) for t in sol.t])\n err1 = np.amax(np.abs(sols1 - means1))\n sol = probsolve_ivp(self.ivp, step=stp2, which_prior=\"ibm3\")\n means2 = sol.y.mean\n sols2 = np.array([self.ivp.solution(t) for t in sol.t])\n err2 = np.amax(np.abs(sols2 - means2))\n exp_decay = (stp2 / stp1) ** 4\n diff = np.abs(exp_decay * err1 - err2) / np.abs(err2)\n self.assertLess(diff, 1.0)\n\n def test_error_ioup1(self):\n \"\"\"Expect error rate q+1 \"\"\"\n stp1, stp2 = self.stps\n sol = probsolve_ivp(self.ivp, step=stp1, which_prior=\"ioup1\")\n means1 = sol.y.mean\n sols1 = np.array([self.ivp.solution(t) for t in sol.t])\n err1 = np.amax(np.abs(sols1 - means1))\n sol = probsolve_ivp(self.ivp, step=stp2, which_prior=\"ioup1\")\n means2 = sol.y.mean\n sols2 = np.array([self.ivp.solution(t) for t in sol.t])\n err2 = np.amax(np.abs(sols2 - means2))\n exp_decay = (stp2 / stp1) ** 2\n diff = np.abs(exp_decay * err1 - err2) / np.abs(err2)\n self.assertLess(diff, 1.0)\n\n def test_error_ioup2(self):\n \"\"\"Expect error rate q+1 \"\"\"\n stp1, stp2 = self.stps\n sol = probsolve_ivp(self.ivp, step=stp1, which_prior=\"ioup2\")\n means1 = sol.y.mean\n sols1 = 
np.array([self.ivp.solution(t) for t in sol.t])\n err1 = np.amax(np.abs(sols1 - means1))\n sol = probsolve_ivp(self.ivp, step=stp2, which_prior=\"ioup2\")\n means2 = sol.y.mean\n sols2 = np.array([self.ivp.solution(t) for t in sol.t])\n err2 = np.amax(np.abs(sols2 - means2))\n exp_decay = (stp2 / stp1) ** 3\n diff = np.abs(exp_decay * err1 - err2) / np.abs(err2)\n self.assertLess(diff, 1.0)\n\n def test_error_ioup3(self):\n \"\"\"Expect error rate q+1 \"\"\"\n stp1, stp2 = self.stps\n sol = probsolve_ivp(self.ivp, step=stp1, which_prior=\"ioup3\")\n means1 = sol.y.mean\n sols1 = np.array([self.ivp.solution(t) for t in sol.t])\n err1 = np.amax(np.abs(sols1 - means1))\n sol = probsolve_ivp(self.ivp, step=stp2, which_prior=\"ioup3\")\n means2 = sol.y.mean\n sols2 = np.array([self.ivp.solution(t) for t in sol.t])\n err2 = np.amax(np.abs(sols2 - means2))\n exp_decay = (stp2 / stp1) ** 4\n diff = np.abs(exp_decay * err1 - err2) / np.abs(err2)\n self.assertLess(diff, 1.0)\n\n\nclass TestFirstIterations(unittest.TestCase, NumpyAssertions):\n \"\"\"\n Test whether first few means and covariances coincide with Prop. 1\n in Schober et al., 2019.\n \"\"\"\n\n def setUp(self):\n initrv = Dirac(0.1 * np.ones(1))\n self.ivp = ode.logistic([0.0, 1.5], initrv)\n self.step = 0.5\n sol = probsolve_ivp(\n self.ivp, step=self.step, initrv=initrv, diffconst=1.0, which_prior=\"ibm1\"\n )\n state_rvs = sol._state_rvs\n self.ms, self.cs = state_rvs.mean, state_rvs.cov\n\n def test_t0(self):\n exp_mean = np.array(\n [self.ivp.initrv.mean, self.ivp.rhs(0, self.ivp.initrv.mean)]\n )\n\n self.assertAllClose(self.ms[0], exp_mean[:, 0], rtol=1e-14)\n self.assertAllClose(self.cs[0], np.zeros((2, 2)), rtol=1e-14)\n\n def test_t1(self):\n \"\"\"\n The kernels does not coincide exactly because of the\n uncertainty calibration that takes place in\n GaussianIVPFilter.solve()\n and not in Prop. 
1 of Schober et al., 2019.\n \"\"\"\n y0 = self.ivp.initrv.mean\n z0 = self.ivp.rhs(0, y0)\n z1 = self.ivp.rhs(0, y0 + self.step * z0)\n exp_mean = np.array([y0 + 0.5 * self.step * (z0 + z1), z1])\n self.assertAllClose(self.ms[1], exp_mean[:, 0], rtol=1e-14)\n\n\nclass TestAdaptivityOnLotkaVolterra(unittest.TestCase):\n \"\"\"\n Only test on \"ekf0\" with IBM(1) prior, since every other combination\n seems to dislike the adaptive scheme based on the whitened residual\n as an error estimate.\n \"\"\"\n\n def setUp(self):\n \"\"\"Setup odesolver and solve a scalar ode\"\"\"\n initrv = Dirac(20 * np.ones(2))\n self.ivp = ode.lotkavolterra([0.0, 0.5], initrv)\n self.tol = 1e-2\n\n def test_kf_ibm1_stdev(self):\n \"\"\"\n Standard deviation at end point roughly equal to tolerance.\n \"\"\"\n sol = probsolve_ivp(self.ivp, tol=self.tol, which_prior=\"ibm1\", method=\"ekf0\")\n self.assertLess(np.sqrt(sol.y.cov[-1, 0, 0]), 10 * self.tol)\n self.assertLess(0.1 * self.tol, np.sqrt(sol.y.cov[-1, 0, 0]))\n\n def test_kf_ibm1(self):\n \"\"\"\n Tests whether resulting steps are not evenly distributed.\n \"\"\"\n sol = probsolve_ivp(self.ivp, tol=self.tol, which_prior=\"ibm1\", method=\"ekf0\")\n steps = np.diff(sol.t)\n self.assertLess(np.amin(steps) / np.amax(steps), 0.8)\n\n\nclass TestLotkaVolterraOtherPriors(unittest.TestCase):\n \"\"\"\n We only test whether all the prior-filter-adaptivity combinations\n finish.\n \"\"\"\n\n def setUp(self):\n \"\"\"Setup odesolver and Lotka-Volterra IVP\"\"\"\n initrv = Dirac(20 * np.ones(2))\n self.ivp = ode.lotkavolterra([0.0, 0.5], initrv)\n self.tol = 1e-1\n self.step = 0.1\n\n def test_filter_ivp_ioup1_kf(self):\n probsolve_ivp(self.ivp, tol=self.tol, which_prior=\"ioup1\", method=\"ekf0\")\n\n def test_filter_ivp_ioup2_ekf(self):\n probsolve_ivp(self.ivp, tol=self.tol, which_prior=\"ioup2\", method=\"ekf1\")\n\n def test_filter_ivp_ioup3_ukf(self):\n \"\"\"\n UKF requires some evaluation-variance to have a positive definite\n innovation matrix, apparently.\n \"\"\"\n probsolve_ivp(\n self.ivp, tol=self.tol, evlvar=0.01, which_prior=\"ioup3\", method=\"ukf\"\n )\n\n def test_filter_ivp_h_ioup1_ekf(self):\n probsolve_ivp(self.ivp, step=self.step, which_prior=\"ioup1\", method=\"ekf1\")\n\n def test_filter_ivp_h_ioup2_ukf(self):\n \"\"\"\n UKF requires some evaluation-variance to have a positive definite\n innovation matrix, apparently.\n \"\"\"\n probsolve_ivp(\n self.ivp, step=self.step, evlvar=0.01, which_prior=\"ioup2\", method=\"ukf\"\n )\n\n def test_filter_ivp_h_ioup3_kf(self):\n probsolve_ivp(self.ivp, step=self.step, which_prior=\"ioup3\", method=\"ekf0\")\n\n def test_filter_ivp_mat32_kf(self):\n probsolve_ivp(self.ivp, tol=self.tol, which_prior=\"matern32\", method=\"ekf0\")\n\n def test_filter_ivp_mat52_ekf(self):\n probsolve_ivp(self.ivp, tol=self.tol, which_prior=\"matern52\", method=\"ekf1\")\n\n def test_filter_ivp_mat72_ukf(self):\n \"\"\"\n UKF requires some evaluation-variance to have a positive definite\n innovation matrix, apparently.\n \"\"\"\n probsolve_ivp(\n self.ivp, tol=self.tol, evlvar=0.01, which_prior=\"matern72\", method=\"ukf\"\n )\n\n def test_filter_ivp_h_mat32_ekf(self):\n probsolve_ivp(self.ivp, step=self.step, which_prior=\"matern32\", method=\"ekf1\")\n\n def test_filter_ivp_h_mat52_ukf(self):\n \"\"\"\n UKF requires some evaluation-variance to have a positive definite\n innovation matrix, apparently.\n \"\"\"\n probsolve_ivp(\n self.ivp, step=self.step, evlvar=0.01, which_prior=\"matern52\", method=\"ukf\"\n )\n\n def 
test_filter_ivp_h_mat72_kf(self):\n probsolve_ivp(self.ivp, step=self.step, which_prior=\"matern72\", method=\"ekf0\")\n\n\nclass TestConvergenceOnLogisticODESmoother(unittest.TestCase):\n \"\"\"\n We test whether the convergence rates roughly hold true.\n \"\"\"\n\n def setUp(self):\n \"\"\"Setup odesolver and solve a scalar ode\"\"\"\n initrv = Dirac(0.1 * np.ones(1))\n self.ivp = ode.logistic([0.0, 1.5], initrv)\n self.stps = [0.2, 0.1]\n\n def test_error_ibm1(self):\n \"\"\"Expect error rate q+1 \"\"\"\n stp1, stp2 = self.stps\n sol = probsolve_ivp(self.ivp, step=stp1, method=\"eks0\", which_prior=\"ibm1\")\n means1 = sol.y.mean\n sols1 = np.array([self.ivp.solution(t) for t in sol.t])\n err1 = np.amax(np.abs(sols1 - means1))\n sol = probsolve_ivp(self.ivp, step=stp2, method=\"eks0\", which_prior=\"ibm1\")\n means2 = sol.y.mean\n sols2 = np.array([self.ivp.solution(t) for t in sol.t])\n err2 = np.amax(np.abs(sols2 - means2))\n exp_decay = (stp2 / stp1) ** 2\n diff = np.abs(exp_decay * err1 - err2) / np.abs(err2)\n self.assertLess(diff, 1.0)\n\n def test_error_ibm2(self):\n \"\"\"Expect error rate q+1 \"\"\"\n stp1, stp2 = self.stps\n sol = probsolve_ivp(self.ivp, step=stp1, method=\"eks0\", which_prior=\"ibm2\")\n means1 = sol.y.mean\n sols1 = np.array([self.ivp.solution(t) for t in sol.t])\n err1 = np.amax(np.abs(sols1 - means1))\n sol = probsolve_ivp(self.ivp, step=stp2, method=\"eks0\", which_prior=\"ibm2\")\n means2 = sol.y.mean\n sols2 = np.array([self.ivp.solution(t) for t in sol.t])\n err2 = np.amax(np.abs(sols2 - means2))\n exp_decay = (stp2 / stp1) ** 3\n diff = np.abs(exp_decay * err1 - err2) / np.abs(err2)\n self.assertLess(diff, 1.0)\n\n def test_error_ibm3(self):\n \"\"\"Expect error rate q+1 \"\"\"\n stp1, stp2 = self.stps\n sol = probsolve_ivp(self.ivp, step=stp1, method=\"eks0\", which_prior=\"ibm3\")\n means1 = sol.y.mean\n sols1 = np.array([self.ivp.solution(t) for t in sol.t])\n err1 = np.amax(np.abs(sols1 - means1))\n sol = probsolve_ivp(self.ivp, step=stp2, method=\"eks0\", which_prior=\"ibm3\")\n means2 = sol.y.mean\n sols2 = np.array([self.ivp.solution(t) for t in sol.t])\n err2 = np.amax(np.abs(sols2 - means2))\n exp_decay = (stp2 / stp1) ** 4\n diff = np.abs(exp_decay * err1 - err2) / np.abs(err2)\n self.assertLess(diff, 1.0)\n\n def test_error_ioup1(self):\n \"\"\"Expect error rate q+1 \"\"\"\n stp1, stp2 = self.stps\n sol = probsolve_ivp(self.ivp, step=stp1, method=\"eks0\", which_prior=\"ioup1\")\n means1 = sol.y.mean\n sols1 = np.array([self.ivp.solution(t) for t in sol.t])\n err1 = np.amax(np.abs(sols1 - means1))\n sol = probsolve_ivp(self.ivp, step=stp2, method=\"eks0\", which_prior=\"ioup1\")\n means2 = sol.y.mean\n sols2 = np.array([self.ivp.solution(t) for t in sol.t])\n err2 = np.amax(np.abs(sols2 - means2))\n exp_decay = (stp2 / stp1) ** 2\n diff = np.abs(exp_decay * err1 - err2) / np.abs(err2)\n self.assertLess(diff, 1.0)\n\n def test_error_ioup2(self):\n \"\"\"Expect error rate q+1 \"\"\"\n stp1, stp2 = self.stps\n sol = probsolve_ivp(self.ivp, step=stp1, method=\"eks0\", which_prior=\"ioup2\")\n means1 = sol.y.mean\n sols1 = np.array([self.ivp.solution(t) for t in sol.t])\n err1 = np.amax(np.abs(sols1 - means1))\n sol = probsolve_ivp(self.ivp, step=stp2, method=\"eks0\", which_prior=\"ioup2\")\n means2 = sol.y.mean\n sols2 = np.array([self.ivp.solution(t) for t in sol.t])\n err2 = np.amax(np.abs(sols2 - means2))\n exp_decay = (stp2 / stp1) ** 3\n diff = np.abs(exp_decay * err1 - err2) / np.abs(err2)\n self.assertLess(diff, 1.0)\n\n def 
test_error_ioup3(self):\n \"\"\"Expect error rate q+1 \"\"\"\n stp1, stp2 = self.stps\n sol = probsolve_ivp(self.ivp, step=stp1, method=\"eks0\", which_prior=\"ioup3\")\n means1 = sol.y.mean\n sols1 = np.array([self.ivp.solution(t) for t in sol.t])\n err1 = np.amax(np.abs(sols1 - means1))\n sol = probsolve_ivp(self.ivp, step=stp2, method=\"eks0\", which_prior=\"ioup3\")\n means2 = sol.y.mean\n sols2 = np.array([self.ivp.solution(t) for t in sol.t])\n err2 = np.amax(np.abs(sols2 - means2))\n exp_decay = (stp2 / stp1) ** 4\n diff = np.abs(exp_decay * err1 - err2) / np.abs(err2)\n self.assertLess(diff, 1.0)\n\n\nclass TestAdaptivityOnLotkaVolterraSmoother(unittest.TestCase):\n \"\"\"\n Only test on \"ekf0\" with IBM(1) prior, since every other combination\n seems to dislike the adaptive scheme based on the whitened residual\n as an error estimate.\n \"\"\"\n\n def setUp(self):\n \"\"\"Setup odesolver and solve a scalar ode\"\"\"\n initrv = Dirac(20 * np.ones(2))\n self.ivp = ode.lotkavolterra([0.0, 0.5], initrv)\n self.tol = 1e-2\n\n def test_kf_ibm1_stdev(self):\n \"\"\"\n Standard deviation at end point roughly equal to tolerance.\n \"\"\"\n sol = probsolve_ivp(self.ivp, tol=self.tol, which_prior=\"ibm1\", method=\"eks0\")\n self.assertLess(np.sqrt(sol.y.cov[-1, 0, 0]), 10 * self.tol)\n self.assertLess(0.1 * self.tol, np.sqrt(sol.y.cov[-1, 0, 0]))\n\n def test_kf_ibm1(self):\n \"\"\"\n Tests whether resulting steps are not evenly distributed.\n \"\"\"\n sol = probsolve_ivp(self.ivp, tol=self.tol, which_prior=\"ibm1\", method=\"eks0\")\n steps = np.diff(sol.t)\n self.assertLess(np.amin(steps) / np.amax(steps), 0.8)\n\n\nclass TestLotkaVolterraOtherPriorsSmoother(unittest.TestCase):\n \"\"\"\n We only test whether all the prior-filter-adaptivity combinations\n finish.\n \"\"\"\n\n def setUp(self):\n \"\"\"Setup odesolver and Lotka-Volterra IVP\"\"\"\n initdist = Dirac(20 * np.ones(2))\n self.ivp = ode.lotkavolterra([0.0, 0.5], initdist)\n self.tol = 1e-1\n self.step = 0.1\n\n def test_filter_ivp_ioup1_kf(self):\n probsolve_ivp(self.ivp, tol=self.tol, which_prior=\"ioup1\", method=\"eks0\")\n\n def test_filter_ivp_ioup2_ekf(self):\n probsolve_ivp(self.ivp, tol=self.tol, which_prior=\"ioup2\", method=\"eks1\")\n\n def test_filter_ivp_ioup3_ukf(self):\n \"\"\"\n UKF requires some evaluation-variance to have a positive definite\n innovation matrix, apparently.\n \"\"\"\n probsolve_ivp(\n self.ivp, tol=self.tol, evlvar=0.01, which_prior=\"ioup3\", method=\"uks\"\n )\n\n def test_filter_ivp_h_ioup1_ekf(self):\n probsolve_ivp(self.ivp, step=self.step, which_prior=\"ioup1\", method=\"eks1\")\n\n def test_filter_ivp_h_ioup2_ukf(self):\n \"\"\"\n UKF requires some evaluation-variance to have a positive definite\n innovation matrix, apparently.\n \"\"\"\n probsolve_ivp(\n self.ivp, step=self.step, evlvar=0.01, which_prior=\"ioup2\", method=\"uks\"\n )\n\n def test_filter_ivp_h_ioup3_kf(self):\n probsolve_ivp(self.ivp, step=self.step, which_prior=\"ioup3\", method=\"eks0\")\n\n def test_filter_ivp_mat32_kf(self):\n probsolve_ivp(self.ivp, tol=self.tol, which_prior=\"matern32\", method=\"eks0\")\n\n def test_filter_ivp_mat52_ekf(self):\n probsolve_ivp(self.ivp, tol=self.tol, which_prior=\"matern52\", method=\"eks1\")\n\n def test_filter_ivp_mat72_ukf(self):\n \"\"\"\n UKF requires some evaluation-variance to have a positive definite\n innovation matrix, apparently.\n \"\"\"\n probsolve_ivp(\n self.ivp, tol=self.tol, evlvar=0.01, which_prior=\"matern72\", method=\"uks\"\n )\n\n def 
test_filter_ivp_h_mat32_ekf(self):\n probsolve_ivp(self.ivp, step=self.step, which_prior=\"matern32\", method=\"eks1\")\n\n def test_filter_ivp_h_mat52_ukf(self):\n \"\"\"\n UKF requires some evaluation-variance to have a positive definite\n innovation matrix, apparently.\n \"\"\"\n probsolve_ivp(\n self.ivp, step=self.step, evlvar=0.01, which_prior=\"matern52\", method=\"uks\"\n )\n\n def test_filter_ivp_h_mat72_kf(self):\n probsolve_ivp(self.ivp, step=self.step, which_prior=\"matern72\", method=\"eks0\")\n\n\nclass TestPreconditioning(unittest.TestCase):\n \"\"\"\n Solver with high order and small stepsize should work up to a point where\n step**order is below machine precision.\n \"\"\"\n\n def setUp(self):\n initdist = Dirac(20 * np.ones(2))\n self.ivp = ode.lotkavolterra([0.0, 1e-4], initdist)\n self.step = 1e-5\n self.prior = \"ibm3\"\n\n def test_small_step_feasible(self):\n \"\"\"\n With the 'old' preconditioner, this is impossible because step**(2*order + 1) is too small.\n With the 'new' preconditioner, the smallest value that appears in the solver code is step**order\n \"\"\"\n probsolve_ivp(self.ivp, step=self.step, which_prior=self.prior, method=\"eks0\")\n"
] |
[
[
"numpy.array",
"numpy.zeros",
"numpy.ones",
"numpy.diff",
"numpy.amin",
"numpy.amax",
"numpy.sqrt",
"numpy.abs"
]
] |
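Every `test_error_*` case in the row above checks the same order-(q+1) decay: halving the step from 0.2 to 0.1 should shrink the maximum error by roughly (0.1/0.2)^(q+1). A standalone sketch of that arithmetic with made-up error values (the real `err1`/`err2` come from `probsolve_ivp` runs):

```python
import numpy as np

stp1, stp2 = 0.2, 0.1
q = 1                                 # e.g. the "ibm1" prior
err1 = 1e-2                           # max abs error at step 0.2 (made up)
err2 = 1.4e-3                         # max abs error at step 0.1 (made up)

exp_decay = (stp2 / stp1) ** (q + 1)  # ideal shrink factor: 0.25
diff = np.abs(exp_decay * err1 - err2) / np.abs(err2)
assert diff < 1.0  # the tests tolerate up to 100% relative deviation
```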
CQUlearningsystemgroup/BitwiseBottlneck
|
[
"db2b3a19d56740a8e933c609fed45eb667378c37"
] |
[
"utils/data/file_io.py"
] |
[
"\n\"\"\"Convenience functions for managing dataset file buffers.\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport atexit\nimport multiprocessing\nimport os\nimport tempfile\nimport uuid\n\nimport numpy as np\nimport six\n\nimport tensorflow as tf\n\n\nclass _GarbageCollector(object):\n \"\"\"Deletes temporary buffer files at exit.\n\n Certain tasks (such as NCF Recommendation) require writing buffers to\n temporary files. (Which may be local or distributed.) It is not generally safe\n to delete these files during operation, but they should be cleaned up. This\n class keeps track of temporary files created, and deletes them at exit.\n \"\"\"\n def __init__(self):\n self.temp_buffers = []\n\n def register(self, filepath):\n self.temp_buffers.append(filepath)\n\n def purge(self):\n try:\n for i in self.temp_buffers:\n if tf.io.gfile.exists(i):\n tf.io.gfile.remove(i)\n tf.compat.v1.logging.info(\"Buffer file {} removed\".format(i))\n except Exception as e:\n tf.compat.v1.logging.error(\"Failed to cleanup buffer files: {}\".format(e))\n\n\n_GARBAGE_COLLECTOR = _GarbageCollector()\natexit.register(_GARBAGE_COLLECTOR.purge)\n\n_ROWS_PER_CORE = 50000\n\n\ndef write_to_temp_buffer(dataframe, buffer_folder, columns):\n if buffer_folder is None:\n _, buffer_path = tempfile.mkstemp()\n else:\n tf.io.gfile.makedirs(buffer_folder)\n buffer_path = os.path.join(buffer_folder, str(uuid.uuid4()))\n _GARBAGE_COLLECTOR.register(buffer_path)\n\n return write_to_buffer(dataframe, buffer_path, columns)\n\n\ndef iter_shard_dataframe(df, rows_per_core=1000):\n \"\"\"Two way shard of a dataframe.\n\n This function evenly shards a dataframe so that it can be mapped efficiently.\n It yields a list of dataframes with length equal to the number of CPU cores,\n with each dataframe having rows_per_core rows. (Except for the last batch\n which may have fewer rows in the dataframes.) 
Passing vectorized inputs to\n a multiprocessing pool is much more effecient than iterating through a\n dataframe in serial and passing a list of inputs to the pool.\n\n Args:\n df: Pandas dataframe to be sharded.\n rows_per_core: Number of rows in each shard.\n\n Returns:\n A list of dataframe shards.\n \"\"\"\n n = len(df)\n num_cores = min([multiprocessing.cpu_count(), n])\n\n num_blocks = int(np.ceil(n / num_cores / rows_per_core))\n max_batch_size = num_cores * rows_per_core\n for i in range(num_blocks):\n min_index = i * max_batch_size\n max_index = min([(i + 1) * max_batch_size, n])\n df_shard = df[min_index:max_index]\n n_shard = len(df_shard)\n boundaries = np.linspace(0, n_shard, num_cores + 1, dtype=np.int64)\n yield [df_shard[boundaries[j]:boundaries[j+1]] for j in range(num_cores)]\n\n\ndef _shard_dict_to_examples(shard_dict):\n \"\"\"Converts a dict of arrays into a list of example bytes.\"\"\"\n n = [i for i in shard_dict.values()][0].shape[0]\n feature_list = [{} for _ in range(n)]\n for column, values in shard_dict.items():\n if len(values.shape) == 1:\n values = np.reshape(values, values.shape + (1,))\n\n if values.dtype.kind == \"i\":\n feature_map = lambda x: tf.train.Feature(\n int64_list=tf.train.Int64List(value=x))\n elif values.dtype.kind == \"f\":\n feature_map = lambda x: tf.train.Feature(\n float_list=tf.train.FloatList(value=x))\n else:\n raise ValueError(\"Invalid dtype\")\n for i in range(n):\n feature_list[i][column] = feature_map(values[i])\n examples = [\n tf.train.Example(features=tf.train.Features(feature=example_features))\n for example_features in feature_list\n ]\n\n return [e.SerializeToString() for e in examples]\n\n\ndef _serialize_shards(df_shards, columns, pool, writer):\n \"\"\"Map sharded dataframes to bytes, and write them to a buffer.\n\n Args:\n df_shards: A list of pandas dataframes. (Should be of similar size)\n columns: The dataframe columns to be serialized.\n pool: A multiprocessing pool to serialize in parallel.\n writer: A TFRecordWriter to write the serialized shards.\n \"\"\"\n # Pandas does not store columns of arrays as nd arrays. stack remedies this.\n map_inputs = [{c: np.stack(shard[c].values, axis=0) for c in columns}\n for shard in df_shards]\n\n # Failure within pools is very irksome. Thus, it is better to thoroughly check\n # inputs in the main process.\n for inp in map_inputs:\n # Check that all fields have the same number of rows.\n assert len(set([v.shape[0] for v in inp.values()])) == 1\n for val in inp.values():\n assert hasattr(val, \"dtype\")\n assert hasattr(val.dtype, \"kind\")\n assert val.dtype.kind in (\"i\", \"f\")\n assert len(val.shape) in (1, 2)\n shard_bytes = pool.map(_shard_dict_to_examples, map_inputs)\n for s in shard_bytes:\n for example in s:\n writer.write(example)\n\n\ndef write_to_buffer(dataframe, buffer_path, columns, expected_size=None):\n \"\"\"Write a dataframe to a binary file for a dataset to consume.\n\n Args:\n dataframe: The pandas dataframe to be serialized.\n buffer_path: The path where the serialized results will be written.\n columns: The dataframe columns to be serialized.\n expected_size: The size in bytes of the serialized results. 
This is used to\n lazily construct the buffer.\n\n Returns:\n The path of the buffer.\n \"\"\"\n if (tf.io.gfile.exists(buffer_path) and\n tf.io.gfile.stat(buffer_path).length > 0):\n actual_size = tf.io.gfile.stat(buffer_path).length\n if expected_size == actual_size:\n return buffer_path\n tf.compat.v1.logging.warning(\n \"Existing buffer {} has size {}. Expected size {}. Deleting and \"\n \"rebuilding buffer.\".format(buffer_path, actual_size, expected_size))\n tf.io.gfile.remove(buffer_path)\n\n if dataframe is None:\n raise ValueError(\n \"dataframe was None but a valid existing buffer was not found.\")\n\n tf.io.gfile.makedirs(os.path.split(buffer_path)[0])\n\n tf.compat.v1.logging.info(\"Constructing TFRecordDataset buffer: {}\"\n .format(buffer_path))\n\n count = 0\n pool = multiprocessing.Pool(multiprocessing.cpu_count())\n try:\n with tf.io.TFRecordWriter(buffer_path) as writer:\n for df_shards in iter_shard_dataframe(df=dataframe,\n rows_per_core=_ROWS_PER_CORE):\n _serialize_shards(df_shards, columns, pool, writer)\n count += sum([len(s) for s in df_shards])\n tf.compat.v1.logging.info(\"{}/{} examples written.\"\n .format(str(count).ljust(8), len(dataframe)))\n finally:\n pool.terminate()\n\n tf.compat.v1.logging.info(\"Buffer write complete.\")\n return buffer_path\n"
] |
[
[
"numpy.ceil",
"tensorflow.train.FloatList",
"tensorflow.compat.v1.logging.info",
"numpy.reshape",
"tensorflow.train.Features",
"tensorflow.train.Int64List",
"tensorflow.io.gfile.stat",
"tensorflow.io.gfile.makedirs",
"tensorflow.io.gfile.exists",
"numpy.stack",
"tensorflow.io.TFRecordWriter",
"numpy.linspace",
"tensorflow.io.gfile.remove"
]
] |
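The two-way sharding in `iter_shard_dataframe` above first cuts the frame into blocks of `num_cores * rows_per_core` rows, then splits each block near-evenly across cores via `np.linspace` boundaries. A standalone sketch of just that index arithmetic, with toy sizes in place of `multiprocessing.cpu_count()`:

```python
import numpy as np

n, num_cores, rows_per_core = 10, 3, 2  # toy sizes for illustration
num_blocks = int(np.ceil(n / num_cores / rows_per_core))
max_batch = num_cores * rows_per_core
for i in range(num_blocks):
    lo, hi = i * max_batch, min((i + 1) * max_batch, n)
    bounds = np.linspace(0, hi - lo, num_cores + 1, dtype=np.int64)
    print([(int(lo + bounds[j]), int(lo + bounds[j + 1]))
           for j in range(num_cores)])
# block 0 covers rows 0..6 split as (0, 2), (2, 4), (4, 6);
# block 1 covers the remaining rows 6..10
```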
vincekurtz/quadruped_drake
|
[
"0fed14eeb7ba33144ab740bc9afa412398f70b7e"
] |
[
"planners/simple.py"
] |
[
"import numpy as np\nfrom pydrake.all import *\n\nclass BasicTrunkPlanner(LeafSystem):\n \"\"\"\n Implements the simplest possible trunk-model planner, which generates\n desired positions, velocities, and accelerations for the feet, center-of-mass,\n and body frame orientation. \n \"\"\"\n def __init__(self, frame_ids):\n LeafSystem.__init__(self)\n\n # Dictionary of geometry frame ids {\"trunk\": trunk_frame_id, \"lf\": lf_foot_frame_id, ...}\n self.frame_ids = frame_ids\n\n # We'll use an abstract output port so we can send all the\n # data we'd like to include in a dictionary format\n self.DeclareAbstractOutputPort(\n \"trunk_trajectory\",\n lambda: AbstractValue.Make({}),\n self.SetTrunkOutputs)\n\n # Another output port is used to send geometry data regarding the\n # trunk model to the scene graph for visualization\n fpv = FramePoseVector()\n for frame in self.frame_ids:\n fpv.set_value(frame_ids[frame], RigidTransform())\n\n self.DeclareAbstractOutputPort(\n \"trunk_geometry\",\n lambda: AbstractValue.Make(fpv),\n self.SetGeometryOutputs)\n\n # The output data is a class-level object so we can be sure we're sending\n # the same info to the controller as to the scene graph\n self.output_dict = {}\n self.SimpleStanding() # set initial values to self.output_dict\n\n def SimpleStanding(self):\n \"\"\"\n Set output values corresponing to simply\n standing on all four feet.\n \"\"\"\n # Foot positions\n self.output_dict[\"p_lf\"] = np.array([ 0.175, 0.11, 0.0]) # mini cheetah\n self.output_dict[\"p_rf\"] = np.array([ 0.175,-0.11, 0.0])\n self.output_dict[\"p_lh\"] = np.array([-0.2, 0.11, 0.0])\n self.output_dict[\"p_rh\"] = np.array([-0.2, -0.11, 0.0])\n #self.output_dict[\"p_lf\"] = np.array([ 0.34, 0.19, 0.0]) # anymal\n #self.output_dict[\"p_rf\"] = np.array([ 0.34,-0.19, 0.0])\n #self.output_dict[\"p_lh\"] = np.array([-0.34, 0.19, 0.0])\n #self.output_dict[\"p_rh\"] = np.array([-0.34,-0.19, 0.0])\n\n # Foot velocities\n self.output_dict[\"pd_lf\"] = np.zeros(3)\n self.output_dict[\"pd_rf\"] = np.zeros(3)\n self.output_dict[\"pd_lh\"] = np.zeros(3)\n self.output_dict[\"pd_rh\"] = np.zeros(3)\n \n # Foot accelerations\n self.output_dict[\"pdd_lf\"] = np.zeros(3)\n self.output_dict[\"pdd_rf\"] = np.zeros(3)\n self.output_dict[\"pdd_lh\"] = np.zeros(3)\n self.output_dict[\"pdd_rh\"] = np.zeros(3)\n\n # Foot contact states: [lf,rf,lh,rh], True indicates being in contact.\n self.output_dict[\"contact_states\"] = [True,True,True,True]\n\n # Foot contact forces, where each row corresponds to a foot [lf,rf,lh,rh].\n self.output_dict[\"f_cj\"] = np.zeros((3,4))\n\n # Body pose\n self.output_dict[\"rpy_body\"] = np.array([0.0, 0.0, 0.0])\n self.output_dict[\"p_body\"] = np.array([0.0, 0.0, 0.3])\n\n # Body velocities\n self.output_dict[\"rpyd_body\"] = np.zeros(3)\n self.output_dict[\"pd_body\"] = np.zeros(3)\n\n # Body accelerations\n self.output_dict[\"rpydd_body\"] = np.zeros(3)\n self.output_dict[\"pdd_body\"] = np.zeros(3)\n\n # Max control input (accelerations)\n self.output_dict[\"u2_max\"] = 0.0\n\n def OrientationTest(self, t):\n \"\"\"\n Given the current time t, generate output values for\n for a simple orientation test.\n \"\"\"\n self.SimpleStanding()\n self.output_dict[\"rpy_body\"] = np.array([0.0, 0.4*np.sin(t), 0.4*np.cos(t)])\n self.output_dict[\"rpyd_body\"] = np.array([0.0, 0.4*np.cos(t), -0.4*np.sin(t)])\n self.output_dict[\"rpydd_body\"] = np.array([0.0, -0.4*np.sin(t), -0.4*np.cos(t)])\n\n def RaiseFoot(self, t):\n \"\"\"\n Modify the simple standing output values to lift 
one foot\n off the ground.\n \"\"\"\n self.SimpleStanding()\n self.output_dict[\"p_body\"] += np.array([-0.1, 0.05, 0.0])\n\n if t>1:\n self.output_dict[\"contact_states\"] = [True,False,True,True]\n self.output_dict[\"p_rf\"] += np.array([ 0.0, 0.0, 0.1])\n\n def EdgeTest(self):\n \"\"\"\n Move the trunk right to the edge of feasibility, ensuring that\n friction constraints become active (may require a smaller timestep)\n \"\"\"\n self.SimpleStanding()\n self.output_dict[\"p_body\"] += np.array([-0.1, 0.63, 0.0])\n\n def SetTrunkOutputs(self, context, output):\n self.output_dict = output.get_mutable_value()\n\n #self.SimpleStanding()\n #self.output_dict[\"p_body\"] += np.array([0,0,0.05])\n self.OrientationTest(context.get_time())\n #self.EdgeTest()\n #self.RaiseFoot(context.get_time())\n\n def SetGeometryOutputs(self, context, output):\n fpv = output.get_mutable_value()\n fpv.clear()\n \n X_trunk = RigidTransform()\n X_trunk.set_rotation(RollPitchYaw(self.output_dict[\"rpy_body\"]))\n X_trunk.set_translation(self.output_dict[\"p_body\"])\n \n fpv.set_value(self.frame_ids[\"trunk\"], X_trunk)\n\n for foot in [\"lf\",\"rf\",\"lh\",\"rh\"]:\n X_foot = RigidTransform()\n X_foot.set_translation(self.output_dict[\"p_%s\" % foot])\n fpv.set_value(self.frame_ids[foot],X_foot)\n"
] |
[
[
"numpy.array",
"numpy.sin",
"numpy.zeros",
"numpy.cos"
]
] |
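`OrientationTest` in the planner above hand-codes the body orientation trajectory together with its first and second time derivatives. A quick standalone central-difference check that the velocity and acceleration terms really are the derivatives of the pose term:

```python
import numpy as np

def rpy(t):   return np.array([0.0,  0.4 * np.sin(t),  0.4 * np.cos(t)])
def rpyd(t):  return np.array([0.0,  0.4 * np.cos(t), -0.4 * np.sin(t)])
def rpydd(t): return np.array([0.0, -0.4 * np.sin(t), -0.4 * np.cos(t)])

t, h = 1.3, 1e-6  # arbitrary test time and finite-difference step
assert np.allclose((rpy(t + h) - rpy(t - h)) / (2 * h), rpyd(t), atol=1e-6)
assert np.allclose((rpyd(t + h) - rpyd(t - h)) / (2 * h), rpydd(t), atol=1e-6)
```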
z0li627/lambdata_DS9
|
[
"e122f66c1da19d8b768a0719007ce060831db70e"
] |
[
"lambdata_z0li627/__init__.py"
] |
[
"\"\"\"lambdata - a collection os data science heper functions\"\"\"\n\nimport pandas as pd\nimport numpy as np\n\n#sample code\n\n#sample datasets\nONES = pd.DataFrame(np.ones(10))\nZEROS = pd.DataFrame(np.zeros(10))\n\n#sample functions\ndef increment(x):\n return (x + 1)\n\n"
] |
[
[
"numpy.ones",
"numpy.zeros"
]
] |
guyeshet/Keras-Project-Template
|
[
"4b324aea4a923ca0ceb1610487bf7139706fae33"
] |
[
"webserver/loader.py"
] |
[
"import os\nfrom collections import Counter\n\nimport numpy as np\nfrom keras.engine.saving import load_model\n\nfrom utils.utils import from_env, get_root\nfrom webserver.storage.factory import StorageFactory\n\nmodel = None\nMODEL_TYPE = from_env('MODEL_TYPE', 'uk_target_only_native_speakers')\nMODEL_NUM = from_env('MODEL_NUM', \"64812b64080b4668ac824c9ca75b6c04\")\n\n\ndef predict_class_audio(MFCCs):\n '''\n Predict class based on MFCC samples\n :param MFCCs: Numpy array of MFCCs\n :param model: Trained model\n :return: Predicted class of MFCC segment group\n '''\n global model\n MFCCs = MFCCs.reshape(MFCCs.shape[0], MFCCs.shape[1], MFCCs.shape[2], 1)\n y_predicted = model.predict_classes(MFCCs, verbose=0)\n return Counter(list(y_predicted)).most_common(1)[0][0]\n\n\ndef load(from_cloud=True):\n # The current served model based on the experiment type\n\n global model\n\n storage = StorageFactory.default()\n file_path = storage.load_model(MODEL_TYPE, MODEL_NUM)\n model = load_model(file_path)\n\n # BUG fix - initializing the model with an empty vector\n model.predict(np.zeros((1, 13, 30, 1)))\n\nload()"
] |
[
[
"numpy.zeros"
]
] |
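`predict_class_audio` above classifies every MFCC segment separately and returns the majority class. The vote itself, isolated, with hypothetical per-segment predictions:

```python
from collections import Counter

y_predicted = [2, 0, 2, 2, 1]  # hypothetical per-segment class ids
print(Counter(y_predicted).most_common(1)[0][0])  # -> 2
```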
morgoth1145/sandpiles
|
[
"d50327875c837b1af7668444cbe65df2ecc94719"
] |
[
"sandpile.py"
] |
[
"import enum\nimport os\nimport time\n\nimport numpy\nimport pyopencl as cl\nfrom pyopencl.tools import dtype_to_ctype\nfrom PIL import Image\n\nimport pyopencl.array\n\nclass SymmetryMode(enum.Enum):\n SYMMETRY_OFF = 0\n SYMMETRY_ON = 1\n SYMMETRY_ON_WITH_OVERLAP = 2\n\ndef isqrt(n):\n x = n\n y = (x + 1) // 2\n while y < x:\n x = y\n y = (x + n // x) // 2\n return x\n\ndef _gen_macros(ref_data, symmetry_modes):\n yield 'ELEM_TYPE=%s' % dtype_to_ctype(ref_data.dtype)\n yield 'GRID_WIDTH=%d' % ref_data.shape[0]\n yield 'GRID_HEIGHT=%d' % ref_data.shape[1]\n yield 'X_SYMMETRY_MODE=%s' % symmetry_modes[0].name\n yield 'Y_SYMMETRY_MODE=%s' % symmetry_modes[1].name\n\nclass Sandpiles:\n def __init__(self):\n self._ctx = cl.create_some_context()\n self._queue = cl.CommandQueue(self._ctx)\n\n def create_sandpile(self, shape, symmetry_modes):\n data = pyopencl.array.zeros(self._queue,\n shape,\n numpy.uint8)\n\n return _Sandpile(self._ctx, self._queue, data, symmetry_modes)\n\n def try_load_sandpile(self, filename):\n if not filename.endswith('.npz'):\n filename += '.npz'\n\n if not os.path.exists(filename):\n return None\n\n n = numpy.load(filename)\n\n data = pyopencl.array.to_device(self._queue,\n n['array'])\n symmetry_modes = (\n SymmetryMode(n['symmetry_modes'][0]),\n SymmetryMode(n['symmetry_modes'][1])\n )\n\n return _Sandpile(self._ctx,\n self._queue,\n data,\n symmetry_modes)\n\n def scale_sandpile(self, sandpile, factor_x, factor_y):\n with open('resize_data.cl') as f:\n program = cl.Program(self._ctx, f.read())\n\n new_shape = (sandpile.data.shape[0]*factor_x,\n sandpile.data.shape[1]*factor_y)\n\n macros = list(_gen_macros(sandpile.data,\n sandpile.symmetry_modes))\n options = _macros_to_options(macros)\n program.build(options=options)\n\n dest = pyopencl.array.empty(self._queue,\n new_shape,\n numpy.uint8)\n\n program.scale_grid(self._queue,\n sandpile.data.shape,\n None,\n sandpile.data.base_data,\n dest.base_data,\n numpy.uint32(factor_x),\n numpy.uint32(factor_y))\n\n return _Sandpile(self._ctx, self._queue, dest, sandpile.symmetry_modes)\n\n def reshape_sandpile(self, sandpile, new_shape, offsets):\n assert(sandpile.data.shape[0]+offsets[0] <= new_shape[0])\n assert(sandpile.data.shape[1]+offsets[1] <= new_shape[1])\n\n with open('resize_data.cl') as f:\n program = cl.Program(self._ctx, f.read())\n\n macros = list(_gen_macros(sandpile.data,\n sandpile.symmetry_modes))\n options = _macros_to_options(macros)\n program.build(options=options)\n\n dest = pyopencl.array.zeros(self._queue,\n new_shape,\n numpy.uint8)\n\n program.reshape_grid(self._queue,\n sandpile.data.shape,\n None,\n sandpile.data.base_data,\n dest.base_data,\n numpy.uint32(new_shape[0]),\n numpy.uint32(offsets[0]),\n numpy.uint32(new_shape[1]),\n numpy.uint32(offsets[1]))\n\n return _Sandpile(self._ctx, self._queue, dest, sandpile.symmetry_modes)\n\ndef _macros_to_options(macros):\n return ['-D' + m for m in macros]\n\nclass _Sandpile:\n def __init__(self, ctx, queue, data, symmetry_modes):\n self._ctx = ctx\n self._queue = queue\n self.symmetry_modes = symmetry_modes\n\n self.data = data\n\n ctype = dtype_to_ctype(data.dtype)\n\n with open('sandpile.cl') as f:\n program = cl.Program(self._ctx, f.read())\n\n macros = _gen_macros(data, symmetry_modes)\n options = _macros_to_options(macros)\n self._program = program.build(options=options)\n\n from pyopencl.reduction import ReductionKernel\n self._diff_krnl = ReductionKernel(self._ctx,\n numpy.uint32,\n neutral='0',\n reduce_expr='a+b',\n 
map_expr='grid[i]!=new_grid[i]',\n arguments='const __global %s *grid, const __global %s *new_grid' % (ctype,\n ctype))\n\n def solve(self):\n start = time.perf_counter()\n\n run_iter_krnl = self._program.run_iteration\n\n iterations = 0\n adaptive_iterations = 1\n\n grid = self.data\n new_grid = pyopencl.array.empty(self._queue,\n grid.shape,\n grid.dtype)\n\n run_iter_krnl(self._queue,\n grid.shape,\n None,\n grid.base_data,\n new_grid.base_data)\n grid, new_grid = new_grid, grid\n iterations += 1\n\n while True:\n diff_evnt = self._diff_krnl(grid,\n new_grid,\n queue=self._queue)\n\n for _ in range(adaptive_iterations):\n iteration_event = run_iter_krnl(self._queue,\n grid.shape,\n None,\n grid.base_data,\n new_grid.base_data)\n grid, new_grid = new_grid, grid\n iterations += 1\n\n diff_count = diff_evnt.get()\n if 0 == diff_count:\n self.data = new_grid\n iteration_event.wait()\n return iterations, time.perf_counter()-start\n adaptive_iterations = isqrt(diff_count)\n\n def to_image(self, colors):\n return self._get_image_creator(colors).create_image(self.data)\n\n def save(self, filename):\n symmetry_modes = (\n self.symmetry_modes[0].name,\n self.symmetry_modes[1].name\n )\n numpy.savez_compressed(filename,\n a=self.data.get(),\n symmetry_modes=symmetry_modes)\n\n def _get_image_creator(self, colors):\n return _ImageCreator(self._ctx,\n self._queue,\n self.data,\n self.symmetry_modes,\n colors)\n\nclass _ImageCreator:\n def __init__(self, ctx, queue, ref_data, symmetry_modes, colors):\n self._ctx = ctx\n self._queue = queue\n self._shape = ref_data.shape\n\n if symmetry_modes[0] == SymmetryMode.SYMMETRY_OFF:\n image_width = ref_data.shape[0]\n elif symmetry_modes[0] == SymmetryMode.SYMMETRY_ON:\n image_width = 2*ref_data.shape[0]\n elif symmetry_modes[0] == SymmetryMode.SYMMETRY_ON_WITH_OVERLAP:\n image_width = 2*ref_data.shape[0]-1\n\n if symmetry_modes[1] == SymmetryMode.SYMMETRY_OFF:\n image_height = ref_data.shape[1]\n elif symmetry_modes[1] == SymmetryMode.SYMMETRY_ON:\n image_height = 2*ref_data.shape[1]\n elif symmetry_modes[1] == SymmetryMode.SYMMETRY_ON_WITH_OVERLAP:\n image_height = 2*ref_data.shape[1]-1\n\n red = [str(c[0]) for c in colors]\n green = [str(c[1]) for c in colors]\n blue = [str(c[2]) for c in colors]\n\n with open('to_image.cl') as f:\n program = f.read()\n\n macros = list(_gen_macros(ref_data, symmetry_modes))\n macros.append('COLOR_COUNT=%d' % len(colors))\n macros.append('RED_VALS=%s' % ', '.join(red))\n macros.append('GREEN_VALS=%s' % ', '.join(green))\n macros.append('BLUE_VALS=%s' % ', '.join(blue))\n macros.append('IMAGE_WIDTH=%d' % image_width)\n macros.append('IMAGE_HEIGHT=%s' % image_height)\n options = _macros_to_options(macros)\n\n self._program = cl.Program(self._ctx, program).build(options=options)\n\n self._to_image_krnl = self._program.to_image\n\n self._data = pyopencl.array.empty(self._queue,\n (image_width, image_height, 3),\n numpy.uint8)\n\n def create_image(self, data):\n shape = data.shape\n\n assert(self._shape == shape)\n\n self._to_image_krnl(self._queue,\n shape,\n None,\n data.base_data,\n self._data.base_data)\n\n return Image.fromarray(self._data.get(), 'RGB')\n"
] |
[
[
"numpy.uint32",
"numpy.load"
]
] |
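The `isqrt` helper above — used in `solve` to scale the adaptive iteration count by the square root of the diff count — is a plain integer Newton iteration. A standalone copy checked against `math.isqrt` (Python 3.8+):

```python
import math

def isqrt(n):
    # Newton iteration converging to floor(sqrt(n)); copy of the helper above
    x = n
    y = (x + 1) // 2
    while y < x:
        x = y
        y = (x + n // x) // 2
    return x

for n in (0, 1, 2, 15, 16, 10**6 + 1):
    assert isqrt(n) == math.isqrt(n)
```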
hudmgy/HRNet-Facial-Landmark-Detection
|
[
"fe95d4b19e92fe267201d38648635b9beffba77a"
] |
[
"lib/datasets/face300wsd.py"
] |
[
"# ------------------------------------------------------------------------------\n# Copyright (c) Microsoft\n# Licensed under the MIT License.\n# Created by Tianheng Cheng([email protected]), Yang Zhao\n# ------------------------------------------------------------------------------\n\nimport os\nimport random\n\nimport torch\nimport torch.utils.data as data\nimport pandas as pd\nfrom PIL import Image\nimport numpy as np\n\nfrom ..utils.transforms import fliplr_joints, crop, generate_target, transform_pixel\n\n\nclass Face300WSD(data.Dataset):\n\n def __init__(self, cfg, is_train=True, transform=None):\n # specify annotation file for dataset\n if is_train:\n self.csv_file = cfg.DATASET.TRAINSET\n else:\n self.csv_file = cfg.DATASET.TESTSET\n\n self.is_train = is_train\n self.transform = transform\n self.data_root = cfg.DATASET.ROOT\n self.input_size = cfg.MODEL.IMAGE_SIZE\n self.output_size = cfg.MODEL.HEATMAP_SIZE\n self.sigma = cfg.MODEL.SIGMA\n self.scale_factor = cfg.DATASET.SCALE_FACTOR\n self.rot_factor = cfg.DATASET.ROT_FACTOR\n self.label_type = cfg.MODEL.TARGET_TYPE\n self.flip = cfg.DATASET.FLIP\n\n # load annotations\n self.landmarks_frame = pd.read_csv(self.csv_file)\n\n def __len__(self):\n return len(self.landmarks_frame)\n\n def __getitem__(self, idx):\n\n fname = self.landmarks_frame.iloc[idx, 0]\n image_path = os.path.join(self.data_root, fname)\n scale = self.landmarks_frame.iloc[idx, 1]\n\n center_w = self.landmarks_frame.iloc[idx, 2]\n center_h = self.landmarks_frame.iloc[idx, 3]\n center = torch.Tensor([center_w, center_h])\n\n pts = self.landmarks_frame.iloc[idx, 4:].values\n pts = pts.astype('float').reshape(-1, 2)\n img = np.array(Image.open(image_path).convert('RGB'), dtype=np.float32)\n tpts = torch.Tensor(pts)\n center = torch.Tensor(center)\n\n meta = {'index': idx, 'center': center, 'scale': scale,\n 'pts': torch.Tensor(pts), 'tpts': tpts}\n\n return img, fname, meta\n\nif __name__ == '__main__':\n pass\n"
] |
[
[
"pandas.read_csv",
"torch.Tensor"
]
] |
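In `__getitem__` above, each CSV row stores the landmark coordinates flattened as x1, y1, x2, y2, …; `reshape(-1, 2)` recovers the (N, 2) point array. A tiny illustration with three made-up points:

```python
import numpy as np

row = np.array([10.0, 20.0, 30.0, 40.0, 50.0, 60.0])  # x1, y1, x2, y2, ...
pts = row.astype("float").reshape(-1, 2)
print(pts.shape)  # (3, 2): one (x, y) pair per landmark
```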
Toyhom/models
|
[
"54f64c665e2f737a91c39f42e973fa121b80d8b4"
] |
[
"dygraph/ptb_lm/ptb_dy.py"
] |
[
"# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.\r\n#\r\n# Licensed under the Apache License, Version 2.0 (the \"License\");\r\n# you may not use this file except in compliance with the License.\r\n# You may obtain a copy of the License at\r\n#\r\n# http://www.apache.org/licenses/LICENSE-2.0\r\n#\r\n# Unless required by applicable law or agreed to in writing, software\r\n# distributed under the License is distributed on an \"AS IS\" BASIS,\r\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\r\n# See the License for the specific language governing permissions and\r\n# limitations under the License.\r\n\r\nfrom __future__ import print_function\r\n\r\nimport os\r\nimport unittest\r\nimport paddle.fluid as fluid\r\nimport paddle.fluid.core as core\r\nfrom paddle.fluid.dygraph.nn import Embedding\r\nimport paddle.fluid.framework as framework\r\nfrom paddle.fluid.optimizer import SGDOptimizer\r\nfrom paddle.fluid.dygraph.base import to_variable\r\nimport numpy as np\r\nimport six\r\n\r\nimport reader\r\nimport model_check\r\nimport time\r\n\r\nfrom args import *\r\n\r\n#import fluid.dygraph_grad_clip as dygraph_clip\r\n#from fluid.dygraph_grad_clip import *\r\n\r\nimport sys\r\nif sys.version[0] == '2':\r\n reload(sys)\r\n sys.setdefaultencoding(\"utf-8\")\r\n\r\n\r\nclass SimpleLSTMRNN(fluid.Layer):\r\n def __init__(self,\r\n name_scope,\r\n hidden_size,\r\n num_steps,\r\n num_layers=2,\r\n init_scale=0.1,\r\n dropout=None):\r\n super(SimpleLSTMRNN, self).__init__(name_scope)\r\n self._hidden_size = hidden_size\r\n self._num_layers = num_layers\r\n self._init_scale = init_scale\r\n self._dropout = dropout\r\n self._num_steps = num_steps\r\n self.cell_array = []\r\n self.hidden_array = []\r\n\r\n self.weight_1_arr = []\r\n self.weight_2_arr = []\r\n self.bias_arr = []\r\n self.mask_array = []\r\n\r\n for i in range(self._num_layers):\r\n weight_1 = self.create_parameter(\r\n attr=fluid.ParamAttr(\r\n initializer=fluid.initializer.UniformInitializer(\r\n low=-self._init_scale, high=self._init_scale)),\r\n shape=[self._hidden_size * 2, self._hidden_size * 4],\r\n dtype=\"float32\",\r\n default_initializer=fluid.initializer.UniformInitializer(\r\n low=-self._init_scale, high=self._init_scale))\r\n self.weight_1_arr.append(self.add_parameter('w_%d' % i, weight_1))\r\n bias_1 = self.create_parameter(\r\n attr=fluid.ParamAttr(\r\n initializer=fluid.initializer.UniformInitializer(\r\n low=-self._init_scale, high=self._init_scale)),\r\n shape=[self._hidden_size * 4],\r\n dtype=\"float32\",\r\n default_initializer=fluid.initializer.Constant(0.0))\r\n self.bias_arr.append(self.add_parameter('b_%d' % i, bias_1))\r\n\r\n def forward(self, input_embedding, init_hidden=None, init_cell=None):\r\n cell_array = []\r\n hidden_array = []\r\n\r\n for i in range(self._num_layers):\r\n hidden_array.append(init_hidden[i])\r\n cell_array.append(init_cell[i])\r\n\r\n res = []\r\n for index in range(self._num_steps):\r\n step_input = input_embedding[:,index,:]\r\n for k in range(self._num_layers):\r\n pre_hidden = hidden_array[k]\r\n pre_cell = cell_array[k]\r\n weight_1 = self.weight_1_arr[k]\r\n bias = self.bias_arr[k]\r\n\r\n nn = fluid.layers.concat([step_input, pre_hidden], 1)\r\n gate_input = fluid.layers.matmul(x=nn, y=weight_1)\r\n\r\n gate_input = fluid.layers.elementwise_add(gate_input, bias)\r\n i, j, f, o = fluid.layers.split(\r\n gate_input, num_or_sections=4, dim=-1)\r\n c = pre_cell * fluid.layers.sigmoid(f) + fluid.layers.sigmoid(\r\n i) * fluid.layers.tanh(j)\r\n 
m = fluid.layers.tanh(c) * fluid.layers.sigmoid(o)\r\n hidden_array[k] = m\r\n cell_array[k] = c\r\n step_input = m\r\n\r\n if self._dropout is not None and self._dropout > 0.0:\r\n step_input = fluid.layers.dropout(\r\n step_input,\r\n dropout_prob=self._dropout,\r\n dropout_implementation='upscale_in_train')\r\n res.append(step_input)\r\n real_res = fluid.layers.concat(res, 1)\r\n real_res = fluid.layers.reshape(real_res, [ -1, self._num_steps, self._hidden_size])\r\n last_hidden = fluid.layers.concat(hidden_array, 1)\r\n last_hidden = fluid.layers.reshape(\r\n last_hidden, shape=[-1, self._num_layers, self._hidden_size])\r\n last_hidden = fluid.layers.transpose(x=last_hidden, perm=[1, 0, 2])\r\n last_cell = fluid.layers.concat(cell_array, 1)\r\n last_cell = fluid.layers.reshape(\r\n last_cell, shape=[-1, self._num_layers, self._hidden_size])\r\n last_cell = fluid.layers.transpose(x=last_cell, perm=[1, 0, 2])\r\n return real_res, last_hidden, last_cell\r\n\r\n\r\nclass PtbModel(fluid.Layer):\r\n def __init__(self,\r\n name_scope,\r\n hidden_size,\r\n vocab_size,\r\n num_layers=2,\r\n num_steps=20,\r\n init_scale=0.1,\r\n dropout=None):\r\n super(PtbModel, self).__init__(name_scope)\r\n self.hidden_size = hidden_size\r\n self.vocab_size = vocab_size\r\n self.init_scale = init_scale\r\n self.num_layers = num_layers\r\n self.num_steps = num_steps\r\n self.dropout = dropout\r\n self.simple_lstm_rnn = SimpleLSTMRNN(\r\n self.full_name(),\r\n hidden_size,\r\n num_steps,\r\n num_layers=num_layers,\r\n init_scale=init_scale,\r\n dropout=dropout)\r\n self.embedding = Embedding(\r\n self.full_name(),\r\n size=[vocab_size, hidden_size],\r\n dtype='float32',\r\n is_sparse=False,\r\n param_attr=fluid.ParamAttr(\r\n name='embedding_para',\r\n initializer=fluid.initializer.UniformInitializer(\r\n low=-init_scale, high=init_scale)))\r\n self.softmax_weight = self.create_parameter(\r\n attr=fluid.ParamAttr(),\r\n shape=[self.hidden_size, self.vocab_size],\r\n dtype=\"float32\",\r\n default_initializer=fluid.initializer.UniformInitializer(\r\n low=-self.init_scale, high=self.init_scale))\r\n self.softmax_bias = self.create_parameter(\r\n attr=fluid.ParamAttr(),\r\n shape=[self.vocab_size],\r\n dtype=\"float32\",\r\n default_initializer=fluid.initializer.UniformInitializer(\r\n low=-self.init_scale, high=self.init_scale))\r\n\r\n def build_once(self, input, label, init_hidden, init_cell):\r\n pass\r\n\r\n def forward(self, input, label, init_hidden, init_cell):\r\n\r\n init_h = fluid.layers.reshape(\r\n init_hidden, shape=[self.num_layers, -1, self.hidden_size])\r\n\r\n init_c = fluid.layers.reshape(\r\n init_cell, shape=[self.num_layers, -1, self.hidden_size])\r\n\r\n x_emb = self.embedding(input)\r\n\r\n x_emb = fluid.layers.reshape(\r\n x_emb, shape=[-1, self.num_steps, self.hidden_size])\r\n if self.dropout is not None and self.dropout > 0.0:\r\n x_emb = fluid.layers.dropout(\r\n x_emb,\r\n dropout_prob=self.dropout,\r\n dropout_implementation='upscale_in_train')\r\n rnn_out, last_hidden, last_cell = self.simple_lstm_rnn(x_emb, init_h,\r\n init_c)\r\n\r\n projection = fluid.layers.matmul(rnn_out, self.softmax_weight)\r\n projection = fluid.layers.elementwise_add(projection, self.softmax_bias)\r\n\r\n loss = fluid.layers.softmax_with_cross_entropy(\r\n logits=projection, label=label, soft_label=False)\r\n loss = fluid.layers.reshape(loss, shape=[-1, self.num_steps])\r\n loss = fluid.layers.reduce_mean(loss, dim=[0])\r\n loss = fluid.layers.reduce_sum(loss)\r\n\r\n return loss, last_hidden, 
last_cell\r\n\r\n def debug_emb(self):\r\n\r\n np.save(\"emb_grad\", self.x_emb.gradient())\r\n\r\n\r\ndef train_ptb_lm():\r\n args = parse_args()\r\n\r\n # check if set use_gpu=True in paddlepaddle cpu version\r\n model_check.check_cuda(args.use_gpu)\r\n # check if paddlepaddle version is satisfied\r\n model_check.check_version()\r\n\r\n model_type = args.model_type\r\n\r\n vocab_size = 10000\r\n if model_type == \"test\":\r\n num_layers = 1\r\n batch_size = 2\r\n hidden_size = 10\r\n num_steps = 3\r\n init_scale = 0.1\r\n max_grad_norm = 5.0\r\n epoch_start_decay = 1\r\n max_epoch = 1\r\n dropout = 0.0\r\n lr_decay = 0.5\r\n base_learning_rate = 1.0\r\n elif model_type == \"small\":\r\n num_layers = 2\r\n batch_size = 20\r\n hidden_size = 200\r\n num_steps = 20\r\n init_scale = 0.1\r\n max_grad_norm = 5.0\r\n epoch_start_decay = 4\r\n max_epoch = 13\r\n dropout = 0.0\r\n lr_decay = 0.5\r\n base_learning_rate = 1.0\r\n elif model_type == \"medium\":\r\n num_layers = 2\r\n batch_size = 20\r\n hidden_size = 650\r\n num_steps = 35\r\n init_scale = 0.05\r\n max_grad_norm = 5.0\r\n epoch_start_decay = 6\r\n max_epoch = 39\r\n dropout = 0.5\r\n lr_decay = 0.8\r\n base_learning_rate = 1.0\r\n elif model_type == \"large\":\r\n num_layers = 2\r\n batch_size = 20\r\n hidden_size = 1500\r\n num_steps = 35\r\n init_scale = 0.04\r\n max_grad_norm = 10.0\r\n epoch_start_decay = 14\r\n max_epoch = 55\r\n dropout = 0.65\r\n lr_decay = 1.0 / 1.15\r\n base_learning_rate = 1.0\r\n else:\r\n print(\"model type not supported\")\r\n return\r\n\r\n with fluid.dygraph.guard(core.CUDAPlace(0)):\r\n if args.ce:\r\n print(\"ce mode\")\r\n seed = 33\r\n np.random.seed(seed)\r\n fluid.default_startup_program().random_seed = seed\r\n fluid.default_main_program().random_seed = seed\r\n max_epoch = 1\r\n ptb_model = PtbModel(\r\n \"ptb_model\",\r\n hidden_size=hidden_size,\r\n vocab_size=vocab_size,\r\n num_layers=num_layers,\r\n num_steps=num_steps,\r\n init_scale=init_scale,\r\n dropout=dropout)\r\n\r\n if args.init_from_pretrain_model:\r\n if not os.path.exists(args.init_from_pretrain_model + '.pdparams'):\r\n print(args.init_from_pretrain_model)\r\n raise Warning(\"The pretrained params do not exist.\")\r\n return\r\n fluid.load_dygraph(args.init_from_pretrain_model)\r\n print(\"finished initializing model from pretrained params from %s\" %\r\n (args.init_from_pretrain_model))\r\n\r\n dy_param_updated = dict()\r\n dy_param_init = dict()\r\n dy_loss = None\r\n last_hidden = None\r\n last_cell = None\r\n\r\n data_path = args.data_path\r\n print(\"begin to load data\")\r\n ptb_data = reader.get_ptb_data(data_path)\r\n print(\"finished loading data\")\r\n train_data, valid_data, test_data = ptb_data\r\n\r\n batch_len = len(train_data) // batch_size\r\n total_batch_size = (batch_len - 1) // num_steps\r\n log_interval = 200\r\n\r\n bd = []\r\n lr_arr = [1.0]\r\n for i in range(1, max_epoch):\r\n bd.append(total_batch_size * i)\r\n new_lr = base_learning_rate * (lr_decay**\r\n max(i + 1 - epoch_start_decay, 0.0))\r\n lr_arr.append(new_lr)\r\n\r\n sgd = SGDOptimizer(learning_rate=fluid.layers.piecewise_decay(\r\n boundaries=bd, values=lr_arr))\r\n\r\n def eval(model, data):\r\n print(\"begin to eval\")\r\n total_loss = 0.0\r\n iters = 0.0\r\n init_hidden_data = np.zeros(\r\n (num_layers, batch_size, hidden_size), dtype='float32')\r\n init_cell_data = np.zeros(\r\n (num_layers, batch_size, hidden_size), dtype='float32')\r\n\r\n model.eval()\r\n train_data_iter = reader.get_data_iter(data, batch_size, num_steps)\r\n for batch_id, batch 
in enumerate(train_data_iter):\r\n x_data, y_data = batch\r\n x_data = x_data.reshape((-1, num_steps))\r\n y_data = y_data.reshape((-1, 1))\r\n x = to_variable(x_data)\r\n y = to_variable(y_data)\r\n init_hidden = to_variable(init_hidden_data)\r\n init_cell = to_variable(init_cell_data)\r\n dy_loss, last_hidden, last_cell = ptb_model(x, y, init_hidden,\r\n init_cell)\r\n\r\n out_loss = dy_loss.numpy()\r\n\r\n init_hidden_data = last_hidden.numpy()\r\n init_cell_data = last_cell.numpy()\r\n\r\n total_loss += out_loss\r\n iters += num_steps\r\n\r\n print(\"eval finished\")\r\n ppl = np.exp(total_loss / iters)\r\n print(\"ppl \", batch_id, ppl[0])\r\n if args.ce:\r\n print(\"kpis\\ttest_ppl\\t%0.3f\" % ppl[0])\r\n\r\n grad_clip = fluid.dygraph_grad_clip.GradClipByGlobalNorm(max_grad_norm)\r\n for epoch_id in range(max_epoch):\r\n ptb_model.train()\r\n total_loss = 0.0\r\n iters = 0.0\r\n init_hidden_data = np.zeros(\r\n (num_layers, batch_size, hidden_size), dtype='float32')\r\n init_cell_data = np.zeros(\r\n (num_layers, batch_size, hidden_size), dtype='float32')\r\n\r\n train_data_iter = reader.get_data_iter(train_data, batch_size,\r\n num_steps)\r\n init_hidden = to_variable(init_hidden_data)\r\n init_cell = to_variable(init_cell_data)\r\n start_time = time.time()\r\n for batch_id, batch in enumerate(train_data_iter):\r\n x_data, y_data = batch\r\n\r\n x_data = x_data.reshape((-1, num_steps, 1))\r\n y_data = y_data.reshape((-1, num_steps, 1))\r\n\r\n x = to_variable(x_data)\r\n y = to_variable(y_data)\r\n\r\n dy_loss, last_hidden, last_cell = ptb_model(x, y, init_hidden,\r\n init_cell)\r\n init_hidden = last_hidden\r\n init_cell = last_cell\r\n out_loss = dy_loss.numpy()\r\n\r\n dy_loss.backward()\r\n sgd.minimize(dy_loss, grad_clip=grad_clip)\r\n\r\n ptb_model.clear_gradients()\r\n total_loss += out_loss\r\n iters += num_steps\r\n\r\n if batch_id > 0 and batch_id % log_interval == 0:\r\n ppl = np.exp(total_loss / iters)\r\n print(\"-- Epoch:[%d]; Batch:[%d]; ppl: %.5f, lr: %.5f, loss: %.5f\" %\r\n (epoch_id, batch_id, ppl[0],\r\n sgd._global_learning_rate().numpy(), out_loss))\r\n\r\n print(\"one epoch finished\", epoch_id)\r\n print(\"time cost \", time.time() - start_time)\r\n ppl = np.exp(total_loss / iters)\r\n print(\"-- Epoch:[%d]; ppl: %.5f\" % (epoch_id, ppl[0]))\r\n if args.ce:\r\n print(\"kpis\\ttrain_ppl\\t%0.3f\" % ppl[0])\r\n save_model_dir = os.path.join(args.save_model_dir,\r\n str(epoch_id), 'params')\r\n fluid.save_dygraph(ptb_model.state_dict(), save_model_dir)\r\n print(\"Saved model to: %s.\\n\" % save_model_dir)\r\n\r\n eval(ptb_model, test_data)\r\n\r\n\r\ntrain_ptb_lm()\r\n"
] |
[
[
"numpy.random.seed",
"numpy.exp",
"numpy.zeros"
]
] |
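The SimpleLSTMRNN in the row above implements the LSTM cell by hand: a single fused matmul over the concatenated input and hidden state produces all four gates at once, which are then split into i, j, f, o. Below is a framework-free NumPy sketch of one such step; all names, shapes, and values are illustrative, not taken from the repo.

```python
# Minimal NumPy sketch (not PaddlePaddle) of the fused LSTM step used in
# SimpleLSTMRNN: one matmul produces all four gates, then a 4-way split.
import numpy as np

def sigmoid(x):
    return 1.0 / (1.0 + np.exp(-x))

hidden = 4
x = np.random.randn(1, hidden)                       # step input
h = np.zeros((1, hidden))                            # previous hidden state
c = np.zeros((1, hidden))                            # previous cell state
W = np.random.randn(2 * hidden, 4 * hidden) * 0.1    # fused weight, like weight_1
b = np.zeros(4 * hidden)                             # fused bias, like bias_1

gates = np.concatenate([x, h], axis=1) @ W + b       # one matmul for all gates
i, j, f, o = np.split(gates, 4, axis=1)              # input, cell, forget, output
c = c * sigmoid(f) + sigmoid(i) * np.tanh(j)         # new cell state
h = np.tanh(c) * sigmoid(o)                          # new hidden state
print(h.shape, c.shape)                              # (1, 4) (1, 4)
```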
nodmaterial/OpenVaccine
|
[
"7961d43bb2f7c5ebc35548511608cc98f9948a01"
] |
[
"covid-19_inference.py"
] |
[
"#!/usr/bin/env python\n# coding: utf-8\n\n# In[1]:\n\n\nimport time\nimport warnings\nwarnings.filterwarnings('ignore')\nimport pandas as pd, numpy as np\nimport math, json, gc, random, os, sys\nimport torch\nimport logging\nimport torch.nn as nn\nimport torch.optim as optim\nimport torch.utils.data as data\nfrom sklearn.model_selection import train_test_split\nfrom catalyst.dl import SupervisedRunner\nfrom catalyst.contrib.dl.callbacks import WandbLogger\nfrom contextlib import contextmanager\nfrom catalyst.dl.callbacks import AccuracyCallback, F1ScoreCallback, OptimizerCallback\n#from pytorch_memlab import profile, MemReporter\n\ndevice = torch.device(\"cuda:0\" if torch.cuda.is_available() else \"cpu\")\n\n\n# In[2]:\n\n\ndef set_seed(seed: int):\n random.seed(seed)\n np.random.seed(seed)\n os.environ[\"PYTHONHASHSEED\"] = str(seed)\n torch.manual_seed(seed)\n torch.cuda.manual_seed(seed) # type: ignore\n torch.backends.cudnn.deterministic = True # type: ignore\n torch.backends.cudnn.benchmark = True # type: ignore\n\n\n# In[3]:\n\n\nset_seed(2020)\n\n\n# In[4]:\n\n\ntest = pd.read_json('/kaggle/input/stanford-covid-vaccine/test.json', lines=True)\nsamplesub= pd.read_csv('/kaggle/input/stanford-covid-vaccine/sample_submission.csv')\n\n\n# In[5]:\n\n\nbpp_max=[]\nbpp_mean =[]\n\nid = test.id.values\nfor i in id:\n probability = np.load('../input/stanford-covid-vaccine'+'/bpps/%s.npy'%i)\n bpp_max.append(probability.max(-1).tolist())\n bpp_mean.append(probability.mean(-1).tolist())\ntest['bpp_max']=bpp_max\ntest['bpp_mean']=bpp_mean\n\n\n# In[6]:\n\n\ntest_public=test[test['seq_length']==107]\ntest_private=test[test['seq_length']==130]\n\n\n# In[7]:\n\n\ntest_public_x=test_public.loc[:,['id','sequence','structure','predicted_loop_type','bpp_max','bpp_mean']]\ntest_private_x=test_private.loc[:,['id','sequence','structure','predicted_loop_type','bpp_max','bpp_mean']]\n#CUDAに乗らないので、privateデータのサイズを小さくする。\ntest_private_x1,test_private_x2=train_test_split(test_private_x,test_size=0.5)\n\n\n# In[8]:\n\n\ntoken2int = {x:i for i, x in enumerate('().ACGUBEHIMSX')}\ndef preprocess_inputs_public(df, cols=['sequence', 'structure', 'predicted_loop_type']):\n base_fea= np.transpose(\n np.array(\n df[cols]\n .applymap(lambda seq: [token2int[x] for x in seq])\n .values\n .tolist()\n ),\n (0, 2, 1)\n )\n bpps_max_fea = np.array(test_public_x['bpp_max'].to_list())[:,:,np.newaxis]\n bpps_mean_fea = np.array(test_public_x['bpp_mean'].to_list())[:,:,np.newaxis]\n return np.concatenate([base_fea,bpps_max_fea,bpps_mean_fea], 2)\n\ndef preprocess_inputs_private1(df, cols=['sequence', 'structure', 'predicted_loop_type']):\n base_fea= np.transpose(\n np.array(\n df[cols]\n .applymap(lambda seq: [token2int[x] for x in seq])\n .values\n .tolist()\n ),\n (0, 2, 1)\n )\n bpps_max_fea = np.array(test_private_x1['bpp_max'].to_list())[:,:,np.newaxis]\n bpps_mean_fea = np.array(test_private_x1['bpp_mean'].to_list())[:,:,np.newaxis]\n return np.concatenate([base_fea,bpps_max_fea,bpps_mean_fea], 2)\n\ndef preprocess_inputs_private2(df, cols=['sequence', 'structure', 'predicted_loop_type']):\n base_fea= np.transpose(\n np.array(\n df[cols]\n .applymap(lambda seq: [token2int[x] for x in seq])\n .values\n .tolist()\n ),\n (0, 2, 1)\n )\n bpps_max_fea = np.array(test_private_x2['bpp_max'].to_list())[:,:,np.newaxis]\n bpps_mean_fea = np.array(test_private_x2['bpp_mean'].to_list())[:,:,np.newaxis]\n return np.concatenate([base_fea,bpps_max_fea,bpps_mean_fea], 2)\n\n\n# In[9]:\n\n\ntest_public_inputs = 
torch.from_numpy(preprocess_inputs_public(test_public_x)).to(device).float()\ntest_private_inputs1 = torch.from_numpy(preprocess_inputs_private1(test_private_x1)).to(device).float()\ntest_private_inputs2 = torch.from_numpy(preprocess_inputs_private2(test_private_x2)).to(device).float()\n\n\n# In[10]:\n\n\n#print('train_入力:{}\\nvalue_入力:{}\\ntrain_ラベル:{}\\nvalue_ラベル:{}'.format(train_inputs.shape,val_inputs.shape,train_outputs.shape,val_outputs.shape))\n\n\n# In[11]:\n\n\nclass LSTM_model(nn.Module):\n def __init__(\n self, seq_len=107, pred_len=68, dropout=0.5, embed_dim=100, hidden_dim=1024, hidden_layers=2\n ):\n super(LSTM_model, self).__init__()\n self.pred_len = pred_len\n\n self.embeding = nn.Embedding(num_embeddings=len(token2int), embedding_dim=embed_dim)\n self.lstm = nn.LSTM(\n input_size=embed_dim * 3+2,\n hidden_size=hidden_dim,\n num_layers=hidden_layers,\n dropout=dropout,\n bidirectional=True,\n batch_first=True,\n )\n self.linear = nn.Linear(hidden_dim * 2, 5)\n \n\n def forward(self, seqs):\n embed = self.embeding(seqs[:,:,0:3].long())\n reshaped = torch.reshape(embed, (-1, embed.shape[1], embed.shape[2] * embed.shape[3]))\n reshaped= torch.cat((reshaped,seqs[:,:,3:5]),2)\n output, hidden = self.lstm(reshaped)\n truncated = output[:, : self.pred_len, :]\n out = self.linear(truncated)\n return out\n\n\n# In[12]:\n\n\nclass GRU_model(nn.Module):\n def __init__(\n self, seq_len=107, pred_len=68, dropout=0.5, embed_dim=100, hidden_dim=1024, hidden_layers=2\n ):\n super(GRU_model, self).__init__()\n self.pred_len = pred_len\n\n self.embeding = nn.Embedding(num_embeddings=len(token2int), embedding_dim=embed_dim)\n self.gru = nn.GRU(\n input_size=embed_dim * 3+2,\n hidden_size=hidden_dim,\n num_layers=hidden_layers,\n dropout=dropout,\n bidirectional=True,\n batch_first=True,\n )\n self.linear = nn.Linear(hidden_dim * 2, 5)\n\n def forward(self, seqs):\n embed = self.embeding(seqs[:,:,0:3].long())\n reshaped = torch.reshape(embed, (-1, embed.shape[1], embed.shape[2] * embed.shape[3]))\n reshaped= torch.cat((reshaped,seqs[:,:,3:5]),2)\n output, hidden = self.gru(reshaped)\n truncated = output[:, : self.pred_len, :]\n out = self.linear(truncated)\n return out\n\n\n# In[13]:\n\n\nLSTM_weights_path='../input/weight11/LSTM_ver20.pth'\n\ndef get_LSTM_model(seq_len=107, pred_len=68):\n model = LSTM_model(seq_len=seq_len, pred_len=pred_len)\n checkpoint = torch.load(LSTM_weights_path)\n model.load_state_dict(checkpoint[\"model_state_dict\"])\n device = torch.device(\"cuda\")\n model.to(device)\n model.eval()\n return model\n\n\n# In[14]:\n\n\nGRU_weights_path='../input/weight11/GRU_ver8'\n\ndef get_GRU_model(seq_len=107, pred_len=68):\n model = GRU_model(seq_len=seq_len, pred_len=pred_len)\n checkpoint = torch.load(GRU_weights_path)\n model.load_state_dict(checkpoint[\"model_state_dict\"])\n device = torch.device(\"cuda\")\n model.to(device)\n model.eval()\n return model\n\n\n# In[15]:\n\n\nwith torch.no_grad():\n model =get_LSTM_model()\n prediction=model(test_public_inputs)\n result_public_LSTM=prediction.to('cpu').detach().numpy().copy()\ndel prediction\n\nwith torch.no_grad():\n model =get_LSTM_model(seq_len=130, pred_len=91)\n prediction=model(test_private_inputs1)\n result_private1_LSTM=prediction.to('cpu').detach().numpy().copy()\ndel prediction\n\nwith torch.no_grad():\n model =get_LSTM_model(seq_len=130, pred_len=91)\n prediction=model(test_private_inputs2)\n result_private2_LSTM=prediction.to('cpu').detach().numpy().copy()\ndel prediction\n\n\n# In[16]:\n\n\nwith 
torch.no_grad():\n model =get_GRU_model()\n prediction=model(test_public_inputs)\n result_public_GRU=prediction.to('cpu').detach().numpy().copy()\ndel prediction\n\nwith torch.no_grad():\n model =get_GRU_model(seq_len=130, pred_len=91)\n prediction=model(test_private_inputs1)\n result_private1_GRU=prediction.to('cpu').detach().numpy().copy()\ndel prediction\n\nwith torch.no_grad():\n model =get_GRU_model(seq_len=130, pred_len=91)\n prediction=model(test_private_inputs2)\n result_private2_GRU=prediction.to('cpu').detach().numpy().copy()\ndel prediction\n\n\n# In[17]:\n\n\ndf0 = pd.DataFrame(index=range(39), columns=['reactivity', 'deg_Mg_pH10', 'deg_pH10','deg_Mg_50C','deg_50C',])\ndf0=df0.fillna(0)\n\n\n# In[18]:\n\n\ntest_public_id=test_public['id']\nidlist_public=test_public_id.values.tolist()\n\n\n# In[19]:\n\n\ntest_private_id1=test_private_x1['id']\nidlist_private1=test_private_id1.values.tolist()\nidlist_private1[-5:]\n\n\n# In[20]:\n\n\ntest_private_id2=test_private_x2['id']\nidlist_private2=test_private_id2.values.tolist()\nidlist_private2[:5]\n\n\n# In[21]:\n\n\n#無理やりソートすることに\ntestindex=samplesub.loc[:,['id_seqpos']]\ntestindex=testindex.reset_index()\n\n\n# In[22]:\n\n\ndf1 = pd.DataFrame(result_public_LSTM[0])\ndf1.columns = ['reactivity', 'deg_Mg_pH10', 'deg_pH10','deg_Mg_50C','deg_50C',]\ndf1.insert(0, 'id_seqpos', 0)\ndf1=pd.concat([df1,df0])\nid=idlist_public[0]\nfor i in range(len(df1)):\n df1.iloc[i,0]=id+'_{}'.format(i)\nfor j in range (len(result_public_LSTM)-1):\n id = idlist_public[j+1]\n df2 = pd.DataFrame(result_public_LSTM[j+1])\n df2.columns = ['reactivity', 'deg_Mg_pH10', 'deg_pH10','deg_Mg_50C','deg_50C',]\n df2.insert(0, 'id_seqpos', 0)\n df2=pd.concat([df2,df0]) \n for i in range(len(df2)):\n df2.iloc[i,0]=id+'_{}'.format(i)\n df1=pd.concat([df1,df2])\npublic_dataframe=df1\n\ndf1 = pd.DataFrame(result_private1_LSTM[0])\ndf1.columns = ['reactivity', 'deg_Mg_pH10', 'deg_pH10','deg_Mg_50C','deg_50C',]\ndf1.insert(0, 'id_seqpos', 0)\ndf1=pd.concat([df1,df0])\nid=idlist_private1[0]\nfor i in range(len(df1)):\n df1.iloc[i,0]=id+'_{}'.format(i)\nfor j in range (len(result_private1_LSTM)-1):\n id = idlist_private1[j+1]\n df2 = pd.DataFrame(result_private1_LSTM[j+1])\n df2.columns = ['reactivity', 'deg_Mg_pH10', 'deg_pH10','deg_Mg_50C','deg_50C',]\n df2.insert(0, 'id_seqpos', 0)\n df2=pd.concat([df2,df0])\n for i in range(len(df2)):\n df2.iloc[i,0]=id+'_{}'.format(i)\n df1=pd.concat([df1,df2])\nprivate_dataframe1=df1\n\ndf1 = pd.DataFrame(result_private2_LSTM[0])\ndf1.columns = ['reactivity', 'deg_Mg_pH10', 'deg_pH10','deg_Mg_50C','deg_50C',]\ndf1.insert(0, 'id_seqpos', 0)\ndf1=pd.concat([df1,df0])\nid=idlist_private2[0]\nfor i in range(len(df1)):\n df1.iloc[i,0]=id+'_{}'.format(i)\nfor j in range (len(result_private2_LSTM)-1):\n id = idlist_private2[j+1]\n df2 = pd.DataFrame(result_private2_LSTM[j+1])\n df2.columns = ['reactivity', 'deg_Mg_pH10', 'deg_pH10','deg_Mg_50C','deg_50C',]\n df2.insert(0, 'id_seqpos', 0)\n df2=pd.concat([df2,df0])\n for i in range(len(df2)):\n df2.iloc[i,0]=id+'_{}'.format(i)\n df1=pd.concat([df1,df2])\nprivate_dataframe2=df1\n\n\n# In[23]:\n\n\nmerged_dataframe=pd.concat([public_dataframe,private_dataframe1,private_dataframe2])\n\npre_submission_LSTM=pd.merge(testindex,merged_dataframe)\n\n\n# In[24]:\n\n\npre_submission_LSTM\n\n\n# In[25]:\n\n\ndf1 = pd.DataFrame(result_public_GRU[0])\ndf1.columns = ['reactivity', 'deg_Mg_pH10', 'deg_pH10','deg_Mg_50C','deg_50C',]\ndf1.insert(0, 'id_seqpos', 
0)\ndf1=pd.concat([df1,df0])\nid=idlist_public[0]\nfor i in range(len(df1)):\n df1.iloc[i,0]=id+'_{}'.format(i)\nfor j in range (len(result_public_GRU)-1):\n id = idlist_public[j+1]\n df2 = pd.DataFrame(result_public_GRU[j+1])\n df2.columns = ['reactivity', 'deg_Mg_pH10', 'deg_pH10','deg_Mg_50C','deg_50C',]\n df2.insert(0, 'id_seqpos', 0)\n df2=pd.concat([df2,df0]) \n for i in range(len(df2)):\n df2.iloc[i,0]=id+'_{}'.format(i)\n df1=pd.concat([df1,df2])\npublic_dataframe=df1\n\ndf1 = pd.DataFrame(result_private1_GRU[0])\ndf1.columns = ['reactivity', 'deg_Mg_pH10', 'deg_pH10','deg_Mg_50C','deg_50C',]\ndf1.insert(0, 'id_seqpos', 0)\ndf1=pd.concat([df1,df0])\nid=idlist_private1[0]\nfor i in range(len(df1)):\n df1.iloc[i,0]=id+'_{}'.format(i)\nfor j in range (len(result_private1_GRU)-1):\n id = idlist_private1[j+1]\n df2 = pd.DataFrame(result_private1_GRU[j+1])\n df2.columns = ['reactivity', 'deg_Mg_pH10', 'deg_pH10','deg_Mg_50C','deg_50C',]\n df2.insert(0, 'id_seqpos', 0)\n df2=pd.concat([df2,df0])\n for i in range(len(df2)):\n df2.iloc[i,0]=id+'_{}'.format(i)\n df1=pd.concat([df1,df2])\nprivate_dataframe1=df1\n\ndf1 = pd.DataFrame(result_private2_GRU[0])\ndf1.columns = ['reactivity', 'deg_Mg_pH10', 'deg_pH10','deg_Mg_50C','deg_50C',]\ndf1.insert(0, 'id_seqpos', 0)\ndf1=pd.concat([df1,df0])\nid=idlist_private2[0]\nfor i in range(len(df1)):\n df1.iloc[i,0]=id+'_{}'.format(i)\nfor j in range (len(result_private2_GRU)-1):\n id = idlist_private2[j+1]\n df2 = pd.DataFrame(result_private2_GRU[j+1])\n df2.columns = ['reactivity', 'deg_Mg_pH10', 'deg_pH10','deg_Mg_50C','deg_50C',]\n df2.insert(0, 'id_seqpos', 0)\n df2=pd.concat([df2,df0])\n for i in range(len(df2)):\n df2.iloc[i,0]=id+'_{}'.format(i)\n df1=pd.concat([df1,df2])\nprivate_dataframe2=df1\n\n\n# In[26]:\n\n\nmerged_dataframe=pd.concat([public_dataframe,private_dataframe1,private_dataframe2])\n\npre_submission_GRU=pd.merge(testindex,merged_dataframe)\n\n\n# In[27]:\n\n\nblend_preds_df = pd.DataFrame()\nblend_preds_df['id_seqpos']=pre_submission_GRU['id_seqpos']\nblend_preds_df['reactivity'] = .5*pre_submission_GRU['reactivity'] + .5*pre_submission_LSTM['reactivity']\nblend_preds_df['deg_Mg_pH10'] = .5*pre_submission_GRU['deg_Mg_pH10'] + .5*pre_submission_LSTM['deg_Mg_pH10']\nblend_preds_df['deg_pH10'] = .5*pre_submission_GRU['deg_pH10'] + .5*pre_submission_LSTM['deg_pH10']\nblend_preds_df['deg_Mg_50C'] = .5*pre_submission_GRU['deg_Mg_50C'] + .5*pre_submission_LSTM['deg_Mg_50C']\nblend_preds_df['deg_50C'] = .5*pre_submission_GRU['deg_50C'] + .5*pre_submission_LSTM['deg_50C']\nblend_preds_df\n\n\n# In[28]:\n\n\nblend_preds_df.to_csv(\"submission.csv\", index=False)\n\n"
] |
[
[
"torch.nn.Linear",
"torch.cat",
"torch.cuda.manual_seed",
"torch.nn.LSTM",
"torch.nn.GRU",
"numpy.load",
"torch.cuda.is_available",
"torch.load",
"pandas.concat",
"pandas.read_csv",
"torch.reshape",
"numpy.concatenate",
"pandas.merge",
"pandas.DataFrame",
"torch.manual_seed",
"torch.device",
"pandas.read_json",
"sklearn.model_selection.train_test_split",
"numpy.random.seed",
"torch.no_grad"
]
] |
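The three preprocess_inputs_* functions in the row above all follow the same recipe: map each character of the text columns to an integer via token2int, then append the per-position bpp_max/bpp_mean scalars as extra channels. Below is a self-contained toy sketch of that feature construction; the data is made up, and where the notebook stacks three text columns this sketch uses one.

```python
# Standalone sketch of the feature construction: characters -> integer ids,
# then bpp_max/bpp_mean appended as extra channels along the last axis.
import numpy as np

token2int = {x: i for i, x in enumerate('().ACGUBEHIMSX')}
seqs = ['GGAAC', 'ACGUA']                      # stand-ins for the 'sequence' column
base = np.array([[token2int[ch] for ch in s] for s in seqs])   # (B, L)
base = base[:, :, np.newaxis]                  # (B, L, 1) - one text channel here
bpp_max = np.random.rand(2, 5, 1)              # (B, L, 1), assumed precomputed
bpp_mean = np.random.rand(2, 5, 1)
features = np.concatenate([base, bpp_max, bpp_mean], axis=2)
print(features.shape)                          # (2, 5, 3)
```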
ada-shen/Interpret_quality
|
[
"e58d8e24a44005bde1eadbf8ef34c715d02a19cf"
] |
[
"final_shapley_value.py"
] |
[
"\"\"\" save the region id and sampled orders\r\nalso calculate the Shapley value for each region (at the original position of the point cloud) \"\"\"\r\nimport os\r\n\r\nimport argparse\r\nimport torch\r\nfrom final_data_shapley import ModelNet_Loader_Shapley_test, ShapeNetDataset_Shapley_test\r\nimport numpy as np\r\nfrom torch.utils.data import DataLoader\r\nfrom tools.final_util import IOStream,set_random, set_model_args, load_model, get_folder_name_list, square_distance, mkdir\r\nfrom tools.final_util import NUM_POINTS, NUM_REGIONS, NUM_SAMPLES_SAVE, SHAPENET_CLASS # constants\r\nfrom tools.final_common import cal_reward\r\n\r\ndef _init_(args):\r\n if not os.path.exists('checkpoints'):\r\n os.makedirs('checkpoints')\r\n if not os.path.exists(args.exp_folder):\r\n os.makedirs(args.exp_folder)\r\n\r\ndef cal_region_id(data, fps_index, result_path, save=True):\r\n \"\"\" calculate and save region id of all the points\r\n Input:\r\n data: (B,num_points,3) tensor, point cloud, num_points=1024\r\n fps_index: (num_regions,) ndarray, center idx of the 32 regions\r\n result_path: path to save file\r\n Return:\r\n region_id: (num_points,) ndarray, record each point belongs to which region\r\n \"\"\"\r\n data_fps = data[:, fps_index, :] # (B, num_regions, 3), centroids of each region\r\n distance = square_distance(data, data_fps) # (B, num_points, num_regions), here B=1\r\n region_id = torch.argmin(distance, dim=2) # (B, num_points), B=1\r\n region_id = region_id.squeeze().cpu().numpy() # (num_points,) ndarray\r\n if save:\r\n np.save(result_path + \"region_id.npy\", region_id) # (num_points,)\r\n return region_id\r\n\r\n\r\n\r\ndef cal_norm_factor(model, data, lbl, center, result_path, args, save=True):\r\n \"\"\" calculate v(N) - v(empty)\r\n Input:\r\n data: (B,num_points,3) tensor, point cloud (already transposed)\r\n lbl: (B,) tensor, label\r\n center: (3,) tensor, center of point cloud\r\n result_path: path to save\r\n Return:\r\n norm_factor: scalar, v(Omega) - v(empty)\r\n \"\"\"\r\n B = data.shape[0]\r\n empty = center.view(1, 1, 3).expand(B, args.num_points, 3).clone()\r\n v_N, _ = cal_reward(model, data, lbl, args)\r\n v_empty, _ = cal_reward(model, empty, lbl, args)\r\n norm_factor = (v_N - v_empty).cpu().item()\r\n if save:\r\n np.save(result_path + \"norm_factor.npy\", norm_factor)\r\n return norm_factor\r\n\r\n\r\ndef generate_all_orders(result_path, args, save=True):\r\n \"\"\" generate random orders for sampling\r\n Input:\r\n result_path: path to save all orders\r\n Return:\r\n all_orders: (num_samples_save, num_regions) ndarray\r\n \"\"\"\r\n all_orders = []\r\n for k in range(args.num_samples_save):\r\n all_orders.append(np.random.permutation(np.arange(0, args.num_regions, 1)).reshape((1, -1))) # append (1,num_regions)\r\n all_orders = np.concatenate(all_orders, axis=0) # (num_samples_save, num_regions)\r\n if save:\r\n np.save(result_path + \"all_orders.npy\", all_orders)\r\n return all_orders\r\n\r\ndef mask_data(masked_data, center, order, region_id):\r\n \"\"\" mask the data to the center of the point cloud\r\n Input:\r\n masked_data: (region+1, num_points,3) tensor, data to be masked\r\n center: (3,) tensor, center of point cloud\r\n order: (num_regions,) ndarray\r\n region_id: (num_points,) ndarray\r\n Return:\r\n masked_data: (region+1, num_points,3) tensor, modified\r\n \"\"\"\r\n for j in range(1, len(order) + 1):\r\n mask_region_id = order[j - 1]\r\n mask_index = (region_id == mask_region_id)\r\n masked_data[:j, mask_index, :] = center\r\n return 
masked_data\r\n\r\n\r\ndef save_shapley(region_shap_value, pc_idx, count, result_path, region_id, args):\r\n N = args.num_points\r\n shap_value = np.zeros((N,))\r\n\r\n folder = result_path + \"shapley/\"\r\n mkdir(folder)\r\n\r\n folder2 = result_path + \"region_shapley/\"\r\n mkdir(folder2)\r\n\r\n for k in range(0, args.num_regions):\r\n region_index = (region_id == k)\r\n shap_value[region_index] = region_shap_value[k] / count\r\n\r\n np.save(folder + \"%s.npy\" % (str(pc_idx) + '_' + str(count)), shap_value)\r\n np.save(folder2 + \"%s.npy\" % (str(pc_idx) + '_' + str(count)), region_shap_value / count)\r\n\r\n\r\n\r\ndef shap_sampling(model, dataloader, args, folder_name_list):\r\n sample_nums = [100, 200, 300, 400, 500, 600, 700, 800, 900, 1000, 2000, 3000, 4000, 5000]\r\n\r\n with torch.no_grad():\r\n model.eval()\r\n fps_indices = np.load('fps_%s_%d_%d_index_final30.npy'%(args.dataset, args.num_points, args.num_regions))\r\n fps_indices = torch.from_numpy(fps_indices).to(args.device)\r\n\r\n for i, (data, lbl) in enumerate(dataloader):\r\n B, N = data.shape[0], args.num_points\r\n folder_name = folder_name_list[i]\r\n result_path = args.exp_folder + '%s/' % folder_name\r\n mkdir(result_path)\r\n\r\n count = 0\r\n region_sv_all = [] # (num_samples_save, num_regions)\r\n region_shap_value = np.zeros((args.num_regions,)) # (num_regions,)\r\n\r\n data = data.to(args.device) # (B, num_points, 3), here B=1\r\n lbl = lbl.to(args.device) # (B,), here B=1\r\n fps_index = fps_indices[i] # (num_regions,)\r\n region_id = cal_region_id(data, fps_index, result_path, save=True) # (num_points,)\r\n\r\n center = torch.mean(data, dim=1).squeeze() # (3,)\r\n\r\n norm_factor = cal_norm_factor(model, data, lbl, center, result_path, args, save=True)\r\n all_orders = generate_all_orders(result_path, args, save=True) # (num_samples_save, num_regions) int array\r\n\r\n while count < args.num_samples_save:\r\n print(\"pointcloud:%s, index:%d, sample:%d\" % (folder_name, i, count))\r\n order = all_orders[count] # Sample an order\r\n masked_data = data.expand(args.num_regions + 1, N, 3).clone()\r\n masked_data = mask_data(masked_data, center, order, region_id)\r\n\r\n v, _ = cal_reward(model, masked_data, lbl, args) # (num_regions+1,) tensor\r\n dv = v[1:] - v[:-1]\r\n region_shap_value[order] += (dv.cpu().numpy())\r\n\r\n temp = np.zeros((args.num_regions,))\r\n temp[order] += dv.cpu().numpy()\r\n region_sv_all.append(temp)\r\n count += 1\r\n\r\n if count in sample_nums:\r\n save_shapley(region_shap_value, i, count, result_path, region_id, args)\r\n\r\n np.save(result_path + \"region_sv_all.npy\", region_sv_all) # (num_samples_save, num_regions)\r\n\r\n\r\ndef test(args):\r\n\r\n if args.dataset == \"modelnet10\":\r\n data_loader = DataLoader(ModelNet_Loader_Shapley_test(args, partition='train', num_points=args.num_points),\r\n num_workers=8,\r\n batch_size=args.test_batch_size, shuffle=False, drop_last=False)\r\n elif args.dataset == \"shapenet\":\r\n data_loader = DataLoader(ShapeNetDataset_Shapley_test(args, split='train', npoints=args.num_points,\r\n class_choice=SHAPENET_CLASS, classification=True),\r\n num_workers=8,\r\n batch_size=args.test_batch_size, shuffle=False, drop_last=False)\r\n else:\r\n raise Exception(\"Dataset does not exist\")\r\n model = load_model(args)\r\n folder_name_list = get_folder_name_list(args)\r\n shap_sampling(model, data_loader, args, folder_name_list)\r\n\r\n\r\ndef main():\r\n parser = argparse.ArgumentParser(description='Point Cloud Recognition')\r\n 
parser.add_argument('--model', type=str, default='pointconv', metavar='N',\r\n choices=['pointnet', 'dgcnn', 'gcnn', 'pointnet2', 'pointconv', 'gcnn_adv'])\r\n parser.add_argument('--dataset', type=str, default='shapenet', metavar='N',\r\n choices=['modelnet10', 'shapenet'])\r\n parser.add_argument('--test_batch_size', type=int, default=1, metavar='batch_size', help='Size of batch)')\r\n parser.add_argument('--no_cuda', type=bool, default=False, help='enables CUDA training')\r\n parser.add_argument('--seed', type=int, default=1, metavar='S', help='random seed (default: 1)')\r\n parser.add_argument('--device_id', type=int, default=0, help='gpu id to use') # change GPU here\r\n parser.add_argument('--softmax_type', type=str, default=\"modified\", choices=[\"normal\", \"modified\"])\r\n\r\n args = parser.parse_args()\r\n\r\n args.num_points = NUM_POINTS\r\n args.num_regions = NUM_REGIONS\r\n args.num_samples_save = NUM_SAMPLES_SAVE\r\n args.exp_folder = './checkpoints/exp_MODEL_%s_DATA_%s_POINTNUM_%d_REGIONNUM_%d_shapley_test/' % (\r\n args.model, args.dataset, args.num_points, args.num_regions)\r\n os.environ['CUDA_VISIBLE_DEVICES'] = str(args.device_id)\r\n args.cuda = not args.no_cuda and torch.cuda.is_available()\r\n args.device = torch.device(\"cuda:0\" if args.cuda else \"cpu\")\r\n\r\n _init_(args)\r\n\r\n set_random(args.seed)\r\n set_model_args(args)\r\n\r\n if args.cuda:\r\n print(\r\n 'Using GPU : ' + str(torch.cuda.current_device()) + ' from ' + str(torch.cuda.device_count()) + ' devices')\r\n else:\r\n print('Using CPU')\r\n\r\n test(args)\r\n\r\n\r\nif __name__ == \"__main__\":\r\n main()\r\n"
] |
[
[
"numpy.concatenate",
"torch.device",
"numpy.zeros",
"torch.no_grad",
"torch.argmin",
"numpy.load",
"numpy.save",
"torch.from_numpy",
"torch.cuda.device_count",
"torch.cuda.current_device",
"torch.cuda.is_available",
"numpy.arange",
"torch.mean"
]
] |
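shap_sampling in the row above estimates Shapley values by permutation sampling: for each random order it progressively unmasks regions and credits the reward differences v[1:] - v[:-1] to the regions in that order. Below is a toy, framework-free sketch of the same estimator; v() is a made-up additive value function so the correct answer is known in advance.

```python
# Permutation-sampling Shapley estimate: accumulate each region's marginal
# contribution over many random orders, then average.
import numpy as np

num_regions, num_samples = 4, 2000
weights = np.array([1.0, 2.0, 0.0, -1.0])

def v(present):                      # toy additive reward for a subset mask
    return float(weights[present].sum())

shap = np.zeros(num_regions)
for _ in range(num_samples):
    order = np.random.permutation(num_regions)
    mask = np.zeros(num_regions, dtype=bool)
    prev = v(mask)                   # value of the empty coalition
    for r in order:
        mask[r] = True
        cur = v(mask)
        shap[r] += cur - prev        # marginal contribution of region r
        prev = cur
shap /= num_samples
print(shap)   # for an additive game this recovers [1, 2, 0, -1] exactly
```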
tonybaloney/cpython-book-samples
|
[
"d61d252d63461b114c3c02329a88a74dc8c51956"
] |
[
"62/profile.py"
] |
[
"import argparse\nfrom pathlib import Path\nfrom perf._bench import BenchmarkSuite\n\nimport seaborn as sns\nimport pandas as pd\n\nsns.set(style=\"whitegrid\")\n\nparser = argparse.ArgumentParser()\nparser.add_argument('files', metavar='N', type=str, nargs='+',\n help='files to compare')\nargs = parser.parse_args()\n\nbenchmark_names = []\nrecords = []\nfirst = True\nfor f in args.files:\n benchmark_suite = BenchmarkSuite.load(f)\n if first:\n # Initialise the dictionary keys to the benchmark names\n benchmark_names = benchmark_suite.get_benchmark_names()\n first = False\n bench_name = Path(benchmark_suite.filename).name\n for name in benchmark_names:\n try:\n benchmark = benchmark_suite.get_benchmark(name)\n if benchmark is not None:\n records.append({\n 'test': name,\n 'runtime': bench_name.replace('.json', ''),\n 'stdev': benchmark.stdev(),\n 'mean': benchmark.mean(),\n 'median': benchmark.median()\n })\n except KeyError:\n # Bonus benchmark! ignore.\n pass\n\ndf = pd.DataFrame(records)\n\nfor test in benchmark_names:\n g = sns.factorplot(\n x=\"runtime\",\n y=\"mean\",\n data=df[df['test'] == test],\n palette=\"YlGnBu_d\",\n size=12,\n aspect=1,\n kind=\"bar\")\n g.despine(left=True)\n g.savefig(\"png/{}-result.png\".format(test))"
] |
[
[
"pandas.DataFrame"
]
] |
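A portability note on the plotting row above: seaborn 0.9 renamed factorplot to catplot and its size argument to height, so on current seaborn the same grouped bar chart would look roughly like the sketch below. The toy frame stands in for the benchmark DataFrame.

```python
# Equivalent of the sns.factorplot call on seaborn >= 0.9 (catplot / height).
import pandas as pd
import seaborn as sns

sns.set(style="whitegrid")
df = pd.DataFrame({'runtime': ['cpython-3.8', 'cpython-3.9'], 'mean': [1.0, 1.3]})
g = sns.catplot(x="runtime", y="mean", data=df,
                palette="YlGnBu_d", height=12, aspect=1, kind="bar")
g.despine(left=True)
g.savefig("example-result.png")
```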
hai-dang-dau/particles
|
[
"312d0d3675518ce075c5e57c7437466d1fabd3b5"
] |
[
"setup.py"
] |
[
"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\nimport io\nimport os\nimport setuptools \nimport sys\nimport warnings\n\nNAME = 'particles'\nDESCRIPTION = 'Sequential Monte Carlo in Python'\n\nwith open('README.md') as f:\n long_description = f.read()\n\nMETADATA = dict(\n name=NAME, \n version='0.1', \n url='http://github.com/nchopin/particles/',\n license='MIT', \n author='Nicolas Chopin',\n install_requires=['numpy',\n 'scipy',\n 'numba'\n ],\n author_email='[email protected]',\n description=DESCRIPTION, \n long_description = long_description,\n long_description_content_type=\"text/markdown\",\n packages=[NAME], \n include_package_data=True,\n platforms='any',\n classifiers=[\n 'Programming Language :: Python',\n 'Development Status :: 3 - Alpha',\n 'Natural Language :: English',\n 'Environment :: Console',\n 'Intended Audience :: Science/Research',\n 'License :: OSI Approved :: MIT License',\n 'Operating System :: OS Independent',\n 'Topic :: Scientific/Engineering :: Mathematics',\n ]\n)\n\nfortran_warning = \"\"\"\nlowdiscrepancy fortran module could not be built (missing compiler? see INSTALL\nnotes). Package should work as expected, except for the parts related to QMC\n(quasi-Monte Carlo). \n\"\"\"\n\n# detect that Read the docs is trying to build the package\non_rtd = os.environ.get('READTHEDOCS') == 'True'\nif on_rtd:\n # RTD does not have a fortran compiler\n from setuptools import setup\nelse:\n # Try to install using Fortran, if the compiler\n # cannot be found, switch to traditional installation.\n try:\n from numpy.distutils.core import setup\n from numpy.distutils.extension import Extension\n ext = Extension(name=NAME + \".lowdiscrepancy\", \n sources=[\"src/LowDiscrepancy.f\"])\n METADATA['ext_modules'] = [ext,]\n except:\n from setuptools import setup\n warnings.warn(fortran_warning)\n\nsetup(**METADATA)\n"
] |
[
[
"numpy.distutils.extension.Extension"
]
] |
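Given the fallback in the setup above, downstream code cannot assume the compiled particles.lowdiscrepancy module exists. Below is a sketch of a consumer-side guard; the HAVE_QMC flag name is illustrative, not part of the package.

```python
# Guard against the optional Fortran extension being absent: the setup falls
# back to a pure-Python install when no compiler is found.
try:
    from particles import lowdiscrepancy   # built from src/LowDiscrepancy.f
    HAVE_QMC = True
except ImportError:
    HAVE_QMC = False                        # QMC features unavailable

if not HAVE_QMC:
    print("quasi-Monte Carlo support not built; using plain Monte Carlo")
```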
Julien-Virl/PYCOF
|
[
"973d82d302f52a8f32bca85617d4225b0a436f62"
] |
[
"pycof/data.py"
] |
[
"import os\nimport sys\nimport getpass\nimport boto3\n\nimport pandas as pd\nimport numpy as np\nimport math\nimport json\nimport xlrd\nimport hashlib\nfrom io import StringIO, BytesIO\nimport urllib.request\nfrom types import SimpleNamespace\n\nimport re\n\nfrom tqdm import tqdm\nimport datetime\n\nfrom .misc import write, _get_config, file_age, verbose_display, _pycof_folders\n\n\n##############################################################################################################################\n\n# Easy file read\ndef f_read(path, extension=None, parse=True, remove_comments=True, sep=',', sheet_name=0, engine='auto', credentials={}, cache='30mins', cache_name=None, verbose=False, **kwargs):\n \"\"\"Read and parse a data file.\n It can read multiple format. For data frame-like format, the function will return a pandas data frame, otherzise a string.\n The function will by default detect the extension from the file path. You can force an extension with the argument.\n It can remove comments, trailing spaces, breaklines and tabs. It can also replace f-strings with provided values.\n :Parameters:\n * **path** (:obj:`str`): path to the SQL file.\n * **extension** (:obj:`str`): extension to use. Can be 'csv', 'txt', 'xslsx', 'sql', 'html', 'py', 'json', 'js', 'parquet', 'read-only' (defaults None).\n * **parse** (:obj:`bool`): Format the query to remove trailing space and comments, ready to use format (defaults True).\n * **remove_comments** (:obj:`bool`): Remove comments from the loaded file (defaults True).\n * **sep** (:obj:`str`): Columns delimiter for pd.read_csv (defaults ',').\n * **sheet_name** (:obj:`str`): Tab column to load when reading Excel files (defaults 0).\n * **engine** (:obj:`str`): Engine to use to load the file. Can be 'pyarrow' or the function from your preferred library (defaults 'auto').\n * **credentials** (:obj:`dict`): Credentials to use to connect to AWS S3. You can also provide the credentials path or the json file name from '/etc/.pycof' (defaults {}).\n * **cache** (:obj:`str`): Caches the data to avoid downloading again.\n * **cache_name** (:obj:`str`): File name for storing cache data, if None the name will be generated by hashing the path (defaults None).\n * **verbose** (:obj:`bool`): Display intermediate steps (defaults False).\n * **\\\\*\\\\*kwargs** (:obj:`str`): Arguments to be passed to the engine or values to be formated in the file to load.\n :Configuration: The function requires the below arguments in the configuration file.\n * :obj:`AWS_ACCESS_KEY_ID`: AWS access key, can remain empty if an IAM role is assign to the host.\n * :obj:`AWS_SECRET_ACCESS_KEY`: AWS secret key, can remain empty if an IAM role is assign to the host.\n .. 
code-block:: python\n {\n \"AWS_ACCESS_KEY_ID\": \"\",\n \"AWS_SECRET_ACCESS_KEY\": \"\"\n }\n :Example:\n >>> sql = pycof.f_read('/path/to/file.sql', country='FR')\n >>> df1 = pycof.f_read('/path/to/df_file.json')\n >>> df2 = pycof.f_read('/path/to/df.csv')\n >>> df3 = pycof.f_read('s3://bucket/path/to/file.parquet')\n :Returns:\n * :obj:`pandas.DataFrame`: Data frame a string from file read.\n \"\"\"\n # Initialize ext var\n ext = path.split('.')[-1] if extension is None else extension\n # Initialize orgn var\n if path.startswith('s3://'):\n orgn = 'S3'\n elif path.startswith('http'):\n orgn = 'http'\n else:\n orgn = 'other'\n # Initialize data var\n data = []\n\n if orgn == 'S3':\n config = _get_config(credentials)\n if config.get(\"AWS_SECRET_ACCESS_KEY\") in [None, 'None', '']:\n try:\n s3 = boto3.client('s3')\n s3_resource = boto3.resource('s3')\n except Exception:\n raise ConnectionError(\"Please run 'aws config' on your terminal and initialize the parameters.\")\n else:\n s3 = boto3.client('s3', aws_access_key_id=config.get(\"AWS_ACCESS_KEY_ID\"),\n aws_secret_access_key=config.get(\"AWS_SECRET_ACCESS_KEY\"),\n region_name=config.get(\"REGION\"))\n s3_resource = boto3.resource('s3', aws_access_key_id=config.get(\"AWS_ACCESS_KEY_ID\"),\n aws_secret_access_key=config.get(\"AWS_SECRET_ACCESS_KEY\"),\n region_name=config.get(\"REGION\"))\n\n bucket = path.replace('s3://', '').split('/')[0]\n folder_path = '/'.join(path.replace('s3://', '').split('/')[1:])\n\n if ext.lower() in ['csv', 'txt', 'parq', 'parquet', 'html', 'json', 'js', 'py', 'sh', 'xls', 'xlsx']:\n # If file can be loaded by pandas, we do not download locally\n verbose_display('Loading the data from S3 directly', verbose)\n obj = s3.get_object(Bucket=bucket, Key=folder_path)\n path = BytesIO(obj['Body'].read())\n else:\n # This step will only check the cache and download the file to tmp if not available.\n # The normal below steps will still run, only the path will change if the file comes from S3\n # and cannot be loaded by pandas.\n\n cache_time = 0. 
if cache is False else cache\n _disp = tqdm if verbose else list\n # Force the input to be a string\n str_c_time = str(cache_time).lower().replace(' ', '')\n # Get the numerical part of the input\n c_time = float(''.join(re.findall('[^a-z]', str_c_time)))\n # Get the str part of the input - for the format\n age_fmt = ''.join(re.findall('[a-z]', str_c_time))\n\n # Hash the path to create filename\n file_name = cache_name if cache_name else hashlib.sha224(bytes(path, 'utf-8')).hexdigest().replace('-', 'm')\n data_path = _pycof_folders('data')\n\n # Changing path to local once file is downloaded to tmp folder\n path = os.path.join(data_path, file_name)\n\n # Set the S3 bucket\n s3bucket = s3_resource.Bucket(bucket)\n\n # First, check if the same path has already been downloaded locally\n if file_name in os.listdir(data_path):\n # If yes, check when and compare to cache time\n if file_age(path, format=age_fmt) < c_time:\n # If cache is recent, no need to download\n ext = os.listdir(path)[0].split('.')[-1]\n verbose_display('Data file available in cache', verbose)\n else:\n # Otherwise, we update the cache\n verbose_display('Updating data in cache', verbose)\n # Remove the existing the content of the existing folder before downloading the updated data\n for root, _, files in os.walk(path):\n for name in files:\n os.remove(os.path.join(root, name))\n # Downloading the objects from S3\n for obj in _disp(s3bucket.objects.filter(Prefix=folder_path)):\n if (obj.key == folder_path) or (not any(e in obj.key for e in ['.parquet', '.parq', '.csv', '.json', '.txt'])):\n continue\n else:\n s3bucket.download_file(obj.key, os.path.join(path, obj.key.split('/')[-1]))\n ext = obj.key.split('.')[-1]\n else:\n # If the file is not in the cache, we download it\n verbose_display('Downloading and caching data', verbose)\n # Creating the directory\n os.makedirs(path, exist_ok=True)\n for obj in _disp(s3bucket.objects.filter(Prefix=folder_path)):\n if obj.key == folder_path:\n continue\n s3bucket.download_file(obj.key, os.path.join(path, obj.key.split('/')[-1]))\n ext = obj.key.split('.')[-1]\n\n # CSV / txt\n if ext.lower() in ['csv', 'txt']:\n data = pd.read_csv(path, sep=sep, **kwargs)\n # XLSX\n elif ext.lower() in ['xls', 'xlsx']:\n _engine = 'openpyxl' if engine == 'auto' else engine\n data = pd.read_excel(path, sheet_name=sheet_name, engine=_engine, **kwargs)\n # SQL\n elif ext.lower() in ['sql']:\n if type(path) == BytesIO:\n file = path.read().decode()\n else:\n with open(path) as f:\n file = f.read()\n for line in file.split('\\n'): # Parse the data\n l_striped = line.strip() # Removing trailing spaces\n if parse:\n l_striped = l_striped.format(**kwargs) # Formating\n if remove_comments:\n l_striped = l_striped.split('--')[0] # Remove comments\n re.sub(r\"<!--(.|\\s|\\n)*?-->\", \"\", l_striped.replace('/*', '<!--').replace('*/', '-->'))\n if l_striped != '':\n data += [l_striped]\n data = ' '.join(data)\n # HTML\n elif ext.lower() in ['html']:\n if type(path) == BytesIO:\n file = path.read().decode()\n elif orgn == 'http':\n weburl = urllib.request.urlopen(path)\n file = weburl.read().decode(\"utf-8\")\n else:\n with open(path) as f:\n file = f.read()\n\n # Parse the data\n for line in file.split('\\n'):\n l_striped = line.strip() # Removing trailing spaces\n if parse:\n l_striped = l_striped.format(**kwargs) # Formating\n if remove_comments:\n l_striped = re.sub(r\"<!--(.|\\s|\\n)*?-->\", \"\", l_striped) # Remove comments\n if l_striped != '':\n data += [l_striped]\n data = ' '.join(data)\n # Python\n 
elif ext.lower() in ['py', 'sh']:\n if type(path) == BytesIO:\n file = path.read().decode()\n else:\n with open(path) as f:\n file = f.read()\n # Parse the data\n for line in file.split('\\n'):\n l_striped = line.strip() # Removing trailing spaces\n if parse:\n l_striped = l_striped.format(**kwargs) # Formating\n if remove_comments:\n l_striped = l_striped.split('#')[0] # Remove comments\n if l_striped != '':\n data += [l_striped]\n data = ' '.join(data)\n # JavaScript\n elif ext.lower() in ['js']:\n if type(path) == BytesIO:\n file = path.read().decode()\n else:\n with open(path) as f:\n file = f.read()\n for line in file.split('\\n'): # Parse the data\n l_striped = line.strip() # Removing trailing spaces\n if parse:\n l_striped = l_striped.format(**kwargs) # Formating\n if remove_comments:\n l_striped = l_striped.split('//')[0] # Remove comments\n re.sub(r\"<!--(.|\\s|\\n)*?-->\", \"\", l_striped.replace('/*', '<!--').replace('*/', '-->'))\n if l_striped != '':\n data += [l_striped]\n data = ' '.join(data)\n # Json\n elif ext.lower() in ['json']:\n if engine.lower() in ['json']:\n with open(path) as json_file:\n data = json.load(json_file)\n else:\n data = pd.read_json(path, **kwargs)\n # Parquet\n elif ext.lower() in ['parq', 'parquet']:\n _engine = 'pyarrow' if engine == 'auto' else engine\n\n if orgn == 'S3':\n data = pd.read_parquet(path)\n elif type(_engine) == str:\n if _engine.lower() in ['py', 'pa', 'pyarrow']:\n import pyarrow.parquet as pq\n dataset = pq.ParquetDataset(path, **kwargs)\n table = dataset.read()\n data = table.to_pandas()\n elif _engine.lower() in ['fp', 'fastparquet']:\n from fastparquet import ParquetFile\n dataset = ParquetFile(path, **kwargs)\n table = dataset.to_pandas()\n else:\n raise ValueError('Engine value not allowed')\n else:\n data = _engine(path, **kwargs)\n # Else, read-only\n elif ext.lower() in ['readonly', 'read-only', 'ro']:\n if type(path) == BytesIO:\n print(path.read().decode())\n else:\n with open(path) as f:\n for line in f:\n print(line.rstrip())\n else:\n with open(path) as f:\n file = f.read()\n data = file\n # If not read-only\n return data"
] |
[
[
"pandas.read_csv",
"pandas.read_parquet",
"pandas.read_json",
"pandas.read_excel"
]
] |
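The S3 branch of f_read above caches downloads by hashing the remote path into a local file name and comparing the cached file's age against the requested cache duration. Below is a stripped-down, self-contained sketch of that pattern; cached_fetch and its arguments are hypothetical stand-ins, not the PYCOF API.

```python
# Path-hash caching: skip the download when a fresh-enough local copy exists.
import hashlib, os, time

def cached_fetch(path, fetch, cache_dir="/tmp/pycof-demo", max_age_s=1800):
    os.makedirs(cache_dir, exist_ok=True)
    name = hashlib.sha224(path.encode("utf-8")).hexdigest()   # hash path -> file name
    local = os.path.join(cache_dir, name)
    fresh = os.path.exists(local) and (time.time() - os.path.getmtime(local)) < max_age_s
    if not fresh:
        with open(local, "wb") as f:
            f.write(fetch(path))            # download (e.g. from S3) and cache
    with open(local, "rb") as f:
        return f.read()

# fetch() is a stand-in for the real S3 download
print(cached_fetch("s3://bucket/key", lambda p: b"payload"))
```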
kennethsequeira/Hello-world
|
[
"464227bc7d9778a4a2a4044fe415a629003ea77f"
] |
[
"Python/KNN_with_scikit_learn.py"
] |
[
"\n# coding: utf-8\n\n# In[21]:\n\n\n#importing libraries\n#!pip install pandas\n#!pip install matplotlib\n#!pip install sklearn\n\nimport pandas as pd\nimport matplotlib.pyplot as plt\nfrom sklearn.neighbors import KNeighborsClassifier\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.metrics import accuracy_score\n\n\n# In[46]:\n\n\n#loading dataset\ndataset = pd.read_csv(\"diabetes.csv\")\ndataset.head(5)#to visialize dataset\n\n\n# In[47]:\n\n\n''' \n About data set\n Here we need to predict the class label as outcome whether the person has diabetes or not\n given Pregnancies, Glucose, BloodPressure, SkinThickness, Insulin, BMI, DiabetesPedigreeFunction, Age. we need to find whether the person has diabetes or not\n'''\n\n\n# In[48]:\n\n\n# lets find how many classes are there in Species\ndataset[\"Outcome\"].value_counts()\n\n\n# In[49]:\n\n\n# we can see that the classes are balanced with 50 each so knn works well without any biased\n\n\n# In[50]:\n\n\n#splitting the dataset into Train, Dev, Test\n# Train dataset which we use to train the model(KNN)\n# Dev dataset which we use to find the best K value\n# Test dataset which we test the model with the K value we got from above step\n\n# Why we are using 3 datasets? Because we need to reduce generalization error(G). THis is the error which we got from new data(Test data).Because we are finding the best K value using Dev dataset.\nlabel_data = dataset[\"Outcome\"]\ndel dataset[\"Outcome\"]\nX_train, X_test, y_train, y_test = train_test_split(dataset, label_data, test_size=0.2)\n#test_size = size of the test set 0.2 = 20% data .learn more at http://scikit-learn.org/stable/modules/generated/sklearn.model_selection.train_test_split.html\nX_train, X_dev, y_train, y_dev = train_test_split(X_train, y_train, test_size=0.2)\n\n\n# In[57]:\n\n\n# Now we Train the model with the different K values\nK_values = [1,3,5,7,9,11,13,15,17,19,21,23,25,27,29,31]\naccuracy_scores = []\nfor i in K_values:\n clf = KNeighborsClassifier(n_neighbors = i)\n clf.fit(X_train,y_train)\n pred = clf.predict(X_dev)\n accuracy_scores.append(accuracy_score(y_dev,pred))\n\n\n# In[58]:\n\n\noptimal_k = K_values[accuracy_scores.index(max(accuracy_scores))]\n\n\n# In[60]:\n\n\nprint('\\nThe optimal number of neighbors is %d.' % optimal_k)\nplt.plot(K_values, accuracy_scores)\nplt.xlabel('Number of Neighbors K')\nplt.ylabel('Accuracy score')\nplt.show()\n\n\n# In[62]:\n\n\n#here we can see that K=7 has highest accuracy. Now we build the model with K=7 on unseen data(Test dataset)\nclf = KNeighborsClassifier(n_neighbors = 7)\nclf.fit(X_train,y_train)\npred = clf.predict(X_test)\nprint(\"accuracy score \",accuracy_score(y_test,pred))\n\n\n# In[63]:\n\n\n#we can see that KNN doesn't work well. Later we try differnt algorith's on this dataset\n\n"
] |
[
[
"matplotlib.pyplot.xlabel",
"sklearn.neighbors.KNeighborsClassifier",
"matplotlib.pyplot.plot",
"sklearn.metrics.accuracy_score",
"matplotlib.pyplot.ylabel",
"sklearn.model_selection.train_test_split",
"matplotlib.pyplot.show",
"pandas.read_csv"
]
] |
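The notebook above picks K with a single train/dev split, which is noisy for a dataset this small. Below is a hedged alternative sketch that selects K by 5-fold cross-validation instead; synthetic features stand in for the diabetes.csv columns.

```python
# K selection via cross-validation: each candidate K is scored on 5 folds,
# which uses the training data more efficiently than one fixed dev split.
import numpy as np
from sklearn.model_selection import cross_val_score
from sklearn.neighbors import KNeighborsClassifier

rng = np.random.RandomState(0)
X = rng.randn(200, 8)                                   # 8 features, like the Pima dataset
y = (X[:, 0] + rng.randn(200) > 0).astype(int)          # toy binary labels

scores = {k: cross_val_score(KNeighborsClassifier(n_neighbors=k), X, y, cv=5).mean()
          for k in range(1, 32, 2)}                     # same odd K grid as above
best_k = max(scores, key=scores.get)
print(best_k, scores[best_k])
```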
mriberodiaz/client_availability
|
[
"c8b0b89fe647ae620118569c23022f5e9aad8749"
] |
[
"optimization/shared/fed_avg_schedule.py"
] |
[
"# Copyright 2019, Google LLC.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\"An implementation of the FedAvg algorithm with learning rate schedules.\n\nThis is intended to be a somewhat minimal implementation of Federated\nAveraging that allows for client and server learning rate scheduling.\n\nThe original FedAvg is based on the paper:\n\nCommunication-Efficient Learning of Deep Networks from Decentralized Data\n H. Brendan McMahan, Eider Moore, Daniel Ramage,\n Seth Hampson, Blaise Aguera y Arcas. AISTATS 2017.\n https://arxiv.org/abs/1602.05629\n\"\"\"\n\nimport collections\nfrom typing import Callable, Optional, Union\n\nimport attr\nimport tensorflow as tf\nimport tensorflow_federated as tff\nfrom tensorflow_federated.python.tensorflow_libs import tensor_utils\n\n\n# Convenience type aliases.\nModelBuilder = Callable[[], tff.learning.Model]\nOptimizerBuilder = Callable[[float], tf.keras.optimizers.Optimizer]\nClientWeightFn = Callable[..., float]\nLRScheduleFn = Callable[[Union[int, tf.Tensor]], Union[tf.Tensor, float]]\n\n\ndef _initialize_optimizer_vars(model: tff.learning.Model,\n optimizer: tf.keras.optimizers.Optimizer):\n \"\"\"Ensures variables holding the state of `optimizer` are created.\"\"\"\n delta = tf.nest.map_structure(tf.zeros_like, _get_weights(model).trainable)\n model_weights = _get_weights(model)\n grads_and_vars = tf.nest.map_structure(lambda x, v: (x, v), delta,\n model_weights.trainable)\n optimizer.apply_gradients(grads_and_vars, name='server_update')\n assert optimizer.variables()\n\n\ndef _get_weights(model: tff.learning.Model) -> tff.learning.ModelWeights:\n return tff.learning.ModelWeights.from_model(model)\n\n\[email protected](eq=False, order=False, frozen=True)\nclass ServerState(object):\n \"\"\"Structure for state on the server.\n\n Fields:\n - `model`: A dictionary of the model's trainable and non-trainable\n weights.\n - `optimizer_state`: The server optimizer variables.\n - `round_num`: The current training round, as a float.\n \"\"\"\n model = attr.ib()\n optimizer_state = attr.ib()\n round_num = attr.ib()\n # This is a float to avoid type incompatibility when calculating learning rate\n # schedules.\n\n\[email protected]\ndef server_update(model, server_optimizer, server_state, weights_delta):\n \"\"\"Updates `server_state` based on `weights_delta`, increase the round number.\n\n Args:\n model: A `tff.learning.Model`.\n server_optimizer: A `tf.keras.optimizers.Optimizer`.\n server_state: A `ServerState`, the state to be updated.\n weights_delta: An update to the trainable variables of the model.\n\n Returns:\n An updated `ServerState`.\n \"\"\"\n model_weights = _get_weights(model)\n tff.utils.assign(model_weights, server_state.model)\n # Server optimizer variables must be initialized prior to invoking this\n tff.utils.assign(server_optimizer.variables(), server_state.optimizer_state)\n\n weights_delta, has_non_finite_weight = (\n tensor_utils.zero_all_if_any_non_finite(weights_delta))\n if has_non_finite_weight > 0:\n return 
server_state\n\n # Apply the update to the model. We must multiply weights_delta by -1.0 to\n # view it as a gradient that should be applied to the server_optimizer.\n grads_and_vars = [\n (-1.0 * x, v) for x, v in zip(weights_delta, model_weights.trainable)\n ]\n\n server_optimizer.apply_gradients(grads_and_vars)\n\n # Create a new state based on the updated model.\n return tff.utils.update_state(\n server_state,\n model=model_weights,\n optimizer_state=server_optimizer.variables(),\n round_num=server_state.round_num + 1.0)\n\n\[email protected](eq=False, order=False, frozen=True)\nclass ClientOutput(object):\n \"\"\"Structure for outputs returned from clients during federated optimization.\n\n Fields:\n - `weights_delta`: A dictionary of updates to the model's trainable\n variables.\n - `client_weight`: Weight to be used in a weighted mean when\n aggregating `weights_delta`.\n - `model_output`: A structure matching\n `tff.learning.Model.report_local_outputs`, reflecting the results of\n training on the input dataset.\n - `optimizer_output`: Additional metrics or other outputs defined by the\n optimizer.\n \"\"\"\n weights_delta = attr.ib()\n client_weight = attr.ib()\n model_output = attr.ib()\n optimizer_output = attr.ib()\n\n\ndef create_client_update_fn():\n \"\"\"Returns a tf.function for the client_update.\n\n This \"create\" fn is necessary to prevent\n \"ValueError: Creating variables on a non-first call to a function decorated\n with tf.function\" errors due to the client optimizer creating variables. This\n is really only needed because we test the client_update function directly.\n \"\"\"\n @tf.function\n def client_update(model,\n dataset,\n initial_weights,\n client_optimizer,\n client_weight_fn=None):\n \"\"\"Updates client model.\n\n Args:\n model: A `tff.learning.Model`.\n dataset: A 'tf.data.Dataset'.\n initial_weights: A `tff.learning.ModelWeights` from server.\n client_optimizer: A `tf.keras.optimizers.Optimizer` object.\n client_weight_fn: Optional function that takes the output of\n `model.report_local_outputs` and returns a tensor that provides the\n weight in the federated average of model deltas. 
If not provided, the\n default is the total number of examples processed on device.\n\n Returns:\n A 'ClientOutput`.\n \"\"\"\n\n model_weights = _get_weights(model)\n tff.utils.assign(model_weights, initial_weights)\n\n num_examples = tf.constant(0, dtype=tf.int32)\n for batch in dataset:\n with tf.GradientTape() as tape:\n output = model.forward_pass(batch)\n grads = tape.gradient(output.loss, model_weights.trainable)\n grads_and_vars = zip(grads, model_weights.trainable)\n client_optimizer.apply_gradients(grads_and_vars)\n num_examples += tf.shape(output.predictions)[0]\n\n aggregated_outputs = model.report_local_outputs()\n weights_delta = tf.nest.map_structure(lambda a, b: a - b,\n model_weights.trainable,\n initial_weights.trainable)\n weights_delta, has_non_finite_weight = (\n tensor_utils.zero_all_if_any_non_finite(weights_delta))\n\n if has_non_finite_weight > 0:\n client_weight = tf.constant(0, dtype=tf.float32)\n elif client_weight_fn is None:\n client_weight = tf.cast(num_examples, dtype=tf.float32)\n else:\n client_weight = client_weight_fn(aggregated_outputs)\n\n return ClientOutput(\n weights_delta, client_weight, aggregated_outputs,\n collections.OrderedDict([('num_examples', num_examples)]))\n\n return client_update\n\n\ndef build_server_init_fn(\n model_fn: ModelBuilder,\n server_optimizer_fn: Callable[[], tf.keras.optimizers.Optimizer]):\n \"\"\"Builds a `tff.tf_computation` that returns the initial `ServerState`.\n\n The attributes `ServerState.model` and `ServerState.optimizer_state` are\n initialized via their constructor functions. The attribute\n `ServerState.round_num` is set to 0.0.\n\n Args:\n model_fn: A no-arg function that returns a `tff.learning.Model`.\n server_optimizer_fn: A no-arg function that returns a\n `tf.keras.optimizers.Optimizer`.\n\n Returns:\n A `tff.tf_computation` that returns initial `ServerState`.\n \"\"\"\n\n @tff.tf_computation\n def server_init_tf():\n server_optimizer = server_optimizer_fn()\n model = model_fn()\n _initialize_optimizer_vars(model, server_optimizer)\n return ServerState(\n model=_get_weights(model),\n optimizer_state=server_optimizer.variables(),\n round_num=0.0)\n\n return server_init_tf\n\n\ndef build_fed_avg_process(\n model_fn: ModelBuilder,\n client_optimizer_fn: OptimizerBuilder,\n client_lr: Union[float, LRScheduleFn] = 0.1,\n server_optimizer_fn: OptimizerBuilder = tf.keras.optimizers.SGD,\n server_lr: Union[float, LRScheduleFn] = 1.0,\n client_weight_fn: Optional[ClientWeightFn] = None,\n) -> tff.templates.IterativeProcess:\n \"\"\"Builds the TFF computations for optimization using federated averaging.\n\n Args:\n model_fn: A no-arg function that returns a `tff.learning.Model`.\n client_optimizer_fn: A function that accepts a `learning_rate` keyword\n argument and returns a `tf.keras.optimizers.Optimizer` instance.\n client_lr: A scalar learning rate or a function that accepts a float\n `round_num` argument and returns a learning rate.\n server_optimizer_fn: A function that accepts a `learning_rate` argument and\n returns a `tf.keras.optimizers.Optimizer` instance.\n server_lr: A scalar learning rate or a function that accepts a float\n `round_num` argument and returns a learning rate.\n client_weight_fn: Optional function that takes the output of\n `model.report_local_outputs` and returns a tensor that provides the weight\n in the federated average of model deltas. 
If not provided, the default is\n the total number of examples processed on device.\n\n Returns:\n A `tff.templates.IterativeProcess`.\n \"\"\"\n\n client_lr_schedule = client_lr\n if not callable(client_lr_schedule):\n client_lr_schedule = lambda round_num: client_lr\n\n server_lr_schedule = server_lr\n if not callable(server_lr_schedule):\n server_lr_schedule = lambda round_num: server_lr\n\n dummy_model = model_fn()\n\n server_init_tf = build_server_init_fn(\n model_fn,\n # Initialize with the learning rate for round zero.\n lambda: server_optimizer_fn(server_lr_schedule(0)))\n server_state_type = server_init_tf.type_signature.result\n model_weights_type = server_state_type.model\n round_num_type = server_state_type.round_num\n\n tf_dataset_type = tff.SequenceType(dummy_model.input_spec)\n model_input_type = tff.SequenceType(dummy_model.input_spec)\n\n @tff.tf_computation(model_input_type, model_weights_type, round_num_type)\n def client_update_fn(tf_dataset, initial_model_weights, round_num):\n client_lr = client_lr_schedule(round_num)\n client_optimizer = client_optimizer_fn(client_lr)\n client_update = create_client_update_fn()\n return client_update(model_fn(), tf_dataset, initial_model_weights,\n client_optimizer, client_weight_fn)\n\n @tff.tf_computation(server_state_type, model_weights_type.trainable)\n def server_update_fn(server_state, model_delta):\n model = model_fn()\n server_lr = server_lr_schedule(server_state.round_num)\n server_optimizer = server_optimizer_fn(server_lr)\n # We initialize the server optimizer variables to avoid creating them\n # within the scope of the tf.function server_update.\n _initialize_optimizer_vars(model, server_optimizer)\n return server_update(model, server_optimizer, server_state, model_delta)\n\n @tff.federated_computation(\n tff.FederatedType(server_state_type, tff.SERVER),\n tff.FederatedType(tf_dataset_type, tff.CLIENTS))\n def run_one_round(server_state, federated_dataset):\n \"\"\"Orchestration logic for one round of computation.\n\n Args:\n server_state: A `ServerState`.\n federated_dataset: A federated `tf.Dataset` with placement `tff.CLIENTS`.\n\n Returns:\n A tuple of updated `ServerState` and the result of\n `tff.learning.Model.federated_output_computation`.\n \"\"\"\n client_model = tff.federated_broadcast(server_state.model)\n client_round_num = tff.federated_broadcast(server_state.round_num)\n\n client_outputs = tff.federated_map(\n client_update_fn,\n (federated_dataset, client_model, client_round_num))\n\n client_weight = client_outputs.client_weight\n model_delta = tff.federated_mean(\n client_outputs.weights_delta, weight=client_weight)\n\n server_state = tff.federated_map(server_update_fn,\n (server_state, model_delta))\n\n aggregated_outputs = dummy_model.federated_output_computation(\n client_outputs.model_output)\n if aggregated_outputs.type_signature.is_struct():\n aggregated_outputs = tff.federated_zip(aggregated_outputs)\n\n return server_state, aggregated_outputs\n\n @tff.federated_computation\n def initialize_fn():\n return tff.federated_value(server_init_tf(), tff.SERVER)\n\n return tff.templates.IterativeProcess(\n initialize_fn=initialize_fn, next_fn=run_one_round)\n"
] |
[
[
"tensorflow.shape",
"tensorflow.GradientTape",
"tensorflow.nest.map_structure",
"tensorflow.constant",
"tensorflow.cast"
]
] |
nerds-ufes/G-PolKA
|
[
"9c6bd42167bc333f6421a751c93a88c00841def9",
"9c6bd42167bc333f6421a751c93a88c00841def9",
"9c6bd42167bc333f6421a751c93a88c00841def9"
] |
[
"validation/emulation/experiments/linear-gpolka.p4app/test/jitter_test/graph_jitter.py",
"src/polka/keyflow_multicast.py",
"validation/emulation/experiments/linear-gpolka.p4app/test2/latency_test_btraffic/graph_latency_btraffic.py"
] |
[
"import pandas as pd\nimport numpy as np\nimport seaborn as sns\nimport matplotlib.pyplot as plt\nfrom datetime import datetime\nimport os\n\nimport matplotlib as mpl\n\nmpl.use(\"pdf\")\nplt.style.use(\"paper\")\nplt.grid(True, linestyle=\"-\", alpha=0.5, linewidth=0.5)\n# mpl.rcParams[\"figure.figsize\"] = [3.2, 1.98]\nmpl.rcParams[\"xtick.labelsize\"] = 7\nmpl.rcParams[\"ytick.labelsize\"] = 7\nmpl.rcParams[\"font.size\"] = 7\nmpl.rcParams[\"figure.autolayout\"] = True\nmpl.rcParams[\"figure.figsize\"] = [3.0, 1.85]\nmpl.rcParams[\"axes.titlesize\"] = 8\nmpl.rcParams[\"axes.labelsize\"] = 8\nmpl.rcParams[\"lines.linewidth\"] = 1\nmpl.rcParams[\"lines.markersize\"] = 3\nmpl.rcParams[\"legend.fontsize\"] = 9\nmpl.rcParams[\"mathtext.fontset\"] = \"stix\"\nmpl.rcParams[\"font.family\"] = \"STIXGeneral\"\n\nscenario_list = [\"LB-SR\", \"M-PolKA\"]\n\nroot_folder = \"./\"\nexperiment_folders = [\n # \"sourcey-unicast-fabric_bw10\",\n \"sourcey-fabric_bw10\",\n \"gpolka-fabric_bw10\",\n]\nmy_palette = {\n \"M-PolKA\": \"#e74c3c\",\n \"LB-SR\": \"#3498db\",\n}\n# my_palette = {\"GPolKA\": \"#2ecc71\", \"Sourcey-M\": \"#e74c3c\", \"Sourcey-U\": \"#3498db\"}\n\n\ndef get_values(folder_name, scenario):\n values_ping = []\n for n in range(0, 10):\n f_name = \"{f}/{n}/a1_{n}\".format(f=root_folder + folder_name, n=n)\n print(f_name)\n with open(f_name) as f:\n for line in f:\n ping_time = float(line)\n value = {\n \"Scenario\": scenario,\n \"Topic\": n,\n \"Latency\": ping_time,\n }\n values_ping.append(value)\n return values_ping\n\n\ndf = pd.DataFrame(columns=[\"Scenario\", \"Topic\", \"Latency\"])\n\nfor i in range(len(scenario_list)):\n print(\"Experiment: {}\".format(scenario_list[i]))\n df = df.append(\n get_values(experiment_folders[i], scenario_list[i]), ignore_index=True\n )\nprint(df)\n\nflierprops = dict(\n marker=\".\",\n markerfacecolor=\"k\",\n markersize=0.5,\n linestyle=\"none\",\n markeredgecolor=\"k\",\n)\n\nax = sns.barplot(x=\"Topic\", y=\"Latency\", hue=\"Scenario\", data=df, palette=my_palette)\n\n# ax = sns.boxplot(\n# x=\"Scenario\",\n# y=\"Loss\",\n# linewidth=1.0,\n# data=df,\n# whis=1.5,\n# orient=\"v\",\n# palette=my_palette,\n# flierprops=flierprops,\n# width=0.5,\n# )\n\n# xlabels = [\"Single Path\\nfSTA1\", \"Single Path\\nfSTA2\", \"Multiple Paths\\nfSTA1/fSTA2\"]\n# ax.set_xticklabels(xlabels)\nax.set_axisbelow(True)\n\nplt.ylabel(\"Jitter (ms)\")\n# plt.ylim(2, 5)\nplt.xlabel(\"Number of core hops\")\nplt.legend(loc=\"upper left\", ncol=1, prop={\"size\": 6})\nsns.despine()\nplt.savefig(\"{}exp_linear_jitter_10m.pdf\".format(root_folder))\n# plt.show()\n",
"import time\nimport sympy\nfrom mpmath import log\nimport numpy as np\n\n\ndef coeficiente(k, e, w):\n global lista\n c = 0\n l = k - 1\n for i in range(l, -1, -1):\n tmp = i * (k ** e)\n if w >= tmp:\n c = w - tmp\n break\n if c >= 0:\n lista.append(i)\n else:\n lista.append(0)\n\n return c\n\n\ndef gerar_primos(limite_min, limite_max, total):\n primos = []\n for numero in range(limite_min, limite_max + 1):\n for auxiliar in range(2, numero / 2):\n if numero % auxiliar == 0:\n break\n else:\n primos.append(numero)\n if len(primos) == total:\n break\n return primos\n\n\ndef mod_inverse(a, b):\n r = -1\n B = b\n A = a\n eq_set = []\n full_set = []\n mod_set = []\n\n # euclid's algorithm\n while r != 1 and r != 0:\n r = b % a\n q = b // a\n eq_set = [r, b, a, q * -1]\n b = a\n a = r\n full_set.append(eq_set)\n\n for i in range(0, 4):\n mod_set.append(full_set[-1][i])\n\n mod_set.insert(2, 1)\n counter = 0\n\n # extended euclid's algorithm\n for i in range(1, len(full_set)):\n if counter % 2 == 0:\n mod_set[2] = full_set[-1 * (i + 1)][3] * mod_set[4] + mod_set[2]\n mod_set[3] = full_set[-1 * (i + 1)][1]\n\n elif counter % 2 != 0:\n mod_set[4] = full_set[-1 * (i + 1)][3] * mod_set[2] + mod_set[4]\n mod_set[1] = full_set[-1 * (i + 1)][1]\n\n counter += 1\n\n if mod_set[3] == B:\n return mod_set[2] % B\n return mod_set[4] % B\n\n\ndef calculate_routeid(Midlist, pathlen, toponame, debug=False):\n if debug:\n print(\"{}\".format(Midlist))\n print(\"Pathlen: {}\".format(pathlen))\n\n mult = 1.0\n\n for i in range(len(Midlist) - 1, len(Midlist) - pathlen - 1, -1):\n if debug:\n print(\"Midlist[{}]: {}\".format(i, Midlist[i]))\n mult = mult * Midlist[i]\n\n total = float(log(mult, 2))\n total = np.ceil(total)\n\n if debug:\n print(\"total: {}\".format(total))\n print(\"[{}] Total of the bits to PRI: {}\".format(toponame, total))\n\n return total\n\n\ndef calculate_routeid_multicast(Midlist, pathlen, toponame, debug=False):\n if debug:\n print(\"{}\".format(Midlist))\n print(\"Pathlen: {}\".format(pathlen))\n\n mult = 1.0\n\n for i in range(len(Midlist) - 1, len(Midlist) - pathlen - 1, -1):\n if debug:\n print(\"Midlist[{}]: {}\".format(i, Midlist[i]))\n mult = mult * Midlist[i]\n\n total = float(log(mult, 2))\n total = np.ceil(total)\n\n if debug:\n print(\"total: {}\".format(total))\n print(\"[{}] Total of the bits to PRI: {}\".format(toponame, total))\n\n return total\n\n\n# def max_bitlength_keyflow(nports, nnodes, lpath, toponame):\n# Midlist = gerar_primos(nports, 1000000000, nnodes)\n# nbits = int(calculate_routeid(Midlist, lpath, toponame))\n# return nbits\n\n\ndef max_bitlength_keyflow_multicast(nports, nnodes, lpath, toponame, is_multicast):\n\n if is_multicast:\n # Multicast\n print(\"Multicast\")\n minnumber = pow(2, nports)\n else:\n # Unicast\n print(\"Unicast\")\n minnumber = nports\n\n Midlist = gerar_primos(minnumber, 100000000, nnodes)\n nbits = int(calculate_routeid(Midlist, lpath, toponame))\n return nbits\n\n\n# def scalability_analysis():\n\n# lst = []\n\n# 2-tier Topologies\n# switch_ports = [24,48,96]\n# spine_nodes = [6,12,16,24,36,48]\n# lpath = 3\n\n# for nspine in spine_nodes:\n# nleaf = nspine\n# nnodes = nspine + nleaf\n\n# for nports in switch_ports:\n# if (nports>nspine):\n# nservers = (nports-nspine) * nleaf\n# topo_name = \"2-tier - spine: \", nspine, \" - leaf: \", nleaf, \"- switches: \", nnodes,\" - ports: \", nports,\" - servers: \", nservers, \" - lpath: \", lpath\n# topo_name = \"2-tier spine \"+ str(nspine) +\" leaf \"+ str(nleaf)\n# nbits = 
max_bitlength_keyflow(nports,nnodes,lpath,topo_name)\n# print \"######\", topo_name\n# print \"Bitlength:\", nbits\n# lst.append(topo_name+\",\"+str(nnodes)+\",\"+str(nservers)+\",\"+str(nbits))\n\n# Hypercube Topologies\n# degree = [3,4,5,6,7,8,9,10]\n\n# for ndegree in degree:\n# nservers = pow(2,ndegree)\n# nnodes = nservers\n# nports = ndegree\n# lpath = ndegree\n\n# topo_name = \"Hypercube - degree: \", ndegree, \"- switches: \", nnodes,\" - ports: \", nports,\" - servers: \", nservers, \" - lpath: \", lpath\n# topo_name = \"Hypercube degree \"+str(ndegree)\n\n# nbits = max_bitlength_keyflow(nports,nnodes,lpath,topo_name)\n# print \"######\", topo_name\n# print \"Bitlength:\", nbits\n# lst.append(topo_name+\",\"+str(nnodes)+\",\"+str(nservers)+\",\"+str(nbits))\n\n# arr = np.array(lst)\n# np.savetxt('data_keyflow.csv', arr, delimiter=',', fmt=\"%s\")\n",
"import pandas as pd\nimport numpy as np\nimport seaborn as sns\nimport matplotlib.pyplot as plt\nfrom datetime import datetime\nimport os\n\nimport matplotlib as mpl\n\nmpl.use(\"pdf\")\nplt.style.use(\"paper\")\nplt.grid(True, linestyle=\"-\", alpha=0.5, linewidth=0.5)\nmpl.rcParams[\"figure.figsize\"] = [3.2, 1.98]\n\nscenario_list = [\"Sourcey-U\", \"Sourcey-M\", \"GPolKA\"]\n\nroot_folder = \"./\"\nexperiment_folders = [\n \"sourcey-unicast-fabric_bw10\",\n \"sourcey-fabric_bw10\",\n \"gpolka-fabric_bw10\",\n]\nmy_palette = {\"GPolKA\": \"#2ecc71\", \"Sourcey-M\": \"#e74c3c\", \"Sourcey-U\": \"#3498db\"}\n\n\ndef get_values(folder_name, scenario):\n values_ping = []\n for n in range(0, 10):\n f_name = \"{f}/{n}/a1_{n}\".format(f=root_folder + folder_name, n=n)\n print(f_name)\n with open(f_name) as f:\n for line in f:\n ping_time = float(line)\n value = {\n \"Scenario\": scenario,\n \"Topic\": n,\n \"Latency\": ping_time,\n }\n values_ping.append(value)\n return values_ping\n\n\ndf = pd.DataFrame(columns=[\"Scenario\", \"Topic\", \"Latency\"])\n\nfor i in range(len(scenario_list)):\n print(\"Experiment: {}\".format(scenario_list[i]))\n df = df.append(\n get_values(experiment_folders[i], scenario_list[i]), ignore_index=True\n )\nprint(df)\n\nflierprops = dict(\n marker=\".\",\n markerfacecolor=\"k\",\n markersize=0.5,\n linestyle=\"none\",\n markeredgecolor=\"k\",\n)\n\nax = sns.barplot(x=\"Topic\", y=\"Latency\", hue=\"Scenario\", data=df, palette=my_palette)\n\n# ax = sns.boxplot(\n# x=\"Scenario\",\n# y=\"Loss\",\n# linewidth=1.0,\n# data=df,\n# whis=1.5,\n# orient=\"v\",\n# palette=my_palette,\n# flierprops=flierprops,\n# width=0.5,\n# )\n\n# xlabels = [\"Single Path\\nfSTA1\", \"Single Path\\nfSTA2\", \"Multiple Paths\\nfSTA1/fSTA2\"]\n# ax.set_xticklabels(xlabels)\nax.set_axisbelow(True)\n\nplt.ylabel(\"RTT (ms)\")\n# plt.ylim(2, 5)\nplt.xlabel(\"Number of core hops\")\nplt.legend(loc=\"upper left\", ncol=1, prop={\"size\": 5})\nsns.despine()\nplt.savefig(\"{}latency-btraffic.pdf\".format(root_folder))\n# plt.show()"
] |
[
[
"matplotlib.use",
"matplotlib.pyplot.grid",
"pandas.DataFrame",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.legend",
"matplotlib.pyplot.ylabel",
"matplotlib.pyplot.style.use"
],
[
"numpy.ceil"
],
[
"matplotlib.use",
"matplotlib.pyplot.grid",
"pandas.DataFrame",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.legend",
"matplotlib.pyplot.ylabel",
"matplotlib.pyplot.style.use"
]
] |
astonzhang/dgl
|
[
"2664ed2dff9d0e9e45d2349c531b460b31cec215"
] |
[
"tests/pytorch/test_line_graph.py"
] |
[
"import torch as th\nimport networkx as nx\nimport numpy as np\nimport dgl\nimport utils as U\n\nD = 5\n\ndef test_line_graph():\n N = 5\n G = dgl.DGLGraph(nx.star_graph(N))\n G.edata['h'] = th.randn((2 * N, D))\n n_edges = G.number_of_edges()\n L = G.line_graph(shared=True)\n assert L.number_of_nodes() == 2 * N\n L.ndata['h'] = th.randn((2 * N, D))\n # update node features on line graph should reflect to edge features on\n # original graph.\n u = [0, 0, 2, 3]\n v = [1, 2, 0, 0]\n eid = G.edge_ids(u, v)\n L.nodes[eid].data['h'] = th.zeros((4, D))\n assert U.allclose(G.edges[u, v].data['h'], th.zeros((4, D)))\n\n # adding a new node feature on line graph should also reflect to a new\n # edge feature on original graph\n data = th.randn(n_edges, D)\n L.ndata['w'] = data\n assert U.allclose(G.edata['w'], data)\n\ndef test_no_backtracking():\n N = 5\n G = dgl.DGLGraph(nx.star_graph(N))\n L = G.line_graph(backtracking=False)\n assert L.number_of_nodes() == 2 * N\n for i in range(1, N):\n e1 = G.edge_id(0, i)\n e2 = G.edge_id(i, 0)\n assert not L.has_edge_between(e1, e2)\n assert not L.has_edge_between(e2, e1)\n\nif __name__ == '__main__':\n test_line_graph()\n test_no_backtracking()\n"
] |
[
[
"torch.zeros",
"torch.randn"
]
] |
Huawei-Ascend/tensorflow
|
[
"67979f8cf1acbb6db6b156ee0a15d277571d4a03"
] |
[
"tf_adapter/python/npu_bridge/estimator/npu/npu_loss_scale_optimizer.py"
] |
[
"\n# Optimizer for mixed precision training for Davinci NPU.\n\n\"\"\"Loss scaling optimizer.\"\"\"\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport os\nimport tensorflow as tf\nfrom tensorflow.python.eager import context\nfrom tensorflow.python.ops import control_flow_ops\nfrom tensorflow.python.ops import gen_control_flow_ops\nfrom tensorflow.python.ops import gen_math_ops\nfrom tensorflow.python.ops import math_ops\nfrom tensorflow.python.training import optimizer\nfrom npu_bridge.hccl import hccl_ops\nfrom npu_bridge.estimator import npu_ops\n\nfrom npu_bridge.helper import helper\ngen_npu_ops = helper.get_gen_ops();\n\n\nclass NPULossScaleOptimizer(optimizer.Optimizer):\n # TODO(jamesqin): move mixed precision training explanation to __init__\n # docstring.\n \"\"\"An optimizer that applies loss scaling in backprop.\n This class is useful for \"mixed precision training\" on GPUs (or other\n potential accelerators), an approach to improve compute throughput without\n compromising model quality.\n The canonical way to perform mixed precision training is the following:\n * Model variables are kept in high precision (e.g. float32).\n * Computations are done in lower precision (e.g. float16), which enjoys\n performance speedup by virtue of hardware support. Variables are casted to\n lower precision before they're used.\n * Final gradients are casted back to high precision dtype, then used to update\n variables.\n The side-effect of performing computation in lower precision, is that it comes\n with smaller numerical range. During backproping, small gradients might\n underflow in the reduced numerical range, causing a model to converge at\n suboptimal level.\n To prevent underflow, this optimizer multiplies the loss by a factor before\n backprop starts. Consequently, the gradients are linearly scaled up by the\n same factor, thus not falling into the underflow zone. After that, to perserve\n the correctness of backprop, the gradients are down-scaled by the same factor,\n casted to the (higher) variable precision, then applied on the variables.\n See [Nvidia's manual on mixed precision training](\n https://docs.nvidia.com/deeplearning/sdk/mixed-precision-training/index.html)\n for more details.\n To use loss scale optimizer, one only needs choose a loss scale strategy and\n wrap a regular optimizer. See examples below.\n ```\n loss = loss_fn()\n opt = tf.AdamOptimizer(learning_rate=...)\n # Choose a loss scale manager which decides how to pick the right loss scale\n # throughout the training process.\n loss_scale_manager = tf.contrib.mixed_precision.FixedLossScaleManager(5000)\n # Wraps the original optimizer in a LossScaleOptimizer.\n loss_scale_optimizer =\n tf.contrib.mixed_precision.LossScaleOptimizer(opt, loss_scale_manager)\n # Call minimize() on the loss scale optimizer.\n train_op = loss_scale_optimizer.minimize(loss)\n ```\n If gradients clipping is applied, one can call\n `optimizer.compute_gradients()` and `optimizer.apply_gradients()`\n separately.\n Notice the following way of using LossScaleOptimizer is not intended. 
Always\n use `loss_scale_optimizer.compute_gradients()` to compute gradients instead of\n `tf.gradients()` if doing mixed precision training.\n ```\n # The following is a wrong way to use LossScaleOptimizer along with\n # tf.gradients().\n # Always use loss_scale_optimizer.compute_gradients() to compute grads, or\n # loss scale is not correctly applied.\n grads = tf.gradients(loss, ...)\n # Do some custom grad clipping.\n grads = clip_grads(grads, ...)\n loss_scale_optimizer.apply(grads_and_vars)\n ```\n \"\"\"\n\n def __init__(self, opt, loss_scale_manager, is_distributed=False):\n \"\"\"Construct a loss scaling optimizer.\n\n Args:\n opt: The actual optimizer that will be used to compute and apply the\n gradients. Must be an implementation of the\n `tf.compat.v1.train.Optimizer` interface.\n loss_scale_manager: A LossScaleManager object.\n \"\"\"\n self._opt = opt\n self._loss_scale_manager = loss_scale_manager\n self._float_status = tf.constant([0.0], dtype=tf.float32)\n self._is_distributed = is_distributed\n self._name = \"NPULossScaleOptimizer{}\".format(type(optimizer).__name__)\n\n def compute_gradients(self,\n loss,\n var_list=None,\n gate_gradients=optimizer.Optimizer.GATE_OP,\n aggregation_method=None,\n colocate_gradients_with_ops=False,\n grad_loss=None):\n \"\"\"Compute gradients. See base class `tf.compat.v1.train.Optimizer`.\"\"\"\n loss_scale = self._loss_scale_manager.get_loss_scale()\n if context.executing_eagerly():\n\n def scaled_loss():\n loss_val = loss()\n return loss_val * math_ops.cast(loss_scale, loss_val.dtype.base_dtype)\n else:\n if callable(loss):\n loss_val = loss()\n else:\n loss_val = loss\n scaled_loss = loss_val * math_ops.cast(loss_scale,\n loss_val.dtype.base_dtype)\n\n self._float_status = gen_npu_ops.npu_alloc_float_status()\n\n grads_and_vars = self._opt.compute_gradients(\n scaled_loss,\n var_list=var_list,\n gate_gradients=gate_gradients,\n aggregation_method=aggregation_method,\n colocate_gradients_with_ops=colocate_gradients_with_ops,\n grad_loss=grad_loss)\n\n grads_and_vars = self._down_scale(grads_and_vars, loss_scale)\n return grads_and_vars\n\n def apply_gradients(self, grads_and_vars, global_step=None, name=None):\n \"\"\"Apply gradients. 
See base class `tf.compat.v1.train.Optimizer`.\"\"\"\n grads = []\n for (g, _) in grads_and_vars:\n if g is not None:\n grads.append(g)\n\n #is_finite_grad = []\n #for g in grads:\n # is_finite_grad.append(math_ops.reduce_all(gen_math_ops.is_finite(g)))\n #is_overall_finite = math_ops.reduce_all(is_finite_grad)\n with tf.get_default_graph().control_dependencies(grads):\n local_float_status = gen_npu_ops.npu_get_float_status(self._float_status)\n cleared_float_status = gen_npu_ops.npu_clear_float_status(local_float_status)\n\n\n if self._is_distributed:\n with tf.get_default_graph().control_dependencies([local_float_status]):\n aggregated_float_status = hccl_ops.allreduce([self._float_status], \"sum\", fusion=0)\n is_overall_finite = math_ops.reduce_all(tf.equal(aggregated_float_status,\n cleared_float_status))\n else:\n is_overall_finite = math_ops.reduce_all(tf.equal(self._float_status,\n cleared_float_status))\n # Only update gradients when all grads are finite.\n def true_apply_gradients_fn():\n # TODO: Check should allreduce before or after _down_scale() ?\n # for now we are calling allreduce before _down_scale\n def true_apply_gradients(grads_and_vars, global_step=None, name=None):\n return self._opt.apply_gradients(grads_and_vars, global_step, name)\n\n return true_apply_gradients(grads_and_vars, global_step, name)\n\n update_vars = control_flow_ops.cond(is_overall_finite,\n true_apply_gradients_fn,\n gen_control_flow_ops.no_op)\n\n # Potentially adjust gradient scale in case of finite gradients.\n return control_flow_ops.group(\n update_vars,\n self._loss_scale_manager.update_loss_scale(is_overall_finite))\n\n def _down_scale(self, grads_vars, loss_scale):\n # Down scale grads by the loss_scale.\n gv = []\n inv_loss_scale = gen_math_ops.reciprocal(loss_scale)\n for g, v in grads_vars:\n if g is not None:\n gv.append((g * math_ops.cast(inv_loss_scale, g.dtype.base_dtype), v))\n else:\n gv.append((g, v))\n return gv\n"
] |
[
[
"tensorflow.python.ops.math_ops.cast",
"tensorflow.python.ops.control_flow_ops.cond",
"tensorflow.get_default_graph",
"tensorflow.equal",
"tensorflow.python.ops.gen_math_ops.reciprocal",
"tensorflow.constant",
"tensorflow.python.eager.context.executing_eagerly"
]
] |
Hamifthi/CS231.n-programming-assignment
|
[
"6620de7b5cb2c2fa7db0cfe4f4801adfe77c3356",
"6620de7b5cb2c2fa7db0cfe4f4801adfe77c3356"
] |
[
"assignment3/cs231n/classifiers/squeezenet.py",
"assignment1/cs231n/gradient_check.py"
] |
[
"import tensorflow as tf\n\nNUM_CLASSES = 1000\n\ndef fire_module(x,inp,sp,e11p,e33p):\n with tf.variable_scope(\"fire\"):\n with tf.variable_scope(\"squeeze\"):\n W = tf.get_variable(\"weights\",shape=[1,1,inp,sp])\n b = tf.get_variable(\"bias\",shape=[sp])\n s = tf.nn.conv2d(x,W,[1,1,1,1],\"VALID\")+b\n s = tf.nn.relu(s)\n with tf.variable_scope(\"e11\"):\n W = tf.get_variable(\"weights\",shape=[1,1,sp,e11p])\n b = tf.get_variable(\"bias\",shape=[e11p])\n e11 = tf.nn.conv2d(s,W,[1,1,1,1],\"VALID\")+b\n e11 = tf.nn.relu(e11)\n with tf.variable_scope(\"e33\"):\n W = tf.get_variable(\"weights\",shape=[3,3,sp,e33p])\n b = tf.get_variable(\"bias\",shape=[e33p])\n e33 = tf.nn.conv2d(s,W,[1,1,1,1],\"SAME\")+b\n e33 = tf.nn.relu(e33)\n return tf.concat([e11,e33],3)\n\n\nclass SqueezeNet(object):\n def extract_features(self, input=None, reuse=True):\n if input is None:\n input = self.image\n x = input\n layers = []\n with tf.variable_scope('features', reuse=reuse):\n with tf.variable_scope('layer0'):\n W = tf.get_variable(\"weights\",shape=[3,3,3,64])\n b = tf.get_variable(\"bias\",shape=[64])\n x = tf.nn.conv2d(x,W,[1,2,2,1],\"VALID\")\n x = tf.nn.bias_add(x,b)\n layers.append(x)\n with tf.variable_scope('layer1'):\n x = tf.nn.relu(x)\n layers.append(x)\n with tf.variable_scope('layer2'):\n x = tf.nn.max_pool(x,[1,3,3,1],strides=[1,2,2,1],padding='VALID')\n layers.append(x)\n with tf.variable_scope('layer3'):\n x = fire_module(x,64,16,64,64)\n layers.append(x)\n with tf.variable_scope('layer4'):\n x = fire_module(x,128,16,64,64)\n layers.append(x)\n with tf.variable_scope('layer5'):\n x = tf.nn.max_pool(x,[1,3,3,1],strides=[1,2,2,1],padding='VALID')\n layers.append(x)\n with tf.variable_scope('layer6'):\n x = fire_module(x,128,32,128,128)\n layers.append(x)\n with tf.variable_scope('layer7'):\n x = fire_module(x,256,32,128,128)\n layers.append(x)\n with tf.variable_scope('layer8'):\n x = tf.nn.max_pool(x,[1,3,3,1],strides=[1,2,2,1],padding='VALID')\n layers.append(x)\n with tf.variable_scope('layer9'):\n x = fire_module(x,256,48,192,192)\n layers.append(x)\n with tf.variable_scope('layer10'):\n x = fire_module(x,384,48,192,192)\n layers.append(x)\n with tf.variable_scope('layer11'):\n x = fire_module(x,384,64,256,256)\n layers.append(x)\n with tf.variable_scope('layer12'):\n x = fire_module(x,512,64,256,256)\n layers.append(x)\n return layers\n\n def __init__(self, save_path=None, sess=None):\n \"\"\"Create a SqueezeNet model.\n Inputs:\n - save_path: path to TensorFlow checkpoint\n - sess: TensorFlow session\n - input: optional input to the model. 
If None, will use placeholder for input.\n \"\"\"\n self.image = tf.placeholder('float',shape=[None,None,None,3],name='input_image')\n self.labels = tf.placeholder('int32', shape=[None], name='labels')\n self.layers = []\n x = self.image\n self.layers = self.extract_features(x, reuse=False)\n self.features = self.layers[-1]\n with tf.variable_scope('classifier'):\n with tf.variable_scope('layer0'):\n x = self.features\n self.layers.append(x)\n with tf.variable_scope('layer1'):\n W = tf.get_variable(\"weights\",shape=[1,1,512,1000])\n b = tf.get_variable(\"bias\",shape=[1000])\n x = tf.nn.conv2d(x,W,[1,1,1,1],\"VALID\")\n x = tf.nn.bias_add(x,b)\n self.layers.append(x)\n with tf.variable_scope('layer2'):\n x = tf.nn.relu(x)\n self.layers.append(x)\n with tf.variable_scope('layer3'):\n x = tf.nn.avg_pool(x,[1,13,13,1],strides=[1,13,13,1],padding='VALID')\n self.layers.append(x)\n self.scores = tf.reshape(x,[-1, NUM_CLASSES])\n\n if save_path is not None:\n saver = tf.train.Saver()\n saver.restore(sess, save_path)\n self.loss = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits_v2(labels=tf.one_hot(self.labels, NUM_CLASSES), logits=self.scores))\n",
"from __future__ import print_function\n\nimport numpy as np\nfrom random import randrange\n\ndef eval_numerical_gradient(f, x, verbose=True, h=0.00001):\n \"\"\" \n a naive implementation of numerical gradient of f at x \n - f should be a function that takes a single argument\n - x is the point (numpy array) to evaluate the gradient at\n \"\"\" \n\n fx = f(x) # evaluate function value at original point\n grad = np.zeros_like(x)\n # iterate over all indexes in x\n it = np.nditer(x, flags=['multi_index'], op_flags=['readwrite'])\n while not it.finished:\n\n # evaluate function at x+h\n ix = it.multi_index\n oldval = x[ix]\n x[ix] = oldval + h # increment by h\n fxph = f(x) # evalute f(x + h)\n x[ix] = oldval - h\n fxmh = f(x) # evaluate f(x - h)\n x[ix] = oldval # restore\n\n # compute the partial derivative with centered formula\n grad[ix] = (fxph - fxmh) / (2 * h) # the slope\n if verbose:\n print(ix, grad[ix])\n it.iternext() # step to next dimension\n\n return grad\n\n\ndef eval_numerical_gradient_array(f, x, df, h=1e-5):\n \"\"\"\n Evaluate a numeric gradient for a function that accepts a numpy\n array and returns a numpy array.\n \"\"\"\n grad = np.zeros_like(x)\n it = np.nditer(x, flags=['multi_index'], op_flags=['readwrite'])\n while not it.finished:\n ix = it.multi_index\n \n oldval = x[ix]\n x[ix] = oldval + h\n pos = f(x).copy()\n x[ix] = oldval - h\n neg = f(x).copy()\n x[ix] = oldval\n \n grad[ix] = np.sum((pos - neg) * df) / (2 * h)\n it.iternext()\n return grad\n\n\ndef eval_numerical_gradient_blobs(f, inputs, output, h=1e-5):\n \"\"\"\n Compute numeric gradients for a function that operates on input\n and output blobs.\n \n We assume that f accepts several input blobs as arguments, followed by a blob\n into which outputs will be written. For example, f might be called like this:\n\n f(x, w, out)\n \n where x and w are input Blobs, and the result of f will be written to out.\n\n Inputs: \n - f: function\n - inputs: tuple of input blobs\n - output: output blob\n - h: step size\n \"\"\"\n numeric_diffs = []\n for input_blob in inputs:\n diff = np.zeros_like(input_blob.diffs)\n it = np.nditer(input_blob.vals, flags=['multi_index'],\n op_flags=['readwrite'])\n while not it.finished:\n idx = it.multi_index\n orig = input_blob.vals[idx]\n\n input_blob.vals[idx] = orig + h\n f(*(inputs + (output,)))\n pos = np.copy(output.vals)\n input_blob.vals[idx] = orig - h\n f(*(inputs + (output,)))\n neg = np.copy(output.vals)\n input_blob.vals[idx] = orig\n \n diff[idx] = np.sum((pos - neg) * output.diffs) / (2.0 * h)\n\n it.iternext()\n numeric_diffs.append(diff)\n return numeric_diffs\n\n\ndef eval_numerical_gradient_net(net, inputs, output, h=1e-5):\n return eval_numerical_gradient_blobs(lambda *args: net.forward(),\n inputs, output, h=h)\n\n\ndef grad_check_sparse(f, x, analytic_grad, num_checks=10, h=1e-5):\n \"\"\"\n sample a few random elements and only return numerical\n in this dimensions.\n \"\"\"\n\n for i in range(num_checks):\n ix = tuple([randrange(m) for m in x.shape])\n\n oldval = x[ix]\n x[ix] = oldval + h # increment by h\n fxph = f(x) # evaluate f(x + h)\n x[ix] = oldval - h # increment by h\n fxmh = f(x) # evaluate f(x - h)\n x[ix] = oldval # reset\n\n grad_numerical = (fxph - fxmh) / (2 * h)\n grad_analytic = analytic_grad[ix]\n rel_error = abs(grad_numerical - grad_analytic) / (abs(grad_numerical) + abs(grad_analytic))\n print('numerical: %f analytic: %f, relative error: %e' % (grad_numerical, grad_analytic, rel_error))\n\n"
] |
[
[
"tensorflow.concat",
"tensorflow.nn.relu",
"tensorflow.nn.conv2d",
"tensorflow.one_hot",
"tensorflow.train.Saver",
"tensorflow.reshape",
"tensorflow.variable_scope",
"tensorflow.placeholder",
"tensorflow.get_variable",
"tensorflow.nn.bias_add",
"tensorflow.nn.max_pool",
"tensorflow.nn.avg_pool"
],
[
"numpy.sum",
"numpy.zeros_like",
"numpy.copy",
"numpy.nditer"
]
] |
ritu01rt/DeepPixel
|
[
"bb09017c40b2cbd3fe24c7cd7f1540b559434a66"
] |
[
"deeppixel/face_rec/Face_detection/FaceDetection.py"
] |
[
" \nfrom glob import glob \nimport matplotlib.pyplot as plt\nimport cv2\nimport os\nimport argparse\n\n#Function to convert RBG images to Gray\ndef convertToGray(image):\n\treturn cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)\n\n#Function to convert gray images to RGB\ndef convertToRGB(image):\n\treturn cv2.cvtColor(image, cv2.COLOR_BGR2RGB)\n\ndef faceDetection(img_path, output_folder, scaleFactor = 1.1):\n\t#OpenCV provides pre-trained models for detecting various objects like car , etc . here we are using haar cascade file for face detection only\n\t#Loading harcascade classifier for frontal face\n\tcascade = cv2.CascadeClassifier('haarcascade_frontalface_alt.xml')\n\n\t#Creating output folder if not present \n\tif output_folder and not os.path.isdir(output_folder): \n\t os.mkdir(output_folder) \n\t \t \n\timage = cv2.imread(img_path)\n\n\t#convert the image to gray scale as opencv face detector expects gray images\n\tgray_image = convertToGray(image)\n\t \n\t# Applying the haar classifier to detect faces\n\tfaces_rect = cascade.detectMultiScale(gray_image, scaleFactor=scaleFactor, minNeighbors=5)\n\tnumber_of_faces = len(faces_rect)\n\t\n\tfor (x, y, w, h) in faces_rect:\n\t cv2.rectangle(image, (x, y), (x+w, y+h), (0, 0, 255), 5)\n\n\t#convert to RGB\n\timg = convertToRGB(image)\n\n\t#Save the image to output folder\n\tif output_folder:\n\t\tinput_filename, ext = os.path.splitext(os.path.basename(img_path))\n\t\tfilename = os.path.join(output_folder, input_filename.format(ext))\n\t\tfig = plt.figure() \n\t\tplt.imshow(img) \n\t\tprint(\"Faces found: \", number_of_faces)\n\t\tplt.axis(\"off\")\n\t\tplt.savefig(filename,bbox_inches = 'tight', pad_inches = 0)\n\t\tplt.close(fig)\n\n\ndef main():\n\tparser = argparse.ArgumentParser()\n\tparser.add_argument('-i', '--img_path', type=str, help='Path to the input image')\n\tparser.add_argument('-o', '--output_folder', type=str, default=None, help='Path where the output image will be stored')\n\targs = parser.parse_args() \n\tfaceDetection(args.img_path, args.output_folder) \n\tos._exit(0)\n\n\nif __name__ == '__main__': \n main()\n"
] |
[
[
"matplotlib.pyplot.savefig",
"matplotlib.pyplot.close",
"matplotlib.pyplot.figure",
"matplotlib.pyplot.axis",
"matplotlib.pyplot.imshow"
]
] |
FantasyJXF/deeplab_v3
|
[
"153136ac496d54e6608e4053cadf6982811f1cb5"
] |
[
"preprocessing/read_data.py"
] |
[
"import tensorflow as tf\nfrom preprocessing.inception_preprocessing import apply_with_random_selector, distort_color\nimport urllib\nimport tarfile\nimport os\n\ndef random_flip_image_and_annotation(image_tensor, annotation_tensor, image_shape):\n \"\"\"Accepts image tensor and annotation tensor and returns randomly flipped tensors of both.\n The function performs random flip of image and annotation tensors with probability of 1/2\n The flip is performed or not performed for image and annotation consistently, so that\n annotation matches the image.\n\n Parameters\n ----------\n image_tensor : Tensor of size (width, height, 3)\n Tensor with image\n annotation_tensor : Tensor of size (width, height, 1)\n Tensor with annotation\n\n Returns\n -------\n randomly_flipped_img : Tensor of size (width, height, 3) of type tf.float.\n Randomly flipped image tensor\n randomly_flipped_annotation : Tensor of size (width, height, 1)\n Randomly flipped annotation tensor\n\n \"\"\"\n original_shape = tf.shape(annotation_tensor)\n # ensure the annotation tensor has shape (width, height, 1)\n annotation_tensor = tf.cond(tf.rank(annotation_tensor) < 3,\n lambda: tf.expand_dims(annotation_tensor, axis=2),\n lambda: annotation_tensor)\n\n # Random variable: two possible outcomes (0 or 1)\n # with a 1 in 2 chance\n random_var = tf.random_uniform(maxval=2, dtype=tf.int32, shape=[])\n\n\n randomly_flipped_img = tf.cond(pred=tf.equal(random_var, 0),\n true_fn=lambda: tf.image.flip_left_right(image_tensor),\n false_fn=lambda: image_tensor)\n\n randomly_flipped_annotation = tf.cond(pred=tf.equal(random_var, 0),\n true_fn=lambda: tf.image.flip_left_right(annotation_tensor),\n false_fn=lambda: annotation_tensor)\n\n return randomly_flipped_img, tf.reshape(randomly_flipped_annotation, original_shape, name=\"reshape_random_flip_image_and_annotation\"), image_shape\n\n\ndef rescale_image_and_annotation_by_factor(image, annotation, image_shape, nin_scale=0.5, max_scale=2):\n #We apply data augmentation by randomly scaling theinput images(from 0.5 to 2.0)\n #and randomly left - right flipping during training.\n input_shape = tf.shape(image)[0:2]\n input_shape_float = tf.to_float(input_shape)\n\n scale = tf.random_uniform(shape=[1],\n minval=0.5,\n maxval=2)\n\n scaled_input_shape = tf.to_int32(tf.round(input_shape_float * scale))\n\n image = tf.image.resize_images(image, scaled_input_shape,\n method=tf.image.ResizeMethod.BILINEAR)\n\n # use nearest neighbour for annotations resizing in order to keep proper values\n annotation = tf.image.resize_images(annotation, scaled_input_shape,\n method=tf.image.ResizeMethod.NEAREST_NEIGHBOR)\n\n return image, annotation, image_shape\n\n\ndef download_resnet_checkpoint_if_necessary(resnet_checkpoints_path, resnet_model_name):\n \"\"\"\n Check if the resnet checkpoints are already downloaded, if not download it\n :param resnet_checkpoints_path: string: path where the properly resnet checkpoint files should be found\n :param resnet_model_name: one of resnet_v2_50 or resnet_v2_101\n :return: None\n \"\"\"\n if not os.path.exists(resnet_checkpoints_path):\n # create the path and download the resnet checkpoints\n os.mkdir(resnet_checkpoints_path)\n\n filename = resnet_model_name + \"_2017_04_14.tar.gz\"\n\n url = \"http://download.tensorflow.org/models/\" + filename\n full_file_path = os.path.join(resnet_checkpoints_path, filename)\n urllib.request.urlretrieve(url, full_file_path)\n thetarfile = tarfile.open(full_file_path, \"r:gz\")\n thetarfile.extractall(path=resnet_checkpoints_path)\n 
thetarfile.close()\n print(\"Resnet:\", resnet_model_name, \"successfully downloaded.\")\n else:\n print(\"ResNet checkpoints file successfully found.\")\n\n\ndef scale_image_with_crop_padding(image, annotation, image_shape, crop_size):\n\n image_croped = tf.image.resize_image_with_crop_or_pad(image,crop_size,crop_size)\n\n # Shift all the classes by one -- to be able to differentiate\n # between zeros representing padded values and zeros representing\n # a particular semantic class.\n annotation_shifted_classes = annotation + 1\n\n cropped_padded_annotation = tf.image.resize_image_with_crop_or_pad(annotation_shifted_classes,crop_size,crop_size)\n\n mask_out_number=255\n annotation_additional_mask_out = tf.to_int32(tf.equal(cropped_padded_annotation, 0)) * (mask_out_number+1)\n cropped_padded_annotation = cropped_padded_annotation + annotation_additional_mask_out - 1\n\n return image_croped, tf.squeeze(cropped_padded_annotation), image_shape\n\ndef tf_record_parser(record):\n keys_to_features = {\n \"image_raw\": tf.FixedLenFeature((), tf.string, default_value=\"\"),\n 'annotation_raw': tf.FixedLenFeature([], tf.string),\n \"height\": tf.FixedLenFeature((), tf.int64),\n \"width\": tf.FixedLenFeature((), tf.int64)\n }\n\n features = tf.parse_single_example(record, keys_to_features)\n\n image = tf.decode_raw(features['image_raw'], tf.uint8)\n annotation = tf.decode_raw(features['annotation_raw'], tf.uint8)\n\n height = tf.cast(features['height'], tf.int32)\n width = tf.cast(features['width'], tf.int32)\n\n # reshape input and annotation images\n image = tf.reshape(image, (height, width, 3), name=\"image_reshape\")\n annotation = tf.reshape(annotation, (height,width,1), name=\"annotation_reshape\")\n annotation = tf.to_int32(annotation)\n\n return tf.to_float(image), annotation, (height, width)\n\ndef distort_randomly_image_color(image_tensor, annotation_tensor, image_shape):\n \"\"\"Accepts image tensor of (width, height, 3) and returns color distorted image.\n The function performs random brightness, saturation, hue, contrast change as it is performed\n for inception model training in TF-Slim (you can find the link below in comments). All the\n parameters of random variables were originally preserved. There are two regimes for the function\n to work: fast and slow. Slow one performs only saturation and brightness random change is performed.\n\n Parameters\n ----------\n image_tensor : Tensor of size (width, height, 3) of tf.int32 or tf.float\n Tensor with image with range [0,255]\n fast_mode : boolean\n Boolean value representing whether to use fast or slow mode\n\n Returns\n -------\n img_float_distorted_original_range : Tensor of size (width, height, 3) of type tf.float.\n Image Tensor with distorted color in [0,255] intensity range\n \"\"\"\n fast_mode=False\n # Make the range to be in [0,1]\n img_float_zero_one_range = tf.to_float(image_tensor) / 255\n\n # Randomly distort the color of image. 
There are 4 ways to do it.\n # Credit: TF-Slim\n # https://github.com/tensorflow/models/blob/master/slim/preprocessing/inception_preprocessing.py#L224\n # Most probably the inception models were trainined using this color augmentation:\n # https://github.com/tensorflow/models/tree/master/slim#pre-trained-models\n distorted_image = apply_with_random_selector(img_float_zero_one_range,\n lambda x, ordering: distort_color(x, ordering, fast_mode=fast_mode),\n num_cases=4)\n\n img_float_distorted_original_range = distorted_image * 255\n\n return img_float_distorted_original_range, annotation_tensor, image_shape\n"
] |
[
[
"tensorflow.rank",
"tensorflow.shape",
"tensorflow.decode_raw",
"tensorflow.expand_dims",
"tensorflow.round",
"tensorflow.random_uniform",
"tensorflow.FixedLenFeature",
"tensorflow.equal",
"tensorflow.reshape",
"tensorflow.image.resize_image_with_crop_or_pad",
"tensorflow.squeeze",
"tensorflow.to_float",
"tensorflow.image.flip_left_right",
"tensorflow.image.resize_images",
"tensorflow.to_int32",
"tensorflow.parse_single_example",
"tensorflow.cast"
]
] |
SikandarBakht/asg2cap
|
[
"97a1d866d4a2b86c1f474bb168518f97eb2f8b96"
] |
[
"framework/ops.py"
] |
[
"import numpy as np\n\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\n\nfrom torch.nn.utils.rnn import pack_padded_sequence\nfrom torch.nn.utils.rnn import pad_packed_sequence\n\ndef l2norm(inputs, dim=-1):\n # inputs: (batch, dim_ft)\n norm = torch.norm(inputs, p=2, dim=dim, keepdim=True)\n inputs = inputs / norm.clamp(min=1e-10)\n return inputs\n\ndef sequence_mask(lengths, max_len=None, inverse=False):\n ''' Creates a boolean mask from sequence lengths.\n '''\n # lengths: LongTensor, (batch, )\n batch_size = lengths.size(0)\n max_len = max_len or lengths.max()\n mask = torch.arange(0, max_len).type_as(lengths).repeat(batch_size, 1)\n if inverse:\n mask = mask.ge(lengths.unsqueeze(1))\n else:\n mask = mask.lt(lengths.unsqueeze(1))\n return mask\n\ndef subsequent_mask(size):\n '''Mask out subsequent position.\n Args\n size: the length of tgt words'''\n attn_shape = (1, size, size)\n # set the values below the 1th diagnose as 0\n mask = np.triu(np.ones(attn_shape), k=1).astype('uint8')\n mask = torch.from_numpy(mask) == 0\n return mask\n\ndef rnn_factory(rnn_type, **kwargs):\n rnn = getattr(nn, rnn_type.upper())(**kwargs)\n return rnn\n\n"
] |
[
[
"torch.norm",
"numpy.ones",
"torch.arange",
"torch.from_numpy"
]
] |
hklhfong/CAB420Assignment-1B
|
[
"50127f08a8c29e47c9674a7334969f77648e7092"
] |
[
"Question2.py"
] |
[
"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Fri May 15 15:24:24 2020\n\n@author: hofonglaw\n\"\"\"\n\nimport pandas as pd\nimport numpy as np\nfrom sklearn.mixture import GaussianMixture\nimport matplotlib.pyplot as plt\nfrom sklearn.cluster import KMeans\nfrom sklearn.metrics import confusion_matrix, ConfusionMatrixDisplay\nfrom sklearn.metrics import homogeneity_score, completeness_score, v_measure_score\nimport matplotlib.pylab as plt\nfrom collections import Counter \n\nmovie_data = pd.read_csv(r\"C:\\Users\\user\\Downloads\\Assignment_1B_Data\\Data\\Q2\\movies.csv\")\nratings_data = pd.read_csv(r\"C:\\Users\\user\\Downloads\\Assignment_1B_Data\\Data\\Q2\\ratings.csv\")\nratings_data = ratings_data.drop(columns='timestamp')\n#use len of movie data index could bring the fastest speed process\n#Finding all genres set\ngenre = movie_data['genres'][0]\nx = set(genre.split(\"|\"))\nfor i in range(len(movie_data.index)):\n temp_genre = movie_data['genres'][i]\n tempX = set(temp_genre.split(\"|\"))\n if not tempX.issubset(x):\n for val in tempX:\n x.add(val)\n\nlist_x = list(x)\nfor i in range(len(list_x)):\n movie_data[list_x[i]] = 0 \n\n#Change value the match the column \nfor i in range(len(movie_data.index)):\n temp_genre = movie_data['genres'][i]\n tempX = temp_genre.split(\"|\")\n for temp_X_item in tempX:\n movie_data.loc[i, movie_data.columns.str.contains(temp_X_item)] = 1\n \n\nmodify_movie_data = movie_data.drop(columns = ['title','genres']) \nratings_42 = ratings_data.loc[ratings_data['userId'] == 42]\nratings_42 = pd.merge(ratings_42,modify_movie_data)\nfor i in range (ratings_42.iloc[:,2].size):\n ratings_42.iloc[i,3:] = ratings_42.iloc[i,2] * ratings_42.iloc[i,3:]\nratings_42_5stars = ratings_42.loc[ratings_42['rating'] == 5]\nratings_42_5stars = ratings_42_5stars.drop(columns = ['userId','movieId','rating'])\nratings_42 = ratings_42.drop(columns = ['userId','movieId','rating'])\n\nratings_314 = ratings_data.loc[ratings_data['userId'] == 314]\nratings_314 = pd.merge(ratings_314,modify_movie_data)\nfor i in range (ratings_314.iloc[:,2].size):\n ratings_314.iloc[i,3:] = ratings_314.iloc[i,2] * ratings_314.iloc[i,3:]\nratings_314_5stars = ratings_314.loc[ratings_314['rating'] == 5]\nratings_314_5stars = ratings_314_5stars.drop(columns = ['userId','movieId','rating'])\nratings_314 = ratings_314.drop(columns = ['userId','movieId','rating'])\n\nratings_444 = ratings_data.loc[ratings_data['userId'] == 444]\nratings_444 = pd.merge(ratings_444,modify_movie_data)\nfor i in range (ratings_444.iloc[:,2].size):\n ratings_444.iloc[i,3:] = ratings_444.iloc[i,2] * ratings_444.iloc[i,3:]\nratings_444_5stars = ratings_444.loc[ratings_444['rating'] == 5]\nratings_444_5stars = ratings_444_5stars.drop(columns = ['userId','movieId','rating'])\nratings_444 = ratings_444.drop(columns = ['userId','movieId','rating'])\n\ncol_titile = ratings_42.columns.values\n\n\ndef Plotbics (ratings):\n bics = []\n for i in range (25):\n gmm = GaussianMixture(i+1, random_state=4)\n gmm.fit(ratings)\n bics.append(gmm.bic(ratings))\n \n fig, ax = plt.subplots(figsize=(9, 7))\n ax.plot(bics)\n ax.set_xlabel('Number of Clusters')\n ax.set_ylabel('BIC');\n\ndef PlotGmm(ratings,ratings_5stars, numCluster):\n gmm = GaussianMixture(numCluster, random_state=4,covariance_type = 'full')\n gmm.fit(ratings)\n scores = gmm.score_samples(ratings)\n labels = gmm.predict(ratings)\n fig, ax = plt.subplots(figsize=(9, 7))\n ax.plot(scores)\n\n five_star_list = gmm.predict(ratings_5stars);\n unique, counts = np.unique(five_star_list, 
return_counts=True)\n fig, ax = plt.subplots(figsize=(9, 7))\n plt.bar(unique, counts)\n ax.set_xlabel('Clusters Group Index')\n ax.set_ylabel('Amount of 5 stars review have been predicted');\n dictuniqueFiveStars = Counter(dict(zip(unique, counts))) \n Top3 = dictuniqueFiveStars.most_common(3) \n y_pos = np.arange(len(col_titile))\n for i in Top3:\n fig = plt.figure(figsize=[30, 25])\n performance = gmm.means_[i[0]]\n plt.bar(y_pos, performance, align='center', alpha=0.5)\n plt.xticks(y_pos, col_titile)\n plt.ylabel('Ratings')\n plt.title('Recommended movie genres with cluster ' + str(i[0])+ \" with \" + str(i[1]) + \" amount of 5 star reviews support\")\n plt.show()\n \nPlotbics(ratings_42)\nPlotGmm(ratings_42,ratings_42_5stars,12)\nPlotbics(ratings_314)\nPlotGmm(ratings_314,ratings_314_5stars,4)\nPlotbics(ratings_444)\nPlotGmm(ratings_444,ratings_444_5stars,3)\n\n#Movie Recommend\n#Change Minimum Score of Unseen Movie\n#Validation with some del movie in 5 star\n\n"
] |
[
[
"matplotlib.pylab.xticks",
"pandas.merge",
"matplotlib.pylab.ylabel",
"matplotlib.pylab.bar",
"matplotlib.pylab.show",
"matplotlib.pylab.figure",
"sklearn.mixture.GaussianMixture",
"matplotlib.pylab.subplots",
"pandas.read_csv",
"numpy.unique"
]
] |
octavian-ganea/equidock_public
|
[
"ac2c754399bf20b50a27d86dbff4f6669788d47f"
] |
[
"src/inference_rigid.py"
] |
[
"import os\n\nimport torch\n\nos.environ['DGLBACKEND'] = 'pytorch'\n\nfrom datetime import datetime as dt\nfrom src.utils.protein_utils import preprocess_unbound_bound, protein_to_graph_unbound_bound\nfrom biopandas.pdb import PandasPdb\nfrom src.utils.train_utils import *\nfrom src.utils.args import *\nfrom src.utils.ot_utils import *\nfrom src.utils.zero_copy_from_numpy import *\nfrom src.utils.io import create_dir\n\n\ndataset = 'dips'\nmethod_name = 'equidock'\nremove_clashes = False # Set to true if you want to remove (most of the) steric clashes. Will increase run time.\nif remove_clashes:\n method_name = method_name + '_no_clashes'\n print('Inference with postprocessing to remove clashes')\nelse:\n print('Inference without any postprocessing to remove clashes')\n\n\n# Ligand residue locations: a_i in R^3. Receptor: b_j in R^3\n# Ligand: G_l(x) = -sigma * ln( \\sum_i exp(- ||x - a_i||^2 / sigma) ), same for G_r(x)\n# Ligand surface: x such that G_l(x) = surface_ct\n# Other properties: G_l(a_i) < 0, G_l(x) = infinity if x is far from all a_i\n# Intersection of ligand and receptor: points x such that G_l(x) < surface_ct && G_r(x) < surface_ct\n# Intersection loss: IL = \\avg_i max(0, surface_ct - G_r(a_i)) + \\avg_j max(0, surface_ct - G_l(b_j))\ndef G_fn(protein_coords, x, sigma):\n # protein_coords: (n,3) , x: (m,3), output: (m,)\n e = torch.exp(- torch.sum((protein_coords.view(1, -1, 3) - x.view(-1,1,3)) ** 2, dim=2) / float(sigma) ) # (m, n)\n return - sigma * torch.log(1e-3 + e.sum(dim=1) )\n\n\ndef compute_body_intersection_loss(model_ligand_coors_deform, bound_receptor_repres_nodes_loc_array, sigma = 25., surface_ct=10.):\n assert model_ligand_coors_deform.shape[1] == 3\n loss = torch.mean( torch.clamp(surface_ct - G_fn(bound_receptor_repres_nodes_loc_array, model_ligand_coors_deform, sigma), min=0) ) + \\\n torch.mean( torch.clamp(surface_ct - G_fn(model_ligand_coors_deform, bound_receptor_repres_nodes_loc_array, sigma), min=0) )\n return loss\n\n\ndef get_rot_mat(euler_angles):\n roll = euler_angles[0]\n yaw = euler_angles[1]\n pitch = euler_angles[2]\n\n tensor_0 = torch.zeros([])\n tensor_1 = torch.ones([])\n cos = torch.cos\n sin = torch.sin\n\n RX = torch.stack([\n torch.stack([tensor_1, tensor_0, tensor_0]),\n torch.stack([tensor_0, cos(roll), -sin(roll)]),\n torch.stack([tensor_0, sin(roll), cos(roll)])]).reshape(3, 3)\n\n RY = torch.stack([\n torch.stack([cos(pitch), tensor_0, sin(pitch)]),\n torch.stack([tensor_0, tensor_1, tensor_0]),\n torch.stack([-sin(pitch), tensor_0, cos(pitch)])]).reshape(3, 3)\n\n RZ = torch.stack([\n torch.stack([cos(yaw), -sin(yaw), tensor_0]),\n torch.stack([sin(yaw), cos(yaw), tensor_0]),\n torch.stack([tensor_0, tensor_0, tensor_1])]).reshape(3, 3)\n\n R = torch.mm(RZ, RY)\n R = torch.mm(R, RX)\n return R\n\n\n\ndef get_residues(pdb_filename):\n df = PandasPdb().read_pdb(pdb_filename).df['ATOM']\n df.rename(columns={'chain_id': 'chain', 'residue_number': 'residue', 'residue_name': 'resname',\n 'x_coord': 'x', 'y_coord': 'y', 'z_coord': 'z', 'element_symbol': 'element'}, inplace=True)\n residues = list(df.groupby(['chain', 'residue', 'resname'])) ## Not the same as sequence order !\n return residues\n\n\n\ndef main(args):\n\n ## Pre-trained models.\n if dataset == 'dips':\n checkpoint_filename = 'oct20_Wdec_0.0001#ITS_lw_10.0#Hdim_64#Nlay_8#shrdLay_F#ln_LN#lnX_0#Hnrm_0#NattH_50#skH_0.75#xConnI_0.0#LkySl_0.01#pokOTw_1.0#fine_F#/'\n checkpoint_filename = 'checkpts/' + checkpoint_filename + '/dips_model_best.pth'\n elif dataset == 'db5':\n 
checkpoint_filename = 'oct20_Wdec_0.001#ITS_lw_10.0#Hdim_64#Nlay_5#shrdLay_T#ln_LN#lnX_0#Hnrm_0#NattH_50#skH_0.5#xConnI_0.0#LkySl_0.01#pokOTw_1.0#fine_F#'\n checkpoint_filename = 'checkpts/' + checkpoint_filename + '/db5_model_best.pth'\n\n print('checkpoint_filename = ', checkpoint_filename)\n\n checkpoint = torch.load(checkpoint_filename, map_location=args['device'])\n\n for k,v in checkpoint['args'].items():\n args[k] = v\n args['debug'] = False\n args['device'] = torch.device(\"cuda:0\") if torch.cuda.is_available() else torch.device(\"cpu\")\n args['n_jobs'] = 1\n args['worker'] = 0\n\n\n model = create_model(args, log)\n model.load_state_dict(checkpoint['state_dict'])\n param_count(model, log)\n model = model.to(args['device'])\n model.eval()\n\n print(args['layer_norm'], args['layer_norm_coors'], args['final_h_layer_norm'], args['intersection_loss_weight'])\n print('divide_coors_dist = ', args['divide_coors_dist'])\n\n\n\n time_list = []\n\n input_dir = './test_sets_pdb/' + dataset + '_test_random_transformed/random_transformed/'\n ground_truth_dir = './test_sets_pdb/' + dataset + '_test_random_transformed/complexes/'\n output_dir = './test_sets_pdb/' + dataset + '_' + method_name + '_results/'\n\n input_dir = './test_sets_pdb/jean/'\n ground_truth_dir = './test_sets_pdb/jean/'\n output_dir = './test_sets_pdb/jean_out/'\n # create_dir(output_dir)\n\n pdb_files = [f for f in os.listdir(input_dir) if os.path.isfile(os.path.join(input_dir, f)) and f.endswith('.pdb')]\n for file in pdb_files:\n\n if not file.endswith('_l_b.pdb'):\n continue\n ll = len('_l_b.pdb')\n ligand_filename = os.path.join(input_dir, file[:-ll] + '_l_b' + '.pdb')\n receptor_filename = os.path.join(ground_truth_dir, file[:-ll] + '_r_b' + '_COMPLEX.pdb')\n gt_ligand_filename = os.path.join(ground_truth_dir, file[:-ll] + '_l_b' + '_COMPLEX.pdb')\n out_filename = file[:-ll] + '_l_b' + '_' + method_name.upper() + '.pdb'\n\n print(' inference on file = ', ligand_filename)\n\n\n start = dt.now()\n\n ppdb_ligand = PandasPdb().read_pdb(ligand_filename)\n\n unbound_ligand_all_atoms_pre_pos = ppdb_ligand.df['ATOM'][['x_coord', 'y_coord', 'z_coord']].to_numpy().squeeze().astype(np.float32)\n\n\n def get_nodes_coors_numpy(filename, all_atoms=False):\n df = PandasPdb().read_pdb(filename).df['ATOM']\n if not all_atoms:\n return torch.from_numpy(df[df['atom_name'] == 'CA'][['x_coord', 'y_coord', 'z_coord']].to_numpy().squeeze().astype(np.float32))\n return torch.from_numpy(df[['x_coord', 'y_coord', 'z_coord']].to_numpy().squeeze().astype(np.float32))\n\n gt_ligand_nodes_coors = get_nodes_coors_numpy(gt_ligand_filename, all_atoms=True)\n gt_receptor_nodes_coors = get_nodes_coors_numpy(receptor_filename, all_atoms=True)\n initial_ligand_nodes_coors = get_nodes_coors_numpy(ligand_filename, all_atoms=True)\n\n\n\n unbound_predic_ligand, \\\n unbound_predic_receptor, \\\n bound_ligand_repres_nodes_loc_clean_array,\\\n bound_receptor_repres_nodes_loc_clean_array = preprocess_unbound_bound(\n get_residues(ligand_filename), get_residues(receptor_filename),\n graph_nodes=args['graph_nodes'], pos_cutoff=args['pocket_cutoff'], inference=True)\n\n\n ligand_graph, receptor_graph = protein_to_graph_unbound_bound(unbound_predic_ligand,\n unbound_predic_receptor,\n bound_ligand_repres_nodes_loc_clean_array,\n bound_receptor_repres_nodes_loc_clean_array,\n graph_nodes=args['graph_nodes'],\n cutoff=args['graph_cutoff'],\n max_neighbor=args['graph_max_neighbor'],\n one_hot=False,\n residue_loc_is_alphaC=args['graph_residue_loc_is_alphaC']\n )\n\n 
if args['input_edge_feats_dim'] < 0:\n args['input_edge_feats_dim'] = ligand_graph.edata['he'].shape[1]\n\n\n ligand_graph.ndata['new_x'] = ligand_graph.ndata['x']\n\n assert np.linalg.norm(bound_ligand_repres_nodes_loc_clean_array - ligand_graph.ndata['x'].detach().cpu().numpy()) < 1e-1\n\n # Create a batch of a single DGL graph\n batch_hetero_graph = batchify_and_create_hetero_graphs_inference(ligand_graph, receptor_graph)\n\n batch_hetero_graph = batch_hetero_graph.to(args['device'])\n model_ligand_coors_deform_list, \\\n model_keypts_ligand_list, model_keypts_receptor_list, \\\n all_rotation_list, all_translation_list = model(batch_hetero_graph, epoch=0)\n\n\n rotation = all_rotation_list[0].detach().cpu().numpy()\n translation = all_translation_list[0].detach().cpu().numpy()\n\n new_residues = (rotation @ bound_ligand_repres_nodes_loc_clean_array.T).T+translation\n assert np.linalg.norm(new_residues - model_ligand_coors_deform_list[0].detach().cpu().numpy()) < 1e-1\n\n unbound_ligand_new_pos = (rotation @ unbound_ligand_all_atoms_pre_pos.T).T+translation\n\n euler_angles_finetune = torch.zeros([3], requires_grad=True)\n translation_finetune = torch.zeros([3], requires_grad=True)\n ligand_th = (get_rot_mat(euler_angles_finetune) @ torch.from_numpy(unbound_ligand_new_pos).T).T + translation_finetune\n\n ## Optimize the non-intersection loss:\n if remove_clashes:\n non_int_loss_item = 100.\n it = 0\n while non_int_loss_item > 0.5 and it < 2000:\n non_int_loss = compute_body_intersection_loss(ligand_th, gt_receptor_nodes_coors, sigma=8, surface_ct=8)\n non_int_loss_item = non_int_loss.item()\n eta = 1e-3\n if non_int_loss < 2.:\n eta = 1e-4\n if it > 1500:\n eta = 1e-2\n if it % 100 == 0:\n print(it, ' ' , non_int_loss_item)\n non_int_loss.backward()\n translation_finetune = translation_finetune - eta * translation_finetune.grad.detach()\n translation_finetune = torch.tensor(translation_finetune, requires_grad=True)\n\n euler_angles_finetune = euler_angles_finetune - eta * euler_angles_finetune.grad.detach()\n euler_angles_finetune = torch.tensor(euler_angles_finetune, requires_grad=True)\n\n ligand_th = (get_rot_mat(euler_angles_finetune) @ torch.from_numpy(unbound_ligand_new_pos).T).T + translation_finetune\n\n it += 1\n\n\n ppdb_ligand.df['ATOM'][['x_coord', 'y_coord', 'z_coord']] = ligand_th.detach().numpy() # unbound_ligand_new_pos\n unbound_ligand_save_filename = os.path.join(output_dir, out_filename)\n ppdb_ligand.to_pdb(path=unbound_ligand_save_filename, records=['ATOM'], gz=False)\n\n end = dt.now()\n time_list.append((end-start).total_seconds())\n\n time_array = np.array(time_list)\n log(f\"Mean runtime: {np.mean(time_array)}, std runtime: {np.std(time_array)}\")\n log('Time list = ', time_list)\n\n\nif __name__ == \"__main__\":\n main(args)"
] |
[
[
"torch.zeros",
"torch.device",
"torch.stack",
"torch.mm",
"torch.ones",
"torch.from_numpy",
"torch.cuda.is_available",
"torch.tensor",
"torch.load"
]
] |
code-BSOD/Describing_a_Knowledge_Base
|
[
"b516bc77b25fa02e86bac20f6ebc4e11113c9efe"
] |
[
"main.py"
] |
[
"import argparse\nimport sys\nimport torch\nimport torch.nn as nn\nimport torch.optim as optim\nfrom predictor import Predictor\nfrom utils.loader import Table2text_seq\nfrom structure_generator.EncoderRNN import EncoderRNN\nfrom structure_generator.DecoderRNN import DecoderRNN\nfrom structure_generator.seq2seq import Seq2seq\nfrom eval_final import Evaluate\nfrom eval import Evaluate_test\n\n\nclass Config(object):\n cell = \"GRU\"\n emsize = 256\n pemsize = 5\n nlayers = 1\n lr = 0.001\n epochs = 19\n batch_size = 150\n dropout = 0\n bidirectional = False\n max_grad_norm = 10\n max_len = 100\n\n\nclass ConfigTest(object):\n cell = \"GRU\"\n emsize = 30\n pemsize = 30\n nlayers = 1\n lr = 0.001\n epochs = 2\n batch_size = 10\n dropout = 0\n bidirectional = True\n max_grad_norm = 1\n testmode = True\n max_len = 50\n\n\nparser = argparse.ArgumentParser(description='pointer model')\nparser.add_argument('--seed', type=int, default=1111,\n help='random seed')\nparser.add_argument('--cuda', action='store_true',\n help='use CUDA')\nparser.add_argument('--save', type=str, default='params.pkl',\n help='path to save the final model')\nparser.add_argument('--mode', type=int, default=0,\n help='train(0)/predict_individual(1)/predict_file(2)/compute score(3) or keep train (4)')\nparser.add_argument('--type', type=int, default=0,\n help='person(0)/animal(1)')\nparser.add_argument('--mask', type=int, default=0,\n help='false(0)/true(1)')\nargs = parser.parse_args()\n\n# Set the random seed manually for reproducibility.\ntorch.manual_seed(args.seed)\nif torch.cuda.is_available():\n if not args.cuda:\n print(\"WARNING: You have a CUDA device, so you should probably run with --cuda\")\n else:\n torch.cuda.manual_seed(args.seed)\n\ndevice = torch.device(\"cuda\" if args.cuda else \"cpu\")\nconfig = Config()\n# config = ConfigTest()\n\nif args.mask == 1:\n filepost = \"_m\"\nelse:\n filepost = \"\"\n\nif args.type == 1:\n args.save = 'params_D.pkl'\n config.epochs = 20\n filepost += \"_A.txt\"\nelse:\n filepost += \"_P.txt\"\nt_dataset = Table2text_seq(0, type=args.type, USE_CUDA=args.cuda, batch_size=config.batch_size)\nv_dataset = Table2text_seq(1, type=args.type, USE_CUDA=args.cuda, batch_size=config.batch_size)\nprint(\"number of training examples: %d\" % t_dataset.len)\nembedding = nn.Embedding(t_dataset.vocab.size, config.emsize, padding_idx=0)\nencoder = EncoderRNN(t_dataset.vocab.size, embedding, config.emsize, t_dataset.max_p, config.pemsize,\n input_dropout_p=config.dropout, dropout_p=config.dropout, n_layers=config.nlayers,\n bidirectional=config.bidirectional, rnn_cell=config.cell, variable_lengths=True)\ndecoder = DecoderRNN(t_dataset.vocab.size, embedding, config.emsize, config.pemsize, sos_id=3, eos_id=2, unk_id=1,\n n_layers=config.nlayers, rnn_cell=config.cell, bidirectional=config.bidirectional,\n input_dropout_p=config.dropout, dropout_p=config.dropout, USE_CUDA=args.cuda, mask=args.mask)\nmodel = Seq2seq(encoder, decoder).to(device)\noptimizer = optim.Adam(model.parameters(), lr=config.lr)\npredictor = Predictor(model, v_dataset.vocab, args.cuda)\n\n\ndef train_batch(dataset, batch_idx, model, teacher_forcing_ratio):\n batch_s, batch_o_s, batch_t, batch_o_t, batch_f, batch_pf, batch_pb, source_len, max_source_oov = \\\n dataset.get_batch(batch_idx)\n losses = model(batch_s, batch_o_s, max_source_oov, batch_f, batch_pf, batch_pb, source_len, batch_t,\n batch_o_t, teacher_forcing_ratio)\n batch_loss = losses.mean()\n model.zero_grad()\n batch_loss.backward()\n 
torch.nn.utils.clip_grad_norm_(model.parameters(), config.max_grad_norm)\n optimizer.step()\n return batch_loss.item(), len(source_len)\n\n\ndef train_epoches(t_dataset, v_dataset, model, n_epochs, teacher_forcing_ratio):\n eval_f = Evaluate_test()\n best_dev = 0\n train_loader = t_dataset.corpus\n len_batch = len(train_loader)\n epoch_examples_total = t_dataset.len\n for epoch in range(1, n_epochs + 1):\n model.train(True)\n torch.set_grad_enabled(True)\n epoch_loss = 0\n for batch_idx in range(len_batch):\n loss, num_examples = train_batch(t_dataset, batch_idx, model, teacher_forcing_ratio)\n epoch_loss += loss * num_examples\n sys.stdout.write(\n '%d batches processed. current batch loss: %f\\r' %\n (batch_idx, loss)\n )\n sys.stdout.flush()\n epoch_loss /= epoch_examples_total\n log_msg = \"Finished epoch %d with losses: %.4f\" % (epoch, epoch_loss)\n print(log_msg)\n predictor = Predictor(model, v_dataset.vocab, args.cuda)\n print(\"Start Evaluating\")\n cand, ref = predictor.preeval_batch(v_dataset)\n print('Result:')\n print('ref: ', ref[1][0])\n print('cand: ', cand[1])\n final_scores = eval_f.evaluate(live=True, cand=cand, ref=ref)\n epoch_score = 2*final_scores['ROUGE_L']*final_scores['Bleu_4']/(final_scores['Bleu_4']+ final_scores['ROUGE_L'])\n if epoch_score > best_dev:\n torch.save(model.state_dict(), args.save)\n print(\"model saved\")\n best_dev = epoch_score\n\n\nif __name__ == \"__main__\":\n if args.mode == 0:\n # train\n try:\n print(\"start training...\")\n train_epoches(t_dataset, v_dataset, model, config.epochs, 1)\n except KeyboardInterrupt:\n print('-' * 89)\n print('Exiting from training early')\n elif args.mode == 1:\n # predict sentence\n model.load_state_dict(torch.load(args.save))\n print(\"model restored\")\n dataset = Table2text_seq(2, type=args.type, USE_CUDA=args.cuda, batch_size=1)\n print(\"Read test data\")\n predictor = Predictor(model, dataset.vocab, args.cuda)\n while True:\n seq_str = input(\"Type index from (%d to %d) to continue:\\n\" %(0, dataset.len - 1))\n i = int(seq_str)\n batch_s, batch_o_s, batch_f, batch_pf, batch_pb, sources, targets, fields, list_oovs, source_len\\\n , max_source_oov, w2fs = dataset.get_batch(i)\n table = []\n for i in range(len(sources[0])):\n table.append(fields[0][i])\n table.append(\":\")\n table.append(sources[0][i])\n print(\"Table:\")\n print(' '.join(table)+'\\n')\n print(\"Refer: \")\n print(' '.join(targets[0])+'\\n')\n outputs = predictor.predict(batch_s, batch_o_s, batch_f, batch_pf, batch_pb, max_source_oov\n , source_len, list_oovs[0], w2fs)\n print(\"Result: \")\n print(outputs)\n print('-'*120)\n elif args.mode == 2:\n model.load_state_dict(torch.load(args.save))\n print(\"model restored\")\n dataset = Table2text_seq(2, type=args.type, USE_CUDA=args.cuda, batch_size=config.batch_size)\n print(\"Read test data\")\n predictor = Predictor(model, dataset.vocab, args.cuda)\n print(\"number of test examples: %d\" % dataset.len)\n print(\"Start Evaluating\")\n lines = predictor.predict_file(dataset)\n print(\"Start writing\")\n f_out = open(\"Output\" + filepost, 'w')\n f_out.writelines(lines)\n f_out.close()\n elif args.mode == 3:\n model.load_state_dict(torch.load(args.save))\n print(\"model restored\")\n dataset = Table2text_seq(2, type=args.type, USE_CUDA=args.cuda, batch_size=config.batch_size)\n print(\"Read test data\")\n predictor = Predictor(model, dataset.vocab, args.cuda)\n print(\"number of test examples: %d\" % dataset.len)\n eval_f = Evaluate()\n print(\"Start Evaluating\")\n cand, ref = 
predictor.preeval_batch(dataset)\n scores = []\n fields = [\"Bleu_1\", \"Bleu_2\", \"Bleu_3\", \"Bleu_4\", \"METEOR\", \"ROUGE_L\"]\n final_scores = eval_f.evaluate(live=True, cand=cand, ref=ref)\n\n f_out = open(\"score\" + filepost, 'w')\n for field in fields:\n f_out.write(field + '\\t' + str(final_scores[field])+'\\n')\n f_out.close()\n elif args.mode == 4:\n # load and keep training\n model.load_state_dict(torch.load(args.save))\n print(\"model restored\")\n # train\n try:\n print(\"start training...\")\n train_epoches(t_dataset, v_dataset, model, 1, 1)\n except KeyboardInterrupt:\n print('-' * 89)\n print('Exiting from training early')\n dataset = Table2text_seq(2, type=args.type, USE_CUDA=args.cuda, batch_size=config.batch_size)\n print(\"Read test data\")\n predictor = Predictor(model, dataset.vocab, args.cuda)\n print(\"number of test examples: %d\" % dataset.len)\n eval_f = Evaluate()\n print(\"Start Evaluating\")\n cand, ref = predictor.preeval_batch(dataset)\n scores = []\n fields = [\"Bleu_1\", \"Bleu_2\", \"Bleu_3\", \"Bleu_4\", \"METEOR\", \"ROUGE_L\"]\n final_scores = eval_f.evaluate(live=True, cand=cand, ref=ref)\n x = input('Save (1) or not')\n if x == '1':\n torch.save(model.state_dict(), args.save)\n print(\"model saved\")\n\n"
] |
[
[
"torch.device",
"torch.cuda.manual_seed",
"torch.set_grad_enabled",
"torch.manual_seed",
"torch.cuda.is_available",
"torch.load",
"torch.nn.Embedding"
]
] |
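The checkpoint selection in `main.py` above keeps the model whose dev-set score improves, where `epoch_score` is the harmonic mean of ROUGE-L and BLEU-4. A minimal sketch of that combination, with made-up score values standing in for the real `eval_f.evaluate` output:

```python
def dev_score(bleu_4: float, rouge_l: float) -> float:
    """Harmonic mean of BLEU-4 and ROUGE-L, as used for checkpoint selection above."""
    if bleu_4 + rouge_l == 0.0:
        return 0.0
    return 2 * rouge_l * bleu_4 / (bleu_4 + rouge_l)

# Hypothetical scores, for illustration only.
print(dev_score(bleu_4=0.42, rouge_l=0.55))  # ~0.476
```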
scedastic/my_ml_service
|
[
"9231db2602a78a2c696c79f5ff8b26edbd225c39"
] |
[
"backend/server/apps/ml/income_classifier/random_forest.py"
] |
[
"import joblib\r\nimport pandas as pd\r\n\r\nclass RandomForestClassifier:\r\n def __init__(self):\r\n '''\r\n Constructor: loads preprocessing objects and Random Forest object (created with Jupyter)\r\n '''\r\n # path_to_artifacts = \"../../reasearch/\"\r\n path_to_artifacts = \"c:/users/ysheinfil/scedastic/my_ml_service/research/\"\r\n self.values_fill_missing = joblib.load(path_to_artifacts + \"train_mode.joblib\")\r\n self.encoders = joblib.load(path_to_artifacts + \"encoders.joblib\")\r\n self.model = joblib.load(path_to_artifacts + \"random_forest.joblib\")\r\n\r\n def preprocessing(self, input_data):\r\n '''\r\n Takes JSON data, converts it to pandas.DataFrame and apply preprocessing\r\n '''\r\n # JSON to pandas DataFrame\r\n input_data = pd.DataFrame(input_data, index=[0])\r\n\r\n # fill in missing values\r\n input_data.fillna(self.values_fill_missing)\r\n\r\n # convert categoricals\r\n for column in [\"workclass\", \"education\", \"marital-status\", \"occupation\", \"relationship\", \"race\", \"sex\", \"native-country\",]:\r\n categorical_convert = self.encoders[column]\r\n input_data[column] = categorical_convert.transform(input_data[column])\r\n \r\n return input_data\r\n\r\n def predict(self, input_data):\r\n '''\r\n Calls ML for computing predictions on prepared data\r\n '''\r\n return self.model.predict_proba(input_data)\r\n\r\n def postprocesseing(self, input_data):\r\n '''\r\n Applies post-processing on prediction values\r\n '''\r\n label = \"<=50K\"\r\n if input_data[1] > 0.5:\r\n label = \">50K\"\r\n return {\"probability\": input_data[1], \"label\": label, \"status\": \"OK\"}\r\n\r\n def compute_prediction(self, input_data):\r\n '''\r\n Combines preprocessing, predict and postprocessing and returns JSON with the response\r\n '''\r\n try:\r\n input_data = self.preprocessing(input_data)\r\n prediction = self.predict(input_data)[0] # only one sample\r\n prediction = self.postprocesseing(prediction)\r\n except Exception as e:\r\n return {\"status\": \"Error\", \"message\": str(e)}\r\n\r\n return prediction\r\n"
] |
[
[
"pandas.DataFrame"
]
] |
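A detail worth noting in `preprocessing` above: `DataFrame.fillna` returns a new frame unless `inplace=True` is passed, so the result has to be captured or the missing values survive. A small demonstration on toy data, with column names borrowed from the income features above:

```python
import pandas as pd

df = pd.DataFrame({"age": [39, None], "workclass": ["Private", None]})
fill_values = pd.Series({"age": 30, "workclass": "Private"})

df.fillna(fill_values)       # returns a filled copy; df itself is unchanged
df = df.fillna(fill_values)  # capturing the result actually applies the fill
assert df.isna().sum().sum() == 0
```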
mady1258/clade_prediction
|
[
"c708251972be001a1341bb4ab10f8443b33a590e"
] |
[
"convert_gen_seqs_fasta.py"
] |
[
"import time\nimport sys\nimport os\n\nimport pandas as pd\nimport numpy as np\nimport json\n\nimport utils\n\ndata_path = \"test_results/08_10_one_hot_3_CPU_20A_20B/\"\ngen_file = \"true_predicted_multiple_20B_20I_Alpha_20F_20D_21G_Lambda_21H_2_times.csv\"\nparent_clade = \"20B\"\nchild_clade = \"20I_Alpha_20F_20D_21G_Lambda_21H\"\n\n\ndef convert_seq():\n\n f_dict = utils.read_json(data_path + \"f_word_dictionaries.json\")\n dataframe = pd.read_csv(data_path + gen_file, sep=\",\")\n parent_seqs = dataframe[parent_clade]\n gen_seqs = dataframe[child_clade]\n convert_to_fasta(gen_seqs.tolist(), f_dict)\n\n\ndef convert_to_fasta(list_seqs, f_dict):\n fasta_txt = \"\"\n for i, seq in enumerate(list_seqs):\n fasta_txt += \">{}|Generated \".format(str(i))\n fasta_txt += \"\\n\\n\"\n letter_seq = [f_dict[item] for item in seq.split(\",\")]\n letter_seq = \"\".join(letter_seq)\n letter_seq = letter_seq + \"*\"\n fasta_txt += letter_seq\n fasta_txt += \"\\n\\n\"\n if i == 10:\n break\n with open(data_path + \"generated_seqs.fasta\", \"w\") as f:\n f.write(fasta_txt)\n\n\nif __name__ == \"__main__\":\n start_time = time.time()\n convert_seq()\n end_time = time.time()\n print(\"Program finished in {} seconds\".format(str(np.round(end_time - start_time, 2))))\n"
] |
[
[
"numpy.round",
"pandas.read_csv"
]
] |
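`convert_to_fasta` above inserts a blank line between each `>` header and its sequence; plain FASTA puts the sequence immediately after the header line, and the extra blank can trip up stricter parsers. A minimal sketch of a stricter writer, assuming the sequences have already been decoded into letter strings:

```python
def write_fasta(letter_seqs, path, limit=11):
    """Write '>header' followed directly by the sequence, one record per pair of lines."""
    with open(path, "w") as f:
        for i, seq in enumerate(letter_seqs[:limit]):
            f.write(">{}|Generated\n{}\n".format(i, seq))

write_fasta(["MKTAYIAK*", "MGASRLE*"], "generated_seqs.fasta")  # made-up sequences
```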
ViliamVadocz/Bots
|
[
"092abc5bf92e9dab9d07499849d54a33b0b0c4f6"
] |
[
"Retired/BoostHog/util.py"
] |
[
"import math\n\ntry:\n import numpy as np\nexcept ImportError:\n try:\n from pip import main as pipmain\n except ImportError:\n from pip._internal import main as pipmain\n pipmain(['install', 'numpy'])\n try:\n import numpy as np\n except ImportError:\n raise ImportError(\"Failed to install numpy automatically, please install manually using: 'pip install numpy'\")\n\nfrom scipy.interpolate import interp1d\n\n\n''' Utility functions '''\n#Inspired by Marvin's bot Stick with modifications because I didn't understand what half of his stuff did :P\n\ndef a2(V):\n \"\"\"Converts a Vector or normal list to a numpy array of size 2.\"\"\"\n try:\n a = np.array([V[0], V[1]])\n except TypeError:\n a = np.array([V.x, V.y])\n return a\n\n\ndef a3(V):\n \"\"\"Converts a Vector, rotator or normal list to a numpy array of size 3.\"\"\"\n try:\n return np.array([V[0], V[1], V[2]])\n except TypeError:\n try:\n return np.array([V.x, V.y, V.z])\n except AttributeError:\n return np.array([V.Pitch, V.Yaw, V.Roll])\n\n\ndef a3l(L):\n \"\"\"Converts List to numpy array.\"\"\"\n return np.array([L[0], L[1], L[2]])\n\n\ndef a3r(R):\n \"\"\"Converts Rotator to numpy array.\"\"\"\n return np.array([R.pitch, R.yaw, R.roll])\n\n\ndef a3v(V):\n \"\"\"Converts Vector3 to numpy array.\"\"\"\n return np.array([V.x, V.y, V.z])\n\ndef dist2d(a, b=[0, 0]):\n \"\"\"Distance/Magnitude in 2d.\"\"\"\n return math.sqrt((a[0] - b[0]) ** 2 + (a[1] - b[1]) ** 2)\n\n\ndef dist3d(a, b=[0, 0, 0]):\n \"\"\"Distance/Magnitude in 3d.\"\"\"\n return math.sqrt((a[0] - b[0]) ** 2 + (a[1] - b[1]) ** 2 + (a[2] - b[2]) ** 2)\n\n\ndef normalize(A):\n \"\"\"Resizes the vector length to 1.\"\"\"\n mag = np.linalg.norm(A)\n if mag == 0:\n mag = 1\n return A / mag\n\n\ndef turning_radius(speed):\n \"\"\"Minimum turning radius given speed.\"\"\"\n return -6.901E-11 * speed**4 + 2.1815E-07 * speed**3 - 5.4437E-06 * speed**2 + 0.12496671 * speed + 157\n\n\ndef turning_speed(radius):\n \"\"\"Maximum speed given turning radius.\"\"\"\n return 10.219 * radius - 1.75404E-2 * radius**2 + 1.49406E-5 * radius**3 - 4.486542E-9 * radius**4 - 1156.05\n\n\ndef get_steer(speed, desire_r):\n \"\"\"aproximated steer for a desired curvature for given speed.\"\"\"\n x = np.array([0, 500, 1000, 1500, 1750, 2300])\n y = np.array([0.0069, 0.00396, 0.00235, 0.001375, 0.0011, 0.00088])\n f = interp1d(x, y)\n \n max_curv = f(speed)\n desire_curv = 1 / desire_r\n\n if max_curv >= desire_curv:\n return desire_curv / max_curv\n else:\n return 1.0\n\n\ndef radius_from_points(p0, p1, p2):\n \"\"\"finds radius of circle defined by three points\"\"\"\n x0 = p0[0]\n y0 = p0[1]\n \n x1 = p1[0]\n y1 = p1[1]\n \n x2 = p2[0]\n y2 = p2[1]\n \n m0 = -(x1-x0)/(y1-y0)\n m1 = -(x2-x1)/(y2-y1)\n \n A = y2-y0 + m0*(x0+x1) - m1*(x1+x2)\n \n x = A / (2*(m0-m1))\n y = m0*(x-0.5*(x0+x1))+0.5*(y0+y1)\n \n r = math.sqrt((x-x0)**2+(y-y0)**2)\n \n return r\n\n\ndef orientMat(R):\n \"\"\"converts from Euler angles to an orientation matrix.\"\"\"\n pitch = R[0]\n yaw = R[1]\n roll = R[2]\n\n CR = math.cos(roll)\n SR = math.sin(roll)\n CP = math.cos(pitch)\n SP = math.sin(pitch)\n CY = math.cos(yaw)\n SY = math.sin(yaw)\n\n A = np.zeros((3,3))\n\n #front direction\n A[0][0] = CP * CY\n A[0][1] = CP * SY\n A[0][2] = SP \n\n #right direction\n A[1][0] = CY * SP * SR - CR * SY\n A[1][1] = SY * SP * SR + CR * CY\n A[1][2] = -CP * SR\n\n #up direction\n A[2][0] = -CR * CY * SP - SR * SY\n A[2][1] = -CR * SY * SP + SR * CY\n A[2][2] = CP * CR\n\n return A\n\n\ndef local(V, A):\n \"\"\"Transforms global/world 
into local coordinates.\"\"\"\n return np.dot(A, V)\n\n\ndef world(V, A):\n \"\"\"Transforms local into global/world coordinates.\"\"\"\n return np.dot(V, A)\n\n#https://en.wikipedia.org/wiki/B%C3%A9zier_curve\ndef bezier_quadratic(p0, p1, p2, t):\n \"\"\"Returns a position on bezier curve defined by 3 points and t.\"\"\"\n return p1 + (1-t)**2*(p0-p1) + t**2*(p2-p1)\n\n\ndef bezier_cubic(p0, p1, p2, p3, t):\n \"\"\"Returns a position on bezier curve defined by 4 points and t.\"\"\"\n return (1-t)**3*p0 + 3*(1-t)**2*t*p1 + 3*(1-t)*t**2*p2 + t**3*p3\n"
] |
[
[
"numpy.array",
"numpy.linalg.norm",
"numpy.dot",
"scipy.interpolate.interp1d",
"numpy.zeros"
]
] |
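The Bézier helpers above accept numpy arrays for the control points, so whole path positions fall out of plain array arithmetic. A short usage sketch of `bezier_cubic` with made-up control points:

```python
import numpy as np

def bezier_cubic(p0, p1, p2, p3, t):
    """Point on a cubic Bezier curve at parameter t in [0, 1]."""
    return (1 - t)**3 * p0 + 3 * (1 - t)**2 * t * p1 + 3 * (1 - t) * t**2 * p2 + t**3 * p3

p0, p3 = np.array([0.0, 0.0]), np.array([1000.0, 0.0])      # endpoints
p1, p2 = np.array([0.0, 800.0]), np.array([1000.0, 800.0])  # made-up control points
path = [bezier_cubic(p0, p1, p2, p3, t) for t in np.linspace(0.0, 1.0, 5)]
# path[0] equals p0 and path[-1] equals p3; interior samples bow toward p1/p2.
```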
AdamMomen/linear_regression
|
[
"cf4d3ac41709b42878222ef37697587af7cb1b97"
] |
[
".history/run_20200624142032.py"
] |
[
"import matplotlib.pyplot as plt\nimport numpy as np\nimport pandas as pd\nimport sklearn.linear_model\n\n\n# Loading the data\noecd_bli = pd.read_csv(\"oced_bli_2015.csv\", thousands=\",\")\ngdp_per_capita = pd.read_csv(\n \"oced_bli_2015.csv\", thousands=\",\", delimiter=\"\\t\", encoding=\"latin1\", na_values=\"n/a\")\n\n\ndef prepare_country_stats(oecd_bli, gdp_per_capita):\n oecd_bli = oecd_bli[oecd_bli[\"INEQUALITY\"] == \"TOT\"]\n oecd_bli = oecd_bli.pivot(\n index=\"Country\", columns=\"Indicator\", values=\"Value\")\n gdp_per_capita.rename(columns={\"2015\": \"GDP per capita\"}, inplace=True)\n gdp_per_capita.set_index(\"Country\", inplace=True)\n full_country_stats = pd.merge(left=oecd_bli, right=gdp_per_capita,\n left_index=True, right_index=True)\n full_country_stats.sort_values(by=\"GDP per capita\", inplace=True)\n remove_indices = [0, 1, 6, 8, 33, 34, 35]\n keep_indices = list(set(range(36)) - set(remove_indices))\n return full_country_stats[[\"GDP per capita\", 'Life satisfaction']].iloc[keep_indices]\n\n\ncountry_stats = prepare_country_stats(oecd_bli, gdp_per_capita)\nX = np.c_[country_stats[\"GDP per capita\"]]\nY = np.c_[country_stats[\"Life satisfaction\"]]\n\n# Visualize the data\ncountry_starts.plot(kind='scatter')\n"
] |
[
[
"pandas.read_csv",
"pandas.merge"
]
] |
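This file is a `.history/` auto-save, which explains the rough edges: both `read_csv` calls point at the same CSV, and `sklearn.linear_model` is imported but never used because the snapshot stops right after building `X` and `Y`. A minimal sketch of a plausible continuation, fitting a linear model on synthetic stand-ins for `X` and `Y` (the GDP value in the prediction is hypothetical, not part of the recorded file):

```python
import numpy as np
import sklearn.linear_model

# Synthetic stand-ins for the X (GDP per capita) and Y (life satisfaction) arrays above.
X = np.array([[9054.0], [20732.0], [37675.0], [50962.0]])
Y = np.array([[4.9], [5.8], [6.5], [7.3]])

model = sklearn.linear_model.LinearRegression()
model.fit(X, Y)
print(model.predict([[22587.0]]))  # hypothetical GDP per capita -> predicted satisfaction
```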
zdebeurs/radvel
|
[
"acbe76db00dc1ef558fb426a2ef5533f4676f873"
] |
[
"setup.py"
] |
[
"from setuptools import setup, find_packages, Extension\nfrom setuptools.command.build_ext import build_ext as _build_ext\nimport re\n\n\nclass build_ext(_build_ext):\n def finalize_options(self):\n _build_ext.finalize_options(self)\n # Prevent numpy from thinking it is still in its setup process:\n __builtins__.__NUMPY_SETUP__ = False\n import numpy\n self.include_dirs.append(numpy.get_include())\n\n\ndef get_property(prop, project):\n result = re.search(r'{}\\s*=\\s*[\\'\"]([^\\'\"]*)[\\'\"]'.format(prop),\n open(project + '/__init__.py').read())\n return result.group(1)\n\n\nextensions = [Extension(\"radvel._kepler\", [\"src/_kepler.pyx\"],)]\n\nreqs = []\nfor line in open('requirements.txt', 'r').readlines():\n if not line.startswith('celerite') and not line.startswith('h5py'):\n reqs.append(line)\n\nsetup(\n name=\"radvel\",\n version=get_property('__version__', 'radvel'),\n author=\"BJ Fulton, Erik Petigura, Sarah Blunt, Evan Sinukoff\",\n packages=find_packages(),\n setup_requires=['numpy', 'cython'],\n ext_modules=extensions,\n cmdclass={'build_ext': build_ext},\n data_files=[\n (\n 'radvel_example_data', \n [\n 'example_data/164922_fixed.txt', \n 'example_data/epic203771098.csv',\n 'example_data/k2-131.txt'\n ]\n )\n ],\n entry_points={'console_scripts': ['radvel=radvel.cli:main']},\n install_requires=reqs,\n include_package_data=True\n)\n"
] |
[
[
"numpy.get_include"
]
] |
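`get_property` above extracts `__version__` from `radvel/__init__.py` with a regex rather than importing the package, a common way to read the version before the C extension is built. The same pattern on an in-memory string, with a made-up version number:

```python
import re

init_contents = "__version__ = '1.2.3'\n"  # stand-in for radvel/__init__.py
match = re.search(r"{}\s*=\s*['\"]([^'\"]*)['\"]".format("__version__"), init_contents)
print(match.group(1))  # -> 1.2.3
```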
thuzhf/tensorpack
|
[
"83ef7d05d0a16f82d86323c4e2f9b5d14870affa"
] |
[
"examples/boilerplate.py"
] |
[
"# -*- coding: utf-8 -*-\n# Author: Your Name <[email protected]>\n\nimport os\nimport argparse\nimport tensorflow as tf\n\nfrom tensorpack import *\n\n\"\"\"\nThis is a boiler-plate template.\nAll code is in this file is the most minimalistic way to solve a deep-learning problem with cross-validation.\n\"\"\"\n\nBATCH_SIZE = 16\nSHAPE = 28\nCHANNELS = 3\n\n\nclass Model(ModelDesc):\n def inputs(self):\n return [tf.placeholder(tf.float32, (None, SHAPE, SHAPE, CHANNELS), 'input1'),\n tf.placeholder(tf.int32, (None,), 'input2')]\n\n def build_graph(self, input1, input2):\n\n cost = tf.identity(input1 - input2, name='total_costs')\n summary.add_moving_summary(cost)\n return cost\n\n def optimizer(self):\n lr = tf.get_variable('learning_rate', initializer=5e-3, trainable=False)\n return tf.train.AdamOptimizer(lr)\n\n\ndef get_data(subset):\n # something that yields [[SHAPE, SHAPE, CHANNELS], [1]]\n ds = FakeData([[SHAPE, SHAPE, CHANNELS], [1]], 1000, random=False,\n dtype=['float32', 'uint8'], domain=[(0, 255), (0, 10)])\n ds = PrefetchDataZMQ(ds, 2)\n ds = BatchData(ds, BATCH_SIZE)\n return ds\n\n\ndef get_config():\n logger.auto_set_dir()\n\n ds_train = get_data('train')\n ds_test = get_data('test')\n\n return TrainConfig(\n model=Model(),\n data=QueueInput(ds_train),\n callbacks=[\n ModelSaver(),\n InferenceRunner(ds_test, [ScalarStats('total_costs')]),\n ],\n steps_per_epoch=len(ds_train),\n max_epoch=100,\n )\n\n\nif __name__ == '__main__':\n parser = argparse.ArgumentParser()\n parser.add_argument('--gpu', help='comma separated list of GPU(s) to use.')\n parser.add_argument('--load', help='load model')\n args = parser.parse_args()\n\n if args.gpu:\n os.environ['CUDA_VISIBLE_DEVICES'] = args.gpu\n\n config = get_config()\n\n if args.gpu:\n config.nr_tower = len(args.gpu.split(','))\n if args.load:\n config.session_init = SaverRestore(args.load)\n\n launch_train_with_config(config, SimpleTrainer())\n"
] |
[
[
"tensorflow.placeholder",
"tensorflow.get_variable",
"tensorflow.identity",
"tensorflow.train.AdamOptimizer"
]
] |
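The `--gpu` flag above takes effect by setting `CUDA_VISIBLE_DEVICES`, which only works if it is set before a CUDA context is initialized (TF1 does this lazily, on first session or device use, which is why setting it after the import still works here). The idiom in isolation, independent of TensorFlow:

```python
import argparse
import os

parser = argparse.ArgumentParser()
parser.add_argument('--gpu', help='comma separated list of GPU(s) to use.')
args = parser.parse_args()

if args.gpu:
    # Limit which GPUs the CUDA runtime exposes; must happen before CUDA initializes.
    os.environ['CUDA_VISIBLE_DEVICES'] = args.gpu

# ...only then create the TensorFlow session / start training.
```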
mobius-scheduler/evaluation
|
[
"2f24eb564130d4108d5f5f358a9948c3909f5ceb"
] |
[
"parse.py"
] |
[
"import json\nimport sys\nimport pandas as pd\nimport glob\n\nCOLS = ['env', 'alpha', 'time', 'round', 'app_id', 'tasks_requested', 'tasks_fulfilled']\n\ndef parse_im(y, req_prev, ids):\n # open file\n with open(y) as f:\n im = json.load(f)\n\n req = {id: 0 for id in ids}\n for t in im:\n req[t['app_id']] += t['interest']\n return req\n\ndef parse_schedule(x, req, rnd, time, env, ids):\n # open file\n with open(x) as f:\n s = json.load(f)\n\n df = pd.DataFrame(columns = COLS)\n for app in ids:\n if str(app) in s['allocation']:\n alloc = s['allocation'][str(app)]\n else:\n alloc = 0\n\n assert(req[app] >= alloc)\n df = df.append(\n {\n 'env': env,\n 'alpha': s['stats']['alpha'],\n 'time': time,\n 'round': rnd,\n 'app_id': app,\n 'tasks_requested': req[app],\n 'tasks_fulfilled': alloc,\n },\n ignore_index = 1\n )\n df = df.append(\n {\n 'env': env,\n 'alpha': s['stats']['alpha'],\n 'time': time,\n 'round': rnd,\n 'app_id': -1,\n 'tasks_requested': sum(req[app] for app in ids),\n 'tasks_fulfilled': sum(s['allocation'][str(app)] \\\n if str(app) in s['allocation'] else 0 \\\n for app in ids),\n },\n ignore_index = 1\n )\n return df\n\ndef merge(parsed, to):\n x = []\n for i in range(len(parsed)):\n x += [pd.read_csv(parsed[i])]\n df = pd.concat(x)\n df.to_csv(to, index = False)\n\ndef add_col(path, name, value):\n df = pd.read_csv(path)\n df[name] = value\n df.to_csv(path, index = False)\n\ndef parse(dir, ids):\n with open(dir + '/config.cfg') as f:\n cfg = json.load(f)\n horizon = cfg['replan_sec']\n \n df = pd.DataFrame(columns = COLS)\n sched = sorted(glob.glob(dir + '/schedule_round*.json'))\n im = sorted(glob.glob(dir + '/im_round*.json'))\n req_prev = None\n for x,y in zip(sched, im):\n env = dir.split('/')[-4]\n rnd = int(x.split('round')[-1].split('.json')[0]) + 1\n req = parse_im(y, req_prev, ids)\n s = parse_schedule(x, req, rnd, horizon * rnd, env, ids)\n df = df.append(s)\n req_prev = req\n df.to_csv(dir + '/tasks.csv', index = False)\n\ndef parse_hull(dir):\n cols = ['round', 'app1', 'app2']\n df = pd.DataFrame(columns = cols)\n for f in sorted(glob.glob(dir + '/hull_round*.json')):\n rnd = int(f.split('round')[1].split('.json')[0])\n with open(f) as x:\n h = json.load(x)\n if h is None:\n continue\n for s in h:\n df = df.append(\n {\n 'round': rnd,\n 'app1': s['allocation']['1'],\n 'app2': s['allocation']['2'],\n },\n ignore_index = 1\n )\n \n df.to_csv(dir + '/hull.csv', index = False)\n\n"
] |
[
[
"pandas.DataFrame",
"pandas.read_csv",
"pandas.concat"
]
] |
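`parse_schedule` and `parse_hull` above grow their frames row by row with `DataFrame.append`, which was deprecated in pandas 1.4 and removed in 2.0. A sketch of the forward-compatible pattern — collect dicts in a list, build the frame once — using the `COLS` schema above and hypothetical allocation numbers:

```python
import pandas as pd

COLS = ['env', 'alpha', 'time', 'round', 'app_id', 'tasks_requested', 'tasks_fulfilled']

rows = []
for app_id, alloc in [(1, 3), (2, 5)]:  # hypothetical per-app allocations
    rows.append({'env': 'demo', 'alpha': 1.0, 'time': 60, 'round': 1,
                 'app_id': app_id, 'tasks_requested': alloc + 1, 'tasks_fulfilled': alloc})
df = pd.DataFrame(rows, columns=COLS)  # one construction instead of repeated .append
```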
skyler-nch/DJI-Tello-Project
|
[
"9255cef7e9849574d9ac6f04aff446f2640628c4"
] |
[
"FaceRecognition and Tracking.py"
] |
[
"from djitellopy.tello import Tello\nimport cv2\nimport pygame\nimport numpy as np\nimport time\nimport imutils\n\n# Speed of the drone\nS = 60\n# Frames per second of the pygame window display\nFPS = 30\n\n\nclass FrontEnd(object):\n \"\"\" Maintains the Tello display and moves it through the keyboard keys.\n Press escape key to quit.\n The controls are:\n - T: Takeoff\n - L: Land\n - Arrow keys: Forward, backward, left and right.\n - A and D: Counter clockwise and clockwise rotations\n - W and S: Up and down.\n \"\"\"\n\n def __init__(self):\n # Init pygame\n pygame.init()\n\n # Creat pygame window\n pygame.display.set_caption(\"Tello video stream\")\n\n # Center of screen - 480,360\n self.screen = pygame.display.set_mode([960, 720])\n \n\n # Init Tello object that interacts with the Tello drone\n self.tello = Tello()\n\n # Drone velocities between -100~100\n self.for_back_velocity = 0\n self.left_right_velocity = 0\n self.up_down_velocity = 0\n self.yaw_velocity = 0\n self.speed = 10\n\n self.send_rc_control = False\n\n # create update timer\n pygame.time.set_timer(pygame.USEREVENT + 1, 50)\n\n def run(self):\n\n if not self.tello.connect():\n print(\"Tello not connected\")\n return\n\n if not self.tello.set_speed(self.speed):\n print(\"Not set speed to lowest possible\")\n return\n\n # In case streaming is on. This happens when we quit this program without the escape key.\n if not self.tello.streamoff():\n print(\"Could not stop video stream\")\n return\n\n if not self.tello.streamon():\n print(\"Could not start video stream\")\n return\n\n frame_read = self.tello.get_frame_read()\n face_detected = False\n should_stop = False\n #initialise tracker type\n tracker = cv2.TrackerKCF_create()\n framespersecond = 0\n while not should_stop:\n\n for event in pygame.event.get():\n if event.type == pygame.USEREVENT + 1:\n self.update()\n elif event.type == pygame.QUIT:\n should_stop = True\n elif event.type == pygame.KEYDOWN:\n if event.key == pygame.K_ESCAPE:\n should_stop = True\n else:\n self.keydown(event.key)\n elif event.type == pygame.KEYUP:\n self.keyup(event.key)\n\n if frame_read.stopped:\n frame_read.stop()\n break\n\n self.screen.fill([0, 0, 0])\n originalframe = cv2.cvtColor(frame_read.frame, cv2.COLOR_BGR2RGB)\n\n if face_detected == False:\n self.faces = []\n #------------FACIAL RECOGNITION-----------------------\n if framespersecond%10 == 0:\n self.faceCascade = cv2.CascadeClassifier(\"haarcascade_frontalface_default.xml\")\n self.gray = cv2.cvtColor(originalframe,cv2.COLOR_BGR2GRAY)\n self.faces = self.faceCascade.detectMultiScale(\n self.gray,\n scaleFactor = 1.1,\n minNeighbors = 5,\n minSize = (30,30),\n flags = cv2.CASCADE_SCALE_IMAGE\n )\n #self.faces returns the faces it detects in a nested list [[face1],[face2]]\n #each list contains[x-axis,y-axis,width,height]\n #-----------------------------------------------------\n\n \n frame = np.rot90(originalframe)\n frame = np.flipud(frame)\n frame = pygame.surfarray.make_surface(frame)\n self.screen.blit(frame, (0, 0))\n\n if len(self.faces)>0:\n print(\"face detected\")\n face_detected = True\n #-----------------DRAW BOUNDINGBOX ON FACE------------\n self.colorofbox = pygame.Color(0,255,0)\n for item in self.faces:\n pygame.draw.rect(self.screen,self.colorofbox,pygame.Rect(item[0],item[1],item[2],item[3]),2)\n\n #select the closest face to track\n self.selectedface = self.faces[0]\n for item in self.faces:\n if (item[2]*item[3])>(self.selectedface[2]*self.selectedface[3]):\n self.selectedface = item\n ok = 
tracker.init(originalframe, (self.selectedface[0],self.selectedface[1],self.selectedface[2],self.selectedface[3]))\n #-----------------------------------------------------\n\n pygame.display.update()\n\n else:\n print(\"face tracking\")\n self.colorofbox = pygame.Color(255,0,0)\n\n #updates the tracker with current frame, ok determines if tracking is successful, bbox is the bounding box\n ok,bbox = tracker.update(originalframe)\n \n #----CONTROL THE DRONE BASED ON THE BOUNDING BOX------\n if ok:\n\n frame = np.rot90(originalframe)\n frame = np.flipud(frame)\n frame = pygame.surfarray.make_surface(frame)\n self.screen.blit(frame, (0, 0))\n \n pygame.draw.rect(self.screen,self.colorofbox,pygame.Rect(bbox[0],bbox[1],bbox[2],bbox[3]),2)\n self.centeredaxis = [bbox[0]+(bbox[2]//2),bbox[1]+(bbox[3]//2)]\n\n \n pygame.display.update()\n\n if self.centeredaxis[0] < 480:\n print(\"left\")\n self.yawleft()\n self.update()\n \n elif self.centeredaxis[0] > 480:\n print(\"right\")\n self.yawright()\n self.update()\n else:\n print(\"face lost\")\n face_detected = False\n \n \n #-----------------------------------------------------\n \n if framespersecond == 30:\n framespersecond = 0\n else:\n framespersecond += 1\n time.sleep(1 / FPS)\n\n # Call it always before finishing. To deallocate resources.\n self.tello.end()\n\n def forward(self):\n self.for_back_velocity = S\n \n def backward(self):\n self.for_back_velocity = -S\n\n def left(self):\n self.left_right_velocity = -S\n\n def right(self):\n self.left_right_velocity = S\n\n def up(self):\n self.up_down_velocity = S\n\n def down(self):\n self.up_down_velocity = -S\n\n def yawleft(self):\n self.yaw_velocity = -S\n\n def yawright(self):\n self.yaw_velocity = S\n\n def takeoff(self):\n self.tello.takeoff()\n self.send_rc_control = True\n\n def land(self):\n self.tello.land()\n self.send_rc_control = False\n\n def emergencystop(self):\n self.for_back_velocity = 0\n self.left_right_velocity = 0\n self.up_down_velocity = 0\n self.yaw_velocity = 0\n \n def keydown(self, key):\n \"\"\" Update velocities based on key pressed\n Arguments:\n key: pygame key\n \"\"\"\n if key == pygame.K_UP: # set forward velocity\n self.for_back_velocity = S\n elif key == pygame.K_DOWN: # set backward velocity\n self.for_back_velocity = -S\n elif key == pygame.K_LEFT: # set left velocity\n self.left_right_velocity = -S\n elif key == pygame.K_RIGHT: # set right velocity\n self.left_right_velocity = S\n elif key == pygame.K_w: # set up velocity\n self.up_down_velocity = S\n elif key == pygame.K_s: # set down velocity\n self.up_down_velocity = -S\n elif key == pygame.K_a: # set yaw counter clockwise velocity\n self.yaw_velocity = -S\n elif key == pygame.K_d: # set yaw clockwise velocity\n self.yaw_velocity = S\n\n def keyup(self, key):\n \"\"\" Update velocities based on key released\n Arguments:\n key: pygame key\n \"\"\"\n if key == pygame.K_UP or key == pygame.K_DOWN: # set zero forward/backward velocity\n self.for_back_velocity = 0\n elif key == pygame.K_LEFT or key == pygame.K_RIGHT: # set zero left/right velocity\n self.left_right_velocity = 0\n elif key == pygame.K_w or key == pygame.K_s: # set zero up/down velocity\n self.up_down_velocity = 0\n elif key == pygame.K_a or key == pygame.K_d: # set zero yaw velocity\n self.yaw_velocity = 0\n elif key == pygame.K_t: # takeoff\n self.tello.takeoff()\n self.send_rc_control = True\n elif key == pygame.K_l: # land\n self.tello.land()\n self.send_rc_control = False\n\n def update(self):\n \"\"\" Update routine. 
Send velocities to Tello.\"\"\"\n if self.send_rc_control:\n self.tello.send_rc_control(self.left_right_velocity, self.for_back_velocity, self.up_down_velocity,\n self.yaw_velocity)\n\n\ndef main():\n frontend = FrontEnd()\n\n # run frontend\n frontend.run()\n\n\nif __name__ == '__main__':\n main()\n"
] |
[
[
"numpy.rot90",
"numpy.flipud"
]
] |
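The script above calls `cv2.TrackerKCF_create()` at the top level, which is where older OpenCV builds expose it; in some newer `opencv-contrib-python` builds the old tracker API lives under `cv2.legacy` instead. A version-tolerant construction sketch:

```python
import cv2

def make_kcf_tracker():
    """KCF tracker across OpenCV versions (needs opencv-contrib-python for the legacy API)."""
    if hasattr(cv2, "TrackerKCF_create"):
        return cv2.TrackerKCF_create()
    return cv2.legacy.TrackerKCF_create()

tracker = make_kcf_tracker()
```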
twanvl/rwa-da
|
[
"7707daf51f8baccf460907638a8012ed60ac0ffd"
] |
[
"src/deep_features/network.py"
] |
[
"# The used network architecture\n# Based on\n# https://github.com/Lasagne/Recipes/blob/master/examples/ImageNet%20Pretrained%20Network%20%28VGG_S%29.ipynb\n\nimport numpy as np\nimport lasagne\nimport theano\nimport theano.tensor as T\nfrom lasagne.layers import InputLayer, DenseLayer, DropoutLayer\nfrom lasagne.layers.dnn import Conv2DDNNLayer as ConvLayer\nfrom lasagne.layers import MaxPool2DLayer as PoolLayer\nfrom lasagne.layers import LocalResponseNormalization2DLayer as NormLayer\nfrom lasagne.utils import floatX\nfrom lasagne.regularization import l2, regularize_network_params\nfrom lasagne.updates import nesterov_momentum\nfrom lasagne.objectives import categorical_crossentropy\nimport skimage.transform\n\nclass PretrainedNetwork:\n def __init__(self, load=True):\n # Architecture\n net = {}\n net['input'] = InputLayer((None, 3, 224, 224))\n net['conv1'] = ConvLayer(net['input'], num_filters=96, filter_size=7, stride=2, flip_filters=False)\n net['norm1'] = NormLayer(net['conv1'], alpha=0.0001) # caffe has alpha = alpha * pool_size\n net['pool1'] = PoolLayer(net['norm1'], pool_size=3, stride=3, ignore_border=False)\n net['conv2'] = ConvLayer(net['pool1'], num_filters=256, filter_size=5, flip_filters=False)\n net['pool2'] = PoolLayer(net['conv2'], pool_size=2, stride=2, ignore_border=False)\n net['conv3'] = ConvLayer(net['pool2'], num_filters=512, filter_size=3, pad=1, flip_filters=False)\n net['conv4'] = ConvLayer(net['conv3'], num_filters=512, filter_size=3, pad=1, flip_filters=False)\n net['conv5'] = ConvLayer(net['conv4'], num_filters=512, filter_size=3, pad=1, flip_filters=False)\n net['pool5'] = PoolLayer(net['conv5'], pool_size=3, stride=3, ignore_border=False)\n net['fc6'] = DenseLayer(net['pool5'], num_units=4096)\n net['drop6'] = DropoutLayer(net['fc6'], p=0.5)\n net['fc7'] = DenseLayer(net['drop6'], num_units=4096)\n net['drop7'] = DropoutLayer(net['fc7'], p=0.5)\n net['fc8'] = DenseLayer(net['drop7'], num_units=1000, nonlinearity=lasagne.nonlinearities.softmax)\n self.output_layer = net['fc8']\n self.net = net\n \n if load:\n self.load_weights()\n \n # Compile\n self.predict_fn = None\n self.predict_fns = {}\n self.train_fn = {}\n self.lr = theano.shared(np.array(1e-2, dtype=np.float32))\n self.regularizer_amount = theano.shared(np.array(4e-5, dtype=np.float32))\n \n def get_output_fn(self,layer):\n input_var = self.net['input'].input_var\n out = lasagne.layers.get_output(layer, deterministic=True)\n return theano.function([input_var], out)\n \n def add_output_layer(self, num_units, after='drop7'):\n self.output_layer = DenseLayer(self.net[after], num_units=num_units, nonlinearity=lasagne.nonlinearities.softmax)\n self.predict_fn = None\n self.train_fn = {}\n \n def load_weights(self):\n # weights\n import pickle\n with open('/home/twanvl/test/vgg_cnn_s.pkl','rb') as file:\n model = pickle.load(file, encoding='latin1')\n self.classes = model['synset words']\n self.mean_image = model['mean image']\n lasagne.layers.set_all_param_values(self.output_layer, model['values'])\n \n def save_weights_np(self, filename):\n np.savez(filename, *lasagne.layers.get_all_param_values(self.output_layer), mean_image=self.mean_image)\n def load_weights_np(self, filename):\n params = lasagne.layers.get_all_params(self.output_layer)\n with np.load(filename) as f:\n param_values = [f['arr_%d' % i] for i in range(len(params))]\n self.mean_image = f['mean_image']\n lasagne.layers.set_all_param_values(self.output_layer, param_values)\n\n def preprocess_many(self, ims, **kwargs):\n # Preprocess a 
list of images\n return np.array([self.preprocess(x, many=True, **kwargs) for x in ims])\n \n def preprocess(self, im, many=False, crop_h=0.5, crop_w=0.5, flip=False, size=256, smallest=True, random=False):\n # Preprocess an image\n # Resize so smallest/largest dim = 256, preserving aspect ratio\n im = resize(im, size, smallest)\n # Central crop to 224x224\n h, w, _ = im.shape\n if random:\n y0 = np.random.randint(h-224)\n x0 = np.random.randint(w-224)\n flip = np.random.randint(2)\n else:\n y0 = int((h-224)*crop_h)\n x0 = int((w-224)*crop_w)\n im = im[y0:y0+224, x0:x0+224]\n # Flip horizontally?\n if flip:\n im = im[:,::-1]\n if not many:\n rawim = np.copy(im).astype('uint8')\n # Shuffle axes to c01\n im = np.swapaxes(np.swapaxes(im, 1, 2), 0, 1)\n # Convert to BGR\n im = im[::-1, :, :]\n # Subtract mean\n im = im - self.mean_image\n if many:\n return floatX(im)\n else:\n return rawim, floatX(im[np.newaxis])\n\n def classify(self,im, preprocess=False, **kwargs):\n if preprocess:\n im = self.preprocess_many(im, **kwargs)\n if self.predict_fn is None:\n self.predict_fn = self.get_output_fn(self.output_layer)\n prob = batch_predict(self.predict_fn, im)\n return np.array(np.argmax(prob, axis=1), dtype=np.int32)\n \n def classify_test(self,im, **kwargs):\n # Run a test of the classifier, output nice looking matplotlib figure\n rawim, im = self.preprocess(im, **kwargs)\n #prob = np.array(lasagne.layers.get_output(self.output_layer, im, deterministic=True).eval())\n if self.predict_fn is None:\n self.predict_fn = self.get_output_fn(self.output_layer)\n prob = np.array(self.predict_fn(im))\n top5 = np.argsort(prob[0])[-1:-6:-1]\n import matplotlib.pyplot as plt\n plt.figure()\n plt.imshow(rawim.astype('uint8'))\n plt.axis('off')\n for n, label in enumerate(top5):\n plt.text(250, 70 + n * 20, '{}. 
{}'.format(n+1, self.classes[label]), fontsize=14)\n \n def get_features(self, im, layer, preprocess=False):\n if layer not in self.predict_fns:\n self.predict_fns[layer] = self.get_output_fn(self.net[layer])\n # apply\n if preprocess:\n rawim, im = self.preprocess(im)\n return batch_predict(self.predict_fns[layer], im)\n\n def get_train_fn(self, last_only=False):\n input_var = self.net['input'].input_var\n target_var = T.ivector('targets')\n prediction = lasagne.layers.get_output(self.output_layer)\n loss = categorical_crossentropy(prediction, target_var)\n loss = loss.mean()\n error = T.mean(T.neq(T.argmax(prediction, axis=1), target_var), dtype=theano.config.floatX)\n regularization = self.regularizer_amount * regularize_network_params(self.output_layer, l2)\n if last_only:\n all_params = self.output_layer.get_params(trainable=True)\n else:\n all_params = lasagne.layers.get_all_params(self.output_layer, trainable=True)\n updates = nesterov_momentum(loss + regularization, all_params, learning_rate=self.lr)\n return theano.function([input_var, target_var], (loss,error), updates=updates)\n \n def train(self, x, y, num_epochs=50, learning_rate=1e-3, batchsize=128, regularizer_amount=5e-4, preprocess=False, last_only=False):\n if last_only not in self.train_fn:\n self.train_fn[last_only] = self.get_train_fn(last_only)\n train_fn = self.train_fn[last_only]\n self.regularizer_amount.set_value(np.float32(regularizer_amount))\n #augment = augment_data\n augment = None\n \n for epoch in range(num_epochs):\n if epoch < 0.8*num_epochs:\n lr = learning_rate\n elif epoch < 0.9*num_epochs:\n lr = learning_rate / 10\n else:\n lr = learning_rate / 100\n self.lr.set_value(np.float32(lr))\n \n loss = 0\n err = 0\n n = 0\n for batch_x,batch_y in iterate_minibatches(x, y, batchsize=batchsize, shuffle=True, augment=augment):\n if preprocess:\n batch_x = self.preprocess_many(batch_x, random=True)\n l,e = train_fn(batch_x,batch_y)\n loss += l\n err += e\n n += 1\n print(\" {:3} / {:3}: loss={:6.3f}, error={:5.3f} \".format(epoch,num_epochs,loss/n,err/n), end='\\r')\n if epoch%10 == 9:\n print()\n \n\ndef batch_predict(fun, x, batchsize=128):\n if x.shape[0] < batchsize:\n return fun(x)\n else:\n y = []\n for start in range(0, x.shape[0], batchsize):\n end = min(start + batchsize, x.shape[0])\n y.append(fun(x[start:end]))\n return np.concatenate(y)\n\n# > We follow the simple data augmentation in [24] for training:\n# > 4 pixels are padded on each side, and a 32*32 crop is randomly sampled\n# > from the padded image or its horizontal flip.\ndef augment_data(data,max_shift=100):\n # input (N,channel,h,w)\n out = np.empty_like(data)\n for i in range(np.shape(data)[0]):\n # sample same size image from padded image\n xoffs = np.random.randint(max_shift*2+1)\n yoffs = np.random.randint(max_shift*2+1)\n out[i] = np.pad(data[i],[(0,0),(max_shift,max_shift),(max_shift,max_shift)],'constant')[:, xoffs:xoffs+np.shape(data)[2], yoffs:yoffs+np.shape(data)[2]]\n if np.random.random_sample() < 0.5:\n # flip horizontally\n out[i] = out[i][:,:,::-1]\n return out\n \n# This is just a simple helper function iterating over training data in\n# mini-batches of a particular size, optionally in random order. 
It assumes\n# data is available as numpy arrays.\ndef iterate_minibatches(inputs, targets, batchsize, shuffle=False, augment=None):\n assert len(inputs) == len(targets)\n if shuffle:\n indices = np.arange(len(inputs))\n np.random.shuffle(indices)\n for start_idx in range(0, len(inputs), batchsize):\n end_idx = min(start_idx + batchsize, len(inputs))\n if shuffle:\n excerpt = indices[start_idx:end_idx]\n else:\n excerpt = slice(start_idx, end_idx)\n # Data augmentation\n if augment:\n yield augment(inputs[excerpt]), targets[excerpt]\n else:\n yield inputs[excerpt], targets[excerpt]\n\ndef resize(im, size=256, smallest=True):\n h, w, _ = im.shape\n if (h < w and smallest) or (h > w and not smallest):\n if h != size:\n im = skimage.transform.resize(im, (size, (w*size)//h), preserve_range=True)\n else:\n if w != size:\n im = skimage.transform.resize(im, ((h*size)//w, size), preserve_range=True)\n return im\n\nclass ConcatDatasets:\n def __init__(self, a, b):\n self.a = a\n self.b = b\n def __len__(self):\n return len(self.a) + len(self.b)\n def __getitem__(self,idxs):\n pass #TODO\n"
] |
[
[
"numpy.concatenate",
"numpy.array",
"numpy.pad",
"numpy.copy",
"numpy.load",
"numpy.random.shuffle",
"matplotlib.pyplot.figure",
"numpy.shape",
"numpy.random.random_sample",
"numpy.float32",
"numpy.swapaxes",
"numpy.random.randint",
"numpy.empty_like",
"numpy.argmax",
"numpy.argsort",
"matplotlib.pyplot.axis"
]
] |
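`iterate_minibatches` above is a plain numpy generator, so it can be exercised without Theano or Lasagne installed. A short usage sketch of a simplified variant (shuffling kept, the augmentation hook omitted):

```python
import numpy as np

def iterate_minibatches(inputs, targets, batchsize, shuffle=False):
    assert len(inputs) == len(targets)
    indices = np.arange(len(inputs))
    if shuffle:
        np.random.shuffle(indices)
    for start_idx in range(0, len(inputs), batchsize):
        excerpt = indices[start_idx:start_idx + batchsize]
        yield inputs[excerpt], targets[excerpt]

x = np.random.rand(10, 3, 8, 8).astype('float32')
y = np.arange(10, dtype='int32')
for batch_x, batch_y in iterate_minibatches(x, y, batchsize=4, shuffle=True):
    print(batch_x.shape, batch_y.shape)  # (4, ...) twice, then the (2, ...) remainder
```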
UoB-HPC/everythingsreduced
|
[
"72e69dafdf0d155c35df38bfda7786d9f88c641a"
] |
[
"results/reorganize.py"
] |
[
"import pandas as pd\nimport glob\nimport csv\n\nfiles = [\n \"a100-results.csv\",\n \"clx-1S-results.csv\",\n \"clx-results.csv\",\n \"gen9-results.csv\",\n \"mi100-results.csv\",\n# \"rome-results-aocc.csv\",\n \"rome-results-cce.csv\"]\n\ncsv_frames = []\nfor f in files:\n csv_frames.append(pd.read_csv(f, skipinitialspace=True))\n\n\ndf = pd.concat(csv_frames, axis=0, ignore_index=True)\n\ndf.loc[df['model'] == 'kokkos-sycl',['model']] = 'kokkos'\n\ndf.set_index([\"kernel\", \"model\", \"arch\", \"compiler\"], inplace=True)\ndf.sort_index(inplace=True)\n\navg = df.groupby(level=[\"kernel\", \"model\", \"arch\", \"compiler\"]).mean()\n\n\n\npeaks = pd.read_csv(\"peaks.csv\", skipinitialspace=True)\npeaks= pd.Series(peaks.bandwidth.values, index=peaks.arch).to_dict()\n\npeakmap= {'rome': (2, 'EPYC 7742'),\n 'clx_1S': (1, 'Xeon 6230'),\n 'clx': (2, 'Xeon 6230'),\n 'gen9': (1, 'Core 6770HQ')\n }\n\narches = avg.index.unique(level='arch')\nfor arch in arches:\n try:\n mul, key = peakmap[arch]\n except KeyError:\n mul, key = 1, arch\n avg.loc[(slice(None), slice(None), arch), 'bandwidth'] /= (mul*peaks[key])\n\n\napp_name_map = {\n \"openmp\": \"OpenMP\",\n \"kokkos-sycl\" : \"Kokkos (SYCL)\",\n \"omp-target\": \"OpenMP (target)\",\n \"onedpl\": \"oneDPL\",\n \"raja\": \"Raja\",\n \"kokkos\": \"Kokkos\",\n \"sycl\": \"SYCL\",\n }\napp_order = ['openmp', 'kokkos', 'raja', 'sycl', 'onedpl']\n\nsubapp_map = {\n 'openmp' : 'openmp',\n 'omp-target' : 'openmp',\n 'kokkos' : 'kokkos',\n 'kokkos-sycl' : 'kokkos',\n 'raja' : 'raja',\n 'sycl' : 'sycl',\n 'onedpl' : 'onedpl',\n }\n\n\nplatform_name_map = {\n 'clx' : \"2 x Intel® Xeon® Gold 6230\",\n 'clx_1S' : \"1 x Intel® Xeon® Gold 6230\",\n 'a100' : \"NVIDIA A100\",\n 'mi100' : \"AMD MI100\",\n 'rome' : '2 x AMD EPYC 7742',\n 'rome_cce' : '2 x AMD EPYC 7742',\n 'rome_aocc' : '2 x AMD EPYC 7742',\n 'gen9' : 'Intel® Iris® Pro 580'\n }\n\nfor kernel in avg.index.unique(level='kernel'):\n with open(f\"{kernel}.csv\", \"w\") as fp:\n ocsv = csv.writer(fp)\n\n kslice = avg.loc[kernel]\n\n kslice.index.remove_unused_levels()\n models = kslice.index.unique(level='model')\n ocsv.writerow([\"Device\"] + list([app_name_map[x] for x in models]))\n for arch in arches:\n res = [platform_name_map[arch]]\n for m in models:\n try:\n v = avg.loc[(kernel, m, arch),'bandwidth'][0]*100\n except KeyError:\n v = 'X'\n res.append(v)\n ocsv.writerow(res)\n"
] |
[
[
"pandas.read_csv",
"pandas.Series",
"pandas.concat"
]
] |
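One subtlety in the per-kernel loop above: `MultiIndex.remove_unused_levels()` returns a new index rather than modifying in place, so calling it without assigning the result is a no-op. A toy demonstration of the assignment form, on made-up model/arch labels:

```python
import pandas as pd

idx = pd.MultiIndex.from_product([["openmp", "sycl"], ["a100", "mi100"]],
                                 names=["model", "arch"])
df = pd.DataFrame({"bandwidth": [0.8, 0.9, 0.7, 0.6]}, index=idx)

sub = df.loc[["openmp"]]                      # slicing keeps the full level sets
sub.index = sub.index.remove_unused_levels()  # returns a new index; must be assigned
print(sub.index.levels)                       # only the values still present remain
```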
zahra8500/yolov3object_detection-Transfer_Learning-
|
[
"173d4f0d285199b5c354a7e3e777f95c2df2caa1"
] |
[
"train_bottleneck.py"
] |
[
"\"\"\"\nRetrain the YOLO model for your own dataset.\n\"\"\"\nimport os\nimport numpy as np\nimport keras.backend as K\nfrom keras.layers import Input, Lambda\nfrom keras.models import Model\nfrom keras.optimizers import Adam\nfrom keras.callbacks import TensorBoard, ModelCheckpoint, ReduceLROnPlateau, EarlyStopping\n\nfrom yolo3.model import preprocess_true_boxes, yolo_body, tiny_yolo_body, yolo_loss\nfrom yolo3.utils import get_random_data\n\n\ndef _main():\n annotation_path = 'train_labels.csv'\n log_dir = 'logs/000/'\n classes_path = 'model_data/my_classes.txt'\n anchors_path = 'model_data/tiny_yolo_anchors.txt'\n class_names = get_classes(classes_path)\n num_classes = len(class_names)\n anchors = get_anchors(anchors_path)\n\n input_shape = (416,416) # multiple of 32, hw\n\n model, bottleneck_model, last_layer_model = create_model(input_shape, anchors, num_classes,\n freeze_body=2, weights_path='model_data/yolo_weights.h5') # make sure you know what you freeze\n\n logging = TensorBoard(log_dir=log_dir)\n checkpoint = ModelCheckpoint(log_dir + 'ep{epoch:03d}-loss{loss:.3f}-val_loss{val_loss:.3f}.h5',\n monitor='val_loss', save_weights_only=True, save_best_only=True, period=3)\n reduce_lr = ReduceLROnPlateau(monitor='val_loss', factor=0.1, patience=3, verbose=1)\n early_stopping = EarlyStopping(monitor='val_loss', min_delta=0, patience=10, verbose=1)\n\n val_split = 0.1\n with open(annotation_path) as f:\n lines = f.readlines()\n np.random.seed(10101)\n np.random.shuffle(lines)\n np.random.seed(None)\n num_val = int(len(lines)*val_split)\n num_train = len(lines) - num_val\n\n # Train with frozen layers first, to get a stable loss.\n # Adjust num epochs to your dataset. This step is enough to obtain a not bad model.\n if True:\n # perform bottleneck training\n if not os.path.isfile(\"bottlenecks.npz\"):\n print(\"calculating bottlenecks\")\n batch_size=8\n bottlenecks=bottleneck_model.predict_generator(data_generator_wrapper(lines, batch_size, input_shape, anchors, num_classes, random=False, verbose=True),\n steps=(len(lines)//batch_size)+1, max_queue_size=1)\n np.savez(\"bottlenecks.npz\", bot0=bottlenecks[0], bot1=bottlenecks[1], bot2=bottlenecks[2])\n \n # load bottleneck features from file\n dict_bot=np.load(\"bottlenecks.npz\")\n bottlenecks_train=[dict_bot[\"bot0\"][:num_train], dict_bot[\"bot1\"][:num_train], dict_bot[\"bot2\"][:num_train]]\n bottlenecks_val=[dict_bot[\"bot0\"][num_train:], dict_bot[\"bot1\"][num_train:], dict_bot[\"bot2\"][num_train:]]\n\n # train last layers with fixed bottleneck features\n batch_size=8\n print(\"Training last layers with bottleneck features\")\n print('with {} samples, val on {} samples and batch size {}.'.format(num_train, num_val, batch_size))\n last_layer_model.compile(optimizer='adam', loss={'yolo_loss': lambda y_true, y_pred: y_pred})\n last_layer_model.fit_generator(bottleneck_generator(lines[:num_train], batch_size, input_shape, anchors, num_classes, bottlenecks_train),\n steps_per_epoch=max(1, num_train//batch_size),\n validation_data=bottleneck_generator(lines[num_train:], batch_size, input_shape, anchors, num_classes, bottlenecks_val),\n validation_steps=max(1, num_val//batch_size),\n epochs=30,\n initial_epoch=0, max_queue_size=1)\n model.save_weights(log_dir + 'trained_weights_stage_0.h5')\n \n # train last layers with random augmented data\n model.compile(optimizer=Adam(lr=1e-3), loss={\n # use custom yolo_loss Lambda layer.\n 'yolo_loss': lambda y_true, y_pred: y_pred})\n batch_size = 16\n print('Train on {} samples, val on {} 
samples, with batch size {}.'.format(num_train, num_val, batch_size))\n model.fit_generator(data_generator_wrapper(lines[:num_train], batch_size, input_shape, anchors, num_classes),\n steps_per_epoch=max(1, num_train//batch_size),\n validation_data=data_generator_wrapper(lines[num_train:], batch_size, input_shape, anchors, num_classes),\n validation_steps=max(1, num_val//batch_size),\n epochs=50,\n initial_epoch=0,\n callbacks=[logging, checkpoint])\n model.save_weights(log_dir + 'trained_weights_stage_1.h5')\n\n # Unfreeze and continue training, to fine-tune.\n # Train longer if the result is not good.\n if True:\n for i in range(len(model.layers)):\n model.layers[i].trainable = True\n model.compile(optimizer=Adam(lr=1e-4), loss={'yolo_loss': lambda y_true, y_pred: y_pred}) # recompile to apply the change\n print('Unfreeze all of the layers.')\n\n batch_size = 4 # note that more GPU memory is required after unfreezing the body\n print('Train on {} samples, val on {} samples, with batch size {}.'.format(num_train, num_val, batch_size))\n model.fit_generator(data_generator_wrapper(lines[:num_train], batch_size, input_shape, anchors, num_classes),\n steps_per_epoch=max(1, num_train//batch_size),\n validation_data=data_generator_wrapper(lines[num_train:], batch_size, input_shape, anchors, num_classes),\n validation_steps=max(1, num_val//batch_size),\n epochs=100,\n initial_epoch=50,\n callbacks=[logging, checkpoint, reduce_lr, early_stopping])\n model.save_weights(log_dir + 'trained_weights_final.h5')\n\n # Further training if needed.\n\n\ndef get_classes(classes_path):\n '''loads the classes'''\n with open(classes_path) as f:\n class_names = f.readlines()\n class_names = [c.strip() for c in class_names]\n return class_names\n\ndef get_anchors(anchors_path):\n '''loads the anchors from a file'''\n with open(anchors_path) as f:\n anchors = f.readline()\n anchors = [float(x) for x in anchors.split(',')]\n return np.array(anchors).reshape(-1, 2)\n\n\ndef create_model(input_shape, anchors, num_classes, load_pretrained=True, freeze_body=2,\n weights_path='model_data/yolo_weights.h5'):\n '''create the training model'''\n K.clear_session() # get a new session\n image_input = Input(shape=(None, None, 3))\n h, w = input_shape\n num_anchors = len(anchors)\n\n y_true = [Input(shape=(h//{0:32, 1:16, 2:8}[l], w//{0:32, 1:16, 2:8}[l], \\\n num_anchors//3, num_classes+5)) for l in range(3)]\n\n model_body = yolo_body(image_input, num_anchors//3, num_classes)\n print('Create YOLOv3 model with {} anchors and {} classes.'.format(num_anchors, num_classes))\n\n if load_pretrained:\n model_body.load_weights(weights_path, by_name=True, skip_mismatch=True)\n print('Load weights {}.'.format(weights_path))\n if freeze_body in [1, 2]:\n # Freeze darknet53 body or freeze all but 3 output layers.\n num = (185, len(model_body.layers)-3)[freeze_body-1]\n for i in range(num): model_body.layers[i].trainable = False\n print('Freeze the first {} layers of total {} layers.'.format(num, len(model_body.layers)))\n\n # get output of second last layers and create bottleneck model of it\n out1=model_body.layers[246].output\n out2=model_body.layers[247].output\n out3=model_body.layers[248].output\n bottleneck_model = Model([model_body.input, *y_true], [out1, out2, out3])\n\n # create last layer model of last layers from yolo model\n in0 = Input(shape=bottleneck_model.output[0].shape[1:].as_list()) \n in1 = Input(shape=bottleneck_model.output[1].shape[1:].as_list())\n in2 = 
Input(shape=bottleneck_model.output[2].shape[1:].as_list())\n last_out0=model_body.layers[249](in0)\n last_out1=model_body.layers[250](in1)\n last_out2=model_body.layers[251](in2)\n model_last=Model(inputs=[in0, in1, in2], outputs=[last_out0, last_out1, last_out2])\n model_loss_last =Lambda(yolo_loss, output_shape=(1,), name='yolo_loss',\n arguments={'anchors': anchors, 'num_classes': num_classes, 'ignore_thresh': 0.5})(\n [*model_last.output, *y_true])\n last_layer_model = Model([in0,in1,in2, *y_true], model_loss_last)\n\n \n model_loss = Lambda(yolo_loss, output_shape=(1,), name='yolo_loss',\n arguments={'anchors': anchors, 'num_classes': num_classes, 'ignore_thresh': 0.5})(\n [*model_body.output, *y_true])\n model = Model([model_body.input, *y_true], model_loss)\n\n return model, bottleneck_model, last_layer_model\n\ndef data_generator(annotation_lines, batch_size, input_shape, anchors, num_classes, random=True, verbose=False):\n '''data generator for fit_generator'''\n n = len(annotation_lines)\n i = 0\n while True:\n image_data = []\n box_data = []\n for b in range(batch_size):\n if i==0 and random:\n np.random.shuffle(annotation_lines)\n image, box = get_random_data(annotation_lines[i], input_shape, random=random)\n image_data.append(image)\n box_data.append(box)\n i = (i+1) % n\n image_data = np.array(image_data)\n if verbose:\n print(\"Progress: \",i,\"/\",n)\n box_data = np.array(box_data)\n y_true = preprocess_true_boxes(box_data, input_shape, anchors, num_classes)\n yield [image_data, *y_true], np.zeros(batch_size)\n\ndef data_generator_wrapper(annotation_lines, batch_size, input_shape, anchors, num_classes, random=True, verbose=False):\n n = len(annotation_lines)\n if n==0 or batch_size<=0: return None\n return data_generator(annotation_lines, batch_size, input_shape, anchors, num_classes, random, verbose)\n\ndef bottleneck_generator(annotation_lines, batch_size, input_shape, anchors, num_classes, bottlenecks):\n n = len(annotation_lines)\n i = 0\n while True:\n box_data = []\n b0=np.zeros((batch_size,bottlenecks[0].shape[1],bottlenecks[0].shape[2],bottlenecks[0].shape[3]))\n b1=np.zeros((batch_size,bottlenecks[1].shape[1],bottlenecks[1].shape[2],bottlenecks[1].shape[3]))\n b2=np.zeros((batch_size,bottlenecks[2].shape[1],bottlenecks[2].shape[2],bottlenecks[2].shape[3]))\n for b in range(batch_size):\n _, box = get_random_data(annotation_lines[i], input_shape, random=False, proc_img=False)\n box_data.append(box)\n b0[b]=bottlenecks[0][i]\n b1[b]=bottlenecks[1][i]\n b2[b]=bottlenecks[2][i]\n i = (i+1) % n\n box_data = np.array(box_data)\n y_true = preprocess_true_boxes(box_data, input_shape, anchors, num_classes)\n yield [b0, b1, b2, *y_true], np.zeros(batch_size)\n\nif __name__ == '__main__':\n _main()\n"
] |
[
[
"numpy.array",
"numpy.zeros",
"numpy.random.seed",
"numpy.load",
"numpy.random.shuffle",
"numpy.savez"
]
] |
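The split in `_main` above pins the shuffle with `np.random.seed(10101)` and then re-seeds with `np.random.seed(None)`, so the same validation slice is drawn on every run while later randomness stays fresh. The idiom in isolation, on stand-in annotation lines:

```python
import numpy as np

lines = ["img_{:02d}.jpg 10,20,30,40,0".format(i) for i in range(20)]  # stand-in annotations
val_split = 0.1

np.random.seed(10101)   # fixed seed: identical shuffle order on every run
np.random.shuffle(lines)
np.random.seed(None)    # re-seed from OS entropy for everything that follows

num_val = int(len(lines) * val_split)
num_train = len(lines) - num_val
train_lines, val_lines = lines[:num_train], lines[num_train:]
```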
lauratomaz/nilearn
|
[
"7e413a51676fea8bdcbab12e20482da6c417567c"
] |
[
"nilearn/image/tests/test_resampling.py"
] |
[
"\"\"\"\nTest the resampling code.\n\"\"\"\nimport os\nimport copy\nimport math\n\nfrom nose import SkipTest\nfrom nose.tools import assert_equal, assert_raises, \\\n assert_false, assert_true, assert_almost_equal, assert_not_equal\nfrom numpy.testing import assert_array_equal, assert_array_almost_equal\n\nimport numpy as np\n\nfrom nibabel import Nifti1Image\n\nfrom nilearn.image.resampling import resample_img, resample_to_img, reorder_img\nfrom nilearn.image.resampling import from_matrix_vector, coord_transform\nfrom nilearn.image.resampling import BoundingBoxError\nfrom nilearn._utils import testing, compat\n\n\n###############################################################################\n# Helper function\ndef rotation(theta, phi):\n \"\"\" Returns a rotation 3x3 matrix.\n \"\"\"\n cos = np.cos\n sin = np.sin\n a1 = np.array([[cos(theta), -sin(theta), 0],\n [sin(theta), cos(theta), 0],\n [0, 0, 1]])\n a2 = np.array([[1, 0, 0],\n [0, cos(phi), -sin(phi)],\n [0, sin(phi), cos(phi)]])\n return np.dot(a1, a2)\n\n\ndef pad(array, *args):\n \"\"\"Pad an ndarray with zeros of quantity specified\n in args as follows args = (x1minpad, x1maxpad, x2minpad,\n x2maxpad, x3minpad, ...)\n \"\"\"\n\n if len(args) % 2 != 0:\n raise ValueError(\"Please specify as many max paddings as min\"\n \" paddings. You have specified %d arguments\" %\n len(args))\n\n all_paddings = np.zeros([array.ndim, 2], dtype=np.int64)\n all_paddings[:len(args) // 2] = np.array(args).reshape(-1, 2)\n\n lower_paddings, upper_paddings = all_paddings.T\n new_shape = np.array(array.shape) + upper_paddings + lower_paddings\n\n padded = np.zeros(new_shape, dtype=array.dtype)\n source_slices = [slice(max(-lp, 0), min(s + up, s))\n for lp, up, s in zip(lower_paddings,\n upper_paddings,\n array.shape)]\n target_slices = [slice(max(lp, 0), min(s - up, s))\n for lp, up, s in zip(lower_paddings,\n upper_paddings,\n new_shape)]\n\n padded[target_slices] = array[source_slices].copy()\n return padded\n\n\n###############################################################################\n# Tests\ndef test_identity_resample():\n \"\"\" Test resampling with an identity affine.\n \"\"\"\n shape = (3, 2, 5, 2)\n data = np.random.randint(0, 10, shape)\n affine = np.eye(4)\n affine[:3, -1] = 0.5 * np.array(shape[:3])\n rot_img = resample_img(Nifti1Image(data, affine),\n target_affine=affine, interpolation='nearest')\n np.testing.assert_almost_equal(data, rot_img.get_data())\n # Smoke-test with a list affine\n rot_img = resample_img(Nifti1Image(data, affine),\n target_affine=affine.tolist(),\n interpolation='nearest')\n # Test with a 3x3 affine\n rot_img = resample_img(Nifti1Image(data, affine),\n target_affine=affine[:3, :3],\n interpolation='nearest')\n np.testing.assert_almost_equal(data, rot_img.get_data())\n\n # Test with non native endian data\n\n # Test with big endian data ('>f8')\n for interpolation in ['nearest', 'linear', 'continuous']:\n rot_img = resample_img(Nifti1Image(data.astype('>f8'), affine),\n target_affine=affine.tolist(),\n interpolation=interpolation)\n np.testing.assert_almost_equal(data, rot_img.get_data())\n\n # Test with little endian data ('<f8')\n for interpolation in ['nearest', 'linear', 'continuous']:\n rot_img = resample_img(Nifti1Image(data.astype('<f8'), affine),\n target_affine=affine.tolist(),\n interpolation=interpolation)\n np.testing.assert_almost_equal(data, rot_img.get_data())\n\n\ndef test_downsample():\n \"\"\" Test resampling with a 1/2 down-sampling affine.\n \"\"\"\n rand_gen = 
np.random.RandomState(0)\n shape = (6, 3, 6, 2)\n data = rand_gen.random_sample(shape)\n affine = np.eye(4)\n rot_img = resample_img(Nifti1Image(data, affine),\n target_affine=2 * affine, interpolation='nearest')\n downsampled = data[::2, ::2, ::2, ...]\n x, y, z = downsampled.shape[:3]\n np.testing.assert_almost_equal(downsampled,\n rot_img.get_data()[:x, :y, :z, ...])\n\n # Test with non native endian data\n\n # Test to check that if giving non native endian data as input should\n # work as normal and expected to return the same output as above tests.\n\n # Big endian data ('>f8')\n for copy in [True, False]:\n rot_img = resample_img(Nifti1Image(data.astype('>f8'), affine),\n target_affine=2 * affine,\n interpolation='nearest',\n copy=copy)\n np.testing.assert_almost_equal(downsampled,\n rot_img.get_data()[:x, :y, :z, ...])\n\n # Little endian data\n for copy in [True, False]:\n rot_img = resample_img(Nifti1Image(data.astype('<f8'), affine),\n target_affine=2 * affine,\n interpolation='nearest',\n copy=copy)\n np.testing.assert_almost_equal(downsampled,\n rot_img.get_data()[:x, :y, :z, ...])\n\n\ndef test_resampling_with_affine():\n \"\"\" Test resampling with a given rotation part of the affine.\n \"\"\"\n prng = np.random.RandomState(10)\n\n data_3d = prng.randint(4, size=(1, 4, 4))\n data_4d = prng.randint(4, size=(1, 4, 4, 3))\n\n for data in [data_3d, data_4d]:\n for angle in (0, np.pi, np.pi / 2., np.pi / 4., np.pi / 3.):\n rot = rotation(0, angle)\n rot_img = resample_img(Nifti1Image(data, np.eye(4)),\n target_affine=rot,\n interpolation='nearest')\n assert_equal(np.max(data),\n np.max(rot_img.get_data()))\n assert_equal(rot_img.get_data().dtype, data.dtype)\n\n # We take the same rotation logic as above and test with nonnative endian\n # data as input\n for data in [data_3d, data_4d]:\n img = Nifti1Image(data.astype('>f8'), np.eye(4))\n for angle in (0, np.pi, np.pi / 2., np.pi / 4., np.pi / 3.):\n rot = rotation(0, angle)\n rot_img = resample_img(img, target_affine=rot,\n interpolation='nearest')\n assert_equal(np.max(data),\n np.max(rot_img.get_data()))\n\n\ndef test_resampling_continuous_with_affine():\n prng = np.random.RandomState(10)\n\n data_3d = prng.randint(1, 4, size=(1, 10, 10))\n data_4d = prng.randint(1, 4, size=(1, 10, 10, 3))\n\n for data in [data_3d, data_4d]:\n for angle in (0, np.pi / 2., np.pi, 3 * np.pi / 2.):\n rot = rotation(0, angle)\n\n img = Nifti1Image(data, np.eye(4))\n rot_img = resample_img(\n img,\n target_affine=rot,\n interpolation='continuous')\n rot_img_back = resample_img(\n rot_img,\n target_affine=np.eye(4),\n interpolation='continuous')\n\n center = slice(1, 9)\n # values on the edges are wrong for some reason\n mask = (0, center, center)\n np.testing.assert_allclose(\n img.get_data()[mask],\n rot_img_back.get_data()[mask])\n assert_equal(rot_img.get_data().dtype,\n np.dtype(data.dtype.name.replace('int', 'float')))\n\n\ndef test_resampling_error_checks():\n shape = (3, 2, 5, 2)\n target_shape = (5, 3, 2)\n affine = np.eye(4)\n data = np.random.randint(0, 10, shape)\n img = Nifti1Image(data, affine)\n\n # Correct parameters: no exception\n resample_img(img, target_shape=target_shape, target_affine=affine)\n resample_img(img, target_affine=affine)\n\n with testing.write_tmp_imgs(img) as filename:\n resample_img(filename, target_shape=target_shape, target_affine=affine)\n\n # Missing parameter\n assert_raises(ValueError, resample_img, img, target_shape=target_shape)\n\n # Invalid shape\n assert_raises(ValueError, resample_img, img, 
target_shape=(2, 3),\n target_affine=affine)\n\n # Invalid interpolation\n interpolation = 'an_invalid_interpolation'\n pattern = \"interpolation must be either.+{0}\".format(interpolation)\n testing.assert_raises_regex(ValueError, pattern,\n resample_img, img, target_shape=target_shape,\n target_affine=affine,\n interpolation=\"an_invalid_interpolation\")\n\n # Noop\n target_shape = shape[:3]\n\n img_r = resample_img(img, copy=False)\n assert_equal(img_r, img)\n\n img_r = resample_img(img, copy=True)\n assert_false(np.may_share_memory(img_r.get_data(), img.get_data()))\n\n np.testing.assert_almost_equal(img_r.get_data(), img.get_data())\n np.testing.assert_almost_equal(compat.get_affine(img_r), compat.get_affine(img))\n\n img_r = resample_img(img, target_affine=affine, target_shape=target_shape,\n copy=False)\n assert_equal(img_r, img)\n\n img_r = resample_img(img, target_affine=affine, target_shape=target_shape,\n copy=True)\n assert_false(np.may_share_memory(img_r.get_data(), img.get_data()))\n np.testing.assert_almost_equal(img_r.get_data(), img.get_data())\n np.testing.assert_almost_equal(compat.get_affine(img_r), compat.get_affine(img))\n\n\ndef test_4d_affine_bounding_box_error():\n\n small_data = np.ones([4, 4, 4])\n small_data_4D_affine = np.eye(4)\n small_data_4D_affine[:3, -1] = np.array([5, 4, 5])\n\n small_img = Nifti1Image(small_data,\n small_data_4D_affine)\n\n bigger_data_4D_affine = np.eye(4)\n bigger_data = np.zeros([10, 10, 10])\n bigger_img = Nifti1Image(bigger_data,\n bigger_data_4D_affine)\n\n # We would like to check whether all/most of the data\n # will be contained in the resampled image\n # The measure will be the l2 norm, since some resampling\n # schemes approximately conserve it\n\n def l2_norm(arr):\n return (arr ** 2).sum()\n\n # resample using 4D affine and specified target shape\n small_to_big_with_shape = resample_img(\n small_img,\n target_affine=compat.get_affine(bigger_img),\n target_shape=bigger_img.shape)\n # resample using 3D affine and no target shape\n small_to_big_without_shape_3D_affine = resample_img(\n small_img,\n target_affine=compat.get_affine(bigger_img)[:3, :3])\n # resample using 4D affine and no target shape\n small_to_big_without_shape = resample_img(\n small_img,\n target_affine=compat.get_affine(bigger_img))\n\n # The first 2 should pass\n assert_almost_equal(l2_norm(small_data),\n l2_norm(small_to_big_with_shape.get_data()))\n assert_almost_equal(l2_norm(small_data),\n l2_norm(small_to_big_without_shape_3D_affine.get_data()))\n\n # After correcting decision tree for 4x4 affine given + no target shape\n # from \"use initial shape\" to \"calculate minimal bounding box respecting\n # the affine anchor and the data\"\n assert_almost_equal(l2_norm(small_data),\n l2_norm(small_to_big_without_shape.get_data()))\n\n assert_array_equal(small_to_big_without_shape.shape,\n small_data_4D_affine[:3, -1] + np.array(small_img.shape))\n\n\ndef test_raises_upon_3x3_affine_and_no_shape():\n img = Nifti1Image(np.zeros([8, 9, 10]),\n affine=np.eye(4))\n exception = ValueError\n message = (\"Given target shape without anchor \"\n \"vector: Affine shape should be \\(4, 4\\) and \"\n \"not \\(3, 3\\)\")\n testing.assert_raises_regex(\n exception, message,\n resample_img, img, target_affine=np.eye(3) * 2,\n target_shape=(10, 10, 10))\n\n\ndef test_3x3_affine_bbox():\n # Test that the bounding-box is properly computed when\n # transforming with a negative affine component\n # This is specifically to test for a change in behavior between\n # scipy < 0.18 and 
scipy >= 0.18, which is an interaction between\n # offset and a diagonal affine\n image = np.ones((20, 30))\n source_affine = np.eye(4)\n # Give the affine an offset\n source_affine[:2, 3] = np.array([96, 64])\n\n # We need to turn this data into a nibabel image\n img = Nifti1Image(image[:, :, np.newaxis], affine=source_affine)\n\n target_affine_3x3 = np.eye(3) * 2\n # One negative axes\n target_affine_3x3[1] *= -1\n\n img_3d_affine = resample_img(img, target_affine=target_affine_3x3)\n\n # If the bounding box is computed wrong, the image will be only\n # zeros\n np.testing.assert_allclose(img_3d_affine.get_data().max(), image.max())\n\n\ndef test_raises_bbox_error_if_data_outside_box():\n # Make some cases which should raise exceptions\n\n # original image\n data = np.zeros([8, 9, 10])\n affine = np.eye(4)\n affine_offset = np.array([1, 1, 1])\n affine[:3, 3] = affine_offset\n\n img = Nifti1Image(data, affine)\n\n # some axis flipping affines\n axis_flips = np.array(list(map(np.diag,\n [[-1, 1, 1, 1],\n [1, -1, 1, 1],\n [1, 1, -1, 1],\n [-1, -1, 1, 1],\n [-1, 1, -1, 1],\n [1, -1, -1, 1]])))\n\n # some in plane 90 degree rotations base on these\n # (by permuting two lines)\n af = axis_flips\n rotations = np.array([af[0][[1, 0, 2, 3]],\n af[0][[2, 1, 0, 3]],\n af[1][[1, 0, 2, 3]],\n af[1][[0, 2, 1, 3]],\n af[2][[2, 1, 0, 3]],\n af[2][[0, 2, 1, 3]]])\n\n new_affines = np.concatenate([axis_flips,\n rotations])\n new_offset = np.array([0., 0., 0.])\n new_affines[:, :3, 3] = new_offset[np.newaxis, :]\n\n for new_affine in new_affines:\n exception = BoundingBoxError\n message = (\"The field of view given \"\n \"by the target affine does \"\n \"not contain any of the data\")\n\n testing.assert_raises_regex(\n exception, message,\n resample_img, img, target_affine=new_affine)\n\n\ndef test_resampling_result_axis_permutation():\n # Transform real data using easily checkable transformations\n # For now: axis permutations\n # create a cuboid full of deterministic data, padded with one\n # voxel thickness of zeros\n core_shape = (3, 5, 4)\n core_data = np.arange(np.prod(core_shape)).reshape(core_shape)\n full_data_shape = np.array(core_shape) + 2\n full_data = np.zeros(full_data_shape)\n full_data[[slice(1, 1 + s) for s in core_shape]] = core_data\n\n source_img = Nifti1Image(full_data, np.eye(4))\n\n axis_permutations = [[0, 1, 2],\n [1, 0, 2],\n [2, 1, 0],\n [0, 2, 1]]\n\n # check 3x3 transformation matrix\n for ap in axis_permutations:\n target_affine = np.eye(3)[ap]\n resampled_img = resample_img(source_img,\n target_affine=target_affine)\n\n resampled_data = resampled_img.get_data()\n what_resampled_data_should_be = full_data.transpose(ap)\n assert_array_almost_equal(resampled_data,\n what_resampled_data_should_be)\n\n # check 4x4 transformation matrix\n offset = np.array([-2, 1, -3])\n for ap in axis_permutations:\n target_affine = np.eye(4)\n target_affine[:3, :3] = np.eye(3)[ap]\n target_affine[:3, 3] = offset\n\n resampled_img = resample_img(source_img,\n target_affine=target_affine)\n resampled_data = resampled_img.get_data()\n offset_cropping = np.vstack([-offset[ap][np.newaxis, :],\n np.zeros([1, 3])]\n ).T.ravel().astype(int)\n what_resampled_data_should_be = pad(full_data.transpose(ap),\n *list(offset_cropping))\n\n assert_array_almost_equal(resampled_data,\n what_resampled_data_should_be)\n\n\ndef test_resampling_nan():\n # Test that when the data has NaNs they do not propagate to the\n # whole image\n\n for core_shape in [(3, 5, 4), (3, 5, 4, 2)]:\n # create deterministic data, padded 
with one\n # voxel thickness of zeros\n core_data = np.arange(np.prod(core_shape)\n ).reshape(core_shape).astype(np.float)\n # Introduce a nan\n core_data[2, 2:4, 1] = np.nan\n full_data_shape = np.array(core_shape) + 2\n full_data = np.zeros(full_data_shape)\n full_data[[slice(1, 1 + s) for s in core_shape]] = core_data\n\n source_img = Nifti1Image(full_data, np.eye(4))\n\n # Transform real data using easily checkable transformations\n # For now: axis permutations\n axis_permutation = [0, 1, 2]\n\n # check 3x3 transformation matrix\n target_affine = np.eye(3)[axis_permutation]\n resampled_img = testing.assert_warns(\n RuntimeWarning, resample_img, source_img,\n target_affine=target_affine)\n\n resampled_data = resampled_img.get_data()\n if full_data.ndim == 4:\n axis_permutation.append(3)\n what_resampled_data_should_be = full_data.transpose(axis_permutation)\n non_nan = np.isfinite(what_resampled_data_should_be)\n\n # Check that the input data hasn't been modified:\n assert_false(np.all(non_nan))\n\n # Check that for finite value resampling works without problems\n assert_array_almost_equal(resampled_data[non_nan],\n what_resampled_data_should_be[non_nan])\n\n # Check that what was not finite is still not finite\n assert_false(np.any(np.isfinite(\n resampled_data[np.logical_not(non_nan)])))\n\n # Test with an actual resampling, in the case of a bigish hole\n # This checks the extrapolation mechanism: if we don't do any\n # extrapolation before resampling, the hole creates big\n # artefacts\n data = 10 * np.ones((10, 10, 10))\n data[4:6, 4:6, 4:6] = np.nan\n source_img = Nifti1Image(data, 2 * np.eye(4))\n resampled_img = testing.assert_warns(\n RuntimeWarning, resample_img, source_img,\n target_affine=np.eye(4))\n\n resampled_data = resampled_img.get_data()\n np.testing.assert_allclose(10, resampled_data[np.isfinite(resampled_data)])\n\n\ndef test_resample_to_img():\n # Testing resample to img function\n rand_gen = np.random.RandomState(0)\n shape = (6, 3, 6, 3)\n data = rand_gen.random_sample(shape)\n\n source_affine = np.eye(4)\n source_img = Nifti1Image(data, source_affine)\n\n target_affine = 2 * source_affine\n target_img = Nifti1Image(data, target_affine)\n\n\n result_img = resample_to_img(source_img, target_img,\n interpolation='nearest')\n\n downsampled = data[::2, ::2, ::2, ...]\n x, y, z = downsampled.shape[:3]\n np.testing.assert_almost_equal(downsampled,\n result_img.get_data()[:x, :y, :z, ...])\n\n\ndef test_reorder_img():\n # We need to test on a square array, as rotation does not change\n # shape, whereas reordering does.\n shape = (5, 5, 5, 2, 2)\n rng = np.random.RandomState(42)\n data = rng.rand(*shape)\n affine = np.eye(4)\n affine[:3, -1] = 0.5 * np.array(shape[:3])\n ref_img = Nifti1Image(data, affine)\n # Test with purely positive matrices and compare to a rotation\n for theta, phi in np.random.randint(4, size=(5, 2)):\n rot = rotation(theta * np.pi / 2, phi * np.pi / 2)\n rot[np.abs(rot) < 0.001] = 0\n rot[rot > 0.9] = 1\n rot[rot < -0.9] = 1\n b = 0.5 * np.array(shape[:3])\n new_affine = from_matrix_vector(rot, b)\n rot_img = resample_img(ref_img, target_affine=new_affine)\n np.testing.assert_array_equal(compat.get_affine(rot_img), new_affine)\n np.testing.assert_array_equal(rot_img.get_data().shape, shape)\n reordered_img = reorder_img(rot_img)\n np.testing.assert_array_equal(compat.get_affine(reordered_img)[:3, :3],\n np.eye(3))\n np.testing.assert_almost_equal(reordered_img.get_data(),\n data)\n\n # Create a non-diagonal affine, and check that we raise a 
sensible\n # exception\n affine[1, 0] = 0.1\n ref_img = Nifti1Image(data, affine)\n testing.assert_raises_regex(ValueError, 'Cannot reorder the axes',\n reorder_img, ref_img)\n\n # Test that no exception is raised when resample='continuous'\n reorder_img(ref_img, resample='continuous')\n\n # Test that resample args gets passed to resample_img\n interpolation = 'nearest'\n reordered_img = reorder_img(ref_img, resample=interpolation)\n resampled_img = resample_img(ref_img,\n target_affine=compat.get_affine(reordered_img),\n interpolation=interpolation)\n np.testing.assert_array_equal(reordered_img.get_data(),\n resampled_img.get_data())\n\n # Make sure invalid resample argument is included in the error message\n interpolation = 'an_invalid_interpolation'\n pattern = \"interpolation must be either.+{0}\".format(interpolation)\n testing.assert_raises_regex(ValueError, pattern,\n reorder_img, ref_img,\n resample=interpolation)\n\n # Test flipping an axis\n data = rng.rand(*shape)\n for i in (0, 1, 2):\n # Make a diagonal affine with a negative axis, and check that\n # can be reordered, also vary the shape\n shape = (i + 1, i + 2, 3 - i)\n affine = np.eye(4)\n affine[i, i] *= -1\n img = Nifti1Image(data, affine)\n orig_img = copy.copy(img)\n #x, y, z = img.get_world_coords()\n #sample = img.values_in_world(x, y, z)\n img2 = reorder_img(img)\n # Check that img has not been changed\n np.testing.assert_array_equal(compat.get_affine(img),\n compat.get_affine(orig_img))\n np.testing.assert_array_equal(img.get_data(),\n orig_img.get_data())\n # Test that the affine is indeed diagonal:\n np.testing.assert_array_equal(compat.get_affine(img2)[:3, :3],\n np.diag(np.diag(\n compat.get_affine(img2)[:3, :3])))\n assert_true(np.all(np.diag(compat.get_affine(img2)) >= 0))\n\n\ndef test_reorder_img_non_native_endianness():\n def _get_resampled_img(dtype):\n data = np.ones((10, 10, 10), dtype=dtype)\n data[3:7, 3:7, 3:7] = 2\n\n affine = np.eye(4)\n\n theta = math.pi / 6.\n c = math.cos(theta)\n s = math.sin(theta)\n\n affine = np.array([[1, 0, 0, 0],\n [0, c, -s, 0],\n [0, s, c, 0],\n [0, 0, 0, 1]])\n\n img = Nifti1Image(data, affine)\n return resample_img(img, target_affine=np.eye(4))\n\n img_1 = _get_resampled_img('<f8')\n img_2 = _get_resampled_img('>f8')\n\n np.testing.assert_equal(img_1.get_data(), img_2.get_data())\n\n\ndef test_coord_transform_trivial():\n sform = np.eye(4)\n x = np.random.random((10,))\n y = np.random.random((10,))\n z = np.random.random((10,))\n\n x_, y_, z_ = coord_transform(x, y, z, sform)\n np.testing.assert_array_equal(x, x_)\n np.testing.assert_array_equal(y, y_)\n np.testing.assert_array_equal(z, z_)\n\n sform[:, -1] = 1\n x_, y_, z_ = coord_transform(x, y, z, sform)\n np.testing.assert_array_equal(x + 1, x_)\n np.testing.assert_array_equal(y + 1, y_)\n np.testing.assert_array_equal(z + 1, z_)\n\n # Test the output in case of one item array\n x, y, z = x[:1], y[:1], z[:1]\n x_, y_, z_ = coord_transform(x, y, z, sform)\n np.testing.assert_array_equal(x + 1, x_)\n np.testing.assert_array_equal(y + 1, y_)\n np.testing.assert_array_equal(z + 1, z_)\n\n # Test the output in case of simple items\n x, y, z = x[0], y[0], z[0]\n x_, y_, z_ = coord_transform(x, y, z, sform)\n np.testing.assert_array_equal(x + 1, x_)\n np.testing.assert_array_equal(y + 1, y_)\n np.testing.assert_array_equal(z + 1, z_)\n\n\ndef test_resample_img_segmentation_fault():\n if os.environ.get('APPVEYOR') == 'True':\n raise SkipTest('This test too slow (7-8 minutes) on AppVeyor')\n\n # see 
https://github.com/nilearn/nilearn/issues/346\n shape_in = (64, 64, 64)\n aff_in = np.diag([2., 2., 2., 1.])\n aff_out = np.diag([3., 3., 3., 1.])\n # fourth_dim = 1024 works fine but for 1025 creates a segmentation\n # fault with scipy < 0.14.1\n fourth_dim = 1025\n\n try:\n data = np.ones(shape_in + (fourth_dim, ), dtype=np.float64)\n except MemoryError:\n # This can happen on AppVeyor and for 32-bit Python on Windows\n raise SkipTest('Not enough RAM to run this test')\n\n img_in = Nifti1Image(data, aff_in)\n\n resample_img(img_in,\n target_affine=aff_out,\n interpolation='nearest')\n\n\ndef test_resampling_with_int_types_no_crash():\n affine = np.eye(4)\n data = np.zeros((2, 2, 2))\n\n for dtype in [np.int, np.int8, np.int16, np.int32, np.int64,\n np.uint, np.uint8, np.uint16, np.uint32, np.uint64,\n np.float32, np.float64, np.float, '>i8', '<i8']:\n img = Nifti1Image(data.astype(dtype), affine)\n resample_img(img, target_affine=2. * affine)\n"
] |
[
[
"numpy.concatenate",
"numpy.max",
"numpy.array",
"numpy.dot",
"numpy.logical_not",
"numpy.zeros",
"numpy.random.RandomState",
"numpy.testing.assert_array_equal",
"numpy.ones",
"numpy.eye",
"numpy.testing.assert_array_almost_equal",
"numpy.prod",
"numpy.random.randint",
"numpy.isfinite",
"numpy.abs",
"numpy.all",
"numpy.random.random",
"numpy.diag"
]
] |
dl-framework-benchmark/Paddle
|
[
"73daa3d6c0001855aea95cdd552b6398de50c349",
"73daa3d6c0001855aea95cdd552b6398de50c349"
] |
[
"python/paddle/fluid/optimizer.py",
"python/paddle/fluid/dygraph/nn.py"
] |
[
"# Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nfrom __future__ import print_function\n\nimport numpy as np\nfrom collections import defaultdict\n\nfrom paddle.fluid.distribute_lookup_table import find_distributed_lookup_table\nfrom paddle.fluid.framework import Program, Variable, name_scope, default_main_program, default_startup_program\n\nfrom . import framework\nfrom . import layers\nfrom . import unique_name\nfrom .backward import append_backward, _some_in_set_, _append_grad_suffix_\nfrom .clip import append_gradient_clip_ops, error_clip_callback\nfrom .framework import program_guard\nfrom .initializer import Constant\nfrom .layer_helper import LayerHelper\nfrom .layers import ops\nfrom .regularizer import append_regularization_ops\nfrom .dygraph import base as imperative_base\nfrom .dygraph.learning_rate_scheduler import LearningRateDecay\nfrom paddle.fluid import core\nfrom paddle.fluid.layers import tensor\nfrom functools import reduce\nfrom .wrapped_decorator import signature_safe_contextmanager\n\n__all__ = [\n 'SGD', 'Momentum', 'Adagrad', 'Adam', 'Adamax', 'DecayedAdagrad', 'Ftrl',\n 'SGDOptimizer', 'MomentumOptimizer', 'AdagradOptimizer', 'AdamOptimizer',\n 'AdamaxOptimizer', 'DecayedAdagradOptimizer', 'RMSPropOptimizer',\n 'FtrlOptimizer', 'Adadelta', 'ModelAverage', 'LarsMomentum',\n 'LarsMomentumOptimizer', 'DGCMomentumOptimizer', 'LambOptimizer',\n 'ExponentialMovingAverage', 'PipelineOptimizer', 'LookaheadOptimizer'\n]\n\n\nclass Optimizer(object):\n \"\"\"Optimizer Base class.\n\n Define the common interface of an optimizer.\n User should not use this class directly,\n but need to use one of it's implementation.\n \"\"\"\n\n @imperative_base.no_grad\n def __init__(self, learning_rate, regularization=None, name=None):\n if framework.in_dygraph_mode():\n if not isinstance(learning_rate, float) and \\\n not isinstance(learning_rate, LearningRateDecay):\n raise TypeError(\n \"learning rate should be float or LearningRateDecay, got %s here\"\n % type(learning_rate))\n if name is not None:\n self._name = unique_name.generate(name)\n else:\n self._name = unique_name.generate(self.__class__.__name__)\n else:\n if not isinstance(learning_rate, float) and \\\n not isinstance(learning_rate, framework.Variable):\n raise TypeError(\n \"learning rate should be float or Variable, got %s here\" %\n type(learning_rate))\n self._name = name\n\n self.regularization = regularization\n self._learning_rate = learning_rate\n # the learning rate type should be inferenced from loss\n self._dtype = None\n # each program should have a independent learning rate\n # program -> Variable(learning_rate)\n self._learning_rate_map = dict()\n if isinstance(self._learning_rate, framework.Variable):\n self._learning_rate_map[framework.default_main_program(\n )] = self._learning_rate\n # Dictionary of accumulators. Some optimizer subclasses need to\n # allocate and manage extra variables associated with the parameters\n # to train. 
These variables are called accumulators.\n # {accum_name : { paramter_name : accumulator_for_parameter, ...}, ...}\n self._accumulators = defaultdict(lambda: dict())\n self.helper = None\n self._opti_name_list = []\n\n def load(self, stat_dict):\n \"\"\"\n load optimizer with learning rate decay in dygraph mode\n :return: None\n\n Args:\n stat_dict: the dict load by load_persistable method\n\n Examples:\n\n .. code-block:: python\n\n from __future__ import print_function\n import numpy as np\n import paddle\n import paddle.fluid as fluid\n from paddle.fluid.optimizer import SGDOptimizer\n from paddle.fluid.dygraph.nn import FC\n from paddle.fluid.dygraph.base import to_variable\n\n class MLP(fluid.Layer):\n def __init__(self, name_scope):\n super(MLP, self).__init__(name_scope)\n\n self._fc1 = FC(self.full_name(), 10)\n self._fc2 = FC(self.full_name(), 10)\n\n def forward(self, inputs):\n y = self._fc1(inputs)\n y = self._fc2(y)\n return y\n\n with fluid.dygraph.guard():\n mlp = MLP('mlp')\n optimizer2 = SGDOptimizer(\n learning_rate=fluid.layers.natural_exp_decay(\n learning_rate=0.1,\n decay_steps=10000,\n decay_rate=0.5,\n staircase=True))\n\n train_reader = paddle.batch(\n paddle.dataset.mnist.train(), batch_size=128, drop_last=True)\n\n for batch_id, data in enumerate(train_reader()):\n dy_x_data = np.array(\n [x[0].reshape(1, 28, 28) for x in data]).astype('float32')\n\n y_data = np.array([x[1] for x in data]).astype('int64').reshape(\n 128, 1)\n\n img = to_variable(dy_x_data)\n label = to_variable(y_data)\n label._stop_gradient = True\n cost = mlp(img)\n avg_loss = fluid.layers.reduce_mean(cost)\n avg_loss.backward()\n optimizer.minimize(avg_loss)\n mlp.clear_gradients()\n fluid.dygraph.save_persistables(\n mlp.state_dict(), [optimizer, optimizer2], \"save_dir_2\")\n if batch_id == 2:\n break\n\n with fluid.dygraph.guard():\n mlp_load = MLP('mlp')\n optimizer_load2 = SGDOptimizer(\n learning_rate=fluid.layers.natural_exp_decay(\n learning_rate=0.1,\n decay_steps=10000,\n decay_rate=0.5,\n staircase=True))\n parameters, optimizers = fluid.dygraph.load_persistables(\n \"save_dir_2\")\n mlp_load.load_dict(parameters)\n optimizer_load2.load(optimizers)\n self.assertTrue(optimizer2._learning_rate.__dict__ == optimizer_load2._learning_rate.__dict__)\n\n \"\"\"\n if framework.in_dygraph_mode():\n self._learning_rate = stat_dict[self._name]\n else:\n raise TypeError(\"load can only be used under DyGraph mode\")\n\n def get_opti_var_name_list(self):\n return self._opti_name_list\n\n def _create_global_learning_rate(self):\n if imperative_base.enabled():\n # create learning rate Variable\n if isinstance(self._learning_rate, float):\n lr = self._global_learning_rate()\n\n if isinstance(lr, framework.Variable):\n return\n else:\n self._learning_rate_map[framework.default_main_program(\n )] = layers.create_global_var(\n name=unique_name.generate(\"learning_rate\"),\n shape=[1],\n value=float(self._learning_rate),\n dtype='float32' if self._dtype is None else self._dtype,\n persistable=True)\n # get learning rate Variable from LearningRateDecay\n elif isinstance(self._learning_rate, LearningRateDecay):\n self._learning_rate_map[framework.default_main_program(\n )] = self._learning_rate()\n else:\n raise TypeError(\n \"optimizer's learning rate must be float or LearningRateDecay\"\n )\n else:\n lr = self._global_learning_rate()\n\n if isinstance(lr, framework.Variable):\n return\n else:\n if not isinstance(self._learning_rate, float):\n raise TypeError(\n \"learning rate variable is create outside 
optimizer,\"\n \"can not create new learning rate variable for new program\"\n )\n\n # create learning rate in the current main program\n self._learning_rate_map[framework.default_main_program(\n )] = layers.create_global_var(\n name=unique_name.generate(\"learning_rate\"),\n shape=[1],\n value=float(self._learning_rate),\n dtype='float32' if self._dtype is None else self._dtype,\n persistable=True)\n\n def _global_learning_rate(self, program=None):\n \"\"\"\n get global decayed learning rate\n :return:\n \"\"\"\n if program is None:\n program = framework.default_main_program()\n return self._learning_rate_map.get(program, None)\n\n def _append_optimize_op(self, block, param_and_grad):\n \"\"\" append optimize operator to block and return all the added optimize_op\n \"\"\"\n raise NotImplementedError()\n\n def _create_param_lr(self, param_and_grad):\n # create learning rate variable for every parameter\n param = param_and_grad[0]\n param_lr = param.optimize_attr['learning_rate']\n if type(param_lr) == Variable:\n return param_lr\n else:\n if param_lr == 1.0:\n return self._global_learning_rate()\n else:\n with default_main_program()._lr_schedule_guard(\n is_with_opt=True), framework.name_scope(\n 'scale_with_param_lr'):\n return self._global_learning_rate() * param_lr\n\n def _create_accumulators(self, block, parameters):\n \"\"\"Create all accumulators needed by the parameters\n\n Args:\n block: the block in which the loss variable is present\n parameters: list of parameter variables for the optimizer\n \"\"\"\n pass\n\n def _finish_update(self, block, parameters_and_grads):\n \"\"\"Finish any custom updates needed\n before completing an optimization step\n\n Args:\n block: the block in which the loss variable is present\n parameters: list of parameter variables for the optimizer\n\n Returns:\n None\n \"\"\"\n pass\n\n def _add_accumulator(self,\n name,\n param,\n dtype=None,\n fill_value=0.0,\n shape=None):\n \"\"\"Utility function to add an accumulator for a parameter\n\n Args:\n block: the block in which the loss variable is present\n name: name of the accumulator\n param: parameter variable for which accumulator is to be added\n dtype: data type of the accumulator variable\n fill_value: value to initialize the accumulator variable\n \"\"\"\n if self._name is not None:\n name = self._name + \"_\" + name\n if (name in self._accumulators and\n param.name in self._accumulators[name]):\n if framework.in_dygraph_mode():\n return self._accumulators[name][param.name]\n raise Exception(\"Accumulator {} already exists for parameter {}\".\n format(name, param.name))\n if shape == None:\n shape = param.shape\n assert isinstance(self.helper, LayerHelper)\n\n var_name = param.name + \"_\" + name\n var_name = unique_name.generate(var_name)\n self._opti_name_list.append(var_name)\n\n var = self.helper.create_global_variable(\n name=var_name,\n persistable=True,\n dtype=dtype or param.dtype,\n type=param.type,\n shape=shape)\n self.helper.set_variable_initializer(\n var, initializer=Constant(value=float(fill_value)))\n self._accumulators[name][param.name] = var\n return var\n\n def _get_accumulator(self, name, param):\n \"\"\"Utility function to fetch an accumulator for a parameter\n\n Args:\n name: name of the accumulator\n param: parameter variable for which accumulator is to be fetched\n\n Returns:\n accumulator variable for the parameter\n \"\"\"\n if self._name is not None:\n name = self._name + \"_\" + name\n if (name not in self._accumulators or\n param.name not in self._accumulators[name]):\n 
raise Exception(\"Accumulator {} does not exist for parameter {}\".\n format(name, param.name))\n return self._accumulators[name][param.name]\n\n def _create_optimization_pass(self, parameters_and_grads):\n \"\"\"Add optimization operators to update gradients to variables.\n\n Args:\n parameters_and_grads(list(tuple(Variable, Variable))):\n a list of (variable, gradient) pair to update.\n\n Returns:\n return_op_list: a list of operators that will complete one step of\n optimization. This will include parameter update ops, global step\n update ops and any other custom ops required by subclasses to manage\n their internal state.\n \"\"\"\n # This is a default implementation of create_optimization_pass that\n # can be shared by most optimizers. This implementation assumes that\n # the subclass will implement the _append_optimize_op method and the\n # _initialize_tensors method. The subclass can extend the\n # _create_accumulators method if it needs to create accumulators\n # for parameters and extend _finish_update method to add custom ops.\n\n # Allways called under program_guard use global block as loss block\n global_block = framework.default_main_program().global_block()\n start = len(global_block.ops)\n self.helper = LayerHelper(self.__class__.__name__)\n self._create_accumulators(\n global_block,\n [p[0] for p in parameters_and_grads if p[0].trainable])\n self._create_global_learning_rate()\n\n optimize_ops = []\n if framework.in_dygraph_mode():\n for param_and_grad in parameters_and_grads:\n if param_and_grad[1] is None:\n continue\n with param_and_grad[0].block.program._optimized_guard(\n param_and_grad):\n if param_and_grad[0].trainable is True:\n optimize_op = self._append_optimize_op(global_block,\n param_and_grad)\n optimize_ops.append(optimize_op)\n else:\n for param_and_grad in parameters_and_grads:\n if param_and_grad[1] is None:\n continue\n with param_and_grad[0].block.program._optimized_guard(\n param_and_grad), name_scope(\"optimizer\"):\n if param_and_grad[0].trainable is True:\n optimize_op = self._append_optimize_op(global_block,\n param_and_grad)\n optimize_ops.append(optimize_op)\n\n # Get custom finish ops for subclasses\n # FIXME: Need to fix this once we figure out how to handle dependencies\n self._finish_update(global_block, parameters_and_grads)\n\n end = len(global_block.ops)\n return global_block._slice_ops(start, end)\n\n def _process_distribute_lookuptable(self, param_grads):\n \"\"\"\n Because distribute lookup table only support SGD optimizer for now, not support\n other optimizer and regularization, so we should find the table parameter out,\n and avoid to add regularization and other op for it, and add sgd optimize op\n for it independently.\n :param param_grads(list((Var, Var))): list of (param, grad) pair.\n :param loss: the loss variable.\n :param startup_program: the startup program\n \"\"\"\n program = framework.default_main_program()\n global_block = framework.default_main_program().global_block()\n table_name = find_distributed_lookup_table(program)\n table_param = None\n table_grad = None\n new_param_grads = []\n for p, g in param_grads:\n if p.name == table_name:\n if table_param is not None:\n raise RuntimeError(\n \"multi dist table var found, only support one now!\")\n table_param = p\n table_grad = g\n else:\n new_param_grads.append((p, g))\n sgd_op = None\n if table_param is not None:\n param_and_grad = [table_param, table_grad]\n with table_param.block.program._optimized_guard(param_and_grad), \\\n framework.name_scope(\"optimizer\"):\n 
self._create_global_learning_rate()\n # create the optimize op\n sgd_op = global_block.append_op(\n type='sgd',\n inputs={\n \"Param\": table_param,\n \"Grad\": table_grad,\n \"LearningRate\": self._create_param_lr(param_and_grad)\n },\n outputs={\"ParamOut\": param_and_grad[0]})\n return new_param_grads, (table_param, table_grad), sgd_op\n\n def _append_dgc_ops(self, param_and_grad):\n pass\n\n def backward(self,\n loss,\n startup_program=None,\n parameter_list=None,\n no_grad_set=None,\n callbacks=None):\n \"\"\"\n First part of `minimize`, do auto-diff to append backward ops for\n the current program.\n\n Args:\n loss (Variable): loss variable to run optimizations.\n startup_program (Program): startup_program for initializing parameters\n in `parameter_list`.\n parameter_list (list): list of Variables to update.\n no_grad_set (set|None): set of Variables should be ignored.\n callbacks (list|None): list of callables to run when appending backward\n operator for one parameter.\n\n Return:\n list: list of (param, grad) pair, grad is the output of backward.\n\n Examples:\n See examples in `apply_gradients`.\n \"\"\"\n self._dtype = loss.dtype\n if framework.in_dygraph_mode():\n if parameter_list is not None:\n parameters = parameter_list\n else:\n parameters = framework._dygraph_tracer().all_parameters()\n\n params_grads = []\n for param in parameters:\n if not param.trainable:\n continue\n if param._ivar._grad_ivar() is not None:\n # create gradient variable\n grad_var = Variable(\n block=loss.block,\n name=param._ivar._grad_name(),\n stop_gradient=True,\n ivar=param._ivar._grad_ivar())\n params_grads.append((param, grad_var))\n else:\n if callbacks is None:\n callbacks = [error_clip_callback]\n else:\n assert (isinstance(callbacks, list))\n program = loss.block.program\n with program_guard(program, startup_program):\n params_grads = append_backward(loss, parameter_list,\n no_grad_set, callbacks)\n # Note: since we can't use all_reduce_op now,\n # dgc_op should be the last op of one grad.\n self._append_dgc_ops(params_grads)\n return params_grads\n\n def apply_gradients(self, params_grads):\n \"\"\"\n Second part of `minimize`, appending optimization operators for\n given `params_grads` pairs.\n\n Args:\n params_grads (list): list of (param, grad) pair to do optimization.\n\n Returns:\n list: A list of operators appended to the current program.\n\n Examples:\n .. 
code-block:: python\n\n import paddle.fluid as fluid\n loss = network()\n optimizer = fluid.optimizer.SGD(learning_rate=0.1)\n params_grads = optimizer.backward(loss)\n # you may append operations for params_grads here\n # ...\n optimizer.apply_gradients(params_grads)\n \"\"\"\n params_grads = sorted(params_grads, key=lambda x: x[0].name)\n\n params_grads, table_param_and_grad, table_optimize_op = \\\n self._process_distribute_lookuptable(params_grads)\n\n params_grads = append_gradient_clip_ops(params_grads)\n\n # Add regularization if any\n params_grads = append_regularization_ops(params_grads,\n self.regularization)\n\n optimize_ops = self._create_optimization_pass(params_grads)\n if table_optimize_op is not None:\n optimize_ops.append(table_optimize_op)\n params_grads.append(table_param_and_grad)\n\n return optimize_ops\n\n def apply_optimize(self, loss, startup_program, params_grads):\n \"\"\"\n Second part of `minimize`, appending optimization operators for\n given `params_grads` pairs.\n\n Args:\n loss (Variable): loss variable to run optimizations.\n startup_program (Program): startup_program for initializing parameters\n in `parameter_list`.\n params_grads (list): list of (param, grad) pair to do optimization.\n\n Returns:\n list: A list of operators appended to the current program.\n \"\"\"\n if framework.in_dygraph_mode():\n with program_guard(framework.default_main_program(),\n framework.default_startup_program()):\n params_grads = append_regularization_ops(params_grads,\n self.regularization)\n optimize_ops = self._create_optimization_pass(params_grads)\n else:\n program = loss.block.program\n with program_guard(program, startup_program):\n optimize_ops = self.apply_gradients(params_grads)\n return optimize_ops\n\n @imperative_base.no_grad\n def minimize(self,\n loss,\n startup_program=None,\n parameter_list=None,\n no_grad_set=None,\n grad_clip=None):\n \"\"\"\n Add operations to minimize `loss` by updating `parameter_list`.\n\n This method combines interface `backward()` and\n `apply_gradients()` into one.\n\n Args:\n loss (Variable): loss variable to run optimizations.\n startup_program (Program): startup_program for initializing parameters\n in `parameter_list`.\n parameter_list (list): list of Variables to update.\n no_grad_set (set|None): set of Variables should be ignored.\n grad_clip (GradClipBase|None) : Gradient clip strategy\n\n Returns:\n tuple: (optimize_ops, params_grads) which are, list of operators appended;\n and list of (param, grad) Variables pair for optimization.\n \"\"\"\n assert isinstance(loss, Variable), \"The loss should be an Variable.\"\n if no_grad_set is None:\n no_grad_set = set()\n elif isinstance(no_grad_set, set) or isinstance(\n no_grad_set, list) or isinstance(no_grad_set, tuple):\n no_grad_set = set(no_grad_set)\n else:\n assert \"no_grad_set should be a set, but the passed type is {}\".format(\n type(no_grad_set))\n parameters = loss.block.program.global_block().all_parameters()\n param_no_trainable = set(\n [param.name for param in parameters if param.trainable is False])\n # If the parameter is no trainable, it should not have a gradient.\n no_grad_set.update(param_no_trainable)\n params_grads = self.backward(\n loss,\n startup_program=startup_program,\n parameter_list=parameter_list,\n no_grad_set=no_grad_set)\n\n if grad_clip is not None and framework.in_dygraph_mode():\n # TODO(hongyu): FIX later, this is only for dygraph, should be work for static mode\n params_grads = grad_clip(params_grads)\n\n optimize_ops = self.apply_optimize(\n 
loss, startup_program=startup_program, params_grads=params_grads)\n\n return optimize_ops, params_grads\n\n\nclass SGDOptimizer(Optimizer):\n \"\"\"\n Optimizer of the stochastic gradient descent algorithm.\n\n .. math::\n\n param\\_out = param - learning\\_rate * grad\n\n Args:\n learning_rate (float|Variable): the learning rate used to update parameters. \\\n Can be a float value or a Variable with one float value as data element.\n regularization: A Regularizer, such as\n fluid.regularizer.L2DecayRegularizer.\n name: A optional name prefix.\n\n Examples:\n .. code-block:: python\n\n import paddle\n import paddle.fluid as fluid\n import numpy as np\n\n place = fluid.CPUPlace()\n main = fluid.Program()\n with fluid.program_guard(main):\n x = fluid.layers.data(name='x', shape=[13], dtype='float32')\n y = fluid.layers.data(name='y', shape=[1], dtype='float32')\n y_predict = fluid.layers.fc(input=x, size=1, act=None)\n cost = fluid.layers.square_error_cost(input=y_predict, label=y)\n avg_cost = fluid.layers.mean(cost)\n\n sgd_optimizer = fluid.optimizer.SGD(learning_rate=0.001)\n sgd_optimizer.minimize(avg_cost)\n\n fetch_list = [avg_cost]\n train_reader = paddle.batch(\n paddle.dataset.uci_housing.train(), batch_size=1)\n feeder = fluid.DataFeeder(place=place, feed_list=[x, y])\n exe = fluid.Executor(place)\n exe.run(fluid.default_startup_program())\n for data in train_reader():\n exe.run(main, feed=feeder.feed(data), fetch_list=fetch_list)\n\n \"\"\"\n\n def __init__(self, learning_rate, regularization=None, name=None):\n assert learning_rate is not None\n super(SGDOptimizer, self).__init__(\n learning_rate=learning_rate,\n regularization=regularization,\n name=name)\n self.type = \"sgd\"\n\n def _append_optimize_op(self, block, param_and_grad):\n assert isinstance(block, framework.Block)\n\n # create the optimize op\n sgd_op = block.append_op(\n type=self.type,\n inputs={\n \"Param\": param_and_grad[0],\n \"Grad\": param_and_grad[1],\n \"LearningRate\": self._create_param_lr(param_and_grad)\n },\n outputs={\"ParamOut\": param_and_grad[0]},\n stop_gradient=True)\n\n return sgd_op\n\n\nclass MomentumOptimizer(Optimizer):\n \"\"\"\n\n Simple Momentum optimizer with velocity state\n\n This optimizer has a flag for Nestrov Momentum.\n\n The update equations are as follows:\n\n .. math::\n\n & velocity = mu * velocity + gradient\n\n & if (use\\_nesterov):\n\n &\\quad param = param - (gradient + mu * velocity) * learning\\_rate\n\n & else:\n\n &\\quad param = param - learning\\_rate * velocity\n\n Args:\n learning_rate (float|Variable): the learning rate used to update parameters. \\\n Can be a float value or a Variable with one float value as data element.\n momentum (float): momentum factor\n use_nesterov (bool): enables Nesterov momentum\n regularization: A Regularizer, such as\n fluid.regularizer.L2DecayRegularizer.\n name: A optional name prefix.\n\n Examples:\n .. 
code-block:: python\n\n import paddle\n import paddle.fluid as fluid\n import numpy as np\n\n place = fluid.CPUPlace()\n main = fluid.Program()\n with fluid.program_guard(main):\n x = fluid.layers.data(name='x', shape=[13], dtype='float32')\n y = fluid.layers.data(name='y', shape=[1], dtype='float32')\n y_predict = fluid.layers.fc(input=x, size=1, act=None)\n cost = fluid.layers.square_error_cost(input=y_predict, label=y)\n avg_cost = fluid.layers.mean(cost)\n\n moment_optimizer = fluid.optimizer.MomentumOptimizer(learning_rate=0.001, momentum=0.9)\n moment_optimizer.minimize(avg_cost)\n\n fetch_list = [avg_cost]\n train_reader = paddle.batch(\n paddle.dataset.uci_housing.train(), batch_size=1)\n feeder = fluid.DataFeeder(place=place, feed_list=[x, y])\n exe = fluid.Executor(place)\n exe.run(fluid.default_startup_program())\n for data in train_reader():\n exe.run(main, feed=feeder.feed(data), fetch_list=fetch_list)\n\n \"\"\"\n _velocity_acc_str = \"velocity\"\n\n def __init__(self,\n learning_rate,\n momentum,\n use_nesterov=False,\n regularization=None,\n name=None):\n assert learning_rate is not None\n assert momentum is not None\n super(MomentumOptimizer, self).__init__(\n learning_rate=learning_rate,\n regularization=regularization,\n name=name)\n self.type = \"momentum\"\n self._momentum = momentum\n self._use_nesterov = bool(use_nesterov)\n\n def _create_accumulators(self, block, parameters):\n assert isinstance(block, framework.Block)\n\n for p in parameters:\n self._add_accumulator(self._velocity_acc_str, p)\n\n def _append_optimize_op(self, block, param_and_grad):\n assert isinstance(block, framework.Block)\n\n velocity_acc = self._get_accumulator(self._velocity_acc_str,\n param_and_grad[0])\n # create the momentum optimize op\n momentum_op = block.append_op(\n type=self.type,\n inputs={\n \"Param\": param_and_grad[0],\n \"Grad\": param_and_grad[1],\n \"Velocity\": velocity_acc,\n \"LearningRate\": self._create_param_lr(param_and_grad)\n },\n outputs={\n \"ParamOut\": param_and_grad[0],\n \"VelocityOut\": velocity_acc\n },\n attrs={\"mu\": self._momentum,\n \"use_nesterov\": self._use_nesterov},\n stop_gradient=True)\n\n return momentum_op\n\n\nclass DGCMomentumOptimizer(MomentumOptimizer):\n \"\"\"\n\n Original paper is https://arxiv.org/abs/1712.01887\n\n DGC reduces the communication bandwidth by sending only the important gradients (sparse update):\\\n only gradients larger than a threshold are transmitted.\n\n To avoid losing information, DGC accumulates the rest of the gradients locally.\n\n Eventually, these gradients become large enough to be transmitted.\n\n Thus, DGC sends the large gradients immediately but eventually send all of the gradients over time.\n\n To ensure no loss of accuracy, DGC employs momentum correction and local gradient clipping on top of the gradient sparsification to maintain model performance.\n\n DGC also uses momentum factor masking and warmup training to overcome the staleness problem caused by reduced communication.\n\n This optimizer will do two things:\n\n 1. Compress the gradient by get TopK import value from tensor \\\n and use it for allreduce to reduce network bandwidth.\n\n 2. Call momentum to optimize on the cost.\n\n Args:\n learning_rate (float|Variable): the learning rate used to update parameters. 
\\\n Can be a float value or a Variable with one float value as data element.\n momentum (float): Momentum factor.\n rampup_begin_step (int): The beginning step from which gradient compression is implemented.\n rampup_step (int): How long it use the sparsity periods. Default is 1.\n for example: If the sparsity is [0.75, 0.9375, 0.984375, 0.996, 0.999], and the rampup_step is 5, \\\n it will use 0.75 at 0 step, and 0.9375 at 1 step, and so on. And when reach sparsity array ends, \\\n it will use 0.999 then and after.\n sparsity (list[float]): Get top important element from gradient tensor, the ratio is (1 - current sparsity).\n use_nesterov (bool): Enables Nesterov momentum. True means use nesterov.\n local_grad_clip_norm (float): Clip norm value if needed.\n num_trainers: The number of training nodes.\n regularization: A Regularizer, such as fluid.regularizer.L2DecayRegularizer.\n name: An optional name prefix.\n\n Examples:\n .. code-block:: python\n\n import paddle.fluid as fluid\n optimizer = fluid.optimizer.DGCMomentumOptimizer(\n learning_rate=0.0001,\n momentum=0.9,\n rampup_step=1000,\n rampup_begin_step=1252,\n sparsity=[0.999, 0.999])\n\n \"\"\"\n\n def __init__(self,\n learning_rate,\n momentum,\n rampup_begin_step,\n rampup_step=1,\n sparsity=[0.999],\n use_nesterov=False,\n local_grad_clip_norm=None,\n num_trainers=None,\n regularization=None,\n name=None):\n self._sparsity = sparsity\n self._rampup_step = rampup_step\n self._rampup_step_var = None\n\n self._rampup_begin_step = rampup_begin_step\n self._rampup_begin_step_var = None\n\n self._global_step_var = None\n self._local_grad_clip_norm = None\n self._clip_norm = None\n\n if local_grad_clip_norm is not None:\n assert isinstance(num_trainers, int)\n assert isinstance(local_grad_clip_norm, float)\n assert num_trainers > 0\n\n self._local_grad_clip_norm = local_grad_clip_norm\n self._num_trainers = num_trainers\n self._clip_norm = local_grad_clip_norm / (num_trainers *\n num_trainers)\n\n super(DGCMomentumOptimizer, self).__init__(\n learning_rate, momentum, use_nesterov, regularization, name)\n\n core.init_dgc()\n\n def _add_auto_increment_var(self, counter_name, begin, step=1):\n helper = LayerHelper('global_step_counter')\n counter, is_new_var = helper.create_or_get_global_variable(\n name=counter_name, dtype='float32', shape=[1], persistable=True)\n if is_new_var:\n helper.set_variable_initializer(\n counter,\n initializer=Constant(\n value=float(begin - 1), force_cpu=True))\n helper.main_program.global_block()._prepend_op(\n type='increment',\n inputs={'X': [counter]},\n outputs={'Out': [counter]},\n attrs={'step': float(step)},\n stop_gradient=True)\n counter.stop_gradient = True\n\n return counter\n\n def _append_dgc_ops(self, param_and_grads):\n start_program = default_startup_program()\n main_program = default_main_program()\n main_program._enable_dgc = True\n\n # step counter\n self._global_step_var = self._add_auto_increment_var(\n counter_name=core.dgc.kDGCCounterName(), begin=0)\n\n # rampup begin step var for all_reduce_op_handle\n self._rampup_begin_step_var = tensor.create_global_var(\n shape=[1],\n dtype=core.VarDesc.VarType.FP32,\n persistable=True,\n name=core.dgc.kDGCRampUpBeginStepName(),\n value=self._rampup_begin_step * 1.0,\n force_cpu=True)\n\n for param_var, grad_var in param_and_grads:\n var_numel = abs(reduce(lambda x, y: x * y, param_var.shape))\n if var_numel < 16384 or \\\n param_var.type == core.VarDesc.VarType.SELECTED_ROWS or \\\n grad_var.type == core.VarDesc.VarType.SELECTED_ROWS or \\\n 
param_var.dtype != core.VarDesc.VarType.FP32 :\n continue\n\n u_var = tensor.create_global_var(\n shape=param_var.shape,\n dtype=param_var.dtype,\n persistable=True,\n name=param_var.name + core.dgc.kDGCUName(),\n value=0.0)\n v_var = tensor.create_global_var(\n shape=param_var.shape,\n dtype=param_var.dtype,\n persistable=True,\n name=param_var.name + core.dgc.kDGCVName(),\n value=0.0)\n\n k_var = tensor.create_global_var(\n shape=[1],\n dtype=param_var.dtype,\n persistable=True,\n name=param_var.name + core.dgc.kDGCKName(),\n value=0.0,\n force_cpu=True)\n\n encoded_var = tensor.create_global_var(\n shape=[1],\n dtype=param_var.dtype,\n persistable=True,\n name=param_var.name + core.dgc.kDGCEncodedName(),\n value=0.0,\n force_cpu=False)\n\n # del back oprolevarname\n op_maker = core.op_proto_and_checker_maker\n backward = core.op_proto_and_checker_maker.OpRole.Backward\n for op in main_program.global_block().ops:\n if not self._is_the_backward_op(op):\n continue\n\n var_attr = op.all_attrs()[op_maker.kOpRoleVarAttrName()]\n if param_var.name not in var_attr:\n continue\n\n var_attr.remove(param_var.name)\n var_attr.remove(grad_var.name)\n if len(var_attr) > 1:\n op._set_attr(op_maker.kOpRoleVarAttrName(), var_attr)\n else:\n op._remove_attr(op_maker.kOpRoleVarAttrName())\n\n clip_var = grad_var\n if self._local_grad_clip_norm is not None:\n clip_var = self._append_clip_norm(grad_var, self._clip_norm)\n self._dgc_op(param_var, clip_var, grad_var, u_var, v_var, k_var,\n encoded_var)\n\n def _is_the_backward_op(self, op):\n op_maker = core.op_proto_and_checker_maker\n backward = core.op_proto_and_checker_maker.OpRole.Backward\n if op_maker.kOpRoleVarAttrName() in op.attr_names and \\\n int(op.all_attrs()[op_maker.kOpRoleAttrName()]) == int(backward):\n return True\n return False\n\n def _clip_by_norm(self, x, max_norm, name=None):\n args = {'x': x, 'max_norm': max_norm, 'name': name}\n\n helper = LayerHelper(\"dgc_clip_by_norm_op\", **args)\n\n if name is None:\n name = unique_name.generate_with_ignorable_key(\".\".join(\n [helper.name, 'tmp']))\n\n out = helper.create_variable(\n type=x.type, name=name, dtype=x.dtype, persistable=False)\n\n helper.append_op(\n type=\"dgc_clip_by_norm\",\n inputs={\"X\": x,\n \"current_step\": self._global_step_var},\n attrs={\n \"max_norm\": max_norm,\n \"rampup_begin_step\": float(self._rampup_begin_step)\n },\n outputs={\"Out\": out})\n return out\n\n def _append_clip_norm(self, grad_var, clip_norm):\n with grad_var.block.program._backward_role_guard():\n return self._clip_by_norm(\n x=grad_var, max_norm=clip_norm, name=grad_var.name)\n\n def _dgc_op(self, param_var, clip_var, grad_var, u_var, v_var, k_var,\n encoded_var):\n block = framework.default_main_program().global_block()\n op_maker = core.op_proto_and_checker_maker\n dgc_op = block.append_op(\n type=\"dgc\",\n inputs={\n \"U\": u_var,\n \"V\": v_var,\n \"Grad\": clip_var,\n \"current_step\": self._global_step_var\n },\n outputs={\n \"U_out\": u_var,\n \"V_out\": v_var,\n \"EncodeGrad\": encoded_var,\n \"k\": k_var,\n \"Grad_out\": grad_var\n },\n attrs={\n \"m\": self._momentum,\n \"sparsity\": self._sparsity,\n \"use_nesterov\": self._use_nesterov,\n \"rampup_begin_step\": float(self._rampup_begin_step),\n \"rampup_step\": float(self._rampup_step)\n },\n stop_gradient=True)\n\n backward = op_maker.OpRole.Backward\n dgc_op._set_attr(op_maker.kOpRoleAttrName(), backward)\n dgc_op._set_attr(op_maker.kOpRoleVarAttrName(),\n [param_var.name, grad_var.name])\n\n\nclass 
LarsMomentumOptimizer(Optimizer):\n \"\"\"\n Momentum optimizer with LARS support\n\n The update equations are as follows:\n\n .. math::\n\n & local\\_learning\\_rate = learning\\_rate * lars\\_coeff * \\\\\n \\\\frac{||param||}{||gradient|| + lars\\_weight\\_decay * ||param||}\n\n & velocity = mu * velocity + local\\_learning\\_rate * (gradient + lars\\_weight\\_decay * param)\n\n & param = param - velocity\n\n Args:\n learning_rate (float|Variable): the learning rate used to update parameters. \\\n Can be a float value or a Variable with one float value as data element.\n momentum (float): momentum factor\n lars_coeff (float): defines how much we trust the layer to change its weights.\n lars_weight_decay (float): weight decay coefficient for decaying using LARS.\n regularization: A Regularizer, such as\n fluid.regularizer.L2DecayRegularizer.\n name: A optional name prefix.\n\n\n Examples:\n .. code-block:: python\n\n import paddle.fluid as fluid\n import numpy as np\n\n np_inp = np.array([[1.0, 2.0], [3.0, 4.0]], dtype=np.float32)\n inp = fluid.layers.data(\n name=\"inp\", shape=[2, 2], append_batch_size=False)\n out = fluid.layers.fc(inp, size=3)\n out = fluid.layers.reduce_sum(out)\n optimizer = fluid.optimizer.LarsMomentumOptimizer(learning_rate=0.001, momentum=0.9)\n optimizer.minimize(out)\n\n exe = fluid.Executor(fluid.CPUPlace())\n exe.run(fluid.default_startup_program())\n exe.run(\n feed={\"inp\": np_inp},\n fetch_list=[out.name])\n \"\"\"\n _velocity_acc_str = \"velocity\"\n\n def __init__(self,\n learning_rate,\n momentum,\n lars_coeff=0.001,\n lars_weight_decay=0.0005,\n regularization=None,\n name=None):\n assert learning_rate is not None\n assert momentum is not None\n super(LarsMomentumOptimizer, self).__init__(\n learning_rate=learning_rate,\n regularization=regularization,\n name=name)\n self.type = \"lars_momentum\"\n self._momentum = momentum\n self._lars_coeff = float(lars_coeff)\n self._lars_weight_decay = float(lars_weight_decay)\n\n def _create_accumulators(self, block, parameters):\n assert isinstance(block, framework.Block)\n\n for p in parameters:\n self._add_accumulator(self._velocity_acc_str, p)\n\n def _append_optimize_op(self, block, param_and_grad):\n assert isinstance(block, framework.Block)\n\n velocity_acc = self._get_accumulator(self._velocity_acc_str,\n param_and_grad[0])\n # create the momentum optimize op\n momentum_op = block.append_op(\n type=self.type,\n inputs={\n \"Param\": param_and_grad[0],\n \"Grad\": param_and_grad[1],\n \"Velocity\": velocity_acc,\n \"LearningRate\": self._create_param_lr(param_and_grad)\n },\n outputs={\n \"ParamOut\": param_and_grad[0],\n \"VelocityOut\": velocity_acc\n },\n attrs={\n \"mu\": self._momentum,\n \"lars_coeff\": self._lars_coeff,\n \"lars_weight_decay\": self._lars_weight_decay\n },\n stop_gradient=True)\n\n return momentum_op\n\n\nclass AdagradOptimizer(Optimizer):\n \"\"\"\n **Adaptive Gradient Algorithm (Adagrad)**\n\n The update is done as follows:\n\n .. math::\n\n moment\\_out &= moment + grad * grad\n\n param\\_out &= param - \\\\frac{learning\\_rate * grad}{\\sqrt{moment\\_out} + \\epsilon}\n\n The original paper(http://www.jmlr.org/papers/volume12/duchi11a/duchi11a.pdf)\n does not have the epsilon attribute. It is added here in our implementation\n as also proposed here: http://cs231n.github.io/neural-networks-3/#ada\n for numerical stability to avoid the division by zero error.\n\n Args:\n learning_rate (float|Variable): the learning rate used to update parameters. 
\\\n Can be a float value or a Variable with one float value as data element.\n epsilon (float): a small float value for numerical stability.\n regularization: A Regularizer, such as\n fluid.regularizer.L2DecayRegularizer.\n name: A optional name prefix.\n initial_accumulator_value (float): Initial value for moment accumulator.\n\n Examples:\n .. code-block:: python\n\n import paddle.fluid as fluid\n import numpy as np\n\n np_inp = np.array([[1.0, 2.0], [3.0, 4.0]], dtype=np.float32)\n inp = fluid.layers.data(\n name=\"inp\", shape=[2, 2], append_batch_size=False)\n out = fluid.layers.fc(inp, size=3)\n out = fluid.layers.reduce_sum(out)\n optimizer = fluid.optimizer.Adagrad(learning_rate=0.2)\n optimizer.minimize(out)\n\n exe = fluid.Executor(fluid.CPUPlace())\n exe.run(fluid.default_startup_program())\n exe.run(\n feed={\"inp\": np_inp},\n fetch_list=[out.name])\n \"\"\"\n _moment_acc_str = \"moment\"\n\n def __init__(self,\n learning_rate,\n epsilon=1.0e-6,\n regularization=None,\n name=None,\n initial_accumulator_value=0.0):\n assert learning_rate is not None\n assert epsilon is not None\n super(AdagradOptimizer, self).__init__(\n learning_rate=learning_rate,\n regularization=regularization,\n name=name)\n self.type = \"adagrad\"\n self._epsilon = epsilon\n self.initial_accumulator_value = initial_accumulator_value\n\n def _create_accumulators(self, block, parameters):\n assert isinstance(block, framework.Block)\n\n for p in parameters:\n self._add_accumulator(self._moment_acc_str, p)\n\n def _append_optimize_op(self, block, param_and_grad):\n assert isinstance(block, framework.Block)\n\n moment_acc = self._get_accumulator(self._moment_acc_str,\n param_and_grad[0])\n startup_block = framework.default_startup_program().global_block()\n startup_block.append_op(\n type='fill_constant',\n inputs={},\n outputs={'Out': [moment_acc]},\n attrs={\n 'dtype': moment_acc.dtype,\n 'value': self.initial_accumulator_value,\n 'shape': moment_acc.shape,\n })\n\n # Create the adagrad optimizer op\n adagrad_op = block.append_op(\n type=self.type,\n inputs={\n \"Param\": param_and_grad[0],\n \"Grad\": param_and_grad[1],\n \"Moment\": moment_acc,\n \"LearningRate\": self._create_param_lr(param_and_grad)\n },\n outputs={\"ParamOut\": param_and_grad[0],\n \"MomentOut\": moment_acc},\n attrs={\"epsilon\": self._epsilon},\n stop_gradient=True)\n\n return adagrad_op\n\n\nclass AdamOptimizer(Optimizer):\n \"\"\"\n This implements the Adam optimizer from Section 2 of the Adam\n paper : https://arxiv.org/abs/1412.6980.\n Adam is a first-order gradient-based optimization method based on\n adaptive estimates of lower-order moments.\n\n Adam updates:\n\n .. math::\n\n t & = t + 1\n\n moment\\_1\\_out & = {\\\\beta}_1 * moment\\_1 + (1 - {\\\\beta}_1) * grad\n\n moment\\_2\\_out & = {\\\\beta}_2 * moment\\_2 + (1 - {\\\\beta}_2) * grad * grad\n\n learning\\_rate & = learning\\_rate * \\\\\n \\\\frac{\\sqrt{1 - {\\\\beta}_2^t}}{1 - {\\\\beta}_1^t}\n\n param\\_out & = param - learning\\_rate * \\\\frac{moment\\_1}{\\sqrt{moment\\_2} + \\epsilon}\n\n Args:\n learning_rate (float|Variable): the learning rate used to update parameters. 
\\\n Can be a float value or a Variable with one float value as data element.\n beta1 (float): The exponential decay rate for the 1st moment estimates.\n beta2 (float): The exponential decay rate for the 2nd moment estimates.\n epsilon (float): a small float value for numerical stability.\n regularization: A Regularizer, such as fluid.regularizer.L2DecayRegularizer.\n name: A optional name prefix.\n lazy_mode(bool: false): The official Adam algorithm has two moving-average accumulators\n the accumulators are updated at every step. Every element of the two moving-average is updated\n in both dense mode and sparse mode. If the size of parameter is very large, then the update\n may be very slow. The lazy mode only update the element that has gradient is the current\n mini-batch, so it will be much more faster. But this mode has different semantics with the\n original Adam algorithm and may lead to different result.\n\n Examples:\n .. code-block:: python\n\n import paddle\n import paddle.fluid as fluid\n\n place = fluid.CPUPlace()\n main = fluid.Program()\n with fluid.program_guard(main):\n x = fluid.layers.data(name='x', shape=[13], dtype='float32')\n y = fluid.layers.data(name='y', shape=[1], dtype='float32')\n y_predict = fluid.layers.fc(input=x, size=1, act=None)\n cost = fluid.layers.square_error_cost(input=y_predict, label=y)\n avg_cost = fluid.layers.mean(cost)\n\n adam_optimizer = fluid.optimizer.AdamOptimizer(0.01)\n adam_optimizer.minimize(avg_cost)\n\n fetch_list = [avg_cost]\n train_reader = paddle.batch(\n paddle.dataset.uci_housing.train(), batch_size=1)\n feeder = fluid.DataFeeder(place=place, feed_list=[x, y])\n exe = fluid.Executor(place)\n exe.run(fluid.default_startup_program())\n for data in train_reader():\n exe.run(main, feed=feeder.feed(data), fetch_list=fetch_list)\n\n \"\"\"\n _moment1_acc_str = \"moment1\"\n _moment2_acc_str = \"moment2\"\n _beta1_pow_acc_str = \"beta1_pow_acc\"\n _beta2_pow_acc_str = \"beta2_pow_acc\"\n\n def __init__(self,\n learning_rate=0.001,\n beta1=0.9,\n beta2=0.999,\n epsilon=1e-8,\n regularization=None,\n name=None,\n lazy_mode=False):\n assert learning_rate is not None\n assert beta1 is not None\n assert beta2 is not None\n assert epsilon is not None\n super(AdamOptimizer, self).__init__(\n learning_rate=learning_rate,\n regularization=regularization,\n name=name)\n self.type = \"adam\"\n self._beta1 = beta1\n self._beta2 = beta2\n self._epsilon = epsilon\n self._lazy_mode = lazy_mode\n\n def _create_accumulators(self, block, parameters):\n assert isinstance(block, framework.Block)\n\n # Create accumulator tensors for first and second moments\n for p in parameters:\n self._add_accumulator(self._moment1_acc_str, p)\n self._add_accumulator(self._moment2_acc_str, p)\n self._add_accumulator(\n name=self._beta1_pow_acc_str,\n param=p,\n dtype='float32',\n fill_value=self._beta1,\n shape=[1])\n self._add_accumulator(\n name=self._beta2_pow_acc_str,\n param=p,\n dtype='float32',\n fill_value=self._beta2,\n shape=[1])\n\n def _append_optimize_op(self, block, param_and_grad):\n assert isinstance(block, framework.Block)\n\n moment1 = self._get_accumulator(self._moment1_acc_str,\n param_and_grad[0])\n moment2 = self._get_accumulator(self._moment2_acc_str,\n param_and_grad[0])\n beta1_pow_acc = self._get_accumulator(self._beta1_pow_acc_str,\n param_and_grad[0])\n beta2_pow_acc = self._get_accumulator(self._beta2_pow_acc_str,\n param_and_grad[0])\n\n # create the adam optimize op\n adam_op = block.append_op(\n type=self.type,\n inputs={\n \"Param\": 
param_and_grad[0],\n \"Grad\": param_and_grad[1],\n \"LearningRate\": self._create_param_lr(param_and_grad),\n \"Moment1\": moment1,\n \"Moment2\": moment2,\n \"Beta1Pow\": beta1_pow_acc,\n \"Beta2Pow\": beta2_pow_acc\n },\n outputs={\n \"ParamOut\": param_and_grad[0],\n \"Moment1Out\": moment1,\n \"Moment2Out\": moment2\n },\n attrs={\n \"beta1\": self._beta1,\n \"beta2\": self._beta2,\n \"epsilon\": self._epsilon,\n \"lazy_mode\": self._lazy_mode,\n \"min_row_size_to_use_multithread\": 1000\n },\n stop_gradient=True)\n\n return adam_op\n\n def _finish_update(self, block, param_and_grads):\n \"\"\"Update Beta1 and Beta2 Power accumulators\n \"\"\"\n assert isinstance(block, framework.Block)\n main_block = block.program.global_block()\n for param, grad in param_and_grads:\n if grad is None or param.trainable is False:\n continue\n with param.block.program._optimized_guard(\n [param, grad]), name_scope(\"optimizer\"):\n beta1_pow_acc = self._get_accumulator(self._beta1_pow_acc_str,\n param)\n beta2_pow_acc = self._get_accumulator(self._beta2_pow_acc_str,\n param)\n main_block.append_op(\n type=\"scale\",\n inputs={\"X\": beta1_pow_acc},\n outputs={\"Out\": beta1_pow_acc},\n attrs={\"scale\": self._beta1},\n stop_gradient=True)\n\n main_block.append_op(\n type=\"scale\",\n inputs={\"X\": beta2_pow_acc},\n outputs={\"Out\": beta2_pow_acc},\n attrs={\"scale\": self._beta2},\n stop_gradient=True)\n\n\nclass AdamaxOptimizer(Optimizer):\n \"\"\"\n We implement the Adamax optimizer from Section 7 of the Adam\n paper: https://arxiv.org/abs/1412.6980. Adamax is a variant of the\n Adam algorithm based on the infinity norm.\n\n Adamax updates:\n\n .. math::\n\n t & = t + 1\n\n moment\\_out & = {\\\\beta}_1 * moment + (1 - {\\\\beta}_1) * grad\n\n inf\\_norm\\_out & = max({\\\\beta}_2 * inf\\_norm + \\epsilon, |grad|)\n\n learning\\_rate & = \\\\frac{learning\\_rate}{1 - {\\\\beta}_1^t}\n\n param\\_out & = param - learning\\_rate * \\\\frac{moment\\_out}{inf\\_norm\\_out}\n\n\n The original paper does not have an epsilon attribute.\n However, it is added here for numerical stability to prevent the\n division by 0 error.\n\n Examples:\n .. code-block:: python\n\n import paddle.fluid as fluid\n import numpy\n\n # First create the Executor.\n place = fluid.CPUPlace() # fluid.CUDAPlace(0)\n exe = fluid.Executor(place)\n\n train_program = fluid.Program()\n startup_program = fluid.Program()\n with fluid.program_guard(train_program, startup_program):\n data = fluid.layers.data(name='X', shape=[1], dtype='float32')\n hidden = fluid.layers.fc(input=data, size=10)\n loss = fluid.layers.mean(hidden)\n adam = fluid.optimizer.Adamax(learning_rate=0.2)\n adam.minimize(loss)\n\n # Run the startup program once and only once.\n exe.run(startup_program)\n\n x = numpy.random.random(size=(10, 1)).astype('float32')\n outs = exe.run(program=train_program,\n feed={'X': x},\n fetch_list=[loss.name])\n\n Args:\n learning_rate (float|Variable): the learning rate used to update parameters. 
\\\n Can be a float value or a Variable with one float value as data element.\n beta1 (float): The exponential decay rate for the 1st moment estimates.\n beta2 (float): The exponential decay rate for the 2nd moment estimates.\n epsilon (float): a small float value for numerical stability.\n regularization: A Regularizer, such as\n fluid.regularizer.L2DecayRegularizer.\n name: An optional name prefix.\n\n Notes:\n Currently, AdamaxOptimizer doesn't support sparse parameter optimization.\n \"\"\"\n _moment_acc_str = \"moment\"\n _inf_norm_acc_str = \"inf_norm\"\n _beta1_pow_acc_str = \"beta1_pow_acc\"\n\n def __init__(self,\n learning_rate=0.001,\n beta1=0.9,\n beta2=0.999,\n epsilon=1e-8,\n regularization=None,\n name=None):\n assert learning_rate is not None\n assert beta1 is not None\n assert beta2 is not None\n assert epsilon is not None\n super(AdamaxOptimizer, self).__init__(\n learning_rate=learning_rate,\n regularization=regularization,\n name=name)\n self.type = \"adamax\"\n self._beta1 = beta1\n self._beta2 = beta2\n self._epsilon = epsilon\n\n def _create_accumulators(self, block, parameters):\n # Create accumulator tensors for first moment and infinity norm\n for p in parameters:\n self._add_accumulator(self._moment_acc_str, p)\n self._add_accumulator(self._inf_norm_acc_str, p)\n self._add_accumulator(\n name=self._beta1_pow_acc_str,\n param=p,\n dtype='float32',\n fill_value=self._beta1,\n shape=[1])\n\n def _append_optimize_op(self, block, param_and_grad):\n assert isinstance(block, framework.Block)\n\n moment = self._get_accumulator(self._moment_acc_str, param_and_grad[0])\n inf_norm = self._get_accumulator(self._inf_norm_acc_str,\n param_and_grad[0])\n beta1_pow_acc = self._get_accumulator(self._beta1_pow_acc_str,\n param_and_grad[0])\n # create the adamax optimize op\n adamax_op = block.append_op(\n type=self.type,\n inputs={\n \"Param\": param_and_grad[0],\n \"Grad\": param_and_grad[1],\n \"LearningRate\": self._create_param_lr(param_and_grad),\n \"Moment\": moment,\n \"InfNorm\": inf_norm,\n \"Beta1Pow\": beta1_pow_acc\n },\n outputs={\n \"ParamOut\": param_and_grad[0],\n \"MomentOut\": moment,\n \"InfNormOut\": inf_norm\n },\n attrs={\n \"beta1\": self._beta1,\n \"beta2\": self._beta2,\n \"epsilon\": self._epsilon\n },\n stop_gradient=True)\n\n return adamax_op\n\n def _finish_update(self, block, parameters_and_grads):\n \"\"\"Update Beta1 Power accumulator\n \"\"\"\n assert isinstance(block, framework.Block)\n main_block = block.program.global_block()\n for param, grad in parameters_and_grads:\n if grad is None or param.trainable is False:\n continue\n with param.block.program._optimized_guard(\n [param, grad]), name_scope('adamax'):\n beta1_pow_acc = self._get_accumulator(self._beta1_pow_acc_str,\n param)\n main_block.append_op(\n type=\"scale\",\n inputs={\"X\": beta1_pow_acc},\n outputs={\"Out\": beta1_pow_acc},\n attrs={\"scale\": self._beta1},\n stop_gradient=True)\n\n\nclass DecayedAdagradOptimizer(Optimizer):\n \"\"\"\n **Decayed Adagrad Optimizer**\n\n The original paper (http://www.jmlr.org/papers/volume12/duchi11a/duchi11a.pdf)\n\n The update is done as follows:\n\n .. math::\n\n moment\\_out & = decay * moment + (1 - decay) * grad * grad\n\n param\\_out & = param - \\\\frac{learning\\_rate * grad}{\\sqrt{moment\\_out} + \\epsilon}\n\n The original paper\n does not have an epsilon attribute.
 It is added here for numerical\n stability to avoid the division by zero error.\n\n Args:\n learning_rate (float|Variable): the learning rate used to update parameters. \\\n Can be a float value or a Variable with one float value as data element.\n decay (float): decay rate.\n epsilon (float): a small float value for numerical stability.\n regularization: A Regularizer, such as\n fluid.regularizer.L2DecayRegularizer.\n name: An optional name prefix.\n\n Examples:\n .. code-block:: python\n\n import paddle.fluid as fluid\n import paddle.fluid.layers as layers\n\n x = layers.data(name='x', shape=[-1, 10], dtype='float32')\n trans = layers.fc(x, 100)\n cost = layers.reduce_mean(trans)\n optimizer = fluid.optimizer.DecayedAdagrad(learning_rate=0.2)\n optimizer.minimize(cost)\n\n Notes:\n Currently, DecayedAdagradOptimizer doesn't support sparse parameter optimization.\n \"\"\"\n _moment_acc_str = \"moment\"\n\n def __init__(self,\n learning_rate,\n decay=0.95,\n epsilon=1.0e-6,\n regularization=None,\n name=None):\n assert learning_rate is not None\n assert decay is not None\n assert epsilon is not None\n\n super(DecayedAdagradOptimizer, self).__init__(\n learning_rate=learning_rate,\n regularization=regularization,\n name=name)\n self.type = \"decayed_adagrad\"\n self._decay = decay\n self._epsilon = epsilon\n\n def _create_accumulators(self, block, parameters):\n assert isinstance(block, framework.Block)\n\n for p in parameters:\n self._add_accumulator(self._moment_acc_str, p)\n\n def _append_optimize_op(self, block, param_and_grad):\n assert isinstance(block, framework.Block)\n\n moment_acc = self._get_accumulator(self._moment_acc_str,\n param_and_grad[0])\n\n # Create the decayed adagrad optimizer op\n decayed_adagrad_op = block.append_op(\n type=self.type,\n inputs={\n \"Param\": param_and_grad[0],\n \"Grad\": param_and_grad[1],\n \"Moment\": moment_acc,\n \"LearningRate\": self._create_param_lr(param_and_grad)\n },\n outputs={\"ParamOut\": param_and_grad[0],\n \"MomentOut\": moment_acc},\n attrs={\"epsilon\": self._epsilon},\n stop_gradient=True)\n\n return decayed_adagrad_op\n\n\nclass AdadeltaOptimizer(Optimizer):\n \"\"\"\n **Adadelta Optimizer**\n\n Simple Adadelta optimizer with average squared grad state and\n average squared update state.\n For details of Adadelta, please refer to\n `ADADELTA: AN ADAPTIVE LEARNING RATE METHOD\n <http://www.matthewzeiler.com/pubs/googleTR2012/googleTR2012.pdf>`_.\n\n .. math::\n\n E(g_t^2) &= \\\\rho * E(g_{t-1}^2) + (1-\\\\rho) * g^2 \\\\\\\\\n learning\\\\_rate &= \\\\sqrt{ ( E(dx_{t-1}^2) + \\\\epsilon ) / ( \\\\\n E(g_t^2) + \\\\epsilon ) } \\\\\\\\\n E(dx_t^2) &= \\\\rho * E(dx_{t-1}^2) + (1-\\\\rho) * (-g*learning\\\\_rate)^2\n\n Args:\n learning_rate(float): global learning rate\n rho(float): rho in equation\n epsilon(float): epsilon in equation\n regularization: A Regularizer, such as\n fluid.regularizer.L2DecayRegularizer.\n name: An optional name prefix.\n\n Examples:\n ..
 code-block:: python\n\n import paddle.fluid as fluid\n\n # minimal setup so the example is self-contained\n image = fluid.layers.data(name='image', shape=[28], dtype='float32')\n fc = fluid.layers.fc(image, size=10)\n cost = fluid.layers.reduce_mean(fc)\n optimizer = fluid.optimizer.Adadelta(\n learning_rate=0.0003, epsilon=1.0e-6, rho=0.95)\n _, params_grads = optimizer.minimize(cost)\n\n Notes:\n Currently, AdadeltaOptimizer doesn't support sparse parameter optimization.\n \"\"\"\n\n _avg_squared_grad_acc_str = \"_avg_squared_grad\"\n _avg_squared_update_acc_str = \"_avg_squared_update\"\n\n def __init__(self,\n learning_rate,\n epsilon=1.0e-6,\n rho=0.95,\n regularization=None,\n name=None):\n if learning_rate is None:\n raise ValueError(\"learning_rate is not set.\")\n if epsilon is None:\n raise ValueError(\"epsilon is not set.\")\n if rho is None:\n raise ValueError(\"rho is not set.\")\n super(AdadeltaOptimizer, self).__init__(\n learning_rate=learning_rate,\n regularization=regularization,\n name=name)\n self.type = \"adadelta\"\n self._epsilon = epsilon\n self._rho = rho\n\n def _create_accumulators(self, block, parameters):\n if not isinstance(block, framework.Block):\n raise TypeError(\"block is not instance of framework.Block.\")\n\n for p in parameters:\n self._add_accumulator(self._avg_squared_grad_acc_str, p)\n self._add_accumulator(self._avg_squared_update_acc_str, p)\n\n def _append_optimize_op(self, block, param_and_grad):\n if not isinstance(block, framework.Block):\n raise TypeError(\"block is not instance of framework.Block.\")\n\n avg_squared_grad_acc = self._get_accumulator(\n self._avg_squared_grad_acc_str, param_and_grad[0])\n avg_squared_update_acc = self._get_accumulator(\n self._avg_squared_update_acc_str, param_and_grad[0])\n\n # Create the adadelta optimizer op\n adadelta_op = block.append_op(\n type=self.type,\n inputs={\n \"Param\": param_and_grad[0],\n \"Grad\": param_and_grad[1],\n \"AvgSquaredGrad\": avg_squared_grad_acc,\n \"AvgSquaredUpdate\": avg_squared_update_acc\n },\n outputs={\n \"ParamOut\": param_and_grad[0],\n \"AvgSquaredGradOut\": avg_squared_grad_acc,\n \"AvgSquaredUpdateOut\": avg_squared_update_acc\n },\n attrs={\"epsilon\": self._epsilon,\n \"rho\": self._rho},\n stop_gradient=True)\n\n return adadelta_op\n\n\nclass RMSPropOptimizer(Optimizer):\n \"\"\"\n Root Mean Squared Propagation (RMSProp) is an unpublished, adaptive learning\n rate method. The original slides proposed RMSProp: Slide 29 of\n http://www.cs.toronto.edu/~tijmen/csc321/slides/lecture_slides_lec6.pdf .\n\n The original equation is as follows:\n\n .. math::\n\n r(w, t) & = \\\\rho r(w, t-1) + (1 - \\\\rho)(\\\\nabla Q_{i}(w))^2\n\n w & = w - \\\\frac{\\\\eta} {\\\\sqrt{r(w,t) + \\\\epsilon}} \\\\nabla Q_{i}(w)\n\n The first equation calculates the moving average of the squared gradient for\n each weight. The gradient is then divided by :math:`\\\\sqrt{r(w,t)}`.\n\n In some cases, adding a momentum term :math:`\\\\beta` is beneficial.\n In our implementation, Nesterov momentum is used:\n\n .. math::\n\n r(w, t) & = \\\\rho r(w, t-1) + (1 - \\\\rho)(\\\\nabla Q_{i}(w))^2\n\n v(w, t) & = \\\\beta v(w, t-1) + \\\\frac{\\\\eta} {\\\\sqrt{r(w,t) +\n \\\\epsilon}} \\\\nabla Q_{i}(w)\n\n w & = w - v(w, t)\n\n if centered is True:\n\n .. math::\n\n r(w, t) & = \\\\rho r(w, t-1) + (1 - \\\\rho)(\\\\nabla Q_{i}(w))^2\n\n g(w, t) & = \\\\rho g(w, t-1) + (1 - \\\\rho)\\\\nabla Q_{i}(w)\n\n v(w, t) & = \\\\beta v(w, t-1) + \\\\frac{\\\\eta} {\\\\sqrt{r(w,t) - (g(w, t))^2 +\n \\\\epsilon}} \\\\nabla Q_{i}(w)\n\n w & = w - v(w, t)\n\n where :math:`\\\\rho` is a hyperparameter and typical values are 0.9, 0.95\n and so on. :math:`\\\\beta` is the momentum term. :math:`\\\\epsilon` is a\n smoothing term to avoid division by zero, usually set somewhere in the range\n from 1e-4 to 1e-8.\n\n\n Args:\n learning_rate(float): global learning rate.\n rho(float): rho is :math:`\\\\rho` in the equation, set to 0.95 by default.\n epsilon(float): :math:`\\\\epsilon` in the equation is a smoothing term to\n avoid division by zero, set to 1e-6 by default.\n momentum(float): :math:`\\\\beta` in the equation is the momentum term,\n set to 0.0 by default.\n centered(bool): If True, gradients are normalized by the estimated variance of\n the gradient; if False, by the uncentered second moment. Setting this to\n True may help with training, but is slightly more expensive in terms of\n computation and memory. Defaults to False.\n regularization: A Regularizer, such as\n fluid.regularizer.L2DecayRegularizer.\n name: An optional name prefix.\n\n Raises:\n ValueError: If learning_rate, rho, epsilon, momentum are None.\n\n Examples:\n .. code-block:: python\n\n import paddle\n import paddle.fluid as fluid\n\n place = fluid.CPUPlace()\n main = fluid.Program()\n with fluid.program_guard(main):\n x = fluid.layers.data(name='x', shape=[13], dtype='float32')\n y = fluid.layers.data(name='y', shape=[1], dtype='float32')\n y_predict = fluid.layers.fc(input=x, size=1, act=None)\n cost = fluid.layers.square_error_cost(input=y_predict, label=y)\n avg_cost = fluid.layers.mean(cost)\n\n rms_optimizer = fluid.optimizer.RMSProp(learning_rate=0.1)\n rms_optimizer.minimize(avg_cost)\n\n fetch_list = [avg_cost]\n train_reader = paddle.batch(\n paddle.dataset.uci_housing.train(), batch_size=1)\n feeder = fluid.DataFeeder(place=place, feed_list=[x, y])\n exe = fluid.Executor(place)\n exe.run(fluid.default_startup_program())\n for data in train_reader():\n exe.run(main, feed=feeder.feed(data), fetch_list=fetch_list)\n\n \"\"\"\n\n _momentum_acc_str = \"momentum\"\n _mean_square_acc_str = \"mean_square\"\n _mean_grad_acc_str = \"mean_grad\"\n\n def __init__(self,\n learning_rate,\n rho=0.95,\n epsilon=1.0e-6,\n momentum=0.0,\n centered=False,\n regularization=None,\n name=None):\n super(RMSPropOptimizer, self).__init__(\n learning_rate=learning_rate,\n regularization=regularization,\n name=name)\n if learning_rate is None:\n raise ValueError(\"learning_rate is not set.\")\n if rho is None:\n raise ValueError(\"rho is not set.\")\n if epsilon is None:\n raise ValueError(\"epsilon is not set.\")\n if momentum is None:\n raise ValueError(\"momentum is not set.\")\n\n self.type = \"rmsprop\"\n self._rho = rho\n self._epsilon = epsilon\n self._momentum = momentum\n self._centered = centered\n\n def _create_accumulators(self, block, parameters):\n if not isinstance(block, framework.Block):\n raise TypeError(\"block is not instance of framework.Block.\")\n\n for p in parameters:\n self._add_accumulator(self._momentum_acc_str, p)\n self._add_accumulator(self._mean_square_acc_str, p)\n self._add_accumulator(self._mean_grad_acc_str, p)\n\n def _append_optimize_op(self, block, param_and_grad):\n if not isinstance(block, framework.Block):\n raise TypeError(\"block is not instance of framework.Block.\")\n\n momentum_acc = self._get_accumulator(self._momentum_acc_str,\n param_and_grad[0])\n mean_square_acc = self._get_accumulator(self._mean_square_acc_str,\n param_and_grad[0])\n mean_grad_acc = self._get_accumulator(self._mean_grad_acc_str,\n param_and_grad[0])\n rmsprop_op = block.append_op(\n type=self.type,\n inputs={\n \"Param\": param_and_grad[0],\n \"Grad\": param_and_grad[1],\n \"Moment\":
 momentum_acc,\n \"MeanSquare\": mean_square_acc,\n \"MeanGrad\": mean_grad_acc,\n \"LearningRate\": self._create_param_lr(param_and_grad),\n },\n outputs={\n \"ParamOut\": param_and_grad[0],\n \"MomentOut\": momentum_acc,\n \"MeanSquareOut\": mean_square_acc,\n \"MeanGradOut\": mean_grad_acc\n },\n attrs={\n \"epsilon\": self._epsilon,\n \"decay\": self._rho,\n \"momentum\": self._momentum,\n \"centered\": self._centered\n },\n stop_gradient=True)\n\n return rmsprop_op\n\n\nclass FtrlOptimizer(Optimizer):\n \"\"\"\n FTRL (Follow The Regularized Leader) Optimizer.\n\n The paper that proposed Follow The Regularized Leader (FTRL):\n (https://www.eecs.tufts.edu/~dsculley/papers/ad-click-prediction.pdf)\n\n .. math::\n\n &new\\_accum = squared\\_accum + grad^2\n\n &if (lr\\_power == -0.5):\n\n &\\quad linear\\_accum += grad - \\\\frac{\\\\sqrt{new\\_accum} - \\\\sqrt{squared\\_accum}}{learning\\_rate * param}\n\n &else:\n\n &\\quad linear\\_accum += grad - \\\\frac{new\\_accum^{-lr\\_power} - accum^{-lr\\_power}}{learning\\_rate * param}\n\n\n &x = l1 * sign(linear\\_accum) - linear\\_accum\n\n &if (lr\\_power == -0.5):\n\n &\\quad y = \\\\frac{\\\\sqrt{new\\_accum}}{learning\\_rate} + (2 * l2)\n\n &\\quad pre\\_shrink = \\\\frac{x}{y}\n\n &\\quad param = (abs(linear\\_accum) > l1).select(pre\\_shrink, 0.0)\n\n &else:\n\n &\\quad y = \\\\frac{new\\_accum^{-lr\\_power}}{learning\\_rate} + (2 * l2)\n\n &\\quad pre\\_shrink = \\\\frac{x}{y}\n\n &\\quad param = (abs(linear\\_accum) > l1).select(pre\\_shrink, 0.0)\n\n &squared\\_accum += grad^2\n\n Args:\n learning_rate (float|Variable): global learning rate.\n l1 (float): L1 regularization strength.\n l2 (float): L2 regularization strength.\n lr_power (float): Learning Rate Power.\n regularization: A Regularizer, such as\n fluid.regularizer.L2DecayRegularizer.\n name: An optional name prefix.\n\n Raises:\n ValueError: If learning_rate is None.\n\n Examples:\n ..
 code-block:: python\n\n import paddle\n import paddle.fluid as fluid\n\n place = fluid.CPUPlace()\n main = fluid.Program()\n with fluid.program_guard(main):\n x = fluid.layers.data(name='x', shape=[13], dtype='float32')\n y = fluid.layers.data(name='y', shape=[1], dtype='float32')\n y_predict = fluid.layers.fc(input=x, size=1, act=None)\n cost = fluid.layers.square_error_cost(input=y_predict, label=y)\n avg_cost = fluid.layers.mean(cost)\n\n ftrl_optimizer = fluid.optimizer.Ftrl(learning_rate=0.1)\n ftrl_optimizer.minimize(avg_cost)\n\n fetch_list = [avg_cost]\n train_reader = paddle.batch(\n paddle.dataset.uci_housing.train(), batch_size=1)\n feeder = fluid.DataFeeder(place=place, feed_list=[x, y])\n exe = fluid.Executor(place)\n exe.run(fluid.default_startup_program())\n for data in train_reader():\n exe.run(main, feed=feeder.feed(data), fetch_list=fetch_list)\n\n Notes:\n Currently, FtrlOptimizer doesn't support sparse parameter optimization.\n \"\"\"\n\n _squared_acc_str = \"squared\"\n _linear_acc_str = \"linear\"\n\n def __init__(self,\n learning_rate,\n l1=0.0,\n l2=0.0,\n lr_power=-0.5,\n regularization=None,\n name=None):\n super(FtrlOptimizer, self).__init__(\n learning_rate=learning_rate,\n regularization=regularization,\n name=name)\n if learning_rate is None:\n raise ValueError(\"learning_rate is not set.\")\n\n self.type = \"ftrl\"\n self._l1 = l1\n self._l2 = l2\n self._lr_power = lr_power\n\n def _create_accumulators(self, block, parameters):\n if not isinstance(block, framework.Block):\n raise TypeError(\"block is not instance of framework.Block.\")\n\n for p in parameters:\n self._add_accumulator(self._squared_acc_str, p)\n self._add_accumulator(self._linear_acc_str, p)\n\n def _append_optimize_op(self, block, param_and_grad):\n if not isinstance(block, framework.Block):\n raise TypeError(\"block is not instance of framework.Block.\")\n\n squared_acc = self._get_accumulator(self._squared_acc_str,\n param_and_grad[0])\n linear_acc = self._get_accumulator(self._linear_acc_str,\n param_and_grad[0])\n ftrl_op = block.append_op(\n type=self.type,\n inputs={\n \"Param\": param_and_grad[0],\n \"Grad\": param_and_grad[1],\n \"SquaredAccumulator\": squared_acc,\n \"LinearAccumulator\": linear_acc,\n \"LearningRate\": self._create_param_lr(param_and_grad),\n },\n outputs={\n \"ParamOut\": param_and_grad[0],\n \"SquaredAccumOut\": squared_acc,\n \"LinearAccumOut\": linear_acc\n },\n attrs={\"l1\": self._l1,\n \"l2\": self._l2,\n \"lr_power\": self._lr_power},\n stop_gradient=True)\n\n return ftrl_op\n\n\nclass LambOptimizer(AdamOptimizer):\n \"\"\"\n LAMB (Layer-wise Adaptive Moments optimizer for Batching training) Optimizer.\n\n LAMB Optimizer is designed to scale up the batch size of training without losing \n accuracy, which supports adaptive element-wise updating and accurate layer-wise \n correction. For more information, please refer to `Large Batch Optimization for \n Deep Learning: Training BERT in 76 minutes <https://arxiv.org/abs/1904.00962>`_ .\n\n The updating of parameters follows:\n\n ..
math::\n\n m_t &= \\\\beta_1 m_{t - 1}+ (1 - \\\\beta_1)g_t \\\\\n\n v_t &= \\\\beta_2 v_{t - 1} + (1 - \\\\beta_2)g_t^2 \\\\\n\n r_t &= \\\\frac{m_t}{\\\\sqrt{v_t}+\\\\epsilon} \\\\\n\n w_t &= w_{t-1} -\\\\eta_t \\\\frac{\\\\left \\| w_{t-1}\\\\right \\|}{\\\\left \\| r_t + \\\\lambda w_{t-1}\\\\right \\|} (r_t + \\\\lambda w_{t-1})\n\n\n where :math:`m` is the 1st moment, and :math:`v` the 2nd moment, :math:`\\\\eta` the \n learning rate, :math:`\\\\lambda` the LAMB weight decay rate.\n\n Args:\n learning_rate (float|Variable): the learning rate used to update parameters. \\\n Can be a float value or a Variable with one \\\n float value as data element.\n lamb_weight_decay (float): The LAMB weight decay rate.\n beta1 (float): The exponential decay rate for the 1st moment estimates.\n beta2 (float): The exponential decay rate for the 2nd moment estimates.\n epsilon (float): A small float value for numerical stability.\n regularization (Regularizer): A Regularizer, such as\n fluid.regularizer.L1DecayRegularizer.\n exclude_from_weight_decay_fn (function): Exclude a parameter from weight \n decay when **exclude_from_weight_decay_fn(parameter)** returns true.\n name (str|None): An optional name prefix.\n\n Examples:\n .. code-block:: python\n \n import paddle.fluid as fluid \n\n data = fluid.layers.data(name='x', shape=[5], dtype='float32')\n hidden = fluid.layers.fc(input=data, size=10)\n cost = fluid.layers.mean(hidden)\n\n def exclude_fn(param):\n return param.name.endswith('.b_0')\n\n optimizer = fluid.optimizer.Lamb(learning_rate=0.002,\n exclude_from_weight_decay_fn=exclude_fn)\n optimizer.minimize(cost)\n \"\"\"\n _moment1_acc_str = \"moment1\"\n _moment2_acc_str = \"moment2\"\n # these two not used in op temporarily\n _beta1_pow_acc_str = \"beta1_pow_acc\"\n _beta2_pow_acc_str = \"beta2_pow_acc\"\n\n def __init__(self,\n learning_rate=0.001,\n lamb_weight_decay=0.01,\n beta1=0.9,\n beta2=0.999,\n epsilon=1e-6,\n regularization=None,\n exclude_from_weight_decay_fn=None,\n name=None):\n assert learning_rate is not None\n assert lamb_weight_decay is not None\n assert beta1 is not None\n assert beta2 is not None\n assert epsilon is not None\n super(LambOptimizer, self).__init__(\n learning_rate=learning_rate,\n regularization=regularization,\n beta1=beta1,\n beta2=beta2,\n epsilon=epsilon,\n name=name)\n self.type = \"lamb\"\n self._weight_decay = lamb_weight_decay\n self._exclude_from_weight_decay_fn = exclude_from_weight_decay_fn\n\n def _append_optimize_op(self, block, param_and_grad):\n assert isinstance(block, framework.Block)\n block.program._use_lamb = True\n\n moment1 = self._get_accumulator(self._moment1_acc_str,\n param_and_grad[0])\n moment2 = self._get_accumulator(self._moment2_acc_str,\n param_and_grad[0])\n beta1_pow_acc = self._get_accumulator(self._beta1_pow_acc_str,\n param_and_grad[0])\n beta2_pow_acc = self._get_accumulator(self._beta2_pow_acc_str,\n param_and_grad[0])\n\n if self._exclude_from_weight_decay_fn is not None \\\n and self._exclude_from_weight_decay_fn(param_and_grad[0]):\n weight_decay = 0.0\n else:\n weight_decay = self._weight_decay\n\n # create the lamb optimize op\n lamb_op = block.append_op(\n type=self.type,\n inputs={\n \"Param\": param_and_grad[0],\n \"Grad\": param_and_grad[1],\n \"LearningRate\": self._create_param_lr(param_and_grad),\n \"Moment1\": moment1,\n \"Moment2\": moment2,\n \"Beta1Pow\": beta1_pow_acc,\n \"Beta2Pow\": beta2_pow_acc\n },\n outputs={\n \"ParamOut\": param_and_grad[0],\n \"Moment1Out\": moment1,\n \"Moment2Out\": moment2\n 
},\n attrs={\n \"beta1\": self._beta1,\n \"beta2\": self._beta2,\n \"epsilon\": self._epsilon,\n \"weight_decay\": weight_decay\n },\n stop_gradient=True)\n\n return lamb_op\n\n\n# We shorten the class names, since users will reference the optimizer through the\n# package name. Sample code:\n#\n# import paddle.fluid as fluid\n#\n# sgd = fluid.optimizer.SGD(...)\n#\n# There is no need to add an `Optimizer` suffix to the class name\nSGD = SGDOptimizer\nMomentum = MomentumOptimizer\nAdagrad = AdagradOptimizer\nAdam = AdamOptimizer\nAdamax = AdamaxOptimizer\nDecayedAdagrad = DecayedAdagradOptimizer\nAdadelta = AdadeltaOptimizer\nRMSProp = RMSPropOptimizer\nFtrl = FtrlOptimizer\nLarsMomentum = LarsMomentumOptimizer\nLamb = LambOptimizer\n\n\nclass ModelAverage(Optimizer):\n \"\"\"Accumulate the average of parameters within a sliding window. The average\n result will be saved in temporary variables, which can be applied to\n the parameter variables of the current model by calling the 'apply()' method. The\n 'restore()' method is used to restore the parameter values of the current model.\n\n The size of the average window is determined by average_window_rate,\n min_average_window, max_average_window and the current update times.\n\n Args:\n average_window_rate: The rate of the average window.\n min_average_window: The minimum size of the average window.\n max_average_window: The maximum size of the average window.\n regularization: A Regularizer, such as\n fluid.regularizer.L2DecayRegularizer.\n name: An optional name prefix.\n\n Examples:\n\n .. code-block:: python\n\n import paddle.fluid as fluid\n import numpy\n\n # First create the Executor.\n place = fluid.CPUPlace() # fluid.CUDAPlace(0)\n exe = fluid.Executor(place)\n\n train_program = fluid.Program()\n startup_program = fluid.Program()\n with fluid.program_guard(train_program, startup_program):\n # build net\n data = fluid.layers.data(name='X', shape=[1], dtype='float32')\n hidden = fluid.layers.fc(input=data, size=10)\n loss = fluid.layers.mean(hidden)\n optimizer = fluid.optimizer.Momentum(learning_rate=0.2, momentum=0.1)\n optimizer.minimize(loss)\n\n # build ModelAverage optimizer\n model_average = fluid.optimizer.ModelAverage(0.15,\n min_average_window=10000,\n max_average_window=20000)\n\n exe.run(startup_program)\n x = numpy.random.random(size=(10, 1)).astype('float32')\n outs = exe.run(program=train_program,\n feed={'X': x},\n fetch_list=[loss.name])\n\n # apply ModelAverage\n with model_average.apply(exe):\n x = numpy.random.random(size=(10, 1)).astype('float32')\n exe.run(program=train_program,\n feed={'X': x},\n fetch_list=[loss.name])\n \"\"\"\n\n def __init__(self,\n average_window_rate,\n min_average_window=10000,\n max_average_window=10000,\n regularization=None,\n name=None):\n super(ModelAverage, self).__init__(\n 0.0, regularization=regularization, name=name)\n self.average_window = average_window_rate\n self.min_average_window = min_average_window\n self.max_average_window = max_average_window\n\n self.params_grads = []\n for param in framework.default_main_program().global_block(\n ).all_parameters():\n if param.do_model_average is not False:\n grad = param.block.create_var(\n name=unique_name.generate_with_ignorable_key(\".\".join(\n [param.name, 'tmp'])),\n dtype=param.dtype,\n persistable=False,\n stop_gradient=True)\n self.params_grads.append((param, grad))\n\n for param, grad in self.params_grads:\n if grad is None:\n continue\n with param.block.program._optimized_guard(\n [param, grad]), name_scope('move_average'):\n self._append_average_accumulate_op(param)\n\n
        self.apply_program = Program()\n block = self.apply_program.global_block()\n with program_guard(main_program=self.apply_program):\n for param_grad in self.params_grads:\n self._add_average_apply_op(block, param_grad)\n\n self.restore_program = Program()\n block = self.restore_program.global_block()\n with program_guard(main_program=self.restore_program):\n for param_grad in self.params_grads:\n self._add_average_restore_op(block, param_grad)\n\n def _add_average_apply_op(self, block, param_grad):\n param = block._clone_variable(param_grad[0])\n grad = block._clone_variable(param_grad[1])\n sum_1 = block._clone_variable(self._get_accumulator('sum_1', param))\n sum_2 = block._clone_variable(self._get_accumulator('sum_2', param))\n sum_3 = block._clone_variable(self._get_accumulator('sum_3', param))\n num_accumulates = block._clone_variable(\n self._get_accumulator('num_accumulates', param))\n old_num_accumulates = block._clone_variable(\n self._get_accumulator('old_num_accumulates', param))\n num_updates = block._clone_variable(\n self._get_accumulator('num_updates', param))\n # backup param value to grad\n layers.assign(input=param, output=grad)\n # param = (sum_1 + sum_2 + sum_3) / (num_accumulates + old_num_accumulates)\n tmp = layers.sum(x=[num_accumulates, old_num_accumulates])\n sum = layers.sum(x=[sum_1, sum_2, sum_3])\n tmp = layers.cast(\n x=tmp, dtype='float32' if self._dtype is None else self._dtype)\n sum = layers.cast(\n x=sum, dtype='float32' if self._dtype is None else self._dtype)\n ops._elementwise_div(x=sum, y=tmp, out=param)\n\n def _add_average_restore_op(self, block, param_grad):\n param = block._clone_variable(param_grad[0])\n grad = block._clone_variable(param_grad[1])\n layers.assign(input=grad, output=param)\n\n def _append_average_accumulate_op(self, param):\n self.helper = LayerHelper(\"average_accumulate\")\n sum_1 = self._add_accumulator('sum_1', param)\n sum_2 = self._add_accumulator('sum_2', param)\n sum_3 = self._add_accumulator('sum_3', param)\n num_accumulates = self._add_accumulator(\n 'num_accumulates', param, dtype='int64', shape=[1])\n old_num_accumulates = self._add_accumulator(\n 'old_num_accumulates', param, dtype='int64', shape=[1])\n num_updates = self._add_accumulator(\n 'num_updates', param, dtype='int64', shape=[1])\n\n self.helper.append_op(\n type='average_accumulates',\n inputs={\n \"param\": param,\n \"in_sum_1\": sum_1,\n \"in_sum_2\": sum_2,\n \"in_sum_3\": sum_3,\n \"in_num_accumulates\": num_accumulates,\n \"in_old_num_accumulates\": old_num_accumulates,\n \"in_num_updates\": num_updates\n },\n outputs={\n \"out_sum_1\": sum_1,\n \"out_sum_2\": sum_2,\n \"out_sum_3\": sum_3,\n \"out_num_accumulates\": num_accumulates,\n \"out_old_num_accumulates\": old_num_accumulates,\n \"out_num_updates\": num_updates,\n },\n attrs={\n \"average_window\": self.average_window,\n \"min_average_window\": self.min_average_window,\n \"max_average_window\": self.max_average_window,\n },\n stop_gradient=True)\n\n @signature_safe_contextmanager\n def apply(self, executor, need_restore=True):\n \"\"\"Apply average values to the parameters of the current model.\n\n Args:\n executor(fluid.Executor): current executor.\n need_restore(bool): Whether to restore the parameters after applying.
 Default is True.\n \"\"\"\n executor.run(self.apply_program)\n try:\n yield\n finally:\n if need_restore:\n self.restore(executor)\n\n def restore(self, executor):\n \"\"\"Restore the parameter values of the current model.\n \n Args:\n executor(fluid.Executor): current executor.\n \"\"\"\n executor.run(self.restore_program)\n\n\nclass ExponentialMovingAverage(object):\n \"\"\"\n Compute the moving average of parameters with exponential decay.\n Given a parameter :math:`\\\\theta`, its exponential moving average (EMA)\n will be\n\n .. math::\n\n \\\\text{EMA}_0 & = 0\n\n\t\\\\text{EMA}_t & = \\\\text{decay} * \\\\text{EMA}_{t-1} + (1 - \\\\text{decay}) * \\\\theta_t\n\n The average results calculated by the **update()** method will be saved in \n temporary variables which are created and maintained by the object, and can \n be applied to the parameters of the current model by calling the **apply()** method. \n The **restore()** method is used to restore the parameters.\n\n **Bias correction**. All EMAs are initialized to :math:`0` and hence they will be \n zero biased, which can be corrected by dividing by a factor \n :math:`(1 - \\\\text{decay}^t)` , i.e., the actual EMAs applied to parameters \n when calling the **apply()** method would be \n\n .. math::\n \n \\\\widehat{\\\\text{EMA}}_t = \\\\frac{\\\\text{EMA}_t}{1 - \\\\text{decay}^t}\n\n **Decay rate scheduling**. A large decay rate very close to 1 would cause \n the averages to move very slowly. A better strategy is to set a \n relatively smaller decay rate in the very beginning. The argument **thres_steps**\n allows users to pass a Variable to schedule the decay rate; in this case, \n the actual decay rate becomes \n \n .. math::\n \n \\\\min(\\\\text{decay}, \\\\frac{1 + \\\\text{thres_steps}}{10 + \\\\text{thres_steps}})\n\n Usually **thres_steps** can be the global training steps.\n\n\n Args:\n\tdecay (float): The exponential decay rate, usually close to 1, such as \n 0.999, 0.9999, ... .\n thres_steps (Variable|None): If not `None`, schedule the decay rate.\n\tname (str|None): An optional name prefix.\n\n\n Examples:\n\n\t.. 
code-block:: python\n\n\t import numpy\n\t import paddle\n\t import paddle.fluid as fluid\n\n\t data = fluid.layers.data(name='x', shape=[5], dtype='float32')\n\t hidden = fluid.layers.fc(input=data, size=10)\n\t cost = fluid.layers.mean(hidden)\n\n\t test_program = fluid.default_main_program().clone(for_test=True)\n\n\t optimizer = fluid.optimizer.Adam(learning_rate=0.001)\n\t optimizer.minimize(cost)\n\n\t global_steps = fluid.layers.learning_rate_scheduler._decay_step_counter()\n\t ema = fluid.optimizer.ExponentialMovingAverage(0.999, thres_steps=global_steps)\n\t ema.update()\n\n\t place = fluid.CPUPlace()\n\t exe = fluid.Executor(place)\n\t exe.run(fluid.default_startup_program())\n\n\t for pass_id in range(3):\n\t\tfor batch_id in range(6):\n\t\t data = numpy.random.random(size=(10, 5)).astype('float32')\n\t\t exe.run(program=fluid.default_main_program(),\n\t\t\tfeed={'x': data}, \n\t\t\tfetch_list=[cost.name])\n\n\t\t# usage 1\n\t\twith ema.apply(exe):\n\t\t data = numpy.random.random(size=(10, 5)).astype('float32')\n\t\t exe.run(program=test_program,\n\t\t\t feed={'x': data}, \n\t\t\t fetch_list=[hidden.name])\n\t\t\t \n\n\t\t # usage 2\n\t\twith ema.apply(exe, need_restore=False):\n\t\t data = numpy.random.random(size=(10, 5)).astype('float32')\n\t\t exe.run(program=test_program,\n\t\t\t feed={'x': data}, \n\t\t\t fetch_list=[hidden.name])\n\t\tema.restore(exe)\n \"\"\"\n\n def __init__(self, decay=0.999, thres_steps=None, name=None):\n self._decay = decay\n self._thres_steps = thres_steps\n self._name = name if name is not None else ''\n self._decay_var = self._get_ema_decay()\n\n self._params_tmps = []\n for param in default_main_program().global_block().all_parameters():\n if param.do_model_average != False:\n tmp = param.block.create_var(\n name=unique_name.generate(\".\".join(\n [self._name + param.name, 'ema_tmp'])),\n dtype=param.dtype,\n persistable=False,\n stop_gradient=True)\n self._params_tmps.append((param, tmp))\n\n self._ema_vars = {}\n for param, tmp in self._params_tmps:\n with param.block.program._optimized_guard(\n [param, tmp]), name_scope('moving_average'):\n self._ema_vars[param.name] = self._create_ema_vars(param)\n\n self.apply_program = Program()\n block = self.apply_program.global_block()\n with program_guard(main_program=self.apply_program):\n decay_pow = self._get_decay_pow(block)\n for param, tmp in self._params_tmps:\n param = block._clone_variable(param)\n tmp = block._clone_variable(tmp)\n ema = block._clone_variable(self._ema_vars[param.name])\n layers.assign(input=param, output=tmp)\n # bias correction\n ema = ema / (1.0 - decay_pow)\n layers.assign(input=ema, output=param)\n\n self.restore_program = Program()\n block = self.restore_program.global_block()\n with program_guard(main_program=self.restore_program):\n for param, tmp in self._params_tmps:\n tmp = block._clone_variable(tmp)\n param = block._clone_variable(param)\n layers.assign(input=tmp, output=param)\n\n def _get_ema_decay(self):\n with default_main_program()._lr_schedule_guard():\n decay_var = layers.tensor.create_global_var(\n shape=[1],\n value=self._decay,\n dtype='float32',\n persistable=True,\n name=\"scheduled_ema_decay_rate\")\n\n if self._thres_steps is not None:\n decay_t = (self._thres_steps + 1.0) / (self._thres_steps + 10.0)\n with layers.control_flow.Switch() as switch:\n with switch.case(decay_t < self._decay):\n layers.tensor.assign(decay_t, decay_var)\n with switch.default():\n layers.tensor.assign(\n np.array(\n [self._decay], dtype=np.float32),\n decay_var)\n return 
decay_var\n\n def _get_decay_pow(self, block):\n global_steps = layers.learning_rate_scheduler._decay_step_counter()\n decay_var = block._clone_variable(self._decay_var)\n decay_pow_acc = layers.elementwise_pow(decay_var, global_steps + 1)\n return decay_pow_acc\n\n def _create_ema_vars(self, param):\n param_ema = layers.create_global_var(\n name=unique_name.generate(self._name + param.name + '_ema'),\n shape=param.shape,\n value=0.0,\n dtype=param.dtype,\n persistable=True)\n\n return param_ema\n\n def update(self):\n \"\"\" \n Update the Exponential Moving Average. This method should only be called in \n the train program.\n \"\"\"\n param_master_emas = []\n for param, tmp in self._params_tmps:\n with param.block.program._optimized_guard(\n [param, tmp]), name_scope('moving_average'):\n param_ema = self._ema_vars[param.name]\n if param.name + '.master' in self._ema_vars:\n master_ema = self._ema_vars[param.name + '.master']\n param_master_emas.append([param_ema, master_ema])\n else:\n ema_t = param_ema * self._decay_var + param * (\n 1 - self._decay_var)\n layers.assign(input=ema_t, output=param_ema)\n\n # for fp16 params\n for param_ema, master_ema in param_master_emas:\n default_main_program().global_block().append_op(\n type=\"cast\",\n inputs={\"X\": master_ema},\n outputs={\"Out\": param_ema},\n attrs={\n \"in_dtype\": master_ema.dtype,\n \"out_dtype\": param_ema.dtype\n })\n\n @signature_safe_contextmanager\n def apply(self, executor, need_restore=True):\n \"\"\"\n Apply the moving average to parameters for evaluation.\n \n Args:\n executor (Executor): The Executor to execute applying.\n need_restore (bool): Whether to restore parameters after applying.\n \"\"\"\n executor.run(self.apply_program)\n try:\n yield\n finally:\n if need_restore:\n self.restore(executor)\n\n def restore(self, executor):\n \"\"\"Restore parameters.\n \n Args:\n executor (Executor): The Executor to execute restoring.\n \"\"\"\n executor.run(self.restore_program)\n\n\nclass PipelineOptimizer(object):\n \"\"\"\n Pipeline Optimizer\n\n Train with pipeline mode. The program will be split by cut_list. \n\n If the length of cut_list is k, then the whole program (including the \\\n backward part) will be split into 2*k-1 sections. \n \n So the length of place_list and concurrency_list must also be 2*k-1.\n\n Note: Though the asynchronous mode is applied in pipeline training for speed, \\\n the final performance depends heavily on the training progress of each pipeline.\n\n We will try the synchronous mode in the future.\n\n Args:\n optimizer (Optimizer): The base optimizer, such as SGD.\n cut_list (list of Variable list): The cut variables of the main_program.\n place_list (list of Place): The places on which the sections will run.\n concurrency_list (list of int): The concurrency degree.\n queue_size (int): Each section will consume scopes from its in-scope queue \n and produce scopes to its out-scope queue. This parameter \n specifies the scope queue size. [Optional. Default: 30].\n sync_steps (int): The synchronization steps between different cards. [Optional. Default: 1].\n start_cpu_core_id (int): Specify the first cpu core id. [Optional. Default: 0].\n\n Examples:\n .. 
code-block:: python\n\n import paddle.fluid as fluid\n import paddle.fluid.layers as layers\n\n x = fluid.layers.data(name='x', shape=[1], dtype='int64', lod_level=0)\n y = fluid.layers.data(name='y', shape=[1], dtype='int64', lod_level=0)\n emb_x = layers.embedding(input=x, param_attr=fluid.ParamAttr(name=\"embx\"), size=[10,2], is_sparse=False)\n emb_y = layers.embedding(input=y, param_attr=fluid.ParamAttr(name=\"emby\",learning_rate=0.9), size=[10,2], is_sparse=False)\n concat = layers.concat([emb_x, emb_y], axis=1)\n fc = layers.fc(input=concat, name=\"fc\", size=1, num_flatten_dims=1, bias_attr=False)\n loss = layers.reduce_mean(fc)\n optimizer = fluid.optimizer.SGD(learning_rate=0.5)\n optimizer = fluid.optimizer.PipelineOptimizer(optimizer,\n cut_list=[[emb_x, emb_y], [loss]],\n place_list=[fluid.CPUPlace(), fluid.CUDAPlace(0), fluid.CPUPlace()],\n concurrency_list=[1, 1, 4],\n queue_size=2,\n sync_steps=1,\n )\n optimizer.minimize(loss)\n place = fluid.CPUPlace()\n exe = fluid.Executor(place)\n exe.run(fluid.default_startup_program())\n filelist = [] # you should set your own filelist, e.g. filelist = [\"dataA.txt\"]\n dataset = fluid.DatasetFactory().create_dataset(\"FileInstantDataset\")\n dataset.set_use_var([x,y])\n dataset.set_batch_size(batch_size)\n dataset.set_filelist(filelist)\n exe.train_from_dataset(\n fluid.default_main_program(),\n dataset,\n thread=2,\n debug=False,\n fetch_list=[],\n fetch_info=[],\n print_period=1)\n \"\"\"\n\n def __init__(self,\n optimizer,\n cut_list=None,\n place_list=None,\n concurrency_list=None,\n queue_size=30,\n sync_steps=1,\n start_cpu_core_id=0):\n # TODO: check properties\n self._optimizer = optimizer\n self._cut_list = cut_list\n self._place_list = place_list\n self._concurrency_list = concurrency_list\n self._queue_size = queue_size\n self._sync_steps = sync_steps\n self._start_cpu_core_id = start_cpu_core_id\n\n def _create_vars(self, block, main_program):\n used_var_set = set()\n for op_idx in range(block.desc.op_size()):\n op_desc = block.desc.op(op_idx)\n vars = op_desc.input_arg_names() + op_desc.output_arg_names()\n for var in vars:\n if var in used_var_set:\n continue\n used_var_set.add(var)\n source_var = main_program.block(0).var(str(var))\n block._clone_variable(source_var, False)\n\n def _extract_section_opt_ops(self, ops, cut_point_name):\n \"\"\"\n Extract opt ops in the given section\n \"\"\"\n output_names = set(cut_point_name)\n relevant_op_flags = [True] * len(ops)\n for i, op in reversed(list(enumerate(ops))):\n if _some_in_set_(op.desc.output_arg_names(), output_names):\n for name in op.desc.input_arg_names():\n output_names.add(name)\n else:\n relevant_op_flags[i] = False\n\n op_path = [ops[i] for i in range(len(ops)) if relevant_op_flags[i]]\n return op_path\n\n def _find_input_output(self, ops, name, is_forward=True):\n \"\"\"\n Find the inputs or outputs of a section\n \"\"\"\n all_set = set()\n part_set = set()\n for op in ops:\n if is_forward:\n part_set.update(op.desc.output_arg_names())\n else:\n part_set.update(op.desc.input_arg_names())\n all_set.update(op.desc.output_arg_names())\n all_set.update(op.desc.input_arg_names())\n return all_set - part_set\n\n def _find_persistable_vars(self, ops, whole_parameters):\n \"\"\"\n find the persistable input vars in current section\n \"\"\"\n res = set()\n for op in ops:\n vars = op.desc.input_arg_names()\n for var in vars:\n if var in whole_parameters:\n res.add(var)\n return res\n\n def _is_opt_role_op(self, op):\n op_maker = core.op_proto_and_checker_maker\n 
optimize_role = core.op_proto_and_checker_maker.OpRole.Optimize\n if op_maker.kOpRoleAttrName() in op.attr_names and \\\n int(op.all_attrs()[op_maker.kOpRoleAttrName()]) & int(optimize_role) != 0:\n return True\n return False\n\n def _is_lr_role_op(self, op):\n op_maker = core.op_proto_and_checker_maker\n optimize_role = core.op_proto_and_checker_maker.OpRole.LRSched\n if op_maker.kOpRoleAttrName() in op.attr_names and \\\n int(op.all_attrs()[op_maker.kOpRoleAttrName()]) == int(optimize_role):\n return True\n return False\n\n def _extract_section_ops(self, ops, cut_point_name):\n \"\"\"\n Extract ops in the given section \n \"\"\"\n output_names = set(cut_point_name)\n relevant_op_flags = [True] * len(ops)\n for i, op in reversed(list(enumerate(ops))):\n if not self._is_opt_role_op(op) and _some_in_set_(\n op.desc.output_arg_names(), output_names):\n for name in op.desc.input_arg_names():\n output_names.add(name)\n elif op.desc.type() == \"print\" and op.desc.input_arg_names()[\n 0] in output_names:\n continue\n else:\n relevant_op_flags[i] = False\n\n op_path = [ops[i] for i in range(len(ops)) if relevant_op_flags[i]]\n return op_path\n\n def _find_section_opt(self, ops, params):\n res = self._extract_section_opt_ops(ops, params)\n return res\n\n def _split_program(self, main_program, cut_list):\n programs = []\n block = main_program.block(0)\n whole_parameters = [e.name for e in block.all_parameters()]\n cut_var_names = []\n cut_len = len(cut_list)\n sec_params = []\n for i, cut_vars in enumerate(cut_list[:-1]):\n cut_var_names.append([cut_var.name for cut_var in cut_vars])\n for i, cut_vars in reversed(list(enumerate(cut_list[:-1]))):\n cut_var_names.append(\n [_append_grad_suffix_(cut_var.name) for cut_var in cut_vars])\n if i == 0:\n cut_var_names[-1] += [var.name for var in cut_list[-1]]\n ops = block.ops[:]\n for i, cut_vars in enumerate(cut_var_names):\n program = {\n \"program\": Program(),\n \"input_set\": set(),\n \"output_set\": set()\n }\n cur_ops = self._extract_section_ops(ops, cut_vars)\n if i == 0:\n for op in ops:\n if self._is_lr_role_op(op):\n cur_ops.append(op)\n #prevent inplace in/out\n program[\"input_set\"].update(\n self._find_input_output(\n cur_ops, [], is_forward=True))\n for e in cur_ops:\n ops.remove(e)\n\n if i < cut_len:\n sec_params.append(\n self._find_persistable_vars(cur_ops, whole_parameters))\n if i >= cut_len - 1:\n opt_ops = self._find_section_opt(\n ops, sec_params[2 * cut_len - 2 - i])\n\n for e in opt_ops:\n ops.remove(e)\n cur_ops += opt_ops\n\n op_descs = [op.desc for op in cur_ops]\n for op_desc in op_descs:\n ap_op = program[\"program\"].block(0).desc.append_op()\n ap_op.copy_from(op_desc)\n program[\"input_set\"].update(\n self._find_input_output(\n cur_ops, cut_vars, is_forward=True))\n program[\"input_set\"].update(sec_params[min(i, 2 * cut_len - 2 - i)])\n program[\"output_set\"].update(\n self._find_input_output(\n cur_ops, cut_vars, is_forward=False))\n programs.append(program)\n program = {\n \"program\": Program(),\n \"input_set\": set(),\n \"output_set\": set()\n }\n op_descs = [op.desc for op in ops]\n for op_desc in op_descs:\n ap_op = program[\"program\"].block(0).desc.append_op()\n ap_op.copy_from(op_desc)\n program[\"input_set\"].update(\n [cut_var.name + \"@GRAD\" for cut_var in cut_list[0]])\n program[\"input_set\"].update(\n self._find_input_output(\n ops, [], is_forward=True))\n program[\"input_set\"].update(sec_params[0])\n programs.append(program)\n inputs = set()\n for program in reversed(list(programs)):\n output_list = 
list(program[\"output_set\"])\n for output in output_list:\n if output not in inputs:\n program[\"output_set\"].remove(output)\n inputs.update(program[\"input_set\"])\n return programs\n\n def minimize(self,\n loss,\n startup_program=None,\n parameter_list=None,\n no_grad_set=None):\n self._optimizer.minimize(loss, startup_program, parameter_list,\n no_grad_set)\n program = loss.block.program\n program_list = self._split_program(program, self._cut_list)\n for p in program_list:\n self._create_vars(p[\"program\"].block(0), program)\n whole_parameters = [e.name for e in program.block(0).all_parameters()]\n param_need_sync = []\n for i, section_p in enumerate(program_list):\n if not isinstance(self._place_list[i], core.CUDAPlace):\n continue\n section_var = [e for e in section_p[\"program\"].block(0).vars]\n for p in section_var:\n if p in whole_parameters:\n param_need_sync.append(p)\n program._pipeline_opt = {\n \"trainer\": \"PipelineTrainer\",\n \"device_worker\": \"Section\",\n \"section_program_list\": program_list,\n \"place_list\": self._place_list,\n \"concurrency_list\": self._concurrency_list,\n \"queue_size\": self._queue_size,\n \"start_cpu_core_id\": self._start_cpu_core_id,\n \"sync_steps\": self._sync_steps,\n \"param_need_sync\": param_need_sync\n }\n\n\nclass LookaheadOptimizer(object):\n \"\"\"\n This implements the Lookahead optimizer of the\n paper : https://arxiv.org/abs/1907.08610.\n\n Lookahead keeps two sets of params: the fast_params and\n the slow_params. inner_optimizer update fast_params every \n training step. Lookahead updates the slow_params and fast_params \n every k training steps as follows:\n\n .. math::\n \n slow\\_param_t &= slow\\_param_{t-1} + \\\\alpha * (fast\\_param_{t-1} - slow\\_param_{t-1})\n\t\n\tfast\\_param_t &= slow\\_param_t\n\n Args:\n inner_optimizer (Optimizer): The optimizer that update fast params step by step. \n alpha (float): The learning rate of Lookahead.\n k (int): The slow params is updated every k steps.\n\n Examples:\n .. 
code-block:: python\n\n import paddle\n import paddle.fluid as fluid\n import numpy as np\n\n\t x = fluid.layers.data(name='x', shape=[2], dtype='float32')\n\t label = fluid.layers.data(name=\"label\", shape=[1], dtype=\"int64\")\n\t y = fluid.layers.fc(input=[x], size=2, act=\"softmax\")\n\t loss = fluid.layers.cross_entropy(input=y, label=label)\n\t loss = fluid.layers.mean(x=loss)\n\t sgd = fluid.optimizer.SGD(learning_rate=0.01)\n\t optimizer = fluid.optimizer.LookaheadOptimizer(sgd,\n alpha=0.5,\n k=5)\n\t optimizer.minimize(loss)\n\t main_program = fluid.default_main_program()\n\t place = fluid.CPUPlace()\n\t exe = fluid.Executor(place)\n\t exe.run(fluid.default_startup_program())\n\n\t feeder = fluid.DataFeeder(feed_list=[x, label], place=place)\n\n\t step = 0\n while(step < 10):\n step += 1\n\t\texe.run(fluid.default_main_program(),\n \tfeed=feeder.feed(batch_data))\n\n \"\"\"\n\n def __init__(self, inner_optimizer, alpha=0.5, k=5):\n\n assert (inner_optimizer is not None), \"inner optimizer can not be None\"\n assert (\n 0.0 <= alpha <= 1.0\n ), \"alpha should be larger or equal to 0.0, and less or equal than 1.0\"\n assert (isinstance(k, int) and k > 0), \"k should be a positive integer\"\n\n self.inner_optimizer = inner_optimizer\n self.alpha = alpha\n self.k = k\n self.type = \"lookahead\"\n\n def minimize(self, loss, startup_program=None):\n\n # Apply inner optimizer to the main_program\n mini_out = self.inner_optimizer.minimize(\n loss, startup_program=startup_program)\n\n # Get startup_program and main_program\n if startup_program is None:\n startup_program = default_startup_program()\n main_block = loss.block\n\n # add some vars to the main_program\n params = [param.name for param in main_block.all_parameters()]\n param_to_slow = {}\n for param in params:\n fast_var = main_block.var(param)\n assert (fast_var is not None)\n slow_var = main_block.create_var(\n name=param + \"@SLOW\",\n shape=fast_var.shape,\n dtype=fast_var.dtype,\n persistable=True)\n param_to_slow[param] = slow_var\n\n # add some vars to the startup_program\n startup_block = startup_program.global_block()\n for param in params:\n fast_var = startup_block.var(param)\n assert (fast_var is not None)\n slow_var = startup_block.create_var(\n name=param + \"@SLOW\",\n shape=fast_var.shape,\n dtype=fast_var.dtype,\n persistable=True)\n\n startup_block.append_op(\n type=\"assign\",\n inputs={\"X\": fast_var},\n outputs={\"Out\": slow_var})\n\n # Add Var k to main prog and startup prog\n k = layers.create_global_var(\n name=\"lookahead_k\",\n shape=[1],\n value=int(self.k),\n dtype='int32',\n persistable=True)\n\n # Add Var alpha to main prog and startup prog\n alpha = layers.create_global_var(\n name=\"lookahead_alpha\",\n shape=[1],\n value=float(self.alpha),\n dtype='float32',\n persistable=True)\n\n # Add Var step\n step = layers.create_global_var(\n name=\"lookahead_step\",\n shape=[1],\n value=int(0),\n dtype='int32',\n persistable=True)\n layers.increment(x=step, value=1.0, in_place=True)\n\n # lookahead\n zero_var = layers.fill_constant(shape=[1], dtype='float32', value=0.0)\n\n one_var = layers.fill_constant(shape=[1], dtype='float32', value=1.0)\n\n mod = layers.elementwise_mod(step, k)\n with layers.control_flow.Switch() as switch:\n with switch.case(mod == zero_var):\n for param_name in params:\n fast_var = main_block.var(param_name)\n slow_var = param_to_slow[param_name]\n tmp_var = layers.elementwise_add(\n layers.elementwise_mul(fast_var, alpha),\n layers.elementwise_mul(\n slow_var, 
layers.elementwise_sub(one_var, alpha)))\n layers.assign(input=tmp_var, output=slow_var)\n layers.assign(input=tmp_var, output=fast_var)\n with switch.default():\n pass\n return mini_out\n",
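 "# --- Editorial addition: a minimal, hedged usage sketch for the optimizers defined above. ---\n# It combines only calls that already appear in this file's docstring examples\n# (fluid.layers.data/fc/mean, fluid.optimizer.Adam, Executor.run); the network sizes,\n# learning rate, and random feed data below are illustrative assumptions, not part of\n# the original module.\nimport numpy\nimport paddle.fluid as fluid\n\nmain = fluid.Program()\nstartup = fluid.Program()\nwith fluid.program_guard(main, startup):\n    x = fluid.layers.data(name='x', shape=[4], dtype='float32')  # assumed feature size\n    hidden = fluid.layers.fc(input=x, size=8)  # assumed hidden size\n    loss = fluid.layers.mean(hidden)\n    # Adam is the shortened alias for AdamOptimizer defined above\n    fluid.optimizer.Adam(learning_rate=0.001).minimize(loss)\n\nexe = fluid.Executor(fluid.CPUPlace())\nexe.run(startup)\ndata = numpy.random.random(size=(2, 4)).astype('float32')\nexe.run(main, feed={'x': data}, fetch_list=[loss.name])\n",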
"# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nfrom __future__ import print_function\n\nfrom six.moves import reduce\n\nfrom .. import core\nfrom ..layers import utils\nfrom . import layers\nfrom ..framework import Variable, in_dygraph_mode, OpProtoHolder, Parameter\nfrom ..param_attr import ParamAttr\nfrom ..initializer import Normal, Constant, NumpyArrayInitializer\nimport numpy as np\nimport logging\n\n__all__ = [\n 'Conv2D', 'Conv3D', 'Pool2D', 'FC', 'BatchNorm', 'Embedding', 'GRUUnit',\n 'LayerNorm', 'NCE', 'PRelu', 'BilinearTensorProduct', 'Conv2DTranspose',\n 'Conv3DTranspose', 'GroupNorm', 'SpectralNorm', 'TreeConv'\n]\n\n\nclass Conv2D(layers.Layer):\n \"\"\"\n The convolution2D layer calculates the output based on the input, filter\n and strides, paddings, dilations, groups parameters. Input and\n Output are in NCHW format, where N is batch size, C is the number of\n channels, H is the height of the feature, and W is the width of the feature.\n Filter is in MCHW format, where M is the number of output image channels,\n C is the number of input image channels, H is the height of the filter,\n and W is the width of the filter. If the groups is greater than 1,\n C will equal the number of input image channels divided by the groups.\n Please refer to UFLDL's `convolution\n <http://ufldl.stanford.edu/tutorial/supervised/FeatureExtractionUsingConvolution/>`\n for more detials.\n If bias attribution and activation type are provided, bias is added to the\n output of the convolution, and the corresponding activation function is\n applied to the final result.\n\n For each input :math:`X`, the equation is:\n\n .. math::\n\n Out = \\sigma (W \\\\ast X + b)\n\n Where:\n\n * :math:`X`: Input value, a tensor with NCHW format.\n * :math:`W`: Filter value, a tensor with MCHW format.\n * :math:`\\\\ast`: Convolution operation.\n * :math:`b`: Bias value, a 2-D tensor with shape [M, 1].\n * :math:`\\\\sigma`: Activation function.\n * :math:`Out`: Output value, the shape of :math:`Out` and :math:`X` may be different.\n\n Example:\n\n - Input:\n\n Input shape: :math:`(N, C_{in}, H_{in}, W_{in})`\n\n Filter shape: :math:`(C_{out}, C_{in}, H_f, W_f)`\n\n - Output:\n\n Output shape: :math:`(N, C_{out}, H_{out}, W_{out})`\n\n Where\n\n .. math::\n\n H_{out}&= \\\\frac{(H_{in} + 2 * paddings[0] - (dilations[0] * (H_f - 1) + 1))}{strides[0]} + 1 \\\\\\\\\n W_{out}&= \\\\frac{(W_{in} + 2 * paddings[1] - (dilations[1] * (W_f - 1) + 1))}{strides[1]} + 1\n\n Args:\n name_scope(str) : The name for this class.\n num_filters(int): The number of filter. It is as same as the output\n image channel.\n filter_size (int|tuple|None): The filter size. If filter_size is a tuple,\n it must contain two integers, (filter_size_H, filter_size_W).\n Otherwise, the filter will be a square.\n stride (int|tuple): The stride size. If stride is a tuple, it must\n contain two integers, (stride_H, stride_W). Otherwise, the\n stride_H = stride_W = stride. 
Default: stride = 1.\n            padding (int|tuple): The padding size. If padding is a tuple, it must\n                contain two integers, (padding_H, padding_W). Otherwise, the\n                padding_H = padding_W = padding. Default: padding = 0.\n            dilation (int|tuple): The dilation size. If dilation is a tuple, it must\n                contain two integers, (dilation_H, dilation_W). Otherwise, the\n                dilation_H = dilation_W = dilation. Default: dilation = 1.\n            groups (int): The number of groups of the Conv2d Layer. According to grouped\n                convolution in Alex Krizhevsky's Deep CNN paper: when group=2,\n                the first half of the filters is only connected to the first half\n                of the input channels, while the second half of the filters is only\n                connected to the second half of the input channels. Default: groups=1.\n            param_attr (ParamAttr|None): The parameter attribute for learnable parameters/weights\n                of conv2d. If it is set to None or one attribute of ParamAttr, conv2d\n                will create ParamAttr as param_attr. If the Initializer of the param_attr\n                is not set, the parameter is initialized with :math:`Normal(0.0, std)`,\n                and the :math:`std` is :math:`(\\\\frac{2.0 }{filter\\_elem\\_num})^{0.5}`. Default: None.\n            bias_attr (ParamAttr|bool|None): The parameter attribute for the bias of conv2d.\n                If it is set to False, no bias will be added to the output units.\n                If it is set to None or one attribute of ParamAttr, conv2d\n                will create ParamAttr as bias_attr. If the Initializer of the bias_attr\n                is not set, the bias is initialized zero. Default: None.\n            use_cudnn (bool): Use cudnn kernel or not, it is valid only when the cudnn\n                library is installed. Default: True\n            act (str): Activation type, if it is set to None, activation is not appended.\n                Default: None\n\n        Raises:\n            ValueError: If the shapes of input, filter_size, stride, padding and\n                groups mismatch.\n\n        Examples:\n            .. 
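# Before the docstring's own example below, a hedged helper restating the
# H_out/W_out formula quoted above (illustrative only, not part of Conv2D):
def conv2d_out_size(in_size, filter_size, stride, padding, dilation=1):
    return (in_size + 2 * padding - (dilation * (filter_size - 1) + 1)) // stride + 1

# A 32x32 input with a 3x3 filter, stride 1, padding 0 gives a 30x30 output.
assert conv2d_out_size(32, 3, 1, 0) == 30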
code-block:: python\n\n from paddle.fluid.dygraph.base import to_variable\n import paddle.fluid as fluid\n from paddle.fluid.dygraph import Conv2D\n import numpy as np\n\n data = np.random.uniform( -1, 1, [10, 3, 32, 32] ).astype('float32')\n with fluid.dygraph.guard():\n conv2d = Conv2D( \"conv2d\", 2, 3)\n data = to_variable( data )\n conv = conv2d( data )\n\n \"\"\"\n\n def __init__(self,\n name_scope,\n num_filters,\n filter_size,\n stride=1,\n padding=0,\n dilation=1,\n groups=None,\n param_attr=None,\n bias_attr=None,\n use_cudnn=True,\n act=None,\n dtype='float32'):\n assert param_attr is not False, \"param_attr should not be False here.\"\n super(Conv2D, self).__init__(name_scope, dtype)\n self._groups = groups\n self._stride = utils.convert_to_list(stride, 2, 'stride')\n self._padding = utils.convert_to_list(padding, 2, 'padding')\n self._dilation = utils.convert_to_list(dilation, 2, 'dilation')\n self._act = act\n if not isinstance(use_cudnn, bool):\n raise ValueError(\"use_cudnn should be True or False\")\n self._use_cudnn = use_cudnn\n self._filter_size = filter_size\n self._num_filters = num_filters\n self._param_attr = param_attr\n self._bias_attr = bias_attr\n self._dtype = dtype\n # if (self._num_channels == self._groups and\n # num_filters % self._num_channels == 0 and not self._use_cudnn):\n # self._l_type = 'depthwise_conv2d'\n # else:\n # TODO(jiabin): recover the usage of depthwise_conv2d when it's\n # kernel fixed https://github.com/PaddlePaddle/Paddle/issues/17275\n self._l_type = 'conv2d'\n\n def _build_once(self, input):\n self._num_channels = input.shape[1]\n if self._groups is None:\n num_filter_channels = self._num_channels\n else:\n if self._num_channels % self._groups != 0:\n raise ValueError(\"num_channels must be divisible by groups.\")\n num_filter_channels = self._num_channels // self._groups\n filter_size = utils.convert_to_list(self._filter_size, 2, 'filter_size')\n filter_shape = [self._num_filters, int(num_filter_channels)\n ] + filter_size\n\n def _get_default_param_initializer():\n filter_elem_num = filter_size[0] * filter_size[\n 1] * self._num_channels\n std = (2.0 / filter_elem_num)**0.5\n return Normal(0.0, std, 0)\n\n self._filter_param = self.create_parameter(\n attr=self._param_attr,\n shape=filter_shape,\n dtype=self._dtype,\n default_initializer=_get_default_param_initializer())\n\n self._bias_param = self.create_parameter(\n attr=self._bias_attr,\n shape=[self._num_filters],\n dtype=self._dtype,\n is_bias=True)\n\n def forward(self, input):\n pre_bias = self._helper.create_variable_for_type_inference(\n dtype=self._dtype)\n\n self._helper.append_op(\n type=self._l_type,\n inputs={\n 'Input': input,\n 'Filter': self._filter_param,\n },\n outputs={\"Output\": pre_bias},\n attrs={\n 'strides': self._stride,\n 'paddings': self._padding,\n 'dilations': self._dilation,\n 'groups': self._groups if self._groups else 1,\n 'use_cudnn': self._use_cudnn,\n 'use_mkldnn': False,\n })\n\n if self._bias_param is not None:\n pre_act = self._helper.create_variable_for_type_inference(\n dtype=self._dtype)\n self._helper.append_op(\n type='elementwise_add',\n inputs={'X': [pre_bias],\n 'Y': [self._bias_param]},\n outputs={'Out': [pre_act]},\n attrs={'axis': 1})\n else:\n pre_act = pre_bias\n\n # Currently, we don't support inplace in dygraph mode\n return self._helper.append_activation(pre_act, act=self._act)\n\n\nclass Conv3D(layers.Layer):\n \"\"\"\n **Convlution3D Layer**\n\n The convolution3D layer calculates the output based on the input, filter\n and 
strides, paddings, dilations, groups parameters. Input(Input) and\n Output(Output) are in NCDHW format. Where N is batch size C is the number of\n channels, D is the depth of the feature, H is the height of the feature,\n and W is the width of the feature. Convlution3D is similar with Convlution2D\n but adds one dimension(depth). If bias attribution and activation type are\n provided, bias is added to the output of the convolution, and the\n corresponding activation function is applied to the final result.\n\n For each input :math:`X`, the equation is:\n\n .. math::\n\n Out = \\sigma (W \\\\ast X + b)\n\n In the above equation:\n\n * :math:`X`: Input value, a tensor with NCDHW format.\n * :math:`W`: Filter value, a tensor with MCDHW format.\n * :math:`\\\\ast`: Convolution operation.\n * :math:`b`: Bias value, a 2-D tensor with shape [M, 1].\n * :math:`\\\\sigma`: Activation function.\n * :math:`Out`: Output value, the shape of :math:`Out` and :math:`X` may be different.\n\n Example:\n\n - Input:\n\n Input shape: :math:`(N, C_{in}, D_{in}, H_{in}, W_{in})`\n\n Filter shape: :math:`(C_{out}, C_{in}, D_f, H_f, W_f)`\n\n - Output:\n Output shape: :math:`(N, C_{out}, D_{out}, H_{out}, W_{out})`\n\n Where\n\n .. math::\n\n D_{out}&= \\\\frac{(D_{in} + 2 * paddings[0] - (dilations[0] * (D_f - 1) + 1))}{strides[0]} + 1 \\\\\\\\\n H_{out}&= \\\\frac{(H_{in} + 2 * paddings[1] - (dilations[1] * (H_f - 1) + 1))}{strides[1]} + 1 \\\\\\\\\n W_{out}&= \\\\frac{(W_{in} + 2 * paddings[2] - (dilations[2] * (W_f - 1) + 1))}{strides[2]} + 1\n\n Args:\n name_scope(str) : The name for this class.\n num_filters(int): The number of filter. It is as same as the output image channel.\n filter_size (int|tuple|None): The filter size. If filter_size is a tuple,\n it must contain three integers, (filter_size_D, filter_size_H, filter_size_W).\n Otherwise, the filter will be a square.\n stride (int|tuple): The stride size. If stride is a tuple, it must\n contain three integers, (stride_D, stride_H, stride_W). Otherwise, the\n stride_D = stride_H = stride_W = stride. Default: stride = 1.\n padding (int|tuple): The padding size. If padding is a tuple, it must\n contain three integers, (padding_D, padding_H, padding_W). Otherwise, the\n padding_D = padding_H = padding_W = padding. Default: padding = 0.\n dilation (int|tuple): The dilation size. If dilation is a tuple, it must\n contain three integers, (dilation_D, dilation_H, dilation_W). Otherwise, the\n dilation_D = dilation_H = dilation_W = dilation. Default: dilation = 1.\n groups (int): The groups number of the Conv3d Layer. According to grouped\n convolution in Alex Krizhevsky's Deep CNN paper: when group=2,\n the first half of the filters is only connected to the first half\n of the input channels, while the second half of the filters is only\n connected to the second half of the input channels. Default: groups=1\n param_attr (ParamAttr|None): The parameter attribute for learnable parameters/weights\n of conv3d. If it is set to None or one attribute of ParamAttr, conv3d\n will create ParamAttr as param_attr. If it is set to None, the parameter\n is initialized with :math:`Normal(0.0, std)`, and the :math:`std` is\n :math:`(\\\\frac{2.0 }{filter\\_elem\\_num})^{0.5}`. Default: None.\n bias_attr (ParamAttr|bool|None): The parameter attribute for the bias of conv3d.\n If it is set to False, no bias will be added to the output units.\n If it is set to None or one attribute of ParamAttr, conv3d\n will create ParamAttr as bias_attr. 
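# Aside (hedged): how the _build_once logic below derives the grouped filter
# shape and the default Normal(0.0, std) scale; a sketch, not the class code.
def conv3d_filter_shape(num_filters, num_channels, filter_size, groups=None):
    if groups is not None and num_channels % groups != 0:
        raise ValueError("num_channels must be divisible by groups.")
    per_group = num_channels if groups is None else num_channels // groups
    return [num_filters, per_group] + list(filter_size)

def default_init_std(filter_size, num_channels):
    filter_elem_num = filter_size[0] * filter_size[1] * filter_size[2] * num_channels
    return (2.0 / filter_elem_num) ** 0.5

assert conv3d_filter_shape(2, 3, (3, 3, 3)) == [2, 3, 3, 3, 3]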
If the Initializer of the bias_attr\n is not set, the bias is initialized zero. Default: None.\n use_cudnn (bool): Use cudnn kernel or not, it is valid only when the cudnn\n library is installed. Default: True\n act (str): Activation type, if it is set to None, activation is not appended.\n Default: None.\n\n Returns:\n Variable: The tensor variable storing the convolution and \\\n non-linearity activation result.\n\n Raises:\n ValueError: If the shapes of input, filter_size, stride, padding and\n groups mismatch.\n\n Examples:\n .. code-block:: python\n\n import paddle.fluid as fluid\n import numpy\n\n with fluid.dygraph.guard():\n data = numpy.random.random((5, 3, 12, 32, 32)).astype('float32')\n conv3d = fluid.dygraph.nn.Conv3D(\n 'Conv3D', num_filters=2, filter_size=3, act=\"relu\")\n ret = conv3d(fluid.dygraph.base.to_variable(data))\n\n \"\"\"\n\n def __init__(self,\n name_scope,\n num_filters,\n filter_size,\n stride=1,\n padding=0,\n dilation=1,\n groups=None,\n param_attr=None,\n bias_attr=None,\n use_cudnn=True,\n act=None):\n assert param_attr is not False, \"param_attr should not be False here.\"\n super(Conv3D, self).__init__(name_scope)\n self._groups = groups\n self._stride = utils.convert_to_list(stride, 3, 'stride')\n self._padding = utils.convert_to_list(padding, 3, 'padding')\n self._dilation = utils.convert_to_list(dilation, 3, 'dilation')\n self._act = act\n if not isinstance(use_cudnn, bool):\n raise ValueError(\"use_cudnn should be True or False\")\n self._use_cudnn = use_cudnn\n self._filter_size = filter_size\n self._num_filters = num_filters\n self._param_attr = param_attr\n self._bias_attr = bias_attr\n\n def _build_once(self, input):\n num_channels = input.shape[1]\n self._dtype = self._helper.input_dtype(input)\n\n if self._groups is None:\n num_filter_channels = num_channels\n else:\n if num_channels % self._groups != 0:\n raise ValueError(\"num_channels must be divisible by groups.\")\n num_filter_channels = num_channels // self._groups\n\n filter_size = utils.convert_to_list(self._filter_size, 3, 'filter_size')\n\n filter_shape = [self._num_filters, num_filter_channels] + filter_size\n\n def _get_default_param_initializer():\n filter_elem_num = filter_size[0] * filter_size[1] * filter_size[\n 2] * num_channels\n std = (2.0 / filter_elem_num)**0.5\n return Normal(0.0, std, 0)\n\n self._filter_param = self.create_parameter(\n attr=self._param_attr,\n shape=filter_shape,\n dtype=self._dtype,\n default_initializer=_get_default_param_initializer())\n\n self._bias_param = self.create_parameter(\n attr=self._bias_attr,\n shape=[self._num_filters],\n dtype=self._dtype,\n is_bias=True)\n\n def forward(self, input):\n pre_bias = self._helper.create_variable_for_type_inference(\n dtype=self._dtype)\n\n self._helper.append_op(\n type='conv3d',\n inputs={\n 'Input': input,\n 'Filter': self._filter_param,\n },\n outputs={\"Output\": pre_bias},\n attrs={\n 'strides': self._stride,\n 'paddings': self._padding,\n 'dilations': self._dilation,\n 'groups': self._groups if self._groups else 1,\n 'use_cudnn': self._use_cudnn,\n 'use_mkldnn': False\n })\n\n pre_act = self._helper.create_variable_for_type_inference(\n dtype=self._dtype)\n\n self._helper.append_op(\n type='elementwise_add',\n inputs={'X': [pre_bias],\n 'Y': [self._bias_param]},\n outputs={'Out': [pre_act]},\n attrs={'axis': 1})\n\n return self._helper.append_activation(pre_act, act=self._act)\n\n\nclass Conv3DTranspose(layers.Layer):\n \"\"\"\n **Convlution3D transpose layer**\n\n The convolution3D transpose layer 
calculates the output based on the input,\n filter, and dilations, strides, paddings. Input(Input) and output(Output)\n are in NCDHW format. Where N is batch size, C is the number of channels,\n D is the depth of the feature, H is the height of the feature, and W\n is the width of the feature. Parameters(dilations, strides, paddings) are\n two elements. These two elements represent height and width, respectively.\n The details of convolution transpose layer, please refer to the following\n explanation and references `therein <http://www.matthewzeiler.com/wp-content/uploads/2017/07/cvpr2010.pdf>`_.\n If bias attribution and activation type are provided, bias is added to\n the output of the convolution, and the corresponding activation function\n is applied to the final result.\n\n For each input :math:`X`, the equation is:\n\n .. math::\n\n Out = \\sigma (W \\\\ast X + b)\n\n In the above equation:\n\n * :math:`X`: Input value, a tensor with NCDHW format.\n * :math:`W`: Filter value, a tensor with MCDHW format.\n * :math:`\\\\ast`: Convolution operation.\n * :math:`b`: Bias value, a 2-D tensor with shape [M, 1].\n * :math:`\\\\sigma`: Activation function.\n * :math:`Out`: Output value, the shape of :math:`Out` and :math:`X` may be different.\n\n Example:\n\n - Input:\n\n Input shape: :math:`(N, C_{in}, D_{in}, H_{in}, W_{in})`\n\n Filter shape: :math:`(C_{in}, C_{out}, D_f, H_f, W_f)`\n\n - Output:\n\n Output shape: :math:`(N, C_{out}, D_{out}, H_{out}, W_{out})`\n\n Where\n\n .. math::\n\n D_{out} &= (D_{in} - 1) * strides[0] - 2 * paddings[0] + dilations[0] * (D_f - 1) + 1 \\\\\\\\\n H_{out} &= (H_{in} - 1) * strides[1] - 2 * paddings[1] + dilations[1] * (H_f - 1) + 1 \\\\\\\\\n W_{out} &= (W_{in} - 1) * strides[2] - 2 * paddings[2] + dilations[2] * (W_f - 1) + 1\n\n Args:\n name_scope(str) : The name for this class.\n num_filters(int): The number of the filter. It is as same as the output\n image channel.\n output_size(int|tuple|None): The output image size. If output size is a\n tuple, it must contain three integers, (image_D, image_H, image_W). This\n parameter only works when filter_size is None.\n filter_size(int|tuple|None): The filter size. If filter_size is a tuple,\n it must contain three integers, (filter_size_D, filter_size_H, filter_size_W).\n Otherwise, the filter will be a square. None if use output size to\n calculate filter_size.\n padding(int|tuple): The padding size. If padding is a tuple, it must\n contain three integers, (padding_D, padding_H, padding_W). Otherwise, the\n padding_D = padding_H = padding_W = padding. Default: padding = 0.\n stride(int|tuple): The stride size. If stride is a tuple, it must\n contain three integers, (stride_D, stride_H, stride_W). Otherwise, the\n stride_D = stride_H = stride_W = stride. Default: stride = 1.\n dilation(int|tuple): The dilation size. If dilation is a tuple, it must\n contain three integers, (dilation_D, dilation_H, dilation_W). Otherwise, the\n dilation_D = dilation_H = dilation_W = dilation. Default: dilation = 1.\n groups(int): The groups number of the Conv3d transpose layer. Inspired by\n grouped convolution in Alex Krizhevsky's Deep CNN paper, in which\n when group=2, the first half of the filters is only connected to the\n first half of the input channels, while the second half of the\n filters is only connected to the second half of the input channels.\n Default: groups=1\n param_attr (ParamAttr|None): The parameter attribute for learnable parameters/weights\n of conv3d_transpose. 
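# Hedged helper restating the D_out/H_out/W_out transpose formulas above
# (illustration only, not code from the layer itself):
def conv3d_transpose_out_size(in_size, stride, padding, dilation, filter_size):
    return (in_size - 1) * stride - 2 * padding + dilation * (filter_size - 1) + 1

# e.g. depth 12, stride 1, padding 0, dilation 1, filter 12 -> output depth 23
assert conv3d_transpose_out_size(12, 1, 0, 1, 12) == 23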
If it is set to None or one attribute of ParamAttr, conv3d_transpose\n will create ParamAttr as param_attr. If the Initializer of the param_attr\n is not set, the parameter is initialized with Xavier. Default: None.\n bias_attr (ParamAttr|bool|None): The parameter attribute for the bias of conv3d_transpose.\n If it is set to False, no bias will be added to the output units.\n If it is set to None or one attribute of ParamAttr, conv3d_transpose\n will create ParamAttr as bias_attr. If the Initializer of the bias_attr\n is not set, the bias is initialized zero. Default: None.\n use_cudnn(bool): Use cudnn kernel or not, it is valid only when the cudnn\n library is installed. Default: True\n act (str): Activation type, if it is set to None, activation is not appended.\n Default: None.\n name(str|None): A name for this layer(optional). If set None, the layer\n will be named automatically.\n\n Returns:\n Variable: The tensor variable storing the convolution transpose result.\n\n Raises:\n ValueError: If the shapes of input, filter_size, stride, padding and\n groups mismatch.\n\n Examples:\n .. code-block:: python\n\n import paddle.fluid as fluid\n import numpy\n\n with fluid.dygraph.guard():\n data = numpy.random.random((5, 3, 12, 32, 32)).astype('float32')\n\n conv3dTranspose = fluid.dygraph.nn.Conv3DTranspose(\n 'Conv3DTranspose',\n num_filters=12,\n filter_size=12,\n use_cudnn=False)\n ret = conv3dTranspose(fluid.dygraph.base.to_variable(data))\n\n \"\"\"\n\n def __init__(self,\n name_scope,\n num_filters,\n output_size=None,\n filter_size=None,\n padding=0,\n stride=1,\n dilation=1,\n groups=None,\n param_attr=None,\n bias_attr=None,\n use_cudnn=True,\n act=None,\n name=None):\n super(Conv3DTranspose, self).__init__(name_scope)\n if not isinstance(use_cudnn, bool):\n raise ValueError(\"use_cudnn should be True or False\")\n assert param_attr is not False, \"param_attr should not be False in conv3d_transpose.\"\n self._padding = utils.convert_to_list(padding, 3, 'padding')\n self._stride = utils.convert_to_list(stride, 3, 'stride')\n self._dilation = utils.convert_to_list(dilation, 3, 'dilation')\n self._param_attr = param_attr\n self._filter_size = filter_size\n self._output_size = output_size\n self._groups = 1 if groups is None else groups\n self._num_filters = num_filters\n self._use_cudnn = use_cudnn\n self._bias_attr = bias_attr\n self._act = act\n\n def _build_once(self, input):\n self._dtype = self._helper.input_dtype(input)\n self._input_channel = input.shape[1]\n\n if self._filter_size is None:\n if self._output_size is None:\n raise ValueError(\n \"output_size must be set when filter_size is None\")\n if isinstance(self._output_size, int):\n self._output_size = [self._output_size, self._output_size]\n\n d_in = input.shape[2]\n h_in = input.shape[3]\n w_in = input.shape[4]\n\n filter_size_d = (self._output_size[0] -\n (d_in - 1) * self._stride[0] + 2 * self._padding[0]\n - 1) // self._dilation[0] + 1\n filter_size_h = (self._output_size[1] -\n (h_in - 1) * self._stride[1] + 2 * self._padding[1]\n - 1) // self._dilation[1] + 1\n filter_size_w = (self._output_size[2] -\n (w_in - 1) * self._stride[2] + 2 * self._padding[2]\n - 1) // self._dilation[2] + 1\n self._filter_size = [filter_size_d, filter_size_h, filter_size_w]\n else:\n self._filter_size = utils.convert_to_list(\n self._filter_size, 3, 'conv3d_transpose.filter_size')\n\n filter_shape = [\n self._input_channel, self._num_filters // self._groups\n ] + self._filter_size\n self._img_filter = self.create_parameter(\n 
dtype=self._dtype, shape=filter_shape, attr=self._param_attr)\n if self._bias_attr:\n self._bias_param = self.create_parameter(\n attr=self._bias_attr,\n shape=[self._num_filters],\n dtype=self._dtype,\n is_bias=True)\n\n def forward(self, input):\n pre_bias = self._helper.create_variable_for_type_inference(\n dtype=self._dtype)\n self._helper.append_op(\n type=\"conv3d_transpose\",\n inputs={'Input': [input],\n 'Filter': [self._img_filter]},\n outputs={'Output': pre_bias},\n attrs={\n 'strides': self._stride,\n 'paddings': self._padding,\n 'dilations': self._dilation,\n 'groups': self._groups if self._groups else 1,\n 'use_cudnn': self._use_cudnn\n })\n\n if self._bias_attr:\n pre_act = self._helper.create_variable_for_type_inference(\n dtype=self._dtype)\n self._helper.append_op(\n type='elementwise_add',\n inputs={'X': [pre_bias],\n 'Y': [self._bias_param]},\n outputs={'Out': [pre_act]},\n attrs={'axis': 1})\n else:\n pre_act = pre_bias\n\n # Currently, we don't support inplace in imperative mode\n return self._helper.append_activation(pre_act, act=self._act)\n\n\nclass Pool2D(layers.Layer):\n \"\"\"\n The pooling2d operation calculates the output based on the input, pooling_type and ksize, strides,\n paddings parameters.Input(X) and output(Out) are in NCHW format, where N is batch size, C is the number of channels,\n H is the height of the feature, and W is the width of the feature.\n Parameters(ksize, strides, paddings) are two elements. These two elements represent height and width, respectively.\n The input(X) size and output(Out) size may be different.\n\n Args:\n name_scope(str) : The name of this class.\n pool_size (int|list|tuple): The pool kernel size. If pool kernel size is a tuple or list,\n it must contain two integers, (pool_size_Height, pool_size_Width).\n Otherwise, the pool kernel size will be a square of an int. Default: -1\n pool_type(str) : The pooling type, can be \"max\" for max-pooling and \"avg\" for average-pooling. Default: max\n pool_stride (int|list|tuple): The pool stride size. If pool stride size is a tuple or list,\n it must contain two integers, (pool_stride_Height, pool_stride_Width). Otherwise,\n the pool stride size will be a square of an int. Default: 1\n pool_padding (int|list|tuple): The pool padding size. If pool padding size is a tuple,\n it must contain two integers, (pool_padding_on_Height, pool_padding_on_Width).\n Otherwise, the pool padding size will be a square of an int. Default: 0\n global_pooling (bool): Whether to use the global pooling. If global_pooling = true,\n kernel size and paddings will be ignored. Default: False\n use_cudnn (bool): Only used in cudnn kernel, need install cudnn. Default: True\n ceil_mode (bool): Whether to use the ceil function to calculate output height and width.\n False is the default. If it is set to False, the floor function will be used. Default: False\n exclusive (bool): Whether to exclude padding points in average pooling mode. Default: True\n\n Returns:\n Variable: The pooling result.\n\n Raises:\n ValueError: If 'pool_type' is not \"max\" nor \"avg\"\n ValueError: If 'global_pooling' is False and 'pool_size' is -1\n ValueError: If 'use_cudnn' is not a bool value.\n\n Examples:\n\n .. 
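import math

# Sketch of the usual pooling output-size rule; the ceil/floor split is what
# the ceil_mode flag above toggles (the exact formula is an assumption here):
def pool2d_out_size(in_size, ksize, stride, padding, ceil_mode=False):
    raw = (in_size + 2 * padding - ksize) / stride
    return (math.ceil(raw) if ceil_mode else math.floor(raw)) + 1

print(pool2d_out_size(5, 2, 2, 0))        # 2 with floor rounding
print(pool2d_out_size(5, 2, 2, 0, True))  # 3 with ceil rounding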
code-block:: python\n\n import paddle.fluid as fluid\n import numpy\n\n with fluid.dygraph.guard():\n data = numpy.random.random((3, 32, 32)).astype('float32')\n\n pool2d = fluid.dygraph.Pool2D(\"pool2d\",pool_size=2,\n pool_type='max',\n pool_stride=1,\n global_pooling=False)\n pool2d_res = pool2d(data)\n\n \"\"\"\n\n def __init__(self,\n name_scope,\n pool_size=-1,\n pool_type=\"max\",\n pool_stride=1,\n pool_padding=0,\n global_pooling=False,\n use_cudnn=True,\n ceil_mode=False,\n exclusive=True,\n dtype=core.VarDesc.VarType.FP32):\n if pool_type not in [\"max\", \"avg\"]:\n raise ValueError(\n \"Unknown pool_type: '%s'. It can only be 'max' or 'avg'.\",\n str(pool_type))\n\n if global_pooling is False and pool_size == -1:\n raise ValueError(\n \"When the global_pooling is False, pool_size must be passed \"\n \"and be a valid value. Received pool_size: \" + str(pool_size))\n\n if not isinstance(use_cudnn, bool):\n raise ValueError(\"use_cudnn should be True or False\")\n\n super(Pool2D, self).__init__(name_scope, dtype=dtype)\n\n self._pool_type = pool_type\n self._pool_size = utils.convert_to_list(pool_size, 2, 'pool_size')\n self._pool_padding = utils.convert_to_list(pool_padding, 2,\n 'pool_padding')\n self._pool_stride = utils.convert_to_list(pool_stride, 2, 'pool_stride')\n self._global_pooling = global_pooling\n self._use_cudnn = use_cudnn\n self._ceil_mode = ceil_mode\n self._exclusive = exclusive\n self._l_type = 'pool2d'\n\n def forward(self, input):\n pool_out = self._helper.create_variable_for_type_inference(self._dtype)\n\n self._helper.append_op(\n type=self._l_type,\n inputs={\"X\": input},\n outputs={\"Out\": pool_out},\n attrs={\n \"pooling_type\": self._pool_type,\n \"ksize\": self._pool_size,\n \"global_pooling\": self._global_pooling,\n \"strides\": self._pool_stride,\n \"paddings\": self._pool_padding,\n \"use_cudnn\": self._use_cudnn,\n \"ceil_mode\": self._ceil_mode,\n \"use_mkldnn\": False,\n \"exclusive\": self._exclusive,\n })\n return pool_out\n\n\nclass FC(layers.Layer):\n \"\"\"\n **Fully Connected Layer**\n\n This function creates a fully connected layer in the network. It can take\n one or multiple tensors as its inputs(input can be a list of Variable, see\n Args in detail). It creates a variable called weights for each input tensor,\n which represents a fully connected weight matrix from each input unit to\n each output unit. The fully connected layer multiplies each input tensor\n with its corresponding weight to produce an output Tensor with shape [M, `size`],\n where M is batch size. If multiple input tensors are given, the results of\n multiple output tensors with shape [M, `size`] will be summed up. If bias_attr\n is not None, a bias variable will be created and added to the output.\n Finally, if activation is not None, it will be applied to the output as well.\n\n When the input is single tensor:\n\n .. math::\n\n Out = Act({XW + b})\n\n When the input are multiple tensors:\n\n .. math::\n\n Out = Act({\\sum_{i=0}^{N-1}X_iW_i + b})\n\n In the above equation:\n\n * :math:`N`: Number of the input. N equals to len(input) if input is list of Variable.\n * :math:`X_i`: The i-th input tensor.\n * :math:`W_i`: The i-th weights matrix corresponding i-th input tensor.\n * :math:`b`: The bias parameter created by this layer (if needed).\n * :math:`Act`: The activation function.\n * :math:`Out`: The output tensor.\n\n See below for an example.\n\n .. 
code-block:: text\n\n            Given:\n                data_1.data = [[[0.1, 0.2],\n                               [0.3, 0.4]]]\n                data_1.shape = (1, 2, 2) # 1 is batch_size\n\n                data_2.data = [[[0.1, 0.2, 0.3]]]\n                data_2.shape = (1, 1, 3)\n\n                out = fluid.layers.fc(input=[data_1, data_2], size=2)\n\n            Then:\n                out.data = [[0.18669507, 0.1893476]]\n                out.shape = (1, 2)\n\n    Args:\n        name_scope(str): The name of this class.\n        size(int): The number of output units in this layer.\n        num_flatten_dims (int): The fc layer can accept an input tensor with more than\n            two dimensions. If this happens, the multidimensional tensor will first be flattened\n            into a 2-dimensional matrix. The parameter `num_flatten_dims` determines how the input\n            tensor is flattened: the first `num_flatten_dims` (inclusive, index starts from 1)\n            dimensions will be flattened to form the first dimension of the final matrix (height of\n            the matrix), and the rest `rank(X) - num_flatten_dims` dimensions are flattened to\n            form the second dimension of the final matrix (width of the matrix). For example, suppose\n            `X` is a 5-dimensional tensor with a shape [2, 3, 4, 5, 6], and `num_flatten_dims` = 3.\n            Then, the flattened matrix will have a shape [2 x 3 x 4, 5 x 6] = [24, 30]. Default: 1\n        param_attr (ParamAttr|list of ParamAttr|None): The parameter attribute for learnable\n            parameters/weights of this layer.\n        bias_attr (ParamAttr|list of ParamAttr, default None): The parameter attribute for the bias\n            of this layer. If it is set to False, no bias will be added to the output units.\n            If it is set to None, the bias is initialized zero. Default: None.\n        act (str|None): Activation to be applied to the output of this layer.\n        is_test(bool): A flag indicating whether execution is in test phase. Default: False\n        dtype(str): Dtype used for weight\n\n    Raises:\n        ValueError: If rank of the input tensor is less than 2.\n\n    Examples:\n        .. 
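from functools import reduce

# Hedged helper for the num_flatten_dims rule described above: compute the
# 2-D matrix shape that FC actually multiplies (illustration only):
def flatten_for_fc(shape, num_flatten_dims):
    height = reduce(lambda a, b: a * b, shape[:num_flatten_dims], 1)
    width = reduce(lambda a, b: a * b, shape[num_flatten_dims:], 1)
    return [height, width]

assert flatten_for_fc([2, 3, 4, 5, 6], 3) == [24, 30]  # the docstring's example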
code-block:: python\n\n from paddle.fluid.dygraph.base import to_variable\n import paddle.fluid as fluid\n from paddle.fluid.dygraph import FC\n import numpy as np\n\n data = np.random.uniform( -1, 1, [30, 10, 32] ).astype('float32')\n with fluid.dygraph.guard():\n fc = FC( \"fc\", 64, num_flatten_dims=2)\n data = to_variable( data )\n conv = fc( data )\n\n \"\"\"\n\n def __init__(self,\n name_scope,\n size,\n num_flatten_dims=1,\n param_attr=None,\n bias_attr=None,\n act=None,\n is_test=False,\n dtype=\"float32\"):\n super(FC, self).__init__(name_scope, dtype)\n\n self._size = size\n self._num_flatten_dims = num_flatten_dims\n self._dtype = dtype\n self._param_attr = param_attr\n self._bias_attr = bias_attr\n self._act = act\n self.__w = list()\n\n @property\n def _w(self, i=0):\n return self.__w[i]\n\n @_w.setter\n def _w(self, value, i=0):\n assert isinstance(value, Parameter)\n self.__w[i] = value\n\n def _build_once(self, input):\n i = 0\n for inp, param in self._helper.iter_inputs_and_params(input,\n self._param_attr):\n input_shape = inp.shape\n\n param_shape = [\n reduce(lambda a, b: a * b, input_shape[self._num_flatten_dims:],\n 1)\n ] + [self._size]\n self.__w.append(\n self.add_parameter(\n '_w%d' % i,\n self.create_parameter(\n attr=param,\n shape=param_shape,\n dtype=self._dtype,\n is_bias=False)))\n i += 1\n\n size = list([self._size])\n self._b = self.create_parameter(\n attr=self._bias_attr, shape=size, dtype=self._dtype, is_bias=True)\n\n def forward(self, input):\n mul_results = list()\n i = 0\n for inp, param in self._helper.iter_inputs_and_params(input,\n self._param_attr):\n tmp = self._helper.create_variable_for_type_inference(self._dtype)\n self._helper.append_op(\n type=\"mul\",\n inputs={\"X\": inp,\n \"Y\": self.__w[i]},\n outputs={\"Out\": tmp},\n attrs={\n \"x_num_col_dims\": self._num_flatten_dims,\n \"y_num_col_dims\": 1\n })\n i += 1\n mul_results.append(tmp)\n\n if len(mul_results) == 1:\n pre_bias = mul_results[0]\n else:\n pre_bias = self._helper.create_variable_for_type_inference(\n self._dtype)\n self._helper.append_op(\n type=\"sum\",\n inputs={\"X\": mul_results},\n outputs={\"Out\": pre_bias},\n attrs={\"use_mkldnn\": False})\n\n if self._b:\n pre_activation = self._helper.create_variable_for_type_inference(\n dtype=self._dtype)\n self._helper.append_op(\n type='elementwise_add',\n inputs={'X': [pre_bias],\n 'Y': [self._b]},\n outputs={'Out': [pre_activation]},\n attrs={'axis': self._num_flatten_dims})\n else:\n pre_activation = pre_bias\n # Currently, we don't support inplace in dygraph mode\n return self._helper.append_activation(pre_activation, act=self._act)\n\n\nclass BatchNorm(layers.Layer):\n \"\"\"\n **Batch Normalization Layer**\n\n Can be used as a normalizer function for conv2d and fully_connected operations.\n The required data format for this layer is one of the following:\n\n 1. NHWC `[batch, in_height, in_width, in_channels]`\n\n 2. NCHW `[batch, in_channels, in_height, in_width]`\n\n Refer to `Batch Normalization: Accelerating Deep Network Training by Reducing\n Internal Covariate Shift <https://arxiv.org/pdf/1502.03167.pdf>`_\n for more details.\n\n :math:`input` is the input features over a mini-batch.\n\n .. 
math::\n\n        \\\\mu_{\\\\beta} &\\\\gets \\\\frac{1}{m} \\\\sum_{i=1}^{m} x_i \\\\qquad &//\\\\\n        \\ mini-batch\\ mean \\\\\\\\\n        \\\\sigma_{\\\\beta}^{2} &\\\\gets \\\\frac{1}{m} \\\\sum_{i=1}^{m}(x_i - \\\\\n        \\\\mu_{\\\\beta})^2 \\\\qquad &//\\ mini-batch\\ variance \\\\\\\\\n        \\\\hat{x_i} &\\\\gets \\\\frac{x_i - \\\\mu_\\\\beta} {\\\\sqrt{\\\\\n        \\\\sigma_{\\\\beta}^{2} + \\\\epsilon}} \\\\qquad &//\\ normalize \\\\\\\\\n        y_i &\\\\gets \\\\gamma \\\\hat{x_i} + \\\\beta \\\\qquad &//\\ scale\\ and\\ shift\n\n\n    When use_global_stats = True, the :math:`\\\\mu_{\\\\beta}`\n    and :math:`\\\\sigma_{\\\\beta}^{2}` are not the statistics of one mini-batch.\n    They are global (or running) statistics. (They are usually obtained from a\n    pre-trained model.)\n    The training and testing (or inference) have the same behavior:\n\n    ..  math::\n\n        \\\\hat{x_i} &\\\\gets \\\\frac{x_i - \\\\mu_\\\\beta} {\\\\sqrt{\\\\\n        \\\\sigma_{\\\\beta}^{2} + \\\\epsilon}} \\\\\\\\\n        y_i &\\\\gets \\\\gamma \\\\hat{x_i} + \\\\beta\n\n    Args:\n        name_scope(str): The name of this class.\n        act(str|None): Activation type, linear|relu|prelu|...\n        is_test (bool): A flag indicating whether it is in\n            test phase or not. Default: False\n        momentum(float): The value used for the moving_mean and\n            moving_var computation. The updated formula is:\n            :math:`moving\\_mean = moving\\_mean * momentum + new\\_mean * (1. - momentum)`\n            :math:`moving\\_var = moving\\_var * momentum + new\\_var * (1. - momentum)`\n            Default is 0.9.\n        epsilon(float): A value added to the denominator for\n            numerical stability. Default is 1e-5.\n        param_attr(ParamAttr|None): The parameter attribute for Parameter `scale`\n            of batch_norm. If it is set to None or one attribute of ParamAttr, batch_norm\n            will create ParamAttr as param_attr. If the Initializer of the param_attr\n            is not set, the parameter is initialized with Xavier. Default: None.\n        bias_attr(ParamAttr|None): The parameter attribute for the bias of batch_norm.\n            If it is set to None or one attribute of ParamAttr, batch_norm\n            will create ParamAttr as bias_attr. If the Initializer of the bias_attr\n            is not set, the bias is initialized zero. Default: None.\n        data_layout(string): NCHW|NHWC. Default: NCHW\n        in_place(bool): Make the input and output of batch norm reuse memory. Default: False\n        moving_mean_name(string|None): The name of moving_mean which stores the global Mean. Default: None\n        moving_variance_name(string, Default None): The name of the moving_variance which stores the global Variance.\n        do_model_average_for_mean_and_var(bool, Default False): Do model average for mean and variance or not.\n        fuse_with_relu (bool): if True, this OP performs relu after batch norm. Default: False\n        use_global_stats(bool): Whether to use global mean and\n            variance. In inference or test mode, set use_global_stats to true\n            or is_test to true, and the behavior is equivalent.\n            In train mode, when setting use_global_stats True, the global mean\n            and variance are also used during train period. Default: False\n        trainable_statistics(bool): Whether to calculate mean and var in eval mode. In eval mode, when\n            setting trainable_statistics True, mean and variance will be calculated by current batch statistics. Default: False\n\n    Returns:\n        Variable: A tensor variable which is the result after applying batch normalization on the input.\n\n    Examples:\n        .. 
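import numpy as np

# Minimal NumPy sketch of the moving-statistics update quoted above,
# moving = moving * momentum + new * (1 - momentum); illustration only:
def update_moving_stats(moving_mean, moving_var, batch, momentum=0.9):
    moving_mean = moving_mean * momentum + batch.mean() * (1.0 - momentum)
    moving_var = moving_var * momentum + batch.var() * (1.0 - momentum)
    return moving_mean, moving_var

m, v = update_moving_stats(0.0, 1.0, np.array([1.0, 3.0]))
print(m, v)  # 0.2 1.0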
code-block:: python\n\n import paddle.fluid as fluid\n\n with fluid.dygraph.guard():\n fc = fluid.FC('fc', size=200, param_attr='fc1.w')\n hidden1 = fc(x)\n batch_norm = fluid.BatchNorm(\"batch_norm\", 10)\n hidden2 = batch_norm(hidden1)\n \"\"\"\n\n def __init__(self,\n name_scope,\n num_channels,\n act=None,\n is_test=False,\n momentum=0.9,\n epsilon=1e-05,\n param_attr=None,\n bias_attr=None,\n dtype='float32',\n data_layout='NCHW',\n in_place=False,\n moving_mean_name=None,\n moving_variance_name=None,\n do_model_average_for_mean_and_var=False,\n fuse_with_relu=False,\n use_global_stats=False,\n trainable_statistics=False):\n super(BatchNorm, self).__init__(name_scope, dtype)\n self._param_attr = param_attr\n self._bias_attr = bias_attr\n self._act = act\n\n assert bias_attr is not False, \"bias_attr should not be False in batch_norm.\"\n\n if dtype == \"float16\":\n self._dtype = \"float32\"\n else:\n self._dtype = dtype\n\n param_shape = [num_channels]\n\n # create parameter\n self._scale = self.create_parameter(\n attr=self._param_attr,\n shape=param_shape,\n dtype=self._dtype,\n default_initializer=Constant(1.0))\n if use_global_stats and self._param_attr.learning_rate == 0.:\n self._scale.stop_gradient = True\n\n self._bias = self.create_parameter(\n attr=self._bias_attr,\n shape=param_shape,\n dtype=self._dtype,\n is_bias=True)\n if use_global_stats and self._param_attr.learning_rate == 0.:\n self._bias.stop_gradient = True\n\n self._mean = self.create_parameter(\n attr=ParamAttr(\n name=moving_mean_name,\n initializer=Constant(0.0),\n trainable=False,\n do_model_average=do_model_average_for_mean_and_var),\n shape=param_shape,\n dtype=self._dtype)\n self._mean.stop_gradient = True\n\n self._variance = self.create_parameter(\n attr=ParamAttr(\n name=moving_variance_name,\n initializer=Constant(1.0),\n trainable=False,\n do_model_average=do_model_average_for_mean_and_var),\n shape=param_shape,\n dtype=self._dtype)\n self._variance.stop_gradient = True\n\n self._in_place = in_place\n self._data_layout = data_layout\n self._momentum = momentum\n self._epsilon = epsilon\n self._is_test = is_test\n self._fuse_with_relu = fuse_with_relu\n self._use_global_stats = use_global_stats\n self._trainable_statistics = trainable_statistics\n\n def _build_once(self, input):\n pass\n\n def forward(self, input):\n # create output\n # mean and mean_out share the same memory\n mean_out = self._mean\n # variance and variance out share the same memory\n variance_out = self._variance\n\n saved_mean = self._helper.create_variable_for_type_inference(\n dtype=self._dtype, stop_gradient=True)\n saved_variance = self._helper.create_variable_for_type_inference(\n dtype=self._dtype, stop_gradient=True)\n batch_norm_out = input if self._in_place else self._helper.create_variable_for_type_inference(\n self._dtype)\n\n self._helper.append_op(\n type=\"batch_norm\",\n inputs={\n \"X\": input,\n \"Scale\": self._scale,\n \"Bias\": self._bias,\n \"Mean\": self._mean,\n \"Variance\": self._variance\n },\n outputs={\n \"Y\": batch_norm_out,\n \"MeanOut\": mean_out,\n \"VarianceOut\": variance_out,\n \"SavedMean\": saved_mean,\n \"SavedVariance\": saved_variance\n },\n attrs={\n \"momentum\": self._momentum,\n \"epsilon\": self._epsilon,\n \"is_test\": self._is_test,\n \"data_layout\": self._data_layout,\n \"use_mkldnn\": False,\n \"fuse_with_relu\": self._fuse_with_relu,\n \"use_global_stats\": self._use_global_stats,\n \"trainable_statistics\": self._trainable_statistics\n })\n\n # Currently, we don't support inplace 
in dygraph mode\n return self._helper.append_activation(batch_norm_out, self._act)\n\n\nclass Embedding(layers.Layer):\n \"\"\"\n **Embedding Layer**\n\n This layer is used to lookup embeddings of IDs, provided by :attr:`input`, in\n a lookup table. The result of this lookup is the embedding of each ID in the\n :attr:`input`.\n All the input variables are passed in as local variables to the LayerHelper constructor\n\n Args:\n name_scope(str): The name of this class.\n size(tuple|list): The shape of the look up table parameter. It should have two elements which indicate the size\n of the dictionary of embeddings and the size of each embedding vector respectively.\n is_sparse(bool): The flag indicating whether to use sparse update. Default: False\n is_distributed(bool): Whether to run lookup table from remote parameter server. Default: False.\n padding_idx(int|long|None): If :attr:`None`, it makes no effect to lookup.\n Otherwise the given :attr:`padding_idx` indicates padding the output with zeros whenever lookup encounters\n it in :attr:`input`. If :math:`padding_idx < 0`, the :attr:`padding_idx` to use in lookup is :math:`size[0] + dim`. Default: None.\n param_attr(ParamAttr): Parameters for this layer. Default: None.\n dtype(np.dtype|core.VarDesc.VarType|str): The type of data : float32, float_16, int etc. Default: 'float32'.\n\n Returns:\n Variable: The tensor variable storing the embeddings of the \\\n supplied inputs.\n\n Examples:\n\n .. code-block:: python\n\n import paddle.fluid as fluid\n import paddle.fluid.dygraph.base as base\n import numpy as np\n\n inp_word = np.array([[[1]]]).astype('int64')\n dict_size = 20\n with fluid.dygraph.guard():\n emb = fluid.dygraph.Embedding(\n name_scope='embedding',\n size=[dict_size, 32],\n param_attr='emb.w',\n is_sparse=False)\n static_rlt3 = emb(base.to_variable(inp_word))\n \"\"\"\n\n def __init__(self,\n name_scope,\n size,\n is_sparse=False,\n is_distributed=False,\n padding_idx=None,\n param_attr=None,\n dtype='float32'):\n super(Embedding, self).__init__(name_scope, dtype)\n self._size = size\n self._is_sparse = is_sparse\n self._is_distributed = is_distributed\n self._padding_idx = -1 if padding_idx is None else padding_idx if padding_idx >= 0 else (\n size[0] + padding_idx)\n\n self._param_attr = param_attr\n self._dtype = dtype\n self._remote_prefetch = self._is_sparse and (not self._is_distributed)\n if self._remote_prefetch:\n assert self._is_sparse is True and self._is_distributed is False\n\n self._w = self.create_parameter(\n attr=self._param_attr,\n shape=self._size,\n dtype=self._dtype,\n is_bias=False)\n\n def forward(self, input):\n out = self._helper.create_variable_for_type_inference(self._dtype)\n self._helper.append_op(\n type='lookup_table',\n inputs={'Ids': input,\n 'W': self._w},\n outputs={'Out': out},\n attrs={\n 'is_sparse': self._is_sparse,\n 'is_distributed': self._is_distributed,\n 'remote_prefetch': self._remote_prefetch,\n 'padding_idx': self._padding_idx\n })\n\n return out\n\n\nclass LayerNorm(layers.Layer):\n \"\"\"\n Assume feature vectors exist on dimensions\n `begin_norm_axis ... rank(input)` and calculate the moment statistics along these dimensions for each feature\n vector `a` with size `H`, then normalize each feature vector using the corresponding\n statistics. After that, apply learnable gain and bias on the normalized\n tensor to scale and shift if `scale` and `shift` are set.\n\n Refer to `Layer Normalization <https://arxiv.org/pdf/1607.06450v1.pdf>`_\n\n The formula is as follows:\n\n .. 
math::\n\n \\\\mu & = \\\\frac{1}{H}\\\\sum_{i=1}^{H} a_i\n\n \\\\sigma & = \\\\sqrt{\\\\frac{1}{H}\\sum_{i=1}^{H}(a_i - \\\\mu)^2}\n\n h & = f(\\\\frac{g}{\\\\sigma}(a - \\\\mu) + b)\n\n * :math:`a`: the vector representation of the summed inputs to the neurons\n in that layer.\n\n * :math:`H`: the number of hidden units in a layers\n\n * :math:`g`: the trainable scale parameter.\n\n * :math:`b`: the trainable bias parameter.\n\n Args:\n name_scope(str): The name of this class.\n scale(bool): Whether to learn the adaptive gain :math:`g` after\n normalization. Default: True.\n shift(bool): Whether to learn the adaptive bias :math:`b` after\n normalization. Default: True.\n begin_norm_axis(int): The normalization will be performed along\n dimensions from :attr:`begin_norm_axis` to :attr:`rank(input)`.\n Default: 1.\n epsilon(float): The small value added to the variance to prevent\n division by zero. Default: 1e-05.\n param_attr(ParamAttr|None): The parameter attribute for the learnable\n gain :math:`g`. If :attr:`scale` is False, :attr:`param_attr` is\n omitted. If :attr:`scale` is True and :attr:`param_attr` is None,\n a default :code:`ParamAttr` would be added as scale. The\n :attr:`param_attr` is initialized as 1 if it is added. Default: None.\n bias_attr(ParamAttr|None): The parameter attribute for the learnable\n bias :math:`b`. If :attr:`shift` is False, :attr:`bias_attr` is\n omitted. If :attr:`shift` is True and :attr:`param_attr` is None,\n a default :code:`ParamAttr` would be added as bias. The\n :attr:`bias_attr` is initialized as 0 if it is added. Default: None.\n act(str): Activation to be applied to the output of layer normalizaiton.\n Default: None.\n Returns:\n Result after normalization\n\n Examples:\n\n .. code-block:: python\n\n import paddle.fluid as fluid\n import numpy\n\n with fluid.dygraph.guard():\n x = numpy.random.random((3, 32, 32)).astype('float32')\n layerNorm = fluid.dygraph.nn.LayerNorm(\n 'LayerNorm', begin_norm_axis=1)\n ret = layerNorm(fluid.dygraph.base.to_variable(x))\n\n \"\"\"\n\n def __init__(self,\n name_scope,\n scale=True,\n shift=True,\n begin_norm_axis=1,\n epsilon=1e-05,\n param_attr=None,\n bias_attr=None,\n act=None):\n super(LayerNorm, self).__init__(name_scope)\n self._scale = scale\n self._shift = shift\n self._begin_norm_axis = begin_norm_axis\n self._epsilon = epsilon\n self._param_attr = param_attr\n self._bias_attr = bias_attr\n self._act = act\n\n def _build_once(self, input):\n self._dtype = self._helper.input_dtype(input)\n input_shape = input.shape\n param_shape = [\n reduce(lambda x, y: x * y, input_shape[self._begin_norm_axis:])\n ]\n if self._scale:\n self._scale_w = self.create_parameter(\n attr=self._param_attr,\n shape=param_shape,\n dtype=self._dtype,\n default_initializer=Constant(1.0))\n else:\n if self._param_attr:\n logging.warn(\"param_attr are only avaliable with scale is True\")\n\n if self._shift:\n assert self._bias_attr is not False\n self._bias_w = self.create_parameter(\n attr=self._bias_attr,\n shape=param_shape,\n dtype=self._dtype,\n is_bias=True)\n else:\n if self._bias_attr:\n logging.warn(\"bias_attr are only avaliable with shift is True\")\n\n def forward(self, input):\n inputs = dict()\n inputs['X'] = input\n if self._scale:\n inputs['Scale'] = self._scale_w\n if self._shift:\n inputs['Bias'] = self._bias_w\n # create output\n mean_out = self._helper.create_variable_for_type_inference(\n dtype=self._dtype, stop_gradient=True)\n variance_out = self._helper.create_variable_for_type_inference(\n 
dtype=self._dtype, stop_gradient=True)\n layer_norm_out = self._helper.create_variable_for_type_inference(\n self._dtype)\n\n self._helper.append_op(\n type=\"layer_norm\",\n inputs=inputs,\n outputs={\n \"Y\": layer_norm_out,\n \"Mean\": mean_out,\n \"Variance\": variance_out,\n },\n attrs={\n \"epsilon\": self._epsilon,\n \"begin_norm_axis\": self._begin_norm_axis\n })\n\n return self._helper.append_activation(layer_norm_out, act=self._act)\n\n\nclass GRUUnit(layers.Layer):\n \"\"\"\n **GRU unit layer**\n\n if origin_mode is True, then the equation of a gru step is from paper\n `Learning Phrase Representations using RNN Encoder-Decoder for Statistical\n Machine Translation <https://arxiv.org/pdf/1406.1078.pdf>`\n\n .. math::\n u_t & = actGate(xu_{t} + W_u h_{t-1} + b_u)\n\n r_t & = actGate(xr_{t} + W_r h_{t-1} + b_r)\n\n m_t & = actNode(xm_t + W_c dot(r_t, h_{t-1}) + b_m)\n\n h_t & = dot(u_t, h_{t-1}) + dot((1-u_t), m_t)\n\n if origin_mode is False, then the equation of a gru step is from paper\n `Empirical Evaluation of Gated Recurrent Neural Networks on Sequence\n Modeling <https://arxiv.org/pdf/1412.3555.pdf>`_\n\n .. math::\n u_t & = actGate(xu_{t} + W_u h_{t-1} + b_u)\n\n r_t & = actGate(xr_{t} + W_r h_{t-1} + b_r)\n\n m_t & = actNode(xm_t + W_c dot(r_t, h_{t-1}) + b_m)\n\n h_t & = dot((1-u_t), h_{t-1}) + dot(u_t, m_t)\n\n\n The inputs of gru unit includes :math:`z_t`, :math:`h_{t-1}`. In terms\n of the equation above, the :math:`z_t` is split into 3 parts -\n :math:`xu_t`, :math:`xr_t` and :math:`xm_t`. This means that in order to\n implement a full GRU unit operator for an input, a fully\n connected layer has to be applied, such that :math:`z_t = W_{fc}x_t`.\n\n The terms :math:`u_t` and :math:`r_t` represent the update and reset gates\n of the GRU cell. Unlike LSTM, GRU has one lesser gate. However, there is\n an intermediate candidate hidden output, which is denoted by :math:`m_t`.\n This layer has three outputs :math:`h_t`, :math:`dot(r_t, h_{t-1})`\n and concatenation of :math:`u_t`, :math:`r_t` and :math:`m_t`.\n\n Args:\n name_scope(str): The name of this class.\n size (int): The input dimension value.\n param_attr(ParamAttr|None): The parameter attribute for the learnable\n hidden-hidden weight matrix. Note:\n\n - The shape of the weight matrix is :math:`(T \\\\times 3D)`, where\n :math:`D` is the hidden size.\n - All elements in the weight matrix can be divided into two parts.\n The first part are weights of the update gate and reset gate with\n shape :math:`(D \\\\times 2D)`, and the second part are weights for\n candidate hidden state with shape :math:`(D \\\\times D)`.\n\n If it is set to None or one attribute of ParamAttr, gru_unit will\n create ParamAttr as param_attr. If the Initializer of the param_attr\n is not set, the parameter is initialized with Xavier. Default: None.\n bias_attr (ParamAttr|bool|None): The parameter attribute for the bias\n of GRU.Note that the bias with :math:`(1 \\\\times 3D)` concatenates\n the bias in the update gate, reset gate and candidate calculations.\n If it is set to False, no bias will be applied to the update gate,\n reset gate and candidate calculations. If it is set to None or one\n attribute of ParamAttr, gru_unit will create ParamAttr as\n bias_attr. If the Initializer of the bias_attr is not set, the bias\n is initialized zero. 
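import numpy as np

# Hedged NumPy restatement of the LayerNorm formulas earlier: normalize over
# dims >= begin_norm_axis, then apply gain g and bias b (the epsilon placement
# is an assumption of this sketch):
def layer_norm(x, g=1.0, b=0.0, begin_norm_axis=1, eps=1e-5):
    axes = tuple(range(begin_norm_axis, x.ndim))
    mu = x.mean(axis=axes, keepdims=True)
    sigma = np.sqrt(((x - mu) ** 2).mean(axis=axes, keepdims=True) + eps)
    return g * (x - mu) / sigma + b

y = layer_norm(np.random.random((3, 32, 32)).astype('float32'))
print(y.mean(), y.std())  # roughly 0 and 1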
Default: None.\n activation (str): The activation type for cell (actNode).\n Default: 'tanh'\n gate_activation (str): The activation type for gates (actGate).\n Default: 'sigmoid'\n dtype(str): The dtype of the layers. Default: 'float32'\n\n Returns:\n tuple: The hidden value, reset-hidden value and gate values.\n\n Examples:\n\n .. code-block:: python\n\n import paddle.fluid as fluid\n import paddle.fluid.dygraph.base as base\n import numpy\n\n lod = [[2, 4, 3]]\n D = 5\n T = sum(lod[0])\n\n hidden_input = numpy.random.rand(T, D).astype('float32')\n with fluid.dygraph.guard():\n x = numpy.random.random((3, 32, 32)).astype('float32')\n gru = fluid.dygraph.GRUUnit('gru', size=D * 3)\n dy_ret = gru(\n base.to_variable(input), base.to_variable(hidden_input))\n\n \"\"\"\n\n def __init__(self,\n name_scope,\n size,\n param_attr=None,\n bias_attr=None,\n activation='tanh',\n gate_activation='sigmoid',\n origin_mode=False,\n dtype='float32'):\n super(GRUUnit, self).__init__(name_scope, dtype)\n\n activation_dict = dict(\n identity=0,\n sigmoid=1,\n tanh=2,\n relu=3, )\n self.activation = activation_dict[activation]\n self.gate_activation = activation_dict[gate_activation]\n\n self._dtype = dtype\n size = size // 3\n # create weight\n self._weight = self.create_parameter(\n attr=param_attr, shape=[size, 3 * size], dtype=dtype)\n\n # create bias\n bias_size = [1, 3 * size]\n self._bias = self.create_parameter(\n attr=bias_attr, shape=bias_size, dtype=dtype, is_bias=True)\n\n def forward(self, input, hidden):\n inputs = {'Input': input, 'HiddenPrev': hidden, 'Weight': self._weight}\n if self._bias:\n inputs['Bias'] = self._bias\n\n gate = self._helper.create_variable_for_type_inference(self._dtype)\n reset_hidden_pre = self._helper.create_variable_for_type_inference(\n self._dtype)\n updated_hidden = self._helper.create_variable_for_type_inference(\n self._dtype)\n self._helper.append_op(\n type='gru_unit',\n inputs=inputs,\n outputs={\n 'Gate': gate,\n 'ResetHiddenPrev': reset_hidden_pre,\n 'Hidden': updated_hidden,\n },\n attrs={\n 'activation': self.activation,\n 'gate_activation': self.gate_activation,\n })\n\n return updated_hidden, reset_hidden_pre, gate\n\n\nclass NCE(layers.Layer):\n \"\"\"\n Compute and return the noise-contrastive estimation training loss. See\n `Noise-contrastive estimation: A new estimation principle for unnormalized\n statistical models\n <http://www.jmlr.org/proceedings/papers/v9/gutmann10a/gutmann10a.pdf>`.\n By default this operator uses a uniform distribution for sampling.\n\n Args:\n name_scope(str): The name of this class.\n num_total_classes (int): Total number of classes in all samples\n param_attr (ParamAttr|None): The parameter attribute for learnable parameters/weights\n of nce. If it is set to None or one attribute of ParamAttr, nce\n will create ParamAttr as param_attr. If the Initializer of the param_attr\n is not set, the parameter is initialized with Xavier. Default: None.\n bias_attr (ParamAttr|bool|None): The parameter attribute for the bias of nce.\n If it is set to False, no bias will be added to the output units.\n If it is set to None or one attribute of ParamAttr, nce\n will create ParamAttr as bias_attr. If the Initializer of the bias_attr\n is not set, the bias is initialized zero. Default: None.\n num_neg_samples (int): The number of negative classes. 
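import numpy as np

def _sigmoid(z):
    return 1.0 / (1.0 + np.exp(-z))

# Sketch of one GRU step with origin_mode=False, following the equations
# above; xu/xr/xm are the three slices of the pre-computed z_t = W_fc x_t,
# and the weight layout is an assumption of this illustration:
def gru_step(xu, xr, xm, h_prev, W_u, W_r, W_c, b_u, b_r, b_m):
    u = _sigmoid(xu + h_prev @ W_u + b_u)        # update gate
    r = _sigmoid(xr + h_prev @ W_r + b_r)        # reset gate
    m = np.tanh(xm + (r * h_prev) @ W_c + b_m)   # candidate hidden state
    return (1.0 - u) * h_prev + u * m            # new hidden state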
The default value is 10.\n sampler (str): The sampler used to sample class from negtive classes.\n It can be 'uniform', 'log_uniform' or 'custom_dist'.\n default: 'uniform'.\n custom_dist (float[]|None): A float[] with size=num_total_classes.\n It is used when sampler is set to 'custom_dist'.\n custom_dist[i] is the probsbility of i-th class to be sampled.\n Default: None.\n seed (int): The seed used in sampler. Default: 0.\n is_sparse(bool): The flag indicating whether to use sparse update, the weight@GRAD and bias@GRAD will be changed to SelectedRows. Default: False.\n\n Returns:\n Variable: The output nce loss.\n\n Examples:\n .. code-block:: python\n\n import numpy as np\n import paddle.fluid as fluid\n\n window_size = 5\n dict_size = 20\n label_word = int(window_size // 2) + 1\n inp_word = np.array([[[1]], [[2]], [[3]], [[4]], [[5]]]).astype('int64')\n nid_freq_arr = np.random.dirichlet(np.ones(20) * 1000).astype('float32')\n\n with fluid.dygraph.guard():\n words = []\n for i in range(window_size):\n words.append(fluid.dygraph.base.to_variable(inp_word[i]))\n\n emb = fluid.Embedding(\n 'embedding',\n size=[dict_size, 32],\n param_attr='emb.w',\n is_sparse=False)\n\n embs3 = []\n for i in range(window_size):\n if i == label_word:\n continue\n\n emb_rlt = emb(words[i])\n embs3.append(emb_rlt)\n\n embs3 = fluid.layers.concat(input=embs3, axis=1)\n nce = fluid.NCE('nce',\n num_total_classes=dict_size,\n num_neg_samples=2,\n sampler=\"custom_dist\",\n custom_dist=nid_freq_arr.tolist(),\n seed=1,\n param_attr='nce.w',\n bias_attr='nce.b')\n\n nce_loss3 = nce(embs3, words[label_word])\n\n \"\"\"\n\n def __init__(self,\n name_scope,\n num_total_classes,\n sample_weight=None,\n param_attr=None,\n bias_attr=None,\n num_neg_samples=None,\n sampler=\"uniform\",\n custom_dist=None,\n seed=0,\n is_sparse=False):\n super(NCE, self).__init__(name_scope)\n self._param_attr = param_attr\n self._bias_attr = bias_attr\n self._num_total_classes = num_total_classes\n\n self._inputs = dict()\n self._inputs['SampleWeight'] = sample_weight if sample_weight is not None else []\n if sampler == \"uniform\":\n sampler = 0\n elif sampler == \"log_uniform\":\n sampler = 1\n elif sampler == \"custom_dist\":\n assert custom_dist is not None\n # assert isinstance(custom_dist, Variable)\n\n custom_dist_len = len(custom_dist)\n alias_probs_ = [0] * custom_dist_len\n alias_ = [0] * custom_dist_len\n bigs = []\n littles = []\n for i in range(custom_dist_len):\n normal_prob = custom_dist[i] * custom_dist_len\n if normal_prob - 1.0 > 0:\n bigs.append((i, normal_prob))\n elif 1.0 - normal_prob > 0:\n littles.append((i, normal_prob))\n else:\n alias_probs_[i] = normal_prob\n alias_[i] = -1\n\n while len(bigs) and len(littles):\n big = bigs.pop(0)\n little = littles.pop(0)\n\n big_idx = big[0]\n big_prob = big[1]\n\n alias_probs_[little[0]] = little[1]\n alias_[little[0]] = big_idx\n big_left = big[1] + little[1] - 1\n if big_left - 1.0 > 0:\n bigs.append((big_idx, big_left))\n elif 1.0 - big_left > 0:\n littles.append((big_idx, big_left))\n else:\n alias_probs_[big_idx] = big_left\n alias_[big_idx] = -1\n\n if len(bigs):\n big = bigs.pop(0)\n alias_probs_[big[0]] = 1.0\n alias_[big[0]] = -1\n if len(littles):\n little = littles.pop(0)\n alias_probs_[little[0]] = 1.0\n alias_[little[0]] = -1\n\n def _init_by_numpy_array(numpy_array):\n ret = self.create_parameter(\n attr=ParamAttr(),\n shape=numpy_array.shape,\n dtype=numpy_array.dtype,\n default_initializer=NumpyArrayInitializer(numpy_array))\n ret.stop_gradient = True\n 
return ret\n\n self._inputs['CustomDistProbs'] = _init_by_numpy_array(\n np.array(custom_dist).astype('float32'))\n self._inputs['CustomDistAlias'] = _init_by_numpy_array(\n np.array(alias_).astype('int32'))\n self._inputs['CustomDistAliasProbs'] = _init_by_numpy_array(\n np.array(alias_probs_).astype('float32'))\n sampler = 2\n else:\n raise Exception(\"Unsupported sampler type.\")\n\n if num_neg_samples is None:\n num_neg_samples = 10\n else:\n num_neg_samples = int(num_neg_samples)\n self._num_neg_samples = num_neg_samples\n remote_prefetch = is_sparse\n print(\n \"With sparse mode, if your models has only small parameter prefetch may cause speed down\"\n )\n self._attrs = {\n 'num_total_classes': int(num_total_classes),\n 'num_neg_samples': num_neg_samples,\n 'seed': seed,\n 'sampler': sampler,\n 'is_sparse': is_sparse,\n 'remote_prefetch': remote_prefetch\n }\n\n def _build_once(self, input, label, sample_weight=None):\n assert isinstance(input, Variable)\n assert isinstance(label, Variable)\n\n dim = input.shape[1]\n num_true_class = label.shape[1]\n self._w = self.create_parameter(\n attr=self._param_attr,\n shape=[self._num_total_classes, dim],\n is_bias=False,\n dtype=input.dtype)\n if self._bias_attr:\n self._b = self.create_parameter(\n attr=self._bias_attr,\n shape=[self._num_total_classes, 1],\n is_bias=True,\n dtype=input.dtype)\n self._inputs['Bias'] = self._b\n self._inputs['Weight'] = self._w\n\n def forward(self, input, label, sample_weight=None):\n assert isinstance(input, Variable)\n assert isinstance(label, Variable)\n\n self._inputs['Input'] = input\n self._inputs['Label'] = label\n self._inputs['SampleWeight'] = sample_weight if sample_weight is not None else []\n\n cost = self._helper.create_variable_for_type_inference(\n dtype=input.dtype)\n sample_logits = self._helper.create_variable_for_type_inference(\n dtype=input.dtype)\n sample_labels = self._helper.create_variable_for_type_inference(\n dtype=label.dtype)\n\n self._helper.append_op(\n type='nce',\n inputs=self._inputs,\n outputs={\n 'Cost': cost,\n 'SampleLogits': sample_logits,\n 'SampleLabels': sample_labels\n },\n attrs=self._attrs)\n return cost / (self._num_neg_samples + 1)\n\n\nclass PRelu(layers.Layer):\n \"\"\"\n Equation:\n\n .. math::\n y = \\max(0, x) + \\\\alpha * \\min(0, x)\n\n Args:\n name_scope(str): The name of this class.\n mode (str): The mode for weight sharing. It supports all, channel\n and element. all: all elements share same weight\n channel:elements in a channel share same weight\n element:each element has a weight\n param_attr(ParamAttr|None): The parameter attribute for the learnable\n weight (alpha).\n\n Returns:\n Variable: The output tensor with the same shape as input.\n\n Examples:\n\n .. 
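import numpy as np

# Companion sketch to the custom_dist setup in the NCE code above: once
# alias_probs_ and alias_ are built, Walker's alias method draws a class in
# O(1). The real draw happens inside the nce op; this standalone version is
# only an illustration:
def alias_sample(alias_probs, alias, rng=np.random):
    i = rng.randint(len(alias_probs))          # pick a bucket uniformly
    if rng.random_sample() < alias_probs[i]:   # keep the bucket's own class
        return i
    return alias[i]                            # otherwise jump to its alias

# For custom_dist [0.5, 0.25, 0.25] the construction above yields
# alias_probs_ = [1.0, 0.75, 0.75] and alias_ = [-1, 0, 0].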
code-block:: python\n\n import paddle.fluid as fluid\n import numpy as np\n\n inp_np = np.ones([5, 200, 100, 100]).astype('float32')\n with fluid.dygraph.guard():\n mode = 'channel'\n prelu = fluid.PRelu(\n 'prelu',\n mode=mode,\n param_attr=fluid.ParamAttr(initializer=fluid.initializer.Constant(1.0)))\n dy_rlt = prelu(fluid.dygraph.base.to_variable(inp_np))\n\n \"\"\"\n\n def __init__(self, name_scope, mode, param_attr=None):\n\n super(PRelu, self).__init__(name_scope)\n self._mode = mode\n self._param_attr = param_attr\n if self._mode not in ['all', 'channel', 'element']:\n raise ValueError('mode should be one of all, channel, element.')\n self._alpha_shape = [1]\n\n def _build_once(self, input):\n if self._mode == 'channel':\n self._alpha_shape = [1, input.shape[1], 1, 1]\n elif self._mode == 'element':\n self._alpha_shape = input.shape\n self._dtype = self._helper.input_dtype(input)\n self._alpha = self.create_parameter(\n attr=self._param_attr,\n shape=self._alpha_shape,\n dtype='float32',\n is_bias=False,\n default_initializer=Constant(1.0))\n\n def forward(self, input):\n\n out = self._helper.create_variable_for_type_inference(self._dtype)\n self._helper.append_op(\n type=\"prelu\",\n inputs={\"X\": input,\n 'Alpha': self._alpha},\n attrs={\"mode\": self._mode},\n outputs={\"Out\": out})\n return out\n\n\nclass BilinearTensorProduct(layers.Layer):\n \"\"\"\n **Add Bilinear Tensor Product Layer**\n\n This layer performs bilinear tensor product on two inputs.\n For example:\n\n .. math::\n out_{i} = x * W_{i} * {y^\\mathrm{T}}, i=0,1,...,size-1\n\n In this formula:\n - :math:`x`: the first input contains M elements, shape is [batch_size, M].\n - :math:`y`: the second input contains N elements, shape is [batch_size, N].\n - :math:`W_{i}`: the i-th learned weight, shape is [M, N]\n - :math:`out_{i}`: the i-th element of out, shape is [batch_size, size].\n - :math:`y^\\mathrm{T}`: the transpose of :math:`y_{2}`.\n\n Args:\n name_scope(str): The name of this class.\n size (int): The dimension of this layer.\n act (str): Activation to be applied to the output of this layer. Default: None.\n name (str): The name of this layer. Default: None.\n param_attr (ParamAttr): The parameter attribute for the learnable w.\n parameters/weights of this layer. Default: None.\n bias_attr (ParamAttr): The parameter attribute for the bias\n of this layer. If it is set to False, no bias will be added to the output units.\n If it is set to None, the bias is initialized zero. Default: None.\n\n Returns:\n Variable: A 2-D Tensor of shape [batch_size, size].\n\n Examples:\n .. 
code-block:: python\n\n import paddle.fluid as fluid\n import numpy\n\n with fluid.dygraph.guard():\n layer1 = numpy.random.random((5, 5)).astype('float32')\n layer2 = numpy.random.random((5, 4)).astype('float32')\n bilinearTensorProduct = fluid.dygraph.nn.BilinearTensorProduct(\n 'BilinearTensorProduct', size=1000)\n ret = bilinearTensorProduct(fluid.dygraph.base.to_variable(layer1),\n fluid.dygraph.base.to_variable(layer2))\n \"\"\"\n\n def __init__(self,\n name_scope,\n size,\n name=None,\n act=None,\n param_attr=None,\n bias_attr=None):\n super(BilinearTensorProduct, self).__init__(name_scope)\n self._param_attr = param_attr\n self._bias_attr = bias_attr\n self._act = act\n self._size = size\n self._name = name\n self._inputs = dict()\n\n def _build_once(self, x, y):\n self._dtype = self._helper.input_dtype(x)\n\n param_shape = [self._size, x.shape[1], y.shape[1]]\n\n self._w = self.create_parameter(\n attr=self._param_attr,\n shape=param_shape,\n dtype=self._dtype,\n is_bias=False)\n\n bias_size = [1, self._size]\n self._bias_param = self.create_parameter(\n attr=self._bias_attr,\n shape=bias_size,\n dtype=self._dtype,\n is_bias=True)\n\n def forward(self, x, y):\n self._inputs = {\"X\": x, \"Y\": y, \"Weight\": self._w}\n if self._bias_param:\n self._inputs[\"Bias\"] = self._bias_param\n if self._name is not None:\n out = self._helper.create_variable(\n name=\".\".join([self.full_name(), self._name]),\n dtype=self._dtype,\n persistable=False)\n else:\n out = self._helper.create_variable(\n dtype=self._dtype, persistable=False)\n self._helper.append_op(\n type=\"bilinear_tensor_product\",\n inputs=self._inputs,\n outputs={\"Out\": out})\n\n # add activation\n return self._helper.append_activation(out, act=self._act)\n\n\nclass Conv2DTranspose(layers.Layer):\n \"\"\"\n **Convlution2D transpose layer**\n\n The convolution2D transpose layer calculates the output based on the input,\n filter, and dilations, strides, paddings. Input(Input) and output(Output)\n are in NCHW format. Where N is batch size, C is the number of channels,\n H is the height of the feature, and W is the width of the feature.\n Parameters(dilations, strides, paddings) are two elements. These two elements\n represent height and width, respectively. The details of convolution transpose\n layer, please refer to the following explanation and references\n `therein <http://www.matthewzeiler.com/wp-content/uploads/2017/07/cvpr2010.pdf>`_.\n If bias attribution and activation type are provided, bias is added to\n the output of the convolution, and the corresponding activation function\n is applied to the final result.\n\n For each input :math:`X`, the equation is:\n\n .. math::\n\n Out = \\sigma (W \\\\ast X + b)\n\n Where:\n\n * :math:`X`: Input value, a tensor with NCHW format.\n * :math:`W`: Filter value, a tensor with MCHW format.\n * :math:`\\\\ast`: Convolution operation.\n * :math:`b`: Bias value, a 2-D tensor with shape [M, 1].\n * :math:`\\\\sigma`: Activation function.\n * :math:`Out`: Output value, the shape of :math:`Out` and :math:`X` may be different.\n\n Example:\n\n - Input:\n\n Input shape: :math:`(N, C_{in}, H_{in}, W_{in})`\n\n Filter shape: :math:`(C_{in}, C_{out}, H_f, W_f)`\n\n - Output:\n\n Output shape: :math:`(N, C_{out}, H_{out}, W_{out})`\n\n Where\n\n .. 
math::\n\n H^\\prime_{out} &= (H_{in} - 1) * strides[0] - 2 * paddings[0] + dilations[0] * (H_f - 1) + 1 \\\\\\\\\n W^\\prime_{out} &= (W_{in} - 1) * strides[1] - 2 * paddings[1] + dilations[1] * (W_f - 1) + 1 \\\\\\\\\n H_{out} &\\in [ H^\\prime_{out}, H^\\prime_{out} + strides[0] ) \\\\\\\\\n W_{out} &\\in [ W^\\prime_{out}, W^\\prime_{out} + strides[1] )\n\n Args:\n name_scope(str): The name of this class.\n num_filters(int): The number of the filter. It is as same as the output\n image channel.\n output_size(int|tuple|None): The output image size. If output size is a\n tuple, it must contain two integers, (image_H, image_W). None if use\n filter_size, padding, and stride to calculate output_size.\n if output_size and filter_size are specified at the same time, They\n should follow the formula above. Default: None.\n filter_size(int|tuple|None): The filter size. If filter_size is a tuple,\n it must contain two integers, (filter_size_H, filter_size_W).\n Otherwise, the filter will be a square. None if use output size to\n calculate filter_size. Default: None.\n padding(int|tuple): The padding size. If padding is a tuple, it must\n contain two integers, (padding_H, padding_W). Otherwise, the\n padding_H = padding_W = padding. Default: padding = 0.\n stride(int|tuple): The stride size. If stride is a tuple, it must\n contain two integers, (stride_H, stride_W). Otherwise, the\n stride_H = stride_W = stride. Default: stride = 1.\n dilation(int|tuple): The dilation size. If dilation is a tuple, it must\n contain two integers, (dilation_H, dilation_W). Otherwise, the\n dilation_H = dilation_W = dilation. Default: dilation = 1.\n groups(int): The groups number of the Conv2d transpose layer. Inspired by\n grouped convolution in Alex Krizhevsky's Deep CNN paper, in which\n when group=2, the first half of the filters is only connected to the\n first half of the input channels, while the second half of the\n filters is only connected to the second half of the input channels.\n Default: groups = 1.\n param_attr (ParamAttr|None): The parameter attribute for learnable parameters/weights\n of conv2d_transpose. If it is set to None or one attribute of ParamAttr, conv2d_transpose\n will create ParamAttr as param_attr. If the Initializer of the param_attr\n is not set, the parameter is initialized with Xavier. Default: None.\n bias_attr (ParamAttr|bool|None): The parameter attribute for the bias of conv2d_transpose.\n If it is set to False, no bias will be added to the output units.\n If it is set to None or one attribute of ParamAttr, conv2d_transpose\n will create ParamAttr as bias_attr. If the Initializer of the bias_attr\n is not set, the bias is initialized zero. Default: None.\n use_cudnn(bool): Use cudnn kernel or not, it is valid only when the cudnn\n library is installed. Default: True.\n act (str): Activation type, if it is set to None, activation is not appended.\n Default: None.\n\n Returns:\n Variable: The tensor variable storing the convolution transpose result.\n\n Raises:\n ValueError: If the shapes of input, filter_size, stride, padding and\n groups mismatch.\n\n Examples:\n .. 
code-block:: python\n\n import paddle.fluid as fluid\n import numpy\n\n with fluid.dygraph.guard():\n data = numpy.random.random((3, 32, 32)).astype('float32')\n conv2DTranspose = fluid.dygraph.nn.Conv2DTranspose(\n 'Conv2DTranspose', num_filters=2, filter_size=3)\n ret = conv2DTranspose(fluid.dygraph.base.to_variable(data))\n\n \"\"\"\n\n def __init__(self,\n name_scope,\n num_filters,\n output_size=None,\n filter_size=None,\n padding=0,\n stride=1,\n dilation=1,\n groups=None,\n param_attr=None,\n bias_attr=None,\n use_cudnn=True,\n act=None):\n super(Conv2DTranspose, self).__init__(name_scope)\n assert param_attr is not False, \"param_attr should not be False in conv2d_transpose.\"\n self._param_attr = param_attr\n self._bias_attr = bias_attr\n self._act = act\n self._groups = groups\n self._num_filters = num_filters\n self._use_cudnn = use_cudnn\n self._padding = padding\n self._stride = stride\n self._dilation = dilation\n self._filter_size = filter_size\n self._output_size = output_size\n self._op_type = 'conv2d_transpose'\n\n def _build_once(self, input):\n input_channel = input.shape[1]\n if (input_channel == self._groups and\n self._num_filters == input_channel and not self._use_cudnn):\n self._op_type = 'depthwise_conv2d_transpose'\n\n if not isinstance(input, Variable):\n raise TypeError(\"Input of conv2d_transpose must be Variable\")\n\n self._padding = utils.convert_to_list(self._padding, 2, 'padding')\n self._stride = utils.convert_to_list(self._stride, 2, 'stride')\n self._dilation = utils.convert_to_list(self._dilation, 2, 'dilation')\n\n if not isinstance(self._use_cudnn, bool):\n raise ValueError(\"use_cudnn should be True or False\")\n\n if self._filter_size is None:\n if self._output_size is None:\n raise ValueError(\n \"output_size must be set when filter_size is None\")\n if isinstance(self._output_size, int):\n self._output_size = [self._output_size, self._output_size]\n\n h_in = input.shape[2]\n w_in = input.shape[3]\n\n filter_size_h = (self._output_size[0] -\n (h_in - 1) * self._stride[0] + 2 * self._padding[0]\n - 1) // self._dilation[0] + 1\n filter_size_w = (self._output_size[1] -\n (w_in - 1) * self._stride[1] + 2 * self._padding[1]\n - 1) // self._dilation[1] + 1\n self._filter_size = [filter_size_h, filter_size_w]\n else:\n self._filter_size = utils.convert_to_list(\n self._filter_size, 2, 'conv2d_transpose.filter_size')\n\n if self._output_size is None:\n self._output_size = []\n elif isinstance(self._output_size, list) or isinstance(\n self._output_size, int):\n self._output_size = utils.convert_to_list(self._output_size, 2,\n 'output_size')\n else:\n raise ValueError(\"output_size should be list or int\")\n self._padding = utils.convert_to_list(self._padding, 2, 'padding')\n self._groups = 1 if self._groups is None else self._groups\n filter_shape = [input_channel, self._num_filters // self._groups\n ] + self._filter_size\n\n self._img_filter = self.create_parameter(\n dtype=input.dtype, shape=filter_shape, attr=self._param_attr)\n\n self._bias_param = self.create_parameter(\n attr=self._bias_attr,\n shape=[self._num_filters],\n dtype=self._dtype,\n is_bias=True)\n\n def forward(self, input):\n pre_bias = self._helper.create_variable_for_type_inference(\n dtype=input.dtype)\n self._helper.append_op(\n type=self._op_type,\n inputs={'Input': [input],\n 'Filter': [self._img_filter]},\n outputs={'Output': pre_bias},\n attrs={\n 'output_size': self._output_size,\n 'strides': self._stride,\n 'paddings': self._padding,\n 'dilations': self._dilation,\n 'groups': 
self._groups,\n 'use_cudnn': self._use_cudnn\n })\n\n if self._bias_param is not None:\n pre_act = self._helper.create_variable_for_type_inference(\n dtype=self._dtype)\n self._helper.append_op(\n type='elementwise_add',\n inputs={'X': [pre_bias],\n 'Y': [self._bias_param]},\n outputs={'Out': [pre_act]},\n attrs={'axis': 1})\n else:\n pre_act = pre_bias\n\n out = self._helper.append_activation(pre_act, act=self._act)\n return out\n\n\nclass SequenceConv(layers.Layer):\n \"\"\"\n This function creates the op for sequence_conv, using the inputs and\n other convolutional configurations for the filters and stride as given\n in the input parameters to the function.\n\n Args:\n name_scope(str): The name of this class.\n num_filters (int): number of filters.\n filter_size (int): the filter size (H and W). Default: 3.\n filter_stride (int): stride of the filter. Default: 1.\n padding (bool|None): if True, add paddings. Default: None\n bias_attr (ParamAttr|bool|None): The parameter attribute for the bias of sequence_conv.\n If it is set to False, no bias will be added to the output units.\n If it is set to None or one attribute of ParamAttr, sequence_conv\n will create ParamAttr as bias_attr. If the Initializer of the bias_attr\n is not set, the bias is initialized zero. Default: None.\n param_attr (ParamAttr|None): The parameter attribute for learnable parameters/weights\n of sequence_conv. If it is set to None or one attribute of ParamAttr, sequence_conv\n will create ParamAttr as param_attr. If the Initializer of the param_attr\n is not set, the parameter is initialized with Xavier. Default: None.\n act (str): Activation type, if it is set to None, activation is not appended.\n Default: None.\n\n Returns:\n Variable: output of sequence_conv\n \"\"\"\n\n def __init__(self,\n name_scope,\n num_filters,\n filter_size=3,\n filter_stride=1,\n padding=None,\n bias_attr=None,\n param_attr=None,\n act=None):\n assert not in_dygraph_mode(\n ), \"SequenceConv is not supported by dynamic graph mode yet!\"\n super(SequenceConv, self).__init__(name_scope)\n self._num_filters = num_filters\n self._filter_size = filter_size\n self._filter_stride = filter_stride\n self._padding = padding\n self._bias_attr = bias_attr\n self._param_attr = param_attr\n self._act = act\n\n def _build_once(self, input):\n self._dtype = self._helper.input_dtype(input)\n filter_shape = [self._filter_size * input.shape[1], self._num_filters]\n self._filter_param = self.create_parameter(\n attr=self._param_attr, shape=filter_shape, dtype=self._dtype)\n\n self._bias_param = self.create_parameter(\n attr=self._bias_attr,\n shape=[self._num_filters],\n dtype=self._dtype,\n is_bias=True)\n\n def forward(self, input):\n pre_bias = self._helper.create_variable_for_type_inference(self._dtype)\n self._helper.append_op(\n type='sequence_conv',\n inputs={\n 'X': [input],\n 'Filter': [self._filter_param],\n },\n outputs={\"Out\": pre_bias},\n attrs={\n 'contextStride': self._filter_stride,\n 'contextStart': -int(self._filter_size // 2),\n 'contextLength': self._filter_size\n })\n\n if self._bias_param is not None:\n pre_act = self._helper.create_variable_for_type_inference(\n dtype=self._dtype)\n self._helper.append_op(\n type='elementwise_add',\n inputs={'X': [pre_bias],\n 'Y': [self._bias_param]},\n outputs={'Out': [pre_act]},\n attrs={'axis': 1})\n else:\n pre_act = pre_bias\n\n return self._helper.append_activation(pre_act, act=self._act)\n\n\nclass RowConv(layers.Layer):\n \"\"\"\n ***Row-convolution operator***\n\n The row convolution is 
called lookahead convolution. This operator was introduced in the following paper for DeepSpeech2:\n http://www.cs.cmu.edu/~dyogatam/papers/wang+etal.iclrworkshop2016.pdf\n\n The main motivation is that a bidirectional RNN, useful in DeepSpeech like speech models, learns representation for a sequence by performing a\n forward and a backward pass through the entire sequence. However, unlike\n unidirectional RNNs, bidirectional RNNs are challenging to deploy in an online\n and low-latency setting. The lookahead convolution incorporates information\n from future subsequences in a computationally efficient manner to improve\n unidirectional recurrent neural networks. The row convolution operator is\n different from the 1D sequence convolution, and is computed as follows:\n\n Given an input sequence X of length t and input dimension D, and a filter (W) of size context * D.\n\n More details about row_conv please refer to the design document https://github.com/PaddlePaddle/Paddle/issues/2228#issuecomment-303903645 .\n\n Args:\n name_scope(str): The name of this class.\n future_context_size (int): Future context size. Please note, the shape\n of convolution kernel is [future_context_size + 1, D].\n param_attr (ParamAttr): Attributes of parameters, including\n name, initializer etc. Default: None.\n act (str): Non-linear activation to be applied to output variable. Default: None.\n\n Returns:\n the output(Out) is a LodTensor, which supports variable time-length input sequences.\n The underlying tensor in this LodTensor is a matrix with shape T x N, i.e., the same shape as X.\n\n Examples:\n .. code-block:: python\n\n import paddle.fluid as fluid\n import numpy\n\n with fluid.dygraph.guard():\n x = numpy.random.random((16)).astype('float32')\n rowConv = fluid.dygraph.nn.RowConv(\n 'RowConv', future_context_size=2)\n ret = rowConv(fluid.dygraph.base.to_variable(x))\n\n \"\"\"\n\n def __init__(self,\n name_scope,\n future_context_size,\n param_attr=None,\n act=None):\n assert not in_dygraph_mode(\n ), \"RowConv is not supported by dynamic graph mode yet!\"\n super(RowConv, self).__init__(name_scope)\n self._act = act\n self._param_attr = param_attr\n self._future_context_size = future_context_size\n\n def _build_once(self, input):\n self._dtype = self._helper.input_dtype(input)\n filter_shape = [self._future_context_size + 1, input.shape[1]]\n self._filter_param = self.create_parameter(\n attr=self._param_attr,\n shape=filter_shape,\n dtype=self._dtype,\n is_bias=False)\n\n def forward(self, input):\n out = self._helper.create_variable_for_type_inference(self._dtype)\n self._helper.append_op(\n type='row_conv',\n inputs={'X': [input],\n 'Filter': [self._filter_param]},\n outputs={'Out': [out]})\n return self._helper.append_activation(out, act=self._act)\n\n\nclass GroupNorm(layers.Layer):\n \"\"\"\n **Group Normalization Layer**\n\n Refer to `Group Normalization <https://arxiv.org/abs/1803.08494>`_ .\n\n Args:\n name_scope(str): The name of this class.\n groups(int): The number of groups that divided from channels.\n epsilon(float): The small value added to the variance to prevent\n division by zero. Default: 1e-05.\n param_attr(ParamAttr|None): The parameter attribute for the learnable\n scale :math:`g`. If it is set to False, no scale will be added to the output units.\n If it is set to None, the bias is initialized one. Default: None.\n bias_attr(ParamAttr|None): The parameter attribute for the learnable\n bias :math:`b`. 
If it is set to False, no bias will be added to the output units.\n If it is set to None, the bias is initialized zero. Default: None.\n act(str): Activation to be applied to the output of group normalizaiton.\n data_layout(string|NCHW): Only NCHW is supported.\n\n Returns:\n Variable: A tensor variable which is the result after applying group normalization on the input.\n\n Examples:\n .. code-block:: python\n\n import paddle.fluid as fluid\n import numpy\n\n with fluid.dygraph.guard():\n x = numpy.random.random((8, 32, 32)).astype('float32')\n groupNorm = fluid.dygraph.nn.GroupNorm('GroupNorm', groups=4)\n ret = groupNorm(fluid.dygraph.base.to_variable(x))\n\n \"\"\"\n\n def __init__(self,\n name_scope,\n groups,\n epsilon=1e-05,\n param_attr=None,\n bias_attr=None,\n act=None,\n data_layout='NCHW'):\n super(GroupNorm, self).__init__(name_scope)\n self._param_attr = param_attr\n self._bias_attr = bias_attr\n self._epsilon = epsilon\n self._groups = groups\n self._act = act\n if data_layout != 'NCHW':\n raise ValueError(\"unsupported data layout:\" + data_layout)\n\n def _build_once(self, input):\n self._dtype = self._helper.input_dtype(input)\n param_shape = [input.shape[1]]\n if self._bias_attr:\n self._bias = self.create_parameter(\n attr=self._bias_attr,\n shape=param_shape,\n dtype=self._dtype,\n is_bias=True)\n\n if self._param_attr:\n self._scale = self.create_parameter(\n attr=self._param_attr,\n shape=param_shape,\n dtype=self._dtype,\n default_initializer=Constant(1.0))\n\n def forward(self, input):\n inputs = {'X': input}\n if self._bias_attr:\n inputs['Bias'] = self._bias\n if self._param_attr:\n inputs['Scale'] = self._scale\n\n # create output\n mean_out = self._helper.create_variable_for_type_inference(\n dtype=self._dtype, stop_gradient=True)\n variance_out = self._helper.create_variable_for_type_inference(\n dtype=self._dtype, stop_gradient=True)\n group_norm_out = self._helper.create_variable_for_type_inference(\n dtype=self._dtype)\n\n self._helper.append_op(\n type=\"group_norm\",\n inputs=inputs,\n outputs={\n \"Y\": group_norm_out,\n \"Mean\": mean_out,\n \"Variance\": variance_out,\n },\n attrs={\"epsilon\": self._epsilon,\n \"groups\": self._groups})\n\n return self._helper.append_activation(group_norm_out, self._act)\n\n\nclass SpectralNorm(layers.Layer):\n \"\"\"\n **Spectral Normalization Layer**\n\n This layer calculates the spectral normalization value of weight parameters of\n fc, conv1d, conv2d, conv3d layers which should be 2-D, 3-D, 4-D, 5-D\n Parameters. Calculations are showed as follows.\n\n Step 1:\n Generate vector U in shape of [H], and V in shape of [W].\n While H is the :attr:`dim` th dimension of the input weights,\n and W is the product result of remaining dimensions.\n\n Step 2:\n :attr:`power_iters` shoule be a positive interger, do following\n calculations with U and V for :attr:`power_iters` rounds.\n\n .. math::\n\n \\mathbf{v} := \\\\frac{\\mathbf{W}^{T} \\mathbf{u}}{\\|\\mathbf{W}^{T} \\mathbf{u}\\|_2}\n\n \\mathbf{u} := \\\\frac{\\mathbf{W}^{T} \\mathbf{v}}{\\|\\mathbf{W}^{T} \\mathbf{v}\\|_2}\n\n Step 3:\n Calculate :math:`\\sigma(\\mathbf{W})` and normalize weight values.\n\n .. 
math::\n\n \\sigma(\\mathbf{W}) = \\mathbf{u}^{T} \\mathbf{W} \\mathbf{v}\n\n \\mathbf{W} = \\\\frac{\\mathbf{W}}{\\sigma(\\mathbf{W})}\n\n\n Refer to `Spectral Normalization <https://arxiv.org/abs/1802.05957>`_ .\n\n Args:\n name_scope(str): The name of this class.\n dim(int): The index of dimension which should be permuted to the first before reshaping Input(Weight) to matrix, it should be set as 0 if Input(Weight) is the weight of fc layer, and should be set as 1 if Input(Weight) is the weight of conv layer. Default: 0.\n power_iters(int): The number of power iterations to calculate spectral norm. Default: 1.\n eps(float): The epsilon for numerical stability in calculating norms. Default: 1e-12.\n name (str): The name of this layer. It is optional.\n\n Returns:\n Variable: A tensor variable of weight parameters after spectral normalization.\n\n Examples:\n .. code-block:: python\n\n import paddle.fluid as fluid\n import numpy\n\n with fluid.dygraph.guard():\n x = numpy.random.random((2, 8, 32, 32)).astype('float32')\n spectralNorm = fluid.dygraph.nn.SpectralNorm('SpectralNorm', dim=1, power_iters=2)\n ret = spectralNorm(fluid.dygraph.base.to_variable(x))\n\n \"\"\"\n\n def __init__(self, name_scope, dim=0, power_iters=1, eps=1e-12, name=None):\n super(SpectralNorm, self).__init__(name_scope)\n self._power_iters = power_iters\n self._eps = eps\n self._dim = dim\n\n def _build_once(self, weight):\n self._dtype = self._helper.input_dtype(weight)\n input_shape = weight.shape\n h = input_shape[self._dim]\n w = np.prod(input_shape) // h\n\n self.u = self.create_parameter(\n attr=ParamAttr(),\n shape=[h],\n dtype=self._dtype,\n default_initializer=Normal(0., 1.))\n self.u.stop_gradient = True\n\n self.v = self.create_parameter(\n attr=ParamAttr(),\n shape=[w],\n dtype=self._dtype,\n default_initializer=Normal(0., 1.))\n self.v.stop_gradient = True\n\n def forward(self, weight):\n inputs = {'Weight': weight, 'U': self.u, 'V': self.v}\n out = self._helper.create_variable_for_type_inference(self._dtype)\n self._helper.append_op(\n type=\"spectral_norm\",\n inputs=inputs,\n outputs={\"Out\": out, },\n attrs={\n \"dim\": self._dim,\n \"power_iters\": self._power_iters,\n \"eps\": self._eps,\n })\n\n return out\n\n\nclass TreeConv(layers.Layer):\n \"\"\"\n ***Tree-Based Convolution Operator***\n\n Tree-Based Convolution is a kind of convolution based on tree structure.\n Tree-Based Convolution is a part of Tree-Based Convolution Neural Network(TBCNN),\n which is used to classify tree structures, such as Abstract Syntax Tree.\n Tree-Based Convolution proposed a kind of data structure called continuous binary tree,\n which regards multiway tree as binary tree.\n The paper of Tree-Based Convolution Operator is here: https://arxiv.org/abs/1409.5718v1\n\n\n Args:\n name_scope(str): The name of this class.\n output_size(int): output feature width\n num_filters(int): number of filters, Default: 1.\n max_depth(int): max depth of filters, Default: 2.\n act(str): activation function, Default: tanh.\n param_attr(ParamAttr): the parameter attribute for the filters, Default: None.\n bias_attr(ParamAttr): the parameter attribute for the bias of this layer, Default: None.\n name(str): a name of this layer(optional). If set None, the layer will be named automatically, Default: None.\n\n Returns:\n out(Variable): (Tensor) The feature vector of subtrees. The shape of the output tensor is [max_tree_node_size, output_size, num_filters]. 
The output tensor could be a new feature vector for next tree convolution layers\n\n Examples:\n\n .. code-block:: python\n\n import paddle.fluid as fluid\n import numpy\n\n with fluid.dygraph.guard():\n nodes_vector = numpy.random.random((1, 10, 5)).astype('float32')\n edge_set = numpy.random.random((1, 9, 2)).astype('int32')\n treeConv = fluid.dygraph.nn.TreeConv(\n 'TreeConv', output_size=6, num_filters=1, max_depth=2)\n ret = treeConv(fluid.dygraph.base.to_variable(nodes_vector), fluid.dygraph.base.to_variable(edge_set))\n\n \"\"\"\n\n def __init__(self,\n name_scope,\n output_size,\n num_filters=1,\n max_depth=2,\n act='tanh',\n param_attr=None,\n bias_attr=None,\n name=None):\n super(TreeConv, self).__init__(name_scope)\n self._name = name\n self._output_size = output_size\n self._act = act\n self._max_depth = max_depth\n self._num_filters = num_filters\n self._bias_attr = bias_attr\n self._param_attr = param_attr\n\n def _build_once(self, nodes_vector, edge_set):\n assert isinstance(nodes_vector, Variable)\n assert isinstance(edge_set, Variable)\n self._dtype = self._helper.input_dtype(nodes_vector)\n\n feature_size = nodes_vector.shape[2]\n w_shape = [feature_size, 3, self._output_size, self._num_filters]\n if self._bias_attr:\n self._bias_param = self.create_parameter(\n attr=self._bias_attr,\n shape=[self._num_filters],\n dtype=self._dtype,\n is_bias=True)\n self.W = self.create_parameter(\n attr=self._param_attr,\n shape=w_shape,\n dtype=self._dtype,\n is_bias=False)\n\n def forward(self, nodes_vector, edge_set):\n\n if self._name:\n out = self.create_variable(\n name=self._name, dtype=self._dtype, persistable=False)\n else:\n\n out = self._helper.create_variable_for_type_inference(\n dtype=self._dtype)\n\n self._helper.append_op(\n type='tree_conv',\n inputs={\n 'NodesVector': nodes_vector,\n 'EdgeSet': edge_set,\n 'Filter': self.W\n },\n outputs={'Out': out, },\n attrs={'max_depth': self._max_depth})\n if self._bias_attr:\n pre_activation = self._helper.create_variable_for_type_inference(\n dtype=self._dtype)\n self._helper.append_op(\n type='elementwise_add',\n inputs={'X': [out],\n 'Y': [self._bias_param]},\n outputs={'Out': [pre_activation]},\n attrs={'axis': 1})\n else:\n pre_activation = out\n return self._helper.append_activation(pre_activation, act=self._act)\n"
] |
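The `custom_dist` branch of the `NCE` layer dumped above precomputes Walker's alias tables (the big/little pairing loop) so that each negative-class draw costs O(1) regardless of vocabulary size. Below is a minimal standalone sketch of the same construction in its standard textbook form; `build_alias_table` and `sample_alias` are hypothetical names for illustration, not part of Paddle's API.

```python
import random

# Sketch of Walker's alias method, mirroring the big/little pairing loop
# in the NCE layer above. Hypothetical helper names, not a Paddle API.
def build_alias_table(probs):
    n = len(probs)
    scaled = [p * n for p in probs]            # rescale so the mean is 1.0
    alias_probs, alias = [1.0] * n, list(range(n))
    small = [i for i, s in enumerate(scaled) if s < 1.0]
    large = [i for i, s in enumerate(scaled) if s >= 1.0]
    while small and large:
        s, l = small.pop(), large.pop()
        alias_probs[s], alias[s] = scaled[s], l    # bucket s spills into l
        scaled[l] += scaled[s] - 1.0               # l donated 1 - scaled[s]
        (small if scaled[l] < 1.0 else large).append(l)
    return alias_probs, alias                  # leftovers keep prob 1.0

def sample_alias(alias_probs, alias):
    i = random.randrange(len(alias_probs))     # uniform bucket
    return i if random.random() < alias_probs[i] else alias[i]

table = build_alias_table([0.5, 0.3, 0.1, 0.1])
draws = [sample_alias(*table) for _ in range(10000)]
# Empirical frequencies of `draws` approach the input distribution.
```

The layer above marks exact buckets with `alias_ = -1`; the sketch keeps the identity alias instead, which yields the same sampling distribution.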
[
[
"numpy.array"
],
[
"numpy.array",
"numpy.prod"
]
] |
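The `SpectralNorm` docstring in the dump above walks through the power iteration, but its u-update line looks garbled: it repeats `\mathbf{W}^{T}`, while the shapes only work out if u is updated with `W v`. A plain-NumPy sketch of the intended iteration, assuming a dense 2-D weight; this is an illustration, not the Paddle `spectral_norm` op:

```python
import numpy as np

# Power iteration for the largest singular value, as Steps 1-3 of the
# SpectralNorm docstring above intend (u is updated with W @ v, not
# W.T @ v as the garbled formula reads).
rng = np.random.default_rng(0)
W = rng.standard_normal((8, 5))
u = rng.standard_normal(8)
v = rng.standard_normal(5)
eps = 1e-12                                    # matches the layer's `eps` role
for _ in range(50):                            # `power_iters`
    v = W.T @ u
    v /= np.linalg.norm(v) + eps
    u = W @ v
    u /= np.linalg.norm(u) + eps
sigma = u @ W @ v                              # estimate of sigma(W)
assert np.isclose(sigma, np.linalg.svd(W, compute_uv=False)[0], rtol=1e-3)
W_sn = W / sigma                               # spectrally normalized weight
```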
scicubator/countGauss
|
[
"7e744c3de1de342d72ef10da76c0c3b4605d70d4"
] |
[
"csnmf/tests/test_snmf.py"
] |
[
"from __future__ import absolute_import, print_function\nimport numpy as np\nimport dask.array as da\nimport timeit\nimport itertools\nimport matplotlib.pyplot as plt\nimport pickle\nimport csnmf.snmf\n\n\ndef run(m, n, q, ncols, blockshape):\n \"\"\"\n Create a low-rank matrix. We then compute its separable NMF\n decomposition, using the SPA and XRAY algorithms, in-core and\n out-of-core computations, and the QR and compression variants.\n :param m: number of rows of the input matrix\n :type m: int\n :param n: number of columns of the input matrix\n :type n: int\n :param q: rank of the input matrix\n :type q: int\n :param ncols: number of columns to use in the decomposition\n :type ncols: int\n :param blockshape: shape of the block to use for out-of-core\n computations.\n :type blockshape: tuple of int\n :return: a list of dictionaries where each dictionary contains the\n following keys:\n - 'alg': specifying the algorithms for finding the extreme columns\n - 'comp': boolean specifying if compression was done; otherwise the\n QR decomposition is used\n - 'data_type': basetring specifying if the computations were done\n in-core or out-of-core\n - 'cols': the delected columns\n - 'error': the relative error of the decomposition residual\n - 'time': execution time\n \"\"\"\n x = np.fabs(np.random.standard_normal(size=(m, q)))\n y = np.fabs(np.random.standard_normal(size=(q, n)))\n mat = x.dot(y)\n\n res_list = []\n\n algorithms = ['SPA', 'XRAY']\n compress = [False, True]\n data_list = [mat, da.from_array(mat, chunks=blockshape)]\n\n base_str = 'algorithm: {alg:4s}; compressed: {comp:d}; ' \\\n 'type: {data_type:11s}; error {error:.4f}; time {time:.2f}'\n\n for alg, comp, data in itertools.product(algorithms, compress, data_list):\n # import ipdb\n # ipdb.set_trace()\n t = timeit.default_timer()\n cols, _, error = csnmf.snmf.compute(data, ncols, alg, compress=comp)\n t = timeit.default_timer() - t\n\n if isinstance(data, np.ndarray):\n dtype = 'in-core'\n elif isinstance(data, da.Array):\n dtype = 'out-of-core'\n\n res_dict = {'alg': alg, 'comp': comp, 'data_type': dtype,\n 'cols': cols, 'error': error, 'time': t}\n\n print(base_str.format(**res_dict))\n res_list.append(res_dict)\n\n return res_list\n\n\ndef test_rank(m, n, only_draw=False):\n \"\"\"\n Test snmf as the matrix rank changes\n :param m: number of rows\n :param n: number of columns\n :param only_draw: do not run test, only read data from file and\n plot it\n \"\"\"\n m = int(m)\n n = int(n)\n q_max = n\n blockshape = (max(m/10, int(1e4)), n)\n\n test_name = 'test_snmf_rank_{0:.0e}_{1:.0e}'.format(m, n)\n\n q_list = range(q_max/10, q_max+1, q_max/10)\n shape = (len(q_list), 1)\n\n if not only_draw:\n time_vecs = {}\n\n for i, q in enumerate(q_list):\n res_list = run(m, n, q, q, blockshape)\n for res in res_list:\n key = (res['alg'], res['data_type'])\n if key not in time_vecs:\n time_vecs[key] = {}\n time_vecs[key][True] = np.zeros(shape)\n time_vecs[key][False] = np.zeros(shape)\n time_vecs[key][res['comp']][i] = res['time']\n\n with open(test_name, 'w') as f:\n pickle.dump(time_vecs, f)\n\n with open(test_name, 'r') as f:\n time_vecs = pickle.load(f)\n\n colors = ['#e41a1c', '#377eb8', '#4daf4a', '#984ea3']\n\n plt.figure(figsize=(10, 5))\n ax1 = plt.axes()\n ax1.hold(True)\n k = 0\n for (alg, dtype) in time_vecs.keys():\n for comp in time_vecs[(alg, dtype)]:\n if len(alg) < 4:\n label = '{0:4s}'.format(alg.upper()) + ' - '\n else:\n label = '{0:4s}'.format(alg.upper()) + ' - '\n if comp:\n label += '{0:5s} - 
'.format('comp.')\n linestyle = '-'\n else:\n label += '{0:5s} - '.format('QR')\n linestyle = '--'\n label += dtype\n\n ax1.plot(q_list, time_vecs[(alg, dtype)][comp], label=label,\n linestyle=linestyle, linewidth=2, marker='o',\n markeredgecolor='none', color=colors[k])\n k += 1\n\n ax1.hold(False)\n\n ax1.set_xticks(q_list)\n ax1.set_xticklabels(q_list)\n ax1.set_xlabel('Rank of the input matrix')\n ax1.set_ylabel('Time (s)')\n\n box = ax1.get_position()\n ax1.set_position([box.x0, box.y0, box.width * 0.55, box.height])\n\n # Put a legend to the right of the current axis\n ax1.legend(loc='center left', bbox_to_anchor=(1, 0.5),\n prop={'family': 'monospace'})\n\n plt.savefig(test_name + '.pdf')\n\n\ndef test_ncols(m, func, only_draw=False):\n \"\"\"\n Test snmf as the number of columns changes\n :param m: number of rows\n :param func: function determining the rank of the input matrix\n as a function of n\n :param only_draw: do not run test, only read data from file and\n plot it.\n \"\"\"\n m = int(m)\n n_max = int(1e3)\n\n test_name = 'test_snmf_ncols_{0:.0e}'.format(m)\n\n n_list = range(n_max/10, n_max+1, n_max/10)\n shape = (len(n_list), 1)\n\n if not only_draw:\n time_vecs = {}\n\n for i, n in enumerate(n_list):\n blockshape = (max(m/10, int(1e4)), n)\n q = func(n)\n res_list = run(m, n, q, q, blockshape)\n for res in res_list:\n key = (res['alg'], res['data_type'])\n if key not in time_vecs:\n time_vecs[key] = {}\n time_vecs[key][True] = np.zeros(shape)\n time_vecs[key][False] = np.zeros(shape)\n time_vecs[key][res['comp']][i] = res['time']\n\n with open(test_name, 'w') as f:\n pickle.dump(time_vecs, f)\n\n with open(test_name, 'r') as f:\n time_vecs = pickle.load(f)\n\n colors = ['#e41a1c', '#377eb8', '#4daf4a', '#984ea3']\n\n plt.figure(figsize=(10, 5))\n ax1 = plt.axes()\n ax1.hold(True)\n k = 0\n for (alg, dtype) in time_vecs.keys():\n for comp in time_vecs[(alg, dtype)]:\n if len(alg) < 4:\n label = '{0:4s}'.format(alg.upper()) + ' - '\n else:\n label = '{0}'.format(alg.upper()) + ' - '\n if comp:\n label += '{0:5s} - '.format('comp.')\n linestyle = '-'\n else:\n label += '{0:5s} - '.format('QR')\n linestyle = '--'\n label += dtype\n\n ax1.semilogy(n_list, time_vecs[(alg, dtype)][comp], label=label,\n linestyle=linestyle, linewidth=2, marker='o',\n markeredgecolor='none', color=colors[k])\n k += 1\n\n ax1.hold(False)\n\n ax1.set_xticks(n_list)\n ax1.set_xticklabels(n_list)\n ax1.set_xlabel('Number of columns in the input matrix')\n ax1.set_ylabel('Time (s)')\n\n box = ax1.get_position()\n ax1.set_position([box.x0, box.y0, box.width * 0.55, box.height])\n\n # Put a legend to the right of the current axis\n ax1.legend(loc='center left', bbox_to_anchor=(1, 0.5),\n prop={'family': 'monospace'})\n\n plt.savefig(test_name + '.pdf')\n\n\nif __name__ == '__main__':\n plt.switch_backend('TkAgg') # otherwise, monospace fonts do not work in mac\n test_rank(1e6, 1e2, only_draw=False)\n test_ncols(1e5, lambda x: x/10, only_draw=False)\n plt.show()\n"
] |
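`run()` in the file above feeds the same dense matrix through two paths: in-core as a NumPy array and out-of-core as `da.from_array(mat, chunks=blockshape)`. A small sketch of what that wrapping does; the test's `blockshape` is a full-width row slab, `(max(m/10, 1e4), n)`:

```python
import numpy as np
import dask.array as da

# Wrap a dense matrix as a chunked dask array, as run() above does for the
# out-of-core code path; downstream code then streams it block by block.
mat = np.random.rand(1000, 50)
blockshape = (100, 50)                         # 100 rows x all 50 columns
dmat = da.from_array(mat, chunks=blockshape)

assert dmat.chunks == ((100,) * 10, (50,))     # per-axis chunk sizes
assert np.allclose(dmat.compute(), mat)        # same values, lazy evaluation
```

Full-width row slabs keep every column of a slab together, so column selection never has to stitch a column back together from multiple chunks.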
[
[
"matplotlib.pyplot.switch_backend",
"numpy.random.standard_normal",
"numpy.zeros",
"matplotlib.pyplot.savefig",
"matplotlib.pyplot.figure",
"matplotlib.pyplot.show",
"matplotlib.pyplot.axes"
]
] |
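The synthetic input in `run()` above deserves a note: multiplying two entrywise-nonnegative Gaussian factors yields a matrix of exact rank `q` (almost surely), the regime in which a separable-NMF solver should recover `q` generating columns. A minimal sketch:

```python
import numpy as np

# Build the rank-q, nonnegative test matrix exactly as run() above does.
m, n, q = 200, 50, 5
x = np.fabs(np.random.standard_normal(size=(m, q)))
y = np.fabs(np.random.standard_normal(size=(q, n)))
mat = x.dot(y)

assert (mat >= 0).all()                    # nonnegative entries
assert np.linalg.matrix_rank(mat) == q     # rank q, almost surely
```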
zizai/pytorch-transformers
|
[
"d7a4c3252ed5e630b7fb6e4b4616daddfe574fc5"
] |
[
"pytorch_transformers/modeling_xlm.py"
] |
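The `modeling_xlm.py` dump that follows defines `create_sinusoidal_embeddings`, which fills even embedding columns with sines and odd columns with cosines at frequencies `10000^(2*(j//2)/dim)`. A NumPy-only sketch of the same table (the original writes into a torch tensor in place):

```python
import numpy as np

# NumPy version of create_sinusoidal_embeddings from the dump below.
def sinusoidal_embeddings(n_pos, dim):
    pe = np.array([[pos / np.power(10000, 2 * (j // 2) / dim)
                    for j in range(dim)]
                   for pos in range(n_pos)])
    out = np.empty((n_pos, dim))
    out[:, 0::2] = np.sin(pe[:, 0::2])     # even columns: sin
    out[:, 1::2] = np.cos(pe[:, 1::2])     # odd columns: cos
    return out

emb = sinusoidal_embeddings(512, 64)
assert emb.shape == (512, 64)
assert np.all(np.abs(emb) <= 1.0)          # bounded, position-unique codes
```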
[
"# coding=utf-8\n# Copyright 2019-present, Facebook, Inc and the HuggingFace Inc. team.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\" PyTorch XLM model.\n\"\"\"\nfrom __future__ import absolute_import, division, print_function, unicode_literals\n\nimport json\nimport logging\nimport math\nimport sys\nfrom io import open\n\nimport itertools\nimport numpy as np\n\nimport torch\nfrom torch import nn\nfrom torch.nn import functional as F\nfrom torch.nn import CrossEntropyLoss, MSELoss\n\nfrom .modeling_utils import (PretrainedConfig, PreTrainedModel, add_start_docstrings,\n prune_linear_layer, SequenceSummary, SQuADHead)\n\nlogger = logging.getLogger(__name__)\n\nXLM_PRETRAINED_MODEL_ARCHIVE_MAP = {\n 'xlm-mlm-en-2048': \"https://s3.amazonaws.com/models.huggingface.co/bert/xlm-mlm-en-2048-pytorch_model.bin\",\n 'xlm-mlm-ende-1024': \"https://s3.amazonaws.com/models.huggingface.co/bert/xlm-mlm-ende-1024-pytorch_model.bin\",\n 'xlm-mlm-enfr-1024': \"https://s3.amazonaws.com/models.huggingface.co/bert/xlm-mlm-enfr-1024-pytorch_model.bin\",\n 'xlm-mlm-enro-1024': \"https://s3.amazonaws.com/models.huggingface.co/bert/xlm-mlm-enro-1024-pytorch_model.bin\",\n 'xlm-mlm-tlm-xnli15-1024': \"https://s3.amazonaws.com/models.huggingface.co/bert/xlm-mlm-tlm-xnli15-1024-pytorch_model.bin\",\n 'xlm-mlm-xnli15-1024': \"https://s3.amazonaws.com/models.huggingface.co/bert/xlm-mlm-xnli15-1024-pytorch_model.bin\",\n 'xlm-clm-enfr-1024': \"https://s3.amazonaws.com/models.huggingface.co/bert/xlm-clm-enfr-1024-pytorch_model.bin\",\n 'xlm-clm-ende-1024': \"https://s3.amazonaws.com/models.huggingface.co/bert/xlm-clm-ende-1024-pytorch_model.bin\",\n 'xlm-mlm-17-1280': \"https://s3.amazonaws.com/models.huggingface.co/bert/xlm-mlm-17-1280-pytorch_model.json\",\n 'xlm-mlm-100-1280': \"https://s3.amazonaws.com/models.huggingface.co/bert/xlm-mlm-17-1280-pytorch_model.json\",\n}\nXLM_PRETRAINED_CONFIG_ARCHIVE_MAP = {\n 'xlm-mlm-en-2048': \"https://s3.amazonaws.com/models.huggingface.co/bert/xlm-mlm-en-2048-config.json\",\n 'xlm-mlm-ende-1024': \"https://s3.amazonaws.com/models.huggingface.co/bert/xlm-mlm-ende-1024-config.json\",\n 'xlm-mlm-enfr-1024': \"https://s3.amazonaws.com/models.huggingface.co/bert/xlm-mlm-enfr-1024-config.json\",\n 'xlm-mlm-enro-1024': \"https://s3.amazonaws.com/models.huggingface.co/bert/xlm-mlm-enro-1024-config.json\",\n 'xlm-mlm-tlm-xnli15-1024': \"https://s3.amazonaws.com/models.huggingface.co/bert/xlm-mlm-tlm-xnli15-1024-config.json\",\n 'xlm-mlm-xnli15-1024': \"https://s3.amazonaws.com/models.huggingface.co/bert/xlm-mlm-xnli15-1024-config.json\",\n 'xlm-clm-enfr-1024': \"https://s3.amazonaws.com/models.huggingface.co/bert/xlm-clm-enfr-1024-config.json\",\n 'xlm-clm-ende-1024': \"https://s3.amazonaws.com/models.huggingface.co/bert/xlm-clm-ende-1024-config.json\",\n 'xlm-mlm-17-1280': \"https://s3.amazonaws.com/models.huggingface.co/bert/xlm-mlm-17-1280-config.json\",\n 'xlm-mlm-100-1280': \"https://s3.amazonaws.com/models.huggingface.co/bert/xlm-mlm-17-1280-config.json\",\n}\n\n\nclass 
XLMConfig(PretrainedConfig):\n \"\"\"Configuration class to store the configuration of a `XLMModel`.\n\n Args:\n vocab_size_or_config_json_file: Vocabulary size of `inputs_ids` in `XLMModel`.\n d_model: Size of the encoder layers and the pooler layer.\n n_layer: Number of hidden layers in the Transformer encoder.\n n_head: Number of attention heads for each attention layer in\n the Transformer encoder.\n d_inner: The size of the \"intermediate\" (i.e., feed-forward)\n layer in the Transformer encoder.\n ff_activation: The non-linear activation function (function or string) in the\n encoder and pooler. If string, \"gelu\", \"relu\" and \"swish\" are supported.\n untie_r: untie relative position biases\n attn_type: 'bi' for XLM, 'uni' for Transformer-XL\n\n dropout: The dropout probabilitiy for all fully connected\n layers in the embeddings, encoder, and pooler.\n dropatt: The dropout ratio for the attention\n probabilities.\n max_position_embeddings: The maximum sequence length that this model might\n ever be used with. Typically set this to something large just in case\n (e.g., 512 or 1024 or 2048).\n initializer_range: The sttdev of the truncated_normal_initializer for\n initializing all weight matrices.\n layer_norm_eps: The epsilon used by LayerNorm.\n\n dropout: float, dropout rate.\n dropatt: float, dropout rate on attention probabilities.\n init: str, the initialization scheme, either \"normal\" or \"uniform\".\n init_range: float, initialize the parameters with a uniform distribution\n in [-init_range, init_range]. Only effective when init=\"uniform\".\n init_std: float, initialize the parameters with a normal distribution\n with mean 0 and stddev init_std. Only effective when init=\"normal\".\n mem_len: int, the number of tokens to cache.\n reuse_len: int, the number of tokens in the currect batch to be cached\n and reused in the future.\n bi_data: bool, whether to use bidirectional input pipeline.\n Usually set to True during pretraining and False during finetuning.\n clamp_len: int, clamp all relative distances larger than clamp_len.\n -1 means no clamping.\n same_length: bool, whether to use the same attention length for each token.\n \"\"\"\n pretrained_config_archive_map = XLM_PRETRAINED_CONFIG_ARCHIVE_MAP\n\n def __init__(self,\n vocab_size_or_config_json_file=30145,\n emb_dim=2048,\n n_layers=12,\n n_heads=16,\n dropout=0.1,\n attention_dropout=0.1,\n gelu_activation=True,\n sinusoidal_embeddings=False,\n causal=False,\n asm=False,\n n_langs=1,\n use_lang_emb=True,\n max_position_embeddings=512,\n embed_init_std=2048 ** -0.5,\n layer_norm_eps=1e-12,\n init_std=0.02,\n bos_index=0,\n eos_index=1,\n pad_index=2,\n unk_index=3,\n mask_index=5,\n is_encoder=True,\n\n finetuning_task=None,\n num_labels=2,\n summary_type='first',\n summary_use_proj=True,\n summary_activation=None,\n summary_proj_to_labels=True,\n summary_first_dropout=0.1,\n start_n_top=5,\n end_n_top=5,\n **kwargs):\n \"\"\"Constructs XLMConfig.\n \"\"\"\n super(XLMConfig, self).__init__(**kwargs)\n\n if isinstance(vocab_size_or_config_json_file, str) or (sys.version_info[0] == 2\n and isinstance(vocab_size_or_config_json_file, unicode)):\n with open(vocab_size_or_config_json_file, \"r\", encoding='utf-8') as reader:\n json_config = json.loads(reader.read())\n for key, value in json_config.items():\n self.__dict__[key] = value\n elif isinstance(vocab_size_or_config_json_file, int):\n self.n_words = vocab_size_or_config_json_file\n self.emb_dim = emb_dim\n self.n_layers = n_layers\n self.n_heads = n_heads\n 
self.dropout = dropout\n self.attention_dropout = attention_dropout\n self.gelu_activation = gelu_activation\n self.sinusoidal_embeddings = sinusoidal_embeddings\n self.causal = causal\n self.asm = asm\n self.n_langs = n_langs\n self.use_lang_emb = use_lang_emb\n self.layer_norm_eps = layer_norm_eps\n self.bos_index = bos_index\n self.eos_index = eos_index\n self.pad_index = pad_index\n self.unk_index = unk_index\n self.mask_index = mask_index\n self.is_encoder = is_encoder\n self.max_position_embeddings = max_position_embeddings\n self.embed_init_std = embed_init_std\n self.init_std = init_std\n self.finetuning_task = finetuning_task\n self.num_labels = num_labels\n self.summary_type = summary_type\n self.summary_use_proj = summary_use_proj\n self.summary_activation = summary_activation\n self.summary_proj_to_labels = summary_proj_to_labels\n self.summary_first_dropout = summary_first_dropout\n self.start_n_top = start_n_top\n self.end_n_top = end_n_top\n else:\n raise ValueError(\"First argument must be either a vocabulary size (int)\"\n \" or the path to a pretrained model config file (str)\")\n\n @property\n def vocab_size(self):\n return self.n_words\n\n @vocab_size.setter\n def vocab_size(self, value):\n self.n_words = value\n\n @property\n def hidden_size(self):\n return self.emb_dim\n\n @property\n def num_attention_heads(self):\n return self.n_heads\n\n @property\n def num_hidden_layers(self):\n return self.n_layers\n\n\ndef create_sinusoidal_embeddings(n_pos, dim, out):\n position_enc = np.array([\n [pos / np.power(10000, 2 * (j // 2) / dim) for j in range(dim)]\n for pos in range(n_pos)\n ])\n out[:, 0::2] = torch.FloatTensor(np.sin(position_enc[:, 0::2]))\n out[:, 1::2] = torch.FloatTensor(np.cos(position_enc[:, 1::2]))\n out.detach_()\n out.requires_grad = False\n\n\ndef gelu(x):\n \"\"\"\n GELU activation\n https://arxiv.org/abs/1606.08415\n https://github.com/huggingface/pytorch-openai-transformer-lm/blob/master/model_pytorch.py#L14\n https://github.com/huggingface/pytorch-transformers/blob/master/modeling.py\n \"\"\"\n # return 0.5 * x * (1 + torch.tanh(math.sqrt(2 / math.pi) * (x + 0.044715 * torch.pow(x, 3))))\n return 0.5 * x * (1.0 + torch.erf(x / math.sqrt(2.0)))\n\n\ndef get_masks(slen, lengths, causal, padding_mask=None):\n \"\"\"\n Generate hidden states mask, and optionally an attention mask.\n \"\"\"\n bs = lengths.size(0)\n if padding_mask is not None:\n mask = padding_mask\n else:\n assert lengths.max().item() <= slen\n alen = torch.arange(slen, dtype=torch.long, device=lengths.device)\n mask = alen < lengths[:, None]\n\n # attention mask is the same as mask, or triangular inferior attention (causal)\n if causal:\n attn_mask = alen[None, None, :].repeat(bs, slen, 1) <= alen[None, :, None]\n else:\n attn_mask = mask\n\n # sanity check\n assert mask.size() == (bs, slen)\n assert causal is False or attn_mask.size() == (bs, slen, slen)\n\n return mask, attn_mask\n\n\nclass MultiHeadAttention(nn.Module):\n\n NEW_ID = itertools.count()\n\n def __init__(self, n_heads, dim, config):\n super(MultiHeadAttention, self).__init__()\n self.layer_id = next(MultiHeadAttention.NEW_ID)\n self.output_attentions = config.output_attentions\n self.dim = dim\n self.n_heads = n_heads\n self.dropout = config.attention_dropout\n assert self.dim % self.n_heads == 0\n\n self.q_lin = nn.Linear(dim, dim)\n self.k_lin = nn.Linear(dim, dim)\n self.v_lin = nn.Linear(dim, dim)\n self.out_lin = nn.Linear(dim, dim)\n\n def prune_heads(self, heads):\n attention_head_size = self.dim // 
self.n_heads\n if len(heads) == 0:\n return\n mask = torch.ones(self.n_heads, attention_head_size)\n for head in heads:\n mask[head] = 0\n mask = mask.view(-1).contiguous().eq(1)\n index = torch.arange(len(mask))[mask].long()\n # Prune linear layers\n self.q_lin = prune_linear_layer(self.q_lin, index)\n self.k_lin = prune_linear_layer(self.k_lin, index)\n self.v_lin = prune_linear_layer(self.v_lin, index)\n self.out_lin = prune_linear_layer(self.out_lin, index, dim=1)\n # Update hyper params\n self.n_heads = self.n_heads - len(heads)\n self.dim = attention_head_size * self.n_heads\n\n def forward(self, input, mask, kv=None, cache=None, head_mask=None):\n \"\"\"\n Self-attention (if kv is None) or attention over source sentence (provided by kv).\n \"\"\"\n # Input is (bs, qlen, dim)\n # Mask is (bs, klen) (non-causal) or (bs, klen, klen)\n bs, qlen, dim = input.size()\n if kv is None:\n klen = qlen if cache is None else cache['slen'] + qlen\n else:\n klen = kv.size(1)\n # assert dim == self.dim, 'Dimensions do not match: %s input vs %s configured' % (dim, self.dim)\n n_heads = self.n_heads\n dim_per_head = self.dim // n_heads\n mask_reshape = (bs, 1, qlen, klen) if mask.dim() == 3 else (bs, 1, 1, klen)\n\n def shape(x):\n \"\"\" projection \"\"\"\n return x.view(bs, -1, self.n_heads, dim_per_head).transpose(1, 2)\n\n def unshape(x):\n \"\"\" compute context \"\"\"\n return x.transpose(1, 2).contiguous().view(bs, -1, self.n_heads * dim_per_head)\n\n q = shape(self.q_lin(input)) # (bs, n_heads, qlen, dim_per_head)\n if kv is None:\n k = shape(self.k_lin(input)) # (bs, n_heads, qlen, dim_per_head)\n v = shape(self.v_lin(input)) # (bs, n_heads, qlen, dim_per_head)\n elif cache is None or self.layer_id not in cache:\n k = v = kv\n k = shape(self.k_lin(k)) # (bs, n_heads, qlen, dim_per_head)\n v = shape(self.v_lin(v)) # (bs, n_heads, qlen, dim_per_head)\n\n if cache is not None:\n if self.layer_id in cache:\n if kv is None:\n k_, v_ = cache[self.layer_id]\n k = torch.cat([k_, k], dim=2) # (bs, n_heads, klen, dim_per_head)\n v = torch.cat([v_, v], dim=2) # (bs, n_heads, klen, dim_per_head)\n else:\n k, v = cache[self.layer_id]\n cache[self.layer_id] = (k, v)\n\n q = q / math.sqrt(dim_per_head) # (bs, n_heads, qlen, dim_per_head)\n scores = torch.matmul(q, k.transpose(2, 3)) # (bs, n_heads, qlen, klen)\n mask = (mask == 0).view(mask_reshape).expand_as(scores) # (bs, n_heads, qlen, klen)\n scores.masked_fill_(mask, -float('inf')) # (bs, n_heads, qlen, klen)\n\n weights = F.softmax(scores.float(), dim=-1).type_as(scores) # (bs, n_heads, qlen, klen)\n weights = F.dropout(weights, p=self.dropout, training=self.training) # (bs, n_heads, qlen, klen)\n\n # Mask heads if we want to\n if head_mask is not None:\n weights = weights * head_mask\n\n context = torch.matmul(weights, v) # (bs, n_heads, qlen, dim_per_head)\n context = unshape(context) # (bs, qlen, dim)\n\n outputs = (self.out_lin(context),)\n if self.output_attentions:\n outputs = outputs + (weights,)\n return outputs\n\n\nclass TransformerFFN(nn.Module):\n\n def __init__(self, in_dim, dim_hidden, out_dim, config):\n super(TransformerFFN, self).__init__()\n self.dropout = config.dropout\n self.lin1 = nn.Linear(in_dim, dim_hidden)\n self.lin2 = nn.Linear(dim_hidden, out_dim)\n self.act = gelu if config.gelu_activation else F.relu\n\n def forward(self, input):\n x = self.lin1(input)\n x = self.act(x)\n x = self.lin2(x)\n x = F.dropout(x, p=self.dropout, training=self.training)\n return x\n\n\nclass XLMPreTrainedModel(PreTrainedModel):\n \"\"\" An 
abstract class to handle weights initialization and\n a simple interface for dowloading and loading pretrained models.\n \"\"\"\n config_class = XLMConfig\n pretrained_model_archive_map = XLM_PRETRAINED_MODEL_ARCHIVE_MAP\n load_tf_weights = None\n base_model_prefix = \"transformer\"\n\n def __init__(self, *inputs, **kwargs):\n super(XLMPreTrainedModel, self).__init__(*inputs, **kwargs)\n\n def init_weights(self, module):\n \"\"\" Initialize the weights. \"\"\"\n if isinstance(module, nn.Embedding):\n if self.config is not None and self.config.embed_init_std is not None:\n nn.init.normal_(module.weight, mean=0, std=self.config.embed_init_std)\n if isinstance(module, nn.Linear):\n if self.config is not None and self.config.init_std is not None:\n nn.init.normal_(module.weight, mean=0, std=self.config.init_std)\n if hasattr(module, 'bias') and module.bias is not None:\n nn.init.constant_(module.bias, 0.)\n if isinstance(module, nn.LayerNorm):\n module.bias.data.zero_()\n module.weight.data.fill_(1.0)\n\n\nXLM_START_DOCSTRING = r\"\"\" The XLM model was proposed in\n `Cross-lingual Language Model Pretraining`_\n by Guillaume Lample*, Alexis Conneau*. It's a transformer pre-trained using one of the following objectives:\n\n - a causal language modeling (CLM) objective (next token prediction),\n - a masked language modeling (MLM) objective (Bert-like), or\n - a Translation Language Modeling (TLM) object (extension of Bert's MLM to multiple language inputs)\n\n Original code can be found `here`_.\n\n This model is a PyTorch `torch.nn.Module`_ sub-class. Use it as a regular PyTorch Module and\n refer to the PyTorch documentation for all matter related to general usage and behavior.\n\n .. _`Cross-lingual Language Model Pretraining`:\n https://arxiv.org/abs/1901.07291\n\n .. _`torch.nn.Module`:\n https://pytorch.org/docs/stable/nn.html#module\n\n .. 
_`here`:\n https://github.com/facebookresearch/XLM\n\n Parameters:\n config (:class:`~pytorch_transformers.XLMConfig`): Model configuration class with all the parameters of the model.\n Initializing with a config file does not load the weights associated with the model, only the configuration.\n Check out the :meth:`~pytorch_transformers.PreTrainedModel.from_pretrained` method to load the model weights.\n\"\"\"\n\nXLM_INPUTS_DOCSTRING = r\"\"\"\n Inputs:\n **input_ids**: ``torch.LongTensor`` of shape ``(batch_size, sequence_length)``:\n Indices of input sequence tokens in the vocabulary.\n\n XLM is a model with absolute position embeddings so it's usually advised to pad the inputs on\n the right rather than the left.\n\n Indices can be obtained using :class:`pytorch_transformers.XLMTokenizer`.\n See :func:`pytorch_transformers.PreTrainedTokenizer.encode` and\n :func:`pytorch_transformers.PreTrainedTokenizer.convert_tokens_to_ids` for details.\n **position_ids**: (`optional`) ``torch.LongTensor`` of shape ``(batch_size, sequence_length)``:\n Indices of positions of each input sequence tokens in the position embeddings.\n Selected in the range ``[0, config.max_position_embeddings - 1]``.\n **token_type_ids**: (`optional`) ``torch.LongTensor`` of shape ``(batch_size, sequence_length)``:\n A parallel sequence of tokens (can be used to indicate various portions of the inputs).\n The embeddings from these tokens will be summed with the respective token embeddings.\n Indices are selected in the vocabulary (unlike BERT which has a specific vocabulary for segment indices).\n **langs**: (`optional`) ``torch.LongTensor`` of shape ``(batch_size, sequence_length)``:\n A parallel sequence of tokens to be used to indicate the language of each token in the input.\n Indices are languages ids which can be obtained from the language names by using two conversion mappings\n provided in the configuration of the model (only provided for multilingual models).\n More precisely, the `language name -> language id` mapping is in `model.config.lang2id` (dict str -> int) and\n the `language id -> language name` mapping is `model.config.id2lang` (dict int -> str).\n **attention_mask**: (`optional`) ``torch.FloatTensor`` of shape ``(batch_size, sequence_length)``:\n Mask to avoid performing attention on padding token indices.\n Mask values selected in ``[0, 1]``:\n ``1`` for tokens that are NOT MASKED, ``0`` for MASKED tokens.\n **lengths**: (`optional`) ``torch.LongTensor`` of shape ``(batch_size,)``:\n Length of each sentence that can be used to avoid performing attention on padding token indices.\n You can also use `attention_mask` for the same result (see above), kept here for compatbility.\n Indices selected in ``[0, ..., input_ids.size(-1)]``:\n **cache**:\n dictionary with ``torch.FloatTensor`` that contains pre-computed\n hidden-states (key and values in the attention blocks) as computed by the model\n (see `cache` output below). 
Can be used to speed up sequential decoding.\n The dictionary object will be modified in-place during the forward pass to add newly computed hidden-states.\n **head_mask**: (`optional`) ``torch.FloatTensor`` of shape ``(num_heads,)`` or ``(num_layers, num_heads)``:\n Mask to nullify selected heads of the self-attention modules.\n Mask values selected in ``[0, 1]``:\n ``1`` indicates the head is **not masked**, ``0`` indicates the head is **masked**.\n\"\"\"\n\n@add_start_docstrings(\"The bare XLM Model transformer outputing raw hidden-states without any specific head on top.\",\n XLM_START_DOCSTRING, XLM_INPUTS_DOCSTRING)\nclass XLMModel(XLMPreTrainedModel):\n r\"\"\"\n Outputs: `Tuple` comprising various elements depending on the configuration (config) and inputs:\n **last_hidden_state**: ``torch.FloatTensor`` of shape ``(batch_size, sequence_length, hidden_size)``\n Sequence of hidden-states at the last layer of the model.\n **hidden_states**: (`optional`, returned when ``config.output_hidden_states=True``)\n list of ``torch.FloatTensor`` (one for the output of each layer + the output of the embeddings)\n of shape ``(batch_size, sequence_length, hidden_size)``:\n Hidden-states of the model at the output of each layer plus the initial embedding outputs.\n **attentions**: (`optional`, returned when ``config.output_attentions=True``)\n list of ``torch.FloatTensor`` (one for each layer) of shape ``(batch_size, num_heads, sequence_length, sequence_length)``:\n Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads.\n\n Examples::\n\n tokenizer = XLMTokenizer.from_pretrained('xlm-mlm-en-2048')\n model = XLMModel.from_pretrained('xlm-mlm-en-2048')\n input_ids = torch.tensor(tokenizer.encode(\"Hello, my dog is cute\")).unsqueeze(0) # Batch size 1\n outputs = model(input_ids)\n last_hidden_states = outputs[0] # The last hidden-state is the first element of the output tuple\n\n \"\"\"\n ATTRIBUTES = ['encoder', 'eos_index', 'pad_index', # 'with_output', \n 'n_langs', 'use_lang_emb', 'n_words', 'dim', 'n_layers', 'n_heads', \n 'hidden_dim', 'dropout', 'attention_dropout', 'asm',\n 'asm_cutoffs', 'asm_div_value']\n\n def __init__(self, config): #, dico, is_encoder, with_output):\n super(XLMModel, self).__init__(config)\n self.output_attentions = config.output_attentions\n self.output_hidden_states = config.output_hidden_states\n\n # encoder / decoder, output layer\n self.is_encoder = config.is_encoder\n self.is_decoder = not config.is_encoder\n if self.is_decoder:\n raise NotImplementedError(\"Currently XLM can only be used as an encoder\")\n # self.with_output = with_output\n self.causal = config.causal\n\n # dictionary / languages\n self.n_langs = config.n_langs\n self.use_lang_emb = config.use_lang_emb\n self.n_words = config.n_words\n self.eos_index = config.eos_index\n self.pad_index = config.pad_index\n # self.dico = dico\n # self.id2lang = config.id2lang\n # self.lang2id = config.lang2id\n # assert len(self.dico) == self.n_words\n # assert len(self.id2lang) == len(self.lang2id) == self.n_langs\n\n # model parameters\n self.dim = config.emb_dim # 512 by default\n self.hidden_dim = self.dim * 4 # 2048 by default\n self.n_heads = config.n_heads # 8 by default\n self.n_layers = config.n_layers\n self.dropout = config.dropout\n self.attention_dropout = config.attention_dropout\n assert self.dim % self.n_heads == 0, 'transformer dim must be a multiple of n_heads'\n\n # embeddings\n self.position_embeddings = 
nn.Embedding(config.max_position_embeddings, self.dim)\n if config.sinusoidal_embeddings:\n create_sinusoidal_embeddings(config.max_position_embeddings, self.dim, out=self.position_embeddings.weight)\n if config.n_langs > 1 and config.use_lang_emb:\n self.lang_embeddings = nn.Embedding(self.n_langs, self.dim)\n self.embeddings = nn.Embedding(self.n_words, self.dim, padding_idx=self.pad_index)\n self.layer_norm_emb = nn.LayerNorm(self.dim, eps=config.layer_norm_eps)\n\n # transformer layers\n self.attentions = nn.ModuleList()\n self.layer_norm1 = nn.ModuleList()\n self.ffns = nn.ModuleList()\n self.layer_norm2 = nn.ModuleList()\n # if self.is_decoder:\n # self.layer_norm15 = nn.ModuleList()\n # self.encoder_attn = nn.ModuleList()\n\n for _ in range(self.n_layers):\n self.attentions.append(MultiHeadAttention(self.n_heads, self.dim, config=config))\n self.layer_norm1.append(nn.LayerNorm(self.dim, eps=config.layer_norm_eps))\n # if self.is_decoder:\n # self.layer_norm15.append(nn.LayerNorm(self.dim, eps=config.layer_norm_eps))\n # self.encoder_attn.append(MultiHeadAttention(self.n_heads, self.dim, dropout=self.attention_dropout))\n self.ffns.append(TransformerFFN(self.dim, self.hidden_dim, self.dim, config=config))\n self.layer_norm2.append(nn.LayerNorm(self.dim, eps=config.layer_norm_eps))\n\n self.apply(self.init_weights)\n\n def _resize_token_embeddings(self, new_num_tokens):\n self.embeddings = self._get_resized_embeddings(self.embeddings, new_num_tokens)\n return self.embeddings\n\n def _prune_heads(self, heads_to_prune):\n \"\"\" Prunes heads of the model.\n heads_to_prune: dict of {layer_num: list of heads to prune in this layer}\n See base class PreTrainedModel\n \"\"\"\n for layer, heads in heads_to_prune.items():\n self.attentions[layer].prune_heads(heads)\n\n def forward(self, input_ids, lengths=None, position_ids=None, langs=None,\n token_type_ids=None, attention_mask=None, cache=None, head_mask=None): # src_enc=None, src_len=None, \n if lengths is None:\n lengths = (input_ids != self.pad_index).sum(dim=1).long()\n # mask = input_ids != self.pad_index\n\n # check inputs\n bs, slen = input_ids.size()\n assert lengths.size(0) == bs\n assert lengths.max().item() <= slen\n # input_ids = input_ids.transpose(0, 1) # batch size as dimension 0\n # assert (src_enc is None) == (src_len is None)\n # if src_enc is not None:\n # assert self.is_decoder\n # assert src_enc.size(0) == bs\n\n # generate masks\n mask, attn_mask = get_masks(slen, lengths, self.causal, padding_mask=attention_mask)\n # if self.is_decoder and src_enc is not None:\n # src_mask = torch.arange(src_len.max(), dtype=torch.long, device=lengths.device) < src_len[:, None]\n\n # position_ids\n if position_ids is None:\n position_ids = input_ids.new((slen,)).long()\n position_ids = torch.arange(slen, out=position_ids).unsqueeze(0)\n else:\n assert position_ids.size() == (bs, slen) # (slen, bs)\n # position_ids = position_ids.transpose(0, 1)\n\n # langs\n if langs is not None:\n assert langs.size() == (bs, slen) # (slen, bs)\n # langs = langs.transpose(0, 1)\n\n # Prepare head mask if needed\n # 1.0 in head_mask indicate we keep the head\n # attention_probs has shape bsz x n_heads x N x N\n # input head_mask has shape [num_heads] or [num_hidden_layers x num_heads]\n # and head_mask is converted to shape [num_hidden_layers x batch x num_heads x qlen x klen]\n if head_mask is not None:\n if head_mask.dim() == 1:\n head_mask = head_mask.unsqueeze(0).unsqueeze(0).unsqueeze(-1).unsqueeze(-1)\n head_mask = head_mask.expand(self.n_layers, 
-1, -1, -1, -1)\n            elif head_mask.dim() == 2:\n                head_mask = head_mask.unsqueeze(1).unsqueeze(-1).unsqueeze(-1) # We can specify head_mask for each layer\n            head_mask = head_mask.to(dtype=next(self.parameters()).dtype) # switch to float if needed + fp16 compatibility\n        else:\n            head_mask = [None] * self.n_layers\n\n        # do not recompute cached elements\n        if cache is not None:\n            _slen = slen - cache['slen']\n            input_ids = input_ids[:, -_slen:]\n            position_ids = position_ids[:, -_slen:]\n            if langs is not None:\n                langs = langs[:, -_slen:]\n            mask = mask[:, -_slen:]\n            attn_mask = attn_mask[:, -_slen:]\n\n        # embeddings\n        tensor = self.embeddings(input_ids)\n        tensor = tensor + self.position_embeddings(position_ids).expand_as(tensor)\n        if langs is not None and self.use_lang_emb:\n            tensor = tensor + self.lang_embeddings(langs)\n        if token_type_ids is not None:\n            tensor = tensor + self.embeddings(token_type_ids)\n        tensor = self.layer_norm_emb(tensor)\n        tensor = F.dropout(tensor, p=self.dropout, training=self.training)\n        tensor *= mask.unsqueeze(-1).to(tensor.dtype)\n\n        # transformer layers\n        hidden_states = ()\n        attentions = ()\n        for i in range(self.n_layers):\n            if self.output_hidden_states:\n                hidden_states = hidden_states + (tensor,)\n\n            # self attention\n            attn_outputs = self.attentions[i](tensor, attn_mask, cache=cache, head_mask=head_mask[i])\n            attn = attn_outputs[0]\n            if self.output_attentions:\n                attentions = attentions + (attn_outputs[1],)\n            attn = F.dropout(attn, p=self.dropout, training=self.training)\n            tensor = tensor + attn\n            tensor = self.layer_norm1[i](tensor)\n\n            # encoder attention (for decoder only)\n            # if self.is_decoder and src_enc is not None:\n            #     attn = self.encoder_attn[i](tensor, src_mask, kv=src_enc, cache=cache)\n            #     attn = F.dropout(attn, p=self.dropout, training=self.training)\n            #     tensor = tensor + attn\n            #     tensor = self.layer_norm15[i](tensor)\n\n            # FFN\n            tensor = tensor + self.ffns[i](tensor)\n            tensor = self.layer_norm2[i](tensor)\n            tensor *= mask.unsqueeze(-1).to(tensor.dtype)\n\n        # Add last hidden state\n        if self.output_hidden_states:\n            hidden_states = hidden_states + (tensor,)\n\n        # update cache length\n        if cache is not None:\n            cache['slen'] += tensor.size(1)\n\n        # move back sequence length to dimension 0\n        # tensor = tensor.transpose(0, 1)\n\n        outputs = (tensor,)\n        if self.output_hidden_states:\n            outputs = outputs + (hidden_states,)\n        if self.output_attentions:\n            outputs = outputs + (attentions,)\n        return outputs # outputs, (hidden_states), (attentions)\n\n\nclass XLMPredLayer(nn.Module):\n    \"\"\"\n    Prediction layer (cross_entropy or adaptive_softmax).\n    \"\"\"\n    def __init__(self, config):\n        super(XLMPredLayer, self).__init__()\n        self.asm = config.asm\n        self.n_words = config.n_words\n        self.pad_index = config.pad_index\n        dim = config.emb_dim\n\n        if config.asm is False:\n            self.proj = nn.Linear(dim, config.n_words, bias=True)\n        else:\n            self.proj = nn.AdaptiveLogSoftmaxWithLoss(\n                in_features=dim,\n                n_classes=config.n_words,\n                cutoffs=config.asm_cutoffs,\n                div_value=config.asm_div_value,\n                head_bias=True, # default is False\n            )\n\n    def forward(self, x, y=None):\n        \"\"\" Compute the loss, and optionally the scores.\n        \"\"\"\n        outputs = ()\n        if self.asm is False:\n            scores = self.proj(x).view(-1, self.n_words)\n            outputs = (scores,) + outputs\n            if y is not None:\n                loss = F.cross_entropy(scores, y, reduction='elementwise_mean')\n                outputs = (loss,) + outputs\n        else:\n            scores = self.proj.log_prob(x)\n            outputs = (scores,) + outputs\n            if y is not None:\n                _, loss = self.proj(x, y)\n                outputs = 
(loss,) + outputs\n\n        return outputs\n\n\n@add_start_docstrings(\"\"\"The XLM Model transformer with a language modeling head on top\n    (linear layer with weights tied to the input embeddings). \"\"\",\n    XLM_START_DOCSTRING, XLM_INPUTS_DOCSTRING)\nclass XLMWithLMHeadModel(XLMPreTrainedModel):\n    r\"\"\"\n        **labels**: (`optional`) ``torch.LongTensor`` of shape ``(batch_size, sequence_length)``:\n            Labels for language modeling.\n            Note that the labels **are shifted** inside the model, i.e. you can set ``lm_labels = input_ids``\n            Indices are selected in ``[-1, 0, ..., config.vocab_size]``\n            All labels set to ``-1`` are ignored (masked), the loss is only\n            computed for labels in ``[0, ..., config.vocab_size]``\n\n    Outputs: `Tuple` comprising various elements depending on the configuration (config) and inputs:\n        **loss**: (`optional`, returned when ``labels`` is provided) ``torch.FloatTensor`` of shape ``(1,)``:\n            Language modeling loss.\n        **prediction_scores**: ``torch.FloatTensor`` of shape ``(batch_size, sequence_length, config.vocab_size)``\n            Prediction scores of the language modeling head (scores for each vocabulary token before SoftMax).\n        **hidden_states**: (`optional`, returned when ``config.output_hidden_states=True``)\n            list of ``torch.FloatTensor`` (one for the output of each layer + the output of the embeddings)\n            of shape ``(batch_size, sequence_length, hidden_size)``:\n            Hidden-states of the model at the output of each layer plus the initial embedding outputs.\n        **attentions**: (`optional`, returned when ``config.output_attentions=True``)\n            list of ``torch.FloatTensor`` (one for each layer) of shape ``(batch_size, num_heads, sequence_length, sequence_length)``:\n            Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads.\n\n    Examples::\n\n        tokenizer = XLMTokenizer.from_pretrained('xlm-mlm-en-2048')\n        model = XLMWithLMHeadModel.from_pretrained('xlm-mlm-en-2048')\n        input_ids = torch.tensor(tokenizer.encode(\"Hello, my dog is cute\")).unsqueeze(0) # Batch size 1\n        outputs = model(input_ids)\n        prediction_scores = outputs[0] # The prediction scores are the first element of the output tuple\n\n    \"\"\"\n    def __init__(self, config):\n        super(XLMWithLMHeadModel, self).__init__(config)\n        self.transformer = XLMModel(config)\n        self.pred_layer = XLMPredLayer(config)\n\n        self.apply(self.init_weights)\n        self.tie_weights()\n\n    def tie_weights(self):\n        \"\"\" Make sure we are sharing the embeddings\n        \"\"\"\n        self._tie_or_clone_weights(self.pred_layer.proj, self.transformer.embeddings)\n\n    def forward(self, input_ids, lengths=None, position_ids=None, langs=None, token_type_ids=None,\n                attention_mask=None, cache=None, labels=None, head_mask=None):\n        transformer_outputs = self.transformer(input_ids, lengths=lengths, position_ids=position_ids,\n                                               token_type_ids=token_type_ids, langs=langs,\n                                               attention_mask=attention_mask, cache=cache, head_mask=head_mask)\n\n        output = transformer_outputs[0]\n        outputs = self.pred_layer(output, labels)\n        outputs = outputs + transformer_outputs[1:] # Keep new_mems and attention/hidden states if they are here\n\n        return outputs\n\n\n@add_start_docstrings(\"\"\"XLM Model with a sequence classification/regression head on top (a linear layer on top of\n    the pooled output) e.g. for GLUE tasks. 
\"\"\",\n XLM_START_DOCSTRING, XLM_INPUTS_DOCSTRING)\nclass XLMForSequenceClassification(XLMPreTrainedModel):\n r\"\"\"\n **labels**: (`optional`) ``torch.LongTensor`` of shape ``(batch_size,)``:\n Labels for computing the sequence classification/regression loss.\n Indices should be in ``[0, ..., config.num_labels - 1]``.\n If ``config.num_labels == 1`` a regression loss is computed (Mean-Square loss),\n If ``config.num_labels > 1`` a classification loss is computed (Cross-Entropy).\n\n Outputs: `Tuple` comprising various elements depending on the configuration (config) and inputs:\n **loss**: (`optional`, returned when ``labels`` is provided) ``torch.FloatTensor`` of shape ``(1,)``:\n Classification (or regression if config.num_labels==1) loss.\n **logits**: ``torch.FloatTensor`` of shape ``(batch_size, config.num_labels)``\n Classification (or regression if config.num_labels==1) scores (before SoftMax).\n **hidden_states**: (`optional`, returned when ``config.output_hidden_states=True``)\n list of ``torch.FloatTensor`` (one for the output of each layer + the output of the embeddings)\n of shape ``(batch_size, sequence_length, hidden_size)``:\n Hidden-states of the model at the output of each layer plus the initial embedding outputs.\n **attentions**: (`optional`, returned when ``config.output_attentions=True``)\n list of ``torch.FloatTensor`` (one for each layer) of shape ``(batch_size, num_heads, sequence_length, sequence_length)``:\n Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads.\n\n Examples::\n\n tokenizer = XLMTokenizer.from_pretrained('xlm-mlm-en-2048')\n model = XLMForSequenceClassification.from_pretrained('xlm-mlm-en-2048')\n input_ids = torch.tensor(tokenizer.encode(\"Hello, my dog is cute\")).unsqueeze(0) # Batch size 1\n labels = torch.tensor([1]).unsqueeze(0) # Batch size 1\n outputs = model(input_ids, labels=labels)\n loss, logits = outputs[:2]\n\n \"\"\"\n def __init__(self, config):\n super(XLMForSequenceClassification, self).__init__(config)\n self.num_labels = config.num_labels\n\n self.transformer = XLMModel(config)\n self.sequence_summary = SequenceSummary(config)\n\n self.apply(self.init_weights)\n\n def forward(self, input_ids, lengths=None, position_ids=None, langs=None, token_type_ids=None,\n attention_mask=None, cache=None, labels=None, head_mask=None):\n transformer_outputs = self.transformer(input_ids, lengths=lengths, position_ids=position_ids,\n token_type_ids=token_type_ids, langs=langs,\n attention_mask=attention_mask, cache=cache, head_mask=head_mask)\n\n output = transformer_outputs[0]\n logits = self.sequence_summary(output)\n\n outputs = (logits,) + transformer_outputs[1:] # Keep new_mems and attention/hidden states if they are here\n\n if labels is not None:\n if self.num_labels == 1:\n # We are doing regression\n loss_fct = MSELoss()\n loss = loss_fct(logits.view(-1), labels.view(-1))\n else:\n loss_fct = CrossEntropyLoss()\n loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))\n outputs = (loss,) + outputs\n\n return outputs\n\n\n@add_start_docstrings(\"\"\"XLM Model with a span classification head on top for extractive question-answering tasks like SQuAD (a linear layers on top of\n the hidden-states output to compute `span start logits` and `span end logits`). 
\"\"\",\n XLM_START_DOCSTRING, XLM_INPUTS_DOCSTRING)\nclass XLMForQuestionAnswering(XLMPreTrainedModel):\n r\"\"\"\n **start_positions**: (`optional`) ``torch.LongTensor`` of shape ``(batch_size,)``:\n Labels for position (index) of the start of the labelled span for computing the token classification loss.\n Positions are clamped to the length of the sequence (`sequence_length`).\n Position outside of the sequence are not taken into account for computing the loss.\n **end_positions**: (`optional`) ``torch.LongTensor`` of shape ``(batch_size,)``:\n Labels for position (index) of the end of the labelled span for computing the token classification loss.\n Positions are clamped to the length of the sequence (`sequence_length`).\n Position outside of the sequence are not taken into account for computing the loss.\n **is_impossible**: (`optional`) ``torch.LongTensor`` of shape ``(batch_size,)``:\n Labels whether a question has an answer or no answer (SQuAD 2.0)\n **cls_index**: (`optional`) ``torch.LongTensor`` of shape ``(batch_size,)``:\n Labels for position (index) of the classification token to use as input for computing plausibility of the answer.\n **p_mask**: (`optional`) ``torch.LongTensor`` of shape ``(batch_size, sequence_length)``:\n Optional mask of tokens which can't be in answers (e.g. [CLS], [PAD], ...) \n\n Outputs: `Tuple` comprising various elements depending on the configuration (config) and inputs:\n **loss**: (`optional`, returned when ``labels`` is provided) ``torch.FloatTensor`` of shape ``(1,)``:\n Total span extraction loss is the sum of a Cross-Entropy for the start and end positions.\n **start_scores**: ``torch.FloatTensor`` of shape ``(batch_size, sequence_length,)``\n Span-start scores (before SoftMax).\n **end_scores**: ``torch.FloatTensor`` of shape ``(batch_size, sequence_length,)``\n Span-end scores (before SoftMax).\n **hidden_states**: (`optional`, returned when ``config.output_hidden_states=True``)\n list of ``torch.FloatTensor`` (one for the output of each layer + the output of the embeddings)\n of shape ``(batch_size, sequence_length, hidden_size)``:\n Hidden-states of the model at the output of each layer plus the initial embedding outputs.\n **attentions**: (`optional`, returned when ``config.output_attentions=True``)\n list of ``torch.FloatTensor`` (one for each layer) of shape ``(batch_size, num_heads, sequence_length, sequence_length)``:\n Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads.\n\n Examples::\n\n tokenizer = XLMTokenizer.from_pretrained('xlm-mlm-en-2048')\n model = XLMForQuestionAnswering.from_pretrained('xlm-mlm-en-2048')\n input_ids = torch.tensor(tokenizer.encode(\"Hello, my dog is cute\")).unsqueeze(0) # Batch size 1\n start_positions = torch.tensor([1])\n end_positions = torch.tensor([3])\n outputs = model(input_ids, start_positions=start_positions, end_positions=end_positions)\n loss, start_scores, end_scores = outputs[:2]\n\n \"\"\"\n def __init__(self, config):\n super(XLMForQuestionAnswering, self).__init__(config)\n\n self.transformer = XLMModel(config)\n self.qa_outputs = SQuADHead(config)\n\n self.apply(self.init_weights)\n\n def forward(self, input_ids, lengths=None, position_ids=None, langs=None, token_type_ids=None,\n attention_mask=None, cache=None, start_positions=None, end_positions=None,\n cls_index=None, is_impossible=None, p_mask=None, head_mask=None):\n transformer_outputs = self.transformer(input_ids, lengths=lengths, position_ids=position_ids,\n 
token_type_ids=token_type_ids, langs=langs,\n attention_mask=attention_mask, cache=cache, head_mask=head_mask)\n\n output = transformer_outputs[0]\n\n outputs = self.qa_outputs(output, start_positions=start_positions, end_positions=end_positions,\n cls_index=cls_index, is_impossible=is_impossible, p_mask=p_mask)\n\n outputs = outputs + transformer_outputs[1:] # Keep new_mems and attention/hidden states if they are here\n\n return outputs\n"
] |
[
[
"torch.nn.Linear",
"numpy.sin",
"torch.nn.LayerNorm",
"torch.nn.AdaptiveLogSoftmaxWithLoss",
"torch.nn.MSELoss",
"torch.arange",
"torch.nn.ModuleList",
"torch.cat",
"torch.nn.init.constant_",
"torch.nn.functional.dropout",
"torch.nn.CrossEntropyLoss",
"torch.ones",
"torch.nn.init.normal_",
"torch.nn.functional.cross_entropy",
"numpy.power",
"numpy.cos",
"torch.matmul",
"torch.nn.Embedding"
]
] |
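Note on the `cache` argument handled in XLMModel.forward above: the model slices every input down to the positions past cache['slen'], lets each attention layer stash its keys/values in the dict, and bumps cache['slen'] in place. Below is a minimal sketch of driving that path, not taken from the file itself; it assumes the pytorch_transformers release these classes ship in is installed, and it uses the 'xlm-clm-enfr-1024' causal-LM checkpoint because the mask slicing in forward() only lines up when config.causal is set.

import torch
from pytorch_transformers import XLMTokenizer, XLMWithLMHeadModel

tokenizer = XLMTokenizer.from_pretrained('xlm-clm-enfr-1024')  # causal (CLM) checkpoint
model = XLMWithLMHeadModel.from_pretrained('xlm-clm-enfr-1024')
model.eval()

ids = torch.tensor(tokenizer.encode("Hello")).unsqueeze(0)  # Batch size 1
cache = {'slen': 0}  # forward() reads cache['slen'] and updates it in place

with torch.no_grad():
    for _ in range(3):
        # XLMPredLayer flattens batch and time, so scores has shape (new_tokens, n_words)
        scores = model(ids, cache=cache)[0]
        next_id = scores[-1].argmax().view(1, 1)  # greedy next token, batch size 1
        ids = torch.cat([ids, next_id], dim=1)
        # On the next call, only the suffix past cache['slen'] is recomputed.

print(cache['slen'], ids.shape)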
imkeines/envirocar-py
|
[
"257e640c910cad710aac2648cad529d69a11944a"
] |
[
"envirocar/EDA/inspection.py"
] |
[
"import pandas as pd\nimport seaborn as sns\nimport numpy as np \nimport matplotlib.pyplot as plt\nimport plotly.express as px\nimport geopandas as gpd\nfrom scipy import stats\n\n# class Inspection():\n# def __init__(self):\n# print(\"Initializing class 'Inspection'\") \n\n\ndef skewness_num_variables(df): # show_skewness_num_variables\n numericFeaturesIndex = df.dtypes[df.dtypes=='float64'].index\n skewedFeatures=df[numericFeaturesIndex].skew().sort_values(ascending=False)\n skewness=pd.DataFrame({'Skew':skewedFeatures})\n return skewness\n\n\ndef missing_values_per_variable(df, percent=100, dropCol=False): # sum_missing_values\n listCol =[]\n rowCount = df.shape[0]\n for column in df:\n sumColumn = df[column].isna().sum()\n percentNA = sumColumn/rowCount*100\n if percentNA <= percent:\n listCol.append({'column':column ,'missing_values': sumColumn, 'missing_values(%)': percentNA})\n else: \n if dropCol == True:\n print('Column dropped: ', column, ', missing values(%): ', percentNA )\n df.drop([column], axis=1, inplace=True)\n listCol = pd.DataFrame(listCol).sort_values(by='missing_values', ascending=False).reset_index(drop=True)\n return listCol\n\n\ndef missing_values_per_track(df):\n columnList=df.select_dtypes(['float64']).columns.tolist()\n df_count=df.groupby('track.id').apply(lambda x: x.isna().sum()) \n df_prop=df.groupby('track.id').apply(lambda x: x.isna().sum()/len(x)*100)\n return df_count, df_prop\n\n\ndef get_classified_correlations(df, method):\n allCoeffs=[]\n correlationsMatrixAll = df.corr(method=method)\n for column in correlationsMatrixAll:\n for i in correlationsMatrixAll[column].index:\n df = correlationsMatrixAll.at[i, column]\n if df < 1.0:\n allCoeffs.append({'column':column, 'index':i, 'coefficient':df })\n\n correlationsMatrix = correlationsMatrixAll.where(np.tril(np.ones(correlationsMatrixAll.shape)).astype(np.bool))\n\n very_strong=[]\n strong=[]\n moderate=[]\n weak=[] \n for column in correlationsMatrix:\n for i in correlationsMatrix[column].index:\n df = correlationsMatrix.at[i, column]\n if df >= 0.8 and df < 1.0 or df <= -0.8 and df > -1.0:\n very_strong.append({'column':column, 'index':i, 'coefficient':df })# \n if df >= 0.6 and df < 0.8 or df <= -0.6 and df > -0.8:\n strong.append({'column':column, 'index':i, 'coefficient':df })\n if df >= 0.4 and df < 0.6 or df <= -0.4 and df > -0.6:\n moderate.append({'column':column, 'index':i, 'coefficient':df })\n if df < 0.4 and df > -0.4:\n weak.append({'column':column, 'index':i, 'coefficient':df })\n\n very_strong = pd.DataFrame(very_strong).sort_values(by='coefficient', ascending=False).reset_index(drop=True)\n strong = pd.DataFrame(strong).sort_values(by='coefficient', ascending=False).reset_index(drop=True)\n moderate = pd.DataFrame(moderate).sort_values(by='coefficient', ascending=False).reset_index(drop=True)\n weak=pd.DataFrame(weak).sort_values(by='coefficient', ascending=False).reset_index(drop=True)\n allCoeffs= pd.DataFrame(allCoeffs).sort_values(by='coefficient', ascending=False).reset_index(drop=True)\n\n return allCoeffs, very_strong, strong, moderate, weak \n\n\ndef get_correlation(df, method, variable1, variable2):\n allCoeffs=[]\n correlationsMatrixAll = df.corr(method=method)\n for column in correlationsMatrixAll:\n for i in correlationsMatrixAll[column].index:\n df = correlationsMatrixAll.at[i, column]\n if df < 1.0:\n allCoeffs.append({'v1':column, 'v2':i, 'coefficient':df })\n\n correlationsMatrix = correlationsMatrixAll.where(np.tril(np.ones(correlationsMatrixAll.shape)).astype(np.bool))\n 
allCoeffs= pd.DataFrame(allCoeffs).sort_values(by='coefficient', ascending=False).reset_index(drop=True)\n    showCorr= allCoeffs.loc[(allCoeffs['v1'] == variable1) & (allCoeffs['v2'] == variable2)]\n    return showCorr\n    \n    \ndef correlation_heatmap_triangle(df, method, figsize=(20, 16)):\n    df = df.select_dtypes(['float64'])\n    coefficient = df.corr(method=method)\n    coefficient = coefficient.where(np.tril(np.ones(coefficient.shape)).astype(np.bool))\n    plt.figure(figsize=figsize)\n    sns.heatmap(coefficient, annot = True, vmin=-1, vmax=1.0, cmap=\"RdBu_r\")\n\n\ndef get_single_track(df, track_id):\n    grouped = df.groupby('track.id')\n    df = grouped.get_group(track_id).copy()\n    return df\n\n\ndef show_dublicated_tracks(df): \n    dublicates = df[df[['geometry', 'Engine Load.value', 'Calculated MAF.value',\n        'Speed.value', 'CO2.value', 'Intake Pressure.value', 'Rpm.value',\n        'Intake Temperature.value', 'Consumption (GPS-based).value',\n        'GPS Altitude.value', 'Throttle Position.value', 'GPS Bearing.value',\n        'Consumption.value', 'GPS Accuracy.value',\n        'CO2 Emission (GPS-based).value', 'GPS Speed.value', \n        'track.length', 'track.begin', 'track.end', 'sensor.type',\n        'sensor.engineDisplacement', 'sensor.model', 'sensor.id',\n        'sensor.fuelType', 'sensor.constructionYear', 'sensor.manufacturer']].\n        duplicated(keep=False)==True]['track.id'].unique().tolist()\n\n    newdf= df.copy().loc[df['track.id'].isin(dublicates)]\n    ls= newdf['track.id'].unique().tolist()\n    print('Duplicated tracks:', ls)\n    return newdf\n\n\n\ndef count_tracks(df):\n    print(len(df['track.id'].unique().tolist()))\n\ndef show_units(df):\n    '''\n    Aim: \n        get an overview of the variables and corresponding units\n    \n    Keyword Arguments: \n        df {Geodataframe} -- point input\n    \n    Output: Matrix-like overview on variables and the relevant unit\n    '''\n    units = df.filter(like='.unit').columns\n    for unit in units:\n        if unit in df:\n            print(df[unit].name, df[unit].iloc[0])\n    return units\n    \ndef get_units(df):\n    '''\n    Aim: \n        get an overview of the variables and corresponding units\n\n    Keyword Arguments: \n        df {Geodataframe} -- point input\n\n    Output: Matrix-like overview on variables and the relevant unit\n    '''\n    units = df.filter(like='.unit').columns\n    unitList=[]\n    for unit in units:\n        if unit in df:\n            unitList.append(unit)\n            #print(df[unit].name, df[unit].iloc[0])\n    return(unitList)\n\n\ndef get_categories(df):\n    for column in df:\n        print(column, df[column].unique())\n\n\ndef get_sensor_columns(df):\n    sensor = df.filter(like='sensor.', axis=1).columns.copy()\n    sensor = sensor.tolist()\n    df = df[sensor]\n    return df, sensor\n\n\ndef get_columns(df, name=''):\n    columns = df.filter(like=name, axis=1).columns.copy()\n    columns = columns.tolist()\n    df = df[columns]\n    return columns, df\n\n\ndef plot_tracks(points_df, column):\n    \"\"\" \n    Aim: \n        Visualize phenomena of tracks as time series in a line chart, in which each line represents one single track\n\n    Keyword Arguments: \n        df {Geodataframe} -- point input\n\n    Returns:\n        Chart is shown \n\n    \"\"\"\n    # Add datetime to data frame\n    points_df['datetime'] = pd.to_datetime(points_df['time'])\n    points_df['index']=points_df.index\n    fig = px.line(points_df, x=\"index\", y=column, color=\"track.id\",\n              line_group=\"track.id\", hover_name=\"datetime\")\n    fig.update_traces(mode='lines+markers')\n    fig.show()\n\n\n\ndef plot_point_values(points, value = None):\n    \"\"\" This function is based on a function from the envirocar fork of the github user 'annaformaniuk'.\n\n    Aim: \n        show points on a map\n\n    Keyword Arguments:\n        
points {GeoDataFrame} -- points input\n        value {string} -- column value to use for colouring\n\n    Returns:\n        No Return\n    \"\"\"\n\n    points['lat'] = points['geometry'].apply(lambda coord: coord.y)\n    points['lng'] = points['geometry'].apply(lambda coord: coord.x)\n\n    if value is not None:\n        # Visualizing points of the selected variable\n        fig = px.scatter_mapbox(points, lat=\"lat\", lon=\"lng\", hover_data=[\"CO2.value\"],\n                                color=value,\n                                color_continuous_scale=px.colors.sequential.Reds,\n                                title=value + \" visualisation\", zoom=8,\n                                hover_name=\"id\")\n    else:\n        fig = px.scatter_mapbox(points, lat=\"lat\", lon=\"lng\", hover_data=[\"CO2.value\"],\n                                title= \" Spatial distribution of requested tracks\", zoom=8,\n                                hover_name=\"id\")\n\n\n    fig.update_layout(mapbox_style=\"open-street-map\",\n                      margin={\"r\": 5, \"t\": 50, \"l\": 10, \"b\": 5})\n    fig.show()\n    \n\ndef plot_scatter(df, column1, column2, alpha=0.2):\n    relation = df[['track.id',column1, column2]]\n    relation.plot(kind='scatter', x = column1, y = column2, alpha=alpha )\n    \n    \n    \ndef plot_normality_with_qqplot(point_df, column):\n    '''\n    Aim: \n        create q-q plot to inspect normality of distribution of selected variable\n\n    Keyword Arguments: \n        df {Geodataframe} -- points input\n        column {str} -- variable name\n\n    Output: Q-Q plot\n    '''\n    plot = stats.probplot(point_df[column], dist=\"norm\", plot=plt, fit = False)\n    plt.title(column)\n    plt.show()\n    \n    \n\ndef plot_hist(df, column=''):\n    if column !='':\n        x = df[column]\n    else:\n        x = df\n    sns.distplot(x)\n    \n    \ndef plot_linear_regression(variableName1, variableName2, title=''):\n    sns.regplot(x=variableName1, y=variableName2).set_title(title)\n    \n    \ndef plot_distribution_s(points_df, column, column_gps = None):\n    \"\"\" \n    Aim:\n        Plot of two distributions in a single figure for visually comparing the shapes of the two distributions\n    \n    Keyword Arguments: \n        points {GeoDataFrame} -- the GeoDataFrame containing the measurements\n        column {str} -- the column name of the measurement of interest, e.g. 'Speed.value'\n        column_gps {str} -- the column name of the same phenomenon measured based on GPS, e.g. 'GPS Speed.value'\n    \n    Return:\n        No Return, instead a plot is displayed\n    \"\"\"\n    if column_gps is not None:\n        sns.kdeplot(points_df[column], shade=True)\n        sns.kdeplot(points_df[column_gps], shade=True)\n    else:\n        sns.kdeplot(points_df[column], shade=True)\n    \n\n"
] |
[
[
"pandas.to_datetime",
"pandas.DataFrame",
"matplotlib.pyplot.title",
"numpy.ones",
"matplotlib.pyplot.figure",
"scipy.stats.probplot",
"matplotlib.pyplot.show"
]
] |
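The threshold scheme in get_classified_correlations above (|r| >= 0.8 very strong, 0.6 to 0.8 strong, 0.4 to 0.6 moderate, below 0.4 weak) can be exercised on synthetic data. A minimal sketch, assuming the file is importable under the hypothetical module name `inspection` and an older NumPy in which the `np.bool` alias the helper relies on still exists; note that the helper wraps each bucket in a DataFrame and sorts it, so it raises if any bucket comes back empty — the noise scales below are chosen so all four are populated.

import numpy as np
import pandas as pd
import inspection  # hypothetical module name for the file above

rng = np.random.default_rng(0)
n = 500
a = rng.normal(size=n)
df = pd.DataFrame({
    'a': a,
    'b': a + 0.10 * rng.normal(size=n),  # r(a, b) ~ 0.99 -> very strong
    'c': a + 1.02 * rng.normal(size=n),  # r(a, c) ~ 0.70 -> strong
    'd': a + 1.73 * rng.normal(size=n),  # r(a, d) ~ 0.50 -> moderate
    'e': rng.normal(size=n),             # independent    -> weak
})

allCoeffs, very_strong, strong, moderate, weak = \
    inspection.get_classified_correlations(df, method='pearson')
print(very_strong.head())  # expect the (a, b) pair with |r| >= 0.8
print(weak.head())         # 'e' against everything, plus the weakly related pairs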
Leterax/Visualization
|
[
"4f3ad3f1ebc920b1f315ae9ff0d5c44d7a7ec514"
] |
[
"visualization/TextArt/text_art.py"
] |
[
from pathlib import Path\n\nimport moderngl\nimport moderngl_window as mglw\nimport numpy as np\n\n# noinspection PyUnresolvedReferences\nfrom generate_text import render_text_perimeter_balls\nfrom pyrr import matrix44\n\n\nclass TextArt(mglw.WindowConfig):\n    \"\"\"\n    Render text assembled from balls.\n    Press space to pause\n    Drag mouse to scroll through time\n    Use `add_text` to add more text to the scene.\n    \"\"\"\n\n    gl_version = (3, 3)\n    title = \"TextArt\"\n    samples = 16\n    resource_dir = (Path(__file__) / \"../resources\").absolute()\n\n    def __init__(self, **kwargs):\n        super().__init__(**kwargs)\n\n        self.projection = matrix44.create_orthogonal_projection(\n            0, self.wnd.size[0], 0, self.wnd.size[1], -1.0, 1.0, dtype=\"f4\"\n        )\n        self.texts = []\n        self.is_paused = False\n\n        self.add_text(\"Text\", 300, (300, 350), 5, (1, 0, 0))\n        self.add_text(\"Art\", 300, (150, 75), 5, (0, 1, 0))\n\n    def add_text(self, text: str, scale: int, pos: tuple, ball_size: int, color: tuple):\n        target = render_text_perimeter_balls(\n            text, scale=scale, pos=pos[::-1], ball_size=ball_size\n        )\n        n = target.shape[0]\n        vertices = np.random.random_sample((n, 2))\n        vertices = np.array(\n            [vertices[:, 0] * self.window_size[0], vertices[:, 1] * self.window_size[1]]\n        ).T\n\n        prog = self.load_program(\"text_shader.glsl\")\n        prog[\"time\"].value = 0\n        prog[\"m_proj\"].write(self.projection.tobytes())\n\n        prog[\"size\"].value = ball_size\n        prog[\"color\"].value = color\n\n        vbo_1 = self.ctx.buffer(vertices.astype(\"f4\").tobytes())\n        vbo_2 = self.ctx.buffer(target.astype(\"f4\").tobytes())\n\n        vao = self.ctx.vertex_array(\n            prog, [(vbo_1, \"2f4\", \"in_position\"), (vbo_2, \"2f4\", \"target\")]\n        )\n\n        self.texts.append((vao, prog))\n\n    def render(self, time, frame_time):\n        r = g = b = 51 / 255\n        self.ctx.clear(r, g, b)\n        self.ctx.enable(moderngl.BLEND)\n        self.ctx.blend_func = moderngl.SRC_ALPHA, moderngl.ONE_MINUS_SRC_ALPHA\n\n        for vao, prg in self.texts:\n            prg[\"time\"].value = min(time / 2.0, 1.0)\n            vao.render(mode=moderngl.POINTS)\n\n    def mouse_drag_event(self, x, y, dx, dy):\n        self.timer.time = max(0, min(x / self.wnd.buffer_width, 1))\n\n    def key_event(self, key, action, modifiers):\n        keys = self.wnd.keys\n\n        if action == keys.ACTION_PRESS:\n            if key == keys.SPACE:\n                self.timer.toggle_pause()\n\n\nif __name__ == \"__main__\":\n    TextArt.run()\n"
] |
[
[
"numpy.array",
"numpy.random.random_sample"
]
] |
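In the render loop above, prog["time"] is clamped to [0, 1] via min(time / 2.0, 1.0), and mouse_drag_event maps the cursor's x position onto the same normalized clock, so the shader receives a single progress scalar per frame. text_shader.glsl itself is not included in this record, so the easing is an assumption; the NumPy sketch below shows the simplest reading, with each ball blending linearly from its random in_position start toward its target perimeter point.

import numpy as np

def ball_positions(start, target, t):
    """Blend each ball from its random start toward its perimeter target.

    start, target: (n, 2) arrays of pixel coordinates
    t: progress scalar, mirroring prog["time"].value = min(time / 2.0, 1.0)
    """
    t = float(np.clip(t, 0.0, 1.0))
    return (1.0 - t) * start + t * target  # plain linear interpolation (assumed easing)

rng = np.random.default_rng(1)
window = np.array([1280.0, 720.0])
start = rng.random((5, 2)) * window   # as in add_text(): random window positions
target = rng.random((5, 2)) * window  # stand-in for render_text_perimeter_balls() output
assert np.allclose(ball_positions(start, target, 0.0), start)
assert np.allclose(ball_positions(start, target, 1.0), target)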
TiantianWang/VideoMatting-CRGNN
|
[
"25220077d1a7ca8ad7beed000fccc0559b4ea141"
] |
[
"models/hlmobilenetv2.py"
] |
[
"\"\"\"\nThis implementation is modified from the following repository:\nhttps://github.com/poppinace/indexnet_matting\n\n\"\"\"\n\nimport os\nimport sys\nimport math\nfrom time import time\nfrom functools import partial\n\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\n\nfrom hlaspp import ASPP\nfrom lib.nn import SynchronizedBatchNorm2d\nfrom hlindex import HolisticIndexBlock, DepthwiseO2OIndexBlock, DepthwiseM2OIndexBlock\nfrom hldecoder import *\nfrom hlconv import *\nfrom modelsummary import get_model_summary\n\nimport units\nimport units.ConvGRU2 as ConvGRU\n\ntry:\n from urllib import urlretrieve\nexcept ImportError:\n from urllib.request import urlretrieve\ntry:\n from models.archs.dcn.deform_conv import ModulatedDeformConvPack as DCN\nexcept ImportError:\n raise ImportError('Failed to import DCNv2 module.')\n\nmodel_urls = {\n 'mobilenetv2': 'http://sceneparsing.csail.mit.edu/model/pretrained_resnet/mobilenet_v2.pth.tar',\n}\n\n\nCORRESP_NAME = {\n # layer0\n \"features.0.0.weight\": \"layer0.0.weight\",\n \"features.0.1.weight\": \"layer0.1.weight\",\n \"features.0.1.bias\": \"layer0.1.bias\",\n \"features.0.1.running_mean\": \"layer0.1.running_mean\",\n \"features.0.1.running_var\": \"layer0.1.running_var\",\n # layer1\n \"features.1.conv.0.weight\": \"layer1.0.conv.0.weight\",\n \"features.1.conv.1.weight\": \"layer1.0.conv.1.weight\",\n \"features.1.conv.1.bias\": \"layer1.0.conv.1.bias\",\n \"features.1.conv.1.running_mean\": \"layer1.0.conv.1.running_mean\",\n \"features.1.conv.1.running_var\": \"layer1.0.conv.1.running_var\",\n \"features.1.conv.3.weight\": \"layer1.0.conv.3.weight\",\n \"features.1.conv.4.weight\": \"layer1.0.conv.4.weight\",\n \"features.1.conv.4.bias\": \"layer1.0.conv.4.bias\",\n \"features.1.conv.4.running_mean\": \"layer1.0.conv.4.running_mean\",\n \"features.1.conv.4.running_var\": \"layer1.0.conv.4.running_var\",\n # layer2\n \"features.2.conv.0.weight\": \"layer2.0.conv.0.weight\",\n \"features.2.conv.1.weight\": \"layer2.0.conv.1.weight\",\n \"features.2.conv.1.bias\": \"layer2.0.conv.1.bias\",\n \"features.2.conv.1.running_mean\": \"layer2.0.conv.1.running_mean\",\n \"features.2.conv.1.running_var\": \"layer2.0.conv.1.running_var\",\n \"features.2.conv.3.weight\": \"layer2.0.conv.3.weight\",\n \"features.2.conv.4.weight\": \"layer2.0.conv.4.weight\",\n \"features.2.conv.4.bias\": \"layer2.0.conv.4.bias\",\n \"features.2.conv.4.running_mean\": \"layer2.0.conv.4.running_mean\",\n \"features.2.conv.4.running_var\": \"layer2.0.conv.4.running_var\",\n \"features.2.conv.6.weight\": \"layer2.0.conv.6.weight\",\n \"features.2.conv.7.weight\": \"layer2.0.conv.7.weight\",\n \"features.2.conv.7.bias\": \"layer2.0.conv.7.bias\",\n \"features.2.conv.7.running_mean\": \"layer2.0.conv.7.running_mean\",\n \"features.2.conv.7.running_var\": \"layer2.0.conv.7.running_var\",\n\n \"features.3.conv.0.weight\": \"layer2.1.conv.0.weight\",\n \"features.3.conv.1.weight\": \"layer2.1.conv.1.weight\",\n \"features.3.conv.1.bias\": \"layer2.1.conv.1.bias\",\n \"features.3.conv.1.running_mean\": \"layer2.1.conv.1.running_mean\",\n \"features.3.conv.1.running_var\": \"layer2.1.conv.1.running_var\",\n \"features.3.conv.3.weight\": \"layer2.1.conv.3.weight\",\n \"features.3.conv.4.weight\": \"layer2.1.conv.4.weight\",\n \"features.3.conv.4.bias\": \"layer2.1.conv.4.bias\",\n \"features.3.conv.4.running_mean\": \"layer2.1.conv.4.running_mean\",\n \"features.3.conv.4.running_var\": \"layer2.1.conv.4.running_var\",\n \"features.3.conv.6.weight\": 
\"layer2.1.conv.6.weight\",\n \"features.3.conv.7.weight\": \"layer2.1.conv.7.weight\",\n \"features.3.conv.7.bias\": \"layer2.1.conv.7.bias\",\n \"features.3.conv.7.running_mean\": \"layer2.1.conv.7.running_mean\",\n \"features.3.conv.7.running_var\": \"layer2.1.conv.7.running_var\",\n # layer3\n \"features.4.conv.0.weight\": \"layer3.0.conv.0.weight\",\n \"features.4.conv.1.weight\": \"layer3.0.conv.1.weight\",\n \"features.4.conv.1.bias\": \"layer3.0.conv.1.bias\",\n \"features.4.conv.1.running_mean\": \"layer3.0.conv.1.running_mean\",\n \"features.4.conv.1.running_var\": \"layer3.0.conv.1.running_var\",\n \"features.4.conv.3.weight\": \"layer3.0.conv.3.weight\",\n \"features.4.conv.4.weight\": \"layer3.0.conv.4.weight\",\n \"features.4.conv.4.bias\": \"layer3.0.conv.4.bias\",\n \"features.4.conv.4.running_mean\": \"layer3.0.conv.4.running_mean\",\n \"features.4.conv.4.running_var\": \"layer3.0.conv.4.running_var\",\n \"features.4.conv.6.weight\": \"layer3.0.conv.6.weight\",\n \"features.4.conv.7.weight\": \"layer3.0.conv.7.weight\",\n \"features.4.conv.7.bias\": \"layer3.0.conv.7.bias\",\n \"features.4.conv.7.running_mean\": \"layer3.0.conv.7.running_mean\",\n \"features.4.conv.7.running_var\": \"layer3.0.conv.7.running_var\",\n\n \"features.5.conv.0.weight\": \"layer3.1.conv.0.weight\",\n \"features.5.conv.1.weight\": \"layer3.1.conv.1.weight\",\n \"features.5.conv.1.bias\": \"layer3.1.conv.1.bias\",\n \"features.5.conv.1.running_mean\": \"layer3.1.conv.1.running_mean\",\n \"features.5.conv.1.running_var\": \"layer3.1.conv.1.running_var\",\n \"features.5.conv.3.weight\": \"layer3.1.conv.3.weight\",\n \"features.5.conv.4.weight\": \"layer3.1.conv.4.weight\",\n \"features.5.conv.4.bias\": \"layer3.1.conv.4.bias\",\n \"features.5.conv.4.running_mean\": \"layer3.1.conv.4.running_mean\",\n \"features.5.conv.4.running_var\": \"layer3.1.conv.4.running_var\",\n \"features.5.conv.6.weight\": \"layer3.1.conv.6.weight\",\n \"features.5.conv.7.weight\": \"layer3.1.conv.7.weight\",\n \"features.5.conv.7.bias\": \"layer3.1.conv.7.bias\",\n \"features.5.conv.7.running_mean\": \"layer3.1.conv.7.running_mean\",\n \"features.5.conv.7.running_var\": \"layer3.1.conv.7.running_var\",\n\n \"features.6.conv.0.weight\": \"layer3.2.conv.0.weight\",\n \"features.6.conv.1.weight\": \"layer3.2.conv.1.weight\",\n \"features.6.conv.1.bias\": \"layer3.2.conv.1.bias\",\n \"features.6.conv.1.running_mean\": \"layer3.2.conv.1.running_mean\",\n \"features.6.conv.1.running_var\": \"layer3.2.conv.1.running_var\",\n \"features.6.conv.3.weight\": \"layer3.2.conv.3.weight\",\n \"features.6.conv.4.weight\": \"layer3.2.conv.4.weight\",\n \"features.6.conv.4.bias\": \"layer3.2.conv.4.bias\",\n \"features.6.conv.4.running_mean\": \"layer3.2.conv.4.running_mean\",\n \"features.6.conv.4.running_var\": \"layer3.2.conv.4.running_var\",\n \"features.6.conv.6.weight\": \"layer3.2.conv.6.weight\",\n \"features.6.conv.7.weight\": \"layer3.2.conv.7.weight\",\n \"features.6.conv.7.bias\": \"layer3.2.conv.7.bias\",\n \"features.6.conv.7.running_mean\": \"layer3.2.conv.7.running_mean\",\n \"features.6.conv.7.running_var\": \"layer3.2.conv.7.running_var\",\n # layer4\n \"features.7.conv.0.weight\": \"layer4.0.conv.0.weight\",\n \"features.7.conv.1.weight\": \"layer4.0.conv.1.weight\",\n \"features.7.conv.1.bias\": \"layer4.0.conv.1.bias\",\n \"features.7.conv.1.running_mean\": \"layer4.0.conv.1.running_mean\",\n \"features.7.conv.1.running_var\": \"layer4.0.conv.1.running_var\",\n \"features.7.conv.3.weight\": \"layer4.0.conv.3.weight\",\n 
\"features.7.conv.4.weight\": \"layer4.0.conv.4.weight\",\n \"features.7.conv.4.bias\": \"layer4.0.conv.4.bias\",\n \"features.7.conv.4.running_mean\": \"layer4.0.conv.4.running_mean\",\n \"features.7.conv.4.running_var\": \"layer4.0.conv.4.running_var\",\n \"features.7.conv.6.weight\": \"layer4.0.conv.6.weight\",\n \"features.7.conv.7.weight\": \"layer4.0.conv.7.weight\",\n \"features.7.conv.7.bias\": \"layer4.0.conv.7.bias\",\n \"features.7.conv.7.running_mean\": \"layer4.0.conv.7.running_mean\",\n \"features.7.conv.7.running_var\": \"layer4.0.conv.7.running_var\",\n\n \"features.8.conv.0.weight\": \"layer4.1.conv.0.weight\",\n \"features.8.conv.1.weight\": \"layer4.1.conv.1.weight\",\n \"features.8.conv.1.bias\": \"layer4.1.conv.1.bias\",\n \"features.8.conv.1.running_mean\": \"layer4.1.conv.1.running_mean\",\n \"features.8.conv.1.running_var\": \"layer4.1.conv.1.running_var\",\n \"features.8.conv.3.weight\": \"layer4.1.conv.3.weight\",\n \"features.8.conv.4.weight\": \"layer4.1.conv.4.weight\",\n \"features.8.conv.4.bias\": \"layer4.1.conv.4.bias\",\n \"features.8.conv.4.running_mean\": \"layer4.1.conv.4.running_mean\",\n \"features.8.conv.4.running_var\": \"layer4.1.conv.4.running_var\",\n \"features.8.conv.6.weight\": \"layer4.1.conv.6.weight\",\n \"features.8.conv.7.weight\": \"layer4.1.conv.7.weight\",\n \"features.8.conv.7.bias\": \"layer4.1.conv.7.bias\",\n \"features.8.conv.7.running_mean\": \"layer4.1.conv.7.running_mean\",\n \"features.8.conv.7.running_var\": \"layer4.1.conv.7.running_var\",\n\n \"features.9.conv.0.weight\": \"layer4.2.conv.0.weight\",\n \"features.9.conv.1.weight\": \"layer4.2.conv.1.weight\",\n \"features.9.conv.1.bias\": \"layer4.2.conv.1.bias\",\n \"features.9.conv.1.running_mean\": \"layer4.2.conv.1.running_mean\",\n \"features.9.conv.1.running_var\": \"layer4.2.conv.1.running_var\",\n \"features.9.conv.3.weight\": \"layer4.2.conv.3.weight\",\n \"features.9.conv.4.weight\": \"layer4.2.conv.4.weight\",\n \"features.9.conv.4.bias\": \"layer4.2.conv.4.bias\",\n \"features.9.conv.4.running_mean\": \"layer4.2.conv.4.running_mean\",\n \"features.9.conv.4.running_var\": \"layer4.2.conv.4.running_var\",\n \"features.9.conv.6.weight\": \"layer4.2.conv.6.weight\",\n \"features.9.conv.7.weight\": \"layer4.2.conv.7.weight\",\n \"features.9.conv.7.bias\": \"layer4.2.conv.7.bias\",\n \"features.9.conv.7.running_mean\": \"layer4.2.conv.7.running_mean\",\n \"features.9.conv.7.running_var\": \"layer4.2.conv.7.running_var\",\n\n \"features.10.conv.0.weight\": \"layer4.3.conv.0.weight\",\n \"features.10.conv.1.weight\": \"layer4.3.conv.1.weight\",\n \"features.10.conv.1.bias\": \"layer4.3.conv.1.bias\",\n \"features.10.conv.1.running_mean\": \"layer4.3.conv.1.running_mean\",\n \"features.10.conv.1.running_var\": \"layer4.3.conv.1.running_var\",\n \"features.10.conv.3.weight\": \"layer4.3.conv.3.weight\",\n \"features.10.conv.4.weight\": \"layer4.3.conv.4.weight\",\n \"features.10.conv.4.bias\": \"layer4.3.conv.4.bias\",\n \"features.10.conv.4.running_mean\": \"layer4.3.conv.4.running_mean\",\n \"features.10.conv.4.running_var\": \"layer4.3.conv.4.running_var\",\n \"features.10.conv.6.weight\": \"layer4.3.conv.6.weight\",\n \"features.10.conv.7.weight\": \"layer4.3.conv.7.weight\",\n \"features.10.conv.7.bias\": \"layer4.3.conv.7.bias\",\n \"features.10.conv.7.running_mean\": \"layer4.3.conv.7.running_mean\",\n \"features.10.conv.7.running_var\": \"layer4.3.conv.7.running_var\",\n # layer5\n \"features.11.conv.0.weight\": \"layer5.0.conv.0.weight\",\n 
\"features.11.conv.1.weight\": \"layer5.0.conv.1.weight\",\n \"features.11.conv.1.bias\": \"layer5.0.conv.1.bias\",\n \"features.11.conv.1.running_mean\": \"layer5.0.conv.1.running_mean\",\n \"features.11.conv.1.running_var\": \"layer5.0.conv.1.running_var\",\n \"features.11.conv.3.weight\": \"layer5.0.conv.3.weight\",\n \"features.11.conv.4.weight\": \"layer5.0.conv.4.weight\",\n \"features.11.conv.4.bias\": \"layer5.0.conv.4.bias\",\n \"features.11.conv.4.running_mean\": \"layer5.0.conv.4.running_mean\",\n \"features.11.conv.4.running_var\": \"layer5.0.conv.4.running_var\",\n \"features.11.conv.6.weight\": \"layer5.0.conv.6.weight\",\n \"features.11.conv.7.weight\": \"layer5.0.conv.7.weight\",\n \"features.11.conv.7.bias\": \"layer5.0.conv.7.bias\",\n \"features.11.conv.7.running_mean\": \"layer5.0.conv.7.running_mean\",\n \"features.11.conv.7.running_var\": \"layer5.0.conv.7.running_var\",\n\n \"features.12.conv.0.weight\": \"layer5.1.conv.0.weight\",\n \"features.12.conv.1.weight\": \"layer5.1.conv.1.weight\",\n \"features.12.conv.1.bias\": \"layer5.1.conv.1.bias\",\n \"features.12.conv.1.running_mean\": \"layer5.1.conv.1.running_mean\",\n \"features.12.conv.1.running_var\": \"layer5.1.conv.1.running_var\",\n \"features.12.conv.3.weight\": \"layer5.1.conv.3.weight\",\n \"features.12.conv.4.weight\": \"layer5.1.conv.4.weight\",\n \"features.12.conv.4.bias\": \"layer5.1.conv.4.bias\",\n \"features.12.conv.4.running_mean\": \"layer5.1.conv.4.running_mean\",\n \"features.12.conv.4.running_var\": \"layer5.1.conv.4.running_var\",\n \"features.12.conv.6.weight\": \"layer5.1.conv.6.weight\",\n \"features.12.conv.7.weight\": \"layer5.1.conv.7.weight\",\n \"features.12.conv.7.bias\": \"layer5.1.conv.7.bias\",\n \"features.12.conv.7.running_mean\": \"layer5.1.conv.7.running_mean\",\n \"features.12.conv.7.running_var\": \"layer5.1.conv.7.running_var\",\n\n \"features.13.conv.0.weight\": \"layer5.2.conv.0.weight\",\n \"features.13.conv.1.weight\": \"layer5.2.conv.1.weight\",\n \"features.13.conv.1.bias\": \"layer5.2.conv.1.bias\",\n \"features.13.conv.1.running_mean\": \"layer5.2.conv.1.running_mean\",\n \"features.13.conv.1.running_var\": \"layer5.2.conv.1.running_var\",\n \"features.13.conv.3.weight\": \"layer5.2.conv.3.weight\",\n \"features.13.conv.4.weight\": \"layer5.2.conv.4.weight\",\n \"features.13.conv.4.bias\": \"layer5.2.conv.4.bias\",\n \"features.13.conv.4.running_mean\": \"layer5.2.conv.4.running_mean\",\n \"features.13.conv.4.running_var\": \"layer5.2.conv.4.running_var\",\n \"features.13.conv.6.weight\": \"layer5.2.conv.6.weight\",\n \"features.13.conv.7.weight\": \"layer5.2.conv.7.weight\",\n \"features.13.conv.7.bias\": \"layer5.2.conv.7.bias\",\n \"features.13.conv.7.running_mean\": \"layer5.2.conv.7.running_mean\",\n \"features.13.conv.7.running_var\": \"layer5.2.conv.7.running_var\",\n # layer6\n \"features.14.conv.0.weight\": \"layer6.0.conv.0.weight\",\n \"features.14.conv.1.weight\": \"layer6.0.conv.1.weight\",\n \"features.14.conv.1.bias\": \"layer6.0.conv.1.bias\",\n \"features.14.conv.1.running_mean\": \"layer6.0.conv.1.running_mean\",\n \"features.14.conv.1.running_var\": \"layer6.0.conv.1.running_var\",\n \"features.14.conv.3.weight\": \"layer6.0.conv.3.weight\",\n \"features.14.conv.4.weight\": \"layer6.0.conv.4.weight\",\n \"features.14.conv.4.bias\": \"layer6.0.conv.4.bias\",\n \"features.14.conv.4.running_mean\": \"layer6.0.conv.4.running_mean\",\n \"features.14.conv.4.running_var\": \"layer6.0.conv.4.running_var\",\n \"features.14.conv.6.weight\": 
\"layer6.0.conv.6.weight\",\n \"features.14.conv.7.weight\": \"layer6.0.conv.7.weight\",\n \"features.14.conv.7.bias\": \"layer6.0.conv.7.bias\",\n \"features.14.conv.7.running_mean\": \"layer6.0.conv.7.running_mean\",\n \"features.14.conv.7.running_var\": \"layer6.0.conv.7.running_var\",\n\n \"features.15.conv.0.weight\": \"layer6.1.conv.0.weight\",\n \"features.15.conv.1.weight\": \"layer6.1.conv.1.weight\",\n \"features.15.conv.1.bias\": \"layer6.1.conv.1.bias\",\n \"features.15.conv.1.running_mean\": \"layer6.1.conv.1.running_mean\",\n \"features.15.conv.1.running_var\": \"layer6.1.conv.1.running_var\",\n \"features.15.conv.3.weight\": \"layer6.1.conv.3.weight\",\n \"features.15.conv.4.weight\": \"layer6.1.conv.4.weight\",\n \"features.15.conv.4.bias\": \"layer6.1.conv.4.bias\",\n \"features.15.conv.4.running_mean\": \"layer6.1.conv.4.running_mean\",\n \"features.15.conv.4.running_var\": \"layer6.1.conv.4.running_var\",\n \"features.15.conv.6.weight\": \"layer6.1.conv.6.weight\",\n \"features.15.conv.7.weight\": \"layer6.1.conv.7.weight\",\n \"features.15.conv.7.bias\": \"layer6.1.conv.7.bias\",\n \"features.15.conv.7.running_mean\": \"layer6.1.conv.7.running_mean\",\n \"features.15.conv.7.running_var\": \"layer6.1.conv.7.running_var\",\n\n \"features.16.conv.0.weight\": \"layer6.2.conv.0.weight\",\n \"features.16.conv.1.weight\": \"layer6.2.conv.1.weight\",\n \"features.16.conv.1.bias\": \"layer6.2.conv.1.bias\",\n \"features.16.conv.1.running_mean\": \"layer6.2.conv.1.running_mean\",\n \"features.16.conv.1.running_var\": \"layer6.2.conv.1.running_var\",\n \"features.16.conv.3.weight\": \"layer6.2.conv.3.weight\",\n \"features.16.conv.4.weight\": \"layer6.2.conv.4.weight\",\n \"features.16.conv.4.bias\": \"layer6.2.conv.4.bias\",\n \"features.16.conv.4.running_mean\": \"layer6.2.conv.4.running_mean\",\n \"features.16.conv.4.running_var\": \"layer6.2.conv.4.running_var\",\n \"features.16.conv.6.weight\": \"layer6.2.conv.6.weight\",\n \"features.16.conv.7.weight\": \"layer6.2.conv.7.weight\",\n \"features.16.conv.7.bias\": \"layer6.2.conv.7.bias\",\n \"features.16.conv.7.running_mean\": \"layer6.2.conv.7.running_mean\",\n \"features.16.conv.7.running_var\": \"layer6.2.conv.7.running_var\",\n # layer7\n \"features.17.conv.0.weight\": \"layer7.0.conv.0.weight\",\n \"features.17.conv.1.weight\": \"layer7.0.conv.1.weight\",\n \"features.17.conv.1.bias\": \"layer7.0.conv.1.bias\",\n \"features.17.conv.1.running_mean\": \"layer7.0.conv.1.running_mean\",\n \"features.17.conv.1.running_var\": \"layer7.0.conv.1.running_var\",\n \"features.17.conv.3.weight\": \"layer7.0.conv.3.weight\",\n \"features.17.conv.4.weight\": \"layer7.0.conv.4.weight\",\n \"features.17.conv.4.bias\": \"layer7.0.conv.4.bias\",\n \"features.17.conv.4.running_mean\": \"layer7.0.conv.4.running_mean\",\n \"features.17.conv.4.running_var\": \"layer7.0.conv.4.running_var\",\n \"features.17.conv.6.weight\": \"layer7.0.conv.6.weight\",\n \"features.17.conv.7.weight\": \"layer7.0.conv.7.weight\",\n \"features.17.conv.7.bias\": \"layer7.0.conv.7.bias\",\n \"features.17.conv.7.running_mean\": \"layer7.0.conv.7.running_mean\",\n \"features.17.conv.7.running_var\": \"layer7.0.conv.7.running_var\",\n}\n\ndef pred(inp, oup, conv_operator, k, batch_norm):\n # the last 1x1 convolutional layer is very important\n hlConv2d = hlconv[conv_operator]\n return nn.Sequential(\n hlConv2d(inp, oup, k, 1, batch_norm),\n nn.Conv2d(oup, oup, k, 1, padding=k//2, bias=False)\n )\n\n\nclass InvertedResidual(nn.Module):\n def __init__(self, inp, oup, 
stride, dilation, expand_ratio, batch_norm):\n super(InvertedResidual, self).__init__()\n self.stride = stride\n assert stride in [1, 2]\n\n BatchNorm2d = batch_norm\n\n hidden_dim = round(inp * expand_ratio)\n self.use_res_connect = self.stride == 1 and inp == oup\n self.kernel_size = 3\n self.dilation = dilation\n\n if expand_ratio == 1:\n self.conv = nn.Sequential(\n # dw\n nn.Conv2d(hidden_dim, hidden_dim, 3, stride, 0, dilation, groups=hidden_dim, bias=False),\n BatchNorm2d(hidden_dim),\n nn.ReLU6(inplace=True),\n # pw-linear\n nn.Conv2d(hidden_dim, oup, 1, 1, 0, bias=False),\n BatchNorm2d(oup),\n )\n else:\n self.conv = nn.Sequential(\n # pw\n nn.Conv2d(inp, hidden_dim, 1, 1, 0, bias=False),\n BatchNorm2d(hidden_dim),\n nn.ReLU6(inplace=True),\n # dw\n nn.Conv2d(hidden_dim, hidden_dim, 3, stride, 0, dilation, groups=hidden_dim, bias=False),\n BatchNorm2d(hidden_dim),\n nn.ReLU6(inplace=True),\n # pw-linear\n nn.Conv2d(hidden_dim, oup, 1, 1, 0, bias=False),\n BatchNorm2d(oup),\n )\n\n def fixed_padding(self, inputs, kernel_size, dilation):\n kernel_size_effective = kernel_size + (kernel_size - 1) * (dilation - 1)\n pad_total = kernel_size_effective - 1\n pad_beg = pad_total // 2\n pad_end = pad_total - pad_beg\n padded_inputs = F.pad(inputs, (pad_beg, pad_end, pad_beg, pad_end))\n return padded_inputs\n\n def _nostride_dilate(self, m, dilate):\n classname = m.__class__.__name__\n if classname.find('Conv') != -1:\n if m.kernel_size == (3, 3):\n m.dilation = (dilate, dilate)\n m.padding = (dilate, dilate)\n\n def forward(self, x):\n x_pad = self.fixed_padding(x, self.kernel_size, dilation=self.dilation)\n if self.use_res_connect:\n return x + self.conv(x_pad)\n else:\n return self.conv(x_pad)\n\n\n\n\n#######################################################################################\n# RefineNet B2\n#######################################################################################\nclass CRPBlock(nn.Module):\n def __init__(self, inp, oup, n_stages, batch_norm):\n super(CRPBlock, self).__init__()\n BatchNorm2d = batch_norm\n for i in range(n_stages):\n setattr(\n self, '{}_{}'.format(i + 1, 'outvar_dimred'),\n conv_bn(inp if (i == 0) else oup, oup, 1, 1, BatchNorm2d)\n )\n self.stride = 1\n self.n_stages = n_stages\n self.maxpool = nn.MaxPool2d(kernel_size=5, stride=1, padding=2)\n\n def forward(self, x):\n top = x\n for i in range(self.n_stages):\n top = self.maxpool(top)\n top = getattr(self, '{}_{}'.format(i + 1, 'outvar_dimred'))(top)\n x = top + x\n return x\n\n\n#######################################################################################\n# IndexNet\n#######################################################################################\nclass hlMobileNetV2UNetDecoderIndexLearning(nn.Module):\n def __init__(\n self, \n output_stride=32, \n input_size=320, \n width_mult=1., \n conv_operator='std_conv',\n decoder_kernel_size=5,\n apply_aspp=False,\n freeze_bn=False,\n use_nonlinear=False,\n use_context=False,\n indexnet='holistic',\n index_mode='o2o',\n sync_bn=False\n ):\n super(hlMobileNetV2UNetDecoderIndexLearning, self).__init__()\n\n self.Encoder = Encoder(output_stride=output_stride, \n input_size=input_size, \n width_mult=width_mult, \n conv_operator=conv_operator,\n decoder_kernel_size=decoder_kernel_size,\n apply_aspp=apply_aspp,\n freeze_bn=freeze_bn,\n use_nonlinear=use_nonlinear,\n use_context=use_context,\n indexnet=indexnet,\n index_mode=index_mode,\n sync_bn=sync_bn)\n\n\n self.AlignedNet = AlignedNet(idim=160, odim=160)\n \n self.ConvGRU = 
ConvGRU.ConvGRUCell(160, 160, 40, kernel_size=1)\n self.propagate_layers = 3\n self.conv_fusion = nn.Conv2d(160*2, 160, kernel_size=3, padding=1, bias= True)\n self.channel = 160\n self.linear_e = nn.Linear(160, 160,bias = False)\n self.gate = nn.Conv2d(160, 1, kernel_size = 1, bias = False)\n self.gate_s = nn.Sigmoid()\n\n self.conv1 = nn.Conv2d(160, 160, kernel_size=3, padding=1, bias = False)\n self.bn1 = nn.BatchNorm2d(160)\n\n\n self.Decoder = Decoder(output_stride=output_stride, \n input_size=input_size, \n width_mult=width_mult, \n conv_operator=conv_operator,\n decoder_kernel_size=decoder_kernel_size,\n apply_aspp=apply_aspp,\n freeze_bn=freeze_bn,\n use_nonlinear=use_nonlinear,\n use_context=use_context,\n indexnet=indexnet,\n index_mode=index_mode,\n sync_bn=sync_bn)\n\n self._initialize_weights()\n\n def _initialize_weights(self):\n for m in self.modules():\n if isinstance(m, nn.Conv2d):\n n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels\n m.weight.data.normal_(0, math.sqrt(2. / n))\n\n if m.bias is not None:\n m.bias.data.zero_()\n elif isinstance(m, SynchronizedBatchNorm2d):\n m.weight.data.fill_(1)\n m.bias.data.zero_()\n elif isinstance(m, nn.BatchNorm2d):\n\n m.weight.data.fill_(1)\n m.bias.data.zero_()\n\n def forward(self, pf, ptris, cf, ctris, pf2, ptris2):\n pl, px6, px5, px4, px3, px2, px1, px0 = self.Encoder(pf, ptris)\n pl2, p2x6, p2x5, p2x4, p2x3, p2x2, p2x1, p2x0 = self.Encoder(pf2, ptris2)\n cl, x6, x5, x4, x3, x2, x1, x0 = self.Encoder(cf, ctris)\n\n input_size = pf.size()[2:]\n batch_num = pf.size()[0]\n\n exemplars = cl\n querys = pl\n query1s = pl2\n\n for passing_round in range(self.propagate_layers):\n\n\n attention1 = self.conv_fusion(torch.cat([self.AlignedNet(querys, exemplars),\n self.AlignedNet(query1s, exemplars)],1)) #message passing with concat operation\n attention2 = self.conv_fusion(torch.cat([self.AlignedNet(exemplars, querys),\n self.AlignedNet(query1s, querys)],1))\n attention3 = self.conv_fusion(torch.cat([self.AlignedNet(exemplars, query1s),\n self.AlignedNet(querys, query1s)],1))\n\n h_v1 = self.ConvGRU(attention1, exemplars)\n h_v2 = self.ConvGRU(attention2, querys)\n h_v3 = self.ConvGRU(attention3, query1s)\n \n exemplars = h_v1.clone()\n querys = h_v2.clone()\n query1s = h_v3.clone()\n \n if passing_round == self.propagate_layers -1:\n x1s, x1s_fg = self.Decoder(h_v1, x6, x5, x4, x3, x2, x1, x0)\n x2s, x2s_fg = self.Decoder(h_v2, px6, px5, px4, px3, px2, px1, px0)\n x3s, x3s_fg = self.Decoder(h_v3, p2x6, p2x5, p2x4, p2x3, p2x2, p2x1, p2x0)\n\n return x2s, x2s_fg, x1s, x1s_fg, x3s, x3s_fg\n\n def generate_batchwise_data(self, ii, px6, px5, px4, px3, px2, px1, px0):\n px6_ = [px6[0][ii].unsqueeze(0), px6[1]]\n px5_ = px5[ii].unsqueeze(0)\n if px4[1] is None:\n px4_ = [px4[0][ii].unsqueeze(0), px4[1]]\n else:\n px4_ = [px4[0][ii].unsqueeze(0), px4[1][ii].unsqueeze(0)]\n px3_ = [px3[0][ii].unsqueeze(0), px3[1][ii].unsqueeze(0)]\n px2_ = [px2[0][ii].unsqueeze(0), px2[1][ii].unsqueeze(0)]\n px1_ = px1[ii].unsqueeze(0)\n px0_ = [px0[0][ii].unsqueeze(0), px0[1][ii].unsqueeze(0)]\n return px6_, px5_, px4_, px3_, px2_, px1_, px0_\n\n\nclass AlignedNet(nn.Module):\n # Not using location\n def __init__(self, idim, odim):\n super(AlignedNet, self).__init__()\n\n self.R1_offset_conv1 = nn.Conv2d(idim, odim, kernel_size=(3, 3), padding=(1, 1), stride=1)\n self.R1_offset_conv2 = nn.Conv2d(odim, odim, kernel_size=(3, 3), padding=(1, 1), stride=1)\n self.R1_dcnpack = DCN(idim, odim, 3, stride=1, padding=1, dilation=1, deformable_groups=8,\n 
extra_offset_mask=True)\n\n self.RQ_conv = nn.Conv2d(odim*2, odim, kernel_size=(3, 3), padding=(1, 1), stride=1)\n \n\n def forward(self, R1, Q0):\n \n R1_offset = R1 - Q0\n R1_offset = F.relu(self.R1_offset_conv1(R1_offset))\n R1_offset = F.relu(self.R1_offset_conv2(R1_offset))\n R1_fea = F.relu(self.R1_dcnpack([R1, R1_offset]))\n\n R1_fea_ = torch.cat([R1_fea, Q0], dim=1)\n R1_fea_ = F.relu(self.RQ_conv(R1_fea_))\n\n return R1_fea_\n\n\nclass Encoder(nn.Module):\n def __init__(\n self,\n output_stride=32, \n input_size=320, \n width_mult=1., \n conv_operator='std_conv',\n decoder_kernel_size=5,\n apply_aspp=False,\n freeze_bn=False,\n use_nonlinear=False,\n use_context=False,\n indexnet='holistic',\n index_mode='o2o',\n sync_bn=False\n ):\n super(Encoder, self).__init__()\n self.width_mult = width_mult\n self.output_stride = output_stride\n self.index_mode = index_mode\n\n BatchNorm2d = SynchronizedBatchNorm2d if sync_bn else nn.BatchNorm2d\n\n block = InvertedResidual\n aspp = ASPP\n \n\n if indexnet == 'holistic':\n index_block = HolisticIndexBlock\n elif indexnet == 'depthwise':\n if 'o2o' in index_mode:\n index_block = DepthwiseO2OIndexBlock\n elif 'm2o' in index_mode:\n index_block = DepthwiseM2OIndexBlock\n else:\n raise NameError\n else:\n raise NameError\n\n initial_channel = 32\n current_stride = 1\n rate = 1\n inverted_residual_setting = [\n # expand_ratio, input_chn, output_chn, num_blocks, stride, dilation\n [1, initial_channel, 16, 1, 1, 1],\n [6, 16, 24, 2, 2, 1],\n [6, 24, 32, 3, 2, 1],\n [6, 32, 64, 4, 2, 1],\n [6, 64, 96, 3, 1, 1],\n [6, 96, 160, 3, 2, 1],\n [6, 160, 320, 1, 1, 1],\n ]\n\n ### encoder ###\n # building the first layer\n # assert input_size % output_stride == 0\n initial_channel = int(initial_channel * width_mult)\n self.layer0 = conv_bn(4, initial_channel, 3, 2, BatchNorm2d)\n self.layer0.apply(partial(self._stride, stride=1)) # set stride = 1\n current_stride *= 2\n # building bottleneck layers\n for i, setting in enumerate(inverted_residual_setting):\n s = setting[4]\n inverted_residual_setting[i][4] = 1 # change stride\n if current_stride == output_stride:\n rate *= s\n inverted_residual_setting[i][5] = rate\n else:\n current_stride *= s\n self.layer1 = self._build_layer(block, inverted_residual_setting[0], BatchNorm2d)\n self.layer2 = self._build_layer(block, inverted_residual_setting[1], BatchNorm2d, downsample=True)\n self.layer3 = self._build_layer(block, inverted_residual_setting[2], BatchNorm2d, downsample=True)\n self.layer4 = self._build_layer(block, inverted_residual_setting[3], BatchNorm2d, downsample=True)\n self.layer5 = self._build_layer(block, inverted_residual_setting[4], BatchNorm2d)\n self.layer6 = self._build_layer(block, inverted_residual_setting[5], BatchNorm2d, downsample=True)\n self.layer7 = self._build_layer(block, inverted_residual_setting[6], BatchNorm2d)\n\n # freeze encoder batch norm layers\n if freeze_bn:\n self.freeze_bn()\n \n # define index blocks\n if output_stride == 32:\n\n self.index0 = index_block(32, use_nonlinear=use_nonlinear, use_context=use_context, batch_norm=BatchNorm2d)\n self.index2 = index_block(24, use_nonlinear=use_nonlinear, use_context=use_context, batch_norm=BatchNorm2d)\n self.index3 = index_block(32, use_nonlinear=use_nonlinear, use_context=use_context, batch_norm=BatchNorm2d)\n self.index4 = index_block(64, use_nonlinear=use_nonlinear, use_context=use_context, batch_norm=BatchNorm2d)\n self.index6 = index_block(160, use_nonlinear=use_nonlinear, use_context=use_context, 
batch_norm=BatchNorm2d)\n\n\n elif output_stride == 16:\n self.index0 = index_block(32, use_nonlinear=use_nonlinear, use_context=use_context, batch_norm=BatchNorm2d)\n self.index2 = index_block(24, use_nonlinear=use_nonlinear, use_context=use_context, batch_norm=BatchNorm2d)\n self.index3 = index_block(32, use_nonlinear=use_nonlinear, use_context=use_context, batch_norm=BatchNorm2d)\n self.index4 = index_block(64, use_nonlinear=use_nonlinear, use_context=use_context, batch_norm=BatchNorm2d)\n elif output_stride == 8:\n self.index0 = index_block(32, use_nonlinear=use_nonlinear, use_context=use_context, batch_norm=BatchNorm2d)\n self.index2 = index_block(24, use_nonlinear=use_nonlinear, use_context=use_context, batch_norm=BatchNorm2d)\n self.index3 = index_block(32, use_nonlinear=use_nonlinear, use_context=use_context, batch_norm=BatchNorm2d)\n else:\n raise NotImplementedError\n \n ### context aggregation ###\n if apply_aspp:\n self.dconv_pp = aspp(320, 160, output_stride=output_stride, batch_norm=BatchNorm2d)\n else:\n self.dconv_pp = conv_bn(320, 160, 1, 1, BatchNorm2d)\n\n self.register_buffer('mean', torch.FloatTensor([0.485, 0.456, 0.406]).view(1, 3, 1, 1))\n self.register_buffer('std', torch.FloatTensor([0.229, 0.224, 0.225]).view(1, 3, 1, 1))\n\n\n def _build_layer(self, block, layer_setting, batch_norm, downsample=False):\n t, p, c, n, s, d = layer_setting\n input_channel = int(p * self.width_mult)\n output_channel = int(c * self.width_mult)\n\n layers = []\n for i in range(n):\n if i == 0:\n d0 = d\n if downsample:\n d0 = d // 2 if d > 1 else 1\n layers.append(block(input_channel, output_channel, s, d0, expand_ratio=t, batch_norm=batch_norm))\n else:\n layers.append(block(input_channel, output_channel, 1, d, expand_ratio=t, batch_norm=batch_norm))\n input_channel = output_channel\n\n return nn.Sequential(*layers)\n\n def _stride(self, m, stride):\n classname = m.__class__.__name__\n if classname.find('Conv') != -1:\n if m.kernel_size == (3, 3):\n m.stride = stride\n return\n\n def freeze_bn(self):\n for m in self.modules():\n if isinstance(m, SynchronizedBatchNorm2d):\n m.eval()\n elif isinstance(m, nn.BatchNorm2d):\n m.eval()\n\n def forward(self, img, tri):\n\n img = img / 255.\n img -= self.mean\n img /= self.std\n tri /= 255.\n\n x = torch.cat((img, tri), dim=1)\n\n # encode\n l0 = self.layer0(x) # 4x320x320\n\n idx0_en, idx0_de = self.index0(l0)\n l0 = idx0_en * l0\n l0p = 4 * F.avg_pool2d(l0, (2, 2), stride=2) # 32x160x160\n\n l1 = self.layer1(l0p) # 16x160x160\n l2 = self.layer2(l1) # 24x160x160\n\n idx2_en, idx2_de = self.index2(l2)\n l2 = idx2_en * l2\n l2p = 4 * F.avg_pool2d(l2, (2, 2), stride=2) # 24x80x80\n \n l3 = self.layer3(l2p) # 32x80x80\n \n idx3_en, idx3_de = self.index3(l3) \n l3 = idx3_en * l3\n l3p = 4 * F.avg_pool2d(l3, (2, 2), stride=2) # 32x40x40\n\n l4 = self.layer4(l3p) # 64x40x40\n\n if self.output_stride == 8:\n l4p, idx4_de = l4, None\n else:\n idx4_en, idx4_de = self.index4(l4)\n l4 = idx4_en * l4\n l4p = 4 * F.avg_pool2d(l4, (2, 2), stride=2) # 64x20x20\n\n\n l5 = self.layer5(l4p) # 96x20x20\n l6 = self.layer6(l5) # 160x20x20\n\n if self.output_stride == 32:\n idx6_en, idx6_de = self.index6(l6)\n l6 = idx6_en * l6\n l6p = 4 * F.avg_pool2d(l6, (2, 2), stride=2) # 160x10x10\n elif self.output_stride == 16 or self.output_stride == 8:\n l6p, idx6_de = l6, None\n\n l7 = self.layer7(l6p) # 320x10x10\n\n # pyramid pooling\n xl = self.dconv_pp(l7) # 160x10x10\n\n return xl, [l6, idx6_de], l5, [l4, idx4_de], [l3, idx3_de], [l2, idx2_de], l1, [l0, 
idx0_de]\n\n\nclass Decoder(nn.Module):\n def __init__(\n self,\n output_stride=32, \n input_size=320, \n width_mult=1., \n conv_operator='std_conv',\n decoder_kernel_size=5,\n apply_aspp=False,\n freeze_bn=False,\n use_nonlinear=False,\n use_context=False,\n indexnet='holistic',\n index_mode='o2o',\n sync_bn=False\n ):\n super(Decoder, self).__init__()\n decoder_block = IndexedUpsamlping\n BatchNorm2d = SynchronizedBatchNorm2d if sync_bn else nn.BatchNorm2d\n ### decoder ###\n self.decoder_layer6 = decoder_block(160*2, 96, conv_operator=conv_operator, kernel_size=decoder_kernel_size, batch_norm=BatchNorm2d)\n self.decoder_layer5 = decoder_block(96*2, 64, conv_operator=conv_operator, kernel_size=decoder_kernel_size, batch_norm=BatchNorm2d)\n self.decoder_layer4 = decoder_block(64*2, 32, conv_operator=conv_operator, kernel_size=decoder_kernel_size, batch_norm=BatchNorm2d)\n self.decoder_layer3 = decoder_block(32*2, 24, conv_operator=conv_operator, kernel_size=decoder_kernel_size, batch_norm=BatchNorm2d)\n self.decoder_layer2 = decoder_block(24*2, 16, conv_operator=conv_operator, kernel_size=decoder_kernel_size, batch_norm=BatchNorm2d)\n self.decoder_layer1 = decoder_block(16*2, 32, conv_operator=conv_operator, kernel_size=decoder_kernel_size, batch_norm=BatchNorm2d)\n self.decoder_layer0 = decoder_block(32*2, 32, conv_operator=conv_operator, kernel_size=decoder_kernel_size, batch_norm=BatchNorm2d)\n self.pred = pred(32, 1, conv_operator, k=decoder_kernel_size, batch_norm=BatchNorm2d)\n\n self.decoder_layer6_fg = decoder_block(160*2, 96, conv_operator=conv_operator, kernel_size=decoder_kernel_size, batch_norm=BatchNorm2d)\n self.decoder_layer5_fg = decoder_block(96*2, 64, conv_operator=conv_operator, kernel_size=decoder_kernel_size, batch_norm=BatchNorm2d)\n self.decoder_layer4_fg = decoder_block(64*2, 32, conv_operator=conv_operator, kernel_size=decoder_kernel_size, batch_norm=BatchNorm2d)\n self.decoder_layer3_fg = decoder_block(32*2, 24, conv_operator=conv_operator, kernel_size=decoder_kernel_size, batch_norm=BatchNorm2d)\n self.decoder_layer2_fg = decoder_block(24*2, 16, conv_operator=conv_operator, kernel_size=decoder_kernel_size, batch_norm=BatchNorm2d)\n self.decoder_layer1_fg = decoder_block(16*2, 32, conv_operator=conv_operator, kernel_size=decoder_kernel_size, batch_norm=BatchNorm2d)\n self.decoder_layer0_fg = decoder_block(32*2, 32, conv_operator=conv_operator, kernel_size=decoder_kernel_size, batch_norm=BatchNorm2d)\n self.pred_fg = pred(32, 3, conv_operator, k=3, batch_norm=BatchNorm2d)\n \n self.register_buffer('mean', torch.FloatTensor([0.485, 0.456, 0.406]).view(1, 3, 1, 1))\n self.register_buffer('std', torch.FloatTensor([0.229, 0.224, 0.225]).view(1, 3, 1, 1))\n\n def forward(self, xl, x6, l5, x4, x3, x2, l1, x0):\n l6, idx6_de = x6[0], x6[1]\n l4, idx4_de = x4[0], x4[1]\n l3, idx3_de = x3[0], x3[1]\n l2, idx2_de = x2[0], x2[1]\n l0, idx0_de = x0[0], x0[1]\n\n # decode\n xl_alpha = self.decoder_layer6(xl, l6, idx6_de)\n xl_fg = self.decoder_layer6_fg(xl, l6, idx6_de)\n\n xl_alpha = self.decoder_layer5(xl_alpha, l5)\n xl_fg = self.decoder_layer5_fg(xl_fg, l5)\n\n xl_alpha = self.decoder_layer4(xl_alpha, l4, idx4_de)\n xl_fg = self.decoder_layer4_fg(xl_fg, l4, idx4_de)\n\n xl_alpha = self.decoder_layer3(xl_alpha, l3, idx3_de)\n xl_fg = self.decoder_layer3_fg(xl_fg, l3, idx3_de)\n\n xl_alpha = self.decoder_layer2(xl_alpha, l2, idx2_de)\n xl_fg = self.decoder_layer2_fg(xl_fg, l2, idx2_de)\n\n xl_alpha = self.decoder_layer1(xl_alpha, l1)\n xl_fg = self.decoder_layer1_fg(xl_fg, 
l1)\n\n xl_alpha = self.decoder_layer0(xl_alpha, l0, idx0_de)\n xl_fg = self.decoder_layer0_fg(xl_fg, l0, idx0_de)\n\n xl_alpha = self.pred(xl_alpha)\n xl_fg = self.pred_fg(xl_fg)\n xl_fg *= self.std\n xl_fg += self.mean\n\n return xl_alpha, xl_fg\n\ndef hlmobilenetv2(pretrained=False, decoder='unet_style', **kwargs):\n \"\"\"Constructs a MobileNet_V2 model.\n\n Args:\n pretrained (bool): If True, returns a model pre-trained on ImageNet\n \"\"\"\n if decoder == 'unet_style':\n model = hlMobileNetV2UNetDecoder(**kwargs)\n elif decoder == 'indexnet':\n model = hlMobileNetV2UNetDecoderIndexLearning(**kwargs)\n elif decoder == 'deeplabv3+':\n model = MobileNetV2DeepLabv3Plus(**kwargs)\n elif decoder == 'refinenet':\n model = hlMobileNetV2RefineNet(**kwargs)\n else:\n raise NotImplementedError\n\n if pretrained:\n corresp_name = CORRESP_NAME\n model_dict = model.state_dict()\n pretrained_dict = load_url(model_urls['mobilenetv2'])\n\n for name in pretrained_dict:\n\n if name not in corresp_name:\n continue\n # if corresp_name[name] not in model_dict.keys():\n # continue\n\n if 'Encoder.' + corresp_name[name] not in model_dict.keys():\n continue\n\n if name == \"features.0.0.weight\":\n # model_weight = model_dict[corresp_name[name]]\n model_weight = model_dict['Encoder.'+corresp_name[name]]\n assert model_weight.shape[1] == 4\n model_weight[:, 0:3, :, :] = pretrained_dict[name]\n model_weight[:, 3, :, :] = torch.tensor(0)\n # model_dict[corresp_name[name]] = model_weight\n model_dict['Encoder.'+corresp_name[name]] = model_weight\n\n else:\n # model_dict[corresp_name[name]] = pretrained_dict[name]\n\n model_dict['Encoder.'+corresp_name[name]] = pretrained_dict[name]\n\n model.load_state_dict(model_dict)\n\n return model\n\n\ndef load_url(url, model_dir='./pretrained', map_location=None):\n if not os.path.exists(model_dir):\n os.makedirs(model_dir)\n filename = url.split('/')[-1]\n cached_file = os.path.join(model_dir, filename)\n if not os.path.exists(cached_file):\n sys.stderr.write('Downloading: \"{}\" to {}\\n'.format(url, cached_file))\n urlretrieve(url, cached_file)\n return torch.load(cached_file, map_location=map_location)\n\n\nif __name__ == \"__main__\":\n import numpy as np\n\n net = hlmobilenetv2(\n width_mult=1,\n pretrained=True, \n freeze_bn=True, \n sync_bn=False,\n apply_aspp=True,\n output_stride=32,\n conv_operator='std_conv',\n decoder_kernel_size=5,\n decoder='unet_style',\n indexnet='depthwise',\n index_mode='m2o',\n use_nonlinear=True,\n use_context=True,\n )\n net.eval()\n net.cuda()\n\n dump_x = torch.randn(1, 4, 224, 224).cuda()\n print(get_model_summary(net, dump_x))\n\n frame_rate = np.zeros((10, 1))\n for i in range(10):\n x = torch.randn(1, 4, 320, 320).cuda()\n torch.cuda.synchronize()\n start = time()\n y = net(x)\n torch.cuda.synchronize()\n end = time()\n running_frame_rate = 1 * float(1 / (end - start))\n frame_rate[i] = running_frame_rate\n print(np.mean(frame_rate))\n print(y.shape)\n"
] |
[
[
"torch.nn.Linear",
"torch.cat",
"torch.nn.functional.avg_pool2d",
"torch.cuda.synchronize",
"numpy.zeros",
"torch.nn.MaxPool2d",
"torch.nn.Sigmoid",
"torch.nn.Sequential",
"torch.nn.BatchNorm2d",
"torch.FloatTensor",
"numpy.mean",
"torch.nn.ReLU6",
"torch.nn.Conv2d",
"torch.tensor",
"torch.load",
"torch.nn.functional.pad",
"torch.randn"
]
] |
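The encoder in the matting network above downsamples with index-guided pooling rather than max pooling: a learned index block reweights the features, then `4 * F.avg_pool2d(..., (2, 2), stride=2)` turns the 2x2 average into a 2x2 sum. A minimal sketch of that single step, with a random sigmoid map standing in for the learned `HolisticIndexBlock`/depthwise index blocks:

```python
import torch
import torch.nn.functional as F

feat = torch.randn(1, 32, 320, 320)                   # encoder feature map
idx_en = torch.sigmoid(torch.randn(1, 32, 320, 320))  # stand-in for a learned index map

# 4 * (2x2 average) == 2x2 sum, so the pooled response keeps the magnitude
# of the index-weighted activations, matching Encoder.forward above.
pooled = 4 * F.avg_pool2d(idx_en * feat, (2, 2), stride=2)
print(pooled.shape)  # torch.Size([1, 32, 160, 160])
```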
alphagov-mirror/govuk-entity-personalisation
|
[
"a674bca4c15691fe2c4e32ea213dfccf3cb0e8ec"
] |
[
"notebooks/entities-synonyms/02_cbow_embeddings.py"
] |
[
"from gensim.models import Word2Vec\nimport json\nimport pandas as pd\n\n\n# load model and entities\nmodel_w2v = Word2Vec.load('model/word2vec_cbow.model')\n# generated from data/interim/kg_entities.cypher\ndf_entities = pd.read_csv(filepath_or_buffer='data/interim/kg_entities.csv')\n\n# transform each entity so suitable for comparing\nentities = [item.lower() for item in df_entities['e.name']]\nentities = [x.replace(' ', '_') for x in entities]\n\n# get cbow terms and those that are entities\ncbow_terms = list(model_w2v.wv.vocab.keys())\ncbow_entities = list(set(entities) & set(cbow_terms))\nsynonyms = [model_w2v.wv.most_similar(positive=x) for x in cbow_entities]\ncbow_synonyms = dict(zip(cbow_entities, synonyms))\n\n# save as json file - human-readable\nwith open('data/processed/cbow_synonyms.json', mode='w') as fp:\n json.dump(obj=cbow_synonyms,\n fp=fp,\n sort_keys=True,\n indent=4)\n\n# format to df for knowledge graph\n# extract synonyms with cosine-similarity greater than 0.5 from tuples within nested list\nsynonyms = [[y[0] for y in x if y[1] > 0.5] for x in synonyms]\ndf_cbow_entities = pd.DataFrame(data={'entity': cbow_entities,\n 'synonym': synonyms})\ndf_cbow_entities = df_cbow_entities.merge(right=df_entities,\n how='left',\n left_on='entity',\n right_on='e.name')\ndf_cbow_entities = df_cbow_entities[['entity',\n 'e.entity_type',\n 'synonym']].rename(columns={'e.entity_type': 'entity_type'})\n# remove duplicates to ensure can use Cypher CREATE instead of MERGE for efficiency\ndf_cbow_entities = df_cbow_entities.drop_duplicates(subset=['entity', 'entity_type'])\n\ndf_cbow_entities = df_cbow_entities.explode(column='synonym')\ndf_cbow_entities = df_cbow_entities.dropna(subset=['synonym'])\ndf_cbow_entities.to_csv(path_or_buf='data/processed/cbow_synonyms.csv',\n index=False)\n"
] |
[
[
"pandas.DataFrame",
"pandas.read_csv"
]
] |
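The synonym script above queries `wv.most_similar` on a pre-trained CBOW model and keeps neighbours whose cosine similarity exceeds 0.5. A self-contained sketch of the same lookup, with a made-up toy corpus standing in for `model/word2vec_cbow.model`:

```python
from gensim.models import Word2Vec

# Hypothetical toy sentences; the real script loads a model trained elsewhere.
sentences = [["universal", "credit", "payment"],
             ["housing", "benefit", "payment"],
             ["universal", "credit", "claim"],
             ["housing", "benefit", "claim"]]
model = Word2Vec(sentences, min_count=1, sg=0)  # sg=0 selects CBOW

# Same 0.5 cosine-similarity cut-off the script applies to its nested list.
neighbours = model.wv.most_similar(positive="payment")
print([word for word, sim in neighbours if sim > 0.5])
```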
AtsushiHAM/EEI_Analysis_model_based_rl
|
[
"800fa57a0ef5609e487c7844a2b21b31a937ece4"
] |
[
"pddm/envs/vreacher/vreacher_sincosless_ngr.py"
] |
[
"import numpy as np\nfrom gym import utils\nfrom gym.envs.mujoco import mujoco_env\nimport tensorflow as tf\nimport os\nimport numpy as np\n\n#GYM_ASSET_PATH2=os.path.join(os.getcwd(),'assets')\n#GYM_ASSET_PATH=os.path.abspath(os.path.join(os.path.dirname( __file__ ), '..', 'assets'))\nGYM_ASSET_PATH = os.path.join(os.path.dirname(__file__), 'assets')\nPI=3.14159265359\n\nclass VReacherEnv(mujoco_env.MujocoEnv, utils.EzPickle):\n def __init__(self, file_path=os.path.join(GYM_ASSET_PATH, \"vertical_arm_ngr.xml\"), max_step=1000):\n self.time = 0\n self.num_step = 0\n self.max_step = max_step # maximum number of time steps for one episode\n self.switch_timing = 0\n\n mujoco_env.MujocoEnv.__init__(self, os.path.join(file_path), 2)\n utils.EzPickle.__init__(self)\n self.skip = self.frame_skip # #maximum number of time steps for one episode\n\n def get_reward(self, observations, actions):\n\n \"\"\"get rewards of a given (observations, actions) pair\n\n Args:\n observations: (batchsize, obs_dim) or (obs_dim,)\n actions: (batchsize, ac_dim) or (ac_dim,)\n\n Return:\n r_total: (batchsize,1) or (1,), reward for that pair\n done: (batchsize,1) or (1,), True if reaches terminal state\n \"\"\"\n\n # initialize and reshape as needed, for batch mode\n self.reward_dict = {}\n if len(observations.shape) == 1:\n observations = np.expand_dims(observations, axis=0)\n actions = np.expand_dims(actions, axis=0)\n batch_mode = False\n else:\n batch_mode = True\n\n # get vars\n difference_posx= observations[:, 4]\n difference_posy = observations[:, 5]\n\n # calc rew\n self.reward_dict['actions'] = np.sum(np.square(actions), axis=1)\n self.reward_dict['goal_difference'] = np.sqrt((difference_posx)**2+(difference_posy)**2)\n self.reward_dict['r_allive'] = np.array(10)#np.array(10 - 50 * (np.abs(difference_posx) + np.abs(difference_posy)))\n self.reward_dict['r_total'] =self.reward_dict['r_allive']- 50 *(self.reward_dict['goal_difference'] +0.01*self.reward_dict['actions'])\n\n # check if done\n dones = np.zeros((observations.shape[0],))\n dones[(np.abs(difference_posx)+np.abs(difference_posy)) > 36000000] = 1\n\n # return\n if not batch_mode:\n return self.reward_dict['r_total'][0], dones[0]\n return self.reward_dict['r_total'], dones\n\n def get_score(self, obs):\n goal_difference = abs(obs[8]) + abs(obs[9])\n return goal_difference\n\n def step(self, action):\n self.num_step += 1\n timing=100\n self.do_simulation(action, self.frame_skip)\n if self.num_step % timing < timing/2:\n self.switch_timing = 0\n elif self.num_step % timing >= timing/2:\n self.switch_timing = 1\n else:\n self.switch_timing = 0\n ob = self._get_obs()\n rew, done = self.get_reward(ob, action)\n score = self.get_score(ob)\n\n # return\n env_info = {'time': self.time,\n 'obs_dict': self.obs_dict,\n 'rewards': self.reward_dict,\n 'score': score}\n return ob, rew, done, env_info\n\n def viewer_setup(self):\n self.viewer.cam.trackbodyid = 0\n\n def reset_model(self):\n self.num_step = 0\n self.reset_pose = self.np_random.uniform(low=-0.1, high=0.1, size=self.model.nq) + self.init_qpos\n self.reset_vel = self.init_qvel + self.np_random.uniform(low=-.005, high=.005, size=self.model.nv)\n #self.set_state(qpos, qvel)\n return self.do_reset(self.reset_pose.copy(), self.reset_vel.copy())\n\n def _get_obs(self):\n theta = self.sim.data.qpos.flat[:2]\n self.obs_dict = {}\n self.obs_dict['cos'] = np.cos(theta).copy()\n self.obs_dict['sin'] = np.sin(theta).copy()\n self.obs_dict['joints_pos'] = self.sim.data.qpos.flat[:2].copy()\n 
self.obs_dict['joints_vel'] = self.sim.data.qvel.flat[:2].copy()\n if self.switch_timing == 0:\n self.obs_dict['diferrence_pos']=self.get_body_com(\"fingertip\")[::2]-self.get_body_com(\"target\")[::2]\n elif self.switch_timing == 1:\n self.obs_dict['diferrence_pos'] = self.get_body_com(\"fingertip\")[::2] - self.get_body_com(\"target1\")[::2]\n # self.sim.data.qpos.flat[2:].copy()#self.get_body_com(\"fingertip\") - self.get_body_com(\"target\").copy()\n #self.obs_dict['goal_vel'] = self.sim.data.qvel.flat[2:].copy()\n #theta = self.sim.data.qpos.flat[:2]\n return np.concatenate([\n #np.cos(theta),\n #np.sin(theta),\n #self.model.data.qpos.flat[2:],\n #self.sim.data.qvel.flat[:2],\n #self.get_body_com(\"target\"),\n #self.get_body_com(\"fingertip\") - self.get_body_com(\"target\")\n self.obs_dict['joints_pos'],##01\n self.obs_dict['joints_vel'],##23\n self.obs_dict['diferrence_pos'],##45\n #self.obs_dict['goal_vel']\n\n ])\n\n def do_reset(self, reset_pose, reset_vel, reset_goal=None):\n\n #reset\n self.set_state(reset_pose, reset_vel)\n\n #return\n return self._get_obs()\n\n ##$$added by hamada fpr perturb\n def perturb_joint(self, force=0.01):\n self.data.qfrc_applied[:] = np.asarray([0, force])\n\n def perturb_pendulum(self, fx=0, fy=25, fz=0, tx=0, ty=0, tz=0):\n # Function to apply an external force to the center of gravity of\n # the pendulum. If a mass is added at the end of the pendulum/pole,\n # the external force is applied to the center of gravity of that mass\n # instead.\n # f : External forces along the three axes.\n # t : External torques along the three axes.\n force = [fx, fy, fz, tx, ty, tz]\n all_dim = np.zeros([6, 6])\n all_dim[-1, :] = force\n\n self.data.xfrc_applied = all_dim\n\nclass VReacherEnv1_1(mujoco_env.MujocoEnv, utils.EzPickle):\n def __init__(self, file_path=os.path.join(GYM_ASSET_PATH, \"vertical_arm_ngr.xml\"), max_step=1000):\n self.time = 0\n self.num_step = 0\n self.max_step = max_step # maximum number of time steps for one episode\n self.switch_timing = 0\n\n mujoco_env.MujocoEnv.__init__(self, os.path.join(file_path), 2)\n utils.EzPickle.__init__(self)\n self.skip = self.frame_skip # #maximum number of time steps for one episode\n\n def get_reward(self, observations, actions):\n\n \"\"\"get rewards of a given (observations, actions) pair\n\n Args:\n observations: (batchsize, obs_dim) or (obs_dim,)\n actions: (batchsize, ac_dim) or (ac_dim,)\n\n Return:\n r_total: (batchsize,1) or (1,), reward for that pair\n done: (batchsize,1) or (1,), True if reaches terminal state\n \"\"\"\n\n # initialize and reshape as needed, for batch mode\n self.reward_dict = {}\n if len(observations.shape) == 1:\n observations = np.expand_dims(observations, axis=0)\n actions = np.expand_dims(actions, axis=0)\n batch_mode = False\n else:\n batch_mode = True\n\n # get vars\n difference_posx= observations[:, 4]\n difference_posy = observations[:, 5]\n self.reward_dict['actions'] = np.sum(np.square(actions), axis=1)\n self.reward_dict['goal_difference'] = np.sqrt((difference_posx) ** 2 + (difference_posy) ** 2)\n self.reward_dict['r_allive'] = np.array(10) # np.array(10 - 50 * (np.abs(difference_posx) + np.abs(difference_posy)))\n self.reward_dict['r_total'] = self.reward_dict['r_allive'] - 50 * (self.reward_dict['goal_difference'] + 0.01 * self.reward_dict['actions'])\n\n # check if done\n dones = np.zeros((observations.shape[0],))\n dones[(np.abs(difference_posx)+np.abs(difference_posy)) > 36000000] = 1\n\n # return\n if not batch_mode:\n return self.reward_dict['r_total'][0], 
dones[0]\n return self.reward_dict['r_total'], dones\n\n def get_score(self, obs):\n goal_difference=abs(obs[8]) + abs(obs[9])\n return goal_difference\n\n def step(self, action):\n self.num_step += 1\n timing=100\n self.do_simulation(action, self.frame_skip)\n if self.num_step % timing < timing/2:\n self.switch_timing = 0\n elif self.num_step % timing >= timing/2:\n self.switch_timing = 1\n else:\n self.switch_timing = 0\n ob = self._get_obs()\n rew, done = self.get_reward(ob, action)\n score = self.get_score(ob)\n\n # return\n env_info = {'time': self.time,\n 'obs_dict': self.obs_dict,\n 'rewards': self.reward_dict,\n 'score': score}\n return ob, rew, done, env_info\n\n def viewer_setup(self):\n self.viewer.cam.trackbodyid = 0\n\n def reset_model(self):\n self.num_step = 0\n self.reset_pose = self.np_random.uniform(low=-0.1, high=0.1, size=self.model.nq) + self.init_qpos\n self.reset_vel = self.init_qvel + self.np_random.uniform(low=-.005, high=.005, size=self.model.nv)\n # self.set_state(qpos, qvel)\n return self.do_reset(self.reset_pose.copy(), self.reset_vel.copy())\n\n def _get_obs(self):\n theta = self.sim.data.qpos.flat[:2]\n self.obs_dict = {}\n self.obs_dict['cos'] = np.cos(theta).copy()\n self.obs_dict['sin'] = np.sin(theta).copy()\n self.obs_dict['joints_pos'] = self.sim.data.qpos.flat[:2].copy()\n self.obs_dict['joints_vel'] = self.sim.data.qvel.flat[:2].copy()\n if self.switch_timing == 0:\n self.obs_dict['diferrence_pos'] = self.get_body_com(\"fingertip\")[::2] - self.get_body_com(\"target\")[::2]\n elif self.switch_timing == 1:\n self.obs_dict['diferrence_pos'] = self.get_body_com(\"fingertip\")[::2] - self.get_body_com(\"target1\")[::2]\n self.obs_dict['joints_force'] = np.asarray([self.data.sensordata[1]]).flat.copy()\n # self.sim.data.qpos.flat[2:].copy()#self.get_body_com(\"fingertip\") - self.get_body_com(\"target\").copy()\n #self.obs_dict['goal_vel'] = self.sim.data.qvel.flat[2:].copy()\n #theta = self.sim.data.qpos.flat[:2]\n return np.concatenate([\n #np.cos(theta),\n #np.sin(theta),\n #self.model.data.qpos.flat[2:],\n #self.sim.data.qvel.flat[:2],\n #self.get_body_com(\"target\"),\n #self.get_body_com(\"fingertip\") - self.get_body_com(\"target\")\n self.obs_dict['joints_pos'],##01\n self.obs_dict['joints_vel'],##23\n self.obs_dict['diferrence_pos'] , ##45\n self.obs_dict['joints_force'],\n\n\n ])\n\n def do_reset(self, reset_pose, reset_vel, reset_goal=None):\n\n #reset\n self.set_state(reset_pose, reset_vel)\n\n #return\n return self._get_obs()\n\n ##$$added by hamada fpr perturb\n def perturb_joint(self, force=0.01):\n self.data.qfrc_applied[:] = np.asarray([0, force])\n\n def perturb_pendulum(self, fx=25, fy=0, fz=0, tx=0, ty=0, tz=0):\n # Function to apply an external force to the center of gravity of\n # the pendulum. 
If a mass is added at the end of the pendulum/pole,\n # the external force is applied to the center of gravity of that mass\n # instead.\n # f : External forces along the three axes.\n # t : External torques along the three axes.\n force = [fx, fy, fz, tx, ty, tz]\n all_dim = np.zeros([6, 6])\n all_dim[-1, :] = force\n\n self.data.xfrc_applied = all_dim\n\n\nclass VReacherEnv1_4(mujoco_env.MujocoEnv, utils.EzPickle):\n def __init__(self, file_path=os.path.join(GYM_ASSET_PATH, \"vertical_arm_ngr.xml\"), max_step=1000):\n self.time = 0\n self.num_step = 0\n self.max_step = max_step # maximum number of time steps for one episode\n self.switch_timing = 0\n\n mujoco_env.MujocoEnv.__init__(self, os.path.join(file_path), 2)\n utils.EzPickle.__init__(self)\n self.skip = self.frame_skip # #maximum number of time steps for one episode\n\n def get_reward(self, observations, actions):\n\n \"\"\"get rewards of a given (observations, actions) pair\n\n Args:\n observations: (batchsize, obs_dim) or (obs_dim,)\n actions: (batchsize, ac_dim) or (ac_dim,)\n\n Return:\n r_total: (batchsize,1) or (1,), reward for that pair\n done: (batchsize,1) or (1,), True if reaches terminal state\n \"\"\"\n\n # initialize and reshape as needed, for batch mode\n self.reward_dict = {}\n if len(observations.shape) == 1:\n observations = np.expand_dims(observations, axis=0)\n actions = np.expand_dims(actions, axis=0)\n batch_mode = False\n else:\n batch_mode = True\n\n # get vars\n difference_posx= observations[:, 4]\n difference_posy = observations[:, 5]\n\n # calc rew\n self.reward_dict['actions'] = np.sum(np.square(actions), axis=1)\n self.reward_dict['goal_difference'] = np.sqrt((difference_posx) ** 2 + (difference_posy) ** 2)\n self.reward_dict['r_allive'] = np.array(10) # np.array(10 - 50 * (np.abs(difference_posx) + np.abs(difference_posy)))\n self.reward_dict['r_total'] =self.reward_dict['r_allive']- 50 *(self.reward_dict['goal_difference'] +0.01*self.reward_dict['actions'])\n\n # check if done\n dones = np.zeros((observations.shape[0],))\n dones[(np.abs(difference_posx)+np.abs(difference_posy)) > 36000000] = 1\n\n # return\n if not batch_mode:\n return self.reward_dict['r_total'][0], dones[0]\n return self.reward_dict['r_total'], dones\n\n def get_score(self, obs):\n goal_difference=abs(obs[8]) + abs(obs[9])\n return goal_difference\n\n def step(self, action):\n self.num_step += 1\n timing=100\n self.do_simulation(action, self.frame_skip)\n if self.num_step % timing < timing/2:\n self.switch_timing = 0\n elif self.num_step % timing >= timing/2:\n self.switch_timing = 1\n else:\n self.switch_timing = 0\n ob = self._get_obs()\n rew, done = self.get_reward(ob, action)\n score = self.get_score(ob)\n\n # return\n env_info = {'time': self.time,\n 'obs_dict': self.obs_dict,\n 'rewards': self.reward_dict,\n 'score': score}\n return ob, rew, done, env_info\n\n def viewer_setup(self):\n self.viewer.cam.trackbodyid = 0\n\n def reset_model(self):\n self.num_step = 0\n self.reset_pose = self.np_random.uniform(low=-0.1, high=0.1, size=self.model.nq) + self.init_qpos\n self.reset_vel = self.init_qvel + self.np_random.uniform(low=-.005, high=.005, size=self.model.nv)\n # self.set_state(qpos, qvel)\n return self.do_reset(self.reset_pose.copy(), self.reset_vel.copy())\n\n def _get_obs(self):\n theta = self.sim.data.qpos.flat[:2]\n self.obs_dict = {}\n self.obs_dict['cos'] = np.cos(theta).copy()\n self.obs_dict['sin'] = np.sin(theta).copy()\n self.obs_dict['joints_pos'] = self.sim.data.qpos.flat[:2].copy()\n self.obs_dict['joints_vel'] 
= self.sim.data.qvel.flat[:2].copy()\n if self.switch_timing == 0:\n self.obs_dict['diferrence_pos'] = self.get_body_com(\"fingertip\")[::2] - self.get_body_com(\"target\")[::2]\n elif self.switch_timing == 1:\n self.obs_dict['diferrence_pos'] = self.get_body_com(\"fingertip\")[::2] - self.get_body_com(\"target1\")[::2]\n self.obs_dict['joints_force'] = np.asarray([self.data.sensordata[4]]).flat.copy()\n # self.sim.data.qpos.flat[2:].copy()#self.get_body_com(\"fingertip\") - self.get_body_com(\"target\").copy()\n #self.obs_dict['goal_vel'] = self.sim.data.qvel.flat[2:].copy()\n #theta = self.sim.data.qpos.flat[:2]\n return np.concatenate([\n #np.cos(theta),\n #np.sin(theta),\n #self.model.data.qpos.flat[2:],\n #self.sim.data.qvel.flat[:2],\n #self.get_body_com(\"target\"),\n #self.get_body_com(\"fingertip\") - self.get_body_com(\"target\")\n self.obs_dict['joints_pos'],##01\n self.obs_dict['joints_vel'],##23\n self.obs_dict['diferrence_pos'] , ##45\n self.obs_dict['joints_force'],\n\n\n ])\n\n def do_reset(self, reset_pose, reset_vel, reset_goal=None):\n\n #reset\n self.set_state(reset_pose, reset_vel)\n\n #return\n return self._get_obs()\n\n ##$$added by hamada fpr perturb\n def perturb_joint(self, force=0.01):\n self.data.qfrc_applied[:] = np.asarray([0, force])\n\n def perturb_pendulum(self, fx=25, fy=0, fz=0, tx=0, ty=0, tz=0):\n # Function to apply an external force to the center of gravity of\n # the pendulum. If a mass is added at the end of the pendulum/pole,\n # the external force is applied to the center of gravity of that mass\n # instead.\n # f : External forces along the three axes.\n # t : External torques along the three axes.\n force = [fx, fy, fz, tx, ty, tz]\n all_dim = np.zeros([6, 6])\n all_dim[-1, :] = force\n\n self.data.xfrc_applied = all_dim\n\nclass VReacherEnv2(mujoco_env.MujocoEnv, utils.EzPickle):\n def __init__(self, file_path=os.path.join(GYM_ASSET_PATH, \"vertical_arm_ngr.xml\"), max_step=1000):\n self.time = 0\n self.num_step = 0\n self.max_step = max_step # maximum number of time steps for one episode\n self.switch_timing = 0\n\n mujoco_env.MujocoEnv.__init__(self, os.path.join(file_path), 2)\n utils.EzPickle.__init__(self)\n self.skip = self.frame_skip # #maximum number of time steps for one episode\n\n def get_reward(self, observations, actions):\n\n \"\"\"get rewards of a given (observations, actions) pair\n\n Args:\n observations: (batchsize, obs_dim) or (obs_dim,)\n actions: (batchsize, ac_dim) or (ac_dim,)\n\n Return:\n r_total: (batchsize,1) or (1,), reward for that pair\n done: (batchsize,1) or (1,), True if reaches terminal state\n \"\"\"\n\n # initialize and reshape as needed, for batch mode\n self.reward_dict = {}\n if len(observations.shape) == 1:\n observations = np.expand_dims(observations, axis=0)\n actions = np.expand_dims(actions, axis=0)\n batch_mode = False\n else:\n batch_mode = True\n\n # get vars\n difference_posx= observations[:, 4]\n difference_posy = observations[:, 5]\n\n # calc re\n # self.reward_dict['actions'] = -0.1 * np.sum(np.square(actions), axis=1)\n # self.reward_dict['stable'] = np.cos(pendulum_angle)\n self.reward_dict['actions'] = np.sum(np.square(actions), axis=1)\n self.reward_dict['goal_difference'] = np.sqrt((difference_posx) ** 2 + (difference_posy) ** 2)\n self.reward_dict['r_allive'] = np.array(10) # np.array(10 - 50 * (np.abs(difference_posx) + np.abs(difference_posy)))\n self.reward_dict['r_total'] =self.reward_dict['r_allive']- 50 *(self.reward_dict['goal_difference'] +0.01*self.reward_dict['actions'])\n\n # 
check if done\n dones = np.zeros((observations.shape[0],))\n dones[(np.abs(difference_posx)+np.abs(difference_posy)) > 36000000] = 1\n\n # return\n if not batch_mode:\n return self.reward_dict['r_total'][0], dones[0]\n return self.reward_dict['r_total'], dones\n\n def get_score(self, obs):\n goal_difference=abs(obs[4]) + abs(obs[5])\n return goal_difference\n\n def step(self, action):\n self.num_step += 1\n timing=100\n self.do_simulation(action, self.frame_skip)\n if self.num_step % timing < timing/2:\n self.switch_timing = 0\n elif self.num_step % timing >= timing/2:\n self.switch_timing = 1\n else:\n self.switch_timing = 0\n ob = self._get_obs()\n rew, done = self.get_reward(ob, action)\n score = self.get_score(ob)\n\n # return\n env_info = {'time': self.time,\n 'obs_dict': self.obs_dict,\n 'rewards': self.reward_dict,\n 'score': score}\n return ob, rew, done, env_info\n\n def viewer_setup(self):\n self.viewer.cam.trackbodyid = 0\n\n def reset_model(self):\n self.num_step = 0\n self.reset_pose = self.np_random.uniform(low=-0.1, high=0.1, size=self.model.nq) + self.init_qpos\n self.reset_vel = self.init_qvel + self.np_random.uniform(low=-.005, high=.005, size=self.model.nv)\n # self.set_state(qpos, qvel)\n return self.do_reset(self.reset_pose.copy(), self.reset_vel.copy())\n\n def _get_obs(self):\n theta = self.sim.data.qpos.flat[:2]\n self.obs_dict = {}\n self.obs_dict['cos'] = np.cos(theta).copy()\n self.obs_dict['sin'] = np.sin(theta).copy()\n self.obs_dict['joints_pos'] = self.sim.data.qpos.flat[:2].copy()\n self.obs_dict['joints_vel'] = self.sim.data.qvel.flat[:2].copy()\n if self.switch_timing == 0:\n self.obs_dict['diferrence_pos'] = self.get_body_com(\"fingertip\")[::2] - self.get_body_com(\"target\")[::2]\n elif self.switch_timing == 1:\n self.obs_dict['diferrence_pos'] = self.get_body_com(\"fingertip\")[::2] - self.get_body_com(\"target1\")[::2]\n self.obs_dict['joints_force'] = np.asarray([self.data.sensordata[1],\n self.data.sensordata[4]]).flat.copy()\n # self.sim.data.qpos.flat[2:].copy()#self.get_body_com(\"fingertip\") - self.get_body_com(\"target\").copy()\n #self.obs_dict['goal_vel'] = self.sim.data.qvel.flat[2:].copy()\n #theta = self.sim.data.qpos.flat[:2]\n return np.concatenate([\n #np.cos(theta),\n #np.sin(theta),\n #self.model.data.qpos.flat[2:],\n #self.sim.data.qvel.flat[:2],\n #self.get_body_com(\"target\"),\n #self.get_body_com(\"fingertip\") - self.get_body_com(\"target\")\n self.obs_dict['joints_pos'],##01\n self.obs_dict['joints_vel'],##23\n self.obs_dict['diferrence_pos'] , ##45\n self.obs_dict['joints_force'],\n\n\n ])\n\n def do_reset(self, reset_pose, reset_vel, reset_goal=None):\n\n #reset\n self.set_state(reset_pose, reset_vel)\n\n #return\n return self._get_obs()\n\n ##$$added by hamada fpr perturb\n def perturb_joint(self, force=0.01):\n self.data.qfrc_applied[:] = np.asarray([0, force])\n\n def perturb_pendulum(self, fx=25, fy=0, fz=0, tx=0, ty=0, tz=0):\n # Function to apply an external force to the center of gravity of\n # the pendulum. 
If a mass is added at the end of the pendulum/pole,\n # the external force is applied to the center of gravity of that mass\n # instead.\n # f : External forces along the three axes.\n # t : External torques along the three axes.\n force = [fx, fy, fz, tx, ty, tz]\n all_dim = np.zeros([5, 6])\n all_dim[-1, :] = force\n\n self.data.xfrc_applied = all_dim\n\n\nclass VReacherEnv6(mujoco_env.MujocoEnv, utils.EzPickle):\n def __init__(self, file_path=os.path.join(GYM_ASSET_PATH, \"vertical_arm_ngr.xml\"), max_step=1000):\n self.time = 0\n self.num_step = 0\n self.max_step = max_step # maximum number of time steps for one episode\n self.switch_timing = 0\n\n mujoco_env.MujocoEnv.__init__(self, os.path.join(file_path), 2)\n utils.EzPickle.__init__(self)\n self.skip = self.frame_skip # #maximum number of time steps for one episode\n\n def get_reward(self, observations, actions):\n\n \"\"\"get rewards of a given (observations, actions) pair\n\n Args:\n observations: (batchsize, obs_dim) or (obs_dim,)\n actions: (batchsize, ac_dim) or (ac_dim,)\n\n Return:\n r_total: (batchsize,1) or (1,), reward for that pair\n done: (batchsize,1) or (1,), True if reaches terminal state\n \"\"\"\n\n # initialize and reshape as needed, for batch mode\n self.reward_dict = {}\n if len(observations.shape) == 1:\n observations = np.expand_dims(observations, axis=0)\n actions = np.expand_dims(actions, axis=0)\n batch_mode = False\n else:\n batch_mode = True\n\n # get vars\n difference_posx= observations[:, 4]\n difference_posy = observations[:, 5]\n\n # calc rew\n # self.reward_dict['actions'] = -0.1 * np.sum(np.square(actions), axis=1)\n # self.reward_dict['stable'] = np.cos(pendulum_angle)\n #self.reward_dict['goal_difference'] = 10 - 50 * np.linalg.norm(tip_pos-self.obs_dict['goal_pos'])\n self.reward_dict['actions'] = np.sum(np.square(actions), axis=1)\n self.reward_dict['goal_difference'] = np.sqrt((difference_posx) ** 2 + (difference_posy) ** 2)\n self.reward_dict['r_allive'] = np.array(10) # np.array(10 - 50 * (np.abs(difference_posx) + np.abs(difference_posy)))\n self.reward_dict['r_total'] =self.reward_dict['r_allive']- 50 *(self.reward_dict['goal_difference'] +0.01*self.reward_dict['actions'])\n\n # check if done\n dones = np.zeros((observations.shape[0],))\n dones[(np.abs(difference_posx)+np.abs(difference_posy)) > 36000000] = 1\n\n # return\n if not batch_mode:\n return self.reward_dict['r_total'][0], dones[0]\n return self.reward_dict['r_total'], dones\n\n def get_score(self, obs):\n goal_difference=abs(obs[8]) + abs(obs[9])\n return goal_difference\n\n def step(self, action):\n self.num_step += 1\n timing=100\n self.do_simulation(action, self.frame_skip)\n if self.num_step % timing < timing/2:\n self.switch_timing = 0\n elif self.num_step % timing >= timing/2:\n self.switch_timing = 1\n else:\n self.switch_timing = 0\n ob = self._get_obs()\n rew, done = self.get_reward(ob, action)\n score = self.get_score(ob)\n\n # return\n env_info = {'time': self.time,\n 'obs_dict': self.obs_dict,\n 'rewards': self.reward_dict,\n 'score': score}\n return ob, rew, done, env_info\n\n def viewer_setup(self):\n self.viewer.cam.trackbodyid = 0\n\n def reset_model(self):\n self.num_step = 0\n self.reset_pose = self.np_random.uniform(low=-0.1, high=0.1, size=self.model.nq) + self.init_qpos\n self.reset_vel = self.init_qvel + self.np_random.uniform(low=-.005, high=.005, size=self.model.nv)\n # self.set_state(qpos, qvel)\n return self.do_reset(self.reset_pose.copy(), self.reset_vel.copy())\n\n def _get_obs(self):\n theta = 
self.sim.data.qpos.flat[:2]\n self.obs_dict = {}\n self.obs_dict['cos'] = np.cos(theta).copy()\n self.obs_dict['sin'] = np.sin(theta).copy()\n self.obs_dict['joints_pos'] = self.sim.data.qpos.flat[:2].copy()\n self.obs_dict['joints_vel'] = self.sim.data.qvel.flat[:2].copy()\n if self.switch_timing == 0:\n self.obs_dict['diferrence_pos'] = self.get_body_com(\"fingertip\")[::2] - self.get_body_com(\"target\")[::2]\n elif self.switch_timing == 1:\n self.obs_dict['diferrence_pos'] = self.get_body_com(\"fingertip\")[::2] - self.get_body_com(\"target1\")[::2]\n self.obs_dict['joints_force'] = np.asarray([self.data.sensordata[0],self.data.sensordata[1],self.data.sensordata[2],\n self.data.sensordata[3],self.data.sensordata[4],self.data.sensordata[5]]).flat.copy()\n # self.sim.data.qpos.flat[2:].copy()#self.get_body_com(\"fingertip\") - self.get_body_com(\"target\").copy()\n #self.obs_dict['goal_vel'] = self.sim.data.qvel.flat[2:].copy()\n #theta = self.sim.data.qpos.flat[:2]\n return np.concatenate([\n #np.cos(theta),\n #np.sin(theta),\n #self.model.data.qpos.flat[2:],\n #self.sim.data.qvel.flat[:2],\n #self.get_body_com(\"target\"),\n #self.get_body_com(\"fingertip\") - self.get_body_com(\"target\")\n self.obs_dict['joints_pos'],##01\n self.obs_dict['joints_vel'],##23\n self.obs_dict['diferrence_pos'] , ##45\n self.obs_dict['joints_force'],\n\n\n ])\n\n def do_reset(self, reset_pose, reset_vel, reset_goal=None):\n\n #reset\n self.set_state(reset_pose, reset_vel)\n\n #return\n return self._get_obs()\n\n\n ##$$added by hamada fpr perturb\n def perturb_joint(self, force=0.01):\n self.data.qfrc_applied[:] = np.asarray([0, force])\n\n def perturb_pendulum(self, fx=0, fy=0, fz=0, tx=0, ty=0, tz=0):\n # Function to apply an external force to the center of gravity of\n # the pendulum. If a mass is added at the end of the pendulum/pole,\n # the external force is applied to the center of gravity of that mass\n # instead.\n # f : External forces along the three axes.\n # t : External torques along the three axes.\n force = [fx, fy, fz, tx, ty, tz]\n all_dim = np.zeros([6, 6])\n all_dim[-1, :] = force\n\n self.data.xfrc_applied = all_dim"
] |
[
[
"numpy.concatenate",
"numpy.square",
"numpy.array",
"numpy.sin",
"numpy.asarray",
"numpy.zeros",
"numpy.sqrt",
"numpy.cos",
"numpy.abs",
"numpy.expand_dims"
]
] |
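Every environment class in the record above computes the same batched reward: an alive bonus of 10, minus 50 times (fingertip-to-target distance plus 0.01 times the squared-action cost), with observation columns 4 and 5 holding the x/y offset. The reward in isolation, detached from MuJoCo:

```python
import numpy as np

def reward(observations, actions):
    # observations: (batch, obs_dim); columns 4/5 are the fingertip-target offset
    dx, dy = observations[:, 4], observations[:, 5]
    act_cost = np.sum(np.square(actions), axis=1)
    goal_dist = np.sqrt(dx ** 2 + dy ** 2)
    return 10.0 - 50.0 * (goal_dist + 0.01 * act_cost)

obs = np.zeros((3, 6))
obs[:, 4] = [0.0, 0.1, 0.2]           # offsets of 0, 0.1 and 0.2 along x
print(reward(obs, np.zeros((3, 2))))  # [10.  5.  0.]
```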
MarcoMernberger/mpathways
|
[
"6041b300f6d512b81ff590019f519c795fdb761a"
] |
[
"src/mpathways/util.py"
] |
[
"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\n\"\"\"util.py: Contains some utility functions.\"\"\"\n\nfrom pathlib import Path\nfrom typing import Optional, Callable, List, Dict, Tuple, Any, Union\nfrom pypipegraph import Job, FileGeneratingJob\nfrom mbf_genomics.genes import Genes\nfrom pandas import DataFrame\nimport pandas as pd\nimport pypipegraph as ppg\nimport mbf_genomics\nimport scipy\nimport numpy as np\n\n\n__author__ = \"Marco Mernberger\"\n__copyright__ = \"Copyright (c) 2020 Marco Mernberger\"\n__license__ = \"mit\"\n\n\ndef write_cls(output_directory: Path, phenotypes: Tuple[str, str], columns_a_b: Tuple[List[str], List[str]], dependencies: List[Job] = []) -> FileGeneratingJob:\n \"\"\"\n Creates a Job that writes class information file for GSEA at a specified\n folder. A file named imput.cls is created.\n\n Parameters\n ----------\n output_directory : Path\n The output directory in which an input.cls file is created.\n phenotypes : Tuple[str, str]\n The phenotype/class names of the groups to be compared.\n columns_a_b : Tuple[List[str], List[str]]\n The DataFrame columns of the relevant expression values.\n dependencies : List[Job], optional\n List of prerequisite jobs on which the , by default []\n\n Returns\n -------\n FileGeneratingJob\n The job that creates the file.\n \"\"\"\n output_directory.mkdir(parents=True, exist_ok=True)\n outfile = output_directory / \"input.cls\"\n\n def __dump():\n with outfile.open(\"w\") as handle:\n handle.write(f\"{(len(columns_a_b[0]) + len(columns_a_b[1]))} 2 1\\n\")\n handle.write(f\"# {phenotypes[0]} {phenotypes[1]}\\n\")\n handle.write(\" \".join([\"0\"] * len(columns_a_b[0])))\n handle.write(\" \")\n handle.write(\" \".join([\"1\"] * len(columns_a_b[1])))\n handle.write(\"\\n\")\n handle.close()\n\n return ppg.FileGeneratingJob(outfile, __dump).depends_on(dependencies)\n\n\ndef write_gct(genes_or_dataframe: Union[Genes, DataFrame], output_directory: Path, phenotypes: Tuple[str, str], columns_a_b: Tuple[List[str], List[str]], dependencies: List[Job] = []) -> FileGeneratingJob:\n \"\"\"\n Creates a Job that writes expression data for GSEA at a specified\n folder. 
A file named imput.gct is created.\n\n Parameters\n ----------\n output_directory : Path\n The output directory in which an input.gct file is created.\n phenotypes : Tuple[str, str]\n The phenotype/class names of the groups to be compared.\n columns_a_b : Tuple[List[str], List[str]]\n The DataFrame columns of the relevant expression values.\n dependencies : List[Job], optional\n List of prerequisite jobs on which the , by default []\n\n Returns\n -------\n FileGeneratingJob\n The job that creates the file.\n \"\"\"\n output_directory.mkdir(parents=True, exist_ok=True)\n outfile = output_directory / \"input.gct\"\n if isinstance(genes_or_dataframe, Genes):\n dependencies.append(genes_or_dataframe.add_annotator(mbf_genomics.genes.annotators.Description()))\n def __dump():\n df = genes_or_dataframe\n if isinstance(genes_or_dataframe, Genes):\n df = genes_or_dataframe.df.copy()\n elif isinstance(genes_or_dataframe, DataFrame):\n df = df.copy()\n else:\n raise ValueError(f\"Parameter genes_or_dataframe must be an instance of Genes or DataFrame, was {type(genes_or_dataframe)}.\")\n with outfile.open(\"w\") as handle:\n handle.write(\"#1.2\\n\")\n handle.write(f\"{len(df)}\\t{len(columns_a_b[0]) + len(columns_a_b[1])}\\n\")\n handle.write(\"ProbeName\\tDescription\\t\")\n handle.write(\"\\t\".join(columns_a_b[0] + columns_a_b[1]))\n handle.write(\"\\n\")\n df = df.rename(columns={\"gene_stable_id\": \"NAME\"})\n description = [f\"{x} {y}\" for x, y in zip(df[\"name\"], df[\"description\"])]\n df[\"Description\"] = description\n df = df[[\"NAME\", \"Description\"] + columns_a_b[0] + columns_a_b[1]]\n df = df.fillna(0)\n for _, row in df.iterrows():\n handle.write(\"\\t\".join([str(x) for x in row]) + \"\\n\")\n\n return ppg.FileGeneratingJob(outfile, __dump).depends_on(dependencies)\n\n\ndef _benjamini_hochberg(col):\n \"\"\"Benjamini-Hochberg p-value correction for multiple hypothesis testing.\n A clever implementation that manipulates the results so that\n anything below your significance threshold is significant - even if\n the original BH calculation had a part where the FDR rose.\n (Remember, we reject all null-hypotheses below the one with FDR < alpha\n with the *highest* p-value - even if their FDR is >= alpha!)\n\n This is a direct translation from the R code, in essence.\n \"\"\"\n p = np.asfarray(col)\n if (pd.isnull(p).any()):\n orig_idx = np.array(list(range(len(p))))\n is_nan = pd.isnull(p)\n q_ommiting_nans = _benjamini_hochberg(p[~is_nan])\n indices_without_nan = orig_idx[~is_nan]\n result = np.empty(len(p))\n result[:] = np.nan\n for q, idx in zip(q_ommiting_nans, indices_without_nan):\n result[idx] = q\n return result\n else:\n by_descend = p.argsort()[::-1]\n by_orig = by_descend.argsort()\n steps = float(len(p)) / np.arange(len(p), 0, -1)\n q = np.minimum(1, np.minimum.accumulate(steps * p[by_descend]))\n return q[by_orig]\n\ndef fdr_control_benjamini_hochberg(df,\n p_value_column='p-value',\n output_column='benjamini_hochberg',\n drop_output_if_exists=False):\n \"\"\"Calculate the benjamini hochberg adjusted p-values in order to control false discovery rate\n Adds two columns, output_column and output_column + '_rank', since you need to order\n the p-values later to decide if a given value is significant.\n \"\"\"\n if output_column in df.columns:\n if drop_output_if_exists:\n df = df.drop(output_column, axis=1)\n else:\n raise ValueError(\n \"Dataframe already has a column called %s\" % output_column)\n col = df[p_value_column]\n bh = _benjamini_hochberg(col)\n df.loc[:, 
output_column] = bh\n\ndef hypergeometric_test(query_set, reference_set, background_set):\n \"\"\"Query set is what you observed, reference set is what you compare against,\n background set is what you could have observed (genes on array...) - and which were annotated(!)\"\"\"\n query_set = query_set.intersection(background_set)\n reference_set = reference_set.intersection(background_set)\n\n drawn = reference_set.intersection(query_set)\n total_gene_count = len(background_set)\n\n no_of_trials = len(query_set)\n no_of_white_balls_in_urn = len(reference_set)\n no_of_white_balls_drawn = len(drawn)\n no_of_black_balls_in_urn = total_gene_count - no_of_white_balls_in_urn\n return scipy.stats.hypergeom(\n M=no_of_white_balls_in_urn + no_of_black_balls_in_urn, # total number of balls\n n = no_of_white_balls_in_urn, #number of white balls\n N = no_of_trials # no of balls drawn\n ).sf(\n no_of_white_balls_drawn #no of white balls drawn\n -1)\n\n\ndef multi_hypergeom_test(genome, query_set, function_gene_groups_or_list_of_such = None, background_set = None):\n \"\"\"Test a query set against multiple sets from functional.databases.\n Returns a pandas.DataFrame(group, set, benjamini, p, overlap_count,\n sorted by benjamini\n\n \"\"\"\n if function_gene_groups_or_list_of_such is None:\n function_gene_groups_or_list_of_such = databases.get_default_groups()\n query_set = set(query_set)\n list_of_gene_groups = check_function_gene_groups_or_list_of_such(function_gene_groups_or_list_of_such)\n sets_by_func_group = {}\n all_annotated_genes = set()\n for func_group in list_of_gene_groups:\n sets_by_func_group[func_group.name] = func_group.get_sets(genome)\n for v in sets_by_func_group[func_group.name].values():\n all_annotated_genes.update(v)\n if background_set is not None:\n background_set = background_set.intersection(all_annotated_genes)\n else:\n background_set = all_annotated_genes\n query_set = query_set.intersection(background_set)\n result = {'group': [], 'set': [], 'p': [], 'overlap count': [], 'intersection': []}\n for func_group_name in sets_by_func_group:\n for set_name, set_genes in sets_by_func_group[func_group_name].items():\n set_genes = set(set_genes)\n result['group'].append(func_group_name)\n result['set'].append(set_name)\n p = hypergeometric_test(query_set, set_genes, background_set)\n if p > 1:\n raise ValueError(\"p > 1,. was %.15f\" % p)\n\n result['p'].append(p)\n if result['p'][-1] == 0:\n raise ValueError()\n intersection = query_set.intersection(set_genes)\n result['overlap count'].append(len(intersection))\n result['intersection'].append(\", \".join(list(sorted([get_gene_name(genome, x) for x in intersection]))))\n res = pd.DataFrame(result, )\n statistics.fdr_control_benjamini_hochberg(res, 'p', 'benjamini')\n res = res[['group','set','benjamini', 'p','overlap count', 'intersection']].sort_values('benjamini')\n return res"
] |
[
[
"numpy.asfarray",
"pandas.isnull",
"numpy.minimum.accumulate",
"pandas.DataFrame",
"scipy.stats.hypergeom"
]
] |
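The non-NaN branch of `_benjamini_hochberg` above is the whole algorithm, translated from R's `p.adjust`: sort the p-values descending, scale each by n/rank, take a running minimum, then un-sort. Isolated and runnable:

```python
import numpy as np

def benjamini_hochberg(p):
    p = np.asarray(p, dtype=float)
    by_descend = p.argsort()[::-1]   # largest p-value first
    by_orig = by_descend.argsort()   # permutation back to input order
    steps = float(len(p)) / np.arange(len(p), 0, -1)
    q = np.minimum(1, np.minimum.accumulate(steps * p[by_descend]))
    return q[by_orig]

print(benjamini_hochberg([0.01, 0.02, 0.03, 0.04]))  # [0.04 0.04 0.04 0.04]
```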
isaacmg/bokeh
|
[
"1025d1177b8e636c36f6160da4bd2fbf8ca51962"
] |
[
"bokeh/core/property/wrappers.py"
] |
[
"#-----------------------------------------------------------------------------\n# Copyright (c) 2012 - 2018, Anaconda, Inc. All rights reserved.\n#\n# Powered by the Bokeh Development Team.\n#\n# The full license is in the file LICENSE.txt, distributed with this software.\n#-----------------------------------------------------------------------------\n''' Provide special versions of list and dict, that can automatically notify\nabout changes when used for property values.\n\nMutations to these values are detected, and the properties owning the\ncollection is notified of the changes. Consider the following model\ndefinition:\n\n.. code-block:: python\n\n class SomeModel(Model):\n\n options = List(String)\n\nIf we have an instance of this model, ``m`` then we can set the entire\nvalue of the ``options`` property at once:\n\n.. code-block:: python\n\n m.options = [\"foo\", \"bar\"]\n\nWhen we do this in the context of a Bokeh server application that is being\nviewed in a browser, this change is automatically noticed, and the\ncorresponding BokehJS property in the browser is synchronized, possibly\ncausing some change in the visual state of the application in the browser.\n\nBut it is also desirable that changes *inside* the ``options`` list also\nbe detected. That is, the following kinds of operations should also be\nautomatically synchronized between BokehJS and a Bokeh server:\n\n.. code-block:: python\n\n m.options.append(\"baz\")\n\n m.options[2] = \"quux\"\n\n m.options.insert(0, \"bar\")\n\nThe classes in this module provide this functionality.\n\n.. note::\n These classes form part of the very low-level machinery that implements\n the Bokeh model and property system. It is unlikely that any of these\n classes or their methods will be applicable to any standard usage or to\n anyone who is not directly developing on Bokeh's own infrastructure.\n\n'''\n\n#-----------------------------------------------------------------------------\n# Boilerplate\n#-----------------------------------------------------------------------------\nfrom __future__ import absolute_import, division, print_function, unicode_literals\n\nimport logging\nlog = logging.getLogger(__name__)\n\n#-----------------------------------------------------------------------------\n# Imports\n#-----------------------------------------------------------------------------\n\n# Standard library imports\nimport numpy as np\nimport copy\n\n# External imports\n\n# Bokeh imports\nfrom ...util.dependencies import import_optional\n\n#-----------------------------------------------------------------------------\n# Globals and constants\n#-----------------------------------------------------------------------------\n\n__all__ = ()\n\npd = import_optional('pandas')\n\n#----------------------------------------------------------------------------\n# General API\n#-----------------------------------------------------------------------------\n\n#-----------------------------------------------------------------------------\n# Dev API\n#-----------------------------------------------------------------------------\n\ndef notify_owner(func):\n ''' A decorator for mutating methods of property container classes\n that notifies owners of the property container about mutating changes.\n\n Args:\n func (callable) : the container method to wrap in a notification\n\n Returns:\n wrapped method\n\n Examples:\n\n A ``__setitem__`` could be wrapped like this:\n\n .. 
code-block:: python\n\n # x[i] = y\n @notify_owner\n def __setitem__(self, i, y):\n return super(PropertyValueDict, self).__setitem__(i, y)\n\n The returned wrapped method will have a docstring indicating what\n original method it is wrapping.\n\n '''\n def wrapper(self, *args, **kwargs):\n old = self._saved_copy()\n result = func(self, *args, **kwargs)\n self._notify_owners(old)\n return result\n wrapper.__doc__ = \"Container method ``%s`` instrumented to notify property owners\" % func.__name__\n return wrapper\n\nclass PropertyValueContainer(object):\n ''' A base class for property container classes that support change\n notifications on mutating operations.\n\n This class maintains an internal list of property owners, and also\n provides a private mechanism for methods wrapped with\n :func:`~bokeh.core.property.wrappers.notify_owners` to update\n those owners when mutating changes occur.\n\n '''\n def __init__(self, *args, **kwargs):\n self._owners = set()\n super(PropertyValueContainer, self).__init__(*args, **kwargs)\n\n def _register_owner(self, owner, descriptor):\n self._owners.add((owner, descriptor))\n\n def _unregister_owner(self, owner, descriptor):\n self._owners.discard((owner, descriptor))\n\n def _notify_owners(self, old, hint=None):\n for (owner, descriptor) in self._owners:\n descriptor._notify_mutated(owner, old, hint=hint)\n\n def _saved_copy(self):\n raise RuntimeError(\"Subtypes must implement this to make a backup copy\")\n\nclass PropertyValueList(PropertyValueContainer, list):\n ''' A list property value container that supports change notifications on\n mutating operations.\n\n When a Bokeh model has a ``List`` property, the ``PropertyValueLists`` are\n transparently created to wrap those values. These ``PropertyValueList``\n values are subject to normal property validation. If the property type\n ``foo = List(Str)`` then attempting to set ``x.foo[0] = 10`` will raise\n an error.\n\n Instances of ``PropertyValueList`` can be explicitly created by passing\n any object that the standard list initializer accepts, for example:\n\n .. code-block:: python\n\n >>> PropertyValueList([10, 20])\n [10, 20]\n\n >>> PropertyValueList((10, 20))\n [10, 20]\n\n The following mutating operations on lists automatically trigger\n notifications:\n\n .. 
code-block:: python\n\n del x[y]\n del x[i:j]\n x += y\n x *= y\n x[i] = y\n x[i:j] = y\n x.append\n x.extend\n x.insert\n x.pop\n x.remove\n x.reverse\n x.sort\n\n '''\n\n def __init__(self, *args, **kwargs):\n return super(PropertyValueList, self).__init__(*args, **kwargs)\n\n def _saved_copy(self):\n return list(self)\n\n # delete x[y]\n @notify_owner\n def __delitem__(self, y):\n return super(PropertyValueList, self).__delitem__(y)\n\n # delete x[i:j]\n @notify_owner\n def __delslice__(self, i, j):\n # Note: this is different py2 vs py3, py3 calls __delitem__ with a\n # slice index, and does not have this method at all\n return super(PropertyValueList, self).__delslice__(i, j)\n\n # x += y\n @notify_owner\n def __iadd__(self, y):\n return super(PropertyValueList, self).__iadd__(y)\n\n # x *= y\n @notify_owner\n def __imul__(self, y):\n return super(PropertyValueList, self).__imul__(y)\n\n # x[i] = y\n @notify_owner\n def __setitem__(self, i, y):\n return super(PropertyValueList, self).__setitem__(i, y)\n\n # x[i:j] = y\n @notify_owner\n def __setslice__(self, i, j, y):\n # Note: this is different py2 vs py3, py3 calls __setitem__ with a\n # slice index, and does not have this method at all\n return super(PropertyValueList, self).__setslice__(i, j, y)\n\n @notify_owner\n def append(self, obj):\n return super(PropertyValueList, self).append(obj)\n\n @notify_owner\n def extend(self, iterable):\n return super(PropertyValueList, self).extend(iterable)\n\n @notify_owner\n def insert(self, index, obj):\n return super(PropertyValueList, self).insert(index, obj)\n\n @notify_owner\n def pop(self, index=-1):\n return super(PropertyValueList, self).pop(index)\n\n @notify_owner\n def remove(self, obj):\n return super(PropertyValueList, self).remove(obj)\n\n @notify_owner\n def reverse(self):\n return super(PropertyValueList, self).reverse()\n\n @notify_owner\n def sort(self, **kwargs):\n return super(PropertyValueList, self).sort(**kwargs)\n\nclass PropertyValueDict(PropertyValueContainer, dict):\n ''' A dict property value container that supports change notifications on\n mutating operations.\n\n When a Bokeh model has a ``List`` property, the ``PropertyValueLists`` are\n transparently created to wrap those values. These ``PropertyValueList``\n values are subject to normal property validation. If the property type\n ``foo = Dict(Str, Str)`` then attempting to set ``x.foo['bar'] = 10`` will\n raise an error.\n\n Instances of ``PropertyValueDict`` can be eplicitly created by passing\n any object that the standard dict initializer accepts, for example:\n\n .. code-block:: python\n\n >>> PropertyValueDict(dict(a=10, b=20))\n {'a': 10, 'b': 20}\n\n >>> PropertyValueDict(a=10, b=20)\n {'a': 10, 'b': 20}\n\n >>> PropertyValueDict([('a', 10), ['b', 20]])\n {'a': 10, 'b': 20}\n\n The following mutating operations on dicts automatically trigger\n notifications:\n\n .. 
code-block:: python\n\n del x[y]\n x[i] = y\n x.clear\n x.pop\n x.popitem\n x.setdefault\n x.update\n\n '''\n def __init__(self, *args, **kwargs):\n return super(PropertyValueDict, self).__init__(*args, **kwargs)\n\n def _saved_copy(self):\n return dict(self)\n\n # delete x[y]\n @notify_owner\n def __delitem__(self, y):\n return super(PropertyValueDict, self).__delitem__(y)\n\n # x[i] = y\n @notify_owner\n def __setitem__(self, i, y):\n return super(PropertyValueDict, self).__setitem__(i, y)\n\n @notify_owner\n def clear(self):\n return super(PropertyValueDict, self).clear()\n\n @notify_owner\n def pop(self, *args):\n return super(PropertyValueDict, self).pop(*args)\n\n @notify_owner\n def popitem(self):\n return super(PropertyValueDict, self).popitem()\n\n @notify_owner\n def setdefault(self, *args):\n return super(PropertyValueDict, self).setdefault(*args)\n\n @notify_owner\n def update(self, *args, **kwargs):\n return super(PropertyValueDict, self).update(*args, **kwargs)\n\nclass PropertyValueColumnData(PropertyValueDict):\n ''' A property value container for ColumnData that supports change\n notifications on mutating operations.\n\n This property value container affords specialized code paths for\n updating the .data dictionary for ColumnDataSource. When possible,\n more efficient ColumnDataChangedEvent hints are generated to perform\n the updates:\n\n .. code-block:: python\n\n x[i] = y\n x.update\n\n '''\n\n # x[i] = y\n # don't wrap with notify_owner --- notifies owners explicitly\n def __setitem__(self, i, y):\n return self.update([(i, y)])\n\n def __copy__(self):\n return PropertyValueColumnData(dict(self))\n\n def __deepcopy__(self, memodict={}):\n return PropertyValueColumnData(copy.deepcopy(dict(self), memodict))\n\n # don't wrap with notify_owner --- notifies owners explicitly\n def update(self, *args, **kwargs):\n old = self._saved_copy()\n\n result = super(PropertyValueDict, self).update(*args, **kwargs)\n\n from ...document.events import ColumnDataChangedEvent\n\n # Grab keys to update according to Python docstring for update([E, ]**F)\n #\n # If E is present and has a .keys() method, then does: for k in E: D[k] = E[k]\n # If E is present and lacks a .keys() method, then does: for k, v in E: D[k] = v\n # In either case, this is followed by: for k in F: D[k] = F[k]\n cols = set(kwargs.keys())\n if len(args) == 1:\n E = args[0]\n if hasattr(E, 'keys'):\n cols |= set(E.keys())\n else:\n cols |= { x[0] for x in E }\n\n # we must loop ourselves here instead of calling _notify_owners\n # because the hint is customized for each owner separately\n for (owner, descriptor) in self._owners:\n hint = ColumnDataChangedEvent(owner.document, owner, cols=list(cols))\n descriptor._notify_mutated(owner, old, hint=hint)\n\n return result\n\n # don't wrap with notify_owner --- notifies owners explicitly\n def _stream(self, doc, source, new_data, rollover=None, setter=None):\n ''' Internal implementation to handle special-casing stream events\n on ``ColumnDataSource`` columns.\n\n Normally any changes to the ``.data`` dict attribute on a\n ``ColumnDataSource`` triggers a notification, causing all of the data\n to be synchronized between server and clients.\n\n The ``.stream`` method on column data sources exists to provide a\n more efficient way to perform streaming (i.e. 
append-only) updates\n to a data source, without having to perform a full synchronization,\n which would needlessly re-send all the data.\n\n To accomplish this, this function bypasses the wrapped methods on\n ``PropertyValueDict`` and uses the unwrapped versions on the dict\n superclass directly. It then explicitly makes a notification, adding\n a special ``ColumnsStreamedEvent`` hint to the message containing\n only the small streamed data that BokehJS needs in order to\n efficiently synchronize.\n\n .. warning::\n This function assumes the integrity of ``new_data`` has already\n been verified.\n\n '''\n old = self._saved_copy()\n\n # TODO (bev) Currently this reports old differently for array vs list\n # For arrays it reports the actual old value. For lists, the old value\n # is actually the already updated value. This is because the method\n # self._saved_copy() makes a shallow copy.\n for k, v in new_data.items():\n if isinstance(self[k], np.ndarray) or isinstance(new_data[k], np.ndarray):\n data = np.append(self[k], new_data[k])\n if rollover and len(data) > rollover:\n data = data[-rollover:]\n super(PropertyValueDict, self).__setitem__(k, data)\n else:\n L = self[k]\n L.extend(new_data[k])\n if rollover is not None:\n del L[:-rollover]\n\n from ...document.events import ColumnsStreamedEvent\n\n self._notify_owners(old,\n hint=ColumnsStreamedEvent(doc, source, new_data, rollover, setter))\n\n # don't wrap with notify_owner --- notifies owners explicitly\n def _patch(self, doc, source, patches, setter=None):\n ''' Internal implementation to handle special-casing patch events\n on ``ColumnDataSource`` columns.\n\n Normally any changes to the ``.data`` dict attribute on a\n ``ColumnDataSource`` triggers a notification, causing all of the data\n to be synchronized between server and clients.\n\n The ``.patch`` method on column data sources exists to provide a\n more efficient way to perform patching (i.e. random access) updates\n to a data source, without having to perform a full synchronization,\n which would needlessly re-send all the data.\n\n To accomplish this, this function bypasses the wrapped methods on\n ``PropertyValueDict`` and uses the unwrapped versions on the dict\n superclass directly. It then explicitly makes a notification, adding\n a special ``ColumnsPatchedEvent`` hint to the message containing\n only the small patched data that BokehJS needs in order to efficiently\n synchronize.\n\n .. warning::\n This function assumes the integrity of ``patches`` has already\n been verified.\n\n '''\n old = self._saved_copy()\n\n for name, patch in patches.items():\n for ind, value in patch:\n if isinstance(ind, (int, slice)):\n self[name][ind] = value\n else:\n shape = self[name][ind[0]][ind[1:]].shape\n self[name][ind[0]][ind[1:]] = np.array(value, copy=False).reshape(shape)\n\n from ...document.events import ColumnsPatchedEvent\n\n self._notify_owners(old,\n hint=ColumnsPatchedEvent(doc, source, patches, setter))\n\n#-----------------------------------------------------------------------------\n# Private API\n#-----------------------------------------------------------------------------\n\n#-----------------------------------------------------------------------------\n# Code\n#-----------------------------------------------------------------------------\n"
] |
[
[
"numpy.array",
"numpy.append"
]
] |
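The `_stream` implementation in the Bokeh container code above appends new values to each column and then trims to the most recent `rollover` entries, using the two numpy calls listed in this row's `apis` field. Below is a minimal standalone sketch of that append-and-trim pattern; `stream_column` is a hypothetical helper name, not part of Bokeh's API, and the rollover check is normalized to `is not None` in both branches:

import numpy as np

def stream_column(existing, new_values, rollover=None):
    # Array branch: np.append concatenates into a new array; when a cap
    # is set, keep only the trailing `rollover` entries.
    if isinstance(existing, np.ndarray) or isinstance(new_values, np.ndarray):
        data = np.append(existing, new_values)
        if rollover is not None and len(data) > rollover:
            data = data[-rollover:]
        return data
    # List branch: extend in place, then drop everything before the window.
    existing.extend(new_values)
    if rollover is not None:
        del existing[:-rollover]
    return existing

col = np.array([1, 2, 3])
col = stream_column(col, [4, 5], rollover=4)  # array([2, 3, 4, 5])

In the real `_stream`, the trimmed result is written back through the unwrapped `dict.__setitem__`, so a single `ColumnsStreamedEvent` hint drives synchronization rather than per-key notifications.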
masihsultani/infotheory-research
|
[
"11167cbaaa79d49f094f938ed0918aa53752b441"
] |
[
"tools/ngram_entropy.py"
] |
[
"import csv\nimport sys\nfrom ast import literal_eval\nfrom collections import defaultdict\nimport numpy as np\nimport pandas as pd\n\nfrom tools.helper import get_context, file_locations, get_gram_count, gram_conv\n\n\ndef compute_entropy(corpus, gram, stop_words=None):\n \"\"\"\n\n :param gram:\n :param corpus: str\n the corpus we are calculating entropy for\n :param stop_words: str\n corpus with stop words removed or not\n\n :return:\n \"\"\"\n\n df = pd.read_csv(\"../data/all_forms.csv\", encoding=\"utf-8\") # load csv with long and short form words\n short_forms = set(list(df.short.values))\n long_forms = set(list(df.long.values))\n prime_prob = defaultdict(lambda: defaultdict(float))\n long_set = get_context(long_forms, gram, corpus, stop_words)\n short_set = get_context(short_forms, gram, corpus, stop_words)\n\n gram_files = file_locations(gram, corpus=corpus, stop_words=stop_words)\n context_set = short_set | long_set\n context_count = get_gram_count(gram_conv[gram], corpus, stop_words=stop_words)\n\n for file in gram_files:\n with open(file, 'r', encoding=\"utf-8\") as file_2:\n if corpus == \"google\":\n reader = csv.reader(file_2, dialect=\"excel-tab\", quoting=csv.QUOTE_NONE)\n else:\n reader = csv.reader(file_2)\n for row in reader:\n temp_gram = row[0].lower().split()\n temp_context = ' '.join(word for word in temp_gram[:-1])\n if temp_context in context_set:\n try:\n prime_prob[temp_context][temp_gram[-1]] += (literal_eval(row[1]) / context_count[temp_context])\n except ZeroDivisionError:\n print(temp_context, \" ZeroDivision Error\")\n sys.stdout.flush()\n file_2.close()\n\n entropy_dict = entropy_set_calc(context_set, prime_prob)\n return entropy_dict\n\n\ndef entropy_set_calc(context_set, probs):\n entropy_dict = defaultdict(float)\n\n for context in context_set:\n entropy = 0\n for word in probs[context]:\n entropy += probs[context][word] * (np.log2(1 / (probs[context][word])))\n\n entropy_dict[context] = entropy\n\n return entropy_dict\n\n# if __name__ == \"__main__\":\n# corpus = sys.argv[1] # native or nonnative or google\n# stopword = ['True', 'False'] #sys.argv[2]\n#\n# if corpus == 'google':\n# filein = f\"/w/nobackup/131/web1t-5gram-v1\"\n# compute_entropy(filein, corpus)\n# else:\n# for x in stopword:\n# filein = f\"/ais/hal9000/masih/surprisal/{corpus}/\"\n# compute_entropy(filein, corpus,x)\n"
] |
[
[
"pandas.read_csv",
"numpy.log2"
]
] |
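Per context, `entropy_set_calc` above reduces to the Shannon entropy of the next-word probability distribution, computed with `numpy.log2` as listed in this row's `apis` field. A minimal sketch of that inner loop, with `shannon_entropy` as a hypothetical standalone name and an invented toy distribution:

import numpy as np

def shannon_entropy(probs):
    # H = sum over outcomes of p * log2(1/p); zero-probability outcomes
    # contribute nothing and are skipped to avoid division by zero.
    return sum(p * np.log2(1.0 / p) for p in probs.values() if p > 0)

# Toy next-word distribution for a single context:
print(shannon_entropy({"a": 0.5, "b": 0.25, "c": 0.25}))  # 1.5 (bits)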
Y-Sree-Chaitanya/zeugma
|
[
"f0fda428889d6ceb2ae22eda721473611d7020d2"
] |
[
"tests/test_keras_transformers.py"
] |
[
"import pytest\nfrom sklearn.pipeline import make_pipeline\nfrom zeugma.keras_transformers import Padder, TextsToSequences\n\n\[email protected](scope=\"function\")\ndef sequencer():\n \"\"\" Instantiate trainable word2vec vectorizer \"\"\"\n return TextsToSequences(num_words=5)\n\n\ndef test_sequencer(sample_corpus, sequencer):\n \"\"\" Test text sequencer \"\"\"\n num_words = sequencer.num_words\n out = sequencer.fit_transform(sample_corpus)\n assert len(out) == len(sample_corpus)\n assert len(out[0]) == 2\n assert max([index for sequence in out for index in sequence]) == num_words - 1\n\n\ndef test_padder(sample_corpus, sequencer):\n \"\"\" Test padding uneven sequences \"\"\"\n padder = Padder(max_length=10)\n out = make_pipeline(sequencer, padder).fit_transform(sample_corpus)\n assert out.shape == (len(sample_corpus), 10)\n"
] |
[
[
"sklearn.pipeline.make_pipeline"
]
] |
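The `test_padder` case above chains the two zeugma transformers with scikit-learn's `make_pipeline`, the API listed in this row's `apis` field. A usage sketch of the same composition outside the test fixtures; the corpus literal is an invented stand-in for the `sample_corpus` fixture, and only constructors and parameters that appear in the test itself are used:

from sklearn.pipeline import make_pipeline
from zeugma.keras_transformers import Padder, TextsToSequences

corpus = ["the cat sat", "the dog sat on the mat", "a cat ran"]  # stand-in data

# Tokenize documents to integer sequences, then pad/truncate each to a
# fixed length, mirroring make_pipeline(sequencer, padder) in the test.
pipeline = make_pipeline(TextsToSequences(num_words=5), Padder(max_length=10))
padded = pipeline.fit_transform(corpus)
print(padded.shape)  # (3, 10): one fixed-length row per document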
Jebrankhan/datasets
|
[
"920faf40c0766cb71d2dbba71554181881cc3e7b"
] |
[
"tests/test_arrow_dataset.py"
] |
[
"import copy\nimport itertools\nimport json\nimport os\nimport pickle\nimport re\nimport tempfile\nfrom functools import partial\nfrom pathlib import Path\nfrom unittest import TestCase\nfrom unittest.mock import patch\n\nimport numpy as np\nimport pandas as pd\nimport pyarrow as pa\nimport pytest\nfrom absl.testing import parameterized\n\nimport datasets.arrow_dataset\nfrom datasets import concatenate_datasets, interleave_datasets, load_from_disk, temp_seed\nfrom datasets.arrow_dataset import Dataset, transmit_format, update_metadata_with_features\nfrom datasets.dataset_dict import DatasetDict\nfrom datasets.features import Array2D, Array3D, ClassLabel, Features, Sequence, Value\nfrom datasets.filesystems import extract_path_from_uri\nfrom datasets.info import DatasetInfo\nfrom datasets.splits import NamedSplit\nfrom datasets.table import ConcatenationTable, InMemoryTable, MemoryMappedTable\nfrom datasets.tasks import (\n AutomaticSpeechRecognition,\n LanguageModeling,\n QuestionAnsweringExtractive,\n Summarization,\n TextClassification,\n)\nfrom datasets.utils.logging import WARNING\n\nfrom .conftest import s3_test_bucket_name\nfrom .utils import (\n assert_arrow_memory_doesnt_increase,\n assert_arrow_memory_increases,\n require_jax,\n require_s3,\n require_tf,\n require_torch,\n require_transformers,\n set_current_working_directory_to_temp_dir,\n)\n\n\nclass Unpicklable:\n def __getstate__(self):\n raise pickle.PicklingError()\n\n\ndef picklable_map_function(x):\n return {\"id\": int(x[\"filename\"].split(\"_\")[-1])}\n\n\ndef picklable_map_function_with_indices(x, i):\n return {\"id\": i}\n\n\ndef picklable_map_function_with_rank(x, r):\n return {\"rank\": r}\n\n\ndef picklable_map_function_with_indices_and_rank(x, i, r):\n return {\"id\": i, \"rank\": r}\n\n\ndef picklable_filter_function(x):\n return int(x[\"filename\"].split(\"_\")[-1]) < 10\n\n\ndef assert_arrow_metadata_are_synced_with_dataset_features(dataset: Dataset):\n assert dataset.data.schema.metadata is not None\n assert \"huggingface\".encode(\"utf-8\") in dataset.data.schema.metadata\n metadata = json.loads(dataset.data.schema.metadata[\"huggingface\".encode(\"utf-8\")].decode())\n assert \"info\" in metadata\n features = DatasetInfo.from_dict(metadata[\"info\"]).features\n assert features is not None\n assert dataset.features is not None\n assert sorted(features) == sorted(field.name for field in dataset.data.schema)\n\n\nIN_MEMORY_PARAMETERS = [\n {\"testcase_name\": name, \"in_memory\": im} for im, name in [(True, \"in_memory\"), (False, \"on_disk\")]\n]\n\n\[email protected]_parameters(IN_MEMORY_PARAMETERS)\nclass BaseDatasetTest(TestCase):\n def setUp(self):\n # google colab doesn't allow to pickle loggers\n # so we want to make sure each tests passes without pickling the logger\n def reduce_ex(self):\n raise pickle.PicklingError()\n\n datasets.arrow_dataset.logger.__reduce_ex__ = reduce_ex\n\n @pytest.fixture(autouse=True)\n def inject_fixtures(self, caplog):\n self._caplog = caplog\n\n def _create_dummy_dataset(\n self, in_memory: bool, tmp_dir: str, multiple_columns=False, array_features=False, nested_features=False\n ) -> Dataset:\n assert int(multiple_columns) + int(array_features) + int(nested_features) < 2\n if multiple_columns:\n data = {\"col_1\": [3, 2, 1, 0], \"col_2\": [\"a\", \"b\", \"c\", \"d\"], \"col_3\": [False, True, False, True]}\n dset = Dataset.from_dict(data)\n elif array_features:\n data = {\n \"col_1\": [[[True, False], [False, True]]] * 4, # 2D\n \"col_2\": [[[[\"a\", \"b\"], [\"c\", 
\"d\"]], [[\"e\", \"f\"], [\"g\", \"h\"]]]] * 4, # 3D array\n \"col_3\": [[3, 2, 1, 0]] * 4, # Sequence\n }\n features = Features(\n {\n \"col_1\": Array2D(shape=(2, 2), dtype=\"bool\"),\n \"col_2\": Array3D(shape=(2, 2, 2), dtype=\"string\"),\n \"col_3\": Sequence(feature=Value(\"int64\")),\n }\n )\n dset = Dataset.from_dict(data, features=features)\n elif nested_features:\n data = {\"nested\": [{\"a\": i, \"x\": i * 10, \"c\": i * 100} for i in range(1, 11)]}\n features = Features({\"nested\": {\"a\": Value(\"int64\"), \"x\": Value(\"int64\"), \"c\": Value(\"int64\")}})\n dset = Dataset.from_dict(data, features=features)\n else:\n dset = Dataset.from_dict({\"filename\": [\"my_name-train\" + \"_\" + str(x) for x in np.arange(30).tolist()]})\n if not in_memory:\n dset = self._to(in_memory, tmp_dir, dset)\n return dset\n\n def _to(self, in_memory, tmp_dir, *datasets):\n if in_memory:\n datasets = [dataset.map(keep_in_memory=True) for dataset in datasets]\n else:\n start = 0\n while os.path.isfile(os.path.join(tmp_dir, f\"dataset{start}.arrow\")):\n start += 1\n datasets = [\n dataset.map(cache_file_name=os.path.join(tmp_dir, f\"dataset{start + i}.arrow\"))\n for i, dataset in enumerate(datasets)\n ]\n return datasets if len(datasets) > 1 else datasets[0]\n\n def test_dummy_dataset(self, in_memory):\n with tempfile.TemporaryDirectory() as tmp_dir:\n\n with self._create_dummy_dataset(in_memory, tmp_dir) as dset:\n self.assertDictEqual(dset.features, Features({\"filename\": Value(\"string\")}))\n self.assertEqual(dset[0][\"filename\"], \"my_name-train_0\")\n self.assertEqual(dset[\"filename\"][0], \"my_name-train_0\")\n\n with self._create_dummy_dataset(in_memory, tmp_dir, multiple_columns=True) as dset:\n self.assertDictEqual(\n dset.features,\n Features({\"col_1\": Value(\"int64\"), \"col_2\": Value(\"string\"), \"col_3\": Value(\"bool\")}),\n )\n self.assertEqual(dset[0][\"col_1\"], 3)\n self.assertEqual(dset[\"col_1\"][0], 3)\n\n with tempfile.TemporaryDirectory() as tmp_dir:\n with self._create_dummy_dataset(in_memory, tmp_dir, array_features=True) as dset:\n self.assertDictEqual(\n dset.features,\n Features(\n {\n \"col_1\": Array2D(shape=(2, 2), dtype=\"bool\"),\n \"col_2\": Array3D(shape=(2, 2, 2), dtype=\"string\"),\n \"col_3\": Sequence(feature=Value(\"int64\")),\n }\n ),\n )\n self.assertEqual(dset[0][\"col_2\"], [[[\"a\", \"b\"], [\"c\", \"d\"]], [[\"e\", \"f\"], [\"g\", \"h\"]]])\n self.assertEqual(dset[\"col_2\"][0], [[[\"a\", \"b\"], [\"c\", \"d\"]], [[\"e\", \"f\"], [\"g\", \"h\"]]])\n\n def test_dataset_getitem(self, in_memory):\n with tempfile.TemporaryDirectory() as tmp_dir:\n with self._create_dummy_dataset(in_memory, tmp_dir) as dset:\n self.assertEqual(dset[0][\"filename\"], \"my_name-train_0\")\n self.assertEqual(dset[\"filename\"][0], \"my_name-train_0\")\n\n self.assertEqual(dset[-1][\"filename\"], \"my_name-train_29\")\n self.assertEqual(dset[\"filename\"][-1], \"my_name-train_29\")\n\n self.assertListEqual(dset[:2][\"filename\"], [\"my_name-train_0\", \"my_name-train_1\"])\n self.assertListEqual(dset[\"filename\"][:2], [\"my_name-train_0\", \"my_name-train_1\"])\n\n self.assertEqual(dset[:-1][\"filename\"][-1], \"my_name-train_28\")\n self.assertEqual(dset[\"filename\"][:-1][-1], \"my_name-train_28\")\n\n self.assertListEqual(dset[[0, -1]][\"filename\"], [\"my_name-train_0\", \"my_name-train_29\"])\n self.assertListEqual(dset[np.array([0, -1])][\"filename\"], [\"my_name-train_0\", \"my_name-train_29\"])\n\n with dset.select(range(2)) as dset_subset:\n 
self.assertListEqual(dset_subset[-1:][\"filename\"], [\"my_name-train_1\"])\n self.assertListEqual(dset_subset[\"filename\"][-1:], [\"my_name-train_1\"])\n\n def test_dummy_dataset_deepcopy(self, in_memory):\n with tempfile.TemporaryDirectory() as tmp_dir:\n with self._create_dummy_dataset(in_memory, tmp_dir).select(range(10)) as dset:\n with assert_arrow_memory_doesnt_increase():\n dset2 = copy.deepcopy(dset)\n # don't copy the underlying arrow data using memory\n self.assertEqual(len(dset2), 10)\n self.assertDictEqual(dset2.features, Features({\"filename\": Value(\"string\")}))\n self.assertEqual(dset2[0][\"filename\"], \"my_name-train_0\")\n self.assertEqual(dset2[\"filename\"][0], \"my_name-train_0\")\n del dset2\n\n def test_dummy_dataset_pickle(self, in_memory):\n with tempfile.TemporaryDirectory() as tmp_dir:\n tmp_file = os.path.join(tmp_dir, \"dset.pt\")\n\n with self._create_dummy_dataset(in_memory, tmp_dir).select(range(10)) as dset:\n with open(tmp_file, \"wb\") as f:\n pickle.dump(dset, f)\n\n with open(tmp_file, \"rb\") as f:\n with pickle.load(f) as dset:\n self.assertEqual(len(dset), 10)\n self.assertDictEqual(dset.features, Features({\"filename\": Value(\"string\")}))\n self.assertEqual(dset[0][\"filename\"], \"my_name-train_0\")\n self.assertEqual(dset[\"filename\"][0], \"my_name-train_0\")\n\n with self._create_dummy_dataset(in_memory, tmp_dir).select(\n range(10), indices_cache_file_name=os.path.join(tmp_dir, \"ind.arrow\")\n ) as dset:\n if not in_memory:\n dset._data.table = Unpicklable()\n dset._indices.table = Unpicklable()\n with open(tmp_file, \"wb\") as f:\n pickle.dump(dset, f)\n\n with open(tmp_file, \"rb\") as f:\n with pickle.load(f) as dset:\n self.assertEqual(len(dset), 10)\n self.assertDictEqual(dset.features, Features({\"filename\": Value(\"string\")}))\n self.assertEqual(dset[0][\"filename\"], \"my_name-train_0\")\n self.assertEqual(dset[\"filename\"][0], \"my_name-train_0\")\n\n def test_dummy_dataset_serialize(self, in_memory):\n with tempfile.TemporaryDirectory() as tmp_dir:\n with set_current_working_directory_to_temp_dir():\n with self._create_dummy_dataset(in_memory, tmp_dir).select(range(10)) as dset:\n dataset_path = \"my_dataset\" # rel path\n dset.save_to_disk(dataset_path)\n\n with Dataset.load_from_disk(dataset_path) as dset:\n self.assertEqual(len(dset), 10)\n self.assertDictEqual(dset.features, Features({\"filename\": Value(\"string\")}))\n self.assertEqual(dset[0][\"filename\"], \"my_name-train_0\")\n self.assertEqual(dset[\"filename\"][0], \"my_name-train_0\")\n\n with self._create_dummy_dataset(in_memory, tmp_dir).select(range(10)) as dset:\n dataset_path = os.path.join(tmp_dir, \"my_dataset\") # abs path\n dset.save_to_disk(dataset_path)\n\n with Dataset.load_from_disk(dataset_path) as dset:\n self.assertEqual(len(dset), 10)\n self.assertDictEqual(dset.features, Features({\"filename\": Value(\"string\")}))\n self.assertEqual(dset[0][\"filename\"], \"my_name-train_0\")\n self.assertEqual(dset[\"filename\"][0], \"my_name-train_0\")\n\n with self._create_dummy_dataset(in_memory, tmp_dir).select(\n range(10), indices_cache_file_name=os.path.join(tmp_dir, \"ind.arrow\")\n ) as dset:\n with assert_arrow_memory_doesnt_increase():\n dset.save_to_disk(dataset_path)\n\n with Dataset.load_from_disk(dataset_path) as dset:\n self.assertEqual(len(dset), 10)\n self.assertDictEqual(dset.features, Features({\"filename\": Value(\"string\")}))\n self.assertEqual(dset[0][\"filename\"], \"my_name-train_0\")\n self.assertEqual(dset[\"filename\"][0], 
\"my_name-train_0\")\n\n with self._create_dummy_dataset(in_memory, tmp_dir, nested_features=True) as dset:\n with assert_arrow_memory_doesnt_increase():\n dset.save_to_disk(dataset_path)\n\n with Dataset.load_from_disk(dataset_path) as dset:\n self.assertEqual(len(dset), 10)\n self.assertDictEqual(\n dset.features,\n Features({\"nested\": {\"a\": Value(\"int64\"), \"x\": Value(\"int64\"), \"c\": Value(\"int64\")}}),\n )\n self.assertDictEqual(dset[0][\"nested\"], {\"a\": 1, \"c\": 100, \"x\": 10})\n self.assertDictEqual(dset[\"nested\"][0], {\"a\": 1, \"c\": 100, \"x\": 10})\n\n def test_dummy_dataset_load_from_disk(self, in_memory):\n with tempfile.TemporaryDirectory() as tmp_dir:\n\n with self._create_dummy_dataset(in_memory, tmp_dir).select(range(10)) as dset:\n dataset_path = os.path.join(tmp_dir, \"my_dataset\")\n dset.save_to_disk(dataset_path)\n\n with load_from_disk(dataset_path) as dset:\n self.assertEqual(len(dset), 10)\n self.assertDictEqual(dset.features, Features({\"filename\": Value(\"string\")}))\n self.assertEqual(dset[0][\"filename\"], \"my_name-train_0\")\n self.assertEqual(dset[\"filename\"][0], \"my_name-train_0\")\n\n def test_set_format_numpy_multiple_columns(self, in_memory):\n with tempfile.TemporaryDirectory() as tmp_dir:\n with self._create_dummy_dataset(in_memory, tmp_dir, multiple_columns=True) as dset:\n fingerprint = dset._fingerprint\n dset.set_format(type=\"numpy\", columns=[\"col_1\"])\n self.assertEqual(len(dset[0]), 1)\n self.assertIsInstance(dset[0][\"col_1\"], np.int64)\n self.assertEqual(dset[0][\"col_1\"].item(), 3)\n self.assertIsInstance(dset[\"col_1\"], np.ndarray)\n self.assertListEqual(list(dset[\"col_1\"].shape), [4])\n np.testing.assert_array_equal(dset[\"col_1\"], np.array([3, 2, 1, 0]))\n self.assertNotEqual(dset._fingerprint, fingerprint)\n\n dset.reset_format()\n with dset.formatted_as(type=\"numpy\", columns=[\"col_1\"]):\n self.assertEqual(len(dset[0]), 1)\n self.assertIsInstance(dset[0][\"col_1\"], np.int64)\n self.assertEqual(dset[0][\"col_1\"].item(), 3)\n self.assertIsInstance(dset[\"col_1\"], np.ndarray)\n self.assertListEqual(list(dset[\"col_1\"].shape), [4])\n np.testing.assert_array_equal(dset[\"col_1\"], np.array([3, 2, 1, 0]))\n\n self.assertEqual(dset.format[\"type\"], None)\n self.assertEqual(dset.format[\"format_kwargs\"], {})\n self.assertEqual(dset.format[\"columns\"], dset.column_names)\n self.assertEqual(dset.format[\"output_all_columns\"], False)\n\n dset.set_format(type=\"numpy\", columns=[\"col_1\"], output_all_columns=True)\n self.assertEqual(len(dset[0]), 3)\n self.assertIsInstance(dset[0][\"col_2\"], str)\n self.assertEqual(dset[0][\"col_2\"], \"a\")\n\n dset.set_format(type=\"numpy\", columns=[\"col_1\", \"col_2\"])\n self.assertEqual(len(dset[0]), 2)\n self.assertIsInstance(dset[0][\"col_2\"], np.str_)\n self.assertEqual(dset[0][\"col_2\"].item(), \"a\")\n\n @require_torch\n def test_set_format_torch(self, in_memory):\n import torch\n\n with tempfile.TemporaryDirectory() as tmp_dir:\n with self._create_dummy_dataset(in_memory, tmp_dir, multiple_columns=True) as dset:\n dset.set_format(type=\"torch\", columns=[\"col_1\"])\n self.assertEqual(len(dset[0]), 1)\n self.assertIsInstance(dset[0][\"col_1\"], torch.Tensor)\n self.assertIsInstance(dset[\"col_1\"], torch.Tensor)\n self.assertListEqual(list(dset[0][\"col_1\"].shape), [])\n self.assertEqual(dset[0][\"col_1\"].item(), 3)\n\n dset.set_format(type=\"torch\", columns=[\"col_1\"], output_all_columns=True)\n self.assertEqual(len(dset[0]), 3)\n 
self.assertIsInstance(dset[0][\"col_2\"], str)\n self.assertEqual(dset[0][\"col_2\"], \"a\")\n\n dset.set_format(type=\"torch\", columns=[\"col_1\", \"col_2\"])\n with self.assertRaises(TypeError):\n dset[0]\n\n @require_tf\n def test_set_format_tf(self, in_memory):\n import tensorflow as tf\n\n with tempfile.TemporaryDirectory() as tmp_dir:\n with self._create_dummy_dataset(in_memory, tmp_dir, multiple_columns=True) as dset:\n dset.set_format(type=\"tensorflow\", columns=[\"col_1\"])\n self.assertEqual(len(dset[0]), 1)\n self.assertIsInstance(dset[0][\"col_1\"], tf.Tensor)\n self.assertListEqual(list(dset[0][\"col_1\"].shape), [])\n self.assertEqual(dset[0][\"col_1\"].numpy().item(), 3)\n\n dset.set_format(type=\"tensorflow\", columns=[\"col_1\"], output_all_columns=True)\n self.assertEqual(len(dset[0]), 3)\n self.assertIsInstance(dset[0][\"col_2\"], str)\n self.assertEqual(dset[0][\"col_2\"], \"a\")\n\n dset.set_format(type=\"tensorflow\", columns=[\"col_1\", \"col_2\"])\n self.assertEqual(len(dset[0]), 2)\n self.assertEqual(dset[0][\"col_2\"].numpy().decode(\"utf-8\"), \"a\")\n\n def test_set_format_pandas(self, in_memory):\n with tempfile.TemporaryDirectory() as tmp_dir:\n with self._create_dummy_dataset(in_memory, tmp_dir, multiple_columns=True) as dset:\n dset.set_format(type=\"pandas\", columns=[\"col_1\"])\n self.assertEqual(len(dset[0].columns), 1)\n self.assertIsInstance(dset[0], pd.DataFrame)\n self.assertListEqual(list(dset[0].shape), [1, 1])\n self.assertEqual(dset[0][\"col_1\"].item(), 3)\n\n dset.set_format(type=\"pandas\", columns=[\"col_1\", \"col_2\"])\n self.assertEqual(len(dset[0].columns), 2)\n self.assertEqual(dset[0][\"col_2\"].item(), \"a\")\n\n def test_set_transform(self, in_memory):\n def transform(batch):\n return {k: [str(i).upper() for i in v] for k, v in batch.items()}\n\n with tempfile.TemporaryDirectory() as tmp_dir:\n with self._create_dummy_dataset(in_memory, tmp_dir, multiple_columns=True) as dset:\n dset.set_transform(transform=transform, columns=[\"col_1\"])\n self.assertEqual(dset.format[\"type\"], \"custom\")\n self.assertEqual(len(dset[0].keys()), 1)\n self.assertEqual(dset[0][\"col_1\"], \"3\")\n self.assertEqual(dset[:2][\"col_1\"], [\"3\", \"2\"])\n self.assertEqual(dset[\"col_1\"][:2], [\"3\", \"2\"])\n\n prev_format = dset.format\n dset.set_format(**dset.format)\n self.assertEqual(prev_format, dset.format)\n\n dset.set_transform(transform=transform, columns=[\"col_1\", \"col_2\"])\n self.assertEqual(len(dset[0].keys()), 2)\n self.assertEqual(dset[0][\"col_2\"], \"A\")\n\n def test_transmit_format(self, in_memory):\n with tempfile.TemporaryDirectory() as tmp_dir:\n with self._create_dummy_dataset(in_memory, tmp_dir, multiple_columns=True) as dset:\n transform = datasets.arrow_dataset.transmit_format(lambda x: x)\n # make sure identity transform doesn't apply unnecessary format\n self.assertEqual(dset._fingerprint, transform(dset)._fingerprint)\n dset.set_format(**dset.format)\n self.assertEqual(dset._fingerprint, transform(dset)._fingerprint)\n # check lists comparisons\n dset.set_format(columns=[\"col_1\"])\n self.assertEqual(dset._fingerprint, transform(dset)._fingerprint)\n dset.set_format(columns=[\"col_1\", \"col_2\"])\n self.assertEqual(dset._fingerprint, transform(dset)._fingerprint)\n dset.set_format(\"numpy\", columns=[\"col_1\", \"col_2\"])\n self.assertEqual(dset._fingerprint, transform(dset)._fingerprint)\n\n def test_cast_in_place(self, in_memory):\n with tempfile.TemporaryDirectory() as tmp_dir:\n with 
self._create_dummy_dataset(in_memory, tmp_dir, multiple_columns=True) as dset:\n features = dset.features\n features[\"col_1\"] = Value(\"float64\")\n features = Features({k: features[k] for k in list(features)[::-1]})\n fingerprint = dset._fingerprint\n dset.cast_(features)\n self.assertEqual(dset.num_columns, 3)\n self.assertEqual(dset.features[\"col_1\"], Value(\"float64\"))\n self.assertIsInstance(dset[0][\"col_1\"], float)\n self.assertNotEqual(dset._fingerprint, fingerprint)\n assert_arrow_metadata_are_synced_with_dataset_features(dset)\n\n def test_cast(self, in_memory):\n with tempfile.TemporaryDirectory() as tmp_dir:\n with self._create_dummy_dataset(in_memory, tmp_dir, multiple_columns=True) as dset:\n features = dset.features\n features[\"col_1\"] = Value(\"float64\")\n features = Features({k: features[k] for k in list(features)[::-1]})\n fingerprint = dset._fingerprint\n # TODO: with assert_arrow_memory_increases() if in_memory else assert_arrow_memory_doesnt_increase():\n with dset.cast(features) as casted_dset:\n self.assertEqual(casted_dset.num_columns, 3)\n self.assertEqual(casted_dset.features[\"col_1\"], Value(\"float64\"))\n self.assertIsInstance(casted_dset[0][\"col_1\"], float)\n self.assertNotEqual(casted_dset._fingerprint, fingerprint)\n self.assertNotEqual(casted_dset, dset)\n assert_arrow_metadata_are_synced_with_dataset_features(casted_dset)\n\n def test_class_encode_column(self, in_memory):\n with tempfile.TemporaryDirectory() as tmp_dir:\n with self._create_dummy_dataset(in_memory, tmp_dir, multiple_columns=True) as dset:\n with self.assertRaises(ValueError):\n dset.class_encode_column(column=\"does not exist\")\n\n with dset.class_encode_column(\"col_1\") as casted_dset:\n self.assertIsInstance(casted_dset.features[\"col_1\"], ClassLabel)\n self.assertListEqual(casted_dset.features[\"col_1\"].names, [\"0\", \"1\", \"2\", \"3\"])\n self.assertListEqual(casted_dset[\"col_1\"], [3, 2, 1, 0])\n self.assertNotEqual(casted_dset._fingerprint, dset._fingerprint)\n self.assertNotEqual(casted_dset, dset)\n assert_arrow_metadata_are_synced_with_dataset_features(casted_dset)\n\n with dset.class_encode_column(\"col_2\") as casted_dset:\n self.assertIsInstance(casted_dset.features[\"col_2\"], ClassLabel)\n self.assertListEqual(casted_dset.features[\"col_2\"].names, [\"a\", \"b\", \"c\", \"d\"])\n self.assertListEqual(casted_dset[\"col_2\"], [0, 1, 2, 3])\n self.assertNotEqual(casted_dset._fingerprint, dset._fingerprint)\n self.assertNotEqual(casted_dset, dset)\n assert_arrow_metadata_are_synced_with_dataset_features(casted_dset)\n\n with dset.class_encode_column(\"col_3\") as casted_dset:\n self.assertIsInstance(casted_dset.features[\"col_3\"], ClassLabel)\n self.assertListEqual(casted_dset.features[\"col_3\"].names, [\"False\", \"True\"])\n self.assertListEqual(casted_dset[\"col_3\"], [0, 1, 0, 1])\n self.assertNotEqual(casted_dset._fingerprint, dset._fingerprint)\n self.assertNotEqual(casted_dset, dset)\n assert_arrow_metadata_are_synced_with_dataset_features(casted_dset)\n\n # Test raises if feature is an array / sequence\n with self._create_dummy_dataset(in_memory, tmp_dir, array_features=True) as dset:\n for column in dset.column_names:\n with self.assertRaises(ValueError):\n dset.class_encode_column(column)\n\n def test_remove_columns_in_place(self, in_memory):\n with tempfile.TemporaryDirectory() as tmp_dir:\n with self._create_dummy_dataset(in_memory, tmp_dir, multiple_columns=True) as dset:\n fingerprint = dset._fingerprint\n with 
assert_arrow_memory_doesnt_increase():\n dset.remove_columns_(column_names=\"col_1\")\n self.assertEqual(dset.num_columns, 2)\n self.assertListEqual(list(dset.column_names), [\"col_2\", \"col_3\"])\n assert_arrow_metadata_are_synced_with_dataset_features(dset)\n\n with self._create_dummy_dataset(in_memory, tmp_dir, multiple_columns=True) as dset:\n dset.remove_columns_(column_names=[\"col_1\", \"col_2\", \"col_3\"])\n self.assertEqual(dset.num_columns, 0)\n self.assertNotEqual(dset._fingerprint, fingerprint)\n assert_arrow_metadata_are_synced_with_dataset_features(dset)\n\n def test_remove_columns(self, in_memory):\n with tempfile.TemporaryDirectory() as tmp_dir:\n with self._create_dummy_dataset(in_memory, tmp_dir, multiple_columns=True) as dset:\n fingerprint = dset._fingerprint\n with dset.remove_columns(column_names=\"col_1\") as new_dset:\n self.assertEqual(new_dset.num_columns, 2)\n self.assertListEqual(list(new_dset.column_names), [\"col_2\", \"col_3\"])\n self.assertNotEqual(new_dset._fingerprint, fingerprint)\n assert_arrow_metadata_are_synced_with_dataset_features(new_dset)\n\n with self._create_dummy_dataset(in_memory, tmp_dir, multiple_columns=True) as dset:\n with dset.remove_columns(column_names=[\"col_1\", \"col_2\", \"col_3\"]) as new_dset:\n self.assertEqual(new_dset.num_columns, 0)\n self.assertNotEqual(new_dset._fingerprint, fingerprint)\n assert_arrow_metadata_are_synced_with_dataset_features(new_dset)\n\n def test_rename_column_in_place(self, in_memory):\n with tempfile.TemporaryDirectory() as tmp_dir:\n with self._create_dummy_dataset(in_memory, tmp_dir, multiple_columns=True) as dset:\n fingerprint = dset._fingerprint\n dset.rename_column_(original_column_name=\"col_1\", new_column_name=\"new_name\")\n self.assertEqual(dset.num_columns, 3)\n self.assertListEqual(list(dset.column_names), [\"new_name\", \"col_2\", \"col_3\"])\n self.assertNotEqual(dset._fingerprint, fingerprint)\n assert_arrow_metadata_are_synced_with_dataset_features(dset)\n\n def test_rename_column(self, in_memory):\n with tempfile.TemporaryDirectory() as tmp_dir:\n with self._create_dummy_dataset(in_memory, tmp_dir, multiple_columns=True) as dset:\n fingerprint = dset._fingerprint\n with dset.rename_column(original_column_name=\"col_1\", new_column_name=\"new_name\") as new_dset:\n self.assertEqual(new_dset.num_columns, 3)\n self.assertListEqual(list(new_dset.column_names), [\"new_name\", \"col_2\", \"col_3\"])\n self.assertListEqual(list(dset.column_names), [\"col_1\", \"col_2\", \"col_3\"])\n self.assertNotEqual(new_dset._fingerprint, fingerprint)\n assert_arrow_metadata_are_synced_with_dataset_features(new_dset)\n\n def test_rename_columns(self, in_memory):\n with tempfile.TemporaryDirectory() as tmp_dir:\n with self._create_dummy_dataset(in_memory, tmp_dir, multiple_columns=True) as dset:\n fingerprint = dset._fingerprint\n with dset.rename_columns({\"col_1\": \"new_name\"}) as new_dset:\n self.assertEqual(new_dset.num_columns, 3)\n self.assertListEqual(list(new_dset.column_names), [\"new_name\", \"col_2\", \"col_3\"])\n self.assertListEqual(list(dset.column_names), [\"col_1\", \"col_2\", \"col_3\"])\n self.assertNotEqual(new_dset._fingerprint, fingerprint)\n\n with dset.rename_columns({\"col_1\": \"new_name\", \"col_2\": \"new_name2\"}) as new_dset:\n self.assertEqual(new_dset.num_columns, 3)\n self.assertListEqual(list(new_dset.column_names), [\"new_name\", \"new_name2\", \"col_3\"])\n self.assertListEqual(list(dset.column_names), [\"col_1\", \"col_2\", \"col_3\"])\n 
self.assertNotEqual(new_dset._fingerprint, fingerprint)\n\n # Original column not in dataset\n with self.assertRaises(ValueError):\n dset.rename_columns({\"not_there\": \"new_name\"})\n\n # Empty new name\n with self.assertRaises(ValueError):\n dset.rename_columns({\"col_1\": \"\"})\n\n # Duplicates\n with self.assertRaises(ValueError):\n dset.rename_columns({\"col_1\": \"new_name\", \"col_2\": \"new_name\"})\n\n def test_concatenate(self, in_memory):\n data1, data2, data3 = {\"id\": [0, 1, 2]}, {\"id\": [3, 4, 5]}, {\"id\": [6, 7]}\n info1 = DatasetInfo(description=\"Dataset1\")\n info2 = DatasetInfo(description=\"Dataset2\")\n with tempfile.TemporaryDirectory() as tmp_dir:\n dset1, dset2, dset3 = (\n Dataset.from_dict(data1, info=info1),\n Dataset.from_dict(data2, info=info2),\n Dataset.from_dict(data3),\n )\n dset1, dset2, dset3 = self._to(in_memory, tmp_dir, dset1, dset2, dset3)\n\n with concatenate_datasets([dset1, dset2, dset3]) as dset_concat:\n self.assertEqual((len(dset1), len(dset2), len(dset3)), (3, 3, 2))\n self.assertEqual(len(dset_concat), len(dset1) + len(dset2) + len(dset3))\n self.assertListEqual(dset_concat[\"id\"], [0, 1, 2, 3, 4, 5, 6, 7])\n self.assertEqual(len(dset_concat.cache_files), 0 if in_memory else 3)\n self.assertEqual(dset_concat.info.description, \"Dataset1\\n\\nDataset2\\n\\n\")\n del dset1, dset2, dset3\n\n def test_concatenate_formatted(self, in_memory):\n data1, data2, data3 = {\"id\": [0, 1, 2]}, {\"id\": [3, 4, 5]}, {\"id\": [6, 7]}\n info1 = DatasetInfo(description=\"Dataset1\")\n info2 = DatasetInfo(description=\"Dataset2\")\n with tempfile.TemporaryDirectory() as tmp_dir:\n dset1, dset2, dset3 = (\n Dataset.from_dict(data1, info=info1),\n Dataset.from_dict(data2, info=info2),\n Dataset.from_dict(data3),\n )\n dset1, dset2, dset3 = self._to(in_memory, tmp_dir, dset1, dset2, dset3)\n\n dset1.set_format(\"numpy\")\n with concatenate_datasets([dset1, dset2, dset3]) as dset_concat:\n self.assertEqual(dset_concat.format[\"type\"], None)\n dset2.set_format(\"numpy\")\n dset3.set_format(\"numpy\")\n with concatenate_datasets([dset1, dset2, dset3]) as dset_concat:\n self.assertEqual(dset_concat.format[\"type\"], \"numpy\")\n del dset1, dset2, dset3\n\n def test_concatenate_with_indices(self, in_memory):\n data1, data2, data3 = {\"id\": [0, 1, 2] * 2}, {\"id\": [3, 4, 5] * 2}, {\"id\": [6, 7, 8]}\n info1 = DatasetInfo(description=\"Dataset1\")\n info2 = DatasetInfo(description=\"Dataset2\")\n with tempfile.TemporaryDirectory() as tmp_dir:\n dset1, dset2, dset3 = (\n Dataset.from_dict(data1, info=info1),\n Dataset.from_dict(data2, info=info2),\n Dataset.from_dict(data3),\n )\n dset1, dset2, dset3 = self._to(in_memory, tmp_dir, dset1, dset2, dset3)\n dset1, dset2, dset3 = dset1.select([0, 1, 2]), dset2.select([0, 1, 2]), dset3\n\n with concatenate_datasets([dset1, dset2, dset3]) as dset_concat:\n self.assertEqual((len(dset1), len(dset2), len(dset3)), (3, 3, 3))\n self.assertEqual(len(dset_concat), len(dset1) + len(dset2) + len(dset3))\n self.assertListEqual(dset_concat[\"id\"], [0, 1, 2, 3, 4, 5, 6, 7, 8])\n # in_memory = False:\n # 3 cache files for the dset_concat._data table\n # no cache file for the indices because it's in memory\n # in_memory = True:\n # no cache files since both dset_concat._data and dset_concat._indices are in memory\n self.assertEqual(len(dset_concat.cache_files), 0 if in_memory else 3)\n self.assertEqual(dset_concat.info.description, \"Dataset1\\n\\nDataset2\\n\\n\")\n\n dset1 = dset1.rename_columns({\"id\": \"id1\"})\n dset2 = 
dset2.rename_columns({\"id\": \"id2\"})\n dset3 = dset3.rename_columns({\"id\": \"id3\"})\n with concatenate_datasets([dset1, dset2, dset3], axis=1) as dset_concat:\n self.assertEqual((len(dset1), len(dset2), len(dset3)), (3, 3, 3))\n self.assertEqual(len(dset_concat), len(dset1))\n self.assertListEqual(dset_concat[\"id1\"], [0, 1, 2])\n self.assertListEqual(dset_concat[\"id2\"], [3, 4, 5])\n self.assertListEqual(dset_concat[\"id3\"], [6, 7, 8])\n # in_memory = False:\n # 3 cache files for the dset_concat._data table\n # no cache file for the indices because it's None\n # in_memory = True:\n # no cache files since dset_concat._data is in memory and dset_concat._indices is None\n self.assertEqual(len(dset_concat.cache_files), 0 if in_memory else 3)\n self.assertIsNone(dset_concat._indices)\n self.assertEqual(dset_concat.info.description, \"Dataset1\\n\\nDataset2\\n\\n\")\n\n with concatenate_datasets([dset1], axis=1) as dset_concat:\n self.assertEqual(len(dset_concat), len(dset1))\n self.assertListEqual(dset_concat[\"id1\"], [0, 1, 2])\n # in_memory = False:\n # 1 cache file for the dset_concat._data table\n # no cache file for the indices because it's in memory\n # in_memory = True:\n # no cache files since both dset_concat._data and dset_concat._indices are in memory\n self.assertEqual(len(dset_concat.cache_files), 0 if in_memory else 1)\n self.assertTrue(dset_concat._indices == dset1._indices)\n self.assertEqual(dset_concat.info.description, \"Dataset1\")\n del dset1, dset2, dset3\n\n def test_concatenate_with_indices_from_disk(self, in_memory):\n data1, data2, data3 = {\"id\": [0, 1, 2] * 2}, {\"id\": [3, 4, 5] * 2}, {\"id\": [6, 7]}\n info1 = DatasetInfo(description=\"Dataset1\")\n info2 = DatasetInfo(description=\"Dataset2\")\n with tempfile.TemporaryDirectory() as tmp_dir:\n dset1, dset2, dset3 = (\n Dataset.from_dict(data1, info=info1),\n Dataset.from_dict(data2, info=info2),\n Dataset.from_dict(data3),\n )\n dset1, dset2, dset3 = self._to(in_memory, tmp_dir, dset1, dset2, dset3)\n dset1, dset2, dset3 = (\n dset1.select([0, 1, 2], indices_cache_file_name=os.path.join(tmp_dir, \"i1.arrow\")),\n dset2.select([0, 1, 2], indices_cache_file_name=os.path.join(tmp_dir, \"i2.arrow\")),\n dset3.select([0, 1], indices_cache_file_name=os.path.join(tmp_dir, \"i3.arrow\")),\n )\n\n with concatenate_datasets([dset1, dset2, dset3]) as dset_concat:\n self.assertEqual((len(dset1), len(dset2), len(dset3)), (3, 3, 2))\n self.assertEqual(len(dset_concat), len(dset1) + len(dset2) + len(dset3))\n self.assertListEqual(dset_concat[\"id\"], [0, 1, 2, 3, 4, 5, 6, 7])\n # in_memory = False:\n # 3 cache files for the dset_concat._data table, and 1 for the dset_concat._indices_table\n # There is only 1 for the indices tables (i1.arrow)\n # Indeed, the others are brought to memory since an offset is applied to them.\n # in_memory = True:\n # 1 cache file for i1.arrow since both dset_concat._data and dset_concat._indices are in memory\n self.assertEqual(len(dset_concat.cache_files), 1 if in_memory else 3 + 1)\n self.assertEqual(dset_concat.info.description, \"Dataset1\\n\\nDataset2\\n\\n\")\n del dset1, dset2, dset3\n\n def test_concatenate_pickle(self, in_memory):\n data1, data2, data3 = {\"id\": [0, 1, 2] * 2}, {\"id\": [3, 4, 5] * 2}, {\"id\": [6, 7], \"foo\": [\"bar\", \"bar\"]}\n info1 = DatasetInfo(description=\"Dataset1\")\n info2 = DatasetInfo(description=\"Dataset2\")\n with tempfile.TemporaryDirectory() as tmp_dir:\n dset1, dset2, dset3 = (\n Dataset.from_dict(data1, info=info1),\n 
Dataset.from_dict(data2, info=info2),\n Dataset.from_dict(data3),\n )\n # mix from in-memory and on-disk datasets\n dset1, dset2 = self._to(in_memory, tmp_dir, dset1, dset2)\n dset3 = self._to(not in_memory, tmp_dir, dset3)\n dset1, dset2, dset3 = (\n dset1.select(\n [0, 1, 2],\n keep_in_memory=in_memory,\n indices_cache_file_name=os.path.join(tmp_dir, \"i1.arrow\") if not in_memory else None,\n ),\n dset2.select(\n [0, 1, 2],\n keep_in_memory=in_memory,\n indices_cache_file_name=os.path.join(tmp_dir, \"i2.arrow\") if not in_memory else None,\n ),\n dset3.select(\n [0, 1],\n keep_in_memory=in_memory,\n indices_cache_file_name=os.path.join(tmp_dir, \"i3.arrow\") if not in_memory else None,\n ),\n )\n\n dset3 = dset3.rename_column(\"foo\", \"new_foo\")\n dset3.remove_columns_(\"new_foo\")\n if in_memory:\n dset3._data.table = Unpicklable()\n else:\n dset1._data.table, dset2._data.table = Unpicklable(), Unpicklable()\n dset1, dset2, dset3 = [pickle.loads(pickle.dumps(d)) for d in (dset1, dset2, dset3)]\n with concatenate_datasets([dset1, dset2, dset3]) as dset_concat:\n if not in_memory:\n dset_concat._data.table = Unpicklable()\n with pickle.loads(pickle.dumps(dset_concat)) as dset_concat:\n self.assertEqual((len(dset1), len(dset2), len(dset3)), (3, 3, 2))\n self.assertEqual(len(dset_concat), len(dset1) + len(dset2) + len(dset3))\n self.assertListEqual(dset_concat[\"id\"], [0, 1, 2, 3, 4, 5, 6, 7])\n # in_memory = True: 1 cache file for dset3\n # in_memory = False: 2 caches files for dset1 and dset2, and 1 cache file for i1.arrow\n self.assertEqual(len(dset_concat.cache_files), 1 if in_memory else 2 + 1)\n self.assertEqual(dset_concat.info.description, \"Dataset1\\n\\nDataset2\\n\\n\")\n del dset1, dset2, dset3\n\n def test_flatten(self, in_memory):\n with tempfile.TemporaryDirectory() as tmp_dir:\n with Dataset.from_dict(\n {\"a\": [{\"b\": {\"c\": [\"text\"]}}] * 10, \"foo\": [1] * 10},\n features=Features({\"a\": {\"b\": Sequence({\"c\": Value(\"string\")})}, \"foo\": Value(\"int64\")}),\n ) as dset:\n with self._to(in_memory, tmp_dir, dset) as dset:\n fingerprint = dset._fingerprint\n dset.flatten_()\n self.assertListEqual(sorted(dset.column_names), [\"a.b.c\", \"foo\"])\n self.assertListEqual(sorted(dset.features.keys()), [\"a.b.c\", \"foo\"])\n self.assertDictEqual(\n dset.features, Features({\"a.b.c\": Sequence(Value(\"string\")), \"foo\": Value(\"int64\")})\n )\n self.assertNotEqual(dset._fingerprint, fingerprint)\n assert_arrow_metadata_are_synced_with_dataset_features(dset)\n\n def test_map(self, in_memory):\n # standard\n with tempfile.TemporaryDirectory() as tmp_dir:\n with self._create_dummy_dataset(in_memory, tmp_dir) as dset:\n self.assertDictEqual(dset.features, Features({\"filename\": Value(\"string\")}))\n fingerprint = dset._fingerprint\n with dset.map(\n lambda x: {\"name\": x[\"filename\"][:-2], \"id\": int(x[\"filename\"].split(\"_\")[-1])}\n ) as dset_test:\n self.assertEqual(len(dset_test), 30)\n self.assertDictEqual(dset.features, Features({\"filename\": Value(\"string\")}))\n self.assertDictEqual(\n dset_test.features,\n Features({\"filename\": Value(\"string\"), \"name\": Value(\"string\"), \"id\": Value(\"int64\")}),\n )\n self.assertListEqual(dset_test[\"id\"], list(range(30)))\n self.assertNotEqual(dset_test._fingerprint, fingerprint)\n assert_arrow_metadata_are_synced_with_dataset_features(dset_test)\n\n # no transform\n with tempfile.TemporaryDirectory() as tmp_dir:\n with self._create_dummy_dataset(in_memory, tmp_dir) as dset:\n fingerprint = 
dset._fingerprint\n with dset.map(lambda x: None) as dset_test:\n self.assertEqual(len(dset_test), 30)\n self.assertEqual(dset_test._fingerprint, fingerprint)\n assert_arrow_metadata_are_synced_with_dataset_features(dset_test)\n\n # with indices\n with tempfile.TemporaryDirectory() as tmp_dir:\n with self._create_dummy_dataset(in_memory, tmp_dir) as dset:\n with dset.map(\n lambda x, i: {\"name\": x[\"filename\"][:-2], \"id\": i}, with_indices=True\n ) as dset_test_with_indices:\n self.assertEqual(len(dset_test_with_indices), 30)\n self.assertDictEqual(dset.features, Features({\"filename\": Value(\"string\")}))\n self.assertDictEqual(\n dset_test_with_indices.features,\n Features({\"filename\": Value(\"string\"), \"name\": Value(\"string\"), \"id\": Value(\"int64\")}),\n )\n self.assertListEqual(dset_test_with_indices[\"id\"], list(range(30)))\n assert_arrow_metadata_are_synced_with_dataset_features(dset_test_with_indices)\n\n # interrupted\n with tempfile.TemporaryDirectory() as tmp_dir:\n with self._create_dummy_dataset(in_memory, tmp_dir) as dset:\n\n def func(x, i):\n if i == 4:\n raise KeyboardInterrupt()\n return {\"name\": x[\"filename\"][:-2], \"id\": i}\n\n tmp_file = os.path.join(tmp_dir, \"test.arrow\")\n self.assertRaises(\n KeyboardInterrupt,\n dset.map,\n function=func,\n with_indices=True,\n cache_file_name=tmp_file,\n writer_batch_size=2,\n )\n self.assertFalse(os.path.exists(tmp_file))\n with dset.map(\n lambda x, i: {\"name\": x[\"filename\"][:-2], \"id\": i},\n with_indices=True,\n cache_file_name=tmp_file,\n writer_batch_size=2,\n ) as dset_test_with_indices:\n self.assertTrue(os.path.exists(tmp_file))\n self.assertEqual(len(dset_test_with_indices), 30)\n self.assertDictEqual(dset.features, Features({\"filename\": Value(\"string\")}))\n self.assertDictEqual(\n dset_test_with_indices.features,\n Features({\"filename\": Value(\"string\"), \"name\": Value(\"string\"), \"id\": Value(\"int64\")}),\n )\n self.assertListEqual(dset_test_with_indices[\"id\"], list(range(30)))\n assert_arrow_metadata_are_synced_with_dataset_features(dset_test_with_indices)\n\n # formatted\n with tempfile.TemporaryDirectory() as tmp_dir:\n with self._create_dummy_dataset(in_memory, tmp_dir, multiple_columns=True) as dset:\n dset.set_format(\"numpy\", columns=[\"col_1\"])\n with dset.map(lambda x: {\"col_1_plus_one\": x[\"col_1\"] + 1}) as dset_test:\n self.assertEqual(len(dset_test), 4)\n self.assertEqual(dset_test.format[\"type\"], \"numpy\")\n self.assertIsInstance(dset_test[\"col_1\"], np.ndarray)\n self.assertIsInstance(dset_test[\"col_1_plus_one\"], np.ndarray)\n self.assertListEqual(sorted(dset_test[0].keys()), [\"col_1\", \"col_1_plus_one\"])\n self.assertListEqual(sorted(dset_test.column_names), [\"col_1\", \"col_1_plus_one\", \"col_2\", \"col_3\"])\n assert_arrow_metadata_are_synced_with_dataset_features(dset_test)\n\n def test_map_multiprocessing(self, in_memory):\n with tempfile.TemporaryDirectory() as tmp_dir: # standard\n with self._create_dummy_dataset(in_memory, tmp_dir) as dset:\n self.assertDictEqual(dset.features, Features({\"filename\": Value(\"string\")}))\n fingerprint = dset._fingerprint\n with dset.map(picklable_map_function, num_proc=2) as dset_test:\n self.assertEqual(len(dset_test), 30)\n self.assertDictEqual(dset.features, Features({\"filename\": Value(\"string\")}))\n self.assertDictEqual(\n dset_test.features,\n Features({\"filename\": Value(\"string\"), \"id\": Value(\"int64\")}),\n )\n self.assertEqual(len(dset_test.cache_files), 0 if in_memory else 2)\n 
self.assertListEqual(dset_test[\"id\"], list(range(30)))\n self.assertNotEqual(dset_test._fingerprint, fingerprint)\n assert_arrow_metadata_are_synced_with_dataset_features(dset_test)\n\n with tempfile.TemporaryDirectory() as tmp_dir: # num_proc > num rows\n with self._create_dummy_dataset(in_memory, tmp_dir) as dset:\n self.assertDictEqual(dset.features, Features({\"filename\": Value(\"string\")}))\n fingerprint = dset._fingerprint\n with dset.select([0, 1], keep_in_memory=True).map(picklable_map_function, num_proc=10) as dset_test:\n self.assertEqual(len(dset_test), 2)\n self.assertDictEqual(dset.features, Features({\"filename\": Value(\"string\")}))\n self.assertDictEqual(\n dset_test.features,\n Features({\"filename\": Value(\"string\"), \"id\": Value(\"int64\")}),\n )\n self.assertEqual(len(dset_test.cache_files), 0 if in_memory else 2)\n self.assertListEqual(dset_test[\"id\"], list(range(2)))\n self.assertNotEqual(dset_test._fingerprint, fingerprint)\n assert_arrow_metadata_are_synced_with_dataset_features(dset_test)\n\n with tempfile.TemporaryDirectory() as tmp_dir: # with_indices\n with self._create_dummy_dataset(in_memory, tmp_dir) as dset:\n fingerprint = dset._fingerprint\n with dset.map(picklable_map_function_with_indices, num_proc=3, with_indices=True) as dset_test:\n self.assertEqual(len(dset_test), 30)\n self.assertDictEqual(dset.features, Features({\"filename\": Value(\"string\")}))\n self.assertDictEqual(\n dset_test.features,\n Features({\"filename\": Value(\"string\"), \"id\": Value(\"int64\")}),\n )\n self.assertEqual(len(dset_test.cache_files), 0 if in_memory else 3)\n self.assertListEqual(dset_test[\"id\"], list(range(30)))\n self.assertNotEqual(dset_test._fingerprint, fingerprint)\n assert_arrow_metadata_are_synced_with_dataset_features(dset_test)\n\n with tempfile.TemporaryDirectory() as tmp_dir: # with_rank\n with self._create_dummy_dataset(in_memory, tmp_dir) as dset:\n fingerprint = dset._fingerprint\n with dset.map(picklable_map_function_with_rank, num_proc=3, with_rank=True) as dset_test:\n self.assertEqual(len(dset_test), 30)\n self.assertDictEqual(dset.features, Features({\"filename\": Value(\"string\")}))\n self.assertDictEqual(\n dset_test.features,\n Features({\"filename\": Value(\"string\"), \"rank\": Value(\"int64\")}),\n )\n self.assertEqual(len(dset_test.cache_files), 0 if in_memory else 3)\n self.assertListEqual(dset_test[\"rank\"], [0] * 10 + [1] * 10 + [2] * 10)\n self.assertNotEqual(dset_test._fingerprint, fingerprint)\n assert_arrow_metadata_are_synced_with_dataset_features(dset_test)\n\n with tempfile.TemporaryDirectory() as tmp_dir: # with_indices AND with_rank\n with self._create_dummy_dataset(in_memory, tmp_dir) as dset:\n fingerprint = dset._fingerprint\n with dset.map(\n picklable_map_function_with_indices_and_rank, num_proc=3, with_indices=True, with_rank=True\n ) as dset_test:\n self.assertEqual(len(dset_test), 30)\n self.assertDictEqual(dset.features, Features({\"filename\": Value(\"string\")}))\n self.assertDictEqual(\n dset_test.features,\n Features({\"filename\": Value(\"string\"), \"id\": Value(\"int64\"), \"rank\": Value(\"int64\")}),\n )\n self.assertEqual(len(dset_test.cache_files), 0 if in_memory else 3)\n self.assertListEqual(dset_test[\"id\"], list(range(30)))\n self.assertListEqual(dset_test[\"rank\"], [0] * 10 + [1] * 10 + [2] * 10)\n self.assertNotEqual(dset_test._fingerprint, fingerprint)\n assert_arrow_metadata_are_synced_with_dataset_features(dset_test)\n\n with tempfile.TemporaryDirectory() as tmp_dir: # new_fingerprint\n 
new_fingerprint = \"foobar\"\n with self._create_dummy_dataset(in_memory, tmp_dir) as dset:\n fingerprint = dset._fingerprint\n with dset.map(picklable_map_function, num_proc=2, new_fingerprint=new_fingerprint) as dset_test:\n self.assertEqual(len(dset_test), 30)\n self.assertDictEqual(dset.features, Features({\"filename\": Value(\"string\")}))\n self.assertDictEqual(\n dset_test.features,\n Features({\"filename\": Value(\"string\"), \"id\": Value(\"int64\")}),\n )\n self.assertEqual(len(dset_test.cache_files), 0 if in_memory else 2)\n self.assertListEqual(dset_test[\"id\"], list(range(30)))\n self.assertNotEqual(dset_test._fingerprint, fingerprint)\n assert_arrow_metadata_are_synced_with_dataset_features(dset_test)\n file_names = sorted([Path(cache_file[\"filename\"]).name for cache_file in dset_test.cache_files])\n for i, file_name in enumerate(file_names):\n self.assertIn(new_fingerprint + f\"_{i:05d}\", file_name)\n\n with tempfile.TemporaryDirectory() as tmp_dir: # lambda (requires multiprocess from pathos)\n with self._create_dummy_dataset(in_memory, tmp_dir) as dset:\n fingerprint = dset._fingerprint\n with dset.map(lambda x: {\"id\": int(x[\"filename\"].split(\"_\")[-1])}, num_proc=2) as dset_test:\n self.assertEqual(len(dset_test), 30)\n self.assertDictEqual(dset.features, Features({\"filename\": Value(\"string\")}))\n self.assertDictEqual(\n dset_test.features,\n Features({\"filename\": Value(\"string\"), \"id\": Value(\"int64\")}),\n )\n self.assertEqual(len(dset_test.cache_files), 0 if in_memory else 2)\n self.assertListEqual(dset_test[\"id\"], list(range(30)))\n self.assertNotEqual(dset_test._fingerprint, fingerprint)\n assert_arrow_metadata_are_synced_with_dataset_features(dset_test)\n\n def test_new_features(self, in_memory):\n with tempfile.TemporaryDirectory() as tmp_dir:\n with self._create_dummy_dataset(in_memory, tmp_dir) as dset:\n features = Features({\"filename\": Value(\"string\"), \"label\": ClassLabel(names=[\"positive\", \"negative\"])})\n with dset.map(\n lambda x, i: {\"label\": i % 2}, with_indices=True, features=features\n ) as dset_test_with_indices:\n self.assertEqual(len(dset_test_with_indices), 30)\n self.assertDictEqual(\n dset_test_with_indices.features,\n features,\n )\n assert_arrow_metadata_are_synced_with_dataset_features(dset_test_with_indices)\n\n def test_map_batched(self, in_memory):\n def map_batched(example):\n return {\"filename_new\": [x + \"_extension\" for x in example[\"filename\"]]}\n\n with tempfile.TemporaryDirectory() as tmp_dir:\n with self._create_dummy_dataset(in_memory, tmp_dir) as dset:\n with dset.map(map_batched, batched=True) as dset_test_batched:\n self.assertEqual(len(dset_test_batched), 30)\n self.assertDictEqual(dset.features, Features({\"filename\": Value(\"string\")}))\n self.assertDictEqual(\n dset_test_batched.features,\n Features({\"filename\": Value(\"string\"), \"filename_new\": Value(\"string\")}),\n )\n assert_arrow_metadata_are_synced_with_dataset_features(dset_test_batched)\n\n with tempfile.TemporaryDirectory() as tmp_dir:\n with self._create_dummy_dataset(in_memory, tmp_dir) as dset:\n with dset.formatted_as(\"numpy\", columns=[\"filename\"]):\n with dset.map(map_batched, batched=True) as dset_test_batched:\n self.assertEqual(len(dset_test_batched), 30)\n self.assertDictEqual(dset.features, Features({\"filename\": Value(\"string\")}))\n self.assertDictEqual(\n dset_test_batched.features,\n Features({\"filename\": Value(\"string\"), \"filename_new\": Value(\"string\")}),\n )\n 
assert_arrow_metadata_are_synced_with_dataset_features(dset_test_batched)\n\n def map_batched_with_indices(example, idx):\n return {\"filename_new\": [x + \"_extension_\" + str(idx) for x in example[\"filename\"]]}\n\n with tempfile.TemporaryDirectory() as tmp_dir:\n with self._create_dummy_dataset(in_memory, tmp_dir) as dset:\n with dset.map(\n map_batched_with_indices, batched=True, with_indices=True\n ) as dset_test_with_indices_batched:\n self.assertEqual(len(dset_test_with_indices_batched), 30)\n self.assertDictEqual(dset.features, Features({\"filename\": Value(\"string\")}))\n self.assertDictEqual(\n dset_test_with_indices_batched.features,\n Features({\"filename\": Value(\"string\"), \"filename_new\": Value(\"string\")}),\n )\n assert_arrow_metadata_are_synced_with_dataset_features(dset_test_with_indices_batched)\n\n def test_map_nested(self, in_memory):\n with tempfile.TemporaryDirectory() as tmp_dir:\n with Dataset.from_dict({\"field\": [\"a\", \"b\"]}) as dset:\n with self._to(in_memory, tmp_dir, dset) as dset:\n with dset.map(lambda example: {\"otherfield\": {\"capital\": example[\"field\"].capitalize()}}) as dset:\n with dset.map(lambda example: {\"otherfield\": {\"append_x\": example[\"field\"] + \"x\"}}) as dset:\n self.assertEqual(dset[0], {\"field\": \"a\", \"otherfield\": {\"append_x\": \"ax\"}})\n\n def test_map_fn_kwargs(self, in_memory):\n with tempfile.TemporaryDirectory() as tmp_dir:\n with Dataset.from_dict({\"id\": range(10)}) as dset:\n with self._to(in_memory, tmp_dir, dset) as dset:\n fn_kwargs = {\"offset\": 3}\n with dset.map(\n lambda example, offset: {\"id+offset\": example[\"id\"] + offset}, fn_kwargs=fn_kwargs\n ) as mapped_dset:\n assert mapped_dset[\"id+offset\"] == list(range(3, 13))\n with dset.map(\n lambda id, offset: {\"id+offset\": id + offset}, fn_kwargs=fn_kwargs, input_columns=\"id\"\n ) as mapped_dset:\n assert mapped_dset[\"id+offset\"] == list(range(3, 13))\n with dset.map(\n lambda id, i, offset: {\"id+offset\": i + offset},\n fn_kwargs=fn_kwargs,\n input_columns=\"id\",\n with_indices=True,\n ) as mapped_dset:\n assert mapped_dset[\"id+offset\"] == list(range(3, 13))\n\n def test_map_caching(self, in_memory):\n with tempfile.TemporaryDirectory() as tmp_dir:\n self._caplog.clear()\n with self._caplog.at_level(WARNING):\n with self._create_dummy_dataset(in_memory, tmp_dir) as dset:\n with dset.map(lambda x: {\"foo\": \"bar\"}) as dset_test1:\n dset_test1_data_files = list(dset_test1.cache_files)\n with dset.map(lambda x: {\"foo\": \"bar\"}) as dset_test2:\n self.assertEqual(dset_test1_data_files, dset_test2.cache_files)\n self.assertEqual(len(dset_test2.cache_files), 1 - int(in_memory))\n self.assertTrue((\"Loading cached processed dataset\" in self._caplog.text) ^ in_memory)\n\n with tempfile.TemporaryDirectory() as tmp_dir:\n self._caplog.clear()\n with self._caplog.at_level(WARNING):\n with self._create_dummy_dataset(in_memory, tmp_dir) as dset:\n with dset.map(lambda x: {\"foo\": \"bar\"}) as dset_test1:\n dset_test1_data_files = list(dset_test1.cache_files)\n with dset.map(lambda x: {\"foo\": \"bar\"}, load_from_cache_file=False) as dset_test2:\n self.assertEqual(dset_test1_data_files, dset_test2.cache_files)\n self.assertEqual(len(dset_test2.cache_files), 1 - int(in_memory))\n self.assertNotIn(\"Loading cached processed dataset\", self._caplog.text)\n\n with tempfile.TemporaryDirectory() as tmp_dir:\n self._caplog.clear()\n with self._caplog.at_level(WARNING):\n with self._create_dummy_dataset(in_memory, tmp_dir) as dset:\n with 
patch(\"datasets.arrow_dataset.Pool\", side_effect=datasets.arrow_dataset.Pool) as mock_pool:\n with dset.map(lambda x: {\"foo\": \"bar\"}, num_proc=2) as dset_test1:\n dset_test1_data_files = list(dset_test1.cache_files)\n self.assertEqual(mock_pool.call_count, 1)\n with dset.map(lambda x: {\"foo\": \"bar\"}, num_proc=2) as dset_test2:\n self.assertEqual(dset_test1_data_files, dset_test2.cache_files)\n self.assertTrue(\n (len(re.findall(\"Loading cached processed dataset\", self._caplog.text)) == 2)\n ^ in_memory\n )\n self.assertEqual(mock_pool.call_count, 2 if in_memory else 1)\n\n with tempfile.TemporaryDirectory() as tmp_dir:\n self._caplog.clear()\n with self._caplog.at_level(WARNING):\n with self._create_dummy_dataset(in_memory, tmp_dir) as dset:\n with dset.map(lambda x: {\"foo\": \"bar\"}, num_proc=2) as dset_test1:\n dset_test1_data_files = list(dset_test1.cache_files)\n with dset.map(lambda x: {\"foo\": \"bar\"}, num_proc=2, load_from_cache_file=False) as dset_test2:\n self.assertEqual(dset_test1_data_files, dset_test2.cache_files)\n self.assertEqual(len(dset_test2.cache_files), (1 - int(in_memory)) * 2)\n self.assertNotIn(\"Loading cached processed dataset\", self._caplog.text)\n\n if not in_memory:\n try:\n self._caplog.clear()\n with tempfile.TemporaryDirectory() as tmp_dir:\n with self._caplog.at_level(WARNING):\n with self._create_dummy_dataset(in_memory, tmp_dir) as dset:\n datasets.set_caching_enabled(False)\n with dset.map(lambda x: {\"foo\": \"bar\"}) as dset_test1:\n with dset.map(lambda x: {\"foo\": \"bar\"}) as dset_test2:\n self.assertNotEqual(dset_test1.cache_files, dset_test2.cache_files)\n self.assertEqual(len(dset_test1.cache_files), 1)\n self.assertEqual(len(dset_test2.cache_files), 1)\n self.assertNotIn(\"Loading cached processed dataset\", self._caplog.text)\n # make sure the arrow files are going to be removed\n self.assertIn(\"tmp\", dset_test1.cache_files[0][\"filename\"])\n self.assertIn(\"tmp\", dset_test2.cache_files[0][\"filename\"])\n finally:\n datasets.set_caching_enabled(True)\n\n @require_torch\n def test_map_torch(self, in_memory):\n import torch\n\n def func(example):\n return {\"tensor\": torch.tensor([1.0, 2, 3])}\n\n with tempfile.TemporaryDirectory() as tmp_dir:\n with self._create_dummy_dataset(in_memory, tmp_dir) as dset:\n with dset.map(func) as dset_test:\n self.assertEqual(len(dset_test), 30)\n self.assertDictEqual(\n dset_test.features,\n Features({\"filename\": Value(\"string\"), \"tensor\": Sequence(Value(\"float32\"))}),\n )\n self.assertListEqual(dset_test[0][\"tensor\"], [1, 2, 3])\n\n @require_tf\n def test_map_tf(self, in_memory):\n import tensorflow as tf\n\n def func(example):\n return {\"tensor\": tf.constant([1.0, 2, 3])}\n\n with tempfile.TemporaryDirectory() as tmp_dir:\n with self._create_dummy_dataset(in_memory, tmp_dir) as dset:\n with dset.map(func) as dset_test:\n self.assertEqual(len(dset_test), 30)\n self.assertDictEqual(\n dset_test.features,\n Features({\"filename\": Value(\"string\"), \"tensor\": Sequence(Value(\"float32\"))}),\n )\n self.assertListEqual(dset_test[0][\"tensor\"], [1, 2, 3])\n\n @require_jax\n def test_map_jax(self, in_memory):\n import jax.numpy as jnp\n\n def func(example):\n return {\"tensor\": jnp.asarray([1.0, 2, 3])}\n\n with tempfile.TemporaryDirectory() as tmp_dir:\n with self._create_dummy_dataset(in_memory, tmp_dir) as dset:\n with dset.map(func) as dset_test:\n self.assertEqual(len(dset_test), 30)\n self.assertDictEqual(\n dset_test.features,\n Features({\"filename\": Value(\"string\"), 
\"tensor\": Sequence(Value(\"float32\"))}),\n )\n self.assertListEqual(dset_test[0][\"tensor\"], [1, 2, 3])\n\n def test_map_numpy(self, in_memory):\n def func(example):\n return {\"tensor\": np.array([1.0, 2, 3])}\n\n with tempfile.TemporaryDirectory() as tmp_dir:\n with self._create_dummy_dataset(in_memory, tmp_dir) as dset:\n with dset.map(func) as dset_test:\n self.assertEqual(len(dset_test), 30)\n self.assertDictEqual(\n dset_test.features,\n Features({\"filename\": Value(\"string\"), \"tensor\": Sequence(Value(\"float64\"))}),\n )\n self.assertListEqual(dset_test[0][\"tensor\"], [1, 2, 3])\n\n def test_map_remove_columns(self, in_memory):\n with tempfile.TemporaryDirectory() as tmp_dir:\n with self._create_dummy_dataset(in_memory, tmp_dir) as dset:\n with dset.map(lambda x, i: {\"name\": x[\"filename\"][:-2], \"id\": i}, with_indices=True) as dset:\n self.assertTrue(\"id\" in dset[0])\n self.assertDictEqual(\n dset.features,\n Features({\"filename\": Value(\"string\"), \"name\": Value(\"string\"), \"id\": Value(\"int64\")}),\n )\n assert_arrow_metadata_are_synced_with_dataset_features(dset)\n with dset.map(lambda x: x, remove_columns=[\"id\"]) as dset:\n self.assertTrue(\"id\" not in dset[0])\n self.assertDictEqual(\n dset.features, Features({\"filename\": Value(\"string\"), \"name\": Value(\"string\")})\n )\n assert_arrow_metadata_are_synced_with_dataset_features(dset)\n with dset.with_format(\"numpy\", columns=dset.column_names) as dset:\n with dset.map(lambda x: {\"name\": 1}, remove_columns=dset.column_names) as dset:\n self.assertTrue(\"filename\" not in dset[0])\n self.assertTrue(\"name\" in dset[0])\n self.assertDictEqual(dset.features, Features({\"name\": Value(dtype=\"int64\")}))\n assert_arrow_metadata_are_synced_with_dataset_features(dset)\n\n def test_map_stateful_callable(self, in_memory):\n # be sure that the state of the map callable is unaffected\n # before processing the dataset examples\n\n class ExampleCounter:\n def __init__(self, batched=False):\n self.batched = batched\n # state\n self.cnt = 0\n\n def __call__(self, example):\n if self.batched:\n self.cnt += len(example)\n else:\n self.cnt += 1\n\n with tempfile.TemporaryDirectory() as tmp_dir:\n with self._create_dummy_dataset(in_memory, tmp_dir) as dset:\n\n ex_cnt = ExampleCounter()\n dset.map(ex_cnt)\n self.assertEqual(ex_cnt.cnt, len(dset))\n\n ex_cnt = ExampleCounter(batched=True)\n dset.map(ex_cnt)\n self.assertEqual(ex_cnt.cnt, len(dset))\n\n def test_filter(self, in_memory):\n # keep only first five examples\n\n with tempfile.TemporaryDirectory() as tmp_dir:\n with self._create_dummy_dataset(in_memory, tmp_dir) as dset:\n fingerprint = dset._fingerprint\n with dset.filter(lambda x, i: i < 5, with_indices=True) as dset_filter_first_five:\n self.assertEqual(len(dset_filter_first_five), 5)\n self.assertDictEqual(dset.features, Features({\"filename\": Value(\"string\")}))\n self.assertDictEqual(dset_filter_first_five.features, Features({\"filename\": Value(\"string\")}))\n self.assertNotEqual(dset_filter_first_five._fingerprint, fingerprint)\n\n # filter filenames with even id at the end + formatted\n with tempfile.TemporaryDirectory() as tmp_dir:\n with self._create_dummy_dataset(in_memory, tmp_dir) as dset:\n dset.set_format(\"numpy\")\n fingerprint = dset._fingerprint\n with dset.filter(lambda x: (int(x[\"filename\"][-1]) % 2 == 0)) as dset_filter_even_num:\n self.assertEqual(len(dset_filter_even_num), 15)\n self.assertDictEqual(dset.features, Features({\"filename\": Value(\"string\")}))\n 
self.assertDictEqual(dset_filter_even_num.features, Features({\"filename\": Value(\"string\")}))\n self.assertNotEqual(dset_filter_even_num._fingerprint, fingerprint)\n self.assertEqual(dset_filter_even_num.format[\"type\"], \"numpy\")\n\n def test_filter_with_indices_mapping(self, in_memory):\n with tempfile.TemporaryDirectory() as tmp_dir:\n dset = Dataset.from_dict({\"col\": [0, 1, 2]})\n with self._to(in_memory, tmp_dir, dset) as dset:\n with dset.filter(lambda x: x[\"col\"] > 0) as dset:\n self.assertListEqual(dset[\"col\"], [1, 2])\n with dset.filter(lambda x: x[\"col\"] < 2) as dset:\n self.assertListEqual(dset[\"col\"], [1])\n\n def test_filter_batched(self, in_memory):\n with tempfile.TemporaryDirectory() as tmp_dir:\n dset = Dataset.from_dict({\"col\": [0, 1, 2]})\n with self._to(in_memory, tmp_dir, dset) as dset:\n with dset.filter(lambda x: [i > 0 for i in x[\"col\"]], batched=True) as dset:\n self.assertListEqual(dset[\"col\"], [1, 2])\n with dset.filter(lambda x: [i < 2 for i in x[\"col\"]], batched=True) as dset:\n self.assertListEqual(dset[\"col\"], [1])\n\n def test_filter_fn_kwargs(self, in_memory):\n with tempfile.TemporaryDirectory() as tmp_dir:\n with Dataset.from_dict({\"id\": range(10)}) as dset:\n with self._to(in_memory, tmp_dir, dset) as dset:\n fn_kwargs = {\"max_offset\": 3}\n with dset.filter(\n lambda example, max_offset: example[\"id\"] < max_offset, fn_kwargs=fn_kwargs\n ) as filtered_dset:\n assert len(filtered_dset) == 3\n with dset.filter(\n lambda id, max_offset: id < max_offset, fn_kwargs=fn_kwargs, input_columns=\"id\"\n ) as filtered_dset:\n assert len(filtered_dset) == 3\n with dset.filter(\n lambda id, i, max_offset: i < max_offset,\n fn_kwargs=fn_kwargs,\n input_columns=\"id\",\n with_indices=True,\n ) as filtered_dset:\n assert len(filtered_dset) == 3\n\n def test_filter_multiprocessing(self, in_memory):\n with tempfile.TemporaryDirectory() as tmp_dir:\n with self._create_dummy_dataset(in_memory, tmp_dir) as dset:\n fingerprint = dset._fingerprint\n with dset.filter(picklable_filter_function, num_proc=2) as dset_filter_first_ten:\n self.assertEqual(len(dset_filter_first_ten), 10)\n self.assertDictEqual(dset.features, Features({\"filename\": Value(\"string\")}))\n self.assertDictEqual(dset_filter_first_ten.features, Features({\"filename\": Value(\"string\")}))\n self.assertEqual(len(dset_filter_first_ten.cache_files), 0 if in_memory else 2)\n self.assertNotEqual(dset_filter_first_ten._fingerprint, fingerprint)\n\n def test_filter_caching(self, in_memory):\n with tempfile.TemporaryDirectory() as tmp_dir:\n self._caplog.clear()\n with self._caplog.at_level(WARNING):\n with self._create_dummy_dataset(in_memory, tmp_dir) as dset:\n with dset.filter(lambda x, i: i < 5, with_indices=True) as dset_filter_first_five1:\n dset_test1_data_files = list(dset_filter_first_five1.cache_files)\n with dset.filter(lambda x, i: i < 5, with_indices=True) as dset_filter_first_five2:\n self.assertEqual(dset_test1_data_files, dset_filter_first_five2.cache_files)\n self.assertEqual(len(dset_filter_first_five2.cache_files), 0 if in_memory else 2)\n self.assertTrue((\"Loading cached processed dataset\" in self._caplog.text) ^ in_memory)\n\n def test_keep_features_after_transform_specified(self, in_memory):\n features = Features(\n {\"tokens\": Sequence(Value(\"string\")), \"labels\": Sequence(ClassLabel(names=[\"negative\", \"positive\"]))}\n )\n\n def invert_labels(x):\n return {\"labels\": [(1 - label) for label in x[\"labels\"]]}\n\n with tempfile.TemporaryDirectory() as 
tmp_dir:\n with Dataset.from_dict(\n {\"tokens\": [[\"foo\"] * 5] * 10, \"labels\": [[1] * 5] * 10}, features=features\n ) as dset:\n with self._to(in_memory, tmp_dir, dset) as dset:\n with dset.map(invert_labels, features=features) as inverted_dset:\n self.assertEqual(inverted_dset.features.type, features.type)\n self.assertDictEqual(inverted_dset.features, features)\n assert_arrow_metadata_are_synced_with_dataset_features(inverted_dset)\n\n def test_keep_features_after_transform_unspecified(self, in_memory):\n features = Features(\n {\"tokens\": Sequence(Value(\"string\")), \"labels\": Sequence(ClassLabel(names=[\"negative\", \"positive\"]))}\n )\n\n def invert_labels(x):\n return {\"labels\": [(1 - label) for label in x[\"labels\"]]}\n\n with tempfile.TemporaryDirectory() as tmp_dir:\n with Dataset.from_dict(\n {\"tokens\": [[\"foo\"] * 5] * 10, \"labels\": [[1] * 5] * 10}, features=features\n ) as dset:\n with self._to(in_memory, tmp_dir, dset) as dset:\n with dset.map(invert_labels) as inverted_dset:\n self.assertEqual(inverted_dset.features.type, features.type)\n self.assertDictEqual(inverted_dset.features, features)\n assert_arrow_metadata_are_synced_with_dataset_features(inverted_dset)\n\n def test_keep_features_after_transform_to_file(self, in_memory):\n features = Features(\n {\"tokens\": Sequence(Value(\"string\")), \"labels\": Sequence(ClassLabel(names=[\"negative\", \"positive\"]))}\n )\n\n def invert_labels(x):\n return {\"labels\": [(1 - label) for label in x[\"labels\"]]}\n\n with tempfile.TemporaryDirectory() as tmp_dir:\n with Dataset.from_dict(\n {\"tokens\": [[\"foo\"] * 5] * 10, \"labels\": [[1] * 5] * 10}, features=features\n ) as dset:\n with self._to(in_memory, tmp_dir, dset) as dset:\n tmp_file = os.path.join(tmp_dir, \"test.arrow\")\n dset.map(invert_labels, cache_file_name=tmp_file)\n with Dataset.from_file(tmp_file) as inverted_dset:\n self.assertEqual(inverted_dset.features.type, features.type)\n self.assertDictEqual(inverted_dset.features, features)\n\n def test_keep_features_after_transform_to_memory(self, in_memory):\n features = Features(\n {\"tokens\": Sequence(Value(\"string\")), \"labels\": Sequence(ClassLabel(names=[\"negative\", \"positive\"]))}\n )\n\n def invert_labels(x):\n return {\"labels\": [(1 - label) for label in x[\"labels\"]]}\n\n with tempfile.TemporaryDirectory() as tmp_dir:\n with Dataset.from_dict(\n {\"tokens\": [[\"foo\"] * 5] * 10, \"labels\": [[1] * 5] * 10}, features=features\n ) as dset:\n with self._to(in_memory, tmp_dir, dset) as dset:\n with dset.map(invert_labels, keep_in_memory=True) as inverted_dset:\n self.assertEqual(inverted_dset.features.type, features.type)\n self.assertDictEqual(inverted_dset.features, features)\n\n def test_keep_features_after_loading_from_cache(self, in_memory):\n features = Features(\n {\"tokens\": Sequence(Value(\"string\")), \"labels\": Sequence(ClassLabel(names=[\"negative\", \"positive\"]))}\n )\n\n def invert_labels(x):\n return {\"labels\": [(1 - label) for label in x[\"labels\"]]}\n\n with tempfile.TemporaryDirectory() as tmp_dir:\n with Dataset.from_dict(\n {\"tokens\": [[\"foo\"] * 5] * 10, \"labels\": [[1] * 5] * 10}, features=features\n ) as dset:\n with self._to(in_memory, tmp_dir, dset) as dset:\n tmp_file1 = os.path.join(tmp_dir, \"test1.arrow\")\n tmp_file2 = os.path.join(tmp_dir, \"test2.arrow\")\n # TODO: Why mapped twice?\n inverted_dset = dset.map(invert_labels, cache_file_name=tmp_file1)\n inverted_dset = dset.map(invert_labels, cache_file_name=tmp_file2)\n 
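# reloading the mapped dataset from its on-disk cache file must preserve the declared features\n                    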
self.assertGreater(len(inverted_dset.cache_files), 0)\n self.assertEqual(inverted_dset.features.type, features.type)\n self.assertDictEqual(inverted_dset.features, features)\n del inverted_dset\n\n def test_keep_features_with_new_features(self, in_memory):\n features = Features(\n {\"tokens\": Sequence(Value(\"string\")), \"labels\": Sequence(ClassLabel(names=[\"negative\", \"positive\"]))}\n )\n\n def invert_labels(x):\n return {\"labels\": [(1 - label) for label in x[\"labels\"]], \"labels2\": x[\"labels\"]}\n\n expected_features = Features(\n {\n \"tokens\": Sequence(Value(\"string\")),\n \"labels\": Sequence(ClassLabel(names=[\"negative\", \"positive\"])),\n \"labels2\": Sequence(Value(\"int64\")),\n }\n )\n\n with tempfile.TemporaryDirectory() as tmp_dir:\n with Dataset.from_dict(\n {\"tokens\": [[\"foo\"] * 5] * 10, \"labels\": [[1] * 5] * 10}, features=features\n ) as dset:\n with self._to(in_memory, tmp_dir, dset) as dset:\n with dset.map(invert_labels) as inverted_dset:\n self.assertEqual(inverted_dset.features.type, expected_features.type)\n self.assertDictEqual(inverted_dset.features, expected_features)\n assert_arrow_metadata_are_synced_with_dataset_features(inverted_dset)\n\n def test_select(self, in_memory):\n with tempfile.TemporaryDirectory() as tmp_dir:\n with self._create_dummy_dataset(in_memory, tmp_dir) as dset:\n # select every two example\n indices = list(range(0, len(dset), 2))\n tmp_file = os.path.join(tmp_dir, \"test.arrow\")\n fingerprint = dset._fingerprint\n with dset.select(indices, indices_cache_file_name=tmp_file) as dset_select_even:\n self.assertEqual(len(dset_select_even), 15)\n for row in dset_select_even:\n self.assertEqual(int(row[\"filename\"][-1]) % 2, 0)\n self.assertDictEqual(dset.features, Features({\"filename\": Value(\"string\")}))\n self.assertDictEqual(dset_select_even.features, Features({\"filename\": Value(\"string\")}))\n self.assertNotEqual(dset_select_even._fingerprint, fingerprint)\n\n with tempfile.TemporaryDirectory() as tmp_dir:\n with self._create_dummy_dataset(in_memory, tmp_dir) as dset:\n bad_indices = list(range(5))\n bad_indices[3] = \"foo\"\n tmp_file = os.path.join(tmp_dir, \"test.arrow\")\n self.assertRaises(\n Exception,\n dset.select,\n indices=bad_indices,\n indices_cache_file_name=tmp_file,\n writer_batch_size=2,\n )\n self.assertFalse(os.path.exists(tmp_file))\n dset.set_format(\"numpy\")\n with dset.select(\n range(5),\n indices_cache_file_name=tmp_file,\n writer_batch_size=2,\n ) as dset_select_five:\n self.assertTrue(os.path.exists(tmp_file))\n self.assertEqual(len(dset_select_five), 5)\n self.assertEqual(dset_select_five.format[\"type\"], \"numpy\")\n for i, row in enumerate(dset_select_five):\n self.assertEqual(int(row[\"filename\"][-1]), i)\n self.assertDictEqual(dset.features, Features({\"filename\": Value(\"string\")}))\n self.assertDictEqual(dset_select_five.features, Features({\"filename\": Value(\"string\")}))\n\n def test_select_then_map(self, in_memory):\n with tempfile.TemporaryDirectory() as tmp_dir:\n with self._create_dummy_dataset(in_memory, tmp_dir) as dset:\n with dset.select([0]) as d1:\n with d1.map(lambda x: {\"id\": int(x[\"filename\"].split(\"_\")[-1])}) as d1:\n self.assertEqual(d1[0][\"id\"], 0)\n with dset.select([1]) as d2:\n with d2.map(lambda x: {\"id\": int(x[\"filename\"].split(\"_\")[-1])}) as d2:\n self.assertEqual(d2[0][\"id\"], 1)\n\n with tempfile.TemporaryDirectory() as tmp_dir:\n with self._create_dummy_dataset(in_memory, tmp_dir) as dset:\n with dset.select([0], 
indices_cache_file_name=os.path.join(tmp_dir, \"i1.arrow\")) as d1:\n with d1.map(lambda x: {\"id\": int(x[\"filename\"].split(\"_\")[-1])}) as d1:\n self.assertEqual(d1[0][\"id\"], 0)\n with dset.select([1], indices_cache_file_name=os.path.join(tmp_dir, \"i2.arrow\")) as d2:\n with d2.map(lambda x: {\"id\": int(x[\"filename\"].split(\"_\")[-1])}) as d2:\n self.assertEqual(d2[0][\"id\"], 1)\n\n def test_pickle_after_many_transforms_on_disk(self, in_memory):\n with tempfile.TemporaryDirectory() as tmp_dir:\n with self._create_dummy_dataset(in_memory, tmp_dir) as dset:\n self.assertEqual(len(dset.cache_files), 0 if in_memory else 1)\n dset.rename_column_(\"filename\", \"file\")\n self.assertListEqual(dset.column_names, [\"file\"])\n with dset.select(range(5)) as dset:\n self.assertEqual(len(dset), 5)\n with dset.map(lambda x: {\"id\": int(x[\"file\"][-1])}) as dset:\n self.assertListEqual(sorted(dset.column_names), [\"file\", \"id\"])\n dset.rename_column_(\"id\", \"number\")\n self.assertListEqual(sorted(dset.column_names), [\"file\", \"number\"])\n with dset.select([1]) as dset:\n self.assertEqual(dset[0][\"file\"], \"my_name-train_1\")\n self.assertEqual(dset[0][\"number\"], 1)\n\n self.assertEqual(dset._indices[\"indices\"].to_pylist(), [1])\n if not in_memory:\n self.assertIn(\n (\"rename_columns\", ([\"file\", \"number\"],), {}),\n dset._data.replays,\n )\n if not in_memory:\n dset._data.table = Unpicklable() # check that we don't pickle the entire table\n\n pickled = pickle.dumps(dset)\n with pickle.loads(pickled) as loaded:\n self.assertEqual(loaded[0][\"file\"], \"my_name-train_1\")\n self.assertEqual(loaded[0][\"number\"], 1)\n\n def test_shuffle(self, in_memory):\n with tempfile.TemporaryDirectory() as tmp_dir:\n with self._create_dummy_dataset(in_memory, tmp_dir) as dset:\n tmp_file = os.path.join(tmp_dir, \"test.arrow\")\n fingerprint = dset._fingerprint\n with dset.shuffle(seed=1234, indices_cache_file_name=tmp_file) as dset_shuffled:\n self.assertEqual(len(dset_shuffled), 30)\n self.assertEqual(dset_shuffled[0][\"filename\"], \"my_name-train_28\")\n self.assertEqual(dset_shuffled[2][\"filename\"], \"my_name-train_10\")\n self.assertDictEqual(dset.features, Features({\"filename\": Value(\"string\")}))\n self.assertDictEqual(dset_shuffled.features, Features({\"filename\": Value(\"string\")}))\n self.assertNotEqual(dset_shuffled._fingerprint, fingerprint)\n\n # Reproducibility\n tmp_file = os.path.join(tmp_dir, \"test_2.arrow\")\n with dset.shuffle(seed=1234, indices_cache_file_name=tmp_file) as dset_shuffled_2:\n self.assertListEqual(dset_shuffled[\"filename\"], dset_shuffled_2[\"filename\"])\n\n # Compatible with temp_seed\n with temp_seed(42), dset.shuffle() as d1:\n with temp_seed(42), dset.shuffle() as d2, dset.shuffle() as d3:\n self.assertListEqual(d1[\"filename\"], d2[\"filename\"])\n self.assertEqual(d1._fingerprint, d2._fingerprint)\n self.assertNotEqual(d3[\"filename\"], d2[\"filename\"])\n self.assertNotEqual(d3._fingerprint, d2._fingerprint)\n\n def test_sort(self, in_memory):\n with tempfile.TemporaryDirectory() as tmp_dir:\n with self._create_dummy_dataset(in_memory, tmp_dir) as dset:\n # Keep only 10 examples\n tmp_file = os.path.join(tmp_dir, \"test.arrow\")\n with dset.select(range(10), indices_cache_file_name=tmp_file) as dset:\n tmp_file = os.path.join(tmp_dir, \"test_2.arrow\")\n with dset.shuffle(seed=1234, indices_cache_file_name=tmp_file) as dset:\n self.assertEqual(len(dset), 10)\n self.assertEqual(dset[0][\"filename\"], \"my_name-train_8\")\n 
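# seed=1234 makes the permutation deterministic, so exact filenames can be asserted\n                        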
self.assertEqual(dset[1][\"filename\"], \"my_name-train_9\")\n # Sort\n tmp_file = os.path.join(tmp_dir, \"test_3.arrow\")\n fingerprint = dset._fingerprint\n with dset.sort(\"filename\", indices_cache_file_name=tmp_file) as dset_sorted:\n for i, row in enumerate(dset_sorted):\n self.assertEqual(int(row[\"filename\"][-1]), i)\n self.assertDictEqual(dset.features, Features({\"filename\": Value(\"string\")}))\n self.assertDictEqual(dset_sorted.features, Features({\"filename\": Value(\"string\")}))\n self.assertNotEqual(dset_sorted._fingerprint, fingerprint)\n # Sort reversed\n tmp_file = os.path.join(tmp_dir, \"test_4.arrow\")\n fingerprint = dset._fingerprint\n with dset.sort(\"filename\", indices_cache_file_name=tmp_file, reverse=True) as dset_sorted:\n for i, row in enumerate(dset_sorted):\n self.assertEqual(int(row[\"filename\"][-1]), len(dset_sorted) - 1 - i)\n self.assertDictEqual(dset.features, Features({\"filename\": Value(\"string\")}))\n self.assertDictEqual(dset_sorted.features, Features({\"filename\": Value(\"string\")}))\n self.assertNotEqual(dset_sorted._fingerprint, fingerprint)\n # formatted\n dset.set_format(\"numpy\")\n with dset.sort(\"filename\") as dset_sorted_formatted:\n self.assertEqual(dset_sorted_formatted.format[\"type\"], \"numpy\")\n\n @require_tf\n def test_export(self, in_memory):\n with tempfile.TemporaryDirectory() as tmp_dir:\n with self._create_dummy_dataset(in_memory, tmp_dir) as dset:\n # Export the data\n tfrecord_path = os.path.join(tmp_dir, \"test.tfrecord\")\n with dset.map(\n lambda ex, i: {\n \"id\": i,\n \"question\": f\"Question {i}\",\n \"answers\": {\"text\": [f\"Answer {i}-0\", f\"Answer {i}-1\"], \"answer_start\": [0, 1]},\n },\n with_indices=True,\n remove_columns=[\"filename\"],\n ) as formatted_dset:\n formatted_dset.flatten_()\n formatted_dset.set_format(\"numpy\")\n formatted_dset.export(filename=tfrecord_path, format=\"tfrecord\")\n\n # Import the data\n import tensorflow as tf\n\n tf_dset = tf.data.TFRecordDataset([tfrecord_path])\n feature_description = {\n \"id\": tf.io.FixedLenFeature([], tf.int64),\n \"question\": tf.io.FixedLenFeature([], tf.string),\n \"answers.text\": tf.io.VarLenFeature(tf.string),\n \"answers.answer_start\": tf.io.VarLenFeature(tf.int64),\n }\n tf_parsed_dset = tf_dset.map(\n lambda example_proto: tf.io.parse_single_example(example_proto, feature_description)\n )\n # Test that keys match original dataset\n for i, ex in enumerate(tf_parsed_dset):\n self.assertEqual(ex.keys(), formatted_dset[i].keys())\n # Test for equal number of elements\n self.assertEqual(i, len(formatted_dset) - 1)\n\n def test_to_csv(self, in_memory):\n with tempfile.TemporaryDirectory() as tmp_dir:\n # File path argument\n with self._create_dummy_dataset(in_memory, tmp_dir, multiple_columns=True) as dset:\n file_path = os.path.join(tmp_dir, \"test_path.csv\")\n bytes_written = dset.to_csv(path_or_buf=file_path)\n\n self.assertTrue(os.path.isfile(file_path))\n self.assertEqual(bytes_written, os.path.getsize(file_path))\n csv_dset = pd.read_csv(file_path, header=0, index_col=0)\n\n self.assertEqual(csv_dset.shape, dset.shape)\n self.assertListEqual(list(csv_dset.columns), list(dset.column_names))\n\n # File buffer argument\n with self._create_dummy_dataset(in_memory, tmp_dir, multiple_columns=True) as dset:\n file_path = os.path.join(tmp_dir, \"test_buffer.csv\")\n with open(file_path, \"wb+\") as buffer:\n bytes_written = dset.to_csv(path_or_buf=buffer)\n\n self.assertTrue(os.path.isfile(file_path))\n self.assertEqual(bytes_written, 
os.path.getsize(file_path))\n                csv_dset = pd.read_csv(file_path, header=0, index_col=0)\n\n                self.assertEqual(csv_dset.shape, dset.shape)\n                self.assertListEqual(list(csv_dset.columns), list(dset.column_names))\n\n            # After a select/shuffle transform\n            with self._create_dummy_dataset(in_memory, tmp_dir, multiple_columns=True) as dset:\n                dset = dset.select(range(0, len(dset), 2)).shuffle()\n                file_path = os.path.join(tmp_dir, \"test_path.csv\")\n                bytes_written = dset.to_csv(path_or_buf=file_path)\n\n                self.assertTrue(os.path.isfile(file_path))\n                self.assertEqual(bytes_written, os.path.getsize(file_path))\n                csv_dset = pd.read_csv(file_path, header=0, index_col=0)\n\n                self.assertEqual(csv_dset.shape, dset.shape)\n                self.assertListEqual(list(csv_dset.columns), list(dset.column_names))\n\n            # With array features\n            with self._create_dummy_dataset(in_memory, tmp_dir, array_features=True) as dset:\n                file_path = os.path.join(tmp_dir, \"test_path.csv\")\n                bytes_written = dset.to_csv(path_or_buf=file_path)\n\n                self.assertTrue(os.path.isfile(file_path))\n                self.assertEqual(bytes_written, os.path.getsize(file_path))\n                csv_dset = pd.read_csv(file_path, header=0, index_col=0)\n\n                self.assertEqual(csv_dset.shape, dset.shape)\n                self.assertListEqual(list(csv_dset.columns), list(dset.column_names))\n\n    def test_to_dict(self, in_memory):\n        with tempfile.TemporaryDirectory() as tmp_dir:\n            # Batched\n            with self._create_dummy_dataset(in_memory, tmp_dir, multiple_columns=True) as dset:\n                batch_size = dset.num_rows - 1\n                to_dict_generator = dset.to_dict(batched=True, batch_size=batch_size)\n\n                for batch in to_dict_generator:\n                    self.assertIsInstance(batch, dict)\n                    self.assertListEqual(sorted(batch.keys()), sorted(dset.column_names))\n                    for col_name in dset.column_names:\n                        self.assertIsInstance(batch[col_name], list)\n                        self.assertLessEqual(len(batch[col_name]), batch_size)\n\n                # Full\n                dset_to_dict = dset.to_dict()\n                self.assertIsInstance(dset_to_dict, dict)\n                self.assertListEqual(sorted(dset_to_dict.keys()), sorted(dset.column_names))\n\n                for col_name in dset.column_names:\n                    self.assertLessEqual(len(dset_to_dict[col_name]), len(dset))\n\n                # With index mapping\n                with dset.select([1, 0, 3]) as dset:\n                    dset_to_dict = dset.to_dict()\n                    self.assertIsInstance(dset_to_dict, dict)\n                    self.assertEqual(len(dset_to_dict), 3)\n                    self.assertListEqual(sorted(dset_to_dict.keys()), sorted(dset.column_names))\n\n                    for col_name in dset.column_names:\n                        self.assertIsInstance(dset_to_dict[col_name], list)\n                        self.assertEqual(len(dset_to_dict[col_name]), len(dset))\n\n    def test_to_pandas(self, in_memory):\n        with tempfile.TemporaryDirectory() as tmp_dir:\n            # Batched\n            with self._create_dummy_dataset(in_memory, tmp_dir, multiple_columns=True) as dset:\n                batch_size = dset.num_rows - 1\n                to_pandas_generator = dset.to_pandas(batched=True, batch_size=batch_size)\n\n                for batch in to_pandas_generator:\n                    self.assertIsInstance(batch, pd.DataFrame)\n                    self.assertListEqual(sorted(batch.columns), sorted(dset.column_names))\n                    for col_name in dset.column_names:\n                        self.assertLessEqual(len(batch[col_name]), batch_size)\n\n                # Full\n                dset_to_pandas = dset.to_pandas()\n                self.assertIsInstance(dset_to_pandas, pd.DataFrame)\n                self.assertListEqual(sorted(dset_to_pandas.columns), sorted(dset.column_names))\n                for col_name in dset.column_names:\n                    self.assertEqual(len(dset_to_pandas[col_name]), len(dset))\n\n                # With index mapping\n                with dset.select([1, 0, 3]) as dset:\n                    dset_to_pandas = dset.to_pandas()\n                    self.assertIsInstance(dset_to_pandas, pd.DataFrame)\n                    
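# select([1, 0, 3]) only creates an indices mapping; to_pandas has to resolve it to the 3 picked rows\n                    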
self.assertEqual(len(dset_to_pandas), 3)\n self.assertListEqual(sorted(dset_to_pandas.columns), sorted(dset.column_names))\n\n for col_name in dset.column_names:\n self.assertEqual(len(dset_to_pandas[col_name]), dset.num_rows)\n\n def test_to_parquet(self, in_memory):\n with tempfile.TemporaryDirectory() as tmp_dir:\n # File path argument\n with self._create_dummy_dataset(in_memory, tmp_dir, multiple_columns=True) as dset:\n file_path = os.path.join(tmp_dir, \"test_path.parquet\")\n dset.to_parquet(path_or_buf=file_path)\n\n self.assertTrue(os.path.isfile(file_path))\n # self.assertEqual(bytes_written, os.path.getsize(file_path)) # because of compression, the number of bytes doesn't match\n parquet_dset = pd.read_parquet(file_path)\n\n self.assertEqual(parquet_dset.shape, dset.shape)\n self.assertListEqual(list(parquet_dset.columns), list(dset.column_names))\n\n # File buffer argument\n with self._create_dummy_dataset(in_memory, tmp_dir, multiple_columns=True) as dset:\n file_path = os.path.join(tmp_dir, \"test_buffer.parquet\")\n with open(file_path, \"wb+\") as buffer:\n dset.to_parquet(path_or_buf=buffer)\n\n self.assertTrue(os.path.isfile(file_path))\n # self.assertEqual(bytes_written, os.path.getsize(file_path)) # because of compression, the number of bytes doesn't match\n parquet_dset = pd.read_parquet(file_path)\n\n self.assertEqual(parquet_dset.shape, dset.shape)\n self.assertListEqual(list(parquet_dset.columns), list(dset.column_names))\n\n # After a select/shuffle transform\n with self._create_dummy_dataset(in_memory, tmp_dir, multiple_columns=True) as dset:\n dset = dset.select(range(0, len(dset), 2)).shuffle()\n file_path = os.path.join(tmp_dir, \"test_path.parquet\")\n dset.to_parquet(path_or_buf=file_path)\n\n self.assertTrue(os.path.isfile(file_path))\n # self.assertEqual(bytes_written, os.path.getsize(file_path)) # because of compression, the number of bytes doesn't match\n parquet_dset = pd.read_parquet(file_path)\n\n self.assertEqual(parquet_dset.shape, dset.shape)\n self.assertListEqual(list(parquet_dset.columns), list(dset.column_names))\n\n # With array features\n with self._create_dummy_dataset(in_memory, tmp_dir, array_features=True) as dset:\n file_path = os.path.join(tmp_dir, \"test_path.parquet\")\n dset.to_parquet(path_or_buf=file_path)\n\n self.assertTrue(os.path.isfile(file_path))\n # self.assertEqual(bytes_written, os.path.getsize(file_path)) # because of compression, the number of bytes doesn't match\n parquet_dset = pd.read_parquet(file_path)\n\n self.assertEqual(parquet_dset.shape, dset.shape)\n self.assertListEqual(list(parquet_dset.columns), list(dset.column_names))\n\n def test_train_test_split(self, in_memory):\n with tempfile.TemporaryDirectory() as tmp_dir:\n with self._create_dummy_dataset(in_memory, tmp_dir) as dset:\n fingerprint = dset._fingerprint\n dset_dict = dset.train_test_split(test_size=10, shuffle=False)\n self.assertListEqual(list(dset_dict.keys()), [\"train\", \"test\"])\n dset_train = dset_dict[\"train\"]\n dset_test = dset_dict[\"test\"]\n\n self.assertEqual(len(dset_train), 20)\n self.assertEqual(len(dset_test), 10)\n self.assertEqual(dset_train[0][\"filename\"], \"my_name-train_0\")\n self.assertEqual(dset_train[-1][\"filename\"], \"my_name-train_19\")\n self.assertEqual(dset_test[0][\"filename\"], \"my_name-train_20\")\n self.assertEqual(dset_test[-1][\"filename\"], \"my_name-train_29\")\n self.assertDictEqual(dset.features, Features({\"filename\": Value(\"string\")}))\n self.assertDictEqual(dset_train.features, 
Features({\"filename\": Value(\"string\")}))\n self.assertDictEqual(dset_test.features, Features({\"filename\": Value(\"string\")}))\n self.assertNotEqual(dset_train._fingerprint, fingerprint)\n self.assertNotEqual(dset_test._fingerprint, fingerprint)\n self.assertNotEqual(dset_train._fingerprint, dset_test._fingerprint)\n\n dset_dict = dset.train_test_split(test_size=0.5, shuffle=False)\n self.assertListEqual(list(dset_dict.keys()), [\"train\", \"test\"])\n dset_train = dset_dict[\"train\"]\n dset_test = dset_dict[\"test\"]\n\n self.assertEqual(len(dset_train), 15)\n self.assertEqual(len(dset_test), 15)\n self.assertEqual(dset_train[0][\"filename\"], \"my_name-train_0\")\n self.assertEqual(dset_train[-1][\"filename\"], \"my_name-train_14\")\n self.assertEqual(dset_test[0][\"filename\"], \"my_name-train_15\")\n self.assertEqual(dset_test[-1][\"filename\"], \"my_name-train_29\")\n self.assertDictEqual(dset.features, Features({\"filename\": Value(\"string\")}))\n self.assertDictEqual(dset_train.features, Features({\"filename\": Value(\"string\")}))\n self.assertDictEqual(dset_test.features, Features({\"filename\": Value(\"string\")}))\n\n dset_dict = dset.train_test_split(train_size=10, shuffle=False)\n self.assertListEqual(list(dset_dict.keys()), [\"train\", \"test\"])\n dset_train = dset_dict[\"train\"]\n dset_test = dset_dict[\"test\"]\n\n self.assertEqual(len(dset_train), 10)\n self.assertEqual(len(dset_test), 20)\n self.assertEqual(dset_train[0][\"filename\"], \"my_name-train_0\")\n self.assertEqual(dset_train[-1][\"filename\"], \"my_name-train_9\")\n self.assertEqual(dset_test[0][\"filename\"], \"my_name-train_10\")\n self.assertEqual(dset_test[-1][\"filename\"], \"my_name-train_29\")\n self.assertDictEqual(dset.features, Features({\"filename\": Value(\"string\")}))\n self.assertDictEqual(dset_train.features, Features({\"filename\": Value(\"string\")}))\n self.assertDictEqual(dset_test.features, Features({\"filename\": Value(\"string\")}))\n\n dset.set_format(\"numpy\")\n dset_dict = dset.train_test_split(train_size=10, seed=42)\n self.assertListEqual(list(dset_dict.keys()), [\"train\", \"test\"])\n dset_train = dset_dict[\"train\"]\n dset_test = dset_dict[\"test\"]\n\n self.assertEqual(len(dset_train), 10)\n self.assertEqual(len(dset_test), 20)\n self.assertEqual(dset_train.format[\"type\"], \"numpy\")\n self.assertEqual(dset_test.format[\"type\"], \"numpy\")\n self.assertNotEqual(dset_train[0][\"filename\"].item(), \"my_name-train_0\")\n self.assertNotEqual(dset_train[-1][\"filename\"].item(), \"my_name-train_9\")\n self.assertNotEqual(dset_test[0][\"filename\"].item(), \"my_name-train_10\")\n self.assertNotEqual(dset_test[-1][\"filename\"].item(), \"my_name-train_29\")\n self.assertDictEqual(dset.features, Features({\"filename\": Value(\"string\")}))\n self.assertDictEqual(dset_train.features, Features({\"filename\": Value(\"string\")}))\n self.assertDictEqual(dset_test.features, Features({\"filename\": Value(\"string\")}))\n del dset_test, dset_train, dset_dict # DatasetDict\n\n def test_shard(self, in_memory):\n with tempfile.TemporaryDirectory() as tmp_dir, self._create_dummy_dataset(in_memory, tmp_dir) as dset:\n tmp_file = os.path.join(tmp_dir, \"test.arrow\")\n with dset.select(range(10), indices_cache_file_name=tmp_file) as dset:\n self.assertEqual(len(dset), 10)\n # Shard\n tmp_file_1 = os.path.join(tmp_dir, \"test_1.arrow\")\n fingerprint = dset._fingerprint\n with dset.shard(num_shards=8, index=1, indices_cache_file_name=tmp_file_1) as dset_sharded:\n self.assertEqual(2, 
len(dset_sharded))\n self.assertEqual([\"my_name-train_1\", \"my_name-train_9\"], dset_sharded[\"filename\"])\n self.assertDictEqual(dset.features, Features({\"filename\": Value(\"string\")}))\n self.assertDictEqual(dset_sharded.features, Features({\"filename\": Value(\"string\")}))\n self.assertNotEqual(dset_sharded._fingerprint, fingerprint)\n # Shard contiguous\n tmp_file_2 = os.path.join(tmp_dir, \"test_2.arrow\")\n with dset.shard(\n num_shards=3, index=0, contiguous=True, indices_cache_file_name=tmp_file_2\n ) as dset_sharded_contiguous:\n self.assertEqual([f\"my_name-train_{i}\" for i in (0, 1, 2, 3)], dset_sharded_contiguous[\"filename\"])\n self.assertDictEqual(dset.features, Features({\"filename\": Value(\"string\")}))\n self.assertDictEqual(dset_sharded_contiguous.features, Features({\"filename\": Value(\"string\")}))\n # Test lengths of sharded contiguous\n self.assertEqual(\n [4, 3, 3],\n [\n len(dset.shard(3, index=i, contiguous=True, indices_cache_file_name=tmp_file_2 + str(i)))\n for i in range(3)\n ],\n )\n # formatted\n dset.set_format(\"numpy\")\n with dset.shard(num_shards=3, index=0) as dset_sharded_formatted:\n self.assertEqual(dset_sharded_formatted.format[\"type\"], \"numpy\")\n\n def test_flatten_indices(self, in_memory):\n with tempfile.TemporaryDirectory() as tmp_dir:\n with self._create_dummy_dataset(in_memory, tmp_dir) as dset:\n self.assertEqual(dset._indices, None)\n\n tmp_file = os.path.join(tmp_dir, \"test.arrow\")\n with dset.select(range(10), indices_cache_file_name=tmp_file) as dset:\n self.assertEqual(len(dset), 10)\n\n self.assertNotEqual(dset._indices, None)\n\n tmp_file_2 = os.path.join(tmp_dir, \"test_2.arrow\")\n fingerprint = dset._fingerprint\n dset.set_format(\"numpy\")\n with dset.flatten_indices(cache_file_name=tmp_file_2) as dset:\n self.assertEqual(len(dset), 10)\n self.assertEqual(dset._indices, None)\n self.assertNotEqual(dset._fingerprint, fingerprint)\n self.assertEqual(dset.format[\"type\"], \"numpy\")\n # Test unique works\n dset.unique(dset.column_names[0])\n assert_arrow_metadata_are_synced_with_dataset_features(dset)\n\n @require_tf\n @require_torch\n def test_format_vectors(self, in_memory):\n import numpy as np\n import tensorflow as tf\n import torch\n\n with tempfile.TemporaryDirectory() as tmp_dir, self._create_dummy_dataset(\n in_memory, tmp_dir\n ) as dset, dset.map(lambda ex, i: {\"vec\": np.ones(3) * i}, with_indices=True) as dset:\n columns = dset.column_names\n\n self.assertIsNotNone(dset[0])\n self.assertIsNotNone(dset[:2])\n for col in columns:\n self.assertIsInstance(dset[0][col], (str, list))\n self.assertIsInstance(dset[:2][col], list)\n self.assertDictEqual(\n dset.features, Features({\"filename\": Value(\"string\"), \"vec\": Sequence(Value(\"float64\"))})\n )\n\n dset.set_format(\"tensorflow\")\n self.assertIsNotNone(dset[0])\n self.assertIsNotNone(dset[:2])\n for col in columns:\n self.assertIsInstance(dset[0][col], (tf.Tensor, tf.RaggedTensor))\n self.assertIsInstance(dset[:2][col], (tf.Tensor, tf.RaggedTensor))\n self.assertIsInstance(dset[col], (tf.Tensor, tf.RaggedTensor))\n self.assertEqual(tuple(dset[:2][\"vec\"].shape), (2, 3))\n self.assertEqual(tuple(dset[\"vec\"][:2].shape), (2, 3))\n\n dset.set_format(\"numpy\")\n self.assertIsNotNone(dset[0])\n self.assertIsNotNone(dset[:2])\n self.assertIsInstance(dset[0][\"filename\"], np.str_)\n self.assertIsInstance(dset[:2][\"filename\"], np.ndarray)\n self.assertIsInstance(dset[\"filename\"], np.ndarray)\n self.assertIsInstance(dset[0][\"vec\"], np.ndarray)\n 
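# equal-length vectors come back as dense numpy arrays, for single rows, slices and full columns\n            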
self.assertIsInstance(dset[:2][\"vec\"], np.ndarray)\n self.assertIsInstance(dset[\"vec\"], np.ndarray)\n self.assertEqual(dset[:2][\"vec\"].shape, (2, 3))\n self.assertEqual(dset[\"vec\"][:2].shape, (2, 3))\n\n dset.set_format(\"torch\", columns=[\"vec\"])\n self.assertIsNotNone(dset[0])\n self.assertIsNotNone(dset[:2])\n # torch.Tensor is only for numerical columns\n self.assertIsInstance(dset[0][\"vec\"], torch.Tensor)\n self.assertIsInstance(dset[:2][\"vec\"], torch.Tensor)\n self.assertIsInstance(dset[\"vec\"][:2], torch.Tensor)\n self.assertEqual(dset[:2][\"vec\"].shape, (2, 3))\n self.assertEqual(dset[\"vec\"][:2].shape, (2, 3))\n\n @require_tf\n @require_torch\n def test_format_ragged_vectors(self, in_memory):\n import numpy as np\n import tensorflow as tf\n import torch\n\n with tempfile.TemporaryDirectory() as tmp_dir, self._create_dummy_dataset(\n in_memory, tmp_dir\n ) as dset, dset.map(lambda ex, i: {\"vec\": np.ones(3 + i) * i}, with_indices=True) as dset:\n columns = dset.column_names\n\n self.assertIsNotNone(dset[0])\n self.assertIsNotNone(dset[:2])\n for col in columns:\n self.assertIsInstance(dset[0][col], (str, list))\n self.assertIsInstance(dset[:2][col], list)\n self.assertDictEqual(\n dset.features, Features({\"filename\": Value(\"string\"), \"vec\": Sequence(Value(\"float64\"))})\n )\n\n dset.set_format(\"tensorflow\")\n self.assertIsNotNone(dset[0])\n self.assertIsNotNone(dset[:2])\n for col in columns:\n self.assertIsInstance(dset[0][col], (tf.Tensor, tf.RaggedTensor))\n self.assertIsInstance(dset[:2][col], (tf.Tensor, tf.RaggedTensor))\n self.assertIsInstance(dset[col], (tf.Tensor, tf.RaggedTensor))\n # dim is None for ragged vectors in tensorflow\n self.assertListEqual(dset[:2][\"vec\"].shape.as_list(), [2, None])\n self.assertListEqual(dset[\"vec\"][:2].shape.as_list(), [2, None])\n\n dset.set_format(\"numpy\")\n self.assertIsNotNone(dset[0])\n self.assertIsNotNone(dset[:2])\n self.assertIsInstance(dset[0][\"filename\"], np.str_)\n self.assertIsInstance(dset[:2][\"filename\"], np.ndarray)\n self.assertIsInstance(dset[\"filename\"], np.ndarray)\n self.assertIsInstance(dset[0][\"vec\"], np.ndarray)\n self.assertIsInstance(dset[:2][\"vec\"], np.ndarray)\n self.assertIsInstance(dset[\"vec\"], np.ndarray)\n # array is flat for ragged vectors in numpy\n self.assertEqual(dset[:2][\"vec\"].shape, (2,))\n self.assertEqual(dset[\"vec\"][:2].shape, (2,))\n\n dset.set_format(\"torch\", columns=[\"vec\"])\n self.assertIsNotNone(dset[0])\n self.assertIsNotNone(dset[:2])\n # torch.Tensor is only for numerical columns\n self.assertIsInstance(dset[0][\"vec\"], torch.Tensor)\n self.assertIsInstance(dset[:2][\"vec\"][0], torch.Tensor)\n self.assertIsInstance(dset[\"vec\"][0], torch.Tensor)\n # pytorch doesn't support ragged tensors, so we should have lists\n self.assertIsInstance(dset[:2][\"vec\"], list)\n self.assertIsInstance(dset[\"vec\"][:2], list)\n\n @require_tf\n @require_torch\n def test_format_nested(self, in_memory):\n import numpy as np\n import tensorflow as tf\n import torch\n\n with tempfile.TemporaryDirectory() as tmp_dir, self._create_dummy_dataset(\n in_memory, tmp_dir\n ) as dset, dset.map(lambda ex: {\"nested\": [{\"foo\": np.ones(3)}] * len(ex[\"filename\"])}, batched=True) as dset:\n self.assertDictEqual(\n dset.features, Features({\"filename\": Value(\"string\"), \"nested\": {\"foo\": Sequence(Value(\"float64\"))}})\n )\n\n dset.set_format(\"tensorflow\")\n self.assertIsNotNone(dset[0])\n self.assertIsInstance(dset[0][\"nested\"][\"foo\"], (tf.Tensor, 
tf.RaggedTensor))\n self.assertIsNotNone(dset[:2])\n self.assertIsInstance(dset[:2][\"nested\"][0][\"foo\"], (tf.Tensor, tf.RaggedTensor))\n self.assertIsInstance(dset[\"nested\"][0][\"foo\"], (tf.Tensor, tf.RaggedTensor))\n\n dset.set_format(\"numpy\")\n self.assertIsNotNone(dset[0])\n self.assertIsInstance(dset[0][\"nested\"][\"foo\"], np.ndarray)\n self.assertIsNotNone(dset[:2])\n self.assertIsInstance(dset[:2][\"nested\"][0][\"foo\"], np.ndarray)\n self.assertIsInstance(dset[\"nested\"][0][\"foo\"], np.ndarray)\n\n dset.set_format(\"torch\", columns=\"nested\")\n self.assertIsNotNone(dset[0])\n self.assertIsInstance(dset[0][\"nested\"][\"foo\"], torch.Tensor)\n self.assertIsNotNone(dset[:2])\n self.assertIsInstance(dset[:2][\"nested\"][0][\"foo\"], torch.Tensor)\n self.assertIsInstance(dset[\"nested\"][0][\"foo\"], torch.Tensor)\n\n def test_format_pandas(self, in_memory):\n import pandas as pd\n\n with tempfile.TemporaryDirectory() as tmp_dir:\n with self._create_dummy_dataset(in_memory, tmp_dir, multiple_columns=True) as dset:\n dset.set_format(\"pandas\")\n self.assertIsInstance(dset[0], pd.DataFrame)\n self.assertIsInstance(dset[:2], pd.DataFrame)\n self.assertIsInstance(dset[\"col_1\"], pd.Series)\n\n def test_transmit_format_single(self, in_memory):\n @transmit_format\n def my_single_transform(self, return_factory, *args, **kwargs):\n return return_factory()\n\n with tempfile.TemporaryDirectory() as tmp_dir:\n return_factory = partial(\n self._create_dummy_dataset, in_memory=in_memory, tmp_dir=tmp_dir, multiple_columns=True\n )\n with return_factory() as dset:\n dset.set_format(\"numpy\", columns=[\"col_1\"])\n prev_format = dset.format\n with my_single_transform(dset, return_factory) as transformed_dset:\n self.assertDictEqual(transformed_dset.format, prev_format)\n\n def test_transmit_format_dict(self, in_memory):\n @transmit_format\n def my_split_transform(self, return_factory, *args, **kwargs):\n return DatasetDict({\"train\": return_factory()})\n\n with tempfile.TemporaryDirectory() as tmp_dir:\n return_factory = partial(\n self._create_dummy_dataset, in_memory=in_memory, tmp_dir=tmp_dir, multiple_columns=True\n )\n with return_factory() as dset:\n dset.set_format(\"numpy\", columns=[\"col_1\"])\n prev_format = dset.format\n transformed_dset = my_split_transform(dset, return_factory)[\"train\"]\n self.assertDictEqual(transformed_dset.format, prev_format)\n\n del transformed_dset # DatasetDict\n\n def test_with_format(self, in_memory):\n with tempfile.TemporaryDirectory() as tmp_dir:\n with self._create_dummy_dataset(in_memory, tmp_dir, multiple_columns=True) as dset:\n with dset.with_format(\"numpy\", columns=[\"col_1\"]) as dset2:\n dset.set_format(\"numpy\", columns=[\"col_1\"])\n self.assertDictEqual(dset.format, dset2.format)\n self.assertEqual(dset._fingerprint, dset2._fingerprint)\n # dset.reset_format()\n # self.assertNotEqual(dset.format, dset2.format)\n # self.assertNotEqual(dset._fingerprint, dset2._fingerprint)\n\n def test_with_transform(self, in_memory):\n with tempfile.TemporaryDirectory() as tmp_dir:\n with self._create_dummy_dataset(in_memory, tmp_dir, multiple_columns=True) as dset:\n transform = lambda x: {\"foo\": x[\"col_1\"]} # noqa: E731\n with dset.with_transform(transform, columns=[\"col_1\"]) as dset2:\n dset.set_transform(transform, columns=[\"col_1\"])\n self.assertDictEqual(dset.format, dset2.format)\n self.assertEqual(dset._fingerprint, dset2._fingerprint)\n dset.reset_format()\n self.assertNotEqual(dset.format, dset2.format)\n 
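# with_transform returned an independent dataset, so resetting the original must not affect dset2\n                    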
self.assertNotEqual(dset._fingerprint, dset2._fingerprint)\n\n    @require_tf\n    def test_tf_dataset_conversion(self, in_memory):\n        def tf_default_data_collator(features):\n            \"\"\"This is the tf_default_data_collator from transformers, copied so we avoid depending on that library.\"\"\"\n            import numpy as np\n            import tensorflow as tf\n\n            first = features[0]\n            batch = {}\n\n            # Special handling for labels.\n            # Ensure that tensor is created with the correct type\n            # (it should be automatically the case, but let's make sure of it.)\n            if \"label\" in first and first[\"label\"] is not None:\n                if isinstance(first[\"label\"], tf.Tensor):\n                    dtype = tf.int64 if first[\"label\"].dtype.is_integer else tf.float32\n                elif isinstance(first[\"label\"], np.ndarray):\n                    dtype = tf.int64 if np.issubdtype(first[\"label\"].dtype, np.integer) else tf.float32\n                elif isinstance(first[\"label\"], (tuple, list)):\n                    dtype = tf.int64 if isinstance(first[\"label\"][0], int) else tf.float32\n                else:\n                    dtype = tf.int64 if isinstance(first[\"label\"], int) else tf.float32\n                batch[\"labels\"] = tf.convert_to_tensor([f[\"label\"] for f in features], dtype=dtype)\n            elif \"label_ids\" in first and first[\"label_ids\"] is not None:\n                if isinstance(first[\"label_ids\"], tf.Tensor):\n                    batch[\"labels\"] = tf.stack([f[\"label_ids\"] for f in features])\n                else:\n                    dtype = tf.int64 if type(first[\"label_ids\"][0]) is int else tf.float32\n                    batch[\"labels\"] = tf.convert_to_tensor([f[\"label_ids\"] for f in features], dtype=dtype)\n\n            # Handling of all other possible keys.\n            # Again, we will use the first element to figure out which key/values are not None for this model.\n            for k, v in first.items():\n                if k not in (\"label\", \"label_ids\") and v is not None and not isinstance(v, str):\n                    if isinstance(v, (tf.Tensor, np.ndarray)):\n                        batch[k] = tf.stack([f[k] for f in features])\n                    else:\n                        batch[k] = tf.convert_to_tensor([f[k] for f in features])\n\n            return batch\n\n        tmp_dir = tempfile.TemporaryDirectory()\n        with self._create_dummy_dataset(in_memory, tmp_dir.name, array_features=True) as dset:\n            tf_dataset = dset.to_tf_dataset(\n                columns=\"col_3\", batch_size=4, shuffle=False, dummy_labels=False, collate_fn=tf_default_data_collator\n            )\n            batch = next(iter(tf_dataset))\n            self.assertEqual(batch.shape.as_list(), [4, 4])\n            self.assertEqual(batch.dtype.name, \"int64\")\n        with self._create_dummy_dataset(in_memory, tmp_dir.name, multiple_columns=True) as dset:\n            tf_dataset = dset.to_tf_dataset(\n                columns=\"col_1\", batch_size=4, shuffle=False, dummy_labels=False, collate_fn=tf_default_data_collator\n            )\n            batch = next(iter(tf_dataset))\n            self.assertEqual(batch.shape.as_list(), [4])\n            self.assertEqual(batch.dtype.name, \"int64\")\n        del tf_dataset # For correct cleanup\n\n\nclass MiscellaneousDatasetTest(TestCase):\n    def test_from_pandas(self):\n        data = {\"col_1\": [3, 2, 1, 0], \"col_2\": [\"a\", \"b\", \"c\", \"d\"]}\n        df = pd.DataFrame.from_dict(data)\n        with Dataset.from_pandas(df) as dset:\n            self.assertListEqual(dset[\"col_1\"], data[\"col_1\"])\n            self.assertListEqual(dset[\"col_2\"], data[\"col_2\"])\n            self.assertListEqual(list(dset.features.keys()), [\"col_1\", \"col_2\"])\n            self.assertDictEqual(dset.features, Features({\"col_1\": Value(\"int64\"), \"col_2\": Value(\"string\")}))\n\n        features = Features({\"col_1\": Value(\"int64\"), \"col_2\": Value(\"string\")})\n        with Dataset.from_pandas(df, features=features) as dset:\n            self.assertListEqual(dset[\"col_1\"], data[\"col_1\"])\n            self.assertListEqual(dset[\"col_2\"], data[\"col_2\"])\n            
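# explicit features that match the inferred schema should load exactly like the inferred ones above\n            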
self.assertListEqual(list(dset.features.keys()), [\"col_1\", \"col_2\"])\n self.assertDictEqual(dset.features, Features({\"col_1\": Value(\"int64\"), \"col_2\": Value(\"string\")}))\n\n features = Features({\"col_1\": Value(\"int64\"), \"col_2\": Value(\"string\")})\n with Dataset.from_pandas(df, features=features, info=DatasetInfo(features=features)) as dset:\n self.assertListEqual(dset[\"col_1\"], data[\"col_1\"])\n self.assertListEqual(dset[\"col_2\"], data[\"col_2\"])\n self.assertListEqual(list(dset.features.keys()), [\"col_1\", \"col_2\"])\n self.assertDictEqual(dset.features, Features({\"col_1\": Value(\"int64\"), \"col_2\": Value(\"string\")}))\n\n features = Features({\"col_1\": Value(\"string\"), \"col_2\": Value(\"string\")})\n self.assertRaises(pa.ArrowTypeError, Dataset.from_pandas, df, features=features)\n\n def test_from_dict(self):\n data = {\"col_1\": [3, 2, 1, 0], \"col_2\": [\"a\", \"b\", \"c\", \"d\"]}\n with Dataset.from_dict(data) as dset:\n self.assertListEqual(dset[\"col_1\"], data[\"col_1\"])\n self.assertListEqual(dset[\"col_2\"], data[\"col_2\"])\n self.assertListEqual(list(dset.features.keys()), [\"col_1\", \"col_2\"])\n self.assertDictEqual(dset.features, Features({\"col_1\": Value(\"int64\"), \"col_2\": Value(\"string\")}))\n\n features = Features({\"col_1\": Value(\"int64\"), \"col_2\": Value(\"string\")})\n with Dataset.from_dict(data, features=features) as dset:\n self.assertListEqual(dset[\"col_1\"], data[\"col_1\"])\n self.assertListEqual(dset[\"col_2\"], data[\"col_2\"])\n self.assertListEqual(list(dset.features.keys()), [\"col_1\", \"col_2\"])\n self.assertDictEqual(dset.features, Features({\"col_1\": Value(\"int64\"), \"col_2\": Value(\"string\")}))\n\n features = Features({\"col_1\": Value(\"int64\"), \"col_2\": Value(\"string\")})\n with Dataset.from_dict(data, features=features, info=DatasetInfo(features=features)) as dset:\n self.assertListEqual(dset[\"col_1\"], data[\"col_1\"])\n self.assertListEqual(dset[\"col_2\"], data[\"col_2\"])\n self.assertListEqual(list(dset.features.keys()), [\"col_1\", \"col_2\"])\n self.assertDictEqual(dset.features, Features({\"col_1\": Value(\"int64\"), \"col_2\": Value(\"string\")}))\n\n features = Features({\"col_1\": Value(\"string\"), \"col_2\": Value(\"string\")})\n with Dataset.from_dict(data, features=features) as dset:\n # the integers are converted to strings\n self.assertListEqual(dset[\"col_1\"], [str(x) for x in data[\"col_1\"]])\n self.assertListEqual(dset[\"col_2\"], data[\"col_2\"])\n self.assertListEqual(list(dset.features.keys()), [\"col_1\", \"col_2\"])\n self.assertDictEqual(dset.features, Features({\"col_1\": Value(\"string\"), \"col_2\": Value(\"string\")}))\n\n features = Features({\"col_1\": Value(\"int64\"), \"col_2\": Value(\"int64\")})\n self.assertRaises(ValueError, Dataset.from_dict, data, features=features)\n\n def test_concatenate_mixed_memory_and_disk(self):\n data1, data2, data3 = {\"id\": [0, 1, 2]}, {\"id\": [3, 4, 5]}, {\"id\": [6, 7]}\n info1 = DatasetInfo(description=\"Dataset1\")\n info2 = DatasetInfo(description=\"Dataset2\")\n with tempfile.TemporaryDirectory() as tmp_dir:\n with Dataset.from_dict(data1, info=info1).map(\n cache_file_name=os.path.join(tmp_dir, \"d1.arrow\")\n ) as dset1, Dataset.from_dict(data2, info=info2).map(\n cache_file_name=os.path.join(tmp_dir, \"d2.arrow\")\n ) as dset2, Dataset.from_dict(\n data3\n ) as dset3:\n with concatenate_datasets([dset1, dset2, dset3]) as concatenated_dset:\n self.assertEqual(len(concatenated_dset), len(dset1) + len(dset2) + 
len(dset3))\n self.assertListEqual(concatenated_dset[\"id\"], dset1[\"id\"] + dset2[\"id\"] + dset3[\"id\"])\n\n @require_transformers\n def test_set_format_encode(self):\n from transformers import BertTokenizer\n\n tokenizer = BertTokenizer.from_pretrained(\"bert-base-uncased\")\n\n def encode(batch):\n return tokenizer(batch[\"text\"], padding=\"longest\", return_tensors=\"np\")\n\n with Dataset.from_dict({\"text\": [\"hello there\", \"foo\"]}) as dset:\n dset.set_transform(transform=encode)\n self.assertEqual(str(dset[:2]), str(encode({\"text\": [\"hello there\", \"foo\"]})))\n\n\ndef test_cast_with_sliced_list():\n old_features = Features({\"foo\": Sequence(Value(\"int64\"))})\n new_features = Features({\"foo\": Sequence(Value(\"int32\"))})\n dataset = Dataset.from_dict({\"foo\": [[i] * (i % 3) for i in range(20)]}, features=old_features)\n casted_dataset = dataset.cast(new_features, batch_size=2) # small batch size to slice the ListArray\n assert dataset[\"foo\"] == casted_dataset[\"foo\"]\n assert casted_dataset.features == new_features\n\n\[email protected](\"include_nulls\", [False, True])\ndef test_class_encode_column_with_none(include_nulls):\n dataset = Dataset.from_dict({\"col_1\": [\"a\", \"b\", \"c\", None, \"d\", None]})\n dataset = dataset.class_encode_column(\"col_1\", include_nulls=include_nulls)\n class_names = [\"a\", \"b\", \"c\", \"d\"]\n if include_nulls:\n class_names += [\"None\"]\n assert isinstance(dataset.features[\"col_1\"], ClassLabel)\n assert set(dataset.features[\"col_1\"].names) == set(class_names)\n assert (None in dataset.unique(\"col_1\")) == (not include_nulls)\n\n\[email protected](\"null_placement\", [\"first\", \"last\"])\ndef test_sort_with_none(null_placement):\n dataset = Dataset.from_dict({\"col_1\": [\"item_2\", \"item_3\", \"item_1\", None, \"item_4\", None]})\n dataset = dataset.sort(\"col_1\", null_placement=null_placement)\n if null_placement == \"first\":\n assert dataset[\"col_1\"] == [None, None, \"item_1\", \"item_2\", \"item_3\", \"item_4\"]\n else:\n assert dataset[\"col_1\"] == [\"item_1\", \"item_2\", \"item_3\", \"item_4\", None, None]\n\n\ndef test_update_metadata_with_features(dataset_dict):\n table1 = pa.Table.from_pydict(dataset_dict)\n features1 = Features.from_arrow_schema(table1.schema)\n features2 = features1.copy()\n features2[\"col_2\"] = ClassLabel(num_classes=len(table1))\n assert features1 != features2\n\n table2 = update_metadata_with_features(table1, features2)\n metadata = json.loads(table2.schema.metadata[\"huggingface\".encode(\"utf-8\")].decode())\n assert features2 == Features.from_dict(metadata[\"info\"][\"features\"])\n\n with Dataset(table1) as dset1, Dataset(table2) as dset2:\n assert dset1.features == features1\n assert dset2.features == features2\n\n\[email protected](\"dataset_type\", [\"in_memory\", \"memory_mapped\", \"mixed\"])\[email protected](\"axis, expected_shape\", [(0, (4, 3)), (1, (2, 6))])\ndef test_concatenate_datasets(dataset_type, axis, expected_shape, dataset_dict, arrow_path):\n table = {\n \"in_memory\": InMemoryTable.from_pydict(dataset_dict),\n \"memory_mapped\": MemoryMappedTable.from_file(arrow_path),\n }\n tables = [\n table[dataset_type if dataset_type != \"mixed\" else \"memory_mapped\"].slice(0, 2), # shape = (2, 3)\n table[dataset_type if dataset_type != \"mixed\" else \"in_memory\"].slice(2, 4), # shape = (2, 3)\n ]\n if axis == 1: # don't duplicate columns\n tables[1] = tables[1].rename_columns([col + \"_bis\" for col in tables[1].column_names])\n datasets = [Dataset(table) 
for table in tables]\n dataset = concatenate_datasets(datasets, axis=axis)\n assert dataset.shape == expected_shape\n assert_arrow_metadata_are_synced_with_dataset_features(dataset)\n\n\ndef test_concatenate_datasets_new_columns():\n dataset1 = Dataset.from_dict({\"col_1\": [\"a\", \"b\", \"c\"]})\n dataset2 = Dataset.from_dict({\"col_1\": [\"d\", \"e\", \"f\"], \"col_2\": [True, False, True]})\n dataset = concatenate_datasets([dataset1, dataset2])\n assert dataset.data.shape == (6, 2)\n assert dataset.features == Features({\"col_1\": Value(\"string\"), \"col_2\": Value(\"bool\")})\n assert dataset[:] == {\"col_1\": [\"a\", \"b\", \"c\", \"d\", \"e\", \"f\"], \"col_2\": [None, None, None, True, False, True]}\n dataset3 = Dataset.from_dict({\"col_3\": [\"a_1\"]})\n dataset = concatenate_datasets([dataset, dataset3])\n assert dataset.data.shape == (7, 3)\n assert dataset.features == Features({\"col_1\": Value(\"string\"), \"col_2\": Value(\"bool\"), \"col_3\": Value(\"string\")})\n assert dataset[:] == {\n \"col_1\": [\"a\", \"b\", \"c\", \"d\", \"e\", \"f\", None],\n \"col_2\": [None, None, None, True, False, True, None],\n \"col_3\": [None, None, None, None, None, None, \"a_1\"],\n }\n\n\[email protected](\"axis\", [0, 1])\ndef test_concatenate_datasets_complex_features(axis):\n n = 5\n dataset1 = Dataset.from_dict(\n {\"col_1\": [0] * n, \"col_2\": list(range(n))},\n features=Features({\"col_1\": Value(\"int32\"), \"col_2\": ClassLabel(num_classes=n)}),\n )\n if axis == 1:\n dataset2 = dataset1.rename_columns({col: col + \"_\" for col in dataset1.column_names})\n expected_features = Features({**dataset1.features, **dataset2.features})\n else:\n dataset2 = dataset1\n expected_features = dataset1.features\n assert concatenate_datasets([dataset1, dataset2], axis=axis).features == expected_features\n\n\[email protected](\"other_dataset_type\", [\"in_memory\", \"memory_mapped\", \"concatenation\"])\[email protected](\"axis, expected_shape\", [(0, (8, 3)), (1, (4, 6))])\ndef test_concatenate_datasets_with_concatenation_tables(\n axis, expected_shape, other_dataset_type, dataset_dict, arrow_path\n):\n def _create_concatenation_table(axis):\n if axis == 0: # shape: (4, 3) = (4, 1) + (4, 2)\n concatenation_table = ConcatenationTable.from_blocks(\n [\n [\n InMemoryTable.from_pydict({\"col_1\": dataset_dict[\"col_1\"]}),\n MemoryMappedTable.from_file(arrow_path).remove_column(0),\n ]\n ]\n )\n elif axis == 1: # shape: (4, 3) = (1, 3) + (3, 3)\n concatenation_table = ConcatenationTable.from_blocks(\n [\n [InMemoryTable.from_pydict(dataset_dict).slice(0, 1)],\n [MemoryMappedTable.from_file(arrow_path).slice(1, 4)],\n ]\n )\n return concatenation_table\n\n concatenation_table = _create_concatenation_table(axis)\n assert concatenation_table.shape == (4, 3)\n\n if other_dataset_type == \"in_memory\":\n other_table = InMemoryTable.from_pydict(dataset_dict)\n elif other_dataset_type == \"memory_mapped\":\n other_table = MemoryMappedTable.from_file(arrow_path)\n elif other_dataset_type == \"concatenation\":\n other_table = _create_concatenation_table(axis)\n assert other_table.shape == (4, 3)\n\n tables = [concatenation_table, other_table]\n\n if axis == 1: # don't duplicate columns\n tables[1] = tables[1].rename_columns([col + \"_bis\" for col in tables[1].column_names])\n\n for tables in [tables, reversed(tables)]:\n datasets = [Dataset(table) for table in tables]\n dataset = concatenate_datasets(datasets, axis=axis)\n assert dataset.shape == expected_shape\n\n\ndef 
test_concatenate_datasets_duplicate_columns(dataset):\n    with pytest.raises(ValueError) as excinfo:\n        concatenate_datasets([dataset, dataset], axis=1)\n    assert \"duplicated\" in str(excinfo.value)\n\n\ndef test_interleave_datasets():\n    d1 = Dataset.from_dict({\"a\": [0, 1, 2]})\n    d2 = Dataset.from_dict({\"a\": [10, 11, 12, 13]})\n    d3 = Dataset.from_dict({\"a\": [22, 21, 20]}).select([2, 1, 0])\n    dataset = interleave_datasets([d1, d2, d3])\n    expected_length = 3 * min(len(d1), len(d2), len(d3))\n    expected_values = [x[\"a\"] for x in itertools.chain(*zip(d1, d2, d3))]\n    assert isinstance(dataset, Dataset)\n    assert len(dataset) == expected_length\n    assert dataset[\"a\"] == expected_values\n    assert dataset._fingerprint == interleave_datasets([d1, d2, d3])._fingerprint\n\n\ndef test_interleave_datasets_probabilities():\n    seed = 42\n    probabilities = [0.3, 0.5, 0.2]\n    d1 = Dataset.from_dict({\"a\": [0, 1, 2]})\n    d2 = Dataset.from_dict({\"a\": [10, 11, 12, 13]})\n    d3 = Dataset.from_dict({\"a\": [22, 21, 20]}).select([2, 1, 0])\n    dataset = interleave_datasets([d1, d2, d3], probabilities=probabilities, seed=seed)\n    expected_length = 7 # hardcoded\n    expected_values = [10, 11, 20, 12, 0, 21, 13] # hardcoded\n    assert isinstance(dataset, Dataset)\n    assert len(dataset) == expected_length\n    assert dataset[\"a\"] == expected_values\n    assert (\n        dataset._fingerprint == interleave_datasets([d1, d2, d3], probabilities=probabilities, seed=seed)._fingerprint\n    )\n\n\[email protected](\n    \"column, expected_dtype\",\n    [([\"a\", \"b\", \"c\", \"d\"], \"string\"), ([1, 2, 3, 4], \"int64\"), ([1.0, 2.0, 3.0, 4.0], \"float64\")],\n)\[email protected](\"in_memory\", [False, True])\[email protected](\n    \"transform\",\n    [None, (\"shuffle\", (42,), {}), (\"with_format\", (\"pandas\",), {}), (\"class_encode_column\", (\"col_2\",), {})],\n)\ndef test_dataset_add_column(column, expected_dtype, in_memory, transform, dataset_dict, arrow_path):\n    column_name = \"col_4\"\n    original_dataset = (\n        Dataset(InMemoryTable.from_pydict(dataset_dict))\n        if in_memory\n        else Dataset(MemoryMappedTable.from_file(arrow_path))\n    )\n    if transform is not None:\n        transform_name, args, kwargs = transform\n        original_dataset: Dataset = getattr(original_dataset, transform_name)(*args, **kwargs)\n    dataset = original_dataset.add_column(column_name, column)\n    assert dataset.data.shape == (4, 4)\n    expected_features = {\"col_1\": \"string\", \"col_2\": \"int64\", \"col_3\": \"float64\"}\n    # Sort expected features as in the original dataset\n    expected_features = {feature: expected_features[feature] for feature in original_dataset.features}\n    # Add new column feature\n    expected_features[column_name] = expected_dtype\n    assert dataset.data.column_names == list(expected_features.keys())\n    for feature, expected_dtype in expected_features.items():\n        assert dataset.features[feature].dtype == expected_dtype\n    assert len(dataset.data.blocks) == (1 if in_memory else 2) # multiple InMemoryTables are consolidated as one\n    assert dataset.format[\"type\"] == original_dataset.format[\"type\"]\n    assert dataset._fingerprint != original_dataset._fingerprint\n    dataset.reset_format()\n    original_dataset.reset_format()\n    assert all(dataset[col] == original_dataset[col] for col in original_dataset.column_names)\n    assert set(dataset[\"col_4\"]) == set(column)\n    if dataset._indices is not None:\n        dataset_indices = dataset._indices[\"indices\"].to_pylist()\n        expected_dataset_indices = original_dataset._indices[\"indices\"].to_pylist()\n        assert dataset_indices == 
expected_dataset_indices\n assert_arrow_metadata_are_synced_with_dataset_features(dataset)\n\n\[email protected](\n \"transform\",\n [None, (\"shuffle\", (42,), {}), (\"with_format\", (\"pandas\",), {}), (\"class_encode_column\", (\"col_2\",), {})],\n)\[email protected](\"in_memory\", [False, True])\[email protected](\n \"item\",\n [\n {\"col_1\": \"4\", \"col_2\": 4, \"col_3\": 4.0},\n {\"col_1\": \"4\", \"col_2\": \"4\", \"col_3\": \"4\"},\n {\"col_1\": 4, \"col_2\": 4, \"col_3\": 4},\n {\"col_1\": 4.0, \"col_2\": 4.0, \"col_3\": 4.0},\n ],\n)\ndef test_dataset_add_item(item, in_memory, dataset_dict, arrow_path, transform):\n dataset_to_test = (\n Dataset(InMemoryTable.from_pydict(dataset_dict))\n if in_memory\n else Dataset(MemoryMappedTable.from_file(arrow_path))\n )\n if transform is not None:\n transform_name, args, kwargs = transform\n dataset_to_test: Dataset = getattr(dataset_to_test, transform_name)(*args, **kwargs)\n dataset = dataset_to_test.add_item(item)\n assert dataset.data.shape == (5, 3)\n expected_features = dataset_to_test.features\n assert sorted(dataset.data.column_names) == sorted(expected_features.keys())\n for feature, expected_dtype in expected_features.items():\n assert dataset.features[feature] == expected_dtype\n assert len(dataset.data.blocks) == 1 if in_memory else 2 # multiple InMemoryTables are consolidated as one\n assert dataset.format[\"type\"] == dataset_to_test.format[\"type\"]\n assert dataset._fingerprint != dataset_to_test._fingerprint\n dataset.reset_format()\n dataset_to_test.reset_format()\n assert dataset[:-1] == dataset_to_test[:]\n assert {k: int(v) for k, v in dataset[-1].items()} == {k: int(v) for k, v in item.items()}\n if dataset._indices is not None:\n dataset_indices = dataset._indices[\"indices\"].to_pylist()\n dataset_to_test_indices = dataset_to_test._indices[\"indices\"].to_pylist()\n assert dataset_indices == dataset_to_test_indices + [len(dataset_to_test._data)]\n\n\ndef test_dataset_add_item_new_columns():\n dataset = Dataset.from_dict({\"col_1\": [0, 1, 2]}, features=Features({\"col_1\": Value(\"uint8\")}))\n dataset = dataset.add_item({\"col_1\": 3, \"col_2\": \"a\"})\n assert dataset.data.shape == (4, 2)\n assert dataset.features == Features({\"col_1\": Value(\"uint8\"), \"col_2\": Value(\"string\")})\n assert dataset[:] == {\"col_1\": [0, 1, 2, 3], \"col_2\": [None, None, None, \"a\"]}\n dataset = dataset.add_item({\"col_3\": True})\n assert dataset.data.shape == (5, 3)\n assert dataset.features == Features({\"col_1\": Value(\"uint8\"), \"col_2\": Value(\"string\"), \"col_3\": Value(\"bool\")})\n assert dataset[:] == {\n \"col_1\": [0, 1, 2, 3, None],\n \"col_2\": [None, None, None, \"a\", None],\n \"col_3\": [None, None, None, None, True],\n }\n\n\ndef test_dataset_add_item_introduce_feature_type():\n dataset = Dataset.from_dict({\"col_1\": [None, None, None]})\n dataset = dataset.add_item({\"col_1\": \"a\"})\n assert dataset.data.shape == (4, 1)\n assert dataset.features == Features({\"col_1\": Value(\"string\")})\n assert dataset[:] == {\"col_1\": [None, None, None, \"a\"]}\n\n\[email protected](\"in_memory\", [False, True])\ndef test_dataset_from_file(in_memory, dataset, arrow_file):\n filename = arrow_file\n with assert_arrow_memory_increases() if in_memory else assert_arrow_memory_doesnt_increase():\n dataset_from_file = Dataset.from_file(filename, in_memory=in_memory)\n assert dataset_from_file.features.type == dataset.features.type\n assert dataset_from_file.features == dataset.features\n assert 
dataset_from_file.cache_files == ([{\"filename\": filename}] if not in_memory else [])\n\n\ndef _check_csv_dataset(dataset, expected_features):\n assert isinstance(dataset, Dataset)\n assert dataset.num_rows == 4\n assert dataset.num_columns == 3\n assert dataset.column_names == [\"col_1\", \"col_2\", \"col_3\"]\n for feature, expected_dtype in expected_features.items():\n assert dataset.features[feature].dtype == expected_dtype\n\n\[email protected](\"keep_in_memory\", [False, True])\ndef test_dataset_from_csv_keep_in_memory(keep_in_memory, csv_path, tmp_path):\n cache_dir = tmp_path / \"cache\"\n expected_features = {\"col_1\": \"int64\", \"col_2\": \"int64\", \"col_3\": \"float64\"}\n with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():\n dataset = Dataset.from_csv(csv_path, cache_dir=cache_dir, keep_in_memory=keep_in_memory)\n _check_csv_dataset(dataset, expected_features)\n\n\[email protected](\n \"features\",\n [\n None,\n {\"col_1\": \"string\", \"col_2\": \"int64\", \"col_3\": \"float64\"},\n {\"col_1\": \"string\", \"col_2\": \"string\", \"col_3\": \"string\"},\n {\"col_1\": \"int32\", \"col_2\": \"int32\", \"col_3\": \"int32\"},\n {\"col_1\": \"float32\", \"col_2\": \"float32\", \"col_3\": \"float32\"},\n ],\n)\ndef test_dataset_from_csv_features(features, csv_path, tmp_path):\n cache_dir = tmp_path / \"cache\"\n # CSV file loses col_1 string dtype information: default now is \"int64\" instead of \"string\"\n default_expected_features = {\"col_1\": \"int64\", \"col_2\": \"int64\", \"col_3\": \"float64\"}\n expected_features = features.copy() if features else default_expected_features\n features = (\n Features({feature: Value(dtype) for feature, dtype in features.items()}) if features is not None else None\n )\n dataset = Dataset.from_csv(csv_path, features=features, cache_dir=cache_dir)\n _check_csv_dataset(dataset, expected_features)\n\n\[email protected](\"split\", [None, NamedSplit(\"train\"), \"train\", \"test\"])\ndef test_dataset_from_csv_split(split, csv_path, tmp_path):\n cache_dir = tmp_path / \"cache\"\n expected_features = {\"col_1\": \"int64\", \"col_2\": \"int64\", \"col_3\": \"float64\"}\n dataset = Dataset.from_csv(csv_path, cache_dir=cache_dir, split=split)\n _check_csv_dataset(dataset, expected_features)\n assert dataset.split == str(split) if split else \"train\"\n\n\[email protected](\"path_type\", [str, list])\ndef test_dataset_from_csv_path_type(path_type, csv_path, tmp_path):\n if issubclass(path_type, str):\n path = csv_path\n elif issubclass(path_type, list):\n path = [csv_path]\n cache_dir = tmp_path / \"cache\"\n expected_features = {\"col_1\": \"int64\", \"col_2\": \"int64\", \"col_3\": \"float64\"}\n dataset = Dataset.from_csv(path, cache_dir=cache_dir)\n _check_csv_dataset(dataset, expected_features)\n\n\ndef _check_json_dataset(dataset, expected_features):\n assert isinstance(dataset, Dataset)\n assert dataset.num_rows == 4\n assert dataset.num_columns == 3\n assert dataset.column_names == [\"col_1\", \"col_2\", \"col_3\"]\n for feature, expected_dtype in expected_features.items():\n assert dataset.features[feature].dtype == expected_dtype\n\n\[email protected](\"keep_in_memory\", [False, True])\ndef test_dataset_from_json_keep_in_memory(keep_in_memory, jsonl_path, tmp_path):\n cache_dir = tmp_path / \"cache\"\n expected_features = {\"col_1\": \"string\", \"col_2\": \"int64\", \"col_3\": \"float64\"}\n with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():\n 
dataset = Dataset.from_json(jsonl_path, cache_dir=cache_dir, keep_in_memory=keep_in_memory)\n _check_json_dataset(dataset, expected_features)\n\n\[email protected](\n \"features\",\n [\n None,\n {\"col_1\": \"string\", \"col_2\": \"int64\", \"col_3\": \"float64\"},\n {\"col_1\": \"string\", \"col_2\": \"string\", \"col_3\": \"string\"},\n {\"col_1\": \"int32\", \"col_2\": \"int32\", \"col_3\": \"int32\"},\n {\"col_1\": \"float32\", \"col_2\": \"float32\", \"col_3\": \"float32\"},\n ],\n)\ndef test_dataset_from_json_features(features, jsonl_path, tmp_path):\n cache_dir = tmp_path / \"cache\"\n default_expected_features = {\"col_1\": \"string\", \"col_2\": \"int64\", \"col_3\": \"float64\"}\n expected_features = features.copy() if features else default_expected_features\n features = (\n Features({feature: Value(dtype) for feature, dtype in features.items()}) if features is not None else None\n )\n dataset = Dataset.from_json(jsonl_path, features=features, cache_dir=cache_dir)\n _check_json_dataset(dataset, expected_features)\n\n\ndef test_dataset_from_json_with_class_label_feature(jsonl_str_path, tmp_path):\n features = Features(\n {\"col_1\": ClassLabel(names=[\"s0\", \"s1\", \"s2\", \"s3\"]), \"col_2\": Value(\"int64\"), \"col_3\": Value(\"float64\")}\n )\n cache_dir = tmp_path / \"cache\"\n dataset = Dataset.from_json(jsonl_str_path, features=features, cache_dir=cache_dir)\n assert dataset.features[\"col_1\"].dtype == \"int64\"\n\n\[email protected](\"split\", [None, NamedSplit(\"train\"), \"train\", \"test\"])\ndef test_dataset_from_json_split(split, jsonl_path, tmp_path):\n cache_dir = tmp_path / \"cache\"\n expected_features = {\"col_1\": \"string\", \"col_2\": \"int64\", \"col_3\": \"float64\"}\n dataset = Dataset.from_json(jsonl_path, cache_dir=cache_dir, split=split)\n _check_json_dataset(dataset, expected_features)\n assert dataset.split == str(split) if split else \"train\"\n\n\[email protected](\"path_type\", [str, list])\ndef test_dataset_from_json_path_type(path_type, jsonl_path, tmp_path):\n if issubclass(path_type, str):\n path = jsonl_path\n elif issubclass(path_type, list):\n path = [jsonl_path]\n cache_dir = tmp_path / \"cache\"\n expected_features = {\"col_1\": \"string\", \"col_2\": \"int64\", \"col_3\": \"float64\"}\n dataset = Dataset.from_json(path, cache_dir=cache_dir)\n _check_json_dataset(dataset, expected_features)\n\n\ndef _check_parquet_dataset(dataset, expected_features):\n assert isinstance(dataset, Dataset)\n assert dataset.num_rows == 4\n assert dataset.num_columns == 3\n assert dataset.column_names == [\"col_1\", \"col_2\", \"col_3\"]\n for feature, expected_dtype in expected_features.items():\n assert dataset.features[feature].dtype == expected_dtype\n\n\[email protected](\"keep_in_memory\", [False, True])\ndef test_dataset_from_parquet_keep_in_memory(keep_in_memory, parquet_path, tmp_path):\n cache_dir = tmp_path / \"cache\"\n expected_features = {\"col_1\": \"string\", \"col_2\": \"int64\", \"col_3\": \"float64\"}\n with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():\n dataset = Dataset.from_parquet(parquet_path, cache_dir=cache_dir, keep_in_memory=keep_in_memory)\n _check_parquet_dataset(dataset, expected_features)\n\n\[email protected](\n \"features\",\n [\n None,\n {\"col_1\": \"string\", \"col_2\": \"int64\", \"col_3\": \"float64\"},\n {\"col_1\": \"string\", \"col_2\": \"string\", \"col_3\": \"string\"},\n {\"col_1\": \"int32\", \"col_2\": \"int32\", \"col_3\": \"int32\"},\n {\"col_1\": \"float32\", 
\"col_2\": \"float32\", \"col_3\": \"float32\"},\n ],\n)\ndef test_dataset_from_parquet_features(features, parquet_path, tmp_path):\n cache_dir = tmp_path / \"cache\"\n default_expected_features = {\"col_1\": \"string\", \"col_2\": \"int64\", \"col_3\": \"float64\"}\n expected_features = features.copy() if features else default_expected_features\n features = (\n Features({feature: Value(dtype) for feature, dtype in features.items()}) if features is not None else None\n )\n dataset = Dataset.from_parquet(parquet_path, features=features, cache_dir=cache_dir)\n _check_parquet_dataset(dataset, expected_features)\n\n\[email protected](\"split\", [None, NamedSplit(\"train\"), \"train\", \"test\"])\ndef test_dataset_from_parquet_split(split, parquet_path, tmp_path):\n cache_dir = tmp_path / \"cache\"\n expected_features = {\"col_1\": \"string\", \"col_2\": \"int64\", \"col_3\": \"float64\"}\n dataset = Dataset.from_parquet(parquet_path, cache_dir=cache_dir, split=split)\n _check_parquet_dataset(dataset, expected_features)\n assert dataset.split == str(split) if split else \"train\"\n\n\[email protected](\"path_type\", [str, list])\ndef test_dataset_from_parquet_path_type(path_type, parquet_path, tmp_path):\n if issubclass(path_type, str):\n path = parquet_path\n elif issubclass(path_type, list):\n path = [parquet_path]\n cache_dir = tmp_path / \"cache\"\n expected_features = {\"col_1\": \"string\", \"col_2\": \"int64\", \"col_3\": \"float64\"}\n dataset = Dataset.from_parquet(path, cache_dir=cache_dir)\n _check_parquet_dataset(dataset, expected_features)\n\n\ndef _check_text_dataset(dataset, expected_features):\n assert isinstance(dataset, Dataset)\n assert dataset.num_rows == 4\n assert dataset.num_columns == 1\n assert dataset.column_names == [\"text\"]\n for feature, expected_dtype in expected_features.items():\n assert dataset.features[feature].dtype == expected_dtype\n\n\[email protected](\"keep_in_memory\", [False, True])\ndef test_dataset_from_text_keep_in_memory(keep_in_memory, text_path, tmp_path):\n cache_dir = tmp_path / \"cache\"\n expected_features = {\"text\": \"string\"}\n with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():\n dataset = Dataset.from_text(text_path, cache_dir=cache_dir, keep_in_memory=keep_in_memory)\n _check_text_dataset(dataset, expected_features)\n\n\[email protected](\n \"features\",\n [\n None,\n {\"text\": \"string\"},\n {\"text\": \"int32\"},\n {\"text\": \"float32\"},\n ],\n)\ndef test_dataset_from_text_features(features, text_path, tmp_path):\n cache_dir = tmp_path / \"cache\"\n default_expected_features = {\"text\": \"string\"}\n expected_features = features.copy() if features else default_expected_features\n features = (\n Features({feature: Value(dtype) for feature, dtype in features.items()}) if features is not None else None\n )\n dataset = Dataset.from_text(text_path, features=features, cache_dir=cache_dir)\n _check_text_dataset(dataset, expected_features)\n\n\[email protected](\"split\", [None, NamedSplit(\"train\"), \"train\", \"test\"])\ndef test_dataset_from_text_split(split, text_path, tmp_path):\n cache_dir = tmp_path / \"cache\"\n expected_features = {\"text\": \"string\"}\n dataset = Dataset.from_text(text_path, cache_dir=cache_dir, split=split)\n _check_text_dataset(dataset, expected_features)\n assert dataset.split == str(split) if split else \"train\"\n\n\[email protected](\"path_type\", [str, list])\ndef test_dataset_from_text_path_type(path_type, text_path, tmp_path):\n if 
issubclass(path_type, str):\n path = text_path\n elif issubclass(path_type, list):\n path = [text_path]\n cache_dir = tmp_path / \"cache\"\n expected_features = {\"text\": \"string\"}\n dataset = Dataset.from_text(path, cache_dir=cache_dir)\n _check_text_dataset(dataset, expected_features)\n\n\ndef test_dataset_to_json(dataset, tmp_path):\n file_path = tmp_path / \"test_path.jsonl\"\n bytes_written = dataset.to_json(path_or_buf=file_path)\n assert file_path.is_file()\n assert bytes_written == file_path.stat().st_size\n df = pd.read_json(file_path, orient=\"records\", lines=True)\n assert df.shape == dataset.shape\n assert list(df.columns) == list(dataset.column_names)\n\n\[email protected](\"in_memory\", [False, True])\[email protected](\n \"method_and_params\",\n [\n (\"rename_column\", tuple(), {\"original_column_name\": \"labels\", \"new_column_name\": \"label\"}),\n (\"remove_columns\", tuple(), {\"column_names\": \"labels\"}),\n (\n \"cast\",\n tuple(),\n {\n \"features\": Features(\n {\n \"tokens\": Sequence(Value(\"string\")),\n \"labels\": Sequence(Value(\"int16\")),\n \"answers\": Sequence(\n {\n \"text\": Value(\"string\"),\n \"answer_start\": Value(\"int32\"),\n }\n ),\n \"id\": Value(\"int32\"),\n }\n )\n },\n ),\n (\"flatten\", tuple(), {}),\n (\"rename_column_\", tuple(), {\"original_column_name\": \"labels\", \"new_column_name\": \"label\"}),\n (\"remove_columns_\", tuple(), {\"column_names\": \"labels\"}),\n (\n \"cast_\",\n tuple(),\n {\n \"features\": Features(\n {\n \"tokens\": Sequence(Value(\"string\")),\n \"labels\": Sequence(Value(\"int16\")),\n \"answers\": Sequence(\n {\n \"text\": Value(\"string\"),\n \"answer_start\": Value(\"int32\"),\n }\n ),\n \"id\": Value(\"int32\"),\n }\n )\n },\n ),\n (\"flatten_\", tuple(), {}),\n ],\n)\ndef test_pickle_dataset_after_transforming_the_table(in_memory, method_and_params, arrow_file):\n method, args, kwargs = method_and_params\n with Dataset.from_file(arrow_file, in_memory=in_memory) as dataset, Dataset.from_file(\n arrow_file, in_memory=in_memory\n ) as reference_dataset:\n out = getattr(dataset, method)(*args, **kwargs)\n dataset = out if out is not None else dataset\n pickled_dataset = pickle.dumps(dataset)\n reloaded_dataset = pickle.loads(pickled_dataset)\n\n assert dataset._data != reference_dataset._data\n assert dataset._data.table == reloaded_dataset._data.table\n\n\n@require_s3\ndef test_dummy_dataset_serialize_s3(s3, dataset):\n mock_bucket = s3_test_bucket_name\n dataset_path = f\"s3://{mock_bucket}/my_dataset\"\n features = dataset.features\n dataset.save_to_disk(dataset_path, s3)\n dataset = dataset.load_from_disk(dataset_path, s3)\n assert os.path.isfile(dataset.cache_files[0][\"filename\"])\n\n assert len(dataset) == 10\n assert len(dataset.shuffle()) == 10\n assert dataset.features == features\n assert dataset[0][\"id\"] == 0\n assert dataset[\"id\"][0] == 0\n\n\[email protected](\n \"uri_or_path\",\n [\n \"relative/path\",\n \"/absolute/path\",\n \"s3://bucket/relative/path\",\n \"hdfs://relative/path\",\n \"hdfs:///absolute/path\",\n ],\n)\ndef test_build_local_temp_path(uri_or_path):\n extracted_path = extract_path_from_uri(uri_or_path)\n local_temp_path = Dataset._build_local_temp_path(extracted_path)\n path_relative_to_tmp_dir = local_temp_path.as_posix().split(\"tmp\")[-1].split(\"/\", 1)[1]\n\n assert (\n \"tmp\" in local_temp_path.as_posix()\n and \"hdfs\" not in path_relative_to_tmp_dir\n and \"s3\" not in path_relative_to_tmp_dir\n and not local_temp_path.as_posix().startswith(extracted_path)\n and 
local_temp_path.as_posix().endswith(extracted_path)\n ), f\"Local temp path: {local_temp_path.as_posix()}\"\n\n\nclass TaskTemplatesTest(TestCase):\n def test_task_text_classification(self):\n labels = sorted([\"pos\", \"neg\"])\n features_before_cast = Features(\n {\n \"input_text\": Value(\"string\"),\n \"input_labels\": ClassLabel(names=labels),\n }\n )\n # Labels are cast to tuple during `TextClassification.__post_init_`, so we do the same here\n features_after_cast = Features(\n {\n \"text\": Value(\"string\"),\n \"labels\": ClassLabel(names=labels),\n }\n )\n # Label names are added in `DatasetInfo.__post_init__` so not needed here\n task_without_labels = TextClassification(text_column=\"input_text\", label_column=\"input_labels\")\n info1 = DatasetInfo(\n features=features_before_cast,\n task_templates=task_without_labels,\n )\n # Label names are required when passing a TextClassification template directly to `Dataset.prepare_for_task`\n # However they also can be used to define `DatasetInfo` so we include a test for this too\n task_with_labels = TextClassification(text_column=\"input_text\", label_column=\"input_labels\")\n info2 = DatasetInfo(\n features=features_before_cast,\n task_templates=task_with_labels,\n )\n data = {\"input_text\": [\"i love transformers!\"], \"input_labels\": [1]}\n # Test we can load from task name when label names not included in template (default behaviour)\n with Dataset.from_dict(data, info=info1) as dset:\n self.assertSetEqual(set([\"input_text\", \"input_labels\"]), set(dset.column_names))\n self.assertDictEqual(features_before_cast, dset.features)\n with dset.prepare_for_task(task=\"text-classification\") as dset:\n self.assertSetEqual(set([\"labels\", \"text\"]), set(dset.column_names))\n self.assertDictEqual(features_after_cast, dset.features)\n # Test we can load from task name when label names included in template\n with Dataset.from_dict(data, info=info2) as dset:\n self.assertSetEqual(set([\"input_text\", \"input_labels\"]), set(dset.column_names))\n self.assertDictEqual(features_before_cast, dset.features)\n with dset.prepare_for_task(task=\"text-classification\") as dset:\n self.assertSetEqual(set([\"labels\", \"text\"]), set(dset.column_names))\n self.assertDictEqual(features_after_cast, dset.features)\n # Test we can load from TextClassification template\n info1.task_templates = None\n with Dataset.from_dict(data, info=info1) as dset:\n with dset.prepare_for_task(task=task_with_labels) as dset:\n self.assertSetEqual(set([\"labels\", \"text\"]), set(dset.column_names))\n self.assertDictEqual(features_after_cast, dset.features)\n\n def test_task_question_answering(self):\n features_before_cast = Features(\n {\n \"input_context\": Value(\"string\"),\n \"input_question\": Value(\"string\"),\n \"input_answers\": Sequence(\n {\n \"text\": Value(\"string\"),\n \"answer_start\": Value(\"int32\"),\n }\n ),\n }\n )\n features_after_cast = Features(\n {\n \"context\": Value(\"string\"),\n \"question\": Value(\"string\"),\n \"answers\": Sequence(\n {\n \"text\": Value(\"string\"),\n \"answer_start\": Value(\"int32\"),\n }\n ),\n }\n )\n task = QuestionAnsweringExtractive(\n context_column=\"input_context\", question_column=\"input_question\", answers_column=\"input_answers\"\n )\n info = DatasetInfo(features=features_before_cast, task_templates=task)\n data = {\n \"input_context\": [\"huggingface is going to the moon!\"],\n \"input_question\": [\"where is huggingface going?\"],\n \"input_answers\": [{\"text\": [\"to the moon!\"], \"answer_start\": 
[2]}],\n }\n # Test we can load from task name\n with Dataset.from_dict(data, info=info) as dset:\n self.assertSetEqual(\n set([\"input_context\", \"input_question\", \"input_answers.text\", \"input_answers.answer_start\"]),\n set(dset.flatten().column_names),\n )\n self.assertDictEqual(features_before_cast, dset.features)\n with dset.prepare_for_task(task=\"question-answering-extractive\") as dset:\n self.assertSetEqual(\n set([\"context\", \"question\", \"answers.text\", \"answers.answer_start\"]),\n set(dset.flatten().column_names),\n )\n self.assertDictEqual(features_after_cast, dset.features)\n # Test we can load from QuestionAnsweringExtractive template\n info.task_templates = None\n with Dataset.from_dict(data, info=info) as dset:\n with dset.prepare_for_task(task=task) as dset:\n self.assertSetEqual(\n set([\"context\", \"question\", \"answers.text\", \"answers.answer_start\"]),\n set(dset.flatten().column_names),\n )\n self.assertDictEqual(features_after_cast, dset.features)\n\n def test_task_summarization(self):\n # Include a dummy extra column `dummy` to test we drop it correctly\n features_before_cast = Features(\n {\"input_text\": Value(\"string\"), \"input_summary\": Value(\"string\"), \"dummy\": Value(\"string\")}\n )\n features_after_cast = Features({\"text\": Value(\"string\"), \"summary\": Value(\"string\")})\n task = Summarization(text_column=\"input_text\", summary_column=\"input_summary\")\n info = DatasetInfo(features=features_before_cast, task_templates=task)\n data = {\n \"input_text\": [\"jack and jill took a taxi to attend a super duper party in the city.\"],\n \"input_summary\": [\"jack and jill attend party\"],\n \"dummy\": [\"123456\"],\n }\n # Test we can load from task name\n with Dataset.from_dict(data, info=info) as dset:\n with dset.prepare_for_task(task=\"summarization\") as dset:\n self.assertSetEqual(\n set([\"text\", \"summary\"]),\n set(dset.column_names),\n )\n self.assertDictEqual(features_after_cast, dset.features)\n # Test we can load from Summarization template\n info.task_templates = None\n with Dataset.from_dict(data, info=info) as dset:\n with dset.prepare_for_task(task=task) as dset:\n self.assertSetEqual(\n set([\"text\", \"summary\"]),\n set(dset.column_names),\n )\n self.assertDictEqual(features_after_cast, dset.features)\n\n def test_task_automatic_speech_recognition(self):\n # Include a dummy extra column `dummy` to test we drop it correctly\n features_before_cast = Features(\n {\n \"input_audio_file_path\": Value(\"string\"),\n \"input_transcription\": Value(\"string\"),\n \"dummy\": Value(\"string\"),\n }\n )\n features_after_cast = Features({\"audio_file_path\": Value(\"string\"), \"transcription\": Value(\"string\")})\n task = AutomaticSpeechRecognition(\n audio_file_path_column=\"input_audio_file_path\", transcription_column=\"input_transcription\"\n )\n info = DatasetInfo(features=features_before_cast, task_templates=task)\n data = {\n \"input_audio_file_path\": [\"path/to/some/audio/file.wav\"],\n \"input_transcription\": [\"hello, my name is bob!\"],\n \"dummy\": [\"123456\"],\n }\n # Test we can load from task name\n with Dataset.from_dict(data, info=info) as dset:\n with dset.prepare_for_task(task=\"automatic-speech-recognition\") as dset:\n self.assertSetEqual(\n set([\"audio_file_path\", \"transcription\"]),\n set(dset.column_names),\n )\n self.assertDictEqual(features_after_cast, dset.features)\n # Test we can load from Summarization template\n info.task_templates = None\n with Dataset.from_dict(data, info=info) as dset:\n 
with dset.prepare_for_task(task=task) as dset:\n self.assertSetEqual(\n set([\"audio_file_path\", \"transcription\"]),\n set(dset.column_names),\n )\n self.assertDictEqual(features_after_cast, dset.features)\n\n def test_task_with_no_template(self):\n data = {\"input_text\": [\"i love transformers!\"], \"input_labels\": [1]}\n with Dataset.from_dict(data) as dset:\n with self.assertRaises(ValueError):\n dset.prepare_for_task(\"text-classification\")\n\n def test_task_with_incompatible_templates(self):\n labels = sorted([\"pos\", \"neg\"])\n features = Features(\n {\n \"input_text\": Value(\"string\"),\n \"input_labels\": ClassLabel(names=labels),\n }\n )\n task = TextClassification(text_column=\"input_text\", label_column=\"input_labels\")\n info = DatasetInfo(\n features=features,\n task_templates=task,\n )\n data = {\"input_text\": [\"i love transformers!\"], \"input_labels\": [1]}\n with Dataset.from_dict(data, info=info) as dset:\n # Invalid task name\n self.assertRaises(ValueError, dset.prepare_for_task, \"this-task-does-not-exist\")\n # Invalid task type\n self.assertRaises(ValueError, dset.prepare_for_task, 1)\n\n def test_task_with_multiple_compatible_task_templates(self):\n features = Features(\n {\n \"text1\": Value(\"string\"),\n \"text2\": Value(\"string\"),\n }\n )\n task1 = LanguageModeling(text_column=\"text1\")\n task2 = LanguageModeling(text_column=\"text2\")\n info = DatasetInfo(\n features=features,\n task_templates=[task1, task2],\n )\n data = {\"text1\": [\"i love transformers!\"], \"text2\": [\"i love datasets!\"]}\n with Dataset.from_dict(data, info=info) as dset:\n self.assertRaises(ValueError, dset.prepare_for_task, \"language-modeling\", id=3)\n with dset.prepare_for_task(\"language-modeling\") as dset1:\n self.assertEqual(dset1[0][\"text\"], \"i love transformers!\")\n with dset.prepare_for_task(\"language-modeling\", id=1) as dset2:\n self.assertEqual(dset2[0][\"text\"], \"i love datasets!\")\n\n def test_task_templates_empty_after_preparation(self):\n features = Features(\n {\n \"input_text\": Value(\"string\"),\n \"input_labels\": ClassLabel(names=[\"pos\", \"neg\"]),\n }\n )\n task = TextClassification(text_column=\"input_text\", label_column=\"input_labels\")\n info = DatasetInfo(\n features=features,\n task_templates=task,\n )\n data = {\"input_text\": [\"i love transformers!\"], \"input_labels\": [1]}\n with Dataset.from_dict(data, info=info) as dset:\n with dset.prepare_for_task(task=\"text-classification\") as dset:\n self.assertIsNone(dset.info.task_templates)\n\n def test_align_labels_with_mapping(self):\n features = Features(\n {\n \"input_text\": Value(\"string\"),\n \"input_labels\": ClassLabel(num_classes=3, names=[\"entailment\", \"neutral\", \"contradiction\"]),\n }\n )\n data = {\"input_text\": [\"a\", \"a\", \"b\", \"b\", \"c\", \"c\"], \"input_labels\": [0, 0, 1, 1, 2, 2]}\n label2id = {\"CONTRADICTION\": 0, \"ENTAILMENT\": 2, \"NEUTRAL\": 1}\n id2label = {v: k for k, v in label2id.items()}\n expected_labels = [2, 2, 1, 1, 0, 0]\n expected_label_names = [id2label[idx] for idx in expected_labels]\n with Dataset.from_dict(data, features=features) as dset:\n with dset.align_labels_with_mapping(label2id, \"input_labels\") as dset:\n self.assertListEqual(expected_labels, dset[\"input_labels\"])\n aligned_label_names = [dset.features[\"input_labels\"].int2str(idx) for idx in dset[\"input_labels\"]]\n self.assertListEqual(expected_label_names, aligned_label_names)\n\n def test_concatenate_with_no_task_templates(self):\n info = 
DatasetInfo(task_templates=None)\n data = {\"text\": [\"i love transformers!\"], \"labels\": [1]}\n with Dataset.from_dict(data, info=info) as dset1, Dataset.from_dict(\n data, info=info\n ) as dset2, Dataset.from_dict(data, info=info) as dset3:\n with concatenate_datasets([dset1, dset2, dset3]) as dset_concat:\n self.assertEqual(dset_concat.info.task_templates, None)\n\n def test_concatenate_with_equal_task_templates(self):\n labels = [\"neg\", \"pos\"]\n task_template = TextClassification(text_column=\"text\", label_column=\"labels\")\n info = DatasetInfo(\n features=Features({\"text\": Value(\"string\"), \"labels\": ClassLabel(names=labels)}),\n # Label names are added in `DatasetInfo.__post_init__` so not included here\n task_templates=TextClassification(text_column=\"text\", label_column=\"labels\"),\n )\n data = {\"text\": [\"i love transformers!\"], \"labels\": [1]}\n with Dataset.from_dict(data, info=info) as dset1, Dataset.from_dict(\n data, info=info\n ) as dset2, Dataset.from_dict(data, info=info) as dset3:\n with concatenate_datasets([dset1, dset2, dset3]) as dset_concat:\n self.assertListEqual(dset_concat.info.task_templates, [task_template])\n\n def test_concatenate_with_mixed_task_templates_in_common(self):\n tc_template = TextClassification(text_column=\"text\", label_column=\"labels\")\n qa_template = QuestionAnsweringExtractive(\n question_column=\"question\", context_column=\"context\", answers_column=\"answers\"\n )\n info1 = DatasetInfo(\n task_templates=[qa_template],\n features=Features(\n {\n \"text\": Value(\"string\"),\n \"labels\": ClassLabel(names=[\"pos\", \"neg\"]),\n \"context\": Value(\"string\"),\n \"question\": Value(\"string\"),\n \"answers\": Sequence(\n {\n \"text\": Value(\"string\"),\n \"answer_start\": Value(\"int32\"),\n }\n ),\n }\n ),\n )\n info2 = DatasetInfo(\n task_templates=[qa_template, tc_template],\n features=Features(\n {\n \"text\": Value(\"string\"),\n \"labels\": ClassLabel(names=[\"pos\", \"neg\"]),\n \"context\": Value(\"string\"),\n \"question\": Value(\"string\"),\n \"answers\": Sequence(\n {\n \"text\": Value(\"string\"),\n \"answer_start\": Value(\"int32\"),\n }\n ),\n }\n ),\n )\n data = {\n \"text\": [\"i love transformers!\"],\n \"labels\": [1],\n \"context\": [\"huggingface is going to the moon!\"],\n \"question\": [\"where is huggingface going?\"],\n \"answers\": [{\"text\": [\"to the moon!\"], \"answer_start\": [2]}],\n }\n with Dataset.from_dict(data, info=info1) as dset1, Dataset.from_dict(\n data, info=info2\n ) as dset2, Dataset.from_dict(data, info=info2) as dset3:\n with concatenate_datasets([dset1, dset2, dset3]) as dset_concat:\n self.assertListEqual(dset_concat.info.task_templates, [qa_template])\n\n def test_concatenate_with_no_mixed_task_templates_in_common(self):\n tc_template1 = TextClassification(text_column=\"text\", label_column=\"labels\")\n tc_template2 = TextClassification(text_column=\"text\", label_column=\"sentiment\")\n qa_template = QuestionAnsweringExtractive(\n question_column=\"question\", context_column=\"context\", answers_column=\"answers\"\n )\n info1 = DatasetInfo(\n features=Features(\n {\n \"text\": Value(\"string\"),\n \"labels\": ClassLabel(names=[\"pos\", \"neg\"]),\n \"sentiment\": ClassLabel(names=[\"pos\", \"neg\", \"neutral\"]),\n \"context\": Value(\"string\"),\n \"question\": Value(\"string\"),\n \"answers\": Sequence(\n {\n \"text\": Value(\"string\"),\n \"answer_start\": Value(\"int32\"),\n }\n ),\n }\n ),\n task_templates=[tc_template1],\n )\n info2 = DatasetInfo(\n 
features=Features(\n {\n \"text\": Value(\"string\"),\n \"labels\": ClassLabel(names=[\"pos\", \"neg\"]),\n \"sentiment\": ClassLabel(names=[\"pos\", \"neg\", \"neutral\"]),\n \"context\": Value(\"string\"),\n \"question\": Value(\"string\"),\n \"answers\": Sequence(\n {\n \"text\": Value(\"string\"),\n \"answer_start\": Value(\"int32\"),\n }\n ),\n }\n ),\n task_templates=[tc_template2],\n )\n info3 = DatasetInfo(\n features=Features(\n {\n \"text\": Value(\"string\"),\n \"labels\": ClassLabel(names=[\"pos\", \"neg\"]),\n \"sentiment\": ClassLabel(names=[\"pos\", \"neg\", \"neutral\"]),\n \"context\": Value(\"string\"),\n \"question\": Value(\"string\"),\n \"answers\": Sequence(\n {\n \"text\": Value(\"string\"),\n \"answer_start\": Value(\"int32\"),\n }\n ),\n }\n ),\n task_templates=[qa_template],\n )\n data = {\n \"text\": [\"i love transformers!\"],\n \"labels\": [1],\n \"sentiment\": [0],\n \"context\": [\"huggingface is going to the moon!\"],\n \"question\": [\"where is huggingface going?\"],\n \"answers\": [{\"text\": [\"to the moon!\"], \"answer_start\": [2]}],\n }\n with Dataset.from_dict(data, info=info1) as dset1, Dataset.from_dict(\n data, info=info2\n ) as dset2, Dataset.from_dict(data, info=info3) as dset3:\n with concatenate_datasets([dset1, dset2, dset3]) as dset_concat:\n self.assertEqual(dset_concat.info.task_templates, None)\n\n def test_task_text_classification_when_columns_removed(self):\n labels = sorted([\"pos\", \"neg\"])\n features_before_map = Features(\n {\n \"input_text\": Value(\"string\"),\n \"input_labels\": ClassLabel(names=labels),\n }\n )\n features_after_map = Features({\"new_column\": Value(\"int64\")})\n # Label names are added in `DatasetInfo.__post_init__` so not needed here\n task = TextClassification(text_column=\"input_text\", label_column=\"input_labels\")\n info = DatasetInfo(\n features=features_before_map,\n task_templates=task,\n )\n data = {\"input_text\": [\"i love transformers!\"], \"input_labels\": [1]}\n with Dataset.from_dict(data, info=info) as dset:\n with dset.map(lambda x: {\"new_column\": 0}, remove_columns=dset.column_names) as dset:\n self.assertDictEqual(dset.features, features_after_map)\n"
] |
[
[
"numpy.array",
"tensorflow.convert_to_tensor",
"tensorflow.data.TFRecordDataset",
"pandas.DataFrame.from_dict",
"tensorflow.io.FixedLenFeature",
"numpy.ones",
"tensorflow.constant",
"pandas.read_json",
"tensorflow.io.parse_single_example",
"torch.tensor",
"numpy.issubdtype",
"tensorflow.io.VarLenFeature",
"tensorflow.stack",
"numpy.arange",
"pandas.read_csv",
"pandas.read_parquet"
]
] |
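A minimal sketch of the concatenation semantics the tests in the record above exercise, assuming a recent `datasets` release (the `axis` keyword of `concatenate_datasets` is used exactly as in those tests; the column names here are illustrative only):

from datasets import Dataset, concatenate_datasets

# axis=0 stacks rows; columns missing from one side are null-padded,
# which is what test_concatenate_datasets_new_columns asserts.
d1 = Dataset.from_dict({"col_1": ["a", "b"]})
d2 = Dataset.from_dict({"col_1": ["c"], "col_2": [True]})
tall = concatenate_datasets([d1, d2])
assert tall["col_2"] == [None, None, True]

# axis=1 joins columns side by side; duplicated names raise ValueError,
# as test_concatenate_datasets_duplicate_columns checks.
d3 = Dataset.from_dict({"col_3": [1, 2]})
wide = concatenate_datasets([d1, d3], axis=1)
assert wide[0] == {"col_1": "a", "col_3": 1}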
chetakks/ddsp
|
[
"72923ec557e208a6e4374b7b5dfb6d871130807e"
] |
[
"ddsp/losses_test.py"
] |
[
"# Copyright 2020 The DDSP Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n# Lint as: python3\n\"\"\"Tests for ddsp.losses.\"\"\"\n\nfrom ddsp import losses\nimport numpy as np\nimport tensorflow.compat.v2 as tf\n\n\nclass SpectralLossTest(tf.test.TestCase):\n\n def test_output_shape_is_correct(self):\n \"\"\"Test correct shape with all losses active.\"\"\"\n loss_obj = losses.SpectralLoss(\n mag_weight=1.0,\n delta_time_weight=1.0,\n delta_delta_time_weight=1.0,\n delta_freq_weight=1.0,\n delta_delta_freq_weight=1.0,\n logmag_weight=1.0,\n loudness_weight=1.0,\n )\n\n input_audio = tf.random.uniform((3, 8000), dtype=tf.float32)\n target_audio = tf.random.uniform((3, 8000), dtype=tf.float32)\n\n loss = loss_obj(input_audio, target_audio)\n\n self.assertListEqual([], loss.shape.as_list())\n self.assertTrue(np.isfinite(loss))\n\n\nclass PretrainedCREPEEmbeddingLossTest(tf.test.TestCase):\n\n def test_output_shape_is_correct(self):\n loss_obj = losses.PretrainedCREPEEmbeddingLoss()\n\n input_audio = tf.random.uniform((3, 16000), dtype=tf.float32)\n target_audio = tf.random.uniform((3, 16000), dtype=tf.float32)\n\n loss = loss_obj(input_audio, target_audio)\n\n self.assertListEqual([], loss.shape.as_list())\n self.assertTrue(np.isfinite(loss))\n\n\nif __name__ == '__main__':\n tf.test.main()\n"
] |
[
[
"numpy.isfinite",
"tensorflow.compat.v2.random.uniform",
"tensorflow.compat.v2.test.main"
]
] |
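A hedged, standalone sketch of the property the losses test above asserts, namely that DDSP's spectral loss between two audio batches reduces to a finite scalar (assumes the `ddsp` package and TensorFlow 2 are installed; the weight kwargs are the same ones the test passes):

from ddsp import losses
import numpy as np
import tensorflow.compat.v2 as tf

# two batches of 8000-sample audio, as in the test
loss_obj = losses.SpectralLoss(mag_weight=1.0, logmag_weight=1.0)
audio = tf.random.uniform((2, 8000), dtype=tf.float32)
target = tf.random.uniform((2, 8000), dtype=tf.float32)
loss = loss_obj(audio, target)  # scalar tf.Tensor
assert loss.shape.as_list() == [] and np.isfinite(loss.numpy())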
Slow-Rain/NPTEL-The-Joy-of-Computing-using-Python
|
[
"e4bd830ef7a3f171a14a88f94df626c766a7649b"
] |
[
"Week12/Week 12 programming assignment 1.py"
] |
[
"import numpy as np # importing numpy module\r\n\r\n# taking endpoints from the user as point_1, point_2 & point_3\r\n\r\npoint_1 = list(map(float,input().split())) \r\npoint_2 = list(map(float,input().split()))\r\npoint_3 = list(map(float,input().split()))\r\n\r\narr = np.array([point_1,point_2,point_3]) \r\n\r\nvolume = abs(np.linalg.det(arr))\r\n\r\nfinal = float(\"{0:.1f}\". format(volume))\r\n\r\nprint(final,end=\"\")"
] |
[
[
"numpy.array",
"numpy.linalg.det"
]
] |
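A worked check of the record above: the volume of a parallelepiped equals the absolute determinant of the matrix whose rows are its three edge vectors, which is all `np.linalg.det` is used for here:

import numpy as np

# axis-aligned box with edge lengths 1, 2 and 3 -> volume 6
edges = np.array([[1.0, 0.0, 0.0],
                  [0.0, 2.0, 0.0],
                  [0.0, 0.0, 3.0]])
volume = abs(np.linalg.det(edges))
print("{0:.1f}".format(volume), end="")  # -> 6.0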
RosenblumLabUser/Qcodes
|
[
"042c5e25ab9e40b20c316b4055c4842844834d1e"
] |
[
"qcodes/instrument_drivers/rohde_schwarz/RTO1000.py"
] |
[
"# All manual references are to R&S RTO Digital Oscilloscope User Manual\n# for firmware 3.65, 2017\n\nimport logging\nimport warnings\nimport time\nfrom typing import Optional, Any\n\nimport numpy as np\nfrom distutils.version import LooseVersion\n\nfrom qcodes import Instrument\nfrom qcodes.instrument.visa import VisaInstrument\nfrom qcodes.instrument.channel import InstrumentChannel\nfrom qcodes.utils import validators as vals\nfrom qcodes.instrument.parameter import ArrayParameter\nfrom qcodes.utils.helpers import create_on_off_val_mapping\n\nlog = logging.getLogger(__name__)\n\n\nclass ScopeTrace(ArrayParameter):\n\n def __init__(self, name: str, instrument: InstrumentChannel,\n channum: int) -> None:\n \"\"\"\n The ScopeTrace parameter is attached to a channel of the oscilloscope.\n\n For now, we only support reading out the entire trace.\n \"\"\"\n super().__init__(name=name,\n shape=(1,),\n label='Voltage', # TODO: Is this sometimes dbm?\n unit='V',\n setpoint_names=('Time',),\n setpoint_labels=('Time',),\n setpoint_units=('s',),\n docstring='Holds scope trace')\n\n self.channel = instrument\n self.channum = channum\n\n def prepare_trace(self) -> None:\n \"\"\"\n Prepare the scope for returning data, calculate the setpoints\n \"\"\"\n # We always use 16 bit integers for the data format\n self.channel._parent.dataformat('INT,16')\n # ensure little-endianess\n self.channel._parent.write('FORMat:BORder LSBFirst')\n # only export y-values\n self.channel._parent.write('EXPort:WAVeform:INCXvalues OFF')\n # only export one channel\n self.channel._parent.write('EXPort:WAVeform:MULTichannel OFF')\n\n # now get setpoints\n\n hdr = self.channel._parent.ask(f'CHANnel{self.channum}:'\n 'DATA:HEADER?')\n hdr_vals = list(map(float, hdr.split(',')))\n t_start = hdr_vals[0]\n t_stop = hdr_vals[1]\n no_samples = int(hdr_vals[2])\n values_per_sample = hdr_vals[3]\n\n # NOTE (WilliamHPNielsen):\n # If samples are multi-valued, we need a `MultiParameter`\n # instead of an `ArrayParameter`.\n if values_per_sample > 1:\n raise NotImplementedError('There are several values per sample '\n 'in this trace (are you using envelope'\n ' or peak detect?). We currently do '\n 'not support saving such a trace.')\n\n self.shape = (no_samples,)\n self.setpoints = (tuple(np.linspace(t_start, t_stop, no_samples)),)\n\n self._trace_ready = True\n # we must ensure that all this took effect before proceeding\n self.channel._parent.ask('*OPC?')\n\n def get_raw(self) -> np.ndarray:\n \"\"\"\n Returns a trace\n \"\"\"\n\n instr = self.channel._parent\n\n if not self._trace_ready:\n raise ValueError('Trace not ready! Please call '\n 'prepare_trace().')\n\n if instr.run_mode() == 'RUN Nx SINGLE':\n total_acquisitions = instr.num_acquisitions()\n completed_acquisitions = instr.completed_acquisitions()\n log.info(f'Acquiring {total_acquisitions} traces.')\n while completed_acquisitions < total_acquisitions:\n log.info(f'Acquired {completed_acquisitions}:'\n f'{total_acquisitions}')\n time.sleep(0.25)\n completed_acquisitions = instr.completed_acquisitions()\n\n log.info('Acquisition completed. 
Polling trace from instrument.')\n vh = instr.visa_handle\n vh.write(f'CHANnel{self.channum}:DATA?')\n raw_vals = vh.read_raw()\n\n num_length = int(raw_vals[1:2])\n no_points = int(raw_vals[2:2+num_length])\n\n # cut of the header and the trailing '\\n'\n raw_vals = raw_vals[2+num_length:-1]\n\n dataformat = instr.dataformat.get_latest()\n\n if dataformat == 'INT,8':\n int_vals = np.fromstring(raw_vals, dtype=np.int8, count=no_points)\n else:\n int_vals = np.fromstring(raw_vals, dtype=np.int16,\n count=no_points//2)\n\n # now the integer values must be converted to physical\n # values\n\n scale = self.channel.scale()\n no_divs = 10 # TODO: Is this ever NOT 10?\n\n # we always export as 16 bit integers\n quant_levels = 253*256\n conv_factor = scale*no_divs/quant_levels\n output = conv_factor*int_vals + self.channel.offset()\n\n return output\n\n\nclass ScopeMeasurement(InstrumentChannel):\n \"\"\"\n Class to hold a measurement of the scope.\n \"\"\"\n\n def __init__(self, parent: Instrument, name: str, meas_nr: int) -> None:\n \"\"\"\n Args:\n parent: The instrument to which the channel is attached\n name: The name of the measurement\n meas_nr: The number of the measurement in question. Must match the\n actual number as used by the instrument (1..8)\n \"\"\"\n\n if meas_nr not in range(1, 9):\n raise ValueError('Invalid measurement number; Min: 1, max 8')\n\n self.meas_nr = meas_nr\n super().__init__(parent, name)\n\n self.sources = vals.Enum('C1W1', 'C1W2', 'C1W3',\n 'C2W1', 'C2W2', 'C2W3',\n 'C3W1', 'C3W2', 'C3W3',\n 'C4W1', 'C4W2', 'C4W3',\n 'M1', 'M2', 'M3', 'M4',\n 'R1', 'R2', 'R3', 'R4',\n 'SBUS1', 'SBUS2', 'SBUS3', 'SBUS4',\n 'D0', 'D1', 'D2', 'D3',\n 'D4', 'D5', 'D6', 'D7',\n 'D8', 'D9', 'D10', 'D11',\n 'D12', 'D13', 'D14', 'D15',\n 'TRK1', 'TRK2', 'TRK3', 'TRK4',\n 'TRK5', 'TRK6', 'TRK7', 'TRK8',\n 'SG1TL1', 'SG1TL2',\n 'SG2TL1', 'SG2TL2',\n 'SG3TL1', 'SG3TL2',\n 'SG4TL1', 'SG4TL2',\n 'Z1V1', 'Z1V2', 'Z1V3', 'Z1V4',\n 'Z1I1', 'Z1I2', 'Z1I3', 'Z1I4',\n 'Z2V1', 'Z2V2', 'Z2V3', 'Z2V4',\n 'Z2I1', 'Z2I2', 'Z2I3', 'Z2I4')\n\n self.categories = vals.Enum('AMPTime', 'JITTer', 'EYEJitter',\n 'SPECtrum', 'HISTogram', 'PROTocol')\n\n self.meas_type = vals.Enum(\n # Amplitude/time measurements\n 'HIGH', 'LOW', 'AMPLitude',\n 'MAXimum', 'MINimum', 'PDELta',\n 'MEAN', 'RMS', 'STDDev',\n 'POVershoot', 'NOVershoot', 'AREA',\n 'RTIMe', 'FTIMe', 'PPULse',\n 'NPULse', 'PERiod', 'FREQuency',\n 'PDCYcle', 'NDCYcle', 'CYCarea',\n 'CYCMean', 'CYCRms', 'CYCStddev',\n 'PULCnt', 'DELay', 'PHASe',\n 'BWIDth', 'PSWitching', 'NSWitching',\n 'PULSetrain', 'EDGecount', 'SHT',\n 'SHR', 'DTOTrigger', 'PROBemeter',\n 'SLERising', 'SLEFalling',\n # Jitter measurements\n 'CCJitter', 'NCJitter', 'CCWidth',\n 'CCDutycycle', 'TIE', 'UINTerval',\n 'DRATe', 'SKWDelay', 'SKWPhase',\n # Eye diagram measurements\n 'ERPercent', 'ERDB', 'EHEight',\n 'EWIDth', 'ETOP', 'EBASe',\n 'QFACtor', 'RMSNoise', 'SNRatio',\n 'DCDistortion', 'ERTime', 'EFTime',\n 'EBRate', 'EAMPlitude', 'PPJitter',\n 'STDJitter', 'RMSJitter',\n # Spectrum measurements\n 'CPOWer', 'OBWidth', 'SBWidth',\n 'THD', 'THDPCT', 'THDA',\n 'THDU', 'THDR', 'HAR',\n 'PLISt',\n # Histogram measurements\n 'WCOunt', 'WSAMples', 'HSAMples',\n 'HPEak', 'PEAK', 'UPEakvalue',\n 'LPEakvalue', 'HMAXimum', 'HMINimum',\n 'MEDian', 'MAXMin', 'HMEan',\n 'HSTDdev', 'M1STddev', 'M2STddev',\n 'M3STddev', 'MKPositive', 'MKNegative'\n )\n\n self.add_parameter('enable',\n label=f'Measurement {meas_nr} enable',\n set_cmd=f'MEASurement{meas_nr}:ENABle {{}}',\n vals=vals.Enum('ON', 
'OFF'),\n docstring='Switches the measurement on or off.')\n\n self.add_parameter('source',\n label=f'Measurement {meas_nr} source',\n set_cmd=f'MEASurement{meas_nr}:SOURce {{}}',\n vals=self.sources,\n docstring='Set the source of a measurement if the '\n 'measurement only needs one source.')\n\n self.add_parameter('source_first',\n label=f'Measurement {meas_nr} first source',\n set_cmd=f'MEASurement{meas_nr}:FSRC {{}}',\n vals=self.sources,\n docstring='Set the first source of a measurement'\n ' if the measurement only needs multiple'\n ' sources.')\n\n self.add_parameter('source_second',\n label=f'Measurement {meas_nr} second source',\n set_cmd=f'MEASurement{meas_nr}:SSRC {{}}',\n vals=self.sources,\n docstring='Set the second source of a measurement'\n ' if the measurement only needs multiple'\n ' sources.')\n\n self.add_parameter('category',\n label=f'Measurement {meas_nr} category',\n set_cmd=f'MEASurement{meas_nr}:CATegory {{}}',\n vals=self.categories,\n docstring='Set the category of a measurement.')\n\n self.add_parameter('main',\n label=f'Measurement {meas_nr} main',\n set_cmd=f'MEASurement{meas_nr}:MAIN {{}}',\n vals=self.meas_type,\n docstring='Set the main of a measurement.')\n\n self.add_parameter('statistics_enable',\n label=f'Measurement {meas_nr} enable statistics',\n set_cmd=f'MEASurement{meas_nr}:STATistics:ENABle'\n f' {{}}',\n vals=vals.Enum('ON', 'OFF'),\n docstring='Switches the measurement on or off.')\n\n self.add_parameter('clear',\n label=f'Measurement {meas_nr} clear statistics',\n set_cmd=f'MEASurement{meas_nr}:CLEar',\n docstring='Clears/reset measurement.')\n\n self.add_parameter('event_count',\n label=f'Measurement {meas_nr} number of events',\n get_cmd=f'MEASurement{meas_nr}:RESult:EVTCount?',\n get_parser=int,\n docstring='Number of measurement results in the'\n ' long-term measurement.')\n\n self.add_parameter('result_avg',\n label=f'Measurement {meas_nr} averages',\n get_cmd=f'MEASurement{meas_nr}:RESult:AVG?',\n get_parser=float,\n docstring='Average of the long-term measurement'\n ' results.')\n\n\nclass ScopeChannel(InstrumentChannel):\n \"\"\"\n Class to hold an input channel of the scope.\n\n Exposes: state, coupling, ground, scale, range, position, offset,\n invert, bandwidth, impedance, overload.\n \"\"\"\n\n def __init__(self, parent: Instrument, name: str, channum: int) -> None:\n \"\"\"\n Args:\n parent: The instrument to which the channel is attached\n name: The name of the channel\n channum: The number of the channel in question. Must match the\n actual number as used by the instrument (1..4)\n \"\"\"\n\n if channum not in [1, 2, 3, 4]:\n raise ValueError('Invalid channel number! Must be 1, 2, 3, or 4.')\n\n self.channum = channum\n\n super().__init__(parent, name)\n\n self.add_parameter('state',\n label=f'Channel {channum} state',\n get_cmd=f'CHANnel{channum}:STATe?',\n set_cmd=f'CHANnel{channum}:STATE {{}}',\n vals=vals.Enum('ON', 'OFF'),\n docstring='Switches the channel on or off')\n\n self.add_parameter('coupling',\n label=f'Channel {channum} coupling',\n get_cmd=f'CHANnel{channum}:COUPling?',\n set_cmd=f'CHANnel{channum}:COUPling {{}}',\n vals=vals.Enum('DC', 'DCLimit', 'AC'),\n docstring=('Selects the connection of the channel'\n 'signal. DC: 50 Ohm, DCLimit 1 MOhm, '\n 'AC: Con. 
through DC capacitor'))\n\n self.add_parameter('ground',\n label=f'Channel {channum} ground',\n get_cmd=f'CHANnel{channum}:GND?',\n set_cmd=f'CHANnel{channum}:GND {{}}',\n vals=vals.Enum('ON', 'OFF'),\n docstring=('Connects/disconnects the signal to/from'\n 'the ground.'))\n\n # NB (WilliamHPNielsen): This parameter depends on other parameters and\n # should be dynamically updated accordingly. Cf. p 1178 of the manual\n self.add_parameter('scale',\n label=f'Channel {channum} Y scale',\n unit='V/div',\n get_cmd=f'CHANnel{channum}:SCALe?',\n set_cmd=self._set_scale,\n get_parser=float,\n )\n\n self.add_parameter('range',\n label=f'Channel {channum} Y range',\n unit='V',\n get_cmd=f'CHANnel{channum}:RANGe?',\n set_cmd=self._set_range,\n get_parser=float\n )\n\n # TODO (WilliamHPNielsen): would it be better to recast this in terms\n # of Volts?\n self.add_parameter('position',\n label=f'Channel {channum} vert. pos.',\n unit='div',\n get_cmd=f'CHANnel{channum}:POSition?',\n set_cmd=f'CHANnel{channum}:POSition {{}}',\n get_parser=float,\n vals=vals.Numbers(-5, 5),\n docstring=('Positive values move the waveform up,'\n ' negative values move it down.'))\n\n self.add_parameter('offset',\n label=f'Channel {channum} offset',\n unit='V',\n get_cmd=f'CHANnel{channum}:OFFSet?',\n set_cmd=f'CHANnel{channum}:OFFSet {{}}',\n get_parser=float,\n )\n\n self.add_parameter('invert',\n label=f'Channel {channum} inverted',\n get_cmd=f'CHANnel{channum}:INVert?',\n set_cmd=f'CHANnel{channum}:INVert {{}}',\n vals=vals.Enum('ON', 'OFF'))\n\n # TODO (WilliamHPNielsen): This parameter should be dynamically\n # validated since 800 MHz BW is only available for 50 Ohm coupling\n self.add_parameter('bandwidth',\n label=f'Channel {channum} bandwidth',\n get_cmd=f'CHANnel{channum}:BANDwidth?',\n set_cmd=f'CHANnel{channum}:BANDwidth {{}}',\n vals=vals.Enum('FULL', 'B800', 'B200', 'B20')\n )\n\n self.add_parameter('impedance',\n label=f'Channel {channum} impedance',\n unit='Ohm',\n get_cmd=f'CHANnel{channum}:IMPedance?',\n set_cmd=f'CHANnel{channum}:IMPedance {{}}',\n vals=vals.Ints(1, 100000),\n docstring=('Sets the impedance of the channel '\n 'for power calculations and '\n 'measurements.'))\n\n self.add_parameter('overload',\n label=f'Channel {channum} overload',\n get_cmd=f'CHANnel{channum}:OVERload?')\n\n self.add_parameter('arithmetics',\n label=f'Channel {channum} arithmetics',\n set_cmd=f'CHANnel{channum}:ARIThmetics {{}}',\n get_cmd=f'CHANnel{channum}:ARIThmetics?',\n val_mapping={'AVERAGE': 'AVER',\n 'OFF': 'OFF',\n 'ENVELOPE': 'ENV'}\n )\n\n self.add_parameter('trace',\n channum=self.channum,\n parameter_class=ScopeTrace)\n\n self._trace_ready = False\n\n # Specialised/interlinked set/getters\n def _set_range(self, value: float) -> None:\n self.scale.cache.set(value/10)\n\n self._parent.write(f'CHANnel{self.channum}:RANGe {value}')\n\n def _set_scale(self, value: float) -> None:\n self.range.cache.set(value*10)\n\n self._parent.write(f'CHANnel{self.channum}:SCALe {value}')\n\n\nclass RTO1000(VisaInstrument):\n \"\"\"\n QCoDeS Instrument driver for the\n Rohde-Schwarz RTO1000 series oscilloscopes.\n\n \"\"\"\n\n def __init__(self, name: str, address: str,\n model: Optional[str] = None, timeout: float = 5.,\n HD: bool = True,\n terminator: str = '\\n',\n **kwargs: Any) -> None:\n \"\"\"\n Args:\n name: name of the instrument\n address: VISA resource address\n model: The instrument model. 
For newer firmware versions,\n this can be auto-detected\n timeout: The VISA query timeout\n HD: Does the unit have the High Definition Option (allowing\n 16 bit vertical resolution)\n terminator: Command termination character to strip from VISA\n commands.\n \"\"\"\n super().__init__(name=name, address=address, timeout=timeout,\n terminator=terminator, **kwargs)\n\n # With firmware versions earlier than 3.65, it seems that the\n # model number can NOT be queried from the instrument\n # (at least fails with RTO1024, fw 2.52.1.1), so in that case\n # the user must provide the model manually.\n firmware_version = self.get_idn()['firmware']\n\n if LooseVersion(firmware_version) < LooseVersion('3'):\n log.warning('Old firmware version detected. This driver may '\n 'not be compatible. Please upgrade your firmware.')\n\n if LooseVersion(firmware_version) >= LooseVersion('3.65'):\n # strip just in case there is a newline character at the end\n self.model = self.ask('DIAGnostic:SERVice:WFAModel?').strip()\n if model is not None and model != self.model:\n warnings.warn(\"The model number provided by the user \"\n \"does not match the instrument's response.\"\n \" I am going to assume that this oscilloscope \"\n f\"is a model {self.model}\")\n else:\n if model is None:\n raise ValueError('No model number provided. Please provide '\n 'a model number (eg. \"RTO1024\").')\n else:\n self.model = model\n\n self.HD = HD\n\n # Now assign model-specific values\n self.num_chans = int(self.model[-1])\n self.num_meas = 8\n\n self._horisontal_divs = int(self.ask('TIMebase:DIVisions?'))\n\n self.add_parameter('display',\n label='Display state',\n set_cmd='SYSTem:DISPlay:UPDate {}',\n val_mapping={'remote': 0,\n 'view': 1})\n\n # Triggering\n\n self.add_parameter('trigger_display',\n label='Trigger display state',\n set_cmd='DISPlay:TRIGger:LINes {}',\n get_cmd='DISPlay:TRIGger:LINes?',\n val_mapping={'ON': 1, 'OFF': 0})\n\n # TODO: (WilliamHPNielsen) There are more available trigger\n # settings than implemented here. See p. 1261 of the manual\n # here we just use trigger1, which is the A-trigger\n\n self.add_parameter('trigger_source',\n label='Trigger source',\n set_cmd='TRIGger1:SOURce {}',\n get_cmd='TRIGger1:SOURce?',\n val_mapping={'CH1': 'CHAN1',\n 'CH2': 'CHAN2',\n 'CH3': 'CHAN3',\n 'CH4': 'CHAN4',\n 'EXT': 'EXT'})\n\n self.add_parameter('trigger_mode',\n label='Trigger mode',\n set_cmd='TRIGger:MODE {}',\n get_cmd='TRIGger1:SOURce?',\n vals=vals.Enum('AUTO', 'NORMAL', 'FREERUN'),\n docstring='Sets the trigger mode which determines'\n ' the behaviour of the instrument if no'\n ' trigger occurs.\\n'\n 'Options: AUTO, NORMAL, FREERUN.',\n unit='none')\n\n self.add_parameter('trigger_type',\n label='Trigger type',\n set_cmd='TRIGger1:TYPE {}',\n get_cmd='TRIGger1:TYPE?',\n val_mapping={'EDGE': 'EDGE',\n 'GLITCH': 'GLIT',\n 'WIDTH': 'WIDT',\n 'RUNT': 'RUNT',\n 'WINDOW': 'WIND',\n 'TIMEOUT': 'TIM',\n 'INTERVAL': 'INT',\n 'SLEWRATE': 'SLEW',\n 'DATATOCLOCK': 'DAT',\n 'STATE': 'STAT',\n 'PATTERN': 'PATT',\n 'ANEDGE': 'ANED',\n 'SERPATTERN': 'SERP',\n 'NFC': 'NFC',\n 'TV': 'TV',\n 'CDR': 'CDR'}\n )\n # See manual p. 
1262 for an explanation of trigger types\n\n self.add_parameter('trigger_level',\n label='Trigger level',\n set_cmd=self._set_trigger_level,\n get_cmd=self._get_trigger_level)\n\n self.add_parameter('trigger_edge_slope',\n label='Edge trigger slope',\n set_cmd='TRIGger1:EDGE:SLOPe {}',\n get_cmd='TRIGger1:EDGE:SLOPe?',\n vals=vals.Enum('POS', 'NEG', 'EITH'))\n\n # Horizontal settings\n\n self.add_parameter('timebase_scale',\n label='Timebase scale',\n set_cmd=self._set_timebase_scale,\n get_cmd='TIMebase:SCALe?',\n unit='s/div',\n get_parser=float,\n vals=vals.Numbers(25e-12, 10000))\n\n self.add_parameter('timebase_range',\n label='Timebase range',\n set_cmd=self._set_timebase_range,\n get_cmd='TIMebase:RANGe?',\n unit='s',\n get_parser=float,\n vals=vals.Numbers(250e-12, 100e3))\n\n self.add_parameter('timebase_position',\n label='Horizontal position',\n set_cmd=self._set_timebase_position,\n get_cmd='TIMEbase:HORizontal:POSition?',\n get_parser=float,\n unit='s',\n vals=vals.Numbers(-100e24, 100e24))\n\n # Acquisition\n\n # I couldn't find a way to query the run mode, so we manually keep\n # track of it. It is very important when getting the trace to make\n # sense of completed_acquisitions.\n self.add_parameter('run_mode',\n label='Run/acquisition mode of the scope',\n get_cmd=None,\n set_cmd=None)\n\n self.run_mode('RUN CONT')\n\n self.add_parameter('num_acquisitions',\n label='Number of single acquisitions to perform',\n get_cmd='ACQuire:COUNt?',\n set_cmd='ACQuire:COUNt {}',\n vals=vals.Ints(1, 16777215),\n get_parser=int)\n\n self.add_parameter('completed_acquisitions',\n label='Number of completed acquisitions',\n get_cmd='ACQuire:CURRent?',\n get_parser=int)\n\n self.add_parameter('sampling_rate',\n label='Sample rate',\n docstring='Number of averages for measuring '\n 'trace.',\n unit='Sa/s',\n get_cmd='ACQuire:POINts:ARATe' + '?',\n get_parser=int)\n\n self.add_parameter('acquisition_sample_rate',\n label='Acquisition sample rate',\n unit='Sa/s',\n docstring='recorded waveform samples per second',\n get_cmd='ACQuire:SRATe'+'?',\n set_cmd='ACQuire:SRATe ' + ' {:.2f}',\n vals=vals.Numbers(2, 20e12),\n get_parser=float)\n\n # Data\n\n self.add_parameter('dataformat',\n label='Export data format',\n set_cmd='FORMat:DATA {}',\n get_cmd='FORMat:DATA?',\n vals=vals.Enum('ASC,0', 'REAL,32',\n 'INT,8', 'INT,16'))\n\n # High definition mode (might not be available on all instruments)\n\n if HD:\n self.add_parameter('high_definition_state',\n label='High definition (16 bit) state',\n set_cmd=self._set_hd_mode,\n get_cmd='HDEFinition:STAte?',\n val_mapping=create_on_off_val_mapping(on_val=1,\n off_val=0),\n docstring='Sets the filter bandwidth for the'\n ' high definition mode.\\n'\n 'ON: high definition mode, up to 16'\n ' bit digital resolution\\n'\n 'Options: ON, OFF\\n\\n'\n 'Warning/Bug: By opening the HD '\n 'acquisition menu on the scope, '\n 'this value will be set to \"ON\".')\n\n self.add_parameter('high_definition_bandwidth',\n label='High definition mode bandwidth',\n set_cmd='HDEFinition:BWIDth {}',\n get_cmd='HDEFinition:BWIDth?',\n unit='Hz',\n get_parser=float,\n vals=vals.Numbers(1e4, 1e9))\n\n self.add_parameter('error_count',\n label='Number of errors in the error stack',\n get_cmd='SYSTem:ERRor:COUNt?',\n unit='#',\n get_parser=int)\n\n self.add_parameter('error_next',\n label='Next error from the error stack',\n get_cmd='SYSTem:ERRor:NEXT?',\n get_parser=str)\n\n # Add the channels to the instrument\n for ch in range(1, self.num_chans+1):\n chan = ScopeChannel(self, 
f'channel{ch}', ch)\n self.add_submodule(f'ch{ch}', chan)\n\n for measId in range(1, self.num_meas+1):\n measCh = ScopeMeasurement(self, f'measurement{measId}', measId)\n self.add_submodule(f'meas{measId}', measCh)\n\n self.add_function('stop', call_cmd='STOP')\n self.add_function('reset', call_cmd='*RST')\n self.add_parameter('opc', get_cmd='*OPC?')\n self.add_parameter('stop_opc', get_cmd='STOP;*OPC?')\n self.add_parameter('status_operation',\n get_cmd='STATus:OPERation:CONDition?',\n get_parser=int)\n self.add_function('run_continues', call_cmd='RUNContinous')\n # starts the shutdown of the system\n self.add_function('system_shutdown', call_cmd='SYSTem:EXIT')\n\n self.connect_message()\n\n def run_cont(self) -> None:\n \"\"\"\n Set the instrument in 'RUN CONT' mode\n \"\"\"\n self.write('RUN')\n self.run_mode.set('RUN CONT')\n\n def run_single(self) -> None:\n \"\"\"\n Set the instrument in 'RUN Nx SINGLE' mode\n \"\"\"\n self.write('SINGLE')\n self.run_mode.set('RUN Nx SINGLE')\n\n def is_triggered(self) -> bool:\n wait_trigger_mask = 0b01000\n return bool(self.status_operation() & wait_trigger_mask) == False\n\n def is_running(self) -> bool:\n measuring_mask = 0b10000\n return bool(self.status_operation() & measuring_mask)\n\n def is_acquiring(self) -> bool:\n return self.is_triggered() & self.is_running()\n\n # Specialised set/get functions\n\n def _set_hd_mode(self, value: int) -> None:\n \"\"\"\n Set/unset the high def mode\n \"\"\"\n self._make_traces_not_ready()\n self.write(f'HDEFinition:STAte {value}')\n\n def _set_timebase_range(self, value: float) -> None:\n \"\"\"\n Set the full range of the timebase\n \"\"\"\n self._make_traces_not_ready()\n self.timebase_scale.cache.set(value/self._horisontal_divs)\n\n self.write(f'TIMebase:RANGe {value}')\n\n def _set_timebase_scale(self, value: float) -> None:\n \"\"\"\n Set the length of one horizontal division.\n \"\"\"\n self._make_traces_not_ready()\n self.timebase_range.cache.set(value*self._horisontal_divs)\n\n self.write(f'TIMebase:SCALe {value}')\n\n def _set_timebase_position(self, value: float) -> None:\n \"\"\"\n Set the horizontal position.\n \"\"\"\n self._make_traces_not_ready()\n self.write(f'TIMEbase:HORizontal:POSition {value}')\n\n def _make_traces_not_ready(self) -> None:\n \"\"\"\n Make the scope traces be not ready.\n \"\"\"\n self.ch1.trace._trace_ready = False\n self.ch2.trace._trace_ready = False\n self.ch3.trace._trace_ready = False\n self.ch4.trace._trace_ready = False\n\n def _set_trigger_level(self, value: float) -> None:\n \"\"\"\n Set the trigger level on the currently used trigger source\n channel.\n \"\"\"\n trans = {'CH1': 1, 'CH2': 2, 'CH3': 3, 'CH4': 4, 'EXT': 5}\n # We use get and not get_latest because we don't trust users to\n # not touch the front panel of an oscilloscope.\n source = trans[self.trigger_source.get()]\n if source != 5:\n submodule = self.submodules[f'ch{source}']\n assert isinstance(submodule, InstrumentChannel)\n v_range = submodule.range()\n offset = submodule.offset()\n\n if (value < -v_range/2 + offset) or (value > v_range/2 + offset):\n raise ValueError('Trigger level outside channel range.')\n\n self.write(f'TRIGger1:LEVel{source} {value}')\n\n def _get_trigger_level(self) -> float:\n \"\"\"\n Get the trigger level from the currently used trigger source\n \"\"\"\n trans = {'CH1': 1, 'CH2': 2, 'CH3': 3, 'CH4': 4, 'EXT': 5}\n # we use get and not get_latest because we don't trust users to\n # not touch the front panel of an oscilloscope\n source = 
trans[self.trigger_source.get()]\n\n val = self.ask(f'TRIGger1:LEVel{source}?')\n\n return float(val.strip())\n"
] |
[
[
"numpy.linspace",
"numpy.fromstring"
]
] |
maierbn/streaming_software
|
[
"2cfe5da8e28b45751147c62906e8ef85ccf91fca"
] |
[
"mouse/mouse_control.py"
] |
[
"# module to interface the 3D SpaceMouse\n\nimport time\nimport spacemouse\nimport numpy as np\nimport pickle\n\ndef increase_sensitivity(a):\n if a < 0:\n return -a*a\n return a*a\n\ndef loop(camera):\n \"\"\"\n Main loop to handle mouse input and apply it to the camera.\n :param camera: the camera object as defined in camera.py, used to control the PTZ camera.\n \"\"\"\n \n # get handle to space mouse\n space_mouse = spacemouse.SpaceMouse()\n \n button_pressed = [False, False]\n time_button_press = [0,0]\n is_pan_tilt_in_progress = False\n is_zoom_in_progress = False\n is_no_zoom_mode = False\n \n while True:\n # control contains: [x,y,z,roll,pitch,yaw]\n \n current_vector = np.array(space_mouse.control)\n #current_control = current_vector - initial_vector\n \n x = -increase_sensitivity(current_vector[4]) # pan \n y = increase_sensitivity(current_vector[3]) # tilt \n z = -current_vector[2] # zoom\n\n if is_no_zoom_mode:\n z = 0\n\n #print(\"mouse control: {} (left button: {}, right button: {})\".format(\n # (x,y,z), space_mouse.is_left_button_pressed, space_mouse.is_right_button_pressed))\n \n if x == 0 and y == 0 and is_pan_tilt_in_progress:\n is_pan_tilt_in_progress = False\n sequence_no = camera.send_command(\"Pan-tiltDrive_Stop\")\n camera.wait_for_result(sequence_no)\n \n if z == 0 and is_zoom_in_progress:\n is_zoom_in_progress = False\n sequence_no = camera.send_command(\"CAM_Zoom_Stop\")\n camera.wait_for_result(sequence_no)\n \n direction_x = 0\n direction_y = 0\n \n if x < 0:\n direction_x = -1\n elif x > 0:\n direction_x = 1\n \n if y < 0:\n direction_y = -1\n elif y > 0:\n direction_y = 1\n \n velocity_x = abs(x)\n velocity_y = abs(y)\n \n if z > 0:\n zoom_speed = (int)(z*7)\n sequence_no = camera.send_command(\"CAM_Zoom_Tele_Variable\", zoom_speed)\n is_zoom_in_progress = True\n elif z < 0:\n zoom_speed = (int)(-z*7)\n sequence_no = camera.send_command(\"CAM_Zoom_Wide_Variable\", zoom_speed)\n is_zoom_in_progress = True\n elif x != 0 or y != 0:\n sequence_no = camera.send_command(\"Pan-tiltDrive\", direction_x, direction_y, velocity_x, velocity_y)\n is_pan_tilt_in_progress = True\n \n time.sleep(0.02)\n \n # left button pressed\n if not button_pressed[0] and space_mouse.is_left_button_pressed:\n button_pressed[0] = True\n time_button_press[0] = time.time()\n \n # toggle no zoom mode\n is_no_zoom_mode = not is_no_zoom_mode\n print(\"is_no_zoom_mode: {}\".format(is_no_zoom_mode))\n\n # right button pressed\n elif not button_pressed[1] and space_mouse.is_right_button_pressed:\n button_pressed[1] = True\n time_button_press[1] = time.time()\n \n # toggle no zoom mode\n is_no_zoom_mode = not is_no_zoom_mode\n print(\"is_no_zoom_mode: {}\".format(is_no_zoom_mode))\n \n # left button released\n elif button_pressed[0] and not space_mouse.is_left_button_pressed:\n button_pressed[0] = False\n \n # right button released\n elif button_pressed[1] and not space_mouse.is_right_button_pressed:\n button_pressed[1] = False\n\n \"\"\"\n elif button_pressed[0] and not space_mouse.is_left_button_pressed:\n press_duration = time.time() - time_button_press[0]\n \n # toggle no zoom mode\n is_no_zoom_mode = not is_no_zoom_mode\n print(\"is_no_zoom_mode: {}\".format(is_no_zoom_mode))\n \n # implementation of saved positions, no longer used\n # long press\n if press_duration >= 2:\n \n # get current absolute position\n sequence_no = camera.send_command(\"Pan-tiltPosInq\")\n pos_xy = camera.wait_for_result(sequence_no)\n \n sequence_no = camera.send_command(\"CAM_OpticalZoomPosInq\")\n pos_zoom = 
camera.wait_for_result(sequence_no)\n \n print(\"Save position 1 (x,y,zoom) = ({},{},{})\".format(pos_xy[0], pos_xy[1], pos_zoom))\n \n with open(\"pos1\", \"wb\") as f:\n pickle.dump((pos_xy, pos_zoom), f)\n # short press\n else:\n with open(\"pos1\", \"rb\") as f:\n (pos_xy, pos_zoom) = pickle.load(f)\n \n print(\"Load position 1 (x,y,zoom) = ({},{},{})\".format(pos_xy[0], pos_xy[1], pos_zoom))\n \n # Pan-tiltDrive_absolute(x,y,vx,vy)\n sequence_no = camera.send_command(\"Pan-tiltDrive_absolute\", pos_xy[0], pos_xy[1], 0.8, 0.8)\n camera.wait_for_result(sequence_no)\n \n sequence_no = camera.send_command(\"CAM_Zoom_Direct_Speed\", pos_zoom, 0.8)\n # right button released\n elif button_pressed[1] and not space_mouse.is_right_button_pressed:\n press_duration = time.time() - time_button_press[1]\n \n if press_duration >= 2:\n \n # get current absolute position\n sequence_no = camera.send_command(\"Pan-tiltPosInq\")\n pos_xy = camera.wait_for_result(sequence_no)\n \n sequence_no = camera.send_command(\"CAM_OpticalZoomPosInq\")\n pos_zoom = camera.wait_for_result(sequence_no)\n \n print(\"Save position 2 (x,y,zoom) = ({},{},{})\".format(pos_xy[0], pos_xy[1], pos_zoom))\n \n with open(\"pos2\", \"wb\") as f:\n pickle.dump((pos_xy, pos_zoom), f)\n \n # short press\n else:\n with open(\"pos2\", \"rb\") as f:\n (pos_xy, pos_zoom) = pickle.load(f)\n \n print(\"Load position 2 (x,y,zoom) = ({},{},{})\".format(pos_xy[0], pos_xy[1], pos_zoom))\n \n # Pan-tiltDrive_absolute(x,y,vx,vy)\n sequence_no = camera.send_command(\"Pan-tiltDrive_absolute\", pos_xy[0], pos_xy[1], 0.8, 0.8)\n camera.wait_for_result(sequence_no)\n \n # CAM_Zoom_Direct_Speed(float f, int speed)\n sequence_no = camera.send_command(\"CAM_Zoom_Direct_Speed\", pos_zoom, 0.8)\n \"\"\"\n \n"
] |
[
[
"numpy.array"
]
] |
karanvijaygit/DMLM
|
[
"aaeb3e65d0a58ad583289aaa39b089f11d06a4eb"
] |
[
"packages/regression_model/regression_model/processing/data_management.py"
] |
[
"import pandas as pd\nimport joblib\nfrom sklearn.pipeline import Pipeline\n\nfrom regression_model.config import config\nfrom regression_model import __version__ as _version\n\nimport logging\nimport typing as t\n\n\n_logger = logging.getLogger(__name__)\n\n\ndef load_dataset(*, file_name: str) -> pd.DataFrame:\n _data = pd.read_csv(f\"{config.DATASET_DIR}/{file_name}\")\n return _data\n\n\ndef save_pipeline(*, pipeline_to_persist) -> None:\n \"\"\"Persist the pipeline.\n Saves the versioned model, and overwrites any previous\n saved models. This ensures that when the package is\n published, there is only one trained model that can be\n called, and we know exactly how it was built.\n \"\"\"\n\n # Prepare versioned save file name\n save_file_name = f\"{config.PIPELINE_SAVE_FILE}{_version}.pkl\"\n save_path = config.TRAINED_MODEL_DIR / save_file_name\n\n remove_old_pipelines(files_to_keep=[save_file_name])\n joblib.dump(pipeline_to_persist, save_path)\n _logger.info(f\"saved pipeline: {save_file_name}\")\n\n\ndef load_pipeline(*, file_name: str) -> Pipeline:\n \"\"\"Load a persisted pipeline.\"\"\"\n\n file_path = config.TRAINED_MODEL_DIR / file_name\n trained_model = joblib.load(filename=file_path)\n return trained_model\n\n\ndef remove_old_pipelines(*, files_to_keep: t.List[str]) -> None:\n \"\"\"\n Remove old model pipelines.\n\n This is to ensure there is a simple one-to-one\n mapping between the package version and the model\n version to be imported and used by other applications.\n However, we do also include the immediate previous\n pipeline version for differential testing purposes.\n \"\"\"\n do_not_delete = files_to_keep + ['__init__.py']\n for model_file in config.TRAINED_MODEL_DIR.iterdir():\n if model_file.name not in do_not_delete:\n model_file.unlink()\n"
] |
[
[
"pandas.read_csv"
]
] |
ashok-arjun/prototypical-network-pytorch
|
[
"afe38c109194a850beffd0e893f0292c59baedb3"
] |
[
"samplers.py"
] |
[
"import torch\nimport numpy as np\n\n\nclass CategoriesSampler():\n \"\"\"\n CategoriesSampler\n \n Samples data points for the current batch. This is present to sample N-way (N-classes) and k shot + q query samples in every batch.\n This is called in every iteration of a single epoch. Hence its length is the number of episodes, which is equal to the number of batches.\n This returns the indices for the current batch, which are passed on to the __getitem__ of the dataloader to get the image and label.\n \n To check: \n 1. Why isn't this inheriting ```Sampler``` class from PyTorch?\n 2. The paper used RANDOMSAMPLE without replacement, but here it is done w/ replacement?\n \"\"\"\n def __init__(self, label, n_batch, n_cls, n_per):\n self.n_batch = n_batch\n self.n_cls = n_cls\n self.n_per = n_per\n\n label = np.array(label)\n self.m_ind = []\n for i in range(max(label) + 1):\n ind = np.argwhere(label == i).reshape(-1)\n ind = torch.from_numpy(ind)\n self.m_ind.append(ind)\n\n def __len__(self):\n return self.n_batch\n \n def __iter__(self):\n for i_batch in range(self.n_batch):\n batch = []\n classes = torch.randperm(len(self.m_ind))[:self.n_cls]\n for c in classes:\n l = self.m_ind[c]\n pos = torch.randperm(len(l))[:self.n_per]\n batch.append(l[pos])\n batch = torch.stack(batch).t().reshape(-1)\n yield batch\n\n"
] |
[
[
"numpy.array",
"torch.stack",
"numpy.argwhere",
"torch.from_numpy"
]
] |
GuanlinLee/SCNet
|
[
"f4c13f9afc113fa535566528fba776ee785a31d2",
"f4c13f9afc113fa535566528fba776ee785a31d2"
] |
[
"ASCAD/ASCAD_test_models_v1.py",
"ASCAD/ASCAD_test_models_v2.py"
] |
[
"import os.path\nimport sys\nimport h5py\nimport numpy as np\nimport matplotlib\nmatplotlib.use('agg')\nimport matplotlib.pyplot as plt\nfrom keras.models import load_model\nimport os\n\n\n\n\nos.environ['CUDA_VISIBLE_DEVICES'] = '0'\n\nAES_Sbox = np.array([\n 0x63, 0x7C, 0x77, 0x7B, 0xF2, 0x6B, 0x6F, 0xC5, 0x30, 0x01, 0x67, 0x2B, 0xFE, 0xD7, 0xAB, 0x76,\n 0xCA, 0x82, 0xC9, 0x7D, 0xFA, 0x59, 0x47, 0xF0, 0xAD, 0xD4, 0xA2, 0xAF, 0x9C, 0xA4, 0x72, 0xC0,\n 0xB7, 0xFD, 0x93, 0x26, 0x36, 0x3F, 0xF7, 0xCC, 0x34, 0xA5, 0xE5, 0xF1, 0x71, 0xD8, 0x31, 0x15,\n 0x04, 0xC7, 0x23, 0xC3, 0x18, 0x96, 0x05, 0x9A, 0x07, 0x12, 0x80, 0xE2, 0xEB, 0x27, 0xB2, 0x75,\n 0x09, 0x83, 0x2C, 0x1A, 0x1B, 0x6E, 0x5A, 0xA0, 0x52, 0x3B, 0xD6, 0xB3, 0x29, 0xE3, 0x2F, 0x84,\n 0x53, 0xD1, 0x00, 0xED, 0x20, 0xFC, 0xB1, 0x5B, 0x6A, 0xCB, 0xBE, 0x39, 0x4A, 0x4C, 0x58, 0xCF,\n 0xD0, 0xEF, 0xAA, 0xFB, 0x43, 0x4D, 0x33, 0x85, 0x45, 0xF9, 0x02, 0x7F, 0x50, 0x3C, 0x9F, 0xA8,\n 0x51, 0xA3, 0x40, 0x8F, 0x92, 0x9D, 0x38, 0xF5, 0xBC, 0xB6, 0xDA, 0x21, 0x10, 0xFF, 0xF3, 0xD2,\n 0xCD, 0x0C, 0x13, 0xEC, 0x5F, 0x97, 0x44, 0x17, 0xC4, 0xA7, 0x7E, 0x3D, 0x64, 0x5D, 0x19, 0x73,\n 0x60, 0x81, 0x4F, 0xDC, 0x22, 0x2A, 0x90, 0x88, 0x46, 0xEE, 0xB8, 0x14, 0xDE, 0x5E, 0x0B, 0xDB,\n 0xE0, 0x32, 0x3A, 0x0A, 0x49, 0x06, 0x24, 0x5C, 0xC2, 0xD3, 0xAC, 0x62, 0x91, 0x95, 0xE4, 0x79,\n 0xE7, 0xC8, 0x37, 0x6D, 0x8D, 0xD5, 0x4E, 0xA9, 0x6C, 0x56, 0xF4, 0xEA, 0x65, 0x7A, 0xAE, 0x08,\n 0xBA, 0x78, 0x25, 0x2E, 0x1C, 0xA6, 0xB4, 0xC6, 0xE8, 0xDD, 0x74, 0x1F, 0x4B, 0xBD, 0x8B, 0x8A,\n 0x70, 0x3E, 0xB5, 0x66, 0x48, 0x03, 0xF6, 0x0E, 0x61, 0x35, 0x57, 0xB9, 0x86, 0xC1, 0x1D, 0x9E,\n 0xE1, 0xF8, 0x98, 0x11, 0x69, 0xD9, 0x8E, 0x94, 0x9B, 0x1E, 0x87, 0xE9, 0xCE, 0x55, 0x28, 0xDF,\n 0x8C, 0xA1, 0x89, 0x0D, 0xBF, 0xE6, 0x42, 0x68, 0x41, 0x99, 0x2D, 0x0F, 0xB0, 0x54, 0xBB, 0x16\n ])\n\ndef check_file_exists(file_path):\n\tif os.path.exists(file_path) == False:\n\t\tprint(\"Error: provided file path '%s' does not exist!\" % file_path)\n\t\tsys.exit(-1)\n\treturn\n\ndef load_sca_model(model_file):\n\tcheck_file_exists(model_file)\n\ttry:\n\t\tmodel = load_model(model_file)\n\texcept:\n\t\tprint(\"Error: can't load Keras model file '%s'\" % model_file)\n\t\tsys.exit(-1)\n\treturn model\n\n# Compute the rank of the real key for a give set of predictions\ndef rank(predictions, metadata, real_key, min_trace_idx, max_trace_idx, last_key_bytes_proba):\n\t# Compute the rank\n\tif len(last_key_bytes_proba) == 0:\n\t\t# If this is the first rank we compute, initialize all the estimates to zero\n\t\tkey_bytes_proba = np.zeros(256)\n\telse:\n\t\t# This is not the first rank we compute: we optimize things by using the\n\t\t# previous computations to save time!\n\t\tkey_bytes_proba = last_key_bytes_proba\n\n\tfor p in range(0, max_trace_idx-min_trace_idx):\n\t\t# Go back from the class to the key byte. '2' is the index of the byte (third byte) of interest.\n\t\tplaintext = metadata[min_trace_idx + p]['plaintext'][2]\n\t\tfor i in range(0, 256):\n\t\t\t# Our candidate key byte probability is the sum of the predictions logs\n\t\t\tproba = predictions[p][AES_Sbox[plaintext ^ i]]\n\t\t\tif proba != 0:\n\t\t\t\tkey_bytes_proba[i] += np.log(proba)\n\t\t\telse:\n\t\t\t\t# We do not want an -inf here, put a very small epsilon\n\t\t\t\t# that correspondis to a power of our min non zero proba\n\t\t\t\tmin_proba_predictions = predictions[p][np.array(predictions[p]) != 0]\n\t\t\t\tif len(min_proba_predictions) == 0:\n\t\t\t\t\tprint(\"Error: got a prediction with only zeroes ... 
this should not happen!\")\n\t\t\t\t\tsys.exit(-1)\n\t\t\t\tmin_proba = min(min_proba_predictions)\n\t\t\t\tkey_bytes_proba[i] += np.log(min_proba**2)\n\t# Now we find where our real key candidate lies in the estimation.\n\t# We do this by sorting our estimates and find the rank in the sorted array.\n\tsorted_proba = np.array(list(map(lambda a : key_bytes_proba[a], key_bytes_proba.argsort()[::-1])))\n\treal_key_rank = np.where(sorted_proba == key_bytes_proba[real_key])[0][0]\n\treturn (real_key_rank, key_bytes_proba)\n\ndef full_ranks(model, dataset, metadata, min_trace_idx, max_trace_idx, rank_step):\n\t# Real key byte value that we will use. '2' is the index of the byte (third byte) of interest.\n\treal_key = metadata[0]['key'][2]\n\t# Check for overflow\n\tif max_trace_idx > dataset.shape[0]:\n\t\tprint(\"Error: asked trace index %d overflows the total traces number %d\" % (max_trace_idx, dataset.shape[0]))\n\t\tsys.exit(-1)\n\t# Get the input layer shape\n\tinput_layer_shape = model.get_layer(index=0).input_shape\n\t# Sanity check\n\tif input_layer_shape[1] != len(dataset[0, :]):\n\t\tprint(\"Error: model input shape %d instead of %d is not expected ...\" % (input_layer_shape[1], len(dataset[0, :])))\n\t\tsys.exit(-1)\n\t# Adapt the data shape according our model input\n\tif len(input_layer_shape) == 2:\n\t\t# This is a MLP\n\t\tinput_data = dataset[min_trace_idx:max_trace_idx, :]\n\telif len(input_layer_shape) == 3:\n\t\t# This is a CNN: reshape the data\n\t\tinput_data = dataset[min_trace_idx:max_trace_idx, :]\n\t\tinput_data = input_data.reshape((input_data.shape[0], input_data.shape[1], 1))\n\telse:\n\t\tprint(\"Error: model input shape length %d is not expected ...\" % len(input_layer_shape))\n\t\tsys.exit(-1)\n\n\t# Predict our probabilities\n\tpredictions = model.predict(input_data)\n\n\tindex = np.arange(min_trace_idx+rank_step, max_trace_idx, rank_step)\n\tf_ranks = np.zeros((len(index), 2), dtype=np.uint32)\n\tkey_bytes_proba = []\n\tfor t, i in zip(index, range(0, len(index))):\n\t\treal_key_rank, key_bytes_proba = rank(predictions[t-rank_step:t], metadata, real_key, t-rank_step, t, key_bytes_proba)\n\t\tf_ranks[i] = [t - min_trace_idx, real_key_rank]\n\treturn f_ranks\n\n\n#### ASCAD helper to load profiling and attack data (traces and labels)\n# Loads the profiling and attack datasets from the ASCAD\n# database\ndef load_ascad(ascad_database_file, load_metadata=False):\n\tcheck_file_exists(ascad_database_file)\n\t# Open the ASCAD database HDF5 for reading\n\ttry:\n\t\tin_file = h5py.File(ascad_database_file, \"r\")\n\texcept:\n\t\tprint(\"Error: can't open HDF5 file '%s' for reading (it might be malformed) ...\" % ascad_database_file)\n\t\tsys.exit(-1)\n\t# Load profiling traces\n\tX_profiling = np.array(in_file['Profiling_traces/traces'], dtype=np.int8)\n\t# Load profiling labels\n\tY_profiling = np.array(in_file['Profiling_traces/labels'])\n\t# Load attacking traces\n\tX_attack = np.array(in_file['Attack_traces/traces'], dtype=np.int8)\n\t# Load attacking labels\n\tY_attack = np.array(in_file['Attack_traces/labels'])\n\tif load_metadata == False:\n\t\treturn (X_profiling, Y_profiling), (X_attack, Y_attack)\n\telse:\n\t\treturn (X_profiling, Y_profiling), (X_attack, Y_attack), (in_file['Profiling_traces/metadata'], in_file['Attack_traces/metadata'])\n\n# Check a saved model against one of the ASCAD databases Attack traces\ndef check_model(model_file, ascad_database, num_traces=5000):\n\tcheck_file_exists(model_file)\n\tcheck_file_exists(ascad_database)\n\t# Load profiling 
and attack data and metadata from the ASCAD database\n\t(X_profiling, Y_profiling), (X_attack, Y_attack), (Metadata_profiling, Metadata_attack) = load_ascad(ascad_database, load_metadata=True)\n\t# Load model\n\tmodel = load_sca_model(model_file)\n\t# We test the rank over traces of the Attack dataset, with a step of 10 traces\n\tranks = full_ranks(model, X_attack, Metadata_attack, 0, num_traces, 10)\n\t# We plot the results\n\tx = [ranks[i][0] for i in range(0, ranks.shape[0])]\n\ty = [ranks[i][1] for i in range(0, ranks.shape[0])]\n\tplt.title(model_file.split('/')[-1])\n\tplt.xlabel('number of traces')\n\tplt.ylabel('rank')\n\tplt.grid(True)\n\tplt.plot(x, y)\n\tplt.savefig('Performance of '+model_file.split('/')[-1]+' against '+ascad_database.split('/')[-1]+'.png',transparent=True,bbox_inches= 'tight')\n\t#plt.show(block=False)\n\tplt.figure()\n\n# Our folders\nascad_data_folder = \"ASCAD_data/\"\nascad_databases_folder = ascad_data_folder + \"ASCAD_databases/\"\nascad_trained_models_folder = ascad_data_folder + \"ASCAD_trained_models/\"\n\nto_check_all = [\n\t(ascad_trained_models_folder + \"best_model_desync0_SCNet_v1.h5\", ascad_databases_folder + \"ASCAD.h5\"),\n\t(ascad_trained_models_folder + \"best_model_desync50_SCNet_v1.h5\", ascad_databases_folder + \"ASCAD_desync50.h5\"),\n\t(ascad_trained_models_folder + \"best_model_desync100_SCNet_v1.h5\", ascad_databases_folder + \"ASCAD_desync100.h5\"),\n\n\t]\n\n# No argument: check all the trained models\nif (len(sys.argv) == 1) or (len(sys.argv) == 2):\n\tif len(sys.argv) == 2:\n\t\tnum_traces = int(sys.argv[1])\n\telse:\n\t\tnum_traces = 5000\n\n\tfor (m, db) in to_check_all:\n\t\tcheck_model(m, db, num_traces)\n\nelse:\n\tif len(sys.argv) == 4:\n\t\tnum_traces = int(sys.argv[3])\n\telse:\n\t\tnum_traces = 5000\n\n\tcheck_model(sys.argv[1], sys.argv[2], num_traces)\n\ntry:\n\tinput(\"Press enter to exit ...\")\nexcept SyntaxError:\n\tpass\n\n",
"import os.path\nimport sys\nimport h5py\nimport numpy as np\nimport matplotlib\nmatplotlib.use('agg')\nimport matplotlib.pyplot as plt\nfrom keras.models import load_model\nimport os\n\n\n\n\nos.environ['CUDA_VISIBLE_DEVICES'] = '1'\n\nAES_Sbox = np.array([\n 0x63, 0x7C, 0x77, 0x7B, 0xF2, 0x6B, 0x6F, 0xC5, 0x30, 0x01, 0x67, 0x2B, 0xFE, 0xD7, 0xAB, 0x76,\n 0xCA, 0x82, 0xC9, 0x7D, 0xFA, 0x59, 0x47, 0xF0, 0xAD, 0xD4, 0xA2, 0xAF, 0x9C, 0xA4, 0x72, 0xC0,\n 0xB7, 0xFD, 0x93, 0x26, 0x36, 0x3F, 0xF7, 0xCC, 0x34, 0xA5, 0xE5, 0xF1, 0x71, 0xD8, 0x31, 0x15,\n 0x04, 0xC7, 0x23, 0xC3, 0x18, 0x96, 0x05, 0x9A, 0x07, 0x12, 0x80, 0xE2, 0xEB, 0x27, 0xB2, 0x75,\n 0x09, 0x83, 0x2C, 0x1A, 0x1B, 0x6E, 0x5A, 0xA0, 0x52, 0x3B, 0xD6, 0xB3, 0x29, 0xE3, 0x2F, 0x84,\n 0x53, 0xD1, 0x00, 0xED, 0x20, 0xFC, 0xB1, 0x5B, 0x6A, 0xCB, 0xBE, 0x39, 0x4A, 0x4C, 0x58, 0xCF,\n 0xD0, 0xEF, 0xAA, 0xFB, 0x43, 0x4D, 0x33, 0x85, 0x45, 0xF9, 0x02, 0x7F, 0x50, 0x3C, 0x9F, 0xA8,\n 0x51, 0xA3, 0x40, 0x8F, 0x92, 0x9D, 0x38, 0xF5, 0xBC, 0xB6, 0xDA, 0x21, 0x10, 0xFF, 0xF3, 0xD2,\n 0xCD, 0x0C, 0x13, 0xEC, 0x5F, 0x97, 0x44, 0x17, 0xC4, 0xA7, 0x7E, 0x3D, 0x64, 0x5D, 0x19, 0x73,\n 0x60, 0x81, 0x4F, 0xDC, 0x22, 0x2A, 0x90, 0x88, 0x46, 0xEE, 0xB8, 0x14, 0xDE, 0x5E, 0x0B, 0xDB,\n 0xE0, 0x32, 0x3A, 0x0A, 0x49, 0x06, 0x24, 0x5C, 0xC2, 0xD3, 0xAC, 0x62, 0x91, 0x95, 0xE4, 0x79,\n 0xE7, 0xC8, 0x37, 0x6D, 0x8D, 0xD5, 0x4E, 0xA9, 0x6C, 0x56, 0xF4, 0xEA, 0x65, 0x7A, 0xAE, 0x08,\n 0xBA, 0x78, 0x25, 0x2E, 0x1C, 0xA6, 0xB4, 0xC6, 0xE8, 0xDD, 0x74, 0x1F, 0x4B, 0xBD, 0x8B, 0x8A,\n 0x70, 0x3E, 0xB5, 0x66, 0x48, 0x03, 0xF6, 0x0E, 0x61, 0x35, 0x57, 0xB9, 0x86, 0xC1, 0x1D, 0x9E,\n 0xE1, 0xF8, 0x98, 0x11, 0x69, 0xD9, 0x8E, 0x94, 0x9B, 0x1E, 0x87, 0xE9, 0xCE, 0x55, 0x28, 0xDF,\n 0x8C, 0xA1, 0x89, 0x0D, 0xBF, 0xE6, 0x42, 0x68, 0x41, 0x99, 0x2D, 0x0F, 0xB0, 0x54, 0xBB, 0x16\n ])\n\ndef check_file_exists(file_path):\n\tif os.path.exists(file_path) == False:\n\t\tprint(\"Error: provided file path '%s' does not exist!\" % file_path)\n\t\tsys.exit(-1)\n\treturn\n\ndef load_sca_model(model_file):\n\tcheck_file_exists(model_file)\n\ttry:\n\t\tmodel = load_model(model_file)\n\texcept:\n\t\tprint(\"Error: can't load Keras model file '%s'\" % model_file)\n\t\tsys.exit(-1)\n\treturn model\n\n# Compute the rank of the real key for a give set of predictions\ndef rank(predictions, metadata, real_key, min_trace_idx, max_trace_idx, last_key_bytes_proba):\n\t# Compute the rank\n\tif len(last_key_bytes_proba) == 0:\n\t\t# If this is the first rank we compute, initialize all the estimates to zero\n\t\tkey_bytes_proba = np.zeros(256)\n\telse:\n\t\t# This is not the first rank we compute: we optimize things by using the\n\t\t# previous computations to save time!\n\t\tkey_bytes_proba = last_key_bytes_proba\n\n\tfor p in range(0, max_trace_idx-min_trace_idx):\n\t\t# Go back from the class to the key byte. '2' is the index of the byte (third byte) of interest.\n\t\tplaintext = metadata[min_trace_idx + p]['plaintext'][2]\n\t\tfor i in range(0, 256):\n\t\t\t# Our candidate key byte probability is the sum of the predictions logs\n\t\t\tproba = predictions[p][AES_Sbox[plaintext ^ i]]\n\t\t\tif proba != 0:\n\t\t\t\tkey_bytes_proba[i] += np.log(proba)\n\t\t\telse:\n\t\t\t\t# We do not want an -inf here, put a very small epsilon\n\t\t\t\t# that correspondis to a power of our min non zero proba\n\t\t\t\tmin_proba_predictions = predictions[p][np.array(predictions[p]) != 0]\n\t\t\t\tif len(min_proba_predictions) == 0:\n\t\t\t\t\tprint(\"Error: got a prediction with only zeroes ... 
this should not happen!\")\n\t\t\t\t\tsys.exit(-1)\n\t\t\t\tmin_proba = min(min_proba_predictions)\n\t\t\t\tkey_bytes_proba[i] += np.log(min_proba**2)\n\t# Now we find where our real key candidate lies in the estimation.\n\t# We do this by sorting our estimates and find the rank in the sorted array.\n\tsorted_proba = np.array(list(map(lambda a : key_bytes_proba[a], key_bytes_proba.argsort()[::-1])))\n\treal_key_rank = np.where(sorted_proba == key_bytes_proba[real_key])[0][0]\n\treturn (real_key_rank, key_bytes_proba)\n\ndef full_ranks(model, dataset, metadata, min_trace_idx, max_trace_idx, rank_step):\n\t# Real key byte value that we will use. '2' is the index of the byte (third byte) of interest.\n\treal_key = metadata[0]['key'][2]\n\t# Check for overflow\n\tif max_trace_idx > dataset.shape[0]:\n\t\tprint(\"Error: asked trace index %d overflows the total traces number %d\" % (max_trace_idx, dataset.shape[0]))\n\t\tsys.exit(-1)\n\t# Get the input layer shape\n\tinput_layer_shape = model.get_layer(index=0).input_shape\n\t# Sanity check\n\tif input_layer_shape[1] != len(dataset[0, :]):\n\t\tprint(\"Error: model input shape %d instead of %d is not expected ...\" % (input_layer_shape[1], len(dataset[0, :])))\n\t\tsys.exit(-1)\n\t# Adapt the data shape according our model input\n\tif len(input_layer_shape) == 2:\n\t\t# This is a MLP\n\t\tinput_data = dataset[min_trace_idx:max_trace_idx, :]\n\telif len(input_layer_shape) == 3:\n\t\t# This is a CNN: reshape the data\n\t\tinput_data = dataset[min_trace_idx:max_trace_idx, :]\n\t\tinput_data = input_data.reshape((input_data.shape[0], input_data.shape[1], 1))\n\telse:\n\t\tprint(\"Error: model input shape length %d is not expected ...\" % len(input_layer_shape))\n\t\tsys.exit(-1)\n\n\t# Predict our probabilities\n\tpredictions = model.predict(input_data)\n\n\tindex = np.arange(min_trace_idx+rank_step, max_trace_idx, rank_step)\n\tf_ranks = np.zeros((len(index), 2), dtype=np.uint32)\n\tkey_bytes_proba = []\n\tfor t, i in zip(index, range(0, len(index))):\n\t\treal_key_rank, key_bytes_proba = rank(predictions[t-rank_step:t], metadata, real_key, t-rank_step, t, key_bytes_proba)\n\t\tf_ranks[i] = [t - min_trace_idx, real_key_rank]\n\treturn f_ranks\n\n\n#### ASCAD helper to load profiling and attack data (traces and labels)\n# Loads the profiling and attack datasets from the ASCAD\n# database\ndef load_ascad(ascad_database_file, load_metadata=False):\n\tcheck_file_exists(ascad_database_file)\n\t# Open the ASCAD database HDF5 for reading\n\ttry:\n\t\tin_file = h5py.File(ascad_database_file, \"r\")\n\texcept:\n\t\tprint(\"Error: can't open HDF5 file '%s' for reading (it might be malformed) ...\" % ascad_database_file)\n\t\tsys.exit(-1)\n\t# Load profiling traces\n\tX_profiling = np.array(in_file['Profiling_traces/traces'], dtype=np.int8)\n\t# Load profiling labels\n\tY_profiling = np.array(in_file['Profiling_traces/labels'])\n\t# Load attacking traces\n\tX_attack = np.array(in_file['Attack_traces/traces'], dtype=np.int8)\n\t# Load attacking labels\n\tY_attack = np.array(in_file['Attack_traces/labels'])\n\tif load_metadata == False:\n\t\treturn (X_profiling, Y_profiling), (X_attack, Y_attack)\n\telse:\n\t\treturn (X_profiling, Y_profiling), (X_attack, Y_attack), (in_file['Profiling_traces/metadata'], in_file['Attack_traces/metadata'])\n\n# Check a saved model against one of the ASCAD databases Attack traces\ndef check_model(model_file, ascad_database, num_traces=5000):\n\tcheck_file_exists(model_file)\n\tcheck_file_exists(ascad_database)\n\t# Load profiling 
and attack data and metadata from the ASCAD database\n\t(X_profiling, Y_profiling), (X_attack, Y_attack), (Metadata_profiling, Metadata_attack) = load_ascad(ascad_database, load_metadata=True)\n\t# Load model\n\tmodel = load_sca_model(model_file)\n\t# We test the rank over traces of the Attack dataset, with a step of 10 traces\n\tranks = full_ranks(model, X_attack, Metadata_attack, 0, num_traces, 10)\n\t# We plot the results\n\tx = [ranks[i][0] for i in range(0, ranks.shape[0])]\n\ty = [ranks[i][1] for i in range(0, ranks.shape[0])]\n\tplt.title(model_file.split('/')[-1])\n\tplt.xlabel('number of traces')\n\tplt.ylabel('rank')\n\tplt.grid(True)\n\tplt.plot(x, y)\n\tplt.savefig('Performance of '+model_file.split('/')[-1]+' against '+ascad_database.split('/')[-1]+'.png',transparent=True,bbox_inches= 'tight')\n\t#plt.show(block=False)\n\tplt.figure()\n\n# Our folders\nascad_data_folder = \"ASCAD_data/\"\nascad_databases_folder = ascad_data_folder + \"ASCAD_databases/\"\nascad_trained_models_folder = ascad_data_folder + \"ASCAD_trained_models/\"\n\nto_check_all = [\n\t(ascad_trained_models_folder + \"best_model_desync0_SCNet_v2.h5\", ascad_databases_folder + \"ASCAD.h5\"),\n\t(ascad_trained_models_folder + \"best_model_desync50_SCNet_v2.h5\", ascad_databases_folder + \"ASCAD_desync50.h5\"),\n\t(ascad_trained_models_folder + \"best_model_desync100_SCNet_v2.h5\", ascad_databases_folder + \"ASCAD_desync100.h5\"),\n\n\t]\n\n# No argument: check all the trained models\nif (len(sys.argv) == 1) or (len(sys.argv) == 2):\n\tif len(sys.argv) == 2:\n\t\tnum_traces = int(sys.argv[1])\n\telse:\n\t\tnum_traces = 5000\n\n\tfor (m, db) in to_check_all:\n\t\tcheck_model(m, db, num_traces)\n\nelse:\n\tif len(sys.argv) == 4:\n\t\tnum_traces = int(sys.argv[3])\n\telse:\n\t\tnum_traces = 5000\n\n\tcheck_model(sys.argv[1], sys.argv[2], num_traces)\n\ntry:\n\tinput(\"Press enter to exit ...\")\nexcept SyntaxError:\n\tpass\n\n"
] |
[
[
"matplotlib.use",
"numpy.array",
"numpy.zeros",
"numpy.log",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.grid",
"matplotlib.pyplot.plot",
"matplotlib.pyplot.figure",
"numpy.where",
"numpy.arange",
"matplotlib.pyplot.ylabel"
],
[
"matplotlib.use",
"numpy.array",
"numpy.zeros",
"numpy.log",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.grid",
"matplotlib.pyplot.plot",
"matplotlib.pyplot.figure",
"numpy.where",
"numpy.arange",
"matplotlib.pyplot.ylabel"
]
] |
mostafajubayerkhan/Image-Captioning
|
[
"7545fe89448b597400a0c8dccea533d2795631e2"
] |
[
"model.py"
] |
[
"import torch\nimport torch.nn as nn\nimport torchvision.models as models\n\n'''\nEncoderCNN class provided by Udacity (utilizing Resnet)\nwas used as is by me, while attempting this project.\n'''\nclass EncoderCNN(nn.Module):\n def __init__(self, embed_size):\n super(EncoderCNN, self).__init__()\n resnet = models.resnet50(pretrained=True)\n for param in resnet.parameters():\n param.requires_grad_(False)\n \n modules = list(resnet.children())[:-1]\n self.resnet = nn.Sequential(*modules)\n self.embed = nn.Linear(resnet.fc.in_features, embed_size)\n\n def forward(self, images):\n features = self.resnet(images)\n features = features.view(features.size(0), -1)\n features = self.embed(features)\n return features\n \n\nclass DecoderRNN(nn.Module):\n def __init__(self, embed_size, hidden_size, vocab_size, num_layers=1):\n \n '''\n [See the diagram of the decoder in Notebook 1]\n The RNN needs to have 4 basic components :\n \n 1. Word Embedding layer : maps the captions to embedded word vector of embed_size.\n 2. LSTM layer : inputs( embedded feature vector from CNN , embedded word vector ).\n 3. Hidden layer : Takes LSTM output as input and maps it \n to (batch_size, caption length, hidden_size) tensor.\n 4. Linear layer : Maps the hidden layer output to the number of words\n we want as output, vocab_size.\n \n NOTE : I did not define any init_hidden method based on the discussion \n in the following thread in student hub.\n Hidden state defaults to zero when nothing is specified, \n thus not requiring the need to explicitly define init_hidden.\n \n [https://study-hall.udacity.com/rooms/community:nd891:682337-project-461/community:thread-11927138595-435532?contextType=room]\n \n '''\n \n super().__init__()\n\n '''\n vocab_size : size of the dictionary of embeddings, \n basically the number of tokens in the vocabulary(word2idx) \n for that batch of data.\n embed_size : the size of each embedding vector of captions\n '''\n self.word_embedding_layer = nn.Embedding(vocab_size, embed_size)\n \n \n '''\n LSTM layer parameters :\n \n input_size = embed_size \n hidden_size = hidden_size # number of units in hidden layer of LSTM \n num_layers = 1 # number of LSTM layers ( = 1, by default )\n batch_first = True # input , output need to have batch size as 1st dimension\n dropout = 0 # did not use dropout \n \n Other parameters were not changed from default values provided in the PyTorch implementation.\n '''\n self.lstm = nn.LSTM( input_size = embed_size, \n hidden_size = hidden_size, \n num_layers = num_layers, \n dropout = 0, \n batch_first=True\n )\n \n self.linear_fc = nn.Linear(hidden_size, vocab_size)\n\n \n def forward(self, features, captions):\n \n '''\n \n Arguments :\n \n For a forward pass, the instantiation of the RNNDecoder class\n receives as inputs 2 arguments :\n \n -> features : ouput of CNNEncoder having shape (batch_size, embed_size).\n -> captions : a PyTorch tensor corresponding to the last batch of captions \n having shape (batch_size, caption_length) .\n \n NOTE : Input parameters have first dimension as batch_size.\n \n '''\n \n # Discard the <end> word to avoid the following error in Notebook 1 : Step 4\n # (outputs.shape[1]==captions.shape[1]) condition won't be satisfied otherwise.\n # AssertionError: The shape of the decoder output is incorrect.\n captions = captions[:, :-1] \n \n # Pass image captions through the word_embeddings layer.\n # output shape : (batch_size, caption length , embed_size)\n captions = self.word_embedding_layer(captions)\n \n # Concatenate the feature 
vectors for image and captions.\n # Features shape : (batch_size, embed_size)\n # Word embeddings shape : (batch_size, caption length , embed_size)\n # output shape : (batch_size, caption length, embed_size)\n inputs = torch.cat((features.unsqueeze(1), captions), dim=1)\n \n # Get the output and hidden state by passing the lstm over our word embeddings\n # the hidden state is not used, so the returned value is denoted by _.\n # Input to LSTM : concatenated tensor(features, embeddings) and hidden state\n # output shape : (batch_size, caption length, hidden_size)\n outputs, _ = self.lstm(inputs)\n \n # output shape : (batch_size, caption length, vocab_size)\n # NOTE : First dimension of output shape is batch_size.\n outputs = self.linear_fc(outputs)\n \n return outputs\n\n \n def sample(self, inputs, states=None, max_len=20):\n '''\n Arguments : accepts pre-processed image tensor (inputs) \n Returns : predicted sentence (list of tensor ids of length max_len)\n \n Implementation details :\n \n features : (batch_size , embed_size) [ See Notebook 1 ]\n inputs = features.unsqueeze(1) : (batch_size , 1, embed_size) [ See Notebook 3 ]\n sample function is used for only 1 image at a time. Thus, batch_size = 1\n input shape : (1,1,embed_size)\n \n shape of LSTM output : (batch_size,caption length, hidden_size)\n The input has to be fed to the lstm till the <end> is reached.\n every time the input is fed to the lstm, caption of length 1 is produced by the RNNDecoder.\n Thus LSTM output shape : (1,1,hidden_size)\n LSTM output is linear_fc input. (This is wrong. See NOTE )\n shape of input : (1,1,hidden_size)\n shape of output : (1,1,vocab_size)\n \n NOTE :\n \n Even after training my model, I was getting as output a sequence of <unk> in [ Notebook 3 ].\n So, I looked at answers in Knowledge and discussions in Student Hub.\n \n The following thread in the student hub gave me intuition for :\n \n 1. Passing states as an input to the LSTM layer in the sample() function.\n 2. Squeezing the ouput of LSTM layer before passing it to the Linear layer.\n \n https://study-hall.udacity.com/rooms/community:nd891:682337-project-461/community:thread-11730509939-428812?contextType=room\n '''\n # The output of this function will be a Python list of integers,\n # indicating the corresponding token words in the dictionary.\n outputs = [] \n output_length = 0\n \n while (output_length != max_len+1):\n \n ''' LSTM layer '''\n # input : (1,1,embed_size)\n # output : (1,1,hidden_size)\n # States should be passed to LSTM on each iteration in order for it to recall the last word it produced.\n output, states = self.lstm(inputs,states)\n \n ''' Linear layer '''\n # input : (1,hidden_size)\n # output : (1,vocab_size)\n output = self.linear_fc(output.squeeze(dim = 1))\n _, predicted_index = torch.max(output, 1)\n \n # CUDA tensor has to be first converted to cpu and then to numpy.\n # Because numpy doesn't support CUDA ( GPU memory ) directly.\n # See this link for reference : https://discuss.pytorch.org/t/convert-to-numpy-cuda-variable/499\n outputs.append(predicted_index.cpu().numpy()[0].item())\n \n # <end> has index_value = 1 in the vocabulary [ Notebook 1 ]\n # This conditional statement helps to break out of the while loop,\n # as soon as the first <end> is encountered. 
Length of caption may be less than 20 at this point.\n if (predicted_index == 1):\n break\n \n # Prepare for next loop iteration \n # Embed the last predicted word to be the new input of the LSTM\n # To understand this step, again look at the diagram at end of [ Notebook 1 ]\n inputs = self.word_embedding_layer(predicted_index) \n inputs = inputs.unsqueeze(1)\n \n # To move to the next iteration of the while loop.\n output_length += 1\n\n return outputs"
] |
[
[
"torch.nn.Linear",
"torch.nn.LSTM",
"torch.max",
"torch.nn.Sequential",
"torch.nn.Embedding"
]
] |