Columns (each row describes one repository, with per-file lists):
    repo_name   string (lengths 6-130)
    hexsha      list
    file_path   list
    code        list
    apis        list
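Assuming the rows are exported as JSON Lines with exactly these field names (the file name rows.jsonl below is hypothetical, not part of the dataset), a minimal sketch for iterating over one repository's files could look like this:

import json

# Minimal sketch: read rows exported as JSON Lines, one object per line,
# with the fields repo_name, hexsha, file_path, code, apis.
# "rows.jsonl" is a hypothetical file name, not part of this dump.
with open("rows.jsonl", "r", encoding="utf-8") as f:
    for line in f:
        row = json.loads(line)
        # repo_name is a plain string; the other fields are lists aligned
        # by index, one entry per file taken from that repository.
        for path, code, api_list in zip(row["file_path"], row["code"], row["apis"]):
            print(row["repo_name"], path, len(code), api_list[:3])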
AustinSchuh/971-Robot-Code
[ "99abc66fd2d899c0bdab338dc6f57dc5def9be8d" ]
[ "frc971/control_loops/python/linear_system.py" ]
[ "#!/usr/bin/python3\n\nfrom aos.util.trapezoid_profile import TrapezoidProfile\nfrom frc971.control_loops.python import control_loop\nfrom frc971.control_loops.python import controls\nimport numpy\nfrom matplotlib import pylab\nimport glog\n\n\nclass LinearSystemParams(object):\n def __init__(self,\n name,\n motor,\n G,\n radius,\n mass,\n q_pos,\n q_vel,\n kalman_q_pos,\n kalman_q_vel,\n kalman_q_voltage,\n kalman_r_position,\n dt=0.00505):\n self.name = name\n self.motor = motor\n self.G = G\n self.radius = radius\n self.mass = mass\n self.q_pos = q_pos\n self.q_vel = q_vel\n self.kalman_q_pos = kalman_q_pos\n self.kalman_q_vel = kalman_q_vel\n self.kalman_q_voltage = kalman_q_voltage\n self.kalman_r_position = kalman_r_position\n self.dt = dt\n\n\nclass LinearSystem(control_loop.ControlLoop):\n def __init__(self, params, name='LinearSystem'):\n super(LinearSystem, self).__init__(name)\n self.params = params\n\n self.motor = params.motor\n\n # Gear ratio\n self.G = params.G\n self.radius = params.radius\n\n # Mass in kg\n self.mass = params.mass + self.motor.motor_inertia / (\n (self.G * self.radius)**2.0)\n\n # Control loop time step\n self.dt = params.dt\n\n # State is [position, velocity]\n # Input is [Voltage]\n C1 = self.motor.Kt / (\n self.G * self.G * self.radius * self.radius * self.motor.resistance\n * self.mass * self.motor.Kv)\n C2 = self.motor.Kt / (\n self.G * self.radius * self.motor.resistance * self.mass)\n\n self.A_continuous = numpy.matrix([[0, 1], [0, -C1]])\n\n # Start with the unmodified input\n self.B_continuous = numpy.matrix([[0], [C2]])\n glog.debug(repr(self.A_continuous))\n glog.debug(repr(self.B_continuous))\n\n self.C = numpy.matrix([[1, 0]])\n self.D = numpy.matrix([[0]])\n\n self.A, self.B = self.ContinuousToDiscrete(self.A_continuous,\n self.B_continuous, self.dt)\n\n controllability = controls.ctrb(self.A, self.B)\n glog.debug('Controllability of %d',\n numpy.linalg.matrix_rank(controllability))\n glog.debug('Mass: %f', self.mass)\n glog.debug('Stall force: %f',\n self.motor.stall_torque / self.G / self.radius)\n glog.debug('Stall acceleration: %f',\n self.motor.stall_torque / self.G / self.radius / self.mass)\n\n glog.debug('Free speed is %f',\n -self.B_continuous[1, 0] / self.A_continuous[1, 1] * 12.0)\n\n self.Q = numpy.matrix([[(1.0 / (self.params.q_pos**2.0)), 0.0],\n [0.0, (1.0 / (self.params.q_vel**2.0))]])\n\n self.R = numpy.matrix([[(1.0 / (12.0**2.0))]])\n self.K = controls.dlqr(self.A, self.B, self.Q, self.R)\n\n q_pos_ff = 0.005\n q_vel_ff = 1.0\n self.Qff = numpy.matrix([[(1.0 / (q_pos_ff**2.0)), 0.0],\n [0.0, (1.0 / (q_vel_ff**2.0))]])\n\n self.Kff = controls.TwoStateFeedForwards(self.B, self.Qff)\n\n glog.debug('K %s', repr(self.K))\n glog.debug('Poles are %s',\n repr(numpy.linalg.eig(self.A - self.B * self.K)[0]))\n\n self.Q = numpy.matrix([[(self.params.kalman_q_pos**2.0), 0.0],\n [0.0, (self.params.kalman_q_vel**2.0)]])\n\n self.R = numpy.matrix([[(self.params.kalman_r_position**2.0)]])\n\n self.KalmanGain, self.Q_steady = controls.kalman(\n A=self.A, B=self.B, C=self.C, Q=self.Q, R=self.R)\n\n glog.debug('Kal %s', repr(self.KalmanGain))\n\n # The box formed by U_min and U_max must encompass all possible values,\n # or else Austin's code gets angry.\n self.U_max = numpy.matrix([[12.0]])\n self.U_min = numpy.matrix([[-12.0]])\n\n self.InitializeState()\n\n\nclass IntegralLinearSystem(LinearSystem):\n def __init__(self, params, name='IntegralLinearSystem'):\n super(IntegralLinearSystem, self).__init__(params, name=name)\n\n 
self.A_continuous_unaugmented = self.A_continuous\n self.B_continuous_unaugmented = self.B_continuous\n\n self.A_continuous = numpy.matrix(numpy.zeros((3, 3)))\n self.A_continuous[0:2, 0:2] = self.A_continuous_unaugmented\n self.A_continuous[0:2, 2] = self.B_continuous_unaugmented\n\n self.B_continuous = numpy.matrix(numpy.zeros((3, 1)))\n self.B_continuous[0:2, 0] = self.B_continuous_unaugmented\n\n self.C_unaugmented = self.C\n self.C = numpy.matrix(numpy.zeros((1, 3)))\n self.C[0:1, 0:2] = self.C_unaugmented\n\n self.A, self.B = self.ContinuousToDiscrete(self.A_continuous,\n self.B_continuous, self.dt)\n\n self.Q = numpy.matrix([[(self.params.kalman_q_pos**2.0), 0.0, 0.0],\n [0.0, (self.params.kalman_q_vel**2.0), 0.0],\n [0.0, 0.0, (self.params.kalman_q_voltage\n **2.0)]])\n\n self.R = numpy.matrix([[(self.params.kalman_r_position**2.0)]])\n\n self.KalmanGain, self.Q_steady = controls.kalman(\n A=self.A, B=self.B, C=self.C, Q=self.Q, R=self.R)\n\n self.K_unaugmented = self.K\n self.K = numpy.matrix(numpy.zeros((1, 3)))\n self.K[0, 0:2] = self.K_unaugmented\n self.K[0, 2] = 1\n\n self.Kff = numpy.concatenate(\n (self.Kff, numpy.matrix(numpy.zeros((1, 1)))), axis=1)\n\n self.InitializeState()\n\n\ndef RunTest(plant,\n end_goal,\n controller,\n observer=None,\n duration=1.0,\n use_profile=True,\n kick_time=0.5,\n kick_magnitude=0.0,\n max_velocity=0.3,\n max_acceleration=10.0):\n \"\"\"Runs the plant with an initial condition and goal.\n\n Args:\n plant: plant object to use.\n end_goal: end_goal state.\n controller: LinearSystem object to get K from, or None if we should\n use plant.\n observer: LinearSystem object to use for the observer, or None if we\n should use the actual state.\n duration: float, time in seconds to run the simulation for.\n kick_time: float, time in seconds to kick the robot.\n kick_magnitude: float, disturbance in volts to apply.\n max_velocity: float, the max speed in m/s to profile.\n max_acceleration: float, the max acceleration in m/s/s to profile.\n \"\"\"\n t_plot = []\n x_plot = []\n v_plot = []\n a_plot = []\n x_goal_plot = []\n v_goal_plot = []\n x_hat_plot = []\n u_plot = []\n offset_plot = []\n\n if controller is None:\n controller = plant\n\n vbat = 12.0\n\n goal = numpy.concatenate((plant.X, numpy.matrix(numpy.zeros((1, 1)))),\n axis=0)\n\n profile = TrapezoidProfile(plant.dt)\n profile.set_maximum_acceleration(max_acceleration)\n profile.set_maximum_velocity(max_velocity)\n profile.SetGoal(goal[0, 0])\n\n U_last = numpy.matrix(numpy.zeros((1, 1)))\n iterations = int(duration / plant.dt)\n for i in range(iterations):\n t = i * plant.dt\n observer.Y = plant.Y\n observer.CorrectObserver(U_last)\n\n offset_plot.append(observer.X_hat[2, 0])\n x_hat_plot.append(observer.X_hat[0, 0])\n\n next_goal = numpy.concatenate(\n (profile.Update(end_goal[0, 0], end_goal[1, 0]),\n numpy.matrix(numpy.zeros((1, 1)))),\n axis=0)\n\n ff_U = controller.Kff * (next_goal - observer.A * goal)\n\n if use_profile:\n U_uncapped = controller.K * (goal - observer.X_hat) + ff_U\n x_goal_plot.append(goal[0, 0])\n v_goal_plot.append(goal[1, 0])\n else:\n U_uncapped = controller.K * (end_goal - observer.X_hat)\n x_goal_plot.append(end_goal[0, 0])\n v_goal_plot.append(end_goal[1, 0])\n\n U = U_uncapped.copy()\n U[0, 0] = numpy.clip(U[0, 0], -vbat, vbat)\n x_plot.append(plant.X[0, 0])\n\n if v_plot:\n last_v = v_plot[-1]\n else:\n last_v = 0\n\n v_plot.append(plant.X[1, 0])\n a_plot.append((v_plot[-1] - last_v) / plant.dt)\n\n u_offset = 0.0\n if t >= kick_time:\n u_offset = 
kick_magnitude\n plant.Update(U + u_offset)\n\n observer.PredictObserver(U)\n\n t_plot.append(t)\n u_plot.append(U[0, 0])\n\n ff_U -= U_uncapped - U\n goal = controller.A * goal + controller.B * ff_U\n\n if U[0, 0] != U_uncapped[0, 0]:\n profile.MoveCurrentState(\n numpy.matrix([[goal[0, 0]], [goal[1, 0]]]))\n\n glog.debug('Time: %f', t_plot[-1])\n glog.debug('goal_error %s', repr(end_goal - goal))\n glog.debug('error %s', repr(observer.X_hat - end_goal))\n\n pylab.subplot(3, 1, 1)\n pylab.plot(t_plot, x_plot, label='x')\n pylab.plot(t_plot, x_hat_plot, label='x_hat')\n pylab.plot(t_plot, x_goal_plot, label='x_goal')\n pylab.legend()\n\n pylab.subplot(3, 1, 2)\n pylab.plot(t_plot, u_plot, label='u')\n pylab.plot(t_plot, offset_plot, label='voltage_offset')\n pylab.legend()\n\n pylab.subplot(3, 1, 3)\n pylab.plot(t_plot, a_plot, label='a')\n pylab.legend()\n\n pylab.show()\n\n\ndef PlotStep(params, R, plant_params=None):\n \"\"\"Plots a step move to the goal.\n\n Args:\n params: LinearSystemParams for the controller and observer\n plant_params: LinearSystemParams for the plant. Defaults to params if\n plant_params is None.\n R: numpy.matrix(2, 1), the goal\"\"\"\n plant = LinearSystem(plant_params or params, params.name)\n controller = IntegralLinearSystem(params, params.name)\n observer = IntegralLinearSystem(params, params.name)\n\n # Test moving the system.\n initial_X = numpy.matrix([[0.0], [0.0]])\n augmented_R = numpy.matrix(numpy.zeros((3, 1)))\n augmented_R[0:2, :] = R\n RunTest(\n plant,\n end_goal=augmented_R,\n controller=controller,\n observer=observer,\n duration=2.0,\n use_profile=False,\n kick_time=1.0,\n kick_magnitude=0.0)\n\n\ndef PlotKick(params, R, plant_params=None):\n \"\"\"Plots a step motion with a kick at 1.0 seconds.\n\n Args:\n params: LinearSystemParams for the controller and observer\n plant_params: LinearSystemParams for the plant. Defaults to params if\n plant_params is None.\n R: numpy.matrix(2, 1), the goal\"\"\"\n plant = LinearSystem(plant_params or params, params.name)\n controller = IntegralLinearSystem(params, params.name)\n observer = IntegralLinearSystem(params, params.name)\n\n # Test moving the system.\n initial_X = numpy.matrix([[0.0], [0.0]])\n augmented_R = numpy.matrix(numpy.zeros((3, 1)))\n augmented_R[0:2, :] = R\n RunTest(\n plant,\n end_goal=augmented_R,\n controller=controller,\n observer=observer,\n duration=2.0,\n use_profile=False,\n kick_time=1.0,\n kick_magnitude=2.0)\n\n\ndef PlotMotion(params,\n R,\n max_velocity=0.3,\n max_acceleration=10.0,\n plant_params=None):\n \"\"\"Plots a trapezoidal motion.\n\n Args:\n params: LinearSystemParams for the controller and observer\n plant_params: LinearSystemParams for the plant. 
Defaults to params if\n plant_params is None.\n R: numpy.matrix(2, 1), the goal,\n max_velocity: float, The max velocity of the profile.\n max_acceleration: float, The max acceleration of the profile.\n \"\"\"\n plant = LinearSystem(plant_params or params, params.name)\n controller = IntegralLinearSystem(params, params.name)\n observer = IntegralLinearSystem(params, params.name)\n\n # Test moving the system.\n initial_X = numpy.matrix([[0.0], [0.0]])\n augmented_R = numpy.matrix(numpy.zeros((3, 1)))\n augmented_R[0:2, :] = R\n RunTest(\n plant,\n end_goal=augmented_R,\n controller=controller,\n observer=observer,\n duration=2.0,\n use_profile=True,\n max_velocity=max_velocity,\n max_acceleration=max_acceleration)\n\n\ndef WriteLinearSystem(params, plant_files, controller_files, year_namespaces):\n \"\"\"Writes out the constants for a linear system to a file.\n\n Args:\n params: list of LinearSystemParams or LinearSystemParams, the\n parameters defining the system.\n plant_files: list of strings, the cc and h files for the plant.\n controller_files: list of strings, the cc and h files for the integral\n controller.\n year_namespaces: list of strings, the namespace list to use.\n \"\"\"\n # Write the generated constants out to a file.\n linear_systems = []\n integral_linear_systems = []\n\n if type(params) is list:\n name = params[0].name\n for index, param in enumerate(params):\n linear_systems.append(LinearSystem(param, param.name + str(index)))\n integral_linear_systems.append(\n IntegralLinearSystem(param,\n 'Integral' + param.name + str(index)))\n else:\n name = params.name\n linear_systems.append(LinearSystem(params, params.name))\n integral_linear_systems.append(\n IntegralLinearSystem(params, 'Integral' + params.name))\n\n loop_writer = control_loop.ControlLoopWriter(\n name, linear_systems, namespaces=year_namespaces)\n loop_writer.AddConstant(\n control_loop.Constant('kFreeSpeed', '%f',\n linear_systems[0].motor.free_speed))\n loop_writer.AddConstant(\n control_loop.Constant('kOutputRatio', '%f',\n linear_systems[0].G * linear_systems[0].radius))\n loop_writer.AddConstant(\n control_loop.Constant('kRadius', '%f', linear_systems[0].radius))\n loop_writer.Write(plant_files[0], plant_files[1])\n\n integral_loop_writer = control_loop.ControlLoopWriter(\n 'Integral' + name, integral_linear_systems, namespaces=year_namespaces)\n integral_loop_writer.Write(controller_files[0], controller_files[1])\n" ]
[ [ "numpy.matrix", "numpy.linalg.matrix_rank", "numpy.zeros", "matplotlib.pylab.legend", "matplotlib.pylab.show", "matplotlib.pylab.subplot", "numpy.linalg.eig", "numpy.clip", "matplotlib.pylab.plot" ] ]
StuartMolnar/Whale-Optimization
[ "05ebebdb3c676768f8fe6a0e7e7d3c18f70162d2" ]
[ "venv/Lib/site-packages/fcmaes/advretry.py" ]
[ "# Copyright (c) Dietmar Wolz.\n#\n# This source code is licensed under the MIT license found in the\n# LICENSE file in the root directory.\n\nimport time\nimport os\nimport sys\nimport math\nimport random\nimport _pickle as cPickle\nimport bz2\nimport ctypes as ct\nimport numpy as np\nfrom numpy.linalg import norm\nfrom random import Random\nimport multiprocessing as mp\nfrom multiprocessing import Process\nfrom numpy.random import Generator, MT19937, SeedSequence\nfrom scipy.optimize import OptimizeResult, Bounds\n\nfrom fcmaes.retry import _convertBounds\nfrom fcmaes.optimizer import dtime, fitting, de_cma, logger\n\nos.environ['MKL_DEBUG_CPU_TYPE'] = '5'\nos.environ['MKL_NUM_THREADS'] = '1'\nos.environ['OPENBLAS_NUM_THREADS'] = '1'\n\ndef minimize(fun, \n bounds, \n value_limit = math.inf,\n num_retries = 5000,\n logger = None,\n workers = mp.cpu_count(),\n popsize = 31, \n min_evaluations = 1500, \n max_eval_fac = None, \n check_interval = 100,\n capacity = 500,\n stop_fitness = -math.inf,\n optimizer = None,\n statistic_num = 0,\n datafile = None\n ): \n \"\"\"Minimization of a scalar function of one or more variables using \n coordinated parallel CMA-ES retry.\n \n Parameters\n ----------\n fun : callable\n The objective function to be minimized.\n ``fun(x, *args) -> float``\n where ``x`` is an 1-D array with shape (n,) and ``args``\n is a tuple of the fixed parameters needed to completely\n specify the function.\n bounds : sequence or `Bounds`, optional\n Bounds on variables. There are two ways to specify the bounds:\n 1. Instance of the `scipy.Bounds` class.\n 2. Sequence of ``(min, max)`` pairs for each element in `x`. None\n is used to specify no bound.\n value_limit : float, optional\n Upper limit for optimized function values to be stored. \n This limit needs to be carefully set to a value which is seldom\n found by optimization retry to keep the store free of bad runs.\n The crossover offspring of bad parents can\n cause the algorithm to get stuck at local minima. \n num_retries : int, optional\n Number of optimization retries. \n logger : logger, optional\n logger for log output of the retry mechanism. If None, logging\n is switched off. Default is a logger which logs both to stdout and\n appends to a file ``optimizer.log``.\n workers : int, optional\n number of parallel processes used. Default is mp.cpu_count()\n popsize = int, optional\n CMA-ES population size used for all CMA-ES runs. \n Not used for differential evolution. \n Ignored if parameter optimizer is defined. \n min_evaluations : int, optional \n Initial limit of the number of function evaluations. Only used if optimizer is undefined, \n otherwise this setting is defined in the optimizer. \n max_eval_fac : int, optional\n Final limit of the number of function evaluations = max_eval_fac*min_evaluations\n check_interval : int, optional\n After ``check_interval`` runs the store is sorted and the evaluation limit\n is incremented by ``evals_step_size``\n capacity : int, optional\n capacity of the evaluation store. Higher value means broader search.\n stop_fitness : float, optional \n Limit for fitness value. optimization runs terminate if this value is reached. \n optimizer : optimizer.Optimizer, optional\n optimizer to use. Default is a sequence of differential evolution and CMA-ES.\n Since advanced retry sets the initial step size it works best if CMA-ES is \n used / in the sequence of optimizers. \n datafile, optional\n file to persist / retrieve the internal state of the optimizations. 
\n \n Returns\n -------\n res : scipy.OptimizeResult\n The optimization result is represented as an ``OptimizeResult`` object.\n Important attributes are: ``x`` the solution array, \n ``fun`` the best function value, ``nfev`` the number of function evaluations,\n ``success`` a Boolean flag indicating if the optimizer exited successfully. \"\"\"\n\n if optimizer is None:\n optimizer = de_cma(min_evaluations, popsize, stop_fitness) \n if max_eval_fac is None:\n max_eval_fac = int(min(50, 1 + num_retries // check_interval))\n store = Store(bounds, max_eval_fac, check_interval, capacity, logger, num_retries, \n statistic_num, datafile)\n if not datafile is None:\n try:\n store.load(datafile)\n except:\n pass\n return retry(fun, store, optimizer.minimize, num_retries, value_limit, workers, stop_fitness)\n\ndef retry(fun, store, optimize, num_retries, value_limit = math.inf, \n workers=mp.cpu_count(), stop_fitness = -math.inf):\n sg = SeedSequence()\n rgs = [Generator(MT19937(s)) for s in sg.spawn(workers)]\n proc=[Process(target=_retry_loop,\n args=(pid, rgs, fun, store, optimize, num_retries, value_limit, stop_fitness)) for pid in range(workers)]\n [p.start() for p in proc]\n [p.join() for p in proc]\n store.sort()\n store.dump()\n return OptimizeResult(x=store.get_x_best(), fun=store.get_y_best(), \n nfev=store.get_count_evals(), success=True)\n \nclass Store(object):\n \"\"\"thread safe storage for optimization retry results; \n delivers boundary and initial step size vectors for advanced retry crossover operation.\"\"\"\n \n def __init__(self, \n bounds, # bounds of the objective function arguments\n max_eval_fac = None, # maximal number of evaluations factor\n check_interval = 100, # sort evaluation store after check_interval iterations\n capacity = 500, # capacity of the evaluation store\n logger = None, # if None logging is switched off\n num_retries = None,\n statistic_num = 0,\n datafile = None\n ):\n\n self.lower, self.upper = _convertBounds(bounds)\n self.delta = self.upper - self.lower\n self.logger = logger \n self.capacity = capacity\n if max_eval_fac is None:\n if num_retries is None:\n max_eval_fac = 50\n else:\n max_eval_fac = int(min(50, 1 + num_retries // check_interval))\n if num_retries == None:\n num_retries = max_eval_fac * check_interval\n # increment eval_fac so that max_eval_fac is reached at last retry\n self.eval_fac_incr = max_eval_fac / (num_retries/check_interval)\n self.max_eval_fac = max_eval_fac\n self.check_interval = check_interval \n self.dim = len(self.lower)\n self.random = Random()\n self.t0 = time.perf_counter()\n \n #shared between processes\n self.add_mutex = mp.Lock() \n self.check_mutex = mp.Lock() \n self.xs = mp.RawArray(ct.c_double, capacity * self.dim)\n self.ys = mp.RawArray(ct.c_double, capacity) \n self.eval_fac = mp.RawValue(ct.c_double, 1)\n self.count_evals = mp.RawValue(ct.c_long, 0) \n self.count_runs = mp.RawValue(ct.c_int, 0) \n self.num_stored = mp.RawValue(ct.c_int, 0) \n self.num_sorted = mp.RawValue(ct.c_int, 0) \n self.best_y = mp.RawValue(ct.c_double, math.inf) \n self.worst_y = mp.RawValue(ct.c_double, math.inf) \n self.best_x = mp.RawArray(ct.c_double, self.dim)\n self.statistic_num = statistic_num\n self.datafile = datafile\n \n if statistic_num > 0: # enable statistics \n self.statistic_num = 1000\n self.time = mp.RawArray(ct.c_double, self.statistic_num)\n self.val = mp.RawArray(ct.c_double, self.statistic_num)\n self.si = mp.RawValue(ct.c_int, 0)\n\n # persist store\n def save(self, name):\n with bz2.BZ2File(name + '.pbz2', 
'w') as f: \n cPickle.dump(self.get_data(), f)\n\n def load(self, name):\n data = cPickle.load(bz2.BZ2File(name + '.pbz2', 'rb'))\n self.set_data(data)\n \n def get_data(self):\n data = []\n data.append(self.get_xs())\n data.append(self.get_ys())\n data.append(self.get_x_best())\n data.append(self.get_y_best())\n data.append(self.num_stored.value)\n return data\n \n def set_data(self, data):\n xs = data[0]\n ys = data[1]\n for i in range(len(ys)):\n self.replace(i, ys[i], xs[i])\n self.best_x[:] = data[2][:]\n self.best_y.value = data[3]\n self.num_stored.value = data[4]\n self.sort()\n \n # store improvement - time and value\n def add_statistics(self):\n if self.statistic_num > 0:\n si = self.si.value\n if si < self.statistic_num - 1:\n self.si.value = si + 1\n self.time[si] = dtime(self.t0)\n self.val[si] = self.best_y.value \n \n def get_improvements(self):\n return zip(self.time[:self.si.value], self.val[:self.si.value])\n \n # get num best values at evenly distributed times\n def get_statistics(self, num):\n ts = self.time[:self.si.value]\n vs = self.val[:self.si.value]\n mt = ts[-1]\n dt = 0.9999999 * mt / num\n stats = []\n ti = 0\n val = vs[0]\n for i in range(num):\n while ts[ti] < (i+1) * dt:\n ti += 1\n val = vs[ti]\n stats.append(val)\n return stats\n \n def eval_num(self, max_evals):\n return int(self.eval_fac.value * max_evals)\n \n def limits(self): \n \"\"\"guess, boundaries and initial step size for crossover operation.\"\"\"\n diff_fac = self.random.uniform(0.5, 1.0)\n lim_fac = self.random.uniform(2.0, 4.0) * diff_fac\n with self.add_mutex:\n i, j = self.crossover()\n if i < 0:\n return math.inf, None, None, None, None\n x0 = np.asarray(self.get_x(i))\n x1 = np.asarray(self.get_x(j))\n y0 = np.asarray(self.get_y(i))\n \n deltax = np.abs(x1 - x0)\n delta_bound = np.maximum(0.0001, lim_fac * deltax)\n lower = np.maximum(self.lower, x0 - delta_bound)\n upper = np.minimum(self.upper, x0 + delta_bound)\n sdev = np.maximum(0.001, np.minimum(0.5, diff_fac * deltax / self.delta)) \n return y0, x1, lower, upper, sdev\n \n def distance(self, xprev, x): \n \"\"\"distance between entries in store.\"\"\"\n return norm((x - xprev) / self.delta) / math.sqrt(self.dim)\n \n def replace(self, i, y, xs):\n \"\"\"replace entry in store.\"\"\"\n self.set_y(i, y)\n self.set_x(i, xs)\n \n def crossover(self): # Choose two good entries for recombination\n \"\"\"indices of store entries to be used for crossover operation.\"\"\"\n n = self.num_sorted.value\n if n < 2:\n return -1, -1\n lim = self.random.uniform(min(0.1*n, 1), 0.2*n)/n\n for _ in range(100):\n i1 = -1\n i2 = -1\n for j in range(n):\n if self.random.random() < lim:\n if i1 < 0:\n i1 = j\n else:\n i2 = j\n return i1, i2\n return -1, -1\n\n def sort(self): \n \"\"\"sorts all store entries, keep only the 90% best to make room for new ones;\n skip entries having similar x values than their neighbors to preserve diversity\"\"\"\n ns = self.num_stored.value\n if ns < 2:\n return\n\n ys = np.asarray(self.ys[:ns])\n yi = ys.argsort()\n sortRuns = []\n\n xprev = xprev2 = None\n for i in range(ns):\n y = ys[yi[i]]\n x = np.asarray(self.get_x(yi[i]))\n if (xprev is None or self.distance(xprev, x) > 0.15) and \\\n (xprev2 is None or self.distance(xprev2, x) > 0.15): \n sortRuns.append( (y, x) )\n xprev2 = xprev\n xprev = x\n\n numStored = min(len(sortRuns),int(0.9*self.capacity)) # keep 90% best \n for i in range(numStored):\n self.replace(i, sortRuns[i][0], sortRuns[i][1])\n self.num_sorted.value = numStored \n self.num_stored.value = 
numStored \n self.worst_y.value = self.get_y(numStored-1)\n return numStored \n\n def add_result(self, y, xs, lower, upper, evals, limit=math.inf):\n \"\"\"registers an optimization result at the store.\"\"\"\n with self.add_mutex:\n self.incr_count_evals(evals)\n if y < limit:\n if y < self.best_y.value:\n self.best_y.value = y\n self.best_x[:] = xs[:]\n self.add_statistics()\n self.dump()\n if not self.datafile is None:\n self.save(self.datafile)\n\n if self.num_stored.value >= self.capacity - 1:\n self.sort()\n ns = self.num_stored.value\n self.num_stored.value = ns + 1\n self.replace(ns, y, xs)\n \n def get_x(self, pid):\n return self.xs[pid*self.dim:(pid+1)*self.dim]\n\n def get_xs(self):\n return [self.get_x(i) for i in range(self.num_stored.value)]\n\n def get_x_best(self):\n return self.best_x[:]\n\n def get_y(self, pid):\n return self.ys[pid]\n\n def get_ys(self):\n return self.ys[:self.num_stored.value]\n\n def get_y_best(self):\n return self.best_y.value\n\n def get_count_evals(self):\n return self.count_evals.value\n \n def get_count_runs(self):\n return self.count_runs.value\n\n def set_x(self, pid, xs):\n self.xs[pid*self.dim:(pid+1)*self.dim] = xs[:]\n\n def set_y(self, pid, y):\n self.ys[pid] = y \n\n def get_runs_compare_incr(self, limit):\n with self.add_mutex:\n if self.count_runs.value < limit:\n self.count_runs.value += 1\n return True\n else:\n return False \n\n def incr_count_evals(self, evals):\n \"\"\"registers the number of evaluations of an optimization run; \n trigger sorting after check_interval calls. \"\"\"\n if self.count_runs.value % self.check_interval == self.check_interval-1:\n if self.eval_fac.value < self.max_eval_fac:\n self.eval_fac.value += self.eval_fac_incr\n #print(self.eval_fac.value)\n self.sort()\n self.count_evals.value += evals\n\n def dump(self):\n \"\"\"logs the current status of the store if logger defined.\"\"\"\n if self.logger is None:\n return\n Ys = self.get_ys()\n vals = []\n for i in range(min(20, len(Ys))):\n vals.append(round(Ys[i],2)) \n dt = dtime(self.t0) \n message = '{0} {1} {2} {3} {4:.6f} {5:.2f} {6} {7} {8!s} {9!s}'.format(\n dt, int(self.count_evals.value / dt), self.count_runs.value, self.count_evals.value, \n self.best_y.value, self.worst_y.value, self.num_stored.value, int(self.eval_fac.value), \n vals, self.best_x[:])\n self.logger.info(message)\n \ndef _retry_loop(pid, rgs, fun, store, optimize, num_retries, value_limit, stop_fitness = -math.inf):\n \n #reinitialize logging config for windows - multi threading fix\n if 'win' in sys.platform and not store.logger is None:\n store.logger = logger()\n \n while store.get_runs_compare_incr(num_retries) and store.best_y.value > stop_fitness: \n if _crossover(fun, store, optimize, rgs[pid]):\n continue\n try:\n dim = len(store.lower)\n sol, y, evals = optimize(fun, Bounds(store.lower, store.upper), None, \n [random.uniform(0.05, 0.1)]*dim, rgs[pid], store)\n store.add_result(y, sol, store.lower, store.upper, evals, value_limit)\n except Exception as ex:\n continue\n# if pid == 0:\n# store.dump()\n \ndef _crossover(fun, store, optimize, rg):\n if random.random() < 0.5:\n return False\n y0, guess, lower, upper, sdev = store.limits()\n if guess is None:\n return False\n guess = fitting(guess, lower, upper) # take X from lower\n try: \n sol, y, evals = optimize(fun, Bounds(lower, upper), guess, sdev, rg, store)\n store.add_result(y, sol, lower, upper, evals, y0) # limit to y0 \n except:\n return False \n return True\n" ]
[ [ "numpy.linalg.norm", "numpy.asarray", "scipy.optimize.Bounds", "numpy.minimum", "numpy.random.MT19937", "numpy.abs", "numpy.random.SeedSequence", "numpy.maximum" ] ]
jamesholcombe/pandas
[ "247e8f3831c93dfeb83e3f410ea375d0537ea989" ]
[ "pandas/tests/window/test_rolling.py" ]
[ "from datetime import (\n datetime,\n timedelta,\n)\n\nimport numpy as np\nimport pytest\n\nfrom pandas.compat import (\n is_platform_arm,\n is_platform_mac,\n)\nfrom pandas.errors import UnsupportedFunctionCall\n\nfrom pandas import (\n DataFrame,\n DatetimeIndex,\n MultiIndex,\n Series,\n Timedelta,\n Timestamp,\n date_range,\n period_range,\n to_datetime,\n to_timedelta,\n)\nimport pandas._testing as tm\nfrom pandas.api.indexers import BaseIndexer\nfrom pandas.core.window import Rolling\n\n\ndef test_doc_string():\n\n df = DataFrame({\"B\": [0, 1, 2, np.nan, 4]})\n df\n df.rolling(2).sum()\n df.rolling(2, min_periods=1).sum()\n\n\ndef test_constructor(frame_or_series):\n # GH 12669\n\n c = frame_or_series(range(5)).rolling\n\n # valid\n c(0)\n c(window=2)\n c(window=2, min_periods=1)\n c(window=2, min_periods=1, center=True)\n c(window=2, min_periods=1, center=False)\n\n # GH 13383\n\n msg = \"window must be an integer 0 or greater\"\n\n with pytest.raises(ValueError, match=msg):\n c(-1)\n\n\[email protected](\"w\", [2.0, \"foo\", np.array([2])])\ndef test_invalid_constructor(frame_or_series, w):\n # not valid\n\n c = frame_or_series(range(5)).rolling\n\n msg = (\n \"window must be an integer|\"\n \"passed window foo is not compatible with a datetimelike index\"\n )\n with pytest.raises(ValueError, match=msg):\n c(window=w)\n\n msg = \"min_periods must be an integer\"\n with pytest.raises(ValueError, match=msg):\n c(window=2, min_periods=w)\n\n msg = \"center must be a boolean\"\n with pytest.raises(ValueError, match=msg):\n c(window=2, min_periods=1, center=w)\n\n\[email protected](\"window\", [timedelta(days=3), Timedelta(days=3)])\ndef test_constructor_with_timedelta_window(window):\n # GH 15440\n n = 10\n df = DataFrame(\n {\"value\": np.arange(n)},\n index=date_range(\"2015-12-24\", periods=n, freq=\"D\"),\n )\n expected_data = np.append([0.0, 1.0], np.arange(3.0, 27.0, 3))\n\n result = df.rolling(window=window).sum()\n expected = DataFrame(\n {\"value\": expected_data},\n index=date_range(\"2015-12-24\", periods=n, freq=\"D\"),\n )\n tm.assert_frame_equal(result, expected)\n expected = df.rolling(\"3D\").sum()\n tm.assert_frame_equal(result, expected)\n\n\[email protected](\"window\", [timedelta(days=3), Timedelta(days=3), \"3D\"])\ndef test_constructor_timedelta_window_and_minperiods(window, raw):\n # GH 15305\n n = 10\n df = DataFrame(\n {\"value\": np.arange(n)},\n index=date_range(\"2017-08-08\", periods=n, freq=\"D\"),\n )\n expected = DataFrame(\n {\"value\": np.append([np.NaN, 1.0], np.arange(3.0, 27.0, 3))},\n index=date_range(\"2017-08-08\", periods=n, freq=\"D\"),\n )\n result_roll_sum = df.rolling(window=window, min_periods=2).sum()\n result_roll_generic = df.rolling(window=window, min_periods=2).apply(sum, raw=raw)\n tm.assert_frame_equal(result_roll_sum, expected)\n tm.assert_frame_equal(result_roll_generic, expected)\n\n\[email protected](\"method\", [\"std\", \"mean\", \"sum\", \"max\", \"min\", \"var\"])\ndef test_numpy_compat(method):\n # see gh-12811\n r = Rolling(Series([2, 4, 6]), window=2)\n\n msg = \"numpy operations are not valid with window objects\"\n\n with pytest.raises(UnsupportedFunctionCall, match=msg):\n getattr(r, method)(1, 2, 3)\n with pytest.raises(UnsupportedFunctionCall, match=msg):\n getattr(r, method)(dtype=np.float64)\n\n\ndef test_closed_fixed(closed, arithmetic_win_operators):\n # GH 34315\n func_name = arithmetic_win_operators\n df_fixed = DataFrame({\"A\": [0, 1, 2, 3, 4]})\n df_time = DataFrame({\"A\": [0, 1, 2, 3, 4]}, 
index=date_range(\"2020\", periods=5))\n\n result = getattr(\n df_fixed.rolling(2, closed=closed, min_periods=1),\n func_name,\n )()\n expected = getattr(\n df_time.rolling(\"2D\", closed=closed, min_periods=1),\n func_name,\n )().reset_index(drop=True)\n\n tm.assert_frame_equal(result, expected)\n\n\[email protected](\n \"closed, window_selections\",\n [\n (\n \"both\",\n [\n [True, True, False, False, False],\n [True, True, True, False, False],\n [False, True, True, True, False],\n [False, False, True, True, True],\n [False, False, False, True, True],\n ],\n ),\n (\n \"left\",\n [\n [True, False, False, False, False],\n [True, True, False, False, False],\n [False, True, True, False, False],\n [False, False, True, True, False],\n [False, False, False, True, True],\n ],\n ),\n (\n \"right\",\n [\n [True, True, False, False, False],\n [False, True, True, False, False],\n [False, False, True, True, False],\n [False, False, False, True, True],\n [False, False, False, False, True],\n ],\n ),\n (\n \"neither\",\n [\n [True, False, False, False, False],\n [False, True, False, False, False],\n [False, False, True, False, False],\n [False, False, False, True, False],\n [False, False, False, False, True],\n ],\n ),\n ],\n)\ndef test_datetimelike_centered_selections(\n closed, window_selections, arithmetic_win_operators\n):\n # GH 34315\n func_name = arithmetic_win_operators\n df_time = DataFrame(\n {\"A\": [0.0, 1.0, 2.0, 3.0, 4.0]}, index=date_range(\"2020\", periods=5)\n )\n\n expected = DataFrame(\n {\"A\": [getattr(df_time[\"A\"].iloc[s], func_name)() for s in window_selections]},\n index=date_range(\"2020\", periods=5),\n )\n\n if func_name == \"sem\":\n kwargs = {\"ddof\": 0}\n else:\n kwargs = {}\n\n result = getattr(\n df_time.rolling(\"2D\", closed=closed, min_periods=1, center=True),\n func_name,\n )(**kwargs)\n\n tm.assert_frame_equal(result, expected, check_dtype=False)\n\n\[email protected](\n \"window,closed,expected\",\n [\n (\"3s\", \"right\", [3.0, 3.0, 3.0]),\n (\"3s\", \"both\", [3.0, 3.0, 3.0]),\n (\"3s\", \"left\", [3.0, 3.0, 3.0]),\n (\"3s\", \"neither\", [3.0, 3.0, 3.0]),\n (\"2s\", \"right\", [3.0, 2.0, 2.0]),\n (\"2s\", \"both\", [3.0, 3.0, 3.0]),\n (\"2s\", \"left\", [1.0, 3.0, 3.0]),\n (\"2s\", \"neither\", [1.0, 2.0, 2.0]),\n ],\n)\ndef test_datetimelike_centered_offset_covers_all(\n window, closed, expected, frame_or_series\n):\n # GH 42753\n\n index = [\n Timestamp(\"20130101 09:00:01\"),\n Timestamp(\"20130101 09:00:02\"),\n Timestamp(\"20130101 09:00:02\"),\n ]\n df = frame_or_series([1, 1, 1], index=index)\n\n result = df.rolling(window, closed=closed, center=True).sum()\n expected = frame_or_series(expected, index=index)\n tm.assert_equal(result, expected)\n\n\[email protected](\n \"window,closed,expected\",\n [\n (\"2D\", \"right\", [4, 4, 4, 4, 4, 4, 2, 2]),\n (\"2D\", \"left\", [2, 2, 4, 4, 4, 4, 4, 4]),\n (\"2D\", \"both\", [4, 4, 6, 6, 6, 6, 4, 4]),\n (\"2D\", \"neither\", [2, 2, 2, 2, 2, 2, 2, 2]),\n ],\n)\ndef test_datetimelike_nonunique_index_centering(\n window, closed, expected, frame_or_series\n):\n index = DatetimeIndex(\n [\n \"2020-01-01\",\n \"2020-01-01\",\n \"2020-01-02\",\n \"2020-01-02\",\n \"2020-01-03\",\n \"2020-01-03\",\n \"2020-01-04\",\n \"2020-01-04\",\n ]\n )\n\n df = frame_or_series([1] * 8, index=index, dtype=float)\n expected = frame_or_series(expected, index=index, dtype=float)\n\n result = df.rolling(window, center=True, closed=closed).sum()\n\n tm.assert_equal(result, expected)\n\n\ndef test_even_number_window_alignment():\n # see 
discussion in GH 38780\n s = Series(range(3), index=date_range(start=\"2020-01-01\", freq=\"D\", periods=3))\n\n # behavior of index- and datetime-based windows differs here!\n # s.rolling(window=2, min_periods=1, center=True).mean()\n\n result = s.rolling(window=\"2D\", min_periods=1, center=True).mean()\n\n expected = Series([0.5, 1.5, 2], index=s.index)\n\n tm.assert_series_equal(result, expected)\n\n\ndef test_closed_fixed_binary_col(center):\n # GH 34315\n data = [0, 1, 1, 0, 0, 1, 0, 1]\n df = DataFrame(\n {\"binary_col\": data},\n index=date_range(start=\"2020-01-01\", freq=\"min\", periods=len(data)),\n )\n\n if center:\n expected_data = [2 / 3, 0.5, 0.4, 0.5, 0.428571, 0.5, 0.571429, 0.5]\n else:\n expected_data = [np.nan, 0, 0.5, 2 / 3, 0.5, 0.4, 0.5, 0.428571]\n\n expected = DataFrame(\n expected_data,\n columns=[\"binary_col\"],\n index=date_range(start=\"2020-01-01\", freq=\"min\", periods=len(expected_data)),\n )\n\n rolling = df.rolling(window=len(df), closed=\"left\", min_periods=1, center=center)\n result = rolling.mean()\n tm.assert_frame_equal(result, expected)\n\n\[email protected](\"closed\", [\"neither\", \"left\"])\ndef test_closed_empty(closed, arithmetic_win_operators):\n # GH 26005\n func_name = arithmetic_win_operators\n ser = Series(data=np.arange(5), index=date_range(\"2000\", periods=5, freq=\"2D\"))\n roll = ser.rolling(\"1D\", closed=closed)\n\n result = getattr(roll, func_name)()\n expected = Series([np.nan] * 5, index=ser.index)\n tm.assert_series_equal(result, expected)\n\n\[email protected](\"func\", [\"min\", \"max\"])\ndef test_closed_one_entry(func):\n # GH24718\n ser = Series(data=[2], index=date_range(\"2000\", periods=1))\n result = getattr(ser.rolling(\"10D\", closed=\"left\"), func)()\n tm.assert_series_equal(result, Series([np.nan], index=ser.index))\n\n\[email protected](\"func\", [\"min\", \"max\"])\ndef test_closed_one_entry_groupby(func):\n # GH24718\n ser = DataFrame(\n data={\"A\": [1, 1, 2], \"B\": [3, 2, 1]},\n index=date_range(\"2000\", periods=3),\n )\n result = getattr(\n ser.groupby(\"A\", sort=False)[\"B\"].rolling(\"10D\", closed=\"left\"), func\n )()\n exp_idx = MultiIndex.from_arrays(arrays=[[1, 1, 2], ser.index], names=(\"A\", None))\n expected = Series(data=[np.nan, 3, np.nan], index=exp_idx, name=\"B\")\n tm.assert_series_equal(result, expected)\n\n\[email protected](\"input_dtype\", [\"int\", \"float\"])\[email protected](\n \"func,closed,expected\",\n [\n (\"min\", \"right\", [0.0, 0, 0, 1, 2, 3, 4, 5, 6, 7]),\n (\"min\", \"both\", [0.0, 0, 0, 0, 1, 2, 3, 4, 5, 6]),\n (\"min\", \"neither\", [np.nan, 0, 0, 1, 2, 3, 4, 5, 6, 7]),\n (\"min\", \"left\", [np.nan, 0, 0, 0, 1, 2, 3, 4, 5, 6]),\n (\"max\", \"right\", [0.0, 1, 2, 3, 4, 5, 6, 7, 8, 9]),\n (\"max\", \"both\", [0.0, 1, 2, 3, 4, 5, 6, 7, 8, 9]),\n (\"max\", \"neither\", [np.nan, 0, 1, 2, 3, 4, 5, 6, 7, 8]),\n (\"max\", \"left\", [np.nan, 0, 1, 2, 3, 4, 5, 6, 7, 8]),\n ],\n)\ndef test_closed_min_max_datetime(input_dtype, func, closed, expected):\n # see gh-21704\n ser = Series(\n data=np.arange(10).astype(input_dtype),\n index=date_range(\"2000\", periods=10),\n )\n\n result = getattr(ser.rolling(\"3D\", closed=closed), func)()\n expected = Series(expected, index=ser.index)\n tm.assert_series_equal(result, expected)\n\n\ndef test_closed_uneven():\n # see gh-21704\n ser = Series(data=np.arange(10), index=date_range(\"2000\", periods=10))\n\n # uneven\n ser = ser.drop(index=ser.index[[1, 5]])\n result = ser.rolling(\"3D\", closed=\"left\").min()\n expected = Series([np.nan, 
0, 0, 2, 3, 4, 6, 6], index=ser.index)\n tm.assert_series_equal(result, expected)\n\n\[email protected](\n \"func,closed,expected\",\n [\n (\"min\", \"right\", [np.nan, 0, 0, 1, 2, 3, 4, 5, np.nan, np.nan]),\n (\"min\", \"both\", [np.nan, 0, 0, 0, 1, 2, 3, 4, 5, np.nan]),\n (\"min\", \"neither\", [np.nan, np.nan, 0, 1, 2, 3, 4, 5, np.nan, np.nan]),\n (\"min\", \"left\", [np.nan, np.nan, 0, 0, 1, 2, 3, 4, 5, np.nan]),\n (\"max\", \"right\", [np.nan, 1, 2, 3, 4, 5, 6, 6, np.nan, np.nan]),\n (\"max\", \"both\", [np.nan, 1, 2, 3, 4, 5, 6, 6, 6, np.nan]),\n (\"max\", \"neither\", [np.nan, np.nan, 1, 2, 3, 4, 5, 6, np.nan, np.nan]),\n (\"max\", \"left\", [np.nan, np.nan, 1, 2, 3, 4, 5, 6, 6, np.nan]),\n ],\n)\ndef test_closed_min_max_minp(func, closed, expected):\n # see gh-21704\n ser = Series(data=np.arange(10), index=date_range(\"2000\", periods=10))\n ser[ser.index[-3:]] = np.nan\n result = getattr(ser.rolling(\"3D\", min_periods=2, closed=closed), func)()\n expected = Series(expected, index=ser.index)\n tm.assert_series_equal(result, expected)\n\n\[email protected](\n \"closed,expected\",\n [\n (\"right\", [0, 0.5, 1, 2, 3, 4, 5, 6, 7, 8]),\n (\"both\", [0, 0.5, 1, 1.5, 2.5, 3.5, 4.5, 5.5, 6.5, 7.5]),\n (\"neither\", [np.nan, 0, 0.5, 1.5, 2.5, 3.5, 4.5, 5.5, 6.5, 7.5]),\n (\"left\", [np.nan, 0, 0.5, 1, 2, 3, 4, 5, 6, 7]),\n ],\n)\ndef test_closed_median_quantile(closed, expected):\n # GH 26005\n ser = Series(data=np.arange(10), index=date_range(\"2000\", periods=10))\n roll = ser.rolling(\"3D\", closed=closed)\n expected = Series(expected, index=ser.index)\n\n result = roll.median()\n tm.assert_series_equal(result, expected)\n\n result = roll.quantile(0.5)\n tm.assert_series_equal(result, expected)\n\n\[email protected](\"roller\", [\"1s\", 1])\ndef tests_empty_df_rolling(roller):\n # GH 15819 Verifies that datetime and integer rolling windows can be\n # applied to empty DataFrames\n expected = DataFrame()\n result = DataFrame().rolling(roller).sum()\n tm.assert_frame_equal(result, expected)\n\n # Verifies that datetime and integer rolling windows can be applied to\n # empty DataFrames with datetime index\n expected = DataFrame(index=DatetimeIndex([]))\n result = DataFrame(index=DatetimeIndex([])).rolling(roller).sum()\n tm.assert_frame_equal(result, expected)\n\n\ndef test_empty_window_median_quantile():\n # GH 26005\n expected = Series([np.nan, np.nan, np.nan])\n roll = Series(np.arange(3)).rolling(0)\n\n result = roll.median()\n tm.assert_series_equal(result, expected)\n\n result = roll.quantile(0.1)\n tm.assert_series_equal(result, expected)\n\n\ndef test_missing_minp_zero():\n # https://github.com/pandas-dev/pandas/pull/18921\n # minp=0\n x = Series([np.nan])\n result = x.rolling(1, min_periods=0).sum()\n expected = Series([0.0])\n tm.assert_series_equal(result, expected)\n\n # minp=1\n result = x.rolling(1, min_periods=1).sum()\n expected = Series([np.nan])\n tm.assert_series_equal(result, expected)\n\n\ndef test_missing_minp_zero_variable():\n # https://github.com/pandas-dev/pandas/pull/18921\n x = Series(\n [np.nan] * 4,\n index=DatetimeIndex([\"2017-01-01\", \"2017-01-04\", \"2017-01-06\", \"2017-01-07\"]),\n )\n result = x.rolling(Timedelta(\"2d\"), min_periods=0).sum()\n expected = Series(0.0, index=x.index)\n tm.assert_series_equal(result, expected)\n\n\ndef test_multi_index_names():\n\n # GH 16789, 16825\n cols = MultiIndex.from_product([[\"A\", \"B\"], [\"C\", \"D\", \"E\"]], names=[\"1\", \"2\"])\n df = DataFrame(np.ones((10, 6)), columns=cols)\n result = 
df.rolling(3).cov()\n\n tm.assert_index_equal(result.columns, df.columns)\n assert result.index.names == [None, \"1\", \"2\"]\n\n\ndef test_rolling_axis_sum(axis_frame):\n # see gh-23372.\n df = DataFrame(np.ones((10, 20)))\n axis = df._get_axis_number(axis_frame)\n\n if axis == 0:\n expected = DataFrame({i: [np.nan] * 2 + [3.0] * 8 for i in range(20)})\n else:\n # axis == 1\n expected = DataFrame([[np.nan] * 2 + [3.0] * 18] * 10)\n\n result = df.rolling(3, axis=axis_frame).sum()\n tm.assert_frame_equal(result, expected)\n\n\ndef test_rolling_axis_count(axis_frame):\n # see gh-26055\n df = DataFrame({\"x\": range(3), \"y\": range(3)})\n\n axis = df._get_axis_number(axis_frame)\n\n if axis in [0, \"index\"]:\n expected = DataFrame({\"x\": [1.0, 2.0, 2.0], \"y\": [1.0, 2.0, 2.0]})\n else:\n expected = DataFrame({\"x\": [1.0, 1.0, 1.0], \"y\": [2.0, 2.0, 2.0]})\n\n result = df.rolling(2, axis=axis_frame, min_periods=0).count()\n tm.assert_frame_equal(result, expected)\n\n\ndef test_readonly_array():\n # GH-27766\n arr = np.array([1, 3, np.nan, 3, 5])\n arr.setflags(write=False)\n result = Series(arr).rolling(2).mean()\n expected = Series([np.nan, 2, np.nan, np.nan, 4])\n tm.assert_series_equal(result, expected)\n\n\ndef test_rolling_datetime(axis_frame, tz_naive_fixture):\n # GH-28192\n tz = tz_naive_fixture\n df = DataFrame(\n {i: [1] * 2 for i in date_range(\"2019-8-01\", \"2019-08-03\", freq=\"D\", tz=tz)}\n )\n if axis_frame in [0, \"index\"]:\n result = df.T.rolling(\"2D\", axis=axis_frame).sum().T\n else:\n result = df.rolling(\"2D\", axis=axis_frame).sum()\n expected = DataFrame(\n {\n **{\n i: [1.0] * 2\n for i in date_range(\"2019-8-01\", periods=1, freq=\"D\", tz=tz)\n },\n **{\n i: [2.0] * 2\n for i in date_range(\"2019-8-02\", \"2019-8-03\", freq=\"D\", tz=tz)\n },\n }\n )\n tm.assert_frame_equal(result, expected)\n\n\[email protected](\n \"center, expected_data\",\n [\n (\n True,\n (\n [88.0] * 7\n + [97.0] * 9\n + [98.0]\n + [99.0] * 21\n + [95.0] * 16\n + [93.0] * 5\n + [89.0] * 5\n + [96.0] * 21\n + [94.0] * 14\n + [90.0] * 13\n + [88.0] * 2\n + [90.0] * 9\n + [96.0] * 21\n + [95.0] * 6\n + [91.0]\n + [87.0] * 6\n + [92.0] * 21\n + [83.0] * 2\n + [86.0] * 10\n + [87.0] * 5\n + [98.0] * 21\n + [97.0] * 14\n + [93.0] * 7\n + [87.0] * 4\n + [86.0] * 4\n + [95.0] * 21\n + [85.0] * 14\n + [83.0] * 2\n + [76.0] * 5\n + [81.0] * 2\n + [98.0] * 21\n + [95.0] * 14\n + [91.0] * 7\n + [86.0]\n + [93.0] * 3\n + [95.0] * 29\n + [77.0] * 2\n ),\n ),\n (\n False,\n (\n [np.nan] * 2\n + [88.0] * 16\n + [97.0] * 9\n + [98.0]\n + [99.0] * 21\n + [95.0] * 16\n + [93.0] * 5\n + [89.0] * 5\n + [96.0] * 21\n + [94.0] * 14\n + [90.0] * 13\n + [88.0] * 2\n + [90.0] * 9\n + [96.0] * 21\n + [95.0] * 6\n + [91.0]\n + [87.0] * 6\n + [92.0] * 21\n + [83.0] * 2\n + [86.0] * 10\n + [87.0] * 5\n + [98.0] * 21\n + [97.0] * 14\n + [93.0] * 7\n + [87.0] * 4\n + [86.0] * 4\n + [95.0] * 21\n + [85.0] * 14\n + [83.0] * 2\n + [76.0] * 5\n + [81.0] * 2\n + [98.0] * 21\n + [95.0] * 14\n + [91.0] * 7\n + [86.0]\n + [93.0] * 3\n + [95.0] * 20\n ),\n ),\n ],\n)\ndef test_rolling_window_as_string(center, expected_data):\n # see gh-22590\n date_today = datetime.now()\n days = date_range(date_today, date_today + timedelta(365), freq=\"D\")\n\n npr = np.random.RandomState(seed=421)\n\n data = npr.randint(1, high=100, size=len(days))\n df = DataFrame({\"DateCol\": days, \"metric\": data})\n\n df.set_index(\"DateCol\", inplace=True)\n result = df.rolling(window=\"21D\", min_periods=2, closed=\"left\", center=center)[\n 
\"metric\"\n ].agg(\"max\")\n\n index = days.rename(\"DateCol\")\n index = index._with_freq(None)\n expected = Series(expected_data, index=index, name=\"metric\")\n tm.assert_series_equal(result, expected)\n\n\ndef test_min_periods1():\n # GH#6795\n df = DataFrame([0, 1, 2, 1, 0], columns=[\"a\"])\n result = df[\"a\"].rolling(3, center=True, min_periods=1).max()\n expected = Series([1.0, 2.0, 2.0, 2.0, 1.0], name=\"a\")\n tm.assert_series_equal(result, expected)\n\n\ndef test_rolling_count_with_min_periods(frame_or_series):\n # GH 26996\n result = frame_or_series(range(5)).rolling(3, min_periods=3).count()\n expected = frame_or_series([np.nan, np.nan, 3.0, 3.0, 3.0])\n tm.assert_equal(result, expected)\n\n\ndef test_rolling_count_default_min_periods_with_null_values(frame_or_series):\n # GH 26996\n values = [1, 2, 3, np.nan, 4, 5, 6]\n expected_counts = [1.0, 2.0, 3.0, 2.0, 2.0, 2.0, 3.0]\n\n # GH 31302\n with tm.assert_produces_warning(FutureWarning, check_stacklevel=False):\n result = frame_or_series(values).rolling(3).count()\n expected = frame_or_series(expected_counts)\n tm.assert_equal(result, expected)\n\n\[email protected](\n \"df,expected,window,min_periods\",\n [\n (\n DataFrame({\"A\": [1, 2, 3], \"B\": [4, 5, 6]}),\n [\n ({\"A\": [1], \"B\": [4]}, [0]),\n ({\"A\": [1, 2], \"B\": [4, 5]}, [0, 1]),\n ({\"A\": [1, 2, 3], \"B\": [4, 5, 6]}, [0, 1, 2]),\n ],\n 3,\n None,\n ),\n (\n DataFrame({\"A\": [1, 2, 3], \"B\": [4, 5, 6]}),\n [\n ({\"A\": [1], \"B\": [4]}, [0]),\n ({\"A\": [1, 2], \"B\": [4, 5]}, [0, 1]),\n ({\"A\": [2, 3], \"B\": [5, 6]}, [1, 2]),\n ],\n 2,\n 1,\n ),\n (\n DataFrame({\"A\": [1, 2, 3], \"B\": [4, 5, 6]}),\n [\n ({\"A\": [1], \"B\": [4]}, [0]),\n ({\"A\": [1, 2], \"B\": [4, 5]}, [0, 1]),\n ({\"A\": [2, 3], \"B\": [5, 6]}, [1, 2]),\n ],\n 2,\n 2,\n ),\n (\n DataFrame({\"A\": [1, 2, 3], \"B\": [4, 5, 6]}),\n [\n ({\"A\": [1], \"B\": [4]}, [0]),\n ({\"A\": [2], \"B\": [5]}, [1]),\n ({\"A\": [3], \"B\": [6]}, [2]),\n ],\n 1,\n 1,\n ),\n (\n DataFrame({\"A\": [1, 2, 3], \"B\": [4, 5, 6]}),\n [\n ({\"A\": [1], \"B\": [4]}, [0]),\n ({\"A\": [2], \"B\": [5]}, [1]),\n ({\"A\": [3], \"B\": [6]}, [2]),\n ],\n 1,\n 0,\n ),\n (DataFrame({\"A\": [1], \"B\": [4]}), [], 2, None),\n (DataFrame({\"A\": [1], \"B\": [4]}), [], 2, 1),\n (DataFrame(), [({}, [])], 2, None),\n (\n DataFrame({\"A\": [1, np.nan, 3], \"B\": [np.nan, 5, 6]}),\n [\n ({\"A\": [1.0], \"B\": [np.nan]}, [0]),\n ({\"A\": [1, np.nan], \"B\": [np.nan, 5]}, [0, 1]),\n ({\"A\": [1, np.nan, 3], \"B\": [np.nan, 5, 6]}, [0, 1, 2]),\n ],\n 3,\n 2,\n ),\n ],\n)\ndef test_iter_rolling_dataframe(df, expected, window, min_periods):\n # GH 11704\n expected = [DataFrame(values, index=index) for (values, index) in expected]\n\n for (expected, actual) in zip(\n expected, df.rolling(window, min_periods=min_periods)\n ):\n tm.assert_frame_equal(actual, expected)\n\n\[email protected](\n \"expected,window\",\n [\n (\n [\n ({\"A\": [1], \"B\": [4]}, [0]),\n ({\"A\": [1, 2], \"B\": [4, 5]}, [0, 1]),\n ({\"A\": [2, 3], \"B\": [5, 6]}, [1, 2]),\n ],\n \"2D\",\n ),\n (\n [\n ({\"A\": [1], \"B\": [4]}, [0]),\n ({\"A\": [1, 2], \"B\": [4, 5]}, [0, 1]),\n ({\"A\": [1, 2, 3], \"B\": [4, 5, 6]}, [0, 1, 2]),\n ],\n \"3D\",\n ),\n (\n [\n ({\"A\": [1], \"B\": [4]}, [0]),\n ({\"A\": [2], \"B\": [5]}, [1]),\n ({\"A\": [3], \"B\": [6]}, [2]),\n ],\n \"1D\",\n ),\n ],\n)\ndef test_iter_rolling_on_dataframe(expected, window):\n # GH 11704, 40373\n df = DataFrame(\n {\n \"A\": [1, 2, 3, 4, 5],\n \"B\": [4, 5, 6, 7, 8],\n \"C\": 
date_range(start=\"2016-01-01\", periods=5, freq=\"D\"),\n }\n )\n\n expected = [\n DataFrame(values, index=df.loc[index, \"C\"]) for (values, index) in expected\n ]\n for (expected, actual) in zip(expected, df.rolling(window, on=\"C\")):\n tm.assert_frame_equal(actual, expected)\n\n\ndef test_iter_rolling_on_dataframe_unordered():\n # GH 43386\n df = DataFrame({\"a\": [\"x\", \"y\", \"x\"], \"b\": [0, 1, 2]})\n results = list(df.groupby(\"a\").rolling(2))\n expecteds = [df.iloc[idx, [1]] for idx in [[0], [0, 2], [1]]]\n for result, expected in zip(results, expecteds):\n tm.assert_frame_equal(result, expected)\n\n\[email protected](\n \"ser,expected,window, min_periods\",\n [\n (\n Series([1, 2, 3]),\n [([1], [0]), ([1, 2], [0, 1]), ([1, 2, 3], [0, 1, 2])],\n 3,\n None,\n ),\n (\n Series([1, 2, 3]),\n [([1], [0]), ([1, 2], [0, 1]), ([1, 2, 3], [0, 1, 2])],\n 3,\n 1,\n ),\n (\n Series([1, 2, 3]),\n [([1], [0]), ([1, 2], [0, 1]), ([2, 3], [1, 2])],\n 2,\n 1,\n ),\n (\n Series([1, 2, 3]),\n [([1], [0]), ([1, 2], [0, 1]), ([2, 3], [1, 2])],\n 2,\n 2,\n ),\n (Series([1, 2, 3]), [([1], [0]), ([2], [1]), ([3], [2])], 1, 0),\n (Series([1, 2, 3]), [([1], [0]), ([2], [1]), ([3], [2])], 1, 1),\n (Series([1, 2]), [([1], [0]), ([1, 2], [0, 1])], 2, 0),\n (Series([], dtype=\"int64\"), [], 2, 1),\n ],\n)\ndef test_iter_rolling_series(ser, expected, window, min_periods):\n # GH 11704\n expected = [Series(values, index=index) for (values, index) in expected]\n\n for (expected, actual) in zip(\n expected, ser.rolling(window, min_periods=min_periods)\n ):\n tm.assert_series_equal(actual, expected)\n\n\[email protected](\n \"expected,expected_index,window\",\n [\n (\n [[0], [1], [2], [3], [4]],\n [\n date_range(\"2020-01-01\", periods=1, freq=\"D\"),\n date_range(\"2020-01-02\", periods=1, freq=\"D\"),\n date_range(\"2020-01-03\", periods=1, freq=\"D\"),\n date_range(\"2020-01-04\", periods=1, freq=\"D\"),\n date_range(\"2020-01-05\", periods=1, freq=\"D\"),\n ],\n \"1D\",\n ),\n (\n [[0], [0, 1], [1, 2], [2, 3], [3, 4]],\n [\n date_range(\"2020-01-01\", periods=1, freq=\"D\"),\n date_range(\"2020-01-01\", periods=2, freq=\"D\"),\n date_range(\"2020-01-02\", periods=2, freq=\"D\"),\n date_range(\"2020-01-03\", periods=2, freq=\"D\"),\n date_range(\"2020-01-04\", periods=2, freq=\"D\"),\n ],\n \"2D\",\n ),\n (\n [[0], [0, 1], [0, 1, 2], [1, 2, 3], [2, 3, 4]],\n [\n date_range(\"2020-01-01\", periods=1, freq=\"D\"),\n date_range(\"2020-01-01\", periods=2, freq=\"D\"),\n date_range(\"2020-01-01\", periods=3, freq=\"D\"),\n date_range(\"2020-01-02\", periods=3, freq=\"D\"),\n date_range(\"2020-01-03\", periods=3, freq=\"D\"),\n ],\n \"3D\",\n ),\n ],\n)\ndef test_iter_rolling_datetime(expected, expected_index, window):\n # GH 11704\n ser = Series(range(5), index=date_range(start=\"2020-01-01\", periods=5, freq=\"D\"))\n\n expected = [\n Series(values, index=idx) for (values, idx) in zip(expected, expected_index)\n ]\n\n for (expected, actual) in zip(expected, ser.rolling(window)):\n tm.assert_series_equal(actual, expected)\n\n\[email protected](\n \"grouping,_index\",\n [\n (\n {\"level\": 0},\n MultiIndex.from_tuples(\n [(0, 0), (0, 0), (1, 1), (1, 1), (1, 1)], names=[None, None]\n ),\n ),\n (\n {\"by\": \"X\"},\n MultiIndex.from_tuples(\n [(0, 0), (1, 0), (2, 1), (3, 1), (4, 1)], names=[\"X\", None]\n ),\n ),\n ],\n)\ndef test_rolling_positional_argument(grouping, _index, raw):\n # GH 34605\n\n def scaled_sum(*args):\n if len(args) < 2:\n raise ValueError(\"The function needs two arguments\")\n array, scale = 
args\n return array.sum() / scale\n\n df = DataFrame(data={\"X\": range(5)}, index=[0, 0, 1, 1, 1])\n\n expected = DataFrame(data={\"X\": [0.0, 0.5, 1.0, 1.5, 2.0]}, index=_index)\n # GH 40341\n if \"by\" in grouping:\n expected = expected.drop(columns=\"X\", errors=\"ignore\")\n result = df.groupby(**grouping).rolling(1).apply(scaled_sum, raw=raw, args=(2,))\n tm.assert_frame_equal(result, expected)\n\n\[email protected](\"add\", [0.0, 2.0])\ndef test_rolling_numerical_accuracy_kahan_mean(add):\n # GH: 36031 implementing kahan summation\n df = DataFrame(\n {\"A\": [3002399751580331.0 + add, -0.0, -0.0]},\n index=[\n Timestamp(\"19700101 09:00:00\"),\n Timestamp(\"19700101 09:00:03\"),\n Timestamp(\"19700101 09:00:06\"),\n ],\n )\n result = (\n df.resample(\"1s\").ffill().rolling(\"3s\", closed=\"left\", min_periods=3).mean()\n )\n dates = date_range(\"19700101 09:00:00\", periods=7, freq=\"S\")\n expected = DataFrame(\n {\n \"A\": [\n np.nan,\n np.nan,\n np.nan,\n 3002399751580330.5,\n 2001599834386887.25,\n 1000799917193443.625,\n 0.0,\n ]\n },\n index=dates,\n )\n tm.assert_frame_equal(result, expected)\n\n\ndef test_rolling_numerical_accuracy_kahan_sum():\n # GH: 13254\n df = DataFrame([2.186, -1.647, 0.0, 0.0, 0.0, 0.0], columns=[\"x\"])\n result = df[\"x\"].rolling(3).sum()\n expected = Series([np.nan, np.nan, 0.539, -1.647, 0.0, 0.0], name=\"x\")\n tm.assert_series_equal(result, expected)\n\n\ndef test_rolling_numerical_accuracy_jump():\n # GH: 32761\n index = date_range(start=\"2020-01-01\", end=\"2020-01-02\", freq=\"60s\").append(\n DatetimeIndex([\"2020-01-03\"])\n )\n data = np.random.rand(len(index))\n\n df = DataFrame({\"data\": data}, index=index)\n result = df.rolling(\"60s\").mean()\n tm.assert_frame_equal(result, df[[\"data\"]])\n\n\ndef test_rolling_numerical_accuracy_small_values():\n # GH: 10319\n s = Series(\n data=[0.00012456, 0.0003, -0.0, -0.0],\n index=date_range(\"1999-02-03\", \"1999-02-06\"),\n )\n result = s.rolling(1).mean()\n tm.assert_series_equal(result, s)\n\n\ndef test_rolling_numerical_too_large_numbers():\n # GH: 11645\n dates = date_range(\"2015-01-01\", periods=10, freq=\"D\")\n ds = Series(data=range(10), index=dates, dtype=np.float64)\n ds[2] = -9e33\n result = ds.rolling(5).mean()\n expected = Series(\n [\n np.nan,\n np.nan,\n np.nan,\n np.nan,\n -1.8e33,\n -1.8e33,\n -1.8e33,\n 5.0,\n 6.0,\n 7.0,\n ],\n index=dates,\n )\n tm.assert_series_equal(result, expected)\n\n\[email protected](\n (\"func\", \"value\"),\n [(\"sum\", 2.0), (\"max\", 1.0), (\"min\", 1.0), (\"mean\", 1.0), (\"median\", 1.0)],\n)\ndef test_rolling_mixed_dtypes_axis_1(func, value):\n # GH: 20649\n df = DataFrame(1, index=[1, 2], columns=[\"a\", \"b\", \"c\"])\n df[\"c\"] = 1.0\n result = getattr(df.rolling(window=2, min_periods=1, axis=1), func)()\n expected = DataFrame(\n {\"a\": [1.0, 1.0], \"b\": [value, value], \"c\": [value, value]},\n index=[1, 2],\n )\n tm.assert_frame_equal(result, expected)\n\n\ndef test_rolling_axis_one_with_nan():\n # GH: 35596\n df = DataFrame(\n [\n [0, 1, 2, 4, np.nan, np.nan, np.nan],\n [0, 1, 2, np.nan, np.nan, np.nan, np.nan],\n [0, 2, 2, np.nan, 2, np.nan, 1],\n ]\n )\n result = df.rolling(window=7, min_periods=1, axis=\"columns\").sum()\n expected = DataFrame(\n [\n [0.0, 1.0, 3.0, 7.0, 7.0, 7.0, 7.0],\n [0.0, 1.0, 3.0, 3.0, 3.0, 3.0, 3.0],\n [0.0, 2.0, 4.0, 4.0, 6.0, 6.0, 7.0],\n ]\n )\n tm.assert_frame_equal(result, expected)\n\n\[email protected](\n \"value\",\n [\"test\", to_datetime(\"2019-12-31\"), to_timedelta(\"1 days 
06:05:01.00003\")],\n)\ndef test_rolling_axis_1_non_numeric_dtypes(value):\n # GH: 20649\n df = DataFrame({\"a\": [1, 2]})\n df[\"b\"] = value\n result = df.rolling(window=2, min_periods=1, axis=1).sum()\n expected = DataFrame({\"a\": [1.0, 2.0]})\n tm.assert_frame_equal(result, expected)\n\n\ndef test_rolling_on_df_transposed():\n # GH: 32724\n df = DataFrame({\"A\": [1, None], \"B\": [4, 5], \"C\": [7, 8]})\n expected = DataFrame({\"A\": [1.0, np.nan], \"B\": [5.0, 5.0], \"C\": [11.0, 13.0]})\n result = df.rolling(min_periods=1, window=2, axis=1).sum()\n tm.assert_frame_equal(result, expected)\n\n result = df.T.rolling(min_periods=1, window=2).sum().T\n tm.assert_frame_equal(result, expected)\n\n\[email protected](\n (\"index\", \"window\"),\n [\n (\n period_range(start=\"2020-01-01 08:00\", end=\"2020-01-01 08:08\", freq=\"T\"),\n \"2T\",\n ),\n (\n period_range(start=\"2020-01-01 08:00\", end=\"2020-01-01 12:00\", freq=\"30T\"),\n \"1h\",\n ),\n ],\n)\[email protected](\n (\"func\", \"values\"),\n [\n (\"min\", [np.nan, 0, 0, 1, 2, 3, 4, 5, 6]),\n (\"max\", [np.nan, 0, 1, 2, 3, 4, 5, 6, 7]),\n (\"sum\", [np.nan, 0, 1, 3, 5, 7, 9, 11, 13]),\n ],\n)\ndef test_rolling_period_index(index, window, func, values):\n # GH: 34225\n ds = Series([0, 1, 2, 3, 4, 5, 6, 7, 8], index=index)\n result = getattr(ds.rolling(window, closed=\"left\"), func)()\n expected = Series(values, index=index)\n tm.assert_series_equal(result, expected)\n\n\ndef test_rolling_sem(frame_or_series):\n # GH: 26476\n obj = frame_or_series([0, 1, 2])\n result = obj.rolling(2, min_periods=1).sem()\n if isinstance(result, DataFrame):\n result = Series(result[0].values)\n expected = Series([np.nan] + [0.7071067811865476] * 2)\n tm.assert_series_equal(result, expected)\n\n\[email protected](is_platform_arm() and not is_platform_mac(), reason=\"GH 38921\")\[email protected](\n (\"func\", \"third_value\", \"values\"),\n [\n (\"var\", 1, [5e33, 0, 0.5, 0.5, 2, 0]),\n (\"std\", 1, [7.071068e16, 0, 0.7071068, 0.7071068, 1.414214, 0]),\n (\"var\", 2, [5e33, 0.5, 0, 0.5, 2, 0]),\n (\"std\", 2, [7.071068e16, 0.7071068, 0, 0.7071068, 1.414214, 0]),\n ],\n)\ndef test_rolling_var_numerical_issues(func, third_value, values):\n # GH: 37051\n ds = Series([99999999999999999, 1, third_value, 2, 3, 1, 1])\n result = getattr(ds.rolling(2), func)()\n expected = Series([np.nan] + values)\n tm.assert_series_equal(result, expected)\n\n\ndef test_timeoffset_as_window_parameter_for_corr():\n # GH: 28266\n exp = DataFrame(\n {\n \"B\": [\n np.nan,\n np.nan,\n 0.9999999999999998,\n -1.0,\n 1.0,\n -0.3273268353539892,\n 0.9999999999999998,\n 1.0,\n 0.9999999999999998,\n 1.0,\n ],\n \"A\": [\n np.nan,\n np.nan,\n -1.0,\n 1.0000000000000002,\n -0.3273268353539892,\n 0.9999999999999966,\n 1.0,\n 1.0000000000000002,\n 1.0,\n 1.0000000000000002,\n ],\n },\n index=MultiIndex.from_tuples(\n [\n (Timestamp(\"20130101 09:00:00\"), \"B\"),\n (Timestamp(\"20130101 09:00:00\"), \"A\"),\n (Timestamp(\"20130102 09:00:02\"), \"B\"),\n (Timestamp(\"20130102 09:00:02\"), \"A\"),\n (Timestamp(\"20130103 09:00:03\"), \"B\"),\n (Timestamp(\"20130103 09:00:03\"), \"A\"),\n (Timestamp(\"20130105 09:00:05\"), \"B\"),\n (Timestamp(\"20130105 09:00:05\"), \"A\"),\n (Timestamp(\"20130106 09:00:06\"), \"B\"),\n (Timestamp(\"20130106 09:00:06\"), \"A\"),\n ]\n ),\n )\n\n df = DataFrame(\n {\"B\": [0, 1, 2, 4, 3], \"A\": [7, 4, 6, 9, 3]},\n index=[\n Timestamp(\"20130101 09:00:00\"),\n Timestamp(\"20130102 09:00:02\"),\n Timestamp(\"20130103 09:00:03\"),\n Timestamp(\"20130105 
09:00:05\"),\n Timestamp(\"20130106 09:00:06\"),\n ],\n )\n\n res = df.rolling(window=\"3d\").corr()\n\n tm.assert_frame_equal(exp, res)\n\n\[email protected](\"method\", [\"var\", \"sum\", \"mean\", \"skew\", \"kurt\", \"min\", \"max\"])\ndef test_rolling_decreasing_indices(method):\n \"\"\"\n Make sure that decreasing indices give the same results as increasing indices.\n\n GH 36933\n \"\"\"\n df = DataFrame({\"values\": np.arange(-15, 10) ** 2})\n df_reverse = DataFrame({\"values\": df[\"values\"][::-1]}, index=df.index[::-1])\n\n increasing = getattr(df.rolling(window=5), method)()\n decreasing = getattr(df_reverse.rolling(window=5), method)()\n\n assert np.abs(decreasing.values[::-1][:-4] - increasing.values[4:]).max() < 1e-12\n\n\[email protected](\n \"window,closed,expected\",\n [\n (\"2s\", \"right\", [1.0, 3.0, 5.0, 3.0]),\n (\"2s\", \"left\", [0.0, 1.0, 3.0, 5.0]),\n (\"2s\", \"both\", [1.0, 3.0, 6.0, 5.0]),\n (\"2s\", \"neither\", [0.0, 1.0, 2.0, 3.0]),\n (\"3s\", \"right\", [1.0, 3.0, 6.0, 5.0]),\n (\"3s\", \"left\", [1.0, 3.0, 6.0, 5.0]),\n (\"3s\", \"both\", [1.0, 3.0, 6.0, 5.0]),\n (\"3s\", \"neither\", [1.0, 3.0, 6.0, 5.0]),\n ],\n)\ndef test_rolling_decreasing_indices_centered(window, closed, expected, frame_or_series):\n \"\"\"\n Ensure that a symmetrical inverted index return same result as non-inverted.\n \"\"\"\n # GH 43927\n\n index = date_range(\"2020\", periods=4, freq=\"1s\")\n df_inc = frame_or_series(range(4), index=index)\n df_dec = frame_or_series(range(4), index=index[::-1])\n\n expected_inc = frame_or_series(expected, index=index)\n expected_dec = frame_or_series(expected, index=index[::-1])\n\n result_inc = df_inc.rolling(window, closed=closed, center=True).sum()\n result_dec = df_dec.rolling(window, closed=closed, center=True).sum()\n\n tm.assert_equal(result_inc, expected_inc)\n tm.assert_equal(result_dec, expected_dec)\n\n\[email protected](\n \"window,expected\",\n [\n (\"1ns\", [1.0, 1.0, 1.0, 1.0]),\n (\"3ns\", [2.0, 3.0, 3.0, 2.0]),\n ],\n)\ndef test_rolling_center_nanosecond_resolution(\n window, closed, expected, frame_or_series\n):\n index = date_range(\"2020\", periods=4, freq=\"1ns\")\n df = frame_or_series([1, 1, 1, 1], index=index, dtype=float)\n expected = frame_or_series(expected, index=index, dtype=float)\n result = df.rolling(window, closed=closed, center=True).sum()\n tm.assert_equal(result, expected)\n\n\[email protected](\n \"method,expected\",\n [\n (\n \"var\",\n [\n float(\"nan\"),\n 43.0,\n float(\"nan\"),\n 136.333333,\n 43.5,\n 94.966667,\n 182.0,\n 318.0,\n ],\n ),\n (\n \"mean\",\n [float(\"nan\"), 7.5, float(\"nan\"), 21.5, 6.0, 9.166667, 13.0, 17.5],\n ),\n (\n \"sum\",\n [float(\"nan\"), 30.0, float(\"nan\"), 86.0, 30.0, 55.0, 91.0, 140.0],\n ),\n (\n \"skew\",\n [\n float(\"nan\"),\n 0.709296,\n float(\"nan\"),\n 0.407073,\n 0.984656,\n 0.919184,\n 0.874674,\n 0.842418,\n ],\n ),\n (\n \"kurt\",\n [\n float(\"nan\"),\n -0.5916711736073559,\n float(\"nan\"),\n -1.0028993131317954,\n -0.06103844629409494,\n -0.254143227116194,\n -0.37362637362637585,\n -0.45439658241367054,\n ],\n ),\n ],\n)\ndef test_rolling_non_monotonic(method, expected):\n \"\"\"\n Make sure the (rare) branch of non-monotonic indices is covered by a test.\n\n output from 1.1.3 is assumed to be the expected output. 
Output of sum/mean has\n manually been verified.\n\n GH 36933.\n \"\"\"\n # Based on an example found in computation.rst\n use_expanding = [True, False, True, False, True, True, True, True]\n df = DataFrame({\"values\": np.arange(len(use_expanding)) ** 2})\n\n class CustomIndexer(BaseIndexer):\n def get_window_bounds(self, num_values, min_periods, center, closed):\n start = np.empty(num_values, dtype=np.int64)\n end = np.empty(num_values, dtype=np.int64)\n for i in range(num_values):\n if self.use_expanding[i]:\n start[i] = 0\n end[i] = i + 1\n else:\n start[i] = i\n end[i] = i + self.window_size\n return start, end\n\n indexer = CustomIndexer(window_size=4, use_expanding=use_expanding)\n\n result = getattr(df.rolling(indexer), method)()\n expected = DataFrame({\"values\": expected})\n tm.assert_frame_equal(result, expected)\n\n\[email protected](\n (\"index\", \"window\"),\n [\n ([0, 1, 2, 3, 4], 2),\n (date_range(\"2001-01-01\", freq=\"D\", periods=5), \"2D\"),\n ],\n)\ndef test_rolling_corr_timedelta_index(index, window):\n # GH: 31286\n x = Series([1, 2, 3, 4, 5], index=index)\n y = x.copy()\n x[0:2] = 0.0\n result = x.rolling(window).corr(y)\n expected = Series([np.nan, np.nan, 1, 1, 1], index=index)\n tm.assert_almost_equal(result, expected)\n\n\ndef test_groupby_rolling_nan_included():\n # GH 35542\n data = {\"group\": [\"g1\", np.nan, \"g1\", \"g2\", np.nan], \"B\": [0, 1, 2, 3, 4]}\n df = DataFrame(data)\n result = df.groupby(\"group\", dropna=False).rolling(1, min_periods=1).mean()\n expected = DataFrame(\n {\"B\": [0.0, 2.0, 3.0, 1.0, 4.0]},\n # GH-38057 from_tuples puts the NaNs in the codes, result expects them\n # to be in the levels, at the moment\n # index=MultiIndex.from_tuples(\n # [(\"g1\", 0), (\"g1\", 2), (\"g2\", 3), (np.nan, 1), (np.nan, 4)],\n # names=[\"group\", None],\n # ),\n index=MultiIndex(\n [[\"g1\", \"g2\", np.nan], [0, 1, 2, 3, 4]],\n [[0, 0, 1, 2, 2], [0, 2, 3, 1, 4]],\n names=[\"group\", None],\n ),\n )\n tm.assert_frame_equal(result, expected)\n\n\ndef test_groupby_rolling_non_monotonic():\n # GH 43909\n shuffled = [3, 0, 1, 2]\n sec = 1_000_000_000\n df = DataFrame(\n [{\"t\": Timestamp(2 * x * sec), \"x\": x + 1, \"c\": 42} for x in shuffled]\n )\n with pytest.raises(ValueError, match=r\".* must be monotonic\"):\n df.groupby(\"c\").rolling(on=\"t\", window=\"3s\")\n\n\[email protected](\"method\", [\"skew\", \"kurt\"])\ndef test_rolling_skew_kurt_numerical_stability(method):\n # GH#6929\n ser = Series(np.random.rand(10))\n ser_copy = ser.copy()\n expected = getattr(ser.rolling(3), method)()\n tm.assert_series_equal(ser, ser_copy)\n ser = ser + 50000\n result = getattr(ser.rolling(3), method)()\n tm.assert_series_equal(result, expected)\n\n\[email protected](\n (\"method\", \"values\"),\n [\n (\"skew\", [2.0, 0.854563, 0.0, 1.999984]),\n (\"kurt\", [4.0, -1.289256, -1.2, 3.999946]),\n ],\n)\ndef test_rolling_skew_kurt_large_value_range(method, values):\n # GH: 37557\n s = Series([3000000, 1, 1, 2, 3, 4, 999])\n result = getattr(s.rolling(4), method)()\n expected = Series([np.nan] * 3 + values)\n tm.assert_series_equal(result, expected)\n\n\ndef test_invalid_method():\n with pytest.raises(ValueError, match=\"method must be 'table' or 'single\"):\n Series(range(1)).rolling(1, method=\"foo\")\n\n\[email protected](\"window\", [1, \"1d\"])\ndef test_rolling_descending_date_order_with_offset(window, frame_or_series):\n # GH#40002\n idx = date_range(start=\"2020-01-01\", end=\"2020-01-03\", freq=\"1d\")\n obj = frame_or_series(range(1, 4), index=idx)\n 
result = obj.rolling(\"1d\", closed=\"left\").sum()\n expected = frame_or_series([np.nan, 1, 2], index=idx)\n tm.assert_equal(result, expected)\n\n result = obj.iloc[::-1].rolling(\"1d\", closed=\"left\").sum()\n idx = date_range(start=\"2020-01-03\", end=\"2020-01-01\", freq=\"-1d\")\n expected = frame_or_series([np.nan, 3, 2], index=idx)\n tm.assert_equal(result, expected)\n\n\ndef test_rolling_var_floating_artifact_precision():\n # GH 37051\n s = Series([7, 5, 5, 5])\n result = s.rolling(3).var()\n expected = Series([np.nan, np.nan, 4 / 3, 0])\n tm.assert_series_equal(result, expected, atol=1.0e-15, rtol=1.0e-15)\n\n\ndef test_rolling_std_small_values():\n # GH 37051\n s = Series(\n [\n 0.00000054,\n 0.00000053,\n 0.00000054,\n ]\n )\n result = s.rolling(2).std()\n expected = Series([np.nan, 7.071068e-9, 7.071068e-9])\n tm.assert_series_equal(result, expected, atol=1.0e-15, rtol=1.0e-15)\n\n\[email protected](\n \"start, exp_values\",\n [\n (1, [0.03, 0.0155, 0.0155, 0.011, 0.01025]),\n (2, [0.001, 0.001, 0.0015, 0.00366666]),\n ],\n)\ndef test_rolling_mean_all_nan_window_floating_artifacts(start, exp_values):\n # GH#41053\n df = DataFrame(\n [\n 0.03,\n 0.03,\n 0.001,\n np.NaN,\n 0.002,\n 0.008,\n np.NaN,\n np.NaN,\n np.NaN,\n np.NaN,\n np.NaN,\n np.NaN,\n 0.005,\n 0.2,\n ]\n )\n\n values = exp_values + [\n 0.00366666,\n 0.005,\n 0.005,\n 0.008,\n np.NaN,\n np.NaN,\n 0.005,\n 0.102500,\n ]\n expected = DataFrame(\n values,\n index=list(range(start, len(values) + start)),\n )\n result = df.iloc[start:].rolling(5, min_periods=0).mean()\n tm.assert_frame_equal(result, expected)\n\n\ndef test_rolling_sum_all_nan_window_floating_artifacts():\n # GH#41053\n df = DataFrame([0.002, 0.008, 0.005, np.NaN, np.NaN, np.NaN])\n result = df.rolling(3, min_periods=0).sum()\n expected = DataFrame([0.002, 0.010, 0.015, 0.013, 0.005, 0.0])\n tm.assert_frame_equal(result, expected)\n\n\ndef test_rolling_zero_window():\n # GH 22719\n s = Series(range(1))\n result = s.rolling(0).min()\n expected = Series([np.nan])\n tm.assert_series_equal(result, expected)\n\n\ndef test_rolling_float_dtype(float_numpy_dtype):\n # GH#42452\n df = DataFrame({\"A\": range(5), \"B\": range(10, 15)}, dtype=float_numpy_dtype)\n expected = DataFrame(\n {\"A\": [np.nan] * 5, \"B\": range(10, 20, 2)},\n dtype=float_numpy_dtype,\n )\n result = df.rolling(2, axis=1).sum()\n tm.assert_frame_equal(result, expected, check_dtype=False)\n\n\ndef test_rolling_numeric_dtypes():\n # GH#41779\n df = DataFrame(np.arange(40).reshape(4, 10), columns=list(\"abcdefghij\")).astype(\n {\n \"a\": \"float16\",\n \"b\": \"float32\",\n \"c\": \"float64\",\n \"d\": \"int8\",\n \"e\": \"int16\",\n \"f\": \"int32\",\n \"g\": \"uint8\",\n \"h\": \"uint16\",\n \"i\": \"uint32\",\n \"j\": \"uint64\",\n }\n )\n result = df.rolling(window=2, min_periods=1, axis=1).min()\n expected = DataFrame(\n {\n \"a\": range(0, 40, 10),\n \"b\": range(0, 40, 10),\n \"c\": range(1, 40, 10),\n \"d\": range(2, 40, 10),\n \"e\": range(3, 40, 10),\n \"f\": range(4, 40, 10),\n \"g\": range(5, 40, 10),\n \"h\": range(6, 40, 10),\n \"i\": range(7, 40, 10),\n \"j\": range(8, 40, 10),\n },\n dtype=\"float64\",\n )\n tm.assert_frame_equal(result, expected)\n\n\[email protected](\"window\", [1, 3, 10, 20])\[email protected](\"method\", [\"min\", \"max\", \"average\"])\[email protected](\"pct\", [True, False])\[email protected](\"ascending\", [True, False])\[email protected](\"test_data\", [\"default\", \"duplicates\", \"nans\"])\ndef test_rank(window, method, pct, ascending, 
test_data):\n length = 20\n if test_data == \"default\":\n ser = Series(data=np.random.rand(length))\n elif test_data == \"duplicates\":\n ser = Series(data=np.random.choice(3, length))\n elif test_data == \"nans\":\n ser = Series(\n data=np.random.choice([1.0, 0.25, 0.75, np.nan, np.inf, -np.inf], length)\n )\n\n expected = ser.rolling(window).apply(\n lambda x: x.rank(method=method, pct=pct, ascending=ascending).iloc[-1]\n )\n result = ser.rolling(window).rank(method=method, pct=pct, ascending=ascending)\n\n tm.assert_series_equal(result, expected)\n" ]
[ [ "numpy.random.rand", "pandas.DatetimeIndex", "numpy.random.choice", "pandas.Timestamp", "pandas._testing.assert_series_equal", "pandas.period_range", "numpy.empty", "pandas.Timedelta", "pandas.DataFrame", "numpy.arange", "pandas.MultiIndex", "pandas.compat.is_platform_mac", "pandas.to_datetime", "numpy.array", "pandas.MultiIndex.from_tuples", "pandas._testing.assert_frame_equal", "pandas.MultiIndex.from_arrays", "pandas.MultiIndex.from_product", "pandas._testing.assert_equal", "pandas._testing.assert_index_equal", "pandas.compat.is_platform_arm", "pandas._testing.assert_produces_warning", "numpy.random.RandomState", "pandas.date_range", "numpy.ones", "pandas.to_timedelta", "pandas._testing.assert_almost_equal", "numpy.abs", "pandas.Series" ] ]
GoldbergNeuroLab/Kaneko-et-al.-2022
[ "66480b70beb5f00638c07e966522e53d0c614f79" ]
[ "pv-scn1a/src/data.py" ]
[ "import os\nimport warnings\nfrom collections import namedtuple\nfrom pathlib import Path\nfrom typing import Union\n\nimport pandas as pd\nfrom tables import NaturalNameWarning, PerformanceWarning\n\nfrom src.run import get_trace\nfrom src.constants import (AIS_LABEL, DISTANCE_LABEL, SECTION_LABEL,\n SITE_LABEL, SOMA_LABEL, TERMINAL_LABEL, TIME_LABEL,\n VOLTAGE_LABEL)\n\nAPCount = namedtuple(\"APCount\", \"n\")\n\n_cache_root = \".cache\"\n\n\ndef set_cache_root(root):\n global _cache_root\n _cache_root = root\n\n\ndef get_cache_root():\n global _cache_root\n return _cache_root\n\n\ndef get_file_path(name: Union[str, Path], root=None, ext=\".h5\"):\n \"\"\"From a base name, get the Path object that specifies the 'ext' file in the '.cache' directory.\"\"\"\n if root is None:\n root = get_cache_root()\n\n root_dir = Path(root)\n if not ext.startswith(\".\"):\n ext = \".\" + ext\n path = Path(str(name).replace(ext, \"\") + ext)\n\n if root_dir not in path.parents:\n path = root_dir / path\n if not path.parent.exists():\n path.parent.mkdir()\n\n return path\n\n\ndef _ap_series_to_ap(ap_series):\n return {key: APCount(val)\n if isinstance(val, float)\n else [APCount(v) for v in val]\n for key, val in ap_series.items()\n }\n\n\ndef get_cached_df(name, *args, **kwargs):\n \"\"\"Like `get_trace` but saves a copy.\n\n Internally, calls `get_trace` if it cannot find a local cached version according to `name` in the `cache_root`.\n \"\"\"\n cache_root = kwargs.pop(\"cache_root\", None)\n\n path = get_file_path(name, root=cache_root)\n is_test = \"test\" in name\n\n if not is_test and path.exists():\n try:\n x_df = pd.read_hdf(path, \"df\")\n except KeyError:\n x_df = None\n ap_series = pd.read_hdf(path, \"apn\")\n AP = _ap_series_to_ap(ap_series)\n return AP, x_df\n\n t, v, AP, x_df = get_trace(*args, **kwargs)\n\n if x_df is not None and not is_test:\n with warnings.catch_warnings():\n warnings.filterwarnings('ignore', category=NaturalNameWarning)\n x_df.to_hdf(path, f\"/df\", \"w\", complevel=7)\n\n apn = {}\n if isinstance(AP, dict):\n for key, val in AP.items():\n if isinstance(val, list):\n apn[key] = tuple([sub_val.n for sub_val in val])\n else:\n apn[key] = val.n\n else:\n apn[\"soma\"] = AP.n\n\n # copy hoc data to a pandas Series\n ap_series = pd.Series(apn)\n AP = _ap_series_to_ap(ap_series)\n\n if not is_test:\n with warnings.catch_warnings():\n warnings.filterwarnings('ignore', category=Warning)\n ap_series.to_hdf(path, f\"/apn\", complevel=7)\n\n return AP, x_df\n\n\ndef wide_to_long(df):\n _x = df.reset_index(drop=False)\n new_df = _x.melt(id_vars=[TIME_LABEL], var_name=df.columns.names,\n value_name=VOLTAGE_LABEL).convert_dtypes()\n return new_df\n\n\ndef is_long_form(df):\n return isinstance(df, pd.DataFrame) and TIME_LABEL in df.columns\n\n\ndef concise_df(long_df, soma=False):\n assert is_long_form(long_df), \"expected dataframe to be in long form\"\n\n section_map = {\"axon[1]\": AIS_LABEL}\n\n if soma:\n soma_mask = long_df[DISTANCE_LABEL] == 0\n else:\n soma_mask = long_df[DISTANCE_LABEL] < 0\n axon_mask = long_df[DISTANCE_LABEL] == long_df[\n long_df[SECTION_LABEL] == \"axon[1]\"\n ][DISTANCE_LABEL].max()\n node_mask = long_df[DISTANCE_LABEL] == long_df[DISTANCE_LABEL].max()\n mask_long_df = long_df[(soma_mask | axon_mask | node_mask)]\n\n ser = mask_long_df[SECTION_LABEL].map(\n lambda x: section_map.get(x, TERMINAL_LABEL))\n\n ser.name = SITE_LABEL\n\n return pd.concat([mask_long_df, ser], axis=1)\n\n\nif __name__ == \"__main__\":\n import numpy as np\n try:\n from pv_nrn 
import get_pv\n except ImportError:\n print(\"must be run from `pv-scn1a` directory\")\n pv = get_pv()\n amp = 0.1\n dur = 10\n test_name = \"test\"\n test_path = Path(\".cache\") / f\"{test_name}.h5\"\n\n if test_path.exists():\n os.remove(test_path)\n assert not test_path.exists()\n\n AP, _df = get_cached_df(\"test\", pv, amp, dur, shape_plot=True)\n assert test_path.exists()\n\n AP, loaded_df = get_cached_df(test_path)\n assert np.all(loaded_df == _df), \"values weren't stored/loaded properly!\"\n" ]
[ [ "pandas.concat", "pandas.read_hdf", "numpy.all", "pandas.Series" ] ]
KamWithK/PyParquetLoaders
[ "09f8529a8c9b6854f6965346cced1169b83ba96b" ]
[ "PyTorchLoader.py" ]
[ "import pyarrow as pa\nimport pyarrow.parquet as pq\nimport pyarrow.dataset as ds\nimport pandas as pd\n\nfrom torch.utils.data import IterableDataset\nfrom torch.utils.data import get_worker_info\nfrom torch.multiprocessing import Queue\n\nclass IterableManualParquetDataset(IterableDataset):\n def __init__(self, path, process_func):\n super().__init__()\n self.dataset = ds.dataset(path)\n self.process_func = process_func\n\n def __iter__(self):\n worker_info = get_worker_info()\n\n # Only divide up batches when using multiple worker processes\n if worker_info != None:\n batches = list(self.dataset.to_batches())\n worker_load = len(batches) // worker_info.num_workers\n\n # If more workers than batches exist, some won't be used\n if worker_load == 0:\n if worker_info.id < len(batches): self.batches = [batches[worker_info.id]]\n else: return\n else:\n start = worker_load * worker_info.id\n end = min(start + worker_load, len(batches))\n self.batches = batches[start:end]\n else: self.batches = self.dataset.to_batches()\n\n # Process and yield each batch\n for batch in self.batches:\n batch = batch.to_pydict()\n batch.update(self.process_func(batch))\n\n yield batch\n\nclass IterableParquetDataset(IterableDataset):\n def __init__(self, path, process_func):\n super().__init__()\n dataset = ds.dataset(path)\n self.process_func = process_func\n\n self.batches = Queue()\n [self.batches.put(batch) for batch in dataset.to_batches()]\n\n def __iter__(self):\n while True:\n if self.batches.empty() == True:\n self.batches.close()\n break\n\n batch = self.batches.get().to_pydict()\n batch.update(self.process_func(batch))\n yield batch\n" ]
[ [ "torch.utils.data.get_worker_info", "torch.multiprocessing.Queue" ] ]
jamesthatcher/ML-python-pytorch-cnn-gpu-training
[ "0f2feba6094bb934046ea2c2a7c3cac7b1c941f9" ]
[ "model.py" ]
[ "import torch\nfrom torch import nn\n\n\n# Convolutional neural network\nclass CNN(nn.Module):\n\n def __init__(self, n_classes=10):\n super(CNN, self).__init__()\n\n # Convolutional layers\n self.layers = nn.Sequential(nn.Conv2d(in_channels=1, out_channels=32, kernel_size=(3, 3)),\n nn.ReLU(),\n nn.MaxPool2d(kernel_size=2, stride=2),\n nn.Conv2d(in_channels=32, out_channels=64, kernel_size=(3, 3)),\n nn.ReLU(),\n nn.MaxPool2d(kernel_size=2, stride=2),\n nn.Conv2d(in_channels=64, out_channels=128, kernel_size=(3, 3)),\n nn.ReLU(),\n nn.MaxPool2d(kernel_size=2, stride=2),\n )\n\n # Fully connected layers with dropout\n self.classifier = nn.Sequential(\n nn.Dropout(0.2),\n nn.Linear(128, 512),\n nn.ReLU(),\n nn.Dropout(0.2),\n nn.Linear(512, 256),\n nn.ReLU(0.2),\n nn.Linear(256, n_classes)\n )\n\n def forward(self, x):\n x = self.layers(x)\n x = torch.flatten(x, 1)\n x = self.classifier(x)\n return x\n" ]
[ [ "torch.nn.Linear", "torch.nn.Dropout", "torch.nn.MaxPool2d", "torch.nn.ReLU", "torch.nn.Conv2d", "torch.flatten" ] ]
hassancpu/Coursera-AI-for-Medicine-Specialization
[ "c9b265ec002bb41fd3a71ac250dfef1c555f9568" ]
[ "Treatment/Week 3/util.py" ]
[ "# UTILS FILE\nimport keras\nfrom keras.applications.densenet import DenseNet121\nfrom keras.models import Model\nfrom keras.layers import Dense, Activation, Flatten, Dropout, BatchNormalization, GlobalAveragePooling2D\nfrom keras.callbacks import ModelCheckpoint, CSVLogger, LearningRateScheduler, ReduceLROnPlateau, EarlyStopping, TensorBoard\nfrom keras import backend as K\nfrom keras.preprocessing import image\nfrom keras.preprocessing.image import ImageDataGenerator\nimport seaborn as sns\nimport matplotlib.pyplot as plt\nimport pandas as pd\nimport numpy as np\nimport time\nimport cv2\nimport pickle\n\n\n\n# For part 2\nfrom sklearn.metrics import roc_curve\nfrom sklearn.metrics import roc_auc_score\n\nimport lifelines\n\nIMAGE_DIR = \"nih_new/images-small/\"\n\ndef get_mean_std_per_batch(df, H=320, W=320):\n sample_data = []\n for idx, img in enumerate(df.sample(100)[\"Image\"].values):\n path = IMAGE_DIR + img\n sample_data.append(np.array(image.load_img(path, target_size=(H, W))))\n\n mean = np.mean(sample_data[0])\n std = np.std(sample_data[0])\n return mean, std \n\ndef load_image_normalize(path, mean, std, H=320, W=320):\n x = image.load_img(path, target_size=(H, W))\n x -= mean\n x /= std\n x = np.expand_dims(x, axis=0)\n return x\n\ndef load_image(path, df, preprocess=True, H = 320, W = 320):\n \"\"\"Load and preprocess image.\"\"\"\n x = image.load_img(path, target_size=(H, W))\n if preprocess:\n mean, std = get_mean_std_per_batch(df, H=H, W=W)\n x -= mean\n x /= std\n x = np.expand_dims(x, axis=0)\n return x\n\ndef compute_gradcam(model, img, data_dir, df, labels, selected_labels, layer_name='bn'):\n img_path = data_dir + img\n preprocessed_input = load_image(img_path, df)\n predictions = model.predict(preprocessed_input)\n print(\"Ground Truth: \", \", \".join(np.take(labels, np.nonzero(df[df[\"Image\"] == img][labels].values[0]))[0]))\n\n plt.figure(figsize=(15, 10))\n plt.subplot(151)\n plt.title(\"Original\")\n plt.axis('off')\n plt.imshow(load_image(img_path, df, preprocess=False), cmap='gray')\n \n j = 1\n for i in range(len(labels)):\n if labels[i] in selected_labels:\n print(\"Generating gradcam for class %s (p=%2.2f)\" % (labels[i], round(predictions[0][i], 3)))\n gradcam = grad_cam(model, preprocessed_input, i, layer_name)\n plt.subplot(151 + j)\n plt.title(labels[i] + \": \" + str(round(predictions[0][i], 3)))\n plt.axis('off')\n plt.imshow(load_image(img_path, df, preprocess=False), cmap='gray')\n plt.imshow(gradcam, cmap='jet', alpha=min(0.5, predictions[0][i]))\n j +=1\n\n\ndef cindex(y_true, scores):\n return lifelines.utils.concordance_index(y_true, scores)\n\n# LOAD MODEL FROM C1M2\ndef load_C3M3_model():\n labels = ['Cardiomegaly', 'Emphysema', 'Effusion', 'Hernia', 'Infiltration', 'Mass', 'Nodule', 'Atelectasis',\n 'Pneumothorax', 'Pleural_Thickening', 'Pneumonia', 'Fibrosis', 'Edema', 'Consolidation']\n\n train_df = pd.read_csv(\"nih_new/train-small.csv\")\n valid_df = pd.read_csv(\"nih_new/valid-small.csv\")\n test_df = pd.read_csv(\"nih_new/test.csv\")\n\n class_pos = train_df.loc[:, labels].sum(axis=0)\n class_neg = len(train_df) - class_pos\n class_total = class_pos + class_neg\n\n pos_weights = class_pos / class_total\n neg_weights = class_neg / class_total\n print(\"Got loss weights\")\n # create the base pre-trained model\n base_model = DenseNet121(weights='imagenet', include_top=False)\n print(\"Loaded DenseNet\")\n # add a global spatial average pooling layer\n x = base_model.output\n x = GlobalAveragePooling2D()(x)\n # and a logistic layer\n 
predictions = Dense(len(labels), activation=\"sigmoid\")(x)\n print(\"Added layers\")\n\n model = Model(inputs=base_model.input, outputs=predictions)\n\n def get_weighted_loss(neg_weights, pos_weights, epsilon=1e-7):\n def weighted_loss(y_true, y_pred):\n # L(X, y) = −w * y log p(Y = 1|X) − w * (1 − y) log p(Y = 0|X)\n # from https://arxiv.org/pdf/1711.05225.pdf\n loss = 0\n for i in range(len(neg_weights)):\n loss -= (neg_weights[i] * y_true[:, i] * K.log(y_pred[:, i] + epsilon) + \n pos_weights[i] * (1 - y_true[:, i]) * K.log(1 - y_pred[:, i] + epsilon))\n \n loss = K.sum(loss)\n return loss\n return weighted_loss\n \n model.compile(optimizer='adam', loss=get_weighted_loss(neg_weights, pos_weights))\n print(\"Compiled Model\")\n\n #model.load_weights(\"nih_new/pretrained_model.h5\")\n #print(\"Loaded Weights\")\n return model" ]
[ [ "matplotlib.pyplot.title", "numpy.mean", "matplotlib.pyplot.figure", "numpy.nonzero", "numpy.std", "matplotlib.pyplot.axis", "pandas.read_csv", "numpy.expand_dims", "matplotlib.pyplot.subplot" ] ]
Linfeng-Lee/IIM
[ "c63bf8b023ccc6750e178112662972f721dcabe1" ]
[ "model/HR_Net/seg_hrnet.py" ]
[ "# ------------------------------------------------------------------------------\n# Copyright (c) Microsoft\n# Licensed under the MIT License.\n# Written by Ke Sun ([email protected])\n# ------------------------------------------------------------------------------\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport os\nimport logging\nimport functools\n\nimport numpy as np\n\nimport torch\nimport torch.nn as nn\nimport torch._utils\nimport torch.nn.functional as F\n\nBatchNorm2d = nn.BatchNorm2d\nBN_MOMENTUM = 0.01\nlogger = logging.getLogger(__name__)\n\n\ndef conv3x3(in_planes, out_planes, stride=1):\n \"\"\"3x3 convolution with padding\"\"\"\n return nn.Conv2d(in_planes, out_planes, kernel_size=3, stride=stride,\n padding=1, bias=False)\n\n\nclass BasicBlock(nn.Module):\n expansion = 1\n\n def __init__(self, inplanes, planes, stride=1, downsample=None):\n super(BasicBlock, self).__init__()\n self.conv1 = conv3x3(inplanes, planes, stride)\n self.bn1 = BatchNorm2d(planes, momentum=BN_MOMENTUM)\n self.relu = nn.ReLU(inplace=True)\n self.conv2 = conv3x3(planes, planes)\n self.bn2 = BatchNorm2d(planes, momentum=BN_MOMENTUM)\n self.downsample = downsample\n self.stride = stride\n\n def forward(self, x):\n residual = x\n\n out = self.conv1(x)\n out = self.bn1(out)\n out = self.relu(out)\n out = self.conv2(out)\n out = self.bn2(out)\n\n if self.downsample is not None:\n residual = self.downsample(x)\n\n out += residual\n out = self.relu(out)\n\n return out\n\n\nclass Bottleneck(nn.Module):\n expansion = 4\n\n def __init__(self, inplanes, planes, stride=1, downsample=None):\n super(Bottleneck, self).__init__()\n self.conv1 = nn.Conv2d(inplanes, planes, kernel_size=1, bias=False)\n self.bn1 = BatchNorm2d(planes, momentum=BN_MOMENTUM)\n self.conv2 = nn.Conv2d(planes, planes, kernel_size=3, stride=stride,\n padding=1, bias=False)\n self.bn2 = BatchNorm2d(planes, momentum=BN_MOMENTUM)\n self.conv3 = nn.Conv2d(planes, planes * self.expansion, kernel_size=1,\n bias=False)\n self.bn3 = BatchNorm2d(planes * self.expansion,\n momentum=BN_MOMENTUM)\n self.relu = nn.ReLU(inplace=True)\n self.downsample = downsample\n self.stride = stride\n\n def forward(self, x):\n residual = x\n\n out = self.conv1(x)\n out = self.bn1(out)\n out = self.relu(out)\n\n out = self.conv2(out)\n out = self.bn2(out)\n out = self.relu(out)\n\n out = self.conv3(out)\n out = self.bn3(out)\n\n if self.downsample is not None:\n residual = self.downsample(x)\n\n out += residual\n out = self.relu(out)\n\n return out\n\n\nclass HighResolutionModule(nn.Module):\n def __init__(self, num_branches, blocks, num_blocks, num_inchannels,\n num_channels, fuse_method, multi_scale_output=True):\n super(HighResolutionModule, self).__init__()\n self._check_branches(\n num_branches, blocks, num_blocks, num_inchannels, num_channels)\n\n self.num_inchannels = num_inchannels\n self.fuse_method = fuse_method\n self.num_branches = num_branches\n\n self.multi_scale_output = multi_scale_output\n\n self.branches = self._make_branches(\n num_branches, blocks, num_blocks, num_channels)\n self.fuse_layers = self._make_fuse_layers()\n self.relu = nn.ReLU(inplace=True)\n\n def _check_branches(self, num_branches, blocks, num_blocks,\n num_inchannels, num_channels):\n if num_branches != len(num_blocks):\n error_msg = 'NUM_BRANCHES({}) <> NUM_BLOCKS({})'.format(\n num_branches, len(num_blocks))\n logger.error(error_msg)\n raise ValueError(error_msg)\n\n if num_branches != len(num_channels):\n 
error_msg = 'NUM_BRANCHES({}) <> NUM_CHANNELS({})'.format(\n num_branches, len(num_channels))\n logger.error(error_msg)\n raise ValueError(error_msg)\n\n if num_branches != len(num_inchannels):\n error_msg = 'NUM_BRANCHES({}) <> NUM_INCHANNELS({})'.format(\n num_branches, len(num_inchannels))\n logger.error(error_msg)\n raise ValueError(error_msg)\n\n def _make_one_branch(self, branch_index, block, num_blocks, num_channels,\n stride=1):\n downsample = None\n if stride != 1 or \\\n self.num_inchannels[branch_index] != num_channels[branch_index] * block.expansion:\n downsample = nn.Sequential(\n nn.Conv2d(self.num_inchannels[branch_index],\n num_channels[branch_index] * block.expansion,\n kernel_size=1, stride=stride, bias=False),\n BatchNorm2d(num_channels[branch_index] * block.expansion,\n momentum=BN_MOMENTUM),\n )\n\n layers = []\n layers.append(block(self.num_inchannels[branch_index],\n num_channels[branch_index], stride, downsample))\n self.num_inchannels[branch_index] = \\\n num_channels[branch_index] * block.expansion\n for i in range(1, num_blocks[branch_index]):\n layers.append(block(self.num_inchannels[branch_index],\n num_channels[branch_index]))\n\n return nn.Sequential(*layers)\n\n def _make_branches(self, num_branches, block, num_blocks, num_channels):\n branches = []\n\n for i in range(num_branches):\n branches.append(\n self._make_one_branch(i, block, num_blocks, num_channels))\n\n return nn.ModuleList(branches)\n\n def _make_fuse_layers(self):\n if self.num_branches == 1:\n return None\n\n num_branches = self.num_branches\n num_inchannels = self.num_inchannels\n fuse_layers = []\n for i in range(num_branches if self.multi_scale_output else 1):\n fuse_layer = []\n for j in range(num_branches):\n if j > i:\n fuse_layer.append(nn.Sequential(\n nn.Conv2d(num_inchannels[j],\n num_inchannels[i],\n 1,\n 1,\n 0,\n bias=False),\n BatchNorm2d(num_inchannels[i], momentum=BN_MOMENTUM)))\n elif j == i:\n fuse_layer.append(None)\n else:\n conv3x3s = []\n for k in range(i-j):\n if k == i - j - 1:\n num_outchannels_conv3x3 = num_inchannels[i]\n conv3x3s.append(nn.Sequential(\n nn.Conv2d(num_inchannels[j],\n num_outchannels_conv3x3,\n 3, 2, 1, bias=False),\n BatchNorm2d(num_outchannels_conv3x3, \n momentum=BN_MOMENTUM)))\n else:\n num_outchannels_conv3x3 = num_inchannels[j]\n conv3x3s.append(nn.Sequential(\n nn.Conv2d(num_inchannels[j],\n num_outchannels_conv3x3,\n 3, 2, 1, bias=False),\n BatchNorm2d(num_outchannels_conv3x3,\n momentum=BN_MOMENTUM),\n nn.ReLU(inplace=True)))\n fuse_layer.append(nn.Sequential(*conv3x3s))\n fuse_layers.append(nn.ModuleList(fuse_layer))\n\n return nn.ModuleList(fuse_layers)\n\n def get_num_inchannels(self):\n return self.num_inchannels\n\n def forward(self, x):\n if self.num_branches == 1:\n return [self.branches[0](x[0])]\n\n for i in range(self.num_branches):\n x[i] = self.branches[i](x[i])\n\n x_fuse = []\n for i in range(len(self.fuse_layers)):\n y = x[0] if i == 0 else self.fuse_layers[i][0](x[0])\n for j in range(1, self.num_branches):\n if i == j:\n y = y + x[j]\n elif j > i:\n width_output = x[i].shape[-1]\n height_output = x[i].shape[-2]\n y = y + F.interpolate(\n self.fuse_layers[i][j](x[j]),\n size=[height_output, width_output],\n mode='bilinear')\n else:\n y = y + self.fuse_layers[i][j](x[j])\n x_fuse.append(self.relu(y))\n\n return x_fuse\n\n\nblocks_dict = {\n 'BASIC': BasicBlock,\n 'BOTTLENECK': Bottleneck\n}\n\n\nclass HighResolutionNet(nn.Module):\n\n def __init__(self, config, **kwargs):\n extra = config.MODEL.EXTRA\n super(HighResolutionNet, 
self).__init__()\n\n # stem net\n self.conv1 = nn.Conv2d(3, 64, kernel_size=3, stride=2, padding=1,\n bias=False)\n self.bn1 = BatchNorm2d(64, momentum=BN_MOMENTUM)\n self.conv2 = nn.Conv2d(64, 64, kernel_size=3, stride=2, padding=1,\n bias=False)\n self.bn2 = BatchNorm2d(64, momentum=BN_MOMENTUM)\n self.relu = nn.ReLU(inplace=True)\n \n self.stage1_cfg = extra['STAGE1']\n num_channels = self.stage1_cfg['NUM_CHANNELS'][0]\n block = blocks_dict[self.stage1_cfg['BLOCK']]\n num_blocks = self.stage1_cfg['NUM_BLOCKS'][0]\n self.layer1 = self._make_layer(block, 64, num_channels, num_blocks)\n stage1_out_channel = block.expansion*num_channels\n\n self.stage2_cfg = extra['STAGE2']\n num_channels = self.stage2_cfg['NUM_CHANNELS']\n block = blocks_dict[self.stage2_cfg['BLOCK']]\n num_channels = [\n num_channels[i] * block.expansion for i in range(len(num_channels))]\n self.transition1 = self._make_transition_layer(\n [stage1_out_channel], num_channels)\n self.stage2, pre_stage_channels = self._make_stage(\n self.stage2_cfg, num_channels)\n\n self.stage3_cfg = extra['STAGE3']\n num_channels = self.stage3_cfg['NUM_CHANNELS']\n block = blocks_dict[self.stage3_cfg['BLOCK']]\n num_channels = [\n num_channels[i] * block.expansion for i in range(len(num_channels))]\n self.transition2 = self._make_transition_layer(\n pre_stage_channels, num_channels)\n self.stage3, pre_stage_channels = self._make_stage(\n self.stage3_cfg, num_channels)\n\n self.stage4_cfg = extra['STAGE4']\n num_channels = self.stage4_cfg['NUM_CHANNELS']\n block = blocks_dict[self.stage4_cfg['BLOCK']]\n num_channels = [\n num_channels[i] * block.expansion for i in range(len(num_channels))]\n self.transition3 = self._make_transition_layer(\n pre_stage_channels, num_channels)\n self.stage4, pre_stage_channels = self._make_stage(\n self.stage4_cfg, num_channels, multi_scale_output=True)\n \n last_inp_channels = np.int(np.sum(pre_stage_channels))\n\n self.last_layer = nn.Sequential(\n nn.Conv2d(\n in_channels=last_inp_channels,\n out_channels=last_inp_channels,\n kernel_size=1,\n stride=1,\n padding=0),\n BatchNorm2d(last_inp_channels, momentum=BN_MOMENTUM),\n nn.ReLU(inplace=True),\n nn.ConvTranspose2d(720, 64, 4, stride=2, padding=1, output_padding=0, bias=True),\n nn.ReLU(inplace=True),\n nn.ConvTranspose2d(64, 1, 4, stride=2, padding=1, output_padding=0, bias=True),\n\n nn.Sigmoid()\n )\n\n def _make_transition_layer(\n self, num_channels_pre_layer, num_channels_cur_layer):\n num_branches_cur = len(num_channels_cur_layer)\n num_branches_pre = len(num_channels_pre_layer)\n\n transition_layers = []\n for i in range(num_branches_cur):\n if i < num_branches_pre:\n if num_channels_cur_layer[i] != num_channels_pre_layer[i]:\n transition_layers.append(nn.Sequential(\n nn.Conv2d(num_channels_pre_layer[i],\n num_channels_cur_layer[i],\n 3,\n 1,\n 1,\n bias=False),\n BatchNorm2d(\n num_channels_cur_layer[i], momentum=BN_MOMENTUM),\n nn.ReLU(inplace=True)))\n else:\n transition_layers.append(None)\n else:\n conv3x3s = []\n for j in range(i+1-num_branches_pre):\n inchannels = num_channels_pre_layer[-1]\n outchannels = num_channels_cur_layer[i] \\\n if j == i-num_branches_pre else inchannels\n conv3x3s.append(nn.Sequential(\n nn.Conv2d(\n inchannels, outchannels, 3, 2, 1, bias=False),\n BatchNorm2d(outchannels, momentum=BN_MOMENTUM),\n nn.ReLU(inplace=True)))\n transition_layers.append(nn.Sequential(*conv3x3s))\n\n return nn.ModuleList(transition_layers)\n\n def _make_layer(self, block, inplanes, planes, blocks, stride=1):\n downsample = None\n if 
stride != 1 or inplanes != planes * block.expansion:\n downsample = nn.Sequential(\n nn.Conv2d(inplanes, planes * block.expansion,\n kernel_size=1, stride=stride, bias=False),\n BatchNorm2d(planes * block.expansion, momentum=BN_MOMENTUM),\n )\n\n layers = []\n layers.append(block(inplanes, planes, stride, downsample))\n inplanes = planes * block.expansion\n for i in range(1, blocks):\n layers.append(block(inplanes, planes))\n\n return nn.Sequential(*layers)\n\n def _make_stage(self, layer_config, num_inchannels,\n multi_scale_output=True):\n num_modules = layer_config['NUM_MODULES']\n num_branches = layer_config['NUM_BRANCHES']\n num_blocks = layer_config['NUM_BLOCKS']\n num_channels = layer_config['NUM_CHANNELS']\n block = blocks_dict[layer_config['BLOCK']]\n fuse_method = layer_config['FUSE_METHOD']\n\n modules = []\n for i in range(num_modules):\n # multi_scale_output is only used last module\n if not multi_scale_output and i == num_modules - 1:\n reset_multi_scale_output = False\n else:\n reset_multi_scale_output = True\n modules.append(\n HighResolutionModule(num_branches,\n block,\n num_blocks,\n num_inchannels,\n num_channels,\n fuse_method,\n reset_multi_scale_output)\n )\n num_inchannels = modules[-1].get_num_inchannels()\n\n return nn.Sequential(*modules), num_inchannels\n\n def forward(self, x):\n x = self.conv1(x)\n x = self.bn1(x)\n x = self.relu(x)\n x = self.conv2(x)\n x = self.bn2(x)\n x = self.relu(x)\n x = self.layer1(x)\n\n x_list = []\n for i in range(self.stage2_cfg['NUM_BRANCHES']):\n if self.transition1[i] is not None:\n x_list.append(self.transition1[i](x))\n else:\n x_list.append(x)\n y_list = self.stage2(x_list)\n\n x_list = []\n for i in range(self.stage3_cfg['NUM_BRANCHES']):\n if self.transition2[i] is not None:\n x_list.append(self.transition2[i](y_list[-1]))\n else:\n x_list.append(y_list[i])\n y_list = self.stage3(x_list)\n\n x_list = []\n for i in range(self.stage4_cfg['NUM_BRANCHES']):\n if self.transition3[i] is not None:\n x_list.append(self.transition3[i](y_list[-1]))\n else:\n x_list.append(y_list[i])\n x = self.stage4(x_list)\n\n # Upsampling\n x0_h, x0_w = x[0].size(2), x[0].size(3)\n x1 = F.upsample(x[1], size=(x0_h, x0_w), mode='bilinear')\n x2 = F.upsample(x[2], size=(x0_h, x0_w), mode='bilinear')\n x3 = F.upsample(x[3], size=(x0_h, x0_w), mode='bilinear')\n\n f = torch.cat([x[0], x1, x2, x3], 1)\n\n x = self.last_layer(f)\n\n\n return f, x\n\n def init_weights(self, pretrained='',):\n logger.info('=> init weights from normal distribution')\n for m in self.modules():\n if isinstance(m, nn.Conv2d):\n nn.init.normal_(m.weight, std=0.001)\n elif isinstance(m, nn.BatchNorm2d):\n nn.init.constant_(m.weight, 1)\n nn.init.constant_(m.bias, 0)\n if os.path.isfile(pretrained):\n pretrained_dict = torch.load(pretrained)\n logger.info('=> loading pretrained model {}'.format(pretrained))\n model_dict = self.state_dict()\n pretrained_dict = {k: v for k, v in pretrained_dict.items()\n if k in model_dict.keys()}\n model_dict.update(pretrained_dict)\n self.load_state_dict(model_dict)\n\n print(\"load pre_trained parameters for HR_Net\")\n\ndef get_seg_model(**kwargs):\n from model.HR_Net.default import _C as hr_config\n from model.HR_Net.default import update_config\n \n update_config(hr_config, './model/HR_Net/seg_hrnet_w48.yaml')\n model = HighResolutionNet(hr_config, **kwargs)\n from config import cfg\n\n model.init_weights(cfg.PRE_HR_WEIGHTS)\n return model\n\nif __name__ == '__main__':\n from torchsummary import summary\n model = get_seg_model().cuda()\n 
print(model)\n summary(model,(3,224,224))" ]
[ [ "torch.cat", "torch.nn.functional.upsample", "torch.nn.ModuleList", "torch.nn.Sequential", "numpy.sum", "torch.nn.Sigmoid", "torch.nn.ConvTranspose2d", "torch.nn.init.constant_", "torch.nn.ReLU", "torch.nn.Conv2d", "torch.nn.init.normal_", "torch.load" ] ]
ewang2002/UCSDHistEnrollData
[ "a8cde6168e28285420b4eefe446e6a629296fa2d" ]
[ "plot.py" ]
[ "from datetime import datetime, timedelta\nfrom os import listdir, mkdir\nfrom os.path import exists, join\nimport sys\nfrom typing import List, Tuple, TypeVar\nimport pandas as pd\nimport seaborn as sns\nimport matplotlib.pyplot as plt\nimport matplotlib.ticker as ticker\nfrom math import floor\nfrom multiprocessing import Process\nimport gc\nimport importlib\n\n# Settings for input/output, basic plot stuff\nGENERAL_SETTINGS = {\n 'id': 'general',\n 'overall_plot_folder': 'plot_overall',\n 'section_plot_folder': 'plot_section',\n\n 'figure_size': (17, 7),\n 'num_ticks': 50\n}\n\nWIDE_SETTINGS = {\n 'id': 'wide',\n 'overall_plot_folder': 'plot_overall_wide',\n 'section_plot_folder': 'plot_section_wide',\n\n 'figure_size': (50, 10),\n 'num_ticks': 200\n}\n\nFSP_SETTINGS = {\n 'id': 'fsp',\n 'overall_plot_folder': 'plot_overall_fsp',\n 'section_plot_folder': 'plot_section_fsp',\n\n 'figure_size': (17, 7),\n 'num_ticks': 50\n}\n\nOVERALL_FOLDER = 'overall'\nSECTION_FOLDER = 'section'\n\n\n# For the plotconfig.py file\nMARKERS = \"markers\"\nMARKER_DATES = 'd'\nMARKER_TIME = 't'\nLINE_STYLE = 'l'\nLINE_COLOR = 'c'\nNAME_OF_MARKER = 'n'\nCONFIG_SETTINGS = 'settings'\nSHADE = 's'\n\n# Multiprocessing options\nCHUNK_SIZE = 20\nWIDE_CHUNK_SIZE = 10\nPROCESS_COUNT = 10\n\n\nT = TypeVar('T')\ndef subsets_with_limits(arr: List[T], num_subsets: int, max_per_elem: int) -> List[List[T]]:\n arr.reverse()\n subsets = []\n len_to_use = max(0, len(arr) - max_per_elem * num_subsets)\n idx = 0\n while len(arr) > len_to_use:\n if idx < len(subsets):\n subsets[idx].append(arr.pop())\n idx = (idx + 1) % num_subsets\n continue \n\n subsets.append([arr.pop()])\n idx = (idx + 1) % num_subsets\n \n arr.reverse()\n return subsets\n\ndef process_overall(num: int, files: List[str], from_folder: str, out_folder: str, settings, config):\n \"\"\"\n Processes the folder containing overall data.\n :param num: The process label number (just for identification).\n :param files: List of files to process\n :param from_folder: Folder to read from\n :param out_folder: Folder to write to\n :param settings: Settings to use\n :param config: The configuration object, from the plotconfig.py file.\n \"\"\"\n\n # Uncomment if you want to skip the images that were already generated\n # temp_files = [f for f in listdir(out_folder) if exists(join(out_folder, f))]\n completed = 0\n for file in files:\n print(f\"\\t[{num}] Processing {file}.\")\n \n #if file.replace('csv', 'png') in temp_files: \n # completed += 1\n # print(f\"\\t\\t[{num}] Skipped {file} (Completed {completed}/{len(files)}).\")\n # continue \n\n # Read in our CSV file\n df = pd.read_csv(join(from_folder, file))\n if settings['id'] == 'fsp':\n if len(config[MARKERS]) == 0 or \"end\" not in config[MARKERS][-1][NAME_OF_MARKER].lower():\n completed += 1\n print(f'\\t\\t[{num}] Skipped {file} due to no end marker despite fsp (Completed {completed}/{len(files)}).')\n continue \n\n\n end_date_str = config[MARKERS][-1][MARKER_DATES][-1]\n # Parse this date\n end_date = datetime.strptime(end_date_str, '%Y-%m-%d') + timedelta(days=1)\n # Filter all rows in df so that the date is earlier than the end date, noting that\n # the date in df['time'] needs to be converted first\n df = df[df['time'].apply(lambda x: datetime.strptime(x, \"%Y-%m-%dT%H:%M:%S\") < end_date)]\n\n if len(df.index) == 0:\n completed += 1\n print(f\"\\t\\t[{num}] Skipped {file} (Completed {completed}/{len(files)}).\")\n continue \n\n # Adjust the figure so it's big enough for display\n 
plt.figure(figsize=settings['figure_size'])\n\n max_y = 0\n # Plot the number of available & waitlisted seats\n if config[CONFIG_SETTINGS]['showTotal']:\n sns.lineplot(data=df, x='time', y='total', color='purple', label='Total Seats', linestyle='--', linewidth=4)\n max_y = df['total'].max()\n\n sns.lineplot(data=df, x='time', y='waitlisted', color='blue', label='Waitlisted', linewidth=1)\n max_y = max(max_y, df['waitlisted'].max())\n \n if config[CONFIG_SETTINGS]['useEnrolledTtl']:\n sns.lineplot(data=df, x='time', y='enrolled', color='red', label='Enrolled', linewidth=2)\n max_y = max(df['enrolled'].max(), max_y)\n else:\n sns.lineplot(data=df, x='time', y='available', color='red', label='Available', linewidth=2)\n max_y = max(df['available'].max(), max_y)\n \n\n plot = plt.gca()\n # Modify plot properties to make it more readable\n title = file.replace('.csv', '')\n if '_' in title:\n course, section = title.split('_')\n title = f'{course}, Section {section}'\n\n plot.set_title(title + f' ({config[CONFIG_SETTINGS][\"termName\"]})')\n plot.set_xlabel('Time')\n plot.set_ylabel('Seats')\n plot.grid(True)\n plot.margins(0)\n\n # Set bottom-left corner to (0, 0)\n plt.xlim(xmin=0)\n plt.ylim(ymin=0, ymax=max(1.05*max_y, 1))\n\n # To make the x-axis more readable, purposely hide some dates and then\n # adjust the labels appropriately\n plt.setp(plot.xaxis.get_majorticklabels(), rotation=45, ha=\"right\")\n # We want NUM_TICKS ticks on the x-axis\n plot.xaxis.set_major_locator(ticker.MultipleLocator(max(floor(len(df) / settings['num_ticks']), 1)))\n plot.yaxis.set_major_locator(ticker.MaxNLocator(integer=True))\n\n if config[CONFIG_SETTINGS]['useMarkers']: \n p_max = 2 if config[CONFIG_SETTINGS]['isNormal'] else 1\n all_dates = df['time'].tolist()\n # map all dates in all_dates to a tuple of string date and datetime object\n all_dates: Tuple[str, datetime] = list(map(lambda x: (x, datetime.strptime(x, \"%Y-%m-%dT%H:%M:%S\")), all_dates))\n\n spans = []\n spans2 = []\n seen_grades = set()\n \n for marker in config[MARKERS]:\n # index [0, 1] -> 0 = first pass, 1 = second pass\n for p in range(0, p_max):\n hr = marker[MARKER_TIME]\n date = marker[MARKER_DATES][p]\n # find the first date in all_dates whose date is equal to date\n # and has the closest hour to hr\n axis_date = list(filter(lambda x: x[1].strftime(\"%Y-%m-%d\") == date and (x[1].hour == hr or\\\n x[1].hour == hr + 1 or x[1].hour == hr + 2 or x[1].hour == hr + 3), all_dates))\n if len(axis_date) == 0:\n continue\n\n if marker[SHADE]:\n (spans if p == 0 else spans2).append({\n 'start': axis_date[0][0],\n 'color': marker[LINE_COLOR],\n 'legend': marker[NAME_OF_MARKER],\n })\n\n plt.axvline(x=axis_date[0][0], \\\n color=marker[LINE_COLOR], \\\n linestyle=marker[LINE_STYLE], \\\n label=None if marker[NAME_OF_MARKER] in seen_grades else marker[NAME_OF_MARKER])\n seen_grades.add(marker[NAME_OF_MARKER])\n\n # Note that the reason why I didn't just combine the lists is because I don't want to add the \"End\" from first pass\n # to the graph. 
\n\n seen_shades = set()\n # For first-pass stuff\n for i in range(0, len(spans) - 1):\n # fill plot between combined_spans[i] and combined_spans[i+1]\n plt.axvspan(spans[i]['start'], \\\n spans[i+1]['start'], \\\n color=spans[i]['color'], \\\n alpha=0.2, \\\n label=None if spans[i]['legend'] in seen_shades else spans[i]['legend'])\n seen_shades.add(spans[i]['legend'])\n\n # For second-pass stuff\n for i in range(0, len(spans2) - 1):\n # fill plot between combined_spans[i] and combined_spans[i+1]\n plt.axvspan(spans2[i]['start'], \\\n spans2[i+1]['start'], \\\n color=spans2[i]['color'], \\\n alpha=0.2, \\\n label=None if spans2[i]['legend'] in seen_shades else spans2[i]['legend'])\n seen_shades.add(spans2[i]['legend'])\n\n # https://matplotlib.org/2.0.2/users/legend_guide.html\n plt.legend(bbox_to_anchor=(1.02, 1), loc=2, borderaxespad=0.)\n # Adjusts the padding\n plt.tight_layout()\n\n # Then, saves the figure and closes it to save memory\n fig = plot.get_figure()\n fig.savefig(join(out_folder, file.replace('.csv', '')))\n \n # Clear the plot, close it, and clear the memory\n plot.cla()\n plt.clf()\n plt.cla()\n plt.close('all')\n del plot\n del fig\n del df\n gc.collect()\n completed += 1\n print(f\"\\t\\t[{num}] Finished {file} (Completed {completed}/{len(files)}).\")\n\n\nif __name__ == '__main__':\n if len(sys.argv) != 3:\n print(\"Usage: plot.py <base folder> <'s', 'o', 'sw', 'ow', 'sfsp', 'ofsp'>\")\n sys.exit(1)\n\n # Get the cleaned folder\n base_folder = sys.argv[-2]\n if not exists(base_folder):\n print(f\"Folder '{base_folder}' does not exist\")\n sys.exit(1)\n\n # Get the type of data to process\n dt = sys.argv[-1]\n if dt not in ['s', 'o', 'sw', 'ow', 'sfsp', 'ofsp']:\n print(f\"Invalid data type '{dt}' - must be one of:\")\n print(\"\\t's' (section)\")\n print(\"\\t'o' (overall)\")\n print(\"\\t'sw' (section, wide display)\")\n print(\"\\t'ow' (overall, wide display)\")\n print(\"\\t'sfsp' (section, first/second-pass only)\")\n print(\"\\t'ofsp' (overall, first/second-pass only)\")\n sys.exit(1)\n\n # Get the relevant configuration object\n try: \n config = importlib.import_module(f'{base_folder}.plotconfig').CONFIG\n except ModuleNotFoundError:\n print(f'{base_folder} does not contain a plotconfig.py file. Please set one up and then try again.')\n exit(1)\n\n chunk_size = CHUNK_SIZE\n if dt in ['s', 'o']:\n settings_obj = GENERAL_SETTINGS\n elif dt in ['sw', 'ow']:\n settings_obj = WIDE_SETTINGS\n chunk_size = WIDE_CHUNK_SIZE\n elif dt in ['sfsp', 'ofsp']:\n settings_obj = FSP_SETTINGS\n\n plot_folder = join(base_folder, settings_obj['overall_plot_folder'] if dt in ['o', 'ow', 'ofsp'] else settings_obj['section_plot_folder'])\n if not exists(plot_folder):\n mkdir(plot_folder)\n\n in_folder = join(base_folder, OVERALL_FOLDER if dt in ['o', 'ow', 'ofsp'] else SECTION_FOLDER)\n all_files = listdir(in_folder)\n\n # If we're working with sections, we only want the files that appear more than once\n # Categorize each file by the class that they represent.\n if dt == 's':\n # The key is the course (e.g. CSE 100.csv) and the value is a list\n # of all sections (e.g. 
CSE 100_A.csv)\n file_secs = {}\n for file in all_files:\n f_name = file.split('_')[0]\n if f_name not in file_secs:\n file_secs[f_name] = [file]\n else:\n file_secs[f_name].append(file)\n\n all_files = []\n for f_name in file_secs:\n if len(file_secs[f_name]) > 1:\n all_files += file_secs[f_name]\n\n # Begin running\n print(f'Processing {len(all_files)} files into chunks of {chunk_size} files each.')\n print(f'\\tWide? {dt == \"sw\" or dt == \"ow\"}')\n print(f'\\tInput Folder: {in_folder}')\n print(f'\\tPlot Folder: {plot_folder}')\n print(f'\\tProcesses: {PROCESS_COUNT}')\n\n len_of_files = len(all_files)\n completed = 0\n while len(all_files) > 0:\n files_to_process = subsets_with_limits(all_files, PROCESS_COUNT, chunk_size)\n processes = []\n # Limit ourselves to PROCESS_COUNT processes, or else we might\n # end up crashing the host device with too many processes.\n for (i, chunk) in enumerate(files_to_process):\n print(f'Starting process {i} (with count {len(chunk)}).')\n # Create a process to process the chunk\n p = Process(target=process_overall, args=(i, \\\n chunk, \\\n in_folder, \\\n plot_folder, \\\n settings_obj, \\\n config))\n p.start()\n processes.append(p)\n \n # Wait for all processes to finish\n for p in processes:\n p.join()\n completed += sum(len(x) for x in files_to_process)\n print(f'\\t\\tCompleted {completed}/{len_of_files} files ({len(all_files)} left).')" ]
[ [ "matplotlib.pyplot.axvspan", "matplotlib.pyplot.xlim", "matplotlib.ticker.MaxNLocator", "matplotlib.pyplot.axvline", "matplotlib.pyplot.legend", "matplotlib.pyplot.close", "matplotlib.pyplot.cla", "matplotlib.pyplot.figure", "matplotlib.pyplot.tight_layout", "matplotlib.pyplot.clf", "matplotlib.pyplot.gca" ] ]
w121211/CoordConv
[ "269a8da039f0df08833c5d2324895d37e6e33a0a" ]
[ "experiments/regressor/regressor_rect.py" ]
[ "import os\nimport itertools\n\nimport numpy as np\nfrom PIL import Image, ImageDraw\nimport torch\nimport torch.nn as nn\nimport torch.nn.modules.conv as conv\nimport torch.utils.data as utils\nimport torch.nn.functional as F\nfrom torchsummary import summary\nimport matplotlib.pyplot as plt\nfrom sklearn.model_selection import train_test_split\n\nfrom coordconv import AddCoords\nfrom hrnet import HighResolutionNet\nfrom config import get_cfg_defaults\n\n# from models.CornerNet_Squeeze import corner_pool\n# from models.py_utils import TopPool, LeftPool\n\n\ndef norm(x, width):\n return (int)(x * (width - 1) + 0.5)\n\n\ndef one_hot(x, y, width=64):\n z = np.zeros((width, width), dtype=int)\n z[y][x] = 1\n z = z.transpose(1, 0).reshape(width * width) # (W, H) -> (H, W) -> (H*W)\n return z\n\n\ndef draw_rect_pil(xy, width=64):\n x0, y0, x1, y1 = xy\n x1 -= 0.5\n y1 -= 0.5\n im = Image.new(\"F\", (width, width))\n draw = ImageDraw.Draw(im)\n draw.rectangle([x0, y0, x1, y1], fill=1)\n im = np.array(im) # (H, W)\n return im\n\n\ndef draw_rect_np(xy, width=3):\n x0, y0, x1, y1 = xy\n im = np.zeros((width, width))\n for i, j in itertools.product(range(x0, x1), range(y0, y1)):\n im[i][j] = 1.0\n return im\n\n\ndef rand_draw(draw_fn=draw_rect_pil, n_strokes=1, width=128, action_dim=4):\n canvas = np.zeros((width, width, 3), dtype=int)\n x = []\n\n for _ in range(n_strokes):\n _x = np.random.rand(action_dim)\n color = np.random.randint(255, size=(3)) # (3)\n x.append(np.concatenate((_x, color / 255.0)))\n\n stroke = draw_fn(_x, width) # (w, w)\n stroke = np.expand_dims(stroke, axis=2) # (w, w, 1)\n canvas = canvas * (1 - stroke) + stroke * color # (w, h, 3)\n\n x = np.stack(x) # (n_strokes, action_dim+3)\n return canvas.astype(int), x\n\n\ndef draw_l2_distance(x, y, width=64):\n im = np.zeros((width, width), dtype=float)\n for (i, j), _ in np.ndenumerate(im):\n im[i][j] = np.linalg.norm(np.array([x, y]) - np.array([i, j])) / width\n return im\n\n\ndef generate_data(width=64, n_sample=1000):\n print(\"Generating datasets...\")\n if not os.path.exists(\"data-rect/\"):\n os.makedirs(\"data-rect/\")\n\n _xs, x, im, dist = [], [], [], []\n if width < 10:\n for x0, y0 in itertools.product(range(width), range(width)):\n for _w, _h in itertools.product(\n range(1, width - x0 + 1), range(1, width - y0 + 1)\n ):\n x1 = x0 + _w\n y1 = y0 + _h\n _xs.append(np.array((x0, y0, x1, y1), dtype=int))\n else:\n for _ in range(n_sample):\n x0, y0 = np.random.randint(width, size=2)\n x1 = x0 + np.random.randint(1, width - x0 + 1)\n y1 = y0 + np.random.randint(1, width - y0 + 1)\n _xs.append(np.array((x0, y0, x1, y1), dtype=int))\n for _x in _xs:\n # _im = draw_rect_np(_x, width)\n _im = draw_rect_pil(_x, width)\n # _dist = draw_l2_distance(x0, y0, width)\n im.append(_im)\n _x = _x.astype(float) / width\n x.append(_x)\n # dist.append(_dist)\n\n x = np.stack(x)\n im = np.stack(im) # (N, W, H)\n im = np.expand_dims(im, axis=-1)\n # im = im.transpose(0, 3, 1, 2) # (N, H, W, C) -> (N, C, H, W) only when PIL.Image\n im = im.transpose(0, 3, 2, 1) # (N, W, H, C) -> (N, C, H, W)\n # dist = np.stack(dist)\n # dist = np.expand_dims(dist, axis=-1)\n # dist = dist.transpose(0, 3, 2, 1) # (N, H, W, C) -> (N, C, H, W)\n\n indices = np.arange(0, len(x), dtype=\"int32\")\n train, test = train_test_split(indices, test_size=0.2, random_state=0)\n\n np.save(\"data-rect/train_x.npy\", x[train])\n np.save(\"data-rect/train_images.npy\", im[train])\n # np.save(\"data-rect/train_dist.npy\", dist[train])\n np.save(\"data-rect/test_x.npy\", 
x[test])\n np.save(\"data-rect/test_images.npy\", im[test])\n\n\n# np.save(\"data-rect/test_dist.npy\", dist[test])\n\n\ndef load_data():\n print(\"loading data...\")\n train_x = np.load(\"data-rect/train_x.npy\").astype(\"float32\")\n train_im = np.load(\"data-rect/train_images.npy\").astype(\"float32\")\n # train_dist = np.load(\"data-rect/train_dist.npy\").astype(\"float32\")\n train_dist = None\n test_x = np.load(\"data-rect/test_x.npy\").astype(\"float32\")\n test_im = np.load(\"data-rect/test_images.npy\").astype(\"float32\")\n # test_dist = np.load(\"data-rect/test_dist.npy\").astype(\"float32\")\n test_dist = None\n\n # print(\"Train set : \", train_set.shape, train_set.max(), train_set.min())\n # print(\"Test set : \", test_set.shape, test_set.max(), test_set.min())\n\n # Visualize the datasets\n # plt.imshow(np.sum(train_onehot, axis=0)[0, :, :], cmap=\"gray\")\n # plt.title(\"Train One-hot dataset\")\n # plt.show()\n # plt.imshow(np.sum(test_onehot, axis=0)[0, :, :], cmap=\"gray\")\n # plt.title(\"Test One-hot dataset\")\n # plt.show()\n\n return train_x, train_im, train_dist, test_x, test_im, test_dist\n\n\nclass OnehotNet(nn.Module):\n def __init__(self, width=64):\n super(OnehotNet, self).__init__()\n self.width = width\n # self.coordconv = CoordConv2d(2, 32, 1, with_r=True)\n self.conv1 = nn.Conv2d(1, 4, 3, padding=1)\n self.conv2 = nn.Conv2d(4, 1, 3, padding=1)\n # self.bn1 = nn.BatchNorm2d(1)\n # self.conv2 = nn.Conv2d(8, 16, 3, padding=1)\n # self.conv3 = nn.Conv2d(16, 1, 3, padding=1)\n self.conv4 = nn.Conv2d(1, 1, 1)\n\n def forward(self, x):\n # x = self.coordconv(x)\n # x = F.relu(self.conv1(x))\n x = self.conv1(x)\n # x = self.bn1(x)\n x = F.relu(x)\n x = F.relu(self.conv2(x))\n # x = F.relu(self.conv3(x))\n # x = self.conv4(x)\n x = x.view(-1, self.width ** 2)\n return x\n\n\nclass SimpleNet(nn.Module):\n def __init__(self, width):\n super(SimpleNet, self).__init__()\n self.width = width\n self.conv1 = nn.Conv2d(1, 8, 3, padding=1)\n self.conv2 = nn.Conv2d(8, 16, 3, padding=1)\n self.pool = nn.MaxPool2d(2)\n # self.conv3 = nn.Conv2d(16, 32, 3, padding=1)\n # self.conv4 = nn.Conv2d(32, 16, 3, padding=1)\n # self.pool = corner_pool(32, TopPool, LeftPool)\n self.add_coords = AddCoords(rank=2)\n self.conv5 = nn.Conv2d(19, 19, 3, padding=1)\n self.conv6 = nn.Conv2d(19, 19, 3, padding=1)\n self.conv7 = nn.Conv2d(19, 4, 1)\n self.pool2 = nn.MaxPool2d(width, stride=width)\n\n # regressor\n\n def forward(self, x):\n \"\"\"\n x: (N, C, H, W)\n \"\"\"\n # heatmap\n # x0 = self.add_coords(x)\n x1 = F.relu(self.conv1(x))\n x1 = F.relu(self.conv2(x1))\n # x = F.relu(self.conv3(x))\n # x = F.relu(self.conv4(x))\n # x = F.relu(self.conv5(x))\n x1 = self.pool(x1)\n x1 = F.interpolate(x1, scale_factor=2)\n # print(x.shape)\n # print(x1.shape)\n x = torch.cat((x, x1), dim=1)\n\n # regression\n x = self.add_coords(x)\n x = F.relu(self.conv5(x))\n x = F.relu(self.conv6(x))\n x = self.conv7(x)\n x = self.pool2(x)\n x = x.view(-1, 4)\n return x\n\n\nclass HRNet(nn.Module):\n def __init__(self, width):\n super(HRNet, self).__init__()\n cfg = get_cfg_defaults()\n cfg.merge_from_file(\"./experiments/exp.yaml\")\n self.hr = HighResolutionNet(cfg)\n self.add_coords = AddCoords(rank=2)\n self.conv5 = nn.Conv2d(7, 7, 3, padding=1)\n self.conv6 = nn.Conv2d(7, 7, 3, padding=1)\n self.conv7 = nn.Conv2d(7, 4, 1)\n self.pool = nn.MaxPool2d(width, stride=width)\n\n def forward(self, x):\n x1 = self.hr(x)\n x1 = F.interpolate(x1, scale_factor=4)\n x = torch.cat((x, x1), dim=1)\n x = 
self.add_coords(x)\n x = F.relu(self.conv5(x))\n x = F.relu(self.conv6(x))\n x = self.conv7(x)\n x = self.pool(x)\n x = x.view(-1, 4)\n return x\n\n\ndef train(epoch, net, train_dataloader, optimizer, loss_fn, device):\n net.train()\n iters = 0\n for batch_idx, (x, y_target) in enumerate(train_dataloader):\n x, y_target = x.to(device), y_target.to(device)\n optimizer.zero_grad()\n y = net(x)\n # print('-------')\n # print(x.shape)\n # print(y.shape)\n # print(y_target)\n loss = loss_fn(y, y_target)\n loss.backward()\n optimizer.step()\n iters += len(x)\n print(\n \"Train Epoch: {} [{}/{} ({:.0f}%)] Loss: {:.6f}\".format(\n epoch,\n iters,\n len(train_dataloader.dataset),\n 100.0 * (batch_idx + 1) / len(train_dataloader),\n loss.data.item(),\n ),\n end=\"\\r\",\n flush=True,\n )\n # print(y[0])\n # print(y_target[0])\n print(\"\")\n\n\nif __name__ == \"__main__\":\n np.random.seed(0)\n torch.manual_seed(0)\n\n device = torch.device(\"cuda\" if torch.cuda.is_available() else \"cpu\")\n\n # prepare dataset\n width = 512\n generate_data(width)\n train_x, train_im, train_dist, test_x, test_im, test_dist = load_data()\n train_tensor_x = torch.from_numpy(train_x)\n train_tensor_im = torch.from_numpy(train_im)\n # train_tensor_dist = torch.from_numpy(train_dist)\n\n train_dataset = utils.TensorDataset(train_tensor_im, train_tensor_x)\n train_dataloader = utils.DataLoader(train_dataset, batch_size=16, shuffle=True)\n\n # test_tensor_x = torch.stack([torch.Tensor(i) for i in test_set])\n # test_tensor_y = torch.stack([torch.LongTensor(i) for i in test_onehot])\n # test_dataset = utils.TensorDataset(test_tensor_y, test_tensor_x)\n # test_dataloader = utils.DataLoader(test_dataset, batch_size=32, shuffle=False)\n\n def cross_entropy_one_hot(input, target):\n _, labels = target.max(dim=1)\n return nn.CrossEntropyLoss()(input, labels)\n\n # model = SimpleNet(width).to(device)\n model = HRNet(width).to(device)\n # summary(model, input_size=(1, 64, 64))\n optimizer = torch.optim.Adam(model.parameters(), lr=1e-3)\n loss_fn = nn.MSELoss()\n # loss_fn = cross_entropy_one_hot\n epochs = 1000\n\n for epoch in range(1, epochs + 1):\n train(epoch, model, train_dataloader, optimizer, loss_fn, device)\n" ]
[ [ "torch.cat", "numpy.random.rand", "numpy.load", "torch.cuda.is_available", "torch.nn.CrossEntropyLoss", "numpy.concatenate", "torch.nn.MaxPool2d", "numpy.save", "torch.manual_seed", "numpy.random.randint", "torch.utils.data.DataLoader", "torch.nn.functional.relu", "numpy.expand_dims", "numpy.array", "numpy.zeros", "torch.nn.Conv2d", "numpy.stack", "sklearn.model_selection.train_test_split", "torch.utils.data.TensorDataset", "numpy.ndenumerate", "torch.nn.MSELoss", "numpy.random.seed", "torch.nn.functional.interpolate", "torch.from_numpy" ] ]
brycatch/challenge-prework-ds
[ "28c2c256f7ee359eebcdfbade534399980578217" ]
[ "main.py" ]
[ "import pandas as pd\nimport numpy as np\n\ndef read_csv(name):\n # 1 Read file by name\n data_frame = pd.read_csv(name) \n # 2 Cleaning data frame\n data_frame = data_frame.convert_dtypes() \n data_frame[[\"Estado\",\"Año\",\"Mes\",\"Tipo de visitantes\",\"Nacionalidad\",\"Centro de trabajo\"]] = data_frame[[\"Estado\",\"Año\",\"Mes\",\"Tipo de visitantes\",\"Nacionalidad\",\"Centro de trabajo\"]].astype(\"category\") \n data_frame[\"Número de visitas\"] = pd.to_numeric(data_frame[\"Número de visitas\"], errors=\"coerce\") \n # Fields to use: Estado, Clave SIINAH, Centro de trabajo, Año, Mes, Tipo de visitantes, Número de visitas y Nacionalidad\n fields_to_use = ['Estado', 'Clave SIINAH','Centro de trabajo', 'Año', 'Mes', 'Tipo de visitantes','Número de visitas','Nacionalidad']\n return data_frame[fields_to_use]\n\ndef totals_by_state_and_year(data_frame):\n # 3.1 Totals by state, month and year\n data_copy = data_frame.copy(deep=True)\n data_copy = data_copy.set_index(['Estado','Año', 'Mes']).sort_index()\n print(\"Totals by state, month and year\")\n print(data_copy.head(5))\n\ndef totals_by_state_and_visitor(data_frame):\n # 3.2 Totals by state and visitor\n data_copy = data_frame.copy(deep=True)\n data_copy = data_copy.set_index(['Estado','Tipo de visitantes']).sort_index()\n print(\"Totals by state and visitor\")\n print(data_copy.head(5))\n\ndef totals_by_state_visitor_and_year(data_frame):\n # 3.3 Totals by state, visitor, year and month\n data_copy = data_frame.copy(deep=True)\n data_copy = data_copy.set_index(['Estado','Tipo de visitantes','Año','Mes']).sort_index()\n print(\"Totals by state, visitor, year and month\")\n print(data_copy.head(5))\n\ndef totals_by_workplace_and_year(data_frame):\n # 3.4 Totals by workplace, year and month\n data_copy = data_frame.copy(deep=True)\n data_copy = data_copy.set_index(['Centro de trabajo','Año','Mes']).sort_index()\n print(\"Totals by workplace, year and month\")\n print(data_copy.head(5))\n\ndef totals_by_workplace_and_visitor(data_frame):\n # 3.5 Totals by workplace and visitors\n data_copy = data_frame.copy(deep=True)\n data_copy = data_copy.set_index([\"Centro de trabajo\", \"Tipo de visitantes\"]).sort_index()\n print(\"Totals by workplace and visitors\")\n print(data_copy.head(5))\n\ndef run():\n data_frame = read_csv(\"INAH_detallado_2019.csv\")\n totals_by_state_and_year(data_frame) \n totals_by_state_and_visitor(data_frame)\n totals_by_state_visitor_and_year(data_frame)\n totals_by_workplace_and_year(data_frame)\n totals_by_workplace_and_visitor(data_frame) \n\nif __name__ == '__main__':\n run()" ]
[ [ "pandas.read_csv", "pandas.to_numeric" ] ]
benelot/tensorforce
[ "3c3b9c3ac153761016cf9883b76613c9d93952bf" ]
[ "tensorforce/core/distributions/categorical.py" ]
[ "# Copyright 2017 reinforce.io. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\nfrom __future__ import absolute_import\nfrom __future__ import print_function\nfrom __future__ import division\n\nfrom math import log\nimport tensorflow as tf\n\nfrom tensorforce import util\nfrom tensorforce.core.networks import Linear\nfrom tensorforce.core.distributions import Distribution\n\n\nclass Categorical(Distribution):\n \"\"\"\n Categorical distribution, for discrete actions.\n \"\"\"\n\n def __init__(self, shape, num_actions, probabilities=None, scope='categorical', summary_labels=()):\n \"\"\"\n Categorical distribution.\n\n Args:\n shape: Action shape.\n num_actions: Number of discrete action alternatives.\n probabilities: Optional distribution bias.\n \"\"\"\n self.num_actions = num_actions\n\n action_size = util.prod(shape) * self.num_actions\n if probabilities is None:\n logits = 0.0\n else:\n logits = [log(prob) for _ in range(util.prod(shape)) for prob in probabilities]\n self.logits = Linear(size=action_size, bias=logits, scope='logits')\n\n super(Categorical, self).__init__(shape=shape, scope=scope, summary_labels=summary_labels)\n\n def tf_parameterize(self, x):\n # Flat logits\n logits = self.logits.apply(x=x)\n\n # Reshape logits to action shape\n shape = (-1,) + self.shape + (self.num_actions,)\n logits = tf.reshape(tensor=logits, shape=shape)\n\n # !!!\n state_value = tf.reduce_logsumexp(input_tensor=logits, axis=-1)\n\n # Softmax for corresponding probabilities\n probabilities = tf.nn.softmax(logits=logits, axis=-1)\n\n # Min epsilon probability for numerical stability\n probabilities = tf.maximum(x=probabilities, y=util.epsilon)\n\n # \"Normalized\" logits\n logits = tf.log(x=probabilities)\n\n return logits, probabilities, state_value\n\n def state_value(self, distr_params):\n _, _, state_value = distr_params\n return state_value\n\n def state_action_value(self, distr_params, action=None):\n logits, _, state_value = distr_params\n if action is None:\n state_value = tf.expand_dims(input=state_value, axis=-1)\n else:\n one_hot = tf.one_hot(indices=action, depth=self.num_actions)\n logits = tf.reduce_sum(input_tensor=(logits * one_hot), axis=-1)\n return state_value + logits\n\n def tf_sample(self, distr_params, deterministic):\n logits, _, _ = distr_params\n\n # Deterministic: maximum likelihood action\n definite = tf.argmax(input=logits, axis=-1, output_type=util.tf_dtype('int'))\n\n # Non-deterministic: sample action using Gumbel distribution\n uniform_distribution = tf.random_uniform(\n shape=tf.shape(input=logits),\n minval=util.epsilon,\n maxval=(1.0 - util.epsilon)\n )\n gumbel_distribution = -tf.log(x=-tf.log(x=uniform_distribution))\n sampled = tf.argmax(input=(logits + gumbel_distribution), axis=-1, output_type=util.tf_dtype('int'))\n\n return tf.where(condition=deterministic, x=definite, y=sampled)\n\n def tf_log_probability(self, distr_params, action):\n logits, _, 
_ = distr_params\n one_hot = tf.one_hot(indices=action, depth=self.num_actions)\n return tf.reduce_sum(input_tensor=(logits * one_hot), axis=-1)\n\n def tf_entropy(self, distr_params):\n logits, probabilities, _ = distr_params\n return -tf.reduce_sum(input_tensor=(probabilities * logits), axis=-1)\n\n def tf_kl_divergence(self, distr_params1, distr_params2):\n logits1, probabilities1, _ = distr_params1\n logits2, _, _ = distr_params2\n log_prob_ratio = logits1 - logits2\n return tf.reduce_sum(input_tensor=(probabilities1 * log_prob_ratio), axis=-1)\n\n def tf_regularization_loss(self):\n regularization_loss = super(Categorical, self).tf_regularization_loss()\n if regularization_loss is None:\n losses = list()\n else:\n losses = [regularization_loss]\n\n regularization_loss = self.logits.regularization_loss()\n if regularization_loss is not None:\n losses.append(regularization_loss)\n\n if len(losses) > 0:\n return tf.add_n(inputs=losses)\n else:\n return None\n\n def get_variables(self, include_nontrainable=False):\n distribution_variables = super(Categorical, self).get_variables(include_nontrainable=include_nontrainable)\n logits_variables = self.logits.get_variables(include_nontrainable=include_nontrainable)\n\n return distribution_variables + logits_variables\n\n def get_summaries(self):\n distribution_summaries = super(Categorical, self).get_summaries()\n logits_summaries = self.logits.get_summaries()\n\n return distribution_summaries + logits_summaries\n" ]
[ [ "tensorflow.shape", "tensorflow.where", "tensorflow.expand_dims", "tensorflow.reshape", "tensorflow.add_n", "tensorflow.log", "tensorflow.reduce_sum", "tensorflow.nn.softmax", "tensorflow.maximum", "tensorflow.one_hot", "tensorflow.reduce_logsumexp" ] ]
tank671/fastai_dev
[ "aa9ff3f78ac560881f46f7161c0c17a6ca38e87b" ]
[ "dev/local/vision/augment.py" ]
[ "#AUTOGENERATED! DO NOT EDIT! File to edit: dev/09_vision_augment.ipynb (unless otherwise specified).\n\n__all__ = ['RandTransform', 'TensorTypes', 'FlipItem', 'DihedralItem', 'PadMode', 'CropPad', 'RandomCrop',\n 'ResizeMethod', 'Resize', 'RandomResizedCrop', 'AffineCoordTfm', 'RandomResizedCropGPU', 'affine_mat',\n 'mask_tensor', 'flip_mat', 'Flip', 'dihedral_mat', 'Dihedral', 'rotate_mat', 'Rotate', 'zoom_mat', 'Zoom',\n 'find_coeffs', 'apply_perspective', 'Warp', 'LightingTfm', 'Brightness', 'Contrast', 'setup_aug_tfms',\n 'aug_transforms']\n\n#Cell\nfrom ..test import *\nfrom ..data.all import *\nfrom .core import *\nfrom .data import *\n\n#Cell\nfrom torch import stack, zeros_like as t0, ones_like as t1\nfrom torch.distributions.bernoulli import Bernoulli\n\n#Cell\nclass RandTransform(Transform):\n \"A transform that before_call its state at each `__call__`, only applied on the training set\"\n split_idx,do,nm,supports = 0,True,None,[]\n def __init__(self, p=1., nm=None, before_call=None, **kwargs):\n super().__init__(**kwargs)\n self.p,self.before_call = p,ifnone(before_call,self.before_call)\n\n def before_call(self, b, split_idx):\n \"before_call the state for input `b`\"\n self.do = random.random() < self.p\n\n def __call__(self, b, split_idx=None, **kwargs):\n self.before_call(b, split_idx=split_idx)\n return super().__call__(b, split_idx=split_idx, **kwargs) if self.do else b\n\n#Cell\ndef _neg_axis(x, axis):\n x[...,axis] = -x[...,axis]\n return x\n\nTensorTypes = (TensorImage,TensorMask,TensorPoint,TensorBBox)\n\n#Cell\n@patch\ndef flip_lr(x:Image.Image): return x.transpose(Image.FLIP_LEFT_RIGHT)\n@patch\ndef flip_lr(x:TensorImage): return x.flip(-1)\n@patch\ndef flip_lr(x:TensorPoint): return _neg_axis(x, 0)\n@patch\ndef flip_lr(x:TensorBBox): return TensorBBox(TensorPoint(x.view(-1,2)).flip_lr().view(-1,4))\n\n#Cell\nclass FlipItem(RandTransform):\n \"Randomly flip with probability `p`\"\n def __init__(self, p=0.5): super().__init__(p=p)\n def encodes(self, x:(Image.Image,*TensorTypes)): return x.flip_lr()\n\n#Cell\n@patch\ndef dihedral(x:PILImage, k): return x if k==0 else x.transpose(k-1)\n@patch\ndef dihedral(x:TensorImage, k):\n if k in [1,3,4,7]: x = x.flip(-1)\n if k in [2,4,5,7]: x = x.flip(-2)\n if k in [3,5,6,7]: x = x.transpose(-1,-2)\n return x\n@patch\ndef dihedral(x:TensorPoint, k):\n if k in [1,3,4,7]: x = _neg_axis(x, 0)\n if k in [2,4,5,7]: x = _neg_axis(x, 1)\n if k in [3,5,6,7]: x = x.flip(1)\n return x\n@patch\ndef dihedral(x:TensorBBox, k):\n pnts = TensorPoint(x.view(-1,2)).dihedral(k).view(-1,2,2)\n tl,br = pnts.min(dim=1)[0],pnts.max(dim=1)[0]\n return TensorBBox(torch.cat([tl, br], dim=1), sz=x._meta.get('sz', None))\n\n#Cell\nclass DihedralItem(RandTransform):\n \"Randomly flip with probability `p`\"\n def __init__(self, p=0.5): super().__init__(p=p)\n\n def before_call(self, b, split_idx):\n super().before_call(b, split_idx)\n self.k = random.randint(0,7)\n\n def encodes(self, x:(Image.Image,*TensorTypes)): return x.dihedral(self.k)\n\n#Cell\nfrom torchvision.transforms.functional import pad as tvpad\n\n#Cell\nmk_class('PadMode', **{o:o.lower() for o in ['Zeros', 'Border', 'Reflection']},\n doc=\"All possible padding mode as attributes to get tab-completion and typo-proofing\")\n\n#Cell\n_pad_modes = {'zeros': 'constant', 'border': 'edge', 'reflection': 'reflect'}\n\n@patch\ndef _do_crop_pad(x:Image.Image, sz, tl, orig_sz,\n pad_mode=PadMode.Zeros, resize_mode=Image.BILINEAR, resize_to=None):\n if any(tl.gt(0)):\n # At least one dim is inside the 
image, so needs to be cropped\n c = tl.max(0)\n x = x.crop((*c, *c.add(sz).min(orig_sz)))\n if any(tl.lt(0)):\n # At least one dim is outside the image, so needs to be padded\n p = (-tl).max(0)\n f = (sz-orig_sz-p).max(0)\n x = tvpad(x, (*p, *f), padding_mode=_pad_modes[pad_mode])\n if resize_to is not None: x = x.resize(resize_to, resize_mode)\n return x\n\n@patch\ndef _do_crop_pad(x:TensorPoint, sz, tl, orig_sz, pad_mode=PadMode.Zeros, resize_to=None, **kwargs):\n #assert pad_mode==PadMode.Zeros,\"Only zero padding is supported for `TensorPoint` and `TensorBBox`\"\n orig_sz,sz,tl = map(FloatTensor, (orig_sz,sz,tl))\n return TensorPoint((x+1)*orig_sz/sz - tl*2/sz - 1, sz=sz if resize_to is None else resize_to)\n\n@patch\ndef _do_crop_pad(x:TensorBBox, sz, tl, orig_sz, pad_mode=PadMode.Zeros, resize_to=None, **kwargs):\n bbox = TensorPoint._do_crop_pad(x.view(-1,2), sz, tl, orig_sz, pad_mode, resize_to).view(-1,4)\n return TensorBBox(bbox, sz=x._meta.get('sz', None))\n\n@patch\ndef crop_pad(x:(TensorBBox,TensorPoint,Image.Image),\n sz, tl=None, orig_sz=None, pad_mode=PadMode.Zeros, resize_mode=Image.BILINEAR, resize_to=None):\n if isinstance(sz,int): sz = (sz,sz)\n orig_sz = Tuple(x.size if orig_sz is None else orig_sz)\n sz,tl = Tuple(sz),Tuple(((x.size-sz)//2) if tl is None else tl)\n return x._do_crop_pad(sz, tl, orig_sz=orig_sz, pad_mode=pad_mode, resize_mode=resize_mode, resize_to=resize_to)\n\n#Cell\nclass CropPad(RandTransform):\n \"Center crop or pad an image to `size`\"\n mode,mode_mask,order,final_size,split_idx = Image.BILINEAR,Image.NEAREST,5,None,None\n def __init__(self, size, pad_mode=PadMode.Zeros, **kwargs):\n super().__init__(**kwargs)\n if isinstance(size,int): size=(size,size)\n self.size,self.pad_mode = Tuple(size[1],size[0]),pad_mode\n\n def before_call(self, b, split_idx):\n self.do = True\n self.cp_size = self.size\n self.orig_sz = Tuple((b[0] if isinstance(b, tuple) else b).size)\n self.tl = (self.orig_sz-self.cp_size)//2\n\n def encodes(self, x:(Image.Image,TensorBBox,TensorPoint)):\n return x.crop_pad(self.cp_size, self.tl, orig_sz=self.orig_sz, pad_mode=self.pad_mode,\n resize_mode=self.mode_mask if isinstance(x,PILMask) else self.mode, resize_to=self.final_size)\n\n#Cell\nclass RandomCrop(CropPad):\n \"Randomly crop an image to `size`\"\n def before_call(self, b, split_idx):\n super().before_call(b, split_idx)\n w,h = self.orig_sz\n if not split_idx: self.tl = (random.randint(0,w-self.cp_size[0]), random.randint(0,h-self.cp_size[1]))\n\n#Cell\nmk_class('ResizeMethod', **{o:o.lower() for o in ['Squish', 'Crop', 'Pad']},\n doc=\"All possible resize method as attributes to get tab-completion and typo-proofing\")\n\n#Cell\nclass Resize(CropPad):\n order=10\n \"Resize image to `size` using `method`\"\n def __init__(self, size, method=ResizeMethod.Squish, pad_mode=PadMode.Reflection,\n resamples=(Image.BILINEAR, Image.NEAREST), **kwargs):\n super().__init__(size, pad_mode=pad_mode, **kwargs)\n (self.mode,self.mode_mask),self.method = resamples,method\n\n def before_call(self, b, split_idx):\n super().before_call(b, split_idx)\n self.final_size = self.size\n if self.method==ResizeMethod.Squish:\n self.tl,self.cp_size = (0,0),self.orig_sz\n return\n w,h = self.orig_sz\n op = (operator.lt,operator.gt)[self.method==ResizeMethod.Pad]\n m = w/self.final_size[0] if op(w/self.final_size[0],h/self.final_size[1]) else h/self.final_size[1]\n self.cp_size = (int(m*self.final_size[0]),int(m*self.final_size[1]))\n if self.method==ResizeMethod.Pad or split_idx: self.tl = 
((w-self.cp_size[0])//2, (h-self.cp_size[1])//2)\n else: self.tl = (random.randint(0,w-self.cp_size[0]), random.randint(0,h-self.cp_size[1]))\n\n#Cell\nclass RandomResizedCrop(CropPad):\n \"Picks a random scaled crop of an image and resize it to `size`\"\n def __init__(self, size, min_scale=0.08, ratio=(3/4, 4/3), resamples=(Image.BILINEAR, Image.NEAREST), **kwargs):\n super().__init__(size, **kwargs)\n self.min_scale,self.ratio = min_scale,ratio\n self.mode,self.mode_mask = resamples\n\n def before_call(self, b, split_idx):\n super().before_call(b, split_idx)\n self.final_size = self.size\n w,h = self.orig_sz\n for attempt in range(10):\n if split_idx: break\n area = random.uniform(self.min_scale,1.) * w * h\n ratio = math.exp(random.uniform(math.log(self.ratio[0]), math.log(self.ratio[1])))\n nw = int(round(math.sqrt(area * ratio)))\n nh = int(round(math.sqrt(area / ratio)))\n if nw <= w and nh <= h:\n self.cp_size = (nw,nh)\n self.tl = random.randint(0,w-nw), random.randint(0,h - nh)\n return\n if w/h < self.ratio[0]: self.cp_size = (w, int(w/self.ratio[0]))\n elif w/h > self.ratio[1]: self.cp_size = (int(h*self.ratio[1]), h)\n else: self.cp_size = (w, h)\n self.tl = ((w-self.cp_size[0])//2, (h-self.cp_size[1])//2)\n\n#Cell\ndef _init_mat(x):\n mat = torch.eye(3, device=x.device).float()\n return mat.unsqueeze(0).expand(x.size(0), 3, 3).contiguous()\n\n#Cell\nwarnings.filterwarnings(\"ignore\", category=UserWarning, module=\"torch.nn.functional\")\ndef _grid_sample(x, coords, mode='bilinear', padding_mode='reflection'):\n \"Resample pixels in `coords` from `x` by `mode`, with `padding_mode` in ('reflection','border','zeros').\"\n #coords = coords.permute(0, 3, 1, 2).contiguous().permute(0, 2, 3, 1) # optimize layout for grid_sample\n if mode=='bilinear': # hack to get smoother downwards resampling\n mn,mx = coords.min(),coords.max()\n # max amount we're affine zooming by (>1 means zooming in)\n z = 1/(mx-mn).item()*2\n # amount we're resizing by, with 100% extra margin\n d = min(x.shape[-2]/coords.shape[-2], x.shape[-1]/coords.shape[-1])/2\n # If we're resizing up by >200%, and we're zooming less than that, interpolate first\n if d>1 and d>z:\n x = F.interpolate(x, scale_factor=1/d, mode='area')\n with warnings.catch_warnings():\n #To avoid the warning that come from grid_sample.\n warnings.simplefilter(\"ignore\")\n return F.grid_sample(x, coords, mode=mode, padding_mode=padding_mode)\n\n#Cell\n@patch\ndef affine_coord(x: TensorImage, mat=None, coord_tfm=None, sz=None, mode='bilinear', pad_mode=PadMode.Reflection):\n if mat is None and coord_tfm is None and sz is None: return x\n size = tuple(x.shape[-2:]) if sz is None else (sz,sz) if isinstance(sz,int) else tuple(sz)\n if mat is None: mat = _init_mat(x)[:,:2]\n coords = F.affine_grid(mat, x.shape[:2] + size)\n if coord_tfm is not None: coords = coord_tfm(coords)\n return TensorImage(_grid_sample(x, coords, mode=mode, padding_mode=pad_mode))\n\n@patch\ndef affine_coord(x: TensorMask, mat=None, coord_tfm=None, sz=None, mode='nearest', pad_mode=PadMode.Reflection):\n add_dim = (x.ndim==3)\n if add_dim: x = x[:,None]\n res = TensorImage.affine_coord(x.float(), mat, coord_tfm, sz, mode, pad_mode).long()\n if add_dim: res = res[:,0]\n return TensorMask(res)\n\n@patch\ndef affine_coord(x: TensorPoint, mat=None, coord_tfm=None, sz=None, mode='nearest', pad_mode=PadMode.Zeros):\n #assert pad_mode==PadMode.Zeros, \"Only zero padding is supported for `TensorPoint` and `TensorBBox`\"\n if sz is None: sz = x._meta.get('sz', None)\n if coord_tfm 
is not None: x = coord_tfm(x, invert=True)\n if mat is not None: x = (x - mat[:,:,2].unsqueeze(1)) @ torch.inverse(mat[:,:,:2].transpose(1,2))\n return TensorPoint(x, sz=sz)\n\n@patch\ndef affine_coord(x: TensorBBox, mat=None, coord_tfm=None, sz=None, mode='nearest', pad_mode=PadMode.Zeros):\n if mat is None and coord_tfm is None: return x\n bs,n = x.shape[:2]\n pnts = stack([x[...,:2], stack([x[...,0],x[...,3]],dim=2),\n stack([x[...,2],x[...,1]],dim=2), x[...,2:]], dim=2)\n pnts = TensorPoint(TensorPoint(pnts.view(bs, 4*n, 2), sz=x._meta.get('sz', None))).affine_coord(mat, coord_tfm, sz, mode, pad_mode)\n pnts = pnts.view(bs, n, 4, 2)\n tl,dr = pnts.min(dim=2)[0],pnts.max(dim=2)[0]\n return TensorBBox(torch.cat([tl, dr], dim=2), sz=x._meta.get('sz', None) if sz is None else sz)\n\n#Cell\ndef _prepare_mat(x, mat):\n h,w = (x._meta['sz'] if hasattr(x, '_meta') and 'sz' in x._meta else x.shape[-2:])\n mat[:,0,1] *= h/w\n mat[:,1,0] *= w/h\n return mat[:,:2]\n\n#Cell\nclass AffineCoordTfm(RandTransform):\n \"Combine and apply affine and coord transforms\"\n split_idx,order = None,30\n def __init__(self, aff_fs=None, coord_fs=None, size=None, mode='bilinear', pad_mode=PadMode.Reflection, mode_mask='nearest'):\n self.aff_fs,self.coord_fs = L(aff_fs),L(coord_fs)\n store_attr(self, 'size,mode,pad_mode,mode_mask')\n self.cp_size = None if size is None else (size,size) if isinstance(size, int) else tuple(size)\n\n def before_call(self, b, split_idx):\n if isinstance(b, tuple): b = b[0]\n self.split_idx = split_idx\n self.do,self.mat = True,self._get_affine_mat(b)\n for t in self.coord_fs: t.before_call(b)\n\n def compose(self, tfm):\n \"Compose `self` with another `AffineCoordTfm` to only do the interpolation step once\"\n self.aff_fs += tfm.aff_fs\n self.coord_fs += tfm.coord_fs\n\n def _get_affine_mat(self, x):\n aff_m = _init_mat(x)\n if self.split_idx: return _prepare_mat(x, aff_m)\n ms = [f(x) for f in self.aff_fs]\n ms = [m for m in ms if m is not None]\n for m in ms: aff_m = aff_m @ m\n return _prepare_mat(x, aff_m)\n\n def _encode(self, x, mode, reverse=False):\n coord_func = None if len(self.coord_fs)==0 or self.split_idx else partial(compose_tfms, tfms=self.coord_fs, reverse=reverse)\n return x.affine_coord(self.mat, coord_func, sz=self.size, mode=mode, pad_mode=self.pad_mode)\n\n def encodes(self, x:TensorImage): return self._encode(x, self.mode)\n def encodes(self, x:TensorMask): return self._encode(x, self.mode_mask)\n def encodes(self, x:(TensorPoint, TensorBBox)): return self._encode(x, self.mode, reverse=True)\n\n#Cell\nclass RandomResizedCropGPU(RandTransform):\n \"Picks a random scaled crop of an image and resize it to `size`\"\n split_idx,order = None,30\n def __init__(self, size, min_scale=0.08, ratio=(3/4, 4/3), mode='bilinear', **kwargs):\n super().__init__(**kwargs)\n self.size = (size,size) if isinstance(size, int) else size\n store_attr(self, 'min_scale,ratio,mode')\n\n def before_call(self, b, split_idx):\n self.do = True\n h,w = Tuple((b[0] if isinstance(b, tuple) else b).shape[-2:])\n for attempt in range(10):\n if split_idx: break\n area = random.uniform(self.min_scale,1.) 
* w * h\n ratio = math.exp(random.uniform(math.log(self.ratio[0]), math.log(self.ratio[1])))\n nw = int(round(math.sqrt(area * ratio)))\n nh = int(round(math.sqrt(area / ratio)))\n if nw <= w and nh <= h:\n self.cp_size = (nh,nw)\n self.tl = random.randint(0,h - nh),random.randint(0,w-nw)\n return\n if w/h < self.ratio[0]: self.cp_size = (int(w/self.ratio[0]), w)\n elif w/h > self.ratio[1]: self.cp_size = (h, int(h*self.ratio[1]))\n else: self.cp_size = (h, w)\n self.tl = ((h-self.cp_size[0])//2,(w-self.cp_size[1])//2)\n\n def encodes(self, x:TensorImage):\n x = x[...,self.tl[0]:self.tl[0]+self.cp_size[0], self.tl[1]:self.tl[1]+self.cp_size[1]]\n return TensorImage(x).affine_coord(sz=self.size, mode=self.mode)\n\n#Cell\ndef affine_mat(*ms):\n \"Restructure length-6 vector `ms` into an affine matrix with 0,0,1 in the last line\"\n return stack([stack([ms[0], ms[1], ms[2]], dim=1),\n stack([ms[3], ms[4], ms[5]], dim=1),\n stack([t0(ms[0]), t0(ms[0]), t1(ms[0])], dim=1)], dim=1)\n\n#Cell\ndef mask_tensor(x, p=0.5, neutral=0.):\n \"Mask elements of `x` with `neutral` with probability `1-p`\"\n if p==1.: return x\n if neutral != 0: x.add_(-neutral)\n mask = x.new_empty(*x.size()).bernoulli_(p)\n x.mul_(mask)\n return x.add_(neutral) if neutral != 0 else x\n\n#Cell\ndef flip_mat(x, p=0.5):\n \"Return a random flip matrix\"\n mask = mask_tensor(-x.new_ones(x.size(0)), p=p, neutral=1.)\n return affine_mat(mask, t0(mask), t0(mask),\n t0(mask), t1(mask), t0(mask))\n\n#Cell\ndef _get_default(x, mode=None, pad_mode=None):\n if mode is None: mode='bilinear' if isinstance(x, TensorMask) else 'bilinear'\n if pad_mode is None: pad_mode=PadMode.Zeros if isinstance(x, (TensorPoint, TensorBBox)) else PadMode.Reflection\n x0 = x[0] if isinstance(x, tuple) else x\n return x0,mode,pad_mode\n\n#Cell\n@patch\ndef flip_batch(x: (TensorImage,TensorMask,TensorPoint,TensorBBox), p=0.5, size=None, mode=None, pad_mode=None):\n x0,mode,pad_mode = _get_default(x, mode, pad_mode)\n return x.affine_coord(mat=flip_mat(x0, p=p)[:,:2], sz=size, mode=mode, pad_mode=pad_mode)\n\n#Cell\ndef Flip(p=0.5, size=None, mode='bilinear', pad_mode=PadMode.Reflection):\n \"Randomly flip a batch of images with a probability `p`\"\n return AffineCoordTfm(aff_fs=partial(flip_mat, p=p), size=size, mode=mode, pad_mode=pad_mode)\n\n#Cell\ndef _draw_mask(x, def_draw, draw=None, p=0.5, neutral=0.):\n if draw is None: draw=def_draw\n if callable(draw): return draw(x)\n elif is_listy(draw):\n test_eq(len(draw), x.size(0))\n res = tensor(draw, dtype=x.dtype, device=x.device)\n else: res = x.new_zeros(x.size(0)) + draw\n return mask_tensor(res, p=p, neutral=neutral)\n\n#Cell\ndef dihedral_mat(x, p=0.5, draw=None):\n \"Return a random dihedral matrix\"\n def _def_draw(x): return torch.randint(0,8, (x.size(0),), device=x.device)\n idx = _draw_mask(x, _def_draw, draw=draw, p=p).long()\n xs = tensor([1,-1,1,-1,-1,1,1,-1], device=x.device).gather(0, idx)\n ys = tensor([1,1,-1,1,-1,-1,1,-1], device=x.device).gather(0, idx)\n m0 = tensor([1,1,1,0,1,0,0,0], device=x.device).gather(0, idx)\n m1 = tensor([0,0,0,1,0,1,1,1], device=x.device).gather(0, idx)\n res = affine_mat(xs*m0, xs*m1, t0(xs),\n ys*m1, ys*m0, t0(xs)).float()\n return affine_mat(xs*m0, xs*m1, t0(xs),\n ys*m1, ys*m0, t0(xs)).float()\n\n#Cell\n@patch\ndef dihedral_batch(x: (TensorImage,TensorMask,TensorPoint,TensorBBox), p=0.5, draw=None, size=None, mode=None, pad_mode=None):\n x0,mode,pad_mode = _get_default(x, mode, pad_mode)\n mat = _prepare_mat(x, dihedral_mat(x0, p=p, draw=draw))\n return 
x.affine_coord(mat=mat, sz=size, mode=mode, pad_mode=pad_mode)\n\n#Cell\ndef Dihedral(p=0.5, draw=None, size=None, mode='bilinear', pad_mode=PadMode.Reflection):\n \"Apply a random dihedral transformation to a batch of images with a probability `p`\"\n return AffineCoordTfm(aff_fs=partial(dihedral_mat, p=p, draw=draw), size=size, mode=mode, pad_mode=pad_mode)\n\n#Cell\ndef rotate_mat(x, max_deg=10, p=0.5, draw=None):\n \"Return a random rotation matrix with `max_deg` and `p`\"\n def _def_draw(x): return x.new(x.size(0)).uniform_(-max_deg, max_deg)\n thetas = _draw_mask(x, _def_draw, draw=draw, p=p) * math.pi/180\n return affine_mat(thetas.cos(), thetas.sin(), t0(thetas),\n -thetas.sin(), thetas.cos(), t0(thetas))\n\n#Cell\n@delegates(rotate_mat)\n@patch\ndef rotate(x: (TensorImage,TensorMask,TensorPoint,TensorBBox), size=None, mode=None, pad_mode=None, **kwargs):\n x0,mode,pad_mode = _get_default(x, mode, pad_mode)\n mat = _prepare_mat(x, rotate_mat(x0, **kwargs))\n return x.affine_coord(mat=mat, sz=size, mode=mode, pad_mode=pad_mode)\n\n#Cell\ndef Rotate(max_deg=10, p=0.5, draw=None, size=None, mode='bilinear', pad_mode=PadMode.Reflection):\n \"Apply a random rotation of at most `max_deg` with probability `p` to a batch of images\"\n return AffineCoordTfm(partial(rotate_mat, max_deg=max_deg, p=p, draw=draw),\n size=size, mode=mode, pad_mode=pad_mode)\n\n#Cell\ndef zoom_mat(x, max_zoom=1.1, p=0.5, draw=None, draw_x=None, draw_y=None):\n \"Return a random zoom matrix with `max_zoom` and `p`\"\n def _def_draw(x): return x.new(x.size(0)).uniform_(1, max_zoom)\n def _def_draw_ctr(x): return x.new(x.size(0)).uniform_(0,1)\n s = 1/_draw_mask(x, _def_draw, draw=draw, p=p, neutral=1.)\n col_pct = _draw_mask(x, _def_draw_ctr, draw=draw_x, p=1.)\n row_pct = _draw_mask(x, _def_draw_ctr, draw=draw_y, p=1.)\n col_c = (1-s) * (2*col_pct - 1)\n row_c = (1-s) * (2*row_pct - 1)\n return affine_mat(s, t0(s), col_c,\n t0(s), s, row_c)\n\n#Cell\n@delegates(zoom_mat)\n@patch\ndef zoom(x: (TensorImage,TensorMask,TensorPoint,TensorBBox), size=None, mode='bilinear', pad_mode=PadMode.Reflection, **kwargs):\n x0,mode,pad_mode = _get_default(x, mode, pad_mode)\n return x.affine_coord(mat=zoom_mat(x0, **kwargs)[:,:2], sz=size, mode=mode, pad_mode=pad_mode)\n\n#Cell\ndef Zoom(max_zoom=1.1, p=0.5, draw=None, draw_x=None, draw_y=None, size=None, mode='bilinear',\n pad_mode=PadMode.Reflection):\n \"Apply a random zoom of at most `max_zoom` with probability `p` to a batch of images\"\n return AffineCoordTfm(partial(zoom_mat, max_zoom=max_zoom, p=p, draw=draw, draw_x=draw_x, draw_y=draw_y),\n size=size, mode=mode, pad_mode=pad_mode)\n\n#Cell\ndef find_coeffs(p1, p2):\n \"Find coefficients for warp tfm from `p1` to `p2`\"\n m = []\n p = p1[:,0,0]\n #The equations we'll need to solve.\n for i in range(p1.shape[1]):\n m.append(stack([p2[:,i,0], p2[:,i,1], t1(p), t0(p), t0(p), t0(p), -p1[:,i,0]*p2[:,i,0], -p1[:,i,0]*p2[:,i,1]]))\n m.append(stack([t0(p), t0(p), t0(p), p2[:,i,0], p2[:,i,1], t1(p), -p1[:,i,1]*p2[:,i,0], -p1[:,i,1]*p2[:,i,1]]))\n #The 8 scalars we seek are solution of AX = B\n A = stack(m).permute(2, 0, 1)\n B = p1.view(p1.shape[0], 8, 1)\n return torch.solve(B,A)[0]\n\n#Cell\ndef apply_perspective(coords, coeffs):\n \"Apply perspective tranfom on `coords` with `coeffs`\"\n sz = coords.shape\n coords = coords.view(sz[0], -1, 2)\n coeffs = torch.cat([coeffs, t1(coeffs[:,:1])], dim=1).view(coeffs.shape[0], 3,3)\n coords = coords @ coeffs[...,:2].transpose(1,2) + coeffs[...,2].unsqueeze(1)\n coords = 
coords/coords[...,2].unsqueeze(-1)\n return coords[...,:2].view(*sz)\n\n#Cell\nclass _WarpCoord():\n def __init__(self, magnitude=0.2, p=0.5, draw_x=None, draw_y=None):\n self.coeffs,self.magnitude,self.p,self.draw_x,self.draw_y = None,magnitude,p,draw_x,draw_y\n\n def _def_draw(self, x): return x.new(x.size(0)).uniform_(-self.magnitude, self.magnitude)\n def before_call(self, x):\n x_t = _draw_mask(x, self._def_draw, self.draw_x, p=self.p)\n y_t = _draw_mask(x, self._def_draw, self.draw_y, p=self.p)\n orig_pts = torch.tensor([[-1,-1], [-1,1], [1,-1], [1,1]], dtype=x.dtype, device=x.device)\n self.orig_pts = orig_pts.unsqueeze(0).expand(x.size(0),4,2)\n targ_pts = stack([stack([-1-y_t, -1-x_t]), stack([-1+y_t, 1+x_t]),\n stack([ 1+y_t, -1+x_t]), stack([ 1-y_t, 1-x_t])])\n self.targ_pts = targ_pts.permute(2,0,1)\n\n def __call__(self, x, invert=False):\n coeffs = find_coeffs(self.targ_pts, self.orig_pts) if invert else find_coeffs(self.orig_pts, self.targ_pts)\n return apply_perspective(x, coeffs)\n\n#Cell\n@delegates(_WarpCoord.__init__)\n@patch\ndef warp(x: (TensorImage,TensorMask,TensorPoint,TensorBBox), size=None, mode='bilinear', pad_mode=PadMode.Reflection, **kwargs):\n x0,mode,pad_mode = _get_default(x, mode, pad_mode)\n coord_tfm = _WarpCoord(**kwargs)\n coord_tfm.before_call(x0)\n return x.affine_coord(coord_tfm=coord_tfm, sz=size, mode=mode, pad_mode=pad_mode)\n\n#Cell\ndef Warp(magnitude=0.2, p=0.5, draw_x=None, draw_y=None,size=None, mode='bilinear', pad_mode=PadMode.Reflection):\n \"Apply perspective warping with `magnitude` and `p` on a batch of matrices\"\n return AffineCoordTfm(coord_fs=_WarpCoord(magnitude=magnitude, p=p, draw_x=draw_x, draw_y=draw_y),\n size=size, mode=mode, pad_mode=pad_mode)\n\n#Cell\n@patch\ndef lighting(x: TensorImage, func):\n return TensorImage(torch.sigmoid(func(logit(x))))\n\n#Cell\nclass LightingTfm(RandTransform):\n \"Apply `fs` to the logits\"\n order = 40\n def __init__(self, fs): self.fs=L(fs)\n def before_call(self, b, split_idx):\n self.do = True\n if isinstance(b, tuple): b = b[0]\n for t in self.fs: t.before_call(b)\n\n def compose(self, tfm):\n \"Compose `self` with another `LightingTransform`\"\n self.fs += tfm.fs\n\n def encodes(self,x:TensorImage): return x.lighting(partial(compose_tfms, tfms=self.fs))\n\n#Cell\nclass _BrightnessLogit():\n def __init__(self, max_lighting=0.2, p=0.75, draw=None):\n self.max_lighting,self.p,self.draw = max_lighting,p,draw\n\n def _def_draw(self, x): return x.new(x.size(0)).uniform_(0.5*(1-self.max_lighting), 0.5*(1+self.max_lighting))\n\n def before_call(self, x):\n self.change = _draw_mask(x, self._def_draw, draw=self.draw, p=self.p, neutral=0.5)\n\n def __call__(self, x): return x.add_(logit(self.change[:,None,None,None]))\n\n#Cell\n@delegates(_BrightnessLogit.__init__)\n@patch\ndef brightness(x: TensorImage, **kwargs):\n func = _BrightnessLogit(**kwargs)\n func.before_call(x)\n return x.lighting(func)\n\n#Cell\ndef Brightness(max_lighting=0.2, p=0.75, draw=None):\n \"Apply change in brightness of `max_lighting` to batch of images with probability `p`.\"\n return LightingTfm(_BrightnessLogit(max_lighting, p, draw))\n\n#Cell\nclass _ContrastLogit():\n def __init__(self, max_lighting=0.2, p=0.75, draw=None):\n self.max_lighting,self.p,self.draw = max_lighting,p,draw\n\n def _def_draw(self, x):\n return torch.exp(x.new(x.size(0)).uniform_(math.log(1-self.max_lighting), -math.log(1-self.max_lighting)))\n\n def before_call(self, x):\n self.change = _draw_mask(x, self._def_draw, draw=self.draw, p=self.p, 
neutral=1.)\n\n def __call__(self, x): return x.mul_(self.change[:,None,None,None])\n\n#Cell\n@delegates(_ContrastLogit.__init__)\n@patch\ndef contrast(x: TensorImage, **kwargs):\n func = _ContrastLogit(**kwargs)\n func.before_call(x)\n return x.lighting(func)\n\n#Cell\ndef Contrast(max_lighting=0.2, p=0.75, draw=None):\n \"Apply change in contrast of `max_lighting` to batch of images with probability `p`.\"\n return LightingTfm(_ContrastLogit(max_lighting, p, draw))\n\n#Cell\ndef _compose_same_tfms(tfms):\n tfms = L(tfms)\n if len(tfms) == 0: return None\n res = tfms[0]\n for tfm in tfms[1:]: res.compose(tfm)\n return res\n\n#Cell\ndef setup_aug_tfms(tfms):\n \"Go through `tfms` and combines together affine/coord or lighting transforms\"\n aff_tfms = [tfm for tfm in tfms if isinstance(tfm, AffineCoordTfm)]\n lig_tfms = [tfm for tfm in tfms if isinstance(tfm, LightingTfm)]\n others = [tfm for tfm in tfms if tfm not in aff_tfms+lig_tfms]\n aff_tfm,lig_tfm = _compose_same_tfms(aff_tfms),_compose_same_tfms(lig_tfms)\n res = [aff_tfm] if aff_tfm is not None else []\n if lig_tfm is not None: res.append(lig_tfm)\n return res + others\n\n#Cell\ndef aug_transforms(do_flip=True, flip_vert=False, max_rotate=10., max_zoom=1.1, max_lighting=0.2,\n max_warp=0.2, p_affine=0.75, p_lighting=0.75, xtra_tfms=None,\n size=None, mode='bilinear', pad_mode=PadMode.Reflection):\n \"Utility func to easily create a list of flip, rotate, zoom, warp, lighting transforms.\"\n res,tkw = [],dict(size=size, mode=mode, pad_mode=pad_mode)\n if do_flip: res.append(Dihedral(p=0.5, **tkw) if flip_vert else Flip(p=0.5, **tkw))\n if max_warp: res.append(Warp(magnitude=max_warp, p=p_affine, **tkw))\n if max_rotate: res.append(Rotate(max_deg=max_rotate, p=p_affine, **tkw))\n if max_zoom>1: res.append(Zoom(max_zoom=max_zoom, p=p_affine, **tkw))\n if max_lighting:\n res.append(Brightness(max_lighting=max_lighting, p=p_lighting))\n res.append(Contrast(max_lighting=max_lighting, p=p_lighting))\n return setup_aug_tfms(res + L(xtra_tfms))" ]
[ [ "torch.zeros_like", "torch.stack", "torch.ones_like" ] ]
jamesdamillington/Wildfire_Human_Agency_Model
[ "771d5a89a9f26badcf1fa412a1a7a10b2e981aeb" ]
[ "src/Core_functionality/AFTs/agent_class.py" ]
[ "\r\n# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Tue Jun 22 10:41:39 2021\r\n\r\n@author: Oli\r\n\"\"\"\r\n\r\nimport agentpy as ap\r\nimport pandas as pd\r\nimport numpy as np\r\nfrom copy import deepcopy\r\n\r\nfrom Core_functionality.Trees.Transfer_tree import define_tree_links, predict_from_tree, update_pars, predict_from_tree_fast\r\nfrom Core_functionality.prediction_tools.regression_families import regression_link, regression_transformation\r\nfrom Core_functionality.Trees.parallel_predict import make_boot_frame, make_boot_frame_AFT, parallel_predict, combine_bootstrap\r\n\r\n\r\nclass AFT(ap.Agent):\r\n \r\n ''' \r\n Core model class containing key drivers of model function\r\n '''\r\n \r\n def setup(self):\r\n \r\n '''\r\n \r\n Basic constants:\r\n ls = land system of AFT\r\n afr= anthropogenic fire regime of AFT\r\n sub_AFT = does this AFT subdivide an LFS? Kind is one of fraction, addition\r\n \r\n ''' \r\n \r\n self.ls = ''\r\n self.afr= ''\r\n self.sub_AFT = {'exists': False,\r\n 'kind' : ''}\r\n \r\n self.Fire_use = {}\r\n self.Fire_vars = {}\r\n \r\n\r\n def get_pars(self, AFT_dict):\r\n \r\n ### Distribution tree for LFS\r\n self.Dist_frame = AFT_dict['AFT_dist'][str(self.ls + '/' + self.afr)]\r\n self.Dist_struct = define_tree_links(self.Dist_frame)\r\n self.Dist_vars = [x for x in self.Dist_frame.iloc[:,1].tolist() if x != '<leaf>']\r\n \r\n \r\n ### Sub-split from LFS to AFT\r\n if self.sub_AFT['exists'] == True:\r\n \r\n if self.sub_AFT['kind'] != 'Multiple':\r\n \r\n self.AFT_frame = AFT_dict['AFT_dist'][str('Sub_AFTs' + '/' + self.sub_AFT['afr'] + '_' + self.sub_AFT['ls'])]\r\n self.AFT_struct = define_tree_links(self.AFT_frame)\r\n self.AFT_vars = [x for x in self.AFT_frame.iloc[:,1].tolist() if x != '<leaf>']\r\n \r\n else:\r\n \r\n ### Where AFT splits across more than 2 LFS\r\n ### afr & LFS should be lists of same length\r\n \r\n self.AFT_frame = []\r\n self.AFT_struct = []\r\n self.AFT_vars = []\r\n \r\n for i in range(len(self.sub_AFT['afr'])):\r\n \r\n self.AFT_frame.append(AFT_dict['AFT_dist'][str('Sub_AFTs' + '/' + self.sub_AFT['afr'][i] + '_' + self.sub_AFT['ls'][i])])\r\n self.AFT_struct.append(define_tree_links(self.AFT_frame[i]))\r\n self.AFT_vars.append([x for x in self.AFT_frame[i].iloc[:,1].tolist() if x != '<leaf>'])\r\n \r\n \r\n else:\r\n \r\n self.AFT_frame = 'None'\r\n \r\n \r\n \r\n \r\n def get_boot_vals(self, AFT_dict):\r\n \r\n ### get parameter values for bootstrapping of tree thresholds\r\n \r\n self.boot_Dist_pars = {}\r\n self.boot_AFT_pars = {}\r\n \r\n if self.p.bootstrap == True:\r\n \r\n\r\n self.boot_Dist_pars['Thresholds'] = AFT_dict['Dist_pars']['Thresholds'][str(self.ls + '/' + self.afr)]\r\n self.boot_Dist_pars['Probs'] = AFT_dict['Dist_pars']['Probs'][str(self.ls + '/' + self.afr)]\r\n \r\n \r\n if self.sub_AFT['exists'] == True:\r\n \r\n if self.sub_AFT['kind'] != 'Multiple':\r\n \r\n self.boot_AFT_pars['Thresholds'] = AFT_dict['Dist_pars']['Thresholds'][str('Sub_AFTs' + '/' + self.sub_AFT['afr'] + '_' + self.sub_AFT['ls'])]\r\n self.boot_AFT_pars['Probs'] = AFT_dict['Dist_pars']['Probs'][str('Sub_AFTs' + '/' + self.sub_AFT['afr'] + '_' + self.sub_AFT['ls'])]\r\n \r\n \r\n else:\r\n \r\n ### Where AFT splits across more than 2 LFS\r\n ### afr & LFS should be lists of same length\r\n \r\n self.boot_AFT_pars = []\r\n \r\n for i in range(len(self.sub_AFT['afr'])):\r\n \r\n self.boot_AFT_pars.append({}) \r\n \r\n self.boot_AFT_pars[i]['Thresholds'] = AFT_dict['Dist_pars']['Thresholds'][str('Sub_AFTs' + '/' + 
self.sub_AFT['afr'][i] + '_' + self.sub_AFT['ls'][i])]\r\n self.boot_AFT_pars[i]['Probs'] = AFT_dict['Dist_pars']['Probs'][str('Sub_AFTs' + '/' + self.sub_AFT['afr'][i] + '_' + self.sub_AFT['ls'][i])]\r\n \r\n \r\n else:\r\n \r\n self.boot_AFT_pars = 'None'\r\n \r\n\r\n \r\n ### Fire use parameters\r\n def get_fire_pars(self):\r\n \r\n \r\n for par in self.Fire_use.keys():\r\n \r\n ### get parameters for fire use bool (yes/no) \r\n \r\n #parameters can be specified in par dict directly either with a pandas\r\n # {'type': 'constant', 'pars':float} for a constant value\r\n if self.Fire_use[par]['bool'] in ['lin_mod', 'tree_mod']: \r\n \r\n self.Fire_use[par]['bool'] = {'type': self.Fire_use[par]['bool'], \r\n 'pars': self.p.AFT_pars['Fire_use']['bool'][par + '/' + type(self).__name__]}\r\n \r\n self.Fire_vars[par] = {}\r\n \r\n ###########################################\r\n ### extract parameter names\r\n ###########################################\r\n \r\n if self.Fire_use[par]['bool']['type'] == 'lin_mod':\r\n \r\n self.Fire_vars[par]['bool'] = [x for x in self.Fire_use[par]['bool']['pars'].iloc[:,0].tolist() if x != 'Intercept'] \r\n \r\n elif self.Fire_use[par]['bool']['type'] == 'tree_mod':\r\n \r\n self.Fire_vars[par]['bool'] = [x for x in self.Fire_use[par]['bool']['pars'].iloc[:,1].tolist() if x != '<leaf>']\r\n \r\n \r\n ### get parameters for fire use degree (target % burned area)\r\n \r\n if self.Fire_use[par]['ba'] in ['lin_mod', 'tree_mod']: \r\n \r\n self.Fire_use[par]['ba'] = {'type': self.Fire_use[par]['ba'], \r\n 'pars': self.p.AFT_pars['Fire_use']['ba'][par + '/' + type(self).__name__]}\r\n \r\n self.Fire_vars[par] = {} if not par in self.Fire_vars.keys() else self.Fire_vars[par]\r\n \r\n \r\n ###########################################\r\n ### extract parameter names\r\n ###########################################\r\n if self.Fire_use[par]['ba']['type'] == 'lin_mod':\r\n \r\n self.Fire_vars[par]['ba'] = [x for x in self.Fire_use[par]['ba']['pars'].iloc[:,0].tolist() if x != 'Intercept'] \r\n \r\n elif self.Fire_use[par]['ba']['type'] == 'tree_mod':\r\n \r\n self.Fire_vars[par]['ba'] = [x for x in self.Fire_use[par]['ba']['pars'].iloc[:,1].tolist() if x != '<leaf>']\r\n \r\n \r\n\r\n \r\n ### Needs doing\r\n def get_suppression_pars(self):\r\n \r\n pass\r\n \r\n #########################################################################\r\n\r\n ### AFT Distribution\r\n\r\n ######################################################################### \r\n \r\n def compete(self):\r\n \r\n ''' \r\n Competition between LFS\r\n \r\n Can we find a way to stop predicting over duplicate parameter sets for LFS?\r\n '''\r\n \r\n ### single set of parameter values\r\n if self.p.bootstrap != True:\r\n \r\n ### gather correct numpy arrays 4 predictor variables\r\n self.Dist_dat = [self.model.p.Maps[x][self.model.p.timestep, :, :] if len(self.model.p.Maps[x].shape) == 3 else self.model.p.Maps[x] for x in self.Dist_vars]\r\n\r\n\r\n ### combine numpy arrays to single pandas \r\n self.Dist_dat = pd.DataFrame.from_dict(dict(zip(self.Dist_vars, \r\n [x.reshape(self.model.p.xlen*self.model.p.ylen).data for x in self.Dist_dat])))\r\n \r\n ### do prediction\r\n self.Dist_vals = predict_from_tree_fast(dat = self.Dist_dat, \r\n tree = self.Dist_frame, struct = self.Dist_struct, \r\n prob = 'yprob.TRUE', skip_val = -3.3999999521443642e+38, na_return = 0)\r\n \r\n \r\n ### apply theta zero-ing out constraint\r\n self.Dist_vals = [0 if x <= self.p.theta else x for x in self.Dist_vals]\r\n \r\n \r\n ### 
bootstrapped version\r\n elif self.p.bootstrap == True:\r\n \r\n self.Dist_vals = []\r\n \r\n ### gather correct numpy arrays 4 predictor variables\r\n self.Dist_dat = [self.model.p.Maps[x][self.model.p.timestep, :, :] if len(self.model.p.Maps[x].shape) == 3 else self.model.p.Maps[x] for x in self.Dist_vars]\r\n\r\n ### combine numpy arrays to single pandas \r\n self.Dist_dat = pd.DataFrame.from_dict(dict(zip(self.Dist_vars, \r\n [x.reshape(self.model.p.xlen*self.model.p.ylen).data for x in self.Dist_dat])))\r\n \r\n ### Parallel prediction\r\n boot_frame = make_boot_frame(self)\r\n self.Dist_vals = parallel_predict(boot_frame, self.model.client, 'yprob.TRUE')\r\n self.Dist_vals = combine_bootstrap(self)\r\n \r\n \r\n def sub_compete(self):\r\n \r\n ''' Competition between AFTs within each LFS '''\r\n \r\n ### 1 parameter version\r\n \r\n if self.sub_AFT['exists'] == True and self.p.bootstrap == False:\r\n \r\n if self.sub_AFT['kind'] != 'Multiple': \r\n \r\n ### gather correct numpy arrays 4 predictor variables\r\n self.AFT_dat = [self.model.p.Maps[x][self.model.p.timestep, :, :] if len(self.model.p.Maps[x].shape) == 3 else self.model.p.Maps[x] for x in self.AFT_vars]\r\n \r\n ### combine numpy arrays to single pandas \r\n self.AFT_dat = pd.DataFrame.from_dict(dict(zip(self.AFT_vars, \r\n [x.reshape(self.model.p.xlen*self.model.p.ylen).data for x in self.AFT_dat])))\r\n \r\n ### do prediction\r\n self.AFT_vals = predict_from_tree_fast(self.AFT_dat, tree = self.AFT_frame, \r\n struct = self.AFT_struct, prob = type(self).__name__, \r\n skip_val = -3.3999999521443642e+38, na_return = 0)\r\n\r\n \r\n elif self.sub_AFT['kind'] == 'Multiple':\r\n \r\n self.AFT_dat = []\r\n self.AFT_vals = []\r\n \r\n for i in range(len(self.sub_AFT['afr'])): \r\n \r\n ### gather correct numpy arrays 4 predictor variables\r\n self.AFT_dat.append([self.model.p.Maps[x][self.model.p.timestep, :, :] if len(self.model.p.Maps[x].shape) == 3 else self.model.p.Maps[x] for x in self.AFT_vars[i]])\r\n \r\n ### combine numpy arrays to single pandas \r\n self.AFT_dat[i] = pd.DataFrame.from_dict(dict(zip(self.AFT_vars[i], \r\n [x.reshape(self.model.p.xlen*self.model.p.ylen).data for x in self.AFT_dat[i]])))\r\n \r\n ### do prediction - these are added together in the WHAM AFT allocate routine\r\n self.AFT_vals.append(predict_from_tree_fast(self.AFT_dat[i], tree = self.AFT_frame[i], \r\n struct = self.AFT_struct[i], prob = type(self).__name__, \r\n skip_val = -3.3999999521443642e+38, na_return = 0))\r\n\r\n \r\n \r\n ### bootstrapped parameters\r\n \r\n elif self.sub_AFT['exists'] == True and self.p.bootstrap == True:\r\n \r\n if self.sub_AFT['kind'] != 'Multiple': \r\n \r\n ### gather correct numpy arrays 4 predictor variables\r\n self.AFT_dat = [self.model.p.Maps[x][self.model.p.timestep, :, :] if len(self.model.p.Maps[x].shape) == 3 else self.model.p.Maps[x] for x in self.AFT_vars]\r\n \r\n ### combine numpy arrays to single pandas \r\n self.AFT_dat = pd.DataFrame.from_dict(dict(zip(self.AFT_vars, \r\n [x.reshape(self.model.p.xlen*self.model.p.ylen).data for x in self.AFT_dat])))\r\n \r\n ### Parallel prediction, no theta threshold for sub-splits \r\n boot_frame = make_boot_frame_AFT(self)\r\n av = parallel_predict(boot_frame, self.model.client, type(self).__name__)\r\n self.AFT_vals = pd.DataFrame(np.column_stack(av)).mean(axis = 1).to_list()\r\n \r\n \r\n elif self.sub_AFT['kind'] == 'Multiple':\r\n \r\n self.AFT_dat = []\r\n self.AFT_vals = []\r\n \r\n for z in range(len(self.sub_AFT['afr'])): \r\n \r\n 
self.AFT_vals.append([])\r\n \r\n ### gather correct numpy arrays 4 predictor variables\r\n self.AFT_dat = [self.model.p.Maps[x][self.model.p.timestep, :, :] if len(self.model.p.Maps[x].shape) == 3 else self.model.p.Maps[x] for x in self.AFT_vars[z]]\r\n \r\n ### combine numpy arrays to single pandas \r\n self.AFT_dat = pd.DataFrame.from_dict(dict(zip(self.AFT_vars[z], \r\n [x.reshape(self.model.p.xlen*self.model.p.ylen).data for x in self.AFT_dat])))\r\n \r\n ### do parallel prediction\r\n boot_frame = make_boot_frame_AFT(self, par_set = z)\r\n av = parallel_predict(boot_frame, self.model.client, type(self).__name__)\r\n self.AFT_vals[z]= pd.DataFrame(np.column_stack(av)).mean(axis = 1).to_list()\r\n \r\n else:\r\n \r\n self.AFT_vals = 'None'\r\n \r\n \r\n #######################################################################\r\n \r\n ### Fire\r\n \r\n #######################################################################\r\n \r\n ################\r\n ### Fire use\r\n ################\r\n \r\n \r\n def fire_use(self):\r\n \r\n \r\n ####################################\r\n \r\n ### Prepare data\r\n \r\n ####################################\r\n \r\n self.Fire_dat = {}\r\n self.Fire_vals= {}\r\n\r\n probs_key = {'bool': 'yprob.Presence', \r\n 'ba' : 'yval'} ### used for gathering final results\r\n \r\n \r\n ### gather numpy arrays 4 predictor variables\r\n for x in self.Fire_use.keys():\r\n \r\n for b in ['bool', 'ba']: \r\n \r\n \r\n if b in self.Fire_vars[x].keys(): \r\n \r\n \r\n ### containers for fire outputs\r\n self.Fire_dat[x] = {} if not x in self.Fire_dat.keys() else self.Fire_dat[x] \r\n self.Fire_dat[x][b] = []\r\n self.Fire_vals[x] = {} if not x in self.Fire_vals.keys() else self.Fire_vals[x] \r\n self.Fire_vals[x][b] = []\r\n \r\n temp_key = self.Fire_vars[x][b]\r\n \r\n \r\n ### Gather relevant map data\r\n for y in range(len(temp_key)):\r\n \r\n temp_val = self.model.p.Maps[temp_key[y]][self.model.p.timestep, :, :] if len(self.model.p.Maps[temp_key[y]].shape) == 3 else self.model.p.Maps[temp_key[y]]\r\n \r\n self.Fire_dat[x][b].append(temp_val)\r\n\r\n ### combine predictor numpy arrays to a single pandas \r\n self.Fire_dat[x][b] = pd.DataFrame.from_dict(dict(zip(self.Fire_vars[x][b], \r\n [z.reshape(self.model.p.xlen*self.model.p.ylen).data for z in self.Fire_dat[x][b]])))\r\n \r\n \r\n ####################################\r\n \r\n ### Make predictions\r\n \r\n ####################################\r\n \r\n ##########\r\n ### Tree\r\n ##########\r\n \r\n if self.Fire_use[x][b]['type'] == 'tree_mod':\r\n \r\n \r\n Fire_struct = define_tree_links(self.Fire_use[x][b]['pars'])\r\n\r\n self.Fire_vals[x][b] = predict_from_tree_fast(dat = self.Fire_dat[x][b], \r\n tree = self.Fire_use[x][b]['pars'], struct = Fire_struct, \r\n prob = probs_key[b], skip_val = -3.3999999521443642e+38, na_return = 0)\r\n \r\n ################\r\n ### Regression\r\n ################\r\n \r\n elif self.Fire_use[x][b]['type'] == 'lin_mod':\r\n \r\n self.Fire_vals[x][b] = deepcopy(self.Fire_dat[x][b])\r\n \r\n ### Mulitply data by regression coefs\r\n for coef in self.Fire_vars[x][b]:\r\n \r\n self.Fire_vals[x][b][coef] = self.Fire_vals[x][b][coef] * self.Fire_use[x][b]['pars']['coef'].iloc[np.where(self.Fire_use[x][b]['pars']['var'] == coef)[0][0]]\r\n \r\n ### Add intercept\r\n self.Fire_vals[x][b] = self.Fire_vals[x][b].sum(axis = 1) + self.Fire_use[x][b]['pars']['coef'].iloc[np.where(self.Fire_use[x][b]['pars']['var'] == 'Intercept')[0][0]]\r\n \r\n ### Link function\r\n self.Fire_vals[x][b] = 
regression_transformation(regression_link(self.Fire_vals[x][b], \r\n link = self.Fire_use[x][b]['pars']['link'][0]), \r\n transformation = self.Fire_use[x][b]['pars']['transformation'][0])\r\n ### control for negative values\r\n self.Fire_vals[x][b] = pd.Series([x if x > 0 else 0 for x in self.Fire_vals[x][b]])\r\n \r\n #################################\r\n ### specified\r\n #################################\r\n \r\n elif 'constant' in self.Fire_use[x][b].keys(): \r\n \r\n self.Fire_vals[x] = {} if not x in self.Fire_vals.keys() else self.Fire_vals[x] \r\n self.Fire_vals[x][b] = []\r\n self.Fire_vals[x][b] = pd.Series([self.Fire_use[x][b]['constant']] * (self.model.p.ylen * self.model.p.xlen))\r\n \r\n else:\r\n \r\n pass\r\n \r\n ### calculate burned area through bool & ba%\r\n self.Fire_vals[x] = self.Fire_vals[x]['bool'] * self.Fire_vals[x]['ba']\r\n \r\n ### Adjust for AFT specific constraints\r\n self.fire_constraints()\r\n \r\n \r\n def fire_constraints(self):\r\n \r\n ''' container for agent-specific fire constraints'''\r\n \r\n pass\r\n \r\n #######################\r\n \r\n ### Fire suppression\r\n \r\n ####################### \r\n \r\n \r\n def fire_suppression(self):\r\n \r\n pass\r\n\r\n\r\n \r\n\r\n\r\n\r\n\r\n\r\n##################################################################\r\n\r\n### dummy agents for testing\r\n\r\n##################################################################\r\n\r\nclass dummy_agent(AFT):\r\n \r\n def setup(self):\r\n AFT.setup(self)\r\n self.afr = 'Test'\r\n self.ls = 'Test'\r\n \r\n self.sub_AFT = {'exists': True, 'kind': 'Addition', \r\n 'afr': 'Test', 'ls': 'Test'} \r\n\r\n\r\nclass multiple_agent(AFT):\r\n \r\n def setup(self):\r\n AFT.setup(self)\r\n self.afr = 'Test'\r\n self.ls = 'Test'\r\n \r\n self.sub_AFT = {'exists': True, 'kind': 'Multiple', \r\n 'afr': ['Test', 'Test'], 'ls': ['Test', 'Test']} \r\n \r\n \r\n \r\n " ]
[ [ "numpy.where", "numpy.column_stack", "pandas.Series" ] ]
ybarancan/STSU
[ "0b9efa88739c517a7ca00e61faefa4b45714d312" ]
[ "src/utils/bezier.py" ]
[ "\r\n\r\n\r\n\r\nimport os\r\nimport glob\r\n\r\nimport numpy as np\r\nimport scipy.interpolate as si \r\nimport torch\r\n# from scipy.interpolate import UnivariateSpline\r\nimport logging\r\n# import pwlf\r\nfrom math import factorial\r\n\r\ndef comb(n, k):\r\n return factorial(n) // (factorial(k) * factorial(n - k))\r\n\r\ndef fit_bezier(points, n_control):\r\n n_points = len(points)\r\n A = np.zeros((n_points,n_control))\r\n \r\n t = np.arange(n_points)/(n_points-1)\r\n \r\n for i in range(n_points):\r\n for j in range(n_control):\r\n A[i,j] = comb(n_control - 1, j)*np.power(1-t[i],n_control - 1 - j)*np.power(t[i],j)\r\n \r\n conts = np.linalg.lstsq(A,points,rcond=None)\r\n return conts\r\ndef interpolate_bezier(conts, n_int=100): \r\n n_control = len(conts)\r\n A = np.zeros((n_int,n_control))\r\n \r\n t = np.arange(n_int)/(n_int-1)\r\n \r\n for i in range(n_int):\r\n for j in range(n_control):\r\n A[i,j] = comb(n_control - 1, j)*np.power(1-t[i],n_control - 1 - j)*np.power(t[i],j)\r\n \r\n res = np.dot(A,conts)\r\n return res\r\n\r\n\r\ndef bezier_matrix(n_control=5,n_int=100): \r\n \r\n A = np.zeros((n_int,n_control))\r\n \r\n t = np.arange(n_int)/(n_int-1)\r\n \r\n for i in range(n_int):\r\n for j in range(n_control):\r\n A[i,j] = comb(n_control - 1, j)*np.power(1-t[i],n_control - 1 - j)*np.power(t[i],j)\r\n \r\n \r\n A = torch.Tensor(A)\r\n \r\n A = torch.unsqueeze(A,dim=0)\r\n return A\r\n#\r\n#def gaussian_line_from_traj(points,size=(196,200)):\r\n# \r\n# var = 0.05\r\n# \r\n# \r\n# my_x = torch.linspace(0,1,size[1])\r\n# my_y = torch.linspace(0,1,size[0])\r\n# \r\n# \r\n## grid_y, grid_x = torch.meshgrid(my_y, my_x)\r\n# \r\n# grid_x = torch.unsqueeze(grid_x,dim=0).cuda()\r\n# grid_y = torch.unsqueeze(grid_y,dim=0).cuda()\r\n# \r\n# x_est = torch.unsqueeze(torch.unsqueeze(points[0],dim=-1),dim=-1)\r\n# y_est = torch.unsqueeze(torch.unsqueeze(points[1],dim=-1),dim=-1)\r\n# \r\n# gauss = torch.exp(-(torch.square(x_est - grid_x) + torch.square(y_est - grid_y))/var)\r\n# \r\n# \r\n# return gauss.sum(0)\r\n\r\ndef gaussian_line_from_traj(points,size=(196,200)):\r\n \r\n var = 0.01\r\n \r\n \r\n my_x = torch.linspace(0,1,size[1])\r\n my_y = torch.linspace(0,1,size[0])\r\n\r\n grid_x = torch.unsqueeze(torch.unsqueeze(my_x,dim=0),dim=0).cuda()\r\n grid_y = torch.unsqueeze(torch.unsqueeze(my_y,dim=0),dim=0).cuda()\r\n \r\n x_est = points[:,:,0:1]\r\n y_est = points[:,:,1:]\r\n \r\n x_part = torch.exp(-(torch.square(x_est - grid_x))/var)\r\n y_part = torch.exp(-(torch.square(y_est - grid_y))/var)\r\n \r\n gauss = torch.matmul(torch.transpose(y_part,1,2),x_part)\r\n \r\n \r\n# gauss = torch.exp(-(torch.square(x_est - grid_x) + torch.square(y_est - grid_y))/var)\r\n \r\n \r\n return torch.clamp(gauss,0,1)\r\n\r\n\r\ndef interpolate_bezier_torch(conts, n_int=100): \r\n n_control = len(conts)\r\n A = np.zeros((n_int,n_control))\r\n \r\n t = np.arange(n_int)/(n_int-1)\r\n \r\n for i in range(n_int):\r\n for j in range(n_control):\r\n A[i,j] = comb(n_control - 1, j)*np.power(1-t[i],n_control - 1 - j)*np.power(t[i],j)\r\n \r\n \r\n A = torch.Tensor(A).cuda()\r\n \r\n A = torch.unsqueeze(A,dim=0)\r\n A = A.expand(conts.size(0),-1,-1)\r\n \r\n \r\n \r\n \r\n res = torch.dot(A,conts)\r\n return res\r\n" ]
[ [ "numpy.dot", "numpy.zeros", "torch.square", "torch.linspace", "torch.clamp", "torch.unsqueeze", "numpy.linalg.lstsq", "numpy.arange", "numpy.power", "torch.transpose", "torch.Tensor", "torch.dot" ] ]
michahashkes/IML.HUJI
[ "e5e58da0ae8d13b540a9894f4df6dd2332a3f6da" ]
[ "IMLearn/utils/utils.py" ]
[ "from typing import Tuple\nimport numpy as np\nimport pandas as pd\n\n\ndef split_train_test(X: pd.DataFrame, y: pd.Series, train_proportion: float = .75) \\\n -> Tuple[pd.DataFrame, pd.Series, pd.DataFrame, pd.Series]:\n \"\"\"\n Randomly split given sample to a training- and testing sample\n\n Parameters\n ----------\n X : DataFrame of shape (n_samples, n_features)\n Data frame of samples and feature values.\n\n y : Series of shape (n_samples, )\n Responses corresponding samples in data frame.\n\n train_proportion: Fraction of samples to be split as training set\n\n Returns\n -------\n train_X : DataFrame of shape (ceil(train_proportion * n_samples), n_features)\n Design matrix of train set\n\n train_y : Series of shape (ceil(train_proportion * n_samples), )\n Responses of training samples\n\n test_X : DataFrame of shape (floor((1-train_proportion) * n_samples), n_features)\n Design matrix of test set\n\n test_y : Series of shape (floor((1-train_proportion) * n_samples), )\n Responses of test samples\n\n \"\"\"\n chosen_index = np.sort(np.random.choice(X.shape[0], int(np.ceil(train_proportion * X.shape[0])), replace=False))\n train_X = X.iloc[chosen_index]\n train_y = y.iloc[chosen_index]\n non_chosen_index = np.setdiff1d(np.arange(X.shape[0]), chosen_index)\n test_X = X.iloc[non_chosen_index]\n test_y = y.iloc[non_chosen_index]\n return train_X, train_y, test_X, test_y\n\n\n\ndef confusion_matrix(a: np.ndarray, b: np.ndarray) -> np.ndarray:\n \"\"\"\n Compute a confusion matrix between two sets of integer vectors\n\n Parameters\n ----------\n a: ndarray of shape (n_samples,)\n First vector of integers\n\n b: ndarray of shape (n_samples,)\n Second vector of integers\n\n Returns\n -------\n confusion_matrix: ndarray of shape (a_unique_values, b_unique_values)\n A confusion matrix where the value of the i,j index shows the number of times value `i` was found in vector `a`\n while value `j` vas found in vector `b`\n \"\"\"\n raise NotImplementedError()\n" ]
[ [ "numpy.ceil", "numpy.arange" ] ]
MoraesCaio/ktrain
[ "3ffc3594daf36b5a02f7720d337d25173a29cfcf" ]
[ "ktrain/text/eda.py" ]
[ "from ..imports import *\nfrom .. import utils as U\nfrom . import textutils as TU\nfrom . import preprocessor as pp\nimport time\n\nclass TopicModel():\n\n\n def __init__(self,texts=None, n_topics=None, n_features=10000, \n min_df=5, max_df=0.5, stop_words='english',\n lda_max_iter=5, lda_mode='online', \n token_pattern=None, verbose=1):\n \"\"\"\n Fits a topic model to documents in <texts>.\n Example:\n tm = ktrain.text.get_topic_model(docs, n_topics=20, \n n_features=1000, min_df=2, max_df=0.95)\n Args:\n texts (list of str): list of texts\n n_topics (int): number of topics.\n If None, n_topics = min{400, sqrt[# documents/2]})\n n_features (int): maximum words to consider\n max_df (float): words in more than max_df proportion of docs discarded\n stop_words (str or list): either 'english' for built-in stop words or\n a list of stop words to ignore\n lda_max_iter (int): maximum iterations for 'lda'. 5 is default if using lda_mode='online'.\n If lda_mode='batch', this should be increased (e.g., 1500).\n Ignored if model_type != 'lda'\n lda_mode (str): one of {'online', 'batch'}. Ignored of model_type !='lda'\n token_pattern(str): regex pattern to use to tokenize documents. \n verbose(bool): verbosity\n\n \"\"\"\n self.verbose=verbose\n\n # estimate n_topics\n if n_topics is None:\n if texts is None:\n raise ValueError('If n_topics is None, texts must be supplied')\n estimated = max(1, int(math.floor(math.sqrt(len(texts) / 2))))\n n_topics = min(400, estimated)\n print('n_topics automatically set to %s' % (n_topics))\n\n # train model\n if texts is not None:\n (model, vectorizer) = self.train(texts, \n n_topics=n_topics, n_features=n_features,\n min_df = min_df, max_df = max_df, \n stop_words=stop_words,\n lda_max_iter=lda_max_iter, lda_mode=lda_mode,\n token_pattern=token_pattern)\n else:\n vectorizer = None\n model = None\n\n\n\n # save model and vectorizer and hyperparameter settings\n self.vectorizer = vectorizer\n self.model = model\n self.n_topics = n_topics\n self.n_features = n_features\n if verbose: print('done.')\n\n # these variables are set by self.build():\n self.topic_dict = None\n self.doc_topics = None\n self.bool_array = None\n\n self.scorer = None # set by self.train_scorer()\n self.recommender = None # set by self.train_recommender()\n return\n\n\n def train(self,texts, n_topics=None, n_features=10000, \n min_df=5, max_df=0.5, stop_words='english',\n lda_max_iter=5, lda_mode='online',\n token_pattern=None):\n \"\"\"\n Fits a topic model to documents in <texts>.\n Example:\n tm = ktrain.text.get_topic_model(docs, n_topics=20, \n n_features=1000, min_df=2, max_df=0.95)\n Args:\n texts (list of str): list of texts\n n_topics (int): number of topics.\n If None, n_topics = min{400, sqrt[# documents/2]})\n n_features (int): maximum words to consider\n max_df (float): words in more than max_df proportion of docs discarded\n stop_words (str or list): either 'english' for built-in stop words or\n a list of stop words to ignore\n lda_max_iter (int): maximum iterations for 'lda'. 5 is default if using lda_mode='online'.\n If lda_mode='batch', this should be increased (e.g., 1500).\n Ignored if model_type != 'lda'\n lda_mode (str): one of {'online', 'batch'}. Ignored of model_type !='lda'\n token_pattern(str): regex pattern to use to tokenize documents. 
\n If None, a default tokenizer will be used\n Returns:\n tuple: (model, vectorizer)\n \"\"\"\n\n # adjust defaults based on language detected\n if texts is not None:\n lang = pp.detect_lang(texts)\n if lang != 'en':\n stopwords = None if stop_words=='english' else stop_words\n token_pattern = r'(?u)\\b\\w+\\b' if token_pattern is None else token_pattern\n if pp.is_nospace_lang(lang):\n text_list = []\n for t in texts:\n text_list.append(' '.join(jieba.cut(t, HMM=False)))\n texts = text_list\n if self.verbose: print('lang: %s' % (lang))\n\n\n # preprocess texts\n if self.verbose: print('preprocessing texts...')\n if token_pattern is None: token_pattern = TU.DEFAULT_TOKEN_PATTERN\n #if token_pattern is None: token_pattern = r'(?u)\\b\\w\\w+\\b'\n vectorizer = CountVectorizer(max_df=max_df, min_df=min_df,\n max_features=n_features, stop_words=stop_words,\n token_pattern=token_pattern)\n \n\n x_train = vectorizer.fit_transform(texts)\n\n # fit model\n alpha = 5./n_topics\n beta = 0.01\n if self.verbose: print('fitting model...')\n model = LatentDirichletAllocation(n_components=n_topics, max_iter=lda_max_iter,\n learning_method=lda_mode, learning_offset=50.,\n doc_topic_prior=alpha, topic_word_prior=beta,\n verbose=self.verbose, random_state=0)\n model.fit(x_train)\n\n # save model and vectorizer and hyperparameter settings\n return (model, vectorizer)\n\n\n @property\n def topics(self):\n \"\"\"\n convenience method/property\n \"\"\"\n return self.get_topics()\n\n\n def get_topics(self, n_words=10, as_string=True):\n \"\"\"\n Returns a list of discovered topics\n Args:\n n_words(int): number of words to use in topic summary\n as_string(bool): If True, each summary is a space-delimited string instead of list of words\n \"\"\"\n self._check_model()\n feature_names = self.vectorizer.get_feature_names()\n topic_summaries = []\n for topic_idx, topic in enumerate(self.model.components_):\n summary = [feature_names[i] for i in topic.argsort()[:-n_words - 1:-1]]\n if as_string: summary = \" \".join(summary)\n topic_summaries.append(summary)\n return topic_summaries\n\n\n def print_topics(self, n_words=10, show_counts=False):\n \"\"\"\n print topics\n \"\"\"\n topics = self.get_topics(n_words=n_words, as_string=True)\n if show_counts:\n self._check_build()\n topic_counts = sorted([ (k, topics[k], len(v)) for k,v in self.topic_dict.items()], \n key=lambda kv:kv[-1], reverse=True)\n for (idx, topic, count) in topic_counts:\n print(\"topic:%s | count:%s | %s\" %(idx, count, topic))\n else:\n for i, t in enumerate(topics):\n print('topic %s | %s' % (i, t))\n return\n\n\n def build(self, texts, threshold=None):\n \"\"\"\n Builds the document-topic distribution showing the topic probability distirbution\n for each document in <texts> with respect to the learned topic space.\n Args:\n texts (list of str): list of text documents\n threshold (float): If not None, documents with whose highest topic probability\n is less than threshold are filtered out.\n \"\"\"\n doc_topics, bool_array = self.predict(texts, threshold=0.25)\n self.doc_topics = doc_topics\n self.bool_array = bool_array\n\n texts = [text for i, text in enumerate(texts) if bool_array[i]]\n self.topic_dict = self._rank_documents(texts, doc_topics=doc_topics)\n return\n\n\n def filter(self, lst):\n \"\"\"\n The build method may prune documents based on threshold.\n This method prunes other lists based on how build pruned documents.\n This is useful to filter lists containing metadata associated with documents\n for use with visualize_documents.\n 
Args:\n lst(list): a list of data\n Returns:\n list: a filtered list of data based on how build filtered the documents\n \"\"\"\n if len(lst) != self.bool_array.shape[0]:\n raise ValueError('Length of lst is not consistent with the number of documents ' +\n 'supplied to get_topic_model')\n arr = np.array(lst)\n return list(arr[self.bool_array])\n \n\n \n def get_docs(self, topic_ids=[], doc_ids=[], rank=False):\n \"\"\"\n Returns document entries for supplied topic_ids\n Args:\n topic_ids(list of ints): list of topid IDs where each id is in the range\n of range(self.n_topics).\n doc_ids (list of ints): list of document IDs where each id is an index\n into self.doctopics\n rank(bool): If True, the list is sorted first by topic_id (ascending)\n and then ty topic probability (descending).\n Otherwise, list is sorted by doc_id (i.e., the order\n of texts supplied to self.build (which is the order of self.doc_topics).\n\n Returns:\n list of tuples: list of tuples where each tuple is of the form \n (text, doc_id, topic_probability, topic_id).\n \n \"\"\"\n self._check_build()\n if not topic_ids:\n topic_ids = list(range(self.n_topics))\n result_texts = []\n for topic_id in topic_ids:\n if topic_id not in self.topic_dict: continue\n texts = [tup + (topic_id,) for tup in self.topic_dict[topic_id] \n if not doc_ids or tup[1] in doc_ids]\n result_texts.extend(texts)\n if not rank:\n result_texts = sorted(result_texts, key=lambda x:x[1])\n return result_texts\n\n\n def get_doctopics(self, topic_ids=[], doc_ids=[]):\n \"\"\"\n Returns a topic probability distribution for documents\n with primary topic that is one of <topic_ids>\n Args:\n topic_ids(list of ints): list of topid IDs where each id is in the range\n of range(self.n_topics).\n doc_ids (list of ints): list of document IDs where each id is an index\n into self.doctopics\n Returns:\n np.ndarray: Each row is the topic probability distribution of a document.\n Array is sorted in the order returned by self.get_docs.\n \n \"\"\"\n docs = self.get_docs(topic_ids=topic_ids, doc_ids=doc_ids)\n return np.array([self.doc_topics[idx] for idx in [x[1] for x in docs]])\n\n\n def get_texts(self, topic_ids=[]):\n \"\"\"\n Returns texts for documents\n with primary topic that is one of <topic_ids>\n Args:\n topic_ids(list of ints): list of topic IDs\n Returns:\n list of str\n \"\"\"\n if not topic_ids: topic_ids = list(range(self.n_topics))\n docs = self.get_docs(topic_ids)\n return [x[0] for x in docs]\n\n\n\n def predict(self, texts, threshold=None, harden=False):\n \"\"\"\n Args:\n texts (list of str): list of texts\n threshold (float): If not None, documents with maximum topic scores\n less than <threshold> are filtered out\n harden(bool): If True, each document is assigned to a single topic for which\n it has the highest score\n Returns:\n if threshold is None:\n np.ndarray: topic distribution for each text document\n else:\n (np.ndarray, np.ndarray): topic distribution and boolean array\n \"\"\"\n self._check_model()\n transformed_texts = self.vectorizer.transform(texts)\n X_topics = self.model.transform(transformed_texts)\n #if self.model_type == 'nmf':\n #scores = np.matrix(X_topics)\n #scores_normalized= scores/scores.sum(axis=1)\n #X_topics = scores_normalized\n _idx = np.array([True] * len(texts))\n if threshold is not None:\n _idx = np.amax(X_topics, axis=1) > threshold # idx of doc that above the threshold\n _idx = np.array(_idx)\n X_topics = X_topics[_idx]\n if harden: X_topics = self._harden_topics(X_topics)\n if threshold is not None:\n 
return (X_topics, _idx)\n else:\n return X_topics\n\n\n def visualize_documents(self, texts=None, doc_topics=None, \n width=700, height=700, point_size=5, title='Document Visualization',\n extra_info={},\n colors=None,\n filepath=None,):\n \"\"\"\n Generates a visualization of a set of documents based on model.\n If <texts> is supplied, raw documents will be first transformed into document-topic\n matrix. If <doc_topics> is supplied, then this will be used for visualization instead.\n Args:\n texts(list of str): list of document texts. Mutually-exclusive with <doc_topics>\n doc_topics(ndarray): pre-computed topic distribution for each document in texts.\n Mutually-exclusive with <texts>.\n width(int): width of image\n height(int): height of image\n point_size(int): size of circles in plot\n title(str): title of visualization\n extra_info(dict of lists): A user-supplied information for each datapoint (attributes of the datapoint).\n The keys are field names. The values are lists - each of which must\n be the same number of elements as <texts> or <doc_topics>. These fields are displayed\n when hovering over datapoints in the visualization.\n colors(list of str): list of Hex color codes for each datapoint.\n Length of list must match either len(texts) or doc_topics.shape[0]\n filepath(str): Optional filepath to save the interactive visualization\n \"\"\"\n\n # error-checking\n if texts is not None: length = len(texts)\n else: length = doc_topics.shape[0]\n if colors is not None and len(colors) != length:\n raise ValueError('length of colors is not consistent with length of texts or doctopics')\n if texts is not None and doc_topics is not None:\n raise ValueError('texts is mutually-exclusive with doc_topics')\n if texts is None and doc_topics is None:\n raise ValueError('One of texts or doc_topics is required.')\n if extra_info:\n invalid_keys = ['x', 'y', 'topic', 'fill_color']\n for k in extra_info.keys():\n if k in invalid_keys:\n raise ValueError('cannot use \"%s\" as key in extra_info' %(k))\n lst = extra_info[k]\n if len(lst) != length:\n raise ValueError('texts and extra_info lists must be same size')\n\n # check fo bokeh\n try:\n import bokeh.plotting as bp\n from bokeh.plotting import save\n from bokeh.models import HoverTool\n from bokeh.io import output_notebook\n except:\n warnings.warn('visualize_documents method requires bokeh package: pip3 install bokeh')\n return\n\n # prepare data\n if doc_topics is not None:\n X_topics = doc_topics\n else:\n if self.verbose: print('transforming texts...', end='')\n X_topics = self.predict(texts, harden=False)\n if self.verbose: print('done.')\n\n # reduce to 2-D\n if self.verbose: print('reducing to 2 dimensions...', end='')\n tsne_model = TSNE(n_components=2, verbose=self.verbose, random_state=0, angle=.99, init='pca')\n tsne_lda = tsne_model.fit_transform(X_topics)\n print('done.')\n\n # get random colormap\n colormap = U.get_random_colors(self.n_topics)\n\n # generate inline visualization in Jupyter notebook\n lda_keys = self._harden_topics(X_topics)\n if colors is None: colors = colormap[lda_keys]\n topic_summaries = self.get_topics(n_words=5)\n os.environ[\"BOKEH_RESOURCES\"]=\"inline\"\n output_notebook()\n dct = { \n 'x':tsne_lda[:,0],\n 'y':tsne_lda[:, 1],\n 'topic':[topic_summaries[tid] for tid in lda_keys],\n 'fill_color':colors,}\n tool_tups = [('index', '$index'),\n ('(x,y)','($x,$y)'),\n ('topic', '@topic')]\n for k in extra_info.keys():\n dct[k] = extra_info[k]\n tool_tups.append((k, '@'+k))\n\n source = 
bp.ColumnDataSource(data=dct)\n hover = HoverTool( tooltips=tool_tups)\n p = bp.figure(plot_width=width, plot_height=height, \n tools=[hover, 'previewsave', 'pan', 'wheel_zoom', 'box_zoom', 'reset'],\n #tools=\"pan,wheel_zoom,box_zoom,reset,hover,previewsave\",\n title=title)\n #plot_lda = bp.figure(plot_width=1400, plot_height=1100,\n\t\t\t #title=title,\n\t\t\t #tools=\"pan,wheel_zoom,box_zoom,reset,hover,previewsave\",\n\t\t\t #x_axis_type=None, y_axis_type=None, min_border=1)\n p.circle('x', 'y', size=point_size, source=source, fill_color= 'fill_color')\n bp.show(p)\n if filepath is not None:\n bp.output_file('/tmp/t.html')\n bp.save(p)\n return\n\n\n def train_recommender(self):\n \"\"\"\n Trains a recommender that, given a single document, will return\n documents in the corpus that are semantically similar to it.\n\n Args:\n n_neighbors (int): \n Returns:\n None\n \"\"\"\n from sklearn.neighbors import NearestNeighbors\n rec = NearestNeighbors(n_neighbors=20)\n probs = self.get_doctopics()\n rec.fit(probs)\n self.recommender = rec\n return\n\n\n\n def recommend(self, text=None, doc_topic=None, n=5):\n \"\"\"\n Given an example document, recommends documents similar to it\n from the set of documents supplied to build().\n \n Args:\n texts(list of str): list of document texts. Mutually-exclusive with <doc_topics>\n doc_topics(ndarray): pre-computed topic distribution for each document in texts.\n Mutually-exclusive with <texts>.\n n (int): number of recommendations to return\n Returns:\n list of tuples: each tuple is of the form:\n (text, doc_id, topic_probability, topic_id)\n\n \"\"\"\n n_neighbors = 100 \n\n # error-checks\n if text is not None and doc_topic is not None:\n raise ValueError('text is mutually-exclusive with doc_topic')\n if text is None and doc_topic is None:\n raise ValueError('One of text or doc_topic is required.')\n if text is not None and type(text) not in [str]:\n raise ValueError('text must be a str ')\n if doc_topic is not None and type(doc_topic) not in [np.ndarray]:\n raise ValueError('doc_topic must be a np.ndarray')\n\n if n > n_neighbors: n_neighbors = n\n\n x_test = [doc_topic]\n if text:\n x_test = self.predict([text])\n docs = self.get_docs()\n indices = self.recommender.kneighbors(x_test, return_distance=False, n_neighbors=n_neighbors)\n results = [doc for i, doc in enumerate(docs) if i in indices]\n return results[:n]\n\n\n\n\n def train_scorer(self, topic_ids=[], doc_ids=[]):\n \"\"\"\n Trains a scorer that can score documents based on similarity to a\n seed set of documents represented by topic_ids and doc_ids.\n\n Args:\n topic_ids(list of ints): list of topid IDs where each id is in the range\n of range(self.n_topics). Documents associated\n with these topic_ids will be used as seed set.\n doc_ids (list of ints): list of document IDs where each id is an index\n into self.doctopics. 
Documents associated \n with these doc_ids will be used as seed set.\n Returns:\n None\n \"\"\"\n from sklearn.neighbors import LocalOutlierFactor\n clf = LocalOutlierFactor(n_neighbors=20, novelty=True, contamination=0.1)\n probs = self.get_doctopics(topic_ids=topic_ids, doc_ids=doc_ids)\n clf.fit(probs)\n self.scorer = clf\n return\n\n\n\n def score(self, texts=None, doc_topics=None):\n \"\"\"\n Given a new set of documents (supplied as texts or doc_topics), the score method\n uses a One-Class classifier to score documents based on similarity to a\n seed set of documents (where seed set is computed by train_scorer() method).\n\n Higher scores indicate a higher degree of similarity.\n Positive values represent a binary decision of similar.\n Negative values represent a binary decision of dissimlar.\n In practice, negative scores closer to zer will also be simlar as One-Class\n classifiers are more strict than traditional binary classifiers.\n Documents with negative scores closer to zero are good candidates for\n inclusion in a training set for binary classification (e.g., active labeling).\n \n Args:\n texts(list of str): list of document texts. Mutually-exclusive with <doc_topics>\n doc_topics(ndarray): pre-computed topic distribution for each document in texts.\n Mutually-exclusive with <texts>.\n Returns:\n list of floats: larger values indicate higher degree of similarity\n positive values indicate a binary decision of similar\n negative values indicate binary decision of dissimilar\n In practice, negative scores closer to zero will also \n be similar as One-class classifiers are more strict\n than traditional binary classifiers.\n\n \"\"\"\n # error-checks\n if texts is not None and doc_topics is not None:\n raise ValueError('texts is mutually-exclusive with doc_topics')\n if texts is None and doc_topics is None:\n raise ValueError('One of texts or doc_topics is required.')\n if texts is not None and type(texts) not in [list, np.ndarray]:\n raise ValueError('texts must be either a list or numpy ndarray')\n if doc_topics is not None and type(doc_topics) not in [np.ndarray]:\n raise ValueError('doc_topics must be a np.ndarray')\n\n x_test = doc_topics\n if texts:\n x_test = self.predict(texts)\n return self.scorer.decision_function(x_test)\n\n\n def search(self, query, topic_ids=[], doc_ids=[], case_sensitive=False):\n \"\"\"\n search documents for query string.\n Args:\n query(str): the word or phrase to search\n topic_ids(list of ints): list of topid IDs where each id is in the range\n of range(self.n_topics).\n doc_ids (list of ints): list of document IDs where each id is an index\n into self.doctopics\n case_sensitive(bool): If True, case sensitive search\n \"\"\"\n\n # setup pattern\n if not case_sensitive: query = query.lower()\n pattern = re.compile(r'\\b%s\\b' % query)\n\n # retrive docs\n docs = self.get_docs(topic_ids=topic_ids, doc_ids=doc_ids)\n\n # search\n mb = master_bar(range(1))\n results = []\n for i in mb:\n for doc in progress_bar(docs, parent=mb):\n text = doc[0]\n if not case_sensitive: text = text.lower()\n matches = pattern.findall(text)\n if matches: results.append(doc)\n if self.verbose: mb.write('done.')\n return results\n\n\n\n def _rank_documents(self, \n texts,\n doc_topics=None):\n \"\"\"\n Rank documents by topic score.\n If topic_index is supplied, rank documents based on relevance to supplied topic.\n Otherwise, rank all texts by their highest topic score (for any topic).\n Args:\n texts(list of str): list of document texts.\n doc_topics(ndarray): 
pre-computed topic distribution for each document\n If None, re-computed from texts.\n \n Returns:\n dict of lists: each element in list is a tuple of (doc_index, topic_index, score)\n ... where doc_index is an index into either texts \n \"\"\"\n if doc_topics is not None:\n X_topics = doc_topics\n else:\n if self.verbose: print('transforming texts to topic space...')\n X_topics = self.predict(texts)\n topics = np.argmax(X_topics, axis=1)\n scores = np.amax(X_topics, axis=1)\n doc_ids = np.array([i for i, x in enumerate(texts)])\n result = list(zip(texts, doc_ids, topics, scores))\n if self.verbose: print('done.')\n result = sorted(result, key=lambda x: x[-1], reverse=True)\n result_dict = {}\n for r in result:\n text = r[0]\n doc_id = r[1]\n topic_id = r[2]\n score = r[3]\n lst = result_dict.get(topic_id, [])\n lst.append((text, doc_id, score))\n result_dict[topic_id] = lst\n return result_dict\n\n\n def _harden_topics(self, X_topics):\n \"\"\"\n Transforms soft-clustering to hard-clustering\n \"\"\"\n max_topics = []\n for i in range(X_topics.shape[0]):\n max_topics.append(X_topics[i].argmax())\n X_topics = np.array(max_topics)\n return X_topics\n\n\n def _check_build(self):\n self._check_model()\n if self.topic_dict is None: \n raise Exception('Must call build() method.')\n\n def _check_scorer(self):\n if self.scorer is None:\n raise Exception('Must call train_scorer()')\n\n def _check_recommender(self):\n if self.recommender is None:\n raise Exception('Must call train_recommender()')\n\n\n def _check_model(self):\n if self.model is None or self.vectorizer is None:\n raise Exception('Must call train()')\n\n\n\n def save(self, fname):\n \"\"\"\n save TopicModel object\n \"\"\"\n\n \n with open(fname+'.tm_vect', 'wb') as f:\n pickle.dump(self.vectorizer, f)\n with open(fname+'.tm_model', 'wb') as f:\n pickle.dump(self.model, f)\n params = {'n_topics': self.n_topics,\n 'n_features': self.n_features,\n 'verbose': self.verbose}\n with open(fname+'.tm_params', 'wb') as f:\n pickle.dump(params, f)\n\n return\n\nget_topic_model = TopicModel \n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n" ]
[ [ "sklearn.neighbors.NearestNeighbors", "sklearn.neighbors.LocalOutlierFactor" ] ]
HienDT27/annotation_utils
[ "1f4e95f4cfa08de5bbab20f90a6a75fba66a69b9" ]
[ "build/lib/annotation_utils/dataset/dataset_prep.py" ]
[ "import sys\nimport math\nimport pandas as pd\nfrom tqdm import tqdm\nfrom typing import cast, List, Dict\nfrom common_utils.path_utils import get_dirnames_in_dir\nfrom common_utils.file_utils import make_dir_if_not_exists, delete_all_files_in_dir, \\\n get_dir_contents_len, dir_exists, file_exists\nfrom ..coco.structs import COCO_Dataset\nfrom .config import DatasetConfig, DatasetConfigCollection, \\\n DatasetConfigCollectionHandler\n\ndef prepare_datasets_from_dir(\n scenario_root_dir: str, dst_root_dir: str, annotation_filename: str='output.json', skip_existing: bool=False,\n val_target_proportion: float=0.05, min_val_size: int=None, max_val_size: int=None,\n orig_config_save: str='orig.yaml', reorganized_config_save: str='dataset_config.yaml', show_pbar: bool=True\n):\n \"\"\"\n Parameters:\n scenario_root_dir - Path to the source root directory containing all of your scenario folders. [Required]\n dst_root_dir - Path to where you would like to save your prepared scenario datasets (split into train and val) [Required]\n annotation_filename - The filename of every annotation file under scenario_root_dir [Default: 'output.json']\n skip_existing - If you terminated dataset preparation midway, you can skip the scenarios that were already made using skip_existing=True. [Default: False]\n val_target_proportion - The proportion of your scenario that you would like to allocate to validation. [Default: 0.05]\n min_val_size - The minimum number of images that you would like to use for validation. [Default: None]\n max_val_size - The maximum number of images that you would like to use for validation. [Default: None]\n orig_config_save - Where you would like to save the dataset configuration representing your scenario_root_dir. [Default: 'orig.yaml]\n reorganized_config_save - Where you would like to save the dataset configuration representing your dst_root_dir. [Default: 'dataset_config.yaml']\n show_pbar - Whether or not you would like to show a progress bar during preparation. 
[Default: True]\n\n Description:\n The datasets under each scenario directory will be combined and then split into a train + validation folder.\n The source root directory should have the following structure:\n scenario_root_dir\n scenario0\n scenario0_part0\n scenario0_part1\n scenario0_part2\n ...\n scenario1\n scenario1_part0\n scenario1_part1\n scenario1_part2\n ...\n scenario2\n scenario2_part0\n scenario2_part1\n scenario2_part2\n ...\n ...\n Note that there is no restriction on directory names, so the directory names should be anything.\n This method reads a fixed directory structure regardless of the directory names.\n Also note that it is necessary for the coco annotation file saved in each scenario part directory to have the same filename.\n If you need a more flexible approach for preparing your datasets, please define where your datasets are located in an excel sheet\n and use prepare_datasets_from_excel instead.\n\n The destination root directory will have the following structure:\n dst_root_dir\n scenario0\n train\n val\n scenario1\n train\n val\n scenario2\n train\n val\n ...\n \n The dataset configuration file saved at reorganized_config_save will reflect the directory structure of dst_root_dir.\n The configuration file representing the directory structure of the scenario_root_dir is saved under orig_config_save.\n\n Note that orig_config_save and reorganized_config_save do not have to be inside of dst_root_dir.\n On the contrary, it is recommended to not save orig_config_save and reorganized_config_save inside of dst_root_dir.\n It is recommended that you change the path of orig_config_save and reorganized_config_save everytime you make an addition to your datasets.\n This is because you will likely want to keep track of the previous states of your dataset configuration, and you\n may also want to rollback to a previous configuration at any given time.\n \"\"\"\n make_dir_if_not_exists(dst_root_dir)\n if get_dir_contents_len(dst_root_dir) > 0 and not skip_existing:\n print(f'Directory {dst_root_dir} is not empty.\\nAre you sure you want to delete the contents?')\n answer = input('yes/no')\n if answer.lower() in ['yes', 'y']:\n delete_all_files_in_dir(dst_root_dir)\n elif answer.lower() in ['no', 'n']:\n print(f'Terminating program.')\n sys.exit()\n else:\n raise ValueError(f'Invalid answer: {answer}')\n\n # Gather datasets from source root directory and combine.\n scenario_names = get_dirnames_in_dir(scenario_root_dir)\n scenario_datasets = cast(List[COCO_Dataset], [])\n orig_collection_handler = DatasetConfigCollectionHandler()\n pbar = tqdm(total=len(scenario_names), unit='scenario(s)') if show_pbar else None\n if pbar is not None:\n pbar.set_description('Gathering Scenarios')\n for scenario_name in scenario_names:\n orig_scenario_collection = DatasetConfigCollection(tag=scenario_name)\n src_scenario_dir = f'{scenario_root_dir}/{scenario_name}'\n part_names = get_dirnames_in_dir(src_scenario_dir)\n part_datasets = cast(List[COCO_Dataset], [])\n part_dataset_dirs = cast(List[str], [])\n for part_name in part_names:\n src_part_dir = f'{src_scenario_dir}/{part_name}'\n src_part_ann_path = f'{src_part_dir}/{annotation_filename}'\n part_dataset = COCO_Dataset.load_from_path(\n json_path=src_part_ann_path,\n img_dir=src_part_dir\n )\n part_datasets.append(part_dataset)\n part_dataset_dirs.append(src_part_dir)\n orig_scenario_part_config = DatasetConfig(\n img_dir=src_part_dir,\n ann_path=src_part_ann_path,\n ann_format='coco',\n tag=part_name\n )\n 
orig_scenario_collection.append(orig_scenario_part_config)\n scenario_dataset = COCO_Dataset.combine(dataset_list=part_datasets, img_dir_list=part_dataset_dirs, show_pbar=False)\n scenario_datasets.append(scenario_dataset)\n orig_collection_handler.append(orig_scenario_collection)\n if pbar is not None:\n pbar.update()\n orig_collection_handler.save_to_path(orig_config_save, overwrite=True)\n pbar.close()\n \n # Split combined scenario datasets into train and val and save them.\n train_collection = DatasetConfigCollection(tag='train')\n val_collection = DatasetConfigCollection(tag='val')\n pbar = tqdm(total=len(scenario_names)) if show_pbar else None\n if pbar is not None:\n pbar.set_description('Splitting Scenarios Into Train/Val')\n for i in range(len(scenario_names)):\n dst_scenario_dir = f'{dst_root_dir}/{scenario_names[i]}'\n if dir_exists(dst_scenario_dir):\n if skip_existing:\n if pbar is not None:\n pbar.update()\n continue\n else:\n raise FileExistsError(f'Directory already exists: {dst_scenario_dir}')\n else:\n make_dir_if_not_exists(dst_scenario_dir)\n orig_num_images = len(scenario_datasets[i].images)\n assert orig_num_images >= 2, f'{scenario_names[i]} has only {orig_num_images} images, and thus cannot be split into train and val.'\n num_val = int(len(scenario_datasets[i].images) * val_target_proportion)\n num_val = 1 if num_val == 0 else num_val\n num_val = min_val_size if min_val_size is not None and num_val < min_val_size else num_val\n num_val = max_val_size if max_val_size is not None and num_val > max_val_size else num_val\n num_train = orig_num_images - num_val\n train_dataset, val_dataset = scenario_datasets[i].split_into_parts(ratio=[num_train, num_val], shuffle=True)\n \n dst_train_dir = f'{dst_scenario_dir}/train'\n make_dir_if_not_exists(dst_train_dir)\n train_dataset.move_images(\n dst_img_dir=dst_train_dir,\n preserve_filenames=False,\n update_img_paths=True,\n show_pbar=False\n )\n train_ann_path = f'{dst_train_dir}/output.json'\n train_dataset.save_to_path(train_ann_path, overwrite=True)\n train_dataset_config = DatasetConfig(img_dir=dst_train_dir, ann_path=train_ann_path, ann_format='coco', tag=f'{scenario_names[i]}_train')\n train_collection.append(train_dataset_config)\n\n dst_val_dir = f'{dst_scenario_dir}/val'\n make_dir_if_not_exists(dst_val_dir)\n val_dataset.move_images(\n dst_img_dir=dst_val_dir,\n preserve_filenames=False,\n update_img_paths=True,\n show_pbar=False\n )\n val_ann_path = f'{dst_val_dir}/output.json'\n val_dataset.save_to_path(val_ann_path, overwrite=True)\n val_dataset_config = DatasetConfig(img_dir=dst_val_dir, ann_path=val_ann_path, ann_format='coco', tag=f'{scenario_names[i]}_val')\n val_collection.append(val_dataset_config)\n if pbar is not None:\n pbar.update()\n pbar.close()\n collection_handler = DatasetConfigCollectionHandler([train_collection, val_collection])\n collection_handler.save_to_path(reorganized_config_save, overwrite=True)\n\ndef prepare_datasets_from_excel(\n xlsx_path: str, dst_root_dir: str,\n usecols: str='A:L', skiprows: int=None, skipfooter: int=0,\n skip_existing: bool=False,\n val_target_proportion: float=0.05, min_val_size: int=None, max_val_size: int=None,\n orig_config_save: str='orig.yaml', reorganized_config_save: str='dataset_config.yaml',\n show_pbar: bool=True\n):\n \"\"\"\n Parameters:\n xlsx_path - Path to excel sheet that contains all of the information about where your datasets are located.\n dst_root_dir - Path to where you would like to save your prepared scenario datasets (split into train and 
val)\n usecols - Specify which columns you would like to parse from the excel sheet at xlsx_path. [Default: 'A:L']\n skiprows - Specify the number of rows from the top that you would like to skip when parsing the excel sheet. [Default: None]\n skipfooter - Specify the number of rows from the bottom that you would like to skip when parsing the excel sheet. [Default: 0]\n skip_existing - If you terminated dataset preparation midway, you can skip the scenarios that were already made using skip_existing=True. [Default: False]\n val_target_proportion - The proportion of your scenario that you would like to allocate to validation. [Default: 0.05]\n min_val_size - The minimum number of images that you would like to use for validation. [Default: None]\n max_val_size - The maximum number of images that you would like to use for validation. [Default: None]\n orig_config_save - Where you would like to save the dataset configuration representing your scenario_root_dir. [Default: 'orig.yaml]\n reorganized_config_save - Where you would like to save the dataset configuration representing your dst_root_dir. [Default: 'dataset_config.yaml']\n show_pbar - Whether or not you would like to show a progress bar during preparation. [Default: True]\n \n Description:\n The datasets specified in the excel sheet at xlsx_path will be combined and then split into a train + validation folder.\n Since the absolute paths of both image directories and annotation paths are parsed from the excel sheet, there is no need to place any restrictions\n on where each dataset needs to be located.\n\n The destination root directory will have the following structure:\n dst_root_dir\n scenario0\n train\n val\n scenario1\n train\n val\n scenario2\n train\n val\n ...\n\n The dataset configuration file saved at reorganized_config_save will reflect the directory structure of dst_root_dir.\n The configuration file representing the directory structure defined in your excel sheet is saved under orig_config_save.\n\n Note that orig_config_save and reorganized_config_save do not have to be inside of dst_root_dir.\n On the contrary, it is recommended to not save orig_config_save and reorganized_config_save inside of dst_root_dir.\n It is recommended that you change the path of orig_config_save and reorganized_config_save everytime you make an addition to your datasets.\n This is because you will likely want to keep track of the previous states of your dataset configuration, and you\n may also want to rollback to a previous configuration at any given time.\n \"\"\"\n # Parse Excel Sheet\n if not file_exists(xlsx_path):\n raise FileNotFoundError(f'File not found: {xlsx_path}')\n data_df = pd.read_excel(xlsx_path, usecols=usecols, skiprows=skiprows, skipfooter=skipfooter)\n data_records = data_df.to_dict(orient='records')\n\n required_keys = [\n 'Scenario Name', 'Dataset Name', 'Image Directory', 'Annotation Path'\n ]\n parsed_keys = list(data_records[0].keys())\n missing_keys = []\n for required_key in required_keys:\n if required_key not in parsed_keys:\n missing_keys.append(required_key)\n if len(missing_keys) > 0:\n raise KeyError(\n f\"\"\"\n Couldn't find the following required keys in the given excel sheet:\n missing_keys: {missing_keys}\n required_keys: {required_keys}\n parsed_keys: {parsed_keys}\n xlsx_path: {xlsx_path}\n\n Please check your excel sheet and script parameters and try again.\n Note: usecols, skiprows, and skipfooter affect which parts of the excel sheet are parsed.\n \"\"\"\n )\n\n def is_empty_cell(info_dict: Dict[str, 
str], key: str, expected_type: type=str) -> bool:\n return not isinstance(info_dict[key], expected_type) and math.isnan(info_dict[key])\n\n collection_handler = DatasetConfigCollectionHandler()\n current_scenario_name = None\n working_config_list = cast(List[DatasetConfig], [])\n pbar = tqdm(total=len(data_records), unit='item(s)') if show_pbar else None\n if pbar is not None:\n pbar.set_description('Parsing Excel Sheet')\n for info_dict in data_records:\n for required_cell_key in ['Dataset Name', 'Image Directory', 'Annotation Path']:\n if is_empty_cell(info_dict, key=required_cell_key, expected_type=str):\n raise ValueError(\n f\"\"\"\n Encountered empty cell under {required_cell_key}.\n Row Dictionary: {info_dict}\n xlsx_path: {xlsx_path}\n Please check your excel sheet.\n \"\"\"\n )\n assert 'Scenario Name' in info_dict\n scenario_name = info_dict['Scenario Name'] \\\n if 'Scenario Name' in info_dict and not is_empty_cell(info_dict, key='Scenario Name', expected_type=str) \\\n else None\n dataset_name = info_dict['Dataset Name']\n img_dir = info_dict['Image Directory']\n ann_path = info_dict['Annotation Path']\n if scenario_name is not None:\n if len(working_config_list) > 0:\n collection = DatasetConfigCollection(working_config_list, tag=current_scenario_name)\n collection_handler.append(collection)\n working_config_list = []\n current_scenario_name = scenario_name\n config = DatasetConfig(img_dir=img_dir, ann_path=ann_path, ann_format='coco', tag=dataset_name)\n working_config_list.append(config)\n if pbar is not None:\n pbar.update()\n if len(working_config_list) > 0:\n collection = DatasetConfigCollection(working_config_list, tag=current_scenario_name)\n collection_handler.append(collection)\n working_config_list = []\n if pbar is not None:\n pbar.close()\n collection_handler.save_to_path(orig_config_save, overwrite=True)\n\n # Combine Datasets\n train_collection = DatasetConfigCollection(tag='train')\n val_collection = DatasetConfigCollection(tag='val')\n\n make_dir_if_not_exists(dst_root_dir)\n pbar = tqdm(total=len(collection_handler), unit='scenario(s)') if show_pbar else None\n if pbar is not None:\n pbar.set_description('Combining Scenarios')\n for collection in collection_handler:\n scenario_root_dir = f'{dst_root_dir}/{collection.tag}'\n make_dir_if_not_exists(scenario_root_dir)\n scenario_train_dir = f'{scenario_root_dir}/train'\n make_dir_if_not_exists(scenario_train_dir)\n scenario_val_dir = f'{scenario_root_dir}/val'\n make_dir_if_not_exists(scenario_val_dir)\n\n if (not file_exists(f'{scenario_train_dir}/output.json') or not file_exists(f'{scenario_val_dir}/output.json')) or not skip_existing:\n combined_dataset = COCO_Dataset.combine_from_config(collection, img_sort_attr_name='file_name', show_pbar=False)\n orig_num_images = len(combined_dataset.images)\n assert orig_num_images >= 2, f'{collection.tag} has only {orig_num_images} images, and thus cannot be split into train and val.'\n num_val = int(len(combined_dataset.images) * val_target_proportion)\n num_val = 1 if num_val == 0 else num_val\n num_val = min_val_size if min_val_size is not None and num_val < min_val_size else num_val\n num_val = max_val_size if max_val_size is not None and num_val > max_val_size else num_val\n num_train = orig_num_images - num_val\n train_dataset, val_dataset = combined_dataset.split_into_parts(ratio=[num_train, num_val], shuffle=True)\n\n train_dataset.move_images(\n dst_img_dir=scenario_train_dir,\n preserve_filenames=False,\n update_img_paths=True,\n overwrite=True,\n 
show_pbar=False\n )\n train_dataset.save_to_path(f'{scenario_train_dir}/output.json', overwrite=True)\n train_collection.append(DatasetConfig(img_dir=scenario_train_dir, ann_path=f'{scenario_train_dir}/output.json', tag=f'{collection.tag}_train'))\n\n val_dataset.move_images(\n dst_img_dir=scenario_val_dir,\n preserve_filenames=False,\n update_img_paths=True,\n overwrite=True,\n show_pbar=False\n )\n val_dataset.save_to_path(f'{scenario_val_dir}/output.json', overwrite=True)\n val_collection.append(DatasetConfig(img_dir=scenario_val_dir, ann_path=f'{scenario_val_dir}/output.json', tag=f'{collection.tag}_val'))\n else:\n train_dataset = COCO_Dataset.load_from_path(f'{scenario_train_dir}/output.json', img_dir=f'{scenario_train_dir}')\n train_collection.append(DatasetConfig(img_dir=scenario_train_dir, ann_path=f'{scenario_train_dir}/output.json', tag=f'{collection.tag}_train'))\n val_dataset = COCO_Dataset.load_from_path(f'{scenario_val_dir}/output.json', img_dir=f'{scenario_val_dir}')\n val_collection.append(DatasetConfig(img_dir=scenario_val_dir, ann_path=f'{scenario_val_dir}/output.json', tag=f'{collection.tag}_val'))\n if pbar is not None:\n pbar.update()\n if pbar is not None:\n pbar.close()\n\n organized_collection_handler = DatasetConfigCollectionHandler([train_collection, val_collection])\n organized_collection_handler.save_to_path(reorganized_config_save, overwrite=True)" ]
[ [ "pandas.read_excel" ] ]
guillefix/captionRL-env
[ "7129bf01cfe069a1bd5d38e171b4a5f74a5701ed" ]
[ "src/envs/color_generation.py" ]
[ "import numpy as np\nimport matplotlib.pyplot as plt\nfrom gym.spaces import Box\n\nn_colors = 10\ndef plot_colors(color):\n \"\"\"\n Plots a sample of colors from the color x shade color class.\n\n Parameters\n ----------\n color: str\n Color in red, blue, green.\n shade: str\n Shade in light, dark.\n\n \"\"\"\n color_class = Color(color)\n array = np.zeros([n_colors, n_colors, 3])\n for i in range(n_colors):\n for j in range(n_colors):\n array[i, j, :] = color_class.sample()\n plt.figure()\n plt.imshow(array)\n\nmax_min = 0.8\nmin_max = 0.2\nclass Color:\n def __init__(self, color):\n \"\"\"\n Implements a color class characterized by a color and shade attributes.\n Parameters\n ----------\n color: str\n Color in red, blue, green.\n shade: str\n Shade in light, dark.\n \"\"\"\n self.color = color\n if color == 'blue':\n self.space = Box(low=np.array([0, 0, max_min]).astype(np.float32), high=np.array([min_max, min_max, 1]).astype(np.float32), dtype=np.float32)\n elif color == 'red':\n self.space = Box(low=np.array([max_min, 0, 0]).astype(np.float32), high=np.array([1, min_max, min_max]).astype(np.float32), dtype=np.float32)\n elif color == 'green':\n self.space = Box(low=np.array([0, max_min, 0]).astype(np.float32), high=np.array([min_max, 1, min_max]).astype(np.float32), dtype=np.float32)\n elif color == 'cyan':\n self.space = Box(low=np.array([0, max_min, max_min]).astype(np.float32), high=np.array([min_max, 1, 1]).astype(np.float32), dtype=np.float32)\n elif color == 'yellow':\n self.space = Box(low=np.array([max_min, max_min, 0]).astype(np.float32), high=np.array([1, 1, min_max]).astype(np.float32), dtype=np.float32)\n elif color == 'magenta':\n self.space = Box(low=np.array([max_min, 0, max_min]).astype(np.float32), high=np.array([1, min_max, 1]).astype(np.float32), dtype=np.float32)\n elif color == 'black':\n self.space = Box(low=np.array([0, 0, 0]).astype(np.float32), high=np.array([min_max, min_max, min_max]).astype(np.float32), dtype=np.float32)\n elif color == 'white':\n self.space = Box(low=np.array([max_min, max_min, max_min]).astype(np.float32), high=np.array([1, 1, 1]).astype(np.float32), dtype=np.float32)\n else:\n raise NotImplementedError(\"color is 'red', 'blue' or 'green'\")\n\n def contains(self, rgb):\n \"\"\"\n Whether the class contains a given rgb code.\n Parameters\n ----------\n rgb: 1D nd.array of size 3\n\n Returns\n -------\n contains: Bool\n True if rgb code in given Color class.\n \"\"\"\n return self.space.contains(rgb)\n\n def sample(self):\n \"\"\"\n Sample an rgb code from the Color class\n\n Returns\n -------\n rgb: 1D nd.array of size 3\n \"\"\"\n return np.random.uniform(self.space.low, self.space.high, 3)\n\n\ndef sample_color(color):\n \"\"\"\n Sample an rgb code from the Color class\n\n Parameters\n ----------\n color: str\n Color in red, blue, green.\n shade: str\n Shade in light, dark.\n\n Returns\n -------\n rgb: 1D nd.array of size 3\n \"\"\"\n color_class = Color(color)\n return color_class.sample()\n\ndef infer_color(rgb):\n rgb = rgb.astype(np.float32)\n for c in ['yellow', 'magenta', 'blue', 'green', 'red', 'cyan', 'black', 'white']:\n color_class = Color(c)\n # import pdb; pdb.set_trace()\n if color_class.contains(rgb):\n return c\n raise ValueError\n\nif __name__ == '__main__':\n for c in ['yellow', 'magenta', 'blue', 'green', 'red', 'cyan', 'black', 'white']:\n plot_colors(c)\n plt.show()\n" ]
[ [ "numpy.array", "numpy.zeros", "matplotlib.pyplot.figure", "numpy.random.uniform", "matplotlib.pyplot.show", "matplotlib.pyplot.imshow" ] ]
richardsliu/ray
[ "0c27d925886e1fcfa0a22cb50715ac921091ea83" ]
[ "mnist/inference.py" ]
[ "from typing import Sequence\n\nfrom absl import app\nimport numpy as np\nimport requests\n\n\ndef main(argv: Sequence[str]) -> None:\n resp = requests.get(\n \"http://localhost:8000/mnist\",\n json={\"array\": np.random.randn(28 * 28).tolist()})\n print(resp.json())\n\n\nif __name__ == '__main__':\n app.run(main)\n" ]
[ [ "numpy.random.randn" ] ]
gbernstein6/private_bayesian_expfam
[ "4cd61ab4bd69858b2a233fba76298e269d923e05" ]
[ "Bounded_Suff_Stats/distributions.py" ]
[ "import scipy, scipy.stats\nimport numpy as np\n\n'''\nclass Conjugate_Pair:\n\n def __init__(self):\n\n # let p = number of parameters used by likelihood model\n # let s = number of sufficient statistics used by likelihood model (note multinomial actually uses s-1)\n # let q = number of parameters used by prior model\n\n self.sensitivity = maximum amount addition/removal of an individual will change sufficient statistics\n\n self.prior_parameters = sequence of prior parameters to be used in self.draw_model_parameters(), shape (s,1)\n\n self.num_sufficient_statistics = number of feature functions for this model, to match length of self.draw_sufficient_statistics() return\n\n @staticmethod\n def draw_model_parameters(parameters, size=1):\n # parameters: (prior or posterior) parameters as numpy array of shape (p,1)\n # return: numpy array of shape (p,size)\n\n @staticmethod\n def draw_sufficient_statistics(N, model_parameters):\n # N: number of individuals in population (float or int)\n # model_parameters: sequence returned by self.draw_model_parameters()\n # return: numpy array of shape (s,1)\n\n @staticmethod\n def conjugate_update(N, prior_parameters, sufficient_statistics):\n # N: number of individuals in population (float or int)\n # prior_parameters: prior parameters as numpy array of shape (p,)\n # sufficient_statistics: numpy array of shape (s,1)\n # return: posterior parameters as numpy array of shape (q,)\n\n @staticmethod\n def calculate_sufficient_statistics_CLT_parameters(N, model_parameters):\n # N: number of individuals in population (float or int)\n # model_parameters: parameters as numpy array of shape (p,)\n # return: ss_mean: numpy array of shape (s,1)\n # ss_covariance: numpy array of shape (s,s)\n\n @staticmethod\n def sample_sufficient_statistics(N, conditional_mean, conditional_covariance):\n # N: number of individuals in population (float or int)\n # conditional mean: numpy array of shape (s,1)\n # conditional_covariance: numpy array of shape (s,s)\n # return: sufficient_statistics: numpy array of shape (s,1)\n'''\n\n\nclass Beta_Binomial:\n\n def __init__(self):\n\n self.sensitivity = 1\n\n self.prior_parameters = np.array([[10, 10]]).T # [alpha, beta]\n\n self.num_sufficient_statistics = 1 # one binomial parameter\n\n @staticmethod\n def draw_model_parameters(parameters, size=1):\n return scipy.stats.beta.rvs(parameters[0, 0], parameters[1, 0], size=size)[:, None].T\n\n @staticmethod\n def draw_sufficient_statistics(N, model_parameters):\n return np.array([scipy.stats.binom.rvs(int(N), model_parameters)])[:, None]\n\n @staticmethod\n def conjugate_update(N, prior_parameters, sufficient_statistics):\n\n posterior_parameters = np.array([prior_parameters[0, 0] + sufficient_statistics,\n prior_parameters[1, 0] + N - sufficient_statistics])\n\n # make sure beta parameters are positive\n posterior_parameters = np.maximum(posterior_parameters, .001)\n\n return posterior_parameters\n\n @staticmethod\n def calculate_sufficient_statistics_CLT_parameters(N, model_parameters):\n\n ss_mean = np.array([N * model_parameters])\n ss_covariance = np.array([N * model_parameters * (1 - model_parameters)])\n\n return ss_mean, ss_covariance\n\n @staticmethod\n def sample_sufficient_statistics(N, conditional_mean, conditional_covariance):\n\n conditional_std = np.sqrt(conditional_covariance)\n\n # draw sufficient statistics constrained to [0, N]\n a = (0 - conditional_mean) / conditional_std\n b = (N - conditional_mean) / conditional_std\n\n sufficient_statistics = 
np.array([[scipy.stats.truncnorm.rvs(a, b, loc=conditional_mean, scale=conditional_std)]])\n\n return sufficient_statistics\n\n\nclass Dirichlet_Multinomial:\n # NOTE: If the model has M parameters, then we only carry around M-1 parameters and sufficient statistics\n # so that the parameters can sum to 1 and sufficient statistics can sum to N\n # The last value is only ever added into the sufficient statistics when drawing model parameters\n\n def __init__(self):\n\n self.sensitivity = 1\n\n self.prior_parameters = np.ones((3, 1)) * 5\n\n self.num_sufficient_statistics = len(self.prior_parameters) - 1\n\n @staticmethod\n def draw_model_parameters(parameters, size=1):\n\n model_parameters = scipy.stats.dirichlet.rvs(parameters.flatten(), size=size).T\n\n # only carry around s-1 parameters\n return model_parameters[:-1, :]\n\n @staticmethod\n def draw_sufficient_statistics(N, model_parameters):\n\n # add last parameter back in to sum to 1\n model_parameters = np.vstack((model_parameters, 1.0 - sum(model_parameters)))\n\n sufficient_statistics = np.array([float(x) for x in np.random.multinomial(N, model_parameters.flatten())])[:, None]\n\n # only carry around s-1 sufficient statistics\n sufficient_statistics = sufficient_statistics[:-1]\n\n return sufficient_statistics\n\n @staticmethod\n # first parameter N is unneeded\n def conjugate_update(N, prior_parameters, sufficient_statistics):\n\n sufficient_statistics = np.vstack((sufficient_statistics, N - sum(sufficient_statistics)))\n\n posterior_parameters = prior_parameters + sufficient_statistics\n\n return posterior_parameters\n\n @staticmethod\n def calculate_sufficient_statistics_CLT_parameters(N, model_parameters):\n\n ss_mean = N * model_parameters\n\n ss_covariance = - N * model_parameters.dot(model_parameters.T)\n np.fill_diagonal(ss_covariance, N * model_parameters * (1 - model_parameters))\n\n return ss_mean, ss_covariance\n\n @staticmethod\n def sample_sufficient_statistics(N, conditional_mean, conditional_covariance):\n\n sufficient_statistics = scipy.stats.multivariate_normal.rvs(mean=conditional_mean.flatten(), cov=conditional_covariance, size=1)\n\n # ensure positive values and that at least one count is left over for the tacked on value\n tries = 0\n while not all([ss > 0 for ss in sufficient_statistics[:-1]]) and sum(sufficient_statistics[:-1]) <= N - 1:\n sufficient_statistics = scipy.stats.multivariate_normal.rvs(mean=conditional_mean.flatten(), cov=conditional_covariance, size=1)\n\n if tries > 100:\n raise Exception('Multinomial unable to sample sufficient statistics!')\n\n return sufficient_statistics[:, None]\n" ]
[ [ "numpy.array", "numpy.fill_diagonal", "numpy.ones", "scipy.stats.beta.rvs", "numpy.sqrt", "scipy.stats.truncnorm.rvs", "numpy.maximum" ] ]
volkoshkursk/NeMo
[ "2df92ec9ca3204b7d3cd52f1b133c971de8dc00a" ]
[ "nemo/utils/exp_manager.py" ]
[ "# Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport os\nimport re\nimport subprocess\nimport sys\nimport time\nfrom copy import deepcopy\nfrom dataclasses import dataclass\nfrom datetime import timedelta\nfrom pathlib import Path\nfrom shutil import copy, move\nfrom typing import Any, Dict, List, Optional, Union\n\nimport torch\nfrom hydra.core.hydra_config import HydraConfig\nfrom hydra.utils import get_original_cwd\nfrom omegaconf import DictConfig, OmegaConf, open_dict\nfrom pytorch_lightning.callbacks import Callback, ModelCheckpoint\nfrom pytorch_lightning.callbacks.timer import Interval, Timer\nfrom pytorch_lightning.loggers import LoggerCollection as _LoggerCollection\nfrom pytorch_lightning.loggers import TensorBoardLogger, WandbLogger\nfrom pytorch_lightning.plugins.training_type.ddp import DDPPlugin\nfrom pytorch_lightning.trainer.states import RunningStage\nfrom pytorch_lightning.utilities.distributed import rank_zero_info\nfrom pytorch_lightning.utilities.types import _METRIC\n\nfrom nemo.constants import NEMO_ENV_VARNAME_TESTING, NEMO_ENV_VARNAME_VERSION\nfrom nemo.utils import logging, timers\nfrom nemo.utils.app_state import AppState\nfrom nemo.utils.env_var_parsing import get_envbool\nfrom nemo.utils.exceptions import NeMoBaseException\nfrom nemo.utils.get_rank import is_global_rank_zero\nfrom nemo.utils.lightning_logger_patch import add_filehandlers_to_pl_logger\n\n\nclass NotFoundError(NeMoBaseException):\n \"\"\" Raised when a file or folder is not found\"\"\"\n\n\nclass LoggerMisconfigurationError(NeMoBaseException):\n \"\"\" Raised when a mismatch between trainer.logger and exp_manager occurs\"\"\"\n\n def __init__(self, message):\n message = (\n message\n + \" You can disable lighning's trainer from creating a logger by passing logger=False to its constructor.\"\n )\n super().__init__(message)\n\n\nclass CheckpointMisconfigurationError(NeMoBaseException):\n \"\"\" Raised when a mismatch between trainer.callbacks and exp_manager occurs\"\"\"\n\n\n@dataclass\nclass CallbackParams:\n filepath: Optional[str] = None # Deprecated\n dirpath: Optional[str] = None # If None, exp_manager will attempt to handle the filepath\n filename: Optional[str] = None # If None, exp_manager will attempt to handle the filepath\n monitor: Optional[str] = \"val_loss\"\n verbose: Optional[bool] = True\n save_last: Optional[bool] = True\n save_top_k: Optional[int] = 3\n save_weights_only: Optional[bool] = False\n mode: Optional[str] = \"min\"\n every_n_val_epochs: Optional[int] = 1\n prefix: Optional[str] = None # If None, exp_manager will attempt to handle the filepath\n postfix: str = \".nemo\"\n save_best_model: bool = False\n always_save_nemo: bool = False\n model_parallel_size: Optional[int] = None\n\n\n@dataclass\nclass StepTimingParams:\n reduction: Optional[str] = \"mean\"\n # if True torch.cuda.synchronize() is called on start/stop\n sync_cuda: Optional[bool] = False\n # if positive, defines the size of a sliding 
window for computing mean\n buffer_size: Optional[int] = -1\n\n\n@dataclass\nclass ExpManagerConfig:\n # Log dir creation parameters\n explicit_log_dir: Optional[str] = None\n exp_dir: Optional[str] = None\n name: Optional[str] = None\n version: Optional[str] = None\n use_datetime_version: Optional[bool] = True\n resume_if_exists: Optional[bool] = False\n resume_past_end: Optional[bool] = False\n resume_ignore_no_checkpoint: Optional[bool] = False\n # Logging parameters\n create_tensorboard_logger: Optional[bool] = True\n summary_writer_kwargs: Optional[Dict[Any, Any]] = None\n create_wandb_logger: Optional[bool] = False\n wandb_logger_kwargs: Optional[Dict[Any, Any]] = None\n # Checkpointing parameters\n create_checkpoint_callback: Optional[bool] = True\n checkpoint_callback_params: Optional[CallbackParams] = CallbackParams()\n # Additional exp_manager arguments\n files_to_copy: Optional[List[str]] = None\n # logs timing of train/val/test steps\n log_step_timing: Optional[bool] = True\n step_timing_kwargs: Optional[StepTimingParams] = StepTimingParams()\n model_parallel_size: Optional[int] = None\n\n\nclass TimingCallback(Callback):\n \"\"\"\n Logs execution time of train/val/test steps\n \"\"\"\n\n def __init__(self, timer_kwargs={}):\n self.timer = timers.NamedTimer(**timer_kwargs)\n\n def _on_batch_start(self, name):\n # reset only if we do not return mean of a sliding window\n if self.timer.buffer_size <= 0:\n self.timer.reset(name)\n\n self.timer.start(name)\n\n def _on_batch_end(self, name, pl_module):\n self.timer.stop(name)\n pl_module.log(name, self.timer[name], on_step=True, on_epoch=False)\n\n def on_train_batch_start(self, trainer, pl_module, batch, batch_idx, dataloader_idx):\n self._on_batch_start(\"train_step_timing\")\n\n def on_train_batch_end(self, trainer, pl_module, outputs, batch, batch_idx, dataloader_idx):\n self._on_batch_end(\"train_step_timing\", pl_module)\n\n def on_validation_batch_start(self, trainer, pl_module, batch, batch_idx, dataloader_idx):\n self._on_batch_start(\"validation_step_timing\")\n\n def on_validation_batch_end(self, trainer, pl_module, outputs, batch, batch_idx, dataloader_idx):\n self._on_batch_end(\"validation_step_timing\", pl_module)\n\n def on_test_batch_start(self, trainer, pl_module, batch, batch_idx, dataloader_idx):\n self._on_batch_start(\"test_step_timing\")\n\n def on_test_batch_end(self, trainer, pl_module, outputs, batch, batch_idx, dataloader_idx):\n self._on_batch_end(\"test_step_timing\", pl_module)\n\n def on_before_backward(self, trainer, pl_module, loss):\n self._on_batch_start(\"train_backward_timing\")\n\n def on_after_backward(self, trainer, pl_module):\n self._on_batch_end(\"train_backward_timing\", pl_module)\n\n\ndef exp_manager(trainer: 'pytorch_lightning.Trainer', cfg: Optional[Union[DictConfig, Dict]] = None) -> Path:\n \"\"\"\n exp_manager is a helper function used to manage folders for experiments. It follows the pytorch lightning paradigm\n of exp_dir/model_or_experiment_name/version. If the lightning trainer has a logger, exp_manager will get exp_dir,\n name, and version from the logger. Otherwise it will use the exp_dir and name arguments to create the logging\n directory. exp_manager also allows for explicit folder creation via explicit_log_dir.\n\n The version can be a datetime string or an integer. Datestime version can be disabled if use_datetime_version is set\n to False. 
It optionally creates TensorBoardLogger, WandBLogger, ModelCheckpoint objects from pytorch lightning.\n It copies sys.argv, and git information if available to the logging directory. It creates a log file for each\n process to log their output into.\n\n exp_manager additionally has a resume feature (resume_if_exists) which can be used to continuing training from\n the constructed log_dir. When you need to continue the training repeatedly (like on a cluster which you need\n multiple consecutive jobs), you need to avoid creating the version folders. Therefore from v1.0.0, when\n resume_if_exists is set to True, creating the version folders is ignored.\n\n Args:\n trainer (pytorch_lightning.Trainer): The lightning trainer.\n cfg (DictConfig, dict): Can have the following keys:\n - explicit_log_dir (str, Path): Can be used to override exp_dir/name/version folder creation. Defaults to\n None, which will use exp_dir, name, and version to construct the logging directory.\n - exp_dir (str, Path): The base directory to create the logging directory. Defaults to None, which logs to\n ./nemo_experiments.\n - name (str): The name of the experiment. Defaults to None which turns into \"default\" via name = name or\n \"default\".\n - version (str): The version of the experiment. Defaults to None which uses either a datetime string or\n lightning's TensorboardLogger system of using version_{int}.\n - use_datetime_version (bool): Whether to use a datetime string for version. Defaults to True.\n - resume_if_exists (bool): Whether this experiment is resuming from a previous run. If True, it sets\n trainer.checkpoint_connector.resume_checkpoint_path so that the trainer should auto-resume. exp_manager will move files\n under log_dir to log_dir/run_{int}. Defaults to False. From v1.0.0, when resume_if_exists is True,\n we would not create version folders to make it easier to find the log folder for next runs.\n - resume_past_end (bool): exp_manager errors out if resume_if_exists is True and a checkpoint matching\n *end.ckpt indicating a previous training run fully completed. This behaviour can be disabled, in which\n case the *end.ckpt will be loaded by setting resume_past_end to True. Defaults to False.\n - resume_ignore_no_checkpoint (bool): exp_manager errors out if resume_if_exists is True and no checkpoint\n could be found. This behaviour can be disabled, in which case exp_manager will print a message and\n continue without restoring, by setting resume_ignore_no_checkpoint to True. Defaults to False.\n - create_tensorboard_logger (bool): Whether to create a tensorboard logger and attach it to the pytorch\n lightning trainer. Defaults to True.\n - summary_writer_kwargs (dict): A dictionary of kwargs that can be passed to lightning's TensorboardLogger\n class. Note that log_dir is passed by exp_manager and cannot exist in this dict. Defaults to None.\n - create_wandb_logger (bool): Whether to create a Weights and Baises logger and attach it to the pytorch\n lightning trainer. Defaults to False.\n - wandb_logger_kwargs (dict): A dictionary of kwargs that can be passed to lightning's WandBLogger\n class. Note that name and project are required parameters if create_wandb_logger is True.\n Defaults to None.\n - create_checkpoint_callback (bool): Whether to create a ModelCheckpoint callback and attach it to the\n pytorch lightning trainer. 
The ModelCheckpoint saves the top 3 models with the best \"val_loss\", the most\n recent checkpoint under *last.ckpt, and the final checkpoint after training completes under *end.ckpt.\n Defaults to True.\n - files_to_copy (list): A list of files to copy to the experiment logging directory. Defaults to None which\n copies no files.\n\n returns:\n log_dir (Path): The final logging directory where logging files are saved. Usually the concatenation of\n exp_dir, name, and version.\n \"\"\"\n # Add rank information to logger\n # Note: trainer.global_rank and trainer.is_global_zero are not set until trainer.fit, so have to hack around it\n local_rank = int(os.environ.get(\"LOCAL_RANK\", 0))\n global_rank = trainer.node_rank * trainer.num_gpus + local_rank\n logging.rank = global_rank\n world_size = trainer.world_size\n\n if cfg is None:\n logging.error(\"exp_manager did not receive a cfg argument. It will be disabled.\")\n return\n if trainer.fast_dev_run:\n logging.info(\"Trainer was called with fast_dev_run. exp_manager will return without any functionality.\")\n return\n\n # Ensure passed cfg is compliant with ExpManagerConfig\n schema = OmegaConf.structured(ExpManagerConfig)\n if isinstance(cfg, dict):\n cfg = OmegaConf.create(cfg)\n elif not isinstance(cfg, DictConfig):\n raise ValueError(f\"cfg was type: {type(cfg)}. Expected either a dict or a DictConfig\")\n cfg = OmegaConf.create(OmegaConf.to_container(cfg, resolve=True))\n cfg = OmegaConf.merge(schema, cfg)\n\n error_checks(trainer, cfg) # Ensures that trainer options are compliant with NeMo and exp_manager arguments\n\n log_dir, exp_dir, name, version = get_log_dir(\n trainer=trainer,\n exp_dir=cfg.exp_dir,\n name=cfg.name,\n version=cfg.version,\n explicit_log_dir=cfg.explicit_log_dir,\n use_datetime_version=cfg.use_datetime_version,\n resume_if_exists=cfg.resume_if_exists,\n )\n\n if cfg.resume_if_exists:\n check_resume(trainer, log_dir, cfg.resume_past_end, cfg.resume_ignore_no_checkpoint)\n\n checkpoint_name = name\n # If name returned from get_log_dir is \"\", use cfg.name for checkpointing\n if checkpoint_name is None or checkpoint_name == '':\n checkpoint_name = cfg.name or \"default\"\n cfg.name = name # Used for configure_loggers so that the log_dir is properly set even if name is \"\"\n cfg.version = version\n\n # update app_state with log_dir, exp_dir, etc\n app_state = AppState()\n app_state.log_dir = log_dir\n app_state.exp_dir = exp_dir\n app_state.name = name\n app_state.version = version\n app_state.checkpoint_name = checkpoint_name\n app_state.create_checkpoint_callback = cfg.create_checkpoint_callback\n app_state.checkpoint_callback_params = cfg.checkpoint_callback_params\n\n # Create the logging directory if it does not exist\n os.makedirs(log_dir, exist_ok=True) # Cannot limit creation to global zero as all ranks write to own log file\n logging.info(f'Experiments will be logged at {log_dir}')\n trainer._default_root_dir = log_dir\n\n # Handle logging to file\n if get_envbool(NEMO_ENV_VARNAME_TESTING, False) or world_size <= 32:\n # If NEMO_TESTING is set (debug mode) or if less than 32 ranks save all log files\n log_file = log_dir / f'nemo_log_globalrank-{global_rank}_localrank-{local_rank}.txt'\n logging.add_file_handler(log_file)\n elif world_size <= 256 and local_rank == 0:\n # If less than 256 ranks, try to save 1 log file per \"machine\"\n log_file = log_dir / f'nemo_log_globalrank-{global_rank}_localrank-{local_rank}.txt'\n logging.add_file_handler(log_file)\n elif global_rank == 0:\n # If running more than 
256 ranks, only save 1 log file\n log_file = log_dir / f'nemo_log_globalrank-{global_rank}_localrank-{local_rank}.txt'\n logging.add_file_handler(log_file)\n\n # For some reason, LearningRateLogger requires trainer to have a logger. Safer to create logger on all ranks\n # not just global rank 0.\n if cfg.create_tensorboard_logger or cfg.create_wandb_logger:\n configure_loggers(\n trainer,\n exp_dir,\n cfg.name,\n cfg.version,\n cfg.create_tensorboard_logger,\n cfg.summary_writer_kwargs,\n cfg.create_wandb_logger,\n cfg.wandb_logger_kwargs,\n )\n\n # add loggers timing callbacks\n if cfg.log_step_timing:\n timing_callback = TimingCallback(timer_kwargs=cfg.step_timing_kwargs or {})\n trainer.callbacks.insert(0, timing_callback)\n\n if cfg.create_checkpoint_callback:\n configure_checkpointing(\n trainer, log_dir, checkpoint_name, cfg.resume_if_exists, cfg.checkpoint_callback_params\n )\n\n if is_global_rank_zero():\n # Move files_to_copy to folder and add git information if present\n if cfg.files_to_copy:\n for _file in cfg.files_to_copy:\n copy(Path(_file), log_dir)\n\n # Create files for cmd args and git info\n with open(log_dir / 'cmd-args.log', 'w') as _file:\n _file.write(\" \".join(sys.argv))\n\n # Try to get git hash\n git_repo, git_hash = get_git_hash()\n if git_repo:\n with open(log_dir / 'git-info.log', 'w') as _file:\n _file.write(f'commit hash: {git_hash}')\n _file.write(get_git_diff())\n\n # Add err_file logging to global_rank zero\n logging.add_err_file_handler(log_dir / 'nemo_error_log.txt')\n\n # Add lightning file logging to global_rank zero\n add_filehandlers_to_pl_logger(log_dir / 'lightning_logs.txt', log_dir / 'nemo_error_log.txt')\n\n return log_dir\n\n\ndef error_checks(trainer: 'pytorch_lightning.Trainer', cfg: Optional[Union[DictConfig, Dict]] = None):\n \"\"\"\n Checks that the passed trainer is compliant with NeMo and exp_manager's passed configuration. Checks that:\n - Throws error when hydra has changed the working directory. This causes issues with lightning's DDP\n - Throws error when trainer has loggers defined but create_tensorboard_logger or create_WandB_logger is True\n - Prints error messages when 1) run on multi-node and not Slurm, and 2) run on multi-gpu without DDP\n \"\"\"\n if HydraConfig.initialized() and get_original_cwd() != os.getcwd():\n raise ValueError(\n \"Hydra changed the working directory. This interferes with ExpManger's functionality. Please pass \"\n \"hydra.run.dir=. to your python script.\"\n )\n if trainer.logger is not None and (cfg.create_tensorboard_logger or cfg.create_wandb_logger):\n raise LoggerMisconfigurationError(\n \"The pytorch lightning trainer that was passed to exp_manager contained a logger, and either \"\n f\"create_tensorboard_logger: {cfg.create_tensorboard_logger} or create_wandb_logger: \"\n f\"{cfg.create_wandb_logger} was set to True. 
These can only be used if trainer does not already have a\"\n \" logger.\"\n )\n if trainer.num_nodes > 1 and not check_slurm(trainer):\n logging.error(\n \"You are running multi-node training without SLURM handling the processes.\"\n \" Please note that this is not tested in NeMo and could result in errors.\"\n )\n if trainer.num_gpus > 1 and not isinstance(trainer.accelerator.training_type_plugin, DDPPlugin):\n logging.error(\n \"You are running multi-gpu without ddp.Please note that this is not tested in NeMo and could result in \"\n \"errors.\"\n )\n\n\ndef check_resume(\n trainer: 'pytorch_lightning.Trainer',\n log_dir: str,\n resume_past_end: bool = False,\n resume_ignore_no_checkpoint: bool = False,\n):\n \"\"\"Checks that resume=True was used correctly with the arguments pass to exp_manager. Sets\n trainer.checkpoint_connector.resume_checkpoint_path as necessary.\n\n Returns:\n log_dir (Path): the log_dir\n exp_dir (str): the base exp_dir without name nor version\n name (str): The name of the experiment\n version (str): The version of the experiment\n\n Raises:\n NotFoundError: If resume is True, resume_ignore_no_checkpoint is False, and checkpoints could not be found.\n ValueError: If resume is True, and there were more than 1 checkpoint could found.\n \"\"\"\n\n if not log_dir:\n raise ValueError(f\"Resuming requires the log_dir {log_dir} to be passed to exp_manager\")\n\n checkpoint_dir = Path(Path(log_dir) / \"checkpoints\")\n\n checkpoint = None\n end_checkpoints = list(checkpoint_dir.rglob(\"*end.ckpt\"))\n last_checkpoints = list(checkpoint_dir.rglob(\"*last.ckpt\"))\n if not checkpoint_dir.exists():\n if resume_ignore_no_checkpoint:\n logging.warning(\n f\"There was no checkpoint folder at checkpoint_dir :{checkpoint_dir}. Training from scratch.\"\n )\n return\n else:\n raise NotFoundError(f\"There was no checkpoint folder at checkpoint_dir :{checkpoint_dir}. Cannot resume.\")\n elif len(end_checkpoints) > 0:\n if resume_past_end:\n if len(end_checkpoints) > 1:\n if 'mp_rank' in str(end_checkpoints[0]):\n checkpoint = end_checkpoints[0]\n else:\n raise ValueError(f\"Multiple checkpoints {end_checkpoints} that matches *end.ckpt.\")\n logging.info(f\"Resuming from {end_checkpoints[0]}\")\n else:\n raise ValueError(\n f\"Found {end_checkpoints[0]} indicating that the last training run has already completed.\"\n )\n elif not len(last_checkpoints) > 0:\n if resume_ignore_no_checkpoint:\n logging.warning(f\"There were no checkpoints found in {checkpoint_dir}. Training from scratch.\")\n return\n else:\n raise NotFoundError(f\"There were no checkpoints found in {checkpoint_dir}. 
Cannot resume.\")\n elif len(last_checkpoints) > 1:\n if 'mp_rank' in str(last_checkpoints[0]):\n checkpoint = last_checkpoints[0]\n else:\n raise ValueError(f\"Multiple checkpoints {last_checkpoints} that matches *last.ckpt.\")\n else:\n logging.info(f\"Resuming from {last_checkpoints[0]}\")\n checkpoint = last_checkpoints[0]\n\n trainer.checkpoint_connector.resume_checkpoint_path = str(checkpoint)\n\n if is_global_rank_zero():\n # Check to see if any files exist that need to be moved\n files_to_move = []\n for child in Path(log_dir).iterdir():\n if child.is_file():\n files_to_move.append(child)\n\n if len(files_to_move) > 0:\n # Move old files to a new folder\n other_run_dirs = Path(log_dir).glob(\"run_*\")\n run_count = 0\n for fold in other_run_dirs:\n if fold.is_dir():\n run_count += 1\n new_run_dir = Path(Path(log_dir) / f\"run_{run_count}\")\n new_run_dir.mkdir()\n for _file in files_to_move:\n move(str(_file), str(new_run_dir))\n\n\ndef check_explicit_log_dir(\n trainer: 'pytorch_lightning.Trainer', explicit_log_dir: [Path, str], exp_dir: str, name: str, version: str\n) -> (Path, str, str, str):\n \"\"\" Checks that the passed arguments are compatible with explicit_log_dir.\n\n Returns:\n log_dir (Path): the log_dir\n exp_dir (str): the base exp_dir without name nor version\n name (str): The name of the experiment\n version (str): The version of the experiment\n\n Raise:\n LoggerMisconfigurationError\n \"\"\"\n if trainer.logger is not None:\n raise LoggerMisconfigurationError(\n \"The pytorch lightning trainer that was passed to exp_manager contained a logger and explicit_log_dir: \"\n f\"{explicit_log_dir} was pass to exp_manager. Please remove the logger from the lightning trainer.\"\n )\n # Checking only (explicit_log_dir) vs (exp_dir and version).\n # The `name` will be used as the actual name of checkpoint/archive.\n if exp_dir or version:\n logging.error(\n f\"exp_manager received explicit_log_dir: {explicit_log_dir} and at least one of exp_dir: {exp_dir}, \"\n f\"or version: {version}. Please note that exp_dir, name, and version will be ignored.\"\n )\n if is_global_rank_zero() and Path(explicit_log_dir).exists():\n logging.warning(f\"Exp_manager is logging to {explicit_log_dir}, but it already exists.\")\n return Path(explicit_log_dir), str(explicit_log_dir), \"\", \"\"\n\n\ndef get_log_dir(\n trainer: 'pytorch_lightning.Trainer',\n exp_dir: str = None,\n name: str = None,\n version: str = None,\n explicit_log_dir: str = None,\n use_datetime_version: bool = True,\n resume_if_exists: bool = False,\n) -> (Path, str, str, str):\n \"\"\"\n Obtains the log_dir used for exp_manager.\n\n Returns:\n log_dir (Path): the log_dir\n exp_dir (str): the base exp_dir without name nor version\n name (str): The name of the experiment\n version (str): The version of the experiment\n explicit_log_dir (str): The explicit path to the log folder. Defaults to False.\n use_datetime_version (bool): Uses date and time as the version of the log folder. Defaults to True.\n resume_if_exists (bool): if resume_if_exists of the exp_manager's config is enabled or not. 
When enabled, the\n version folders would not get created.\n\n Raise:\n LoggerMisconfigurationError: If trainer is incompatible with arguments\n NotFoundError: If resume is True, resume_ignore_no_checkpoint is False, and checkpoints could not be found.\n ValueError: If resume is True, and there were more than 1 checkpoint could found.\n \"\"\"\n if explicit_log_dir: # If explicit log_dir was passed, short circuit\n return check_explicit_log_dir(trainer, explicit_log_dir, exp_dir, name, version)\n\n # Default exp_dir to ./nemo_experiments if None was passed\n _exp_dir = exp_dir\n if exp_dir is None:\n _exp_dir = str(Path.cwd() / 'nemo_experiments')\n\n # If the user has already defined a logger for the trainer, use the logger defaults for logging directory\n if trainer.logger is not None:\n if trainer.logger.save_dir:\n if exp_dir:\n raise LoggerMisconfigurationError(\n \"The pytorch lightning trainer that was passed to exp_manager contained a logger, the logger's \"\n f\"save_dir was not None, and exp_dir ({exp_dir}) was not None. If trainer.logger.save_dir \"\n \"exists, exp_manager will use trainer.logger.save_dir as the logging directory and exp_dir \"\n \"must be None.\"\n )\n _exp_dir = trainer.logger.save_dir\n if name:\n raise LoggerMisconfigurationError(\n \"The pytorch lightning trainer that was passed to exp_manager contained a logger, and name: \"\n f\"{name} was also passed to exp_manager. If the trainer contains a \"\n \"logger, exp_manager will use trainer.logger.name, and name passed to exp_manager must be None.\"\n )\n name = trainer.logger.name\n version = f\"version_{trainer.logger.version}\"\n # Use user-defined exp_dir, project_name, exp_name, and versioning options\n else:\n name = name or \"default\"\n version = version or os.environ.get(NEMO_ENV_VARNAME_VERSION, None)\n\n if not version:\n if resume_if_exists:\n logging.warning(\n \"No version folders would be created under the log folder as 'resume_if_exists' is enabled.\"\n )\n version = None\n elif is_global_rank_zero():\n if use_datetime_version:\n version = time.strftime('%Y-%m-%d_%H-%M-%S')\n else:\n tensorboard_logger = TensorBoardLogger(save_dir=Path(_exp_dir), name=name, version=version)\n version = f\"version_{tensorboard_logger.version}\"\n os.environ[NEMO_ENV_VARNAME_VERSION] = \"\" if version is None else version\n\n log_dir = Path(_exp_dir) / Path(str(name)) / Path(\"\" if version is None else str(version))\n return log_dir, str(_exp_dir), name, version\n\n\ndef get_git_hash():\n \"\"\"\n Helper function that tries to get the commit hash if running inside a git folder\n\n returns:\n Bool: Whether the git subprocess ran without error\n str: git subprocess output or error message\n \"\"\"\n try:\n return (\n True,\n subprocess.check_output(['git', 'rev-parse', 'HEAD'], stderr=subprocess.STDOUT).decode(),\n )\n except subprocess.CalledProcessError as err:\n return False, \"{}\\n\".format(err.output.decode(\"utf-8\"))\n\n\ndef get_git_diff():\n \"\"\"\n Helper function that tries to get the git diff if running inside a git folder\n\n returns:\n Bool: Whether the git subprocess ran without error\n str: git subprocess output or error message\n \"\"\"\n try:\n return subprocess.check_output(['git', 'diff'], stderr=subprocess.STDOUT).decode()\n except subprocess.CalledProcessError as err:\n return \"{}\\n\".format(err.output.decode(\"utf-8\"))\n\n\nclass LoggerList(_LoggerCollection):\n \"\"\" A thin wrapper on Lightning's LoggerCollection such that name and version are better aligned with exp_manager\n 
\"\"\"\n\n def __init__(self, _logger_iterable, nemo_name=None, nemo_version=\"\"):\n super().__init__(_logger_iterable)\n self._nemo_name = nemo_name\n self._nemo_version = nemo_version\n\n @property\n def name(self) -> str:\n return self._nemo_name\n\n @property\n def version(self) -> str:\n return self._nemo_version\n\n\ndef configure_loggers(\n trainer: 'pytorch_lightning.Trainer',\n exp_dir: [Path, str],\n name: str,\n version: str,\n create_tensorboard_logger: bool,\n summary_writer_kwargs: dict,\n create_wandb_logger: bool,\n wandb_kwargs: dict,\n):\n \"\"\" Creates TensorboardLogger and/or WandBLogger and attach them to trainer. Raises ValueError if\n summary_writer_kwargs or wandb_kwargs are misconfigured.\n \"\"\"\n # Potentially create tensorboard logger and/or WandBLogger\n logger_list = []\n if create_tensorboard_logger:\n if summary_writer_kwargs is None:\n summary_writer_kwargs = {}\n elif \"log_dir\" in summary_writer_kwargs:\n raise ValueError(\n \"You cannot pass `log_dir` as part of `summary_writer_kwargs`. `log_dir` is handled by lightning's \"\n \"TensorBoardLogger logger.\"\n )\n tensorboard_logger = TensorBoardLogger(save_dir=exp_dir, name=name, version=version, **summary_writer_kwargs)\n logger_list.append(tensorboard_logger)\n logging.info(\"TensorboardLogger has been set up\")\n\n if create_wandb_logger:\n if wandb_kwargs is None:\n wandb_kwargs = {}\n if \"name\" not in wandb_kwargs and \"project\" not in wandb_kwargs:\n raise ValueError(\"name and project are required for wandb_logger\")\n wandb_logger = WandbLogger(save_dir=exp_dir, version=version, **wandb_kwargs)\n\n logger_list.append(wandb_logger)\n logging.info(\"WandBLogger has been set up\")\n\n logger_list = (\n LoggerList(logger_list, nemo_name=name, nemo_version=version) if len(logger_list) > 1 else logger_list[0]\n )\n trainer.logger_connector.configure_logger(logger_list)\n\n\nclass NeMoModelCheckpoint(ModelCheckpoint):\n \"\"\" Light wrapper around Lightning's ModelCheckpoint to force a saved checkpoint on train_end\n \"\"\"\n\n def __init__(\n self,\n always_save_nemo=False,\n save_best_model=False,\n postfix=\".nemo\",\n n_resume=False,\n model_parallel_size=None,\n **kwargs,\n ):\n # Parse and store \"extended\" parameters: save_best model and postfix.\n self.always_save_nemo = always_save_nemo\n self.save_best_model = save_best_model\n self.postfix = postfix\n self.previous_best_path = \"\"\n self.model_parallel_size = model_parallel_size\n\n # `prefix` is deprecated\n if 'prefix' in kwargs:\n self.prefix = kwargs.pop('prefix')\n else:\n self.prefix = \"\"\n\n # Call the parent class constructor with the remaining kwargs.\n super().__init__(**kwargs)\n\n if self.save_top_k != -1 and n_resume:\n logging.debug(\"Checking previous runs\")\n self.nemo_topk_check_previous_run()\n\n def nemo_topk_check_previous_run(self):\n try:\n self.best_k_models\n self.kth_best_model_path\n self.best_model_score\n self.best_model_path\n except AttributeError:\n raise AttributeError(\"Lightning's ModelCheckpoint was updated. 
NeMoModelCheckpoint will need an update.\")\n self.best_k_models = {}\n self.kth_best_model_path = \"\"\n self.best_model_score = None\n self.best_model_path = \"\"\n\n checkpoints = list(Path(self.dirpath).rglob(\"*.ckpt\"))\n for checkpoint in checkpoints:\n if self.model_parallel_size is not None and self.model_parallel_size > 1:\n checkpoint = self._uninject_mp_rank(checkpoint)\n checkpoint = str(checkpoint)\n if checkpoint[-10:] == '-last.ckpt':\n continue\n index = checkpoint.find(self.monitor) + len(self.monitor) + 1 # Find monitor in str + 1 for '='\n if index != -1:\n match = re.search('[A-z]', checkpoint[index:])\n if match:\n value = checkpoint[index : index + match.start() - 1] # -1 due to separator hypen\n self.best_k_models[checkpoint] = float(value)\n if len(self.best_k_models) < 1:\n return # No saved checkpoints yet\n\n _reverse = False if self.mode == \"min\" else True\n\n best_k_models = sorted(self.best_k_models, key=self.best_k_models.get, reverse=_reverse)\n\n ### This section should be ok as rank zero will delete all excess checkpoints, since all other ranks are\n ### instantiated after rank zero. models_to_delete should be 0 for all other ranks.\n if self.model_parallel_size is not None:\n models_to_delete = len(best_k_models) - self.model_parallel_size * self.save_top_k\n else:\n models_to_delete = len(best_k_models) - self.save_top_k\n logging.debug(f'Number of models to delete: {models_to_delete}')\n for _ in range(models_to_delete):\n model = best_k_models.pop(-1)\n self.best_k_models.pop(model)\n self._del_model_without_trainer(model)\n logging.debug(f\"Removed checkpoint: {model}\")\n\n self.kth_best_model_path = best_k_models[-1]\n self.best_model_path = best_k_models[0]\n self.best_model_score = self.best_k_models[self.best_model_path]\n\n @staticmethod\n def _uninject_mp_rank(filepath):\n dirname = os.path.dirname(os.path.dirname(filepath))\n basename = os.path.basename(filepath)\n filepath = os.path.join(dirname, basename)\n return filepath\n\n def on_save_checkpoint(self, trainer, pl_module, checkpoint):\n output = super().on_save_checkpoint(trainer, pl_module, checkpoint)\n if not self.always_save_nemo:\n return output\n\n else:\n # Load the best model and then re-save it\n app_state = AppState()\n if app_state.model_parallel_size is not None and app_state.model_parallel_size > 1:\n raise ValueError(f'always_save_nemo is not implemented for model parallel models.')\n # since we are creating tarfile artifacts we need to update .nemo path\n app_state.model_restore_path = os.path.abspath(\n os.path.expanduser(os.path.join(self.dirpath, self.prefix + self.postfix))\n )\n if self.save_best_model:\n if not os.path.exists(self.best_model_path):\n return output\n\n if self.best_model_path == self.previous_best_path:\n return output\n\n self.previous_model_path = self.best_model_path\n old_state_dict = deepcopy(pl_module.state_dict())\n checkpoint = torch.load(self.best_model_path, map_location='cpu')\n if 'state_dict' in checkpoint:\n checkpoint = checkpoint['state_dict']\n # get a new instanace of the model\n pl_module.load_state_dict(checkpoint, strict=True)\n pl_module.save_to(save_path=app_state.model_restore_path)\n pl_module.load_state_dict(old_state_dict, strict=True)\n else:\n pl_module.save_to(save_path=app_state.model_restore_path)\n return output\n\n def on_train_end(self, trainer, pl_module):\n if trainer.fast_dev_run:\n return None\n\n # Call parent on_train_end() to save the -last checkpoint\n super().on_train_end(trainer, pl_module)\n\n # Load the 
best model and then re-save it\n if self.save_best_model:\n if self.best_model_path == \"\":\n logging.warning(\n f\"{self} was told to save the best checkpoint at the end of training, but no saved checkpoints \"\n \"were found. Saving latest model instead.\"\n )\n else:\n trainer.checkpoint_connector.restore(self.best_model_path)\n\n pl_module.save_to(save_path=os.path.join(self.dirpath, self.prefix + self.postfix))\n\n def _del_model_without_trainer(self, filepath: str) -> None:\n app_state = AppState()\n if app_state.model_parallel_size is not None and app_state.model_parallel_size > 1:\n # filepath needs to be updated to include mp_rank\n dirname = os.path.dirname(filepath)\n basename = os.path.basename(filepath)\n filepath = f'{dirname}/mp_rank_{app_state.model_parallel_rank:02d}/{basename}'\n\n # each model parallel rank needs to remove its model\n if is_global_rank_zero() or (app_state.model_parallel_size is not None and app_state.data_parallel_rank == 0):\n try:\n self._fs.rm(filepath)\n logging.info(f\"Removed checkpoint: {filepath}\")\n except:\n logging.info(f\"Tried to remove checkpoint: {filepath} but failed.\")\n\n\ndef configure_checkpointing(\n trainer: 'pytorch_lightning.Trainer', log_dir: Path, name: str, resume: bool, params: 'DictConfig'\n):\n \"\"\" Adds ModelCheckpoint to trainer. Raises CheckpointMisconfigurationError if trainer already has a ModelCheckpoint\n callback or if trainer.weights_save_path was passed to Trainer.\n \"\"\"\n for callback in trainer.callbacks:\n if isinstance(callback, ModelCheckpoint):\n raise CheckpointMisconfigurationError(\n \"The pytorch lightning trainer that was passed to exp_manager contained a ModelCheckpoint \"\n \"and create_checkpoint_callback was set to True. Please either set create_checkpoint_callback \"\n \"to False, or remove ModelCheckpoint from the lightning trainer\"\n )\n if Path(trainer.weights_save_path) != Path.cwd():\n raise CheckpointMisconfigurationError(\n \"The pytorch lightning was passed weights_save_path. This variable is ignored by exp_manager\"\n )\n\n # Create the callback and attach it to trainer\n if \"filepath\" in params:\n if params.filepath is not None:\n logging.warning(\"filepath is deprecated. Please switch to dirpath and filename instead\")\n if params.dirpath is None:\n params.dirpath = Path(params.filepath).parent\n if params.filename is None:\n params.filename = Path(params.filepath).name\n with open_dict(params):\n del params[\"filepath\"]\n if params.dirpath is None:\n params.dirpath = Path(log_dir / 'checkpoints')\n if params.filename is None:\n params.filename = f'{name}--{{{params.monitor}:.4f}}-{{epoch}}'\n if params.prefix is None:\n params.prefix = name\n NeMoModelCheckpoint.CHECKPOINT_NAME_LAST = params.filename + '-last'\n\n logging.debug(params.dirpath)\n logging.debug(params.filename)\n logging.debug(params.prefix)\n\n if \"val\" in params.monitor:\n if (\n trainer.max_epochs is not None\n and trainer.max_epochs != -1\n and trainer.max_epochs < trainer.check_val_every_n_epoch\n ):\n logging.error(\n \"The checkpoint callback was told to monitor a validation value but trainer.max_epochs(\"\n f\"{trainer.max_epochs}) was less than trainer.check_val_every_n_epoch({trainer.check_val_every_n_epoch}\"\n f\"). It is very likely this run will fail with ModelCheckpoint(monitor='{params.monitor}') not found \"\n \"in the returned metrics. 
Please ensure that validation is run within trainer.max_epochs.\"\n )\n elif trainer.max_steps is not None:\n logging.warning(\n \"The checkpoint callback was told to monitor a validation value and trainer's max_steps was set to \"\n f\"{trainer.max_steps}. Please ensure that max_steps will run for at least \"\n f\"{trainer.check_val_every_n_epoch} epochs to ensure that checkpointing will not error out.\"\n )\n\n checkpoint_callback = NeMoModelCheckpoint(n_resume=resume, **params)\n checkpoint_callback.last_model_path = trainer.checkpoint_connector.resume_checkpoint_path or \"\"\n if params.model_parallel_size is not None and params.model_parallel_size > 1:\n checkpoint_callback.last_model_path = NeMoModelCheckpoint._uninject_mp_rank(\n checkpoint_callback.last_model_path\n )\n trainer.callbacks.append(checkpoint_callback)\n\n\ndef check_slurm(trainer):\n try:\n return trainer.accelerator_connector.is_slurm_managing_tasks\n except AttributeError:\n return False\n\n\nclass StatelessTimer(Timer):\n \"\"\"Extension of PTL timers to be per run.\"\"\"\n\n def __init__(self, duration: timedelta = None, interval: str = Interval.step, verbose: bool = True,) -> None:\n super().__init__(duration, interval, verbose)\n\n def on_save_checkpoint(self, trainer, pl_module, checkpoint) -> Dict[str, Any]:\n return\n\n def on_load_checkpoint(self, trainer, pl_module, callback_state) -> None:\n return\n\n def _check_time_remaining(self, trainer) -> None:\n # Default timer only checks for train time exceeding max_time, this includes time for all stages.\n train_duration = self.time_elapsed(RunningStage.TRAINING)\n validation_duration = self.time_elapsed(RunningStage.VALIDATING)\n test_duration = self.time_elapsed(RunningStage.TESTING)\n total_duration = train_duration + validation_duration + test_duration\n should_stop = total_duration >= self._duration\n # should_stop = trainer.training_type_plugin.broadcast(should_stop)\n should_stop = trainer.training_type_plugin.reduce_boolean_decision(should_stop)\n trainer.should_stop = trainer.should_stop or should_stop\n if should_stop and self._verbose:\n rank_zero_info(f\"Time limit reached. Signaling Trainer to stop.\")\n rank_zero_info(\n f\"Spent {timedelta(seconds=train_duration)} seconds on training, {timedelta(seconds=validation_duration)} seconds on validation and {timedelta(seconds=test_duration)} seconds on testing\"\n )\n" ]
[ [ "torch.load" ] ]
ebranlard/wtDigiTwin
[ "2c1e965ab5fdca10e67b0db9ef87837f5abebc02" ]
[ "wtDigiTwin/fast/fastlib.py" ]
[ "\n# --- For cmd.py\nfrom __future__ import division, print_function\nimport os\nimport subprocess\nimport multiprocessing\n\nimport collections\nimport glob\nimport pandas as pd\nimport numpy as np\nimport distutils.dir_util\nimport shutil \nimport stat\nimport re\n\n# --- External library for io\ntry:\n import weio\nexcept:\n try:\n import welib.weio as weio\n print('Using `weio` from `welib`')\n except:\n print('[WARN] Fastlib relies on the package `weio` to be installed from https://github.com/ebranlard/weio/`')\n# --- Allowing FASTInputFile to be shipped separately..\ntry:\n from weio.fast_input_file import *\n from weio.fast_output_file import *\nexcept:\n try:\n from weio.weio.fast_input_file import *\n from weio.weio.fast_output_file import *\n except:\n try:\n from fast_input_file import * \n from fast_output_file import * \n except:\n pass\n\nFAST_EXE='openfast'\n\n# --------------------------------------------------------------------------------}\n# --- \n# --------------------------------------------------------------------------------{\ndef createStepWind(filename,WSstep=1,WSmin=3,WSmax=25,tstep=100,dt=0.5,tmin=0,tmax=999):\n f = weio.FASTWndFile()\n Steps= np.arange(WSmin,WSmax+WSstep,WSstep)\n print(Steps)\n nCol = len(f.colNames)\n nRow = len(Steps)*2\n M = np.zeros((nRow,nCol));\n M[0,0] = tmin\n M[0,1] = WSmin\n for i,s in enumerate(Steps[:-1]):\n M[2*i+1,0] = tmin + (i+1)*tstep-dt \n M[2*i+2,0] = tmin + (i+1)*tstep\n M[2*i+1,1] = Steps[i]\n if i<len(Steps)-1:\n M[2*i+2,1] = Steps[i+1]\n else:\n M[2*i+2,1] = Steps[-1]\n M[-1,0]= max(tmax, (len(Steps)+1)*tstep)\n M[-1,1]= WSmax\n f.data=pd.DataFrame(data=M,columns=f.colNames)\n #\n print(f.data)\n f.write(filename)\n #plt.plot(M[:,0],M[:,1])\n #plt.show()\n\n #print(f.toDataFrame())\n #pass\n#createStepWind('test.wnd',tstep=200,WSmax=28)\n# createStepWind('test.wnd',tstep=200,WSmin=5,WSmax=7,WSstep=2)\n\n\n# --------------------------------------------------------------------------------}\n# --- Tools for executing FAST\n# --------------------------------------------------------------------------------{\n# --- START cmd.py\ndef run_cmds(inputfiles, exe, parallel=True, showOutputs=True, nCores=None, showCommand=True): \n \"\"\" Run a set of simple commands of the form `exe input_file`\n By default, the commands are run in \"parallel\" (though the method needs to be improved)\n The stdout and stderr may be displayed on screen (`showOutputs`) or hidden. 
\n A better handling is yet required.\n \"\"\"\n Failed=[]\n def _report(p):\n if p.returncode==0:\n print('[ OK ] Input : ',p.input_file)\n else:\n Failed.append(p)\n print('[FAIL] Input : ',p.input_file)\n print(' Directory: '+os.getcwd())\n print(' Command : '+p.cmd)\n print(' Use `showOutputs=True` to debug, or run the command above.')\n #out, err = p.communicate()\n #print('StdOut:\\n'+out)\n #print('StdErr:\\n'+err)\n ps=[]\n iProcess=0\n if nCores is None:\n nCores=multiprocessing.cpu_count()\n if nCores<0:\n nCores=len(inputfiles)+1\n for i,f in enumerate(inputfiles):\n #print('Process {}/{}: {}'.format(i+1,len(inputfiles),f))\n ps.append(run_cmd(f, exe, wait=(not parallel), showOutputs=showOutputs, showCommand=showCommand))\n iProcess += 1\n # waiting once we've filled the number of cores\n # TODO: smarter method with proper queue, here processes are run by chunks\n if parallel:\n if iProcess==nCores:\n for p in ps:\n p.wait()\n for p in ps:\n _report(p)\n ps=[]\n iProcess=0\n # Extra process if not multiptle of nCores (TODO, smarter method)\n for p in ps:\n p.wait()\n for p in ps:\n _report(p)\n # --- Giving a summary\n if len(Failed)==0:\n print('[ OK ] All simulations run successfully.')\n return True\n else:\n print('[FAIL] {}/{} simulations failed:'.format(len(Failed),len(inputfiles)))\n for p in Failed:\n print(' ',p.input_file)\n return False\n\ndef run_cmd(input_file_or_arglist, exe, wait=True, showOutputs=False, showCommand=True):\n \"\"\" Run a simple command of the form `exe input_file` or `exe arg1 arg2` \"\"\"\n # TODO Better capture STDOUT\n if isinstance(input_file_or_arglist, list):\n args= [exe] + input_file_or_arglist\n input_file = ' '.join(input_file_or_arglist)\n input_file_abs = input_file\n else:\n input_file=input_file_or_arglist\n if not os.path.isabs(input_file):\n input_file_abs=os.path.abspath(input_file)\n else:\n input_file_abs=input_file\n if not os.path.exists(exe):\n raise Exception('Executable not found: {}'.format(exe))\n args= [exe,input_file]\n #args = 'cd '+workDir+' && '+ exe +' '+basename\n shell=False\n if showOutputs:\n STDOut= None\n else:\n STDOut= open(os.devnull, 'w') \n if showCommand:\n print('Running: '+' '.join(args))\n if wait:\n class Dummy():\n pass\n p=Dummy()\n p.returncode=subprocess.call(args , stdout=STDOut, stderr=subprocess.STDOUT, shell=shell)\n else:\n p=subprocess.Popen(args, stdout=STDOut, stderr=subprocess.STDOUT, shell=shell)\n # Storing some info into the process\n p.cmd = ' '.join(args)\n p.args = args\n p.input_file = input_file\n p.input_file_abs = input_file_abs\n p.exe = exe\n return p\n# --- END cmd.py\n\ndef run_fastfiles(fastfiles, fastExe=None, parallel=True, showOutputs=True, nCores=None, showCommand=True, reRun=True):\n if fastExe is None:\n fastExe=FAST_EXE\n if not reRun:\n # Figure out which files exist\n newfiles=[]\n for f in fastfiles:\n base=os.path.splitext(f)[0]\n if os.path.exists(base+'.outb') or os.path.exists(base+'.out'):\n print('>>> Skipping existing simulation for: ',f)\n pass\n else:\n newfiles.append(f)\n fastfiles=newfiles\n\n return run_cmds(fastfiles, fastExe, parallel=parallel, showOutputs=showOutputs, nCores=nCores, showCommand=showCommand)\n\ndef run_fast(input_file, fastExe=None, wait=True, showOutputs=False, showCommand=True):\n if fastExe is None:\n fastExe=FAST_EXE\n return run_cmd(input_file, fastExe, wait=wait, showOutputs=showOutputs, showCommand=showCommand)\n\n\ndef writeBatch(batchfile, fastfiles, fastExe=None):\n \"\"\" Write batch file, everything is written relative 
to the batch file\"\"\"\n if fastExe is None:\n fastExe=FAST_EXE\n fastExe_abs = os.path.abspath(fastExe)\n batchfile_abs = os.path.abspath(batchfile)\n batchdir = os.path.dirname(batchfile_abs)\n fastExe_rel = os.path.relpath(fastExe_abs, batchdir)\n with open(batchfile,'w') as f:\n for ff in fastfiles:\n ff_abs = os.path.abspath(ff)\n ff_rel = os.path.relpath(ff_abs, batchdir)\n l = fastExe_rel + ' '+ ff_rel\n f.write(\"%s\\n\" % l)\n\n\ndef removeFASTOuputs(workDir):\n # Cleaning folder\n for f in glob.glob(os.path.join(workDir,'*.out')):\n os.remove(f)\n for f in glob.glob(os.path.join(workDir,'*.outb')):\n os.remove(f)\n for f in glob.glob(os.path.join(workDir,'*.ech')):\n os.remove(f)\n for f in glob.glob(os.path.join(workDir,'*.sum')):\n os.remove(f)\n\n# --------------------------------------------------------------------------------}\n# --- Tools for IO \n# --------------------------------------------------------------------------------{\ndef ED_BldStations(ED):\n \"\"\" Returns ElastoDyn Blade Station positions, useful to know where the outputs are.\n INPUTS:\n - ED: either:\n - a filename of a ElastoDyn input file\n - an instance of FileCl, as returned by reading the file, ED = weio.read(ED_filename)\n\n OUTUPTS:\n - bld_fract: fraction of the blade length were stations are defined\n - r_nodes: spanwise position from the rotor apex of the Blade stations\n \"\"\"\n if not isinstance(ED,FASTInputFile):\n ED = FASTInputFile(ED)\n\n nBldNodes = ED['BldNodes']\n bld_fract = np.arange(1./nBldNodes/2., 1, 1./nBldNodes)\n r_nodes = bld_fract*(ED['TipRad']-ED['HubRad']) + ED['HubRad']\n return bld_fract, r_nodes\n\ndef ED_TwrStations(ED):\n \"\"\" Returns ElastoDyn Tower Station positions, useful to know where the outputs are.\n INPUTS:\n - ED: either:\n - a filename of a ElastoDyn input file\n - an instance of FileCl, as returned by reading the file, ED = weio.read(ED_filename)\n\n OUTPUTS:\n - r_fract: fraction of the towet length were stations are defined\n - h_nodes: height from the *ground* of the stations (not from the Tower base)\n \"\"\"\n if not isinstance(ED,FASTInputFile):\n ED = FASTInputFile(ED)\n\n nTwrNodes = ED['TwrNodes']\n twr_fract = np.arange(1./nTwrNodes/2., 1, 1./nTwrNodes)\n h_nodes = twr_fract*(ED['TowerHt']-ED['TowerBsHt']) + ED['TowerBsHt']\n return twr_fract, h_nodes\n\n\n\ndef ED_BldGag(ED):\n \"\"\" Returns the radial position of ElastoDyn blade gages \n INPUTS:\n - ED: either:\n - a filename of a ElastoDyn input file\n - an instance of FileCl, as returned by reading the file, ED = weio.read(ED_filename)\n OUTPUTS:\n - r_gag: The radial positions of the gages, given from the rotor apex\n \"\"\"\n if not isinstance(ED,FASTInputFile):\n ED = FASTInputFile(ED)\n _,r_nodes= ED_BldStations(ED)\n nOuts = ED['NBlGages']\n if nOuts<=0:\n return np.array([]), np.array([])\n if type(ED['BldGagNd']) is list:\n Inodes = np.asarray(ED['BldGagNd'])\n else:\n Inodes = np.array([ED['BldGagNd']])\n r_gag = r_nodes[ Inodes[:nOuts] -1]\n return r_gag, Inodes\n\ndef ED_TwrGag(ED):\n \"\"\" Returns the heights of ElastoDyn blade gages \n INPUTS:\n - ED: either:\n - a filename of a ElastoDyn input file\n - an instance of FileCl, as returned by reading the file, ED = weio.read(ED_filename)\n OUTPUTS:\n - h_gag: The heights of the gages, given from the ground height (tower base + TowerBsHt)\n \"\"\"\n if not isinstance(ED,FASTInputFile):\n ED = FASTInputFile(ED)\n _,h_nodes= ED_TwrStations(ED)\n nOuts = ED['NTwGages']\n if nOuts<=0:\n return np.array([])\n if type(ED['TwrGagNd']) is 
list:\n Inodes = np.asarray(ED['TwrGagNd'])\n else:\n Inodes = np.array([ED['TwrGagNd']])\n h_gag = h_nodes[ Inodes[:nOuts] -1]\n return h_gag\n\n\ndef AD14_BldGag(AD):\n \"\"\" Returns the radial position of AeroDyn 14 blade gages (based on \"print\" in column 6)\n INPUTS:\n - AD: either:\n - a filename of a AeroDyn input file\n - an instance of FileCl, as returned by reading the file, AD = weio.read(AD_filename)\n OUTPUTS:\n - r_gag: The radial positions of the gages, given from the blade root\n \"\"\"\n if not isinstance(AD,FASTInputFile):\n AD = FASTInputFile(AD)\n\n Nodes=AD['BldAeroNodes'] \n if Nodes.shape[1]==6:\n doPrint= np.array([ n.lower().find('p')==0 for n in Nodes[:,5]])\n else:\n doPrint=np.array([ True for n in Nodes[:,0]])\n\n r_gag = Nodes[doPrint,0].astype(float)\n IR = np.arange(1,len(Nodes)+1)[doPrint]\n return r_gag, IR\n\ndef AD_BldGag(AD,AD_bld,chordOut=False):\n \"\"\" Returns the radial position of AeroDyn blade gages \n INPUTS:\n - AD: either:\n - a filename of a AeroDyn input file\n - an instance of FileCl, as returned by reading the file, AD = weio.read(AD_filename)\n - AD_bld: either:\n - a filename of a AeroDyn Blade input file\n - an instance of FileCl, as returned by reading the file, AD_bld = weio.read(AD_bld_filename)\n OUTPUTS:\n - r_gag: The radial positions of the gages, given from the blade root\n \"\"\"\n if not isinstance(AD,FASTInputFile):\n AD = FASTInputFile(AD)\n if not isinstance(AD_bld,FASTInputFile):\n AD_bld = FASTInputFile(AD_bld)\n #print(AD_bld.keys())\n\n nOuts=AD['NBlOuts']\n if nOuts<=0:\n if chordOut:\n return np.array([]), np.array([])\n else:\n return np.array([])\n INodes = np.array(AD['BlOutNd'][:nOuts])\n r_gag = AD_bld['BldAeroNodes'][INodes-1,0]\n if chordOut:\n chord_gag = AD_bld['BldAeroNodes'][INodes-1,5]\n return r_gag,chord_gag\n else:\n return r_gag\n\ndef BD_BldGag(BD):\n \"\"\" Returns the radial position of BeamDyn blade gages \n INPUTS:\n - BD: either:\n - a filename of a BeamDyn input file\n - an instance of FileCl, as returned by reading the file, BD = weio.read(BD_filename)\n OUTPUTS:\n - r_gag: The radial positions of the gages, given from the rotor apex\n \"\"\"\n if not isinstance(BD,FASTInputFile):\n BD = FASTInputFile(BD)\n\n M = BD['MemberGeom']\n r_nodes = M[:,2] # NOTE: we select the z axis here, and we don't take curvilenear coord\n nOuts = BD['NNodeOuts']\n if nOuts<=0:\n nOuts=0\n if type(BD['OutNd']) is list:\n Inodes = np.asarray(BD['OutNd'])\n else:\n Inodes = np.array([BD['OutNd']])\n r_gag = r_nodes[ Inodes[:nOuts] -1]\n return r_gag, Inodes, r_nodes\n\n# \n# \n# 1, 7, 14, 21, 30, 36, 43, 52, 58 BldGagNd List of blade nodes that have strain gages [1 to BldNodes] (-) [unused if NBlGages=0]\n\n# --------------------------------------------------------------------------------}\n# --- Helper functions for radial data \n# --------------------------------------------------------------------------------{\ndef _HarmonizeSpanwiseData(Name, Columns, vr, R, IR=None) :\n \"\"\" helper function to use with spanwiseAD and spanwiseED \"\"\"\n # --- Data present\n data = [c for _,c in Columns if c is not None]\n ColNames = [n for n,_ in Columns if n is not None]\n Lengths = [len(d) for d in data]\n if len(data)<=0:\n print('[WARN] No spanwise data for '+Name)\n return None, None, None\n\n # --- Harmonize data so that they all have the same length\n nrMax = np.max(Lengths)\n ids=np.arange(nrMax)\n if vr is None:\n bFakeVr=True\n vr_bar = ids/(nrMax-1)\n else:\n vr_bar=vr/R\n bFakeVr=False\n if 
(nrMax)<len(vr_bar):\n vr_bar=vr_bar[1:nrMax]\n elif (nrMax)>len(vr_bar):\n raise Exception('Inconsitent length between radial stations and max index present in output chanels')\n\n for i in np.arange(len(data)):\n d=data[i]\n if len(d)<nrMax:\n Values = np.zeros((nrMax,1))\n Values[:] = np.nan\n Values[1:len(d)] = d\n data[i] = Values\n\n # --- Combine data and remove \n dataStack = np.column_stack([d for d in data])\n ValidRow = np.logical_not([np.isnan(dataStack).all(axis=1)])\n dataStack = dataStack[ValidRow[0],:]\n ids = ids [ValidRow[0]]\n vr_bar = vr_bar [ValidRow[0]]\n\n # --- Create a dataframe\n dfRad = pd.DataFrame(data= dataStack, columns = ColNames)\n\n if bFakeVr:\n dfRad.insert(0, 'i/n_[-]', vr_bar)\n else:\n dfRad.insert(0, 'r/R_[-]', vr_bar)\n if R is not None:\n r = vr_bar*R\n if IR is not None:\n dfRad['Node_[#]']=IR[:nrMax]\n dfRad['i_[#]']=ids+1\n if not bFakeVr:\n dfRad['r_[m]'] = r\n\n return dfRad, nrMax, ValidRow\n\ndef insert_radial_columns(df, vr=None, R=None, IR=None):\n \"\"\"\n Add some columns to the radial data\n \"\"\"\n if df is None:\n return df\n if df.shape[1]==0:\n return None\n nrMax=len(df)\n ids=np.arange(nrMax)\n if vr is None or R is None:\n # Radial position unknown\n vr_bar = ids/(nrMax-1)\n df.insert(0, 'i/n_[-]', vr_bar)\n else:\n vr_bar=vr/R\n if (nrMax)<=len(vr_bar):\n vr_bar=vr_bar[:nrMax]\n elif (nrMax)>len(vr_bar):\n print(vr_bar)\n raise Exception('Inconsitent length between radial stations ({:d}) and max index present in output chanels ({:d})'.format(len(vr_bar),nrMax))\n df.insert(0, 'r/R_[-]', vr_bar)\n\n if IR is not None:\n df['Node_[#]']=IR[:nrMax]\n df['i_[#]']=ids+1\n if vr is not None:\n df['r_[m]'] = vr[:nrMax]\n return df\n\ndef find_matching_columns(Cols, PatternMap):\n ColsInfo=[]\n nrMax=0\n for colpattern,colmap in PatternMap.items():\n # Extracting columns matching pattern\n cols, sIdx = find_matching_pattern(Cols, colpattern)\n if len(cols)>0:\n # Sorting by ID\n cols = np.asarray(cols)\n Idx = np.array([int(s) for s in sIdx])\n Isort = np.argsort(Idx)\n Idx = Idx[Isort]\n cols = cols[Isort]\n col={'name':colmap,'Idx':Idx,'cols':cols}\n nrMax=max(nrMax,np.max(Idx))\n ColsInfo.append(col)\n return ColsInfo,nrMax\n\ndef extract_spanwise_data(ColsInfo, nrMax, df=None,ts=None):\n \"\"\" \n Extract spanwise data based on some column info\n ColsInfo: see find_matching_columns\n \"\"\"\n nCols = len(ColsInfo)\n if nCols==0:\n return None\n if ts is not None:\n Values = np.zeros((nrMax,nCols))\n Values[:] = np.nan\n elif df is not None:\n raise NotImplementedError()\n\n ColNames =[c['name'] for c in ColsInfo]\n\n for ic,c in enumerate(ColsInfo):\n Idx, cols, colname = c['Idx'], c['cols'], c['name']\n for idx,col in zip(Idx,cols):\n Values[idx-1,ic]=ts[col]\n nMissing = np.sum(np.isnan(Values[:,ic]))\n if len(cols)<nrMax:\n #print(Values)\n print('[WARN] Not all values found for {}, missing {}/{}'.format(colname,nMissing,nrMax))\n if len(cols)>nrMax:\n print('[WARN] More values found for {}, found {}/{}'.format(colname,len(cols),nrMax))\n\n df = pd.DataFrame(data=Values, columns=ColNames)\n df = df.reindex(sorted(df.columns), axis=1)\n return df\n\ndef spanwiseColBD(Cols):\n \"\"\" Return column info, available columns and indices that contain BD spanwise data\"\"\"\n BDSpanMap=dict()\n for sB in ['B1','B2','B3']:\n BDSpanMap['^'+sB+'N(\\d)TDxr_\\[m\\]']=sB+'TDxr_[m]'\n BDSpanMap['^'+sB+'N(\\d)TDyr_\\[m\\]']=sB+'TDyr_[m]'\n BDSpanMap['^'+sB+'N(\\d)TDzr_\\[m\\]']=sB+'TDzr_[m]'\n return find_matching_columns(Cols, BDSpanMap)\n\ndef 
spanwiseColED(Cols):\n \"\"\" Return column info, available columns and indices that contain ED spanwise data\"\"\"\n EDSpanMap=dict()\n for sB in ['b1','b2','b3']:\n SB=sB.upper()\n EDSpanMap['^Spn(\\d)ALx'+sB+'_\\[m/s^2\\]']=SB+'ALx_[m/s^2]'\n EDSpanMap['^Spn(\\d)ALy'+sB+'_\\[m/s^2\\]']=SB+'ALy_[m/s^2]'\n EDSpanMap['^Spn(\\d)ALz'+sB+'_\\[m/s^2\\]']=SB+'ALz_[m/s^2]'\n EDSpanMap['^Spn(\\d)TDx'+sB+'_\\[m\\]' ]=SB+'TDx_[m]'\n EDSpanMap['^Spn(\\d)TDy'+sB+'_\\[m\\]' ]=SB+'TDy_[m]'\n EDSpanMap['^Spn(\\d)TDz'+sB+'_\\[m\\]' ]=SB+'TDz_[m]'\n EDSpanMap['^Spn(\\d)RDx'+sB+'_\\[deg\\]' ]=SB+'RDx_[deg]'\n EDSpanMap['^Spn(\\d)RDy'+sB+'_\\[deg\\]' ]=SB+'RDy_[deg]'\n EDSpanMap['^Spn(\\d)RDz'+sB+'_\\[deg\\]' ]=SB+'RDz_[deg]'\n EDSpanMap['^Spn(\\d)FLx'+sB+'_\\[kN\\]' ]=SB+'FLx_[kN]'\n EDSpanMap['^Spn(\\d)FLy'+sB+'_\\[kN\\]' ]=SB+'FLy_[kN]'\n EDSpanMap['^Spn(\\d)FLz'+sB+'_\\[kN\\]' ]=SB+'FLz_[kN]'\n EDSpanMap['^Spn(\\d)MLy'+sB+'_\\[kN-m\\]' ]=SB+'MLx_[kN-m]'\n EDSpanMap['^Spn(\\d)MLx'+sB+'_\\[kN-m\\]' ]=SB+'MLy_[kN-m]' \n EDSpanMap['^Spn(\\d)MLz'+sB+'_\\[kN-m\\]' ]=SB+'MLz_[kN-m]'\n return find_matching_columns(Cols, EDSpanMap)\n\ndef spanwiseColAD(Cols):\n \"\"\" Return column info, available columns and indices that contain AD spanwise data\"\"\"\n ADSpanMap=dict()\n for sB in ['B1','B2','B3']:\n ADSpanMap['^[A]*'+sB+'N(\\d*)Alpha_\\[deg\\]']=sB+'Alpha_[deg]'\n ADSpanMap['^[A]*'+sB+'N(\\d*)AOA_\\[deg\\]' ]=sB+'Alpha_[deg]' # DBGOuts\n ADSpanMap['^[A]*'+sB+'N(\\d*)AxInd_\\[-\\]' ]=sB+'AxInd_[-]' \n ADSpanMap['^[A]*'+sB+'N(\\d*)TnInd_\\[-\\]' ]=sB+'TnInd_[-]' \n ADSpanMap['^[A]*'+sB+'N(\\d*)AIn_\\[deg\\]' ]=sB+'AxInd_[-]' # DBGOuts NOTE BUG Unit\n ADSpanMap['^[A]*'+sB+'N(\\d*)ApI_\\[deg\\]' ]=sB+'TnInd_[-]' # DBGOuts NOTE BUG Unit\n ADSpanMap['^[A]*'+sB+'N(\\d*)AIn_\\[-\\]' ]=sB+'AxInd_[-]' # DBGOuts\n ADSpanMap['^[A]*'+sB+'N(\\d*)ApI_\\[-\\]' ]=sB+'TnInd_[-]' # DBGOuts\n ADSpanMap['^[A]*'+sB+'N(\\d*)Uin_\\[m/s\\]' ]=sB+'Uin_[m/s]' # DBGOuts\n ADSpanMap['^[A]*'+sB+'N(\\d*)Uit_\\[m/s\\]' ]=sB+'Uit_[m/s]' # DBGOuts\n ADSpanMap['^[A]*'+sB+'N(\\d*)Uir_\\[m/s\\]' ]=sB+'Uir_[m/s]' # DBGOuts\n ADSpanMap['^[A]*'+sB+'N(\\d*)Cl_\\[-\\]' ]=sB+'Cl_[-]' \n ADSpanMap['^[A]*'+sB+'N(\\d*)Cd_\\[-\\]' ]=sB+'Cd_[-]' \n ADSpanMap['^[A]*'+sB+'N(\\d*)Cm_\\[-\\]' ]=sB+'Cm_[-]' \n ADSpanMap['^[A]*'+sB+'N(\\d*)Cx_\\[-\\]' ]=sB+'Cx_[-]' \n ADSpanMap['^[A]*'+sB+'N(\\d*)Cy_\\[-\\]' ]=sB+'Cy_[-]' \n ADSpanMap['^[A]*'+sB+'N(\\d*)Cn_\\[-\\]' ]=sB+'Cn_[-]' \n ADSpanMap['^[A]*'+sB+'N(\\d*)Ct_\\[-\\]' ]=sB+'Ct_[-]' \n ADSpanMap['^[A]*'+sB+'N(\\d*)Re_\\[-\\]' ]=sB+'Re_[-]' \n ADSpanMap['^[A]*'+sB+'N(\\d*)Vrel_\\[m/s\\]' ]=sB+'Vrel_[m/s]' \n ADSpanMap['^[A]*'+sB+'N(\\d*)Theta_\\[deg\\]']=sB+'Theta_[deg]'\n ADSpanMap['^[A]*'+sB+'N(\\d*)Phi_\\[deg\\]' ]=sB+'Phi_[deg]'\n ADSpanMap['^[A]*'+sB+'N(\\d*)Twst_\\[deg\\]' ]=sB+'Twst_[deg]' #DBGOuts\n ADSpanMap['^[A]*'+sB+'N(\\d*)Curve_\\[deg\\]']=sB+'Curve_[deg]'\n ADSpanMap['^[A]*'+sB+'N(\\d*)Vindx_\\[m/s\\]']=sB+'Vindx_[m/s]'\n ADSpanMap['^[A]*'+sB+'N(\\d*)Vindy_\\[m/s\\]']=sB+'Vindy_[m/s]'\n ADSpanMap['^[A]*'+sB+'N(\\d*)Fx_\\[N/m\\]' ]=sB+'Fx_[N/m]' \n ADSpanMap['^[A]*'+sB+'N(\\d*)Fy_\\[N/m\\]' ]=sB+'Fy_[N/m]' \n ADSpanMap['^[A]*'+sB+'N(\\d*)Fl_\\[N/m\\]' ]=sB+'Fl_[N/m]' \n ADSpanMap['^[A]*'+sB+'N(\\d*)Fd_\\[N/m\\]' ]=sB+'Fd_[N/m]' \n ADSpanMap['^[A]*'+sB+'N(\\d*)Fn_\\[N/m\\]' ]=sB+'Fn_[N/m]' \n ADSpanMap['^[A]*'+sB+'N(\\d*)Ft_\\[N/m\\]' ]=sB+'Ft_[N/m]' \n ADSpanMap['^[A]*'+sB+'N(\\d*)VUndx_\\[m/s\\]']=sB+'VUndx_[m/s]'\n ADSpanMap['^[A]*'+sB+'N(\\d*)VUndy_\\[m/s\\]']=sB+'VUndy_[m/s]'\n 
ADSpanMap['^[A]*'+sB+'N(\\d*)VUndz_\\[m/s\\]']=sB+'VUndz_[m/s]'\n ADSpanMap['^[A]*'+sB+'N(\\d*)VDisx_\\[m/s\\]']=sB+'VDisx_[m/s]'\n ADSpanMap['^[A]*'+sB+'N(\\d*)VDisy_\\[m/s\\]']=sB+'VDisy_[m/s]'\n ADSpanMap['^[A]*'+sB+'N(\\d*)VDisz_\\[m/s\\]']=sB+'VDisz_[m/s]'\n ADSpanMap['^[A]*'+sB+'N(\\d*)Vx_\\[m/s\\]' ]=sB+'Vx_[m/s]'\n ADSpanMap['^[A]*'+sB+'N(\\d*)Vy_\\[m/s\\]' ]=sB+'Vy_[m/s]'\n ADSpanMap['^[A]*'+sB+'N(\\d*)Vz_\\[m/s\\]' ]=sB+'Vz_[m/s]'\n ADSpanMap['^[A]*'+sB+'N(\\d*)DynP_\\[Pa\\]' ]=sB+'DynP_[Pa]' \n ADSpanMap['^[A]*'+sB+'N(\\d*)M_\\[-\\]' ]=sB+'M_[-]' \n ADSpanMap['^[A]*'+sB+'N(\\d*)Mm_\\[N-m/m\\]' ]=sB+'Mm_[N-m/m]' \n ADSpanMap['^[A]*'+sB+'N(\\d*)Gam_\\[' ]=sB+'Gam_[m^2/s]' #DBGOuts\n # --- AD 14\n ADSpanMap['^Alpha(\\d*)_\\[deg\\]' ]='Alpha_[deg]' \n ADSpanMap['^DynPres(\\d*)_\\[Pa\\]' ]='DynPres_[Pa]' \n ADSpanMap['^CLift(\\d*)_\\[-\\]' ]='CLift_[-]' \n ADSpanMap['^CDrag(\\d*)_\\[-\\]' ]='CDrag_[-]' \n ADSpanMap['^CNorm(\\d*)_\\[-\\]' ]='CNorm_[-]' \n ADSpanMap['^CTang(\\d*)_\\[-\\]' ]='CTang_[-]' \n ADSpanMap['^CMomt(\\d*)_\\[-\\]' ]='CMomt_[-]' \n ADSpanMap['^Pitch(\\d*)_\\[deg\\]' ]='Pitch_[deg]' \n ADSpanMap['^AxInd(\\d*)_\\[-\\]' ]='AxInd_[-]' \n ADSpanMap['^TanInd(\\d*)_\\[-\\]' ]='TanInd_[-]' \n ADSpanMap['^ForcN(\\d*)_\\[N\\]' ]='ForcN_[N]' \n ADSpanMap['^ForcT(\\d*)_\\[N\\]' ]='ForcT_[N]' \n ADSpanMap['^Pmomt(\\d*)_\\[N-m\\]' ]='Pmomt_[N-N]' \n ADSpanMap['^ReNum(\\d*)_\\[x10^6\\]']='ReNum_[x10^6]'\n ADSpanMap['^Gamma(\\d*)_\\[m^2/s\\]']='Gamma_[m^2/s]'\n\n return find_matching_columns(Cols, ADSpanMap)\n\ndef insert_extra_columns_AD(dfRad, tsAvg, vr=None, rho=None, R=None, nB=None, chord=None):\n # --- Compute additional values (AD15 only)\n if dfRad is None:\n return None\n if dfRad.shape[1]==0:\n return dfRad\n if chord is not None:\n if vr is not None:\n chord =chord[0:len(dfRad)]\n for sB in ['B1','B2','B3']:\n try:\n vr_bar=vr/R\n Fx = dfRad[sB+'Fx_[N/m]']\n U0 = tsAvg['Wind1VelX_[m/s]']\n Ct=nB*Fx/(0.5 * rho * 2 * U0**2 * np.pi * vr)\n Ct[vr<0.01*R] = 0\n dfRad[sB+'Ctloc_[-]'] = Ct\n CT=2*np.trapz(vr_bar*Ct,vr_bar)\n dfRad[sB+'CtAvg_[-]']= CT*np.ones(vr.shape)\n except:\n pass\n try:\n dfRad[sB+'Gamma_[m^2/s]'] = 1/2 * chord* dfRad[sB+'Vrel_[m/s]'] * dfRad[sB+'Cl_[-]'] \n except:\n pass\n try: \n if not sB+'Vindx_[m/s]' in dfRad.columns:\n dfRad[sB+'Vindx_[m/s]']= -dfRad[sB+'AxInd_[-]'].values * dfRad[sB+'Vx_[m/s]'].values \n dfRad[sB+'Vindy_[m/s]']= dfRad[sB+'TnInd_[-]'].values * dfRad[sB+'Vy_[m/s]'].values \n except:\n pass\n return dfRad\n\n\n\ndef spanwisePostPro(FST_In=None,avgMethod='constantwindow',avgParam=5,out_ext='.outb',df=None):\n \"\"\"\n Postprocess FAST radial data\n\n INPUTS:\n - FST_IN: Fast .fst input file\n - avgMethod='periods', avgParam=2: average over 2 last periods, Needs Azimuth sensors!!!\n - avgMethod='constantwindow', avgParam=5: average over 5s of simulation\n - postprofile: outputfile to write radial data\n \"\"\"\n # --- Opens Fast output and performs averaging\n if df is None:\n df = FASTOutputFile(FST_In.replace('.fst',out_ext)).toDataFrame()\n returnDF=True\n else:\n returnDF=False\n # NOTE: spanwise script doest not support duplicate columns\n df = df.loc[:,~df.columns.duplicated()]\n dfAvg = averageDF(df,avgMethod=avgMethod ,avgParam=avgParam) # NOTE: average 5 last seconds\n\n # --- Extract info (e.g. 
radial positions) from Fast input file\n # We don't have a .fst input file, so we'll rely on some default values for \"r\"\n rho = 1.225\n chord = None\n # --- Extract radial positions of output channels\n r_AD, r_ED, r_BD, IR_AD, IR_ED, IR_BD, R, r_hub, fst = FASTRadialOutputs(FST_In, OutputCols=df.columns.values)\n if R is None: \n R=1\n try:\n chord = fst.AD.Bld1['BldAeroNodes'][:,5] # Full span\n except:\n pass\n try:\n rho = fst.AD['Rho']\n except:\n try:\n rho = fst.AD['AirDens']\n except:\n pass\n #print('r_AD:', r_AD)\n #print('r_ED:', r_ED)\n #print('r_BD:', r_BD)\n #print('I_AD:', IR_AD)\n #print('I_ED:', IR_ED)\n #print('I_BD:', IR_BD)\n # --- Extract radial data and export to csv if needed\n dfRad_AD = None\n dfRad_ED = None\n dfRad_BD = None\n Cols=dfAvg.columns.values\n # --- AD\n ColsInfoAD, nrMaxAD = spanwiseColAD(Cols)\n dfRad_AD = extract_spanwise_data(ColsInfoAD, nrMaxAD, df=None, ts=dfAvg.iloc[0])\n dfRad_AD = insert_extra_columns_AD(dfRad_AD, dfAvg.iloc[0], vr=r_AD, rho=rho, R=R, nB=3, chord=chord)\n dfRad_AD = insert_radial_columns(dfRad_AD, r_AD, R=R, IR=IR_AD)\n # --- ED\n ColsInfoED, nrMaxED = spanwiseColED(Cols)\n dfRad_ED = extract_spanwise_data(ColsInfoED, nrMaxED, df=None, ts=dfAvg.iloc[0])\n dfRad_ED = insert_radial_columns(dfRad_ED, r_ED, R=R, IR=IR_ED)\n # --- BD\n ColsInfoBD, nrMaxBD = spanwiseColBD(Cols)\n dfRad_BD = extract_spanwise_data(ColsInfoBD, nrMaxBD, df=None, ts=dfAvg.iloc[0])\n dfRad_BD = insert_radial_columns(dfRad_BD, r_BD, R=R, IR=IR_BD)\n if returnDF:\n return dfRad_ED , dfRad_AD, dfRad_BD, df\n else:\n return dfRad_ED , dfRad_AD, dfRad_BD\n\n\n\ndef spanwisePostProRows(df, FST_In=None):\n \"\"\" \n Returns a 3D matrix: n x nSpan x nColumn where df is of size n x nColumn\n\n NOTE: this is really not optimal. Spanwise columns should be extracted only once..\n \"\"\"\n # --- Extract info (e.g. 
radial positions) from Fast input file\n # We don't have a .fst input file, so we'll rely on some default values for \"r\"\n rho = 1.225\n chord = None\n # --- Extract radial positions of output channels\n r_AD, r_ED, r_BD, IR_AD, IR_ED, IR_BD, R, r_hub, fst = FASTRadialOutputs(FST_In, OutputCols=df.columns.values)\n #print('r_AD:', r_AD)\n #print('r_ED:', r_ED)\n #print('r_BD:', r_BD)\n if R is None: \n R=1\n try:\n chord = fst.AD.Bld1['BldAeroNodes'][:,5] # Full span\n except:\n pass\n try:\n rho = fst.AD['Rho']\n except:\n try:\n rho = fst.AD['AirDens']\n except:\n pass\n # --- Extract radial data for each azimuthal average\n M_AD=None\n M_ED=None\n M_BD=None\n Col_AD=None\n Col_ED=None\n Col_BD=None\n v = df.index.values\n\n # --- Getting Column info\n Cols=df.columns.values\n if r_AD is not None:\n ColsInfoAD, nrMaxAD = spanwiseColAD(Cols)\n if r_ED is not None:\n ColsInfoED, nrMaxED = spanwiseColED(Cols)\n if r_BD is not None:\n ColsInfoBD, nrMaxBD = spanwiseColBD(Cols)\n for i,val in enumerate(v):\n if r_AD is not None:\n dfRad_AD = extract_spanwise_data(ColsInfoAD, nrMaxAD, df=None, ts=df.iloc[i])\n dfRad_AD = insert_extra_columns_AD(dfRad_AD, df.iloc[i], vr=r_AD, rho=rho, R=R, nB=3, chord=chord)\n dfRad_AD = insert_radial_columns(dfRad_AD, r_AD, R=R, IR=IR_AD)\n if i==0:\n M_AD = np.zeros((len(v), len(dfRad_AD), len(dfRad_AD.columns)))\n Col_AD=dfRad_AD.columns.values\n M_AD[i, :, : ] = dfRad_AD.values\n if r_ED is not None and len(r_ED)>0:\n dfRad_ED = extract_spanwise_data(ColsInfoED, nrMaxED, df=None, ts=df.iloc[i])\n dfRad_ED = insert_radial_columns(dfRad_ED, r_ED, R=R, IR=IR_ED)\n if i==0:\n M_ED = np.zeros((len(v), len(dfRad_ED), len(dfRad_ED.columns)))\n Col_ED=dfRad_ED.columns.values\n M_ED[i, :, : ] = dfRad_ED.values\n if r_BD is not None and len(r_BD)>0:\n dfRad_BD = extract_spanwise_data(ColsInfoBD, nrMaxBD, df=None, ts=df.iloc[i])\n dfRad_BD = insert_radial_columns(dfRad_BD, r_BD, R=R, IR=IR_BD)\n if i==0:\n M_BD = np.zeros((len(v), len(dfRad_BD), len(dfRad_BD.columns)))\n Col_BD=dfRad_BD.columns.values\n M_BD[i, :, : ] = dfRad_BD.values\n return M_AD, Col_AD, M_ED, Col_ED, M_BD, Col_BD\n\n\ndef FASTRadialOutputs(FST_In, OutputCols=None):\n \"\"\" Returns radial positions where FAST has outputs\n INPUTS:\n FST_In: fast input file (.fst)\n \"\"\"\n R = None\n r_hub =0\n r_AD = None \n r_ED = None\n r_BD = None\n IR_ED = None\n IR_AD = None\n IR_BD = None\n fst=None\n if FST_In is not None:\n fst = weio.FASTInputDeck(FST_In, readlist=['AD','ED','BD'])\n # NOTE: all this below should be in FASTInputDeck\n if fst.version == 'F7':\n # --- FAST7\n if not hasattr(fst,'AD'):\n raise Exception('The AeroDyn file couldn''t be found or read, from main file: '+FST_In)\n r_AD,IR_AD = AD14_BldGag(fst.AD)\n R = fst.fst['TipRad']\n try:\n rho = fst.AD['Rho']\n except:\n rho = fst.AD['AirDens']\n else:\n # --- OpenFAST 2\n R = None\n\n # --- ElastoDyn\n if not hasattr(fst,'ED'):\n print('[WARN] The Elastodyn file couldn''t be found or read, from main file: '+FST_In)\n #raise Exception('The Elastodyn file couldn''t be found or read, from main file: '+FST_In)\n else:\n R = fst.ED['TipRad']\n r_hub = fst.ED['HubRad']\n r_ED, IR_ED = ED_BldGag(fst.ED)\n\n # --- BeamDyn\n if hasattr(fst,'BD'):\n r_BD, IR_BD, r_BD_All = BD_BldGag(fst.BD)\n r_BD= r_BD+r_hub\n if R is None:\n R = r_BD_All[-1] # just in case ED file missing\n\n # --- AeroDyn\n if not hasattr(fst,'AD'):\n print('[WARN] The AeroDyn file couldn''t be found or read, from main file: '+FST_In)\n #raise Exception('The AeroDyn file 
couldn''t be found or read, from main file: '+FST_In)\n else:\n\n if fst.ADversion == 'AD15':\n if not hasattr(fst.AD,'Bld1'):\n raise Exception('The AeroDyn blade file couldn''t be found or read, from main file: '+FST_In)\n \n if 'B1N001Cl_[-]' in OutputCols or np.any(np.char.find(list(OutputCols),'AB1N')==0):\n # This was compiled with all outs\n r_AD = fst.AD.Bld1['BldAeroNodes'][:,0] # Full span\n r_AD += r_hub\n IR_AD = None\n else:\n r_AD,_ = AD_BldGag(fst.AD,fst.AD.Bld1, chordOut = True) # Only at Gages locations\n\n elif fst.ADversion == 'AD14':\n r_AD,IR_AD = AD14_BldGag(fst.AD)\n\n else:\n raise Exception('AeroDyn version unknown')\n return r_AD, r_ED, r_BD, IR_AD, IR_ED, IR_BD, R, r_hub, fst\n\n\n\ndef addToOutlist(OutList, Signals):\n if not isinstance(Signals,list):\n raise Exception('Signals must be a list')\n for s in Signals:\n ss=s.split()[0].strip().strip('\"').strip('\\'')\n AlreadyIn = any([o.find(ss)==1 for o in OutList ])\n if not AlreadyIn:\n OutList.append(s)\n return OutList\n\n\n\n# --------------------------------------------------------------------------------}\n# --- Generic df \n# --------------------------------------------------------------------------------{\ndef remap_df(df, ColMap, bColKeepNewOnly=False, inPlace=False):\n \"\"\" Add/rename columns of a dataframe, potentially perform operations between columns\n\n Example:\n\n ColumnMap={\n 'WS_[m/s]' : '{Wind1VelX_[m/s]}' , # create a new column from existing one\n 'RtTSR_[-]' : '{RtTSR_[-]} * 2 + {RtAeroCt_[-]}' , # change value of column\n 'RotSpeed_[rad/s]' : '{RotSpeed_[rpm]} * 2*np.pi/60 ', # new column [rpm] -> [rad/s]\n }\n # Read\n df = weio.read('FASTOutBin.outb').toDataFrame()\n # Change columns based on formulae, potentially adding new columns\n df = fastlib.remap_df(df, ColumnMap, inplace=True)\n\n \"\"\"\n if not inPlace:\n df=df.copy()\n ColMapMiss=[]\n ColNew=[]\n RenameMap=dict()\n for k0,v in ColMap.items():\n k=k0.strip()\n v=v.strip()\n if v.find('{')>=0:\n search_results = re.finditer(r'\\{.*?\\}', v)\n expr=v\n # For more advanced operations, we use an eval\n bFail=False\n for item in search_results:\n col=item.group(0)[1:-1]\n if col not in df.columns:\n ColMapMiss.append(col)\n bFail=True\n expr=expr.replace(item.group(0),'df[\\''+col+'\\']')\n #print(k0, '=', expr)\n if not bFail:\n df[k]=eval(expr)\n ColNew.append(k)\n else:\n print('[WARN] Column not present in dataframe, cannot evaluate: ',expr)\n else:\n #print(k0,'=',v)\n if v not in df.columns:\n ColMapMiss.append(v)\n print('[WARN] Column not present in dataframe: ',v)\n else:\n RenameMap[k]=v\n\n # Applying renaming only now so that expressions may be applied in any order\n for k,v in RenameMap.items():\n k=k.strip()\n iCol = list(df.columns).index(v)\n df.columns.values[iCol]=k\n ColNew.append(k)\n df.columns = df.columns.values # Hack to ensure columns are updated\n\n if len(ColMapMiss)>0:\n print('[FAIL] The following columns were not found in the dataframe:',ColMapMiss)\n #print('Available columns are:',df.columns.values)\n\n if bColKeepNewOnly:\n ColNew = [c for c,_ in ColMap.items() if c in ColNew]# Making sure we respec order from user\n ColKeepSafe = [c for c in ColNew if c in df.columns.values]\n ColKeepMiss = [c for c in ColNew if c not in df.columns.values]\n if len(ColKeepMiss)>0:\n print('[WARN] Signals missing and omitted for ColKeep:\\n '+'\\n '.join(ColKeepMiss))\n df=df[ColKeepSafe]\n return df\n\n# --------------------------------------------------------------------------------}\n# --- Template replace \n# 
--------------------------------------------------------------------------------{\ndef handleRemoveReadonlyWin(func, path, exc_info):\n \"\"\"\n Error handler for ``shutil.rmtree``.\n If the error is due to an access error (read only file)\n it attempts to add write permission and then retries.\n Usage : ``shutil.rmtree(path, onerror=onerror)``\n \"\"\"\n if not os.access(path, os.W_OK):\n # Is the error an access error ?\n os.chmod(path, stat.S_IWUSR)\n func(path)\n else:\n raise\n\n\ndef copyTree(src, dst):\n \"\"\" \n Copy a directory to another one, overwritting files if necessary.\n copy_tree from distutils and copytree from shutil fail on Windows (in particular on git files)\n \"\"\"\n def forceMergeFlatDir(srcDir, dstDir):\n if not os.path.exists(dstDir):\n os.makedirs(dstDir)\n for item in os.listdir(srcDir):\n srcFile = os.path.join(srcDir, item)\n dstFile = os.path.join(dstDir, item)\n forceCopyFile(srcFile, dstFile)\n\n def forceCopyFile (sfile, dfile):\n # ---- Handling error due to wrong mod\n if os.path.isfile(dfile):\n if not os.access(dfile, os.W_OK):\n os.chmod(dfile, stat.S_IWUSR)\n #print(sfile, ' > ', dfile)\n shutil.copy2(sfile, dfile)\n\n def isAFlatDir(sDir):\n for item in os.listdir(sDir):\n sItem = os.path.join(sDir, item)\n if os.path.isdir(sItem):\n return False\n return True\n\n for item in os.listdir(src):\n s = os.path.join(src, item)\n d = os.path.join(dst, item)\n if os.path.isfile(s):\n if not os.path.exists(dst):\n os.makedirs(dst)\n forceCopyFile(s,d)\n if os.path.isdir(s):\n isRecursive = not isAFlatDir(s)\n if isRecursive:\n copyTree(s, d)\n else:\n forceMergeFlatDir(s, d)\n\n\ndef templateReplaceGeneral(PARAMS, templateDir=None, outputDir=None, main_file=None, removeAllowed=False, removeRefSubFiles=False, oneSimPerDir=False):\n \"\"\" Generate inputs files by replacing different parameters from a template file.\n The generated files are placed in the output directory `outputDir` \n The files are read and written using the library `weio`. \n The template file is read and its content can be changed like a dictionary.\n Each item of `PARAMS` correspond to a set of parameters that will be replaced\n in the template file to generate one input file.\n\n For \"FAST\" input files, parameters can be changed recursively.\n \n\n INPUTS:\n PARAMS: list of dictionaries. Each key of the dictionary should be a key present in the \n template file when read with `weio` (see: weio.read(main_file).keys() )\n\n PARAMS[0]={'FAST|DT':0.1, 'EDFile|GBRatio':1, 'ServoFile|GenEff':0.8}\n\n templateDir: if provided, this directory and its content will be copied to `outputDir` \n before doing the parametric substitution\n\n outputDir : directory where files will be generated. 
\n \"\"\"\n # --- Helper functions\n def rebase_rel(wd,s,sid):\n split = os.path.splitext(s)\n return os.path.join(wd,split[0]+sid+split[1])\n\n def get_strID(p) :\n if '__name__' in p.keys():\n strID=p['__name__']\n else:\n raise Exception('When calling `templateReplace`, provide the key `__name_` in the parameter dictionaries')\n return strID\n\n def splitAddress(sAddress):\n sp = sAddress.split('|')\n if len(sp)==1:\n return sp[0],[]\n else:\n return sp[0],sp[1:]\n\n def rebaseFileName(org_filename, workDir, strID):\n new_filename_full = rebase_rel(workDir, org_filename,'_'+strID)\n new_filename = os.path.relpath(new_filename_full,workDir).replace('\\\\','/')\n return new_filename, new_filename_full\n\n def replaceRecurse(templatename_or_newname, FileKey, ParamKey, ParamValue, Files, strID, workDir, TemplateFiles):\n \"\"\" \n FileKey: a single key defining which file we are currently modifying e.g. :'AeroFile', 'EDFile','FVWInputFileName'\n ParamKey: the address key of the parameter to be changed, relative to the current FileKey\n e.g. 'EDFile|IntMethod' (if FileKey is '') \n 'IntMethod' (if FileKey is 'EDFile') \n ParamValue: the value to be used\n Files: dict of files, as returned by weio, keys are \"FileKeys\" \n \"\"\"\n # --- Special handling for the root\n if FileKey=='':\n FileKey='Root'\n # --- Open (or get if already open) file where a parameter needs to be changed\n if FileKey in Files.keys():\n # The file was already opened, it's stored\n f = Files[FileKey]\n newfilename_full = f.filename\n newfilename = os.path.relpath(newfilename_full,workDir).replace('\\\\','/')\n\n else:\n templatefilename = templatename_or_newname\n templatefilename_full = os.path.join(workDir,templatefilename)\n TemplateFiles.append(templatefilename_full)\n if FileKey=='Root':\n # Root files, we start from strID\n ext = os.path.splitext(templatefilename)[-1]\n newfilename_full = os.path.join(wd,strID+ext)\n newfilename = strID+ext\n else:\n newfilename, newfilename_full = rebaseFileName(templatefilename, workDir, strID)\n #print('--------------------------------------------------------------')\n #print('TemplateFile :', templatefilename)\n #print('TemplateFileFull:', templatefilename_full)\n #print('NewFile :', newfilename)\n #print('NewFileFull :', newfilename_full)\n shutil.copyfile(templatefilename_full, newfilename_full)\n f= FASTInputFile(newfilename_full) # open the template file for that filekey \n Files[FileKey]=f # store it\n\n # --- Changing parameters in that file\n NewFileKey_or_Key, ChildrenKeys = splitAddress(ParamKey)\n if len(ChildrenKeys)==0:\n # A simple parameter is changed \n Key = NewFileKey_or_Key\n #print('Setting', FileKey, '|',Key, 'to',ParamValue)\n if Key=='OutList':\n OutList=f[Key]\n f[Key]=addToOutlist(OutList, ParamValue)\n else:\n f[Key] = ParamValue\n else:\n # Parameters needs to be changed in subfiles (children)\n NewFileKey = NewFileKey_or_Key\n ChildrenKey = '|'.join(ChildrenKeys)\n child_templatefilename = f[NewFileKey].strip('\"') # old filename that will be used as a template\n baseparent = os.path.dirname(newfilename)\n #print('Child templatefilename:',child_templatefilename)\n #print('Parent base dir :',baseparent)\n workDir = os.path.join(workDir, baseparent)\n\n # \n newchildFilename, Files = replaceRecurse(child_templatefilename, NewFileKey, ChildrenKey, ParamValue, Files, strID, workDir, TemplateFiles)\n #print('Setting', FileKey, '|',NewFileKey, 'to',newchildFilename)\n f[NewFileKey] = '\"'+newchildFilename+'\"'\n\n return newfilename, Files\n\n\n # --- 
Safety checks\n if templateDir is None and outputDir is None:\n raise Exception('Provide at least a template directory OR an output directory')\n\n if templateDir is not None:\n if not os.path.exists(templateDir):\n raise Exception('Template directory does not exist: '+templateDir)\n\n # Default value of outputDir if not provided\n if templateDir[-1]=='/' or templateDir[-1]=='\\\\' :\n templateDir=templateDir[0:-1]\n if outputDir is None:\n outputDir=templateDir+'_Parametric'\n\n # --- Main file use as \"master\"\n if templateDir is not None:\n main_file=os.path.join(outputDir, os.path.basename(main_file))\n else:\n main_file=main_file\n\n # Params need to be a list\n if not isinstance(PARAMS,list):\n PARAMS=[PARAMS]\n\n if oneSimPerDir:\n workDirS=[os.path.join(outputDir,get_strID(p)) for p in PARAMS]\n else:\n workDirS=[outputDir]*len(PARAMS)\n # --- Creating outputDir - Copying template folder to outputDir if necessary\n # Copying template folder to workDir\n for wd in list(set(workDirS)):\n if removeAllowed:\n removeFASTOuputs(wd)\n if os.path.exists(wd) and removeAllowed:\n shutil.rmtree(wd, ignore_errors=False, onerror=handleRemoveReadonlyWin)\n copyTree(templateDir, wd)\n if removeAllowed:\n removeFASTOuputs(wd)\n\n\n TemplateFiles=[]\n files=[]\n for ip,(wd,p) in enumerate(zip(workDirS,PARAMS)):\n if '__index__' not in p.keys():\n p['__index__']=ip\n\n main_file_base = os.path.basename(main_file)\n strID = get_strID(p)\n # --- Setting up files for this simulation\n Files=dict()\n for k,v in p.items():\n if k =='__index__' or k=='__name__':\n continue\n new_mainFile, Files = replaceRecurse(main_file_base, '', k, v, Files, strID, wd, TemplateFiles)\n\n # --- Writting files\n for k,f in Files.items():\n if k=='Root':\n files.append(f.filename)\n f.write()\n\n # --- Remove extra files at the end\n if removeRefSubFiles:\n TemplateFiles, nCounts = np.unique(TemplateFiles, return_counts=True)\n if not oneSimPerDir:\n # we can only detele template files that were used by ALL simulations\n TemplateFiles=[t for nc,t in zip(nCounts, TemplateFiles) if nc==len(PARAMS)]\n for tf in TemplateFiles:\n try:\n os.remove(tf)\n except:\n print('[FAIL] Removing '+tf)\n pass\n return files\n\ndef templateReplace(PARAMS, templateDir, outputDir=None, main_file=None, removeAllowed=False, removeRefSubFiles=False, oneSimPerDir=False):\n \"\"\" Replace parameters in a fast folder using a list of dictionaries where the keys are for instance:\n 'FAST|DT', 'EDFile|GBRatio', 'ServoFile|GenEff'\n \"\"\"\n # --- For backward compatibility, remove \"FAST|\" from the keys\n for p in PARAMS:\n old_keys=[ k for k,_ in p.items() if k.find('FAST|')==0]\n for k_old in old_keys:\n k_new=k_old.replace('FAST|','')\n p[k_new] = p.pop(k_old)\n \n return templateReplaceGeneral(PARAMS, templateDir, outputDir=outputDir, main_file=main_file, \n removeAllowed=removeAllowed, removeRefSubFiles=removeRefSubFiles, oneSimPerDir=oneSimPerDir)\n\n# --------------------------------------------------------------------------------}\n# --- Tools for template replacement \n# --------------------------------------------------------------------------------{\ndef paramsSteadyAero(p=dict()):\n p['AeroFile|AFAeroMod']=1 # remove dynamic effects dynamic\n p['AeroFile|WakeMod']=1 # remove dynamic inflow dynamic\n p['AeroFile|TwrPotent']=0 # remove tower shadow\n return p\n\ndef paramsNoGen(p=dict()):\n p['EDFile|GenDOF' ] = 'False'\n return p\n\ndef paramsGen(p=dict()):\n p['EDFile|GenDOF' ] = 'True'\n return p\n\ndef paramsNoController(p=dict()):\n 
p['ServoFile|PCMode'] = 0;\n p['ServoFile|VSContrl'] = 0;\n p['ServoFile|YCMode'] = 0;\n return p\n\ndef paramsControllerDLL(p=dict()):\n p['ServoFile|PCMode'] = 5;\n p['ServoFile|VSContrl'] = 5;\n p['ServoFile|YCMode'] = 5;\n p['EDFile|GenDOF'] = 'True';\n return p\n\n\ndef paramsStiff(p=dict()):\n p['EDFile|FlapDOF1'] = 'False'\n p['EDFile|FlapDOF2'] = 'False'\n p['EDFile|EdgeDOF' ] = 'False'\n p['EDFile|TeetDOF' ] = 'False'\n p['EDFile|DrTrDOF' ] = 'False'\n p['EDFile|YawDOF' ] = 'False'\n p['EDFile|TwFADOF1'] = 'False'\n p['EDFile|TwFADOF2'] = 'False'\n p['EDFile|TwSSDOF1'] = 'False'\n p['EDFile|TwSSDOF2'] = 'False'\n p['EDFile|PtfmSgDOF'] = 'False'\n p['EDFile|PtfmSwDOF'] = 'False'\n p['EDFile|PtfmHvDOF'] = 'False'\n p['EDFile|PtfmRDOF'] = 'False'\n p['EDFile|PtfmPDOF'] = 'False'\n p['EDFile|PtfmYDOF'] = 'False'\n return p\n\ndef paramsWS_RPM_Pitch(WS,RPM,Pitch,baseDict=None,FlatInputs=False):\n \"\"\" \"\"\"\n # --- Ensuring everythin is an iterator\n def iterify(x):\n if not isinstance(x, collections.Iterable): x = [x]\n return x\n WS = iterify(WS)\n RPM = iterify(RPM)\n Pitch = iterify(Pitch)\n # --- If inputs are not flat but different vectors to length through, we flatten them (TODO: meshgrid and ravel?)\n if not FlatInputs :\n WS_flat = []\n Pitch_flat = []\n RPM_flat = []\n for pitch in Pitch:\n for rpm in RPM:\n for ws in WS:\n WS_flat.append(ws)\n RPM_flat.append(rpm)\n Pitch_flat.append(pitch)\n else:\n WS_flat, Pitch_flat, RPM_flat = WS, Pitch, RPM\n\n # --- Defining the parametric study \n PARAMS=[]\n i=0\n for ws,rpm,pitch in zip(WS_flat,RPM_flat,Pitch_flat):\n if baseDict is None:\n p=dict()\n else:\n p = baseDict.copy()\n p['EDFile|RotSpeed'] = rpm\n p['InflowFile|HWindSpeed'] = ws\n p['InflowFile|WindType'] = 1 # Setting steady wind\n p['EDFile|BlPitch(1)'] = pitch\n p['EDFile|BlPitch(2)'] = pitch\n p['EDFile|BlPitch(3)'] = pitch\n\n p['__index__'] = i\n p['__name__'] = '{:03d}_ws{:04.1f}_pt{:04.2f}_om{:04.2f}'.format(p['__index__'],p['InflowFile|HWindSpeed'],p['EDFile|BlPitch(1)'],p['EDFile|RotSpeed'])\n i=i+1\n PARAMS.append(p)\n return PARAMS\n\n\n# --------------------------------------------------------------------------------}\n# --- Tools for PostProcessing one or several simulations\n# --------------------------------------------------------------------------------{\ndef _zero_crossings(y,x=None,direction=None):\n \"\"\"\n Find zero-crossing points in a discrete vector, using linear interpolation.\n direction: 'up' or 'down', to select only up-crossings or down-crossings\n Returns: \n x values xzc such that y(yzc)==0\n indexes izc, such that the zero is between y[izc] (excluded) and y[izc+1] (included)\n if direction is not provided, also returns:\n sign, equal to 1 for up crossing\n \"\"\"\n y=np.asarray(y)\n if x is None:\n x=np.arange(len(y))\n\n if np.any((x[1:] - x[0:-1]) <= 0.0):\n raise Exception('x values need to be in ascending order')\n\n # Indices before zero-crossing\n iBef = np.where(y[1:]*y[0:-1] < 0.0)[0]\n \n # Find the zero crossing by linear interpolation\n xzc = x[iBef] - y[iBef] * (x[iBef+1] - x[iBef]) / (y[iBef+1] - y[iBef])\n \n # Selecting points that are exactly 0 and where neighbor change sign\n iZero = np.where(y == 0.0)[0]\n iZero = iZero[np.where((iZero > 0) & (iZero < x.size-1))]\n iZero = iZero[np.where(y[iZero-1]*y[iZero+1] < 0.0)]\n\n # Concatenate \n xzc = np.concatenate((xzc, x[iZero]))\n iBef = np.concatenate((iBef, iZero))\n\n # Sort\n iSort = np.argsort(xzc)\n xzc, iBef = xzc[iSort], iBef[iSort]\n\n # Return up-crossing, 
down crossing or both\n sign = np.sign(y[iBef+1]-y[iBef])\n if direction == 'up':\n I= np.where(sign==1)[0]\n return xzc[I],iBef[I]\n elif direction == 'down':\n I= np.where(sign==-1)[0]\n return xzc[I],iBef[I]\n elif direction is not None:\n raise Exception('Direction should be either `up` or `down`')\n return xzc, iBef, sign\n\ndef find_matching_pattern(List, pattern):\n \"\"\" Return elements of a list of strings that match a pattern\n and return the first matching group\n \"\"\"\n reg_pattern=re.compile(pattern)\n MatchedElements=[]\n MatchedStrings=[]\n for l in List:\n match=reg_pattern.search(l)\n if match:\n MatchedElements.append(l)\n if len(match.groups(1))>0:\n MatchedStrings.append(match.groups(1)[0])\n else:\n MatchedStrings.append('')\n return MatchedElements, MatchedStrings\n\n \n\ndef extractSpanTSReg(ts, col_pattern, colname, IR=None):\n \"\"\" Helper function to extract spanwise results, like B1N1Cl B1N2Cl etc. \n\n Example\n col_pattern: 'B1N(\\d*)Cl_\\[-\\]'\n colname : 'B1Cl_[-]'\n \"\"\"\n # Extracting columns matching pattern\n cols, sIdx = find_matching_pattern(ts.keys(), col_pattern)\n if len(cols) ==0:\n return (None,None)\n\n # Sorting by ID\n cols = np.asarray(cols)\n Idx = np.array([int(s) for s in sIdx])\n Isort = np.argsort(Idx)\n Idx = Idx[Isort]\n cols = cols[Isort]\n\n nrMax = np.max(Idx)\n Values = np.zeros((nrMax,1))\n Values[:] = np.nan\n# if IR is None:\n# cols = [col_pattern.format(ir+1) for ir in range(nr)]\n# else:\n# cols = [col_pattern.format(ir) for ir in IR]\n for idx,col in zip(Idx,cols):\n Values[idx-1]=ts[col]\n nMissing = np.sum(np.isnan(Values))\n if nMissing==nrMax:\n return (None,None)\n if len(cols)<nrMax:\n #print(Values)\n print('[WARN] Not all values found for {}, missing {}/{}'.format(colname,nMissing,nrMax))\n if len(cols)>nrMax:\n print('[WARN] More values found for {}, found {}/{}'.format(colname,len(cols),nrMax))\n return (colname,Values)\n\ndef extractSpanTS(ts, nr, col_pattern, colname, IR=None):\n \"\"\" Helper function to extract spanwise results, like B1N1Cl B1N2Cl etc. 
\n\n Example\n col_pattern: 'B1N{:d}Cl_[-]'\n colname : 'B1Cl_[-]'\n \"\"\"\n Values=np.zeros((nr,1))\n if IR is None:\n cols = [col_pattern.format(ir+1) for ir in range(nr)]\n else:\n cols = [col_pattern.format(ir) for ir in IR]\n colsExist = [c for c in cols if c in ts.keys() ]\n if len(colsExist)==0:\n return (None,None)\n\n Values = [ts[c] if c in ts.keys() else np.nan for c in cols ]\n nMissing = np.sum(np.isnan(Values))\n #Values = ts[cols].T\n #nCoun=len(Values)\n if nMissing==nr:\n return (None,None)\n if len(colsExist)<nr:\n print(Values)\n print('[WARN] Not all values found for {}, missing {}/{}'.format(colname,nMissing,nr))\n if len(colsExist)>nr:\n print('[WARN] More values found for {}, found {}/{}'.format(colname,len(cols),nr))\n return (colname,Values)\n\ndef bin_mean_DF(df, xbins, colBin ):\n \"\"\" \n Perform bin averaging of a dataframe\n \"\"\"\n if colBin not in df.columns.values:\n raise Exception('The column `{}` does not appear to be in the dataframe'.format(colBin))\n xmid = (xbins[:-1]+xbins[1:])/2\n df['Bin'] = pd.cut(df[colBin], bins=xbins, labels=xmid ) # Adding a column that has bin attribute\n df2 = df.groupby('Bin').mean() # Average by bin\n # also counting\n df['Counts'] = 1\n dfCount=df[['Counts','Bin']].groupby('Bin').sum()\n df2['Counts'] = dfCount['Counts']\n # Just in case some bins are missing (will be nan)\n df2 = df2.reindex(xmid)\n return df2\n\ndef azimuthal_average_DF(df, psiBin=None, colPsi='Azimuth_[deg]', tStart=None, colTime='Time_[s]'):\n \"\"\" \n Average a dataframe based on azimuthal value\n Returns a dataframe with same amount of columns as input, and azimuthal values as index\n \"\"\"\n if psiBin is None: \n psiBin = np.arange(0,360+1,10)\n\n if tStart is not None:\n if colTime not in df.columns.values:\n raise Exception('The column `{}` does not appear to be in the dataframe'.format(colTime))\n df=df[ df[colTime]>tStart].copy()\n\n dfPsi= bin_mean_DF(df, psiBin, colPsi)\n if np.any(dfPsi['Counts']<1):\n print('[WARN] some bins have no data! Increase the bin size.')\n\n return dfPsi\n\n\ndef averageDF(df,avgMethod='periods',avgParam=None,ColMap=None,ColKeep=None,ColSort=None,stats=['mean']):\n \"\"\"\n See average PostPro for documentation, same interface, just does it for one dataframe\n \"\"\"\n def renameCol(x):\n for k,v in ColMap.items():\n if x==v:\n return k\n return x\n # Before doing the colomn map we store the time\n time = df['Time_[s]'].values\n timenoNA = time[~np.isnan(time)]\n # Column mapping\n if ColMap is not None:\n ColMapMiss = [v for _,v in ColMap.items() if v not in df.columns.values]\n if len(ColMapMiss)>0:\n print('[WARN] Signals missing and omitted for ColMap:\\n '+'\\n '.join(ColMapMiss))\n df.rename(columns=renameCol,inplace=True)\n ## Defining a window for stats (start time and end time)\n if avgMethod.lower()=='constantwindow':\n tEnd = timenoNA[-1]\n if avgParam is None:\n tStart=timenoNA[0]\n else:\n tStart =tEnd-avgParam\n elif avgMethod.lower()=='periods':\n # --- Using azimuth to find periods\n if 'Azimuth_[deg]' not in df.columns:\n raise Exception('The sensor `Azimuth_[deg]` does not appear to be in the output file. 
You cannot use the averaging method by `periods`, use `constantwindow` instead.')\n # NOTE: potentially we could average over each period and then average\n psi=df['Azimuth_[deg]'].values\n _,iBef = _zero_crossings(psi-psi[-10],direction='up')\n if len(iBef)==0:\n _,iBef = _zero_crossings(psi-180,direction='up')\n if len(iBef)==0:\n print('[WARN] Not able to find a zero crossing!')\n tEnd = time[-1]\n iBef=[0]\n else:\n tEnd = time[iBef[-1]]\n\n if avgParam is None:\n tStart=time[iBef[0]]\n else:\n avgParam=int(avgParam) \n if len(iBef)-1<avgParam:\n print('[WARN] Not enough periods found ({}) compared to number requested to average ({})!'.format(len(iBef)-1,avgParam))\n avgParam=len(iBef)-1\n if avgParam==0:\n tStart = time[0]\n tEnd = time[-1]\n else:\n tStart=time[iBef[-1-avgParam]]\n elif avgMethod.lower()=='periods_omega':\n # --- Using average omega to find periods\n if 'RotSpeed_[rpm]' not in df.columns:\n raise Exception('The sensor `RotSpeed_[rpm]` does not appear to be in the output file. You cannot use the averaging method by `periods_omega`, use `periods` or `constantwindow` instead.')\n Omega=df['RotSpeed_[rpm]'].mean()/60*2*np.pi\n Period = 2*np.pi/Omega \n if avgParam is None:\n nRotations=np.floor(tEnd/Period)\n else:\n nRotations=avgParam\n tStart =tEnd-Period*nRotations\n else:\n raise Exception('Unknown averaging method {}'.format(avgMethod))\n # Narrowind number of columns here (azimuth needed above)\n if ColKeep is not None:\n ColKeepSafe = [c for c in ColKeep if c in df.columns.values]\n ColKeepMiss = [c for c in ColKeep if c not in df.columns.values]\n if len(ColKeepMiss)>0:\n print('[WARN] Signals missing and omitted for ColKeep:\\n '+'\\n '.join(ColKeepMiss))\n df=df[ColKeepSafe]\n if tStart<time[0]:\n print('[WARN] Simulation time ({}) too short compared to required averaging window ({})!'.format(tEnd-time[0],tStart-tEnd))\n IWindow = np.where((time>=tStart) & (time<=tEnd) & (~np.isnan(time)))[0]\n iEnd = IWindow[-1]\n iStart = IWindow[0]\n ## Absolute and relative differences at window extremities\n DeltaValuesAbs=(df.iloc[iEnd]-df.iloc[iStart]).abs()\n# DeltaValuesRel=(df.iloc[iEnd]-df.iloc[iStart]).abs()/df.iloc[iEnd]\n DeltaValuesRel=(df.iloc[IWindow].max()-df.iloc[IWindow].min())/df.iloc[IWindow].mean()\n #EndValues=df.iloc[iEnd]\n #if avgMethod.lower()=='periods_omega':\n # if DeltaValuesRel['RotSpeed_[rpm]']*100>5:\n # print('[WARN] Rotational speed vary more than 5% in averaging window ({}%) for simulation: {}'.format(DeltaValuesRel['RotSpeed_[rpm]']*100,f))\n ## Stats values during window\n # MeanValues = df[IWindow].mean()\n # StdValues = df[IWindow].std()\n if 'mean' in stats:\n MeanValues = pd.DataFrame(df.iloc[IWindow].mean()).transpose()\n else:\n raise NotImplementedError()\n return MeanValues\n\n\n\ndef averagePostPro(outFiles,avgMethod='periods',avgParam=None,ColMap=None,ColKeep=None,ColSort=None,stats=['mean']):\n \"\"\" Opens a list of FAST output files, perform average of its signals and return a panda dataframe\n For now, the scripts only computes the mean within a time window which may be a constant or a time that is a function of the rotational speed (see `avgMethod`).\n The script only computes the mean for now. 
Other stats will be added\n\n `ColMap` : dictionary where the key is the new column name, and v the old column name.\n Default: None, output is not sorted\n NOTE: the mapping is done before sorting and `ColKeep` is applied\n ColMap = {'WS':Wind1VelX_[m/s], 'RPM': 'RotSpeed_[rpm]'}\n `ColKeep` : List of strings corresponding to the signals to analyse. \n Default: None, all columns are analysed\n Example: ColKeep=['RotSpeed_[rpm]','BldPitch1_[deg]','RtAeroCp_[-]']\n or: ColKeep=list(ColMap.keys())\n `avgMethod` : string defining the method used to determine the extent of the averaging window:\n - 'periods': use a number of periods(`avgParam`), determined by the azimuth. \n - 'periods_omega': use a number of periods(`avgParam`), determined by the mean RPM\n - 'constantwindow': the averaging window is constant (defined by `avgParam`).\n `avgParam`: based on `avgMethod` it is either\n - for 'periods_*': the number of revolutions for the window. \n Default: None, as many period as possible are used\n - for 'constantwindow': the number of seconds for the window\n Default: None, full simulation length is used\n \"\"\"\n result=None\n invalidFiles =[]\n # Loop trough files and populate result\n for i,f in enumerate(outFiles):\n try:\n df=FASTOutputFile(f).toDataFrame()\n except:\n invalidFiles.append(f)\n continue\n postpro=averageDF(df, avgMethod=avgMethod, avgParam=avgParam, ColMap=ColMap, ColKeep=ColKeep,ColSort=ColSort,stats=stats)\n MeanValues=postpro # todo\n if result is None:\n # We create a dataframe here, now that we know the colums\n columns = MeanValues.columns\n result = pd.DataFrame(np.nan, index=np.arange(len(outFiles)), columns=columns)\n result.iloc[i,:] = MeanValues.copy().values\n\n if ColSort is not None:\n # Sorting \n result.sort_values([ColSort],inplace=True,ascending=True)\n result.reset_index(drop=True,inplace=True) \n\n if len(invalidFiles)==len(outFiles):\n raise Exception('None of the files can be read (or exist)!')\n elif len(invalidFiles)>0:\n print('[WARN] There were {} missing/invalid files: {}'.format(len(invalidFiles),invalidFiles))\n\n\n return result \n\n# --------------------------------------------------------------------------------}\n# --- Tools for typical wind turbine study \n# --------------------------------------------------------------------------------{\ndef CPCT_LambdaPitch(refdir,main_fastfile,Lambda=None,Pitch=np.linspace(-10,40,5),WS=None,Omega=None, # operating conditions\n TMax=20,bStiff=True,bNoGen=True,bSteadyAero=True, # simulation options\n reRun=True, \n fastExe=None,showOutputs=True,nCores=4): # execution options\n \"\"\" Computes CP and CT as function of tip speed ratio (lambda) and pitch.\n There are two main ways to define the inputs:\n - Option 1: provide Lambda and Pitch (deg)\n - Option 2: provide WS (m/s), Omega (in rpm) and Pitch (deg), in which case len(WS)==len(Omega)\n \"\"\"\n\n WS_default=5 # If user does not provide a wind speed vector, wind speed used\n\n # if the user provided a full path to the main file, we scrap the directory. 
TODO, should be cleaner\n if len(os.path.dirname(main_fastfile))>0:\n main_fastfile=os.path.basename(main_fastfile)\n\n # --- Reading main fast file to get rotor radius \n fst = FASTInputFile(os.path.join(refdir,main_fastfile))\n ed = FASTInputFile(os.path.join(refdir,fst['EDFile'].replace('\"','')))\n R = ed['TipRad']\n\n # --- Making sure we have \n if (Omega is not None):\n if (Lambda is not None):\n WS = np.ones(Omega.shape)*WS_default\n elif (WS is not None):\n if len(WS)!=len(Omega):\n raise Exception('When providing Omega and WS, both vectors should have the same dimension')\n else:\n WS = np.ones(Omega.shape)*WS_default\n else:\n Omega = WS_default * Lambda/R*60/(2*np.pi) # TODO, use more realistic combinations of WS and Omega\n WS = np.ones(Omega.shape)*WS_default\n\n\n # --- Defining flat vectors of operating conditions\n WS_flat = []\n RPM_flat = []\n Pitch_flat = []\n for pitch in Pitch:\n for (rpm,ws) in zip(Omega,WS):\n WS_flat.append(ws)\n RPM_flat.append(rpm)\n Pitch_flat.append(pitch)\n # --- Setting up default options\n baseDict={'FAST|TMax': TMax, 'FAST|DT': 0.01, 'FAST|DT_Out': 0.1} # NOTE: Tmax should be at least 2pi/Omega\n baseDict = paramsNoController(baseDict)\n if bStiff:\n baseDict = paramsStiff(baseDict)\n if bNoGen:\n baseDict = paramsNoGen(baseDict)\n if bSteadyAero:\n baseDict = paramsSteadyAero(baseDict)\n\n # --- Creating set of parameters to be changed\n # TODO: verify that RtAeroCp and RtAeroCt are present in AeroDyn outlist\n PARAMS = paramsWS_RPM_Pitch(WS_flat,RPM_flat,Pitch_flat,baseDict=baseDict, FlatInputs=True)\n\n # --- Generating all files in a workDir\n workDir = refdir.strip('/').strip('\\\\')+'_CPLambdaPitch'\n print('>>> Generating inputs files in {}'.format(workDir))\n RemoveAllowed=reRun # If the user want to rerun, we can remove, otherwise we keep existing simulations\n fastFiles=templateReplace(PARAMS, refdir, outputDir=workDir,removeRefSubFiles=True,removeAllowed=RemoveAllowed,main_file=main_fastfile)\n\n # --- Running fast simulations\n print('>>> Running {} simulations...'.format(len(fastFiles)))\n run_fastfiles(fastFiles, showOutputs=showOutputs, fastExe=fastExe, nCores=nCores, reRun=reRun)\n\n # --- Postpro - Computing averages at the end of the simluation\n print('>>> Postprocessing...')\n outFiles = [os.path.splitext(f)[0]+'.outb' for f in fastFiles]\n # outFiles = glob.glob(os.path.join(workDir,'*.outb'))\n ColKeepStats = ['RotSpeed_[rpm]','BldPitch1_[deg]','RtAeroCp_[-]','RtAeroCt_[-]','Wind1VelX_[m/s]']\n result = averagePostPro(outFiles,avgMethod='periods',avgParam=1,ColKeep=ColKeepStats,ColSort='RotSpeed_[rpm]')\n # print(result) \n\n # --- Adding lambda, sorting and keeping only few columns\n result['lambda_[-]'] = result['RotSpeed_[rpm]']*R*2*np.pi/60/result['Wind1VelX_[m/s]']\n result.sort_values(['lambda_[-]','BldPitch1_[deg]'],ascending=[True,True],inplace=True)\n ColKeepFinal=['lambda_[-]','BldPitch1_[deg]','RtAeroCp_[-]','RtAeroCt_[-]']\n result=result[ColKeepFinal]\n print('>>> Done')\n\n # --- Converting to a matrices\n CP = result['RtAeroCp_[-]'].values\n CT = result['RtAeroCt_[-]'].values\n MCP =CP.reshape((len(Lambda),len(Pitch)))\n MCT =CT.reshape((len(Lambda),len(Pitch)))\n LAMBDA, PITCH = np.meshgrid(Lambda, Pitch)\n # --- CP max\n i,j = np.unravel_index(MCP.argmax(), MCP.shape)\n MaxVal={'CP_max':MCP[i,j],'lambda_opt':LAMBDA[j,i],'pitch_opt':PITCH[j,i]}\n\n return MCP,MCT,Lambda,Pitch,MaxVal,result\n\n\n# def detectFastFiles(workDir):\n# 
FstFiles=glob.glob(os.path.join(workDir,'*.fst'))+glob.glob(os.path.join(workDir,'*.FST'))\n# DatFiles=glob.glob(os.path.join(workDir,'*.dat'))+glob.glob(os.path.join(workDir,'*.DAT'))\n# Files=dict()\n# Files['Main'] = FstFiles\n# Files['Inflow'] = None\n# Files['Aero'] = None\n# Files['Tower'] = None\n# Files['Blade'] = None\n# Files['AeroBlade'] = None\n# Files['ServoDyn'] = None\n# for f in DatFiles:\n# b = os.path.basename(f).lower()\n# if b.find('inflow'):\n# Files['Inflow'] = f\n# windfile_ref = 'InflowWind.dat';\n# fastfile_ref = 'Turbine.fst';\n# elasfile_ref = 'ElastoDyn.dat';\n# remove\n \n\n\nif __name__=='__main__':\n pass\n # --- Test of templateReplace\n PARAMS = {}\n PARAMS['FAST|TMax'] = 10\n PARAMS['__name__'] = 'MyName'\n PARAMS['FAST|DT'] = 0.01\n PARAMS['FAST|DT_Out'] = 0.1\n PARAMS['EDFile|RotSpeed'] = 100\n PARAMS['EDFile|BlPitch(1)'] = 1\n PARAMS['EDFile|GBoxEff'] = 0.92\n PARAMS['ServoFile|VS_Rgn2K'] = 0.00038245\n PARAMS['ServoFile|GenEff'] = 0.95\n PARAMS['InflowFile|HWindSpeed'] = 8\n templateReplace(PARAMS,refDir,RemoveRefSubFiles=True)\n\n" ]
[ [ "numpy.sign", "numpy.where", "numpy.max", "numpy.concatenate", "pandas.DataFrame", "numpy.arange", "numpy.trapz", "numpy.column_stack", "numpy.array", "numpy.zeros", "numpy.argsort", "numpy.floor", "numpy.isnan", "pandas.cut", "numpy.asarray", "numpy.ones", "numpy.any", "numpy.linspace", "numpy.meshgrid", "numpy.unique" ] ]
yamauchi1132/Research_Codes
[ "c9e104f8592277cb4aa5c479b014c78c702a0939" ]
[ "analysis/unbound_mass/unbound_mass_alltimecheck.py" ]
[ "import sys, os\nsys.path.append(os.pardir)\nimport numpy as np\nimport operator\nimport math\nimport matplotlib as mpl\nimport matplotlib.pyplot as plt\nfrom common import *\n\ndirname = \"data_thesis/snap_unbound_10.0Msun_4.0Rsun_pori1.5_rp1.0R_vinf1.00e+06/sph_t%04d.dat\"\nstart = 100\nend = 1700\nstep = 100\n\nMsun = 1.989e+33\n\ndef calc_unbound_mass(p, mass_total, vel_cg):\n mass_unbound = 0.\n\n for i in range(len(p)):\n vx = p[i].vel[0] - vel_cg[0]\n vy = p[i].vel[1] - vel_cg[1]\n vz = p[i].vel[2] - vel_cg[2]\n vx2 = vx * vx\n vy2 = vy * vy\n vz2 = vz * vz\n v2 = vx2 + vy2 + vz2\n\n ene = 0.5*v2 + p[i].pot + p[i].uene\n if(ene > 0):\n mass_unbound += p[i].mass\n\n mass_bound = mass_total - mass_unbound\n\n return mass_bound, mass_unbound\n\ndef plot(mass, time):\n fig = plt.figure()\n\n for i in range(len(mass)):\n mass[i] = mass[i] / Msun\n \n plt.plot(time, mass)\n\n mpl.rcParams['axes.xmargin'] = 0\n mpl.rcParams['axes.ymargin'] = 0\n plt.tight_layout()\n \n plt.show()\n plt.close()\n\nif __name__ == '__main__':\n args = sys.argv\n\n result_bound = []\n result_unbound = []\n time = []\n for t in range(start,end+step,step):\n data = np.loadtxt(dirname % t)\n p = [Particle() for i in range(len(data))]\n readfile(data, p)\n \n pos_cg = np.array([0.,0.,0.])\n vel_cg = np.array([0.,0.,0.])\n pos_cg, vel_cg = calc_center_of_gravity(p)\n\n mass_total = 0.\n for i in range(len(p)):\n mass_total += p[i].mass\n\n m_bound, m_unbound = calc_unbound_mass(p, mass_total, vel_cg)\n\n result_bound.append(m_bound)\n result_unbound.append(m_unbound)\n time.append(t*1e+4)\n\n # plot(result_unbound, time)\n plot_thesis(result_unbound, time)" ]
[ [ "numpy.array", "matplotlib.pyplot.plot", "matplotlib.pyplot.close", "matplotlib.pyplot.figure", "numpy.loadtxt", "matplotlib.pyplot.tight_layout", "matplotlib.pyplot.show" ] ]
TrustAI/DEAT
[ "630e59c3f8483a8cda6633157387f64735e2de51" ]
[ "standard training/fat_mart_cifar10.py" ]
[ "import os\nimport argparse\nimport torchvision\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nimport torch.optim as optim\nfrom torchvision import datasets, transforms\nimport time\nimport numpy as np\nimport logging\nfrom preact_resnet import PreActResNet18\nfrom wideresnet import WideResNet\nfrom earlystop import earlystop\nfrom utils import *\n\n\nparser = argparse.ArgumentParser(description='PyTorch Friendly Adversarial Training')\nparser.add_argument('--batch-size', type=int, default=128)\nparser.add_argument('--epochs', type=int, default=50, metavar='N', help='number of epochs to train')\nparser.add_argument('--weight_decay', '--wd', default=5e-4, type=float, metavar='W')\nparser.add_argument('--model', default='pre', type=str, choices=['pre', 'wide'])\nparser.add_argument('--wide-factor', default=10, type=int, help='Widen factor')\nparser.add_argument('--lr', type=float, default=0.05, metavar='LR', help='learning rate')\nparser.add_argument('--momentum', type=float, default=0.9, metavar='M', help='SGD momentum')\nparser.add_argument('--epsilon', type=float, default=8, help='perturbation bound')\nparser.add_argument('--num_steps', type=int, default=10, help='maximum perturbation step K')\nparser.add_argument('--step_size', type=float, default=2, help='step size')\nparser.add_argument('--normalization', default='std', type=str, choices=['std', '01','+-1'])\nparser.add_argument('--seed', type=int, default=0, metavar='S', help='random seed')\nparser.add_argument('--tau', type=int, default=3, help='step tau')\nparser.add_argument('--beta',type=float,default=6.0,help='regularization parameter')\nparser.add_argument('--rand_init', type=bool, default=True, help=\"whether to initialize adversarial sample with random noise\")\nparser.add_argument('--omega', type=float, default=0.0, help=\"random sample parameter for adv data generation\")\nparser.add_argument('--dynamictau', type=bool, default=False, help='whether to use dynamic tau')\nparser.add_argument('--fname', default='output', type=str)\nparser.add_argument('--data-dir', default='/mnt/storage0_8/torch_datasets/cifar-data', type=str)\nparser.add_argument('--out-dir', default='fat_mart_out', type=str, help='Output directory')\nparser.add_argument('--resume', type=str, default='', help='whether to resume training, default: None')\nparser.add_argument('--save-model', action='store_true')\nargs = parser.parse_args()\n\n# training settings\ntorch.manual_seed(args.seed)\nnp.random.seed(args.seed)\ntorch.cuda.manual_seed_all(args.seed)\ntorch.backends.cudnn.deterministic = False\ntorch.backends.cudnn.benchmark = True\ndevice = torch.device(\"cuda\")\nepsilon = (args.epsilon / 255.)\nstep_size = (args.step_size / 255.)\nif args.normalization == 'std':\n mu = torch.tensor(cifar10_mean).view(3,1,1).cuda()\n std = torch.tensor(cifar10_std).view(3,1,1).cuda()\nelif args.normalization == '01':\n mu = torch.tensor((0.,0.,0.)).view(3,1,1).cuda()\n std = torch.tensor((1.,1.,1.)).view(3,1,1).cuda()\nelif args.normalization == '+-1':\n mu = torch.tensor((0.5, 0.5, 0.5)).view(3,1,1).cuda()\n std = torch.tensor((0.5, 0.5, 0.5)).view(3,1,1).cuda()\ndef MART_loss(adv_logits, natural_logits, target, beta):\n # Based on the repo MART https://github.com/YisenWang/MART\n kl = nn.KLDivLoss(reduction='none')\n batch_size = len(target)\n adv_probs = F.softmax(adv_logits, dim=1)\n tmp1 = torch.argsort(adv_probs, dim=1)[:, -2:]\n new_y = torch.where(tmp1[:, -1] == target, tmp1[:, -2], tmp1[:, -1])\n loss_adv = F.cross_entropy(adv_logits, 
target) + F.nll_loss(torch.log(1.0001 - adv_probs + 1e-12), new_y)\n nat_probs = F.softmax(natural_logits, dim=1)\n true_probs = torch.gather(nat_probs, 1, (target.unsqueeze(1)).long()).squeeze()\n loss_robust = (1.0 / batch_size) * torch.sum(\n torch.sum(kl(torch.log(adv_probs + 1e-12), nat_probs), dim=1) * (1.0000001 - true_probs))\n loss = loss_adv + float(beta) * loss_robust\n return loss\n\ndef train(model, train_loader, optimizer, tau):\n start_epoch_time = time.time()\n train_loss = 0\n train_n = 0\n bp_count = 0\n for batch_idx, (data, target) in enumerate(train_loader):\n data, target = data.cuda(), target.cuda()\n\n # Get friendly adversarial training data via early-stopped PGD\n output_adv, output_target, output_natural, count = earlystop(model, data, target, step_size=step_size,\n epsilon=epsilon, perturb_steps=args.num_steps,\n tau=tau, randominit_type=\"normal_distribution_randominit\", loss_fn='kl', \n mu=mu, std=std, rand_init=args.rand_init,omega=args.omega)\n bp_count += count\n model.train()\n optimizer.zero_grad()\n\n natural_logits = model(normalize(output_natural,mu,std))\n adv_logits = model(normalize(output_adv,mu,std))\n\n # calculate MART adversarial training loss\n loss = MART_loss(adv_logits,natural_logits,output_target,args.beta)\n\n loss.backward()\n optimizer.step()\n train_loss += loss.item() * target.size(0)\n train_n += target.size(0)\n\n end_epoch_time = time.time()\n epoch_time = end_epoch_time - start_epoch_time\n\n return epoch_time, train_loss/train_n, bp_count/train_n\n\ndef adjust_tau(epoch, dynamictau):\n tau = args.tau\n if dynamictau:\n if epoch <= 50:\n tau = 0\n elif epoch <= 90:\n tau = 1\n else:\n tau = 2\n return tau\n\n\ndef adjust_learning_rate(optimizer, epoch):\n \"\"\"decrease the learning rate\"\"\"\n lr = args.lr\n if epoch >= 25:\n lr = args.lr * 0.1\n if epoch >= 40:\n lr = args.lr * 0.01\n\n for param_group in optimizer.param_groups:\n param_group['lr'] = lr\n return lr\n\n\nout_dir = args.out_dir\nif not os.path.exists(out_dir):\n os.makedirs(out_dir)\nlogfile = os.path.join(args.out_dir, args.fname+'.log')\nlogger = logging.getLogger(__name__)\nlogging.basicConfig(\n filename=logfile,\n format='[%(asctime)s] - %(message)s',\n datefmt='%Y/%m/%d %H:%M:%S',\n level=logging.INFO)\nlogger.info(args)\ntrain_loader, test_loader = get_loaders(args.data_dir, args.batch_size)\nif args.model == 'pre':\n model = PreActResNet18().cuda()\nelif args.model == 'wide':\n model = WideResNet(34, 10, widen_factor=args.wide_factor, dropRate=0.0)\nmodel = torch.nn.DataParallel(model).cuda()\nmodel.train()\noptimizer = optim.SGD(model.parameters(), lr=args.lr, momentum=args.momentum, weight_decay=args.weight_decay)\n\n\ntrain_time = 0\nhighest_acc = 0\nhighest_idx = 0\nlogger.info('Epoch \\t Seconds \\t LR \\t \\t Train Loss \\t #BP \\t Val Acc \\t PGD Acc')\nfor epoch in range(args.epochs):\n cur_lr = adjust_learning_rate(optimizer, epoch + 1)\n epoch_time, train_loss, nb_bp = train(model, train_loader, optimizer, adjust_tau(epoch + 1, args.dynamictau))\n train_time += epoch_time\n\n # Evaluation\n if args.model == 'pre':\n model_test = PreActResNet18().cuda()\n elif args.model == 'wide':\n model_test = WideResNet(34, 10, widen_factor=args.wide_factor, dropRate=0.0)\n model_test = torch.nn.DataParallel(model_test).cuda()\n model_test.load_state_dict(model.state_dict())\n model_test.float()\n model_test.eval()\n\n val_adv_loss, val_adv_acc = evaluate_pgd(test_loader, model_test, mu, std, 10, 1, val=20, use_CWloss=True)\n val_loss, val_acc = 
evaluate_standard(test_loader, model_test, mu, std, val=20)\n logger.info('%d \\t %.1f \\t \\t %.4f \\t %.4f \\t %d \\t %.4f \\t %.4f',\n epoch, epoch_time, cur_lr, train_loss,nb_bp+1, val_acc, val_adv_acc)\n\n if val_adv_acc > highest_acc and args.save_model:\n highest_acc = val_adv_acc\n highest_idx = epoch\n torch.save(model.state_dict(), os.path.join(args.out_dir, f'model_{args.model}.pth'))\nlogger.info('Total train time: %.4f minutes', (train_time)/60)\nlogger.info(f'Best checkpoint at {highest_idx}, {highest_acc}')\n\n\n" ]
[ [ "torch.device", "torch.cuda.manual_seed_all", "torch.nn.DataParallel", "numpy.random.seed", "torch.argsort", "torch.manual_seed", "torch.nn.functional.cross_entropy", "torch.tensor", "torch.nn.KLDivLoss", "torch.nn.functional.softmax", "torch.log", "torch.where" ] ]
hydoai/velovision
[ "28a0428608bd67a3f14a600eb742642cdc079167" ]
[ "computer_vision/subvision/iou_tracker/iou_tracker.py" ]
[ "import numpy as np\nfrom lapsolver import solve_dense\nfrom time import time\n\ndef iou(bbox1, bbox2):\n \"\"\"\n Calculates the intersection-over-union of two bounding boxes.\n\n Args:\n bbox1 (numpy.array, list of floats): bounding box in format x1,y1,x2,y2.\n bbox2 (numpy.array, list of floats): bounding box in format x1,y1,x2,y2.\n\n Returns:\n int: intersection-over-onion of bbox1, bbox2\n \"\"\"\n\n bbox1 = [float(x) for x in bbox1]\n bbox2 = [float(x) for x in bbox2]\n\n (x0_1, y0_1, x1_1, y1_1) = bbox1\n (x0_2, y0_2, x1_2, y1_2) = bbox2\n\n # get the overlap rectangle\n overlap_x0 = max(x0_1, x0_2)\n overlap_y0 = max(y0_1, y0_2)\n overlap_x1 = min(x1_1, x1_2)\n overlap_y1 = min(y1_1, y1_2)\n\n # check if there is an overlap\n if overlap_x1 - overlap_x0 <= 0 or overlap_y1 - overlap_y0 <= 0:\n return 0\n\n # if yes, calculate the ratio of the overlap to each ROI size and the unified size\n size_1 = (x1_1 - x0_1) * (y1_1 - y0_1)\n size_2 = (x1_2 - x0_2) * (y1_2 - y0_2)\n size_intersection = (overlap_x1 - overlap_x0) * (overlap_y1 - overlap_y0)\n size_union = size_1 + size_2 - size_intersection\n\n return size_intersection / size_union\n\ndef associate(tracks, detections, sigma_iou):\n \"\"\" perform association between tracks and detections in a frame.\n Args:\n tracks (list): input tracks\n detections (list): input detections\n sigma_iou (float): minimum intersection-over-union of a valid association\n\n Returns:\n (tuple): tuple containing:\n\n track_ids (numpy.array): 1D array with indexes of the tracks\n det_ids (numpy.array): 1D array of the associated indexes of the detections\n \"\"\"\n costs = np.empty(shape=(len(tracks), len(detections)), dtype=np.float32)\n for row, track in enumerate(tracks):\n for col, detection in enumerate(detections):\n costs[row, col] = 1 - iou(track['bboxes'], detection['bbox'])\n\n np.nan_to_num(costs)\n costs[costs > 1 - sigma_iou] = np.nan\n track_ids, det_ids = solve_dense(costs)\n return track_ids, det_ids\n\nclass IOUTracker:\n def __init__(self, sigma_l=0.5, sigma_h=1.0, sigma_iou=0.3, t_min=7, ttl=1):\n \"\"\"\n Args:\n sigma_l (float) : low detection threshold\n sigma_h (float) : high detection threshold\n sigma_iou (float) : IOU threshold\n t_min (float) : minimum track length in frames\n ttl (float) : maximum number of frames to perform visual tracking. This can fill 'gaps' of up to 2*ttl frames (ttl times forward and backward).\n\n Usage:\n\n tracker = IOUTracker()\n\n for frame in video:\n detections = object_detector()\n tracker.update(detections)\n ...(do something with tracked detections)\n \"\"\"\n self.sigma_l = sigma_l\n self.sigma_h = sigma_h\n self.sigma_iou = sigma_iou\n self.t_min = t_min\n self.ttl = ttl\n\n self.tracking_id = 0\n\n self.tracks_active = []\n self.tracks_extendable = []\n self.tracks_finished = []\n self.frame_num = 0\n\n def update(self, detections):\n '''\n Args:\n detections: a list of dictionaries, for example:\n [{'bbox': (520.0, 208.0, 645.0, 266.0),\n 'score': 0.96,\n 'class': 'pedestrian'},\n {'bbox': (783.0, 162.0, 807.0, 209.0),\n 'score': 0.88,\n 'class': 'pedestrian'}]\n '''\n\n self.frame_num += 1\n\n # apply low threshold to detections\n dets = [det for det in detections if det['score'] >= self.sigma_l]\n\n track_ids, det_ids = associate(self.tracks_active, dets, self.sigma_iou)\n\n updated_tracks = []\n\n for track_id, det_id in zip(track_ids, det_ids):\n # This upstream code keeps all the past information about bounding box locations. 
This is not needed for my purpuosese.\n #self.tracks_active[track_id]['bboxes'].append(dets[det_id]['bbox'])\n #self.tracks_active[track_id]['max_score'] = max(self.tracks_active[track_id]['max_score'], dets[det_id]['score'])\n #self.tracks_active[track_id]['classes'].append(dets[det_id]['class'])\n #self.tracks_active[track_id]['det_counter'] += 1\n self.tracks_active[track_id]['bboxes'] = dets[det_id]['bbox']\n self.tracks_active[track_id]['score'] = dets[det_id]['score']\n self.tracks_active[track_id]['classes'] = dets[det_id]['class']\n self.tracks_active[track_id]['det_counter'] += 1\n\n if self.tracks_active[track_id]['ttl'] != self.ttl:\n # reset visual tracker if active\n self.tracks_active[track_id]['ttl'] = self.ttl\n self.tracks_active[track_id]['visual_tracker'] = None\n\n updated_tracks.append(self.tracks_active[track_id])\n\n tracks_not_updated = [self.tracks_active[idx] for idx in set(range(len(self.tracks_active))).difference(set(track_ids))]\n\n for track in tracks_not_updated:\n if track['ttl'] > 0:\n self.tracks_extendable.append(track)\n\n # update the list of extenable tracks.\n # tracks that are too old are deleted\n # this should not be necessary but may improve the performance for large numbers of tracks\n tracks_extendable_updated = []\n for track in self.tracks_extendable:\n if track['start_frame'] + len(track['bboxes']) + self.ttl - track['ttl'] >= self.frame_num:\n # still hope for revival?\n tracks_extendable_updated.append(track)\n elif track['score'] >= self.sigma_h and track['det_counter'] >= self.t_min:\n # too old!\n del track\n #self.tracks_finished.append(track)\n\n self.tracks_extendable = tracks_extendable_updated\n\n new_dets = [dets[idx] for idx in set(range(len(dets))).difference(set(det_ids))]\n dets_for_new = new_dets\n\n # create new tracks\n new_tracks = []\n for det in new_dets:\n self.tracking_id += 1\n new_tracks.append({\n 'bboxes' : det['bbox'],\n 'score' : det['score'],\n 'start_frame' : self.frame_num,\n 'ttl' : self.ttl,\n 'classes' : det['class'],\n 'det_counter' : 1,\n 'visual_tracker' : None,\n 'tracking_id':self.tracking_id,\n })\n\n\n\n self.tracks_active = []\n for track in updated_tracks + new_tracks:\n if track['ttl'] == 0:\n self.tracks_extendable.append(track)\n else:\n self.tracks_active.append(track)\n\n return self.tracks_active\n\n\nif __name__ == '__main__':\n example_data = [{'bbox': (520.0, 208.0, 645.0, 266.0),'score': 0.96,'class': 'pedestrian'},\n {'bbox': (783.0, 162.0, 807.0, 209.0),'score': 0.88,'class': 'pedestrian'}]\n example_data2 = [{'bbox': (550.0, 228.0, 685.0, 286.0),'score': 0.94,'class': 'pedestrian'},\n {'bbox': (788.0, 169.0, 804.0, 208.0),'score': 0.80,'class': 'pedestrian'}]\n viou_tracker = IOUTracker()\n\n output1 = viou_tracker.update(example_data)\n output2 = viou_tracker.update(example_data2)\n\n print(output1)\n print(output2)\n\n\n\n\n" ]
[ [ "numpy.nan_to_num" ] ]
gsaha36/B-CellClassification
[ "05a4a22ee432e51e7da5a849ff0b9584ad3228fa" ]
[ "src/SDLayer.py" ]
[ "import torch.nn as nn\nimport torch\nimport numpy as np\nfrom torch.autograd import Variable\n\nclass SDLayer(nn.Module):\n def __init__(self, W):\n super(SDLayer, self).__init__()\n self.W = nn.Parameter(W)\n self.activation = nn.Tanh()\n\n def forward(self, x):\n PHI = self.W\n PHI = PHI.view(3, 3)\n PHI_INV = PHI.inverse()\n #PHI_INV = PHI_INV.view(3,3,1,1)\n\n mask = torch.tensor((1.0 - (x > 0.)) * 255.0).float()\n x = x + mask # this image contains 255 wherever it had 0 initially\n\n I_OD = - torch.log(x / 255.0)\n #print(I_OD.shape)\n #print(PHI_INV.shape)\n #A = torch.mm(I_OD, PHI_INV)\n A = self.activation(I_OD)\n return A\n" ]
[ [ "torch.nn.Tanh", "torch.log", "torch.tensor", "torch.nn.Parameter" ] ]
gautam1858/autograd
[ "8d7acaf79e33139b4ebfedf7da0602a965b47c63" ]
[ "examples/sinusoid.py" ]
[ "from __future__ import absolute_import\nfrom __future__ import print_function\nfrom builtins import range, map\nimport autograd.numpy as np\nimport matplotlib.pyplot as plt\nfrom autograd import grad\n\ndef fun(x):\n return np.sin(x)\n\nd_fun = grad(fun) # First derivative\ndd_fun = grad(d_fun) # Second derivative\n\nx = np.linspace(-10, 10, 100)\nplt.plot(x, list(map(fun, x)), x, list(map(d_fun, x)), x, list(map(dd_fun, x)))\n\nplt.xlim([-10, 10])\nplt.ylim([-1.2, 1.2])\nplt.axis('off')\nplt.savefig(\"sinusoid.png\")\nplt.clf()\n\n# Taylor approximation to sin function\ndef fun(x):\n currterm = x\n ans = currterm\n for i in range(1000):\n print(i, end=' ')\n currterm = - currterm * x ** 2 / ((2 * i + 3) * (2 * i + 2))\n ans = ans + currterm\n if np.abs(currterm) < 0.2: break # (Very generous tolerance!)\n\n return ans\n\nd_fun = grad(fun)\ndd_fun = grad(d_fun)\n\nx = np.linspace(-10, 10, 100)\nplt.plot(x, list(map(fun, x)), x, list(map(d_fun, x)), x, list(map(dd_fun, x)))\n\nplt.xlim([-10, 10])\nplt.ylim([-1.2, 1.2])\nplt.axis('off')\nplt.savefig(\"sinusoid_taylor.png\")\nplt.clf()\n" ]
[ [ "matplotlib.pyplot.xlim", "matplotlib.pyplot.savefig", "matplotlib.pyplot.ylim", "matplotlib.pyplot.clf", "matplotlib.pyplot.axis" ] ]
Jaekyumkim/spml_retinanet_fixedsize
[ "1668229dba5c3a713ebaf8ab781f82d913196ad5" ]
[ "datasets.py" ]
[ "\nimport os\nimport pdb\nimport random\n\nimport torch\nfrom torch.utils.data import Dataset\nimport torchvision.transforms as transforms\nimport numpy as np\nfrom PIL import Image, ImageDraw\n\nfrom encoder import DataEncoder\n\n#coco_label = ['person', 'bicycle', 'car', 'motorbike', 'aeroplane', 'bus', 'train', 'truck', 'boat', 'traffic light', 'fire hydrant', 'stop sign', 'parking meter', 'bench', 'bird', 'cat', 'dog', 'horse', 'sheep', 'cow', 'elephant', 'bear', 'zebra', 'giraffe', 'backpack', 'umbrella', 'handbag', 'tie', 'suitcase', 'frisbee', 'skis', 'snowboard', 'sports ball', 'kite', 'baseball bat', 'baseball glove', 'skateboard', 'surfboard', 'tennis racket', 'bottle', 'wine glass', 'cup', 'fork', 'knife', 'spoon', 'bowl', 'banana', 'apple', 'sandwich', 'orange', 'broccoli', 'carrot', 'hot dog', 'pizza', 'donut', 'cake', 'chair', 'sofa', 'pottedplant', 'bed', 'diningtable', 'toilet', 'tvmonitor', 'laptop', 'mouse', 'remote', 'keyboard', 'cell phone', 'microwave', 'oven', 'toaster', 'sink', 'refrigerator', 'book', 'clock', 'vase', 'scissors', 'teddy bear', 'hair drier', 'toothbrush']\n#\nclass LoadDataset(Dataset):\n def __init__(self, root, scale=None, shuffle=True, transform=None, train=False, \\\n batch_size=16, num_workers=2):\n with open(root, 'r') as file:\n self.lines = file.readlines()\n\n if shuffle:\n random.shuffle(self.lines)\n\n self.nSamples = len(self.lines)\n self.transform = transform\n self.train = train\n self.scale = scale\n self.batch_size = batch_size\n\n self.encoder = DataEncoder()\n\n def __len__(self):\n return self.nSamples\n\n def __getitem__(self, index):\n assert index <= len(self), 'index range error'\n imgpath = self.lines[index].rstrip()\n\n img, label = load_data_detection(imgpath, self.scale, self.train)\n label = torch.from_numpy(label)\n\n if self.transform is not None:\n img = self.transform(img)\n boxes = label[:,1:] # split the bbx label and cls label\n labels = label[:,0]\n return (img, boxes, labels)\n\n def collate_fn(self, batch):\n imgs = [x[0] for x in batch]\n boxes = [x[1] for x in batch]\n labels = [x[2] for x in batch]\n \n inputs = []\n ori_img_shape = []\n for i in range(len(imgs)):\n w = imgs[i].shape[1]\n h = imgs[i].shape[2]\n input = torch.zeros(1,3,h,w)\n inputs.append(input)\n ori_img_shape.append(imgs[i].shape)\n torch.stack(inputs)\n\n w = self.scale\n h = self.scale\n num_img = len(imgs)\n inputs = torch.zeros(num_img,3,h,w)\n loc_targets = []\n cls_targets = []\n for i in range(num_img):\n inputs[i] = imgs[i]\n loc_target, cls_target = self.encoder.encode(boxes[i],labels[i],input_size=(w,h))\n loc_targets.append(loc_target)\n cls_targets.append(cls_target)\n \n return inputs, torch.stack(loc_targets), torch.stack(cls_targets), ori_img_shape\n\n\ndef load_data_detection(imgpath, scale, train):\n labelpath = imgpath.replace('images','labels').replace('JPEGImages','labels').replace('.jpg','.txt').replace('.png','.txt')\n img = Image.open(imgpath).convert('RGB')\n resized_img,flip = data_augmentation(img, scale, train) # augment the img \n label = load_label(labelpath, flip, img, resized_img) # load the label\n return resized_img, label\n\ndef data_augmentation(img, scale, train):\n flip = random.randint(1,10000)%2 # apply the flip\n img = img.resize((scale,scale))\n if train:\n if flip:\n img = img.transpose(Image.FLIP_LEFT_RIGHT)\n return img, flip\n\ndef load_label(labelpath, flip, img, resized_img):\n bbx = np.loadtxt(labelpath) # load the label \n if len(bbx.shape) == 1:\n bbx = np.reshape(bbx,[1,5]) # if the label 
is only one, we have to resize the shape of the bbx\n x1 = (bbx[:,1] - bbx[:,3]/2)*img.width # calculate the original label x1_min\n x2 = (bbx[:,1] + bbx[:,3]/2)*img.width # calculate the original label x2_max\n y1 = (bbx[:,2] - bbx[:,4]/2)*img.height # calculate the original label y1_min\n y2 = (bbx[:,2] + bbx[:,4]/2)*img.height # calculate the original label y2_max\n r_x1 = x1 * resized_img.width / img.width # calculate the resized label x1_min\n r_x2 = x2 * resized_img.width / img.width # calculate the resized label x2_max\n r_y1 = y1 * resized_img.height / img.height # calculate the resized label y1_min\n r_y2 = y2 * resized_img.height / img.height # calculate the resized label y2_max\n bbx[:,1] = ((r_x1 + r_x2)/2) # center_x\n bbx[:,2] = ((r_y1 + r_y2)/2) # center_y\n bbx[:,3] = ((r_x2 - r_x1)) # width\n bbx[:,4] = ((r_y2 - r_y1)) # height\n if flip:\n bbx[:,1] = resized_img.width - bbx[:,1]\n return bbx \n\n\ndef debug_img(img, labels):\n draw = ImageDraw.Draw(img)\n COLOR = (255, 0, 0)\n for label in labels:\n xyxy = [label[1]-label[3]/2, label[2]-label[4]/2,label[1]+label[3]/2, label[2]+label[4]/2]\n draw.rectangle(xyxy, outline=COLOR)\n draw.rectangle([xyxy[0], xyxy[1], xyxy[0]+len(coco_label[int(label[0])])*7, \\\n xyxy[1]+15], fill=COLOR)\n draw.text([xyxy[0]+2, xyxy[1]+2], coco_label[int(label[0])])\n img.save('bbox_test.png')\n \n\ndef test():\n import torchvision\n\n transform = transforms.Compose([transforms.ToTensor(), \\\n transforms.Normalize((0.485,0.456,0.406),(0.229,0.224,0.225))])\n trainlist = '/media/NAS/dataset/PASCALVOC/train.txt'\n dataset = VOCDataset(trainlist, shape=(600,600), shuffle=True, transform=transform, \\\n train=True,batch_size=16,num_workers=0)\n trainloader = torch.utils.data.DataLoader(dataset, batch_size=1, shuffle=False, \\\n num_workers=0, collate_fn=dataset.collate_fn)\n\n for images, loc_targets, cls_targets in trainloader:\n print(images.size())\n print(loc_targets.size())\n print(cls_targets.size())\n grid = torchvision.utils.make_grid(images, 1)\n torchvision.utils.save_image(grid, 'a.jpg')\n\n#test()\n\n" ]
[ [ "torch.zeros", "torch.stack", "numpy.reshape", "torch.from_numpy", "numpy.loadtxt", "torch.utils.data.DataLoader" ] ]
MFSJMenger/pysurf
[ "99c6a94d4cb5046f16a0961b907061d989ffb6dc" ]
[ "pysurf/sampling/nm_sampler.py" ]
[ "import numpy as np\n\nfrom colt import Colt\nfrom ..spp import ModelFactory\nfrom ..molden import MoldenParser\nfrom ..constants import U_TO_AMU, CM_TO_HARTREE\n\nfrom .base_sampler import CrdSamplerBase\nfrom .normalmodes import NormalModes as nm\nfrom ..system import Molecule\nfrom ..system.atominfo import ATOMNAME_TO_ID, MASSES\nfrom .normalmodes import Mode\nfrom .base_sampler import CrdCondition\nfrom .n_grid_iter import NGridIterator\n\nclass Moldenfile(Colt):\n _questions = \"\"\"\n moldenfile = :: existing_file\n \"\"\"\n\nclass NMSampler(CrdSamplerBase):\n _questions = \"\"\"\n # Where do the coorindates come from? If it is a moldenfile, modes are converted into dimensionless \n # normal modes. If they come from a model, they are used as they are.\n from = molden :: str :: [molden, model]\n\n stepsize = 0.3 :: float\n\n include_combinations = False :: bool\n\n # Decide whether sampling should be done along all normal modes\n select_nmodes = all :: str :: [all, select]\n\n \"\"\"\n\n _from = {'molden': Moldenfile,\n 'model': ModelFactory,\n }\n\n _select_nmodes = {\n 'all': \"\",\n 'select': \"mode_list = :: ilist\"\n }\n\n _step = 0\n _mode = 0 \n _sign = 1\n\n @classmethod\n def _extend_questions(cls, questions):\n questions.generate_cases(\"from\", {name: method.questions\n for name, method in cls._from.items()})\n questions.generate_cases(\"select_nmodes\", {name: value\n for name, value in cls._select_nmodes.items()})\n\n\n def __init__(self, config, system, modes, start=0):\n self.system = system\n self.modes = modes\n self.config = config\n if config['select_nmodes'] == 'all':\n self.sel_modes = modes\n else:\n self.sel_modes = []\n for idx in config['select_nmodes']['mode_list']:\n self.sel_modes += [modes[idx]]\n self.nmodes_sel = len(self.sel_modes)\n self.config = config\n self.stepsize = config['stepsize']\n if config['from'].value == 'model':\n self.model = True\n else:\n self.model = False\n \n self._check_modes()\n if config['include_combinations']:\n self.myiter = NGridIterator(len(self.sel_modes))\n\n if start != 0:\n for i in range(start):\n self.get_condition()\n \n\n def get_init(self):\n \"\"\"Return all infos needed for the initial condition parser\"\"\"\n return {'system': self.system,\n 'modes': self.modes}\n\n def get_condition(self):\n if self.config['include_combinations']:\n return self.get_condition_combined()\n else:\n return self.get_condition_pure()\n\n\n def get_condition_combined(self):\n vec = next(self.myiter)\n crd = np.copy(self.system.crd)\n\n for fac, mode in zip(vec, self.sel_modes):\n crd += np.array(fac*self.stepsize) * np.array(mode.displacements)\n print(crd)\n return CrdCondition(crd)\n \n\n def get_condition_pure(self):\n \"\"\"Return a single created initial condition\"\"\"\n crd = np.copy(self.system.crd)\n # for reference point:\n if self._step == 0:\n self._step += 1\n return CrdCondition(crd)\n \n crd += self._sign * self._step * self.stepsize * np.array(self.sel_modes[self._mode].displacements)\n\n # to sample in both directions\n if self._sign == 1:\n self._sign = -1\n return CrdCondition(crd)\n\n if self._mode == self.nmodes_sel - 1:\n self._sign = 1\n self._mode = 0\n self._step += 1\n else:\n self._sign = 1\n self._mode += 1\n cond = CrdCondition(crd)\n return cond\n\n @classmethod\n def from_config(cls, config, start=0):\n \"\"\" \"\"\"\n if config['from'] == 'molden':\n return cls.from_molden(config, start)\n elif config['from'].value == 'model':\n return cls.from_model(config, start)\n\n @classmethod\n def 
from_molden(cls, config, start=0):\n filename = config['from']['moldenfile']\n molden = MoldenParser(filename, ['Info', 'Freqs', 'FrCoords', 'FrNormCoords'])\n # get molecule info\n atoms = [atom for atom, _, _, _ in molden['FrCoords']]\n atomids = np.array([ATOMNAME_TO_ID[atom] for atom in atoms])\n crd = np.array([[x, y, z] for _, x, y, z in molden['FrCoords']])\n masses = np.array([MASSES[idx]*U_TO_AMU for idx in atomids])\n # create molecule\n molecule = Molecule(atomids, crd, masses)\n #\n print('nmsampler, molecule', molecule)\n modes = [Mode(freq * CM_TO_HARTREE, np.array(molden['FrNormCoords'][imode]))\n for imode, freq in enumerate(molden['Freqs'])]\n #\n modes = nm.create_mass_weighted_normal_modes(modes, molecule)\n #\n return cls(config, molecule, modes, start=start)\n\n @classmethod\n def from_model(cls, config, start=0):\n model = ModelFactory.plugin_from_config(config['from']['model'])\n return cls(config, system=model, modes=model.modes, start=start)\n\n def _check_modes(self):\n img = [mode.freq for mode in self.modes if mode.freq < 0.0]\n nimg_freq = len(img)\n if nimg_freq == 0:\n return\n\n def to_strg(number):\n return \"%12.8f\" % number\n\n print(f\"Found {nimg_freq} imaginary frequencies:\")\n print(\"[\" + \", \".join(map(to_strg, img)) + \"]\")\n\n\n \n\n" ]
[ [ "numpy.array", "numpy.copy" ] ]
TTKunt/keras-attention-mechanism
[ "0309dbf79da32c0d8d90925660fc4cc7fe53dc8a" ]
[ "examples/imdb.py" ]
[ "import numpy\nimport numpy as np\nfrom tensorflow.keras import Input\nfrom tensorflow.keras import Model\nfrom tensorflow.keras.callbacks import Callback\nfrom tensorflow.keras.datasets import imdb\nfrom tensorflow.keras.layers import Dense\nfrom tensorflow.keras.layers import Dropout\nfrom tensorflow.keras.layers import Embedding\nfrom tensorflow.keras.layers import LSTM\nfrom tensorflow.keras.preprocessing import sequence\n\nfrom attention import attention_3d_block\n\n\ndef train_and_evaluate_model_on_imdb(add_attention=True):\n numpy.random.seed(7)\n # load the dataset but only keep the top n words, zero the rest\n top_words = 5000\n (X_train, y_train), (X_test, y_test) = imdb.load_data(num_words=top_words)\n # truncate and pad input sequences\n max_review_length = 500\n X_train = sequence.pad_sequences(X_train, maxlen=max_review_length)\n X_test = sequence.pad_sequences(X_test, maxlen=max_review_length)\n # create the model\n embedding_vector_length = 32\n i = Input(shape=(max_review_length,))\n x = Embedding(top_words, embedding_vector_length, input_length=max_review_length)(i)\n x = Dropout(0.5)(x)\n if add_attention:\n x = LSTM(100, return_sequences=True)(x)\n x = attention_3d_block(x)\n else:\n x = LSTM(100, return_sequences=False)(x)\n x = Dense(350, activation='relu')(x) # same number of parameters so fair comparison.\n x = Dropout(0.5)(x)\n x = Dense(1, activation='sigmoid')(x)\n\n model = Model(inputs=[i], outputs=[x])\n model.compile(loss='binary_crossentropy', optimizer='adam', metrics=['accuracy'])\n print(model.summary())\n\n class RecordBestTestAccuracy(Callback):\n\n def __init__(self):\n super().__init__()\n self.val_accuracies = []\n self.val_losses = []\n\n def on_epoch_end(self, epoch, logs=None):\n self.val_accuracies.append(logs['val_accuracy'])\n self.val_losses.append(logs['val_loss'])\n\n rbta = RecordBestTestAccuracy()\n model.fit(X_train, y_train, validation_data=(X_test, y_test), epochs=10, batch_size=64, callbacks=[rbta])\n\n print(f\"Max Test Accuracy: {100 * np.max(rbta.val_accuracies):.2f} %\")\n print(f\"Mean Test Accuracy: {100 * np.mean(rbta.val_accuracies):.2f} %\")\n\n\ndef main():\n # 10 epochs.\n # Max Test Accuracy: 88.02 %\n # Mean Test Accuracy: 87.26 %\n train_and_evaluate_model_on_imdb(add_attention=False)\n # 10 epochs.\n # Max Test Accuracy: 88.74 %\n # Mean Test Accuracy: 88.00 %\n train_and_evaluate_model_on_imdb(add_attention=True)\n\n\nif __name__ == '__main__':\n main()\n" ]
[ [ "tensorflow.keras.preprocessing.sequence.pad_sequences", "tensorflow.keras.datasets.imdb.load_data", "numpy.max", "numpy.random.seed", "numpy.mean", "tensorflow.keras.layers.Embedding", "tensorflow.keras.layers.Dropout", "tensorflow.keras.layers.Dense", "tensorflow.keras.Model", "tensorflow.keras.layers.LSTM", "tensorflow.keras.Input" ] ]
sheha919/new_directory
[ "b2564ceee1fefa948efa63ef5620b0fc537b2bc9" ]
[ "first_module_name/molecule.py" ]
[ "\"\"\"\r\nmolecule.py\r\nA python package for the MolSSI Software Summer School.\r\nContains a molecule class\r\n\"\"\"\r\n\r\nimport numpy as np\r\n\r\nfrom .measure import calculate_angle, calculate_distance\r\n\r\n\r\nclass Molecule:\r\n def __init__(self, name, symbols, coordinates):\r\n if isinstance(name, str):\r\n self.name = name\r\n else:\r\n raise TypeError(\"Name is not a string.\")\r\n\r\n self.symbols = symbols\r\n self._coordinates = coordinates\r\n self.bonds = self.build_bond_list()\r\n\r\n @property\r\n def num_atoms(self):\r\n return len(self.coordinates)\r\n\r\n @property\r\n def coordinates(self):\r\n return self._coordinates\r\n\r\n @coordinates.setter\r\n def coordinates(self, new_coordinates):\r\n self._coordinates = new_coordinates\r\n self.bonds = self.build_bond_list()\r\n\r\n def build_bond_list(self, max_bond=2.93, min_bond=0):\r\n \"\"\"\r\n Build a list of bonds based on a distance criteria.\r\n Atoms within a specified distance of one another will be considered bonded.\r\n Parameters\r\n ----------\r\n max_bond : float, optional\r\n min_bond : float, optional\r\n Returns\r\n -------\r\n bond_list : list\r\n List of bonded atoms. Returned as list of tuples where the values are the atom indices.\r\n \"\"\"\r\n\r\n bonds = {}\r\n\r\n for atom1 in range(self.num_atoms):\r\n for atom2 in range(atom1, self.num_atoms):\r\n distance = calculate_distance(self.coordinates[atom1], self.coordinates[atom2])\r\n\r\n if distance > min_bond and distance < max_bond:\r\n bonds[(atom1, atom2)] = distance\r\n\r\n return bonds\r\n\r\n\r\nif __name__ == \"__main__\":\r\n # Do something if this file is invoked on its own\r\n random_coordinates = np.random.random([3, 3])\r\n name = \"my molecule\"\r\n\r\n symbols = [\"H\", \"O\", \"H\"]\r\n my_molecule = Molecule(name, symbols, random_coordinates)\r\n\r\n print(F'There are {len(my_molecule.bonds)} bonds')\r\n print(F'The coordinates are {my_molecule.coordinates}')\r\n\r\n random_coordinates[0] += 100\r\n\r\n my_molecule.coordinates = random_coordinates\r\n\r\n print(F'\\n\\nThere are {len(my_molecule.bonds)} bonds')\r\n print(F'The coordinates are {my_molecule.coordinates}')\r\n" ]
[ [ "numpy.random.random" ] ]
guoyi118/KnowledgeEditor
[ "167d75149d77cd5371c9f625a371e0443098ae99" ]
[ "src/models/bart_seq2seq_kilt.py" ]
[ "from argparse import ArgumentParser\n\nimport pytorch_lightning as pl\nimport torch\nfrom pytorch_lightning import LightningModule\nfrom torch.utils.data import DataLoader\nfrom transformers import (\n BartForConditionalGeneration,\n BartTokenizer,\n get_linear_schedule_with_warmup,\n)\nimport jsonlines\nfrom src.data.seq2seq_kilt import Seq2SeqKILT\nfrom src.utils import label_smoothed_nll_loss, batch_it\n\n\nclass BartSeq2Seq(LightningModule):\n @staticmethod\n def add_model_specific_args(parent_parser):\n parser = ArgumentParser(parents=[parent_parser], add_help=False)\n parser.add_argument(\n \"--train_data_path\",\n type=str,\n default=\"/root/sparqling-queries/data/break/logical-forms-fixed/train_data_df.jsonl\",\n )\n parser.add_argument(\n \"--dev_data_path\",\n type=str,\n default=\"/root/sparqling-queries/data/break/logical-forms-fixed/dev_data_df.jsonl\",\n )\n parser.add_argument(\"--batch_size\", type=int, default=48)\n parser.add_argument(\"--lr\", type=float, default=3e-5)\n parser.add_argument(\"--max_length\", type=int, default=32)\n parser.add_argument(\"--weight_decay\", type=int, default=0.01)\n parser.add_argument(\"--total_num_updates\", type=int, default=50000)\n parser.add_argument(\"--warmup_updates\", type=int, default=500)\n parser.add_argument(\"--num_workers\", type=int, default=0)\n\n parser.add_argument(\"--model_name\", type=str, default=\"facebook/bart-base\")\n parser.add_argument(\"--eps\", type=float, default=0.1)\n return parser\n\n def __init__(self, *args, **kwargs):\n super().__init__()\n self.save_hyperparameters()\n self.tokenizer = BartTokenizer.from_pretrained(self.hparams.model_name)\n self.model = BartForConditionalGeneration.from_pretrained(\n self.hparams.model_name\n )\n\n self.train_acc = pl.metrics.Accuracy()\n self.valid_acc = pl.metrics.Accuracy()\n\n def train_dataloader(self, shuffle=True):\n if not hasattr(self, \"train_dataset\"):\n self.train_dataset = Seq2SeqKILT(\n tokenizer=self.tokenizer,\n data_path=self.hparams.train_data_path,\n max_length=self.hparams.max_length,\n templates=True,\n )\n return DataLoader(\n self.train_dataset,\n batch_size=self.hparams.batch_size,\n collate_fn=self.train_dataset.collate_fn,\n num_workers=self.hparams.num_workers,\n shuffle=shuffle,\n )\n\n def val_dataloader(self):\n if not hasattr(self, \"val_dataset\"):\n self.val_dataset = Seq2SeqKILT(\n tokenizer=self.tokenizer,\n data_path=self.hparams.dev_data_path,\n max_length=self.hparams.max_length,\n validation=True,\n templates=True,\n )\n return DataLoader(\n self.val_dataset,\n batch_size=4,\n collate_fn=self.val_dataset.collate_fn,\n num_workers=self.hparams.num_workers,\n )\n\n def forward(self, batch):\n return self.model(\n input_ids=batch[\"src_input_ids\"],\n attention_mask=batch[\"src_attention_mask\"],\n decoder_input_ids=batch[\"trg_input_ids\"][:, :-1],\n decoder_attention_mask=batch[\"trg_attention_mask\"][:, :-1],\n use_cache=False,\n ).logits\n\n def training_step(self, batch, batch_idx=None):\n logits = self.forward(batch)\n\n loss, nll_loss = label_smoothed_nll_loss(\n logits.log_softmax(-1),\n batch[\"trg_input_ids\"][:, 1:],\n epsilon=self.hparams.eps,\n ignore_index=self.tokenizer.pad_token_id,\n )\n\n ntokens = batch[\"trg_attention_mask\"][:, 1:].sum()\n loss, nll_loss = loss / ntokens, nll_loss / ntokens\n\n self.log(\"nll_loss\", nll_loss, on_step=True, on_epoch=False, prog_bar=True)\n\n return {\"loss\": loss}\n\n def validation_step(self, batch, batch_idx=None):\n gold = [b[\"trg\"] for b in batch[\"raw\"]]\n guess = 
self.tokenizer.batch_decode(\n self.model.generate(\n input_ids=batch[\"src_input_ids\"],\n attention_mask=batch[\"src_attention_mask\"],\n min_length=0,\n num_beams=5,\n num_return_sequences=1,\n ),\n skip_special_tokens=True,\n )\n\n acc = torch.tensor(\n [\n a.lower().strip() in [c.lower().strip() for c in b]\n for a, b in zip(guess, gold)\n ]\n ).long()\n self.valid_acc(acc, torch.ones_like(acc))\n self.log(\n \"valid_acc\", self.valid_acc, on_step=False, on_epoch=True, prog_bar=True\n )\n\n return {'batch': batch[\"raw\"], \"guess\": guess}\n\n\n def validation_epoch_end(self, validation_step_outputs):\n with jsonlines.open('/root/KnowledgeEditor/result.jsonl', \"a\") as f:\n f.write_all([validation_step_outputs])\n\n\n def sample(self, sentences, num_return_sequences=1):\n self.eval()\n with torch.no_grad():\n return list(\n batch_it(\n self.tokenizer.batch_decode(\n self.model.generate(\n **{\n k: v.to(self.device)\n for k, v in self.tokenizer(\n sentences,\n return_tensors=\"pt\",\n padding=True,\n max_length=self.hparams.max_length,\n truncation=True,\n ).items()\n },\n min_length=0,\n num_beams=5,\n num_return_sequences=num_return_sequences,\n ),\n skip_special_tokens=True,\n ),\n num_return_sequences,\n )\n )\n\n def configure_optimizers(self):\n no_decay = [\"bias\", \"LayerNorm.weight\"]\n optimizer_grouped_parameters = [\n {\n \"params\": [\n p\n for n, p in self.model.named_parameters()\n if not any(nd in n for nd in no_decay)\n ],\n \"weight_decay\": self.hparams.weight_decay,\n },\n {\n \"params\": [\n p\n for n, p in self.model.named_parameters()\n if any(nd in n for nd in no_decay)\n ],\n \"weight_decay\": 0.0,\n },\n ]\n\n optimizer = torch.optim.AdamW(\n optimizer_grouped_parameters,\n lr=self.hparams.lr,\n weight_decay=self.hparams.weight_decay,\n )\n\n scheduler = get_linear_schedule_with_warmup(\n optimizer,\n num_warmup_steps=self.hparams.warmup_updates,\n num_training_steps=self.hparams.total_num_updates,\n )\n\n return [optimizer], [\n {\"scheduler\": scheduler, \"interval\": \"step\", \"frequency\": 1}\n ]\n" ]
[ [ "torch.no_grad", "torch.optim.AdamW", "torch.utils.data.DataLoader", "torch.ones_like" ] ]
minhtannguyen/RAdam
[ "44f403288df375bae0785cc82dd8c888eaaaa441" ]
[ "cifar_imagenet/models_backup/imagenet_v3_1/horesnet.py" ]
[ "from __future__ import division\n\"\"\" \nCreates a ResNeXt Model as defined in:\nXie, S., Girshick, R., Dollar, P., Tu, Z., & He, K. (2016). \nAggregated residual transformations for deep neural networks. \narXiv preprint arXiv:1611.05431.\nimport from https://github.com/facebookresearch/ResNeXt/blob/master/models/resnext.lua\n\"\"\"\nimport math\nimport torch.nn as nn\nimport torch.nn.functional as F\nfrom torch.nn import init\nimport torch\n\n__all__ = ['HOResNet', 'horesnet18', 'horesnet34', 'horesnet50', 'horesnet101',\n 'horesnet152', 'horesnext50_32x4d', 'horesnext101_32x8d',\n 'howide_resnet50_2', 'howide_resnet101_2']\n\ndef conv3x3(in_planes, out_planes, stride=1, groups=1, dilation=1):\n \"\"\"3x3 convolution with padding\"\"\"\n return nn.Conv2d(in_planes, out_planes, kernel_size=3, stride=stride,\n padding=dilation, groups=groups, bias=False, dilation=dilation)\n\n\ndef conv1x1(in_planes, out_planes, stride=1):\n \"\"\"1x1 convolution\"\"\"\n return nn.Conv2d(in_planes, out_planes, kernel_size=1, stride=stride, bias=False)\n\n\nclass BasicBlock(nn.Module):\n expansion = 1\n __constants__ = ['downsample']\n\n def __init__(self, inplanes, planes, stride=1, downsample=None, groups=1,\n base_width=64, dilation=1, norm_layer=None, eta=1.0):\n super(BasicBlock, self).__init__()\n if norm_layer is None:\n norm_layer = nn.BatchNorm2d\n if groups != 1 or base_width != 64:\n raise ValueError('BasicBlock only supports groups=1 and base_width=64')\n if dilation > 1:\n raise NotImplementedError(\"Dilation > 1 not supported in BasicBlock\")\n # Both self.conv1 and self.downsample layers downsample the input when stride != 1\n self.conv1 = conv3x3(inplanes, planes, stride)\n self.bn1 = norm_layer(planes)\n self.relu = nn.ReLU(inplace=True)\n self.conv2 = conv3x3(planes, planes)\n self.bn2 = norm_layer(planes)\n self.downsample = downsample\n self.stride = stride\n self.eta = eta\n self.scale_factor = (self.eta - 1)/(self.eta + 2)\n\n def forward(self, invec):\n x, y = invec[0], invec[1]\n \n identityx = x\n identityy = y\n\n out = self.conv1(x)\n out = self.bn1(out)\n out = self.relu(out)\n\n out = self.conv2(out)\n out = self.bn2(out)\n\n if self.downsample is not None:\n identityx = self.downsample(x)\n identityy = self.downsample(y)\n\n outy = out + identityx\n outx = (1 + self.scale_factor) * outy - self.scale_factor * identityy \n outx = self.relu(outx)\n\n return [outx, outy]\n\n\nclass Bottleneck(nn.Module):\n expansion = 4\n __constants__ = ['downsample']\n\n def __init__(self, inplanes, planes, stride=1, downsample=None, groups=1,\n base_width=64, dilation=1, norm_layer=None, eta=1.0):\n super(Bottleneck, self).__init__()\n if norm_layer is None:\n norm_layer = nn.BatchNorm2d\n width = int(planes * (base_width / 64.)) * groups\n # Both self.conv2 and self.downsample layers downsample the input when stride != 1\n self.conv1 = conv1x1(inplanes, width)\n self.bn1 = norm_layer(width)\n self.conv2 = conv3x3(width, width, stride, groups, dilation)\n self.bn2 = norm_layer(width)\n self.conv3 = conv1x1(width, planes * self.expansion)\n self.bn3 = norm_layer(planes * self.expansion)\n self.relu = nn.ReLU(inplace=True)\n self.downsample = downsample\n self.stride = stride\n self.eta = eta\n self.scale_factor = (self.eta - 1)/(self.eta + 2)\n\n def forward(self, invec):\n x, y = invec[0], invec[1]\n \n identityx = x\n identityy = y\n\n out = self.conv1(x)\n out = self.bn1(out)\n out = self.relu(out)\n\n out = self.conv2(out)\n out = self.bn2(out)\n out = self.relu(out)\n\n out = 
self.conv3(out)\n out = self.bn3(out)\n\n if self.downsample is not None:\n identityx = self.downsample(x)\n identityy = self.downsample(y)\n \n outy = out + identityx\n outx = (1 + self.scale_factor) * outy - self.scale_factor * identityy \n outx = self.relu(outx)\n\n return [outx, outy]\n\n\nclass HOResNet(nn.Module):\n\n def __init__(self, block, layers, eta=1.0, num_classes=1000, zero_init_residual=False,\n groups=1, width_per_group=64, replace_stride_with_dilation=None,\n norm_layer=None):\n super(HOResNet, self).__init__()\n if norm_layer is None:\n norm_layer = nn.BatchNorm2d\n self._norm_layer = norm_layer\n\n self.inplanes = 64\n self.dilation = 1\n self.eta = eta\n if replace_stride_with_dilation is None:\n # each element in the tuple indicates if we should replace\n # the 2x2 stride with a dilated convolution instead\n replace_stride_with_dilation = [False, False, False]\n if len(replace_stride_with_dilation) != 3:\n raise ValueError(\"replace_stride_with_dilation should be None \"\n \"or a 3-element tuple, got {}\".format(replace_stride_with_dilation))\n self.groups = groups\n self.base_width = width_per_group\n self.conv1 = nn.Conv2d(3, self.inplanes, kernel_size=7, stride=2, padding=3,\n bias=False)\n self.bn1 = norm_layer(self.inplanes)\n self.relu = nn.ReLU(inplace=True)\n self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)\n self.layer1 = self._make_layer(block, 64, layers[0], eta=self.eta)\n self.layer2 = self._make_layer(block, 128, layers[1], stride=2,\n dilate=replace_stride_with_dilation[0], eta=self.eta)\n self.layer3 = self._make_layer(block, 256, layers[2], stride=2,\n dilate=replace_stride_with_dilation[1], eta=self.eta)\n self.layer4 = self._make_layer(block, 512, layers[3], stride=2,\n dilate=replace_stride_with_dilation[2], eta=self.eta)\n self.avgpool = nn.AdaptiveAvgPool2d((1, 1))\n self.fc = nn.Linear(512 * block.expansion, num_classes)\n\n for m in self.modules():\n if isinstance(m, nn.Conv2d):\n nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu')\n elif isinstance(m, (nn.BatchNorm2d, nn.GroupNorm)):\n nn.init.constant_(m.weight, 1)\n nn.init.constant_(m.bias, 0)\n\n # Zero-initialize the last BN in each residual branch,\n # so that the residual branch starts with zeros, and each residual block behaves like an identity.\n # This improves the model by 0.2~0.3% according to https://arxiv.org/abs/1706.02677\n if zero_init_residual:\n for m in self.modules():\n if isinstance(m, Bottleneck):\n nn.init.constant_(m.bn3.weight, 0)\n elif isinstance(m, BasicBlock):\n nn.init.constant_(m.bn2.weight, 0)\n\n def _make_layer(self, block, planes, blocks, stride=1, dilate=False, eta=1.0):\n norm_layer = self._norm_layer\n downsample = None\n previous_dilation = self.dilation\n if dilate:\n self.dilation *= stride\n stride = 1\n if stride != 1 or self.inplanes != planes * block.expansion:\n downsample = nn.Sequential(\n conv1x1(self.inplanes, planes * block.expansion, stride),\n norm_layer(planes * block.expansion),\n )\n\n layers = []\n layers.append(block(self.inplanes, planes, stride, downsample, self.groups,\n self.base_width, previous_dilation, norm_layer, eta=eta))\n self.inplanes = planes * block.expansion\n for _ in range(1, blocks):\n layers.append(block(self.inplanes, planes, groups=self.groups,\n base_width=self.base_width, dilation=self.dilation,\n norm_layer=norm_layer, eta=eta))\n\n return nn.Sequential(*layers)\n\n def _forward(self, x):\n x = self.conv1(x)\n x = self.bn1(x)\n x = self.relu(x)\n x = self.maxpool(x)\n \n out = 
[x, x]\n out = self.layer1(out)\n out = self.layer2(out)\n out = self.layer3(out)\n out = self.layer4(out)\n\n x = self.avgpool(out[1])\n x = torch.flatten(x, 1)\n x = self.fc(x)\n\n return x\n\n # Allow for accessing forward method in a inherited class\n forward = _forward\n\ndef _horesnet(arch, block, layers, pretrained, progress, **kwargs):\n model = HOResNet(block, layers, **kwargs)\n\n if pretrained:\n state_dict = load_state_dict_from_url(model_urls[arch],\n progress=progress)\n model.load_state_dict(state_dict)\n return model\n\n\ndef horesnet18(pretrained=False, progress=True, **kwargs):\n r\"\"\"ResNet-18 model from\n `\"Deep Residual Learning for Image Recognition\" <https://arxiv.org/pdf/1512.03385.pdf>`_\n Args:\n pretrained (bool): If True, returns a model pre-trained on ImageNet\n progress (bool): If True, displays a progress bar of the download to stderr\n \"\"\"\n return _horesnet('horesnet18', BasicBlock, [2, 2, 2, 2], pretrained, progress,\n **kwargs)\n\n\ndef horesnet34(pretrained=False, progress=True, **kwargs):\n r\"\"\"ResNet-34 model from\n `\"Deep Residual Learning for Image Recognition\" <https://arxiv.org/pdf/1512.03385.pdf>`_\n Args:\n pretrained (bool): If True, returns a model pre-trained on ImageNet\n progress (bool): If True, displays a progress bar of the download to stderr\n \"\"\"\n return _horesnet('horesnet34', BasicBlock, [3, 4, 6, 3], pretrained, progress,\n **kwargs)\n\n\ndef horesnet50(pretrained=False, progress=True, **kwargs):\n r\"\"\"ResNet-50 model from\n `\"Deep Residual Learning for Image Recognition\" <https://arxiv.org/pdf/1512.03385.pdf>`_\n Args:\n pretrained (bool): If True, returns a model pre-trained on ImageNet\n progress (bool): If True, displays a progress bar of the download to stderr\n \"\"\"\n return _horesnet('horesnet50', Bottleneck, [3, 4, 6, 3], pretrained, progress,\n **kwargs)\n\n\ndef horesnet101(pretrained=False, progress=True, **kwargs):\n r\"\"\"ResNet-101 model from\n `\"Deep Residual Learning for Image Recognition\" <https://arxiv.org/pdf/1512.03385.pdf>`_\n Args:\n pretrained (bool): If True, returns a model pre-trained on ImageNet\n progress (bool): If True, displays a progress bar of the download to stderr\n \"\"\"\n return _horesnet('horesnet101', Bottleneck, [3, 4, 23, 3], pretrained, progress,\n **kwargs)\n\n\ndef horesnet152(pretrained=False, progress=True, **kwargs):\n r\"\"\"ResNet-152 model from\n `\"Deep Residual Learning for Image Recognition\" <https://arxiv.org/pdf/1512.03385.pdf>`_\n Args:\n pretrained (bool): If True, returns a model pre-trained on ImageNet\n progress (bool): If True, displays a progress bar of the download to stderr\n \"\"\"\n return _horesnet('horesnet152', Bottleneck, [3, 8, 36, 3], pretrained, progress,\n **kwargs)\n\n\ndef horesnext50_32x4d(pretrained=False, progress=True, **kwargs):\n r\"\"\"ResNeXt-50 32x4d model from\n `\"Aggregated Residual Transformation for Deep Neural Networks\" <https://arxiv.org/pdf/1611.05431.pdf>`_\n Args:\n pretrained (bool): If True, returns a model pre-trained on ImageNet\n progress (bool): If True, displays a progress bar of the download to stderr\n \"\"\"\n kwargs['groups'] = 32\n kwargs['width_per_group'] = 4\n return _horesnet('horesnext50_32x4d', Bottleneck, [3, 4, 6, 3],\n pretrained, progress, **kwargs)\n\n\ndef horesnext101_32x8d(pretrained=False, progress=True, **kwargs):\n r\"\"\"ResNeXt-101 32x8d model from\n `\"Aggregated Residual Transformation for Deep Neural Networks\" <https://arxiv.org/pdf/1611.05431.pdf>`_\n Args:\n pretrained 
(bool): If True, returns a model pre-trained on ImageNet\n progress (bool): If True, displays a progress bar of the download to stderr\n \"\"\"\n kwargs['groups'] = 32\n kwargs['width_per_group'] = 8\n return _horesnet('horesnext101_32x8d', Bottleneck, [3, 4, 23, 3],\n pretrained, progress, **kwargs)\n\n\ndef howide_resnet50_2(pretrained=False, progress=True, **kwargs):\n r\"\"\"Wide ResNet-50-2 model from\n `\"Wide Residual Networks\" <https://arxiv.org/pdf/1605.07146.pdf>`_\n The model is the same as ResNet except for the bottleneck number of channels\n which is twice larger in every block. The number of channels in outer 1x1\n convolutions is the same, e.g. last block in ResNet-50 has 2048-512-2048\n channels, and in Wide ResNet-50-2 has 2048-1024-2048.\n Args:\n pretrained (bool): If True, returns a model pre-trained on ImageNet\n progress (bool): If True, displays a progress bar of the download to stderr\n \"\"\"\n kwargs['width_per_group'] = 64 * 2\n return _horesnet('howide_resnet50_2', Bottleneck, [3, 4, 6, 3],\n pretrained, progress, **kwargs)\n\n\ndef howide_resnet101_2(pretrained=False, progress=True, **kwargs):\n r\"\"\"Wide ResNet-101-2 model from\n `\"Wide Residual Networks\" <https://arxiv.org/pdf/1605.07146.pdf>`_\n The model is the same as ResNet except for the bottleneck number of channels\n which is twice larger in every block. The number of channels in outer 1x1\n convolutions is the same, e.g. last block in ResNet-50 has 2048-512-2048\n channels, and in Wide ResNet-50-2 has 2048-1024-2048.\n Args:\n pretrained (bool): If True, returns a model pre-trained on ImageNet\n progress (bool): If True, displays a progress bar of the download to stderr\n \"\"\"\n kwargs['width_per_group'] = 64 * 2\n return _horesnet('howide_resnet101_2', Bottleneck, [3, 4, 23, 3],\n pretrained, progress, **kwargs)" ]
[ [ "torch.nn.Linear", "torch.flatten", "torch.nn.MaxPool2d", "torch.nn.Sequential", "torch.nn.init.constant_", "torch.nn.init.kaiming_normal_", "torch.nn.ReLU", "torch.nn.Conv2d", "torch.nn.AdaptiveAvgPool2d" ] ]
dywsjtu/model_search
[ "116c4f9016d8b89cf06d057dda020dae3371f211" ]
[ "model_search/data/csv_data.py" ]
[ "# Copyright 2021 Google LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\"Simple csv reader for small classification problems.\"\"\"\n\nfrom model_search.data import data\nimport tensorflow.compat.v2 as tf\n\n\nclass Provider(data.Provider):\n \"\"\"A csv data provider.\"\"\"\n\n def __init__(self,\n label_index,\n logits_dimension,\n record_defaults,\n filename,\n field_delim=\",\"):\n self._filename = filename\n self._logits_dimension = logits_dimension\n self._record_defaults = record_defaults\n self._field_delim = field_delim\n self._label_index = label_index\n # Indices of the features\n self._features = [str(i) for i in range(len(record_defaults))]\n\n def get_input_fn(self, hparams, mode, batch_size):\n \"\"\"See `data.Provider` get_input_fn.\"\"\"\n del hparams\n\n def input_fn(params=None):\n \"\"\"Provides batches of data.\"\"\"\n del params\n\n features_dataset = tf.data.experimental.CsvDataset(\n self._filename,\n record_defaults=self._record_defaults,\n header=True,\n field_delim=self._field_delim,\n use_quote_delim=True)\n\n def _map_fn(*args):\n features = {str(i): tensor for i, tensor in enumerate(args)}\n label = features.pop(str(self._label_index))\n return features, label\n\n features_dataset = features_dataset.map(_map_fn).prefetch(\n tf.data.experimental.AUTOTUNE).batch(batch_size)\n if mode == tf.estimator.ModeKeys.TRAIN:\n features_dataset = features_dataset.repeat().shuffle(100 * batch_size)\n\n return features_dataset\n\n return input_fn\n\n def get_serving_input_fn(self, hparams):\n \"\"\"Returns an `input_fn` for serving in an exported SavedModel.\n\n Args:\n hparams: tf.HParams object.\n\n Returns:\n Returns an `input_fn` that takes no arguments and returns a\n `ServingInputReceiver`.\n \"\"\"\n features_ind = [\n idx for idx in self._features if idx != str(self._label_index)\n ]\n tf.compat.v1.disable_eager_execution()\n features = {\n idx: tf.compat.v1.placeholder(tf.float32, [None], idx)\n for idx in features_ind\n }\n return tf.estimator.export.build_raw_serving_input_receiver_fn(\n features=features)\n\n def number_of_classes(self):\n return self._logits_dimension\n\n def get_feature_columns(self):\n \"\"\"Returns feature columns.\"\"\"\n features = [f for f in self._features if f != str(self._label_index)]\n feature_columns = [\n tf.feature_column.numeric_column(key=key) for key in features\n ]\n return feature_columns\n" ]
[ [ "tensorflow.compat.v2.compat.v1.disable_eager_execution", "tensorflow.compat.v2.data.experimental.CsvDataset", "tensorflow.compat.v2.compat.v1.placeholder", "tensorflow.compat.v2.feature_column.numeric_column", "tensorflow.compat.v2.estimator.export.build_raw_serving_input_receiver_fn" ] ]
championway/asv_ros
[ "4ded50c48077e1e63586cd32be2354633c163975" ]
[ "catkin_ws/src/localization/src/localization_gps_imu_gps_duplicate.py" ]
[ "#!/usr/bin/env python\n'''\nAuthor: Tony Hsiao \nDate:2019/01/16 \nLast update: 2019/01/16 \nLocailization by gps and imu\n'''\nimport rospy\nfrom sensor_msgs.msg import NavSatFix, Imu\nfrom nav_msgs.msg import Odometry\nfrom geometry_msgs.msg import Pose\nfrom std_msgs.msg import Float64\nfrom message_filters import ApproximateTimeSynchronizer, TimeSynchronizer\nfrom std_srvs.srv import EmptyRequest, EmptyResponse, Empty\nfrom duckiepond.srv import SetValue, SetValueRequest, SetValueResponse\nimport message_filters\n\nfrom geodesy.utm import UTMPoint, fromLatLong\nimport tf\nimport math\nfrom scipy.stats import norm\nimport numpy as np\n\nclass LocailizationGPSImu(object):\n def __init__(self):\n self.node_name = rospy.get_name()\n\n self.pose = Pose()\n self.prior_pose = Pose()\n self.prior_roll = 0\n self.prior_pitch = 0\n self.prior_yaw = 0\n self.start = False \n self.covariance = np.zeros((36,), dtype=float)\n self.odometry = Odometry()\n self.br = tf.TransformBroadcaster()\n \n # param\n self.imu_offset = 0\n self.lat_orig = rospy.get_param('~latitude', 0.0)\n self.long_orig = rospy.get_param('~longitude', 0.0)\n self.utm_orig = fromLatLong(self.lat_orig, self.long_orig)\n\n # Service\n self.srv_imu_offset = rospy.Service('~imu_offset', SetValue, self.cb_srv_imu_offest)\n\n # Publisher\n self.pub_odm = rospy.Publisher(\"~odometry\", Odometry, queue_size=1)\n\n # Subscriber\n sub_imu = rospy.Subscriber(\"~imu/data\", Imu,self.cb_imu,queue_size=1)\n sub_gps = rospy.Subscriber(\"~fix\", NavSatFix,self.cb_gps,queue_size=1)\n \n def cb_gps(self, msg_gps):\n utm_point = fromLatLong(msg_gps.latitude, msg_gps.longitude)\n self.pose.position.x = utm_point.easting - self.utm_orig.easting\n self.pose.position.y = utm_point.northing - self.utm_orig.northing\n self.pose.position.z = 0\n\n\n def cb_imu(self, msg_imu):\n self.pose.orientation = msg_imu.orientation\n self.kalman_filter()\n\n def kalman_filter(self):\n\n q = (self.pose.orientation.x, self.pose.orientation.y, self.pose.orientation.z, self.pose.orientation.w)\n roll = tf.transformations.euler_from_quaternion(q)[0]\n pitch = tf.transformations.euler_from_quaternion(q)[1]\n yaw = tf.transformations.euler_from_quaternion(q)[2]\n yaw = yaw + np.pi/2.\n\n if self.start == False:\n self.start = True\n self.prior_pose.position.x = norm(loc = self.pose.position.x, scale = 100)\n self.prior_pose.position.y = norm(loc = self.pose.position.y, scale = 100)\n self.prior_pose.position.z = norm(loc = self.pose.position.z, scale = 100)\n self.prior_roll = norm(loc = roll, scale = 10)\n self.prior_pitch = norm(loc = pitch, scale = 10)\n self.prior_yaw = norm(loc = yaw, scale = 10)\n return\n\n covariance = self.covariance\n\n #p rediction step\n kernel = norm(loc = 0, scale = 2)\n kernel_euler = norm(loc = 0, scale = 0.5)\n x = self.pose.position.x\n y = self.pose.position.y\n z = self.pose.position.z\n \n predicted_x = norm(loc = self.prior_pose.position.x.mean()+kernel.mean(), scale = np.sqrt(self.prior_pose.position.x.var()+kernel.var()))\n predicted_y = norm(loc = self.prior_pose.position.y.mean()+kernel.mean(), scale = np.sqrt(self.prior_pose.position.y.var()+kernel.var()))\n predicted_z = norm(loc = self.prior_pose.position.z.mean()+kernel.mean(), scale = np.sqrt(self.prior_pose.position.z.var()+kernel.var()))\n predicted_roll = norm(loc = self.prior_roll.mean()+kernel_euler.mean(), scale = np.sqrt(self.prior_roll.var()+kernel_euler.var()))\n predicted_pitch = norm(loc = self.prior_pitch.mean()+kernel_euler.mean(), scale = 
np.sqrt(self.prior_pitch.var()+kernel_euler.var()))\n predicted_yaw = norm(loc = self.prior_yaw.mean()+kernel_euler.mean(), scale = np.sqrt(self.prior_yaw.var()+kernel_euler.var()))\n\n # update step\n posterior_x = self.update_con(predicted_x, x, 0.05)\n posterior_y = self.update_con(predicted_y, y, 0.05)\n posterior_z = self.update_con(predicted_z, z, 0.05)\n posterior_roll = self.update_con(predicted_roll, roll, 0.05)\n posterior_pitch = self.update_con(predicted_pitch, pitch, 0.05)\n posterior_yaw = self.update_con(predicted_yaw, yaw, 0.05)\n\n self.prior_roll = posterior_roll\n self.prior_pitch = posterior_pitch\n self.prior_yaw = posterior_yaw\n\n self.prior_pose.position.x = posterior_x\n self.prior_pose.position.y = posterior_y\n self.prior_pose.position.z = posterior_z \n\n self.odometry.pose.pose.position.x = posterior_x.mean()\n self.odometry.pose.pose.position.y = posterior_y.mean()\n self.odometry.pose.pose.position.z = posterior_z.mean()\n kf_euler = posterior_yaw.mean()\n qu = tf.transformations.quaternion_from_euler(0, 0, kf_euler+self.imu_offset)\n pose = Pose()\n pose.orientation.x = qu[0]\n pose.orientation.y = qu[1]\n pose.orientation.z = qu[2]\n pose.orientation.w = qu[3]\n self.odometry.pose.pose.orientation = pose.orientation\n \n # publish\n self.odometry.header.stamp = rospy.Time.now()\n self.odometry.header.frame_id = \"map\"\n self.pub_odm.publish(self.odometry)\n\n # tf transform\n self.br.sendTransform((self.odometry.pose.pose.position.x, \\\n self.odometry.pose.pose.position.y, self.odometry.pose.pose.position.z), \\\n (self.odometry.pose.pose.orientation.x, self.odometry.pose.pose.orientation.y, \\\n self.odometry.pose.pose.orientation.z, self.odometry.pose.pose.orientation.w), \\\n rospy.Time.now(),\"/base_link\",\"/odom\")\n\n q = tf.transformations.quaternion_from_euler(0, 0, 0)\n self.br.sendTransform((self.utm_orig.easting, self.utm_orig .northing, 0), \\\n (q[0], q[1], q[2], q[3]), \\\n rospy.Time.now(),\"/odom\",\"/utm\")\n\n rad_2_deg = 180/math.pi\n print(\"X = \", self.pose.position.x, \", Y = \", self.pose.position.y)\n print(\", RPY = \", posterior_roll.mean()*rad_2_deg, posterior_pitch.mean()*rad_2_deg, posterior_yaw.mean()*rad_2_deg+self.imu_offset*rad_2_deg)\n print(\"========================================================\")\n\n def cb_srv_imu_offest(self, request): \n self.imu_offset = request.data\n print (\"Set imu offset = \" + str(self.imu_offset))\n return SetFloatResponse()\n\n def measurement(self, measurementx, variance):\n likelihood = norm(loc = measurementx, scale = np.sqrt(variance))\n return likelihood\n\n def gaussian_multiply(self, g1, g2):\n g1_mean, g1_var = g1.stats(moments='mv')\n g2_mean, g2_var = g2.stats(moments='mv')\n mean = (g1_var * g2_mean + g2_var * g1_mean) / (g1_var + g2_var)\n variance = (g1_var * g2_var) / (g1_var + g2_var)\n #print mean, variance\n return norm(loc = mean, scale = np.sqrt(variance))\n\n def update_con(self, prior, measurementz, covariance):\n likelihood = self.measurement(measurementz, covariance)\n posterior = self.gaussian_multiply(likelihood, prior)\n return posterior\n\n def on_shutdown(self):\n rospy.loginfo(\"[%s] Shutdown.\" %(self.node_name))\n\nif __name__ == '__main__':\n rospy.init_node('localization_gps_imu_node',anonymous=False)\n localization_gps_imu_node = LocailizationGPSImu()\n rospy.on_shutdown(localization_gps_imu_node.on_shutdown)\n rospy.spin()\n\n" ]
[ [ "numpy.sqrt", "scipy.stats.norm", "numpy.zeros" ] ]
gogobd/PyTorch-GAN
[ "a163b82beff3d01688d8315a3fd39080400e7c01", "a163b82beff3d01688d8315a3fd39080400e7c01" ]
[ "implementations/ebgan/ebgan.py", "implementations/srgan/datasets.py" ]
[ "import argparse\nimport os\nimport numpy as np\nimport math\n\nimport torchvision.transforms as transforms\nfrom torchvision.utils import save_image\n\nfrom torch.utils.data import DataLoader\nfrom torchvision import datasets\nfrom torch.autograd import Variable\n\nimport torch.nn as nn\nimport torch.nn.functional as F\nimport torch\n\nos.makedirs(\"images\", exist_ok=True)\n\nparser = argparse.ArgumentParser()\nparser.add_argument(\"--n_epochs\", type=int, default=200, help=\"number of epochs of training\")\nparser.add_argument(\"--batch_size\", type=int, default=64, help=\"size of the batches\")\nparser.add_argument(\"--lr\", type=float, default=0.0002, help=\"adam: learning rate\")\nparser.add_argument(\"--b1\", type=float, default=0.5, help=\"adam: decay of first order momentum of gradient\")\nparser.add_argument(\"--b2\", type=float, default=0.999, help=\"adam: decay of first order momentum of gradient\")\nparser.add_argument(\"--n_cpu\", type=int, default=8, help=\"number of cpu threads to use during batch generation\")\nparser.add_argument(\"--latent_dim\", type=int, default=62, help=\"dimensionality of the latent space\")\nparser.add_argument(\"--img_size\", type=int, default=32, help=\"size of each image dimension\")\nparser.add_argument(\"--channels\", type=int, default=1, help=\"number of image channels\")\nparser.add_argument(\"--sample_interval\", type=int, default=400, help=\"number of image channels\")\nopt = parser.parse_args()\nprint(opt)\n\nimg_shape = (opt.channels, opt.img_size, opt.img_size)\n\ncuda = True if torch.cuda.is_available() else False\n\n\ndef weights_init_normal(m):\n classname = m.__class__.__name__\n if classname.find(\"Conv\") != -1:\n torch.nn.init.normal_(m.weight.data, 0.0, 0.02)\n elif classname.find(\"BatchNorm2d\") != -1:\n torch.nn.init.normal_(m.weight.data, 1.0, 0.02)\n torch.nn.init.constant_(m.bias.data, 0.0)\n\n\nclass Generator(nn.Module):\n def __init__(self):\n super(Generator, self).__init__()\n\n self.init_size = opt.img_size // 4\n self.l1 = nn.Sequential(nn.Linear(opt.latent_dim, 128 * self.init_size ** 2))\n\n self.conv_blocks = nn.Sequential(\n nn.Upsample(scale_factor=2),\n nn.Conv2d(128, 128, 3, stride=1, padding=1),\n nn.BatchNorm2d(128, 0.8),\n nn.LeakyReLU(0.2, inplace=True),\n nn.Upsample(scale_factor=2),\n nn.Conv2d(128, 64, 3, stride=1, padding=1),\n nn.BatchNorm2d(64, 0.8),\n nn.LeakyReLU(0.2, inplace=True),\n nn.Conv2d(64, opt.channels, 3, stride=1, padding=1),\n nn.Tanh(),\n )\n\n def forward(self, noise):\n out = self.l1(noise)\n out = out.view(out.shape[0], 128, self.init_size, self.init_size)\n img = self.conv_blocks(out)\n return img\n\n\nclass Discriminator(nn.Module):\n def __init__(self):\n super(Discriminator, self).__init__()\n\n # Upsampling\n self.down = nn.Sequential(nn.Conv2d(opt.channels, 64, 3, 2, 1), nn.ReLU())\n # Fully-connected layers\n self.down_size = opt.img_size // 2\n down_dim = 64 * (opt.img_size // 2) ** 2\n\n self.embedding = nn.Linear(down_dim, 32)\n\n self.fc = nn.Sequential(\n nn.BatchNorm1d(32, 0.8),\n nn.ReLU(inplace=True),\n nn.Linear(32, down_dim),\n nn.BatchNorm1d(down_dim),\n nn.ReLU(inplace=True),\n )\n # Upsampling\n self.up = nn.Sequential(nn.Upsample(scale_factor=2), nn.Conv2d(64, opt.channels, 3, 1, 1))\n\n def forward(self, img):\n out = self.down(img)\n embedding = self.embedding(out.view(out.size(0), -1))\n out = self.fc(embedding)\n out = self.up(out.view(out.size(0), 64, self.down_size, self.down_size))\n return out, embedding\n\n\n# Reconstruction loss of AE\npixelwise_loss 
= nn.MSELoss()\n\n# Initialize generator and discriminator\ngenerator = Generator()\ndiscriminator = Discriminator()\n\nif cuda:\n generator.cuda()\n discriminator.cuda()\n pixelwise_loss.cuda()\n\n# Initialize weights\ngenerator.apply(weights_init_normal)\ndiscriminator.apply(weights_init_normal)\n\n# Configure data loader\nos.makedirs(\"../../data/mnist\", exist_ok=True)\ndataloader = torch.utils.data.DataLoader(\n datasets.MNIST(\n \"../../data/mnist\",\n train=True,\n download=True,\n transform=transforms.Compose(\n [transforms.Resize(opt.img_size), transforms.ToTensor(), transforms.Normalize([0.5], [0.5])]\n ),\n ),\n batch_size=opt.batch_size,\n shuffle=True,\n)\n\n# Optimizers\noptimizer_G = torch.optim.Adam(generator.parameters(), lr=opt.lr, betas=(opt.b1, opt.b2))\noptimizer_D = torch.optim.Adam(discriminator.parameters(), lr=opt.lr, betas=(opt.b1, opt.b2))\n\nTensor = torch.cuda.FloatTensor if cuda else torch.FloatTensor\n\n\ndef pullaway_loss(embeddings):\n norm = torch.sqrt(torch.sum(embeddings ** 2, -1, keepdim=True))\n normalized_emb = embeddings / norm\n similarity = torch.matmul(normalized_emb, normalized_emb.transpose(1, 0))\n batch_size = embeddings.size(0)\n loss_pt = (torch.sum(similarity) - batch_size) / (batch_size * (batch_size - 1))\n return loss_pt\n\n\n# ----------\n# Training\n# ----------\n\n# BEGAN hyper parameters\nlambda_pt = 0.1\nmargin = max(1, opt.batch_size / 64.0)\n\nfor epoch in range(opt.n_epochs):\n for i, (imgs, _) in enumerate(dataloader):\n\n # Configure input\n real_imgs = Variable(imgs.type(Tensor))\n\n # -----------------\n # Train Generator\n # -----------------\n\n optimizer_G.zero_grad()\n\n # Sample noise as generator input\n z = Variable(Tensor(np.random.normal(0, 1, (imgs.shape[0], opt.latent_dim))))\n\n # Generate a batch of images\n gen_imgs = generator(z)\n recon_imgs, img_embeddings = discriminator(gen_imgs)\n\n # Loss measures generator's ability to fool the discriminator\n g_loss = pixelwise_loss(recon_imgs, gen_imgs.detach()) + lambda_pt * pullaway_loss(img_embeddings)\n\n g_loss.backward()\n optimizer_G.step()\n\n # ---------------------\n # Train Discriminator\n # ---------------------\n\n optimizer_D.zero_grad()\n\n # Measure discriminator's ability to classify real from generated samples\n real_recon, _ = discriminator(real_imgs)\n fake_recon, _ = discriminator(gen_imgs.detach())\n\n d_loss_real = pixelwise_loss(real_recon, real_imgs)\n d_loss_fake = pixelwise_loss(fake_recon, gen_imgs.detach())\n\n d_loss = d_loss_real\n if (margin - d_loss_fake.data).item() > 0:\n d_loss += margin - d_loss_fake\n\n d_loss.backward()\n optimizer_D.step()\n\n # --------------\n # Log Progress\n # --------------\n\n print(\n \"[Epoch %d/%d] [Batch %d/%d] [D loss: %f] [G loss: %f]\"\n % (epoch, opt.n_epochs, i, len(dataloader), d_loss.item(), g_loss.item())\n )\n\n batches_done = epoch * len(dataloader) + i\n if batches_done % opt.sample_interval == 0:\n save_image(gen_imgs.data[:25], \"images/%d.png\" % batches_done, nrow=5, normalize=True)\n", "import glob\nimport random\nimport os\nimport numpy as np\n\nimport torch\nfrom torch.utils.data import Dataset\nfrom PIL import Image\nimport torchvision.transforms as transforms\n\n# Normalization parameters for pre-trained PyTorch models\nmean = np.array([0.485, 0.456, 0.406])\nstd = np.array([0.229, 0.224, 0.225])\n\n\nclass ImageDataset(Dataset):\n def __init__(self, root, hr_shape):\n hr_height, hr_width = hr_shape\n # Transforms for low resolution images and high resolution images\n 
self.lr_transform = transforms.Compose(\n [\n transforms.Resize((hr_height // 4, hr_height // 4), Image.BICUBIC),\n transforms.ToTensor(),\n transforms.Normalize(mean, std),\n ]\n )\n self.hr_transform = transforms.Compose(\n [\n transforms.Resize((hr_height, hr_height), Image.BICUBIC),\n transforms.ToTensor(),\n transforms.Normalize(mean, std),\n ]\n )\n\n self.files = sorted(glob.glob(root + \"/*.*\"))\n\n def __getitem__(self, index):\n img = Image.open(self.files[index % len(self.files)])\n img_lr = self.lr_transform(img)\n img_hr = self.hr_transform(img)\n\n return {\"lr\": img_lr, \"hr\": img_hr}\n\n def __len__(self):\n return len(self.files)\n" ]
[ [ "torch.nn.Linear", "numpy.random.normal", "torch.nn.MSELoss", "torch.nn.init.constant_", "torch.nn.Tanh", "torch.nn.BatchNorm2d", "torch.nn.LeakyReLU", "torch.nn.Upsample", "torch.nn.ReLU", "torch.nn.init.normal_", "torch.cuda.is_available", "torch.nn.Conv2d", "torch.nn.BatchNorm1d", "torch.sum" ], [ "numpy.array" ] ]
johnson7788/transformers
[ "005519f2c50851b219d6946ea4d6ef9ec4f87bbc" ]
[ "myexample/msra_run_ner.py" ]
[ "# coding=utf-8\nimport logging\nimport os\nimport sys\nfrom dataclasses import dataclass, field\nfrom typing import Optional\n\nimport numpy as np\nfrom datasets import ClassLabel, load_dataset\nfrom seqeval.metrics import accuracy_score, f1_score, precision_score, recall_score\n\nimport transformers\nfrom transformers import (\n AutoConfig,\n AutoModelForTokenClassification,\n AutoTokenizer,\n BertTokenizerFast,\n DataCollatorForTokenClassification,\n HfArgumentParser,\n PreTrainedTokenizerFast,\n Trainer,\n TrainingArguments,\n set_seed,\n)\nfrom transformers.trainer_utils import is_main_process\n\n\nlogger = logging.getLogger(__name__)\n\n\n@dataclass\nclass ModelArguments:\n \"\"\"\n Arguments pertaining to which model/config/tokenizer we are going to fine-tune from.\n \"\"\"\n\n model_name_or_path: str = field(\n metadata={\"help\": \"Path to pretrained model or model identifier from huggingface.co/models\"}\n )\n config_name: Optional[str] = field(\n default=None, metadata={\"help\": \"Pretrained config name or path if not the same as model_name\"}\n )\n tokenizer_name: Optional[str] = field(\n default=None, metadata={\"help\": \"Pretrained tokenizer name or path if not the same as model_name\"}\n )\n cache_dir: Optional[str] = field(\n default=None,\n metadata={\"help\": \"Where do you want to store the pretrained models downloaded from huggingface.co\"},\n )\n\n\n@dataclass\nclass DataTrainingArguments:\n \"\"\"\n Arguments pertaining to what data we are going to input our model for training and eval.\n \"\"\"\n\n task_name: Optional[str] = field(default=\"ner\", metadata={\"help\": \"The name of the task (ner, pos...).\"})\n dataset_name: Optional[str] = field(\n default=None, metadata={\"help\": \"The name of the dataset to use (via the datasets library).\"}\n )\n dataset_config_name: Optional[str] = field(\n default=None, metadata={\"help\": \"The configuration name of the dataset to use (via the datasets library).\"}\n )\n train_file: Optional[str] = field(\n default=None, metadata={\"help\": \"The input training data file (a csv or JSON file).\"}\n )\n validation_file: Optional[str] = field(\n default=None,\n metadata={\"help\": \"An optional input evaluation data file to evaluate on (a csv or JSON file).\"},\n )\n test_file: Optional[str] = field(\n default=None,\n metadata={\"help\": \"An optional input test data file to predict on (a csv or JSON file).\"},\n )\n overwrite_cache: bool = field(\n default=False, metadata={\"help\": \"Overwrite the cached training and evaluation sets\"}\n )\n preprocessing_num_workers: Optional[int] = field(\n default=None,\n metadata={\"help\": \"The number of processes to use for the preprocessing.\"},\n )\n pad_to_max_length: bool = field(\n default=False,\n metadata={\n \"help\": \"Whether to pad all samples to model maximum sentence length. \"\n \"If False, will pad the samples dynamically when batching to the maximum length in the batch. 
More \"\n \"efficient on GPU but very bad for TPU.\"\n },\n )\n max_length: int = field(\n default=64,\n metadata={\n \"help\": \"padding的最大序列长度,默认是64,如果是bert,最长是512\"\n },\n )\n label_all_tokens: bool = field(\n default=False,\n metadata={\n \"help\": \"Whether to put the label for one word on all tokens of generated by that word or just on the \"\n \"one (in which case the other tokens will have a padding index).\"\n },\n )\n\n def __post_init__(self):\n #在parse参数时,会自动检查dataset的后缀或文件 parser.parse_args_into_dataclasses()\n if self.dataset_name is None and self.train_file is None and self.validation_file is None:\n raise ValueError(\"Need either a dataset name or a training/validation file.\")\n else:\n if self.train_file is not None:\n extension = self.train_file.split(\".\")[-1]\n assert extension in [\"csv\", \"json\"], \"`train_file` should be a csv or a json file.\"\n if self.validation_file is not None:\n extension = self.validation_file.split(\".\")[-1]\n assert extension in [\"csv\", \"json\"], \"`validation_file` should be a csv or a json file.\"\n self.task_name = self.task_name.lower()\n\n\ndef main():\n\n parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments))\n if len(sys.argv) == 2 and sys.argv[1].endswith(\".json\"):\n # 如果我们仅将一个参数传递给脚本,并且它是json文件的路径,对其进行解析以获取参数。\n model_args, data_args, training_args = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1]))\n else:\n model_args, data_args, training_args = parser.parse_args_into_dataclasses()\n #检查output文件夹\n if (\n os.path.exists(training_args.output_dir)\n and os.listdir(training_args.output_dir)\n and training_args.do_train\n and not training_args.overwrite_output_dir\n ):\n raise ValueError(\n f\"Output directory ({training_args.output_dir}) already exists and is not empty.\"\n \"Use --overwrite_output_dir to overcome.\"\n )\n\n # 日志\n logging.basicConfig(\n format=\"%(asctime)s - %(levelname)s - %(name)s - %(message)s\",\n datefmt=\"%m/%d/%Y %H:%M:%S\",\n level=logging.INFO if is_main_process(training_args.local_rank) else logging.WARN,\n )\n\n # Log on each process the small summary:\n logger.warning(\n f\"Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}\"\n + f\"distributed training: {bool(training_args.local_rank != -1)}, 16-bits training: {training_args.fp16}\"\n )\n # Set the verbosity to info of the Transformers logger (on main process only):\n if is_main_process(training_args.local_rank):\n transformers.utils.logging.set_verbosity_info()\n transformers.utils.logging.enable_default_handler()\n transformers.utils.logging.enable_explicit_format()\n logger.info(\"Training/evaluation parameters %s\", training_args)\n\n # Set seed before initializing model.\n set_seed(training_args.seed)\n\n # 自动下载数据集\n if data_args.dataset_name is not None:\n # 下载数据集并加载\n msra_path = 'data/msra_ner.py'\n # datasets = load_dataset(path=data_args.dataset_name, name=data_args.dataset_config_name)\n datasets = load_dataset(path=msra_path, name=data_args.dataset_config_name)\n else:\n data_files = {}\n if data_args.train_file is not None:\n data_files[\"train\"] = data_args.train_file\n if data_args.validation_file is not None:\n data_files[\"validation\"] = data_args.validation_file\n if data_args.test_file is not None:\n data_files[\"test\"] = data_args.test_file\n extension = data_args.train_file.split(\".\")[-1]\n datasets = load_dataset(extension, data_files=data_files)\n\n #使用数据\n if training_args.do_train:\n column_names = 
datasets[\"train\"].column_names\n features = datasets[\"train\"].features\n else:\n column_names = datasets[\"validation\"].column_names\n features = datasets[\"validation\"].features\n # 使用哪个column作为text, column_names: ['id', 'ner_tags', 'tokens']\n text_column_name = \"tokens\" if \"tokens\" in column_names else column_names[0]\n # 哪列作为label,这里是'ner_tags'\n label_column_name = (\n f\"{data_args.task_name}_tags\" if f\"{data_args.task_name}_tags\" in column_names else column_names[1]\n )\n #如果labels不是`Sequence [ClassLabel]`,我们将需要遍历数据集以获得唯一标签。\n def get_label_list(labels):\n unique_labels = set()\n for label in labels:\n unique_labels = unique_labels | set(label)\n label_list = list(unique_labels)\n label_list.sort()\n return label_list\n # 看一下label的feautre是不是ClassLabel类型,已经定制好的\n if isinstance(features[label_column_name].feature, ClassLabel):\n # label_list: ['O', 'B-PER', 'I-PER', 'B-ORG', 'I-ORG', 'B-LOC', 'I-LOC']\n label_list = features[label_column_name].feature.names\n # 由名称变成id格式的字典\n label_to_id = {i: i for i in range(len(label_list))}\n else:\n label_list = get_label_list(datasets[\"train\"][label_column_name])\n label_to_id = {l: i for i, l in enumerate(label_list)}\n num_labels = len(label_list)\n\n # 开始加载预训练模型和tokenizer\n #\n # Distributed training:\n # The .from_pretrained methods guarantee that only one local process can concurrently\n # download model & vocab.\n # 加载模型配置\n config = AutoConfig.from_pretrained(\n model_args.config_name if model_args.config_name else model_args.model_name_or_path,\n num_labels=num_labels,\n finetuning_task=data_args.task_name, #ner\n cache_dir=model_args.cache_dir, #None\n )\n if model_args.model_name_or_path == \"albert_model\":\n #我们的albertmodel使用的是Bert的tokenizer\n tokenizer = BertTokenizerFast.from_pretrained(\n model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path,\n cache_dir=model_args.cache_dir,\n use_fast=True,\n )\n else:\n tokenizer = AutoTokenizer.from_pretrained(\n model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path,\n cache_dir=model_args.cache_dir,\n use_fast=True,\n )\n model = AutoModelForTokenClassification.from_pretrained(\n model_args.model_name_or_path,\n from_tf=bool(\".ckpt\" in model_args.model_name_or_path),\n config=config,\n cache_dir=model_args.cache_dir,\n )\n\n # 这个只能用fast tokenizer, Tokenizer check\n if not isinstance(tokenizer, PreTrainedTokenizerFast):\n raise ValueError(\n \"This example script only works for models that have a fast tokenizer. 
Checkout the big table of models \"\n \"at https://huggingface.co/transformers/index.html#bigtable to find the model types that meet this \"\n \"requirement\"\n )\n\n # 数据预处理\n # 最大序列长度\n max_length = data_args.max_length\n # Padding 策略\n padding = \"max_length\" if data_args.pad_to_max_length else False\n\n # Tokenize所有文本并将label与它们对齐。\n def tokenize_and_align_labels(examples):\n \"\"\"\n datasets.map函数处理时会调用\n Args:\n examples: 这里是2条样本,\n 例如: examples = {dict: 5}\n 'chunk_tags' = {list: 2} [[11, 21, 11, 12, 21, 22, 11, 12, 0], [11, 12]]\n 'id' = {list: 2} ['0', '1']\n 'ner_tags' = {list: 2} [[3, 0, 7, 0, 0, 0, 7, 0, 0], [1, 2]]\n 'pos_tags' = {list: 2} [[22, 42, 16, 21, 35, 37, 16, 21, 7], [22, 22]]\n 'tokens' = {list: 2} [['EU', 'rejects', 'German', 'call', 'to', 'boycott', 'British', 'lamb', '.'], ['Peter', 'Blackburn']]\n Returns:\n\n \"\"\"\n # 对单条样本的examples的tokens字段,即文本字段进行tokenizer\n tokenized_inputs = tokenizer(\n examples[text_column_name],\n padding=padding,\n max_length=max_length,\n truncation=True,\n # 我们使用此参数是因为数据集中的文本是单词列表(每个单词带有标签)\n is_split_into_words=True,\n )\n labels = []\n for i, label in enumerate(examples[label_column_name]):\n # 对每条样本进行处理, label 是列表[3, 0, 7, 0, 0, 0, 7, 0, 0]\n word_ids = tokenized_inputs.word_ids(batch_index=i)\n # word_ids: [None, 0, 1, 2, 3, 4, 5, 6, 7, 8, None]\n previous_word_idx = None\n label_ids = []\n for word_idx in word_ids:\n # 特殊token的单词ID为None的。 我们将label设置为-100,以便在损失函数中自动将其忽略\n if word_idx is None:\n label_ids.append(-100)\n # 我们为每个单词的第一个token设置label。\n elif word_idx != previous_word_idx:\n label_ids.append(label_to_id[label[word_idx]])\n # 对于单词中的其他token,我们根据label_all_tokens标志将label设置为当前label或-100。\n # 这里是对token中不是ner的部分,给label,默认给的-100\n else:\n label_ids.append(label_to_id[label[word_idx]] if data_args.label_all_tokens else -100)\n previous_word_idx = word_idx\n\n labels.append(label_ids)\n # 最终labels是一个列表\n # {list: 11}[-100, 3, 0, 7, 0, 0, 0, 7, 0, 0, -100]\n # {list: 4}[-100, 1, 2, -100]\n tokenized_inputs[\"labels\"] = labels\n return tokenized_inputs\n #处理数据,用map函数\n tokenized_datasets = datasets.map(\n tokenize_and_align_labels,\n batched=True,\n num_proc=data_args.preprocessing_num_workers,\n load_from_cache_file=not data_args.overwrite_cache,\n )\n\n # Data collator, 在for循环dataloader时调用\n data_collator = DataCollatorForTokenClassification(tokenizer)\n\n # 计算metrics\n def compute_metrics(p):\n predictions, labels = p\n predictions = np.argmax(predictions, axis=2)\n\n # Remove ignored index (special tokens)\n true_predictions = [\n [label_list[p] for (p, l) in zip(prediction, label) if l != -100]\n for prediction, label in zip(predictions, labels)\n ]\n true_labels = [\n [label_list[l] for (p, l) in zip(prediction, label) if l != -100]\n for prediction, label in zip(predictions, labels)\n ]\n\n return {\n \"accuracy_score\": accuracy_score(true_labels, true_predictions),\n \"precision\": precision_score(true_labels, true_predictions),\n \"recall\": recall_score(true_labels, true_predictions),\n \"f1\": f1_score(true_labels, true_predictions),\n }\n\n # Initialize our Trainer, msra没有验证集,这里用test测试集,其实应该用train中分离出来\n trainer = Trainer(\n model=model,\n args=training_args,\n train_dataset=tokenized_datasets[\"train\"] if training_args.do_train else None,\n eval_dataset=tokenized_datasets[\"test\"] if training_args.do_eval else None,\n tokenizer=tokenizer,\n data_collator=data_collator,\n compute_metrics=compute_metrics,\n )\n\n # Training\n if training_args.do_train:\n # 这里model_path是用来是否继续训练的,恢复训练\n trainer.train(\n 
model_path=model_args.model_name_or_path if os.path.isdir(model_args.model_name_or_path) else None\n )\n trainer.save_model() # Saves the tokenizer too for easy upload\n\n #评估模型\n results = {}\n if training_args.do_eval:\n logger.info(\"*** Evaluate ***\")\n\n results = trainer.evaluate()\n\n output_eval_file = os.path.join(training_args.output_dir, \"eval_results_ner.txt\")\n if trainer.is_world_process_zero():\n with open(output_eval_file, \"w\") as writer:\n logger.info(\"***** Eval results *****\")\n for key, value in results.items():\n logger.info(f\" {key} = {value}\")\n writer.write(f\"{key} = {value}\\n\")\n\n # Predict\n if training_args.do_predict:\n logger.info(\"*** Predict ***\")\n\n test_dataset = tokenized_datasets[\"test\"]\n predictions, labels, metrics = trainer.predict(test_dataset)\n predictions = np.argmax(predictions, axis=2)\n\n # Remove ignored index (special tokens)\n true_predictions = [\n [label_list[p] for (p, l) in zip(prediction, label) if l != -100]\n for prediction, label in zip(predictions, labels)\n ]\n\n output_test_results_file = os.path.join(training_args.output_dir, \"test_results.txt\")\n if trainer.is_world_process_zero():\n with open(output_test_results_file, \"w\") as writer:\n for key, value in metrics.items():\n logger.info(f\" {key} = {value}\")\n writer.write(f\"{key} = {value}\\n\")\n\n # Save predictions\n output_test_predictions_file = os.path.join(training_args.output_dir, \"test_predictions.txt\")\n if trainer.is_world_process_zero():\n with open(output_test_predictions_file, \"w\") as writer:\n for prediction in true_predictions:\n writer.write(\" \".join(prediction) + \"\\n\")\n\n return results\n\n\ndef _mp_fn(index):\n # For xla_spawn (TPUs)\n main()\n\n\nif __name__ == \"__main__\":\n main()\n" ]
[ [ "numpy.argmax" ] ]
Dominykas-Tautkus/corr-app
[ "deb6bb45095ef570b40875985d1c8f931b90f4bc" ]
[ "_notebooks/scripts/.ipynb_checkpoints/read_data-checkpoint.py" ]
[ "import plotly.graph_objects as go\nimport pandas as pd\nimport plotly.express as px\nfrom datetime import datetime, timedelta\nimport requests\nimport json\nimport time\n\ndef read():\n df1 = pd.read_csv(\"CSV/ETH_BTC_USD_2015-08-09_2020-04-04-CoinDesk.csv\")\n df1.columns = ['date', 'ETH', 'BTC']\n df1.date = pd.to_datetime(df1.date, dayfirst=True)\n df1.set_index('date', inplace=True)\n \n EOS = pd.read_csv(\"ICO_coins/EOS_USD_2018-06-06_2020-04-02-CoinDesk.csv\")\n IOTA = pd.read_csv(\"ICO_coins/IOTA_USD_2018-06-06_2020-04-02-CoinDesk.csv\") \n LSK = pd.read_csv(\"ICO_coins/LSK_USD_2018-06-06_2020-04-02-CoinDesk.csv\")\n NEO = pd.read_csv(\"ICO_coins/NEO_USD_2018-06-06_2020-04-02-CoinDesk.csv\")\n TRX = pd.read_csv(\"ICO_coins/tron/TRX_USD_2018-06-06_2020-04-02-CoinDesk.csv\")\n ADA = pd.read_csv(\"ICO_coins/cardano/ADA_USD_2018-06-06_2020-04-02-CoinDesk.csv\")\n GOLD = pd.read_csv(\"CSV/XAU-GOLD_USD_Historical Data_2018-06-06--2020-04-04.csv\")\n SP500 = pd.read_csv(\"CSV/S_P_500_Historical Data_2018-06-06--2020-04-04.csv\")\n \n GOLD['Currency'] = GOLD.apply(lambda row:'XAU', axis=1)\n SP500['Currency'] = SP500.apply(lambda row:'SP500', axis=1)\n \n df = EOS.append(IOTA).append(LSK).append(NEO).append(TRX).append(ADA).append(GOLD).append(SP500)\n df.Date = pd.to_datetime(df.Date, dayfirst=True)\n\n df['Closing Price (USD)'] = df.apply(lambda row: str(row['Closing Price (USD)']).replace(',', ''), axis=1 )\n df['Closing Price (USD)'] = df['Closing Price (USD)'].astype(float)\n\n tbl = df.pivot_table('Closing Price (USD)', ['Date'], 'Currency')\n tbl=tbl.dropna()\n \n df_all = pd.concat([df1,tbl], join='inner', axis=1)\n \n return df_all\n\n\n\n\n\n\n\ndef read_json():\n \n BTChistory_url = 'https://min-api.cryptocompare.com/data/v2/histoday?fsym=BTC&tsym=USD&limit=2000&api_key=c96436b332e3c9f1b6784db0ec59cb81b161eb5853ecfa81cc025366512d6594'\n EOShistory_url = 'https://min-api.cryptocompare.com/data/v2/histoday?fsym=EOS&tsym=USD&limit=2000&api_key=c96436b332e3c9f1b6784db0ec59cb81b161eb5853ecfa81cc025366512d6594'\n LSKhistory_url = 'https://min-api.cryptocompare.com/data/v2/histoday?fsym=LSK&tsym=USD&limit=2000&api_key=c96436b332e3c9f1b6784db0ec59cb81b161eb5853ecfa81cc025366512d6594'\n \n BTChistory = requests.get(BTChistory_url).json() \n pd_BTC = pd.read_json(BTChistory_url, typ='series')['Data']['Data'] \n \n \n \n '''\n df1 = pd.read_csv(\"CSV/ETH_BTC_USD_2015-08-09_2020-04-04-CoinDesk.csv\")\n df1.columns = ['date', 'ETH', 'BTC']\n df1.date = pd.to_datetime(df1.date, dayfirst=True)\n df1.set_index('date', inplace=True)\n \n EOS = pd.read_csv(\"ICO_coins/EOS_USD_2018-06-06_2020-04-02-CoinDesk.csv\")\n IOTA = pd.read_csv(\"ICO_coins/IOTA_USD_2018-06-06_2020-04-02-CoinDesk.csv\")\n LSK = pd.read_csv(\"ICO_coins/LSK_USD_2018-06-06_2020-04-02-CoinDesk.csv\")\n NEO = pd.read_csv(\"ICO_coins/NEO_USD_2018-06-06_2020-04-02-CoinDesk.csv\")\n TRX = pd.read_csv(\"ICO_coins/tron/TRX_USD_2018-06-06_2020-04-02-CoinDesk.csv\")\n ADA = pd.read_csv(\"ICO_coins/cardano/ADA_USD_2018-06-06_2020-04-02-CoinDesk.csv\")\n GOLD = pd.read_csv(\"CSV/XAU-GOLD_USD_Historical Data_2018-06-06--2020-04-04.csv\")\n SP500 = pd.read_csv(\"CSV/S_P_500_Historical Data_2018-06-06--2020-04-04.csv\")\n \n GOLD['Currency'] = GOLD.apply(lambda row:'XAU', axis=1)\n SP500['Currency'] = SP500.apply(lambda row:'SP500', axis=1)\n \n df = EOS.append(IOTA).append(LSK).append(NEO).append(TRX).append(ADA).append(GOLD).append(SP500)\n df.Date = pd.to_datetime(df.Date, dayfirst=True)\n\n df['Closing Price (USD)'] = df.apply(lambda 
row: str(row['Closing Price (USD)']).replace(',', ''), axis=1 )\n df['Closing Price (USD)'] = df['Closing Price (USD)'].astype(float)\n\n tbl = df.pivot_table('Closing Price (USD)', ['Date'], 'Currency')\n tbl=tbl.dropna()\n \n df_all = pd.concat([df1,tbl], join='inner', axis=1)'''\n \n return df_all\n\n\ndef read_crypto():\n df1 = pd.read_csv(\"CSV/ETH_BTC_USD_2015-08-09_2020-04-04-CoinDesk.csv\")\n df1.columns = ['date', 'ETH', 'BTC']\n df1.date = pd.to_datetime(df1.date, dayfirst=True)\n df1.set_index('date', inplace=True)\n \n EOS = pd.read_csv(\"ICO_coins/EOS_USD_2018-06-06_2020-04-02-CoinDesk.csv\")\n IOTA = pd.read_csv(\"ICO_coins/IOTA_USD_2018-06-06_2020-04-02-CoinDesk.csv\")\n LSK = pd.read_csv(\"ICO_coins/LSK_USD_2018-06-06_2020-04-02-CoinDesk.csv\")\n NEO = pd.read_csv(\"ICO_coins/NEO_USD_2018-06-06_2020-04-02-CoinDesk.csv\")\n TRX = pd.read_csv(\"ICO_coins/tron/TRX_USD_2018-06-06_2020-04-02-CoinDesk.csv\")\n ADA = pd.read_csv(\"ICO_coins/cardano/ADA_USD_2018-06-06_2020-04-02-CoinDesk.csv\")\n\n\n \n df = EOS.append(IOTA).append(LSK).append(NEO).append(TRX).append(ADA)\n df.Date = pd.to_datetime(df.Date, dayfirst=True)\n\n df['Closing Price (USD)'] = df.apply(lambda row: str(row['Closing Price (USD)']).replace(',', ''), axis=1 )\n df['Closing Price (USD)'] = df['Closing Price (USD)'].astype(float)\n\n tbl = df.pivot_table('Closing Price (USD)', ['Date'], 'Currency')\n tbl=tbl.dropna()\n \n df_all = pd.concat([df1,tbl], join='inner', axis=1)\n \n return df_all\n\ndef read_api(short_frm = False): \n BTChistory_url = 'https://min-api.cryptocompare.com/data/v2/histoday?fsym=BTC&tsym=USD&limit=2000&api_key=c96436b332e3c9f1b6784db0ec59cb81b161eb5853ecfa81cc025366512d6594'\n EOShistory_url = 'https://min-api.cryptocompare.com/data/v2/histoday?fsym=EOS&tsym=USD&limit=2000&api_key=c96436b332e3c9f1b6784db0ec59cb81b161eb5853ecfa81cc025366512d6594'\n LSKhistory_url = 'https://min-api.cryptocompare.com/data/v2/histoday?fsym=LSK&tsym=USD&limit=2000&api_key=c96436b332e3c9f1b6784db0ec59cb81b161eb5853ecfa81cc025366512d6594'\n #XAUhistory_url = 'https://min-api.cryptocompare.com/data/v2/histoday?fsym=XAU&tsym=USD&limit=2000&api_key=c96436b332e3c9f1b6784db0ec59cb81b161eb5853ecfa81cc025366512d6594'\n ETHhistory_url = 'https://min-api.cryptocompare.com/data/v2/histoday?fsym=ETH&tsym=USD&limit=2000&api_key=c96436b332e3c9f1b6784db0ec59cb81b161eb5853ecfa81cc025366512d6594'\n ADAhistory_url = 'https://min-api.cryptocompare.com/data/v2/histoday?fsym=ADA&tsym=USD&limit=2000&api_key=c96436b332e3c9f1b6784db0ec59cb81b161eb5853ecfa81cc025366512d6594'\n \n XRPhistory_url = 'https://min-api.cryptocompare.com/data/v2/histoday?fsym=XRP&tsym=USD&limit=2000&api_key=c96436b332e3c9f1b6784db0ec59cb81b161eb5853ecfa81cc025366512d6594'\n \n NEOhistory_url = 'https://min-api.cryptocompare.com/data/v2/histoday?fsym=NEO&tsym=USD&limit=2000&api_key=c96436b332e3c9f1b6784db0ec59cb81b161eb5853ecfa81cc025366512d6594'\n TRXhistory_url = 'https://min-api.cryptocompare.com/data/v2/histoday?fsym=TRX&tsym=USD&limit=2000&api_key=c96436b332e3c9f1b6784db0ec59cb81b161eb5853ecfa81cc025366512d6594'\n #SP500history_url = 'https://min-api.cryptocompare.com/data/v2/histoday?fsym=US500.CUR&tsym=USD&e=currency&limit=2000&api_key=c96436b332e3c9f1b6784db0ec59cb81b161eb5853ecfa81cc025366512d6594'\n \n BTC = format_response(BTChistory_url, 'BTC') \n EOS = format_response(EOShistory_url, 'EOS') \n LSK = format_response(LSKhistory_url, 'LSK') \n #XAU = format_response(XAUhistory_url, 'XAU') \n ETH = format_response(ETHhistory_url, 'ETH')\n ADA = 
format_response(ADAhistory_url, 'ADA')\n XRP = format_response(XRPhistory_url, 'XRP')\n NEO = format_response(NEOhistory_url, 'NEO')\n TRX = format_response(TRXhistory_url, 'TRX')\n #SP500 = format_response(SP500history_url, 'SP500')\n \n #XAU = XAU[XAU.close != 79987.0] \n #XAU = XAU[XAU.close > 1080]\n markets = read_markets_api() \n df = BTC.append(EOS).append(LSK).append(ETH).append(ADA).append(NEO).append(TRX).append(XRP).append(markets)\n df['date'] = df['date'].dt.date\n if short_frm: \n return df\n \n tbl = df.pivot_table('close', ['date'], 'currency')\n tbl = tbl.dropna()\n #tbl = tbl[(tbl != 0).all(1)] #remove rows where at least one column contains 0\n return tbl\n\ndef read_news(category): \n url1 = 'https://min-api.cryptocompare.com/data/v2/news/?categories='\n url2 = '&api_key=c96436b332e3c9f1b6784db0ec59cb81b161eb5853ecfa81cc025366512d6594'\n url = url1 + category + url2 \n response = format_news(url, category)\n #df = BTC.append(EOS).append(LSK).append(XAU).append(ETH).append(ADA).append(NEO).append(TRX).append(XRP) \n #tbl = df.pivot_table(['date'], ['imageurl'], ['title'], ['url'], ['category'])\n #tbl = tbl.dropna() \n return response\n\ndef read_covid(): \n #url = 'https://newsapi.org/v2/everything?q=covid&sources=bbc-news%2Cassociated-press&apiKey=1c5ca7e1e5c349cdb7ec5d1a40dc66fa&fbclid=IwAR3K8zO7eGe-Y6VFCzyyxwIrw68ktKmU-er-IOsTN9BuGfNIpK1ulo-IGzA'\n url = 'http://newsapi.org/v2/top-headlines?sources=bbc-news%2Ccbc-news%2Cfinancial-times%2Cnbc-news%2Cindependent%2Cthe-wall-street-journal&q=covid&apiKey=1c5ca7e1e5c349cdb7ec5d1a40dc66fa&fbclid=IwAR0d3GaaaiwpDtwWUrHFxuwcDfMW9Hnea_ncxbrKPjPI5H2ixMIe7cBjM-M'\n pd_resp = pd.read_json(url, typ='series')['articles'][0] \n df_resp = pd.DataFrame(pd_resp) \n df_final = df_resp[['title', 'description', 'url']]\n return df_final.iloc[0]\n\ndef read_markets_api(): \n # ^GSPC-->sp500 ; GC=F-->XAU\n SP500 = format_markets('^GSPC', 'SP500') \n XAU = format_markets('GC=F', 'XAU') \n markets = SP500.append(XAU)\n return markets\n\n\n \ndef format_response(url, fsym): \n pd_resp = pd.read_json(url, typ='series')['Data']['Data']\n df_resp = pd.DataFrame(pd_resp)\n df_resp['date'] = pd.to_datetime(df_resp.time, unit='s')\n df_final = df_resp[['close', 'date']]\n df_final['currency'] = df_final.apply(lambda row: fsym, axis=1) \n return df_final\n\ndef format_news(url, category): \n pd_resp = pd.read_json(url, typ='series')['Data'][0] \n df_resp = pd.DataFrame(pd_resp)\n df_resp['date'] = pd.to_datetime(df_resp.published_on, unit='s')\n df_final = df_resp[['date', 'imageurl', 'title', 'body', 'url']]\n df_final['category'] = df_final.apply(lambda row: category, axis=1) \n return df_final.iloc[0]\n\ndef format_markets(symbol, crypto): \n todayTS = int(time.time()) \n url = \"https://apidojo-yahoo-finance-v1.p.rapidapi.com/stock/v2/get-historical-data\"\n \n querystring = {\"period1\":\"1461763800\",\"period2\":todayTS,\"symbol\":symbol,\"frequency\":\"1d\",\"filter\":\"history\"}\n headers = {\n 'x-rapidapi-key': \"b685990ecamsh74862ef65eb2958p10db79jsn0fdc487f900b\",\n 'x-rapidapi-host': \"apidojo-yahoo-finance-v1.p.rapidapi.com\"\n } \n\n response = requests.request(\"GET\", url, headers=headers, params=querystring)\n \n json_data = json.loads(response.content)['prices'] \n df_resp = pd.DataFrame(json_data)\n df_resp['date'] = pd.to_datetime(df_resp.date, unit='s')\n #df_resp['date'] = df_resp['date'].dt.date\n df_final = df_resp[['close', 'date']]\n df_final['currency'] = df_final.apply(lambda row: crypto, axis=1)\n return df_final\n\n\n" ]
[ [ "pandas.to_datetime", "pandas.DataFrame", "pandas.read_json", "pandas.concat", "pandas.read_csv" ] ]
vatj/feature-store-api
[ "5703a2b3292c233ce3652fad058806f373d83967" ]
[ "python/hsfs/engine/spark.py" ]
[ "#\n# Copyright 2020 Logical Clocks AB\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n#\n\nimport os\nimport json\nimport datetime\nimport importlib.util\n\nimport numpy as np\nimport pandas as pd\nimport avro\n\n# in case importing in %%local\nfrom hsfs.feature_group import StreamFeatureGroup\n\ntry:\n from pyspark import SparkFiles\n from pyspark.sql import SparkSession, DataFrame\n from pyspark.rdd import RDD\n from pyspark.sql.functions import struct, concat, col, lit, from_json\n from pyspark.sql.avro.functions import from_avro, to_avro\nexcept ImportError:\n pass\n\nfrom hsfs import feature, training_dataset_feature, client, util\nfrom hsfs.storage_connector import StorageConnector\nfrom hsfs.client.exceptions import FeatureStoreException\nfrom hsfs.core import hudi_engine\nfrom hsfs.constructor import query\n\n\nclass Engine:\n HIVE_FORMAT = \"hive\"\n KAFKA_FORMAT = \"kafka\"\n\n APPEND = \"append\"\n OVERWRITE = \"overwrite\"\n\n def __init__(self):\n self._spark_session = SparkSession.builder.enableHiveSupport().getOrCreate()\n self._spark_context = self._spark_session.sparkContext\n self._jvm = self._spark_context._jvm\n\n self._spark_session.conf.set(\"hive.exec.dynamic.partition\", \"true\")\n self._spark_session.conf.set(\"hive.exec.dynamic.partition.mode\", \"nonstrict\")\n self._spark_session.conf.set(\"spark.sql.hive.convertMetastoreParquet\", \"false\")\n\n if importlib.util.find_spec(\"pydoop\"):\n # If we are on Databricks don't setup Pydoop as it's not available and cannot be easily installed.\n util.setup_pydoop()\n\n def sql(self, sql_query, feature_store, connector, dataframe_type, read_options):\n if not connector:\n result_df = self._sql_offline(sql_query, feature_store)\n else:\n result_df = connector.read(sql_query, None, {}, None)\n\n self.set_job_group(\"\", \"\")\n return self._return_dataframe_type(result_df, dataframe_type)\n\n def _sql_offline(self, sql_query, feature_store):\n # set feature store\n self._spark_session.sql(\"USE {}\".format(feature_store))\n return self._spark_session.sql(sql_query)\n\n def show(self, sql_query, feature_store, n, online_conn):\n return self.sql(sql_query, feature_store, online_conn, \"default\", {}).show(n)\n\n def set_job_group(self, group_id, description):\n self._spark_session.sparkContext.setJobGroup(group_id, description)\n\n def register_on_demand_temporary_table(self, on_demand_fg, alias):\n on_demand_dataset = on_demand_fg.storage_connector.read(\n on_demand_fg.query,\n on_demand_fg.data_format,\n on_demand_fg.options,\n on_demand_fg.storage_connector._get_path(on_demand_fg.path),\n )\n if on_demand_fg.location:\n self._spark_session.sparkContext.textFile(on_demand_fg.location).collect()\n\n on_demand_dataset.createOrReplaceTempView(alias)\n return on_demand_dataset\n\n def register_hudi_temporary_table(\n self, hudi_fg_alias, feature_store_id, feature_store_name, read_options\n ):\n hudi_engine_instance = hudi_engine.HudiEngine(\n feature_store_id,\n feature_store_name,\n hudi_fg_alias.feature_group,\n 
self._spark_context,\n self._spark_session,\n )\n hudi_engine_instance.register_temporary_table(\n hudi_fg_alias.alias,\n hudi_fg_alias.left_feature_group_start_timestamp,\n hudi_fg_alias.left_feature_group_end_timestamp,\n read_options,\n )\n\n def _return_dataframe_type(self, dataframe, dataframe_type):\n if dataframe_type.lower() in [\"default\", \"spark\"]:\n return dataframe\n if dataframe_type.lower() == \"pandas\":\n return dataframe.toPandas()\n if dataframe_type.lower() == \"numpy\":\n return dataframe.toPandas().values\n if dataframe_type == \"python\":\n return dataframe.toPandas().values.tolist()\n\n raise TypeError(\n \"Dataframe type `{}` not supported on this platform.\".format(dataframe_type)\n )\n\n def convert_to_default_dataframe(self, dataframe):\n if isinstance(dataframe, pd.DataFrame):\n dataframe = self._spark_session.createDataFrame(dataframe)\n elif isinstance(dataframe, list):\n dataframe = np.array(dataframe)\n elif isinstance(dataframe, np.ndarray):\n if dataframe.ndim != 2:\n raise TypeError(\n \"Cannot convert numpy array that do not have two dimensions to a dataframe. \"\n \"The number of dimensions are: {}\".format(dataframe.ndim)\n )\n num_cols = dataframe.shape[1]\n dataframe_dict = {}\n for n_col in list(range(num_cols)):\n col_name = \"col_\" + str(n_col)\n dataframe_dict[col_name] = dataframe[:, n_col]\n pandas_df = pd.DataFrame(dataframe_dict)\n dataframe = self._spark_session.createDataFrame(pandas_df)\n elif isinstance(dataframe, RDD):\n dataframe = dataframe.toDF()\n\n if isinstance(dataframe, DataFrame):\n return dataframe.select(\n [col(x).alias(x.lower()) for x in dataframe.columns]\n )\n\n raise TypeError(\n \"The provided dataframe type is not recognized. Supported types are: spark rdds, spark dataframes, \"\n \"pandas dataframes, python 2D lists, and numpy 2D arrays. 
The provided dataframe has type: {}\".format(\n type(dataframe)\n )\n )\n\n def save_dataframe(\n self,\n feature_group,\n dataframe,\n operation,\n online_enabled,\n storage,\n offline_write_options,\n online_write_options,\n validation_id=None,\n ):\n try:\n if isinstance(feature_group, StreamFeatureGroup):\n self._save_online_dataframe(\n feature_group, dataframe, online_write_options\n )\n else:\n if storage == \"offline\" or not online_enabled:\n self._save_offline_dataframe(\n feature_group,\n dataframe,\n operation,\n offline_write_options,\n validation_id,\n )\n elif storage == \"online\":\n self._save_online_dataframe(\n feature_group, dataframe, online_write_options\n )\n elif online_enabled and storage is None:\n self._save_offline_dataframe(\n feature_group,\n dataframe,\n operation,\n offline_write_options,\n )\n self._save_online_dataframe(\n feature_group, dataframe, online_write_options\n )\n except Exception:\n raise FeatureStoreException(\n \"Error writing to offline and online feature store\"\n )\n\n def save_stream_dataframe(\n self,\n feature_group,\n dataframe,\n query_name,\n output_mode,\n await_termination,\n timeout,\n write_options,\n ):\n serialized_df = self._online_fg_to_avro(\n feature_group, self._encode_complex_features(feature_group, dataframe)\n )\n if query_name is None:\n query_name = (\n \"insert_stream_\"\n + feature_group._online_topic_name\n + \"_\"\n + datetime.datetime.now().strftime(\"%Y%m%d%H%M%S\")\n )\n\n query = (\n serialized_df.writeStream.outputMode(output_mode)\n .format(self.KAFKA_FORMAT)\n .option(\n \"checkpointLocation\",\n \"/Projects/\"\n + client.get_instance()._project_name\n + \"/Resources/\"\n + query_name\n + \"-checkpoint\",\n )\n .options(**write_options)\n .option(\"topic\", feature_group._online_topic_name)\n .queryName(query_name)\n .start()\n )\n\n if await_termination:\n query.awaitTermination(timeout)\n\n return query\n\n def _save_offline_dataframe(\n self,\n feature_group,\n dataframe,\n operation,\n write_options,\n validation_id=None,\n ):\n if feature_group.time_travel_format == \"HUDI\":\n hudi_engine_instance = hudi_engine.HudiEngine(\n feature_group.feature_store_id,\n feature_group.feature_store_name,\n feature_group,\n self._spark_session,\n self._spark_context,\n )\n hudi_engine_instance.save_hudi_fg(\n dataframe, self.APPEND, operation, write_options, validation_id\n )\n else:\n dataframe.write.format(self.HIVE_FORMAT).mode(self.APPEND).options(\n **write_options\n ).partitionBy(\n feature_group.partition_key if feature_group.partition_key else []\n ).saveAsTable(\n feature_group._get_table_name()\n )\n\n def _save_online_dataframe(self, feature_group, dataframe, write_options):\n\n serialized_df = self._online_fg_to_avro(\n feature_group, self._encode_complex_features(feature_group, dataframe)\n )\n serialized_df.write.format(self.KAFKA_FORMAT).options(**write_options).option(\n \"topic\", feature_group._online_topic_name\n ).save()\n\n def _encode_complex_features(self, feature_group, dataframe):\n \"\"\"Encodes all complex type features to binary using their avro type as schema.\"\"\"\n return dataframe.select(\n [\n field[\"name\"]\n if field[\"name\"] not in feature_group.get_complex_features()\n else to_avro(\n field[\"name\"], feature_group._get_feature_avro_schema(field[\"name\"])\n ).alias(field[\"name\"])\n for field in json.loads(feature_group.avro_schema)[\"fields\"]\n ]\n )\n\n def _online_fg_to_avro(self, feature_group, dataframe):\n \"\"\"Packs all features into named struct to be serialized 
to single avro/binary\n column. And packs primary key into arry to be serialized for partitioning.\n \"\"\"\n return dataframe.select(\n [\n # be aware: primary_key array should always be sorted\n to_avro(\n concat(\n *[\n col(f).cast(\"string\")\n for f in sorted(feature_group.primary_key)\n ]\n )\n ).alias(\"key\"),\n to_avro(\n struct(\n [\n field[\"name\"]\n for field in json.loads(feature_group.avro_schema)[\"fields\"]\n ]\n ),\n feature_group._get_encoded_avro_schema(),\n ).alias(\"value\"),\n ]\n )\n\n def write_training_dataset(\n self, training_dataset, dataset, user_write_options, save_mode\n ):\n if isinstance(dataset, query.Query):\n dataset = dataset.read()\n\n dataset = self.convert_to_default_dataframe(dataset)\n\n if training_dataset.coalesce:\n dataset = dataset.coalesce(1)\n\n self.training_dataset_schema_match(dataset, training_dataset.schema)\n write_options = self.write_options(\n training_dataset.data_format, user_write_options\n )\n\n # check if there any transformation functions that require statistics attached to td features\n builtin_tffn_features = [\n ft_name\n for ft_name in training_dataset.transformation_functions\n if training_dataset._transformation_function_engine.is_builtin(\n training_dataset.transformation_functions[ft_name]\n )\n ]\n\n if len(training_dataset.splits) == 0:\n if builtin_tffn_features:\n # compute statistics before transformations are applied\n stats = training_dataset._statistics_engine.compute_transformation_fn_statistics(\n td_metadata_instance=training_dataset,\n columns=builtin_tffn_features,\n feature_dataframe=dataset,\n )\n # Populate builtin transformations (if any) with respective arguments\n training_dataset._transformation_function_engine.populate_builtin_attached_fns(\n training_dataset.transformation_functions, stats.content\n )\n # apply transformation functions (they are applied separately if there are splits)\n dataset = self._apply_transformation_function(training_dataset, dataset)\n\n path = training_dataset.location + \"/\" + training_dataset.name\n self._write_training_dataset_single(\n dataset,\n training_dataset.storage_connector,\n training_dataset.data_format,\n write_options,\n save_mode,\n path,\n )\n else:\n split_names = sorted([*training_dataset.splits])\n split_weights = [training_dataset.splits[i] for i in split_names]\n self._write_training_dataset_splits(\n training_dataset,\n dataset.randomSplit(split_weights, training_dataset.seed),\n write_options,\n save_mode,\n split_names,\n builtin_tffn_features=builtin_tffn_features,\n )\n\n def _write_training_dataset_splits(\n self,\n training_dataset,\n feature_dataframe_list,\n write_options,\n save_mode,\n split_names,\n builtin_tffn_features,\n ):\n stats = None\n if builtin_tffn_features:\n # compute statistics before transformations are applied\n i = [\n i\n for i, name in enumerate(split_names)\n if name == training_dataset.train_split\n ][0]\n stats = training_dataset._statistics_engine.compute_transformation_fn_statistics(\n td_metadata_instance=training_dataset,\n columns=builtin_tffn_features,\n feature_dataframe=feature_dataframe_list[i],\n )\n\n for i in range(len(feature_dataframe_list)):\n # Populate builtin transformations (if any) with respective arguments for each split\n if stats is not None:\n training_dataset._transformation_function_engine.populate_builtin_attached_fns(\n training_dataset.transformation_functions, stats.content\n )\n # apply transformation functions (they are applied separately to each split)\n dataset = 
self._apply_transformation_function(\n training_dataset, dataset=feature_dataframe_list[i]\n )\n\n split_path = training_dataset.location + \"/\" + str(split_names[i])\n self._write_training_dataset_single(\n dataset,\n training_dataset.storage_connector,\n training_dataset.data_format,\n write_options,\n save_mode,\n split_path,\n )\n\n def _write_training_dataset_single(\n self,\n feature_dataframe,\n storage_connector,\n data_format,\n write_options,\n save_mode,\n path,\n ):\n # TODO: currently not supported petastorm, hdf5 and npy file formats\n if data_format.lower() == \"tsv\":\n data_format = \"csv\"\n\n path = self.setup_storage_connector(storage_connector, path)\n\n feature_dataframe.write.format(data_format).options(**write_options).mode(\n save_mode\n ).save(path)\n\n def read(self, storage_connector, data_format, read_options, location):\n if isinstance(location, str):\n if data_format.lower() in [\"delta\", \"parquet\", \"hudi\", \"orc\"]:\n # All the above data format readers can handle partitioning\n # by their own, they don't need /**\n path = location\n else:\n path = location + \"/**\"\n\n if data_format.lower() == \"tsv\":\n data_format = \"csv\"\n else:\n path = None\n\n path = self.setup_storage_connector(storage_connector, path)\n\n return (\n self._spark_session.read.format(data_format)\n .options(**(read_options if read_options else {}))\n .load(path)\n )\n\n def read_stream(\n self,\n storage_connector,\n message_format,\n schema,\n options,\n include_metadata,\n ):\n # ideally all this logic should be in the storage connector in case we add more\n # streaming storage connectors...\n stream = self._spark_session.readStream.format(storage_connector.SPARK_FORMAT)\n\n # set user options last so that they overwrite any default options\n stream = stream.options(**storage_connector.spark_options(), **options)\n\n if storage_connector.type == StorageConnector.KAFKA:\n return self._read_stream_kafka(\n stream, message_format, schema, include_metadata\n )\n\n def _read_stream_kafka(self, stream, message_format, schema, include_metadata):\n kafka_cols = [\n col(\"key\"),\n col(\"topic\"),\n col(\"partition\"),\n col(\"offset\"),\n col(\"timestamp\"),\n col(\"timestampType\"),\n ]\n\n if message_format == \"avro\" and schema is not None:\n # check if vallid avro schema\n avro.schema.parse(schema)\n df = stream.load()\n if include_metadata is True:\n return df.select(\n *kafka_cols, from_avro(df.value, schema).alias(\"value\")\n ).select(*kafka_cols, col(\"value.*\"))\n return df.select(from_avro(df.value, schema).alias(\"value\")).select(\n col(\"value.*\")\n )\n elif message_format == \"json\" and schema is not None:\n df = stream.load()\n if include_metadata is True:\n return df.select(\n *kafka_cols,\n from_json(df.value.cast(\"string\"), schema).alias(\"value\")\n ).select(*kafka_cols, col(\"value.*\"))\n return df.select(\n from_json(df.value.cast(\"string\"), schema).alias(\"value\")\n ).select(col(\"value.*\"))\n\n if include_metadata is True:\n return stream.load()\n return stream.load().select(\"key\", \"value\")\n\n def add_file(self, file):\n self._spark_context.addFile(\"hdfs://\" + file)\n return SparkFiles.get(os.path.basename(file))\n\n def profile(\n self,\n dataframe,\n relevant_columns,\n correlations,\n histograms,\n exact_uniqueness=True,\n ):\n \"\"\"Profile a dataframe with Deequ.\"\"\"\n return (\n self._jvm.com.logicalclocks.hsfs.engine.SparkEngine.getInstance().profile(\n dataframe._jdf,\n relevant_columns,\n correlations,\n histograms,\n 
exact_uniqueness,\n )\n )\n\n def validate(self, dataframe, expectations, log_activity=True):\n \"\"\"Run data validation on the dataframe with Deequ.\"\"\"\n\n expectations_java = []\n for expectation in expectations:\n rules = []\n for rule in expectation.rules:\n rules.append(\n self._jvm.com.logicalclocks.hsfs.metadata.validation.Rule.builder()\n .name(\n self._jvm.com.logicalclocks.hsfs.metadata.validation.RuleName.valueOf(\n rule.get(\"name\")\n )\n )\n .level(\n self._jvm.com.logicalclocks.hsfs.metadata.validation.Level.valueOf(\n rule.get(\"level\")\n )\n )\n .min(rule.get(\"min\", None))\n .max(rule.get(\"max\", None))\n .pattern(rule.get(\"pattern\", None))\n .acceptedType(\n self._jvm.com.logicalclocks.hsfs.metadata.validation.AcceptedType.valueOf(\n rule.get(\"accepted_type\")\n )\n if rule.get(\"accepted_type\") is not None\n else None\n )\n .feature((rule.get(\"feature\", None)))\n .legalValues(rule.get(\"legal_values\", None))\n .build()\n )\n expectation = (\n self._jvm.com.logicalclocks.hsfs.metadata.Expectation.builder()\n .name(expectation.name)\n .description(expectation.description)\n .features(expectation.features)\n .rules(rules)\n .build()\n )\n expectations_java.append(expectation)\n\n return self._jvm.com.logicalclocks.hsfs.engine.DataValidationEngine.getInstance().validate(\n dataframe._jdf, expectations_java\n )\n\n def write_options(self, data_format, provided_options):\n if data_format.lower() == \"tfrecords\":\n options = dict(recordType=\"Example\")\n options.update(provided_options)\n elif data_format.lower() == \"tfrecord\":\n options = dict(recordType=\"Example\")\n options.update(provided_options)\n elif data_format.lower() == \"csv\":\n options = dict(delimiter=\",\", header=\"true\")\n options.update(provided_options)\n elif data_format.lower() == \"tsv\":\n options = dict(delimiter=\"\\t\", header=\"true\")\n options.update(provided_options)\n else:\n options = {}\n options.update(provided_options)\n return options\n\n def read_options(self, data_format, provided_options):\n if data_format.lower() == \"tfrecords\":\n options = dict(recordType=\"Example\", **provided_options)\n options.update(provided_options)\n elif data_format.lower() == \"tfrecord\":\n options = dict(recordType=\"Example\")\n options.update(provided_options)\n elif data_format.lower() == \"csv\":\n options = dict(delimiter=\",\", header=\"true\", inferSchema=\"true\")\n options.update(provided_options)\n elif data_format.lower() == \"tsv\":\n options = dict(delimiter=\"\\t\", header=\"true\", inferSchema=\"true\")\n options.update(provided_options)\n else:\n options = {}\n options.update(provided_options)\n return options\n\n def parse_schema_feature_group(self, dataframe):\n return [\n feature.Feature(\n feat.name.lower(),\n feat.dataType.simpleString(),\n feat.metadata.get(\"description\", None),\n )\n for feat in dataframe.schema\n ]\n\n def parse_schema_training_dataset(self, dataframe):\n return [\n training_dataset_feature.TrainingDatasetFeature(\n feat.name.lower(), feat.dataType.simpleString()\n )\n for feat in dataframe.schema\n ]\n\n def parse_schema_dict(self, dataframe):\n return {\n feat.name: feature.Feature(\n feat.name.lower(),\n feat.dataType.simpleString(),\n feat.metadata.get(\"description\", \"\"),\n )\n for feat in dataframe.schema\n }\n\n def training_dataset_schema_match(self, dataframe, schema):\n schema_sorted = sorted(schema, key=lambda f: f.index)\n insert_schema = dataframe.schema\n if len(schema_sorted) != len(insert_schema):\n raise SchemaError(\n 
\"Schemas do not match. Expected {} features, the dataframe contains {} features\".format(\n len(schema_sorted), len(insert_schema)\n )\n )\n\n i = 0\n for feat in schema_sorted:\n if feat.name != insert_schema[i].name.lower():\n raise SchemaError(\n \"Schemas do not match, expected feature {} in position {}, found {}\".format(\n feat.name, str(i), insert_schema[i].name\n )\n )\n\n i += 1\n\n def setup_storage_connector(self, storage_connector, path=None):\n # update storage connector to get new session token\n storage_connector.refetch()\n\n if storage_connector.type == StorageConnector.S3:\n return self._setup_s3_hadoop_conf(storage_connector, path)\n elif storage_connector.type == StorageConnector.ADLS:\n return self._setup_adls_hadoop_conf(storage_connector, path)\n else:\n return path\n\n def _setup_s3_hadoop_conf(self, storage_connector, path):\n if storage_connector.access_key:\n self._spark_context._jsc.hadoopConfiguration().set(\n \"fs.s3a.access.key\", storage_connector.access_key\n )\n if storage_connector.secret_key:\n self._spark_context._jsc.hadoopConfiguration().set(\n \"fs.s3a.secret.key\", storage_connector.secret_key\n )\n if storage_connector.server_encryption_algorithm:\n self._spark_context._jsc.hadoopConfiguration().set(\n \"fs.s3a.server-side-encryption-algorithm\",\n storage_connector.server_encryption_algorithm,\n )\n if storage_connector.server_encryption_key:\n self._spark_context._jsc.hadoopConfiguration().set(\n \"fs.s3a.server-side-encryption-key\",\n storage_connector.server_encryption_key,\n )\n if storage_connector.session_token:\n self._spark_context._jsc.hadoopConfiguration().set(\n \"fs.s3a.aws.credentials.provider\",\n \"org.apache.hadoop.fs.s3a.TemporaryAWSCredentialsProvider\",\n )\n self._spark_context._jsc.hadoopConfiguration().set(\n \"fs.s3a.session.token\",\n storage_connector.session_token,\n )\n return path.replace(\"s3\", \"s3a\", 1) if path is not None else None\n\n def _setup_adls_hadoop_conf(self, storage_connector, path):\n for k, v in storage_connector.spark_options().items():\n self._spark_context._jsc.hadoopConfiguration().set(k, v)\n\n return path\n\n def is_spark_dataframe(self, dataframe):\n if isinstance(dataframe, DataFrame):\n return True\n return False\n\n def get_empty_appended_dataframe(self, dataframe, new_features):\n dataframe = dataframe.limit(0)\n for f in new_features:\n dataframe = dataframe.withColumn(f.name, lit(None).cast(f.type))\n return dataframe\n\n def save_empty_dataframe(self, feature_group, dataframe):\n \"\"\"Wrapper around save_dataframe in order to provide no-op in python engine.\"\"\"\n self.save_dataframe(\n feature_group,\n dataframe,\n \"upsert\",\n feature_group.online_enabled,\n \"offline\",\n {},\n {},\n )\n\n def _apply_transformation_function(self, training_dataset, dataset):\n # generate transformation function expressions\n transformed_feature_names = []\n transformation_fn_expressions = []\n for (\n feature_name,\n transformation_fn,\n ) in training_dataset.transformation_functions.items():\n fn_registration_name = (\n transformation_fn.name + \"_\" + str(transformation_fn.version)\n )\n self._spark_session.udf.register(\n fn_registration_name, transformation_fn.transformation_fn\n )\n transformation_fn_expressions.append(\n \"{fn_name:}({name:}) AS {name:}\".format(\n fn_name=fn_registration_name, name=feature_name\n )\n )\n transformed_feature_names.append(feature_name)\n\n # generate non transformation expressions\n no_transformation_expr = [\n \"{name:} AS {name:}\".format(name=col_name)\n 
for col_name in dataset.columns\n if col_name not in transformed_feature_names\n ]\n\n # generate entire expression and execute it\n transformation_fn_expressions.extend(no_transformation_expr)\n dataset = dataset.selectExpr(*transformation_fn_expressions)\n\n # sort feature order if it was altered by transformation functions\n sorded_features = sorted(training_dataset._features, key=lambda ft: ft.index)\n sorted_feature_names = [ft.name for ft in sorded_features]\n dataset = dataset.select(*sorted_feature_names)\n return dataset\n\n\nclass SchemaError(Exception):\n \"\"\"Thrown when schemas don't match\"\"\"\n" ]
[ [ "pandas.DataFrame", "numpy.array" ] ]
vprzybylo/ai2es
[ "cd54222ca377e15dfada6436564e7f4ee942527c" ]
[ "day-night-classification/train_model.py" ]
[ "# USAGE\n# python train_model.py --dataset images --model model/day_night.hd5\n\n# set the matplotlib backend so figures can be saved in the background\nimport matplotlib\nmatplotlib.use(\"Agg\")\n\n# import the necessary packages\nfrom keras.preprocessing.image import ImageDataGenerator\nfrom keras.optimizers import Adam\nfrom sklearn.model_selection import train_test_split\nfrom keras.preprocessing.image import img_to_array\nfrom keras.utils import to_categorical\nfrom utils.lenet import LeNet\nfrom imutils import paths\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport argparse\nimport random\nimport cv2\nimport os\n\n# construct the argument parse and parse the arguments\nap = argparse.ArgumentParser()\nap.add_argument(\"-d\", \"--dataset\", required=True,\n help=\"path to input dataset\")\nap.add_argument(\"-m\", \"--model\", required=True,\n help=\"path to output model\")\nap.add_argument(\"-e\", \"--epochs\", type=int, default=140, required=False,\n help=\"path to output loss/accuracy plot\")\nap.add_argument(\"-p\", \"--plot\", type=str, default=\"plot.png\",\n help=\"path to output loss/accuracy plot\")\nargs = vars(ap.parse_args())\n\n# initialize the number of epochs to train for, initia learning rate,\n# and batch size\nEPOCHS = args[\"epochs\"]\nINIT_LR = 1e-3\nBS = 32\n\n# initialize the data and labels\nprint(\"[INFO] loading images...\")\ndata = []\nlabels = []\n\n# grab the image paths and randomly shuffle them\nimagePaths = sorted(list(paths.list_images(args[\"dataset\"])))\nrandom.seed(42)\nrandom.shuffle(imagePaths)\n\n# loop over the input images\nfor imagePath in imagePaths:\n # load the image, pre-process it, and store it in the data list\n image = cv2.imread(imagePath)\n image = cv2.resize(image, (28, 28))\n image = img_to_array(image)\n data.append(image)\n\n # extract the class label from the image path and update the\n # labels list\n label = imagePath.split(os.path.sep)[-2]\n label = 1 if label == \"day\" else 0\n labels.append(label)\n\n# scale the raw pixel intensities to the range [0, 1]\ndata = np.array(data, dtype=\"float\") / 255.0\nlabels = np.array(labels)\n\n# partition the data into training and testing splits using 75% of\n# the data for training and the remaining 25% for testing\n(trainX, testX, trainY, testY) = train_test_split(data,\n labels, test_size=0.25, random_state=42)\n\n# convert the labels from integers to vectors\ntrainY = to_categorical(trainY, num_classes=2)\ntestY = to_categorical(testY, num_classes=2)\n\n# construct the image generator for data augmentation\naug = ImageDataGenerator(rotation_range=30, width_shift_range=0.1,\n height_shift_range=0.1, shear_range=0.2, zoom_range=0.2,\n horizontal_flip=True, fill_mode=\"nearest\")\n\n# initialize the model\nprint(\"[INFO] compiling model...\")\nmodel = LeNet.build(width=28, height=28, depth=3, classes=2)\nopt = Adam(lr=INIT_LR, decay=INIT_LR / EPOCHS)\nmodel.compile(loss=\"binary_crossentropy\", optimizer=opt,\n metrics=[\"accuracy\"])\n\n# train the network\nprint(\"[INFO] training network...\")\nH = model.fit(aug.flow(trainX, trainY, batch_size=BS),\n validation_data=(testX, testY), steps_per_epoch=len(trainX) // BS,\n epochs=EPOCHS, verbose=1)\n\n# save the model to disk\nprint(\"[INFO] serializing network...\")\nmodel.save(args[\"model\"])\n\n# plot the training loss and accuracy\nplt.style.use(\"ggplot\")\nplt.figure()\nN = EPOCHS\nplt.plot(np.arange(0, N), H.history[\"loss\"], label=\"train_loss\")\nplt.plot(np.arange(0, N), H.history[\"accuracy\"], 
label=\"train_acc\")\n\nplt.title(\"Training Loss and Accuracy on Day/Night\")\nplt.xlabel(\"Epoch #\")\nplt.ylabel(\"Loss/Accuracy\")\nplt.legend(loc=\"lower left\")\nplt.savefig(args[\"plot\"])" ]
[ [ "matplotlib.use", "numpy.array", "matplotlib.pyplot.xlabel", "matplotlib.pyplot.savefig", "matplotlib.pyplot.title", "matplotlib.pyplot.legend", "matplotlib.pyplot.figure", "numpy.arange", "matplotlib.pyplot.style.use", "matplotlib.pyplot.ylabel", "sklearn.model_selection.train_test_split" ] ]
sstcam/TargetCalibSB
[ "f327b3e7ed63609c82264962863fc1e686c58858" ]
[ "TargetCalibSB/scripts/plot_cell_pedestal_vs_position.py" ]
[ "import argparse\nfrom argparse import ArgumentDefaultsHelpFormatter as Formatter\nfrom TargetCalibSB.pedestal import PedestalCellPosition\nfrom TargetCalibSB.stats import PixelStats, OnlineStats, OnlineHist\nfrom CHECLabPy.core.io import TIOReader\nfrom CHECLabPy.plotting.setup import Plotter\nfrom CHECLabPy.plotting.camera import CameraImage\nfrom CHECLabPy.utils.files import create_directory\nfrom tqdm import tqdm\nimport numpy as np\nfrom matplotlib import pyplot as plt\nfrom matplotlib.ticker import FuncFormatter, MultipleLocator\nfrom os.path import join\nfrom IPython import embed\n\n\nclass CellWaveform(Plotter):\n def plot(self, pedestal, std, hits, cell):\n n_samples = pedestal.size\n n_blocks = n_samples // 32\n block_end_samples = np.arange(n_blocks+1) * 32 + cell % 32\n\n for end_of_block in block_end_samples:\n start_of_block = end_of_block - 31\n color = self.ax._get_lines.get_next_color()\n if end_of_block >= n_samples:\n end_of_block = n_samples - 1\n self.ax.axvline(end_of_block, color=color, ls='--', alpha=0.7)\n if start_of_block < 0:\n start_of_block = 0\n self.ax.axvline(start_of_block, color=color, ls='--', alpha=0.7)\n\n x = np.where(hits > 0)[0]\n self.ax.errorbar(x, pedestal[x], yerr=std[x], color='black')\n\n self.ax.set_xlabel(\"Position in waveform\")\n self.ax.set_ylabel(\"Amplitude (Raw ADC)\")\n self.ax.set_title(\"Cell = {}\".format(cell))\n\n self.ax.xaxis.set_major_locator(MultipleLocator(16))\n\n\ndef main():\n description = (\n \"Generate the pedestals from an R0 file, subtract it from another \"\n \"R0 file, and plot the comparison of residuals from different \"\n \"pedestal methods\"\n )\n parser = argparse.ArgumentParser(description=description,\n formatter_class=Formatter)\n parser.add_argument('-f', '--file', dest='r0_path', required=True,\n help='R0 file to obtain residuals from')\n parser.add_argument('-o', '--output', dest='output_dir', required=True,\n help='directort to store output plots')\n args = parser.parse_args()\n\n r0_path = args.r0_path\n channel = 0\n output_dir = args.output_dir\n\n create_directory(output_dir)\n reader = TIOReader(r0_path, max_events=100000)\n\n # Generate Pedestals\n pedestal = PedestalCellPosition(\n reader.n_pixels, reader.n_samples, reader.n_cells\n )\n desc = \"Generating pedestal\"\n for wfs in tqdm(reader, total=reader.n_events, desc=desc):\n if wfs.missing_packets:\n continue\n pedestal.add_to_pedestal(wfs, wfs.first_cell_id)\n\n # embed()\n\n for cell in range(reader.n_cells):\n if (pedestal.hits[channel, cell] == 0).all():\n continue\n p_cell_wf = CellWaveform()\n p_cell_wf.plot(\n pedestal.pedestal[channel, cell],\n pedestal.std[channel, cell],\n pedestal.hits[channel, cell],\n cell\n )\n p_cell_wf.save(\n join(output_dir, f\"cell_pedestal_vs_position/{cell:04d}.pdf\")\n )\n\n\nif __name__ == '__main__':\n main()\n" ]
[ [ "numpy.where", "numpy.arange", "matplotlib.ticker.MultipleLocator" ] ]
daviskirk/subseqs
[ "8e538b1d1857413721c27eb47b567e9b7fedddf6" ]
[ "example.py" ]
[ "\nimport pyximport; pyximport.install()\n\nimport pandas as pd\nimport numpy as np\nimport numba as nb\nfrom numba import jit, njit\nfrom time import perf_counter, perf_counter_ns\nfrom subseq import is_subseq_py, is_subseq_rs\nfrom cy_subseq import find_loop_cy\n\nimport cython\n\ndef find_loop(seq, subseq):\n n = len(seq)\n m = len(subseq)\n for i in range(n - m + 1):\n found = True\n for j in range(m):\n if seq[i + j] != subseq[j]:\n found = False\n break\n if found:\n return True\n return False\n\nfind_loop_jit = jit(forceobj=True, cache=True)(find_loop)\nfind_loop_njit = njit(cache=True)(find_loop)\n\nsubseq = ['dd', 'ee']\nseq = ['a', 'b', 'c'] * 100 + subseq\n\n\nnp_seq = np.array(seq)\nnp_subseq = np.array(subseq)\n\npd_seq = pd.Series(seq).astype(\"string\").values\npd_subseq = pd.Series(subseq).astype(\"string\").values\n\n\ncat_seq = pd.Series(seq).astype(\"category\").values.codes\ncat_subseq = pd.Series(subseq).astype(\"category\").values.codes\n\n\nif __name__ == \"__main__\":\n\n fcn_map = {\n \"py\": lambda: find_loop(seq, subseq),\n \"cy\": lambda: find_loop_cy(seq, subseq),\n \"rs\": lambda: is_subseq_rs(seq, subseq),\n \"rs_py\": lambda: is_subseq_py(seq, subseq),\n \"py_np\": lambda: find_loop(np_seq, np_subseq),\n \"py_pd\": lambda: find_loop(pd_seq, pd_subseq),\n \"jit\": lambda: find_loop_jit(pd_seq, pd_subseq),\n \"njit\": lambda: find_loop_njit(np_seq, np_subseq),\n }\n\n for k, fcn in fcn_map.items():\n result = fcn()\n print(f\"{k}: {result}\")\n\n n = 1000\n\n for k, fcn in fcn_map.items():\n dt = 0\n for i in range(n):\n t0 = perf_counter_ns()\n fcn()\n t1 = perf_counter_ns()\n dt += t1 - t0\n print(f\"{k}: {dt / n}\")\n" ]
[ [ "numpy.array", "pandas.Series" ] ]
rosinality/melgan-pytorch
[ "c03662fe0a63c5502ea33d2c17da75b77e278a30" ]
[ "train_melgan.py" ]
[ "import random\n\nimport torch\nfrom torch import optim, nn\nfrom torch.nn import functional as F\nfrom torch.utils.data import DataLoader, Subset\nfrom tqdm import tqdm\nimport librosa.display\n\nfrom dataset import MelInversionDataset\nfrom melgan import Generator, MultiScaleDiscriminator\n\nimport matplotlib as mpl\n\nmpl.use('Agg')\nfrom matplotlib import pyplot as plt\n\n\ndef visualize(real, fake, filename):\n real = real.squeeze(1).detach().to('cpu').numpy()\n fake = fake.squeeze(1).detach().to('cpu').numpy()\n fig = plt.figure()\n plt.subplot(2, 1, 1)\n librosa.display.waveplot(real, sr=22050)\n plt.title('Real')\n plt.subplot(2, 1, 2)\n librosa.display.waveplot(fake, sr=22050)\n plt.title('Fake')\n fig.savefig(filename)\n\n\ndef requires_grad(model, flag=True):\n for p in model.parameters():\n p.requires_grad = flag\n\n\ndef train(loader, generator, discriminator, g_optimizer, d_optimizer):\n pbar = tqdm(loader)\n\n for i, (mel, real_wav) in enumerate(pbar):\n mel = mel.to(device) / 100\n real_wav = real_wav.to(device)\n\n lr = 1e-4\n # lr = 1e-4 / 100 + i / 300 * (1e-4 - 1e-4 / 100)\n # lr = min(lr, 1e-4)\n # g_optimizer.param_groups[0]['lr'] = lr\n # d_optimizer.param_groups[0]['lr'] = lr\n\n discriminator.zero_grad()\n requires_grad(discriminator, True)\n requires_grad(generator, False)\n\n fake_wav = generator(mel)\n real_predict, _ = discriminator(real_wav)\n fake_predict, _ = discriminator(fake_wav)\n\n disc_loss = 0\n for real, fake in zip(real_predict, fake_predict):\n loss = ((real - 1) ** 2).mean() + (fake ** 2).mean()\n disc_loss += loss\n\n disc_loss.backward()\n # nn.utils.clip_grad_norm_(discriminator.parameters(), 1)\n d_optimizer.step()\n\n generator.zero_grad()\n requires_grad(discriminator, False)\n requires_grad(generator, True)\n\n fake_wav = generator(mel)\n real_predict, real_feats = discriminator(real_wav)\n fake_predict, fake_feats = discriminator(fake_wav)\n\n gen_loss = 0\n feat_loss = 0\n\n for fake in real_predict:\n loss = ((fake - 1) ** 2).mean()\n gen_loss += loss\n\n for real, fake in zip(real_feats, fake_feats):\n loss = F.l1_loss(fake, real)\n feat_loss += loss\n\n loss = gen_loss + 10 * feat_loss\n loss.backward()\n # nn.utils.clip_grad_norm_(generator.parameters(), 1)\n g_optimizer.step()\n\n pbar.set_description(\n f'G: {gen_loss.item():.5f}; D: {disc_loss.item():.5f}; feat: {feat_loss.item():.5f}; lr: {lr:.5f}'\n )\n\n if i % 100 == 0:\n # print(real_wav[0])\n # print(fake_wav[0])\n # visualize(real_wav[0], fake_wav[0], f'sample/{str(i).zfill(4)}.png')\n torch.save(\n {\n 'real': real_wav.detach().to('cpu'),\n 'fake': fake_wav.detach().to('cpu'),\n },\n f'sample/{str(i).zfill(4)}.pt',\n )\n\n\nif __name__ == '__main__':\n device = 'cuda'\n torch.backends.cudnn.deterministic = True\n\n generator = Generator(80)\n discriminator = MultiScaleDiscriminator()\n dataset = MelInversionDataset('kss.lmdb', target_len=16384)\n\n indices = list(range(len(dataset)))\n random.seed(17)\n random.shuffle(indices)\n train_ind = indices[200:]\n valid_ind = indices[:200]\n\n train_set = Subset(dataset, train_ind)\n valid_set = Subset(dataset, valid_ind)\n\n train_loader = DataLoader(train_set, batch_size=16, shuffle=True, num_workers=4)\n valid_loader = DataLoader(valid_set, batch_size=16, shuffle=True, num_workers=4)\n\n generator = generator.to(device)\n discriminator = discriminator.to(device)\n\n g_optim = optim.Adam(generator.parameters(), lr=1e-4, betas=(0.5, 0.9))\n d_optim = optim.Adam(discriminator.parameters(), lr=1e-4, betas=(0.5, 0.9))\n\n for i in 
range(500):\n train(train_loader, generator, discriminator, g_optim, d_optim)\n torch.save(\n {\n 'g': generator.state_dict(),\n 'd': discriminator.state_dict(),\n 'g_optim': g_optim.state_dict(),\n 'd_optim': d_optim.state_dict(),\n },\n f'checkpoint/melgan-{str(i).zfill(3)}.pt',\n )\n\n" ]
[ [ "matplotlib.use", "torch.nn.functional.l1_loss", "matplotlib.pyplot.title", "matplotlib.pyplot.figure", "torch.utils.data.DataLoader", "torch.utils.data.Subset", "matplotlib.pyplot.subplot" ] ]
pboley/PMOIRED
[ "7018322504f2b9d979889b1f7e95faa6d8b7c0a3" ]
[ "pmoired/__init__.py" ]
[ "try:\n from pmoired import oimodels, oifits, oicandid, oifake\nexcept:\n import oimodels, oifits, oicandid , oifake\n\n\nimport time\n\nimport numpy as np\nimport matplotlib.pyplot as plt\n\nimport multiprocessing\ntry:\n # -- see https://stackoverflow.com/questions/64174552\n multiprocessing.set_start_method('spawn')\nexcept:\n pass\n\nprint('[P]arametric [M]odeling of [O]ptical [I]nte[r]ferom[e]tric [D]ata', end=' ')\nprint('https://github.com/amerand/PMOIRED')\n\nclass OI:\n def __init__(self, filenames, insname=None, targname=None, verbose=True,\n withHeader=True, medFilt=None, tellurics=None, debug=False,\n binning=None):\n \"\"\"\n filenames: is either a single file (str) or a list of OIFITS files (list\n of str).\n\n insname: which instrument to select. Not needed if only one instrument\n per file. If multi instruments in files, all will be loaded.\n\n targname: which target. Not needed if only one target in files\n\n with_header: will load full header (default=False)\n\n medFilt: apply median filter of width 'medFilt'. Default no filter\n\n binning: bin data by this factor (integer). default no binning\n\n tellurics: pass a telluric correction vector, or a list of vectors,\n one per file. If nothing given, will use the tellurics in the OIFITS\n file. Works with results from 'pmoired.tellcorr'\n \"\"\"\n # -- load data\n self.data = []\n self.debug = debug\n self.addData(filenames, insname=insname, targname=targname,\n verbose=verbose, withHeader=withHeader, medFilt=medFilt,\n tellurics=tellurics, binning=binning)\n\n # -- last best fit to the data\n self.bestfit = None\n # -- bootstrap results:\n self.boot = None\n # -- grid / random fits:\n self.grid = None\n # -- CANDID results:\n self.candidFits = None\n # -- current figure\n self.fig = 0\n # -- modeled quantities:\n self.fluxes = {}\n self.spectra = {}\n\n def addData(self, filenames, insname=None, targname=None, verbose=True,\n withHeader=False, medFilt=None, tellurics=None, binning=None):\n if not type(filenames)==list:\n filenames = [filenames]\n self.data.extend(oifits.loadOI(filenames, insname=insname, targname=targname,\n verbose=verbose, withHeader=withHeader, medFilt=medFilt,\n tellurics=tellurics, debug=self.debug, binning=binning))\n return\n def setSED(self, wl, sed, err=0.01):\n \"\"\"\n force the SED in data to \"sed\" as function of \"wl\" (in um)\n err is the relative error (default 0.01 == 1%)\n\n will update/create OI_FLUX and interpolate the SED in \"FLUX\" for\n each telescope.\n \"\"\"\n for d in self.data:\n if 'OI_FLUX' in d:\n # -- replace flux\n tmp = np.interp(d['WL'], wl, sed)\n for k in d['OI_FLUX'].keys():\n s = tmp[None,:] + 0*d['OI_FLUX'][k]['MJD'][:,None]\n d['OI_FLUX'][k]['FLUX'] = s\n d['OI_FLUX'][k]['RFLUX'] = s\n d['OI_FLUX'][k]['EFLUX'] = err*s\n d['OI_FLUX'][k]['FLAG'] *= False\n else:\n # -- add oi_flux\n flux = {}\n for mjd in d['configurations per MJD']:\n d['configurations per MJD'][mjd].extend(d['telescopes'])\n\n mjd = np.array(sorted(list(d['configurations per MJD'])))\n s = np.interp(d['WL'], wl, sed)[None,:] + mjd[:,None]\n for t in d['telescopes']:\n flux[t] = {'FLUX':s, 'RFLUX':s, 'EFLUX':err*s, 'FLAG':s==0, 'MJD':mjd}\n d['OI_FLUX'] = flux\n return\n def setupFit(self, fit, update=False, debug=False):\n \"\"\"\n set fit parameters by giving a dictionnary (or a list of dict, same length\n as 'data'):\n\n 'obs': list of observables in\n 'FLUX': Flux\n 'NFLUX': Flux normalized to continuum\n 'V2': sqared Visibility\n '|V|': visibility modulus\n 'DPHI': differential phase (wrt 
continuum)\n 'T3PHI': closure phases\n 'T3AMP': closure amplitude\n -> by default, all possible observables are fitted\n\n 'wl ranges': gives a list of wavelength ranges (in um) where to fit.\n e.g. [(1.5, 1.6), (1.65, 1.75)]\n it will not override flagged data\n -> by default, the full defined range is fitted.\n\n 'min error': forcing errors to have a minimum value. Keyed by the same\n values as 'obs'. e.g. {'V2':0.04, 'T3PHI':1.5} sets the minimum error\n to 0.04 in V2 (absolute) and 1.5 degrees for T3PHI\n\n 'min relative error': same as 'min error', but for relative values. Useful\n for FLUX, V2, |V| or T3AMP\n\n 'max error': similar to 'min error' but will ignore (flag) data above a\n certain error\n \"\"\"\n correctType = type(fit)==dict\n correctType = correctType or (type(fit)==list and\n len(fit)==len(self.data) and\n all([type(f)==dict for f in fit]))\n assert correctType, \"parameter 'fit' must be a dictionnary or a list of dict\"\n\n if type(fit)==dict:\n for d in self.data:\n assert _checkSetupFit(fit), 'setup dictionnary is incorrect'\n if 'fit' in d and update:\n d['fit'].update(fit)\n else:\n d['fit'] = fit.copy()\n\n if type(fit)==list:\n for i,d in enumerate(self.data):\n assert _checkSetupFit(fit[i]), 'setup dictionnary is incorrect'\n if 'fit' in d and update:\n d['fit'].update(fit[i])\n else:\n d['fit'] = fit[i].copy()\n if debug:\n print([d['fit']['obs'] for d in self.data])\n\n for d in self.data:\n if 'obs' in d['fit']:\n if debug:\n print(d['filename'],\n list(filter(lambda x: x.startswith('OI_'), d.keys())))\n d['fit']['obs'] = _checkObs(d, d['fit']['obs']).copy()\n if debug:\n print(d['fit']['obs'])\n return\n\n def doFit(self, model=None, fitOnly=None, doNotFit='auto', useMerged=True, verbose=2,\n maxfev=10000, ftol=1e-5, epsfcn=1e-8, follow=None):\n \"\"\"\n model: a dictionnary describing the model\n fitOnly: list of parameters to fit (default: all)\n doNotFit: list of parameters not to fit (default: none)\n maxfev: maximum number of iterations\n ftol: chi2 stopping criteria\n follow: list of parameters to display as fit is going on\n \"\"\"\n if model is None:\n try:\n model = self.bestfit['best']\n if doNotFit=='auto':\n doNotFit = self.bestfit['doNotFit']\n fitOnly = self.bestfit['fitOnly']\n except:\n assert True, ' first guess as \"model={...}\" should be provided'\n\n if doNotFit=='auto':\n doNotFit = []\n # -- merge data to accelerate computations\n self._merged = oifits.mergeOI(self.data, collapse=True, verbose=False)\n self.bestfit = oimodels.fitOI(self._merged, model, fitOnly=fitOnly,\n doNotFit=doNotFit, verbose=verbose,\n maxfev=maxfev, ftol=ftol, epsfcn=epsfcn,\n follow=follow)\n self._model = oimodels.VmodelOI(self._merged, self.bestfit['best'])\n return\n def showFit(self):\n if not self.bestfit is None:\n self.fig += 1\n oimodels.dpfit.exploreFit(self.bestfit, fig=self.fig)\n return\n\n def candidFitMap(self, rmin=None, rmax=None, rstep=None, cmap=None,\n firstGuess=None, fitAlso=[], fig=None, doNotFit=[],\n logchi2=False, multi=True):\n self._merged = oifits.mergeOI(self.data, collapse=True, verbose=False)\n if fig is None:\n self.fig += 1\n fig = self.fig\n self.candidFits = oicandid.fitMap(self._merged, rmin=rmin, rmax=rmax,\n rstep=rstep, firstGuess=firstGuess,\n fitAlso=fitAlso, fig=fig, cmap=cmap,\n doNotFit=doNotFit, logchi2=logchi2,\n multi=multi)\n self.bestfit = self.candidFits[0]\n return\n\n def gridFit(self, expl, Nfits=None, param=None, fitOnly=None, doNotFit=None,\n maxfev=5000, ftol=1e-6, multi=True, epsfcn=1e-7):\n \"\"\"\n 
perform \"Nfits\" fit on data, starting from \"param\" (default last best fit),\n with grid / randomised parameters. Nfits can be determined from \"expl\" if\n \"grid\" param are defined.\n\n expl = {'grid':{'p1':(0,1,0.1), 'p2':(-1,1,0.5), ...},\n 'rand':{'p3':(0,1), 'p4':(-np.pi, np.pi), ...},\n 'randn':{'p5':(0, 1), 'p6':(np.pi/2, np.pi), ...}}\n\n grid=(min, max, step): explore all values for \"min\" to \"max\" with \"step\"\n rand=(min, max): uniform randomized parameter\n randn=(mean, std): normaly distributed parameter\n\n parameters should only appear once in either grid, rand or randn\n\n if \"grid\" are defined, they will define N as:\n Nfits = prod_i (max_i-min_i)/step_i + 1\n \"\"\"\n if param is None and not self.bestfit is None:\n param = self.bestfit['best']\n if doNotFit is None:\n doNotFit = self.bestfit['doNotFit']\n if fitOnly is None:\n fiitOnly = self.bestfit['fitOnly']\n assert not param is None, 'first guess should be provided: param={...}'\n self._merged = oifits.mergeOI(self.data, collapse=True, verbose=False)\n self.grid = oimodels.gridFitOI(self._merged, param, expl, Nfits,\n fitOnly=fitOnly, doNotFit=doNotFit,\n maxfev=maxfev, ftol=ftol, multi=multi,\n epsfcn=epsfcn)\n self.bestfit = self.grid[0]\n return\n\n def showGrid(self, px, py, color='chi2', aspect=None,\n vmin=None, vmax=None, cmap='spring'):\n assert not self.grid is None, 'You should run gridFit first!'\n self.fig += 1\n oimodels.showGrid(self.grid, px, py, color=color, fig=self.fig,\n vmin=vmin, vmax=vmax, aspect=aspect, cmap=cmap)\n return\n\n def bootstrapFit(self, Nfits=None, model=None, multi=True):\n \"\"\"\n perform 'Nfits' bootstrapped fits around dictionnary parameters 'model'.\n by default Nfits is set to the number of data, and model to the last best\n fit. 'multi' sets the number of threads (default==all available).\n \"\"\"\n self._merged = oifits.mergeOI(self.data, collapse=True, verbose=False)\n if model is None:\n assert not self.bestfit is None, 'you should run a fit first'\n model = self.bestfit\n self.boot = oimodels.bootstrapFitOI(self._merged, model, Nfits, multi=multi)\n return\n\n def showBootstrap(self, sigmaClipping=4.5, combParam={}, showChi2=False):\n \"\"\"\n example:\n combParam={'SEP':'np.sqrt($c,x**2+$c,y**2)',\n 'PA':'np.arctan2($c,x, $c,y)*180/np.pi'}\n \"\"\"\n if combParam=={}:\n self.boot = oimodels.analyseBootstrap(self.boot,\n sigmaClipping=sigmaClipping, verbose=0)\n self.fig += 1\n oimodels.showBootstrap(self.boot, showRejected=0, fig=self.fig,\n combParam=combParam, sigmaClipping=sigmaClipping,\n showChi2=showChi2)\n self.fig += 1\n return\n\n def show(self, model='best', fig=None, obs=None, logV=False, logB=False,\n showFlagged=False, spectro=None, showUV=True, perSetup=False,\n allInOne=False, imFov=None, imPix=None, imPow=1., imMax=None,\n checkImVis=False, vLambda0=None, imWl0=None, cmap='inferno',\n imX=0, imY=0):\n \"\"\"\n - model: dict defining a model to be overplotted. if a fit was performed,\n the best fit models will be displayed by default. Set to None for no\n models\n - fig: figure number (int)\n - obs: list of pbservables to show (in ['|V|', 'V2', 'T3PHI', 'DPHI',\n 'FLUX']). Defautl will show all data. 
If fit was performed, fitted\n observables will be shown.\n - logV, logB: show visibilities, baselines in log scale (boolean)\n - showFlagged: show data flagged in the file (boolean)\n - showUV: show u,v coordinated (boolean)\n - spectro: force spectroscopic mode\n - vLambda0: show sepctroscopic data with velocity scale,\n around this central wavelength (in microns)\n - perSetup: each instrument/spectroscopic setup in a differn plot (boolean)\n - allInOne: all data in a single figure\n\n show image and sepctrum of model: set imFov to a value to show image\n - imFov: field of view in mas\n - imPix: imPixel size in mas\n - imMax: cutoff for image display (0..1) or in percentile ('0'..'100')\n - imPow: power law applied to image for display. use 0<imPow<1 to show low\n surface brightness features\n - imX, imY: center of image (in mas)\n - imWl0: list of wavelength (um) to show the image default (min, max)\n - cmap: color map (default 'bone')\n - checkImVis: compute visibility from image to check (can be wrong if\n fov is too small)\n \"\"\"\n t0 = time.time()\n\n if not imFov is None and imPix is None:\n imPix = imFov/100.\n\n if not imFov is None:\n assert imPix>imFov/500, \"the pixel of the synthetic image is too small!\"\n\n if spectro is None:\n N = [len(d['WL']) for d in self.data]\n spectro = max(N)>20\n\n if perSetup or allInOne:\n data = oifits.mergeOI(self.data, collapse=False, verbose=False)\n else:\n data = self.data\n\n if not fig is None:\n self.fig = fig\n else:\n self.fig += 1\n fig = self.fig\n\n if model=='best' and type(self.bestfit) is dict and \\\n 'best' in self.bestfit:\n #print('showing best fit model')\n model = self.bestfit['best']\n elif not type(model) is dict:\n #print('no model to show...')\n model = None\n\n if perSetup:\n if perSetup is True:\n # -- try to guess the groupings by \"ins_wl_resolution\"\n R = lambda wl, dwl: 5*np.round(np.mean(wl/dwl)/5,\n 1-int(np.log10(np.mean(wl/dwl)/5)))\n setups = [d['insname'].split('_')[0]+' %.1fum R%.0f'%(d['WL'].mean(), R(d['WL'], d['dWL'])) for d in data]\n perSetup = list(set(setups))\n #print('setups:', setups)\n #print('perSetup:', perSetup)\n else:\n setups = [d['insname'] for d in data]\n\n # -- group\n group = []\n for i,d in enumerate(data):\n for s in perSetup:\n if s in setups[i]:\n group.append(s)\n continue\n for j,g in enumerate(sorted(set(group))):\n oimodels.showOI([d for i,d in enumerate(data) if group[i]==g],\n param=model, fig=self.fig, obs=obs, logV=logV,\n logB=logB, showFlagged=showFlagged,\n spectro=spectro, showUV=showUV, allInOne=True,\n imFov=imFov, imPix=imPix, imPow=imPow, imMax=imMax,\n checkImVis=checkImVis, vLambda0=vLambda0, imWl0=imWl0,\n cmap=cmap, imX=imX, imY=imY)\n self.fig+=1\n if imFov is None:\n plt.suptitle(g)\n else:\n plt.figure(self.fig)\n plt.suptitle(g)\n self.fig+=1\n return\n elif allInOne:\n # -- figure out the list of obs, could be heteregenous\n if not obs is None:\n obs = list(obs)\n else:\n obs = []\n for d in data:\n if not 'fit' in d or not 'obs' in d['fit']:\n if 'OI_T3' in d:\n obs.append('T3PHI')\n if 'OI_VIS2' in d:\n obs.append('V2')\n if 'OI_VIS' in d:\n obs.append('|V|')\n if 'OI_FLUX' in d:\n obs.append('FLUX')\n else:\n obs.extend(d['fit']['obs'])\n obs = list(set(obs))\n\n self._model = oimodels.showOI(self.data, param=model, fig=self.fig,\n obs=obs, logV=logV, logB=logB, showFlagged=showFlagged,\n spectro=spectro, showUV=showUV, allInOne=allInOne,\n imFov=imFov, imPix=imPix, imPow=imPow, imMax=imMax,\n checkImVis=checkImVis, vLambda0=vLambda0, imWl0=imWl0,\n 
cmap=cmap, imX=imX, imY=imY)\n if allInOne:\n self.fig += 1\n else:\n self.fig += len(self.data)\n else:\n for i,d in enumerate(data):\n self._model = oimodels.showOI([d], param=model, fig=self.fig,\n obs=obs, logV=logV, logB=logB, showFlagged=showFlagged,\n spectro=spectro, showUV=showUV,\n imFov=imFov if i==(len(data)-1) else None,\n imPix=imPix, imPow=imPow, imMax=imMax,\n checkImVis=checkImVis, vLambda0=vLambda0,\n imWl0=imWl0, cmap=cmap, imX=imX, imY=imY)\n self.fig += 1\n if not imFov is None:\n self.fig += 1\n print('done in %.2fs'%(time.time()-t0))\n return\n\n def halfLightRadii(self):\n if not self.bestfit is None:\n self.halfrad = oimodels.halfLightRadiusFromParam(self.bestfit, verbose=1)\n else:\n print('no best fit model to compute half light radii!')\n return\n\n def computeModelSpectrum(self, model='best'):\n if model=='best' and not self.bestfit is None:\n model = self.bestfit['best']\n assert type(model) is dict, \"model must be a dictionnary\"\n\n allWLc = [] # -- continuum -> absolute flux\n allWLs = [] # -- with spectral lines -> normalized flux\n\n for i,o in enumerate(self.data):\n if 'fit' in o and 'obs' in o['fit'] and 'NFLUX' in o['fit']['obs']:\n if 'WL mask' in o:\n allWLs.extend(list(o['WL'][o['WL mask']]))\n else:\n allWLs.extend(list(o['WL']))\n else:\n if 'WL mask' in o:\n allWLc.extend(list(o['WL'][o['WL mask']]))\n else:\n allWLc.extend(list(o['WL']))\n allWLc = np.array(sorted(list(set(allWLc))))\n allWLs = np.array(sorted(list(set(allWLs))))\n M = {}\n if len(allWLc):\n allWL = {'WL':allWLc, 'fit':{'obs':[]}} # minimum required\n tmp = oimodels.VmodelOI(allWL, model)\n try:\n fluxes = {k.split(',')[0]:tmp['MODEL'][k] for k in\n tmp['MODEL'].keys() if k.endswith(',flux')}\n except:\n fluxes = {'total': tmp['MODEL']['totalflux']}\n M['flux WL'] = allWLc\n M['flux COMP'] = fluxes\n M['flux TOTAL'] = tmp['MODEL']['totalflux']\n else:\n M['flux WL'] = np.array([])\n M['flux COMP'] = {}\n M['flux TOTAL'] = np.array([])\n\n if len(allWLs):\n allWL = {'WL':allWLs, 'fit':{'obs':[]}} # minimum required\n tmp = oimodels.VmodelOI(allWL, model)\n print(type(tmp), tmp.keys())\n fluxes = {k.split(',')[0]:tmp['MODEL'][k] for k in\n tmp['MODEL'].keys() if k.endswith(',flux')}\n #print('normalised spectra computed for each components in dict \".spectra\"')\n M['normalised spectrum WL'] = allWLs\n M['normalised spectrum COMP'] = fluxes\n M['normalised spectrum TOTAL'] = tmp['MODEL']['totalflux']\n else:\n M['normalised spectrum WL'] = np.array([])\n M['normalised spectrum COMP'] = {}\n M['normalised spectrum TOTAL'] = np.array([])\n\n return M\n\ndef _checkObs(data, obs):\n \"\"\"\n data: OI dict\n obs: list of observable in ['|V|', 'V2', 'DPHI', 'T3PHI', 'FLUX', 'NFLUX']\n\n returns list of obs actually in data\n \"\"\"\n ext = {'|V|':'OI_VIS', 'DPHI':'OI_VIS', 'PHI':'OI_VIS',\n 'V2':'OI_VIS2',\n 'T3PHI':'OI_T3', 'T3AMP':'OI_T3',\n 'FLUX':'OI_FLUX',\n 'NFLUX':'OI_FLUX',\n }\n return [o for o in obs if o in ext and ext[o] in data]\n\ndef _checkSetupFit(fit):\n \"\"\"\n check for setupFit:\n \"\"\"\n keys = {'min error':dict, 'min relative error':dict,\n 'max error':dict, 'max relative error':dict,\n 'mult error':dict,\n 'obs':list, 'wl ranges':list,\n 'Nr':int, 'spec res pix':float,\n 'cont ranges':list,\n 'ignore negative flux':bool}\n ok = True\n for k in fit.keys():\n if not k in keys.keys():\n print('!WARNING! unknown fit setup \"'+k+'\"')\n ok = False\n elif type(fit[k]) != keys[k]:\n print('!WARNING! 
fit setup \"'+k+'\" should be of type', keys[k])\n ok = False\n return ok\n" ]
[ [ "numpy.array", "matplotlib.pyplot.suptitle", "numpy.interp", "numpy.mean", "matplotlib.pyplot.figure" ] ]
12rambau/sepal_translator
[ "8b4e924e9e00bcddb111bc6348cfd9b18eaeb571" ]
[ "nlp_sandbox/examples/run_translation.py" ]
[ "#!/usr/bin/env python\n# coding=utf-8\n# Copyright The HuggingFace Team and The HuggingFace Inc. team. All rights reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\"\nFine-tuning the library models for sequence to sequence.\n\"\"\"\n# You can also adapt this script on your own sequence to sequence task. Pointers for this are left as comments.\n\nimport logging\nimport os\nimport sys\nfrom dataclasses import dataclass, field\nfrom typing import Optional\n\nimport numpy as np\nfrom datasets import load_dataset, load_metric\n\nimport transformers\nfrom transformers import (\n AutoConfig,\n AutoModelForSeq2SeqLM,\n AutoTokenizer,\n DataCollatorForSeq2Seq,\n HfArgumentParser,\n MBartTokenizer,\n MBartTokenizerFast,\n Seq2SeqTrainer,\n Seq2SeqTrainingArguments,\n default_data_collator,\n set_seed,\n)\nfrom transformers.trainer_utils import get_last_checkpoint, is_main_process\nfrom transformers.utils import check_min_version\n\n\n# Will error if the minimal version of Transformers is not installed. Remove at your own risks.\n# check_min_version(\"4.5.0.dev0\")\n\nlogger = logging.getLogger(__name__)\n\n\n@dataclass\nclass ModelArguments:\n \"\"\"\n Arguments pertaining to which model/config/tokenizer we are going to fine-tune from.\n \"\"\"\n\n model_name_or_path: str = field(\n metadata={\"help\": \"Path to pretrained model or model identifier from huggingface.co/models\"}\n )\n config_name: Optional[str] = field(\n default=None, metadata={\"help\": \"Pretrained config name or path if not the same as model_name\"}\n )\n tokenizer_name: Optional[str] = field(\n default=None, metadata={\"help\": \"Pretrained tokenizer name or path if not the same as model_name\"}\n )\n cache_dir: Optional[str] = field(\n default=None,\n metadata={\"help\": \"Where to store the pretrained models downloaded from huggingface.co\"},\n )\n use_fast_tokenizer: bool = field(\n default=True,\n metadata={\"help\": \"Whether to use one of the fast tokenizer (backed by the tokenizers library) or not.\"},\n )\n model_revision: str = field(\n default=\"main\",\n metadata={\"help\": \"The specific model version to use (can be a branch name, tag name or commit id).\"},\n )\n use_auth_token: bool = field(\n default=False,\n metadata={\n \"help\": \"Will use the token generated when running `transformers-cli login` (necessary to use this script \"\n \"with private models).\"\n },\n )\n\n\n@dataclass\nclass DataTrainingArguments:\n \"\"\"\n Arguments pertaining to what data we are going to input our model for training and eval.\n \"\"\"\n\n source_lang: str = field(default=None, metadata={\"help\": \"Source language id for translation.\"})\n target_lang: str = field(default=None, metadata={\"help\": \"Target language id for translation.\"})\n\n dataset_name: Optional[str] = field(\n default=None, metadata={\"help\": \"The name of the dataset to use (via the datasets library).\"}\n )\n dataset_config_name: Optional[str] = field(\n default=None, metadata={\"help\": \"The configuration name of the dataset to use (via 
the datasets library).\"}\n )\n train_file: Optional[str] = field(default=None, metadata={\"help\": \"The input training data file (a jsonlines).\"})\n validation_file: Optional[str] = field(\n default=None,\n metadata={\n \"help\": \"An optional input evaluation data file to evaluate the metrics (sacreblue) on \"\n \"a jsonlines file.\"\n },\n )\n test_file: Optional[str] = field(\n default=None,\n metadata={\n \"help\": \"An optional input test data file to evaluate the metrics (sacreblue) on \" \"a jsonlines file.\"\n },\n )\n overwrite_cache: bool = field(\n default=False, metadata={\"help\": \"Overwrite the cached training and evaluation sets\"}\n )\n preprocessing_num_workers: Optional[int] = field(\n default=None,\n metadata={\"help\": \"The number of processes to use for the preprocessing.\"},\n )\n max_source_length: Optional[int] = field(\n default=1024,\n metadata={\n \"help\": \"The maximum total input sequence length after tokenization. Sequences longer \"\n \"than this will be truncated, sequences shorter will be padded.\"\n },\n )\n max_target_length: Optional[int] = field(\n default=128,\n metadata={\n \"help\": \"The maximum total sequence length for target text after tokenization. Sequences longer \"\n \"than this will be truncated, sequences shorter will be padded.\"\n },\n )\n val_max_target_length: Optional[int] = field(\n default=None,\n metadata={\n \"help\": \"The maximum total sequence length for validation target text after tokenization. Sequences longer \"\n \"than this will be truncated, sequences shorter will be padded. Will default to `max_target_length`.\"\n \"This argument is also used to override the ``max_length`` param of ``model.generate``, which is used \"\n \"during ``evaluate`` and ``predict``.\"\n },\n )\n pad_to_max_length: bool = field(\n default=False,\n metadata={\n \"help\": \"Whether to pad all samples to model maximum sentence length. \"\n \"If False, will pad the samples dynamically when batching to the maximum length in the batch. More \"\n \"efficient on GPU but very bad for TPU.\"\n },\n )\n max_train_samples: Optional[int] = field(\n default=None,\n metadata={\n \"help\": \"For debugging purposes or quicker training, truncate the number of training examples to this \"\n \"value if set.\"\n },\n )\n max_val_samples: Optional[int] = field(\n default=None,\n metadata={\n \"help\": \"For debugging purposes or quicker training, truncate the number of validation examples to this \"\n \"value if set.\"\n },\n )\n max_test_samples: Optional[int] = field(\n default=None,\n metadata={\n \"help\": \"For debugging purposes or quicker training, truncate the number of test examples to this \"\n \"value if set.\"\n },\n )\n num_beams: Optional[int] = field(\n default=None,\n metadata={\n \"help\": \"Number of beams to use for evaluation. 
This argument will be passed to ``model.generate``, \"\n \"which is used during ``evaluate`` and ``predict``.\"\n },\n )\n ignore_pad_token_for_loss: bool = field(\n default=True,\n metadata={\n \"help\": \"Whether to ignore the tokens corresponding to padded labels in the loss computation or not.\"\n },\n )\n source_prefix: Optional[str] = field(\n default=None, metadata={\"help\": \"A prefix to add before every source text (useful for T5 models).\"}\n )\n\n def __post_init__(self):\n if self.dataset_name is None and self.train_file is None and self.validation_file is None:\n raise ValueError(\"Need either a dataset name or a training/validation file.\")\n elif self.source_lang is None or self.target_lang is None:\n raise ValueError(\"Need to specify the source language and the target language.\")\n\n if self.train_file is not None:\n extension = self.train_file.split(\".\")[-1]\n assert extension == \"json\", \"`train_file` should be a json file.\"\n if self.validation_file is not None:\n extension = self.validation_file.split(\".\")[-1]\n assert extension == \"json\", \"`validation_file` should be a json file.\"\n if self.val_max_target_length is None:\n self.val_max_target_length = self.max_target_length\n\n\ndef main():\n # See all possible arguments in src/transformers/training_args.py\n # or by passing the --help flag to this script.\n # We now keep distinct sets of args, for a cleaner separation of concerns.\n\n parser = HfArgumentParser((ModelArguments, DataTrainingArguments, Seq2SeqTrainingArguments))\n if len(sys.argv) == 2 and sys.argv[1].endswith(\".json\"):\n # If we pass only one argument to the script and it's the path to a json file,\n # let's parse it to get our arguments.\n model_args, data_args, training_args = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1]))\n else:\n model_args, data_args, training_args = parser.parse_args_into_dataclasses()\n\n if data_args.source_prefix is None and model_args.model_name_or_path in [\n \"t5-small\",\n \"t5-base\",\n \"t5-large\",\n \"t5-3b\",\n \"t5-11b\",\n ]:\n logger.warning(\n \"You're running a t5 model but didn't provide a source prefix, which is expected, e.g. with \"\n \"`--source_prefix 'translate English to German: ' `\"\n )\n\n # Detecting last checkpoint.\n last_checkpoint = None\n if os.path.isdir(training_args.output_dir) and training_args.do_train and not training_args.overwrite_output_dir:\n last_checkpoint = get_last_checkpoint(training_args.output_dir)\n if last_checkpoint is None and len(os.listdir(training_args.output_dir)) > 0:\n raise ValueError(\n f\"Output directory ({training_args.output_dir}) already exists and is not empty. \"\n \"Use --overwrite_output_dir to overcome.\"\n )\n elif last_checkpoint is not None:\n logger.info(\n f\"Checkpoint detected, resuming training at {last_checkpoint}. 
To avoid this behavior, change \"\n \"the `--output_dir` or add `--overwrite_output_dir` to train from scratch.\"\n )\n\n # Setup logging\n logging.basicConfig(\n format=\"%(asctime)s - %(levelname)s - %(name)s - %(message)s\",\n datefmt=\"%m/%d/%Y %H:%M:%S\",\n handlers=[logging.StreamHandler(sys.stdout)],\n )\n logger.setLevel(logging.INFO if is_main_process(training_args.local_rank) else logging.WARN)\n\n # Log on each process the small summary:\n logger.warning(\n f\"Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}\"\n + f\"distributed training: {bool(training_args.local_rank != -1)}, 16-bits training: {training_args.fp16}\"\n )\n # Set the verbosity to info of the Transformers logger (on main process only):\n if is_main_process(training_args.local_rank):\n transformers.utils.logging.set_verbosity_info()\n logger.info(f\"Training/evaluation parameters {training_args}\")\n\n # Set seed before initializing model.\n set_seed(training_args.seed)\n\n # Get the datasets: you can either provide your own JSON training and evaluation files (see below)\n # or just provide the name of one of the public datasets available on the hub at https://huggingface.co/datasets/\n # (the dataset will be downloaded automatically from the datasets Hub).\n #\n # For translation, only JSON files are supported, with one field named \"translation\" containing two keys for the\n # source and target languages (unless you adapt what follows).\n #\n # In distributed training, the load_dataset function guarantee that only one local process can concurrently\n # download the dataset.\n if data_args.dataset_name is not None:\n # Downloading and loading a dataset from the hub.\n datasets = load_dataset(data_args.dataset_name, data_args.dataset_config_name)\n else:\n data_files = {}\n if data_args.train_file is not None:\n data_files[\"train\"] = data_args.train_file\n extension = data_args.train_file.split(\".\")[-1]\n if data_args.validation_file is not None:\n data_files[\"validation\"] = data_args.validation_file\n extension = data_args.validation_file.split(\".\")[-1]\n if data_args.test_file is not None:\n data_files[\"test\"] = data_args.test_file\n extension = data_args.test_file.split(\".\")[-1]\n datasets = load_dataset(extension, data_files=data_files)\n # See more about loading any type of standard or custom dataset (from files, python dict, pandas DataFrame, etc) at\n # https://huggingface.co/docs/datasets/loading_datasets.html.\n\n # Load pretrained model and tokenizer\n #\n # Distributed training:\n # The .from_pretrained methods guarantee that only one local process can concurrently\n # download model & vocab.\n config = AutoConfig.from_pretrained(\n model_args.config_name if model_args.config_name else model_args.model_name_or_path,\n cache_dir=model_args.cache_dir,\n revision=model_args.model_revision,\n use_auth_token=True if model_args.use_auth_token else None,\n )\n tokenizer = AutoTokenizer.from_pretrained(\n model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path,\n cache_dir=model_args.cache_dir,\n use_fast=model_args.use_fast_tokenizer,\n revision=model_args.model_revision,\n use_auth_token=True if model_args.use_auth_token else None,\n )\n model = AutoModelForSeq2SeqLM.from_pretrained(\n model_args.model_name_or_path,\n from_tf=bool(\".ckpt\" in model_args.model_name_or_path),\n config=config,\n cache_dir=model_args.cache_dir,\n revision=model_args.model_revision,\n use_auth_token=True if 
model_args.use_auth_token else None,\n )\n\n # Set decoder_start_token_id\n if model.config.decoder_start_token_id is None and isinstance(tokenizer, (MBartTokenizer, MBartTokenizerFast)):\n assert (\n data_args.target_lang is not None and data_args.source_lang is not None\n ), \"mBart requires --target_lang and --source_lang\"\n if isinstance(tokenizer, MBartTokenizer):\n model.config.decoder_start_token_id = tokenizer.lang_code_to_id[data_args.target_lang]\n else:\n model.config.decoder_start_token_id = tokenizer.convert_tokens_to_ids(data_args.target_lang)\n\n if model.config.decoder_start_token_id is None:\n raise ValueError(\"Make sure that `config.decoder_start_token_id` is correctly defined\")\n\n prefix = data_args.source_prefix if data_args.source_prefix is not None else \"\"\n\n # Preprocessing the datasets.\n # We need to tokenize inputs and targets.\n if training_args.do_train:\n column_names = datasets[\"train\"].column_names\n elif training_args.do_eval:\n column_names = datasets[\"validation\"].column_names\n elif training_args.do_predict:\n column_names = datasets[\"test\"].column_names\n else:\n logger.info(\"There is nothing to do. Please pass `do_train`, `do_eval` and/or `do_predict`.\")\n return\n\n # For translation we set the codes of our source and target languages (only useful for mBART, the others will\n # ignore those attributes).\n if isinstance(tokenizer, (MBartTokenizer, MBartTokenizerFast)):\n if data_args.source_lang is not None:\n tokenizer.src_lang = data_args.source_lang\n if data_args.target_lang is not None:\n tokenizer.tgt_lang = data_args.target_lang\n\n # Get the language codes for input/target.\n source_lang = data_args.source_lang.split(\"_\")[0]\n target_lang = data_args.target_lang.split(\"_\")[0]\n\n # Temporarily set max_target_length for training.\n max_target_length = data_args.max_target_length\n padding = \"max_length\" if data_args.pad_to_max_length else False\n\n if training_args.label_smoothing_factor > 0 and not hasattr(model, \"prepare_decoder_input_ids_from_labels\"):\n logger.warn(\n \"label_smoothing is enabled but the `prepare_decoder_input_ids_from_labels` method is not defined for\"\n f\"`{model.__class__.__name__}`. 
This will lead to loss being calculated twice and will take up more memory\"\n )\n\n def preprocess_function(examples):\n inputs = [ex[source_lang] for ex in examples[\"translation\"]]\n targets = [ex[target_lang] for ex in examples[\"translation\"]]\n inputs = [prefix + inp for inp in inputs]\n model_inputs = tokenizer(inputs, max_length=data_args.max_source_length, padding=padding, truncation=True)\n\n # Setup the tokenizer for targets\n with tokenizer.as_target_tokenizer():\n labels = tokenizer(targets, max_length=max_target_length, padding=padding, truncation=True)\n\n # If we are padding here, replace all tokenizer.pad_token_id in the labels by -100 when we want to ignore\n # padding in the loss.\n if padding == \"max_length\" and data_args.ignore_pad_token_for_loss:\n labels[\"input_ids\"] = [\n [(l if l != tokenizer.pad_token_id else -100) for l in label] for label in labels[\"input_ids\"]\n ]\n\n model_inputs[\"labels\"] = labels[\"input_ids\"]\n return model_inputs\n\n if training_args.do_train:\n train_dataset = datasets[\"train\"]\n if \"train\" not in datasets:\n raise ValueError(\"--do_train requires a train dataset\")\n if data_args.max_train_samples is not None:\n train_dataset = train_dataset.select(range(data_args.max_train_samples))\n train_dataset = train_dataset.map(\n preprocess_function,\n batched=True,\n num_proc=data_args.preprocessing_num_workers,\n remove_columns=column_names,\n load_from_cache_file=not data_args.overwrite_cache,\n )\n\n if training_args.do_eval:\n max_target_length = data_args.val_max_target_length\n if \"validation\" not in datasets:\n raise ValueError(\"--do_eval requires a validation dataset\")\n eval_dataset = datasets[\"validation\"]\n if data_args.max_val_samples is not None:\n eval_dataset = eval_dataset.select(range(data_args.max_val_samples))\n eval_dataset = eval_dataset.map(\n preprocess_function,\n batched=True,\n num_proc=data_args.preprocessing_num_workers,\n remove_columns=column_names,\n load_from_cache_file=not data_args.overwrite_cache,\n )\n\n if training_args.do_predict:\n max_target_length = data_args.val_max_target_length\n if \"test\" not in datasets:\n raise ValueError(\"--do_predict requires a test dataset\")\n test_dataset = datasets[\"test\"]\n if data_args.max_test_samples is not None:\n test_dataset = test_dataset.select(range(data_args.max_test_samples))\n test_dataset = test_dataset.map(\n preprocess_function,\n batched=True,\n num_proc=data_args.preprocessing_num_workers,\n remove_columns=column_names,\n load_from_cache_file=not data_args.overwrite_cache,\n )\n\n # Data collator\n label_pad_token_id = -100 if data_args.ignore_pad_token_for_loss else tokenizer.pad_token_id\n if data_args.pad_to_max_length:\n data_collator = default_data_collator\n else:\n data_collator = DataCollatorForSeq2Seq(\n tokenizer,\n model=model,\n label_pad_token_id=label_pad_token_id,\n pad_to_multiple_of=8 if training_args.fp16 else None,\n )\n\n # Metric\n metric = load_metric(\"sacrebleu\")\n\n def postprocess_text(preds, labels):\n preds = [pred.strip() for pred in preds]\n labels = [[label.strip()] for label in labels]\n\n return preds, labels\n\n def compute_metrics(eval_preds):\n preds, labels = eval_preds\n if isinstance(preds, tuple):\n preds = preds[0]\n decoded_preds = tokenizer.batch_decode(preds, skip_special_tokens=True)\n if data_args.ignore_pad_token_for_loss:\n # Replace -100 in the labels as we can't decode them.\n labels = np.where(labels != -100, labels, tokenizer.pad_token_id)\n decoded_labels = 
tokenizer.batch_decode(labels, skip_special_tokens=True)\n\n # Some simple post-processing\n decoded_preds, decoded_labels = postprocess_text(decoded_preds, decoded_labels)\n\n result = metric.compute(predictions=decoded_preds, references=decoded_labels)\n result = {\"bleu\": result[\"score\"]}\n\n prediction_lens = [np.count_nonzero(pred != tokenizer.pad_token_id) for pred in preds]\n result[\"gen_len\"] = np.mean(prediction_lens)\n result = {k: round(v, 4) for k, v in result.items()}\n return result\n\n # Initialize our Trainer\n trainer = Seq2SeqTrainer(\n model=model,\n args=training_args,\n train_dataset=train_dataset if training_args.do_train else None,\n eval_dataset=eval_dataset if training_args.do_eval else None,\n tokenizer=tokenizer,\n data_collator=data_collator,\n compute_metrics=compute_metrics if training_args.predict_with_generate else None,\n )\n\n # Training\n if training_args.do_train:\n if last_checkpoint is not None:\n checkpoint = last_checkpoint\n elif os.path.isdir(model_args.model_name_or_path):\n checkpoint = model_args.model_name_or_path\n else:\n checkpoint = None\n train_result = trainer.train(resume_from_checkpoint=checkpoint)\n trainer.save_model() # Saves the tokenizer too for easy upload\n\n metrics = train_result.metrics\n max_train_samples = (\n data_args.max_train_samples if data_args.max_train_samples is not None else len(train_dataset)\n )\n metrics[\"train_samples\"] = min(max_train_samples, len(train_dataset))\n\n trainer.log_metrics(\"train\", metrics)\n trainer.save_metrics(\"train\", metrics)\n trainer.save_state()\n\n # Evaluation\n results = {}\n if training_args.do_eval:\n logger.info(\"*** Evaluate ***\")\n\n metrics = trainer.evaluate(\n max_length=data_args.val_max_target_length, num_beams=data_args.num_beams, metric_key_prefix=\"eval\"\n )\n max_val_samples = data_args.max_val_samples if data_args.max_val_samples is not None else len(eval_dataset)\n metrics[\"eval_samples\"] = min(max_val_samples, len(eval_dataset))\n\n trainer.log_metrics(\"eval\", metrics)\n trainer.save_metrics(\"eval\", metrics)\n\n if training_args.do_predict:\n logger.info(\"*** Test ***\")\n\n test_results = trainer.predict(\n test_dataset,\n metric_key_prefix=\"test\",\n max_length=data_args.val_max_target_length,\n num_beams=data_args.num_beams,\n )\n metrics = test_results.metrics\n max_test_samples = data_args.max_test_samples if data_args.max_test_samples is not None else len(test_dataset)\n metrics[\"test_samples\"] = min(max_test_samples, len(test_dataset))\n\n trainer.log_metrics(\"test\", metrics)\n trainer.save_metrics(\"test\", metrics)\n\n if trainer.is_world_process_zero():\n if training_args.predict_with_generate:\n test_preds = tokenizer.batch_decode(\n test_results.predictions, skip_special_tokens=True, clean_up_tokenization_spaces=True\n )\n test_preds = [pred.strip() for pred in test_preds]\n output_test_preds_file = os.path.join(training_args.output_dir, \"test_generations.txt\")\n with open(output_test_preds_file, \"w\") as writer:\n writer.write(\"\\n\".join(test_preds))\n\n return results\n\n\ndef _mp_fn(index):\n # For xla_spawn (TPUs)\n main()\n\n\nif __name__ == \"__main__\":\n main()\n" ]
[ [ "numpy.where", "numpy.count_nonzero", "numpy.mean" ] ]
bartnijssen/neuralhydrology
[ "a710d616ff67e5e321b3df216959ae6456e6e4c0" ]
[ "neuralhydrology/utils/samplingutils.py" ]
[ "from typing import List, Dict, Callable\n\nimport numpy as np\nimport torch\nfrom torch.distributions import Categorical, Uniform\n\nfrom neuralhydrology.utils.config import Config\nfrom neuralhydrology.training.utils import umal_extend_batch\n\n\ndef sample_pointpredictions(model: 'BaseModel', data: Dict[str, torch.Tensor],\n n_samples: int) -> Dict[str, torch.Tensor]:\n \"\"\"Point prediction samplers for the different uncertainty estimation approaches.\n \n This function provides different point sampling functions for the different uncertainty estimation approaches \n (i.e. Gaussian Mixture Models (GMM), Countable Mixtures of Asymmetric Laplacians (CMAL), Uncountable Mixtures of \n Asymmetric Laplacians (UMAL), and Monte-Carlo Dropout (MCD); note: MCD can be combined with the others, by setting \n `mc_dropout` to `True` in the configuration file). \n \n There are also options to handle negative point prediction samples that arise while sampling from the uncertainty \n estimates. This functionality currently supports (a) 'clip' for directly clipping values at zero and \n (b) 'truncate' for resampling values that are below zero. \n \n Parameters\n ----------\n model : BaseModel\n The neuralhydrology model from which to sample from.\n data : Dict[str, torch.Tensor]\n Dictionary, containing input features as key-value pairs.\n n_samples : int\n The number of point prediction samples that should be created.\n\n Returns\n -------\n Dict[str, torch.Tensor]\n Dictionary, containing the sampled model outputs for the `predict_last_n` (config argument) time steps of \n each frequency.\n \"\"\"\n\n if model.cfg.head.lower() == \"gmm\":\n samples = sample_gmm(model, data, n_samples)\n elif model.cfg.head.lower() == \"cmal\":\n samples = sample_cmal(model, data, n_samples)\n elif model.cfg.head.lower() == \"umal\":\n samples = sample_umal(model, data, n_samples)\n elif model.cfg.head.lower() == \"regression\":\n samples = sample_mcd(model, data, n_samples) # regression head assumes mcd\n else:\n raise NotImplementedError(f\"Sampling mode not supported for head {model.cfg.head.lower()}!\")\n\n return samples\n\n\ndef _subset_target(parameter: Dict[str, torch.Tensor], n_target: int, steps: int) -> Dict[str, torch.Tensor]:\n # determine which output neurons correspond to the n_target target variable\n start = n_target * steps\n end = (n_target + 1) * steps\n parameter_sub = parameter[:, :, start:end]\n\n return parameter_sub\n\n\ndef _handle_negative_values(cfg: Config, values: torch.Tensor, sample_values: Callable) -> torch.Tensor:\n \"\"\"Handle negative samples that arise while sampling from the uncertainty estimates.\n\n Currently supports (a) 'clip' for directly clipping values at zero and (b) 'truncate' for resampling values \n that are below zero. \n\n Parameters\n ----------\n cfg : Config\n The run configuration.\n values : torch.Tensor\n Tensor with the sampled values.\n sample_values : Callable\n Sampling function to allow for repeated sampling in the case of truncation-handling. \n\n Returns\n -------\n torch.Tensor\n Bound values according to user specifications. 
\n \"\"\"\n if cfg.negative_sample_handling.lower() == 'clip':\n values = torch.relu(values)\n elif cfg.negative_sample_handling.lower() == 'truncate':\n values_smaller_zero = values < 0\n try_count = 0\n while any(values_smaller_zero.flatten()):\n values[values_smaller_zero] = sample_values(values_smaller_zero)\n values_smaller_zero = values < 0\n try_count += 1\n if try_count >= cfg.negative_sample_max_retries:\n break\n elif cfg.negative_sample_handling is None or cfg.negative_sample_handling.lower() == 'none':\n pass\n else:\n raise NotImplementedError(\n f\"The option {cfg.negative_sample_handling} is not supported for handling negative samples!\")\n\n return values\n\n\ndef _sample_gaussian_mixtures(ids: List[int], m_sub: torch.Tensor, s_sub: torch.Tensor,\n p_sub: torch.Tensor) -> torch.Tensor:\n # unbound sampling:\n categorical = Categorical(p_sub)\n pis = categorical.sample().data\n mask_gmm = torch.zeros(p_sub.shape, dtype=torch.bool) \\\n .to(device=p_sub.device) \\\n .scatter_(2, pis.unsqueeze(2), True)\n\n # The ids are used for location-specific resampling for 'truncation' in '_handle_negative_values'\n values = s_sub \\\n .data.new(s_sub[ids][mask_gmm[ids]].shape[0]) \\\n .normal_() \\\n .flatten() \\\n .mul(s_sub[ids][mask_gmm[ids]]) \\\n .add(m_sub[ids][mask_gmm[ids]])\n return values\n\n\ndef _sample_asymmetric_laplacians(ids: List[int], m_sub: torch.Tensor, b_sub: torch.Tensor,\n t_sub: torch.Tensor) -> torch.Tensor:\n # The ids are used for location-specific resampling for 'truncation' in '_handle_negative_values'\n prob = torch.FloatTensor(m_sub[ids].shape) \\\n .uniform_(0, 1) \\\n .to(m_sub.device) # sample uniformly between zero and 1\n values = torch.where(\n prob < t_sub[ids], # needs to be in accordance with the loss\n m_sub[ids] + ((b_sub[ids] * torch.log(prob / t_sub[ids])) / (1 - t_sub[ids])),\n m_sub[ids] - ((b_sub[ids] * torch.log((1 - prob) / (1 - t_sub[ids]))) / t_sub[ids]))\n return values.flatten()\n\n\nclass _SamplingSetup():\n\n def __init__(self, model: 'BaseModel', data: Dict[str, torch.Tensor], head: str):\n # make model checks:\n cfg = model.cfg\n if not cfg.head.lower() == head.lower():\n raise NotImplementedError(f\"{head} sampling not supported for the {cfg.head} head!\")\n\n dropout_modules = [model.dropout.p]\n\n # Multi-Timescale models don't have an embedding_net\n implied_statics_embedding, implied_dynamics_embedding = None, None\n if cfg.model.lower() not in ['mtslstm', 'odelstm']:\n implied_statics_embedding = model.embedding_net.statics_embedding_p_dropout\n implied_dynamics_embedding = model.embedding_net.dynamics_embedding_p_dropout\n dropout_modules += [implied_statics_embedding, implied_dynamics_embedding]\n # account for transformer\n implied_transformer_dropout = None\n if cfg.model.lower() == 'transfomer':\n implied_transformer_dropout = cfg.transformer_dropout\n dropout_modules.append(implied_transformer_dropout)\n\n max_implied_dropout = max(dropout_modules)\n # check lower bound dropout:\n if cfg.mc_dropout and max_implied_dropout <= 0.0:\n raise RuntimeError(f\"\"\"{cfg.model} with `mc_dropout` activated requires a dropout rate larger than 0.0\n The current implied dropout-rates are:\n - model: {cfg.output_dropout}\n - statics_embedding: {implied_statics_embedding}\n - dynamics_embedding: {implied_dynamics_embedding}\n - transformer: {implied_transformer_dropout}\"\"\")\n # check upper bound dropout:\n if cfg.mc_dropout and max_implied_dropout >= 1.0:\n raise RuntimeError(f\"\"\"The maximal dropout-rate is 1. 
Please check your dropout-settings:\n The current implied dropout-rates are:\n - model: {cfg.output_dropout}\n - statics_embedding: {implied_statics_embedding}\n - dynamics_embedding: {implied_dynamics_embedding}\n - transformer: {implied_transformer_dropout}\"\"\")\n\n # assign setup properties:\n self.cfg = cfg\n self.device = next(model.parameters()).device\n self.number_of_targets = len(cfg.target_variables)\n self.mc_dropout = cfg.mc_dropout\n self.predict_last_n = cfg.predict_last_n\n\n # determine appropriate frequency suffix:\n if self.cfg.use_frequencies:\n self.freq_suffixes = [f'_{freq}' for freq in cfg.use_frequencies]\n else:\n self.freq_suffixes = ['']\n\n self.batch_size_data = data[f'x_d{self.freq_suffixes[0]}'].shape[0]\n\n def _get_frequency_last_n(self, freq_suffix: str):\n if isinstance(self.predict_last_n, int):\n frequency_last_n = self.predict_last_n\n else:\n frequency_last_n = self.predict_last_n[freq_suffix[1:]]\n return frequency_last_n\n\n\ndef sample_mcd(model: 'BaseModel', data: Dict[str, torch.Tensor], n_samples: int) -> Dict[str, torch.Tensor]:\n \"\"\"MC-Dropout based point predictions sampling.\n\n Naive sampling. This function does `n_samples` forward passes for each sample in the batch. Currently it is \n only useful for models with dropout, to perform MC-Dropout sampling. \n Note: Calling this function will force the model to train mode (`model.train()`) and not set it back to its original\n state. \n\n The negative sample handling currently supports (a) 'clip' for directly clipping sample_points at zero and (b) \n 'truncate' for resampling sample_points that are below zero. The mode can be defined by the config argument \n 'negative_sample_handling'.\n\n Parameters\n ----------\n model : BaseModel\n A model with a non-probabilistic head.\n data : Dict[str, torch.Tensor]\n Dictionary, containing input features as key-value pairs.\n n_samples : int\n Number of samples to generate for each input sample.\n\n Returns\n -------\n Dict[str, torch.Tensor]\n Dictionary, containing the sampled model outputs for the `predict_last_n` (config argument) time steps of \n each frequency.\n \"\"\"\n setup = _SamplingSetup(model, data, model.cfg.head)\n\n # force model into train mode for mc_dropout:\n if setup.mc_dropout:\n model.train()\n\n # sample for different frequencies and targets:\n samples = {}\n for freq_suffix in setup.freq_suffixes:\n sample_points = []\n frequency_last_n = setup._get_frequency_last_n(freq_suffix=freq_suffix)\n\n for nth_target in range(setup.number_of_targets):\n # unbound sampling:\n def _sample_values(ids: List[int]) -> torch.Tensor:\n # The ids are used for location-specific resampling for 'truncation' in '_handle_negative_values'\n target_values = torch.zeros(len(ids), frequency_last_n, n_samples)\n for i in range(n_samples): # forward-pass for each frequency separately to guarantee independence\n prediction = model(data)\n value_buffer = prediction[f'y_hat{freq_suffix}'][:, -frequency_last_n:, 0]\n target_values[ids, -frequency_last_n:, i] = value_buffer.detach().cpu()\n return target_values\n\n ids = list(range(data[f'x_d{freq_suffix}'].shape[0]))\n values = _sample_values(ids)\n\n # bind values and add to sample_points:\n values = _handle_negative_values(setup.cfg, values, _sample_values)\n sample_points.append(values)\n\n # add sample_points to dictionary of samples:\n freq_key = f'y_hat{freq_suffix}'\n samples.update({freq_key: torch.stack(sample_points, 2)})\n\n return samples\n\n\ndef sample_gmm(model: 'BaseModel', data: 
Dict[str, torch.Tensor], n_samples: int) -> Dict[str, torch.Tensor]:\n \"\"\"Sample point predictions with the Gaussian Mixture (GMM) head.\n\n This function generates `n_samples` GMM sample points for each entry in the batch. Concretely, the model is \n executed once (forward pass) and then the sample points are generated by sampling from the resulting mixtures. \n Good references for learning about GMMs are [#]_ and [#]_. \n\n The negative sample handling currently supports (a) 'clip' for directly clipping sample_points at zero and \n (b) 'truncate' for resampling sample_points that are below zero. The mode can be defined by the config argument \n 'negative_sample_handling'.\n \n Note: If the config setting 'mc_dropout' is true this function will force the model to train mode (`model.train()`) \n and not set it back to its original state. \n\n Parameters\n ----------\n model : BaseModel\n A model with a GMM head.\n data : Dict[str, torch.Tensor]\n Dictionary, containing input features as key-value pairs.\n n_samples : int\n Number of samples to generate for each input sample.\n\n Returns\n -------\n Dict[str, torch.Tensor]\n Dictionary, containing the sampled model outputs for the `predict_last_n` (config argument) time steps of \n each frequency. \n\n References\n ----------\n .. [#] C. M. Bishop: Mixture density networks. 1994.\n .. [#] D. Ha: Mixture density networks with tensorflow. blog.otoro.net, \n URL: http://blog.otoro.net/2015/11/24/mixture-density-networks-with-tensorflow, 2015.\n \"\"\"\n setup = _SamplingSetup(model, data, \"gmm\")\n\n # force model into train mode if mc_dropout:\n if setup.mc_dropout:\n model.train()\n\n # make predictions:\n pred = model(data)\n\n # sample for different frequencies:\n samples = {}\n for freq_suffix in setup.freq_suffixes:\n # get predict_last_n for the given the mode:\n frequency_last_n = setup._get_frequency_last_n(freq_suffix=freq_suffix)\n\n # initialize sample_points tensor for sampling:\n sample_points = torch.zeros((setup.batch_size_data, frequency_last_n, setup.number_of_targets, n_samples))\n sample_points *= torch.tensor(float('nan')) # set initial sample_points to nan\n\n # GMM has 3 parts: means (m/mu), variances (s/sigma), and weights (p/pi):\n m, s, p = pred[f'mu{freq_suffix}'], \\\n pred[f'sigma{freq_suffix}'], \\\n pred[f'pi{freq_suffix}']\n\n for nth_target in range(setup.number_of_targets):\n m_target = _subset_target(m[:, -frequency_last_n:, :], nth_target, setup.cfg.n_distributions)\n s_target = _subset_target(s[:, -frequency_last_n:, :], nth_target, setup.cfg.n_distributions)\n p_target = _subset_target(p[:, -frequency_last_n:, :], nth_target, setup.cfg.n_distributions)\n\n mask_nan = ~torch.isnan(m_target[:, -1, 0])\n if any(mask_nan): # skip if the complete mini-batch is invalid\n m_sub = torch.repeat_interleave(m_target[mask_nan, :, :], n_samples, dim=0)\n s_sub = torch.repeat_interleave(s_target[mask_nan, :, :], n_samples, dim=0)\n p_sub = torch.repeat_interleave(p_target[mask_nan, :, :], n_samples, dim=0)\n\n # sample values, handle negatives and add to sample points:\n values = _sample_gaussian_mixtures(np.ones(s_sub.shape, dtype=bool), m_sub, s_sub, p_sub)\n values = _handle_negative_values(\n setup.cfg, values, sample_values=lambda ids: _sample_gaussian_mixtures(ids, m_sub, s_sub, p_sub))\n values = values.view(-1, frequency_last_n, n_samples)\n\n sample_points[mask_nan, :, nth_target, :] = values.detach().cpu()\n\n # add sample_points to dictionary of samples:\n freq_key = f'y_hat{freq_suffix}'\n 
samples.update({freq_key: sample_points})\n return samples\n\n\ndef sample_cmal(model: 'BaseModel', data: Dict[str, torch.Tensor], n_samples: int) -> Dict[str, torch.Tensor]:\n \"\"\"Sample point predictions with the Countable Mixture of Asymmetric Laplacians (CMAL) head.\n\n This function generates `n_samples` CMAL sample points for each entry in the batch. Concretely, the model is \n executed once (forward pass) and then the sample points are generated by sampling from the resulting mixtures. \n General information about CMAL can be found in [#]_.\n\n The negative sample handling currently supports (a) 'clip' for directly clipping sample_points at zero and (b) \n 'truncate' for resampling sample_points that are below zero. The mode can be defined by the config argument \n 'negative_sample_handling'.\n\n Note: If the config setting 'mc_dropout' is true this function will force the model to train mode (`model.train()`) \n and not set it back to its original state. \n\n Parameters\n ----------\n model : BaseModel\n A model with a CMAL head.\n data : Dict[str, torch.Tensor]\n Dictionary, containing input features as key-value pairs.\n n_samples : int\n Number of samples to generate for each input sample.\n\n Returns\n -------\n Dict[str, torch.Tensor]\n Dictionary, containing the sampled model outputs for the `predict_last_n` (config argument) time steps of \n each frequency. The shape of the output tensor for each frequency is \n ``[batch size, predict_last_n, n_samples]``.\n\n References\n ----------\n .. [#] D.Klotz, F. Kratzert, M. Gauch, A. K. Sampson, G. Klambauer, S. Hochreiter, and G. Nearing: \n Uncertainty Estimation with Deep Learning for Rainfall-Runoff Modelling. arXiv preprint arXiv:2012.14295,\n 2020.\n \"\"\"\n setup = _SamplingSetup(model, data, \"cmal\")\n\n # force model into train mode if mc_dropout\n if setup.mc_dropout:\n model.train()\n\n # make predictions:\n pred = model(data)\n\n # sample for different frequencies:\n samples = {}\n for freq_suffix in setup.freq_suffixes:\n # get predict_last_n for the given the mode:\n frequency_last_n = setup._get_frequency_last_n(freq_suffix=freq_suffix)\n\n # CMAL has 4 parts: means (m/mu), scales (b), asymmetries (t/) and weights (p/pi):\n m = pred[f'mu{freq_suffix}']\n b = pred[f'b{freq_suffix}']\n t = pred[f'tau{freq_suffix}']\n p = pred[f'pi{freq_suffix}']\n\n sample_points = []\n for nth_target in range(setup.number_of_targets):\n # sampling presets:\n m_target = _subset_target(m[:, -frequency_last_n:, :], nth_target, setup.cfg.n_distributions)\n b_target = _subset_target(b[:, -frequency_last_n:, :], nth_target, setup.cfg.n_distributions)\n t_target = _subset_target(t[:, -frequency_last_n:, :], nth_target, setup.cfg.n_distributions)\n p_target = _subset_target(p[:, -frequency_last_n:, :], nth_target, setup.cfg.n_distributions)\n\n m_target = torch.repeat_interleave(m_target, n_samples, dim=0)\n b_target = torch.repeat_interleave(b_target, n_samples, dim=0)\n t_target = torch.repeat_interleave(t_target, n_samples, dim=0)\n p_target = torch.repeat_interleave(p_target, n_samples, dim=0)\n\n # sampling procedure:\n values = torch.zeros((setup.batch_size_data * n_samples, frequency_last_n)).to(setup.device)\n values *= torch.tensor(float('nan')) # set target sample_points to nan\n for nth_timestep in range(frequency_last_n):\n\n mask_nan = ~torch.isnan(p_target[:, nth_timestep, 0])\n if any(mask_nan): # skip if the complete mini-batch is invalid\n sub_choices = torch.multinomial(p_target[mask_nan, nth_timestep, :], 
num_samples=1)\n t_sub = t_target[mask_nan, nth_timestep, :].gather(1, sub_choices)\n m_sub = m_target[mask_nan, nth_timestep, :].gather(1, sub_choices)\n b_sub = b_target[mask_nan, nth_timestep, :].gather(1, sub_choices)\n\n ids = np.ones(b_sub.shape, dtype=bool)\n values_unbound = _sample_asymmetric_laplacians(ids, m_sub, b_sub, t_sub)\n values[mask_nan, nth_timestep] = _handle_negative_values(\n setup.cfg,\n values_unbound,\n sample_values=lambda ids: _sample_asymmetric_laplacians(ids, m_sub, b_sub, t_sub))\n\n # add the values to the sample_points:\n values = values.reshape(-1, frequency_last_n, n_samples)\n values = values.detach().cpu()\n sample_points.append(values)\n\n # add sample_points to dictionary of samples:\n freq_key = f'y_hat{freq_suffix}'\n samples.update({freq_key: torch.stack(sample_points, 2)})\n return samples\n\n\ndef sample_umal(model: 'BaseModel', data: Dict[str, torch.Tensor], n_samples: int) -> Dict[str, torch.Tensor]:\n \"\"\"Sample point predictions with the Uncountable Mixture of Asymmetric Laplacians (UMAL) head.\n\n This function generates `n_samples` UMAL sample points for each entry in the batch. Concretely, the model is \n executed once (forward pass) and then the sample points are generated by sampling from the resulting mixtures. \n Details about the UMAL approach can be found in [#]_.\n\n The negative sample handling currently supports (a) 'clip' for directly clipping sample_points at zero and (b) \n 'truncate' for resampling sample_points that are below zero. The mode can be defined by the config argument \n 'negative_sample_handling'.\n \n Note: If the config setting 'mc_dropout' is true this function will force the model to train mode (`model.train()`) \n and not set it back to its original state. \n\n Parameters\n ----------\n model : BaseModel\n A model with an UMAL head.\n data : Dict[str, torch.Tensor]\n Dictionary, containing input features as key-value pairs.\n n_samples : int\n Number of samples to generate for each input sample.\n\n Returns\n -------\n Dict[str, torch.Tensor]\n Dictionary containing the sampled model outputs for the `predict_last_n` (config argument) time steps of \n each frequency.\n\n References\n ----------\n .. [#] A. Brando, J. A. Rodriguez, J. Vitria, and A. R. Munoz: Modelling heterogeneous distributions \n with an Uncountable Mixture of Asymmetric Laplacians. Advances in Neural Information Processing Systems, \n pp. 
8838-8848, 2019.\n \"\"\"\n setup = _SamplingSetup(model, data, \"umal\")\n\n # force model into train mode if mc_dropout:\n if setup.mc_dropout:\n model.train()\n\n # n_taus expands the batch by itself and adds a sampled tau as input (new_batch_size = n_taus*batch_size):\n data = umal_extend_batch(data, setup.cfg, n_taus=setup.cfg.n_taus)\n\n # make predictions:\n pred = model(data)\n\n # sample:\n samples = {}\n for freq_suffix in setup.freq_suffixes:\n # get predict_last_n for the given the mode:\n frequency_last_n = setup._get_frequency_last_n(freq_suffix=freq_suffix)\n\n # UMAL has 2 parts: means (m/mu), scales (b); the tau is randomly chosen:\n m = pred[f'mu{freq_suffix}']\n b = pred[f'b{freq_suffix}']\n t = data[f'tau{freq_suffix}']\n\n # sampling presets:\n m_wide = torch.cat(m[:, -frequency_last_n:, :].split(setup.batch_size_data, 0), 2)\n b_wide = torch.cat(b[:, -frequency_last_n:, :].split(setup.batch_size_data, 0), 2)\n\n # for now we just use a single tau for all targets:\n t_target = torch.cat(t[:, -frequency_last_n:, :].split(setup.batch_size_data, 0), 2)\n\n # sample over targets:\n sample_points = torch.zeros((setup.batch_size_data, frequency_last_n, setup.number_of_targets, n_samples))\n sample_points *= torch.tensor(float('nan')) # set initial sample_points to nan\n for nth_target in range(setup.number_of_targets):\n # sampling presets:\n m_target = _subset_target(m_wide[:, -frequency_last_n:, :], nth_target, setup.cfg.n_taus)\n b_target = _subset_target(b_wide[:, -frequency_last_n:, :], nth_target, setup.cfg.n_taus)\n\n # sample over n_samples:\n for nth_sample in range(n_samples):\n sub_choice = np.random.randint(0, setup.cfg.n_taus)\n\n mask_nan = ~torch.isnan(m_target[:, 0, 0])\n if any(mask_nan): # skip computation if entire mini-batch is invalid\n m_sub = m_target[mask_nan, :, sub_choice]\n b_sub = b_target[mask_nan, :, sub_choice]\n t_sub = t_target[mask_nan, :, sub_choice]\n\n ids = np.ones(b_sub.shape, dtype=bool)\n values_unbound = _sample_asymmetric_laplacians(ids, m_sub, b_sub, t_sub)\n values = _handle_negative_values(\n setup.cfg,\n values_unbound,\n sample_values=lambda ids: _sample_asymmetric_laplacians(ids, m_sub, b_sub, t_sub))\n\n # add values to sample_points:\n values = values.detach().cpu().unsqueeze(1)\n sample_points[mask_nan, -frequency_last_n:, nth_target, nth_sample] = values\n\n # add sample_points to dictionary of samples:\n freq_key = f'y_hat{freq_suffix}'\n samples.update({freq_key: sample_points})\n return samples\n" ]
[ [ "torch.zeros", "torch.distributions.Categorical", "torch.stack", "torch.relu", "torch.isnan", "torch.FloatTensor", "numpy.ones", "torch.repeat_interleave", "torch.multinomial", "numpy.random.randint", "torch.log" ] ]
bjkomer/hyperopt
[ "ddb32ecf384312ae0fd9f9e17d2d9e17e90e6096" ]
[ "hyperopt/pyll/tests/test_stochastic.py" ]
[ "import numpy as np\nfrom hyperopt.pyll import scope, as_apply, dfs, rec_eval\nfrom hyperopt.pyll.stochastic import *\n\ndef test_recursive_set_rng_kwarg():\n uniform = scope.uniform\n a = as_apply([uniform(0, 1), uniform(2, 3)])\n rng = np.random.RandomState(234)\n recursive_set_rng_kwarg(a, rng)\n print(a)\n val_a = rec_eval(a)\n assert 0 < val_a[0] < 1\n assert 2 < val_a[1] < 3\n\n\ndef test_lnorm():\n G = scope\n choice = G.choice\n uniform = G.uniform\n quantized_uniform = G.quniform\n\n inker_size = quantized_uniform(low=0, high=7.99, q=2) + 3\n # -- test that it runs\n lnorm = as_apply({'kwargs': {'inker_shape' : (inker_size, inker_size),\n 'outker_shape' : (inker_size, inker_size),\n 'remove_mean' : choice([0, 1]),\n 'stretch' : uniform(low=0, high=10),\n 'threshold' : uniform(\n low=.1 / np.sqrt(10.),\n high=10 * np.sqrt(10))\n }})\n print(lnorm)\n print('len', len(str(lnorm)))\n # not sure what to assert\n # ... this is too fagile\n # assert len(str(lnorm)) == 980\n\n\ndef test_sample_deterministic():\n aa = as_apply([0, 1])\n print(aa)\n dd = sample(aa, np.random.RandomState(3))\n assert dd == (0, 1)\n\n\ndef test_repeatable():\n u = scope.uniform(0, 1)\n aa = as_apply(dict(\n u = u,\n n = scope.normal(5, 0.1),\n l = [0, 1, scope.one_of(2, 3), u]))\n dd1 = sample(aa, np.random.RandomState(3))\n dd2 = sample(aa, np.random.RandomState(3))\n dd3 = sample(aa, np.random.RandomState(4))\n assert dd1 == dd2\n assert dd1 != dd3\n\n\ndef test_sample():\n u = scope.uniform(0, 1)\n aa = as_apply(dict(\n u = u,\n n = scope.normal(5, 0.1),\n l = [0, 1, scope.one_of(2, 3), u]))\n print(aa)\n dd = sample(aa, np.random.RandomState(3))\n assert 0 < dd['u'] < 1\n assert 4 < dd['n'] < 6\n assert dd['u'] == dd['l'][3]\n assert dd['l'][:2] == (0, 1)\n assert dd['l'][2] in (2, 3)\n\n" ]
[ [ "numpy.sqrt", "numpy.random.RandomState" ] ]
PerceptronV/Exnate
[ "900f26d9387828519e468918d0711d3363f96239" ]
[ "data/dataloader.py" ]
[ "import requests\nimport investpy as ivpy\nimport os\nimport json\nimport pandas as pd\nimport numpy as np\nfrom datetime import datetime as dt\nfrom tqdm import tqdm\nimport warnings\n\nif not os.path.exists('imfloader.py'):\n r = requests.get('https://raw.githubusercontent.com/PerceptronV/Exnate/master/data/imfloader.py')\n open('imfloader.py', 'wb').write(r.content)\nfrom imfloader import get_imf\n\n\ndef create_indices(df):\n new_indices = []\n\n for i in df.index:\n new_indices.append(save_date(i))\n df = df.rename(save_date, axis='index')\n\n return df\n\n\ndef load_date(idx):\n return dt.strptime(idx, '%Y-%m-%d').date()\n\n\ndef save_date(date):\n return date.strftime('%Y-%m-%d')\n\n\ndef pad(i):\n if i < 10:\n return '0' + str(i)\n return str(i)\n\n\ndef last_month(s):\n [yr, month] = s.split('-')\n month = int(month)\n if month == 1:\n return '-'.join([str(int(yr) - 1), '12'])\n return '-'.join([yr, pad(month - 1)])\n\n\ndef add_imf_api(base, areas, indicators, progress):\n try:\n imf, cols = get_imf(areas, indicators, progress=progress)\n aug = pd.DataFrame()\n indices = list(base.index)\n\n if progress:\n iter = tqdm(range(base.shape[0]))\n else:\n iter = range(base.shape[0])\n\n for i in iter:\n if last_month(indices[i].strftime('%Y-%m')) in imf.index:\n imf_dat = imf.loc[last_month(indices[i].strftime('%Y-%m'))]\n imf_dat = imf_dat.rename(index=indices[i])\n\n else:\n imf_dat = pd.DataFrame({i: np.nan for i in cols}, index=[indices[i]])\n imf_dat.columns = cols\n\n aug = aug.append(imf_dat)\n\n return pd.concat([base, aug], axis=1)\n\n except:\n warnings.warn('Error in data collection')\n return base\n\n\ndef add_imf_legacy(base, progress):\n r = requests.get('https://raw.githubusercontent.com/PerceptronV/Exnate/master/data/weo_data_oct_2020.csv')\n open('weo_dat.csv', 'wb').write(r.content)\n df = pd.read_csv('weo_dat.csv').transpose()\n os.remove('weo_dat.csv')\n\n cols = []\n for i in range(df.shape[1]):\n if df.loc['WEO Country Code'][i] == '112':\n cols.append('UK: {} /{} fr last yr'.format(\n df.loc['Subject Descriptor'][i],\n df.loc['Units'][i]\n ))\n elif df.loc['WEO Country Code'][i] == '111':\n cols.append('US: {} /{} fr last yr'.format(\n df.loc['Subject Descriptor'][i],\n df.loc['Units'][i]\n ))\n\n weo = df.iloc[8:-1, :-2]\n weo.columns = cols\n aug = pd.DataFrame()\n indices = list(base.index)\n\n if progress:\n iter = tqdm(range(base.shape[0]))\n else:\n iter = range(base.shape[0])\n\n for i in iter:\n if '{}'.format(indices[i].year - 1) in weo.index:\n weo_dat = weo.loc['{}'.format(indices[i].year - 1)]\n weo_dat = weo_dat.rename(index=indices[i])\n\n else:\n weo_dat = pd.DataFrame({i:np.nan for i in cols}, index=[indices[i]])\n weo_dat.columns = cols\n\n aug = aug.append(weo_dat)\n\n return pd.concat([base, aug], axis=1)\n\n\ndef hkd2gbp(date1, date2):\n df = ivpy.get_currency_cross_historical_data(currency_cross='GBP/HKD',\n from_date=date1.strftime('%d/%m/%Y'),\n to_date=date2.strftime('%d/%m/%Y'))\n df = df.iloc[:, :4]\n df = df.rename(lambda x: 'HKD->GBP: ' + x, axis='columns')\n\n return df\n\n\ndef hkd2usd(date1, date2):\n df = ivpy.get_currency_cross_historical_data(currency_cross='USD/HKD',\n from_date=date1.strftime('%d/%m/%Y'),\n to_date=date2.strftime('%d/%m/%Y'))\n df = df.iloc[:, :4]\n df = df.rename(lambda x: 'HKD->USD: ' + x, axis='columns')\n\n return df\n\n\ndef eur2gbp(date1, date2):\n df = ivpy.get_currency_cross_historical_data(currency_cross='GBP/EUR',\n from_date=date1.strftime('%d/%m/%Y'),\n to_date=date2.strftime('%d/%m/%Y'))\n df = 
df.iloc[:, :4]\n df = df.rename(lambda x: 'EUR->GBP: ' + x, axis='columns')\n\n return df\n\n\ndef ftse100(date1, date2):\n df = ivpy.get_index_historical_data(index='FTSE 100',\n country='united kingdom',\n from_date=date1.strftime('%d/%m/%Y'),\n to_date=date2.strftime('%d/%m/%Y'))\n df = df.iloc[:, :4]\n df = df.rename(lambda x: 'UK FTSE 100: ' + x, axis='columns')\n\n return df\n\n\ndef ftse250(date1, date2):\n df = ivpy.get_index_historical_data(index='FTSE 250',\n country='united kingdom',\n from_date=date1.strftime('%d/%m/%Y'),\n to_date=date2.strftime('%d/%m/%Y'))\n df = df.iloc[:, :4]\n df = df.rename(lambda x: 'UK FTSE 250: ' + x, axis='columns')\n\n return df\n\n\ndef arca(date1, date2):\n df = ivpy.get_index_historical_data(index='ARCA Major Markets',\n country='united states',\n from_date=date1.strftime('%d/%m/%Y'),\n to_date=date2.strftime('%d/%m/%Y'))\n df = df.iloc[:, :4]\n df = df.rename(lambda x: 'US ARCA: ' + x, axis='columns')\n\n return df\n\n\ndef sp500(date1, date2):\n df = ivpy.get_index_historical_data(index='S&P 500',\n country='united states',\n from_date=date1.strftime('%d/%m/%Y'),\n to_date=date2.strftime('%d/%m/%Y'))\n df = df.iloc[:, :4]\n df = df.rename(lambda x: 'US SP500: ' + x, axis='columns')\n\n return df\n\n\ndef nasdaq(date1, date2):\n df = ivpy.get_index_historical_data(index='nasdaq',\n country='united states',\n from_date=date1.strftime('%d/%m/%Y'),\n to_date=date2.strftime('%d/%m/%Y'))\n df = df.iloc[:, :4]\n df = df.rename(lambda x: 'US SP500: ' + x, axis='columns')\n\n return df\n\n\ndef chunks(lst, n):\n lst = list(lst)\n ret = []\n for i in range(0, len(lst), n):\n ret.append(lst[i:i + n])\n\n return ret\n\n\ndef get_features(date1, date2, args=(\n hkd2gbp, hkd2usd, eur2gbp, ftse100, ftse250, sp500, nasdaq\n), imf_areas=('HK', 'GB', 'US'), imf_indicators=(\n 'AIP_SA_IX', 'AOMPC_IX', 'NGDP_SA_XDC', 'NC_GDP_PT', 'NFI_SA_XDC', 'NGDPNPI_SA_XDC', 'NX_SA_XDC', 'NXS_SA_XDC', 'NSDGDP_R_CH_SA_XDC', 'NYGDP_XDC',\n 'ARS_IX', 'ENEER_IX', 'NYFC_XDC', 'NYG_SA_XDC', 'NYP_XDC', 'BFDA_BP6_USD', 'BFOAE_BP6_USD', 'BFPAE_BP6_USD', 'BFPLXF_BP6_USD', 'FISR_PA',\n 'FITB_IX', 'FMVB_IX', 'NNL_SA_XDC', 'NSG_XDC', 'NYG_SA_XDC', 'PCPI_PC_PP_PT', 'NM_XDC', 'LUR_PC_PP_PT', 'LUR_PT', 'LP_PE_NUM',\n 'FPE_IX', 'LE_IX', 'BCG_GRTI_G01_CA_XDC', 'FIDR_ON_PA', 'BCG_GRTGS_G01_XDC', 'BCG_GX_G01_XDC', 'BCG_GXOB_G01_XDC', 'BFDAE_BP6_USD', 'BCG_GXCBG_G01_XDC', 'BCG_GXOBP_G01_XDC',\n 'LWR_IX', 'NGDP_D_SA_IX', 'FMD_SA_USD', 'GG_GALM_G01_XDC', '26N___XDC', 'NM_SA_XDC', 'TMG_CIF_PC_PP_PT',\n 'NGDP_R_K_IX', 'PPPIFG_IX', 'TMG_D_CIF_IX', 'PMP_IX', 'PCPI_IX', 'PPPI_IX', 'PXP_IX',\n), beta=False, progress=True):\n base = pd.DataFrame()\n\n if progress:\n print('Loading exchange rate and stock market data...')\n iter = tqdm(args)\n else:\n iter = args\n\n for func in iter:\n base = pd.concat([base, func(date1, date2)], axis=1)\n\n if beta:\n if progress:\n print('Loading IMF International Financial Statistics data...')\n iter = tqdm(chunks(imf_indicators, 10))\n else:\n iter = chunks(imf_indicators, 10)\n\n for ind in iter:\n base = add_imf_api(base, imf_areas, ind, progress=progress)\n\n if progress:\n print('Loading IMF World Economic Outlook data...')\n\n base = add_imf_legacy(base, progress=progress)\n\n base = create_indices(base)\n base = base.fillna(0)\n\n features_dict = {int(e): i for e, i in enumerate(base.columns)}\n\n return base, features_dict\n\n\ndef get_save(date1, date2, args=None, csv_path='full_data.csv', json_path='feature_names.json', beta=False):\n if args is None:\n data, 
feats = get_features(date1, date2, beta=beta)\n else:\n data, feats = get_features(date1, date2, args, beta=beta)\n\n data.to_csv(csv_path)\n json.dump(feats, open(json_path, 'w'))\n" ]
[ [ "pandas.DataFrame", "pandas.read_csv", "pandas.concat" ] ]
arunmarria/test_package
[ "cd5cbcf189e56453eda94485c8b9c636dd5fb95e" ]
[ "tests/test_test_package.py" ]
[ "from test_package import test_package\n\nfrom test_package import test_package\nimport pandas as pd\n\n\ndef test_catbind():\n a = pd.Categorical([\"character\", \"hits\", \"your\", \"eyeballs\"])\n b = pd.Categorical([\"but\", \"integer\", \"where it\", \"counts\"])\n assert ((test_package.catbind(a, b)).codes == [1, 4, 7, 3, 0, 5, 6, 2]).all()\n assert ((test_package.catbind(a, b)).categories == [\"but\", \"character\",\n \"counts\", \"eyeballs\", \"hits\", \"integer\", \"where it\", \"your\"]).all()" ]
[ [ "pandas.Categorical" ] ]
sonomarina/Stock-sentiment-analysis
[ "2b3ddd88c73916ea06d39ef5d455f9220c69fdf7" ]
[ "Strategy_IV_renko_macd.py" ]
[ "# =============================================================================\r\n# Backtesting strategy - IV : combining renko with other MACD\r\n# Author : Mayank Rasu\r\n\r\n# Please report bug/issues in the Q&A section\r\n# =============================================================================\r\n\r\nimport numpy as np\r\nimport pandas as pd\r\nfrom stocktrends import Renko\r\nimport statsmodels.api as sm\r\nfrom alpha_vantage.timeseries import TimeSeries\r\nimport copy\r\n\r\ndef MACD(DF,a,b,c):\r\n \"\"\"function to calculate MACD\r\n typical values a = 12; b =26, c =9\"\"\"\r\n df = DF.copy()\r\n df[\"MA_Fast\"]=df[\"Adj Close\"].ewm(span=a,min_periods=a).mean()\r\n df[\"MA_Slow\"]=df[\"Adj Close\"].ewm(span=b,min_periods=b).mean()\r\n df[\"MACD\"]=df[\"MA_Fast\"]-df[\"MA_Slow\"]\r\n df[\"Signal\"]=df[\"MACD\"].ewm(span=c,min_periods=c).mean()\r\n df.dropna(inplace=True)\r\n return (df[\"MACD\"],df[\"Signal\"])\r\n\r\ndef ATR(DF,n):\r\n \"function to calculate True Range and Average True Range\"\r\n df = DF.copy()\r\n df['H-L']=abs(df['High']-df['Low'])\r\n df['H-PC']=abs(df['High']-df['Adj Close'].shift(1))\r\n df['L-PC']=abs(df['Low']-df['Adj Close'].shift(1))\r\n df['TR']=df[['H-L','H-PC','L-PC']].max(axis=1,skipna=False)\r\n df['ATR'] = df['TR'].rolling(n).mean()\r\n #df['ATR'] = df['TR'].ewm(span=n,adjust=False,min_periods=n).mean()\r\n df2 = df.drop(['H-L','H-PC','L-PC'],axis=1)\r\n return df2\r\n\r\ndef slope(ser,n):\r\n \"function to calculate the slope of n consecutive points on a plot\"\r\n slopes = [i*0 for i in range(n-1)]\r\n for i in range(n,len(ser)+1):\r\n y = ser[i-n:i]\r\n x = np.array(range(n))\r\n y_scaled = (y - y.min())/(y.max() - y.min())\r\n x_scaled = (x - x.min())/(x.max() - x.min())\r\n x_scaled = sm.add_constant(x_scaled)\r\n model = sm.OLS(y_scaled,x_scaled)\r\n results = model.fit()\r\n slopes.append(results.params[-1])\r\n slope_angle = (np.rad2deg(np.arctan(np.array(slopes))))\r\n return np.array(slope_angle)\r\n\r\ndef renko_DF(DF):\r\n \"function to convert ohlc data into renko bricks\"\r\n df = DF.copy()\r\n df.reset_index(inplace=True)\r\n df = df.iloc[:,[0,1,2,3,4,5]]\r\n df.columns = [\"date\",\"open\",\"high\",\"low\",\"close\",\"volume\"]\r\n df2 = Renko(df)\r\n df2.brick_size = max(0.5,round(ATR(DF,120)[\"ATR\"][-1],0))\r\n renko_df = df2.get_bricks()\r\n renko_df[\"bar_num\"] = np.where(renko_df[\"uptrend\"]==True,1,np.where(renko_df[\"uptrend\"]==False,-1,0))\r\n for i in range(1,len(renko_df[\"bar_num\"])):\r\n if renko_df[\"bar_num\"][i]>0 and renko_df[\"bar_num\"][i-1]>0:\r\n renko_df[\"bar_num\"][i]+=renko_df[\"bar_num\"][i-1]\r\n elif renko_df[\"bar_num\"][i]<0 and renko_df[\"bar_num\"][i-1]<0:\r\n renko_df[\"bar_num\"][i]+=renko_df[\"bar_num\"][i-1]\r\n renko_df.drop_duplicates(subset=\"date\",keep=\"last\",inplace=True)\r\n return renko_df\r\n\r\n\r\ndef CAGR(DF):\r\n \"function to calculate the Cumulative Annual Growth Rate of a trading strategy\"\r\n df = DF.copy()\r\n df[\"cum_return\"] = (1 + df[\"ret\"]).cumprod()\r\n n = len(df)/(252*78)\r\n CAGR = (df[\"cum_return\"].tolist()[-1])**(1/n) - 1\r\n return CAGR\r\n\r\ndef volatility(DF):\r\n \"function to calculate annualized volatility of a trading strategy\"\r\n df = DF.copy()\r\n vol = df[\"ret\"].std() * np.sqrt(252*78)\r\n return vol\r\n\r\ndef sharpe(DF,rf):\r\n \"function to calculate sharpe ratio ; rf is the risk free rate\"\r\n df = DF.copy()\r\n sr = (CAGR(df) - rf)/volatility(df)\r\n return sr\r\n \r\n\r\ndef max_dd(DF):\r\n \"function to calculate max 
drawdown\"\r\n df = DF.copy()\r\n df[\"cum_return\"] = (1 + df[\"ret\"]).cumprod()\r\n df[\"cum_roll_max\"] = df[\"cum_return\"].cummax()\r\n df[\"drawdown\"] = df[\"cum_roll_max\"] - df[\"cum_return\"]\r\n df[\"drawdown_pct\"] = df[\"drawdown\"]/df[\"cum_roll_max\"]\r\n max_dd = df[\"drawdown_pct\"].max()\r\n return max_dd\r\n\r\n# Download historical data for DJI constituent stocks\r\n\r\ntickers = [\"MSFT\",\"AAPL\",\"FB\",\"AMZN\",\"INTC\", \"CSCO\",\"VZ\",\"IBM\",\"QCOM\",\"LYFT\"]\r\n\r\n\r\nohlc_intraday = {} # directory with ohlc value for each stock \r\nkey_path = \"D:\\\\Udemy\\\\Quantitative Investing Using Python\\\\1_Getting Data\\\\AlphaVantage\\\\key.txt\"\r\nts = TimeSeries(key=open(key_path,'r').read(), output_format='pandas')\r\n\r\nattempt = 0 # initializing passthrough variable\r\ndrop = [] # initializing list to store tickers whose close price was successfully extracted\r\nwhile len(tickers) != 0 and attempt <=5:\r\n tickers = [j for j in tickers if j not in drop]\r\n for i in range(len(tickers)):\r\n try:\r\n ohlc_intraday[tickers[i]] = ts.get_intraday(symbol=tickers[i],interval='5min', outputsize='full')[0]\r\n ohlc_intraday[tickers[i]].columns = [\"Open\",\"High\",\"Low\",\"Adj Close\",\"Volume\"]\r\n drop.append(tickers[i]) \r\n except:\r\n print(tickers[i],\" :failed to fetch data...retrying\")\r\n continue\r\n attempt+=1\r\n\r\n \r\ntickers = ohlc_intraday.keys() # redefine tickers variable after removing any tickers with corrupted data\r\n\r\n################################Backtesting####################################\r\n\r\n#Merging renko df with original ohlc df\r\nohlc_renko = {}\r\ndf = copy.deepcopy(ohlc_intraday)\r\ntickers_signal = {}\r\ntickers_ret = {}\r\nfor ticker in tickers:\r\n print(\"merging for \",ticker)\r\n renko = renko_DF(df[ticker])\r\n df[ticker][\"date\"] = df[ticker].index\r\n ohlc_renko[ticker] = df[ticker].merge(renko.loc[:,[\"date\",\"bar_num\"]],how=\"outer\",on=\"date\")\r\n ohlc_renko[ticker][\"bar_num\"].fillna(method='ffill',inplace=True)\r\n ohlc_renko[ticker][\"macd\"]= MACD(ohlc_renko[ticker],12,26,9)[0]\r\n ohlc_renko[ticker][\"macd_sig\"]= MACD(ohlc_renko[ticker],12,26,9)[1]\r\n ohlc_renko[ticker][\"macd_slope\"] = slope(ohlc_renko[ticker][\"macd\"],5)\r\n ohlc_renko[ticker][\"macd_sig_slope\"] = slope(ohlc_renko[ticker][\"macd_sig\"],5)\r\n tickers_signal[ticker] = \"\"\r\n tickers_ret[ticker] = []\r\n\r\n \r\n#Identifying signals and calculating daily return\r\nfor ticker in tickers:\r\n print(\"calculating daily returns for \",ticker)\r\n for i in range(len(ohlc_intraday[ticker])):\r\n if tickers_signal[ticker] == \"\":\r\n tickers_ret[ticker].append(0)\r\n if i > 0:\r\n if ohlc_renko[ticker][\"bar_num\"][i]>=2 and ohlc_renko[ticker][\"macd\"][i]>ohlc_renko[ticker][\"macd_sig\"][i] and ohlc_renko[ticker][\"macd_slope\"][i]>ohlc_renko[ticker][\"macd_sig_slope\"][i]:\r\n tickers_signal[ticker] = \"Buy\"\r\n elif ohlc_renko[ticker][\"bar_num\"][i]<=-2 and ohlc_renko[ticker][\"macd\"][i]<ohlc_renko[ticker][\"macd_sig\"][i] and ohlc_renko[ticker][\"macd_slope\"][i]<ohlc_renko[ticker][\"macd_sig_slope\"][i]:\r\n tickers_signal[ticker] = \"Sell\"\r\n \r\n elif tickers_signal[ticker] == \"Buy\":\r\n tickers_ret[ticker].append((ohlc_renko[ticker][\"Adj Close\"][i]/ohlc_renko[ticker][\"Adj Close\"][i-1])-1)\r\n if i > 0:\r\n if ohlc_renko[ticker][\"bar_num\"][i]<=-2 and ohlc_renko[ticker][\"macd\"][i]<ohlc_renko[ticker][\"macd_sig\"][i] and ohlc_renko[ticker][\"macd_slope\"][i]<ohlc_renko[ticker][\"macd_sig_slope\"][i]:\r\n 
tickers_signal[ticker] = \"Sell\"\r\n elif ohlc_renko[ticker][\"macd\"][i]<ohlc_renko[ticker][\"macd_sig\"][i] and ohlc_renko[ticker][\"macd_slope\"][i]<ohlc_renko[ticker][\"macd_sig_slope\"][i]:\r\n tickers_signal[ticker] = \"\"\r\n \r\n elif tickers_signal[ticker] == \"Sell\":\r\n tickers_ret[ticker].append((ohlc_renko[ticker][\"Adj Close\"][i-1]/ohlc_renko[ticker][\"Adj Close\"][i])-1)\r\n if i > 0:\r\n if ohlc_renko[ticker][\"bar_num\"][i]>=2 and ohlc_renko[ticker][\"macd\"][i]>ohlc_renko[ticker][\"macd_sig\"][i] and ohlc_renko[ticker][\"macd_slope\"][i]>ohlc_renko[ticker][\"macd_sig_slope\"][i]:\r\n tickers_signal[ticker] = \"Buy\"\r\n elif ohlc_renko[ticker][\"macd\"][i]>ohlc_renko[ticker][\"macd_sig\"][i] and ohlc_renko[ticker][\"macd_slope\"][i]>ohlc_renko[ticker][\"macd_sig_slope\"][i]:\r\n tickers_signal[ticker] = \"\"\r\n ohlc_renko[ticker][\"ret\"] = np.array(tickers_ret[ticker])\r\n\r\n#calculating overall strategy's KPIs\r\nstrategy_df = pd.DataFrame()\r\nfor ticker in tickers:\r\n strategy_df[ticker] = ohlc_renko[ticker][\"ret\"]\r\nstrategy_df[\"ret\"] = strategy_df.mean(axis=1)\r\nCAGR(strategy_df)\r\nsharpe(strategy_df,0.025)\r\nmax_dd(strategy_df) \r\n\r\n#visualizing strategy returns\r\n(1+strategy_df[\"ret\"]).cumprod().plot()\r\n\r\n#calculating individual stock's KPIs\r\ncagr = {}\r\nsharpe_ratios = {}\r\nmax_drawdown = {}\r\nfor ticker in tickers:\r\n print(\"calculating KPIs for \",ticker) \r\n cagr[ticker] = CAGR(ohlc_renko[ticker])\r\n sharpe_ratios[ticker] = sharpe(ohlc_renko[ticker],0.025)\r\n max_drawdown[ticker] = max_dd(ohlc_renko[ticker])\r\n\r\nKPI_df = pd.DataFrame([cagr,sharpe_ratios,max_drawdown],index=[\"Return\",\"Sharpe Ratio\",\"Max Drawdown\"]) \r\nKPI_df.T" ]
[ [ "pandas.DataFrame", "numpy.array", "numpy.sqrt", "numpy.where" ] ]
Kexiii/DeepImageAnalogy
[ "3c4baf577eac4592de9cdde4245ba081ce18cc4a" ]
[ "utils.py" ]
[ "from __future__ import print_function, division\r\n\r\nimport cv2\r\nimport numpy as np\r\nimport torch\r\n\r\ndef ts2np(x):\r\n x = x.squeeze(0)\r\n x = x.cpu().numpy()\r\n x = x.transpose(1,2,0)\r\n return x\r\n\r\ndef np2ts(x):\r\n x = x.transpose(2,0,1)\r\n x = torch.from_numpy(x)\r\n x = x.unsqueeze(0)\r\n return x.cuda()\r\n\r\ndef load_image(file,resize_ratio=1.0):\r\n ori_img = cv2.imread(file)\r\n res = cv2.resize(ori_img, (int(ori_img.shape[0]*resize_ratio), int(ori_img.shape[1]*resize_ratio)),\r\n interpolation=cv2.INTER_CUBIC)\r\n print(\"Output Image Shape: {}\".format(res.shape))\r\n return res\r\n \r\ndef normalize(feature_map):\r\n response = torch.sum(feature_map*feature_map, dim=1, keepdim=True)\r\n normed_feature_map = feature_map/torch.sqrt(response)\r\n response = (response-torch.min(response))/(torch.max(response)-torch.min(response))\r\n return normed_feature_map, response\r\n \r\ndef blend(response, f_a, r_bp, alpha=0.8, tau=0.05):\r\n \"\"\"Equotion(4) stated in the paper\r\n We use the indicator function instead of sigmoid here according to the official implementation:\r\n https://github.com/msracver/Deep-Image-Analogy\r\n \"\"\"\r\n weight = (response > tau).type(torch.FloatTensor) * alpha\r\n weight = weight.expand(1, f_a.size(1), weight.size(2), weight.size(3))\r\n weight = weight.cuda()\r\n f_ap = f_a*weight + r_bp*(1. - weight)\r\n return f_ap\r\n\r\n" ]
[ [ "torch.sqrt", "torch.min", "torch.max", "torch.from_numpy", "torch.sum" ] ]
syyxsxx/PaddleDetection
[ "da0e1eed6472d492abc9db1db324569f9be62a5f" ]
[ "dygraph/ppdet/data/transform/operator.py" ]
[ "# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n# function:\n# operators to process sample,\n# eg: decode/resize/crop image\n\nfrom __future__ import absolute_import\nfrom __future__ import print_function\nfrom __future__ import division\n\ntry:\n from collections.abc import Sequence\nexcept Exception:\n from collections import Sequence\n\nfrom numbers import Number, Integral\n\nimport uuid\nimport random\nimport math\nimport numpy as np\nimport os\nimport copy\n\nimport cv2\nfrom PIL import Image, ImageEnhance, ImageDraw\n\nfrom ppdet.core.workspace import serializable\nfrom ppdet.modeling.layers import AnchorGrid\n\nfrom .op_helper import (satisfy_sample_constraint, filter_and_process,\n generate_sample_bbox, clip_bbox, data_anchor_sampling,\n satisfy_sample_constraint_coverage, crop_image_sampling,\n generate_sample_bbox_square, bbox_area_sampling,\n is_poly, gaussian_radius, draw_gaussian)\n\nfrom ppdet.utils.logger import setup_logger\nlogger = setup_logger(__name__)\n\nregistered_ops = []\n\n\ndef register_op(cls):\n registered_ops.append(cls.__name__)\n if not hasattr(BaseOperator, cls.__name__):\n setattr(BaseOperator, cls.__name__, cls)\n else:\n raise KeyError(\"The {} class has been registered.\".format(cls.__name__))\n return serializable(cls)\n\n\nclass BboxError(ValueError):\n pass\n\n\nclass ImageError(ValueError):\n pass\n\n\nclass BaseOperator(object):\n def __init__(self, name=None):\n if name is None:\n name = self.__class__.__name__\n self._id = name + '_' + str(uuid.uuid4())[-6:]\n\n def apply(self, sample, context=None):\n \"\"\" Process a sample.\n Args:\n sample (dict): a dict of sample, eg: {'image':xx, 'label': xxx}\n context (dict): info about this sample processing\n Returns:\n result (dict): a processed sample\n \"\"\"\n return sample\n\n def __call__(self, sample, context=None):\n \"\"\" Process a sample.\n Args:\n sample (dict): a dict of sample, eg: {'image':xx, 'label': xxx}\n context (dict): info about this sample processing\n Returns:\n result (dict): a processed sample\n \"\"\"\n if isinstance(sample, Sequence):\n for i in range(len(sample)):\n sample[i] = self.apply(sample[i], context)\n else:\n sample = self.apply(sample, context)\n return sample\n\n def __str__(self):\n return str(self._id)\n\n\n@register_op\nclass DecodeOp(BaseOperator):\n def __init__(self):\n \"\"\" Transform the image data to numpy format following the rgb format\n \"\"\"\n super(DecodeOp, self).__init__()\n\n def apply(self, sample, context=None):\n \"\"\" load image if 'im_file' field is not empty but 'image' is\"\"\"\n if 'image' not in sample:\n with open(sample['im_file'], 'rb') as f:\n sample['image'] = f.read()\n sample.pop('im_file')\n\n im = sample['image']\n data = np.frombuffer(im, dtype='uint8')\n im = cv2.imdecode(data, 1) # BGR mode, but need RGB mode\n\n im = cv2.cvtColor(im, cv2.COLOR_BGR2RGB)\n\n sample['image'] = im\n if 'h' not in sample:\n sample['h'] = im.shape[0]\n elif sample['h'] != 
im.shape[0]:\n logger.warn(\n \"The actual image height: {} is not equal to the \"\n \"height: {} in annotation, and update sample['h'] by actual \"\n \"image height.\".format(im.shape[0], sample['h']))\n sample['h'] = im.shape[0]\n if 'w' not in sample:\n sample['w'] = im.shape[1]\n elif sample['w'] != im.shape[1]:\n logger.warn(\n \"The actual image width: {} is not equal to the \"\n \"width: {} in annotation, and update sample['w'] by actual \"\n \"image width.\".format(im.shape[1], sample['w']))\n sample['w'] = im.shape[1]\n\n sample['im_shape'] = np.array(im.shape[:2], dtype=np.float32)\n sample['scale_factor'] = np.array([1., 1.], dtype=np.float32)\n return sample\n\n\n@register_op\nclass PermuteOp(BaseOperator):\n def __init__(self):\n \"\"\"\n Change the channel to be (C, H, W)\n \"\"\"\n super(PermuteOp, self).__init__()\n\n def apply(self, sample, context=None):\n im = sample['image']\n im = im.transpose((2, 0, 1))\n sample['image'] = im\n return sample\n\n\n@register_op\nclass LightingOp(BaseOperator):\n \"\"\"\n Lighting the imagen by eigenvalues and eigenvectors\n Args:\n eigval (list): eigenvalues\n eigvec (list): eigenvectors\n alphastd (float): random weight of lighting, 0.1 by default\n \"\"\"\n\n def __init__(self, eigval, eigvec, alphastd=0.1):\n super(LightingOp, self).__init__()\n self.alphastd = alphastd\n self.eigval = np.array(eigval).astype('float32')\n self.eigvec = np.array(eigvec).astype('float32')\n\n def apply(self, sample, context=None):\n alpha = np.random.normal(scale=self.alphastd, size=(3, ))\n sample['image'] += np.dot(self.eigvec, self.eigval * alpha)\n return sample\n\n\n@register_op\nclass RandomErasingImageOp(BaseOperator):\n def __init__(self, prob=0.5, lower=0.02, higher=0.4, aspect_ratio=0.3):\n \"\"\"\n Random Erasing Data Augmentation, see https://arxiv.org/abs/1708.04896\n Args:\n prob (float): probability to carry out random erasing\n lower (float): lower limit of the erasing area ratio\n heigher (float): upper limit of the erasing area ratio\n aspect_ratio (float): aspect ratio of the erasing region\n \"\"\"\n super(RandomErasingImageOp, self).__init__()\n self.prob = prob\n self.lower = lower\n self.heigher = heigher\n self.aspect_ratio = aspect_ratio\n\n def apply(self, sample):\n gt_bbox = sample['gt_bbox']\n im = sample['image']\n if not isinstance(im, np.ndarray):\n raise TypeError(\"{}: image is not a numpy array.\".format(self))\n if len(im.shape) != 3:\n raise ImageError(\"{}: image is not 3-dimensional.\".format(self))\n\n for idx in range(gt_bbox.shape[0]):\n if self.prob <= np.random.rand():\n continue\n\n x1, y1, x2, y2 = gt_bbox[idx, :]\n w_bbox = x2 - x1 + 1\n h_bbox = y2 - y1 + 1\n area = w_bbox * h_bbox\n\n target_area = random.uniform(self.lower, self.higher) * area\n aspect_ratio = random.uniform(self.aspect_ratio,\n 1 / self.aspect_ratio)\n\n h = int(round(math.sqrt(target_area * aspect_ratio)))\n w = int(round(math.sqrt(target_area / aspect_ratio)))\n\n if w < w_bbox and h < h_bbox:\n off_y1 = random.randint(0, int(h_bbox - h))\n off_x1 = random.randint(0, int(w_bbox - w))\n im[int(y1 + off_y1):int(y1 + off_y1 + h), int(x1 + off_x1):int(\n x1 + off_x1 + w), :] = 0\n sample['image'] = im\n return sample\n\n\n@register_op\nclass NormalizeImageOp(BaseOperator):\n def __init__(self, mean=[0.485, 0.456, 0.406], std=[1, 1, 1],\n is_scale=True):\n \"\"\"\n Args:\n mean (list): the pixel mean\n std (list): the pixel variance\n \"\"\"\n super(NormalizeImageOp, self).__init__()\n self.mean = mean\n self.std = std\n self.is_scale = 
is_scale\n if not (isinstance(self.mean, list) and isinstance(self.std, list) and\n isinstance(self.is_scale, bool)):\n raise TypeError(\"{}: input type is invalid.\".format(self))\n from functools import reduce\n if reduce(lambda x, y: x * y, self.std) == 0:\n raise ValueError('{}: std is invalid!'.format(self))\n\n def apply(self, sample, context=None):\n \"\"\"Normalize the image.\n Operators:\n 1.(optional) Scale the image to [0,1]\n 2. Each pixel minus mean and is divided by std\n \"\"\"\n im = sample['image']\n im = im.astype(np.float32, copy=False)\n mean = np.array(self.mean)[np.newaxis, np.newaxis, :]\n std = np.array(self.std)[np.newaxis, np.newaxis, :]\n\n if self.is_scale:\n im = im / 255.0\n\n im -= mean\n im /= std\n\n sample['image'] = im\n return sample\n\n\n@register_op\nclass GridMask(BaseOperator):\n def __init__(self,\n use_h=True,\n use_w=True,\n rotate=1,\n offset=False,\n ratio=0.5,\n mode=1,\n prob=0.7,\n upper_iter=360000):\n \"\"\"\n GridMask Data Augmentation, see https://arxiv.org/abs/2001.04086\n Args:\n use_h (bool): whether to mask vertically\n use_w (boo;): whether to mask horizontally\n rotate (float): angle for the mask to rotate\n offset (float): mask offset\n ratio (float): mask ratio\n mode (int): gridmask mode\n prob (float): max probability to carry out gridmask\n upper_iter (int): suggested to be equal to global max_iter\n \"\"\"\n super(GridMask, self).__init__()\n self.use_h = use_h\n self.use_w = use_w\n self.rotate = rotate\n self.offset = offset\n self.ratio = ratio\n self.mode = mode\n self.prob = prob\n self.upper_iter = upper_iter\n\n from .gridmask_utils import GridMask\n self.gridmask_op = GridMask(\n use_h,\n use_w,\n rotate=rotate,\n offset=offset,\n ratio=ratio,\n mode=mode,\n prob=prob,\n upper_iter=upper_iter)\n\n def apply(self, sample, context=None):\n sample['image'] = self.gridmask_op(sample['image'], sample['curr_iter'])\n return sample\n\n\n@register_op\nclass RandomDistortOp(BaseOperator):\n \"\"\"Random color distortion.\n Args:\n hue (list): hue settings. in [lower, upper, probability] format.\n saturation (list): saturation settings. in [lower, upper, probability] format.\n contrast (list): contrast settings. in [lower, upper, probability] format.\n brightness (list): brightness settings. in [lower, upper, probability] format.\n random_apply (bool): whether to apply in random (yolo) or fixed (SSD)\n order.\n count (int): the number of doing distrot\n random_channel (bool): whether to swap channels randomly\n \"\"\"\n\n def __init__(self,\n hue=[-18, 18, 0.5],\n saturation=[0.5, 1.5, 0.5],\n contrast=[0.5, 1.5, 0.5],\n brightness=[0.5, 1.5, 0.5],\n random_apply=True,\n count=4,\n random_channel=False):\n super(RandomDistortOp, self).__init__()\n self.hue = hue\n self.saturation = saturation\n self.contrast = contrast\n self.brightness = brightness\n self.random_apply = random_apply\n self.count = count\n self.random_channel = random_channel\n\n def apply_hue(self, img):\n low, high, prob = self.hue\n if np.random.uniform(0., 1.) 
< prob:\n return img\n\n img = img.astype(np.float32)\n # it works, but result differ from HSV version\n delta = np.random.uniform(low, high)\n u = np.cos(delta * np.pi)\n w = np.sin(delta * np.pi)\n bt = np.array([[1.0, 0.0, 0.0], [0.0, u, -w], [0.0, w, u]])\n tyiq = np.array([[0.299, 0.587, 0.114], [0.596, -0.274, -0.321],\n [0.211, -0.523, 0.311]])\n ityiq = np.array([[1.0, 0.956, 0.621], [1.0, -0.272, -0.647],\n [1.0, -1.107, 1.705]])\n t = np.dot(np.dot(ityiq, bt), tyiq).T\n img = np.dot(img, t)\n return img\n\n def apply_saturation(self, img):\n low, high, prob = self.saturation\n if np.random.uniform(0., 1.) < prob:\n return img\n delta = np.random.uniform(low, high)\n img = img.astype(np.float32)\n # it works, but result differ from HSV version\n gray = img * np.array([[[0.299, 0.587, 0.114]]], dtype=np.float32)\n gray = gray.sum(axis=2, keepdims=True)\n gray *= (1.0 - delta)\n img *= delta\n img += gray\n return img\n\n def apply_contrast(self, img):\n low, high, prob = self.contrast\n if np.random.uniform(0., 1.) < prob:\n return img\n delta = np.random.uniform(low, high)\n img = img.astype(np.float32)\n img *= delta\n return img\n\n def apply_brightness(self, img):\n low, high, prob = self.brightness\n if np.random.uniform(0., 1.) < prob:\n return img\n delta = np.random.uniform(low, high)\n img = img.astype(np.float32)\n img += delta\n return img\n\n def apply(self, sample, context=None):\n img = sample['image']\n if self.random_apply:\n functions = [\n self.apply_brightness, self.apply_contrast,\n self.apply_saturation, self.apply_hue\n ]\n distortions = np.random.permutation(functions)[:self.count]\n for func in distortions:\n img = func(img)\n sample['image'] = img\n return sample\n\n img = self.apply_brightness(img)\n mode = np.random.randint(0, 2)\n\n if mode:\n img = self.apply_contrast(img)\n\n img = self.apply_saturation(img)\n img = self.apply_hue(img)\n\n if not mode:\n img = self.apply_contrast(img)\n\n if self.random_channel:\n if np.random.randint(0, 2):\n img = img[..., np.random.permutation(3)]\n sample['image'] = img\n return sample\n\n\n@register_op\nclass AutoAugmentOp(BaseOperator):\n def __init__(self, autoaug_type=\"v1\"):\n \"\"\"\n Args:\n autoaug_type (str): autoaug type, support v0, v1, v2, v3, test\n \"\"\"\n super(AutoAugmentOp, self).__init__()\n self.autoaug_type = autoaug_type\n\n def apply(self, sample, context=None):\n \"\"\"\n Learning Data Augmentation Strategies for Object Detection, see https://arxiv.org/abs/1906.11172\n \"\"\"\n im = sample['image']\n gt_bbox = sample['gt_bbox']\n if not isinstance(im, np.ndarray):\n raise TypeError(\"{}: image is not a numpy array.\".format(self))\n if len(im.shape) != 3:\n raise ImageError(\"{}: image is not 3-dimensional.\".format(self))\n if len(gt_bbox) == 0:\n return sample\n\n height, width, _ = im.shape\n norm_gt_bbox = np.ones_like(gt_bbox, dtype=np.float32)\n norm_gt_bbox[:, 0] = gt_bbox[:, 1] / float(height)\n norm_gt_bbox[:, 1] = gt_bbox[:, 0] / float(width)\n norm_gt_bbox[:, 2] = gt_bbox[:, 3] / float(height)\n norm_gt_bbox[:, 3] = gt_bbox[:, 2] / float(width)\n\n from .autoaugment_utils import distort_image_with_autoaugment\n im, norm_gt_bbox = distort_image_with_autoaugment(im, norm_gt_bbox,\n self.autoaug_type)\n\n gt_bbox[:, 0] = norm_gt_bbox[:, 1] * float(width)\n gt_bbox[:, 1] = norm_gt_bbox[:, 0] * float(height)\n gt_bbox[:, 2] = norm_gt_bbox[:, 3] * float(width)\n gt_bbox[:, 3] = norm_gt_bbox[:, 2] * float(height)\n\n sample['image'] = im\n sample['gt_bbox'] = gt_bbox\n return 
sample\n\n\n@register_op\nclass RandomFlipOp(BaseOperator):\n def __init__(self, prob=0.5, is_mask_flip=False):\n \"\"\"\n Args:\n prob (float): the probability of flipping image\n is_mask_flip (bool): whether flip the segmentation\n \"\"\"\n super(RandomFlipOp, self).__init__()\n self.prob = prob\n self.is_mask_flip = is_mask_flip\n if not (isinstance(self.prob, float) and\n isinstance(self.is_mask_flip, bool)):\n raise TypeError(\"{}: input type is invalid.\".format(self))\n\n def apply_segm(self, segms, height, width):\n def _flip_poly(poly, width):\n flipped_poly = np.array(poly)\n flipped_poly[0::2] = width - np.array(poly[0::2]) - 1\n return flipped_poly.tolist()\n\n def _flip_rle(rle, height, width):\n if 'counts' in rle and type(rle['counts']) == list:\n rle = mask_util.frPyObjects(rle, height, width)\n mask = mask_util.decode(rle)\n mask = mask[:, ::-1]\n rle = mask_util.encode(np.array(mask, order='F', dtype=np.uint8))\n return rle\n\n flipped_segms = []\n for segm in segms:\n if is_poly(segm):\n # Polygon format\n flipped_segms.append([_flip_poly(poly, width) for poly in segm])\n else:\n # RLE format\n import pycocotools.mask as mask_util\n flipped_segms.append(_flip_rle(segm, height, width))\n return flipped_segms\n\n def apply_keypoint(self, gt_keypoint, width):\n for i in range(gt_keypoint.shape[1]):\n if i % 2 == 0:\n old_x = gt_keypoint[:, i].copy()\n gt_keypoint[:, i] = width - old_x - 1\n return gt_keypoint\n\n def apply_image(self, image):\n return image[:, ::-1, :]\n\n def apply_bbox(self, bbox, width):\n oldx1 = bbox[:, 0].copy()\n oldx2 = bbox[:, 2].copy()\n bbox[:, 0] = width - oldx2 - 1\n bbox[:, 2] = width - oldx1 - 1\n return bbox\n\n def apply(self, sample, context=None):\n \"\"\"Filp the image and bounding box.\n Operators:\n 1. Flip the image numpy.\n 2. Transform the bboxes' x coordinates.\n (Must judge whether the coordinates are normalized!)\n 3. Transform the segmentations' x coordinates.\n (Must judge whether the coordinates are normalized!)\n Output:\n sample: the image, bounding box and segmentation part\n in sample are flipped.\n \"\"\"\n if np.random.uniform(0, 1) < self.prob:\n im = sample['image']\n height, width = im.shape[:2]\n im = self.apply_image(im)\n if 'gt_bbox' in sample and len(sample['gt_bbox']) > 0:\n sample['gt_bbox'] = self.apply_bbox(sample['gt_bbox'], width)\n if self.is_mask_flip and 'gt_poly' in sample and len(sample[\n 'gt_poly']) > 0:\n sample['gt_poly'] = self.apply_segm(sample['gt_poly'], height,\n width)\n if 'gt_keypoint' in sample and len(sample['gt_keypoint']) > 0:\n sample['gt_keypoint'] = self.apply_keypoint(\n sample['gt_keypoint'], width)\n\n if 'semantic' in sample and sample['semantic']:\n sample['semantic'] = sample['semantic'][:, ::-1]\n\n if 'gt_segm' in sample and sample['gt_segm']:\n sample['gt_segm'] = sample['gt_segm'][:, :, ::-1]\n\n sample['flipped'] = True\n sample['image'] = im\n return sample\n\n\n@register_op\nclass ResizeOp(BaseOperator):\n def __init__(self, target_size, keep_ratio, interp=cv2.INTER_LINEAR):\n \"\"\"\n Resize image to target size. 
if keep_ratio is True, \n resize the image's long side to the maximum of target_size\n if keep_ratio is False, resize the image to target size(h, w)\n Args:\n target_size (int|list): image target size\n keep_ratio (bool): whether keep_ratio or not, default true\n interp (int): the interpolation method\n \"\"\"\n super(ResizeOp, self).__init__()\n self.keep_ratio = keep_ratio\n self.interp = interp\n if not isinstance(target_size, (Integral, Sequence)):\n raise TypeError(\n \"Type of target_size is invalid. Must be Integer or List or Tuple, now is {}\".\n format(type(target_size)))\n if isinstance(target_size, Integral):\n target_size = [target_size, target_size]\n self.target_size = target_size\n\n def apply_image(self, image, scale):\n im_scale_x, im_scale_y = scale\n return cv2.resize(\n image,\n None,\n None,\n fx=im_scale_x,\n fy=im_scale_y,\n interpolation=self.interp)\n\n def apply_bbox(self, bbox, scale, size):\n im_scale_x, im_scale_y = scale\n resize_w, resize_h = size\n bbox[:, 0::2] *= im_scale_x\n bbox[:, 1::2] *= im_scale_y\n bbox[:, 0::2] = np.clip(bbox[:, 0::2], 0, resize_w - 1)\n bbox[:, 1::2] = np.clip(bbox[:, 1::2], 0, resize_h - 1)\n return bbox\n\n def apply_segm(self, segms, im_size, scale):\n def _resize_poly(poly, im_scale_x, im_scale_y):\n resized_poly = np.array(poly)\n resized_poly[0::2] *= im_scale_x\n resized_poly[1::2] *= im_scale_y\n return resized_poly.tolist()\n\n def _resize_rle(rle, im_h, im_w, im_scale_x, im_scale_y):\n if 'counts' in rle and type(rle['counts']) == list:\n rle = mask_util.frPyObjects(rle, im_h, im_w)\n\n mask = mask_util.decode(rle)\n mask = cv2.resize(\n image,\n None,\n None,\n fx=im_scale_x,\n fy=im_scale_y,\n interpolation=self.interp)\n rle = mask_util.encode(np.array(mask, order='F', dtype=np.uint8))\n return rle\n\n im_h, im_w = im_size\n im_scale_x, im_scale_y = scale\n resized_segms = []\n for segm in segms:\n if is_poly(segm):\n # Polygon format\n resized_segms.append([\n _resize_poly(poly, im_scale_x, im_scale_y) for poly in segm\n ])\n else:\n # RLE format\n import pycocotools.mask as mask_util\n resized_segms.append(\n _resize_rle(segm, im_h, im_w, im_scale_x, im_scale_y))\n\n return resized_segms\n\n def apply(self, sample, context=None):\n \"\"\" Resize the image numpy.\n \"\"\"\n im = sample['image']\n if not isinstance(im, np.ndarray):\n raise TypeError(\"{}: image type is not numpy.\".format(self))\n if len(im.shape) != 3:\n raise ImageError('{}: image is not 3-dimensional.'.format(self))\n\n # apply image\n im_shape = im.shape\n if self.keep_ratio:\n\n im_size_min = np.min(im_shape[0:2])\n im_size_max = np.max(im_shape[0:2])\n\n target_size_min = np.min(self.target_size)\n target_size_max = np.max(self.target_size)\n\n im_scale = min(target_size_min / im_size_min,\n target_size_max / im_size_max)\n\n resize_h = im_scale * float(im_shape[0])\n resize_w = im_scale * float(im_shape[1])\n\n im_scale_x = im_scale\n im_scale_y = im_scale\n else:\n resize_h, resize_w = self.target_size\n im_scale_y = resize_h / im_shape[0]\n im_scale_x = resize_w / im_shape[1]\n\n im = self.apply_image(sample['image'], [im_scale_x, im_scale_y])\n sample['image'] = im\n sample['im_shape'] = np.asarray([resize_h, resize_w], dtype=np.float32)\n if 'scale_factor' in sample:\n scale_factor = sample['scale_factor']\n sample['scale_factor'] = np.asarray(\n [scale_factor[0] * im_scale_y, scale_factor[1] * im_scale_x],\n dtype=np.float32)\n else:\n sample['scale_factor'] = np.asarray(\n [im_scale_y, im_scale_x], dtype=np.float32)\n\n # apply bbox\n if 
'gt_bbox' in sample and len(sample['gt_bbox']) > 0:\n sample['gt_bbox'] = self.apply_bbox(sample['gt_bbox'],\n [im_scale_x, im_scale_y],\n [resize_w, resize_h])\n\n # apply polygon\n if 'gt_poly' in sample and len(sample['gt_poly']) > 0:\n sample['gt_poly'] = self.apply_segm(sample['gt_poly'], im_shape[:2],\n [im_scale_x, im_scale_y])\n\n # apply semantic\n if 'semantic' in sample and sample['semantic']:\n semantic = sample['semantic']\n semantic = cv2.resize(\n semantic.astype('float32'),\n None,\n None,\n fx=im_scale_x,\n fy=im_scale_y,\n interpolation=self.interp)\n semantic = np.asarray(semantic).astype('int32')\n semantic = np.expand_dims(semantic, 0)\n sample['semantic'] = semantic\n\n # apply gt_segm\n if 'gt_segm' in sample and len(sample['gt_segm']) > 0:\n masks = [\n cv2.resize(\n gt_segm,\n None,\n None,\n fx=im_scale_x,\n fy=im_scale_y,\n interpolation=cv2.INTER_NEAREST)\n for gt_segm in sample['gt_segm']\n ]\n sample['gt_segm'] = np.asarray(masks).astype(np.uint8)\n\n return sample\n\n\n@register_op\nclass MultiscaleTestResizeOp(BaseOperator):\n def __init__(self,\n origin_target_size=[800, 1333],\n target_size=[],\n interp=cv2.INTER_LINEAR,\n use_flip=True):\n \"\"\"\n Rescale image to the each size in target size, and capped at max_size.\n Args:\n origin_target_size (list): origin target size of image\n target_size (list): A list of target sizes of image.\n interp (int): the interpolation method.\n use_flip (bool): whether use flip augmentation.\n \"\"\"\n super(MultiscaleTestResizeOp, self).__init__()\n self.interp = interp\n self.use_flip = use_flip\n\n if not isinstance(target_size, Sequence):\n raise TypeError(\n \"Type of target_size is invalid. Must be List or Tuple, now is {}\".\n format(type(target_size)))\n self.target_size = target_size\n\n if not isinstance(origin_target_size, Sequence):\n raise TypeError(\n \"Type of origin_target_size is invalid. Must be List or Tuple, now is {}\".\n format(type(origin_target_size)))\n\n self.origin_target_size = origin_target_size\n\n def apply(self, sample, context=None):\n \"\"\" Resize the image numpy for multi-scale test.\n \"\"\"\n samples = []\n resizer = ResizeOp(\n self.origin_target_size, keep_ratio=True, interp=self.interp)\n samples.append(resizer(sample.copy(), context))\n if self.use_flip:\n flipper = RandomFlipOp(1.1)\n samples.append(flipper(sample.copy(), context=context))\n\n for size in self.target_size:\n resizer = ResizeOp(size, keep_ratio=True, interp=self.interp)\n samples.append(resizer(sample.copy(), context))\n\n return samples\n\n\n@register_op\nclass RandomResizeOp(BaseOperator):\n def __init__(self,\n target_size,\n keep_ratio=True,\n interp=cv2.INTER_LINEAR,\n random_size=True,\n random_interp=False):\n \"\"\"\n Resize image to target size randomly. 
random target_size and interpolation method\n Args:\n target_size (int, list, tuple): image target size, if random size is True, must be list or tuple\n keep_ratio (bool): whether keep_raio or not, default true\n interp (int): the interpolation method\n random_size (bool): whether random select target size of image\n random_interp (bool): whether random select interpolation method\n \"\"\"\n super(RandomResizeOp, self).__init__()\n self.keep_ratio = keep_ratio\n self.interp = interp\n self.interps = [\n cv2.INTER_NEAREST,\n cv2.INTER_LINEAR,\n cv2.INTER_AREA,\n cv2.INTER_CUBIC,\n cv2.INTER_LANCZOS4,\n ]\n assert isinstance(target_size, (\n Integral, Sequence)), \"target_size must be Integer, List or Tuple\"\n if random_size and not isinstance(target_size, Sequence):\n raise TypeError(\n \"Type of target_size is invalid when random_size is True. Must be List or Tuple, now is {}\".\n format(type(target_size)))\n self.target_size = target_size\n self.random_size = random_size\n self.random_interp = random_interp\n\n def apply(self, sample, context=None):\n \"\"\" Resize the image numpy.\n \"\"\"\n if self.random_size:\n target_size = random.choice(self.target_size)\n else:\n target_size = self.target_size\n\n if self.random_interp:\n interp = random.choice(self.interps)\n else:\n interp = self.interp\n\n resizer = ResizeOp(target_size, self.keep_ratio, interp)\n return resizer(sample, context=context)\n\n\n@register_op\nclass RandomExpandOp(BaseOperator):\n \"\"\"Random expand the canvas.\n Args:\n ratio (float): maximum expansion ratio.\n prob (float): probability to expand.\n fill_value (list): color value used to fill the canvas. in RGB order.\n \"\"\"\n\n def __init__(self, ratio=4., prob=0.5, fill_value=(127.5, 127.5, 127.5)):\n super(RandomExpandOp, self).__init__()\n assert ratio > 1.01, \"expand ratio must be larger than 1.01\"\n self.ratio = ratio\n self.prob = prob\n assert isinstance(fill_value, (Number, Sequence)), \\\n \"fill value must be either float or sequence\"\n if isinstance(fill_value, Number):\n fill_value = (fill_value, ) * 3\n if not isinstance(fill_value, tuple):\n fill_value = tuple(fill_value)\n self.fill_value = fill_value\n\n def apply(self, sample, context=None):\n if np.random.uniform(0., 1.) 
< self.prob:\n return sample\n\n im = sample['image']\n height, width = im.shape[:2]\n ratio = np.random.uniform(1., self.ratio)\n h = int(height * ratio)\n w = int(width * ratio)\n if not h > height or not w > width:\n return sample\n y = np.random.randint(0, h - height)\n x = np.random.randint(0, w - width)\n offsets, size = [x, y], [h, w]\n\n pad = Pad(size,\n pad_mode=-1,\n offsets=offsets,\n fill_value=self.fill_value)\n\n return pad(sample, context=context)\n\n\n@register_op\nclass CropWithSampling(BaseOperator):\n def __init__(self, batch_sampler, satisfy_all=False, avoid_no_bbox=True):\n \"\"\"\n Args:\n batch_sampler (list): Multiple sets of different\n parameters for cropping.\n satisfy_all (bool): whether all boxes must satisfy.\n e.g.[[1, 1, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0],\n [1, 50, 0.3, 1.0, 0.5, 2.0, 0.1, 1.0],\n [1, 50, 0.3, 1.0, 0.5, 2.0, 0.3, 1.0],\n [1, 50, 0.3, 1.0, 0.5, 2.0, 0.5, 1.0],\n [1, 50, 0.3, 1.0, 0.5, 2.0, 0.7, 1.0],\n [1, 50, 0.3, 1.0, 0.5, 2.0, 0.9, 1.0],\n [1, 50, 0.3, 1.0, 0.5, 2.0, 0.0, 1.0]]\n [max sample, max trial, min scale, max scale,\n min aspect ratio, max aspect ratio,\n min overlap, max overlap]\n avoid_no_bbox (bool): whether to to avoid the\n situation where the box does not appear.\n \"\"\"\n super(CropWithSampling, self).__init__()\n self.batch_sampler = batch_sampler\n self.satisfy_all = satisfy_all\n self.avoid_no_bbox = avoid_no_bbox\n\n def apply(self, sample, context):\n \"\"\"\n Crop the image and modify bounding box.\n Operators:\n 1. Scale the image width and height.\n 2. Crop the image according to a radom sample.\n 3. Rescale the bounding box.\n 4. Determine if the new bbox is satisfied in the new image.\n Returns:\n sample: the image, bounding box are replaced.\n \"\"\"\n assert 'image' in sample, \"image data not found\"\n im = sample['image']\n gt_bbox = sample['gt_bbox']\n gt_class = sample['gt_class']\n im_height, im_width = im.shape[:2]\n gt_score = None\n if 'gt_score' in sample:\n gt_score = sample['gt_score']\n sampled_bbox = []\n gt_bbox = gt_bbox.tolist()\n for sampler in self.batch_sampler:\n found = 0\n for i in range(sampler[1]):\n if found >= sampler[0]:\n break\n sample_bbox = generate_sample_bbox(sampler)\n if satisfy_sample_constraint(sampler, sample_bbox, gt_bbox,\n self.satisfy_all):\n sampled_bbox.append(sample_bbox)\n found = found + 1\n im = np.array(im)\n while sampled_bbox:\n idx = int(np.random.uniform(0, len(sampled_bbox)))\n sample_bbox = sampled_bbox.pop(idx)\n sample_bbox = clip_bbox(sample_bbox)\n crop_bbox, crop_class, crop_score = \\\n filter_and_process(sample_bbox, gt_bbox, gt_class, scores=gt_score)\n if self.avoid_no_bbox:\n if len(crop_bbox) < 1:\n continue\n xmin = int(sample_bbox[0] * im_width)\n xmax = int(sample_bbox[2] * im_width)\n ymin = int(sample_bbox[1] * im_height)\n ymax = int(sample_bbox[3] * im_height)\n im = im[ymin:ymax, xmin:xmax]\n sample['image'] = im\n sample['gt_bbox'] = crop_bbox\n sample['gt_class'] = crop_class\n sample['gt_score'] = crop_score\n return sample\n return sample\n\n\n@register_op\nclass CropWithDataAchorSampling(BaseOperator):\n def __init__(self,\n batch_sampler,\n anchor_sampler=None,\n target_size=None,\n das_anchor_scales=[16, 32, 64, 128],\n sampling_prob=0.5,\n min_size=8.,\n avoid_no_bbox=True):\n \"\"\"\n Args:\n anchor_sampler (list): anchor_sampling sets of different\n parameters for cropping.\n batch_sampler (list): Multiple sets of different\n parameters for cropping.\n e.g.[[1, 10, 1.0, 1.0, 1.0, 1.0, 0.0, 0.0, 0.2, 0.0]]\n [[1, 50, 1.0, 1.0, 1.0, 
1.0, 0.0, 0.0, 1.0, 0.0],\n [1, 50, 0.3, 1.0, 1.0, 1.0, 0.0, 0.0, 1.0, 0.0],\n [1, 50, 0.3, 1.0, 1.0, 1.0, 0.0, 0.0, 1.0, 0.0],\n [1, 50, 0.3, 1.0, 1.0, 1.0, 0.0, 0.0, 1.0, 0.0],\n [1, 50, 0.3, 1.0, 1.0, 1.0, 0.0, 0.0, 1.0, 0.0]]\n [max sample, max trial, min scale, max scale,\n min aspect ratio, max aspect ratio,\n min overlap, max overlap, min coverage, max coverage]\n target_size (bool): target image size.\n das_anchor_scales (list[float]): a list of anchor scales in data\n anchor smapling.\n min_size (float): minimum size of sampled bbox.\n avoid_no_bbox (bool): whether to to avoid the\n situation where the box does not appear.\n \"\"\"\n super(CropWithDataAchorSampling, self).__init__()\n self.anchor_sampler = anchor_sampler\n self.batch_sampler = batch_sampler\n self.target_size = target_size\n self.sampling_prob = sampling_prob\n self.min_size = min_size\n self.avoid_no_bbox = avoid_no_bbox\n self.das_anchor_scales = np.array(das_anchor_scales)\n\n def apply(self, sample, context):\n \"\"\"\n Crop the image and modify bounding box.\n Operators:\n 1. Scale the image width and height.\n 2. Crop the image according to a radom sample.\n 3. Rescale the bounding box.\n 4. Determine if the new bbox is satisfied in the new image.\n Returns:\n sample: the image, bounding box are replaced.\n \"\"\"\n assert 'image' in sample, \"image data not found\"\n im = sample['image']\n gt_bbox = sample['gt_bbox']\n gt_class = sample['gt_class']\n image_height, image_width = im.shape[:2]\n gt_score = None\n if 'gt_score' in sample:\n gt_score = sample['gt_score']\n sampled_bbox = []\n gt_bbox = gt_bbox.tolist()\n\n prob = np.random.uniform(0., 1.)\n if prob > self.sampling_prob: # anchor sampling\n assert self.anchor_sampler\n for sampler in self.anchor_sampler:\n found = 0\n for i in range(sampler[1]):\n if found >= sampler[0]:\n break\n sample_bbox = data_anchor_sampling(\n gt_bbox, image_width, image_height,\n self.das_anchor_scales, self.target_size)\n if sample_bbox == 0:\n break\n if satisfy_sample_constraint_coverage(sampler, sample_bbox,\n gt_bbox):\n sampled_bbox.append(sample_bbox)\n found = found + 1\n im = np.array(im)\n while sampled_bbox:\n idx = int(np.random.uniform(0, len(sampled_bbox)))\n sample_bbox = sampled_bbox.pop(idx)\n\n if 'gt_keypoint' in sample.keys():\n keypoints = (sample['gt_keypoint'],\n sample['keypoint_ignore'])\n crop_bbox, crop_class, crop_score, gt_keypoints = \\\n filter_and_process(sample_bbox, gt_bbox, gt_class,\n scores=gt_score,\n keypoints=keypoints)\n else:\n crop_bbox, crop_class, crop_score = filter_and_process(\n sample_bbox, gt_bbox, gt_class, scores=gt_score)\n crop_bbox, crop_class, crop_score = bbox_area_sampling(\n crop_bbox, crop_class, crop_score, self.target_size,\n self.min_size)\n\n if self.avoid_no_bbox:\n if len(crop_bbox) < 1:\n continue\n im = crop_image_sampling(im, sample_bbox, image_width,\n image_height, self.target_size)\n sample['image'] = im\n sample['gt_bbox'] = crop_bbox\n sample['gt_class'] = crop_class\n sample['gt_score'] = crop_score\n if 'gt_keypoint' in sample.keys():\n sample['gt_keypoint'] = gt_keypoints[0]\n sample['keypoint_ignore'] = gt_keypoints[1]\n return sample\n return sample\n\n else:\n for sampler in self.batch_sampler:\n found = 0\n for i in range(sampler[1]):\n if found >= sampler[0]:\n break\n sample_bbox = generate_sample_bbox_square(\n sampler, image_width, image_height)\n if satisfy_sample_constraint_coverage(sampler, sample_bbox,\n gt_bbox):\n sampled_bbox.append(sample_bbox)\n found = found + 1\n im = 
np.array(im)\n while sampled_bbox:\n idx = int(np.random.uniform(0, len(sampled_bbox)))\n sample_bbox = sampled_bbox.pop(idx)\n sample_bbox = clip_bbox(sample_bbox)\n\n if 'gt_keypoint' in sample.keys():\n keypoints = (sample['gt_keypoint'],\n sample['keypoint_ignore'])\n crop_bbox, crop_class, crop_score, gt_keypoints = \\\n filter_and_process(sample_bbox, gt_bbox, gt_class,\n scores=gt_score,\n keypoints=keypoints)\n else:\n crop_bbox, crop_class, crop_score = filter_and_process(\n sample_bbox, gt_bbox, gt_class, scores=gt_score)\n # sampling bbox according the bbox area\n crop_bbox, crop_class, crop_score = bbox_area_sampling(\n crop_bbox, crop_class, crop_score, self.target_size,\n self.min_size)\n\n if self.avoid_no_bbox:\n if len(crop_bbox) < 1:\n continue\n xmin = int(sample_bbox[0] * image_width)\n xmax = int(sample_bbox[2] * image_width)\n ymin = int(sample_bbox[1] * image_height)\n ymax = int(sample_bbox[3] * image_height)\n im = im[ymin:ymax, xmin:xmax]\n sample['image'] = im\n sample['gt_bbox'] = crop_bbox\n sample['gt_class'] = crop_class\n sample['gt_score'] = crop_score\n if 'gt_keypoint' in sample.keys():\n sample['gt_keypoint'] = gt_keypoints[0]\n sample['keypoint_ignore'] = gt_keypoints[1]\n return sample\n return sample\n\n\n@register_op\nclass RandomCropOp(BaseOperator):\n \"\"\"Random crop image and bboxes.\n Args:\n aspect_ratio (list): aspect ratio of cropped region.\n in [min, max] format.\n thresholds (list): iou thresholds for decide a valid bbox crop.\n scaling (list): ratio between a cropped region and the original image.\n in [min, max] format.\n num_attempts (int): number of tries before giving up.\n allow_no_crop (bool): allow return without actually cropping them.\n cover_all_box (bool): ensure all bboxes are covered in the final crop.\n is_mask_crop(bool): whether crop the segmentation.\n \"\"\"\n\n def __init__(self,\n aspect_ratio=[.5, 2.],\n thresholds=[.0, .1, .3, .5, .7, .9],\n scaling=[.3, 1.],\n num_attempts=50,\n allow_no_crop=True,\n cover_all_box=False,\n is_mask_crop=False):\n super(RandomCropOp, self).__init__()\n self.aspect_ratio = aspect_ratio\n self.thresholds = thresholds\n self.scaling = scaling\n self.num_attempts = num_attempts\n self.allow_no_crop = allow_no_crop\n self.cover_all_box = cover_all_box\n self.is_mask_crop = is_mask_crop\n\n def crop_segms(self, segms, valid_ids, crop, height, width):\n def _crop_poly(segm, crop):\n xmin, ymin, xmax, ymax = crop\n crop_coord = [xmin, ymin, xmin, ymax, xmax, ymax, xmax, ymin]\n crop_p = np.array(crop_coord).reshape(4, 2)\n crop_p = Polygon(crop_p)\n\n crop_segm = list()\n for poly in segm:\n poly = np.array(poly).reshape(len(poly) // 2, 2)\n polygon = Polygon(poly)\n if not polygon.is_valid:\n exterior = polygon.exterior\n multi_lines = exterior.intersection(exterior)\n polygons = shapely.ops.polygonize(multi_lines)\n polygon = MultiPolygon(polygons)\n multi_polygon = list()\n if isinstance(polygon, MultiPolygon):\n multi_polygon = copy.deepcopy(polygon)\n else:\n multi_polygon.append(copy.deepcopy(polygon))\n for per_polygon in multi_polygon:\n inter = per_polygon.intersection(crop_p)\n if not inter:\n continue\n if isinstance(inter, (MultiPolygon, GeometryCollection)):\n for part in inter:\n if not isinstance(part, Polygon):\n continue\n part = np.squeeze(\n np.array(part.exterior.coords[:-1]).reshape(1,\n -1))\n part[0::2] -= xmin\n part[1::2] -= ymin\n crop_segm.append(part.tolist())\n elif isinstance(inter, Polygon):\n crop_poly = np.squeeze(\n 
np.array(inter.exterior.coords[:-1]).reshape(1, -1))\n crop_poly[0::2] -= xmin\n crop_poly[1::2] -= ymin\n crop_segm.append(crop_poly.tolist())\n else:\n continue\n return crop_segm\n\n def _crop_rle(rle, crop, height, width):\n if 'counts' in rle and type(rle['counts']) == list:\n rle = mask_util.frPyObjects(rle, height, width)\n mask = mask_util.decode(rle)\n mask = mask[crop[1]:crop[3], crop[0]:crop[2]]\n rle = mask_util.encode(np.array(mask, order='F', dtype=np.uint8))\n return rle\n\n crop_segms = []\n for id in valid_ids:\n segm = segms[id]\n if is_poly(segm):\n import copy\n import shapely.ops\n from shapely.geometry import Polygon, MultiPolygon, GeometryCollection\n logging.getLogger(\"shapely\").setLevel(logging.WARNING)\n # Polygon format\n crop_segms.append(_crop_poly(segm, crop))\n else:\n # RLE format\n import pycocotools.mask as mask_util\n crop_segms.append(_crop_rle(segm, crop, height, width))\n return crop_segms\n\n def apply(self, sample, context=None):\n if 'gt_bbox' in sample and len(sample['gt_bbox']) == 0:\n return sample\n\n h, w = sample['image'].shape[:2]\n gt_bbox = sample['gt_bbox']\n\n # NOTE Original method attempts to generate one candidate for each\n # threshold then randomly sample one from the resulting list.\n # Here a short circuit approach is taken, i.e., randomly choose a\n # threshold and attempt to find a valid crop, and simply return the\n # first one found.\n # The probability is not exactly the same, kinda resembling the\n # \"Monty Hall\" problem. Actually carrying out the attempts will affect\n # observability (just like opening doors in the \"Monty Hall\" game).\n thresholds = list(self.thresholds)\n if self.allow_no_crop:\n thresholds.append('no_crop')\n np.random.shuffle(thresholds)\n\n for thresh in thresholds:\n if thresh == 'no_crop':\n return sample\n\n found = False\n for i in range(self.num_attempts):\n scale = np.random.uniform(*self.scaling)\n if self.aspect_ratio is not None:\n min_ar, max_ar = self.aspect_ratio\n aspect_ratio = np.random.uniform(\n max(min_ar, scale**2), min(max_ar, scale**-2))\n h_scale = scale / np.sqrt(aspect_ratio)\n w_scale = scale * np.sqrt(aspect_ratio)\n else:\n h_scale = np.random.uniform(*self.scaling)\n w_scale = np.random.uniform(*self.scaling)\n crop_h = h * h_scale\n crop_w = w * w_scale\n if self.aspect_ratio is None:\n if crop_h / crop_w < 0.5 or crop_h / crop_w > 2.0:\n continue\n\n crop_h = int(crop_h)\n crop_w = int(crop_w)\n crop_y = np.random.randint(0, h - crop_h)\n crop_x = np.random.randint(0, w - crop_w)\n crop_box = [crop_x, crop_y, crop_x + crop_w, crop_y + crop_h]\n iou = self._iou_matrix(\n gt_bbox, np.array(\n [crop_box], dtype=np.float32))\n if iou.max() < thresh:\n continue\n\n if self.cover_all_box and iou.min() < thresh:\n continue\n\n cropped_box, valid_ids = self._crop_box_with_center_constraint(\n gt_bbox, np.array(\n crop_box, dtype=np.float32))\n if valid_ids.size > 0:\n found = True\n break\n\n if found:\n if self.is_mask_crop and 'gt_poly' in sample and len(sample[\n 'gt_poly']) > 0:\n crop_polys = self.crop_segms(\n sample['gt_poly'],\n valid_ids,\n np.array(\n crop_box, dtype=np.int64),\n h,\n w)\n if [] in crop_polys:\n delete_id = list()\n valid_polys = list()\n for id, crop_poly in enumerate(crop_polys):\n if crop_poly == []:\n delete_id.append(id)\n else:\n valid_polys.append(crop_poly)\n valid_ids = np.delete(valid_ids, delete_id)\n if len(valid_polys) == 0:\n return sample\n sample['gt_poly'] = valid_polys\n else:\n sample['gt_poly'] = crop_polys\n\n if 'gt_segm' in 
sample:\n sample['gt_segm'] = self._crop_segm(sample['gt_segm'],\n crop_box)\n sample['gt_segm'] = np.take(\n sample['gt_segm'], valid_ids, axis=0)\n\n sample['image'] = self._crop_image(sample['image'], crop_box)\n sample['gt_bbox'] = np.take(cropped_box, valid_ids, axis=0)\n sample['gt_class'] = np.take(\n sample['gt_class'], valid_ids, axis=0)\n if 'gt_score' in sample:\n sample['gt_score'] = np.take(\n sample['gt_score'], valid_ids, axis=0)\n\n if 'is_crowd' in sample:\n sample['is_crowd'] = np.take(\n sample['is_crowd'], valid_ids, axis=0)\n return sample\n\n return sample\n\n def _iou_matrix(self, a, b):\n tl_i = np.maximum(a[:, np.newaxis, :2], b[:, :2])\n br_i = np.minimum(a[:, np.newaxis, 2:], b[:, 2:])\n\n area_i = np.prod(br_i - tl_i, axis=2) * (tl_i < br_i).all(axis=2)\n area_a = np.prod(a[:, 2:] - a[:, :2], axis=1)\n area_b = np.prod(b[:, 2:] - b[:, :2], axis=1)\n area_o = (area_a[:, np.newaxis] + area_b - area_i)\n return area_i / (area_o + 1e-10)\n\n def _crop_box_with_center_constraint(self, box, crop):\n cropped_box = box.copy()\n\n cropped_box[:, :2] = np.maximum(box[:, :2], crop[:2])\n cropped_box[:, 2:] = np.minimum(box[:, 2:], crop[2:])\n cropped_box[:, :2] -= crop[:2]\n cropped_box[:, 2:] -= crop[:2]\n\n centers = (box[:, :2] + box[:, 2:]) / 2\n valid = np.logical_and(crop[:2] <= centers,\n centers < crop[2:]).all(axis=1)\n valid = np.logical_and(\n valid, (cropped_box[:, :2] < cropped_box[:, 2:]).all(axis=1))\n\n return cropped_box, np.where(valid)[0]\n\n def _crop_image(self, img, crop):\n x1, y1, x2, y2 = crop\n return img[y1:y2, x1:x2, :]\n\n def _crop_segm(self, segm, crop):\n x1, y1, x2, y2 = crop\n return segm[:, y1:y2, x1:x2]\n\n\n@register_op\nclass RandomScaledCropOp(BaseOperator):\n \"\"\"Resize image and bbox based on long side (with optional random scaling),\n then crop or pad image to target size.\n Args:\n target_dim (int): target size.\n scale_range (list): random scale range.\n interp (int): interpolation method, default to `cv2.INTER_LINEAR`.\n \"\"\"\n\n def __init__(self,\n target_dim=512,\n scale_range=[.1, 2.],\n interp=cv2.INTER_LINEAR):\n super(RandomScaledCropOp, self).__init__()\n self.target_dim = target_dim\n self.scale_range = scale_range\n self.interp = interp\n\n def apply(self, sample, context=None):\n img = sample['image']\n h, w = img.shape[:2]\n random_scale = np.random.uniform(*self.scale_range)\n dim = self.target_dim\n random_dim = int(dim * random_scale)\n dim_max = max(h, w)\n scale = random_dim / dim_max\n resize_w = w * scale\n resize_h = h * scale\n offset_x = int(max(0, np.random.uniform(0., resize_w - dim)))\n offset_y = int(max(0, np.random.uniform(0., resize_h - dim)))\n\n img = cv2.resize(img, (resize_w, resize_h), interpolation=self.interp)\n img = np.array(img)\n canvas = np.zeros((dim, dim, 3), dtype=img.dtype)\n canvas[:min(dim, resize_h), :min(dim, resize_w), :] = img[\n offset_y:offset_y + dim, offset_x:offset_x + dim, :]\n sample['image'] = canvas\n sample['im_shape'] = np.asarray([resize_h, resize_w], dtype=np.float32)\n scale_factor = sample['sacle_factor']\n sample['scale_factor'] = np.asarray(\n [scale_factor[0] * scale, scale_factor[1] * scale],\n dtype=np.float32)\n\n if 'gt_bbox' in sample and len(sample['gt_bbox']) > 0:\n scale_array = np.array([scale, scale] * 2, dtype=np.float32)\n shift_array = np.array([offset_x, offset_y] * 2, dtype=np.float32)\n boxes = sample['gt_bbox'] * scale_array - shift_array\n boxes = np.clip(boxes, 0, dim - 1)\n # filter boxes with no area\n area = np.prod(boxes[..., 2:] - 
boxes[..., :2], axis=1)\n valid = (area > 1.).nonzero()[0]\n sample['gt_bbox'] = boxes[valid]\n sample['gt_class'] = sample['gt_class'][valid]\n\n return sample\n\n\n@register_op\nclass CutmixOp(BaseOperator):\n def __init__(self, alpha=1.5, beta=1.5):\n \"\"\" \n CutMix: Regularization Strategy to Train Strong Classifiers with Localizable Features, see https://arxiv.org/abs/1905.04899\n Cutmix image and gt_bbbox/gt_score\n Args:\n alpha (float): alpha parameter of beta distribute\n beta (float): beta parameter of beta distribute\n \"\"\"\n super(CutmixOp, self).__init__()\n self.alpha = alpha\n self.beta = beta\n if self.alpha <= 0.0:\n raise ValueError(\"alpha shold be positive in {}\".format(self))\n if self.beta <= 0.0:\n raise ValueError(\"beta shold be positive in {}\".format(self))\n\n def apply_image(self, img1, img2, factor):\n \"\"\" _rand_bbox \"\"\"\n h = max(img1.shape[0], img2.shape[0])\n w = max(img1.shape[1], img2.shape[1])\n cut_rat = np.sqrt(1. - factor)\n\n cut_w = np.int(w * cut_rat)\n cut_h = np.int(h * cut_rat)\n\n # uniform\n cx = np.random.randint(w)\n cy = np.random.randint(h)\n\n bbx1 = np.clip(cx - cut_w // 2, 0, w - 1)\n bby1 = np.clip(cy - cut_h // 2, 0, h - 1)\n bbx2 = np.clip(cx + cut_w // 2, 0, w - 1)\n bby2 = np.clip(cy + cut_h // 2, 0, h - 1)\n\n img_1 = np.zeros((h, w, img1.shape[2]), 'float32')\n img_1[:img1.shape[0], :img1.shape[1], :] = \\\n img1.astype('float32')\n img_2 = np.zeros((h, w, img2.shape[2]), 'float32')\n img_2[:img2.shape[0], :img2.shape[1], :] = \\\n img2.astype('float32')\n img_1[bby1:bby2, bbx1:bbx2, :] = img2[bby1:bby2, bbx1:bbx2, :]\n return img_1\n\n def __call__(self, sample, context=None):\n if not isinstance(sample, Sequence):\n return sample\n\n assert len(sample) == 2, 'cutmix need two samples'\n\n factor = np.random.beta(self.alpha, self.beta)\n factor = max(0.0, min(1.0, factor))\n if factor >= 1.0:\n return sample[0]\n if factor <= 0.0:\n return sample[1]\n img1 = sample[0]['image']\n img2 = sample[1]['image']\n img = self.apply_image(img1, img2, factor)\n gt_bbox1 = sample[0]['gt_bbox']\n gt_bbox2 = sample[1]['gt_bbox']\n gt_bbox = np.concatenate((gt_bbox1, gt_bbox2), axis=0)\n gt_class1 = sample[0]['gt_class']\n gt_class2 = sample[1]['gt_class']\n gt_class = np.concatenate((gt_class1, gt_class2), axis=0)\n gt_score1 = sample[0]['gt_score']\n gt_score2 = sample[1]['gt_score']\n gt_score = np.concatenate(\n (gt_score1 * factor, gt_score2 * (1. 
- factor)), axis=0)\n sample = sample[0]\n sample['image'] = img\n sample['gt_bbox'] = gt_bbox\n sample['gt_score'] = gt_score\n sample['gt_class'] = gt_class\n return sample\n\n\n@register_op\nclass MixupOp(BaseOperator):\n def __init__(self, alpha=1.5, beta=1.5):\n \"\"\" Mixup image and gt_bbbox/gt_score\n Args:\n alpha (float): alpha parameter of beta distribute\n beta (float): beta parameter of beta distribute\n \"\"\"\n super(MixupOp, self).__init__()\n self.alpha = alpha\n self.beta = beta\n if self.alpha <= 0.0:\n raise ValueError(\"alpha shold be positive in {}\".format(self))\n if self.beta <= 0.0:\n raise ValueError(\"beta shold be positive in {}\".format(self))\n\n def apply_image(self, img1, img2, factor):\n h = max(img1.shape[0], img2.shape[0])\n w = max(img1.shape[1], img2.shape[1])\n img = np.zeros((h, w, img1.shape[2]), 'float32')\n img[:img1.shape[0], :img1.shape[1], :] = \\\n img1.astype('float32') * factor\n img[:img2.shape[0], :img2.shape[1], :] += \\\n img2.astype('float32') * (1.0 - factor)\n return img.astype('uint8')\n\n def __call__(self, sample, context=None):\n if not isinstance(sample, Sequence):\n return sample\n\n assert len(sample) == 2, 'mixup need two samples'\n\n factor = np.random.beta(self.alpha, self.beta)\n factor = max(0.0, min(1.0, factor))\n if factor >= 1.0:\n return sample[0]\n if factor <= 0.0:\n return sample[1]\n im = self.apply_image(sample[0]['image'], sample[1]['image'], factor)\n result = copy.deepcopy(sample[0])\n result['image'] = im\n # apply bbox and score\n if 'gt_bbox' in sample[0]:\n gt_bbox1 = sample[0]['gt_bbox']\n gt_bbox2 = sample[1]['gt_bbox']\n gt_bbox = np.concatenate((gt_bbox1, gt_bbox2), axis=0)\n result['gt_bbox'] = gt_bbox\n if 'gt_class' in sample[0]:\n gt_class1 = sample[0]['gt_class']\n gt_class2 = sample[1]['gt_class']\n gt_class = np.concatenate((gt_class1, gt_class2), axis=0)\n result['gt_class'] = gt_class\n\n gt_score1 = np.ones_like(sample[0]['gt_class'])\n gt_score2 = np.ones_like(sample[1]['gt_class'])\n gt_score = np.concatenate(\n (gt_score1 * factor, gt_score2 * (1. 
- factor)), axis=0)\n result['gt_score'] = gt_score\n if 'is_crowd' in sample[0]:\n is_crowd1 = sample[0]['is_crowd']\n is_crowd2 = sample[1]['is_crowd']\n is_crowd = np.concatenate((is_crowd1, is_crowd2), axis=0)\n result['is_crowd'] = is_crowd\n\n return result\n\n\n@register_op\nclass NormalizeBoxOp(BaseOperator):\n \"\"\"Transform the bounding box's coornidates to [0,1].\"\"\"\n\n def __init__(self):\n super(NormalizeBoxOp, self).__init__()\n\n def apply(self, sample, context):\n im = sample['image']\n gt_bbox = sample['gt_bbox']\n height, width, _ = im.shape\n for i in range(gt_bbox.shape[0]):\n gt_bbox[i][0] = gt_bbox[i][0] / width\n gt_bbox[i][1] = gt_bbox[i][1] / height\n gt_bbox[i][2] = gt_bbox[i][2] / width\n gt_bbox[i][3] = gt_bbox[i][3] / height\n sample['gt_bbox'] = gt_bbox\n\n if 'gt_keypoint' in sample.keys():\n gt_keypoint = sample['gt_keypoint']\n\n for i in range(gt_keypoint.shape[1]):\n if i % 2:\n gt_keypoint[:, i] = gt_keypoint[:, i] / height\n else:\n gt_keypoint[:, i] = gt_keypoint[:, i] / width\n sample['gt_keypoint'] = gt_keypoint\n\n return sample\n\n\n@register_op\nclass BboxXYXY2XYWHOp(BaseOperator):\n \"\"\"\n Convert bbox XYXY format to XYWH format.\n \"\"\"\n\n def __init__(self):\n super(BboxXYXY2XYWHOp, self).__init__()\n\n def apply(self, sample, context=None):\n assert 'gt_bbox' in sample\n bbox = sample['gt_bbox']\n bbox[:, 2:4] = bbox[:, 2:4] - bbox[:, :2]\n bbox[:, :2] = bbox[:, :2] + bbox[:, 2:4] / 2.\n sample['gt_bbox'] = bbox\n return sample\n\n\n@register_op\nclass PadBoxOp(BaseOperator):\n def __init__(self, num_max_boxes=50):\n \"\"\"\n Pad zeros to bboxes if number of bboxes is less than num_max_boxes.\n Args:\n num_max_boxes (int): the max number of bboxes\n \"\"\"\n self.num_max_boxes = num_max_boxes\n super(PadBoxOp, self).__init__()\n\n def apply(self, sample, context=None):\n assert 'gt_bbox' in sample\n bbox = sample['gt_bbox']\n gt_num = min(self.num_max_boxes, len(bbox))\n num_max = self.num_max_boxes\n # fields = context['fields'] if context else []\n pad_bbox = np.zeros((num_max, 4), dtype=np.float32)\n if gt_num > 0:\n pad_bbox[:gt_num, :] = bbox[:gt_num, :]\n sample['gt_bbox'] = pad_bbox\n if 'gt_class' in sample:\n pad_class = np.zeros((num_max, ), dtype=np.int32)\n if gt_num > 0:\n pad_class[:gt_num] = sample['gt_class'][:gt_num, 0]\n sample['gt_class'] = pad_class\n if 'gt_score' in sample:\n pad_score = np.zeros((num_max, ), dtype=np.float32)\n if gt_num > 0:\n pad_score[:gt_num] = sample['gt_score'][:gt_num, 0]\n sample['gt_score'] = pad_score\n # in training, for example in op ExpandImage,\n # the bbox and gt_class is expandded, but the difficult is not,\n # so, judging by it's length\n if 'difficult' in sample:\n pad_diff = np.zeros((num_max, ), dtype=np.int32)\n if gt_num > 0:\n pad_diff[:gt_num] = sample['difficult'][:gt_num, 0]\n sample['difficult'] = pad_diff\n if 'is_crowd' in sample:\n pad_crowd = np.zeros((num_max, ), dtype=np.int32)\n if gt_num > 0:\n pad_crowd[:gt_num] = sample['is_crowd'][:gt_num, 0]\n sample['is_crowd'] = pad_crowd\n return sample\n\n\n@register_op\nclass DebugVisibleImageOp(BaseOperator):\n \"\"\"\n In debug mode, visualize images according to `gt_box`.\n (Currently only supported when not cropping and flipping image.)\n \"\"\"\n\n def __init__(self, output_dir='output/debug', is_normalized=False):\n super(DebugVisibleImageOp, self).__init__()\n self.is_normalized = is_normalized\n self.output_dir = output_dir\n if not os.path.isdir(output_dir):\n os.makedirs(output_dir)\n if not 
isinstance(self.is_normalized, bool):\n raise TypeError(\"{}: input type is invalid.\".format(self))\n\n def apply(self, sample, context=None):\n image = Image.open(sample['im_file']).convert('RGB')\n out_file_name = sample['im_file'].split('/')[-1]\n width = sample['w']\n height = sample['h']\n gt_bbox = sample['gt_bbox']\n gt_class = sample['gt_class']\n draw = ImageDraw.Draw(image)\n for i in range(gt_bbox.shape[0]):\n if self.is_normalized:\n gt_bbox[i][0] = gt_bbox[i][0] * width\n gt_bbox[i][1] = gt_bbox[i][1] * height\n gt_bbox[i][2] = gt_bbox[i][2] * width\n gt_bbox[i][3] = gt_bbox[i][3] * height\n\n xmin, ymin, xmax, ymax = gt_bbox[i]\n draw.line(\n [(xmin, ymin), (xmin, ymax), (xmax, ymax), (xmax, ymin),\n (xmin, ymin)],\n width=2,\n fill='green')\n # draw label\n text = str(gt_class[i][0])\n tw, th = draw.textsize(text)\n draw.rectangle(\n [(xmin + 1, ymin - th), (xmin + tw + 1, ymin)], fill='green')\n draw.text((xmin + 1, ymin - th), text, fill=(255, 255, 255))\n\n if 'gt_keypoint' in sample.keys():\n gt_keypoint = sample['gt_keypoint']\n if self.is_normalized:\n for i in range(gt_keypoint.shape[1]):\n if i % 2:\n gt_keypoint[:, i] = gt_keypoint[:, i] * height\n else:\n gt_keypoint[:, i] = gt_keypoint[:, i] * width\n for i in range(gt_keypoint.shape[0]):\n keypoint = gt_keypoint[i]\n for j in range(int(keypoint.shape[0] / 2)):\n x1 = round(keypoint[2 * j]).astype(np.int32)\n y1 = round(keypoint[2 * j + 1]).astype(np.int32)\n draw.ellipse(\n (x1, y1, x1 + 5, y1 + 5), fill='green', outline='green')\n save_path = os.path.join(self.output_dir, out_file_name)\n image.save(save_path, quality=95)\n return sample\n\n\n@register_op\nclass Pad(BaseOperator):\n def __init__(self,\n size=None,\n size_divisor=32,\n pad_mode=0,\n offsets=None,\n fill_value=(127.5, 127.5, 127.5)):\n \"\"\"\n Pad image to a specified size or multiple of size_divisor. random target_size and interpolation method\n Args:\n size (int, Sequence): image target size, if None, pad to multiple of size_divisor, default None\n size_divisor (int): size divisor, default 32\n pad_mode (int): pad mode, currently only supports four modes [-1, 0, 1, 2]. if -1, use specified offsets\n if 0, only pad to right and bottom. if 1, pad according to center. if 2, only pad left and top\n fill_value (bool): rgb value of pad area, default (127.5, 127.5, 127.5)\n \"\"\"\n super(Pad, self).__init__()\n\n if not isinstance(size, (int, Sequence)):\n raise TypeError(\n \"Type of target_size is invalid when random_size is True. 
\\\n Must be List, now is {}\".format(type(size)))\n\n if isinstance(size, int):\n size = [size, size]\n\n assert pad_mode in [\n -1, 0, 1, 2\n ], 'currently only supports four modes [-1, 0, 1, 2]'\n assert pad_mode == -1 and offsets, 'if pad_mode is -1, offsets should not be None'\n\n self.size = size\n self.size_divisor = size_divisor\n self.pad_mode = pad_mode\n self.fill_value = fill_value\n self.offsets = offsets\n\n def apply_segm(self, segms, offsets, im_size, size):\n def _expand_poly(poly, x, y):\n expanded_poly = np.array(poly)\n expanded_poly[0::2] += x\n expanded_poly[1::2] += y\n return expanded_poly.tolist()\n\n def _expand_rle(rle, x, y, height, width, h, w):\n if 'counts' in rle and type(rle['counts']) == list:\n rle = mask_util.frPyObjects(rle, height, width)\n mask = mask_util.decode(rle)\n expanded_mask = np.full((h, w), 0).astype(mask.dtype)\n expanded_mask[y:y + height, x:x + width] = mask\n rle = mask_util.encode(\n np.array(\n expanded_mask, order='F', dtype=np.uint8))\n return rle\n\n x, y = offsets\n height, width = im_size\n h, w = size\n expanded_segms = []\n for segm in segms:\n if is_poly(segm):\n # Polygon format\n expanded_segms.append(\n [_expand_poly(poly, x, y) for poly in segm])\n else:\n # RLE format\n import pycocotools.mask as mask_util\n expanded_segms.append(\n _expand_rle(segm, x, y, height, width, h, w))\n return expanded_segms\n\n def apply_bbox(self, bbox, offsets):\n return bbox + np.array(offsets * 2, dtype=np.float32)\n\n def apply_keypoint(self, keypoints, offsets):\n n = len(keypoints[0]) // 2\n return keypoints + np.array(offsets * n, dtype=np.float32)\n\n def apply_image(self, image, offsets, im_size, size):\n x, y = offsets\n im_h, im_w = im_size\n h, w = size\n canvas = np.ones((h, w, 3), dtype=np.float32)\n canvas *= np.array(self.fill_value, dtype=np.float32)\n canvas[y:y + im_h, x:x + im_w, :] = image.astype(np.float32)\n return canvas\n\n def apply(self, sample, context=None):\n im = sample['image']\n im_h, im_w = im.shape[:2]\n if self.size:\n h, w = self.size\n assert (\n im_h < h and im_w < w\n ), '(h, w) of target size should be greater than (im_h, im_w)'\n else:\n h = np.ceil(im_h // self.size_divisor) * self.size_divisor\n w = np.ceil(im_w / self.size_divisor) * self.size_divisor\n\n if h == im_h and w == im_w:\n return sample\n\n if self.pad_mode == -1:\n offset_x, offset_y = self.offsets\n elif self.pad_mode == 0:\n offset_y, offset_x = 0, 0\n elif self.pad_mode == 1:\n offset_y, offset_x = (h - im_h) // 2, (w - im_w) // 2\n else:\n offset_y, offset_x = h - im_h, w - im_w\n\n offsets, im_size, size = [offset_x, offset_y], [im_h, im_w], [h, w]\n\n sample['image'] = self.apply_image(im, offsets, im_size, size)\n\n if self.pad_mode == 0:\n return sample\n if 'gt_bbox' in sample and len(sample['gt_bbox']) > 0:\n sample['gt_bbox'] = self.apply_bbox(sample['gt_bbox'], offsets)\n\n if 'gt_poly' in sample and len(sample['gt_poly']) > 0:\n sample['gt_poly'] = self.apply_segm(sample['gt_poly'], offsets,\n im_size, size)\n\n if 'gt_keypoint' in sample and len(sample['gt_keypoint']) > 0:\n sample['gt_keypoint'] = self.apply_keypoint(sample['gt_keypoint'],\n offsets)\n\n return sample\n\n\n@register_op\nclass Poly2Mask(BaseOperator):\n \"\"\"\n gt poly to mask annotations\n \"\"\"\n\n def __init__(self):\n super(Poly2Mask, self).__init__()\n import pycocotools.mask as maskUtils\n self.maskutils = maskUtils\n\n def _poly2mask(self, mask_ann, img_h, img_w):\n if isinstance(mask_ann, list):\n # polygon -- a single object might consist of 
multiple parts\n # we merge all parts into one mask rle code\n rles = self.maskutils.frPyObjects(mask_ann, img_h, img_w)\n rle = self.maskutils.merge(rles)\n elif isinstance(mask_ann['counts'], list):\n # uncompressed RLE\n rle = self.maskutils.frPyObjects(mask_ann, img_h, img_w)\n else:\n # rle\n rle = mask_ann\n mask = self.maskutils.decode(rle)\n return mask\n\n def apply(self, sample, context=None):\n assert 'gt_poly' in sample\n im_h = sample['h']\n im_w = sample['w']\n masks = [\n self._poly2mask(gt_poly, im_h, im_w)\n for gt_poly in sample['gt_poly']\n ]\n sample['gt_segm'] = np.asarray(masks).astype(np.uint8)\n return sample\n" ]
[ [ "numpy.dot", "numpy.ones_like", "numpy.random.rand", "numpy.minimum", "numpy.min", "numpy.where", "numpy.cos", "numpy.frombuffer", "numpy.concatenate", "numpy.random.normal", "numpy.sin", "numpy.max", "numpy.full", "numpy.logical_and", "numpy.take", "numpy.prod", "numpy.random.randint", "numpy.sqrt", "numpy.expand_dims", "numpy.array", "numpy.int", "numpy.delete", "numpy.zeros", "numpy.random.shuffle", "numpy.clip", "numpy.ceil", "numpy.asarray", "numpy.random.permutation", "numpy.ones", "numpy.random.beta", "numpy.random.uniform", "numpy.maximum" ] ]
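A minimal, self-contained sketch of the pairwise-IoU computation performed by `RandomCropOp._iou_matrix` in the record above; the two ground-truth boxes and the single candidate crop used here are illustrative assumptions, not values taken from the record.

```python
import numpy as np

# Boxes are [x1, y1, x2, y2]; `a` stands in for gt_bbox, `b` for the candidate crop box.
a = np.array([[0., 0., 10., 10.],
              [5., 5., 15., 15.]], dtype=np.float32)
b = np.array([[0., 0., 10., 10.]], dtype=np.float32)

tl = np.maximum(a[:, np.newaxis, :2], b[:, :2])   # top-left corners of the intersections
br = np.minimum(a[:, np.newaxis, 2:], b[:, 2:])   # bottom-right corners of the intersections
area_i = np.prod(br - tl, axis=2) * (tl < br).all(axis=2)   # zero when boxes do not overlap
area_a = np.prod(a[:, 2:] - a[:, :2], axis=1)
area_b = np.prod(b[:, 2:] - b[:, :2], axis=1)
iou = area_i / (area_a[:, np.newaxis] + area_b - area_i + 1e-10)
# iou is roughly [[1.0], [0.143]]: the first box equals the crop, the second shares a
# 5x5 intersection against a union area of 175.
```

In `apply`, a candidate crop is kept only if `iou.max()` clears the randomly chosen threshold (and, when `cover_all_box` is set, `iou.min()` as well).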
tomgtqq/deep-reinforcement-learning-navigation
[ "41a06c0b561f1275c08cb81ae7130417241ac199" ]
[ "(Optional) Challenge Learning from Pixels/model_Pixels.py" ]
[ "import torch\nimport torch.nn as nn\nimport torch.nn.functional as F\n\n\nclass QNetwork(nn.Module):\n def __init__(self, in_channels, num_actions, seed):\n super(QNetwork, self).__init__()\n self.conv1 = nn.Conv3d(in_channels=in_channels, out_channels=10, kernel_size=3, padding = 1)\n self.conv2 = nn.Conv3d(in_channels=10, out_channels=32, kernel_size=3, padding = 1)\n self.pool = nn.MaxPool3d((1,2,2))\n self.fc1 = nn.Linear(4704, num_actions)\n\n def forward(self, x):\n # print(x.size())\n output = self.pool(F.relu(self.conv1(x)))\n # print(output.size())\n output = self.pool(F.relu(self.conv2(output)))\n output = output.contiguous().view(output.size(0), -1)\n # print(output.size())\n output = self.fc1(output)\n\n return output\n" ]
[ [ "torch.nn.Linear", "torch.nn.MaxPool3d", "torch.nn.Conv3d" ] ]
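The `QNetwork` in the record above hard-codes `nn.Linear(4704, num_actions)`, so only inputs whose pooled feature map flattens to 4704 values pass through `forward`. Below is a small shape check, assuming a hypothetical input of `(batch, channels, depth, height, width) = (1, 3, 3, 28, 28)`, chosen only because 32 * 3 * 7 * 7 = 4704; the repository's actual observation shape is not shown in this record, and the import path is likewise an assumption.

```python
import torch
from model_Pixels import QNetwork  # assumes the file from the record above is importable

net = QNetwork(in_channels=3, num_actions=4, seed=0)
dummy = torch.zeros(1, 3, 3, 28, 28)   # illustrative shape only
q_values = net(dummy)                  # conv+pool twice: (1, 32, 3, 7, 7), flattened to 4704
print(q_values.shape)                  # torch.Size([1, 4])
```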
phamthuy1320/Face-Sketch
[ "fd147386cffa41572140cc165bbb0edb975cfeb5" ]
[ "evaluate.py" ]
[ "\"\"\"\nEvaluate the performance of face sketch synthesis.\nMethod: PCA and SVM face recognition.\nModified from face recognition examples from sklearn: \n http://scikit-learn.org/stable/tutorial/statistical_inference/putting_together.html\n\"\"\"\nfrom __future__ import print_function\n\nfrom time import time\nfrom sklearn.decomposition import PCA\n\nimport numpy as np\nimport cv2 as cv\nimport os\n\ndef load_dataset(dir_path, img_list=None, size=(200, 250)):\n data = []\n label = []\n for root, dirs, files in os.walk(dir_path):\n if img_list:\n files = img_list\n for f in sorted(files):\n img = cv.imread(os.path.join(root, f), 0)\n if img.shape != size[::-1]:\n out_size = np.array(size[::-1])\n border = out_size - img.shape\n border1 = np.floor(border / 2.).astype('int')\n border2 = np.ceil(border / 2.).astype('int')\n img = cv.copyMakeBorder(img, border1[0], border2[0], border1[1], border2[1], cv.BORDER_CONSTANT, value=255)\n data.append(img)\n if f.startswith('AR'):\n name = f[3:-4]\n else:\n name = f[:-4]\n label.append(int(name))\n return np.array(data), np.array(label)\n\n\ndef pred_acc(train_pca, y_train, test_pca, y_test, topk=1):\n pred = []\n # train_pca = train_pca / np.linalg.norm(train_pca)\n # test_pca = test_pca / np.linalg.norm(test_pca)\n \n for i in test_pca:\n dist = np.sum((i - train_pca)**2, 1)\n # dist = 1 - np.dot(train_pca, i)\n pred.append(np.argmin(dist))\n pred = np.array(pred)\n pred_label = y_train[pred]\n return (np.sum(pred_label==y_test)*1. / y_test.shape[0])\n\ndef PCA_recognition(train_dir, test_dirs, n_components=30, test_file_lists=None):\n X_train, y_train = load_dataset(train_dir)\n n_samples, h, w = X_train.shape\n X_train = X_train.reshape(-1, h*w)\n mean_face = np.mean(X_train, 0)\n # X_train = X_train - mean_face\n pca = PCA(n_components=n_components, svd_solver='auto', whiten=True).fit(X_train)\n\n for idx, test_dir in enumerate(test_dirs):\n if test_file_lists:\n X_test, y_test = load_dataset(test_dir, test_file_lists[idx])\n else:\n X_test, y_test = load_dataset(test_dir)\n X_test = X_test.reshape(-1, h*w)\n # X_test = X_test - mean_face \n X_train_pca = pca.transform(X_train)\n X_test_pca = pca.transform(X_test)\n print(test_dir.split('/')[-1], '\\t', pred_acc(X_train_pca, y_train, X_test_pca, y_test))\n\n\nif __name__ == '__main__':\n print('Evaluate CUHK dataset')\n train_dir = './Data/CUHK_student_test/sketches'\n test_dirs = ['./other_results/CUHK/MRF',\n './other_results/CUHK/MWF',\n './other_results/CUHK/SSD',\n './other_results/CUHK/FCNN',\n './other_results/CUHK/BFCN',\n './result_CUHK/sketch']\n PCA_recognition(train_dir, test_dirs, 98)\n\n print('Evaluate AR dataset')\n train_dir = './Data/AR_test/sketches'\n test_dirs = ['./other_results/AR/MRF',\n './other_results/AR/MWF',\n './other_results/AR/SSD',\n './other_results/AR/FCNN',\n './other_results/AR/BFCN',\n './result_AR/sketch']\n img_list =[x.strip() for x in open('./Data/AR/test.txt').readlines()]\n test_file_lists = [img_list, img_list, img_list, img_list, img_list, None]\n PCA_recognition(train_dir, test_dirs, 10, test_file_lists)\n\n\n\n" ]
[ [ "numpy.array", "numpy.ceil", "numpy.argmin", "numpy.sum", "numpy.mean", "sklearn.decomposition.PCA", "numpy.floor" ] ]
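In the evaluation script above, `pred_acc` classifies each test face by nearest neighbour in PCA space: squared Euclidean distance to every training embedding, then `argmin`. A toy illustration of that step with made-up 2-D embeddings and labels:

```python
import numpy as np

train_pca = np.array([[0.0, 0.0], [1.0, 0.0], [0.0, 1.0]])
y_train = np.array([7, 8, 9])
test_vec = np.array([0.9, 0.1])

dists = np.sum((test_vec - train_pca) ** 2, axis=1)   # [0.82, 0.02, 1.62]
pred_label = y_train[np.argmin(dists)]                # -> 8, the closest training identity
```

Accuracy is then the fraction of test images whose predicted identity matches `y_test`.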
jisqyv/tensorflow
[ "4c28fbf2c0d20ed714fd0209913b868a7955ac5a" ]
[ "tensorflow/python/estimator/canned/dnn.py" ]
[ "# Copyright 2017 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"Deep Neural Network estimators.\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport six\n\nfrom tensorflow.python.estimator import estimator\nfrom tensorflow.python.estimator import model_fn\nfrom tensorflow.python.estimator import warm_starting_util\nfrom tensorflow.python.estimator.canned import head as head_lib\nfrom tensorflow.python.estimator.canned import optimizers\nfrom tensorflow.python.feature_column import feature_column as feature_column_lib\nfrom tensorflow.python.layers import core as core_layers\nfrom tensorflow.python.ops import init_ops\nfrom tensorflow.python.ops import nn\nfrom tensorflow.python.ops import partitioned_variables\nfrom tensorflow.python.ops import variable_scope\nfrom tensorflow.python.summary import summary\nfrom tensorflow.python.training import training_util\n\n# The default learning rate of 0.05 is a historical artifact of the initial\n# implementation, but seems a reasonable choice.\n_LEARNING_RATE = 0.05\n\n\ndef _add_hidden_layer_summary(value, tag):\n summary.scalar('%s/fraction_of_zero_values' % tag, nn.zero_fraction(value))\n summary.histogram('%s/activation' % tag, value)\n\n\ndef _dnn_logit_fn_builder(units, hidden_units, feature_columns, activation_fn,\n dropout, input_layer_partitioner):\n \"\"\"Function builder for a dnn logit_fn.\n\n Args:\n units: An int indicating the dimension of the logit layer. In the\n MultiHead case, this should be the sum of all component Heads' logit\n dimensions.\n hidden_units: Iterable of integer number of hidden units per layer.\n feature_columns: Iterable of `feature_column._FeatureColumn` model inputs.\n activation_fn: Activation function applied to each layer.\n dropout: When not `None`, the probability we will drop out a given\n coordinate.\n input_layer_partitioner: Partitioner for input layer.\n\n Returns:\n A logit_fn (see below).\n\n Raises:\n ValueError: If units is not an int.\n \"\"\"\n if not isinstance(units, int):\n raise ValueError('units must be an int. Given type: {}'.format(\n type(units)))\n\n def dnn_logit_fn(features, mode):\n \"\"\"Deep Neural Network logit_fn.\n\n Args:\n features: This is the first item returned from the `input_fn`\n passed to `train`, `evaluate`, and `predict`. This should be a\n single `Tensor` or `dict` of same.\n mode: Optional. Specifies if this training, evaluation or prediction. 
See\n `ModeKeys`.\n\n Returns:\n A `Tensor` representing the logits, or a list of `Tensor`'s representing\n multiple logits in the MultiHead case.\n \"\"\"\n with variable_scope.variable_scope(\n 'input_from_feature_columns',\n values=tuple(six.itervalues(features)),\n partitioner=input_layer_partitioner):\n net = feature_column_lib.input_layer(\n features=features, feature_columns=feature_columns)\n for layer_id, num_hidden_units in enumerate(hidden_units):\n with variable_scope.variable_scope(\n 'hiddenlayer_%d' % layer_id, values=(net,)) as hidden_layer_scope:\n net = core_layers.dense(\n net,\n units=num_hidden_units,\n activation=activation_fn,\n kernel_initializer=init_ops.glorot_uniform_initializer(),\n name=hidden_layer_scope)\n if dropout is not None and mode == model_fn.ModeKeys.TRAIN:\n net = core_layers.dropout(net, rate=dropout, training=True)\n _add_hidden_layer_summary(net, hidden_layer_scope.name)\n\n with variable_scope.variable_scope('logits', values=(net,)) as logits_scope:\n logits = core_layers.dense(\n net,\n units=units,\n activation=None,\n kernel_initializer=init_ops.glorot_uniform_initializer(),\n name=logits_scope)\n _add_hidden_layer_summary(logits, logits_scope.name)\n\n return logits\n\n return dnn_logit_fn\n\n\ndef _dnn_model_fn(features,\n labels,\n mode,\n head,\n hidden_units,\n feature_columns,\n optimizer='Adagrad',\n activation_fn=nn.relu,\n dropout=None,\n input_layer_partitioner=None,\n config=None):\n \"\"\"Deep Neural Net model_fn.\n\n Args:\n features: dict of `Tensor`.\n labels: `Tensor` of shape [batch_size, 1] or [batch_size] labels of\n dtype `int32` or `int64` in the range `[0, n_classes)`.\n mode: Defines whether this is training, evaluation or prediction.\n See `ModeKeys`.\n head: A `head_lib._Head` instance.\n hidden_units: Iterable of integer number of hidden units per layer.\n feature_columns: Iterable of `feature_column._FeatureColumn` model inputs.\n optimizer: String, `tf.Optimizer` object, or callable that creates the\n optimizer to use for training. If not specified, will use the Adagrad\n optimizer with a default learning rate of 0.05.\n activation_fn: Activation function applied to each layer.\n dropout: When not `None`, the probability we will drop out a given\n coordinate.\n input_layer_partitioner: Partitioner for input layer. Defaults\n to `min_max_variable_partitioner` with `min_slice_size` 64 << 20.\n config: `RunConfig` object to configure the runtime settings.\n\n Returns:\n predictions: A dict of `Tensor` objects.\n loss: A scalar containing the loss of the step.\n train_op: The op for training.\n\n Raises:\n ValueError: If features has the wrong type.\n \"\"\"\n if not isinstance(features, dict):\n raise ValueError('features should be a dictionary of `Tensor`s. 
'\n 'Given type: {}'.format(type(features)))\n\n optimizer = optimizers.get_optimizer_instance(\n optimizer, learning_rate=_LEARNING_RATE)\n num_ps_replicas = config.num_ps_replicas if config else 0\n\n partitioner = partitioned_variables.min_max_variable_partitioner(\n max_partitions=num_ps_replicas)\n with variable_scope.variable_scope(\n 'dnn',\n values=tuple(six.itervalues(features)),\n partitioner=partitioner):\n input_layer_partitioner = input_layer_partitioner or (\n partitioned_variables.min_max_variable_partitioner(\n max_partitions=num_ps_replicas,\n min_slice_size=64 << 20))\n\n logit_fn = _dnn_logit_fn_builder(\n units=head.logits_dimension,\n hidden_units=hidden_units,\n feature_columns=feature_columns,\n activation_fn=activation_fn,\n dropout=dropout,\n input_layer_partitioner=input_layer_partitioner)\n logits = logit_fn(features=features, mode=mode)\n\n def _train_op_fn(loss):\n \"\"\"Returns the op to optimize the loss.\"\"\"\n return optimizer.minimize(\n loss,\n global_step=training_util.get_global_step())\n\n return head.create_estimator_spec(\n features=features,\n mode=mode,\n labels=labels,\n train_op_fn=_train_op_fn,\n logits=logits)\n\n\nclass DNNClassifier(estimator.Estimator):\n \"\"\"A classifier for TensorFlow DNN models.\n\n Example:\n\n ```python\n categorical_feature_a = categorical_column_with_hash_bucket(...)\n categorical_feature_b = categorical_column_with_hash_bucket(...)\n\n categorical_feature_a_emb = embedding_column(\n categorical_column=categorical_feature_a, ...)\n categorical_feature_b_emb = embedding_column(\n categorical_column=categorical_feature_b, ...)\n\n estimator = DNNClassifier(\n feature_columns=[categorical_feature_a_emb, categorical_feature_b_emb],\n hidden_units=[1024, 512, 256])\n\n # Or estimator using the ProximalAdagradOptimizer optimizer with\n # regularization.\n estimator = DNNClassifier(\n feature_columns=[categorical_feature_a_emb, categorical_feature_b_emb],\n hidden_units=[1024, 512, 256],\n optimizer=tf.train.ProximalAdagradOptimizer(\n learning_rate=0.1,\n l1_regularization_strength=0.001\n ))\n\n # Or estimator with warm-starting from a previous checkpoint.\n estimator = DNNClassifier(\n feature_columns=[categorical_feature_a_emb, categorical_feature_b_emb],\n hidden_units=[1024, 512, 256],\n warm_start_from=\"/path/to/checkpoint/dir\")\n\n # Input builders\n def input_fn_train: # returns x, y\n pass\n estimator.train(input_fn=input_fn_train, steps=100)\n\n def input_fn_eval: # returns x, y\n pass\n metrics = estimator.evaluate(input_fn=input_fn_eval, steps=10)\n def input_fn_predict: # returns x, None\n pass\n predictions = estimator.predict(input_fn=input_fn_predict)\n ```\n\n Input of `train` and `evaluate` should have following features,\n otherwise there will be a `KeyError`:\n\n * if `weight_column` is not `None`, a feature with\n `key=weight_column` whose value is a `Tensor`.\n * for each `column` in `feature_columns`:\n - if `column` is a `_CategoricalColumn`, a feature with `key=column.name`\n whose `value` is a `SparseTensor`.\n - if `column` is a `_WeightedCategoricalColumn`, two features: the first\n with `key` the id column name, the second with `key` the weight column\n name. 
Both features' `value` must be a `SparseTensor`.\n - if `column` is a `_DenseColumn`, a feature with `key=column.name`\n whose `value` is a `Tensor`.\n\n Loss is calculated by using softmax cross entropy.\n\n @compatibility(eager)\n Estimators are not compatible with eager execution.\n @end_compatibility\n \"\"\"\n\n def __init__(\n self,\n hidden_units,\n feature_columns,\n model_dir=None,\n n_classes=2,\n weight_column=None,\n label_vocabulary=None,\n optimizer='Adagrad',\n activation_fn=nn.relu,\n dropout=None,\n input_layer_partitioner=None,\n config=None,\n warm_start_from=None,\n ):\n \"\"\"Initializes a `DNNClassifier` instance.\n\n Args:\n hidden_units: Iterable of number hidden units per layer. All layers are\n fully connected. Ex. `[64, 32]` means first layer has 64 nodes and\n second one has 32.\n feature_columns: An iterable containing all the feature columns used by\n the model. All items in the set should be instances of classes derived\n from `_FeatureColumn`.\n model_dir: Directory to save model parameters, graph and etc. This can\n also be used to load checkpoints from the directory into a estimator to\n continue training a previously saved model.\n n_classes: Number of label classes. Defaults to 2, namely binary\n classification. Must be > 1.\n weight_column: A string or a `_NumericColumn` created by\n `tf.feature_column.numeric_column` defining feature column representing\n weights. It is used to down weight or boost examples during training. It\n will be multiplied by the loss of the example. If it is a string, it is\n used as a key to fetch weight tensor from the `features`. If it is a\n `_NumericColumn`, raw tensor is fetched by key `weight_column.key`,\n then weight_column.normalizer_fn is applied on it to get weight tensor.\n label_vocabulary: A list of strings represents possible label values. If\n given, labels must be string type and have any value in\n `label_vocabulary`. If it is not given, that means labels are\n already encoded as integer or float within [0, 1] for `n_classes=2` and\n encoded as integer values in {0, 1,..., n_classes-1} for `n_classes`>2 .\n Also there will be errors if vocabulary is not provided and labels are\n string.\n optimizer: An instance of `tf.Optimizer` used to train the model. Defaults\n to Adagrad optimizer.\n activation_fn: Activation function applied to each layer. If `None`, will\n use `tf.nn.relu`.\n dropout: When not `None`, the probability we will drop out a given\n coordinate.\n input_layer_partitioner: Optional. Partitioner for input layer. Defaults\n to `min_max_variable_partitioner` with `min_slice_size` 64 << 20.\n config: `RunConfig` object to configure the runtime settings.\n warm_start_from: A string filepath to a checkpoint to warm-start from, or\n a `WarmStartSettings` object to fully configure warm-starting. 
If the\n string filepath is provided instead of a `WarmStartSettings`, then all\n weights are warm-started, and it is assumed that vocabularies and Tensor\n names are unchanged.\n \"\"\"\n if n_classes == 2:\n head = head_lib._binary_logistic_head_with_sigmoid_cross_entropy_loss( # pylint: disable=protected-access\n weight_column=weight_column,\n label_vocabulary=label_vocabulary)\n else:\n head = head_lib._multi_class_head_with_softmax_cross_entropy_loss( # pylint: disable=protected-access\n n_classes, weight_column=weight_column,\n label_vocabulary=label_vocabulary)\n\n def _model_fn(features, labels, mode, config):\n \"\"\"Call the defined shared _dnn_model_fn and possibly warm-start.\"\"\"\n estimator_spec = _dnn_model_fn(\n features=features,\n labels=labels,\n mode=mode,\n head=head,\n hidden_units=hidden_units,\n feature_columns=tuple(feature_columns or []),\n optimizer=optimizer,\n activation_fn=activation_fn,\n dropout=dropout,\n input_layer_partitioner=input_layer_partitioner,\n config=config)\n # pylint: disable=protected-access\n warm_start_settings = warm_starting_util._get_default_warm_start_settings(\n warm_start_from)\n if warm_start_settings:\n warm_starting_util._warm_start(warm_start_settings)\n # pylint: enable=protected-access\n\n return estimator_spec\n\n super(DNNClassifier, self).__init__(\n model_fn=_model_fn, model_dir=model_dir, config=config)\n\n\nclass DNNRegressor(estimator.Estimator):\n \"\"\"A regressor for TensorFlow DNN models.\n\n Example:\n\n ```python\n categorical_feature_a = categorical_column_with_hash_bucket(...)\n categorical_feature_b = categorical_column_with_hash_bucket(...)\n\n categorical_feature_a_emb = embedding_column(\n categorical_column=categorical_feature_a, ...)\n categorical_feature_b_emb = embedding_column(\n categorical_column=categorical_feature_b, ...)\n\n estimator = DNNRegressor(\n feature_columns=[categorical_feature_a_emb, categorical_feature_b_emb],\n hidden_units=[1024, 512, 256])\n\n # Or estimator using the ProximalAdagradOptimizer optimizer with\n # regularization.\n estimator = DNNRegressor(\n feature_columns=[categorical_feature_a_emb, categorical_feature_b_emb],\n hidden_units=[1024, 512, 256],\n optimizer=tf.train.ProximalAdagradOptimizer(\n learning_rate=0.1,\n l1_regularization_strength=0.001\n ))\n\n # Or estimator with warm-starting from a previous checkpoint.\n estimator = DNNRegressor(\n feature_columns=[categorical_feature_a_emb, categorical_feature_b_emb],\n hidden_units=[1024, 512, 256],\n warm_start_from=\"/path/to/checkpoint/dir\")\n\n # Input builders\n def input_fn_train: # returns x, y\n pass\n estimator.train(input_fn=input_fn_train, steps=100)\n\n def input_fn_eval: # returns x, y\n pass\n metrics = estimator.evaluate(input_fn=input_fn_eval, steps=10)\n def input_fn_predict: # returns x, None\n pass\n predictions = estimator.predict(input_fn=input_fn_predict)\n ```\n\n Input of `train` and `evaluate` should have following features,\n otherwise there will be a `KeyError`:\n\n * if `weight_column` is not `None`, a feature with\n `key=weight_column` whose value is a `Tensor`.\n * for each `column` in `feature_columns`:\n - if `column` is a `_CategoricalColumn`, a feature with `key=column.name`\n whose `value` is a `SparseTensor`.\n - if `column` is a `_WeightedCategoricalColumn`, two features: the first\n with `key` the id column name, the second with `key` the weight column\n name. 
Both features' `value` must be a `SparseTensor`.\n - if `column` is a `_DenseColumn`, a feature with `key=column.name`\n whose `value` is a `Tensor`.\n\n Loss is calculated by using mean squared error.\n\n @compatibility(eager)\n Estimators are not compatible with eager execution.\n @end_compatibility\n \"\"\"\n\n def __init__(\n self,\n hidden_units,\n feature_columns,\n model_dir=None,\n label_dimension=1,\n weight_column=None,\n optimizer='Adagrad',\n activation_fn=nn.relu,\n dropout=None,\n input_layer_partitioner=None,\n config=None,\n warm_start_from=None,\n ):\n \"\"\"Initializes a `DNNRegressor` instance.\n\n Args:\n hidden_units: Iterable of number hidden units per layer. All layers are\n fully connected. Ex. `[64, 32]` means first layer has 64 nodes and\n second one has 32.\n feature_columns: An iterable containing all the feature columns used by\n the model. All items in the set should be instances of classes derived\n from `_FeatureColumn`.\n model_dir: Directory to save model parameters, graph and etc. This can\n also be used to load checkpoints from the directory into a estimator to\n continue training a previously saved model.\n label_dimension: Number of regression targets per example. This is the\n size of the last dimension of the labels and logits `Tensor` objects\n (typically, these have shape `[batch_size, label_dimension]`).\n weight_column: A string or a `_NumericColumn` created by\n `tf.feature_column.numeric_column` defining feature column representing\n weights. It is used to down weight or boost examples during training. It\n will be multiplied by the loss of the example. If it is a string, it is\n used as a key to fetch weight tensor from the `features`. If it is a\n `_NumericColumn`, raw tensor is fetched by key `weight_column.key`,\n then weight_column.normalizer_fn is applied on it to get weight tensor.\n optimizer: An instance of `tf.Optimizer` used to train the model. Defaults\n to Adagrad optimizer.\n activation_fn: Activation function applied to each layer. If `None`, will\n use `tf.nn.relu`.\n dropout: When not `None`, the probability we will drop out a given\n coordinate.\n input_layer_partitioner: Optional. Partitioner for input layer. Defaults\n to `min_max_variable_partitioner` with `min_slice_size` 64 << 20.\n config: `RunConfig` object to configure the runtime settings.\n warm_start_from: A string filepath to a checkpoint to warm-start from, or\n a `WarmStartSettings` object to fully configure warm-starting. If the\n string filepath is provided instead of a `WarmStartSettings`, then all\n weights are warm-started, and it is assumed that vocabularies and Tensor\n names are unchanged.\n \"\"\"\n\n def _model_fn(features, labels, mode, config):\n \"\"\"Call the defined shared _dnn_model_fn and possibly warm-start.\"\"\"\n estimator_spec = _dnn_model_fn(\n features=features,\n labels=labels,\n mode=mode,\n head=head_lib. 
# pylint: disable=protected-access\n _regression_head_with_mean_squared_error_loss(\n label_dimension=label_dimension, weight_column=weight_column),\n hidden_units=hidden_units,\n feature_columns=tuple(feature_columns or []),\n optimizer=optimizer,\n activation_fn=activation_fn,\n dropout=dropout,\n input_layer_partitioner=input_layer_partitioner,\n config=config)\n # pylint: disable=protected-access\n warm_start_settings = warm_starting_util._get_default_warm_start_settings(\n warm_start_from)\n if warm_start_settings:\n warm_starting_util._warm_start(warm_start_settings)\n # pylint: enable=protected-access\n\n return estimator_spec\n\n super(DNNRegressor, self).__init__(\n model_fn=_model_fn, model_dir=model_dir, config=config)\n" ]
[ [ "tensorflow.python.ops.variable_scope.variable_scope", "tensorflow.python.layers.core.dropout", "tensorflow.python.estimator.warm_starting_util._warm_start", "tensorflow.python.estimator.canned.head._binary_logistic_head_with_sigmoid_cross_entropy_loss", "tensorflow.python.ops.partitioned_variables.min_max_variable_partitioner", "tensorflow.python.feature_column.feature_column.input_layer", "tensorflow.python.ops.init_ops.glorot_uniform_initializer", "tensorflow.python.training.training_util.get_global_step", "tensorflow.python.estimator.canned.optimizers.get_optimizer_instance", "tensorflow.python.ops.nn.zero_fraction", "tensorflow.python.summary.summary.histogram", "tensorflow.python.estimator.canned.head._multi_class_head_with_softmax_cross_entropy_loss", "tensorflow.python.estimator.warm_starting_util._get_default_warm_start_settings", "tensorflow.python.estimator.canned.head._regression_head_with_mean_squared_error_loss" ] ]
SHIMengjie/Machine-Learning-Andrew-Ng-Matlab
[ "2f54790e33dc538aea1534f40342791fb7c3abb1" ]
[ "python/ex5/ex5.py" ]
[ "import scipy.io as scio\nimport numpy as np\nimport matplotlib.pyplot as plt\n\nfrom linearCostFunction import linear_cost_function\nfrom trainLinearRegression import train_linear_reg\nfrom learningCurve import learning_curve\nfrom polyFeatures import ploy_feature\nfrom featuresNormalize import feature_nomalize\nfrom plotFit import plot_fit\nfrom validationCurve import validation_curve\n# ============================== 1.读取并显示数据 ==============================\ndata = scio.loadmat('ex5data1.mat')\nX = data['X']\nY = data['y'].flatten()\n\nXval = data['Xval']\nYval = data['yval'].flatten()\n\nXtest = data['Xtest']\nYtest = data['ytest'].flatten()\n\nplt.figure(1)\nplt.scatter(X,Y,c='r',marker='x')\nplt.xlabel('Change in water level (x)')\nplt.ylabel('Water folowing out of the dam (y)')\n# plt.show()\n\n# ============================ 2.计算代价和梯度 ==============================\n(m,n)= X.shape\ntheta = np.ones((n+1))\nlmd=1\ncost,grad = linear_cost_function(np.column_stack((np.ones(m),X)),Y,theta,lmd)\nprint('Cost at theta = [1 1]: {:0.6f}\\n(this value should be about 303.993192)'.format(cost))\nprint('Gradient at theta = [1 1]: {}\\n(this value should be about [-15.303016 598.250744]'.format(grad))\n\n# =========================== 3.训练线性回归\nlmd = 0\ntheta = train_linear_reg(np.column_stack((np.ones(m),X)),Y,lmd)\nplt.plot(X,np.column_stack((np.ones(m),X)).dot(theta))\n# plt.show()\n\n# =========================== 4.线性回归的学习曲线 ==============\nlmd = 0\nerror_train,error_val = learning_curve(np.column_stack((np.ones(m),X)),Y,\n\t\t\t\t\t\tnp.column_stack((np.ones(Yval.size),Xval)),Yval,lmd)\nplt.figure(2)\nplt.plot(range(m),error_train,range(m),error_val)\nplt.title('Learning Curve for Linear Regression')\nplt.legend(['Train', 'Cross Validation'])\nplt.xlabel('Number of Training Examples')\nplt.ylabel('Error')\nplt.axis([0, 13, 0, 150])\n# plt.show()\n\n# =============================== 5.投影特征为多项式 ================\np = 8\n# 投影和标准化训练集\nX_poly = ploy_feature(X,p)\nX_poly,mu,sigma = feature_nomalize(X_poly)\nX_poly = np.column_stack((np.ones(Y.size),X_poly))\n\n# 投影和标准化验证集\nX_poly_val = ploy_feature(Xval,p)\nX_poly_val -= mu\nX_poly_val /= sigma\nX_poly_val = np.column_stack((np.ones(Yval.size),X_poly_val))\n\n# 投影和标准化测试集\nX_poly_test = ploy_feature(Xtest,p)\nX_poly_test -= mu\nX_poly_test /= sigma\nX_poly_test = np.column_stack((np.ones(Ytest.size),X_poly_test))\n\nprint('Normalized Training Example 1 : \\n{}'.format(X_poly[0]))\n\n# ======================== 6.多项式特征的学习曲线\nlmd = 0\n# 绘制拟合曲线\ntheta = train_linear_reg(X_poly,Y,lmd)\nx_fit,y_fit = plot_fit(np.min(X),np.max(X),mu,sigma,theta,p)\nplt.figure(3)\nplt.scatter(X,Y,c='r',marker='x')\nplt.plot(x_fit,y_fit)\nplt.xlabel('Change in water level (x)')\nplt.ylabel('Water folowing out of the dam (y)')\nplt.ylim([0, 60])\nplt.title('Polynomial Regression Fit (lambda = {})'.format(lmd))\n# plt.show()\n# 计算代价误差\nerror_train, error_val = learning_curve(X_poly, Y, X_poly_val, Yval, lmd)\nplt.figure(4)\nplt.plot(np.arange(m), error_train, np.arange(m), error_val)\nplt.title('Polynomial Regression Learning Curve (lambda = {})'.format(lmd))\nplt.legend(['Train', 'Cross Validation'])\nplt.xlabel('Number of Training Examples')\nplt.ylabel('Error')\nplt.axis([0, 13, 0, 150])\n# plt.show()\nprint('Polynomial Regression (lambda = {})'.format(lmd))\nprint('# Training Examples\\tTrain Error\\t\\tCross Validation Error')\nfor i in range(m):\n print(' \\t{}\\t\\t{}\\t{}'.format(i, error_train[i], error_val[i]))\n\n# ======================= 
7.通过交叉验证集选择正则项系数lambda\nlambda_vec,error_train,error_val = validation_curve(X_poly,Y,X_poly_test,Ytest)\nplt.figure(5)\nplt.plot(lambda_vec, error_train, lambda_vec, error_val)\nplt.legend(['Train', 'Test Validation'])\nplt.xlabel('lambda')\nplt.ylabel('Error')\nplt.show()" ]
[ [ "numpy.max", "matplotlib.pyplot.xlabel", "matplotlib.pyplot.ylim", "scipy.io.loadmat", "numpy.ones", "matplotlib.pyplot.title", "matplotlib.pyplot.legend", "matplotlib.pyplot.figure", "matplotlib.pyplot.plot", "numpy.min", "matplotlib.pyplot.show", "numpy.arange", "matplotlib.pyplot.ylabel", "matplotlib.pyplot.scatter", "matplotlib.pyplot.axis" ] ]
bharadwaj1098/salina
[ "ac71cadacc54ae48377c67c88d269fc05209341c" ]
[ "salina/agents/dataloader.py" ]
[ "#\n# Copyright (c) Facebook, Inc. and its affiliates.\n#\n# This source code is licensed under the MIT license found in the\n# LICENSE file in the root directory of this source tree.\n#\n\nimport gym\nimport torch\nfrom gym.utils import seeding\nfrom torch.utils.data import DataLoader\n\nfrom salina import Agent\n\n\nclass ShuffledDatasetAgent(Agent):\n \"\"\"An agent that read a dataset in a shuffle order, in an infinite way.\n \"\"\"\n\n def __init__(\n self,\n dataset,\n batch_size,\n output_names=(\"x\", \"y\"),\n ):\n \"\"\"Create the agent\n\n Args:\n dataset ([torch.utils.data.Dataset]): the Dataset\n batch_size ([int]): The number of datapoints to write at each call\n output_names (tuple, optional): The name of the variables. Defaults to (\"x\", \"y\").\n \"\"\"\n super().__init__()\n self.output_names = output_names\n self.dataset = dataset\n self.batch_size = batch_size\n self.ghost_params = torch.nn.Parameter(torch.randn(()))\n\n def seed(self, seed=None):\n self.np_random, seed = seeding.np_random(seed)\n return [seed]\n\n def forward(self, **kwargs):\n \"\"\"Write a batch of data at timestep==0 in the workspace\"\"\"\n vs = []\n for k in range(self.batch_size):\n idx = self.np_random.randint(len(self.dataset))\n x = self.dataset[idx]\n xs = []\n for xx in x:\n if isinstance(xx, torch.Tensor):\n xs.append(xx.unsqueeze(0))\n else:\n xs.append(torch.tensor(xx).unsqueeze(0))\n vs.append(xs)\n\n vals = []\n for k in range(len(vs[0])):\n val = [v[k] for v in vs]\n val = torch.cat(val, dim=0)\n vals.append(val)\n\n for name, value in zip(self.output_names, vals):\n self.set((name, 0), value.to(self.ghost_params.device))\n\n\nclass DataLoaderAgent(Agent):\n \"\"\"An agent based on a DataLoader that read a single dataset\n Usage is: agent.forward(), then one has to check if agent.finished() is True or Not. If True, then no data have been written in the workspace since the reading of the daaset is terminated\n \"\"\"\n\n def __init__(self, dataloader, output_names=(\"x\", \"y\")):\n \"\"\" Create the agent based on a dataloader\n\n Args:\n dataloader ([DataLader]): The underlying pytoch daaloader object\n output_names (tuple, optional): Names of the variable to write in the workspace. Defaults to (\"x\", \"y\").\n \"\"\"\n super().__init__()\n self.dataloader = dataloader\n self.iter = iter(self.dataloader)\n self.output_names = output_names\n self._finished = False\n self.ghost_params = torch.nn.Parameter(torch.randn(()))\n\n def reset(self):\n self.iter = iter(self.dataloader)\n self._finished = False\n\n def finished(self):\n return self._finished\n\n def forward(self, **kwargs):\n try:\n output_values = next(self.iter)\n except StopIteration:\n self.iter = None\n self._finished = True\n else:\n for name, value in zip(self.output_names, output_values):\n self.set((name, 0), value.to(self.ghost_params.device))\n" ]
[ [ "torch.cat", "torch.tensor", "torch.randn" ] ]
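`ShuffledDatasetAgent.forward` in the record above builds a batch by unsqueezing each field of every sampled item and concatenating along dim 0 before writing the result at timestep 0. A standalone sketch of just that stacking pattern, using made-up tensors and no salina `Workspace`:

```python
import torch

items = [(torch.randn(4), torch.tensor(1)),
         (torch.randn(4), torch.tensor(0)),
         (torch.randn(4), torch.tensor(1))]              # stand-ins for dataset[idx] tuples
per_item = [[field.unsqueeze(0) for field in item] for item in items]
x_batch = torch.cat([fields[0] for fields in per_item], dim=0)   # shape (3, 4), the "x" value
y_batch = torch.cat([fields[1] for fields in per_item], dim=0)   # shape (3,), the "y" value
```

In the agent itself, non-tensor fields are first wrapped with `torch.tensor(...)` and the batch is moved to `ghost_params.device` before `set((name, 0), ...)`.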
daiquocnguyen/Graph-Transformer
[ "f9dc818a06ff0490f3d1bcc2e7b9ccd188950dcd" ]
[ "UGformerV1_TF/train_UGformerV1_UnSup.py" ]
[ "#! /usr/bin/env python\n\nimport tensorflow as tf\nimport numpy as np\nnp.random.seed(123456789)\ntf.compat.v1.set_random_seed(123456789)\n\nimport os\nimport time\nimport datetime\nfrom UGformerV1_UnSup import UGformerV1\nimport pickle as cPickle\nfrom argparse import ArgumentParser, ArgumentDefaultsHelpFormatter\nfrom scipy.sparse import coo_matrix\nfrom util import *\nimport statistics\nfrom sklearn.linear_model import LogisticRegression\n\n# Parameters\n# ==================================================\n\nparser = ArgumentParser(\"UGformerV1\", formatter_class=ArgumentDefaultsHelpFormatter, conflict_handler='resolve')\n\nparser.add_argument(\"--run_folder\", default=\"../\", help=\"\")\nparser.add_argument(\"--dataset\", default=\"PTC\", help=\"Name of the dataset.\")\nparser.add_argument(\"--embedding_dim\", default=4, type=int, help=\"Dimensionality of character embedding\")\nparser.add_argument(\"--learning_rate\", default=0.0005, type=float, help=\"Learning rate\")\nparser.add_argument(\"--batch_size\", default=4, type=int, help=\"Batch Size\")\nparser.add_argument(\"--idx_time\", default=1, type=int, help=\"\")\nparser.add_argument(\"--num_epochs\", default=50, type=int, help=\"Number of training epochs\")\nparser.add_argument(\"--saveStep\", default=1, type=int, help=\"\")\nparser.add_argument(\"--allow_soft_placement\", default=True, type=bool, help=\"Allow device soft device placement\")\nparser.add_argument(\"--log_device_placement\", default=False, type=bool, help=\"Log placement of ops on devices\")\nparser.add_argument(\"--model_name\", default='PTC', help=\"\")\nparser.add_argument('--num_sampled', default=512, type=int, help='')\nparser.add_argument(\"--dropout_keep_prob\", default=1.0, type=float, help=\"Dropout keep probability\")\nparser.add_argument(\"--num_timesteps\", default=6, type=int, help=\"Number of attention layers in Transformer. The number T of timesteps in Universal Transformer\")\nparser.add_argument(\"--num_heads\", default=1, type=int, help=\"Number of attention heads within each attention layer\")\nparser.add_argument(\"--ff_hidden_size\", default=1024, type=int, help=\"The hidden size for the feedforward layer\")\nparser.add_argument(\"--num_neighbors\", default=8, type=int, help=\"\")\nparser.add_argument('--degree_as_tag', action=\"store_false\", help='let the input node features be the degree of nodes (heuristics for unlabeled graph)')\nparser.add_argument('--fold_idx', type=int, default=0, help='the index of fold in 10-fold validation. 
0-9.')\nparser.add_argument(\"--num_hidden_layers\", default=1, type=int, help=\"\")\nargs = parser.parse_args()\nprint(args)\n\n# Load data\nprint(\"Loading data...\")\n\nuse_degree_as_tag = False\nif args.dataset == 'COLLAB' or args.dataset == 'IMDBBINARY' or args.dataset == 'IMDBMULTI':\n use_degree_as_tag = True\ngraphs, num_classes = load_data(args.dataset, use_degree_as_tag)\ngraph_labels = np.array([graph.label for graph in graphs])\nprint(len(graphs))\nprint(num_classes)\nfeature_dim_size = graphs[0].node_features.shape[1]\nprint(feature_dim_size)\nnum_nodes = sum([len(graph.g) for graph in graphs])\nhparams_batch_size = int(num_nodes/len(graphs)) + 1\nprint(num_nodes, hparams_batch_size)\nif \"REDDIT\" in args.dataset:\n feature_dim_size = 4\n\ndef get_Adj_matrix(batch_graph):\n edge_mat_list = []\n start_idx = [0]\n for i, graph in enumerate(batch_graph):\n start_idx.append(start_idx[i] + len(graph.g))\n edge_mat_list.append(graph.edge_mat + start_idx[i])\n Adj_block_idx = np.concatenate(edge_mat_list, 1)\n Adj_block_elem = np.ones(Adj_block_idx.shape[1])\n\n Adj_block_idx_row = Adj_block_idx[0,:]\n Adj_block_idx_cl = Adj_block_idx[1,:]\n\n # Adj_block = coo_matrix((Adj_block_elem, (Adj_block_idx_row, Adj_block_idx_cl)), shape=(start_idx[-1], start_idx[-1]))\n return Adj_block_idx_row, Adj_block_idx_cl\n\ndef get_graphpool(batch_graph):\n start_idx = [0]\n # compute the padded neighbor list\n for i, graph in enumerate(batch_graph):\n start_idx.append(start_idx[i] + len(graph.g))\n\n idx = []\n elem = []\n for i, graph in enumerate(batch_graph):\n elem.extend([1.0] * len(graph.g))\n idx.extend([[i, j] for j in range(start_idx[i], start_idx[i + 1], 1)])\n\n elem = np.array(elem)\n idx = np.array(idx)\n idx_row = idx[:,0]\n idx_cl = idx[:,1]\n\n graph_pool = coo_matrix((elem, (idx_row, idx_cl)), shape=(len(batch_graph), start_idx[-1]))\n return graph_pool\n # return idx_row, idx_cl\n\ngraph_pool = get_graphpool(graphs)\n\n\ndef get_idx_nodes(selected_graph_idx):\n idx_nodes = [np.where(graph_pool.getrow(i).toarray()[0] == 1)[0] for i in selected_graph_idx]\n idx_nodes = np.reshape(np.concatenate(idx_nodes), (-1, 1))\n return idx_nodes\n\ndef get_batch_data(batch_graph):\n X_concat = np.concatenate([graph.node_features for graph in batch_graph], 0)\n if \"REDDIT\" in args.dataset:\n X_concat = np.tile(X_concat, feature_dim_size) #[1,1,1,1]\n X_concat = X_concat * 0.01\n\n Adj_block_idx_row, Adj_block_idx_cl = get_Adj_matrix(batch_graph)\n dict_Adj_block = {}\n for i in range(len(Adj_block_idx_row)):\n if Adj_block_idx_row[i] not in dict_Adj_block:\n dict_Adj_block[Adj_block_idx_row[i]] = []\n dict_Adj_block[Adj_block_idx_row[i]].append(Adj_block_idx_cl[i])\n\n input_neighbors = []\n for input_node in range(X_concat.shape[0]):\n if input_node in dict_Adj_block:\n input_neighbors.append([input_node] + list(np.random.choice(dict_Adj_block[input_node], args.num_neighbors, replace=True)))\n else:\n input_neighbors.append([input_node for _ in range(args.num_neighbors + 1)])\n input_x = np.array(input_neighbors)\n\n return X_concat, input_x\n\nclass Batch_Loader(object):\n def __call__(self):\n selected_idx = np.random.permutation(len(graphs))[:args.batch_size]\n batch_graph = [graphs[idx] for idx in selected_idx]\n X_concat, input_x = get_batch_data(batch_graph)\n input_y = get_idx_nodes(selected_idx)\n return X_concat, input_x, input_y\n\nbatch_nodes = Batch_Loader()\n\nprint(\"Loading data... 
finished!\")\n\n# Training\n# ==================================================\nwith tf.Graph().as_default():\n session_conf = tf.compat.v1.ConfigProto(allow_soft_placement=args.allow_soft_placement, log_device_placement=args.log_device_placement)\n session_conf.gpu_options.allow_growth = True\n sess = tf.compat.v1.Session(config=session_conf)\n with sess.as_default():\n global_step = tf.Variable(0, name=\"global_step\", trainable=False)\n ugformer = UGformerV1(num_self_att_layers=args.num_timesteps,\n vocab_size=graph_pool.shape[1],\n hparams_batch_size=hparams_batch_size,\n num_sampled=args.num_sampled,\n feature_dim_size=feature_dim_size,\n ff_hidden_size=args.ff_hidden_size,\n seq_length=args.num_neighbors+1,\n num_GNN_layers=args.num_hidden_layers\n )\n\n # Define Training procedure\n optimizer = tf.compat.v1.train.AdamOptimizer(learning_rate=args.learning_rate)\n grads_and_vars = optimizer.compute_gradients(ugformer.total_loss)\n train_op = optimizer.apply_gradients(grads_and_vars, global_step=global_step)\n\n out_dir = os.path.abspath(os.path.join(args.run_folder, \"../runs_TF_UGformerV1_UnSup\", args.model_name))\n print(\"Writing to {}\\n\".format(out_dir))\n\n # Checkpoint directory. Tensorflow assumes this directory already exists so we need to create it\n checkpoint_dir = os.path.abspath(os.path.join(out_dir, \"checkpoints\"))\n checkpoint_prefix = os.path.join(checkpoint_dir, \"model\")\n if not os.path.exists(checkpoint_dir):\n os.makedirs(checkpoint_dir)\n\n # Initialize all variables\n sess.run(tf.global_variables_initializer())\n graph = tf.get_default_graph()\n\n def train_step(X_concat, input_x, input_y):\n \"\"\"\n A single training step\n \"\"\"\n feed_dict = {\n ugformer.input_x: input_x,\n ugformer.input_y: input_y,\n ugformer.X_concat:X_concat,\n ugformer.dropout_keep_prob: args.dropout_keep_prob\n }\n _, step, loss = sess.run([train_op, global_step, ugformer.total_loss], feed_dict)\n return loss\n\n write_acc = open(checkpoint_prefix + '_acc.txt', 'w')\n\n num_batches_per_epoch = int((len(graphs) - 1) / args.batch_size) + 1\n for epoch in range(1, args.num_epochs+1):\n loss = 0\n for _ in range(num_batches_per_epoch):\n X_concat, input_x, input_y = batch_nodes()\n loss += train_step(X_concat, input_x, input_y)\n # current_step = tf.compat.v1.train.global_step(sess, global_step)\n print(loss)\n\n # It will give tensor object\n node_embeddings = graph.get_tensor_by_name('W:0')\n node_embeddings = sess.run(node_embeddings)\n\n graph_embeddings = graph_pool.dot(node_embeddings)\n\n acc_10folds = []\n for fold_idx in range(10):\n train_idx, test_idx = separate_data_idx(graphs, fold_idx)\n train_graph_embeddings = graph_embeddings[train_idx]\n test_graph_embeddings = graph_embeddings[test_idx]\n train_labels = graph_labels[train_idx]\n test_labels = graph_labels[test_idx]\n\n cls = LogisticRegression(solver=\"liblinear\", tol=0.001)\n cls.fit(train_graph_embeddings, train_labels)\n ACC = cls.score(test_graph_embeddings, test_labels)\n acc_10folds.append(ACC)\n print('epoch ', epoch, ' fold ', fold_idx, ' acc ', ACC)\n\n mean_10folds = statistics.mean(acc_10folds)\n std_10folds = statistics.stdev(acc_10folds)\n print('epoch ', epoch, ' mean: ', str(mean_10folds*100), ' std: ', str(std_10folds*100))\n\n write_acc.write('epoch ' + str(epoch) + ' mean: ' + str(mean_10folds*100) + ' std: ' + str(std_10folds*100) + '\\n')\n\n write_acc.close()" ]
[ [ "numpy.concatenate", "numpy.array", "numpy.random.choice", "tensorflow.compat.v1.train.AdamOptimizer", "tensorflow.get_default_graph", "numpy.random.seed", "tensorflow.compat.v1.ConfigProto", "numpy.ones", "numpy.tile", "tensorflow.Variable", "tensorflow.Graph", "tensorflow.compat.v1.Session", "sklearn.linear_model.LogisticRegression", "tensorflow.compat.v1.set_random_seed", "tensorflow.global_variables_initializer" ] ]
wobushishuiguo/Rotation-ship-detection
[ "e49f2c7fd71d6f05b3d0fa6dd67ad751b306592e" ]
[ "mmdet/datasets/pipelines/formating.py" ]
[ "from collections.abc import Sequence\n\nimport mmcv\nimport numpy as np\nimport torch\nfrom mmcv.parallel import DataContainer as DC\n\nfrom ..builder import PIPELINES\n\n\ndef to_tensor(data):\n \"\"\"Convert objects of various python types to :obj:`torch.Tensor`.\n\n Supported types are: :class:`numpy.ndarray`, :class:`torch.Tensor`,\n :class:`Sequence`, :class:`int` and :class:`float`.\n\n Args:\n data (torch.Tensor | numpy.ndarray | Sequence | int | float): Data to\n be converted.\n \"\"\"\n\n if isinstance(data, torch.Tensor):\n return data\n elif isinstance(data, np.ndarray):\n return torch.from_numpy(data)\n elif isinstance(data, Sequence) and not mmcv.is_str(data):\n return torch.tensor(data)\n elif isinstance(data, int):\n return torch.LongTensor([data])\n elif isinstance(data, float):\n return torch.FloatTensor([data])\n else:\n raise TypeError(f'type {type(data)} cannot be converted to tensor.')\n\n\[email protected]_module()\nclass ToTensor(object):\n \"\"\"Convert some results to :obj:`torch.Tensor` by given keys.\n\n Args:\n keys (Sequence[str]): Keys that need to be converted to Tensor.\n \"\"\"\n\n def __init__(self, keys):\n self.keys = keys\n\n def __call__(self, results):\n \"\"\"Call function to convert data in results to :obj:`torch.Tensor`.\n\n Args:\n results (dict): Result dict contains the data to convert.\n\n Returns:\n dict: The result dict contains the data converted\n to :obj:`torch.Tensor`.\n \"\"\"\n for key in self.keys:\n results[key] = to_tensor(results[key])\n return results\n\n def __repr__(self):\n return self.__class__.__name__ + f'(keys={self.keys})'\n\n\[email protected]_module()\nclass ImageToTensor(object):\n \"\"\"Convert image to :obj:`torch.Tensor` by given keys.\n\n The dimension order of input image is (H, W, C). The pipeline will convert\n it to (C, H, W). 
If only 2 dimension (H, W) is given, the output would be\n (1, H, W).\n\n Args:\n keys (Sequence[str]): Key of images to be converted to Tensor.\n \"\"\"\n\n def __init__(self, keys):\n self.keys = keys\n\n def __call__(self, results):\n \"\"\"Call function to convert image in results to :obj:`torch.Tensor` and\n transpose the channel order.\n\n Args:\n results (dict): Result dict contains the image data to convert.\n\n Returns:\n dict: The result dict contains the image converted\n to :obj:`torch.Tensor` and transposed to (C, H, W) order.\n \"\"\"\n for key in self.keys:\n img = results[key]\n if len(img.shape) < 3:\n img = np.expand_dims(img, -1)\n results[key] = to_tensor(img.transpose(2, 0, 1))\n return results\n\n def __repr__(self):\n return self.__class__.__name__ + f'(keys={self.keys})'\n\n\[email protected]_module()\nclass Transpose(object):\n \"\"\"Transpose some results by given keys.\n\n Args:\n keys (Sequence[str]): Keys of results to be transposed.\n order (Sequence[int]): Order of transpose.\n \"\"\"\n\n def __init__(self, keys, order):\n self.keys = keys\n self.order = order\n\n def __call__(self, results):\n \"\"\"Call function to transpose the channel order of data in results.\n\n Args:\n results (dict): Result dict contains the data to transpose.\n\n Returns:\n dict: The result dict contains the data transposed to \\\n ``self.order``.\n \"\"\"\n for key in self.keys:\n results[key] = results[key].transpose(self.order)\n return results\n\n def __repr__(self):\n return self.__class__.__name__ + \\\n f'(keys={self.keys}, order={self.order})'\n\n\[email protected]_module()\nclass ToDataContainer(object):\n \"\"\"Convert results to :obj:`mmcv.DataContainer` by given fields.\n\n Args:\n fields (Sequence[dict]): Each field is a dict like\n ``dict(key='xxx', **kwargs)``. 
The ``key`` in result will\n be converted to :obj:`mmcv.DataContainer` with ``**kwargs``.\n Default: ``(dict(key='img', stack=True), dict(key='gt_bboxes'),\n dict(key='gt_labels'))``.\n \"\"\"\n\n def __init__(self,\n fields=(dict(key='img', stack=True), dict(key='gt_bboxes'),\n dict(key='gt_labels'))):\n self.fields = fields\n\n def __call__(self, results):\n \"\"\"Call function to convert data in results to\n :obj:`mmcv.DataContainer`.\n\n Args:\n results (dict): Result dict contains the data to convert.\n\n Returns:\n dict: The result dict contains the data converted to \\\n :obj:`mmcv.DataContainer`.\n \"\"\"\n\n for field in self.fields:\n field = field.copy()\n key = field.pop('key')\n results[key] = DC(results[key], **field)\n return results\n\n def __repr__(self):\n return self.__class__.__name__ + f'(fields={self.fields})'\n\n\[email protected]_module()\nclass DefaultFormatBundle(object):\n \"\"\"Default formatting bundle.\n\n It simplifies the pipeline of formatting common fields, including \"img\",\n \"proposals\", \"gt_bboxes\", \"gt_labels\", \"gt_masks\" and \"gt_semantic_seg\".\n These fields are formatted as follows.\n\n - img: (1)transpose, (2)to tensor, (3)to DataContainer (stack=True)\n - proposals: (1)to tensor, (2)to DataContainer\n - gt_bboxes: (1)to tensor, (2)to DataContainer\n - gt_bboxes_ignore: (1)to tensor, (2)to DataContainer\n - gt_labels: (1)to tensor, (2)to DataContainer\n - gt_masks: (1)to tensor, (2)to DataContainer (cpu_only=True)\n - gt_semantic_seg: (1)unsqueeze dim-0 (2)to tensor, \\\n (3)to DataContainer (stack=True)\n \"\"\"\n\n def __call__(self, results):\n \"\"\"Call function to transform and format common fields in results.\n\n Args:\n results (dict): Result dict contains the data to convert.\n\n Returns:\n dict: The result dict contains the data that is formatted with \\\n default bundle.\n \"\"\"\n\n if 'img' in results:\n img = results['img']\n # add default meta keys\n results = self._add_default_meta_keys(results)\n if len(img.shape) < 3:\n img = np.expand_dims(img, -1)\n img = np.ascontiguousarray(img.transpose(2, 0, 1))\n results['img'] = DC(to_tensor(img), stack=True)\n for key in ['proposals', 'gt_bboxes', 'gt_bboxes_ignore', 'gt_labels', 'gt_ratios']:\n if key not in results:\n continue\n results[key] = DC(to_tensor(results[key]))\n if 'gt_masks' in results:\n results['gt_masks'] = DC(results['gt_masks'], cpu_only=True)\n if 'gt_semantic_seg' in results:\n results['gt_semantic_seg'] = DC(\n to_tensor(results['gt_semantic_seg'][None, ...]), stack=True)\n return results\n\n def _add_default_meta_keys(self, results):\n \"\"\"Add default meta keys.\n\n We set default meta keys including `pad_shape`, `scale_factor` and\n `img_norm_cfg` to avoid the case where no `Resize`, `Normalize` and\n `Pad` are implemented during the whole pipeline.\n\n Args:\n results (dict): Result dict contains the data to convert.\n\n Returns:\n results (dict): Updated result dict contains the data to convert.\n \"\"\"\n img = results['img']\n results.setdefault('pad_shape', img.shape)\n results.setdefault('scale_factor', 1.0)\n num_channels = 1 if len(img.shape) < 3 else img.shape[2]\n results.setdefault(\n 'img_norm_cfg',\n dict(\n mean=np.zeros(num_channels, dtype=np.float32),\n std=np.ones(num_channels, dtype=np.float32),\n to_rgb=False))\n return results\n\n def __repr__(self):\n return self.__class__.__name__\n\n\[email protected]_module()\nclass Collect(object):\n \"\"\"Collect data from the loader relevant to the specific task.\n\n This is usually the last 
stage of the data loader pipeline. Typically keys\n is set to some subset of \"img\", \"proposals\", \"gt_bboxes\",\n \"gt_bboxes_ignore\", \"gt_labels\", and/or \"gt_masks\".\n\n The \"img_meta\" item is always populated. The contents of the \"img_meta\"\n dictionary depends on \"meta_keys\". By default this includes:\n\n - \"img_shape\": shape of the image input to the network as a tuple \\\n (h, w, c). Note that images may be zero padded on the \\\n bottom/right if the batch tensor is larger than this shape.\n\n - \"scale_factor\": a float indicating the preprocessing scale\n\n - \"flip\": a boolean indicating if image flip transform was used\n\n - \"filename\": path to the image file\n\n - \"ori_shape\": original shape of the image as a tuple (h, w, c)\n\n - \"pad_shape\": image shape after padding\n\n - \"img_norm_cfg\": a dict of normalization information:\n\n - mean - per channel mean subtraction\n - std - per channel std divisor\n - to_rgb - bool indicating if bgr was converted to rgb\n\n Args:\n keys (Sequence[str]): Keys of results to be collected in ``data``.\n meta_keys (Sequence[str], optional): Meta keys to be converted to\n ``mmcv.DataContainer`` and collected in ``data[img_metas]``.\n Default: ``('filename', 'ori_filename', 'ori_shape', 'img_shape',\n 'pad_shape', 'scale_factor', 'flip', 'flip_direction',\n 'img_norm_cfg')``\n \"\"\"\n\n def __init__(self,\n keys,\n meta_keys=('filename', 'ori_filename', 'ori_shape',\n 'img_shape', 'pad_shape', 'scale_factor', 'flip',\n 'flip_direction', 'img_norm_cfg')):\n self.keys = keys\n self.meta_keys = meta_keys\n\n def __call__(self, results):\n \"\"\"Call function to collect keys in results. The keys in ``meta_keys``\n will be converted to :obj:mmcv.DataContainer.\n\n Args:\n results (dict): Result dict contains the data to collect.\n\n Returns:\n dict: The result dict contains the following keys\n\n - keys in``self.keys``\n - ``img_metas``\n \"\"\"\n\n data = {}\n img_meta = {}\n for key in self.meta_keys:\n img_meta[key] = results[key]\n data['img_metas'] = DC(img_meta, cpu_only=True)\n for key in self.keys:\n data[key] = results[key]\n return data\n\n def __repr__(self):\n return self.__class__.__name__ + \\\n f'(keys={self.keys}, meta_keys={self.meta_keys})'\n\n\[email protected]_module()\nclass WrapFieldsToLists(object):\n \"\"\"Wrap fields of the data dictionary into lists for evaluation.\n\n This class can be used as a last step of a test or validation\n pipeline for single image evaluation or inference.\n\n Example:\n >>> test_pipeline = [\n >>> dict(type='LoadImageFromFile'),\n >>> dict(type='Normalize',\n mean=[123.675, 116.28, 103.53],\n std=[58.395, 57.12, 57.375],\n to_rgb=True),\n >>> dict(type='Pad', size_divisor=32),\n >>> dict(type='ImageToTensor', keys=['img']),\n >>> dict(type='Collect', keys=['img']),\n >>> dict(type='WrapIntoLists')\n >>> ]\n \"\"\"\n\n def __call__(self, results):\n \"\"\"Call function to wrap fields into lists.\n\n Args:\n results (dict): Result dict contains the data to wrap.\n\n Returns:\n dict: The result dict where value of ``self.keys`` are wrapped \\\n into list.\n \"\"\"\n\n # Wrap dict fields into lists\n for key, val in results.items():\n results[key] = [val]\n return results\n\n def __repr__(self):\n return f'{self.__class__.__name__}()'\n" ]
[ [ "numpy.zeros", "torch.FloatTensor", "numpy.ones", "torch.from_numpy", "torch.LongTensor", "torch.tensor", "numpy.expand_dims" ] ]
shahakshay11/medical-entity-extraction-nlp
[ "012a52dc76f54c0835d73124ca857426adee049e", "012a52dc76f54c0835d73124ca857426adee049e" ]
[ "code/dnc_code/tasks/archive/ner_task_bio_orig.py", "code/data_preprocessing/data_analysis.py" ]
[ "# Named Entity Recognition on Medical Data (BIO Tagging)\n# Bio-Word2Vec Embeddings Source and Reference: https://github.com/ncbi-nlp/BioWordVec\n\nimport os\nimport re\nimport torch\nimport pickle\nfrom torch import nn\nfrom torch import optim\nimport torch.nn.functional as F\n\nimport numpy as np\nimport random\n\nfrom DNC.dnc import DNC_Module # Importing DNC Implementation\n\nclass task_NER():\n\n def __init__(self):\n self.name = \"NER_task_bio\"\n\n # Controller Params\n self.controller_size = 128\n self.controller_layers = 1\n\n # Head Params\n self.num_read_heads = 1\n self.num_write_heads = 1\n\n # Processor Params\n self.num_inputs = 200 # Length of Embeddings\n self.num_outputs = 7 # Class size\n\n # Memory Params\n self.memory_N = 128\n self.memory_M = 128\n\n # Training Params\n self.num_batches = -1\n self.save_batch = 5 # Saving model after every save_batch number of batches\n self.batch_size = 10\n self.num_epoch = 4\n\n # Optimizer Params\n self.adam_lr = 1e-4\n self.adam_betas = (0.9, 0.999)\n self.adam_eps = 1e-8\n\n # Handles\n self.machine = None\n self.loss = None\n self.optimizer = None\n\n # Class Dictionaries\n self.labelDict = None # Label Dictionary - Labels to Index\n self.reverseDict = None # Inverse Label Dictionary - Index to Labels\n\n # File Paths\n self.concept_path_train = \"/media/ramkabir/PC Data/ASU Data/Semester 3/BMNLP/Projects/medical_data/train_data/concept\" # Path to train concept files\n self.text_path_train = \"/media/ramkabir/PC Data/ASU Data/Semester 3/BMNLP/Projects/medical_data/train_data/txt\" # Path to train text summaries\n self.concept_path_test = \"/media/ramkabir/PC Data/ASU Data/Semester 3/BMNLP/Projects/medical_data/test_data/concept\" # Path to test concept files\n self.text_path_test = \"/media/ramkabir/PC Data/ASU Data/Semester 3/BMNLP/Projects/medical_data/test_data/txt\" # Path to test text summaries\n self.save_path = \"/media/ramkabir/PC Data/ASU Data/Semester 3/BMNLP/Projects/medical_data/cleaned_files\" # Save path\n self.embed_dic_path = \"/media/ramkabir/PC Data/ASU Data/Semester 3/BMNLP/Projects/medical_data/embeddings/bio_embedding_dictionary.dat\" # Word2Vec embeddings Dictionary path\n self.random_vec = \"/media/ramkabir/PC Data/ASU Data/Semester 3/BMNLP/Projects/medical_data/embeddings/random_vec.dat\" # Path to random embedding (Used to create new vectors)\n self.model_path = \"/media/ramkabir/PC Data/ASU Data/Semester 3/BMNLP/Projects/Code/DNC Code/Saved_Models/\" # Stores Trained Models\n\n # Miscellaneous\n self.padding_symbol = np.full((self.num_inputs), 0.01) # Padding symbol embedding\n\n def get_task_name(self):\n return self.name\n\n def init_dnc(self):\n self.machine = DNC_Module(self.num_inputs, self.num_outputs, self.controller_size, self.controller_layers, self.num_read_heads, self.num_write_heads, self.memory_N, self.memory_M)\n\n def init_loss(self):\n self.loss = nn.CrossEntropyLoss(reduction = 'mean') # Cross Entropy Loss -> Softmax Activation + Cross Entropy Loss\n\n def init_optimizer(self):\n self.optimizer = optim.Adam(self.machine.parameters(), lr = self.adam_lr, betas = self.adam_betas, eps = self.adam_eps)\n\n def calc_loss(self, Y_pred, Y):\n # Y: dim -> (sequence_len x batch_size)\n # Y_pred: dim -> (sequence_len x batch_size x num_outputs)\n loss_vec = torch.empty(Y.shape[0], dtype=torch.float32)\n for i in range(Y_pred.shape[0]):\n loss_vec[i] = self.loss(Y_pred[i], Y[i])\n return torch.mean(loss_vec)\n\n def calc_cost(self, Y_pred, Y): # Calculates % Cost\n # Y: dim -> (sequence_len x 
batch_size)\n # Y_pred: dim -> (sequence_len x batch_size x sequence_width)\n\n # Stores correct class labels for each entity type\n class_bag = {}\n class_bag['problem'] = 0 # Total labels\n class_bag['test'] = 0 # Total labels\n class_bag['treatment'] = 0 # Total labels\n class_bag['problem_cor'] = 0 # Correctly classified labels\n class_bag['test_cor'] = 0 # Correctly classified labels\n class_bag['treatment_cor'] = 0 # Correctly classified labels\n \n pred_class = np.transpose(F.softmax(Y_pred, dim=2).max(2)[1].numpy()).reshape(-1) # Predicted class. dim -> (sequence_len*batch_size)\n Y = np.transpose(Y.numpy()).reshape(-1) # Converting to NumPy Array and linearizing\n cor_pred = (Y == pred_class).astype(np.int) # Comparing Prediction and Labels to find correct predictions\n\n class_bag['word_pred_acc'] = np.divide(np.sum(cor_pred), cor_pred.size)*100.0 # % Accuracy of Correctly Predicted Words (Not Entities)\n\n # Getting the beginning index of all the entities\n beg_idx = list(np.where(np.in1d(Y, [0, 2, 4]))[0])\n\n # Getting the end index of all the entities (All the Index previous of 'Other'/'Begin' and not equal to 'Other')\n target = np.where(np.in1d(Y, [0, 2, 4, 6]))[0] - 1\n if target[0] == -1:\n target = target[1:]\n end_idx = list(target[np.where(Y[target] != 6)[0]])\n if Y[-1] != 6:\n end_idx.append(Y.size-1)\n\n assert len(beg_idx) == len(end_idx) # Sanity Check\n class_bag['total'] = len(beg_idx) # Total number of Entities\n\n # Counting Entities\n sum_vec = np.cumsum(cor_pred) # Calculates cumulative summation of predicted vector\n for b, e in zip(beg_idx, end_idx):\n idx_range = e-b+1 # Entity span\n sum_range = sum_vec[e]-sum_vec[b]+1 # Count of entity elements which are predicted correctly\n\n lab = self.reverseDict[Y[b]][2:] # Extracting entity type (Problem, Test or Treatment)\n class_bag[lab] = class_bag[lab]+1 # Getting count of each entities\n \n if sum_range == idx_range: # +1 if entity is classified correctly\n class_bag[lab+'_cor'] = class_bag[lab+'_cor']+1\n return class_bag\n \n def print_word(self, token_class): # Prints the Class name from Class number\n word = self.reverseDict[token_class]\n print(word + \"\\n\")\n\n def clip_grads(self): # Clipping gradients for stability\n \"\"\"Gradient clipping to the range [10, 10].\"\"\"\n parameters = list(filter(lambda p: p.grad is not None, self.machine.parameters()))\n for p in parameters:\n p.grad.data.clamp_(-10, 10)\n\n def initialize_labels(self): # Initializing label dictionaries for Labels->IDX and IDX->Labels\n self.labelDict = {} # Label Dictionary - Labels to Index\n self.reverseDict = {} # Inverse Label Dictionary - Index to Labels\n\n # Using BIEOS labelling scheme\n self.labelDict['b-problem'] = 0 # Problem - Beginning \n self.labelDict['i-problem'] = 1 # Problem - Inside\n self.labelDict['b-test'] = 2 # Test - Beginning\n self.labelDict['i-test'] = 3 # Test - Inside\n self.labelDict['b-treatment'] = 4 # Treatment - Beginning\n self.labelDict['i-treatment'] = 5 # Treatment - Inside\n self.labelDict['o'] = 6 # Outside Token\n\n # Making Inverse Label Dictionary\n for k in self.labelDict.keys():\n self.reverseDict[self.labelDict[k]] = k\n\n # Saving the diictionaries into a file\n self.save_data([self.labelDict, self.reverseDict], os.path.join(self.save_path, \"label_dicts_bio.dat\"))\n\n def parse_concepts(self, file_path): # Parses the concept file to extract concepts and labels\n conceptList = [] # Stores all the Concept in the File\n\n f = open(file_path) # Opening and reading a concept file\n 
content = f.readlines() # Reading all the lines in the concept file\n f.close() # Closing the concept file\n\n for x in content: # Reading each line in the concept file\n dic = {}\n\n # Cleaning and extracting the entities, labels and their positions in the corresponding medical summaries\n x = re.sub('\\n', ' ', x)\n x = re.sub(r'\\ +', ' ', x)\n x = x.strip().split('||')\n\n temp1, label = x[0].split(' '), x[1].split('=')[1][1:-1]\n\n temp1[0] = temp1[0][3:]\n temp1[-3] = temp1[-3][0:-1]\n entity = temp1[0:-2]\n\n if len(entity) >= 1:\n lab = ['i']*len(entity)\n lab[0] = 'b'\n lab = [l+\"-\"+label for l in lab]\n else:\n print(\"Data in File: \" + file_path + \", not in expected format..\")\n exit()\n\n noLab = [self.labelDict[l] for l in lab]\n sLine, sCol = int(temp1[-2].split(\":\")[0]), int(temp1[-2].split(\":\")[1])\n eLine, eCol = int(temp1[-1].split(\":\")[0]), int(temp1[-1].split(\":\")[1])\n \n '''\n # Printing the information\n print(\"------------------------------------------------------------\")\n print(\"Entity: \" + str(entity))\n print(\"Entity Label: \" + label)\n print(\"Labels - BIO form: \" + str(lab))\n print(\"Labels Index: \" + str(noLab))\n print(\"Start Line: \" + str(sLine) + \", Start Column: \" + str(sCol))\n print(\"End Line: \" + str(eLine) + \", End Column: \" + str(eCol))\n print(\"------------------------------------------------------------\")\n '''\n\n # Storing the information as a dictionary\n dic['entity'] = entity # Entity Name (In the form of list of words)\n dic['label'] = label # Common Label\n dic['BIO_labels'] = lab # List of BIO labels for each word\n dic['label_index'] = noLab # Labels in the index form\n dic['start_line'] = sLine # Start line of the concept in the corresponding text summaries\n dic['start_word_no'] = sCol # Starting word number of the concept in the corresponding start line\n dic['end_line'] = eLine # End line of the concept in the corresponding text summaries\n dic['end_word_no'] = eCol # Ending word number of the concept in the corresponding end line\n\n # Appending the concept dictionary to the list\n conceptList.append(dic)\n\n return conceptList # Returning the all the concepts in the current file in the form of dictionary list\n\n def parse_summary(self, file_path): # Parses the Text summaries\n file_lines = [] # Stores the lins of files in the list form\n tags = [] # Stores corresponding labels for each word in the file (Default label: 'o' [Outside])\n default_label = len(self.labelDict)-1 # default_label is \"7\" (Corresponding to 'Other' entity) \n # counter = 1 # Temporary variable used during print\n\n f = open(file_path) # Opening and reading a concept file\n content = f.readlines() # Reading all the lines in the concept file\n f.close()\n\n for x in content:\n x = re.sub('\\n', ' ', x)\n x = re.sub(r'\\ +', ' ', x)\n file_lines.append(x.strip().split(\" \")) # Spliting the lines into word list and Appending each of them in the file list\n tags.append([default_label]*len(file_lines[-1])) # Assigining the default_label to all the words in a line\n '''\n # Printing the information\n print(\"------------------------------------------------------------\")\n print(\"File Lines No: \" + str(counter))\n print(file_lines[-1])\n print(\"\\nCorresponding labels:\")\n print(tags[-1])\n print(\"------------------------------------------------------------\")\n counter += 1\n '''\n assert len(tags[-1]) == len(file_lines[-1]), \"Line length is not matching labels length...\" # Sanity Check\n return file_lines, tags\n\n def 
modify_labels(self, conceptList, tags): # Modifies the default labels of each word in text files with the true labels from the concept files\n for e in conceptList: # Iterating over all the dictionary elements in the Concept List\n if e['start_line'] == e['end_line']: # Checking whether concept is spanning over a single line or multiple line in the summary\n tags[e['start_line']-1][e['start_word_no']:e['end_word_no']+1] = e['label_index'][:]\n else:\n start = e['start_line']\n end = e['end_line']\n beg = 0\n for i in range(start, end+1): # Distributing labels over multiple lines in the text summaries\n if i == start:\n tags[i-1][e['start_word_no']:] = e['label_index'][0:len(tags[i-1])-e['start_word_no']]\n beg = len(tags[i-1])-e['start_word_no']\n elif i == end:\n tags[i-1][0:e['end_word_no']+1] = e['label_index'][beg:]\n else:\n tags[i-1][:] = e['label_index'][beg:beg+len(tags[i-1])]\n beg = beg+len(tags[i-1])\n return tags\n\n def print_data(self, file, file_lines, tags): # Prints the given data\n counter = 1\n\n print(\"\\n************ Printing details of the file: \" + file + \" ************\\n\")\n for x in file_lines:\n print(\"------------------------------------------------------------\")\n print(\"File Lines No: \" + str(counter))\n print(x)\n print(\"\\nCorresponding labels:\")\n print([self.reverseDict[i] for i in tags[counter-1]])\n print(\"\\nCorresponding Label Indices:\")\n print(tags[counter-1])\n print(\"------------------------------------------------------------\")\n counter += 1\n\n def save_data(self, obj_list, s_path): # Saves the file into the binary file using Pickle\n # Note: The 'obj_list' must be a list and none other than that\n pickle.dump(tuple(obj_list), open(s_path,'wb'))\n\n def acquire_data(self, task): # Read all the concept files to get concepts and labels, proces them and save them\n data = {} # Dictionary to store all the data objects (conceptList, file_lines, tags) each indexed by file name\n\n if task == 'train': # Determining the task type to assign the data path accordingly\n t_path = self.text_path_train\n c_path = self.concept_path_train\n else:\n t_path = self.text_path_test\n c_path = self.concept_path_test\n\n for f in os.listdir(t_path):\n f1 = f.split('.')[0] + \".con\"\n if os.path.isfile(os.path.join(c_path, f1)):\n conceptList = self.parse_concepts(os.path.join(c_path, f1)) # Parsing concepts and labels from the corresponding concept file\n file_lines, tags = self.parse_summary(os.path.join(t_path, f)) # Parses the document summaries to get the written notes\n tags = self.modify_labels(conceptList, tags) # Modifies he default labels to each word with the true labels from the concept files\n data[f1] = [conceptList, file_lines, tags] # Storing each object in dictionary\n # self.print_data(f, file_lines, tags) # Printing the details\n return data\n\n def structure_data(self, data_dict): # Structures the data in proper trainable form\n final_line_list = [] # Stores words of all the files in separate sub-lists\n final_tag_list = [] # Stores tags of all the files in separate sub-lists\n\n for k in data_dict.keys(): # Extracting data from each pre-processed file in dictionary\n file_lines = data_dict[k][1] # Extracting story\n tags = data_dict[k][2] # Extracting corresponding labels\n\n # Creating empty lists\n temp1 = []\n temp2 = []\n\n # Merging all the lines in file into a single list. 
Same for corresponding labels\n for i in range(len(file_lines)):\n temp1.extend(file_lines[i])\n temp2.extend(tags[i])\n \n assert len(temp1) == len(temp2), \"Word length not matching Label length for story in \" + str(k) # Sanity Check\n\n final_line_list.append(temp1)\n final_tag_list.append(temp2)\n \n assert len(final_line_list) == len(final_tag_list), \"Number of stories not matching number of labels list\" # Sanity Check\n return final_line_list, final_tag_list\n \n def padding(self, line_list, tag_list): # Pads stories with padding symbol to make them of same length \n diff = 0\n max_len = 0\n outside_class = len(self.labelDict)-1 # Classifying padding symbol as \"outside\" term\n\n # Calculating Max Summary Length\n for i in range(len(line_list)):\n if len(line_list[i])>max_len:\n max_len = len(line_list[i])\n\n for i in range(len(line_list)):\n diff = max_len - len(line_list[i])\n line_list[i].extend([self.padding_symbol]*diff)\n tag_list[i].extend([outside_class]*diff)\n assert (len(line_list[i]) == max_len) and (len(line_list[i]) == len(tag_list[i])), \"Padding unsuccessful\" # Sanity check\n return np.asarray(line_list), np.asarray(tag_list) # Making NumPy array of size (batch_size x story_length x word size) and (batch_size x story_length x 1) respectively\n\n def embed_input(self, line_list): # Converts words to vector embeddings\n final_list = [] # Stores embedded words\n summary = None # Temp variable\n word = None # Temp variable\n temp = None # Temp variable\n\n embed_dic = pickle.load(open(self.embed_dic_path, 'rb')) # Loading word2vec dictionary using Pickle\n r_embed = pickle.load(open(self.random_vec, 'rb')) # Loading Random embedding\n\n for i in range(len(line_list)): # Iterating over all the summaries\n summary = line_list[i]\n final_list.append([]) # Reserving space for curent summary\n\n for j in range(len(summary)):\n word = summary[j].lower()\n if word in embed_dic: # Checking for existence of word in dictionary\n final_list[-1].append(embed_dic[word])\n else:\n temp = r_embed[:] # Copying the values of the list\n random.shuffle(temp) # Randomly shuffling the word embedding to make it unique\n temp = np.asarray(temp, dtype=np.float32) # Converting to NumPy array\n final_list[-1].append(temp)\n return final_list\n\n def prepare_data(self, task='train'): # Preparing all the data necessary\n line_list, tag_list = None, None\n\n '''\n line_list is the list of rows, where each row is a list of all the words in a medical summary\n Similar is the case for tag_list, except, it stores labels for each words\n '''\n\n if not os.path.exists(self.save_path):\n os.mkdir(self.save_path) # Creating a new directory if it does not exist else reading previously saved data\n \n if not os.path.exists(os.path.join(self.save_path, \"label_dicts_bio.dat\")):\n self.initialize_labels() # Initialize label to index dictionaries\n else:\n self.labelDict, self.reverseDict = pickle.load(open(os.path.join(self.save_path, \"label_dicts_bio.dat\"), 'rb')) # Loading Label dictionaries\n \n if not os.path.exists(os.path.join(self.save_path, \"object_dict_bio_\"+str(task)+\".dat\")):\n data_dict = self.acquire_data(task) # Read data from file\n line_list, tag_list = self.structure_data(data_dict) # Structures the data into proper form\n line_list = self.embed_input(line_list) # Embeds input data (words) into embeddings\n self.save_data([line_list, tag_list], os.path.join(self.save_path, \"object_dict_bio_\"+str(task)+\".dat\"))\n else:\n line_list, tag_list = 
pickle.load(open(os.path.join(self.save_path, \"object_dict_bio_\"+str(task)+\".dat\"), 'rb')) # Loading Data dictionary\n return line_list, tag_list\n\n def get_data(self, task='train'):\n line_list, tag_list = self.prepare_data(task)\n\n # Shuffling stories\n story_idx = list(range(0, len(line_list)))\n random.shuffle(story_idx)\n\n num_batch = int(len(story_idx)/self.batch_size)\n self.num_batches = num_batch\n\n # Out Data\n x_out = []\n y_out = []\n \n counter = 1\n\n for i in story_idx:\n if num_batch<=0:\n break\n\n x_out.append(line_list[i])\n y_out.append(tag_list[i])\n\n if counter % self.batch_size == 0:\n counter = 0\n \n # Padding and converting labels to one hot vectors\n x_out_pad, y_out_pad = self.padding(x_out, y_out)\n x_out_array = torch.tensor(x_out_pad.swapaxes(0, 1), dtype=torch.float32) # Converting from (batch_size x story_length x word size) to (story_length x batch_size x word size)\n y_out_array = torch.tensor(y_out_pad.swapaxes(0, 1), dtype=torch.long) # Converting from (batch_size x story_length x 1) to (story_length x batch_size x 1)\n\n x_out = []\n y_out = []\n num_batch -= 1\n\n yield (self.num_batches - num_batch), x_out_array, y_out_array\n counter += 1\n\n def train_model(self):\n # Here, the model is optimized using Cross Entropy Loss.\n loss_list = []\n seq_length = []\n last_batch = 0\n\n for j in range(self.num_epoch):\n for batch_num, X, Y in self.get_data(task='train'):\n self.optimizer.zero_grad() # Making old gradients zero before calculating the fresh ones\n self.machine.initialization(self.batch_size) # Initializing states\n Y_out = torch.empty((X.shape[0], X.shape[1], self.num_outputs), dtype=torch.float32) # dim: (seq_len x batch_size x num_output)\n\n # Feeding the DNC network all the data first and then predicting output\n # by giving zero vector as input and previous read states and hidden vector\n # and thus training vector this way to give outputs matching the labels\n\n embeddings = self.machine.backward_prediction(X) # Creating embeddings from data for backward calculation\n temp_size = X.shape[0]\n\n for i in range(temp_size):\n Y_out[i, :, :], _ = self.machine(X[i], embeddings[temp_size-i-1]) # Passing Embeddings from backwards\n\n loss = self.calc_loss(Y_out, Y)\n loss.backward()\n self.clip_grads()\n self.optimizer.step()\n\n class_bag = self.calc_cost(Y_out, Y)\n\n corr = class_bag['problem_cor']+class_bag['test_cor']+class_bag['treatment_cor']\n tot = class_bag['total']\n\n loss_list += [loss.item()]\n seq_length += [Y.shape[0]]\n\n if (batch_num % self.save_batch) == 0:\n self.save_model(j, batch_num)\n\n last_batch = batch_num\n print(\"Epoch: \" + str(j) + \"/\" + str(self.num_epoch) + \", Batch: \" + str(batch_num) + \"/\" + str(self.num_batches) + \", Loss: {0:.2f}, \".format(loss.item()) + \\\n \"Batch Accuracy (Entity Prediction): {0:.2f} %, \".format((float(corr)/float(tot))*100.0) + \"Batch Accuracy (Word Prediction): {0:.2f} %\".format(class_bag['word_pred_acc']))\n self.save_model(j, last_batch)\n\n def test_model(self): # Testing the model\n correct = 0\n total = 0\n result_dict = {}\n result_dict['total_problem'] = 0 # Total labels in data\n result_dict['total_test'] = 0 # Total labels in data\n result_dict['total_treatment'] = 0 # Total labels in data\n result_dict['correct_problem'] = 0 # Correctly classified labels\n result_dict['correct_test'] = 0 # Correctly classified labels\n result_dict['correct_treatment'] = 0 # Correctly classified labels\n print(\"\\n\")\n\n for batch_num, X, Y in 
self.get_data(task='test'):\n self.machine.initialization(self.batch_size) # Initializing states\n Y_out = torch.empty((X.shape[0], X.shape[1], self.num_outputs), dtype=torch.float32) # dim: (seq_len x batch_size x num_output)\n\n # Feeding the DNC network all the data first and then predicting output\n # by giving zero vector as input and previous read states and hidden vector\n # and thus training vector this way to give outputs matching the labels\n\n embeddings = self.machine.backward_prediction(X) # Creating embeddings from data for backward calculation\n temp_size = X.shape[0]\n\n for i in range(temp_size):\n Y_out[i, :, :], _ = self.machine(X[i], embeddings[temp_size-i-1])\n\n class_bag = self.calc_cost(Y_out, Y)\n\n corr = class_bag['problem_cor']+class_bag['test_cor']+class_bag['treatment_cor']\n tot = class_bag['total']\n\n result_dict['total_problem'] = result_dict['total_problem'] + class_bag['problem']\n result_dict['total_test'] = result_dict['total_test'] + class_bag['test']\n result_dict['total_treatment'] = result_dict['total_treatment'] + class_bag['treatment']\n result_dict['correct_problem'] = result_dict['correct_problem'] + class_bag['problem_cor']\n result_dict['correct_test'] = result_dict['correct_test'] + class_bag['test_cor']\n result_dict['correct_treatment'] = result_dict['correct_treatment'] + class_bag['treatment_cor']\n\n correct += corr\n total += tot\n print(\"Test Example \" + str(batch_num) + \"/\" + str(self.num_batches) + \" processed, Batch Accuracy: {0:.2f} %, \".format((float(corr)/float(tot))*100.0) + \"Batch Accuracy (Word Prediction): {0:.2f} %\".format(class_bag['word_pred_acc']))\n \n result_dict['accuracy'] = (float(correct)/float(total))*100.0\n print(\"\\nOverall Entity Prediction Accuracy: {0:.2f} %\".format(result_dict['accuracy']))\n return result_dict\n\n def save_model(self, curr_epoch, curr_batch):\n # Here 'start_epoch' and 'start_batch' params below are the 'epoch' and 'batch' number from which to start training after next model loading\n # Note: It is recommended to start from the 'start_epoch' and not 'start_epoch' + 'start_batch', because batches are formed randomly\n if not os.path.exists(os.path.join(self.model_path, self.name)):\n os.mkdir(os.path.join(self.model_path, self.name))\n state_dic = {'task_name': self.name, 'start_epoch': curr_epoch + 1, 'start_batch': curr_batch + 1, 'state_dict': self.machine.state_dict(), 'optimizer_dic' : self.optimizer.state_dict()}\n filename = self.model_path + self.name + \"/\" + self.name + \"_\" + str(curr_epoch) + \"_\" + str(curr_batch) + \"_saved_model.pth.tar\"\n torch.save(state_dic, filename)\n\n def load_model(self, option, epoch, batch):\n path = self.model_path + self.name + \"/\" + self.name + \"_\" + str(epoch) + \"_\" + str(batch) + \"_saved_model.pth.tar\"\n if option == 1: # Loading for training\n checkpoint = torch.load(path)\n self.machine.load_state_dict(checkpoint['state_dict'])\n self.optimizer.load_state_dict(checkpoint['optimizer_dic'])\n else: # Loading for testing\n checkpoint = torch.load(path)\n self.machine.load_state_dict(checkpoint['state_dict'])\n self.machine.eval()", "import os\nimport numpy as np\nimport pickle\nimport matplotlib.pyplot as plt\nfrom matplotlib.ticker import MultipleLocator, FormatStrFormatter\n\nlabelDict = {} # Label Dictionary - Labels to Index\nreverseDict = {} # Inverse Label Dictionary - Index to Labels\n\ntot_prob = 0\ntot_treatment = 0\ntot_test = 0\n\ndef initialize_labels(s_path): # Initializing label dictionaries for Labels->IDX 
and IDX->Labels\n # Using BIEOS labelling scheme\n labelDict['problem_b'] = 0 # Problem - Beginning \n labelDict['problem_i'] = 1 # Problem - Inside\n labelDict['problem_e'] = 2 # Problem - End\n labelDict['problem_s'] = 3 # Problem - Single\n labelDict['test_b'] = 4 # Test - Beginning\n labelDict['test_i'] = 5 # Test - Inside\n labelDict['test_e'] = 6 # Test - End\n labelDict['test_s'] = 7 # Test - Single\n labelDict['treatment_b'] = 8 # Treatment - Beginning\n labelDict['treatment_i'] = 9 # Treatment - Inside\n labelDict['treatment_e'] = 10 # Treatment - End\n labelDict['treatment_s'] = 11 # Treatment - Single\n labelDict['o'] = 12 # Outside Token\n\n # Making Inverse Label Dictionary\n for k in labelDict.keys():\n reverseDict[labelDict[k]] = k\n \n # Saving the diictionaries into a file\n save_data([labelDict, reverseDict], os.path.join(s_path, \"label_dicts.dat\"))\n\ndef parse_concepts(file_path): # Parses the concept file to extract concepts and labels\n conceptList = [] # Stores all the Concept in the File\n\n f = open(file_path) # Opening and reading a concept file\n content = f.readlines() # Reading all the lines in the concept file\n f.close() # Closing the concept file\n\n for x in content: # Reading each line in the concept file\n dic = {}\n\n # Cleaning and extracting the entities, labels and their positions in the corresponding medical summaries\n x = x.strip().split('||')\n\n temp1, label = x[0].split(' '), x[1].split('=')[1][1:-1]\n\n temp1[0] = temp1[0][3:]\n temp1[-3] = temp1[-3][0:-1]\n entity = temp1[0:-2]\n\n if len(entity) > 1:\n lab = ['i']*len(entity)\n lab[0] = 'b'\n lab[-1] = 'e'\n lab = [label+\"_\"+l for l in lab]\n elif len(entity) == 1:\n lab = [label+\"_\"+\"s\"]\n else:\n print(\"Data in File: \" + file_path + \", not in expected format..\")\n exit()\n\n noLab = [labelDict[l] for l in lab]\n sLine, sCol = int(temp1[-2].split(\":\")[0]), int(temp1[-2].split(\":\")[1])\n eLine, eCol = int(temp1[-1].split(\":\")[0]), int(temp1[-1].split(\":\")[1])\n \n '''\n # Printing the information\n print(\"------------------------------------------------------------\")\n print(\"Entity: \" + str(entity))\n print(\"Entity Label: \" + label)\n print(\"Labels - BIEOS form: \" + str(lab))\n print(\"Labels Index: \" + str(noLab))\n print(\"Start Line: \" + str(sLine) + \", Start Column: \" + str(sCol))\n print(\"End Line: \" + str(eLine) + \", End Column: \" + str(eCol))\n print(\"------------------------------------------------------------\")\n '''\n\n # Storing the information as a dictionary\n dic['entity'] = entity # Entity Name (In the form of list of words)\n dic['label'] = label # Common Label\n dic['BIEOS_labels'] = lab # List of BIEOS label for each word\n dic['label_index'] = noLab # Labels in the index form\n dic['start_line'] = sLine # Start line of the concept in the corresponding text summaries\n dic['start_word_no'] = sCol # Starting word number of the concept in the corresponding start line\n dic['end_line'] = eLine # End line of the concept in the corresponding text summaries\n dic['end_word_no'] = eCol # Ending word number of the concept in the corresponding end line\n\n # Appending the concept dictionary to the list\n conceptList.append(dic)\n\n return conceptList # Returning the all the concepts in the current file in the form of dictionary list\n\ndef parse_summary(file_path): # Parses the Text summaries\n file_lines = [] # Stores the lins of files in the list form\n tags = [] # Stores corresponding labels for each word in the file (Default label: 'o' 
[Outside])\n # counter = 1 # Temporary variable\n\n f = open(file_path) # Opening and reading a concept file\n content = f.readlines() # Reading all the lines in the concept file\n f.close()\n\n for x in content:\n file_lines.append(x.strip().split(\" \")) # Appending the lines in the list\n tags.append([12]*len(file_lines[-1])) # Assigining the default labels to all the words in a line\n '''\n # Printing the information\n print(\"------------------------------------------------------------\")\n print(\"File Lines No: \" + str(counter))\n print(file_lines[-1])\n print(\"\\nCorresponding labels:\")\n print(tags[-1])\n print(\"------------------------------------------------------------\")\n counter += 1\n '''\n assert len(tags[-1]) == len(file_lines[-1]), \"Line length is not matching labels length...\" # Sanity Check\n return file_lines, tags\n\ndef modify_labels(conceptList, tags): # Modifies he default labels to each word with the true labels from the concept files\n for e in conceptList: # Iterating over all the dictionary elements in the Concept List\n if e['start_line'] == e['end_line']: # Checking whether concept is spanning over a single line or multiple line in the summary\n tags[e['start_line']-1][e['start_word_no']:e['end_word_no']+1] = e['label_index'][:]\n else:\n start = e['start_line']\n end = e['end_line']\n beg = 0\n for i in range(start, end+1): # Distributing labels over multiple lines in the text summaries\n if i == start:\n tags[i-1][e['start_word_no']:] = e['label_index'][0:len(tags[i-1])-e['start_word_no']]\n beg = len(tags[i-1])-e['start_word_no']\n elif i == end:\n tags[i-1][0:e['end_word_no']+1] = e['label_index'][beg:]\n else:\n tags[i-1][:] = e['label_index'][beg:beg+len(tags[i-1])]\n beg = beg+len(tags[i-1])\n return tags\n\ndef print_data(file, file_lines, tags): # Prints the given data\n counter = 1\n\n print(\"\\n************ Printing details of the file: \" + file + \" ************\\n\")\n for x in file_lines:\n print(\"------------------------------------------------------------\")\n print(\"File Lines No: \" + str(counter))\n print(x)\n print(\"\\nCorresponding labels:\")\n print([reverseDict[i] for i in tags[counter-1]])\n print(\"\\nCorresponding Label Indices:\")\n print(tags[counter-1])\n print(\"------------------------------------------------------------\")\n counter += 1\n\ndef save_data(obj_list, s_path): # Saves the file into the binary file using Pickle\n pickle.dump(tuple(obj_list), open(s_path,'wb'))\n\ndef concept_metric(conceptList): # Gathering Concepts metadata\n global tot_prob\n global tot_test\n global tot_treatment\n \n loc_prob = 0\n loc_treatment = 0\n loc_test = 0\n avg_concept_length = []\n\n for c in conceptList:\n avg_concept_length.append(len(c['entity']))\n\n if c['label'] == 'problem':\n loc_prob += 1\n tot_prob += 1\n elif c['label'] == 'treatment':\n loc_treatment += 1\n tot_treatment += 1\n else:\n loc_test += 1\n tot_test += 1\n \n return avg_concept_length, loc_prob, loc_treatment, loc_test\n\ndef plot_histogram(data, title, xlab, bin_size=5):\n data = np.asarray(data)\n mean = \"{:.2f}\".format(data.mean())\n std_dev = \"{:.2f}\".format(data.std())\n\n # String Statement\n line = ', Mean: ' + str(mean) + ', Standard Deviation: ' + str(std_dev)\n\n # Calculating Histogram\n hist, bin_edges = np.histogram(data, bins=np.linspace(start = data.min(), stop = data.max(), num = int((data.max()-data.min())/bin_size)))\n\n # Plotting Histogram\n # plt.figure(figsize=[10,8])\n fig, ax = plt.subplots()\n plt.bar(bin_edges[:-1], hist, 
width = 1, color='#0504aa')\n plt.xlim(min(bin_edges)-1, max(bin_edges)+1)\n ax.xaxis.set_major_locator(MultipleLocator(bin_size))\n plt.xlabel(xlab,fontsize=15)\n plt.ylabel('Counts',fontsize=15)\n plt.title(title + line,fontsize=15)\n plt.show()\n\ndef process_data(c_path, t_path, s_path, counter): # Read all the concept files to get concepts and labels, proces them and save them\n prob_list = []\n treat_list = []\n test_list = []\n avg_length_list = []\n for f in os.listdir(t_path):\n f1 = f.split('.')[0] + \".con\"\n if os.path.isfile(os.path.join(c_path, f1)):\n conceptList = parse_concepts(os.path.join(c_path, f1)) # Parsing concepts and labels from the corresponding concept file\n file_lines, tags = parse_summary(os.path.join(t_path, f)) # Parses the document summaries to get the written notes\n tags = modify_labels(conceptList, tags) # Modifies he default labels to each word with the true labels from the concept files\n avg_concept_length, loc_prob, loc_treatment, loc_test = concept_metric(conceptList)\n\n counter += 1\n prob_list.append(loc_prob)\n treat_list.append(loc_treatment)\n test_list.append(loc_test)\n avg_length_list.extend(avg_concept_length)\n # save_data([conceptList, file_lines, tags], os.path.join(s_path, f.split('.')[0]+\".dat\")) # Saving the objects into a file\n # print_data(f, file_lines, tags) # Printing the details\n return prob_list, treat_list, test_list, avg_length_list, counter\n\nif __name__ == '__main__':\n\n # File paths\n save_path = \"../../Medical Data/cleaned_files\"\n concept_path = \"../../Medical Data/training_data/partners/concept\"\n text_path = \"../../Medical Data/training_data/partners/txt\"\n concept_path1 = \"../../Medical Data/training_data/beth/concept\"\n text_path1 = \"../../Medical Data/training_data/beth/txt\"\n counter = 0\n\n super_prob_list = []\n super_treat_list = []\n super_test_list = []\n super_len_list = []\n\n initialize_labels(save_path) # Initializing and saving the label dictionaries\n\n # 1\n prob_list, treat_list, test_list, avg_length_list, counter = process_data(concept_path, text_path, save_path, counter) # Processing the data\n\n super_prob_list.extend(prob_list)\n super_treat_list.extend(treat_list)\n super_test_list.extend(test_list)\n super_len_list.extend(avg_length_list)\n\n # 2\n prob_list, treat_list, test_list, avg_length_list, counter = process_data(concept_path1, text_path1, save_path, counter) # Processing the data\n\n super_prob_list.extend(prob_list)\n super_treat_list.extend(treat_list)\n super_test_list.extend(test_list)\n super_len_list.extend(avg_length_list)\n\n # Plotting Histogram\n plot_histogram(super_prob_list, 'Average Problem Concepts Distribution', 'Average Problem concepts per file', 3)\n plot_histogram(super_treat_list, 'Average Treatment Concepts Distribution', 'Average Treatment concepts per file', 3)\n plot_histogram(super_test_list, 'Average Test Concepts Distribution', 'Average Test concepts per file', 3)\n plot_histogram(super_len_list, 'Concept Length Distribution', 'Concepts length', 1)\n\n # Calculating Overall Mean Average\n avg_prob = tot_prob/counter\n avg_treat = tot_treatment/counter\n avg_test = tot_test/counter\n\n print(\"Total Concepts: \" + str(len(super_len_list)))\n print(\"Total Files: \" + str(counter))\n print(\"Total Problem concepts in Dataset: \" + \"{:.0f}\".format(tot_prob))\n print(\"Average Problem concepts per file in Dataset: \" + \"{:.2f}\".format(avg_prob))\n print(\"Total Treatment concepts in Dataset: \" + \"{:.0f}\".format(tot_treatment))\n 
print(\"Average Treatment concepts per file in Dataset: \" + \"{:.2f}\".format(avg_treat))\n print(\"Total Test concepts in Dataset: \" + \"{:.0f}\".format(tot_test))\n print(\"Average Test concepts per file in Dataset: \" + \"{:.2f}\".format(avg_test))" ]
[ [ "numpy.full", "numpy.asarray", "torch.nn.functional.softmax", "numpy.sum", "torch.save", "numpy.where", "torch.load", "numpy.cumsum", "torch.mean", "torch.empty", "torch.nn.CrossEntropyLoss", "numpy.in1d" ], [ "matplotlib.ticker.MultipleLocator", "numpy.asarray", "matplotlib.pyplot.xlabel", "matplotlib.pyplot.title", "matplotlib.pyplot.subplots", "matplotlib.pyplot.ylabel", "matplotlib.pyplot.show", "matplotlib.pyplot.bar" ] ]
kilsenp/person-multi-task-dataset
[ "df98f3f658c39fb3fa4ac0456f1214f7918009f6" ]
[ "models/merging_blocks.py" ]
[ "import torch\nimport torch.nn as nn\n\nclass SingleBlock(nn.Module):\n def calc_output_size(self, dimensions):\n return dimensions[self.endpoint]\n\n def __init__(self, endpoint):\n super().__init__()\n self.endpoint = endpoint\n\n\n def forward(self, outputs):\n # TODO output is also a list\n return torch.cat(outputs[self.endpoint], dim=1)\n\nclass ConcatBlock(nn.Module):\n def calc_output_size(self, dimensions):\n return sum([dimensions[endpoint] for endpoint in self.endpoints])\n\n def __init__(self, endpoints):\n super().__init__()\n self.endpoints = endpoints\n\n def forward(self, outputs):\n concat_list = []\n for endpoint in self.endpoints:\n emb = outputs[endpoint]\n if isinstance(emb, list):\n concat_list.extend(emb)\n else:\n concat_list.append(emb)\n\n return torch.cat(concat_list, dim=1)\n" ]
[ [ "torch.cat" ] ]
jasonlai777/Faster-R-CNN
[ "b5c0c18a9b5faabd4b6ef23346aff85104df7356" ]
[ "cfmap_v2.py" ]
[ "# -*- coding: utf-8 -*-\n\nimport pickle\nimport numpy as np\nimport argparse\nimport matplotlib\nimport matplotlib.pyplot as plt\nfrom mpl_toolkits.axes_grid1 import make_axes_locatable\n\ndef voc_eval(detpath,\n imagesetfile,\n classname,\n cachefile,\n ovthresh=0.5,\n csthresh=0.05):\n \"\"\"rec, prec, ap = voc_eval(detpath,\n imagesetfile,\n classname,\n [ovthresh],\n csthresh)\n\n Top level function that does the PASCAL VOC evaluation.\n\n detpath: Path to detections\n detpath.format(classname) should produce the detection results file.\n annopath: Path to annotations\n annopath.format(imagename) should be the xml annotations file.\n imagesetfile: Text file containing the list of images, one image per line.\n classname: Category name (duh)\n cachedir: Directory for caching the annotations\n [ovthresh]: Overlap threshold (default = 0.5)\n \"\"\"\n # assumes detections are in detpath.format(classname)\n # assumes annotations are in annopath.format(imagename)\n # assumes imagesetfile is a text file with each line an image name\n # cachedir caches the annotations in a pickle file\n\n # read list of images\n with open(imagesetfile, 'r') as f:\n lines = f.readlines()\n imagenames = [x.strip() for x in lines]\n\n with open(cachefile, 'rb') as f:\n recs = pickle.load(f)\n\n # extract gt objects for this class\n class_recs = {}\n npos = 0\n for imagename in imagenames:\n R = [obj for obj in recs[imagename] if obj['name'] == classname]\n bbox = np.array([x['bbox'] for x in R])\n difficult = np.array([x['difficult'] for x in R]).astype(np.bool)\n det = [False] * len(R)\n npos = npos + sum(~difficult)\n class_recs[imagename] = {'bbox': bbox,\n 'difficult': difficult,\n 'det': det}\n\n # read dets\n \n with open(detpath, 'r') as f:\n lines = f.readlines()\n\n splitlines = [x.strip().split(' ') for x in lines]\n image_ids = [x[0] for x in splitlines]\n confidence = np.array([float(x[1]) for x in splitlines])\n BB = np.array([[float(z) for z in x[2:]] for x in splitlines])\n\n \n count =0 \n if BB.shape[0] > 0:\n # sort by confidence\n sorted_ind = np.argsort(-confidence)\n sorted_scores = np.sort(-confidence)\n keep_score = [sorted_scores < -csthresh]\n sorted_ind = sorted_ind[keep_score]\n# for sc in sorted_scores:\n# print(sorted_scores.shape)\n BB = BB[sorted_ind, :]\n image_ids = [image_ids[x] for x in sorted_ind]\n #print(image_ids)\n nd = len(image_ids)\n #print(nd)\n tp = np.zeros(nd)\n fp = np.zeros(nd)\n\n # go down dets and mark TPs and FPs\n for d in range(nd):\n R = class_recs[image_ids[d]]\n bb = BB[d, :].astype(float)\n ovmax = -np.inf\n BBGT = R['bbox'].astype(float)\n\n if BBGT.size > 0:\n # compute overlaps\n # intersection\n ixmin = np.maximum(BBGT[:, 0], bb[0])\n iymin = np.maximum(BBGT[:, 1], bb[1])\n ixmax = np.minimum(BBGT[:, 2], bb[2])\n iymax = np.minimum(BBGT[:, 3], bb[3])\n iw = np.maximum(ixmax - ixmin + 1., 0.)\n ih = np.maximum(iymax - iymin + 1., 0.)\n inters = iw * ih\n\n # union\n uni = ((bb[2] - bb[0] + 1.) * (bb[3] - bb[1] + 1.) +\n (BBGT[:, 2] - BBGT[:, 0] + 1.) *\n (BBGT[:, 3] - BBGT[:, 1] + 1.) 
- inters)\n\n overlaps = inters / uni\n ovmax = np.max(overlaps)\n jmax = np.argmax(overlaps)\n\n if ovmax > ovthresh:\n if not R['difficult'][jmax]:\n if not R['det'][jmax]:\n count +=1\n R['det'][jmax] = 1\n #print(image_ids[d])\n \n\n\n return count\n\n\n\ndef heatmap(data, row_labels, col_labels, ax=None,\n cbar_kw={}, cbarlabel=\"\", **kwargs):\n \"\"\"\n Create a heatmap from a numpy array and two lists of labels.\n\n Parameters\n ----------\n data\n A 2D numpy array of shape (N, M).\n row_labels\n A list or array of length N with the labels for the rows.\n col_labels\n A list or array of length M with the labels for the columns.\n ax\n A `matplotlib.axes.Axes` instance to which the heatmap is plotted. If\n not provided, use current axes or create a new one. Optional.\n cbar_kw\n A dictionary with arguments to `matplotlib.Figure.colorbar`. Optional.\n cbarlabel\n The label for the colorbar. Optional.\n **kwargs\n All other arguments are forwarded to `imshow`.\n \"\"\"\n\n if not ax:\n ax = plt.gca()\n\n # Plot the heatmap\n im = ax.imshow(data, **kwargs)\n\n # Create colorbar\n #cbar = ax.figure.colorbar(im, ax=ax, **cbar_kw)\n #cbar.ax.set_ylabel(cbarlabel, rotation=-90, va=\"bottom\", fontsize=16)\n divider = make_axes_locatable(ax)\n cax = divider.append_axes(\"right\", size=\"5%\", pad=0.1)\n\n # We want to show all ticks...\n ax.set_xticks(np.arange(data.shape[1]))\n ax.set_yticks(np.arange(data.shape[0]))\n # ... and label them with the respective list entries.\n ax.set_xticklabels(col_labels)\n ax.set_yticklabels(row_labels)\n\n # Let the horizontal axes labeling appear on top.\n ax.tick_params(top=True, bottom=False,\n labeltop=True, labelbottom=False)\n\n # Rotate the tick labels and set their alignment.\n plt.setp(ax.get_xticklabels(), rotation=-30, ha=\"right\",\n rotation_mode=\"anchor\")\n\n # Turn spines off and create white grid.\n for edge, spine in ax.spines.items():\n spine.set_visible(False)\n\n ax.set_xticks(np.arange(data.shape[1]+1)-.5, minor=True)\n ax.set_yticks(np.arange(data.shape[0]+1)-.5, minor=True)\n ax.set_xlabel(\"Predicted Classes\", fontsize=16, weight='bold')\n ax.set_ylabel(\"Actual Classes\", fontsize=16, weight='bold')\n ax.xaxis.set_label_position('top') \n ax.grid(which=\"minor\", color=\"w\", linestyle='-', linewidth=3)\n ax.tick_params(which=\"minor\", bottom=False, left=False)\n \n return im, cax\n\n\ndef annotate_heatmap(im, data=None, valfmt=\"{x:.2f}\",\n textcolors=[\"black\", \"white\"],\n threshold=None, **textkw):\n \"\"\"\n A function to annotate a heatmap.\n\n Parameters\n ----------\n im\n The AxesImage to be labeled.\n data\n Data used to annotate. If None, the image's data is used. Optional.\n valfmt\n The format of the annotations inside the heatmap. This should either\n use the string format method, e.g. \"$ {x:.2f}\", or be a\n `matplotlib.ticker.Formatter`. Optional.\n textcolors\n A list or array of two color specifications. The first is used for\n values below a threshold, the second for those above. Optional.\n threshold\n Value in data units according to which the colors from textcolors are\n applied. If None (the default) uses the middle of the colormap as\n separation. 
Optional.\n **kwargs\n All other arguments are forwarded to each call to `text` used to create\n the text labels.\n \"\"\"\n\n if not isinstance(data, (list, np.ndarray)):\n data = im.get_array()\n\n # Normalize the threshold to the images color range.\n if threshold is not None:\n threshold = im.norm(threshold)\n else:\n threshold = im.norm(data.max())/2.\n\n # Set default alignment to center, but allow it to be\n # overwritten by textkw.\n kw = dict(horizontalalignment=\"center\",\n verticalalignment=\"center\")\n kw.update(textkw)\n\n # Get the formatter in case a string is supplied\n if isinstance(valfmt, str):\n valfmt = matplotlib.ticker.StrMethodFormatter(valfmt)\n\n # Loop over the data and create a `Text` for each \"pixel\".\n # Change the text's color depending on the data.\n texts = []\n for i in range(data.shape[0]):\n for j in range(data.shape[1]):\n kw.update(color=textcolors[int(im.norm(data[i, j]) > threshold)])\n text = im.axes.text(j, i, valfmt(data[i, j], None), **kw)\n texts.append(text)\n\n return texts\n\n\n\n \ndef parse_args():\n \"\"\"\n Parse input arguments\n \"\"\"\n parser = argparse.ArgumentParser(description='Train a Fast R-CNN network')\n parser.add_argument('--use_voc_class', dest='use_default_cls',\n help='whether sort classes like voc',\n action='store_true') \n parser.add_argument('--ovthr', dest='ovthr',\n help='IoU threshold',\n default=0.5, type=float) \n parser.add_argument('--csthr', dest='csthr',\n help='confidence score threshold',\n default=0.8, type=float) \n parser.add_argument('--out', dest='out_file',\n help='save cfmap file',\n default='cfmap.csv', type=str)\n parser.add_argument('--gt_file', dest='gt_file',\n help='ground truth pickle file',\n default='', type=str)\n parser.add_argument('--test_file', dest='test_file',\n help='VOC test.txt path',\n default='', type=str)\n parser.add_argument('--result_path', dest='result_path',\n help='result detect txt file dir path',\n default='', type=str)\n args = parser.parse_args()\n \n return args\n \n \ndef valfmt(x, pos):\n if x < 0.01:\n return \"0\"\n return '{:.2f}'.format(x)\n## -------------------------------------------------------------\ndef main(classes, ignore_cls, args):\n num =0\n result_path = args.result_path\n test_file = args.test_file\n gt_file = args.gt_file\n \n cfmap = np.zeros((len(classes)+1,len(classes)))\n\n for i, detcls in enumerate(classes):\n if detcls == '__background__' or detcls in ignore_cls:\n continue\n det_file = result_path +\"comp4_det_\"+ 'test_{}.txt'.format(detcls)\n num_sum = 0\n for j, cls in enumerate(classes):\n if cls == '__background__' :\n continue \n num = voc_eval(\n det_file, test_file, cls, gt_file,\n ovthresh=args.ovthr, csthresh=args.csthr)\n #print(num)\n cfmap[j][i] = num\n num_sum += num\n # count background number\n with open(det_file, 'r') as f:\n lines = f.readlines()\n splitlines = [x.strip().split(' ') for x in lines]\n confidence = np.array([float(x[1]) for x in splitlines])\n keep = [c for c in confidence if c > 0.8]\n cfmap[j+1][i] = len(keep)- num_sum\n #print(cfmap[j+1][i])\n \n \n cfmap = np.delete(cfmap, 0, axis=0)\n cfmap = np.delete(cfmap, 0, axis=1)\n \n sum_of_col = np.sum(cfmap,axis = 0)\n for i in range(len(classes)-1):\n for j in range(len(classes)):\n cfmap[j][i] = round(cfmap[j][i] / sum_of_col[i], 2)\n \n fig, ax = plt.subplots(figsize=(10,10))\n #print(cfmap.shape, cfmap[1:][1:].shape)\n im, cax = heatmap(cfmap, classes[1:]+(\"Background\",), classes[1:], ax=ax,\n cmap='GnBu', cbarlabel=\"Probalility\")\n texts = 
annotate_heatmap(im, valfmt=valfmt)\n \n fig.tight_layout()\n plt.colorbar(im, cax=cax)\n #plt.show() \n plt.savefig('CF_matrix_T.png') \n \n \nif __name__ == '__main__':\n \n args = parse_args()\n \n args.gt_file = './data/VOCdevkit2007/VOC2007/ImageSets/Main/test.txt_annots.pkl'\n args.test_file = './data/VOCdevkit2007/VOC2007/ImageSets/Main/test.txt'\n args.result_path = './data/VOCdevkit2007/results/VOC2007/Main/'\n \n # classes = ('__background__', # always index 0\n # 'A.bes(H)','A.bes(T)','A.bes','A.bic(H)','A.bic(T)','A.bic',\n # 'A.fuj(H)','A.fuj(T)','A.fuj','B.xyl(H)','B.xyl(T)','B.xyl',\n # 'C.ele(H)','C.ele(T)','C.ele','M.ent(H)','M.ent(T)','M.ent',\n # 'M.gra(H)','M.gra(T)','M.gra','M.inc(H)','M.inc(T)','M.inc',\n # 'P.cof(H)','P.cof(T)','P.cof','P.vul(H)','P.vul(T)','P.vul',\n # 'P.spe(H)','P.spe(T)','P.spe','H.sp(H)','H.sp(T)','H.sp',\n # 'M.ams(H)' ,'M.ams(T)','M.ams')###################\n \n classes = ('__background__', # always index 0\n 'A.bes(T)','A.bic(T)','A.fuj(T)','B.xyl(T)',\n 'C.ele(T)','M.ent(T)','M.gra(T)','M.inc(T)',\n 'P.cof(T)','P.vul(T)','P.spe(T)','H.sp(T)',\n 'M.ams(T)')###################\n \n ignore_cls = []\n \n main(classes, ignore_cls, args)\n " ]
[ [ "numpy.max", "numpy.delete", "matplotlib.pyplot.colorbar", "numpy.array", "numpy.zeros", "numpy.minimum", "numpy.sum", "matplotlib.pyplot.savefig", "matplotlib.pyplot.subplots", "numpy.arange", "numpy.sort", "numpy.argsort", "numpy.maximum", "numpy.argmax", "matplotlib.pyplot.gca", "matplotlib.ticker.StrMethodFormatter" ] ]
bladmorv/lantz
[ "0e600df1a94102715346d0acef0657774e8ddede" ]
[ "lantz/drivers/andor/andor.py" ]
[ "# -*- coding: utf-8 -*-\r\n\"\"\"\r\n lantz.drivers.andor.andor\r\n ~~~~~~~~~~~~~~~~~~~~~~~~~\r\n\r\n Low level driver wrapping atcore andor library.\r\n\r\n\r\n Sources::\r\n\r\n - Andor Manual\r\n\r\n :copyright: 2012 by Lantz Authors, see AUTHORS for more details.\r\n :license: BSD, see LICENSE for more details.\r\n\"\"\"\r\n\r\nimport ctypes as ct\r\n\r\nfrom lantz import Driver, Feat, Action\r\nfrom lantz.errors import InstrumentError\r\nfrom lantz.foreign import LibraryDriver\r\n\r\n_ERRORS = {\r\n 0: 'SUCCESS',\r\n 1: 'AT_ERR_NOTINITIALISED',\r\n 1: 'AT_HANDLE_SYSTEM', # TODO: Check twice the same key!\r\n 2: 'AT_ERR_NOTIMPLEMENTED',\r\n 3: 'AT_ERR_READONLY',\r\n 4: 'AT_ERR_NOTREADABLE',\r\n 5: 'AT_ERR_NOTWRITABLE',\r\n 6: 'AT_ERR_OUTOFRANGE',\r\n 7: 'AT_ERR_INDEXNOTAVAILABLE',\r\n 8: 'AT_ERR_INDEXNOTIMPLEMENTED',\r\n 9: 'AT_ERR_EXCEEDEDMAXSTRINGLENGTH',\r\n 10: 'AT_ERR_CONNECTION',\r\n 11: 'AT_ERR_NODATA',\r\n 12: 'AT_ERR_INVALIDHANDLE',\r\n 13: 'AT_ERR_TIMEDOUT',\r\n 14: 'AT_ERR_BUFFERFULL',\r\n 15: 'AT_ERR_INVALIDSIZE',\r\n 16: 'AT_ERR_INVALIDALIGNMENT',\r\n 17: 'AT_ERR_COMM',\r\n 18: 'AT_ERR_STRINGNOTAVAILABLE',\r\n 19: 'AT_ERR_STRINGNOTIMPLEMENTED',\r\n 20: 'AT_ERR_NULL_FEATURE',\r\n 21: 'AT_ERR_NULL_HANDLE',\r\n 22: 'AT_ERR_NULL_IMPLEMENTED_VAR',\r\n 23: 'AT_ERR_NULL_READABLE_VAR',\r\n 24: 'AT_ERR_NULL_READONLY_VAR',\r\n 25: 'AT_ERR_NULL_WRITABLE_VAR',\r\n 26: 'AT_ERR_NULL_MINVALUE',\r\n 27: 'AT_ERR_NULL_MAXVALUE',\r\n 28: 'AT_ERR_NULL_VALUE',\r\n 29: 'AT_ERR_NULL_STRING',\r\n 30: 'AT_ERR_NULL_COUNT_VAR',\r\n 31: 'AT_ERR_NULL_ISAVAILABLE_VAR',\r\n 32: 'AT_ERR_NULL_MAXSTRINGLENGTH',\r\n 33: 'AT_ERR_NULL_EVCALLBACK',\r\n 34: 'AT_ERR_NULL_QUEUE_PTR',\r\n 35: 'AT_ERR_NULL_WAIT_PTR',\r\n 36: 'AT_ERR_NULL_PTRSIZE',\r\n 37: 'AT_ERR_NOMEMORY',\r\n 100: 'AT_ERR_HARDWARE_OVERFLOW',\r\n -1: 'AT_HANDLE_UNINITIALISED'\r\n}\r\n\r\nclass Andor(LibraryDriver):\r\n\r\n LIBRARY_NAME = 'atcore.dll'\r\n\r\n def __init__(self, *args, **kwargs):\r\n super().__init__(*args, **kwargs)\r\n self.AT_H = ct.c_int()\r\n self.AT_U8 = ct.c_ubyte()\r\n self.cameraIndex = ct.c_int(0)\r\n\r\n def _patch_functions(self):\r\n internal = self.lib.internal\r\n internal.AT_Command.argtypes = [ct.c_int, ct.c_wchar_p, ]\r\n\r\n internal.AT_GetInt.argtypes = [ct.c_int, ct.c_wchar_p, ct.addressof(ct.c_longlong)]\r\n internal.AT_SetInt.argtypes = [ct.c_int, ct.c_wchar_p, ct.c_longlong]\r\n\r\n internal.AT_GetFloat.argtypes = [ct.c_int, ct.c_wchar_p, ct.addressof(ct.c_double)]\r\n internal.AT_SetFloat.argtypes = [ct.c_int, ct.c_wchar_p, ct.c_double]\r\n\r\n internal.AT_GetBool.argtypes = [ct.c_int, ct.c_wchar_p, ct.addressof(ct.c_bool)]\r\n internal.AT_SetBool.argtypes = [ct.c_int, ct.c_wchar_p, ct.c_bool]\r\n\r\n internal.AT_GetEnumerated.argtypes = [ct.c_int, ct.c_wchar_p, ct.addressof(ct.c_int)]\r\n internal.AT_SetEnumerated.argtypes = [ct.c_int, ct.c_wchar_p, ct.c_int]\r\n\r\n internal.AT_SetEnumString.argtypes = [ct.c_int, ct.c_wchar_p, ct.c_wchar_p]\r\n\r\n def _return_handler(self, func_name, ret_value):\r\n if ret_value != 0:\r\n raise InstrumentError('{} ({})'.format(ret_value, _ERRORS[ret_value]))\r\n return ret_value\r\n\r\n def initialize(self):\r\n \"\"\"Initialise Library.\r\n \"\"\"\r\n self.lib.AT_InitialiseLibrary()\r\n self.open()\r\n\r\n def finalize(self):\r\n \"\"\"Finalise Library. 
Concluding function.\r\n \"\"\"\r\n self.close()\r\n self.lib.AT_FinaliseLibrary()\r\n\r\n @Action()\r\n def open(self):\r\n \"\"\"Open camera self.AT_H.\r\n \"\"\"\r\n camidx = ct.c_int(0)\r\n self.lib.AT_Open(camidx, ct.addressof(self.AT_H))\r\n return self.AT_H\r\n\r\n @Action()\r\n def close(self):\r\n \"\"\"Close camera self.AT_H.\r\n \"\"\"\r\n self.lib.AT_Close(self.AT_H)\r\n\r\n def is_implemented(self, strcommand):\r\n \"\"\"Checks if command is implemented.\r\n \"\"\"\r\n result = ct.c_bool()\r\n command = ct.c_wchar_p(strcommand)\r\n self.lib.AT_IsImplemented(self.AT_H, command, ct.addressof(result))\r\n return result.value\r\n\r\n def is_writable(self, strcommand):\r\n \"\"\"Checks if command is writable.\r\n \"\"\"\r\n result = ct.c_bool()\r\n command = ct.c_wchar_p(strcommand)\r\n self.lib.AT_IsWritable(self.AT_H, command, ct.addressof(result))\r\n return result.value\r\n\r\n def queuebuffer(self, bufptr, value):\r\n \"\"\"Put buffer in queue.\r\n \"\"\"\r\n value = ct.c_int(value)\r\n self.lib.AT_QueueBuffer(self.AT_H, ct.byref(bufptr), value)\r\n\r\n def waitbuffer(self, ptr, bufsize):\r\n \"\"\"Wait for next buffer ready.\r\n \"\"\"\r\n timeout = ct.c_int(20000)\r\n self.lib.AT_WaitBuffer(self.AT_H, ct.byref(ptr), ct.byref(bufsize), timeout)\r\n\r\n def command(self, strcommand):\r\n \"\"\"Run command.\r\n \"\"\"\r\n command = ct.c_wchar_p(strcommand)\r\n self.lib.AT_Command(self.AT_H, command)\r\n\r\n def getint(self, strcommand):\r\n \"\"\"Run command and get Int return value.\r\n \"\"\"\r\n result = ct.c_longlong()\r\n command = ct.c_wchar_p(strcommand)\r\n self.lib.AT_GetInt(self.AT_H, command, ct.addressof(result))\r\n return result.value\r\n\r\n def setint(self, strcommand, value):\r\n \"\"\"SetInt function.\r\n \"\"\"\r\n command = ct.c_wchar_p(strcommand)\r\n value = ct.c_longlong(value)\r\n self.lib.AT_SetInt(self.AT_H, command, value)\r\n\r\n def getfloat(self, strcommand):\r\n \"\"\"Run command and get Float return value.\r\n \"\"\"\r\n result = ct.c_double()\r\n command = ct.c_wchar_p(strcommand)\r\n self.lib.AT_GetFloat(self.AT_H, command, ct.addressof(result))\r\n return result.value\r\n\r\n def setfloat(self, strcommand, value):\r\n \"\"\"Set command with Float value parameter.\r\n \"\"\"\r\n command = ct.c_wchar_p(strcommand)\r\n value = ct.c_double(value)\r\n self.lib.AT_SetFloat(self.AT_H, command, value)\r\n\r\n def getbool(self, strcommand):\r\n \"\"\"Run command and get Bool return value.\r\n \"\"\"\r\n result = ct.c_bool()\r\n command = ct.c_wchar_p(strcommand)\r\n self.lib.AT_GetBool(self.AT_H, command, ct.addressof(result))\r\n return result.value\r\n\r\n def setbool(self, strcommand, value):\r\n \"\"\"Set command with Bool value parameter.\r\n \"\"\"\r\n command = ct.c_wchar_p(strcommand)\r\n value = ct.c_bool(value)\r\n self.lib.AT_SetBool(self.AT_H, command, value)\r\n\r\n def getenumerated(self, strcommand):\r\n \"\"\"Run command and get Enumerated return value.\r\n \"\"\"\r\n result = ct.c_int()\r\n command = ct.c_wchar_p(strcommand)\r\n self.lib.AT_GetEnumerated(self.AT_H, command, ct.addressof(result))\r\n return result.value\r\n\r\n def setenumerated(self, strcommand, value):\r\n \"\"\"Set command with Enumerated value parameter.\r\n \"\"\"\r\n command = ct.c_wchar_p(strcommand)\r\n value = ct.c_int(value) # enumerated features take an integer index, matching the AT_SetEnumerated argtypes patched above\r\n self.lib.AT_SetEnumerated(self.AT_H, command, value)\r\n\r\n def setenumstring(self, strcommand, item):\r\n \"\"\"Set command with EnumeratedString value parameter.\r\n \"\"\"\r\n command = ct.c_wchar_p(strcommand)\r\n item = 
ct.c_wchar_p(item)\r\n self.lib.AT_SetEnumString(self.AT_H, command, item)\r\n \r\n def flush(self):\r\n self.lib.AT_Flush(self.AT_H)\r\n\r\nif __name__ == '__main__':\r\n import numpy as np\r\n import ctypes as ct\r\n from andor import Andor\r\n from matplotlib import pyplot as plt\r\n\r\n with Andor() as andor:\r\n andor.flush()\r\n width = andor.getint(\"SensorWidth\")\r\n height = andor.getint(\"SensorHeight\")\r\n length = width * height\r\n\r\n #andor.setenumerated(\"FanSpeed\", 2)\r\n andor.getfloat(\"SensorTemperature\")\r\n andor.setfloat(\"ExposureTime\", 0.001)\r\n andor.setenumstring(\"PixelReadoutRate\", \"100 MHz\")\r\n andor.setenumstring(\"PixelEncoding\", \"Mono32\")\r\n #andor.setenumstring(\"PixelEncoding\", \"Mono16\")\r\n\r\n imagesizebytes = andor.getint(\"ImageSizeBytes\")\r\n\r\n # zero-initialised byte buffer of the requested size (create_string_buffer needs an int or bytes in Python 3)\r\n userbuffer = ct.create_string_buffer(imagesizebytes)\r\n andor.queuebuffer(userbuffer, imagesizebytes)\r\n\r\n imsize = ct.c_int(1)\r\n ubuffer = ct.create_string_buffer(1)\r\n\r\n andor.command(\"AcquisitionStart\")\r\n andor.waitbuffer(ubuffer, imsize)\r\n andor.command(\"AcquisitionStop\")\r\n andor.flush()\r\n\r\n image = np.frombuffer(userbuffer, dtype=np.uint32, count=length)\r\n #image = np.frombuffer(userbuffer, dtype=np.uint16, count=length)\r\n image.shape = (height, width)\r\n\r\n im = plt.imshow(image, cmap = 'gray')\r\n plt.show()\r\n\r\n print(image.min(), image.max(), image.mean())\r\n\r\n" ]
[ [ "matplotlib.pyplot.show", "numpy.frombuffer", "matplotlib.pyplot.imshow" ] ]
pimajor/py-simulitis
[ "6e0f2d419c28c47dcb5f9b6ee90c6b466e47204a" ]
[ "step_Report.py" ]
[ "import pandas as pd\nimport matplotlib.pyplot as plt\nimport matplotlib.dates as mdates\nimport datetime as dt\nfrom matplotlib.dates import DateFormatter\n\n\npath = \"C:\\\\Users\\\\pierre\\\\Downloads\\\\files\\\\Download\\\\samsunghealth_pierre.major_202008021218\\\\com.samsung.shealth.step_daily_trend.202008021218.csv\"\n\ndf = pd.read_csv(path, skiprows=1,index_col=False)\nprint(df.info())\n# print(df[\"com.samsung.health.step_count.count\"].head())\n\n\ndf=df.drop(columns = [\"binning_data\",\"datauuid\",\"source_pkg_name\",\"pkg_name\",\"deviceuuid\"])\n\n\ndf[\"datetime\"]=pd.to_datetime(df[\"day_time\"]/1000, unit = 's')\n\n# df[\"datetime\"]=df[\"datetime\"].apply(lambda x: x-dt.timedelta(hours=x.hour+1) if x.hour < 2 else x)\n# df[\"end_datetime\"]=pd.to_datetime(df[\"end_time\"]/1000, unit = 's')\n\n# df[\"diff\"]=(df[\"end_time\"] - df[\"start_time\"])/3600000\n\n# df_zero=df.loc[df[\"diff\"]<1]\n# print(df_zero.info())\n\n# exit()\n# df_j=df.loc[ df[\"datetime\"] > dt.datetime.strptime(\"2020-06-12 11:59:24\",'%Y-%m-%d %H:%M:%S')]\n# df_j=df_j.loc[ df_j[\"datetime\"] < dt.datetime.strptime(\"2020-06-14 12:59:24\",'%Y-%m-%d %H:%M:%S')]\n# print(df_j.describe())\n# print(df_j.head())\n\n# df.index = df[\"datetime\"]\n\n# # print(df.head())\n\n# dfm =df.resample('D').sum()\n\n\n# print(df[\"diff\"].describe())\nfig, ax = plt.subplots()\ndf.plot.scatter(x=\"datetime\",y=\"count\")\ndate_form = DateFormatter(\"%y-%m-%d\")\nax.xaxis.set_major_formatter(date_form)\nax.xaxis.set_major_locator(mdates.MonthLocator(interval=1))\nplt.show()\n\nprint(df[\"count\"].loc[df[\"count\"]>0].describe())\nprint(df[\"count\"].describe())" ]
[ [ "pandas.to_datetime", "matplotlib.dates.DateFormatter", "matplotlib.dates.MonthLocator", "matplotlib.pyplot.subplots", "matplotlib.pyplot.show", "pandas.read_csv" ] ]
wallfacerlogic/CNN-Style
[ "206389ff5f5f6ad9a702270df0292a1424944efd" ]
[ "eval.py" ]
[ "# coding: utf-8\nfrom __future__ import print_function\nimport tensorflow as tf\nfrom preprocessing import preprocessing_factory\nimport reader\nimport model\nimport time\nimport os\n\ntf.app.flags.DEFINE_string('loss_model', 'vgg_16', 'The name of the architecture to evaluate. '\n 'You can view all the support models in nets/nets_factory.py')\ntf.app.flags.DEFINE_integer('image_size', 256, 'Image size to train.')\ntf.app.flags.DEFINE_string(\"model_file\", \"models.ckpt\", \"\")\ntf.app.flags.DEFINE_string(\"image_file\", \"a.jpg\", \"\")\n\nFLAGS = tf.app.flags.FLAGS\n\n\ndef main(_):\n\n # Get image's height and width.\n height = 0\n width = 0\n with open(FLAGS.image_file, 'rb') as img:\n with tf.Session().as_default() as sess:\n if FLAGS.image_file.lower().endswith('png'):\n image = sess.run(tf.image.decode_png(img.read()))\n else:\n image = sess.run(tf.image.decode_jpeg(img.read()))\n height = image.shape[0]\n width = image.shape[1]\n tf.logging.info('Image size: %d X %d' % (width, height))\n\n with tf.Graph().as_default():\n with tf.Session().as_default() as sess:\n\n # Read image data.\n image_preprocessing_fn, _ = preprocessing_factory.get_preprocessing(\n FLAGS.loss_model,\n is_training=False)\n image = reader.get_image(FLAGS.image_file, height, width, image_preprocessing_fn)\n\n # Add batch dimension\n image = tf.expand_dims(image, 0)\n\n generated = model.net(image, training=False)\n generated = tf.cast(generated, tf.uint8)\n\n # Remove batch dimension\n generated = tf.squeeze(generated, [0])\n\n # Restore model variables.\n saver = tf.train.Saver(tf.global_variables(), write_version=tf.train.SaverDef.V1)\n sess.run([tf.global_variables_initializer(), tf.local_variables_initializer()])\n # Use absolute path\n FLAGS.model_file = os.path.abspath(FLAGS.model_file)\n saver.restore(sess, FLAGS.model_file)\n\n # Make sure 'generated' directory exists.\n generated_file = 'generated/out.jpg'\n if os.path.exists('generated') is False:\n os.makedirs('generated')\n\n # Generate and write image data to file.\n with open(generated_file, 'wb') as img:\n start_time = time.time()\n img.write(sess.run(tf.image.encode_jpeg(generated)))\n end_time = time.time()\n tf.logging.info('Elapsed time: %fs' % (end_time - start_time))\n\n tf.logging.info('Done.')\n\n\nif __name__ == '__main__':\n tf.logging.set_verbosity(tf.logging.INFO)\n tf.app.run()\n" ]
[ [ "tensorflow.logging.set_verbosity", "tensorflow.app.flags.DEFINE_integer", "tensorflow.app.flags.DEFINE_string", "tensorflow.expand_dims", "tensorflow.Graph", "tensorflow.Session", "tensorflow.logging.info", "tensorflow.global_variables", "tensorflow.image.encode_jpeg", "tensorflow.squeeze", "tensorflow.local_variables_initializer", "tensorflow.app.run", "tensorflow.global_variables_initializer", "tensorflow.cast" ] ]
JohannesBuchner/nnest
[ "d46e1920e4aa5c0e542c327ecca9cd0629728f0a" ]
[ "examples/nested/mog4.py" ]
[ "import os\nimport sys\nimport argparse\nimport copy\n\nimport numpy as np\nimport scipy.special\n\nsys.path.append(os.getcwd())\n\n\ndef log_gaussian_pdf(theta, sigma=1, mu=0, ndim=None):\n if ndim is None:\n try:\n ndim = len(theta)\n except TypeError:\n assert isinstance(theta, (float, int)), theta\n ndim = 1\n logl = -(np.sum((theta - mu) ** 2) / (2 * sigma ** 2))\n logl -= np.log(2 * np.pi * (sigma ** 2)) * ndim / 2.0\n return logl\n\n\nclass Gaussian(object):\n\n def __init__(self, sigma=1.0, nderived=0):\n self.sigma = sigma\n self.nderived = nderived\n\n def __call__(self, theta):\n logl = log_gaussian_pdf(theta, sigma=self.sigma, mu=0)\n return logl, [0.0] * self.nderived\n\n\nclass GaussianMix(object):\n\n def __init__(self, sep=4, weights=(0.4, 0.3, 0.2, 0.1), sigma=1,\n nderived=0):\n assert len(weights) in [2, 3, 4], (\n 'Weights must have 2, 3 or 4 components. Weights=' + str(weights))\n assert np.isclose(sum(weights), 1), (\n 'Weights must sum to 1! Weights=' + str(weights))\n self.nderived = nderived\n self.weights = weights\n self.sigmas = [sigma] * len(weights)\n positions = []\n positions.append(np.asarray([0, sep]))\n positions.append(np.asarray([0, -sep]))\n positions.append(np.asarray([sep, 0]))\n positions.append(np.asarray([-sep, 0]))\n self.positions = positions[:len(weights)]\n\n def __call__(self, theta):\n thetas = []\n for pos in self.positions:\n thetas.append(copy.deepcopy(theta))\n thetas[-1][:2] -= pos\n logls = [(Gaussian(sigma=self.sigmas[i])(thetas[i])[0]\n + np.log(self.weights[i])) for i in range(len(self.weights))]\n logl = scipy.special.logsumexp(logls)\n return logl, [0.0] * self.nderived\n\n\ndef main(args):\n\n from nnest.nested import NestedSampler\n\n os.environ['CUDA_VISIBLE_DEVICES'] = ''\n\n g = GaussianMix()\n\n def loglike(z):\n return np.array([g(x)[0] for x in z])\n\n def transform(x):\n return 10. * x\n\n sampler = NestedSampler(args.x_dim, loglike, transform=transform, log_dir=args.log_dir, num_live_points=args.num_live_points,\n hidden_dim=args.hidden_dim, num_layers=args.num_layers, num_blocks=args.num_blocks, num_slow=args.num_slow)\n sampler.run(train_iters=args.train_iters, mcmc_steps=args.mcmc_steps, volume_switch=args.switch, noise=args.noise)\n\n\nif __name__ == '__main__':\n\n parser = argparse.ArgumentParser()\n\n parser.add_argument('--x_dim', type=int, default=5,\n help=\"Dimensionality\")\n parser.add_argument('--train_iters', type=int, default=50,\n help=\"number of train iters\")\n parser.add_argument(\"--mcmc_steps\", type=int, default=0)\n parser.add_argument(\"--num_live_points\", type=int, default=1000)\n parser.add_argument('--switch', type=float, default=-1)\n parser.add_argument('--hidden_dim', type=int, default=128)\n parser.add_argument('--num_layers', type=int, default=2)\n parser.add_argument('--batch_size', type=int, default=100)\n parser.add_argument('-use_gpu', action='store_true')\n parser.add_argument('--flow', type=str, default='nvp')\n parser.add_argument('--num_blocks', type=int, default=5)\n parser.add_argument('--noise', type=float, default=-1)\n parser.add_argument(\"--test_samples\", type=int, default=0)\n parser.add_argument('--run_num', type=str, default='')\n parser.add_argument('--num_slow', type=int, default=0)\n parser.add_argument('--log_dir', type=str, default='logs/mog4')\n\n args = parser.parse_args()\n main(args)\n" ]
[ [ "numpy.sum", "numpy.asarray", "numpy.log" ] ]
Odiurd/deep-reinforcement-learning-collaboration-and-competition
[ "f9c95c498fa385777ea3b27c58349a0d40824736" ]
[ "train.py" ]
[ "from unityagents import UnityEnvironment\nimport numpy as np\nfrom collections import deque\nfrom ddpg_agent import Agent\nimport torch\nimport matplotlib.pyplot as plt\nfrom time import strftime, gmtime\n\n\nENV_PATH = \"Tennis_Windows_x86_64/Tennis.exe\"\nACTOR_CHECKPOINT_NAME = 'checkpoint_actor.pth'\nCRITIC_CHECKPOINT_NAME = 'checkpoint_critic.pth'\nIMAGE_NAME = 'scores.png'\nTARGET_SCORE = 0.5\nGRAPHICS_OFF = True\n\n\ndef plot(scores, IMAGE_NAME):\n fig = plt.figure()\n ax = fig.add_subplot(111)\n plt.plot(np.arange(len(scores)), scores)\n plt.ylabel('Score')\n plt.xlabel('Episode #')\n plt.savefig('img/{}'.format(IMAGE_NAME)) \n \n \ndef ddpg(n_episodes=10000, store_every=250):\n scores_deque = deque(maxlen=store_every)\n scores = []\n \n agents = Agent(state_size=state_size, action_size=action_size, num_agents=num_agents, random_seed=0)\n \n for i_episode in range(1, n_episodes+1):\n env_info = env.reset(train_mode=GRAPHICS_OFF)[brain_name]\n state = env_info.vector_observations\n agents.reset()\n score = np.zeros(num_agents)\n while True:\n action = agents.act(state)\n \n env_info = env.step(action)[brain_name]\n next_state = env_info.vector_observations\n rewards = env_info.rewards\n dones = env_info.local_done\n \n agents.step(state, action, rewards, next_state, dones)\n state = next_state\n score += rewards\n \n if np.any(dones):\n break \n scores_deque.append(np.mean(score))\n scores.append(np.mean(score))\n avg_score = np.mean(scores_deque)\n \n print('\\rEpisode {}\\tAverage Score: {:.3f}\\tScore: {:.3f}\\t {}'.format(i_episode,\n np.mean(scores_deque), np.mean(score),\n strftime(\"%H:%M:%S\", gmtime())), end=\"\") \n if i_episode % store_every == 0 or avg_score >= TARGET_SCORE:\n print('\\rEpisode {}\\tAverage Score: {:.3f}'.format(i_episode, avg_score))\n \n if avg_score >= TARGET_SCORE:\n torch.save(agents.actor_local.state_dict(), \"ckpt/{}\".format(ACTOR_CHECKPOINT_NAME))\n torch.save(agents.critic_local.state_dict(), \"ckpt/{}\".format(CRITIC_CHECKPOINT_NAME)) \n break\n \n return scores \n\n\nenv = UnityEnvironment(file_name=ENV_PATH, no_graphics=GRAPHICS_OFF)\nbrain_name = env.brain_names[0]\nbrain = env.brains[brain_name]\n\nenv_info = env.reset(train_mode=GRAPHICS_OFF)[brain_name]\nnum_agents = len(env_info.agents)\naction_size = brain.vector_action_space_size\nstates = env_info.vector_observations\nstate_size = states.shape[1]\n\nprint('Number of agents: {}'.format(num_agents))\nprint('Number of actions: {}'.format(action_size))\nprint('Number of states: {}'.format(state_size))\n\nprint('First state: {}'.format(states[0]))\n\n\nif torch.cuda.is_available():\n print(\"trainining on GPU\")\nelse:\n print(\"training on CPU\")\n \nprint('Training start time: {}'.format(strftime(\"%H:%M:%S\", gmtime())))\n\nscores_tot = ddpg()\nplot(scores_tot, IMAGE_NAME)\nenv.close()\n\nprint('\\nTraining end time: {}'.format(strftime(\"%H:%M:%S\", gmtime())))\n" ]
[ [ "numpy.zeros", "matplotlib.pyplot.xlabel", "numpy.mean", "matplotlib.pyplot.figure", "numpy.any", "torch.cuda.is_available", "matplotlib.pyplot.ylabel" ] ]
jacubillos10/ProyectoDeGradoFisica
[ "6af56217069d0170b164a7eb4a09ef1262a97aee" ]
[ "MinP_fnpeaks1.py" ]
[ "#!/usr/bin/env/python\n#-*- coding: utf-8 -*-\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport csv\nimport sys\nimport os\n\ntipo_estrella=\"RR_Lyrae\";\nnumero_estrella='01981';\nporc_ini=0.99;\n\n\n#-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------\nif tipo_estrella=='Cefeida' or tipo_estrella==0:\n\tlabel_path='Datos/'+'1_Cefeidas'+'/I/OGLE-LMC-CEP-';\n\tnombre_res='OGLE-LMC-CEP-';\n\tlosRangos=' 0.005 2.5 0.00002495';\n\tcol_per=4;\n\tcol_ID=0;\n\t#numero_estrella=vecCep;\nelif tipo_estrella=='RR_Lyrae' or tipo_estrella==1:\n\tlabel_path='Datos/'+'2_RR_Lyrae'+'/I/OGLE-LMC-RRLYR-';\n\tnombre_res='OGLE-LMC-RRLYR-';\n\tlosRangos=' 0.8333 5 0.00004167';\n\tcol_per=7;\n\tcol_ID=1;\n\t#numero_estrella=vecRRLyr;\nelse:\n\tlabel_path='Datos/'+'3_BinariasEclipsantes'+'/I/OGLE-LMC-ECL-';\n\tnombre_res='OGLE-LMC-ECL-';\n\tlosRangos=' 0.002 2.5 0.00002498';\n\tcol_per=10;\n\tcol_ID=2;\n\t#numero_estrella=vecECL;\n#fin if \nextension='.dat';\nextensionMax='.max';\nperiodos=np.genfromtxt('numero_estrellas.csv',delimiter=',',skip_header=1, usecols=col_per);\nIDs=np.loadtxt('numero_estrellas.csv',delimiter=',',dtype='str', skiprows=1, usecols=col_ID);\n\nelSeniorArchivoOR=label_path+numero_estrella+extension;\ndatos_originales=np.genfromtxt(elSeniorArchivoOR,delimiter=' ');\nN_tot=len(datos_originales[:,0]);\npaso_min=1/N_tot;\nperiodo_halladoP1=periodos[IDs==numero_estrella][0];\nmaxBusq=int(porc_ini*N_tot);\ncBusq=0;\nporc=porc_ini;\ntolP=1e-3;\nencontro=False; \nperiodo_new=0;\n\nwhile encontro==False and cBusq<=maxBusq:\n\tos.system('python3 remover_puntos.py '+str(porc)+' '+str(tipo_estrella)+' '+numero_estrella);\n\telSeniorArchivo=nombre_res+numero_estrella+extension;\n\tos.system('./fnpeaks '+elSeniorArchivo+losRangos);\n\telSeniorArchivoMax=nombre_res+numero_estrella+extensionMax;\n\tdat_tp=np.genfromtxt(elSeniorArchivoMax,delimiter=' ',skip_header=9, usecols=2);\n\tfor l in range(len(dat_tp)):\n\t\tdiferenciaPc=abs(dat_tp[l]-periodo_halladoP1)/periodo_halladoP1;\n\t\tif diferenciaPc<=tolP:\n\t\t\tencontro=True; \n\t\t\tperiodo_new=dat_tp[l];\n\t\t#fin if\n\t#fin for \n\tporc=porc-paso_min;\n\tcBusq=cBusq+1;\n#fin while\n\ndatos_fin=np.genfromtxt(elSeniorArchivo,delimiter=' ');\nN_datos_fin=len(datos_fin[:,0]);\n\nprint(\"El porcentaje donde funcionó es: \",porc);\nprint(\"Eso equivale a :\",N_datos_fin,\" puntos\");\nprint(\"El periodo con esos puntos es: \", periodo_new); \n" ]
[ [ "numpy.loadtxt", "numpy.genfromtxt" ] ]
binghong-ml/multiobj-rationale
[ "735916854fba1886730ecac306dd509e930d67bd" ]
[ "multiobj_rationale/fuseprop/inc_graph.py" ]
[ "import torch\nimport rdkit.Chem as Chem\nimport networkx as nx\n\nfrom multiobj_rationale.fuseprop.mol_graph import MolGraph\nfrom multiobj_rationale.fuseprop.chemutils import *\nfrom collections import defaultdict\n\nclass IncBase(object):\n\n def __init__(self, batch_size, node_fdim, edge_fdim, max_nodes, max_edges, max_nb):\n self.max_nb = max_nb\n self.graph = nx.DiGraph()\n self.graph.add_node(0) #make sure node is 1 index\n self.edge_dict = {None : 0} #make sure edge is 1 index\n\n self.fnode = torch.zeros(max_nodes * batch_size, node_fdim).cuda()\n self.fmess = self.fnode.new_zeros(max_edges * batch_size, edge_fdim)\n self.agraph = self.fnode.new_zeros(max_nodes * batch_size, max_nb).long()\n self.bgraph = self.fnode.new_zeros(max_edges * batch_size, max_nb).long()\n\n def add_node(self, feature):\n idx = len(self.graph)\n if idx >= len(self.fnode) - 1:\n self.fnode = torch.cat([self.fnode, self.fnode * 0], dim=0)\n self.agraph = torch.cat([self.agraph, self.agraph * 0], dim=0)\n\n self.graph.add_node(idx)\n self.fnode[idx, :len(feature)] = feature\n return idx\n\n def can_expand(self, idx):\n return self.graph.in_degree(idx) < self.max_nb\n\n def add_edge(self, i, j, feature=None):\n if (i,j) in self.edge_dict: \n return self.edge_dict[(i,j)]\n\n self.graph.add_edge(i, j)\n self.edge_dict[(i,j)] = idx = len(self.edge_dict)\n\n if idx >= len(self.fmess) - 1:\n self.fmess = torch.cat([self.fmess, self.fmess * 0], dim=0)\n self.bgraph = torch.cat([self.bgraph, self.bgraph * 0], dim=0)\n\n self.agraph[j, self.graph.in_degree(j) - 1] = idx\n if feature is not None:\n self.fmess[idx, :len(feature)] = feature\n\n in_edges = [self.edge_dict[(k,i)] for k in self.graph.predecessors(i) if k != j]\n self.bgraph[idx, :len(in_edges)] = self.fnode.new_tensor(in_edges)\n\n for k in self.graph.successors(j):\n if k == i: continue\n nei_idx = self.edge_dict[(j,k)]\n self.bgraph[nei_idx, self.graph.in_degree(j) - 2] = idx\n\n return idx\n\n\nclass IncGraph(IncBase):\n\n def __init__(self, avocab, batch_size, node_fdim, edge_fdim, max_nodes=20, max_edges=50, max_nb=6):\n super(IncGraph, self).__init__(batch_size, node_fdim, edge_fdim, max_nodes, max_edges, max_nb)\n self.avocab = avocab\n self.mol = Chem.RWMol()\n self.mol.AddAtom( Chem.Atom('C') ) #make sure node is 1 index, consistent to self.graph\n self.batch = defaultdict(list)\n self.interior_atoms = defaultdict(list)\n\n def get_mol(self):\n mol_list = [None] * len(self.batch)\n for batch_idx, batch_atoms in self.batch.items():\n mol = get_sub_mol(self.mol, batch_atoms)\n mol = sanitize(mol, kekulize=False)\n if mol is None: \n mol_list[batch_idx] = None\n else:\n for atom in mol.GetAtoms():\n atom.SetAtomMapNum(0)\n mol_list[batch_idx] = Chem.MolToSmiles(mol)\n return mol_list\n\n def get_tensors(self):\n return self.fnode, self.fmess, self.agraph, self.bgraph\n\n def add_mol(self, bid, smiles):\n mol = get_mol(smiles) #Important: must kekulize!\n root_atoms = []\n amap = {}\n for atom in mol.GetAtoms():\n symbol, charge = atom.GetSymbol(), atom.GetFormalCharge()\n nth_atom = atom.GetAtomMapNum()\n idx = self.add_atom(bid, (symbol, charge), nth_atom)\n amap[atom.GetIdx()] = idx\n if nth_atom > 0:\n root_atoms.append( (idx, nth_atom) )\n else:\n self.interior_atoms[bid].append(idx)\n\n for bond in mol.GetBonds():\n a1 = amap[bond.GetBeginAtom().GetIdx()]\n a2 = amap[bond.GetEndAtom().GetIdx()]\n bt = bond.GetBondType()\n self.add_bond(a1, a2, MolGraph.BOND_LIST.index(bt))\n\n root_atoms = sorted(root_atoms, key=lambda x:x[1])\n root_atoms = 
next(zip(*root_atoms))\n return root_atoms\n\n def add_atom(self, bid, atom_type, nth_atom=None):\n if nth_atom is None:\n nth_atom = len(self.batch[bid]) - len(self.interior_atoms[bid]) + 1\n new_atom = Chem.Atom(atom_type[0])\n new_atom.SetFormalCharge(atom_type[1])\n atom_feature = self.get_atom_feature(new_atom, nth_atom)\n aid = self.mol.AddAtom( new_atom )\n assert aid == self.add_node( atom_feature )\n self.batch[bid].append(aid)\n return aid\n \n def add_bond(self, a1, a2, bond_pred):\n assert 1 <= bond_pred <= 3\n if a1 == a2: return\n if self.can_expand(a1) == False or self.can_expand(a2) == False:\n return\n if self.mol.GetBondBetweenAtoms(a1, a2) is not None:\n return\n\n atom1, atom2 = self.mol.GetAtomWithIdx(a1), self.mol.GetAtomWithIdx(a2)\n if valence_check(atom1, bond_pred) and valence_check(atom2, bond_pred):\n bond_type = MolGraph.BOND_LIST[bond_pred]\n self.mol.AddBond(a1, a2, bond_type)\n self.add_edge( a1, a2, self.get_mess_feature(self.fnode[a1], bond_pred) )\n self.add_edge( a2, a1, self.get_mess_feature(self.fnode[a2], bond_pred) )\n \n # TOO SLOW!\n #if sanitize(self.mol.GetMol(), kekulize=False) is None:\n # self.mol.RemoveBond(a1, a2)\n # return\n\n\n def get_atom_feature(self, atom, nth_atom):\n nth_atom = min(MolGraph.MAX_POS - 1, nth_atom)\n f_atom = torch.zeros(self.avocab.size())\n f_pos = torch.zeros( MolGraph.MAX_POS )\n symbol, charge = atom.GetSymbol(), atom.GetFormalCharge()\n f_atom[ self.avocab[(symbol,charge)] ] = 1\n f_pos[ nth_atom ] = 1\n return torch.cat( [f_atom, f_pos], dim=-1 ).cuda()\n\n def get_mess_feature(self, atom_fea, bond_type):\n bond_fea = torch.zeros(len(MolGraph.BOND_LIST)).cuda()\n bond_fea[ bond_type ] = 1\n return torch.cat( [atom_fea, bond_fea], dim=-1 )\n\n" ]
[ [ "torch.zeros", "torch.cat" ] ]
Bsuniverse/NWPU-CFD
[ "b893575451e5309cd1a81cf43fda88a5935b1ebc" ]
[ "Burgers/burgers.py" ]
[ "#!/usr/bin/python\r\n#-*- coding: <encoding name> -*-\r\nimport numpy as np\r\nimport pandas as pd \r\nimport os\r\nimport matplotlib.pyplot as plt \r\nfrom mpl_toolkits.mplot3d import Axes3D\r\n\r\nclass Burgers:\r\n\t'''\r\n\tA Burgers 1-demension partial differential equation numerical solver with:\r\n\t\t- 3 inputs: viscosity ,time and iteration times N\r\n\t\t- viscosity = 0, 0.01, 0.05\r\n\t\t- time = 0.5, 1.2, 2.0\r\n\tInitial and boundary conditions are:\r\n\t\t- u(x,0) = -0.5*x, x belongs to [-1, 1]\r\n\t - u(-1, t) = 0.5, u(1, t) = -0.5\r\n\t'''\r\n\r\n\tdef __init__(self, visco, time, N):\r\n\t\t# Give the viscosity, time to stop and gird spacing.\r\n\t self.visco = visco\r\n\t self.time = time\r\n\t self.N = N\r\n\r\n\tdef discretization(self):\r\n\t\t# Discretization of the calculation zone.\r\n\t\tdx = 2.0 / self.N\r\n\t\tdt = 0.0001\r\n\t\treturn (dx, dt)\r\n\r\n\tdef macCormack(self, u):\r\n\t\t'''\r\n\t\tu is a numpy array with initial condition.\r\n\t\t\t- Predictive step: us;\r\n\t\t\t- Patial of test step: up\r\n\t\t\t- Intermediate step: unew\r\n\t\t\t- Storage of all the time step: U\r\n\t\t'''\r\n\t\tdx, dt = self.discretization()\r\n\t\tnt = int(self.time / dt)\r\n\r\n\t\tus = np.zeros(self.N + 1)\r\n\t\tup = np.zeros(self.N + 1)\r\n\t\tunew = np.zeros(self.N + 1)\r\n\t\tU = np.zeros((nt + 1, self.N + 1))\r\n\t\tU[0] = u\r\n\r\n\t\tfor i in range(1, nt + 1):\r\n\t\t\t# Predictive step\r\n\t\t\tus[1: -1] = u[1: -1] + (self.visco * dt / (dx) ** 2) * (u[2:] - 2.0 * u[1:-1] + u[0:-2]) \\\r\n\t\t\t- dt / (2.0 * dx) * (u[2:] ** 2 - u[1:-1] ** 2)\r\n\t\t\tus[0] = 0.5\r\n\t\t\tus[-1] = -0.5 \r\n\r\n\t\t\t# Correction step and combine step\r\n\t\t\tup[1: -1] = (self.visco / (dx) ** 2) * (us[2:] - 2 * us[1: -1] + us[0: -2]) \\\r\n\t\t\t- 1.0 / (2.0 * dx) * (us[1:-1] ** 2 - us[0: -2] ** 2)\r\n\t\t\tunew[1: -1] = 0.5 * (u[1: -1] + us[1: -1] + dt * up[1: -1])\r\n\t\t\tu[1: -1] = unew[1: -1]\r\n\r\n\t\t\tU[i] = u\r\n\r\n\t\treturn U\r\n\r\nif __name__ == '__main__':\r\n\tN = 200\r\n\tseper = os.sep \t\t# Use this seperator to satisfy both Windows and Linux usage\r\n\r\n\t# Read input file of time and viscosity\r\n\ttime_visco = pd.read_csv('input.txt', sep = ',', usecols=['time', 'mu'])\r\n\ttime = max(time_visco['time'])\r\n\tvisco = time_visco['mu']\r\n\r\n\t# Compute Burgers equation at different viscosity\r\n\tfor i in range(0, len(visco)):\r\n\t\tu = np.zeros(N + 1)\r\n\t\tx = np.zeros(N + 1)\r\n\r\n\t\tfor j in range(0, N + 1):\r\n\t\t\tu[j] = -0.5 * (-1 + j * 2 / N)\r\n\t\t\tx[j] = -1 + j * 2 / N\r\n\r\n\t\tburgers = Burgers(visco[i], time, N)\r\n\t\tuall = burgers.macCormack(u)\r\n\r\n\t\t# Save 2-D data to files\r\n\t\toutput = pd.DataFrame({'x': x, 't='+str(time_visco['time'][0]): uall[int(time_visco['time'][0] * 10000)], \\\r\n\t\t\t't='+str(time_visco['time'][1]): uall[int(time_visco['time'][1] * 10000)], \\\r\n\t\t\t't='+str(time_visco['time'][2]): uall[int(time_visco['time'][2] * 10000)]})\r\n\t\toutput.to_csv('2D' + seper + 'mu=' + str(visco[i]) + '.dat', sep='\\t', index=False)\r\n\r\n\t\t'''\r\n\t\t\tPlot 3-D Burgers results, this cost about 16 s, so I comment code below.\r\n\t\t\tIf you want to generate 3-D images by yourself, please uncomment the code below, \r\n\t\t\tand change some value of the code, to see if there are some differences between\r\n\t\t\tmy output 3-D images\r\n\t\t'''\r\n\r\n\t\t'''\r\n\t\tfig = plt.figure()\r\n\t\tax = fig.add_subplot(111, projection='3d')\r\n\t\ty = np.array([[i * 0.0001] for i in range(0, 
len(uall))])\r\n\t\tax.plot_surface(x, y, uall, cmap='viridis')\r\n\t\tax.set_xlabel('x')\r\n\t\tax.set_ylabel('t')\r\n\t\tax.set_zlabel('u')\r\n\t\tax.set_title(r'$\\mu = $' + str(visco[i]))\r\n\t\tplt.savefig('3D' + seper + 'mu=' + str(visco[i]) + '.png', bbox_inches='tight')\r\n\t\t'''" ]
[ [ "pandas.read_csv", "numpy.zeros" ] ]
bernardomig/neural
[ "7e45208de4f0be7b183977df176c8d23428c7cde", "7e45208de4f0be7b183977df176c8d23428c7cde" ]
[ "scripts/models/segmentation/icnet/train_cityscapes.py", "neural/models/segmentation/lednet.py" ]
[ "\nfrom itertools import chain\nimport cv2\nimport argparse\nimport os\nfrom logging import info\n\nimport torch\nfrom torch import nn\nfrom torch import distributed as dist\nfrom torch.utils.data import DataLoader, DistributedSampler\n\nfrom apex import amp\nfrom apex.parallel import (\n DistributedDataParallel, convert_syncbn_model)\n\nfrom ignite.engine import Events\nfrom ignite.handlers import ModelCheckpoint, global_step_from_engine\nfrom ignite.contrib.handlers import (\n create_lr_scheduler_with_warmup, CosineAnnealingScheduler, LRScheduler, )\nfrom ignite.contrib.handlers import ProgressBar\n\nimport albumentations as albu\nfrom albumentations.pytorch import ToTensorV2 as ToTensor\n\nfrom neural.models.segmentation.icnet import icnet_resnet50\nfrom neural.engines.segmentation import (\n create_segmentation_trainer, create_segmentation_evaluator)\nfrom neural.data.cityscapes import Cityscapes, MEAN, STD\nfrom neural.nn.util import DeepSupervision\n\nfrom neural.losses import OHEMLoss\n\nfrom neural.utils.training import (\n setup_distributed, get_datasets_root, create_sampler)\n\nparser = argparse.ArgumentParser()\nparser.add_argument('--batch_size', type=int, required=True)\nparser.add_argument('--learning_rate', type=float, required=True)\nparser.add_argument('--weight_decay', type=float, default=1e-4)\nparser.add_argument('--epochs', type=int, required=True)\nparser.add_argument('--crop_size', type=int, default=768)\nparser.add_argument('--state_dict', type=str, required=False)\n\nparser.add_argument('--distributed', action='store_true')\nparser.add_argument('--local_rank', type=int, default=0)\nargs = parser.parse_args()\n\ndistributed = args.distributed\nworld_size, world_rank, local_rank = setup_distributed(\n distributed, args.local_rank)\n\ndevice = torch.device('cuda')\n\ncrop_size = args.crop_size\n\ntrain_tfms = albu.Compose([\n albu.RandomScale([-0.25, 1.0], interpolation=cv2.INTER_CUBIC, always_apply=True),\n albu.RandomCrop(712, 712),\n albu.HorizontalFlip(),\n albu.HueSaturationValue(),\n albu.Normalize(\n mean=MEAN,\n std=STD,\n ),\n ToTensor(),\n])\nval_tfms = albu.Compose([\n albu.Normalize(\n mean=MEAN,\n std=STD,\n ),\n ToTensor(),\n])\n\ndataset_dir = get_datasets_root('cityscapes')\ntrain_dataset = Cityscapes(dataset_dir, split='train', transforms=train_tfms)\nval_dataset = Cityscapes(dataset_dir, split='val', transforms=val_tfms)\n\n\nsampler_args = dict(world_size=world_size,\n local_rank=local_rank,\n enable=distributed)\n\ntrain_loader = DataLoader(\n train_dataset,\n batch_size=args.batch_size,\n drop_last=True,\n num_workers=8,\n sampler=create_sampler(train_dataset, **sampler_args),\n shuffle=not distributed,\n)\nval_loader = DataLoader(\n val_dataset,\n batch_size=4,\n shuffle=False,\n drop_last=False,\n num_workers=8,\n sampler=create_sampler(val_dataset, training=False, **sampler_args),\n)\n\nmodel = icnet_resnet50(3, 19)\nmodel = DeepSupervision(model, {\n model.head.cff24.lowres: nn.Conv2d(128, 19, 1),\n model.head.cff12.lowres: nn.Conv2d(128, 19, 1),\n})\n\n\nif args.state_dict is not None:\n state_dict = torch.load(args.state_dict, map_location='cpu')\n model.load_state_dict(state_dict, strict=True)\n\n\nmodel = model.to(device)\n\n\ndef non_wd_params(params):\n for p in params:\n if len(p.shape) == 1:\n yield p\n\n\ndef wd_params(params):\n for p in params:\n if len(p.shape) != 1:\n yield p\n\n\npretrained_parameters = model.module.encoder.context.parameters()\nnon_pretrained_parameters = chain(\n model.auxiliary.parameters(),\n 
model.module.encoder.spatial.parameters(),\n model.module.head.parameters(),\n model.module.classifier.parameters()\n)\n\noptimizer = torch.optim.SGD(\n [\n # encoder parameters\n {'params': wd_params(pretrained_parameters),\n 'weight_decay': args.weight_decay},\n {'params': non_wd_params(pretrained_parameters)},\n {'params': wd_params(non_pretrained_parameters),\n 'weight_decay': args.weight_decay},\n {'params': non_wd_params(non_pretrained_parameters), }\n ],\n lr=args.learning_rate,\n momentum=0.9,\n)\n\n# ohem_fn = OHEMLoss(ignore_index=255).cuda()\n# loss_fn = ohem_fn\n\n\nclass_freq = torch.from_numpy(Cityscapes.CLASS_FREQ).float()\nweight = 1 / torch.log(1.02 + class_freq)\nloss_fn = torch.nn.CrossEntropyLoss(ignore_index=255, weight=weight)\n# loss_fn = OHEMLoss(ignore_index=255)\nloss_fn = loss_fn.cuda()\n\n\ndef aux_loss(y_pred, y):\n from torch.nn.functional import interpolate\n y_pred = interpolate(y_pred, size=y.shape[1:], mode='bilinear', align_corners=True)\n return loss_fn(y_pred, y)\n\n\ndef supervised_loss_fn(y_pred, y):\n y_pred, aux_y_pred = y_pred\n return \\\n loss_fn(y_pred, y) \\\n + 0.4 * sum((aux_loss(y_pred, y) for y_pred in aux_y_pred))\n\n\nscheduler1 = CosineAnnealingScheduler(\n optimizer, param_name='lr',\n start_value=args.learning_rate / 10, end_value=args.learning_rate / 10 * 1e-4,\n cycle_size=args.epochs * len(train_loader) - 1000,\n param_group_index=0,\n)\nscheduler1 = create_lr_scheduler_with_warmup(scheduler1, 0, args.learning_rate / 10, 1000)\nscheduler2 = CosineAnnealingScheduler(\n optimizer, param_name='lr',\n start_value=args.learning_rate / 10, end_value=args.learning_rate / 10 * 1e-4,\n cycle_size=args.epochs * len(train_loader) - 1000,\n param_group_index=1,\n)\nscheduler2 = create_lr_scheduler_with_warmup(scheduler2, 0, args.learning_rate / 10, 1000)\nscheduler3 = CosineAnnealingScheduler(\n optimizer, param_name='lr',\n start_value=args.learning_rate, end_value=args.learning_rate * 1e-4,\n cycle_size=args.epochs * len(train_loader)-1000,\n param_group_index=2,\n)\nscheduler3 = create_lr_scheduler_with_warmup(scheduler3, 0, args.learning_rate, 1000)\nscheduler4 = CosineAnnealingScheduler(\n optimizer, param_name='lr',\n start_value=args.learning_rate, end_value=args.learning_rate * 1e-4,\n cycle_size=args.epochs * len(train_loader)-1000,\n param_group_index=3,\n)\nscheduler4 = create_lr_scheduler_with_warmup(scheduler4, 0, args.learning_rate, 1000)\n\nmodel, optimizer = amp.initialize(model, optimizer, opt_level=\"O2\")\nif args.distributed:\n model = convert_syncbn_model(model)\n model = DistributedDataParallel(model)\n\n\ntrainer = create_segmentation_trainer(\n model, optimizer, supervised_loss_fn,\n device=device,\n use_f16=True,\n)\n\ntrainer.add_event_handler(Events.ITERATION_COMPLETED, scheduler1)\ntrainer.add_event_handler(Events.ITERATION_COMPLETED, scheduler2)\ntrainer.add_event_handler(Events.ITERATION_COMPLETED, scheduler3)\ntrainer.add_event_handler(Events.ITERATION_COMPLETED, scheduler4)\n\n\nevaluator = create_segmentation_evaluator(\n model,\n device=device,\n num_classes=19,\n)\n\nif local_rank == 0:\n ProgressBar(persist=True).attach(trainer, ['loss'])\n ProgressBar(persist=True).attach(evaluator)\n\n\[email protected](Events.EPOCH_COMPLETED(every=5))\ndef evaluate(engine):\n evaluator.run(val_loader)\n\n\nif local_rank == 0:\n @evaluator.on(Events.COMPLETED)\n def log_results(engine):\n epoch = trainer.state.epoch\n metrics = engine.state.metrics\n miou, accuracy = metrics['miou'], metrics['accuracy']\n\n print(f'Epoch 
[{epoch}]: miou={miou}, accuracy={accuracy}')\n\n\nif local_rank == 0:\n checkpointer = ModelCheckpoint(\n dirname=os.path.join('checkpoints', 'icnet-weights'),\n filename_prefix='icnet_resnet50',\n score_name='miou',\n score_function=lambda engine: engine.state.metrics['miou'],\n n_saved=5,\n global_step_transform=global_step_from_engine(trainer),\n )\n evaluator.add_event_handler(\n Events.COMPLETED, checkpointer,\n to_save={\n 'wrapped': model if not args.distributed else model.module,\n },\n )\n\ntrainer.run(train_loader, max_epochs=args.epochs)\n", "from collections import OrderedDict\nfrom functools import partial\n\nimport torch\nfrom torch import nn\nfrom torch.nn import functional as F\n\n__all__ = [\n 'LedNet',\n 'lednet',\n]\n\n\ndef lednet(in_channels, out_channels):\n return LedNet(in_channels, out_channels)\n\n\nclass LedNet(nn.Module):\n\n def __init__(self, in_channels, out_channels):\n super(LedNet, self).__init__()\n\n self.encoder = nn.Sequential(OrderedDict([\n ('layer1', nn.Sequential(\n DownsamplingBlock(in_channels, 32),\n SSnbtBlock(32, 32, dropout_p=0.03),\n SSnbtBlock(32, 32, dropout_p=0.03),\n SSnbtBlock(32, 32, dropout_p=0.03),\n )),\n ('layer2', nn.Sequential(\n DownsamplingBlock(32, 64),\n SSnbtBlock(64, 64, dropout_p=0.03),\n SSnbtBlock(64, 64, dropout_p=0.03),\n )),\n ('layer3', nn.Sequential(\n DownsamplingBlock(64, 128),\n SSnbtBlock(128, 128),\n SSnbtBlock(128, 128, dilation=2, dropout_p=0.3),\n SSnbtBlock(128, 128, dilation=5, dropout_p=0.3),\n SSnbtBlock(128, 128, dilation=9, dropout_p=0.3),\n SSnbtBlock(128, 128, dilation=2, dropout_p=0.3),\n SSnbtBlock(128, 128, dilation=5, dropout_p=0.3),\n SSnbtBlock(128, 128, dilation=9, dropout_p=0.3),\n SSnbtBlock(128, 128, dilation=17, dropout_p=0.3),\n )),\n ]))\n\n self.decoder = APNModule(128, out_channels)\n\n def forward(self, input):\n x = self.encoder(input)\n x = self.decoder(x)\n x = F.interpolate(\n x, scale_factor=8,\n mode='bilinear', align_corners=True)\n return x\n\n\nclass APNModule(nn.Module):\n\n def __init__(self, in_channels, out_channels):\n super(APNModule, self).__init__()\n\n self.conv1 = ConvBlock(in_channels, in_channels, 3, 1, stride=2)\n self.conv2 = ConvBlock(in_channels, in_channels, 5, 2, stride=2)\n self.conv3 = ConvBlock(in_channels, in_channels, 7, 3, stride=2)\n\n self.level1 = ConvBlock(in_channels, out_channels, 1)\n self.level2 = ConvBlock(in_channels, out_channels, 1)\n self.level3 = ConvBlock(in_channels, out_channels, 1)\n self.level4 = ConvBlock(in_channels, out_channels, 1)\n self.level5 = ConvBlock(in_channels, out_channels, 1)\n\n def forward(self, input):\n scale = partial(F.interpolate, scale_factor=2,\n mode='bilinear', align_corners=True)\n\n b3 = self.conv1(input)\n b2 = self.conv2(b3)\n b1 = self.conv3(b2)\n\n b1 = self.level1(b1)\n b2 = self.level2(b2)\n b3 = self.level3(b3)\n\n x = scale(b3 + scale(b2 + scale(b1)))\n\n x = x * self.level4(input)\n\n p = F.adaptive_avg_pool2d(input, 1)\n x = x + self.level5(p)\n\n return x\n\n\nclass SSnbtBlock(nn.Module):\n\n def __init__(self, in_channels, out_channels, dilation=1, dropout_p=0.0):\n super(SSnbtBlock, self).__init__()\n\n if in_channels != out_channels:\n raise ValueError(\"input and output channels must match\")\n\n channels = in_channels // 2\n self.left = nn.Sequential(\n FactorizedConvBlock(channels, channels),\n FactorizedConvBlock(channels, channels, dilation, use_relu=False),\n )\n self.right = nn.Sequential(\n FactorizedConvBlock(channels, channels),\n FactorizedConvBlock(channels, channels, dilation, 
use_relu=False),\n )\n\n self.activation = nn.ReLU(inplace=True)\n self.dropout = nn.Dropout2d(p=dropout_p)\n\n def forward(self, input):\n left, right = torch.chunk(input, 2, 1)\n left = self.left(left)\n right = self.right(right)\n x = torch.cat([left, right], dim=1)\n x = self.dropout(x)\n x = self.activation(input + x)\n return channel_shuffle(x, 2)\n\n\nclass DownsamplingBlock(nn.Module):\n\n def __init__(self, in_channels, out_channels):\n super(DownsamplingBlock, self).__init__()\n\n self.conv = nn.Conv2d(in_channels, out_channels - in_channels,\n kernel_size=3, padding=1, stride=2)\n self.pool = nn.MaxPool2d(kernel_size=2, ceil_mode=True)\n self.bn = nn.BatchNorm2d(out_channels)\n self.relu = nn.ReLU(inplace=True)\n\n def forward(self, input):\n x = torch.cat([\n self.conv(input),\n self.pool(input),\n ], dim=1)\n x = self.bn(x)\n x = self.relu(x)\n return x\n\n\ndef ConvBlock(in_channels, out_channels, kernel_size,\n padding=0, stride=1):\n return nn.Sequential(\n nn.Conv2d(in_channels, out_channels, kernel_size,\n padding=padding, stride=stride, bias=False),\n nn.BatchNorm2d(out_channels),\n nn.ReLU(inplace=True)\n )\n\n\ndef FactorizedConvBlock(in_channels, out_channels, dilation=1, use_relu=True):\n if in_channels != out_channels:\n raise ValueError(\"input and output channels must match\")\n\n layers = [\n nn.Conv2d(\n in_channels, in_channels,\n kernel_size=(3, 1), padding=(dilation, 0),\n dilation=(dilation, 1),\n bias=True,\n ),\n nn.ReLU(inplace=True),\n nn.Conv2d(\n in_channels, in_channels,\n kernel_size=(1, 3), padding=(0, dilation),\n dilation=(1, dilation),\n bias=False,\n ),\n nn.BatchNorm2d(in_channels),\n ]\n if use_relu:\n layers += [nn.ReLU(inplace=True)]\n return nn.Sequential(*layers)\n\n\ndef channel_shuffle(x, groups):\n batch_size, channels, height, width = x.shape\n\n return x \\\n .reshape(batch_size, groups, channels // groups, height, width) \\\n .transpose(1, 2) \\\n .reshape(batch_size, channels, height, width)\n" ]
[ [ "torch.device", "torch.nn.functional.interpolate", "torch.from_numpy", "torch.nn.Conv2d", "torch.load", "torch.log", "torch.nn.CrossEntropyLoss" ], [ "torch.cat", "torch.nn.MaxPool2d", "torch.nn.Sequential", "torch.nn.functional.interpolate", "torch.nn.BatchNorm2d", "torch.nn.functional.adaptive_avg_pool2d", "torch.nn.ReLU", "torch.nn.Conv2d", "torch.nn.Dropout2d", "torch.chunk" ] ]
zpao/captum
[ "06009a0c68e56f549964a132ba9c4c4f418af7bd" ]
[ "tests/attr/test_layer_conductance.py" ]
[ "#!/usr/bin/env python3\n\nimport unittest\n\nimport torch\nfrom captum.attr._core.layer_conductance import LayerConductance\n\nfrom .helpers.basic_models import (\n BasicModel_ConvNet,\n BasicModel_MultiLayer,\n BasicModel_MultiLayer_MultiInput,\n)\nfrom .helpers.conductance_reference import ConductanceReference\nfrom .helpers.utils import assertArraysAlmostEqual, BaseTest\n\n\nclass Test(BaseTest):\n def test_simple_input_conductance(self):\n net = BasicModel_MultiLayer()\n inp = torch.tensor([[0.0, 100.0, 0.0]])\n self._conductance_test_assert(net, net.linear0, inp, [[0.0, 390.0, 0.0]])\n\n def test_simple_linear_conductance(self):\n net = BasicModel_MultiLayer()\n inp = torch.tensor([[0.0, 100.0, 0.0]], requires_grad=True)\n self._conductance_test_assert(\n net, net.linear1, inp, [[90.0, 100.0, 100.0, 100.0]]\n )\n\n def test_simple_relu_conductance(self):\n net = BasicModel_MultiLayer()\n inp = torch.tensor([[0.0, 100.0, 0.0]])\n self._conductance_test_assert(net, net.relu, inp, [[90.0, 100.0, 100.0, 100.0]])\n\n def test_simple_output_conductance(self):\n net = BasicModel_MultiLayer()\n inp = torch.tensor([[0.0, 100.0, 0.0]], requires_grad=True)\n self._conductance_test_assert(net, net.linear2, inp, [[390.0, 0.0]])\n\n def test_simple_multi_input_linear2_conductance(self):\n net = BasicModel_MultiLayer_MultiInput()\n inp1 = torch.tensor([[0.0, 10.0, 0.0]])\n inp2 = torch.tensor([[0.0, 10.0, 0.0]])\n inp3 = torch.tensor([[0.0, 5.0, 0.0]])\n self._conductance_test_assert(\n net, net.model.linear2, (inp1, inp2, inp3), [[390.0, 0.0]], (4,)\n )\n\n def test_simple_multi_input_relu_conductance(self):\n net = BasicModel_MultiLayer_MultiInput()\n inp1 = torch.tensor([[0.0, 10.0, 1.0]])\n inp2 = torch.tensor([[0.0, 4.0, 5.0]])\n inp3 = torch.tensor([[0.0, 0.0, 0.0]])\n self._conductance_test_assert(\n net, net.model.relu, (inp1, inp2), [[90.0, 100.0, 100.0, 100.0]], (inp3, 5)\n )\n\n def test_simple_multi_input_relu_conductance_batch(self):\n net = BasicModel_MultiLayer_MultiInput()\n inp1 = torch.tensor([[0.0, 10.0, 1.0], [0.0, 0.0, 10.0]])\n inp2 = torch.tensor([[0.0, 4.0, 5.0], [0.0, 0.0, 10.0]])\n inp3 = torch.tensor([[0.0, 0.0, 0.0], [0.0, 0.0, 5.0]])\n self._conductance_test_assert(\n net,\n net.model.relu,\n (inp1, inp2),\n [[90.0, 100.0, 100.0, 100.0], [100.0, 100.0, 100.0, 100.0]],\n (inp3, 5),\n )\n\n def test_matching_conv1_conductance(self):\n net = BasicModel_ConvNet()\n inp = 100 * torch.randn(1, 1, 10, 10, requires_grad=True)\n self._conductance_reference_test_assert(net, net.conv1, inp)\n\n def test_matching_pool1_conductance(self):\n net = BasicModel_ConvNet()\n inp = 100 * torch.randn(1, 1, 10, 10)\n self._conductance_reference_test_assert(net, net.pool1, inp)\n\n def test_matching_conv2_conductance(self):\n net = BasicModel_ConvNet()\n inp = 100 * torch.randn(1, 1, 10, 10, requires_grad=True)\n self._conductance_reference_test_assert(net, net.conv2, inp)\n\n def test_matching_pool2_conductance(self):\n net = BasicModel_ConvNet()\n inp = 100 * torch.randn(1, 1, 10, 10)\n self._conductance_reference_test_assert(net, net.pool2, inp)\n\n def test_matching_conv_multi_input_conductance(self):\n net = BasicModel_ConvNet()\n inp = 100 * torch.randn(4, 1, 10, 10, requires_grad=True)\n self._conductance_reference_test_assert(net, net.relu3, inp)\n\n def test_matching_conv_with_baseline_conductance(self):\n net = BasicModel_ConvNet()\n inp = 100 * torch.randn(3, 1, 10, 10)\n baseline = 100 * torch.randn(3, 1, 10, 10, requires_grad=True)\n self._conductance_reference_test_assert(net, 
net.fc1, inp, baseline)\n\n def _conductance_test_assert(\n self,\n model,\n target_layer,\n test_input,\n expected_conductance,\n additional_args=None,\n ):\n cond = LayerConductance(model, target_layer)\n for internal_batch_size in (None, 1, 20):\n attributions, delta = cond.attribute(\n test_input,\n target=0,\n n_steps=500,\n method=\"gausslegendre\",\n additional_forward_args=additional_args,\n internal_batch_size=internal_batch_size,\n return_convergence_delta=True,\n )\n delta_condition = all(abs(delta.numpy().flatten()) < 0.01)\n self.assertTrue(\n delta_condition,\n \"Sum of attributions does {}\"\n \" not match the difference of endpoints.\".format(delta),\n )\n\n for i in range(len(expected_conductance)):\n assertArraysAlmostEqual(\n attributions[i : i + 1].squeeze(0).tolist(),\n expected_conductance[i],\n delta=0.1,\n )\n\n def _conductance_reference_test_assert(\n self, model, target_layer, test_input, test_baseline=None\n ):\n layer_output = None\n\n def forward_hook(module, inp, out):\n nonlocal layer_output\n layer_output = out\n\n hook = target_layer.register_forward_hook(forward_hook)\n final_output = model(test_input)\n hook.remove()\n target_index = torch.argmax(torch.sum(final_output, 0))\n cond = LayerConductance(model, target_layer)\n cond_ref = ConductanceReference(model, target_layer)\n attributions, delta = cond.attribute(\n test_input,\n baselines=test_baseline,\n target=target_index,\n n_steps=300,\n method=\"gausslegendre\",\n return_convergence_delta=True,\n )\n delta_condition = all(abs(delta.numpy().flatten()) < 0.005)\n self.assertTrue(\n delta_condition,\n \"Sum of attribution values does {} \"\n \" not match the difference of endpoints.\".format(delta),\n )\n\n attributions_reference = cond_ref.attribute(\n test_input,\n baselines=test_baseline,\n target=target_index,\n n_steps=300,\n method=\"gausslegendre\",\n )\n\n # Check that layer output size matches conductance size.\n self.assertEqual(layer_output.shape, attributions.shape)\n # Check that reference implementation output matches standard implementation.\n assertArraysAlmostEqual(\n attributions.reshape(-1).tolist(),\n attributions_reference.reshape(-1).tolist(),\n delta=0.07,\n )\n\n # Test if batching is working correctly for inputs with multiple examples\n if test_input.shape[0] > 1:\n for i in range(test_input.shape[0]):\n single_attributions = cond.attribute(\n test_input[i : i + 1],\n baselines=test_baseline[i : i + 1]\n if test_baseline is not None\n else None,\n target=target_index,\n n_steps=300,\n method=\"gausslegendre\",\n )\n # Verify that attributions when passing example independently\n # matches corresponding attribution of batched input.\n assertArraysAlmostEqual(\n attributions[i : i + 1].reshape(-1).tolist(),\n single_attributions.reshape(-1).tolist(),\n delta=0.01,\n )\n\n\nif __name__ == \"__main__\":\n unittest.main()\n" ]
[ [ "torch.tensor", "torch.randn", "torch.sum" ] ]
ArchibaldChain/python-workspace
[ "71890f296c376155e374b2096ac3d8f1d286b7d2" ]
[ "Workspace for Python/WebCrawler/Taobao_price_comparesion.py" ]
[ "import re\nimport requests\nfrom bs4 import BeautifulSoup\nimport pandas as pd\nimport numpy as np\n\ndef getHTMLText(url):\n try:\n header = {\"user-agent\": \n \"Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/84.0.4147.105 Safari/537.36\"}\n r = requests.get(url, timeout = 20, headers = header)\n print(r.status_code)\n r.raise_for_status()\n r.encoding = r.apparent_encoding\n with open('web.html', 'w') as f:\n f.write(r.text)\n f.close\n return r.text\n\n except:\n print(\"Failed on fetch: \" + url)\n return \"\"\n\n\ndef parsePage(ilt, html):\n try:\n plt = re.findall(r'\\\"view_price\\\"\\:\\\"[\\d\\.]*\\\"', html)\n tlt = re.findall(r'\\\"raw_title\\\"\\:\\\".*?\\\"', html)\n for i in range(len(plt)): \n price = eval(plt[i].split(':')[1])\n title = eval(tlt[i].split(':')[1])\n ilt.append([price, title])\n\n except:\n print(\"failed to parse\") \n \ndef parsePage_soup(html):\n soup = BeautifulSoup(html, \"html.parser\")\n items = soup.find('div', class_ = \"m-itemlist\")\n item_list = items.find_all('div', class_ = \"item\")\n\n price_list = []\n name_list = []\n url_list = []\n for item in item_list:\n\n p = item.find('div', class_ = \"price g_price g_price-highlight\")\n price = p.strong.string.replace('\\n', '')\n price = price.replace(' ', '')\n\n price_list.append(p.span.string + price)\n text_temp = item.find('div', class_ = 'ctx-box J_MouseEneterLeave J_IconMoreNew')\n\n u = text_temp.find('a', class_ = \"J_ClickStat\")\n url_list.append(u['href'])\n name = \"\"\n for str in u.strings:\n name = name + str\n name = name.replace('\\n', '')\n name = name.replace(' ', '')\n name_list.append(name)\n\n ilt = {'price': price_list, \n 'name': name_list, \n 'url' : url_list}\n\n return ilt\n\ndef printGoodsList(ilt):\n tplt = \"{:4}\\t{:8}\\t{:16}\"\n print(tplt.format(\"序号\", \"价格¥\", \"商品名称\"))\n count = 0\n for g in ilt:\n count = count + 1\n print(tplt.format(count, g[0], g[1]))\n \ndef saveGoodsList(ilt):\n df = pd.DataFrame(ilt)\n df.to_csv(\"dress_Price_comparsion.csv\")\n\n\ndef main():\n goods = '裙子'\n depth = 2 # pages to be scratch\n start_url = 'https://s.taobao.com/search?q='+goods\n infoList = []\n # for i in range(depth):\n # try:\n # url = start_url + \"&s=\" + str(44*i)\n # html = getHTMLText(url)\n # parsePage_soup(infoList, html)\n # except:\n # print('Failed, url: ' + start_url + \"&s=\" + str(44*i))\n # continue\n\n with open(\"裙子_淘宝搜索.html\", 'r') as f:\n html = f.read()\n f.close()\n info = parsePage_soup(html)\n saveGoodsList(info)\n\n parsePage(infoList, html)\n printGoodsList(infoList)\n\nif __name__ == \"__main__\":\n main()\n" ]
[ [ "pandas.DataFrame" ] ]
jiangqn/KSTER
[ "fe59dca2d046439bbde4be9f708e4f620f089977" ]
[ "joeynmt/kernel.py" ]
[ "import torch\nfrom typing import Tuple, Union\n\nclass Kernel(object):\n\n def __init__(self) -> None:\n super(Kernel, self).__init__()\n \n def similarity(self, distances: torch.Tensor, bandwidth: Union[float, torch.Tensor]) -> torch.Tensor:\n raise NotImplementedError\n\n def compute_example_based_distribution(self, distances: torch.Tensor, bandwidth: Union[float, torch.Tensor], token_indices: torch.Tensor,\n vocab_size: int) -> Tuple[torch.Tensor, torch.Tensor]:\n scores = self.similarity(distances, bandwidth)\n sparse_distribution = torch.softmax(scores, dim=-1)\n zeros = torch.zeros(size=(sparse_distribution.size(0), vocab_size), device=sparse_distribution.device, dtype=sparse_distribution.dtype)\n distribution = torch.scatter_add(zeros, -1, token_indices, sparse_distribution)\n return distribution, sparse_distribution\n\nclass GaussianKernel(Kernel):\n\n def __init__(self) -> None:\n super(GaussianKernel, self).__init__()\n \n def similarity(self, distances: torch.Tensor, bandwidth: Union[float, torch.Tensor]) -> torch.Tensor:\n return - distances / bandwidth\n\nclass LaplacianKernel(Kernel):\n\n def __init__(self) -> None:\n super(LaplacianKernel, self).__init__()\n\n def similarity(self, distances: torch.Tensor, bandwidth: Union[float, torch.Tensor]) -> torch.Tensor:\n return - torch.sqrt(distances) / bandwidth" ]
[ [ "torch.sqrt", "torch.softmax", "torch.scatter_add" ] ]
xiankgx/taming-transformers
[ "8d154c34d88be5d41f32cbc40dd115f55d7589d9" ]
[ "taming/data/base.py" ]
[ "import bisect\nimport numpy as np\nimport albumentations\nfrom PIL import Image, ImageFile\nfrom torch.utils.data import Dataset, ConcatDataset\nImageFile.LOAD_TRUNCATED_IMAGES = True\n\n\nclass ConcatDatasetWithIndex(ConcatDataset):\n \"\"\"Modified from original pytorch code to return dataset idx\"\"\"\n def __getitem__(self, idx):\n if idx < 0:\n if -idx > len(self):\n raise ValueError(\"absolute value of index should not exceed dataset length\")\n idx = len(self) + idx\n dataset_idx = bisect.bisect_right(self.cumulative_sizes, idx)\n if dataset_idx == 0:\n sample_idx = idx\n else:\n sample_idx = idx - self.cumulative_sizes[dataset_idx - 1]\n return self.datasets[dataset_idx][sample_idx], dataset_idx\n\n\nclass ImagePaths(Dataset):\n def __init__(self, paths, size=None, random_crop=False, labels=None):\n self.size = size\n self.random_crop = random_crop\n\n self.labels = dict() if labels is None else labels\n self.labels[\"file_path_\"] = paths\n self._length = len(paths)\n\n if self.size is not None and self.size > 0:\n self.rescaler = albumentations.SmallestMaxSize(max_size = self.size)\n if not self.random_crop:\n self.cropper = albumentations.CenterCrop(height=self.size,width=self.size)\n else:\n self.cropper = albumentations.RandomCrop(height=self.size,width=self.size)\n self.preprocessor = albumentations.Compose([self.rescaler, self.cropper])\n else:\n self.preprocessor = lambda **kwargs: kwargs\n\n def __len__(self):\n return self._length\n\n def preprocess_image(self, image_path):\n image = Image.open(image_path)\n if not image.mode == \"RGB\":\n image = image.convert(\"RGB\")\n image = np.array(image).astype(np.uint8)\n image = self.preprocessor(image=image)[\"image\"]\n image = (image/127.5 - 1.0).astype(np.float32)\n return image\n\n def __getitem__(self, i):\n example = dict()\n example[\"image\"] = self.preprocess_image(self.labels[\"file_path_\"][i])\n for k in self.labels:\n example[k] = self.labels[k][i]\n return example\n\n\nclass NumpyPaths(ImagePaths):\n def preprocess_image(self, image_path):\n image = np.load(image_path).squeeze(0) # 3 x 1024 x 1024\n image = np.transpose(image, (1,2,0))\n image = Image.fromarray(image, mode=\"RGB\")\n image = np.array(image).astype(np.uint8)\n image = self.preprocessor(image=image)[\"image\"]\n image = (image/127.5 - 1.0).astype(np.float32)\n return image\n" ]
[ [ "numpy.array", "numpy.transpose", "numpy.load" ] ]
Lvigilante/correctionlib
[ "5c426e44faa34e939fd1a7cc2df95f7424498211" ]
[ "tests/test_highlevel.py" ]
[ "import pickle\n\nimport numpy\nimport pytest\n\nimport correctionlib\nfrom correctionlib import schemav2 as model\n\n\ndef test_highlevel():\n cset = correctionlib.CorrectionSet(\n model.CorrectionSet(\n schema_version=model.VERSION,\n corrections=[\n model.Correction(\n name=\"test corr\",\n version=2,\n inputs=[\n model.Variable(name=\"a\", type=\"real\"),\n model.Variable(name=\"b\", type=\"real\"),\n ],\n output=model.Variable(name=\"a scale\", type=\"real\"),\n data=1.234,\n )\n ],\n )\n )\n assert set(cset) == {\"test corr\"}\n sf = cset[\"test corr\"]\n assert sf.version == 2\n assert sf.description == \"\"\n\n with pytest.raises(RuntimeError):\n sf.evaluate(0, 1.2, 35.0, 0.01)\n\n assert sf.evaluate(1.0, 1.0) == 1.234\n numpy.testing.assert_array_equal(\n sf.evaluate(numpy.ones((3, 4)), 1.0),\n numpy.full((3, 4), 1.234),\n )\n numpy.testing.assert_array_equal(\n sf.evaluate(numpy.ones((3, 4)), numpy.ones(4)),\n numpy.full((3, 4), 1.234),\n )\n\n sf2 = pickle.loads(pickle.dumps(sf))\n assert sf2.evaluate(1.0, 1.0) == 1.234\n" ]
[ [ "numpy.full", "numpy.ones" ] ]
omarjarkas1997/Graduate-Analyst-Programmer-Code-Test
[ "f22711d1239146d57462b66edac0ee18a196b78f" ]
[ "test.py" ]
[ "import unittest\nimport pandas as pd\nimport app\n\n\nclass MoveTests(unittest.TestCase):\n\n def test_five_plus_five(self):\n assert 5 + 5 == 10\n\n def test_ont_plus_one(self):\n assert not 1+ 1 == 3\n \n def test_getUniqOS(self):\n df = pd.read_csv(r'../data.csv')\n list1 = app.getUniqOS(df)\n list2 = app.getUniqOS(df)\n self.assertEqual(list1, list2)\n self.assertIn('ANDROID', list2)\n self.assertIn('IOS', list2)\n\n def test_getUniqCountries(self):\n df = pd.read_csv(r'../data.csv')\n list1 = app.getUniqOS(df)\n list2 = app.getUniqCountries(df)\n self.assertNotEqual(list1, list2)\n self.assertIn('Brazil', list2)\n self.assertIn('India',list2)\n\n def test_indiaStates(self):\n df = pd.read_csv(r'../data.csv')\n ios_india_state_count, and_india_state_count, india_uniq_states = app.indiaStates(df)\n \n self.assertIn('Telangana', ios_india_state_count)\n self.assertIn('Punjab', and_india_state_count)\n self.assertIn('Sikkim', india_uniq_states)\n\n def test_indiaStates(self):\n df = pd.read_csv(r'../data.csv')\n brands = app.phone_brands(df)\n \n self.assertIn('Huawei', brands)\n self.assertIn('Apple', brands)\n self.assertIn('LG', brands)\n\n def test_topBrandsandCountries(self):\n df = pd.read_csv(r'../data.csv')\n countries_uniq = app.getUniqCountries(df)\n top_countries,apple,samsung,huawei = app.topBrandsandCountries(df,countries_uniq)\n\n self.assertIn('Spain',top_countries)\n self.assertListEqual([72, 70, 1478, 50, 214, 438, 205] , apple)\n self.assertListEqual([1161, 377, 30, 338, 1041, 708, 351] , samsung)\n self.assertListEqual([1, 192, 273, 231, 114, 606, 254] , huawei)\n\n\n \nif __name__ == \"__main__\":\n unittest.main()" ]
[ [ "pandas.read_csv" ] ]
hadim/pytorch-lightning
[ "89d5772f5549d383cbc4cf4ee602fd5e30db3def" ]
[ "pl_examples/full_examples/semantic_segmentation/models/unet/parts.py" ]
[ "import torch\nimport torch.nn as nn\nimport torch.nn.functional as F\n\n\nclass DoubleConv(nn.Module):\n '''\n Double Convolution and BN and ReLU\n (3x3 conv -> BN -> ReLU) ** 2\n '''\n def __init__(self, in_ch, out_ch):\n super().__init__()\n self.net = nn.Sequential(\n nn.Conv2d(in_ch, out_ch, kernel_size=3, padding=1),\n nn.BatchNorm2d(out_ch),\n nn.ReLU(inplace=True),\n nn.Conv2d(out_ch, out_ch, kernel_size=3, padding=1),\n nn.BatchNorm2d(out_ch),\n nn.ReLU(inplace=True)\n )\n\n def forward(self, x):\n return self.net(x)\n\n\nclass Down(nn.Module):\n '''\n Combination of MaxPool2d and DoubleConv in series\n '''\n def __init__(self, in_ch, out_ch):\n super().__init__()\n self.net = nn.Sequential(\n nn.MaxPool2d(kernel_size=2, stride=2),\n DoubleConv(in_ch, out_ch)\n )\n\n def forward(self, x):\n return self.net(x)\n\n\nclass Up(nn.Module):\n '''\n Upsampling (by either bilinear interpolation or transpose convolutions)\n followed by concatenation of feature map from contracting path,\n followed by double 3x3 convolution.\n '''\n def __init__(self, in_ch, out_ch, bilinear=False):\n super().__init__()\n self.upsample = None\n if bilinear:\n self.upsample = nn.Upsample(scale_factor=2, mode='bilinear', align_corners=True)\n else:\n self.upsample = nn.ConvTranspose2d(in_ch, in_ch // 2, kernel_size=2, stride=2)\n\n self.conv = DoubleConv(in_ch, out_ch)\n\n def forward(self, x1, x2):\n x1 = self.upsample(x1)\n\n # Pad x1 to the size of x2\n diff_h = x2.shape[2] - x1.shape[2]\n diff_w = x2.shape[3] - x1.shape[3]\n\n x1 = F.pad(x1, [diff_w // 2, diff_w - diff_w // 2, diff_h // 2, diff_h - diff_h // 2])\n\n # Concatenate along the channels axis\n x = torch.cat([x2, x1], dim=1)\n return self.conv(x)\n" ]
[ [ "torch.cat", "torch.nn.MaxPool2d", "torch.nn.BatchNorm2d", "torch.nn.ConvTranspose2d", "torch.nn.ReLU", "torch.nn.Upsample", "torch.nn.Conv2d", "torch.nn.functional.pad" ] ]
HendrickZhou/Chirp-EE123
[ "6d34f588d99ee8801f1c9137ad4f05e862593435" ]
[ "src/utils/Helper_functions.py" ]
[ "\"\"\"utilities funcitons for project\"\"\"\nimport sys\nimport os\nscript_path = os.path.abspath('')\nmodule_path = script_path[:script_path.rfind('src')]+ 'src' + '/'\nasset_path = script_path[:script_path.rfind('src')]+ 'asset' + '/'\nsys.path.append(module_path)\nimport numpy as np\n\nfrom PIL import Image\nimport time\nimport glob\nimport bokeh.plotting as bk\nfrom bokeh.io import push_notebook\nfrom bokeh.resources import INLINE\nfrom bokeh.models import GlyphRenderer\nimport matplotlib.pyplot as plt\n\nbk.output_notebook(INLINE)\n\nimport re\n\nnumbers = re.compile(r'(\\d+)')\n\ndef numericalSort(value):\n parts = numbers.split(value)\n parts[1::2] = map(int, parts[1::2])\n return parts\n\n\n# Tiff stack player\n\ndef Tiff_play(path, display_size = 500, frame_rate = 2):\n image_files = sorted(glob.glob(path+\"*.tiff\"), key=numericalSort)\n Nframe = len(image_files)\n\n im = Image.open(image_files[0])\n xdim, ydim= im.size\n display_array = np.zeros((Nframe,ydim,xdim,4),dtype='uint8')\n \n # load image stack\n for i in range(0,Nframe):\n im = Image.open(image_files[i])\n im = im.convert(\"RGBA\")\n imarray = np.array(im)\n display_array[i] = np.flipud(imarray)\n \n # Play video\n\n wait_time = 1/frame_rate\n normalized_size = display_size\n max_size = np.maximum(xdim,ydim)\n width = (xdim/max_size * normalized_size).astype('int')\n height = (ydim/max_size * normalized_size).astype('int')\n \n counter = 0\n first_round = True\n try:\n while True:\n if counter == 0 and first_round:\n p = bk.figure(x_range=(0,xdim), y_range=(0,ydim), plot_height = height, plot_width = width)\n p.image_rgba(image=[display_array[counter]], x=0, y=0, dw=xdim, dh=ydim, name='video')\n bk.show(p, notebook_handle=True)\n counter += 1\n first_round = False\n else:\n renderer = p.select(dict(name='video', type=GlyphRenderer))\n source = renderer[0].data_source\n source.data['image'] = [display_array[counter]]\n push_notebook()\n if counter == Nframe-1:\n counter = 0\n else:\n counter += 1\n time.sleep(wait_time)\n \n except KeyboardInterrupt:\n pass\n \n# Image_stack loader\n\ndef Tiff_load(path):\n image_files = sorted(glob.glob(path+\"*.tiff\"), key=numericalSort)\n Nframe = len(image_files)\n \n im = Image.open(image_files[0])\n xdim, ydim= im.size\n image_stack = np.zeros((Nframe,ydim,xdim,3),dtype='uint8')\n \n for i in range(0,Nframe):\n im = Image.open(image_files[i])\n image_stack[i] = np.array(im)\n \n return image_stack\n\n\ndef Tiff_show(path):\n image_files = sorted(glob.glob(path+\"*.tiff\"), key=numericalSort)\n Nframe = len(image_files)\n\n im = Image.open(image_files[0])\n xdim, ydim= im.size \n\n row = int(np.ceil(Nframe/3))\n col = 3\n fig, axs = plt.subplots(row, col, figsize = (15, 5))\n # load image stack\n for i in range(row):\n for j in range(col):\n fileIndex = i*row + j\n if fileIndex > Nframe - 1:\n imarray = np.zeros((ydim, xdim))\n else:\n im = Image.open(image_files[fileIndex])\n im = im.convert(\"RGBA\")\n imarray = np.array(im)\n if row == 1:\n axs[j].imshow(imarray)\n else:\n axs[i, j].imshow(imarray)\n\n\ndef npArray_show(npArray):\n Nframe = npArray.shape[0]\n xdim = npArray.shape[2]\n ydim = npArray.shape[1]\n row = int(np.ceil(Nframe/3))\n col = 3\n fig, axs = plt.subplots(row, col, figsize = (15, 5))\n # load image stack\n for i in range(row):\n for j in range(col):\n imIndex = i*row + j\n if imIndex > Nframe - 1:\n imarray = np.zeros((ydim, xdim))\n else:\n imarray = npArray[imIndex, :, :, :]\n if row == 1:\n axs[j].imshow(imarray)\n else:\n axs[i, 
j].imshow(imarray)\n\n\n\ndef npArray_play(npArray, display_size = 500, frame_rate = 2):\n\n Nframe = npArray.shape[0]\n xdim = npArray.shape[2]\n ydim = npArray.shape[1]\n \n # load image stack\n display_array = np.zeros((Nframe,ydim,xdim,4),dtype='uint8')\n rArray = np.empty((Nframe,ydim,xdim,3),dtype='uint8')\n \n for i in range(Nframe):\n rArray[i] = np.flipud(npArray[i])\n \n alpha_pad = 255*np.ones((Nframe,ydim,xdim,1),dtype='uint8') \n display_array = np.append(rArray, alpha_pad, axis=3)\n \n # Play video\n wait_time = 1/frame_rate\n normalized_size = display_size\n max_size = np.maximum(xdim,ydim)\n width = (xdim/max_size * normalized_size).astype('int')\n height = (ydim/max_size * normalized_size).astype('int')\n \n counter = 0\n first_round = True\n try:\n while True:\n if counter == 0 and first_round:\n p = bk.figure(x_range=(0,xdim), y_range=(0,ydim), plot_height = height, plot_width = width)\n p.image_rgba(image=[display_array[counter]], x=0, y=0, dw=xdim, dh=ydim, name='video')\n bk.show(p, notebook_handle=True)\n counter += 1\n first_round = False\n else:\n renderer = p.select(dict(name='video', type=GlyphRenderer))\n source = renderer[0].data_source\n source.data['image'] = [display_array[counter]]\n push_notebook()\n if counter == Nframe-1:\n counter = 0\n else:\n counter += 1\n time.sleep(wait_time)\n \n except KeyboardInterrupt:\n pass\n\n\n\n# Image_stack loader with ffmpeg\n\ndef imageStack_load(filename):\n path = filename[:filename.rfind('.')]+'/'\n os.system(\"rm -rf {:s}\".format(path))\n os.system(\"mkdir {:s}\".format(path))\n os.system(\"ffmpeg -i {:s} {:s}frame_%2d.tiff\".format(filename, path))\n image_files = sorted(glob.glob(path+\"*.tiff\"), key=numericalSort)\n Nframe = len(image_files)\n \n im = Image.open(image_files[0])\n xdim, ydim= im.size\n num_channel = len(im.split())\n image_stack = np.zeros((Nframe,ydim,xdim,num_channel),dtype='uint8')\n \n for i in range(0,Nframe):\n im = Image.open(image_files[i])\n image_stack[i] = np.array(im)\n \n return image_stack\n\n# Save gif with ffmpeg\n\n# def GIF_save(path, framerate):\n# os.system(\"ffmpeg -r {:d} -i {:s}frame_%2d.png -compression_level 0 -plays 0 -f apng {:s}animation.png\".format(framerate, path,path))\n\n# Compute video PSNR\n\ndef psnr(ref, meas, maxVal=255):\n assert np.shape(ref) == np.shape(meas), \"Test video must match measured vidoe dimensions\"\n\n\n dif = (ref.astype(float)-meas.astype(float)).ravel()\n mse = np.linalg.norm(dif)**2/np.prod(np.shape(ref))\n psnr = 10*np.log10(maxVal**2.0/mse)\n return psnr\n\n\ndef npArray_save(path, result, framerate, method):\n os.system(\"rm -rf {:s}\".format(path))\n os.system(\"mkdir {:s}\".format(path))\n for img in range(result.shape[0]):\n if method == \"pca\":\n plt.imsave(path+\"frame_0\"+str(img)+'.png', result[img]/np.max(result[img]))\n elif method == \"resample\":\n plt.imsave(path+\"frame_0\"+str(img)+'.png', result[img]) \n elif method == \"jpeg\":\n plt.imsave(path+\"frame_0\"+str(img)+'.png', result[img])\n os.system(\"ffmpeg -r {:d} -i {:s}frame_%2d.png -compression_level 0 -plays 0 -f apng {:s}animation.png\".format(framerate, path,path))\n\n\n\nclass TwoWayDict(dict):\n def __len__(self):\n return dict.__len__(self) / 2\n\n def __setitem__(self, key, value):\n dict.__setitem__(self, key, value)\n dict.__setitem__(self, value, key)\n" ]
[ [ "numpy.max", "numpy.array", "numpy.ceil", "numpy.empty", "numpy.linalg.norm", "numpy.zeros", "numpy.ones", "matplotlib.pyplot.subplots", "numpy.shape", "numpy.flipud", "numpy.append", "numpy.log10", "numpy.maximum" ] ]
yishantao/DailyPractice
[ "ee26859af3faf48e63d6c2850db1d895a8a88fb1" ]
[ "temp_201806/0615test.py" ]
[ "import numpy as np\nimport pandas as pd\n\n# data1 = pd.DataFrame({'a': [np.nan, 1, 2, 3], 'b': [np.nan, np.nan, 5, 6]})\ndata2 = pd.DataFrame({'a': [5, 6, 7, 8], 'b': [10, 11, 12, 13]}, index=['h', 'i', 'j', 'k'])\nprint(data2)\nprint(data2.loc['h', ['a','b']])\n" ]
[ [ "pandas.DataFrame" ] ]
sjgosai/basenji
[ "0bc02718319db12896d2854bcb2f6bd7ed67fa4a" ]
[ "bin/basenji_map.py" ]
[ "#!/usr/bin/env python\n# Copyright 2017 Calico LLC\n\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n\n# https://www.apache.org/licenses/LICENSE-2.0\n\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# =========================================================================\nfrom __future__ import print_function\n\nfrom optparse import OptionParser\nimport gc\nimport os\nimport sys\nimport time\n\nimport h5py\nimport numpy as np\nimport pyBigWig\nfrom scipy.stats import ttest_1samp\nimport tensorflow as tf\n\nfrom basenji import batcher\nfrom basenji import genedata\nfrom basenji import params\nfrom basenji import seqnn\n\nfrom basenji_test import bigwig_open\n\n'''\nbasenji_map.py\n\nVisualize a sequence's prediction's gradients as a map of influence across\nthe genomic region.\n'''\n\n################################################################################\n# main\n################################################################################\ndef main():\n usage = 'usage: %prog [options] <params_file> <model_file> <genes_hdf5_file>'\n parser = OptionParser(usage)\n parser.add_option('-g', dest='genome_file',\n default='%s/data/human.hg38.genome' % os.environ['BASENJIDIR'],\n help='Chromosome lengths file [Default: %default]')\n parser.add_option('-l', dest='gene_list',\n help='Process only gene ids in the given file')\n parser.add_option('--mc', dest='mc_n',\n default=0, type='int',\n help='Monte carlo test iterations [Default: %default]')\n parser.add_option('-n', dest='norm',\n default=None, type='int',\n help='Compute saliency norm [Default% default]')\n parser.add_option('-o', dest='out_dir',\n default='grad_map',\n help='Output directory [Default: %default]')\n parser.add_option('--rc', dest='rc',\n default=False, action='store_true',\n help='Average the forward and reverse complement predictions when testing [Default: %default]')\n parser.add_option('--shifts', dest='shifts',\n default='0',\n help='Ensemble prediction shifts [Default: %default]')\n parser.add_option('-t', dest='target_indexes',\n default=None,\n help='Target indexes to plot')\n (options, args) = parser.parse_args()\n\n if len(args) != 3:\n parser.error('Must provide parameters, model, and genomic position')\n else:\n params_file = args[0]\n model_file = args[1]\n genes_hdf5_file = args[2]\n\n if not os.path.isdir(options.out_dir):\n os.mkdir(options.out_dir)\n\n options.shifts = [int(shift) for shift in options.shifts.split(',')]\n\n #################################################################\n # reads in genes HDF5\n\n gene_data = genedata.GeneData(genes_hdf5_file)\n\n # subset gene sequences\n genes_subset = set()\n if options.gene_list:\n for line in open(options.gene_list):\n genes_subset.add(line.rstrip())\n\n gene_data.subset_genes(genes_subset)\n print('Filtered to %d sequences' % gene_data.num_seqs)\n\n # extract sequence chrom and start\n seqs_chrom = [gene_data.gene_seqs[si].chrom for si in range(gene_data.num_seqs)]\n seqs_start = [gene_data.gene_seqs[si].start for si in range(gene_data.num_seqs)]\n\n\n #######################################################\n # model parameters and 
placeholders\n\n job = params.read_job_params(params_file)\n\n job['seq_length'] = gene_data.seq_length\n job['seq_depth'] = gene_data.seq_depth\n job['target_pool'] = gene_data.pool_width\n\n if 'num_targets' not in job:\n print(\n \"Must specify number of targets (num_targets) in the parameters file.\",\n file=sys.stderr)\n exit(1)\n\n # set target indexes\n if options.target_indexes is not None:\n options.target_indexes = [int(ti) for ti in options.target_indexes.split(',')]\n target_subset = options.target_indexes\n else:\n options.target_indexes = list(range(job['num_targets']))\n target_subset = None\n\n # build model\n model = seqnn.SeqNN()\n model.build_feed(job, target_subset=target_subset)\n\n # determine latest pre-dilated layer\n cnn_dilation = np.array([cp.dilation for cp in model.hp.cnn_params])\n dilated_mask = cnn_dilation > 1\n dilated_indexes = np.where(dilated_mask)[0]\n pre_dilated_layer = np.min(dilated_indexes)\n print('Pre-dilated layer: %d' % pre_dilated_layer)\n\n # build gradients ops\n t0 = time.time()\n print('Building target/position-specific gradient ops.', end='')\n model.build_grads(layers=[pre_dilated_layer])\n print(' Done in %ds' % (time.time()-t0), flush=True)\n\n\n #######################################################\n # acquire gradients\n\n # initialize saver\n saver = tf.train.Saver()\n\n with tf.Session() as sess:\n # load variables into session\n saver.restore(sess, model_file)\n\n # score sequences and write bigwigs\n score_write(sess, model, options, gene_data.seqs_1hot, seqs_chrom, seqs_start)\n\n\ndef score_write(sess, model, options, seqs_1hot, seqs_chrom, seqs_start):\n ''' Compute scores and write them as BigWigs for a set of sequences. '''\n\n for si in range(seqs_1hot.shape[0]):\n # initialize batcher\n batcher_si = batcher.Batcher(seqs_1hot[si:si+1],\n batch_size=model.hp.batch_size,\n pool_width=model.hp.target_pool)\n\n # get layer representations\n t0 = time.time()\n print('Computing gradients.', end='', flush=True)\n _, _, _, batch_grads, batch_reprs, _ = model.gradients(sess, batcher_si,\n rc=options.rc, shifts=options.shifts, mc_n=options.mc_n, return_all=True)\n print(' Done in %ds.' 
% (time.time()-t0), flush=True)\n\n # only layer\n batch_reprs = batch_reprs[0]\n batch_grads = batch_grads[0]\n\n # increase resolution\n batch_reprs = batch_reprs.astype('float32')\n batch_grads = batch_grads.astype('float32')\n\n # S (sequences) x T (targets) x P (seq position) x U (units layer i) x E (ensembles)\n print('batch_grads', batch_grads.shape)\n pooled_length = batch_grads.shape[2]\n\n # S (sequences) x P (seq position) x U (Units layer i) x E (ensembles)\n print('batch_reprs', batch_reprs.shape)\n\n # write bigwigs\n t0 = time.time()\n print('Writing BigWigs.', end='', flush=True)\n\n # for each target\n for tii in range(len(options.target_indexes)):\n ti = options.target_indexes[tii]\n\n # compute scores\n if options.norm is None:\n batch_grads_scores = np.multiply(batch_reprs[0], batch_grads[0,tii,:,:,:]).sum(axis=1)\n else:\n batch_grads_scores = np.multiply(batch_reprs[0], batch_grads[0,tii,:,:,:])\n batch_grads_scores = np.power(np.abs(batch_grads_scores), options.norm)\n batch_grads_scores = batch_grads_scores.sum(axis=1)\n batch_grads_scores = np.power(batch_grads_scores, 1./options.norm)\n\n # compute score statistics\n batch_grads_mean = batch_grads_scores.mean(axis=1)\n\n if options.norm is None:\n batch_grads_pval = ttest_1samp(batch_grads_scores, 0, axis=1)[1]\n else:\n batch_grads_pval = ttest_1samp(batch_grads_scores, 0, axis=1)[1]\n # batch_grads_pval = chi2(df=)\n batch_grads_pval /= 2\n\n # open bigwig\n bws_file = '%s/s%d_t%d_scores.bw' % (options.out_dir, si, ti)\n bwp_file = '%s/s%d_t%d_pvals.bw' % (options.out_dir, si, ti)\n bws_open = bigwig_open(bws_file, options.genome_file)\n # bwp_open = bigwig_open(bwp_file, options.genome_file)\n\n # specify bigwig locations and values\n bw_chroms = [seqs_chrom[si]]*pooled_length\n bw_starts = [int(seqs_start[si] + pi*model.hp.target_pool) for pi in range(pooled_length)]\n bw_ends = [int(bws + model.hp.target_pool) for bws in bw_starts]\n bws_values = [float(bgs) for bgs in batch_grads_mean]\n # bwp_values = [float(bgp) for bgp in batch_grads_pval]\n\n # write\n bws_open.addEntries(bw_chroms, bw_starts, ends=bw_ends, values=bws_values)\n # bwp_open.addEntries(bw_chroms, bw_starts, ends=bw_ends, values=bwp_values)\n\n # close\n bws_open.close()\n # bwp_open.close()\n\n print(' Done in %ds.' % (time.time()-t0), flush=True)\n gc.collect()\n\n\n################################################################################\n# __main__\n################################################################################\nif __name__ == '__main__':\n main()\n" ]
[ [ "numpy.array", "tensorflow.Session", "tensorflow.train.Saver", "numpy.min", "numpy.multiply", "numpy.where", "scipy.stats.ttest_1samp", "numpy.power", "numpy.abs" ] ]
tlentali/leab
[ "1f2eda95953738c375f389479b160d7ecb494e40" ]
[ "leab/before/leSample.py" ]
[ "\nimport numpy as np\nfrom scipy.stats import norm\n\nfrom .leReport import leReport\n\nclass leSample:\n \"\"\"\n Build leSample object.\n\n Parameters:\n\n conversion_rate (float): baseline conversion rate.\n min_detectable_effect (float): minimum detectable effect.\n significance_level (float): alpha, percent of the time a difference will be detected, assuming one does NOT exist.\n statistical_power (float): 1-beta, percent of the time the minimum effect size will be detected, assuming it exists.\n \n Example:\n\n ::\n\n >>> from leab import before\n\n >>> ab_test = before.leSample(conversion_rate=20, \n ... min_detectable_effect=2)\n >>> ab_test.get_size_per_variation()\n 6347\n\n >>> ab_test.get_duration(avg_daily_total_visitor=1000)\n 13\n \"\"\"\n def __init__(\n self,\n conversion_rate: float,\n min_detectable_effect: float,\n significance_level: float = 0.05,\n statistical_power: float = 0.8,\n absolute: bool = True,\n ):\n self.conversion_rate = conversion_rate / 100\n self.absolute = absolute\n self.min_detectable_effect = min_detectable_effect / 100\n self.absolute_or_relative()\n self.significance_level = significance_level\n self.statistical_power = statistical_power\n self.alpha = significance_level\n self.beta = 1 - statistical_power\n self.n = None\n self.size = self.get_size_per_variation()\n\n def absolute_or_relative(self) -> None:\n \"\"\"\n Set up the min_detectable_effect absolute value or relative to conversion_rate.\n \"\"\"\n if self.absolute:\n self.min_detectable_effect = self.min_detectable_effect\n else:\n self.min_detectable_effect = (\n self.conversion_rate * self.min_detectable_effect\n )\n\n @staticmethod\n def compute_z_score(alpha: float) -> float:\n \"\"\"\n Compute z score from alpha value.\n\n Parameters: \n\n alpha (float): required alpha value (alpha should already fit the required test).\n\n Returns: \n\n Z-score.\n \"\"\"\n return norm.ppf(alpha)\n\n def _get_z_1(self) -> None:\n self.significance = 1 - (self.alpha / 2)\n self.z_1 = self.compute_z_score(self.significance)\n\n def _get_z_2(self) -> None:\n self.power = 1 - self.beta\n self.z_2 = self.compute_z_score(self.power)\n\n def _get_zs(self) -> None:\n self._get_z_1()\n self._get_z_2()\n\n def _get_sd1(self) -> None:\n \"\"\"\n Compute standard deviation v1. 
\n p-baseline conversion rate which is our estimated p and d-minimum detectable change.\n \"\"\"\n self.sd1 = np.sqrt(2 * self.conversion_rate * (1 - self.conversion_rate))\n\n def _get_sd2(self) -> None:\n \"\"\"\n Compute standard deviation v1.\n p-baseline conversion rate which is our estimated p and d-minimum detectable change.\n \"\"\"\n self.sd2 = np.sqrt(\n self.conversion_rate * (1 - self.conversion_rate)\n + (self.conversion_rate + self.min_detectable_effect)\n * (1 - (self.conversion_rate + self.min_detectable_effect))\n )\n\n def _get_sds(self) -> None:\n self._get_sd1()\n self._get_sd2()\n\n def _compute_n(self) -> None:\n self.n = int(\n np.round(\n ((self.z_1 * self.sd1 + self.z_2 * self.sd2) ** 2)\n / (self.min_detectable_effect ** 2)\n )\n )\n\n def get_size_per_variation(self) -> int:\n \"\"\"\n Calls all methods used to get the size needed per group to get significance on the test.\n\n Returns:\n\n Minimum sample size required per group according to metric denominator.\n \"\"\"\n self._get_zs()\n self._get_sds()\n self._compute_n()\n return self.n\n\n def get_total_size(self) -> int:\n \"\"\"\n Calls all methods used to get the total size needed to get significance on the test.\n\n Returns:\n\n Minimum total sample size required according to metric denominator.\n \"\"\"\n self.total_sample_size = self.n * 2\n return self.total_sample_size\n\n def get_duration(self, avg_daily_total_visitor: int, nb_split: int = 2) -> int:\n \"\"\"\n Compute the estimate duration in day needed to get significance on the test.\n\n Parameters:\n\n avg_daily_total_visitor (int): The first parameter.\n nb_split (int): The second parameter.\n\n Returns:\n\n Return the estimate duration in day needed to get significance on the test.\n \"\"\"\n self.avg_daily_total_visitor = avg_daily_total_visitor\n self.nb_split = nb_split\n if self.n:\n self.duration = int(\n np.round(self.n / (self.avg_daily_total_visitor / self.nb_split))\n )\n else:\n self.get_size_per_variation()\n self.duration = int(\n np.round(self.n / (self.avg_daily_total_visitor / self.nb_split))\n )\n return self.duration\n\n def report(self, path: str = \"leReport.html\"):\n build_report = leReport( \n baseline_conversion_rate = self.conversion_rate*100,\n minimum_detectable_effect = self.min_detectable_effect*100,\n sample_size = self.n,\n path = path,\n )\n return build_report.build_leReport()\n" ]
[ [ "numpy.round", "scipy.stats.norm.ppf", "numpy.sqrt" ] ]
nasa/GHRC-FieldCampaign-eXplorer-core
[ "60532f5946efcbec047abbed5c8413fc695781cb" ]
[ "flight_track.py" ]
[ "import numpy as np\nfrom copy import deepcopy\nimport json\nimport boto3\nimport os\n\nmodel = {\n \"id\": \"Flight Track\",\n \"name\": \"ER2\",\n \"availability\": \"{}/{}\",\n \"model\": {\n \"gltf\": \"https://s3.amazonaws.com/visage-czml/iphex_HIWRAP/img/er2.gltf\",\n \"scale\": 100.0,\n \"minimumPixelSize\": 32,\n \"maximumScale\": 150.0\n },\n \"position\": {\n \"cartographicDegrees\": []\n },\n \"path\": {\n \"material\": {\n \"solidColor\": {\n \"color\": {\n \"rgba\": [0, 255, 128, 255]\n }\n }\n },\n \"width\": 1,\n \"resolution\": 5\n },\n \"properties\": {\n \"roll\": {},\n \"pitch\": {},\n \"heading\": {}\n }\n}\n\nczml_head = {\n \"id\": \"document\",\n \"name\": \"wall czml\",\n \"version\": \"1.0\"\n}\n\n\nclass FlightTrackCzmlWriter:\n\n def __init__(self, length):\n self.model = deepcopy(model)\n self.length = length\n self.model['position']['cartographicDegrees'] = [0] * 4 * length\n self.model['properties']['roll']['number'] = [0] * 2 * length\n self.model['properties']['pitch']['number'] = [0] * 2 * length\n self.model['properties']['heading']['number'] = [0] * 2 * length\n\n def set_time(self, time_window, time_steps):\n epoch = time_window[0]\n end = time_window[1]\n self.model['availability'] = \"{}/{}\".format(epoch, end)\n self.model['position']['epoch'] = epoch\n self.model['position']['cartographicDegrees'][0::4] = time_steps\n self.model['properties']['roll']['epoch'] = epoch\n self.model['properties']['pitch']['epoch'] = epoch\n self.model['properties']['heading']['epoch'] = epoch\n self.model['properties']['roll']['number'][0::2] = time_steps\n self.model['properties']['pitch']['number'][0::2] = time_steps\n self.model['properties']['heading']['number'][0::2] = time_steps\n\n def set_position(self, longitude, latitude, altitude):\n self.model['position']['cartographicDegrees'][1::4] = longitude\n self.model['position']['cartographicDegrees'][2::4] = latitude\n self.model['position']['cartographicDegrees'][3::4] = altitude\n\n def set_orientation(self, roll, pitch, heading):\n self.model['properties']['roll']['number'][1::2] = roll\n self.model['properties']['pitch']['number'][1::2] = pitch\n self.model['properties']['heading']['number'][1::2] = heading\n\n def set_with_df(self, df):\n self.set_time(*self.get_time_info(df['timestamp']))\n self.set_position(df['lon'], df['lat'], df['height_msl'])\n self.set_orientation(df['roll'], df['pitch'], df['track'])\n\n def get_time_info(self, time):\n time_window = time[[0, -1]].astype(np.string_)\n time_window = np.core.defchararray.add(time_window, np.string_('Z'))\n time_window = np.core.defchararray.decode(time_window, 'UTF-8')\n time_steps = (time - time[0]).astype(int)\n return time_window, time_steps\n\n def get_string(self):\n return json.dumps([czml_head, self.model])\n\n\n# 1: time\n# 2: latitude\n# 3: longitude\n# 4: altitude\n# 13: heading\n# 16: pitch\n# 17: roll\n# with open('olympex_naver2_IWG1_20151109-2159.txt', newline='') as csvfile:\nclass FlightTrackReader:\n\n def __init__(self):\n self.converters = {}\n for i in range(33):\n self.converters[i] = self.ignore\n self.converters[1] = self.string_to_date\n self.converters[2] = self.string_to_float\n self.converters[3] = self.string_to_float\n self.converters[4] = self.string_to_float\n self.converters[14] = self.string_to_float\n self.converters[16] = self.string_to_float\n self.converters[17] = self.string_to_float\n\n def read_csv(self, infile):\n data = np.loadtxt(infile, delimiter=',', converters=self.converters)\n time = data[:, 1]\n latitude 
= data[:, 2]\n longitude = data[:, 3]\n altitude = data[:, 4]\n heading = data[:, 14] * np.pi / 180. - np.pi / 2.\n pitch = data[:, 16] * np.pi / 180.\n roll = data[:, 17] * np.pi / 180.\n\n mask = np.logical_not(np.isnan(latitude))\n mask = np.logical_and(mask, np.logical_not(np.isnan(longitude)))\n mask = np.logical_and(mask, np.logical_not(np.isnan(altitude)))\n mask = np.logical_and(mask, np.logical_not(np.isnan(heading)))\n mask = np.logical_and(mask, np.logical_not(np.isnan(pitch)))\n mask = np.logical_and(mask, np.logical_not(np.isnan(roll)))\n\n _, unique_idx = np.unique(time, return_index=True)\n unique = np.copy(mask)\n unique[:] = False\n unique[unique_idx] = True\n\n mask = np.logical_and(mask, unique)\n\n time = time[mask].astype('datetime64[s]')\n time_window = time[[0, -1]].astype(np.string_)\n time_window = np.core.defchararray.add(time_window, np.string_('Z'))\n self.time_window = np.core.defchararray.decode(time_window, 'UTF-8')\n self.time_steps = (time - time[0]).astype(int).tolist()[::5]\n self.latitude = latitude[mask][::5]\n self.longitude = longitude[mask][::5]\n self.altitude = altitude[mask][::5]\n self.heading = heading[mask][::5]\n self.pitch = pitch[mask][::5]\n self.roll = roll[mask][::5]\n self.length = mask[mask][::5].size\n\n def string_to_float(self, str):\n value = np.nan\n try:\n value = float(str)\n except:\n pass\n return value\n\n def string_to_date(self, str):\n time = np.datetime64(str, 's')\n return time.astype(np.int64)\n\n def ignore(self, value):\n return np.nan\n\n\ndef process_tracks():\n s3_resource = boto3.resource('s3')\n s3bucket = s3_resource.Bucket(os.getenv('RAW_DATA_BUCKET'))\n keys = []\n for obj in s3bucket.objects.filter(\n Prefix=f\"fieldcampaign/goesrplt/NAV_ER2/data/goesrplt_naver2\"):\n keys.append(obj.key)\n\n result = keys\n\n s3_client = boto3.client('s3')\n for infile in result:\n s3_file = s3_client.get_object(Bucket=os.getenv('RAW_DATA_BUCKET'), Key=infile)\n data = s3_file['Body'].iter_lines()\n reader = FlightTrackReader()\n reader.read_csv(data)\n\n writer = FlightTrackCzmlWriter(reader.length)\n writer.set_time(reader.time_window, reader.time_steps)\n writer.set_position(reader.longitude, reader.latitude, reader.altitude)\n writer.set_orientation(reader.roll, reader.pitch, reader.heading)\n\n output_name = os.path.splitext(os.path.basename(infile))[0]\n outfile = f\"{os.environ['OUTPUT_DATA_BUCKET_KEY']}/fieldcampaign/goesrplt/czml/flight_track/{output_name}\"\n\n s3_client.put_object(Body=writer.get_string(), Bucket=os.environ['OUTPUT_DATA_BUCKET'], Key=outfile)\n\n\nprocess_tracks()\n" ]
[ [ "numpy.datetime64", "numpy.core.defchararray.decode", "numpy.isnan", "numpy.string_", "numpy.copy", "numpy.logical_and", "numpy.loadtxt", "numpy.unique" ] ]
lethucuyen/python-stock-price-prediction
[ "54aa2b5a9b24eddece5e09e74c13095c5f535566" ]
[ "build_xgboost_model_RSI_MA.py" ]
[ "import numpy as np\nimport pandas as pd\nimport datetime as dt\nimport pandas_datareader as web\nfrom xgboost import XGBRegressor\nimport pickle\n\n#from sklearn.model_selection import GridSearchCV\n\n#from sklearn.model_selection import train_test_split\n#from sklearn.metrics import accuracy_score\n#from sklearn import preprocessing\n\n\n\n\n#load Data\n\n\n\n\nstart = dt.datetime(2020,1,1)\nend = dt.datetime.now()\n\ndf = web.DataReader(\"NOK\",'yahoo',start,end)\n\n#MA\ndf['EMA_9'] = df['Adj Close'].ewm(9).mean().shift()\ndf['SMA_5'] = df['Adj Close'].rolling(5).mean().shift()\ndf['SMA_10'] = df['Adj Close'].rolling(10).mean().shift()\ndf['SMA_15'] = df['Adj Close'].rolling(15).mean().shift()\ndf['SMA_30'] = df['Adj Close'].rolling(30).mean().shift()\n\n#Relative Strength Index\ndef relative_strength_idx(df, n=14):\n close = df['Adj Close']\n delta = close.diff()\n delta = delta[1:]\n pricesUp = delta.copy()\n pricesDown = delta.copy()\n pricesUp[pricesUp < 0] = 0\n pricesDown[pricesDown > 0] = 0\n rollUp = pricesUp.rolling(n).mean()\n rollDown = pricesDown.abs().rolling(n).mean()\n rs = rollUp / rollDown\n rsi = 100.0 - (100.0 / (1.0 + rs))\n return rsi\n\n#MACD\nEMA_12 = pd.Series(df['Adj Close'].ewm(span=12, min_periods=12).mean())\nEMA_26 = pd.Series(df['Adj Close'].ewm(span=26, min_periods=26).mean())\ndf['MACD'] = pd.Series(EMA_12 - EMA_26)\ndf['MACD_signal'] = pd.Series(df.MACD.ewm(span=9, min_periods=9).mean())\ndf['RSI']=relative_strength_idx(df).fillna(0)\n\n\nprint(\"before: \",df['Adj Close'])\n\ndf['Adj Close']=df['Adj Close'].shift(-1)\ndf = df.iloc[33:] # Because of moving averages and MACD line\ndf = df[:-1] # Because of shifting close price\n#df.index = range(len(df))\n\n\ndrop_cols = [ 'Volume', 'Open', 'Low', 'High','Close']\n\ndf = df.drop(drop_cols, 1)\n\nprint(\"DF: \",df)\ndatasetY = df['Adj Close'].copy()\ndatasetX = df.drop(['Adj Close'], 1)\n\nY = datasetY.values\nX = datasetX.values\n\nmodel = XGBRegressor(objective='reg:squarederror', verbose=False)\nmodel.fit(X, Y)\n# model.save_model('0001.model')\npickle.dump(model, open(\"XGB_RSI_MA_NOK_Model.pkl\", \"wb\"))\n\n# make predictions for test data\ny_pred = model.predict(X)\nprint(Y)\nprint(y_pred)\nprint(\"last item: \",y_pred[-1])\n#Build the model LMST\n\n\n\n\n#model.save(\"saved_xgboost_closed_model_NOK.h5\")\n\n" ]
[ [ "pandas.Series" ] ]
ntalabot/base_dl_project_struct
[ "2a8b52081baf678fec4b74b16f41dd22a3d0eb21" ]
[ "src/utils/image.py" ]
[ "\"\"\"\nUtility module for images.\n\"\"\"\n\nimport numpy as np\n\n\nIMAGENET_MEAN = np.array([0.485, 0.456, 0.406])\nIMAGENET_STD = np.array([0.229, 0.224, 0.225])\n\n\ndef normalize(image):\n \"\"\"Normalize the image with ImageNet statistics.\"\"\"\n out = image / 255\n out = (out - IMAGENET_MEAN) / IMAGENET_STD\n return np.float32(out)\n\n\ndef reverse_normalize(image):\n \"\"\"Reverse the ImageNet normalization of the image.\"\"\"\n out = image * IMAGENET_STD + IMAGENET_MEAN\n out = out * 255\n return np.uint8(out)" ]
[ [ "numpy.uint8", "numpy.array", "numpy.float32" ] ]
Golbstein/python-graphslam
[ "cccc022b2f5d797f6511bda9e7dd3a24af403016" ]
[ "tests/test_pose_se3.py" ]
[ "# Copyright (c) 2020 Jeff Irion and contributors\n\n\"\"\"Unit tests for the pose/pose_se3.py module.\n\n\"\"\"\n\n\nimport unittest\n\nimport numpy as np\n\nfrom graphslam.vertex import Vertex\nfrom graphslam.pose.se3 import PoseSE3\nfrom graphslam.edge.base_edge import BaseEdge\nfrom .edge_oplus_ominus import EdgeOMinus, EdgeOMinusCompact, EdgeOPlus, EdgeOPlusCompact\n\n\nclass TestPoseSE3(unittest.TestCase):\n \"\"\"Tests for the ``PoseSE3`` class.\n\n \"\"\"\n\n def test_constructor(self):\n \"\"\"Test that a ``PoseSE3`` instance can be created.\n\n \"\"\"\n p1 = PoseSE3([1, 2, 3], [0, 0, 0, 1])\n p2 = PoseSE3(np.array([4, 5, 6]), np.array([1, 0, 0, 0]))\n self.assertIsInstance(p1, PoseSE3)\n self.assertIsInstance(p2, PoseSE3)\n\n def test_normalize(self):\n \"\"\"Test that the ``normalize`` method works as expected.\n\n \"\"\"\n p1 = PoseSE3([1, 2, 3], [2, 2, 2, 2])\n p2 = PoseSE3(np.array([4, 5, 6]), np.array([2, 0, 0, 0]))\n\n p1.normalize()\n p2.normalize()\n\n self.assertAlmostEqual(np.linalg.norm(p1.to_array() - np.array([1, 2, 3, 0.5, 0.5, 0.5, 0.5])), 0.)\n self.assertAlmostEqual(np.linalg.norm(p2.to_array() - np.array([4, 5, 6, 1, 0, 0, 0])), 0.)\n\n def test_copy(self):\n \"\"\"Test that the ``copy`` method works as expected.\n\n \"\"\"\n p1 = PoseSE3([1, 2, 3], [0, 0, 0, 1])\n p2 = p1.copy()\n\n p2[0] = 0\n self.assertEqual(p1[0], 1)\n\n def test_to_array(self):\n \"\"\"Test that the ``to_array`` method works as expected.\n\n \"\"\"\n p1 = PoseSE3([1, 2, 3], [0, 0, 0, 1])\n arr = p1.to_array()\n\n self.assertIsInstance(arr, np.ndarray)\n self.assertNotIsInstance(arr, PoseSE3)\n self.assertAlmostEqual(np.linalg.norm(arr - np.array([1, 2, 3, 0, 0, 0, 1])), 0.)\n\n def test_to_compact(self):\n \"\"\"Test that the ``to_compact`` method works as expected.\n\n \"\"\"\n p1 = PoseSE3([1, 2, 3], [0, 0, 0, 1])\n arr = p1.to_compact()\n\n self.assertIsInstance(arr, np.ndarray)\n self.assertNotIsInstance(arr, PoseSE3)\n self.assertAlmostEqual(np.linalg.norm(arr - np.array([1, 2, 3, 0, 0, 0])), 0.)\n\n # ======================================================================= #\n # #\n # Properties #\n # #\n # ======================================================================= #\n def test_position(self):\n \"\"\"Test that the ``position`` property works as expected.\n\n \"\"\"\n p1 = PoseSE3([1, 2, 3], [0, 0, 0, 1])\n pos = p1.position\n\n true_pos = np.array([1, 2, 3])\n self.assertIsInstance(pos, np.ndarray)\n self.assertNotIsInstance(pos, PoseSE3)\n self.assertAlmostEqual(np.linalg.norm(true_pos - pos), 0.)\n\n def test_orientation(self):\n \"\"\"Test that the ``orientation`` property works as expected.\n\n \"\"\"\n p1 = PoseSE3([1, 2, 3], [0, 0, 0, 1])\n\n self.assertAlmostEqual(np.linalg.norm(p1.orientation - np.array([0, 0, 0, 1])), 0.)\n\n def test_inverse(self):\n \"\"\"Test that the ``inverse`` property works as expected.\n\n \"\"\"\n np.random.seed(0)\n\n for _ in range(10):\n p = PoseSE3(np.random.random_sample(3), np.random.random_sample(4))\n p.normalize()\n\n expected = np.linalg.inv(p.to_matrix())\n self.assertAlmostEqual(np.linalg.norm(p.inverse.to_matrix() - expected), 0.)\n\n # ======================================================================= #\n # #\n # Magic Methods #\n # #\n # ======================================================================= #\n def test_add(self):\n \"\"\"Test that the overloaded ``__add__`` method works as expected.\n\n \"\"\"\n np.random.seed(0)\n\n # PoseSE3 (+) PoseSE3\n for _ in range(10):\n p1 = 
PoseSE3(np.random.random_sample(3), np.random.random_sample(4))\n p2 = PoseSE3(np.random.random_sample(3), np.random.random_sample(4))\n\n p1.normalize()\n p2.normalize()\n\n expected = np.dot(p1.to_matrix(), p2.to_matrix())\n self.assertAlmostEqual(np.linalg.norm((p1 + p2).to_matrix() - expected), 0.)\n\n # PoseSE3 [+] numpy.ndarray\n for _ in range(10):\n p1 = PoseSE3(np.random.random_sample(3), np.random.random_sample(4))\n p2 = PoseSE3(np.random.random_sample(3), np.random.random_sample(4))\n p2_compact = p2.to_compact()\n\n if np.linalg.norm(p2.orientation[:3]) > 1.0:\n p2[3:] = [0., 0., 0., 1.]\n else:\n p2.normalize()\n p2_compact[3:] = p2.orientation[:3]\n\n p1.normalize()\n\n expected = np.dot(p1.to_matrix(), p2.to_matrix())\n self.assertAlmostEqual(np.linalg.norm((p1 + p2_compact).to_matrix() - expected), 0.)\n\n p1 += p2_compact\n self.assertAlmostEqual(np.linalg.norm(p1.to_matrix() - expected), 0.)\n\n with self.assertRaises(NotImplementedError):\n p1 = PoseSE3(np.random.random_sample(3), np.random.random_sample(4))\n _ = p1 + 5\n\n def test_sub(self):\n \"\"\"Test that the overloaded ``__sub__`` method works as expected.\n\n \"\"\"\n np.random.seed(0)\n\n for _ in range(10):\n p1 = PoseSE3(np.random.random_sample(3), np.random.random_sample(4))\n p2 = PoseSE3(np.random.random_sample(3), np.random.random_sample(4))\n\n p1.normalize()\n p2.normalize()\n\n expected = np.dot(np.linalg.inv(p2.to_matrix()), p1.to_matrix())\n self.assertAlmostEqual(np.linalg.norm((p1 - p2).to_matrix() - expected), 0.)\n\n # ======================================================================= #\n # #\n # Jacobians #\n # #\n # ======================================================================= #\n def test_jacobian_self_oplus_other(self):\n \"\"\"Test that the ``jacobian_self_oplus_other_wrt_self`` and ``jacobian_self_oplus_other_wrt_other`` methods are correctly implemented.\n\n \"\"\"\n np.random.seed(0)\n\n for _ in range(10):\n p1 = PoseSE3(np.random.random_sample(3), np.random.random_sample(4))\n p2 = PoseSE3(np.random.random_sample(3), np.random.random_sample(4))\n\n p1.normalize()\n p2.normalize()\n\n v1 = Vertex(1, p1)\n v2 = Vertex(2, p2)\n\n e = EdgeOPlus([1, 2], np.eye(7), np.zeros(7), [v1, v2])\n\n numerical_jacobians = BaseEdge.calc_jacobians(e)\n\n analytical_jacobians = e.calc_jacobians()\n\n self.assertEqual(len(numerical_jacobians), len(analytical_jacobians))\n for n, a in zip(numerical_jacobians, analytical_jacobians):\n self.assertAlmostEqual(np.linalg.norm(n[:, :3] - a[:, :3]), 0.)\n\n def test_jacobian_self_ominus_other(self):\n \"\"\"Test that the ``jacobian_self_ominus_other_wrt_self`` and ``jacobian_self_ominus_other_wrt_other`` methods are correctly implemented.\n\n \"\"\"\n np.random.seed(0)\n\n for _ in range(10):\n p1 = PoseSE3(np.random.random_sample(3), np.random.random_sample(4))\n p2 = PoseSE3(np.random.random_sample(3), np.random.random_sample(4))\n\n p1.normalize()\n p2.normalize()\n\n v1 = Vertex(1, p1)\n v2 = Vertex(2, p2)\n\n e = EdgeOMinus([1, 2], np.eye(7), np.zeros(7), [v1, v2])\n\n numerical_jacobians = BaseEdge.calc_jacobians(e)\n\n analytical_jacobians = e.calc_jacobians()\n\n self.assertEqual(len(numerical_jacobians), len(analytical_jacobians))\n for n, a in zip(numerical_jacobians, analytical_jacobians):\n self.assertAlmostEqual(np.linalg.norm(n - a), 0., 5)\n\n def test_jacobian_self_oplus_other_compact(self):\n \"\"\"Test that the ``jacobian_self_oplus_other_wrt_self_compact`` and ``jacobian_self_oplus_other_wrt_other_compact`` methods are correctly 
implemented.\n\n \"\"\"\n np.random.seed(0)\n\n for _ in range(10):\n p1 = PoseSE3(np.random.random_sample(3), np.random.random_sample(4))\n p2 = PoseSE3(np.random.random_sample(3), np.random.random_sample(4))\n\n p1.normalize()\n p2.normalize()\n\n v1 = Vertex(1, p1)\n v2 = Vertex(2, p2)\n\n e = EdgeOPlusCompact([1, 2], np.eye(7), np.zeros(7), [v1, v2])\n\n numerical_jacobians = BaseEdge.calc_jacobians(e)\n\n analytical_jacobians = e.calc_jacobians()\n\n self.assertEqual(len(numerical_jacobians), len(analytical_jacobians))\n for n, a in zip(numerical_jacobians, analytical_jacobians):\n self.assertAlmostEqual(np.linalg.norm(n[:, :3] - a[:, :3]), 0.)\n\n def test_jacobian_self_ominus_other_compact(self):\n \"\"\"Test that the ``jacobian_self_ominus_other_wrt_self_compact`` and ``jacobian_self_ominus_other_wrt_other_compact`` methods are correctly implemented.\n\n \"\"\"\n np.random.seed(0)\n\n for _ in range(10):\n p1 = PoseSE3(np.random.random_sample(3), np.random.random_sample(4))\n p2 = PoseSE3(np.random.random_sample(3), np.random.random_sample(4))\n\n p1.normalize()\n p2.normalize()\n\n v1 = Vertex(1, p1)\n v2 = Vertex(2, p2)\n\n e = EdgeOMinusCompact([1, 2], np.eye(7), np.zeros(7), [v1, v2])\n\n numerical_jacobians = BaseEdge.calc_jacobians(e)\n\n analytical_jacobians = e.calc_jacobians()\n\n self.assertEqual(len(numerical_jacobians), len(analytical_jacobians))\n for n, a in zip(numerical_jacobians, analytical_jacobians):\n self.assertAlmostEqual(np.linalg.norm(n - a), 0., 5)\n\n\nif __name__ == '__main__':\n unittest.main()\n" ]
[ [ "numpy.array", "numpy.linalg.norm", "numpy.zeros", "numpy.random.seed", "numpy.eye", "numpy.random.random_sample" ] ]
jiyeong-yun/EyeOnYou2021
[ "f70280de27ce50307f50f68fdee3b485aab64651" ]
[ "tensorflow/python/training/tracking/util.py" ]
[ "\"\"\"Utilities for saving/loading Trackable objects.\"\"\"\n# Copyright 2017 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport abc\nimport collections\nimport functools\nimport os\nimport threading\nimport time\nimport weakref\n\nimport six\n\nfrom tensorflow.core.protobuf import trackable_object_graph_pb2\nfrom tensorflow.python.client import session as session_lib\nfrom tensorflow.python.eager import context\nfrom tensorflow.python.eager import def_function\nfrom tensorflow.python.eager import monitoring\nfrom tensorflow.python.framework import constant_op\nfrom tensorflow.python.framework import dtypes\nfrom tensorflow.python.framework import errors_impl\nfrom tensorflow.python.framework import ops\nfrom tensorflow.python.framework import tensor_shape\nfrom tensorflow.python.framework import tensor_util\nfrom tensorflow.python.lib.io import file_io\nfrom tensorflow.python.ops import array_ops\nfrom tensorflow.python.ops import gen_io_ops as io_ops\nfrom tensorflow.python.ops import init_ops\nfrom tensorflow.python.ops import variable_scope\nfrom tensorflow.python.ops import variables\nfrom tensorflow.python.platform import gfile\nfrom tensorflow.python.platform import tf_logging as logging\nfrom tensorflow.python.saved_model import utils_impl\nfrom tensorflow.python.training import checkpoint_management\nfrom tensorflow.python.training import py_checkpoint_reader\nfrom tensorflow.python.training import saver as v1_saver_lib\nfrom tensorflow.python.training.saving import checkpoint_options\nfrom tensorflow.python.training.saving import functional_saver\nfrom tensorflow.python.training.saving import saveable_object_util\nfrom tensorflow.python.training.tracking import base\nfrom tensorflow.python.training.tracking import data_structures\nfrom tensorflow.python.training.tracking import graph_view as graph_view_lib\nfrom tensorflow.python.training.tracking import tracking\nfrom tensorflow.python.util import compat\nfrom tensorflow.python.util import deprecation\nfrom tensorflow.python.util import object_identity\nfrom tensorflow.python.util import tf_contextlib\nfrom tensorflow.python.util import tf_inspect\nfrom tensorflow.python.util.tf_export import tf_export\n\n\n# The callable that provide Keras default session that is needed for saving.\n_SESSION_PROVIDER = None\n\n_checkpoint_write_durations = monitoring.Sampler(\n \"/tensorflow/core/checkpoint/write/write_durations\",\n # Scale of 10, power of 1.8 with bucket count 33 (~25 minutes).\n monitoring.ExponentialBuckets(10, 1.8, 33),\n \"Distribution of the wall time duration in microseconds of the \"\n \"`tf.train.Checkpoint.write` operation\",\n \"version\")\n\n_checkpoint_read_durations = monitoring.Sampler(\n \"/tensorflow/core/checkpoint/read/read_durations\",\n # Scale of 10, power of 
1.8 with bucket count 33 (~25 minutes).\n monitoring.ExponentialBuckets(10, 1.8, 33),\n \"Distribution of the wall time duration in microseconds of the \"\n \"`tf.train.Checkpoint.restore` operation\",\n \"version\")\n\n# Accumulates total time elapsed between module import time and the last\n# successful Checkpoint write prior to job pre-emption or job completion.\n_checkpoint_training_time_saved = monitoring.Counter(\n \"/tensorflow/core/checkpoint/write/training_time_saved\",\n \"Total time in microseconds elapsed between two consecutive write \"\n \"operations in a single job or between module import time and a \"\n \"Checkpoint's first write.\",\n \"version\")\n\n# Captures the timestamp of module import or end of write operation. Can be\n# accessed by multiple Checkpoint instances.\n_END_TIME_OF_LAST_WRITE = time.time()\n_END_TIME_OF_LAST_WRITE_LOCK = threading.Lock()\n\n\ndef _get_duration_microseconds(start_time_seconds, end_time_seconds):\n if end_time_seconds < start_time_seconds:\n # Avoid returning negative value in case of clock skew.\n return 0\n return round((end_time_seconds - start_time_seconds) * 1000000)\n\n\n@tf_export(\"__internal__.tracking.register_session_provider\", v1=[])\ndef register_session_provider(session_provider):\n global _SESSION_PROVIDER\n if _SESSION_PROVIDER is None:\n _SESSION_PROVIDER = session_provider\n\n\ndef get_session():\n # Prefer TF's default session since get_session from Keras has side-effects.\n session = ops.get_default_session()\n if session is None:\n global _SESSION_PROVIDER\n if _SESSION_PROVIDER is not None:\n session = _SESSION_PROVIDER() # pylint: disable=not-callable\n return session\n\n\nclass _ObjectGraphProtoPrettyPrinter(object):\n \"\"\"Lazily traverses an object graph proto to pretty print names.\n\n If no calls to `node_names` are made this object has no performance\n overhead. 
On the other hand, it will only traverse the object graph once, so\n repeated naming is cheap after the first.\n \"\"\"\n\n __slots__ = [\"_object_graph_proto\", \"_node_name_cache\"]\n\n def __init__(self, object_graph_proto):\n self._object_graph_proto = object_graph_proto\n self._node_name_cache = None\n\n @property\n def node_names(self):\n \"\"\"Lazily creates a mapping from node id to (\"path\", \"to\", \"root\").\"\"\"\n if self._node_name_cache is not None:\n return self._node_name_cache\n path_to_root = {}\n path_to_root[0] = (\"(root)\",)\n to_visit = collections.deque([0])\n while to_visit:\n node_id = to_visit.popleft()\n obj = self._object_graph_proto.nodes[node_id]\n for child in obj.children:\n if child.node_id not in path_to_root:\n path_to_root[child.node_id] = (\n path_to_root[node_id] + (child.local_name,))\n to_visit.append(child.node_id)\n\n node_names = {}\n for node_id, path_to_root in path_to_root.items():\n node_names[node_id] = \".\".join(path_to_root)\n\n for node_id, node in enumerate(self._object_graph_proto.nodes):\n for slot_reference in node.slot_variables:\n node_names[slot_reference.slot_variable_node_id] = (\n \"{}'s state '{}' for {}\".format(\n node_names[node_id], slot_reference.slot_name,\n node_names[slot_reference.original_variable_node_id]))\n self._node_name_cache = node_names\n return node_names\n\n\nclass _CheckpointRestoreCoordinatorDeleter(object):\n \"\"\"Deleter to avoid overriding _CheckpointRestoreCoordinator.__del__().\"\"\"\n\n __slots__ = [\n \"expect_partial\", \"object_graph_proto\", \"matched_proto_ids\",\n \"unused_attributes\"\n ]\n\n def __init__(self, expect_partial, object_graph_proto, matched_proto_ids,\n unused_attributes):\n self.expect_partial = expect_partial\n self.object_graph_proto = object_graph_proto\n self.matched_proto_ids = matched_proto_ids\n self.unused_attributes = unused_attributes\n\n def set_expect_partial(self, expect_partial):\n self.expect_partial = expect_partial\n\n def __del__(self):\n if self.expect_partial:\n return\n if logging is None:\n # The logging module may have been unloaded when __del__ is called.\n log_fn = print\n else:\n log_fn = logging.warning\n printed_warning = False\n pretty_printer = _ObjectGraphProtoPrettyPrinter(self.object_graph_proto)\n for node_id in range(len(self.object_graph_proto.nodes)):\n if node_id not in self.matched_proto_ids:\n log_fn(\"Unresolved object in checkpoint: {}\"\n .format(pretty_printer.node_names[node_id]))\n printed_warning = True\n for node_id, attribute_name in self.unused_attributes.items():\n log_fn((\"Unused attribute in object {}: {}\"\n .format(pretty_printer.node_names[node_id], attribute_name)))\n printed_warning = True\n if printed_warning:\n log_fn(\n \"A checkpoint was restored (e.g. tf.train.Checkpoint.restore or \"\n \"tf.keras.Model.load_weights) but not all checkpointed values were \"\n \"used. See above for specific issues. Use expect_partial() on the \"\n \"load status object, e.g. \"\n \"tf.train.Checkpoint.restore(...).expect_partial(), to silence these \"\n \"warnings, or use assert_consumed() to make the check explicit. 
See \"\n \"https://www.tensorflow.org/guide/checkpoint#loading_mechanics\"\n \" for details.\")\n\n\nclass _CheckpointRestoreCoordinator(object):\n \"\"\"Holds the status of an object-based checkpoint load.\"\"\"\n\n def __init__(self, object_graph_proto, save_path, save_path_tensor,\n restore_op_cache, graph_view, options):\n \"\"\"Specify the checkpoint being loaded.\n\n Args:\n object_graph_proto: The TrackableObjectGraph protocol buffer associated\n with this checkpoint.\n save_path: A string, the path to the checkpoint, as returned by\n `tf.train.latest_checkpoint`.\n save_path_tensor: A string `Tensor` which contains or will be fed the save\n path.\n restore_op_cache: A dictionary shared between\n `_CheckpointRestoreCoordinator`s for the same Python objects, used to\n look up restore ops by name to avoid re-creating them across multiple\n `restore()` calls.\n graph_view: A graph_view_lib.ObjectGraphView object for the restored\n objects.\n options: A CheckpointOptions object.\n \"\"\"\n self.options = options\n self.object_graph_proto = object_graph_proto\n self.restore_uid = ops.uid()\n # Maps from proto ids to lists of attributes which were in the checkpoint\n # but not loaded into any object, for error checking.\n self.unused_attributes = {}\n # Dictionary mapping from an id in the protocol buffer flat array to\n # Trackable Python objects. This mapping may be deferred if a\n # checkpoint is restored before all dependencies have been tracked. Uses\n # weak references so that partial restorations don't create reference cycles\n # (as objects with deferred dependencies will generally have references to\n # this object).\n self.object_by_proto_id = weakref.WeakValueDictionary()\n self.matched_proto_ids = set()\n # A set of all Python objects we've seen as dependencies, even if we didn't\n # use them (for example because of inconsistent references when\n # loading). Used to make status assertions fail when loading checkpoints\n # that don't quite match.\n self.all_python_objects = object_identity.ObjectIdentityWeakSet()\n self.save_path_tensor = save_path_tensor\n self.save_path_string = save_path\n reader = py_checkpoint_reader.NewCheckpointReader(save_path)\n self.dtype_map = reader.get_variable_to_dtype_map()\n self.shape_map = reader.get_variable_to_shape_map()\n # A NewCheckpointReader for the most recent checkpoint, for streaming Python\n # state restoration.\n # When graph building, contains a list of ops to run to restore objects from\n # this checkpoint.\n self.restore_ops = []\n self.restore_ops_by_name = restore_op_cache\n self.graph_view = graph_view\n self.new_restore_ops_callback = None\n # A mapping from optimizer proto ids to lists of slot variables to be\n # restored when the optimizer is tracked. Only includes slot variables whose\n # regular variables have already been created, and only for optimizer\n # objects which have not yet been created/tracked.\n self.deferred_slot_restorations = {}\n # A mapping from variable proto ids to lists of slot variables to be\n # restored when the variable is created/tracked. 
These get shifted over to\n # deferred_slot_restorations if the optimizer hasn't been created when that\n # happens.\n self.slot_restorations = {}\n # Controls whether errors are printed in __del__ if some objects did not\n # match.\n self.expect_partial_attr = False\n for node_index, node in enumerate(self.object_graph_proto.nodes):\n for slot_reference in node.slot_variables:\n # `node` refers to an `Optimizer`, since only these have slot variables.\n self.slot_restorations.setdefault(\n slot_reference.original_variable_node_id, []).append(\n base._SlotVariableRestoration( # pylint: disable=protected-access\n optimizer_id=node_index,\n slot_variable_id=slot_reference.slot_variable_node_id,\n slot_name=slot_reference.slot_name))\n\n self._deleter = _CheckpointRestoreCoordinatorDeleter(\n self.expect_partial_attr,\n self.object_graph_proto,\n self.matched_proto_ids,\n self.unused_attributes)\n\n @property\n def expect_partial(self):\n return self.expect_partial_attr\n\n @expect_partial.setter\n def expect_partial(self, expect_partial):\n self.expect_partial_attr = expect_partial\n self._deleter.set_expect_partial(expect_partial)\n\n def new_restore_ops(self, new_ops):\n self.restore_ops.extend(new_ops)\n if self.new_restore_ops_callback:\n self.new_restore_ops_callback(new_ops) # pylint: disable=not-callable\n\n def restore_saveables(self, tensor_saveables, python_saveables):\n \"\"\"Run or build restore operations for SaveableObjects.\n\n Args:\n tensor_saveables: `SaveableObject`s which correspond to Tensors.\n python_saveables: `PythonStateSaveable`s which correspond to Python\n values.\n\n Returns:\n When graph building, a list of restore operations, either cached or newly\n created, to restore `tensor_saveables`.\n \"\"\"\n restore_ops = []\n # Eagerly run restorations for Python state.\n reader = None\n for saveable in python_saveables:\n if reader is None:\n # Lazily create the NewCheckpointReader, since this requires file access\n # and we may not have any Python saveables.\n reader = py_checkpoint_reader.NewCheckpointReader(self.save_path_string)\n spec_names = [spec.name for spec in saveable.specs]\n saveable.python_restore([reader.get_tensor(name) for name in spec_names])\n\n # If we have new SaveableObjects, extract and cache restore ops.\n if tensor_saveables:\n validated_saveables = saveable_object_util.validate_and_slice_inputs(\n tensor_saveables)\n validated_names = set(saveable.name for saveable in validated_saveables)\n if set(tensor_saveables.keys()) != validated_names:\n raise AssertionError(\n (\"Saveable keys changed when validating. Got back %s, was \"\n \"expecting %s\") % (tensor_saveables.keys(), validated_names))\n new_restore_ops = functional_saver.MultiDeviceSaver(\n validated_saveables).restore(self.save_path_tensor, self.options)\n if not context.executing_eagerly():\n for name, restore_op in sorted(new_restore_ops.items()):\n restore_ops.append(restore_op)\n assert name not in self.restore_ops_by_name\n self.restore_ops_by_name[name] = restore_op\n return restore_ops\n\n\nclass _NameBasedRestoreCoordinator(object):\n \"\"\"Keeps the status of a name-based checkpoint restore.\"\"\"\n\n def __init__(self, save_path, dtype_map=None):\n self.save_path = save_path\n self.dtype_map = dtype_map\n # A map from trackable objects to unused attribute names. 
We don't have\n # proto IDs when doing a name-based restore, so the map keys differ from\n # those in _CheckpointRestoreCoordinator.\n self.unused_attributes = object_identity.ObjectIdentityWeakKeyDictionary()\n self.restore_uid = ops.uid()\n\n def globally_named_object_attributes(self, trackable):\n \"\"\"Create globally named SaveableObjects from attributes.\n\n If an object's attribute has no global name specified (default construction\n for the SaveableObject factory), records the failure in\n `self.unused_attributes` (which can then be used to make status assertions\n fail; see `NameBasedSaverStatus`).\n\n Args:\n trackable: An object to save.\n\n Yields:\n SaveableObjects for `trackable`'s attributes.\n \"\"\"\n for attribute_name, saveable_factory in (\n trackable._gather_saveables_for_checkpoint().items()): # pylint: disable=protected-access\n if callable(saveable_factory):\n try:\n # This saveable object factory does not have a default name= argument,\n # which means there's no way to save/restore it using a name-based\n # checkpoint. Ignore the error now and make sure assert_consumed()\n # fails.\n saveable = saveable_factory()\n except TypeError:\n # Even if we can't name this object, we should construct it and check\n # whether it's optional to restore it. If it's optional we don't need\n # to make assertions fail.\n if not saveable_factory(\"\").optional_restore:\n self.unused_attributes.setdefault(trackable,\n []).append(attribute_name)\n continue\n else:\n saveable = saveable_factory\n names_to_saveables = saveable_object_util.op_list_to_dict(\n [saveable], convert_variable_to_tensor=False)\n for name, op in names_to_saveables.items():\n for saveable_object in saveable_object_util.saveable_objects_for_op(\n op=op, name=name):\n yield saveable_object\n\n def eager_restore(self, trackable):\n \"\"\"Runs restore ops for `trackable`'s attributes.\"\"\"\n # When graph building, we don't add any restore ops to the graph until\n # run_restore_ops/initialize_or_restore on the status object for name-based\n # checkpoints.\n assert context.executing_eagerly()\n for saveable in self.globally_named_object_attributes(trackable):\n restored_tensors = []\n tensor_missing = False\n for spec in saveable.specs:\n if spec.name in self.dtype_map:\n with ops.device(\"cpu:0\"):\n restored, = io_ops.restore_v2(\n prefix=self.save_path,\n tensor_names=[spec.name],\n shape_and_slices=[\"\"],\n dtypes=[self.dtype_map[spec.name]],\n name=\"%s_checkpoint_read\" % (spec.name,))\n restored_tensors.append(array_ops.identity(restored))\n else:\n tensor_missing = True\n\n if tensor_missing:\n # Record that this variable didn't match so assertions will fail.\n self.unused_attributes.setdefault(trackable, []).append(saveable.name)\n else:\n # Ignores values missing from the checkpoint, as with object-based\n # restore. 
Status assertions can be used to check exact matches,\n # although it's unlikely to ever happen for name-based checkpoints.\n saveable.restore(\n restored_tensors=restored_tensors, restored_shapes=None)\n\n\n# TODO(allenl): If this ends up in a public API, consider adding LINT.If Change\n# or consolidating the implementation with get_variable.\ndef _default_getter(name,\n shape,\n dtype,\n initializer=None,\n partition_info=None,\n **kwargs):\n \"\"\"A pared-down version of get_variable which does not reuse variables.\"\"\"\n dtype = dtypes.as_dtype(dtype)\n shape_object = tensor_shape.as_shape(shape)\n with ops.init_scope():\n if initializer is None:\n initializer, initializing_from_value = (\n variable_scope._get_default_variable_store()._get_default_initializer( # pylint: disable=protected-access\n name=name,\n shape=shape_object,\n dtype=dtype))\n else:\n initializing_from_value = not callable(initializer)\n # Same logic as get_variable\n variable_dtype = dtype.base_dtype\n if initializing_from_value:\n if shape is not None:\n raise ValueError(\"If initializer is a constant, do not specify shape.\")\n initial_value = initializer\n else:\n # Instantiate initializer if provided initializer is a type object.\n if isinstance(initializer, type(init_ops.Initializer)):\n initializer = initializer(dtype=dtype)\n shape_list = None if shape is None else shape_object.as_list()\n if \"partition_info\" in tf_inspect.getargspec(initializer).args:\n initial_value = functools.partial(initializer,\n shape_list,\n dtype=dtype,\n partition_info=partition_info)\n else:\n initial_value = functools.partial(initializer,\n shape_list,\n dtype=dtype)\n\n return variables.VariableV1(\n initial_value=initial_value,\n name=name,\n dtype=variable_dtype,\n use_resource=True,\n **kwargs)\n\n\ndef add_variable(trackable,\n name,\n shape=None,\n dtype=dtypes.float32,\n initializer=None,\n trainable=True):\n \"\"\"Add a variable to a Trackable with no scope influence.\"\"\"\n return trackable._add_variable_with_custom_getter( # pylint: disable=protected-access\n name=name,\n shape=shape,\n dtype=dtype,\n initializer=initializer,\n getter=_default_getter,\n trainable=trainable)\n\n\ndef object_metadata(save_path):\n \"\"\"Retrieves information about the objects in a checkpoint.\n\n Example usage:\n\n ```python\n object_graph = tf.contrib.checkpoint.object_metadata(\n tf.train.latest_checkpoint(checkpoint_directory))\n ckpt_variable_names = set()\n for node in object_graph.nodes:\n for attribute in node.attributes:\n ckpt_variable_names.add(attribute.full_name)\n ```\n\n Args:\n save_path: The path to the checkpoint, as returned by `save` or\n `tf.train.latest_checkpoint`.\n\n Returns:\n A parsed `tf.contrib.checkpoint.TrackableObjectGraph` protocol buffer.\n Raises:\n ValueError: If an object graph was not found in the checkpoint.\n \"\"\"\n reader = py_checkpoint_reader.NewCheckpointReader(save_path)\n try:\n object_graph_string = reader.get_tensor(base.OBJECT_GRAPH_PROTO_KEY)\n except errors_impl.NotFoundError:\n raise ValueError(\n ('The specified checkpoint \"%s\" does not appear to be object-based (it '\n 'is missing the key \"%s\"). 
Likely it was created with a name-based '\n \"saver and does not contain an object dependency graph.\") %\n (save_path, base.OBJECT_GRAPH_PROTO_KEY))\n object_graph_proto = (trackable_object_graph_pb2.TrackableObjectGraph())\n object_graph_proto.ParseFromString(object_graph_string)\n return object_graph_proto\n\n\ndef list_objects(root_trackable):\n \"\"\"Traverse the object graph and list all accessible objects.\n\n Looks for `Trackable` objects which are dependencies of\n `root_trackable`. Includes slot variables only if the variable they are\n slotting for and the optimizer are dependencies of `root_trackable`\n (i.e. if they would be saved with a checkpoint).\n\n Args:\n root_trackable: A `Trackable` object whose dependencies should be flattened.\n\n Returns:\n A flat list of objects.\n \"\"\"\n return graph_view_lib.ObjectGraphView(root_trackable).list_objects()\n\n\ndef gather_initializers(root_trackable):\n \"\"\"Traverse the object graph and find initialization ops.\n\n Looks for `Trackable` objects which are dependencies of\n `root_trackable` and which have an `initializer` property. Includes\n initializers for slot variables only if the variable they are slotting for and\n the optimizer are dependencies of `root_trackable` (i.e. if they would be\n saved with a checkpoint).\n\n Args:\n root_trackable: A `Trackable` object to gather initializers for.\n\n Returns:\n A list of initialization ops.\n \"\"\"\n trackable_objects = list_objects(root_trackable)\n return [\n c.initializer\n for c in trackable_objects\n if hasattr(c, \"initializer\") and c.initializer is not None\n ]\n\n\n@tf_contextlib.contextmanager\ndef capture_dependencies(template):\n \"\"\"Capture variables created within this scope as `Template` dependencies.\n\n Requires that `template.variable_scope` is active.\n\n This scope is intended as a compatibility measure, allowing a trackable\n object to add dependencies on variables created in a block of code which is\n not aware of object-based saving (and instead uses variable names\n heavily). This is how `Template` objects add dependencies on variables and\n sub-`Template`s. Where possible, use `tf.compat.v1.make_template` directly.\n\n Args:\n template: The `Template` object to register dependencies with.\n\n Yields:\n None (when used as a context manager).\n \"\"\"\n name_prefix = template.variable_scope.name\n\n def _trackable_custom_creator(next_creator,\n name,\n initial_value,\n trackable_parent=None,\n **kwargs):\n \"\"\"A variable creation hook which adds Trackable dependencies.\n\n Set for example during a `Template`'s first wrapped function\n execution. Ensures that (a) `template` depends on any trackable\n objects using their own `capture_dependencies` scope inside this scope which\n create variables, and (b) that any variables not in a more deeply nested\n scope are added as dependencies directly.\n\n The `trackable_parent` argument is passed between custom creators but\n ignored when the variable object itself is created. This argument indicates\n (if not `None`) that a more deeply nested scope has already added the\n variable as a dependency, and that parent scopes should add a dependency on\n that object rather than on the variable directly.\n\n Args:\n next_creator: See `variable_scope.variable_creator_scope`; the next\n creator in the chain.\n name: The (full, scope-influenced) name of the variable. 
The `name_prefix`\n itself is stripped for the purposes of object-based dependency tracking,\n but scopes opened within this scope are respected.\n initial_value: See `variable_scope.variable_creator_scope`. Taken\n explicitly so the argument can be re-named and used with\n `Trackable._add_variable_with_custom_getter`.\n trackable_parent: If not None, a more deeply nested trackable object and\n its name prefix which were passed to `capture_dependencies` to add a\n dependency on (rather than depending on the variable directly).\n **kwargs: Passed through to the next creator.\n\n Returns:\n The output of `next_creator`: the fetched/created variable object.\n \"\"\"\n\n def _call_next_creator_renaming_initializer(initializer, **inner_kwargs):\n inner_kwargs.pop(\"name\") # Ignored; this is the scope-stripped name which\n # we don't want to propagate.\n return next_creator(initial_value=initializer, name=name, **inner_kwargs)\n\n if name is not None and name.startswith(name_prefix):\n scope_stripped_name = name[len(name_prefix) + 1:]\n if not trackable_parent:\n return template._add_variable_with_custom_getter( # pylint: disable=protected-access\n initializer=initial_value,\n name=scope_stripped_name,\n getter=_call_next_creator_renaming_initializer,\n # Disable error checking for Trackable. Exceptions are instead\n # raised if necessary when the object-based saver tries to\n # save/restore the object.\n overwrite=True,\n trackable_parent=(template, name_prefix),\n **kwargs)\n else:\n parent_object, parent_name_prefix = trackable_parent\n template._track_trackable( # pylint: disable=protected-access\n parent_object,\n name=parent_name_prefix[len(name_prefix) + 1:],\n overwrite=True)\n return next_creator(\n name=name,\n initial_value=initial_value,\n trackable_parent=(template, name_prefix),\n **kwargs)\n\n with variable_scope.variable_creator_scope(_trackable_custom_creator):\n yield\n\n\nclass _LoadStatus(object):\n \"\"\"Abstract base for load status callbacks.\"\"\"\n\n @abc.abstractmethod\n def assert_consumed(self):\n \"\"\"Raises an exception unless a non-trivial restoration has completed.\"\"\"\n pass\n\n @abc.abstractmethod\n def assert_existing_objects_matched(self):\n \"\"\"Raises an exception unless existing Python objects have been matched.\"\"\"\n pass\n\n @abc.abstractmethod\n def assert_nontrivial_match(self):\n \"\"\"Raises an exception if only the root object matched.\"\"\"\n pass\n\n @abc.abstractmethod\n def run_restore_ops(self, session=None):\n \"\"\"Runs restore ops from the checkpoint. 
Requires a valid checkpoint.\"\"\"\n pass\n\n @abc.abstractmethod\n def initialize_or_restore(self, session=None):\n \"\"\"Runs restore ops from the checkpoint, or initializes variables.\"\"\"\n pass\n\n def expect_partial(self):\n \"\"\"Silence warnings about incomplete checkpoint restores.\"\"\"\n return self\n\n\n@tf_export(\"__internal__.tracking.streaming_restore\", v1=[])\ndef streaming_restore(status, session=None):\n \"\"\"When graph building, runs restore ops as soon as they come in.\n\n Args:\n status: A _LoadStatus objects from an object-based saver's restore().\n Streaming restore from name-based checkpoints is not currently supported.\n session: A session to run new restore ops in.\n \"\"\"\n if context.executing_eagerly():\n # Streaming restore is the default/only behavior when executing eagerly.\n return\n if session is None:\n session = get_session()\n if isinstance(status, NameBasedSaverStatus):\n raise NotImplementedError(\n \"Streaming restore not supported from name-based checkpoints when \"\n \"graph building. File a feature request if this limitation bothers \"\n \"you. As a workaround, consider either using tf.train.Checkpoint to \"\n \"load name-based checkpoints or enabling eager execution.\")\n status.run_restore_ops(session=session)\n # pylint: disable=protected-access\n status._checkpoint.new_restore_ops_callback = (\n lambda ops: session.run(ops, feed_dict=status._feed_dict))\n # pylint: enable=protected-access\n\n\ndef _objects_with_attributes(full_list):\n \"\"\"Filters out objects with no direct variable dependencies for assertions.\"\"\"\n return [o for o in full_list if o._gather_saveables_for_checkpoint()] # pylint: disable=protected-access\n\n\nclass CheckpointLoadStatus(_LoadStatus):\n \"\"\"Checks the status of checkpoint loading and manages restore ops.\n\n Returned from `Saver.restore`. Since `restore` may defer the loading of values\n in the checkpoint which don't yet have corresponding Python objects,\n `CheckpointLoadStatus` provides a callback to verify that checkpoint loading\n is complete (`assert_consumed`).\n\n When graph building, `restore` does not run restore ops itself since their\n creation may be deferred. The `run_restore_ops` method must be called once all\n Python objects with values to restore have been created and added to the\n dependency graph (this does not necessarily have to be the whole checkpoint;\n calling `run_restore_ops` while `assert_consumed` fails is supported and will\n partially restore the checkpoint).\n\n See `Saver.restore` for usage examples.\n \"\"\"\n\n def __init__(self, checkpoint, feed_dict, graph_view):\n self._checkpoint = checkpoint\n self._feed_dict = feed_dict\n self._graph_view = graph_view\n # Keep a reference to the root, since graph_view might only have a weakref.\n self._root = graph_view.root\n\n def assert_consumed(self):\n \"\"\"Asserts that all objects in the checkpoint have been created/matched.\n\n Returns:\n `self` for chaining.\n Raises:\n AssertionError: If there are any Python objects in the dependency graph\n which have not been restored from this checkpoint or a later `restore`,\n or if there are any checkpointed values which have not been matched to\n Python objects.\n \"\"\"\n pretty_printer = _ObjectGraphProtoPrettyPrinter(\n self._checkpoint.object_graph_proto)\n self.assert_existing_objects_matched()\n for node_id, node in enumerate(self._checkpoint.object_graph_proto.nodes):\n if not node.attributes:\n # Only raise exceptions for the nodes with attributes themselves. 
Either\n # they're ultimately not important, or they have a child with an\n # attribute.\n continue\n trackable = self._checkpoint.object_by_proto_id.get(node_id, None)\n if trackable is None:\n raise AssertionError(\"Unresolved object in checkpoint {}: {}\"\n .format(pretty_printer.node_names[node_id], node))\n if self._checkpoint.slot_restorations:\n # Sanity check; this collection should be clear if everything has been\n # restored.\n raise AssertionError(\"Unresolved slot restorations: %s\" %\n (self._checkpoint.slot_restorations,))\n if self._checkpoint.unused_attributes:\n unused_attribute_messages = []\n for node_id, attribute in six.iteritems(\n self._checkpoint.unused_attributes):\n obj = self._checkpoint.object_by_proto_id[node_id]\n unused_attribute_messages.append(\n \"{} ({}): {}\"\n .format(pretty_printer.node_names[node_id], obj, attribute))\n raise AssertionError(\n (\"Unused attributes in these objects (the attributes exist in the \"\n \"checkpoint but were not restored):\\n{}\")\n .format(\"\\n\".join(unused_attribute_messages)))\n return self\n\n def assert_existing_objects_matched(self):\n \"\"\"Asserts that trackable Python objects have been matched.\n\n Note that this is a weaker assertion than `assert_consumed`. It will only\n fail for existing Python objects which are (transitive) dependencies of the\n root object and which do not have an entry in the checkpoint.\n\n It will not fail, for example, if a `tf.keras.Layer` object has not yet been\n built and so has not created any `tf.Variable` objects.\n\n Returns:\n `self` for chaining.\n\n Raises:\n AssertionError: If a Python object exists in the transitive dependencies\n of the root object but does not have a value in the checkpoint.\n \"\"\"\n for node_id, node in enumerate(self._checkpoint.object_graph_proto.nodes):\n trackable = self._checkpoint.object_by_proto_id.get(node_id, None)\n if (trackable is not None and\n trackable._update_uid < self._checkpoint.restore_uid): # pylint: disable=protected-access\n raise AssertionError(\"Object not assigned a value from checkpoint: %s\" %\n (node,))\n for trackable_object in self._graph_view.list_objects():\n # Remove data structures that do not contain any variables from\n # restoration checks.\n if (isinstance(trackable_object,\n data_structures.TrackableDataStructure) and\n not trackable_object._checkpoint_dependencies):\n continue\n self._checkpoint.all_python_objects.add(trackable_object)\n unused_python_objects = (\n object_identity.ObjectIdentitySet(\n _objects_with_attributes(\n self._checkpoint.all_python_objects)) -\n object_identity.ObjectIdentitySet(\n self._checkpoint.object_by_proto_id.values()))\n if unused_python_objects:\n raise AssertionError(\n (\"Some Python objects were not bound to checkpointed values, likely \"\n \"due to changes in the Python program: %s\") %\n (list(unused_python_objects),))\n return self\n\n def assert_nontrivial_match(self):\n \"\"\"Raises an exception if only the root object matched.\"\"\"\n for trackable_object in self._graph_view.list_objects():\n self._checkpoint.all_python_objects.add(trackable_object)\n if len(self._checkpoint.object_by_proto_id) <= 1:\n unused_python_objects = (\n object_identity.ObjectIdentitySet(\n _objects_with_attributes(self._checkpoint.all_python_objects))\n - object_identity.ObjectIdentitySet(\n self._checkpoint.object_by_proto_id.values()))\n if unused_python_objects:\n raise AssertionError(\n (\"Nothing except the root object matched a checkpointed value. 
\"\n \"Typically this means that the checkpoint does not match the \"\n \"Python program. The following objects have no matching \"\n \"checkpointed value: %s\") % (list(unused_python_objects),))\n else:\n raise AssertionError(\n \"Nothing to load. No dependencies have been added to %s yet.\" %\n (self._graph_view.root,))\n return self\n\n def run_restore_ops(self, session=None):\n \"\"\"Run operations to restore objects in the dependency graph.\"\"\"\n if context.executing_eagerly():\n return # Run eagerly\n if session is None:\n session = get_session()\n session.run(self._checkpoint.restore_ops, feed_dict=self._feed_dict)\n\n def initialize_or_restore(self, session=None):\n \"\"\"Run operations to initialize or restore objects in the dependency graph.\n\n Any objects in the dependency graph which have initializers but are not in\n the checkpoint will have those initializers run, unless those variables are\n being restored by a later call to `tf.train.Checkpoint.restore()`.\n\n This method has a sibling in `InitializationOnlyStatus` which instead\n initializes variables. That type is returned if no checkpoint is specified\n in `Saver.restore`.\n\n Args:\n session: The session to run init/restore ops in. If `None`, uses the\n default session.\n \"\"\"\n if context.executing_eagerly():\n return # Initialization and restoration ops are run eagerly\n if session is None:\n session = get_session()\n all_objects = self._graph_view.list_objects()\n already_initialized_objects = object_identity.ObjectIdentitySet(\n self._checkpoint.object_by_proto_id.values())\n initializers_for_non_restored_variables = [\n c.initializer for c in all_objects\n if hasattr(c, \"initializer\")\n and c not in already_initialized_objects\n and (getattr(c, \"_update_uid\", self._checkpoint.restore_uid - 1)\n < self._checkpoint.restore_uid)]\n self.run_restore_ops(session=session)\n session.run(initializers_for_non_restored_variables)\n\n def expect_partial(self):\n \"\"\"Silence warnings about incomplete checkpoint restores.\"\"\"\n self._checkpoint.expect_partial = True\n return self\n\n\nclass InitializationOnlyStatus(_LoadStatus):\n \"\"\"Returned from `Saver.restore` when no checkpoint has been specified.\n\n Objects of this type have the same `assert_consumed` method as\n `CheckpointLoadStatus`, but it always fails. However,\n `initialize_or_restore` works on objects of both types, and will\n initialize variables in `InitializationOnlyStatus` objects or restore them\n otherwise.\n \"\"\"\n\n def __init__(self, graph_view, restore_uid):\n self._restore_uid = restore_uid\n self._graph_view = graph_view\n # Keep a reference to the root, since graph_view might only have a weakref.\n self._root = graph_view.root\n\n def assert_consumed(self):\n \"\"\"Assertion for consistency with `CheckpointLoadStatus`. Always fails.\"\"\"\n raise AssertionError(\n \"No checkpoint specified (save_path=None); nothing is being restored.\")\n\n def assert_existing_objects_matched(self):\n \"\"\"Assertion for consistency with `CheckpointLoadStatus`. Always fails.\"\"\"\n raise AssertionError(\n \"No checkpoint specified (save_path=None); nothing is being restored.\")\n\n def assert_nontrivial_match(self):\n \"\"\"Assertion for consistency with `CheckpointLoadStatus`. 
Always fails.\"\"\"\n raise AssertionError(\n \"No checkpoint specified (save_path=None); nothing is being restored.\")\n\n def run_restore_ops(self, session=None):\n \"\"\"For consistency with `CheckpointLoadStatus`.\n\n Use `initialize_or_restore` for initializing if no checkpoint was passed\n to `Saver.restore` and restoring otherwise.\n\n Args:\n session: Not used.\n \"\"\"\n raise AssertionError(\n \"No checkpoint specified, so no restore ops are available \"\n \"(save_path=None to Saver.restore).\")\n\n def initialize_or_restore(self, session=None):\n \"\"\"Runs initialization ops for variables.\n\n Objects which would be saved by `Saver.save` will be initialized, unless\n those variables are being restored by a later call to\n `tf.train.Checkpoint.restore()`.\n\n This method does nothing when executing eagerly (initializers get run\n eagerly).\n\n Args:\n session: The session to run initialization ops in. If `None`, uses the\n default session.\n \"\"\"\n if context.executing_eagerly():\n return # run eagerly\n if session is None:\n session = get_session()\n trackable_objects = self._graph_view.list_objects()\n initializers = [\n c.initializer for c in trackable_objects\n if hasattr(c, \"initializer\") and c.initializer is not None\n and (getattr(c, \"_update_uid\", self._restore_uid - 1)\n < self._restore_uid)]\n session.run(initializers)\n\n\n_DEPRECATED_RESTORE_INSTRUCTIONS = (\n \"Restoring a name-based tf.train.Saver checkpoint using the object-based \"\n \"restore API. This mode uses global names to match variables, and so is \"\n \"somewhat fragile. It also adds new restore ops to the graph each time it \"\n \"is called when graph building. Prefer re-encoding training checkpoints in \"\n \"the object-based format: run save() on the object-based saver (the same \"\n \"one this message is coming from) and use that checkpoint in the future.\")\n\n\nclass NameBasedSaverStatus(_LoadStatus):\n \"\"\"Status for loading a name-based training checkpoint.\"\"\"\n\n # Ideally this deprecation decorator would be on the class, but that\n # interferes with isinstance checks.\n @deprecation.deprecated(\n date=None, instructions=_DEPRECATED_RESTORE_INSTRUCTIONS)\n def __init__(self, checkpoint, graph_view):\n self._checkpoint = checkpoint\n self._graph_view = graph_view\n self._optionally_restored = []\n # Keep a reference to the root, since graph_view might only have a weakref.\n self._root = graph_view.root\n\n def add_to_optionally_restored(self, var):\n \"\"\"Add a variable to the list of optionally restored variables.\n\n There are situations where certain variables should be ignored in assertions\n such as assert_existing_objects_matched(). 
One example is that of a\n checkpoint saved with train.Saver(), and restored with train.Checkpoint():\n it is possible for the train.Saver() checkpoint to be missing the internal\n `save_counter` variable, which we want to ignore on restore.\n\n Args:\n var: The variable to treat as optionally restored.\n \"\"\"\n self._optionally_restored.append(var)\n\n def assert_consumed(self):\n \"\"\"Raises an exception if any variables are unmatched.\"\"\"\n unused_attributes = list(self._checkpoint.unused_attributes.items())\n unused_attributes = [\n a for a in unused_attributes\n if all(a[0] is not x for x in self._optionally_restored)\n ]\n if unused_attributes:\n unused_attribute_strings = [\n \"\\n {}: {}\".format(obj, attributes)\n for obj, attributes in unused_attributes\n ]\n raise AssertionError(\n \"Some objects had attributes which were not restored:{}\".format(\n \"\".join(unused_attribute_strings)))\n for trackable in self._graph_view.list_objects():\n # pylint: disable=protected-access\n trackable._maybe_initialize_trackable()\n if trackable._update_uid < self._checkpoint.restore_uid:\n raise AssertionError(\"Object not restored: %s\" % (trackable,))\n # pylint: enable=protected-access\n return self\n\n def assert_existing_objects_matched(self):\n \"\"\"Raises an exception if currently created objects are unmatched.\"\"\"\n # For name-based checkpoints there's no object information in the\n # checkpoint, so there's no distinction between\n # assert_existing_objects_matched and assert_consumed (and both are less\n # useful since we don't touch Python objects or Python state).\n return self.assert_consumed()\n\n def assert_nontrivial_match(self):\n \"\"\"Raises an exception if currently created objects are unmatched.\"\"\"\n # For name-based checkpoints there's no object information in the\n # checkpoint, so there's no distinction between\n # assert_nontrivial_match and assert_consumed (and both are less\n # useful since we don't touch Python objects or Python state).\n return self.assert_consumed()\n\n def _gather_saveable_objects(self):\n \"\"\"Walk the object graph, using global names for SaveableObjects.\"\"\"\n objects = self._graph_view.list_objects()\n saveable_objects = []\n for trackable in objects:\n # pylint: disable=protected-access\n trackable._maybe_initialize_trackable()\n if trackable._update_uid < self._checkpoint.restore_uid:\n trackable._update_uid = self._checkpoint.restore_uid\n else:\n continue\n # pylint: enable=protected-access\n saveable_objects.extend(\n self._checkpoint.globally_named_object_attributes(trackable))\n return saveable_objects\n\n def run_restore_ops(self, session=None):\n \"\"\"Load the name-based checkpoint using a new `tf.compat.v1.train.Saver`.\"\"\"\n if context.executing_eagerly():\n return # Nothing to do, variables are restored on creation.\n if session is None:\n session = get_session()\n with ops.device(\"/cpu:0\"):\n saveables = self._gather_saveable_objects()\n v1_saver_lib.Saver(saveables).restore(\n sess=session, save_path=self._checkpoint.save_path)\n\n def initialize_or_restore(self, session=None):\n \"\"\"Alias for `run_restore_ops`.\"\"\"\n self.run_restore_ops(session=session)\n\n\nclass _SessionWithFeedDictAdditions(session_lib.SessionInterface):\n \"\"\"Pretends to be a session, inserts extra feeds on run().\"\"\"\n\n def __init__(self, session, feed_additions):\n self._wrapped_session = session\n self._feed_additions = feed_additions\n\n def run(self, fetches, feed_dict=None, **kwargs):\n if feed_dict is None:\n feed_dict = 
{}\n else:\n feed_dict = feed_dict.copy()\n feed_dict.update(self._feed_additions)\n return self._wrapped_session.run(\n fetches=fetches, feed_dict=feed_dict, **kwargs)\n\n\n@tf_export(\"__internal__.tracking.TrackableSaver\", v1=[])\nclass TrackableSaver(object):\n \"\"\"Saves and restores a `Trackable` object and its dependencies.\n\n See `Trackable` for details of dependency management. `Saver` wraps\n `tf.compat.v1.train.Saver` for saving, including extra information about the\n graph of\n dependencies between Python objects. When restoring, it uses this information\n about the save-time dependency graph to more robustly match objects with their\n checkpointed values. When executing eagerly, it supports restoring variables\n on object creation (see `Saver.restore`).\n\n Values in a checkpoint are mapped to `Trackable` Python objects\n (`Variable`s, `Optimizer`s, `Layer`s) based on the names provided when the\n checkpoint was written. To avoid breaking existing checkpoints when modifying\n a class, dependency names (the names of attributes to which `Trackable`\n objects are assigned) may not change. These names are local to objects, in\n contrast to the `Variable.name`-based save/restore from\n `tf.compat.v1.train.Saver`, and\n so allow additional program transformations.\n \"\"\"\n\n def __init__(self, graph_view):\n \"\"\"Configure saving.\n\n Args:\n graph_view: A `GraphView` object containing a description of the object\n graph to save.\n \"\"\"\n # The file prefix placeholder is created lazily when graph building (and not\n # at all when executing eagerly) to avoid creating ops in the constructor\n # (when they may never be necessary).\n self._file_prefix_placeholder = None\n\n # Op caching for save\n self._object_graph_feed_tensor = None\n self._last_save_object_graph = None\n self._file_prefix_feed_tensor = None\n self._cached_save_operation = None\n\n # Op caching for restore, shared between _CheckpointRestoreCoordinators\n self._restore_op_cache = {}\n self._graph_view = graph_view\n\n def _gather_saveables(self, object_graph_tensor=None):\n \"\"\"Wraps _serialize_object_graph to include the object graph proto.\"\"\"\n (named_saveable_objects, graph_proto,\n feed_additions) = self._graph_view.serialize_object_graph()\n if object_graph_tensor is None:\n with ops.device(\"/cpu:0\"):\n object_graph_tensor = constant_op.constant(\n graph_proto.SerializeToString(), dtype=dtypes.string)\n else:\n feed_additions.update(\n {object_graph_tensor: graph_proto.SerializeToString()})\n assert base.OBJECT_GRAPH_PROTO_KEY not in named_saveable_objects\n named_saveable_objects.append(\n base.NoRestoreSaveable(\n tensor=object_graph_tensor, name=base.OBJECT_GRAPH_PROTO_KEY))\n return named_saveable_objects, graph_proto, feed_additions\n\n def _save_cached_when_graph_building(self,\n file_prefix,\n object_graph_tensor,\n options):\n \"\"\"Create or retrieve save ops.\n\n Args:\n file_prefix: The prefix for saved checkpoint files.\n object_graph_tensor: A `Tensor` to which the current object graph will be\n fed.\n options: `CheckpointOptions` object.\n\n Returns:\n A two-element tuple with a filename tensor and a feed_dict of tensors to\n feed when running it (if graph building). The feed dict contains the\n current object graph and any Python state to be saved in the\n checkpoint. 
When executing eagerly only the first argument is meaningful.\n \"\"\"\n (named_saveable_objects, graph_proto,\n feed_additions) = self._gather_saveables(\n object_graph_tensor=object_graph_tensor)\n if (self._last_save_object_graph != graph_proto\n # When executing eagerly, we need to re-create SaveableObjects each time\n # save() is called so they pick up new Tensors passed to their\n # constructors. That means the Saver needs to be copied with a new\n # var_list.\n or context.executing_eagerly() or ops.inside_function()):\n saver = functional_saver.MultiDeviceSaver(named_saveable_objects)\n save_op = saver.save(file_prefix, options=options)\n with ops.device(\"/cpu:0\"):\n with ops.control_dependencies([save_op]):\n self._cached_save_operation = array_ops.identity(file_prefix)\n self._last_save_object_graph = graph_proto\n return self._cached_save_operation, feed_additions\n\n def save(self, file_prefix, checkpoint_number=None, session=None,\n options=None):\n \"\"\"Save a training checkpoint.\n\n The saved checkpoint includes variables created by this object and any\n Trackable objects it depends on at the time `Saver.save()` is called.\n\n Args:\n file_prefix: A prefix to use for the checkpoint filenames\n (/path/to/directory/and_a_prefix). Names are generated based on this\n prefix and `checkpoint_number`, if provided.\n checkpoint_number: An integer variable or Tensor, used to number\n checkpoints. Typically this value is saved along with other variables in\n training checkpoints, which will happen automatically if it was created\n by `root_trackable` or one of its dependencies (via\n `Trackable._add_variable`).\n session: The session to evaluate variables in. Ignored when executing\n eagerly. If not provided when graph building, the default session is\n used.\n options: Optional `tf.train.CheckpointOptions` object.\n\n Returns:\n The full path to the checkpoint.\n \"\"\"\n options = options or checkpoint_options.CheckpointOptions()\n feed_dict = {}\n use_session = (not context.executing_eagerly() and\n not ops.inside_function())\n if checkpoint_number:\n file_prefix = \"%s-%d\" % (file_prefix, checkpoint_number)\n if use_session:\n if self._object_graph_feed_tensor is None:\n with ops.device(\"/cpu:0\"):\n self._object_graph_feed_tensor = constant_op.constant(\n \"\", dtype=dtypes.string)\n self._file_prefix_feed_tensor = constant_op.constant(\n \"\", dtype=dtypes.string)\n object_graph_tensor = self._object_graph_feed_tensor\n file_prefix_tensor = self._file_prefix_feed_tensor\n feed_dict[file_prefix_tensor] = file_prefix\n else:\n with ops.device(\"/cpu:0\"):\n file_prefix_tensor = constant_op.constant(\n file_prefix, dtype=dtypes.string)\n object_graph_tensor = None\n\n file_io.recursive_create_dir(os.path.dirname(file_prefix))\n save_path, new_feed_additions = self._save_cached_when_graph_building(\n file_prefix_tensor, object_graph_tensor, options)\n if new_feed_additions:\n feed_dict.update(new_feed_additions)\n if not use_session:\n session = None\n elif session is None:\n session = get_session()\n\n if session:\n return session.run(save_path, feed_dict=feed_dict)\n else:\n return save_path\n\n def restore(self, save_path, options=None):\n \"\"\"Restore a training checkpoint.\n\n Restores `root_trackable` and any objects that it tracks\n (transitive). Either assigns values immediately if variables to restore have\n been created already, or defers restoration until the variables are\n created. 
Dependencies added to the `root_trackable` passed to the\n constructor after this call will be matched if they have a corresponding\n object in the checkpoint.\n\n When building a graph, restorations are added to the graph but not run.\n\n To disallow deferred loading, assert immediately that all checkpointed\n variables have been matched to variable objects:\n\n ```python\n saver = Saver(root)\n saver.restore(path).assert_consumed()\n ```\n\n An exception will be raised unless every object was matched and its\n variables already exist.\n\n When graph building, `assert_consumed()` indicates that all of the restore\n ops which will be created for this checkpoint have been created. They can be\n run via the `run_restore_ops()` function of the status object:\n\n ```python\n saver.restore(path).assert_consumed().run_restore_ops()\n ```\n\n If the checkpoint has not been consumed completely, then the list of restore\n ops will grow as more objects are added to the dependency graph.\n\n Name-based `tf.compat.v1.train.Saver` checkpoints can be loaded using this\n method. There is no deferred loading, and names are used to match\n variables. No restore ops are created/run until `run_restore_ops()` or\n `initialize_or_restore()` are called on the returned status object, even\n when executing eagerly. Re-encode name-based checkpoints using this\n object-based `Saver.save` as soon as possible.\n\n Args:\n save_path: The path to the checkpoint, as returned by `save` or\n `tf.train.latest_checkpoint`. If None (as when there is no latest\n checkpoint for `tf.train.latest_checkpoint` to return), returns an\n object which may run initializers for objects in the dependency graph.\n If the checkpoint was written by the name-based\n `tf.compat.v1.train.Saver`, names are used to match variables.\n options: Optional `tf.train.CheckpointOptions` object.\n\n Returns:\n A load status object, which can be used to make assertions about the\n status of checkpoint restoration and run initialization/restore ops\n (of type `CheckpointLoadStatus`, or `InitializationOnlyStatus` if\n `save_path` is `None`).\n\n If `save_path` points to a name-based checkpoint, a `NameBasedSaverStatus`\n object is returned which runs restore ops from a name-based saver.\n \"\"\"\n options = options or checkpoint_options.CheckpointOptions()\n if save_path is None:\n return InitializationOnlyStatus(self._graph_view, ops.uid())\n reader = py_checkpoint_reader.NewCheckpointReader(save_path)\n graph_building = not context.executing_eagerly()\n if graph_building:\n dtype_map = None\n else:\n dtype_map = reader.get_variable_to_dtype_map()\n try:\n object_graph_string = reader.get_tensor(base.OBJECT_GRAPH_PROTO_KEY)\n except errors_impl.NotFoundError:\n # The object graph proto does not exist in this checkpoint. 
Try the\n # name-based compatibility mode.\n restore_coordinator = _NameBasedRestoreCoordinator(\n save_path=save_path,\n dtype_map=dtype_map)\n if not graph_building:\n for existing_trackable in self._graph_view.list_objects():\n # pylint: disable=protected-access\n existing_trackable._maybe_initialize_trackable()\n existing_trackable._name_based_restores.add(restore_coordinator)\n existing_trackable._name_based_attribute_restore(restore_coordinator)\n # pylint: enable=protected-access\n return NameBasedSaverStatus(\n restore_coordinator,\n graph_view=self._graph_view)\n\n if graph_building:\n if self._file_prefix_placeholder is None:\n with ops.device(\"/cpu:0\"):\n self._file_prefix_placeholder = constant_op.constant(\"model\")\n file_prefix_tensor = self._file_prefix_placeholder\n file_prefix_feed_dict = {self._file_prefix_placeholder: save_path}\n else:\n with ops.device(\"/cpu:0\"):\n file_prefix_tensor = constant_op.constant(save_path)\n file_prefix_feed_dict = None\n object_graph_proto = (trackable_object_graph_pb2.TrackableObjectGraph())\n object_graph_proto.ParseFromString(object_graph_string)\n checkpoint = _CheckpointRestoreCoordinator(\n object_graph_proto=object_graph_proto,\n save_path=save_path,\n save_path_tensor=file_prefix_tensor,\n restore_op_cache=self._restore_op_cache,\n graph_view=self._graph_view,\n options=options)\n base.CheckpointPosition(\n checkpoint=checkpoint, proto_id=0).restore(self._graph_view.root)\n\n # Attached dependencies are not attached to the root, so should be restored\n # separately.\n if self._graph_view.attached_dependencies:\n for ref in self._graph_view.attached_dependencies:\n if ref.name == \"root\":\n # Root dependency is automatically added to attached dependencies --\n # this can be ignored since it maps back to the root object.\n continue\n proto_id = None\n # Find proto ID of attached dependency (if it is in the proto).\n for proto_ref in object_graph_proto.nodes[0].children:\n if proto_ref.local_name == ref.name:\n proto_id = proto_ref.node_id\n break\n\n if proto_id in checkpoint.object_by_proto_id:\n # Object has already been restored. This can happen when there's an\n # indirect connection from the attached object to the root.\n continue\n\n base.CheckpointPosition(\n checkpoint=checkpoint, proto_id=proto_id).restore(ref.ref)\n\n load_status = CheckpointLoadStatus(\n checkpoint,\n graph_view=self._graph_view,\n feed_dict=file_prefix_feed_dict)\n return load_status\n\n\ndef frozen_saver(root_trackable):\n \"\"\"Creates a static `tf.compat.v1.train.Saver` from a trackable object.\n\n The returned `Saver` saves object-based checkpoints, but these checkpoints\n will no longer reflect structural changes to the object graph, only changes to\n the values of `Variable`s added as dependencies of the root object before\n `freeze` was called.\n\n `restore` works on the returned `Saver`, but requires that the object graph of\n the checkpoint being loaded exactly matches the object graph when `freeze` was\n called. 
This is in contrast the object-based restore performed by\n `tf.train.Checkpoint` which attempts a fuzzy matching between a checkpoint's\n object graph and the current Python object graph.\n\n Args:\n root_trackable: A trackable object to save.\n\n Returns:\n A saver which saves object-based checkpoints for the object graph frozen at\n the time `frozen_saver` was called.\n \"\"\"\n named_saveable_objects = graph_view_lib.ObjectGraphView(\n root_trackable).frozen_saveable_objects()\n return functional_saver.MultiDeviceSaver(named_saveable_objects)\n\n\ndef saver_with_op_caching(obj, attached_dependencies=None):\n if context.executing_eagerly():\n saveables_cache = None\n else:\n saveables_cache = object_identity.ObjectIdentityWeakKeyDictionary()\n return TrackableSaver(\n graph_view_lib.ObjectGraphView(\n weakref.ref(obj), saveables_cache=saveables_cache,\n attached_dependencies=attached_dependencies))\n\n\ndef _assert_trackable(obj):\n if not isinstance(\n obj, (base.Trackable, def_function.Function)):\n raise ValueError(\n \"`Checkpoint` was expecting a trackable object (an object \"\n \"derived from `TrackableBase`), got {}. If you believe this \"\n \"object should be trackable (i.e. it is part of the \"\n \"TensorFlow Python API and manages state), please open an issue.\"\n .format(obj))\n\n\n# Mentions graph building / Sessions. The v2 version is below.\n@tf_export(v1=[\"train.Checkpoint\"])\nclass CheckpointV1(tracking.AutoTrackable):\n \"\"\"Groups trackable objects, saving and restoring them.\n\n `Checkpoint`'s constructor accepts keyword arguments whose values are types\n that contain trackable state, such as `tf.compat.v1.train.Optimizer`\n implementations, `tf.Variable`, `tf.keras.Layer` implementations, or\n `tf.keras.Model` implementations. It saves these values with a checkpoint, and\n maintains a `save_counter` for numbering checkpoints.\n\n Example usage when graph building:\n\n ```python\n import tensorflow as tf\n import os\n\n checkpoint_directory = \"/tmp/training_checkpoints\"\n checkpoint_prefix = os.path.join(checkpoint_directory, \"ckpt\")\n\n checkpoint = tf.train.Checkpoint(optimizer=optimizer, model=model)\n status = checkpoint.restore(tf.train.latest_checkpoint(checkpoint_directory))\n train_op = optimizer.minimize( ... )\n status.assert_consumed() # Optional sanity checks.\n with tf.compat.v1.Session() as session:\n # Use the Session to restore variables, or initialize them if\n # tf.train.latest_checkpoint returned None.\n status.initialize_or_restore(session)\n for _ in range(num_training_steps):\n session.run(train_op)\n checkpoint.save(file_prefix=checkpoint_prefix)\n ```\n\n Example usage with eager execution enabled:\n\n ```python\n import tensorflow as tf\n import os\n\n tf.compat.v1.enable_eager_execution()\n\n checkpoint_directory = \"/tmp/training_checkpoints\"\n checkpoint_prefix = os.path.join(checkpoint_directory, \"ckpt\")\n\n checkpoint = tf.train.Checkpoint(optimizer=optimizer, model=model)\n status = checkpoint.restore(tf.train.latest_checkpoint(checkpoint_directory))\n for _ in range(num_training_steps):\n optimizer.minimize( ... ) # Variables will be restored on creation.\n status.assert_consumed() # Optional sanity checks.\n checkpoint.save(file_prefix=checkpoint_prefix)\n ```\n\n `Checkpoint.save` and `Checkpoint.restore` write and read object-based\n checkpoints, in contrast to `tf.compat.v1.train.Saver` which writes and reads\n `variable.name` based checkpoints. 
Object-based checkpointing saves a graph of\n dependencies between Python objects (`Layer`s, `Optimizer`s, `Variable`s,\n etc.) with named edges, and this graph is used to match variables when\n restoring a checkpoint. It can be more robust to changes in the Python\n program, and helps to support restore-on-create for variables when executing\n eagerly. Prefer `tf.train.Checkpoint` over `tf.compat.v1.train.Saver` for new\n code.\n\n `Checkpoint` objects have dependencies on the objects passed as keyword\n arguments to their constructors, and each dependency is given a name that is\n identical to the name of the keyword argument for which it was created.\n TensorFlow classes like `Layer`s and `Optimizer`s will automatically add\n dependencies on their variables (e.g. \"kernel\" and \"bias\" for\n `tf.keras.layers.Dense`). Inheriting from `tf.keras.Model` makes managing\n dependencies easy in user-defined classes, since `Model` hooks into attribute\n assignment. For example:\n\n ```python\n class Regress(tf.keras.Model):\n\n def __init__(self):\n super(Regress, self).__init__()\n self.input_transform = tf.keras.layers.Dense(10)\n # ...\n\n def call(self, inputs):\n x = self.input_transform(inputs)\n # ...\n ```\n\n This `Model` has a dependency named \"input_transform\" on its `Dense` layer,\n which in turn depends on its variables. As a result, saving an instance of\n `Regress` using `tf.train.Checkpoint` will also save all the variables created\n by the `Dense` layer.\n\n When variables are assigned to multiple workers, each worker writes its own\n section of the checkpoint. These sections are then merged/re-indexed to behave\n as a single checkpoint. This avoids copying all variables to one worker, but\n does require that all workers see a common filesystem.\n\n While `tf.keras.Model.save_weights` and `tf.train.Checkpoint.save` save in the\n same format, note that the root of the resulting checkpoint is the object the\n save method is attached to. This means saving a `tf.keras.Model` using\n `save_weights` and loading into a `tf.train.Checkpoint` with a `Model`\n attached (or vice versa) will not match the `Model`'s variables. See the\n [guide to training\n checkpoints](https://www.tensorflow.org/guide/checkpoint) for\n details. Prefer `tf.train.Checkpoint` over `tf.keras.Model.save_weights` for\n training checkpoints.\n\n Attributes:\n save_counter: Incremented when `save()` is called. Used to number\n checkpoints.\n \"\"\"\n\n def __init__(self, **kwargs):\n \"\"\"Group objects into a training checkpoint.\n\n Args:\n **kwargs: Keyword arguments are set as attributes of this object, and are\n saved with the checkpoint. Values must be trackable objects.\n\n Raises:\n ValueError: If objects in `kwargs` are not trackable.\n \"\"\"\n super(CheckpointV1, self).__init__()\n for k, v in sorted(kwargs.items(), key=lambda item: item[0]):\n setattr(self, k, v)\n if not isinstance(\n getattr(self, k), (base.Trackable, def_function.Function)):\n raise ValueError(\n (\"`Checkpoint` was expecting a trackable object (an object \"\n \"derived from `TrackableBase`), got %s. If you believe this \"\n \"object should be trackable (i.e. 
it is part of the \"\n \"TensorFlow Python API and manages state), please open an issue.\")\n % (v,))\n self._save_counter = None # Created lazily for restore-on-create.\n self._save_assign_op = None\n self._saver = saver_with_op_caching(self)\n\n def _maybe_create_save_counter(self):\n \"\"\"Create a save counter if it does not yet exist.\"\"\"\n if self._save_counter is None:\n # Initialized to 0 and incremented before saving.\n with ops.device(\"/cpu:0\"):\n # add_variable creates a dependency named \"save_counter\"; NoDependency\n # prevents creating a second dependency named \"_save_counter\".\n self._save_counter = data_structures.NoDependency(\n add_variable(\n self,\n name=\"save_counter\",\n initializer=0,\n dtype=dtypes.int64,\n trainable=False))\n\n def write(self, file_prefix, session=None):\n \"\"\"Writes a training checkpoint.\n\n The checkpoint includes variables created by this object and any\n trackable objects it depends on at the time `Checkpoint.write()` is\n called.\n\n `write` does not number checkpoints, increment `save_counter`, or update the\n metadata used by `tf.train.latest_checkpoint`. It is primarily intended for\n use by higher level checkpoint management utilities. `save` provides a very\n basic implementation of these features.\n\n Args:\n file_prefix: A prefix to use for the checkpoint filenames\n (/path/to/directory/and_a_prefix).\n session: The session to evaluate variables in. Ignored when executing\n eagerly. If not provided when graph building, the default session is\n used.\n\n Returns:\n The full path to the checkpoint (i.e. `file_prefix`).\n \"\"\"\n start_time = time.time()\n output = self._saver.save(file_prefix=file_prefix, session=session)\n end_time = time.time()\n _checkpoint_write_durations.get_cell(\"V1\").add(\n _get_duration_microseconds(start_time, end_time))\n\n global _END_TIME_OF_LAST_WRITE\n with _END_TIME_OF_LAST_WRITE_LOCK:\n _checkpoint_training_time_saved.get_cell(\"V1\").increase_by(\n _get_duration_microseconds(_END_TIME_OF_LAST_WRITE, end_time))\n _END_TIME_OF_LAST_WRITE = end_time\n\n if tensor_util.is_tf_type(output):\n if context.executing_eagerly():\n return compat.as_str(output.numpy())\n else:\n # Function building\n return output\n else:\n # Graph + Session, so we already session.ran it.\n return compat.as_str(output)\n\n @property\n def save_counter(self):\n \"\"\"An integer variable which starts at zero and is incremented on save.\n\n Used to number checkpoints.\n\n Returns:\n The save counter variable.\n \"\"\"\n self._maybe_create_save_counter()\n return self._save_counter\n\n def save(self, file_prefix, session=None):\n \"\"\"Saves a training checkpoint and provides basic checkpoint management.\n\n The saved checkpoint includes variables created by this object and any\n trackable objects it depends on at the time `Checkpoint.save()` is\n called.\n\n `save` is a basic convenience wrapper around the `write` method,\n sequentially numbering checkpoints using `save_counter` and updating the\n metadata used by `tf.train.latest_checkpoint`. More advanced checkpoint\n management, for example garbage collection and custom numbering, may be\n provided by other utilities which also wrap `write`\n (`tf.train.CheckpointManager` for example).\n\n Args:\n file_prefix: A prefix to use for the checkpoint filenames\n (/path/to/directory/and_a_prefix). Names are generated based on this\n prefix and `Checkpoint.save_counter`.\n session: The session to evaluate variables in. Ignored when executing\n eagerly. 
If not provided when graph building, the default session is\n used.\n\n Returns:\n The full path to the checkpoint.\n \"\"\"\n graph_building = not context.executing_eagerly()\n if graph_building:\n if ops.inside_function():\n raise NotImplementedError(\n \"Calling tf.train.Checkpoint.save() from a function is not \"\n \"supported, as save() modifies saving metadata in ways not \"\n \"supported by TensorFlow Operations. Consider using \"\n \"tf.train.Checkpoint.write(), a lower-level API which does not \"\n \"update metadata. tf.train.latest_checkpoint and related APIs will \"\n \"not see this checkpoint.\")\n if session is None:\n session = get_session()\n if self._save_counter is None:\n # When graph building, if this is a new save counter variable then it\n # needs to be initialized before assign_add. This is only an issue if\n # restore() has not been called first.\n session.run(self.save_counter.initializer)\n if not graph_building or self._save_assign_op is None:\n with ops.colocate_with(self.save_counter):\n assign_op = self.save_counter.assign_add(1, read_value=True)\n if graph_building:\n self._save_assign_op = data_structures.NoDependency(assign_op)\n if graph_building:\n checkpoint_number = session.run(self._save_assign_op)\n else:\n checkpoint_number = assign_op.numpy()\n file_path = self.write(\n \"%s-%d\" % (file_prefix, checkpoint_number), session=session)\n checkpoint_management.update_checkpoint_state_internal(\n save_dir=os.path.dirname(file_prefix),\n model_checkpoint_path=file_path,\n all_model_checkpoint_paths=[file_path],\n save_relative_paths=True)\n return file_path\n\n def restore(self, save_path):\n \"\"\"Restore a training checkpoint.\n\n Restores this `Checkpoint` and any objects it depends on.\n\n When executing eagerly, either assigns values immediately if variables to\n restore have been created already, or defers restoration until the variables\n are created. Dependencies added after this call will be matched if they have\n a corresponding object in the checkpoint (the restore request will queue in\n any trackable object waiting for the expected dependency to be added).\n\n When graph building, restoration ops are added to the graph but not run\n immediately.\n\n To ensure that loading is complete and no more assignments will take place,\n use the `assert_consumed()` method of the status object returned by\n `restore`:\n\n ```python\n checkpoint = tf.train.Checkpoint( ... )\n checkpoint.restore(path).assert_consumed()\n ```\n\n An exception will be raised if any Python objects in the dependency graph\n were not found in the checkpoint, or if any checkpointed values do not have\n a matching Python object.\n\n When graph building, `assert_consumed()` indicates that all of the restore\n ops that will be created for this checkpoint have been created. They can be\n run via the `run_restore_ops()` method of the status object:\n\n ```python\n checkpoint.restore(path).assert_consumed().run_restore_ops()\n ```\n\n If the checkpoint has not been consumed completely, then the list of restore\n ops will grow as more objects are added to the dependency graph.\n\n Name-based `tf.compat.v1.train.Saver` checkpoints can be loaded using this\n method. Names are used to match variables. No restore ops are created/run\n until `run_restore_ops()` or `initialize_or_restore()` are called on the\n returned status object when graph building, but there is restore-on-creation\n when executing eagerly. 
Re-encode name-based checkpoints using\n `tf.train.Checkpoint.save` as soon as possible.\n\n Args:\n save_path: The path to the checkpoint, as returned by `save` or\n `tf.train.latest_checkpoint`. If None (as when there is no latest\n checkpoint for `tf.train.latest_checkpoint` to return), returns an\n object which may run initializers for objects in the dependency graph.\n If the checkpoint was written by the name-based\n `tf.compat.v1.train.Saver`, names are used to match variables.\n\n Returns:\n A load status object, which can be used to make assertions about the\n status of a checkpoint restoration and run initialization/restore ops.\n\n The returned status object has the following methods:\n\n * `assert_consumed()`:\n Raises an exception if any variables are unmatched: either\n checkpointed values which don't have a matching Python object or\n Python objects in the dependency graph with no values in the\n checkpoint. This method returns the status object, and so may be\n chained with `initialize_or_restore` or `run_restore_ops`.\n\n * `assert_existing_objects_matched()`:\n Raises an exception if any existing Python objects in the dependency\n graph are unmatched. Unlike `assert_consumed`, this assertion will\n pass if values in the checkpoint have no corresponding Python\n objects. For example a `tf.keras.Layer` object which has not yet been\n built, and so has not created any variables, will pass this assertion\n but fail `assert_consumed`. Useful when loading part of a larger\n checkpoint into a new Python program, e.g. a training checkpoint with\n a `tf.compat.v1.train.Optimizer` was saved but only the state required\n for\n inference is being loaded. This method returns the status object, and\n so may be chained with `initialize_or_restore` or `run_restore_ops`.\n\n * `assert_nontrivial_match()`: Asserts that something aside from the root\n object was matched. This is a very weak assertion, but is useful for\n sanity checking in library code where objects may exist in the\n checkpoint which haven't been created in Python and some Python\n objects may not have a checkpointed value.\n\n * `expect_partial()`: Silence warnings about incomplete checkpoint\n restores. Warnings are otherwise printed for unused parts of the\n checkpoint file or object when the `Checkpoint` object is deleted\n (often at program shutdown).\n\n * `initialize_or_restore(session=None)`:\n When graph building, runs variable initializers if `save_path` is\n `None`, but otherwise runs restore operations. If no `session` is\n explicitly specified, the default session is used. No effect when\n executing eagerly (variables are initialized or restored eagerly).\n\n * `run_restore_ops(session=None)`:\n When graph building, runs restore operations. If no `session` is\n explicitly specified, the default session is used. No effect when\n executing eagerly (restore operations are run eagerly). May only be\n called when `save_path` is not `None`.\n \"\"\"\n start_time = time.time()\n status = self._saver.restore(save_path=save_path)\n # Create the save counter now so it gets initialized with other variables\n # when graph building. 
Creating it earlier would lead to errors when using,\n # say, train.Saver() to save the model before initializing it.\n self._maybe_create_save_counter()\n if isinstance(status, NameBasedSaverStatus):\n status.add_to_optionally_restored(self.save_counter)\n _checkpoint_read_durations.get_cell(\"V1\").add(\n _get_duration_microseconds(start_time, time.time()))\n return status\n\n\n@tf_export(\"train.Checkpoint\", v1=[])\nclass Checkpoint(tracking.AutoTrackable):\n \"\"\"Manages saving/restoring trackable values to disk.\n\n TensorFlow objects may contain trackable state, such as `tf.Variable`s,\n `tf.keras.optimizers.Optimizer` implementations, `tf.data.Dataset` iterators,\n `tf.keras.Layer` implementations, or `tf.keras.Model` implementations.\n These are called **trackable objects**.\n\n A `Checkpoint` object can be constructed to save either a single or group of\n trackable objects to a checkpoint file. It maintains a `save_counter` for\n numbering checkpoints.\n\n Example:\n\n ```python\n model = tf.keras.Model(...)\n checkpoint = tf.train.Checkpoint(model)\n\n # Save a checkpoint to /tmp/training_checkpoints-{save_counter}. Every time\n # checkpoint.save is called, the save counter is increased.\n save_path = checkpoint.save('/tmp/training_checkpoints')\n\n # Restore the checkpointed values to the `model` object.\n checkpoint.restore(save_path)\n ```\n\n Example 2:\n\n ```python\n import tensorflow as tf\n import os\n\n checkpoint_directory = \"/tmp/training_checkpoints\"\n checkpoint_prefix = os.path.join(checkpoint_directory, \"ckpt\")\n\n # Create a Checkpoint that will manage two objects with trackable state,\n # one we name \"optimizer\" and the other we name \"model\".\n checkpoint = tf.train.Checkpoint(optimizer=optimizer, model=model)\n status = checkpoint.restore(tf.train.latest_checkpoint(checkpoint_directory))\n for _ in range(num_training_steps):\n optimizer.minimize( ... ) # Variables will be restored on creation.\n status.assert_consumed() # Optional sanity checks.\n checkpoint.save(file_prefix=checkpoint_prefix)\n ```\n\n `Checkpoint.save()` and `Checkpoint.restore()` write and read object-based\n checkpoints, in contrast to TensorFlow 1.x's `tf.compat.v1.train.Saver` which\n writes and\n reads `variable.name` based checkpoints. Object-based checkpointing saves a\n graph of dependencies between Python objects (`Layer`s, `Optimizer`s,\n `Variable`s, etc.) with named edges, and this graph is used to match variables\n when restoring a checkpoint. It can be more robust to changes in the Python\n program, and helps to support restore-on-create for variables.\n\n `Checkpoint` objects have dependencies on the objects passed as keyword\n arguments to their constructors, and each dependency is given a name that is\n identical to the name of the keyword argument for which it was created.\n TensorFlow classes like `Layer`s and `Optimizer`s will automatically add\n dependencies on their own variables (e.g. \"kernel\" and \"bias\" for\n `tf.keras.layers.Dense`). Inheriting from `tf.keras.Model` makes managing\n dependencies easy in user-defined classes, since `Model` hooks into attribute\n assignment. For example:\n\n ```python\n class Regress(tf.keras.Model):\n\n def __init__(self):\n super(Regress, self).__init__()\n self.input_transform = tf.keras.layers.Dense(10)\n # ...\n\n def call(self, inputs):\n x = self.input_transform(inputs)\n # ...\n ```\n\n This `Model` has a dependency named \"input_transform\" on its `Dense` layer,\n which in turn depends on its variables. 
As a result, saving an instance of\n `Regress` using `tf.train.Checkpoint` will also save all the variables created\n by the `Dense` layer.\n\n When variables are assigned to multiple workers, each worker writes its own\n section of the checkpoint. These sections are then merged/re-indexed to behave\n as a single checkpoint. This avoids copying all variables to one worker, but\n does require that all workers see a common filesystem.\n\n This function differs slightly from the Keras Model `save_weights` function.\n `tf.keras.Model.save_weights` creates a checkpoint file with the name\n specified in `filepath`, while `tf.train.Checkpoint` numbers the checkpoints,\n using `filepath` as the prefix for the checkpoint file names. Aside from this,\n `model.save_weights()` and `tf.train.Checkpoint(model).save()` are equivalent.\n\n See the [guide to training\n checkpoints](https://www.tensorflow.org/guide/checkpoint) for\n details.\n\n Attributes:\n save_counter: Incremented when `save()` is called. Used to number\n checkpoints.\n \"\"\"\n\n def __init__(self, root=None, **kwargs):\n \"\"\"Creates a training checkpoint for a single or group of objects.\n\n Args:\n root: The root object to checkpoint.\n **kwargs: Keyword arguments are set as attributes of this object, and are\n saved with the checkpoint. Values must be trackable objects.\n\n Raises:\n ValueError: If `root` or the objects in `kwargs` are not trackable. A\n `ValueError` is also raised if the `root` object tracks different\n objects from the ones listed in attributes in kwargs (e.g.\n `root.child = A` and `tf.train.Checkpoint(root, child=B)` are\n incompatible).\n\n \"\"\"\n super(Checkpoint, self).__init__()\n\n saver_root = self\n attached_dependencies = None\n self._save_counter = None # Created lazily for restore-on-create.\n self._save_assign_op = None\n\n if root:\n _assert_trackable(root)\n saver_root = root\n attached_dependencies = []\n\n # All keyword arguments (including root itself) are set as children\n # of root.\n kwargs[\"root\"] = root\n root._maybe_initialize_trackable()\n\n self._save_counter = data_structures.NoDependency(\n root._lookup_dependency(\"save_counter\"))\n self._root = data_structures.NoDependency(root)\n\n for k, v in sorted(kwargs.items(), key=lambda item: item[0]):\n setattr(self, k, v)\n\n # Call getattr instead of directly using v because setattr converts\n # v to a Trackable data structure when v is a list/dict/tuple.\n converted_v = getattr(self, k)\n _assert_trackable(converted_v)\n\n if root:\n # Make sure that root doesn't already have dependencies with these names\n child = root._lookup_dependency(k)\n if child is None:\n attached_dependencies.append(base.TrackableReference(k, converted_v))\n elif child != converted_v:\n raise ValueError(\n \"Cannot create a Checkpoint with keyword argument {name} if \"\n \"root.{name} already exists.\".format(name=k))\n\n self._saver = saver_with_op_caching(saver_root, attached_dependencies)\n self._attached_dependencies = data_structures.NoDependency(\n attached_dependencies)\n\n def _maybe_create_save_counter(self):\n \"\"\"Create a save counter if it does not yet exist.\"\"\"\n if self._save_counter is None:\n # Initialized to 0 and incremented before saving.\n with ops.device(\"/cpu:0\"):\n # add_variable creates a dependency named \"save_counter\"; NoDependency\n # prevents creating a second dependency named \"_save_counter\".\n self._save_counter = data_structures.NoDependency(\n add_variable(\n self,\n name=\"save_counter\",\n initializer=0,\n 
dtype=dtypes.int64,\n trainable=False))\n if self._attached_dependencies is not None:\n self._attached_dependencies.append(\n base.TrackableReference(\"save_counter\", self._save_counter))\n # When loading a checkpoint, the save counter is created after\n # the checkpoint has been loaded, so it must be handled in a deferred\n # manner.\n restore = self.root._deferred_dependencies.pop(\"save_counter\", ()) # pylint: disable=protected-access\n if restore:\n restore[0].restore(self._save_counter)\n\n def write(self, file_prefix, options=None):\n \"\"\"Writes a training checkpoint.\n\n The checkpoint includes variables created by this object and any\n trackable objects it depends on at the time `Checkpoint.write()` is\n called.\n\n `write` does not number checkpoints, increment `save_counter`, or update the\n metadata used by `tf.train.latest_checkpoint`. It is primarily intended for\n use by higher level checkpoint management utilities. `save` provides a very\n basic implementation of these features.\n\n Checkpoints written with `write` must be read with `read`.\n\n Example usage:\n\n ```\n step = tf.Variable(0, name=\"step\")\n checkpoint = tf.Checkpoint(step=step)\n checkpoint.write(\"/tmp/ckpt\")\n\n # Later, read the checkpoint with read()\n checkpoint.read(\"/tmp/ckpt\").assert_consumed()\n\n # You can also pass options to write() and read(). For example this\n # runs the IO ops on the localhost:\n options = tf.CheckpointOptions(experimental_io_device=\"/job:localhost\")\n checkpoint.write(\"/tmp/ckpt\", options=options)\n\n # Later, read the checkpoint with read()\n checkpoint.read(\"/tmp/ckpt\", options=options).assert_consumed()\n ```\n\n Args:\n file_prefix: A prefix to use for the checkpoint filenames\n (/path/to/directory/and_a_prefix).\n options: Optional `tf.train.CheckpointOptions` object.\n\n Returns:\n The full path to the checkpoint (i.e. `file_prefix`).\n \"\"\"\n start_time = time.time()\n options = options or checkpoint_options.CheckpointOptions()\n output = self._saver.save(file_prefix=file_prefix, options=options)\n end_time = time.time()\n _checkpoint_write_durations.get_cell(\"V2\").add(\n _get_duration_microseconds(start_time, end_time))\n\n global _END_TIME_OF_LAST_WRITE\n with _END_TIME_OF_LAST_WRITE_LOCK:\n _checkpoint_training_time_saved.get_cell(\"V2\").increase_by(\n _get_duration_microseconds(_END_TIME_OF_LAST_WRITE, end_time))\n _END_TIME_OF_LAST_WRITE = end_time\n\n if tensor_util.is_tf_type(output):\n if context.executing_eagerly():\n return compat.as_str(output.numpy())\n else:\n # Function building\n return output\n else:\n # Graph + Session, so we already session.ran it.\n return compat.as_str(output)\n\n @property\n def save_counter(self):\n \"\"\"An integer variable which starts at zero and is incremented on save.\n\n Used to number checkpoints.\n\n Returns:\n The save counter variable.\n \"\"\"\n self._maybe_create_save_counter()\n return self._save_counter\n\n def save(self, file_prefix, options=None):\n \"\"\"Saves a training checkpoint and provides basic checkpoint management.\n\n The saved checkpoint includes variables created by this object and any\n trackable objects it depends on at the time `Checkpoint.save()` is\n called.\n\n `save` is a basic convenience wrapper around the `write` method,\n sequentially numbering checkpoints using `save_counter` and updating the\n metadata used by `tf.train.latest_checkpoint`. 
More advanced checkpoint\n management, for example garbage collection and custom numbering, may be\n provided by other utilities which also wrap `write` and `read`.\n (`tf.train.CheckpointManager` for example).\n\n ```\n step = tf.Variable(0, name=\"step\")\n checkpoint = tf.Checkpoint(step=step)\n checkpoint.save(\"/tmp/ckpt\")\n\n # Later, read the checkpoint with restore()\n checkpoint.restore(\"/tmp/ckpt\").assert_consumed()\n\n # You can also pass options to save() and restore(). For example this\n # runs the IO ops on the localhost:\n options = tf.CheckpointOptions(experimental_io_device=\"/job:localhost\")\n checkpoint.save(\"/tmp/ckpt\", options=options)\n\n # Later, read the checkpoint with restore()\n checkpoint.restore(\"/tmp/ckpt\", options=options).assert_consumed()\n ```\n\n Args:\n file_prefix: A prefix to use for the checkpoint filenames\n (/path/to/directory/and_a_prefix). Names are generated based on this\n prefix and `Checkpoint.save_counter`.\n options: Optional `tf.train.CheckpointOptions` object.\n\n Returns:\n The full path to the checkpoint.\n \"\"\"\n options = options or checkpoint_options.CheckpointOptions()\n graph_building = not context.executing_eagerly()\n if graph_building:\n if ops.inside_function():\n raise NotImplementedError(\n \"Calling tf.train.Checkpoint.save() from a function is not \"\n \"supported, as save() modifies saving metadata in ways not \"\n \"supported by TensorFlow Operations. Consider using \"\n \"tf.train.Checkpoint.write(), a lower-level API which does not \"\n \"update metadata. tf.train.latest_checkpoint and related APIs will \"\n \"not see this checkpoint.\")\n session = get_session()\n if self._save_counter is None:\n # When graph building, if this is a new save counter variable then it\n # needs to be initialized before assign_add. This is only an issue if\n # restore() has not been called first.\n session.run(self.save_counter.initializer)\n if not graph_building or self._save_assign_op is None:\n with ops.colocate_with(self.save_counter):\n assign_op = self.save_counter.assign_add(1, read_value=True)\n if graph_building:\n self._save_assign_op = data_structures.NoDependency(assign_op)\n if graph_building:\n checkpoint_number = session.run(self._save_assign_op)\n else:\n checkpoint_number = assign_op.numpy()\n file_path = self.write(\"%s-%d\" % (file_prefix, checkpoint_number),\n options=options)\n checkpoint_management.update_checkpoint_state_internal(\n save_dir=os.path.dirname(file_prefix),\n model_checkpoint_path=file_path,\n all_model_checkpoint_paths=[file_path],\n save_relative_paths=True)\n return file_path\n\n def read(self, save_path, options=None):\n \"\"\"Reads a training checkpoint written with `write`.\n\n Reads this `Checkpoint` and any objects it depends on.\n\n This method is just like `restore()` but does not expect the `save_counter`\n variable in the checkpoint. It only restores the objects that the checkpoint\n already depends on.\n\n The method is primarily intended for use by higher level checkpoint\n management utilities that use `write()` instead of `save()` and have their\n own mechanisms to number and track checkpoints.\n\n Example usage:\n\n ```python\n # Create a checkpoint with write()\n ckpt = tf.train.Checkpoint(v=tf.Variable(1.))\n path = ckpt.write('/tmp/my_checkpoint')\n\n # Later, load the checkpoint with read()\n # With restore() assert_consumed() would have failed.\n checkpoint.read(path).assert_consumed()\n\n # You can also pass options to read(). 
For example this\n # runs the IO ops on the localhost:\n options = tf.train.CheckpointOptions(\n experimental_io_device=\"/job:localhost\")\n checkpoint.read(path, options=options)\n ```\n\n Args:\n save_path: The path to the checkpoint as returned by `write`.\n options: Optional `tf.train.CheckpointOptions` object.\n\n Returns:\n A load status object, which can be used to make assertions about the\n status of a checkpoint restoration. See `restore` for details.\n \"\"\"\n start_time = time.time()\n options = options or checkpoint_options.CheckpointOptions()\n result = self._saver.restore(save_path=save_path, options=options)\n _checkpoint_read_durations.get_cell(\"V2\").add(\n _get_duration_microseconds(start_time, time.time()))\n return result\n\n def restore(self, save_path, options=None):\n \"\"\"Restores a training checkpoint.\n\n Restores this `Checkpoint` and any objects it depends on.\n\n This method is intended to be used to load checkpoints created by `save()`.\n For checkpoints created by `write()` use the `read()` method which does not\n expect the `save_counter` variable added by `save()`.\n\n `restore()` either assigns values immediately if variables to restore have\n been created already, or defers restoration until the variables are\n created. Dependencies added after this call will be matched if they have a\n corresponding object in the checkpoint (the restore request will queue in\n any trackable object waiting for the expected dependency to be added).\n\n To ensure that loading is complete and no more assignments will take place,\n use the `assert_consumed()` method of the status object returned by\n `restore()`:\n\n ```python\n checkpoint = tf.train.Checkpoint( ... )\n checkpoint.restore(path).assert_consumed()\n\n # You can additionally pass options to restore():\n options = tf.CheckpointOptions(experimental_io_device=\"/job:localhost\")\n checkpoint.restore(path, options=options).assert_consumed()\n ```\n\n An exception will be raised if any Python objects in the dependency graph\n were not found in the checkpoint, or if any checkpointed values do not have\n a matching Python object.\n\n Name-based `tf.compat.v1.train.Saver` checkpoints from TensorFlow 1.x can be\n loaded using this method. Names are used to match variables. Re-encode\n name-based checkpoints using `tf.train.Checkpoint.save` as soon as possible.\n\n **Loading from SavedModel checkpoints**\n\n To load values from a SavedModel, just pass the SavedModel directory\n to checkpoint.restore:\n\n ```python\n model = tf.keras.Model(...)\n tf.saved_model.save(model, path) # or model.save(path, save_format='tf')\n\n checkpoint = tf.train.Checkpoint(model)\n checkpoint.restore(path).expect_partial()\n ```\n\n This example calls `expect_partial()` on the loaded status, since\n SavedModels saved from Keras often generates extra keys in the checkpoint.\n Otherwise, the program prints a lot of warnings about unused keys at exit\n time.\n\n Args:\n save_path: The path to the checkpoint, as returned by `save` or\n `tf.train.latest_checkpoint`. If the checkpoint was written by the\n name-based `tf.compat.v1.train.Saver`, names are used to match\n variables. 
This path may also be a SavedModel directory.\n options: Optional `tf.train.CheckpointOptions` object.\n\n Returns:\n A load status object, which can be used to make assertions about the\n status of a checkpoint restoration.\n\n The returned status object has the following methods:\n\n * `assert_consumed()`:\n Raises an exception if any variables are unmatched: either\n checkpointed values which don't have a matching Python object or\n Python objects in the dependency graph with no values in the\n checkpoint. This method returns the status object, and so may be\n chained with other assertions.\n\n * `assert_existing_objects_matched()`:\n Raises an exception if any existing Python objects in the dependency\n graph are unmatched. Unlike `assert_consumed`, this assertion will\n pass if values in the checkpoint have no corresponding Python\n objects. For example a `tf.keras.Layer` object which has not yet been\n built, and so has not created any variables, will pass this assertion\n but fail `assert_consumed`. Useful when loading part of a larger\n checkpoint into a new Python program, e.g. a training checkpoint with\n a `tf.compat.v1.train.Optimizer` was saved but only the state required\n for\n inference is being loaded. This method returns the status object, and\n so may be chained with other assertions.\n\n * `assert_nontrivial_match()`: Asserts that something aside from the root\n object was matched. This is a very weak assertion, but is useful for\n sanity checking in library code where objects may exist in the\n checkpoint which haven't been created in Python and some Python\n objects may not have a checkpointed value.\n\n * `expect_partial()`: Silence warnings about incomplete checkpoint\n restores. Warnings are otherwise printed for unused parts of the\n checkpoint file or object when the `Checkpoint` object is deleted\n (often at program shutdown).\n\n Raises:\n NotFoundError: if the a checkpoint or SavedModel cannot be found at\n `save_path`.\n \"\"\"\n orig_save_path = save_path\n\n if save_path is not None and gfile.IsDirectory(save_path) and (\n (gfile.Exists(utils_impl.get_saved_model_pb_path(save_path)) or\n gfile.Exists(utils_impl.get_saved_model_pbtxt_path(save_path)))):\n save_path = utils_impl.get_variables_path(save_path)\n\n try:\n status = self.read(save_path, options=options)\n except errors_impl.NotFoundError as e:\n raise errors_impl.NotFoundError(\n None, None,\n \"Failed to restore from checkpoint or SavedModel at {}: {}\".format(\n orig_save_path, e.message))\n # Create the save counter now so it gets initialized with other variables\n # when graph building. Creating it earlier would lead to errors when using,\n # say, train.Saver() to save the model before initializing it.\n self._maybe_create_save_counter()\n if isinstance(status, NameBasedSaverStatus):\n status.add_to_optionally_restored(self.save_counter)\n return status\n" ]
[ [ "tensorflow.python.training.tracking.graph_view.ObjectGraphView", "tensorflow.python.ops.array_ops.identity", "tensorflow.python.ops.gen_io_ops.restore_v2", "tensorflow.python.training.py_checkpoint_reader.NewCheckpointReader", "tensorflow.python.framework.ops.inside_function", "tensorflow.python.saved_model.utils_impl.get_saved_model_pb_path", "tensorflow.python.training.saver.Saver", "tensorflow.python.framework.ops.init_scope", "tensorflow.python.eager.monitoring.Counter", "tensorflow.python.training.tracking.data_structures.NoDependency", "tensorflow.python.training.tracking.base._SlotVariableRestoration", "tensorflow.python.eager.context.executing_eagerly", "tensorflow.python.framework.ops.device", "tensorflow.python.platform.gfile.IsDirectory", "tensorflow.python.util.tf_inspect.getargspec", "tensorflow.python.framework.ops.control_dependencies", "tensorflow.python.util.object_identity.ObjectIdentityWeakSet", "tensorflow.python.training.saving.saveable_object_util.op_list_to_dict", "tensorflow.python.framework.ops.colocate_with", "tensorflow.python.util.deprecation.deprecated", "tensorflow.python.training.saving.functional_saver.MultiDeviceSaver", "tensorflow.python.saved_model.utils_impl.get_variables_path", "tensorflow.python.framework.ops.get_default_session", "tensorflow.python.eager.monitoring.ExponentialBuckets", "tensorflow.python.ops.variables.VariableV1", "tensorflow.python.framework.dtypes.as_dtype", "tensorflow.python.util.object_identity.ObjectIdentityWeakKeyDictionary", "tensorflow.python.framework.tensor_util.is_tf_type", "tensorflow.python.ops.variable_scope.variable_creator_scope", "tensorflow.python.training.tracking.base.TrackableReference", "tensorflow.python.framework.tensor_shape.as_shape", "tensorflow.python.framework.constant_op.constant", "tensorflow.python.training.saving.checkpoint_options.CheckpointOptions", "tensorflow.python.training.tracking.base.CheckpointPosition", "tensorflow.python.ops.variable_scope._get_default_variable_store", "tensorflow.python.framework.ops.uid", "tensorflow.python.training.saving.saveable_object_util.validate_and_slice_inputs", "tensorflow.core.protobuf.trackable_object_graph_pb2.TrackableObjectGraph", "tensorflow.python.training.tracking.base.NoRestoreSaveable", "tensorflow.python.util.tf_export.tf_export", "tensorflow.python.training.saving.saveable_object_util.saveable_objects_for_op", "tensorflow.python.saved_model.utils_impl.get_saved_model_pbtxt_path", "tensorflow.python.util.compat.as_str" ] ]
akshaydnicator/webscraping
[ "c18713f777da4d4c85998c3ab2a4ed24ce20e04a" ]
[ "Scraper_News Articles/Scraper_StaticWebPage_moneycontrol.py" ]
[ "\n## This is an Autoscaper which would keep on extracting news on a given page on moneycontrol.com news website\n# given a url or a list of urls untill the work is finished\n\n# Import required libraries\nfrom bs4 import BeautifulSoup\nimport requests\nimport pandas as pd\nimport time\nfrom datetime import timedelta\nfrom requests.adapters import HTTPAdapter\n\n# Create a requests session and mount HTTPAdapter\ns = requests.Session()\n#s.mount('http://', HTTPAdapter(max_retries=2))\ns.mount('https://', HTTPAdapter(max_retries=2))\n\n# Load news-section urls in a list from text document that contains the list of Moneycontrol section urls\n# you want to scrape\nurls = [line.rstrip('\\n') for line in open('moneycontrol_urls.txt')]\n\n## One of the key challenges faced during execution. The website blocked content if \n# large number of requests are made in a short time\n# Below is the solution to the problem. Using a header agent to immitate a browser request to the server \nheaders={\n'Referer': 'https://www.moneycontrol.com',\n'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/80.0.3987.149 Safari/537.36'\n}\n\n#headers = {'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/80.0.3987.149 Safari/537.36'}\n\n# Empty dictionary to store scraped information on news articles\nnews_articles = {}\nnews_count = 0 \n\n# Just a counter to update the file name before saving to the disk\ntimes_saved = 0\n\n# Start the for loop over the list of section urls to scrape all the historical news from those\n# sections one-by-one\nfor url in urls:\n\n # Page number initialized by 0 before entering the while loop for scraping articles from a single section\n p = 0\n \n # Start while loop to extract text features from the historical articles\n while True:\n \n ## One of the key challenges faced during execution. The program gets stuck after loading 10-15k articles\n # A while loop using try and except was created, which would keep on retrying on exceptions cased by any of the errors such as timeout/internet disconnected/broken links etc.\n while True:\n response = None\n try:\n response = s.get(url, headers=headers, timeout=20)\n break\n except requests.exceptions.RequestException as err:\n print(f'Caught {err}... 
Sleeping for 80 sec and then retrying...')\n time.sleep(80)\n continue \n \n # Parse the source page to extract html tags and content using Beautiful Soup\n data = response.text\n soup = BeautifulSoup(data,'html.parser')\n articles = soup.find_all('li',{'class':'clearfix'})\n \n # Run for loop on all of the articles found on a given page number of a section to extract text features\n # Features extracted: Title, Link, Date, full news article text\n for article in articles:\n try:\n title = article.find('h2').text\n #print('Title: ',title)\n link = article.find('a').get('href')\n #print('Link: ',link)\n date = article.find('span').text\n #print('Published: ',date)\n\n except AttributeError:\n title = 'N/A'\n link = 'N/A'\n date = 'N/A'\n\n # Used as a count checker and input to the autosave section\n if news_count == 1:\n start_time = time.monotonic() \n \n # Extract full news text by making another server request using the link of the article extracted earlier\n try:\n news_response = s.get(link, headers=headers, timeout=15)\n news_data = news_response.text\n news_soup = BeautifulSoup(news_data,'html.parser')\n \n ## The problem with this website is that when the news link is parsed,\n # it also incldues snippets of backend codes which are totally unnecessary\n # So use If statement to extract the whole div tag first\n if news_soup.find('div',{'class':'arti-flow'}):\n news_text = news_soup.find('div',{'class':'arti-flow'})\n\n # Then decompose the unncessarily repeating 'script' and 'style' tags from the scraped content\n for x in news_text.find_all(\"script\"):\n x.decompose()\n for y in news_text.find_all('style'):\n y.decompose()\n\n # Finally, decompose the extra standard one-liner text from the bottom of the news article and extract the clean text of the news article\n try:\n news_text.find_all('a')[-1].decompose()\n news = news_text.text\n except IndexError:\n news = news_text.text\n else:\n news = 'N/A'\n\n # A countermeasure in place, in case there are any of the errors such as timeout/internet disconnected/broken links etc.\n except requests.exceptions.RequestException as error:\n news = 'N/A'\n print(f'Caught {error}... Slpeeing for 80 sec')\n time.sleep(80)\n \n # Increase the count by 1 for every news scraped and appending it to the empty dictionary\n news_count+=1\n news_articles[news_count] = [title,date,news,link]\n \n ## Not in use countermeasure; Just to give enough sleep time in between while making large server requests \n #if news_count in [2500,5000,7500]:\n # time.sleep(30)\n\n # A counter, that prints number of articles scraped in a multiple of 1000\n if news_count % 1000 == 0:\n print('No. 
',news_count)\n\n # Autosave: Extracted text features get saved on local disk with an updated filename every time the news_count reaches 40,000\n if news_count == 40000:\n times_saved+=1 # Number of times the Autosave saved files on the disk in one go\n print('Total Count',news_count)\n end_time = time.monotonic()\n print(timedelta(seconds=end_time - start_time))\n print('\\n\\n')\n\n news_df = pd.DataFrame.from_dict(news_articles,orient='index',columns=['Title','Published','News','Link'])\n news_df['Title'] = news_df['Title'].map(lambda x: x.encode('unicode-escape').decode('utf-8'))\n news_df['Link'] = news_df['Link'].map(lambda x: x.encode('unicode-escape').decode('utf-8'))\n news_df['Published'] = news_df['Published'].map(lambda x: x.encode('unicode-escape').decode('utf-8'))\n news_df['News'] = news_df['News'].map(lambda x: x.encode('unicode-escape').decode('utf-8'))\n print(len(news_df),'\\n\\n')\n news_df.to_csv('mc_'+str(times_saved)+'.csv')\n news_articles = {}\n news_count = 0\n #time.sleep(30)\n \n # Collect the next page url from the bottom of the page\n url_tag = soup.find('a',{'class':'last'})\n \n # Max number of pages to scrape from a single section on the website restricted to 15,655\n max_pages = 15655\n \n # While loop would break if no url is found, as in reached the end of the section itself\n try:\n if \"void\" in url_tag.get('href'):\n break\n \n elif url_tag.get('href') and p < max_pages:\n url = 'https://www.moneycontrol.com'+url_tag.get('href')\n print('\\n',url,'\\n')\n p+=1\n else:\n break\n print('\\n\\nNext page does not exist\\n\\n')\n except AttributeError:\n print('\\n\\nNext page does not exist\\n\\n')" ]
[ [ "pandas.DataFrame.from_dict" ] ]
NaiveTom/all_model
[ "52910c2920526179f65b27d21a21a066e9d7f3b2" ]
[ "DNA2VEC/train.py" ]
[ "# 使用GPU模式,不然永远也训练不完\nimport os\nos.environ[\"CUDA_VISIBLE_DEVICES\"] = \"0\"\n\nfrom model import get_model\nimport numpy as np\nimport keras\nfrom keras.callbacks import Callback\nfrom datetime import datetime\nfrom sklearn.metrics import roc_auc_score, average_precision_score\nfrom sklearn.model_selection import train_test_split\n\n\n\n'''\n2021-04-11 16:53:06.007063: E tensorflow/stream_executor/dnn.cc:616] CUDNN_STATUS_INTERNAL_ERROR\n\nin tensorflow/stream_executor/cuda/cuda_dnn.cc(2011): 'cudnnRNNBackwardData( cudnn.handle(), rnn_desc.handle(), \nmodel_dims.max_seq_length, output_desc.handles(), output_data.opaque(), output_desc.handles(), output_backprop_data.opaque(), \noutput_h_desc.handle(), output_h_backprop_data.opaque(), output_c_desc.handle(), output_c_backprop_data.opaque(), \nrnn_desc.params_handle(), params.opaque(), input_h_desc.handle(), input_h_data.opaque(), input_c_desc.handle(), \ninput_c_data.opaque(), input_desc.handles(), input_backprop_data->opaque(), input_h_desc.handle(), input_h_backprop_data->opaque(), \ninput_c_desc.handle(), input_c_backprop_data->opaque(), workspace.opaque(), workspace.size(), reserve_space_data->opaque(), reserve_space_data->size())'\n\n2021-04-11 16:53:06.007530: W tensorflow/core/framework/op_kernel.cc:1767] OP_REQUIRES failed at cudnn_rnn_ops.cc:1922: \nInternal: Failed to call ThenRnnBackward with model config: [rnn_mode, rnn_input_mode, rnn_direction_mode]: 3, 0, 0 , \n[num_layers, input_size, num_units, dir_count, max_seq_length, batch_size, cell_num_units]: [1, 64, 50, 1, 100, 32, 0] \n\n2021-04-11 16:53:06.007077: F tensorflow/stream_executor/cuda/cuda_dnn.cc:190] Check failed: status == CUDNN_STATUS_SUCCESS (7 vs. 0)Failed to set cuDNN stream.\n\n解决方案\n'''\n\n# import tensorflow.compat.v1 as tf\n# tf.disable_v2_behavior() # disable for tensorFlow V2\n\nimport tensorflow as tf\n\nphysical_devices = tf.config.experimental.list_physical_devices('GPU')\ntf.config.experimental.set_memory_growth(physical_devices[0], True)\n\n\n\n##############################\n#\n# loss数据可视化\n#\n##############################\n\nimport keras\nfrom matplotlib import pyplot as plt\n\n\n\nclass PlotProgress(keras.callbacks.Callback):\n\n def __init__(self, entity = ['loss', 'accuracy']):\n self.entity = entity\n\n def on_train_begin(self, logs={}):\n self.i = 0\n self.x = []\n self.losses = []\n self.val_losses = []\n\n self.accs = []\n self.val_accs = []\n\n self.fig = plt.figure()\n\n self.logs = []\n\n def on_epoch_end(self, epoch, logs={}):\n self.logs.append(logs)\n self.x.append(self.i)\n # 损失函数\n self.losses.append(logs.get('{}'.format(self.entity[0])))\n self.val_losses.append(logs.get('val_{}'.format(self.entity[0])))\n # 准确率\n self.accs.append(logs.get('{}'.format(self.entity[1])))\n self.val_accs.append(logs.get('val_{}'.format(self.entity[1])))\n\n self.i += 1\n\n # clear_output(wait=True)\n plt.figure(0)\n plt.clf() # 清理历史遗迹\n plt.plot(self.x, self.losses, label=\"{}\".format(self.entity[0]))\n plt.plot(self.x, self.val_losses, label=\"val_{}\".format(self.entity[0]))\n plt.legend()\n plt.savefig('loss.jpg')\n plt.pause(0.01)\n # plt.show()\n\n plt.figure(1)\n plt.clf() # 清理历史遗迹\n plt.plot(self.x, self.accs, label=\"{}\".format(self.entity[1]))\n plt.plot(self.x, self.val_accs, label=\"val_{}\".format(self.entity[1]))\n plt.legend()\n plt.savefig('acc.jpg')\n plt.pause(0.01)\n # plt.show()\n\n\n\nclass roc_callback(Callback):\n def __init__(self, name):\n self.name = name\n\n def on_train_begin(self, logs={}):\n return\n\n def 
on_train_end(self, logs={}):\n return\n\n def on_epoch_begin(self, epoch, logs={}):\n return\n\n def on_epoch_end(self, epoch, logs={}):\n\n self.model.save_weights(\n \"./model/{0}Model{1}.h5\".format(self.name, epoch))\n\n return\n\n def on_batch_begin(self, batch, logs={}):\n return\n\n def on_batch_end(self, batch, logs={}):\n return\n\n\n\nt1 = datetime.now().strftime('%Y-%m-%d-%H:%M:%S')\n\n#names = ['GM12878', 'HUVEC', 'HeLa-S3', 'IMR90', 'K562', 'NHEK','all','all-NHEK']\n# name=names[0]\n# The data used here is the sequence processed by data_processing.py.\n\n'''\nnames = ['GM12878', 'HUVEC', 'HeLa-S3', 'IMR90', 'K562', 'NHEK']\nfor name in names:\n'''\nname = 'X5628FC'\n\n# Data_dir = '/home/ycm/data/%s/' % name\ntrain = np.load('%s_train.npz' % name)\nX_en_tra, X_pr_tra, y_tra = train['X_en_tra'], train['X_pr_tra'], train['y_tra']\nmodel = get_model()\nmodel.summary()\nprint('Traing %s cell line specific model ...' % name)\n\n\n\nback = roc_callback(name=name)\nfrom keras.callbacks import EarlyStopping\nearly_stopping = EarlyStopping(monitor = 'val_accuracy', patience = 30, restore_best_weights = True)\n# 绘图函数\nplot_progress = PlotProgress(entity = ['loss', 'accuracy'])\n\n\n\nhistory = model.fit([X_en_tra, X_pr_tra], y_tra, epochs=1000, batch_size=32, validation_split=0.11,\n callbacks=[back, early_stopping, plot_progress])\nt2 = datetime.now().strftime('%Y-%m-%d-%H:%M:%S')\n\n\n\nmodel.save('dna2vec_best_model.h5')\n\nprint(\"开始时间:\"+t1+\"结束时间:\"+t2)" ]
[ [ "matplotlib.pyplot.savefig", "tensorflow.config.experimental.set_memory_growth", "numpy.load", "matplotlib.pyplot.legend", "matplotlib.pyplot.figure", "matplotlib.pyplot.pause", "matplotlib.pyplot.clf", "tensorflow.config.experimental.list_physical_devices" ] ]
adimanzz/ga-learner-dsmp-repo
[ "af1bc4b62867233b091f40fa07d1ff0bb97236c0" ]
[ "Telecom-Churn-Predictioncode.py" ]
[ "# --------------\nimport pandas as pd\nfrom sklearn.model_selection import train_test_split\n#path - Path of file \n\n# Code starts here\ndf = pd.read_csv(path)\n\nX = df.drop(['customerID','Churn'],1)\ny = df['Churn']\n\nX_train,X_test,y_train,y_test = train_test_split(X,y,test_size = 0.3,random_state = 0)\n\n\n\n\n\n\n\n\n\n# --------------\nimport numpy as np\nfrom sklearn.preprocessing import LabelEncoder\n\n# Code starts here\n\n# print(X_train.dtypes)\n\n\nX_train['TotalCharges'] = X_train['TotalCharges'].replace(' ',np.nan)\nX_test['TotalCharges'] = X_test['TotalCharges'].replace(' ',np.nan)\n\n\n\nX_train['TotalCharges'] = X_train['TotalCharges'].astype(float)\nX_test['TotalCharges'] = X_test['TotalCharges'].astype(float)\n\nX_train['TotalCharges'] = X_train['TotalCharges'].fillna(X_train['TotalCharges'].mean())\nX_test['TotalCharges'] = X_test['TotalCharges'].fillna(X_test['TotalCharges'].mean())\n\n\nle = LabelEncoder()\ns = list(X_train.select_dtypes(exclude = np.number))\nprint(len(X_train[s]),len(y_train))\ndf.apply(LabelEncoder().fit_transform)\n\nX_train[s] = X_train[s].apply(le.fit_transform)\nX_test[s] = X_test[s].apply(le.fit_transform)\n\ny_train = y_train.replace({'No':0, 'Yes':1})\ny_test = y_test.replace({'No':0, 'Yes':1})\n\n\n\n\n\n# --------------\nfrom sklearn.ensemble import AdaBoostClassifier\nfrom sklearn.metrics import accuracy_score,classification_report,confusion_matrix\n\n# Code starts here\n\n\nprint(X_train,X_test,y_train,y_test)\n\nada_model = AdaBoostClassifier(random_state = 0)\n\nada_model.fit(X_train, y_train)\n\ny_pred = ada_model.predict(X_test)\n\nada_score = accuracy_score(y_test,y_pred)\n\nada_cm = confusion_matrix(y_test,y_pred)\n\nada_cr = classification_report(y_test,y_pred)\n\nprint(ada_cr)\n\n\n\n\n\n\n\n\n\n\n# --------------\nfrom xgboost import XGBClassifier\nfrom sklearn.model_selection import GridSearchCV\n\n#Parameter list\nparameters={'learning_rate':[0.1,0.15,0.2,0.25,0.3],\n 'max_depth':range(1,3)}\n\n# Code starts here\n\n\nxgb_model = XGBClassifier(random_state = 0)\n\nxgb_model.fit(X_train, y_train)\n\ny_pred = xgb_model.predict(X_test)\n\nxgb_score = accuracy_score(y_test,y_pred)\n\nxgb_cm = confusion_matrix(y_test, y_pred)\n\nxgb_cr = classification_report(y_test,y_pred)\n\nprint(xgb_score)\n\nprint(xgb_cr)\n\nclf_model = GridSearchCV(estimator = xgb_model, param_grid = parameters)\n\nclf_model.fit(X_train,y_train)\n\ny_pred = clf_model.predict(X_test)\n\nclf_score = accuracy_score(y_test, y_pred)\n\nclf_cm = confusion_matrix(y_test, y_pred)\n\nclf_cr = classification_report(y_test, y_pred)\n\nprint(clf_score)\nprint(clf_cr)\n\n\n\n\n\n\n\n\n\n\n" ]
[ [ "sklearn.preprocessing.LabelEncoder", "sklearn.metrics.confusion_matrix", "sklearn.ensemble.AdaBoostClassifier", "sklearn.model_selection.GridSearchCV", "sklearn.metrics.accuracy_score", "sklearn.metrics.classification_report", "sklearn.model_selection.train_test_split", "pandas.read_csv" ] ]
widdowquinn/SI_Holmes_etal_2017
[ "72dc0c3e537b6d940cb9f2ced4d4de19187c14ac" ]
[ "notebooks/tools.py" ]
[ "#!/usr/bin/env python3.5\n#\n# tools.py\n#\n# (C) The James Hutton Institute 2016\n# Author: Leighton Pritchard\n\n\"\"\"\ntools.py\n\nThis module provides helper functions used in the supplementary information\nnotebooks and scripts for the Holmes et al. (2017) paper.\n\"\"\"\n\nfrom matplotlib import pyplot as plt\n\nimport numpy as np\nimport pandas as pd\nimport pickle\nimport random\nimport scipy\nimport seaborn as sns\n\nfrom collections import defaultdict\n\nfrom Bio import SeqIO\n\n# PRNG seed\nSEED = 123456789\n\n\ndef corrfunc(x, y, **kws):\n \"\"\"Return a matplotlib axis with text describing the Spearman\n correlation coefficient for x and y\n\n This function is written to support plot_correlation\n \"\"\"\n coeff, _ = scipy.stats.spearmanr(x, y)\n ax = plt.gca()\n ax.annotate(\"r = {:.3f}\".format(coeff),\n xy=(.3, .5), size=15,\n xycoords=ax.transAxes)\n\n\ndef plot_correlation(df, title=None):\n \"\"\"Render Seaborn PairGrid of columns in df, with Pearson correlation\n coefficients in the upper triangle, and KDE plots on the diagonal.\n \"\"\"\n g = sns.PairGrid(df)\n g.map_lower(plt.scatter)\n g.map_diag(sns.kdeplot, legend=False)\n g.map_upper(corrfunc)\n g.set(xticklabels=[])\n g.set(title=title or '')\n return g\n\n\ndef quantile_norm(df, columns=None):\n \"\"\"Normalise the columns of df to each have the same distribution\"\"\"\n df_matrix = df.as_matrix(columns=columns)\n quantiles = np.mean(np.sort(df_matrix, axis=0), axis=1)\n ranks = scipy.stats.mstats.rankdata(df_matrix, axis=0).astype(int) - 1\n norm_matrix = quantiles[ranks]\n return(pd.DataFrame(data=norm_matrix, index=df.index,\n columns=columns or df.columns))\n\n\ndef plot_normalised(ctl_in, ctl_out, trt_in, trt_out):\n \"\"\"Return violin plots of input/output control/treatment distributions\"\"\"\n fig, axes = plt.subplots(2, 2, figsize=(12,6))\n fig.subplots_adjust(hspace=.25)\n axes = axes.ravel()\n for ttl, arr, ax in zip((\"control input\", \"control output\",\n \"treatment input\", \"treatment output\"),\n (ctl_in, ctl_out, trt_in, trt_out),\n axes):\n ax.set_title(ttl)\n sns.violinplot(np.log(arr), ax=ax)\n\n\ndef wide_to_long_df(df, stage):\n \"\"\"Convert wide dataframe to long\n\n This function is brittle, and only for Holmes et al SI\n \"\"\"\n if not stage:\n stagestr = 'input'\n else:\n stagestr = 'output'\n\n df.reset_index(level=0, inplace=True) # make probes a column\n df = pd.melt(df, id_vars=['Systematic'],\n value_vars=['{0}.1'.format(stagestr),\n '{0}.2'.format(stagestr),\n '{0}.3'.format(stagestr)])\n df.columns = ['probe', 'class', stagestr]\n df.loc[:, 'replicate'] = df['class'].astype(str).str[-1].astype(np.int64)\n df = df[['probe', 'replicate', stagestr]]\n df.set_index(['probe', 'replicate'], inplace=True)\n return df\n \n\ndef wide_to_long_join(df_in, df_out, treatment):\n \"\"\"Convert two wide dataframes to long and join on common index\n\n This function is brittle and only for Holmes et al SI\n \"\"\"\n if treatment:\n treatval = 1\n else:\n treatval = 0 \n df = pd.merge(wide_to_long_df(df_in, 0), wide_to_long_df(df_out, 1),\n left_index=True, right_index=True)\n df['treatment'] = treatval\n df.reset_index(inplace=True)\n return df\n\n\ndef wide_to_long(ctl_in, ctl_out, trt_in, trt_out):\n \"\"\"Convert four dataframes from wide to long format\n\n This function returns a dataframe with columns:\n \n * probe\n * replicate\n * treatment\n * repXtrt (combination of replicate and treatment)\n * input\n * output\n * log_input\n * log_output\n \"\"\"\n ctl_long = 
wide_to_long_join(ctl_in, ctl_out, treatment=False)\n trt_long = wide_to_long_join(trt_in, trt_out, treatment=True)\n data = ctl_long.append(trt_long, ignore_index=True)\n data['log_input'] = np.log(data['input'])\n data['log_output'] = np.log(data['output'])\n data['repXtrt'] = 'rep' + data['replicate'].map(str) +\\\n 'trt' + data['treatment'].map(str)\n data = data[['probe',\n 'replicate', 'treatment', 'repXtrt',\n 'input', 'output',\n 'log_input', 'log_output']]\n return data\n\n\ndef plot_input_output_violin(data):\n \"\"\"Plot Seaborn violin plot of log input and output data\"\"\"\n input_v_output = pd.melt(data,\n id_vars=['probe', 'replicate', 'treatment'],\n value_vars=['log_input', 'log_output'])\n input_v_output.columns = ['probe', 'replicate', 'treatment',\n 'stage', 'log_intensity']\n\n g = sns.violinplot(data=input_v_output, x=\"treatment\", y=\"log_intensity\",\n hue=\"stage\", split=True)\n g.set_xticklabels(['control', 'treatment'])\n g.set_ylabel(\"log(intensity)\")\n g.set_xlabel(\"\")\n g.set_title(\"log(intensity) distribution by treatment and input/output\")\n\n\ndef unique_probe_matches(blastfiles):\n \"\"\"Returns a dataframe of unique queries and their unique matches\"\"\"\n # Columns in a BLASTN+ -outfmt 6 file\n blast_columns = ['probe', 'match', 'identity', 'length', 'mismatch',\n 'gapopen', 'qstart', 'qend', 'sstart', 'send',\n 'evalue', 'bitscore']\n df = None\n for bfile in blastfiles:\n if df is None:\n df = pd.read_csv(bfile, sep=\"\\t\", names=blast_columns)\n else:\n df = df.append(pd.read_csv(bfile, sep=\"\\t\",\n names=blast_columns))\n df = df.drop_duplicates('probe') # Drop rows with repeated probes\n return df\n\n\ndef annotate_seqdata(df, seqfiles):\n \"\"\"Returns the passed dataframe, annotated with locus tags\"\"\"\n ids = []\n locus_tags = []\n for seqfile in seqfiles:\n for seq in SeqIO.parse(seqfile, 'fasta'):\n labels = seq.description.split(' ')\n for label in labels:\n if label.startswith('[locus_tag'):\n ids.append(seq.id)\n locus_tags.append(label.split('=')[-1][:-1])\n seqdf = pd.DataFrame({'match': ids, 'locus_tag': locus_tags})\n return pd.merge(df, seqdf, 'inner', ['match']) \n\n\ndef index_column(df, colname):\n \"\"\"Return the dataframe, with an index column for 'probe's\"\"\"\n col_ids = df[colname].unique()\n nvals = len(col_ids)\n col_lookup = dict(zip(col_ids, range(nvals)))\n df['{0}_index'.format(colname)] = df[colname].replace(col_lookup).values\n return df\n \n\ndef reduce_dataset(df, colname, n=2000, seed=True):\n \"\"\"Returns the passed dataframe, with a reduced set of rows\"\"\"\n if seed:\n random.seed(SEED) # for reproducibility of random choice\n\n col_ids = df[colname].unique()\n nvals = len(col_ids)\n\n indices = [random.randint(0, nvals) for i in range(n)] \n reduced = df.loc[df['{0}_index'.format(colname)].isin(indices)]\n\n # create indices and values for probes\n new_ids = reduced[colname].unique()\n nvals = len(new_ids)\n new_lookup = dict(zip(new_ids, range(nvals)))\n\n # add data column with probe index from probe_lookup\n reduced['{0}_index'.format(colname)] =\\\n reduced[colname].replace(new_lookup).values\n\n return reduced\n\n\ndef reduce_dataset_by_column_value(df, colname, values):\n \"\"\"Returns the passed dataframe, with only the passed column values\"\"\"\n col_ids = df[colname].unique()\n nvals = len(col_ids)\n\n # Reduce dataset\n reduced = df.loc[df['locus_tag'].isin(values)]\n\n # create indices and values for probes\n new_ids = reduced[colname].unique()\n nvals = len(new_ids)\n new_lookup = 
dict(zip(new_ids, range(nvals)))\n\n # add data column with probe index from probe_lookup\n reduced['{0}_index'.format(colname)] =\\\n reduced[colname].replace(new_lookup).values\n\n return reduced\n\n\ndef extract_fit_variable_summary(fit, varname, index=None):\n \"\"\"Returns summary information for a variable in the passed Stan fit object\n\n Calculates mean, std, median, and 5%, 25%, 75% and 95% percentiles\n for the passed variable, returning them as a dataframe.\n \"\"\"\n # Using Pandas methods\n mean = pd.Series(fit[varname].mean(0), index=index)\n se = pd.Series(fit[varname].std(0), index=index)\n\n # Need to use numpy functions\n median = pd.Series(np.median(fit[varname], 0), index=index)\n perc_2_5 = pd.Series(np.percentile(fit[varname], 2.5, 0), index=index)\n perc_25 = pd.Series(np.percentile(fit[varname], 25, 0), index=index)\n perc_75 = pd.Series(np.percentile(fit[varname], 75, 0), index=index)\n perc_97_5 = pd.Series(np.percentile(fit[varname], 97.5, 0), index=index)\n\n return pd.DataFrame({'%s_mean' % varname: mean,\n '%s_sem' % varname: se,\n '%s_median' % varname: median,\n '%s_2.5pc' % varname: perc_2_5,\n '%s_97.5pc' % varname: perc_97_5,\n '%s_25pc' % varname: perc_25,\n '%s_75pc' % varname: perc_75})\n\n\n\ndef extract_df_variable_summary(df, varname, index=None):\n \"\"\"Returns summary information for a variable in the passed datframe object\n\n This function expects a dataframe of pickled fit information\n\n Calculates mean, std, median, and 5%, 25%, 75% and 95% percentiles\n for the passed variable, returning them as a dataframe.\n \"\"\"\n # Using Pandas methods\n mean = pd.Series(df[varname][0].mean(0), index=index)\n se = pd.Series(df[varname][0].std(0), index=index)\n\n # Need to use numpy functions\n median = pd.Series(np.median(df[varname][0], 0), index=index)\n perc_2_5 = pd.Series(np.percentile(df[varname][0], 2.5, 0), index=index)\n perc_25 = pd.Series(np.percentile(df[varname][0], 25, 0), index=index)\n perc_75 = pd.Series(np.percentile(df[varname][0], 75, 0), index=index)\n perc_97_5 = pd.Series(np.percentile(df[varname][0], 97.5, 0), index=index)\n\n return pd.DataFrame({'%s_mean' % varname: mean,\n '%s_sem' % varname: se,\n '%s_median' % varname: median,\n '%s_2.5pc' % varname: perc_2_5,\n '%s_97.5pc' % varname: perc_97_5,\n '%s_25pc' % varname: perc_25,\n '%s_75pc' % varname: perc_75})\n\n\ndef extract_variable_summaries(obj, otype='fit',\n varnames=['a', 'b', 'g', 'd'],\n indices=None,\n data=None):\n \"\"\"Return dataframe of parameter estimate summaries\n\n For this modelling there is a specific issue with estimating variables on\n arrays (length 6), and estimating them on probes (length around 6000),\n and having to combine them.\n\n The calls to extract_*_variable_summary() return a dataframe for each\n variable. 
We broadcast values for a and g across the probe dataset, and\n join values for b and d directly.\n \"\"\"\n # Choice of function depends on object being passed\n functions = {'fit': extract_fit_variable_summary,\n 'df': extract_df_variable_summary}\n\n # Get dataframes for each fitted variable summary, keyed by variable name\n dfdict = defaultdict()\n for varname, index in zip(varnames, indices):\n dfdict[varname] = functions[otype](obj, varname, index)\n dfdict[varname].reset_index(inplace=True)\n\n # Broadcast parameter estimates across probes\n df = pd.merge(data, dfdict['a'],\n left_on='repXtrt', right_on='index')\n df = pd.merge(df, dfdict['b'],\n left_on='locus_tag', right_on='index')\n df = pd.merge(df, dfdict['g'],\n left_on='repXtrt', right_on='index')\n df = pd.merge(df, dfdict['d'],\n left_on='locus_tag', right_on='index')\n \n # Broadcast parameter estimates across locus tags\n lt = pd.DataFrame(data['locus_tag'].unique())\n lt.columns = ['locus_tag']\n lt = pd.merge(lt, dfdict['b'],\n left_on='locus_tag', right_on='index')\n lt = pd.merge(lt, dfdict['d'],\n left_on='locus_tag', right_on='index')\n \n df.drop('index_x', 1, inplace=True)\n df.drop('index_y', 1, inplace=True) \n lt.drop('index_x', 1, inplace=True)\n lt.drop('index_y', 1, inplace=True) \n\n lt.sort_values('locus_tag', inplace=True)\n\n return df, lt\n\n\ndef boxplot_medians(estimates, varnames=['a', 'b', 'g', 'd']):\n \"\"\"Plot 2x2 boxplot of parameter median estimates\"\"\"\n fig, axes = plt.subplots(int(len(varnames)/2), 2,\n figsize=(12, 2 * len(varnames)))\n axes = axes.ravel()\n fig.subplots_adjust(hspace=0.3)\n\n for idx, varname in enumerate(varnames):\n sns.boxplot(estimates['{0}_median'.format(varname)],\n ax=axes[idx])\n axes[idx].set_title(\"Median {0}\".format(varname))\n\n\ndef split_estimates(df, org):\n \"\"\"Split the passed dataframe into either Sakai or DH10B subsets\"\"\"\n if org == 'dh10b':\n subset = df.loc[df['locus_tag'].str.startswith('ECDH10B')]\n else:\n subset = df.loc[~df['locus_tag'].str.startswith('ECDH10B')]\n return subset\n\n\ndef plot_treatment_vs_control(df):\n \"\"\"Plot median treatment vs control parameters\"\"\"\n fig, axes = plt.subplots(1, 2, figsize=(12, 8))\n axes = axes.ravel()\n fig.subplots_adjust(hspace=0.3)\n \n for idx, xvar, yvar, ax in zip(range(2),\n ['a_median', 'a_median',\n 'b_median', 'b_median'],\n ['g_median', 'd_median',\n 'g_median', 'd_median'],\n axes):\n ax.scatter(df[xvar], df[yvar], alpha=0.2)\n ax.set_xlabel(xvar)\n ax.set_ylabel(yvar)\n\n\ndef label_positive_effects(df):\n \"\"\"Label the locus tags as having positive effects on treatment, control,\n or both.\n \"\"\"\n df['trt_pos'] = df['d_25pc'] > 0\n df['ctl_pos'] = df['b_25pc'] > np.percentile(df['b_median'], 97.5)\n df['combined'] = df['trt_pos'] & df['ctl_pos']\n return df\n\n\ndef plot_parameter(df, ax, varname, thresh, annotations=None, label=None, ylabel=None):\n \"\"\"Plot the estimated parameter median, and 50% CI, in locus tag order on\n the passed matplotlib axis\n \n Credibility intervals are coloured blue if they include the threshold,\n red (value below threshold) or green (value above threshold) otherwise.\n\n annotations expects a dictionary where the key is the annotation text, and\n the value is a tuple of co-ordinates for the centre of the text\n \"\"\"\n vals = df['{0}_median'.format(varname)]\n cilo = df['{0}_25pc'.format(varname)]\n cihi = df['{0}_75pc'.format(varname)]\n \n ax.scatter(range(len(df)), vals, c='k', marker='.')\n for idx, val, vlo, vhi in 
zip(range(len(df)),\n vals, cilo, cihi):\n if vlo < thresh < vhi:\n color = 'b-'\n elif val < thresh:\n color = 'm-'\n elif val > thresh:\n color = 'g-'\n else:\n color = 'k-'\n ax.plot([idx, idx], [vlo, vhi], color, alpha=0.4)\n\n # Add box annotations, if requested\n y0, y1 = ax.get_ylim() \n max_y_ann = y1\n if annotations is not None:\n bbox_props = dict(boxstyle=\"square,pad=0.3\", color=\"w\")\n for k, v in annotations.items():\n # Text box\n t = ax.text(0.5 * (v[0] + v[1]), v[2], k,\n ha=\"center\", va=\"center\", bbox=bbox_props)\n # Marker\n offset = 0.075 * (y1 - y0)\n ax.plot([v[0], v[0]], [v[2] - offset, v[2] - 0.5 * offset], 'k-')\n ax.plot([v[1], v[1]], [v[2] - offset, v[2] - 0.5 * offset], 'k-')\n ax.plot([v[0], v[1]], [v[2] - 0.75 * offset,\n v[2] - 0.75 * offset], 'k-') \n # Max ylim\n max_y_ann = max(v[2] * 1.1, max_y_ann)\n \n # Set x and y limits\n ax.set_ylim(y0, max_y_ann)\n ax.set_xlim(-1, len(df) + 1)\n\n # Don't show x-axis ticks\n ax.get_xaxis().set_visible(False)\n\n # Draw label if asked\n y0, y1 = ax.get_ylim() \n bbox_props = dict(boxstyle=\"square,pad=0.3\", color=\"w\")\n if label:\n ax.text(-1, (y1 + (y1 - y0) * 0.01), label,\n va=\"bottom\", ha=\"left\", bbox=bbox_props,\n size=\"x-large\")\n\n # Draw y-axis label\n if ylabel:\n ax.set_ylabel(ylabel)\n \n# Get index of locus tag for plotting\ndef get_lt_index(locus_tag, df):\n return list(df['locus_tag']).index(locus_tag)\n\n \ndef get_annotation(tag, anndict):\n try:\n return anndict[tag]\n except KeyError:\n return None\n\n\ndef annotate_locus_tags(df, gbfilepath):\n \"\"\"Add gene product annotations from gbfiles to passed dataframe\n\n The annotations are added/placed in a column called \"annotation\", and are\n identified on the basis of the \"locus_tag\" column\n \"\"\"\n products = dict()\n startpos = defaultdict(int)\n for record in SeqIO.parse(gbfilepath, 'genbank'):\n products.update({ft.qualifiers['locus_tag'][0]:ft.qualifiers['product'][0]\n for ft in record.features if\n (ft.type == 'CDS' and\n 'product' in ft.qualifiers)})\n startpos.update({ft.qualifiers['locus_tag'][0]:\n int(ft.location.nofuzzy_start)\n for ft in record.features if\n ft.type == 'gene'})\n df['annotation'] = df['locus_tag'].apply(get_annotation,\n args=(products,))\n df['startpos'] = df['locus_tag'].apply(get_annotation,\n args=(startpos,))\n return df\n\n\ndef parse_full_fit(picklefilename, datafilename):\n \"\"\"Parses the full model fit into a Pandas dataframe which is returned\n\n The returned dataframe has columns for mean, SEM, median, and 2.5, 25,\n 75, 97.5 percentiles\n \"\"\"\n # Load fit\n with open(picklefilename, 'rb') as ifh:\n fit = pickle.load(ifh)\n indata = pd.read_csv(datafilename, sep=\"\\t\")\n locus_tags = indata['locus_tag'].unique()\n\n # Get dataframes for each fitted variable summary, and join them\n dflist = []\n for varname in ['a', 'b', 'g', 'd']:\n dflist.append(extract_variable_summaries(fit, varname, locus_tags))\n\n return pd.concat(dflist, axis=1)\n\n\ndef plot_errors(df):\n \"\"\"Plot distributions of absolute and relative error in crossvalidation\"\"\"\n fig, axes = plt.subplots(1, 2, figsize=(12,4))\n fig.subplots_adjust(hspace=.25)\n axes = axes.ravel()\n for ttl, col, ax in zip((\"absolute error\", \"relative error\"),\n (\"y_pred_abs_error\", \"y_pred_rel_error\"),\n axes):\n ax.set_title(ttl)\n sns.boxplot(df[col], ax=ax) \n\ndef plot_error_vs_column(df, colname):\n fig, axes = plt.subplots(1, 2, figsize=(10, 4))\n axes = axes.ravel()\n for ttl, col, ax in zip((\"absolute error\", 
\"relative error\"),\n (\"y_pred_abs_error\", \"y_pred_rel_error\"),\n axes):\n ax.set_title(\"{0} v {1}\".format(ttl, colname))\n ax.set_xlabel(colname)\n ax.set_ylabel(ttl)\n ax.scatter(df[colname], df[col], alpha=0.05)\n\n\ndef plot_probe_predictions(locustag, df):\n \"\"\"Plot prediction range and measured value for a specific gene\"\"\"\n ltdata = df.loc[df['locus_tag'] == locustag].sort_values(['probe',\n 'treatment',\n 'replicate'])\n plt.scatter(range(len(ltdata)), ltdata['log_output'], color='k')\n for idx, obs, plo, pmd, phi in zip(range(len(ltdata)),\n ltdata['log_output'],\n ltdata['y_pred_5pc'],\n ltdata['y_pred_median'],\n ltdata['y_pred_95pc']):\n if plo < obs < phi:\n lcolor = 'b-'\n pcolor = 'b.'\n else:\n lcolor = 'r-'\n pcolor = 'r.'\n plt.plot([idx, idx], [plo, phi], lcolor)\n plt.plot([idx, idx], [pmd, pmd], pcolor)\n plt.xticks(range(len(ltdata)), ltdata['probe'], rotation=90)\n plt.xlim(-1, len(ltdata))\n plt.title(\"Probe predictions: {0}, delta: {1}\".format(locustag,\n ltdata['d_median'].unique()))\n\n\ndef plot_locustag_predictions(df, tag):\n \"\"\"Plot prediction range and measured output for a locus tag\n \n Produce one axis per probe\n \"\"\"\n ltdata = df.loc[df['locus_tag'] == tag].sort_values(['treatment',\n 'probe',\n 'replicate'])\n #print(ltdata)\n probes = list(ltdata['probe'].unique())\n numprobes = len(probes)\n fig, axes = plt.subplots(1, numprobes, figsize=(6 * numprobes, 6))\n try:\n axes = axes.ravel()\n except AttributeError:\n axes = (axes,)\n for ttl, arr, ax in zip(probes,\n [ltdata[ltdata['probe'] == p] for p in probes],\n axes):\n # Plot input (grey) and output (black) measurements\n ax.scatter(range(len(arr)), arr['log_input'], color='k', alpha=0.2)\n ax.scatter(range(len(arr)), arr['log_output'], color='k')\n # Plot prediciton errors\n for idx, obs, trt, plo, pmd, phi in zip(range(len(arr)),\n arr['log_output'],\n arr['treatment'],\n arr['y_pred_5pc'],\n arr['y_pred_median'],\n arr['y_pred_95pc']):\n if plo < obs < phi:\n if trt == 1:\n lcolor = 'b-'\n pcolor = 'b.'\n else:\n lcolor = 'y-'\n pcolor = 'y.'\n else:\n if trt == 1:\n lcolor = 'r-'\n pcolor = 'r.'\n else:\n lcolor = 'g-'\n pcolor = 'g.'\n ax.plot([idx, idx], [plo, phi], lcolor)\n ax.plot([idx, idx], [pmd, pmd], pcolor)\n ax.set_title(\"{2} probe predictions: {0}, delta: {1}\".format(ttl,\n arr['d_median'].unique(),\n tag))\n" ]
[ [ "pandas.melt", "numpy.log", "pandas.merge", "numpy.median", "pandas.DataFrame", "numpy.percentile", "scipy.stats.spearmanr", "matplotlib.pyplot.plot", "scipy.stats.mstats.rankdata", "matplotlib.pyplot.subplots", "numpy.sort", "pandas.concat", "matplotlib.pyplot.gca", "pandas.read_csv" ] ]
bcebere/jax_tabular_examples
[ "62413aba8f9c917ada3f6f1f59d7d94ad2738bf3" ]
[ "tests/test_mlp.py" ]
[ "# third party\nimport jax\nimport pytest\nfrom sklearn.datasets import load_diabetes, load_digits\n\nfrom jax_examples.models.mlp import MLP, BasicNetwork, NetworkConfig, _train_init\n\n\ndef test_network_config() -> None:\n config = NetworkConfig(\n task_type=\"regression\",\n model_type=str,\n input_shape=12,\n output_shape=11,\n hidden_layers=[1, 2, 3, 4],\n batch_size=23,\n epochs=34,\n learning_rate=1e-2,\n dropout=0.5,\n batchnorm=True,\n nonlin=\"elu\",\n patience=66,\n seed=77,\n optimizer=\"sgd\",\n )\n\n assert config.model_type == str\n assert config.input_shape == 12\n assert config.output_shape == 11\n assert list(config.hidden_layers) == [1, 2, 3, 4]\n assert config.batch_size == 23\n assert config.epochs == 34\n assert config.learning_rate == 1e-2\n assert config.dropout == 0.5\n assert config.batchnorm is True\n assert config.nonlin == \"elu\"\n assert config.patience == 66\n assert config.seed == 77\n assert config.optimizer == \"sgd\"\n\n\[email protected](\"optimizer\", [\"adam\", \"sgd\"])\[email protected](\"task_type\", [\"regression\", \"classification\"])\[email protected](\"nonlin\", [\"relu\", \"elu\", \"leaky_relu\"])\[email protected](\"epochs\", [10, 50, 100])\[email protected](\"dropout\", [0, 0.5, 0.2])\[email protected](\"batchnorm\", [True, False])\ndef test_basic_network(\n optimizer: str,\n task_type: str,\n nonlin: str,\n epochs: int,\n dropout: float,\n batchnorm: bool,\n) -> None:\n config = NetworkConfig(\n task_type=task_type,\n model_type=BasicNetwork,\n epochs=epochs,\n dropout=dropout,\n nonlin=nonlin,\n input_shape=5,\n batchnorm=batchnorm,\n output_shape=2,\n hidden_layers=[1, 2],\n optimizer=optimizer,\n )\n\n rng = jax.random.PRNGKey(config.seed)\n state = _train_init(config, rng)\n\n assert state is not None\n\n assert config.epochs == epochs\n assert config.dropout == dropout\n assert config.batchnorm == batchnorm\n assert config.nonlin == nonlin\n assert config.optimizer == optimizer\n assert config.task_type == task_type\n\n\ndef test_mlp_classification() -> None:\n X, y = load_digits(return_X_y=True)\n model = MLP(task_type=\"classification\")\n\n model.fit(X, y)\n\n assert model.predict(X).shape == y.shape\n assert model.predict_proba(X).shape == (len(y), 10)\n\n\ndef test_mlp_regression() -> None:\n X, y = load_diabetes(return_X_y=True)\n model = MLP(task_type=\"regression\")\n\n model.fit(X, y)\n\n assert model.predict(X).shape == y.shape\n with pytest.raises(ValueError):\n model.predict_proba(X)\n" ]
[ [ "sklearn.datasets.load_digits", "sklearn.datasets.load_diabetes" ] ]
abhimat/gatspy
[ "5aba05a839347eef1552cd108b8d3301d3ce63e0" ]
[ "gatspy/datasets/rrlyrae_generated.py" ]
[ "\"\"\"Tools to generate light curves\"\"\"\nfrom __future__ import division, print_function, absolute_import\n\n__all__ = ['RRLyraeGenerated']\n\nimport numpy as np\nfrom scipy.interpolate import interp1d\nfrom . import fetch_rrlyrae_templates, fetch_rrlyrae\n\n\nclass RRLyraeGenerated(object):\n \"\"\"Generate RR Lyrae light curves from Sesar 2010 lightcurves\n\n Parameters\n ----------\n lcid : int\n Valid light curve ID from the Sesar 2010 RR Lyrae dataset\n random_state : int\n Random number generator seed\n\n Attributes\n ----------\n lcdata : RRLyraeLC object\n Container for the RR Lyrae light curve dataset\n templates : RRLyraeTemplates object\n Container for the RR Lyrae template dataset\n period : float\n Period of the RR Lyrae object\n\n Examples\n --------\n >>> rrlyrae = fetch_rrlyrae()\n >>> lcid = rrlyrae.ids[0]\n >>> gen = RRLyraeGenerated(lcid, random_state=0)\n >>> gen.period\n 0.61431831\n >>> mag = gen.generated('g', [51080.0, 51080.5], err=0.3)\n >>> mag.round(2)\n array([ 17.74, 17.04])\n \"\"\"\n lcdata = None\n templates = None\n\n # Extinction corrections: Table 1 from Berry et al. (2012, ApJ, 757, 166).\n ext_correction = {'u': 1.810,\n 'g': 1.400,\n 'r': 1.0,\n 'i': 0.759,\n 'z': 0.561}\n\n @classmethod\n def _fetch_data(cls):\n if cls.lcdata is None:\n cls.lcdata = fetch_rrlyrae()\n cls.templates = fetch_rrlyrae_templates()\n\n @classmethod\n def _template_func(cls, num, band, mu=0, A=1):\n template_id = \"{0:.0f}{1}\".format(num, band)\n phase, amp = cls.templates.get_template(template_id)\n phase = np.concatenate([phase, [1]])\n amp = np.concatenate([amp, amp[-1:]])\n return interp1d(phase, mu + A * amp)\n\n def __init__(self, lcid, random_state=None):\n self._fetch_data()\n self.lcid = lcid\n self.meta = self.lcdata.get_metadata(lcid)\n self.obsmeta = self.lcdata.get_obsmeta(lcid)\n self.rng = np.random.RandomState(random_state)\n\n @property\n def period(self):\n \"\"\"Period (in days) of the RR Lyrae\"\"\"\n return self.meta['P']\n\n def observed(self, band, corrected=True):\n \"\"\"Return observed values in the given band\n\n Parameters\n ----------\n band : str\n desired bandpass: should be one of ['u', 'g', 'r', 'i', 'z']\n corrected : bool (optional)\n If true, correct for extinction\n\n Returns\n -------\n t, mag, dmag : ndarrays\n The times, magnitudes, and magnitude errors for the specified band.\n \"\"\"\n if band not in 'ugriz':\n raise ValueError(\"band='{0}' not recognized\".format(band))\n i = 'ugriz'.find(band)\n t, y, dy = self.lcdata.get_lightcurve(self.lcid, return_1d=False)\n\n if corrected:\n ext = self.obsmeta['rExt'] * self.ext_correction[band]\n else:\n ext = 0\n\n return t[:, i], y[:, i] - ext, dy[:, i]\n\n def generated(self, band, t, err=None, corrected=True):\n \"\"\"Return generated magnitudes in the specified band\n\n Parameters\n ----------\n band : str\n desired bandpass: should be one of ['u', 'g', 'r', 'i', 'z']\n t : array_like\n array of times (in days)\n err : float or array_like\n gaussian error in observations\n corrected : bool (optional)\n If true, correct for extinction\n\n Returns\n -------\n mag : ndarray\n magnitudes at the specified times under the generated model.\n \"\"\"\n t = np.asarray(t)\n num = self.meta[band + 'T']\n mu = self.meta[band + '0']\n amp = self.meta[band + 'A']\n t0 = self.meta[band + 'E']\n\n # if there are nans or infinities, mask them\n bad_vals = np.isnan(t) | np.isinf(t)\n t[bad_vals] = t0\n\n if corrected:\n ext = 0\n else:\n ext = self.obsmeta['rExt'] * self.ext_correction[band]\n\n 
func = self._template_func(num, band, mu + ext, amp)\n mag = func(((t - t0) / self.period) % 1)\n\n mag[bad_vals] = np.nan\n\n if err is not None:\n mag += self.rng.normal(0, err, t.shape)\n\n return mag\n" ]
[ [ "numpy.concatenate", "numpy.isinf", "scipy.interpolate.interp1d", "numpy.isnan", "numpy.asarray", "numpy.random.RandomState" ] ]
creamcheesesteak/Project_Guardians
[ "bfd499f9422964b35acaada1c2e4872835c06c79" ]
[ "project/Front/Front_s/naver_news.py" ]
[ "import requests\nfrom bs4 import BeautifulSoup\nimport pandas as pd\nfrom apscheduler.schedulers.blocking import BlockingScheduler\n\nurl = 'https://search.naver.com/search.naver?query=%EC%98%A8%EC%8B%A4%EA%B0%80%EC%8A%A4&where=news&ie=utf8&sm=nws_hty'\nreq = requests.get(url)\n\ndef get_now():\n soup = BeautifulSoup(req.text, 'html.parser')\n title = soup.select('div.news_wrap.api_ani_send > div > a')\n press = soup.select('a.info.press')\n\n title_fin = list()\n press_fin = list()\n url_fin = list()\n for data in title:\n news1 = data.get_text().strip()\n title_fin.append(news1)\n\n for data2 in press:\n news2 = data2.get_text().strip()\n press_fin.append(news2)\n\n for link in title:\n url_fin.append(link['href'])\n\n table = pd.DataFrame({'title':title_fin, 'press':press_fin, 'url':url_fin})\n print(table)\n\n# 5분마다 업데이트\nsched = BlockingScheduler()\nsched.add_job(get_now, 'interval', minutes=5)\nsched.start()" ]
[ [ "pandas.DataFrame" ] ]
SafonovMikhail/python_000577
[ "739f764e80f1ca354386f00b8e9db1df8c96531d" ]
[ "000759DataCampPy01/DataCamp000759Py01ch04p01v_NumPy_20200415.py" ]
[ "import numpy as np\n\n# массив, состоящий из роста игроков\nheight = [1.73, 1.68, 1.71, 1.89, 1.79]\n\n# массив весок игроков (в кг.)\nweight = [65.4, 59.2, 63.6, 88.4, 68.7]\n\n# преобразуем массивы в нампи\nnp_height = np.array(height)\nnp_weight = np.array(weight)\n\nprint(np_height)\nprint(np_weight)\n\n# выполним поэлементную операцию вычисления индекса массы тела\n# из полученных результатов сформируем новый список\nbmi = np_weight / np_height ** 2\nprint(bmi)\n\n# округлим массив до второй значащей цифры\n# округление производится с учетом правил приближенных вычислений\nbmi = bmi.round(2)\nprint(bmi)\n" ]
[ [ "numpy.array" ] ]
abulbasar/TensorFlowJavaExamples
[ "91905183657003a5be75bc9336a25c1a0ae7e026" ]
[ "train_tensor_mnist_cnn_saving.py" ]
[ "\r\nimport tensorflow as tf\r\n\r\nfrom tensorflow.python.saved_model import builder as saved_model_builder\r\n\r\ntf.reset_default_graph()\r\n\r\nfrom tensorflow.examples.tutorials.mnist import input_data\r\nmnist = input_data.read_data_sets(\"MNIST_data/\", one_hot=True)\r\n\r\nx = tf.placeholder(tf.float32, shape=[None, 784], name=\"x\")\r\ny_ = tf.placeholder(tf.float32, shape=[None, 10], name=\"y_\")\r\n\r\ndef weight_variable(shape):\n initial = tf.truncated_normal(shape, stddev=0.1)\n return tf.Variable(initial)\n\ndef bias_variable(shape):\n initial = tf.constant(0.1, shape=shape)\n return tf.Variable(initial)\n\ndef conv2d(x, W):\n return tf.nn.conv2d(x, W, strides=[1, 1, 1, 1], padding='SAME')\n\ndef max_pool_2x2(x):\n return tf.nn.max_pool(x, ksize=[1, 2, 2, 1],\n strides=[1, 2, 2, 1], padding='SAME')\n \ndef save_model(session, input_tensor, output_tensor):\n signature = tf.saved_model.signature_def_utils.build_signature_def(\n inputs = {'input': tf.saved_model.utils.build_tensor_info(input_tensor)},\n outputs = {'output': tf.saved_model.utils.build_tensor_info(output_tensor)},\n )\n b = saved_model_builder.SavedModelBuilder('model/')\n b.add_meta_graph_and_variables(session,\n [tf.saved_model.tag_constants.SERVING],\n signature_def_map={tf.saved_model.signature_constants.DEFAULT_SERVING_SIGNATURE_DEF_KEY: signature})\n b.save() \n\nW_conv1 = weight_variable([5, 5, 1, 32])\nb_conv1 = bias_variable([32])\n\nx_image = tf.reshape(x, [-1, 28, 28, 1])\n\nh_conv1 = tf.nn.relu(conv2d(x_image, W_conv1) + b_conv1)\nh_pool1 = max_pool_2x2(h_conv1)\n\nW_conv2 = weight_variable([5, 5, 32, 64])\nb_conv2 = bias_variable([64])\n\nh_conv2 = tf.nn.relu(conv2d(h_pool1, W_conv2) + b_conv2)\nh_pool2 = max_pool_2x2(h_conv2)\n\nW_fc1 = weight_variable([7 * 7 * 64, 1024])\nb_fc1 = bias_variable([1024])\n\nh_pool2_flat = tf.reshape(h_pool2, [-1, 7*7*64])\nh_fc1 = tf.nn.relu(tf.matmul(h_pool2_flat, W_fc1) + b_fc1)\n\nkeep_prob = tf.placeholder(tf.float32, name=\"keep_prob\")\nh_fc1_drop = tf.nn.dropout(h_fc1, keep_prob)\n\nW_fc2 = weight_variable([1024, 10])\nb_fc2 = bias_variable([10])\n\ny_conv = tf.add(tf.matmul(h_fc1_drop, W_fc2), b_fc2, name=\"y_conv\")\n\nprint(y_conv)\r\n\r\ncross_entropy = tf.reduce_mean(\r\n tf.nn.softmax_cross_entropy_with_logits(labels=y_, logits=y_conv))\r\ntrain_step = tf.train.AdamOptimizer(1e-4).minimize(cross_entropy)\r\ncorrect_prediction = tf.equal(tf.argmax(y_conv, 1), tf.argmax(y_, 1))\r\naccuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))\r\n\r\n# saver = tf.train.Saver()\r\n\r\n#Create a saver object which will save all the variables\r\nwith tf.Session() as sess:\r\n sess.run(tf.global_variables_initializer())\r\n for i in range(500):\r\n batch = mnist.train.next_batch(50)\r\n if i % 100 == 0:\r\n train_accuracy = accuracy.eval(feed_dict={\r\n x: batch[0], y_: batch[1], keep_prob: 1.0})\r\n print('step %d, training accuracy %g' % (i, train_accuracy))\r\n train_step.run(feed_dict={x: batch[0], y_: batch[1], keep_prob: 0.5})\r\n#\r\n save_model(sess, x, y_conv)\r\n \r\n print('test accuracy %g' % accuracy.eval(feed_dict={\r\n x: mnist.test.images, y_: mnist.test.labels, keep_prob: 1.0}))\r\n" ]
[ [ "tensorflow.nn.softmax_cross_entropy_with_logits", "tensorflow.nn.conv2d", "tensorflow.saved_model.utils.build_tensor_info", "tensorflow.matmul", "tensorflow.reshape", "tensorflow.global_variables_initializer", "tensorflow.cast", "tensorflow.argmax", "tensorflow.Variable", "tensorflow.constant", "tensorflow.python.saved_model.builder.SavedModelBuilder", "tensorflow.nn.max_pool", "tensorflow.nn.dropout", "tensorflow.train.AdamOptimizer", "tensorflow.Session", "tensorflow.truncated_normal", "tensorflow.placeholder", "tensorflow.reset_default_graph", "tensorflow.examples.tutorials.mnist.input_data.read_data_sets" ] ]
shifengxu/pytorch-grad-cam
[ "1ae500e8ad10485df5a2eafed9cc0c01012ae6c6" ]
[ "pytorch_grad_cam/base_cam.py" ]
[ "import cv2\nimport numpy as np\nimport torch\nimport ttach as tta\nfrom pytorch_grad_cam.activations_and_gradients import ActivationsAndGradients\nfrom pytorch_grad_cam.utils.svd_on_activations import get_2d_projection\n\n\nclass BaseCAM:\n def __init__(self,\n model,\n target_layers,\n use_cuda=False,\n reshape_transform=None,\n compute_input_gradient=False,\n uses_gradients=True):\n self.model = model.eval()\n self.target_layers = target_layers\n self.cuda = use_cuda\n if self.cuda:\n self.model = model.cuda() if type(self.cuda) is bool else model.to(self.cuda)\n self.reshape_transform = reshape_transform\n self.compute_input_gradient = compute_input_gradient\n self.uses_gradients = uses_gradients\n self.activations_and_grads = ActivationsAndGradients(self.model, target_layers, reshape_transform)\n\n \"\"\" Get a vector of weights for every channel in the target layer.\n Methods that return weights channels,\n will typically need to only implement this function. \"\"\"\n\n def get_cam_weights(self,\n input_tensor,\n target_layers,\n target_category,\n activations,\n grads):\n raise Exception(\"Not Implemented\")\n\n @staticmethod\n def get_loss(output, target_category):\n loss = 0\n for i in range(len(target_category)):\n loss = loss + output[i, target_category[i]]\n return loss\n\n def get_cam_image(self, input_tensor, target_layer, target_category, activations, grads, eigen_smooth=False):\n weights = self.get_cam_weights(input_tensor, target_layer, target_category, activations, grads)\n # weights shape: [1, 2048]\n # activations shape: [1, 2048, 7, 7] ResNet50 last layer output size 7x7\n # weighted_activations shape: [1, 2048, 7, 7]\n weighted_activations = weights[:, :, None, None] * activations\n if eigen_smooth:\n cam = get_2d_projection(weighted_activations)\n else:\n cam = weighted_activations.sum(axis=1)\n return cam\n\n def forward(self, input_tensor, target_category=None, eigen_smooth=False):\n if self.cuda:\n input_tensor = input_tensor.cuda() if type(self.cuda) == bool else input_tensor.to(self.cuda)\n\n if self.compute_input_gradient:\n input_tensor = torch.autograd.Variable(input_tensor, requires_grad=True)\n\n output = self.activations_and_grads(input_tensor)\n if isinstance(target_category, int):\n target_category = [target_category] * input_tensor.size(0)\n\n if target_category is None:\n target_category = np.argmax(output.cpu().data.numpy(), axis=-1)\n else:\n assert(len(target_category) == input_tensor.size(0))\n\n if self.uses_gradients:\n self.model.zero_grad()\n loss = self.get_loss(output, target_category)\n loss.backward(retain_graph=True)\n\n # In most of the saliency attribution papers, the saliency is\n # computed with a single target layer.\n # Commonly it is the last convolutional layer.\n # Here we support passing a list with multiple target layers.\n # It will compute the saliency image for every image,\n # and then aggregate them (with a default mean aggregation).\n # This gives you more flexibility in case you just want to\n # use all conv layers for example, all Batch-norm layers,\n # or something else.\n cam_per_layer = self.compute_cam_per_layer(input_tensor, target_category, eigen_smooth)\n return self.aggregate_multi_layers(cam_per_layer)\n\n @staticmethod\n def get_target_width_height(input_tensor):\n width, height = input_tensor.size(-1), input_tensor.size(-2)\n return width, height\n\n def compute_cam_per_layer(self, input_tensor, target_category, eigen_smooth):\n target_size = self.get_target_width_height(input_tensor)\n\n 
cam_per_target_layer = []\n act_list = [a.cpu().data.numpy() for a in self.activations_and_grads.activations]\n grd_list = [g.cpu().data.numpy() for g in self.activations_and_grads.gradients]\n # Loop over the saliency image from every layer\n for target_layer, layer_act, layer_grd in zip(self.target_layers, act_list, grd_list):\n cam = self.get_cam_image(input_tensor,\n target_layer,\n target_category,\n layer_act,\n layer_grd,\n eigen_smooth)\n # cam: ndarray, shape: [1, 7, 7]\n cam[cam < 0] = 0 # works like mute the min-max scale in the function of scale_cam_image\n scaled = self.scale_cam_image(cam, target_size) # shape [1, 224, 224]\n cam_per_target_layer.append(scaled[:, None, :])\n\n return cam_per_target_layer\n\n def aggregate_multi_layers(self, cam_per_target_layer):\n cam_per_target_layer = np.concatenate(cam_per_target_layer, axis=1)\n cam_per_target_layer = np.maximum(cam_per_target_layer, 0)\n result = np.mean(cam_per_target_layer, axis=1)\n return self.scale_cam_image(result)\n\n @staticmethod\n def scale_cam_image(cam, target_size=None):\n result = []\n for img in cam:\n img = img - np.min(img)\n img = img / (1e-7 + np.max(img))\n if target_size is not None:\n img = cv2.resize(img, target_size)\n result.append(img)\n result = np.float32(result)\n\n return result\n\n def forward_augmentation_smoothing(self,\n input_tensor,\n target_category=None,\n eigen_smooth=False):\n transforms = tta.Compose(\n [\n tta.HorizontalFlip(),\n tta.Multiply(factors=[0.9, 1, 1.1]),\n ]\n )\n cams = []\n for transform in transforms:\n augmented_tensor = transform.augment_image(input_tensor)\n cam = self.forward(augmented_tensor,\n target_category, eigen_smooth)\n\n # The ttach library expects a tensor of size BxCxHxW\n cam = cam[:, None, :, :]\n cam = torch.from_numpy(cam)\n cam = transform.deaugment_mask(cam)\n\n # Back to numpy float32, HxW\n cam = cam.numpy()\n cam = cam[:, 0, :, :]\n cams.append(cam)\n\n cam = np.mean(np.float32(cams), axis=0)\n return cam\n\n def __call__(self,\n input_tensor,\n target_category=None,\n aug_smooth=False,\n eigen_smooth=False):\n\n # Smooth the CAM result with test time augmentation\n if aug_smooth is True:\n return self.forward_augmentation_smoothing(\n input_tensor, target_category, eigen_smooth)\n\n return self.forward(input_tensor, target_category, eigen_smooth)\n\n def __del__(self):\n self.activations_and_grads.release()\n\n def __enter__(self):\n return self\n\n def __exit__(self, exc_type, exc_value, exc_tb):\n self.activations_and_grads.release()\n if isinstance(exc_value, IndexError):\n # Handle IndexError here...\n print(\n f\"An exception occurred in CAM with block: {exc_type}. Message: {exc_value}\")\n return True\n" ]
[ [ "numpy.concatenate", "numpy.max", "torch.autograd.Variable", "numpy.min", "numpy.mean", "torch.from_numpy", "numpy.float32", "numpy.maximum" ] ]
kookmin-sw/2019-cap1-2019_8
[ "36c007f345cad637c1efc0e91b570ec467751f7d" ]
[ "util/compare_model.py" ]
[ "import pandas as pd\nimport lightgbm as lgb\nimport numpy as np\n\nfrom sklearn.svm import SVC\nfrom sklearn.tree import DecisionTreeClassifier\nfrom sklearn.ensemble import RandomForestClassifier\nfrom sklearn.ensemble import GradientBoostingClassifier\nfrom sklearn.ensemble import AdaBoostClassifier\nfrom sklearn.neighbors import KNeighborsClassifier\nfrom sklearn.naive_bayes import GaussianNB\nfrom sklearn.linear_model import LogisticRegression\nfrom sklearn.neural_network import MLPClassifier\n\nfrom sklearn.metrics import accuracy_score\nfrom sklearn.model_selection import cross_val_score\n\ndata_path = ''\n\nmodels = []\nmodels.append((\"LR\", LogisticRegression()))\nmodels.append((\"DT\", DecisionTreeClassifier()))\nmodels.append((\"SVM\", SVC()))\nmodels.append((\"NB\", GaussianNB()))\nmodels.append((\"KNN\", KNeighborsClassifier()))\nmodels.append((\"RF\", RandomForestClassifier()))\nmodels.append((\"GB\", GradientBoostingClassifier()))\nmodels.append((\"AB\", AdaBoostClassifier()))\nmodels.append((\"ANN\", MLPClassifier()))\n\n\nparam = {'num_leaves': 64,\n 'min_data_in_leaf': 64,\n 'objective':'binary',\n 'nthread': 1,\n 'max_depth': -1,\n 'learning_rate': 0.05,\n \"boosting\": \"gbdt\",\n \"feature_fraction\": 0.7,\n \"bagging_freq\": 1,\n \"bagging_fraction\": 0.7 ,\n \"bagging_seed\": 11,\n \"metric\": ['auc','binary_logloss'],\n \"lambda_l1\": 0.1,\n \"random_state\": 24,\n \"verbosity\": -1}\n\ndef get_acc(x_train, y_train):\n for name, model in models:\n model.fit(x_train, y_train)\n y_pred = model.predict(x_train)\n print(name, \"'s Accuracy is \", accuracy_score(y_train, y_pred))\n\n\n\ndef get_cv(x_train, y_train):\n for name, model in models:\n scores = np.mean(cross_val_score(model, x_train, y_train, cv=10))\n print(name, \"'s mean cv 10-fold is \", scores)\n\n\ndef run_lgb(x_train, y_train):\n # 10 Fold Cross Validation\n N_FOLDS = 10\n \n trn_data = lgb.Dataset(x_train, label=y_train)\n\n print('Starting training...')\n cv_results = lgb.cv(param, trn, nfold=N_FOLDS, verbose_eval=20, early_stopping_rounds=100)\n\n print('Best CV score:', cv_results['auc-mean'][-1])\n\n\n\nif __name__ == \"__main__\":\n df = pd.read_csv(data_path)\n x_train = df.iloc[:, :-1].values\n y_train = df.iloc[:, -1].values\n\n get_acc(x_train, y_train)\n get_cv(x_train, y_train)\n run_lgb(x_train, y_train)\n" ]
[ [ "sklearn.ensemble.AdaBoostClassifier", "sklearn.ensemble.RandomForestClassifier", "sklearn.neighbors.KNeighborsClassifier", "sklearn.neural_network.MLPClassifier", "sklearn.naive_bayes.GaussianNB", "sklearn.svm.SVC", "sklearn.metrics.accuracy_score", "sklearn.linear_model.LogisticRegression", "sklearn.tree.DecisionTreeClassifier", "pandas.read_csv", "sklearn.model_selection.cross_val_score", "sklearn.ensemble.GradientBoostingClassifier" ] ]
philip30/xnmt
[ "b5e6985d3bedfac102312cab030a60594bc17baf", "b5e6985d3bedfac102312cab030a60594bc17baf" ]
[ "xnmt/input_readers.py", "test/test_training.py" ]
[ "import functools\nimport itertools\nimport ast\nimport numbers\nimport warnings\nimport numpy as np\n\nfrom typing import Iterator, Optional, Sequence, Union\n\nfrom xnmt import logger\nfrom xnmt import events, vocabs\nfrom xnmt.graph import HyperEdge, HyperGraph\nfrom xnmt.persistence import serializable_init, Serializable\nfrom xnmt import sent\nfrom xnmt import batchers, output_processors\n\nwith warnings.catch_warnings():\n warnings.simplefilter(\"ignore\", lineno=36)\n import h5py\n\n\nclass InputReader(object):\n \"\"\"\n A base class to read in a file and turn it into an input\n \"\"\"\n def read_sents(self, filename: str, filter_ids: Sequence[numbers.Integral] = None) -> Iterator[sent.Sentence]:\n \"\"\"\n Read sentences and return an iterator.\n\n Args:\n filename: data file\n filter_ids: only read sentences with these ids (0-indexed)\n Returns: iterator over sentences from filename\n \"\"\"\n return self.iterate_filtered(filename, filter_ids)\n\n def count_sents(self, filename: str) -> int:\n \"\"\"\n Count the number of sentences in a data file.\n\n Args:\n filename: data file\n Returns: number of sentences in the data file\n \"\"\"\n raise RuntimeError(\"Input readers must implement the count_sents function\")\n\n def needs_reload(self) -> bool:\n \"\"\"\n Overwrite this method if data needs to be reload for each epoch\n \"\"\"\n return False\n\n def iterate_filtered(self, filename: str, filter_ids:Optional[Sequence[numbers.Integral]]=None):\n raise NotImplementedError()\n\n\nclass BaseTextReader(InputReader):\n\n def read_sent(self, line: str, idx: numbers.Integral) -> sent.Sentence:\n \"\"\"\n Convert a raw text line into an input object.\n\n Args:\n line: a single input string\n idx: sentence number\n Returns: a SentenceInput object for the input sentence\n \"\"\"\n raise RuntimeError(\"Input readers must implement the read_sent function\")\n\n @functools.lru_cache(maxsize=1)\n def count_sents(self, filename: str) -> numbers.Integral:\n newlines = 0\n with open(filename, 'r+b') as f:\n for _ in f:\n newlines += 1\n return newlines\n\n def iterate_filtered(self, filename: str, filter_ids: Optional[Sequence[numbers.Integral]]=None) -> Iterator:\n \"\"\"\n Args:\n filename: data file (text file)\n filter_ids:\n Returns: iterator over lines as strings (useful for subclasses to implement read_sents)\n \"\"\"\n sent_count = 0\n max_id = None\n if filter_ids is not None:\n max_id = max(filter_ids)\n filter_ids = set(filter_ids)\n with open(filename, encoding='utf-8') as f:\n for line in f:\n if filter_ids is None or sent_count in filter_ids:\n yield self.read_sent(line=line, idx=sent_count)\n sent_count += 1\n if max_id is not None and sent_count > max_id:\n break\n\n\nclass PlainTextReader(BaseTextReader, Serializable):\n \"\"\"\n Handles the typical case of reading plain text files, with one sent per line.\n\n Args:\n vocab: Vocabulary to convert string tokens to integer ids. 
If not given, plain text will be assumed to contain\n space-separated integer ids.\n output_proc: output processors to revert the created sentences back to a readable string\n \"\"\"\n yaml_tag = '!PlainTextReader'\n\n @serializable_init\n def __init__(self,\n vocab: Optional[vocabs.Vocab] = None,\n output_proc: Sequence[output_processors.OutputProcessor] = []) -> None:\n self.vocab = vocab\n self.output_procs = output_processors.OutputProcessor.get_output_processor(output_proc)\n\n def read_sent(self, line: str, idx: numbers.Integral) -> sent.SimpleSentence:\n return sent.SimpleSentence(idx=idx,\n words=[self.vocab.convert(word) for word in line.strip().split()] + [vocabs.Vocab.ES],\n vocab=self.vocab,\n output_procs=self.output_procs)\n\n def vocab_size(self) -> numbers.Integral:\n return len(self.vocab)\n\n\nclass LengthTextReader(BaseTextReader, Serializable):\n yaml_tag = '!LengthTextReader'\n\n @serializable_init\n def __init__(self, output_proc: Sequence[output_processors.OutputProcessor] = []) -> None:\n self.output_procs = output_processors.OutputProcessor.get_output_processor(output_proc)\n\n def read_sent(self, line:str, idx:numbers.Integral) -> sent.ScalarSentence:\n return sent.ScalarSentence(idx=idx, value=len(line.strip().split()))\n\n\nclass CompoundReader(InputReader, Serializable):\n \"\"\"\n A compound reader reads inputs using several input readers at the same time.\n\n The resulting inputs will be of type :class:`sent.CompoundSentence`, which holds the results from the different\n readers as a tuple. Inputs can be read from different locations (if input file name is a sequence of filenames) or all\n from the same location (if it is a string). The latter can be used to read the same inputs using several input\n different readers which might capture different aspects of the input data.\n\n Args:\n readers: list of input readers to use\n vocab: not used by this reader, but some parent components may require access to the vocab.\n \"\"\"\n yaml_tag = \"!CompoundReader\"\n @serializable_init\n def __init__(self, readers: Sequence[InputReader], vocab: Optional[vocabs.Vocab] = None) -> None:\n if len(readers) < 2: raise ValueError(\"need at least two readers\")\n self.readers = readers\n if vocab: self.vocab = vocab\n\n def read_sents(self, filename: Union[str,Sequence[str]], filter_ids: Sequence[numbers.Integral] = None) \\\n -> Iterator[sent.Sentence]:\n if isinstance(filename, str): filename = [filename] * len(self.readers)\n generators = [reader.read_sents(filename=cur_filename, filter_ids=filter_ids) for (reader, cur_filename) in\n zip(self.readers, filename)]\n while True:\n try:\n sub_sents = tuple([next(gen) for gen in generators])\n yield sent.CompoundSentence(sents=sub_sents)\n except StopIteration:\n return\n\n def count_sents(self, filename: str) -> int:\n return self.readers[0].count_sents(filename if isinstance(filename,str) else filename[0])\n\n def needs_reload(self) -> bool:\n return any(reader.needs_reload() for reader in self.readers)\n\n\nclass SentencePieceTextReader(BaseTextReader, Serializable):\n \"\"\"\n Read in text and segment it with sentencepiece. 
Optionally perform sampling\n for subword regularization, only at training time.\n https://arxiv.org/pdf/1804.10959.pdf\n \"\"\"\n yaml_tag = '!SentencePieceTextReader'\n\n @events.register_xnmt_handler\n @serializable_init\n def __init__(self,\n model_file: str,\n sample_train: bool=False,\n l: numbers.Integral=-1,\n alpha: numbers.Real=0.1,\n vocab: Optional[vocabs.Vocab]=None,\n output_proc=[output_processors.JoinPieceTextOutputProcessor]) -> None:\n \"\"\"\n Args:\n model_file: The sentence piece model file\n sample_train: On the training set, sample outputs\n l: The \"l\" parameter for subword regularization, how many sentences to sample\n alpha: The \"alpha\" parameter for subword regularization, how much to smooth the distribution\n vocab: The vocabulary\n output_proc: output processors to revert the created sentences back to a readable string\n \"\"\"\n import sentencepiece as spm\n self.subword_model = spm.SentencePieceProcessor()\n self.subword_model.Load(model_file)\n self.sample_train = sample_train\n self.l = l\n self.alpha = alpha\n self.vocab = vocab\n self.train = False\n self.output_procs = output_processors.OutputProcessor.get_output_processor(output_proc)\n\n @events.handle_xnmt_event\n def on_set_train(self, val):\n self.train = val\n\n def read_sent(self, line: str, idx: numbers.Integral) -> sent.SimpleSentence:\n if self.sample_train and self.train:\n words = self.subword_model.SampleEncodeAsPieces(line.strip(), self.l, self.alpha)\n else:\n words = self.subword_model.EncodeAsPieces(line.strip())\n #words = [w.decode('utf-8') for w in words]\n return sent.SimpleSentence(idx=idx,\n words=[self.vocab.convert(word) for word in words] + [self.vocab.convert(vocabs.Vocab.ES_STR)],\n vocab=self.vocab,\n output_procs=self.output_procs)\n\n def vocab_size(self) -> numbers.Integral:\n return len(self.vocab)\n\n\nclass RamlTextReader(BaseTextReader, Serializable):\n \"\"\"\n Handles the RAML sampling, can be used on the target side, or on both the source and target side.\n Randomly replaces words according to Hamming Distance.\n https://arxiv.org/pdf/1808.07512.pdf\n https://arxiv.org/pdf/1609.00150.pdf\n \"\"\"\n yaml_tag = '!RamlTextReader'\n\n @events.register_xnmt_handler\n @serializable_init\n def __init__(self,\n tau: Optional[float] = 1.,\n vocab: Optional[vocabs.Vocab] = None,\n output_proc: Sequence[output_processors.OutputProcessor]=[]) -> None:\n \"\"\"\n Args:\n tau: The temperature that controls peakiness of the sampling distribution\n vocab: The vocabulary\n \"\"\"\n self.tau = tau\n self.vocab = vocab\n self.output_procs = output_processors.OutputProcessor.get_output_processor(output_proc)\n\n @events.handle_xnmt_event\n def on_set_train(self, val):\n self.train = val\n\n def read_sent(self, line: str, idx: numbers.Integral) -> sent.SimpleSentence:\n words = line.strip().split()\n if not self.train:\n return sent.SimpleSentence(idx=idx,\n words=[self.vocab.convert(word) for word in words] + [vocabs.Vocab.ES],\n vocab=self.vocab,\n output_procs=self.output_procs)\n word_ids = np.array([self.vocab.convert(word) for word in words])\n length = len(word_ids)\n logits = np.arange(length) * (-1) * self.tau\n logits = np.exp(logits - np.max(logits))\n probs = logits / np.sum(logits)\n num_words = np.random.choice(length, p=probs)\n corrupt_pos = np.random.binomial(1, p=num_words/length, size=(length,))\n num_words_to_sample = np.sum(corrupt_pos)\n sampled_words = np.random.choice(np.arange(2, len(self.vocab)), size=(num_words_to_sample,))\n 
word_ids[np.where(corrupt_pos==1)[0].tolist()] = sampled_words\n return sent.SimpleSentence(idx=idx,\n words=word_ids.tolist() + [vocabs.Vocab.ES],\n vocab=self.vocab,\n output_procs=self.output_procs)\n\n def needs_reload(self) -> bool:\n return True\n\n\nclass CharFromWordTextReader(PlainTextReader, Serializable):\n \"\"\"\n Read in word based corpus and turned that into SegmentedSentence.\n SegmentedSentece's words are characters, but it contains the information of the segmentation.\n \"\"\"\n yaml_tag = \"!CharFromWordTextReader\"\n ONE_MB = 1000 * 1024\n\n @serializable_init\n def __init__(self,\n vocab: vocabs.Vocab = None,\n char_vocab: vocabs.CharVocab = None,\n add_word_begin_marker = True,\n add_word_end_marker = True,\n *args, **kwargs):\n assert char_vocab is not None and vocab is not None\n super().__init__(vocab=vocab, *args, **kwargs)\n self.char_vocab = char_vocab\n self.add_word_begin_marker = add_word_begin_marker\n self.add_word_end_marker = add_word_end_marker\n \n @functools.lru_cache(maxsize=ONE_MB)\n def convert_word(self, word):\n return [self.char_vocab.convert(c) for c in word]\n\n def read_sent(self, line: str, idx: numbers.Integral) -> sent.SegmentedSentence:\n words = []\n segs = []\n offset = 0\n for word in line.strip().split():\n chars = []\n # <SS>\n if self.add_word_begin_marker:\n offset += 1\n chars.append(self.char_vocab.SS)\n # Chars\n chars.extend(self.convert_word(word))\n offset += len(word)\n # <PAD>\n if self.add_word_end_marker:\n offset += 1\n chars.append(self.char_vocab.PAD)\n # Outputs\n segs.append(offset-1)\n words.append(sent.SegmentedWord(tuple(chars), self.vocab.convert(word)))\n # Adding EOS\n segs.append(segs[-1]+1)\n words.append(sent.SegmentedWord(tuple([self.char_vocab.ES]), self.vocab.ES))\n # For segment actions\n segment = np.zeros(segs[-1]+1)\n segment[segs] = 1\n\n return sent.SegmentedSentence(segment=segs, words=words, idx=idx, vocab=self.vocab, output_procs=self.output_procs)\n\n\nclass SimultActionTextReader(PlainTextReader, Serializable):\n yaml_tag = \"!SimultActionTextReader\"\n\n @serializable_init\n def __init__(self):\n self.vocab = vocabs.Vocab(i2w=[\"READ\", \"WRITE\"])\n\n def read_sent(self, line: str, idx: numbers.Integral) -> sent.Sentence:\n try:\n actions = [self._parse_action(x) for x in line.strip().split()]\n except ValueError:\n raise ValueError(\"Error on idx {} on line: \\n{}\".format(idx, line.strip()))\n\n actions.extend([self.vocab.convert(\"READ\"),\n self.vocab.convert(\"WRITE\")])\n\n return sent.AuxSimpleSentence(words=actions, idx=idx, vocab=self.vocab)\n\n def _parse_action(self, action_str: str):\n if action_str.endswith(\")\"):\n start_index = action_str.index(\"(\")\n content = action_str[start_index+1:-1]\n action_str = action_str[:start_index]\n else:\n content = None\n\n if action_str == \"READ\" or action_str == \"WRITE\":\n return self.vocab.convert(action_str)\n else:\n raise ValueError(content)\n\n\nclass H5Reader(InputReader, Serializable):\n \"\"\"\n Handles the case where sents are sequences of continuous-space vectors.\n\n The input is a \".h5\" file, which can be created for example using xnmt.preproc.MelFiltExtractor\n\n The data items are assumed to be labeled with integers 0, 1, .. (converted to strings).\n\n Each data item will be a 2D matrix representing a sequence of vectors. 
They can\n be in either order, depending on the value of the \"transpose\" variable:\n * sents[sent_id][feat_ind,timestep] if transpose=False\n * sents[sent_id][timestep,feat_ind] if transpose=True\n\n Args:\n transpose: whether inputs are transposed or not.\n feat_from: use feature dimensions in a range, starting at this index (inclusive)\n feat_to: use feature dimensions in a range, ending at this index (exclusive)\n feat_skip: stride over features\n timestep_skip: stride over timesteps\n timestep_truncate: cut off timesteps if sequence is longer than specified value\n \"\"\"\n yaml_tag = u\"!H5Reader\"\n @serializable_init\n def __init__(self,\n transpose: bool = False,\n feat_from: Optional[numbers.Integral] = None,\n feat_to: Optional[numbers.Integral] = None,\n feat_skip: Optional[numbers.Integral] = None,\n timestep_skip: Optional[numbers.Integral] = None,\n timestep_truncate: Optional[numbers.Integral] = None):\n self.transpose = transpose\n self.feat_from = feat_from\n self.feat_to = feat_to\n self.feat_skip = feat_skip\n self.timestep_skip = timestep_skip\n self.timestep_truncate = timestep_truncate\n\n def read_sents(self, filename: str, filter_ids: Optional[Sequence[numbers.Integral]]=None) -> Iterator[sent.ArraySentence]:\n with h5py.File(filename, \"r\") as hf:\n h5_keys = sorted(hf.keys(), key=lambda x: int(x))\n if filter_ids is not None:\n filter_ids = sorted(filter_ids)\n h5_keys = [h5_keys[i] for i in filter_ids]\n h5_keys.sort(key=lambda x: int(x))\n for sent_no, key in enumerate(h5_keys):\n inp = hf[key][:]\n if self.transpose:\n inp = inp.transpose()\n\n sub_inp = inp[self.feat_from: self.feat_to: self.feat_skip, :self.timestep_truncate:self.timestep_skip]\n if sub_inp.size < inp.size:\n inp = np.empty_like(sub_inp)\n np.copyto(inp, sub_inp)\n else:\n inp = sub_inp\n\n if sent_no % 1000 == 999:\n logger.info(f\"Read {sent_no+1} lines ({float(sent_no+1)/len(h5_keys)*100:.2f}%) of {filename} at {key}\")\n yield sent.ArraySentence(idx=filter_ids[sent_no] if filter_ids else sent_no, nparr=inp)\n\n def count_sents(self, filename: str) -> numbers.Integral:\n with h5py.File(filename, \"r\") as hf:\n l = len(hf.keys())\n return l\n\n\nclass NpzReader(InputReader, Serializable):\n \"\"\"\n Handles the case where sents are sequences of continuous-space vectors.\n\n The input is a \".npz\" file, which consists of multiply \".npy\" files, each\n corresponding to a single sequence of continuous features. This can be\n created in two ways:\n * Use the builtin function numpy.savez_compressed()\n * Create a bunch of .npy files, and run \"zip\" on them to zip them into an archive.\n\n The file names should be named XXX_0, XXX_1, etc., where the final number after the underbar\n indicates the order of the sequence in the corpus. This is done automatically by\n numpy.savez_compressed(), in which case the names will be arr_0, arr_1, etc.\n\n Each numpy file will be a 2D matrix representing a sequence of vectors. 
They can\n be in either order, depending on the value of the \"transpose\" variable.\n * sents[sent_id][feat_ind,timestep] if transpose=False\n * sents[sent_id][timestep,feat_ind] if transpose=True\n\n Args:\n transpose: whether inputs are transposed or not.\n feat_from: use feature dimensions in a range, starting at this index (inclusive)\n feat_to: use feature dimensions in a range, ending at this index (exclusive)\n feat_skip: stride over features\n timestep_skip: stride over timesteps\n timestep_truncate: cut off timesteps if sequence is longer than specified value\n \"\"\"\n yaml_tag = u\"!NpzReader\"\n @serializable_init\n def __init__(self,\n transpose: bool = False,\n feat_from: Optional[numbers.Integral] = None,\n feat_to: Optional[numbers.Integral] = None,\n feat_skip: Optional[numbers.Integral] = None,\n timestep_skip: Optional[numbers.Integral] = None,\n timestep_truncate: Optional[numbers.Integral] = None):\n self.transpose = transpose\n self.feat_from = feat_from\n self.feat_to = feat_to\n self.feat_skip = feat_skip\n self.timestep_skip = timestep_skip\n self.timestep_truncate = timestep_truncate\n\n def read_sents(self, filename: str, filter_ids: Optional[Sequence[numbers.Integral]] = None) -> None:\n npz_file = np.load(filename, mmap_mode=None if filter_ids is None else \"r\")\n npz_keys = sorted(npz_file.files, key=lambda x: int(x.split('_')[-1]))\n if filter_ids is not None:\n filter_ids = sorted(filter_ids)\n npz_keys = [npz_keys[i] for i in filter_ids]\n npz_keys.sort(key=lambda x: int(x.split('_')[-1]))\n for sent_no, key in enumerate(npz_keys):\n inp = npz_file[key]\n if self.transpose:\n inp = inp.transpose()\n\n sub_inp = inp[self.feat_from: self.feat_to: self.feat_skip, :self.timestep_truncate:self.timestep_skip]\n if sub_inp.size < inp.size:\n inp = np.empty_like(sub_inp)\n np.copyto(inp, sub_inp)\n else:\n inp = sub_inp\n\n if sent_no % 1000 == 999:\n logger.info(f\"Read {sent_no+1} lines ({float(sent_no+1)/len(npz_keys)*100:.2f}%) of {filename} at {key}\")\n yield sent.ArraySentence(idx=filter_ids[sent_no] if filter_ids else sent_no, nparr=inp)\n npz_file.close()\n\n def count_sents(self, filename: str) -> numbers.Integral:\n npz_file = np.load(filename, mmap_mode=\"r\") # for counting sentences, only read the index\n l = len(npz_file.files)\n npz_file.close()\n return l\n\n\nclass IDReader(BaseTextReader, Serializable):\n \"\"\"\n Handles the case where we need to read in a single ID (like retrieval problems).\n\n Files must be text files containing a single integer per line.\n \"\"\"\n yaml_tag = \"!IDReader\"\n\n @serializable_init\n def __init__(self) -> None:\n pass\n\n def read_sent(self, line: str, idx: numbers.Integral) -> sent.ScalarSentence:\n return sent.ScalarSentence(idx=idx, value=int(line.strip()))\n\n def read_sents(self, filename: str, filter_ids: Optional[Sequence[numbers.Integral]] = None) -> list:\n return [l for l in self.iterate_filtered(filename, filter_ids)]\n\n\nclass GraphReader(BaseTextReader):\n def __init__(self, node_vocab, edge_vocab, value_vocab):\n self._node_vocab = node_vocab\n self._edge_vocab = edge_vocab\n self._value_vocab = value_vocab\n\n @property\n def node_vocab(self):\n return self._node_vocab\n\n @property\n def edge_vocab(self):\n return self._edge_vocab\n\n @property\n def value_vocab(self):\n return self._value_vocab\n\n\nclass CoNLLToRNNGActionsReader(GraphReader, Serializable):\n \"\"\"\n Handles the reading of CoNLL File Format:\n\n ID FORM LEMMA POS FEAT HEAD DEPREL\n\n A single line represents a single edge 
of dependency parse tree.\n \"\"\"\n yaml_tag = \"!CoNLLToRNNGActionsReader\"\n @serializable_init\n def __init__(self, surface_vocab: vocabs.Vocab, nt_vocab: vocabs.Vocab, edg_vocab: vocabs.Vocab, output_procs=[]):\n super().__init__(nt_vocab, edg_vocab, surface_vocab)\n self.output_procs = output_processors.OutputProcessor.get_output_processor(output_procs)\n\n def read_sents(self, filename: str, filter_ids: Sequence[numbers.Integral] = None):\n # Routine to add tree\n def emit_tree(idx, lines):\n nodes = {}\n edge_list = []\n max_node = -1\n for node_id, form, lemma, pos, feat, head, deprel in lines:\n nodes[node_id] = sent.SyntaxTreeNode(node_id=node_id, value=form, head=pos)\n max_node = max(max_node, node_id)\n nodes[max_node+1] = sent.SyntaxTreeNode(node_id=max_node+1, value=vocabs.Vocab.ES_STR, head=vocabs.Vocab.ES_STR)\n root = -1\n for node_id, form, lemma, pos, feat, head, deprel in lines:\n if head == 0:\n root =node_id\n else:\n edge_list.append(HyperEdge(head, [node_id], None, deprel))\n edge_list.append(HyperEdge(root, [max_node+1], None, vocabs.Vocab.ES_STR))\n return sent.DepTreeRNNGSequenceSentence(idx,\n score=None,\n graph=HyperGraph(edge_list, nodes),\n surface_vocab=self.value_vocab,\n nt_vocab=self.node_vocab,\n edge_vocab=self.edge_vocab,\n all_surfaces=True,\n output_procs=self.output_procs)\n idx = 0\n lines = []\n # Loop all lines in the file\n with open(filename) as fp:\n for line in fp:\n line = line.strip()\n if len(line) <= 1:\n yield emit_tree(idx, lines)\n lines.clear()\n idx += 1\n else:\n try:\n node_id, form, lemma, pos, feat, head, deprel = line.strip().split(\"\\t\")\n lines.append((int(node_id), form, lemma, pos, feat, int(head), deprel))\n except ValueError:\n logger.error(\"Bad line: %s\", line)\n raise\n if len(lines) != 0:\n yield emit_tree(idx, lines)\n\n\nclass PennTreeBankReader(GraphReader, Serializable):\n yaml_tag = \"!PennTreeBankReader\"\n @serializable_init\n def __init__(self, word_vocab: vocabs.Vocab, head_vocab: vocabs.Vocab, output_procs=[]):\n super().__init__(head_vocab, None, word_vocab)\n self.output_procs = output_processors.OutputProcessor.get_output_processor(output_procs)\n\n def _read_tree_from_line(self, line):\n stack = []\n edges = []\n nodes = {}\n now_depth = 0\n now_id = 0\n for token in line.split():\n # Process \"(\"\n if token.startswith(\"(\"):\n stack.append([now_depth, sent.SyntaxTreeNode(now_id, None, token[1:], sent.SyntaxTreeNode.Type.NT)])\n nodes[now_id] = stack[-1][1]\n now_id += 1\n now_depth += 1\n else:\n try:\n end_idx = token.index(\")\")\n except IndexError:\n end_idx = len(token)\n if end_idx != 0:\n stack.append([now_depth, sent.SyntaxTreeNode(now_id, token[:end_idx], None, sent.SyntaxTreeNode.Type.T)])\n nodes[now_id] = stack[-1][1]\n now_id += 1\n # Process \")\"\n for _ in range(end_idx, len(token)):\n depth, child = stack.pop()\n children = [child]\n while len(stack) > 0 and stack[-1][0] == depth:\n children.append(stack.pop()[1])\n if len(stack) > 0:\n parent = stack[-1][1]\n for child in children:\n edges.append(HyperEdge(parent.node_id, [child.node_id]))\n now_depth -= 1\n return HyperGraph(edges, nodes)\n\n def read_sents(self, filename: str, filter_ids: Sequence[numbers.Integral] = None):\n with open(filename) as fp:\n for idx, line in enumerate(fp):\n graph = self._read_tree_from_line(line.strip())\n yield sent.ParseTreeRNNGSequenceSentence(idx=idx,\n score=None,\n graph=graph,\n surface_vocab=self.value_vocab,\n nt_vocab=self.node_vocab,\n edge_vocab=self.edge_vocab,\n 
all_surfaces=False,\n output_procs=self.output_procs)\n\n\nclass LatticeReader(GraphReader, Serializable):\n \"\"\"\n Reads lattices from a text file.\n\n The expected lattice file format is as follows:\n * 1 line per lattice\n * lines are serialized python lists / tuples\n * 2 lists per lattice:\n    - list of nodes, with every node a 4-tuple: (lexicon_entry, fwd_log_prob, marginal_log_prob, bwd_log_prob)\n    - list of arcs, each arc a tuple: (node_id_start, node_id_end)\n            - node_id references the nodes and is 0-indexed\n            - node_id_start < node_id_end\n * All paths must share a common start and end node, i.e. <s> and </s> need to be contained in the lattice\n\n A simple example lattice:\n [('<s>', 0.0, 0.0, 0.0), ('buenas', 0, 0.0, 0.0), ('tardes', 0, 0.0, 0.0), ('</s>', 0.0, 0.0, 0.0)],[(0, 1), (1, 2), (2, 3)]\n\n Args:\n vocab: Vocabulary to convert string tokens to integer ids. If not given, plain text will be assumed to contain\n space-separated integer ids.\n text_input: If ``True``, assume a standard text file as input and convert it to a flat lattice.\n flatten: If ``True``, convert to a flat lattice, with all probabilities set to 1.\n \"\"\"\n yaml_tag = '!LatticeReader'\n\n @serializable_init\n def __init__(self, vocab:vocabs.Vocab, text_input: bool = False, flatten = False, output_procs=[]):\n super().__init__(None, None, vocab)\n self.text_input = text_input\n self.flatten = flatten\n self.output_procs = output_procs\n\n def read_sent(self, line, idx):\n edge_list = []\n if self.text_input:\n # Node List\n nodes = [sent.LatticeNode(node_id=0, value=vocabs.Vocab.SS)]\n for i, word in enumerate(line.strip().split()):\n nodes.append(sent.LatticeNode(node_id=i+1, value=self.value_vocab.convert(word)))\n nodes.append(sent.LatticeNode(node_id=len(nodes), value=vocabs.Vocab.ES))\n # Flat edge list\n for i in range(len(nodes)-1):\n edge_list.append(HyperEdge(i, [i+1]))\n else:\n node_list, arc_list = ast.literal_eval(line)\n nodes = [sent.LatticeNode(node_id=i,\n value=self.value_vocab.convert(item[0]),\n fwd_log_prob=item[1], marginal_log_prob=item[2], bwd_log_prob=item[3])\n for i, item in enumerate(node_list)]\n if self.flatten:\n for i in range(len(nodes)-1):\n edge_list.append(HyperEdge(i, [i+1]))\n nodes[i].reset_prob()\n nodes[-1].reset_prob()\n else:\n for from_index, to_index in arc_list:\n edge_list.append(HyperEdge(from_index, [to_index]))\n\n assert nodes[0].value == self.value_vocab.SS and nodes[-1].value == self.value_vocab.ES\n # Construct graph\n graph = HyperGraph(edge_list, {node.node_id: node for node in nodes})\n assert len(graph.roots()) == 1 # <SOS>\n assert len(graph.leaves()) == 1 # <EOS>\n # Construct LatticeSentence\n return sent.GraphSentence(idx=idx, graph=graph, value_vocab=self.value_vocab, score=None)\n\n def vocab_size(self):\n return len(self.value_vocab)\n\n\n###### A utility function to read a parallel corpus\ndef read_parallel_corpus(src_reader: InputReader,\n trg_reader: InputReader,\n src_file: str,\n trg_file: str,\n batcher: batchers.Batcher=None,\n sample_sents: Optional[numbers.Integral] = None,\n max_num_sents: Optional[numbers.Integral] = None,\n max_src_len: Optional[numbers.Integral] = None,\n max_trg_len: Optional[numbers.Integral] = None) -> tuple:\n \"\"\"\n A utility function to read a parallel corpus.\n\n Args:\n src_reader:\n trg_reader:\n src_file:\n trg_file:\n batcher:\n sample_sents: if not None, denote the number of sents that should be randomly chosen from all available sents.\n max_num_sents: if not None, 
read only the first this many sents\n max_src_len: skip pair if src side is too long\n max_trg_len: skip pair if trg side is too long\n\n Returns:\n A tuple of (src_data, trg_data, src_batches, trg_batches) where ``*_batches = *_data`` if ``batcher=None``\n \"\"\"\n src_data = []\n trg_data = []\n if sample_sents:\n logger.info(f\"Starting to read {sample_sents} parallel sentences of {src_file} and {trg_file}\")\n src_len = src_reader.count_sents(src_file)\n trg_len = trg_reader.count_sents(trg_file)\n if src_len != trg_len: raise RuntimeError(f\"training src sentences don't match trg sentences: {src_len} != {trg_len}!\")\n if max_num_sents and max_num_sents < src_len: src_len = trg_len = max_num_sents\n filter_ids = np.random.choice(src_len, sample_sents, replace=False)\n else:\n logger.info(f\"Starting to read {src_file} and {trg_file}\")\n filter_ids = None\n src_len, trg_len = 0, 0\n src_train_iterator = src_reader.read_sents(src_file, filter_ids)\n trg_train_iterator = trg_reader.read_sents(trg_file, filter_ids)\n for src_sent, trg_sent in itertools.zip_longest(src_train_iterator, trg_train_iterator):\n if src_sent is None or trg_sent is None:\n raise RuntimeError(f\"training src sentences don't match trg sentences: {src_len or src_reader.count_sents(src_file)} != {trg_len or trg_reader.count_sents(trg_file)}!\")\n if max_num_sents and (max_num_sents <= len(src_data)):\n break\n src_len_ok = max_src_len is None or src_sent.sent_len() <= max_src_len\n trg_len_ok = max_trg_len is None or trg_sent.sent_len() <= max_trg_len\n if src_len_ok and trg_len_ok:\n src_data.append(src_sent)\n trg_data.append(trg_sent)\n\n logger.info(f\"Done reading {src_file} and {trg_file}. Packing into batches.\")\n\n # Pack batches\n if batcher is not None:\n src_batches, trg_batches = batcher.pack(src_data, trg_data)\n else:\n src_batches, trg_batches = src_data, trg_data\n\n logger.info(f\"Done packing batches.\")\n\n return src_data, trg_data, src_batches, trg_batches\n", "import unittest\n\nimport dynet as dy\nimport numpy as np\n\nfrom xnmt.modelparts.attenders import MlpAttender, DotAttender\nfrom xnmt.batchers import mark_as_batch, Mask, SrcBatcher\nfrom xnmt.modelparts.bridges import CopyBridge\nfrom xnmt.modelparts.decoders import AutoRegressiveDecoder\nfrom xnmt.modelparts.embedders import LookupEmbedder\nfrom xnmt.eval.tasks import LossEvalTask\nimport xnmt.events\nfrom xnmt.input_readers import PlainTextReader\nfrom xnmt.transducers.recurrent import UniLSTMSeqTransducer, BiLSTMSeqTransducer\nfrom xnmt.loss_calculators import MLELoss\nfrom xnmt.optimizers import AdamTrainer, DummyTrainer\nfrom xnmt.param_collections import ParamManager\nfrom xnmt.transducers.pyramidal import PyramidalLSTMSeqTransducer\nfrom xnmt.train import regimens\nfrom xnmt.modelparts.transforms import NonLinear\nfrom xnmt.models.translators.default import DefaultTranslator\nfrom xnmt.modelparts.scorers import Softmax\nfrom xnmt.vocabs import Vocab\nfrom xnmt import event_trigger, sent\n\nclass TestTruncatedBatchTraining(unittest.TestCase):\n\n def setUp(self):\n xnmt.events.clear()\n ParamManager.init_param_col()\n\n self.src_reader = PlainTextReader(vocab=Vocab(vocab_file=\"examples/data/head.ja.vocab\"))\n self.trg_reader = PlainTextReader(vocab=Vocab(vocab_file=\"examples/data/head.en.vocab\"))\n self.src_data = list(self.src_reader.read_sents(\"examples/data/head.ja\"))\n self.trg_data = list(self.trg_reader.read_sents(\"examples/data/head.en\"))\n\n def assert_single_loss_equals_batch_loss(self, model, 
pad_src_to_multiple=1):\n \"\"\"\n Tests whether single loss equals batch loss.\n Truncating src / trg sents to same length so no masking is necessary\n \"\"\"\n batch_size=5\n src_sents = self.src_data[:batch_size]\n src_min = min([x.sent_len() for x in src_sents])\n src_sents_trunc = [s.words[:src_min] for s in src_sents]\n for single_sent in src_sents_trunc:\n single_sent[src_min-1] = Vocab.ES\n while len(single_sent)%pad_src_to_multiple != 0:\n single_sent.append(Vocab.ES)\n trg_sents = self.trg_data[:batch_size]\n trg_min = min([x.sent_len() for x in trg_sents])\n trg_sents_trunc = [s.words[:trg_min] for s in trg_sents]\n for single_sent in trg_sents_trunc: single_sent[trg_min-1] = Vocab.ES\n\n src_sents_trunc = [sent.SimpleSentence(words=s) for s in src_sents_trunc]\n trg_sents_trunc = [sent.SimpleSentence(words=s) for s in trg_sents_trunc]\n\n single_loss = 0.0\n for sent_id in range(batch_size):\n dy.renew_cg()\n train_loss, _ = MLELoss().calc_loss(\n model=model,\n src=src_sents_trunc[sent_id],\n trg=trg_sents_trunc[sent_id]).compute()\n single_loss += train_loss.value()\n\n dy.renew_cg()\n\n batched_loss, _ = MLELoss().calc_loss(\n model=model,\n src=mark_as_batch(src_sents_trunc),\n trg=mark_as_batch(trg_sents_trunc)).compute()\n self.assertAlmostEqual(single_loss, np.sum(batched_loss.value()), places=4)\n\n def test_loss_model1(self):\n layer_dim = 512\n model = DefaultTranslator(\n src_reader=self.src_reader,\n trg_reader=self.trg_reader,\n src_embedder=LookupEmbedder(emb_dim=layer_dim, vocab_size=100),\n encoder=BiLSTMSeqTransducer(input_dim=layer_dim, hidden_dim=layer_dim),\n attender=MlpAttender(input_dim=layer_dim, state_dim=layer_dim, hidden_dim=layer_dim),\n decoder=AutoRegressiveDecoder(input_dim=layer_dim,\n embedder=LookupEmbedder(emb_dim=layer_dim, vocab_size=100),\n rnn=UniLSTMSeqTransducer(input_dim=layer_dim,\n hidden_dim=layer_dim,\n decoder_input_dim=layer_dim,\n yaml_path=\"model.decoder.rnn\"),\n transform=NonLinear(input_dim=layer_dim*2, output_dim=layer_dim),\n scorer=Softmax(input_dim=layer_dim, vocab_size=100),\n bridge=CopyBridge(dec_dim=layer_dim, dec_layers=1)),\n )\n event_trigger.set_train(False)\n self.assert_single_loss_equals_batch_loss(model)\n\n def test_loss_model2(self):\n layer_dim = 512\n model = DefaultTranslator(\n src_reader=self.src_reader,\n trg_reader=self.trg_reader,\n src_embedder=LookupEmbedder(emb_dim=layer_dim, vocab_size=100),\n encoder=PyramidalLSTMSeqTransducer(input_dim=layer_dim, hidden_dim=layer_dim, layers=3),\n attender=MlpAttender(input_dim=layer_dim, state_dim=layer_dim, hidden_dim=layer_dim),\n decoder=AutoRegressiveDecoder(input_dim=layer_dim,\n embedder=LookupEmbedder(emb_dim=layer_dim, vocab_size=100),\n rnn=UniLSTMSeqTransducer(input_dim=layer_dim,\n hidden_dim=layer_dim,\n decoder_input_dim=layer_dim,\n yaml_path=\"model.decoder.rnn\"),\n transform=NonLinear(input_dim=layer_dim*2, output_dim=layer_dim),\n scorer=Softmax(input_dim=layer_dim, vocab_size=100),\n bridge=CopyBridge(dec_dim=layer_dim, dec_layers=1)),\n )\n event_trigger.set_train(False)\n self.assert_single_loss_equals_batch_loss(model, pad_src_to_multiple=4)\n\n def test_loss_model3(self):\n layer_dim = 512\n model = DefaultTranslator(\n src_reader=self.src_reader,\n trg_reader=self.trg_reader,\n src_embedder=LookupEmbedder(emb_dim=layer_dim, vocab_size=100),\n encoder=BiLSTMSeqTransducer(input_dim=layer_dim, hidden_dim=layer_dim, layers=3),\n attender=MlpAttender(input_dim=layer_dim, state_dim=layer_dim, hidden_dim=layer_dim),\n 
decoder=AutoRegressiveDecoder(input_dim=layer_dim,\n embedder=LookupEmbedder(emb_dim=layer_dim, vocab_size=100),\n rnn=UniLSTMSeqTransducer(input_dim=layer_dim,\n hidden_dim=layer_dim,\n decoder_input_dim=layer_dim,\n yaml_path=\"model.decoder.rnn\"),\n transform=NonLinear(input_dim=layer_dim*2, output_dim=layer_dim),\n scorer=Softmax(input_dim=layer_dim, vocab_size=100),\n bridge=CopyBridge(dec_dim=layer_dim, dec_layers=1)),\n )\n event_trigger.set_train(False)\n self.assert_single_loss_equals_batch_loss(model)\n\n def test_loss_model4(self):\n layer_dim = 512\n model = DefaultTranslator(\n src_reader=self.src_reader,\n trg_reader=self.trg_reader,\n src_embedder=LookupEmbedder(emb_dim=layer_dim, vocab_size=100),\n encoder=BiLSTMSeqTransducer(input_dim=layer_dim, hidden_dim=layer_dim),\n attender=DotAttender(),\n decoder=AutoRegressiveDecoder(input_dim=layer_dim,\n embedder=LookupEmbedder(emb_dim=layer_dim, vocab_size=100),\n rnn=UniLSTMSeqTransducer(input_dim=layer_dim,\n hidden_dim=layer_dim,\n decoder_input_dim=layer_dim,\n yaml_path=\"model.decoder.rnn\"),\n transform=NonLinear(input_dim=layer_dim*2, output_dim=layer_dim),\n scorer=Softmax(input_dim=layer_dim, vocab_size=100),\n bridge=CopyBridge(dec_dim=layer_dim, dec_layers=1)),\n )\n event_trigger.set_train(False)\n self.assert_single_loss_equals_batch_loss(model)\n\nclass TestBatchTraining(unittest.TestCase):\n\n def setUp(self):\n xnmt.events.clear()\n ParamManager.init_param_col()\n\n self.src_reader = PlainTextReader(vocab=Vocab(vocab_file=\"examples/data/head.ja.vocab\"))\n self.trg_reader = PlainTextReader(vocab=Vocab(vocab_file=\"examples/data/head.en.vocab\"))\n self.src_data = list(self.src_reader.read_sents(\"examples/data/head.ja\"))\n self.trg_data = list(self.trg_reader.read_sents(\"examples/data/head.en\"))\n\n def assert_single_loss_equals_batch_loss(self, model, pad_src_to_multiple=1):\n \"\"\"\n Tests whether single loss equals batch loss.\n Here we don't truncate the target side and use masking.\n \"\"\"\n batch_size = 5\n src_sents = self.src_data[:batch_size]\n src_min = min([x.sent_len() for x in src_sents])\n src_sents_trunc = [s.words[:src_min] for s in src_sents]\n for single_sent in src_sents_trunc:\n single_sent[src_min-1] = Vocab.ES\n while len(single_sent)%pad_src_to_multiple != 0:\n single_sent.append(Vocab.ES)\n trg_sents = sorted(self.trg_data[:batch_size], key=lambda x: x.sent_len(), reverse=True)\n trg_max = max([x.sent_len() for x in trg_sents])\n np_arr = np.zeros([batch_size, trg_max])\n for i in range(batch_size):\n for j in range(trg_sents[i].sent_len(), trg_max):\n np_arr[i,j] = 1.0\n trg_masks = Mask(np_arr)\n trg_sents_padded = [[w for w in s] + [Vocab.ES]*(trg_max-s.sent_len()) for s in trg_sents]\n\n src_sents_trunc = [sent.SimpleSentence(words=s) for s in src_sents_trunc]\n trg_sents_padded = [sent.SimpleSentence(words=s) for s in trg_sents_padded]\n\n single_loss = 0.0\n for sent_id in range(batch_size):\n dy.renew_cg()\n train_loss, _ = MLELoss().calc_loss(\n model=model,\n src=src_sents_trunc[sent_id],\n trg=trg_sents[sent_id]).compute()\n single_loss += train_loss.value()\n\n dy.renew_cg()\n\n batched_loss, _ = MLELoss().calc_loss(\n model=model,\n src=mark_as_batch(src_sents_trunc),\n trg=mark_as_batch(trg_sents_padded, trg_masks)).compute()\n self.assertAlmostEqual(single_loss, np.sum(batched_loss.value()), places=4)\n\n def test_loss_model1(self):\n layer_dim = 512\n model = DefaultTranslator(\n src_reader=self.src_reader,\n trg_reader=self.trg_reader,\n 
src_embedder=LookupEmbedder(emb_dim=layer_dim, vocab_size=100),\n encoder=BiLSTMSeqTransducer(input_dim=layer_dim, hidden_dim=layer_dim),\n attender=MlpAttender(input_dim=layer_dim, state_dim=layer_dim, hidden_dim=layer_dim),\n decoder=AutoRegressiveDecoder(input_dim=layer_dim,\n embedder=LookupEmbedder(emb_dim=layer_dim, vocab_size=100),\n rnn=UniLSTMSeqTransducer(input_dim=layer_dim,\n hidden_dim=layer_dim,\n decoder_input_dim=layer_dim,\n yaml_path=\"model.decoder.rnn\"),\n transform=NonLinear(input_dim=layer_dim*2, output_dim=layer_dim),\n scorer=Softmax(input_dim=layer_dim, vocab_size=100),\n bridge=CopyBridge(dec_dim=layer_dim, dec_layers=1)),\n )\n event_trigger.set_train(False)\n self.assert_single_loss_equals_batch_loss(model)\n\n def test_loss_model2(self):\n layer_dim = 512\n model = DefaultTranslator(\n src_reader=self.src_reader,\n trg_reader=self.trg_reader,\n src_embedder=LookupEmbedder(emb_dim=layer_dim, vocab_size=100),\n encoder=PyramidalLSTMSeqTransducer(layers=3, input_dim=layer_dim, hidden_dim=layer_dim),\n attender=MlpAttender(input_dim=layer_dim, state_dim=layer_dim, hidden_dim=layer_dim),\n decoder=AutoRegressiveDecoder(input_dim=layer_dim,\n embedder=LookupEmbedder(emb_dim=layer_dim, vocab_size=100),\n rnn=UniLSTMSeqTransducer(input_dim=layer_dim,\n hidden_dim=layer_dim,\n decoder_input_dim=layer_dim,\n yaml_path=\"model.decoder.rnn\"),\n transform=NonLinear(input_dim=layer_dim*2, output_dim=layer_dim),\n scorer=Softmax(input_dim=layer_dim, vocab_size=100),\n bridge=CopyBridge(dec_dim=layer_dim, dec_layers=1)),\n )\n event_trigger.set_train(False)\n self.assert_single_loss_equals_batch_loss(model, pad_src_to_multiple=4)\n\n def test_loss_model3(self):\n layer_dim = 512\n model = DefaultTranslator(\n src_reader=self.src_reader,\n trg_reader=self.trg_reader,\n src_embedder=LookupEmbedder(emb_dim=layer_dim, vocab_size=100),\n encoder=BiLSTMSeqTransducer(layers=3, input_dim=layer_dim, hidden_dim=layer_dim),\n attender=MlpAttender(input_dim=layer_dim, state_dim=layer_dim, hidden_dim=layer_dim),\n decoder=AutoRegressiveDecoder(input_dim=layer_dim,\n embedder=LookupEmbedder(emb_dim=layer_dim, vocab_size=100),\n rnn=UniLSTMSeqTransducer(input_dim=layer_dim,\n hidden_dim=layer_dim,\n decoder_input_dim=layer_dim,\n yaml_path=\"model.decoder.rnn\"),\n transform=NonLinear(input_dim=layer_dim*2, output_dim=layer_dim),\n scorer=Softmax(input_dim=layer_dim, vocab_size=100),\n bridge=CopyBridge(dec_dim=layer_dim, dec_layers=1)),\n )\n event_trigger.set_train(False)\n self.assert_single_loss_equals_batch_loss(model)\n\n\nclass TestTrainDevLoss(unittest.TestCase):\n\n def setUp(self):\n xnmt.events.clear()\n ParamManager.init_param_col()\n\n def test_train_dev_loss_equal(self):\n layer_dim = 512\n batcher = SrcBatcher(batch_size=5, break_ties_randomly=False)\n train_args = {}\n train_args['src_file'] = \"examples/data/head.ja\"\n train_args['trg_file'] = \"examples/data/head.en\"\n train_args['loss_calculator'] = MLELoss()\n train_args['model'] = DefaultTranslator(src_reader=PlainTextReader(vocab=Vocab(vocab_file=\"examples/data/head.ja.vocab\")),\n trg_reader=PlainTextReader(vocab=Vocab(vocab_file=\"examples/data/head.en.vocab\")),\n src_embedder=LookupEmbedder(emb_dim=layer_dim, vocab_size=100),\n encoder=BiLSTMSeqTransducer(input_dim=layer_dim, hidden_dim=layer_dim),\n attender=MlpAttender(input_dim=layer_dim, state_dim=layer_dim,\n hidden_dim=layer_dim),\n decoder=AutoRegressiveDecoder(input_dim=layer_dim,\n embedder=LookupEmbedder(emb_dim=layer_dim, vocab_size=100),\n 
rnn=UniLSTMSeqTransducer(input_dim=layer_dim,\n hidden_dim=layer_dim,\n decoder_input_dim=layer_dim,\n yaml_path=\"model.decoder.rnn\"),\n transform=NonLinear(input_dim=layer_dim*2, output_dim=layer_dim),\n scorer=Softmax(input_dim=layer_dim, vocab_size=100),\n bridge=CopyBridge(dec_dim=layer_dim, dec_layers=1)),\n )\n train_args['dev_tasks'] = [LossEvalTask(model=train_args['model'],\n src_file=\"examples/data/head.ja\",\n ref_file=\"examples/data/head.en\",\n batcher=batcher)]\n train_args['trainer'] = DummyTrainer()\n train_args['batcher'] = batcher\n train_args['run_for_epochs'] = 1\n training_regimen = regimens.SimpleTrainingRegimen(**train_args)\n# TODO(broken, fix!)\n# training_regimen.run_training(save_fct = lambda: None)\n# self.assertAlmostEqual(training_regimen.train_loss_tracker.epoch_loss.sum_factors() / training_regimen.train_loss_tracker.epoch_words,\n# training_regimen.dev_loss_tracker.dev_score.loss, places=5)\n\nclass TestOverfitting(unittest.TestCase):\n\n def setUp(self):\n xnmt.events.clear()\n ParamManager.init_param_col()\n\n def test_overfitting(self):\n layer_dim = 16\n batcher = SrcBatcher(batch_size=10, break_ties_randomly=False)\n train_args = {}\n train_args['src_file'] = \"examples/data/head.ja\"\n train_args['trg_file'] = \"examples/data/head.en\"\n train_args['loss_calculator'] = MLELoss()\n train_args['model'] = DefaultTranslator(src_reader=PlainTextReader(vocab=Vocab(vocab_file=\"examples/data/head.ja.vocab\")),\n trg_reader=PlainTextReader(vocab=Vocab(vocab_file=\"examples/data/head.en.vocab\")),\n src_embedder=LookupEmbedder(vocab_size=100, emb_dim=layer_dim),\n encoder=BiLSTMSeqTransducer(input_dim=layer_dim,\n hidden_dim=layer_dim),\n attender=MlpAttender(input_dim=layer_dim,\n state_dim=layer_dim,\n hidden_dim=layer_dim),\n decoder=AutoRegressiveDecoder(input_dim=layer_dim,\n embedder=LookupEmbedder(emb_dim=layer_dim, vocab_size=100),\n rnn=UniLSTMSeqTransducer(input_dim=layer_dim,\n hidden_dim=layer_dim,\n decoder_input_dim=layer_dim,\n yaml_path=\"model.decoder.rnn\"),\n transform=NonLinear(input_dim=layer_dim*2, output_dim=layer_dim),\n scorer=Softmax(input_dim=layer_dim, vocab_size=100),\n bridge=CopyBridge(dec_dim=layer_dim, dec_layers=1)),\n )\n train_args['dev_tasks'] = [LossEvalTask(model=train_args['model'],\n src_file=\"examples/data/head.ja\",\n ref_file=\"examples/data/head.en\",\n batcher=batcher)]\n train_args['run_for_epochs'] = 1\n train_args['trainer'] = AdamTrainer(alpha=0.1)\n train_args['batcher'] = batcher\n training_regimen = regimens.SimpleTrainingRegimen(**train_args)\n# TODO(broken, fix!)\n# for _ in range(50):\n# training_regimen.run_training(save_fct=lambda:None)\n# self.assertAlmostEqual(0.0,\n# training_regimen.train_loss_tracker.epoch_loss.sum_factors() / training_regimen.train_loss_tracker.epoch_words,\n# places=2)\n\nif __name__ == '__main__':\n unittest.main()\n" ]
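A minimal, self-contained sketch (not part of the original files) of how one line in the lattice text format described in LatticeReader's docstring can be parsed. It uses only the standard library's ast.literal_eval, the same call read_sent makes; the dict-based node records and the parse_lattice_line name are illustrative stand-ins for xnmt's sent.LatticeNode / HyperEdge containers.

# Illustrative sketch only; names below are not xnmt APIs.
import ast

def parse_lattice_line(line):
    """Parse one serialized lattice line: a node list and an arc list."""
    node_list, arc_list = ast.literal_eval(line)
    # Each node is (token, fwd_log_prob, marginal_log_prob, bwd_log_prob).
    nodes = [{"id": i, "token": tok, "fwd": fwd, "marginal": marg, "bwd": bwd}
             for i, (tok, fwd, marg, bwd) in enumerate(node_list)]
    # Each arc is (from_node_id, to_node_id), 0-indexed, with from < to.
    edges = [(int(a), int(b)) for a, b in arc_list]
    # All paths share the common start/end nodes <s> and </s>.
    assert nodes[0]["token"] == "<s>" and nodes[-1]["token"] == "</s>"
    return nodes, edges

# The example lattice from the docstring above:
example = ("[('<s>', 0.0, 0.0, 0.0), ('buenas', 0, 0.0, 0.0), "
           "('tardes', 0, 0.0, 0.0), ('</s>', 0.0, 0.0, 0.0)],"
           "[(0, 1), (1, 2), (2, 3)]")
nodes, edges = parse_lattice_line(example)
assert len(nodes) == 4 and edges == [(0, 1), (1, 2), (2, 3)]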
[ [ "numpy.max", "numpy.random.binomial", "numpy.random.choice", "numpy.copyto", "numpy.zeros", "numpy.sum", "numpy.load", "numpy.where", "numpy.arange", "numpy.empty_like" ], [ "numpy.zeros" ] ]