code: string
signature: string
docstring: string
loss_without_docstring: float64
loss_with_docstring: float64
factor: float64
if self.normalization == '4pi':
    norm = 1
elif self.normalization == 'schmidt':
    norm = 2
elif self.normalization == 'unnorm':
    norm = 3
elif self.normalization == 'ortho':
    norm = 4
else:
    raise ValueError(
        "Normalization must be '4pi', 'ortho', 'schmidt', or "
        "'unnorm'. Input value was {:s}".format(repr(self.normalization)))

if degrees is True:
    latin = lat
    lonin = lon
else:
    latin = _np.rad2deg(lat)
    lonin = _np.rad2deg(lon)

if type(lat) is not type(lon):
    raise ValueError('lat and lon must be of the same type. '
                     'Input types are {:s} and {:s}'
                     .format(repr(type(lat)), repr(type(lon))))

if type(lat) is int or type(lat) is float or type(lat) is _np.float_:
    return _shtools.MakeGridPoint(self.coeffs, lat=latin, lon=lonin,
                                  lmax=lmax_calc, norm=norm,
                                  csphase=self.csphase)
elif type(lat) is _np.ndarray:
    values = _np.empty_like(lat, dtype=float)
    for v, latitude, longitude in _np.nditer([values, latin, lonin],
                                             op_flags=['readwrite']):
        v[...] = _shtools.MakeGridPoint(self.coeffs, lat=latitude,
                                        lon=longitude, lmax=lmax_calc,
                                        norm=norm, csphase=self.csphase)
    return values
elif type(lat) is list:
    values = []
    for latitude, longitude in zip(latin, lonin):
        values.append(
            _shtools.MakeGridPoint(self.coeffs, lat=latitude, lon=longitude,
                                   lmax=lmax_calc, norm=norm,
                                   csphase=self.csphase))
    return values
else:
    raise ValueError('lat and lon must be either an int, float, '
                     'ndarray, or list. Input types are {:s} and {:s}'
                     .format(repr(type(lat)), repr(type(lon))))
def _expand_coord(self, lat, lon, lmax_calc, degrees)
Evaluate the function at the coordinates lat and lon.
1.900287
1.888409
1.00629
# Test if the coefficients correspond to a real grid.
# This is not very elegant, and the equality condition
# is probably not robust to round-off errors.
if check:
    for l in self.degrees():
        if self.coeffs[0, l, 0] != self.coeffs[0, l, 0].conjugate():
            raise RuntimeError('Complex coefficients do not correspond '
                               'to a real field. l = {:d}, m = 0: {:e}'
                               .format(l, self.coeffs[0, l, 0]))
        for m in _np.arange(1, l + 1):
            if m % 2 == 1:
                if (self.coeffs[0, l, m] !=
                        -self.coeffs[1, l, m].conjugate()):
                    raise RuntimeError(
                        'Complex coefficients do not correspond to a '
                        'real field. l = {:d}, m = {:d}: {:e}, {:e}'
                        .format(l, m, self.coeffs[0, l, m],
                                self.coeffs[1, l, m]))
            else:
                if (self.coeffs[0, l, m] !=
                        self.coeffs[1, l, m].conjugate()):
                    raise RuntimeError(
                        'Complex coefficients do not correspond to a '
                        'real field. l = {:d}, m = {:d}: {:e}, {:e}'
                        .format(l, m, self.coeffs[0, l, m],
                                self.coeffs[1, l, m]))

coeffs_rc = _np.zeros((2, self.lmax + 1, self.lmax + 1))
coeffs_rc[0, :, :] = self.coeffs[0, :, :].real
coeffs_rc[1, :, :] = self.coeffs[0, :, :].imag
real_coeffs = _shtools.SHctor(coeffs_rc, convention=1, switchcs=0)
return SHCoeffs.from_array(real_coeffs,
                           normalization=self.normalization,
                           csphase=self.csphase)
def _make_real(self, check=True)
Convert the complex SHCoeffs class to the real class.
2.425612
2.315335
1.047629
# Note that the current method is EXTREMELY inefficient. The complex
# coefficients are expanded onto real and imaginary grids, each of
# the two components is rotated separately as real data, the rotated
# real data are re-expanded on new real and complex grids, they are
# combined to make a complex grid, and the result is expanded
# in complex spherical harmonics.
if dj_matrix is None:
    dj_matrix = _shtools.djpi2(self.lmax + 1)

cgrid = self.expand(grid='DH')
rgrid, igrid = cgrid.data.real, cgrid.data.imag
rgridcoeffs = _shtools.SHExpandDH(rgrid, norm=1, sampling=1, csphase=1)
igridcoeffs = _shtools.SHExpandDH(igrid, norm=1, sampling=1, csphase=1)

rgridcoeffs_rot = _shtools.SHRotateRealCoef(rgridcoeffs, angles, dj_matrix)
igridcoeffs_rot = _shtools.SHRotateRealCoef(igridcoeffs, angles, dj_matrix)

rgrid_rot = _shtools.MakeGridDH(rgridcoeffs_rot, norm=1, sampling=1,
                                csphase=1)
igrid_rot = _shtools.MakeGridDH(igridcoeffs_rot, norm=1, sampling=1,
                                csphase=1)
grid_rot = rgrid_rot + 1j * igrid_rot

if self.normalization == '4pi':
    norm = 1
elif self.normalization == 'schmidt':
    norm = 2
elif self.normalization == 'unnorm':
    norm = 3
elif self.normalization == 'ortho':
    norm = 4
else:
    raise ValueError(
        "Normalization must be '4pi', 'ortho', 'schmidt', or "
        "'unnorm'. Input value was {:s}".format(repr(self.normalization)))

coeffs_rot = _shtools.SHExpandDHC(grid_rot, norm=norm,
                                  csphase=self.csphase)
return SHCoeffs.from_array(coeffs_rot,
                           normalization=self.normalization,
                           csphase=self.csphase, copy=False)
def _rotate(self, angles, dj_matrix)
Rotate the coefficients by the Euler angles alpha, beta, gamma.
3.200054
3.134976
1.020759
if self.normalization == '4pi':
    norm = 1
elif self.normalization == 'schmidt':
    norm = 2
elif self.normalization == 'unnorm':
    norm = 3
elif self.normalization == 'ortho':
    norm = 4
else:
    raise ValueError(
        "Normalization must be '4pi', 'ortho', 'schmidt', or "
        "'unnorm'. Input value was {:s}".format(repr(self.normalization)))

data = _shtools.MakeGridDHC(self.coeffs, sampling=sampling,
                            norm=norm, csphase=self.csphase,
                            lmax=lmax, lmax_calc=lmax_calc)
gridout = SHGrid.from_array(data, grid='DH', copy=False)
return gridout
def _expandDH(self, sampling, lmax, lmax_calc)
Evaluate the coefficients on a Driscoll and Healy (1994) grid.
2.961433
2.919255
1.014448
if self.normalization == '4pi':
    norm = 1
elif self.normalization == 'schmidt':
    norm = 2
elif self.normalization == 'unnorm':
    norm = 3
elif self.normalization == 'ortho':
    norm = 4
else:
    raise ValueError(
        "Normalization must be '4pi', 'ortho', 'schmidt', or "
        "'unnorm'. Input value was {:s}".format(repr(self.normalization)))

if zeros is None:
    zeros, weights = _shtools.SHGLQ(self.lmax)

data = _shtools.MakeGridGLQC(self.coeffs, zeros, norm=norm,
                             csphase=self.csphase, lmax=lmax,
                             lmax_calc=lmax_calc)
gridout = SHGrid.from_array(data, grid='GLQ', copy=False)
return gridout
def _expandGLQ(self, zeros, lmax, lmax_calc)
Evaluate the coefficients on a Gauss-Legendre quadrature grid.
3.245352
3.231054
1.004425
if _np.iscomplexobj(array):
    kind = 'complex'
else:
    kind = 'real'

if type(grid) != str:
    raise ValueError('grid must be a string. Input type was {:s}'
                     .format(str(type(grid))))

if grid.upper() not in set(['DH', 'GLQ']):
    raise ValueError("grid must be 'DH' or 'GLQ'. Input value was {:s}."
                     .format(repr(grid)))

for cls in self.__subclasses__():
    if cls.istype(kind) and cls.isgrid(grid):
        return cls(array, copy=copy)
def from_array(self, array, grid='DH', copy=True)
Initialize the class instance from an input array.

Usage
-----
x = SHGrid.from_array(array, [grid, copy])

Returns
-------
x : SHGrid class instance

Parameters
----------
array : ndarray, shape (nlat, nlon)
    2-D numpy array of the gridded data, where nlat and nlon are the
    number of latitudinal and longitudinal bands, respectively.
grid : str, optional, default = 'DH'
    'DH' or 'GLQ' for Driscoll and Healy grids or Gauss-Legendre
    Quadrature grids, respectively.
copy : bool, optional, default = True
    If True (default), make a copy of array when initializing the class
    instance. If False, initialize the class instance with a reference
    to array.
2.930494
3.18914
0.918898
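A minimal usage sketch for from_array(); the array contents are invented example data, and pyshtools is assumed to be importable as pysh:

import numpy as np
import pyshtools as pysh

nlat = 64
data = np.random.randn(nlat, 2 * nlat)  # hypothetical (nlat, 2*nlat) DH grid

grid = pysh.SHGrid.from_array(data, grid='DH')
print(grid.nlat, grid.nlon)  # 64 128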
if binary is False:
    data = _np.loadtxt(fname, **kwargs)
elif binary is True:
    data = _np.load(fname, **kwargs)
else:
    raise ValueError('binary must be True or False. '
                     'Input value is {:s}'.format(repr(binary)))

if _np.iscomplexobj(data):
    kind = 'complex'
else:
    kind = 'real'

if (data.shape[1] == data.shape[0]) or (data.shape[1] ==
                                        2 * data.shape[0]):
    grid = 'DH'
elif data.shape[1] == 2 * data.shape[0] - 1:
    grid = 'GLQ'
else:
    raise ValueError('Input grid must be dimensioned as (nlat, nlon). '
                     'For DH grids, nlon = nlat or nlon = 2 * nlat. '
                     'For GLQ grids, nlon = 2 * nlat - 1. Input '
                     'dimensions are nlat = {:d}, nlon = {:d}'
                     .format(data.shape[0], data.shape[1]))

for cls in self.__subclasses__():
    if cls.istype(kind) and cls.isgrid(grid):
        return cls(data)
def from_file(self, fname, binary=False, **kwargs)
Initialize the class instance from gridded data in a file.

Usage
-----
x = SHGrid.from_file(fname, [binary, **kwargs])

Returns
-------
x : SHGrid class instance

Parameters
----------
fname : str
    The filename containing the gridded data. For text files (default)
    the file is read using the numpy routine loadtxt(), whereas for
    binary files, the file is read using numpy.load(). The dimensions
    of the array must be nlon = nlat or nlon = 2 * nlat for Driscoll
    and Healy grids, or nlon = 2 * nlat - 1 for Gauss-Legendre
    Quadrature grids.
binary : bool, optional, default = False
    If False, read a text file. If True, read a binary 'npy' file.
**kwargs : keyword arguments, optional
    Keyword arguments of numpy.loadtxt() or numpy.load().
2.657515
2.500661
1.062725
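A short reading sketch; 'topo.dat' is a hypothetical text file whose array is dimensioned (nlat, 2 * nlat), so the grid type is inferred as 'DH' by the shape test above:

grid = pysh.SHGrid.from_file('topo.dat')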
if binary is False:
    _np.savetxt(filename, self.data, **kwargs)
elif binary is True:
    _np.save(filename, self.data, **kwargs)
else:
    raise ValueError('binary must be True or False. '
                     'Input value is {:s}'.format(repr(binary)))
def to_file(self, filename, binary=False, **kwargs)
Save gridded data to a file.

Usage
-----
x.to_file(filename, [binary, **kwargs])

Parameters
----------
filename : str
    Name of output file. For text files (default), the file will be
    saved automatically in gzip compressed format if the filename ends
    in .gz.
binary : bool, optional, default = False
    If False, save as text using numpy.savetxt(). If True, save as a
    'npy' binary file using numpy.save().
**kwargs : keyword arguments, optional
    Keyword arguments of numpy.savetxt() and numpy.save().
2.858886
3.042225
0.939735
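A round-trip sketch pairing to_file() with from_file(), assuming grid is an existing SHGrid instance; the filenames are hypothetical:

grid.to_file('grid.dat')                  # text, via numpy.savetxt()
same = pysh.SHGrid.from_file('grid.dat')

grid.to_file('grid.npy', binary=True)     # binary, via numpy.save()
same = pysh.SHGrid.from_file('grid.npy', binary=True)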
if degrees is False:
    return _np.radians(self._lats())
else:
    return self._lats()
def lats(self, degrees=True)
Return the latitudes of each row of the gridded data.

Usage
-----
lats = x.lats([degrees])

Returns
-------
lats : ndarray, shape (nlat)
    1-D numpy array of size nlat containing the latitude of each row
    of the gridded data.

Parameters
----------
degrees : bool, optional, default = True
    If True, the output will be in degrees. If False, the output will
    be in radians.
3.868449
6.045758
0.639862
if degrees is False:
    return _np.radians(self._lons())
else:
    return self._lons()
def lons(self, degrees=True)
Return the longitudes of each column of the gridded data.

Usage
-----
lons = x.lons([degrees])

Returns
-------
lons : ndarray, shape (nlon)
    1-D numpy array of size nlon containing the longitude of each
    column of the gridded data.

Parameters
----------
degrees : bool, optional, default = True
    If True, the output will be in degrees. If False, the output will
    be in radians.
3.82057
6.155618
0.620664
if type(normalization) != str:
    raise ValueError('normalization must be a string. '
                     'Input type was {:s}'
                     .format(str(type(normalization))))

if normalization.lower() not in ('4pi', 'ortho', 'schmidt', 'unnorm'):
    raise ValueError(
        "The normalization must be '4pi', 'ortho', 'schmidt', "
        "or 'unnorm'. Input value was {:s}.".format(repr(normalization)))

if csphase != 1 and csphase != -1:
    raise ValueError(
        "csphase must be either 1 or -1. Input value was {:s}."
        .format(repr(csphase)))

return self._expand(normalization=normalization, csphase=csphase,
                    **kwargs)
def expand(self, normalization='4pi', csphase=1, **kwargs)
Expand the grid into spherical harmonics.

Usage
-----
clm = x.expand([normalization, csphase, lmax_calc])

Returns
-------
clm : SHCoeffs class instance

Parameters
----------
normalization : str, optional, default = '4pi'
    Normalization of the output class: '4pi', 'ortho', 'schmidt', or
    'unnorm', for geodesy 4pi normalized, orthonormalized, Schmidt
    semi-normalized, or unnormalized coefficients, respectively.
csphase : int, optional, default = 1
    Condon-Shortley phase convention: 1 to exclude the phase factor,
    or -1 to include it.
lmax_calc : int, optional, default = x.lmax
    Maximum spherical harmonic degree to return.
1.924927
2.002282
0.961366
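A one-line sketch of the grid-to-coefficients path, assuming grid is an SHGrid instance:

clm = grid.expand(normalization='schmidt', csphase=1)
print(clm.lmax)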
lats = _np.linspace(90.0, -90.0 + 180.0 / self.nlat, num=self.nlat)
return lats
def _lats(self)
Return the latitudes (in degrees) of the gridded data.
3.655902
3.148688
1.161087
lons = _np.linspace(0.0, 360.0 - 360.0 / self.nlon, num=self.nlon)
return lons
def _lons(self)
Return the longitudes (in degrees) of the gridded data.
3.594126
3.114101
1.154146
if normalization.lower() == '4pi':
    norm = 1
elif normalization.lower() == 'schmidt':
    norm = 2
elif normalization.lower() == 'unnorm':
    norm = 3
elif normalization.lower() == 'ortho':
    norm = 4
else:
    raise ValueError(
        "The normalization must be '4pi', 'ortho', 'schmidt', "
        "or 'unnorm'. Input value was {:s}.".format(repr(normalization)))

cilm = _shtools.SHExpandDH(self.data, norm=norm, csphase=csphase,
                           sampling=self.sampling, **kwargs)
coeffs = SHCoeffs.from_array(cilm,
                             normalization=normalization.lower(),
                             csphase=csphase, copy=False)
return coeffs
def _expand(self, normalization, csphase, **kwargs)
Expand the grid into real spherical harmonics.
2.568031
2.469512
1.039894
if ax is None:
    if colorbar is True:
        if cb_orientation == 'horizontal':
            scale = 0.67
        else:
            scale = 0.5
    else:
        scale = 0.55
    figsize = (_mpl.rcParams['figure.figsize'][0],
               _mpl.rcParams['figure.figsize'][0] * scale)
    fig, axes = _plt.subplots(1, 1, figsize=figsize)
else:
    axes = ax

deg = r'$^{\circ}$'
xticklabels = [str(int(y)) + deg for y in xticks]
yticklabels = [str(int(y)) + deg for y in yticks]

cim = axes.imshow(self.data, origin='upper',
                  extent=(0., 360., -90., 90.), **kwargs)
axes.set(xticks=xticks, yticks=yticks)
axes.set_xlabel(xlabel, fontsize=axes_labelsize)
axes.set_ylabel(ylabel, fontsize=axes_labelsize)
axes.set_xticklabels(xticklabels, fontsize=tick_labelsize)
axes.set_yticklabels(yticklabels, fontsize=tick_labelsize)
axes.set_xticks(minor_xticks, minor=True)
axes.set_yticks(minor_yticks, minor=True)
axes.grid(grid, which='major')

if colorbar is True:
    if cb_orientation == 'vertical':
        divider = _make_axes_locatable(axes)
        cax = divider.append_axes("right", size="2.5%", pad=0.15)
        cbar = _plt.colorbar(cim, cax=cax, orientation=cb_orientation)
    else:
        divider = _make_axes_locatable(axes)
        cax = divider.append_axes("bottom", size="5%", pad=0.5)
        cbar = _plt.colorbar(cim, cax=cax, orientation=cb_orientation)
    if cb_label is not None:
        cbar.set_label(cb_label, fontsize=axes_labelsize)
    cbar.ax.tick_params(labelsize=tick_labelsize)

if ax is None:
    return fig, axes
def _plot(self, xticks=[], yticks=[], minor_xticks=[], minor_yticks=[],
          xlabel='Longitude', ylabel='Latitude', ax=None, ax2=None,
          colorbar=None, cb_orientation=None, cb_label=None, grid=False,
          axes_labelsize=None, tick_labelsize=None, **kwargs)
Plot the raw data using a simple cylindrical projection.
1.686149
1.697837
0.993116
lats = 90. - _np.arccos(self.zeros) * 180. / _np.pi
return lats
def _lats(self)
Return a vector containing the latitudes (in degrees) of each row of the gridded data.
4.812286
5.948727
0.808961
return self.geoid.plot(colorbar=colorbar, cb_orientation=cb_orientation, cb_label=cb_label, show=True, **kwargs)
def plot(self, colorbar=True, cb_orientation='vertical', cb_label='geoid, m', show=True, **kwargs)
Plot the geoid.

Usage
-----
x.plot([tick_interval, xlabel, ylabel, ax, colorbar, cb_orientation,
        cb_label, show, fname, **kwargs])

Parameters
----------
tick_interval : list or tuple, optional, default = [30, 30]
    Intervals to use when plotting the x and y ticks. If set to None,
    ticks will not be plotted.
xlabel : str, optional, default = 'longitude'
    Label for the longitude axis.
ylabel : str, optional, default = 'latitude'
    Label for the latitude axis.
ax : matplotlib axes object, optional, default = None
    A single matplotlib axes object where the plot will appear.
colorbar : bool, optional, default = True
    If True, plot a colorbar.
cb_orientation : str, optional, default = 'vertical'
    Orientation of the colorbar: either 'vertical' or 'horizontal'.
cb_label : str, optional, default = 'geoid, m'
    Text label for the colorbar.
show : bool, optional, default = True
    If True, plot the image to the screen.
fname : str, optional, default = None
    If present, and if ax is not specified, save the image to the
    specified file.
kwargs : optional
    Keyword arguments that will be sent to the SHGrid.plot() and
    plt.imshow() methods.
2.384489
3.34575
0.712692
date = str(date)

if '.' in date:
    date, residual = str(date).split('.')
    residual = float('0.' + residual)
else:
    residual = 0.0

date = _datetime.datetime.strptime(date, '%Y%m%d')
date += _datetime.timedelta(days=residual)

year = date.year
year_start = _datetime.datetime(year=year, month=1, day=1)
next_year_start = _datetime.datetime(year=year + 1, month=1, day=1)
year_duration = next_year_start - year_start
year_elapsed = date - year_start
fraction = year_elapsed / year_duration

return year + fraction
def _yyyymmdd_to_year_fraction(date)
Convert a YYYYMMDD.DD date string or float to a YYYY.YYY decimal year.
2.296522
2.168294
1.059138
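A worked example of the conversion, assuming the function above is in scope: 20190701.5 is noon on 1 July 2019, which lies 181.5 days into a 365-day year, so the result is 2019 + 181.5/365 ≈ 2019.497.

print(_yyyymmdd_to_year_fraction(20190701.5))  # ≈ 2019.497
print(_yyyymmdd_to_year_fraction('20190101'))  # 2019.0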
# --- input data filename ---
infile = os.path.join(os.path.dirname(__file__),
                      '../../ExampleDataFiles/MarsTopo719.shape')
coeffs, lmax = shio.shread(infile)

# --- plot grid ---
grid = expand.MakeGridDH(coeffs, csphase=-1)
fig_map = plt.figure()
plt.imshow(grid)

# ---- compute spectrum ----
ls = np.arange(lmax + 1)
pspectrum = spectralanalysis.spectrum(coeffs, unit='per_l')
pdensity = spectralanalysis.spectrum(coeffs, unit='per_lm')

# ---- plot spectrum ----
fig_spectrum, ax = plt.subplots(1, 1)
ax.set_xscale('log')
ax.set_yscale('log')
ax.set_xlabel('degree l')
ax.grid(True, which='both')
ax.plot(ls[1:], pspectrum[1:], label='power per degree l')
ax.plot(ls[1:], pdensity[1:], label='power per degree l and order m')
ax.legend()

fig_map.savefig('SHRtopography_mars.png')
fig_spectrum.savefig('SHRspectrum_mars.png')
print('mars topography and spectrum saved')
def example()
example that plots the power spectrum of Mars topography data
5.097108
4.570402
1.115243
if normalization.lower() not in ('4pi', 'ortho', 'schmidt', 'unnorm'):
    raise ValueError(
        "The normalization must be '4pi', 'ortho', 'schmidt', "
        "or 'unnorm'. Input value was {:s}.".format(repr(normalization)))

if csphase != 1 and csphase != -1:
    raise ValueError(
        "csphase must be either 1 or -1. Input value was {:s}."
        .format(repr(csphase)))

if normalization.lower() == 'unnorm' and lmax > 85:
    _warnings.warn("Calculations using unnormalized coefficients "
                   "are stable only for degrees less than or equal "
                   "to 85. lmax for the coefficients will be set to "
                   "85. Input value was {:d}.".format(lmax),
                   category=RuntimeWarning)
    lmax = 85

coeffs = _np.zeros((2, lmax + 1, lmax + 1))
coeffs[0, 0, 0] = 1.0

if errors is False:
    clm = SHGravRealCoeffs(coeffs, gm=gm, r0=r0, omega=omega,
                           normalization=normalization.lower(),
                           csphase=csphase)
else:
    clm = SHGravRealCoeffs(coeffs, gm=gm, r0=r0, omega=omega,
                           errors=_np.zeros((2, lmax + 1, lmax + 1)),
                           normalization=normalization.lower(),
                           csphase=csphase)
return clm
def from_zeros(self, lmax, gm, r0, omega=None, errors=False,
               normalization='4pi', csphase=1)
Initialize the class with spherical harmonic coefficients set to zero
from degree 1 to lmax, and set the degree 0 term to 1.

Usage
-----
x = SHGravCoeffs.from_zeros(lmax, gm, r0, [omega, errors,
                                           normalization, csphase])

Returns
-------
x : SHGravCoeffs class instance.

Parameters
----------
lmax : int
    The maximum spherical harmonic degree l of the coefficients.
gm : float
    The gravitational constant times the mass that is associated with
    the gravitational potential coefficients.
r0 : float
    The reference radius of the spherical harmonic coefficients.
omega : float, optional, default = None
    The angular rotation rate of the body.
errors : bool, optional, default = False
    If True, initialize the attribute errors with zeros.
normalization : str, optional, default = '4pi'
    '4pi', 'ortho', 'schmidt', or 'unnorm' for geodesy 4pi normalized,
    orthonormalized, Schmidt semi-normalized, or unnormalized
    coefficients, respectively.
csphase : int, optional, default = 1
    Condon-Shortley phase convention: 1 to exclude the phase factor,
    or -1 to include it.
2.158407
1.990341
1.084441
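A usage sketch, with gm and r0 values roughly appropriate for the Earth (illustrative only):

import pyshtools as pysh

clm = pysh.SHGravCoeffs.from_zeros(lmax=10, gm=3.9860044e14, r0=6.378137e6)
print(clm.coeffs[0, 0, 0])  # 1.0; every other coefficient is zero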
if format == 'shtools':
    if errors is True and self.errors is None:
        raise ValueError('Cannot save errors when they have not been '
                         'initialized.')

    if self.omega is None:
        omega = 0.
    else:
        omega = self.omega

    with open(filename, mode='w') as file:
        if header is not None:
            file.write(header + '\n')
        file.write('{:.16e}, {:.16e}, {:.16e}, {:d}\n'.format(
            self.r0, self.gm, omega, self.lmax))
        for l in range(self.lmax + 1):
            for m in range(l + 1):
                if errors is True:
                    file.write('{:d}, {:d}, {:.16e}, {:.16e}, '
                               '{:.16e}, {:.16e}\n'
                               .format(l, m, self.coeffs[0, l, m],
                                       self.coeffs[1, l, m],
                                       self.errors[0, l, m],
                                       self.errors[1, l, m]))
                else:
                    file.write('{:d}, {:d}, {:.16e}, {:.16e}\n'
                               .format(l, m, self.coeffs[0, l, m],
                                       self.coeffs[1, l, m]))
elif format == 'npy':
    _np.save(filename, self.coeffs, **kwargs)
else:
    raise NotImplementedError(
        'format={:s} not implemented'.format(repr(format)))
def to_file(self, filename, format='shtools', header=None, errors=False, **kwargs)
Save spherical harmonic coefficients to a file.

Usage
-----
x.to_file(filename, [format='shtools', header, errors])
x.to_file(filename, [format='npy', **kwargs])

Parameters
----------
filename : str
    Name of the output file.
format : str, optional, default = 'shtools'
    'shtools' or 'npy'. See method from_file() for more information.
header : str, optional, default = None
    A header string written to an 'shtools'-formatted file directly
    before the spherical harmonic coefficients.
errors : bool, optional, default = False
    If True, save the errors in the file (for 'shtools'-formatted
    files only).
**kwargs : keyword argument list, optional for format = 'npy'
    Keyword arguments of numpy.save().

Description
-----------
If format='shtools', the coefficients and meta-data will be written to
an ascii formatted file. The first line is an optional user-provided
header line, and the following line provides the attributes r0, gm,
omega, and lmax. The spherical harmonic coefficients are then listed,
with increasing degree and order, with the format

l, m, coeffs[0, l, m], coeffs[1, l, m]

where l and m are the spherical harmonic degree and order,
respectively. If the errors are to be saved, the format of each line
will be

l, m, coeffs[0, l, m], coeffs[1, l, m], error[0, l, m], error[1, l, m]

If format='npy', the spherical harmonic coefficients (but not the
meta-data nor errors) will be saved to a binary numpy 'npy' file using
numpy.save().
2.080892
1.811617
1.148638
if lmax is None:
    lmax = self.lmax

clm = self.pad(lmax)

if gm is not None and gm != self.gm:
    clm.coeffs *= self.gm / gm
    clm.gm = gm
    if self.errors is not None:
        clm.errors *= self.gm / gm

if r0 is not None and r0 != self.r0:
    for l in _np.arange(lmax + 1):
        clm.coeffs[:, l, :l + 1] *= (self.r0 / r0)**l
        if self.errors is not None:
            clm.errors[:, l, :l + 1] *= (self.r0 / r0)**l
    clm.r0 = r0

return clm
def change_ref(self, gm=None, r0=None, lmax=None)
Return a new SHGravCoeffs class instance with a different reference
gm or r0.

Usage
-----
clm = x.change_ref([gm, r0, lmax])

Returns
-------
clm : SHGravCoeffs class instance.

Parameters
----------
gm : float, optional, default = self.gm
    The gravitational constant times the mass that is associated with
    the gravitational potential coefficients.
r0 : float, optional, default = self.r0
    The reference radius of the spherical harmonic coefficients.
lmax : int, optional, default = self.lmax
    Maximum spherical harmonic degree to output.

Description
-----------
This method returns a new class instance of the gravitational
potential, but using a different reference gm or r0. When changing the
reference radius r0, the spherical harmonic coefficients will be
upward or downward continued under the assumption that the reference
radius is exterior to the body.
2.321173
2.34927
0.98804
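Because each degree-l coefficient is rescaled by (self.r0 / r0)**l, moving the reference radius outward attenuates the high-degree terms. A minimal sketch, assuming clm is an existing SHGravCoeffs instance:

clm_up = clm.change_ref(r0=clm.r0 + 100.e3)  # upward continue by 100 km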
if a is None:
    a = self.r0
if f is None:
    f = 0.
if normal_gravity is True:
    ng = 1
else:
    ng = 0
if lmax is None:
    lmax = self.lmax
if lmax_calc is None:
    lmax_calc = lmax

if self.errors is not None:
    coeffs, errors = self.to_array(normalization='4pi', csphase=1)
else:
    coeffs = self.to_array(normalization='4pi', csphase=1)

rad, theta, phi, total, pot = _MakeGravGridDH(
    coeffs, self.gm, self.r0, a=a, f=f, lmax=lmax,
    lmax_calc=lmax_calc, sampling=sampling, omega=self.omega,
    normal_gravity=ng)

return _SHGravGrid(rad, theta, phi, total, pot, self.gm, a, f,
                   self.omega, normal_gravity, lmax, lmax_calc)
def expand(self, a=None, f=None, lmax=None, lmax_calc=None,
           normal_gravity=True, sampling=2)
Create 2D cylindrical maps on a flattened and rotating ellipsoid of
all three components of the gravity field, the gravity disturbance,
and the gravitational potential, and return as a SHGravGrid class
instance.

Usage
-----
grav = x.expand([a, f, lmax, lmax_calc, normal_gravity, sampling])

Returns
-------
grav : SHGravGrid class instance.

Parameters
----------
a : optional, float, default = self.r0
    The semi-major axis of the flattened ellipsoid on which the field
    is computed.
f : optional, float, default = 0
    The flattening of the reference ellipsoid: f = (a - b) / a.
lmax : optional, integer, default = self.lmax
    The maximum spherical harmonic degree, which determines the number
    of samples of the output grids, n = 2 lmax + 2, and the
    latitudinal sampling interval, 90 / (lmax + 1).
lmax_calc : optional, integer, default = lmax
    The maximum spherical harmonic degree used in evaluating the
    functions. This must be less than or equal to lmax.
normal_gravity : optional, bool, default = True
    If True, the normal gravity (the gravitational acceleration on the
    ellipsoid) will be subtracted from the total gravity, yielding the
    "gravity disturbance." This is done using Somigliana's formula
    (after converting geocentric to geodetic coordinates).
sampling : optional, integer, default = 2
    If 1 the output grids are equally sampled (n by n). If 2
    (default), the grids are equally spaced in degrees (n by 2n).

Description
-----------
This method will create 2-dimensional cylindrical maps of the three
components of the gravity field, the total field, and the
gravitational potential, and return these as an SHGravGrid class
instance. Each map is stored as an SHGrid class instance using
Driscoll and Healy grids that are either equally sampled (n by n) or
equally spaced (n by 2n) in latitude and longitude. All grids use
geocentric coordinates, the output is in SI units, and the sign of the
radial components is positive when directed upwards.

If the optional angular rotation rate omega is specified in the
SHGravCoeffs instance, the potential and radial gravitational
acceleration will be calculated in a body-fixed rotating reference
frame. If normal_gravity is set to True, the normal gravity will be
removed from the total field, yielding the gravity disturbance.

The gravitational potential is given by

V = GM/r Sum_{l=0}^lmax (r0/r)^l Sum_{m=-l}^l C_{lm} Y_{lm},

and the gravitational acceleration is B = Grad V. The coefficients are
referenced to the radius r0, and the function is computed on a
flattened ellipsoid with semi-major axis a (i.e., the mean equatorial
radius) and flattening f. To convert m/s^2 to mGals, multiply the
gravity grids by 10^5.
2.935995
2.585497
1.135563
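A sketch of evaluating the field, assuming clm is an SHGravCoeffs instance and that the returned SHGravGrid exposes its maps as SHGrid attributes (rad, theta, phi, total, pot, as in the constructor call above); the WGS84 flattening is used purely as an illustrative value:

grav = clm.expand(a=clm.r0, f=1.0 / 298.257223563, lmax=179)
grav.total.plot()  # gravity disturbance when normal_gravity=True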
if dj_matrix is None:
    dj_matrix = _shtools.djpi2(self.lmax + 1)

# The coefficients need to be 4pi normalized with csphase = 1
coeffs = _shtools.SHRotateRealCoef(
    self.to_array(normalization='4pi', csphase=1), angles, dj_matrix)

# Convert 4pi normalized coefficients to the same normalization
# as the unrotated coefficients.
if self.normalization != '4pi' or self.csphase != 1:
    temp = _convert(coeffs, normalization_in='4pi', csphase=1,
                    normalization_out=self.normalization,
                    csphase_out=self.csphase)
    return SHGravCoeffs.from_array(
        temp, normalization=self.normalization,
        csphase=self.csphase, copy=False, gm=gm, r0=r0, omega=omega)
else:
    return SHGravCoeffs.from_array(coeffs, gm=gm, r0=r0, omega=omega,
                                   copy=False)
def _rotate(self, angles, dj_matrix, gm=None, r0=None, omega=None)
Rotate the coefficients by the Euler angles alpha, beta, gamma.
3.299008
3.165527
1.042167
path = '/ws/v1/node/apps'

legal_states = set([s for s, _ in ApplicationState])
if state is not None and state not in legal_states:
    msg = 'Application State %s is illegal' % (state,)
    raise IllegalArgumentError(msg)

loc_args = (
    ('state', state),
    ('user', user))
params = self.construct_parameters(loc_args)

return self.request(path, **params)
def node_applications(self, state=None, user=None)
With the Applications API, you can obtain a collection of resources,
each of which represents an application.

:param str state: application state
:param str user: user name
:returns: API response object with JSON data
:rtype: :py:class:`yarn_api_client.base.Response`
:raises yarn_api_client.errors.IllegalArgumentError: if `state`
    incorrect
4.639808
4.240712
1.094111
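A usage sketch for this wrapper, with a hypothetical NodeManager host and the default web UI port; the NodeManager class and Response.data attribute are assumed to be exposed by yarn_api_client as in the library this code appears to come from:

from yarn_api_client import NodeManager

nm = NodeManager('node1.example.com', 8042)
response = nm.node_applications(state='RUNNING', user='alice')
print(response.data)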
path = '/ws/v1/node/apps/{appid}'.format(appid=application_id)
return self.request(path)
def node_application(self, application_id)
An application resource contains information about a particular
application that was run or is running on this NodeManager.

:param str application_id: The application id
:returns: API response object with JSON data
:rtype: :py:class:`yarn_api_client.base.Response`
5.64607
5.897153
0.957423
path = '/ws/v1/node/containers/{containerid}'.format(
    containerid=container_id)
return self.request(path)
def node_container(self, container_id)
A container resource contains information about a particular container
that is running on this NodeManager.

:param str container_id: The container id
:returns: API response object with JSON data
:rtype: :py:class:`yarn_api_client.base.Response`
5.400211
5.495079
0.982736
path = '/ws/v1/cluster/apps'

legal_states = set([s for s, _ in YarnApplicationState])
if state is not None and state not in legal_states:
    msg = 'Yarn Application State %s is illegal' % (state,)
    raise IllegalArgumentError(msg)

legal_final_statuses = set([s for s, _ in FinalApplicationStatus])
if final_status is not None and final_status not in legal_final_statuses:
    msg = 'Final Application Status %s is illegal' % (final_status,)
    raise IllegalArgumentError(msg)

loc_args = (
    ('state', state),
    ('finalStatus', final_status),
    ('user', user),
    ('queue', queue),
    ('limit', limit),
    ('startedTimeBegin', started_time_begin),
    ('startedTimeEnd', started_time_end),
    ('finishedTimeBegin', finished_time_begin),
    ('finishedTimeEnd', finished_time_end))
params = self.construct_parameters(loc_args)

return self.request(path, **params)
def cluster_applications(self, state=None, final_status=None, user=None,
                         queue=None, limit=None, started_time_begin=None,
                         started_time_end=None, finished_time_begin=None,
                         finished_time_end=None)
With the Applications API, you can obtain a collection of resources,
each of which represents an application.

:param str state: state of the application
:param str final_status: the final status of the application -
    reported by the application itself
:param str user: user name
:param str queue: queue name
:param str limit: total number of app objects to be returned
:param str started_time_begin: applications with start time beginning
    with this time, specified in ms since epoch
:param str started_time_end: applications with start time ending with
    this time, specified in ms since epoch
:param str finished_time_begin: applications with finish time
    beginning with this time, specified in ms since epoch
:param str finished_time_end: applications with finish time ending
    with this time, specified in ms since epoch
:returns: API response object with JSON data
:rtype: :py:class:`yarn_api_client.base.Response`
:raises yarn_api_client.errors.IllegalArgumentError: if `state` or
    `final_status` incorrect
2.084677
2.059644
1.012154
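A companion sketch for the cluster-level call, with a hypothetical ResourceManager address:

from yarn_api_client import ResourceManager

rm = ResourceManager('master.example.com', 8088)
apps = rm.cluster_applications(state='FINISHED', limit=10)
print(apps.data)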
path = '/ws/v1/cluster/appstatistics'

# TODO: validate state argument
states = ','.join(state_list) if state_list is not None else None
if application_type_list is not None:
    application_types = ','.join(application_type_list)
else:
    application_types = None

loc_args = (
    ('states', states),
    ('applicationTypes', application_types))
params = self.construct_parameters(loc_args)

return self.request(path, **params)
def cluster_application_statistics(self, state_list=None, application_type_list=None)
With the Application Statistics API, you can obtain a collection of
triples, each of which contains the application type, the application
state and the number of applications of this type and this state in
ResourceManager context. This method works with Hadoop > 2.0.0.

:param list state_list: states of the applications, specified as a
    comma-separated list. If states is not provided, the API will
    enumerate all application states and return the counts of them.
:param list application_type_list: types of the applications,
    specified as a comma-separated list. If application_types is not
    provided, the API will count the applications of any application
    type. In this case, the response shows * to indicate any
    application type. Note that we only support at most one
    applicationType temporarily. Otherwise, users will get a
    BadRequestException.
:returns: API response object with JSON data
:rtype: :py:class:`yarn_api_client.base.Response`
3.043486
3.285395
0.926368
path = '/ws/v1/cluster/apps/{appid}'.format(appid=application_id)
return self.request(path)
def cluster_application(self, application_id)
An application resource contains information about a particular
application that was submitted to a cluster.

:param str application_id: The application id
:returns: API response object with JSON data
:rtype: :py:class:`yarn_api_client.base.Response`
5.250539
5.467317
0.96035
path = '/ws/v1/cluster/apps/{appid}/appattempts'.format(
    appid=application_id)
return self.request(path)
def cluster_application_attempts(self, application_id)
With the application attempts API, you can obtain a collection of
resources that represent an application attempt.

:param str application_id: The application id
:returns: API response object with JSON data
:rtype: :py:class:`yarn_api_client.base.Response`
4.713536
4.303931
1.09517
path = '/ws/v1/cluster/apps/{appid}/appattempts/{attemptid}'.format(
    appid=application_id, attemptid=attempt_id)
return self.request(path)
def cluster_application_attempt_info(self, application_id, attempt_id)
With the application attempts API, you can obtain extended information
about an application attempt.

:param str application_id: The application id
:param str attempt_id: The attempt id
:returns: API response object with JSON data
:rtype: :py:class:`yarn_api_client.base.Response`
3.388182
3.61978
0.936019
path = '/ws/v1/cluster/apps/{appid}/state'.format(
    appid=application_id)
return self.request(path)
def cluster_application_state(self, application_id)
With the application state API, you can obtain the current state of an
application.

:param str application_id: The application id
:returns: API response object with JSON data
:rtype: :py:class:`yarn_api_client.base.Response`
4.771156
4.441825
1.074143
data = '{"state": "KILLED"}'
path = '/ws/v1/cluster/apps/{appid}/state'.format(
    appid=application_id)
return self.update(path, data)
def cluster_application_kill(self, application_id)
With the application kill API, you can kill an application that is not
in FINISHED or FAILED state.

:param str application_id: The application id
:returns: API response object with JSON data
:rtype: :py:class:`yarn_api_client.base.Response`
4.37016
3.983124
1.097169
path = '/ws/v1/cluster/nodes'

# TODO: validate state argument
legal_healthy = ['true', 'false']
if healthy is not None and healthy not in legal_healthy:
    msg = 'Valid Healthy arguments are true, false'
    raise IllegalArgumentError(msg)

loc_args = (
    ('state', state),
    ('healthy', healthy),
)
params = self.construct_parameters(loc_args)

return self.request(path, **params)
def cluster_nodes(self, state=None, healthy=None)
With the Nodes API, you can obtain a collection of resources, each of
which represents a node.

:param str state: the state of the node
:param str healthy: whether the node is healthy - 'true' or 'false'
:returns: API response object with JSON data
:rtype: :py:class:`yarn_api_client.base.Response`
:raises yarn_api_client.errors.IllegalArgumentError: if `healthy`
    incorrect
4.739557
4.688748
1.010836
path = '/ws/v1/cluster/nodes/{nodeid}'.format(nodeid=node_id)
return self.request(path)
def cluster_node(self, node_id)
A node resource contains information about a node in the cluster.

:param str node_id: The node id
:returns: API response object with JSON data
:rtype: :py:class:`yarn_api_client.base.Response`
5.262376
5.132704
1.025264
path = '/proxy/{appid}/ws/v1/mapreduce/info'.format(
    appid=application_id)
return self.request(path)
def application_information(self, application_id)
The MapReduce application master information resource provides overall
information about that mapreduce application master. This includes
application id, time it was started, user, name, etc.

:param str application_id: The application id
:returns: API response object with JSON data
:rtype: :py:class:`yarn_api_client.base.Response`
9.276948
8.066787
1.150018
path = '/proxy/{appid}/ws/v1/mapreduce/jobs'.format(
    appid=application_id)
return self.request(path)
def jobs(self, application_id)
The jobs resource provides a list of the jobs running on this
application master.

:param str application_id: The application id
:returns: API response object with JSON data
:rtype: :py:class:`yarn_api_client.base.Response`
7.626992
7.12897
1.069859
path = '/proxy/{appid}/ws/v1/mapreduce/jobs/{jobid}'.format(
    appid=application_id, jobid=job_id)
return self.request(path)
def job(self, application_id, job_id)
A job resource contains information about a particular job that was
started by this application master. Certain fields are only accessible
if the user has permissions - depends on acl settings.

:param str application_id: The application id
:param str job_id: The job id
:returns: API response object with JSON data
:rtype: :py:class:`yarn_api_client.base.Response`
5.107326
5.523638
0.924631
path = ('/proxy/{appid}/ws/v1/mapreduce/jobs/{jobid}/tasks/{taskid}'
        .format(appid=application_id, jobid=job_id, taskid=task_id))
return self.request(path)
def job_task(self, application_id, job_id, task_id)
A Task resource contains information about a particular task within a
job.

:param str application_id: The application id
:param str job_id: The job id
:param str task_id: The task id
:returns: API response object with JSON data
:rtype: :py:class:`yarn_api_client.base.Response`
3.6666
4.112291
0.89162
path = '/ws/v1/history/mapreduce/jobs'

legal_states = set([s for s, _ in JobStateInternal])
if state is not None and state not in legal_states:
    msg = 'Job Internal State %s is illegal' % (state,)
    raise IllegalArgumentError(msg)

loc_args = (
    ('state', state),
    ('user', user),
    ('queue', queue),
    ('limit', limit),
    ('startedTimeBegin', started_time_begin),
    ('startedTimeEnd', started_time_end),
    ('finishedTimeBegin', finished_time_begin),
    ('finishedTimeEnd', finished_time_end))
params = self.construct_parameters(loc_args)

return self.request(path, **params)
def jobs(self, state=None, user=None, queue=None, limit=None,
         started_time_begin=None, started_time_end=None,
         finished_time_begin=None, finished_time_end=None)
The jobs resource provides a list of the MapReduce jobs that have
finished. It does not currently return a full list of parameters.

:param str user: user name
:param str state: the job state
:param str queue: queue name
:param str limit: total number of app objects to be returned
:param str started_time_begin: jobs with start time beginning with
    this time, specified in ms since epoch
:param str started_time_end: jobs with start time ending with this
    time, specified in ms since epoch
:param str finished_time_begin: jobs with finish time beginning with
    this time, specified in ms since epoch
:param str finished_time_end: jobs with finish time ending with this
    time, specified in ms since epoch
:returns: API response object with JSON data
:rtype: :py:class:`yarn_api_client.base.Response`
:raises yarn_api_client.errors.IllegalArgumentError: if `state`
    incorrect
2.919046
2.801554
1.041938
path = '/ws/v1/history/mapreduce/jobs/{jobid}'.format(jobid=job_id)
return self.request(path)
def job(self, job_id)
A Job resource contains information about a particular job identified
by jobid.

:param str job_id: The job id
:returns: API response object with JSON data
:rtype: :py:class:`yarn_api_client.base.Response`
5.636978
4.838491
1.165028
path = '/ws/v1/history/mapreduce/jobs/{jobid}/jobattempts'.format(
    jobid=job_id)
return self.request(path)
def job_attempts(self, job_id)
With the job attempts API, you can obtain a collection of resources that represent a job attempt.
5.500812
5.351913
1.027822
path = '/ws/v1/history/mapreduce/jobs/{jobid}/counters'.format(
    jobid=job_id)
return self.request(path)
def job_counters(self, job_id)
With the job counters API, you can obtain a collection of resources
that represent all the counters for that job.

:param str job_id: The job id
:returns: API response object with JSON data
:rtype: :py:class:`yarn_api_client.base.Response`
5.205517
4.638923
1.122139
path = '/ws/v1/history/mapreduce/jobs/{jobid}/conf'.format(jobid=job_id)
return self.request(path)
def job_conf(self, job_id)
A job configuration resource contains information about the job
configuration for this job.

:param str job_id: The job id
:returns: API response object with JSON data
:rtype: :py:class:`yarn_api_client.base.Response`
5.268372
4.567235
1.153515
path = '/ws/v1/history/mapreduce/jobs/{jobid}/tasks'.format(
    jobid=job_id)

# m - for map
# r - for reduce
valid_types = ['m', 'r']
if type is not None and type not in valid_types:
    msg = 'Job type %s is illegal' % (type,)
    raise IllegalArgumentError(msg)

params = {}
if type is not None:
    params['type'] = type

return self.request(path, **params)
def job_tasks(self, job_id, type=None)
With the tasks API, you can obtain a collection of resources that
represent a task within a job.

:param str job_id: The job id
:param str type: type of task, valid values are m or r. m for map
    task or r for reduce task
:returns: API response object with JSON data
:rtype: :py:class:`yarn_api_client.base.Response`
3.495959
3.081903
1.134351
path = ('/ws/v1/history/mapreduce/jobs/{jobid}/tasks/{taskid}'
        '/attempts/{attemptid}'.format(jobid=job_id, taskid=task_id,
                                       attemptid=attempt_id))
return self.request(path)
def task_attempt(self, job_id, task_id, attempt_id)
A Task Attempt resource contains information about a particular task
attempt within a job.

:param str job_id: The job id
:param str task_id: The task id
:param str attempt_id: The attempt id
:returns: API response object with JSON data
:rtype: :py:class:`yarn_api_client.base.Response`
3.046783
3.069829
0.992493
query = Query(model)
where = query._add_q(q, used_aliases=set(), allow_joins=False)[0]
return list(sorted(set(expression_mentioned_fields(where))))
def q_mentioned_fields(q, model)
Returns list of field names mentioned in Q object.

Q(a__isnull=True, b=F('c')) -> ['a', 'b', 'c']
10.297833
10.540735
0.976956
table_name = model._meta.db_table
column_names = [model._meta.get_field(field_name).column
                for field_name, order in self.fields_orders]
column_names_with_order = [
    (('-%s' if order else '%s') % column_name)
    for column_name, (field_name, order)
    in zip(column_names, self.fields_orders)
]
# The length of the parts of the name is based on the default max
# length of 30 characters.
hash_data = ([table_name] + column_names_with_order +
             [self.suffix] + self.name_hash_extra_data())
self.name = '%s_%s_%s' % (
    table_name[:11],
    column_names[0][:7],
    '%s_%s' % (self._hash_generator(*hash_data), self.suffix),
)
assert len(self.name) <= self.max_name_length, (
    'Index too long for multiple database support. Is self.suffix '
    'longer than 3 characters?'
)
self.check_name()
def set_name_with_model(self, model)
Sets a unique generated name for the index.

PartialIndex would like to only override "hash_data = ...", but the
entire method must be duplicated for that.
3.959038
3.724729
1.062907
# Find PartialIndexes with unique=True defined on the model.
unique_idxs = [idx for idx in self._meta.indexes
               if isinstance(idx, PartialIndex) and idx.unique]
if unique_idxs:
    model_fields = set(f.name for f in self._meta.get_fields(
        include_parents=True, include_hidden=True))
    for idx in unique_idxs:
        where = idx.where
        if not isinstance(where, Q):
            raise ImproperlyConfigured(
                'ValidatePartialUniqueMixin is not supported for '
                'PartialIndexes with a text-based where condition. '
                'Please upgrade to Q-object based where conditions.')
        mentioned_fields = (set(idx.fields) |
                            set(query.q_mentioned_fields(where,
                                                         self.__class__)))
        missing_fields = mentioned_fields - model_fields
        if missing_fields:
            raise RuntimeError(
                'Unable to use ValidatePartialUniqueMixin: expecting '
                'to find fields %s on model. This is a bug in the '
                'PartialIndex definition or the django-partial-index '
                'library itself.' % (sorted(missing_fields),))
        values = {field_name: getattr(self, field_name)
                  for field_name in mentioned_fields}
        conflict = self.__class__.objects.filter(**values)  # Steps 1 and 3
        conflict = conflict.filter(where)  # Step 2
        if self.pk:
            conflict = conflict.exclude(pk=self.pk)  # Step 4
        if conflict.exists():
            raise PartialUniqueValidationError(
                '%s with the same values for %s already exists.' % (
                    self.__class__.__name__,
                    ', '.join(sorted(idx.fields)),
                ))
def validate_partial_unique(self)
Check partial unique constraints on the model and raise
ValidationError if any failed.

We want to check if another instance already exists with the fields
mentioned in idx.fields, but only if idx.where matches. But we can't
just check for the fields in idx.fields - idx.where may refer to other
fields on the current (or other) models. Also we can't check for all
fields on the current model - that would include irrelevant fields
which may hide duplicates.

To find potential conflicts, we need to build a queryset which:
1. Filters by idx.fields with their current values on this instance,
2. Filters on idx.where,
3. Filters by fields mentioned in idx.where, with their current values
   on this instance,
4. Excludes the current object if it does not match the where
   condition.

Note that step 2 ensures the lookup only looks for conflicts among
rows covered by the PartialIndex, and steps 2+3 ensure that the
QuerySet is empty if the PartialIndex does not cover the current
object.
4.145857
3.763219
1.101679
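A hypothetical model showing the kind of definition this mixin validates; the model and field names are invented and mirror the usual django-partial-index pattern:

from django.db import models
from django.db.models import Q
from partial_index import PartialIndex, ValidatePartialUniqueMixin

class RoomBooking(ValidatePartialUniqueMixin, models.Model):
    user = models.ForeignKey('auth.User', on_delete=models.CASCADE)
    room = models.ForeignKey('Room', on_delete=models.CASCADE)
    deleted_at = models.DateTimeField(null=True, blank=True)

    class Meta:
        # At most one *active* booking per (user, room) pair.
        indexes = [
            PartialIndex(fields=['user', 'room'], unique=True,
                         where=Q(deleted_at__isnull=True)),
        ]

With this in place, validate_partial_unique() only raises when another row with the same user and room also has deleted_at set to NULL.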
if unquote:
    title = parse.unquote(title)
return WikipediaPage(
    self,
    title=title,
    ns=ns,
    language=self.language
)
def page(self, title: str, ns: Namespace = Namespace.MAIN,
         unquote: bool = False) -> 'WikipediaPage'
Constructs Wikipedia page with title `title`.

Creating `WikipediaPage` object is always the first step for
extracting any information.

Example::

    wiki_wiki = wikipediaapi.Wikipedia('en')
    page_py = wiki_wiki.page('Python_(programming_language)')
    print(page_py.title)
    # Python (programming language)

    wiki_hi = wikipediaapi.Wikipedia('hi')
    page_hi_py = wiki_hi.article(
        title='%E0%A4%AA%E0%A4%BE%E0%A4%87%E0%A4%A5%E0%A4%A8',
        unquote=True,
    )
    print(page_hi_py.title)
    # पाइथन

:param title: page title as used in Wikipedia URL
:param ns: :class:`Namespace`
:param unquote: if true it will unquote title
:return: object representing :class:`WikipediaPage`
2.862221
4.004568
0.714739
return self.page(
    title=title,
    ns=ns,
    unquote=unquote,
)
def article(self, title: str, ns: Namespace = Namespace.MAIN,
            unquote: bool = False) -> 'WikipediaPage'
Constructs Wikipedia page with title `title`.

This function is an alias for :func:`page`

:param title: page title as used in Wikipedia URL
:param ns: :class:`Namespace`
:param unquote: if true it will unquote title
:return: object representing :class:`WikipediaPage`
3.372556
4.357935
0.773888
params = {
    'action': 'query',
    'prop': 'extracts',
    'titles': page.title
}  # type: Dict[str, Any]
if self.extract_format == ExtractFormat.HTML:
    # we do nothing, when format is HTML
    pass
elif self.extract_format == ExtractFormat.WIKI:
    params['explaintext'] = 1
    params['exsectionformat'] = 'wiki'
# elif self.extract_format == ExtractFormat.PLAIN:
#     params['explaintext'] = 1
#     params['exsectionformat'] = 'plain'

used_params = kwargs
used_params.update(params)

raw = self._query(page, used_params)
self._common_attributes(raw['query'], page)
pages = raw['query']['pages']
for k, v in pages.items():
    if k == '-1':
        page._attributes['pageid'] = -1
        return ''
    else:
        return self._build_extracts(v, page)
return ''
def extracts(self, page: 'WikipediaPage', **kwargs) -> str
Returns summary of the page with respect to parameters

Parameter `exsectionformat` is taken from `Wikipedia` constructor.

API Calls for parameters:
- https://www.mediawiki.org/w/api.php?action=help&modules=query%2Bextracts
- https://www.mediawiki.org/wiki/Extension:TextExtracts#API

Example::

    import wikipediaapi
    wiki = wikipediaapi.Wikipedia('en')
    page = wiki.page('Python_(programming_language)')
    print(wiki.extracts(page, exsentences=1))
    print(wiki.extracts(page, exsentences=2))

:param page: :class:`WikipediaPage`
:param kwargs: parameters used in API call
:return: summary of the page
3.0812
3.258786
0.945505
params = {
    'action': 'query',
    'prop': 'info',
    'titles': page.title,
    'inprop': '|'.join([
        'protection',
        'talkid',
        'watched',
        'watchers',
        'visitingwatchers',
        'notificationtimestamp',
        'subjectid',
        'url',
        'readable',
        'preload',
        'displaytitle'
    ])
}
raw = self._query(page, params)
self._common_attributes(raw['query'], page)
pages = raw['query']['pages']
for k, v in pages.items():
    if k == '-1':
        page._attributes['pageid'] = -1
        return page
    else:
        return self._build_info(v, page)
return page
def info(self, page: 'WikipediaPage') -> 'WikipediaPage'
https://www.mediawiki.org/w/api.php?action=help&modules=query%2Binfo
https://www.mediawiki.org/wiki/API:Info
4.041984
3.929318
1.028673
params = {
    'action': 'query',
    'prop': 'langlinks',
    'titles': page.title,
    'lllimit': 500,
    'llprop': 'url',
}
used_params = kwargs
used_params.update(params)

raw = self._query(page, used_params)
self._common_attributes(raw['query'], page)
pages = raw['query']['pages']
for k, v in pages.items():
    if k == '-1':
        page._attributes['pageid'] = -1
        return {}
    else:
        return self._build_langlinks(v, page)
return {}
def langlinks(self, page: 'WikipediaPage', **kwargs) -> PagesDict
Returns langlinks of the page with respect to parameters

API Calls for parameters:
- https://www.mediawiki.org/w/api.php?action=help&modules=query%2Blanglinks
- https://www.mediawiki.org/wiki/API:Langlinks

:param page: :class:`WikipediaPage`
:param kwargs: parameters used in API call
:return: links to pages in other languages
3.329319
3.364797
0.989456
params = {
    'action': 'query',
    'prop': 'links',
    'titles': page.title,
    'pllimit': 500,
}
used_params = kwargs
used_params.update(params)

raw = self._query(page, used_params)
self._common_attributes(raw['query'], page)
pages = raw['query']['pages']
for k, v in pages.items():
    if k == '-1':
        page._attributes['pageid'] = -1
        return {}
    else:
        while 'continue' in raw:
            params['plcontinue'] = raw['continue']['plcontinue']
            raw = self._query(page, params)
            v['links'] += raw['query']['pages'][k]['links']
        return self._build_links(v, page)
return {}
def links(self, page: 'WikipediaPage', **kwargs) -> PagesDict
Returns links to other pages with respect to parameters

API Calls for parameters:
- https://www.mediawiki.org/w/api.php?action=help&modules=query%2Blinks
- https://www.mediawiki.org/wiki/API:Links

:param page: :class:`WikipediaPage`
:param kwargs: parameters used in API call
:return: links to linked pages
3.100994
3.184321
0.973832
params = {
    'action': 'query',
    'list': 'backlinks',
    'bltitle': page.title,
    'bllimit': 500,
}
used_params = kwargs
used_params.update(params)

raw = self._query(page, used_params)
self._common_attributes(raw['query'], page)
v = raw['query']
while 'continue' in raw:
    params['blcontinue'] = raw['continue']['blcontinue']
    raw = self._query(page, params)
    v['backlinks'] += raw['query']['backlinks']
return self._build_backlinks(v, page)
def backlinks(self, page: 'WikipediaPage', **kwargs) -> PagesDict
Returns backlinks from other pages with respect to parameters

API Calls for parameters:
- https://www.mediawiki.org/w/api.php?action=help&modules=query%2Bbacklinks
- https://www.mediawiki.org/wiki/API:Backlinks

:param page: :class:`WikipediaPage`
:param kwargs: parameters used in API call
:return: backlinks from other pages
2.760535
2.84
0.972019
params = {
    'action': 'query',
    'list': 'categorymembers',
    'cmtitle': page.title,
    'cmlimit': 500,
}
used_params = kwargs
used_params.update(params)

raw = self._query(page, used_params)
self._common_attributes(raw['query'], page)
v = raw['query']
while 'continue' in raw:
    params['cmcontinue'] = raw['continue']['cmcontinue']
    raw = self._query(page, params)
    v['categorymembers'] += raw['query']['categorymembers']
return self._build_categorymembers(v, page)
def categorymembers(self, page: 'WikipediaPage', **kwargs) -> PagesDict
Returns pages in given category with respect to parameters

API Calls for parameters:
- https://www.mediawiki.org/w/api.php?action=help&modules=query%2Bcategorymembers
- https://www.mediawiki.org/wiki/API:Categorymembers

:param page: :class:`WikipediaPage`
:param kwargs: parameters used in API call
:return: pages in given category
2.75954
2.914658
0.94678
res = ""
if self.wiki.extract_format == ExtractFormat.WIKI:
    res += self.title
elif self.wiki.extract_format == ExtractFormat.HTML:
    res += "<h{}>{}</h{}>".format(level, self.title, level)
else:
    raise NotImplementedError("Unknown ExtractFormat type")

res += "\n"
res += self._text
if len(self._text) > 0:
    res += "\n\n"

for sec in self.sections:
    res += sec.full_text(level + 1)

return res
def full_text(self, level: int = 1) -> str
Returns text of the current section as well as all its subsections.

:param level: indentation level
:return: text of the current section as well as all its subsections
3.082886
2.92271
1.054804
if not self._called['extracts']:
    self._fetch('extracts')
return self._section
def sections(self) -> List[WikipediaPageSection]
Returns all sections of the current page.

:return: List of :class:`WikipediaPageSection`
17.770662
24.321825
0.730647
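A short traversal sketch, assuming page is a WikipediaPage that has already been constructed:

def print_sections(sections, level=0):
    for s in sections:
        print('%s %s' % ('*' * (level + 1), s.title))
        print_sections(s.sections, level + 1)

print_sections(page.sections)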
if not self._called['extracts']:
    self._fetch('extracts')
return self._section_mapping.get(title)
def section_by_title(self, title: str) -> Optional[WikipediaPageSection]
Returns section of the current page with given `title`.

:param title: section title
:return: :class:`WikipediaPageSection`
12.240633
14.588903
0.839037
txt = self.summary
if len(txt) > 0:
    txt += "\n\n"

for sec in self.sections:
    txt += sec.full_text(level=2)

return txt.strip()
def text(self) -> str
Returns text of the current page.

:return: text of the current page
5.143769
6.27527
0.819689
# pylint: disable=unused-argument
user = self.user_map.get(identity)
if not user:
    return False
return permission in user.permissions
async def permits(self, identity, permission, context=None)
Check user permissions.

Return True if the identity is allowed the permission in the current
context, else return False.
4.254171
3.632011
1.171299
assert isinstance(identity, str), identity
assert identity
identity_policy = request.config_dict.get(IDENTITY_KEY)
if identity_policy is None:
    text = ("Security subsystem is not initialized, "
            "call aiohttp_security.setup(...) first")
    # In order to see a meaningful exception message both on console
    # output and on the rendered page, we add the same message to the
    # *reason* and *text* arguments.
    raise web.HTTPInternalServerError(reason=text, text=text)
await identity_policy.remember(request, response, identity, **kwargs)
async def remember(request, response, identity, **kwargs)
Remember identity in a response.

The action is performed by identity_policy.remember().

Usually the identity is stored in user cookies somehow, but it may
also be pushed into a custom header.
8.359457
7.763227
1.076802
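A minimal login-handler sketch; check_credentials is a hypothetical application-specific coroutine, not part of aiohttp_security:

from aiohttp import web
from aiohttp_security import remember

async def login(request):
    form = await request.post()
    if await check_credentials(form['username'], form['password']):
        response = web.HTTPFound('/')
        await remember(request, response, form['username'])
        raise response
    raise web.HTTPUnauthorized()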
identity_policy = request.config_dict.get(IDENTITY_KEY)
if identity_policy is None:
    text = ("Security subsystem is not initialized, "
            "call aiohttp_security.setup(...) first")
    # In order to see a meaningful exception message both on console
    # output and on the rendered page, we add the same message to the
    # *reason* and *text* arguments.
    raise web.HTTPInternalServerError(reason=text, text=text)
await identity_policy.forget(request, response)
async def forget(request, response)
Forget previously remembered identity.

Usually it clears a cookie or server-side storage to forget the user
session.
9.894156
9.468866
1.044915
identity_policy = request.config_dict.get(IDENTITY_KEY)
if identity_policy is None:
    return True
identity = await identity_policy.identify(request)
if identity is None:
    return True
return False
async def is_anonymous(request)
Check if user is anonymous.

The user is considered anonymous if there is no identity in the
request.
4.219496
3.841675
1.098348
@wraps(fn) async def wrapped(*args, **kwargs): request = args[-1] if not isinstance(request, web.BaseRequest): msg = ("Incorrect decorator usage. " "Expecting `def handler(request)` " "or `def handler(self, request)`.") raise RuntimeError(msg) await check_authorized(request) return await fn(*args, **kwargs) warnings.warn("login_required decorator is deprecated, " "use check_authorized instead", DeprecationWarning) return wrapped
def login_required(fn)
Decorator that restricts access to authorized users only. A user is considered authorized if authorized_userid returns some value.
3.480584
3.685344
0.944439
await check_authorized(request) allowed = await permits(request, permission, context) if not allowed: raise web.HTTPForbidden()
async def check_permission(request, permission, context=None)
Checker that passes only for authorized users with the given permission. If the user is not authorized, raises HTTPUnauthorized; if the user is authorized but lacks the permission, raises HTTPForbidden.
6.824793
6.499123
1.05011
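Typical in-handler usage, assuming the application defines a 'protected' permission string:

from aiohttp import web
from aiohttp_security import check_permission

async def handler_protected(request):
    # Raises HTTPUnauthorized or HTTPForbidden on failure, as described above.
    await check_permission(request, 'protected')
    return web.Response(text='You are allowed in')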
def wrapper(fn): @wraps(fn) async def wrapped(*args, **kwargs): request = args[-1] if not isinstance(request, web.BaseRequest): msg = ("Incorrect decorator usage. " "Expecting `def handler(request)` " "or `def handler(self, request)`.") raise RuntimeError(msg) await check_permission(request, permission, context) return await fn(*args, **kwargs) return wrapped warnings.warn("has_permission decorator is deprecated, " "use check_permission instead", DeprecationWarning) return wrapper
def has_permission( permission, context=None, )
Decorator that restricts access to authorized users with the correct permissions. If the user is not authorized, raises HTTPUnauthorized; if the user is authorized but lacks the permission, raises HTTPForbidden.
3.308062
3.400617
0.972783
'''print colorful text in terminal.'''
lines = raw.split('\n')
# The first non-empty line is the headword; lines starting with '例'
# (example) are printed plain, and a line starting with '【' opens a
# detail block whose body is printed in cyan instead of yellow.
colorful = True
detail = False
for line in lines:
    if line:
        if colorful:
            colorful = False
            print(colored(line, 'white', 'on_green') + '\n')
            continue
        elif line.startswith('例'):
            print(line + '\n')
            continue
        elif line.startswith('【'):
            print(colored(line, 'white', 'on_green') + '\n')
            detail = True
            continue
        if not detail:
            print(colored(line + '\n', 'yellow'))
        else:
            print(colored(line, 'cyan') + '\n')
def colorful_print(raw)
print colorful text in terminal.
3.151665
3.018538
1.044103
''' no colorful text, for output.''' lines = raw.split('\n') for line in lines: if line: print(line + '\n')
def normal_print(raw)
no colorful text, for output.
8.152742
4.431838
1.839585
'''search the word or phrase on http://dict.youdao.com.'''
url = 'http://dict.youdao.com/w/%s' % word
expl = get_text(url)
if printer:
    colorful_print(expl)
return expl
def search_online(word, printer=True)
search the word or phrase on http://dict.youdao.com.
6.265477
5.002951
1.252356
'''offline search.''' conn = sqlite3.connect(os.path.join(DEFAULT_PATH, 'word.db')) curs = conn.cursor() curs.execute(r'SELECT expl, pr FROM Word WHERE name LIKE "%s%%"' % word) res = curs.fetchall() if res: print(colored(word + ' 在数据库中存在', 'white', 'on_green')) print() print(colored('★ ' * res[0][1], 'red'), colored('☆ ' * (5 - res[0][1]), 'yellow'), sep='') colorful_print(res[0][0]) else: print(colored(word + ' 不在本地,从有道词典查询', 'white', 'on_red')) search_online(word) input_msg = '若存入本地,请输入优先级(1~5) ,否则 Enter 跳过\n>>> ' if sys.version_info[0] == 2: add_in_db_pr = raw_input(input_msg) else: add_in_db_pr = input(input_msg) if add_in_db_pr and add_in_db_pr.isdigit(): if(int(add_in_db_pr) >= 1 and int(add_in_db_pr) <= 5): add_word(word, int(add_in_db_pr)) print(colored('单词 {word} 已加入数据库中'.format(word=word), 'white', 'on_red')) curs.close() conn.close()
def search_database(word)
offline search.
3.601926
3.58796
1.003893
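Note the LIKE pattern above: the doubled %% in the format string escapes to a literal %, so the query is a prefix match on the word. A small self-contained check, using a parameterized query to avoid interpolating user input into SQL (a safer variant than the string formatting used above):

import sqlite3

conn = sqlite3.connect(':memory:')
curs = conn.cursor()
curs.execute('CREATE TABLE Word (name TEXT, expl TEXT, pr INTEGER)')
curs.execute('INSERT INTO Word VALUES ("apple", "a fruit", 3)')
# Prefix match: the bound "app%" behaves like the interpolated '"app%%"' above.
curs.execute('SELECT expl, pr FROM Word WHERE name LIKE ?', ('app%',))
assert curs.fetchall() == [('a fruit', 3)]
conn.close()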
'''add the word or phrase to database.''' conn = sqlite3.connect(os.path.join(DEFAULT_PATH, 'word.db')) curs = conn.cursor() curs.execute('SELECT expl, pr FROM Word WHERE name = "%s"' % word) res = curs.fetchall() if res: print(colored(word + ' 在数据库中已存在,不需要添加', 'white', 'on_red')) sys.exit() try: expl = search_online(word, printer=False) curs.execute('insert into word(name, expl, pr, aset) values ("%s", "%s", %d, "%s")' % ( word, expl, default_pr, word[0].upper())) except Exception as e: print(colored('something\'s wrong, you can\'t add the word', 'white', 'on_red')) print(e) else: conn.commit() print(colored('%s has been inserted into database' % word, 'green')) finally: curs.close() conn.close()
def add_word(word, default_pr)
add the word or phrase to database.
3.642427
3.483275
1.04569
'''delete the word or phrase from database.'''
conn = sqlite3.connect(os.path.join(DEFAULT_PATH, 'word.db'))
curs = conn.cursor()
# search first
curs.execute('SELECT expl, pr FROM Word WHERE name = "%s"' % word)
res = curs.fetchall()
if res:
    try:
        curs.execute('DELETE FROM Word WHERE name = "%s"' % word)
    except Exception as e:
        print(e)
    else:
        print(colored('%s has been deleted from database' % word, 'green'))
        conn.commit()
    finally:
        curs.close()
        conn.close()
else:
    print(colored('%s does not exist in the database' % word, 'white', 'on_red'))
def delete_word(word)
delete the word or phrase from database.
3.518122
3.364139
1.045772
''' list words by priority, like this:
    1   : list words whose priority is 1,
    2+  : list words whose priority is 2 or higher,
    3-4 : list words whose priority is from 3 to 4.
'''
conn = sqlite3.connect(os.path.join(DEFAULT_PATH, 'word.db'))
curs = conn.cursor()
try:
    if not vb:
        if len(pr) == 1:
            curs.execute('SELECT name, pr FROM Word WHERE pr == %d ORDER by pr, name' % (int(pr[0])))
        elif len(pr) == 2 and pr[1] == '+':
            curs.execute('SELECT name, pr FROM Word WHERE pr >= %d ORDER by pr, name' % (int(pr[0])))
        elif len(pr) == 3 and pr[1] == '-':
            curs.execute('SELECT name, pr FROM Word WHERE pr >= %d AND pr <= %d ORDER by pr, name' % (
                int(pr[0]), int(pr[2])))
    else:
        if len(pr) == 1:
            curs.execute('SELECT expl, pr FROM Word WHERE pr == %d ORDER by pr, name' % (int(pr[0])))
        elif len(pr) == 2 and pr[1] == '+':
            curs.execute('SELECT expl, pr FROM Word WHERE pr >= %d ORDER by pr, name' % (int(pr[0])))
        elif len(pr) == 3 and pr[1] == '-':
            curs.execute('SELECT expl, pr FROM Word WHERE pr >= %d AND pr <= %d ORDER by pr, name' % (
                int(pr[0]), int(pr[2])))
except Exception as e:
    print(colored('something\'s wrong, priority must be 1-5', 'red'))
    print(e)
else:
    for line in curs.fetchall():
        expl = line[0]
        pr = line[1]
        print('\n' + '=' * 40 + '\n')
        if not output:
            print(colored('★ ' * pr, 'red'),
                  colored('☆ ' * (5 - pr), 'yellow'), sep='')
            colorful_print(expl)
        else:
            print('★ ' * pr + '☆ ' * (5 - pr))
            normal_print(expl)
finally:
    curs.close()
    conn.close()
def list_priority(pr, vb=False, output=False)
list words by priority, like this: 1 : list words whose priority is 1, 2+ : list words whose priority is 2 or higher, 3-4 : list words whose priority is from 3 to 4.
2.354763
2.017301
1.167284
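The priority-expression parsing in isolation, as a self-contained sketch (the helper name is illustrative):

# "1" -> exactly 1, "2+" -> 2 or higher, "3-4" -> inclusive range 3..4.
def parse_priority(pr):
    if len(pr) == 1:
        return int(pr[0]), int(pr[0])
    if len(pr) == 2 and pr[1] == '+':
        return int(pr[0]), 5
    if len(pr) == 3 and pr[1] == '-':
        return int(pr[0]), int(pr[2])
    raise ValueError('priority must look like "1", "2+" or "3-4"')

assert parse_priority('1') == (1, 1)
assert parse_priority('2+') == (2, 5)
assert parse_priority('3-4') == (3, 4)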
'''count the number of words'''
conn = sqlite3.connect(os.path.join(DEFAULT_PATH, 'word.db'))
curs = conn.cursor()
if arg[0].isdigit():
    if len(arg) == 1:
        curs.execute('SELECT count(*) FROM Word WHERE pr == %d' % (int(arg[0])))
    elif len(arg) == 2 and arg[1] == '+':
        curs.execute('SELECT count(*) FROM Word WHERE pr >= %d' % (int(arg[0])))
    elif len(arg) == 3 and arg[1] == '-':
        curs.execute('SELECT count(*) FROM Word WHERE pr >= %d AND pr <= %d' % (int(arg[0]), int(arg[2])))
elif arg[0].isalpha():
    if arg == 'all':
        curs.execute('SELECT count(*) FROM Word')
    elif len(arg) == 1:
        curs.execute('SELECT count(*) FROM Word WHERE aset == "%s"' % arg.upper())
res = curs.fetchall()
print(res[0][0])
curs.close()
conn.close()
def count_word(arg)
count the number of words
2.568996
2.631775
0.976146
d = { 'year': date[0:4], 'month': date[5:7], 'day': date[8:10], 'hour': date[11:13], 'minute': date[14:16], 'second': date[17:], } d = dict((k, int(v)) for k, v in d.items()) return datetime(**d)
def strptime(date)
Returns a datetime object from the given date, which must be in the format: YYYY-MM-ddTHH:mm:ss
1.649378
1.6891
0.976484
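A quick check of the expected input format; note that the standard library's datetime.strptime with the format '%Y-%m-%dT%H:%M:%S' produces the same result:

from datetime import datetime

date = '2021-06-01T12:30:45'
parsed = datetime(int(date[0:4]), int(date[5:7]), int(date[8:10]),
                  int(date[11:13]), int(date[14:16]), int(date[17:]))
assert parsed == datetime.strptime(date, '%Y-%m-%dT%H:%M:%S')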
params = { 'client_id': self.client_id, 'response_type': self.type, 'redirect_uri': self.callback_url } return AUTHENTICATION_URL + "?" + urlencode(params)
def authentication_url(self)
Redirect your users here to authenticate them.
2.675641
2.406504
1.111837
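What the generated URL looks like in isolation; AUTHENTICATION_URL and the parameter values below are placeholders:

from urllib.parse import urlencode

AUTHENTICATION_URL = 'https://api.example.com/v2/oauth2/authenticate'  # placeholder
params = {
    'client_id': 'my-client-id',
    'response_type': 'code',
    'redirect_uri': 'https://myapp.example.com/callback',
}
print(AUTHENTICATION_URL + '?' + urlencode(params))
# e.g. https://api.example.com/v2/oauth2/authenticate?client_id=my-client-id&...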
if not headers:
    headers = {}

if timeout is None:
    timeout = self.timeout

# All requests must include oauth_token
headers['Authorization'] = 'token %s' % self.access_token

if path.startswith(('http://', 'https://')):
    url = path
else:
    url = BASE_URL + path
logger.debug('url: %s', url)

response = self.session.request(
    method, url, params=params, data=data, files=files,
    headers=headers, allow_redirects=allow_redirects, stream=stream,
    timeout=timeout)
logger.debug('response: %s', response)
if raw:
    return response

return _process_response(response)
def request(self, path, method='GET', params=None, data=None, files=None, headers=None, raw=False, allow_redirects=True, stream=False, timeout=None)
Wrapper around requests.request(). Prepends BASE_URL to path. Adds self.access_token to the Authorization header. Parses the response as JSON and returns it.
2.370831
2.407687
0.984692
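The URL-resolution rule from the method above, distilled into a self-contained sketch (BASE_URL is a placeholder):

BASE_URL = 'https://api.example.com/v2'  # placeholder

def resolve(path):
    # Absolute URLs pass through untouched; bare paths get the API prefix.
    if path.startswith(('http://', 'https://')):
        return path
    return BASE_URL + path

assert resolve('/files/list') == 'https://api.example.com/v2/files/list'
assert resolve('https://other.example.com/x') == 'https://other.example.com/x'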
path = '/files/search/{query}/page/{page}'.format(query=query, page=page) result = cls.client.request(path) files = result['files'] return [cls(f) for f in files]
def search(cls, query, page=1)
Search makes a search request with the given query. query: The keyword to search for. page: The result page number. If -1 is given, returns all results at once.
3.550728
4.069249
0.872576
# pylint: disable=arguments-differ
if not paths:
    self.state.add_error(
        'No `paths` argument provided in recipe, bailing', critical=True)
else:
    self._paths = [path.strip() for path in paths.strip().split(',')]
def setup(self, paths=None)
Sets up the _paths attribute. Args: paths: Comma-separated list of strings representing the paths to collect.
11.272654
10.35465
1.088656
for path in self._paths: if os.path.exists(path): self.state.output.append((os.path.basename(path), path)) else: self.state.add_error( 'Path {0:s} does not exist'.format(str(path)), critical=False) if not self.state.output: self.state.add_error('No valid paths collected, bailing', critical=True)
def process(self)
Checks whether the paths exists and updates the state accordingly.
4.104089
3.462999
1.185126
runner_args = self.grr_api.types.CreateHuntRunnerArgs() runner_args.description = self.reason hunt = self.grr_api.CreateHunt( flow_name=name, flow_args=args, hunt_runner_args=runner_args) print('{0!s}: Hunt created'.format(hunt.hunt_id)) self._check_approval_wrapper(hunt, hunt.Start) return hunt
def _create_hunt(self, name, args)
Create specified hunt. Args: name: string containing hunt name. args: proto (*FlowArgs) for type of hunt, as defined in GRR flow proto. Returns: The newly created GRR hunt object. Raises: ValueError: if approval is needed and approvers were not specified.
4.109986
4.261322
0.964486
super(GRRHuntArtifactCollector, self).setup( reason, grr_server_url, grr_username, grr_password, approvers=approvers, verify=verify) self.artifacts = [item.strip() for item in artifacts.strip().split(',')] if not artifacts: self.state.add_error('No artifacts were specified.', critical=True) self.use_tsk = use_tsk
def setup(self, artifacts, use_tsk, reason, grr_server_url, grr_username, grr_password, approvers=None, verify=True)
Initializes a GRR Hunt artifact collector. Args: artifacts: str, comma-separated list of GRR-defined artifacts. use_tsk: toggle for use_tsk flag. reason: justification for GRR access. grr_server_url: GRR server URL. grr_username: GRR username. grr_password: GRR password. approvers: str, comma-separated list of GRR approval recipients. verify: boolean, whether to verify the GRR server's x509 certificate.
3.105681
2.686294
1.156121
print('Artifacts to be collected: {0!s}'.format(self.artifacts)) hunt_args = flows_pb2.ArtifactCollectorFlowArgs( artifact_list=self.artifacts, use_tsk=self.use_tsk, ignore_interpolation_errors=True, apply_parsers=False,) return self._create_hunt('ArtifactCollectorFlow', hunt_args)
def process(self)
Construct and start a new Artifact Collection hunt. Returns: The newly created GRR hunt object. Raises: RuntimeError: if no items specified for collection.
8.905583
5.59643
1.591297
super(GRRHuntFileCollector, self).setup( reason, grr_server_url, grr_username, grr_password, approvers=approvers, verify=verify) self.file_path_list = [item.strip() for item in file_path_list.strip().split(',')] if not file_path_list: self.state.add_error('Files must be specified for hunts', critical=True)
def setup(self, file_path_list, reason, grr_server_url, grr_username, grr_password, approvers=None, verify=True)
Initializes a GRR Hunt file collector. Args: file_path_list: comma-separated list of file paths. reason: justification for GRR access. grr_server_url: GRR server URL. grr_username: GRR username. grr_password: GRR password. approvers: comma-separated list of GRR approval recipients. verify: boolean, whether to verify the GRR server's x509 certificate.
3.192739
2.919214
1.093698
print('Hunt to collect {0:d} items'.format(len(self.file_path_list))) print('Files to be collected: {0!s}'.format(self.file_path_list)) hunt_action = flows_pb2.FileFinderAction( action_type=flows_pb2.FileFinderAction.DOWNLOAD) hunt_args = flows_pb2.FileFinderArgs( paths=self.file_path_list, action=hunt_action) return self._create_hunt('FileFinder', hunt_args)
def process(self)
Construct and start a new File hunt. Returns: The newly created GRR hunt object. Raises: RuntimeError: if no items specified for collection.
4.395752
3.417466
1.28626
super(GRRHuntDownloader, self).setup( reason, grr_server_url, grr_username, grr_password, approvers=approvers, verify=verify) self.hunt_id = hunt_id self.output_path = tempfile.mkdtemp()
def setup(self, hunt_id, reason, grr_server_url, grr_username, grr_password, approvers=None, verify=True)
Initializes a GRR Hunt file collector. Args: hunt_id: Hunt ID to download results from. reason: justification for GRR access. grr_server_url: GRR server URL. grr_username: GRR username. grr_password: GRR password. approvers: comma-separated list of GRR approval recipients. verify: boolean, whether to verify the GRR server's x509 certificate.
2.225151
2.510612
0.886298
if not os.path.isdir(self.output_path): os.makedirs(self.output_path) output_file_path = os.path.join( self.output_path, '.'.join((self.hunt_id, 'zip'))) if os.path.exists(output_file_path): print('{0:s} already exists: Skipping'.format(output_file_path)) return None self._check_approval_wrapper( hunt, self._get_and_write_archive, hunt, output_file_path) results = self._extract_hunt_results(output_file_path) print('Wrote results of {0:s} to {1:s}'.format( hunt.hunt_id, output_file_path)) return results
def collect_hunt_results(self, hunt)
Download current set of files in results. Args: hunt: The GRR hunt object to download files from. Returns: list: tuples containing: str: human-readable description of the source of the collection. For example, the name of the source host. str: path to the collected data. Raises: ValueError: if approval is needed and approvers were not specified.
2.942068
2.945077
0.998978
hunt_archive = hunt.GetFilesArchive() hunt_archive.WriteToFile(output_file_path)
def _get_and_write_archive(self, hunt, output_file_path)
Gets and writes a hunt archive. Function is necessary for the _check_approval_wrapper to work. Args: hunt: The GRR hunt object. output_file_path: The output path where to write the Hunt Archive.
4.380766
5.915698
0.740532
yamldict = yaml.safe_load(client_info_contents) fqdn = yamldict['system_info']['fqdn'] client_id = yamldict['client_id'].split('/')[1] return client_id, fqdn
def _get_client_fqdn(self, client_info_contents)
Extracts a GRR client's FQDN from its client_info.yaml file. Args: client_info_contents: The contents of the client_info.yaml file. Returns: A (str, str) tuple representing client ID and client FQDN.
3.680691
3.174728
1.159372
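A self-contained check of the extraction with a made-up client_info.yaml snippet (field values are illustrative; GRR client URNs look like aff4:/C.<hex>):

import yaml

client_info_contents = (
    'client_id: aff4:/C.1234567890abcdef\n'
    'system_info:\n'
    '  fqdn: host1.example.com\n'
)
yamldict = yaml.safe_load(client_info_contents)
fqdn = yamldict['system_info']['fqdn']
client_id = yamldict['client_id'].split('/')[1]
assert (client_id, fqdn) == ('C.1234567890abcdef', 'host1.example.com')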
# Extract items from archive by host for processing
collection_paths = []
client_ids = set()
client_id_to_fqdn = {}
hunt_dir = None
try:
    with zipfile.ZipFile(output_file_path) as archive:
        items = archive.infolist()
        for f in items:
            if not hunt_dir:
                hunt_dir = f.filename.split('/')[0]

            # If we're dealing with client_info.yaml, use it to build a client
            # ID to FQDN correspondence table & skip extraction.
            if f.filename.split('/')[-1] == 'client_info.yaml':
                client_id, fqdn = self._get_client_fqdn(archive.read(f))
                client_id_to_fqdn[client_id] = fqdn
                continue

            client_id = f.filename.split('/')[1]
            if client_id.startswith('C.'):
                if client_id not in client_ids:
                    client_directory = os.path.join(self.output_path,
                                                    hunt_dir, client_id)
                    collection_paths.append((client_id, client_directory))
                    client_ids.add(client_id)
                try:
                    archive.extract(f, self.output_path)
                except KeyError as exception:
                    print('Extraction error: {0!s}'.format(exception))
                    return []

except OSError as exception:
    msg = 'Error manipulating file {0:s}: {1!s}'.format(
        output_file_path, exception)
    self.state.add_error(msg, critical=True)
    return []
except zipfile.BadZipfile as exception:
    msg = 'Bad zipfile {0:s}: {1!s}'.format(
        output_file_path, exception)
    self.state.add_error(msg, critical=True)
    return []

try:
    os.remove(output_file_path)
except OSError as exception:
    print('Output path {0:s} could not be removed: {1!s}'.format(
        output_file_path, exception))

# Translate GRR client IDs to FQDNs with the information retrieved
# earlier
fqdn_collection_paths = []
for client_id, path in collection_paths:
    fqdn = client_id_to_fqdn.get(client_id, client_id)
    fqdn_collection_paths.append((fqdn, path))

if not fqdn_collection_paths:
    self.state.add_error('Nothing was extracted from the hunt archive',
                         critical=True)
    return []

return fqdn_collection_paths
def _extract_hunt_results(self, output_file_path)
Open a hunt output archive and extract files. Args: output_file_path: The path where the hunt archive is downloaded to. Returns: list: tuples containing: str: The name of the client from where the files were downloaded. str: The directory where the files were downloaded to.
2.567181
2.543555
1.009289
hunt = self.grr_api.Hunt(self.hunt_id).Get() self.state.output = self.collect_hunt_results(hunt)
def process(self)
Download the results of an existing hunt. Raises: ValueError: if approval is needed and approvers were not specified.
8.848228
6.722647
1.316182
if not name: return cls._extra_config return cls._extra_config.get(name, None)
def get_extra(cls, name=None)
Gets extra configuration parameters. These parameters should be loaded through load_extra or load_extra_data. Args: name: str, the name of the configuration data to load. Returns: A dictionary containing the requested configuration data. None if data was never loaded under that name.
4.398734
4.221307
1.042031
try: with open(filename, 'rb') as configuration_file: cls.load_extra_data(configuration_file.read()) sys.stderr.write("Config successfully loaded from {0:s}\n".format( filename)) return True except IOError: return False
def load_extra(cls, filename)
Loads extra JSON configuration parameters from a file on the filesystem. Args: filename: str, the filename to open. Returns: bool: True if the extra configuration parameters were read.
4.286199
4.037081
1.061707
try:
    cls._extra_config.update(json.loads(data))
except ValueError as exception:
    sys.stderr.write('Could not convert data to JSON: {0!s}\n'.format(exception))
    sys.exit(-1)
def load_extra_data(cls, data)
Loads extra JSON configuration parameters from a data buffer. The data buffer must represent a JSON object. Args: data: str, the buffer to load the JSON data from.
6.548311
5.42965
1.206028
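A usage sketch with the class-level _extra_config dict this method updates (the class name is illustrative):

import json

class Config:
    _extra_config = {}

    @classmethod
    def load_extra_data(cls, data):
        # Mirrors the method above; the buffer must represent a JSON object.
        cls._extra_config.update(json.loads(data))

Config.load_extra_data('{"endpoint": "https://example.com", "retries": 3}')
assert Config._extra_config['retries'] == 3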
recipe_name = recipe.contents['name'] cls._recipe_classes[recipe_name] = ( recipe.contents, recipe.args, recipe.__doc__)
def register_recipe(cls, recipe)
Registers a dftimewolf recipe. Args: recipe: imported python module representing the recipe.
7.992289
9.663172
0.827088
# Search for the hostname in GRR
print('Searching for client: {0:s}'.format(hostname))
try:
    search_result = self.grr_api.SearchClients(hostname)
except grr_errors.UnknownError as exception:
    self.state.add_error('Could not search for host {0:s}: {1!s}'.format(
        hostname, exception
    ), critical=True)
    return None

result = []
for client in search_result:
    if hostname.lower() in client.data.os_info.fqdn.lower():
        result.append((client.data.last_seen_at, client))

if not result:
    self.state.add_error(
        'Could not get client_id for {0:s}'.format(hostname), critical=True)
    return None

last_seen, client = sorted(result, key=lambda x: x[0], reverse=True)[0]
# Remove microseconds and create datetime object
last_seen_datetime = datetime.datetime.utcfromtimestamp(
    last_seen / 1000000)
# Timedelta between now and when the client was last seen, in minutes.
# First, count total seconds. This will return a float.
last_seen_seconds = (
    datetime.datetime.utcnow() - last_seen_datetime).total_seconds()
last_seen_minutes = int(round(last_seen_seconds / 60))

print('Found active client: {0:s}'.format(client.client_id))
print('Client last seen: {0:s} ({1:d} minutes ago)'.format(
    last_seen_datetime.strftime('%Y-%m-%dT%H:%M:%S+0000'),
    last_seen_minutes))

return client
def _get_client_by_hostname(self, hostname)
Search GRR by hostname and get the latest active client. Args: hostname: hostname to search for. Returns: GRR API Client object Raises: DFTimewolfError: if no client ID found for hostname.
2.758012
2.597056
1.061976
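The last-seen arithmetic in isolation: GRR reports timestamps as microseconds since the epoch, so the conversion and the minutes-ago computation reduce to this (the timestamp value is illustrative):

import datetime

last_seen = 1609459200000000  # 2021-01-01T00:00:00 UTC, in microseconds
last_seen_datetime = datetime.datetime.utcfromtimestamp(last_seen / 1000000)
last_seen_seconds = (
    datetime.datetime.utcnow() - last_seen_datetime).total_seconds()
last_seen_minutes = int(round(last_seen_seconds / 60))
print(last_seen_datetime.strftime('%Y-%m-%dT%H:%M:%S+0000'), last_seen_minutes)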
# TODO(tomchop): Thread this clients = [] for host in hosts: clients.append(self._get_client_by_hostname(host)) return [client for client in clients if client is not None]
def find_clients(self, hosts)
Finds GRR clients given a list of hosts. Args: hosts: List of hostname FQDNs Returns: List of GRR client objects.
4.897963
5.487749
0.892527