content (string, lengths 22-815k)
id (int64, 0-4.91M)
def enable_logging( level='WARNING' ): """Enable sending logs to stderr. Useful for shell sessions. level Logging threshold, as defined in the logging module of the Python standard library. Defaults to 'WARNING'. """ log = logging.getLogger( 'mrcrowbar' ) log.setLevel( level ) out = logging.StreamHandler() out.setLevel( level ) form = logging.Formatter( '[%(levelname)s] %(name)s - %(message)s' ) out.setFormatter( form ) log.addHandler( out )
5,346,500
def event_detail(request, event_id): """ A View to return an individual selected event details page. """ event = get_object_or_404(Event, pk=event_id) context = { 'event': event } return render(request, 'events/event_detail.html', context)
5,346,501
def variables( metadata: meta.Dataset, selected_variables: Optional[Iterable[str]] = None ) -> Tuple[dataset.Variable]: """Return the variables defined in the dataset. Args: selected_variables: The variables to return. If None, all the variables are returned. Returns: The variables defined in the dataset. """ selected_variables = selected_variables or metadata.variables.keys() return tuple( dataset.VariableArray( v.name, numpy.ndarray((0, ) * len(v.dimensions), v.dtype), v.dimensions, attrs=v.attrs, compressor=v.compressor, fill_value=v.fill_value, filters=v.filters, ) for k, v in metadata.variables.items() if k in selected_variables)
5,346,502
def builder(tiledata, start_tile_id, version, clear_old_tiles=True): """ Deserialize a list of serialized tiles, then re-link all the tiles to re-create the map described by the tile links :param list tiledata: list of serialized tiles :param start_tile_id: tile ID of tile that should be used as the start tile :param str version: object model version of the tile data to be deserialized :return: starting tile of built map :rtype: text_game_maker.tile.tile.Tile """ tiles = {} visited = [] if clear_old_tiles: _tiles.clear() for d in tiledata: tile = deserialize(d, version) tiles[tile.tile_id] = tile if start_tile_id not in tiles: raise RuntimeError("No tile found with ID '%s'" % start_tile_id) tilestack = [tiles[start_tile_id]] while tilestack: t = tilestack.pop(0) if t.tile_id in visited: continue visited.append(t.tile_id) if isinstance(t, LockedDoor) and t.replacement_tile: if t.replacement_tile: t.replacement_tile = tiles[t.replacement_tile] tilestack.append(t.replacement_tile) if t.source_tile: t.source_tile = tiles[t.source_tile] tilestack.append(t.source_tile) else: for direction in ['north', 'south', 'east', 'west']: tile_id = getattr(t, direction) if not tile_id: continue setattr(t, direction, tiles[tile_id]) tilestack.append(tiles[tile_id]) return tiles[start_tile_id]
5,346,503
def data_uri(content_type, data): """Return data as a data: URI scheme""" return "data:%s;base64,%s" % (content_type, base64.urlsafe_b64encode(data))
5,346,504
def next_line(ionex_file): """ next_line Function returns the next line in the file that is not a blank line, unless the line is '', which is a typical EOF marker. """ done = False while not done: line = ionex_file.readline() if line == '': return line elif line.strip(): return line
5,346,505
def create_database(instance, database_id): """Creates a Cloud Spanner database and table.""" database = instance.database( database_id, ddl_statements=[ """CREATE TABLE events_batch ( record_id INT64 NOT NULL, event_time STRING(50) NOT NULL, event_type STRING(50) NOT NULL, product_id STRING(50) NOT NULL, category_id STRING(50) NOT NULL, category_code STRING(200) NOT NULL, brand STRING(30) NOT NULL, price STRING(20) NOT NULL, user_id STRING(50) NOT NULL, user_session STRING(100) NOT NULL, ) PRIMARY KEY (record_id, event_time)""", ], ) operation = database.create() logging.info("Waiting for operation to complete...") operation.result(120) logging.info( "Created database {} on instance {}".format(database, instance))
5,346,506
def saySomething(): """Contemplation...""" if helpers.get_answer(): print(say_hi())
5,346,507
def last(value): """ returns the last value in a list (None if empty list) or the original if value not a list :Example: --------- >>> assert last(5) == 5 >>> assert last([5,5]) == 5 >>> assert last([]) is None >>> assert last([1,2]) == 2 """ values = as_list(value) return values[-1] if len(values) else None
5,346,508
def levup(acur, knxt, ecur=None): """ LEVUP One step forward Levinson recursion Args: acur (array) : knxt (array) : Returns: anxt (array) : the P+1'th order prediction polynomial based on the P'th order prediction polynomial, acur, and the P+1'th order reflection coefficient, Knxt. enxt (array) : the P+1'th order prediction error, based on the P'th order prediction error, ecur. References: P. Stoica R. Moses, Introduction to Spectral Analysis Prentice Hall, N.J., 1997, Chapter 3. """ if acur[0] != 1: raise ValueError( 'The first coefficient of the prediction polynomial acur must be equal to 1.') acur = acur[1:] # Drop the leading 1, it is not needed # Matrix formulation from Stoica is used to avoid looping anxt = numpy.concatenate((acur, [0])) + knxt * numpy.concatenate( (numpy.conj(acur[-1::-1]), [1])) enxt = None if ecur is not None: # matlab version enxt = (1-knxt'.*knxt)*ecur enxt = (1. - numpy.dot(numpy.conj(knxt), knxt)) * ecur anxt = numpy.insert(anxt, 0, 1) return anxt, enxt
5,346,509
def plot_dd_curves(row, col, before, after, repnames=None, log=True, **kwargs): """ Plots a comparison of distance dependence curves before and after scaling. Parameters ---------- row, col : np.ndarray Row and column indices identifying the location of pixels in ``before`` and ``after``. before, after: np.ndarray Counts per pixel before and after scaling, respectively. repnames : list of str, optional Pass the replicate names in the same order as the columns of ``before`` and ``after``. log : bool Pass True to log both axes of the distance dependence plot. kwargs : kwargs Typical plotter kwargs. Returns ------- pyplot axis The axis plotted on. """ if repnames is None: repnames = ['%s' % (i+1) for i in range(before.shape[1])] dist = col - row n = max(row.max(), col.max()) + 1 dist_bin_idx = np.digitize(dist, np.linspace(0, 1000, 101), right=True) bs = np.arange(1, dist_bin_idx.max() + 1) before_means = np.array( [np.sum(before[dist_bin_idx == b, :], axis=0) / float(n - b) for b in bs]) after_means = np.array( [np.sum(after[dist_bin_idx == b, :], axis=0) / float(n - b) for b in bs]) fig, (ax1, ax2) = plt.subplots(1, 2, figsize=(12, 5)) for r, repname in enumerate(repnames): ax1.plot(bs*10 + 5, before_means[:, r], label=repname, color='C%i' % r) ax2.plot(bs*10 + 5, after_means[:, r], label=repname, color='C%i' % r) plt.legend() ax1.set_xlabel('distance (bins)') ax2.set_xlabel('distance (bins)') ax1.set_ylabel('average counts') ax1.set_title('before scaling') ax2.set_title('after scaling') if log: ax1.set_yscale('log') ax1.set_xscale('log') ax2.set_yscale('log') ax2.set_xscale('log')
5,346,510
def gradient_output_wrt_input(model, img, normalization_trick=False): """ Get gradient of softmax with respect to the input. Must check if correct. Do not use # Arguments model: img: # Returns gradient: """ grads = K.gradients(model.output, model.input)[0] if normalization_trick: grads /= (K.sqrt(K.mean(K.square(grads))) + 1e-5) iterate = K.function([model.input], [grads]) grad_vals = iterate([img])[0] gradient = grad_vals[0] return gradient
5,346,511
def response_GET(client, url): """Fixture that return the result of a GET request.""" return client.get(url)
5,346,512
def load_python_object(name): """ Loads a python module from string """ logger = getLoggerWithNullHandler('commando.load_python_object') (module_name, _, object_name) = name.rpartition(".") if module_name == '': (module_name, object_name) = (object_name, module_name) try: logger.debug('Loading module [%s]' % module_name) module = __import__(module_name) except ImportError: raise CommandoLoaderException( "Module [%s] cannot be loaded." % module_name) if object_name == '': return module try: module = sys.modules[module_name] except KeyError: raise CommandoLoaderException( "Error occured when loading module [%s]" % module_name) try: logger.debug('Getting object [%s] from module [%s]' % (object_name, module_name)) return getattr(module, object_name) except AttributeError: raise CommandoLoaderException( "Cannot load object [%s]. " "Module [%s] does not contain object [%s]. " "Please fix the configuration or " "ensure that the module is installed properly" % ( name, module_name, object_name))
5,346,513
def test_ls_eq_v(eq_date): """Test Ls for the vernal equinox.""" ls = orbit.Ls(eq_date) assert approx(ls, abs=0.05) == 0
5,346,514
def twitter_preprocess(): """ ekphrasis-social tokenizer sentence preprocessor. Substitutes a series of terms by special coins when called over an iterable (dataset) """ norm = ['url', 'email', 'percent', 'money', 'phone', 'user', 'time', 'date', 'number'] ann = {"hashtag", "elongated", "allcaps", "repeated", "emphasis", "censored"} preprocessor = TextPreProcessor( normalize=norm, annotate=ann, all_caps_tag="wrap", fix_text=True, segmenter="twitter_2018", corrector="twitter_2018", unpack_hashtags=True, unpack_contractions=True, spell_correct_elong=False, tokenizer=SocialTokenizer(lowercase=True).tokenize, dicts=[emoticons]).pre_process_doc def preprocess(name, dataset): description = " Ekphrasis-based preprocessing dataset " description += "{}...".format(name) data = [preprocessor(x) for x in tqdm(dataset, desc=description)] return data return preprocess
5,346,515
def rotation_matrix(axis, theta): """ Return the rotation matrix associated with counterclockwise rotation about the given axis by theta radians. """ axis = np.asarray(axis) axis = axis / math.sqrt(np.dot(axis, axis)) a = math.cos(theta / 2.0) b, c, d = -axis * math.sin(theta / 2.0) aa, bb, cc, dd = a * a, b * b, c * c, d * d bc, ad, ac, ab, bd, cd = b * c, a * d, a * c, a * b, b * d, c * d return np.array([[aa + bb - cc - dd, 2 * (bc + ad), 2 * (bd - ac), 0], [2 * (bc - ad), aa + cc - bb - dd, 2 * (cd + ab), 0], [2 * (bd + ac), 2 * (cd - ab), aa + dd - bb - cc, 0], [0,0,0,1]])
5,346,516
def get_routes(interface: Type[Interface]) -> List[ParametrizedRoute]: """ Retrieves the routes from an interface. """ if not issubclass(interface, Interface): raise TypeError('expected Interface subclass, got {}' .format(interface.__name__)) routes = [] for member in interface.members(): if isinstance(member, _InterfaceMethod): route_data = getattr(member.original, '__route__', None) if route_data is not None: assert isinstance(route_data, RouteData) routes.append(ParametrizedRoute.from_function( route_data, interface, member.original)) return routes
5,346,517
def _load_mnist_dataset(shape, path, name='mnist', url='http://yann.lecun.com/exdb/mnist/'): """A generic function to load mnist-like dataset. Parameters: ---------- shape : tuple The shape of digit images. path : str The path that the data is downloaded to. name : str The dataset name you want to use(the default is 'mnist'). url : str The url of dataset(the default is 'http://yann.lecun.com/exdb/mnist/'). """ path = os.path.join(path, name) # Define functions for loading mnist-like data's images and labels. # For convenience, they also download the requested files if needed. def load_mnist_images(path, filename): filepath = maybe_download_and_extract(filename, path, url) logging.info(filepath) # Read the inputs in Yann LeCun's binary format. with gzip.open(filepath, 'rb') as f: data = np.frombuffer(f.read(), np.uint8, offset=16) # The inputs are vectors now, we reshape them to monochrome 2D images, # following the shape convention: (examples, channels, rows, columns) data = data.reshape(shape) # The inputs come as bytes, we convert them to float32 in range [0,1]. # (Actually to range [0, 255/256], for compatibility to the version # provided at http://deeplearning.net/data/mnist/mnist.pkl.gz.) return data / np.float32(256) def load_mnist_labels(path, filename): filepath = maybe_download_and_extract(filename, path, url) # Read the labels in Yann LeCun's binary format. with gzip.open(filepath, 'rb') as f: data = np.frombuffer(f.read(), np.uint8, offset=8) # The labels are vectors of integers now, that's exactly what we want. return data # Download and read the training and test set images and labels. logging.info("Load or Download {0} > {1}".format(name.upper(), path)) X_train = load_mnist_images(path, 'train-images-idx3-ubyte.gz') y_train = load_mnist_labels(path, 'train-labels-idx1-ubyte.gz') X_test = load_mnist_images(path, 't10k-images-idx3-ubyte.gz') y_test = load_mnist_labels(path, 't10k-labels-idx1-ubyte.gz') # We reserve the last 10000 training examples for validation. X_train, X_val = X_train[:-10000], X_train[-10000:] y_train, y_val = y_train[:-10000], y_train[-10000:] # We just return all the arrays in order, as expected in main(). # (It doesn't matter how we do this as long as we can read them again.) X_train = np.asarray(X_train, dtype=np.float32) y_train = np.asarray(y_train, dtype=np.int32) X_val = np.asarray(X_val, dtype=np.float32) y_val = np.asarray(y_val, dtype=np.int32) X_test = np.asarray(X_test, dtype=np.float32) y_test = np.asarray(y_test, dtype=np.int32) return X_train, y_train, X_val, y_val, X_test, y_test
5,346,518
def _get_realm(response): """Return authentication realm requested by server for 'Basic' type or None :param response: requests.response :type response: requests.Response :returns: realm :rtype: str | None """ if 'www-authenticate' in response.headers: auths = response.headers['www-authenticate'].split(',') basic_realm = next((auth_type for auth_type in auths if auth_type.rstrip().lower().startswith("basic")), None) if basic_realm: realm = basic_realm.split('=')[-1].strip(' \'\"').lower() return realm else: return None else: return None
5,346,519
def exp_mantissa(num, base=10): """Returns e, m such that x = mb^e""" if num == 0: return 1, 0 # avoid floating point error eg log(1e3, 10) = 2.99... exp = math.log(abs(num), base) exp = round(exp, FLOATING_POINT_ERROR_ON_LOG_TENXPONENTS) exp = math.floor(exp) # 1 <= mantissa < 10 mantissa = num / (base**exp) return exp, mantissa
5,346,520
def decide_if_taxed(n_taxed: set[str]) -> Callable[[str], bool]: """To create an decider function for omitting taxation. Args: n_taxed: The set containing all items, which should not be taxed. If empty, a default set will be chosen. Returns: Decider function for omitting taxation. """ local_set = _D_TAX_E if n_taxed: local_set = n_taxed def _decide_if_taxed(in_str: str, /) -> bool: """To check whether an item is taxed or not. A very simple function, which look up the item in a given set. This set contains all item names, which should omitted from taxation. Args: in_str: The name of the purchased item, which should be checked for taxation. Returns: Whether the item is taxed or not. """ for item_sub_name in in_str.split(" "): if item_sub_name in local_set: return False return True return _decide_if_taxed
5,346,521
def test_total_values_for_two_separate_transactions(): """ Tests 'total_market_value', 'total_unrealised_pnl', 'total_realised_pnl' and 'total_pnl' for single transactions in two separate assets. """ ph = PositionHandler_Cash_MC() cash_transaction_1 = tph.get_cash_leg_transaction( 'USD', 1.0, pd.Timestamp('2015-05-06 15:00:00', tz=pytz.UTC), 10000, 17.4 ) cash_transaction_2 = tph.get_cash_leg_transaction( 'GBP', 1.45, pd.Timestamp('2015-05-06 15:00:00', tz=pytz.UTC), 22000, 22.4 ) ph.transact_cash_position(cash_transaction_1) ph.transact_cash_position(cash_transaction_2) # Check all total values assert ph.total_cash_market_value_local('GBP') == 22000 assert ph.total_cash_market_value_local('USD') == 10000 assert ph.total_cash_market_value_base() == 41900.0 assert np.isclose(ph.total_cash_unrealised_pnl_base(), -39.8) #49.88 is fx'ing GBP unrealised. 39.8 is not i.e the commissions assert ph.total_cash_realised_pnl_local('GBP') == 0.0 assert ph.total_cash_realised_pnl_local('USD') == 0.0 assert np.isclose(ph.total_cash_pnl_local('GBP'), 0.0) assert np.isclose(ph.total_cash_pnl_local('USD'), 0.0)
5,346,522
def generate_two_cat_relation_heat_map(): """ A correlation matrix for categories """ data = Heatmap( z=df_categories.corr(), y=df_categories.columns, x=df_categories.columns) title = 'Correlation Distribution of Categories' y_title = 'Category' x_title = 'Category' return generate_graph_with_template(data, title, y_title, x_title)
5,346,523
def build_phase2(VS, FS, NS, VT, VTN, marker, wc): """ Build phase 2 sparse matrix M_P2 (closest valid point term) with source vertices (nS), triangles (mS), and target vertices (nT) :param VS: deformed source mesh from previous step nS x 3 :param FS: triangle index of source mesh mS * 3 :param NS: triangle normals of source mesh mS * 3 :param VT: target mesh nT * 3 :param VTN: Vertex normals of target mesh nT * 3 :param marker: marker constraint :param wc: weight value :return: M_P2: (3 * nS) x (3 * (nS + mS)) big sparse matrix C_P2: (3 * nS) matrix """ VSN = calc_vertex_norm(FS, NS) S_size = VS.shape[0] valid_pt = np.zeros((S_size, 2)) C_P2 = np.zeros((3*S_size, 1)) for j in range(0, S_size): if len(np.where(marker[:, 0]-1 == j)[0]) != 0: valid_pt[j, :] = np.array([j, marker[marker[:, 0]-1 == j, 1] - 1], dtype=np.int32) else: valid_pt[j, :] = np.array([j, find_closest_validpt(VS[j, :], VSN[j, :], VT, VTN)], dtype=np.int32) C_P2[np.linspace(0, 2, 3, dtype=np.int32) + j*3, 0] = wc * VT[int(valid_pt[j, 1]), :].T M_P2 = sparse.coo_matrix((np.tile(wc, [3*S_size, 1])[:, 0], (np.arange(0, 3*S_size), np.arange(0, 3*S_size))), shape=(3*S_size, 3*(VS.shape[0]+FS.shape[0]))) return M_P2, C_P2
5,346,524
def test_list_input(func): """Test that bn.xxx gives the same output as bn.slow.xxx for list input.""" msg = "\nfunc %s | input %s (%s) | shape %s\n" msg += "\nInput array:\n%s\n" name = func.__name__ if name == "replace": return func0 = eval("bn.slow.%s" % name) for i, a in enumerate(lists()): with warnings.catch_warnings(): warnings.simplefilter("ignore") try: actual = func(a) desired = func0(a) except TypeError: actual = func(a, 2) desired = func0(a, 2) a = np.array(a) tup = (name, "a" + str(i), str(a.dtype), str(a.shape), a) err_msg = msg % tup assert_array_almost_equal(actual, desired, err_msg=err_msg)
5,346,525
def voronoi_to_dist(voronoi): """ voronoi is encoded """ def decoded_nonstacked(p): return np.right_shift(p, 20) & 1023, np.right_shift(p, 10) & 1023, p & 1023 x_i, y_i, z_i = np.indices(voronoi.shape) x_v, y_v, z_v = decoded_nonstacked(voronoi) return np.sqrt((x_v - x_i) ** 2 + (y_v - y_i) ** 2 + (z_v - z_i) ** 2)
5,346,526
def post_update_view(request): """View To Update A Post For Logged In Users""" if request.method == 'POST': token_type, token = request.META.get('HTTP_AUTHORIZATION').split() if(token_type != 'JWT'): return Response({'detail': 'No JWT Authentication Token Found'}, status=status.HTTP_400_BAD_REQUEST) token_data = {'token': token} try: valid_data = VerifyJSONWebTokenSerializer().validate(token_data) logged_in_user = valid_data.get('user') except: return Response({'detail': 'Invalid Token'}, status.HTTP_400_BAD_REQUEST) updated_data = request.data instance = Post.objects.get(slug=updated_data.get('slug')) admin_user = User.objects.get(pk=1) # PK Of Admin User Is 1 if(instance.author == logged_in_user or logged_in_user == admin_user): updated_data.pop('slug') serializer = PostUpdateSerializer(instance, data=updated_data) if serializer.is_valid(): serializer.save() return Response(serializer.data, status=status.HTTP_202_ACCEPTED) else: return Response({'detail': 'Something Went Wrong.'}, status=status.HTTP_400_BAD_REQUEST) else: return Response({'detail': 'You Are Not Authorised To Edit This Post'}, status.HTTP_403_FORBIDDEN) else: return Response({'detail': 'You Are Not Authorised To Edit This Post'}, status.HTTP_403_FORBIDDEN)
5,346,527
def assert_equal(actual: Tuple[int, int, int, int], desired: Tuple[int, int, int, int]): """ usage.scipy: 14 usage.skimage: 7 """ ...
5,346,528
def _is_binary_classification(class_list: List[str]) -> bool: """Returns true for binary classification problems.""" if not class_list: return False return len(class_list) == 1
5,346,529
def virtualenv_support_dirs(): """Context manager yielding either [virtualenv_support_dir] or []""" # normal filesystem installation if os.path.isdir(join(HERE, "virtualenv_support")): yield [join(HERE, "virtualenv_support")] elif IS_ZIPAPP: tmpdir = tempfile.mkdtemp() try: with zipfile.ZipFile(HERE) as zipf: for member in zipf.namelist(): if os.path.dirname(member) == "virtualenv_support": zipf.extract(member, tmpdir) yield [join(tmpdir, "virtualenv_support")] finally: shutil.rmtree(tmpdir) # probably a bootstrap script elif os.path.splitext(os.path.dirname(__file__))[0] != "virtualenv": try: # noinspection PyUnresolvedReferences import virtualenv except ImportError: yield [] else: yield [join(os.path.dirname(virtualenv.__file__), "virtualenv_support")] # we tried! else: yield []
5,346,530
def create_post_like(author, post): """ Create a new post like given an author and post """ return models.Like.objects.create(author=author, post=post)
5,346,531
def translate_entries(yamldoc, base_url): """ Reads the field `entries` from the YAML document, processes each entry that is read using the given base_url, and appends them all to a list of processed entries that is then returned. """ if 'entries' in yamldoc and type(yamldoc['entries']) is list: entries = [] for i, entry in enumerate(yamldoc['entries']): entries.append(process_entry(base_url, i, entry)) return entries
5,346,532
def create_task_macapp(self): """ To compile an executable into a Mac application (a .app), set its *mac_app* attribute:: def build(bld): bld.shlib(source='a.c', target='foo', mac_app=True) To force *all* executables to be transformed into Mac applications:: def build(bld): bld.env.MACAPP = True bld.shlib(source='a.c', target='foo') """ if self.env['MACAPP'] or getattr(self, 'mac_app', False): out = self.link_task.outputs[0] name = bundle_name_for_output(out) dir = self.create_bundle_dirs(name, out) n1 = dir.find_or_declare(['Contents', 'MacOS', out.name]) self.apptask = self.create_task('macapp', self.link_task.outputs, n1) inst_to = getattr(self, 'install_path', '/Applications') + '/%s/Contents/MacOS/' % name self.bld.install_files(inst_to, n1, chmod=Utils.O755) if getattr(self, 'mac_files', None): # this only accepts files; they will be installed as seen from mac_files_root mac_files_root = getattr(self, 'mac_files_root', None) if isinstance(mac_files_root, str): mac_files_root = self.path.find_node(mac_files_root) if not mac_files_root: self.bld.fatal('Invalid mac_files_root %r' % self.mac_files_root) res_dir = n1.parent.parent.make_node('Resources') inst_to = getattr(self, 'install_path', '/Applications') + '/%s/Resources' % name for node in self.to_nodes(self.mac_files): relpath = node.path_from(mac_files_root or node.parent) self.create_task('macapp', node, res_dir.make_node(relpath)) self.bld.install_as(os.path.join(inst_to, relpath), node) if getattr(self, 'mac_resources', None): # TODO remove in waf 1.9 res_dir = n1.parent.parent.make_node('Resources') inst_to = getattr(self, 'install_path', '/Applications') + '/%s/Resources' % name for x in self.to_list(self.mac_resources): node = self.path.find_node(x) if not node: raise Errors.WafError('Missing mac_resource %r in %r' % (x, self)) parent = node.parent if os.path.isdir(node.abspath()): nodes = node.ant_glob('**') else: nodes = [node] for node in nodes: rel = node.path_from(parent) self.create_task('macapp', node, res_dir.make_node(rel)) self.bld.install_as(inst_to + '/%s' % rel, node) if getattr(self.bld, 'is_install', None): # disable the normal binary installation self.install_task.hasrun = Task.SKIP_ME
5,346,533
def get_in(obj, lookup, default=None): """ Walk obj via __getitem__ for each lookup, returning the final value of the lookup or default. """ tmp = obj for l in lookup: try: # pragma: no cover tmp = tmp[l] except (KeyError, IndexError, TypeError): # pragma: no cover return default return tmp
5,346,534
def GetSpec(resource_type, message_classes, api_version): """Returns a Spec for the given resource type.""" spec = _GetSpecsForVersion(api_version) if resource_type not in spec: raise KeyError('"%s" not found in Specs for version "%s"' % (resource_type, api_version)) spec = spec[resource_type] table_cols = [] for name, action in spec.table_cols: if isinstance(action, six.string_types): table_cols.append((name, property_selector.PropertyGetter(action))) elif callable(action): table_cols.append((name, action)) else: raise ValueError('expected function or property in table_cols list: {0}' .format(spec)) message_class = getattr(message_classes, spec.message_class_name) fields = list(_ProtobufDefinitionToFields(message_class)) return Spec(message_class=message_class, fields=fields, table_cols=table_cols, transformations=spec.transformations, editables=spec.editables)
5,346,535
def ips_between(start: str, end: str) -> int: """ A function that receives two IPv4 addresses, and returns the number of addresses between them (including the first one, excluding the last one). All inputs will be valid IPv4 addresses in the form of strings. The last address will always be greater than the first one. :param start: :param end: :return: """ ip_start = [int(a) for a in start.split('.')] ip_end = [int(b) for b in end.split('.')] ips = zip(ip_start, ip_end) ips_range = [0, 0, 0, 0] for ip_id, ip in enumerate(ips): calc_ip_range(ip, ip_id, ips_range) return calc_result(ips_range)
5,346,536
def configure_mail_body(msg, template_name, context): """Set mail body based on text and html templates. Args: msg (object): flask_mail.Message instance used to send mail template_name (str): name of mail body templates (without the extension) context (dict): Any variables needed within the mail templates """ msg.body = render_template(f'mail/{template_name}.txt', **context) msg.html = render_template(f'mail/{template_name}.html', **context)
5,346,537
def test_multiple_lfcs_el_simple(): """Test sounding with multiple LFCs.""" levels, temperatures, dewpoints = multiple_intersections() profile = parcel.parcel_profile_with_lcl(pressure=levels, temperature=temperatures, parcel_pressure=levels[0], parcel_temperature=temperatures[0], parcel_dewpoint=dewpoints[0]) lfc_el = parcel.lfc_el(profile=profile) assert_almost_equal(lfc_el.lfc_pressure, 884.14790, 3) assert_almost_equal(lfc_el.lfc_temperature, 13.95707016+273.15, 3) assert_almost_equal(lfc_el.el_pressure, 228.151466, 3) assert_almost_equal(lfc_el.el_temperature, -56.81015490+273.15, 3)
5,346,538
def tmNstate(trTrg): """Given (newq, new_tape_sym, dir), return newq. """ return trTrg[0]
5,346,539
def parse_arguments(args): """ Parse the arguments from the user """ parser = argparse.ArgumentParser( description= "Create UniRef database for HUMAnN2\n", formatter_class=argparse.RawTextHelpFormatter) parser.add_argument( "-v","--verbose", help="additional output is printed\n", action="store_true", default=False) parser.add_argument( "-i","--input", help="the UniRef fasta file to read\n", required=True) parser.add_argument( "-o","--output", help="the UniRef database to write\n", required=True) parser.add_argument( "-f","--filter", help="string to use for filtering (example: uncharacterized)\n") parser.add_argument( "--exclude-list", help="file of id list to use for filtering (example: id_list.tsv)\n") parser.add_argument( "--include-list", help="file of id list to use for filtering (example: id_list.tsv)\n") parser.add_argument( "-d","--format-database", choices=["fasta","rapsearch","diamond"], default="fasta", help="format of output files (default: fasta)\n") return parser.parse_args()
5,346,540
def _preprocess_continuous_variable(df: pd.DataFrame, var_col: str, bins: int, min_val: float = None, max_val: float = None) -> pd.DataFrame: """ Pre-processing the histogram for continuous variables by splitting the variable in buckets. :param df: (pd.DataFrame) Data frame containing at least the continuous variable :param var_col: (str) Name of the continuous variable :param bins: (int) Preferred number of bins in histogram :param min_val: (float, optional) Minimal value to be taken by the variable (if other than the minimum observed in the data. :param max_val: (float, optional) Maximal value to be taken by the variable (if other than the maximum observed in the data. :return: pd.DataFrame with *var_col* transformed to range """ # set *min_val* and *max_val* to minimal and maximal values observed in data if min_val is None: min_val = df[var_col].min() if max_val is None: max_val = df[var_col].max() # compute the most appropriate step size for the histogram step_size, decimals = _compute_step_size(min_val, max_val, bins) min_val = min_val - (min_val % step_size) # cut values into buckets df[var_col] = pd.cut(df[var_col], list(np.arange(min_val, max_val, step_size)) + [max_val], include_lowest=True) # convert buckets into strings if decimals == 0: df[var_col] = df[var_col].map(lambda x: f"{int(np.round(x.left))} - {int(np.round(x.right))}") else: df[var_col] = df[var_col].map(lambda x: f"{np.round(x.left, decimals)} - {np.round(x.right, decimals)}") return df
5,346,541
def create_olaf(): """ Creates a bird in the free space. """ y_coordnate = random.uniform(50,300) new_olaf = Olaf(background_module.bg.get_width(), y_coordnate) Olaf.olafs_list.append(new_olaf) Olaf.collision_olaf.append(new_olaf) # To check collision
5,346,542
def main() -> None: """ Main game logic loop shim. """ from providers import cpucheck cpucheck.instantiate() if cpucheck.is_over_limit(): return import logic logic.main()
5,346,543
def reset_environ(): """ Resets `os.environ` to its prior state after the fixtured test finishes. """ old_environ = os.environ.copy() yield os.environ.clear() os.environ.update(old_environ)
5,346,544
def get_pairs(labels): """ For the labels of a given word, creates all possible pairs of labels that match sense """ result = [] unique = np.unique(labels) for label in unique: ulabels = np.where(labels==label)[0] # handles when a word sense has only one occurrence if len(ulabels) == 1: # returns the instance paired with itself, so it can be counted result.append((ulabels[0], ulabels[0])) else: for p in itertools.combinations(ulabels, 2): result.append(p) return result
5,346,545
def iredv(tvp,tton): """ makes sop tvp irredundant relative to onset truth table""" res = [] red = list(tvp) for j in range(len(tvp)): tvj=tvp[j]&tton #care part of cube j if (tvj&~or_redx(red,j)) == m.const(0): # reduce jth cube to 0 red[j]=m.const(0) else: #keep cube j res = res + [tvp[j]] return res
5,346,546
def showPids(): """ Show Information for PIDs created in a KFD (Compute) context """ printLogSpacer(' KFD Processes ') dataArray = [] dataArray.append(['PID', 'PROCESS NAME', 'GPU(s)', 'VRAM USED', 'SDMA USED', 'CU OCCUPANCY']) pidList = getPidList() if not pidList: printLog(None, 'No KFD PIDs currently running', None) printLogSpacer() return dv_indices = c_void_p() num_devices = c_uint32() proc = rsmi_process_info_t() for pid in pidList: gpuNumber = 'UNKNOWN' vramUsage = 'UNKNOWN' sdmaUsage = 'UNKNOWN' cuOccupancy = 'UNKNOWN' ret = rocmsmi.rsmi_compute_process_gpus_get(int(pid), None, byref(num_devices)) if rsmi_ret_ok(ret): dv_indices = (c_uint32 * num_devices.value)() ret = rocmsmi.rsmi_compute_process_gpus_get(int(pid), dv_indices, byref(num_devices)) if rsmi_ret_ok(ret): gpuNumber = str(num_devices.value) else: logging.debug('Unable to fetch GPU number by PID') ret = rocmsmi.rsmi_compute_process_info_by_pid_get(int(pid), byref(proc)) if rsmi_ret_ok(ret): vramUsage = proc.vram_usage sdmaUsage = proc.sdma_usage cuOccupancy = proc.cu_occupancy else: logging.debug('Unable to fetch process info by PID') dataArray.append([pid, getProcessName(pid), str(gpuNumber), str(vramUsage), str(sdmaUsage), str(cuOccupancy)]) printLog(None, 'KFD process information:', None) print2DArray(dataArray) printLogSpacer()
5,346,547
def main(argv): """Parse arguments, extract data, and render the linker script to file""" # pylint: disable=too-many-locals parsed_args = parse_arguments(argv) template = get_template(parsed_args) dts = pydevicetree.Devicetree.parseFile( parsed_args.dts, followIncludes=True) memories = get_memories(dts) print_memories(memories) sorted_ram_memories = get_sorted_ram_memories(dts) ram, rom, itim, lim = get_load_map(memories, scratchpad=parsed_args.scratchpad) text_in_itim = False if parsed_args.ramrodata and get_itim_length(memories) >= MAGIC_RAMRODATA_TEXT_THRESHOLD: text_in_itim = True print(".text section included in ITIM", file=sys.stderr) elif parsed_args.ramrodata: print(".text section included in ROM", file=sys.stderr) harts = dts.get_by_path("/cpus").children chosenboothart = dts.chosen("metal,boothart") if chosenboothart: boot_hart = dts.get_by_reference(chosenboothart[0]).get_reg()[0][0] elif len(harts) > 1: boot_hart = 1 else: boot_hart = 0 if len(sorted_ram_memories) == 0: # If there are no rams to scrub, don't bother scrubbing them ecc_scrub = 0 elif dts.chosen("metal,eccscrub"): # Otherwise default to scrubbing if metal,eccscrub = <1>; ecc_scrub = dts.chosen("metal,eccscrub")[0] else: ecc_scrub = 0 # Pass sorted memories to the template generator so that the generated linker # script is reproducible. sorted_memories = list(memories.values()) sorted_memories.sort(key=lambda m: m["name"]) values = { "memories": sorted_memories, "ram_memories": sorted_ram_memories, "default_stack_size": "0x400", "default_heap_size": "0x800", "num_harts": len(harts), "boot_hart": boot_hart, "chicken_bit": 1, "eccscrub_bit": ecc_scrub, "text_in_itim": text_in_itim, "rom": rom, "itim": itim, "lim": lim, "ram": ram, } if parsed_args.output: parsed_args.output.write(template.render(values)) parsed_args.output.close() else: print(template.render(values))
5,346,548
def ridder_fchp(st, target=0.02, tol=0.001, maxiter=30, maxfc=0.5, config=None): """Search for highpass corner using Ridder's method. Search such that the criterion that the ratio between the maximum of a third order polynomial fit to the displacement time series and the maximum of the displacement timeseries is a target % within a tolerance. This algorithm searches between a low initial corner frequency a maximum fc. Method developed originally by Scott Brandenberg Args: st (StationStream): Stream of data. target (float): target percentage for ratio between max polynomial value and max displacement. tol (float): tolereance for matching the ratio target maxiter (float): maximum number of allowed iterations in Ridder's method maxfc (float): Maximum allowable value of the highpass corner freq. int_method (string): method used to perform integration between acceleration, velocity, and dispacement. Options are "frequency_domain", "time_domain_zero_init" or "time_domain_zero_mean" config (dict): Configuration dictionary (or None). See get_config(). Returns: StationStream. """ if not st.passed: return st if config is None: config = get_config() processing_steps = config["processing"] ps_names = [list(ps.keys())[0] for ps in processing_steps] ind = int(np.where(np.array(ps_names) == "highpass_filter")[0][0]) hp_args = processing_steps[ind]["highpass_filter"] frequency_domain = hp_args["frequency_domain"] if frequency_domain is True: filter_code = 1 elif frequency_domain is False: filter_code = 0 for tr in st: initial_corners = tr.getParameter("corner_frequencies") initial_f_hp = initial_corners["highpass"] new_f_hp = get_fchp( dt=tr.stats.delta, acc=tr.data, target=target, tol=tol, poly_order=FORDER, maxiter=maxiter, fchp_max=maxfc, filter_type=filter_code, ) # Method did not converge if new_f_hp reaches maxfc if (maxfc - new_f_hp) > 1e9: tr.fail("auto_fchp did not find an acceptable f_hp.") continue if new_f_hp > initial_f_hp: tr.setParameter( "corner_frequencies", { "type": "snr_polyfit", "highpass": new_f_hp, "lowpass": initial_corners["lowpass"], }, ) return st
5,346,549
def CommitOffsite( backup_name, backup_suffix=None, output_stream=sys.stdout, preserve_ansi_escape_sequences=False, ): """\ Commits data previously generated by Offsite. This can be useful when additional steps must be taken (for example, upload) before a Backup can be considered as successful. """ with StreamDecorator.GenerateAnsiSequenceStream( output_stream, preserve_ansi_escape_sequences=preserve_ansi_escape_sequences, ) as output_stream: with output_stream.DoneManager( line_prefix="", prefix="\nResults: ", suffix="\n", ) as dm: json_filename = _CreateJsonFilename(backup_name) pending_json_filename = _CreatePendingJsonFilename(json_filename) if not os.path.isfile(pending_json_filename): dm.stream.write("ERROR: Pending data was not found.\n") dm.result = -1 return dm.result FileSystem.RemoveFile(json_filename) shutil.move(pending_json_filename, json_filename) if backup_suffix: shutil.copy2(json_filename, "{}.{}".format(json_filename, backup_suffix)) dm.stream.write("The pending data has been committed.\n") return dm.result
5,346,550
def get_impropers(bonds): """ Iterate over bonds to get impropers. Choose all three bonds that have one atom in common. For each set of bonds you have 3 impropers where one of the noncommon atoms is out of plane. Parameters ---------- bonds : list List of atom ids that make up bonds. Returns ------- list List of atom id quadruplets that make up a improper. """ impropers, checked = [], [] for bond in bonds: for atom in bond: if atom not in checked: bonded_list = [] for bond2 in bonds: if atom in bond2: bonded_list.append(bond2[1 - bond2.index(atom)]) if len(bonded_list) >= 3: for triplet in combinations(bonded_list, 3): for out_of_plane in triplet: imp = tuple([out_of_plane, atom] + sorted([i for i in triplet if i != out_of_plane])) impropers.append(imp) checked.append(atom) return sorted(impropers)
5,346,551
def plot_pain_against_activities(data): """Plot average pain against activities (from diary)""" fig, axes = plt.subplots(3, 2, figsize=(15, 10), sharey=True, sharex=False) fig.suptitle("Participant 8: activities against average pain", fontsize=13) sns.scatterplot(x="household", y="average_pain", data=data, ax=axes[0,0]) sns.scatterplot(x="excercise", y="average_pain",data=data, ax=axes[0,1]) sns.scatterplot(x="medical_appointment", y="average_pain", data=data, ax=axes[1,0]) sns.scatterplot(x="rest", y="average_pain", data=data, ax=axes[1,1]) sns.scatterplot(x="selfcare", y="average_pain", data=data, ax=axes[2,0]) sns.scatterplot(x="social", y="average_pain", data=data, ax=axes[2,1]) plt.tight_layout()
5,346,552
def get_ret_tev_return(*args): """get_ret_tev_return(int n) -> ea_t""" return _idaapi.get_ret_tev_return(*args)
5,346,553
def make_figure_6(prefix=None, rng=None, colors=None): """ Figures 6, Comparison of Performance Ported from MATLAB Code Nicholas O'Donoughue 24 March 2021 :param prefix: output directory to place generated figure :param rng: random number generator :param colors: colormap for plotting :return: figure handle """ # Vary Time-Bandwidth Product tbwp_vec_db = np.arange(start=10., stop=31., step=10., dtype=int) tbwp_vec_lin = np.expand_dims(db_to_lin(tbwp_vec_db), axis=0).astype(int) input_snr_vec_db = np.arange(start=-20, stop=10.1, step=0.1) input_snr_vec_lin = np.expand_dims(db_to_lin(input_snr_vec_db), axis=1) output_snr_vec_lin = tbwp_vec_lin*input_snr_vec_lin**2/(1+2*input_snr_vec_lin) # output_snr_vec_db = lin_to_db(output_snr_vec_lin) # Energy Detector Performance prob_fa = 1e-6 threshold_ed = stats.chi2.ppf(q=1-prob_fa, df=2*tbwp_vec_lin) prob_det_ed = stats.ncx2.sf(x=threshold_ed, df=2*tbwp_vec_lin, nc=2*tbwp_vec_lin*input_snr_vec_lin) # Cross-Correlator Performance threshold_xc = stats.chi2.ppf(q=1-prob_fa, df=2) prob_det_xc = stats.ncx2.sf(x=threshold_xc/(1+2*input_snr_vec_lin), df=2, nc=2*output_snr_vec_lin) # Monte Carlo Trials input_snr_vec_coarse_db = input_snr_vec_db[::10] input_snr_vec_coarse_lin = db_to_lin(input_snr_vec_coarse_db) num_monte_carlo = int(1e4) num_tbwp = int(tbwp_vec_lin.size) num_snr = int(input_snr_vec_coarse_lin.size) # Generate noise vectors noise_pwr = 1 # Unit Variance prob_det_ed_mc = np.zeros(shape=(num_snr, num_tbwp)) prob_det_xc_mc = np.zeros(shape=(num_snr, num_tbwp)) for idx_tbwp, tbwp in enumerate(np.ravel(tbwp_vec_lin)): # Generate the noise vectors noise1 = np.sqrt(noise_pwr/2)*(rng.standard_normal(size=(tbwp, num_monte_carlo)) + 1j*rng.standard_normal(size=(tbwp, num_monte_carlo))) noise2 = np.sqrt(noise_pwr/2)*(rng.standard_normal(size=(tbwp, num_monte_carlo)) + 1j*rng.standard_normal(size=(tbwp, num_monte_carlo))) # Generate a signal vector signal = np.sqrt(1/2)*(rng.standard_normal(size=(tbwp, num_monte_carlo)) + 1j*rng.standard_normal(size=(tbwp, num_monte_carlo))) phase_difference = np.exp(1j*rng.uniform(low=0, high=2*np.pi, size=(1, num_monte_carlo))) for idx_snr, snr in enumerate(input_snr_vec_coarse_lin): # Scale the signal power to match SNR this_signal = signal * np.sqrt(snr) y1 = this_signal+noise1 y2 = this_signal*phase_difference+noise2 det_result_ed = detector.squareLaw.det_test(z=y1, noise_var=noise_pwr/2, prob_fa=prob_fa) prob_det_ed_mc[idx_snr, idx_tbwp] = np.sum(det_result_ed, axis=None)/num_monte_carlo det_result_xc = detector.xcorr.det_test(y1=y1, y2=y2, noise_var=noise_pwr, num_samples=tbwp, prob_fa=prob_fa) prob_det_xc_mc[idx_snr, idx_tbwp] = np.sum(det_result_xc, axis=None)/num_monte_carlo fig6 = plt.figure() for idx, tbwp in enumerate(tbwp_vec_lin[0, :]): if idx == 0: ed_label = 'ED' xc_label = 'XC' ed_mc_label = 'ED (Monte Carlo)' xc_mc_label = 'XC (Monte Carlo)' else: ed_label = None xc_label = None ed_mc_label = None xc_mc_label = None plt.plot(input_snr_vec_db, prob_det_ed[:, idx], color=colors(idx), linestyle='-', label=ed_label) plt.plot(input_snr_vec_db, prob_det_xc[:, idx], color=colors(idx), linestyle='--', label=xc_label) plt.scatter(input_snr_vec_coarse_db, prob_det_ed_mc[:, idx], color=colors(idx), marker='^', label=ed_mc_label) plt.scatter(input_snr_vec_coarse_db, prob_det_xc_mc[:, idx], color=colors(idx), marker='x', label=xc_mc_label) plt.legend(loc='lower right') # Create ellipses ax = plt.gca() ell = Ellipse(xy=(2, .4), width=5, height=.05) ell.set_fill(False) ell.set_edgecolor(colors(0)) ax.add_artist(ell) plt.annotate(s='TB=10', xy=(-.5, .4), xytext=(-16, .3), arrowprops=dict(arrowstyle='-', color=colors(0))) ell = Ellipse(xy=(-3.5, .5), width=3, height=.05) ell.set_fill(False) ell.set_edgecolor(colors(1)) ax.add_artist(ell) plt.annotate(s='TB=100', xy=(-5, .5), xytext=(-16, .5), arrowprops=dict(arrowstyle='-', color=colors(1))) ell = Ellipse(xy=(-8.5, .6), width=3, height=.05) ell.set_fill(False) ell.set_edgecolor(colors(2)) ax.add_artist(ell) plt.annotate(s='TB=1,000', xy=(-10, .6), xytext=(-16, .7), arrowprops=dict(arrowstyle='-', color=colors(2))) # Save figure if prefix is not None: plt.savefig(prefix + 'fig6.svg') plt.savefig(prefix + 'fig6.png') return fig6
5,346,554
def slim_form(domain_pk=None, form=None): """ What is going on? We want only one domain showing up in the choices. We are replacing the query set with just one object. There are two querysets. I'm not really sure what the first one does, but I know the second one (the widget) removes the choices. The third line removes the default u'--------' choice from the drop down. """ return form
5,346,555
def input(*args): """ Create a new input :param args: args the define a TensorType, can be either a TensorType or a shape and a DType :return: the input expression """ tensor_type = _tensor_type_polymorhpic(*args) return InputTensor(tensor_type, ExpressionDAG.num_inputs)
5,346,556
def test_error_1(): """ >>> d = {'v':'<span></span>'} >>> try: ... print (template("<html>{{= v </html>", d)) ... except ParseError as e: ... print (e) Missing end expression }} on line <string>:1 """
5,346,557
def commong_substring(input_list): """Finds the common substring in a list of strings""" def longest_substring_finder(string1, string2): """Finds the common substring between two strings""" answer = "" len1, len2 = len(string1), len(string2) for i in range(len1): match = "" for j in range(len2): if i + j < len1 and string1[i + j] == string2[j]: match += string2[j] else: if len(match) > len(answer): answer = match match = "" return answer if len(input_list) == 2: return longest_substring_finder(*input_list) if len(input_list) > 2: item0 = input_list[0] for i in range(len(input_list) - 1): item1 = input_list[i + 1] item0 = commong_substring([item0, item1]) return commong_substring([item0, item1]) if len(input_list) == 1: return input_list[0]
5,346,558
def is_valid_url(url): """Checks if a URL is in proper format. Args: url (str): The URL that should be checked. Returns: bool: Result of the validity check in boolean form. """ valid = validators.url(url) if valid: return True else: return False
5,346,559
def check_valid_authentication_options(options, auth_plugin_name): """Validate authentication options, and provide helpful error messages :param required_scope: indicate whether a scoped token is required """ # Get all the options defined within the plugin. plugin_opts = base.get_plugin_options(auth_plugin_name) plugin_opts = {opt.dest: opt for opt in plugin_opts} # NOTE(aloga): this is an horrible hack. We need a way to specify the # required options in the plugins. Using the "required" argument for # the oslo_config.cfg.Opt does not work, as it is not possible to load the # plugin if the option is not defined, so the error will simply be: # "NoMatchingPlugin: The plugin foobar could not be found" msgs = [] # when no auth params are passed in, user advised to use os-cloud if not options.auth: msgs.append(_( 'Set a cloud-name with --os-cloud or OS_CLOUD' )) else: if ('password' in plugin_opts and not (options.auth.get('username') or options.auth.get('user_id'))): msgs.append(_( 'Set a username with --os-username, OS_USERNAME,' ' or auth.username' ' or set a user-id with --os-user-id, OS_USER_ID,' ' or auth.user_id' )) if 'auth_url' in plugin_opts and not options.auth.get('auth_url'): msgs.append(_( 'Set an authentication URL, with --os-auth-url,' ' OS_AUTH_URL or auth.auth_url' )) if 'url' in plugin_opts and not options.auth.get('url'): msgs.append(_( 'Set a service URL, with --os-url, OS_URL or auth.url' )) if 'token' in plugin_opts and not options.auth.get('token'): msgs.append(_( 'Set a token with --os-token, OS_TOKEN or auth.token' )) if msgs: raise exc.CommandError( _('Missing parameter(s): \n%s') % '\n'.join(msgs) )
5,346,560
def codepoint_to_url(codepoint, style): """ Given an emoji's codepoint (e.g. 'U+FE0E') and a non-apple emoji style, returns a url to to the png image of the emoji in that style. Only works for style = 'twemoji', 'noto', and 'blobmoji'. """ base = codepoint.replace('U+', '').lower() if style == 'twemoji': # See discussion in commit 8115b76 for more information about # why the base needs to be patched like this. patched = re.sub(r'0*([1-9a-f][0-9a-f]*)', lambda m: m.group(1), base.replace(' ', '-').replace('fe0f-20e3', '20e3').replace('1f441-fe0f-200d-1f5e8-fe0f', '1f441-200d-1f5e8')) response = requests.get('https://github.com/twitter/twemoji/raw/gh-pages/v/latest') version = response.text if response.ok else None if version: return 'https://github.com/twitter/twemoji/raw/gh-pages/v/%s/72x72/%s.png' \ % (version, patched) else: return 'https://github.com/twitter/twemoji/raw/master/assets/72x72/%s.png' \ % patched elif style == 'noto': return 'https://github.com/googlefonts/noto-emoji/raw/master/png/128/emoji_u%s.png' \ % base.replace(' ', '_') elif style == 'blobmoji': return 'https://github.com/C1710/blobmoji/raw/master/png/128/emoji_u%s.png' \ % base.replace(' ', '_')
5,346,561
def getRNCS(ChargeSA): """The calculation of relative negative charge surface area -->RNCS """ charge=[] for i in ChargeSA: charge.append(float(i[1])) temp=[] for i in ChargeSA: temp.append(i[2]) try: RNCG = min(charge)/sum([i for i in charge if i < 0.0]) return temp[charge.index(min(charge))]/RNCG except: return 0.0
5,346,562
def handle_auth_manager_auth_exception(error): """Return a custom message and 403 status code""" response_header = {'X-REQUEST-ID': util.create_request_id()} return {'message': error.message}, 403, response_header
5,346,563
def get_default_converter(): """Intended only for advanced uses""" return _TYPECATS_DEFAULT_CONVERTER
5,346,564
def login(request): """ :param: request :return: JSON data """ response = {} if request.method == 'GET': username = request.GET.get('username') password = request.GET.get('password') try: usr = models.User.objects.filter(username=username, password=password) if usr: response['status'] = 'success' response['error_msg'] = '' response['data'] = json.loads(serializers.serialize('json', usr)) else: response['status'] = 'failure' response['error_msg'] = '用户名或密码错误,请重试' response['data'] = None except Exception as e: response['status'] = 'error' response['error_msg'] = str(e) response['data'] = None return JsonResponse(response)
5,346,565
def average_win_rate(strategy, baseline=always_roll(4)): """Return the average win rate of STRATEGY against BASELINE. Averages the winrate when starting the game as player 0 and as player 1. """ win_rate_as_player_0 = 1 - make_averaged(winner)(strategy, baseline) win_rate_as_player_1 = make_averaged(winner)(baseline, strategy) return (win_rate_as_player_0 + win_rate_as_player_1) / 2
5,346,566
def decode(chrom): """ Returns the communities of a locus-based adjacency codification in a vector of int where each position is a node id and the value of that position the id of the community where it belongs. To position with the same number means that those two nodes belongs to same community. """ try: size = len(chrom) last_c = 0 communities = [float("inf")] * size pending = set(range(size)) while len(pending) != 0: index = int(pending.pop()) neighbour = int(chrom[index]) if neighbour != -1: communities[index] = min(last_c, communities[index], communities[neighbour]) while neighbour in pending: pending.remove(neighbour) communities[neighbour] = min(last_c, communities[neighbour]) neighbour = int(chrom[neighbour]) last_c += 1 return communities except Exception as e: raise e
5,346,567
def write_video(frames, outfile, fps, frame): """Write frames to video output file Args: frames (list<numpy.ndarray[H, W, 3]>): video frames outfile (string): path to output file fps (int): frames per second to write frame (tuple<int, int>): video spatial dimensions """ fourcc = cv2.VideoWriter_fourcc(*'mp4v') out = cv2.VideoWriter(outfile, fourcc, fps, frame) for frame in tqdm(frames): out.write(frame)
5,346,568
def bin_search(query, data): """ Query is a coordinate interval. Approximate binary search for the query in sorted data, which is a list of coordinates. Finishes when the closest overlapping value of query and data is found and returns the index in data. """ i = int(math.floor(len(data)/2)) # binary search prep lower, upper = 0, len(data) if not upper: return -1 tried = set() rightfound = '' # null value in place of 0, which is a valid value for rightfound while not (data[i][0] <= query[0] and data[i][1] >= query[0]): # query left coordinate not found in data yet if data[i][0] <= query[1] and data[i][1] >= query[1]: # query right found, will keep looking for left rightfound = i if data[i][1] < query[0]: # i is too low of an index lower = i i = int(math.floor((lower + upper)/2.)) else: # i is too high of an index upper = i i = int(math.floor((lower + upper)/2.)) if i in tried or i == upper: if data[i][0] >= query[0] and data[i][1] <= query[1]: # data interval sandwiched inside query break elif i + 1 < len(data) and data[i+1][0] > query[0] and data[i+1][1] < query[1]: # data can be incremented i = i + 1 else: i = rightfound if rightfound != '' else -1 break tried.add(i) return i
5,346,569
def scripts(ctx, config): """ A CLI wrapper for some helpful scripts I use on my personal Notion. """ ctx.ensure_object(dict) ctx.obj['logger'] = setup_logger() ctx.obj['logger'].setLevel(logging.DEBUG) ctx.obj['config'] = Config.load_config(config) ctx.obj['logger'].debug("Parsed config: " + repr(ctx.obj['config'])) ctx.obj['client'] = NotionClient(token_v2=ctx.obj['config'].get('token'))
5,346,570
def validate_config(cnf): """ validate configuration, exit on missing item :param cnf: config handle """ for config_item in ['socket_filename', 'pid_filename']: if cnf.has_section('main') == False or cnf.has_option('main', config_item) == False: print('configuration item main/%s not found in %s/conf/configd.conf' % (config_item, program_path)) sys.exit(0)
5,346,571
def test_unexpected(): """Tests unexpected response is returns as False""" response = requests.models.Response() response.status_code = 'unexpected' assert UEM.check_http_response(response) is False
5,346,572
def get_rating(business_id): """ GET Business rating""" rating = list( db.ratings.aggregate( [{"$group": {"_id": "$business", "pop": {"$avg": "$rating"}}}] ) ) if rating is None: return ( jsonify( { "success": False, "message": "Rating for business {} not found.".format(business_id), } ), 404, ) print(rating) return jsonify({"success": True, "rating": clean_dict_helper(rating)})
5,346,573
def fwhm(x,y): """Calulate the FWHM for a set of x and y values. The FWHM is returned in the same units as those of x.""" maxVal = np.max(y) maxVal50 = 0.5*maxVal #this is to detect if there are multiple values biggerCondition = [a > maxVal50 for a in y] changePoints = [] xPoints = [] for k in range(len(biggerCondition)-1): if biggerCondition[k+1] != biggerCondition[k]: changePoints.append(k) assert len(changePoints) == 2, "More than two crossings of the threshold found." for k in changePoints: # do a polyfit # with the points before and after the point where the change occurs. # note that here we are fitting the x values as a function of the y values. # then we can use the polynom to compute the value of x at the threshold, i.e. at maxVal50. yPolyFit = x[k-1:k+2] xPolyFit = y[k-1:k+2] z = np.polyfit(xPolyFit,yPolyFit,2) p = np.poly1d(z) xThis = p(maxVal50) xPoints.append(xThis) if len(xPoints) == 2: linewidth = xPoints[1] - xPoints[0] else: linewidth = None print(sorted(xPoints)) return linewidth
5,346,574
def plan(c, destroy=False): """Call terraform plan.""" print('planning') c.run('./terraform plan {}'.format('-destroy' if destroy else ''))
5,346,575
def test_deckmodel_add_card(): """ GIVEN `DeckModel`'s `addCard` method, WHEN a `Card` model is initialized and passed to addCard, THEN check whether the card exists in the DeckModel (i.e. that the table card contains a row where deckname=DeckModel.deckname) """ pass
5,346,576
async def alert(friend): """ Alert the user using a service of you choice. This has to be filled manually. Input: friend (str) : the email or phone_number of the friend near you. """ # Example print(bcolors.WARNING + f"{friend} is near you." + bcolors.ENDC)
5,346,577
def throw_job(target_pdb_dir: Path, method: str, output_score_path: Path, qsub: bool, execute: bool, ar: str) -> None: """ Throw a job of the specified MQA method. """ target = target_pdb_dir.stem cmd = [f'./{method}.sh', str(target_pdb_dir), str(output_score_path)] if method in ['ProQ3D', 'P3CMQA']: # If fasta path needed fasta_path = dataset_dir / 'fasta' / f'{target}.fasta' cmd += [str(fasta_path), str(target)] if qsub: qsub_cmd = ['qsub', '-g', 'tga-ishidalab', '-N', f'{method}_{target}'] if ar is not None: qsub_cmd += ['-ar', ar] cmd = qsub_cmd + cmd print(' '.join(cmd)) if execute: subprocess.run(cmd)
5,346,578
def parse_names(input_folder): """ :param input_folder: :return: """ name_set = set() if args.suffix: files = sorted(glob(f'{input_folder}/*{args.suffix}')) else: files = sorted(glob(f'{input_folder}/*')) for file in files: with open(file) as f: for record in SeqIO.parse(f, args.in_format): fname = record.description name = fname.split('_')[0] name_set.add(name) return files, sorted(list(name_set))
5,346,579
def check_dir(data_dir): """ Method used to validate the given data directory path :param data_dir: Absolute path for the data directory :type data_dir: str :raise: Exception """ if not os.path.isdir(data_dir): raise Exception("specified data dir does not exist") if not len(os.listdir(data_dir)) > 0: raise Exception("specified data dir does not contain any files")
5,346,580
def build_charencoder(corpus: Iterable[str], wordlen: int=None) \ -> Tuple[int, Mapping[str, int], TextEncoder]: """ Create a char-level encoder: a Callable, mapping strings into integer arrays. Encoders dispatch on input type: if you pass a single string, you will get a 1D array, if you pass an Iterable of strings, you will get a 2D array where row i encodes the i-th string in the Iterable. :param corpus: an Iterable of strings to extract characters from. The encoder will map any non-ASCII character into the OOV code. :param wordlen: when `wordlen` is None and an encoder receives an Iterable of strings, the second dimension in the output array will be as long as the longest string, otherwise it will be `wordlen` long. In the latter case words exceeding `wordlen` will be trimmed. In both cases empty-spaces are filled with zeros. in the Iterable. If wordlen is not :return: the OOV code, a character mapping representing non-OOV character encodings, an encoder """ if wordlen and wordlen < 1: raise ValueError('`wordlen` must be positive') try: charmap = {char: i + 1 for i, char in enumerate(asciicharset(corpus))} except TypeError: raise ValueError('`corpus` can be either a string or an Iterable of ' 'strings') if not charmap: raise ValueError('the `corpus` is empty') oov = len(charmap) + 1 def encode_string(string: str) -> np.ndarray: if not string: raise ValueError("can't encode empty strings") return np.fromiter((charmap.get(char, oov) for char in string), np.int32, len(string)) def charencoder(target: Union[str, Iterable[str]]): if isinstance(target, str): return encode_string(target) encoded_strings = list(map(encode_string, target)) if not encoded_strings: raise ValueError('there are no `target`') return preprocessing.stack( encoded_strings, [wordlen or -1], np.int32, 0, True)[0] return oov, charmap, charencoder
5,346,581
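A minimal usage sketch for the encoder above. The import path is a placeholder, and the helpers `asciicharset` and `preprocessing.stack` are assumed to live alongside `build_charencoder`.

# Hypothetical import; the real module containing build_charencoder is not named here.
from charencoding import build_charencoder

import numpy as np

oov, charmap, encode = build_charencoder(["hello", "world"], wordlen=8)

single = encode("hello")          # 1-D int32 array of per-character codes
batch = encode(["hi", "there"])   # 2-D array, rows zero-padded/trimmed to wordlen=8

assert single.dtype == np.int32 and single.shape == (5,)
print(oov, single, batch.shape)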
def main():
    """
    Main entry point: registers the application with the HTTP server and
    starts the event loop.
    :return: None
    """
    define("port", default=int(opt_server["port"]), type=int, help="Run on the given port")
    print_info()
    if opt_debug["open_log"] == "true":
        set_log()
    options.parse_command_line()
    http_server = HTTPServer(application(), xheaders=opt_proxy["xheaders"],
                             max_buffer_size=128 * 1024 * 1024,
                             max_body_size=128 * 1024 * 1024)
    http_server.bind(options.port, opt_server["listen"])
    http_server.start(1)
    print("Site initialization is successful!")
    IOLoop.current().start()
5,346,582
def output(angle_tree, output_qubits): """ Define output qubits""" if angle_tree: output_qubits.insert(0, angle_tree.qubit) # qiskit little-endian if angle_tree.left: output(angle_tree.left, output_qubits) else: output(angle_tree.right, output_qubits)
5,346,583
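A tiny stand-alone illustration of the traversal order. `SimpleNamespace` stands in for whatever node class the surrounding state-preparation code actually uses, which is an assumption here.

from types import SimpleNamespace

# Build a 3-level chain: root -> left child -> left grandchild.
leaf = SimpleNamespace(qubit="q2", left=None, right=None)
child = SimpleNamespace(qubit="q1", left=leaf, right=None)
root = SimpleNamespace(qubit="q0", left=child, right=None)

qubits = []
output(root, qubits)
print(qubits)  # ['q2', 'q1', 'q0'] -- the deepest qubit ends up first (little-endian)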
def send(private_key, public_key, email, subscription, payload):
    """Send a web-push notification by shelling out to the bundled Node.js helper."""
    options = {
        'vapidDetails': {
            'subject': 'mailto:%s' % email,
            'privateKey': private_key,
            'publicKey': public_key,
        }
    }
    dic = {
        'options': options,
        'subscription': subscription,
        'payload': payload,
    }
    path = os.path.dirname(os.path.realpath(__file__))
    cmd = ['node', os.path.join(path, 'node', 'send_push_notification.js'),
           '%s' % json.dumps(dic)]
    process = Popen(cmd, stdout=PIPE, stderr=PIPE)
    (stdout, stderr) = process.communicate()
    if process.returncode != 0:
        print(stderr.decode('utf-8', errors='replace'))
    print('out', stdout)
    print('err', stderr)
5,346,584
def Decodingfunc(Codebyte):
    """Version 'A' of the decoding function; it decodes data encoded by the
    'A' coding function."""
    Decodedint = struct.unpack('b', Codebyte)[0]
    N = 0  # number of repetitions
    L = 0  # length of single/multiple sequence
    if Decodedint >= 0:  # single
        N = 1
        L = Decodedint + 1
    else:  # multiple
        L = -Decodedint // 16 + 1
        N = -Decodedint - (L - 1) * 16 + 1
    # print("N =", N, " L =", L)
    return (N, L)
5,346,585
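Two hand-worked calls that follow directly from the arithmetic above; they only assume `struct` is imported, as the function itself already requires.

import struct

# Non-negative code byte: a single run of length Decodedint + 1.
assert Decodingfunc(struct.pack('b', 5)) == (1, 6)    # N=1, L=6

# Negative code byte: repetition count and run length packed together.
assert Decodingfunc(struct.pack('b', -20)) == (5, 2)  # N=5, L=2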
def test_lif_file(lif_file): """Just print the text of all headers, should give an indication of whether all the offsets are correct.""" lif = Container(json_file=lif_file).payload text = lif.text.value view = lif.views[0] for anno in view.annotations: if anno.type.endswith('Header'): print("[{}]".format(text[anno.start:anno.end])) print('')
5,346,586
def _metric_notification_text(metric: MetricNotificationData) -> str: """Return the notification text for the metric.""" new_value = "?" if metric.new_metric_value is None else metric.new_metric_value old_value = "?" if metric.old_metric_value is None else metric.old_metric_value unit = metric.metric_unit if metric.metric_unit.startswith("%") else f" {metric.metric_unit}" old_value_text = " (unchanged)" if new_value == old_value else f", was {old_value}{unit}" return ( f" * *{metric.metric_name}* status is {metric.new_metric_status}, was {metric.old_metric_status}. " f"Value is {new_value}{unit}{old_value_text}.\n" )
5,346,587
def test_run_job_as_admin_with_job_requirements_and_parent_job(): """ A basic unit test of the run() method with an administrative user and job requirements. This test is a fairly minimal test of the run() method. It does not exercise all the potential code paths or provide all the possible run inputs, such as job parameters, cell metadata, etc. Does not include an app_id. Does include a parent job id. """ # Set up data variables client_group = "grotesquememlong" cpus = 4 mem = 32 disk = 2600 # set up mocks mocks = _set_up_mocks(_USER, _TOKEN) sdkmr = mocks[SDKMethodRunner] jrr = mocks[JobRequirementsResolver] # We intentionally do not check the logger methods as there are a lot of them and this is # already a very large test. This may be something to be added later when needed. # Set up call returns. These calls are in the order they occur in the code jrr.normalize_job_reqs.return_value = _create_reqs_dict( cpus, mem, disk, client_group, client_group_regex=True, debug_mode=True ) jrr.get_requirements_type.return_value = RequirementsType.BILLING req_args = _create_reqs_dict( cpus, mem, disk, client_group, client_group_regex=True, ignore_concurrency_limits=True, debug_mode=True, merge_with={ "bill_to_user": _OTHER_USER, "scheduler_requirements": {"foo": "bar", "baz": "bat"}, }, internal_representation=True, ) reqs = ResolvedRequirements(**req_args) jrr.resolve_requirements.return_value = reqs _set_up_common_return_values(mocks) # set up the class to be tested and run the method rj = EE2RunJob(sdkmr) inc_reqs = _create_reqs_dict( cpus, mem, disk, client_group, client_group_regex=1, ignore_concurrency_limits="righty ho, luv", debug_mode="true", merge_with={ "bill_to_user": _OTHER_USER, "scheduler_requirements": {"foo": "bar", "baz": "bat"}, }, ) params = { "method": _METHOD, "source_ws_objects": [_WS_REF_1, _WS_REF_2], "job_requirements": inc_reqs, "parent_job_id": "thisislikesoooofake", } assert rj.run(params, as_admin=True) == _JOB_ID # check mocks called as expected. The order here is the order that they're called in the code. sdkmr.check_as_admin.assert_called_once_with(JobPermissions.WRITE) jrr.normalize_job_reqs.assert_called_once_with(inc_reqs, "input job") jrr.get_requirements_type.assert_called_once_with(**req_args) jrr.resolve_requirements.assert_called_once_with( _METHOD, mocks[CatalogCache], **req_args ) _check_common_mock_calls( mocks, reqs, None, None, parent_job_id="thisislikesoooofake" )
5,346,588
def train_dist( domain: Text, config: Text, training_files: Optional[Union[Text, List[Text]]], output: Text = rasa.shared.constants.DEFAULT_MODELS_PATH, dry_run: bool = False, force_training: bool = False, fixed_model_name: Optional[Text] = None, persist_nlu_training_data: bool = False, core_additional_arguments: Optional[Dict] = None, nlu_additional_arguments: Optional[Dict] = None, model_to_finetune: Optional[Text] = None, finetuning_epoch_fraction: float = 1.0, ) -> TrainingResult: """Trains a Rasa model (Core and NLU). Args: domain: Path to the domain file. config: Path to the config file. training_files: List of paths to training data files. output: Output directory for the trained model. dry_run: If `True` then no training will be done, and the information about whether the training needs to be done will be printed. force_training: If `True` retrain model even if data has not changed. fixed_model_name: Name of model to be stored. persist_nlu_training_data: `True` if the NLU training data should be persisted with the model. core_additional_arguments: Additional training parameters for core training. nlu_additional_arguments: Additional training parameters forwarded to training method of each NLU component. model_to_finetune: Optional path to a model which should be finetuned or a directory in case the latest trained model should be used. finetuning_epoch_fraction: The fraction currently specified training epochs in the model configuration which should be used for finetuning. Returns: An instance of `TrainingResult`. """ file_importer = TrainingDataImporter.load_from_config( config, domain, training_files ) stories = file_importer.get_stories() nlu_data = file_importer.get_nlu_data() training_type = TrainingType.BOTH if nlu_data.has_e2e_examples(): rasa.shared.utils.common.mark_as_experimental_feature("end-to-end training") training_type = TrainingType.END_TO_END if stories.is_empty() and nlu_data.contains_no_pure_nlu_data(): rasa.shared.utils.cli.print_error( "No training data given. Please provide stories and NLU data in " "order to train a Rasa model using the '--data' argument." ) return TrainingResult(code=1) domain = file_importer.get_domain() if domain.is_empty(): rasa.shared.utils.cli.print_warning( "Core training was skipped because no valid domain file was found. " "Only an NLU-model was created. Please specify a valid domain using " "the '--domain' argument or check if the provided domain file exists." ) training_type = TrainingType.NLU elif stories.is_empty(): rasa.shared.utils.cli.print_warning( "No stories present. Just a Rasa NLU model will be trained." ) training_type = TrainingType.NLU # We will train nlu if there are any nlu example, including from e2e stories. elif nlu_data.contains_no_pure_nlu_data() and not nlu_data.has_e2e_examples(): rasa.shared.utils.cli.print_warning( "No NLU data present. Just a Rasa Core model will be trained." ) training_type = TrainingType.CORE with telemetry.track_model_training( file_importer, model_type="rasa", ): return _train_graph_dist( file_importer, training_type=training_type, output_path=output, fixed_model_name=fixed_model_name, model_to_finetune=model_to_finetune, force_full_training=force_training, persist_nlu_training_data=persist_nlu_training_data, finetuning_epoch_fraction=finetuning_epoch_fraction, dry_run=dry_run, **(core_additional_arguments or {}), **(nlu_additional_arguments or {}), )
5,346,589
def run_step_sequences( env, agent, sequence_iterator=episode_iterator, update=True, num_sequences=None ): """Generate sequences of steps from an environment-agent simulation. Args: env: An OpenAI Gym environment. agent: An instance of `Agent`. sequence_iterator: A generator taking an iterator of steps and yielding steps until the end of the sequence. update: Update the agent with step results. num_sequences: Number of sequences to run. By default runs forever. Yields: Successive sequences of steps. """ steps = run_steps(env=env, agent=agent, update=update) if num_sequences is not None: counter = range(num_sequences) else: counter = itertools.count() for _ in counter: yield sequence_iterator(steps)
5,346,590
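A possible driver loop for the generator above; `gym` is real, but `RandomAgent` and the `Agent`/`run_steps` interface it must satisfy are placeholders inferred from the docstring.

import gym

env = gym.make("CartPole-v1")
agent = RandomAgent(env.action_space)  # placeholder: any object implementing the Agent interface

# Consume ten episodes; each yielded item is itself an iterator of steps.
for episode in run_step_sequences(env, agent, num_sequences=10):
    for step in episode:
        pass  # the agent is updated as steps are drawn (update=True by default)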
def convert_image_to_nifti(path_image, path_out_dir=None):
    """ Convert a regular image to a NIfTI image.

    :param str path_image: input image
    :param str path_out_dir: path to output folder
    :return str: path to the resulting image

    >>> path_img = os.path.join(update_path('data-images'), 'images',
    ...                         'artificial_moving-affine.jpg')
    >>> path_img2 = convert_image_to_nifti(path_img, '.')
    >>> path_img2  # doctest: +ELLIPSIS
    '...artificial_moving-affine.nii'
    >>> os.path.isfile(path_img2)
    True
    >>> path_img3 = convert_image_from_nifti(path_img2)
    >>> os.path.isfile(path_img3)
    True
    >>> list(map(os.remove, [path_img2, path_img3]))  # doctest: +ELLIPSIS
    [...]
    """
    path_image = update_path(path_image)
    path_img_out = _gene_out_path(path_image, '.nii', path_out_dir)
    logging.debug('Convert image to Nifti format "%s" -> "%s"', path_image, path_img_out)
    # img = Image.open(path_file).convert('LA')
    img = load_image(path_image)
    nim = nibabel.Nifti1Pair(img, np.eye(4))
    del img
    nibabel.save(nim, path_img_out)
    return path_img_out
5,346,591
def theater_chase_rainbow(strip: PixelStrip, wait_ms: int = 50):
    """Rainbow movie theater light style chaser animation.

    :param strip: the LED strip to animate
    :param wait_ms: delay between animation frames, in milliseconds
    :return: None
    """
    for j in range(256):
        for q in range(3):
            for i in range(0, strip.numPixels(), 3):
                strip.setPixelColor(i + q, wheel((i + j) % 255))
            strip.show()
            time.sleep(wait_ms / 1000.0)
            for i in range(0, strip.numPixels(), 3):
                strip.setPixelColor(i + q, 0)
5,346,592
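A minimal driver sketch for the animation above; the pin and LED-count values are illustrative, and the `wheel` colour helper is assumed to be defined next to this function as in the usual strandtest examples.

from rpi_ws281x import PixelStrip

LED_COUNT = 30  # number of LEDs on the strip
LED_PIN = 18    # GPIO pin wired to the strip's data line (must support PWM)

strip = PixelStrip(LED_COUNT, LED_PIN)
strip.begin()  # initialise the library before drawing
theater_chase_rainbow(strip, wait_ms=50)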
def wav2vec2_base() -> Wav2Vec2Model: """Build wav2vec2 model with "base" configuration This is one of the model architecture used in *wav2vec 2.0* [:footcite:`baevski2020wav2vec`] for pretraining. Returns: Wav2Vec2Model: """ return _get_model( extractor_mode="group_norm", extractor_conv_layer_config=None, extractor_conv_bias=False, encoder_embed_dim=768, encoder_projection_dropout=0.1, encoder_pos_conv_kernel=128, encoder_pos_conv_groups=16, encoder_num_layers=12, encoder_num_heads=12, encoder_attention_dropout=0.1, encoder_ff_interm_features=3072, encoder_ff_interm_dropout=0.1, encoder_dropout=0.1, encoder_layer_norm_first=False, encoder_layer_drop=0.1, aux_num_out=None, )
5,346,593
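A quick smoke test, assuming the builder returns a torchaudio-style `Wav2Vec2Model` whose forward pass accepts raw waveforms; the last dimension follows from the 768-dim encoder configuration above.

import torch

model = wav2vec2_base().eval()
waveform = torch.randn(1, 16000)  # one second of fake 16 kHz audio

with torch.inference_mode():
    features, lengths = model(waveform)

print(features.shape)  # (1, num_frames, 768)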
def check(context, verbose, files): """Don't modify files, just print the differences. Return code 0 means nothing would change. Return code 1 means some files would be modified. You can use partial and multiple file names in the FILES argument. """ common_fix_or_check(context, verbose, files, True)
5,346,594
def dont_handle_lock_expired_mock(app): """Takes in a raiden app and returns a mock context where lock_expired is not processed """ def do_nothing(raiden, message): # pylint: disable=unused-argument return [] return patch.object( app.raiden.message_handler, "handle_message_lockexpired", side_effect=do_nothing )
5,346,595
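How a test might apply the returned patch context; the `raiden_app` fixture name is a placeholder.

def test_ignores_lock_expired(raiden_app):
    with dont_handle_lock_expired_mock(raiden_app):
        # Drive a transfer here: any LockExpired message the node receives
        # is swallowed by the mocked handler instead of being processed.
        ...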
def _find_spec(name, path, target=None): """Find a module's spec.""" meta_path = sys.meta_path if meta_path is None: # PyImport_Cleanup() is running or has been called. raise ImportError("sys.meta_path is None, Python is likely " "shutting down") if not meta_path: _warnings.warn('sys.meta_path is empty', ImportWarning) # We check sys.modules here for the reload case. While a passed-in # target will usually indicate a reload there is no guarantee, whereas # sys.modules provides one. is_reload = name in sys.modules for finder in meta_path: with _ImportLockContext(): try: find_spec = finder.find_spec except AttributeError: spec = _find_spec_legacy(finder, name, path) if spec is None: continue else: spec = find_spec(name, path, target) if spec is not None: # The parent import may have already imported this module. if not is_reload and name in sys.modules: module = sys.modules[name] try: __spec__ = module.__spec__ except AttributeError: # We use the found spec since that is the one that # we would have used if the parent module hadn't # beaten us to the punch. return spec else: if __spec__ is None: return spec else: return __spec__ else: return spec else: return None
5,346,596
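`_find_spec` is import-machinery internal; the supported public entry point exposes the same lookup, which is enough for a quick check.

import importlib.util

spec = importlib.util.find_spec("json")
print(spec.name)    # 'json'
print(spec.origin)  # path of the file the module would be loaded from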
async def setup(bot: Bot) -> None: """Load the ModPings cog.""" await bot.add_cog(ModPings(bot))
5,346,597
def push_new_group_notification(username, room_id, receiver):
    """Push a notification about a new group conversation (invitation to join a room)."""
    room = Room.query.get_or_404(room_id)
    # Message reads: "<username> invites you to join <room name>!"
    message = '<a href="%s">%s</a> 邀请您加入<a href="%s"> %s </a> !' % \
              (url_for('user.index', username=username), username,
               url_for('group.home', room_id=room_id), room.name)
    notification = Notification(message=message, receiver=receiver)
    db.session.add(notification)
    db.session.commit()
5,346,598
def system(_printer, ast): """Prints the instance system initialization.""" process_names_str = ' < '.join(map(lambda proc_block: ', '.join(proc_block), ast["processNames"])) return f'system {process_names_str};'
5,346,599
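A worked call on a minimal AST fragment; only the dict shape matters here, and since the `_printer` argument is unused by this handler, `None` suffices.

ast = {"processNames": [["Clock", "Bus"], ["Worker"]]}
print(system(None, ast))  # -> system Clock, Bus < Worker;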