from typing import Union
from pathlib import Path
from typing import Dict
from typing import Set
import logging
import json


def load_forces_from_json(json_path: Union[str, Path]) -> Dict[str, Dict[str, Union[Set, Dict]]]:
    """
    Load forced labels, changes, and non-changes from a json file and return them in a
    dictionary that can be passed through to JointModel.get_harmony(...) as kwargs.

    Parameters
    ----------
    json_path : Union[str, Path]
        A reference to a json file containing forced labels, changes, and non-changes,
        to be used for JointModel.get_harmony(...). It may have the following keys:
            - "forced_chord_changes": A list of integers at which the chord must change.
            - "forced_chord_non_changes": A list of integers at which the chord cannot change.
            - "forced_key_changes": A list of integers at which the key must change.
            - "forced_key_non_changes": A list of integers at which the key cannot change.
            - "forced_chords": A dictionary mapping the string form of a tuple in the form
              (start, end) to a chord_id, saying that the input indexes on the range
              start (inclusive) to end (exclusive) must be output as the given chord_id.
            - "forced_keys": Same as forced_chords, but for keys.

    Returns
    -------
    forces_kwargs : Dict[str, Dict[str, Union[Set, Dict]]]
        A nested dictionary containing the loaded keyword arguments for each input.
        The outer-most keys should reference a specific input by string name, or be the
        keyword "default", in which case the loaded kwargs will be used for all input
        pieces not otherwise matched by string name. In the inner dictionaries, keyword
        arguments have been loaded (with the correct types) from the json file and can be
        passed directly as kwargs to JointModel.get_harmony(...) for that particular piece.
    """

    def load_forces_from_nested_json(raw_data: Dict) -> Dict[str, Union[Set, Dict]]:
        """
        Load an inner forces_kwargs dict from a nested json forces_kwargs dict's data.

        Parameters
        ----------
        raw_data : Dict
            The inner nested dictionary from which we will load the kwargs.
            See load_forces_from_json for details.

        Returns
        -------
        Dict[str, Union[Set, Dict]]
            The kwargs for a single piece, unnested.
        """
        forces_kwargs = dict()

        for key in [
            "forced_chord_changes",
            "forced_chord_non_changes",
            "forced_key_changes",
            "forced_key_non_changes",
        ]:
            if key in raw_data:
                forces_kwargs[key] = set(raw_data[key])

        for key in ["forced_chords", "forced_keys"]:
            if key in raw_data:
                forces_kwargs[key] = {
                    tuple(map(int, range_tuple_str[1:-1].split(","))): label_id
                    for range_tuple_str, label_id in raw_data[key].items()
                }

        for key in raw_data:
            if key not in [
                "forced_chord_changes",
                "forced_chord_non_changes",
                "forced_key_changes",
                "forced_key_non_changes",
                "forced_chords",
                "forced_keys",
            ]:
                logging.warning(
                    "--forces-json inner key not recognized: %s. Ignoring that key.", key
                )

        logging.info("Forces:" if len(forces_kwargs) > 0 else "Forces: None")
        for key, item in sorted(forces_kwargs.items()):
            if isinstance(item, dict):
                logging.info("    %s:", key)
                for inner_key, inner_item in sorted(item.items()):
                    logging.info("        %s = %s", inner_key, inner_item)
            else:
                logging.info("    %s = %s", key, item)

        return forces_kwargs

    with open(json_path, "r") as json_file:
        raw_data = json.load(json_file)

    if (
        "forced_chord_changes" in raw_data
        or "forced_chord_non_changes" in raw_data
        or "forced_key_changes" in raw_data
        or "forced_key_non_changes" in raw_data
        or "forced_chords" in raw_data
        or "forced_keys" in raw_data
    ):
        logging.info(
            "Given --forces-json is not a nested, piece-specific mapping. Treating as the "
            "default for all inputs."
        )
        raw_data = {"default": raw_data}

    all_forces_kwargs = {}
    for key, nested_raw_data in raw_data.items():
        logging.info("Loading forces for %s", key)
        all_forces_kwargs[key] = load_forces_from_nested_json(nested_raw_data)

    return all_forces_kwargs
ad328af4842cf880889d3fbc5ed465af4feb713e
83,921
def get_train_validation_set(data, train_ind, val_inds):
    """ Get the train and validation sets given their indices. """
    training_data = data[train_ind]
    validation_data = data[val_inds]
    return training_data, validation_data
d87b63b206aff3e3055c3edbec83e2422b1a35a4
593,497
def solution(array, n_rotations):
    """ Returns the Cyclic Rotation of array with n_rotations positions to the right. """
    n = len(array)
    n_rotations = n_rotations % n if n > 0 else 0
    return array[n - n_rotations:] + array[:n - n_rotations]
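A quick usage sketch for the rotation helper above; the input values are illustrative, not part of the original record:

# Rotating right by 2 moves the last two elements to the front.
assert solution([1, 2, 3, 4, 5], 2) == [4, 5, 1, 2, 3]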
6f5edc0c8ec3d0e58830466e61577e80a725a624
13,098
def get_iso_time(date_time):
    """
    Converts the provided datetime object to an ISO-format time-tag.

    Parameters
    ----------
    date_time : datetime.datetime
        Datetime object to convert to ISO format.

    Returns
    -------
    time_in_iso : str
        Provided time in ISO format: YYYY-MM-DDTHH:MM:SS.mmmmmmZ
    """
    time_in_iso = date_time.isoformat(sep='T', timespec='microseconds') + "Z"
    return time_in_iso
8d819a00c43850fb75a48135b3bd413efff60107
645,864
def wordfreq(text):
    """Return a dictionary of words and word counts in a string."""
    freqs = {}
    for word in text.split():
        freqs[word] = freqs.get(word, 0) + 1
    return freqs
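An illustrative call (the example string is made up):

wordfreq("the cat and the hat")  # {'the': 2, 'cat': 1, 'and': 1, 'hat': 1}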
63f00ee230b5f26e82dfdccd78026cdae02c645c
90,429
def stations_level_over_threshold(stations, tol):
    """Takes in a list of monitoring station objects and returns a list of
    (station, relative water level) tuples, sorted by the relative water level
    in descending order."""
    newlist = []
    for station in stations:
        if station.relative_water_level() is not None and station.relative_water_level() > tol:
            newlist.append((station, station.relative_water_level()))
    return sorted(newlist, key=lambda tup: tup[1], reverse=True)
bafffbb31c17648a84d28b61c0b2d901aa7003c8
105,088
def rate_bucket(dataset, rate_low, rate_high):
    """Extract the movies within the specified ratings.

    This function extracts all the movies with a rating between rate_low and
    rate_high. Once you have extracted the movies, call explore_data() to
    print the first few rows.

    Keyword arguments:
    dataset -- list containing the details of the movie
    rate_low -- lower range of rating
    rate_high -- higher range of rating

    Returns:
    rated_movies -- list of the details of the movies with required ratings
    """
    rated_movies = [i[:] for i in dataset
                    if rate_low <= float(i[11]) <= rate_high]
    return rated_movies
473ec275aa476694269fcaf94c6fadb6597c3866
233,108
def lookup_module_function(module_reference, function_name):
    """
    Acquires a function reference from a module given a function name.

    Takes 2 arguments:

      module_reference - Module whose interface is searched for function_name.
      function_name    - String specifying the function whose handle is sought.

    Returns 1 value:

      function_reference - Reference to the requested function. None if
                           function_name is not part of module_reference's
                           interface or if there was an error acquiring the
                           reference.
    """

    # Get a reference to the function requested. Return None if it isn't part
    # of the module's interface.
    function_reference = getattr(module_reference, function_name, None)
    return function_reference
5ce046b604b32b0442814d340bf8ac2a6be4c241
84,863
def highlight_single_token(token):
    """Highlight a single token with ^."""
    return {token.start_row: " " * token.start_col + "^" * len(token.string)}
d2c0287d423bbb41731d3847e211f3eee8233ee7
655,199
def size_of_row_in_memory(row):
    """
    Returns the approximate number of bytes needed to represent the given row
    in the Python program's memory.

    The magic numbers are based on `sys.getsizeof`.
    """
    a = 64 + 8 * len(row)  # Size of the array
    a += sum(49 + len(cell) for cell in row)  # Size of the contained strings
    return a
901bd43383b9222ddb79aee18ae9c924e6e4a480
445,977
def load_data(filename):
    """Open a text file of numbers & turn contents into a list of integers."""
    with open(filename) as f:
        lines = f.read().strip().split('\n')
    return [int(i) for i in lines]
2baf679166eb1ee36f2b36c3e18f4f1d6a5272d9
694,964
def wh_to_kwh(wh):
    """
    Convert watt hours to kilowatt hours and round to two decimal places

    :param wh: integer or decimal value
    :return: kilowatt hours, rounded to two decimal places
    """
    kw = float("{0:.2f}".format(wh / 1000.00))
    return kw
19488960a9c7a4d2fc748f4d897d082cfdaee2b8
11,244
def nearest_point(pos):
    """Find the nearest grid point to a position (discretizes)."""
    (current_row, current_col) = pos
    grid_row = int(current_row + 0.5)
    grid_col = int(current_col + 0.5)
    return (grid_row, grid_col)
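An illustrative call: each coordinate is rounded to the nearest integer grid point.

nearest_point((3.2, 7.8))  # (3, 8)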
ed330fb5a90e6ed7fd61e22eda7f63744bc3bd35
524,845
def notas(*n, sit=False):
    """
    -> Receives several student grades and returns the number of grades
    (accepts any number), the highest and lowest grade, the average, and
    (optionally) the student's status.

    :param n: one or more grades
    :param sit: (optional) include the student's status
    :return: dictionary with the information
    """
    informa = dict()
    cont = tot = maior = menor = média = 0
    for c in range(len(n)):
        if c == 0:
            maior = menor = n[c]
        elif maior < n[c]:
            maior = n[c]
        elif menor > n[c]:  # Fixed: the original compared with '<', so the minimum was never updated.
            menor = n[c]
        tot += n[c]
        cont += 1
    média = tot / cont
    informa['total'] = cont
    informa['maior'] = maior
    informa['menor'] = menor
    informa['média'] = float(f'{média:.2f}')
    if sit:
        if média < 5:
            situação = 'RUÍM'
        elif média < 7:
            situação = 'RAZOÁVEL'
        else:
            situação = 'BOA'
        informa['situação'] = situação
    return informa
c9d932064383423ed4c6359ce2479ff6ea75fcc4
684,187
import json


def output_json(metaList):
    """ Converts the list of dicts into a JSON format """
    return json.dumps(metaList, indent=4, separators=(',', ' : '))
53386041564ea8238cdaaa5e058c1f59782f7f59
277,108
def deconstruct_hex_package(data):
    """
    Deconstruct a package in hex format

    :param data: package content
    :type data: str
    :rtype: dict
    """
    dic = {
        "start": data[0:4],
        "command_flag": data[4:6],
        "answer_flag": data[6:8],
        "unique_code": data[8:42],
        "encrypto_method": data[42:44],
        "length": data[44:48],
        "payload": data[48:-2],
        "checksum": data[-2:],
    }
    return dic
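A small usage sketch of the field layout above; the hex string is a made-up example, not a real packet from any protocol:

# 48 header characters, then the payload, then a 2-character checksum.
pkg = "AAAA" + "01" + "02" + "0" * 34 + "01" + "0004" + "CAFE" + "FF"
fields = deconstruct_hex_package(pkg)
assert fields["payload"] == "CAFE" and fields["checksum"] == "FF"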
b8caa7c6c743023b6c0ca5c0a46137a3aa17cfb4
552,667
def read_nrevbytes(buffer: bytearray, length: int) -> bytes:
    """ Read and reverse the given number of bytes; read bytes are consumed. """
    array = bytes(reversed(buffer[:length]))
    del buffer[:length]
    return array
6c1c01392283a8d393a28aa779e3770a33cc886d
377,740
def keep_keys(new_keys, old_dict):
    """Return dictionary with items indicated by the keys.

    Args:
        new_keys (iterable): Keys to keep from the old dictionary.
        old_dict (dict): A dictionary from which to extract a subset of items.

    Returns:
        dict: A dict derived from old_dict only keeping keys from new_keys.

    Example:
        To use `keep_keys` directly on a dictionary:

        >>> keep_keys(["a", "b"], {"a": 1, "b": 2, "c": 3})
        {'a': 1, 'b': 2}

        If the requested keys are not present, they are ignored.

        >>> keep_keys(["a", "b"], {"b": 2, "c": 3})
        {'b': 2}

        To use `keep_keys` on a stream of dictionaries:

        >>> dict_gen = iter([{"a": 1, "b": 2, "c": 3},
        ...                  {"b": 5, "c": 6}])
        >>> from functools import partial
        >>> subdict_gen = map(partial(keep_keys, ["a", "b"]), dict_gen)
        >>> list(subdict_gen)
        [{'a': 1, 'b': 2}, {'b': 5}]
    """
    new_dict = {k: old_dict[k] for k in new_keys if k in old_dict}
    return new_dict
6365015024e5c923b6e515d7ac8f4fe6eafbe7e3
74,320
def encode_pattern(mat):
    """ Encode the matrix into a hashable value. """
    return tuple(mat.flatten().tolist())
310c0b6217ebd38cce2d2b8047012a2105143a7e
342,508
import re


def parsequoted(args, line):
    """ Split a line containing quoted strings into columns. """
    fields = []
    rs = re.compile(args.separator)
    dquotes = '|""' if args.dquoted else ''
    squotes = "|''" if args.dquoted else ''
    rqq = re.compile(r'"(?:[^"\\]|\\(?:x\w{2}|u\w{4}|U\w{8}|u\{\w+\}|[^xuU])%s)*"' % dquotes)
    rq = re.compile(r'\'(?:[^\'\\]|\\(?:x\w{2}|u\w{4}|U\w{8}|u\{\w+\}|[^xuU])%s)*\'' % squotes)
    o = 0
    needseparator = False
    while o < len(line):
        if needseparator:
            m = rs.match(line, o)
            if not m:
                raise Exception("expected separator at pos %d in %s" % (o, line))
            o += len(m.group(0))
            needseparator = False
            continue
        m = rqq.match(line, o)
        if m:
            fields.append(m.group(0))
            o += len(m.group(0))
            needseparator = True
            continue
        m = rq.match(line, o)
        if m:
            fields.append(m.group(0))
            o += len(m.group(0))
            needseparator = True
            continue
        m = rs.search(line, o)
        if m:
            fields.append(line[o:m.start(0)])
            o = m.end(0)
        else:
            fields.append(line[o:])
            break
    return fields
76ac2016659ae117d936927ff128416807af25f4
539,482
import math
import random


def rand_scale_log_normal(mean_scale, one_sigma_at_scale):
    """
    Generate a value distributed log-normally around mean_scale.

    :param mean_scale: the central value of the distribution
    :param one_sigma_at_scale: 67% of values fall between
        mean_scale/one_sigma_at_scale .. mean_scale*one_sigma_at_scale
    :return: a randomly sampled scale value
    """
    log_sigma = math.log(one_sigma_at_scale)
    return mean_scale * math.exp(random.normalvariate(0.0, log_sigma))
34b320649583f674038dd58187d532e8fc9d6aa4
546,366
def create_addedkeywds_file(fits_file):
    """
    This function creates a text file to log added keywords.

    Args:
        fits_file: string, name of the fits file to be keyword checked

    Returns:
        addedkeywds_file_name: string, the file name where all added keywords were saved
    """
    addedkeywds_file_name = fits_file.replace(".fits", "_addedkeywds.txt")
    print('Name of text file containing all keywords added: ', addedkeywds_file_name)
    with open(addedkeywds_file_name, 'w') as tf:
        tf.write('### The following keywords were added or have the wrong format: \n')
        tf.write('# {:<12} {:<10} {:<25} \n'.format('Keyword', 'Extension', 'Comments'))
    return addedkeywds_file_name
ab70171f1090b8f8a52a75e3c62ae1574675bb8e
178,837
def parse_size(root):
    """Parse the width and height of the image out of the root node.

    Args:
    ----
        root: xml.etree.ElementTree.Element

    Output:
    ------
        width: str
        height: str
    """
    size_node = root.find('size')
    width = size_node.find('width').text
    height = size_node.find('height').text
    return width, height
40ccfc56a4f1a7c8135104a2bebeb0c82fd939a2
641,484
import re


def _ibis_sqlite_regex_replace(string, pattern, replacement):
    """Replace occurrences of `pattern` in `string` with `replacement`.

    Parameters
    ----------
    string : str
    pattern : str
    replacement : str

    Returns
    -------
    result : str
    """
    return re.sub(pattern, replacement, string)
7cb78d939e57954eb3b8568b7cf081680cb0c7ed
491,987
def validate_data_limit(data_limit):
    """Check if the given data limit is valid."""
    if not isinstance(data_limit, int):
        raise AssertionError('data_limit is not an integer')
    if data_limit <= 0:
        raise AssertionError('data_limit should be greater than 0. '
                             'We suggest it should be greater than 100K.')
    return True
5b7ef292cb3c0c6fde29f4bdee146b95bd284ab9
379,512
from fcntl import ioctl
from termios import FIONCLEX
from termios import FIOCLEX


def set_fd_inheritable(fd, inheritable):
    """
    Enable or disable the "inheritability" of a file descriptor.

    See Also:
        https://docs.python.org/3/library/os.html#inheritance-of-file-descriptors
        https://github.com/python/cpython/blob/65e6c1eff3/Python/fileutils.c#L846-L857
    """
    if inheritable:
        return ioctl(fd, FIONCLEX)
    else:
        return ioctl(fd, FIOCLEX)
132cd6c1a386ce7cbbada89b248a1e7eef6203aa
104,718
def fit_to_unit(data):
    """
    Shift and rescale `data` to fit the unit interval.

    >>> fit_to_unit([5, 10, 15])
    [0.0, 0.5, 1.0]
    """
    head = data[0]
    data = [x - head for x in data]
    tail = data[-1]
    return [x / float(tail) for x in data]
0cf16c9b2c7d3feb1048376b48904beef3087edd
245,173
import sqlite3


def open_db(fname):
    """Connect to a Sqlite3 database file."""
    db_conn = sqlite3.connect(fname, isolation_level=None)
    return db_conn
ce5b27d97115f9e64d2123d992273d1892f3f0af
300,807
def is_leaf(depth, node):
    """
    is_terminal_fn for variable-depth trees. Check if a node is a leaf node.
    """
    return node.num_children() == 0
f501e092886ffbbaffaae25b6199cfade030bfa8
606,055
import hashlib


def md5_string(string):
    """
    MD5-hash a string.

    :param string: the string to hash
    :return: the digest as a hexadecimal string
    """
    obj = hashlib.md5()
    obj.update(string.encode('utf-8'))
    encry_string = obj.hexdigest()
    return encry_string
5087e031445f4b67fd646bd512e184efaba8a1f9
333,698
def operator(o):
    """Extract the head of an expression."""
    return o.func
faf102303a274f06e5093eb220cdce86a5581234
54,980
def min_dist(q, dist):
    """
    Returns the node with the smallest distance in q.
    Implemented to keep the main algorithm clean.
    """
    min_node = None
    for node in q:
        if min_node is None:
            min_node = node
        elif dist[node] < dist[min_node]:
            min_node = node
    return min_node
99c4e868748598a44f79ee3cb876d7ebc3abae08
33,193
def Int2Bin(integer: int, n_bits: int):
    """Turn an integer into a binary string.

    Args:
        integer (int): Integer to convert.
        n_bits (int): Minimum number of bits to represent the integer with.

    Returns:
        str: binary_str
    """
    formatstr = '0' + str(n_bits) + 'b'
    return format(integer, formatstr)
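An illustrative call: the result is zero-padded to the requested width.

Int2Bin(5, 8)  # '00000101'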
8a4868861e9d390595f073a4606aaf6ec5786ec9
574,722
import requests


def get_tor_session(port):
    """
    The python requests lib does not work natively with tor, so we need to
    tell it to connect to the tor proxy.

    Heavily influenced by: https://stackoverflow.com/a/33875657/5843840

    :param port: port which tor is listening on (often 9050, 9051, or 9151)
    :returns: a requests.Session object.
    """
    proxies = {
        'http': 'socks5h://127.0.0.1:{}'.format(port)
    }
    session = requests.Session()
    session.proxies = proxies
    return session
8f4eb2c6bae872d5df83b038ac30132e64560a67
92,526
import pickle
import base64


def object_to_base_64(obj):
    """ Pickle and base64-encode an object. """
    pickled = pickle.dumps(obj)
    return base64.b64encode(pickled)
ded77087d13e6170b34e90de9f3ee73820e60e67
457,805
def date_parser(dates):
    """
    This function returns a list of strings where each element in the
    returned list contains only the date.

    Example
    -------
    Input: ['2019-11-29 12:50:54', '2019-11-29 12:46:53', '2019-11-29 12:46:10']
    Output: ['2019-11-29', '2019-11-29', '2019-11-29']
    """
    c = []  # initialize an empty list
    for i in dates:
        i = i[:10]  # the date part of the datetime string is exactly 10 characters
        c.append(i)  # add the date to the list
    return c
30f02622e388db5bd93763f7f2165d9c23b0b2f3
354,101
def create_dirt_prefab(initial_state):
    """Create a dirt prefab with the given initial state."""
    dirt_prefab = {
        "name": "DirtContainer",
        "components": [
            {
                "component": "StateManager",
                "kwargs": {
                    "initialState": initial_state,
                    "stateConfigs": [
                        {
                            "state": "dirtWait",
                            "layer": "logic",
                        },
                        {
                            "state": "dirt",
                            "layer": "lowerPhysical",
                            "sprite": "Dirt",
                        },
                    ],
                }
            },
            {
                "component": "Transform",
                "kwargs": {
                    "position": (0, 0),
                    "orientation": "N"
                }
            },
            {
                "component": "Appearance",
                "kwargs": {
                    "spriteNames": ["Dirt"],
                    # This color is greenish, and quite transparent to expose the
                    # animated water below.
                    "spriteRGBColors": [(2, 230, 80, 50)],
                }
            },
            {
                "component": "DirtTracker",
                "kwargs": {
                    "activeState": "dirt",
                    "inactiveState": "dirtWait",
                }
            },
            {
                "component": "DirtCleaning",
                "kwargs": {}
            },
        ]
    }
    return dirt_prefab
08867788dababf435b9cd61eb51b8e99765c1e10
179,254
from datetime import datetime


def str_to_datetime(datetime_str):
    """Convert possible date-like string to datetime object."""
    formats = (
        '%Y-%m-%d %H:%M:%S',
        '%Y-%m-%d',
        '%Y-%m-%d %H:%M:%S.%f',
        '%H:%M:%S.%f',
        '%H:%M:%S',
        '%Y%m%dT%H:%M:%S',
        '%Y-%m-%dT%H:%M:%S',
        '%Y%m%d',
        '%Y-%m-%dT%H',
        '%Y%m',
    )
    for frmt in formats:
        try:
            return datetime.strptime(datetime_str, frmt)
        except ValueError:
            if frmt is formats[-1]:
                raise
0168b105fb5d32dd93052cc0c170d9b4f6733d08
186,991
def by_indices(xs, ids):
    """Get elements from the list xs by their indices"""
    return [xs[i] for i in ids]
6a8fcc9812ab83a8dba84805ed547c921e9c36af
213,879
def sort_by_index(a, idx, reverse=False):
    """
    Sort an array of arrays by index.

    Input:
        a: a 2D array, e.g., [[1, "a"], [2, "b"]]
        idx: the index to sort the inner arrays by, e.g., 0
    """
    return sorted(a, key=lambda t: t[idx], reverse=reverse)
9479c7e2674789d69e7b1fe703a419aa7ef81d63
494,082
def clean_title(text):
    """
    A little helper function to process a string into Title Case and remove
    any leading or trailing whitespace. If text is None, return None.

    Arguments:
    - `text`: the value returned from a database row, e.g. record.get('fieldname')
    """
    if text is None:
        return None
    else:
        return text.strip().title()
acb7a63505909c5cd17a8930fd31146ceda966b4
465,690
import json


def load_MNIST_labels(fn='labels.txt'):
    """Load in MNIST labels data."""
    # The labels.txt file contains 3000 digit labels in the same order as the image data file
    with open(fn, 'r') as f:
        labels = json.load(f)
    return labels
08365509012af78b22f3f011ca333b2ee042400c
169,878
from typing import List


def transform_vars_data_structure(vars: List[dict]):
    """Transforms the data structure of the dict.

    Transforms this:

        [{'environment_scope': '*',
          'key': 'DATA',
          'masked': False,
          'protected': True,
          'value': 'Dave',
          'variable_type': 'env_var'},
         {'environment_scope': '*',
          'key': 'DATER',
          'masked': False,
          'protected': True,
          'value': 'Daver',
          'variable_type': 'env_var'},
         {'environment_scope': '*',
          'key': 'DATERR',
          'masked': False,
          'protected': True,
          'value': 'Daverr',
          'variable_type': 'env_var'}]

    To this data structure:

        {'DATA': 'Dave',
         'DATER': 'Daver',
         'DATERR': 'Daverr'}
    """
    transformed_vars = {}
    for var in vars:
        key = var['key']
        value = var['value']
        transformed_vars.update({key: value})
    return transformed_vars
20c8989a3863a9f53a416604c8905836cb35c738
431,222
def get_modified(df_old, df_new, unique_id, added_rows=None,
                 trans_col="transaction", trans_val="modified"):
    """Returns the modified rows in df_new

    Parameters
    ----------
    df_old : pd.DataFrame
        dataframe with previous information
    df_new : pd.DataFrame
        dataframe with new information
    unique_id : str or list
        unique identifier(s)
    added_rows : pd.DataFrame
        added rows from calling the get_added function
    trans_col : str
        name of column to track transaction (default is "transaction")
    trans_val : str
        name of value to reflect transaction status (default is "modified")

    Returns
    -------
    pd.DataFrame
        dataframe that contains modified rows
    """
    cols = list(df_new.columns)
    if added_rows is not None:
        df_new = df_new[~df_new.isin(list(added_rows[unique_id].values))].dropna()
    modified_rows = df_old.merge(df_new, indicator=True, how='outer')
    modified_rows = modified_rows[modified_rows['_merge'] == 'right_only']
    modified_rows = modified_rows[cols]
    modified_rows[trans_col] = trans_val
    return modified_rows
1bf31b8af832012bfe8de63154cf64a88b8e7354
521,208
def process_intersects_filter(dsl_query, geometry: dict):
    """
    Extends the received query to include an intersects filter with the provided geometry

    :param dsl_query: ES DSL object
    :param geometry: geometry as GeoJSON (dict)
    :rtype: ES DSL object
    :return: DSL extended with query parameters
    """
    dsl_query = dsl_query.filter(
        "geo_shape",
        geometry={
            "shape": {
                "type": geometry["geometry"]["type"],
                "coordinates": geometry["geometry"]["coordinates"],
            },
            "relation": "intersects",
        },
    )
    return dsl_query
7856a1d815e3a569def420152fa386df3fae6930
493,632
import functools


def patch(cls, attr):
    """Patch the function named attr in the object cls with the decorated function."""
    orig_func = getattr(cls, attr)

    @functools.wraps(orig_func)
    def decorator(func):
        def wrapped_func(*args, **kwargs):
            return func(orig_func, *args, **kwargs)
        setattr(cls, attr, wrapped_func)
        return orig_func
    return decorator
cafd5a15e2c3f1c50614073003134727c3d81a62
629,130
def truncate(string: str, width: int, ending: str = "...") -> str:
    """Truncate string to be no longer than provided width.

    When truncated, add `ending` to the shortened string as an indication of
    truncation.

    Parameters
    ----------
    string: str
        String to be truncated.
    width: int
        Maximum amount of characters before truncation.
    ending: str, optional
        Indication string of truncation.

    Returns
    -------
    Truncated string.
    """
    if not len(string) > width:
        return string
    length = width - len(ending)
    return string[:length] + ending
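An illustrative call: note that the ellipsis counts toward the width.

truncate("hello world", 8)  # 'hello...'
truncate("short", 8)        # 'short'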
66dd6ca833b6290c51eb3804792b35d291fffb2d
683,413
def fetch_file_name(patch_url):
    """
    fetch zuul file name from a gerrit URL

    :param patch_url: Gerrit patch URL in a string format
    :returns file_name: string
    """
    splitted_patch_url = [x for x in patch_url.split('/') if x]
    if 'yaml' in splitted_patch_url[-1]:
        if splitted_patch_url[-2] == 'zuul.d':
            return splitted_patch_url[-2] + '%2F' + splitted_patch_url[-1]
        return splitted_patch_url[-1]
    raise Exception(f'No zuul yaml provided in : {patch_url}')
9b1ed4f1e7d7e6670c1ed974aca737b42274d0a5
335,919
def _create_postgres_url(db_user, db_password, db_name, db_host,
                         db_port=5432, db_ssl_mode=None, db_root_cert=None):
    """Helper function to construct the URL connection string

    Args:
        db_user: (string): the username to connect to the Postgres
            DB as
        db_password: (string): the password associated with the
            username being used to connect to the Postgres DB
        db_name: (string): the name of the Postgres DB to connect
            to
        db_host: (string): the host where the Postgres DB is
            running
        db_port: (number, optional): the port to connect to the
            Postgres DB at
        db_ssl_mode: (string, optional): the SSL mode to use when
            connecting to the Postgres DB
        db_root_cert: (string, optional): the root cert to use when
            connecting to the Postgres DB

    Returns:
        [string]: Postgres connection string
    """
    ssl_mode = ''
    if db_ssl_mode:
        # see
        # https://www.postgresql.org/docs/11/libpq-connect.html#
        # LIBPQ-CONNECT-SSLMODE
        ssl_mode = '?sslmode=%s' % (db_ssl_mode)
        if db_root_cert:
            ssl_mode += '&sslrootcert=%s' % (db_root_cert)
    return ('postgresql://%(user)s:%(password)s@%(host)s:%(port)s/'
            '%(db)s%(ssl)s' % {
                'user': db_user,
                'password': db_password,
                'db': db_name,
                'host': db_host,
                'port': db_port,
                'ssl': ssl_mode})
f617f7f85545fcf2a1f60db8c9c43e0209c32c4f
17,096
import urllib3


def get_wikidata_item(wikidata_id: str) -> bytes:
    """Get Wikidata item structure."""
    return urllib3.PoolManager().request(
        "GET",
        f"https://www.wikidata.org/wiki/Special:EntityData/Q{wikidata_id}.json",
    ).data
5461a3a2006b62677c378f90bf6e8a26fdda6735
635,958
def filter_valids(tensor, valids):
    """Filter out tensor using valids (last index of valid tensors).

    valids contains the last index of each row.

    Args:
        tensor (torch.Tensor): The tensor to filter
        valids (list[int]): Array of length of the valid values

    Returns:
        torch.Tensor: Filtered Tensor
    """
    return [tensor[i][:valid] for i, valid in enumerate(valids)]
2c4ae105a68a84607716a1a4df355feab8e0ff87
295,786
def convert_list_to_string(_l: list, _sep: str = " "):
    """Convert list to string."""
    # map() converts each element in the list to a string before joining.
    listToString = _sep.join(map(str, _l))
    return listToString
7aaba54013114eebf9c2c3f525e95b013ec79658
169,873
def read_obj_type(d, obj_cls):
    """
    Returns obj_type from the raw dictionary.

    Note: "obj_type" is NOT the "attribute" property of the obj_type field instance.

    :param d:
    :param obj_cls:
    :return:
    """
    schema_obj = obj_cls.get_schema_obj()
    if schema_obj is None:
        if "obj_type" in d:
            return d["obj_type"]
        else:
            return None
    else:
        fds = schema_obj.fields
        if "obj_type" in fds:
            return fds["obj_type"].read_obj_type_str(d)
        else:
            return None
cabc076ab84a875e68b916d823a2c5e77056b60c
382,175
import torch


def one_hot_encode(data, code, numeric=False):
    """Encodes a sequence in one-hot format, given a code.

    Args:
        data (Sequence): sequence of non-encoded data points.
        code (Int | Sequence): sequence of codes for one-hot encoding, or
            the alphabet length if data is already a list of indices.

    Returns:
        FloatTensor containing a one-hot encoding of the input data.
    """
    try:
        if isinstance(code, int):
            coded = torch.tensor(data, dtype=torch.int64)
            alpha_len = code
        else:
            coded = torch.tensor(list(map(code.index, data)), dtype=torch.int64)
            alpha_len = len(code)
    except:
        print(data)
        exit()

    if numeric:
        result = coded
    else:
        data_len = len(data)
        result = torch.zeros(alpha_len, data_len, dtype=torch.float)
        result[coded, torch.arange(data_len)] = 1
    return result
87fd283cca8523b5b7753302aef82105ddee1e3d
603,903
from typing import Mapping


def split(items, sort=False, key=None, reverse=False):
    """Split an iterable into unzipped pairs."""
    if isinstance(items, Mapping):
        items = items.items()
        default = ((), ())
    else:
        default = ()
    if sort:
        items = sorted(items, key=key, reverse=reverse)
    unzipped = tuple(zip(*items))
    return unzipped if unzipped else default
375c8232f50ff4ccda69b00e8671dfb8b1476533
303,000
import array


def square_wave(sample_length: int = 2):
    """Generate a single square wave of sample_length size"""
    square = array.array("H", [0] * sample_length)
    for i in range(sample_length // 2):
        square[i] = 0xFFFF
    return square
e6dbdbc3cc730c19a0117ab0407032bb37e17432
575,429
def title_modifier(podcast_title, max_line_word_count=5):
    """
    Utility function to add new line characters to the podcast title if it's too long.
    Increases readability in the terminal.

    :param podcast_title: title of the podcast.
    :param max_line_word_count: maximum number of words allowed on a single
        line before inserting a new line character.
    :return: modified string with new line characters.
    """
    modified_title_list = podcast_title.split()
    total_word_count = len(modified_title_list)
    for word_index in range(max_line_word_count, total_word_count, max_line_word_count):
        modified_title_list[word_index - 1] = modified_title_list[word_index - 1] + "\n"
    return " ".join(modified_title_list)
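An illustrative call with a made-up title; a newline is appended after every 4th word (the join leaves a space after each newline):

title_modifier("a very long podcast title that keeps going", 4)
# 'a very long podcast\n title that keeps going'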
535636494789f5f085eafbe6738f4b91a05eeee6
274,159
def calc_dh(DTheta, F0, rho, cp, dt):
    """Gets the change in height for a change in time, per Phil's notes.

    Arguments:
        DTheta -- change in potential temperature, K
        F0 -- surface heat flux, W/m2
        rho -- density of air, kg/m3
        cp -- heat capacity of air
        dt -- change in time, s

    Returns:
        dh -- change in height, m
    """
    dh = 1.0 * dt * ((0.2 * F0) / (rho * cp * DTheta))
    return dh
641b60582fa2b3cda8be657de4109e0974fa56dc
226,408
def get_content_type(response):
    """Get the content-type from a download response header.

    Args:
        response (Python 2: urllib.addinfourl; Python 3: http.client.HTTPResponse):
            the http response object

    Returns:
        text: the content type from the headers
    """
    try:
        return response.info().get_content_type()
    except AttributeError:  # python 2
        t = response.info().type
        return t.decode('UTF-8')
b055e91f6b195c7fe83572c9012fab63a9c3bfe5
206,270
def get_board(size):
    """Returns an n by n board"""
    board = [0] * size
    for ix in range(size):
        board[ix] = [0] * size
    return board
f16a10a7ba327c5893855540d5ce21283ecae6bb
568,304
import collections


def node_degree_counter(g, node, cache=True):
    """Returns a Counter object with edge_kind tuples as keys and the number
    of edges with the specified edge_kind incident to the node as counts.
    """
    node_data = g.node[node]
    if cache and 'degree_counter' in node_data:
        return node_data['degree_counter']
    degree_counter = collections.Counter()
    for node, neighbor, key in g.edges(node, keys=True):
        node_kind = node_data['kind']
        neighbor_kind = g.node[neighbor]['kind']
        edge_kind = node_kind, neighbor_kind, key
        degree_counter[edge_kind] += 1
    if cache:
        node_data['degree_counter'] = degree_counter
    return degree_counter
08c08f240e3170f4159e72bc7e69d99b69c37408
702,631
import re


def parse(path):
    """Return the Zotero preferences in a dictionary

    Args:
        path: The path to the prefs.js preferences file.

    Returns:
        A dictionary of preferences.
    """
    prog = re.compile(r'^user_pref\("([a-zA-Z.]*)"\s*,\s*(.*)\);$')
    with open(path, 'r') as f:
        lines = (bytes(line, 'utf-8').decode('unicode_escape') for line in f)
        matches = (prog.match(line) for line in lines)
        prefs = {match.group(1): match.group(2).strip('"\'')
                 for match in matches if match}
    return prefs
1a9ff413aeae781ee1076f2b634a510afacfa517
422,749
def _flatten_list(mlist):
    """Flattens a list of lists and returns the flattened list."""
    flat = [val for sublist in mlist for val in sublist]
    return flat
ceb0cfa9aaff7717a5b7fdf3dfea50a42c12f990
371,802
from typing import List


def get_data_from_responses(responses: List[dict]) -> list:
    """Parse a list of responses and return the extracted assets."""
    assets = list()
    for response in responses:
        if response:
            assets_found = response.get("results")
            if assets_found:
                assets.extend(assets_found)
    return assets
326a51c1ae73db063b583c2e39662afb34f1be42
241,402
def geo_origin(raster_geo):
    """Return the upper-left corner of a geo-transform

    Returns the upper-left corner coordinates of a :class:`GDAL.Geotransform`,
    with the coordinates returned in the same projection/GCS as the input
    geotransform.

    Args:
        raster_geo (:class:`GDAL.Geotransform`): Input GDAL Geotransform

    Returns:
        tuple: raster_origin: (x, y) coordinates of the upper left corner
    """
    return (raster_geo[0], raster_geo[3])
704775aca0f04be320c0ddb312059047a3ca8084
397,436
def is_autogenerated(example, scan_width=5):
    """Check if file is autogenerated by looking for keywords in the first few lines of the file."""
    keywords = ["auto-generated", "autogenerated", "automatically generated"]
    lines = example["content"].splitlines()
    for _, line in zip(range(scan_width), lines):
        for keyword in keywords:
            if keyword in line.lower():
                return {"autogenerated": True}
    # Only report False after all scanned lines have been checked; the
    # original's else placement could return after the first line.
    return {"autogenerated": False}
3cf64b48698789cc129fa3437fa772f056b86e21
175,774
def repack(dict_obj, key_map, rm_keys=[]):
    """ Repackage a dict object by renaming and removing keys"""
    for k, v in key_map.items():
        dict_obj[v] = dict_obj.pop(k)
    for k in rm_keys:
        del dict_obj[k]
    return dict_obj
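An illustrative call: rename 'a' to 'x' and drop 'c' (note the dict is mutated in place):

repack({"a": 1, "b": 2, "c": 3}, {"a": "x"}, rm_keys=["c"])  # {'b': 2, 'x': 1}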
82dc92d1b1ad390d9434d831377c713c0119f317
440,203
def gcd(a, b):
    """Returns the greatest common divisor of a and b.
    Should be implemented using recursion.

    >>> gcd(34, 19)
    1
    >>> gcd(39, 91)
    13
    >>> gcd(20, 30)
    10
    >>> gcd(40, 40)
    40
    """
    if a < b:
        return gcd(b, a)
    if a % b != 0:
        return gcd(b, a % b)
    return b
11319c7cb7da5d55922bc3dcfa757374bc4f097b
409,924
def exception_to_message(ex):
    """Get the message from an exception."""
    return ex.args[0]
9be46f5fcc0aa4e641fb923b543a834b801d6439
526,265
def _get_union_type_name(type_names_to_union):
    """Construct a unique union type name based on the type names being unioned."""
    if not type_names_to_union:
        raise AssertionError(
            "Expected a non-empty list of type names to union, received: "
            "{}".format(type_names_to_union)
        )
    return "Union__" + "__".join(sorted(type_names_to_union))
f72f6a5212aa97eba32a3da7249087c97ce471b3
34,071
def parse_resolution(resolution_string):
    """
    Parse a resolution string; raise ValueError in case of a wrong format.

    @param resolution_string string representing a resolution, like "128x128"
    @return resolution as a tuple of integers
    """
    tokens = resolution_string.split('x')
    if len(tokens) != 2:
        raise ValueError
    return tuple(int(t) for t in tokens)
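An illustrative call:

parse_resolution("128x128")  # (128, 128)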
c13244a06170e33db213ebceec689a5cb8c72c4f
684,721
import collections


def sort_dict_by_key(d: dict) -> collections.OrderedDict:
    """
    Sorts a dictionary by key

    :param d: Dictionary
    :return: Returns an OrderedDict
    """
    return collections.OrderedDict(
        {k: v for k, v in sorted(d.items())}
    )
cab354823b7d2ccacdfc758afde50ff30aecba5e
364,429
import logging


def get_class_logger(module: str, obj: object) -> logging.Logger:
    """Helper to create a logger for a given class (and module).

    .. testcode::

        import anki_vector
        logger = anki_vector.util.get_class_logger("module_name", "object_name")

    :param module: The name of the module to which the object belongs.
    :param obj: the object that owns the logger.
    """
    return logging.getLogger(".".join([module, type(obj).__name__]))
d8c592eb06fb5c04527e84aab35044d609294692
422,603
def mine_set(x, y, *, drifting=False):
    """Set a mine

    x: X coordinate
    y: Y coordinate
    drifting: Don't anchor the mine and let it drift
    """
    return "Set {0} mine at {1},{2}".format(
        "drifting" if drifting else "anchored", x, y)
e35799b36e9a990b7171b5d3b84607cfb9e60a67
443,638
from datetime import datetime


def get_datetime(hdu):
    """Determine the datetime of an observation"""
    d = hdu[0].header['DATE-OBS']
    t = hdu[0].header['TIME-OBS']
    return datetime.strptime('%s %s' % (d, t), '%Y-%m-%d %H:%M:%S.%f')
5f4ec9c20e6e12136d9fdc3d67c0b5d1b2c7f6f6
512,075
def _render_text_module_mapping(mapping):
    """Renders the text format proto for a module mapping.

    Args:
        mapping: A single module mapping `struct`.

    Returns:
        A string containing the module mapping for the target in protobuf
        text format.
    """
    module_name = mapping.module_name
    proto_file_paths = mapping.proto_file_paths
    content = "mapping {\n"
    content += '  module_name: "%s"\n' % module_name
    if len(proto_file_paths) == 1:
        content += '  proto_file_path: "%s"\n' % proto_file_paths[0]
    else:
        # Use list form to avoid parsing and looking up the fieldname for
        # each entry.
        content += '  proto_file_path: [\n    "%s"' % proto_file_paths[0]
        for path in proto_file_paths[1:]:
            content += ',\n    "%s"' % path
        content += "\n  ]\n"
    content += "}\n"
    return content
a1f0e79fafeba5664e5c79327bf7d75f419477b7
586,060
def compute_hmean(accum_hit_recall, accum_hit_prec, gt_num, pred_num):
    """Compute hmean given hit number, ground truth number and prediction number.

    Args:
        accum_hit_recall (int|float): Accumulated hits for computing recall.
        accum_hit_prec (int|float): Accumulated hits for computing precision.
        gt_num (int): Ground truth number.
        pred_num (int): Prediction number.

    Returns:
        recall (float): The recall value.
        precision (float): The precision value.
        hmean (float): The hmean value.
    """
    assert isinstance(accum_hit_recall, (float, int))
    assert isinstance(accum_hit_prec, (float, int))
    assert isinstance(gt_num, int)
    assert isinstance(pred_num, int)
    assert accum_hit_recall >= 0.0
    assert accum_hit_prec >= 0.0
    assert gt_num >= 0.0
    assert pred_num >= 0.0

    if gt_num == 0:
        recall = 1.0
        precision = 0.0 if pred_num > 0 else 1.0
    else:
        recall = float(accum_hit_recall) / gt_num
        precision = 0.0 if pred_num == 0 else float(accum_hit_prec) / pred_num

    denom = recall + precision
    hmean = 0.0 if denom == 0 else (2.0 * precision * recall / denom)
    return recall, precision, hmean
bee7a6ce0299e5a953c0e57cf455638afec331e2
546,225
def get_short(byte_str):
    """
    Get a short from a byte string

    :param byte_str: byte string
    :return: byte string, short
    """
    short = int.from_bytes(byte_str[:2], byteorder="little")
    byte_str = byte_str[2:]
    return byte_str, short
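An illustrative call: two little-endian bytes are consumed from the front.

get_short(b"\x01\x02\x03")  # (b'\x03', 513)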
9efd4ceda4555a050e5df51ef121d6e8df6d23eb
197,406
def check_textgrid_duration(textgrid, duration):
    """
    Check whether the duration of a textgrid file equals 'duration'.
    If not, replace the duration of the textgrid file.

    Parameters
    ----------
    textgrid : .TextGrid object
        A .TextGrid object.
    duration : float
        A given length of time.

    Returns
    -------
    textgrid : .TextGrid object
        A modified/unmodified textgrid.
    """
    endtime = textgrid.tierDict['phones'].entryList[-1].end
    if endtime != duration:
        last = textgrid.tierDict['phones'].entryList.pop()
        textgrid.tierDict['phones'].entryList.append(last._replace(end=duration))
    return textgrid
1494e1b98bc0c9df9f9e543550fbdb834c490871
620,557
from typing import Dict
from typing import Any
from typing import List
import requests


def find_employees_by_work_history(
    company_url: str, auth_dict: Dict[str, Any]
) -> List[int]:
    """
    Finds a list of employee Coresignal id numbers based on where the
    employees worked.

    Args
    ------
    company_url: HttpUrl
        the linkedin_url of the company you want to find past employees of.
    auth_dict: auth_dict
        the authorization header. Check here for instructions on how to make this

    Returns
    --------
    person_ids: List[int]
        list of ints where every item is an id number of someone who worked
        at the target company
    """
    url = "https://api.coresignal.com/dbapi/v1/search/member"
    data = {"experience_company_linkedin_url": company_url}
    response = requests.post(url, headers=auth_dict, json=data)
    t = [int(x) for x in response.text[1:-1].split(",")]
    return t
8a4461be06e2bb3b1f256137f337b7eb36d4cf89
85,572
def GetReturnRates(price, days=1):
    """ Returns percent return rate for last N days. """
    startPrice = price[-1 - days]
    endPrice = price[-1]
    return ((endPrice - startPrice) * 100) / startPrice
d9410064a69e48c9b1324b47d5498aff17f7b988
66,157
from typing import Dict
from typing import List


def counts_to_dist(counts: Dict[str, int]) -> List[float]:
    """Converts counts to a distribution

    :param counts: a Qiskit-style counts dictionary
    :return: a probability distribution
    """
    num_qubits = len(list(counts.keys())[0].replace(' ', ''))
    counts_list = [0] * 2 ** num_qubits
    for state in counts:
        f_state = state.replace(' ', '')
        counts_list[int(f_state, 2)] = counts[state]
    dist = [element / sum(counts_list) for element in counts_list]
    return dist
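An illustrative call with a made-up two-qubit counts dictionary:

counts_to_dist({"00": 3, "11": 1})  # [0.75, 0.0, 0.0, 0.25]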
2125c6c330507225f9b217badf0f87b660ff8d7b
322,941
def all_members(cls):
    """All members of a class.

    Credit: Jürgen Hermann, Alex Martelli. From
    https://www.oreilly.com/library/view/python-cookbook/0596001673/ch05s03.html.

    Parameters
    ----------
    cls : class

    Returns
    -------
    dict
        Similar to ``cls.__dict__``, but includes the inherited members as well.

    Examples
    --------
    >>> class Parent(object):
    ...     attribute_parent = 42
    >>> class Child(Parent):
    ...     attribute_child = 51
    >>> 'attribute_child' in all_members(Child).keys()
    True
    >>> 'attribute_parent' in all_members(Child).keys()
    True
    """
    try:
        # Try getting all relevant classes in method-resolution order
        mro = list(cls.__mro__)
    except AttributeError:
        # If a class has no __mro__, then it's a classic class
        def getmro(a_class, recurse):
            an_mro = [a_class]
            for base in a_class.__bases__:
                an_mro.extend(recurse(base, recurse))
            return an_mro
        mro = getmro(cls, getmro)
    mro.reverse()
    members = {}
    for someClass in mro:
        members.update(vars(someClass))
    return members
9cbc05b365c15602e710922143208ea8002f4e78
470,696
def _search(name, obj):
    """Breadth-first search for name in the JSON response and return its value."""
    q = []
    q.append(obj)
    while q:
        obj = q.pop(0)
        if hasattr(obj, '__iter__'):
            isdict = isinstance(obj, dict)
            if isdict and name in obj:
                return obj[name]
            for k in obj:
                q.append(obj[k] if isdict else k)
    # Only give up once the queue is exhausted; the flattened original's
    # else placement was ambiguous and could end the search prematurely.
    return None
86ff63b13da893a1155ad2b466bbf3edd0b1f4f6
390,348
def parse_list_response(list_resp):
    """
    Parse out and format the json response from the kube api

    Example response:

    Pod Name                       | Status    | Pod IP          | Node Name
    -------------------------------+-----------+-----------------+------------
    landing-page-76b8b9677f-nmddz  | Running   | 10.144.420.69   | salt-work1
    """
    response_message = (
        "Pod Name                      | Status    | Pod IP          | Node Name\n"
    )
    response_message += (
        (30 * "-") + "+" + (11 * "-") + "+" + (17 * "-") + "+" + (12 * "-")
    )
    for pod in list_resp.get("items"):
        pod_name = pod.get("metadata", {}).get("name", "Not Found")
        status = pod.get("status", {}).get("phase", "Not Found")
        pod_ip = pod.get("status", {}).get("podIP", "Not Found")
        node_name = pod.get("spec", {}).get("nodeName", "Not Found")
        response_message += f"\n{pod_name:30}| {status:10}| {pod_ip:16}| {node_name:11}"
    return response_message
679741e431827e38088223ca4b2c811fe6b81cbf
109,636
def paramsDict2cpu(paramsDict):
    """Returns a copy of paramsDict with all elements on CPU."""
    cpuDict = {}
    for key, val in paramsDict.items():
        cpuDict[key] = val.to('cpu')
    return cpuDict
86f106772f763362ce46e16ac46a80e71c4b5915
287,618
import hashlib


def compute_filehash(filepath, method=None, chunk_size=None):
    """Computes the hash of the given file.

    Args:
        filepath: the path to the file
        method (None): an optional ``hashlib`` method to use. If not
            specified, the builtin ``str.__hash__`` will be used
        chunk_size (None): an optional chunk size to use to read the file, in
            bytes. Only applicable when a ``method`` is provided. The default
            is 64kB. If negative, the entire file is read at once

    Returns:
        the hash
    """
    if method is None:
        with open(filepath, "rb") as f:
            return hash(f.read())

    if chunk_size is None:
        chunk_size = 65536

    hasher = getattr(hashlib, method)()
    with open(filepath, "rb") as f:
        while True:
            data = f.read(chunk_size)
            if not data:
                break
            hasher.update(data)
    return hasher.hexdigest()
e9cd04eb67a11244efc9a02f1cfc0a765bb99cba
236,609
def listify(obj):
    """Make lists from single objects. No changes are made for the argument
    of the 'list' type."""
    if type(obj) is not list:
        return [obj]
    else:
        return obj
41d43396658625f12634db2c718a41cc2bebcc3d
432,595
def _match_annot(info, **kwargs):
    """
    Matches datasets in `info` to relevant keys

    Parameters
    ----------
    info : list-of-dict
        Information on annotations
    kwargs : key-value pairs
        Values of data in `info` on which to match

    Returns
    -------
    matched : list-of-dict
        Annotations with specified values for keys
    """
    # tags should always be a list
    tags = kwargs.get('tags')
    if tags is not None and isinstance(tags, str):
        kwargs['tags'] = [tags]

    # 'den' and 'res' are a special case because these are mutually exclusive
    # values (only one will ever be set for a given annotation) so we want to
    # match on _either_, not both, if and only if both are provided as keys.
    # if only one is specified as a key then we should exclude the other!
    denres = []
    for vals in (kwargs.get('den'), kwargs.get('res')):
        vals = [vals] if isinstance(vals, str) else vals
        if vals is not None:
            denres.extend(vals)

    out = []
    for dset in info:
        match = True
        for key in ('source', 'desc', 'space', 'hemi', 'tags', 'format'):
            comp, value = dset.get(key), kwargs.get(key)
            if value is None:
                continue
            elif value is not None and comp is None:
                match = False
            elif isinstance(value, str):
                if value != 'all':
                    match = match and comp == value
            else:
                func = all if key == 'tags' else any
                match = match and func(f in comp for f in value)
        if len(denres) > 0:
            match = match and (dset.get('den') or dset.get('res')) in denres
        if match:
            out.append(dset)

    return out
5dfbd902e0f8b8d3e654f8c5e91274af9ce40213
378,463
from functools import reduce


def count_leaf_nodes(node):
    """Count the number of leaf nodes in a tree of nested dictionaries."""
    if not isinstance(node, dict):
        return 1
    else:
        return reduce(lambda x, y: x + y,
                      [count_leaf_nodes(child) for child in node.values()], 0)
8716363896cdc54b25a98ce50abdb8f0096a1f94
101,583
def _header_extensions(source):
    """Return a list of header extensions to also check for a source file."""
    if source.endswith(".c") or source.endswith(".m"):
        return ["h"]
    if source.endswith(".cpp") or source.endswith(".cc"):
        return ["hpp", "hh", "ipp"]
    return []
a2b9da352d0e88eceead9927835d2d44861b871d
412,451
def get_container_properties_from_inspect(inspect, host_name):
    """
    Gets the container properties from an inspect object

    :param inspect: The inspect object
    :param host_name: The host name
    :return: dict of (Docker host, Docker image, Docker container id, Docker container name)
    """
    return {
        'Docker host': host_name,
        'Docker image': inspect['Config'].get('Image', 'N/A') if 'Config' in inspect else 'N/A',
        'Docker container id': inspect.get('Id', 'N/A'),
        'Docker container name': inspect.get('Names', [inspect.get('Name', 'N/A')])[0],
    }
e77f7dca38d319f93d20f116b36646d8bfc11dd0
68,505
def rgb_to_hex(r, g, b):
    """ Convert ``(r, g, b)`` in range [0.0, 1.0] to ``"RRGGBB"`` hex string. """
    # Zero-pad to six digits so e.g. pure blue yields "0000ff"; the original
    # hex()[2:] dropped leading zeros and broke the "RRGGBB" contract.
    return "{:06x}".format(
        ((int(r * 255) & 0xff) << 16)
        | ((int(g * 255) & 0xff) << 8)
        | (int(b * 255) & 0xff)
    )
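An illustrative call:

rgb_to_hex(1.0, 0.5, 0.0)  # 'ff7f00'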
482276d4b674a9cc642a0ce6d17f6718a6a3cf5e
530,071
def calculate(number):
    """Returns the sum of the digits in the factorial of the specified number"""
    factorial = 1
    for permutation in range(1, number + 1):
        factorial *= permutation
    answer = sum(list(map(int, str(factorial))))
    return answer
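An illustrative call: 10! = 3628800, whose digits sum to 27.

calculate(10)  # 27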
5b9571e661914044cf9179c56592b154d5ff44a2
198,044
def _compute_deviations(counts, middle_value):
    """
    For each count, compute the deviation (distance) between it and some value.

    :counts: The list of counts.
    :middle_value: The value used to compute the deviations.
    :return: A list of deviations.
    """
    # For each count, compute the deviation (distance) between it and 'middle_value'.
    deviations = [abs(count - middle_value) for count in counts]
    return deviations
99a52170af1a5c474cc84679fde43b62dd883105
377,608
def slope(l):
    """ Return the slope of line 4-tuple. If vertical, return infinity float """
    if l[2] == l[0]:
        return float("inf")
    else:
        return (float(l[3]) - float(l[1])) / (float(l[2]) - float(l[0]))
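Illustrative calls with (x1, y1, x2, y2) tuples:

slope((0, 0, 2, 4))  # 2.0
slope((1, 0, 1, 5))  # inf (vertical line)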
9bb3bfe22b930f21ce54d7f36e8a3fa1419a746e
132,242
def truncate_xticklabels(plot, stop_index=30):
    """ Truncate xtick labels with an ellipsis after `stop_index` """
    for label in plot.get_xticklabels():
        t = label.get_text()
        if len(t) < stop_index:
            continue
        else:
            label.set_text(t[:stop_index] + '...')
    return plot.get_xticklabels()
960448266e8dcdd5dcca593d70c9d612e8cec160
664,762
def permute_recursive(nums: list[int]) -> list[list[int]]:
    """Recursive implementation to return all permutations of nums"""
    if len(nums) == 1:
        return [nums]
    ans: list[list[int]] = []
    for index, num in enumerate(nums):
        # Renamed the comprehension variable so it no longer shadows num.
        rest = [n for idx, n in enumerate(nums) if idx != index]
        for perms in permute_recursive(rest):
            ans.append([num, *perms])
    return ans
072619aa8aa24f6a4cfee5e0b9159ff9fe978347
282,502
import re


def get_line_and_column(location):
    """Finds line and column in location"""
    START_PATTERN = r'(start=)(?P<line>\d+)(,(?P<column>\d+))?'
    search_result = re.search(START_PATTERN, location or '')
    line = column = '0'
    if search_result:
        line = search_result.group('line')
        column = search_result.group('column') or '0'
    return line, column
228e769a64ca49f3b4e83ee127e29b007ae229ed
330,290
from typing import Dict
from typing import Set
from typing import Tuple


def aggregate_unique_objects(associations: Dict[str, Set[Tuple[str, str]]]) -> Dict[str, Set[str]]:
    """
    Utility method to calculate the "unique objects" metric from the associations.
    """
    ret = {}
    for act in associations:
        ret[act] = set()
        for el in associations[act]:
            ret[act].add(el[1])
    return ret
c106a496993983c37377f6a5c04bc30248fa950d
506,220