Columns: content (string, lengths 39–9.28k), sha1 (string, length 40), id (int64, range 8–710k)
def euclidean_dist_sq(pt1, pt2):
    """Helper - Euclidean dist between points, squared"""
    return ((pt1[0] - pt2[0]) ** 2) + ((pt1[1] - pt2[1]) ** 2)
c6b62172700e2cf5759f48a1b91778aa7d81bb97
414,813
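A minimal usage sketch for euclidean_dist_sq above, with hypothetical points:

# A 3-4-5 triangle: the squared distance is 5**2 = 25
print(euclidean_dist_sq((0, 0), (3, 4)))  # 25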
import random


def __get_temperament() -> str:
    """Randomly generate a temperament."""
    die = random.randint(1, 10)
    if 1 <= die <= 5:
        return "Negligible"
    if 6 <= die <= 8:
        return "Fleeting"
    # 9-10 requires a re-roll
    if 1 <= random.randint(1, 10) <= 8:
        return "Intense"
    return "Acute"
0692b143eb65d33558886b98f38135995c247fa9
555,764
def is_stream(method):
    """ Checks if a method is an observable (stream) """
    return method.server_streaming
2221198ba6092573d25273fe5b9f655ba00d7e20
341,439
def read_categories(filepath):
    """ Reads a list of line separated category_uris"""
    with open(filepath, 'r') as f:
        # Hopefully there aren't any newlines in the URLs
        return f.read().rstrip().split('\n')
5487498ffb6c2660112719baf3b11b394fa15fc5
456,789
def FindNearestElectrode(x, y, z, electrodes={}):
    """
    finds the nearest electrode in the dictionary electrodes
    (x,y,z) is the coordinates of a proposed electrode

    :param x: x coordinate of electrode
    :param y: y coordinate of electrode
    :param z: z coordinate of electrode
    :param electrodes: dictionary of defined electrodes: electrodes[ide]=X=(xe, ye, ze)
    :return: id of nearest electrode and the distance to it
    """
    distmin = 1e88
    idemin = None
    for ide in electrodes:
        X = electrodes[ide]
        dist = ((X[0] - x) ** 2 + (X[1] - y) ** 2 + (X[2] - z) ** 2) ** 0.5
        if dist < distmin:
            distmin = dist
            idemin = ide
    if idemin is not None:
        return int(idemin), distmin
    else:
        return idemin, distmin
0b2c9ab8ec789f42125829fbfe83d74835cd605b
414,257
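A minimal usage sketch for FindNearestElectrode above, assuming a hypothetical electrode dictionary:

# Hypothetical example: three electrodes keyed by integer id
electrodes = {1: (0.0, 0.0, 0.0), 2: (1.0, 0.0, 0.0), 3: (0.0, 2.0, 0.0)}
ide, dist = FindNearestElectrode(0.9, 0.1, 0.0, electrodes)
print(ide, round(dist, 3))  # 2 0.141 -- electrode 2 is closest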
def sonar_sweep(rows):
    """ counts the number of entries that have a higher value than the previous

    :param rows: list of depth values
    :type rows: list
    :return: number of entries in rows that have a higher value than the previous
    :rtype: int
    """
    current_depth = None
    depth_increases = 0
    for row in rows:
        row = int(row)
        # Explicit None check so a depth of 0 is not mistaken for "no previous value"
        if current_depth is None:
            pass
        elif row > current_depth:
            depth_increases += 1
        current_depth = row
    return depth_increases
a6e9596f3b88de428684491c579a38123d3b5db1
436,464
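A quick check of sonar_sweep above, with hypothetical depth values:

depths = [199, 200, 208, 210, 200, 207, 240, 269, 260, 263]
print(sonar_sweep(depths))  # 7 -- seven entries are deeper than the one before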
def formatName(repStr, maxLength):
    """format the string name so that no line exceeds maxLength characters """
    if len(repStr) > maxLength:
        pt = 0
        frepStr = ""
        while pt < len(repStr):
            frepStr += repStr[pt:min(pt + maxLength, len(repStr))] + '\n'
            pt += maxLength
        frepStr = frepStr[:-1]
    else:
        frepStr = repStr
    return frepStr
fb1d2d90d889afd31b8ba21639ed4b997c027d9b
521,907
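A short usage sketch for formatName above, with a hypothetical string:

print(formatName("abcdefgh", 3))
# abc
# def
# gh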
def parse_manacost(s: str):
    """
    Split a mana cost string into a list of each mana cost symbol in the string.

    :param s: The mana cost :class:string.
    :return: :class:list
    :rtype: list
    """
    cost = s[:]
    out = []
    buf = ''
    # Handle split card mana costs
    cost = cost.replace(' // ', '')
    for char in cost:
        buf += char
        if char == '}' or char == ' ':
            out.append(buf)
            buf = ''
    return out
d839d658404c842cf26c778bf96093783f638370
320,117
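A short usage sketch for parse_manacost above, with a hypothetical cost string:

print(parse_manacost('{2}{G}{G}'))  # ['{2}', '{G}', '{G}']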
def parse_show_qos_cos_map(raw_result):
    """
    Parse the show command raw output.

    :param str raw_result: vtysh raw result string.
    :rtype: dict
    :return: The parsed result of the 'show qos cos-map' command in a \
        dictionary where each key is a code point in the cos map:

     ::

         {
             '0': {'code_point': '0',
                   'local_priority': '1',
                   'color': 'green',
                   'name': 'Best_Effort'},
             '1': {'code_point': '1',
                   'local_priority': '0',
                   'color': 'green',
                   'name': 'Background'},
             ...
         }
    """
    hyphen_line = raw_result.splitlines()[1]
    columns = [pos for pos, char in enumerate(hyphen_line) if char == ' ']
    result = {}

    for line in raw_result.splitlines():
        if line[0].isdecimal():
            code_point = line[0:columns[0]].strip()
            result[code_point] = {}
            result[code_point]['code_point'] = line[0:columns[0]].strip()
            result[code_point]['local_priority'] = \
                line[columns[0]:columns[1]].strip()
            result[code_point]['color'] = line[columns[1]:columns[2]].strip()
            result[code_point]['name'] = line[columns[2]:len(line)].strip()

    return result
3b02b3af0224aed923230680992eaabae85543cb
632,367
def root(ctx):
    """Retrieve the root context.

    :param ctx: Click context
    :return ctx: Click root context
    """
    while ctx.parent:
        ctx = ctx.parent
    return ctx
8f92aa6f04a543e804a10adf26bdcac6148afceb
344,344
import logging


def persist_experts_in_physical_partition(
    folder_path="results",
    results=None,
    persist_cv_results=False,
    persist_cv_data=False,
    persist_model_reprs=False,
    persist_best_model=False,
    persist_partition=False,
    persist_model_selector_results=True,
):
    """Store expert files for each partition.

    The file names follow {partition_hash}.{expert_type}
    e.g. 795dab1813f05b1abe9ae6ded93e1ec4.cv_data

    Parameters
    ----------
    results : list of ModelSelectorResults
        results of model selection for each partition
    folder_path : str
        Path to the directory, where expert files are stored,
        by default '' resulting in current working directory
    persist_cv_results : bool
        If True `cv_results` of sklearn.model_selection.GridSearchCV as pandas df
        will be saved as pickle for each partition
    persist_cv_data : bool
        If True the pandas df detail cv data will be saved as pickle for each partition
    persist_model_reprs : bool
        If True model reprs will be saved as json for each partition
    persist_best_model : bool
        If True best model will be saved as pickle for each partition
    persist_partition : bool
        If True dictionary of partition label will be saved as json for each partition
    persist_model_selector_results : bool
        If True ModelSelectorResults with all important information will be saved
        as pickle for each partition

    Returns
    -------
    str
        Folder path where experts were stored
    """
    if results is not None:
        for result in results:
            if persist_best_model:
                result.persist(attribute_name="best_model", path=folder_path)
            if persist_partition:
                result.persist(attribute_name="partition", path=folder_path)
            if persist_model_selector_results:
                result.persist(path=folder_path)
            if persist_cv_results:
                result.persist(attribute_name="cv_results", path=folder_path)
            if persist_cv_data:
                result.persist(attribute_name="cv_data", path=folder_path)
            if persist_model_reprs:
                result.persist(attribute_name="model_reprs", path=folder_path)
    else:
        logging.info("You passed empty results. Nothing is being persisted.")
    return folder_path
a65e4dc96561dd3713a0a88dc1aad9d3ff4c9c43
333,701
def _pil_image_to_bytes(p_img):
    """
    Get the component bytes from the given PIL Image.

    In recent versions of PIL, the tobytes function is the correct thing to
    call, but some older versions of PIL do not have this function.

    :param p_img: PIL Image to get the bytes from.
    :type p_img: PIL.Image.Image

    :returns: Byte string.
    :rtype: bytes
    """
    if hasattr(p_img, 'tobytes'):
        return p_img.tobytes()
    else:
        # Older version of the function.
        return p_img.tostring()
c8dd0a53fdc205429b4ae0074c0fc0b127da60de
574,162
def find_factors(b):
    """Find factors of a number."""
    res = []
    for i in range(1, b + 1):
        if b % i == 0:
            print(i)
            res.append(i)
    return res
61a2d8dc3727eed32752ac6dbd58ac74fdff9d67
46,721
import gzip


def open_file(filename, as_text=False):
    """Open the file gunzipping it if it ends with .gz.

    If as_text the file is opened in text mode,
    otherwise the file's opened in binary mode."""
    if filename.lower().endswith('.gz'):
        if as_text:
            return gzip.open(filename, 'rt')
        else:
            return gzip.open(filename, 'rb')
    else:
        if as_text:
            return open(filename, 'rt')
        else:
            return open(filename, 'rb')
fa9d13c38096044440a0a4e1e3043f1b551f64d8
356,895
def image_clone(image, pixeltype=None):
    """
    Clone an ANTsImage

    ANTsR function: `antsImageClone`

    Arguments
    ---------
    image : ANTsImage
        image to clone

    pixeltype : string (optional)
        new datatype for image

    Returns
    -------
    ANTsImage
    """
    return image.clone(pixeltype)
4c7ddd82c63a2baba5fe380397c372a314a0b863
122,451
def get_token_to_id_filter(sfile_filter):
    """
    This function returns a filter which converts each token to a numerical id.
    This filter only affects record_dict by altering the contents of
    record_dict['feature_values'].

    Parameters
    ----------
    sfile_filter : instance of SFileFilter

    Returns
    -------
    token_to_id_filter : function
    """
    token2id = sfile_filter.token2id

    def token_to_id_filter(record_dict):
        record_dict['feature_values'] = {
            token2id[token]: value
            for token, value in record_dict['feature_values'].items()
            if token in token2id}
        keep_doc = True

        return keep_doc

    return token_to_id_filter
2bd1ed09d472945f30747b9ba4b2451455b52921
377,785
def get_query_params(request, *args):
    """
    Allows to change one of the URL get parameter while keeping all the others.

    Usage::

      {% load libs_tags %}
      {% get_query_params request "page" page_obj.next_page_number as query %}
      <a href="?{{ query }}">Next</a>

    You can also pass in several pairs of keys and values::

      {% get_query_params request "page" 1 "foobar" 2 as query %}

    You often need this when you have a paginated set of objects with filters.

    Your url would look something like ``/?region=1&gender=m``. Your paginator
    needs to create links with ``&page=2`` in them but you must keep the
    filter values when switching pages.

    :param request: The request instance.
    :param *args: Make sure to always pass in pairs of args. One is the key,
      one is the value. If you set the value of a key to "!remove" that
      parameter will not be included in the returned query.
    """
    query = request.GET.copy()
    index = 1
    key = ''
    for arg in args:
        if index % 2 != 0:
            key = arg
        else:
            if arg == "!remove":
                try:
                    query.pop(key)
                except KeyError:
                    pass
            else:
                query[key] = arg
        index += 1
    return query.urlencode()
ae763ce3bdf9ac53787ba4b050939a786b16d988
218,876
def get_metabolite_name(compound):
    """Get metabolite id from compound name."""
    return f"M_{compound}_c"
6823aa52dff7964288c02a1d484f331d13b465a4
591,348
def update_user_defined_app_port_protocol(
    self,
    port: int,
    protocol: int,
    name: str,
    priority: int,
    disabled: bool,
    description: str = "",
) -> bool:
    """
    Create or modify a user-defined application for IP port and protocol

    .. list-table::
        :header-rows: 1

        * - Swagger Section
          - Method
          - Endpoint
        * - applicationDefinition
          - POST
          - /applicationDefinition/portProtocolClassification/{port}/{protocol}

    :param port: IP port, e.g. ``8089``
    :type port: int
    :param protocol: IP = ``0``, TCP = ``6``, UDP = ``17``
    :type protocol: int
    :param name: Identifying name of the domain, can also re-use the
        domain. Maximum of 31 characters.
    :type name: str
    :param priority: Confidence metric for classification, e.g. ``95``
    :type priority: int
    :param disabled: ``False`` if classification is active and ``True``
        if disabled
    :type disabled: bool
    :param description: Description of the classification, defaults to ""
    :type description: str, optional
    :return: Returns True/False based on successful call
    :rtype: bool
    """  # noqa: W505
    data = {
        "port": port,
        "protocol": protocol,
        "name": name,
        "description": description,
        "priority": priority,
        "disabled": disabled,
    }

    return self._post(
        "/applicationDefinition/portProtocolClassification/{}/{}".format(
            port, protocol
        ),
        data=data,
        return_type="bool",
    )
3654e39982c98d8433a5e298e677bcedfb2ad92b
296,511
def _unlib(name):
    """Inverse of _lib()."""
    assert name.startswith('lib') and name.endswith('.a')
    return name[3:-2]
932474c8bae45f70570bd946b8236483df4538c7
174,246
def valueBand(v0, delta=0.05):
    """
    Get the value band of the perturbation for a pixel with intensity of v0

    :param v0: the intensity of the target pixel
    :param delta: the difference constraint
    :return: a tuple containing the low edge and high edge of the value band
    """
    vl = max(v0 - delta, 0)
    vh = min(v0 + delta, 1)
    return vl, vh
a641830aae5198a35ecf5cdc6e8223678efb3e0f
487,598
def merge_bbox(bboxes):
    """
    Merge bounding boxes.

    Arguments:
        bboxes: iterator of bbox tuples.

    Returns:
        An encompassing bbox.
    """
    minx, miny = [], []
    maxx, maxy = [], []
    for a, b, c, d in bboxes:
        minx.append(a)
        miny.append(b)
        maxx.append(c)
        maxy.append(d)
    return (min(minx), min(miny), max(maxx), max(maxy))
71bbde1791fe2c417bd492d2ed2939d7eea61f93
517,007
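A quick usage sketch for merge_bbox above, with hypothetical (minx, miny, maxx, maxy) boxes:

boxes = [(0, 0, 2, 2), (1, -1, 3, 1)]
print(merge_bbox(boxes))  # (0, -1, 3, 2) -- the box enclosing both inputs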
def parse_expinfo(filename):
    """Read Hive and RPi number from filename.

    Filename e.g.: bgx_hive1_rpi1_190727-140003-utc.jpg
    """
    hive = int(filename.split("hive")[-1][0])
    rpi = int(filename.split("rpi")[-1][0])
    return hive, rpi
9539ba5d67833e3994998b016aea6a7893aee108
314,254
def parse_illumina_trimstats(path_to_trimstats):
    """Parse illumina trimstats into a dict.

    Args:
        path_to_trimstats: path to file that contains illumina trimstats

    Returns:
        dict with trimstats qc metrics

    Raises:
        AssertionError if a row splits into a number of tokens other than 2
        ValueError if some value is not suitable for converting into int
    """
    qc_dict = {}
    with open(path_to_trimstats) as f:
        rawstats = f.readline()
        tokens = rawstats.split(";")
        qc_dict["Total read-pairs processed"] = int(tokens[0].split()[-1])
        qc_dict["Total read-pairs trimmed"] = int(tokens[1].split()[-1])
    return qc_dict
b7390f3b44854e6fb6390be02e8903f2170bb37e
576,389
def make_geoid_field(df):
    """Make correctly formatted geoid string column in census dataframe

    Args:
        df (DataFrame): census data
    """
    df['geoid'] = df['state'] + df['county'] + df['tract'] + df['block group']
    return df
bc99bbb2fc08259c453c03f8157f1ae766fc1ace
463,055
def get_repo_name(request):
    """
    Returns the repository name from the request

    :param request: incoming request
    :return: repository name from the webhook (String)
    """
    return request.json['repository']['full_name']
50ec36f5325270aa7f1e944e998d24e8b8691ee6
485,928
def build_part_check(part, build_parts):
    """
    check if only specific parts were specified to be built when parsing
    if the list build_parts is empty, then all parts will be parsed
    """
    if not build_parts:
        return True
    return bool(part in build_parts)
9b9923b45e794bff3df03a63cbbed3280a25d211
677,591
def _InitScript(context):
    """Returns the initScript property or a successful no-op if unspecified."""
    return context.properties.get('initScript', 'return 0')
0e7145484f3ffe8dfeb9a2f2a8add335110d5a83
529,432
def subset_data(data, required_columns, column_names, substitute=True):
    """ Take desired columns from US data and rename them.

    Parameters
    ----------
    data : pd.DataFrame
        Loaded in data from csv.
    required_columns : list
        Which subset of columns is being taken.
    column_names : list
        New names for dataframe columns. Makes it clearer for use in microsim.
    substitute : bool
        If specified variables aren't present in a specific wave of US data
        then substitute it with an empty column for that wave. For example
        depression (hcond17) is not available in wave 2 of UKHLS but is
        available in all other waves. Setting substitute = True replaces the
        variable in wave 2 with an empty column. Is entirely empty data but
        preserves consistent data across all waves for later imputation.
        Otherwise will raise a key error.

    Returns
    -------
    data : pd.DataFrame
        Subset of initial data with desired columns.
    """
    # If a column is missing substitute in a dummy column of missings (-9).
    # Keeps data consistent when variables are missing from certain waves.
    # Don't go nuts though.
    if substitute:
        for item in required_columns:
            if item not in data.columns:
                data[item] = -9
                print(f"Warning! {item} not found in current wave. Substituting a dummy column. "
                      + "Set substitute = False in the subset_data function to suppress this behaviour.")
    # Take subset of data for required columns and rename them.
    data = data[required_columns]
    data.columns = column_names
    return data
ff6a7ceef9c476491cc3a8e3c679efa73a34f438
447,893
import time


def _to_time_in_iso8601(_time):
    """Convert int or float to time in iso8601 format."""
    return time.strftime("%Y-%m-%dT%H:%M:%SZ", time.gmtime(_time))
12acaf8c021d4b98ec6c0708d18092e82fe382a7
58,510
def interpolate_position_embeddings(model, layer, param):
    """
    Fine-tuning at a different resolution than that which a model was
    pretrained at requires interpolating the learned position embeddings.
    """
    if (
        hasattr(model.trunk, "interpolate_position_embedding")
        and layer.shape != param.shape
    ):
        interp = model.trunk.interpolate_position_embedding
        if callable(interp):
            try:
                param = interp(param)
            except BaseException:
                raise RuntimeError("Unable to interpolate position embeddings")
    return param
8c8c9c11015b93473a62899baf8301ccd4cdaad0
60,380
def multivalue(config_assistant):
    """Returns a tuple of a multivalue object and the config_assistant"""
    config = config_assistant.config
    vport_1 = config.Vport.add(Name="ethernet-1")
    topology_1 = config.Topology.add(Vports=vport_1)
    dg1 = topology_1.DeviceGroup.add()
    dg1.Multiplier = 50
    ipv4 = dg1.Ethernet.add().Ipv4.add()
    config_assistant.commit()
    return (
        config.Topology.find()[0].DeviceGroup.find()[0].Ethernet.find()[0].Ipv4.find()[0].Address,
        config_assistant,
    )
a8f661f937f5f22a2d49acfef13ffd0b4d402a75
387,322
def get_most_sold_item(df):
    """Return a tuple of the name of the most sold item and the number of
    units sold"""
    most_sold_df = df.groupby(['Item'])['Units'].sum()
    return (most_sold_df.idxmax(), most_sold_df.loc[most_sold_df.idxmax()])
0e641279bc47c62b91a82c46749828e6e375e748
639,825
def anagrams(word1, word2):
    """Return whether two words are anagrams"""
    return sorted(word1) == sorted(word2)
2e0e68496f53066c85390630572309b20500bb4b
214,980
def load_trace(logfile, root_dir, api, blacklist):
    """Loads a trace file and returns the Results instance.

    Arguments:
    - logfile: File to load.
    - root_dir: Root directory to use to determine if a file is relevant to the
                trace or not.
    - api: A tracing api instance.
    - blacklist: Optional blacklist function to filter out unimportant files.
    """
    data = api.parse_log(logfile, (blacklist or (lambda _: False)))
    assert len(data) == 1, 'More than one trace was detected!'
    if 'exception' in data[0]:
        # It got an exception, raise it.
        raise data[0]['exception']
    results = data[0]['results']
    if root_dir:
        results = results.strip_root(root_dir)
    return results
e51ad3e61ee4206e74800f1c24b14fd20f51e477
11,732
def check_convergence(new_measure, old_measure, direction, threshold):
    """Check if the performance meets the given threshold

    Args:
        new_measure (float): New performance
        old_measure (float): Old performance
        direction (str): String to indicate how to compare two measures
        threshold (float): The given threshold

    Returns:
        True if the new measure satisfies threshold, False otherwise
    """
    sign = 1.0 if direction == 'higher' else -1.0
    if sign * (new_measure - old_measure) / old_measure < threshold:
        return True
    else:
        return False
fca1c9deb85c27f36c9e50b9ee9839121778d074
43,680
def get_slope_intercept(point1, point2):
    """
    :param point1: lower point of the line
    :param point2: higher point of the line
    :return: slope and intercept of this line
    """
    slope = (point1[1] - point2[1]) / (point1[0] - point2[0])  # slope = (y2-y1) / (x2-x1)
    intercept = point1[1] - slope * point1[0]  # y = m*x + b
    return slope, intercept
2a6ead371b40461966f15657d8a484752dd83d9c
209,357
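A quick numeric check of get_slope_intercept above, with hypothetical points:

slope, intercept = get_slope_intercept((1, 3), (3, 7))
print(slope, intercept)  # 2.0 1.0 -> the line y = 2x + 1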
def createProtein(element):
    """ Create dictionary representation of Protein element from Percolator XML output """
    defns = element.nsmap[None]  # default namespace
    protein = {}
    protein["ID"] = element.get("{{{}}}protein_id".format(defns))
    protein["q-value"] = float(element.find("{{{}}}q_value".format(defns)).text)
    protein["Peptides"] = element.xpath("x:peptide_seq/@seq",
                                        namespaces={"x": defns},
                                        smart_strings=False)
    return protein
42eb7283d61eaad68cd8e01e7a1ad0f3d4d930f6
634,855
def get_colors(all_palettes=False):
    """
    Generates a dictionary of standard colors and returns a sequential color
    palette.

    Parameters
    ----------
    all_palettes : bool
        If True, lists of `dark`, `primary`, and `light` palettes will be
        returned. If False, only the `primary` palette will be returned.
    """
    # Define the colors
    colors = {
        'dark_black': '#2b2ba',
        'black': '#3d3d3d',
        'primary_black': '#4c4b4c',
        'light_black': '#8c8c8c',
        'pale_black': '#afafaf',
        'dark_blue': '#154577',
        'blue': '#005da2',
        'primary_blue': '#3373ba',
        'light_blue': '#5fa6db',
        'pale_blue': '#8ec1e8',
        'dark_green': '#356835',
        'green': '#488d48',
        'primary_green': '#5cb75b',
        'light_green': '#99d097',
        'pale_green': '#b8ddb6',
        'dark_red': '#79302e',
        'red': '#a3433f',
        'primary_red': '#d8534f',
        'light_red': '#e89290',
        'pale_red': '#eeb3b0',
        'dark_gold': '#84622c',
        'gold': '#b1843e',
        'primary_gold': '#f0ad4d',
        'light_gold': '#f7cd8e',
        'pale_gold': '#f8dab0',
        'dark_purple': '#43355d',
        'purple': '#5d4a7e',
        'primary_purple': '#8066ad',
        'light_purple': '#a897c5',
        'pale_purple': '#c2b6d6',
    }

    # Generate the sequential color palettes.
    keys = ['black', 'blue', 'green', 'red', 'purple', 'gold']
    dark_palette = [colors[f'dark_{k}'] for k in keys]
    primary_palette = [colors[f'primary_{k}'] for k in keys]
    light_palette = [colors[f'light_{k}'] for k in keys]

    # Determine what to return.
    if all_palettes:
        palette = [dark_palette, primary_palette, light_palette]
    else:
        palette = primary_palette

    return [colors, palette]
baf26aa376a2b4edef3df74c14e084079d5a0891
575,537
def is_custom_unicode_char(char: str) -> bool:
    """Return whether a char is in the custom unicode range we use."""
    assert isinstance(char, str)
    if len(char) != 1:
        raise Exception("Invalid Input; must be length 1")
    return 0xE000 <= ord(char) <= 0xF8FF
2db33b56b6458bfd8555667d87d09a4ccc9869a7
464,704
def get_cvelists_for_cve_result(cve_result):
    """
    Get (has_cve_list, fixed_cve_list) for CVE check result

    :param cve_result: the CVE check result, a dict
    """
    has_cve_list = []
    fixed_cve_list = []
    for cve in cve_result:
        result = cve_result[cve]
        if "cvefix" in result and result["cvefix"]:
            fixed_cve_list.append(cve)
        elif "cveadd" in result and result["cveadd"]:
            has_cve_list.append(cve)
    return (has_cve_list, fixed_cve_list)
0e1a37ac9b602074c1ad4c01b4aa6045b6e86870
237,479
def _get_factory_attr(factory, attr):
    """
    Try getting a meta attribute 'attr' from a factory.

    The attribute is looked up as '_attr' on the factory, then, if the factory
    and its model class names match, as 'attr' on the model's meta. The
    factory's own meta cannot define custom attributes and is skipped.

    If the attribute is not found in either place, an AttributeError is raised.
    """
    try:
        return getattr(factory, "_" + attr)
    except AttributeError:
        # pylint:disable=protected-access
        if factory.__name__ == factory._meta.model.__name__ + "Factory":
            return getattr(factory._meta.model._meta, attr)
        else:
            raise
29f8a87fdf573b0fae3864f2fa00ba0ef263ffac
109,435
import re


def normalize_path(path):
    """
    Normalize absolute path to a file in a package down to a package path.

    Not foolproof, but good enough for the tests.
    """
    return re.sub(r"^.*site-packages/", "", path)
08809e3ec1bcabf1866501d53aca0e82687b8275
456,008
from itertools import zip_longest


def grouper(n, iterable, fillvalue=None):
    """
    Split an iterable in groups of max N elements.

    grouper(3, 'ABCDEFG', 'x') --> ABC DEF Gxx
    """
    args = [iter(iterable)] * n
    if fillvalue:
        # yield from, rather than return: a plain return inside a generator
        # would end iteration immediately and produce no groups at all.
        yield from zip_longest(fillvalue=fillvalue, *args)
        return
    for chunk in zip_longest(*args):
        yield filter(None, chunk)
321a19905603b40b19cdce932b913905c2cf2d83
681,815
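A quick check of grouper above, relying on the generator fix noted in its body:

print([''.join(g) for g in grouper(3, 'ABCDEFG', 'x')])  # ['ABC', 'DEF', 'Gxx']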
def _find_or_none(string, substring, start):
    """Find the start of the substring or return None."""
    index = string.find(substring, start)
    return index if index != -1 else None
a955e71e077a6662bc05877a68278475a0108325
252,875
import hashlib


def generate_hashsum(file_name):
    """Generate a SHA-256 hashsum of the given file."""
    hash_sha256 = hashlib.sha256()
    with open(file_name, "rb") as f:
        for chunk in iter(lambda: f.read(4096), b""):
            hash_sha256.update(chunk)
    return hash_sha256.hexdigest()
45e52e7777a30c46ab0d81a923f39a4bdd94d32b
600,882
def matrix_slice(M, corner_shape, corner='nw'):
    """
    Takes a two dimensional array ``M`` and slices into four parts dictated
    by the ``corner_shape`` and the corner string ``corner``.

    ::

          m   n
         p [ A | B ]
           [-------]
         q [ C | D ]

    If the given corner and the shape is the whole array then the remaining
    arrays are returned as empty arrays, ``numpy.array([])``.

    Parameters
    ----------
    M : ndarray
        2D input matrix
    corner_shape : tuple
        An integer valued 2-tuple for the shape of the corner
    corner : str
        Defines which corner should be used to start slicing. Possible
        options are the compass abbreviations: ``'nw', 'ne', 'sw', 'se'``.
        The default is the north-west corner.

    Returns
    -------
    A : ndarray
        Upper left corner slice
    B : ndarray
        Upper right corner slice
    C : ndarray
        Lower left corner slice
    D : ndarray
        Lower right corner slice

    Examples
    --------
    >>> A = np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]])
    >>> matrix_slice(A, (1, 1))
    (array([[1]]),
     array([[2, 3]]),
     array([[4],
            [7]]),
     array([[5, 6],
            [8, 9]]))
    >>> matrix_slice(A, (2, 2), 'sw')
    (array([[1, 2]]),
     array([[3]]),
     array([[4, 5],
            [7, 8]]),
     array([[6],
            [9]]))
    >>> matrix_slice(A, (0, 0))  # empty A
    (array([], shape=(0, 0), dtype=int32),
     array([], shape=(0, 3), dtype=int32),
     array([], shape=(3, 0), dtype=int32),
     array([[1, 2, 3],
            [4, 5, 6],
            [7, 8, 9]]))
    """
    if corner not in ('ne', 'nw', 'se', 'sw'):
        raise ValueError('The corner string needs to be one of'
                         '"ne, nw, se, sw".')

    x, y = M.shape
    z, w = corner_shape

    if corner == 'nw':
        p, m = z, w
    elif corner == 'ne':
        p, m = x, y - w
    elif corner == 'sw':
        p, m = x - z, w
    else:
        p, m = x - z, y - w

    return M[:p, :m], M[:p, m:], M[p:, :m], M[p:, m:]
213f52c85cd92c063ddd273d47f4bd5667cf975a
264,133
def is_operand(c):
    """
    Return True if the given char c is an operand, e.g. it is a number

    >>> is_operand("1")
    True
    >>> is_operand("+")
    False
    """
    return c.isdigit()
ddc42b0fc6af1ca79d8a504df67bff767672d01b
137,758
def txt_to_list(txt_path):
    """
    Load text file as list

    Parameters
    -------
    txt_path: str
        text file path

    Returns
    -------
    data_list: list
        data represented as python list
    """
    with open(txt_path, 'r') as file:
        data_list = [line.rstrip('\n') for line in file]
    return data_list
772c28afd1a46ebecaceb20ca32359e64284c6d4
412,125
def filt_tracks_by_intensities(df_tracks, df_ints_by_track, int_type, bounds):
    """Filter tracks based on their intensities (both minimum and maximum).

    Args:
        df_tracks (Pandas dataframe): track data (can come from channel of
            either color)
        df_ints_by_track (Pandas dataframe): intensities by track (typically
            this would be the output of the compute_track_intensities function)
        int_type (string): column name in df_ints_by_track storing the
            intensity values that will be used for track filtering
        bounds (tuple): lower and upper bounds of the intensities

    Returns:
        df_ints_filt (Pandas dataframe): filtered df_ints_by_track with only
            the desired tracks remaining.
        df_tracks_filt (Pandas dataframe): filtered df_tracks with only the
            desired tracks remaining.
    """
    ibt, it = df_ints_by_track, int_type
    df_ints_filt = ibt.loc[(ibt[it] > bounds[0]) & (ibt[it] < bounds[1])].copy()
    filt_IDs = df_ints_filt['track_ID'].unique()
    df_tracks_filt = df_tracks[df_tracks['track_ID'].isin(filt_IDs)].copy()
    return df_ints_filt, df_tracks_filt
bbabdc62fb017a66d6f374544af5291c48d68167
70,303
def get_min_max_weight_edges(G):
    """ Return the minimum and maximum inverse edge weight (1/weight) in the graph G. """
    min_weight = 1e10
    max_weight = 0
    for edge in G.edges(data=True):
        min_weight = min(1.0 / edge[2]["weight"], min_weight)
        max_weight = max(1.0 / edge[2]["weight"], max_weight)
    return min_weight, max_weight
6b7da7280a8ac15b9770877051a702406bcb9f08
274,659
def generate_thing_group_tree(iot_client, tree_dict, _parent=None):
    """
    Generates a thing group tree given the input tree structure.

    :param iot_client: the iot client for boto3
    :param tree_dict: dictionary with the key being the group_name, and the
        value being a sub tree.
        tree_dict = {
            "group_name_1a": {
                "group_name_2a": {
                    "group_name_3a": {} or None
                },
            },
            "group_name_1b": {}
        }
    :return: a dictionary of created groups, keyed by group name
    """
    if tree_dict is None:
        tree_dict = {}
    created_dict = {}
    for group_name in tree_dict.keys():
        params = {"thingGroupName": group_name}
        if _parent:
            params["parentGroupName"] = _parent
        created_group = iot_client.create_thing_group(**params)
        created_dict[group_name] = created_group
        subtree_dict = generate_thing_group_tree(
            iot_client=iot_client, tree_dict=tree_dict[group_name], _parent=group_name
        )
        created_dict.update(subtree_dict)
    return created_dict
11e808a6f7afb01a519cb638cffb3a24368baddc
509,316
import re


def _obtain_filename_from_response(response):
    """Obtains the filename from a requests response object

    Args:
        response: a response object

    Returns:
        filename: a string
    """
    content_disposition = response.headers.get("content-disposition")
    filename = re.findall('filename=(.+)', content_disposition) \
        if content_disposition else None
    filename = (None if len(filename) == 0 else filename[0]) \
        if filename else None
    return filename
e0a3f937536d5a80aabc8789d113e1d0d92762b5
402,988
def get_subtiles(maintile, bb):
    """ return all tiles contained in main tile"""
    n = len(maintile)
    subb = [k for k in bb.keys() if k[:n] == maintile]
    return subb
252c8e1a9eeb0333bf8fe09bc592297ccf958f93
395,477
def uniformData(tree, promoters, positional):
    """ Uniformize the format of the data in order to allow polymorphic inputs.

    Input:
    - tree: list of step labels
    - promoters: single list for all steps or dict (no change)
    - positional: boolean for all steps or list (no change)

    Returns:
    - promoters: dictionary of promoters allowed at each step
    - positional: list of steps that allow shuffling
    """
    if type(positional) is bool:
        if positional:
            positional = tree
        else:
            positional = []
    if type(promoters) is not dict:
        promoters = {step: promoters for step in tree}
    return promoters, positional
81d9e5f90080a63d8a7824299bb964f8757823e9
492,672
from typing import Any


def is_int(value: Any) -> bool:
    """Checks if a value is an integer. The type of the value is not
    important, it might be an int or a float."""
    # noinspection PyBroadException
    try:
        return value == int(value)
    except Exception:
        return False
f9b1c28f6c954376373a1d8ea8de533d2b4c8974
672,668
def auth_link(req, link):
    """Return an "authenticated" link to `link` for authenticated users.

    If the user is anonymous, returns `link` unchanged. For authenticated
    users, returns a link to `/login` that redirects to `link` after
    authentication.
    """
    if req.authname != 'anonymous':
        return req.href.login(referer=link)
    return link
6a0c3eb330145a357a327b0226dd6368ca3583d1
293,681
def average(A_matrices, B_vectors):
    """ Average over cost function matrices.

    The cost functions contain the three objects of the cost function: A, B, C
    A is a quadratic matrix (97x97), B a vector (d=97), and C is a constant.
    In the end, we are interested in the best-fit charges Q which are the
    solution to Q = A^-1 B

    Arguments:
        A_matrices: a list of NxN matrices
        B_vectors: a list of vectors with len N

    Returns:
        A: the average of all A_matrices.
        B: the average of all B_vectors.
    """
    # Initialize empty
    A = A_matrices[0] * 0
    B = B_vectors[0] * 0
    # Average by adding all objects and dividing by their number
    for index in range(len(A_matrices)):
        A += A_matrices[index]
        B += B_vectors[index]
    # Divide
    number_snapshots = len(A_matrices)
    A /= number_snapshots
    B /= number_snapshots
    return A, B
a30ab24be30d07340b1c8dd05a328a3f2b6ebc1a
594,776
def next_power_of_2(x):
    """
    Returns the first power of two >= x, so f(2) = 2, f(127) = 128, f(65530) = 65536

    :param x:
    :return:
    """
    # NOTES for this black magic:
    # * .bit_length returns the number of bits necessary to represent self in binary
    # * x << y means x with the bits shifted to the left by y, which is the same as
    #   multiplying x by 2**y (but faster)
    return 1 << (x - 1).bit_length()
90f2ae20224461a1a7f7378a289c52723e8aeb8a
390,223
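A quick check of next_power_of_2 above against the values its docstring promises:

for x in (2, 127, 65530):
    print(x, next_power_of_2(x))  # 2 -> 2, 127 -> 128, 65530 -> 65536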
def show_results(results, n=10, print_results=True):
    """Show the top-n results of a classification."""
    # Print headline
    s = ""
    if len(results) == 0:
        s += "-- No results --"
    else:
        s += "{:18s} {:}\n".format("Class", "Prob")
        s += "#" * 50 + "\n"
        for entry in results:
            if n == 0:
                break
            else:
                n -= 1
            s += "{:18s} {:>7.4f}%\n".format(
                entry["semantics"], entry["probability"] * 100
            )
        s += "#" * 50
    if print_results:
        print(s)
    return s
f3d1bb2b3ad151ddc861744db38989ce571f1a06
485,436
def check_list(data):
    """check if data is a list, if it is not a list, it will return a list as [data]"""
    if type(data) is not list:
        return [data]
    else:
        return data
00ae7a857c3f969ca435928edf98ed5bb36c1c34
705,548
def getStationNum(item):
    """Helper function to sort the list of tuples (river name, number of
    stations) according to number of stations"""
    return item[1]
62f1b72a1fa9c96bcc1cc144b2bde22f2758cfb3
152,199
def pick_from_file(inlist, item):
    """
    This function helps to pick the desired output from the outputVolume list

    :param inlist:
    :param item:
    :return:
    """
    return inlist[item]
c5e0aa7ec206f4989ae1fb9ac8b54af27bdb404c
482,608
import six


def get_async_result_tuples(results):
    """
    Given a dictionary like::

        {
            arg_0: {
                0: result or exception obj
                1: result or exception obj
                2: result or exception obj
            },
            arg_1: {
                0: result or exception obj
                1: result or exception obj
                2: result or exception obj
            },
        }

    Return a list, composed of tuples of (host, arg, result) where arg is the
    input argument, host is the host index and result is the response/result
    object from the zookeeper api call

    Any results that contain exception objects / errors are ignored.

    :param results: A result set dictionary as returned from
        ``get_async_call_per_host``
    :returns: ``list``
    """
    if not isinstance(results, dict):
        raise ValueError('"results" must be dict, got: %s' % type(results))

    items = []
    for arg, host_result in six.viewitems(results):
        items.extend([(host, arg, result)
                      for host, result in six.viewitems(host_result)
                      if not isinstance(result, Exception)])
    return items
157f739a682fc614e899e23c9b906ba2be16874d
357,867
def unlock_params(lock_handle):
    """Returns parameters for Action Unlock"""
    return {'_action': 'UNLOCK', 'lockHandle': lock_handle}
ba23cb48ea7f013009aff34fee87f4677bc096d7
122,985
def _providers_string_to_list(val):
    """Convert string to list if not already"""
    # Use a set to remove duplicates
    if type(val) == str:
        return list(set(val.replace(' ', '').split(',')))
    return list(set(val))
00ae1b33c2d8064248d2e08ca98a8431d620532a
434,395
def get_ohlc(kline):
    """Creates open, high, low, close data from kline

    Arguments:
        kline {list} -- kline data

    Returns:
        list -- lists of open, high, low, close data
    """
    opn = [float(o[1]) for o in kline]
    close = [float(d[4]) for d in kline]
    high = [float(h[2]) for h in kline]
    low = [float(l[3]) for l in kline]
    return opn, high, low, close
96d8fa373e1d9b2eb0dcf071d3ddb532f57b6230
336,977
import math


def rad2deg(radians):
    """Radians to degrees """
    degrees = 180.0 * radians / math.pi
    return degrees
fede9603c2666d628284d7512c3fe663ff021d5b
504,758
import re
import pathlib


def _emitter(target, source, env):
    """Define a generic emitter for the LaTeX exporting"""
    if re.search("--export-latex", env.get("INKSCAPEFLAGS", "")):
        target.extend(
            [str(_) + "_tex" for _ in target
             if pathlib.Path(str(_)).suffix in (".pdf", ".ps", ".eps")]
        )
    return target, source
04de32efee309dd0594eab04f1c6a44e9a072c28
432,114
def next_decorator(event, message, decorates):
    """
    Helper method for IAnnouncerEmailDecorators. Call the next decorator
    or return.
    """
    if decorates and len(decorates) > 0:
        next = decorates.pop()
        return next.decorate_message(event, message, decorates)
2dbec5b53e532a2187be5fd3d97078f1db088d9d
82,236
def _num_unique_words(words):
    """ Count unique number of words per song """
    return len(set(words))
e8c3ce3b3c6fa0c4d369a4cca84a3062e1e8ae9d
79,494
import re


def removeComponentID(instr):
    """
    Remove a component ID from a filter name

    :param instr: the filter name
    :type instr: string

    :rtype: string
    :return: the filter name with the component ID removed, or `None`
             if the input is not a valid string
    """
    try:
        m = re.match(r"(?P<filt>.*?)_G(.*?)", instr)
    except TypeError:
        return None
    if not m:
        # There was no "_G" in the input string. Return the input string
        ret_str = str(instr)
    else:
        ret_str = str(m.group("filt"))
    return ret_str
30a6249b4276f7a70cbb3f3223cb15ce0c833f87
218,986
def find_changes(vals, threshold=None, change_pct=0.02, max_interval=None):
    """Returns an array of index values that point at the values in the 'vals'
    array that represent significant changes. 'threshold' is the absolute
    amount that a value must change before being included. If 'threshold' is
    None, 'change_pct' is used to determine a threshold change by multiplying
    'change_pct' by the difference between the minimum and maximum value. If
    'max_interval' is provided, it will be the maximum allowed separation
    between returned index values. This forces some points to be returned even
    if no significant change has occurred. Index 0, pointing at the first
    value, is always returned.
    """
    # special case of no values passed
    if len(vals) == 0:
        return []

    if threshold is None:
        # compute the threshold difference that constitutes a change
        # from the change_pct parameter
        threshold = (max(vals) - min(vals)) * change_pct
        if threshold == 0.0:
            threshold = 0.01  # keep it from showing every point

    # Start with the first value
    final_ixs = [0]
    last_val = vals[0]
    last_ix = 0

    for ix in range(1, len(vals)):
        v = vals[ix]
        include_this_index = False
        if abs(v - last_val) >= threshold:
            # if the prior value was not included, include it
            if last_ix != ix - 1:
                final_ixs.append(ix - 1)
            include_this_index = True
        elif max_interval is not None and ix - last_ix >= max_interval:
            include_this_index = True
        if include_this_index:
            final_ixs.append(ix)
            last_val = v
            last_ix = ix

    return final_ixs
e6c66d3636f80ee310dcb7ae790bb30a27598aaa
11,366
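A small usage sketch for find_changes above, with hypothetical values:

vals = [0, 0, 5, 5, 5, 0, 0]
print(find_changes(vals, threshold=1))
# [0, 1, 2, 4, 5] -- index 0 always included; 1 and 4 are the points just
# before each jump, 2 and 5 are the jumps themselves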
def generate_incbin_asm(start_address, end_address):
    """
    Return baserom incbin text for an address range.

    Format: 'INCBIN "baserom.gbc", {start}, {end} - {start}'
    """
    incbin = (
        start_address,
        '\nINCBIN "baserom.gbc", $%x, $%x - $%x\n\n' % (
            start_address, end_address, start_address
        ),
        end_address
    )
    return incbin
7f3bb96fa40481699bcc72379c6564378f7dfe43
217,961
def generate_graph_with_template(data, title, yaxis_title, xaxi_title):
    """
    This common layout can be used to create Plotly graph layout.

    INPUT:
        data - a graph required JSON data i.e list
        title - a title of the chart
        yaxis_title - Y title
        xaxi_title - X title

    OUTPUT:
        layout for particular graph.
    """
    return {
        'data': [data],
        'layout': {
            'title': title,
            'yaxis': {
                'title': yaxis_title
            },
            'xaxis': {
                'title': xaxi_title
            }
        }
    }
7755118378b4bb912f7168452ba7456b62aac44b
323,795
def undotify(dotdict):
    """
    Expand a dictionary containing keys in "dot notation".

    This supports both standard dict or OrderedDict, or any dict subclass.

    For example::

    .. code-block:: python3

        dotdict = {
            'key1.key2.key3': 'string1',
            'key1.key2.key4': 1000,
            'key4.key5': 'string2',
        }

        expanded = undotify(dotdict)

    Will produce:

    .. code-block:: python3

        {
            'key1': {
                'key2': {
                    'key3': 'string1',
                    'key4': 1000,
                },
            },
            'key4': {
                'key5': 'string2',
            },
        }

    :param dict dotdict: Original dictionary containing string keys in dot
        notation.

    :return: The expanded dictionary. Same datatype of the input.
    :rtype: dict
    """
    assert isinstance(dotdict, dict), 'Invalid dictionary'

    dtype = type(dotdict)
    result = dtype()

    for key, value in dotdict.items():
        path = key.split('.')
        assert path, 'Invalid dot-notation path'

        node = result
        for part in path[:-1]:
            node = node.setdefault(part, dtype())
            assert isinstance(node, dtype), 'Incompatible paths to {}'.format(
                key,
            )

        node[path[-1]] = value

    return result
2df8a3421d12c47c66bdb3a70b15afb2cc693b13
612,416
def velocity_vst(displacement, time):
    """Usage: Calculate velocity using displacement and time taken."""
    return displacement / time
c26fe56d0e2603c7f95054788f38836febd78403
282,434
def get_diff_report(output: str) -> str:
    """ Get only the diff component from the report """
    return "@@".join(output.split("@@")[1:])
30fcd9efbcd66c9a0a68bfc266851b1f86594cb5
511,894
import re


def validate_gene_sets(genesets, var_names, context=None):
    """
    Check validity of gene sets, return if correct, else raise error.
    May also modify the gene set for conditions that should be resolved,
    but which do not warrant a hard error.

    Argument gene sets may be either the REST OTA format (list of dicts) or
    the internal format (dict of dicts, keyed by the gene set name).

    Will return a modified gene sets (eg, remove warnings) of the same type
    as the provided argument. Ie, dict->dict, list->list

    Rules:
    0. All gene set names must be unique. [error]
    1. Gene set names must conform to the following: [error]
        * Names must be comprised of 1 or more ASCII characters 32-126
        * No leading or trailing spaces (ASCII 32)
        * No multi-space (ASCII 32) runs
    2. Gene symbols must be part of the current var_index. [warning]
       If gene symbol is not in the var_index, generate a warning and remove
       the symbol from the gene sets.
    3. Gene symbols must not be duplicated in a gene set. [warning]
       Duplications will be silently de-duped.

    Items marked [error] will generate a hard error, causing the validation
    to fail.

    Items marked [warning] will generate a warning, and will be resolved
    without failing the validation (typically by removing the offending item
    from the gene sets).
    """
    messagefn = context["messagefn"] if context else (lambda x: None)

    # accept genesets args as either the internal (dict) or REST (list) format,
    # as they are identical except for the dict being keyed by geneset_name.
    if not isinstance(genesets, dict) and not isinstance(genesets, list):
        raise ValueError("Gene sets must be either dict or list.")
    genesets_iterable = genesets if isinstance(genesets, list) else genesets.values()

    # 0. check for uniqueness of geneset names
    geneset_names = [gs["geneset_name"] for gs in genesets_iterable]
    if len(set(geneset_names)) != len(geneset_names):
        raise KeyError("All gene set names must be unique.")

    # 1. check gene set character set and format
    # Matches leading/trailing whitespace, a double-space run, or
    # control/non-ASCII characters (the two-space alternative is reconstructed
    # from rule 1, "No multi-space runs"; the extraction collapsed it).
    illegal_name = re.compile(r"^\s|  |[\u0000-\u001F\u007F-\uFFFF]|\s$")
    for name in geneset_names:
        if type(name) != str or len(name) == 0:
            raise KeyError("Gene set names must be non-null string.")
        if illegal_name.search(name):
            messagefn(
                "Error: "
                f"Gene set name {name} "
                "is not valid. Leading, trailing, and multiple spaces within a name are not allowed."
            )
            raise KeyError(
                "Gene set name is not valid. Leading, trailing, and multiple spaces within a name are not allowed."
            )

    # 2. & 3. check for duplicate gene symbols, and those not present in the
    # dataset. They will generate a warning and be removed.
    for geneset in genesets_iterable:
        if not isinstance(geneset, dict):
            raise ValueError("Each gene set must be a dict.")
        geneset_name = geneset["geneset_name"]
        genes = geneset["genes"]
        if not isinstance(genes, list):
            raise ValueError("Gene set genes field must be a list")
        geneset.setdefault("geneset_description", "")
        gene_symbol_already_seen = set()
        new_genes = []
        for gene in genes:
            gene_symbol = gene["gene_symbol"]
            if not isinstance(gene_symbol, str) or len(gene_symbol) == 0:
                raise ValueError("Gene symbol must be non-null string.")
            if gene_symbol in gene_symbol_already_seen:
                # duplicate check
                messagefn(
                    f"Warning: a duplicate of gene {gene_symbol} was found in gene set {geneset_name}, "
                    "and will be ignored."
                )
                continue

            if gene_symbol not in var_names:
                messagefn(
                    f"Warning: {gene_symbol}, used in gene set {geneset_name}, "
                    "was not found in the dataset and will be ignored."
                )
                continue

            gene_symbol_already_seen.add(gene_symbol)
            gene.setdefault("gene_description", "")
            new_genes.append(gene)

        geneset["genes"] = new_genes

    return genesets
0c47b6c5f8674c6d29bfd191af4af4adcbfb7366
635,752
def tokenize_char(sent):
    """
    Return the character tokens of a sentence including punctuation.
    """
    return list(sent.lower())
f8dc50f92239bab90633cd4643f5b73c707c1519
687,942
def evaluate_f1(tp: int, fp: int, fn: int) -> float:
    r"""F1-score.

    *F1-score* $=\dfrac{2TP}{2TP + FP + FN}$

    Args:
        tp: True Positives
        fp: False Positives
        fn: False Negatives
    """
    try:
        return 2 * tp / (2 * tp + fp + fn)
    except ZeroDivisionError:
        return 0.0
8ba6beeb0c8fe8c20e9d12fec462474c8d15de5b
628,504
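A quick numeric check of evaluate_f1 above:

print(evaluate_f1(tp=8, fp=2, fn=2))  # 16 / 20 = 0.8
print(evaluate_f1(tp=0, fp=0, fn=0))  # 0.0 -- the ZeroDivisionError guard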
import re


def preprocess(s):
    """
    >>> preprocess('#hi there http://www.foo.com @you isn"t RT &lt;&gt;')
    'hashtaghi hashtaghi there isn"t'
    """
    # s = re.sub(r'@\S+', 'thisisamention', s)  # map all mentions to thisisamention
    s = re.sub(r'@\S+', ' ', s)  # remove all mentions
    # s = re.sub(r'http\S+', 'http', s)  # keep only http from urls
    s = re.sub(r'http\S+', ' ', s)  # remove urls
    s = re.sub(r'#(\S+)', r'hashtag\1 hashtag\1', s)  # #foo -> hashtagfoo hashtagfoo (for retaining hashtags even using bigrams)
    # s = re.sub(r'[0-9]+', '9', s)  # 1234 -> 9
    # Pass re.IGNORECASE via flags=; as a positional argument it would be
    # silently interpreted as re.sub's count parameter.
    s = re.sub(r'\bRT\b', ' ', s, flags=re.IGNORECASE)
    s = re.sub(r'&[a-z]+;', ' ', s, flags=re.IGNORECASE)
    s = re.sub(r'\s+', ' ', s).strip()
    return s.lower()
dab4c236a36e5eced00245468a0b9363c9c16420
180,019
def checkArrDims(arr, n_dim):
    """Check if arr has n_dim dimensions"""
    if arr.ndim == n_dim:
        return True
    else:
        return False
955db0c90ae41cd4a85987e58317e00fa2401a49
276,252
import math


def _nCr(n, r):
    """
    Compute the binomial coefficient n! / (r! * (n-r)!)
    """
    # Integer division keeps the result exact for large n (factorial ratios
    # are always whole numbers; true division would return an imprecise float).
    return math.factorial(n) // math.factorial(r) // math.factorial(n - r)
b2079241bdea09ac6bd3ab4d757d2e090f7ae272
409,434
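A quick numeric check of _nCr above:

print(_nCr(5, 2))  # 10 -- "5 choose 2"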
def palindrome(word: str) -> int:
    """
    Given a string, calculates the amount of palindromes that exist within
    that string

    Parameters
    ----------
    word : str
        String that may contain palindrome sub-strings

    Returns
    -------
    int
        number of palindromes in string
    """
    word = word.lower()
    count = []
    for i in range(len(word)):
        for p in range(i + 1, len(word) + 1):
            count.append(word[i:p])
    t = [i for i in set(count) if len(i) > 1 and i == i[::-1]]
    return len(t)
18d3c8dee118874201259b80d38143a86faf08b4
422,072
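A short check of palindrome above, with a hypothetical input:

print(palindrome("abcba"))  # 2 -- the distinct palindromic substrings "bcb" and "abcba"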
import re

import click


def validate_version_code(ctx, param, value):
    """
    Version codes are validated as semantic versions prefixed by a v,
    e.g. v1.2.3

    :param ctx: the click context
    :param param: the click parameter
    :param value: the parameter value
    :return: the validated value
    """
    re_semver = (
        r"^v(?P<major>0|[1-9]\d*)\.(?P<minor>0|[1-9]\d*)\.(?P<patch>0|[1-9]\d*)"
        r"(?:-(?P<prerelease>(?:0|[1-9]\d*|\d*[a-zA-Z-][0-9a-zA-Z-]*)"
        r"(?:\.(?:0|[1-9]\d*|\d*[a-zA-Z-][0-9a-zA-Z-]*))*))?"
        r"(?:\+(?P<buildmetadata>[0-9a-zA-Z-]+(?:\.[0-9a-zA-Z-]+)*))?$"
    )
    if re.match(re_semver, value):
        return value
    else:
        raise click.BadParameter(
            "please specify major, minor and patch versions, e.g. v1.0.0"
        )
6748ae4c4d8da5c2fd47aa0ae4124f0a26d5a13b
198,389
def get_mf6_ftypes(namefile, ftypekeys):
    """Return a list of FTYPES that are in the name file and in ftypekeys.

    Parameters
    ----------
    namefile : str
        path to a MODFLOW 6 name file
    ftypekeys : list
        list of desired FTYPEs

    Returns
    -------
    ftypes : list
        list of FTYPES that match ftypekeys in namefile
    """
    with open(namefile, "r") as f:
        lines = f.readlines()

    ftypes = []
    for line in lines:
        # Skip over blank and commented lines
        ll = line.strip().split()
        if len(ll) < 2:
            continue
        if line.strip()[0] in ["#", "!"]:
            continue

        for key in ftypekeys:
            if ll[0].upper() in key:
                ftypes.append(ll[0])

    return ftypes
0cd9a4f0f608389a19bb73ce8b01ca7f0ee7043e
241,287
def filter_middles(gaps, min_gap):
    """Filters gaps smaller than some minimum gap threshold."""
    middles = [(g[0] + g[1]) // 2 for g in gaps]
    ranges = [g[1] - g[0] for g in gaps]
    return [m for i, m in enumerate(middles) if ranges[i] > min_gap]
6a9842e1f45c223aabd9fd21414326df9aa67a5d
458,052
def theano_safe_run(fn, input_list):
    """
    Help catch theano memory exceptions while running theano functions.

    :param fn:
    :param input_list:
    :return: (status, result), status > 0 means an exception was caught.
    """
    try:
        result = fn(*input_list)
        status = 0
        return status, result
    except MemoryError:
        print('Memory error caught')
        status = 1
        return status, None
    except RuntimeError as e:
        print('RuntimeError encountered')
        if e.args[0].startswith('CudaNdarray_ZEROS: allocation failed.'):
            print('Memory error caught')
            status = 2
            return status, None
        elif str(e).startswith('gpudata_alloc: cuMemAlloc: CUDA_ERROR_OUT_OF_MEMORY: out of memory'):
            print('Memory error caught')
            status = 3
            return status, None
        else:
            raise e
    except Exception as e:
        if e.args[0].startswith("b'cuMemAlloc: CUDA_ERROR_OUT_OF_MEMORY: out of memory'"):
            print('New backend memory error caught')
            status = 4
            return status, None
        else:
            raise e
326a4879fba27ea9773dfafe1c95f86b5fca38f5
480,645
import torch


def prepare_confusion_matrix(all_preds, all_targets, class_map):
    """Prepare Confusion matrix

    Args:
        all_preds (list): List of all predictions
        all_targets (list): List of all actual labels
        class_map (dict): Class names

    Returns:
        tensor: confusion matrix of size number of classes * number of classes
    """
    stacked = torch.stack((
        all_targets,
        all_preds
    ), dim=1).type(torch.int64)
    no_classes = len(class_map)

    # Create temp confusion matrix
    confusion_matrix = torch.zeros(no_classes, no_classes, dtype=torch.int64)

    # Fill up confusion matrix with actual values
    for p in stacked:
        tl, pl = p.tolist()
        confusion_matrix[tl, pl] = confusion_matrix[tl, pl] + 1

    return confusion_matrix
498f4c54c726128f769cf6ef6fe28ad2f8dad013
598,735
def mapping_file_to_dict(mapping_data, header):
    """processes mapping data in list of lists format into a 2 deep dict"""
    map_dict = {}
    for i in range(len(mapping_data)):
        sam = mapping_data[i]
        map_dict[sam[0]] = {}
        for j in range(len(header)):
            if j == 0:
                continue  # sampleID field
            map_dict[sam[0]][header[j]] = sam[j]
    return map_dict
4f592f396bac1d8f7c696f057d8b02ecd1408010
83,643
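A tiny usage sketch of mapping_file_to_dict above, with hypothetical rows:

header = ['SampleID', 'Treatment', 'DOB']
rows = [['s1', 'control', '2020'], ['s2', 'fast', '2021']]
print(mapping_file_to_dict(rows, header))
# {'s1': {'Treatment': 'control', 'DOB': '2020'}, 's2': {'Treatment': 'fast', 'DOB': '2021'}}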
import re


def ignore_channel(config, channel_name):
    """Return True if `channel_name` is a channel we should ignore based on
    config settings."""
    if channel_name in config.ignore_channels:
        return True
    for pat in config.ignore_channel_patterns:
        if re.search(pat, channel_name):
            return True
    return False
ad66b80985356241ea532171045edef2cfcb3d49
160,574
def filter_images(data, split_data):
    """ Keep only images that are in some split and have some captions """
    all_split_ids = set()
    for split_name, ids in split_data.items():
        all_split_ids.update(ids)
    new_data = []
    for img in data:
        keep = img['id'] in all_split_ids and len(img['regions']) > 0
        if keep:
            new_data.append(img)
    return new_data
65c7b1765371e6e3875766394de09203e7485837
630,819
def parse_csv_data(csv_filename: str) -> list:
    """Takes in a csv file and outputs a list of each line of the file"""
    with open(csv_filename, "r", encoding="UTF-8") as file:
        # Forms a list of each line in the file
        data = file.read().split("\n")
    # Removes any empty lines at the end
    while data[-1] == "":
        data.pop()
    return data
d8ab580a5e9b411fcdc40bc30311e81913d7dae1
336,531
import inspect


def isderivedinstance(instance, parent):
    """
    Test if instance is derived from a parent class.

    Parameters:
        instance    Instance object.
        parent      Class type or string name of the parent class.

    Returns True or False.
    """
    try:
        name = parent.__name__
    except AttributeError:
        name = parent
    return name in [c.__name__ for c in inspect.getmro(instance.__class__)]
1069cd8c38e93c0041b3af602f63d347967bf2f0
472,665
import torch


def encode_data(data, tokenizer, punctuation_enc, segment_size):
    """
    Converts words to (BERT) tokens and punctuation to given encoding.
    Note that words can be composed of multiple tokens.
    """
    X = []
    Y = []
    for idx, split_text in enumerate(data):
        if len(split_text) >= segment_size:
            X_tmp = []
            for word, punc in split_text:
                tokens = tokenizer.tokenize(word)
                x = tokenizer.convert_tokens_to_ids(tokens)
                y = [punctuation_enc[punc]]
                if len(x) > 0:
                    if len(x) > 1:
                        # pad the label with zeros for all but the last sub-token
                        y = (len(x) - 1) * [0] + y
                    X_tmp += x
                    Y += y
            X.append(X_tmp)
    return X, torch.tensor(Y)
7d6b39c2a161e928e6891509271b7a383b4e68e3
520,916
import smtplib


def smtp_connection(c):
    """Create an SMTP connection from a Config object"""
    if c.smtp_ssl:
        klass = smtplib.SMTP_SSL
    else:
        klass = smtplib.SMTP
    conn = klass(c.smtp_host, c.smtp_port, timeout=c.smtp_timeout)
    if not c.smtp_ssl:
        conn.ehlo()
        conn.starttls()
        conn.ehlo()
    if c.smtp_username:
        conn.login(c.smtp_username, c.smtp_password)
    return conn
db57f751cb25683222e6fd621ec02e7b05bc598e
633,117
def toflatten(pathX, pathY):
    """ Flatten list of paths"""
    flatten = []
    for i in range(len(pathX)):
        for j in range(len(pathX[i])):
            flatten.append([pathX[i][j], pathY[i][j]])
    return flatten
56daa887bf0e17d545b16f582963bd8306513375
367,849
def truncate_patch(patch, margin=0.05):
    """Truncates module edges by margin (percent of width) to remove module frame."""
    width = patch.shape[1]
    margin_px = int(margin * width)
    patch = patch[margin_px:-margin_px, margin_px:-margin_px]
    return patch
340813ff974b1221461d45165c03480ab4f604db
518,315
def add_to_label_index_dict(label, starting_index, ending_index, label_index_dict):
    """Adds the given indices to the label_index_dict, where the label is the key."""
    label = label.upper()
    if label in label_index_dict.keys():
        label_index_dict[label] = (label_index_dict[label] + ' '
                                   + str(starting_index) + '-' + str(ending_index))
    else:
        label_index_dict[label] = str(starting_index) + '-' + str(ending_index)
    return label_index_dict
ca988d03bb69dbdfe986ff78642f6f01b65c43da
458,920
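A quick usage sketch of add_to_label_index_dict above, with hypothetical labels:

d = {}
d = add_to_label_index_dict('gene', 0, 4, d)
d = add_to_label_index_dict('Gene', 7, 12, d)
print(d)  # {'GENE': '0-4 7-12'} -- labels are upper-cased and ranges appended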