Columns: content (string, 39 to 9.28k chars), sha1 (string, 40 chars), id (int64, 8 to 710k). Each record below lists content, then sha1, then id.
def format_docstring(docstr: str) -> str:
    """Removes \n and 4 consecutive spaces."""
    return docstr.replace('\n', '').replace('    ', '')
47b9103961a5a87436e52b55c1ae13206c6ee918
519,016
def has_local_name(agent):
    """Loops through all the agent names and returns True if any of them
    have a source of `local`, `ingest`, `nad`, or `naf`"""
    for name in agent.names:
        if name.source in ['local', 'ingest', 'nad', 'naf']:
            return True
    return False
c259b01fdc9f1118877a215713e8e0ffa23425ae
473,747
def flip_1d_index_vertically(index, rows, columns):
    """Finds the index to the corresponding vertically flipped 1d matrix value.

    Consider a 1d matrix [1, 2, 3, 4, 5, 6] with 3 rows and 2 columns. The
    original and vertically flipped representations are shown below.

        1 2       5 6
        3 4  ->   3 4
        5 6       1 2

    This function allows the translated matrix to be accessed using indices
    into the original matrix, such that index 0 (value 1) becomes index 4
    (value 5, corresponding to index 0 of the flipped matrix).

    Args:
        index (int): Index into the original 1d matrix.
        rows (int): Number of rows in the matrix.
        columns (int): Number of columns in the matrix.

    Returns:
        The index for the corresponding value of the vertically flipped
        matrix, as an int.
    """
    # Get current row in 1d matrix, from 0 to rows-1
    current_row = index // columns
    # Flip row (0 -> rows-1, ..., rows-1 -> 0, etc.)
    flipped_row = rows - current_row - 1
    # Calculate total number of entries on preceding rows
    offset_row = flipped_row * columns
    # Calculate current column position in row
    offset_column = index % columns
    return offset_row + offset_column
ca8fb78915bd278b3ef397a1c9a1d3b2a6af1e15
602,407
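A quick sanity check (not part of the original entry; assumes flip_1d_index_vertically above is in scope) reproducing the docstring's 3x2 example:

m = [1, 2, 3, 4, 5, 6]
flipped = [m[flip_1d_index_vertically(i, rows=3, columns=2)] for i in range(len(m))]
print(flipped)  # [5, 6, 3, 4, 1, 2]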
def _make_class_name(name):
    """
    Make a ufunc model class name from the name of the ufunc.
    """
    return name[0].upper() + name[1:] + 'Ufunc'
7f5e2b0c81b81532c4222efb1ab58b8126906628
171,473
def issolved(cube: list) -> bool:
    """
    Return True if the cube is solved, False otherwise.

    A cube is a solved cube if every sticker of each side of the cube
    is the same.
    """
    return all(all(x == s[0][0] for y in s for x in y) for s in cube)
bb6d0e35061b54a0476adfd90e80bd5a3b878807
594,115
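A small usage sketch (illustrative only; the face representation is an assumption, with each side a 2x2 grid of sticker labels):

solved = [[[c, c], [c, c]] for c in "WYROGB"]   # six uniform faces
scrambled = [[['W', 'Y'], ['W', 'W']]] + solved[1:]
print(issolved(solved))     # True
print(issolved(scrambled))  # False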
from datetime import datetime

def getDateFromDatetime(inputDateString):
    """
    Given a datetime string - return a date object + 1 day ahead since
    tweets from day X are registered to midnight at day X+1
    """
    dateOnly = inputDateString.split(" ")[0]
    dateOnlyList = [int(x) for x in dateOnly.split("-")]
    returnDate = datetime(dateOnlyList[0], dateOnlyList[1], dateOnlyList[2], 0, 0, 0)
    return returnDate
d6b5168c68145f029b8a79c8a5682fe7948692e5
285,969
def binary_tag_to_tags(text_df, tag_values):
    """
    +++INPUT+++
    text_df: dataframe with binary tags, fillna with 0
    tag_values: array of tag strings
        example: tag_values = text_df.columns[2:].values
    +++OUTPUT+++
    text_df: with Tags column added containing tags
    """
    tags_list = []
    for row_index in range(len(text_df)):
        selector = text_df.loc[row_index, tag_values].values.astype(bool)
        selected_tags = tag_values[selector]
        tags_string = ", ".join(selected_tags)
        tags_list.append(tags_string)
    text_df['Tags'] = tags_list
    return text_df
d63aa63f67490b4372706a99afa39f7d9fa6acda
535,328
import torch

def densify_features(x, shape):
    """
    Densify features from a sparse tensor

    Parameters
    ----------
    x : Sparse tensor
    shape : Dense shape [B,C,H,W]

    Returns
    -------
    Dense tensor containing sparse information
    """
    stride = x.tensor_stride
    coords, feats = x.C.long(), x.F
    shape = (shape[0], shape[2] // stride[0], shape[3] // stride[1], feats.shape[1])
    dense = torch.zeros(shape, device=x.device)
    dense[coords[:, 0], coords[:, 1] // stride[0], coords[:, 2] // stride[1]] = feats
    return dense.permute(0, 3, 1, 2).contiguous()
4ef60f15a1bae846009ddfa76595fa9121fc3035
575,298
def exp_chem_list(rdict):
    """extract the chemicals that were used.

    :param rdict: see DataStructures_README.md
    :return: a list of the unique chemicals used in all the experiments
    """
    chemicalslist = []
    for reagentnum, reagentobject in rdict.items():
        for chemical in reagentobject.chemicals:
            if chemical not in chemicalslist:
                chemicalslist.append(chemical)
    return chemicalslist
0280e99cf3af3860320a23b0314a630d4b373648
424,939
def del_comment(string):
    """Delete the comment from the parameter setting string.

    Parameters
    ----------
    string : str, default None
        The parameter setting string probably with the comment.

    Returns
    -------
    string : str
        The parameter setting string without the comment.
    """
    return string.split('#')[0].replace(' ', '')
127130208c08f676dbc9258f0fe86ecf6d2b9f75
257,319
from pathlib import Path

def read_file_to_list(p4pfile: str) -> list:
    """
    Reads a file and returns a list without line endings.
    """
    p4plist = []
    try:
        p4plist = Path(p4pfile).read_text().splitlines(keepends=False)
    except IOError as e:
        print(e)
        print('*** CANNOT READ FILE {} ***'.format(p4pfile))
    return p4plist
bc0681fc3da1a547a40fe2e437dcc23d24487ea4
475,746
def _req_input_to_args(req_input):
    """
    Given a list of the required inputs for the build command, create the
    corresponding list of arg strings

    :param list[str] req_input: input names
    :return list[str]: list of arg strings
    """
    return ["--" + x + " <arg_here>" for x in req_input]
7f4418ee7e020d3747511d197c33dec5d541248d
441,953
def is_vm(obj):
    """
    Checks if object is a vim.VirtualMachine.

    :param obj: The object to check
    :return: If the object is a VM
    :rtype: bool
    """
    return hasattr(obj, "summary")
263c26d1cf15de663bc00371edaec6173bb800dc
533,465
def data_has_value_from_substring_list(data, needle_list):
    """Recursively search for any values that contain a substring from the specified list

    Args:
        data (dict, list, primitive)
        needle_list (list)

    Returns:
        (bool) True or False if found
    """
    if isinstance(data, list):
        return any(data_has_value_from_substring_list(item, needle_list) for item in data)
    if isinstance(data, dict):
        return any(data_has_value_from_substring_list(v, needle_list) for v in data.values())
    if not data:
        return False
    return any(needle in data for needle in needle_list)
e5516aa0abb5e54b918b23e2ac35e4dc2a89c183
433,847
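A usage sketch (hypothetical data, assuming the function above) on a nested record; note the leaves are assumed to be strings, since the final branch relies on `needle in data`:

event = {"user": "alice", "tags": ["prod-eu", "web"]}
print(data_has_value_from_substring_list(event, ["prod"]))     # True ("prod-eu")
print(data_has_value_from_substring_list(event, ["staging"]))  # False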
def set_regex_parentheses(parent_paragraph_id):
    """
    Adds parentheses if the paragraph has a parent.

    Paragraphs with a parent ( = subparagraphs ) have their identifiers
    wrapped in parentheses and an em dash.
    """
    par_open = r""
    par_close = r""
    if parent_paragraph_id:
        par_open += r"—\("
        par_close += r"\)"
    return par_open, par_close
5ffd5e15783ccade0fbcc67634a4dff2ebb869d5
548,281
import json

def load_json(file):
    """Loads a JSON file and returns it as a dict"""
    with open(file) as f:
        return json.load(f)
d2e593c2d698223ad28103e4c1d7d91426137f53
655,876
import asyncio

def wrap(fn, fn_name, called):
    """Wrap fn to add fn_name to called on invocation."""
    async def async_wrapper(*args, **kwargs):
        await fn(*args, **kwargs)
        called.add(fn_name)

    def wrapper(*args, **kwargs):
        fn(*args, **kwargs)
        called.add(fn_name)

    return async_wrapper if asyncio.iscoroutinefunction(fn) else wrapper
b5059656c44d0fc22697f78b3c67a832dd99448b
255,663
import logging
import copy

def graph_directional_to_undirectional(graph):
    """
    Convert a directional to an undirectional graph

    Returns a deep copy of the full graph with all directional edges
    duplicated as undirectional ones. Undirectional edges share the same
    data dictionary. In converting directional to undirectional edges
    their data dictionaries will be merged.

    .. Note:: dictionary merging may result in undesirable results due to
              data overwrite.

    :param graph: Graph to convert
    :type graph:  :graphit:Graph

    :return:      Undirectional graph
    :rtype:       :graphit:Graph
    """
    if not graph.directed:
        logging.info('Graph already configured as undirected graph')

    graph_copy = graph.copy(deep=True)
    graph_copy.directed = False
    graph_copy.edges.clear()

    done = []
    for edge in graph.edges:
        reverse_edge = tuple(reversed(edge))
        values = copy.deepcopy(graph.edges[edge])

        if edge in done or reverse_edge in done:
            continue

        if reverse_edge in graph.edges:
            values.update(graph.edges[reverse_edge])
            done.append(reverse_edge)

        graph_copy.add_edge(*edge, **values)
        done.append(edge)

    return graph_copy
045ad1f1a7d455ca05ffe50aa4fb117957cb4ed1
607,279
import heapq

def sort_and_trim_beams(beams: list, beam_width: int):
    """
    https://github.com/kensho-technologies/pyctcdecode/blob/v0.1.0/pyctcdecode/decoder.py#L68
    """
    return heapq.nlargest(beam_width, beams, key=lambda x: x.score_lm)
91087ed3a9e4633c1bb2580d265ab59c1a521c09
353,934
def drop_zero_columns(dtf):
    """
    Drop all columns from a dataframe if they are composed of only zeros
    """
    return dtf.loc[:, (dtf != 0).any(axis=0)]
5710ffe6db72c59d22ff03430d9c07a6faf653d6
144,829
def uniform(random, n):
    """Returns a bytestring of length n, distributed uniformly at random."""
    return random.getrandbits(n * 8).to_bytes(n, "big")
1076d1a1bc4d6838386fb6419d5f1ba58591f2a2
210,276
def get_snaps_for_instance(client, rds_instance, snapshot_type=''):
    """Gets all snapshots for a RDS instance"""
    snapshots = []
    resp = client.describe_db_snapshots(
        DBInstanceIdentifier=rds_instance['DBInstanceIdentifier'],
        SnapshotType=snapshot_type
    )
    while 'Marker' in resp:
        snapshots.extend(resp['DBSnapshots'])
        resp = client.describe_db_snapshots(
            DBInstanceIdentifier=rds_instance['DBInstanceIdentifier'],
            SnapshotType=snapshot_type,
            Marker=resp['Marker']
        )
    snapshots.extend(resp['DBSnapshots'])
    return snapshots
03d0c752476822f8f231c8794ac2c1f5c3bca7d6
435,807
def get_unique_list_values(list_to_review):
    """Helper function, takes in a list as a single argument and returns
    a unique list.
    """
    unique_list = []
    # traverse for all elements
    for item in list_to_review:
        # check if exists in unique_list or not
        if item not in unique_list:
            unique_list.append(item)
    return unique_list
f9014c27bccc3f58524ad59c553b951e5150e7a6
227,661
def _attachment_v2_to_v1(vol):
    """Converts v2 attachment details to v1 format."""
    d = []
    attachments = vol.pop('attachments', [])
    for attachment in attachments:
        a = {'id': attachment.get('id'),
             'attachment_id': attachment.get('attachment_id'),
             'volume_id': attachment.get('volume_id'),
             'server_id': attachment.get('server_id'),
             'host_name': attachment.get('host_name'),
             'device': attachment.get('device'),
             }
        d.append(a)
    return d
061e7dd81e61ed1dab4e2ea529218bc51df8c9aa
155,174
def get_size_of_wordlist(filename):
    """Return the number of lines in the file."""
    # Use a context manager so the file handle is closed promptly
    with open(filename) as f:
        return sum(1 for _ in f)
67db2a86419dd3a5beb6053ad8b4a8b69542cee6
172,737
def get(lst, index, default=None):
    """
    Return the element of `lst` located at `index`.
    If no element is found at `index`, return the default value.
    """
    try:
        return lst[index]
    except IndexError:
        return default
273345d2acf1c4aecbf584d600f67ff095a710ea
53,490
import yaml

def configuration(configuration_path):
    """Load our configuration."""
    with open(configuration_path, 'r') as stream:
        return yaml.load(stream, Loader=yaml.FullLoader)
5fab251f28ca9eca955cd396b92a6c5c7ae8ee97
33,149
def decode(string):
    """Unescape string originating from XML

    :param string: string to decode
    :type: str
    :returns: decoded string
    :rtype: str
    """
    if '&#39;' not in string and '&quot;' not in string and '&lt;' not in string \
            and '&gt;' not in string and '&amp;' not in string:
        # already decoded
        return string
    string = string.replace('&#39;', "'")
    string = string.replace('&quot;', '"')
    string = string.replace('&lt;', '<')
    string = string.replace('&gt;', '>')
    string = string.replace('&amp;', '&')
    return string
a4432c0b22b239e51f96b2f0fb06d5ec28735b86
516,835
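A quick example (assuming the decode function above):

print(decode('&lt;a href=&quot;x&quot;&gt;'))  # <a href="x">
print(decode('plain text'))                    # returned unchanged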
from collections import OrderedDict  # instantiable; typing.OrderedDict is annotation-only
import math

def get_latencies_summary(latencies):
    """
    Given a raw timeseries of latencies, create a summary of each point in
    time, with the average standard deviation and mean, the minimum and
    maximum, and the minimum/maximum range for the percentiles (as we
    can't average them).
    """
    summaries = OrderedDict()

    for ts, lat in latencies.items():
        sum_samples = float(sum([c['samples'] for c in lat.values()]))

        # if sum of samples is zero, ignore (means end of metrics)
        if sum_samples == 0:
            continue

        avg_var = 0.0
        avg_mean = 0.0
        mins = []
        maxs = []
        pcs = {'p50': [], 'p75': [], 'p95': [], 'p98': [], 'p99': [], 'p999': []}

        for c in lat.values():
            avg_var += (float(c['samples']) / sum_samples) * math.pow(c['stddev'], 2)
            avg_mean += (float(c['samples']) / sum_samples) * c['mean']
            mins.append(c['min'])
            maxs.append(c['max'])
            for pc in pcs.keys():
                pcs[pc].append(c[pc])

        summary = {'stddev': math.sqrt(avg_var),
                   'mean': avg_mean,
                   'min': min(mins),
                   'max': max(maxs),
                   'samples': sum_samples}
        for pc, vals in pcs.items():
            summary[pc] = {'min': min(vals), 'max': max(vals)}

        summaries[ts] = summary

    return summaries
cd8e00cb218924aa72e8839b55099b3b9a45c661
561,839
def get_file_extension(fn):
    """
    get file extension and check if it is compressed

    :param: str, file path
    :return: str,bool
    """
    compressed_file_ext = ["gz"]
    fn_part = fn.split(".")
    compressed = False
    ext = fn_part[-1]
    if ext in compressed_file_ext:
        compressed = True
        ext = fn_part[-2]
    return ext, compressed
bce96d08f28381f42f329e4fa08c4b858401e019
418,303
from dateutil import tz
from datetime import datetime

def convert_datetime_string_to_object(datetime_string):
    """
    Helper function to parse a datetime string and return it as an
    ISO-formatted string with the local timezone attached
    """
    local_tz = tz.tzlocal()
    datetime_object = datetime.strptime(datetime_string, '%Y-%m-%d %H:%M:%S')
    return datetime_object.replace(tzinfo=local_tz, microsecond=0).isoformat()
9e4c6007806b8932f7cb07ccaa4df068202ffaf5
334,301
import unicodedata

def normalize_grapheme(grapheme, bipa):
    """
    Normalizes a grapheme.

    Does Unicode normalization, splitting in case of Phoible/slash-notation,
    BIPA default selection.
    """
    # Unicode normalization
    grapheme = unicodedata.normalize("NFC", grapheme)

    # Split over slash notation, if any, keeping the entry to the right
    if "/" in grapheme:
        grapheme = grapheme.split("/")[1]

    # Only split the vertical bar, as used in Phoible, if the grapheme is
    # longer than one character (can be a click), keeping the first one
    if len(grapheme) > 1 and "|" in grapheme:
        grapheme = grapheme.split("|")[0]

    # Normalize BIPA
    grapheme = str(bipa[grapheme])

    return grapheme
98af5b3f88bc88a482ee01c1193ab2a58e6aa907
562,614
def _trimmed_text(node):
    """Returns the trimmed text contained in the given DOM node or None if
    empty or if node is None.
    """
    try:
        return node.text.strip() or None
    except AttributeError:
        return None
62f3e5c93d0d4a8a12b95f48a05380047407323e
529,017
from typing import List
import glob

def get_stock_symbols(market: str) -> List[str]:
    """
    Read stock symbols from internal file

    Parameters
    ----------
    market: str
        The stock market (NASDAQ | NYSE)

    Returns
    -------
    List
        A list of symbols for the given market
    """
    sym = str()
    syms = list()
    sym_files = list()

    # glob the sym files and get all the markets
    sym_files = glob.glob('./stock_symbols/sym_*.txt')
    for sym in sym_files:
        syms.append(sym.strip().replace('.txt', '').split('_')[2])
    if market.upper() not in syms:
        print(syms)
        raise Exception

    # read out symbols for given market
    # market file name should be sym_<MARKET_NAME> located at ./stock_symbols/
    symbols = list()
    with open('./stock_symbols/sym_' + market.upper() + '.txt') as market_file:
        for line in market_file:
            symbols.append(line.strip())
    return symbols
59471448192e6987fdedbd293f0a40f9e5495306
221,040
def cleanup(data, clean='layers', keep=None, copy=False):
    """Deletes attributes not needed.

    Arguments
    ---------
    data: :class:`~anndata.AnnData`
        Annotated data matrix.
    clean: `str` or list of `str` (default: `layers`)
        Which attributes to consider for freeing memory.
    keep: `str` or list of `str` (default: None)
        Which attributes to keep.
    copy: `bool` (default: `False`)
        Return a copy instead of writing to adata.

    Returns
    -------
    Returns or updates `adata` with selection of attributes kept.
    """
    adata = data.copy() if copy else data

    keep = list([keep]) if isinstance(keep, str) else list() if keep is None else list(keep)
    keep.extend(['spliced', 'unspliced', 'Ms', 'Mu', 'clusters', 'neighbors'])

    ann_dict = {'obs': adata.obs_keys(),
                'var': adata.var_keys(),
                'uns': adata.uns_keys(),
                'layers': list(adata.layers.keys())}

    if 'all' not in clean:
        ann_dict = {ann: values for (ann, values) in ann_dict.items() if ann in clean}

    for (ann, values) in ann_dict.items():
        for value in values:
            if value not in keep:
                del(getattr(adata, ann)[value])

    return adata if copy else None
36127ef66a04a83ba78d4e8c1e78fdfe7edf94fe
513,330
import itertools

def _preorder_walk(node, _app=None):
    """Walk the tree in preorder"""
    return itertools.chain(
        [node],
        *[_preorder_walk(child) for child in node.children]
    )
b3503c892d4adb347032dbfe1e6e5cd0e4c203e2
431,040
def initial_fragment(string, words=20):
    """Get the first `words` words from `string`, joining any linebreaks."""
    return " ".join(string.split()[:words])
5b390cb5f98c9e940f2a964101b9593f0fa1ffb8
32,365
def moving_average(data, target_var, features_list, num_days=7):
    """
    Calculate moving average of target variable and store result in a new column

    Parameters
    ----------
    data: data frame
        It has columns location, date, and a column with the response
        variable to forecast. This data frame needs to be sorted by
        location and date columns in ascending order.
    target_var: string
        Name of the column in the data frame with the forecast target variable.
    features_list: list of strings
        Running list of feature column names
    num_days: integer
        Time window to calculate moving average for

    Returns
    -------
    data: data frame
        Original data frame with additional column for moving average
    features_list: list of strings
        Running list of feature column names
    """
    column_name = str(num_days) + "_moving_avg_" + str(target_var)
    features_list.append(column_name)
    data[column_name] = data.groupby('location').rolling(num_days)[target_var].mean().reset_index(drop=True)
    return data, features_list
1d1b63e76a84a6d6a76fd3ce0ce61e3a82b11b51
211,730
def is_by_sources(module):
    """ Returns whether module defined by sources """
    return module.location is not None
56bf8041fd53d98b4699eca722d56e637324bf49
555,039
def load_login_file(fpath):
    """ Load login name and password from file. """
    with open(fpath) as f:
        name = f.readline().rstrip('\n')
        passwd = f.readline().rstrip('\n')
    return name, passwd
52e4f5e23d4b05e2a5c447bbace40fb8450162f6
262,302
def numeric_filter(operation, value, column, df):
    """Filters a data column numerically.

    Arguments:
        operation: Operator used to filter data (e.g. ">", "<", "==", ">=", "<=", "!=")
        value: Operand
        column: String for column name
        df: DataFrame

    Returns:
        Boolean column that indicates whether each entry matches the given
        numeric filter.
    """
    return eval(f"df['{column}'] {operation} {value}")
868907d2fed5947c0974896af73a9277fcef0ae8
640,693
def public_dict(obj):
    """Same as obj.__dict__, but without private fields."""
    return {k: v for k, v in obj.__dict__.items() if not k.startswith('_')}
2edee1a17d0dad6ab4268f80eb565406656a77b4
704,317
def readUntilNull(s):
    """
    Read a string until a null is encountered
    returns (string up to null, remainder after null)
    """
    item = s.split(b'\0', 1)
    if len(item) == 1:
        return (item[0], None)
    else:
        return (item[0], item[1])
62933ee1c30f8b077fe13f8f2074612c8b090213
445,879
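A usage sketch (assuming readUntilNull above) on a null-delimited byte buffer:

print(readUntilNull(b'key\0value\0rest'))  # (b'key', b'value\x00rest')
print(readUntilNull(b'no-null'))           # (b'no-null', None)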
def hex_bytes(data: bytes, sep: str = " "):
    """ Format a bytes() object as a hex dump """
    return sep.join("{:02x}".format(bval) for bval in data)
aacc0366a7e20cc3ead71559dba55bafd5d4275e
499,261
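Example output (assuming hex_bytes above):

print(hex_bytes(b'\xde\xad\xbe\xef'))       # de ad be ef
print(hex_bytes(bytes(range(4)), sep=':'))  # 00:01:02:03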
def _response_ok(response, call_type):
    """
    Checks whether API HTTP response contains the associated OK code.

    :param response: Response object.
    :param call_type: String containing the HTTP request type.
    :return: True if response was OK.
    """
    ok_codes = {
        "GET": [200],
        "PUT": [200, 204],
        "POST": [201],
        "DELETE": [204],
    }
    return response.status_code in ok_codes[call_type]
e47516e51c7de8e3dd5e7c4eed88705055952803
312,507
def contains_test_passer(t, test):
    """
    Return whether t contains a value that test(value) returns True for.

    @param Tree t: tree to search for values that pass test
    @param function[Any, bool] test: predicate to check values with
    @rtype: bool

    >>> t = descendants_from_list(Tree(0), [1, 2, 3, 4.5, 5, 6, 7.5, 8.5], 4)
    >>> def greater_than_nine(n): return n > 9
    >>> contains_test_passer(t, greater_than_nine)
    False
    >>> def even(n): return n % 2 == 0
    >>> contains_test_passer(t, even)
    True
    """
    if t.children == []:
        return test(t.value)
    else:
        return any([test(t.value)] + [contains_test_passer(c, test) for c in t.children])
56cbd48f9c416c64c1822b637fd4e8780ce72bc5
650,058
def _is_decimal(col):
    """Check for decimal data types

    Returns bool - True if column is decimal or numeric.
    """
    return col['field_type_name'].upper() in ['DECIMAL', 'NUMERIC']
a48efd2f2b0c9883d0c09e786e209e1b8cb47778
480,047
def create_idea(conn, idea):
    """
    Add a new idea into the ideas table

    :param conn:
    :param idea:
    :return: idea id
    """
    sql = """INSERT INTO ideas(name, description, tags) VALUES(?,?,?)"""
    cur = conn.cursor()
    cur.execute(sql, idea)
    conn.commit()
    return cur.lastrowid
8df51c3e6361d2dbbfe03cc18cbae7f9c47841de
152,041
def put_bucket_policy(s3_obj, bucketname, policy):
    """
    Adds bucket policy to a bucket

    Args:
        s3_obj (obj): MCG or OBC object
        bucketname (str): Name of the bucket
        policy (str): Bucket policy in Json format

    Returns:
        dict : Bucket policy response
    """
    return s3_obj.s3_client.put_bucket_policy(Bucket=bucketname, Policy=policy)
c1f098a49e743f180ed7882951f0bd38ba9eeb39
571,855
from pathlib import Path
import re

def check_files_exist(directory, pattern_list):
    """Check that a list of file patterns exist in a given directory."""
    existing_files = [str(i.relative_to(directory)) for i in sorted(Path(directory).rglob("*"))]
    try:
        assert len(existing_files) == len(pattern_list)
        for path, pattern in zip(existing_files, pattern_list):
            assert re.match(str(pattern), path)
    except Exception as exc:
        formatted_existing_files = "\n\t\t".join(existing_files)
        formatted_pattern_list = "\n\t\t".join(pattern_list)
        raise RuntimeError(
            "Error when checking the files.\n\t"
            f"The directory is: {directory}\n"
            "\t"
            f"The found files are:\n\t\t{formatted_existing_files}\n"
            "\t"
            f"The patterns are:\n\t\t{formatted_pattern_list}"
        ) from exc
    return True
79ecee0d74606c37aef21ed72e7c4c735ad022f7
131,423
import ast

def has_node(code, node):
    """Given an AST or code string returns True if the code contains
    any particular node statement.

    Parameters
    ----------
    code: A code string or the result of an ast.parse.
    node: A node type or tuple of node types to check for. If a tuple is
          passed it returns True if any one of them is in the code.
    """
    tree = ast.parse(code) if isinstance(code, str) else code
    for n in ast.walk(tree):
        if isinstance(n, node):
            return True
    return False
9e1555a3393e3c91f35f6e3fc468eab14d5147b4
600,096
def unique(x):
    """Convert a list into a list of its unique values."""
    return list(set(x))
105df86a8a5df1cd1981bbfab304bc4f8b759fa0
580,550
def _date_long_form(date):
    """ Displays a date in long form, eg 'Monday 29th April 2019'. """
    second_last = (date.day // 10) % 10
    last = date.day % 10
    if second_last != 1 and last == 1:
        ordinal = "st"
    elif second_last != 1 and last == 2:
        ordinal = "nd"
    elif second_last != 1 and last == 3:
        ordinal = "rd"
    else:
        ordinal = "th"

    return f"{date:%A} {date.day}{ordinal} {date:%B} {date.year}"
248cdaaba72e6252d6fdcec6499a4332e50a85c9
646,725
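A quick check against the docstring example (assuming _date_long_form above):

from datetime import date
print(_date_long_form(date(2019, 4, 29)))  # Monday 29th April 2019
print(_date_long_form(date(2023, 3, 3)))   # Friday 3rd March 2023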
def is_iterable(posibleList):
    """Validate if element is iterable

    Args:
        posibleList (Any): possibly iterable element

    Returns:
        bool: if element is iterable
    """
    try:
        if isinstance(posibleList, (tuple, list)) or hasattr(posibleList, "__iter__"):
            # Probe the first element; note this returns False for empty
            # sequences and for iterables that do not support indexing.
            _ = posibleList[0]
            return True
        return False
    except Exception:
        return False
9f07a3fa1f21423b477c535020063d6abe376f58
542,062
import random

def random_every_time(context, values):
    """Choose a random value.

    Unlike Jinja's random filter, this is context-dependent to avoid caching
    the chosen value.
    """
    return random.choice(values)
4e8a6294a1bf4ccbdfc9714c720ca83df24a6760
170,682
def flatten_dict(d, prefix="", separator="."):
    """
    Flatten nested dictionaries into a single level by joining key names
    with a separator.

    :param d: The dictionary to be flattened
    :param prefix: Initial prefix (if any)
    :param separator: The character to use when concatenating key names
    """
    ret = {}
    for k, v in d.items():
        key = separator.join([prefix, k]) if prefix else k
        if type(v) is dict:
            # Pass the separator down so nested levels use it too
            ret.update(flatten_dict(v, prefix=key, separator=separator))
        else:
            ret[key] = v
    return ret
c7e53cb66db8938a196e9adc22e7013e20191d31
394,490
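A usage sketch (assuming the fixed flatten_dict above, which now propagates separator into recursive calls):

nested = {"a": {"b": 1, "c": {"d": 2}}, "e": 3}
print(flatten_dict(nested, separator="/"))
# {'a/b': 1, 'a/c/d': 2, 'e': 3}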
def beaufort(n):
    """Converts windspeed in m/s into Beaufort Scale descriptor."""
    s = ''
    if n < 0.3:
        s = 'calm'
    elif n < 1.6:
        s = 'light air'
    elif n < 3.4:
        s = 'light breeze'
    elif n < 5.5:
        s = 'gentle breeze'
    elif n < 8.0:
        s = 'moderate breeze'
    elif n < 10.8:
        s = 'fresh breeze'
    elif n < 13.9:
        s = 'strong breeze'
    elif n < 17.2:
        s = 'high wind'
    elif n < 20.8:
        s = 'gale'
    elif n < 24.5:
        s = 'strong gale'
    elif n < 28.5:
        s = 'storm'
    elif n < 32.7:
        s = 'violent storm'
    else:
        s = 'hurricane force'
    return s
75b6150a8530ea174c447d11ea2e5c212cdd0e93
442,093
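Example (assuming beaufort above):

for v in (0.1, 9.0, 35.0):
    print(v, beaufort(v))
# 0.1 calm
# 9.0 fresh breeze
# 35.0 hurricane force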
def translate_direct(adata, direct, no_index):
    """Translate all direct hit genes into their orthologs.

    Genes not found in the index of the table will be upper-cased, after
    excluding some gene symbols that usually do not have an ortholog,
    i.e. genes

    - Starting with 'Gm'
    - Starting with 'RP'
    - Ending with 'Rik'
    - Containing a 'Hist'
    - Containing a 'Olfr'
    - Containing a '.'

    Parameters
    ----------
    adata : AnnData
        AnnData object to translate genes in.
    direct : dict
        Dictionary with those adata genes mapping to a single ortholog
    no_index : list
        List of those adata genes not found in the table.

    Returns
    -------
    AnnData
        Updated original adata.
    """
    guess_genes = [
        x for x in no_index
        if x[:2] != 'Gm'
        and 'Rik' not in x
        and x[:2] != 'RP'
        and 'Hist' not in x
        and 'Olfr' not in x
        and '.' not in x]
    ndata = adata[:, list(direct.keys()) + guess_genes].copy()
    ndata.var['original_gene_symbol'] = list(direct.keys()) + guess_genes
    ndata.var_names = list(direct.values()) + [m.upper() for m in guess_genes]
    return ndata
8d8ab2efd60463603fa5b8c212ae6878d5d68a36
142,888
from typing import Optional
from typing import Match
import re

def find_word(word, src) -> Optional[Match[str]]:
    """Find word in a src using regex"""
    return re.compile(r'\b({0})\b'.format(word), flags=re.IGNORECASE).search(src)
5619f472db8256ad7524529911cc190fc3140c46
440,393
def squish(tup):
    """Squishes a singleton tuple ('A',) to 'A'

    If tup is a singleton tuple, return the underlying singleton.
    Otherwise, return the original tuple.

    Args:
        tup (tuple): Tuple to squish
    """
    if len(tup) == 1:
        return tup[0]
    else:
        return tup
ddaa8e53edc9fbda2ba07185b98bf934171d92bf
170,911
import logging

def get_handler_filename(logger):
    """Gets logger filename

    Parameters:
        * logger (object): log file object

    Returns:
        * str: Log file name if any, None if not
    """
    for handler in logger.handlers:
        if isinstance(handler, logging.FileHandler):
            return handler.baseFilename
    return None
bdaa47977c14601aa2217fc8a3c734e97b9a0295
37,729
def check_weights(data_ps):
    """Check if sum of propensity score weights match sample size

    Args:
        data_ps (pandas.DataFrame): dataframe with propensity score

    Return:
        tuple: sample size, treated size from weights, untreated size from weights
    """
    weight_t = 1. / data_ps.query("nudge==1")["pscore"]
    weight_nt = 1. / (1. - data_ps.query("nudge==0")["pscore"])
    print(f"Original sample size {data_ps.shape[0]}")
    print("Original treatment sample size", data_ps.query("nudge==1").shape[0])
    print("Original control sample size", data_ps.query("nudge==0").shape[0])
    print(f"Weighted treatment sample size {round(sum(weight_t), 1)}")
    print(f"Weighted control sample size {round(sum(weight_nt), 1)}")

    return data_ps.shape[0], sum(weight_t), sum(weight_nt)
85abaf40e7a7e24318ba31797264361d3780ebbd
140,377
import __main__ as m

def is_running_interactive() -> bool:
    """Check if we're running in a REPL"""
    try:
        return not hasattr(m, "__file__")
    except Exception as e:
        print(e)
    return False
bafe84caf9e40c97a2f000136ca77546850e26c0
114,931
def find_between(s, first, last):
    """
    Find string between two sub-strings

    Args:
        s: Main string
        first: first sub-string
        last: second sub-string

    Example: find_between('[Hello]', '[', ']') -> returns 'Hello'

    Returns:
        String between the first and second sub-strings, if any was found,
        otherwise returns an empty string
    """
    try:
        start = s.index(first) + len(first)
        end = s.index(last, start)
        return s[start:end]
    except ValueError:
        return ""
d2df4c89d63f07678cba8283849917ff767b0903
673,284
def is_range(obj):
    """Helper function to test if object is valid "range"."""
    keys = ['start', 'step', 'stop']
    return isinstance(obj, dict) and all(k in obj for k in keys) and \
        all(isinstance(obj[k], float) for k in keys)
4bb1d210ebb0a7265671b3d7070912052f71601e
33,963
def is_redirect(page):
    """
    Checks if a page is a redirect

    Returns
    --------
    bool
        True if the page is a redirect
        False if the page is not a redirect
    """
    redirect = page.getElementsByTagName('redirect')
    if len(redirect) > 0:
        return True
    return False
c5913b1fb66099c9d38a9851496bfacb76b046aa
126,615
def description_length(user):
    """
    Get the length of user description in words

    :param user: the user object in json tweet
    :return: number of words in the description; 0 if the user has
        no description
    """
    des_length = 0
    if user['description']:
        description = user['description']
        des_length = len(description.split())

    return des_length
41e9ec6800b6fe86b14297114179d91955431100
107,571
def _endpoint_from_image_ref(image_href):
    """Return the image_id and guessed endpoint from an image url.

    :param image_href: href of an image
    :returns: a tuple of the form (image_id, endpoint_url)
    """
    parts = image_href.split('/')
    image_id = parts[-1]
    # the endpoint is everything in the url except the last 3 bits
    # which are version, 'images', and image_id
    endpoint = '/'.join(parts[:-3])
    return (image_id, endpoint)
626484976a35b89ecaf5b363503ac993ee231613
90,570
def sanitize_list_input(_list: list, _def: list) -> list:
    """Sanitize the _list by reducing it to distinct values.
    If _list is None, then return the second parameter's value _def

    :param _list: list, if not None, will be reduced to its distinct values
    :param _def: list, the value to be returned, if _list is None
    :return: Either sanitized _list or _def value, if _list is None
    """
    if not _list:
        # Return the default list itself, as documented (_def is already a list)
        return _def
    else:
        return list(set(_list))
a2cec282d8ff38290e9249ba692ef696ee40abf9
149,163
def extract_name(row):
    """
    Helper function for comparing datasets, this extracts the name from
    the db name

    Args:
        row: row in the dataframe representing the db model

    Returns:
        name extracted from agency_name or np.nan
    """
    return row['agency_name'][:row['agency_name'].rindex('(') - 1]
84d525d008cf35e4ee0b1347caf0909a67cad1e3
663,852
def get_true_positives(data, classifier):
    """Find the total positives that also are selected by our classifier."""
    return data[data["foundSegment"] & classifier(data)].size
2d1581e5f9ade4ff299557c76f3a9507c4dc5a55
10,474
def generate_community_tags_scores(database, community):
    """
    This function generates the most important terms that describe
    a community of similar documents, alongside their pagerank and
    in-degree scores.
    """
    # Get all intersecting nodes of the specified community,
    # ranked by their in-degree (which shows to how many documents they
    # belong) and pagerank score in descending order.
    query = ('MATCH p=((d:Document {community: ' + str(community) + '})-[:includes]->(w:Word)) '
             'WITH w, count(p) as degree '
             'WHERE degree > 1 '
             'RETURN w.key, w.pagerank as pagerank, degree '
             'ORDER BY degree DESC, pagerank DESC')
    tags_scores = database.execute(' '.join(query.split()), 'r')
    return tags_scores
c7623fdbad3389ec5c3dbf55c6758036c7cfce03
660,460
def encode_info_token_http_auth(token):
    """
    Encodes the token to be sent in the Authenticate header.

    :param token: token to be sent. Assumes the token is already decoded
    :return: the body of the `Authorization` header
    """
    return ' Token {}'.format(token)
5cacc6b899cec0ab4db486b327646932e595253c
282,762
def _unfill(v, l=5):
    """unfill takes a zfilled string and returns it to the original value"""
    return [str(int(v[l * i:l * (i + 1)])) for i in range(len(v) // l)]
28c9adfe90efe2b2e1f895b59844990e2455f183
147,892
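A usage sketch (assuming _unfill above), inverting a zfill-packed string of fixed-width fields:

print(_unfill('0000100023'))   # ['1', '23']
print(_unfill('001002', l=3))  # ['1', '2']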
import calendar

def which_ten(d):
    """
    Determine which ten-day period a date falls in.

    :type d: datetime.date
    :param d: day of date, e.g. 2016-01-02 in format %Y-%m-%d is 2
    :return: an integer that is the prefix of the relevant ten-day period.
    """
    if not calendar.monthrange(d.year, d.month)[1] >= d.day > 0:
        raise RuntimeError('Out of range date')
    if d.day < 10:
        return 0
    elif d.day < 20:
        return 1
    elif d.day < 30:
        return 2
    else:
        return 3
25b7ac3de60a7b0407191875f8c60f5f614bee1a
470,523
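A quick example (assuming which_ten above):

from datetime import date
print(which_ten(date(2016, 1, 2)))   # 0
print(which_ten(date(2016, 1, 15)))  # 1
print(which_ten(date(2016, 1, 31)))  # 3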
import torch

def ohe(input_vector, dim, device="cpu"):
    """Does one-hot encoding of input vector."""
    batch_size = len(input_vector)
    nb_digits = dim

    y = input_vector.reshape(-1, 1)
    y_onehot = torch.FloatTensor(batch_size, nb_digits).to(device)

    y_onehot.zero_()
    y_onehot.scatter_(1, y, 1)

    return y_onehot
1b0406471d3755cf2f99d23f6873727b0986036a
84,907
def always_true(result, previous_state):
    """
    Not really necessary since no filters results in an always true result,
    but this is useful to show an example of what a filter is without
    actually doing anything.
    """
    return True
157fe783cfe14049d6c9bf742eb41064fc9b1ef7
387,684
def process_gps_data(gps_df, threshold_record_length=5, threshold_uplift=5):
    """
    Thin the gps data to remove stations with short record lengths and
    unrealistically high uplift or subsidence rates
    """
    gps_df = gps_df[gps_df['record_length'] > threshold_record_length]
    gps_df = gps_df[gps_df['RU(mm/yr)'] < threshold_uplift]
    gps_df = gps_df[gps_df['RU(mm/yr)'] > -threshold_uplift]
    return gps_df
8a5247182f3132d0e693f6eb03c48b3edcae12bf
468,708
def rgb_to_hsv(rgb):
    """
    Convert an RGB color representation to an HSV color representation.

    (r, g, b) :: r -> [0, 255]
                 g -> [0, 255]
                 b -> [0, 255]

    :param rgb: A tuple of three numeric values corresponding to the
                red, green, and blue value.
    :return: HSV representation of the input RGB value.
    :rtype: tuple
    """
    r, g, b = rgb[0] / 255, rgb[1] / 255, rgb[2] / 255
    _min = min(r, g, b)
    _max = max(r, g, b)
    v = _max
    delta = _max - _min

    if _max == 0:
        return 0, 0, v

    s = delta / _max

    if delta == 0:
        delta = 1

    if r == _max:
        h = 60 * (((g - b) / delta) % 6)
    elif g == _max:
        h = 60 * (((b - r) / delta) + 2)
    else:
        h = 60 * (((r - g) / delta) + 4)

    return round(h, 3), round(s, 3), round(v, 3)
aa2c75b92c9830c7e798b0ca6ee9feac20793de4
24,361
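Example values (assuming rgb_to_hsv above):

print(rgb_to_hsv((255, 0, 0)))    # (0.0, 1.0, 1.0) -> pure red
print(rgb_to_hsv((0, 128, 255)))  # roughly (209.882, 1.0, 1.0)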
from pathlib import Path
from typing import List

def collect_app_bundles(source_dir: Path) -> List[Path]:
    """
    Collect all app bundles which are to be put into DMG

    If the source directory points to FOO.app it will be the only app
    bundle packed.

    Otherwise all .app bundles from given directory are placed to a
    single DMG.
    """
    if source_dir.name.endswith('.app'):
        return [source_dir]

    app_bundles = []
    for filename in source_dir.glob('*'):
        if not filename.is_dir():
            continue
        if not filename.name.endswith('.app'):
            continue
        app_bundles.append(filename)

    return app_bundles
50a4bf767eb0208045257a2ebf8a760ae7a5fc6d
667,184
def parsesep(s: str, sep: str = ","):
    """Get the next word from a string, separated by sep.
    Return (word, remainder)"""
    i = s.find(sep)
    if i < 0:
        i = len(s)
    result = s[0:i]
    remain = s[i + len(sep):]
    return (result, remain)
ebc09edf1f361768db15d1dfefd3d9d00efba795
451,028
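A usage sketch (assuming parsesep above):

print(parsesep('a,b,c'))  # ('a', 'b,c')
print(parsesep('solo'))   # ('solo', '')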
def energy_value(h, J, sol):
    """
    Obtain energy of an Ising solution for a given Ising problem (h,J).

    :param h: External magnetic term of the Ising problem. List.
    :param J: Interaction term of the Ising problem. Dictionary.
    :param sol: Ising solution. List.
    :return: Energy of the Ising string.
    :rtype: Integer or float.
    """
    ener_ising = 0
    for elm in J.keys():
        if elm[0] == elm[1]:
            raise TypeError("""Interaction term must connect two different variables""")
        else:
            ener_ising += J[elm] * int(sol[elm[0]]) * int(sol[elm[1]])
    for i in range(len(h)):
        ener_ising += h[i] * int(sol[i])
    return ener_ising
866e3175534f5c1e1d606b12e6815cf520a467b0
663,403
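A tiny worked example (hypothetical h and J, assuming energy_value above): with h = [1, -1] and a single coupling J[(0, 1)] = 2, the spin string [1, -1] has energy 2*1*(-1) + 1*1 + (-1)*(-1) = 0.

h = [1, -1]
J = {(0, 1): 2}
print(energy_value(h, J, [1, -1]))  # 0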
def get_scalebin(x, rmin=0, rmax=100, tmin=0, tmax=100, step=10):
    """
    Scale variable x from rdomain to tdomain with step sizes
    return key, index

    :param `x`: Number to scale
    :type `x`: number
    :param `rmin`: The minimum of the range of your measurement
    :type `rmin`: number, optional
    :param `rmax`: The maximum of the range of your measurement
    :type `rmax`: number, optional
    :param `tmin`: The minimum of the range of your desired target scaling
    :type `tmin`: number, optional
    :param `tmax`: The maximum of the range of your measurement
    :type `tmax`: number, optional
    :param `step`: The step size of bins of target range
    :type `step`: number, optional

    :return: The bin-string of the scaled variable
    :rtype: string

    Example
        >>> import truvari
        >>> truvari.get_scalebin(4, 1, 5, 0, 20, 5)
        ('[15,20)', 3)
        >>> truvari.get_scalebin(6, 1, 5, 0, 20, 5)
        ('>=20', 4)
    """
    newx = (x - rmin) / (rmax - rmin) * (tmax - tmin) + tmin
    pos = 0
    for pos, i in enumerate(range(tmin, tmax, step)):
        if newx < i + step:
            return f"[{i},{i+step})", pos
    return f">={tmax}", pos + 1
7cd22ca13e6dde43bada1557fabf3cb217047243
522,541
def get_rad_factor(rad, emission, solar_irr, emissivity=None):
    """
    Return radiance factor (I/F) given observed radiance, modeled emission,
    solar irradiance and emissivity. If no emissivity is supplied, assume
    Kirchoff's Law (eq. 6, Bandfield et al., 2018).

    Parameters
    ----------
    rad (num or arr): Observed radiance [W m^-2 um^-1]
    emission (num or arr): Emission to remove from rad [W m^-2 um^-1]
    solar_irr (num or arr): Solar irradiance [W m^-2 um^-1]
    emissivity (num or arr): Emissivity (if None, assume Kirchoff)
    """
    if emissivity is None:
        # Assume Kirchoff's Law to compute emissivity
        emissivity = (rad - solar_irr) / (emission - solar_irr)
    return (rad - emissivity * emission) / solar_irr
5d02a0e31012e4116e048e0a23e273824d926664
579,434
def filter_dict(original_dict, pattern_dict):
    """
    Return a dict that contains all items of original_dict if item key is
    in pattern_dict.keys()

    :param original_dict:
    :param pattern_dict:
    :return:
    """
    keys = set(original_dict.keys()).intersection(pattern_dict.keys())
    return {key: original_dict[key] for key in keys}
2e5ea5bd2590ebf71ca1174b7d7572f78555bd35
429,601
def parse_rrset_record_values(e_resource_records):
    """
    Used to parse the various Values from the ResourceRecords tags on
    most rrset types.

    :param lxml.etree._Element e_resource_records: A ResourceRecords tag
        beneath a ResourceRecordSet.
    :rtype: list
    :returns: A list of resource record strings.
    """
    records = []
    for e_record in e_resource_records:
        for e_value in e_record:
            records.append(e_value.text)
    return records
92d758407e557301f766f122a4f6bca952035350
541,955
def create_point_datatype(records):
    """
    Creates point datatype to enable fetching GeoJSON from Socrata

    :param records: list - List of record dicts
    """
    for record in records:
        latitude = record["latitude"]
        longitude = record["longitude"]

        # Socrata rejects point upserts with no lat/lon
        if latitude is not None and longitude is not None:
            record["point"] = f"POINT ({longitude} {latitude})"

    return records
3c762ad4cfbad5ef66cb84a91b5a9dcd0e7bfb4f
620,037
import re

def get_config_name(config):
    """
    Return sample sheet configuration name.
    Remove any identifying local directory information if present.

    :param config: Original configuration name (string)
    :return: String
    """
    if config.find('/') == -1 and config.find('\\') == -1:
        return config
    return re.split('[/\\\\]', config)[-1]
bc6634dcc1030fc6b4929dd223b1b8611f922eed
554,939
def add_inference_args(parser):
    """Add parser arguments for inference options."""
    parser.add_argument('--inference-type', metavar='<inference-type>',
                        type=str, required=False,
                        choices=['cvlib', 'fastmot'], default='cvlib',
                        help='Input type for inference ["cvlib", "fastmot"].')
    parser.add_argument('--names', metavar='<names>',
                        type=str, required=False,
                        default='./utils/cfg/coco.names',
                        help='Path to *.names.')
    parser.add_argument('--object-category', metavar='<object-category>',
                        type=str, required=False,
                        default='scissors',
                        help='COCO object category to select [scissors].')
    parser.add_argument('--confidence-threshold', metavar='<confidence-threshold>',
                        type=float, required=False,
                        default=0.25,
                        help='Confidence threshold.')  # 0.5
    parser.add_argument('--nms-threshold', metavar='<nms-threshold>',
                        type=float, required=False,
                        default=0.3,
                        help='NMS threshold.')
    parser.add_argument('--model', metavar='<model>',
                        type=str, required=False,
                        default='yolov4',
                        help='Path of input image.')
    parser.add_argument('--no-filter-object-category', action='store_true',
                        help='Disable biggest single-object category selection.')
    parser.add_argument('--disable-gpu', action='store_true',
                        help='Disable GPU usage for inference.')
    return parser
b65ce0a0a2cc9fd270fa46a8eb9c96e045a5c24d
426,946
def check_fit(image, text_file):
    """
    Checks if the supplied data will fit the image.

    Args:
        image: The image object of the PIL image.
        text_file: the text file you want to embed to the image.

    Returns:
        True: If the supplied text fits the image.
        False: If the supplied text will not fit in the image.
    """
    # Reads the number of characters to embed
    text_file.seek(0)
    text_length = len(text_file.read())

    # Calculates the number of pixels needed, plus the 11 extra pixels to
    # embed text length
    pixels_required = text_length * 8 + 11

    # Calculate the number of pixels the image has
    image_width, image_height = image.size
    total_pixels = image_width * image_height

    if total_pixels < pixels_required:
        return False
    else:
        return True
8b6ea04b0b7c40db9e464b2fd38214e9d9ae868e
437,725
def convert_int_to_bits(int_value, index, size):
    """
    :param int_value: The integer value to convert to shifted bit representation.
    :param index: Start index of value in desired 4-byte output. Least
        significant bit of output is index zero.
    :param size: Size in bits that integer value should take up in returned data.
    :return: A 4-byte integer containing the int_value at the specified index.
    """
    if index < 0:
        raise RuntimeError("Index provided to convert_int_to_bits must be a positive value. Index: {0}".format(index))
    if size < 1:
        raise RuntimeError("Size provided to convert_int_to_bits must be non-zero. Size: {0}".format(size))
    if int_value > 2**size - 1:
        raise RuntimeError("Value provided to convert_int_to_bits cannot be represented in desired number of bits. Value: {0}, number of bits: {1}".format(int_value, size))
    num_bits = 32
    if index + size > num_bits:
        raise RuntimeError("Invalid values provided to convert_int_to_bits. Size + index must be less than or equal to 32.\nSize: {0} bits, index: {1}".format(size, index))
    return int_value << index
aade4c823495305c517a7f573c0f752dcdd4bc38
542,437
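A quick check (assuming convert_int_to_bits above): placing the 4-bit value 5 at bit index 8 yields 5 << 8.

print(hex(convert_int_to_bits(5, index=8, size=4)))  # 0x500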
import torch

def find_intersection(line_a, line_b):
    """Find the intersection point (if it exists) of two line segments, a and b.

    This uses an algorithm based upon LeMothe's "Tricks of the Windows Game
    Programming Gurus". We could have used some fancy cross product stuff
    here, but this is much more comprehensible. Each segment is
    parameterized by variables, s and t, such that when s=0 we are at
    line_a[0] and when s=1 we are at line_a[1] (and so on for t). We then
    solve for the values of s and t where the equations for line_a and
    line_b give the same result. If we cannot find values for s and t that
    satisfy this then there is no intersection.

    Arguments:
        line_a (tensor): 2x2 tensor describing line a. line_a[0] is the
            first point.
        line_b (tensor): The same thing as line_a.
    Returns:
        None or 2 element torch.tensor
    """
    xslope_a = line_a[1, 0] - line_a[0, 0]
    xslope_b = line_b[1, 0] - line_b[0, 0]
    yslope_a = line_a[1, 1] - line_a[0, 1]
    yslope_b = line_b[1, 1] - line_b[0, 1]

    # Find the parameters where the lines intersect
    s_numerator = -yslope_a * (line_a[0, 0] - line_b[0, 0]) + xslope_a * (line_a[0, 1] - line_b[0, 1])
    t_numerator = -yslope_b * (line_a[0, 0] - line_b[0, 0]) + xslope_b * (line_a[0, 1] - line_b[0, 1])
    denominator = (-xslope_b * yslope_a + xslope_a * yslope_b)

    # If the lines are parallel then the slope will cancel out and the
    # denominator will be 0. For simplicity we will just say that they do
    # not intersect in that case.
    if 0 == denominator:
        # Early return for efficiency
        return None

    # Check one range at a time to possibly skip a second division operation
    s = s_numerator / denominator
    if 0 <= s and s <= 1:
        t = t_numerator / denominator
        if 0 <= t and t <= 1:
            intersection = [line_a[0, 0] + t * xslope_a, line_a[0, 1] + t * yslope_a]
            return torch.tensor(intersection)

    # No intersection
    return None
bb0fcc8cd3992abef05d13bdc18c1eae407fe5c1
277,294
def _check_1d_vector(vector):
    """Check 1D vector shape

    Check 1D vector shape. array with shape [n, 1] or [n, ] are accepted.
    Will return a 1 dimension vector.

    Parameters
    ----------
    vector : array (n,) or (n, 1)
        rank one vector

    Returns
    -------
    vector : array, (n,)
    """
    v_shape = vector.shape
    if len(v_shape) == 1:
        return vector
    elif len(v_shape) == 2 and v_shape[1] == 1:
        return vector.reshape(v_shape[0],)
    else:
        raise ValueError("Vector is not 1-d array: shape %s" % str(v_shape))
ff8cc78d71f8fdfa714c351dc7662a470c9f510a
547,500
def get_array_type(arr):
    """Returns if the array is a scalar ('scalars'), vector ('vectors')
    or tensor ('tensors'). It looks at the number of components to decide.
    If it has a weird number of components it returns the empty string.
    """
    n = arr.number_of_components
    ret = {1: 'scalars', 3: 'vectors', 4: 'scalars', 9: 'tensors'}
    return ret.get(n) or ''
886949122ea3d4b2ea395c9fbdec618e0af973a8
578,484
def find_key_symptom(tariffs, cause_reduction, cause, endorsements, rules=None):
    """Find the key endorsed symptom for a cause

    Args:
        tariffs (dict): processed tariff matrix
        cause_reduction (dict): mapping from cause46 to cause34
        cause (int): cause number at the cause34 level
        endorsements (iterable): names of endorsed symptoms
        rules (dict): mapping of rule-based cause prediction to key symptom

    Returns:
        symptom (str): name of the key symptom
    """
    rules = rules or {}
    rule_symp = rules.get(cause)
    if rule_symp:
        return rule_symp

    causes46s = [cause46 for cause46, cause34 in cause_reduction.items()
                 if cause34 == cause]
    values = {}
    for cause46 in causes46s:
        for symptom, tariff in tariffs[cause46]:
            if symptom not in endorsements or tariff <= 0:
                continue
            if symptom in values and values[symptom] < tariff:
                continue
            else:
                values[symptom] = tariff

    if values:
        return sorted(values.items(), key=lambda x: x[1])[-1][0]
e8805fd29bf09cd3e0269ae4203f4fd7912f5c72
8,678
from typing import TextIO
import csv

def process_ethnics(data: TextIO):
    """Returns a list of ethnics rows parsed from the CSV data
    (the header line is skipped)

    Args:
        data (TextIO): A file-like object of ethnics data
    """
    next(data)
    datas = csv.reader(data)
    return [item for item in datas]
517099893895b0e53d633c6c573ea767701455cd
298,080
def get_cmd_args(argv):
    """
    Take the `argv` arguments apart and split up in arguments and options.
    Options start with `-` and can be stacked. Options starting with `--`
    cannot be stacked.
    """
    args = []
    options = []
    i = 1
    while i < len(argv):
        if argv[i].strip()[0] == '-':
            # it's an option
            if argv[i].strip()[1] == '-':
                options.append(argv[i].strip()[2:])
            else:
                for a in argv[i].strip()[1:]:
                    options.append(a)
        else:
            # it's an argument
            args.append(argv[i])
        i += 1  # advance to the next token; without this the loop never terminates
    return args, options
73b725a2215c845c97e0839f9f8b5f6768671798
448,728
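A usage sketch (assuming the fixed get_cmd_args above, which now advances i each iteration):

args, options = get_cmd_args(['prog', '-ab', '--verbose', 'file.txt'])
print(args)     # ['file.txt']
print(options)  # ['a', 'b', 'verbose']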
def two_level_minus_r_over_t(g1d, gprime, Delta1):
    """
    Minus the reflection coefficient of a two-level atom divided by the
    transmission coefficient. The transfer matrix for the two-level atoms
    will be written in terms of this ratio.
    """
    # Assume real Omega
    return g1d / (gprime - 2j * Delta1)
27632dd9e2b048384624afd3dee05f2e1414841d
356,726
def formatTime(time):
    """Convert a datetime object to a string in the format expected by
    the AppAssure API.
    """
    return time.isoformat()[:-7]
47defe4d47b37bdf72442ff17e097bf05cb48f29
521,260
def adjust_timescales_from_daily(ds, tstep):
    r"""Adjust continuous growth rate parameters from daily scale to the
    scale of a single timestep in the dynamic model.

    Parameters
    ----------
    ds : :class:`xarray.Dataset`
        Dataset containing coords or variables corresponding to rate
        parameters, e.g. $\lambda$ (asymptotic exponential growth rate),
        $\beta$ (daily transmission rate per infectious individual),
        $\gamma$ (inverse of duration of infectious period), and/or
        $\sigma$ (inverse of latency period).
    tstep : float
        Timestep of model, in days

    Returns
    -------
    out : :class:`xarray.Dataset`
        `ds`, with rate parameters adjusted.
    """
    out = ds.copy()
    for k, v in ds.variables.items():
        if k.split("_")[0] in [
            "lambda",
            "beta",
            "gamma",
            "sigma",
        ]:
            # staying in continuous rate
            out[k] = out[k] * tstep
    return out
802b472138be70934ff3e37af367d224970b5517
69,597