content
stringlengths
39
9.28k
sha1
stringlengths
40
40
id
int64
8
710k
def whitespace_tokenize(text):
    """Strip surrounding whitespace and split *text* on runs of whitespace.

    Returns an empty list for empty or whitespace-only input.
    """
    stripped = text.strip()
    if not stripped:
        return []
    return stripped.split()
af0cee2d429e96af0214614581681927fcaefaf8
159,690
def attach_property(clusters, d, p):
    """Attach `p` to `c.properties[d]` for each `c` in `clusters`.

    Each cluster is rebuilt with a fresh properties dict so the originals
    are left untouched; the rebuilt clusters are returned in order.
    """
    rebuilt = []
    for cluster in clusters:
        props = dict(cluster.properties)
        props[d] = set(props[d]) | {p}
        rebuilt.append(cluster.rebuild(properties=props))
    return rebuilt
420b38282052c51a5c3eaf3b7a9215297a86e002
189,418
import random


def flip(prob):
    """Return "H" or "T" representing the outcome of a coin flip.

    "H" is returned with probability *prob*, otherwise "T".
    """
    return "H" if random.random() < prob else "T"
04a8c1930d4deef9f0c1439b535233e3cc7542a8
562,277
def read_spectrum(spectrum, index):
    """Read a single spectrum.

    Parameters
    ----------
    spectrum : mzML spectrum
        The mzML spectrum to parse (dict-like access by accession/term,
        plus a ``peaks`` attribute of (mz, intensity) pairs).
    index : unused
        NOTE(review): this parameter is never read — confirm whether
        callers rely on the signature before removing it.

    Returns
    -------
    out : tuple (data, ms_level, polarity)
        ``data`` is a list of per-peak tuples — (mz, i, rt, polarity) for
        MS1 spectra, extended with precursor mz/intensity and collision
        energy otherwise; ``ms_level`` and ``polarity`` are echoed back.
    """
    # MS:1000130 marks positive scan polarity; membership test gives a bool.
    polarity = 'MS:1000130' in spectrum
    ms_level = spectrum['ms level']
    # MS:1000016 is scan start time; normalize to minutes when needed.
    rt, units = spectrum['MS:1000016']
    if units != 'minute':
        rt /= 60
    collision_energy = spectrum.get('MS:1000045', 0)
    precursor_intensity = spectrum.get('MS:1000042', 0)
    precursor_mz = spectrum.get('MS:1000744', 0)
    # NOTE(review): min_mz/max_mz are computed but never used below.
    min_mz = spectrum.get('lowest m/z value', 0)
    max_mz = spectrum.get('highest m/z value', 0)
    if ms_level == 1:
        data = [(mz, i, rt, polarity) for (mz, i) in spectrum.peaks]
    else:
        data = [(mz, i, rt, polarity, precursor_mz, precursor_intensity, collision_energy) for (mz, i) in spectrum.peaks]
    return data, ms_level, polarity
b3d05c4af829377bcea0e077cd92ceffaca375e5
109,299
from typing import List


def get_changed_methods(project_commits) -> List[str]:
    """Used to populate the DAILY/WEEKLY reports: collect the names of all
    methods changed across *project_commits*.

    Note: duplicates are removed (the same method may be touched in more
    than one commit); as a consequence the returned order is arbitrary.
    """
    project_changed_methods = []
    for commit in project_commits:
        for file in commit.modified_files:
            method_names = [m.name for m in file.changed_methods]
            project_changed_methods.extend(method_names)
    # NOTE(review): prints every (pre-dedup) method name to stdout —
    # looks like debug/report output; confirm it is intentional.
    print(*project_changed_methods, sep="\n")
    return list(set(project_changed_methods))
1982f8c7f79e9320ffa38c5555769344f832c8df
341,187
def name_to_dict(name):
    """Convert an OpenSSL x509Name to a plain dict.

    Builds a dict from every (key, value) component of the x509Name.
    """
    return {key: value for key, value in name.get_components()}
36c87ee9134f3c3ba4c78e9f062498b3990ab9a3
514,653
from typing import Tuple


def precisionRecallF1(correct: int, extracted: int, groundTruth: int) -> Tuple[float, float, float]:
    """Calculate precision, recall and F1 for keyword extraction.

    :param correct: Number of correct keywords.
    :param extracted: Number of extracted keywords.
    :param groundTruth: Number of annotated keywords.
    :return: (precision, recall, F1); each term is 0 when its denominator is 0.
    :raise ValueError: When invalid values are passed.
    """
    if correct > extracted:
        raise ValueError("Number of correct keywords is greater than number of all extracted keywords.")
    if groundTruth == 0 and correct > 0:
        raise ValueError("There should be no keywords, but correct is > 0.")
    precision = correct / extracted if extracted > 0 else 0
    recall = correct / groundTruth if groundTruth > 0 else 0
    if precision + recall > 0:
        f1 = 2 * precision * recall / (precision + recall)
    else:
        f1 = 0
    return precision, recall, f1
e35b2bbeef22e3eeace40484544fc08dc528f13d
626,059
import math


def vectorfield(w, t):
    """Defines the differential equations for the coupled system.

    Arguments:
        w : vector of the 5 state variables: w = [x1, y1, x2, y2, x3]
            (the original docstring listed only 4 — the unpacking below
            requires 5)
        t : time (unused in this autonomous system)

    Returns:
        f : list of the 5 derivatives [x1', y1', x2', y2', x3'].
    """
    x1, y1, x2, y2, x3 = w
    # Create f = (x1', y1', x2', y2', x3'):
    f = [2*math.pow(y1,2), y1*(x2-x1), -2*math.pow(y1,2)+2*math.pow(y2,2), y2*(x3-x2), -2*math.pow(y2,2)]
    return f
fd1aff97a31cd9c140521c7acb75067255d172d8
136,428
import math


def eq4623d2(L1, W1, W, NL):
    """Eq. 4.6.2.3-2: Equivalent strip width for two lanes.

    The equivalent width of longitudinal strips per lane for both shear and
    moment with more than one lane loaded may be determined as

        E = min(84.0 + 1.44*math.sqrt(L1*W1), (12.0*W)/NL)

    Args:
        L1 (float): modified span length taken equal to the lesser of the
            actual span or 60.0, (ft)
        W1 (float): modified edge-to-edge width of bridge taken to be equal
            to the lesser of the actual width or 60.0 for multilane loading,
            or 30.0 for single-lane loading, (ft)
        W (float): physical edge-to-edge width of bridge, (ft)
        NL (float): number of design lanes as specified in Article 3.6.1.1.1

    Returns:
        E (tuple(float, str)): equivalent width, (in.); formatted text output
        of the equation with values substituted and the final value.
    """
    NL = math.floor(NL)
    E = min(84.0 + 1.44 * math.sqrt(L1 * W1), (12.0 * W) / NL)
    # Reuse E in the report instead of recomputing the min a second time.
    text = ('E = min(84.0 + 1.44*math.sqrt(L1*W1), (12.0*W)/NL)\n'
            f'E = min(84.0 + 1.44 * math.sqrt({L1:.3f} * {W1:.3f}), '
            f'(12.0*{W:.3f})/{NL:.0f})\n'
            f'E = {E:.3f}')
    return E, text
6dd39cac541ca7f4f5660da00f8ac242bd9e0f6c
567,616
import requests import json def get_asset_change_notifications(channel_id, auth): """Get asset change notifications for a specific channel.

    Calls the Getty Images v3 asset-changes endpoint with a PUT request
    (per the API, PUT acquires a change-set) and returns the decoded JSON.

    :param channel_id: notification channel identifier sent as a query param
    :param auth: dict with "api_key" and "access_token" entries
    :return: parsed JSON response body
    """ url = "https://api.gettyimages.com/v3/asset-changes/change-sets" query_params = {"channel_id": channel_id} headers = { "Api-Key": auth["api_key"], "Authorization": f"Bearer {auth['access_token']}" } response = requests.put(url, params=query_params, headers=headers) asset_change_notifications = json.loads(response.content) return asset_change_notifications
8fc948ef1cedc9216069b0ac4611d04c42c33606
118,562
def _calcPeakRange(sr,n, loFreq=200., hiFreq=2000.): """ _calcPeakRange - utility function to calculate parameters for peaks inputs: sr - sample rate n - fft length loFreq - lowest peak frequency hiFreq - highest peak frequency """ fftBase = sr / float(n) pkRange = n/2 + 1 minPos = int(round(loFreq/fftBase)) maxPos = int(round(hiFreq/fftBase)) return pkRange, minPos, maxPos
280c685622ffb73d6225a15c9cb138ea525c8e00
562,964
def is_cached(o, name):
    """Whether a cached property is already computed.

    A cached property stores its value in the instance ``__dict__`` on
    first access, so membership there means it has been computed.

    Parameters
    ----------
    o : object
        The object the property belongs to.
    name : str
        Name of the property.

    Returns
    -------
    bool
        True iff the property is already computed.
    """
    return name in vars(o)
eb7b1356ded56dddb4cd917b27461e9108bd7b76
24,900
def calc_hubbard_u(iv, iv_p1, d_mm_avg):
    """Calculate the estimated Hubbard energy (eV) of a structure.

    :param iv: Float, the vth ionization energy
    :param iv_p1: Float, the (v+1)th ionization energy
    :param d_mm_avg: Float, the average metal-metal distance
    :return: Float, the estimated Hubbard energy in eV
    """
    # Conversion factor from e^2/Angstrom to eV.
    E2_PER_ANGSTROM_TO_EV = 14.39965
    screening_term = E2_PER_ANGSTROM_TO_EV / d_mm_avg
    return iv_p1 - iv - screening_term
bcd0e189658aaa559ddf532a8289dc652ecffefc
572,712
def generate_united_trip_reason(event_a, event_b):
    """Generate a trip reason combining two concert events.

    :param event_a: mapping with a "performer" key
    :param event_b: mapping with a "performer" key
    :return: the formatted reason string
    """
    # Fixed user-facing typo/grammar: "give a conterts" -> "give concerts".
    reason = "{} and {} will give concerts soon. Let's travel! :)".format(
        event_a["performer"], event_b["performer"]
    )
    return reason
f6d21aa82978a1df2f997f76b72d226b092f64f4
347,868
def summarize_repos(events):
    """Generate markdown links for every distinct repo in *events*.

    Each event's ``repo`` is an (owner, name) pair; duplicates are
    collapsed and the result is sorted.
    """
    unique_repos = sorted({event.repo for event in events})
    link = '[{0}/{1}](https://github.com/{0}/{1})'
    return [link.format(*repo) for repo in unique_repos]
524000f40ae6f637fcbb809e110a1b36dee9a103
46,860
def get_client_ip(request):
    """Get the client IP address from the *request* object.

    Looks up the IP address that sent the *request* by checking the
    `HTTP_X_FORWARDED_FOR` header or `REMOTE_ADDR` in the request's
    metadata.

    Args:
        request: a Django request.

    Returns:
        An IP address as a string or an empty string if no IP could be found.
    """
    forwarded = request.META.get("HTTP_X_FORWARDED_FOR", '')
    # X_FORWARDED_FOR is "client1, proxy1, proxy2, ..." - keep the client.
    # str.split always yields at least one element, so the old
    # try/except IndexError around this line was dead code.
    client_ip = forwarded.split(',')[0]
    if not client_ip:
        client_ip = request.META.get('REMOTE_ADDR', '')
    return client_ip.strip()
61b99619ce630819be179320a8b0b88fa74d24ab
551,627
def get_scan_list(scandescriptor_list):
    """Determine which is the scan list for each radar.

    Parameters
    ----------
    scandescriptor_list : list of string
        the list of all scans for all radars; multi-radar descriptors are
        "RADARNNN:scan" strings

    Returns
    -------
    scan_list : list of lists
        the list of scans corresponding to each radar
    """
    descrfields = scandescriptor_list[0].split(':')
    if len(descrfields) == 1:
        # one radar: no "radar:" prefix, return all scans as a single group
        return [scandescriptor_list]
    # one or more radars
    # check how many radars are there
    radar_list = set()
    for scandescriptor in scandescriptor_list:
        radar_list.add(scandescriptor.split(':')[0])
    nradar = len(radar_list)
    # create the list of lists
    scan_list = [[] for i in range(nradar)]
    for scandescriptor in scandescriptor_list:
        descrfields = scandescriptor.split(':')
        # NOTE(review): chars [5:8] of the prefix are parsed as the 1-based
        # radar number — assumes names like "RADAR001"; confirm convention.
        ind_rad = int(descrfields[0][5:8])-1
        scan_list[ind_rad].append(descrfields[1])
    return scan_list
7bbd591900f0b574f5c766f8a4fcd7ea0b572ccf
196,265
def compute_min_distance_mendelian_ci(proband_CI, parent_CIs):
    """Compute the smallest distance between the proband confidence interval
    and any of the parental genotype confidence intervals.

    Args:
        proband_CI (Interval): ExpansionHunter genotype confidence interval.
        parent_CIs (list of Intervals): parental genotype confidence intervals.

    Return:
        int: the smallest absolute distance (in base pairs) between
        proband_CI and the intervals in parent_CIs.
    """
    return min(abs(proband_CI.distance_to(parent_CI)) for parent_CI in parent_CIs)
9b8f71c612972410054b18b05c5e86bda3c96321
33,731
def get_stations_by_town(stations):
    """Group *stations* by town.

    Returns a dict mapping each town (key) to the list of stations
    located in it (value), preserving input order within each town.
    """
    towns = {}
    for station in stations:
        towns.setdefault(station.town, []).append(station)
    return towns
cc18f6304f4a05894131173ce36b7dec2e95220a
115,897
from datetime import datetime


def datetime_to_discord(time: datetime, format: str = "f") -> str:
    """Convert a datetime object to a Discord timestamp string.

    The result has the form ``<t:UNIX_SECONDS:STYLE>``.
    """
    unix_seconds = int(time.timestamp())
    return "<t:" + str(unix_seconds) + ":" + format + ">"
a428141f8afa4a81c04a01d10eb5f46d1dd0e071
69,356
def length(region):
    """Return the length of *region* in base pairs (end - start)."""
    start, end = region.start, region.end
    return end - start
4a2a5b7dbe978cf5dcf0b52e0956dd02fc0443c0
365,602
def distance(point1, point2):
    """Return the Euclidean distance of two points in the Cartesian plane.

    >>> distance([3, 4], [0, 0])
    5.0
    >>> distance([3, 6], [10, 6])
    7.0
    """
    dx = point1[0] - point2[0]
    dy = point1[1] - point2[1]
    return (dx * dx + dy * dy) ** 0.5
f57446d1a27ca42e347b54310a9bc4184d4519ce
685,260
def logstashIndex(date):
    """Return the logstash index name for the given date.

    Logstash index names are in the format 'logstash-YYYY.MM.DD'.
    """
    return date.strftime("logstash-%Y.%m.%d")
1d2b953d04bd500b6571bb78ca5cd9c0162221db
100,180
def is_criador(usuario, sugestao):
    """Check whether the user is the creator of the class suggestion.

    :param usuario: authenticated user.
    :param sugestao: class suggestion (Sugestao de Turma).
    :return: True if the user is the suggestion's creator.
    """
    criador = sugestao.criador
    return usuario == criador
226dc8db705ae20fa5dda389ee2e747ad61f2c8d
600,775
import shutil


def get_executable(exec):
    """Return the full path to the given executable, or None if it does
    not exist on PATH.

    Note: the parameter name shadows the builtin ``exec``; kept to
    preserve the public signature.
    """
    resolved = shutil.which(exec)
    return resolved
e0d6616a9083391d476613b33336eed793cb3155
515,022
import re


def congr(morph1, morph2):
    """Check two morphological analyses for agreement in NUM and CASE.

    Returns True when both analyses carry the same CASE/NUM tag *values*
    in the same order.

    Bug fix: the original pattern captured only the tag name (CASE|NUM),
    so e.g. "[CASE=NOM]" and "[CASE=GEN]" compared equal; the group now
    captures the whole "TAG=VALUE" pair. Raw strings avoid invalid
    escape-sequence warnings for "\\[".
    """
    pattern = r'\[((?:CASE|NUM)=[A-Z]+)]'
    return re.findall(pattern, morph1) == re.findall(pattern, morph2)
ed92dedc4841aa9109bf74497aa7ec72982b988e
429,792
def concat(str1, str2):
    """Concatenate the string forms of two values."""
    return f"{str1}{str2}"
4697d453bb22bb65584f1c457c4d60f9e7e45bf6
392,048
def pos_new(x):
    """Vectorized max(x, 0): return a copy of *x* with negatives zeroed.

    The input array is not modified.
    """
    clipped = x.copy()
    clipped[clipped < 0] = 0
    return clipped
6464fbb39aea461f8f5bca4f0db159bebc73b9fe
354,834
from typing import List def _pad_list_int(x: List[int], length: int) -> List[int]: """Pad a List[int] using its last value until it has length `length`.""" x = x + x[-1:] * max(0, length-len(x)) x = x[:length] return x
fa96ddccd8f561ea60352f4684234ae1995a65bb
252,940
def calculate_lives_improved(param_df):
    """Create a new 'lives_improved' column in a df.

    Inputs:
        param_df - a df of parameters; must contain columns
            'lives_touched' and 'efficacy'
    Returns:
        the df (mutated in place) with a lives_improved column added,
        equal to lives_touched * efficacy
    """
    improved = param_df['lives_touched'] * param_df['efficacy']
    param_df['lives_improved'] = improved
    return param_df
49a496fa23d02de748c56ec81eca6fd589f1f8c3
482,192
def cap_absolute_value(value, max_absolute_value=1):
    """Return *value* with absolute value capped at *max_absolute_value*.

    Particularly useful in passing values to trigonometric functions where
    numerical errors may result in an argument > 1 being passed in.
    This code is modified from the pymatgen source code [1]_.

    Parameters
    ----------
    value : :class:`float`
        Value to cap.
    max_absolute_value : :class:`float`, optional
        Absolute value to cap `value` at. Defaults to 1.

    Returns
    -------
    :class:`float`
        `value` capped at `max_absolute_value` with sign preserved.

    References
    ----------
    .. [1] https://pymatgen.org/pymatgen.util.num.html
    """
    if value > max_absolute_value:
        return max_absolute_value
    if value < -max_absolute_value:
        return -max_absolute_value
    return value
925646585d9dd69479e86b7f74e75bd3f1c2d7da
245,233
def cli(ctx):
    """Get all canned values available in this Apollo instance.

    Output:

        list of canned value info dictionaries
    """
    # Delegates to the Apollo client hanging off the CLI context.
    return ctx.gi.cannedvalues.get_values()
bc0997297dccff9bca5a72b04fa8c9ceb08c7128
337,081
def complete_re(regex_str):
    """Anchor *regex_str* with ^ and $ so it must match the entire string."""
    return f"^{regex_str}$"
e4cb757ff8fef5db2e29c5074d33bcfd12674826
267,819
def s3_folder(path):
    """Check if a file/folder path is in AWS S3 (i.e. starts with "s3://").

    Falsy *path* values (None, "") return False.
    """
    # bool(...) replaces the redundant `True if ... else False`.
    return bool(path and path.startswith("s3://"))
3cc230cd400bf216f7a774fa0309298fb7c26471
154,695
def start_redshift_cluster(config, redshift, role_arn):
    """Creates a Redshift cluster based on configs.

    Args:
        config: a ConfigParser object with a [CLUSTER] section providing
            IDENTIFIER, DB_NAME, CLUSTER_TYPE, NODE_TYPE, DB_USER,
            DB_PASSWORD, DB_PORT and NUM_NODES
        redshift: a boto3 client object for the AWS Redshift service
        role_arn: String, IAM role ARN attached to the new cluster

    Returns:
        A dict with the AWS API response metadata of the create_cluster call
    """
    print("Creating Redshift Cluster: ", config['CLUSTER']['IDENTIFIER'])
    # Port and node count are stored as strings in the config file.
    return redshift.create_cluster(
        DBName=config['CLUSTER']['DB_NAME'],
        ClusterIdentifier=config['CLUSTER']['IDENTIFIER'],
        ClusterType=config['CLUSTER']['CLUSTER_TYPE'],
        NodeType=config['CLUSTER']['NODE_TYPE'],
        MasterUsername=config['CLUSTER']['DB_USER'],
        MasterUserPassword=config['CLUSTER']['DB_PASSWORD'],
        Port=int(config['CLUSTER']['DB_PORT']),
        NumberOfNodes=int(config['CLUSTER']['NUM_NODES']),
        IamRoles=[role_arn]
    )
9134defbac99671b8c120bf66baeee15119004ef
514,365
from typing import Tuple


def hex_to_rgb(value: str) -> Tuple[int, int, int]:
    """Turn a hex color code into RGB ints.

    Parameters
    ----------
    value : str, the hex color code (e.g. "#aabbcc")

    Returns
    -------
    Tuple[int, int, int], the RGB values
    """
    digits = value.lstrip("#")
    step = len(digits) // 3
    r, g, b = (int(digits[i * step:(i + 1) * step], 16) for i in range(3))
    return (r, g, b)
74c8630e4af01450d374263aa1169d41f933b871
378,017
def get_regions_overlap_with_tfs(regions, encode_pr):
    """Returns boolean matrix indicating whether regions overlap with an
    ENCODE TF.

    Rows are region peak_ids, columns are TF "Name" values; a cell is True
    when the region has at least one overlap with that TF's intervals.
    NOTE(review): assumes `regions.pr` is a PyRanges object and the joined
    frame carries "peak_id" and "Name" columns — confirm against callers.
    """
    joined = regions.pr.join(encode_pr).as_df()
    # Count overlaps per (peak, TF) and turn counts into booleans.
    is_overlap = joined.groupby(["peak_id", "Name"]).size().unstack(-1, fill_value=0) > 0
    # Regions with no overlap at all are restored as all-False rows.
    is_overlap = is_overlap.reindex(regions.index, fill_value=False)
    return is_overlap
b4fb652b07629d78b0fa0476314df6a973bf9c0f
174,608
def PlayerAllegiance(game_state, player):
    """Map a player to an allegiance via the game-state lookup."""
    path = '/publicPlayers/%s' % player
    return game_state.get(path, 'allegiance')
e82f1ef8a03374d6ac6a60ae816e2062299ccdff
512,943
def dict_to_string(dict):
    """Custom conversion method for dict to string.

    Renders each entry as a bullet-point line " * key: value".

    Note: the parameter name shadows the builtin ``dict``; it is kept to
    preserve the public signature.

    :param dict: A dict used to store metadata.
    :type dict: dict
    :return: The dict as a string with added * for separation
    :rtype: str
    """
    lines = []
    for key, value in dict.items():
        lines.append(f" * {key}: {value}\n")
    # str.join is linear and no longer shadows the builtin str().
    return "".join(lines)
82e25583f4594e2b12353e481b1e2d90a33999ce
212,543
def jaccard_distance(text1, text2):
    """Measure the Jaccard distance between two texts.

    ARGS:
        text1, text2: lists of tokens
    RETURN:
        score (float): distance in [0, 1]; 0.0 when both inputs are empty
        (previously this raised ZeroDivisionError).
    """
    set1, set2 = set(text1), set(text2)
    union = set1 | set2
    if not union:
        # Two empty token lists are identical: distance 0.
        return 0.0
    intersection = set1 & set2
    return 1 - len(intersection) / len(union)
1a3095f086c648dfe8b334830a5d963e247cb92e
674,577
def classToDict(obj=None):
    """Transform an object into a dict so it can be JSONified.

    Useful for turning custom classes into JSON-compatible dictionaries.
    Returns a shallow copy of the object's ``__dict__``; {} when *obj*
    is None.
    """
    # Identity check: `== None` would invoke a custom __eq__.
    if obj is None:
        return {}
    return dict(obj.__dict__)
d9f4aa85479fdd00ac25aeb6a8ca5ec08644473f
112,096
def convert_tok_str_to_sent(tok_str):
    """Return the sentence reassembled from subword tokens.

    Tokens containing '#' (e.g. "##ing") are glued onto the preceding
    text; others are separated by a space.
    """
    sentence = ''
    for piece in tok_str.split(' '):
        sentence += piece.replace('#', '') if '#' in piece else ' ' + piece
    return sentence.strip()
683d6d91db695eaca117bf486fb1e38251df9d8a
344,418
def find_sub_list(sl, l):
    """Return (start, end) of the first occurrence of sublist *sl* in *l*.

    *end* is exclusive. Returns None when *sl* does not occur.
    """
    n = len(sl)
    for start, elem in enumerate(l):
        if elem == sl[0] and l[start:start + n] == sl:
            return start, start + n
e23f7ca573d61a158d6cb0d571403327d1b69732
140,852
from typing import List


def fix_z_values(coord_values: List[str]) -> List[float]:
    """Drop constant-zero z coordinates from a flat coordinate list.

    Some geometries have a '0' value in the z position of the coordinates.
    When the list length is a multiple of 3 and every non-blank third entry
    is '0', those entries are assumed to be z values and removed; otherwise
    all non-blank values are converted as-is.
    """
    if len(coord_values) % 3 == 0:
        # Non-blank values sitting in the z (every third) position.
        z_entries = [v for i, v in enumerate(coord_values) if i % 3 == 2 and v]
        if all(v == '0' for v in z_entries):
            # Treat every third value as a z coordinate and strip it.
            return [float(v) for i, v in enumerate(coord_values) if i % 3 != 2]
    return [float(v) for v in coord_values if v]
47dbad81331f84696e087731b3ccbe0b27210ae4
216,260
def list_to_string(l):
    """Concatenate the str() of each element, e.g. [1, 2, 4] --> '124'."""
    # join accepts any iterable; the intermediate list() was redundant.
    return ''.join(map(str, l))
f4677a5f6649048526e016f4f9f96d1fb0b28082
581,307
import multiprocessing


def cpu_count(multiplier: int = 0):
    """Get server's CPU core count scaled by *multiplier*.

    Used for number of web server workers.

    NOTE(review): with the default ``multiplier=0`` this always returns 0,
    which contradicts the stated purpose — confirm whether the default
    should be 1 or whether callers always pass a multiplier.

    Returns:
        {int} -- CPU cores times multiplier
    """
    # Standard Library
    return multiprocessing.cpu_count() * multiplier
a19807087b149ce18ea0ced2bb94fbea19ad3490
484,318
import re


def extract_variables(content):
    """Extract all variable names of the form ``$variable`` from *content*.

    :param content: test-case data to scan; non-str values are converted
        with str() first
    :return: list of extracted variable names
    """
    # \w already includes '_', so the old [\w_] class was redundant;
    # findall on a str can't raise TypeError, so no try/except is needed.
    if not isinstance(content, str):
        content = str(content)
    return re.findall(r"\$(\w+)", content)
1ce8d3a363c03bb518096c2f3b8d4fe100f793f4
93,854
def convertNumbers(s, l, toks):
    """Parse action: convert the first token to int, or float on failure."""
    literal = toks[0]
    try:
        return int(literal)
    except ValueError:
        return float(literal)
109631034ff2eda37f34f6edc11a59d635a39c4c
188,877
import re


def get_first_name(source):
    """Return the given name (first name), with '/' markers removed and
    surrounding whitespace stripped."""
    raw = source["name"].get("firstname", "")
    return re.sub("/", "", raw).strip()
cb94291ca60e6abd732f562db1239476d34d558d
491,833
def apply_financial_params(dataframe, financing_terms, inflation_rate):
    """apply_financial_params

    Add the year's financial parameters including depreciation schedule
    (array for years 0,1,2,3,4,5), Solar ITC fraction, Solar ITC min size kw,
    Solar ITC max size kw, years of loan term, loan rate, down payment
    percent, real discount percent, tax rate and economic lifetime.

    Parameters
    ----------
    dataframe : pandas.DataFrame
        agent attributes for the given year; indexed by agent_id and
        carrying 'year' and 'sector_abbr' columns used as join keys
    financing_terms : pandas.DataFrame
        per-(year, sector) financing attributes (deprec_sch, itc_fraction,
        min_size_kw, max_size_kw, loan_term, loan_rate, down_payment,
        real_discount, tax_rate, economic_lifetime)
    inflation_rate : float
        inflation rate percent, broadcast to every row

    Returns
    -------
    pandas.DataFrame
        agent attributes with the financing attributes joined on,
        re-indexed by agent_id
    """
    # Expose agent_id as a column so it survives the merge, then restore it.
    dataframe = dataframe.reset_index()
    dataframe = dataframe.merge(financing_terms, how='left', on=['year', 'sector_abbr'])
    dataframe['inflation'] = inflation_rate
    dataframe = dataframe.set_index('agent_id')
    return dataframe
26cfbd72c0110924cff7add0f728dad5f036b6da
587,225
import re


def count_characters(text, whites=False):
    """Get character count of a text.

    Args:
        whites: If True, whitespace characters are included in the count;
            if False (default), whitespace is removed before counting.
            (The original docstring stated the opposite of the code.)
    """
    if whites:
        return len(text)
    else:
        return len(re.sub(r"\s", "", text))
e4db9e873e800282cf7f2398272a8b4546fe171e
9,395
import torch


def split_tensor_along_last_dim(tensor, partitions, contiguous_split_chunks=False):
    """Split a tensor along its last dimension. Adapted from Megatron-LM.

    Arguments:
        tensor: input tensor.
        partitions: list of partition sizes to supply to torch.split.
        contiguous_split_chunks: If True, make each chunk contiguous
            in memory.
    """
    chunks = torch.split(tensor, partitions, dim=tensor.dim() - 1)
    if contiguous_split_chunks:
        # torch.split returns views; force contiguous copies when asked.
        return tuple(chunk.contiguous() for chunk in chunks)
    return chunks
a72f94d23c9b6ea5c8ffd74725b6894e2387856c
188,113
from bs4 import BeautifulSoup


def get_categories_objects(categories_output):
    """Return categories id, title and description parsed from the first
    HTML table of the response document, as the dict object:

        {
            'category_id': {
                'title': 'some_title',
                'description': 'some_description'
            }
        }

    Assumes each data row has at least three <td> cells in the order
    id, title, description; header rows without <td> are skipped.
    """
    categories = {}
    document = BeautifulSoup(categories_output, 'html.parser')
    table = document.find_all('table')[0]
    for row in table.find_all('tr'):
        columns = row.find_all('td')
        if columns:
            categories[columns[0].get_text().strip()] = {
                'title': columns[1].get_text().strip(),
                'description': columns[2].get_text().strip()
            }
    return categories
b7f9b09f80f10632ad1b18e9bb0702404defffba
450,834
def is_batch_norm(v):
    """Decide whether variable *v* belongs to a batch-norm layer, judged
    by keywords in its (lower-cased) name."""
    keywords = ('batchnorm', 'batch_norm', 'bn')
    name = v.name.lower()
    # Generator lets any() short-circuit instead of building a list first;
    # lowering the name once avoids repeated work.
    return any(k in name for k in keywords)
d9c71cc8b55d13cf5ee3fae1666c5c35b03518f5
192,485
def is_json_metadata(text):
    """Is this a JSON metadata?

    Heuristic: True when a '{' appears and no '=' occurs before it.
    """
    brace_pos = text.find("{")
    if brace_pos < 0:
        return False
    equals_pos = text.find("=")
    return equals_pos < 0 or brace_pos < equals_pos
5d67e5d6f79cd81e30f1726c9184300e59ad9ff2
662,524
def check_position_detection(bounds): """Check whether the specified range of 5 intervals has the right proportions to correspond to a slice through a position detection pattern. An ideal slice through a position detection pattern consists of 5 intervals colored B,W,B,W,B with lengths proportional to 1,1,3,1,1. Returns: (center_coord, pixels_per_module) if this could be a position detection pattern, otherwise (0, 0). """ # Expected relative positions of black/white boundaries # within the position detection pattern. expect_bound_pos = [-3.5, -2.5, -1.5, 1.5, 2.5, 3.5] if (len(bounds) != 6) or (bounds[4] >= bounds[5]): return (0, 0) pattern_width = float(bounds[5] - bounds[0]) middle_width = float(bounds[3] - bounds[2]) if (pattern_width < 7) or (middle_width < 3): return (0, 0) center = float(sum(bounds)) / 6.0 pitch = (pattern_width + middle_width) / 10.0 good = True for k in range(6): rel_bound_pos = (bounds[k] - center) / pitch if abs(rel_bound_pos - expect_bound_pos[k]) >= 0.5: good = False break if not good: return (0, 0) return (center, pitch)
c425527405e0452d2069168908a81d713b7c9f33
43,493
def get_touchdown(df):
    """Calculates instance of touchdown (landing).

    Returns the index label of the first sample at or below 2 altitude
    units after the maximum-altitude sample.
    NOTE(review): assumes an 'Altitude' column and that idxmax yields a
    positional offset usable with iloc (i.e. a RangeIndex) — confirm.
    The boolean mask is built from the full frame, so pandas may emit a
    reindexing UserWarning here.
    """
    df = df.copy()
    return df.iloc[df['Altitude'].idxmax():][df['Altitude'] <= 2].index[0]
314aa3f1dfd91fd0f7a287359e48c930b731aa1e
661,134
import struct


def encode_big_endian_32(i):
    """Take an int and return its 4 big-endian bytes (unsigned 32-bit)."""
    # '>I' always yields exactly 4 bytes, so the old [-4:] slice was a no-op.
    return struct.pack('>I', i)
6e84903955e71ca3ed8c72652d28356c35fb1f8b
607,133
from datetime import datetime import time def generate_module_stream_version(timestamp=False): """Generates a version of a module stream.

    The version of a module stream can be an arbitrary unix timestamp or a
    timestamp taken from the commit of a git branch.

    :param timestamp: unix timestamp; falsy values (including 0) fall back
        to the current time
    :type timestamp: int, optional
    :return: module stream version in YYYYMMDDHHMMSS form
        (the original docstring said str, but an int is returned)
    :rtype: int
    """ if timestamp: dt = datetime.utcfromtimestamp(int(timestamp)) else: dt = datetime.utcfromtimestamp(int(time.time())) # we need to format the timestamp so its human readable and becomes a module stream version version = int(dt.strftime("%Y%m%d%H%M%S")) return version
4e28ecb43f979c53525c31e8a5e84cddc11362a9
465,935
def himmelblau(p):
    """R^2 -> R^1 test function for optimization.

    The function has four local minima where himmelblau(xopt) == 0.
    """
    x, y = p
    term1 = (x * x + y - 11) ** 2
    term2 = (x + y * y - 7) ** 2
    return term1 + term2
d600c2c0ac77971e4e9b6667760b5df819e72061
450,560
import zlib


def hash(polygon):
    """Get a hash of a polygon.

    Parameters
    -----------
    polygon : shapely.geometry.Polygon
        Polygon to hash (its WKB bytes are checksummed)

    Returns
    ------------
    crc : int
        Adler-32 checksum of the polygon's WKB representation
    """
    return zlib.adler32(polygon.wkb)
10d58f376d752d955e21a8a4c35ce287b99fe4c0
604,249
def clean_int(
    i: int,
    ub: int,
    lb: int = 0,
) -> int:
    """Clean an integer according to a given upper and lower bound.

    Parameters
    ----------
    i : int
        The integer to be cleaned
    ub : int
        The inclusive upper bound
    lb : int, optional
        The exclusive lower bound, by default 0

    Returns
    -------
    int
        The cleaned integer: ub when i > ub, lb + 1 when i <= lb,
        otherwise i unchanged
    """
    if i > ub:
        return ub
    if i <= lb:
        return lb + 1
    return i
691a42bd1913aeed3632599d09b9e69d87c124b7
358,145
import hashlib


def faster_hash_args(*args: bytes) -> bytes:
    """Hash an entire list of bytes into one digest.

    This is faster than `better_hash_args` since it only hashes once.
    Currently single-hashes SHA-512 over the concatenation of *args*.
    """
    digest = hashlib.sha512(b"".join(args))
    return digest.digest()
e08b2aed96fca08b8fe7b6d355f928ebf95faf18
468,067
def vectors(vs):
    """Reshape a flat [x0..xn, y0..yn, z0..zn] array into per-node rows.

    For a list of vectors of the form [x0, ..., xn, y0, ..., yn,
    z0, ..., zn] this returns [[x0, y0, z0], ..., [xn, yn, zn]]
    as a zero-copy view.
    """
    node_count = len(vs) // 3
    return vs.view().reshape((node_count, -1), order="F")
a84aab7cbbfb1cc79391baae6e3519f62ae9c6ae
555,381
def float_list_string(vals, nchar=7, ndec=3, nspaces=2, mesg='', left=False):
    """Return a string to display the floats:

    vals    : the list of float values
    nchar   : [7] number of characters to display per float
    ndec    : [3] number of decimal places to print to
    nspaces : [2] number of spaces between each float
    mesg    : prefix prepended to the output
    left    : left-align each value instead of right-aligning
    """
    fmt = '%-*.*f%*s' if left else '%*.*f%*s'
    pieces = [fmt % (nchar, ndec, v, nspaces, '') for v in vals]
    return mesg + ''.join(pieces)
87be23b879df35e672f32a89bfa0266d73812083
690,410
def filter_numbers(numbers: list[str], position: int, value: str) -> list[str]:
    """Filter the list of numbers on the bit value at a given position.

    :param numbers: List of numbers as strings
    :param position: The bit position to check
    :param value: The value required at that bit position
    :return: List of matching numbers as strings
    """
    kept = []
    for number in numbers:
        if number[position] == value:
            kept.append(number)
    return kept
d0ab75ad817f79a1d1763564ee68ad26151879b6
452,782
import dateutil.relativedelta from datetime import datetime def format_time_difference(t1: datetime, t2: datetime): """Display the time difference from t1 to t2 in a human-readable form.

    Inspired by https://stackoverflow.com/a/11157649/243519.
    Zero-valued units are omitted, except seconds which are always shown
    (so two equal datetimes render as "0 seconds").
    """ difference = dateutil.relativedelta.relativedelta(t2, t1) return ', '.join([str(getattr(difference, attr)) + ' ' + attr for attr in ['years', 'months', 'days', 'hours', 'minutes', 'seconds'] if getattr(difference, attr) or attr == 'seconds'])
656758faf3b7b8929cdfda9bc94615501ec0f367
155,545
import collections


def sort_query_keys(part):
    """Recursively sort dict keys in a query structure.

    We do this to every mqlread with a cursor because graphd relies on GQL
    query string order to maintain the state of the cursor. Every dict ply
    becomes an OrderedDict with sorted keys; lists are rebuilt element-wise;
    scalars pass through unchanged.

    Args:
        part: any ply of your query structure.

    Returns:
        the same structure with every dict replaced by a key-sorted
        OrderedDict.
    """
    if isinstance(part, dict):
        ordered = collections.OrderedDict()
        for key in sorted(part.keys()):
            ordered[key] = sort_query_keys(part[key])
        return ordered
    if isinstance(part, list):
        return [sort_query_keys(item) for item in part]
    return part
7082002ea5e1c792761628ea243154e3c35caee6
337,827
def remove_background_noise(cov, min_cov):
    """Remove coverage background noise by setting bins with cov < min_cov
    to 0.

    :param cov: coverage data.frame; the first four columns are assumed to
        be positional metadata (including a 'chr' column) and the remaining
        columns per-sample coverage values
    :param min_cov: minimum coverage to include a window in analysis
    :return: a new frame of thresholded coverage values with 'chr'
        re-inserted as the first column
    """
    # Zero out every sub-threshold cell in the value columns (from col 5 on).
    cov_ft = cov.iloc[:, 4:].applymap(lambda x: 0 if x < min_cov else x)
    cov_ft.insert(0, 'chr', cov.chr)
    return cov_ft
f2508e1ab0ed3f50db5384593d8eed9c4587de58
422,902
def get_unbound_arg_names(arg_names, arg_binding_keys):
    """Determines which args have no arg binding keys.

    Args:
        arg_names: a sequence of the names of possibly bound args
        arg_binding_keys: a sequence of ArgBindingKey each of whose arg
            names is in arg_names
    Returns:
        a sequence of arg names that is a (possibly empty, possibly
        non-proper) subset of arg_names
    """
    # A set makes each membership test O(1).
    bound = {abk._arg_name for abk in arg_binding_keys}
    return [name for name in arg_names if name not in bound]
92d5ff319c1f96bb246a5d65c93b05b3440df508
634,689
from pathlib import Path


def example_repo_path(root_path: str) -> str:
    """Fixture helper: return the path of the example feature repository
    located under *root_path*."""
    repo_dir = Path(root_path).joinpath("example_feature_repo")
    return str(repo_dir)
939714b1403ddbbca2b78458368b35bda647193a
114,356
def encode_mem_instr(instr):
    """Encode an M-type instruction as a number.

    :param instr: list, M-type instruction operands,
        e.g. ['ldw', 'r1', 'r2', '10']
    :return: int, encoded instruction.
    """
    cmd, dst, src, off = instr[0], instr[1], instr[2], instr[3]
    # Opcode table; any unrecognized mnemonic falls back to 0x13 (stw).
    opcode = {'ldb': 0x10, 'ldw': 0x11, 'stb': 0x12}.get(cmd, 0x13)
    word = opcode << 25
    # NOTE(review): the digits of the register names and the offset are
    # parsed base-16 (e.g. 'r10' -> 16) — confirm this matches the ISA.
    word += int(''.join(c for c in dst if c.isdigit()), 16) << 20
    word += int(''.join(c for c in src if c.isdigit()), 16) << 15
    word += int(off, 16)
    return word
4c61ed13af7bad234b7abebe7e326f6936e6ec61
304,587
def list_to_string(ilist):
    """Take a list of instructions and combine them into a single string.
    This is a helper for compact_instructions().

    Note: a "\n" separator is only inserted when the accumulated string is
    non-empty, so leading empty instructions do not produce blank lines.
    """
    combined = ""
    for instruction in ilist:
        if combined:
            combined += "\n"
        combined += instruction
    return combined
bb77be424f4c0bf1b427ec24472632bfb1606b64
390,353
def get_network_id(networks, name):
    """Get network id based on name provided.

    Returns the "Id" of the first network whose "Name" equals *name*,
    or None when no network matches.
    """
    for network in networks:
        if network["Name"] == name:
            return network["Id"]
    return None
12e53ade2d661587a674435d8c57160d740aa48c
697,286
def get_insert_cmd(step):
    """Returns the command to be used with an insert for the provided step.

    This takes a predefined step to determine which type of insert is being
    performed during the production of the knownet_mappings combined tables.
    Based off of this step, it returns a MySQL command to be used with an
    INSERT INTO statement.

    Args:
        step (str): the step indicating the step during the production of
            the combined knownet_mapping tables

    Returns:
        str: the command to be used with an INSERT INTO statement at this
            step; an empty string for an unrecognized step
    """
    if step == 'gene':
        # xrefs attached directly to genes.
        cmd = ("SELECT DISTINCT xref.dbprimary_acc, xref.display_label, "
               "external_db.db_name, external_db.priority, "
               "external_db.db_display_name, gene.stable_id "
               "FROM xref INNER JOIN external_db "
               "ON xref.external_db_id = external_db.external_db_id "
               "INNER JOIN object_xref "
               "ON xref.xref_id = object_xref.xref_id "
               "INNER JOIN gene "
               "ON object_xref.ensembl_id = gene.gene_id "
               "WHERE object_xref.ensembl_object_type = 'Gene'")
    elif step == 'transcript':
        # xrefs attached to transcripts, resolved up to the owning gene.
        cmd = ("SELECT DISTINCT xref.dbprimary_acc, xref.display_label, "
               "external_db.db_name, external_db.priority, "
               "external_db.db_display_name, gene.stable_id "
               "FROM xref INNER JOIN external_db "
               "ON xref.external_db_id = external_db.external_db_id "
               "INNER JOIN object_xref "
               "ON xref.xref_id = object_xref.xref_id "
               "INNER JOIN transcript "
               "ON object_xref.ensembl_id = transcript.transcript_id "
               "INNER JOIN gene "
               "ON transcript.gene_id = gene.gene_id "
               "WHERE object_xref.ensembl_object_type = 'Transcript'")
    elif step == 'translation':
        # xrefs attached to translations, resolved via transcript to gene.
        cmd = ("SELECT DISTINCT xref.dbprimary_acc, xref.display_label, "
               "external_db.db_name, external_db.priority, "
               "external_db.db_display_name, gene.stable_id "
               "FROM xref INNER JOIN external_db "
               "ON xref.external_db_id = external_db.external_db_id "
               "INNER JOIN object_xref "
               "ON xref.xref_id = object_xref.xref_id "
               "INNER JOIN translation "
               "ON object_xref.ensembl_id = translation.translation_id "
               "INNER JOIN transcript "
               "ON translation.transcript_id = transcript.transcript_id "
               "INNER JOIN gene "
               "ON transcript.gene_id = gene.gene_id "
               "WHERE object_xref.ensembl_object_type = 'Translation'")
    elif step == 'transcript2stable':
        # Synthetic 'ensembl' rows mapping transcript stable ids to gene ids.
        cmd = ("SELECT DISTINCT transcript.stable_id AS dbprimary_acc, "
               "transcript.stable_id AS display_label, "
               "'ensembl' AS db_name, "
               "1000 AS priority, "
               "'ensembl' AS db_display_name, "
               "gene.stable_id "
               "FROM transcript "
               "INNER JOIN gene "
               "ON transcript.gene_id = gene.gene_id")
    elif step == 'translation2stable':
        # Synthetic 'ensembl' rows mapping translation stable ids to gene ids.
        cmd = ("SELECT DISTINCT translation.stable_id AS dbprimary_acc, "
               "translation.stable_id AS display_label, "
               "'ensembl' AS db_name, "
               "1000 AS priority, "
               "'ensembl' AS db_display_name, "
               "gene.stable_id "
               "FROM translation "
               "INNER JOIN transcript "
               "ON translation.transcript_id = transcript.transcript_id "
               "INNER JOIN gene "
               "ON transcript.gene_id = gene.gene_id")
    else:
        # Unknown step: caller receives an empty command.
        cmd = ''
    return cmd
2ae3f7862f6eb2d8c6b1867ee1925930daf60e6c
258,983
def get_ls(num):
    """Return the first `num` entries from the fixed line-style sequence."""
    available = (
        ':', '--', '-.', '-',
        ':', '--', '-.', '-',
        ':', ':', '--', '-.',
        '-', ':', '--', '-.',
        '-', ':',
    )
    return list(available[:num])
55ed144d5e6cc61b4c9dba8d6447ffa955f47c15
472,605
import copy


def uncontract_segmented(basis, use_copy=True):
    """Remove the segmented contractions from a basis set.

    This implicitly removes general contractions as well, but will leave
    sp, spd, ... orbitals alone.

    The returned basis may have functions with coefficients of zero and may
    have duplicate shells.

    If use_copy is True, the input basis set is not modified; otherwise it
    is updated in place.
    """
    working = copy.deepcopy(basis) if use_copy else basis

    for element in working['elements'].values():
        shells = element.get('electron_shells')
        if shells is None:
            continue

        expanded = []
        for shell in shells:
            n_am = len(shell['angular_momentum'])
            for exponent in shell['exponents']:
                single = shell.copy()
                single['exponents'] = [exponent]
                # One unit-coefficient column per angular momentum value
                # (equivalent to building a 1 x n_am row and transposing it).
                single['coefficients'] = [["1.00000000E+00"] for _ in range(n_am)]
                expanded.append(single)

        element['electron_shells'] = expanded

    return working
1f2541defec50881d3a10414c1d324f6ce0c4006
485,350
def nb_coverage_distance(epitope, peptide, mmTolerance = 0):
    """Determines whether peptide covers epitope; handles different lengths.

    To be a consistent distance matrix:
        covered = 0
        not-covered = 1

    If epitope is longer than peptide it is not covered.  Otherwise coverage
    is determined based on mmTolerance: the epitope is slid along the peptide
    and covered if any alignment has at most mmTolerance mismatches.

    Parameters
    ----------
    epitope : np.array
    peptide : np.array
    mmTolerance : int
        Number of mismatches tolerated; dist <= mmTolerance means covered.

    Returns
    -------
    covered : int
        Covered (0) or not-covered (1)
    """
    len_epi = len(epitope)
    len_pep = len(peptide)

    # An epitope longer than the peptide can never be covered.
    if len_epi > len_pep:
        return 1

    for start in range(len_pep - len_epi + 1):
        mismatches = 0
        for offset in range(len_epi):
            if epitope[offset] != peptide[start + offset]:
                mismatches += 1
                if mismatches > mmTolerance:
                    # Over budget for this alignment; try the next start.
                    break
        if mismatches <= mmTolerance:
            return 0
    return 1
46b88f83934465e8bb4b30f144b5acc2791c809a
23,455
def molar_volume_Le_Bas(MW, organic = True):
    """
    Return the molar volume [m³/mol] from the molecular weight.

    source : (HNS-MS)

    Parameters
    ----------
    MW : Molecular weight [kg/mol]
    organic : Chemical type, True if organic, the default is True.
    """
    mw_ratio = MW / 1000
    if organic:
        return 4.9807 * mw_ratio ** 0.6963
    return 2.8047 * mw_ratio ** 0.651
df5eefdc9905f65c3c2ae97be962b4a7204c875e
258,125
def dict_repr(obj):
    """
    Creates an unambiguous and consistent representation of a dictionary.

    Keys are emitted in sorted order so equal dictionaries always produce the
    same string; nested dictionaries are rendered recursively under the same
    rule.

    Args:
        obj: The dictionary to produce the representation of

    Returns:
        The string representation
    """
    def render(value):
        # Only dicts need the recursive, order-stable treatment.
        return dict_repr(value) if isinstance(value, dict) else repr(value)

    # str.join replaces the original append-then-trim-', ' construction.
    parts = (repr(key) + ': ' + render(obj[key]) for key in sorted(obj))
    return '{' + ', '.join(parts) + '}'
a118019d87b4f5999ab716ecd6e108186dee7423
254,621
def from_s3_input(slug, file):
    """
    Returns the S3 URL for an input file required by the DAG.

    The prefix is a Jinja template expression resolved by Airflow at runtime.
    """
    template = (
        '{{ conf.get("core", "reach_s3_prefix") }}'
        '/%s/%s'
    )
    return template % (slug, file)
51534adba061b424c84377cf143d03767ff82946
178,955
def xyz_from_rgb(rgb):
    """Return an XYZ tuple converted from an RGB tuple."""
    # Rows of the linear RGB -> XYZ conversion matrix.
    matrix = (
        (0.412453, 0.357580, 0.180423),
        (0.212671, 0.715160, 0.072169),
        (0.019334, 0.119193, 0.950227),
    )
    return tuple(
        row[0] * rgb[0] + row[1] * rgb[1] + row[2] * rgb[2]
        for row in matrix
    )
c744e745a319eaf8892a46ce2cf942070fc3090d
180,050
def replacef(s, r=''):
    """
    Returns a function that replaces substrings with a replacement.

    Examples
    --------
    >>> f = replacef('inside', 'outside')
    >>> f('Thankfully, I have spent the day inside.')
    'Thankfully, I have spent the day outside.'
    """
    def _replace(text):
        return text.replace(s, r)
    return _replace
a7d5852d457e1b01f8297aae68013dc9ba855af0
270,142
def extended_euclidean(a, b):
    """Extended Euclidean algorithm.
    Return s, t and r such that r = gcd(a, b) and s * a + t * b = r"""
    # Each state is (remainder, s-coefficient, t-coefficient); the invariant
    # s * a + t * b == remainder holds for both states at every step.
    older = (a, 1, 0)
    newer = (b, 0, 1)
    while newer[0] != 0:
        quotient = older[0] // newer[0]
        older, newer = newer, tuple(o - quotient * n for o, n in zip(older, newer))
    return older[1], older[2], older[0]
9b4c082cf1e68d70af682bb9ff2ba5e49e3f90b1
638,480
def common_shape(imgs, axis):
    """
    Find smallest height or width of all images.  The value along `axis`
    will be `None`, the other will be the smallest of all values.

    Args:
        imgs (list[np.array]): list of images
        axis (int): axis images will be concatenated along

    Returns:
        tuple(int): height, width
    """
    if axis == 0:
        # Stacking vertically: heights vary, so report the minimum width.
        smallest_width = min(img.shape[1] for img in imgs)
        return (None, smallest_width)
    # Stacking horizontally: widths vary, so report the minimum height.
    smallest_height = min(img.shape[0] for img in imgs)
    return (smallest_height, None)
d0b357f20e955932e135aeedc318a969d7f9f883
674,609
def is_numpy(value):
    """
    Determines whether the specified value is a NumPy value, i.e. an
    numpy.ndarray or a NumPy scalar, etc.

    Parameters:
    -----------
    value:
        The value for which is to be determined if it is a NumPy value or not.

    Returns:
    --------
    boolean:
        Returns True if the value is a NumPy value and False otherwise.
    """
    # A NumPy object's type lives in the "numpy" package (possibly a
    # sub-module), so only the first dotted component matters.
    root_module = type(value).__module__.partition(".")[0]
    return root_module == "numpy"
3bc294d739e9b108abf7cde3c072611890b7374a
693,264
def get_options(options, names):
    """Returns a dictionary with options for the given keys.

    Keys in `names` that are absent from `options` are silently skipped.

    Args:
        options: dict
        names: list of keys

    Returns:
        dict
    """
    # Dict comprehension replaces the manual build-and-assign loop.
    return {name: options[name] for name in names if name in options}
1b4df1967cf73b56d5ae2ed998daf7c49ce76a7a
669,315
def clamp(x, _min, _max):
    """Clamp a value between a minimum and a maximum"""
    # Apply the floor first, then the ceiling (so _max wins if bounds cross,
    # matching min(_max, max(_min, x))).
    floored = max(_min, x)
    return min(_max, floored)
fe9979a329cb130ebc9ecb7b812882770c962652
666,921
def _pick_counters(log_interpretation):
    """Pick counters from a dictionary possibly containing step and history
    interpretations.

    The 'step' interpretation wins over 'history'; an empty dict is returned
    when neither holds truthy counters.
    """
    for interpretation_key in ('step', 'history'):
        section = log_interpretation.get(interpretation_key, {})
        counters = section.get('counters')
        if counters:
            return counters
    return {}
9afa112d566e6a427fba2776c115444a02253135
64,019
def to_numpy(tensor):
    """
    Converts tensor to numpy ndarray.  Will move tensor to cpu and detach it
    before conversion.  The resulting ndarray shares memory with the
    (cpu copy of the) tensor.

    :param tensor: input pytorch tensor.
    :return: numpy ndarray with shared memory of the given tensor.
    """
    on_cpu = tensor.cpu()
    detached = on_cpu.detach()
    return detached.numpy()
0e968897612ae7d1ba93b41c2c33c77c3b43e61e
363,946
def listify(val, length):
    """Ensure `val` is a list of size `length`.

    :param val: iterable or constant
    :param length: integer length
    :returns: listified `val`.
    :raises: RuntimeError if 1 < len(val) != length
    """
    # Strings count as scalars here, even though they are iterable.
    treat_as_iterable = hasattr(val, '__iter__') and not isinstance(val, str)
    if not treat_as_iterable:
        return [val] * length

    items = list(val)
    if len(items) == 1:
        return items * length
    if len(items) == length:
        return items
    raise RuntimeError("mismatched length")
f77bfc2a49255fdfa02ec95546c80a7fa982ad9b
394,492
def custom(n):
    """ Returns true if the field name is not in the person, address, contact, donation."""
    lowered = n.lower()
    reserved_prefixes = ("person.", "address.", "contact.", "donation.")
    return all(prefix not in lowered for prefix in reserved_prefixes)
72eb4d72069d4849d02fa4973c04f3337ce0e43c
214,264
def region(w, h, bw, bh):
    """Region

    Returns a new set of region points based on a current width and height
    and the bounding box.

    Arguments:
        w (int): The current width
        h (int): The current height
        bw (int): The boundary width
        bh (int): The boundary height

    Returns:
        dict
    """
    # NOTE(review): 'w'/'h' in the result equal offset + bound (far edge),
    # not the box size — this mirrors the original arithmetic; confirm intent.
    if w > bw:
        # Width overflows the bounds: center horizontally.
        x_off = int(round((w - bw) / 2.0))
        return {'x': x_off,
                'y': 0,
                'w': int(bw + round((w - bw) / 2.0)),
                'h': bh}
    # Otherwise assume the height overflows: center vertically.
    y_off = int(round((h - bh) / 2.0))
    return {'x': 0,
            'y': y_off,
            'w': bw,
            'h': int(bh + round((h - bh) / 2.0))}
b0a9df2dd50b18753dad0169579d7bac60724b7f
505,664
def __equivalent_lists(list1, list2):
    """Return True if list1 and list2 contain the same items.

    Uses membership tests in both directions (items need not be hashable);
    like the original, duplicates are not counted — only presence and equal
    length are checked.
    """
    if len(list1) != len(list2):
        return False
    forward = all(item in list2 for item in list1)
    backward = all(item in list1 for item in list2)
    return forward and backward
f9ef5b643ed0ba4b6bddd3a6394af9a22ed118ee
674,396
def dereference_type(t):
    """
    Removes everything after the last star character in a type string,
    except for 'void *' and 'char *'.

    A string without any '*' is returned stripped but otherwise unchanged.
    """
    stripped = t.strip()
    # Generic pointer types keep their star so they stay distinguishable.
    if stripped in ["void *", "char *"]:
        return stripped
    try:
        return t[:t.rindex("*")].strip()
    except ValueError:
        # str.rindex raises ValueError when no '*' is present; the original
        # bare `except:` swallowed every exception type.
        return stripped
381ff993510c18046675f6dd53a04bc4db8a1e00
362,766
import pathlib


def get_test_cases(which_subset):
    """Return a list of test case files for the given subset.

    `which_subset` must be "valid" or "invalid"; the files come from the
    sibling demes-spec checkout's test-cases directory.
    """
    assert which_subset in ("valid", "invalid")
    here = pathlib.Path(__file__).parent.resolve()
    case_dir = here / ".." / "demes-spec" / "test-cases" / which_subset
    files = [str(candidate) for candidate in case_dir.glob("*.yaml")]
    # Sanity check that the checkout actually contains test cases.
    assert len(files) > 1
    return files
a6f90df366f222b37a90d818660624ec5b189a09
676,981
def _is_number(strg):
    """
    Helper function which determines whether the given string parameter
    contains a valid number.  Returns True if the string is a number and
    False otherwise.
    """
    try:
        # Only the conversion itself belongs in the try; the unused
        # `isitanumber` local from the original is dropped.
        float(strg)
    except (ValueError, TypeError):
        # ValueError: non-numeric text; TypeError: non-string/number input.
        return False
    return True
71ed08f9439791cd5173ec71351368bd8182961e
478,371
def get_end_time(op):
    """Return the end time string of the operation (None when absent)."""
    metadata = op.get('metadata', {})
    return metadata.get('endTime')
c670976423cefe7a12478839eeac0e984f660bb5
567,434
def read_data_from_file(file_path):
    """Read the file's bytes and return them decoded as UTF-8 text."""
    # Binary read + explicit decode: no newline translation is performed.
    with open(file_path, 'rb') as handle:
        raw = handle.read()
    return raw.decode('utf-8')
2a1dde982468cfbd0294b1c96e73345395d4bfc8
406,783
def resolve(regex_match):
    """Return regex match string or empty string if no matches"""
    return regex_match.group(0) if regex_match else ''
656cc41e8e07bf50060dd6b254a6e1994a7576b6
164,555