content
stringlengths
39
9.28k
sha1
stringlengths
40
40
id
int64
8
710k
from typing import Optional


def to_safe_str_or_none(value: Optional[str]) -> Optional[str]:
    """Return *value* stripped and with CR/LF removed, or None if empty/None."""
    if value is None:
        return None
    cleaned = str(value.strip()).replace('\r', '').replace('\n', '')
    return cleaned if cleaned else None
2946183f58aa51deb4deeb450296af95ca41f72e
16,648
def queens_constraint(A, a, B, b): """ Constraint is satisfied if it's the same column (queens are assigned by columns), or if the queens are not in the same row or diagonal """ if A == B: return True return a != b and A + a != B + b and A - a != B - b
bafb55092fd2ffb1e0dadba060048a5af11e99b0
248,617
def create_yaml_dict(camera_assignment, mjpeg_info_dict):
    """Create the camera-assignment dictionary for saving to a YAML file.

    Args:
        camera_assignment: mapping of camera guid -> assigned camera number.
        mjpeg_info_dict: mapping of camera guid -> info dict with a 'computer' key.

    Returns:
        dict mapping 'camera_<n>' -> {'guid': guid, 'computer': computer}.
    """
    yaml_dict = {}
    # FIX: dict.iteritems() is Python 2 only and raises AttributeError on
    # Python 3; items() works on both.
    for camera_id, value in camera_assignment.items():
        camera_name = 'camera_{0}'.format(value)
        computer = mjpeg_info_dict[camera_id]['computer']
        yaml_dict[camera_name] = {'guid': camera_id, 'computer': computer}
    return yaml_dict
effcbb41d8bca9dcbded552d48759081a1e24c5b
561,176
def attrgetter(attr):
    """Access the object attribute by its name `attr`.

    >>> attrgetter("lower")("ASD")()
    'asd'
    """
    # The inner function intentionally reuses the name so the returned
    # callable's __name__ stays 'attrgetter'.
    def attrgetter(obj):
        return getattr(obj, attr)

    return attrgetter
43bc5f77d3ca9e5964e0ccaa5bf43f3683ec8cc4
227,527
import re


def replace_regex_link(pages, regex, basename, link):
    """Replace links in a bunch of pages based on a regex.

    Args:
        pages: mapping of subdir -> {page name -> page content}; updated in place.
        regex: regex fragment matching the text to turn into a markdown link.
        basename: base page name; a page is not linked to itself, except for
            the special "Quattor" name.
        link: the link target.

    Returns:
        The same `pages` mapping, updated in place.
    """
    regex = r'( |^|\n)%s([,. $])' % regex
    for subdir in pages:
        for page in pages[subdir]:
            content = pages[subdir][page]
            if (basename not in page or basename == "Quattor") and basename in content:
                # FIX: use a raw string for the replacement; "\g<1>" in a
                # non-raw literal is an invalid escape sequence (warning, and
                # an error in future Python versions).
                content = re.sub(regex, r"\g<1>[%s](%s)\g<2>" % (basename, link), content)
                pages[subdir][page] = content
    return pages
1010df754f16abef105f9ba96711cae063809b8d
380,529
from typing import Tuple


def binary_to_int(bintuple: Tuple[int]) -> int:
    """Convert a binary tuple to the corresponding integer.

    The most significant bit is the first element of the tuple.

    :param bintuple: Binary tuple
    :return: Integer value
    """
    result = 0
    # Shift-accumulate, consuming bits MSB-first.
    for bit in bintuple:
        result = (result << 1) | (1 if bit else 0)
    return result
8a4f1296d9600aa4621efa8a2c32671149b36cec
549,158
import logging


def info(line):
    """Log *line* at INFO level on the root logger; returns None."""
    return logging.info(line)
036d264c204d9d0b33a67ad4bc3c6bda65b4475b
304,061
def multiplicative_inverse(e, phi):
    """Return d with (d * e) % phi == 1, via the extended Euclidean algorithm.

    Raises:
        ValueError: if e is not invertible modulo phi (gcd(e, phi) != 1).
    """
    d, next_d = 0, 1
    temp_phi = phi
    while e > 0:
        quotient = temp_phi // e
        # Advance the Bezout coefficient and the remainder pair in lockstep.
        d, next_d = next_d, d - quotient * next_d
        temp_phi, e = e, temp_phi - quotient * e
    if temp_phi > 1:
        raise ValueError('e is not invertible by modulo phi.')
    if d < 0:
        d += phi
    return d
9934f6e2f86ff0ef4165728d59f11ba0d1cad928
12,897
def experiment_rank_by_average_normalized_score(experiment_pivot_df):
    """Rank experiments by the mean of per-benchmark scores normalized to 0-100.

    100 corresponds to the highest coverage reached on that benchmark.
    """
    # Per-row (benchmark) maximum used as the normalization denominator.
    per_benchmark_max = experiment_pivot_df.max(axis='columns')
    normalized = experiment_pivot_df.div(per_benchmark_max, axis='index') * 100
    ranking = normalized.mean().sort_values(ascending=False)
    return ranking.rename('average normalized score')
e13fef63ba69ea5269fc5de7fd22396a65b1a3eb
106,559
def flip_dataframe(df, new_colname='index'):
    """Flip a table so the values of its first column become the column names.

    Args:
        df (DataFrame): Data frame to be flipped.
        new_colname (str): Name of the new first column. Defaults to 'index'.

    Returns:
        DataFrame: flipped data frame.
    """
    new_columns = [new_colname] + df.iloc[:, 0].tolist()
    flipped = df.T.reset_index()
    flipped.columns = new_columns
    # Drop the first transposed row, which duplicates the new column names.
    return flipped.iloc[1:, :]
3a7c733644e2c67398a511c9dea7fa80845bbecf
702,073
from typing import Set


def done(job: str, completed: Set[str]) -> str:
    """Convert set membership into 'Yes' or 'No'.

    :param job: The job to check if it was acted on
    :param completed: The jobs acted on
    :returns: 'Yes' if *job* is in *completed*, else 'No'
    """
    if job in completed:
        return 'Yes'
    return 'No'
6d056f2471dafb3cab274ffb916e073c2fb62cf6
48,801
def ssXXsuffix(i):
    """Turn an integer into an ssXX ending between .ss01 and .ss20, e.g. 5 -> '.ss05'."""
    clamped = max(1, min(i, 20))
    return ".ss%0.2d" % clamped
821b52bfff3179407c1482c8798cb6e83d3d1c61
562,473
def central_smoothing(proba, gamma=1e-4):
    """Apply central smoothing as shown in Malinin et al., 2020.

    :param proba: Tensor of class probabilities, shape (batch, num_classes).
    :param gamma: Smoothing strength.
    :return: (1 - gamma) * proba + gamma / num_classes.
    """
    num_classes = proba.shape[1]
    return (1 - gamma) * proba + gamma * 1 / num_classes
647ffe75270a2e0791e532d3d125db10b7f768aa
357,606
from typing import Union
from pathlib import Path
import sqlite3


def connect_db(location: Union[str, Path]) -> sqlite3.Connection:
    """Connect to the database.

    :param location: The path to the database file.
    :return: A :class:sqlite3.Connection object.
    :rtype: sqlite3.Connection
    :raises ValueError: if no file exists at *location*.

    Usage:

        >>> loc = 'mkname/data/names.db'
        >>> query = 'select name from names where id = 1;'
        >>> con = connect_db(loc)
        >>> result = con.execute(query)
        >>> tuple(result)
        (('Noah',),)
        >>> disconnect_db(con)
    """
    # Check that the file exists first, since sqlite3 would otherwise
    # silently create a new empty database.
    path = Path(location)
    if not path.is_file():
        msg = f'No database at "{path}".'
        raise ValueError(msg)

    # FIX: use the documented sqlite3.connect() factory rather than
    # instantiating sqlite3.Connection directly.
    return sqlite3.connect(path)
9ca512d6949171a2ae875df808ab2ac533d25862
450,169
def sortbykey(dictlist, sortkey):
    """
    Sort a list of dictionaries by a common key and return the sorted list.

    >>> sortbykey([{'A': 3}, {'A': 1}, {'A': 2}], 'A')
    [{'A': 1}, {'A': 2}, {'A': 3}]

    :param dictlist: a list of dictionaries sharing a common key
    :param sortkey: a dictionary key used to sort a list of dictionaries
    :return: list of dictionaries sorted by a key
    """
    return sorted(dictlist, key=lambda k: k[sortkey])
bcb0b5ce8097a85327473465621a44460335c493
618,200
def get_sample_mean(values: list) -> float:
    """Calculate the sample mean (x-bar) of the elements in a list.

    :param values: list of numeric values
    :return: sample mean
    :raises ValueError: if *values* is empty (previously this surfaced as an
        uninformative ZeroDivisionError).
    """
    if not values:
        raise ValueError('cannot compute the sample mean of an empty list')
    return sum(values) / len(values)
182befe514f406340f0b1f37e892ad1add1f0ed2
9,543
def qname(fn):
    """Returns the module-qualified name of the given class or function."""
    # FIX: use the module the object was defined in (fn.__module__), not this
    # module's __name__, which qualified everything with the wrong module.
    return '{0}.{1}'.format(fn.__module__, fn.__name__)
bc6eacbf4b27a8c7e61335afe913b24f555d9daa
430,289
def formation_factor(arch_a, phi, arch_m):
    """Compute the Archie Formation Factor: F = a / phi**m.

    Parameters
    ----------
    arch_a : float
        Archie Tortuosity Factor - a
    phi : float
        Porosity (decimal)
    arch_m : float
        Archie Cementation Exponent - m

    Returns
    -------
    float
        Archie Formation Factor
    """
    porosity_term = phi ** arch_m
    return arch_a / porosity_term
7edfe3638ec93235afe335a031c40b94bc0b7bc2
446,191
def comment_parser(reddit_comment_object):
    """Parse a reddit comment object and return selected fields.

    Returns a tuple: (timestamp, id, score, ups, downs, body, thread title,
    thread url, subreddit display name).
    """
    c = reddit_comment_object
    return (
        c.created_utc,
        c.id,
        c.score,
        c.ups,
        c.downs,
        c.body,
        c.link_title,
        c.link_url,
        c.subreddit.display_name,
    )
2331c0b52201272a39d0b3befeb8a962f59c05a6
36,322
def ParseHeader(header):
    """Parse the '_'-separated structured header into CHROM/START/STRAND fields."""
    fields = header.split('_')
    return {
        'CHROM': fields[1],
        'START': int(fields[2]),
        'STRAND': fields[4],
    }
0a66116b1851450d184c048e3e3438da69c667a6
565,049
def _check_lunch_hours(time, lunch_start, lunch_end):
    """Return True if *time* falls OUTSIDE the lunch interval.

    Note: despite the name, this is True when time is before lunch_start or
    at/after lunch_end — i.e. the time is NOT during lunch.
    """
    return (time < lunch_start) or (time >= lunch_end)
97f80818af3577623de0e66011a3ab3f8170f878
616,035
def Top1Criterion(x, y, model):
    """Return an elementwise mask: True where the model's top-1 prediction equals y."""
    predicted_labels = model(x).topk(1)[1].view(-1)
    return predicted_labels == y
4c27295d26607f54d052628ee5d5389dba118eeb
587,348
def openFile(file_path):
    """Open a file in read mode and return its contents.

    FIX: uses a context manager so the handle is closed even if read() raises
    (the previous version leaked the handle on error).
    """
    with open(file_path, 'r') as md_file:
        return md_file.read()
3f637474910ff6b3549f22cc7384b6d1a419b5b7
650,767
def names_in_dict_like_options(dopts):
    """Return variable names in `dopts`.

    >>> names_in_dict_like_options([('a[k]', 'b'), ('c[k]', 'd')])
    ['a', 'c']
    """
    names = []
    for key, _value in dopts:
        # Everything before the first '[' is the variable name.
        names.append(key.split('[', 1)[0])
    return names
676a79b859582dff150735b61711d5e48765d98e
476,548
def isbn_gendigit(numStr):
    """ (string)-->(string + 1-digit)
    Generate the 10th digit for a given 9-digit ISBN string.

    The check value is sum(digit * position) % 11 over the nine digits
    (positions 1..9); a remainder of 10 is written as 'X'. Prints the
    completed ISBN, or 'unacceptable' for wrong-length input.

    ('123456788')-->1234567881
    ('123456789')-->123456789X
    """
    if len(numStr) != 9:
        print ('unacceptable')
        return None
    checksum = sum(int(digit) * position
                   for position, digit in enumerate(numStr, start=1))
    remainder = checksum % 11
    suffix = 'X' if remainder == 10 else str(remainder)
    print ('ISBN 10-digit: ', str(numStr) + suffix)
    return (str(numStr) + suffix)
7d013515259eda942bf5fe9c948d25f6915e4936
204,890
def get_relationship_dict(source, destination, role):
    """Build a relationship object carrying the Assignee Type attribute."""
    relationship = {
        "source": source,
        "destination": destination,
        # The relationship inherits the destination's context.
        "context": destination.context,
        "attrs": {"AssigneeType": role},
    }
    return relationship
3cd4e2790f8027f63ea34906c460ecebe29515a6
189,887
def normalize_unit_id(unit_id):
    """Make unit_id directory-name worthy.

    convert:
        from: block-v1:course+type@vertical+block@digits
        to:   vertical_block_digits
    """
    # Drop everything before the first '@', join the rest with '_',
    # then flatten remaining '+' separators.
    tail_parts = unit_id.split("@")[1:]
    return "_".join(tail_parts).replace("+", "_")
e9f85b8948c051ecf047228a6edc13641c2517b9
383,492
def count_points_in_polygon(x, points_sindex):
    """Count candidate points for a polygon.

    Inputs are:
        x -- row of a dataframe, exposing .bounds
        points_sindex -- spatial index of the dataframe with points in the
            region to consider

    Outputs are:
        Number of index entries intersecting the polygon's bounding box.
    """
    hits = points_sindex.intersection(x.bounds)
    return len(list(hits))
ca08b6a089aeea667032bf1bd269bde3a2d69c08
349,964
def safe_unicode(e):
    """str(e) with various fallbacks.

    Used for exceptions, which may not be safe to stringify directly.
    """
    for converter in (str, repr):
        try:
            return converter(e)
        except UnicodeError:
            pass
    return u'Unrecoverably corrupt evalue'
4da1d9a2a89d944b61be15923db6756594373371
549,937
import pathlib


def findyaml(basedir):
    """Return a list of absolute paths to YAML files recursively discovered
    by walking the directory tree rooted at basedir.

    FIX: matches both '.yml' and '.yaml' extensions; the previous version
    only globbed '*.yml' despite claiming to find yaml files.
    """
    base = pathlib.Path(basedir)
    paths = []
    for pattern in ('*.yml', '*.yaml'):
        paths.extend(str(path.resolve()) for path in base.rglob(pattern))
    return paths
a47055c84a8ad193968dbb8b76d38354c083ffff
496,310
import time


def format_date(date):
    """Return an svn-compatible date string for DATE (seconds since epoch).

    A Subversion date looks like '2002-09-29T14:44:59.000000Z'."""
    utc_struct = time.gmtime(date)
    return time.strftime("%Y-%m-%dT%H:%M:%S.000000Z", utc_struct)
3c1ac81872730509c0d70701e1fe04d7a903574d
165,687
def get_subject_list(manifest_df, subject_list_file):
    """Return the list of subject ids to process.

    If a subject list file is provided, read one subject per line from it;
    otherwise collect all unique subject ids from the s3 spreadsheet.

    :param manifest_df: pandas dataframe created from the s3 csv
    :param subject_list_file: cli path to file containing list of subjects
    :return: subject_list
    """
    subject_list = set()
    if subject_list_file:
        print('\tSubjects:\t%s' % subject_list_file)
        # FIX: use a context manager; the previous open() inside the list
        # comprehension leaked the file handle.
        with open(subject_list_file) as handle:
            subject_list = [line.rstrip('\n') for line in handle]
    # Otherwise get all subjects from the S3 spreadsheet
    else:
        print('\tSubjects:\tAll subjects')
        for manifest_name in manifest_df['manifest_name'].values:
            subject_id = manifest_name.split('.')[0]
            subject_list.add(subject_id)
    return list(subject_list)
51c0dcdb2954482ca60f80ce91154eb67c9fe2e7
576,671
def _ComputeLineCounts(old_lines, chunks): """Compute the length of the old and new sides of a diff. Args: old_lines: List of lines representing the original file. chunks: List of chunks as returned by patching.ParsePatchToChunks(). Returns: A tuple (old_len, new_len) representing len(old_lines) and len(new_lines), where new_lines is the list representing the result of applying the patch chunks to old_lines, however, without actually computing new_lines. """ old_len = len(old_lines) new_len = old_len if chunks: (_, old_b), (_, new_b), old_lines, _ = chunks[-1] new_len += new_b - old_b return old_len, new_len
ba99714016b69d87f260c8e7b8793468a2f7b04d
706,301
def station_by_river(stations):
    """Map each river to the names of the stations that lie on it.

    For a given river the output value is a list of station names.
    """
    rivers_station = {}
    for station in stations:
        rivers_station.setdefault(station.river, []).append(station.name)
    return rivers_station
65b80dab4f753c422e410bb2f77b22c8778bb586
409,378
def remove_extra_zeroes(number: str) -> str:
    """Remove all zeroes from the end of the string.

    The first character is never removed, so e.g. '0' stays '0' and '00'
    becomes '0'.

    :param number: numeric string
    :return: string without trailing zeroes
    """
    # Keep the leading character untouched; strip trailing zeros from the rest.
    return number[:1] + number[1:].rstrip('0')
ebaa4a44d0c920378184908e809cb9eab228825d
193,556
def fast_track(remaining, unit, seed, neighbours, elements):
    """
    If any of the neighbours found are in the list of elements "elements",
    fast-track them to the unit list, such that they are not searched as
    "seed" atoms. Useful for e.g. one-coordinate atoms (often two organic
    molecules approach most closely via H-atoms, which might lead to the
    merging of two building units).

    Args
    ----
    remaining: set
        Set of atom indices still left to classify.
    unit: list
        List of atoms-images in building unit so far.
    seed: tuple
        Seed atom to search for neighbours (ix, image).
    neighbours: list
        Neighbouring atom entries to search over; fast-tracked entries are
        removed from this list in place.
    elements: list
        Atomic symbols for elements that are being fast-tracked.

    Returns
    -------
    tuple
        The updated (remaining, unit, neighbours).
    """
    ft = []
    # Iterate in reverse so pop(a) does not shift indices we still visit.
    for a in range(len(neighbours)-1, -1, -1):
        if neighbours[a]["site"].specie.symbol in elements:
            ft.append(neighbours.pop(a))
    # Get index and image for the fast-tracked atoms; each image is offset by
    # the seed's image so positions are expressed relative to the seed cell.
    branchTips = [(a["site_index"], a["image"] + seed[1]) for a in ft]
    ix = {a[0] for a in branchTips}
    # Update the "remaining" set and the building unit list.
    unit += branchTips
    remaining -= ix
    return remaining, unit, neighbours
0a308783fb19b9d94774a1734240f7a36952653d
255,339
def media_final_aprovado_reprovado(p1, p2, ep1, ep2):
    """Return True if the final weighted average is at least 7 (approved).

    Takes the two exam grades (p1, p2) and two programming-exercise grades
    (ep1, ep2). Exams weigh 70% and exercises 30% within each term; the two
    term grades weigh equally in the final average.
    """
    term1 = p1 * 0.7 + ep1 * 0.3
    term2 = p2 * 0.7 + ep2 * 0.3
    final_average = (term1 + term2) / 2
    return final_average >= 7
d63977f02325a25954c32e8e61bc957017679254
137,331
def make_frames_list(frames):
    """Create the ffmpeg select-filter string for a specific set of frames.

    This function is used inside the extract_frames() function.

    Args:
        frames (list[int]): List of frame indices.

    Returns:
        str: String in the appropriate format for ffmpeg.
    """
    terms = "+".join(f"eq(n\\,{x})" for x in frames)
    return f"select='{terms}'"
58b2563bf2e15a7bd7c15bc52067a3e33e113b67
451,975
import re


def is_fasta(filename):
    """Check if filename is FASTA based on extension (case-insensitive).

    Return:
        Boolean
    """
    fasta_patterns = (r"\.fa*s[ta]*$", r"\.fa$")
    return any(re.search(pattern, filename, flags=re.I)
               for pattern in fasta_patterns)
424062726548707bd2e6fb7d8af8b3684a959542
104,703
def trunc(text, max_length: int = 2000):
    """Truncate *text* with a trailing '...' if it exceeds *max_length*."""
    if len(text) > max_length:
        return text[: max_length - 3] + "..."
    return text
628445714d6554b4c5bab5036db9e1cb20a86d55
530,902
from datetime import datetime


def timestamp2string(timestamp):
    """Convert a POSIX timestamp to a 'YYYY-MM-DD HH:MM' local-time string.

    :param timestamp: timestamp value (anything int() accepts)
    :return: formatted string
    """
    moment = datetime.fromtimestamp(int(timestamp))
    return moment.strftime('%Y-%m-%d %H:%M')
4db9b782443b45c7c9848bd7da671c550dded515
244,438
def tag_tranfsers(df):
    """Tag txns with a description indicating a transfer payment.

    (The typo in the function name is kept for backwards compatibility.)
    """
    df = df.copy()
    tfr_strings = [' ft', ' trf', 'xfer', 'transfer']
    exclude = ['fee', 'interest']
    description = df.transaction_description
    looks_like_transfer = description.str.contains('|'.join(tfr_strings))
    is_excluded = description.str.contains('|'.join(exclude))
    df.loc[looks_like_transfer & ~is_excluded, 'tag'] = 'transfers'
    return df
4fdfd775ec423418370776c34fac809a513f91b5
703,774
def frequencies(text):
    """Return a dict mapping each character found in *text* to its count."""
    freq = {}
    for ch in text:
        freq[ch] = freq.get(ch, 0) + 1
    return freq
3e4604b1355692cfa82bea0aa1a363d64c06e875
535,445
import base64 def _bytes_to_url64(bytes_data): """Convert bytes to custom-url-base64 encoded string""" return base64.urlsafe_b64encode(bytes_data).decode().replace('=', '~')
cfb16657655f9ef0c6a429ecf9cf36dbed55d08b
663,340
import re


def count_n_repetitions(text, n=1):
    """
    Count how often characters are followed by themselves for n times.

    text: UTF-8 compliant input text
    n: how often the character should be repeated, defaults to 1
    """
    # Lookahead for n more copies of the captured character; [\s\S] matches
    # any character including newlines.
    pattern = rf'([\s\S])(?=\1{{{n}}})'
    return len(re.findall(pattern, text))
b9061f7ce6cd34d37440f3fbe42d1a5b858bdc6d
231,131
import struct


def pack_integer(value):
    """Pack an integer value into 32-bit, little-endian bytes."""
    little_endian_u32 = struct.Struct('<I')
    return little_endian_u32.pack(value)
6bd83ce74ddc914cfcb3564ca84bcb4855c2a95d
606,865
def time_interpolation(array0, array1, date0, date, date1):
    """Linearly interpolate between two arrays at time 'date'.

    array0 and array1 hold the values at dates 'date0' (before) and 'date1'
    (after). Returns the interpolated array.
    """
    weight = (date - date0) / (date1 - date0)
    return (1 - weight) * array0 + weight * array1
a813f54f66300f66bb1e17b6ab5d0e527a18690e
232,092
def binning2string(binspectral, binspatial):
    """
    Args:
        binspectral (int): spectral binning factor
        binspatial (int): spatial binning factor

    Returns:
        str: Binning in binspectral, binspatial order, e.g. '2,1'
    """
    return f'{binspectral:d},{binspatial:d}'
cac3635a9835609b6c3c15a7b52922e7bc91ead2
341,945
def _indent_str(indent): """ Return a MOF indent pad unicode string from the indent integer variable that defines number of spaces to indent. Used to format MOF output. """ return u''.ljust(indent, u' ')
adec45806160e6ac34c463df34b40583107f018a
193,205
def VersionKey(version):
    """Convert a version string to a comparable value.

    All old style values are considered older than all new style values.
    The actual values returned should only be used for comparison against
    other VersionKey results.

    Args:
        version: String with a build version "1.2.3" or "0.12.3.4"

    Returns:
        A value comparable against other version strings.
    """
    parts = [int(piece) for piece in version.split('.')]
    # 3-number versions are new style; 4-number versions are old style.
    assert len(parts) in (3, 4)
    if len(parts) == 3:
        # 1.2.3 -> [1, 0, 1, 2, 3]
        return [1, 0] + parts
    # 0.12.3.4 -> [0, 0, 12, 3, 4]
    return [0] + parts
3d182131170ec66af257eaa6630727d00738f3b1
354,890
def GetKeyUri(key_ref):
    """Returns the URI used as the default for KMS keys.

    This should look something like '//cloudkms.googleapis.com/v1/...'

    Args:
        key_ref: A CryptoKeyVersion Resource.

    Returns:
        The string URI (the resource self-link with its scheme stripped).
    """
    self_link = key_ref.SelfLink()
    # Drop the scheme prefix (e.g. 'https:'), keeping the '//host/...' part.
    return self_link.split(':', 1)[1]
a64137cb7c1c08817f8ab20429d161a0ec846487
122,013
def sort_key(item):
    """Sort helper: key on the first dict key of *item* (or of its first element)."""
    if len(item) != 1:
        item = item[0]
    keys = list(item.keys())
    return (keys[0],)
f8f67f1d8a3485404260ca65bec1d1e6f9cea260
325,700
import json


def create_user(user_info, app_client):
    """
    Create a user via the API, providing back the id and the token.

    @param user_info: dictionary with email and password
    @param app_client: a Flask app client to create against
    @returns user_id, token for newly created user
    """
    response = app_client.post(
        '/api/create_user',
        data=json.dumps(user_info),
        content_type='application/json'
    )
    payload = json.loads(response.data.decode("utf-8"))
    return payload['id'], payload['token']
26661ddb485ab8be600f53b63e2dc5ca744c342c
32,537
def proceed() -> bool:
    """Prompt the user whether to proceed with extraction; loop until y/n/empty."""
    while True:
        response = input("::Proocced with extraction ([y]/n)?")
        if response in ("Y", "y", ""):
            return True
        if response in ("N", "n"):
            return False
        # Any other answer: ask again.
6d4c93ee7a216d9eb62f565657cf89a2e829dd30
21,559
def PNT2TidalOcto_Tv14(XA, beta0PNT=0):
    """ TaylorT2 0PN Octopolar Tidal Coefficient, v^14 Timing Term.
    XA = mass fraction of object
    beta0PNT = 0PN Octopole Tidal Flux coefficient
    """
    # Operation order matches the original expression exactly to keep
    # floating-point results bit-identical.
    four_thirds = 4 / 3
    return four_thirds * (520 + beta0PNT) - 2080 * XA / 3
52ebefc8ea4954565eb6500cb1f924ce9d8d51fc
408,925
def listbox_width(items, max_width=30):
    """Calculate the width for a listbox: longest item length, capped at max_width.

    listbox_width(["foo", "bar", "asdf"], 10) #=> 4 ("asdf")
    listbox_width(["foo", "asdf", "beep boop"], 5) #=> 5 (max_width)
    """
    longest = max(len(entry) for entry in items)
    return min(max_width, longest)
c956cb683d717649b6ccfc498edc95be0251913f
117,699
def clipAlpha(aj, H, L):
    """Clip alpha_j into the interval [L, H].

    Parameters:
        aj - candidate alpha_j value
        H - upper bound for alpha
        L - lower bound for alpha
    Returns:
        aj - the clipped alpha_j value
    """
    return max(L, min(aj, H))
f641001d53bff8328bf54c617b9427235a5ad866
274,131
def gm_move1(state, b1, b2):
    """
    If goal is ('pos',b1,b2), b1 is clear, we're holding nothing, and b2 is
    either the table or clear, then assert goals to get b1 and put it on b2.
    Returns None when the preconditions do not hold.
    """
    holding_nothing = state.holding['hand'] == False
    if b2 != 'hand' and state.clear[b1] and holding_nothing:
        if b2 == 'table' or state.clear[b2]:
            return [('pos', b1, 'hand'), ('pos', b1, b2)]
    return None
03a880350a71d6a28d2fbe1cc4eb38259ee52182
446,062
def split_elem_def(path):
    """Get the element name and attribute selectors from an XPath path."""
    leaf = path.rpartition('/')[2]
    pieces = leaf.rsplit('[')
    # Each selector piece still carries its trailing ']'; chop it off.
    selectors = [piece[:-1] for piece in pieces[1:]]
    return (pieces[0], selectors)
63394c8a1ecf5d4e2fbeb1f7f3b58f4c386b29e2
652,600
import json


def config_file(request, tmp_path, repo_path):
    """
    Create a temporary config file to use with the -c option.

    The file will configure repo_path as the template repository and add any
    keyword arguments of the ``prefilled_values`` marker as prefilled values.
    If no such marker is present, the Name "Clark Kent" and Email
    "[email protected]" are added.

    :param request: pytest fixture request (used to read test markers)
    :param tmp_path: pytest-provided temporary directory
    :param repo_path: fixture providing the template repository path
    :return: path to the written JSON config file
    """
    # Defaults used when the test has no 'prefilled_values' marker.
    prefilled = dict(Name="Clark Kent", Email="[email protected]")
    marker = request.node.get_closest_marker("prefilled_values")
    if marker is not None:
        # Marker kwargs completely replace (not merge with) the defaults.
        prefilled = dict(**marker.kwargs)
    config_file = tmp_path / "config.json"
    config_file.write_text(json.dumps(dict(TPLDIR=str(repo_path), prefilled=prefilled)))
    return config_file
66e5f8284760dd8d8278741823f8e3bbdf3e7e60
256,431
def entity_tostring(entity):
    """Converts one GNL (Google Natural Language) entity to a readable string."""
    metadata = ", ".join('"%s": "%s"' % (key, value)
                         for key, value in entity.metadata.items())
    mentions = ", ".join('"%s"' % mention for mention in entity.mentions)
    template = ('{name: "%s",'
                ' type: "%s",'
                ' metadata: {%s},'
                ' salience: %s,'
                ' mentions: [%s]}')
    return template % (entity.name, entity.type, metadata,
                       entity.salience, mentions)
dd3e30247e36186e6eccfe1e32f8f31bf3577660
15,108
def gen_features(columns, classes=None, prefix='', suffix=''):
    """Generate a feature definition list for DataFrameMapper.

    Params:

    columns     a list of column names to generate features for.

    classes     a list of classes for each feature, a list of dictionaries
                with transformer class and init parameters, or None.

                If a list of classes is provided, each is instantiated with
                default arguments, e.g.:

                    classes = [StandardScaler, LabelBinarizer]

                If a list of dictionaries is provided, each should have a
                'class' key with the transformer class; all other keys are
                passed into that class's constructor, e.g.:

                    classes = [
                        {'class': StandardScaler, 'with_mean': False},
                        {'class': LabelBinarizer}
                    ]

                None entries are skipped; with None (or nothing left after
                filtering), each feature is left as is.

    prefix      add prefix to transformed column names

    suffix      add suffix to transformed column names.
    """
    if classes is None:
        return [(column, None) for column in columns]

    # FIX: filter out None entries once, instead of re-filtering (and
    # rebinding) `classes` on every iteration of the column loop.
    classes = [cls for cls in classes if cls is not None]

    feature_defs = []
    for column in columns:
        # A fresh dict per column, matching the original behavior (each
        # tuple carries its own arguments object).
        arguments = {}
        if prefix:
            arguments['prefix'] = prefix
        if suffix:
            arguments['suffix'] = suffix

        if not classes:
            feature_defs.append((column, None, arguments))
            continue

        feature_transformers = []
        for definition in classes:
            if isinstance(definition, dict):
                params = definition.copy()
                klass = params.pop('class')
                feature_transformers.append(klass(**params))
            else:
                feature_transformers.append(definition())
        feature_defs.append((column, feature_transformers, arguments))
    return feature_defs
03e2c91484c6886d1696b5020e17493cb087430f
436,300
def join_as_compacted_paragraphs(paragraphs):
    """
    :param paragraphs: List containing individual paragraphs; potentially with
        extraneous whitespace within. NOTE: the list is compacted in place as
        a side effect.
    :return: String with \n separated paragraphs and no extra whitespace
    """
    # Mutate in place so the caller observes the compacted paragraphs too.
    for i, paragraph in enumerate(paragraphs):
        paragraphs[i] = ' '.join(paragraph.split())
    return '\n'.join(paragraphs)
227337c5512c20ef9f84de6bccc61d9b1f26403a
199,623
import math


def _get_xy_index(files, dims, layout):
    """Get the x and y indices from a list of filename dictionaries.

    The FilePattern iterate function returns a list of dictionaries
    containing a filename and variable values parsed from a filename. This
    function uses that list of dictionaries and assigns each entry a
    position in a grid. If dims contains two characters, images get an
    x-position from the first character and a y-position from the second.
    If dims contains a single character, positions are assigned so the
    images fit into the smallest square possible.

    The grid positions are stored in the file dictionaries keyed by the
    position of dims within layout. The layout variable lists all variables
    at every grid layer, from the smallest to the largest grid. Following
    DeepZoom folder-structure notation, the highest-resolution values get
    the largest index: if dims is the first element of a 3-item layout,
    positions are stored as '2_gridX' and '2_gridY'.

    Inputs:
        files - a list of dictionaries containing file information
        dims - the dimensions by which the grid will be organized
        layout - a list indicating the grid layout
    Outputs:
        grid_dims - Dimensions of the grid
    """
    grid_dims = []
    if len(dims)==2:
        # get row and column vals
        cols = [f[dims[0]] for f in files]
        rows = [f[dims[1]] for f in files]
        # Get the grid dims
        col_min = min(cols)
        row_min = min(rows)
        col_max = max(cols)
        row_max = max(rows)
        grid_dims.append(col_max - col_min + 1)
        grid_dims.append(row_max - row_min + 1)
        # Find the layer index for this dims pair: start at the deepest
        # (highest-resolution) layer and walk outward until a layer
        # containing one of the dims characters is found.
        index = len(layout)-1
        for l in layout[:-1]:
            if dims[0] in l or dims[1] in l:
                break
            index -= 1
        # convert to 0 based grid indexing, store in dictionary
        for f in files:
            f[str(index) + '_gridX'] = f[dims[0]]-col_min
            f[str(index) + '_gridY'] = f[dims[1]]-row_min
    else:
        # determine number of rows and columns: pack the unique positions
        # into the smallest (near-)square grid
        pos = [f[dims[0]] for f in files]
        pos = list(set(pos))
        pos_min = min(pos)
        col_max = int(math.ceil(math.sqrt(len(pos))))
        row_max = int(round(math.sqrt(len(pos))))
        grid_dims.append(col_max)
        grid_dims.append(row_max)
        # Find the layer index for this single dims value (see above).
        index = len(layout)-1
        for l in layout[:-1]:
            if l==dims:
                break
            index -= 1
        # Store grid positions in the dictionary, filling row-major.
        for f in files:
            f[str(index) + '_gridX'] = int((f[dims[0]]-pos_min) % col_max)
            f[str(index) + '_gridY'] = int((f[dims[0]]-pos_min)//col_max)
    return grid_dims
428daa6a5c958c0a8b534defc9bd8785486d563c
323,732
def leftmostNonzeroEntries(M):
    """Returns the leftmost nonzero entries of M.

    For each row of M that has at least one nonzero position, take the
    absolute value of its first (leftmost) nonzero entry; all-zero rows
    are skipped.

    NOTE(review): relies on the Sage matrix API (`nonzero_positions_in_row`,
    `dimensions`) — this is not a NumPy interface.
    """
    return [ abs(M[l][M.nonzero_positions_in_row(l)[0]])
             for l in range(0,M.dimensions()[0])
             if M.nonzero_positions_in_row(l) != [] ]
9e42297dc3000a41dcdceebff10c4fc53e1709ac
7,068
def count_seq_chars(seq, alphabet):
    """Count characters from the given alphabet that are present in sequence.

    Args:
        seq (str): sequence
        alphabet (str/list): list of allowed characters

    Returns:
        A list of the characters' occurrence counts, in alphabet order.

    Examples:
        >>> alphabet = 'ACDEFGHIKLMNPQRSTVWY'
        >>> seq = 'MKSTGWHFSG'
        >>> print(count_seq_chars(seq, alphabet))
        [0, 0, 0, 0, 1, 2, 1, 0, 1, 0, 1, 0, 0, 0, 0, 2, 1, 0, 1, 0]
    """
    return [seq.count(c) for c in alphabet]
dffd7b6e39102583559e75c284bba242a1a6844a
207,362
def replace_item(obj, key, replace_value):
    """
    Recursively replace every matching key in a dictionary with a value.

    Args:
        obj (dict): Dictionary where the item is being replaced.
        key (obj): Key to replace in the dictionary.
        replace_value (obj): What to replace the key with.

    Returns:
        dict: The same dictionary, with every occurrence of *key* (at any
        nesting level) mapped to *replace_value*.
    """
    # First recurse into nested dicts, then overwrite the key at this level;
    # a dict-valued match is thereby replaced wholesale, as before.
    for child_key, child_value in obj.items():
        if isinstance(child_value, dict):
            obj[child_key] = replace_item(child_value, key, replace_value)
    if key in obj:
        obj[key] = replace_value
    return obj
1feccc57081ef2d3b756ee33f4fa50655cc2b1d1
364,308
def make_args(args_dict, required, options):
    """Make a command line argument list, for testing argument parsers.

    Args:
        args_dict (:obj:`dict`): argument names and their values
        required (:obj:`list`): required command line arguments
        options (:obj:`list`): optional command line arguments

    Returns:
        :obj:`list`: list of strings as they would appear in `sys.argv[1:]`
    """
    argv = []
    for option in options:
        if option in args_dict:
            argv += ['--' + option, str(args_dict[option])]
    for name in required:
        argv.append(str(args_dict[name]))
    return argv
3673e5ea3a4eeeda982bebcd3dade382b2124d63
375,311
def valid_time(time):
    """Validate a Time object.

    Return True for a valid time (all fields non-negative, minute and
    second below 60), False otherwise.

    time: Time object with hour, minute and second attributes.
    """
    non_negative = time.hour >= 0 and time.minute >= 0 and time.second >= 0
    in_range = time.minute < 60 and time.second < 60
    return non_negative and in_range
9baf8788828d113735407c5a16c5b4085aded3e3
402,027
def pad_left_side(sequences, padding_value):
    """Left-padding variant of torch.nn.utils.rnn.pad_sequence.

    :param sequences: list of 1-D tensors
    :param padding_value: fill value, e.g. tokenizer.pad_token_id
    :return: tensor of shape (len(sequences), max sequence length) with each
        sequence right-aligned and padding on the left
    """
    max_len = max(seq.size(0) for seq in sequences)
    out_tensor = sequences[0].data.new(len(sequences), max_len).fill_(padding_value)
    for row, seq in enumerate(sequences):
        # Right-align: copy the sequence into the tail of the row.
        out_tensor[row, max_len - seq.size(0):] = seq
    return out_tensor
220c43e0d35e05677838f773b8d02d6d28f062b2
570,287
def vector_sub(vector1, vector2):
    """
    Subtract vector2 from vector1, component-wise (first three components).

    :param vector1: list(float, float, float)
    :param vector2: list(float, float, float)
    :return: list(float, float, float)
    """
    return [vector1[axis] - vector2[axis] for axis in range(3)]
19e7eb245e3260dcaf91e8962366d06460640fb8
376,672
def _get_scale(image, md_path_or_scale): """Get a valid scale from an image and a metadata path or scale. Parameters ---------- image : np.ndarray The input image. md_path_or_scale : float or image filename The path to the file containing the metadata, or the scale. Returns ------- scale : float """ scale = None try: scale = float(md_path_or_scale) except ValueError: pass if md_path_or_scale is not None and scale is None: md_path = md_path_or_scale.split(sep='/') meta = image.meta for key in md_path: meta = meta[key] scale = float(meta) else: if scale is None: scale = 1 # measurements will be in pixel units return scale
6552040bac03649d2493b3bb1dc4b7980c3f1a5e
74,801
def get_gb_person_id(acc_id, vehicle_index, person_index):
    """
    Returns global person id for GB, built from the accident id,
    the vehicle index and the person index.

    The id is constructed as <Acc_id><Vehicle_index><Person_index>,
    packing vehicle and person indices into the last three digits.
    """
    return acc_id * 1000 + vehicle_index * 100 + person_index
dcd05291fed94088213afd0b1ba67b1a85ff4cc3
595,411
def _create_appconfig_props(config_props, prop_list): """A helper function for mk-appconfig and ch-appconfig that creates the config properties dictionary Arguments: config_props {Dictionary} -- A dictionary with key-value pairs that gives the properties in an appconfig prop_list {List} -- A list with items of the form <Key>=<Value> that describe a given property in an appconfig Returns: config_props {Dictionary} -- A dictionary with key-value pairs that gives the properties in an appconfig """ # Iterate through list of properties, check if correct format, convert each to name/value pairs add to config_props dict # Ex. good proplist is ['name1=value1', 'name2=value2'] # Ex. bad proplist is ['name1=valu=e1', 'name2=value2'] for prop in prop_list: name_value_pair = prop.split("=") if (len(name_value_pair) != 2): raise ValueError("The format of the following property specification is not valid: {}. The correct syntax is: <name>=<value>".format(prop)) config_props[name_value_pair[0]] = name_value_pair[1] return config_props
33989a62ebe22130394561c459cf52d37b1b94a1
207,314
def to_locale(language):
    """
    Turn a language name (en-us) into a locale name (en_US).

    Extracted `from Django <https://github.com/django/django/blob/e74b3d724e5ddfef96d1d66bd1c58e7aae26fc85/django/utils/translation/__init__.py#L274-L287>`_.
    """
    lang, _, region = language.lower().partition("-")
    if not region:
        return lang
    # Regions longer than two characters get only their first character
    # capitalized (sr-latn -> sr_Latn); two-character regions are fully
    # upper-cased (en-us -> en_US). Anything after a second dash is kept
    # verbatim.
    head, _, rest = region.partition("-")
    head = head.title() if len(head) > 2 else head.upper()
    if rest:
        head = head + "-" + rest
    return lang + "_" + head
a36760999e764be45a6a81a3f7e25696f9bb931b
330,923
def edge_is_present(G, source, target):
    """ Returns true of there is an edge from source to target

    Parameters
    source, target: igraph vertex indices
    G: directed igraph object
    """
    # igraph returns -1 (instead of raising) when error=False and the
    # edge does not exist.
    eid = G.get_eid(v1=source, v2=target, directed=True, error=False)
    return eid != -1
87131f0ddd936e2da66cd709d58d834279c56024
497,685
def format_date_key(date_field):
    """ format a datetime into year-month-date (ISO 'YYYY-MM-DD') """
    return '{:%Y-%m-%d}'.format(date_field)
5db1a5624cf650427e64f3322e03dc6e9bd12e4b
38,360
import math


def norm(x):
    """
    Returns the L^2 (Euclidean) norm of a vector x

    Parameters
    ----------
    x : np.ndarray : a numpy array (or any iterable) of floats

    Returns
    -------
    float : the L^2 norm of x

    Examples
    --------
    >>> norm([1.0, 2.0, 3.0, 3.0, 1.0, 1.0])
    5.0
    """
    # math.fsum gives an accurately-rounded float sum; the original
    # manual loop also shadowed the builtin `sum`.
    return math.sqrt(math.fsum(component * component for component in x))
6462c2ceaec3aeb1b4f9195c93549d0e1ec2627d
116,599
def expand_shape(shape):
    """
    Expands a flat shape to an expanded (nested) shape.

    Level ``i`` of the result is a nested list of dimensions
    ``shape[0] x ... x shape[i-1]`` filled with the value ``shape[i]``.

    >>> expand_shape([2, 3])
    [2, [3, 3]]

    >>> expand_shape([2, 3, 4])
    [2, [3, 3], [[4, 4, 4], [4, 4, 4]]]
    """
    expanded = [shape[0]]
    for i in range(1, len(shape)):
        # Innermost axis: shape[i] repeated shape[i-1] times.
        level = [shape[i]] * shape[i - 1]
        # Wrap outward so axis j ends up with length shape[j]. The
        # original iterated j forward over range(1, i) and multiplied by
        # shape[j], which disagreed with the doctest for len(shape) >= 3
        # (e.g. [2, 3, 4] produced 3 outer copies instead of 2).
        for j in range(i - 2, -1, -1):
            level = [level] * shape[j]
        expanded.append(level)
    return expanded
ecfb8c0742c4b1cd9e044da6766d7ee3e63ea158
166,406
def line_edit_val(le_wg, default_val):
    """Get QLineEdit field value.

    Parameters
    ----------
    le_wg : QtGui.QLineEdit
        QtGui.QLineEdit widget
    default_val : float
        Value returned when the widget text is not a valid float

    Returns
    -------
    le_val : float
        the value in the widget, or ``default_val`` on parse failure
    """
    try:
        return float(le_wg.text())
    except ValueError:
        return default_val
3b41bea6255a34d0774497a51d99bb84f9a99104
98,653
def bytes2str(string):
    """Converts b'...' string into '...' string. On PY2 they are equivalent. On PY3 its utf8 decoded."""
    return str(string, "utf8")
4d8aa0720ad0ae013ed6078e84c9eb74179daa37
116,199
def dest(region):
    """Extract the destination part (last two elements) of a region triple"""
    return tuple(region[1:3])
f1e0842dda2173d0a1b640b767b610a693b5ca3d
606,653
from typing import Dict
import torch
from typing import Tuple


def output_transform_select_affinity_abs(
    output: Dict[str, torch.Tensor]
) -> Tuple[torch.Tensor, torch.Tensor]:
    """
    Select predicted affinities and experimental (target) affinities
    (in absolute value) from an engine output dictionary.

    Parameters
    ----------
    output: Dict[str, ignite.metrics.Metric]
        Engine output

    Returns
    -------
    Tuple[torch.Tensor, torch.Tensor]
        Predicted binding affinity and experimental binding affinity
        (absolute value)

    Notes
    -----
    Used as :code:`output_transform` in
    :class:`ignite.metrics.metric.Metric`. Experimental affinities can
    carry a negative sign for bad poses (used by :class:`AffinityLoss`);
    standard metrics need the absolute value, which is returned here.
    """
    predicted = output["affinities_pred"]
    experimental = output["affinities"].abs()
    return predicted, experimental
d6c35638712cef19f31d763a7fdf13072d98968a
420,582
def find_column(text, token):
    """
    Compute column number

    Parameters
    ----------
    text : str
        The parsed text
    token : sly.token
        The token whose positition is to find

    Returns
    -------
    int
        The corresponding column number
    """
    # Position of the newline preceding the token; clamp to 0 when the
    # token sits on the first line (rfind returns -1 then).
    newline_pos = text.rfind('\n', 0, token.index)
    if newline_pos < 0:
        newline_pos = 0
    return (token.index - newline_pos) + 1
41742b46e8e9c5d30a17444f2a38d6666068a95b
585,888
def update_weights(Y_prediction, Y_truth, beta, current_D):
    """
    Update AdaBoost sample weights from the current weights and beta.

    Inputs:
    Y_prediction --> Array of predicted output labels
    Y_truth      --> Array of ground-truth labels
    beta         --> scalar, current beta value (< 1 down-weights
                     correctly classified samples)
    current_D    --> Array of current weights, shape (m,) where m is the
                     number of samples; left unmodified.

    Output:
    D_next --> normalized weights for the next AdaBoost iteration,
               shape (m,)
    """
    # Copy first: the original aliased current_D and scaled the caller's
    # array in place, a surprising side effect for a function that
    # claims to return the next weights.
    D = current_D.copy()
    # Down-weight correctly classified samples by beta.
    D[Y_truth == Y_prediction] *= beta
    # Normalize so the weights sum to one.
    return D / D.sum()
b8ac466f6016817e64130179a2300cbe028b17ac
280,745
def dict_to_str(some_dict, prefix=""):
    """
    Converts a dict to a deterministic string for dicts that have the
    same keys and values (the string will be the same regardless of the
    original ordering of the keys).
    """
    pieces = ["|{}={}".format(key, some_dict[key]) for key in sorted(some_dict)]
    return prefix + "".join(pieces)
e8e0c6b17b85126a4f6252280634615226dcf253
362,015
def mem_empty_payload(mem_default_payload):
    """Provide a membership payload whose action field is blanked out.

    Mutates and returns the given default payload.
    """
    mem_default_payload["action"] = ""
    return mem_default_payload
4a8f5ec3505a3b40b424edbfc6a9e83618292224
574,705
def array_unique(l):
    """
    Removes all duplicates from `l`.

    Note: element order is not preserved (set-based).
    """
    return [*{*l}]
ad7e60c1fd498314aa67f2749966f6723d5427f1
135,031
def get_headers(token):
    """Return HTTP Token Auth header."""
    headers = dict()
    headers["x-rh-identity"] = token
    return headers
939a5179a465cef27ee3b916a210c26a0ec546d4
243,240
def sort_by_size(s):
    """Sorts the input elements by decreasing length.
    Returns a list with the sorted elements."""
    ordered = list(s)
    ordered.sort(key=len, reverse=True)
    return ordered
6d10592248d1f95066b1fea1a457eef913cc5be7
153,622
def binomial(n, k):
    """Calculate the binomial coefficient n over k.

    Supports negative n via the reflection identity
    C(n, k) = (-1)^k * C(-n + k - 1, k).  Returns 0 for k < 0,
    and always returns an int (the original's ``b /= (1+i)``
    produced a float, e.g. 10.0 instead of 10).
    """
    if k < 0:
        return 0
    if n < 0:
        return binomial(-n + k - 1, k) * (-1) ** k
    b = 1
    for i in range(k):
        # Multiply before dividing: the product of i+1 consecutive
        # integers is divisible by (i+1)!, so // is exact here.
        b = b * (n - i) // (i + 1)
    return b
49f42e556d3c454a346b48a64969adfd75876de5
274,354
def set_name_x(name_x, x, constant=False):
    """Set the independent variable names in regression; return generic
    names if the user provides no explicit names.

    Parameters
    ----------
    name_x : list of string
        User provided exogenous variable names.
    x : array
        User provided exogenous variables.
    constant : boolean
        If False (default), 'CONSTANT' is not yet in name_x and is
        prepended to the returned names.

    Returns
    -------
    name_x : list of strings
    """
    if name_x:
        names = list(name_x)
    else:
        names = ['var_{}'.format(col + 1) for col in range(x.shape[1])]
    if not constant:
        names.insert(0, 'CONSTANT')
    return names
0784a22bfcb5a72e39e0293d216a55a388cb7707
143,655
from typing import Any


def required(value: Any):
    """
    Value is required: truthy values pass, falsy values fail.
    """
    return not not value
58be7424edacd0ea3fb10ad4b3c0bd2dcf6d9200
341,853
def check_previewUrls(search_results):
    """ Checks if a result provides a previewUrl for thumbnails.

    Otherwise a placeholder (None) will be set. This keeps too much
    logic out of template rendering.

    Args:
        search_results: Contains all search results
    Returns:
         search_results
    """
    for key, value in search_results.items():
        srv_entries = value.get(key, {}).get("srv", [])
        for entry in srv_entries:
            if len(entry.get("previewUrl", "")) == 0:
                entry["previewUrl"] = None
    return search_results
4f78f3d14167970a7988aea310fe2eaeae498ec3
670,103
def not_(child):
    """Select devices that does not match the given selectors.

    >>> not_(and_(tag('sports'), tag('business')))
    {'not': {'and': [{'tag': 'sports'}, {'tag': 'business'}]}}
    """
    selector = {}
    selector['not'] = child
    return selector
9c8eae0125377b7e9942158e132f7f3786342d5e
372,037
def map_list(lst):
    """Map each element of *lst* to its number of occurrences.

    Returns a plain dict {element: count}.  Uses collections.Counter
    instead of the original hand-rolled get/``== None`` loop.
    """
    from collections import Counter
    return dict(Counter(lst))
a6898b9066acaf1de22e30f2dd660eb0ddade98f
310,016
def counter_count_odd(counter):
    """Count the number of items whose count is odd.

    Could be used for example to count odd numbered prime factors which
    prevent the number from being a perfect square.

    counter: mapping item -> count (e.g. a collections.Counter).
    """
    # Iterate the values directly instead of key-then-lookup, and let
    # the generator/sum pair do the counting.
    return sum(1 for count in counter.values() if count % 2)
35a68dec75bacd00fad06f496ee3c58dc7a59c7b
170,960
def _getAction(sBase): """Get action from base url. Basically return the URL with no GET params""" n = sBase.find('?') if n != -1: return sBase[:n] else: return sBase
d9410c58118919e4f1a0970c8620e664adba9662
365,354
def olivine(piezometer=None):
    """Data base for olivine piezometers.

    Returns the material parameter, the exponent parameter and a warn
    with the "average" grain size measure to be used, plus whether the
    calibration uses linear intercepts and its correction factor.

    Parameter
    ---------
    piezometer : string or None
        the piezometric relation; when None, print the available
        piezometer names and return None.

    References
    ----------
    | Jung and Karato (2001) https://doi.org/10.1016/S0191-8141(01)00005-0
    | Van der Wal et al. (1993) https://doi.org/10.1029/93GL01382

    Assumptions
    -----------
    The Van der Wal (1993) and Jung and Karato (2001) piezometers expect
    the linear mean apparent grain size in microns calculated from
    equivalent circular diameters (ECD) with no stereological
    correction; the value is converted to linear intercept (LI) grain
    size using the De Hoff and Rhines (1968) correction. Since LI was
    originally multiplied by 1.5 (correction factor), the final relation
    is: LI = (1.5 / sqrt(4/pi)) * ECD
    """
    if piezometer is None:
        for line in ('Available piezometers:',
                     "'Jung_Karato'",
                     "'VanderWal_wet'",
                     "'Tasaka_wet'"):
            print(line)
        return None

    warn = 'Ensure that you entered the apparent grain size as the arithmetic mean in linear scale'
    # name -> (B, m, linear_interceps, correction_factor)
    calibrations = {
        'Jung_Karato': (5461.03, 0.85, True, 1.5),
        'VanderWal_wet': (1355.4, 0.75, True, 1.5),
        'Tasaka_wet': (719.7, 0.75, False, 1.2),
    }
    if piezometer not in calibrations:
        # Show the valid options (same as the no-argument call) before failing.
        olivine()
        raise ValueError('Piezometer name misspelled. Please choose between valid piezometers')

    B, m, linear_interceps, correction_factor = calibrations[piezometer]
    return B, m, warn, linear_interceps, correction_factor
387ea9413acdf551abe108ba5ba7dda51e162c51
7,288
from functools import reduce


def cumsum(x):
    """
    Custom cumsum to avoid a numpy import.

    Returns the running (inclusive prefix) sums of *x* as a list.
    The original reduce-based version rebuilt the accumulator list on
    every step (O(n^2) overall); itertools.accumulate does the same
    thing in a single O(n) pass.
    """
    from itertools import accumulate
    return list(accumulate(x))
43664c4b0ddfbdf74d87da51eea8c48158ff5725
199,951