Columns: content (string, 39 to 9.28k chars), sha1 (string, 40 chars), id (int64, 8 to 710k)
import traceback
import warnings

def _execute_select_statements(statements, database):
    """Execute a list of select statements in one atomic transaction.

    If any statement fails, the transaction is rolled back, and a warning is issued.

    Args:
        statements (list or sqlalchemy statement): List of sqlalchemy select statements.
        database (sqlalchemy.MetaData): The bind argument must be set.

    Returns:
        result (list): List of selection results. A selection result is a list of
            tuples where each tuple is a selected row.
    """
    if not isinstance(statements, (list, tuple)):
        statements = [statements]

    results = []
    engine = database.bind
    conn = engine.connect()
    # acquire lock
    trans = conn.begin()
    try:
        for stat in statements:
            res = conn.execute(stat)
            results.append(list(res))
            res.close()
        # release lock
        trans.commit()
        conn.close()
    except (KeyboardInterrupt, SystemExit):
        trans.rollback()
        conn.close()
        raise
    except Exception:
        exception_info = traceback.format_exc()
        warnings.warn(
            "Unable to read from database. Try again later. The traceback was:\n\n"
            f"{exception_info}"
        )
        trans.rollback()
        conn.close()
        results = [[] for stat in statements]
    return results
af80b6137074ebe78b64f04e1c0bf0cfcd2f4884
514,035
import itertools

def take_nth(n, seq):
    """
    Every nth item in seq

    >>> list(take_nth(2, [10, 20, 30, 40, 50]))
    [10, 30, 50]
    """
    return itertools.islice(seq, 0, None, n)
5504abc8399552a82abbf31d56c7da093c23596d
260,892
def heuristic(current: list, target: list) -> int:
    """
    Calculate the estimated Distance between current node and target node
    based on Manhattan Distance
    """
    return (abs(target[0] - current[0]) + abs(target[1] - current[1]))
80b724e1cdb06cf657295bd715915a1aa9537266
380,315
def _is_right(a, b, p):
    """given a line (defined by points a and b) and a point (p),
    return true if p is to the right of the line and false otherwise
    raises a ValueError if p is collinear with a and b
    """
    ax, ay = a[0], a[1]
    bx, by = b[0], b[1]
    px, py = p[0], p[1]
    value = (bx - ax) * (py - ay) - (by - ay) * (px - ax)
    if value == 0:
        raise ValueError(
            "p is collinear with a and b, 'tis neither right nor left.")
    return value < 0
6d630eadc77587de60ef6aefff3cac01d9ba3191
41,085
def darpaNodeIP(node_id, octet=0):
    """Return IP address of radio node on DARPA's network."""
    return '192.168.{:d}.{:d}'.format(node_id+100, octet)
36c44dc92771773d7239e915a68255406842677f
127,779
from typing import Union

def build_tk_geometry_str(width_in_pixels: Union[int, str],
                          height_in_pixels: Union[int, str],
                          posx_in_pixels: Union[int, str],
                          posy_in_pixels: Union[int, str]) -> str:
    """
    Return a string suitable for passing to tk's geometry method.

    :param width_in_pixels: The width geometry property in units of pixels of type integer or string.
    :param height_in_pixels: The height geometry property in units of pixels of type integer or string.
    :param posx_in_pixels: The x position geometry property in units of pixels of type integer or string.
    :param posy_in_pixels: The y position geometry property in units of pixels of type integer or string.
    :return: A string suitable for use with tk's geometry method taking the form of
        <width_in_pixels>x<height_in_pixels>+posx_in_pixels+posy_in_pixels
    """
    return '%sx%s+%s+%s' % (width_in_pixels, height_in_pixels,
                            posx_in_pixels, posy_in_pixels)
19e2084f0ed00aca672713f7a1d097e865eef3ae
354,550
from typing import Sequence
from typing import Dict
from typing import Any
from typing import Tuple
from typing import Set

def _integrate_file_changes_in_commits(
    commits: Sequence[Dict[str, Any]]
) -> Tuple[Set[str], Set[str]]:
    """Integrate the file changes from the sequence of individual commits to
    determine the overall set of added/modified and removed files.
    """
    files_written = set()  # new or modified content
    files_removed = set()  # deleted content
    for commit in commits:
        commit_added = set(commit["added"])
        commit_modified = set(commit["modified"])
        commit_removed = set(commit["removed"])
        # Update tally of modified files
        files_written |= commit_added | commit_modified
        # Update tally of removed files
        files_removed |= commit_removed
        # Added files are no longer removed
        files_removed -= commit_added
        # Deleted files are no longer written
        files_written -= commit_removed
    return files_written, files_removed
a590cc6060efc7440820d9e32b2d90f746c82366
462,602
from typing import List
import csv
from io import StringIO

def split_token(token: str) -> List[str]:
    """
    Splits tokens separated by ";". If the resulting values have semicolons,
    the values must be quoted.

    >>> split_token('"foo";"bar;far"')
    ['foo', 'bar;far']
    """
    stream = csv.reader(StringIO(token), delimiter=";")
    return next(stream)
0bf0374dd7e7119192c9ff3b1d16afa2a68f58f4
102,897
import copy

def deepcopy_mask(obj, memo, mask=None):
    """Generalized method for deep copies of objects.

    Certain types of attributes cannot be copied naively by `copy.deepcopy`;
    for example, `threading.Lock` objects. These may be manually specified in
    the `mask` argument.

    Args:
        obj (object): The object to be copied.
        mask (Dict[str, object]): Attributes to be replaced manually. Keys are
            attribute names and values are new attribute values.
        memo (dict): Tracks already copied objects to prevent a recursive loop.
            See the documentation for the standard module `copy`.
    """
    if mask is None:
        mask = {}
    cls = obj.__class__
    # copy object
    clone = cls.__new__(cls)
    memo[id(obj)] = clone
    # copy object attributes
    for k, v in obj.__dict__.items():
        if k in mask:
            setattr(clone, k, mask[k])
        else:
            setattr(clone, k, copy.deepcopy(v, memo))
    return clone
64c540689a938f4f508b3ba19378bd494610afd9
171,757
def stringlist_pmatch_string(l, s):
    """returns true if any string in the stringlist partially matches string s

    Args:
        l (list): list of strings to partially match with
        s (string): string to match

    Returns:
        boolean: True if a partial match was found
    """
    match = False
    for element in l:
        if element in s:
            match = True
    return match
366a9fe0e26920716412eaf23cfdd78485ec0028
409,590
import copy

def convert_to_simple(graph):
    """Drop duplicate edges, recording multiplicity in the "order" property."""
    g = copy.copy(graph)
    g.es['order'] = 1
    g.simplify(multiple=True, loops=False, combine_edges='sum')
    g.es['order'] = [int(x) for x in g.es['order']]
    return g
65423b260dfc0abad8cd8bde100f7082a1918b35
284,829
def get_pritunl_settings(module):
    """
    Helper function to set required Pritunl request params from module arguments.
    """
    return {
        "api_token": module.params.get("pritunl_api_token"),
        "api_secret": module.params.get("pritunl_api_secret"),
        "base_url": module.params.get("pritunl_url"),
        "validate_certs": module.params.get("validate_certs"),
    }
b315d99f7c205159b9c02aa724e87aca66ae73d6
488,323
def f1f2(f1, f2, *a, **k):
    """
    Apply one function after the other. Call `f1` on the return value of `f2`.
    Args and kwargs apply to `f2`.

    Example
    -------
    >>> f1f2(str, int, 2)
    '2'
    """
    return f1(f2(*a, **k))
671dd92b1f998e163321ab5589a32c290f1223ff
419,279
def split_header(diff):
    """Splits a diff in two: the header and the chunks."""
    header = []
    chunks = diff.splitlines(True)
    while chunks:
        if chunks[0].startswith('--- '):
            break
        header.append(chunks.pop(0))
    else:
        # Some diff may not have a ---/+++ set like a git rename with no change or
        # a permissions change.
        pass
    if chunks:
        assert chunks[0].startswith('--- '), 'Inconsistent header'
    return ''.join(header), ''.join(chunks)
76b6065535828fc13e8ef44b2dfd310a29eb6ca2
62,449
def polynomial_decay(initial: float, final: float, max_decay_steps: int, power: float, current_step: int) -> float:
    """Decays hyperparameters polynomially. If power is set to 1.0, the decay behaves linearly.

    Args:
        initial {float} -- Initial hyperparameter such as the learning rate
        final {float} -- Final hyperparameter such as the learning rate
        max_decay_steps {int} -- The maximum numbers of steps to decay the hyperparameter
        power {float} -- The strength of the polynomial decay
        current_step {int} -- The current step of the training

    Returns:
        {float} -- Decayed hyperparameter
    """
    # Return the final value if max_decay_steps is reached or the initial and the final value are equal
    if current_step > max_decay_steps or initial == final:
        return final
    # Return the polynomially decayed value given the current step
    else:
        return ((initial - final) * ((1 - current_step / max_decay_steps) ** power) + final)
88aef9bc45a5573fb06941d92ad1211f17d6b0b2
306,721
def pb_config(pb2):
    """Returns an instance of config of *_pb2 package"""
    return pb2.PrefixConfig()
72fa5d45ca3b77fa538c9491bcb77a4a02904ee8
636,062
def readfile(file_dir):
    """
    readfile(file_dir:str)
    Returns the content of a file in an array.
    """
    with open(file_dir) as f:
        content = [line.rstrip('\n') for line in f]
    return content
2b05d1edb4e3d6dc31f8a18c29f42c2f3a68dd37
644,937
def hash_labels_only(**data):
    """
    Computes a hash value only based on a list of labels.

    :param data: Contains the keyword arguments passed in. Requires 2 keyword arguments:
        labels: A list of strings
        node_label_hash: A Dictionary of String -> integer
            Each label is assigned an integer which is a power of 2, to ensure
            that each combination of labels has a unique hash value.
    :return: An integer
    """
    labels = data["labels"]
    node_label_hash = data["node_label_hash"]
    hash_value = 0
    for label in labels:
        hash_value += node_label_hash[label]
    return hash_value
7a7ea858b9c2c94b0b33f66a82867e56591fa90e
346,050
def _convert_pascal_to_camel(pascal_case_string: str) -> str:
    """
    Convert a string provided in PascalCase to camelCase
    """
    return pascal_case_string[:1].lower() + pascal_case_string[1:]
bf07df67b31da40d3c488870228882d3370283a3
232,810
def pad(val: str) -> str:
    """Pad base64 values if need be: JWT convention is to omit trailing padding."""
    padlen = 4 - len(val) % 4
    return val if padlen > 2 else (val + "=" * padlen)
3ab1c91fde1522f15a766730f73e44c97dbeda1a
12,664
def str_to_count(num):
    """Helper function to parse a string representation of a count value,
    with the empty string representing zero"""
    return 0 if num == '' else int(num)
2c007611badb3d05d99032b145ebf9948ab21097
434,792
def get_coordinate(record):
    """
    :param record: tuple - a (treasure, coordinate) pair.
    :return: str - the extracted map coordinate.
    """
    return record[1]
b4e6f920b2bdf5f0325996c1b2d2f3dd0a25091b
513,196
import six

def _tostr(val):
    """
    >>> _tostr('foo')
    'foo'
    >>> isinstance(_tostr(u'foo'), six.text_type)
    True
    >>> _tostr(10)
    '10'
    >>> _tostr(True)
    '1'
    >>> _tostr(False)
    '0'
    """
    if isinstance(val, six.string_types):
        return val
    if isinstance(val, bool):
        return str(int(val))
    return str(val)
86fdede63d5db3d11351638d5af9a15429d29c1f
364,695
import unicodedata

def normalize_casefold(text, *,
                       _casefold=str.casefold,
                       _normalize=unicodedata.normalize):
    """Normalize text data for caseless comparison

    Use the "canonical caseless match" algorithm defined in the Unicode
    Standard, version 10.0, section 3.13, requirement D146 (page 159).
    """
    # Taken from https://stackoverflow.com/questions/319426/how-do-i-do-a-case-insensitive-string-comparison#comment60758553_29247821
    return _normalize('NFKD', _casefold(_normalize('NFKD', _casefold(_normalize('NFD', text)))))
7779919e7c3d46b13b9459ed6135f234645d57b2
112,119
def reverse_word(string, word_start, word_end):
    """
    reverses a sub string from start index to end index in place.
    """
    # this whole function could be replaced by slicing
    # see the function below
    start = word_start
    end = word_end
    if end - start > 1:
        while start >= word_start and end <= word_end and start < end:
            string[start], string[end] = string[end], string[start]
            start += 1
            end -= 1
    return string
99d55b9ab5eb26b4fea0ee5e23c555b6580fe518
113,406
import re

def check_text_row_column(s):
    """For a piece of text s, split rows on newlines and columns on at least
    four consecutive spaces or a tab, and analyse the number of rows and columns.

    :return: (n, m) with n, m >= 0 if every row has the same number of columns;
        otherwise a list of the per-row column counts, each negated to flag the mismatch.
    """
    # split each line into its columns
    if not s:
        return (0, 0)
    lines = [re.sub(r'( {4,}|\t)+', r'\t', line.strip()).split('\t') for line in s.splitlines()]
    cols = [len(line) for line in lines]  # number of columns in each row
    if min(cols) == max(cols):
        return len(lines), cols[0]
    else:
        return [-col for col in cols]
f916291fb0c647d6a94cb849042533c0c276851f
530,816
def iana_interface_type(
        num  # type: int
):
    """Parse IANA-defined interface types"""
    types = {
        1: "Other",
        2: "BBN 1822",
        3: "HDH 1822",
        4: "DDN X.25",
        5: "RFC-877 X.25",
        # For all Ethernet-like CSMA-CD interfaces per IANA
        6: "Ethernet",
        # Deprecated, should use 6 per IANA
        7: "Ethernet",
        8: "Token Bus",
        9: "Token Ring",
        10: "ISO88026Man",
        11: "Star LAN",
        12: "Proteon 10Mbit",
        13: "Proteon 80Mbit",
        14: "Hyperchannel",
        15: "FDDI",
        16: "LAPB",
        17: "SDLC",
        18: "DS1",
        19: "E1",
        20: "Basic ISDN",
        21: "Primary ISDN",
        22: "Prop Point To Point Serial",
        23: "PPP",
        24: "Software Loopback",
        25: "EON",
        26: "Ethernet 3Mbit",
        27: "NSIP",
        28: "SLIP",
        29: "Ultra",
        30: "DS3",
        31: "SIP",
        32: "Frame Relay",
        33: "RS232",
        34: "PARA",
        35: "ARCNet",
        36: "ARCNet Plus",
        37: "ATM",
        38: "MIOX25",
        39: "SONET",
    }
    # Unrecognised values fall back to "Other"
    return types.get(num, "Other")
f8e2eb5d0f3c20f9be121ac43a1e458030a18aef
405,521
def _format_tracking(local_branch, remote_branch, left, right):
    """
    Takes a tuple returned by repo.tracking_status and outputs a nice string
    describing the state of the repository.
    """
    if (left, right) == (0, 0):
        return "Your tracking branch and remote branches are up to date."
    elif left == 0:
        return ("The remote branch %s is %d revisions ahead of tracking branch %s."
                % (remote_branch, right, local_branch))
    elif right == 0:
        return ("Your tracking branch %s is %d revisions ahead of remote branch %s."
                % (local_branch, left, remote_branch))
    else:
        return (("Your local branch %s and remote branch %s have diverged by "
                 + "%d and %d revisions.")
                % (local_branch, remote_branch, left, right))
1d4b949aab94ead7e50f9d3cafe978f1381c9ff0
410,760
import ipaddress

def config_to_map(topology_config):
    """
    args:
        topology_config: dict
            {
                'region1': [
                    '10.1.1.0/24',
                    '10.1.10.0/24',
                    '172.16.1.0/24'
                ],
                'region2': [
                    '192.168.1.0/24',
                    '10.2.0.0/16',
                ]
            }
            Region cannot be "_default"

    returns:
        topology_map: dict
            {
                ip_network('10.1.1.0/24'): 'region1',
                ip_network('10.1.10.0/24'): 'region1',
                ip_network('172.16.1.0/24'): 'region1',
                ip_network('192.168.1.0/24'): 'region2',
                ip_network('10.2.0.0/16'): 'region2',
            }

    raises:
        ValueError: if a region value is "_default"
    """
    topology_map = {}
    for region in topology_config:
        # "_default" cannot be used as a region name
        if region == '_default':
            raise ValueError('cannot use "_default" as a region name')
        for net_str in topology_config[region]:
            net = ipaddress.ip_network(net_str)
            topology_map[net] = region
    return topology_map
5cd6e7c17903b9df68cf78260927a84f9464a48e
499,436
def extract_sub_attributes(browser):
    """
    Extract sub attribute tags of avatar image from web page

    :param browser: opened browser
    :type browser: webdriver.Chrome
    :return: sub attribute tags of avatar image
    :rtype: list
    """
    sub_attrs = browser.find_elements_by_class_name('sub-attr')
    sub_attr_results = []
    for sub_attr in sub_attrs:
        sub_attr_results.append(sub_attr.text)
    return sub_attr_results
46ed55666316839bb0f09e82696ab515056b572c
112,484
def add_residue_to_dfix(dfix_head, resinum):
    """
    Add a residue to a list of DFIX/DANG restraints
    DFIX 1.234 C1 C2 -> DFIX 1.234 C1_4 C2_4

    >>> add_residue_to_dfix(['DFIX 1.456 C1 C2', 'DFIX 1.212 C3 C4'], 4)
    ['DFIX 1.456 C1_4 C2_4\\n', 'DFIX 1.212 C3_4 C4_4\\n']

    >>> add_residue_to_dfix(['DFIX 1.456 C1 C2', 'DFIX 1.212 C3 C4'], '5')
    ['DFIX 1.456 C1_5 C2_5\\n', 'DFIX 1.212 C3_5 C4_5\\n']

    >>> add_residue_to_dfix(['FLAT C6 C1 C2 C3', 'FLAT C7 C1 C2 C3'], '2')
    ['FLAT C6_2 C1_2 C2_2 C3_2\\n', 'FLAT C7_2 C1_2 C2_2 C3_2\\n']
    """
    newhead = []
    for line in dfix_head:
        line = line.split()
        first = line[0]
        for num, item in enumerate(line):
            try:
                int(item[0])
            except:
                line[num] = line[num] + '_' + str(resinum)
                continue
        line = first + ' ' + ' '.join(line[1:]) + '\n'
        newhead.append(line)
    return newhead
69fd5b22a6ef9c1ffe6956ce658a9761f1781238
657,355
from bs4 import BeautifulSoup

def parser(raw):
    """Parse the raw response into a BeautifulSoup object."""
    return BeautifulSoup(raw, 'html.parser')
58975302d3d380cc4902ded052db37fbe40a619f
296,355
def gradient_summary(efpobj):
    """Gets the computed per-fragment EFP energy gradient of `efpobj`.

    Returns
    -------
    str
        Formatted text of the `6 x n_frag` gradient and torque.
    """
    grad = efpobj.get_gradient()
    grad6 = list(map(list, zip(*[iter(grad)] * 6)))
    text = '\n ==> EFP Gradient & Torque <==\n\n'
    for fr in grad6:
        text += '{:14.8f} {:14.8f} {:14.8f} {:14.8f} {:14.8f} {:14.8f}\n'.format(*fr)
    text += '\n'
    return text
82d77a45ea37ab870bf0b6119ab879ec72fbbb30
476,379
def accuracy(labels, logits, idx):
    """
    Compute the accuracy for a set of nodes.

    Parameters
    ----------
    labels: torch.Tensor [n]
        The ground-truth labels for all nodes.
    logits: torch.Tensor, [n, nc]
        Logits for all nodes.
    idx: array-like [?]
        The indices of the nodes for which to compute the accuracy.

    Returns
    -------
    accuracy: float
        The accuracy.
    """
    return (labels[idx] == logits[idx].argmax(1)).sum().item() / len(idx)
7d81a64f848ddfa4a0dae7a8b991d71048503b0f
137,682
import json

def load_json(fn: str):
    """Standard built in for loading json from file to dictionary"""
    with open(fn, "r") as file:
        data = json.load(file)
    return data
0f7bef349ce4789765b3759073f605b87724e422
122,889
def fields_values(d, k):
    """
    >>> data = {'name1': ['value1.1', 'value1.2'], 'name2': ['value2.1', 'value2.2'], }
    >>> print(fields_values(data, 'name1'))
    value1.1,value1.2
    """
    values = d.get(k, [])
    return ",".join(map(str, values))
9b693fdc1c50601efba685426a835ac11ddb0d8a
610,695
def tau_h(R0, vsc):
    """Arnett 1982 expansion timescale, in days

    R0: initial radius in cm
    vsc: scaling velocity in cm/s
    """
    return (R0/vsc) / 86400.0
a1284046efd5ae1b3a59dfb0a6c28ed4d4dd5791
548,304
from collections import deque
from typing import Iterable
from typing import Iterator

def tail(it: Iterable, n: int) -> Iterator:
    """
    Return an iterator over the last n items

    >>> [*tail('ABCDEFG', 3)]
    ['E', 'F', 'G']
    """
    return iter(deque(it, maxlen=n))
8d018e37b477a4cd9a8ff92954435896e33681d7
144,463
from typing import Union
import requests

def post_json(url: str, data: Union[list, dict]) -> Union[list, dict]:
    """
    Send a POST request to the resource with the payload as JSON
    """
    resp = requests.post(url, json=data)
    if not resp.ok:
        resp.raise_for_status()
    return resp.json()
0b07236ce63f02a744c2a9399d4bb478bd41a5a3
238,438
def delete_game(client, game_id):
    """Delete a tictactoe game."""
    return client.delete('tictactoe', game_id)
dd8223257d23c382db69612d29740abd45d7a073
423,200
def absorb_short_notes(pairs, **kwargs):
    """
    If a note with a short duration appears between two of the same notes,
    change the short note to the surrounding ones to make a continuous long
    note. Otherwise the short note is removed. This avoids the effect of
    fluctuations in a note. However this has the effect of merging repeated
    notes separated by short silences (or rapid alternation of two notes)
    into a single long note.

    Example:
    >>> absorb_short_notes([(95, 2), (96, 1), (95, 8), (92, 6)], duration_threshold=2)
    [(95, 11), (92, 6)]
    """
    duration_threshold = kwargs.get('duration_threshold', 0.25)
    result = []
    i = 0
    while i < len(pairs):
        pair = pairs[i]
        n, d = pair
        try:
            n1, d1 = pairs[i + 1]
            n2, d2 = pairs[i + 2]
        except IndexError:
            if d > duration_threshold:
                result.append(pair)
            i += 1
            continue
        if n == n2 and d1 < duration_threshold:
            new_pair = (n, d + d1 + d2)
            result.append(new_pair)
            i += 3
        else:
            if d > duration_threshold:
                result.append(pair)
            i += 1
    return result
06a7b7125385e74757695bcdfe33dff65582aa45
487,712
def index_to_coord(index, sl):
    """
    Takes an index into a flattened 3D array and its side length.
    Returns the coordinate in the cube.
    """
    coord = []
    two_d_slice_size = sl * sl
    coord.append(index // two_d_slice_size)
    remaining = index % two_d_slice_size
    coord.append(remaining // sl)
    coord.append(remaining % sl)
    return coord
051c661f50e616a575e13887c731054080781ae6
158,696
def calc_estimated_runtime(pairs):
    """
    Based on the number of mapped pairs, guess how long the split will take
    """
    SLOPE_CONSTANT = 0.0061401594694834305
    return (pairs * SLOPE_CONSTANT) + 0.2
426fba7e2bfce9435f1011fa4cddd3497797121c
212,091
def transform_y(f_x_y, xf_and_dxf_dyprime):
    """Transform f_x_y(x,y) into f_x_yprime(x,yprime)

    xf is the inverse transformation: from yprime to y
        y = xf(yprime, *xf_params)
    dxf_dyprime is its derivative with respect to yprime"""
    def f_x_yprime(x, yprime, p):
        xf, dxf_dyprime = xf_and_dxf_dyprime(yprime, p)
        return f_x_y(x, xf, p) * dxf_dyprime
    return f_x_yprime
474b5083ecfe4d4b38e2a91c4ddf26e9d7a0bbdc
618,042
import collections

def mode(data):
    """Return the most common data item. If there are ties, return any one of them."""
    [(item, count)] = collections.Counter(data).most_common(1)
    return item
ec265f6d12cbbfacad6030ecadfd38bba4daa23e
516,520
def _key(node, terminal):
    """
    Combine *node* and *terminal* to a key into the results set for the
    current template. Returns a string unique within the template that can
    be used in JSON.
    """
    return ":".join((str(node), str(terminal)))
1047c6f38c3b52b9ceadc54429ad7235aca54d3a
418,111
def read_input(path: str):
    """
    Read game board file from path.
    Return list of str.
    """
    with open(path, mode='r', encoding='utf-8') as data:
        content = data.readlines()
    for i in range(len(content)):
        content[i] = content[i].strip()
    return content
2183619dd542762cf62dc7557319614df0341de1
639,554
import hashlib

def compute_md5(fp):
    """Compute an md5 hash.

    :type fp: file
    :param fp: File pointer to the file to MD5 hash. The file pointer will be
        reset to the beginning of the file before the method returns.

    :rtype: str
    :returns: the hex digest version of the MD5 hash
    """
    m = hashlib.md5()
    fp.seek(0)
    s = fp.read(8192)
    while s:
        m.update(s)
        s = fp.read(8192)
    hex_md5 = m.hexdigest()
    # size = fp.tell()
    fp.seek(0)
    return hex_md5
7928d8766d022c529bf81cd05552c6547e85f9d5
248,283
import re

def _collapse_offset_changes(text):
    """Replaces "offset changed" lines with a one-line summary."""
    regex = re.compile(
        r"^( *)('.*') offset changed from .* to .* \(in bits\) (\(by .* bits\))$")
    items = []
    indent = ""
    offset = ""
    new_text = []

    def emit_pending():
        if not items:
            return
        count = len(items)
        if count == 1:
            only = items[0]
            line = "{}{} offset changed {}\n".format(indent, only, offset)
        else:
            first = items[0]
            last = items[-1]
            line = "{}{} ({} .. {}) offsets changed {}\n".format(
                indent, count, first, last, offset)
        del items[:]
        new_text.append(line)

    for line in text.splitlines(True):
        match = regex.match(line)
        if match:
            (new_indent, item, new_offset) = match.group(1, 2, 3)
            if new_indent != indent or new_offset != offset:
                emit_pending()
            indent = new_indent
            offset = new_offset
            items.append(item)
        else:
            emit_pending()
            new_text.append(line)
    emit_pending()
    return "".join(new_text)
e879afc091e3f4f5c91cf913b8e4539e8159b48b
459,604
import random

def filter_list(word_list, list_size):
    """
    Filter the provided list, and return the first list_size entries.

    Args:
        word_list: unfiltered list of strings
        list_size: the number of results to be returned

    Returns:
        A list of strings, size = list_size
    """
    result = set()
    if list_size > len(word_list):
        list_size = len(word_list)
    while len(result) < list_size:
        word = random.choice(word_list)
        if word and word[0].islower() and word.isalpha() and word not in result:
            result.add(word)
    return list(result)
92f442d057e81ba48e365333f08875bf96ae4413
532,086
def get_default_nncf_compression_config(h, w):
    """
    This function returns the default NNCF config for this repository.
    The config makes NNCF int8 quantization.
    """
    nncf_config_data = {
        'input_info': {
            'sample_size': [1, 3, h, w]
        },
        'compression': [
            {
                'algorithm': 'quantization',
                'initializer': {
                    'range': {
                        'num_init_samples': 8192,  # Number of samples from the training dataset
                        # to consume as sample model inputs for purposes of setting initial
                        # minimum and maximum quantization ranges
                    },
                    'batchnorm_adaptation': {
                        'num_bn_adaptation_samples': 8192,  # Number of samples from the training
                        # dataset to pass through the model at initialization in order to update
                        # batchnorm statistics of the original model. The actual number of samples
                        # will be a closest multiple of the batch size.
                        #'num_bn_forget_samples': 1024,  # Number of samples from the training
                        # dataset to pass through the model at initialization in order to erase
                        # batchnorm statistics of the original model (using large momentum value
                        # for rolling mean updates). The actual number of samples will be a
                        # closest multiple of the batch size.
                    }
                }
            }
        ],
        'log_dir': '.'
    }
    return nncf_config_data
34784f65ea6b2c1fd953f7d6ac0395ba9300bf43
618,227
def validate_num_cpus(num_cpus):
    """
    Validate `num_cpus` on correctness.

    Each value of `num_cpus` is checked on possibility of converting to int.

    Parameters
    ----------
    num_cpus : list or str
        List of string values. Each value represents number of CPUs to be
        used by corresponding host.

    Returns
    -------
    list
        List of validated number of CPUs per host.
    """
    if not isinstance(num_cpus, list):
        num_cpus = [num_cpus]

    def validate(value):
        try:
            value = int(value)
            if value < 1:
                raise RuntimeError(f"'num_cpus' must be more than 0, got '{num_cpus}'")
        except ValueError:
            raise TypeError(
                f"'num_cpus' must be integer or sequence of integers, got '{num_cpus}'"
            )
        else:
            return value

    return [str(validate(n)) for n in num_cpus]
b2158efabc6922dd2c3bbd8961353cc98f823bdc
265,171
def normalize_accents(text: str) -> str:
    """
    Remove accents

    :param text: text with undesired accents
    :return: clean text

    >>> normalize_accents('suspensám')
    'suspensam'
    >>> normalize_accents('quăm')
    'quam'
    >>> normalize_accents('aegérrume')
    'aegerrume'
    >>> normalize_accents('ĭndignu')
    'indignu'
    >>> normalize_accents('îs')
    'is'
    >>> normalize_accents('óccidentem')
    'occidentem'
    >>> normalize_accents('frúges')
    'fruges'
    """
    text = text.replace(r"á", "a")  # suspensám
    text = text.replace(r"Á", "A")
    text = text.replace(r"á", "a")  # Note: this accent is different than the one above!
    text = text.replace(r"Á", "A")
    text = text.replace(r"ă", "a")  # 'quăm'
    text = text.replace(r"Ă", "A")
    text = text.replace(r"à", "a")
    text = text.replace(r"À", "A")
    text = text.replace(r"â", "a")
    text = text.replace(r"Â", "A")
    text = text.replace(r"ä", "a")
    text = text.replace(r"Ä", "A")
    text = text.replace(r"é", "e")  # aegérrume
    text = text.replace(r"è", "e")
    text = text.replace(r"È", "E")
    text = text.replace(r"é", "e")
    text = text.replace(r"É", "E")
    text = text.replace(r"ê", "e")
    text = text.replace(r"Ê", "E")
    text = text.replace(r"ë", "e")
    text = text.replace(r"Ë", "E")
    text = text.replace(r"ĭ", "i")  # ĭndignu
    text = text.replace(r"î", "i")  # 'îs'
    text = text.replace(r"í", "i")
    text = text.replace(r"í", "i")
    text = text.replace(r"î", "i")
    text = text.replace(r"Î", "I")
    text = text.replace(r"ï", "i")
    text = text.replace(r"Ï", "I")
    text = text.replace(r"ó", "o")  # óccidentem
    text = text.replace(r"ô", "o")
    text = text.replace(r"Ô", "O")
    text = text.replace(r"ö", "o")
    text = text.replace(r"Ö", "O")
    text = text.replace(r"û", "u")
    text = text.replace(r"Û", "U")
    text = text.replace(r"ù", "u")
    text = text.replace(r"Ù", "U")
    text = text.replace(r"ü", "u")
    text = text.replace(r"Ü", "U")
    text = text.replace(r"ú", "u")  # frúges
    text = text.replace(r"ÿ", "y")
    text = text.replace(r"Ÿ", "Y")
    text = text.replace(r"ç", "c")
    text = text.replace(r"Ç", "C")
    text = text.replace(r"ë", "e")
    text = text.replace(r"Ë", "E")
    text = text.replace(r"Ȳ", "Y")
    text = text.replace(r"ȳ", "y")
    return text
7954256350d1c67f6a107d2057b1ca37ebe253cf
99,612
def bench1(x):
    """A benchmark function for test purposes.

        f(x) = x ** 2

    It has a single minimum with f(x*) = 0 at x* = 0.
    """
    return x[0] ** 2
9631a854521907a6afc3a27e857d9699fceb762c
158,212
def calculate_check_digit(mpan_unique: str) -> int:
    """Check MPAN digit.

    The final digit in the MPAN is the check digit, and validates the
    previous 12 (the core) using a modulus 11 test. The check digit is
    calculated thus:

    1. Multiply the first digit by 3
    2. Multiply the second digit by the next prime number (5)
    3. Repeat this for each digit (missing 11 out on the list of prime
       numbers for the purposes of this algorithm)
    4. Add up all these products
    5. The check digit is the sum modulo 11 modulo 10.

    Args:
        mpan_unique -- The first 12 digits of the MPAN number, excluding the check digit.
    """
    check_digit = sum(prime * int(digit) for prime, digit in
                      zip([3, 5, 7, 13, 17, 19, 23, 29, 31, 37, 41, 43],
                          mpan_unique)) % 11 % 10
    return check_digit
2fde1d9732bd1f8d83abfda5eaf6a0425f51cecc
291,467
def split_word_at_pipe(word):
    """
    This function splits a word separated by a | symbol

    Args:
        word (str): Word with a pipe symbol

    Returns:
        A list of split items

    Examples:
        >>> split_word_at_pipe('Bilderbuch|Absturz')
        ['Bilderbuch', 'Absturz']

        >>> split_word_at_pipe('Bilderbuch')
        ['Bilderbuch', 'Bilderbuch']
    """
    if '|' in word:
        return word.split('|')
    else:
        return [word, word]
60477f7967457ed05462f9afe405f0139359f9ac
554,555
def read_names_from_file(file):
    """
    Reads the provided file line by line to provide a list representation of the contained names.

    :param file: A text file containing one name per line
    :return: A list of the names contained in the provided text file
    """
    names_list = []
    for line in file:
        names_list.append(str(line).strip())
    return names_list
fbecc268480412fc472c12c3c4f0e1ed90b66e8e
164,660
import re

def python_safe(raw_name):
    """Make a string safe to use in a Python namespace"""
    return re.sub('[^0-9a-zA-Z]', '_', raw_name)
2d2645c07d4a0f0e958c637676c81c947b64039a
484,887
def dms_to_dd(degrees, minutes, seconds):
    """Convert degrees, minutes, seconds to decimal degrees"""
    fd = float(degrees)
    if fd < 0:
        return fd - float(minutes) / 60 - float(seconds) / 3600
    return fd + float(minutes) / 60 + float(seconds) / 3600
bbd2b32b13354e37c97e1cf18ebc411e60650c15
290,862
def _GetTestSuiteFromKey(test_key):
    """Gets test suite from |test_key|, None if not found."""
    pairs = test_key.pairs()
    if len(pairs) < 3:
        return None
    return pairs[2][1]
fc604055c5c5990071d0284e1b668b18e666a13e
445,508
def flatten_list(input_list):
    """Flattens a nested list.

    Args:
        input_list: A (possibly) nested list.

    Returns:
        A flattened list, preserving order.
    """
    if not input_list:
        return []
    if isinstance(input_list[0], list):
        return flatten_list(input_list[0]) + flatten_list(input_list[1:])
    else:
        return input_list[:1] + flatten_list(input_list[1:])
d77f39a78d05574ce50ebf49e622da6e2a8baa11
380,023
def hPa_to_m(p):
    """
    Compute pressure altitude

    :param p: pressure (hPa)
    :return: altitude (m)
    """
    # https://www.weather.gov/media/epz/wxcalc/pressureAltitude.pdf
    # The 0.3048 factor converts the feet-based formula to metres.
    return (0.3048*145366.45)*(1 - (p/1013.25)**0.190284)
7a84d1144c287971ff925d51e6b2f186376f9d03
318,991
import codecs

def make_base64(data):
    """
    Takes a string of hex values and returns a base64 encoded string.

    :param data: string of Hex
    :return: base64 encoded string
    """
    return codecs.encode(codecs.decode(data, 'hex'), 'base64').decode('utf-8').replace('\n', '')
5544d1f0cc1d9f9a565f6e013bcdb3c5106da228
289,660
def orientation(a, b, c):
    """
    Given three points, find the orientation. See examples below. They are
    colinear(0), clockwise(1), and counterclockwise(2) respectively.

        c
        |
        b      b_c      c_b
        |      |        |
        a      a        a
    """
    val = (b.y - a.y) * (c.x - b.x) - (b.x - a.x) * (c.y - b.y)
    # If equal to 0 -> colinear, >0 -> clockwise, <0 -> counterclockwise
    if val == 0:
        return 0
    elif val > 0:
        return 1
    else:
        return 2
1c9229ec5be2d7187f7fa3c8f8b79d33122380f8
327,671
def scale_to_control(x, axis_scale=350., min_v=-1.0, max_v=1.0):
    """
    Normalize raw HID readings to target range.

    Args:
        x (int): Raw reading from HID
        axis_scale (float): (Inverted) scaling factor for mapping raw input value
        min_v (float): Minimum limit after scaling
        max_v (float): Maximum limit after scaling

    Returns:
        float: Clipped, scaled input from HID
    """
    x = x / axis_scale
    x = min(max(x, min_v), max_v)
    return x
9c5df3adaad9c040d13bb2f12e7704328b6ec749
64,201
def filter_stop_word(word_list, stop_word_list):
    """
    Filter out stop words.

    :param word_list: list of words to filter
    :param stop_word_list: list of stop words
    """
    res = list(filter(lambda w: w not in stop_word_list, word_list))
    return res
9bb0303b0e17263e5dd055c4e1d088f9781de43c
396,360
from packaging import version

def cmp_version(string):
    """convert a version string to a packaging.version.Version"""
    return version.parse(string)
98c9dc9cc2e199415cc48457573dfcb210a92a31
491,776
def ra_to_set(row):
    """Collect the RA columns into a (hours, minutes, seconds) tuple."""
    return (row['ra_hrs'], row['ra_minutes'], row['ra_seconds'])
d96e3113a946e526d1823f65a31324093f4f2966
99,437
import re

def isolate_titles(targets, predictions):
    """Maps each target and prediction to a list of movies mentioned.

    Args:
        targets: a list of strings, the target from the validation set
        predictions: a list of strings, the model predictions

    Returns:
        a tuple containing a list of lists for both target and prediction titles
    """
    all_target_titles = []
    all_prediction_titles = []
    for tar, pred in zip(targets, predictions):
        target_titles = re.findall(r"\@([^@]*\([^@]*\)[^@]*)\@", tar)
        prediction_titles = re.findall(r"\@([^@]*\([^@]*\)[^@]*)\@", pred)
        all_target_titles.append([x.strip() for x in target_titles])
        all_prediction_titles.append([x.strip() for x in prediction_titles])
    return all_target_titles, all_prediction_titles
4fc12ab2d14e174162a45699e9f6dc368b68a245
339,655
def _split_xpath_component(xpath_component):
    """Split an xpath component into a tag-index tuple.

    >>> _split_xpath_component('{urn:oasis:names:tc:opendocument:xmlns:office:1.0}document-content[0]')
    ('{urn:oasis:names:tc:opendocument:xmlns:office:1.0}document-content', 0)
    """
    lbrac = xpath_component.rfind(u'[')
    rbrac = xpath_component.rfind(u']')
    tag = xpath_component[:lbrac]
    index = int(xpath_component[lbrac+1:rbrac])
    return tag, index
01acdf6b503f58ff021e8214dec3414f04f0e26a
476,281
import random

def shuffle_word(word: str) -> str:
    """
    Function encodes the given word with the following conditions:
        - first and last character of a word stays the same
        - characters in between are being shuffled
        - final word must not be the same as the original word (if possible)

    :param word: a word that needs to be shuffled
    :return: shuffled_word: string
    """
    if len(word) < 4:
        # A word of three or fewer letters cannot be shuffled under the given conditions
        return word

    # Convert letters to shuffle to list
    letters_to_shuffle = list(word[1:-1])
    sorted_letters = letters_to_shuffle.copy()
    sorted_letters.sort()

    # If all the letters are the same, permutation is not possible
    if sorted_letters[0] == sorted_letters[-1]:
        return word

    # Shuffle the letters
    shuffled_letters = letters_to_shuffle.copy()
    while shuffled_letters == letters_to_shuffle:
        random.shuffle(shuffled_letters)

    # List to string
    shuffled_letters = "".join(shuffled_letters)

    # Create a shuffled word and return it
    shuffled_word = word[0] + shuffled_letters + word[-1]
    return shuffled_word
0b3d9c7e71c7891c00fedd6f8dbd4a32bc1ca80f
376,029
from typing import Dict
from typing import Any
from typing import Union

def get(obj: Dict, path: str, default: Any = None) -> Any:
    """
    Traverse object according to path, return value if found, else default.

    Time complexity: O(n)

    Args:
        obj: Object to traverse.
        path: Path to traverse.
        default: Default value to return if path not found.

    Returns:
        Any: Value at path if found, else default.
    """
    if obj is None or path is None:
        return default
    names = None
    if isinstance(path, str):
        names = path.split('.')
    elif isinstance(path, list):
        names = path
    else:
        raise TypeError('Path must be str or list')
    current: Union[Dict[str, Any], None] = obj
    for name in names:
        if isinstance(current, dict) and name in current:
            current = current[name]
        else:
            current = None
        if current is None:
            break
    return current if current is not None else default
1d133be7b2375d7d68905cc2acd63da3f5fb391c
285,332
import hashlib

def get_file_sha1(file_path: str) -> str:
    """
    Get sha1 checksum of file

    :param file_path: File path
    :return: sha1 checksum
    """
    sha1 = hashlib.sha1()
    block_size = 1024 * 1024
    with open(file_path, 'rb') as f:
        while True:
            block = f.read(block_size)
            if not block:
                break
            sha1.update(block)
    return sha1.hexdigest()
180020cef97add5cdba53dc19966b042fe4eafe3
501,696
import pprint

def prettify_object(obj):
    """Makes a pretty string for an object for nice output"""
    try:
        return pprint.pformat(str(obj))
    except UnicodeDecodeError as e:
        raise
    except Exception as e:
        return "[could not display: <%s: %s>]" % (e.__class__.__name__, str(e))
136acad3a537f814b881b3c5576d5d2426962241
489,153
def list_to_dict(wc_list, order_list):
    """
    Create a dictionary from a list wc_list, using keys in order_list
    """
    # assert len(order_list) == len(wc_list)
    wc_dict = {}
    for wc_ind in range(len(order_list)):
        wc_dict[order_list[wc_ind]] = wc_list[wc_ind]
    return wc_dict
6cecf092277b26103ec2442c1bbe19f8b5ad54c7
478,961
import re

def normalize(s):
    """Return a normalized version of string s with newlines, tabs and C style
    comments ("/* ... */") replaced with spaces. Multiple spaces are replaced
    with a single space.

    >>> normalize(" /* nothing */ foo\tfoo /* bar */ foo ")
    'foo foo foo'
    """
    assert(type(s) is str)
    s = s.replace("\n", " ")
    s = s.replace("\t", " ")
    s = re.sub(r"/\*.*?\*/", " ", s)
    s = re.sub(" {2,}", " ", s)
    return s.strip()
4cf8892c6038d93a860c0ddf65e86594e10ef6ca
250,775
from typing import Union
import time

def time_format(seconds: float, format_='%H:%M:%S') -> Union[str, float]:
    """
    Default format is '%H:%M:%S'

    >>> time_format(3600)
    '01:00:00'
    """
    # NaN fails both comparisons below, so it falls through and is returned unchanged
    if seconds >= 0 or seconds < 0:
        time_ = time.strftime(format_, time.gmtime(abs(seconds)))
        if seconds < 0:
            return f"-{time_}"
        return time_
    return seconds
41116e13c2e93255b0e2512a9fad69bca020ed69
686,744
import yaml

def read_yaml(path: str):
    """Read content of yaml file from path

    :param path: path to yaml file
    :type path: str
    :return: yaml file content, usually a dictionary
    """
    with open(path) as file:
        return yaml.safe_load(file)
d33b65e0a97600fd0a02587d178ec7705f527620
370,775
def lem_num(orth: str):
    """Return a nice lemma for a thing supposed to be a number"""
    return "NUM"
44560b84ac70e5aad9c81808dea7302b02cbf624
119,518
def get_veth_slot_info_cmd(lpar_id, slotnum):
    """
    get virtual ethernet slot information
    For IVM, vswitch field is not supported.

    :param lpar_id: LPAR id
    :param slotnum: veth slot number
    :returns: A HMC command to get the virtual ethernet adapter information.
    """
    return ("lshwres -r virtualio --rsubtype eth --level "
            "lpar --filter lpar_ids=%(lparid)s,slots=%(slot)s "
            "-F is_trunk,trunk_priority,ieee_virtual_eth,"
            "port_vlan_id,addl_vlan_ids" %
            {'lparid': lpar_id, 'slot': slotnum})
e73ee822030a1cb9b4763d97bc33b6a307077a79
121,648
def gcd(a: int, b: int) -> int:
    """This function returns the greatest common divisor between two given integers."""
    if a < 1 or b < 1:
        raise ValueError(f'Input arguments (a={a}, b={b}) must be positive integers')
    while a != 0:
        a, b = b % a, a
    return b
d07cf448ac9a5e8fd8874331aeace8b43e8f63e6
183,979
import inspect
import re

def strip_source_code(method, remove_nested_functions=True):
    """
    Strips the source code of a method by everything that's "not important", like comments.

    Args:
        method (Union[callable,str]): The method to strip the source code for
            (or the source code directly as string).
        remove_nested_functions (bool): Whether to remove nested functions from the source code
            as well. These usually confuse the analysis of a method's source code as they
            comprise a method within a method.

    Returns:
        str: The stripped source code.
    """
    if callable(method):
        src = inspect.getsource(method)
    else:
        src = method
    # Resolve '\' at end of lines.
    src = re.sub(r'\\\s*\n', "", src)
    # Remove single line comments.
    src = re.sub(r'\n\s*#.+', "", src)
    # Remove multi-line comments.
    src = re.sub(r'"""(.|\n)*?"""', "", src)
    # Remove nested functions.
    if remove_nested_functions is True:
        src = re.sub(r'\n(\s*)def \w+\((.|\n)+?\n\1[^\s\n]', "", src)
    return src
5e50c902a6c56b2c46715ebda8a7c2aea8750b1a
656,057
import functools

def patchit(obj, name, pass_patched=False):
    """Return a decorator that replaces an attribute with the decorated item.

    Parameters
    ----------
    obj : object
        The object to alter.
    name : str
        The name of the attribute to replace.
    pass_patched : bool
        If set to True, the original attribute will be passed as the first
        argument to the decorated function.
    """
    patched_attr_name = f'__patched_attr_{name}'
    if hasattr(obj, patched_attr_name):
        raise RuntimeError(f"attr {name} of obj {obj} is already patched")
    old_func = getattr(obj, name)

    def decorator(func):
        if pass_patched:
            @functools.wraps(func)
            def wrapper(*args, **kwargs):
                return func(old_func, *args, **kwargs)
            new_func = wrapper
        else:
            new_func = func
        setattr(obj, patched_attr_name, old_func)
        setattr(obj, name, new_func)
        return new_func

    return decorator
d624a84a197d5f8a68a7e1202177e7a6cf2cc554
298,162
import re

def camel_case_transformer(s):
    """
    Converts a camel case string into space separated words starting with a capital letter
    E.g. input: 'camelCase' output: 'Camel Case'
    REMARK: the exceptions list below is returned uppercase, e.g. "cve" => "CVE"
    """
    transformed_string = re.sub('([a-z])([A-Z])', r'\g<1> \g<2>', str(s))
    if transformed_string in ['id', 'cve', 'arn']:
        return transformed_string.upper()
    return transformed_string.title()
107e859489bf80eaa1602139aaf8ff5a55e59041
410,769
def calculate_term_frequencies(tokens):
    """Given a series of `tokens`, produces a sorted list of tuples in the
    format of (term frequency, token).
    """
    frequency_dict = {}
    for token in tokens:
        frequency_dict.setdefault(token, 0)
        frequency_dict[token] += 1
    tf = []
    for token, count in frequency_dict.items():
        tf.append((count, token))
    return sorted(tf, reverse=True)
b764175cd59fe25c4a87576faee2a76273097c5e
6,516
def compounded_interest_fv(P, r, t, m):
    """
    Compounded interest assumes that the interest earned will now be added to
    the principal periodically, for example, annually, semi-annually,
    quarterly, monthly or on a daily basis.

    Parameters
    ----------
    P: float
        Principal, initial deposit.
    r: float
        Interest rate
    t: float
        Time, expressed in years.
    m: float
        Compounding period. Amount of payments made per annum.
    """
    return P*(1 + r/m)**(t*m)
8ce1d32e7c4d6f9d18530db74b645ae0f23e8750
347,233
def can_move(minimap, i, j, di, dj):
    """
    Check whether can move in specified direction
    """
    # minimap is always square
    n = len(minimap)
    # Move once
    i += di
    j += dj
    # Cannot move outside of minimap
    if i < 0 or i >= n or j < 0 or j >= n:
        return False
    # Cannot move if occupied
    if minimap[i][j] == 1:
        return False
    # Move second time
    i += di
    j += dj
    # Can move if second move falls outside
    if i < 0 or i >= n or j < 0 or j >= n:
        return True
    # Cannot move if second move is occupied
    if minimap[i][j] == 1:
        return False
    # Otherwise we can move
    return True
4648a205eb03730d13cb593858623ebba20f4d3a
412,624
def format_kvps(mapping, prefix=""):
    """Formats a mapping as key=value pairs.

    Values may be strings, numbers, or nested mappings.

    Nested mappings, e.g. host:{ip:'0.0.0.1',name:'the.dude.abides'}, will be
    handled by prefixing keys in the sub-mapping with the key, e.g.:
    host.ip=0.0.0.1 host.name=the.dude.abides.
    """
    kvp_list = []
    for k, v in mapping.items():
        if hasattr(v, "keys"):  # nested mapping
            new_prefix = prefix + "." + k if prefix else k
            kvps = format_kvps(v, prefix=new_prefix)  # format as string
            kvp_list.append(kvps)
            continue  # already prefixed with key; go to next
        if v is None:
            v = "None"
        elif isinstance(v, int) or isinstance(v, float):
            v = "{}".format(v)
        elif " " in v:
            v = '"' + v.replace('"', '\\"') + '"'
        if prefix:
            k = prefix + "." + k
        kvp_list.append("{}={}".format(k, v))
    return " ".join(kvp_list)
4780d5c705a8805331a1d981e87fa3d3dca263a8
703,072
def get_ingredients(drink_dict):
    """
    Create a list of ingredients and measures

    Form data is passed as a dictionary. The function iterates through the
    key/value pairs and appends the ingredients and measures to its own list
    called ingredients.

    Args:
        drink_dict : The dictionary containing the ingredients
    Returns:
        A list of alternating measures and ingredients
    """
    ingredients = []
    for k, v in list(drink_dict.items()):
        if ('ingredient' in k) or ('measure' in k):
            ingredients.append(v)
            drink_dict.pop(k)
    return ingredients
5ec86b71ab8bf0d9b1d6291d7d14cbfbeff2d867
675,210
from typing import List

def distort_single(radius_undist: float, distort_coeffs: List[float]) -> float:
    """
    Calculate distortion for a single undistorted radius.
    Note that we have 3 distortion parameters.

    Args:
        radius_undist: undistorted radius
        distort_coeffs: list of distortion coefficients

    Returns:
        distortion radius
    """
    radius_dist = radius_undist
    r_u_pow = radius_undist
    for distortion_coefficient in distort_coeffs:
        r_u_pow *= radius_undist ** 2
        radius_dist += r_u_pow * distortion_coefficient
    return radius_dist
d2cb7a541e65b915a3134851682739b0c9855245
367,141
import pickle

def predictSVM(X_test):
    """This function takes a dataset and predicts the letter for each data point.

    Parameters
    ----------
    X_test: M x 128 numpy array
        A dataset represented by numpy-array

    Returns
    -------
    M x 1 numpy array
        Returns a numpy array of letters that each data point represents
    """
    # load the classifier object from the file into var clf
    with open("clf.pkl", 'rb') as fp:
        clf = pickle.load(fp)
    z = clf.predict(X_test)
    return z
ce9f58301fbfdf242356b74b0c19743932d44c3b
643,047
def generate_instance_tuples(num_per_class_list):
    """Generate list of tuples [(class_index, instance_index)...]."""
    num_classes = len(num_per_class_list)
    class_and_instance_indices = []
    for i in range(num_classes):
        num_instances = num_per_class_list[i]
        class_and_instance_indices.extend([(i, j) for j in range(num_instances)])
    return class_and_instance_indices
602d4ca15b3a4ffd3fba103a214b3cb113d2035e
301,853
import time
import random
import requests

def _get_page(url, s=None):
    """
    Utility function to play nice when scraping. This will also fetch a page
    according to a session or a simple get.
    """
    if not url:
        # What's a proper null requests object?
        return None
    # Play nice
    time.sleep(random.uniform(0.1, 0.3))
    if not s or isinstance(s, str):
        r = requests.get(url)
    else:
        # should check this is a valid session
        try:
            r = s.open(url)
        except:
            return None
    return r
dd3899c7844c608128b34ab7e9ece03f7b6ad95d
632,575
def max_value_2_compute_all_until_max_capacity(weight_value_tuples, max_capacity):
    """
    Solution: Compute the max value for each capacity from 1 to max_capacity.

    This algorithm will always find the optimal max value, but is much slower
    than the less optimal solution and uses more space.

    Complexity:
        Time: O(n * k) (where n=number of items, k=max capacity)
        Space: O(k)
    """
    # List to contain the max value for each capacity up to max_capacity
    # Ex: Index 1 will contain the max value of weight 1
    max_value_capacity = [0] * (max_capacity + 1)
    # For each weight from 1 to max_capacity, calculate the max value and
    # store it in our list
    for weight_to_compute_max_value_for in range(1, max_capacity + 1):
        for item_weight, item_value in weight_value_tuples:
            if item_weight <= weight_to_compute_max_value_for:
                diff = weight_to_compute_max_value_for - item_weight
                new_max_value = max_value_capacity[diff] + item_value
                max_value_capacity[weight_to_compute_max_value_for] = max(
                    new_max_value,
                    max_value_capacity[weight_to_compute_max_value_for])
    return max_value_capacity[max_capacity]
afd46b947a822d22f16fce21400cc4b7475b068f
544,178
def flag_decomposer(flags: int) -> dict:
    """
    Make font flags human readable.

    :param flags: integer indicating binary encoded font attributes
    :return: dictionary of attributes names and their activation state
    """
    # defaults
    tmp = {"superscript": 0, "italic": 0, "serifed": 0, "monospaced": 0, "bold": 0}
    # check for activation state
    if flags & 2 ** 0:
        tmp["superscript"] = 1
    if flags & 2 ** 1:
        tmp["italic"] = 1
    if flags & 2 ** 2:
        tmp["serifed"] = 1
    if flags & 2 ** 3:
        tmp["monospaced"] = 1
    if flags & 2 ** 4:
        tmp["bold"] = 1
    # return
    return tmp
711ee8b09431844246fa3f303b246440cf073d08
478,578
from typing import Tuple
from typing import List

def _get_value_name_and_type_from_line(*, line: str) -> Tuple[str, str]:
    """
    Get a parameter or return value and type from a specified line.

    Parameters
    ----------
    line : str
        Target docstring line.

    Returns
    -------
    value_name : str
        Target parameter or return value name.
    type_name : str
        Target parameter or return value type name.
    """
    if ':' not in line:
        return '', ''
    splitted: List[str] = line.split(':', maxsplit=1)
    value_name: str = splitted[0].strip()
    type_name: str = splitted[1].strip()
    return value_name, type_name
d2095fa2bc34a7086f60b40373a351f7c984dc96
35,399
from six import text_type

def _ListCtrl_Append(self, entry):
    """
    Append an item to the list control. The `entry` parameter should be a
    sequence with an item for each column
    """
    if len(entry):
        pos = self.InsertItem(self.GetItemCount(), text_type(entry[0]))
        for i in range(1, len(entry)):
            self.SetItem(pos, i, text_type(entry[i]))
        return pos
9188921374117aa3ba8048461513e611230a4601
284,996
def resource_to_type_name(resource):
    """Creates a type/name format from a resource dbo.

    Args:
        resource (object): the resource to get the type_name of

    Returns:
        str: type_name of the resource
    """
    return resource.type_name
a837aac94a493a2201f470670862eb7b22e70941
109,173
def merge(A, B):
    """Merge two sorted lists."""
    C = []
    # Take lesser of two elements
    while A and B:
        if A[0] < B[0]:
            C.append(A.pop(0))
        else:
            C.append(B.pop(0))
    # Grab the rest
    if A:
        C.extend(A)
    elif B:
        C.extend(B)
    return C
b5f015e0ac1410ae3260563798cba74aa98b9ab8
276,030
def get_lids(f):
    """get identifiers that specify an absolute location (i.e. start with '/')"""
    lids = {}
    for ns in f.ddef.keys():
        lids[ns] = []
        structures = f.ddef[ns]['structures']
        for id in structures:
            if id[0] == '/':
                lids[ns].append(id)
    return lids
e5ca9f0b4d9bc9328e6337e5de0256dc9129f397
684,886