Columns: content (string, 39 to 9.28k chars) · sha1 (string, 40 chars) · id (int64, 8 to 710k)
from typing import Any, List, Tuple, Union


def get_sched_value(value_schedule: Union[Any, List[Tuple[int, Any]]], epoch_number: int) -> Any:
    """Get a value that can be scheduled to change based on the epoch number.

    value_schedule is either a scalar value or a list of tuples
    [(epochs, value1), (epochs, value2), ...]"""
    if not isinstance(value_schedule, list):
        return value_schedule
    epoch_sum = 0
    for v in value_schedule:
        epoch_sum += v[0]
        if epoch_number <= epoch_sum:
            return v[1]
    return value_schedule[-1][1]
679d19921fa2e7f748c1403f0b00e57216b99212
223,130
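A quick usage sketch for get_sched_value above; the schedule and learning-rate values are made up for illustration:

# Hypothetical schedule: 0.1 for the first 5 epochs, 0.01 for the next 10, then 0.001.
lr_schedule = [(5, 0.1), (10, 0.01), (1, 0.001)]

assert get_sched_value(lr_schedule, 3) == 0.1     # 3 <= 5
assert get_sched_value(lr_schedule, 12) == 0.01   # 5 < 12 <= 15
assert get_sched_value(lr_schedule, 99) == 0.001  # past the schedule: last value
assert get_sched_value(0.5, 42) == 0.5            # scalars pass through unchanged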
def get_file_type(filename):
    """
    Return the extension (if any) of the ``filename`` in lower case.
    """
    return filename[filename.rfind('.')+1:].lower()
cb0487e0886d60a6d0e5f97fa7d2313293390f5d
694,245
def notas(*num, sit=False):
    """==> Function to analyze the grades and standing of several students.

    :param num: one or more student grades (accepts several)
    :param sit: optional flag indicating whether or not to add the class standing
    :return: dictionary with several pieces of information about the class
    """
    print('\033[1;33m-=\033[m' * 20)
    turma = {}
    maior = menor = soma = cont = 0
    for c in num:
        soma += c
        if cont == 0:
            maior = menor = c
        if c > maior:
            maior = c
        elif c < menor:
            menor = c
        cont += 1
    media = soma / cont
    turma['total'] = cont
    turma['maior'] = maior
    turma['menor'] = menor
    turma['media'] = media
    if sit:
        if media >= 7:
            turma['situação'] = 'Boa'
        elif 5 <= media < 7:
            turma['situação'] = 'Razoavel'
        else:
            turma['situação'] = 'Ruim'
    return turma
25f166f7b56b6a92a3585c53f953d26da23aec7a
324,556
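A small usage sketch of notas above, with made-up grades:

resultado = notas(5.5, 9.0, 7.5, sit=True)
print(resultado)
# -> {'total': 3, 'maior': 9.0, 'menor': 5.5, 'media': 7.33..., 'situação': 'Boa'}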
def _do_step(x, y, z, tau, kappa, d_x, d_y, d_z, d_tau, d_kappa, alpha):
    """
    An implementation of [1] Equation 8.9

    References
    ----------
    .. [1] Andersen, Erling D., and Knud D. Andersen. "The MOSEK interior point
           optimizer for linear programming: an implementation of the
           homogeneous algorithm." High performance optimization. Springer US,
           2000. 197-232.
    """
    x = x + alpha * d_x
    tau = tau + alpha * d_tau
    z = z + alpha * d_z
    kappa = kappa + alpha * d_kappa
    y = y + alpha * d_y
    return x, y, z, tau, kappa
0c38e2f18f7c934d910c024c23d35e13660eec08
688,500
def loan_iof(
    principal,
    amortizations,
    return_days,
    daily_iof_aliquot,
    complementary_iof_aliquot
):
    """The total IOF of a loan.

    If :math:`s` is the principal, :math:`A_i` is the :math:`i`-th amortization,
    :math:`n_1,\\ldots,n_k` are the return days, :math:`I^*` is the daily IOF
    aliquot and :math:`I^{**}` is the complementary IOF aliquot, then the loan
    IOF tax amount is

    .. math::

        \\mathrm{IOF}(s, I^*, I^{**}, (A_1,\\ldots,A_k),(n_1,\\ldots,n_k)) =
        sI^{**} + \\sum_{i=1}^k A_i \\min(n_i I^*, 0.015)

    Parameters
    ----------
    principal: float, required
        Loan principal.
    amortizations: list, required
        List of floats providing the amortization due to each payment.
    return_days: list, required
        List of integers with the numbers of days since the loan was granted.
    daily_iof_aliquot: float, required
        Daily IOF aliquot. Its value is defined by law.
    complementary_iof_aliquot: float, required
        Complementary IOF aliquot. Its value is defined by law.
    """
    p = principal
    d_iof = daily_iof_aliquot
    c_iof = complementary_iof_aliquot
    return c_iof * p + sum(a * min(n * d_iof, 0.015)
                           for a, n in zip(amortizations, return_days))
22a8ac058ad97bc18064b42947b11d7d6db7949e
309,205
def normalize(X):
    """ Normalize vector or matrix columns X """
    return X / X.sum(axis=0)
62703e2ef32af7658fbb632f2d6fad8a3a41aef4
502,597
def instant_name_to_class_name(name):
    """
    This will convert from 'parent_name.child_name' to 'ParentName_ChildName'
    :param name: str of the name to convert
    :return: str of the converted name
    """
    name2 = ''.join([e.title() for e in name.split('_')])
    return '_'.join([e[0].upper() + e[1:] for e in name2.split('.')])
380acd68925ce24846bc30737d7d066609ae3525
635,507
import logging


def get_logger(name):
    """
    Internally calls the logging.getLogger function with the 'name' argument
    to create or retrieve a logger object. It is recommended to pass __name__
    as argument when calling get_logger. The returned logger object logs to
    the standard error stream and formats the messages appropriately.

    :param name: The name that gets passed to the logging.getLogger function.
    :return: A logger instance with the given name.
    """
    logger = logging.getLogger(name)
    logger.setLevel(logging.INFO)
    stderr_handler = logging.StreamHandler()
    formatter = logging.Formatter('[%(asctime)s] %(levelname)s | %(name)s | %(message)s')
    stderr_handler.setFormatter(formatter)
    logger.addHandler(stderr_handler)
    return logger
462b84f940e3c0804811f3096e3cfdccc6745b4b
544,219
def euclidean(N: int, a: int) -> int:
    """
    Uses the Euclidean Algorithm to calculate the GCD of `N` and `a`.
    """
    remainder = N % a
    if remainder == 0:
        return a
    return euclidean(a, remainder)
9ea543061277978ff844150ccf7810c1031c728e
149,181
import struct


def parse_quantization(read_buffer, sqcd):
    """Tease out the quantization values.

    Parameters
    ----------
    read_buffer:
        sequence of bytes from the QCC and QCD segments.

    Returns
    -------
    tuple
        Mantissa and exponents from quantization buffer.
    """
    numbytes = len(read_buffer)
    exponent = []
    mantissa = []
    if sqcd & 0x1f == 0:  # no quantization
        data = struct.unpack('>' + 'B' * numbytes, read_buffer)
        for j in range(len(data)):
            exponent.append(data[j] >> 3)
            mantissa.append(0)
    else:
        fmt = '>' + 'H' * int(numbytes / 2)
        data = struct.unpack(fmt, read_buffer)
        for j in range(len(data)):
            exponent.append(data[j] >> 11)
            mantissa.append(data[j] & 0x07ff)
    return mantissa, exponent
0a4287c7ac218427da541cbaf860670c2c5aba48
506,551
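A usage sketch for parse_quantization above, building a two-value quantization buffer by hand; the sqcd value 0x22 is just an example of a non-zero quantization style:

import struct

# Two 16-bit words: exponent in the top 5 bits, mantissa in the low 11 bits.
read_buffer = struct.pack('>HH', (10 << 11) | 34, (9 << 11) | 12)
mantissa, exponent = parse_quantization(read_buffer, 0x22)
assert mantissa == [34, 12]
assert exponent == [10, 9]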
from typing import List


def parse_input_file(file_path: str) -> List[List[int]]:
    """Parse a file with the following format: 21 22 24 12 7 21 23

    Returns each line as integers in a nested list
    """
    with open(file_path) as input_file:
        parsed_file = [list(map(int, line.split())) for line in input_file.readlines()]
    return parsed_file
f281dbcc7c9dc70eab491b1a334e0cb0cc3e45e4
697,049
import math


def get_distance(x1, x2, y1, y2):
    """
    Function to get the distance between any two points
    """
    return math.sqrt((x1-x2) ** 2 + (y1-y2) ** 2)
2dd65e26089c90dd3b7d83566af83463c943b8cc
145,903
import uuid
import hashlib


def _create_token(user):
    """Create a unique token for a user.

    The token is created from the user id and a unique id generated from
    UUIDv4. Then both are hashed using MD5 digest algorithm.
    """
    _id = f"{user.id}-{str(uuid.uuid4())}"
    _hash = hashlib.md5(_id.encode('ascii'))
    return _hash.hexdigest()
ea2458c45043ed9c0902b0a88d20c9c13ede00d0
659,292
import base64
import json


def _read_pubsub_json(event):
    """Extracts the json payload from a pub/sub message.

    Args:
        event: A Pub/Sub event.

    Returns:
        The json_payload from a pub/sub message.
    """
    pubsub_message = base64.b64decode(event['data']).decode('utf-8')
    return json.loads(pubsub_message)
9bfafe8f36e6bcd0db68f9d4528081c44067b04f
692,962
import re


def check_string(text, search=re.compile(r'[^A-Za-z0-9-_]').search):
    """Test that a string doesn't contain unwanted characters.

    :param text: Text that you want to verify is compliant.
    :type text: str
    :param search: Regex to use to check the string. Defaults to allowing
                   only the characters [A-Za-z0-9-_].
    :return: bool
    """
    return not bool(search(text))
59348de4e86bc762cc8a7aef2243e1d2b2ce9f85
42,511
from datetime import datetime


def get_es_index_name(project, meta):
    """
    Get the name for the output ES index

    :param project: seqr project identifier
    :param meta: index metadata
    :return: index name
    """
    return '{project}__structural_variants__{sample_type}__grch{genome_version}__{datestamp}'.format(
        project=project,
        sample_type=meta['sampleType'],
        genome_version=meta['genomeVersion'],
        datestamp=datetime.today().strftime('%Y%m%d'),
    ).lower()
fc1245287aed07ddd8d90f33cadc095c22f944a3
7,256
def isfused(cycle_sets):
    """Determine whether all cycles (represented as sets of node IDs) share
    at least one node."""
    intersection = cycle_sets[0]
    for cycle in cycle_sets[1:]:
        intersection = intersection.intersection(cycle)
    return len(intersection) > 0
1011e472f22cde7bfd186be64bbe688092519ce2
199,062
def split_obj_identifier(obj_identifier):
    """
    Break down the identifier representing the instance.

    Converts 'notes.note.23' into ('notes.note', 23).
    """
    bits = obj_identifier.split('.')
    if len(bits) < 2:
        return (None, None)
    pk = '.'.join(bits[2:])  # In case Django ever handles full paths...
    object_path = '.'.join(bits[0:2])
    return (object_path, pk)
60d3a9f478ddff9d5e44c7955ca01e14d78a706f
319,681
def link_regulator(incoming_links: list) -> list:
    """
    Regulates the links coming from Google Search.

    Input URL:  "/url?q=https://www.example.com/SparqlEndpoint&sa=U&ved=2ahUKEwiU"
    Output URL: "https://www.example.com/SparqlEndpoint"

    :param incoming_links: List of links to be regulated
    :return: List of regulated links
    """
    regulated_links = list()
    for link in incoming_links:
        if "/url?q=" in link:
            # Decode percent-encoded '?' and '=' before trimming the Google wrapper.
            if "%3F" in link and "%3D" in link:
                link = link.replace("%3F", "?").replace("%3D", "=")
            regulated_links.append(link[link.index("=") + 1:link.index("&")])
        else:
            continue
    return regulated_links
9c7c451c65da2f3f0240c26903cdfe2a9ced1913
656,169
def error_500(error):
    """Return a custom 500 error."""
    return 'Sorry, internal server error.'
f45c15482d3c768ed9672a39ad14adac24d2ebfd
653,050
import hashlib


def validateFile(data, hash):
    """
    Validates a file using the MD5 algorithm. Returns True if the file's MD5
    hash signature matches the provided hexadecimal string, False otherwise.
    """
    h = hashlib.md5()
    h.update(data)
    return h.hexdigest().lower() == hash.lower()
992170e8cc731f21d65834a332983b5759c3a636
510,385
def serialize_article_to_values(article):
    """Serialize an article to a list of values.

    Args:
        article: The article to be serialized.

    Returns:
        list of primitives that can be used with a DB API v2 compliant
        connection.
    """
    author = article.get_author()
    author_str = author if author else ''
    publish_date = article.get_publish_date()
    publish_date_str = publish_date.isoformat() if publish_date else ''
    crawl_date_str = article.get_crawl_date().isoformat()
    return [
        article.get_source(),
        article.get_source_feed(),
        article.get_title(),
        article.get_description(),
        publish_date_str,
        crawl_date_str,
        article.get_link(),
        author_str
    ]
aefb17035fa798103cd148318651f3e57de02ec7
179,939
def _unpack_player(sid, p, **_):  # pylint: disable=invalid-name
    """Unpack a player"""
    return sid, p
c0c0e37bafbd14488c6cf7ca58ecc8ac5f330a45
79,943
def _get_directive(line_info):
    """Gets a directive from the start of the line.

    If the line is ":param str foo: Description of foo", then
    _get_directive(line_info) returns "param str foo".

    Args:
        line_info: Information about the current line.

    Returns:
        The contents of a directive, or None if the line doesn't start with a
        directive.
    """
    if line_info.stripped.startswith(':'):
        return line_info.stripped.split(':', 2)[1]
    else:
        return None
569683434a796957b222bd9c8f55e1d3c7798aa0
335,641
def parity(n):
    """
    parity(n) determines the value (-1)**n

    input
        n: integer
    output
        p: sign
    """
    if n % 2 == 0:
        p = 1
    else:
        p = -1
    return p
5688fdb28efed43af30bb8b056885dfaf6b32652
332,513
def get_match(match, index, default=''):
    """
    Returns a value from the match list for a given index. If the index is out
    of bounds, `default` is returned.
    """
    if index >= len(match):
        return default
    else:
        return match[index]
8669380d3d5e3379d7169754c284498872f46570
597,586
import requests
from bs4 import BeautifulSoup


def get_sra_ids(gsm_ids):
    """
    Get SRA IDs

    Args:
        gsm_ids: the GSM IDs

    Returns:
        the list of SRA IDs
    """
    url = "https://eutils.ncbi.nlm.nih.gov/entrez/eutils/esearch.fcgi?db=sra&term={}".format(" or ".join(gsm_ids))
    r = requests.get(url)
    soup = BeautifulSoup(r.text, "xml")
    uid_list = [x.text for x in soup.find_all("Id")][::-1]
    url = "https://eutils.ncbi.nlm.nih.gov/entrez/eutils/efetch.fcgi?db=sra&rettype=runinfo&id={}". \
        format(",".join(uid_list))
    r = requests.get(url)
    soup = BeautifulSoup(r.text, "xml")
    return [x.text for x in soup.find_all("Run")]
c3d40c97b301660998dc365ca7651e69e07ea63e
513,462
import random


def position_mod_normal(in_str):
    """Select any position in the given input string with normally distributed
    likelihood where the average of the normal distribution is set to one
    character behind the middle of the string, and the standard deviation is
    set to 1/4 of the string length.

    This is based on studies on the distribution of errors in real text which
    showed that errors such as typographical mistakes are more likely to
    appear towards the middle and end of a string but not at the beginning.

    Return 0 if the string is empty.
    """
    if (in_str == ''):  # Empty input string
        return 0
    str_len = len(in_str)
    mid_pos = str_len / 2.0 + 1
    std_dev = str_len / 4.0
    max_pos = str_len - 1
    pos = int(round(random.gauss(mid_pos, std_dev)))
    while ((pos < 0) or (pos > max_pos)):
        pos = int(round(random.gauss(mid_pos, std_dev)))
    return pos
6ed75d80ccb4c4328639549133748126c7e3eec4
76,853
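A brief usage sketch for position_mod_normal above; the output varies per call, so only the valid range is asserted:

import random

random.seed(42)  # arbitrary seed, just for reproducibility
text = "typographical"
pos = position_mod_normal(text)
assert 0 <= pos <= len(text) - 1
assert position_mod_normal("") == 0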
def wrapPos(angDeg):
    """Returns the angle (in degrees) wrapped into the range [0, 360)"""
    res = angDeg % 360.0
    # First wrap into [0, 360]; the result is 360 if angDeg < 0 but so near 0
    # that adding 360 rounds it.
    if res == 360.0:
        return 0.0
    return res
c87d73d9155cc6b65ff64f92f635162d9c25c913
83,062
def get_table(client, start, end, table_id, date_part=None):
    """Getting table data from bigquery client.

    Args:
        client (bigquery.Client): Configured client to access bigquery
        start (str): time str in the format of yy-mm:dd [HH-MM-SS.FFFFFF]
        end (str): time str in the format of yy-mm:dd [HH-MM-SS.FFFFFF]
        table_id (str): table name of any bigquery table without array column.
        date_part (str): date_part param for SQL TIMESTAMP_TRUNC() function.

    Returns:
        pd.DataFrame: with index being the timestamp of the data
    """
    # enable the ability to obtain averaged data.
    if date_part is None:
        table_query_str = f"SELECT * FROM cfog.{table_id} " + \
                          f"WHERE timestamp BETWEEN '{start}' AND '{end}' " + \
                          "ORDER BY timestamp ASC"
    else:
        # first obtain a list of field names
        table_ref = client.dataset('cfog').table(table_id)
        table = client.get_table(table_ref)
        schemas = [s for s in table.schema if s.field_type in ['INT', 'FLOAT']]
        field_names = [s.name for s in schemas]
        field_name_strs = ','.join([f"AVG({name}) as {name}" for name in field_names])
        trunc_exp = f"TIMESTAMP_TRUNC(timestamp, {date_part}) AS timestamp"
        table_query_str = f"SELECT {trunc_exp}, {field_name_strs} FROM cfog.{table_id} " + \
                          f"WHERE timestamp BETWEEN '{start}' AND '{end}' " + \
                          "GROUP BY timestamp ORDER BY timestamp"
    print("Executing bigquery query string: ")
    print(table_query_str + '\n')
    table_query_job = client.query(table_query_str)
    table_query_job.result()
    print("Query job complete. Start Loading Data. ")
    table_data = table_query_job.to_dataframe().set_index('timestamp')
    print(f"Query complete. Total number of data entries: {table_data.shape[0]}.\n\n")
    return table_data
5ba84bb622d8bd818abb6fd325885c08cf585a5f
167,677
def transform_op_if_inside_handler(info, op, keep_if_possible=True):
    """Transform an optional op only if it is inside the subgraph.

    This handler is typically used to handle original ops: it is fine to keep
    them if they are inside the subgraph, otherwise they are just ignored.

    Args:
        info: Transform._TmpInfo instance.
        op: the optional op to transform (or ignore).
        keep_if_possible: re-attach to the original op if possible, that is,
            if the source graph and the destination graph are the same.

    Returns:
        The transformed op or None.
    """
    if op in info.sgv.ops:
        return info.transformed_ops[op]
    else:
        if keep_if_possible and info.graph is info.graph_:
            return op
        else:
            return None
b7566d5c2e5a8e1fd0bfa073e50270d4ea8f8e28
189,140
def palindrome(word: str) -> bool:
    """
    Check if a string is palindrome
    :param word: the word to analyze
    :return: True if the statement is verified, False otherwise
    """
    i = 0
    while i < int((len(word) + 1) / 2):
        if word[i] != word[-(i + 1)]:
            return False
        i += 1
    return True
98da35f89c6c245c66aa5b0c2d65305b9090b1da
585,424
from typing import Set


def file_to_set(file_name: str) -> Set[str]:
    """
    Loads a file to a set.

    :returns: the lines of the file in a ``set``, an empty ``set`` if
        _file_name_ is ``None``.
    """
    if file_name:
        with open(file_name, 'rt') as inf:
            set_from_file = set(line.strip() for line in inf)
    else:
        set_from_file = set()
    return set_from_file
8ff2852992516726dd009249c9db6eda6896e566
147,945
def clear_url(url):
    """ Remove domain and protocol from url """
    if url.startswith('http'):
        return '/' + url.split('/', 3)[-1]
    return url
769f0d321cb085218e02d483f44fbf06c300aac1
278,855
def limbs_for_int(v):
    """
    Return the number of bytes required to represent this integer.
    """
    return (v.bit_length() + 7) >> 3
3ef1d3e4863065a1afaa0911934c5dc562dada06
297,270
def validate_maximum_distance(vrp, **kwargs):
    """
    Validates the maximum travel distance of given individual's solution.

    :param vrp: An individual subject to validation.
    :param kwargs: Keyword arguments. The following are expected from it:
        - (int) 'maximum_distance': Maximum travel distance for every vehicle.
        - (numpy.ndarray) 'path_table': Square matrix that represents distances
          between nodes.
    :return: True, if solution is distance-wise valid. False if not.
        Second return value is a string that provides details about it.
    """
    maximum_distance = kwargs["maximum_distance"]
    path_table = kwargs["path_table"]
    route_list = vrp.get_route_list()
    for active_route in route_list:
        route_distance = 0
        if len(active_route) <= 1:
            continue
        recent_node = active_route[0]
        recent_depot = active_route[0]
        for i in range(1, len(active_route)):
            point_a = active_route[i - 1]
            point_b = active_route[i]
            route_distance += path_table[point_a][point_b]
            if route_distance > maximum_distance:
                return False, "Maximum distance constraint violation (Route Node {} / {}, situated at {}): {} / {}" \
                    .format(i, len(active_route) + 1, point_b, route_distance, maximum_distance)
            # Mark down most recent node for the return trip.
            recent_node = point_b
        # Traveling back to the depot node.
        route_distance += path_table[recent_node][recent_depot]
        if route_distance > maximum_distance:
            return False, "Maximum distance constraint violation (Return to Depot Node {}): {} / {}" \
                .format(recent_depot, route_distance, maximum_distance)
        # Save route time for later inspections.
        vrp.route_distances.append(route_distance)
    return True, "Maximum distance constraint not violated"
ee5e7c44b0d095835e5ab350bff32e10ac1abb88
556,159
import pathlib


def _detect_home_location() -> pathlib.Path:
    """Detects the location of the root directory"""
    # path/to/home/backend/core/config_loader.py
    path_to_self = pathlib.Path(__file__).absolute()
    # path/to/home/backend/core/
    path_to_core_module = path_to_self.parent
    # path/to/home/backend/
    path_to_backend_module = path_to_core_module.parent
    # path/to/home/
    path_to_home = path_to_backend_module.parent
    return path_to_home
c309656d5a56261fd96c86c179947981dc65dc58
10,763
import csv


def load_original_file(filename, delimiter, skipline):
    """
    File loader

    :param filename: str: path
    :param delimiter: str: delimiter used, passed to csv.reader
    :param skipline: bool: does the first line contain column names (True)
        or data (then False)
    :return: data, firstline
    """
    firstline = None
    data = []
    with open(filename, 'r') as data_fid:
        reader = csv.reader(data_fid, delimiter=delimiter, quotechar='"')
        if skipline:
            firstline = next(reader)
        for row in reader:
            data.append(row)
    return data, firstline
88d985cd86607e4d0b485862246dd64133499c3e
571,907
def IntToTime(time_int):
    """
    Function used to convert an integer into a time-string.

    Args:
        time_int (int): the integer.

    Returns:
        string: the converted time-string.

    Testing:
        >>> IntToTime( 20220314092000 )
        '2022.03.14 09:20:00'
        >>> IntToTime( 20220314094000 )
        '2022.03.14 09:40:00'
    """
    time_string = str(time_int)
    time_string = time_string[:4] + "." + time_string[4:]
    time_string = time_string[:7] + "." + time_string[7:]
    time_string = time_string[:10] + " " + time_string[10:]
    time_string = time_string[:13] + ":" + time_string[13:]
    time_string = time_string[:16] + ":" + time_string[16:]
    return time_string
d77c58b45976d55fa3e193536e211bdb6d3a240b
279,220
def ArchToBits(arch):
    """Takes an arch string like x64 and ia32 and returns its bitwidth."""
    if not arch:
        # Default to x64.
        return 64
    elif arch == 'x64':
        return 64
    elif arch == 'ia32':
        return 32
    assert False, 'Unsupported architecture'
a87b38eca83aa968d9ab235e064f16855a346ad2
461,853
from pathlib import Path


def script_loc(request):
    """Return the directory of the currently running test script"""
    return Path(request.fspath).parent
23ebda0ad4dbdd94808b4b3da54eb86b634849e7
260,225
import random


def backoff_time(attempt, retry_backoff=2., max_delay=30.):
    """Compute randomized exponential backoff time.

    Args:
        attempt (int): attempt number, starting at zero.

    Keyword Args:
        retry_backoff (float): backoff time on the first attempt.
        max_delay (float): maximum returned value.
    """
    delay = retry_backoff * (2 ** attempt)
    # Add +-25% of variation.
    delay += delay * ((random.random() - 0.5) / 2.)
    return min(delay, max_delay)
907e636dc60a81fa9d7d0ebf5c42841b828a693c
32,275
import re


def create_auxiliary_table_name(field_name, table_name):
    """
    Args:
        field_name: A table field whose value identifies a record in a
            foreign table.
        table_name: The table containing the field field_name.

    Returns:
        A conventional name for the foreign table. Some whitespace and
        special-character stripping is enforced.
    """
    if not re.search('^' + table_name, field_name):
        composite = table_name + ' ' + field_name
    else:
        composite = field_name
    return re.sub(r'[\s\-\_]+', '_', composite).lower()
cd744f1fde76d23252e37bb85a51f5c4709c7dea
369,206
def XlaLaunchOpCount(labels):
    """Count how many XlaLaunch labels are present."""
    return sum("XlaLaunch(" in x for x in labels)
e3b083de64bf1627ca98c427a268412cacf9f43b
23,499
def format_date(date: str) -> str:
    """return YYYYmmdd as YYYY-mm-dd"""
    return f"{date[:4]}-{date[4:6]}-{date[6:]}"
dfcc434006df8a7f6bd89003f592792faa891f30
20,911
from typing import Dict, Tuple


def parse_line_protocol_stat_key(key: str) -> Tuple[str, Dict[str, str]]:
    """Parse a line-protocol-style key into a stat prefix and labels.

    Examples:
        SNMP_WORKER;hostname=abc.com,worker=snmp-mti

    will become:

        ("SNMP_WORKER", {"hostname": "abc.com", "worker": "snmp-mti"})
    """
    try:
        prefix, raw_labels = key.split(";", 1)
        labels = dict(raw_label.split("=", 1) for raw_label in raw_labels.split(","))
        return prefix, labels
    except ValueError:
        return key, {}
a6806f7dd67fb2a4734caca94bff3d974923f4b2
5,382
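A usage sketch for parse_line_protocol_stat_key above, reusing the docstring's example plus a key with no labels:

prefix, labels = parse_line_protocol_stat_key("SNMP_WORKER;hostname=abc.com,worker=snmp-mti")
assert prefix == "SNMP_WORKER"
assert labels == {"hostname": "abc.com", "worker": "snmp-mti"}

# A key without ';' raises ValueError internally and falls back to no labels.
assert parse_line_protocol_stat_key("PLAIN_KEY") == ("PLAIN_KEY", {})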
def getProducts(products_list):
    """Reads products from the SBML file and returns a dictionary"""
    final_products = {}
    for prod in products_list:
        products = prod.getElementsByTagName("speciesReference")
        for p in products:
            final_products[p.getAttribute("species")] = float(
                p.getAttribute("stoichiometry")
            )
    return final_products
4a85f0fa6106cedb52094d749d43fe10304499cc
490,958
import re


def extract_authorization_token(request):
    """
    Get the access token using the Authorization Request Header Field method.
    Or try getting it via GET.

    See: http://tools.ietf.org/html/rfc6750#section-2.1

    Return a string.
    """
    auth_header = request.META.get('HTTP_AUTHORIZATION', '')
    if re.compile(r'^[Bb]earer\s{1}.+$').match(auth_header):
        access_token = auth_header.split()[1]
    else:
        access_token = request.GET.get('access_token', '')
    return access_token
9776df3ecd59ba3db15664259a6e65114ec61a07
700,716
def _binary_op(result_name, func_name, arg1_name, arg2_name):
    """
    Generates a binary operator indicated by func_name in infix notation with
    arguments arg1_name and arg2_name, storing the result in result_name.

    Supported func_names are add, sub, mul, and div.
    """
    funcs = {'add': '+', 'sub': '-', 'mul': '*', 'div': '/'}
    return f"{result_name} = {arg1_name} {funcs[func_name]} {arg2_name}"
5a8cb925aefa4850f182c87595fee9c1409809c6
64,380
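A usage sketch for _binary_op above, with made-up variable names:

assert _binary_op("z", "mul", "x", "y") == "z = x * y"
assert _binary_op("total", "add", "a", "b") == "total = a + b"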
def get_repository_id(repository_info, api_root, repository_name=None):
    """Return the ID of a repository from the server.

    This will look up all accessible repositories on the server and try to
    find the ID of one that matches the provided repository information.

    Args:
        repository_info (rbtools.clients.RepositoryInfo):
            The scanned repository information.

        api_root (rbtools.api.resource.RootResource):
            The root resource for the API.

        repository_name (unicode, optional):
            An explicit repository name provided by local configuration. If
            this is not provided, :py:attr:`RepositoryInfo.name
            <rbtools.clients.RepositoryInfo.name>` will be used, if available.

    Returns:
        int:
        The ID of the repository, or ``None`` if not found.
    """
    if repository_name is None:
        repository_name = repository_info.name
    detected_paths = repository_info.path
    if not isinstance(detected_paths, list):
        detected_paths = [detected_paths]
    repositories = api_root.get_repositories(
        only_fields='id,name,mirror_path,path',
        only_links='')
    for repo in repositories.all_items:
        # NOTE: Versions of Review Board prior to 1.7.19 didn't include a
        # 'mirror_path' parameter, so we have to conditionally fetch it.
        if (repo.name == repository_name or
            repo.path in detected_paths or
            getattr(repo, 'mirror_path', None) in detected_paths):
            return repo.id
    return None
02ce8504df5900c69f80747534413724e0b2baab
395,791
def check_id(string):
    """
    Runs a basic check for things that look like ID numbers. Currently, simply
    checks to see that numerals outnumber other types of characters, and that
    the string is probably long enough to contain an ID.
    """
    string = string.strip()
    if len(string) == 0 or len(string) < 4:
        return 0.0
    count = 0.0
    for c in string:
        if c.isdigit():
            count += 1
    if count / len(string) > 0.5:
        return count / len(string)
    else:
        return None
a5cad89ddc71a754fd5f3f2b8a17aa723eb5eca4
180,861
def get_t_demand_list(temp_curve, th_curve):
    """
    Sorts thermal energy demand based on values of ambient temperatures.

    Parameters
    ----------
    temp_curve : list of ambient temperatures for one year
    th_curve : list of thermal energy demand for one year

    Returns
    -------
    t_demand_curve : thermal energy curve sorted based on ambient
        temperature values
    """
    return [th_demand for _, th_demand in sorted(zip(temp_curve, th_curve))]
f4f01d609e593653c19faa6c82fd3db07db60510
464,760
def subdict_in_dict(subdict, superdict):
    """Return True if every key/value pair of subdict is also in superdict,
    else False.

    >>> subdict_in_dict({"k1": "v1"}, {"k1": "v1"})
    True
    >>> subdict_in_dict({"k1": "v1"}, {"k1": "v1", "k2": "v2"})
    True
    >>> subdict_in_dict({}, {"k1": "v1"})
    True
    >>> subdict_in_dict({"k1": "v1"}, {})
    False
    """
    return all(item in superdict.items() for item in subdict.items())
b2d802045002de15c9eec86028317527a6480304
244,378
def in_list(metric_name, check_list):
    """
    Check if the metric is in list.

    # @added 20170602 - Feature #2034: analyse_derivatives
    #                   Feature #1978: worker - DO_NOT_SKIP_LIST

    This is a part copy of the SKIP_LIST allows for a string match or a match
    on dotted elements within the metric namespace used in Horizon/worker
    """
    metric_namespace_elements = metric_name.split('.')
    metric_in_list = False
    for in_list in check_list:
        if in_list in metric_name:
            metric_in_list = True
            break
        in_list_namespace_elements = in_list.split('.')
        elements_matched = set(metric_namespace_elements) & set(in_list_namespace_elements)
        if len(elements_matched) == len(in_list_namespace_elements):
            metric_in_list = True
            break
    if metric_in_list:
        return True
    return False
51d401585a292933c8652a93a42391ec001efcc5
400,129
import getpass


def get_pass(prompt, require=False):
    """Ask the user for a password.

    Args:
        prompt (unicode):
            The text to prompt the user with.

        require (bool, optional):
            Whether to require a result. If ``True``, this will keep prompting
            until a non-empty value is entered.

    Returns:
        bytes:
        The entered password.
    """
    prompt = str(prompt)
    if require:
        password = None
        while not password:
            password = getpass.getpass(prompt)
    else:
        password = getpass.getpass(prompt)
    return password
220f53a5b53a57b8046709a0c7b0da20bbad6418
396,179
import random


def random_horizontal_flip(image, bboxes):
    """
    Randomly horizontally flip the image and correct the boxes.

    :param image: BGR image data, shape is [height, width, channel]
    :param bboxes: bounding boxes, shape is [num, 5]; column 0 (x) is
        mirrored and column 4 (angle) is negated on flip
    :return: result
    """
    if random.random() < 0.5:
        _, w, _ = image.shape
        image = image[:, ::-1, :]
        bboxes[:, 0] = w - bboxes[:, 0]
        bboxes[:, 4] = -bboxes[:, 4]
    return image, bboxes
2a865b32a9ed94fdee35b4b075313a5e9d733e90
687,605
def _sudoku_syntax(file):
    """
    Return the full name of the given sudoku syntax based on the base name.
    """
    return "Packages/Sudoku/resources/syntax/%s.sublime-syntax" % file
4db4e557b416b578a9202980589ff8b0de1c5acb
314,136
def parse_slate(slate):
    """Parses slate document

    Args:
        slate (dict): slate document

    Returns:
        dict
    """
    wanted = ['_id', 'slateTypeName', 'siteSlateId', 'gameCount', 'start', 'end', 'sport']
    return {k: slate[k] for k in wanted}
f0d422d3b934ae20c6a8ad758aa1e92f5bd85c23
195,109
def isColorImage(np_image):
    """
    Check if image is colored (has 3 channels)

    Return True if image is colored, False otherwise
    """
    if len(np_image.shape) == 3:
        if np_image.shape[2] == 3:
            return True
    return False
91d8749905727af1c02caa3a66484b31eb931a94
51,739
import re


def bundle_offset(fname):
    """
    >>> bundle_offset("path/to/R0000C0000.bundle")
    (0, 0)
    >>> bundle_offset("path/to/R0380C1380.bundle")
    (4992, 896)
    """
    match = re.search(r'R([A-F0-9]{4,})C([A-F0-9]{4,}).bundle$', fname, re.IGNORECASE)
    if match:
        r = int(match.group(1), 16)
        c = int(match.group(2), 16)
        return c, r
9e0627e9085f47ec3dfff8241bff083d45849e3a
468,349
def delete_at(my_list=[], idx=0):
    """ deletes an element from a list at a given index """
    l_len = len(my_list)
    if idx >= l_len or idx < 0:
        return (my_list)
    del my_list[idx]
    return (my_list)
63c1897eb87a2feed7c013c0b277bacd7d3f61d5
677,093
def _extract_exception_details(request, response, exception):
    """Extracts exception information from a request-response and an
    exception.
    """
    # Set some standard values to fall back to if nothing comes of parsing the
    # request and exception details.
    code = 500  # Internal server error
    title = 'Unknown error'
    explanation = 'An unknown error occurred, the error has been logged.'
    if hasattr(exception, 'code') and exception.code:
        code = exception.code
    # If the exception was thrown as part of the RequestHandler.abort, the
    # exception will be of the type WSGIHttpException and will contain a
    # comment/an explanation and/or a detail/title. It is confusing the way
    # webapp2 handles these things. "Explanation" is set internally but only
    # comment is available through the abort method.
    if hasattr(exception, 'detail') and exception.detail:
        title = exception.detail
    elif hasattr(exception, 'title') and exception.title:
        title = exception.title
    if hasattr(exception, 'comment') and exception.comment:
        explanation = exception.comment
    elif hasattr(exception, 'explanation') and exception.explanation:
        explanation = exception.explanation
    return (code, title, explanation)
018c4b86a3df7dd0d53a2b558e27efb577450126
592,271
def _extract_keys_to_unique_list(lists_of_dictionaries):
    """
    Extract the keys for a list of dictionaries and merge them into a unique
    list.

    :param lists_of_dictionaries: List of dictionaries to pull unique keys from.
    :type lists_of_dictionaries: list(dict)
    :return: Merged list of keys into a unique list.
    :rtype: list
    """
    merged_list = list(lists_of_dictionaries[0].keys())
    for idx, d in enumerate(lists_of_dictionaries):
        if idx != 0:
            merged_list = merged_list + list(set(list(d.keys())) - set(merged_list))
    return merged_list
9f94d46cec063a46fd4b506cc08e5cf20b3b98ac
66,129
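A usage sketch for _extract_keys_to_unique_list above; keys from the first dictionary keep their order, later keys are appended:

dicts = [{"a": 1, "b": 2}, {"b": 3, "c": 4}, {"d": 5}]
assert _extract_keys_to_unique_list(dicts) == ["a", "b", "c", "d"]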
def strategy_func_longest(zmws):
    """
    >>> strategy_func_longest([])
    []
    >>> strategy_func_longest([('synthetic/1', 9)])
    [('synthetic/1', 9)]
    >>> strategy_func_longest([('synthetic/1', 9), ('synthetic/2', 21), ('synthetic/3', 9), ('synthetic/4', 15), ('synthetic/5', 20)])
    [('synthetic/2', 21), ('synthetic/5', 20), ('synthetic/4', 15), ('synthetic/1', 9), ('synthetic/3', 9)]
    """
    return sorted(zmws, key=lambda x: x[1], reverse=True)
5e83f5d396ccc81c9861a9bc4151d184d188ae83
145,194
def field(request):
    """
    Fixture for returning the field.

    Needed because indirect=True is used for loading the datasets.
    """
    return request.param
41320222310d51c0dec2fd3b03feda5a045fe3bb
334,686
def sequential_search(value, array):
    """
    Implementation of a sequential search algorithm.

    Arguments:
        value: Any. Value to be searched for in the list
        array: list. List in which the value will be searched

    Returns the index of the value in "array", or -1 if it is not present.
    """
    for i in range(0, len(array)):
        if array[i] == value:
            return i
    return -1
84b0fa89752d44d30fcb3f581617f6e65c3f5a0e
183,961
def find_y_overlap(rect1, rect2):
    """ Return bottom_y and height of the overlapping y of two rects """
    r1bottom = rect1['bottom_y']
    r1top = r1bottom + rect1['height']
    r2bottom = rect2['bottom_y']
    r2top = r2bottom + rect2['height']
    highest_start_point = r1bottom if r1bottom >= r2bottom else r2bottom
    lowest_end_point = r1top if r1top <= r2top else r2top
    if highest_start_point < lowest_end_point:
        return highest_start_point, lowest_end_point - highest_start_point
    else:
        return None
f5e9b61371b4dfc100e443b42bd6a640dc9440fb
143,221
def peak_to_entry_text(peak, chromatogram):
    """
    For writing the peak's row in a peak table.

    Parameters
    ----------
    peak: Classes.Peak
    chromatogram: Classes.Chromatogram

    Returns
    -------
    entry: str
        Row for the peak table output
    """
    st_ind = 0
    end_ind = 0
    peak_start = 0.0
    peak_end = 0.0
    peak_height = 0.0
    if len(peak.indices) > 0:
        st_ind = peak.indices[0]
        end_ind = peak.indices[-1]
        peak_start = chromatogram.time[st_ind]
        peak_end = chromatogram.time[end_ind]
        peak_height = peak.height
    rtn_time = peak.retention_time
    integral = peak.integral
    entry = f"{rtn_time},{integral},{peak_start},{peak_end},{peak_height}\n"
    return entry
97721393bea68549de05e14b96c3849085dd6802
538,929
import torch


def init_tdl_zeros(model, batch_size, device):
    """
    Initialize TDLs with zeros.

    :param model: NARX model
    :param batch_size: size of batch
    :param device: device type
    :return: input TDL, output TDL
    """
    # input tap-delay
    itdl = torch.zeros((batch_size, model.input_delay_size, model.input_size),
                       dtype=torch.float64).to(device)
    # output tap-delay
    otdl = torch.zeros((batch_size, model.output_delay_size, model.output_size),
                       dtype=torch.float64).to(device)
    return itdl, otdl
d741a57592d619b6915afd57a82b4fa5f2aa7c3f
410,986
import json


def json_decode(s):
    """ Decodes a json string to a dict. """
    if not s:
        return None
    return json.loads(s)
9549e6a0f6615fcbb8d7f5ec0687ac1bc3626079
74,042
def sort_dict(src_dict):
    """
    Sort given dictionary

    :param src_dict: source dict
    :return: sorted dictionary
    """
    sorted_dict = {k: src_dict[k] for k in sorted(src_dict.keys())}
    return sorted_dict
71ceb261848eb7458e71ea4ea0b4cd9f1d6164a3
67,680
import pickle


def load_pickle(ifpath):
    """
    Load an object from pickle

    Args:
        ifpath (str): path from where a graph is loaded
    """
    with open(ifpath, 'rb') as ifh:
        return pickle.load(ifh)
59c32698755b7eff21eb80aa514ea4d8d22e128c
271,489
def get_tile_position(node, target):
    """ Return (row, col) of a tile """
    return divmod(node.matrix.index(target), node.width)
bbf1c451fdf1908dea83b9f4b0405727f22f0cce
602,072
from sympy import diff, symbols, lambdify


def newton1D(f, x_0, df=None, delta=0.00001):
    """
    Find solution to f(x) = 0 with Newton's method

    :param f: function f
    :param x_0: starting point for x
    :param df: first order derivative of f
    :param delta: threshold for solution
    :return: x
    """
    x_n = x_0
    if df is None:
        x = symbols('x')
        df = lambdify(x, diff(f(x), x))
    while True:
        x_n1 = x_n - f(x_n) / df(x_n)
        if abs(x_n - x_n1) < delta:
            return x_n1
        x_n = x_n1
85199d3caf9d54c6ce087c01aaec6bbaa56cbc93
599,403
import time


def sisock_to_unix_time(t):
    """Convert a sisock timestamp to a UNIX timestamp.

    Parameters
    ----------
    t : float
        A sisock timestamp.

    Returns
    -------
    unix_time : float
        If `t` is positive, return `t`. If `t` is zero or negative, return
        ``time.time() + t``, i.e. `t` is treated as an offset from the
        current time.
    """
    if t > 0:
        return t
    else:
        return time.time() + t
9480b99b2c18a989421c2bff8c9629f8d17f1f7b
362,735
def hump_to_underscore(name):
    """
    Convert Hump (camel-case) style to underscore

    :param name: Hump-style name
    :return: str
    """
    new_name = ''
    pos = 0
    for c in name:
        if pos == 0:
            new_name = c.lower()
        elif 65 <= ord(c) <= 90:
            new_name += '_' + c.lower()
        else:
            new_name += c
        pos += 1
    return new_name
77f407da9202e049e75f7dfa015ce501e1492ac7
516,892
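A usage sketch for hump_to_underscore above:

assert hump_to_underscore("HumpToUnderscore") == "hump_to_underscore"
assert hump_to_underscore("alreadyLower") == "already_lower"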
import asyncio


def schedule_coroutine(target):
    """Schedules the target coroutine in the current thread's event loop.

    Returns the scheduled task.
    """
    if asyncio.iscoroutine(target):
        return asyncio.ensure_future(target, loop=asyncio.get_event_loop())
    else:
        raise TypeError("target must be a coroutine, "
                        "not {!r}".format(type(target)))
d23b2f1374df010ea855f0e53dd5576bf24d974b
191,411
def atfile_ivm(filename):
    """
    Return the filename of the IVM file which is assumed to be the second
    word in the atfile the user gave.
    """
    return filename.split()[1]
fa5523e7e7a7e963dcfe36b9eef8b3132f768719
153,665
def get_all_categories(product_list):
    """
    Function to get a unique list of categories out of the list of products.

    :param product_list: List of products
    :return: dict with category names as keys
    """
    categories_dict = dict()
    for product in product_list:
        if product['category_name'] not in categories_dict and product['category_name']:
            categories_dict[product['category_name']] = None
    return categories_dict
a29c156b337bbbb0045584d8d57dc5794d57718c
440,446
def reshape_sum_backward(gy, x_shape, axis, keepdims):
    """Reshape gradient appropriately for dezero.functions.sum's backward.

    Args:
        gy (dezero.Variable): Gradient variable from the output by backprop.
        x_shape (tuple): Shape used at sum function's forward.
        axis (None or int or tuple of ints): Axis used at sum function's
            forward.
        keepdims (bool): Keepdims used at sum function's forward.

    Returns:
        dezero.Variable: Gradient variable which is reshaped appropriately
    """
    ndim = len(x_shape)
    tupled_axis = axis
    if axis is None:
        tupled_axis = None
    elif not isinstance(axis, tuple):
        tupled_axis = (axis,)
    if not (ndim == 0 or tupled_axis is None or keepdims):
        actual_axis = [a if a >= 0 else a + ndim for a in tupled_axis]
        shape = list(gy.shape)
        for a in sorted(actual_axis):
            shape.insert(a, 1)
    else:
        shape = gy.shape
    gy = gy.reshape(shape)  # reshape
    return gy
afb274d59a4f3b39c8ad70c98a089b3ad6041f1e
574,531
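A quick sketch of what reshape_sum_backward above does to the gradient's shape, using a plain NumPy array in place of a dezero.Variable (both expose .shape and .reshape):

import numpy as np

x_shape = (2, 3, 4)
gy = np.ones((2, 4))          # gradient of x.sum(axis=1) with keepdims=False
gy = reshape_sum_backward(gy, x_shape, axis=1, keepdims=False)
assert gy.shape == (2, 1, 4)  # summed axis restored as length 1 for broadcasting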
def validate_password(password: str):
    """
    Validate the user password

    >>> validate_password("short")
    Traceback (most recent call last):
        ...
        raise ValueError("password must have at least 8 characters")
    ValueError: password must have at least 8 characters

    >>> validate_password("This is a good password!")
    'This is a good password!'
    """
    minlen = 8
    if not isinstance(password, str):
        raise TypeError("password must be a string")
    if len(password) < minlen:
        raise ValueError("password must have at least 8 characters")
    return password
d613c5b2e6e495b68dfbbae473910729a7014b77
503,683
import re


def check_community(name) -> bool:
    """Perform basic validation on community name"""
    if (name and isinstance(name, str) and len(name) > 5
            and name[:5] == 'hive-' and name[5] in ['1', '2', '3']
            and re.match(r'^hive-[123]\d{4,6}$', name)):
        return True
    return False
f9f936a0a85299cef6decfd61d84f0f5ef0cf23b
107,384
def normalise_coordinates(x1, y1, x2, y2, min_x, max_x, min_y, max_y):
    """
    Parameters:
        x1, y1, x2, y2: bounding box coordinates to normalise
        min_x, max_x, min_y, max_y: minimum and maximum bounding box values
            (min = 0, max = 1)

    Returns:
        Normalised bounding box coordinates (scaled between 0 and 1)
    """
    x1 = (x1 - min_x) / (max_x - min_x)
    y1 = (y1 - min_y) / (max_y - min_y)
    x2 = (x2 - min_x) / (max_x - min_x)
    y2 = (y2 - min_y) / (max_y - min_y)
    return x1, y1, x2, y2
988b523ed8fb85cfc9adc571eaf837fcffb843ff
385,956
def not_(a: object) -> bool:
    """
    Return `not a`, for _a_.

    Example:
        >>> not_(True)
        False

    Args:
        a: argument of `not` expression

    Return:
        `False` if `a`, `True` otherwise
    """
    return not a
320c5416a7a7eaf23f922f7abbacead10a78b448
442,229
def q_inv(a):
    """Return the inverse of a quaternion."""
    return [a[0], -a[1], -a[2], -a[3]]
e8d06e7db6d5b23efab10c07f4b9c6088190fa07
371,704
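A short note on q_inv above: it returns the conjugate [w, -x, -y, -z], which equals the inverse only for unit quaternions; for a non-unit quaternion the conjugate would still need to be divided by the squared norm. A quick check of the conjugation:

assert q_inv([1.0, 2.0, 3.0, 4.0]) == [1.0, -2.0, -3.0, -4.0]  # conjugate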
from io import StringIO


def get_number(token):
    """
    Return the leading numeric part of a string (as a string), if possible.
    """
    num = StringIO()
    for ch in token:
        if ch.isdigit() or ch == '.' or ch == '-':
            num.write(ch)
        else:
            break
    val = num.getvalue()
    num.close()
    return val
43e703f8fc1993aabc325de3729f87ca27c8fc85
687,099
def mac_byte_mask(mask_bytes=0):
    """Return a MAC address mask with n bytes masked out."""
    assert mask_bytes <= 6
    return ':'.join(['ff'] * mask_bytes + (['00'] * (6 - mask_bytes)))
3e3acd78402fc4307141e3ef38aaa73e6e546d20
98,641
def has_decorator(text, pre_decor='"', post_decor='"'):
    """
    Determine if a string is delimited by some characters (decorators).

    Args:
        text (str): The text input string.
        pre_decor (str): initial string decorator.
        post_decor (str): final string decorator.

    Returns:
        has_decorator (bool): True if text is delimited by the specified chars.

    Examples:
        >>> has_decorator('"test"')
        True
        >>> has_decorator('"test')
        False
        >>> has_decorator('<test>', '<', '>')
        True
    """
    return text.startswith(pre_decor) and text.endswith(post_decor)
1547125e7bf175b6c2a8b6a5306ffcad2c3f8206
531,185
def is_cert_valid(cert, dt):
    """Check if `cert` is valid at `dt` (datetime in UTC)."""
    if cert is None:
        return False
    return cert.not_valid_before < dt and dt < cert.not_valid_after
82805f9b2f9b93188d39c15e69a3332243595776
375,362
def opposite_sub_simplex(simplex, sub_simplex):
    """ Get the opposite sub simplex of a given sub simplex in a simplex.

    The opposite sub simplex of a sub simplex f in a simplex T is the simplex
    consisting of all the vertices of T not in f.

    :param simplex: Simplex defined by a list of vertex indices.
    :type simplex: List[int]
    :param sub_simplex: Sub simplex defined by a list of vertex indices.
    :type sub_simplex: List[int]
    :return: Opposite sub simplex defined by a list of vertex indices.
    :rtype: List[int]

    .. rubric:: Examples

    >>> opposite_sub_simplex([0, 1, 2], [1])
    [0, 2]
    >>> opposite_sub_simplex([0, 1, 2], [0, 1])
    [2]
    >>> opposite_sub_simplex([0, 1, 2], [0, 1, 2])
    []
    """
    # Wrap the generator in all(); a bare generator is always truthy.
    assert all(i in simplex for i in sub_simplex)
    opposite = []
    for i in simplex:
        if i not in sub_simplex:
            opposite.append(i)
    return opposite
e92032623bb5cd2dfa57e5530d40fee7e96b715c
359,312
def PNT2Tidal_Pv14(XA, chiA=0, chiB=0, AqmA=0, AqmB=0, alpha2PNT=0):
    """ TaylorT2 2PN Quadrupolar Tidal Coefficient, v^14 Phasing Term.

    XA = mass fraction of object
    chiA = aligned spin-orbit component of object
    chiB = aligned spin-orbit component of companion object
    AqmA = dimensionless spin-induced quadrupole moment of object
    AqmB = dimensionless spin-induced quadrupole moment of companion object
    alpha2PNT = 2PN Quadrupole Tidal Flux coefficient
    """
    XATo2nd = XA*XA
    XATo3rd = XATo2nd*XA
    XATo4th = XATo3rd*XA
    XATo5th = XATo4th*XA
    return (351560665)/(254016) + (5*alpha2PNT)/(9) - (738971515*XA)/(1524096) \
        - (104525*XATo2nd)/(336) - (2160965*XATo3rd)/(6048) - (7310*XATo4th)/(27) \
        + (4285*XATo5th)/(36) + (-(1065*XATo2nd)/(8) + (875*XATo3rd)/(8) \
        + AqmA*(-130*XATo2nd + (320*XATo3rd)/(3)))*chiA*chiA \
        + (-(1015*XA)/(4) + (1385*XATo2nd)/(3) - (2495*XATo3rd)/(12))*chiA*chiB \
        + (-(1065)/(8) + (3005*XA)/(8) - (2815*XATo2nd)/(8) + (875*XATo3rd)/(8) \
        + AqmB*(-130 + (1100*XA)/(3) - (1030*XATo2nd)/(3) + (320*XATo3rd)/(3)))*chiB*chiB
4530916fded5b0c930af91476d94f7aefd8d755f
301,256
def remove_duplicates_retain_order(seq):
    """Code credited to https://stackoverflow.com/a/480227.

    Args:
        seq (list): Any list of any datatype.

    Returns:
        list: The list in the same order but with only the first occurrence
        of each duplicate retained.
    """
    seen = set()
    seen_add = seen.add
    return [x for x in seq if not (x in seen or seen_add(x))]
a928c63a53ddd032ebcafc3d958d4e8efe313844
663,732
import re


def keep_e(st):
    """
    Takes in a string, returns that string with all letters that are not e's
    changed to (NOT_E).

    "Hello" -> "(NOT_E)e(NOT_E)(NOT_E)(NOT_E)"
    """
    # Character class restricted to letters only; the original pattern also
    # matched spaces and '|' because of the literal spaces/pipes in the class.
    return re.sub(r"[A-DF-Za-df-z]", "(NOT_E)", st)
5e486037553c24a0689554e6524c25a63d39b1f9
459,537
def filter_df(df, filter_dict, drop_list=None):
    """Filter DataFrame by dictionary of key and values."""
    for key, val in filter_dict.items():
        df = df[df[key] == val]
    if drop_list is not None:
        df = df.drop(columns=drop_list)
    return df
99bbdb6437ffcb087fc5151cdcc2e94dd08afd24
534,032
import random


def rand_real(range_start, range_stop):
    """
    Generates a random real number by using random.random to generate a
    random x in [0,1) and transforming it to be y between range_start and
    range_stop:

        y = x * (range_stop - range_start) + range_start
    """
    return random.random() * (range_stop - range_start) + range_start
4d41ee7275cecfe50c2bbcc402d8d70c6d9b98b1
429,364
def process_which(which, max_index):
    """
    Parameters
    ----------
    which: int or tuple or list, optional
        single index or tuple/list of integers indexing the eigenobjects.
        If which is -1, all indices up to the max_index limit are included.
    max_index: int
        maximum index value

    Returns
    -------
    list or iterable of indices
    """
    if isinstance(which, int):
        if which == -1:
            return range(max_index)
        return [which]
    return which
5a14620c04f173ddf1c211676444de648f89c11e
453,754
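A usage sketch for process_which above:

assert list(process_which(-1, 4)) == [0, 1, 2, 3]  # -1 expands to all indices
assert process_which(2, 4) == [2]                  # single index is wrapped in a list
assert process_which((0, 3), 4) == (0, 3)          # tuples/lists pass through unchanged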
def is_typed_dict(type_or_hint) -> bool:
    """ returns whether the type or hint is a TypedDict """
    return issubclass(type_or_hint, dict) and hasattr(type_or_hint, "__annotations__")
f5b06f6ae56cd2be4a3b9ca6cbdf0f0cdc979417
233,384
def batch_index(batch: dict, index):
    """Index into the tensors of a batch.

    Args:
        batch (dict): Batch dictionary.
        index (object): Index.

    Returns:
        dict: `batch` indexed at `index`.
    """
    # Recurse into nested dictionaries; index leaf tensors directly. The
    # original recursed unconditionally, which cannot terminate on tensor
    # leaves.
    if isinstance(batch, dict):
        return {k: batch_index(v, index) for k, v in batch.items()}
    return batch[index]
d59aa80c7f27614a288b625ade0a92042d0069d0
159,747
import random


def mutate(x, mutation_rate=1):
    """
    Mutate a random gene

    :param x: individual to mutate (numpy vector)
    :param mutation_rate: increasing/decreasing factor
    :return: mutated x (numpy vector)
    """
    mut_gene = random.randint(1, len(x) - 2)
    x[mut_gene] += (mutation_rate if random.randint(0, 1) == 0 else -mutation_rate)
    return x
85300ff3b8ca41c733ec1559697ddd9435be87e7
374,764
def get_pubkey(point, compressed=True):
    """
    Get the serialized pubkey from an ecdsa Point object.

    point = ecdsa Point object
    compressed = Boolean whether or not you want the pubkey compressed.
    """
    # Note: str.decode('hex') is Python 2 only; Python 3 would use
    # bytes.fromhex(...) instead.
    if compressed:
        return ("0" + str(2 + (1 & point.y())) + ("%064x" % point.x())).decode('hex')
    else:
        return ("04" + ("%064x" % point.x()) + ("%064x" % point.y())).decode('hex')
fce6bceeab17bcadbf62ee31bd6c11cb870a04e4
301,196