content (string, 39–9.28k chars) | sha1 (string, 40 chars) | id (int64, 8–710k)
---|---|---
def _get_subnet(module, array):
"""Return subnet or None"""
subnet = {}
try:
subnet = array.get_subnet(module.params["name"])
except Exception:
return None
return subnet | bd55bb9bfbd6725144b7bc1210dc79f7412777fa | 368,366 |
def stringifyStyle(style):
"""Convert a style dict back to a string"""
return ';'.join(['{}:{}'.format(key, value) for key, value in style.items()]) | 70e7e97809b321fa7b9f1edaf2a750e1cde6948d | 60,461 |
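A quick usage sketch (the style mapping below is made up):

```python
style = {"color": "red", "stroke-width": "2px"}
print(stringifyStyle(style))  # color:red;stroke-width:2px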
def get_activity_status_text(activity_status):
"""
Given the activity status boolean, return a human
readable text version
"""
if activity_status is True:
activity_status_text = "Success!"
else:
activity_status_text = "FAILED."
return activity_status_text | 697f09b2d9cc9699d95a9eb9db2862a2db3e08b4 | 216,482 |
import binascii
import hashlib
def merkleroot(hashes):
"""
Args:
        hashes: reversed binary form of transaction hashes, e.g.:
            ``[binascii.unhexlify(h)[::-1] for h in block['tx']]``
Returns:
merkle root in hexadecimal form
"""
if len(hashes) == 1:
return binascii.hexlify(bytearray(reversed(hashes[0])))
if len(hashes) % 2 == 1:
hashes.append(hashes[-1])
parent_hashes = []
for i in range(0, len(hashes)-1, 2):
first_round_hash = hashlib.sha256(hashes[i] + hashes[i+1]).digest()
second_round_hash = hashlib.sha256(first_round_hash).digest()
parent_hashes.append(second_round_hash)
return merkleroot(parent_hashes) | 845239f15a4d37350c14c98285c0b0d118da4e9d | 305,647 |
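A usage sketch with two hypothetical transaction ids; per the docstring, each hex id is unhexlified and byte-reversed before hashing:

```python
txids = [
    "8c14f0db3df150123e6f3dbbf30f8b955a8249b62ac1d1ff16284aefa3d06d87",
    "fff2525b8931402dd09222c50775608f75787bd2b87e56995a7bdd30f79702c4",
]
hashes = [binascii.unhexlify(h)[::-1] for h in txids]
print(merkleroot(hashes))  # merkle root as a bytes object of hex digits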
def IsGzippedFile(path):
"""Check if the given file is gzipped. (Not 100% accurate)
Args:
path: path to the file to check.
Returns:
True if it looks like a gzipped file.
"""
with open(path, 'rb') as f:
return f.read(2) == b'\x1f\x8b' | e91838a752123afc37a883f9b07296521d03f38b | 441,390 |
def time_to_str(seconds): # real signature unknown; restored from __doc__
"""
time_to_str(seconds: int) -> str
Return a string describing the number of seconds in a human
readable manner using days, hours, minutes and seconds.
"""
return "" | aca82a728cad15f844eb21338b0426c50a7c19f4 | 90,902 |
def string_fraction(numerator, denominator):
"""
    Format a fraction as a string, omitting the denominator when it equals 1.
"""
if denominator == 1:
return f"{numerator}"
else:
return f"{numerator}/{denominator}" | c0449d66cb90246ef204f9aa512e31eed8f3989e | 80,862 |
def find_member(bloom_dates, ord_day):
"""Find the ensemble member whose bloom date is ord_day.
If more than one member has ord_day as its bloom date,
choose the member with the most recent year's forcing.
If there is no member with ord_day as its bloom date look at
adjacent days and choose the member with the most recent year's
forcing.
:arg bloom_dates: Predicted bloom dates.
:type bloom_dates: dict keyed by ensemble member identifier
:arg ord_day: Bloom date expressed as an ordinal day.
:type ord_day: int
:returns: Ensemble member identifier
:rtype: str
"""
def find_matches(day):
return [
member for member, bloom_date in bloom_dates.items()
if bloom_date.toordinal() == day
]
matches = find_matches(ord_day)
if not matches:
for i in range(1, 11):
matches.extend(find_matches(ord_day + i))
matches.extend(find_matches(ord_day - i))
if matches:
break
return max(matches) | 79139056e1ee6a5fdc675045766f576b4484b5d1 | 135,535 |
import torch
def gaussian_mixture_moments(mus, sigma_sqs):
"""Estimate moments of a gaussian mixture model
B - number of observations/samples
N - number of components in mixture
Args:
mus torch.tensor((B, N)): Collection of mu-values
sigma_sqs torch.tensor((B, N)): Collection of sigma_sq-values
"""
with torch.no_grad():
mu = torch.mean(mus, dim=1)
sigma_sq = torch.mean(sigma_sqs + mus**2, dim=1) - mu**2
return mu, sigma_sq | 879cb38d55a20b73967c7eeb749dc31ca9effa2d | 400,840 |
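A small sanity check for an equally weighted three-component mixture:

```python
mus = torch.tensor([[0.0, 1.0, 2.0]])        # B=1, N=3
sigma_sqs = torch.tensor([[1.0, 1.0, 1.0]])
mu, sigma_sq = gaussian_mixture_moments(mus, sigma_sqs)
print(mu, sigma_sq)  # tensor([1.]) tensor([1.6667]) -- i.e. 1 and 5/3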
def render_list(ip_list):
"""
Render a list of targets for registration/deregistration
"""
target_list = []
for ip in ip_list:
target = {
'Id': ip
}
target_list.append(target)
return target_list | 11c18637985a05b19c73502ba35e3037d17b6156 | 516,737 |
def get_commands_to_remove_vpc(domain):
"""Gets commands to remove vpc domain
Args:
domain (str): vpc domain ID
Returns:
list: ordered list of commands to remove vpc domain
Note:
Specific for Ansible module(s). Not to be called otherwise.
"""
commands = []
commands.append('no vpc domain ' + domain)
return commands | 9fa31aa6ca84da1e7221d25d9b2a7d2490cbaa8d | 103,733 |
def cross_product(v1, v2):
"""Returns the cross product between two vectors
Arguments:
v1 {Vector3} - First vector
v2 {Vector3} - Second vector
Returns:
Vector3 - Cross product between the vectors
"""
return v1.cross(v2) | bfc8567d1376cdefedd574ab2d1a2183b73e7bbe | 583,513 |
def strip_comments(line):
"""Removes all text after a # the passed in string
>>> strip_comments("Test string")
'Test string'
>>> strip_comments("Test #comment")
'Test '
>>> strip_comments("#hashtag")
''
>>> strip_comments("Test#comment")
'Test'
"""
if "#" in line:
return line[:line.find("#")]
else:
return line | c21948eae449eb6037d7abc40cce467d00edf426 | 62,393 |
def get_resource_group_segment(resource_group_name):
"""
given a resource group name, return segment of resource id
"""
resource_group_segment = '/resourceGroups/{0}'.format(resource_group_name)
return resource_group_segment | 83ddb5d70650dcf331273d6e7bb77429703c1843 | 170,322 |
from typing import List
from typing import Generator
from typing import Dict
def get_domain_name(labels: List[Generator[str, None, None]],
label_translator: List[Dict[int, str]], name: List[int]) -> str:
"""
    Returns an English domain name for the input Zen-generated integer domain name.
:param labels: List of label generators, one for each label index
:param label_translator: List of maps for each label index from an
integer label to an English label.
:param name: Zen generated domain name as a list of integers
"""
english_name = []
for index, integer_label in enumerate(name):
# If integer label is 1, then it is a wildcard label
if integer_label == 1:
english_name.append('*')
else:
if integer_label not in label_translator[index]:
label_translator[index][integer_label] = next(labels[index])
english_name.append(label_translator[index][integer_label])
# Zen generated domain names are in reverse order.
# https://github.com/dns-groot/Ferret/blob/main/TestGenerator/Authoritative/DomainName.cs#L15-L17
return '.'.join(english_name[::-1]) | c06040cdc212e902bb3314891938c3b548845d7d | 172,510 |
def reshape(dna_sequence, reshape_info):
"""
Generate chromosome population.
:param dna_sequence: a string sequence of DNA bases
    :param reshape_info: sequence whose first element is the chromosome length
:return: an array of chromosomes, chromosome population
"""
chromosome_length = int(reshape_info[0])
chromosomes = []
# retrieve the population
for i in range(0, len(dna_sequence), chromosome_length):
chromosomes.append(dna_sequence[i:i + chromosome_length])
return chromosomes | 184cbc3111dc1b7c23da845d7e4d3ed838ea07c8 | 264,759 |
import functools
import logging
def skip_if(condition, reason: str = ''):
"""
Wrapper to prevent calling of a function if condition is True
:param condition: condition on which to skip the function
:param reason: why we are skipping
:return: returns function that executes the original function or skips it, depending on the condition
Example:
@skip_if(os.environ['SHOULD_SKIP'])
def do_thing():
print('okay')
do_thing() # warning log that it was skipped and then returns None
"""
def _skip_if(func):
@functools.wraps(func)
def wrapper(*args, **kwargs):
if condition:
logging.warning(f'Skipping {func.__name__}: {reason}')
return None
else:
return func(*args, **kwargs)
return wrapper
return _skip_if | 27346b90194d2e25fb76b2a3c3a1c5be6ab37bf5 | 328,404 |
def aic_scorer(estimator, X, y_true = None) -> float:
"""AIC score for Gaussian Mixture Model Metric
can be used in GridSearch for hyperparameter tuning
:param estimator: must have ``aic`` method which returns aic score
:param X:
:param y_true:
    :return: the scorer protocol requires greater-is-better, so it returns the negative AIC score
"""
if not callable(getattr(estimator, "aic", None)):
raise TypeError(f"This estimator: {estimator} does not have ``aic`` method, does not support aic metric")
aic = estimator.aic(X)
return -aic | 1a7a96cd57176992cc032d75a12fc662fa67f66b | 635,860 |
def q_to_q(q):
""" identity map from q to q
    Surrogates with this parameterization expect their user input
    to be the mass ratio q, and "map" to the internal surrogate's
    parameterization, which is also q.
    The surrogate's training interval is in mass ratio.
"""
return q | 28f3b1be5b11fe1b350746e8fccf24940e4f3738 | 616,177 |
def egcd(a, b):
"""Calculate greatest common divisor of two numbers.
This implementation uses a recursive version of the extended
Euclidian algorithm.
Arguments:
a: First number.
b: Second number.
Returns:
      A tuple (gcd, x, y) such that |gcd| is the greatest common
      divisor of |a| and |b| and |a|*|x| + |b|*|y| = |gcd|.
"""
if a == 0:
return (b, 0, 1)
else:
g, y, x = egcd(b % a, a)
return (g, x - (b // a) * y, y) | 47c9c87eedc5d3d4ba43c7a20722b85e8a44e26a | 530,140 |
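A common use is computing a modular inverse; here the inverse of 7 modulo 40:

```python
g, x, y = egcd(7, 40)
assert g == 1    # 7 and 40 are coprime
print(x % 40)    # 23, since 7 * 23 = 161 = 4 * 40 + 1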
def hex_color_for(rgb):
"""
    Convert a 3-element rgb structure to a CSS rgba() color string with a fixed 0.3 opacity
"""
opacity_shade = 0.3
return "rgba(%d,%d,%d,%f)" % (rgb[0], rgb[1], rgb[2], opacity_shade) | b360019ba1b744ca7952ae33cef9284da6fa18d3 | 670,326 |
def __mode2offset(voxel_size, mode='c'):
"""Modes
'c': center
'b': boundary
"""
if mode == 'c':
return voxel_size / 2
elif mode == 'b':
return 0
else:
        raise NotImplementedError(f"Unknown offset mode {mode}") | 9bc8ab87e0e60820290d745daf94cf420852d7e0 | 110,432 |
def diff_date(truth_date, computed_date):
"""Compare two dates. Returns (match?, reason for mismatch)."""
if computed_date == truth_date:
return (True, '')
if computed_date is None:
return (False, 'Missing date')
if truth_date is None:
return (False, 'Should be missing date')
return (False, 'complex') | 3ccca3899587ab5322004e1024d6dc81a06a2066 | 52,168 |
from datetime import datetime
def format_generated_timestamp(dt: datetime) -> str:
"""Return standard phrase for the date and time the report is generated"""
dt_as_text = dt.astimezone().strftime('%c %Z')
return f"generated on {dt_as_text}" | 44d09036b1fa5a0e297661aed1f9c5d46745a23a | 624,122 |
from typing import Callable
import click
def variant_option(command: Callable[..., None]) -> Callable[..., None]:
"""
Option to choose the DC/OS variant for installation.
"""
function = click.option(
'--variant',
type=click.Choice(['oss', 'enterprise']),
required=True,
help=(
'Choose the DC/OS variant. '
'If the variant does not match the variant of the given installer '
'URL, an error will occur. '
),
)(command) # type: Callable[..., None]
return function | b36783989911461d37e0597187e4d8999a3bc7be | 654,856 |
import copy
def _merge_list_of_scalars(dst, src):
"""Merge list of scalars (add src first, then remaining unique dst)"""
dst_copy = copy.copy(dst)
src_set = set(src)
dst = copy.copy(src)
for val in dst_copy:
if val not in src_set:
dst.append(val)
return dst | b3350296cab9c712eac2c2003d870d05a16aea17 | 115,245 |
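A quick illustration of the merge order:

```python
print(_merge_list_of_scalars(["a", "b", "c"], ["b", "d"]))
# ['b', 'd', 'a', 'c'] -- all of src first, then dst values not already in src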
from pathlib import Path
def resolve_open_in_colab(content, page_info):
"""
Replaces [[open-in-colab]] special markers by the proper svelte component.
Args:
content (`str`): The documentation to treat.
page_info (`Dict[str, str]`, *optional*): Some information about the page.
"""
if "[[open-in-colab]]" not in content:
return content
package_name = page_info["package_name"]
language = page_info.get("language", "en")
page_name = Path(page_info["page"]).stem
nb_prefix = f"/github/huggingface/notebooks/blob/main/{package_name}_doc/{language}/"
nb_prefix_colab = f"https://colab.research.google.com{nb_prefix}"
nb_prefix_awsstudio = f"https://studiolab.sagemaker.aws/import{nb_prefix}"
links = [
("Mixed", f"{nb_prefix_colab}{page_name}.ipynb"),
("PyTorch", f"{nb_prefix_colab}pytorch/{page_name}.ipynb"),
("TensorFlow", f"{nb_prefix_colab}tensorflow/{page_name}.ipynb"),
("Mixed", f"{nb_prefix_awsstudio}{page_name}.ipynb"),
("PyTorch", f"{nb_prefix_awsstudio}pytorch/{page_name}.ipynb"),
("TensorFlow", f"{nb_prefix_awsstudio}tensorflow/{page_name}.ipynb"),
]
formatted_links = [' {label: "' + key + '", value: "' + value + '"},' for key, value in links]
svelte_component = """<DocNotebookDropdown
classNames="absolute z-10 right-0 top-0"
options={[
"""
svelte_component += "\n".join(formatted_links)
svelte_component += "\n]} />"
return content.replace("[[open-in-colab]]", svelte_component) | 18ab284b6d750743c57fc7746d64a0a8c8ed3b17 | 39,913 |
def mark_key_as_changed_wrapper(parent_method):
"""Decorator that ensures _mark_as_changed method gets called with the key argument"""
def wrapper(self, key, *args, **kwargs):
# Can't use super() in the decorator.
result = parent_method(self, key, *args, **kwargs)
self._mark_as_changed(key)
return result
return wrapper | a7b348a48df40c4b4bc70191bba295350d9d2ce6 | 392,962 |
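A minimal sketch of how such a decorator could be applied; the `TrackedDict` container below is hypothetical, standing in for the document-style class the decorator was written for:

```python
class TrackedDict(dict):
    """Records which keys were mutated through __setitem__/__delitem__."""
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self._changed = set()

    def _mark_as_changed(self, key):
        self._changed.add(key)

    __setitem__ = mark_key_as_changed_wrapper(dict.__setitem__)
    __delitem__ = mark_key_as_changed_wrapper(dict.__delitem__)

d = TrackedDict()
d["a"] = 1
print(d._changed)  # {'a'}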
def get_resources_path() -> str:
""" Get path for copied resources. """
return 'res' | 9eb69d53b03483a361a4e7279ef5671af99ca098 | 165,857 |
def has_modification(monosaccharide, modification):
"""Checks whether ``monosaccharide`` has any modification sites
matching ``modification``.
Parameters
----------
monosaccharide : :class:`~.Monosaccharide`
The monosaccharide to check
modification : :class:`~.constants.Modification` or :class:`str`
The modification to check for
Returns
-------
:class:`bool`
"""
    for mod in monosaccharide.modifications.values():
if mod == modification:
return True
return False | 10b671f87d50d3b1f000bf2cff44731c52f252a9 | 494,147 |
def pull_rows(client, dset, table_name, start_index=0, count=40000):
"""
Query {count} rows starting at index {start_index} from {table_name} in {dset} from the established bigquery client.
Google Cloud API references: https://googleapis.github.io/google-cloud-python/latest/bigquery/generated/google.cloud.bigquery.table.Row.html
https://google-cloud.readthedocs.io/en/latest/bigquery/generated/google.cloud.bigquery.client.Client.list_rows.html
Parameters:
-----------
client: bigquery client connection
dset: bigquery data set
table_name: name of the table inside the bigquery dataset
start_index: the starting index of the query
count: max number of rows to be returned
Output:
-----------
list: query results in a list of google bigquery Row instances
"""
    # reference the target table within the dataset (table_name was unused before)
    table_ref = dset.table(table_name)
    results = list(client.list_rows(table_ref, start_index=start_index, max_results=count))
return results | 7b0b8c99964496f527d2d562c2ecf6b1af3a5dab | 258,783 |
from datetime import datetime
def parse_utc_string(datestr: str) -> datetime:
"""Parse string containing UTC timestamp an return a corresponding datetime object."""
return datetime.strptime(datestr, "%Y-%m-%dT%H:%M:%SZ") | b9d18bb300f4f381b95d255aac04c762c4a5c7bf | 236,828 |
def get_next_token(tokens, match_token):
"""
Get the next token after the match_token in the list of tokens.
"""
    found_match = False
    for token in tokens:
        if found_match:
            return token
        if token == match_token:
            found_match = True
return '' | 026ca5728e9b37026559c25cea049e79905e9a51 | 106,039 |
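Behavior sketch:

```python
tokens = ["SELECT", "name", "FROM", "users"]
print(get_next_token(tokens, "FROM"))   # users
print(get_next_token(tokens, "WHERE"))  # '' (match_token absent)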
import typing
import json
def fromjson(data_bytes: bytes) -> typing.Any:
"""Convert bytes to a python data structure.
Convert json code in byte form, typically received from a QAI request,
into a data struct which we return.
Args:
data_bytes: in the input JSON bytes
Returns:
The converted python data structure.
Raises:
This routine will raise an exception if a json conversion error occurs.
"""
return json.loads(data_bytes) | 4cdd17b934fb50257a4546976d745fd222d19829 | 509,004 |
def form_filename(root, meta):
"""
Form file name from meta data
"""
plate = str(meta["PLATE"])
mjd = str(meta["MJD"])
    # zero-pad the fiber id to four digits, e.g. 16 -> "0016"
    fiber = str(meta["FIBERID"]).zfill(4)
# root+'sas/dr12/boss/spectro/redux/v5_7_0/spectra/lite/'+ # old
return root + "/" + plate + "/spec-" + plate + "-" + mjd + "-" + fiber + ".fits" | 93d2ac257db50eecdc1df5c64c3c47ed00828180 | 164,727 |
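A usage sketch with made-up SDSS-style metadata:

```python
meta = {"PLATE": 3586, "MJD": 55181, "FIBERID": 16}
print(form_filename("/data/spectra", meta))
# /data/spectra/3586/spec-3586-55181-0016.fits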
def remaining_regions(user, region_to_remove):
"""
This calculates the remaining regions of a user after a given region is removed
:param user: The given user
:type user: ~django.contrib.auth.models.User
:param region_to_remove: The region which should be removed from the user's regions
:type region_to_remove: ~cms.models.regions.region.Region
:return: The Queryset of regions
:rtype: ~django.db.models.query.QuerySet [ ~cms.models.regions.region.Region ]
"""
return user.profile.regions.exclude(slug=region_to_remove.slug) | 06e1c6e4a0ac961ee37eb0f497450013d6d01006 | 309,065 |
def trim_garbage(text: str) -> int:
"""
Strip all characters from the end of string until ']' is reached.
:param text: Text string.
    :return: Position of the character following ']', or zero if no ']' is found.
"""
    i = len(text) - 1
    while i >= 0:
        if text[i] == "]":
            return i + 1
        i -= 1
return 0 | 4bfdfa8f31372eed57bef74642ddd1f718a7f5ea | 588,295 |
def logistic_map(x, r=4.-1./32):
"""Logistic map r*x*(1-x)"""
return r * x * ( 1. - x ) | b972a07f0fbe3bd8b4826ce7b18211b3be03f085 | 114,362 |
import json
def load_word_mappings() -> tuple:
""" Load word mappings into memory.
Load the word to index and index to word mappings from the JSON files
containing the vocabulary.
Returns:
The word to index mapping and the index to word mapping
"""
    with open("./resources/word2index.json", "r") as fp:
        word2index = json.load(fp)
    with open("./resources/index2word.json", "r") as fp:
        index2word = json.load(fp)
return word2index, index2word | 7e1d3cd10aa51c7a49a7e90938c6a074e2503f9b | 417,596 |
def split_replicates(sample_names, split_point=-1):
"""
Splits sample_names into 2 sets. The first set contains replicates 1..split_point
and the second has replicates split_point+1..infinity.
:param sample_names: list of sample names
:param split_point: where to make the split. Set split_point=-1 to include the last replicate
in the second set
:return: indices of first and second subset of samples, respectively.
"""
indices_first = []
indices_second = []
repl_ant = -1
for i, name in enumerate(sample_names):
repl_nb = int(name.split('_')[-1][1:])
if split_point == -1:
if repl_ant != -1:
if repl_ant >= repl_nb:
indices_second.append(i - 1)
else:
indices_first.append(i - 1)
repl_ant = repl_nb
elif repl_nb <= split_point:
indices_first.append(i)
else:
indices_second.append(i)
# Last replicate goes to second set if split_point=-1
if split_point == -1:
indices_second.append(len(sample_names) - 1)
assert len(set(indices_first + indices_second)) == len(sample_names)
return indices_first, indices_second | 56e0218e79b134af4011e9849ce73827210a7a68 | 476,541 |
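An illustration assuming sample names end in `_r<replicate-number>`:

```python
samples = ["ctrl_r1", "ctrl_r2", "treat_r1", "treat_r2"]
first, second = split_replicates(samples, split_point=1)
print(first, second)  # [0, 2] [1, 3] -- replicate 1 vs. replicate 2 of each condition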
def getListNameForMember(member):
"""
    Returns the member's list name, which has the format:
"surname, (title) firstname", with title being optional.
"""
return member['ListAs'] | a1c50e983f43ebe7ce3d3accdfd6ca5480f25a76 | 547,890 |
import json
def load(path, **kwargs):
"""Load a `JSON <https://www.json.org/json-en.html>`_ configuration file.
Parameters
----------
path : :class:`str`
The path to the file.
kwargs
All keyword arguments are passed to :func:`json.load`.
Returns
-------
:class:`dict`
The configuration settings.
"""
with open(path, mode='rt') as fp:
return json.load(fp, **kwargs) | 0469abb86c025e5cb117f7d0957e65e77e21dc5b | 511,824 |
from typing import Callable
def composedOptions(
*options: Callable[..., Callable]
) -> Callable[..., Callable]:
"""
Combines options decorators into a single decorator.
"""
def wrapper(f: Callable) -> Callable:
for o in reversed(options):
f = o(f)
return f
return wrapper | 74c46e428f9d44b10ec6ac8ac9ea40d946a55b2a | 326,202 |
def is_yaml_file(filename):
"""
Check if the filename provided points to a file, and ends in .yaml
"""
return filename.check(file=True) and filename.ext == '.yaml' | 8f908b9af426b41f3838dddbef1330bd18e00034 | 444,341 |
def get_vx_licensed_appliances(self) -> list:
"""Returns list of VX licensed appliances
.. list-table::
:header-rows: 1
* - Swagger Section
- Method
- Endpoint
* - license
- GET
- /license/vx
:return: Returns list of VX licensed appliances in Orchestrator \n
[`dict`]: licensed appliance object \n
* keyword **hostname** (`str`): Hostname of appliance
* keyword **model** (`str`): Model of appliance
* keyword **serialNum** (`str`): Serial number of appliance
* keyword **saasEnabled** (`bool`): Whether SaaS
optimization is enabled on the appliance
* keyword **applianceId** (`str`): Appliance ID
* keyword **LicenseType** (`str`): Current license type
* keyword **licenseStartDate** (`int`): License start date
in unix epoch
* keyword **licenseExpirationDate** (`int`): License
expiration date in unix epoch
:rtype: list
"""
return self._get("/license/vx") | a9518e8c4cc65bb460d81281419a4a0dd503a792 | 511,627 |
def collapse_ticket_lines(basket):
"""
Collapses ticket lines into a single quantity for each date
Parameters:
basket (Basket Dictionary): The basket to collapse
Returns:
(Dictionary): The collapsed dictionary
"""
ticket_dates = {}
for date_id in basket:
for type_id in basket[date_id]:
if date_id in ticket_dates:
ticket_dates[date_id] += basket[date_id][type_id]
else:
ticket_dates[date_id] = basket[date_id][type_id]
return ticket_dates | 87c578f8b1103e76750e8bc2c807f76c382b2e20 | 275,634 |
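A small example basket (shape assumed from the code: date id -> type id -> quantity):

```python
basket = {"2024-06-01": {"adult": 2, "child": 1}, "2024-06-02": {"adult": 1}}
print(collapse_ticket_lines(basket))
# {'2024-06-01': 3, '2024-06-02': 1}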
def resistivity_index(rt, ro):
"""
Archie Resistivity Index (I)
Parameters
----------
rt : float
True formation resistivity (ohm.m)
ro : float
Resistivity of water saturated formation (ohm.m)
Returns
-------
float
Returns Archie resistivity index (I)
"""
return rt/ro | 163cea9edeb51d319fe7756858296fc04409a481 | 82,829 |
def getSuit(card):
"""
Get the suit of a card.
'h' = hearts
'c' = clubs
    'd' = diamonds
's' = spades
Parameters
----------
card : str
The card we are looking at
Returns
-------
suit : str
The suit of the card
"""
return card[-1] | 36cba0d4fcf75284aefa72980daf877ba39bef24 | 202,165 |
def refractive_index(wavelength, a=1.5375, b=0.00829045, c=-0.000211046):
"""Cauchy's equation - dispersion formula
Default coefficients are for NOA61.
https://refractiveindex.info/?shelf=other&book=Optical_adhesives&page=Norland_NOA61
"""
return a + b / (wavelength * 1e6) ** 2 + c / (wavelength * 1e6) ** 4 | 61ccf0e4ebe8b2afd6122b4e784536dc3c6180cf | 444,192 |
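For example, at the sodium D line (589 nm, passed in metres):

```python
print(round(refractive_index(589e-9), 3))  # 1.56 for NOA61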
import click
def _click_resolve_command(root, parts):
"""Return the click command and the left over text given some vargs."""
location = root
incomplete = ''
for part in parts:
incomplete = part
if not part[0:2].isalnum():
continue
try:
next_location = location.get_command(click.Context(location),
part)
if next_location is not None:
location = next_location
incomplete = ''
except AttributeError:
break
return location, incomplete | 4eaece099ba9c8087e31fda60ac2e5ef626ca843 | 574,790 |
def create_ngrams(corpus_tokens, n):
"""Funktion, die aus den gegebenen Token saemtliche Ngramme der
Laenge n extrahiert und in einer Liste speichert.
Input:
1. corpus_tokens (list): Text als Liste von Tokens
2. n (int): Laenge der Ngramme
Return:
1. ngrams(list): Liste von Ngrammen"""
#definiert eine leere Liste namens ngrams
ngrams = []
#für jedes Element in der Liste "corpus_tokens"
for tokens in corpus_tokens:
#iteriert über die Charaktere im "corpus_tokens", um Ngramme zu erzeugen
#z.B.: "deutschland" 0-8(11 - 4 +1) = 8 = ['deut', 'euts', 'utsc', 'tsch', 'schl', 'chla', 'hlan', 'land']
for elt_tokens in range(len(tokens) - n + 1):
#fügt alle Ngramme zur Liste "ngrams" hinzu
ngrams.append(tokens[elt_tokens:elt_tokens + n])
return ngrams | 4f72d0c70036ef12ae7e950dfb54a811d613da42 | 627,409 |
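Character n-grams of length 4 for two tokens:

```python
print(create_ngrams(["germany", "france"], 4))
# ['germ', 'erma', 'rman', 'many', 'fran', 'ranc', 'ance']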
from typing import Callable
import logging
def _test_unit_wrapper(test_func: Callable[[], bool]) -> Callable[[], int]:
"""Wraps a test function with post processing functionality.
In particular, the test_func wrapper invokes test_func, logs the test and its
status, and returns a value to indicate the number of failing tests.
Args:
      test_func: A Callable object for invoking the test. It returns a boolean
        to indicate the test status of passing (True) or failing (False).
Returns:
A wrapper of the test_func.
"""
def wrapper() -> int:
passed = test_func()
status_str = "passed" if passed else "failed"
logging.info(f"{test_func.__name__} {status_str}.")
        return int(not passed)
return wrapper | b484353add65626dd57014e19b0355126a6be14e | 259,371 |
import re
def find_pattern(pattern, text):
"""
Find pattern in multiline string
Args:
pattern (str): Regular expression pattern
text (str): string to search in
Returns:
Match object if the pattern is found, None otherwise
"""
for line in text.splitlines():
found = re.match(pattern, line)
if found:
return found
return None | 74c4ad12cddbfeca19a3449675f814c20da0d7bc | 283,758 |
def filter_pd(pd_data, min_index):
"""Filter pandas dataframe by index."""
return pd_data[pd_data['index'] >= min_index] | 0601e6acecc60aba2f52ea49e32f44a80ee034b8 | 159,080 |
def filter_parameters(params, keys=None, prefix='', index=None, index_condition=None):
""" Make a subdictionary of parameters with required keys.
    Parameters are retrieved if:
a. It is explicitly requested (via `keys` arg).
b. Its name starts with given prefix (defined by `prefix` arg).
Parameters
----------
params : dict
Arguments to filter.
keys : sequence
Keys to retrieve.
prefix : str, optional
Arguments with keys starting with given prefix will also be retrieved.
Defaults to `''`, i.e. no prefix used.
index : int
Index of argument value to retrieve.
If none provided, get whole argument value.
If value is non-indexable, get it without indexing.
index_condition : callable
        Function that takes an indexed argument value and returns a bool specifying whether it should really be indexed.
"""
result = {}
keys = keys or list(params.keys())
if prefix:
keys += [key.split(prefix)[1] for key in params if key.startswith(prefix)]
for key in keys:
value = params.get(prefix + key, params.get(key))
if value is None:
continue
# check if parameter value indexing is requested and possible
if index is not None and isinstance(value, list):
# check if there is no index condition or there is one and it is satisfied
if index_condition is None or index_condition(value[index]):
value = value[index]
result[key] = value
return result | df28046200c03e05f7553436e76907bb381295f1 | 485,815 |
from typing import Union
from typing import Dict
from typing import Optional
def get_prefixed_name(
qname: str, namespaces: Union[Dict[str, str], Dict[Optional[str], str]]) -> str:
"""
Get the prefixed form of a QName, using a namespace map.
:param qname: an extended QName or a local name or a prefixed QName.
:param namespaces: a dictionary with a map from prefixes to namespace URIs.
"""
try:
if qname[0] == '{':
ns_uri, local_name = qname[1:].split('}')
elif qname[1] == '{' and qname[0] == 'Q':
ns_uri, local_name = qname[2:].split('}')
else:
return qname
except IndexError:
return qname
except (ValueError, TypeError):
raise ValueError("{!r} is not a QName".format(qname))
for prefix, uri in sorted(namespaces.items(), reverse=True,
key=lambda x: x if x[0] is not None else ('', x[1])):
if uri == ns_uri:
return '%s:%s' % (prefix, local_name) if prefix else local_name
else:
return qname | 68088bfb021d81c57dbc6a36aecccead43faf3a7 | 113,319 |
def clean_visits(data, test, n_visits=100):
"""
Drops rows of `data` with too few entries for given `test`
Parameters
----------
data : pandas.DataFrame
Must have at least columns 'visit' and `test`
test : str
Column of `data` used to determine which rows to drop
n_visits : int, optional
Minimum number of participants with data for `test` at a given visit
required to retain that visit
Returns
-------
data : pandas.DataFrame
Cleaned input `data`
"""
if test not in data.columns:
        raise ValueError('Provided test "{}" is not in data'.format(test))
    # determine which visits have sufficient datapoints
data = data.dropna(subset=[test])
visits = data.groupby('visit').count()
visits = list(visits[visits['participant'] > n_visits].index)
# drop "bad" visits and remove those as categories; also, convert the
# visits column to a numerical value
data = data.query(f'visit in {visits}')
visit_codes = data['visit'].cat.remove_unused_categories().cat.codes
data = data.assign(visit=visit_codes)
return data | 021ed80298e9e5df7da79a169dac00a3d351656a | 74,854 |
import typing
import pathlib
def size_of_directory(directory: str, units='bytes') -> typing.Union[int, float]:
"""
Returns the size of directory. It ignores the size of directories.
Credits: derived from https://stackoverflow.com/a/55659577/3967334
Parameters
----------
directory : str
Path to directory
units: str
One of `bytes` (default), `kilobytes`, `megabytes`, `gigabytes`
Returns
-------
out: int
Size
"""
# the exponent needed in the denominator when doing the conversion
units_conversion_exponent = {'bytes': 0, 'kilobytes': 1, 'megabytes': 2, 'gigabytes': 3}
    size = sum(file.stat().st_size for file in pathlib.Path(directory).rglob('*') if file.is_file())
return size/1024**units_conversion_exponent[units] | 0702849c8662010db4ff0cb24e7fc34520c05746 | 64,050 |
def _client_row_class(client: dict) -> str:
"""
Set the row class depending on what's in the client record.
"""
required_cols = ['trust_balance', 'refresh_trigger']
for col in required_cols:
if col not in client:
return 'dark'
try:
if client['trust_balance'] > client['refresh_trigger']:
return 'success'
except TypeError:
return 'dark'
return 'danger' | cd5ebd8fd64c7d994d6803df473cd317af65e9ac | 3,161 |
def _get_problem_names(problems):
"""Extract the names from a list of problems."""
return [problem['name'] for problem in problems] | 4e1a18362d4b136afe61ca013d2fff59ec6ca368 | 236,831 |
def envs_to_exports(envs):
"""
:return: line with exports env variables: export A=B; export C=D;
"""
exports = ["export %s=%s" % (key, envs[key]) for key in envs]
return "; ".join(exports) + ";" | ac4d4d187e189cc5432b12fa0891ba97403a06c7 | 651,058 |
def increment(string):
"""Add 1 to the int in that string
>>> increment('1') == '2'
True
"""
return str(int(string) + 1) | 499549c836704b58db92ca4ff37ed67438ee1b79 | 444,270 |
import logging
def getLogger(plugin_name=None):
"""Get logger object.
This function is needed just to standardize logger names and
    ensure that all log objects belong to the vitables logger
    hierarchy, which was properly configured.
:parameter plugin_name: the name that will be used in log output
to identify the source
:return: logger object
"""
logger_name = 'vitables'
if plugin_name is not None:
logger_name += '.plugin.' + plugin_name
logger = logging.getLogger(logger_name)
return logger | 2610540fa063034ffcf01d873932c6cace375d50 | 223,249 |
def update_d(
r: str,
d: int,
) -> int:
"""Update the direction
Parameters
----------
r : str
The direction of rotation
d : int
The current direction
Returns
-------
int
The updated direction
"""
# Check if the direction of rotation is 45 degrees positive
if r == "+":
d += 45
# Check if the direction of rotation is 45 degrees negative
elif r == "-":
d -= 45
# Check if the direction is more than 360 degrees
if d >= 360:
        d -= 360
# Check if the direction is less than 0 degrees
elif d < 0:
d += 360
return d | 088525e9bd8345356dc3f64a525e1138a48935ff | 274,321 |
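The wrap-around behavior at the 0/360 boundary:

```python
print(update_d("+", 315))  # 0   (315 + 45 wraps past 360)
print(update_d("-", 0))    # 315 (0 - 45 wraps below 0)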
def adjacent_powerset(iterable):
"""
Returns every combination of elements in an iterable where elements remain ordered and adjacent.
For example, adjacent_powerset('ABCD') returns ['A', 'AB', 'ABC', 'ABCD', 'B', 'BC', 'BCD', 'C', 'CD', 'D']
Args:
        iterable: a sliceable sequence (e.g. str, list, tuple)
Returns:
a list of element groupings
"""
return [iterable[a:b] for a in range(len(iterable)) for b in range(a + 1, len(iterable) + 1)] | 951418b30d541e1dcdd635937ae609d429e3cd70 | 6,032 |
from typing import Any
def try_to_cuda(t: Any) -> Any:
"""
Try to move the input variable `t` to a cuda device.
Args:
t: Input.
Returns:
t_cuda: `t` moved to a cuda device, if supported.
"""
try:
t = t.cuda()
except AttributeError:
pass
return t | 9b6643f169c1eb8fc65de8b1bff55668f8cba950 | 11,446 |
def _calculate_bilinear_cost(
op, coeff, num_alive_inputs, num_alive_outputs, batch_size):
"""Calculates bilinear cost for an op.
Args:
op: A tf.Operation.
coeff: A float coefficient for the bilinear function.
num_alive_inputs: Scalar Tensor indicating how many input channels are
considered alive.
num_alive_outputs: Scalar Tensor indicating how many output channels are
considered alive.
batch_size: Integer batch size to calculate cost/loss for.
Returns:
Tensor with the cost of the op.
"""
if op.type == 'DepthwiseConv2dNative':
# num_alive_inputs may not always equals num_alive_outputs because the
# input (e.g. the image) may not have a gamma regularizer. In this
# case the computation is proportional only to num_alive_outputs.
return batch_size * coeff * num_alive_outputs
else:
return batch_size * coeff * num_alive_inputs * num_alive_outputs | 1a14f02e51d5b48b159e741488c15db8a51088c9 | 206,535 |
def update_light(command, light):
"""Compute new light status.
Args:
command (str): 'toggle', 'turn on' or 'turn off'
light (bool): Light status before command execution
Returns:
bool: New light status
"""
logic = {"toggle": not light, "turn on": True, "turn off": False}
return logic[command] | 56cd8a929ab60e5671760b1abdb0cc4964f2e318 | 171,208 |
def find_avg_distance(segs, traffic_exp=1):
"""
    Creates a metric for evaluating the trip prediction model.
Used to determine average distance from beginning coordinate for each segment travelled.
Parameters
----------
segs : list
List of dictionaries of steps for routes
traffic_exp : float
Factor to give preference in weighting to heavily trafficked routes
Default is 1
Return
------
Float containing weighted average distance.
"""
# Initialize distance and traffic percentage.
total_dist_traffic = 0
total_traffic_perc = 0
# Get coords of initial point.
coords1 = segs[0][0]['geometry'][0].coords.xy
x1 = coords1[0][0]
y1 = coords1[1][0]
    # Loop through all parts of all routes to obtain distances
for seg in segs:
for part in seg:
# Extract coords of end of each segment.
coords2 = part['geometry'][-1].coords.xy
x2 = coords2[0][-1]
y2 = coords2[1][-1]
# Calculate distance between end of segment and start of routes
dist = ((x2 - x1) ** 2 + (y2 - y1) ** 2) ** 0.5
# Weight distance by traffic percentage
dist_traffic = dist * part['traffic_perc'] ** traffic_exp
total_traffic_perc += part['traffic_perc'] ** traffic_exp
total_dist_traffic += dist_traffic
# Calculated weighted average of travel distance.
avg_distance = total_dist_traffic / total_traffic_perc
return avg_distance | fe61e6f234ace80a814a18044644425e8c3df3c1 | 214,196 |
import math
def maxsubarrayMid(list, start, mid, end):
"""
Find the max subarray that crosses the mid point given
Do this by naively finding the max subarray on the left side
ending at position mid, and finding the max subarray on the
right side starting at position mid
    Time complexity: O(length of input)
"""
leftMaxSum = -math.inf
leftSum = 0
leftIndex = mid
# Find the left max subarray ending at mid
for i in range(mid, start - 1, -1):
leftSum += list[i]
if leftSum > leftMaxSum:
leftMaxSum = leftSum
leftIndex = i
rightMaxSum = -math.inf
rightSum = 0
rightIndex = mid
# Find the right max subarray starting at mid
for i in range(mid + 1, end + 1):
rightSum += list[i]
if rightSum > rightMaxSum:
rightMaxSum = rightSum
rightIndex = i
# Security checks in case there was no left/right subarray
# This is for the edge cases when we have odd number of elements
maxSum = 0
if leftMaxSum != -math.inf:
maxSum += leftMaxSum
if rightMaxSum != -math.inf:
maxSum += rightMaxSum
return (maxSum, leftIndex, rightIndex) | 71d32d9971fb886c6bb2cfcc6acc58613008be58 | 393,885 |
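A small check on an array whose best crossing subarray is [2, -1, 3]:

```python
print(maxsubarrayMid([2, -1, 3, -2], start=0, mid=1, end=3))  # (4, 0, 2)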
def get_air_quality_qualitative_name(index: int) -> str:
"""
Gets the qualitative name for the air quality based on the index ordinal supplied
More information: https://openweathermap.org/api/air-pollution
"""
if index == 1:
return "Good"
elif index == 2:
return "Fair"
elif index == 3:
return "Moderate"
elif index == 4:
return "Poor"
else:
return "Very Poor" | e15cc4eb57a9f39280a780b4ad1a51a677f64cb4 | 236,065 |
def minimax(val, low, high):
""" Return value forced within range """
try:
val = int(val)
    except (TypeError, ValueError):
val = 0
if val < low:
return low
if val > high:
return high
return val | 9fdbc8ba8306fc53c4d938b70316b6ea569ebcc3 | 673,229 |
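Clamping behavior, including the fallback for unparsable input:

```python
print(minimax("15", 0, 10))   # 10
print(minimax("abc", 0, 10))  # 0 (unparsable input falls back to 0)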
def seg_to_hops(seg):
"""
Extract the list of hops a path segment traverses, returns it as a tuple of
tuples.
"""
hops = []
for asm in seg.iter_asms():
hops.append(asm.isd_as())
assert hops
return tuple(hops) | 996a56f33944996ba7cc8fccef968b0f76fb5efc | 321,975 |
def load_words_from_file(filename):
""" Read words from filename, return list of words. """
    with open(filename, "r") as f:
        file_content = f.read()
wds = file_content.split()
return wds | 22baf5790f8f71da6f76344a053e5549bf220fa5 | 549,306 |
def get_monthly(
df, net_erp_effect, group_by=("Duration_movement_date", "Visa_subclass")
):
"""
Aggregate unit record NOM data to monthly by visa subclass
"""
summary = (
df[df.net_erp_effect == net_erp_effect]
.groupby(group_by)
.net_erp_effect.sum()
.unstack()
)
return summary.resample("M").sum() | e01355ba50e9c63d0fbc1336391a69a2da469908 | 146,522 |
import re
def generate_callback_id(name):
""" Get callback id from ``name``.
It is a lowercase version of ``name``, where all non-alphanumeric characters are
replaced by underscores.
Parameters
----------
name: str
The callback ``name`` to generate an id from.
Returns
-------
str
Lowercase version of ``name``, where all non-alphanumeric characters are
replaced by underscores.
"""
return re.sub(r"\W+", "_", name).lower() | 70e41bbf24af8dcdb0645fee177590752f7320a7 | 140,070 |
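Runs of non-alphanumeric characters collapse into single underscores (note the trailing underscore left by the final parenthesis):

```python
print(generate_callback_id("On Save (v2)"))  # on_save_v2_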
def get_bool(bytearray_: bytearray, byte_index: int, bool_index: int) -> bool:
"""Get the boolean value from location in bytearray
Args:
bytearray_: buffer data.
byte_index: byte index to read from.
bool_index: bit index to read from.
Returns:
        True if the bit is 1, else False.
Examples:
>>> buffer = bytearray([0b00000001]) # Only one byte length
>>> get_bool(buffer, 0, 0) # The bit 0 starts at the right.
True
"""
index_value = 1 << bool_index
byte_value = bytearray_[byte_index]
current_value = byte_value & index_value
return current_value == index_value | 34d7a032b90ffaa7eb85e88bd8a57ec5db54a22b | 682,430 |
def parse_individual(arg):
"""
Try to open arg as a file and return list of each line, otherwise assume it is a comma separated
list and use split to return an actual list
"""
    inds = []
    try:
        with open(arg, "r") as file:
            inds = [line.rstrip("\n") for line in file]  # strip trailing newlines safely
except IOError:
inds=arg.split(",")
return inds | 37fc0128d1469f5066aab507780ee3fc2143a45e | 536,877 |
def is_weekend(tx_datetime):
"""
This function checks if a date falls on weekend.
Args:
tx_datetime: Datetime variable
Returns:
0 - date falls on weekday, 1 - date falls on weekend
"""
# Transform date into weekday (0 is Monday, 6 is Sunday)
weekday = tx_datetime.weekday()
# Binary value: 0 if weekday, 1 if weekend
is_weekend = weekday >= 5
return int(is_weekend) | 65c499d13df2f4e325a4063299b77bcfea1b6e2d | 398,931 |
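A quick check against known calendar dates:

```python
from datetime import datetime
print(is_weekend(datetime(2024, 1, 6)))  # 1 (a Saturday)
print(is_weekend(datetime(2024, 1, 8)))  # 0 (a Monday)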
import torch
def calculate_gradient_penalty(D, real_samples, fake_samples):
"""Calculates the gradient penalty loss"""
# Random weight for interpolation between real and fake samples
eta = torch.rand((real_samples.size(0), 1, 1, 1), device=real_samples.device)
# Get random interpolation between real and fake samples
interpolates = (eta * real_samples + ((1 - eta) * fake_samples)).requires_grad_(True)
# calculate probability of interpolated examples
d_interpolates = D(interpolates)
# Get gradient w.r.t. interpolates
fake = torch.ones_like(d_interpolates, device=real_samples.device, requires_grad=False)
gradients = torch.autograd.grad(outputs=d_interpolates,
inputs=interpolates,
grad_outputs=fake,
create_graph=True,
retain_graph=True)[0]
gradients = gradients.view(gradients.size(0), -1)
gradient_penalty = ((gradients.norm(2, dim=1) - 1) ** 2).mean()
return gradient_penalty | ba790a4a6c8fc0088e5d0a5717e0e59c0436de87 | 58,058 |
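A sketch of how the penalty slots into a WGAN-GP critic step; the tiny linear critic, random batches, and lambda_gp = 10 below are illustrative assumptions:

```python
D = torch.nn.Sequential(torch.nn.Flatten(), torch.nn.Linear(3 * 32 * 32, 1))
real = torch.randn(8, 3, 32, 32)
fake = torch.randn(8, 3, 32, 32)
gp = calculate_gradient_penalty(D, real, fake)
d_loss = D(fake).mean() - D(real).mean() + 10.0 * gp  # critic loss with lambda_gp = 10
d_loss.backward()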
from typing import Iterable
def recursive_max(arr):
"""
Method to recursively find the max value of an array of iterables.
Credit: https://www.linkedin.com/pulse/ask-recursion-during-coding-interviews-identify-good-talent-veteanu/
Args:
arr: (numpy array), an array of values or iterables
Returns:
(float), max value in arr
"""
return max(
recursive_max(e) if isinstance(e, Iterable) else e
for e in arr
) | 647f3d51ea5fca895efd4b1fd9e93f31a6536684 | 453,251 |
import warnings
import copy
def clone_trainer(trainer, is_reinit_besides_param=False):
"""Clone a trainer with optional possibility of reinitializing everything besides
parameters (e.g. optimizers.)"""
with warnings.catch_warnings():
warnings.simplefilter("ignore")
trainer_new = copy.deepcopy(trainer)
if is_reinit_besides_param:
trainer_new.initialize_callbacks()
trainer_new.initialize_criterion()
trainer_new.initialize_optimizer()
trainer_new.initialize_history()
return trainer_new | fc65f663665265fa31a5cd0bcca2e5b05464e079 | 624,734 |
import codecs
def from_bytes(val):
"""
Translates value into valid utf8 string
:param val: A value to translate
:return: A valid utf8 string
"""
try:
return codecs.decode(val, 'utf8')
    except (UnicodeDecodeError, TypeError):
return val | fcc9b316f4bef69e73caccf68e62e1b7582b1b8c | 154,074 |
import random
def train_test_split(x, y, test_size=0.3, random_state=None, shuffle=True):
"""
:brief: This function creates the train and the test sets with
a specific percentage for the test and the complementary one for
train set. E.g. test_size = 0.3 means x_train is the 70% of our data
while x_test is the 30%.
:param x: are the dependent data that will be split.
:param y: are the independent data that will be split.
:param test_size: is the value based on which we will split the data
:param random_state: is int number which is used as seed for suffling
:param shuffle: is a boolean value which indicates if the dataset will be shuffled.
    :return: x_train, x_test, y_train, y_test
"""
if random_state is not None:
random.seed(random_state)
if shuffle:
for i in range(len(y)):
x[i].append(y[i])
random.shuffle(x)
for i in range(len(x)):
y[i] = x[i][-1]
x[i] = x[i][:len(x[i]) - 1]
# we assume that our x_train is 70% of the given dataset and our x_test is the 30% of the given dataset
x_train = x[:int((1 - test_size) * len(x))]
y_train = y[:int((1 - test_size) * len(y))]
x_test = x[int((1 - test_size) * len(x)): len(x)]
y_test = y[int((1 - test_size) * len(y)): len(y)]
return x_train, x_test, y_train, y_test | dccd44c734f8cc412e6a6ef0a46e99c6189ad59f | 262,076 |
import json
def to_dict(value):
""" Convert value from e.g. csv-reader to valid json / dict
"""
if value == "":
return {}
else:
return json.loads(value) | b74a7e12dfbf4b1f10fb4feff2de8c4c878528ef | 243,837 |
def trim(gid_list):
"""
Remove an underscore and any following characters from each element in a list of strings
Add a terminal period for easy hierarchical comparison
Intended to remove the version number from gids
"""
result = []
for gid in gid_list:
gid = gid.split("_")
result.append(gid[0] + '.')
return result | c7ed78226263c19939dc51e3b9bd2aeaed6293c2 | 557,126 |
def add_costs(*args):
"""Add the arguments as costs.
Here when one of the operand is unity, it will be taken as a zero in the
summation.
"""
res = sum(i if abs(i) != 1 else 0 for i in args)
return res if res != 0 else 1 | 851e8c95cc02f17e106b82b7fe8a72d88914858c | 548,475 |
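How unity operands behave:

```python
print(add_costs(1, 5, -1))  # 5 -- the unit operands contribute zero
print(add_costs(1, -1))     # 1 -- an all-unit sum collapses back to unity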
def _getElementsFrom(smsTopList, dataset):
"""
Get elements that belong to any of the TxNames in dataset
(appear in any of constraints in the result).
    Loops over all elements in smsTopList and returns a copy of the elements belonging
to any of the constraints (i.e. have efficiency != 0). The copied elements
have their weights multiplied by their respective efficiencies.
:parameter dataset: Data Set to be considered (DataSet object)
:parameter smsTopList: list of topologies containing elements (TopologyList object)
:returns: list of elements (Element objects)
"""
elements = []
for txname in dataset.txnameList:
for top in smsTopList:
itop = txname._topologyList.index(top) #Check if the topology appear in txname
if itop is None: continue
for el in top.getElements():
newEl = txname.hasElementAs(el) #Check if element appears in txname
if not newEl: continue
el.covered = True
eff = txname.getEfficiencyFor(newEl.getMasses())
if not eff: continue
el.tested = True
newEl.eff = eff
newEl.weight *= eff
newEl.txname = txname
elements.append(newEl) #Save element with correct branch ordering
return elements | cbda3827bac0f1ace032cb39522085797fee7b2d | 583,369 |
def check_table(conn, table, interconnect):
"""
searches if Interconnect exists in table in database
:param conn: connect instance for database
:param table: name of table you want to check
:param interconnect: name of the Interconnect you are looking for
:return: results of SQL query searching for table
"""
cur = conn.cursor()
sql_search = "SELECT * \
FROM %s \
WHERE Interconnect='%s'" % (table, interconnect)
found = cur.execute(sql_search).fetchone()
return found | 0888146d5dfe20e7bdfbfe078c58e86fda43d6a5 | 4,153 |
def underscore_to_pascalcase(value):
"""Converts a string from underscore_case to PascalCase.
Args:
value: Source string value.
Example - hello_world
Returns:
The string, converted to PascalCase.
Example - hello_world -> HelloWorld
"""
if not value:
return value
def __CapWord(seq):
for word in seq:
yield word.capitalize()
return ''.join(__CapWord(word if word else '_' for word in value.split('_'))) | c28a3b37a0a6ef195ecb50a0ad63067a6cffe878 | 80,203 |
def bisect_right(a, x, lo=0, hi=None, *, key=None):
"""Return the index where to insert item x in list a, assuming a is sorted.
The return value i is such that all e in a[:i] have e <= x, and all e in
a[i:] have e > x. So if x already appears in the list, a.insert(i, x) will
insert just after the rightmost x already there.
Optional args lo (default 0) and hi (default len(a)) bound the
slice of a to be searched.
"""
if lo < 0:
raise ValueError('lo must be non-negative')
if hi is None:
hi = len(a)
# Note, the comparison uses "<" to match the
# __lt__() logic in list.sort() and in heapq.
if key is None:
while lo < hi:
mid = (lo + hi) // 2
if x < a[mid]:
hi = mid
else:
lo = mid + 1
else:
while lo < hi:
mid = (lo + hi) // 2
if x < key(a[mid]):
hi = mid
else:
lo = mid + 1
return lo | 780780f7d444eca6067e82d4c2534b83336ff806 | 680,974 |
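The `key` parameter searches against a derived value, mirroring the stdlib `bisect.bisect_right` from Python 3.10:

```python
pairs = [("a", 1), ("b", 3), ("c", 5)]
print(bisect_right(pairs, 3, key=lambda p: p[1]))  # 2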
import six
def dict_to_string_list(d):
"""
Converts a dict to a list of 'key=value' strings. If the input is None
then None will be returned.
"""
return ['%s=%s' % kv for kv in six.iteritems(d)] if d else None | 7f4442dfc36559c67434e2691db769f4f11ec785 | 184,792 |
def binary_search(search_list, search_key):
"""Find the index of a value of a key in a sorted list using
a binary search algorithm. Returns the index of the value if
found. Otherwise, returns -1.
"""
left_idx, right_idx = 0, len(search_list) - 1
# while True:
while left_idx <= right_idx:
mid_idx = (right_idx + left_idx) // 2
# search_key is left of middle value
if search_key < search_list[mid_idx]:
right_idx = mid_idx - 1
# search key is right of middle value
elif search_key > search_list[mid_idx]:
left_idx = mid_idx + 1
else:
return mid_idx
# If we get here, the value was not found.
return -1 | 17738b9cc392f18d1f51f86a3c8d1b3fd3e14e47 | 649,248 |
def get_highest_score_index(results_list):
"""
Given a list of results, returns the index of the hit with the highest score.
Simple find the maximum algorithm stuff going on here.
"""
highest_score = 0.0
highest_index = 0
    for index, hit in enumerate(results_list):
        if hit.score > highest_score:
            highest_score = hit.score
            highest_index = index
return highest_index | f546ec6b616237ba03cc7113cd9c6993049689c6 | 523,660 |
def rectify_answer(generated_answer):
"""remove duplicate consecutive words
e.g. My My Name Name -> My Name"""
words = generated_answer.split()
new_words = [words[0]]
for w in words[1:]:
if new_words[-1]!=w:
new_words.append(w)
return " ".join(new_words) | af2465b0387f087bc4ccac55e67ebd247c8009bb | 484,679 |
def celsius2kelvin(celsius):
"""
Convert temperature in degrees Celsius to degrees Kelvin.
:param celsius: Degrees Celsius
:return: Degrees Kelvin
:rtype: float
"""
return celsius + 273.15 | 4826bbdee8e50356649a1adb7294ec12824186d3 | 666,129 |
import ipaddress
def is_valid_ipv4(address):
"""Check an IPv4 address for validity"""
try:
ip = ipaddress.ip_address(address)
except ValueError:
return False
if not isinstance(ip, ipaddress.IPv4Address):
return False
warning = None
if ip.is_loopback:
warning = "loopback address"
elif ip.is_multicast:
warning = "multicast address"
elif ip.is_reserved:
warning = "reserved address"
elif ip.is_link_local:
warning = "link-local address"
elif ip.is_unspecified:
warning = "unspecified address"
elif ip.is_private:
warning = "private address"
elif address.endswith(".0") or address.endswith(".255"):
warning = "potential broadcast address"
if warning:
print("*** Warning: {} is a {}".format(address, warning))
return True | fd095d8903cd0a44bfd6a7eb02fa95217a60077a | 701,270 |
def handle_mask(mask, tree):
"""Expand the mask to match the tree structure.
:param mask: boolean mask
:param tree: tree structure
:return: boolean mask
"""
if isinstance(mask, bool):
return [mask] * len(tree)
return mask | 1621d700b65ecd3f81811828999c42a0cd57c075 | 115,084 |
def resolve_name(obj, _):
"""Convert 'name' from bytes to string."""
return obj.name.decode() | 09616a916ba1b2ef5958d0c6b2bd27238595f9dd | 449,936 |
def subset_x_y(target, features, start_index:int, end_index:int):
"""Keep only the rows for X and y sets from the specified indexes
Parameters
----------
target : pd.DataFrame
Dataframe containing the target
features : pd.DataFrame
Dataframe containing all features
    start_index : int
        Index of the starting observation
    end_index : int
        Index of the ending observation
Returns
-------
pd.DataFrame
Subsetted Pandas dataframe containing the target
pd.DataFrame
Subsetted Pandas dataframe containing all features
"""
return features[start_index:end_index], target[start_index:end_index] | 76f1efdbda5127bcbfe208ebd32ba1e5da1f3158 | 181,187 |