content
stringlengths 39
9.28k
| sha1
stringlengths 40
40
| id
int64 8
710k
|
---|---|---|
import click
def d_reduce_options(f):
    """Create common options for dimensionality reduction.

    Decorator-style helper: wraps *f* with three click options shared by the
    dimensionality-reduction commands and returns the wrapped command.

    Options added:
        --axes: pair of ints selecting which two projection axes to plot.
        --dimension/-d: number of dimensions kept in the output XYZ file.
        --scale/--no-scale: whether to standard-scale the coordinates.
    """
    # Each click.option(...) call returns a decorator; applying it to f
    # attaches the option. Order here is the reverse of CLI help order.
    f = click.option('--axes', nargs=2, type=click.Tuple([int, int]),
                     help='Plot the projection along which projection axes.',
                     default=[0, 1])(f)
    f = click.option('--dimension', '-d',
                     help='Number of the dimensions to keep in the output XYZ file.',
                     default=10)(f)
    f = click.option('--scale/--no-scale',
                     help='Standard scaling of the coordinates.',
                     default=True)(f)
    return f
|
97577d5d52b777ea33d4d47a80cbacc0f394ad00
| 43,525 |
def _is_url_without_path_query_or_fragment(url_parts):
"""
Determines if a URL has a blank path, query string and fragment.
:param url_parts: A URL.
:type url_parts: :class:`urlparse.ParseResult`
"""
return url_parts.path.strip('/') in ['', 'search'] and url_parts.query == '' \
and url_parts.fragment == ''
|
4bad1f230adfa77df019519db276a181d57682dd
| 299 |
def apim_api_release_show(client, resource_group_name, service_name, api_id, release_id):
    """Fetch and return the details of a single API release from the service."""
    release_operations = client.api_release
    return release_operations.get(resource_group_name, service_name, api_id, release_id)
|
174e8d6bf55a3f66e549cfb8047a8b6e48e1d98a
| 523,305 |
def factorial(n):
    """Compute n! (factorial of n) iteratively.

    Args:
        n (int): non-negative integer.

    Returns:
        int: factorial of n; 1 when n == 0.

    Raises:
        ValueError: if n is negative. (The previous recursive version
            recursed without bound on negative input and also risked
            RecursionError for large n; the iterative form fixes both.)
    """
    if n < 0:
        raise ValueError("factorial() not defined for negative values")
    result = 1
    for factor in range(2, n + 1):
        result *= factor
    return result
|
16894444b052095b255574aa1ab355e12fa37ef4
| 658,573 |
def decode_default(value):
    """Default decoder: interpret *value* as an integer when possible.

    Base 0 is used, so decimal, hex (0x...), octal (0o...) and binary
    (0b...) literals are all accepted. If the string is not a valid
    integer literal the original string is returned unchanged.
    """
    try:
        return int(value, 0)
    except ValueError:
        return value
|
93df67ae96ac19c634f173b8b9082741411cc137
| 210,363 |
import re
def uncamel(s):
    """
    Convert CamelCase class names into lower_snake_case.

    Uses the two-pass approach from http://stackoverflow.com/a/1176023/3288364
    so that acronym runs are handled too: the single-pass version turned
    'MyHTTPServer' into 'my_httpserver'; this yields 'my_http_server'.
    """
    # First pass: split a capitalized word off a preceding character
    # (handles the boundary after an acronym, e.g. HTTP|Server).
    s = re.sub('(.)([A-Z][a-z]+)', r'\1_\2', s)
    # Second pass: split lower/digit -> upper transitions.
    return re.sub('([a-z0-9])([A-Z])', r'\1_\2', s).lower()
|
21990e0df7d590e03d8b9ea61bb2bb4ef4b6e6d6
| 76,100 |
def gen_hpack_huffman(data):
    """Generate the hpack_huffman C array.

    Returns a tuple (length, body) where both elements are strings: the
    number of entries and the comma/newline separated initializer rows
    (no trailing comma or newline after the final row).
    """
    rows = (" { " + str(entry['bit_len']) + ", 0x" + entry['LSB_hex'] + "u }"
            for entry in data)
    return (str(len(data)), ",\n".join(rows))
|
448845d9bb8a375aa82d1a07c2e2c7759f4d259f
| 259,147 |
def cat_charsets(cs):
    """Combine a set into an alphabetically sorted written-English list,
    using commas and 'and' (Oxford comma for three or more items)."""
    items = sorted(cs)
    if len(items) > 2:
        # Oxford-comma style: "a, b, and c"
        items[-1] = "and " + items[-1]
        return ", ".join(items)
    # Zero, one or two items: "", "a", or "a and b"
    return " and ".join(items)
|
1d5992529b64a8427c353f330c2f47185701a49e
| 235,656 |
import itertools
def flatten(iterable):
    """Reduce dimensionality of an iterable of iterables by one level.

    Args:
        iterable: A multi-dimensional iterable.
    Returns:
        A lazy one-dimensional iterable over the inner elements.
    """
    return (element for inner in iterable for element in inner)
|
3869bbc5f2e1377d5c893ce7e35c3b91988916a7
| 26,945 |
def matchline(allregex, line):
    """Apply each compiled regex to *line* and report the first hit.

    Parameters
    ----------
    allregex : List[(regex, patinfo)]
        List of (compiled pattern, pattern info) tuples.
    line : str
        Line to be matched.

    Returns
    -------
    tuple or None
        (group(1) of the match, matching pattern info) for the first
        pattern that matches, or None when nothing matches.
    """
    for compiled, pattern_info in allregex:
        found = compiled.search(line)
        if found is not None:
            return (found.group(1), pattern_info)
    return None
|
b3db70c120b3394b15902e45c7f11b0b4a01acdc
| 515,166 |
def rev_comp(seq: str) -> str:
    """
    Return the reverse-complement of a DNA string.

    Handles upper- and lower-case bases including IUPAC ambiguity codes
    (N, M/K, R/Y, S, W, B/V, D/H). Raises KeyError for any other character.
    """
    # Complement table built from two aligned strings:
    # A<->T, C<->G, N->N, M<->K, R<->Y, S->S, W->W, B<->V, D<->H
    forward = 'ACGTNMKRYSWBVDHacgtnmkryswbvdh'
    partner = 'TGCANKMYRSWVBHDtgcankmyrswvbhd'
    lookup = dict(zip(forward, partner))
    return ''.join(lookup[base] for base in reversed(seq))
|
c1c9b8666f6a07e1f7eab588881bc40a61b0cbff
| 485,539 |
import json
def get_stadiums(infile):
    """
    Read stadium map data from a JSON file and index it by stadium name.

    Args:
        infile: Filename where stadium map data is stored (string). The file
            holds a JSON array of records, each with a "stadium" key.
    Returns:
        Dict mapping stadium names (strings) to their full records (dicts).
    """
    with open(infile, "r") as handle:
        records = json.load(handle)
    return {record["stadium"]: record for record in records}
|
883c463baf011ab8cfe57bc445bfd2954d995b84
| 252,412 |
def get_cart_location(env, screen_width):
    """
    Return the horizontal pixel position of the middle of the cart.

    :param env: environment exposing ``x_threshold`` (half-width of the
        world in world units) and ``state`` (state[0] is the cart x position)
    :param screen_width: rendered screen width in pixels
    :return: int pixel column of the cart centre
    """
    world_width = env.x_threshold * 2
    pixels_per_unit = screen_width / world_width
    # Shift so that x = 0 maps to the middle of the screen.
    return int(env.state[0] * pixels_per_unit + screen_width / 2.0)
|
2b245964e1ce8b70a7964766a13a14e7759e48bf
| 18,188 |
def isTrue(val):
    """Determine whether a textual value represents 'true'.

    Args:
        val: A string; 'true', 'yes', 't' or '1' (any case) indicate True.
    Returns:
        True or False.
    """
    normalized = val.lower()
    return normalized in ('true', 't', '1', 'yes')
|
04f8b87148991ba54a052700ba39481cc17e1fb7
| 558,950 |
def recvall(sock, nbytes):
    """Receive exactly nbytes from a socket, looping over partial reads.

    Parameters
    ----------
    sock: Socket
        The socket to read from.
    nbytes : int
        Number of bytes to be received.

    Raises
    ------
    IOError
        If the peer closes the connection before nbytes arrive.
    """
    chunks = []
    received = 0
    while received < nbytes:
        # Cap each read at 1 KiB; recv may return fewer bytes than asked.
        chunk = sock.recv(min(nbytes - received, 1024))
        if not chunk:
            raise IOError("connection reset")
        chunks.append(chunk)
        received += len(chunk)
    return b"".join(chunks)
|
7498b5de4d4fd068b606d3209583681f0beaa3ab
| 477,719 |
import six
def get_strings(value):
    """
    Return the tuple of available string representations (text and bytes)
    for the given value.

    For a text (unicode) string: (value, value UTF-8 encoded to bytes).
    For a byte string: (value, value decoded from UTF-8 to text).
    For anything else: a 1-tuple containing the value unchanged.
    """
    if isinstance(value, six.text_type):
        # Text in, pair out: (text, utf-8 bytes).
        return value, value.encode('utf-8')
    if isinstance(value, six.binary_type):
        # Bytes in, pair out: (bytes, decoded text).
        return value, value.decode('utf-8')
    # Non-string input is passed through as a single-element tuple.
    return value,
|
6517c5e2053d1facaf4282da720aaa91ca0ab2e7
| 38,635 |
def _get_reason(summons):
"""
Returns reason for summons or summons request.
Arguments:
summons (SummonRequest|Summons)
Returns: str
"""
return (
(
'Conversation outcome: ' + summons.outcomes +
'\nFurther action required because: ' + summons.standards_action
)
if summons.spokeWith
else summons.special_circumstance
)
|
a34ce18edc68f123ff25193733d489d5185bb11b
| 564,709 |
import uuid
def create_writer_commands(
    nexus_structure,
    output_filename,
    broker,
    job_id="",
    start_time=None,
    stop_time=None,
    use_hdf_swmr=True,
    service_id=None,
    abort_on_uninitialised_stream=False,
):
    """
    Build the start and stop commands for a file-writer job.

    :param nexus_structure: dictionary containing nexus file structure
    :param output_filename: the nexus file output filename
    :param broker: default broker to consume from
    :param job_id: filewriter job_id; a uuid1 is generated when empty
    :param start_time: ms from unix epoch, omitted from the command when None
    :param stop_time: ms from unix epoch, added to both commands when not None
    :param use_hdf_swmr: whether to use HDF5's Single Writer Multiple Reader
        capabilities; only written to the command when disabled, since the
        filewriter defaults to true
    :param service_id: identifier of the file-writer instance that should
        handle this command; only needed with multiple file-writers
    :param abort_on_uninitialised_stream: whether to abort if a stream cannot
        be initialised; only written to the command when enabled
    :return: (write command, stop command), both carrying the same job_id
    """
    job_id = job_id or str(uuid.uuid1())
    write_cmd = {
        "cmd": "FileWriter_new",
        "broker": broker,
        "job_id": job_id,
        "file_attributes": {"file_name": output_filename},
        "nexus_structure": nexus_structure,
    }
    stop_cmd = {"cmd": "FileWriter_stop", "job_id": job_id}
    if start_time is not None:
        write_cmd["start_time"] = start_time
    # Non-default flags are only included when they differ from the
    # filewriter's built-in defaults.
    if not use_hdf_swmr:
        write_cmd["use_hdf_swmr"] = use_hdf_swmr
    if abort_on_uninitialised_stream:
        write_cmd["abort_on_uninitialised_stream"] = abort_on_uninitialised_stream
    if stop_time is not None:
        write_cmd["stop_time"] = stop_time
        stop_cmd["stop_time"] = stop_time
    if service_id:
        write_cmd["service_id"] = service_id
        stop_cmd["service_id"] = service_id
    return write_cmd, stop_cmd
|
09b90bef6532b40c28109ed8246b3584f3c19e48
| 122,438 |
def read(texname):
    """
    Read a .tex file and return its full contents.

    @ In, texname, str, LaTeX file name
    @ Out, contents, str, tex contents
    """
    with open(texname, 'r') as source:
        contents = source.read()
    return contents
|
35a687836fba6dc37689b4c5d2fb85f0841eb4e9
| 74,377 |
def add_slash(m):
    """
    Ensure a string ends with a trailing slash.

    Parameters:
        m: The string to append to (must be non-empty).
    """
    return m if m.endswith("/") else m + "/"
|
9a64d8b481c7fd61a232795f26d67b963f81ec65
| 533,112 |
def cliproi(shape, roi):
    """Clip a ROI so it never exceeds the array bounds.

    Args:
        shape (n-tuple): array shape (n1, n2, ...)
        roi (n-2-tuple): array range indices ((a1,b1),(a2,b2),...); None
            means "from the start" / "to the end", negative values count
            from the end of the axis.

    Returns:
        n-2-list: clipped ROI [[a1,b1],[a2,b2],...]

    Raises:
        ValueError: when shape and roi have different lengths.
    """
    if len(shape) != len(roi):
        raise ValueError("Dimensions for shape and ROI should be the same")
    clipped = []
    for size, (lo, hi) in zip(shape, roi):
        if lo is None:
            lo = 0
        else:
            if lo < 0:
                lo += size
            # Start index must be a valid element index: [0, size-1].
            lo = max(0, min(lo, size - 1))
        if hi is None:
            hi = size
        else:
            if hi < 0:
                hi += size
            # End index is exclusive, so it may equal size.
            hi = max(0, min(hi, size))
        clipped.append([lo, hi])
    return clipped
|
17c82966400342657f26b92f352a22c66a8c27c8
| 319,868 |
import re
def snake_case(camel_case):
    """
    Transform a camel-case string into snake case.

    :param camel_case: Camel case string
    :type camel_case: str
    :return: Snake case string
    :rtype: str
    """
    # First pass splits a capitalized word off whatever precedes it
    # (handles acronym boundaries like HTTP|Server); second pass splits
    # lower/digit -> upper transitions.
    first_pass = re.sub('(.)([A-Z][a-z]+)', r'\1_\2', camel_case)
    return re.sub('([a-z0-9])([A-Z])', r'\1_\2', first_pass).lower()
|
0a09cd04b5c5a917004139c6d44aa8c03e52ee32
| 172,347 |
import requests
def send_request(body, url, token):
    """
    Send a POST request to a SOS endpoint.

    :param body: body of the request, JSON-serializable
    :param token: Authorization token for an existing SOS
    :param url: URL of the endpoint where the SOS can be accessed
    :return: the ``requests.Response`` object (not decoded JSON); call
        ``.json()`` on it to get the payload
    :raises requests.HTTPError: when the server responds with an error status
    """
    # Add headers:
    headers = {'Authorization': str(token), 'Accept': 'application/json'}
    response = requests.post(url, headers=headers, json=body)
    response.raise_for_status()  # raise HTTP errors
    return response
|
ec092d7ebf7f8f2002795465f04804f19f094c90
| 61,351 |
def _dictmerge(d1, d2):
""" Recursive merges d2 into d1 if they are dictionaries (by keys), otherwise returns d2 """
if isinstance(d1, dict) and isinstance(d2, dict):
rd = d1
for k in d2:
if k in d1:
rd[k] = _dictmerge(d1[k], d2[k])
else:
rd[k] = d2[k]
return rd
return d2
|
592b89bebe40dfca385b3fde70cdcd445bd3a9c1
| 352,830 |
def flip_boxes(boxes, im_width):
    """
    Horizontally flip boxes within an image of the given width.

    Args:
        boxes (array): boxes to flip; x1 coordinates live at columns 0::4
            and x2 coordinates at columns 2::4.
        im_width (int): width of the image.
    Returns:
        boxes_flipped (array): new array with mirrored x coordinates
            (x1' = W - x2 - 1, x2' = W - x1 - 1); input is not modified.
    """
    mirrored_x1 = im_width - boxes[:, 2::4] - 1
    mirrored_x2 = im_width - boxes[:, 0::4] - 1
    boxes_flipped = boxes.copy()
    boxes_flipped[:, 0::4] = mirrored_x1
    boxes_flipped[:, 2::4] = mirrored_x2
    return boxes_flipped
|
29f46827844c3e292ab0377f16ffeca069082ae9
| 485,704 |
def batchify(instances, batch_size=32):
    """Split *instances* into consecutive batches of at most *batch_size*.

    Args:
        instances: sequence to split.
        batch_size: maximum size of each batch (the last batch may be
            smaller).
    Returns:
        list of slices covering all of *instances* in order.
    """
    # Slicing already truncates at the end of the sequence, so the
    # original tail-handling conditional was redundant.
    return [instances[i:i + batch_size]
            for i in range(0, len(instances), batch_size)]
|
d5676fe90da26ff2c98fffe48b9afd4f3b02a06e
| 125,371 |
def simple_mapper(stream):
    """
    Return a simple list of tuples [(<key1>, 1), (<key2>, 1), ...] pairing
    every element of *stream* with the count 1 (map stage of map-reduce).
    """
    return [(key, 1) for key in stream]
|
cda4882cdfa0c467637037131f483fee9714b0e8
| 241,046 |
def create_environment(**kwargs):
    """Format keyword arguments as an AWS environment-override list.

    Each name=value pair becomes a {'name': ..., 'value': ...} dict, which
    is the structure AWS expects for environment overrides.
    """
    environment = []
    for name, value in kwargs.items():
        environment.append({'name': name, 'value': value})
    return environment
|
cc2458c164ed04b533d73c733d1fd4c2cbb93fdb
| 607,291 |
def extract_accessions_and_dates(genbank_query, taxonomy_filters):
    """Map GenBank accessions to retrieval dates for existing sequences.

    :param genbank_query: sql collection; each item is a tuple whose first
        element exposes ``genbank_accession`` and ``seq_update_date`` and
        whose last element exposes ``genus`` and ``species``
    :param taxonomy_filters: set of genera, species and strains used to
        restrict retrieval of sequences, or None for no restriction

    Return a dict {GenBank_accession: retrieval_date}
    """
    accessions = {}
    for item in genbank_query:
        record = item[0]
        # "NA" marks entries with no GenBank accession stored in CAZy.
        if record.genbank_accession == "NA":
            continue
        if taxonomy_filters is not None:
            # NOTE(review): genus and species are concatenated without a
            # separator, so filters must match that joined form.
            source_organism = item[-1].genus + item[-1].species
            if not any(tax in source_organism for tax in taxonomy_filters):
                continue
        accessions[record.genbank_accession] = record.seq_update_date
    return accessions
|
82a6e5a2c3d88efceef19eb7f2366dee1da6c546
| 287,319 |
def get_magnification(src2sample, sample2det):
    """
    Determine the magnification (M > 1 is a demagnification).

    :param src2sample: source to sample distance [m]
    :param sample2det: sample to detector distance [m]
    :return: magnification factor (total path / source-to-sample distance)
    """
    total_distance = src2sample + sample2det
    return total_distance / src2sample
|
800b6c2c80b3e81a5e82ae9a44ba15835a6b19eb
| 488,076 |
def remove_first_word(text):
    """Return *text* with everything up to and including the first space
    removed; return '' when there is no space."""
    # str.partition yields '' for the tail when the separator is absent,
    # matching the original split-based behavior.
    return text.partition(" ")[2]
|
0809a676c60086be6b8186c444550e367b80e26d
| 143,630 |
import hashlib
def generate_hash(input):
    """Return the md5 hex digest of an object's repr().

    The object is first converted to its repr() string and UTF-8 encoded,
    so any repr()-able object can be hashed (not cryptographically secure).
    """
    digest = hashlib.md5(repr(input).encode())
    return digest.hexdigest()
|
92b90c782631c2ab9618b70ec9451dfa5afbbbfb
| 536,719 |
def _extract_target(data, target_col):
"""Removes the target column from a data frame, returns the target
col and a new data frame minus the target."""
target = data[target_col]
train_df = data.copy()
del train_df[target_col]
return target, train_df
|
726a76e254c1d6dc120b590d9d7128986ddc9227
| 18,823 |
def complex_vlass(df, NN_dist=72, SNR_min=None):
    """Select complex VLASS components from a component table.

    A component is complex when its S_Code is "M" or "C", or when it is a
    single ("S") component closer than NN_dist to its nearest neighbour.

    Args:
        df (pandas.DataFrame): table of VLASS components.
        NN_dist (float, optional): maximum nearest-neighbour distance
            (arcsec) for an "S" component to count as complex. Defaults to 72.
        SNR_min (float, optional): minimum signal-to-noise to keep.
            Defaults to None (no SNR cut).

    Returns:
        pandas.DataFrame: subset of *df* containing only complex components.
    """
    is_close_single = (df["S_Code"] == "S") & (df["NN_dist"] < NN_dist)
    is_multi_or_complex = df["S_Code"].isin(["M", "C"])
    selected = df[is_close_single | is_multi_or_complex]
    if SNR_min is not None:
        selected = selected[selected["SNR"] >= SNR_min]
    return selected
|
3dd668b10f2d19729d30a2996cec08c3304aabed
| 236,781 |
def extraction_date_hour(date_hour):
    """
    Parse an ISO-like timestamp string 'YYYY-MM-DDTHH:MM[...]'.

    :param date_hour: string of the form '2021-03-05T12:30:45'
    :return: tuple (year, month, day, hour, minute) of ints; seconds and
        anything beyond the minute field are ignored
    """
    ymd = date_hour.split('-')
    day_and_time = ymd[2].split("T")
    clock = day_and_time[1].split(":")
    return (int(ymd[0]), int(ymd[1]), int(day_and_time[0]),
            int(clock[0]), int(clock[1]))
|
1d9c858e4e86bc12b6fd8fb5ab085562e3313717
| 360,370 |
def _in_while_loop(control_flow_node_map, op_name):
"""
Check if a given control flow operator is part of a while loop execution
frame. This is based on the fact that there is only one occurrence of
`LoopCond` for a loop execution frame and it is only presented in the loop
construct.
Parameters
----------
control_flow_node_map : Dict[str, Set[str]]
A dictionay contains the unique control flow execution frame name to
a set of primitive operators mapping.
op_name : str
The name of a control flow primitive.
Returns
-------
ret : bool
Return true if the operator is in a while loop execution frame,
otherwise, return false.
"""
return op_name in control_flow_node_map and "LoopCond" in control_flow_node_map[op_name]
|
7016910bf50e6013c908eda310980f9633b642f6
| 180,670 |
def SplitNamespace(ref):
    """Return (namespace, entity) from |ref|, e.g. app.window.AppWindow ->
    (app.window, AppWindow). If |ref| isn't qualified then returns (None, ref).
    """
    namespace, dot, entity = ref.rpartition('.')
    if dot:
        return (namespace, entity)
    return (None, ref)
|
443b3dca2ac56d9997de777716da53d34377b18a
| 466,791 |
def _build_projection_expression(clean_table_keys):
"""Return a projection expression for the DynamoDB lookup.
Args:
clean_table_keys (Dict[str, Any]): Keys without the data types attached.
Returns:
str: A projection expression for the DynamoDB lookup.
"""
projection_expression = ''
for key in clean_table_keys[:-1]:
projection_expression += ('{},').format(key)
projection_expression += clean_table_keys[-1]
return projection_expression
|
5b22b55a6649aabee6f51878c03430f55c890e88
| 479,408 |
import re
def read_data_names(filepath: str):
    """
    Read class names and attributes from a specification file.

    The file layout is:
        line 1: "class values: <c1>, <c2>, ..."
        line 2: "attributes: <a1>, <a2>, ..."
        then one "<attr>: <v1>, <v2>, ..." line per attribute, in order.

    :param filepath: relative path to the attribute_values specification file
    :return: tuple (classes, attributes, attribute_values) where
        classes and attributes are 1-D lists of names, and attribute_values
        is a 2-D list with one row of possible values per attribute.
    """
    with open(filepath, "r") as handle:
        lines = handle.read().splitlines()
    classes = re.sub(r'^' + re.escape("class values: "), '', lines.pop(0)).split(", ")
    attributes = re.sub(r'^' + re.escape("attributes: "), '', lines.pop(0)).split(", ")
    attribute_values = []
    for attribute in attributes:
        # Strip the "<attr>: " prefix from the matching value line.
        raw = lines.pop(0)
        stripped = re.sub(r'^' + re.escape(attribute + ": "), '', raw)
        attribute_values.append(stripped.split(", "))
    return classes, attributes, attribute_values
|
1111bd25970e5d41c596eaefa9517992186fba8d
| 65,280 |
def get_routes(open_api_spec: dict) -> list:
    """Return the route names (paths) of an OpenAPI spec.

    Args:
        open_api_spec (dict): an OpenAPI specification.

    Returns:
        list: path strings found under "paths", in spec order; no
            information about which methods each path supports is returned.
            (Previously a dict_keys view was returned despite the declared
            list return type; it is now materialized as a real list.)
    """
    return list(open_api_spec["paths"])
|
4fa41dc64bd07aea6ff5ae7d7bcfb8bbd4132c00
| 222,688 |
def pr_name(payload):
    """Return the name (e.g. optee_os) of the Git project from a webhook payload."""
    repository = payload['repository']
    return repository['name']
|
8b6e5548bea0913a10701db4d8c798181c14751c
| 406,361 |
import pathlib
def make_report_file_names(proj_full_path):
    """
    Build the directory and file paths used by a report.

    Args:
        proj_full_path (string): the path of the results directory.

    Returns:
        tuple of (report_dir, html_outfile, hash_file), all pathlib.Path:
        the report directory, the report.html inside it, and the
        results_hash.json inside it.
    """
    report_dir = pathlib.Path(proj_full_path) / "report"
    return (report_dir,
            report_dir / "report.html",
            report_dir / "results_hash.json")
|
8d995bb15c2b8710ad2fb16e2476b5a96421f379
| 695,780 |
def get_teacher() -> str:
    """Prompt on stdin for a teacher name and return the entered string."""
    return input("Teacher name: ")
|
bf4b6757ec2d1eebce3f7751f75ea2a42fb67fd0
| 153,689 |
import pathlib
import configparser
def load_config(path: pathlib.Path) -> configparser.ConfigParser:
    """Parse the INI config file at *path* and return the ConfigParser."""
    parser = configparser.ConfigParser()
    with path.open() as handle:
        parser.read_file(handle)
    return parser
|
0ff93929350a8086ed0e25922fbf413d6f013763
| 549,748 |
def _get_id(param):
"""Returns a parameter ID.
If param is a Parameter object, then we get the ID from it.
Otherwise, we assume param is an integer.
"""
try:
# If this is a Parameter object, then return its _id attr.
return param._id
except AttributeError:
# Otherwise, we assume it's an integer.
return param
|
c9532a110e537121e223eb28e73e8238c3cc2ddb
| 200,497 |
from typing import List
from typing import Dict
import importlib
def get_package_versions(
    packages: List[str], *, na_str="not available"
) -> Dict[str, str]:
    """Try to import each named python package and collect its version.

    Args:
        packages: package names to probe.
        na_str (str): text recorded when a package cannot be imported.

    Returns:
        dict: version string for each package name, sorted by name.
    """
    def _version_of(name: str) -> str:
        # Only a failed import maps to na_str; a module without
        # __version__ raises AttributeError, as before.
        try:
            return importlib.import_module(name).__version__  # type: ignore
        except ImportError:
            return na_str

    return {name: _version_of(name) for name in sorted(packages)}
|
bc51ef38a2bac20156a5e6b935e18369a5a46119
| 115,535 |
def get_user_group(user_group):
    """
    Format a user and group as ``user:group``, as needed for `chown`.

    A tuple is formatted as its two elements; a plain string without a
    colon or an integer is duplicated into ``user:user``; anything else is
    returned as-is — no further validation is performed.

    :param user_group: User name, user id, user and group in format
        ``user:group`` / ``user_id:group_id``, or tuple of ``(user, group)``.
    :type user_group: unicode, int, or tuple
    :return: Formatted string in the format ``user:group``.
    :rtype: unicode
    """
    if isinstance(user_group, tuple):
        return '{0}:{1}'.format(*user_group)
    if isinstance(user_group, int) or ':' not in user_group:
        return '{0}:{0}'.format(user_group)
    return user_group
|
cc04bc3a572d50c8147ee6b66c83c957a84670c9
| 183,227 |
def conformer_eligible_for_topology_detection(conformer):
    """Return whether this conformer is worth running topology detection on.

    Duplicates and conformers with unreliable geometries (negative error
    status or status >= 512) are generally not useful for topology
    detection.

    Args:
        conformer: dataset_pb2.Conformer

    Returns:
        bool
    """
    status = conformer.properties.errors.status
    return conformer.duplicated_by == 0 and 0 <= status < 512
|
34ef2f72d4f4430edd7b76e0f2701b19d4e5a1e9
| 339,982 |
def string_tag(name, value):
    """Create a DMAP tag with string data.

    Layout: 4-byte tag name, 4-byte big-endian payload length, then the
    UTF-8 encoded value. The length now counts the *encoded* bytes; the
    previous version used len(value) (character count), which produced a
    wrong length field for any non-ASCII value.
    """
    payload = value.encode('utf-8')
    return name.encode('utf-8') + \
        len(payload).to_bytes(4, byteorder='big') + \
        payload
|
21e041195fb986cbdcb7d9eabcc57018eb111c05
| 623,894 |
def text(label, posx, posy):
    """Return the list of HPGL commands that draw *label* at (posx, posy).

    Sequence: pen up, absolute move to the position, then a label command
    terminated by the ETX character (\\x03) that HPGL's LB requires.
    """
    move_to = "PA{},{}".format(posx, posy)
    draw_label = "LB{}\3;".format(label)
    return ["PU;", move_to, draw_label]
|
434ef059aa75570be59289d35ae0216c942aef7d
| 489,608 |
from typing import Counter
def bigram_counts(sequences):
    """Count occurrences of each unique PAIR of adjacent values.

    The input should be a 2-dimensional array (list of sequences). For
    example, if the tag pair (NOUN, VERB) appears 61582 times, the result
    satisfies bigram_counts(...)[(NOUN, VERB)] == 61582.

    output format:
        out = {(gram1, gram2): occurrence_count}
    """
    counts = Counter()
    for seq in sequences:
        # zip(seq, seq[1:]) yields each adjacent pair; it is empty for
        # sequences shorter than 2, so no explicit length check is needed,
        # and Counter.update removes the manual zero-initialisation.
        counts.update(zip(seq, seq[1:]))
    return dict(counts)
|
31a9e1050b9ae61640a365697e951b28f4a92a8e
| 133,082 |
def domain_dn_2_name(dn):
    """Convert an LDAP-style domain DN into a dotted domain name.

    Example:
        Input:  "dc=vsphere,dc=local"
        Output: "vsphere.local"
    """
    cleaned = dn.replace(" ", "")
    cleaned = cleaned.replace("dc=", "")
    return cleaned.replace(",", ".")
|
300de71b093277d4b396b2971bef6351639497b8
| 278,112 |
def prepend(the_list, the_str):
    """Prefix every item in a list with a string.

    Args:
        the_list (list): values to prefix; each is rendered via str.format.
        the_str (string): string to prepend to each value.

    Returns:
        list: new list of prefixed strings.
    """
    template = the_str + "{0}"
    return [template.format(item) for item in the_list]
|
7e36e573c5f40e788304afead2811b6037c28863
| 557,235 |
def isVowel(letter):
    """
    Check whether a given letter is a vowel. For the purposes of this
    function y is always a vowel and w never is.

    letter should be a string containing a single letter.

    Returns "error" if the parameter is not a single-letter string,
    True if the letter is a vowel (either case), otherwise False.
    """
    if type(letter) != str:  # letter must be a string
        return "error"
    if len(letter) != 1:  # letter must be exactly one character
        return "error"
    return letter in "aeiouyAEIOUY"
|
5ba6917c80c4679a59580b8f3a05513d0904cd63
| 698,531 |
import uuid
def get_visitor_id(guid, cookie):
    """Pick the visitor id for this hit.

    Preference order: a non-empty cookie value, then a non-empty guid,
    then a freshly generated random UUID (new visitor).
    """
    if cookie:
        return cookie
    # No cookie: reuse the guid when present, otherwise mint a new id.
    return guid if guid else str(uuid.uuid4())
|
962e9fe10c8ecec5ca98316feee02302f7e965e3
| 491,958 |
def dispatch(split, *funcs):
    """Deliver each item of *split* to the function at the same position.

    Example::

        split = ('bar', 'baz', 'qux')
        double = lambda word: word * 2
        triple = lambda word: word * 3
        quadruple = lambda word: word * 4
        dispatch(split, double, triple, quadruple)
        # -> ['barbar', 'bazbazbaz', 'quxquxquxqux']

    Pairing stops at the shorter of the two sequences (zip semantics);
    a list of the per-item results is returned.
    """
    routed = []
    for element, transform in zip(split, funcs):
        routed.append(transform(element))
    return routed
|
5a04ea68cc218c116436783a9c27866e4297b0fd
| 599,211 |
import torch
def sum_rightmost(value: torch.Tensor, dim: int):
    """Sum out the ``dim`` rightmost dimensions of a tensor.

    Args:
        value: A tensor with ``.dim()`` at least ``dim``.
        dim: The number of rightmost dims to sum out; 0 returns the
            tensor unchanged.
    """
    if dim == 0:
        return value
    # Collapse the trailing `dim` axes into one, then reduce it.
    flattened = value.flatten(start_dim=value.dim() - dim)
    return flattened.sum(-1)
|
9214bc18e08cd08efa946ec84740c283901e06f5
| 639,431 |
def interbase0_to_humancoords(fmin, fmax, strand):
    """
    Convert GMOD-standard 0-based inter-base coordinates into the
    human-readable 1-based coordinates used in places like GBK flat files.
    Features on the reverse strand have a larger start than stop.

    Strand values can be:
        + or 1 for forward features
        - or -1 for reverse features

    Returns a tuple (start, stop) where start < stop on the forward strand.
    Raises an Exception for any other strand value.
    """
    is_forward = strand == '+' or strand == 1
    is_reverse = strand == '-' or strand == -1
    if is_forward:
        return (fmin + 1, fmax)
    if is_reverse:
        return (fmax, fmin + 1)
    raise Exception("Invalid strand specified ({0}). Expected +, -, 1 or -1".format(strand))
|
06718284e1d495272944de5ba1cab107db94d102
| 247,184 |
import math
def delta(l, m, delta0):
    """Convert direction cosines (l, m) into a declination.

    Keyword arguments:
    l, m -- direction cosines, given by (offset in cells) x cells (radians)
    delta0 -- declination of the field centre, decimal degrees

    Return value:
    delta -- Dec in decimal degrees
    """
    delta0_rad = math.radians(delta0)
    cos_term = m * math.cos(delta0_rad)
    sin_term = math.sqrt(1 - (l * l) - (m * m)) * math.sin(delta0_rad)
    return math.degrees(math.asin(cos_term + sin_term))
|
63efa0b386304d50e7c14846c8121f33764622ca
| 116,337 |
def get_symmetric_pos_th_nb(neg_th):
    """Return the positive return symmetric to a negative one.

    For example, a 50% drop requires a 100% gain to get back to the
    initial level."""
    remaining_fraction = 1 - neg_th
    return neg_th / remaining_fraction
|
3b9af948d6f405147aa7c156bd81556e7e44b8d9
| 100,303 |
def _format_page_object_string(page_objects):
"""Format page object string to store in test case."""
po_string = ''
for page in page_objects:
po_string = po_string + " '" + page + "',\n" + " " * 8
po_string = "[{}]".format(po_string.strip()[:-1])
return po_string
|
4c984c300ed0289be073d70ba97a301ee207b998
| 457,364 |
from typing import Optional
def erd_encode_bool(value: Optional[bool]) -> str:
    """Encode a bool to its raw hex string: "01"/"00"; None encodes as "FF"."""
    if value is None:
        return "FF"
    if value:
        return "01"
    return "00"
|
7d2c5099f6cebcb520c2559601ce765ecbb8ae98
| 157,543 |
import requests
def get_token(username: str,
              pw: str,
              filehost: str = "securefileshare.wales.nhs.uk") -> dict:
    """
    Fetch a Bearer token from the secure file share's token endpoint.

    Parameters
    ----------
    username: str
    pw: str
        Password for the account.
    filehost: str
        Host name of the file share service.

    Returns
    -------
    dict
        Decoded JSON token response; the access token is under the key
        "access_token".

    Raises
    ------
    AssertionError
        When the server does not answer 200.
        NOTE(review): assert is stripped under ``python -O``; raising an
        explicit exception would be more robust — confirm before relying
        on this check.
    """
    endpoint = f"https://{filehost}/api/v1/token"
    # OAuth2 password grant payload.
    data = {"grant_type": "password",
            "username": username,
            "password": pw}
    # SECURITY NOTE: verify=False disables TLS certificate verification,
    # exposing credentials to man-in-the-middle attacks — presumably a
    # workaround for an internal certificate; confirm whether the CA
    # bundle can be supplied instead.
    token = requests.post(endpoint,
                          data=data,
                          verify=False)
    assert token.status_code == 200, f"Request failed. Error code: {token.status_code}"
    json = token.json()
    token.close()
    return json
|
98593425daaa606c938e63d488ebcd72a314d57d
| 666,036 |
def unknown_processor_rules(info_data, rules):
    """Fill in the default keyboard info fields for unknown boards.

    Sets every hardware-description field to 'unknown' in place and
    returns the same dict. *rules* is accepted for interface
    compatibility but not consulted.
    """
    for field in ('bootloader', 'platform', 'processor',
                  'processor_type', 'protocol'):
        info_data[field] = 'unknown'
    return info_data
|
6ff076ba40a1d729c5cc7eec68ff6b49482fe87e
| 536,321 |
def rotl(x, n):
    """Rotate a 32-bit value left by n bits (circular left shift).

    Args:
        x (int): input value, treated as a 32-bit unsigned integer
        n (int): number of bit positions to rotate by (0 < n < 32 expected)

    Returns:
        int: x rotated left by n bits, masked to 32 bits
    """
    high_part = (x << n) & 0xffffffff
    low_part = (x >> (32 - n)) & 0xffffffff
    return high_part | low_part
|
abe22155f0cd9434291872783a8179252407a123
| 472,778 |
def list_not_in(lst1, lst2):
    """Return the values of lst1 that do not appear in lst2, preserving order."""
    return list(filter(lambda value: value not in lst2, lst1))
|
6805e55b06cc53308462c5b5dfbe10265f84888f
| 256,220 |
import torch
def test(model, data_loader, silent=False, device=None, test_batch=1000):
    """
    Evaluate an RND-style model on the given data by per-class predictor MSE.

    For each batch, the class whose predictor has the smallest MSE against
    the target network output is taken as the prediction.

    Args:
        model (RNDModel): model exposing ``eval()`` and ``predict(x)``,
            where predict returns (per-class predictor outputs, target output)
        data_loader (iterable): iterable of (x, y) samples
        silent (bool): if True prints nothing
        device (pt.device): pytorch device to evaluate on
        test_batch (int): size of the test batch

    Returns (float): accuracy value

    NOTE(review): the values 784 (input size), 10 (classes) and 10_000
    (dataset size) look MNIST-specific — confirm; accuracy is wrong if the
    loader yields a different number of samples.
    """
    model.eval()
    correct = 0
    with torch.no_grad():
        for batch_i, (x, y) in enumerate(data_loader):
            # Flatten images to vectors and move batch to the device.
            x = x.view(-1, 784).to(device)
            y = y.to(device)
            predictor_z, target_z = model.predict(x)
            # One MSE vector per class predictor against the shared target.
            mses = []
            for predict in predictor_z:
                mses.append((target_z - predict).pow(2).sum(1) / 2)
            mses_tensor = torch.Tensor(10, test_batch).to(device)
            torch.cat(mses, out=mses_tensor)
            mses_tensor = mses_tensor.view(10, test_batch)
            # Predicted class = predictor with the smallest error.
            class_min_mse = torch.argmin(mses_tensor, dim=0)
            correct += torch.sum(torch.eq(y, class_min_mse)).item()
        acc = correct / 10_000
        if not silent:
            print('Accuracy: {}/{} ({:.0f}%)\n'.format(correct, 10000, 100. * acc))
    return acc
|
c7b4fff6b14f21631b5233747c9a40a8507d03d6
| 192,015 |
def ksw_func(d50, rh=0., rl=1.e-6, theta=0.):
    """
    Calculate wave roughness.

    Input:
        d50   - median grain size (m)
        rh    - ripple height (m)
        rl    - ripple wavelength (m)
        theta - time-averaged absolute Shields stress
    Returns:
        ksw - roughness (m)

    Based on Ribberink (1998) in van der A et al. (2013), Appendix A.
    """
    rh = max(rh, d50)
    rl = max(rl, d50)  # avoid divide-by-zero
    d50mm = d50 * 1.e3
    # Eqn. A.2: mobility factor mu, piecewise in grain size (mm).
    if d50mm >= 0.2:
        mu = 1.
    elif d50mm > 0.15:
        mu = 6. - 5. * (d50mm - 0.15) / (0.2 - 0.15)
    else:
        mu = 6.
    # Eqn. A.5: grain-related roughness plus ripple form roughness.
    return max(d50, d50 * (mu + 6. * (theta - 1.))) + 0.4 * rh * rh / rl
|
bf19680b928b254a2698a35ed2cd3a113474c9dc
| 599,435 |
import socket
def free_port() -> int:
    """
    Determine a free TCP port by binding an ephemeral socket on loopback.

    Note: the port is only guaranteed free at the moment of binding; it is
    released again before returning.
    """
    probe = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    probe.bind(('127.0.0.1', 0))  # port 0 -> OS picks a free port
    probe.listen(5)
    chosen_port: int = probe.getsockname()[1]
    probe.close()
    return chosen_port
|
03e33db6943a2e5ae646576edf1d70fce27e031e
| 581,559 |
def tuple_set(base, values, indices):
    """
    Create a new tuple equal to *base* but with *values* placed at *indices*.

    The list of indices must be non-empty and in sorted order, and
    len(values) must match len(indices): positions indices[0..n-2] take
    values[0..n-2] and the final index takes values[-1]. An empty
    *indices* raises IndexError.

    NOTE(review): when len(values) > len(indices) the last index silently
    receives values[-1] (middle values are skipped) — presumably callers
    always pass equal lengths; confirm before relying on other shapes.
    """
    # Start with the prefix before the first replacement position.
    new = base[:indices[0]]
    # Append each replacement value plus the stretch up to the next index.
    for i in range(len(indices)-1):
        new += (values[i],) + base[indices[i]+1:indices[i+1]]
    # Final replacement value plus the remaining tail of base.
    return new + (values[-1],) + base[indices[-1]+1:]
|
6f594cf189d71754e024f4770ad516f889ad7921
| 679,322 |
import ipaddress
def netmask(addr):
    """Return the netmask of an address given in interface notation
    (e.g. '192.168.1.1/24' -> '255.255.255.0')."""
    interface = ipaddress.ip_interface(addr)
    return str(interface.netmask)
|
6074bf5847ff9b325d1de62477c6dc6b2722c909
| 441,789 |
def point_str(x, y=0, z=0):
    """
    Return a string representation of a 3D point.

    Example: point_str(1,2,3) returns '(1.0,2.0,3.0)'

    Parameter x: the x coordinate
    Precondition: x is a number

    Parameter y: the y coordinate
    Precondition: y is a number

    Parameter z: the z coordinate
    Precondition: z is a number
    """
    # All coordinates are rendered as floats, e.g. 1 -> '1.0'.
    coords = (float(x), float(y), float(z))
    return '(' + ','.join(str(coord) for coord in coords) + ')'
|
1586e0b789449cb56690d631aca921ca501b4dcb
| 640,866 |
def parse_cards_from_board(raw):
    """
    Parse a board dump into a list of (word, team, flipped) tuples.

    Each input line is lower-cased and expected to look like
    "word, team, revealed|hidden"; flipped is True when the third field
    contains "revealed".
    """
    rows = (line.strip().split(", ") for line in raw.lower().strip().split("\n"))
    return [(fields[0], fields[1], "revealed" in fields[2]) for fields in rows]
|
828f1c81a99f44fb23fa615ad239db006b280800
| 513,817 |
from typing import Any
import random
def getKeyByWeights(array: dict, item: int = 0) -> Any:
    """
    Return a weighted random key from *array*.

    :param array: dict[Any: int | float] mapping keys to their weights
    :param item: int index into the (single-element) choices result
    :return:
        - key - Any
    """
    candidates = list(array.keys())
    weights = list(array.values())
    return random.choices(candidates, weights=weights)[item]
|
8f1d474c97466407ff643abce1ea0b12b3ebd951
| 13,512 |
from pathlib import Path
def is_probably_stubs_folder(distribution: str, distribution_path: Path) -> bool:
    """Heuristically validate that *distribution_path* is a stubs folder."""
    # The mypy cache directory is never a stubs distribution.
    if distribution == ".mypy_cache":
        return False
    return distribution_path.is_dir()
|
b62fc9ac350d268d81f942f2885abce537eb694f
| 374,186 |
def _correct_phase_wrap(ha):
"""Ensure hour angle is between -180 and 180 degrees.
Parameters
----------
ha : np.ndarray or float
Hour angle in degrees.
Returns
-------
out : same as ha
Hour angle between -180 and 180 degrees.
"""
return ((ha + 180.0) % 360.0) - 180.0
|
dc8d5b2bb414f6829a8794753133e1c5350ff5f8
| 347,085 |
import socket
def _is_port_in_use(host, port):
"""Check if a port is already being used."""
with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as s:
return s.connect_ex((host, port)) == 0
|
fff00086c61c91d07a942f5918d6706805ffff99
| 460,867 |
def files_with_suffix(files, suffix):
    """Return the subset of *files* whose names end with *suffix*."""
    return list(filter(lambda name: name.endswith(suffix), files))
|
315866e23d141bb3799e7eff0218c9d475905152
| 134,671 |
def get_gap(numberlist: list):
    """Find the first missing number in a sequence.

    Can take an integer list (e.g. one returned by :meth:`get_numbers`)
    and determine the first missing sequence number.

    Args:
        numberlist: Non-empty list of integers; first and last elements
            define the expected range.

    Returns:
        The first missing number in the sequence, or None if there is no gap.
    """
    # Use a set for O(1) membership; the original tested `in numberlist`
    # per candidate, which is O(n^2) overall.
    present = set(numberlist)
    for candidate in range(numberlist[0], numberlist[-1] + 1):
        if candidate not in present:
            return candidate
    return None
|
1b72de67a29fcc17964e5c10155e93f5fa7b1035
| 116,975 |
def recall_pos(y, y_pred):
    """Recall of the positive (+1) class: correct positives / actual positives."""
    positives = (y == 1)
    true_pos = (y[positives] == y_pred[positives]).sum()
    return true_pos / float(positives.sum())
|
357fbce5d7b163bc1812b646c291874ce6204a97
| 120,139 |
import re
def GetSegPos(string, keyC):
    """
    Get the (start, end) spans of maximal runs of the character keyC.

    e.g. given a string "0001100022000111100"
    and keyC = '1'
    return [(3,5), (13,17)]

    :param string: the string to scan
    :param keyC: the character whose runs are located
    :return: list of (start, end) index pairs, end exclusive
    """
    # Bug fix: keyC was interpolated into the pattern unescaped, so
    # regex-special characters like '.' or '*' matched the wrong thing.
    ex = "(%s+)" % (re.escape(keyC))
    return [(m.start(0), m.end(0)) for m in re.finditer(ex, string)]
|
a0b3fbfb3aa2d4a3b0d93ba3f69fb8bfa9d47ac7
| 384,858 |
def hello(name: str) -> str:
    """Build a hello-world greeting that includes *name*.

    :param name: name to be displayed
    :type name: str
    :return: the greeting string
    :rtype: str
    """
    return 'Hello World! My name is {}'.format(name)
|
b7f75c2fcce22af030e4575a1b6e6192ef3a1816
| 541,916 |
def target_directory(tmpdir_factory):
    """Create and return a temporary directory named 'target' for artifacts."""
    target = tmpdir_factory.mktemp("target")
    return target
|
af8e1d9c7e421b4662cab52c861b0fcea31b516b
| 356,783 |
def orbreap(orb):
    """Pop and return the next packet from *orb* via its reap() method."""
    next_packet = orb.reap()
    return next_packet
|
557ad1ac2af84c0c76cf8865ad560d9728803000
| 642,878 |
def __flatten_bookmarks(bookmarks):
"""
Converts a list of nested dicts (with children) from json file
to a flattened, sequential list of all children dicts. Adds only those children
of type 'text/x-moz-place' and ignores those of type 'text/x-moz-separator'.
:param bookmarks: List of nested dicts, e.g. [ { 'title':'rootFolder', children': [{},{},..] }, { }]
(json text)
:return: List of bookmarks dicts,
e.g. [{'type':'text/x-moz-place', 'uri':'abc', },{'type':'text/x-moz-place', 'uri':'def',},..]
"""
result = []
if bookmarks is not None:
for bookmark in bookmarks:
if bookmark.get("type") == "text/x-moz-place-container":
children_bookmarks = __flatten_bookmarks(bookmark.get("children"))
for child in children_bookmarks:
result.append(child)
else:
if bookmark.get("type") == 'text/x-moz-place':
result.append(bookmark)
return result
|
cf1b079384f4d69d1cce32a17ce6c6c8210fc2db
| 345,514 |
import pymysql.cursors
def mysql_conn(
    host: str = "localhost",
    port: int = 3306,
    user: str = "root",
    db_name: str = "newbook",
):
    """Open a PyMySQL connection for use with a pnguin RemoteFrame.

    Args:
        host (str): A hostname
        port (int): A port number
        user (str): A DB user
        db_name (str): The DB name being connected to

    Returns:
        pymysql.connections.Connection: a socket connection to the MySQL server
    """
    # Collect the connection settings in one place before dialing out.
    conn_kwargs = {
        "host": host,
        "port": port,
        "db": db_name,
        "user": user,
        "charset": "utf8mb4",
        "cursorclass": pymysql.cursors.DictCursor,
    }
    return pymysql.connect(**conn_kwargs)
|
651803e2e4bb0597f9eba0b698eb847a37c19275
| 598,038 |
import pickle
def loads(o):
    """Deserialize the pickled bytes *o* back into a Python object."""
    obj = pickle.loads(o)
    return obj
|
843b44dad61d29d5cd15eaf13f42bb8e452b59ad
| 411,468 |
def check_Nbyzan(opts, P):
    """
    Check and get the number of Byzantine machines that
    we are going to simulate.

    Parameters :
    -----------
    opts : list
        Option tuples passed by the command prompt; the count is read
        from opts[0][1] when present.
    P : int
        Total number of machines (nodes or workers).
        1 coordinator and the remaining are workers.

    Return :
    -------
    n_byzantines : int
        Number of byzantine machines to simulate (0 when no option given).

    Raises :
    -------
    ValueError
        If the requested count is negative or >= the number of workers.
    """
    # Bug fix: the original assigned 0 for empty opts but then fell through
    # and indexed opts[0] anyway, raising IndexError.
    if len(opts) == 0:
        return 0
    n_byzantines = int(opts[0][1])
    if n_byzantines < 0 or n_byzantines > P - 1:
        raise ValueError("Number of byzantine must be an integer "
                         "< number of workers or >= 0")
    return n_byzantines
|
aecbcaa8bd7febb59971d27fa81e7d1b678d0ae9
| 96,477 |
def get_random_element(faker, elements):
    """
    Delegate to faker.random_element to pick one item of *elements*.

    :param faker: Faker instance
    :param elements: list of candidate elements
    :return: a single element of the list
    """
    chosen = faker.random_element(elements=elements)
    return chosen
|
5fa77aa530633a9a157c57a2f8cb58357ca88169
| 286,939 |
def count(value, sub, start=None, end=None):
    """
    Count the non-overlapping occurrences of *sub* within value[start:end].

    Optional *start* and *end* are interpreted as in slice notation.
    """
    occurrences = value.count(sub, start, end)
    return occurrences
|
1842dec3a1a69a29663afdc956be9e70dc978e81
| 207,410 |
def supports_automatic_install(forge_version: str) -> bool:
    """
    Check whether install_forge_version() supports the given forge version.

    A version string looks like '<vanilla>-<forge>', e.g. '1.16.5-36.1.0';
    automatic install is supported from vanilla minor version 13 onward.
    Malformed strings yield False.
    """
    try:
        vanilla_version, _forge = forge_version.split("-")
        minor = int(vanilla_version.split(".")[1])
    except (ValueError, IndexError):
        # Narrowed from a bare `except:` — only parse failures mean
        # "unsupported"; anything else should surface.
        return False
    return minor >= 13
|
63122e0fb21e14ef215e2b6564c4739b18887dbc
| 529,728 |
def touch(name):
    """Create (or truncate) the file *name* and return its name."""
    with open(name, 'w') as handle:
        created = handle.name
    return created
|
3cd7296cb801b4c7c8344c8189eb46f1ebf8d3b4
| 136,430 |
def unpack(group_idx, ret):
    """Expand a packed aggregate array back to the size of group_idx.

    Equivalent to ret[group_idx].
    """
    expanded = ret[group_idx]
    return expanded
|
e28bc33f5abb83ec2774ac2bf52d3ba9030196f6
| 215,572 |
def get_subclasses(klass):
    """Return every (direct and indirect) subclass of *klass*.

    Subclasses are listed in depth-first pre-order.
    """
    found = []
    for sub in klass.__subclasses__():
        found.append(sub)
        found.extend(get_subclasses(sub))
    return found
|
7bf82a53f4b90aa11dcf5a3765eaf8b107ab0111
| 114,197 |
import torch
def prod_matrix(pts_src: torch.Tensor, pts_dst: torch.Tensor):
    """
    Build the elementwise outer-product matrix "x_i * y_j".

    :param pts_src: [R, D] matrix
    :param pts_dst: [C, D] matrix
    :return: [R, C, D] product matrix
    """
    # Broadcasting: [R, 1, D] * [1, C, D] -> [R, C, D]
    return pts_src[:, None, :] * pts_dst[None, :, :]
|
e4dbceedfb489f101ea9e65c1d6e2a3aaf8fbb89
| 642,371 |
def getModelFNameFromHyperPars(pref, *hyperParsList, suff=None):
    """
    Build a model filename from hyperparameter entries.

    Each entry in *hyperParsList* is either a dict of hyperparameter values
    or a (hyperpars_dict, default_hyperpars_dict) pair; in the pair case
    only values that differ from the default contribute to the name.
    The result has the form pref_key1_val1_key2_val2..., with dots in
    values replaced by underscores, plus an optional '_suff' tail.
    """
    parts = [pref]
    for entry in hyperParsList:
        if isinstance(entry, (tuple, list)):
            pars, defaults = entry
        else:
            pars, defaults = entry, {}
        for key in sorted(pars.keys()):
            if pars[key] != defaults.get(key):
                parts.append('_{}_{}'.format(key, pars[key]).replace('.', '_'))
    name = ''.join(parts)
    if suff:
        name += '_' + suff
    return name
|
8dcd4dbeea7a5b1c46833bf25c6cfb0a98f344ab
| 496,799 |
def get_agent_deployment_script (api, configuration, api_version, api_exception, platform, dsm_proxy_id = None, validate_certificate = None, activate = None, computer_group_id = None, policy_id = None, relay_id = None, relay_proxy_id = None):
    """ Obtains an agent deployment script from Deep Security Manager according to the provided parameter values
    :param api: The Deep Security API modules.
    :param configuration: Configuration object to pass to the api client.
    :param api_version: The version of the API to use.
    :param api_exception: The Deep Security API exception module.
    :param platform: The platform of the target computer. Valid values are linux, solaris, and windows.
    :param dsm_proxy_id: The ID of the proxy to use to connect to Deep Security Manager. Default is to use no proxy.
    :param validate_certificate: True indicates to validate that Deep Security Manager is using a valid TLS certificate from a trusted certificate authority (CA) when downloading the agent installer. Default is False.
    :param activate: True causes the script to activate the agent. Default is False.
    :param computer_group_id: The ID of the computer group to which the computer is added. Default is no group.
    :param policy_id: The ID of the policy to assign to the computer. Default is to assign no policy.
    :param relay_id: The ID of the relay to assign to the computer for obtaining updates. Default is no relay.
    :param relay_proxy_id: The ID of the proxy that the agent uses to connect to the relay. Default is to use no proxy.
    :return: A String that contains the deployment script, or an error description on failure.
    """
    # Create the AgentDeploymentScript object and configure
    deployment_script = api.AgentDeploymentScript()
    deployment_script.platform = platform
    deployment_script.dsm_proxy_id = dsm_proxy_id
    deployment_script.validate_certificate_required = validate_certificate
    deployment_script.activation_required = activate
    deployment_script.computer_group_id = computer_group_id
    deployment_script.policy_id = policy_id
    deployment_script.relay_id = relay_id
    # Bug fix: was assigned to `replay_proxy_id` (typo), silently dropping
    # the relay proxy setting.
    deployment_script.relay_proxy_id = relay_proxy_id
    try:
        deployment_scripts_api = api.AgentDeploymentScriptsApi(api.ApiClient(configuration))
        deployment_script = deployment_scripts_api.generate_agent_deployment_script(api_version, agent_deployment_script = deployment_script)
        return deployment_script.script_body
    except api_exception as e:
        return "Exception: " + str(e)
|
014a41ae97b9d4f19a75202b467df506a35fe30a
| 612,085 |
def comp_masses(self):
    """Compute the masses of the Lamination parts.

    Parameters
    ----------
    self : Lamination
        A Lamination object

    Returns
    -------
    M_dict : dict
        Lamination mass dictionary (Mtot, Mlam, Mteeth, Myoke) [kg]
    """
    rho = self.mat_type.struct.rho
    volumes = self.comp_volumes()
    # mass = volume * stacking factor * density, per part
    masses = {
        "Myoke": volumes["Vyoke"] * self.Kf1 * rho,
        "Mteeth": volumes["Vteeth"] * self.Kf1 * rho,
        "Mlam": volumes["Vlam"] * self.Kf1 * rho,
    }
    masses["Mtot"] = masses["Mlam"]
    return masses
|
4ef2b61a382f0a1822eae86a8f138ecf85490de1
| 626,457 |
def execute_cypher_transaction(tx, query, **kwargs):
    """Run *query* on the Bolt transaction *tx* and return all records.

    :param tx: Bolt transaction object with a .run(query) method
    :param query: cypher query string
    :param kwargs: optional flags; 'verbose' (bool) prints each record
        as it arrives
    :return: list of all records in the query response
    """
    # Bug fix: the original did kwargs['verbose'] whenever *any* kwarg was
    # passed, raising KeyError for calls that supplied other options.
    verbose = kwargs.get('verbose', False)
    records = []
    for record in tx.run(query):
        if verbose:
            print(record)
        records.append(record)
    return records
|
e61d12e4b617f549be93fc3f823a36b91843ff09
| 275,219 |
def quote(text):
    """Escape Discord MarkDown special characters with a backslash."""
    specials = "\\*`[_~:<"
    # Single-pass translation is equivalent to the chained replaces:
    # inserted backslashes are never re-escaped.
    escape_table = str.maketrans({ch: "\\" + ch for ch in specials})
    return text.translate(escape_table)
|
e38a0841028418204b0046e10a73b268c536362f
| 174,309 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.