content | sha1 | id
---|---|---|
def head_tail(line):
"""Returns the first word in 'line' and the rest of 'line' or None if the
line is too short."""
generator = (t.strip() for t in line.split(None, 1))
head = next(generator).strip()
tail = ''
try:
tail = next(generator).strip()
except StopIteration:
pass
return head, tail
|
84f14389c96015f6e1dcc235d66038a97c654767
| 240,997 |
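An illustrative doctest for head_tail (editorial addition; the values follow directly from the code above):
>>> head_tail('alpha beta gamma')
('alpha', 'beta gamma')
>>> head_tail('alpha')
('alpha', '')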
def strip_out_too_small(bbs, classes=[], min_size=6):
"""Removes BBs which are unrealistically small
Parameters
----------
bbs : list
list of BBs, in the same form as for merge_close_bbs
classes : list, optional
list of classes, by default []
min_size : int, optional
size below which to remove BBs, by default 6
Returns
-------
list, list
returns BB list and classes list after removing small BBs.
"""
    bbs_no_small = []
    newclasses = []
    for i, bb in enumerate(bbs):
        l, r, b, t = bb[:4]
        if r - l < min_size or t - b < min_size:
            continue
        newclasses.append(classes[i])
        bbs_no_small.append([l, r, b, t])
    return bbs_no_small, newclasses
|
712200b71dd32dcaa0cc1520fe691625dcc6333b
| 331,704 |
def read_list(filename):
"""Read the contents of a text file into a list and
return the list. Each element in the list will contain
one line of text from the text file.
Parameter filename: the name of the text file to read
Return: a list of strings
"""
# Create an empty list named text_list.
text_list = []
# Open the text file for reading and store a reference
# to the opened file in a variable named text_file.
with open(filename, "rt") as text_file:
# Read the contents of the text
# file one line at a time.
for line in text_file:
# Remove white space, if there is any,
# from the beginning and end of the line.
clean_line = line.strip()
# Append the clean line of text
# onto the end of the list.
text_list.append(clean_line)
# Return the list that contains the lines of text.
return text_list
|
3231adf61bed86be5f0392eaa7654ca69fb5623a
| 399,150 |
def append_cmd_line_args_to(cmd):
"""Appends the syntax for command line arguments ("$*") to the cmd
Args:
cmd: A string representing the command.
Returns:
'cmd' with the syntax for command line arguments ("$*") appended
"""
return cmd + " $*"
|
0efb064aa4268c84d055dbe7efc6c928b9445e44
| 415,893 |
import csv
def get_words(file_path, column_name, delimiter=",", **fmtparams):
"""
Return a list containing only the items from the *column_name* column in
the *delimiter*-separated file found at *file_path*. Also takes any
of `csv.DictReader`'s *fmtparams*.
"""
with open(file_path, "r") as csvfile:
reader = csv.DictReader(csvfile, delimiter=delimiter, **fmtparams)
words = [row[column_name] for row in reader]
return words
|
173443dfac64c4bfc73b338cf4dc63a9da50a365
| 515,302 |
def read_gems(directory, file_name):
"""
Read a GEM file of the form "PlinePgem".
Args:
directory (str): directory of the file location (ex: '/Users/kimm/')
file_name (str): name of the file (ex: 'SHG0008H.Fragnum_PlinePgem')
Returns:
in_gems (list): tab-separated lists of lists
"""
with open(directory + file_name) as f:
in_gems = [line.strip().split("\t") for line in f]
return in_gems
|
d583cc9f8d3b53fb42dd4e45f2d74531716e8ab5
| 629,769 |
def _n_to_state_data_map(state_lines_data):
"""Returns a map from
N (state index) -> State (list of state line data)
where N uses the convention of the *.lpt files (beginning at 1 instead of
0)
:param state_lines_data: list of state line data
"""
nb_map = dict()
for cbl in state_lines_data:
nb_map[cbl[0]] = cbl[1:]
return nb_map
|
5c87113cdb1ff106065eade120fd3d6dc70503c2
| 281,322 |
import operator
def most_frequent_words(wordcounts, num):
"""Return a list of the top num most frequent words.
The list is ordered by decreasing frequency.
"""
    # dict.items() is a view in Python 3 and has no .sort(); use sorted()
    items = sorted(wordcounts.items(), key=operator.itemgetter(1), reverse=True)
    return [item[0] for item in items[:num]]
|
47d4204e147f509dbc5c44ab7b7ebfbe143ed468
| 551,797 |
def __get_max_abs_value(array):
"""
Retrieve the maximum absolute value of an array
:returns: the maximum absolute value of the input array
:rtype: real
:param array: numpy array
:type array: np.array
"""
    mini = min(array)
    result = max(array)
    # the most negative element may have the largest magnitude
    if abs(mini) > result:
        result = abs(mini)
    return result
|
68b083a29219820222e3d5f5e19560ed9242d88b
| 182,720 |
def chequers(material1, material2):
"""Alternate between materials (in all directions)."""
def f(point):
if (point.x+point.y+point.z) % 2 == 0:
return material1
else:
return material2
return f
|
8b04d217f11c80c6b8833f8b9497ca11af91118d
| 492,834 |
def extract_id(urn):
"""Extract id from an urn.
'urn:vcloud:catalog:39867ab4-04e0-4b13-b468-08abcc1de810' will produce
'39867ab4-04e0-4b13-b468-08abcc1de810'
:param str urn: a vcloud resource urn.
:return: the extracted id
:rtype: str
"""
if urn is None:
return None
if ':' in urn:
return urn.split(':')[-1]
else:
return urn
|
e9e81b46d3968181b396d5d433da89e7bd78bf55
| 263,640 |
def remark_docstyle(source_files, docstyle_checks_total, ignored_pydocstyle_files, display_results):
"""Generate remark when not all source files are checked by pydocstyle."""
if not display_results:
return "<li>docstyle checker is not setup</li>"
elif source_files != docstyle_checks_total + ignored_pydocstyle_files:
return "<li>not all source files are checked by pydocstyle</li>"
return ""
|
feefa127c02badeb48ed4f03e4ad2cd1af2cb64d
| 257,570 |
def get_older_than_n(imgs,n):
"""Returns images from the list that are older than the newest n images"""
    if len(imgs) <= n:
        return []
    # sort a copy so the caller's list is not reordered as a side effect
    newest_first = sorted(imgs, key=lambda img: img.attrs['Created'], reverse=True)
    return newest_first[n:]
|
e50cc84d99478c3711ffa782cc57815d67b8d651
| 483,130 |
def OR(A, B):
"""Returns events that are in A OR B."""
return A.union(B)
|
c89df03ab0398a4cc555a019980cbd0c3fe7c70f
| 310,792 |
import re
def ExtractMacro(filename, macro):
"""
Return the string value of the macro `macro' defined in `filename'.
"""
# Simple regex is far from a complete C preprocessor but is useful
# in many cases
regexp = re.compile(r'^\s*#\s*define\s+%s\s+"(.+[.].+[.].+)"\s*$' % macro)
    try:
        # use a context manager so the file is always closed
        with open(filename) as header_file:
            for line in header_file:
                m = regexp.match(line)
                if m:
                    return m.group(1)
    except EnvironmentError:
        pass
    return ''
|
c20e40ca32f911f2313870d0afc3b5bcda69cb8a
| 59,248 |
from typing import Tuple
def decode_length(b: bytes) -> Tuple[int, int]:
""" Decodes a shortvec-encoded length from the provided bytes
:param b: The provided bytes
:return: The length decoded from the provided bytes and how many bytes it used.
"""
offset = 0
length = 0
while offset < len(b):
val = b[offset]
length |= (val & 0x7f) << (offset * 7)
offset += 1
if (val & 0x80) == 0:
break
if offset > 3:
raise ValueError(f'invalid size: {offset} (max 3)')
return length, offset
|
051f28ca178667b4a913a6691dda4c835b38d34f
| 572,454 |
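A quick doctest for decode_length (editorial addition; shortvec packs 7 bits per byte, LSB first, with the high bit as a continuation flag):
>>> decode_length(bytes([0x05]))
(5, 1)
>>> decode_length(bytes([0xac, 0x02]))
(300, 2)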
def notcontains(value, arg):
"""
Test whether a value does not contain any of a given set of strings.
`arg` should be a comma-separated list of strings.
"""
for s in arg.split(','):
if s in value:
return False
return True
|
d7d2b5bf4b3b28b771083677b0817537cdeea455
| 686,666 |
def construct_return_statement(prediction, dog_detection, human_detection):
"""Construct a str return statement from predicted breed and results of dog/human detector."""
if dog_detection:
return 'This is a {}.'.format(prediction)
elif human_detection:
return 'This human looks like a {}.'.format(prediction)
else:
return 'No human (frontal-face) or dog detected in image.'
|
3a675ab56706b34efc4b3e511566ead2fc26799f
| 640,329 |
from unittest.mock import Mock
def _get_details(with_wait=False):
"""Return a details dict that conforms to what the backoff handlers expected
Only the on_backoff handler will contain a 'wait' value
"""
details = {'elapsed': 1.2345, 'tries': 3, 'target': Mock(__name__='func')}
if with_wait:
details['wait'] = 1.0
return details
|
3eb4e47c7328fda0fd15ba9268a9cf1ec4255f11
| 382,256 |
def get_workflow_name(workflow):
"""Return a name of a Workflow.
:param workflow: Workflow object which name should be returned.
:type workflow: reana-commons.models.Workflow
"""
return workflow.name + "." + str(workflow.run_number)
|
ac9e1bcfb4dac1d9d6df7e8a02385b567c00cedf
| 374,362 |
def read_cin_mappings(mappings):
"""
Read cin mappings stored within file
Params:
mappings (str): path to cin mappings file
    Returns: a dict of {trigger: grade} representing mappings for cervical intraepithelial neoplasia
"""
with open(mappings, 'r') as file:
lines = file.readlines()
mappings = {}
for line in lines:
trigger, grade = line.strip().split('\t')
mappings[trigger] = grade
return mappings
|
057fb70820a54d4e220be03d46c03715f28fe855
| 556,479 |
def table_id_for_date(date):
"""
Return BigQuery table name to hold the extracted prescribing data for the
given date
"""
return "prescribing_{}".format(date[:7].replace("-", "_"))
|
1314a67f8cf4dbe445d02442fccdd052e15800ec
| 544,622 |
def life_crear(mapa):
"""
    Creates the initial state of Game of Life from a layout represented
    with the characters '.' and '#'.
    `mapa` must be a list of strings, where each string represents a row
    of the board, and each character may be '.' (empty) or '#' (cell).
    All rows must have the same number of characters.
    Returns the game state: a list of lists where each sublist represents
    a row, and each element of the row is False (empty) or True (cell).
"""
return [[elemento=="#" for elemento in fil] for fil in mapa]
|
cf71e1ba2d2fc1544e2d86a67cfea8a500481f93
| 604,090 |
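An illustrative doctest for life_crear (editorial addition):
>>> life_crear(['.#', '#.'])
[[False, True], [True, False]]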
import re
import fnmatch
def _name_suffices(txt):
"""
Creates time series name suffices e.g. "Te" and "Mx1", from parsing the "DOF" information in the key-file from
RIFLEX-DYNMOD.
Parameters
----------
txt : list
The lines of the key-file
Returns
-------
list
        Suffixes, e.g. 'Xd', 'Yd', or 'Te', 'Sy1', 'Sy2', etc.
"""
# extract indices for blocks with DOF descriptions
p = re.compile("following applies")
inds = [txt.index(line) for line in [line for line in txt if p.search(line)]]
# Always choose the last block:
# If there are two blocks, the first will be bar elements, for which only the axial tension is stored.
# This dof will also be the first dof for beam elements, if there are any. Hence, only the last block needs
# to be parsed.
ind = inds[-1]
p = re.compile("DOF")
# list of DOF (...) strings
dofstrings = [line for line in txt[ind:] if p.search(line)]
# extract DOF descriptions by splitting on '='
dofdescr = [x.split('=')[-1].strip() for x in dofstrings]
# define suffices
suffices = [''] * len(dofdescr)
for i, ds in enumerate(dofdescr):
# displacements
if re.match('displacement', ds):
# extract direction : x, y, z
suff = ds.split()[2].upper() + 'd'
# forces, moments
elif re.match('Axial', ds):
suff = 'Te'
elif re.match('Torsional', ds):
suff = 'Mx'
elif re.match('Mom.', ds):
dsp = ds.split()
suff = 'M'
# which axis
suff += fnmatch.filter(dsp, '*axis*')[0][0]
# which end
suff += str(dsp[-1])
elif re.match('Shear', ds):
dsp = ds.split()
suff = 'S'
# which axis
suff += fnmatch.filter(dsp, '*direction*')[0][0]
# which end
suff += str(dsp[-1])
else: # unknown description, use DOFxx
suff = 'DOF' + str(i + 1).zfill(2)
suffices[i] = suff
return suffices
|
c29780fde90acaa4bae89b6a60f34fe2b437926b
| 368,780 |
def _display_err(filename, line, column, message, length, nlines, content): # pylint: disable=R0913
"""Mark the source code error location in content and return a string for display"""
lines = content.splitlines()
start = max(0, line+1-nlines)
res = [f"File {filename}, line {line+1}:{column+1}"]
res.append(str('-' * (len(res[0]) + 7)))
res += lines[start:line+1]
res += [(' ' * column) + ("^" * length), message]
return "\n".join(res)
|
fd3a3f569299ecddab0c987160454d3ff23d2ec9
| 453,148 |
def event_counter(object_type: str) -> str:
"""Return db key for the event counter for the object type.
The value stored at this key is used to generate a unique event id.
Args:
object_type (str): Type of object.
Returns:
str, database key for the event counter
"""
return 'events:{}:count'.format(object_type)
|
eb1ad15af86d70e7aaf9415707201aef934ca1fc
| 623,091 |
import torch
def load_model_pytorch(path: str):
"""
    Load a PyTorch model from a given path
https://pytorch.org/tutorials/beginner/saving_loading_models.html
:param path: Path to model to load
:return: Pytorch model
"""
model = torch.load(path)
model.eval()
return model
|
6222225977a64f69014177ad2c430e8964f4a3f8
| 603,309 |
def get_long_name(uid):
"""
return long name for variable
"""
return uid.title
|
dd2241415884ee3e4d65e0a2b5639c64213650f0
| 213,731 |
def normalize_output_name(output_name):
"""Remove :0 suffix from tensor names."""
return output_name.split(":")[0] if output_name.endswith(
":0") else output_name
|
20c3b9a64a0aace5f1e70025f7d25083dc4787ba
| 197,748 |
import torch
def get_input_shape_batch_size(data_loader):
"""
Gets input shape of image and batch size from data loader
:param data_loader: Iterates over data set
:return: returns batch size and shape of one image
"""
    # pull a single batch; no need to loop over the whole loader
    images_in_one_batch, _ = next(iter(data_loader))
    # finding shape of a batch
    input_shape = images_in_one_batch.size()
    return input_shape[0], (1, input_shape[1], input_shape[2], input_shape[3])
|
c47f4fa2f2510426bdddff88b14e1ffde93438ef
| 470,352 |
from typing import List
def match_prompt_list(target_str: str, prompt_list: List[str]) -> bool:
"""
Matches any of the prompt candidate strings.
"""
for prompt_str in prompt_list:
if target_str == prompt_str:
return True
return False
|
aa2b3369199d0d269d0eaf01c02b42cd346578af
| 76,244 |
import torch
def get_data(generic_iterator, generic_loader):
"""Code to get minibatch from data iterator
Inputs:
- generic_iterator; iterator for dataset
- generic_loader; loader for dataset
Outputs:
- data; minibatch of data from iterator
- generic_iterator; iterator for dataset, reset if
you've reached the end of the dataset"""
try:
data = next(generic_iterator)[0]
except StopIteration:
generic_iterator = iter(generic_loader)
data = next(generic_iterator)[0]
if torch.cuda.is_available():
data = data.cuda()
return data, generic_iterator
|
e827ab7cea13c96953260d6b157a3e6ab370c6c9
| 697,656 |
def split_query_string(query_string):
"""
Splits a query string into a dictionary
    >>> split_query_string('a=1&b=2&name=abc&y=&z=ab23')
    {'a': '1', 'b': '2', 'name': 'abc', 'y': '', 'z': 'ab23'}
"""
qs_dict = {}
qs_parts = query_string.split('&')
for qs_part in qs_parts:
        # split on the first '=' only, so values may themselves contain '='
        k, v = qs_part.split('=', 1)
qs_dict[k] = v
return qs_dict
|
ce53938a00d0771060b7dd4c19ac8d260e65a127
| 604,341 |
import math
def convert_to_readable_size(size, orig_unit='B'):
"""Converts size to human readable unit"""
units = ['B', 'KB', 'MB', 'GB', 'TB', 'PB']
# convert original size to bytes
try:
i = units.index(orig_unit)
    except ValueError:
        raise RuntimeError('Invalid size unit passed: %s' % (orig_unit))
    size = size * pow(1024, i)
    # guard: math.log is undefined for size == 0
    if size <= 0:
        return "0 B"
    unitIndex = int(math.floor(math.log(size, 1024)))
    # clamp the unit between B and PB
    unitIndex = min(max(unitIndex, 0), 5)
sizer = math.pow(1024, unitIndex)
newsize = round(size / sizer, 2)
return "%s %s" % (newsize, units[unitIndex])
|
6eb963bd53d0e5c176b3f73d51b73be2499beea2
| 522,264 |
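Illustrative doctests for convert_to_readable_size (editorial addition; values checked against the arithmetic above):
>>> convert_to_readable_size(1536)
'1.5 KB'
>>> convert_to_readable_size(2048, 'KB')
'2.0 MB'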
def has_tiles(export_task_record_name):
"""
Some output types are spatially invariant and it is a waste to try and build per-tile statistics for them. Also
    the Project File will contain a copy of each export format, which could grossly overestimate data in a region
(i.e. a job may export gpkg and shapefile so the project.zip will contain 2x copies of the data)
:param export_task_record_name: The name of the ExportTaskRecord
:return: True if the result of this task should be included in per-tile statistics
"""
return export_task_record_name not in [
"Area of Interest (.geojson)",
"Project File (.zip)",
]
|
36fd6662761f2da4a02c4f4eacc6dd07e2ec420f
| 249,429 |
from datetime import datetime
def datetime_stamp(string_ts):
"""Create date-time stamp in `DD.MM.YYYY HH:MM:SS` format
:argument string_ts: date-time stamp
:type string_ts: str
:returns datetime object
"""
return datetime.strptime(string_ts, '%d.%m.%Y %H:%M:%S')
|
05f8799917b6c48898a20980dc89e80caf2250d0
| 496,718 |
import math
def work(force, displacement, theta):
"""
Calculates the work done by
the force while displacing
an object at an angle theta
Parameters
----------
force : float
    displacement : float
    theta : float (in radians)
Returns
-------
float
"""
return force * displacement * math.cos(theta)
|
989d44fe8f7e64a755484bc95db2e44abaaae3f5
| 667,243 |
def _get_autos_index(prod_array):
"""Obtain auto-correlation indices from the 'prod' index map
returned by andata.
"""
autos_index, autos_chan = [], []
for ii in range(len(prod_array)):
if prod_array[ii][0] == prod_array[ii][1]:
autos_index.append(ii)
autos_chan.append(prod_array[ii][0])
return autos_index, autos_chan
|
5afb8918e4ac8adce3527185243869505cd0dce4
| 273,267 |
import uuid
def GUID(namespace, solution, data):
"""
Generates GUID in given namespace, for given solution (bkl project), with
given data (typically, target ID).
"""
g = uuid.uuid5(namespace, '%s/%s' % (str(solution), str(data)))
return str(g).upper()
|
32ad3513cad503074a72af837fd5c5570dfeb356
| 142,546 |
def split_train_verify(data_set):
"""Split the dataset to the train dataset and verify dataset."""
verify_set = data_set[910:]
data_set = data_set[:910]
return data_set, verify_set
|
4f604c652bce36bc323b4c324a6e6e6b6c888932
| 441,484 |
def _crossGeneRandom(genea, geneb, crossInds):
"""
    A crossover strategy where parts of two genes are randomly swapped.
12347 => 14341
54321 52327
"""
    # Copy both genes before swapping (numpy arrays assumed); plain assignment
    # would only alias the inputs, so the second swap would read values that
    # the first swap had already overwritten.
    aOut = genea.copy()
    aOut[crossInds] = geneb[crossInds]
    bOut = geneb.copy()
    bOut[crossInds] = genea[crossInds]
    return aOut, bOut
|
1fcad8da4f4510f36f6d9fc3836ee9383da89ba8
| 305,631 |
def hamming_distance(str1, str2):
"""Computes the hamming distance (i.e. the number of
differing bits) between two byte strings.
Keyword arguments:
str1 -- the first byte string
str2 -- the second byte string
"""
distance = 0
for b1, b2 in zip(str1, str2):
# xor in each place is 1 if the bits differ, 0 otherwise
bdiff = b1 ^ b2
while bdiff > 0:
# if the last bit is 1, count it
if bdiff % 2 == 1:
distance += 1
# drop the last bit
bdiff = bdiff >> 1
return distance
|
5bc24c6ebf7b0d0f589e91b9f50617bb037c83a8
| 668,520 |
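A doctest using the classic Cryptopals check (editorial addition; the bitwise distance between these two strings is the well-known value 37):
>>> hamming_distance(b'this is a test', b'wokka wokka!!!')
37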
def timedelta_split(delta):
"""
Decompose a timedelta into hours, minutes and seconds
(timedelta only stores days and seconds).
"""
sec = delta.seconds + delta.days * 24 * 3600
hours, remainder = divmod(sec, 3600)
minutes, seconds = divmod(remainder, 60)
return hours, minutes, seconds
|
a160a7eaed1cfddea093c35158a9f02fa416b297
| 234,113 |
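An illustrative doctest for timedelta_split (editorial addition):
>>> from datetime import timedelta
>>> timedelta_split(timedelta(days=1, hours=1, minutes=1, seconds=1))
(25, 1, 1)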
def _hunks(modified_lines):
"""
Given a list of line numbers, return a list of hunks represented
as `(start, end)` tuples.
"""
# Identify contiguous lines as hunks
hunks = []
last_line = None
for line in sorted(modified_lines):
# If this is contiguous with the last line, continue the hunk
# We're guaranteed at this point to have at least one hunk
if (line - 1) == last_line:
start, _ = hunks[-1]
hunks[-1] = (start, line)
# If non-contiguous, start a new hunk with just the current line
else:
hunks.append((line, line))
# Store the last line
last_line = line
return hunks
|
b0646c522c121016f7dfd4da352b1e3f34aed647
| 564,961 |
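An illustrative doctest for _hunks (editorial addition; input order does not matter since the lines are sorted first):
>>> _hunks([3, 1, 2, 8, 7, 10])
[(1, 3), (7, 8), (10, 10)]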
def resdiv_r2(Vin, Vout, R1):
"""
    Calculate the exact value of R2 with R1 given, for a voltage divider: R2 = R1 / (Vin/Vout - 1).
"""
return R1 / (Vin/Vout - 1)
|
7c83447a58c15b4a096164e45ff1571d8bdf34d6
| 662,911 |
def get_day_name(weekday):
"""
This function gets the number of the day considering
the week and returns a String. The string corresponds
to the name of the weekday in Portuguese.
:param weekday: int
The weekday is generated by a
function of datetime package:
:return: String
Example of weekday:
import datetime
weekday = datetime.datetime.strptime(date, '%d %m %Y').weekday()
"""
if weekday == 0:
return "Seg"
elif weekday == 1:
return "Ter"
elif weekday == 2:
return "Qua"
elif weekday == 3:
return "Qui"
elif weekday == 4:
return "Sex"
elif weekday == 5:
return "Sáb"
else:
return "Dom"
|
0ecbfad6887f6dd254b12255301c052e423593c6
| 614,671 |
import textwrap
def indent(s,prefix):
"""
Wrapper for textwrap.indent to handle Python2/3
"""
try:
# Python3
return textwrap.indent(s,prefix)
except AttributeError:
# Python2
return '\n'.join([prefix+line.rstrip('\n')
for line in s.splitlines(True)])
|
6a1d8b5321dca6f471f53284276ddf14955a2fb3
| 561,876 |
def fmt_value(value):
"""format attribute value"""
return value.strip().replace('\r', ' ').replace('\n', ' ')
|
7b4669fdf60bddfa68183bce6c3f8ef77f1f299d
| 622,263 |
def get_trader_dispatch_target(model, trader_id, trade_type):
"""
    Extract dispatch target for a given unit. Return 0 if trade_type doesn't
    exist for a given trader_id
"""
if (trader_id, trade_type) in model.V_TRADER_TOTAL_OFFER.keys():
return model.V_TRADER_TOTAL_OFFER[trader_id, trade_type].value
else:
return 0.0
|
246a18fc7401c468fdb889fc4abd3dfcf8470ea0
| 220,445 |
def set_model_properties_from_dict(model_obj, property_dict):
"""
    Takes a model object and a dict of property names mapped to desired values, then sets all of the relevant
    property values on the model object and saves it
"""
for field, value in property_dict.items():
setattr(model_obj, field, value)
model_obj.save()
return model_obj
|
73e7c44f55f2588266f5408cc2b99cdb54382f87
| 302,213 |
def _find_conv2d_op(op):
"""Find the op with conv2d in its tag by traversing."""
if 'conv2d' in op.tag:
return op
for tensor in op.input_tensors:
op_ = _find_conv2d_op(tensor.op)
if op_ is not None:
return op_
return None
|
17bdd39ead9b9e58bd278a70d62953d1e5123f82
| 197,267 |
def line_goes_through_border(pos1, pos2, dest1, dest2, border, lower, upper):
"""
Test if the line from (pos1, pos2) to (dest1, dest2) goes through the border between lower and upper.
"""
try:
m = (border - pos1) / (dest1 - pos1)
except ZeroDivisionError:
return False
wall_closer = 0 < m < 1
through_wall = lower <= pos2 + m * (dest2 - pos2) <= upper
return wall_closer and through_wall
|
aca4dae17f0e33dd3d643798cd289b598e8152be
| 671,747 |
def _broadcast_params(params, num_features, name):
"""
If one size (or aspect ratio) is specified and there are multiple feature
maps, we "broadcast" anchors of that single size (or aspect ratio)
over all feature maps.
If params is list[float], or list[list[float]] with len(params) == 1, repeat
it num_features time.
Returns:
list[list[float]]: param for each feature
"""
assert isinstance(
params, (list, tuple)
), f"{name} in anchor generator has to be a list! Got {params}."
assert len(params), f"{name} in anchor generator cannot be empty!"
if not isinstance(params[0], (list, tuple)): # list[float]
return [params] * num_features
if len(params) == 1:
return list(params) * num_features
assert len(params) == num_features, (
f"Got {name} of length {len(params)} in anchor generator, "
f"but the number of input features is {num_features}!"
)
return params
|
de1f93a8a3ab19679f1936e5d6fb581475fa1893
| 652,395 |
def brg_to_rgb(brg_colour):
"""Reorders RGB colour value used by DAWN's LED strip to standard RGB order"""
return (
brg_colour[1],
brg_colour[2],
brg_colour[0]
)
|
9ca9cfb5dd5a9c4ee4bf913b2b577ecef73bb4e4
| 590,960 |
def card_abbrev(value, suit):
"""
Constructs an abbreviation for the card, using the given
value, and suit.
:arg str value:
The value to use.
:arg str suit:
The suit to use.
:returns:
A newly constructed abbreviation, using the given value
& suit
"""
if value == "Joker":
return "JKR"
elif value == "10":
return "10%s" % (suit[0])
else:
return "%s%s" % (value[0], suit[0])
|
b341f2a26f72a1158f5328d252e65454b80e5c5f
| 279,221 |
def or_options(values, initial_value=0):
"""
Combine all given values using binary OR.
"""
options = initial_value
for value in values:
options |= value
return options
|
7e6866cfa0cb1a70616455ec2e97b7165c3a8b9d
| 126,797 |
def limit_issues(issues, limit_len=100000):
"""Limit the number of issues saved in our DB."""
sorted_issues = sorted(issues, key=lambda x: x['updated_at'], reverse=True)
return sorted_issues[:limit_len]
|
1dcd4461e9ca78605bf03e7a0fd1635a49bdd69a
| 666,058 |
import copy
def remove_connected_component(cc, connected_components):
"""
Attempt to remove connected component and return the smaller set
:param Panel cc: connected component to remove
:param iterable connected_components: set of all connected components
:return: smaller set of connected components
"""
if not isinstance(connected_components, set):
connected_components = set(copy.deepcopy(connected_components))
connected_components.remove(cc)
return connected_components
|
e1be0e1784d89d0a2637deb6bfbf872156f7e354
| 617,316 |
import io
import csv
def read_csv(text, start):
"""Read the given string as a CSV file.
"""
csvfile = io.StringIO(text)
dialect = csv.Sniffer().sniff(csvfile.readline())
reader = csv.reader(csvfile, dialect)
for _ in range(start - 1):
# NOTE: we use 'start - 1' because we've already read the first line
# (see above).
next(reader)
return reader
|
e609728a595cf0756ac975c2fbaca1e4d9e59c5f
| 186,001 |
import itertools
def concat_list(in_list: list) -> list:
"""Concatenate a list of list into a single list."""
return list(itertools.chain(*in_list))
|
5a58e8e1899fce99f8dabe681206507ae8ad4b8c
| 700,279 |
def __get_tweet_content_from_res(gh_res):
"""From the response object returned by GitHub's API, this function builds the
content to be tweeted
Args:
gh_res (dict): A dictionary of repository details
Returns:
str: The string to be tweeted.
"""
tweet_content = gh_res["name"]
if gh_res["language"]:
tweet_content += f' in #{gh_res["language"]}'
tweet_content += f' by {gh_res["owner"]["login"]} ({gh_res["html_url"]}).'
if gh_res["description"]:
tweet_content += f'\n{gh_res["description"][:100]}'
if (len(gh_res["description"])) > 100:
tweet_content += '...'
return tweet_content
|
50db59f21bce3638217de4132f376acc44542b7b
| 667,224 |
def convert_search_to_dictionary(search):
""" Converts the given search to a dictionary. Useful in mocking queries from dictionaries in testing.
:param search: elasticsearch_dsl object to convert
:return: query in dictionary form
"""
return search.to_dict()
|
abf3f4703a81d7a3a677b94c96a2c3e36c31dabd
| 151,698 |
import struct
def bu32(value):
"""Convert a 32-bit integer to bytes (LSB first).
Example:
bu32(0x12345678) == b'\x78\x56\x34\x12'
"""
return struct.pack('<I', value)
|
dc07ff6ea3c4bef865a0dce9c020a9370719d5fc
| 423,889 |
def issho_pw_name(pw_type, profile):
"""
Helper for standardizing password names
"""
return "issho_{}_{}".format(pw_type, profile)
|
3c69849bae9e1d2c439164cb6085715c33262f45
| 546,474 |
def exists(env):
"""Returns true if tool exists."""
# NOTE: SCons requires the use of this name, which fails gpylint.
return env.Detect('distcc')
|
339fd7c09dcaee8bc53beaa87fb83481a5db836e
| 19,312 |
def isqrt(x: int) -> int:
""" Returns the largest integer y such that y * y <= x """
if x < 0:
raise ValueError('square root not defined for negative numbers')
elif x == 0:
return 0
n = x
a, b = divmod(n.bit_length(), 2)
x = 2 ** (a + b)
while True:
y = (x + n // x) // 2
if y >= x:
return x
x = y
|
9d43432d3aff1f8817dbc8bcf66b66f6fdacdf1d
| 270,428 |
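Illustrative doctests for isqrt around a perfect square (editorial addition):
>>> isqrt(15), isqrt(16), isqrt(17)
(3, 4, 4)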
def matched_in_prev_blocks(gid, current_start, block_nodes):
"""
check whether gateway with gid is matched in previous block
:param gid:
:param current_start:
:param block_nodes:
:return:
"""
prev_nodes = set()
for prev_start, nodes in block_nodes.items():
if prev_start == current_start:
continue
prev_nodes.update(nodes)
return gid in prev_nodes
|
119033c4910e86fad0e014b77bfa545df24f2bc9
| 116,724 |
def table_name_suffix(load_table_s3_prefix: str) -> str:
"""
Table name suffix should be '__ct' if the S3 prefix is from change tracking.
Parameters
----------
load_table_s3_prefix : str
The load's prefix as determined by its S3 key.
Returns
-------
str
"__ct" or "" depending on the load's prefix
"""
return "__ct" if load_table_s3_prefix.endswith("__ct/") else ""
|
16df14ef505ea2bdc42d396510bd49ffa2fe33cf
| 623,151 |
def remove_from_string(string, letters):
"""Given a string and a list of individual letters, returns a new string
which is the same as the old one except all occurrences of those letters
have been removed from it."""
for i in string:
for j in letters:
if i == j:
string = string.replace(str(i), "") # Don't forget to update string!
return string
|
cf594b682dbdeefbe159dfd666f6757caf1ee8ed
| 633,380 |
def div_to_zero(a: int, b: int) -> int:
"""Integer division (a/b) that truncates towards 0, instead of -infinity as
is default for Python. Assumes b is positive, but a can be negative or
positive.
"""
return a // b if a >= 0 else (a - 1) // b + 1
|
ecc0ecb75f117ef3ae3e6106dc8fcf3e95744282
| 280,026 |
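A doctest contrasting floor division with truncation toward zero (editorial addition):
>>> (-7) // 2, div_to_zero(-7, 2)
(-4, -3)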
def split_key_path_to_hive_and_path(key_path):
"""Return a (hive, path) tuple given a full key path"""
path_parts = key_path.split("\\")
hive = path_parts[0]
path = "\\".join(path_parts[1:])
return hive, path
|
e332883cf79ded9cf05c94cf3f1d1501b76ef5a3
| 553,390 |
def remove_forbidden_characters(name):
"""
    A function that will remove all the forbidden characters from the string. The forbidden characters are the ones
    that are not allowed to be used in the names of Windows files. Those are --> r'\/*?:<>"|'.
Parameters:
name : string
Returns
`name` without the forbidden characters
"""
new_name = ""
for znak in name:
if znak in r'\/*?:<>"|':
new_name += '_'
else:
new_name += znak
return new_name
|
15779459ea54bde04523afac60174f95ee2e4689
| 601,487 |
import random
def _pick_split_point(p_span, p_config):
"""
Picks a point at which to split the specified span into two 'intervals' [a, b) and [b, c).
Return value is b, i.e. exclusive for first interval, inclusive for second.
Returns -1 if the span is not wide enough to fit two new rooms with any split.
"""
#new rooms each need at least p_config.min_span tiles
if p_span < (p_config.min_span + p_config.min_offset * 2) * 2:
return -1
    # use lo/hi to avoid shadowing the built-in min/max
    lo = p_config.min_span + 2 * p_config.min_offset
    hi = p_span - p_config.min_span - 2 * p_config.min_offset
    return random.randint(lo, hi) if lo != hi else lo
|
73d3e8fc9497da2faab44b6a46040a79b879ac3f
| 526,126 |
import string
def is_pangram(s):
"""
Detect whether or not a given string input is a pangram.
:param s: a string value.
:return: true if string is a pangram, otherwise false.
"""
return set(string.ascii_lowercase) <= set(s.lower())
|
89acf3fa64f74091df74f095cf7a806c78457658
| 630,414 |
def c_to_fortran(stack):
""" Convert Fortran-indexed 3D array to C indexed array. """
return stack.T if not stack.flags.f_contiguous else stack
|
2bf66b08ecdb14876168c6781e40719f5775b3f4
| 550,504 |
import re
def get_orbit_number(infile):
"""
Look for an orbit number in the given filename.
Parameters:
===========
infile: str
Input file.
Returns:
========
orbit: str
Supposed orbit number
"""
orbit = re.findall("[0-9]{6}", infile)[-1] # Get orbit number
return orbit
|
abfa676e86f6cb8700a40a63f73dbf0ee58fdb6a
| 514,173 |
def acceptable_title(node):
""" Omit projects that have certain words in the title """
omit_titles = ['test', 'photo', 'workshop', 'data']
if any(word in node['title'].lower() for word in omit_titles):
return False
return True
|
3c062e5e3b38f3f7d86d3bddbbdfb4bd3cb5ae46
| 374,589 |
def form_metaquast_cmd_list(metaquast_fp, outdir, input_fasta):
"""format argument received to generate list to be used for metquast subprocess call
Args:
metaquast_fp(str): the string representing the path to metaquast executable
outdir(str): the string representing the path to the output directory
input_fasta(list): list of fasta files for the metaquast analysis
Returns:
call_args_list(list): is the sequence of commands to be passed to the metaquast call
"""
    # 'is' comparisons against literals are unreliable; an empty or None path is falsy
    if not metaquast_fp:
        raise ValueError('metaquast_path invalid. metaquast_path name is empty')
if outdir is None:
raise ValueError('outdir location invalid. outdir is None')
if not input_fasta:
raise ValueError('input contigs invalid. no fasta files specified')
# required arguments
call_args_list = ['python2', metaquast_fp]
# add the fasta files
call_args_list.extend(input_fasta)
    # add the output directory
call_args_list.extend(['-o', outdir])
return call_args_list
|
9a6d2e1419db4c8e630e7bf6577ac02f5c7bf7a8
| 644,552 |
def indent_lines(
text: str
) -> str:
"""Indent the lines in the specified text."""
lines = text.splitlines()
indented_lines = [f' {line}' for line in lines]
return '\n'.join(indented_lines)
|
93a6a9feacbde14d54408fa14783df8bdc2a9178
| 609,649 |
from pathlib import Path
def requirements_project(tmp_path_factory):
"""
Returns a temp directory containing a requirements.txt
file.
"""
folder: Path = tmp_path_factory.mktemp("myrepo")
req_txt = folder.joinpath("requirements.txt")
req_txt.touch()
return folder
|
48313ba31bbce8bed262c94f190bfbc4e5037bf3
| 361,540 |
def num_of1_right(n: int) -> int:
"""
Count quantity of 1 in the binary of the given int n.
Parameters
-----------
n: int
The given number.
Returns
---------
out: int
The counts of 1 in the binary of the given number.
Notes
------
Pay attention to n < 0 case.
O(len(n_of_binary))
"""
count = 0
if n < 0:
n = -n
# for the minus
count = 1
while n:
if (n & 1):
count += 1
n = n >> 1
return count
|
82c78dd8bda0ef088f9a8fedc83ee402cb10cce2
| 264,275 |
def unique_list(X):
"""unique a list with index and count
Example
-------
>>> unique_list([1,2,4,5,3,2,4])
>>> ([1, 2, 3, 4, 5], [0, 1, 4, 2, 3], [1, 2, 1, 2, 1])
"""
idx = sorted(range(len(X)), key=X.__getitem__)
X_uniq = []
X_count = []
idx_uniq = []
for i in idx:
if len(X_uniq) == 0 or X[i] != X_uniq[-1]:
X_uniq.append(X[i])
X_count.append(1)
idx_uniq.append(i)
else:
X_count[-1] += 1
return X_uniq, idx_uniq, X_count
|
bd30946da7fcb55c853b3fb16714a02ea745f84e
| 573,955 |
def manhatten_distance(first, second):
"""
    Given two vectors, returns the Manhattan distance between them.
Requires that they are the same dimension
:param first: a vector
:param second: a vector
:return: the distance as a double
"""
if len(first) != len(second):
raise Exception("These vectors must be the same size")
return sum([abs(x - y) for x, y in zip(first, second)])
|
0d8dec01c0ab6270cfb5b4f9bec987ec06496081
| 352,169 |
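An illustrative doctest for manhatten_distance (editorial addition):
>>> manhatten_distance([1, 2, 3], [4, 0, 3])
5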
import unittest
import torch
def require_multi_gpu(test_case):
"""
Decorator marking a test that requires a multi-GPU setup. These tests are skipped on a machine without multiple
GPUs.
"""
return unittest.skipUnless(torch.cuda.device_count() > 1, "test requires multiple GPUs")(test_case)
|
267eba327dc65d574c323d0c0e977060894cf6b9
| 404,877 |
def create_reverse_dns(*resource_name_parts) -> str:
"""
Returns a name for the resource following the reverse domain name notation
"""
# See https://en.wikipedia.org/wiki/Reverse_domain_name_notation
return "io.simcore.storage" + ".".join(map(str, resource_name_parts))
|
31f3e1a982f288e1a6a9e687ca0d09f10d0bc176
| 507,307 |
import torch
def to_numpy(tensor: torch.Tensor):
"""
Converts a tensor to numpy.
Parameters
----------
tensor: torch.Tensor
Tensor to be converted.
Returns
-------
numpy.ndarray
"""
return tensor.detach().cpu().numpy() if tensor.requires_grad else tensor.cpu().numpy()
|
7c71b6572edb1b7e46ab0d62bee57f501e257c4a
| 535,356 |
def getParagraphs(content):
"""
    Extracts paragraphs from the text content
:param content: (str) text content
:returns: list of paragraphs
"""
paraList = content.split('\n\n')
return paraList
|
f81ad6b5e9f7a81d404cdff852459cca9180a07f
| 337,202 |
from pathlib import Path
def get_pdb_file_suffix_variations(pdb_file_path, sep="_"):
"""
List suffix variations of a PDB file.
If `file.pdb` is given, and files `file_1.pdb`, `file_2.pdb`, exist
in the folder, those will be listed.
Parameters
----------
pdb_file_path : str or Path
The path to the source PDB file. The source PDB file does not
need to exist and it can be just a reference name.
sep : str
The separation between the file base name and the suffix.
Defaults to "_".
Returns
-------
list
        List of Paths with the identified PDB files.
If no files are found return an empty list.
"""
basename = Path(pdb_file_path)
abs_path = basename.resolve().parent
return list(abs_path.glob(f"{basename.stem}{sep}*{basename.suffix}"))
|
9369444e32d35315f3e70cf43f36670f9e457c79
| 328,858 |
def binary_search(source, target):
"""二分探索. 計算量: O(log n) """
start, end = 0, len(source)
while start <= end:
index = (start + end) // 2 # index: 中央値
if source[index] == target:
# 見つかったらそのindexを返す
return index
elif source[index] < target:
# 検索範囲を後半部分にする
start = index + 1
else:
# 検索範囲を前半部分にする
end = index - 1
# 見つからない
return -1
|
c9e9adf63149deb74112d5beafd27eb412bc3eb5
| 643,429 |
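Illustrative doctests for binary_search (editorial addition, assuming the corrected bounds above):
>>> binary_search([1, 3, 5, 7, 9], 7)
3
>>> binary_search([1, 3, 5, 7, 9], 4)
-1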
def antall_rette(data, i):
"""Sammenligner avgitt svar med riktig svar og returnerer den summerte verdien.
Parameters
----------
data : list
En list med dictionarys
i : int
indeksen til frorsøket som det skal returneres antall rette til
Returns
-------
int
Antall rette svar
"""
rette = 0
for q in data:
if q["rett"] == q["svart"][i]:
rette += 1
return rette
|
008f14983565dfd86a249aa4d61d2721192c2bff
| 262,897 |
def filter_corporation_tag(y, only_company=True):
"""Filter unnecessary labels.
Args:
y (List of list of tag):
        only_company (bool, optional): If True, only return the company type.
            If False, return 3 kinds of corporation types.
            Defaults to True.
    Returns:
        [list of list of tag]: filtered y
"""
if isinstance(y, tuple):
y = list(y)
if only_company:
company_tag_list = ['B-company', 'I-company']
else:
company_tag_list = ['B-corporation_other', 'I-corporation_other',
'B-company_group', 'I-company_group',
'B-company', 'I-company']
for i, sample in enumerate(y):
y[i] = ['O' if tag not in company_tag_list else tag for tag in sample]
return y
|
da0d9f708d80b28f6f04a3030e1c990990dcc564
| 565,511 |
def _isNamedTuple(obj):
""" Heuristic check if an object is a namedtuple. """
return hasattr(obj, "_fields") and hasattr(obj, "_asdict") and callable(obj._asdict)
|
ec383341f8168b9568105c027f10d71a2192680b
| 86,929 |
def remove_duplicates(items, key):
"""
    Remove all duplicates from a list of dicts (items), based on a unique key; the last occurrence of each key value is kept
"""
clean_items = []
for i, item in enumerate(items):
if not any(item[key] == item_[key] for item_ in items[i+1:]):
clean_items.append(item)
return clean_items
|
33dfae9ab7c4e9228f5e37fb20dbc7a1b0ba0bec
| 141,416 |
def get_key_for_value(dict_, value, default=None):
"""
A util function for dict reverse lookup.
Mostly used to find the species name from nameId given a species_name -> nameId dict
:param dict_:
:param value:
:param default:
    :return: the key for the given value, or default if not found
"""
for k, v in dict_.items():
if v == value:
return k
return default
|
f17ee28f874e8fa0f6438de68ac9860db786ff98
| 338,746 |
import collections
def _aggregate_histograms(histograms):
"""Aggregate multiple histograms into one.
A single histogram just contains the number of times each row length
occurred in the dataset.
:arg list histograms: The histograms, as collections.Counter objects.
:returns: A histogram
:rtype: collections.Counter
"""
aggregated = collections.Counter()
for hist in histograms:
aggregated.update(hist)
return aggregated
|
6f065100b021a039aa48fd53e1aa7b145bc68f55
| 293,342 |
def verify_new_attending(in_dict):
"""
This function verifies the input information for post_new_attending()
This function receives the dictionary containing the input
from the function post_new_attending(). The function uses a for
loop to check if the key strings are the same as the expected
key strings and that the value types are the same as
the expected value types. If the keys are incorrect, then a
string notifying the client that a key is missing is returned.
If a value type is incorrect, then a string is returned
    to the client saying that a specific value is of the wrong type.
If nothing is wrong, then this function returns True.
:param in_dict: a dictionary sent from the client
:return: True if the dictionary has the correct keys and value
types and a string explaining why the dictionary is wrong
otherwise.
"""
expected_keys = ("attending_username", "attending_email",
"attending_phone")
expected_values = (str, str, str)
for key, ty in zip(expected_keys, expected_values):
if key not in in_dict.keys():
return "{} key not found in input".format(key)
if type(in_dict[key]) != ty:
return "{} value is not the correct type".format(key)
return True
|
a5b904a0d288f4ed77c2ba7e8abc22b1438f3d0b
| 136,520 |
def get_P_Elc_PC_oprt(P_Elc_PC_rtd):
"""使用時の消費電力を計算する
Parameters
----------
P_Elc_PC_rtd : float
定格消費電力, W
Returns
----------
P_Elc_PC_oprt : float
使用時の消費電力, W
"""
P_Elc_PC_oprt = 1.0871 * P_Elc_PC_rtd + 2.2719
return P_Elc_PC_oprt
|
ae1a185d5bd121b9d0a7d579091781f35da9aae2
| 522,630 |
def extract_predicate_direction(predicate: str) -> tuple[str, bool]:
"""Extract predicate direction from string with enclosing arrows"""
if "<-" in predicate:
return predicate[2:-1], True
else:
return predicate[1:-2], False
|
84a715abef59051244846741822b665ef776f4ac
| 321,497 |
def delete_scalar(a_dict, key):
"""Delete a scalar in a ConfigObj object.
Returns: 0 if nothing was done.
1 if the scalar was deleted
"""
if key not in a_dict.scalars:
return 0
del a_dict[key]
return 1
|
50d6ab414155089c3bd22e57d70476d716f5781b
| 203,787 |
def mod_min(a, b):
"""
return r such that r = a (mod b), with minimum |r|
"""
# like divmod_min, just skipping a single add
r = (a % b)
diff = b - r
if abs(r) > abs(diff):
r = -diff
return r
|
0cf2fb88b34992a2ede6c4a53f4f621625703fab
| 606,867 |
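A doctest showing the minimal-magnitude residue (editorial addition):
>>> mod_min(7, 5), mod_min(8, 5)
(2, -2)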