content
stringlengths 39
9.28k
| sha1
stringlengths 40
40
| id
int64 8
710k
|
---|---|---|
def filter_clusters(clusters, reference, minsize, mincontigs, checkpresence=True):
    """Create a shallow copy of clusters without any cluster whose total size
    is smaller than minsize, or which has fewer contigs than mincontigs.

    Args:
        clusters: dict mapping bin name -> list of contig names
        reference: object with a ``contigs`` mapping of contig name -> contig,
            where each contig supports ``len()``
        minsize: minimum summed contig length for a cluster to be kept
        mincontigs: minimum number of contigs for a cluster to be kept
        checkpresence: if True, raise KeyError when a contig is missing from
            reference; otherwise missing contigs are ignored when summing.

    Returns:
        dict of surviving clusters; each contig-name list is a copy.

    Raises:
        KeyError: if checkpresence is True and a contig is not in reference.
    """
    filtered = dict()
    for binname, contignames in clusters.items():
        if len(contignames) < mincontigs:
            continue
        size = 0
        for contigname in contignames:
            contig = reference.contigs.get(contigname)
            if contig is not None:
                size += len(contig)
            elif checkpresence:
                # Fail fast on unknown contigs so sizes cannot be silently
                # undercounted unless the caller explicitly opts out.
                raise KeyError('Contigname {} not in reference'.format(contigname))
        if size >= minsize:
            filtered[binname] = contignames.copy()
    return filtered
|
38ae46e33c7bf86fbd1bcad1e38ef4c604c97907
| 278,511 |
def ordered_dict_get_last(ordered_dict):
    """
    Return the most recently added value of a collections.OrderedDict instance.
    Args:
        ordered_dict: (collections.OrderedDict) the dict to read from
    Returns: (object) the value that was inserted last
    """
    last_key = next(reversed(ordered_dict))
    return ordered_dict[last_key]
|
1811bb5e5356dbad6c66042f5dc7d8bf1d357534
| 162,851 |
from typing import Set
from typing import Callable
from typing import List
import collections
def add_subrepositories_depending_on(
    changed: Set[str], all_subrepos: Set[str], requirement_getter: Callable[[str], List[str]]
) -> Set[str]:
    """
    Augments "changed" with other members of "all_subrepos" that directly or indirectly depend on the
    members of "changed", as listed in their internal_requirements.txt files.
    """
    if not changed:
        return changed  # pragma: no cover
    # Reverse dependency map: subrepo name -> subrepos that directly require it.
    dependents = collections.defaultdict(list)
    for subrepo in all_subrepos:
        for requirement in requirement_getter(subrepo):
            if requirement in all_subrepos:
                dependents[requirement].append(subrepo)
    # Breadth-first expansion over the reverse-dependency edges.
    result = set(changed)
    frontier = list(changed)
    while frontier:
        current = frontier.pop()
        for dependent in dependents[current]:
            if dependent not in result:
                result.add(dependent)
                frontier.append(dependent)
    return result
|
eb50a7ea06da5d4688dc28dea17805a8b5fb9125
| 574,086 |
def upper_nibble(value):
    """
    Return the upper nibble (bits 4-7) of a byte, i.e. a value in range 0-15.
    """
    return (value >> 4) & 0x0F
|
fca3cae98484e523c39f0515ec296b974c986a95
| 373,643 |
def is_csar(file):
    """Check if file is a CSAR multi-file ADT (case-insensitive "csar" suffix)."""
    suffix = "csar".casefold()
    return file.casefold().endswith(suffix)
|
aece7a038dca24527fbbdb9f3b63452f5d1e30bf
| 548,577 |
def build_sub_lattice(lattice, symbol):
    """Generate a sub-lattice of the lattice based on equivalent atomic species.
    Args:
        lattice (ASE crystal class): Input lattice
        symbol (string): Symbol of species identifying sub-lattice
    Returns:
        list of lists:
        sub_lattice: Cartesian coordinates of the sub-lattice of symbol
    """
    atomic_labels = lattice.get_chemical_symbols()
    positions = lattice.get_scaled_positions()
    # Pair each label with its position via zip instead of tracking a manual
    # index counter alongside the loop.
    return [position for atom, position in zip(atomic_labels, positions)
            if atom == symbol]
|
7e7748c31f7f082b2e5ec6f21d0a56f60d5ec06c
| 4,874 |
import requests
def curl(url, data=None, method='get'):
    """Download one file from a web URL. The request is stateless, just like
    what the cURL tool would do.
    Args:
        url (str): URL to request
        data (dict): data to pass on into the request, such as the post body
        method (str): one of the six HTTP methods, "get", "put", "post",
            "delete", "head", "options"
    Returns:
        requests.response object. We can get the content in binary or text using
        response.content or response.text respectively
    """
    allowed_methods = ["get", "put", "post", "delete", "head", "options"]
    assert method in allowed_methods
    request_function = getattr(requests, method)
    kwargs = {"data": data} if data else {}
    return request_function(url, **kwargs)
|
25a65ce557233709a2454110cf1100e7a67ff583
| 596,074 |
def max_retries() -> int:
    """Maximum number of retry attempts in case of 500 errors."""
    retry_limit = 5
    return retry_limit
|
ddf508b1dfa8ce49cbdf1081355b63704bbb8159
| 277,557 |
def vec(matrix):
    """
    Vectorize, i.e. "vec", a matrix by column stacking.
    For example the 2x2 matrix A
    A = [[a, b]
        [c, d]] becomes |A>> := vec(A) = (a, c, b, d)^T ,
    where |A>> denotes the vec'ed version of A and T is a transpose.
    :param matrix: A N x M numpy array.
    :return: Returns a column vector with N x M rows.
    """
    # Fortran (column-major) order read is equivalent to transposing and
    # flattening in C order.
    return matrix.reshape((-1, 1), order='F')
|
a61b6e4d4322cf9811adda8b7d71308f0e343900
| 401,092 |
def getAllSubclasses(cls):
    """
    Return every direct and indirect subclass of the given class.
    :param cls: The base class.
    :type cls: class
    """
    found = []
    pending = [cls]
    while pending:
        current = pending.pop()
        for subclass in current.__subclasses__():
            if subclass not in found:
                found.append(subclass)
                pending.append(subclass)
    return found
|
0fecd0f5ffedf4a113b6ba921606b78fce6e8bb4
| 642,615 |
def get_pip_command_action(command):
    """Return the pip action (second whitespace-separated token) of a pip command."""
    tokens = command.split()
    return tokens[1]
|
54693a4e4a60324f1d7c1063000071440a5ad916
| 245,749 |
import operator
def calculate_number_words(references, candidate):
    """Return the reference word count closest to the candidate's length,
    together with the number of words in the candidate file.

    Args:
        references: iterable of paths to reference translation files.
        candidate: path to the candidate translation file.

    Returns:
        tuple: (closest reference word count, candidate word count)
    """
    def _count_words(path):
        # Count whitespace-separated tokens over all lines; the context
        # manager closes the handle (the original leaked open files).
        with open(path, 'r') as fh:
            return sum(len(line.strip('\n').split()) for line in fh)

    nw_ref = [_count_words(ref) for ref in references]
    nw_can = _count_words(candidate)
    # "Closest" means smallest *absolute* difference; the original minimized
    # the signed difference, which always picked the shortest reference.
    min_ix, _ = min(enumerate(nw_ref), key=lambda pair: abs(pair[1] - nw_can))
    return nw_ref[min_ix], nw_can
|
fcdf7bf3e89cc05efab44d5d00a2fa6850c1d44d
| 513,736 |
def get_num_shorts(string_list):
    """ Returns the number of occurences of 'Short' in an input string list.
    Args:
        string_list(list of string objects)
    Returns:
        numShorts(int): Number of occurences of 'Short'
    """
    # list.count performs the same exact-match tally as the manual loop.
    return string_list.count('Short')
|
b8e9da454590a8b29965696be3265053cfc78729
| 701,763 |
def collate_metrics(keys):
    """Collect metrics from the first row of the table.
    Args:
        keys (List): Elements in the first row of the table.
    Returns:
        dict: maps each metric name to its column index; the bookkeeping
        columns 'Method' and 'Download' are skipped.
    """
    excluded = ('Method', 'Download')
    return {key: idx for idx, key in enumerate(keys) if key not in excluded}
|
94366737180d8648c0bd11b06ebedaae790a2ed9
| 610,749 |
def call_bn(bn, x):
    """Apply the given batch-norm layer ``bn`` to input ``x`` and return the result."""
    result = bn(x)
    return result
|
c71dd47d7c0f10f538c5dbf37403e1751c22e570
| 176,636 |
from pathlib import Path
def read(fname: str) -> str:
    """Read a file located relative to this module's directory and return its text."""
    file_path = Path(__file__).resolve().parent / fname
    with file_path.open() as handle:
        return handle.read()
|
340d814777f1f0ef6d5b97d430b3313db5e0a5ca
| 10,855 |
def normalize_version_number(version_number):
    """Clean up the version number extracted from the header.
    Args:
        version_number (str): Version number to normalize.
    Returns:
        The normalized version number (dots replaced with underscores).
    """
    return '_'.join(version_number.split('.'))
|
7cd50b850ed132a7b5e07df3596f07fa3d396a0e
| 73,096 |
import random
import decimal
def randdecimal(precision, scale):
    """Generate a random decimal value with specified precision and scale.
    Parameters
    ----------
    precision : int
        The maximum number of digits to generate. Must be an integer between 1
        and 38 inclusive.
    scale : int
        The maximum number of digits following the decimal point. Must be an
        integer greater than or equal to 0.
    Returns
    -------
    decimal_value : decimal.Decimal
        A random decimal.Decimal object with the specified precision and scale.
    Raises
    ------
    ValueError
        If precision is outside [1, 38] or scale is negative.
    """
    # Validate with explicit exceptions: "assert" is stripped under python -O,
    # and this matches the ValueError already raised for a bad scale.
    if not 1 <= precision <= 38:
        raise ValueError('precision must be between 1 and 38 inclusive')
    if scale < 0:
        raise ValueError(
            'randdecimal does not yet support generating decimals with '
            'negative scale'
        )
    max_whole_value = 10 ** (precision - scale) - 1
    whole = random.randint(-max_whole_value, max_whole_value)
    if not scale:
        return decimal.Decimal(whole)
    max_fractional_value = 10 ** scale - 1
    fractional = random.randint(0, max_fractional_value)
    return decimal.Decimal(
        '{}.{}'.format(whole, str(fractional).rjust(scale, '0'))
    )
|
40f1747d735e6de7f9c42dc999d888783abb64e4
| 685,164 |
import ast
def gt(*arguments):  # pylint: disable=invalid-name
    """
    Build an ``ast.Gt`` ("greater than") comparison-operator node.
    """
    node = ast.Gt(*arguments)
    return node
|
79552de3fe086d8c5333d6e7250cdfb83a8b2570
| 422,137 |
import inspect
import logging
def logger(**kwargs):
    """Return a logger named after the module of the caller."""
    caller_frame = inspect.stack()[1]
    # Fall back to 'UNKNOWN' when the caller has no __name__ global.
    module_name = caller_frame.frame.f_globals.get('__name__', 'UNKNOWN')
    return logging.getLogger(module_name, **kwargs)
|
f77fcf734a2b2b85dabd40c651aac77b82000d20
| 469,148 |
def wavelength2index(wl, step, min_wl):
    """Return the index of wavelength wl on a grid starting at min_wl with spacing step."""
    offset = wl - min_wl
    return int(offset / step)
|
8774445e6c1b9f0f8e23d0e0342ef68e621ff941
| 389,244 |
def get_similarity_score(dict1, dict2, dissimilarity = False):
    """
    The keys of dict1 and dict2 are all lowercase,
    you will NOT need to worry about case sensitivity.
    Args:
        dict1: frequency dictionary of words or n-grams for one text
        dict2: frequency dictionary of words or n-grams for another text
        dissimilarity: Boolean, optional parameter. Default to False.
        If this is True, return the dissimilarity score, 100*(DIFF/ALL), instead.
    Returns:
        int, a percentage between 0 and 100, inclusive
        representing how similar the texts are to each other
        The difference in text frequencies = DIFF sums words
        from these three scenarios:
        * If a word or n-gram occurs in dict1 and dict2 then
        get the difference in frequencies
        * If a word or n-gram occurs only in dict1 then take the
        frequency from dict1
        * If a word or n-gram occurs only in dict2 then take the
        frequency from dict2
        The total frequencies = ALL is calculated by summing
        all frequencies in both dict1 and dict2.
        Return 100*(1-(DIFF/ALL)) rounded to the nearest whole number if dissimilarity
        is False, otherwise returns 100*(DIFF/ALL)
    """
    # Dict membership tests replace the original O(n*m) nested scans with
    # O(n + m) lookups; the arithmetic (and therefore the result) is unchanged.
    DIFF = 0
    for word, freq in dict1.items():
        if word in dict2:
            # Shared key: contribute the absolute frequency difference.
            DIFF += abs(freq - dict2[word])
        else:
            # Key only in dict1: contribute its full frequency.
            DIFF += freq
    for word, freq in dict2.items():
        if word not in dict1:
            # Key only in dict2: contribute its full frequency.
            DIFF += freq
    # ALL: total of every frequency in both dictionaries.
    ALL = sum(dict1.values()) + sum(dict2.values())
    if dissimilarity:
        return round(100 * (DIFF / ALL))
    return round(100 * (1 - (DIFF / ALL)))
|
31e8602d6ef098a58a8eaf497badebf2e19288eb
| 704,421 |
import json
def ocr_loader(json_path):
    """Helper function to load ocr data from json file
    Args:
        json_path (string):
            Path to the json file with OCR output data
    Returns:
        string:
            OCR text output
    """
    # Normalize Windows-style path separators before opening.
    normalized_path = json_path.replace('\\', '/')
    with open(normalized_path, "r") as handle:
        payload = json.load(handle)
    # Accept either a bare list of tokens or an object with a "text" field.
    tokens = payload if isinstance(payload, list) else payload['text']
    return " ".join(tokens)
|
7e182b184b305bffc97dadf59b139a1aa53250b1
| 8,681 |
def to_repr(obj):
    """ Returns the result of calling repr() on `obj`. """
    representation = repr(obj)
    return representation
|
e42518cb369a539d2f00fbc45a921540403370e3
| 568,216 |
def update_quotas(neutron, project_id, network_quotas):
    """
    Updates the networking quotas for a given project
    :param neutron: the Neutron client
    :param project_id: the project's ID that requires quota updates
    :param network_quotas: an object of type NetworkQuotas containing the
                           values to update
    :return: the response of neutron.update_quota
    """
    # Copy each quota attribute into the request body by name.
    quota_fields = (
        'security_group', 'security_group_rule', 'floatingip',
        'network', 'port', 'router', 'subnet',
    )
    update_body = {field: getattr(network_quotas, field) for field in quota_fields}
    return neutron.update_quota(project_id, {'quota': update_body})
|
d3026a28e00658b063fbb98a35fb132ee44be960
| 77,110 |
from pathlib import Path
import re
def validate(
    file: Path,
    value: str,
    pattern: str,
    whole_string: bool = True,
    none_allowed: bool = False,
) -> bool:
    """
    Validate a string value using a regular expression constraint.
    :param file: source of the test value (used only in the error message)
    :param value: value to be tested
    :param pattern: regex pattern that will match the value
    :param whole_string: governs whether the match has to be the whole string, or just a prefix
    :param none_allowed: is None allowed as a value
    :return: whether or not the regex matches the value
    """
    if none_allowed and value is None:
        return True
    # Anchor the pattern at the end when the whole string must match.
    effective_pattern = f"{pattern}$" if whole_string else pattern
    if re.compile(effective_pattern, re.DOTALL).match(value):
        return True
    print(f"Validate error in file {file}: '{value}' does not match {effective_pattern}")
    return False
|
df117e6b3dbb2f5b8a37fe8851be68a0ddc0fa13
| 512,242 |
import requests
import json
def getUrl(articleName, fqdn='https://en.wikipedia.org/', apiPath='w/api.php', exceptNull=False):
    """Uses the WikiMedia API to determine the ID of a page with the given
    title, which is then used to construct a stable URL for the corresponding
    page.
    """
    encoded_title = articleName.replace(' ', '%20')
    queryString = '?action=query&prop=info&format=json&titles=' + encoded_title
    response = requests.get(fqdn + apiPath + queryString)
    data = json.loads(response.content)
    # The API keys the result object by page id; a negative id marks a
    # missing ("null") page.
    page_ids = list(data['query']['pages'].keys())
    page_id = int(page_ids[0])
    if page_id < 0 and exceptNull:
        raise Exception('Null page returned for article name "%s"' % articleName)
    return '%s?curid=%u' % (fqdn, page_id)
|
ef667eb6bd758a620317f87138e2dc283cbb56c8
| 684,798 |
def get_reference_id(url: str):
    """
    Return the reference id from a URL
    For example:
    >>> get_reference_id("https://github.com/advisories/GHSA-c9hw-wf7x-jp9j")
    'GHSA-c9hw-wf7x-jp9j'
    """
    trimmed = url.strip("/")
    return trimmed.rpartition("/")[2]
|
370af3938b88b9c704e4777358a4c57bd66a20f0
| 669,243 |
def _get_active_radio_idx(radio):
"""Find out active radio button."""
labels = [label.get_text() for label in radio.labels]
return labels.index(radio.value_selected)
|
87763b8bf053e80bb6c800ed2c5b5d6ff5a16b03
| 358,156 |
def get_domain_from_fqdn(fqdn):
    """ Returns domain name from a fully-qualified domain name
    (removes left-most period-delimited value); None when there is no period.
    """
    _, separator, domain = fqdn.partition(".")
    return domain if separator else None
|
2a2bd0c67af39dc1949cab1f51fce4593e43d992
| 673,142 |
import re
def parse_basename(bname):
    """return tuple (tsh, axis, ddd, hh, this_file, total_files) given basename"""
    # input like BXM00018.15R
    pattern = ('(?P<tsh>.)(?P<axis>.)(.)(?P<day>\\d{3})(?P<hour>\\d{2})'
               '\\.(?P<this_file>\\d)(?P<num_files>\\d).')
    match = re.match(pattern, bname)
    if not match:
        return None
    fields = ('tsh', 'axis', 'day', 'hour', 'this_file', 'num_files')
    return tuple(match.group(name) for name in fields)
|
37ebba5d929d863ab245019b0d539100f89d395d
| 636,453 |
def parse_env_urls(urls=None):
    """Parses a pipe-separated string of urls into a list of stripped urls.
    >>> parse_env_urls(urls='https://kibana.energy.svc.dbg.com | https://grafana.energy.svc.dbg.com')
    ['https://kibana.energy.svc.dbg.com', 'https://grafana.energy.svc.dbg.com']
    """
    # "is None" instead of "== None": identity comparison is the correct idiom.
    if urls is None:
        return []
    return [url.strip() for url in urls.split('|')]
|
518a2ae449c037cc63697b492fcfbf4a35123bd6
| 93,919 |
def word2wid(word, word2id_dict, OOV="<oov>"):
    """
    Transform single word to word index.
    :param word: a word
    :param word2id_dict: a dict map words to indexes
    :param OOV: a token that represents Out-of-Vocabulary words
    :return: int index of the word
    """
    # Try progressively normalized casings before falling back to OOV.
    candidates = (word, word.lower(), word.capitalize(), word.upper())
    for candidate in candidates:
        try:
            return word2id_dict[candidate]
        except KeyError:
            continue
    return word2id_dict[OOV]
|
7daa66bfa5feb3e9a4c1d60b714c58079bc5aaeb
| 337,713 |
def not_none_get(value, not_none):
    """Return value, or not_none if value is None"""
    return not_none if value is None else value
|
4973efafa85288b6d724a5d060d30cbfc3a26ba8
| 191,125 |
def _timedelta_to_seconds(delta):
"""
Converts timedelta to seconds
"""
offset = delta.days*86400 + delta.seconds + (delta.microseconds+0.0)/1000000
return round(offset)
|
528854f38cd0f9831c16259640428fc72aa8b725
| 476,228 |
def funct_worker(input_list, pre_text):
    """
    Worker Function: define function that each process should do
    e.g. build a string that starts with pre_text followed by the
    space-joined content of input_list.
    NOTE(review): there is no separator between pre_text and the first list
    item — confirm whether pre_text is expected to carry a trailing space.
    """
    joined_items = " ".join(input_list)
    return f"{pre_text}{joined_items}"
|
e28c0bb9d07d6df7c0d88b11869fa58f098825cd
| 635,918 |
def get_wrapper_by_cls(venv, cls):
    """ Fetch env wrapper class cls from current venv
    Args:
        venv (gym.Wrapper): current env
        cls (gym.Wrapper): target env wrapper class
    Returns:
        the first wrapper along the ``.env`` chain that is an instance of
        cls, or None when no such wrapper exists.
    """
    current = venv
    while True:
        if isinstance(current, cls):
            return current
        if not hasattr(current, 'env'):
            return None
        current = current.env
|
a09c3caa84daeda7c5b6913ee756c79ab2f1f8c8
| 280,711 |
def get_ip_and_port(pao, wrappers):
    """ This function obtains ip and port of given pao wrapper from list of wrappers.
    :param pao: Given unit of active defense.
    :param wrappers: List of wrappers.
    :return: ip and port to access the wrapper of given 'pao'; empty strings
        when 'pao' is not found.
    """
    for entry in wrappers['paos']:
        if entry['pao'] == pao:
            return entry['ip'], entry['port']
    return '', ''
|
37962d505135d4a32e50615b58ed816e23783bad
| 55,843 |
def _image_id_to_filename(image_id:int) -> str:
"""
convert image_id to the corresponding filename of the image in COCO2017
Parameters
----------
image_id: int,
the `image_id` of the image, read from the annotation file
Returns
-------
fn: str,
the filename of the corresponding image
"""
fn = f"{image_id:012d}.jpg"
return fn
|
2286ec3649c6ba7260bed8c1b76573f76f1fb1b6
| 268,991 |
def conv_to_float(indata, inf_str=''):
    """Try to convert an arbitrary string to a float. Specify what will be replaced with "Inf".
    Args:
        indata (str): String which contains a float
        inf_str (str): If string contains something other than a float, and you want to replace it with float("Inf"),
            specify that string here.
    Returns:
        float: Converted string representation
    Raises:
        ValueError: If indata is neither inf_str nor parseable as a float.
    """
    if indata.strip() == inf_str:
        return float('Inf')
    # Narrowed except: the bare "except:" also swallowed KeyboardInterrupt
    # and SystemExit. float() raises ValueError for bad strings and
    # TypeError for non-string/non-numeric input.
    try:
        return float(indata)
    except (TypeError, ValueError):
        raise ValueError('Unable to convert {} to float'.format(indata))
|
814fe8c7a08b0bddfa0deed1ab779ef851442d70
| 206,790 |
def sanitize_script_content(content: str) -> str:
    """Sanitize the content of a ``<script>`` tag."""
    # Escape the closing tag so embedded content cannot terminate the script
    # element early (addresses jupyter-sphinx issue #184).
    escaped = content.replace("</script>", r"<\/script>")
    return escaped
|
7572bc8c14ae529e977dbab7c4c6d2b4cc19b060
| 135,452 |
def calc_query_length(query):
    """
    Calculate the length of DNS query.
    :param query: query string
    :return: Length of query string
    """
    query_length = len(query)
    return query_length
|
21221f33f2f840b2074ca603a7403f451d5abb43
| 280,323 |
def DataFrame_to_HdfFile(pandas_data_frame,hdf_file_name="test.hdf"):
    """Saves a DataFrame as an HDF File under the key "table"; returns the file name."""
    target = hdf_file_name
    pandas_data_frame.to_hdf(target, "table")
    return target
|
48371af404c9252d2375bf310739d6c91d211244
| 245,393 |
def find_net_from_node(node, in_dict, in_keys):
    """
    Return the net name to which the given node is attached.
    If it doesn't exist, return None
    """
    return next((key for key in in_keys if node in in_dict[key]), None)
|
7d5eb4016d6a12ba12a343bbf02c37d4029e65f2
| 81,575 |
def read_file(filename, first_line_labels):
"""
Read the tsv file and return the labels and the data.
:param filename: The data file
:type filename: str
:param labels: Whether the first line has labels
:type labels: bool
:return: array and 2-D array of labels and data
:rtype: array, array
"""
labels = ['X', 'Y']
x=[]
y=[]
with open(filename, 'r') as fin:
if first_line_labels:
labels = fin.readline().strip().split("\t")
for l in fin:
p=l.strip().split("\t")
x.append(float(p[0]))
y.append(float(p[1]))
return labels, [x, y]
|
50aefdaf202a2c18aa03cdfd482ae8ea71860f16
| 160,915 |
from typing import Union
from typing import Mapping
from typing import Sequence
def recursive_getitem(obj: Union[Mapping, Sequence], *, keys: Sequence):
    """Go along the sequence of ``keys`` through ``obj`` and return the target
    item.
    Args:
        obj (Union[Mapping, Sequence]): The object to get the item from
        keys (Sequence): The sequence of keys to follow
    Returns:
        The target item from ``obj``, specified by ``keys``
    Raises:
        IndexError: If any index in the key sequence was not available
        KeyError: If any key in the key sequence was not available
    """
    # Define some error format strings
    keyerr_fstr = "No such key '{}' of key sequence {} is available in {}."
    idxerr_fstr = "No such index '{}' of key sequence {} is available in {}."
    if len(keys) > 1:
        # More than one key left: descend one level and recurse on the rest.
        # Each level re-wraps a failure from deeper down ("from err"), so the
        # chained traceback names the remaining key sequence at every level.
        try:
            return recursive_getitem(obj[keys[0]], keys=keys[1:])
        except KeyError as err:
            raise KeyError(keyerr_fstr.format(keys[0], keys, obj)) from err
        except IndexError as err:
            raise IndexError(idxerr_fstr.format(keys[0], keys, obj)) from err
    else:
        # reached the end of the recursion: exactly one key left, do the
        # final lookup (an empty ``keys`` would raise IndexError on keys[0]).
        try:
            return obj[keys[0]]
        except KeyError as err:
            raise KeyError(keyerr_fstr.format(keys[0], keys, obj)) from err
        except IndexError as err:
            raise IndexError(idxerr_fstr.format(keys[0], keys, obj)) from err
|
7fffc12e6d142c7324b98b268684837ec9eb727a
| 258,153 |
def byte_to_zwave_brightness(value):
    """Convert brightness in 0-255 scale to 0-99 scale.
    `value` -- (int) Brightness byte value from 0-255.
    Any positive input maps to at least 1 so a lit light never rounds to off.
    """
    if value <= 0:
        return 0
    scaled = round((value / 255) * 99)
    return max(1, scaled)
|
828ab4624da205419cae22471abaabe2828e8a58
| 190,640 |
def new_dashed_word(a, b, c):
    """
    Compose the new dashed word after the user guessed a correct character.
    :param a: int, the index of the correct character in the_word.
    :param b: str, representing dashed_word
    :param c: str, representing the_word
    :return: str, the dashed word with position ``a`` revealed.
    """
    return b[:a] + c[a] + b[a + 1:]
|
077f6c1f0d76d35b00bbb261bf152a9234fc6da0
| 309,430 |
def file_gen(fil_name):
    """
    Open the named file for writing.

    NOTE(review): the file is opened in 'w' mode, which truncates any existing
    content — confirm that is intended for a "dump" file.

    Parameters
    ------------
    fil_name : str
        name of the lammps dump file
    Returns
    ----------
    fiw:
        the opened file handle (write mode)
    fil_name: str
        name of the lammps dump file
    """
    handle = open(fil_name, 'w')
    return handle, fil_name
|
12ef8257129f6f8b58bc35eb42cb1d56e939da1e
| 402,180 |
def directives_read(dfile):
    """
    Read directives from ``dfile`` into a list of lines.

    Blank lines and lines starting with '#' are skipped; the remaining lines
    are returned stripped of surrounding whitespace.
    """
    # Context manager guarantees the handle is closed even if reading fails
    # (the original used explicit open/close with no try/finally).
    with open(dfile) as fp:
        lines = fp.readlines()
    dc = []
    for line in lines:
        line = line.strip()
        if not line:
            continue
        if line.startswith("#"):
            continue
        dc.append(line)
    return dc
|
fb5894cb91fedc8ac69da9bf0b67b13a6e131e79
| 313,453 |
from typing import Union
from pathlib import Path
def relative_to(
    path: Union[str, Path], source: Union[str, Path], include_source: bool = True
) -> Union[str, Path]:
    """Make a path relative to another path.
    In contrast to :meth:`pathlib.Path.relative_to`, this function allows to keep the
    name of the source path.
    Examples
    --------
    The default behavior of :mod:`pathlib` is to exclude the source path from the
    relative path.
    >>> relative_to("folder/file.py", "folder", False).as_posix()
    'file.py'
    To provide relative locations to users, it is sometimes more helpful to provide the
    source as an orientation.
    >>> relative_to("folder/file.py", "folder").as_posix()
    'folder/file.py'
    """
    relative_part = Path(path).relative_to(source)
    if include_source:
        return Path(Path(source).name, relative_part)
    # An empty leading segment is ignored by Path, yielding just the relative part.
    return Path("", relative_part)
|
26caa33770617361406915c9b301b5fe1a0ba9ef
| 11,437 |
import re
def get_imsi(line):
    """get the correct 15 digits IMSI from the raw IMSI string:
    example:
        input 'line':
            64009778311904f1
        return:
            460079871391401
    """
    # Swap the characters of each two-character pair, then drop the trailing
    # filler character.
    pairs = [line[i:i + 2] for i in range(0, len(line) - 1, 2)]
    swapped = [pair[::-1] for pair in pairs]
    return ''.join(swapped)[:-1]
|
3abfb80a649f843f30e496e502fd8f1c125acef2
| 502,471 |
def dfunc(variable_x):
    """
    First derivative (partial derivative) of the objective function x**2.

    args:
        variable_x: point at which to evaluate the derivative
    return:
        the derivative value, 2 * variable_x
    """
    return variable_x * 2
|
b7caca1ba686644c8b67d958eadd3944a9ee8f12
| 109,895 |
def is_nestable(x):
    """
    Return True if x is a tuple or list (sequence types that can nest),
    False otherwise.

    Note: only tuple and list count; other iterables such as range or str
    are not considered nestable.

    >>> is_nestable("string")
    False
    >>> is_nestable((0,))
    True
    >>> is_nestable(range(5))
    False
    """
    return isinstance(x, (tuple, list))
|
f379bf2208315a97ae21290d4fe4deb51a63eba4
| 588,750 |
def banner(text: str, *, borderChar: str = '='):
    """Print 'text' as banner, optionally customise 'borderChar'."""
    edge = borderChar * len(text)
    return f"{edge}\n{text}\n{edge}"
|
76d27b762173e35a15e0e445eccea85cdef3b327
| 701,922 |
def tolist(a):
    """Wrap a non-list input in a list; return lists unchanged.
    Parameters
    ----------
    a : list or other
    Returns
    -------
    list
    Examples
    --------
    >>> tolist(2)
    [2]
    >>> tolist([1, 2])
    [1, 2]
    >>> tolist([])
    []
    """
    # isinstance instead of "type(a) is list" also accepts list subclasses.
    if isinstance(a, list):
        return a
    return [a]
|
b3844f762b07ea330758074f9984c5a49f058f9a
| 549,588 |
def has_metal_sites(structure):
    """Return True if any site in the structure is a metal."""
    # any() short-circuits at the first metal instead of collecting every
    # metal site into a list first (and drops the pointless enumerate).
    return any(site.species.elements[0].is_metal for site in structure)
|
cf08d6c984e0a3152c320bec9e094bfe36f95c81
| 539,731 |
def get_link_html(url, station_name, headline):
    """
    Returns HTML for a link for member station content.
    Args:
        url (str): URL of station coverage of a book.
        station_name (str): Name of station.
        headline (str): Headline of the article.
    Returns:
        str: String containing HTML linking to the content.
    """
    return (
        '<li class="external-link">{}: <strong>'
        '<a href="{}" target="_blank">{}</a></strong></li>'
    ).format(station_name.strip(), url, headline.strip())
|
16c12b59f48c9c9ea99552051bbd579bb36d8b3c
| 194,869 |
from tabulate import tabulate
def list_to_html(data, has_header=True, table_format=None):
    """
    Convenience function to convert tables to html for attaching as message text.
    :param data: Table data
    :type data: list of lists
    :param has_header: Flag whether data contains a header in the first row.
    :type has_header: boolean
    :param table_format: Dictionary representation of formatting for table elements. Eg. {'table': "border: 2px solid black;"}
        The string 'default' selects a built-in bordered style.
    :type table_format: dictionary or str
    :return: String representation of HTML.
    """
    if has_header:
        # Slice instead of data.pop(0): the original mutated the caller's list,
        # silently removing the header row from their data.
        header = data[0]
        body_rows = data[1:]
    else:
        header = ()
        body_rows = data
    table_html = tabulate(body_rows, headers=header, tablefmt='html', numalign='left')
    if table_format is not None:
        if isinstance(table_format, str) and table_format.lower() == 'default':
            table_format = {
                'table': "width: 100%; border-collapse: collapse; border: 2px solid black;",
                'th': "border: 2px solid black;",
                'td': "border: 1px solid black;"
            }
        if isinstance(table_format, dict):
            assert all([key in ('table', 'th', 'tr', 'td') for key in table_format.keys()])
            # Inject inline styles into the opening tags tabulate produced.
            for k, v in table_format.items():
                table_html = table_html.replace('<%s>' % k, '<%s style="%s">' % (k, v))
    return table_html
|
3b136afb9703758dec30aa6f8a6dab74ef491d86
| 629,906 |
import random
import string
def random_lower_string(*, k: int = 32) -> str:
    """Generate a random string of ``k`` lowercase ASCII letters."""
    chosen = random.choices(string.ascii_lowercase, k=k)
    return "".join(chosen)
|
e4dca073f0f877125e3f715830d6cdab5ff45cf0
| 290,106 |
import re
def price_to_number(price_string):
    """Extract the digits from a price string and return them as an integer.

    Non-digit characters are ignored. An empty string, or a string containing
    no digits at all, yields 0 (the original raised ValueError via int('')
    for digit-free, non-empty input).
    """
    digits = re.findall(r'[0-9]', price_string)
    if not digits:
        return 0
    return int(''.join(digits))
|
c9ec07243f64ac235be750b1add3d134e383ac43
| 371,954 |
def parse_input(input_string):
    """Return `input_string` as an integer between 1 and 6.
    Check if `input_string` is an integer number between 1 and 6.
    If so, return an integer with the same value. Otherwise, tell
    the user to enter a valid number and quit the program.
    """
    cleaned = input_string.strip()
    if cleaned not in {"1", "2", "3", "4", "5", "6"}:
        print("Please enter a number from 1 to 6.")
        raise SystemExit(1)
    return int(input_string)
|
c15c51319492dc871f24e8f2d22a1d43d115232d
| 82,723 |
def is_only_non_letters(word):
    """ Returns True if the word only contains non-letter characters """
    # any() short-circuits at the first alphabetic character.
    return not any(ch.isalpha() for ch in word)
|
2f2bdd78624612fabffea9f5b2b1b2ef1d340b04
| 161,242 |
import csv
def _sniff_file_info(fname, comment='#', check_header=True, quiet=False):
    """
    Infer number of header rows and delimiter of a file.
    Parameters
    ----------
    fname : string
        CSV file containing the genotype information.
    comment : string, default '#'
        Character that starts a comment row.
    check_header : bool, default True
        If True, check number of header rows, assuming a row
        that begins with a non-digit character is header.
    quiet : bool, default False
        If True, suppress output to screen.
    Returns
    -------
    n_header : int or None
        Number of header rows. None is retured if `check_header`
        is False.
    delimiter : str
        Inferred delimiter
    line : str
        The first line of data in the file.
    Notes
    -----
    .. Valid delimiters are: ['\t', ',', ';', '|', ' ']
    """
    valid_delimiters = ['\t', ',', ';', '|', ' ']
    with open(fname, 'r') as f:
        # Read through comments
        # (readline() returns '' only at EOF, so '' also ends the loop)
        line = f.readline()
        while line != '' and line[0] == comment:
            line = f.readline()
        # Read through header, counting rows
        # (a header row is any row whose first character is not a digit)
        if check_header:
            n_header = 0
            while line != '' and (not line[0].isdigit()):
                line = f.readline()
                n_header += 1
        else:
            n_header = None
        # At this point `line` is the first data row, or '' if we hit EOF.
        if line == '':
            delimiter = None
            if not quiet:
                print('Unable to determine delimiter, returning None')
        else:
            # If no tab, comma, ;, |, or space, assume single entry per column
            if not any(d in line for d in valid_delimiters):
                delimiter = None
                if not quiet:
                    print('Unable to determine delimiter, returning None')
            else:
                # Let csv.Sniffer pick among the candidate delimiters.
                delimiter = csv.Sniffer().sniff(line).delimiter
        # Return number of header rows and delimiter
        return n_header, delimiter, line
|
9a39d4b660656add3f4064645a141992e8fa5cf7
| 368,432 |
def get_pg_point(geometry):
    """
    Output the given point geometry (a mapping with 'x' and 'y') in WKT format.
    """
    return f"SRID=4326;Point({geometry['x']} {geometry['y']})"
|
34aa09fd0d3ddc21096cffa2bc09ea4a478918bc
| 320,842 |
def find_section_id(sections, id):
    """
    Return the index of the first section whose 'id' matches, or None.
    Sections lacking an 'id' key are skipped.
    """
    for position, entry in enumerate(sections):
        try:
            if entry['id'] == id:
                return position
        except KeyError:
            pass
    return None
|
5ee29faea5a0966873966fc85ecfe1f89b08ecbb
| 28,330 |
def remove_duplicates_in_items(items: list, id_key: str) -> list:
    """Remove duplicate items based on the given id key.
    Args:
        items (list): The items list.
        id_key (str): The ID key for the duplication check.
    Returns:
        (list) New items without duplications; the first occurrence of each
        id wins.
    """
    # A set expresses "seen ids" directly instead of a dict of True values.
    seen_ids = set()
    unique_items = []
    for item in items:
        item_id = item.get(id_key)
        if item_id not in seen_ids:
            seen_ids.add(item_id)
            unique_items.append(item)
    return unique_items
|
4f204482a9c1625bf4701a08f25add961605ed89
| 573,766 |
def not_considered(iid: int):
    """Message for NA or non-considered images.
    Args:
        iid (int): Image ID.
    Returns:
        str: Message.
    """
    # str.format already returns a str; the original str() wrapper was redundant.
    return 'Image {0} is NA or not considered in this version.'.format(iid)
|
c447c4901ba2c0ea5a6006cf1f4bad8bb0ca3931
| 209,029 |
def wdrvire(b5, b7, alpha=0.01):
    """
    Wide Dynamic Range Vegetation Index Red-edge (Peng and Gitelson, 2011).
    .. math:: t1 = (alpha * b7 - b5) / (alpha * b7 + b5)
        WDRVIRE = t1 + ((1 - alpha) / (1 + alpha))
    :param b5: Red-edge 1.
    :type b5: numpy.ndarray or float
    :param b7: Red-edge 3.
    :type b7: numpy.ndarray or float
    :returns WDRVIRE: Index value
    .. Tip::
        Peng, Y., Gitelson, A. A. 2011. Application of chlorophyll-related \
        vegetation indices for remote estimation of maize productivity. \
        Agricultural and Forest Meteorology 151(9), 1267-1276. \
        doi:10.1016/j.agrformet.2011.05.005.
    """
    scaled = alpha * b7
    ratio_term = (scaled - b5) / (scaled + b5)
    correction = (1 - alpha) / (1 + alpha)
    return ratio_term + correction
|
4a889d99008102bc878932d8001cb081f6f7e819
| 432,122 |
def _apply_post_effect_or_preset(effect_or_preset, tensor, shape, time, speed):
"""Helper function to either invoke a post effect or unroll a preset."""
if callable(effect_or_preset):
return effect_or_preset(tensor=tensor, shape=shape, time=time, speed=speed)
else: # Is a Preset. Unroll me.
for e_or_p in effect_or_preset.post_effects:
tensor = _apply_post_effect_or_preset(e_or_p, tensor, shape, time, speed)
return tensor
|
2d1708c052f41bf1e7c9259e4ae87127aadaa8de
| 240,009 |
from typing import List
from typing import Union
def _unicode_def_src_to_str(srclist: List[Union[str, int]]) -> str:
"""
Used to create :data:`UNICODE_CATEGORY_STRINGS`.
Args:
srclist: list of integers or hex range strings like ``"0061-007A"``
Returns:
a string with all characters described by ``srclist``: either the
character corresponding to the integer Unicode character number, or
all characters corresponding to the inclusive range described
"""
charlist = [] # type: List[str]
for src in srclist:
if isinstance(src, int):
charlist.append(chr(src))
else:
# Range like "0041-005A"
first, last = [int(x, 16) for x in src.split("-")]
charlist += [chr(x) for x in range(first, last + 1)]
return "".join(charlist)
|
ca44283bc8cfd03db390874f84f31297d5f8f918
| 647,160 |
def row_data_table(pw="12L"):
    """Render a RowDataTable object call.

    :param pw: panel width token (default ``"12L"``)
    :return: the rendered call string
    """
    return "RowDataTable(PanelWidth={})".format(pw)
|
b5dc752f2dec5088d0a2c577e6899a127ce93250
| 407,329 |
def numeric_input(question, default=0):
    """Prompt the user until a value parseable as float is entered.

    An empty answer falls back to ``default``. Loops forever on
    unparseable input.
    """
    while True:
        raw = input(f"{question} [{default}]: ").strip() or default
        try:
            return float(raw)
        except ValueError:
            continue
|
21d502a7b704052e238bbb3f85d6159ec352f032
| 481,955 |
def df_to_experiment_annotator_table(df, experiment_col, annotator_col, class_col):
    """
    Pivot a long-format annotation frame into an annotator-by-experiment table.

    :param df: dataframe with one row per (experiment, annotator) response,
        e.g. columns ``document_id``, ``annotator_id``, ``annotation``
    :param experiment_col: column naming the experiment (unit)
    :param annotator_col: column identifying the annotator
    :param class_col: column holding the annotator's response (class)
    :return: dataframe indexed by annotator, one column per experiment,
        cells holding the first recorded response (NaN where an annotator
        gave no response for that experiment)
    """
    pivoted = df.pivot_table(
        index=annotator_col,
        columns=experiment_col,
        values=class_col,
        aggfunc="first",
    )
    return pivoted
|
37822b986bba61a18c851226ae5ed111aa7e9733
| 406,147 |
def MEAN(src_column):
    """
    Builtin average aggregator for groupby. Synonym for tc.aggregate.AVG.
    If src_column is of array type and the arrays do not match in length,
    a NoneType is returned in the destination column.

    Example: Get the average rating of each user.

    >>> sf.groupby("user",
    ...            {'rating_mean':tc.aggregate.MEAN('rating')})
    """
    builtin_name = "__builtin__avg__"
    return (builtin_name, [src_column])
|
8230c17d0dec7f277638274041e50290883c9eb9
| 659,665 |
import time
def format_date(date):
    """
    Format a date object as ``YYYY-mm-dd HH:MM:SS [offset]``.

    Prefers the short local timezone name; falls back to the numeric
    ``-NNNN`` offset when the name is longer than five characters.

    :param date: object exposing ``strftime`` (e.g. datetime)
    :return: the formatted string
    """
    zone = time.strftime('%Z')
    if len(zone) > 5:
        zone = time.strftime('%z')
    stamp = date.strftime('%Y-%m-%d %H:%M:%S ')
    return stamp + zone
|
24d46e9d77e6f36c26f1fc6e0f34aacb8c3f9864
| 217,571 |
def inject_css(path):
    """Return an HTML ``<link>`` tag referencing the stylesheet at ``path``."""
    return '<link rel="stylesheet" type="text/css" href="{}">'.format(path)
|
251de02a3b73e89c5b9e1d073dc6604528091ff7
| 467,813 |
import json
import base64
def b64_json_enc(data):
    """
    Encode data to b64-encoded JSON.

    :data: data to encode
    :returns: encoded str
    """
    payload = json.dumps(data).encode()
    return base64.b64encode(payload).decode()
|
e5ecc8d05ff5f49872010daa500a210cddb91700
| 686,919 |
import re
def get_links_from_description(description):
    """Extract all anchor links from an HTML description string.

    :param description: HTML text possibly containing ``<a href="...">`` tags
    :return: list of ``{'title': ..., 'url': ...}`` dicts, one per link,
        in document order

    Fix: the href group is now ``[^"]*`` instead of greedy ``.*``; the
    greedy form swallowed everything up to the LAST quote, merging
    multiple links that appear on one line into a single bogus match.
    """
    html_link_regex = re.compile(
        r"\<a href=\"(?P<linkRef>[^\"]*)\"\>(?P<linkText>[^\<]*)\</a>")
    return [
        {'title': text, 'url': url}
        for url, text in html_link_regex.findall(description)
    ]
|
17e96f7074cb2aea39693ccc7d2706e901a31a4b
| 473,808 |
import inspect
def find_caller(level):
    """Return ``"<filename>:<lineno>"`` for a caller up the stack.

    Intended for use in exception messages.

    Inputs: level - integer - if 0, the caller of find_caller,
            if 1, the caller above that
    """
    frame_info = inspect.stack()[level + 1]
    filename, lineno = frame_info[1], frame_info[2]
    return '{:s}:{:d}'.format(filename, lineno)
|
a02464b6c289773e2ba6b448fe473a3d98b6623e
| 59,340 |
from typing import List
import inspect
def get_all_methods(cls: type) -> List[str]:
    """
    List the names of all plain methods of a class, excluding
    members decorated with ``@property``, ``@classmethod``, etc.

    :param cls: The class to inspect
    :return: Sorted-by-name list of method names (getmembers order)
    """
    members = inspect.getmembers(cls, inspect.isfunction)
    return [name for name, _ in members]
|
d2c25fc04d0f45c5549bae55025a628c5a225064
| 455,624 |
def un_normalize_range(normalized_array, original_min, original_max, new_min, new_max):
    """Map values from the [new_min, new_max] range back to the original range.

    :param normalized_array: normalized array
    :param original_min: minimum value (array or scalar)
    :param original_max: maximum value (array or scalar)
    :param new_min: new minimum (float)
    :param new_max: new maximum (float)
    :return: array rescaled to [original_min, original_max]
    """
    span_new = new_max - new_min
    span_orig = original_max - original_min
    return (normalized_array - new_min) / span_new * span_orig + original_min
|
f8f500f5d0782771bc0cc0aa83273570e3f85803
| 429,890 |
import operator
def get_out_operands(instruction_form):
    """Return a tuple of is_output flags, one per operand of the instruction form."""
    return tuple(op.is_output for op in instruction_form.operands)
|
df85c3021268d820f1c7ad0b820d343ae4041a82
| 31,271 |
import itertools
def _set_product(set_1, set_2):
"""Returns the cartesian product of two sets."""
return set(itertools.product(set_1, set_2))
|
5c70c5424cdea1ae5b1cad7b22b6e5c021c01f0c
| 569,292 |
def is_iterable(obj):
    """Report whether a Python object supports iteration.

    True when the object's type defines ``__iter__``, or implements the
    old sequence protocol (``__getitem__`` plus ``__len__``).
    """
    cls = type(obj)
    return hasattr(cls, "__iter__") or (
        hasattr(cls, "__getitem__") and hasattr(cls, "__len__"))
|
b74dd425c3f6303dc18ccc7363259eb4310ebb58
| 305,956 |
def horizontal_unfold(A):
    """
    For a 3D tensor A(a,i,b), unfold as A(a,ib): merge the last two axes.
    """
    rows, mid, cols = A.shape
    return A.reshape(rows, mid * cols)
|
59caaa3db71c868d08264c64a88401e85ce6136c
| 42,793 |
def cubicgw(ipparams, width, etc = []):
    """
    Fit the variation in Gaussian-measured PRF half-widths using a 2D cubic.

    Parameters
    ----------
    ipparams : sequence of 8 floats
        (x1, x2, x3, y1, y2, y3, c, s0): the linear/quadratic/cubic
        coefficients in x and y, a constant offset, and the width pivot.
    width : pair (sy, sx)
        Gaussian widths in y and x.
    etc : unused; kept for interface compatibility.

    Returns
    -------
    Flux values for the intra-pixel model.

    Revisions
    ---------
    2018-11-16  Kevin Stevenson, STScI  [email protected]
        Original version
    """
    x1, x2, x3, y1, y2, y3, c, s0 = ipparams[:8]
    sy, sx = width
    dx = sx - s0
    dy = sy - s0
    poly_x = x1 * dx + x2 * dx ** 2 + x3 * dx ** 3
    poly_y = y1 * dy + y2 * dy ** 2 + y3 * dy ** 3
    return poly_x + poly_y + c
|
334be9d8dc8baaddf122243e4f19d681efc707cf
| 705,374 |
def cmpExonerateResultByQueryAlignmentStart(e1, e2):
    """Comparator function for sorting C{ExonerateResult}s by query alignment start.

    @param e1: first exonerate result
    @type e1: C{ExonerateResult}
    @param e2: second exonerate result
    @type e2: C{ExonerateResult}
    @return: one of -1, 0 or 1
    @rtype: C{int}
    """
    a = e1.queryAlignmentStart
    b = e2.queryAlignmentStart
    # Standard sign-of-difference trick: (a > b) - (a < b) yields -1/0/1.
    return (a > b) - (a < b)
|
633779c2f95282978ed5a807ba743d261937653f
| 77,565 |
from typing import List
import glob
def get_html_files(src: str) -> List[str]:
    """Get all the HTML file names in the source directory."""
    pattern = f"{src}/*.html"
    return glob.glob(pattern)
|
7c8f48166f28eb51dcc6d31ed6bdd9ca25b7218c
| 34,177 |
def is_job_flow_done(job_flow):
    """Return True if the given job flow is done running.

    Presence of the ``enddatetime`` attribute is used as the
    completion signal (presumably set by the API once the flow ends).
    """
    finished = hasattr(job_flow, 'enddatetime')
    return finished
|
10a44d5c61f5a23d8cef0ae4d686bc799fdba6f5
| 596,831 |
import re
def sep_chi_eng(text):
    """
    Separate adjacent Chinese and English words with a space character.

    e.g. sep_chi_eng("apple手機iphone") == "apple 手機 iphone"

    Fix: the Latin character class is now ``[A-Za-z]`` instead of
    ``[a-z]``, so capitalized English words (e.g. "Apple手機") are
    separated as well.
    """
    cjk = "\u2E80-\u2FD5\u3400-\u4DBF\u4E00-\u9FCC"
    # Latin letter directly followed by a CJK character.
    text = re.sub("([A-Za-z])([{}])".format(cjk), "\\1 \\2", text)
    # CJK character directly followed by a Latin letter.
    text = re.sub("([{}])([A-Za-z])".format(cjk), "\\1 \\2", text)
    return text
|
8df69d32bd2417443eb4e94e07907ce592e64296
| 337,871 |
def is_next_south_cell_empty(i, j, field):
    """
    Check whether the cell directly below (i, j) is empty ('.').

    The grid wraps vertically: below the last row comes row 0.

    :param i: row index
    :param j: column index
    :param field: 2D grid (sequence of rows)
    :return: True if the cell below is '.', False otherwise
    """
    below = (i + 1) % len(field)
    return field[below][j] == '.'
|
4b06dcba61eff2769fd78176f259f79e98ca2a1d
| 73,817 |
def report(cls_instance) -> str:
    """Create a report string describing an object.

    Delegates to the instance's no-argument ``report`` method when
    available; if that attribute is missing (an AttributeError is
    raised), falls back to the object's str and repr on two lines.

    Args:
        cls_instance: The class instance we want a report of.

    Returns:
        A string with the report.
    """
    try:
        return cls_instance.report()
    except AttributeError:
        return "{0!s}\n{0!r}".format(cls_instance)
|
0d00ecc48afc3a1dc06b627bf764ea501f62c3d5
| 204,228 |
import hashlib
def hash_sha1(f_name):
    """
    Return the SHA-1 digest of a file's contents.

    :param f_name: file full path
    :return: (string) 40-character hexadecimal SHA-1 digest
    :raises Exception: wraps any underlying error, preserving the
        original calling convention

    Fix: the file is read in fixed-size chunks. The previous
    ``iter(lambda: f.read(), b"")`` passed no size to ``read()``, which
    returns the whole file in one call — defeating the chunked-read
    idiom and loading large files entirely into memory.
    """
    try:
        digest = hashlib.sha1()
        with open(f_name, "rb") as f:
            for chunk in iter(lambda: f.read(65536), b""):
                digest.update(chunk)
        return digest.hexdigest()
    except Exception as e:
        raise Exception(e)
|
127abe7b41f4b276ac24b2ce8f9a7bdc1287e7f2
| 351,387 |
def skipped_iter(data, skip_criteria):
    """Utility function that returns an iterator where leading data items
    are skipped according to some provided criteria.

    Parameters:
    -----------
    data - list of data items
    skip_criteria - lambda or function that takes an item and returns
                    True when the item should be skipped.

    Returns:
    --------
    a data item iterator starting at the first unskipped item.

    Fix: implemented with ``itertools.dropwhile`` so that an empty list
    (or one whose items are all skipped) yields an empty iterator; the
    previous hand-rolled loop called ``next`` unguarded and raised
    StopIteration in those cases.
    """
    from itertools import dropwhile
    return dropwhile(skip_criteria, data)
|
ed6a6b664215645ac21f51347071ccdbbb39154f
| 127,211 |
def get_jac_idx(s1, s2):
    """Compute the Jaccard index of two sets.

    :param s1: word set 1
    :param s2: word set 2
    :return: tuple (both_empty, jaccard_index); two empty sets are
        treated as identical (index 1.0)
    """
    if not s1 and not s2:
        return True, 1.0
    overlap = len(s1 & s2)
    combined = len(s1 | s2)
    return False, overlap / combined
|
8df5a835006ad092583d76359ab5ed14f687034c
| 447,679 |
def hsv_to_hex(h, s, v):
    """Convert an HSV colour to an upper-case ``#RRGGBB`` hex string.

    ``h`` is in degrees (any value; reduced modulo 360); ``s`` and ``v``
    must lie in [0, 1], otherwise ValueError is raised.

    >>> print(hsv_to_hex(0, 0, 0))
    #000000
    >>> print(hsv_to_hex(0, 0, 1))
    #FFFFFF
    >>> print(hsv_to_hex(0, 1, 1))
    #FF0000
    >>> print(hsv_to_hex(120, 1, 1))
    #00FF00
    >>> print(hsv_to_hex(240, 1, 1))
    #0000FF
    >>> print(hsv_to_hex(60, 1, 1))
    #FFFF00
    >>> print(hsv_to_hex(180, 1, 1))
    #00FFFF
    >>> print(hsv_to_hex(300, 1, 1))
    #FF00FF
    >>> print(hsv_to_hex(0, 0, .5))
    #808080
    >>> print(hsv_to_hex(220, .95, 1))
    #0C5DFF
    """
    # Algorithm: http://www.rapidtables.com/convert/color/hsv-to-rgb.htm
    h %= 360
    if not (0 <= s <= 1 and 0 <= v <= 1):
        raise ValueError('`s` and `v` must be between 0 and 1.')
    c = v * s
    x = c * (1 - abs(((h / 60) % 2) - 1))
    m = v - c
    # One (r', g', b') base triple per 60-degree hue sector.
    sector_rgb = [
        (c, x, 0), (x, c, 0), (0, c, x),
        (0, x, c), (x, 0, c), (c, 0, x),
    ]
    sector = int(h // 60)
    if not 0 <= sector < 6:
        raise ValueError('`h` should be between 0 and 359.')
    def channel_hex(value):
        return '{:02X}'.format(int(min(255, value * 256)))
    return '#' + ''.join(channel_hex(base + m) for base in sector_rgb[sector])
|
8f33987dcd4d4d8f48c4087906e732e212c57cec
| 607,431 |
import torch
def tensor_to_image(tensor):
    """
    Convert a torch tensor into a numpy ndarray for visualization.

    Inputs:
    - tensor: A torch tensor of shape (3, H, W) with elements in [0, 1]

    Returns:
    - ndarr: A uint8 numpy array of shape (H, W, 3)
    """
    # Scale to [0, 255], round via +0.5, clamp, then move channels last.
    scaled = tensor.mul(255).add_(0.5).clamp_(0, 255)
    hwc = scaled.permute(1, 2, 0)
    return hwc.to('cpu', torch.uint8).numpy()
|
48f2d699378dc04670b03bede3c59b966e6f1a38
| 142,527 |
def get_arguments_from_user() -> list:
    """Interactively collect vaccination code, ZIP code and state.

    Prompts (in German) for the Impf-Code, PLZ and Bundesland, and
    packages the answers as a flag/value list for the argument parser.

    Returns:
        list: argument list, e.g. ["-c", code, "-p", plz, "-b", state]
    """
    print("Für weitere Konfiguration bitte das Programm direkt über eine Konsole starten.\nMit -h können alle Argumente aufgelistet werden\n")
    code = input("Impf-Code: ")
    plz = input("PLZ: ")
    bundesland = input("Bundesland des Zentrums (zB Baden-Württemberg): ")
    return ["-c", code, "-p", plz, "-b", bundesland]
|
f1df0d5a906bd6c11f777c1e69f3b40320ed7d37
| 189,749 |
def file_to_list(filename):
    """
    Read a one-column text file into a list.

    :param filename: path of the file to read
    :return: list with one stripped line per element
    """
    with open(filename, 'r') as handle:
        return [entry.strip() for entry in handle]
|
33bee263b98c4ff85d10191fa2f5a0f095c6ae4b
| 705,857 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.