content | sha1 | id
---|---|---
stringlengths 39-9.28k | stringlengths 40-40 | int64 8-710k
def titleType(dataFrame):
"""
Count how many series and movies exists in the dataFrame.
Used for mostWatchedType visualization
Parameters:
dataFrame: string
The name of the dataFrame the user wants to work with.
Returns:
a dataFrame
"""
dataFrameTypeCount = dataFrame['Series / Movie'].value_counts()
dataFrameTypeCount = dataFrameTypeCount.to_frame()
dataFrameTypeCount = dataFrameTypeCount.rename(
columns={'Series / Movie': 'Count'})
return dataFrameTypeCount | 865c4d1654592bad9f01ce67f3e9d23559d322f7 | 642,650 |
def _preamble(layout='layered'):
"""Return preamble and begin/end document."""
if layout == 'layered':
layout_lib = 'layered'
elif layout == 'spring':
layout_lib = 'force'
    else:
        raise ValueError(
            'Unknown layout: {s}'.format(s=layout))
    # Backslashes must be escaped: a bare '\u' in a normal string literal is an
    # invalid unicode escape (a SyntaxError in Python 3).
    document = (
        '\\documentclass{{standalone}}\n'
        '\\usepackage{{amsmath}}\n'
        '\n'
        '\\usepackage{{tikz}}\n'
        '\\usetikzlibrary{{graphs,graphs.standard,'
        'graphdrawing,quotes,shapes}}\n'
        '\\usegdlibrary{{ {layout_lib} }}\n').format(
            layout_lib=layout_lib)
return document | 2cb40fe7d48aa713f01ff79c093794156bff0c96 | 659,830 |
def get_edge_weight(G, node1, node2, weight_attribute="weight"):
"""
Get Edge Weight
Returns edge weight for edge in G from node1 to node2
If edge exists and has weight_attribute, this value is returned.
    If edge exists but has no weight_attribute, 1 is returned.
Otherwise 0 is returned
Input
-----
G - networkx graph
node1 - source node
node2 - target node
weight_attribute='weight' - attribute of edge containing weight value
Return
------
edge weight, 1 if edge exists but no weight attribute exists, 0 otherwise.
"""
edge_data = G.get_edge_data(node1, node2)
if edge_data is None:
return 0
elif weight_attribute in edge_data:
return edge_data[weight_attribute]
else:
return 1 | b73363e41ead391ec25c9ece87add2ccd12f5e2f | 482,649 |
def is_same_url(a, b):
"""Check if different forms of same URL match"""
return a and b and a.strip().strip('/') == b.strip().strip('/') | 612093775fae2ac57812c002c21f563a0930724c | 559,048 |
def _df_instance_to_path(df_inst):
"""Convert a df instance name to a mountpoint"""
# df_root is not a dynamic file system. Ignore that one.
if df_inst == 'df_root':
return '/'
else:
# For all others replace all '-' with '/'
        return '/' + df_inst[3:].replace('-', '/') | 60fa2e5d6ba124b818a16ee11cff57e5b4f3e0bd | 208,245 |
def mpe_limits_cont_uncont_mwcm2(mhz: float) -> list:
"""Determine maximum permissible exposure limits for RF, from FCC references.
:param mhz: The radio frequency of interest (megahertz)
:return: MPE limits (mW/cm^2) for controlled & uncontrolled environments, respectively
:raises ValueError: if mhz is out of range (cannot be found in the FCC lookup table)
"""
if mhz <= 0:
raise ValueError("frequency out of range: %s MHz" % str(mhz))
elif mhz <= 1.34:
return [100, 100]
elif mhz < 3:
return [100, 180 / (mhz ** 2)]
elif mhz < 30:
return [900 / (mhz ** 2), 180 / (mhz ** 2)]
elif mhz < 300:
return [1.0, 0.2]
elif mhz < 1500:
return [mhz / 300, mhz / 1500]
elif mhz < 100000:
return [5.0, 1.0]
else:
raise ValueError("frequency out of range: %s MHz" % str(mhz)) | 67c992585a2c35ce6f9039e6ef41b05f2a3d2ebc | 437,274 |
import pickle
from pathlib import Path
def load_obs_fcst(fls_path):
"""Load obs and fcst from existing pickled dataframes.
Args:
obs_path (str): obs
fcst_path (str): fcst
Returns:
2 dataframes: obs, fcst
"""
obs = pickle.load(open(Path(fls_path, "obs.p"), "rb"))
fcst = pickle.load(open(Path(fls_path, "fcst.p"), "rb"))
return obs, fcst | 1a1c1227d5e7dd28f33782818d3af73ccf7e6808 | 483,802 |
def eh_posicao(pos):
"""
eh_posicao recebe um argumento de qualquer tipo e devolve True
se o argumento corresponder a uma posicao do tabuleiro, isto e, se for
um inteiro entre 1 e 9 (inclusive), caso contrario devolve False.
"""
if type(pos)==int:
if pos<10 and pos>0:
return True
return False | 1b1a15947e9f3c12df677fb633fb09f2ae50b7a5 | 208,624 |
import socket
def _canonicalize_hostname(hostname):
"""Canonicalize hostname following MIT-krb5 behavior."""
# https://github.com/krb5/krb5/blob/d406afa363554097ac48646a29249c04f498c88e/src/util/k5test.py#L505-L520
af, socktype, proto, canonname, sockaddr = socket.getaddrinfo(
hostname, None, 0, 0, socket.IPPROTO_TCP, socket.AI_CANONNAME
)[0]
try:
name = socket.getnameinfo(sockaddr, socket.NI_NAMEREQD)
except socket.gaierror:
return canonname.lower()
return name[0].lower() | 3d8bf992e820292112e07aa03b1d1c98dc85c93b | 646,646 |
def contains_row(matrix, row):
"""Check if a numpy matrix contains a row with the same values as the
variable `row`.
"""
return (matrix == row).all(axis=1).any() | 3a21936c983c4bfb8e9fdc5600c5f1d0ec95b33a | 344,143 |
def paginate(text: str):
"""Simple generator that paginates text."""
last = 0
pages = []
appd_index = 0
curr = 0
for curr in range(0, len(text)):
if curr % 1980 == 0:
pages.append(text[last:curr])
last = curr
appd_index = curr
    if appd_index != len(text) - 1:
        pages.append(text[last:])  # slice to the end so the final character is kept
return list(filter(lambda a: a != '', pages)) | 55874c9fc9448f896c142479595a099cf96109fc | 103,773 |
def opt_mod(num, div):
"""returns nonnegative or negative modulo residue depending on whichever one has a lower absolute value
(if both equal, returns nonnegative)"""
res = num % div
return res if res <= (div/2) else res-div | 582e233f10a8db057ffee583f8b7974b34f15b55 | 64,480 |
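A few hedged examples of opt_mod: 7 mod 4 has residues 3 and -1 and the smaller absolute value wins, while an exact tie returns the nonnegative residue:

assert opt_mod(7, 4) == -1   # |-1| < |3|
assert opt_mod(6, 4) == 2    # tie between 2 and -2 -> nonnegative
assert opt_mod(10, 3) == 1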
def cmldict(argv, cmlargs=None, validity=0):
"""
The cmldict function takes a dictionary cmlargs with default
values for the command-line options and returns a modified form of
this dictionary after the options given in the list argv are
parsed and inserted. One will typically supply sys.argv[1:] as the
argv argument. In case cmlargs is None, the dictionary is built
from scratch inside the function. The flag validity is false (0)
if any option in argv can be inserted in cmlargs, otherwise the
function will issue an error message if an option is not already
present in cmlargs with a default value (notice that cmlargs=None
and validity=1 is an incompatible setting).
Example:
cmlargs = {'p' : 0, 'file' : None, 'q' : 0, 'v' : 0}
argv = "-p 2 --file out -q 0".split()
p = cmldict(argv, cmlargs)
    p now equals {'p': '2', 'file': 'out', 'q': '0', 'v': 0} (parsed values are strings)
"""
if not cmlargs:
cmlargs = {}
arg_counter = 0
while arg_counter < len(argv):
option = argv[arg_counter]
if option[0] == '-': option = option[1:] # remove 1st hyphen
else:
# not an option, proceed with next sys.argv entry
arg_counter += 1; continue
if option[0] == '-': option = option[1:] # remove 2nd hyphen
if not validity or option in cmlargs:
# next argv entry is the value:
arg_counter += 1
value = argv[arg_counter]
cmlargs[option] = value
elif validity:
raise ValueError("The option %s is not registered" % option)
arg_counter += 1
return cmlargs | a12fe8e8e670cd9ae6ca4baac4b11225a3c273f7 | 345,681 |
def is_high_sec_order(order, system_infos):
"""
Check if order is in high sec
:param order: the order
:param system_infos: the dictionary of system info
:return: true or false
"""
system_id = order['system_id']
system_info = system_infos[str(system_id)]
return system_info['security_status'] >= 0.5 | c5bd97056077f25e38b1848448f6f67b347559df | 178,935 |
def ensure_str(s, encoding='utf-8', errors='strict'):
"""Coerce *s* to `str`.
For Python 3:
- `str` -> `str`
- `bytes or bytearray` -> decoded to `str`
"""
if not isinstance(s, (str, bytes, bytearray)):
raise TypeError("not expecting type '%s'" % type(s))
if isinstance(s, (bytes, bytearray)):
s = s.decode(encoding, errors)
return s | 5a53357f7c92c0da0e255a3f59ccdea3d0b46616 | 421,968 |
def _authorisation(auth):
"""Check username/password for basic authentication"""
if not auth:
return False
user = auth.username
password = auth.password
credentials = {'Alice': 'secret',
'Bob': 'supersecret'}
try:
truepass = credentials[user]
except KeyError:
return False
    return truepass == password | 241a4b3d4043e3836721c4aa2688729887c15147 | 463,445 |
def l2_dist_sq(p1: tuple, p2: tuple) -> float:
"""
Compute squared L2 distance between p1 and p2
:param p1: Point 1 in the form (x, y)
:param p2: Point 2 in the form (x, y)
:return: Squared L2 distance
"""
assert len(p1) == 2
assert len(p2) == 2
dx = p1[0] - p2[0]
dy = p1[1] - p2[1]
return dx ** 2 + dy ** 2 | f77b5613b24cb035e575d0e2f57986a38d7f907c | 314,891 |
import torch
def cross_entropy_with_logits_loss(input, soft_target):
"""
Implementation of CrossEntropy loss using a soft target. Extension of BCEWithLogitsLoss to MCE.
Normally, cross entropy loss is
\sum_j 1{j == y} -log \frac{e^{s_j}}{\sum_k e^{s_k}} = -log \frac{e^{s_y}}{\sum_k e^{s_k}}
Here we use
\sum_j P_j *-log \frac{e^{s_j}}{\sum_k e^{s_k}}
where 0 <= P_j <= 1
Does not support fancy nn.CrossEntropy options (e.g. weight, size_average, ignore_index, reductions, etc.)
Args:
- input (N, k): logits
- soft_target (N, k): targets for softmax(input); likely want to use class probabilities
Returns:
    - losses (N,): one loss value per example
"""
return torch.sum(- soft_target * torch.nn.functional.log_softmax(input, 1), 1) | e2543107c248cb9784abf256598523799c4c655e | 479,735 |
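A minimal usage sketch (shapes and values here are illustrative assumptions): with a one-hot soft_target this reduces to the ordinary cross entropy of the true class.

logits = torch.tensor([[2.0, 0.5, -1.0]])
soft_target = torch.tensor([[0.7, 0.2, 0.1]])  # class probabilities summing to 1
losses = cross_entropy_with_logits_loss(logits, soft_target)
print(losses.shape)  # torch.Size([1]) -- one loss value per row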
def character_frequency(filename):
"""Counts the frequency of each character in the given file."""
# First try to open the file
try:
f = open(filename)
except OSError:
return None
# Now process the file
characters = {}
for line in f:
for char in line:
characters[char] = characters.get(char, 0) + 1
f.close()
return characters | 9cda7b1537b6d05795a2011c2014c2f47e0d6d71 | 559,969 |
from typing import List
def _sampling_from_alias(
alias: List[int],
probs: List[float],
first_random: float,
second_random: float,
) -> int:
"""
This is aligned with the original node2vec implementation w/ 2 random numbers.
:param alias: the pre-calculated alias list
:param probs: the pre-calculated probs list
:param first_random: 1st random floating point number in the range [0.0, 1.0)
:param second_random: 2nd random floating point number in the range [0.0, 1.0)
Return the picked index in the neighbor list as next vertex in the random walk path.
"""
pick = int(first_random * len(alias))
if second_random < probs[pick]:
return pick
else:
return alias[pick] | 1e67f5f3b7cff7c1e99fd05e31e7f17774862890 | 58,198 |
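A hedged demo with hand-built alias tables for the distribution P = [0.5, 0.25, 0.25]; building the tables happens elsewhere, and for this distribution one standard construction yields probs = [1.0, 0.75, 0.75] and alias = [0, 0, 0]:

alias, probs = [0, 0, 0], [1.0, 0.75, 0.75]
# first_random picks bucket int(0.40 * 3) == 1; second_random 0.90 >= probs[1],
# so the alias entry (vertex 0) is returned:
assert _sampling_from_alias(alias, probs, 0.40, 0.90) == 0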
def identity(obj):
"""identity function, returns input unmodified"""
return obj | 17ac77ff54951876961420786a24834de957d1b3 | 58,593 |
def _bump_seed(seed):
"""
Helper to bump a random seed if not None.
"""
return None if seed is None else seed + 1 | 0f4cccba7db0c082b3e2eb2657d89a30a7204cbe | 333,837 |
def read_event_file(filename):
"""Reads a .event file *filename* and returns a dict containing the header and records. Each record contains a timestamp and an external event name."""
master = []
i = 0
hdr = []
start = False
with open(filename, 'r') as f:
for line in f:
if not start:
hdr.append(line)
start = 'Series' in line
else:
rec = line.split(' ')
ts = int(rec[0])
tag = rec[1]
master.append((ts,tag))
i += 1
return master, hdr | bacff629cc7e58aad183da5ca93cf6531035affe | 504,947 |
def fnv1a_64(string):
""" Hashes a string using the 64 bit FNV1a algorithm
For more information see:
https://en.wikipedia.org/wiki/Fowler%E2%80%93Noll%E2%80%93Vo_hash_function
@param string The key to hash
@returns Hashed key
"""
fnv_offset = 0xcbf29ce484222325 # The standard FNV 64 bit offset base
    fnv_prime = 0x100000001b3 # The standard FNV 64-bit prime
hash = fnv_offset
uint64_max = 2 ** 64
# Iterate through the bytes of the string, ie the characters
for char in string:
# ord() converts the character to its unicode value
hash = hash ^ ord(char)
hash = (hash * fnv_prime) % uint64_max
return hash | 85bda04834b5b7ba5d3163454a2c7183de291ff3 | 406,957 |
def cubic_objective(x, a, b, c, d):
"""Cubic objective function."""
return a*x**3 + b*x**2 + c*x + d | ddd43f7cac4511649b46a68777f34d3c18679f94 | 53,428 |
def happens_before(G, first, second):
"""Returns true if first happens before second in sequential order."""
first_doc = G.nodes[first]['doc']
second_doc = G.nodes[second]['doc']
return first_doc['end_time'] <= second_doc['start_time'] | db7b02ff2c93fbbc91026db011ab1fab7fdafff3 | 398,292 |
def LinearGainLine(x1,y1,x2,y2):
"""
returns tuple (m,b)
that satisfies the equation y=mx+b
uses two points in histogram space to determine slope
x : 0..nbins
y : 0..255
"""
    if (x2 > x1):
        m = (y2 - y1) / (x2 - x1)
    else:
        m = 0
    b = y1 - m * x1  # intercept so the line actually passes through (x1, y1)
    return m, b | 71d1d321c85636e15f05e3ab26c66a48a1cc5103 | 14,307 |
def Btu_hft2F2kJ_hm2C(x):
"""Btu/(h-ft^2-F) -> kJ/(h-m^2-C)"""
return 20.44*x | bc3c25e2ebcd4f3e90a9b5937fe8a246856b23e6 | 384,443 |
def chunk_string(instr):
"""Convert a single multi-line string into a list of chunks (= list of strings.)
The beginning of each chunk is denoted by a keyword beginning with '%'.
Empty lines are ignored.
Continuation lines (backslash at end of a line) are honoured.
Comment lines (first non-space character is '#') are ignored.
"""
# treat lines ending in '\' as continuation lines
instr = instr.replace('\\\n', ' ')
nonblank_lines = [ln.strip() for ln in instr.splitlines() if ln]
ll_lst = [ln for ln in nonblank_lines if not ln.startswith('#')]
ndxlst = [ndx for ndx, l in enumerate(ll_lst) if l.startswith('%')] + [len(ll_lst)]
    return [ll_lst[strt:stop] for strt, stop in zip(ndxlst, ndxlst[1:])] | 09cdf9f38b888b76517ca170bb44bb70562683f9 | 85,539 |
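A small illustrative input (an assumption, not from the original source) showing how '%' keywords open chunks, comments drop out, and a trailing backslash joins lines:

demo = "%first\nalpha \\\nbeta\n# a comment\n%second\ngamma\n"
print(chunk_string(demo))
# [['%first', 'alpha  beta'], ['%second', 'gamma']]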
def percent(num, total): # type: (int, int) -> float
"""Calculate a percent"""
    perc = num * 100 / total if total != 0 else 0
return float(round(perc, 2)) | a69ea0bd5720014f42afa9618ce3f1b988833007 | 256,093 |
def compose(g, f):
"""Take two functions as inputs and return a
function that's their composition"""
def newfunc(x):
return g(f(x))
return newfunc | afc7e885ba528f0ce43dea299e9d2b462570a2a4 | 540,225 |
def conddb_url(api=''):
"""Return CondDB URL for given API name"""
return 'http://cms-conddb.cern.ch/%s' % api | 3363ad27ec211fd372298868ffe46c665c7053c9 | 116,441 |
import json
def create_response(response):
"""
    A function that creates a response message with appropriate headers.
return: status, headers, response
"""
# Convert dictionary to json
res = json.dumps(response)
# Set Response headers
response_headers = {
'Content-Type': 'application/json; encoding=utf8',
'Content-Length': len(res.encode(encoding="utf-8")),
'Connection': 'close',
}
    # Format response headers
response_headers_raw = ''.join('%s: %s\n' % (k, v) for k, v in response_headers.items())
# Set HTTP version, status code and status
response_proto = 'HTTP/1.1'
response_status = '200'
response_status_text = 'OK' # this can be random
# Format data
r = '%s %s %s' % (response_proto, response_status, response_status_text)
return r, response_headers_raw, res | 4dc787342c73b9d28a056377c308bd328dfcbe72 | 541,586 |
import torch
def truncated_normal(tensor, mean=0, std=0.5):
""" Truncated normal distribution, similar to tf.random.truncated_normal
:param tensor: A tensor of arbitrary shape.
:param mean: mean of the distribution
:param std: std of the distribution
:returns: A tensor with the same shape of x, with values draw from the designated truncated normal.
"""
with torch.no_grad():
size = tensor.shape
tmp = tensor.new_empty(size + (8,)).normal_(mean=mean, std=std)
        valid = (tmp < mean + 2 * std) & (tmp > mean - 2 * std)  # truncate at mean +/- 2 std
ind = valid.max(-1, keepdim=True)[1]
tmp = tmp.gather(-1, ind).squeeze(-1)
return tmp | 29fb60babd11c66d5d78bc021f225e858f0d4b21 | 360,417 |
def sent2labels(sent):
"""
Extracts gold labels for each sentence.
Input: sentence list
Output: list with labels list for each token in the sentence
"""
# gold labels at index 18
return [word[18] for word in sent] | 11b4dc93c465d154e8bf8688a5b5c592b94e7265 | 4,682 |
def partial_difference_quotient(f, v, i, h=0.001):
"""
(f([x1, ..., xi + h, ..., xn]) - f([x1, ..., xi, ..., xn])) / h
:param f: f(vector) = float인 함수
:param v: 기울기(gradient)를 계산할 점의 위치 - 벡터(리스트)
:param i: 기울기(gradient)를 계산할 성분의 인덱스 - 정수
:param h: i번째 성분의 변화량
:return: 편미분 결과 -> i번째 성분 방향의 gradient
"""
# w = [v_j + (h if i == j else 0)
# for j, v_j in enumerate(v)]
w = []
for j, v_j in enumerate(v):
if j == i:
v_j += h
w.append(v_j)
return (f(w) - f(v)) / h | 651c919a41a55bf10e08063eea71d3864f463ab5 | 560,269 |
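A hedged usage sketch: estimating the partial derivative of f(v) = sum of squares at (3, 4) along component 0; the forward difference gives roughly 2*3 = 6 (6.001 with h = 0.001):

def sum_of_squares(v):
    return sum(x ** 2 for x in v)

print(partial_difference_quotient(sum_of_squares, [3.0, 4.0], 0))  # ~6.001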
def longest_prefix(names):
"""Find the longest common prefix of the repository names."""
return next(
names[0][:n]
for n in range(min(len(s) for s in names), 0, -1)
if len({s[:n] for s in names}) == 1
) | 6c0e0c3f3c39793543b55656b5e9d4ff70912481 | 206,056 |
def colour_distance_squared(colour1, colour2):
"""Square of the Euclidian distance between two colours"""
dist_squared = sum((a - b) ** 2 for a, b in zip(colour1, colour2))
return dist_squared | 378e05441f742fdf09e35528779d565fa3d2bcaa | 598,887 |
def filter_unique(spisok):
"""Get unique values from list
"""
spisok_uniq = []
for x in range(0, len(spisok)):
if spisok_uniq.count(spisok[x]) < 1:
spisok_uniq.append(spisok[x])
return spisok_uniq | fd1f017a8de7f70de2173a2169f3f9f26999ea56 | 600,185 |
import calendar
def datetime_to_timestamp(dateTime):
""" Converts a datetime.datetime to a UTC timestamp."""
return calendar.timegm(dateTime.utctimetuple()) | c61d2e8b2b79fdd4b7f2c2219845ccbf8e45aa92 | 208,973 |
def posterior_architecture(args):
"""Architecture of the inference network q(r_{1:K}| r_{1:N}, s)."""
# encoder
args.q_encoder_hidden_dim = getattr(args, 'q_encoder_hidden_dim', 150)
args.q_encoder_dropout = getattr(args, 'q_encoder_dropout', 0.1)
args.q_nlayers = getattr(args, 'q_encoder_nlayers', 2)
# decoder
args.q_decoder_embed_dim = args.q_encoder_hidden_dim
args.q_decoder_learned_pos = getattr(args, 'q_decoder_learned_pos', True)
args.q_max_positions = getattr(args, 'q_max_positions', 100)
return args | 404d5449e75008c902256d622a3110831797c40f | 147,381 |
from typing import Union
from pathlib import Path
import shutil
def _create_output_directory(path: Union[str, Path, None]) -> Path:
"""Determine the output directory for the data.
    The user can provide a path or a default path is chosen. If the user's path
    leads to a non-empty directory, it is removed and newly created.
Args:
path (Union[str, Path, None]): Path to the output directory.
Returns:
output_directory (pathlib.Path): Path to the created output directory.
"""
if path is None:
path = Path.cwd() / ".sid"
output_directory = Path(path)
if output_directory.exists() and not output_directory.is_dir():
raise ValueError(f"{path} is a file instead of an directory.")
elif output_directory.exists():
shutil.rmtree(output_directory)
for directory in [
output_directory,
output_directory / "last_states",
output_directory / "time_series",
]:
directory.mkdir(parents=True, exist_ok=True)
return output_directory | 21a8b7ad984582f5d7f52a692138faba7529495b | 416,464 |
async def mock_successful_connection(*args, **kwargs):
"""Return a successful connection."""
return True | fc3801c8be6a98033c265a97de3bb413d0c249cc | 45,739 |
def train(df, features, cls):
"""
Run training pipeline
Params:
df: (DataFrame) training data
features: (List) List of column names to take from dataframe
cls: (Object) Sklearn classifier to train
"""
X_train = df[features].values
y_train = df.label.values
cls.fit(X_train,y_train)
return cls | 745bd5b70fb56066df010718936e44942a5c23ee | 327,002 |
def bt_adjusting(bt_value: float, epsilon: float, delta_finite_stat: int = 0) -> float:
"""Creates an adjustment value related to the probability of error due to finite stats.
Args:
bt_value: Bell value.
epsilon: How close the output string is to that of a perfect distribution.
delta_finite_stat: Set to zero to assume no finite statistical effects.
Returns:
Adjusted Bell value.
"""
bt_adjusted = (bt_value + delta_finite_stat) / (8 * ((0.5 - epsilon)**3))
return bt_adjusted | acd1c0c0cc69812fec3784e02c2268dd5578f3a4 | 303,161 |
def verify_input(json_data):
"""
Verifies the validity of an API request content
:param json_data: Parsed JSON accepted from API call
:type json_data: dict
    :return: Data for the process function
    """
    # callback_uri is needed to send the responses to
if 'callback_uri' not in json_data:
raise ValueError('callback_uri not supplied')
return json_data | ad84c15d89fc060575f2dc84acf96dd10f7721e9 | 81,656 |
def hints_for(choices):
"""
Build a hint string from choices options represented as a tuple of tuples.
For example:
.. sourcecode:: python
>>> hints_for((
... ('1', 'One'),
... ('2', 'Two'),))
'1=One, 2=Two'
:rtype: str
"""
return ', '.join(['{}={}'.format(value, label) for value, label in choices]) | 6dbe165d83d14dcdb4f2232f9f111f89a683148d | 316,030 |
import re
def parse_idx_range(s):
"""Accept either a comma separated list of numbers 'a,b,c' or a range 'a-c' and return as a list of ints."""
range_re = re.compile(r'^(\d+)-(\d+)$')
m = range_re.match(s)
if m:
return list(range(int(m.group(1)), int(m.group(2)) + 1))
vals = s.split(',')
return [int(x) for x in vals] | 27c1f0b2d31c753463bf6066968c910167c54848 | 594,708 |
def TFloat(val):
"""Checks if the given value is a float.
"""
return isinstance(val, float) | dc215fb8cc6c035b53db736fafab6eab62be0f68 | 568,181 |
def create_metadata(config, md5s):
"""Compute metadata for a manifest file
Parameters
----------
config : dict
Dictionary of metadata attributes that include experiment_accession,
description, and library_accession.
md5s : list((filename, md5 hexdigest))
List of filename and hexdigest tuples that will be added to the manifest
dictionary.
"""
metadata = {
"type": "MexGeneArchive_v1",
"output_type": config['output_type'],
"software_version": config["software_version"],
"arguments": config["arguments"],
}
for key in ["experiment_accession", "description", "library_accession"]:
if key in config:
metadata[key] = config[key]
for filename, md5 in md5s:
metadata[filename] = "md5sum:{}".format(md5)
return metadata | 3ef974df1b34ceb2efcb0d96599e72db0e33771a | 310,844 |
def lorentzian(x, height=1., center=0., width=1.):
""" defined such that height is the height when x==x0 """
halfWSquared = (width/2.)**2
return (height * halfWSquared) / ((x - center)**2 + halfWSquared) | a2f6a088c7d03c17790ab82449a9417554899004 | 354,524 |
from typing import Sequence
from typing import Tuple
import math
def euler_to_quaternion(euler: Sequence[float]) -> Tuple[float,float,float,float]:
"""
Convert XYZ euler angles to WXYZ quaternion, using the same method as MikuMikuDance.
Massive thanks and credit to "Isometric" for helping me discover the transformation method used in mmd!!!!
:param euler: 3x float, X Y Z angle in degrees
:return: 4x float, W X Y Z quaternion
"""
# angles are in degrees, must convert to radians
roll, pitch, yaw = euler
roll = math.radians(roll)
pitch = math.radians(pitch)
yaw = math.radians(yaw)
# roll (X), pitch (Y), yaw (Z)
sx = math.sin(roll * 0.5)
sy = math.sin(pitch * 0.5)
sz = math.sin(yaw * 0.5)
cx = math.cos(roll * 0.5)
cy = math.cos(pitch * 0.5)
cz = math.cos(yaw * 0.5)
w = (cz * cy * cx) + (sz * sy * sx)
x = (cz * cy * sx) + (sz * sy * cx)
y = (sz * cy * sx) - (cz * sy * cx)
z = (cz * sy * sx) - (sz * cy * cx)
return w, x, y, z | f75233686cb2490a3a9e50d268068eb00ac813a6 | 176,880 |
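Two hedged sanity checks for the conversion above: zero rotation maps to the identity quaternion, and a 90-degree rotation about X gives w = x = cos(45 deg) ~= 0.7071:

print(euler_to_quaternion([0.0, 0.0, 0.0]))   # (1.0, 0.0, 0.0, 0.0)
print(euler_to_quaternion([90.0, 0.0, 0.0]))  # (~0.7071, ~0.7071, 0.0, 0.0)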
def get_maximum_time(note_tracks):
"""
Determines the largest value of end_time
among all notes. This is required to know
when the video should end.
"""
maximum_time = -999999.9
for t in note_tracks:
for pitch_list in t:
if pitch_list != []:
if pitch_list[-1].end_time > maximum_time:
maximum_time = pitch_list[-1].end_time
return maximum_time | 7c6c65df9b0a99591297e7a6633e662b695027e5 | 78,305 |
from typing import List
from typing import Set
def get_valid_digits(grid: List[List[int]], i: int, j: int) -> Set[int]:
"""Finds all of the digits that could be placed in cell (i, j) of grid."""
digits = set(range(1, 10))
# remove digits in same row or column
    for k in range(9):
        digits.discard(grid[i][k])  # drop digit already used in this row
        digits.discard(grid[k][j])  # drop digit already used in this column
# remove digits in same 3x3 box
box_i = (i // 3) * 3
box_j = (j // 3) * 3
    for row in range(box_i, box_i + 3):
        for col in range(box_j, box_j + 3):
            digits.discard(grid[row][col])
return digits | 07a2b82ad45a710eab4d013a22855cda83ff1839 | 234,075 |
def cria_peca(s): # str -> peca
"""
    Receives a string corresponding to a piece and returns the matching
    piece if the argument is valid.
    :param s: string for the piece (can be 'X', 'O' or ' ')
    :return: the piece, represented internally as a one-element set holding
    the integer -1, 0 or 1 depending on whether the piece is 'O', ' ' or 'X'
    respectively
"""
if s != 'X' and s != 'O' and s != ' ':
raise ValueError('cria_peca: argumento invalido')
if s == 'X':
return {1}
elif s == 'O':
return {-1}
elif s == ' ':
return {0} | d45783bcca66f5c1d13dd03b0bd72f9bd8df9185 | 449,907 |
def s3_bucket_uri(bucket_name: str) -> str:
"""
Return the path to the s3 bucket.
:param bucket_name: Name of the S3 bucket.
"""
return f's3://{bucket_name}/' | bf6c276a4e293b2b5784eea5d93a7b8232bba0ec | 444,427 |
from pathlib import Path
def already_processed(processed_paths, force):
"""
Check if re-process is not forced and all processed files exists
:param processed_paths:
:param force: is re-processing forced?
:return:
"""
if force:
return False # re-processing is forced for this phase
all_processed = True
for path in processed_paths:
if not Path(path).is_file():
all_processed = False
break
return all_processed | ad8ab94f39fe75fdb665edba52558382652b8676 | 280,803 |
def directory_slash(destination):
""" Ensure that the destination directory contains a final `/` """
if destination[-1] != '/':
return destination + '/'
return destination | 7d7f4e6979af21c130f4057e944657f0460cb90e | 344,127 |
def checkType(obj, ref_type):
"""Check if obj is of ref_type"""
if type(obj) is ref_type:
return True
else:
return False | df513c9112ef58c29830a49455edad5ed17b196a | 528,310 |
def generate_onehot_dict(word_list):
"""
Takes a list of the words in a text file, returning a dictionary mapping
words to their index in a one-hot-encoded representation of the words.
"""
word_to_index = {}
i = 0
for word in word_list:
if word not in word_to_index:
word_to_index[word] = i
i += 1
return word_to_index | 1408cfbf8360134b4d9a95165e693a972cc4f4e3 | 35,706 |
from typing import IO
from io import StringIO
def input_stream() -> IO:
"""Input stream fixture."""
return StringIO(
"""class: 1-3 or 5-7
row: 6-11 or 33-44
seat: 13-40 or 45-50
your ticket:
7,1,14
nearby tickets:
7,3,47
40,4,50
55,2,20
38,6,12"""
) | fa24df82e2a203a385ca949dc30d478a05a8898a | 291,235 |
def handleLeftParen(root, nodeStack):
"""Create new left subtree and descend to it."""
root.insertLeft('')
nodeStack.push(root)
return root.getLeftChild() | 43db010b5eda845609d0c3ed7ffa1c1b485031cc | 590,561 |
from typing import List
from typing import Dict
from typing import Tuple
import re
def convert(day_input: List[str]) -> Dict[str, List[Tuple[int, str]]]:
"""Converts the input to a dict mapping each key to a list of its contents,
where each content element is represented as a tuple of quantity and element"""
d: Dict[str, List[Tuple[int, str]]] = dict()
for line in day_input:
# Remove all superfluous text and split the spec
spec_split = re.sub(r" bags?|\.", "", line).split(" contain ")
key = spec_split[0]
d[key] = []
for elem in spec_split[1].split(", "):
elem_split = elem.split(" ", 1)
if (elem_split[0].isnumeric()):
d[key].append((int(elem_split[0]), elem_split[1]))
return d | fe66cc907780af5fae65e4ee908603616a7e013b | 371,786 |
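A hedged example using one rule in the Advent of Code 2020 day 7 format (the sample line is an assumption consistent with the regex above):

rule = ["light red bags contain 1 bright white bag, 2 muted yellow bags."]
print(convert(rule))
# {'light red': [(1, 'bright white'), (2, 'muted yellow')]}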
import socket
def _connected(host: str = "8.8.8.8", port: int = 53, timeout: int = 2) -> bool:
"""
Determine if internet connectivity is available.
:param host: The host used to test connectivity
:type host: str
:param port: The port used to test connectivity
:type port: int
:param timeout: The connection timeout
:type timeout: int
:return `True` if connected to the internet, `False` otherwise
:rtype: bool
"""
try:
socket.setdefaulttimeout(timeout)
socket.socket(socket.AF_INET, socket.SOCK_STREAM).connect((host, port))
return True
except socket.error:
return False | 57eebe9073cfd9f4b8ce1b9d2c2b535f71ec94fb | 258,583 |
def filter_planes(feature_dict, removeDirection, percentile):
"""filter planes by the criteria specified by removeDirection
and percentile
Args:
feature_dict (dictionary): planes and respective feature value
removeDirection (string): remove above or below percentile
percentile (int): cutoff percentile
Returns:
set: planes that fit the criteria
"""
planes = list(feature_dict.keys())
feat_value = [feature_dict[i] for i in planes]
thresh = min(feat_value) + percentile * (max(feat_value) - min(feat_value))
# filter planes
if removeDirection == 'Below':
keep_planes = [z for z in planes if feature_dict[z] >= thresh]
else:
keep_planes = [z for z in planes if feature_dict[z] <= thresh]
return set(keep_planes) | 7612741debf983fd0976cc0afe8428120b2e6647 | 575,824 |
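A hedged example of filter_planes; despite the docstring's "int", the interpolation formula treats percentile as a fraction, so 0.5 puts the threshold halfway between the min and max feature values (10 + 0.5 * 80 = 50):

feature_dict = {1: 10.0, 2: 50.0, 3: 90.0}
print(filter_planes(feature_dict, 'Below', 0.5))  # {2, 3} -- planes at or above 50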
def try_int(n):
"""
Takes a number *n* and tries to convert it to an integer. When *n* has no decimals, an integer
is returned with the same value as *n*. Otherwise, a float is returned.
"""
n_int = int(n)
return n_int if n == n_int else n | 9a73d8f2e8410c1784b9580afbfb50fc80b2a615 | 77,416 |
def remove_items(collection, subset):
"""From a collection of defaults, remove a subset and return the rest."""
the_rest = collection.copy()
for name, param in subset.items():
assert (name, the_rest[name].default) == (name, param.default)
del the_rest[name]
return the_rest | 8dba0783631dba0235897a6b0d44bbce959cf53e | 214,421 |
def street_not_in_use(street, items):
"""
Check if elements of street are not in use already.
For example [[(2, 'black'), (3, 'black'), (4, 'black'),(5, 'black)],[(3, 'black'), (4, 'black'), (5, 'black')]]
would return false, because 3, 4 and 5 are already used
:param street: List of elements that will be checked
:param items: list of items that are already planned to be played (here 2,3,4,5)
:return: True if new street can be played as well, otherwise false
"""
for element in street:
if element in items:
return False
return True | 6ba82838ca0b49c59c20cb6b47ec593c1fe43454 | 34,944 |
import json
def parse_params(params: str) -> dict:
"""Parse the given parameters to dictionary."""
return json.loads(params.replace('\'', '\"')) if params else dict() | 5fab16fca7b31680463938afbe2ba800d0dbc364 | 376,602 |
def difference(data, interval):
""" difference dataset
parameters:
data: dataset to be differenced
interval: the interval between the two elements to be differenced.
return:
dataset: with the length = len(data) - interval
"""
return [data[i] - data[i - interval] for i in range(interval, len(data))] | 611f4ad36935000ae7dc16f76aef7cbb494b36ac | 706,535 |
def find_all_indexes(text, pattern, start=0):
    """Return a list of starting indexes of all occurrences of pattern in text,
    or an empty list if not found."""
    assert isinstance(text, str), 'text is not a string: {}'.format(text)
    assert isinstance(pattern, str), 'pattern is not a string: {}'.format(pattern)
    # Worst-case running time: O(n * l), where n is the length of text
    # and l is the length of the pattern.
    # Space complexity: O(n), where n is the length of text.
    indexes = []
    text_index = 0
    pattern_index = 0
    starting_index = 0
    if pattern == '':
        return list(range(len(text)))
for text_index in range(len(text)):
starting_index = text_index
while pattern[pattern_index]== text[text_index]:
if len(pattern) - 1 == pattern_index:
indexes.append(starting_index)
break
pattern_index += 1
text_index += 1
if text_index == len(text):
break
pattern_index = 0
return indexes | b3df16b84c9efe852379195f7673100dae85d027 | 231,032 |
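Hedged examples for find_all_indexes, including the overlapping-match and empty-pattern cases:

print(find_all_indexes('abcabc', 'abc'))  # [0, 3]
print(find_all_indexes('aaa', 'aa'))      # [0, 1] -- overlaps are counted
print(find_all_indexes('abc', ''))        # [0, 1, 2]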
def duplicate_check(file):
"""Checks the potfiles for duplicate lines/hashes"""
seen = set()
duplicates = 0
try:
with open(file, "r") as source:
line = source.readline()
            while line != '':
if line in seen:
duplicates += 1
else:
seen.add(line)
line = source.readline()
except FileNotFoundError:
print("Potfile {} not found, could not check for duplicates".format(file))
return 0
return duplicates | 4e502d4e89859c22251fd265f54fc3653ca8ac4f | 221,558 |
from typing import Optional
def next_batch(limit: int, offset: int, total: Optional[int]) -> bool:
"""Is there a next batch of resources?"""
if total is None:
return True
return (total - offset) > 0 | 51febba2c3732bf9c1896b3fd31d4b9d5503c8e2 | 627,166 |
def GetControlFlag(controlDR, controlDV, preciseFlag, controlRR, controlRV, controlAltFreq):
"""
This will ``check if the control sample passes or not``
:param int controlDR: int representing number of reference reads for control reported by delly
:param int controlDV: int representing number of variant reads for control reported by delly
:param str preciseFlag: str representing if an event is precise or imprecise
:param int controlRR: int representing number of split reference reads for control reported by delly
:param int controlRV: int representing number of split variant reads for control reported by delly
:param float controlAltFreq: float representing altratio threshold for control
:return: A boolean tag indicating True or False
:rtype: bool
"""
controlAltAf = 0.0
controlCovg = 0
controlFlag = False
if(controlDR is None):
controlDR = 0
if(controlDV is None):
controlDV = 0
if(controlRR is None):
controlRR = 0
    if(controlRV is None):
        controlRV = 0
if(preciseFlag == "True"):
if((float(controlRR) != 0.0) or (float(controlRV) != 0.0)):
controlAltAf = float(controlRV) / float(int(controlRR) + int(controlRV))
else:
if((float(controlDR) != 0.0) or (float(controlDV) != 0.0)):
controlAltAf = float(controlDV) / float(int(controlDR) + int(controlDV))
if(controlAltAf <= float(controlAltFreq)):
controlFlag = True
else:
controlFlag = False
    return controlFlag | 713509aabcb8bacf1c767055c65140e71011d8de | 320,222 |
def _coalesce(*args):
""" Returns first non-null argument, or None if all are null. """
return next((a for a in args if a is not None), None) | 1e23821d86deea76af786cd98886b4fa86b91b8b | 620,971 |
import json
def read_json(file_name):
"""Read json from file."""
with open(file_name) as f:
return json.load(f) | 2eccab7dddb1c1038de737879c465f293a00e5de | 709,758 |
def to_stylesheet_color(color):
"""Utility to convert QColor to stylesheet specification"""
return 'rgb({!r}, {!r}, {!r})'.format(color.red(),
color.green(),
color.blue()) | d39eff958af185779075ed0c2d3961bfb6d452b2 | 278,119 |
import math
def productivity(affinity_count):
""" Return number representing productivity given the affinity count
e.g. if you have nothing, we still automatically produce, but if there
is someone, it will produce more efficiently.
"""
if affinity_count == 0:
return 0.3
return 0.419874 * math.log(11.2406 * affinity_count) | 07f5aeda584d328a3d5871879d8c358fee1c0171 | 89,979 |
def read_xyz(file_path):
"""
Function to read in xyz file and output as list of atoms and positions
ARGS:
file_path (str): path to xyz file
RETURNS:
list
"""
with open(file_path, "r") as f:
xyz_file = f.readlines()[2:]
atom_list = []
for line in xyz_file:
element, pos = line.split()[0], [float(x) for x in line.split()[1:]]
atom_list.append([element, pos])
return atom_list | bca633aa31bc829ea901fdfa52cf23fab802ab8f | 216,497 |
import re
def is_valid_version_syntax(version):
"""
Returns True if version is valid, else False. Accepted version examples are:
"1.0.0" "1.1.0" "123.0.123"
"""
if not version:
return False
regex = re.compile(r'^[0-9]+\.[0-9]+\.[0-9]+$')
return regex.match(version) is not None | 595d38652195306344fb467419349ecd041593df | 240,922 |
import click
def all_option(func):
"""Option to return all items."""
return click.option('--all', 'show_all', is_flag=True, help="Show all unread results.")(func) | 78b0a4193b8671f37a10daa144eb3b30ccec80c0 | 524,366 |
def user2nix(uid: str, home: str, gid: str) -> str:
"""
Generate nix config for user vhosts based off ldap
Args:
uid: Username of user
home: homedir of user
gid: user type of user
Returns:
Returns String containing a nix object for inserting in a nix array
"""
return f""" {{
uid = "{uid}";',
home = "{home}";',
gid = "{gid}";',
}}""" | f0918fb271faa54542dbd7ba06ecd94fd1cc87b1 | 490,494 |
def dic_to_str(source_dict):
"""
    Splits the dict apart and joins the items with & and =; this is the
    inverse operation of the str_to_dict function above.
    :param source_dict: {"a":1,"b":2}
    :return: a=1&b=2
"""
dict_item = []
for key, value in source_dict.items():
dict_item.append("%s=%s" % (key, str(value)))
return "&".join(dict_item) | 79016208131503c33f56a6a7dfea211dbcfe8af0 | 563,368 |
def dodrawdown(df):
"""computes the drawdown of a time series."""
dfsum = df.cumsum()
dfmax = dfsum.cummax()
drawdown = - min(dfsum-dfmax)
return drawdown | e45dbdd19238e04975d5d14beb6348b0a744ada9 | 30,531 |
from typing import Callable
def is_validator_run_success(validator: Callable, *args, **kwargs):
""" Run a validation helper and ensure it has been run.
By design the validators return nothing and they raise when something is invalid.
For the valid case we want to highlight the fact the validator has been called returning True
at the end of this helper."""
validator(*args, **kwargs)
return True | 6a0f4423de2f6abf05ee562d16cbde1cbe2bda88 | 547,066 |
def subtract(a, b):
"""Subtracts two numbers.
Function to subtract two numbers.
Parameters
----------
a : numbers.Real
First operand.
b : numbers.Real
Second operand.
Returns
-------
numbers.Real
Difference of a and b.
Examples
--------
>>> import blaupause
>>> blaupause.subtract(3, 2)
1
"""
return a - b | 9e877d32064c0da8631c51e618360fe87916a293 | 306,035 |
import random
def make_first_key(path, book_dict):
"""Find a random key."""
first_key = random.choice(list(book_dict))
return first_key | e41f05bef9f791b47ba72ba9a4704a2ad3f1b87b | 102,216 |
def get_token_counts(df, col_name):
"""
Given a DataFrame column containing lists of tokens, this returns
a dictionary of all tokens and the number of times they appear in
the column.
"""
counts = {}
for tokens in df[col_name]:
for token in tokens:
            counts[token] = counts.get(token, 0) + 1
return counts | a30cca4df11bee9434224bfca55e0e49d94e360c | 215,738 |
import csv
def read_csv_file(filename, key_col, data_col):
"""Read data from a CSV file, get 'key_col' and 'data_col' columns.
Returns ((key[0], data[0]), ...).
"""
# Start reading the CSV file
fd = open(filename, 'r')
csv_reader = csv.reader(fd)
# Get header row, calculate required column indices
h = next(csv_reader)
header = [x.strip() for x in h]
if key_col not in header:
msg = ("Column '%s' not in file %s"
% (key_col, filename))
fd.close()
raise Exception(msg)
if data_col not in header:
msg = ("Column '%s' not in file %s"
% (data_col, filename))
fd.close()
raise Exception(msg)
key_index = header.index(key_col)
data_index = header.index(data_col)
# read data, extract columns, save
result = []
for line in csv_reader:
try:
key_data = line[key_index].strip()
data_data = line[data_index].strip()
result.append((key_data, data_data))
        except IndexError:
pass
fd.close()
return result | 779900f276dd8807efebd60a2500f3aff3e08aab | 203,523 |
def get_config_by_name(section, name):
"""
Extract a named section from the configuration file
"""
for config in section:
if config['name'] == name:
return config
return None | 54b40b3a918aa4c60f0cd9b868f819254d3026e4 | 264,924 |
import hashlib
def simple_object_hash(obj):
"""
Turn an arbitrary object into a hash string. Use SHA1.
"""
obj_str = str(obj)
hash_object = hashlib.sha1(obj_str.encode())
hex_dig = hash_object.hexdigest()
return hex_dig | 233ea07bb582db2a3dc1c7e67b04dacc321c1bfb | 200,696 |
from typing import List
from typing import Tuple
def _cluster_length_to_bin(len_list: List[int], num: int) -> List[Tuple[int, int]]:
"""cluster a list of lengths into `num` buckets
Args:
len_list: the list of target summary lengths
num: the number of buckets
Returns:
a list of tuples of which each element is the
[lower bound, upper bound) of the bucket
"""
avg_bin_len = len(len_list) // num
sort_list = sorted(len_list)
bin_bound = []
for i in range(num):
if i == (num - 1):
bin_bound.append((sort_list[i * avg_bin_len], 1e5))
else:
bin_bound.append((sort_list[i * avg_bin_len], sort_list[(i+1) * avg_bin_len]))
return bin_bound | 9f8c3347586dad196cc524554f93c30285309ef8 | 346,101 |
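A hedged example: eight lengths in two buckets gives a bucket size of four, so the bounds come from the sorted list at indices 0 and 4, with the last bucket open-ended:

print(_cluster_length_to_bin([5, 1, 7, 3, 8, 2, 6, 4], num=2))
# [(1, 5), (5, 100000.0)]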
import math
def calculate_entropy(data):
"""Calculates the entropy for a given array"""
unique_values = list(set(data))
data_length = len(data)
entropy = 0
for value in unique_values:
count = data.count(value)
        # entropy sum formula; log2(data_length / count) equals
        # -log2(count / data_length), which absorbs the minus sign
        entropy += (count / data_length) * math.log2(data_length / count)
return entropy | 517319b680fd8f2929aeb1676bafa4489681f4c2 | 445,597 |
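Two hedged sanity checks: a uniform two-symbol list carries exactly 1 bit of entropy, and a constant list carries none:

print(calculate_entropy(['a', 'a', 'b', 'b']))  # 1.0
print(calculate_entropy(['a', 'a', 'a', 'a']))  # 0.0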
def get_jobs(links, url):
""" Get all job categories and corresponding links. """
names = []
urls = []
jobs = links.find_all('ul', {'id': 'jjj0'})
links = jobs[0].find_all('a')
for link in links:
names.append(link.text)
urls.append(url + link.get('href'))
return names, urls | 22b59791f2794b2ddfb197791f3336afabdfaa44 | 89,896 |
def title(sen):
"""
Turn text into title case.
:param sen: Text to convert
:return: Converted text
"""
new_text = ""
    for i in range(0, len(sen)):
        if i == 0:
            new_text += sen[i].upper()
            continue
if sen[i - 1] == " ":
new_text += sen[i].upper()
continue
new_text += sen[i].lower()
return new_text | e59303eebee37561a2a9181b2328fd53b11bef51 | 85,703 |
def vec_to_midpoint(array):
"""
Computes the midpoint between elements in an array.
Args:
array: (np.array)
Returns: (np.array)
"""
return (array[1:] + array[:-1]) / 2 | 29dc067910e9ac7bb72ce5dd6cb19fada3d29fc4 | 106,899 |
from typing import List
def join_address(bytes_lst: List[int]) -> str:
"""
Join bytes into IP address.
>>> join_address([91, 124, 230, 205])
'91.124.230.205'
>>> join_address([192, 168, 1, 15])
'192.168.1.15'
>>> join_address([192, 168, 1, 255])
'192.168.1.255'
"""
return '.'.join(map(str, bytes_lst)) | ed3bb36b21eeb6a14f08e0816ee7cbe86cc4c0b7 | 150,694 |
def unlist_list(listoflist):
"""
given a list e.g. [["James", "Jones"], ["Hackman", "Talisman", "Daboi"]]
we want to unlist the list as follows:
["James", "Jones", "Hackman", "Talisman", "Daboi"]
    :param listoflist: A list of listed items
"""
new_list = []
for alist in listoflist:
for item in alist:
new_list.append(item)
return new_list | 28ecc56b83268cd1c531fb57a958adae69ac7a5c | 121,971 |
def list_of_dicts_to_dict_of_lists(ld):
"""
Thanks to Andrew Floren from https://stackoverflow.com/a/33046935/142712
:param ld: list of dicts
:return: dict of lists
"""
return {k: [d[k] for d in ld] for k in ld[0]} | d486ca90a0da26625941bee51c0c69285ecfc49f | 232,415 |
def get_row(square: tuple) -> list:
"""
Gets all the squares in the row, this square is in.
:param square: A tuple (row, column) coordinate of the square
:return: A list containing all the tuples of squares in the row
"""
row_squares = []
for i in range(0, 9):
row_squares.append((square[0], i))
return row_squares | 042fb96554b4a0ebb4ec56a074350dfe92317cf5 | 201,592 |