content (stringlengths 39-9.28k) | sha1 (stringlengths 40) | id (int64 8-710k)
---|---|---|
def split_namespace(ident):
"""
Splits an identifier into its namespace and its bare identifier.
>>> split_namespace('foo')
(None, 'foo')
>>> split_namespace('foo:bar')
('foo', 'bar')
"""
    if ':' not in ident:
        return (None, ident)
    # ident contains ':', so split(':', 1) always yields exactly two parts.
    return tuple(ident.split(':', 1)) | ac5161a978edefdd421555173cdfd56c86f57190 | 458,828 |
def capitalize_names(ds):
"""
Capitalize the 'long_name' attribute for plotting purposes
"""
for var in ds.variables.values():
if "long_name" in var.attrs:
var.attrs["long_name"] = var.attrs["long_name"].capitalize()
return ds | 7d511e5d29e95f2bf2af4b3357f1b296cb538dc6 | 302,251 |
import torch
def look_at(eye, center, world_up):
"""Compute camera viewing matrices.
Functionality mimes gluLookAt (external/GL/glu/include/GLU/glu.h).
Args:
eye: 2D float32 tensor with shape [batch_size, 3] containing the XYZ
world space position of the camera.
center: 2D float32 tensor with shape [batch_size, 3] containing a
position along the center of the camera's gaze line.
world_up: 2D float32 tensor with shape [batch_size, 3] specifying the
world's up direction; the output camera will have no tilt with
respect to this direction.
Returns:
A [batch_size, 4, 4] float tensor containing a right-handed camera
extrinsics matrix that maps points from world space to points in eye
space.
"""
batch_size = center.shape[0]
forward = center - eye
forward = torch.nn.functional.normalize(forward, dim=1, p=2)
    to_side = torch.cross(forward, world_up, dim=1)
    to_side = torch.nn.functional.normalize(to_side, dim=1, p=2)
    cam_up = torch.cross(to_side, forward, dim=1)
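    # (to_side, cam_up, -forward) form a right-handed orthonormal basis; stacked
    # as rows below, they give the world-to-eye rotation (camera looks down -Z).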
w_column = torch.tensor(batch_size * [[0., 0., 0., 1.]], device=eye.device)
w_column = torch.reshape(w_column, [batch_size, 4, 1])
view_rotation = torch.stack([to_side, cam_up, -forward, torch.zeros_like(to_side)], dim=1) # [batch_size, 4, 3] matrix
view_rotation = torch.cat([view_rotation, w_column], dim=2) # [batch_size, 4, 4]
identity_batch = torch.unsqueeze(torch.eye(3, device=center.device), 0,).repeat([batch_size, 1, 1])
view_translation = torch.cat([identity_batch, torch.unsqueeze(-eye, 2)], 2)
view_translation = torch.cat([view_translation, torch.reshape(w_column, [batch_size, 1, 4])], 1)
camera_matrices = torch.matmul(view_rotation, view_translation)
return camera_matrices | ffc7a402bd13430c3d69096306d93f4fde803673 | 262,370 |
def calculate(num1, num2, symbol):
"""
操作数的计算
:param num1:操作数1
:param num2:操作数2
:param symbol:操作符
:return:计算后的结果
"""
if symbol == '+':
return num1 + num2
if symbol == '-':
return num1 - num2
if symbol == '*':
return num1 * num2
if symbol == '/':
return num1 / num2
else:
return None | de76a370ea3091dbb328a2f70bdc0ef425ce0167 | 395,707 |
from typing import Union
from typing import List
def encode_with_vocab(x: Union[list, str],
VOCAB: List[str],
unkid: int) -> Union[list, int]:
"""Encode all elements of x that are strings.
x: Union[list, str]
        Encoding happens if type(x) == str. If type(x) == list then a recursive
call on each list element is triggered.
VOCAB : List[str]
vocabulary list
unkid : int
        Index of the UNKNOWN token, e.g. unkid=VOCAB.index("[UNK]")
Returns:
--------
Union[list, int]
The final result (after all recursions) has the same structure as x
but with integer encoded elements.
Example:
--------
import kshingle as ks
data = ['abc d abc de abc def', 'abc defg abc def gh abc def ghi']
shingled = [ks.shingling_k(s, k=9) for s in data]
VOCAB = ks.identify_vocab(shingled, n_max_vocab=10)
VOCAB, unkid = ks.upsert_word_to_vocab(VOCAB, "[UNK]")
encoded = ks.encode_with_vocab(shingled, VOCAB, unkid)
"""
if isinstance(x, str):
        try:
            return VOCAB.index(x)
        except ValueError:
            return unkid
else:
return [encode_with_vocab(e, VOCAB, unkid) for e in x] | e4373791460b735910d867c761dcea499074a95f | 146,485 |
import pandas as pd
def _row_norm(df):
"""
>>> df = pd.DataFrame({
... 'cell-1': {'a':8, 'b':1, 'c': 7},
... 'cell-2': {'a':1, 'b':1, 'c': 1},
... 'cell-3': {'a':10, 'b':1, 'c': 10}
... })
>>> _row_norm(df)
cell-1 cell-2 cell-3
a 0.8 0.1 1.0
b 1.0 1.0 1.0
c 0.7 0.1 1.0
"""
t = df.T
return (t / t.max()).T | 096e03b189d206417524a1baafc32ffadc4f8e6b | 572,494 |
def get_standard(obj, standard):
"""
A function that allows to return a standard value for an object, if the object is None.
:param obj: Object to check if it's None.
:param standard: Standard value for the object
:return: return the object or the standard value
"""
if obj is None:
return standard
return obj | b52f67a69c452f1e84f3f42bd4438960d04ba736 | 497,367 |
def GenerateConfig(_):
"""Returns empty string."""
return '' | ed42eb1c320ca1df25603a53d4abf4a1b14215f3 | 707,906 |
import re
def word_filter(word):
""" The filter used for deleting the noisy words in changed code.
Here is the method:
    1. Delete characters other than digits, letters, and '_'.
    2. The word shouldn't consist solely of digits.
    3. The length should be larger than 2.
Args:
word
Returns:
True for not filtering, False for filtering.
"""
if word[:2] == '0x':
return False
if '=' in word:
return False
if '/' in word:
return False
if '.' in word:
return False
if '$' in word:
return False
word = re.sub("[^0-9A-Za-z_]", "", word)
    if word.isdigit():
        return False
    if len(word) <= 2:
        return False
return True | c29e80c7e6839a576b95ee99b18e81a31b95a020 | 689,774 |
def _gate_sequence_product_with_expansion(U_list, left_to_right=True):
"""
Calculate the overall unitary matrix for a given list of unitary operations.
Parameters
----------
U_list : list
        List of gates (unitaries) implementing the quantum circuit.
left_to_right : Boolean
Check if multiplication is to be done from left to right.
Returns
-------
U_overall : qobj
Unitary matrix corresponding to U_list.
"""
U_overall = 1
for U in U_list:
if left_to_right:
U_overall = U * U_overall
else:
U_overall = U_overall * U
return U_overall | 45038b9e9a09924dfc0cbee4d5605160f3d862b9 | 75,660 |
def get_event_name(dictionary: dict):
"""Gets value from dictionary for key `name` otherwise returns None"""
try:
return dictionary.get('name', None)
except AttributeError:
return None | 49e2e3ef3c4ae29b5d2edec280b3673e90853b35 | 280,036 |
from datetime import datetime
def read_daily_line(line):
"""
Parses one line from a DailyFile and returns a dictionary containing the parameters by name.
    One line is parsed such that Daily(**read_daily_line(line)) creates a Daily instance successfully. All values
    except the date (a datetime) are returned as floats.
:param str line: the line to be read, including whitespace since it will be parsed by splitting on tabs
:return: dict
"""
dailydata = {}
ls = line.split('\t')
try:
        dailydata['date'] = datetime.strptime(ls[0].split('.')[0], '%y%j%H%M')
dailydata['ads_xfer_temp'] = float(ls[1])
dailydata['valves_temp'] = float(ls[2])
dailydata['gc_xfer_temp'] = float(ls[3])
dailydata['ebox_temp'] = float(ls[4])
dailydata['catalyst_temp'] = float(ls[5])
dailydata['molsieve_a_temp'] = float(ls[6])
dailydata['molsieve_b_temp'] = float(ls[7])
dailydata['inlet_temp'] = float(ls[8])
dailydata['room_temp'] = float(ls[9])
dailydata['v5'] = float(ls[10])
dailydata['mfc1'] = float(ls[11])
dailydata['mfc2'] = float(ls[12])
dailydata['mfc3'] = float(ls[13])
dailydata['he_pressure'] = float(ls[14])
dailydata['linep'] = float(ls[15])
dailydata['zerop'] = float(ls[16])
except ValueError:
print('ValueError while reading line from DailyFile.')
return dailydata | 08eb0733734b45ed4ba5a9fe70f74453e45a467a | 463,472 |
def _get_bikes_available(sta):
"""Given a GBFS station status blob, return the number of bikes"""
# 'num_ebikes_available" is not part of the GBFS spec, but it appears
# in the Divvy API response
return sta['num_bikes_available'] + sta.get('num_ebikes_available', 0) | 39c4b79a09efc5ff30d9fb20a7038546fc201fbb | 278,672 |
def nside2npix(nside):
"""
Convert healpix resolution (nside) to the number of healpix pixels in a full-sky map.
"""
return 12 * nside * nside | c90305b624a1c6ae57815f9a34e48c8415c05055 | 325,729 |
def num_as_bytes(n):
"""
Encodes an integer in UTF-8 bytes.
"""
return str(n).encode("utf-8") | a4df4a7e815eee11a4dc6939323115f1f4953b49 | 328,338 |
def galeshapley(suitor_pref_dict, reviewer_pref_dict, max_iteration):
""" The Gale-Shapley algorithm. This is known to provide a unique, stable
suitor-optimal matching. The algorithm is as follows:
(1) Assign all suitors and reviewers to be unmatched.
(2) Take any unmatched suitor, s, and their most preferred reviewer, r.
- If r is unmatched, match s to r.
- Else, if r is matched, consider their current partner, r_partner.
- If r prefers s to r_partner, unmatch r_partner from r and
match s to r.
- Else, leave s unmatched and remove r from their preference
list.
(3) Go to (2) until all suitors are matched, then end.
Parameters
----------
suitor_pref_dict : dict
A dictionary with suitors as keys and their respective preference lists
as values
reviewer_pref_dict : dict
A dictionary with reviewers as keys and their respective preference
lists as values
max_iteration : int
An integer as the maximum iterations
Returns
-------
matching : dict
The suitor-optimal (stable) matching with suitors as keys and the
reviewer they are matched with as values
"""
suitors = list(suitor_pref_dict.keys())
matching = dict()
rev_matching = dict()
for i in range(max_iteration):
if len(suitors) <= 0:
break
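        # Step (2): every currently unmatched suitor proposes to the first
        # reviewer remaining on their (possibly trimmed) preference list.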
for s in suitors:
r = suitor_pref_dict[s][0]
if r not in matching.values():
matching[s] = r
rev_matching[r] = s
else:
r_partner = rev_matching.get(r)
if reviewer_pref_dict[r].index(s) < reviewer_pref_dict[r].index(r_partner):
del matching[r_partner]
matching[s] = r
rev_matching[r] = s
else:
suitor_pref_dict[s].remove(r)
suitors = list(set(suitor_pref_dict.keys()) - set(matching.keys()))
return matching | 5b52cb165d15a0992b58c38958daf222d8d642cd | 704,166 |
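# A minimal usage sketch for galeshapley (hypothetical preference lists; note
# that the function trims suitor_pref_dict in place as proposals get rejected):
#   suitors = {'A': ['x', 'y'], 'B': ['x', 'y']}
#   reviewers = {'x': ['B', 'A'], 'y': ['A', 'B']}
#   galeshapley(suitors, reviewers, max_iteration=10)  # -> {'B': 'x', 'A': 'y'}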
def config_desc(config):
"""A one-line text string describing the configuration of a run."""
return (
"len:{len_train} "
"win:{window_len} "
"str:{use_string} "
"page:{use_page} "
"geom:{use_geom} "
"amt:{use_amount} "
"voc:{vocab_size} "
"emb:{vocab_embed_size} "
"steps:{steps_per_epoch}"
).format(**config) | 2898cb75bb0cc240559f00478624249b7f9b3366 | 277,099 |
from typing import Optional
import json
def get_auth_token() -> Optional[str]:
"""Returns the saved auth token.
:return: The auth token saved in :code:`.frost`
:rtype: Optional[str]
"""
with open('.frost', 'r') as f:
return json.load(f).get('token') | f58241ac8ee8791468170f01453bccab513b10c6 | 413,458 |
from typing import Type
from typing import Any
def typed_property(name: str, expected_type: Type[Any]) -> property:
"""Avoids definition of repetitive property class methods.
Simplifies type assertions on class attributes values.
Illustrates an important feature of inner functions or closures.
Args:
name (str): attribute name
expected_type (Type[Any]): expected type of class attribute
Example:
>>> class Person:
... name: property = typed_property("name", str)
... age: property = typed_property("age", int)
...
... def __init__(self, name: str, age: int) -> None:
... self._name = name
... self._age = age
...
>>> person: Person = Person(name="Luke", age=22)
        >>> person.age = None  # raises `TypeError`
"""
to_private_name: str = f"_{name}"
@property # type: ignore
def prop(self: Any) -> Any:
"""Returns value of an instance.
Args:
self (Any): an instance
"""
return getattr(self, to_private_name)
@prop.setter
def prop(self: Any, value: Any) -> None:
"""Returns value to instance attribute.
Args:
self (Any): an instance
value (Any): value to be set
Raises:
`TypeError` if attribute value does not match with expected type
"""
if not isinstance(value, expected_type):
raise TypeError(
f"'{name}' argument must be a '{expected_type}' type"
)
setattr(self, to_private_name, value)
return prop | 2f704ae0fe0e6fae60bbc05917ee73b790d77a68 | 637,076 |
import torch
def focal_loss(yhat, ytrue, alpha=0.75, gamma=2):
"""
Computes α-balanced focal loss from FAIR
https://arxiv.org/pdf/1708.02002v2.pdf
Args:
yhat (Tensor): predicted masks
ytrue (Tensor): targets masks
alpha (Float): weight to balance Cross entropy value
gamma (Float): focal parameter
output:
loss value with `mean` reduction
"""
# compute the actual focal loss
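    # (1 - yhat)^gamma down-weights well-classified pixels; yhat is assumed to
    # lie in (0, 1) (e.g. after a sigmoid/softmax) so torch.log stays finite.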
focal = -alpha * torch.pow(1. - yhat, gamma) * torch.log(yhat)
f_loss = torch.sum(ytrue * focal, dim=1)
return torch.mean(f_loss) | 1dcdf9d53c4deae88a4246dee445cc37a90e09e6 | 551,009 |
def _parseAccept(header):
"""
Parses an Accept header.
Returns an iterable of 2-tuples with the content type and the
matching parameters. The parameters is a dictionary with the
key-value pairs of the parameters. This dictionary should either
be empty or contain a single key (``"q"``). The matching value
determines the preference of the client for that content type.
"""
accepted = []
for part in header.strip(".").split(","):
part = part.strip()
if not part:
continue # Begone, vile hellspawn!
elements = part.split(";")
contentType, rawParams = elements[0].strip(), elements[1:]
params = {}
for param in rawParams:
key, value = map(str.strip, param.split("=", 1))
params[key] = value
accepted.append((contentType, params))
return accepted | b792002ed8dc56a8bbaa69f3d137e4819d441320 | 424,848 |
from typing import Iterable
from typing import Hashable
from typing import Callable
from typing import Dict
def _count(
values: Iterable[Hashable], filter_: Callable = lambda value: True
) -> Dict[Hashable, int]:
"""
Returns dictionary where keys are unique `values` and values
there are entry counters. Adding condition can be specified by
`filter_` argument which `lambda value: True` by default.
"""
counted = {}
for value in values:
if filter_(value):
if value in counted:
counted[value] += 1
else:
counted[value] = 1
return counted | d97735cdbface56fbecabc789ab854a71ee18bf0 | 311,258 |
def get_wordlist(stanzas):
"""
Get an iterable of all final words in all stanzas
"""
    return sorted(set().union(*[stanza.words for stanza in stanzas])) | a2c610c90aae76276a6b5cc345f9537e1c131464 | 99,920 |
def safe_convert(s, default=0):
"""
Converts something to a float. If an error occurs, returns the default value.
"""
try:
v = float(s)
    except (TypeError, ValueError):
v = default
return v | 47afd68bb36d47dfd173c8630581cbd1f427884e | 131,615 |
def Mass_function1(asini, porb):
"""
Returns the mass function of a binary star.
asini: Projected semi-major axis in light-second.
porb: Orbital period in second.
"""
mfunc = 8015123.37129 * asini**3 / porb**2
return mfunc | 13273846fb6d20f943e5740e014fe1fb2a85a6a3 | 276,092 |
def dtime2a(dtime):
"""Converts time in seconds to 'HH:MM:SS'"""
    dtime = int(dtime)
    sec = dtime % 60
    dtime //= 60
    minute = dtime % 60
    dtime //= 60
    hour = dtime
return "%d:%02d:%02d" % (hour, minute, sec) | 5f7186a931f009a34b512a5e82789035e91af6eb | 198,563 |
def getintervalsingle(stimulus, td, kind):
"""
get the interval list for a single stimulus
input: stimulus struct; trackdata; kind of stimulus
output: list of (t1, t2) intervals
"""
tmin, tmax = td.findtimerange()
stimonset = stimulus.onset
stimduration = stimulus.duration
stimoffset = stimonset + stimduration
offsetduration = tmax - (stimonset + stimduration)
if kind == "basic stats":
intervallist = [
(0.0, stimonset),
(stimonset + 1, stimonset + 4),
(stimonset, stimonset + 0.5 * stimduration),
(stimonset + 0.5 * stimduration, stimoffset),
(stimoffset, stimoffset + 0.5 * offsetduration),
(stimoffset + 0.5 * offsetduration, tmax),
]
elif kind == "line fits":
intervallist = [
(stimonset, stimonset + 2.0),
(stimonset + 2.0, stimonset + 4.0),
(stimonset + 0.5 * stimduration, stimoffset),
(stimoffset, tmax),
(stimonset + 0.5 * stimduration, tmax),
]
# this interval is only valid for long enough durations:
if stimonset + 4.0 < stimonset + 0.5 * stimduration:
intervallist.append((stimonset + 4.0, stimonset + 0.5 * stimduration))
elif kind == "xy-motion":
intervallist = [
(0.0, stimonset),
(stimonset, stimoffset),
(stimonset, stimonset + 30),
(stimonset + 30, tmax),
]
else:
raise ValueError("unknown interval kind %s" % kind)
return intervallist
# end getintervalsingle() | ba2d2ae0e814e1139b60eaad2e64276c7219bea0 | 408,266 |
def max(x):
"""Find maximal of array elements and corresponding indices along axis = 1
Args:
x: array-like
Returns:
max_vals_along_axis and max_indices_along_array
"""
    if len(x.shape) != 2:
        raise ValueError('x must be a 2-dimensional array')
max_vals = x.max(1)
max_inds = x.argmax(1)
return max_vals, max_inds | 11cea43653a600d65d074374fec3ca2228dea98b | 80,715 |
def _fast_copy_probs_table(table):
"""
Copy a dictionary representation of a probability table faster than the standard deepcopy.
:param dict table: A dictionary with the tuples of ints as keys and floats as values.
:return: The copied table.
"""
table_copy = {tuple(assign): value for assign, value in table.items()}
return table_copy | f825cb7e36c71c7395feb4e50051ee127c0beea4 | 524,056 |
import math
def bound_value(value, minimum=-math.inf, maximum=math.inf, to_int=False):
"""
Bounds a value between a minimum and maximum
:param value: the value to bound
:type value: int/float
:param minimum: the lower bound
:type minimum: int/float
:param maximum: the upper bound
:type maximum: int/float
:param to_int: whether or not to cast the result to an int
:type to_int: bool
:returns: the bounded value
"""
if minimum is None: # in case None is passed in
minimum = -math.inf
if maximum is None:
maximum = math.inf
if to_int:
if isinstance(minimum, float) and minimum != -math.inf:
return int(min(max(value, math.ceil(minimum)), maximum))
return int(min(max(value, minimum), maximum))
return min(max(value, minimum), maximum) | 1176bf1ad4514923c1b733062a3c336e38a8f0a4 | 243,290 |
import codecs
import json
def load(path):
"""Loads a single Zeppelin notebook. For now just a wrapper around json.load"""
with codecs.open(path, mode="r", encoding="UTF-8") as fr:
try:
return json.load(fr)
except json.JSONDecodeError:
return {} | b88ee6454dc603d6b324a91f879dc167e433f442 | 648,073 |
def fill_zeros(s,n):
"""
Add zeros to a string s until
it reaches length n.
"""
    while len(s) < n:
        s = '0' + s
return s | 0b25353954070668d4bdb3450287b2291008d019 | 655,934 |
def mse(original_data, degradated_data):
"""
This calculates the Mean Squared Error (MSE)
:param original_data: As Pillow gives it with getdata
:param degradated_data: As Pillow gives it with getdata
:return: List containing the MSE in Y, U, V and average of those 3
"""
error_y = 0
error_cb = 0
error_cr = 0
for i in range(0, len(original_data)):
dif_y = abs(original_data[i][0] - degradated_data[i][0])
dif_cb = abs(original_data[i][1] - degradated_data[i][1])
dif_cr = abs(original_data[i][2] - degradated_data[i][2])
error_y += dif_y * dif_y
error_cb += dif_cb * dif_cb
error_cr += dif_cr * dif_cr
mse_y = error_y / len(original_data)
mse_cb = error_cb / len(original_data)
mse_cr = error_cr / len(original_data)
mse_avg = (mse_y*4 + mse_cb + mse_cr)/6
return [mse_y, mse_cb, mse_cr, mse_avg]
"""
Obtains the PSNR for a list of MSE values
:param mse_list: A list of mse quantities
:return: List containing the PSNR. If MSE is 0 the output PSNR is infinite
""" | aaf80905df77dc8e4ec5be361b2227ff419237c1 | 576,208 |
def tokenize_timeperiod(timeperiod):
"""
method breaks given timeperiod into 4 parts: (year, month, day, hour)
    for instance: daily 2015031400 -> ('2015', '03', '14', '00')
                  hourly 2015031413 -> ('2015', '03', '14', '13')
                  monthly 2015030000 -> ('2015', '03', '00', '00')
                  yearly 2015000000 -> ('2015', '00', '00', '00')
:return: tuple of four values
"""
assert len(timeperiod) == 10, 'timeperiod {0} does not match accepted format YYYYMMDDHH'.format(timeperiod)
    return timeperiod[:4], timeperiod[4:6], timeperiod[6:8], timeperiod[8:] | 2c5208d825566426076043f3100699649b968d35 | 597,653 |
import time
def time_str(epoch_time, output_format="%F %T %Z"):
"""
Return epoch_time as a GMT time string.
"""
return time.strftime(output_format, time.gmtime(epoch_time)) | e83e6c15869554733a3f16fcd2d6bf4a5135090e | 349,924 |
def _get_all_issue_tracker_keys(all_issue_trackers):
"""Collect all keys in all dicts."""
ret = set()
for dct in all_issue_trackers:
ret.update(dct.keys())
return ret | 192d1ea9a1d191b2529f5e94beb524802ec20851 | 117,491 |
def readDirective(s):
"""Reads a directive line from forcefield, itp, or rtp file."""
# Remove comments, then split normally
sline = s.split(';')[0].split()
if len(sline) == 3 and sline[0] == '[' and sline[2] == ']':
return sline[1]
else:
return '' | 0daea2a92fc509761bebf97830a819781fd6df42 | 209,861 |
def is_empirical(var_type):
"""
    Checks whether the variable of interest is of type "empirical", i.e.
    whether its values are taken from numerical data rather than held
    at a constant value.
"""
return var_type == "empirical" | b421857d8000d63a782cc1205580c7226147d760 | 470,929 |
def clip(val, lower=0.0, upper=1.0):
"""
Clips val between lower and upper.
>>> clip(1, 0, 2)
1
>>> clip(2, 3, 6)
3
>>> clip(5, 1, 2)
2
Works recursively on lists.
>>> clip([-0.2, 0.5, 1.4, 0.7])
[0.0, 0.5, 1.0, 0.7]
:param val: value to be clipped
:param lower: lower bound
:param upper: upper bound
:return: val clipped between lower and upper
"""
if isinstance(val, list):
return [clip(v, lower, upper) for v in val]
return max(lower, min(upper, val)) | f77fad2fadc17c3fb7a881e869e33ae195255410 | 91,866 |
def adapt_PEPformat(x):
"""
PEPred-Suite jar input format
:param x: id that starts with '>'
:return: id that starts with '>' and ends with '|0'
"""
if '|' not in x:
x = x + '|0'
return x | 4d51ed43f1fdb207232f81c2e5211be0efa2d389 | 174,231 |
def get_serial_numbers(path):
"""
Reads the serial number from .BIN file
Args:
path (str): route to file
Returns:
sn (int): the serial number found in
the header of the file
"""
    with open(path, 'rb') as file:
        header_block = file.read()
    sn_bytes = header_block[416:420]
    sn = int.from_bytes(sn_bytes, byteorder='little')
    return sn | 52cdf1001dcac1570ac6c51406ac04879c08972e | 334,630 |
import csv
def load_csv(filename, label_col):
"""
    Loads a csv file containing the data, parses it,
    and returns lists containing the feature rows and
    their integer labels (only the first ~1000 rows are read).
    :param filename: the filename
    :param label_col: index of the label column
    :return: tuple of (feature rows, labels) as lists
"""
X_train = []
X_test = []
num = 0
with open(filename,'rt') as f:
reader = csv.reader(f, delimiter=',')
for row in reader:
row1 = [float(item) for item in row if item != '\0']
last_ele = row1.pop(label_col)
X_train.append(row1)
X_test.append(int(last_ele))
num+=1
if num > 1000:
break
return X_train, X_test | 30b4b6dcbeaa33fee40cdea81a063dbb3354eb16 | 648,044 |
def pseudo_input(lines):
"""Return a function that acts like raw_input but feeds the input list."""
ilines = iter(lines)
def raw_in(prompt):
try:
return next(ilines)
except StopIteration:
return ''
return raw_in | aa43f8732109ec0873a7793d7c11fbfb813aefca | 331,345 |
def color_rgb(red, green, blue):
""" Given three intensities red, green, blue, all from 0 to 255,
returns the corresponding CSS color string e.g. '#ff00cc' """
return f"#{red*256**2 + green*256 + blue:06x}" | be02aea5a0fc04f7bbedce4b3fc18fb5cdfff862 | 646,878 |
def get_yelp(zipcode, page_num):
"""get yelp address for given zipcode and page number."""
return 'https://www.yelp.com/search?find_loc={}&start={}&cflt=restaurants'.format(
zipcode, page_num) | f11bc5dd07940677725337779a91c8d868535989 | 477,438 |
def distance_to_edge(coordinate, grid_size):
"""Calculate the maximum distance to the edge for this coordinate."""
half_point = (grid_size + 1) / 2
return int(round(abs(coordinate - half_point + 1) + half_point, 0)) | b1284c2e0f61e56232f55e5be381e6c8eac97fa7 | 188,424 |
import re
def check_password(password):
"""Checks if the password is safe enough (At least 1 number, 1 capital letter and 1 special character)."""
cap_letter_regex = re.compile(r"[A-Z]+")
cap_letter_found = re.search(cap_letter_regex, password)
number_regex = re.compile(r"\d+")
number_found = re.search(number_regex, password)
spec_charac_regex = re.compile(r"\W+")
spec_charac_found = re.search(spec_charac_regex, password)
    return bool(cap_letter_found and number_found and spec_charac_found) | 8db1f9b2cfede8eaa249b50950cc2e93b3cca176 | 212,188 |
def eqn_of_line(x1: float, y1: float, x2: float, y2: float) -> str:
"""
Finds equation of a line passing through two given points.
Parameters:
x1, y1 : The x and y coordinates of first point
x2, y2 : The x and y coordinates of second point
Returns:
Equation of the line as a string.
"""
    a = y2 - y1
    b = x1 - x2
    c = a * x1 + b * y1
    if b < 0:
        s = f"{a}x - {abs(b)}y = {c}"
    else:
        s = f"{a}x + {b}y = {c}"
    return s | 721225fce41fd0d985f9e1aef351ef7751a8db49 | 485,093 |
import math
def sum_more_cosines(m, n):
"""
What comes in: Integers m and n, with m <= n.
What goes out: Returns the sum
cos(m) + cos(m+1) + cos(m+2) + ... cos(n)
Side effects: None.
Examples:
-- sum_more_cosines(0, 3) returns
cos(0) + cos(1) + cos(2) + cos(3)
which is approximately 0.13416
-- sum_more_cosines(-4, 1) returns
cos(-4) + cos(-3) + cos(-2) + cos(-1) + cos(0) + cos(1)
which is approximately 0.02082.
"""
# -------------------------------------------------------------------------
# DONE: 3. Implement and test this function.
# Note that you should write its TEST function first (above).
# That is called TEST-DRIVEN DEVELOPMENT (TDD).
#
# IMPORTANT: In this and all other problems in this session,
# you must NOT use the 2 or 3-parameter versions
# of the RANGE expression, if you happen to know them.
# That is, no fair using range(m, n) or anything like that.
# Just range(blah) where blah is a single variable.
# Reason: To ensure that you get more practice using expressions.
# -------------------------------------------------------------------------
answer = 0
for k in range((n + 1) - m):
answer = answer + math.cos(m + k)
return answer | cee71d79acd52aa73e1b4a3369d2b27e805339f3 | 258,817 |
import math
def getTotalIterations(tile_size, shape):
"""
Returns the number of tile iterations.
"""
num_dims = len(shape)
iters = 1
for i in range(num_dims):
iters *= math.ceil(float(tile_size[i])/shape[i])
return iters | 9578c5fb0f9b6d8dbca895cc17989f1e8059326d | 190,143 |
def geo_name(year, release, summary_level):
"""Resolve a year, release and summar level number into a URL geography group name"""
if int(summary_level) in (140, 150):
return 'Tracts_Block_Groups_Only'
else:
return 'All_Geographies_Not_Tracts_Block_Groups' | c4e72ea3f3bcfea456679d0e7255224e2105311f | 589,289 |
def get_account_id(role_arn):
"""
Returns the account ID for a given role ARN.
"""
# The format of an IAM role ARN is
#
# arn:partition:service:region:account:resource
#
# Where:
#
# - 'arn' is a literal string
# - 'service' is always 'iam' for IAM resources
# - 'region' is always blank for IAM resources
# - 'account' is the AWS account ID with no hyphens
#
# See https://docs.aws.amazon.com/IAM/latest/UserGuide/reference_identifiers.html#identifiers-arns
try:
arn, _, service, region, account, _ = role_arn.split(":")
except ValueError:
raise ValueError(f"Is this a valid AWS ARN? {role_arn}")
if arn != "arn":
raise ValueError(f"Is this a valid AWS ARN? {role_arn}")
if service != "iam" or region != "" or not account.isnumeric():
raise ValueError(f"Is this an IAM role ARN? {role_arn}")
return account | 623eb66eefd59b9416deb478c527062ae4454df7 | 704,778 |
import random
import string
def generate_random_filename_safe_text(length: int = 4) -> str:
"""
Generates a (not cryptographically safe) random filename-safe string.
Args:
length:
Length of the random text.
Returns:
Generated text.
"""
return "".join([
random.choice(string.ascii_letters + string.digits)
for _ in range(length)
]) | f333fb715e0e2834c263271c1b7f25e1ddcb80dd | 606,155 |
import shutil
def concatenate_files(*paths: str, out_path: str):
"""Concatenates an arbitrary number of files into one file.
Args:
*paths: An arbitrary number of paths to files
out_path: Path to place concatenated file
Returns:
Path to concatenated file
"""
with open(out_path, 'wb') as out:
for path in paths:
with open(path, 'rb') as f:
shutil.copyfileobj(f, out)
return out_path | 976eb2820706ea3977b4bd4f0dcbef8065bae308 | 187,585 |
def soma_radius(morph):
"""Get the radius of a morphology's soma."""
return morph.soma.radius | 2f9991a2f9240965bdb69a1a14814ed99bf60f86 | 709,471 |
def silent(obj):
""" Mark view method or class as "silent" so events won't be fired.
Should be used as decorator on view classes or methods.
:param obj: Any object that allows attributes assignment. Should be
either view method or view class.
"""
obj._silent = True
return obj | 916a66634eeaa78f3223590cbb2ec2c88748ad9b | 605,592 |
def generate_diff(old_list, new_list):
"""Returns 2 lists of added, deleted and unchanged elements.
Added elements are elements presents in new_list but not in old_list.
Deleted elements are elements presents in old_list but not in new_list.
Args:
old_list (list): Old list.
new_list (list): New list.
Returns:
        tuple: Contains 3 elements:
- A list of added elements;
- A list of deleted elements.
- A list of unchanged elements.
"""
old_set = set(old_list)
new_set = set(new_list)
added = new_set - old_set
deleted = old_set - new_set
unchanged = old_set.intersection(new_set)
return list(added), list(deleted), list(unchanged) | f915c0ca33b6f9fa53450dc3d40b042271ca3fc2 | 23,551 |
def get_cycle_start(year):
"""Round year down to the first year of the two-year election cycle. Used
when filtering original data for election cycle.
"""
return year if year % 2 == 1 else year - 1 | df6d7c809ee332b74f03f17b023f4159206fccda | 182,295 |
def replace_ssh_keys(input_dict, original_string, secret_name):
"""
Replace each SSH key path entry (original_string) in the top level of
a dict with the secret.
"""
changed = False
for k in input_dict:
if input_dict[k] == original_string:
input_dict[k] = {'get_secret': secret_name}
changed = True
return changed | 7fef8720452c93c1fe3be5e4d89c322271b39018 | 167,530 |
from datetime import datetime
def time_delta(t1: str, t2: str, fmt='%a %d %b %Y %X %z') -> int:
"""
>>> time_delta('Sun 10 May 2015 13:54:36 -0700',
... 'Sun 10 May 2015 13:54:36 -0000')
25200
>>> time_delta('Sat 02 May 2015 19:54:36 +0530',
... 'Fri 01 May 2015 13:54:36 -0000')
88200
>>> time_delta('Wed 12 May 2269 23:22:15 -0500',
... 'Tue 05 Oct 2269 02:12:07 -0200')
12527392
"""
dt1, dt2 = datetime.strptime(t1, fmt), datetime.strptime(t2, fmt)
delta = dt1 - dt2
return abs(int(delta.total_seconds())) | 0583e955b8b84cfbc27da57259753fb0ba3b102a | 221,741 |
def wilight_to_hass_hue(value):
"""Convert wilight hue 1..255 to hass 0..360 scale."""
return min(360, round((value * 360) / 255, 3)) | 5a9021185f7bbb9bf1351b2df55207063ee49f9a | 20,407 |
def calc_oil_drainage_area(oil_produced=2.5, res_height=20, porosity=0.25, avg_water_saturation=0.25, oil_vol_factor=1.2, recoveryfactor=0.10):
"""Returns the estimate for drainage area (Acres) given the oil produced (MBO), reservoir height (ft),
porosity (fraction), average water saturation (fraction), the oil formation volume factor (RBBL/STB), and the recovery factor."""
return ((oil_produced*10**(3))*oil_vol_factor)/(7758*res_height*porosity*(1-avg_water_saturation)*recoveryfactor) | 83c11a407489a20a88bfc4bbdd6478305efa3103 | 287,229 |
import math
def z_axis_angle(x1: float, y1: float, x2: float, y2: float) -> float:
"""Calulates that angle in degrees [0, 360] that the slope of the provided points
form with rotation about the z axis."""
angle = math.degrees(math.atan2(y2 - y1, x2 - x1))
# Wrap angle to bound it between 0 and 360.
if angle < 0:
angle += 360
if angle > 360:
angle -= 360
return angle | cb376ebd57af93968084abfddedcaf53bb277821 | 325,415 |
def plot_continuation_points(ax, lmbda_saddle, u_saddle, lmbda_hopf, u_hopf):
"""
Plot the saddle and Hopf bifurcation points from the continuation results
INPUT
ax: matplotlib axes
lmbda_saddle: array of the bifurcation parameter values corresponding to saddle points
u_saddle: array of the infinite norms of the variable corresponding to saddle points
lmbda_hopf: array of the bifurcation parameter values corresponding to Hopf points
u_hopf: array of the infinite norms of the variable corresponding to Hopf points"""
ax.plot(lmbda_saddle, u_saddle, "o", color="blue")
ax.plot(lmbda_hopf, u_hopf, "o", color="red")
return None | c61cb0a6ab32ec2dac7b08fdf94045cc53468ed5 | 342,564 |
from functools import reduce
def get_eqns_unk (eqns, knowns):
"""Returns the list of unknowns, given the knowns, from a list of equations"""
if not isinstance(eqns, (list, tuple)):
eqns = [eqns]
atoms = reduce(lambda acc, eqn: acc.union(eqn.atoms()), eqns, set())
    return list(filter(lambda x: not x.is_Number, atoms - set(knowns))) | 565a5a2bedd1453aa7f15f651fbecb7dc21b1237 | 585,859 |
def obter_pos_l(pos):  # position -> str
"""
    Gets the row of a position.
    :param pos: position
    :return: row of the position
"""
if pos[1] == 0:
return '1'
elif pos[1] == 1:
return '2'
elif pos[1] == 2:
return '3'
else:
        raise ValueError('obter_pos_l: invalid argument') | e03da8e5ed6dd839994f3194ad123a9b0de2a7d2 | 332,827 |
import pathlib
from typing import Optional
def get_file_contents(path: pathlib.Path) -> Optional[str]:
"""
Helper function to read the contents of a file.
"""
contents = None
with path.open() as f:
contents = f.read()
return contents | c797bb19e00e65e8d534740863310753ad52f4e6 | 258,113 |
def microlisp_decode_atom(s):
""" Convert string atom to corresponding type
Support: boolean `true' `false', integer, float
"""
if s == 'true':
return True
if s == 'false':
return False
    try:
        return int(s)
    except ValueError:
        pass
    try:
        return float(s)
    except ValueError:
        pass
return s | 27f02ad2021c5c0bee8bb7756f10f22c9f4cdf86 | 335,974 |
def _remove_empty_entries(entries):
"""Remove empty entries in a list"""
valid_entries = []
for entry in set(entries):
if entry:
valid_entries.append(entry)
return sorted(valid_entries) | fd931ca05ae777ede70e729993b0d2a60eb06b87 | 408,455 |
def _get_uid_to_db_object_map(model_class, unique_uid_set,
query_kwargs={}):
"""Queries the database for the given list of uids, returning a map from
uid to Python object.
"""
uid_to_obj_map = {}
query_set = model_class.objects.filter(**query_kwargs).filter(
uid__in=unique_uid_set)
for obj in query_set:
uid_to_obj_map[obj.uid] = obj
return uid_to_obj_map | 6de0bcec1aaacda7ba33b7be1dedb1bce55c5fb4 | 408,355 |
def is_iterable(maybe_iter, unless=(dict,)):
""" Return whether ``maybe_iter`` is an iterable, unless it's an instance of one
of the base class, or tuple of base classes, given in ``unless``.
Example::
>>> is_iterable('foo')
False
>>> is_iterable(['foo'])
True
>>> is_iterable(['foo'], unless=list)
False
        >>> is_iterable(range(5))
True
"""
if isinstance(maybe_iter, str):
return False
try: # pragma: no cover
iter(maybe_iter)
except TypeError: # pragma: no cover
return False
return not isinstance(maybe_iter, unless) | 5b547e51f6c109673e9a4a4cc236a48d6ae5f198 | 351,785 |
from typing import Tuple
def complete_index(index: Tuple, n_dims: int) -> Tuple:
""" Complete the index with empty slices.
This will expand ellipses if present, or pad at the end if not.
Args:
index:
The index in which to replace ellipses
n_dims:
The number of dimensions of the sliced object
This fills in missing dimensions shown with ellipsis with full slices.
Examples:
>>> complete_index((5, 3), 4)
(5, 3, slice(None, None, None), slice(None, None, None))
>>> complete_index((5, Ellipsis, 3), 4)
(5, slice(None, None, None), slice(None, None, None), 3)
>>> complete_index((5, 3), 2)
(5, 3)
>>> complete_index((Ellipsis,), 2)
(slice(None, None, None), slice(None, None, None))
>>> complete_index((1, None, 2), 2)
(1, None, 2)
>>> complete_index((Ellipsis, None), 2)
(slice(None, None, None), slice(None, None, None), None)
"""
n_ellipses = sum(ix is Ellipsis for ix in index)
if n_ellipses > 1:
raise IndexError("an index can only have a single ellipsis ('...')")
if n_ellipses == 0:
if n_dims == len(index):
return index
else:
index = (*index, Ellipsis)
ixix = index.index(Ellipsis)
extra_dims = 1 + n_dims + sum(ix is None for ix in index) - len(index)
fill = (slice(None, None, None),) * extra_dims
return (*index[:ixix], *fill, *index[1 + ixix :]) | d268c7abf9680842dab38f407fed0498c6164b18 | 629,295 |
def add_start_end(tokens, start_word="<START>", end_word="<END>"):
""" Add start and end words for a caption string
Args:
tokens: original tokenized caption
start_word: word to indicate start of a caption sentence
end_word: word to indicate end of a caption sentence
Returns:
token_caption: tokenized caption
"""
token_caption = [start_word]
token_caption.extend(tokens)
token_caption.append(end_word)
return token_caption | 565beac0d1439a3079f2f6202e7b25e34c219013 | 470,338 |
def _escape_filename(filename):
"""Escapes spaces in the given filename, Unix-style."""
return filename.replace(" ", "\\ ") | 9bb22a7ab928cbe88bfa3c9b48b2b33faa657fc9 | 662,933 |
def rubygems_api_url(name, version=None, repo='https://rubygems.org/api'):
"""
Return a package API data URL given a name, an optional version and a base
repo API URL.
For instance:
https://rubygems.org/api/v2/rubygems/action_tracker/versions/1.0.2.json
If no version, we return:
https://rubygems.org/api/v1/versions/turbolinks.json
Unused:
https://rubygems.org/api/v1/gems/mqlight.json
"""
repo = repo.rstrip('/')
if version:
api_url = '{repo}/v2/rubygems/{name}/versions/{version}.json'
else:
api_url = '{repo}/v1/versions/{name}.json'
return api_url.format(**locals()) | 578cc8cc346ffe8868bb8700904cab6e437d4a23 | 134,820 |
import re
def untorontoify(term):
"""Return a version of the term with "Toronto" removed.
Useful if the name in OSM is something like "Union Square (Toronto)"
or "Toronto City Hall".
"""
return re.sub(r'^Toronto ', '', re.sub(r' \(Toronto\)', '', term)) | 6e33506137a411c8e7e35e46a5121c388b2fed33 | 349,240 |
def get_bd(r, a):
"""
Converts turnover and relative extinction to birth and death rates.
Args:
r (float): turnover or net diversification (birth - death)
a (float): relative extinction (death / birth)
Returns:
(float, float): birth, death
"""
return -r / (a - 1), -a * r / (a - 1) | 473b92c31724978fcdb435c697a7e3b2bf8ebf28 | 250,124 |
import torch
def flatten_padded_batch(batch, nwords):
"""
Inverse of pad_flat_batch
Parameters
===========
batch : tensor(seq_len, batch, encoding_size), output of the encoder
nwords : tensor(batch), lengths of the sequence (without padding)
Returns
========
tensor(nwords, encoding_size)
>>> batch = [[[0], [3], [4]], [[1], [0], [5]], [[2], [0], [0]]]
>>> nwords = [3, 1, 2]
>>> flatten_padded_batch(torch.tensor(batch), torch.tensor(nwords)).tolist()
[[0], [1], [2], [3], [4], [5]]
"""
with torch.no_grad():
output = []
for sent, sentlen in zip(batch.transpose(0, 1), nwords):
output.extend(list(sent[:sentlen].chunk(sentlen))) # remove <eos>
return torch.cat(output, dim=0) | 08e02a789ccb6422f9e905b36d61a886ee0ea310 | 438,666 |
def groupByThree(lst):
"""
Given input [1,2,3,4,5,6,7,8,9]
outputs: [(1,2,3),(4,5,6),(7,8,9)]
"""
    return list(zip(*[lst[x::3] for x in (0, 1, 2)])) | 7e2f80d3e27b0a75bcfd3507c4f9580b4c2cf1be | 101,824 |
def union(x, y=None):
"""
Return the union of x and y, as a list. The resulting list need not
be sorted and can change from call to call.
INPUT:
- ``x`` - iterable
- ``y`` - iterable (may optionally omitted)
OUTPUT: list
EXAMPLES::
sage: answer = union([1,2,3,4], [5,6]); answer
[1, 2, 3, 4, 5, 6]
sage: union([1,2,3,4,5,6], [5,6]) == answer
True
sage: union((1,2,3,4,5,6), [5,6]) == answer
True
sage: union((1,2,3,4,5,6), set([5,6])) == answer
True
"""
if y is None:
return list(set(x))
return list(set(x).union(y)) | 67b20db26081c05a8c706d5529897bc753726f6a | 30,153 |
from typing import Union
def non_negative(diff: Union[float, int]) -> Union[float, int]:
"""Returns 0 if diff is negative or positive value."""
return 0 if diff < 0 else diff | c0ef7f7661633e48558e6066e0b1cac4c4e89f62 | 80,699 |
import hashlib
def md5_matches(expected_md5: str, filepath: str) -> bool:
"""Checks the MD5 hash for a given filename and compares with the expected value.
Args:
expected_md5: expected MD5 hash.
filepath: file for which MD5 will be computed.
Returns:
True if MD5 matches, False otherwise.
"""
with open(filepath, "rb") as f:
current_md5 = hashlib.md5(f.read()).hexdigest()
return expected_md5 == current_md5 | a28197cb3237b27e70998a05886e738055bd6417 | 144,881 |
def parameter_tuple_parser(parameter_tuple, code_list, relative_base):
"""
Accepts parameter_tuple, code_list, and relative_base. Returns parameter for use in intcode operation.
"""
if parameter_tuple[0] == 0:
return code_list[parameter_tuple[1]]
elif parameter_tuple[0] == 1:
return parameter_tuple[1]
elif parameter_tuple[0] == 2:
return code_list[parameter_tuple[1] + relative_base]
else:
print('And I oop.... parameter_tuple_parser') | f869240666f2adca0551a3620644023b88930a5a | 11,417 |
def dict_with_keys(dictionary, keys):
"""
Returns a new dictionary including only the specified keys
Args:
dictionary(dict): dictionary to filter keys
keys(iterable): iterable of keys to filter to
Returns:
dict: copy of original dictionary inclusive only of specified keys
"""
return {key: dictionary[key] for key in keys} | e93d547b515269c9e2320e41d15ab39ae363c209 | 50,862 |
def y_position(coldstart_status):
"""Returns a hardcoded y-position for text
depending on coldstart status."""
if coldstart_status == 'warm':
return 0.8
elif coldstart_status == 'partial':
return 0.6
elif coldstart_status == 'cold':
return 0.4
else:
return 0.1 | 860ec47eb05b96cd68d2af71bae9692dc1612693 | 574,128 |
def get_color(val, trendType):
"""Determine if evolution is positive or negative.
From trend type - determined in config.toml for each KPIs,
picking right color to return
"""
    if val > 0:
        color = 'red' if trendType == 'normal' else 'green'
    elif val < 0:
        color = 'green' if trendType == 'normal' else 'red'
    else:
        color = 'blue'
    return color | edbf842824363f183fe2d612383323e7a62f4ab5 | 353,311 |
def Rder(x, alpha , beta, gamma):
"""Calculates the derivative with respect x1 of the first dimension of the function R()
Parameters
-------
x : array of shape (N,)
first dimension of state at a single timepoint
alpha, beta, gamma : floats
        parameters of single FitzHugh-Nagumo nodes
Returns
-------
R : array of shape (N,)
d/dx1 ( -alpha* x1**3 + beta *x1**2 - gamma* x1 )
"""
return -3*alpha* x**2 + 2*beta*x - gamma | 2d3c776623a2469d9f42a180ae71e9798dbda579 | 493,289 |
import base64
import six
def serialize(obj):
"""Serialize the given object
:param obj: string representation of the object
:return: encoded object (its type is unicode string)
"""
result = base64.urlsafe_b64encode(obj)
# this workaround is needed because in case of python 3 the
# urlsafe_b64encode method returns string of 'bytes' class.
if six.PY3:
result = result.decode()
return result | c7a7d2362d6c4cc194495af77d5f09991ed4b2ed | 688,604 |
def merge_dicts(*args):
"""Given any number of dicts, shallow copy and merge into a new dict, precedence goes to key value pairs in latter
dicts.
Args:
*args (list(dict)): A list of dictionaries to be merged.
Returns:
dict: A merged dictionary.
"""
result = {}
for dictionary in args:
result.update(dictionary)
return result | b570069e82187414559c7a9479402e263366d978 | 216,526 |
def read_file(path_to_file: str):
"""Return the file data from `path_to_file`."""
with open(path_to_file, 'r') as json_file:
return json_file.read() | c7da33c398e7f5ff190c8b5f1ff0d972d01b4d55 | 61,651 |
def max_power_rule(mod, g, tmp):
"""
**Constraint Name**: GenVar_Max_Power_Constraint
**Enforced Over**: GEN_VAR_OPR_TMPS
Power provision plus upward services cannot exceed available power, which
is equal to the available capacity multiplied by the capacity factor.
"""
return (
mod.GenVar_Provide_Power_MW[g, tmp] + mod.GenVar_Upwards_Reserves_MW[g, tmp]
<= mod.Capacity_MW[g, mod.period[tmp]]
* mod.Availability_Derate[g, tmp]
* mod.gen_var_cap_factor[g, tmp]
) | 03cfd061867ce68dcab7068c38eaece69b9906ca | 26,765 |
def find_pow2_size(addr, min_size, next_value):
"""Find smallest power of 2 value greater than min_size by given addr.
For instance, (0x4000_0000, 0x21000) and `next_value` as 0x40080000,
the result will be 0x4_0000
    But the result must not exceed the alignment given by base_addr's lowest
    set bit. For instance, if the base_addr is 0x4003_0000, the return value
    should be less than or equal to 0x1_0000. Cannot be 0x4_0000.
"""
base_addr = int(addr["base_addr"], 0)
diff = next_value - base_addr
# Find the least one bit position.
# If base_addr is 0, then any value can be used
if not base_addr == 0:
leading_one = 1
while True:
if base_addr & leading_one != 0:
break
leading_one = leading_one << 1
if leading_one <= diff:
diff = leading_one
i = 1
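    # Find the smallest power of two that is >= min_size.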
while True:
i = i << 1
if i >= min_size:
break
    # If the found pow2 value is greater than diff, it cannot be used. Just use
    # min_size; then the tool will use comparators (>=, <=)
if i > diff:
i = min_size
    # Shall be at least 4 KiB
assert i >= 0x1000
return i | 4298ab188bfa45ed6ecc996198fe9fe290fa9b03 | 420,633 |
def get_pip_package_key(candidate):
"""Get package key for the provided string.
The string may be a name and version.
"""
return str(candidate).split()[0].split('==')[0] | 323deeee59e5b454e7291c944477471b2ca61aa0 | 79,023 |
def h3_html(text: str) -> str:
"""Embed text in subsubheading tag."""
return "<h3>{}</h3>".format(text) | 7f8631987215674d1bb71b4b60311aad6fbd8449 | 312,846 |
import re
def search_re(out, l):
""" Search the regular expression 'l' in the output 'out'
and return the start index when successful.
"""
m = re.search(l, out)
if m:
return m.start()
return None | c20c5df306c9f0914825fb1e2c10d0ef5590dda6 | 659,770 |
import re
def _gem_name(name: str, version: str) -> str:
"""Determines the gem name for a generated API client.
This effectively converts the API name and version to underscore case,
and matches the logic in the Ruby Apiary generator.
Arguments:
name {str} -- The name of the API being generated
version {str} -- The version of the API being generated
Returns:
str -- Directory name
"""
name = re.sub(r"([A-Z]+)([A-Z][a-z])", r"\1_\2", name)
name = re.sub(r"([a-z])([A-Z])", r"\1_\2", name)
name = name.lower()
version = version.replace(".", "_").lower()
return f"google-apis-{name}_{version}" | 6351d14ce9232c3e5573164914c16c39e0b1d2a8 | 449,040 |
import math
def Log_Nash_Sutcliffe(SimulatedStreamFlow, ObservedStreamFlow):
"""
(SimulatedStreamFlow, ObservedStreamFlow)
Logarithmic Nash-Sutcliffe model efficiency coefficient
"""
x = SimulatedStreamFlow
y = ObservedStreamFlow
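    # As implemented: E = 1 - sum((ln y_i - ln x_i)^2) / sum((ln y_i - ln ybar)^2),
    # where ybar is the mean of the raw observations (not of their logs) and
    # zero flows are floored at 1e-3 before taking logarithms.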
    A = 0.0  # numerator
    B = 0.0  # denominator
tot = 0.0
for i in range(0, len(y)):
tot = tot + y[i]
average = tot / len(y)
for i in range(0, len(y)):
X = x[i]
Y = y[i]
if X == 0:
X = 1e-3
if Y == 0:
Y = 1e-3
A = A + math.pow((math.log(Y) - math.log(X)), 2) # log = ln or log_e
B = B + math.pow((math.log(Y) - math.log(average)), 2)
    E = 1 - (A / B)  # Nash-Sutcliffe model efficiency coefficient
return E | b4220b4849f53027ee63c046c08db069e8d3c563 | 622,563 |
def dict2class(dct, name='Dummy'):
"""
Convert a dict to a class.
Examples
--------
>>> dct={'a':1, 'b':2}
    >>> dict2class(dct, 'Foo')
    <Foo instance at 0x3615ab8>
    >>> dict2class(dct, 'Bar')
    <Bar instance at 0x3615b48>
    >>> dict2class(dct, 'Bar').__dict__
{'a':1, 'b':2}
"""
class Dummy:
pass
cl = Dummy()
cl.__dict__.update(dct)
cl.__class__.__name__ = name
return cl | a28d84fa78065ee0759aee0b448bfd2206696355 | 374,971 |
def is_modified(path, commit):
"""
Test whether a given file was present and modified in a specific commit.
Parameters
----------
path : str
The path of a file to be checked.
commit : git.Commit
A git commit object.
Returns
-------
bool
Whether or not the given path is among the modified files
and was not deleted in this commit.
"""
try:
d = commit.stats.files[path]
if (d["insertions"] == 0) and (d["deletions"] == d["lines"]):
# File was deleted in commit, so cannot be tested
return False
else:
return True
except KeyError:
return False | 4f12b314a6525d7c7832fc4ef37f9cf4d39ee555 | 78,029 |
def median(lst):
"""
    Calculates the median of a list
"""
n = len(lst)
if n < 1:
return None
if n % 2 == 1:
return sorted(lst)[n//2]
else:
return sum(sorted(lst)[n//2-1:n//2+1])/2.0 | 5da9c14d7fe94dc9f2935c96b88b265583122127 | 585,780 |