content (string, 39 to 9.28k chars) | sha1 (string, 40 chars) | id (int64, 8 to 710k)
---|---|---|
def convert_single_linear_to_srgb(color_value):
"""
    Converts a single linear-light RGB channel value to the sRGB color space.
    :param color_value: float, single color value in the 0-1 range (for example, the red channel)
    :return: float, the value converted to sRGB
"""
a = 0.055
if color_value <= 0.0031308:
return color_value * 12.92
return (1 + a) * pow(color_value, 1 / 2.4) - a
|
80c38f241a6f1bd6e89eecc07140ecb14d1e3b78
| 127,403 |
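A brief usage sketch for the converter above (illustrative values; assumes the function is in scope):

convert_single_linear_to_srgb(0.002)  # 0.02584 -- linear segment (<= 0.0031308)
convert_single_linear_to_srgb(0.5)    # ~0.7354 -- gamma segment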
import six
def to_bed12(f, db, child_type='exon', name_field='ID'):
"""
Given a top-level feature (e.g., transcript), construct a BED12 entry
Parameters
----------
f : Feature object or string
This is the top-level feature represented by one BED12 line. For
a canonical GFF or GTF, this will generally be a transcript.
db : a FeatureDB object
        This is needed to get the children for the feature
child_type : str
Featuretypes that will be represented by the BED12 "blocks". Typically
"exon".
name_field : str
Attribute to be used in the "name" field of the BED12 entry. Usually
"ID" for GFF; "transcript_id" for GTF.
"""
if isinstance(f, six.string_types):
f = db[f]
children = list(db.children(f, featuretype=child_type, order_by='start'))
sizes = [len(i) for i in children]
starts = [i.start - f.start for i in children]
fields = [
f.chrom,
f.start - 1, # GTF -> BED coord system
f.stop,
f.attributes.get(name_field, ['.'])[0],
f.score,
f.strand,
f.start,
f.stop,
'0,0,0',
len(children),
','.join(map(str, sizes)),
','.join(map(str, starts))
]
return '\t'.join(map(str, fields)) + '\n'
|
d4042b979840b0eb41d23690182a030278473b1b
| 615,371 |
def gap(baseline, heuristic):
"""Computes the heuristic gap"""
return 100 * (heuristic - baseline) / baseline
|
fca62ef1146c8ba87c8a9eaf53dcd5d017998f2c
| 488,570 |
import requests
import time
def _fetch_page(token: dict,
endpoint: str,
sleep: int,
max_repeats: int) -> dict:
"""
Fetch a single page from the securefileshare site.
Parameters
----------
token: dict
Dictionary containing 'access_token'
endpoint: str
API endpoint
sleep: int
Delay in seconds between requests
max_repeats: int
Maximum attempts
Returns
-------
dict
        Dictionary of page content. If max_repeats is exceeded, a TimeoutError is raised.
"""
header = {"Authorization": f"Bearer {token}"}
for i in range(max_repeats):
json = requests.get(endpoint,
headers=header).json()
if json.get("message") != "Authorization has been denied for this request.":
if json.get("message") != "Internal Server Error":
if json.get("items") is not None:
return json
time.sleep(sleep)
raise TimeoutError("Failed to fetch page...timed out")
|
489172968a0290b39d080965d9120a341ea62f06
| 252,767 |
import importlib
def import_thing(import_string: str):
"""Obtain an object from a valid qualname (or fully qualified name)
Parameters
----------
import_string : str
the qualname
Returns
-------
Any :
the object from that namespace
"""
splitted = import_string.split('.')
if len(splitted) > 1:
# if the string has a dot, import the module and getattr the object
obj = splitted[-1]
mod = ".".join(splitted[:-1])
module = importlib.import_module(mod)
result = getattr(module, obj)
else:
# if there is no dot, import and return the module
mod = splitted[0]
result = importlib.import_module(mod)
return result
|
ea74544c1e1498ad01f2ff4e691c090911075aeb
| 398,965 |
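A minimal usage sketch for import_thing with standard-library names (assumes the function is in scope):

import_thing('math')          # returns the math module
import_thing('os.path.join')  # imports os.path and returns its join function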
def sort_based_values(sims_dict):
"""
:param sims_dict: dict
{
'1': 0.2,
"2": 0.4,
"3": 0.3,
"4": 0.23
}
    :return: keys sorted by value in descending order, e.g. ['2', '3', '4', '1']
"""
    items = sorted(sims_dict.items(), key=lambda item: item[1], reverse=True)
    return [key for key, _ in items]
|
3118e5ea017527413b24a8085361e47c34b423ce
| 361,678 |
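Applying the function to the docstring's own example dict (assumes the function is in scope):

sort_based_values({'1': 0.2, '2': 0.4, '3': 0.3, '4': 0.23})  # ['2', '3', '4', '1']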
from typing import Dict
from typing import Any
from typing import Iterable
def check_same_and_different(a: Dict[str, Any], b: Dict[str, Any],
same: Iterable[str], different: Iterable[str]) -> bool:
"""Return true iff for all properties in same that are in a, they
exist in b and have the same value, and for all properties in
different that are in a, they exist in b and are different.
"""
same_ok = True
for prop in same:
if prop in a:
if prop not in b or a[prop] != b[prop]:
same_ok = False
break
if not same_ok:
return False
diff_ok = True
for prop in different:
if prop in a:
if prop not in b or a[prop] == b[prop]:
diff_ok = False
break
return diff_ok
|
0b4610784950ec1d75f0181e4d23e07adf6bc954
| 127,680 |
def raw_to_regular(exitcode):
"""
This function decodes the raw exitcode into a plain format:
For a regular exitcode, it returns a value between 0 and 127;
For signals, it returns the negative signal number (-1 through -127)
For failures (when exitcode < 0), it returns the special value -128
"""
if not isinstance(exitcode, int):
return exitcode
if exitcode < 0:
return -128
if (exitcode & 127) > 0:
# Signal
return -(exitcode & 127)
return exitcode >> 8
|
1146bc0303886489f10864c9511280dff8047710
| 36,727 |
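A usage sketch with os.waitpid-style raw statuses (illustrative values; assumes the function is in scope):

raw_to_regular(3 << 8)  # 3    -- regular exit code 3
raw_to_regular(9)       # -9   -- killed by signal 9 (SIGKILL)
raw_to_regular(-1)      # -128 -- failure sentinel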
def is_in(x, l):
"""Transforms into set and checks existence
Given an element `x` and an array-like `l`, this function turns `l` into a
set and checks the existence of `x` in `l`.
Parameters
--------
x : any
l : array-like
Returns
--------
bool
"""
return x in set(l)
|
4e99e946177f85493f6404f170fadca4b5c1e8f2
| 630,354 |
def scale(kernel):
""" Scales a 2D array to [0, 255] """
minimum = min(min(k) for k in kernel)
maximum = max(max(k) for k in kernel)
return [[int(255 * (k - minimum) / (maximum - minimum)) for k in row]
for row in kernel]
|
bb9f6aa1603547fef1629724d3366c5b05487edf
| 595,826 |
def split_reaches(l, new_reach_pts):
"""splits l into sections where new_reach_pts contains the starting indices for each slice"""
new_reach_pts = sorted(new_reach_pts)
sl = [l[i1:i2] for i1, i2 in zip(new_reach_pts, new_reach_pts[1:])]
last_index = new_reach_pts[-1]
sl.append(l[last_index:])
return sl
|
54186c652d9131d119242a5a7cb3d37b9fe15e62
| 427,377 |
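A usage sketch (illustrative; note that elements before the first start index are dropped):

split_reaches(list(range(10)), [0, 3, 7])  # [[0, 1, 2], [3, 4, 5, 6], [7, 8, 9]]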
def label_mapper(raw_labels, new_labels):
"""Map some raw labels into new labels.
When dealing with GEO DataSets it is very common that each GSM sample has
a different phenotye (e.g. 'Brain - 001', 'Brain - 002', ...). This
function maps these raw labels into new homogeneous labels.
Parameters
-----------
raw_labels : list of strings
list of unpreprocessed labels
new_labels : list of strings
list of labels to map
Returns
-----------
    y : list of strings, length n_samples
        the modified label vector
Examples
-----------
>>> raw_labels = ['Brain - 001', 'Brain - 002', 'Muscle - 001', 'Muscle - 002']
>>> label_mapper(raw_labels, ['Brain', 'Muscle'])
['Brain', 'Brain', 'Muscle', 'Muscle']
"""
y = []
for rl in raw_labels:
for nl in new_labels:
if nl in rl:
y.append(nl)
break
else:
y.append(rl)
# print('No mapping rule for %s', rl)
return y
|
67aaa329374169f61414032b0949344437d16022
| 11,663 |
def find_nearest_index(vertices, x_from, y_from, vertice_count):
"""
Find the index of the vertex that is the closest to specified
coordinate (x_from, y_from).
"""
    min_distance_squared = float('inf')  # no fixed cap, so far-away nearest vertices are still found
    nearest_index = 0
x_from *= 1000
y_from *= 1000
for index, vertice in enumerate(vertices):
dx = (vertice[0] * 1000) - x_from
dy = (vertice[1] * 1000) - y_from
square_dist = dx*dx + dy*dy
if square_dist < min_distance_squared and index < vertice_count:
nearest_index = index
min_distance_squared = square_dist
return nearest_index
|
65867ab47c9632dae4b90c7402799846faf2937f
| 600,584 |
def get_images_to_prelabel_query(
label: str,
version: int,
predict_annotated: bool = False,
limit: int = 100000,
exclude_origins = ["archive_stuttgart, archive_würzburg", "gi_genius_retrospective", "2nd_round", "3rd_round", "4th_round", "5th_round", "6th_round"]
):
"""Function expects a label (str) and returns a query to find images which
have no prediction or an outdated prediction for this label.
Args:
label (str): label to query for
        version (int): the current prelabel AI version
        predict_annotated (bool, optional): whether to match images that already have an annotation for this label. Defaults to False.
        limit (int, optional): maximum number of images to return. Defaults to 100000.
        exclude_origins (list, optional): image origins to exclude from the query.
Returns:
List: List of dictionaries containing the elements for a pymongo query aggregation
"""
return [
{
"$match": {
"$and": [
{
"$or": [
{f"predictions.{label}": {"$exists": False}},
{f"predictions.{label}.version": {"$lt": version}},
]
},
{f"labels.annotation.{label}": {"$exists": predict_annotated}},
{"origin": {"$nin": exclude_origins}}
]
}
},
{"$sample": {"size": limit}},
]
|
81ec66483a045acd6752e3d4c1c03f39843ca825
| 463,748 |
from typing import List
def parse_board(data: str) -> List[List[bool]]:
"""Parse the board from data."""
def on_or_off(token: str) -> bool:
"""Parse a token from data as a light turned on/off."""
if token == '#':
return True
if token == '.':
return False
raise ValueError('Invalid token: {}'.format(token))
def parse_line(line: str) -> List[bool]:
"""Parse a line from data as a row of the board."""
return list(map(on_or_off, line))
return list(map(parse_line, data.splitlines()))
|
1f9c6144852d53e04b3b1e01cfd08069adafec70
| 481,637 |
def solution(n: int = 1000) -> int:
"""
returns number of fractions containing a numerator with more digits than
the denominator in the first n expansions.
>>> solution(14)
2
>>> solution(100)
15
>>> solution(10000)
1508
"""
prev_numerator, prev_denominator = 1, 1
result = []
for i in range(1, n + 1):
numerator = prev_numerator + 2 * prev_denominator
denominator = prev_numerator + prev_denominator
if len(str(numerator)) > len(str(denominator)):
result.append(i)
prev_numerator = numerator
prev_denominator = denominator
return len(result)
|
07f07e82642e42232fd8c02b1d607eeaa589a11a
| 669,217 |
import itertools
def sum_matching(puzzle_input, step=1):
"""For a string of numbers, sum digits if they match the digit `step` away.
NOTE: The calculation is circular, so the digit after the last digit is
the first digit in the list.
Usage:
    >>> sum_matching("1122")
    3
    >>> sum_matching("1212", step=2)
    6
"""
#Make the string circular
puzzle_input += puzzle_input[0:step]
#Create two iterables from the input
first, second = itertools.tee(puzzle_input)
#Advance one iterable forward the number of steps
for _ in range(step):
next(second)
#Sum numbers if the number matches
return sum(int(x) for x, y in zip(first, second) if x == y)
|
c355025b9894829574af68079c39cd39a4c2cecc
| 277,165 |
def _ptid_to_rid(ptid, roster, ptid_label='PTID'):
"""Returns roster id for a given patient id
ptid_label values : 'PTID', 'SCRNO'
"""
rid = roster[roster[ptid_label] == ptid]['RID'].values
if len(rid) > 0:
return rid[0]
else:
return ''
|
48fbc63b2719b5bbd3ec56d23e804f6a863b82be
| 555,153 |
def urn_exists_in_rapidpro(client, uuid, urn):
"""
Return True if a remote contact with this urn
exists in RapidPro and has a different uuid than uuid.
"""
contacts = client.get_contacts(urn=urn)
    return bool(contacts) and contacts[0].uuid != uuid
|
5eb5e426ac549074836596c8b74970902cd19dc8
| 155,859 |
def to_upper_underscore(name: str) -> str:
"""Transform package name into uppercase with underscores.
Example:
    >>> to_upper_underscore('a-pkg')
    'A_PKG_'
"""
return name.replace("-", "_").upper() + "_"
|
9d703b535c3724b0ae68913ed6d0c7b46b6c5b95
| 123,227 |
from io import StringIO
import csv
def encode_csv(dataset):
"""Takes a list of rows and returns it as a CSV string.
Parameters:
dataset (list[row]): A list of rows. Each row is a list of primitive
values.
Returns:
str: A CSV representation of the dataset.
>>> encode_csv([[1, 2, 3], [4, 5, 6]])
'1,2,3\\r\\n4,5,6\\r\\n'
>>> encode_csv([['a, b, c', 'xyz', None, 1.0]])
'"a, b, c",xyz,,1.0\\r\\n'
>>> encode_csv([['he said, "well, ok" and then left', 'foo', 'bar']])
'"he said, ""well, ok"" and then left",foo,bar\\r\\n'
"""
result = StringIO()
writer = csv.writer(result, quoting=csv.QUOTE_MINIMAL)
for row in dataset:
writer.writerow(row)
return result.getvalue()
|
284c3316c6987f5cc315e3b5b4806d5f274898ba
| 591,900 |
import random
def uniform_positive_integers_with_sum(count, sum_):
"""Returns list of size `count` of integers >= 1, summing to `sum_`."""
assert sum_ >= 0
if count > sum_:
raise ValueError('Cannot find {} numbers >= 1 with sum {}'
.format(count, sum_))
if count == 0:
return []
# Select `count - 1` numbers from {1, ..., sum_ - 1}
separators = random.sample(list(range(1, sum_)), count - 1)
separators = sorted(separators)
return [right - left
for left, right in zip([0] + separators, separators + [sum_])]
|
27f61638e87b8c2a4025f4d43c993a6320615f9d
| 202,765 |
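A usage sketch; the output is random, but these invariants always hold (assumes the function is in scope):

parts = uniform_positive_integers_with_sum(3, 10)
assert sum(parts) == 10 and all(p >= 1 for p in parts)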
def afl(x):
"""
    If no 'l' key is included, add a list of empty strings the same length as the value of key 'a'.
"""
if 'l' in x:
return x
else:
x.update({'l': ['']*len(x['a'])})
return x
|
bc36298773a1d49b43d2996d3fe6097190442488
| 236,338 |
def _derivate_diff_eq(listofpoly):
"""
Let a differential equation a0(x)y(x) + a1(x)y'(x) + ... = 0
where a0, a1,... are polynomials or rational functions. The function
returns b0, b1, b2... such that the differential equation
b0(x)y(x) + b1(x)y'(x) +... = 0 is formed after differentiating the
former equation.
"""
sol = []
a = len(listofpoly) - 1
sol.append(listofpoly[0].diff())
for i, j in enumerate(listofpoly[1:]):
sol.append(j.diff() + listofpoly[i])
sol.append(listofpoly[a])
return sol
|
a30026fdc25438f5f94bcbd61902af919341d92b
| 522,063 |
def personal_best(scores):
    """
    :param scores: list of high scores
    :return: int - the highest score from the list
    """
    return max(scores)
|
1abdc775ad8cb152e5affa48a0ee51c51099776f
| 467,730 |
def get_node(element):
"""
Convert an OSM node element into the format for a networkx node.
Parameters
----------
element : dict
an OSM node element
Returns
-------
dict
"""
useful_tags_node = ['ref', 'highway', 'route_ref']
node = {}
node['y'] = element['lat']
node['x'] = element['lon']
node['osmid'] = element['id']
if 'tags' in element:
for useful_tag in useful_tags_node:
if useful_tag in element['tags']:
node[useful_tag] = element['tags'][useful_tag]
return node
|
97164f0330b1d54cddeab65184e18b5a4c3e5a92
| 155,528 |
import requests
def http_get(url):
"""
    Sends an HTTP GET request and raises an exception for error status codes.
:param str url: the URL to request
"""
response = requests.get(url)
response.raise_for_status()
return response
|
581fae19107343d24c202fa23d0f82e4230c4d12
| 578,946 |
def create_zero_matrix(rows: int, columns: int) -> list:
"""
Creates a matrix rows * columns where each element is zero
:param rows: a number of rows
:param columns: a number of columns
:return: a matrix with 0s
e.g. rows = 2, columns = 2
--> [[0, 0], [0, 0]]
"""
if not isinstance(rows, int) or not isinstance(columns, int) or \
isinstance(rows, bool) or isinstance(columns, bool):
return []
    if columns <= 0:
        return []
    return [[0] * columns for _ in range(rows)]
|
4e4a759d318f33f962a1d924c4893d5af7043b94
| 682,281 |
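A usage sketch (assumes the function is in scope):

create_zero_matrix(2, 3)      # [[0, 0, 0], [0, 0, 0]]
create_zero_matrix(2, 'two')  # [] -- non-int input is rejected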
def _check_params(params, field_list):
"""
Helper to validate params.
Use this in function definitions if they require specific fields
to be present.
:param params: structure that contains the fields
:type params: ``dict``
:param field_list: list of dict representing the fields
[{'name': str, 'required': True/False', 'type': cls}]
:type field_list: ``list`` of ``dict``
    :return: ``(True, '')`` if validation passes, otherwise ``(False, error_message)``
    :rtype: ``tuple`` of (``bool``, ``str``)
"""
for d in field_list:
if not d['name'] in params:
if d['required'] is True:
return (False, "%s is required and must be of type: %s" %
(d['name'], str(d['type'])))
else:
if not isinstance(params[d['name']], d['type']):
return (False,
"%s must be of type: %s" % (d['name'], str(d['type'])))
return (True, '')
|
cafbea41b4b2ec4c627968f9111cbea4046d1706
| 355,409 |
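A usage sketch with a hypothetical field spec (assumes the function is in scope):

fields = [{'name': 'host', 'required': True, 'type': str}]
_check_params({'host': 'example.com'}, fields)  # (True, '')
_check_params({}, fields)                       # (False, "host is required and must be of type: <class 'str'>")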
import math
def utilityY(c1,c2,rho):
"""Compute the utility of the young agents
Args:
c1 (float): consumption of the young generation
c2 (float): consumption of the old generation
rho (float): discount parameter
Returns:
        (float): utility of the young agents
    """
    if rho < -1:
        return "rho is too low"
    return math.log(c1) + (1 / (1 + rho)) * math.log(c2)
|
6efd01d4a8f85a61f05eb12efca31b63f9f8aeaf
| 639,790 |
def format_txt(signature):
"""
Remove excess spaces and newline characters.
"""
return ' '.join(' '.join(signature.split('\n')).split())
|
c0a8251219f4c9454666e159f695ffe92ff9e476
| 105,990 |
def build_command(cmd, arg_list=None):
    """
    Builds the command string to send to the Mecademic Robot
    from the function name and arguments the command needs
    :param cmd: command name to send to the Mecademic Robot
    :param arg_list: list of arguments the command requires
    :return command: final command for the Mecademic Robot
    """
    # avoid a mutable default argument and build the argument string with one join
    command = cmd
    if arg_list:
        command += '(' + ','.join(str(arg) for arg in arg_list) + ')'
    return command
|
2d6f2b6328a44606be20d3b62d151f134e7234b8
| 304,148 |
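A usage sketch with hypothetical robot commands (assumes the function is in scope):

build_command('GetStatusRobot')                        # 'GetStatusRobot'
build_command('MoveJoints', [0, 0, 0, 170, 115, 175])  # 'MoveJoints(0,0,0,170,115,175)'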
def built_known_values(missing_values,simplices):
"""
    Return the non-missing simplices and their cochains in each dimension.
Parameters
----------
missing_values: list of dictionaries
List of dictionaries, one per dimension d. The dictionary's keys are the missing d-simplices.
The dictionary's values are the indexes of the simplices in the boundary
and Laplacian matrices.
simplices: list of dictionaries
List of dictionaries, one per dimension d. The size of the dictionary
is the number of d-simplices. The dictionary's keys are sets (of size d
+ 1) of the 0-simplices that constitute the d-simplices. The
dictionary's values are the indexes of the simplices in the boundary
and Laplacian matrices.
Returns
----------
known_values: list of dictionaries
List of dictionaries, one per dimension d. The dictionary's keys are not missing d-simplices.
The dictionary's values are their cochains.
"""
max_dim=len(simplices)
known_values = [dict() for _ in range(max_dim+1)]
for i in range(max_dim):
real_simp=list(set(simplices[i].keys())-set(missing_values[i].keys()))
        for simplex in real_simp:
            dim = len(simplex)
            known_values[i][simplex] = simplices[dim - 1][simplex]
    return known_values
|
dbe7ab99917b82e072fdeb326b2ab42424458479
| 215,487 |
def append_write(filename="", text=""):
"""
Function that appends a string at the end of a text file (UTF8) and returns
the number of characters added.
Args:
filename (str): The file name
text (str): String to write
Returns:
The number of characters written
"""
with open(filename, 'a', encoding='utf8') as f:
return f.write(text)
|
bf5f02b6dff6e2afa989276d0dfd5a59b553cf43
| 250,354 |
def is_mention_subset(small_mention_text, large_mention_text):
"""
Check if the smaller mention is a "subset" of the larger mention.
We define "subset" in a very specific way:
1. Subsequence:
Example: Barack is a subset of Barack Obama,
John Kelly is a subset of John Kelly Smith,
Kelly Smith is a subset of John Kelly Smith, etc.
And, Barack is a subset of Barack.
2. The smaller string is equal to the larger string minus the words in the
middle.
Example: John Smith is a subset of John Jackson Smith.
"""
small_mention_tokens = small_mention_text.split()
large_mention_tokens = large_mention_text.split()
if small_mention_text in large_mention_text:
return True
elif len(large_mention_tokens) > 2:
if small_mention_tokens == \
[large_mention_tokens[0], large_mention_tokens[-1]]:
return True
return False
|
553e55308523e1634da940c9c2504d82282caefc
| 562,189 |
def split_kwargs(kwargs_dict, prefix='shadow_'):
"""
Splits dictionary into two new dicts by checking the keys for a given prefix. Those key-value pairs
with the prefix will be added to a new dictionary with prefix removed from the keys.
:param kwargs_dict: original dict to be checked and split. As dict
:param prefix: prefix to search keys for. As string
    :return: copy of original dict with prefixed keys removed, new dict based on prefixed keys
"""
input_dict = kwargs_dict.copy()
output_dict = {}
shadow_dict = {}
for k, v in input_dict.items():
        if k.startswith(prefix):  # find keys with prefix (plain string match, no regex pitfalls)
            shadow_k = k[len(prefix):]  # remove prefix
shadow_dict[shadow_k] = v
else:
output_dict[k] = v
return output_dict, shadow_dict
|
e5f07d03e5ffa44e69ed05e11e97bb58549d3246
| 107,809 |
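A usage sketch with the default 'shadow_' prefix (illustrative keys; assumes the function is in scope):

split_kwargs({'color': 'red', 'shadow_color': 'black', 'width': 2})
# -> ({'color': 'red', 'width': 2}, {'color': 'black'})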
def _filter(objects, **kwargs):
"""
Filters a list of DICOM classes by DICOM tags and values.
Example
-------
instances = _filter(instances, PatientName="Harry")
"""
filtered = []
for obj in objects:
select = True
for tag, value in kwargs.items():
if getattr(obj, tag) != value:
select = False
break
if select:
filtered.append(obj)
return filtered
|
eeee8c8844dafa46c64b830aac75f85ab9075b6c
| 253,063 |
def convert(angle):
"""
Convert a `skyfield` Angle to an EXIF-appropriate
representation (rationals)
e.g. 98° 34' 58.7 to "98/1,34/1,587/10"
Return a tuple containing a boolean and the converted angle,
with the boolean indicating if the angle is negative.
"""
sign, degrees, minutes, seconds = angle.signed_dms()
exif_angle = f'{degrees:.0f}/1,{minutes:.0f}/1,{seconds*10:.0f}/10'
return sign < 0, exif_angle
|
8b80fe529d07fc7e4534282f0311cea7983efe88
| 166,759 |
def fmeasure(R,P):
"""Harmonic mean of recall and precision."""
value = 0
if R > 0 or P > 0:
value = (2 * R * P)/(R+P)
return value
|
70c4a324655f6a5c3df2de6064618803ec3b55c8
| 472,184 |
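A usage sketch (illustrative recall/precision values; assumes the function is in scope):

fmeasure(0.5, 0.75)  # 0.6 -- harmonic mean of R=0.5 and P=0.75
fmeasure(0, 0)       # 0   -- guarded against division by zero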
def _get_config_parameter(config, section, parameter_name, default_value):
"""
Get the parameter if present in the configuration otherwise returns default value.
:param config the configuration parser
:param section the name of the section
:param parameter_name: the name of the parameter
:param default_value: the default to propose the user
:return:
"""
return config.get(section, parameter_name) if config.has_option(section, parameter_name) else default_value
|
2431c37eab3396b79f4f9e649a7cc25fc27208dc
| 12,065 |
import stringprep
def c12_mapping(char):
"""Do mapping of RFC 3454 C.1.2 space characters to ' '.
:Parameters:
- `char`: Unicode character to map.
:returns: u" " if there is `char` code in the table, `None` otherwise.
"""
if stringprep.in_table_c12(char):
return u" "
else:
return None
|
d8cf80a7061168855d437bd8f42a234e47ef7036
| 227,030 |
def is_sequence(x):
"""Is x a sequence? We say it is if it has a __getitem__ method."""
return hasattr(x, '__getitem__')
|
f55a8ab63e4018eda2fac7de15fc8684245c1126
| 114,515 |
def _reconstruct_input_from_dict(x):
"""Reconstruct input from dict back to a list or single object.
Parameters
----------
x : dict
Returns
-------
out : pandas.DataFrame or pandas.Series or callable or list
"""
out = list(x.values())
if len(out) == 1:
out = out[0]
return out
|
e83fbe2a4b50a8a9edfaf3073015e0c47f7d9d3c
| 150,977 |
def update_target_graph(actor_tvars, target_tvars, tau):
""" Updates the variables of the target graph using the variable values from the actor, following the DDQN update
equation. """
op_holder = list()
    # .assign() blends each target variable with the corresponding actor variable (soft update)
for idx, variable in enumerate(target_tvars):
op_holder.append(
target_tvars[idx].assign(
(variable.value() * tau) + ((1 - tau) * actor_tvars[idx].value())
)
)
return op_holder
|
15f0d192ff150c0a39495b0dec53f18a8ae01664
| 4,072 |
import time
def to_timestamp(datetime):
""" Convert xmlrpclib.DateTime string representation to UNIX timestamp. """
return time.mktime(time.strptime('%s UTC' % datetime.value, '%Y%m%dT%H:%M:%S %Z')) - time.timezone
|
396059463fd77007af661b601474f7fcda1ce512
| 167,295 |
def _is_str(s):
"""
True iff s is a string (checks via duck typing).
"""
return hasattr(s, 'capitalize')
|
206530dd9eedef9e37db9266d6226f1007d50875
| 607,198 |
def mfile_to_lines(mfile):
"""
Read the lines from an mfile
Parameters
----------
mfile : string
Full path to an m file
"""
    # Open read-only and return all of the file's lines:
with open(mfile) as fid:
return fid.readlines()
|
843188720fe1c55eb34b62a6ddfa349f600f496e
| 602,219 |
def get_charge_style(
charge_styles, cutoffs, ewald_accuracy=None, dsf_damping=None
):
"""Get the Charge_Style section of the input file
Parameters
----------
charge_styles : list
list of charge styles, one for each box
    cutoffs : list
list of coulombic cutoffs, one for each box. For a box with
charge style 'none', the cutoff should be None
ewald_accuracy : float, optional
accuracy of ewald sum. Required if charge_style == ewald
dsf_damping : float, optional
value for dsf damping.
"""
assert len(charge_styles) == len(cutoffs)
valid_charge_styles = ["none", "cut", "ewald", "dsf"]
for charge_style in charge_styles:
if charge_style not in valid_charge_styles:
raise ValueError(
"Unsupported charge_style: {}. Supported options "
"include {}".format(charge_style, charge_styles)
)
if charge_style == "ewald":
if ewald_accuracy is None:
raise ValueError(
"Ewald selected as the charge style but "
"no ewald accuracy provided"
)
inp_data = """
# Charge_Style"""
for charge_style, cutoff in zip(charge_styles, cutoffs):
if charge_style == "none":
inp_data += """
{charge_style}""".format(
charge_style=charge_style
)
elif charge_style == "cut":
inp_data += """
coul {charge_style} {cutoff}""".format(
charge_style=charge_style, cutoff=cutoff
)
elif charge_style == "ewald":
inp_data += """
coul {charge_style} {cutoff} {accuracy}""".format(
charge_style=charge_style,
cutoff=cutoff,
accuracy=ewald_accuracy,
)
elif charge_style == "dsf":
if dsf_damping is not None:
inp_data += """
coul {charge_style} {cutoff} {damping}""".format(
charge_style=charge_style,
cutoff=cutoff,
damping=dsf_damping,
)
else:
inp_data += """
coul {charge_style} {cutoff}""".format(
charge_style=charge_style, cutoff=cutoff
)
inp_data += """
!------------------------------------------------------------------------------
"""
return inp_data
|
15be9d93701fdf1e3ec755e05eb757e040735aac
| 390,938 |
def getFeatureName(feature, default):
"""extract the name of the feature from the definition"""
if 'name' in feature:
return feature['name']
else:
return default
|
8c22388033ff4af21b1428dbc79c9e4ad5715175
| 232,781 |
from pathlib import Path
import json
def remove(guild, key: str):
"""
Removes a guild from a given key
Parameters:
guild (str/int): ID of the guild
key (str): The key to modify
Returns:
Success (bool): Did it succeed
"""
data = {}
guild = str(guild)
if not Path(f'./data/guild_{key}.json').is_file():
return False
with open(f'./data/guild_{key}.json', 'r') as filein:
data = json.load(filein)
    if guild not in data:
        return False
    data.pop(guild)
with open(f'./data/guild_{key}.json', 'w') as fileout:
json.dump(data, fileout, indent=4)
return True
|
00153f1433a0c80f8af7acf8aba0a1f4b426d80c
| 440,747 |
from typing import Iterable
from typing import List
def _unique(seq: Iterable) -> List:
"""order-preserving unique elements in `seq`."""
out = []
seen = set()
for x in seq:
if x in seen:
continue
out.append(x)
seen.add(x)
return out
|
e633b1e6b56872d61a64ffe2b0dbf46d49f01906
| 140,891 |
def version2string(tversion):
""" Converts version tuple to string """
s = ""
if tversion[3] != 0:
s = "{}.{}.{}.{}".format(tversion[0], tversion[1], tversion[2], tversion[3])
elif tversion[2] != 0:
s = "{}.{}.{}".format(tversion[0], tversion[1], tversion[2])
else:
s = "{}.{}".format(tversion[0], tversion[1])
if tversion[4] != "":
s = s + "-" + tversion[4]
return s
|
707d36722f4e32ad2d7fb85826ff98f6cb3efc8f
| 287,074 |
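A usage sketch with hypothetical version tuples (assumes the function is in scope):

version2string((1, 2, 0, 0, ""))      # '1.2'
version2string((1, 2, 3, 0, "beta"))  # '1.2.3-beta'
version2string((1, 2, 3, 4, ""))      # '1.2.3.4'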
def squared_dist(x1, x2):
"""Computes squared Euclidean distance between coordinate x1 and coordinate x2"""
return sum([(i1 - i2)**2 for i1, i2 in zip(x1, x2)])
|
c4ae86e54eb1630c20546a5f0563d9db24eebd3a
| 20,838 |
def fetch_following(api,name):
"""
Given a tweepy API object and the screen name of the Twitter user,
    return a list of dictionaries containing the followed user info
with keys-value pairs:
name: real name
screen_name: Twitter screen name
followers: number of followers
created: created date (no time info)
image: the URL of the profile's image
To collect data: get a list of "friends IDs" then get
the list of users for each of those.
"""
id_list = api.friends_ids(name)
following_list = []
for ID in id_list:
person_dict = {}
acc_info = api.get_user(ID)
person_dict['name'] = acc_info.name
person_dict['screen_name'] = acc_info.screen_name
person_dict['followers'] = acc_info.followers_count
person_dict['created'] = acc_info.created_at.date()
person_dict['image'] = acc_info.profile_image_url_https
following_list.append(person_dict)
return following_list
|
d55c5bc976a1076e28bc6e64dbd5eebcc8f42a1d
| 391,568 |
def first_location_preposition(location_string):
"""
In some cases google can't extract a response from the original
sentence but will be able to do so from a sub-sentence starting at the first location indicator
e.g.
"גבר נהרג בתאונת דרכים בגליל התחתון"
->no results
"בתאונת דרכים בגליל התחתון"
->results
"""
# iterate over sentences, find first location preposition (very crudely) and trim the prefix of that sentence
for sentence in location_string.split("."):
trimmed_location_tokens = []
found = False
for token in sentence.split():
            if token.startswith("ב"):  # "ב" is the Hebrew prepositional prefix "in/at"
found = True
if found:
trimmed_location_tokens.append(token)
if found:
return " ".join(trimmed_location_tokens)
return ""
|
df9abd1823f43a2056e308d8e1a22d3d97f8a561
| 253,614 |
def getFilename(f):
"""Get the filename from an open file handle or filename string."""
if isinstance(f, str):
return f
return f.name
|
62a0e4301ee5284bac569134f3cf89edec7762a4
| 662,874 |
def three_pad(s):
"""Pads a string, s, to length 3 with trailing X's"""
if len(s) < 3:
return three_pad(s + "X")
return s
|
f15c2bb8fdc13d0237e3c3460716bd1208638567
| 508,871 |
import io
import base64
def get_image_html_tag(fig, format="svg"):
"""
Returns an HTML tag with embedded image data in the given format.
:param fig: a matplotlib figure instance
:param format: output image format (passed to fig.savefig)
"""
stream = io.BytesIO()
# bbox_inches: expand the canvas to include the legend that was put outside the plot
# see https://stackoverflow.com/a/43439132
fig.savefig(stream, format=format, bbox_inches="tight")
data = stream.getvalue()
if format == "svg":
return data.decode("utf-8")
data = base64.b64encode(data).decode("utf-8")
return f"<img src=\"data:image/{format};base64,{data}\">"
|
f5c59a6f4f70fb6616cec4619d8cbf9ca2e28529
| 1,308 |
def divisors(num):
"""
Takes a number and returns all divisors of the number, ordered least to greatest
:param num: int
:return: list (int)
"""
    numlist = []
    for check in range(1, num + 1):
        if num % check == 0:
            numlist.append(check)
    return numlist
|
6c6d5e2cfac4e4139ec60dba42a895d72f99215e
| 549,913 |
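A usage sketch (assumes the function is in scope):

divisors(12)  # [1, 2, 3, 4, 6, 12]
divisors(7)   # [1, 7]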
def get_time_group_max(time_group):
"""Return dictionary with max values for each time group."""
dict_time_max = {'year': 5000, # dummy large value for year since unbounded ...
'season': 4,
'quarter': 4,
'month': 12,
'weekofyear': 53,
'dayofweek': 7, # init 0, end 6
'dayofyear': 366,
'day': 31,
'hour': 24, # init 0, end 23
'minute': 60, # init 0, end 59
'second': 60, # init 0, end 59
}
return dict_time_max[time_group]
|
23413dca869c4a70d145c89e74985f38b72d0fa8
| 572,412 |
def _get_member_types(tag):
"""Return the types of the members, handle wildcards."""
at1 = tag.attrib.get("type1")
at2 = tag.attrib.get("type2")
at3 = tag.attrib.get("type3")
at4 = tag.attrib.get("type4")
member_types = filter(lambda x: x is not None, [at1, at2, at3, at4])
member_types = [
"*" if mem_type == "" else mem_type for mem_type in member_types
]
return member_types or None
|
11a32a067f3675289eaf470dac09f46b3ca6d308
| 513,492 |
def url_join(*parts: str) -> str:
""" Join the different url parts with forward slashes. """
return "/".join([part.strip("/") for part in parts]) + ("/" if parts and parts[-1].endswith("/") else "")
|
f99c73559dc94614c67cd36aab519d98c80c6d19
| 297,652 |
def snake_to_camel(s: str) -> str:
"""Convert string from snake case to camel case."""
fragments = s.split('_')
return fragments[0] + ''.join(x.title() for x in fragments[1:])
|
5593e29ed345861dd0bada4a163c5006042a9fdb
| 673,162 |
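A usage sketch (illustrative identifiers; assumes the function is in scope):

snake_to_camel('max_retry_count')  # 'maxRetryCount'
snake_to_camel('id')               # 'id' -- no underscores, returned unchanged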
from typing import Union
def bars_to_atmospheres(bar: float, unit: str) -> Union[float, str]:
"""
This function converts bar to atm
Wikipedia reference: https://en.wikipedia.org/wiki/Standard_atmosphere_(unit)
Wikipedia reference: https://en.wikipedia.org/wiki/Bar_(unit)
>>> bars_to_atmospheres(36, "bar")
35.529237601776465
>>> bars_to_atmospheres("57.6", "bar")
56.84678016284234
>>> bars_to_atmospheres(0, "bar")
0.0
>>> bars_to_atmospheres(35, "Pa")
'Invalid unit'
>>> bars_to_atmospheres("barrs", "bar")
Traceback (most recent call last):
...
ValueError: could not convert string to float: 'barrs'
"""
if unit == "bar":
atm = float(bar) / 1.01325
return atm
else:
return "Invalid unit"
|
d460021395af77acda01296710f145e9b52d8594
| 13,937 |
def format_time( seconds ):
"""
Formats the time, in seconds, to a nice format. Unfortunately, the :py:class:`datetime <datetime.datetime>` class is too unwieldy for this type of formatting. This code is copied from :py:meth:`deluge's format_time <deluge.ui.console.utils.format_utils.format_time>`.
:param int seconds: number of seconds.
:returns: formatted string in the form of ``1 days 03:05:04``.
:rtype: str
**Usage**
>>> format_time( 97262 )
'1 days 03:01:02'
"""
minutes = seconds // 60
seconds = seconds - minutes * 60
hours = minutes // 60
minutes = minutes - hours * 60
days = hours // 24
hours = hours - days * 24
return "%d days %02d:%02d:%02d" % (days, hours, minutes, seconds)
|
dba02e146b363b5a468a2a5939d9ab96dafaf838
| 647,506 |
def _pack_into_dict(value_or_dict, expected_keys, allow_subset = False):
"""
Used for when you want to either
a) Distribute some value to all predictors
b) Distribute different values to different predictors and check that the names match up.
:param value_or_dict: Either
a) A value
b) A dict<predictor_name: value_for_predictor>
    :param expected_keys: Names of predictors
    :param allow_subset: If True, the keys of the dict may be a subset of expected_keys instead of an exact match.
    :return: A dict<predictor_name: value_for_predictor>
"""
if not isinstance(value_or_dict, dict):
output_dict = {predictor_name: value_or_dict for predictor_name in expected_keys}
else:
output_dict = value_or_dict
if allow_subset:
assert set(value_or_dict.keys()).issubset(expected_keys), 'Expected a subset of: %s. Got %s' % (expected_keys, value_or_dict.keys())
else:
assert set(expected_keys) == set(value_or_dict.keys()), 'Expected keys: %s. Got %s' % (expected_keys, value_or_dict.keys())
return output_dict
|
858d0c3cd67835e7b33037bbd67b71adbe7e1f99
| 496,025 |
def centre(images):
"""Mapping from {0, 1, ..., 255} to {-1, -1 + 1/127.5, ..., 1}."""
return images / 127.5 - 1
|
73ea422309ffc4c3b90d2f1798ce45e34cc035d1
| 626,971 |
def model_norm(model, pow=(2, 1), vidx=-1):
"""norm of a model (l2 squared by default)
Args:
model (float tensor): scoring model
pow (float, float): (internal power, external power)
vidx (int): video index if only one is computed (-1 for all)
Returns:
(float scalar tensor): norm of the model
"""
q, p = pow
if vidx != -1: # if we use only one video
return abs(model[vidx]) ** (q * p)
return (model ** q).abs().sum() ** p
|
dbdb01b3e2fc6ee8aa6e1725a6544a733173b986
| 68,121 |
def prepare_query(query):
"""
Prepare query to always AND all search terms
Spaces will be replaced with AND. Existing ANDs and ORs will be preserved.
"""
def intersperse_and(phrase):
# For a given phrase, intersperse AND between all words,
# except "and" which is removed
return ' AND '.join([word for word in phrase.split()
if word != 'and'])
# If the query has any ORs, split it into phrases to be ANDed
phrases = query.split(' or ')
# AND words in each phrase, and OR all phrases together
return ' OR '.join(map(intersperse_and, phrases))
|
f84d0277d8d6ba971a1839862dccb88932a4b87b
| 253,888 |
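A usage sketch (illustrative queries; assumes the function is in scope):

prepare_query('red and blue fish')   # 'red AND blue AND fish'
prepare_query('cats or small dogs')  # 'cats OR small AND dogs'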
def minidataframe_has_data(minidataframe):
""" Determines whether MiniDataframe (from observation download) has data in it or not.
:param minidataframe: minidataframe to test [MiniDataframe object].
:return: True iff minidataframe has data [boolean].
"""
if minidataframe.dict is None:
return False
if minidataframe.len() == 0:
return False
return True
|
3f35bee7ceca98b8c54af88177e9a613717f44a3
| 455,669 |
import socket
def get_node_ip_address(address="8.8.8.8:53"):
"""Determine the IP address of the local node.
Args:
address (str): The IP address and port of any known live service on the
network you care about.
Returns:
The IP address of the current node.
"""
ip_address, port = address.split(":")
    s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
    try:
        s.connect((ip_address, int(port)))
        return s.getsockname()[0]
    finally:
        s.close()  # close the socket so it is not leaked
|
039fdb773f1b392669d45bd98a87005e4a463f39
| 655,505 |
def self_neighbors(matches):
"""
Returns a pandas data series intended to be used as a mask. Each row
is True if it is not matched to a point in the same image (good) and
False if it is (bad.)
Parameters
----------
matches : dataframe
the matches dataframe stored along the edge of the graph
containing matched points with columns containing:
matched image name, query index, train index, and
descriptor distance
Returns
-------
: dataseries
Intended to mask the matches dataframe. True means the row is not matched to a point in the same image
and false the row is.
"""
return matches.source_image != matches.destination_image
|
21988f1ff0c2ff855b83427f33f0d01dde07b2fc
| 150,589 |
import json
def _read_json(path): # type: (str) -> dict
"""Read a JSON file.
Args:
path (str): Path to the file.
Returns:
(dict[object, object]): A dictionary representation of the JSON file.
"""
with open(path, "r") as f:
return json.load(f)
|
b7519b9209a2ed9607d4dc91e5fd68b94be36c42
| 187,462 |
def to_string(quantity_or_item: str) -> str:
""" Returns the same string. """
return quantity_or_item
|
e1828ed0ddd6ea3f2db1140a2da2a94965df84b7
| 695,058 |
def convert_token_number(token_number: str) -> int:
"""Helper function to convert the token index from 1 and string based to 0 and int based index.
Parameters
----------
token_number : str
The token number as it appears in the tsv.
Returns
-------
int
The word index as it's needed for the competency triple.
"""
return int(token_number) - 1
|
a6bc7f95c9590e7b594e16dcbc17c46c801ee0a3
| 379,876 |
def findUniqueContours(inlist):
""" Find list of unique contours"""
uniqueContourList = []
for item in inlist:
if item not in uniqueContourList:
uniqueContourList.append(item)
return uniqueContourList
|
72c0957d88658e42ccf07338e6608d28de536cea
| 282,176 |
def split_line(line):
"""
Given a line like | ''[[813 (film)|813]]'' || [[Charles Christie]], [[Scott Sidney]] || [[Wedgwood Nowell]], [[Ralph Lewis (actor)|Ralph Lewis]], [[Wallace Beery]], [[Laura La Plante]] || Mystery || [[Film Booking Offices of America|FBO]]
    Return a list of the strings between '||'
"""
return [cell.strip() for cell in line.split("||")]
|
8a8cc4123c6517b8559ddcac250d5d3fa15053f4
| 450,374 |
import re
def remove_specials(text):
    """
    Removes everything but letters and digits from a string. This is used to create "search names" for games
    in the datastore. This makes it easier to compare the names on HLTB with Steam, and reduces
    the number of games incorrectly reported as not existing on HLTB.
    e.g. - Chivalry: Medieval Warfare -> chivalrymedievalwarfare
    """
    special = r'[^0-9A-Za-z]'
    text = text.lower()
    return re.sub(special, '', text)
|
aed596f89742a4e2b9cff8acc01ac3f3bb6c3e67
| 407,565 |
def find_nearest(array, value):
    """Find the closest value in `array` to the 'value' param"""
    return min(array, key=lambda x: abs(x - value))
|
99c027ebd96d28b4b731d1f6d5e3a0be918c051b
| 563,205 |
def shift_TTD_dailyconservation_idx_rule(M):
"""
Index is (window, day, week, tour type).
The index set gets used in the TTD_TTDS_con and TTD_TT_UB constraints.
:param M:
:return: Constraint index rule
"""
return [(i, j, w, t) for i in M.WINDOWS
for j in M.DAYS
for w in M.WEEKS
for t in M.activeTT
if (i, t, j) in M.okTourTypeDay]
|
07a65fe45bab32eaf53bd8f1182df32fb2930038
| 659,777 |
from typing import Tuple
import re
def read_data_from(file_: str) -> Tuple[dict, list, list]:
"""Read rules for, numbers on your, and numbers on nearby tickets."""
    with open(file_, "r") as f:
        file_str = f.read().splitlines()
rules = {}
for rule in file_str[:20]:
key, b0, e0, b1, e1 = re.split(": |-| or ", rule)
rules[key] = [int(b0), int(e0), int(b1), int(e1)]
yours = list(map(int, file_str[22].split(",")))
nearby = []
for ticket in file_str[25:]:
nearby.append(list(map(int, ticket.split(","))))
return rules, yours, nearby
|
c1132d48e401fa28ca4e49eb985971361291596f
| 326,439 |
def get_html_table_headers(**content):
"""
Return the keys of the first element in the user_list dict.
We don't care if the dict is unordered b/c the expectation is
that each element of the dict will be structured the same
"""
user_list = content["users"]
days_since_pwdlastset = content["days_since_pwdlastset"]
try:
return user_list[days_since_pwdlastset][0].keys()
except IndexError:
        # return the empty list if there are no accounts to be disabled
return []
|
d7c97e4e81a3d93a6c0634b321ba1d8a22bbc31e
| 369,139 |
def DetectEncoding(data, default_encoding='UTF-8'):
"""Detects the encoding used by |data| from the Byte-Order-Mark if present.
Args:
data: string whose encoding needs to be detected
default_encoding: encoding returned if no BOM is found.
Returns:
The encoding determined from the BOM if present or |default_encoding| if
no BOM was found.
"""
if data.startswith('\xFE\xFF'):
return 'UTF-16BE'
if data.startswith('\xFF\xFE'):
return 'UTF-16LE'
if data.startswith('\xEF\xBB\xBF'):
return 'UTF-8'
return default_encoding
|
43670fd234453a703dd18521b6dc655507448664
| 625,570 |
def TimelineName(name, source_type, value_type):
"""Constructs the standard name given in the timeline.
Args:
name: The name of the timeline, for example "total", or "render_compositor".
source_type: One of "cpu", "gpu" or None. None is only used for total times.
value_type: the type of value. For example "mean", "stddev"...etc.
"""
if source_type:
return '%s_%s_%s_time' % (name, value_type, source_type)
else:
return '%s_%s_time' % (name, value_type)
|
9a9d5c1e516f8a5271b760656c91e6bb0a8a2d25
| 560,704 |
def filter_mismatched_alignments(df):
"""
Removes any alignment that contains more than the smallest number of mismatches.
If there are two miRs that map to a single read, this function compares both
miRs and returns only the one(s) that have the least amount of mismatches.
"""
print("Now filtering miRs with mismatches.")
return df[df.groupby('ref_name')['mutation_num'].transform('min') == df['mutation_num']]
|
ec608c8ae8ead81da4ee6acf60653e6ed23ce3d8
| 562,341 |
def R11_2_d11(R11, SRM_ratio=4.04367):
"""
Convert Ratio to Delta notation.
Default SRM_ratio is NIST951 11B/10B
"""
return (R11 / SRM_ratio - 1) * 1000
|
0e18a90e3a82279895b7472c9edf9646d95748a7
| 88,748 |
import hashlib
def md5sum(file):
"""Compute the MD5 sum of a file.
Args:
file (str): file to be checksummed
Returns:
MD5 sum of the file's content
"""
md5 = hashlib.md5()
with open(file, "rb") as f:
md5.update(f.read())
return md5.digest()
|
ea809aa13b0a1dac833aef69cc4c0e2596fadb8a
| 388,105 |
def filter_dates(df, start_date, end_date):
"""Filters the DataFrame so that all ticks are
between start_date and end_date"""
return df[(df['date'] >= start_date) & (df['date'] <= end_date)]
|
3b5a1303e6789bff23f7b89a7e75f01340331fe3
| 458,547 |
def get_json(response):
"""
Retrieves the 'JSON' body of a response using the property/callable according to the response's implementation.
"""
if isinstance(response.json, dict):
return response.json
return response.json()
|
fda80c7100cb442f177ba18bfedbe0111161d846
| 51,903 |
def _banner() -> str:
"""Return the banner as a string"""
return "\n".join(
[
r" ____ __________ ____ __ __ ",
r" / __ \/ ____/ __ \ / __ \____ _ ______ / /___ ____ ____/ /__ _____",
r" / / / / / / /_/ / / / / / __ \ | /| / / __ \/ / __ \/ __ \/ __ / _ \/ ___/",
r"/ /_/ / /___/ _, _/ / /_/ / /_/ / |/ |/ / / / / / /_/ / /_/ / /_/ / __/ / ",
r"\____/\____/_/ |_| /_____/\____/|__/|__/_/ /_/_/\____/\__,_/\__,_/\___/_/ 2.0",
]
)
|
e7b101394e8b8f876fdbf5ee92360fc13f81bee1
| 432,295 |
def rotations(t):
""" Return list of rotations of input string t """
tt = t * 2
return [tt[i:i + len(t)] for i in range(0, len(t))]
|
6240e3fb1e06057958eff7cf529115e28afc08ce
| 72,233 |
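A usage sketch (as used, e.g., when building a Burrows-Wheeler rotation matrix; assumes the function is in scope):

rotations('abc')  # ['abc', 'bca', 'cab']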
import re
def normalize_value(text):
"""
This removes newlines and multiple spaces from a string.
"""
result = text.replace('\n', ' ')
result = re.subn('[ ]{2,}', ' ', result)[0]
return result
|
51ee1e8d8879fb60a338245850ac8999c4318bfd
| 180,298 |
import torch
def clone_hidden(h):
"""Clone hidden states in new Tensors."""
if isinstance(h, torch.Tensor):
return h.clone()
else:
return tuple(clone_hidden(v) for v in h)
|
5e54b45a427f883daf3ebea2dfb13390f8adb14f
| 463,156 |
def read_file(filename):
"""
Return the content of a file
Parameters
----------
filename : str
file name
Returns
-------
str
content of the file
"""
with open(filename, "r", encoding="utf-8") as f:
return f.read()
|
d06ea635d7405c0191ca99caea6be33b5b888550
| 290,554 |
def count_occurances(comment, word):
"""
A helper function to get the number of words in a comment.
"""
comment = comment.replace('?', ' ')
comment = comment.replace('.', ' ')
comment = comment.replace('-', ' ')
comment = comment.replace('/', ' ')
    tokens = comment.split(" ")
    count = 0
    for token in tokens:
        if token == word:
            count += 1
    return count
|
b4c5c5e0a7792e29811937a1e77161196bd9856c
| 631,990 |
def precision(cm):
"""The ratio of correct positive predictions to the total predicted positives."""
return cm[1][1]/(cm[1][1] + cm[0][1])
|
c777c03cb31766b0d15faabd67c80b2fa0fcc4dd
| 392,124 |
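A usage sketch with a hypothetical 2x2 confusion matrix laid out as cm[actual][predicted] (an assumption; the snippet does not fix the convention):

cm = [[50, 5],   # TN, FP
      [10, 35]]  # FN, TP
precision(cm)    # 35 / (35 + 5) = 0.875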
from pathlib import Path
def open_if_filename(infile, mode='rb'):
"""If ``infile`` is a string, it opens and returns it. If it's already a file object, it simply returns it.
    This function returns ``(file, should_close_flag)``. The should_close_flag is True if a file has
effectively been opened (if we already pass a file object, we assume that the responsibility for
closing the file has already been taken). Example usage::
fp, shouldclose = open_if_filename(infile)
dostuff()
if shouldclose:
fp.close()
"""
if isinstance(infile, Path):
return (infile.open(mode), True)
if isinstance(infile, str):
return (open(infile, mode), True)
else:
return (infile, False)
|
0096b184336ebdc32527da106418d4f5ee85b153
| 519,486 |
import re
def StripHTML(input: str) -> str:
"""
Strip the HTML formatting from a string.
Parameters
----------
input : str
HTML formatted string.
Returns
-------
str
Input string without the HTML formatting.
"""
# Regex is hideous, but this gets the job done faster than an external
# library. This is also future-proof against any sort of HTML characters,
    # such as &nbsp; and &amp;.
expression: re.Pattern[str] = re.compile(
"<.*?>|&([a-z0-9]+|#[0-9]{1,6}|#x[0-9a-f]{1,6});"
)
return re.sub(expression, "", input)
|
a54ea28351fa862360728780d7f19fb674f546da
| 92,754 |
from pathlib import Path
def load_query(file_path: str | Path) -> str:
"""
Loads a query from a file
Args:
file_path (str): The path to the file
Returns:
str: The query
"""
if not str(file_path).endswith(".sql"):
raise ResourceWarning("Given file path does not end with .sql")
with open(file_path, "r", encoding="UTF-8") as sql_query_file:
return sql_query_file.read()
|
fbc9de140c98237146c0a23efe6a191ed7a95e67
| 234,244 |
def get_request_method(environ: dict) -> str:
"""
Returns a method name of HTTP request.
:param environ: WSGI environment
:return: HTTP method name
"""
method = environ["REQUEST_METHOD"]
return method
|
b37bbbdb5f306e9941423a1e000e15f3eef47839
| 146,563 |