content
stringlengths 39
9.28k
| sha1
stringlengths 40
40
| id
int64 8
710k
|
---|---|---|
def extract_status_code_from_topic(topic):
    """Extract the status code segment from a DPS response topic.

    DPS response topics have the shape:
    $dps/registrations/res/<statuscode>/?$<key1>=<value1>&<key2>=<value2>

    :param topic: The topic string
    :return: the status code portion of the topic (as a string)
    """
    # The status code is the fourth '/'-separated segment after the '$'.
    STATUS_CODE_INDEX = 3
    after_dollar = topic.split("$")[1]
    return after_dollar.split("/")[STATUS_CODE_INDEX]
|
a75dd25e4236b37a54ee764b9caf2863c1e13873
| 120,442 |
def generate_numbers(partitions):
    """Return the integers 1 through `partitions` (inclusive) as a list."""
    upper_bound = partitions + 1
    return list(range(1, upper_bound))
|
8c1ad09496c4cbddb53c8b93d162d78e3e41b60e
| 25,417 |
def share_edge(label, uv1, uv2, h, w):
    """Determine if two uv coordinates share a boundary edge.

    Two uv points are considered to share an edge when they lie on the same
    horizontal or vertical line AND that line is on the image border
    (0, `w` for x; 0, `h` for y).

    :param label: unused here; kept for interface compatibility with callers
    :param uv1: first (x, y) coordinate pair
    :param uv2: second (x, y) coordinate pair
    :param h: image height (y border)
    :param w: image width (x border)
    :return: bool — True when the points share a border edge
    """
    # Fix: the original fell through to an implicit `return None` when the
    # shared line was not on the border; always return a proper boolean.
    if uv1[0] == uv2[0]:
        return uv1[0] == 0 or uv1[0] == w
    if uv1[1] == uv2[1]:
        return uv1[1] == 0 or uv1[1] == h
    return False
|
ad999f355a8386903c12ca514edcf195b2baad30
| 421,411 |
def sort_matrix(df, sort_columns=True):
    """Sort a DataFrame first by its columns and then by its rows.

    When `sort_columns` is True, columns are ordered by how many of their
    entries are positive (most frequent first); rows are then sorted
    descending on that column order.
    """
    if sort_columns:
        positive_counts = (df.astype(float) > 0).sum(axis=0)
        column_order = list(positive_counts.sort_values(ascending=False).index)
    else:
        column_order = list(df.columns)
    reordered = df[column_order]
    return reordered.sort_values(by=column_order, ascending=False)
|
8eb00ed0478ff99de256aaf9bf0370218357c2b3
| 292,239 |
import hashlib
def CreateIdentifier(str_id):
    """Return a 24-character uppercase hex string usable as an identifier.

    The identifier is the first 24 hex digits of the SHA-1 of `str_id`.
    """
    digest = hashlib.sha1(str_id.encode("utf-8")).hexdigest()
    return digest[:24].upper()
|
c4a950e7ba39d0ea22790db928d933d6b89f0d69
| 423,806 |
def get_patient_status(index, patients):
    """Retrieve a patient's most recent heart rate, status, and time stamp.

    Args:
        index (int): index of patient to be located in list
        patients (list): patient records, each with 'heart_rate', 'status'
            and 'time_stamp' entries
    Returns:
        dict: latest 'heart_rate', 'status' and 'timestamp' when available
        bool: False when the patient has no recorded heart rates
    """
    record = patients[index]
    if not record["heart_rate"]:
        return False
    return {
        "heart_rate": record["heart_rate"][-1],
        "status": record["status"],
        "timestamp": record["time_stamp"][-1],
    }
|
999438c3e0661ff85cc69ecc5c7e8981ce92dde7
| 539,667 |
import itertools
def cross_named_parameters(*args):
    """Cross lists of @named_parameters dicts into one combined list.

    Each argument is a list of parameter dicts suitable for
    @named_parameters. Their cartesian product is taken and each
    combination is merged into a single dict whose 'testcase_name'
    entries are joined with underscores.

    Args:
        *args: A list of lists of dicts.
    Returns:
        A list of dicts.
    """
    def _merge(combo):
        """Merge one tuple of parameter dicts from the cartesian product."""
        merged = combo[0].copy()
        for params in combo[1:]:
            for key, value in params.items():
                if key == 'testcase_name':
                    merged[key] = '{}_{}'.format(merged[key], value)
                else:
                    # Duplicate non-name keys would silently clobber values.
                    assert key not in merged, key
                    merged[key] = value
        return merged
    return [_merge(combo) for combo in itertools.product(*args)]
|
51863e234e74ecc8398cbce9e755de868e4ec942
| 552,263 |
def loss_inversely_correlated(X, y):
    """Return -1 * (concept_direction * prediction).

    The prediction is the first element of `X`; the concept direction is `y`.
    """
    first_prediction, *_rest = X
    return -(y * first_prediction)
|
9d7649c40d6cc3053b3ce721a468f8079a2be0c8
| 252,973 |
def get_fabric_design(fabric_design_uri, rest_obj):
    """Fetch the fabric design name behind a fabric design uri.

    :param fabric_design_uri: fabric design uri returned by a GET request
        (may be empty/None, in which case an empty dict is returned)
    :param rest_obj: session object used to issue the GET request
    :return: dict with the design 'Name', or {} when no uri was given
    """
    if not fabric_design_uri:
        return {}
    relative_uri = fabric_design_uri.split('/api/')[-1]
    response = rest_obj.invoke_request("GET", relative_uri)
    return {"Name": response.json_data.get("Name")}
|
6f615548fd0e49419eef5f3f79d860e21ad3a344
| 491,663 |
def get_param_name_list(expressions):
    """Collect the sorted parameter names used by a list of expressions.

    Symbols whose name contains 'x' are treated as variables, not
    parameters, and are excluded.
    """
    names = set()
    for expression in expressions:
        for symbol in expression.free_symbols:
            if "x" not in symbol.name:
                names.add(symbol.name)
    return sorted(names)
|
706a7077fe4971e72758d9757d9dfcbf4e34f28a
| 197,440 |
import requests
from bs4 import BeautifulSoup
def get_page(url):
    """Fetch `url` with requests and return the parsed bs4 object.

    :param url: (str) the URL to fetch
    :return: BeautifulSoup object, or None when the request fails
    """
    try:
        req = requests.get(url)
    except requests.exceptions.RequestException:
        # Fixed message typo: was "Could load the page".
        print("Could not load the page: {}".format(url))
        return None
    return BeautifulSoup(req.text, 'html.parser')
|
50224f5d660f501874d5951c586bf3ec1b40871c
| 334,737 |
def _get_product_id(device_dict):
"""Converts 0x1234 to 1234."""
return device_dict['product_id'].split('x')[-1]
|
8719d7022991a4fa53658330b01221a4e886e6a3
| 607,732 |
def get_number_versions(pypi_pkg):
    """Count the number of versions released for a PyPI package."""
    releases = pypi_pkg["pypi_data"]["releases"]
    return len(list(releases))
|
9711506698d83ae9d4903ae924aac2bf075e3aec
| 239,842 |
def org_id_from_org_name(org_name):
    """Extract the organization id (number) from the organization name.

    Args:
        org_name (str): The name of the organization, formatted as
            "organizations/${ORGANIZATION_ID}".
    Returns:
        str: just the organization_id.
    """
    # Unconditionally drop the fixed-length prefix (assumes it is present).
    prefix_length = len('organizations/')
    return org_name[prefix_length:]
|
0f9d324164a930f7eeebee8527cda14396049ba3
| 364,963 |
import builtins
def is_a_builtin(word) -> bool:
    """Return True when `word` names a builtin Python object.

    Args:
        word: word to check (converted to str before lookup)
    Returns: True if word is a builtin python object
    """
    builtin_names = dir(builtins)
    return str(word) in builtin_names
|
5dd2eed08273d8fc5a4bba1e67cc681f3a2488ca
| 677,906 |
def split_into_equal_sublists(a_list, number_of_parts):
    """Split a list into a list of as-equal-sized-as-possible sublists.

    Arguments
    ----------
    a_list : list object
        List which will be split into sublists
    number_of_parts : int
        Number of sublists which should be created
    Returns
    -------
    seperated_lists : list of lists
        Sublists of the given list, as equally sized as possible
    """
    # Fix: the original appended `list(range(start, end))` — index ranges —
    # instead of the actual slices of `a_list` its docstring promises.
    seperated_lists = []
    start = 0
    for part in range(number_of_parts):
        # Proportional cut point keeps sizes within one element of each other.
        end = round((len(a_list) / number_of_parts) * (part + 1))
        seperated_lists.append(a_list[start:end])
        start = end
    return seperated_lists
|
a98098aeff74f2e7be8bae9560a7aebd48b017f0
| 222,150 |
def valid_int(value):
    """Convert `value` to an integer, or return zero when conversion fails.

    Fix: also catch TypeError so non-string, non-numeric inputs
    (e.g. None, lists) return 0 instead of raising, matching the
    "or zero" contract of the docstring.
    """
    try:
        result = int(value)
    except (ValueError, TypeError):
        result = 0
    return result
|
2addb578b251832b40a11b02d0406b8a3875479f
| 323,910 |
def get_remote_accounts(web3, total=6):
    """Return the first `total` accounts known to the connected node.

    Typical usage:
    (admin, bidder, hoster, validator1, validator2, validator3,
     validator4) = get_accounts()
    """
    accounts = web3.eth.accounts
    return [accounts[index] for index in range(total)]
|
8cc381e79fa4ab43ae41b9784c5046f3d39014e1
| 521,726 |
def display_time(seconds, granularity=1):
    """Render a number of seconds as days, hours, minutes and seconds.

    Only the `granularity` largest non-zero units are included, and unit
    names are singularized when the amount is exactly 1, e.g.
    display_time(90000, 2) -> '1 day 1 hour'.
    """
    units = (
        ('days', 86400),   # 60 * 60 * 24
        ('hours', 3600),   # 60 * 60
        ('minutes', 60),
        ('seconds', 1),
    )
    parts = []
    remaining = seconds
    for unit_name, unit_seconds in units:
        amount = remaining // unit_seconds
        if amount:
            remaining -= amount * unit_seconds
            label = unit_name.rstrip('s') if amount == 1 else unit_name
            parts.append("{} {}".format(amount, label))
    return ' '.join(parts[:granularity])
|
d4b94ffafcdbda99526ecc906ed8379d7fc5edab
| 10,624 |
def twoSum(nums, target):
    """Return indices of the two numbers in `nums` that sum to `target`.

    Assumes exactly one solution exists and the same element is not used
    twice. One pass: the dict maps each still-needed complement to the
    index of the element that needs it.
    """
    needed = {}
    for index, value in enumerate(nums):
        if value in needed:
            return [needed[value], index]
        needed[target - value] = index
|
a1503f8e43fbdb51d2ed2693444f9ce33ae80914
| 134,311 |
import re
def get_number_in_string(line, pattern=r'\ done\ processing\ event\ \#(\d+)\,'):
    """Extract an event number from a log line.

    E.g. file eventLoopHeartBeat.txt contains
        done processing event #20166959, run #276689 22807 events read so far
    for which this function returns 20166959 as an int.

    :param line: line from a file (string).
    :param pattern: reg ex pattern with one numeric group (raw string).
    :return: extracted number (int), or None when there is no match.
    """
    found = re.search(pattern, line)
    if not found:
        return None
    try:
        return int(found.group(1))
    except (TypeError, ValueError):
        return None
|
ffb323b664fe873dcf2cf75267a0ec3e3390cedf
| 88,671 |
def binary_array_to_int(x, sep=''):
    """Interpret a 1D array `x` of 0s and 1s as a binary integer."""
    digit_string = sep.join(x.astype(str))
    return int(digit_string, 2)
|
bfbf898b7b53869a86b6c441d1d2d7f63d6c287c
| 239,298 |
def crop_border(img, border_x, border_y):
    """Crop the border of an image (to remove peripheral blurring).

    Args:
        img (np.ndarray): input image of shape (H, W, C)
        border_x (int): number of pixels to crop on each side of x border
        border_y (int): number of pixels to crop on each side of y border
    Returns:
        np.ndarray - cropped image
    """
    # Fix: `img[0:-0]` is an empty slice, so a border of 0 used to return
    # an empty array. Compute explicit end indices instead.
    bottom = img.shape[0] - border_y
    right = img.shape[1] - border_x
    return img[border_y:bottom, border_x:right, :]
|
9fb5aaa27e90f16b035d731ab99534a13dbcc899
| 300,973 |
def calculate_event_handler_snapshot_difference(client, snapshot_old, snapshot_new):
    """
    Calculates the difference between two event handler snapshot returning the difference.
    Parameters
    ----------
    client : ``Client``
        The respective client instance.
    snapshot_old : `dict` of (`str`, `list` of `async-callable`) items
        An old snapshot taken.
    snapshot_new : `dict` of (`str`, `list` of `async-callable`) items
        A new snapshot.
    Returns
    -------
    snapshot_difference : `None` or `list` of `tuple` \
        (`str`, (`None` or `list` of `async-callable`), (`None` or `list` of `async-callable`))
        A list of event handler differences in a list of tuples.
        The tuple has 3 elements, where the 0th element is the name of the respective event, meanwhile the 1th element
        contains the removed event handlers and the 2th the added ones.
        If there is no difference between two snapshots, returns `None`.
    """
    snapshot_difference = []
    # Union of every event name seen in either snapshot.
    event_names = {*snapshot_old.keys(), *snapshot_new.keys()}
    for event_name in event_names:
        old_handlers = snapshot_old.get(event_name, None)
        new_handlers = snapshot_new.get(event_name, None)
        if (old_handlers is not None) and (new_handlers is not None):
            # NOTE: this deliberately MUTATES both handler lists in place:
            # every handler present in both lists is deleted from both, so
            # only truly removed (old) and added (new) handlers remain.
            # Iterating backwards keeps indices valid while deleting.
            for index in reversed(range(len(old_handlers))):
                handler = old_handlers[index]
                try:
                    new_handlers.remove(handler)
                except ValueError:
                    # Handler absent from the new snapshot -> it was removed;
                    # keep it in `old_handlers`.
                    pass
                else:
                    del old_handlers[index]
            # Normalize emptied lists to None so the append check below is uniform.
            if not new_handlers:
                new_handlers = None
            if not old_handlers:
                old_handlers = None
        # Record the event only when something was removed or added.
        if (old_handlers is not None) or (new_handlers is not None):
            snapshot_difference.append((event_name, old_handlers, new_handlers))
    if not snapshot_difference:
        snapshot_difference = None
    return snapshot_difference
|
7ff9f6afd548e3a49c797a259c5177316b427469
| 52,619 |
def _find_framework_name(package_name, options):
"""
:param package_name: the name of the package
:type package_name: str
:param options: the options object
:type options: dict
:returns: the name of framework if found; None otherwise
:rtype: str
"""
return options.get(package_name, {}).get('framework-name', None)
|
3b345a5ccbed309ad7a706dce621a8fd405dd9e4
| 33,717 |
def model_str(instance, attr) -> str:
    """
    The field to display for an object's __str__. If the field doesn't exist
    (or is falsy) then an alternative "<ClassName: id>" string is displayed.
    :param instance: Instance object
    :param attr: Field name to get data from if it exists
    :return: str
    """
    # Fix: replaced the `cond and a or b` anti-pattern with explicit logic.
    # Behavior is preserved: any falsy attribute value falls back to the
    # "<ClassName: id>" form, exactly as the and/or chain did.
    value = getattr(instance, attr, None)
    if value:
        return value
    return f'<{instance.__class__.__name__}: {instance.id}>'
|
55934a0514083b75ea46d0690a1405343365f98f
| 265,264 |
def pmtcorrection(x_humidity):
    """Humidity correction factor for low-budget PM sensor data.

    Based on 'Developing a Relative Humidity Correction for Low-Cost Sensors
    Measuring Ambient Particulate Matter' by Di Antonio et al.

    k: the degree of hygroscopicity of a particle (fixed at 0.6 here).
    x_humidity: relative humidity in percent, provided by the sensor.
    """
    k = 0.6
    water_activity = x_humidity / 100
    hygroscopic_growth = (k / 1.65) / (-1 + (1 / water_activity))
    return 1 + hygroscopic_growth
|
6fd601f5e4d16024a9c12767f2f7ce2238812125
| 403,572 |
from typing import Mapping
from typing import Any
from typing import List
def get_by_keys(data: Mapping[str, Any],
                keys: List[Any],
                default_value=None,
                optional: bool = False) -> Any:
    """Returns value with given key(s) in (possibly multi-level) dict.
    The keys represent multiple levels of indirection into the data. For example
    if 3 keys are passed then the data is expected to be a dict of dict of dict.
    For compatibily with data that uses prefixing to create separate the keys in a
    single dict, lookups will also be searched for under the keys separated by
    '/'. For example, the keys 'head1' and 'probabilities' could be stored in a
    a single dict as 'head1/probabilties'.
    Args:
      data: Dict to get value from.
      keys: Sequence of keys to lookup in data. None keys will be ignored.
      default_value: Default value if not found.
      optional: Whether the key is optional or not. If default value is None and
        optional is False then a ValueError will be raised if key not found.
    Returns:
      The value found under the key path, the (possibly prefix-stripped)
      sub-dict for a prefix match, `default_value` when lookup fails and a
      default was given, or None when lookup fails and `optional` is True.
    Raises:
      ValueError: If (non-optional) key is not found.
    """
    if not keys:
        raise ValueError('no keys provided to get_by_keys: %s' % data)
    # Human-readable rendering of a key path for error messages.
    format_keys = lambda keys: '->'.join([str(k) for k in keys if k is not None])
    value = data
    keys_matched = 0
    for i, key in enumerate(keys):
        # None keys are documented as ignored, but still count as matched.
        if key is None:
            keys_matched += 1
            continue
        if not isinstance(value, Mapping):
            raise ValueError('expected dict for "%s" but found %s: %s' %
                             (format_keys(keys[:i + 1]), type(value), data))
        if key in value:
            value = value[key]
            keys_matched += 1
            continue
        # If values have prefixes matching the key, return those values (stripped
        # of the prefix) instead.
        prefix_matches = {}
        for k, v in value.items():
            if k.startswith(key + '/'):
                prefix_matches[k[len(key) + 1:]] = v
        if prefix_matches:
            value = prefix_matches
            keys_matched += 1
            continue
        # Neither a direct nor a prefix match: stop descending.
        break
    # An empty dict result is treated the same as "not found".
    if keys_matched < len(keys) or isinstance(value, Mapping) and not value:
        if default_value is not None:
            return default_value
        if optional:
            return None
        raise ValueError('"%s" key not found (or value is empty dict): %s' %
                         (format_keys(keys[:keys_matched + 1]), data))
    return value
|
cb488798fcfb301f7a3b147a16f20fbb65ada623
| 373,533 |
def _make_alg_experience(experience, name):
"""Given an experience, extracts the ``rollout_info`` field for an
algorithm.
"""
if experience.rollout_info == ():
rollout_info = ()
else:
rollout_info = getattr(experience.rollout_info, name)
return experience._replace(
rollout_info=rollout_info,
rollout_info_field=experience.rollout_info_field + '.' + name)
|
888a0ddf3772a42f4d11ec18b8268e42da9b68e1
| 535,770 |
def fairness_metrics_goal_threshold(metric):
    """Return the goal value and threshold for a fairness metric.

    Parameters
    ----------
    metric: str
        The name of the metric
    Returns
    -------
    int:
        goal value
    float:
        threshold (+ and -) of the metric
    """
    # disparate_impact targets 1; theil_index targets 0 with a wide band;
    # every other metric targets 0 with the default band.
    if metric == 'disparate_impact':
        return 1, 0.2
    if metric == 'theil_index':
        return 0, 0.2
    return 0, 0.1
|
20d0b062ca865da85ff4a75078a3b379ac5c73f1
| 292,875 |
import re
def remove_number_of_hits_from_autocomplete(user_entry):
    """
    Remove the hit count that autocomplete appends to an entry string.

    :param user_entry: user entry string with number of hits in parenthesis
    :return: user_entry without number of hits
    """
    # Fix: use a raw string for the regex — "\(" in a plain string is an
    # invalid escape sequence (SyntaxWarning on modern Python).
    user_entry = re.sub(r"\([0-9]*\)$", "", user_entry).strip()
    return user_entry
|
e6e356cce3048988a48b03ba7c26da07a03ee8bc
| 97,474 |
def age_bracket(x):
    """Map an age in [15, 65) into one of five decade-wide brackets.

    Returns '1' for [15,25), '2' for [25,35), ... '5' for [55,65);
    ages outside [15, 65) yield None.
    Integer -> String
    """
    if 15 <= x < 65:
        # Brackets are exactly 10 years wide, so arithmetic replaces the chain.
        return str(int((x - 15) // 10) + 1)
    return None
|
95963b7f2b9a07ee090e6e4a6d049ebe9dc402e1
| 599,123 |
def keep_accessible_residues(naccess_rsa):
    """From the output of naccess we keep only accessible residues
    which have an all_atoms_rel value >= 30 (arbitrary threshold).

    Args:
        naccess_rsa: A dictionnary containing the output of naccess's calculations
    Returns:
        dict: Keys are the residue ids and as value their solvant accessible area
    """
    # Fixes: docstring said "> 30" while the code tests >= 30 (kept the
    # code's behavior); direct .get() replaces the O(k) scan over all keys.
    accessible_residues_dict = {}
    for (chain_id, res_id), data_dict in naccess_rsa.items():
        value = data_dict.get("all_atoms_rel")
        if value is not None and value >= 30:
            # res_id is a tuple whose second item is the residue number.
            accessible_residues_dict[res_id[1]] = value
    return accessible_residues_dict
|
d837b6525fd54375ba05029879d57bec36f9cbb5
| 526,005 |
def bias(obs, mod):
    """
    Bias = mean(obs) - mean(mod), computed along axis 0.
    """
    obs_mean = obs.mean(axis=0)
    mod_mean = mod.mean(axis=0)
    return obs_mean - mod_mean
|
72315d45513e9181d522ec007ad9d587464f7f79
| 117,002 |
def replace_string_in_list(str_list: list, original_str: str, target_str: str):
    """
    Replace every occurrence of a substring in each element of a list.

    Args:
        str_list (list): A list contains the string to be replaced.
        original_str (str): The string to be replaced.
        target_str (str): The replacement of string.
    Returns,
        list, a new list with the replacement applied to every element.
    """
    replaced = []
    for item in str_list:
        replaced.append(item.replace(original_str, target_str))
    return replaced
|
31649c8c7171518598f6b3fe4d5db1b46e5cd573
| 688,225 |
def is_bound(f):
    """Return True when ``f`` is a bound function (has a non-None __self__)."""
    bound_target = getattr(f, '__self__', None)
    return bound_target is not None
|
ddc524591d3f262c77373ef8eafbab5112197647
| 196,601 |
import random
def relative_uniform(mean, scale_factor):
    """Draw from a uniform distribution whose bounds are `mean` scaled
    down and up by `scale_factor` (i.e. [mean*(1-sf), mean*(1+sf)])."""
    low = mean * (1 - scale_factor)
    high = mean * (1 + scale_factor)
    return random.uniform(low, high)
|
949822c6566b81478c4fd3d8ef2a0775636f6b25
| 339,785 |
def _process_match(rule, syms):
"""Process a match to determine if it is correct, and to find the correct
substitution that will convert the term into the pattern.
Parameters
----------
rule : RewriteRule
syms : iterable
Iterable of subterms that match a corresponding variable.
Returns
-------
A dictionary of {vars : subterms} describing the substitution to make the
pattern equivalent with the term. Returns `None` if the match is
invalid."""
subs = {}
varlist = rule._varlist
if not len(varlist) == len(syms):
raise RuntimeError("length of varlist doesn't match length of syms.")
for v, s in zip(varlist, syms):
if v in subs and subs[v] != s:
return None
else:
subs[v] = s
return subs
|
b81c450ddfa2294ef0f1b46597f336ad72be3187
| 398,516 |
def is_valid_triangle(triangle: list) -> bool:
    """Check the triangle inequality for three side lengths.

    A triangle is valid when the sum of the two shorter sides is strictly
    greater than the longest side. (Fixed docstring: the original stated
    the inequality backwards.)

    Args:
        triangle (list): side lengths of this triangle
    Returns:
        [bool]: Whether valid, according to the rules
    """
    shortest, middle, longest = sorted(triangle)
    return shortest + middle > longest
|
a18d8b88bd2a3624ad55e027821a14235dcce6db
| 306,824 |
def clean_facet_name(name):
    """Return the facet name with any ':'-suffixed geo distance part removed."""
    base_name, _sep, _suffix = name.partition(':')
    return base_name
|
45bb44a54253085c6967b0e6fb706d14b6425afa
| 469,066 |
def _fix_filename(filename):
"""Return a filename which we can use to identify the file.
The file paths printed by llvm-cov take the form:
/path/to/repo/out/dir/../../src/filename.cpp
And then they're truncated to 22 characters with leading ellipses:
...../../src/filename.cpp
This makes it really tough to determine whether the file actually belongs in
the Skia repo. This function strips out the leading junk so that, if the file
exists in the repo, the returned string matches the end of some relative path
in the repo. This doesn't guarantee correctness, but it's about as close as
we can get.
"""
return filename.split('..')[-1].lstrip('./')
|
d4b005f591879aab44275d100e725f61b5d6a764
| 701,357 |
from pathlib import Path
from typing import Optional
from typing import Callable
from typing import Generator
def collect_paths(
    input_base_path: Path,
    glob_pattern: str,
    path_filter: Optional[Callable[[Path], bool]] = None,
) -> Generator[Path, None, None]:
    """
    Yield paths under a base directory matching a glob, optionally filtered.

    Parameters
    ----------
    input_base_path : Path
        A starting Path
    glob_pattern : str
        Glob pattern to match. e.g "*" or "**/*"
    path_filter : Optional[Callable[[Path], bool]], optional
        A custom filter for more complex matching than glob provides,
        by default None (everything passes)
    Yields
    -------
    Generator[Path, None, None]
        The matched Paths.
    Raises
    ------
    ValueError
        input_base_path must exist and be a directory.
    """
    if not input_base_path.is_dir():
        raise ValueError(f"{input_base_path} - Not a directory or does not exist.")
    # No filter means everything matches.
    accept = path_filter if path_filter is not None else (lambda _p: True)
    for candidate in input_base_path.glob(glob_pattern):
        if accept(candidate):
            yield candidate
|
349f992bb0ef23b769a0efcc7aab74e9a48f9eb8
| 391,786 |
def T_model_from_NW(NW):
    """Compute the T-network impedances Z1, Z2, Z3 from the [ABCD]
    parameters of NW (object of Network Class).
         ____            ____
    o---|_Z1_|----------|_Z2_|---o
                 _|__
                |_Z3_|
                  |
    o----------------------------o
    Z1 and Z2 are the series arms; Z3 is the shunt arm.
    """
    series_input = (NW.A - 1) / NW.C
    series_output = (NW.D - 1) / NW.C
    shunt = 1 / NW.C
    return series_input, series_output, shunt
|
9a78c5020810d492937c06377d70249efde1bfae
| 264,113 |
def get_virtualenv_dir(version):
    """Get the virtualenv directory name for the given version."""
    return f"version-{version}"
|
1b292b48f9c5ce72304019ecb83a1befe25eaf77
| 678,490 |
import json
def parse_json_string(json_string):
    """Parse a JSON string into an object; return None when parsing fails.

    Non-string inputs are returned unchanged (assumed already parsed).

    :param json_string: json in string format
    :type json_string: str
    :return: object or none
    :rtype: dict/None
    """
    if not isinstance(json_string, str):
        return json_string
    try:
        return json.loads(json_string)
    except (ValueError, TypeError):
        return None
|
71552552022cc82c425780fe8713e17d91ebc8b7
| 629,501 |
def next_tick(cell_alive, number_of_neighbors):
    """Return a cell's state in the next turn (Conway's Game of Life).

    Rules:
    1. A live cell with 2 or 3 neighbors stays alive.
    2. A dead cell with exactly 3 neighbors is born.
    3. Otherwise the cell is dead next turn.
    """
    if cell_alive:
        return number_of_neighbors in (2, 3)
    return number_of_neighbors == 3
|
8a6bd216934d19096b9d238bd5fdef52d8067d5c
| 348,436 |
from typing import List
def parse_line(row: str) -> List[str]:
    """
    Split a row string into a list of its individual characters.

    :param row: A string representation of a row
    :return: A list of single-character strings
    """
    return list(row)
|
e10c034a571c250bc570cec93a2f6571335c39e0
| 386,496 |
def check_parse_type(raw):
    """Determine how a raw value should be parsed.

    Currently only detects strings and arrays: a value whose first
    non-whitespace character is '[' is an array, anything else a string.
    Fix: empty/whitespace-only input no longer raises IndexError
    (it is treated as 'string').
    """
    stripped = raw.strip()
    return 'array' if stripped.startswith('[') else 'string'
|
8a4278e25be8cf49dcc66721a57792c29f0e99e4
| 556,945 |
import itertools
def four_body_sum(spins):
    """Sum of four-body (plaquette) terms on a periodic square lattice.

    Input:
        spins: spins configuration matrix.
    Output:
        sum of four body terms (each site contributes the product of
        itself with its right, top, and top-right neighbors, with
        periodic wraparound).
    """
    size = len(spins)
    total = 0
    for row in range(size):
        row_next = (row + 1) % size
        for col in range(size):
            col_next = (col + 1) % size
            total += (spins[row, col] * spins[row, col_next]
                      * spins[row_next, col] * spins[row_next, col_next])
    return total
|
3e1408e1618db137489d29b0ee93b1ee7d88564f
| 689,667 |
def splitFill(text, delim, count, fill=u""):
    """
    Split text by delim into up to count+1 pieces, padding with `fill`
    when fewer pieces are available so the result always has count+1
    entries.
    """
    pieces = text.split(delim, count)
    missing = count + 1 - len(pieces)
    if missing > 0:
        pieces.extend([fill] * missing)
    return pieces
|
f314bad515508a3286ead7396135c172c8bf7767
| 237,960 |
def _adjusted_index(k: int, n: int) -> int:
"""Index `k` mod `n` (for wraparound)."""
return k % n
|
9db5e5a66e8984fe912f1e916a634247aee7f5e4
| 338,815 |
def unshape_data(data):
    """
    Return a flattened (1D) copy of `data`.
    """
    flattened = data.flatten()
    return flattened
|
a291df4b076cc26ccd12054ea10cb7e5a7ee5f25
| 175,078 |
def add_key(rec, idx):
    """Add a key value to a character record (mutated in place).

    Copies 'id' into '_key', normalizes the 'influential'/'original'
    flags to lowercase 'true'/'false' strings, and drops the
    'utc_offset' and 'id' fields.
    """
    rec["_key"] = rec["id"]
    for flag in ("influential", "original"):
        rec[flag] = "false" if rec[flag] == "False" else "true"
    del rec["utc_offset"]
    del rec["id"]
    return rec
|
b69361dfab5fce7edf09b00805b4aae577ed08a1
| 156,613 |
def bit_to_str(bit):
    """
    Convert a (frame, bit, value) tuple to its string representation:
    'frame_bb' with a leading '!' when the value is falsy.
    """
    frame, bit_index, value = bit
    prefix = "" if value else "!"
    return "{}{}_{:02d}".format(prefix, frame, bit_index)
|
49bdab85689b545039eec5275c1bd59b78a3574f
| 548,554 |
def getEmailParamFrom(request):
    """
    Return the 'email' query parameter, or None when absent or empty.

    Arguments:
    request -- bottle.request
    """
    email = request.query.email
    return email if email else None
|
5ae07d6e0ae7dfa66b65b510cbe1e40c46f29c1b
| 260,403 |
def findRdkitMolRadicalAtomIds(rdkitMol):
    """Return the indices of atoms that carry unpaired (radical) electrons."""
    return [atom.GetIdx()
            for atom in rdkitMol.GetAtoms()
            if atom.GetNumRadicalElectrons() > 0]
|
8a8494c4c1a4a2a8af99e92498e0fb73ce45701c
| 661,231 |
import logging
def prepare_data(df):
    """Separate the dependent and independent features of a dataframe.

    Args:
        df (pd.DataFrame): dataframe with a 'y' target column
    Returns:
        tuple: (X, y) where X is df without the 'y' column and y is the
        'y' column itself
    """
    logging.info("preparing the data by segregating the independent and depenedent variables")
    target = df['y']
    features = df.drop('y', axis=1)
    return features, target
|
58e848cbe8e7933f21daf788e08accc4073e5a80
| 172,370 |
def post_process_botok_segmented_data(segmented_text):
    """Remove unwanted spaces from botok-segmented text.

    Args:
        segmented_text (str): Botok segmented text
    Returns:
        str: clean segmented text
    """
    without_newline_space = segmented_text.replace('\n ', '\n')
    return without_newline_space.replace('  ', ' ')
|
d2c1be518ee5445b5b4c3b99a7d457d1319f91da
| 409,816 |
def cost(num_of_cities, distance_matrix, tour):
    """
    Total length of the closed `tour` under `distance_matrix`
    (the tour wraps from the last city back to the first).
    """
    total = 0
    for position in range(num_of_cities):
        origin = tour[position]
        destination = tour[(position + 1) % num_of_cities]
        total += distance_matrix[origin][destination]
    return total
|
30374d358e5b052d9cac05d2740ba3502211069b
| 312,426 |
def getWords(text: str) -> list:
    """
    Return the words found in a text as a list.

    A token counts as a word if it contains at least one digit or letter;
    tokens are produced by splitting on single spaces.
    """
    if not isinstance(text, str):
        raise TypeError('Must be passed string, not {0}'.format(type(text)))
    words = []
    for token in text.split(' '):
        if any(character.isalnum() for character in token):
            words.append(token)
    return words
|
bb76541819336d8cfe92725f0774d30622d7791e
| 460,806 |
def image_mask_overlaps_cropobject(mask, cropobject,
                                   use_cropobject_mask=False):
    """Determine whether an image mask overlaps the given CropObject.

    Either checks only the cropobject's bounding box, or additionally
    intersects with the cropobject's own mask, per `use_cropobject_mask`.

    :param mask: A 2D numpy array, assumed to be a mask of the source image
        in which the CropObject was marked.
    :param cropobject: The cropobject to test against.
    :param use_cropobject_mask: If False (or if the cropobject has no mask),
        only the bounding-box region of `mask` is checked.
    :return: True or False
    """
    top, left, bottom, right = cropobject.bounding_box
    region = mask[top:bottom, left:right]
    if use_cropobject_mask and (cropobject.mask is not None):
        # Element-wise product keeps only pixels set in BOTH masks.
        return (region * cropobject.mask).any() != 0
    return region.any() != 0
|
c4b022bf27a91aa075c93ff663a51ccf513e2e09
| 374,831 |
def format_bytes(size):
    """
    Format a number of bytes as a human-readable string (B through TB,
    powers of 1024, rounded to 2 decimal places).
    """
    unit_names = ("B", "KB", "MB", "GB", "TB")
    magnitude = 0
    while size > 1024:
        magnitude += 1
        size = size / 1024.0
    return "{} {}".format(round(size, 2), unit_names[magnitude])
|
437c5f000b60198887d97b9a8672f8cc7c714249
| 380,699 |
import json
def json_load(path):
    """
    Load and return the information stored in the JSON file at `path`.
    """
    with open(path) as handle:
        return json.load(handle)
|
7160612c81e071d50ad63c11f5aeb6d83d093782
| 243,743 |
def stop_words(lang='en'):
    """
    Get common words that are often removed during preprocessing of text data,
    i.e. "stop words". Currently only English stop words are provided.
    Parameters
    ----------
    lang : str, optional
        The desired language. Default: 'en' (English).
    Returns
    -------
    out : set
        A set of strings.
    Raises
    ------
    NotImplementedError
        If `lang` is anything other than 'en'/'english'.
    Examples
    --------
    You may remove stop words from an SArray as follows:
    >>> docs = turicreate.SArray([{'are': 1, 'you': 1, 'not': 1, 'entertained':1}])
    >>> docs.dict_trim_by_keys(turicreate.text_analytics.stop_words(), True)
    dtype: dict
    Rows: 1
    [{'entertained': 1}]
    """
    if lang=='en' or lang=='english':
        # Hard-coded English stop word list (single-letter tokens included on purpose).
        return set(['a', 'able', 'about', 'above', 'according', 'accordingly', 'across', 'actually', 'after', 'afterwards', 'again', 'against', 'all', 'allow', 'allows', 'almost', 'alone', 'along', 'already', 'also', 'although', 'always', 'am', 'among', 'amongst', 'an', 'and', 'another', 'any', 'anybody', 'anyhow', 'anyone', 'anything', 'anyway', 'anyways', 'anywhere', 'apart', 'appear', 'appreciate', 'appropriate', 'are', 'around', 'as', 'aside', 'ask', 'asking', 'associated', 'at', 'available', 'away', 'awfully', 'b', 'be', 'became', 'because', 'become', 'becomes', 'becoming', 'been', 'before', 'beforehand', 'behind', 'being', 'believe', 'below', 'beside', 'besides', 'best', 'better', 'between', 'beyond', 'both', 'brief', 'but', 'by', 'c', 'came', 'can', 'cannot', 'cant', 'cause', 'causes', 'certain', 'certainly', 'changes', 'clearly', 'co', 'com', 'come', 'comes', 'concerning', 'consequently', 'consider', 'considering', 'contain', 'containing', 'contains', 'corresponding', 'could', 'course', 'currently', 'd', 'definitely', 'described', 'despite', 'did', 'different', 'do', 'does', 'doing', 'done', 'down', 'downwards', 'during', 'e', 'each', 'edu', 'eg', 'eight', 'either', 'else', 'elsewhere', 'enough', 'entirely', 'especially', 'et', 'etc', 'even', 'ever', 'every', 'everybody', 'everyone', 'everything', 'everywhere', 'ex', 'exactly', 'example', 'except', 'f', 'far', 'few', 'fifth', 'first', 'five', 'followed', 'following', 'follows', 'for', 'former', 'formerly', 'forth', 'four', 'from', 'further', 'furthermore', 'g', 'get', 'gets', 'getting', 'given', 'gives', 'go', 'goes', 'going', 'gone', 'got', 'gotten', 'greetings', 'h', 'had', 'happens', 'hardly', 'has', 'have', 'having', 'he', 'hello', 'help', 'hence', 'her', 'here', 'hereafter', 'hereby', 'herein', 'hereupon', 'hers', 'herself', 'hi', 'him', 'himself', 'his', 'hither', 'hopefully', 'how', 'howbeit', 'however', 'i', 'ie', 'if', 'ignored', 'immediate', 'in', 'inasmuch', 'inc', 'indeed', 'indicate', 'indicated', 
        'indicates', 'inner', 'insofar', 'instead', 'into', 'inward', 'is', 'it', 'its', 'itself', 'j', 'just', 'k', 'keep', 'keeps', 'kept', 'know', 'knows', 'known', 'l', 'last', 'lately', 'later', 'latter', 'latterly', 'least', 'less', 'lest', 'let', 'like', 'liked', 'likely', 'little', 'look', 'looking', 'looks', 'ltd', 'm', 'mainly', 'many', 'may', 'maybe', 'me', 'mean', 'meanwhile', 'merely', 'might', 'more', 'moreover', 'most', 'mostly', 'much', 'must', 'my', 'myself', 'n', 'name', 'namely', 'nd', 'near', 'nearly', 'necessary', 'need', 'needs', 'neither', 'never', 'nevertheless', 'new', 'next', 'nine', 'no', 'nobody', 'non', 'none', 'noone', 'nor', 'normally', 'not', 'nothing', 'novel', 'now', 'nowhere', 'o', 'obviously', 'of', 'off', 'often', 'oh', 'ok', 'okay', 'old', 'on', 'once', 'one', 'ones', 'only', 'onto', 'or', 'other', 'others', 'otherwise', 'ought', 'our', 'ours', 'ourselves', 'out', 'outside', 'over', 'overall', 'own', 'p', 'particular', 'particularly', 'per', 'perhaps', 'placed', 'please', 'plus', 'possible', 'presumably', 'probably', 'provides', 'q', 'que', 'quite', 'qv', 'r', 'rather', 'rd', 're', 'really', 'reasonably', 'regarding', 'regardless', 'regards', 'relatively', 'respectively', 'right', 's', 'said', 'same', 'saw', 'say', 'saying', 'says', 'second', 'secondly', 'see', 'seeing', 'seem', 'seemed', 'seeming', 'seems', 'seen', 'self', 'selves', 'sensible', 'sent', 'serious', 'seriously', 'seven', 'several', 'shall', 'she', 'should', 'since', 'six', 'so', 'some', 'somebody', 'somehow', 'someone', 'something', 'sometime', 'sometimes', 'somewhat', 'somewhere', 'soon', 'sorry', 'specified', 'specify', 'specifying', 'still', 'sub', 'such', 'sup', 'sure', 't', 'take', 'taken', 'tell', 'tends', 'th', 'than', 'thank', 'thanks', 'thanx', 'that', 'thats', 'the', 'their', 'theirs', 'them', 'themselves', 'then', 'thence', 'there', 'thereafter', 'thereby', 'therefore', 'therein', 'theres', 'thereupon', 'these', 'they', 'think', 'third', 'this', 'thorough', 
        'thoroughly', 'those', 'though', 'three', 'through', 'throughout', 'thru', 'thus', 'to', 'together', 'too', 'took', 'toward', 'towards', 'tried', 'tries', 'truly', 'try', 'trying', 'twice', 'two', 'u', 'un', 'under', 'unfortunately', 'unless', 'unlikely', 'until', 'unto', 'up', 'upon', 'us', 'use', 'used', 'useful', 'uses', 'using', 'usually', 'uucp', 'v', 'value', 'various', 'very', 'via', 'viz', 'vs', 'w', 'want', 'wants', 'was', 'way', 'we', 'welcome', 'well', 'went', 'were', 'what', 'whatever', 'when', 'whence', 'whenever', 'where', 'whereafter', 'whereas', 'whereby', 'wherein', 'whereupon', 'wherever', 'whether', 'which', 'while', 'whither', 'who', 'whoever', 'whole', 'whom', 'whose', 'why', 'will', 'willing', 'wish', 'with', 'within', 'without', 'wonder', 'would', 'would', 'x', 'y', 'yes', 'yet', 'you', 'your', 'yours', 'yourself', 'yourselves', 'z', 'zero'])
    else:
        raise NotImplementedError('Only English stop words are currently available.')
|
af611e17227c8edf441edcb4eca8bd29faf030b2
| 309,604 |
def to_list(x):
    """Normalize *x* to a list.

    A list or tuple is converted with ``list``; any other object (including
    tensors) is wrapped in a single-element list.

    Arguments:
        x: target object to be normalized.
    Returns:
        A list.
    """
    return list(x) if isinstance(x, (list, tuple)) else [x]
|
2eb3c8776f44cbdf2cf598e0cf893d846c36497b
| 343,995 |
def f1_score(true_positive, positive, gt_positive):
    """Compute the F1-score from raw counts.

    Args:
        true_positive (int): Number of true positives.
        positive (int): Number of positively classified samples.
        gt_positive (int): Number of effectively positive samples.
    Returns:
        float: F1-score; 0 when any count is zero or negative.
    """
    if min(true_positive, positive, gt_positive) <= 0:
        return 0
    precision = true_positive / positive
    recall = true_positive / gt_positive
    # harmonic mean of precision and recall
    return 2 / (1 / precision + 1 / recall)
|
de4358d3c9ca6327033a253c3fd4a9f9d9321a1b
| 308,553 |
def decompose_trans(trans):
    """
    Decompose SE3 transformations into R and t, support torch.Tensor and np.ndarry.
    Input
        - trans: [4, 4] or [bs, 4, 4], SE3 transformation matrix
    Output
        - R: [3, 3] or [bs, 3, 3], rotation matrix
        - t: [3, 1] or [bs, 3, 1], translation matrix
    """
    # Ellipsis indexing handles both the batched and unbatched layouts
    # identically for numpy arrays and torch tensors.
    return trans[..., :3, :3], trans[..., :3, 3:4]
|
511c1b6d838bfcce18e399fee78f1d1aa30030a0
| 580,796 |
def format_file_size(size):
    """
    Formats the file size as string using binary (1024-based) units.
    @param size numeric value
    @return string (something + unit, or the bare byte count)
    """
    # Largest unit first so the first matching threshold wins.
    for unit, threshold in (("Gb", 2 ** 30), ("Mb", 2 ** 20), ("Kb", 2 ** 10)):
        if size >= threshold:
            return "%1.2f %s" % (size / threshold, unit)
    return "%d" % size
|
31a00ceb1e757a7bcb1ba360c561e723c6875763
| 351,970 |
def limit(num, minimum=1, maximum=255):
    """Clamp *num* into the closed interval [minimum, maximum].

    Defaults clamp to the byte-like range 1..255."""
    capped = num if num < maximum else maximum
    return capped if capped > minimum else minimum
|
fac54ad0bba741f14f97341a6331c025b5bd49fd
| 633,199 |
def fibonacci(n):
    """
    Naive recursive Fibonacci, intentionally unoptimized: it is the baseline
    other implementations are timed against.
    """
    return n if n <= 1 else fibonacci(n - 1) + fibonacci(n - 2)
|
1ca15b9bfd259712b924f7f2f67f34183a701555
| 626,719 |
def missing_key_from_list_in_dict(test_list, test_dict):
    """
    Check that every key in *test_list* exists in *test_dict*.
    Raises a KeyError carrying the first missing key; returns False when
    all keys are present.
    """
    _all_present = object()  # sentinel: keys themselves may legitimately be None
    missing = next((k for k in test_list if k not in test_dict), _all_present)
    if missing is not _all_present:
        raise KeyError(missing)
    return False
|
9dac476e428892ab64027d0963b57ed27e122a4e
| 488,616 |
def no_check(param):
    """Validator that accepts anything: ignores *param* and always returns True."""
    return True
|
02bcc16249dd34fcfa9113ee90fb4bef3bc16fca
| 102,495 |
def cleaning(document: str) -> str:
    """Remove CPF/CNPJ punctuation from a document string.

    Args:
        document (str): A CPF or CNPJ with special chars ('-', '.', '/')
    Returns:
        str: the document without special chars
    """
    # Single C-level pass instead of three chained .replace() calls.
    return document.translate(str.maketrans('', '', '-./'))
|
ed7bec7a5d7f1e48688ec8c7145c1a36d4f7acb5
| 531,248 |
import re
def validate_date(date: str) -> dict:
    """
    Validate provided date data.
    Desired format: YYYY or YYYY-MM (2021 or 2021-12); years 2000-2099 only.
    Returns {'year': ..., 'month': ...}, {'year': ...}, or {} when invalid.
    """
    year_month = re.match('^(20[0-9]{2})-((0[1-9])|(1[0-2]))$', date)
    if year_month:
        return {
            'year': year_month.group(1),
            'month': year_month.group(2),
        }
    year_only = re.match('^(20[0-9]{2})$', date)
    if year_only:
        return {'year': year_only.group(1)}
    return {}
|
86ed53dc1a7a1b7e060fc1acf8762e3f447ff279
| 662,883 |
def get_p2p_scatter_over_mad(model):
    """Return the folded/unfolded variability ratio stored under the
    model's 'scatter_over_mad' key."""
    return model["scatter_over_mad"]
|
c49c1cb91e455c4fe5902a23e3ccb924642fc56a
| 383,475 |
def format_size(nb_bytes):
    """
    Format a size expressed in bytes as a string (decimal, 1000-based units).
    Parameters
    ----------
    nb_bytes : int
        the number of bytes
    Return
    ------
    formated : str
        the given number of bytes fromated
    Example
    -------
    >>> format_size(100)
    '100.0 bytes'
    >>> format_size(1000)
    '1.0 kB'
    >>> format_size(1000000)
    '1.0 MB'
    >>> format_size(1000000000)
    '1.0 GB'
    >>> format_size(1000000000000)
    '1.0 TB'
    >>> format_size(1000000000000000000)
    '1000000.0 TB'
    """
    value = nb_bytes
    for unit in ('bytes', 'kB', 'MB', 'GB'):
        if value < 1000.0:
            return "%3.1f %s" % (value, unit)
        value /= 1000.0
    # Anything >= 1000 GB falls through and is expressed in TB, unbounded.
    return "%3.1f %s" % (value, 'TB')
|
0a3147d75571bc862749c41d0465ffe89b3c4ab4
| 681,504 |
def get_active_planet(ship_position, planets):
    """
    ship_position = [q, r]
    planets is a list of the planets in the game
    returns the planet whose currently active hex equals ship_position,
    or None when the ship is not at any planet
    (note: despite an older description, no index is returned)
    """
    for planet in planets:
        if ship_position == planet.position_of_hexes[planet.current_position]:
            return planet
    return None
|
e8f4641e29226f94a648e964ddf2306d104d5f8a
| 242,779 |
def lerp(value1, value2, fac):
    """Linear interpolation between *value1* and *value2* by factor *fac*
    (fac=0 gives value1, fac=1 gives value2)."""
    return value2 * fac + value1 * (1 - fac)
|
37d1ab5031815b60ae3f68e5c75d4557a2b78994
| 162,588 |
import re
def alphanum_key(s):
    """Natural-sort key: split *s* into alternating text and integer chunks
    so 'a10' orders after 'a2' ('human' ordering)."""
    chunks = re.split(r'(\d+)', s)
    # Odd indices are the captured digit runs; convert them to ints.
    return [int(c) if i % 2 else c for i, c in enumerate(chunks)]
|
e1545b791aecf6fd215048620eb6c05a23df30d8
| 101,030 |
def get_video_results(preds, anomaly_frames, normal_frames):
    """Returns confusion matrix values for predictions of one video
    Args:
        preds (set): set of frame number predictions for a threshold
        anomaly_frames (set) set of frames numbers that are anomalies
        normal_frames (set) set of normal frame numbers
    Returns:
        dict: true positives (int), false positives (int), false negatives (int), true negatives (int)
    """
    true_pos = sum(1 for f in preds if f in anomaly_frames)
    false_pos = sum(1 for f in preds if f not in anomaly_frames)
    false_neg = sum(1 for f in anomaly_frames if f not in preds)
    true_neg = sum(1 for f in normal_frames if f not in preds)
    return {"tp": true_pos, "fp": false_pos, "fn": false_neg, "tn": true_neg}
|
1814d1c3e46a6ee84c76d134b32a7c2778c70519
| 265,467 |
from typing import List
import re

# Words may contain in-word punctuation (' and -); each of . , ! ? ;
# matches as its own single-character token.
_TOKEN_PATTERN = re.compile(r"[\w'-]+|[.,!?;]")


def string_to_caps_words(in_string: str) -> List[str]:
    """
    Convert string to list of words in caps
    :param in_string: input string
    :return: word list (all caps)
    >>> string_to_caps_words("Svool, R'n z hgirmt!")
    ['SVOOL', ',', "R'N", 'Z', 'HGIRMT', '!']
    """
    return _TOKEN_PATTERN.findall(in_string.upper())
|
79d15d4b89a43aa88007aa54e1465389f4759894
| 184,599 |
def indent_level(block):
    """Number of leading indent characters, counting each tab as 8 spaces."""
    expanded = block.replace('\t', ' ' * 8)
    return len(expanded) - len(expanded.lstrip(' '))
|
04031f50b592619cfa63062909513420e15b4c4c
| 385,567 |
import warnings
def _encode_link_field(value):
"""
Encode the field of a `RiakObject.links` entry as bytes if necessary.
"""
if isinstance(value, bytes):
warnings.warn(
"Passing `RiakObject.links` fields as bytes is deprecated "
"and may be removed in a future release. Pass the fields as "
"strings instead.",
category=DeprecationWarning
)
return value
return value.encode()
|
c1c6cdd2f8f0531dc4de94d59b38ba4d7baa1cc6
| 220,790 |
from typing import List
from typing import Dict
def count(items: List, key_from: str, increment_fun=(lambda x: 1)):
    """
    Aggregate objects from a list by the attribute `key_from`.
    By default each item increments the counter by 1; pass `increment_fun`
    to weight items differently.
    """
    totals: Dict[str, int] = {}
    for entry in items:
        bucket = getattr(entry, key_from)
        totals[bucket] = totals.get(bucket, 0) + increment_fun(entry)
    return totals
|
73099b827d0e9281475209ed3104aef622d9b4b2
| 443,497 |
def get_crop_shape(dataset_name):
    """Return the (height, width) crop shape for the named dataset.

    Raises ValueError for dataset names that are neither MPISintel nor KITTI."""
    if 'MPISintel' in dataset_name:
        return (384, 768)
    if 'KITTI' in dataset_name:
        return (320, 896)
    raise ValueError('Unknown dataset name {}'.format(dataset_name))
|
18368554e9e4d8c6e3cc964df58b2abfe69d404b
| 570,036 |
def binary_step(n: float) -> int:
    """
    Binary Step (Unipolar Binary) Activation Function: 1 for n >= 0, else 0.
    """
    return 1 if n >= 0 else 0
|
edc86bd63182bf4f152a7746bae8860addc569cf
| 610,991 |
def column_names(table_name: str, schema: str) -> str:
    """Return a SQL query text selecting the column names and data types of
    the given table from information_schema, ordered by column position.

    NOTE(review): table_name and schema are interpolated directly into the
    SQL text — never pass untrusted input (SQL injection risk); prefer bound
    parameters where the calling driver allows.
    """
    return f"""
        SELECT
            column_name, data_type
        FROM
            information_schema.columns
        WHERE
            table_schema = '{schema}' AND
            table_name = '{table_name}'
        ORDER BY
            ordinal_position;
    """
|
70e66a12e1baac2963f5369c8838cab5e711b467
| 80,495 |
def invalid_usb_device_protocol(request):
    """
    Parametrized fixture body: returns the current ``request.param``, an
    invalid USB device protocol value supplied by the parametrization
    (fixture decorator presumably applied where this is registered).
    """
    return request.param
|
d6bbf3f3c321de2800b82bb32a03fe9b1f892de1
| 128,035 |
def read_label_map(description2label_txt):
    """Read a ';'-delimited label-map text file into a dictionary (map).

    The file is decoded as ISO-8859-1; the first line is treated as a header
    and skipped. Each remaining line maps the first field (lower-cased
    description) to the normalized second field (spaces -> '_', '/' and '-'
    removed, lower-cased). Lines that are blank, have fewer than two fields,
    or whose label is 'nan' are ignored.

    :param description2label_txt: path of the label-map text file
    :return: dict mapping description (str) -> label (str)
    """
    description2label = {}
    # Context manager so the file handle is always closed (the original
    # implementation leaked it).
    with open(description2label_txt, 'rb') as f:
        lines = f.read().decode('ISO-8859-1').splitlines()
    for line in lines[1:]:  # skip header
        line = line.strip()
        if not line:
            continue
        # convert line to fields
        fields = line.split(';')
        if len(fields) < 2:
            continue
        # read description
        description = fields[0].strip().lower()
        # read label
        label = fields[1].strip()
        if label.lower() == 'nan':
            continue
        label = label.replace(' ', '_').replace('/', '').replace('-', '')
        # add to dict
        description2label[description] = label.strip().lower()
    return description2label
|
f7c32863f8213d476d2155d6b97acaca9f816900
| 277,656 |
def _convert_to_pillow_kwargs(arg):
"""
Converts a string to the correct keyword to use for tiff compression in pillow.
Parameters
----------
arg : str
The string keyword used in the PySimpleGUI window.
Return
------
arg_mapping[arg] : None or str
The keyword in pillow associated with the input arg string.
"""
arg_mapping = {
'None': None, 'Deflate': 'tiff_deflate', 'LZW': 'tiff_lzw',
'Pack Bits': 'packbits', 'JPEG': 'jpeg'
}
return arg_mapping[arg]
|
c5b425489665a0b4bc7a211b590dc79a6fc2892d
| 157,460 |
import fcntl
import errno
def xioctl(fd, req, arg):
    """
    Wrapper around ioctl that retries the call as long as it is interrupted
    by a signal (EINTR); any other IOError propagates to the caller.
    """
    while True:
        try:
            return fcntl.ioctl(fd, req, arg)
        except IOError as e:
            if e.errno != errno.EINTR:
                raise
            print("Waiting...")
|
42dd19410d17bc10d3e4a28610294a843140bc87
| 694,430 |
import itertools
def explode(*ds):
    """Returns a generator of the concatenated (key,value) pairs of the provided dictionaries"""
    return itertools.chain.from_iterable(d.items() for d in ds)
|
136ab4aff1e1e6e82fc07a5f5941b66f2f67cf09
| 363,114 |
def replace(group):
    """
    Fill the null entries of a pandas group with the mean of its non-null
    entries. Mutates *group* in place and returns it.
    """
    missing = group.isnull()
    # Masked assignment keeps the in-place semantics callers may rely on.
    group[missing] = group[~missing].mean()
    return group
|
97b0cab2ff247d689a58a6717a07b5716fcc2cd2
| 92,484 |
def in_range(value, ranges):
    """
    Check if a float `value` lies in any of the given `ranges`.

    `ranges` may be a single (start, end) pair or a sequence of such pairs;
    both lists and tuples are accepted at either level. Endpoints are
    inclusive. Returns True or False.
    """
    # anything to check?
    if not ranges or not len(ranges):
        return False
    # Normalize a single (start, end) pair into a list of pairs. The original
    # only recognized lists here, so a list/tuple of *tuples* crashed with a
    # TypeError when comparing value against a tuple.
    if not isinstance(ranges[0], (list, tuple)):
        ranges = [ranges]
    return any(r[0] <= value <= r[1] for r in ranges)
|
e0c823c0aabbdf5f106f70501bd8d41608447a85
| 348,513 |
def is_tuple(obj) -> bool:
    """True iff *obj* is an instance of ``tuple`` (including subclasses)."""
    return isinstance(obj, tuple)
|
cf156273c006128d2f0c9fc0f8a3530007f4c5f1
| 310,638 |
def coerce_url(url, default_schema='http'):
    """Normalize a feed URL: strip whitespace, rewrite feed:// to the default
    schema, and prepend the default schema when none is present.

    >>> coerce_url('https://blog.guyskk.com/feed.xml')
    'https://blog.guyskk.com/feed.xml'
    >>> coerce_url('blog.guyskk.com/feed.xml')
    'http://blog.guyskk.com/feed.xml'
    >>> coerce_url('feed://blog.guyskk.com/feed.xml')
    'http://blog.guyskk.com/feed.xml'
    """
    url = url.strip()
    if url.startswith("feed://"):
        return "%s://%s" % (default_schema, url[7:])
    if "://" in url:
        return url
    return "%s://%s" % (default_schema, url)
|
9c9b4da76ccdab01f65dd25c967830521813d9b9
| 130,266 |
def bytes_view(request):
    """
    Simple test view: ignores *request* and returns a fixed ASCII bytes
    payload (a TwiML-style response).
    """
    return b'<Response><Message>Hi!</Message></Response>'
|
19bac61604ba81a0f87640670f2993a56aee4d3f
| 692,855 |
import copy
def remove_redundant_data(sequence):
  """Returns a copy of the sequence with redundant data removed.
  An event is considered redundant if it is a time signature, a key signature,
  or a tempo that differs from the previous event of the same type only by time.
  For example, a tempo mark of 120 qpm at 5 seconds would be considered
  redundant if it followed a tempo mark of 120 qpm and 4 seconds.
  Fields in sequence_metadata are considered redundant if the same string is
  repeated.
  Args:
    sequence: The sequence to process.  NOTE(review): appears to be a
      protobuf message (uses HasField and repeated-field slicing) — confirm
      it is a NoteSequence-style proto.
  Returns:
    A new sequence with redundant events removed; the input is not mutated.
  """
  fixed_sequence = copy.deepcopy(sequence)
  for events in [
      fixed_sequence.time_signatures, fixed_sequence.key_signatures,
      fixed_sequence.tempos
  ]:
    # Sort by time so "previous event of the same type" is well defined.
    events.sort(key=lambda e: e.time)
    # Walk backwards so deleting events[i] never shifts indices still to visit.
    for i in range(len(events) - 1, 0, -1):
      tmp_ts = copy.deepcopy(events[i])
      tmp_ts.time = events[i - 1].time
      # If the only difference between the two events is time, then delete the
      # second one.
      if tmp_ts == events[i - 1]:
        del events[i]
  if fixed_sequence.HasField('sequence_metadata'):
    # Add composers and genres, preserving order, but dropping duplicates.
    # The repeated fields are cleared in place and rebuilt from the
    # (unmodified) input sequence.
    del fixed_sequence.sequence_metadata.composers[:]
    added_composer = set()
    for composer in sequence.sequence_metadata.composers:
      if composer not in added_composer:
        fixed_sequence.sequence_metadata.composers.append(composer)
        added_composer.add(composer)
    del fixed_sequence.sequence_metadata.genre[:]
    added_genre = set()
    for genre in sequence.sequence_metadata.genre:
      if genre not in added_genre:
        fixed_sequence.sequence_metadata.genre.append(genre)
        added_genre.add(genre)
  return fixed_sequence
|
6ebf40f54c57cddbdc9a7d59e15d91fc10c50e38
| 336,449 |
from typing import OrderedDict
def get_query_columns(engine, query):
    """ Extract column names and python types from the first row of a query
    Args:
        engine: SQLAlchemy connection engine
        query: SQL query
    Returns:
        OrderedDict mapping column name -> python type name of its value
    """
    con = engine.connect()
    try:
        result = con.execute(query).fetchone()
    finally:
        # The original never closed the connection, leaking it on every call.
        con.close()
    cols = OrderedDict()
    for name, value in zip(result.keys(), result):
        cols[name] = type(value).__name__
    return cols
|
733b4a2786672eac30b09fc755bb39c42a17eb4a
| 433,030 |
def clean_link(link):
    """
    Strip the '#fragment' part of a URL so that links differing only by a
    subheading or page component are treated as the same URL.
    In: string representiaton of a URL link.
    Out: "cleaned" version of the link parameter.
    """
    base, _, _fragment = link.partition("#")
    return base
|
434e014302e878503d870d66ffe7f4a925793934
| 353,342 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.