def format_dictlist(dictlist, features):
    """
    Convert a list of dictionaries to be compatible with create_csv_response.

    `dictlist` is a list of dictionaries; all dictionaries should have keys
    from `features`.
    `features` is a list of features.

    Example:
        dictlist = [
            {
                'label1': 'value-1,1',
                'label2': 'value-1,2',
                'label3': 'value-1,3',
                'label4': 'value-1,4',
            },
            {
                'label1': 'value-2,1',
                'label2': 'value-2,2',
                'label3': 'value-2,3',
                'label4': 'value-2,4',
            }
        ]
        header, datarows = format_dictlist(dictlist, ['label1', 'label4'])
        # results in
        header = ['label1', 'label4']
        datarows = [['value-1,1', 'value-1,4'],
                    ['value-2,1', 'value-2,4']]
    """
    def dict_to_entry(dct):
        """Convert a dictionary to a list of values for a csv row."""
        relevant_items = [(k, v) for (k, v) in dct.items() if k in features]
        ordered = sorted(relevant_items, key=lambda k_v: header.index(k_v[0]))
        vals = [v for (_, v) in ordered]
        return vals

    header = features
    datarows = list(map(dict_to_entry, dictlist))
    return header, datarows
|
e9e6e27fd698a6eabe7b7ceb9794d295c9fb2e42
| 554,973 |
import re
def to_address(text):
    """
    Convert address text with a possible leading '#' to an integer address value.

    :param str text: Address text to convert
    :returns: Integer address, or None if not a valid address
    """
    if isinstance(text, int):
        return int(text)
    if isinstance(text, str):
        try:
            value = int(re.sub(r'^#', '', text.strip()))
        except ValueError:
            return None
        return value
    return None
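
# Usage sketch (hypothetical inputs), illustrating the accepted formats:
# a plain integer, a decimal string, and a string with a leading '#'.
assert to_address(10) == 10
assert to_address("42") == 42
assert to_address("#42") == 42
assert to_address("not-an-address") is None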
|
ef2510c5746dad8bd6bee1e9e470b1cb5241921c
| 287,492 |
def has_group(user, group_name):
    """Checks whether a user is in a specific group.

    Args:
        user: the user object to check
        group_name (str): a string representation of a group's name

    Returns:
        boolean: True or False depending on whether the user belongs to the group.
    """
    if user.is_superuser:
        return True
    else:
        return user.groups.filter(name=group_name).exists()
|
da1631314d4a09ea10e31f8b952fffe182d07705
| 540,598 |
import math
def C_fingas_corr(C, thickness):
    """
    Returns the constant C used in the Fingas model corrected for the
    slick thickness; the correction applies only if the slick is thinner
    than 1.5 mm.

    source: (Fingas, 2015)

    Parameters
    ----------
    C : Fingas constant
    thickness : Slick thickness [m]
    """
    if thickness > 1.5 / 1000:
        print("Slick thickness is more than 1.5 mm, correction not needed")
        return C
    return C + 1 - 0.78 * math.sqrt(thickness / 1000)
|
a1fa0a96a2d54bbf7919b6f9fa87f94d028eaa51
| 409,359 |
def apply_to_feature(feature_df, groupby_func_name=None, function=None):
    """
    Apply a function to the entries for each feature.

    feature_df ... dataframe with index (chrom, feature_name, pos)
                   (such as the output of data_per_feature())
    groupby_func_name ... name of the method of the groupby object
                          to apply to the data.
                          This is faster than applying a function object.
    function ... alternatively: function object to apply
    """
    groups = feature_df.groupby(lambda idx: idx[1])
    if groupby_func_name is not None:
        return getattr(groups, groupby_func_name)()
    elif function is not None:
        return groups.apply(function)
    else:
        raise ValueError("Either groupby_func_name or function has to be given.")
|
4457641597303e2b422f84840c6e6fd2446b9c74
| 47,366 |
def set_d_type(x, d_type):
    """Sets the dtype of a numpy array, casting only when necessary."""
    if x.dtype != d_type:
        x = x.astype(d_type)
    return x
|
d3fa6619745f7e7f06fa9d9eba5f4ad8158eff33
| 422,250 |
def calc_rate(rate, rate_long, maturity):
    """Approximate a current interest rate.

    Performs linear interpolation based on the current
    1-year and 10-year rates and the maturity of the bond.

    Parameters
    ----------
    rate : float
        1-year interest rate.
    rate_long : float
        10-year interest rate.
    maturity : int
        Maturity of the bond in years.

    Returns
    -------
    float
        Approximated interest rate.
    """
    return rate + (rate_long - rate) * (maturity - 1) / 9
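
# Worked example (hypothetical rates): with a 1-year rate of 2% and a
# 10-year rate of 4%, a 5-year bond interpolates to
# 0.02 + (0.04 - 0.02) * (5 - 1) / 9 ≈ 0.0289, i.e. about 2.89%.
assert abs(calc_rate(0.02, 0.04, 5) - 0.02889) < 1e-4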
|
c9a92d3df99697e281ba34e44b38678a5a970123
| 198,297 |
from collections import Counter
def chars(lines, coverage=0.9995):
    """Returns a string containing the most frequent types of characters
    which cover the strings in `lines`, ordered by their ranks.
    Rank 0 is rigged to always be the whitespace.
    """
    char2freq = Counter(char for line in lines for char in line)
    total = sum(char2freq.values())
    cover = char2freq[" "]
    del char2freq[" "]
    chars = [" "]
    for char, freq in char2freq.most_common():
        if coverage <= (cover / total):
            break
        chars.append(char)
        cover += freq
    return "".join(chars)
|
c6feb25cb8b8f546bf1d90c685bb1e089979de69
| 420,733 |
def validation_error(exc, request):
    """Handle a ValidationError."""
    request.response.status_int = exc.status_int
    return {"error": exc}
|
bde482b980e3c19833d7c0d727127792f6137416
| 470,431 |
import torch
def correct(output, target, topk=(1,)):
    """Computes how many correct outputs with respect to targets.

    Does NOT compute accuracy but just the raw number of correct
    outputs given target labels. This is done for each value in
    topk. A value is considered correct if target is in the topk
    highest values of output.
    The values returned are upper-bounded by the given batch size.

    Arguments:
        output {torch.Tensor} -- Output prediction of the model
        target {torch.Tensor} -- Target labels from data

    Keyword Arguments:
        topk {iterable} -- Iterable of values of k to consider as correct (default: {(1,)})

    Returns:
        List(int) -- Number of correct values for each topk
    """
    with torch.no_grad():
        maxk = max(topk)
        # Only need to do topk for the highest k, reuse for the rest
        _, pred = output.topk(k=maxk, dim=1, largest=True, sorted=True)
        pred = pred.t()
        correct = pred.eq(target.view(1, -1).expand_as(pred))
        res = []
        for k in topk:
            correct_k = correct[:k].reshape(-1).float().sum(0, keepdim=True)
            res.append(correct_k.item())
        return res
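
# Usage sketch (hypothetical tensors): a batch of 3 samples over 4 classes.
# Samples 0 and 2 are top-1 correct; sample 1 is only top-2 correct.
logits = torch.tensor([[0.9, 0.05, 0.03, 0.02],
                       [0.4, 0.5, 0.05, 0.05],
                       [0.1, 0.1, 0.7, 0.1]])
labels = torch.tensor([0, 0, 2])
assert correct(logits, labels, topk=(1, 2)) == [2.0, 3.0]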
|
8608bc5f555bdde96a021347fd32a1f0179b03cf
| 643,059 |
def check_sheet_cols(name):
    """
    Callable mini-function passed to pd.read_excel(usecols=function).
    Grab the first column to use as an index, then all columns starting with column 4.
    """
    return name == 0 or name >= 4
|
b6fe4eda2c45d08cce9dec7a202d43512576c672
| 100,034 |
def split_mapping_by_keys(mapping, key_lists):
    """Split up a mapping (dict) according to connected components.

    Each connected component is a sequence of keys from mapping;
    returned is a corresponding sequence of head mappings, each with
    only the keys from that connected component.
    """
    mappings = []
    for seq in key_lists:
        mappings.append({head_id: value for head_id, value
                         in mapping.items() if head_id in seq})
    return mappings
|
1bdccf60c9073fff0dc3152c32b000e5233e095c
| 340,344 |
from obspy import UTCDateTime
def vtime2obspytime(vtime):
    """
    Converts valve time strings to UTCDateTime (an obspy object).

    Parameters
    ----------
    vtime: list
        List of strings of valve times (yyyymmddHHMMSS).

    Outputs
    ---------
    obspytime: list
        List of UTCDateTime objects.
    """
    obspytime = [UTCDateTime.strptime(nowtime, '%Y%m%d%H%M%S') for nowtime in vtime]
    return obspytime
|
28f7871a4dd9fc7b4dca65488c35db21030506ee
| 273,198 |
import math
def select_from_menu(title, menuEntries):
    """
    Prompt user to select from an on-screen menu.

    If the option list contains only a single option it will be auto-selected.

    Args:
        title: The title string.
        menuEntries: The list of options.

    Returns:
        The selected list index, or None if no selection made.
    """
    assert len(menuEntries) > 0  # The caller is responsible for handling this case
    if len(menuEntries) == 1:
        print("\nSelect a %s:" % title)
        print("    Auto-selected %s" % menuEntries[0])
        return 0
    selection = None
    while True:
        try:
            # Print the menu
            print("\nSelect a %s:" % title)
            countW = int(math.log10(len(menuEntries))) + 1
            message = "    %%%uu) %%s" % countW
            for i, entry in enumerate(menuEntries):
                print(message % (i + 1, entry))
            print(message % (0, "Exit gator_me.py"))
            # Process the response
            response = int(input("\n    Select entry: "))
            if response == 0:
                return None
            elif 0 < response <= len(menuEntries):
                selection = response - 1
                break
            else:
                raise ValueError()
        except ValueError:
            print("    Please enter an int in range 0-%u" % len(menuEntries))
    print("\n    Selected %s" % menuEntries[selection])
    return selection
|
f0ad904cc4b78ed5142857596f60a216742542f9
| 462,225 |
def best_model_compare_fn(best_eval_result, current_eval_result, key):
    """Compares two evaluation results and returns true if the second one is
    greater.

    Both evaluation results should have the value for key, used for comparison.

    Args:
        best_eval_result: best eval metrics.
        current_eval_result: current eval metrics.
        key: key to value used for comparison.

    Returns:
        True if the value for key in current_eval_result is greater;
        otherwise, False.

    Raises:
        ValueError: If either eval result is None or the key is missing.
    """
    if not best_eval_result or key not in best_eval_result:
        raise ValueError(f'best_eval_result cannot be empty or key "{key}" is '
                         f'not found.')
    if not current_eval_result or key not in current_eval_result:
        raise ValueError(f'current_eval_result cannot be empty or key "{key}" is '
                         f'not found.')
    return best_eval_result[key] < current_eval_result[key]
|
43947b3db0643d2ae4886cb2cb85f7393e1f9aa5
| 101,944 |
def get_extrema(coords):
    """Returns the max and min x and y values from a list of coordinate tuples
    in the form of (min_x, max_x, min_y, max_y)."""
    max_x = max(coords, key=lambda item: item[0])[0]
    max_y = max(coords, key=lambda item: item[1])[1]
    min_x = min(coords, key=lambda item: item[0])[0]
    min_y = min(coords, key=lambda item: item[1])[1]
    return (min_x, max_x, min_y, max_y)
|
16072e44eb6c8ead2e2726b888a59d97f96703a4
| 479,294 |
def best_validation_rows(log_df, valid_col='valid_accuracy', second_criterion='iterations_done'):
    """
    Takes a dataframe created by scripts/logs_to_dataframe.py and returns
    a dataframe containing the best-validation row for each log.
    """
    return log_df.sort_values([valid_col, second_criterion],
                              ascending=False).drop_duplicates(['log'])
|
9541adb6653a93bfe0385bd24beedf80b065dde7
| 15,114 |
def decode(byte_data):
    """
    Decode the byte data to a string if not None.

    :param byte_data: the data to decode
    """
    if byte_data is None:
        return None
    return byte_data.decode()
|
c773802dd490ca32de7542366c49487e16a411e0
| 62,466 |
from typing import Any
from typing import Match
import re
from textwrap import dedent
def formatter(string_to_be_printed: str, **kwargs: Any) -> str:
    """
    Perform recursive string formatting on a string. Any keywords enclosed in curly quotation
    marks are expanded using the keyword arguments passed into the function, with a few details:

    * The formatter is applied recursively to the expanded string. E.g.,
          formatter("“a”", a="expanded “b”", b="expanded c")
      returns
          'expanded expanded c'.
    * If a keyword is a comma-separated list like “a, b, c”, then each of the keywords "a", "b",
      and "c" is expanded and the results are joined with intervening commas. If any expansion
      results in the None object, the formatter acts as if that term were not there. E.g.,
          formatter("“a, b, c”", a="expanded a", b=None, c="expanded c")
      returns
          'expanded a, expanded c'.
    * Any keyword can contain a ':', in which case Python string formatting applies, e.g.,
      “a:.6f” would look for 'a' in the keyword arguments and expand the floating point number
      to six decimal places.
          formatter("“a:.3f, b:3d”", a=1.23456, b=7)
      returns
          '1.235,   7'

    Finally, the returned string is unindented and stripped of whitespace at either end.
    """
    def repl(m: Match[str]) -> str:
        keyword = m.group(1)
        retval = []
        for x in keyword.split(','):
            add_space = x and x[-1] == ' '
            x = x.strip()
            if x == '':
                retval.append('')
                continue
            if kwargs[x.split(':')[0]] is not None:
                y = str(('{' + x + '}').format(**kwargs))
                if add_space:
                    y += ' '
                retval.append(y)
        return ", ".join(retval)

    try:
        retval = re.sub(r"“(.+?)”", repl, dedent(string_to_be_printed)).strip('\n')
    except KeyError as e:
        raise Exception(f"No key \"{e.args[0]}\" found in {kwargs} for formatted string "
                        f"{string_to_be_printed}.") from None
    if '“' in retval:
        return formatter(retval, **kwargs)
    return retval
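
# Usage sketch: the curly quotation marks “…” delimit keywords, and None
# terms drop out of comma-separated lists (values here are hypothetical).
assert formatter("“name” scored “score:.1f”", name="alice", score=9.87) == "alice scored 9.9"
assert formatter("“a, b”", a="x", b=None) == "x"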
|
e89e18958f3aef81119e3218145724dceaf4d48a
| 538,681 |
def create_records_versions_bp(app):
    """Create records versions blueprint."""
    ext = app.extensions["invenio-rdm-records"]
    return ext.records_versions_resource.as_blueprint(
        "bibliographic_records_versions_resource"
    )
|
9d865a4b7aa0033ae695aed5faf16c08387f3aec
| 379,446 |
def format_msg(sym, msg):
    """
    Formats console message.
    """
    return ' {} {}'.format(sym, msg)
|
12b0df2c139a0847571d09eefe3d8fa6506c69b6
| 368,387 |
def dash_to_mul(input_string):
    """Replace '-' with '*' in input_string."""
    return input_string.replace("-", "*")
|
c868a014aacbbdceddb28cc9e5ada0ae7ef8a930
| 189,683 |
def upload_source_path(instance, filename):
    """
    Get the path to upload the file to.

    To avoid a lot of files in a single directory,
    nests within project.
    """
    return "projects/{project}/sources/upload-{id}-{filename}".format(
        project=instance.project.id, id=instance.id, filename=filename
    )
|
924f0e341a4b6d98d4a438425ede18f3b50adde4
| 324,025 |
import operator
def get_isa_extensions(instruction_form):
    """Returns a hashable immutable set with names of ISA extensions required for this instruction form"""
    return frozenset(map(operator.attrgetter("name"), instruction_form.isa_extensions))
|
710006b73ea1875f18c4ce4222b82908f84a9bdb
| 222,021 |
def organize_combinations(attribute_names, attribute_combinations):
    """Organize the generated combinations into a list of dicts.

    Input:
        attribute_names: ["latency", "reliability"]
        attribute_combinations: [[1,99.99], [2,99.99], [3,99.99], [1,99.9], [2,99.9], [3,99.9]]
    Output:
        combinations = [{'latency': 1, 'reliability': 99.99},
                        {'latency': 2, 'reliability': 99.99},
                        {'latency': 3, 'reliability': 99.99},
                        {'latency': 1, 'reliability': 99.9},
                        {'latency': 2, 'reliability': 99.9},
                        {'latency': 3, 'reliability': 99.9}
                       ]
    """
    combinations = []
    for combination in attribute_combinations:
        comb = dict(zip(attribute_names, combination))
        combinations.append(comb)
    return combinations
|
b07dcb6b2ce589ce942b3e43b0e7075a54cb5d37
| 188,887 |
from typing import Dict
def is_call_status_update(item: Dict) -> bool:
    """Checks if a dynamoDB Stream item is a call status update event"""
    return item.get("EventType") in [
        "START_TRANSCRIPT",
        "CONTINUE_TRANSCRIPT",
        "CONTINUE",
        "END_TRANSCRIPT",
        "TRANSCRIPT_ERROR",
        "ERROR",
        "END",
        "ADD_CHANNEL_S3_RECORDING_URL",
    ]
|
c02e61c9417b69cad2c397346963e3dd3ef6af0c
| 283,532 |
def hdu_get_time(hdu, time_format='bmjd'):
    """
    Will be used as a key function for the list.sort() or sorted() functions.

    Example:
        hdus.sort(key=hdu_get_time)
    where hdus is a list of HDU objects, will call hdu_get_time() on each element
    in the list, and then sort the elements according to the value returned from
    the key function.

    Parameters
    ----------
    hdu: HDU object
        The HDU object which is an element in the list that is to be sorted.
    time_format: str, optional
        The time format you want to sort by, even though it should not matter.

    Returns
    -------
    float
        The header entry BMJD_OBS (Barycentric Modified Julian Date of observation), which
        will then be used as the comparison attribute of each element in the list to be
        sorted. If a different time format is specified using the 'time_format' keyword,
        then the returned value will be the corresponding header value.
    """
    format_to_kwrd = {
        'bmjd': 'BMJD_OBS',
        'hmjd': 'HMJD_OBS',
        'mjd': 'MJD_OBS',
        'utc': 'UTCS_OBS',
        'date': 'DATE_OBS',
        'dce': 'ET_OBS'
    }
    if format_to_kwrd.get(time_format):
        return hdu.hdr[format_to_kwrd.get(time_format)]
    else:
        return hdu.timestamp
|
d51f06b584eb2f26ede7a002eb44dd65e331bfbd
| 644,394 |
import tempfile
import json
def create_credential_tmp_file(credentials: dict) -> str:
    """
    Given a credentials dict, store it in a tmp file.

    :param credentials: dictionary to store
    :return: path to find the file
    """
    with tempfile.NamedTemporaryFile(delete=False) as fp:
        cred_json = json.dumps(credentials, indent=4, separators=(",", ": "))
        fp.write(cred_json.encode())
    return fp.name
|
291e7a286cc8473d9bffc9b9a45929a69fa39cad
| 360,165 |
def simplify_DFA(states, alphabet, transitions, initialState, finalStates):
    """Return given DFA with every transition to initialState removed
    for clarity."""
    new_transitions = []
    for from_s, to_s, s in transitions:
        if to_s != initialState:
            new_transitions.append((from_s, to_s, s))
    return (states, alphabet, new_transitions, initialState, finalStates)
|
c40591c910edea249b4b8bba27637a0f7114336a
| 261,124 |
from typing import Dict
from typing import Optional
def org_and_count_value_from_sparql_response(sparql_response: Dict) -> Optional[Dict]:
    """Map sparql-response to dict with orgId and count value."""
    org = sparql_response.get("organizationNumber")
    org_value = org.get("value") if org else None
    count = sparql_response.get("count")
    count_value = count.get("value") if count else None
    return (
        {"org": org_value.strip().replace(" ", ""), "count": count_value}
        if org_value and count_value
        else None
    )
|
9f02838656eb00f4c07ca960565507f95523781e
| 332,702 |
import unicodedata
def normalize_text(text):
    """Normalize unicode string to NFD form."""
    return unicodedata.normalize('NFD', text)
|
95ee89f0f938c4b14afe71917d90e0bfc0f77d41
| 494,767 |
def values_from_analysis(line):
    """
    Utility method to obtain the instruction count values from the relevant line
    in the final analysis output text file.

    :param line: string from log file
    :return: list of values
    """
    values = line.strip().split('\t')[1]
    values = values.translate({ord(c): None for c in '[],'}).split()
    return [int(val) for val in values]
|
14e12b0f7788909bd5b4aa7ae92e911defc76a04
| 225,195 |
def date_range_to_seconds(datetime1, datetime2):
    """
    Computes the number of seconds between two datetimes.
    """
    return (datetime2 - datetime1).total_seconds()
|
d4f3ae25ccd935b9f2757e1005e4d1935ab31907
| 611,074 |
def keep_file(gcov):
    """
    Some files should not be considered in coverage.

    Files to skip include system files (in /usr), the unit tests themselves
    (in tests/ directories) and the unit test infrastructure (in external_codes/).
    """
    source_file = gcov.tags['Source']
    path_elements = source_file.split('/')
    # Only looking at specific depths of directories. This might need
    # to be expanded, for example if unit test directories contain
    # subdirectories.
    try:
        if path_elements[-2] == 'tests':
            # Unit test, skipping
            return False
        if path_elements[-3] == 'external_codes':
            # External code, skipping
            return False
    except IndexError:
        pass
    if source_file.startswith('/usr/'):
        return False
    return True
|
66fbab9b92ae1b0d7818562b9becd1fbe1b8946e
| 413,291 |
def shell_sort(lst: list):
    """Shell sort implementation; sorts lst in place and returns the
    number of comparisons performed."""
    counter = 0
    length = len(lst)
    interval = length // 2
    while interval > 0:
        for i in range(interval, length):
            temp = lst[i]
            j = i
            counter += 1
            while j >= interval and lst[j - interval] > temp:
                counter += 1
                lst[j] = lst[j - interval]
                j -= interval
            lst[j] = temp
        interval = interval // 2
    return counter
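
# Usage sketch: shell_sort sorts in place and returns the comparison count.
data = [5, 2, 9, 1, 7]
comparisons = shell_sort(data)
assert data == [1, 2, 5, 7, 9]
assert comparisons > 0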
|
e6bb351165bbf82af134c8b9fb59a0771ef4c760
| 274,183 |
def escape(keyword):
    """
    Escape the backtick for keyword when generating an actual SQL.
    """
    return keyword.replace('`', '``')
|
50fbd9098cb8af1f1ca9128a9cfe751f78c52d24
| 597,478 |
def breakable_speed(end_speed, trajectory):
    """Computes the max speed that can brake to reach *end_speed*.

    Args:
        end_speed: The speed at the end of the trajectory in km/h (num)
        trajectory: The distance over which to decelerate in m (num)

    Returns:
        The maximum absolute speed at the beginning of the
        trajectory that ensures a deceleration to *end_speed*
    """
    # The car is about 5m long; it decelerated from 280 to 0
    # in about 12-14 times its length, which would be 60-70m.
    # Assuming a linear decrease in speed, the maximum rate
    # of deceleration is therefore -280/65 = -4.31 km/h/m.
    # To be safe, we use half that: -2.15
    return trajectory * 2.15 + end_speed
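
# Worked example: to be able to brake down to 100 km/h over a 50 m
# trajectory, the entry speed may be at most 50 * 2.15 + 100 = 207.5 km/h.
assert abs(breakable_speed(100, 50) - 207.5) < 1e-9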
|
0c24fb8a6e0a67488d4e7532b1ad505613395698
| 331,754 |
import math
def build_old_mild_beliefs(num_agents, low_pole, high_pole, step):
    """Builds a mildly polarized belief state, in which half of the agents
    have beliefs decreasing from low_pole and half have beliefs increasing
    from high_pole, all by the given step.
    """
    middle = math.ceil(num_agents / 2)
    return [max(low_pole - step * (middle - i - 1), 0) if i < middle
            else min(high_pole - step * (middle - i), 1)
            for i in range(num_agents)]
|
d79de923597ef5c786d7036d36fbd8a5dcc87968
| 549,142 |
def normalization(data, dmin=0, dmax=1, save_centering=False):
    """
    Normalization to the [dmin, dmax] interval, or with centering preserved:

        x' = (dmax - dmin) * (xi - min(x)) / (max(x) - min(x)) + dmin

    Args:
        data (np.ndarray): data for normalization
        dmin (float): left end of the interval
        dmax (float): right end of the interval
        save_centering (bool): if True -- will preserve the data's centering and
            just normalize by the absolute value of the data's minimum

    Returns:
        np.ndarray: normalized data
    """
    # checking for errors
    if dmin >= dmax:
        raise ValueError("Left interval 'dmin' must be less than right interval 'dmax'")
    if save_centering:
        return data / abs(min(data))
    else:
        min_x = min(data)
        max_x = max(data)
        return (data - min_x) * (dmax - dmin) / (max_x - min_x) + dmin
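
# Usage sketch with a plain numpy array (hypothetical values): rescale
# to [0, 1], so the minimum maps to 0 and the maximum to 1.
import numpy as np
scaled = normalization(np.array([2.0, 4.0, 6.0]), dmin=0, dmax=1)
assert np.allclose(scaled, [0.0, 0.5, 1.0])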
|
acfa7aaae1bb7eb5752751f5c929ddb7868ccf49
| 703,816 |
def add_lyrebird_seq(lyrebird, sequence, taxon, inner_name, inner_family, priority=100, repbase_name=None, proofs=None):
    """Simply add a sequence to the lyrebird db.

    :param lyrebird: lyrebird object
    :param sequence: sequence
    :param taxon: taxon
    :param inner_name: inner_name
    :param inner_family: inner_family
    :param priority: priority, default 100
    :param repbase_name: repbase name, default None
    :param proofs: proofs dictionary
    """
    full_name = ":".join((sequence, inner_name, inner_family, taxon))
    if not proofs:
        proofs = {}
    data = {'sequence': sequence,
            'full_name': full_name,
            'priority': priority,
            'family_name': inner_family,
            'file_name': "lyrebird",
            'inner_name': inner_name,
            'repbase_name': repbase_name,
            'taxons': taxon,
            'name_taxon': "%s:%s" % (inner_name, taxon),
            'meta': proofs,
            }
    obj = lyrebird.add_lyrebird_seq(data)
    return obj
|
73ee1f97a0127e1d36e8550faee975bec8c1c08b
| 318,159 |
def model_fn_example(dataset_tensor, batch_size, evaluation):
    """
    Example of the signature of the function that creates the model used in the
    Trainer and Evaluator.

    :param tf.Tensor dataset_tensor: the tensor created from the dataset
    :param int batch_size: the batch size for the model, the same used in the dataset.
    :param bool evaluation: True if this model is for evaluation, False if it is for training.
    :return: returns the graph data, this is what session.run will execute during the training;
        for the test it will only execute the summary operator.
    """
    graph_data = None
    return graph_data
|
d430669078457f803647bd67eee02dd3317bc542
| 251,733 |
def is_terminal(subtree):
    """True if this subtree consists of a single terminal node
    (i.e., a word or an empty node)."""
    return not isinstance(subtree, list)
|
e8300ade3b119297022650ceac791a1c117700e3
| 247,347 |
def is_absolute_url(path_or_url: str):
    """
    Checks if the provided string is a path or an absolute URL (prefixed by
    'http://' or 'https://').

    Parameters
    ----------
    path_or_url
        The path or URL to check.

    Returns
    -------
    bool
        True if the provided string is an absolute URL, False otherwise.
    """
    return path_or_url.startswith(('http://', 'https://'))
|
71229b053eafa44be7d739601cbfebf4d30a3e71
| 135,602 |
def is_permutation(s, t):
    """
    Returns True iff s is a permutation of t.

    Clarifications to ask the interviewer:
    - How are the strings encoded? ASCII? Unicode?
    - What kinds of characters are used? Alphanumeric? Punctuation?

    Here, we assume that all strings are encoded with ASCII (256 chars). Recall
    that the ord() function takes as input an 8-bit ASCII string of length one
    and returns the integer value of the byte in [0, 255]. Otherwise, if the
    string is encoded with Unicode (2^16 = 65536 chars), then the ord() function
    will return the integer representing the Unicode code point of the character
    in [0, 65535].
    """
    #---------------------------------------------------------------------------
    # Algorithm 1: Cross off matching characters from strings.
    #              O(n^2) time, O(n) space.
    #---------------------------------------------------------------------------
    # Algorithm 2: Sort both strings and compare one-by-one.
    #              O(n log n) time, O(n) space.
    #---------------------------------------------------------------------------
    # Algorithm 3: Compare character counts (implemented below).
    #              O(n) time, O(n) space.
    #---------------------------------------------------------------------------
    MAX_CHARS = 256
    char_counts = [0] * MAX_CHARS
    for c in s:
        char_counts[ord(c)] += 1
    for c in t:
        char_counts[ord(c)] -= 1
    return not any(char_counts)
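
# Usage sketch: anagrams count as permutations, case-sensitively.
assert is_permutation("listen", "silent")
assert not is_permutation("Listen", "silent")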
|
7ec4672e6b5e4d9cd0f70cec806908198259300d
| 441,184 |
def compile_partitions(datapkg_settings):
    """
    Given a datapackage settings dictionary, extract dataset partitions.

    Iterates through all the datasets enumerated in the datapackage settings,
    and compiles a dictionary indicating which datasets should be partitioned
    and on what basis when they are output as tabular data resources. Currently
    this only applies to the epacems dataset. Datapackage settings must be
    validated because currently we inject EPA CEMS partitioning variables
    (epacems_years, epacems_states) during the validation process.

    Args:
        datapkg_settings (dict): a dictionary containing validated datapackage
            settings, mostly read in from a PUDL ETL settings file.

    Returns:
        dict: Uses table name (e.g. hourly_emissions_epacems) as keys, and
        lists of partition variables (e.g. ["epacems_years", "epacems_states"])
        as the values. If no datasets within the datapackage are being
        partitioned, this is an empty dictionary.
    """
    partitions = {}
    for dataset in datapkg_settings['datasets']:
        for dataset_name in dataset:
            try:
                partitions.update(dataset[dataset_name]['partition'])
            except KeyError:
                pass
    return partitions
|
df65541171f77e89a037217d0a947f8111082e6e
| 523,115 |
def income_tax(pre_tax_income):
    """
    Returns the total amount of tax at a rate of 35% on any amount over the tax-free allowance of £11,000.

    :param pre_tax_income: The yearly income before tax.
    :return: The amount of tax to be paid.
    """
    tax_allowance = 11000
    tax_rate = 0.35
    # Calculate the tax to be paid on the taxable income that exceeds the tax-free allowance.
    if pre_tax_income <= tax_allowance:
        return 0
    else:
        return (pre_tax_income - tax_allowance) * tax_rate
|
82f08dadf512f441737d7ea3a24aafc8009f5a8b
| 180,031 |
import re
def match_in(key):
    """Is the key the 'in' keyword"""
    rx = re.compile("^_?([iI][nN])_?$")
    match = rx.match(key)
    return bool(match)
|
e4a50a28e53f7acaad8d0bb9c48fe9e0d5ff64d2
| 621,916 |
def find_start_end_dates(dates1, dates2):
    """Find start and end dates between lists (or arrays) of datetime objects
    that do not have the same length.

    The start date will be the later of the two dates.
    The end date will be the earlier of the two dates.

    :param dates1: List or array of datetime objects
    :type dates1: list or numpy.ndarray
    :param dates2: List or array of datetime objects
    :type dates2: list or numpy.ndarray
    :returns: Tuple of start date and end date
    :rtype: tuple
    :raises: ValueError for non overlapping dates
    """
    # convert dates to sets for set intersection
    date1_set = set(dates1)
    date2_set = set(dates2)
    if date1_set.intersection(date2_set):
        # start date
        if dates2[0] > dates1[0]:
            start_date = dates2[0]
        else:
            start_date = dates1[0]
        # end date
        if dates2[-1] > dates1[-1]:
            end_date = dates1[-1]
        else:
            end_date = dates2[-1]
        return start_date, end_date
    else:
        raise ValueError("No overlapping dates.")
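
# Usage sketch with two overlapping daily ranges (hypothetical dates):
# the result is the later start and the earlier end.
from datetime import date, timedelta
a = [date(2020, 1, 1) + timedelta(days=d) for d in range(10)]   # Jan 1-10
b = [date(2020, 1, 5) + timedelta(days=d) for d in range(10)]   # Jan 5-14
assert find_start_end_dates(a, b) == (date(2020, 1, 5), date(2020, 1, 10))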
|
4bf80b329e88891560f8653bc46ed5dc00ab8261
| 434,761 |
import re
def format_title(book_title):
    """
    Return formatted book title:
        "Actual Book Title! (Series identifier #3)" -> "Actual Book Title!"
        "History of 4D Fish Slapping [Silly, Part 4]" -> "History of 4D Fish Slapping"
    Along with CSV formatting.
    """
    # NOTE: the '&amp;' -> '&' replacement assumes HTML-escaped input titles;
    # the original entity was garbled in extraction.
    book_title = ' '.join(book_title.split()).replace('&amp;', '&')
    book_title = re.sub(r'(\(|\[)(.*)(\)|\])', '', book_title)
    book_title = '"' + book_title.replace('"', '""') + '"'
    if book_title[-2:-1] == ' ':
        book_title = book_title[:-2] + '"'
    return book_title
|
4528925679d3d54fef889a6032c571f272075639
| 373,903 |
def GetGuestPolicyUriPath(parent_type, parent_name, policy_id):
    """Return the URI path of an osconfig guest policy."""
    return '/'.join([parent_type, parent_name, 'guestPolicies', policy_id])
|
a30934a84e8e87b53eaddc8667a81329a527aa0c
| 493,770 |
def lldb_find_variable(frame, name):
    """
    Find a variable. If the variable is not found, try looking for it in parent
    frames.
    """
    var = frame.FindVariable(name)
    if var.IsValid():
        return var
    thread = frame.GetThread()
    frame_id = frame.GetFrameID()
    if frame_id + 1 >= thread.GetNumFrames():
        raise ValueError("Variable not found (name=%s)" % name)
    return lldb_find_variable(thread.GetFrameAtIndex(frame_id + 1), name)
|
dae43e8759ed562ca227108df1c47977e0395688
| 203,435 |
def _find_depth(node, current_depth):
    """
    Recursive function traversing a tree and returning the maximum depth.
    """
    if node.left == -1 and node.right == -1:
        return current_depth + 1
    elif node.left != -1 and node.right == -1:
        return _find_depth(node.left, current_depth + 1)
    elif node.right != -1 and node.left == -1:
        return _find_depth(node.right, current_depth + 1)
    elif node.right != -1 and node.left != -1:
        return max(_find_depth(node.left, current_depth + 1),
                   _find_depth(node.right, current_depth + 1))
|
1635223fafb69961185adcfc606ae4e16813263e
| 388,298 |
def is_sorted(l):
    """Checks if the list is sorted."""
    return all(l[i] <= l[i + 1] for i in range(len(l) - 1))
|
548e85d98ee7873a64f14c38376059f2300a564b
| 61,575 |
def get_insight(df):
    """
    Get insight from a youtube_history dataframe transformed by transform().

    Parameters
    ----------
    df : DataFrame
        A youtube_history dataframe transformed by transform()

    Returns
    -------
    df_info : dictionary
        Contains information about a specific youtube_history dataframe transformed by transform()
    """
    df_info = {}
    df_info['period'] = {
        "from": min(df['year']),
        "to": max(df['year'])
    }
    df_info['nbr_watched'] = df.shape[0]
    df_info['first'] = df.iloc[-1, df.columns.tolist().index("titleUrl")]
    df_info['last'] = df.iloc[0, df.columns.tolist().index("titleUrl")]
    return df_info
|
86e586fe1cdad77d0f3248585c4b2feb8716503f
| 654,821 |
def nag_function_is_input(name):
    """
    Check whether a NAG function name is of type input.
    """
    return name.startswith('i')
|
0bc4a63036ea23b29ea7edaa65fbb62bf72d1c98
| 338,176 |
def is_mobile_app(request):
    """Detects requests from the mobile application (it will change some views)."""
    user_agent = request.headers.get("User-Agent", "")
    return "mobile-app-web-view" in user_agent
|
d3f54acf42a724072af6f264a08f43ffb77aacda
| 445,935 |
def get_device_mac(run_command_fn, dev_name, netns_name=None):
    """Find device MAC address."""
    if netns_name:
        command_prefix = "ip netns exec %s " % netns_name
    else:
        command_prefix = ""
    (output, _) = run_command_fn("%scat /sys/class/net/%s/address" %
                                 (command_prefix, dev_name))
    return output[0]
|
19f9a745f107ce2f00717d783e20cfcfe25fe19a
| 668,971 |
def valid_input(prompt, options):
    """Validates user input, repeating the prompt on invalid input.

    Args:
        prompt: A str representing the prompt a player sees
        options: A list of strs representing the allowed inputs

    Returns:
        response: A str representing the player's choice
    """
    while True:
        response = input(prompt)
        if response in options:
            return response
|
d6c01765145e1c942fefb6dd27f0e4b772a908b2
| 457,743 |
import math
def sine_easeout(pos):
    """
    Easing function for animations: Sine Ease Out.
    """
    return math.sin(pos * math.pi / 2)
|
524169e7bd447bfd38c95fc2171e4a33d1508c46
| 554,655 |
def transform(func, data):
    """Apply <func> on each element in <data> and return the list
    consisting of the return values from <func>.

    >>> data = [[10,20], [30,40], [50,60]]
    >>> chart_data.transform(lambda x: [x[0], x[1]+1], data)
    [[10, 21], [30, 41], [50, 61]]
    """
    out = []
    for r in data:
        out.append(func(r))
    return out
|
06fc74738292e3fa191d28212b68a60325ee76e1
| 370,592 |
def str2list(string):
    """Returns the list contained in a string.

    Args:
        string (str): A string containing a list.

    Returns:
        (list): inferred list from the string.
    """
    # Removing brackets "[" "]" from the string
    string = string[1:-1]
    # Splitting at ","
    output = string.split(",")
    # Removing trailing/leading whitespace and residue "'" characters
    output = [s.strip()[1:-1] for s in output]
    return output
|
39289d0aabb3e391f25ecd0040e759ccf33b5d5b
| 145,310 |
def getLimOrderValue(odds, volume):
    """Calculate max price matched (to lay) and min (to back) in order to place a limit order."""
    if volume > 0:  # BACK
        odds_back = odds[odds[:, 1] > 0]
        odds_back = odds_back[odds_back[:, 0].argsort()]
        odds_back = odds_back[::-1]
        odds_back[:, 1] = odds_back[:, 1].cumsum(axis=0)
        pos = odds_back[:, 1] > volume
        lim_value = odds_back[pos][0][0]
    else:  # LAY
        odds_lay = odds[odds[:, 1] < 0]
        odds_lay = odds_lay[odds_lay[:, 0].argsort()]
        odds_lay[:, 1] = odds_lay[:, 1].cumsum(axis=0)
        pos = odds_lay[:, 1] < volume
        lim_value = odds_lay[pos][0][0]
    return lim_value
|
e1d0c08d47d692a563572afa49dc829638c32de1
| 577,976 |
def GetColumn(data, index):
    """Extracts the given column from the dataset.

    data: sequence of rows
    index: which column

    Returns: map from int year to float datum
    """
    res = {}
    for row in data:
        try:
            year = int(row[0])
            res[year] = float(row[index]) / 10.0
        except ValueError:
            pass
    return res
|
be56b64d447f393c7c0c929f56058d2d3ce5a400
| 543,058 |
def make_location(location, protocol):
    """
    Creates a location object given a location and a protocol.

    :param str location: file path
    :param str protocol: protocol, for now only accepting `uri`
    :return: the location subconfiguration
    :rtype: obj
    :raises ValueError: if a protocol other than `uri` is used.
    """
    # elif protocol == "localPath":
    #     return {"uri": location, "locationType": "LocalPathLocation"}
    if protocol == "uri":
        return {"uri": location, "locationType": "UriLocation"}
    else:
        raise ValueError(f"invalid protocol {protocol}")
|
1b8350f5c71017fcdfed90a7965d3c4dbf219785
| 551,460 |
def coalesce_repeated_switches(cmd):
    """Combines known repeated command line switches.

    Repetition of a switch notably happens when both per-test switches and the
    additional driver flags specify different --enable-features. For instance:

        --enable-features=X --enable-features=Y

    Conceptually, this indicates to enable features X and Y. However
    Chrome's command line parsing only applies the last seen switch, resulting
    in only feature Y being enabled.

    To solve this, transform it to:

        --enable-features=X,Y
    """
    def parse_csv_switch(prefix, switch, values_set):
        """If |switch| starts with |prefix|, parses it as a comma-separated
        list of values and adds them all to |values_set|. Returns False if the
        switch was not a match for |prefix|."""
        if not switch.startswith(prefix):
            return False
        values = switch[len(prefix):].split(',')
        for value in values:
            values_set.add(value)
        return True

    def add_csv_switch(prefix, values_set, result):
        if len(values_set) == 0:
            return
        sorted_values = sorted(list(values_set))
        result.append('%s%s' % (prefix, ','.join(sorted_values)))

    result = []
    ENABLE_FEATURES_FLAG = '--enable-features='
    DISABLE_FEATURES_FLAG = '--disable-features='
    enabled_set = set()
    disabled_set = set()
    for switch in cmd:
        if parse_csv_switch(ENABLE_FEATURES_FLAG, switch, enabled_set):
            continue
        if parse_csv_switch(DISABLE_FEATURES_FLAG, switch, disabled_set):
            continue
        result.append(switch)

    # Append any coalesced (comma separated) flags to the end.
    add_csv_switch(ENABLE_FEATURES_FLAG, enabled_set, result)
    add_csv_switch(DISABLE_FEATURES_FLAG, disabled_set, result)
    return result
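
# Usage sketch: repeated --enable-features switches collapse into one,
# with values sorted; unrelated switches keep their relative order.
cmd = ['--foo', '--enable-features=Y', '--enable-features=X']
assert coalesce_repeated_switches(cmd) == ['--foo', '--enable-features=X,Y']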
|
6ad19dd99adf2faf4ed6719b316c0d83dcb1e759
| 116,410 |
def find_vswitch_id_for_name(name, vswitch_map):
    """
    Returns the vSwitch K2 id for a given vSwitch Name.

    :param name: The vSwitch Name
    :param vswitch_map: The vSwitch map from the HMC Topo
    :returns: vSwitch K2 ID
    """
    for vswitch_id, vswitch_k2_obj in vswitch_map.items():
        vswitch_k2_name = vswitch_k2_obj.element.findtext('SwitchName')
        if vswitch_k2_name == name:
            return vswitch_id
    return None
|
200cb64fba04a4704626a35c2a62ea447955903e
| 393,639 |
def get_uid_search_xpath(uid):
    # type: (str) -> str
    """Method to get the XPath expression for a UID that might contain quote characters.

    Parameters
    ----------
    uid : str
        Original UID string with XPath expression.

    Returns
    -------
    str
        Processed XPath expression escaping quote characters using "concat()".
    """
    # NOTE: the quote-escaping below reconstructs a garbled original: single
    # quotes are escaped by splitting the UID and re-joining the single-quoted
    # pieces with a double-quoted "'" literal inside concat(). Double quotes
    # are safe inside the single-quoted pieces.
    if "'" in uid or '"' in uid:
        parts = uid.split("'")
        uid_concat = "concat('" + "', \"'\", '".join(parts) + "')"
        return './/*[@uID=' + uid_concat + ']'
    else:
        return './/*[@uID="' + uid + '"]'
|
f3626595b13a9a9cae44d7f5cb9729602e902db9
| 693,842 |
def is_audit_field(elem, audit_field):
    """Checks if the tag's k-attribute is the field to be audited."""
    return elem.attrib['k'] == audit_field
|
266e8218daf6ac5a5ab3c80e75523535552bb73a
| 398,622 |
def postpend_str(base_word: str, postpend: str, separator: str = '') -> str:
    """Appends str:postpend to str:base_word
    with default str:separator; returns a str.
    """
    return f'{base_word}{separator}{postpend}'
|
6cf5c2e0fbd6f79e84c3b26fc27590904293b9eb
| 656,499 |
def get_level_of_theory(inp_path):
    """
    This helper method returns the level of theory given
    a quantum input file.

    Currently it supports Gaussian inputs only.
    """
    level_of_theory_dict = {
        "um062x/cc-pvtz": "M06-2X/cc-pVTZ"
    }
    with open(inp_path, 'r') as f_in:
        for line in f_in.readlines():
            if '# opt freq ' in line:
                level_of_theory_in_file = line.split(' ')[3].strip().lower()
                return level_of_theory_dict[level_of_theory_in_file]
    # Raise only after scanning the whole file without a match.
    raise Exception('Can not find level of theory in {0}.'.format(inp_path))
|
abcffed64fa31c4a1614e948586dc444df3a7d14
| 557,224 |
import csv
def dict_from_tsvfile(filename):
    """Open a tab-separated two-column file, return it as a str->str dict."""
    d = {}
    with open(filename) as csvfile:
        reader = csv.reader(csvfile, delimiter='\t')
        for row in reader:
            d[row[0]] = row[1]
    return d
|
cece7b5a06cee4bf5c8082f56d2f1e235d021497
| 621,660 |
from datetime import datetime
def prep_datetime(dt):
    """Prepare a datetime for the i14y API.

    If the passed value is a Python datetime object, convert it to an ISO 8601
    string. If None, return None. Otherwise raises a ValueError.
    """
    if isinstance(dt, datetime):
        return dt.isoformat()
    if dt is None:
        return None
    raise ValueError(dt)
|
d91d3dc32e70e4c2c88c142743660d6ee087f1f1
| 161,276 |
import pickle
def _encode(value):
    """Convert a Python object to work with memcached."""
    return pickle.dumps(value)
|
3cb4ae05e655780c52e70d0f1084735ad1e0b291
| 524,903 |
import yaml
def _plotit_loadWrapper(fpath):
    """yaml.safe_load from path"""
    with open(fpath) as f:
        res = yaml.safe_load(f)
    return res
|
3f21044063b61dbba8dc12bb0cc9d9517b7323ee
| 205,396 |
def makeWheel(population):
    """Helper for the RWS in creating a proportional
    distribution among elements in a given array.

    Args:
        population(list): List of int/float

    Returns:
        list: The generated wheel
    """
    wheel = []
    total = sum(population)
    top = 0
    for i, p in enumerate(population):
        f = p / total
        wheel.append((top, top + f, i))
        top += f
    return wheel
|
54eca37b2ce9b9cb86a593a88163cf4b8bf0c572
| 599,461 |
def extract_metas(bs4):
    """Extracting meta tags from a BeautifulSoup object.

    :param bs4: `BeautifulSoup`
    :return: `list` List of meta tags
    """
    meta_tags = []
    metas = bs4.select('meta')
    for meta in metas:
        meta_content = {}
        meta_attrs = [
            'charset',
            'name',
            'content',
            'property',
            'http-equiv',
            'itemprop'
        ]
        for attr in meta_attrs:
            if meta.has_attr(attr):
                meta_content.update({attr: meta[attr]})
        meta_tags.append(meta_content)
    return meta_tags
|
2ffb40bccd1c00762d59c85cfb6b0878a1f75a4f
| 590,136 |
def _rstrip_location_id(device_dict):
    """Strips location_id down to the base hex value with no trailing zeroes."""
    return device_dict['location_id'].split()[0].rstrip('0')
|
fda90dcc158da0e92c8818adee7ff6dc9852fa6c
| 606,647 |
def mag2mom(mw):
    """Converts magnitude to moment - dyne-cm."""
    return 10 ** (3.0 / 2.0 * (mw + 10.7))
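
# Worked example: a moment magnitude of 5.0 corresponds to
# 10 ** (1.5 * (5.0 + 10.7)) ≈ 3.55e23 dyne-cm.
assert abs(mag2mom(5.0) - 10 ** 23.55) < 1e10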
|
c1e1ee38b69d4ba07e48fb9c85eab0f2d1ab11c9
| 240,917 |
def register_assign_cycles(N, case=0):
    """
    Determine cycles that stem from performing
    an in-place assignment of the elements of an array.

    In the following, we cannot naively assign the source index
    to the dest index. If we assigned the value at index 0 to index 1,
    the assignment from index 1 to index 3 would be invalid, for example:

        src:  [3, 0, 2, 1]
        dest: [0, 1, 2, 3]

    Assignment cycles can be broken by using a
    temporary variable to store the contents of a source index
    until dependent assignments have been performed.
    In this way, the minimum number of registers can be used
    to perform the in-place assignment.

    Returns
    -------
    list of lists of tuples
        For example, `[[(0, 2), (2, 0)], [(1, 3), (3, 1)]]`
    """
    dest = range(N)
    src = [(N - case + n) % N for n in dest]
    deps = {d: s for d, s in zip(dest, src) if d != s}
    for di, d in enumerate(dest):
        si = src.index(d)
        if si > di:
            deps[si] = di
    cycles = []
    while len(deps) > 0:
        k, v = deps.popitem()
        cycle = [(k, v)]
        while True:
            try:
                k = v
                v = deps.pop(k)
            except KeyError:
                # Check that the last key we're trying
                # to get is the first one in the cycle
                assert k == cycle[0][0]
                break
            cycle.append((k, v))
        cycles.append(cycle)
    return cycles
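
# Usage sketch: for N=4 with a shift of 1, every index depends on its
# neighbour, so the whole permutation forms a single 4-element cycle.
cycles = register_assign_cycles(4, case=1)
assert len(cycles) == 1
assert len(cycles[0]) == 4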
|
32eae9f1d347de1e520b6f4fc137b4785f76fcc5
| 203,622 |
import re
def replace_pair(pair, sentences, indices):
    """Replace all occurrences of a token pair ('ABC', 'XYZ') with a new symbol 'ABC XYZ'."""
    first, second = pair
    pair_str = ' '.join(pair)
    pair_str = pair_str.replace('\\', '\\\\')
    changes = []
    pattern = re.compile(re.escape(first + '\t' + second))
    for j, freq in indices[pair].items():
        if freq < 1:
            continue
        tokens = sentences[j]
        new_sentence = '\t'.join(tokens)
        new_sentence = pattern.sub(pair_str, new_sentence)
        new_sentence = tuple(new_sentence.split('\t'))
        sentences[j] = new_sentence
        changes.append((j, new_sentence, tokens))
    return changes
|
4c98aa3be524dd0b9eee50030eb49c12d08a24e8
| 566,339 |
def get_header_list(sheet):
    """
    Get a list of header row cell values.

    :param sheet:
    :return: Returns a list of values from the first row in the sheet.
    """
    return [c.value for c in sheet.row(0)]
|
9b9ec77529b8e24296316ab47bf8eadff44ffdf5
| 618,978 |
def get_filetypes_keys(ilist):
    """
    Extracts keys as a list of strings given a list of dicts from parsing a
    .hlsp file.

    :param ilist: The input list of dicts.
    :type ilist: list
    :returns: list - a list of keys as strings.
    """
    return [[*x][0] for x in ilist]
|
cb42d25db0654c5e73c5fd355bb39ef0f6b00fe6
| 340,785 |
def has_material(obj, name):
    """Check if obj has a material with the given name."""
    return name in obj.data.materials
|
3c8ce0daaacf59b49e486d09266d6bc94f805590
| 662,885 |
def is_even(n: int) -> bool:
    # suggested solution
    """Check if a given number is even,
    using modulo 2 and checking if the remainder is zero.
    """
    return n % 2 == 0
|
e49e3a5bfbf565071d363f6eadcd8604bc0cdfe3
| 629,426 |
import math
def _selection_size_heuristic(num_samples):
    """Return the size of a mini-batch, given the size of the dataset."""
    # Increase the size of the mini-batch gradually as the number of samples
    # increases, with a soft cap (using log).
    # But do not return a size larger than the number of samples.
    return min(num_samples,
               int(20.0 *
                   (math.log(num_samples + 10, 2) - math.log(10.0) + 1.0)))
|
605694af4875ac295ae9dea63b1ebda4308c038a
| 680,309 |
def components(pattern):
    """Return the op, x, and y arguments; x and y are None if missing."""
    x = pattern[1] if len(pattern) > 1 else None
    y = pattern[2] if len(pattern) > 2 else None
    return pattern[0], x, y
|
caa7b5265a1377db87a489b10dceec4cd9acd460
| 461,754 |
def return_synapse_positions(X):
    """Filters a NeuroSynapsis matrix and returns only the matrix of synapse positions.

    # Arguments:
        X (numpy array): A matrix in the NeuroSynapsis matrix format.

    # Returns:
        numpy array: A matrix of size samples x dimensions.
    """
    return X[:, 2:5]
|
9091263674cf6e2b25ba543fb1be8218435f23a0
| 513,743 |
import itertools
def disjunktna_unija(*skupovi):
    """Union of sets, ensuring that they are pairwise disjoint."""
    for skup1, skup2 in itertools.combinations(skupovi, 2):
        assert skup1.isdisjoint(skup2)
    return set().union(*skupovi)
|
60e2d2b930f83d5e93ba10f0e1f0b1df6b075815
| 175,727 |
import urllib3
import certifi
def download_file(ctx, path, dest, accept=""):
    """
    Download a file from NuvIoT.

    Parameters
    ----------
    ctx:
        Context object that defines how this method should call the server, including authentication.
    path:
        Path used to make the request; the auth and server information will be taken from the ctx object.
    dest:
        The full path, including the file name and extension, of where the downloaded file should be saved.

    Returns
    -------
    Returns True if the file is downloaded and saved locally, otherwise returns False.
    """
    if ctx.auth_type == 'user':
        headers = {'Authorization': 'Bearer ' + ctx.auth_token}
    else:
        headers = {'Authorization': 'APIKey ' + ctx.client_id + ':' + ctx.client_token}
    if accept != "":
        headers['Accept'] = accept
    url = ctx.url + path
    print("Downloading file: %s" % url)
    chunk_size = 65536
    http = urllib3.PoolManager(cert_reqs='CERT_REQUIRED', ca_certs=certifi.where())
    r = http.request("GET", url, headers=headers, preload_content=False)
    if r.status > 299:
        print('Failed http %d url: %s' % (r.status, url))
        print('Headers: ' + str(headers))
        print('--------------------------------------------------------------------------------')
        print()
        r.release_conn()
        return False
    print('Headers: ' + str(r.headers))
    print('Headers: ' + str(r.headers["Content-Disposition"]))
    with open(dest, 'wb') as out:
        while True:
            data = r.read(chunk_size)
            if not data:
                break
            out.write(data)
    r.release_conn()
    return True
|
ab0ab1274673fa3c1292da53bee49d12a877e90a
| 533,069 |
def divide(value1: int, value2: int) -> float:
    """
    Used to divide the number of cards to check that nothing was lost.
    Handles division by zero by returning 0, and treats 0/0 as 1.
    """
    if value1 == value2:  # good for 0/0
        return 1.0
    else:
        try:
            div_value = value1 / float(value2)
        except ZeroDivisionError:
            div_value = 0.0
        return div_value
|
afc70043f3a60d2cbca1fce5bc0f49ac6fbd046c
| 33,154 |
def get_corepy_output(code, inst):
    """Take an instruction, and return a hex string of its encoding, as encoded by CorePy."""
    hex_list = inst.render()
    hex_str = ""
    for x in hex_list:
        hex_str += "%02x" % (x)
    return hex_str
|
26816cc8d424bfeb6db9e3a404f22a648c0d0d41
| 32,605 |
def _replace_code(code, replacements):
    """
    Replaces code with new code.

    :param str code: code to replace
    :param list replacements: list of tuples containing (start, end, replacement)
    """
    new_code = ''
    index = 0
    for start, end, code_string in sorted(replacements):
        new_code += code[index:start] + code_string
        index = end
    new_code += code[index:]
    return new_code
|
a664d204c06509c7402c4593018463dfd394c5b0
| 310,221 |
def _s(s):
    """Return an empty string for None."""
    return "" if s is None else s
|
bcfacd0f5fc64ecae3db5033a759bad6fe10b328
| 456,536 |
def extract_ip(scan):
    """
    Grabs the IP address from the Nmap scan output.
    """
    scanlist = scan.split()
    ip = scanlist[scanlist.index("report") + 2]
    return ip
|
536cc9727f0839d575714a4cead1dcc474026667
| 437,808 |
def read_script(file):
    """
    Opens a namelist file within a context manager and reads it.

    Parameters
    ----------
    :param file: string
        Path to the namelist file you wish to open.
    :return: str
        The file's contents.
    """
    with open(file, 'r') as script:
        return script.read()
|
c7e3f4ae9f2f8fcdb32359de06ad974d3c6e5af1
| 319,933 |
def get_cont_dist(ins_1, ins_2, norm=2):
    """Get dimensional distance for continuous data.

    Parameters: ins_1 (nd-array) - array 1
                ins_2 (nd-array) - array 2
                norm (int) - type of norm to use, default is 2

    Returns: dist (array) - the dimensional distance between ins_1 and ins_2
    """
    dist = abs(ins_1 - ins_2) ** norm
    return dist
|
30c873f6e09b9dc0cb2b435281596c6bb3a65e18
| 461,147 |
import pwd
def _getuid(user):
    """Return uid for user."""
    return pwd.getpwnam(user).pw_uid
|
406ae3af4909ed8617274cb3fabe1ea05ef2755a
| 470,327 |
def select(P):
    """
    The normal selection strategy.

    INPUT:

    - ``P`` -- a list of critical pairs

    OUTPUT:

    an element of P

    EXAMPLES::

        sage: from sage.rings.polynomial.toy_d_basis import select
        sage: A.<x,y> = PolynomialRing(ZZ, 2)
        sage: f = -y^2 - y + x^3 + 7*x + 1
        sage: fx = f.derivative(x)
        sage: fy = f.derivative(y)
        sage: G = [f, fx, fy]
        sage: B = set((f1, f2) for f1 in G for f2 in G if f1 != f2)
        sage: select(B)
        (-2*y - 1, 3*x^2 + 7)
    """
    min_d = 2**20
    min_pair = 0, 0
    for fi, fj in sorted(P):
        d = fi.parent().monomial_lcm(fi.lm(), fj.lm()).total_degree()
        if d < min_d:
            min_d = d
            min_pair = fi, fj
    return min_pair
|
aaf2d7445b8715b9a98013c8c23d4f0dac78fe55
| 526,055 |
def _dict_of_targetid(submitid, charid, journalid):
    """
    Given a target of some type, return a dictionary indicating what the 'some
    type' is. The dictionary's key will be the appropriate column on the Report
    model.
    """
    if submitid:
        return {'target_sub': submitid}
    elif charid:
        return {'target_char': charid}
    elif journalid:
        return {'target_journal': journalid}
    else:
        raise ValueError('no ID given')
|
db4698f0732cd30a116c92d2b6350e798527af70
| 623,296 |
def clean_whitespace(df, columns):
    """
    Clean leading/trailing whitespace from specific columns.

    Args:
        df (table): pandas dataframe
        columns (list): column labels

    Returns:
        dataframe
    """
    df[columns] = df[columns].apply(lambda x: x.str.strip())
    return df
|
29ceeea3ad4784399579a0649e1ae2deb2bc202f
| 303,315 |