content (stringlengths 39–9.28k) | sha1 (stringlengths 40) | id (int64 8–710k)
---|---|---|
def join_qualified(region, view):
"""
Given a region, expand outward on periods to return a new region defining
the entire word, in the context of Elm syntax.
For example, when the region encompasses the 'map' part of a larger
'Dict.map' word, this function will return the entire region encompassing
'Dict.map'. The same is true if the region is encompassing 'Dict'.
Recursively expands outward in both directions, correctly returning longer
constructions such as 'Graphics.Input.button'
"""
starting_region = region
prefix = view.substr(region.a - 1)
suffix = view.substr(region.b)
if prefix == '.':
region = region.cover(view.word(region.a - 2))
if suffix == '.':
region = region.cover(view.word(region.b + 1))
if region == starting_region:
return region
else:
return join_qualified(region, view)
|
c95c45df94d1514ed9f829bab3a2f9bfabbb3c9d
| 610,274 |
def time_into_milliseconds(time_string: str) -> int:
"""Utility function to turn time string into milliseconds from H:M:S.f format."""
hours = int(time_string[:2])
mins = int(time_string[3:5])
seconds = float(time_string[6:])
return int(hours * 3600000 + mins * 60000 + seconds * 1000)
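# Illustrative usage (hypothetical value, assuming a zero-padded "HH:MM:SS.fff"-style string):
# >>> time_into_milliseconds("01:02:03.5")
# 3723500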
|
7dc93f762d5ea8d4bf8dadc25b202c6277c4730d
| 106,644 |
from datetime import datetime
def get_number_of_seconds_before_time(time):
"""
Get number of seconds until time.
:param time: number of seconds into the day.
:return: seconds until time.
"""
current_time = datetime.now()
current_seconds = (
current_time.hour * 60 * 60 + current_time.minute * 60 + current_time.second
)
if current_seconds > time:
difference = time + 60 * 60 * 24 - current_seconds
else:
difference = time - current_seconds
return difference
|
5fab77db6d26b146fe72f832e96492983de01c34
| 543,068 |
def _format_function_call(fn_name, *v, **k):
"""
Return a Python function call as a string.
Keywords are sorted.
EXAMPLES::
sage: from sage.geometry.polyhedron.backend_normaliz import _format_function_call
sage: _format_function_call('foo', 17, hellooooo='goodbyeeee')
"foo(17, hellooooo='goodbyeeee')"
"""
args = [ repr(a) for a in v ] + [ "%s=%r" % (arg, val) for arg, val in sorted(k.items()) ]
return "{}({})".format(fn_name, ", ".join(args))
|
fd2904323a88d7fbd3b6df9649fe7489c6d2796c
| 566,761 |
def get_all_items(combo_box):
"""
    Gets all the items in a combobox as a list. Returns an empty list if there are no
    items in the combobox.
:param combo_box: your QComboBox
:return: items of the QComboBox
"""
items = []
for item in [combo_box.itemText(i) for i in
range(combo_box.count())]:
if item is not None:
items.append(item)
return items
|
e55b8b87b46eead47766ff69d02383048a67894d
| 425,873 |
def clean_chars(value, cleanchars):
""" Remove chars for cleaning
:param value: String to be cleaned
:param cleanchars: Characters to remove from value
:return value: Cleaned string
"""
for char in cleanchars:
value = value.replace(char, '')
return value
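# Illustrative usage (hypothetical values):
# >>> clean_chars("a-b_c", "-_")
# 'abc'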
|
480d921152f9bc3e6491b4a015d61f53932dd16c
| 62,090 |
def operating_cf(cf_df):
"""Checks if the latest reported OCF (Cashflow) is positive.
Explanation of OCF: https://www.investopedia.com/terms/o/operatingcashflow.asp
cf_df = Cashflow Statement of the specified company
"""
cf = cf_df.iloc[cf_df.index.get_loc("Total Cash From Operating Activities"),0]
    return cf > 0
|
ed6a849fa504b79cd65c656d9a1318aaaeed52bf
| 2,390 |
def _notebook_cell_is_clean(cell):
"""Check if a single notebook cell is clean."""
return cell["cell_type"] != "code" or (
not cell["outputs"] and not cell["execution_count"]
)
|
7fee8655e455e8d02435d64ad546accb2ef3301e
| 402,219 |
def table_exists(db, name):
"""Check if the table 'name' exists in the database."""
name = str(name)
query = "SELECT 1 FROM sqlite_master WHERE type='table' and name=?"
return db.execute(query, (name,)).fetchone() is not None
|
9b0d8f7534266dc6cdc530425bda336915d23583
| 256,552 |
def not_found(params, start_response):
"""Not found response."""
status = '404 NOT FOUND'
start_response(status, [('Content-Type', 'text/plain'),
('Access-Control-Allow-Origin', '*'),
('Content-Length', '15')])
return ['Page Not Found!']
|
d43248aaea3ace410d11835c427ccbf76039eb58
| 537,299 |
from typing import List
def filter_list(unfiltered_list: List) -> List:
"""Filters empty lists and None's out of a list.
:param unfiltered_list: A list to filter.
:return: A list containing no empty lists or None's
"""
filtered_list = [element for element in unfiltered_list
if element != [] and element is not None]
return filtered_list
|
d8d91984dc97960e704c77e487a93690657b88fe
| 123,292 |
def object_access_allowed(groups, path):
"""
Decide if a user is allowed to access a path
"""
for group in groups.split(','):
if path.startswith(group):
return True
return False
|
17dc4bf72019042cebd5c48097c8744394cf61f9
| 125,963 |
from io import StringIO
import csv
import json
def convert_csv_content_to_json(csv_content):
"""
    Convert CSV content to a JSON string.
    Returns (json_string, None) on success, or (None, err_msg) on failure.
"""
if not csv_content:
return (None, 'No content specified')
# Read the CSV content into python dicts
#
string_io = StringIO(csv_content)
rows = None
with string_io as f:
reader = csv.DictReader(f)
rows = list(reader)
# Failed to read csv into rows
#
if not rows:
return (None, 'Failed to read content as a csv')
# Format the rows into a JSON string
#
try:
json_content = json.dumps(rows)
except TypeError:
return (None, 'Failed to convert csv content to JSON')
return (json_content, None)
|
0fefdaffb3f1085b931aebccd4c346e7aafd09dc
| 447,520 |
from typing import Any
def default_function(n: int, value: Any=None):
"""
Creates a dummy default function to provide as default value when
a func parameter is expected.
`n` is the number of parameters expected.
`value` is the default value returned by the function
"""
if n == 0:
return lambda: value
elif n == 1:
return lambda _: value
elif n == 2:
return lambda _,__: value
else:
raise Exception('Default function with {} parameters is not supported.'.format(n))
|
d8245fed39e423392acfbffd775379a2e15a8848
| 696,184 |
def milliseconds_to_timecode(milliseconds):
""" Takes a time in milliseconds and converts it into a time code.
"""
hours = milliseconds // 3600000
milliseconds %= 3600000
minutes = milliseconds // 60000
milliseconds %= 60000
seconds = milliseconds // 1000
milliseconds %= 1000
return "{}:{:02}:{:02}.{:02}".format(hours, minutes, seconds, milliseconds // 10)
|
b7e6c522bbbbf115b0cebd7046ba7aa41f207e6c
| 124,561 |
def parsing_explore_lines(explore_list, loc_list):
"""
This function parses a list of explore lines into a grouped structure of explore lines.
:param explore_list: the list representing raw explore file.
:type explore_list: list
    :param loc_list: the list of dividers; each divider is the index of a join in the explore list
:type loc_list: list
:return: a grouped and nested list representing the explore structure with joins.
"""
grouped_explore = []
grouped_explore.append(explore_list[:loc_list[0]])
for r in range(len(loc_list)-1):
grouped_explore.append(explore_list[loc_list[r]: loc_list[r+1]])
grouped_explore.append(explore_list[loc_list[-1]:])
return grouped_explore
|
272d6a2f5f04207a6dd18a78f8cc56967d5ed90f
| 653,775 |
def _diff_for(gitrepo, rev_range=None):
"""
Get a list of :py:class:`git.diff.Diff` objects for the repository.
:param git.repo.base.Repo gitrepo: Git repository
:param RevRangePair rev_range: optional revision to use instead of the
Git index
"""
if rev_range:
return rev_range.a.diff(rev_range.b)
else:
# Assume we want a diff between what is staged and HEAD
try:
return gitrepo.head.commit.diff()
except ValueError:
return None
|
490fa3dc98354810d399d4d824534512a41eeeb4
| 288,944 |
import functools
import string
import unicodedata
def unaccent(text):
"""Translate accented characters to their non-accented equivalents"""
@functools.cache
def unaccent_c(char):
if char in string.printable:
return char
elif unicodedata.combining(char):
return None
else:
return unicodedata.normalize("NFKD", char)[0]
return "".join( (c for c in (unaccent_c(char) for char in text) if c is not None) )
|
1d49eb2089335692d3fb6c28b4a9a8575341945a
| 641,959 |
def print_topics(model, feature_names, n_top_words, topic_prev):
"""Prints the topic information. Takes the sklearn.decomposition.LatentDiricheltAllocation lda model,
the names of all the features, the number of words to be printined per topic, a list holding the freq
of each topic in the corpus"""
i = 0
message_list =[]
for topic_idx, topic in enumerate(model.components_):
message = "%f Topic #%d: " % (topic_prev[i],topic_idx)
i +=1
list_feat = [feature_names[i]
for i in topic.argsort()[:-n_top_words - 1:-1]]
feat_freq = sorted(topic, reverse=True)
for j in range(0, len(list_feat)):
list_feat[j] += " " + str(round(feat_freq[j], 3)) + ","
message += " ".join(list_feat)
message_list.append(message)
print(message)
print()
return message_list
|
3228e57cf6972a15c17f4a2efd594dfbf6d34d2b
| 613,419 |
def clean_species_name(common_name):
"""
Converts various forms of "human" to the token "human", and various forms
of "empty" to the token "empty"
"""
_people_tags = {
'Bicycle',
'Calibration Photos',
'Camera Trapper',
'camera trappper',
'camera trapper',
'Homo sapien',
'Homo sapiens',
'Human, non staff',
'Human, non-staff',
'camera trappe',
'Human non-staff',
'Setup Pickup',
'Vehicle'
}
PEOPLE_TAGS = {x.lower() for x in _people_tags}
_no_animal_tags = {'No Animal', 'no animal', 'Time Lapse', 'Camera Misfire', 'False trigger', 'Blank'}
NO_ANIMAL_TAGS = {x.lower() for x in _no_animal_tags}
common_name = common_name.lower().strip()
if common_name in PEOPLE_TAGS:
return 'human'
if common_name in NO_ANIMAL_TAGS:
return 'empty'
return common_name
|
96364758f81754cf4ee7de6a436a1cd82960e514
| 109,532 |
def da_to_string(da):
"""Generate a human readable version of a 1D :class:`xarray:xarray.DataArray`.
Parameters
----------
da : :class:`xarray:xarray.DataArray`
The :class:`xarray:xarray.DataArray` to display. Must only have one dimension.
Returns
-------
str_val : str
String with human readable version of `da`.
"""
assert len(da.dims) == 1
str_val = da.to_series().to_string()
return str_val
|
39ff9ed6ca86b3a7e3b2df053dd2e3e22dbeaa7d
| 430,662 |
from functools import reduce
from operator import mul
from operator import truediv
def divideLists(lst1, lst2):
"""Return the elementwise quotient of lst1 and lst2.
>>> divideLists([1, 2, 3],[4, 5, 6])
[0.25, 0.4, 0.5]
>>> divideLists([1, 2, 3],[4, 0, 6])
Traceback (most recent call last):
...
AssertionError: Second list has entry equal to zero.
"""
assert len(lst1) == len(lst2), "The lists have to be the same length."
assert reduce(mul, lst2) != 0, "Second list has entry equal to zero."
return list(map(truediv, lst1, lst2))
|
7e48d36240da128c1e5965cc545ba99b50d4693e
| 142,429 |
def provider2network(provider):
""" Convert a MADIS network ID to one that I use, here in IEM land"""
if provider in ['KYMN']:
return provider
if provider == 'MesoWest':
return 'VTWAC'
if len(provider) == 5 or provider in ['KYTC-RWIS', 'NEDOR']:
if provider[:2] == 'IA':
return None
return '%s_RWIS' % (provider[:2],)
print("Unsure how to convert %s into a network" % (provider,))
return None
|
71374cda7b66c043d3993f878644602db2e5c5ae
| 665,462 |
def check_possible_win(piece, win, board_state):
"""Returns a bool depending on if a win with a
given piece is on the board"""
# Check if the given piece has the winning combination on board
for square in win:
if "{}{}".format(piece, square) not in board_state:
return False
return True
|
f845626e7c5677b7c529cc923950644319c251c3
| 102,679 |
def _SimplifyContacts(contacts):
"""Returns one contact if all 3 contacts are equal, and all 3 contacts otherwise."""
if contacts.registrantContact == contacts.adminContact and contacts.registrantContact == contacts.technicalContact:
return contacts.registrantContact
return contacts
|
c7d988e6f419e715d5831a30b9c6a08da4f46ec6
| 419,386 |
def figure_linguistic_type(labels):
"""
Gets linguistic type for labels
Parameters
----------
labels : list of lists
the labels of a tier
Returns
-------
the linguistic type
"""
if len(labels) == 0:
return None
elif len(labels) == 1:
return labels[0][0]
label = min(labels, key=lambda x: x[1])
return label[0]
|
14151917bb9ad8f49717ce6c436c496ee3ccfc77
| 683,084 |
def duration_from_toml_value(value):
"""converter for dataset split durations.
If value is -1, that value is returned -- specifies "use the remainder of the dataset".
Other values are converted to float when possible."""
if value == -1:
return value
else:
return float(value)
|
651cb2829fd5372db79afc66c5889835a3055cd4
| 152,807 |
def checkGameStatus(board, max_tile=2048):
"""
Update the game status by checking if the max. tile has been obtained.
Parameters:
board (list): game board
max_tile (int): tile number required to win, default = 2048
Returns:
(str): game status WIN/LOSE/PLAY
"""
flat_board = [cell for row in board for cell in row]
if max_tile in flat_board:
# game has been won if max_tile value is found
return "WIN"
for i in range(4):
for j in range(4):
# check if a merge is possible
if j != 3 and board[i][j] == board[i][j+1] or \
i != 3 and board[i][j] == board[i + 1][j]:
return "PLAY"
if 0 not in flat_board:
return "LOSE"
else:
return "PLAY"
|
6750ddbcd354366731119835da65457e1d0af778
| 149,271 |
def parse_hex(text):
"""Parse a hex number from text or fail
:param text: Text to parse hex number from
:type text: str
:return: Parsed hex number
:rtype: int
:raise RuntimeError: If text does not contain a valid hexadecimal number
"""
try:
return int(text, 0)
except ValueError as ex:
raise RuntimeError(f"Could not parse hex number '{text}': {ex}")
|
c7f11c3a8e436c857c95c3ee833318cfd41f9aec
| 519,659 |
def tick_maker(old_ticks,n):
"""Set n-1 evenly spaced tick marks to make axes look prettier"""
if n < 2:
raise ValueError('n must be greater than 1')
n = n-1
delta = (old_ticks[-1] - old_ticks[0])/n
new_ticks = []
for i in range(n):
new_ticks.append(old_ticks[0] + i*delta)
new_ticks.append(old_ticks[0] + n*delta)
return new_ticks
|
e1319a1300c3d9840a02085e79ee3a26506b617c
| 204,817 |
def norm_int_dict(int_dict):
"""Normalizes values in the given dict with int values.
Parameters
----------
    int_dict : dict
A dict object mapping each key to an int value.
Returns
-------
dict
A dict where each key is mapped to its relative part in the sum of
all dict values.
Example
-------
>>> dict_obj = {'a': 3, 'b': 5, 'c': 2}
>>> result = norm_int_dict(dict_obj)
>>> print(sorted(result.items()))
[('a', 0.3), ('b', 0.5), ('c', 0.2)]
"""
norm_dict = int_dict.copy()
val_sum = sum(norm_dict.values())
for key in norm_dict:
norm_dict[key] = norm_dict[key] / val_sum
return norm_dict
|
a186582b1e0fac6a16de9603e1761a140e16e4f3
| 273,509 |
def _to_reconfigure_payload(**kwargs):
"""
Utility to convert kafka message to reconfigure API payload.
:param action_params: Action parameters in key/value pairs.
key - param name, value - param value
:type action_params: ``dict``
:param invoker-selector: Possible values: cpu, gpu. If supplied, implies
reconfiguration at the NFVI level to cpu/gpu node
:type invoker-selector: ``str``
:param action-antiaffinity: Possible values: 'true', 'false'. If 'true', do
not allow two actions to run on same NFVI node.
Relevant with invoker-selector
:type action-antiaffinity: ``str``
"""
if not kwargs.get('invoker-selector'):
payload = {
'coe_action_params': {
'action_params': kwargs.get('action_params', {})
}
}
else:
payload = {
'coe_action_params': {
'action_params': kwargs.get('action_params', {}),
'annotations': [{
'key': 'placement',
'value': {
'invoker-selector': {'processor': kwargs['invoker-selector']},
'action-antiaffinity': kwargs.get('action-antiaffinity',
'false')
}
}]
},
}
return payload
|
d14c8cd2a9152ae7123a00f877c6e6d37c42b0a1
| 504,651 |
def bin_string_to_bytearray(binary_string: str) -> bytearray:
"""Converts a binary string to a bytearray
Parameters
----------
binary_string: str
The binary string used to build the bytearray
Returns
-------
bytearray
The generated bytearray
"""
    # Pad with trailing zeros if the binary string length is not divisible by 8 (one byte)
binary_string += ((8 - len(binary_string)) % 8) * '0'
# Generate the bytearray
bytes_array = bytearray()
for binary_byte in [binary_string[i:i + 8] for i in range(0, len(binary_string), 8)]:
bytes_array.append(int(binary_byte, 2))
return bytes_array
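# Illustrative usage (hypothetical values; note that strings whose length is not a
# multiple of 8 are padded with trailing zeros before conversion):
# >>> bin_string_to_bytearray("0000000100000010")
# bytearray(b'\x01\x02')
# >>> bin_string_to_bytearray("1")    # padded to "10000000"
# bytearray(b'\x80')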
|
5f1af3a46ee97ad23e3d0a6cb9ded9e1e8568a2b
| 18,981 |
import typing
def bool_converter(obj: typing.Any) -> bool:
"""Returns False if argument is 'false' string or zero."""
return (obj != 0) and (str(obj).lower() != "false")
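# Illustrative usage (hypothetical values; note the string "0" is not treated as falsy):
# >>> bool_converter("false"), bool_converter(0), bool_converter("no"), bool_converter("0")
# (False, False, True, True)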
|
ec8873a52135c895f39eb1f3f851945376a3b045
| 326,140 |
def compute_gcd(x, y):
"""Compute gcd of two positive integers x and y"""
if x < y:
return compute_gcd(y, x)
residual = x % y
if residual == 0:
return y
if residual == 1:
return 1
return compute_gcd(y, residual)
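# Illustrative usage (hypothetical values):
# >>> compute_gcd(12, 18)
# 6
# >>> compute_gcd(7, 3)
# 1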
|
c896b1c6939fb45b91cc47061312829f7edbd5b8
| 450,220 |
import base64
def xor_encode(secret_text):
"""
Encode text to the xor encoding used by WAS
"""
# Create a byte array, containing the characters xor'ed with underscore
xor_byte_array=bytearray([])
for char in secret_text:
decimalOfChar=ord(char)
decimalOfUnderScore=ord('_')
xor_value = decimalOfChar ^ decimalOfUnderScore
xor_byte_array.append(xor_value)
## use base64 to encode it, and then decode those bytes as ascii
encoded_secret_text='{xor}' + base64.encodebytes(xor_byte_array).decode('ascii').rstrip()
return encoded_secret_text
|
c349dc2ade883b2ef4a0b621e3ac9228e80e8c7a
| 381,606 |
def count_probes(probes):
"""
    Sums the probe counts in the probes array
    :param probes: array of probe counts
    :return: float total count
"""
count = 0
for n in probes:
count += n
return float(count)
|
423371e99c9c4757892ba1503aa212fd88229a95
| 342,859 |
import click
def output_path(file_okay=False, dir_okay=False):
"""Add click.argument for output path.
:param file_okay: is output a file, defaults to False
:type file_okay: bool, optional
:param dir_okay: is output a directory, defaults to False
:type dir_okay: bool, optional
:return: click.argument decorator
:rtype: click.Argument
"""
return click.argument(
"output_path",
metavar="OUTPUT",
type=click.Path(
exists=False, file_okay=file_okay, dir_okay=dir_okay, writable=True
),
)
|
1c2d3ecb37ac9765099f127d4929065358fa8907
| 514,039 |
def splitName(name):
"""Splits a glyph name into a (baseName string, suffix string) tuple."""
baseName, suffix = '', ''
nameElements = name.split('.')
if len(nameElements) > 0:
baseName = nameElements[0]
if len(nameElements) > 1:
suffix = '.'.join(nameElements[1:])
else:
suffix = ''
return baseName, suffix
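# Illustrative usage (hypothetical glyph names):
# >>> splitName("a.sc.alt")
# ('a', 'sc.alt')
# >>> splitName("a")
# ('a', '')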
|
b31476f48c09d11ce9bbb324a60f958a44f33420
| 237,062 |
def mc_estimates(run_sum, run_sum_squares, n):
"""Returns sample mean and variance from sum and sum of squares
:param run_sum: float
The sum of the samples
:param run_sum_squares: float
The sum of the squares of the samples
:param n: int
The number of samples
:return: 2-tuple of floats
The sample mean and the sample variance
"""
sample_mean = run_sum / n
sample_var = ((run_sum_squares - (run_sum * run_sum) / n) / (n - 1))
return sample_mean, sample_var
|
b416b8187009685680c41aa47dc0b17b1b22b682
| 494,899 |
import typing
def list_value(value: str) -> typing.List[str]:
"""Convert string value from config file to list of strings. Separator is
space. Supports newline.
"""
if not value:
return list()
    return [v.strip() for v in value.replace('\n', ' ').strip().split(' ') if v.strip()]
|
7b1fbf49dfa1f17414ba72f137e6d3089573c4f2
| 478,920 |
def binary_str(num):
""" Return a binary string representation from the posive interger 'num'
:type num: int
:return:
Examples:
>>> binary_str(2)
'10'
>>> binary_str(5)
'101'
"""
# Store mod 2 operations results as '0' and '1'
bnum = ''
while num > 0:
bnum = str(num & 0x1) + bnum
num = num >> 1
return bnum
|
dde400323fccb9370c67197f555d9c41c40084a6
| 705,979 |
from typing import Dict
def get_input_data(input_section: Dict) -> str:
"""Gets playbook single input item - support simple and complex input.
Args:
input_section (dict): playbook input item.
Returns:
(str): The playbook input item's value.
"""
default_value = input_section.get('value')
if isinstance(default_value, str):
return default_value
if default_value:
complex_field = default_value.get('complex')
if complex_field:
return f"{complex_field.get('root')}.{complex_field.get('accessor')}"
return default_value.get('simple')
return ''
|
a1d7d3fb4d5d3cd0e5ce46f7d1c685495c71981f
| 53,868 |
def mi(self):
"""Return mutual information between events and observations using equation I(X;Y) = H(X) - H(X|Y)"""
return self.entropy() - self.entropy(conditional = True)
|
18624d4926acf7519dc1b2f9987e11480688be85
| 44,496 |
from typing import Any
from typing import Optional
import torch
def _check_are_tensors(actual: Any, expected: Any) -> Optional[AssertionError]:
"""Checks if both inputs are tensors.
Args:
actual (Any): Actual input.
        expected (Any): Expected input.
Returns:
(Optional[AssertionError]): If check did not pass.
"""
if not (isinstance(actual, torch.Tensor) and isinstance(expected, torch.Tensor)):
return AssertionError(f"Both inputs have to be tensors, but got {type(actual)} and {type(expected)} instead.")
return None
|
e0f2a7cb3fa0159d06bbc82ff002eeeb363e54e8
| 162,593 |
def query( otherwise, uri ):
"""Extract the query component."""
first = uri.find('?')
if -1 == first:
return otherwise
last = uri.find('#')
if -1 == last:
return uri[1+first:]
elif last < first:
return uri[1+first:last]
return otherwise
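# Illustrative usage (hypothetical URIs):
# >>> query(None, "http://host/path?a=1#frag")
# 'a=1'
# >>> query('', "http://host/path")
# ''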
|
b3b02d4ec605ee0ebdaaf1ef8d917b281c523ee9
| 578,430 |
def sort_file(fh):
""" sort the contents of a file handle. """
lst = list(fh.readlines())
lst.sort()
return lst
|
8a8bf189e4294414024285187c66cd303dad2768
| 23,711 |
def cifrar(palabra, cifrador):
"""Recibe un string con texto sin cifrar y un diccionario que contiene el cifrador.
Regresa el string cifrado.
"""
#se declara e instancia una variable de tipo string donde se va a asignar el resultado del cifrado.
string_cifrado = ''
#loop que recorre cada caracter de la palabra.
for char in palabra:
#se asegura que el caracter sea parte de las llaves dentro de nuestro diccionario.
if char in cifrador.keys():
string_cifrado += cifrador[char]
#de lo contrario, simplemente se agrega el caracter normal.
else:
string_cifrado += char
return string_cifrado
|
068a089097546529cd0bc36068ea7b5c5e009f6d
| 53,238 |
def istoken(docgraph, node_id, namespace=None):
"""returns true, iff the given node ID belongs to a token node.
Parameters
----------
node_id : str
the node to be checked
namespace : str or None
If a namespace is given, only look for tokens in the given namespace.
Otherwise, look for tokens in the default namespace of the given
document graph.
"""
if namespace is None:
namespace = docgraph.ns
return namespace+':token' in docgraph.node[node_id]
|
79f4c553ae2918b4295dd443c2bc6e0866b040e7
| 633,029 |
def oss_installer_url() -> str:
"""
Return the url to an installer for DC/OS OSS.
"""
return 'https://downloads.dcos.io/dcos/stable/dcos_generate_config.sh'
|
8b395dfce5536afde0a230c5b3e3f5afcab468fb
| 490,452 |
def get_field(field, row, field_map, default_value=None):
"""Access fields in a row according to the specified field map.
Args:
field: field to extract.
row: row to extract field from.
field_map: field map.
default_value: value to be returned in case the field is not mapped.
Returns:
specified field value if the field is mapped, the default value otherwise.
"""
field_info = field_map.get(field)
if not field_info:
return default_value
else:
return row[field_info['idx']]
|
614665ea96a37722ddf30506157bc8c893ded6b3
| 435,342 |
def __load_txt(file_path: str):
"""
Load a txt file
:param file_path: Path to file to load
:return: Data from txt in formatted list
"""
with open(file_path) as f:
data = []
for line in f:
line = line.strip("\n")
line = line.split(" ")
if len(line) == 1:
line = line[0]
data.append(line)
return data
|
f033afac35295fdf55bf49b74f5bed93fa7646f5
| 524,336 |
import json
def get_json_bytes_for_sign_data(sign_data):
"""
Convert SignData object to JSON bytes for signing.
Args:
sign_data (SignData): The sign data object.
Returns:
bytes: The sign data in JSON bytes.
"""
return json.dumps(sign_data._asdict(),
sort_keys=True,
separators=(',', ':')).encode()
|
c76e64d3bba8d7ebb7579bfa97f53ae7ce1a19f3
| 618,274 |
def _none(ev, state_args=None, **kwargs):
"""A pass-none filter callable."""
return False
|
eaba017fc7081eddf21ced2236f9b27b372f1a7b
| 513,828 |
def _get_sysfs_netdev_path(pci_addr, vf_interface=False):
"""Get the sysfs path based on the PCI address of the device.
Assumes a networking device - will not check for the existence of the path.
    :param pci_addr: the pci address of the device (PF or VF).
:param vf_interface: True if the pci_addr is a VF,
False if the pci_addr is a PF.
:returns: the sysfs path corresponds to the pci_addr.
"""
if vf_interface:
return "/sys/bus/pci/devices/%s/physfn/net" % pci_addr
return "/sys/bus/pci/devices/%s/net" % pci_addr
|
cb850f8fe329c1c465e9726191e00896b94c7c39
| 296,819 |
def find_left_anchor_index(fragment_info, fragments):
"""
Description:
Use the fragment information to find which fragment is the left anchor
:param fragment_info: [list[dict]] the list of fragment information
:param fragments: [list] the list of fragments being searched
:return: left_anchor_index: [int] the index location in the fragment list for the left anchor fragment
"""
for info in fragment_info:
if len(info['left_matches']) == 0:
left_anchor_index = fragments.index(info['anchor'])
return left_anchor_index
|
502473f7fee00a5ccd1ed3d6cf5c938e8c4d2041
| 703,430 |
def prompt_dictionary(choices, default_style=1, menu_comments={}):
"""
Prompt the user to chose one of many selections from a menu.
Parameters
----------
choices : dictionary
Keys - choice numbers (int)
Values - choice value (str), this is what the function will return
default_style : int, optional
Choice to select if the user doesn't respond
menu_comments : dictionary, optional
Additional comments to append to the menu as it is displayed
in the console.
Keys - choice numbers (int)
Values - comment (str), what will be appended to the
corresponding choice
"""
# Build the menu that will be displayed to the user with
# all of the options available.
prompt = ""
for key, value in choices.items():
prompt += "%d %s " % (key, value)
if key in menu_comments:
prompt += menu_comments[key]
prompt += "\n"
# Continue to ask the user for a style until an appropriate
# one is specified.
response = -1
    while response not in choices:
try:
text_response = input(prompt)
# Use default option if no input.
if len(text_response.strip()) == 0:
response = default_style
else:
response = int(text_response)
except ValueError:
print("Error: Value is not an available option. 0 selects the default.\n")
return choices[response]
|
9f59b53c37e6b5139281384a203f73ddfd9b516c
| 302,774 |
def to_multiset(token):
""" Creates a multiset from a string. A multiset is a set of tuples (c, n) where c is a character and n is a count of that character in the string."""
counts = {}
for ch in token:
try:
count = counts[ch]
except KeyError:
count = 0
counts[ch] = count + 1
multiset = set()
for ch in counts:
multiset.add((ch, counts[ch]))
return frozenset(multiset)
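# Illustrative usage (hypothetical token):
# >>> sorted(to_multiset("hello"))
# [('e', 1), ('h', 1), ('l', 2), ('o', 1)]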
|
26d8440c7db564faf6bcfd19501f5cede945b86f
| 255,341 |
def is_branch_revision_number(rev):
"""Return True iff REV is a branch revision number.
REV is a CVS revision number in canonical form (i.e., with zeros
removed). Return True iff it refers to a whole branch, as opposed
to a single revision."""
return rev.count('.') % 2 == 0
|
f1bd69195ed7d8abd73b1ea5a34e5d4e83a42841
| 351,464 |
def join(parent, child):
"""
Appends the child to the parent, taking care of the slashes. The resulting
url does not have a trailing slash.
"""
return (parent.rstrip("/") + "/" + child.lstrip("/")).rstrip("/")
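# Illustrative usage (hypothetical URL parts):
# >>> join("http://example.org/", "/api/v1/")
# 'http://example.org/api/v1'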
|
c85cbb2bda055df25ea55aa0c4eddb18a46fa9bf
| 330,882 |
def get_commit(repository):
"""
Get current commit ID.
:param repository: repository
:type repository: git.Repo
:return: commit ID
:rtype: str or unicode
"""
return repository.head.commit.hexsha
|
c6fd3bc4871f135770818a57882e75c163ad4016
| 467,894 |
def solve1(a, b, EPS=1e-6):
"""
Returns root of equation a*x + b = 0.
"""
# a*x + b = 0
if abs(a) < EPS:
return ()
else:
return (complex(-b/a),)
|
a5854fc6118e49ebf8b380903e838b310acc6a4a
| 644,692 |
def textToHtml(text:str) -> str:
"""Transform a string so that it looks the same on browsers
as in `print()`"""
return text.replace("\n", "<br>").replace(" ", " ")
|
ea46abe6d604b392a45fa508e3b256f14ed8a3d9
| 527,282 |
def _allSubclasses(cls):
"""List all direct and indirect subclasses of cls
This function calls itself recursively to return all subclasses of cls.
@param cls: the base class to list subclasses of
@type cls: class
@rtype: [class]
"""
return cls.__subclasses__() + [g for s in cls.__subclasses__()
for g in _allSubclasses(s)]
|
d11e311530b0606adfa7d4a38dc52916937b70ec
| 288,266 |
def menu(num):
"""
Функция вывода меню
:param num: сумма наличных денег
:return: ответ пользователя
"""
print(f'Доступная сумма: {num} руб.', end='\n\n') # вывод наличных денег с добавлением пустой строки
print('1. Пополнить счет')
print('2. Совершить покупку')
print('3. История покупок')
print('4. Выход')
ans = input('Введите номер пункта: ') # ввод пункт меню пользователем
return ans
|
918b7cba89927adb99000b297ce342fd7af15cbc
| 359,033 |
def trim(String:str):
"""
Gets rid of all spaces in the string
"""
return ''.join([s for s in str(String) if s != " "])
|
e814e1b1bf7d07cf0d29823ac12334a92aead848
| 272,637 |
def remove_emoticons(string):
"""Remove emoticons from a string and return it.
    Positional arguments:
string -- string to remove emoticons from.
Returns:
String without emoticons.
>>> remove_emoticons("applause and laughter ;)")
'applause and laughter '
"""
emoticons = (':D', ':)', ':/', ':p', ';)')
for emoticon in emoticons:
string = string.replace(emoticon, "")
return string
|
cb9a4bbbf3d0332aefab1bde5089ef4aa06b3b00
| 81,407 |
def orderDf(df, varList):
""" Re-order the columns of a dataframe.
Arguments:
:type df: pandas.DataFrame
:param df: A pandas dataframe
:param list varList: List of column names you want to be placed at the
front of your data frame.
Returns:
:rtype: pandas.DataFrame
:returns: A pandas DataFrame with re-ordered columns.
"""
# Create a list of the other columns in df, not including the columns that
# are being moved.
otherCols = [x for x in df.columns if x not in varList]
# Create new data frame with the columns name in varList at the front.
dfOrder = df[varList + otherCols].copy()
return dfOrder
|
055d83068c1aac1a45bc98db672f5235d06f80b4
| 254,144 |
def calculate_sensitivity(lr, clip, data_size):
"""
Calculate the sensitivity of the privacy mechanism.
:param lr: learning rate
:param clip: clipping bound
:param data_size: data size
:return: the sensitivity of the privacy mechanism
"""
return 2 * lr * clip / data_size
|
ac203f8ff822fe7a04a4fd8980f94448145a92e7
| 439,525 |
def get_json(request_obj, remove_token=True):
"""
    This function is responsible for getting the json data that was sent
    with a request, or returning an empty dict if no data is sent
Args:
~~~~~
request_obj: request object that data should be attached to
Returns:
~~~~~~~~
dict
"""
result = {}
if not hasattr(request_obj, 'json') or not request_obj.json:
if hasattr(request_obj, 'params'):
result = request_obj.params
    else:
        result = request_obj.json or {}
if remove_token and 'token' in result:
del result['token']
return result
|
e3dc01488df4fbe2b54f2f9a88df1e5beb443992
| 681,032 |
def register_and_login_test_user(c):
"""
    Helper function that makes HTTP requests to register and log in a test user
Parameters
----------
c : object
Test client object
Returns
-------
str
Access JWT in order to use in subsequent tests
"""
c.post(
"/api/auth/register",
json={
"username": "test",
"password": "secret",
"first_name": "tim",
"last_name": "apple",
"email": "[email protected]",
"birthday": "1990-01-01",
},
)
setup_resp = c.post(
"/api/auth/login", json={"username": "test", "password": "secret"}
)
setup_resp_json = setup_resp.get_json()
setup_access_token = setup_resp_json["access_token"]
return setup_access_token
|
b76f7f6afa9af453246ae304b1b0504bd68b8919
| 708,877 |
import json
def load_config(config_path='config.json'):
"""
Load config file
"""
with open(config_path) as json_data_file:
config = json.load(json_data_file)
return config
|
4597b0c866f3d36f2fe3d59115346030af280ff0
| 521,031 |
def get_exptime(n, t_tot, overhead=10):
"""
Function that calculates the exposure time
per image
---
Parameters
n int or array
number of fields
t_tot float
        total observation time (s)
overhead float
overhead time per exposure (s)
---
Returns
exptime float
exposure time per image
"""
exptime = (t_tot - n*overhead)/n
return exptime
|
8bf6f46b15f0a9acd5955191a8ee98cd2f145691
| 385,151 |
def is_multigraph(G):
"""
Returns True if the graph is a multigraph. Else returns False.
"""
return G.is_multigraph()
|
8221c3e5939bb2f72cd24fe80029f6e95e7f352e
| 549,779 |
def parse_http_header(header, header_key):
"""
**Parse HTTP header value**
Parse the value of a specific header from a RAW HTTP response.
:param header: String containing the RAW HTTP response and headers
:type header: str
:param header_key: The header name of which to extract a value from
:type header_key: str
:return: The value of the header
:rtype: str
"""
split_headers = header.split('\r\n')
for entry in split_headers:
header = entry.strip().split(':', 1)
if header[0].strip().lower() == header_key.strip().lower():
return ''.join(header[1::]).split()[0]
|
993b8190d631accf7c63e259aa5f4f4c4b657c0e
| 21,988 |
from pathlib import Path
def the_path_that_matches(pattern: str, in_directory):
"""
Finds one and only one path matching the specified pattern. Raises an error if it finds 2+ paths or no paths.
To learn how to use advanced patterns, read http://www.robelle.com/smugbook/wildcard.html
Parameters
----------
pattern : str
Pattern to search for.
in_directory : str or Path
Directory in which to search for the pattern.
Returns
-------
Path
Path found by search.
Raises
------
IOError
If it finds 2+ paths or no paths.
"""
matches = list(Path(in_directory).glob(pattern))
if not Path(in_directory).is_dir():
raise IOError(f"{in_directory} either doesn't exist or isn't a directory at all!")
    elif len(matches) >= 2:
        raise IOError(f"The directory {in_directory} exists but contains more than one path that matches '{pattern}': {[match.name for match in matches]}")
    elif len(matches) == 0:
raise IOError(f"The directory {in_directory} exists but contains no paths that match pattern '{pattern}'")
else:
return matches[0]
|
b6c7f37a24220d02c03a8b38700f65405c9371f5
| 87,666 |
from typing import Any
def convert_null_values(v: Any) -> Any:
"""Convert a "null" string into type(None)."""
null_strings = ["null", "none", "undefined"]
return None if isinstance(v, str) and v.lower() in null_strings else v
|
c5c74ac78da126120dc1fc7c0bfa03df373a7465
| 375,177 |
import re
def _shape(word):
""" This function returns a characteristic of the word, in a sense it tells
if it is made solely of numbers, if it's upper or lower case, punctuations...
    We identify:
0-None 1-number 2-punct 3-upcase 4-downcase 5-mixedcase 6-other
"""
    if word is None:
        shape_case = "0" # ''
    elif re.match(r'[0-9]+(\.[0-9]*)?|[0-9]*\.[0-9]+$', word):
        shape_case = "1" # 'number'
    elif re.match(r'\W+$', word):
        shape_case = "2" # 'punct'
    elif re.match(r'[A-Z][a-z]+$', word):
        shape_case = "3" # 'upcase'
    elif re.match(r'[a-z]+$', word):
        shape_case = "4" # 'downcase'
    elif re.match(r'\w+$', word):
        shape_case = "5" # 'mixedcase'
    else:
        shape_case = "6" # 'other'
    return shape_case
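# Illustrative usage (hypothetical words):
# >>> _shape("Hello"), _shape("3.14"), _shape("..."), _shape("hello")
# ('3', '1', '2', '4')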
|
ad4a6db95277860d06071b46e305c2127f50b4cf
| 158,515 |
import re
def SelectAbuse(items):
"""
    Wrapper function to select the best-effort email address from a list of email addresses for
    the abuse email. The algorithm is simple: first, try to find "abuse@domain"; next, an email
    address with "abuse" in the user id. All others are added to the potentials list.
"""
hit = None
potentials = []
for item in items:
user,domain = item.split("@")
if re.search("^abuse$",user):
hit = item
else:
potentials.append(item)
return hit, potentials
|
21e4bdf074c975dad5052c19617f0b004c62d17a
| 141,292 |
import requests
def user(gateway, user_name):
"""Fetch a user object by user_name from the server."""
uri = gateway + '/users/' + user_name
response = requests.get(uri)
return response.json()
|
e7d52301cf64430252947bf785119f2a9acf3fc3
| 118,390 |
def rivers_with_station(stations):
"""
Given a list of station objects, returns a set with the names of the
rivers with a monitoring station. As the container is a set, there are no duplicates.
"""
stationed_rivers = set()
for station in stations:
if station.river:
stationed_rivers.add(station.river)
return stationed_rivers
|
d50eaf8d407be77628f53475338a2a0791bff40d
| 433,063 |
import re
def parse_version_string(file_path):
"""
Parse __version__ = 'xxx' from the specifed file
"""
version = None
with open(file_path, 'r') as fd:
match = re.search(r'^__version__\s*=\s*[\'"]([^\'"]*)[\'"]',
fd.read(), re.MULTILINE)
if match:
version = match.group(1)
else:
            raise Exception('File %s doesn\'t contain __version__ = \'x.y.z\' string' % file_path)
return version
|
fc95cbc2614d5426cbac8d00b993078be1f14f4f
| 324,652 |
from datetime import datetime
import pytz
def read_gpgsa(sentence, timestamp, do_print=False):
""" Read and parse GPGSA message"""
values = sentence.split('*')[0].split(',')
result = {}
# Linux timestamp
try:
result['linux_stamp'] = int(timestamp)
except:
result['linux_stamp'] = 0
result['linux_date'] = datetime.fromtimestamp(result['linux_stamp'], tz=pytz.UTC)
# Fix selection
try:
result['fix_selection'] = str(values[1])
except:
result['fix_selection'] = None
# Fix type
try:
result['fix_type'] = int(values[2])
except:
result['fix_type'] = None
# Fix satellites
try:
result['fix_satellites'] = []
for sat in range(3, 3+12):
try:
sat_no = int(values[sat])
result['fix_satellites'].append(sat_no)
except:
pass
except:
result['fix_satellites'] = None
# Satellite counts
if result['fix_satellites'] is None:
result['fix_sat_count'] = 0
else:
result['fix_sat_count'] = len(result['fix_satellites'])
# PDOP
try:
result['pdop'] = float(values[15])
except:
result['pdop'] = None
# HDOP
try:
result['hdop'] = float(values[16])
except:
result['hdop'] = None
# VDOP
try:
result['vdop'] = float(values[17])
except:
result['vdop'] = None
if do_print:
print("Linux timestamp :", result['linux_stamp'])
print("Linux datetime :", result['linux_date'])
print(" Fix selection :", result['fix_selection'])
print(" Fix type :", result['fix_type'])
print(" Satellites count:", result['fix_sat_count'])
print(" Fix satellites :", end=" ")
if result['fix_satellites'] is None:
print("NONE")
elif len(result['fix_satellites']) == 0:
print("EMPTY")
else:
for sat in result['fix_satellites']:
print(sat, end=" ")
print()
print(" PDOP :", result['pdop'])
print(" HDOP :", result['hdop'])
print(" VDOP :", result['vdop'])
print()
return result
|
e737ac3af1ef61c8fb042029a889f8b63010b2bd
| 188,569 |
def file_head(file):
"""Read the first line of a text file."""
with open(file, "rt") as f:
return f.readline().rstrip()
|
eaa09bd35eea39e9162471d09ed3e3d0057d1ae2
| 147,880 |
def process_match_by_ordinary_dictionary(dictionary, match_object):
"""
Process a match object using a dictionary of ordinary replacements.
To be passed in the form
functools.partial(process_match_by_ordinary_dictionary, dictionary)
as the replacement-function argument to re.sub,
so that an ordinary replacement dictionary can be used
to process a regex match object.
If the entire string for the match object
is a key (pattern) in the dictionary,
the corresponding value (replacement) is returned;
otherwise the string is returned as is.
"""
match_string = match_object.group()
replacement = dictionary.get(match_string, match_string)
return replacement
|
3b73dd17af15e45af33e02bd21e6256c22de4d17
| 385,405 |
def _get_prefix_and_full_hash(repo_data, kernel_partial_hash):
"""Find the prefix and full hash in the repo_data based on the partial."""
kernel_partial_hash_lookup = 'u\'%s' % kernel_partial_hash
for line in repo_data.splitlines():
if kernel_partial_hash_lookup in line:
prefix, full_hash = line.split(' ', 1)
return prefix, full_hash.strip('u\'')
return None, None
|
277a857153adb3db63ce433db5b0506877eae5dd
| 609,618 |
import ast
from typing import Tuple
from typing import cast
def declares_pkg_resources_namespace_package(python_src: str) -> bool:
"""Given .py file contents, determine if it declares a pkg_resources-style namespace package.
Detects pkg_resources-style namespaces. See here for details:
https://packaging.python.org/guides/packaging-namespace-packages/.
Note: Accepted namespace package decls are valid Python syntax in all Python versions,
so this code can, e.g., detect namespace packages in Python 2 code while running on Python 3.
"""
def is_name(node: ast.AST, name: str) -> bool:
return isinstance(node, ast.Name) and node.id == name
def is_call_to(node: ast.AST, func_name: str) -> bool:
if not isinstance(node, ast.Call):
return False
func = node.func
return (isinstance(func, ast.Attribute) and func.attr == func_name) or is_name(
func, func_name
)
def has_args(call_node: ast.Call, required_arg_ids: Tuple[str, ...]) -> bool:
args = call_node.args
if len(args) != len(required_arg_ids):
return False
actual_arg_ids = tuple(arg.id for arg in args if isinstance(arg, ast.Name))
return actual_arg_ids == required_arg_ids
try:
python_src_ast = ast.parse(python_src)
except SyntaxError:
# The namespace package incantations we check for are valid code in all Python versions.
# So if the code isn't parseable we know it isn't a valid namespace package.
return False
# Note that these checks are slightly heuristic. It is possible to construct adversarial code
# that would defeat them. But the only consequence would be an incorrect namespace_packages list
# in setup.py, and we're assuming our users aren't trying to shoot themselves in the foot.
for ast_node in ast.walk(python_src_ast):
# pkg_resources-style namespace, e.g.,
# __import__('pkg_resources').declare_namespace(__name__).
if is_call_to(ast_node, "declare_namespace") and has_args(
cast(ast.Call, ast_node), ("__name__",)
):
return True
return False
|
0637bc5dc941fda57bdb1402c3341ae17076f2bd
| 160,721 |
import torch
def equalize(image):
"""Implements Equalize function from PIL using PyTorch ops based on:
https://github.com/tensorflow/tpu/blob/master/models/official/efficientnet/autoaugment.py#L352"""
image = image * 255
def scale_channel(im, c):
"""Scale the data in the channel to implement equalize."""
im = im[c, :, :]
# Compute the histogram of the image channel.
histo = torch.histc(im, bins=256, min=0, max=255) # .type(torch.int32)
# For the purposes of computing the step, filter out the nonzeros.
nonzero_histo = torch.reshape(histo[histo != 0], [-1])
step = (torch.sum(nonzero_histo) - nonzero_histo[-1]) // 255
def build_lut(histo, step):
# Compute the cumulative sum, shifting by step // 2
# and then normalization by step.
lut = (torch.cumsum(histo, 0) + (step // 2)) // step
# Shift lut, prepending with 0.
lut = torch.cat([torch.zeros(1), lut[:-1]])
# Clip the counts to be in range. This is done
# in the C code for image.point.
return torch.clamp(lut, 0, 255)
# If step is zero, return the original image. Otherwise, build
# lut from the full histogram and step and then index from it.
if step == 0:
result = im
else:
# can't index using 2d index. Have to flatten and then reshape
result = torch.gather(build_lut(histo, step), 0, im.flatten().long())
result = result.reshape_as(im)
return result # .type(torch.uint8)
# Assumes RGB for now. Scales each channel independently
# and then stacks the result.
s1 = scale_channel(image, 0)
s2 = scale_channel(image, 1)
s3 = scale_channel(image, 2)
image = torch.stack([s1, s2, s3], 0) / 255.0
return image
|
62f6f982f74a02b7a51134f3c34f94097f57d15c
| 359,231 |
from typing import Any
from typing import List
def get_methods(obj: Any, access: str = "all") -> List[str]:
"""
Return list of names corresponding to class methods of an object `obj`.
:param obj: class or class object
:param access: allows to select private, public or all methods of
the object.
"""
methods = [method for method in dir(obj) if callable(getattr(obj, method))]
if access == "all":
pass
elif access == "private":
methods = [method for method in methods if method.startswith("_")]
elif access == "public":
methods = [method for method in methods if not method.startswith("_")]
else:
raise ValueError("Invalid access='%s'" % access)
return methods
|
97d65597333034b63555432bed657f9cff4254b6
| 568,576 |
def total_normalise(gs):
"""
Performs complete normalisation of GeoSeries
:param gs: GeoSeries
:return: normalised GeoSeries
"""
return gs / gs.sum()
|
9b6bb4be01ce89e192e3475d1724c2975572be69
| 427,415 |
def calc_total_hours(entries):
"""
Calculates sum of hours from an array of entry dictionaries
"""
total_hours = 0.0
for entry in entries:
total_hours = total_hours + float(entry['hours_spent'])
return total_hours
|
82b710016813baedb5439a8ae5f08b41578321e7
| 155,165 |
def match(parsed_spec, dt):
"""
Returns true if parsed_spec would trigger on the datetime dt
"""
# dt.weekday() of monday is 0
return (
dt.minute in parsed_spec.minute and
dt.hour in parsed_spec.hour and
dt.day in parsed_spec.dom and
dt.month in parsed_spec.month and
dt.weekday()+1 in parsed_spec.dow
)
|
7f2ecfe0f407857f6b39d8d6397c7526ca56fdbe
| 114,383 |
def normalize(grid):
""" normalize grid to (0,1) """
field = grid.T.values
min_h, max_h = field.min(), field.max()
return (field - min_h) / (max_h - min_h)
|
0bd5bb6cac14a283aaa8ca6dabc914b920fcfa3a
| 681,127 |
def add_events_to_model(model):
"""
Convert model events into variables to be evaluated in the solver step.
Args:
model (pybamm.lithium_ion.BaseModel):
The PyBaMM model to solve.
Returns:
pybamm.lithium_ion.BaseModel:
The PyBaMM model to solve with events added as variables.
"""
for event in model.events:
model.variables.update({"Event: " + event.name: event.expression})
return model
|
b96ddd3002fb054a70bde3ebaab7888a0c11a65c
| 126,029 |
def cauchy(wvl, A, *args):
"""Cauchy's equation for the (real) index of refraction of transparent materials.
Parameters
----------
wvl : `number`
wavelength of light, microns
A : `number`
the first term in Cauchy's equation
args : `number`
B, C, ... terms in Cauchy's equation
Returns
-------
`numpy.ndarray`
array of refractive indices of the same shape as wvl
"""
seed = A
for idx, arg in enumerate(args):
# compute the power from the index, want to map:
# 0 -> 2
# 1 -> 4
# 2 -> 6
# ...
power = 2*idx + 2
seed = seed + arg / wvl ** power
return seed
|
fab28cc1c46755a45417924f47eb480cdbd29795
| 550,735 |
def patternhost(pattern, user):
"""
Given a 'something-%s-example.org' format, return that with %s replaced
(once) by the username in question.
"""
return pattern % user
|
96127b71b701f2e112bced8fd7e299001bdefea7
| 45,311 |
def eliminate_from_neighbors(csp, var) :
"""
Eliminates incompatible values from var's neighbors' domains, modifying
the original csp. Returns an alphabetically sorted list of the neighboring
variables whose domains were reduced, with each variable appearing at most
once. If no domains were reduced, returns empty list.
If a domain is reduced to size 0, quits immediately and returns None.
"""
def check_constraint(var1, var2, val1, val2):
"""
Checks whether there is a constraint violation between var1/val1 and var2/val2.
Returns True if there isn't a violation or false if there is.
"""
constraints = csp.constraints
for constraint in constraints:
if (constraint.var1 == var1 or constraint.var1 == var2) and (constraint.var2 == var1 or constraint.var2 == var2):
if not constraint.check(val1, val2):
return False
return True
reduced_neighbors = []
neighbors = csp.get_neighbors(var)
unassig_var = csp.unassigned_vars
values = csp.get_domain(var)
remove_n_values = {}
#Building dictionary of neighbors and values that conflict
for n in neighbors:
if n in unassig_var:
n_values = csp.get_domain(n)
remove_n_values[n] = []
for n_val in n_values:
constraint_check = []
for val in values:
if check_constraint(var,n,val,n_val):
constraint_check.append(0)
else:
constraint_check.append(1)
if sum(constraint_check) == len(constraint_check):
remove_n_values[n].append(n_val)
#Remove values from neighbors if they conflict. Check if domain is reduced to 0.
for n in remove_n_values:
for n_val in remove_n_values[n]:
csp.eliminate(n,n_val)
if len(csp.get_domain(n)) == 0:
return None
if len(remove_n_values[n]) > 0:
reduced_neighbors.append(n)
return reduced_neighbors
|
2a37e8dc89650f5b1bac80f0f7f2ba1746e15f54
| 626,657 |
import re
def get_match_spans(pattern, input):
"""
Given string pattern and string input,
return list of [) char position tuples of patterns in input.
:param pattern: string pattern to match.
:param input: string input where we find pattern.
:return: a list of pattern char position tuples in input.
"""
spans = []
for match in re.finditer(re.escape(pattern), input):
spans.append(match.span())
return spans
|
27756374448df2415ba5e6a4d98cf603a59ddf90
| 182,554 |
def get_volume(module, system):
"""Return Volume or None"""
try:
try:
volume = system.volumes.get(name=module.params['name'])
except KeyError:
volume = system.volumes.get(name=module.params['volume'])
return volume
except Exception:
return None
|
d2c810c8f567c7d9e0ff967f72d3bfda0895cfe9
| 681,457 |
def dist(spectrum, A, B):
"""
Minimum number of steps along ring to get from one pitch to the next
Parameters:
-----------
spectrum: list
user-defined spectrum
A: int
first pitch
B: int
second pitch
"""
length = len(spectrum)
forward = abs(B - A) # Forward moving graph distance between pitches
backward = abs(forward - length) # Backward moving distance... ...
if forward <= backward:
n = forward
else:
n = backward
return n
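# Illustrative usage (hypothetical 12-pitch spectrum):
# >>> dist(list(range(12)), 0, 10)
# 2
# >>> dist(list(range(12)), 2, 5)
# 3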
|
3ecda36a9912f3088949b406f93ccf0654d505c1
| 630,257 |