content (string, lengths 39–9.28k) | sha1 (string, length 40) | id (int64, 8–710k)
---|---|---|
from typing import Union
from typing import Any
from typing import Tuple
from typing import Callable
import attrs
def iter_validator(iter_type, item_types: Union[Any, Tuple[Any]]) -> Callable:
"""Helper function to generate iterable validators that will reduce the amount of
boilerplate code.
Parameters
----------
iter_type : any iterable
The type of iterable object that should be validated.
item_types : Union[Any, Tuple[Any]]
The type or types of acceptable item types.
Returns
-------
Callable
The attrs.validators.deep_iterable iterable and instance validator.
"""
validator = attrs.validators.deep_iterable(
member_validator=attrs.validators.instance_of(item_types),
iterable_validator=attrs.validators.instance_of(iter_type),
)
return validator
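# Hedged usage sketch (assumes attrs >= 21.3 for attrs.define/attrs.field);
# _Article and its field are hypothetical, not part of the original snippet.
@attrs.define
class _Article:
    tags: list = attrs.field(validator=iter_validator(list, str))
_Article(tags=["a", "b"])    # passes both the iterable and member validators
# _Article(tags=["a", 1])    # would raise TypeError from the member validator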
|
58add73ae65e5cf41ec151c0172810958b1b117d
| 103,099 |
def check_results(results, expected):
"""
Compare the two MoonLight results dictionaries: one from
moonlight_solution.json and the other from the execpted result.
"""
# Mapping of result keys to their data types
keys_to_compare = dict(corpus_size=int,
solution_size=int,
solution_weight=float,
initial_singularities=int,
num_basic_blocks=int,
solution=set)
passed = True
fail_msg = ''
for key, type_ in keys_to_compare.items():
results_value = type_(results[key])
expected_value = type_(expected[key])
if results_value != expected_value:
fail_msg = '{}\n {}: {} v {}'.format(fail_msg,
key,
expected_value,
results_value)
passed = False
if passed:
print('PASSED')
else:
print('FAILED {}'.format(fail_msg))
return passed
|
cc880c646b19f0e9394ff58c15daa808150f2a09
| 579,562 |
from typing import List
import pathlib
def filter_file_existence(c: List[str], files: List[pathlib.Path]) -> List[str]:
    """Filter for files that are not found
    :param c: list of files to find
    :type c: List[str]
    :param files: list of file paths
    :type files: List[pathlib.Path]
    :return: list of file names that are not found
    :rtype: List[str]
    """
    remaining = list(c)  # work on a copy so the caller's list is not mutated
    for x in files:
        for cand in remaining:
            if cand in str(x):
                remaining.remove(cand)
                break
    return remaining
|
50e7b8b8986bfa50d5c66367c1575f6b29f13ddf
| 298,547 |
def getRow(data, rowIndex):
"""
Returns the row indicated by rowIndex from data which is assumed to be an array.
Assuming that rowIndex will be a value from 0 to length of data - 1
"""
return data[rowIndex]
|
678fcc082a4ba494f0c2e5597cc6aded6441eb7f
| 243,421 |
def count_stair_ways(n):
"""Count the numbers of ways to walk up a flight of stairs with 'n' steps while taking a maximum of 2 steps at a time
>>> count_stair_ways(2)
2
>>> count_stair_ways(3)
3
>>> count_stair_ways(4)
5
>>> count_stair_ways(5)
8
"""
"""BEGIN PROBLEM 3.1"""
# Hint: This is actually a slight modification of the recursive fibonacci
# function, in that the input to output mapping is off by 1 spot
# i.e. instead of: (2, 3, 4, 5) => (1, 2, 3, 5)
# we get this: (2, 3, 4, 5) => (2, 3, 5, 8)
if n < 0:
return 0
elif n == 0:
return 1
else:
return count_stair_ways(n - 1) + count_stair_ways(n - 2)
"""END PROBLEM 3.1"""
|
de6a72eb722df431b77f95dffb7f48c3cefc5a18
| 120,839 |
def adjacency(indices, length):
"""Given a list of indices and length of the output list,
Return a list of 1s for adjacencies and 0s for non-adjacencies."""
return [ int(i in indices) for i in range(length) ]
|
a0150033dcbbd2e60adf6755c5a83c2985b6cb3e
| 480,950 |
def split_and_check(s, separator, n):
""" Turn string into tuple, checking that there are exactly as many parts as expected.
"""
parts = s.split(separator)
if len(parts) != n:
raise ValueError('Failed to parse "{}"'.format(s))
return tuple(parts)
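# Example sketch: parse "key=value" pairs, failing loudly on malformed input.
assert split_and_check("timeout=30", "=", 2) == ("timeout", "30")
# split_and_check("a=b=c", "=", 2)  # would raise ValueError('Failed to parse "a=b=c"')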
|
de2fb2714d3c056a91bed41f6301532d0b89a617
| 497,399 |
from datetime import datetime
def is_good_time(time: list):
    """
    Return whether the current time matches one of the publication times.
    :param time: list of publication times as '%H:%M' strings
    :return: bool
    """
    now = datetime.now().strftime('%H:%M')
    return now in time
|
45f966a9a54ea36717e22a83157880df91def39c
| 380,066 |
from typing import Counter
from functools import reduce
def create_maps(words, tags, min_word_freq=5, min_char_freq=1):
"""
Creates word, char, tag maps.
:param words: word sequences
:param tags: tag sequences
    :param min_word_freq: words that occur this many times or fewer are binned as <unk>s
    :param min_char_freq: characters that occur this many times or fewer are binned as <unk>s
:return: word, char, tag maps
"""
word_freq = Counter()
char_freq = Counter()
tag_map = set()
for w, t in zip(words, tags):
word_freq.update(w)
char_freq.update(list(reduce(lambda x, y: list(x) + [' '] + list(y), w)))
tag_map.update(t)
word_map = {k: v + 1 for v, k in enumerate([w for w in word_freq.keys() if word_freq[w] > min_word_freq])}
char_map = {k: v + 1 for v, k in enumerate([c for c in char_freq.keys() if char_freq[c] > min_char_freq])}
tag_map = {k: v + 1 for v, k in enumerate(tag_map)}
word_map['<pad>'] = 0
word_map['<end>'] = len(word_map)
word_map['<unk>'] = len(word_map)
char_map['<pad>'] = 0
char_map['<end>'] = len(char_map)
char_map['<unk>'] = len(char_map)
tag_map['<pad>'] = 0
tag_map['<start>'] = len(tag_map)
tag_map['<end>'] = len(tag_map)
return word_map, char_map, tag_map
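# Toy sketch: thresholds lowered to 0 so every token survives the frequency cut.
_words = [["the", "cat"], ["the", "dog"]]
_tags = [["DET", "NOUN"], ["DET", "NOUN"]]
_wm, _cm, _tm = create_maps(_words, _tags, min_word_freq=0, min_char_freq=0)
assert _wm["<pad>"] == 0 and "the" in _wm and "<unk>" in _cm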
|
f873ebc6447eb2024f3abac4aec0a0feccd08470
| 67,097 |
def _build_batch_norm_params(batch_norm, is_training):
"""Build a dictionary of batch_norm params from config.
Args:
batch_norm: hyperparams_pb2.ConvHyperparams.batch_norm proto.
is_training: Whether the model is in training mode.
Returns:
A dictionary containing batch_norm parameters.
"""
batch_norm_params = {
'decay': batch_norm.decay,
'center': batch_norm.center,
'scale': batch_norm.scale,
'epsilon': batch_norm.epsilon,
'is_training': is_training and batch_norm.train,
}
return batch_norm_params
|
862db398b427bc4ab17c976db2d4eaf4c1f2e133
| 479,080 |
def colWidth(collection, columnNum):
"""Compute the required width of a column in a collection of row-tuples."""
MIN_PADDING = 5
return MIN_PADDING + max((len(row[columnNum]) for row in collection))
|
9a300106cf57fa6a78af37caa3f6b2a74c3e5b2c
| 50,155 |
import sympy
def variable (name):
"""
A convenient frontend to sympy.symbols, except that it escapes commas, so that the single
variable name can contain a comma. E.g. 'Y[0,1]'.
"""
return sympy.symbols(name.replace(',','\\,'))
|
2194c1f674f9510148ce84d50b4d00dd0017c3a3
| 482,420 |
def echo(data):
"""
Just return data back to the client.
"""
return data
|
80655150d1578c12b2f196b664df8935bae569f1
| 41,297 |
from typing import Union
from typing import List
from typing import Any
def has_multiple_references(summaries: Union[List[List[str]], List[List[List[str]]]]) -> bool:
"""
Checks to see if ``summaries`` has multiple references or not by examining
the type of the summaries. Each individual summary is represented as ``List[str]``.
If the type is ``List[List[str]]``, there is one summary per instance. If the
type is ``List[List[List[str]]]``, there are multiple summaries per instance.
"""
def _is_summary(maybe_summary: Union[List[str], Any]) -> bool:
return isinstance(maybe_summary, list) and all(isinstance(item, str) for item in maybe_summary)
if isinstance(summaries, list):
if all(_is_summary(maybe_summary) for maybe_summary in summaries):
return False
for instance_summaries in summaries:
if not all(_is_summary(maybe_summary) for maybe_summary in instance_summaries):
raise Exception('``summaries`` is neither ``List[List[str]]`` nor ``List[List[List[str]]]``.')
return True
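# Quick sketch of the two accepted shapes:
_single = [["sent one", "sent two"], ["another summary"]]   # one summary per instance
_multi = [[["ref a"], ["ref b"]], [["ref c"]]]              # several references per instance
assert has_multiple_references(_single) is False
assert has_multiple_references(_multi) is True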
|
4dc42acda8781681debb951d26f3811623033060
| 317,046 |
def calculate_objective(sol, D):
""" The objective is the total cost of all routes in VRP solution. D is
the full distance matrix of the points in the problem (also includes the
depot with index of 0), and solution is a list containing giant tour
encoded VRP solution, where 0 indicates a visit to the depot OR a list of
routes leaving and returning to the depot.
"""
f = 0.0
if hasattr(sol[0], '__iter__'):
# routes are given separately
for route in sol:
f+= sum( D[route[i-1],route[i]] for i in range(1,len(route)))
# sometimes an end or start (or both) visit to the depot may be missing
if route[0]!=0:
f+=D[0,route[0]]
if route[-1]!=0:
f+=D[route[-1],0]
else:
# giant tour encoding
f = sum( D[sol[i-1],sol[i]] for i in range(1,len(sol)))
f+=D[sol[-1], sol[0]]
return f
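# Toy sketch with 3 points (0 = depot) and a symmetric distance matrix;
# assumes numpy-style D[i, j] indexing as used above.
import numpy as np
_D = np.array([[0, 1, 2],
               [1, 0, 1],
               [2, 1, 0]])
assert calculate_objective([0, 1, 2], _D) == 4.0               # giant tour: 0->1->2->0
assert calculate_objective([[0, 1, 0], [0, 2, 0]], _D) == 6.0  # two explicit routes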
|
4daff795533db06f976fdd4490bad96abd35b02e
| 253,583 |
def extractNonRunSimple(q, factoryConfig=None):
"""All NOT Running Glideins: JobStatus != 2
q: dictionary of Glideins from condor_q
factoryConfig (FactoryConfig): Factory configuartion (NOT USED, for interface)
Returns:
dict: dictionary of Not Running Glideins from condor_q
"""
qnrun = q.fetchStored(lambda el: el["JobStatus"] != 2) # Run==2
qnrun_list = qnrun.keys()
return qnrun_list
|
e6e14593f3aede06a00e2c81f568c600dc7899e7
| 414,014 |
from typing import List
from typing import Optional
def resolve_is_aggregate(values: List[Optional[bool]]) -> Optional[bool]:
"""
Resolves the is_aggregate flag for an expression that contains multiple terms. This works like a voter system,
each term votes True or False or abstains with None.
:param values: A list of booleans (or None) for each term in the expression
:return: If all values are True or None, True is returned. If all values are None, None is returned. Otherwise,
False is returned.
"""
result = [x for x in values if x is not None]
if result:
return all(result)
return None
|
7fa150dcfc0fbcf8ebf507d9812aba5132b5637b
| 516,193 |
def get_indentation(line):
"""
Returns the leading spaces and/or tabs of a line of text.
Preconditions: None
@type line: str
@rtype: str
"""
ptr = 0
while ptr < len(line):
if line[ptr] != ' ' and line[ptr] != '\t':
break
ptr += 1
return line[0:ptr]
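# Examples:
assert get_indentation("    x = 1") == "    "
assert get_indentation("\t\tdef f():") == "\t\t"
assert get_indentation("no indent") == ""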
|
f193302b7edc6b3a10e6136cd3b8f7ab7a04d7dc
| 164,292 |
import simplejson as json  # assumption: stdlib json has no 'for_json' hook, so simplejson is intended
def dumps(resource):
"""Serialize to string
Parameters
----------
resource : `pyannote.core` data structure
Returns
-------
serialization : string
"""
    # the Python-2-era 'encoding' kwarg is dropped; 'for_json' is a simplejson
    # extension that calls resource.for_json() to obtain a serializable object
    return json.dumps(resource, for_json=True)
|
43d216c2d35aa8881a61416ae5fb4737bddce15e
| 552,438 |
def _parse_line(s):
"""Parses a line of a requirements.txt file."""
requirement, *_ = s.split("#")
return requirement.strip()
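# Examples: inline comments and full-line comments are both stripped.
assert _parse_line("requests>=2.0  # HTTP client\n") == "requests>=2.0"
assert _parse_line("# just a comment") == ""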
|
5c0c96898c288a7c358bf978a4415c17c7fb19c4
| 29,081 |
def calculateNetIncome(gross, state):
"""
Calculate the net income after federal and state tax
:param gross: Gross Income
:param state: State Name
:return: Net Income
"""
state_tax = {'LA': 10, 'SA': 0, 'NY': 9}
# Calculate net income after federal tax
net = gross - (gross * .10)
# Calculate net income after state tax
if state in state_tax:
net = net - (gross * state_tax[state]/100)
print("Your net income after all the heavy taxes is: " + str(net))
return net
else:
print("State not in the list")
return None
|
915528d8bfd15c18003eaeb8f6b3f1e8ad5565a0
| 699,110 |
def format_content(article):
"""
    This function takes an Intercom article and formats its content
    as the HTML we'll use for the Guru card. Intercom articles have some
    extra fields, like the # of views, conversations, and counts of user
    reactions; Guru has no equivalent fields, so we display these values
    in the card's HTML.
"""
url = "https://app.intercom.com/a/apps/%s/articles/articles/%s/show/stats?conversations=true" % (
article.get("workspace_id"),
article.get("id")
)
views_url = url + "&displaying=views"
conversations_url = url + "&displaying=newchat"
reactions_url = url + "&displaying=reacted"
stats = article.get("statistics") or {}
banner = """
<hr />
<p>
<a target="_blank" rel="noopener noreferrer" href="%s">%s View%s</a>
<a target="_blank" rel="noopener noreferrer" href="%s">%s Conversation%s</a>
<a target="_blank" rel="noopener noreferrer" href="%s">%s Reaction%s</a>: 😃 %s%% 😐 %s%% 😞 %s%%</p>
""" % (
views_url,
stats.get("views", 0),
"" if stats.get("views") == 1 else "s",
conversations_url,
stats.get("conversations", 0),
"" if stats.get("conversations") == 1 else "s",
reactions_url,
stats.get("reactions", 0),
"" if stats.get("reactions") == 1 else "s",
stats.get("happy_reaction_percentage", 0),
stats.get("neutral_reaction_percentage", 0),
stats.get("sad_reaction_percentage", 0)
)
return article.get("body") + banner
|
78274dca56bc1657c0af5ddee1c6967bc5c233a5
| 641,182 |
def main_heading() -> str:
"""Get index page heading page."""
return 'Video Archive'
|
89057fd3ac0d510cfde43ce8b61cacd4f9f76340
| 137,679 |
from collections import deque
def bfs(graph, start):
    """
    Breadth first search
    Args:
        graph (dict): adjacency mapping {node: iterable of neighbor nodes}
        start (node): some key of the graph
    Time: O(|V| + |E|)
    """
    seen = set()
    path = []
    queue = deque([start])
    while queue:
        current = queue.popleft()  # deque keeps the claimed O(|V| + |E|) bound; list.pop(0) is O(n)
        if current not in seen:
            seen.add(current)
            path.append(current)
            queue.extend(graph[current])
    return path
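# Usage sketch on a small DAG:
_graph = {"A": ["B", "C"], "B": ["D"], "C": ["D"], "D": []}
assert bfs(_graph, "A") == ["A", "B", "C", "D"]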
|
a743ee4505eee2b378c141789429aea5944589c2
| 123,583 |
def proposed_scaling_both(current, desired):
    """
    identify the per-axis scalars to scale the image by so that
    it is closer to the desired scaling (fills the space as much as possible)
    Arguments
    ---------
    current : tuple
        float tuple of current size of svg object
    desired : tuple
        float tuple of desired size of svg object
    Returns
    -------
    tuple of float
        (x, y) scalars for the size change
    """
    scale_x = desired[0] / current[0]
    scale_y = desired[1] / current[1]
    return scale_x, scale_y
|
a3bb42c10dfcfcbfc01e74a42198b7b14238453a
| 613,626 |
import unicodedata
def normalize_text(text):
""" Normaliza um texto para otimizar as buscas.
- Remove os acentos.
- Remove os traços.
- Remove os espaços duplos.
- Remove os espaços das extremidades.
- Altera o texto para minúsculo.
Exemplo:
>>> print(normalize_text('São Paulo - Congonhas'))
sao paulo congonhas
>>>
:param text: Texto que deve ser normalizado.
:returns: Texto normalizado.
"""
if not text:
return text
text = unicodedata.normalize('NFKD', text)
text = text.encode('ASCII', 'ignore')
text = text.decode('utf-8')
text = text.replace('-', ' ')
while ' ' in text:
text = text.replace(' ', ' ')
text = text.strip().lower()
return text
|
8543ab4d21e9703cede55be9ab1a2a0dd2ded398
| 418,673 |
import copy
def merge_left_with_defaults(defaults, loaded_config):
"""
Merge two configurations, with one of them overriding the other.
Args:
defaults (dict): A configuration of defaults
loaded_config (dict): A configuration, as loaded from disk.
Returns (dict):
A merged configuration, with loaded_config preferred over defaults.
"""
result = defaults.copy()
if loaded_config is None:
return result
# copy defaults or override them
for k, v in result.items():
if isinstance(v, dict):
if k in loaded_config:
result[k] = merge_left_with_defaults(v, loaded_config[k])
else:
result[k] = copy.deepcopy(v)
elif k in loaded_config:
result[k] = loaded_config[k]
# copy things with no defaults
for k, v in loaded_config.items():
if k not in result:
result[k] = v
return result
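# Sketch: nested dicts merge recursively; loaded values win, extras are kept.
_defaults = {"db": {"host": "localhost", "port": 5432}, "debug": False}
_loaded = {"db": {"host": "prod.example.com"}, "workers": 4}
assert merge_left_with_defaults(_defaults, _loaded) == {
    "db": {"host": "prod.example.com", "port": 5432},
    "debug": False,
    "workers": 4,
}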
|
630db012b0705e7c0ab12c9cd4e7df07b3ab56f0
| 667,842 |
def hours2days(input_hours):
""" Receive the hours and will return ow many days and hours there is in the input """
# Floor division to get the days but not the extra hours
days = input_hours // 24
# Modulus to get the left hours after calculating the days
hours = input_hours % 24
# Returns tuples
return days, hours
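# Example:
assert hours2days(50) == (2, 2)  # 50 hours = 2 days and 2 hours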
|
fcd04352aa990154d55bd8c3352c717d06ad50f0
| 686,636 |
def map_raw_ids_to_sequential_ids(samples):
"""
For each unique user or item id, this function creates a mapping to a sequence of numbers starting at 0.
This will be the index of the embeddings in the model.
Items ids will be from 0 to n_items - 1.
Users ids will be from n_items to n_items + n_users - 1
This condition is required to later build the distance matrix
:param samples: dict of <user_id1>: [<item_id1>, <item_id2>, ...]
:return: dicts of {<user_idX>: indexY} and {<item_idX>: indexW}
"""
uid2id, iid2id = {}, {}
sorted_samples = sorted(samples.items(), key=lambda x: x[0])
# first sets items ids only
for _, ints in sorted_samples:
sorted_ints = sorted(ints)
for iid in sorted_ints:
if iid not in iid2id:
iid2id[iid] = len(iid2id)
# users ids come after item ids
for uid, _ in sorted_samples:
if uid not in uid2id:
uid2id[uid] = len(uid2id) + len(iid2id)
return uid2id, iid2id
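# Sketch: item ids fill 0..n_items-1, user ids continue after them.
_samples = {"u2": [10, 30], "u1": [20, 10]}
_uid2id, _iid2id = map_raw_ids_to_sequential_ids(_samples)
assert _iid2id == {10: 0, 20: 1, 30: 2}
assert _uid2id == {"u1": 3, "u2": 4}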
|
f32baeaa596ce24f99280fe37b6568d1386b7653
| 255,706 |
def detective_rate(statistic, threshold):
    """
    function to calculate the detection rate
    parameters
    ----------
    statistic:
        statistics of the testing data
    threshold:
        threshold computed from the offline data
    return
    ------
    fault detection rate or false alarm rate
    """
    n_sample = statistic.shape[0]
    # count exceedances first and divide once, instead of accumulating 1/n_sample floats
    n_alarms = sum(1 for i in range(n_sample) if statistic[i] > threshold)
    return n_alarms / n_sample
|
e3401f628b0b48fe105a690ffe2f5167dafe46c4
| 486,810 |
from textwrap import dedent
def package_load_instructions(inst_distributions):
"""Load instructions, displayed in the package notes"""
per_package_inst = ''
for dist in inst_distributions:
if dist.type == 'zip':
per_package_inst += dedent(
"""
# Loading the ZIP Package
Zip packages are compressed, so large resources may load faster.
import metapack as mp
pkg = mp.open_package('{url}')
""".format(url=dist.package_url.inner))
elif dist.type == 'csv':
per_package_inst += dedent(
"""
# Loading the CSV Package
CSV packages load resources individually, so small resources may load faster.
import metapack as mp
pkg = mp.open_package('{url}')
""".format(url=dist.package_url.inner))
if per_package_inst:
return '\n---\n'+per_package_inst
else:
return ''
|
321a7486f27a3cb327ae7556e317bc53c24726ac
| 4,529 |
def solution(A): # O(N)
"""
Given an array A, multiply each of the values by all of the other
values in the array except itself.
>>> solution([1, 2, 3, 4, 5])
[120, 60, 40, 30, 24]
"""
length = len(A) # O(1)
if not length: # O(1)
return 0 # O(1)
multiplication = 1 # O(1)
for value in A: # O(N)
multiplication *= value # O(1)
for i in range(0, length): # O(N)
A[i] = int(multiplication / A[i]) # O(1); assumes A contains no zeros
return A # O(1)
|
cc5f8ff7193d22a7de0fb2a5a56608e4cce0d013
| 257,526 |
def _numpy_array_ufunc(ufunc, method):
"""Trival wrapper for numpy.ufunc"""
return getattr(ufunc, method)
|
1f55f725bb159c903dd386597f7ebcdce5f273be
| 392,110 |
def _get_file_contents(path):
"""
Gets the contents of a specified file, ensuring that the file is properly
closed when the function exits
"""
with open(path, "r") as f:
return f.read()
|
cfe84e52e2ac48d3f7d9d20fd1c85c71a222ef95
| 8,754 |
from typing import List
def list_types() -> List[str]:
"""
Returns supported item types.
"""
return ['string', 'boolean', 'integer', 'number', 'file', 'object', 'array']
|
43f682546c3815316057ef23a720bf21246bc367
| 197,249 |
def _normalize_mac(mac):
"""Convert MAC to a well-known format aa:bb:cc:dd:ee:ff."""
if '-' in mac:
# pxelinux format is 01-aa-bb-cc-dd-ee-ff
mac = mac.split('-', 1)[1]
mac = mac.replace('-', ':')
return mac.lower()
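# Examples: pxelinux-style and plain MACs both normalize the same way.
assert _normalize_mac("01-AA-BB-CC-DD-EE-FF") == "aa:bb:cc:dd:ee:ff"
assert _normalize_mac("AA:BB:CC:DD:EE:FF") == "aa:bb:cc:dd:ee:ff"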
|
c7ec28025001c03b3ba65bfe54750165a1974bd5
| 390,331 |
def df_binary_columns_list(df):
""" Returns a list of binary columns (unique values are either 0 or 1)"""
binary_cols = [col for col in df if
df[col].dropna().value_counts().index.isin([0,1]).all()]
return binary_cols
|
9a2b5313452fac09bb671fe2b12b9f40203fb446
| 78,570 |
def diff_nomod(ctx):
"""Get the unmodded difficulty values of a map."""
return {
"cs": ctx.beatmap.diff_size,
"ar": ctx.beatmap.diff_approach,
"od": ctx.beatmap.diff_overall,
"hp": ctx.beatmap.diff_drain,
"sr": ctx.beatmap.difficultyrating,
"bpm": ctx.beatmap.bpm,
"length": ctx.beatmap.total_length,
}
|
4e025ed19b6deeb1a88d41d1d940940a034c4430
| 567,389 |
def count_from_1(index, collection):
"""Numbering function: consecutive integers starting at 1."""
return index + 1
|
46e58fb0d47a1c71153766620f672c850aabd3ba
| 253,720 |
def te_exp_minus1(posy, nterm):
"""Taylor expansion of e^{posy} - 1
Arguments
---------
posy : gpkit.Posynomial
Variable or expression to exponentiate
nterm : int
Number of non-constant terms in resulting Taylor expansion
Returns
-------
gpkit.Posynomial
Taylor expansion of e^{posy} - 1, carried to nterm terms
"""
res = 0
factorial_denom = 1
for i in range(1, nterm + 1):
factorial_denom *= i
res += posy**i / factorial_denom
return res
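# Sanity sketch with a plain float standing in for a gpkit Posynomial:
# three terms of e^1 - 1 give 1 + 1/2 + 1/6 ≈ 1.6667 (true value ≈ 1.7183).
assert abs(te_exp_minus1(1.0, 3) - 1.6667) < 1e-3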
|
9818ee831c9b6b51d9142d0c463099d595b11afc
| 150,402 |
import json
import csv
from bs4 import BeautifulSoup
def read_file(path, mode="default"):
"""Open a file and return its content (parsed if possible)
:param path: (abs) path to the file
    :param mode: "default|json|csv|xml"
:type path: str
:type mode: str
    :return: content of the file (False for an unknown mode)
    :rtype: str, dict, list or bs4.BeautifulSoup
"""
if mode == "default":
with open(path, "r", encoding="utf-8") as fh:
content = fh.read()
elif mode == "json":
with open(path, "r", encoding="utf-8") as fh:
content = json.load(fh)
elif mode == "csv":
with open(path, "r", newline="", encoding="utf-8") as fh:
content = [r for r in csv.reader(fh)]
elif mode == "xml":
with open(path, "r", encoding="utf-8") as fh:
content = fh.read()
content = BeautifulSoup(content, 'xml')
else:
content = False
return content
|
8b92de37f21a3141cddbe79641b0a7c5117b7739
| 308,286 |
def get_line(merged_vcf):
"""Build a list of relevant data to be printed.
Args:
merged_vcf (dict): an object from project merge VCF response
Returns:
str: tab delimited list of relevant data to be printed
"""
return "\t".join(
[
str(merged_vcf.id),
merged_vcf.last_status.created.isoformat(),
merged_vcf.last_status.status,
]
)
|
b684b6bffc3bf6b603c2fbcee9abfc0cdbbe4943
| 573,009 |
def pprint(l):
    """Pretty print
    Parameters:
        l (list/float/None): object to print
    Returns:
        pretty print str, or None for an empty list or None input
    """
    if isinstance(l, list):
        if len(l) == 0:
            return None
        # assumption: a non-empty list is formatted element-wise
        return ', '.join('%.2f' % x for x in l)
    if l is None:
        return None
    return '%.2f' % l
|
d2b10630ece9f5ac3c1fad745435fc043561ce8c
| 405,178 |
def vec(X, order='F'):
"""Returns the vectorization of X. Columns of X are stacked. (The opposite of X.flatten())."""
assert X.ndim == 2, 'vec operator requires a matrix.'
return X.flatten(order=order)
|
591e5b3f0c9fb3139551ab17025bbf4a941e2984
| 348,793 |
def to_dict(sequences, key_function=None) :
"""Turns a sequence iterator or list into a dictionary.
sequences - An iterator that returns SeqRecord objects,
or simply a list of SeqRecord objects.
key_function - Optional function which when given a SeqRecord
returns a unique string for the dictionary key.
e.g. key_function = lambda rec : rec.name
or, key_function = lambda rec : rec.description.split()[0]
If key_function is omitted then record.id is used, on the
assumption that the records objects returned are SeqRecords
with a unique id field.
If there are duplicate keys, an error is raised.
Example usage:
from Bio import SeqIO
filename = "example.fasta"
d = SeqIO.to_dict(SeqIO.parse(open(filename, "rU")),
key_function = lambda rec : rec.description.split()[0])
print len(d)
print d.keys()[0:10]
key = d.keys()[0]
print d[key]
"""
if key_function is None :
key_function = lambda rec : rec.id
d = dict()
for record in sequences :
key = key_function(record)
if key in d :
raise ValueError("Duplicate key '%s'" % key)
d[key] = record
return d
|
c9d728cca2748791562a6c2bc657fc3296ced5c9
| 395,383 |
def has_children(elem):
"""Check if elem has any child elements."""
if type(elem) == list:
return True
try:
for child in elem:
if type(child) == list:
return True
except TypeError:
return False
return False
|
0ed1f2743231a0df45fb6634bd58664e2115154f
| 239,700 |
def ip_range(network):
"""Return tuple of low, high IP address for given network"""
num_addresses = network.num_addresses
if num_addresses == 1:
host = network[0]
return host, host
elif num_addresses == 2:
return network[0], network[-1]
else:
return network[1], network[-2]
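# Sketch: for networks larger than 2 addresses, network/broadcast are excluded.
import ipaddress
_net = ipaddress.ip_network("192.0.2.0/30")
_low, _high = ip_range(_net)
assert (str(_low), str(_high)) == ("192.0.2.1", "192.0.2.2")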
|
b0c4f17e6a8646a92e7141828efc7e364b5a547d
| 673,351 |
def sorted_completions(completions):
"""Sort completions case insensitively."""
    return sorted(completions, key=lambda x: x[0].lower())  # sorted() already returns a list
|
a22fb6bdde0e8eef15ab5baba3d87c12e7906c1c
| 290,529 |
def relu_p(z):
    """
    Computes the gradient of the relu for a given numpy array.
    """
    result = z.copy()  # copy so the caller's array is not mutated in place
    result[result <= 0] = 0
    result[result > 0] = 1
    return result
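# Example: the gradient is 0 for non-positive entries and 1 for positive ones;
# the input array is left untouched thanks to the copy above.
import numpy as np
_z = np.array([-2.0, 0.0, 3.0])
assert list(relu_p(_z)) == [0.0, 0.0, 1.0]
assert list(_z) == [-2.0, 0.0, 3.0]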
|
f7ec1111e2079131616261a16d55310254c42bc8
| 422,569 |
import ipaddress
def _calculate_address_pool(addrmask, ipv6=False):
"""
Return a pool of addresses contained in the network.
    @param addrmask: Network in IP/mask format.
    @param ipv6: Whether addrmask is an IPv6 network.
    @return: A list of ip addresses
"""
if ipv6:
netobj = ipaddress.IPv6Network(addrmask, strict=False)
else:
netobj = ipaddress.IPv4Network(addrmask, strict=False)
return [format(addr) for addr in netobj]
|
3fb1b60324e47e38c2f74caf9f47d02dcb30cb9f
| 638,646 |
def get_extension(t_path):
    """ Get extension of the file
    :param t_path: path or name of the file
    :return: string with extension of the file or empty string if we failed
        to get it
    """
    path_parts = str.split(t_path, '.')
    if len(path_parts) < 2:
        return ''  # no dot in the name, so no extension to report
    extension = path_parts[-1]
    return extension.lower()
|
70fa800714c1fbd71f3071ed4832a1cd96f1949f
| 45,708 |
def line_list(filename):
    """(Internal) Returns a list of tuples (indentation-level, line), one for every line.
    Empty lines, comments and extra whitespace are stripped out.
    Indents are expected to be 4 spaces.
    :param filename: Path of the asl file name (may be relative or absolute).
    :type filename: str
    :returns: List of every content line with the indentation-level as a tuple
    :rtype: [(int, str)]
    """
    with open(filename, "r") as decode_asl_file:  # context manager closes the file
        lines = decode_asl_file.readlines()
    processed_lines = []
    for line in lines:
        assert int(line.find(line.lstrip())) % 4 == 0
        asl_indents = int(line.find(line.lstrip()) / 4)
        comment_start = line.find("//")
        if comment_start != -1:
            line = line[:comment_start]
        line = line.strip(" \t\n")
        if line != "":
            processed_lines += [(asl_indents, line)]
    return processed_lines
|
ac1c4accca6042916aff28dfb299b55c169652d2
| 443,666 |
def matches(item, *, plugins=None, subregions=None):
"""
Checks whether the item matches zero or more constraints.
``plugins`` should be a tuple of plugin classes or ``None`` if the type
shouldn't be checked.
``subregions`` should be set of allowed ``subregion`` attribute values or
``None`` if the ``subregion`` attribute shouldn't be checked at all.
Include ``None`` in the set if you want ``matches`` to succeed also when
encountering an item without a ``subregion`` attribute.
"""
if plugins is not None and not isinstance(item, plugins):
return False
if subregions is not None and getattr(item, "subregion", None) not in subregions:
return False
return True
|
bed1c1515489799f8695bfa50aedab2888f67407
| 579,482 |
from typing import Counter
def bag_jaccard(x, y):
"""
Jaccard similarity measure between two multisets (bags)
:param x: list of words (strings) for the first sentence
:param y: list of words (strings) for the second sentence
:return: similarity score between two sentences
"""
if len(x) == 0 or len(y) == 0:
return 0.0
xc = Counter(x)
yc = Counter(y)
inter = xc & yc
union = xc | yc
return len(list(inter.elements())) / len(list(union.elements()))
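# Example: intersection bag {a:1, b:1} has 2 elements, union bag {a:2, b:2} has 4.
assert bag_jaccard(["a", "a", "b"], ["a", "b", "b"]) == 0.5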
|
9f946512afc97c3a5cc9c466d9bf2ed61208f7b7
| 338,943 |
import logging
def run_flushdb(redis_con):
"""
Purpose:
Run Redis FlushDB Command. Will clear all
keys and values from the Redis Database (good
for clearing cache when redis is used as
a caching solution)
Args:
redis_con (Redis StrictRedis): Connection
to Redis database
Return
was_successful (bool): whether or not the
command ran successfully
"""
logging.info("Flushing Redis DB")
try:
redis_con.flushdb()
except Exception as err:
logging.error("Error Flushing DB: {0}".format(err))
return False
return True
|
7e65b420a827a2204d0f40cb8534ce35c1427ce0
| 81,704 |
def text_remove_empty_lines(text):
"""
Whitespace normalization:
- Strip empty lines
- Strip trailing whitespace
"""
lines = [ line.rstrip() for line in text.splitlines() if line.strip() ]
return "\n".join(lines)
|
eee3aee2e7f56e8567d6b87c9d11cb4d63a7a6e5
| 508,554 |
def get_exponent(number, base):
"""If number = base**k, returns k. Else returns None
"""
if number <= 1: return 0
k = 0
if base > 1:
while number % base == 0:
number //= base  # integer division avoids float precision issues on large numbers
k += 1
return k if number == 1 else None
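# Examples:
assert get_exponent(8, 2) == 3       # 8 == 2**3
assert get_exponent(12, 2) is None   # 12 is not a pure power of 2
assert get_exponent(1, 5) == 0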
|
5628fa0b88b0a8b93ac3016744e2775701a30bfa
| 238,398 |
import re
def snake(s):
""" snake cases ``s``
:param str s:
:return: str
"""
# insert a space before each uppercase character preceded by a
# non-uppercase letter
s = re.sub(r'(?<=[^A-Z])\B([A-Z])', r' \1', s)
# lowercase everything, split on whitespace and join
return '_'.join(s.lower().split())
|
077dae87deda6cc00d055a2fa1829c19caa8a27d
| 377,330 |
def calc_control(num):
"""
Рассчитывает контрольное число кода EAN-13.
Цифры набора нумеруются справа налево.
Подсчитываются суммы цифр, стоящих на четных и нечетных местах.
Сумма цифр, стоящих на четных местах, суммируется с утроенной суммой цифр, стоящих на нечетных местах.
Если цифра единиц полученного результата равна нулю, то контрольная цифра — 0.
Если последняя цифра результата не нуль, то контрольная цифра равна дополнению этой цифры до 10.
:param num: число либо строка с первыми 12 числами
:return: последнее контрольное число
"""
rev = list(str(num))
rev.reverse()
sum_odd = sum([int(i) for i in rev[0::2]])
sum_even = sum([int(i) for i in rev[1::2]])
return (10 - ((sum_even + 3 * sum_odd) % 10)) % 10
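# Example: for the well-known EAN-13 4006381333931, the first 12 digits
# 400638133393 yield check digit 1.
assert calc_control(400638133393) == 1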
|
c131058deca2e1f5fe5d50be7b751a7b42b3d467
| 335,966 |
from typing import Counter
from functools import reduce
def merge_raw_vocabs(*raw_vocabs: Counter) -> Counter:
"""
Merges multiple raw vocabularies into a single one.
:param raw_vocabs: Raw vocabularies.
:return: Merged raw vocabulary.
"""
raw_vocab = reduce(lambda c1, c2: c1 + c2, raw_vocabs)
return raw_vocab
|
a80ed338830a16004a7281e553d69b189564cce9
| 243,181 |
import re
def is_valid_ttl(ttl):
"""Check that ttl value is valid (ie. positive signed 32 bit number)"""
if len(ttl) == 0:
return False
match = re.search(r"[^0-9]", ttl)
if match is not None:
return False
value = int(ttl)
if not (0 <= value < 2 ** 31):
return False
return True
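# Examples: digits only, and the value must fit a signed 32-bit integer.
assert is_valid_ttl("3600") is True
assert is_valid_ttl("-1") is False          # sign character fails the digit check
assert is_valid_ttl("2147483648") is False  # 2**31 is out of range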
|
267a2ead566b9dfe5a7e881995a592257b103e43
| 594,080 |
from typing import List
def generate_gmt_file(output_file: str, gene_sets: List) -> str:
"""
Generate a GSEA gmt file
:param output_file: name of output file
:param gene_sets: list of gene sets; each entry is (gene_set_name, gene_set_origin, list of gene symbols)
    :return: the name of the written output file
"""
with open(output_file, 'w') as f:
for gs_name, gs_origin, symbols in gene_sets:
f.write('{}\t{}\t{}\n'.format(
gs_name,
gs_origin,
'\t'.join(symbols)
))
return output_file
|
9c3c343d589d0ae798806da63e67d1fa9576c3e1
| 124,624 |
def read_contents(file_path):
"""Reads the contents of file_path without decoding."""
with open(file_path) as fin:
return fin.read()
|
4331371c012420a176fc5a6f191afce325058004
| 267,138 |
import re
def extract_md5_from_text(text):
    """
    extract a 32-hex-digit (128-bit) MD5 hash from a string
    Args:
        text(str): string output from gsutil command
    Returns:
        hash(str) or None
    """
    # the character class must not contain a comma; [0-9a-f]{32} matches hex digits only
    m = re.search("[0-9a-f]{32}", text.lower())
    if m:
        return m.group(0)
    else:
        return None
|
9104d78fc16a25b6a0dc1ed0adb1457c8b56e05f
| 599,469 |
def is_float4x4(items):
"""Verify that the sequence contains 4 sequences of each 4 :obj:`float`.
Parameters
----------
items : iterable
The sequence of items.
Returns
-------
bool
"""
return (
len(items) == 4 and
all(
len(item) == 4 and
all(isinstance(i, float) for i in item) for item in items
)
)
|
b01feb2f728aa693922b4743d158194234681e67
| 577,325 |
def create_set_state_payload(context: str, state: int):
"""Create and return "setState" dictionary to send to the Plugin Manager.
Args:
context (str): An opaque value identifying the instance's action you want to modify.
state (int): A 0-based integer value representing the state of an action with multiple states.
Returns:
dict: Dictionary with payload to set the plugin action state.
"""
return {
"event": "setState",
"context": context,
"payload": {
"state": state
}
}
|
3bff49a05b66d7fc4b0615c0e4c8659a8666d364
| 583,685 |
def from_date(val):
"""escape a python datetime.date"""
return val.strftime("'%Y-%m-%d'")
|
2f7ca7ac6966c6249836d76e9d94b57da35f7a90
| 405,788 |
import glob
def _get_header_files(include_dirs):
"""Return a list of all extra headers to include in checks."""
headers = []
for path in include_dirs:
headers += glob.glob(path + "/**/*.h", recursive=True)
headers += glob.glob(path + "/**/*.hh", recursive=True)
headers += glob.glob(path + "/**/*.hpp", recursive=True)
headers += glob.glob(path + "/**/*.ipp", recursive=True)
return headers
|
981652963a8bbf71beef10a31a24d02fafd71695
| 637,874 |
def areaRectangulo(base, altura):
"""Function that finds the area of a rectangle given its width and height
Args:
base (float): the value for the width of the rectangle
altura (float): the value for the height of the rectangle
Returns:
float: The area of the rectangle
"""
return base * altura
|
c013f86cac04ee405a5d6cf06184a19ffbf7f254
| 62,583 |
def tuple_incr(t1, idx, val=1):
"""Return a tuple with the index idx incremented by val"""
return t1[:idx] + (t1[idx]+val,) + t1[idx+1:]
|
bbf7c9d7b669fff5a42e224790b1af9135cce231
| 653,171 |
def getInputShape(model):
"""
Gets the shape when there is a single input.
Return:
Numeric dimensions, omits dimensions that have no value. eg batch
size.
"""
s = []
for dim in model.input.shape:
if dim.value:
s.append(dim.value)
return tuple(s)
|
628f61a995784b9be79816a5bbcde2f8204640be
| 707,243 |
def seq_del(s, i):
"""
Returns a new sequence with i missing. Mysteriously missing from the
standard library.
"""
return s[:i] + s[i+1:]
|
d72be1f0317d8252881e1a00384c9cc3a9a69488
| 295,604 |
def path_sequence(list_of_links, source, target):
"""
Convert set of tuples representing path in random order to a path
:param list_of_links: set of tuples representing path
:param source: source node
:param target: destination node
:return: pretty path as sequence of nodes
"""
pretty_path = []
nxt = source
pretty_path.append(source)
# iterate through list till target is reached
while (nxt != target):
for pair in list_of_links:
if pair[0] == nxt:
nxt = pair[1]
pretty_path.append(nxt)
list_of_links.remove(pair)
return pretty_path
|
696126c38fdb092033bee64846627bfbaa2c1523
| 450,482 |
def ascii_to_bit(ascii_string: str) -> str:
"""
Converts a given string of ASCII chars to a string of bits.
:param ascii_string:
:return:
"""
result = bin(int.from_bytes(ascii_string.encode(), 'big'))
result = result[2:] # We don't want this '0b' at the beginning.
while len(result) % 8 != 0:
result = '0' + result
return result
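# Example: each character becomes one zero-padded byte.
assert ascii_to_bit("A") == "01000001"  # ord('A') == 65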
|
7600d7290fd638e25b06ef8fcbab6c19a93e1dd2
| 293,999 |
from typing import Any
def _get_ending_line(expr: Any) -> int:
"""Get the ending line number of a Python expression
This method gets the final line number of a
(possibly-multiline) Python expression.
Args:
expr (ast.AST): a Python expression object
Returns:
int: the line number on which the given expression ends
"""
final_stmt = expr
highest_line_no = -1
not_at_end = True
while not_at_end:
if hasattr(final_stmt, 'lineno'):
highest_line_no = final_stmt.lineno
body_is_valid = hasattr(final_stmt, 'body') and final_stmt.body
if hasattr(final_stmt, 'orelse') and final_stmt.orelse:
# 'orelse' should take priority over 'body'
# (as it always has a lower ending line)
final_stmt = final_stmt.orelse
if isinstance(final_stmt, list):
# .orelse may or may not be a list
final_stmt = final_stmt[-1]
elif body_is_valid and isinstance(final_stmt.body, list):
final_stmt = final_stmt.body[-1]
elif body_is_valid:
final_stmt = final_stmt.body
elif hasattr(final_stmt, 'exc'):
final_stmt = final_stmt.exc
elif hasattr(final_stmt, 'args') and final_stmt.args:
final_stmt = final_stmt.args[-1]
elif hasattr(final_stmt, 'elts') and final_stmt.elts:
final_stmt = final_stmt.elts[-1]
elif hasattr(final_stmt, 'generators') and final_stmt.generators:
final_stmt = final_stmt.generators[-1]
elif hasattr(final_stmt, 'iter'):
final_stmt = final_stmt.iter
elif hasattr(final_stmt, 'values') and final_stmt.values:
final_stmt = final_stmt.values[-1]
elif hasattr(final_stmt, 'value'):
# some (but not all) value attributes have
# child elements - we handle both kinds here
final_stmt = final_stmt.value
else:
not_at_end = False
return highest_line_no
|
164ed7fcc84a6d9b61b1f896358b52b7d6b1fdfb
| 307,288 |
from pathlib import Path
def baddir(path: Path) -> bool:
"""
tells if a directory is not a Git repo or excluded.
A directory with top-level file ".nogit" is excluded.
Parameters
----------
path : pathlib.Path
path to check if it's a Git repo
Returns
-------
bad : bool
True if an excluded Git repo or not a Git repo
"""
path = path.expanduser()
try:
if not path.is_dir():
return True
except PermissionError:
return True
try:
bad = (path / ".nogit").is_file() or not (path / ".git" / "HEAD").is_file()
except PermissionError: # Windows
bad = True
return bad
|
027366d5c1ddf1f55e2011709d826561d86950de
| 678,379 |
def file_open(*args, **kwargs):
"""Open file
see built-in open() documentation for more details
Note: The reason this is kept in a separate module is to easily
be able to provide a stub module that doesn't alter system
state at all (for unit tests)
"""
return open(*args, **kwargs)
|
170e6a1980242bb0be20adc7db91a0dea2535298
| 508,961 |
def is_409(item):
    """Return True if the item has a status_code of 409 (conflict).
    Raise TypeError if item doesn't have a status_code.
    """
    if hasattr(item, "status_code"):
        return item.status_code == 409
    raise TypeError("is_409 cannot check item with missing status_code attribute")
|
9b3ab5ec8e0d69c41e82544ab20db4beee63c80a
| 466,169 |
def index_gen(length):
"""returns a callable
Parameters
----------
length : int
length of series of integers to generate
Returns
-------
callable
Notes
-----
When invoked with an int, returns all indices except that provided.
The result can be used with a numpy.take(data, indices).
>>> gen_series = index_gen(4)
>>> gen_series(0)
[1, 2, 3]
>>> gen_series(1)
[0, 2, 3]
"""
data = tuple(range(length))
def gen(i):
temp = list(data)
temp.pop(i)
return temp
return gen
|
4d00f214e8ef15e392187206efaa6a5d44910182
| 177,692 |
def is_within_interval(value, min_value=None, max_value=None):
"""
Check whether a variable is within a given interval. Assumes the value is
always ok with respect to a `None` bound. If the `value` is `None`, it is
always within the bounds.
:param value: The value to check. Can be ``None``.
:param min_value: The lower bound.
:param max_value: The upper bound.
:return: ``True`` if the value is ``None``. ``True`` or ``False`` whether
the value is within the given interval or not.
.. note::
A value is always within a ``None`` bound.
Example::
>>> is_within_interval(None)
True
>>> is_within_interval(None, 0, 10)
True
>>> is_within_interval(2, None, None)
True
>>> is_within_interval(2, None, 3)
True
>>> is_within_interval(2, 1, None)
True
>>> is_within_interval(2, 1, 3)
True
>>> is_within_interval(2, 4, 7)
False
>>> is_within_interval(2, 4, 1)
False
"""
    checks = []
    # compare against None explicitly so a value or bound of 0 is not skipped
    if value is not None and min_value is not None:
        checks.append(value >= min_value)
    if value is not None and max_value is not None:
        checks.append(value <= max_value)
    return all(checks)
|
8b82858d33b56fcd479f97a4077c03bd1392bb75
| 94,541 |
import ast
def convert(val):
"""Convert input to interpreted data type.
:param val: value to be interpreted
:returns: interpreted value
"""
try:
return ast.literal_eval(val)
    except Exception:  # don't swallow KeyboardInterrupt/SystemExit via BaseException
pass
return val
|
02995e3aac6a16beea1e0f36e1c4bec4343ea0f9
| 479,134 |
def get_one_of(object, attribs):
"""Try to get the first one of attribs."""
for a in attribs:
v = getattr(object, a, None)
if v:
return v
|
1c440e332b269c5542eac26c299bc2afc24cb2f4
| 522,963 |
def parse_header(line):
"""Parse output of tcpdump of pcap file, extract:
time
date
ethernet_type
protocol
source ip
source port (if it exists)
destination ip
destination port (if it exists)
length of the data
"""
ret_dict = {}
h = line.split()
date = h[0]
time = h[1]
ret_dict['raw_header'] = line
ret_dict['date'] = date
ret_dict['time'] = time
src_a = h[3].split(".", 3)
if "." in src_a[-1]:
port_a = src_a[-1].split('.')
ret_dict['src_port'] = port_a[-1]
ret_dict['src_ip'] = ".".join(h[3].split('.')[:-1])
else:
ret_dict['src_ip'] = h[3]
dest_a = h[5].split(".", 3)
if "." in dest_a[-1]:
port_a = dest_a[-1].split('.')
ret_dict['dest_port'] = port_a[-1].split(":")[0]
ret_dict['dest_ip'] = ".".join(h[5].split('.')[:-1])
else:
ret_dict['dest_ip'] = h[5].split(":")[0]
ret_dict['protocol'] = h[6]
ret_dict['ethernet_type'] = h[2]
try:
ret_dict['length'] = int(h[-1])
except Exception as e:
print("failed to get length because: {0}, setting it to 0".format(str(e)))
ret_dict['length'] = 0
if h[2] == 'IP':
#do something meaningful
pass
else:
pass
#do something else
ret_dict['tool'] = "tcpdump_hex_parser"
return ret_dict
|
461d970e3ab686f9b14bfb55fbe56552e0f9e90b
| 206,366 |
def camel_to_snake(s: str) -> str:
"""Converts "CamelCase" to "snake_case"."""
return "".join(f"_{c}" if c.isupper() else c for c in s).strip("_").lower()
|
d6697877e5a00ce2bb1149dd6cf0da2b0230c9cd
| 664,982 |
def shift_cftime_index(xobj, time_string, n, freq):
"""Shifts a ``CFTimeIndex`` over a specified number of time steps at a given
temporal frequency.
This leverages the handy ``.shift()`` method from ``xarray.CFTimeIndex``. It's a
simple call, but is used throughout ``climpred`` so it is documented here clearly
for convenience.
Args:
xobj (xarray object): Dataset or DataArray with the ``CFTimeIndex`` to shift.
time_string (str): Name of time dimension to be shifted.
n (int): Number of units to shift.
Returned from :py:func:`get_lead_cftime_shift_args`.
freq (str): Pandas frequency alias.
Returned from :py:func:`get_lead_cftime_shift_args`.
Returns:
``CFTimeIndex`` shifted by ``n`` steps at time frequency ``freq``.
"""
time_index = xobj[time_string].to_index()
return time_index.shift(n, freq)
|
18fd4e4b388f2006a59bbf5c20f3793028e2510c
| 565,177 |
import binascii
def hex_to_sha(hex):
"""Takes a hex sha and returns a binary sha"""
assert len(hex) == 40, "Incorrect length of hexsha: %s" % hex
try:
return binascii.unhexlify(hex)
except TypeError as exc:
if not isinstance(hex, bytes):
raise
raise ValueError(exc.args[0])
|
ad3870706f523353d8d99455f64a90abf23b491a
| 232,500 |
def get_mode_count(df):
"""
Computes the mode and the count of the mode from an array.
Args:
df - data frame with ONE column
Returns:
df_mode - mode of the column
df_mode_count - count for that mode
"""
# calculate the mode and its count from the input data frame (with one column)
df_value_counts = df.value_counts()
df_mode = df_value_counts.index[0]
df_mode_count = df_value_counts.iloc[0]
return df_mode, df_mode_count
|
8712b6a351c6afdb328e94a633652af86a1f4eba
| 18,159 |
def get_stop_times(feed, date=None):
"""
Return a subset of ``feed.stop_times``.
Parameters
----------
feed : Feed
date : string
YYYYMMDD date string restricting the output to trips active
on the date
Returns
-------
DataFrame
Subset of ``feed.stop_times``
Notes
-----
Assume the following feed attributes are not ``None``:
- ``feed.stop_times``
- Those used in :func:`.trips.get_trips`
"""
f = feed.stop_times.copy()
if date is None:
return f
g = feed.get_trips(date)
return f[f["trip_id"].isin(g["trip_id"])]
|
8e7df4ab933e5a1121ba943fb473fa51b9fc160c
| 218,962 |
def get_pycache(source, names):
"""
gets all `__pycache__` directories available in names.
:param str source: source directory of contents.
:param list[str] names: name of all contents in source.
:rtype: list[str]
"""
return [name for name in names if '__pycache__' in name]
|
d1e325648e0353c8ece59a8448167ff16a0fb9a9
| 563,087 |
def doubles(counts):
"""Returns count of double occurrences."""
return (counts==2).sum()
|
4ad63c65120c367ab7a9c7b10f9bb9272f3f99ff
| 375,687 |
import json
def load_json(path):
"""Loads a JSON file and returns the data.
Args:
path: (str) a file path.
Returns:
(dict) the parsed data.
"""
with open(path, 'r') as fp:
ret = json.load(fp)
return ret
|
c547a8f794b0d2026e1f504d37565c0614ffbf7b
| 324,696 |
def removekey(d, key):
"""This functions returns a copy of a dictionnary with a removed key
Parameters: d (dict): dictionnary
key: the key that must be deleted
Returns: copy of dictionnary d without the key
"""
r = dict(d)
del r[key]
return r
|
45eeb1e56a38e065aeb2dba58acc098152ea0425
| 663,932 |
def str2ascii(string: str) -> list:
"""Convert a string to a list of ascii-codes"""
return [ord(i) for i in string]
|
a938b0c585e78a455721e9d17e8915b0769a025f
| 698,310 |
import csv
def read_csv(input_file):
""" Read a csv as dict
Return participant dict with paired_site """
participant = {}
with open(input_file) as csv_file:
reader = csv.DictReader(csv_file)
try:
for row in reader:
if row["paired_site"] == "" or row["paired_site"] is None:
# no_site_pairing is a possible bucket name in awardee bucket
row["paired_site"] = "no_site_pairing"
participant[row["pmi_id"]] = row["paired_site"]
except KeyError as e:
print("Check csv file headers. Error: {}".format(e))
return participant
|
2cd50be71d5fe11e13b582142553c6b3e5544b02
| 262,505 |
def compute_avg_headway(headways):
"""
    Compute the effective combined headway of all trips with the
    specified headways: the combined frequency is the sum of the
    individual frequencies, and the headway is its reciprocal.
:param headways: (list of int) list of headways
"""
if not headways:
return 0
frequency = sum([1 / headway for headway in headways])
return 1 / frequency
|
2c0f034de54160aa899fb34bcc805dc3b5f90627
| 262,168 |
def get_basename(filename, extention):
"""
    Get basename for a file containing a format extension.
    :param filename: (string) filename/path
    :param extention: (string) the extension to strip, e.g. ".txt"
    :return: basename of a file without path or extension
"""
if extention in filename:
filename = filename.split("/")[-1]
base = filename.replace(extention, "")
return base
else:
raise ValueError("Wrong file type.")
|
90133465b19330fbd999166115f2dc35b28c7e70
| 399,037 |
import six
def quote(s, *args):
"""Return quoted string even if it is unicode one.
:param s: string that should be quoted
:param args: any symbol we want to stay unquoted
"""
s_en = s.encode('utf8')
return six.moves.urllib.parse.quote(s_en, *args)
|
14b72b9231a2d53f242136cb002dc2d189eca1aa
| 61,297 |
def extract_version(txt):
"""This function tries to extract the version from the help text of any
program."""
words = txt.replace(",", " ").split()
version = None
for x in reversed(words):
if len(x) > 2:
if x[0].lower() == "v":
x = x[1:]
if "." in x and x[0].isdigit():
version = x
break
return version
|
a9a7264942cf607d8faa5311c5bcfdac553d7929
| 436,743 |
def test_module(client):
"""
Returning 'ok' indicates that the integration works like it is supposed to. Connection to the service is successful.
Args:
client (Client): instance of the Client class
Returns:
'ok' if test passed, anything else will fail the test.
"""
try:
client.get_supported_languages()
return 'ok'
except Exception as e:
return 'Test failed: {}'.format(str(e))
|
c52ef76a3db946974b2798f2b20ee3eb6388b72f
| 97,240 |
def flatten_dict(nested_dict, sep=None):
"""
    flatten_dict flattens a dictionary.
    The flattened keys are joined using a separator which defaults to '__'.
    :param nested_dict: input nested dictionary to be flattened.
    :type nested_dict: dict
    :param sep: separator for the joined keys, defaults to __
    :type sep: str, optional
    :return: flattened dictionary
:rtype: dict
"""
if sep is None:
sep = "__"
res = {}
def flatten(x, name=""):
if type(x) is dict:
for a in x:
flatten(x[a], name + a + sep)
elif type(x) is list:
i = 0
for a in x:
flatten(a, name + str(i) + sep)
i += 1
else:
res[name[:-len(sep)]] = x  # strip the trailing separator, whatever its length
flatten(nested_dict)
return res
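# Sketch: dicts and lists are flattened together, with list indices as keys.
_nested = {"a": {"b": 1, "c": [2, 3]}}
assert flatten_dict(_nested) == {"a__b": 1, "a__c__0": 2, "a__c__1": 3}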
|
b7fe90a54df05ad8adc063be50d9dbe91a240a99
| 428,751 |