content (string) | sha1 (string) | id (int64)
---|---|---|
def check_interfaces(interfaces):
    """
    Iterate over interfaces and collect those whose admin state is "up"
    but whose operational link is "down", skipping interfaces whose
    description is None or starts with '_' (underscore).
    Return a list of [key, admin, oper, description] entries for what is left.
    """
    down_interfaces = []
    for interface in interfaces:
        if interface.admin == 'up' and interface.oper == 'down':
            if interface.description is None or \
                    interface.description.startswith('_'):
                continue
            down_interfaces.append([interface.key, interface.admin,
                                    interface.oper, interface.description])
    return down_interfaces
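
# A minimal usage sketch (illustrative, not from the original source); the
# Interface namedtuple below is a hypothetical stand-in with the attributes
# the function expects.
from collections import namedtuple

Interface = namedtuple('Interface', ['key', 'admin', 'oper', 'description'])
ifaces = [
    Interface('Gi0/1', 'up', 'down', 'uplink to core'),
    Interface('Gi0/2', 'up', 'down', '_maintenance'),  # skipped: leading '_'
    Interface('Gi0/3', 'up', 'up', 'access port'),     # skipped: link is up
]
print(check_interfaces(ifaces))
# [['Gi0/1', 'up', 'down', 'uplink to core']]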
|
21c650f8b24dcfcbaf71699d81a7b00ffb8f4edf
| 428,635 |
def is_defined(s, table):
    """
    Test if a symbol or label is defined.
    :param s: The symbol to look up.
    :param table: A dictionary containing the labels and symbols.
    :return: True if defined, False otherwise.
    """
    try:
        table[s]  # Exploiting possible KeyError
        return True
    except KeyError:
        return False
|
02b96a86d96da97c0b008dcaa7924c9e77a003f2
| 66,200 |
def rotate_char(c, n):
    """Rotate a single character n places in the alphabet
    n is an integer
    """
    # alpha_number and new_alpha_number will represent the
    # place in the alphabet (as distinct from the ASCII code)
    # So alpha_number('a')==0
    # alpha_base is the ASCII code for the first letter of the
    # alphabet (different for upper and lower case)
    if c.islower():
        alpha_base = ord('a')
    elif c.isupper():
        alpha_base = ord('A')
    else:
        # Don't rotate character if it's not a letter
        return c
    # Position in alphabet, starting with a=0
    alpha_number = ord(c) - alpha_base
    # New position in alphabet after shifting
    # The % 26 at the end is for modulo 26, so if we shift it
    # past z (or a to the left) it'll wrap around
    new_alpha_number = (alpha_number + n) % 26
    # Add the new position in the alphabet to the base ASCII code for
    # 'a' or 'A' to get the new ASCII code, and use chr() to convert
    # that code back to a letter
    return chr(alpha_base + new_alpha_number)
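
# Example (illustrative): applying the rotation character by character
# gives a classic Caesar/ROT13 cipher; non-letters pass through unchanged.
print("".join(rotate_char(c, 13) for c in "Hello, World!"))
# Uryyb, Jbeyq!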
|
b1259722c7fb2a60bd943e86d87163866432539f
| 3,896 |
from typing import Mapping
from typing import Sequence
def iterate(mapping):
    """
    Attempt to iterate over `mapping` such that key-value pairs are yielded per iteration. For
    dictionaries and other mappings, this would be the keys and values. For lists and other
    sequences, this would be the indexes and values. For other non-standard object types, some
    duck-typing will be used:
    - If `mapping` has a callable ``mapping.items()`` attribute, it will be used.
    - If `mapping` has callable ``mapping.keys()`` and ``__getitem__`` attributes, then
      ``(key, mapping[key])`` will be used.
    - Otherwise, ``iter(mapping)`` will be returned.
    """
    if isinstance(mapping, Mapping) or callable(getattr(mapping, "items", None)):
        return mapping.items()
    if isinstance(mapping, Sequence):
        return enumerate(mapping)
    if callable(getattr(mapping, "keys", None)) and hasattr(mapping, "__getitem__"):
        return ((key, mapping[key]) for key in mapping.keys())
    return iter(mapping)
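
# A quick demonstration (illustrative) of the duck-typed dispatch paths.
print(dict(iterate({"a": 1, "b": 2})))  # Mapping -> items(): {'a': 1, 'b': 2}
print(list(iterate(["x", "y"])))        # Sequence -> enumerate: [(0, 'x'), (1, 'y')]
print(sorted(iterate({"x", "y"})))      # fallback -> iter(mapping): ['x', 'y']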
|
8b651ea8b239883f0323ab8542bd912319c78012
| 83,278 |
def rosen(x):
    """The Rosenbrock function (x: 1-D array, e.g. a NumPy array, since
    the expression relies on slicing and vectorised arithmetic)."""
    return sum(100.0*(x[1:]-x[:-1]**2.0)**2.0 + (1-x[:-1])**2.0)
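
# Usage sketch (assumes NumPy); the global minimum is 0 at x = (1, ..., 1).
import numpy as np
print(rosen(np.array([1.0, 1.0, 1.0])))  # 0.0
print(rosen(np.array([1.2, 1.0, 0.8])))  # > 0 away from the minimum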
|
ac5cda8b88fe3d726636b2c236128a1b2212d2c3
| 429,264 |
def noop(data):
    """
    No-op "encoder" returns the object exactly as it is.
    """
    return data
|
2929d8ce17197946e1af8f705ea17fdd4dfc6e41
| 44,414 |
import functools
def product(seq):
    """
    Returns the product of elements in the iterable ``seq``.
    """
    return functools.reduce(lambda x1, x2: x1 * x2, seq, 1)
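
# Example: the initial value 1 makes the empty iterable return 1.
print(product([2, 3, 4]))  # 24
print(product([]))         # 1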
|
bd08322f54faf93f59195aa7cf6342702e22b199
| 556,453 |
import six
def is_file_exists_error(e):
    """
    Returns whether the exception *e* was raised due to an already existing file or directory.
    """
    if six.PY3:
        return isinstance(e, FileExistsError)  # noqa: F821
    else:
        return isinstance(e, OSError) and e.errno == 17  # errno 17 == EEXIST
|
88e2a073e4482be6064041a12bc03c9fb37f14ef
| 432,783 |
def run_coroutine(coro):
    """Runs a coroutine as a top-level task by iterating through all yielded steps."""
    result = None
    try:
        # Step through all parts of coro without scheduling anything else;
        # each yielded value is sent back in on the next step.
        while True:
            result = coro.send(result)
    except StopIteration as ex:
        # coro reached its end; pass on its return value.
        return ex.value
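
# Illustrative example: each yielded value is sent back into the coroutine
# on the next step, and the return value is recovered from StopIteration.
def echo_chain():
    x = yield 1      # receives the 1 it just yielded
    y = yield x + 1  # receives 2
    return y * 10

print(run_coroutine(echo_chain()))  # 20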
|
91f21936a8ef601266f962c73610049c84cb823d
| 355,408 |
import logging
def setup_gunicorn_logging(app):
    """This is meant to be called if the Flask app is run by Gunicorn.
    It sets the handlers of the Flask app logger to those of the
    Gunicorn logger, and gives the Flask app the same log level
    as the Gunicorn logger.
    :param app: the actual Flask app to be logged
    :return: the logger object
    """
    gunicorn_logger = logging.getLogger('gunicorn.error')
    app.logger.handlers = gunicorn_logger.handlers
    app.logger.setLevel(gunicorn_logger.level)
    return app.logger
|
c2ac3afcb7a25f5373f664dd6a5889653b2190be
| 181,597 |
def deep_update(a, b):
    """Updates data structures
    Dicts are merged, recursively
    List b is appended to a (except duplicates)
    For anything else, the value of a is returned"""
    if type(a) is dict and type(b) is dict:
        for key in b:
            if key in a:
                a[key] = deep_update(a[key], b[key])
            else:
                a[key] = b[key]
        return a
    if type(a) is list and type(b) is list:
        return a + [i for i in b if i not in a]
    return a if a is not None else b
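
# Example (illustrative): nested dicts merge, lists append without
# duplicates, and scalars keep the left-hand value.
a = {"x": {"y": 1}, "tags": ["a"]}
b = {"x": {"z": 2}, "tags": ["a", "b"]}
print(deep_update(a, b))
# {'x': {'y': 1, 'z': 2}, 'tags': ['a', 'b']}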
|
961c2e4147a4f5e2612bcf2163a7540537587d4f
| 354,254 |
def determine_execution(execution_choices, roles):
    """
    >>> determine_execution([1, 0, 0], [1, 0, 0])
    True
    >>> determine_execution([0, 0, 0], [1, 0, 0])
    False
    >>> print(determine_execution([1, 1, 1], [1, 0, 0]))
    None
    """
    agent_win = None
    bad = roles.index(1)
    # If the bad agent chose themselves, they win
    if execution_choices[bad] == bad:
        agent_win = False
    # If enough agents chose the bad one, the good agents win
    elif sum(vote == bad for vote in execution_choices) >= (len(execution_choices) - 1) // 2:
        agent_win = True
    return agent_win
|
b47b558b0a272b2de004c406e0f14f7da73d838c
| 168,785 |
def unpad_b64(encoded):
    """Remove = or == from the end of the b64 string."""
    if encoded.find('=') != -1:
        return encoded[:encoded.find('=')]
    else:
        return encoded
|
b3b0914b3c14265889ce6a8783612f1633b2c36a
| 462,676 |
def list_remove_repeat(x):
    """Remove the repeated items in a list, and return the processed list.
    You may need it to create merged layers like Concat, Elementwise and etc.
    Parameters
    ----------
    x : list
        Input
    Returns
    -------
    list
        The list after removing its repeated items
    Examples
    --------
    >>> l = [2, 3, 4, 2, 3]
    >>> list_remove_repeat(l)
    [2, 3, 4]
    """
    y = []
    for i in x:
        if i not in y:
            y.append(i)
    return y
|
be7c6087df54d0378555591582f80f3570f065f7
| 353,880 |
import pathlib
def force_path(path):
    """
    Return a pathlib.Path instance.
    :param path: An object representing a file path ('/tmp/foo', Path(/tmp/foo), etc)
    """
    return path if isinstance(path, pathlib.Path) else pathlib.Path(path)
|
fdec7883675cb764c13e96a0acf03080151ad3b8
| 366,508 |
def comment(s: str):
    """
    Make a LISP inline comment from str
    """
    return f"#|{s}|#"
|
31ff231df1296b38e4a7a778cc22a8402adfa1ed
| 363,722 |
def complex_permittivity_to_tan_delta(real_permittivity: float,
                                      imag_permittivity: float) -> float:
    """Computes loss tangent from complex relative permittivity
    This is a simple and straightforward calculation of a material's loss
    tangent from the real and imaginary parts of its complex relative
    permittivity.
    Args:
        real_permittivity: A `float` value for the real part of the
            complex relative permittivity.
        imag_permittivity: A `float` value for the imaginary part of the
            complex relative permittivity.
    Returns:
        The value for the loss tangent.
    Raises:
        ZeroDivisionError: If the real part of the permittivity is zero.
    """
    try:
        tan_delta = imag_permittivity / real_permittivity
    except ZeroDivisionError as error:
        raise ZeroDivisionError('Real part must be nonzero'). \
            with_traceback(error.__traceback__)
    return tan_delta
|
1d0465c5e344c50bb440ab042408bedc104302ba
| 574,144 |
import turtle
import math
def drawHouse(unit):
    """
    Draws the house on the screen
    :pre: (relative) pos (0,0), heading (east), down
    :post: (relative) pos (0,0), heading (east), up
    :param unit: constant required to build the walls and roof of the house
    :return: wood required to build the house
    """
    turtle.pendown()
    turtle.left(90)
    turtle.forward(2 * unit)
    turtle.right(45)
    turtle.forward(unit * math.sqrt(2))
    turtle.right(90)
    turtle.forward(unit * math.sqrt(2))
    turtle.right(45)
    turtle.forward(2 * unit)
    turtle.left(90)
    turtle.penup()
    return 2 * unit + unit * math.sqrt(2) + unit * math.sqrt(2) + 2 * unit
|
accfe7b85fa18b996ba290b7e0652173a6547825
| 566,660 |
import yaml # noqa: WPS433
def generate_yaml(config: dict, user_data: dict) -> str:
    """Generate out a YAML format document (note: ``config`` is currently unused)."""
    return yaml.dump(user_data)
|
c7fce536bff7ea0610c2b7ee610d270158179eea
| 57,906 |
def int_return(number):
    """If the value is truthy, return it converted to an integer; otherwise return the input unchanged"""
    if number:
        return int(number)
    return number
|
c3cb58bacff77c8b0629552ef4b65fd51ba82c08
| 326,663 |
from typing import List
def represent_strlist(strlist: List[str]) -> str:
    """Output a list-of-strings as aligned multiline string. Can be used for printing."""
    return "\n".join(strlist)
|
3d78e32a367a0a60ef99a6b86c09ad27070188b0
| 345,963 |
def robust_scale(df):
    """Return copy of `df` scaled by (df - df.median()) / MAD(df) where MAD is a function returning the median absolute deviation."""
    median_subtracted = df - df.median()
    mad = median_subtracted.abs().median()
    return median_subtracted / mad
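
# Usage sketch (assumes pandas): the outlier barely affects the scaling
# because both the centre and the spread are median-based.
import pandas as pd
df = pd.DataFrame({"a": [1.0, 2.0, 3.0, 100.0]})
print(robust_scale(df))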
|
ba9ce747612c99997d890930e7ac7c582ba1af70
| 7,043 |
def ConvertStringToListFloat(line, space=" ", endline=""):
    """This function converts a string into a list of floats
    """
    list_values = []
    string_values = (line.replace(endline, "")).split(space)
    for string in string_values:
        if string != "":
            list_values.append(float(string))
    return list_values
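
# Example: parsing a whitespace-separated line of numbers; empty tokens
# produced by repeated separators are skipped.
print(ConvertStringToListFloat("1.0 2.5  3\n", endline="\n"))
# [1.0, 2.5, 3.0]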
|
64778a4225235049db9b4e355c2d9c7915e96166
| 118,338 |
import re
def valid_filename(text):
    """Turn a string into a valid filename.
    :param string text: The string to convert.
    :returns string: A valid filename derived from the string.
    """
    text = text.strip().replace(' ', '_')
    return re.sub(r'(?u)[^-\w.]', '', text)
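
# Example: spaces become underscores and other unsafe characters are dropped.
print(valid_filename("My Report: v2.pdf"))  # My_Report_v2.pdf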
|
61544282576b1eea113950e7ff31093a68ecf6ad
| 218,724 |
def _to_label(repo, path, name):
    """
    Returns the target string to pass to buck
    Args:
        repo: The name of the cell, or None
        path: The path within the cell
        name: The name of the rule
    Returns:
        A fully qualified target string
    """
    return "{}//{}:{}".format(repo or "", path, name)
|
c674083b42064e850875332ceca48f2979d63d3a
| 362,933 |
import re
def normalize_phone_number(value):
    """Normalize phone number so it doesn't contain invalid characters.
    This removes all characters besides a leading +, digits and x as
    described here: http://stackoverflow.com/a/123681/298479
    """
    return re.sub(r'((?!^)\+)|[^0-9x+]', '', value.strip())
|
489edac5e9e7bec61cb8ce9722446bd4be220548
| 331,758 |
def nonzero(matrix, row):
    """
    usage:
    >>> from scipy.sparse import csr_matrix
    >>> m = csr_matrix([[1, 0, 2], [3, 0, 0]])
    >>> print(nonzero(m, 0))
    {0, 2}
    >>> print(nonzero(m, 1))
    {0}
    """
    return set(matrix[row].indices)
|
197308cf9ed83124565444a5ac97dae14d805cb8
| 631,263 |
import pathlib
def generate_face_image_path(src_image_path, face_image_dir, face_id):
    """ Generate a file path of an extracted face image
    Arguments
    ---------
    src_image_path: pathlib.Path
        a path of a source image.
    face_image_dir: str
        a directory path where extracted face images will be saved.
    face_id: int
        an ID of an extracted image.
    Returns
    -------
    pathlib.Path object.
        a path which is used at saving an extracted image.
    """
    src_image_id = src_image_path.stem
    suffix = src_image_path.suffix
    face_image_id = src_image_id + '{:02d}'.format(face_id)
    face_image_filename = face_image_id + suffix
    _face_image_dir = pathlib.Path(face_image_dir)
    member_id = str(src_image_path.parent).split('/')[-1]
    face_image_path = _face_image_dir.joinpath(member_id, face_image_filename)
    return pathlib.Path(face_image_path)
|
1212bdd48ef9a571ea0bad74560a989aa10a3416
| 420,389 |
def recursive_egg_drop(eggs, floors):
    """
    Explanation
    When we drop an egg from floor i, there can be two cases: (1) the egg breaks (2) the egg doesn't break.
    If the egg breaks after dropping from the i-th floor, then we only need to check floors lower than i
    with the remaining eggs; so the problem reduces to i - 1 floors and eggs - 1 eggs.
    If the egg doesn't break after dropping from the i-th floor, then we only need to check floors higher
    than i; so the problem reduces to floors - i floors with the same number of eggs.
    Since we need to minimize the number of trials in the worst case, we take the maximum of the two cases.
    We consider the max of the two cases above for every floor and choose the floor that yields
    the minimum number of trials.
    Parameters
    ----------
    eggs : int
        number of eggs
    floors : int
        number of floors
    Returns
    -------
    int
        minimum number of egg droppings to figure out the critical floor
    >>> recursive_egg_drop(6, 15)
    4
    """
    # if eggs == 0 => 0
    if eggs <= 0:
        return 0
    # if floors == 1 or 0 => return floors
    if floors == 1 or floors == 0:
        return floors
    # if eggs == 1 => return floors
    if eggs == 1:
        return floors
    min_floors = float("inf")
    for floor in range(1, floors + 1):
        # Worst case of: egg breaks (search below floor) vs. egg survives
        # (search the floors - floor floors above).
        result = max(recursive_egg_drop(eggs - 1, floor - 1),
                     recursive_egg_drop(eggs, floors - floor))
        if result < min_floors:
            min_floors = result
    return min_floors + 1
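
# The plain recursion above is exponential. A memoised variant (a sketch,
# not part of the original source) makes larger inputs tractable:
from functools import lru_cache

@lru_cache(maxsize=None)
def egg_drop_memo(eggs, floors):
    if eggs <= 0:
        return 0
    if floors in (0, 1) or eggs == 1:
        return floors
    return 1 + min(
        max(egg_drop_memo(eggs - 1, floor - 1), egg_drop_memo(eggs, floors - floor))
        for floor in range(1, floors + 1)
    )

print(egg_drop_memo(6, 15))  # 4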
|
58162f76a72ca8ff50303d6e09fe902ba3ff3899
| 347,309 |
def jinja_hasattr(obj, string):
    """Template filter checking if the provided object has the provided
    string as an attribute
    """
    return hasattr(obj, string)
|
bd97caa7caf3e1e00ec7dc60a06ff0eb9f5d1f59
| 154,287 |
def decode_in_term(site):
    """
    Decodes input termination setting.
    """
    for term in ["40", "50", "60"]:
        if site.has_feature("IN_TERM.UNTUNED_SPLIT_" + term):
            return "UNTUNED_SPLIT_" + term
    return None
|
7b71814eee34dfaa2ac01c1144d776859407402d
| 287,748 |
def get_azfs_url(storage_account, container, blob=''):
    """Returns the url in the form of
    https://account.blob.core.windows.net/container/blob-name
    """
    return 'https://' + storage_account + '.blob.core.windows.net/' + \
        container + '/' + blob
|
dedc8fb665385165a4bf03329f17aa946bba8cd6
| 81,229 |
def _get_normalized_tde_config(tde_config):
    """Normalize the TDE configuration of a SQL database.
    Arguments:
        tde_config (dict): Raw TDE configuration of a SQL database
    Returns:
        dict: Normalized TDE configuration
    """
    tde_info = {}
    tde_enabled = False
    tde_status = tde_config.get('status')
    if tde_status == 'Enabled':
        tde_enabled = True
    tde_info['tde_enabled'] = tde_enabled
    return tde_info
|
cdd425ed32b7a16ccf7443f41351a3303614811d
| 43,772 |
def f1_at_n(is_match, potential_matches, n):
    """
    Takes a boolean list denoting whether the n-th entry of the predictions is an actual match,
    the number of potential matches (i.e. how many matches are at most possible), and an
    integer n, and computes the F1 score when only the n most relevant matches are considered.
    :param is_match: boolean list marking actual matches
    :param potential_matches: maximum number of possible matches
    :param n: number of top matches to consider
    :return: the F1 score at n
    """
    if potential_matches == 0:
        return 0
    correct_prediction = float(sum(is_match[:n]))
    precision = correct_prediction / n
    recall = correct_prediction / potential_matches
    if (recall + precision) != 0.0:
        f1 = 2 * (recall * precision) / (recall + precision)
    else:
        f1 = 0
    return f1
|
7db45d613de61b243022b5d737dedfd3b6e09293
| 287,323 |
from typing import Union
import torch
from typing import Dict
from typing import Optional
def load_torch_model(
    saved_model: Union[torch.nn.Module, Dict],
    model_definition: Optional[torch.nn.Module] = None,
) -> torch.nn.Module:
    """Loads a PyTorch model from the provided ``saved_model``.
    If ``saved_model`` is a torch Module, then return it directly. If ``saved_model`` is
    a torch state dict, then load it in the ``model_definition`` and return the loaded
    model.
    """
    if isinstance(saved_model, torch.nn.Module):
        return saved_model
    elif isinstance(saved_model, dict):
        if not model_definition:
            raise ValueError(
                "Attempting to load torch model from a "
                "state_dict, but no `model_definition` was "
                "provided."
            )
        model_definition.load_state_dict(saved_model)
        return model_definition
    else:
        raise ValueError(
            f"Saved model is of type {type(saved_model)}. "
            f"The model saved in the checkpoint is expected "
            f"to be of type `torch.nn.Module`, or a model "
            f"state dict of type dict."
        )
|
53e7178d955cf7da0471e6a4ed663522598f113a
| 64,071 |
def genotype_from_likelyhood_index(p, n, index):
    """
    Figure out the allele numbers corresponding to a likelihood position for
    ploidy P and N alternate alleles.
    https://samtools.github.io/hts-specs/VCFv4.3.pdf
    :param p: the ploidy as int
    :param n: alt alleles as int
    :param index: position in the likelihood ordering
    :return: list with genotype numbers as strings (it's how pyvcf has it)
    """
    def recursive_order(_p, _n, alleles, suffix=None):
        # Avoid a mutable default argument; the suffix list is read-only here.
        if suffix is None:
            suffix = []
        for a in range(_n):
            if _p == 1:
                alleles.append([str(a)] + suffix)
            elif _p > 1:
                recursive_order(_p - 1, a + 1, alleles, [str(a)] + suffix)
    alleles_list = []
    recursive_order(p, n, alleles_list)
    return alleles_list[index]
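
# Example: for ploidy 2 and 2 alleles the VCF likelihood ordering is
# 0/0, 0/1, 1/1, so index 1 maps to the heterozygous genotype.
print(genotype_from_likelyhood_index(2, 2, 1))  # ['0', '1']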
|
46cba784f4ee5462caabb0dbdcfd078469883365
| 282,613 |
def escape_single_quotes_sql(expr):
    """ Escape the single quotes of the given string.
    Args:
        expr: the string to escape.
    Returns:
        expr with quotes escaped.
    """
    return expr.replace("'", "''")
|
3032f11c3734805ea258025dd2166180c4bee8e0
| 335,928 |
def strip(input_string):
    """Strip surrounding whitespace, a trailing semicolon, and surrounding single/double quotes."""
    return input_string.strip().rstrip(";").strip('"').strip("'")
|
8278395cd8bcfc5fe700dfe593957410e79d174f
| 104,325 |
def identity(x):
    """Identity map."""
    return x
|
175def70d5694d5b9c0ca513ceea6c31860a4f8f
| 42,115 |
def over(*, aalpha, oomega):
    """Define the dyadic over ⍥ operator.
    Monadic case:
        f⍥g ⍵
        f g ⍵
    Dyadic case:
        ⍺ f⍥g ⍵
        (g ⍺) f (g ⍵)
    """
    def derived(*, alpha=None, omega):
        if alpha is None:
            return aalpha(alpha=alpha, omega=oomega(omega=omega))
        else:
            return aalpha(alpha=oomega(omega=alpha), omega=oomega(omega=omega))
    return derived
|
68079977befc9ec576aaf8706579d05864f21d24
| 693,554 |
def get_sorted_start_activities_list(start_activities):
    """
    Gets sorted start attributes list
    Parameters
    ----------
    start_activities
        Dictionary of start attributes associated with their count
    Returns
    ----------
    listact
        Sorted start attributes list
    """
    listact = []
    for sa in start_activities:
        listact.append([sa, start_activities[sa]])
    listact = sorted(listact, key=lambda x: x[1], reverse=True)
    return listact
|
ea86f6f1ad77f264810e27de910362c020cbfbfb
| 463,221 |
def compare_adf_stat_with_critical_values(result):
    """ Compares the ADF test statistic with the adfuller critical value (1%) and returns
    True or False depending on whether the test statistic >= the critical value
    :result (dict_items) Output from adfuller test
    :return (bool)
    """
    tstat = abs(next(iter(result[4].items()))[1])
    adf_stat = abs(round(result[0], 3))
    return adf_stat >= tstat
|
3f40d725b6ced7fd57b8cbd24e8178c372078755
| 572,472 |
def _compute_zero_padding(kernel_size: int) -> int:
    """Computes zero padding."""
    return (kernel_size - 1) // 2
|
59a5c23789cf44205ae305d8bae8dbfc37741699
| 520,718 |
import re
def charset_from_headers(headers):
    """Parse charset from headers.
    :param httplib2.Response headers: Request headers
    :return: Defined encoding, or default to ASCII
    """
    match = re.search("charset=([^ ;]+)", headers.get('content-type', ""))
    if match:
        charset = match.groups()[0]
    else:
        charset = "ascii"
    return charset
|
50fc897588b94b1b9d9af655244f97a5bb18face
| 656,126 |
import re
def replace_location(match: re.Match, location: str) -> str:
    """Replace location data in the warning string."""
    text = match.string
    start, end = match.span(2)
    return text[:start] + location + text[end:]
|
1260b7d42af59222bb2cc0be17f8349200f6c938
| 643,013 |
def write_begin_end(message):
    """Decorator factory, returning a begin-end decorator with the desired message.
    Can only be used on functions of a class that has self.write_end and self.write_begin.
    Usage:
    @write_begin_end('Summary')
    def _write_summary(self):
        ...
    """
    def decorator(func_to_be_wrapped):
        """Decorator, returning a wrapped function
        """
        def wrapper(self, *args, **kwargs):
            """The wrapped function
            """
            self.write_begin(message)
            func_to_be_wrapped(self, *args, **kwargs)
            self.write_end(message)
        return wrapper
    return decorator
|
44cdaa597a92740c3f9638df5908ed92027d052b
| 301,806 |
def _add_new_line_if_none(s: str):
    """Since graphviz 0.18, need to have a newline in body lines.
    This util is there to address that, adding newlines to body lines
    when missing."""
    if s and s[-1] != "\n":
        return s + "\n"
    return s
|
aed3582be53cf403601a125cebc436041257a0d9
| 687,716 |
def find_number_universe(keylog):
    """For entries in the keylog, find the set of used numerals."""
    numbers = set()
    for attempt in keylog:
        for num in attempt:
            numbers.add(num)
    return numbers
|
299ea2663cf02f990c1c0453d832efd10e554e92
| 264,837 |
def find_header(path: str) -> int:
    """find_header
    this function counts the lines of the header (i.e. how many lines there are
    until the first line which doesn't start with a '#' is encountered)
    Parameters
    ----------
    path : str
        file path
    Returns
    -------
    int
        number of lines belonging to the header
    """
    comment_lines = 0
    with open(path, "r") as file:
        for line in file.readlines():
            if line.strip().startswith("#"):
                comment_lines += 1
            else:
                break
    return comment_lines
|
6bfd83f82762f4292f605917dc034e10d2850bd4
| 643,550 |
import json
def read_json(json_file_path: str) -> dict:
    """Takes a JSON file and returns a dictionary"""
    with open(json_file_path, "r") as fp:
        data = json.load(fp)
    return data
|
07cb6c606de83b2b51ddcbf64f7eb45d6907f973
| 703,572 |
def hashable_tokens(string_to_split):
    """Split string into unique ngram tokens, sort and return as tuple,
    which is hashable.
    Args:
        string_to_split (str): string to split
    Returns:
        hashable_tokens (tuple): Hashable, standardised tuple of tokens
    """
    tokens = set(string_to_split.split(" "))  # unique tokens
    tokens.discard('')  # ignore empty tokens
    tokens = sorted(tokens)  # standardise order
    return tuple(tokens)
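
# Example: duplicates and empty tokens are dropped and order is standardised,
# so equivalent strings produce identical hashable keys.
print(hashable_tokens("b a  a"))                         # ('a', 'b')
print(hashable_tokens("a b") == hashable_tokens("b a"))  # True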
|
71f0b39909aefdef80d816f210211c47bb5c2d7b
| 274,169 |
def resize_with_aspect_ratio(img, max_size=2800):
    """Helper function to resize image against the longer edge
    Args:
        img (PIL.Image):
            Image object to be resized
        max_size (int, optional):
            Max size of the longer edge in pixels.
            Defaults to 2800.
    Returns:
        PIL.Image:
            Resized image object
    """
    w, h = img.size
    aspect_ratio = min(max_size / w, max_size / h)
    resized_img = img.resize(
        (int(w * aspect_ratio), int(h * aspect_ratio))
    )
    return resized_img
|
d8342ca127a386a76c1efbab14ed6f2de49a55cb
| 88,628 |
def prune_satisfied_clauses(clauses, variables):
    """Remove any clause that is already satisfied (i.e. is True) from the
    given clause set.
    Parameters
    ----------
    clauses: seq
        Sequence of clauses
    variables: object
        variable name -> bool mapping; must expose a ``satisfies_or_none``
        method, as used below
    Returns
    -------
    clauses: seq or None
        Sequence of clauses that are not yet satisfied. If None, it means at
        least one clause could not be satisfied
    """
    new_clauses = []
    for clause in clauses:
        evaluated_or_none = variables.satisfies_or_none(clause)
        if evaluated_or_none is None:
            new_clauses.append(clause)
        elif evaluated_or_none is False:
            return None
    return new_clauses
|
c11688df7a1d664c504a2602e5321c8aa6525f29
| 84,840 |
def remove_well_known_protos(filenames):
    """Remove "well-known" protos for objc and cpp.
    On those platforms we get these for free as a part of the protobuf runtime.
    We only need them for nanopb.
    Args:
        filenames: A list of filenames, each naming a .proto file.
    Returns:
        The filenames with members of google/protobuf removed.
    """
    return [f for f in filenames if 'protos/google/protobuf/' not in f]
|
44e2872dd03f82bdc83bb83ffbeb3de01fcfdc39
| 692,188 |
def is_whitespace(character):
    """
    Function to determine whether a given character is a whitespace character.
    :param character: The character to be checked
    :return: Whether the character is a whitespace character or not as a boolean value
    """
    return character in (' ', '\n', '\t', '\r')
|
4920ff9cfa7394aaa543d58df5af523ab06fd317
| 392,924 |
def hex_fig(n, uppercase=True):
    """
    Return the hexadecimal figure of `n`.
    :param n: 0 <= int < 16
    :param uppercase: bool
    :return: str (one character from 0123456789ABCDEF or 0123456789abcdef)
    """
    assert isinstance(n, int), type(n)
    assert 0 <= n < 16
    assert isinstance(uppercase, bool), type(uppercase)
    return (str(n) if n < 10
            else chr(ord('A' if uppercase else 'a') + n - 10))
|
47b7b2af2e3b047b1bd0dd707ec007e7d83592a1
| 586,103 |
def get_ext_coeffs(band):
    """
    Returns the extinction coefficient for a given band.
    Args:
        band: band name: "G", "R", "Z", "W1", or "W2" (string)
    Returns:
        ext: extinction coefficient (float)
    Note:
        https://www.legacysurvey.org/dr9/catalogs/#galactic-extinction-coefficients
    """
    exts = {"G": 3.214, "R": 2.165, "Z": 1.211, "W1": 0.184, "W2": 0.113}
    return exts[band]
|
a8bf8f8a340de6ce04dc6a5d8d51a1f0937c6610
| 125,627 |
def bbox_from_gdf(gdf, buffer_value=0.001):
    """
    Obtain bbox from the whole GeoDataFrame
    bbox --> (South, West, North, East) or (# minlat, minlon, maxlat, maxlon)
    Parameters
    ----------
    gdf : GeoDataFrame
    buffer_value : float
        buffer variable, see more in GeoDataFrame.Geometry.buffer()
    Returns
    -------
    Tuple
    """
    # the whole geometry (note: cascaded_union is deprecated in newer
    # geopandas releases in favour of unary_union)
    gdf_boundary = gdf.cascaded_union.convex_hull.buffer(buffer_value)
    # get the bounds
    (West, South, East, North) = gdf_boundary.bounds
    # create the bbox
    bbox = (South, West, North, East)
    return bbox
|
843d2782679907e93f814e71d5c4d7d945452325
| 334,239 |
def _apply_penalties(extra_out, args):
    """Based on `args`, optionally adds regularization penalty terms for
    activation regularization, temporal activation regularization and/or hidden
    state norm stabilization.
    Args:
        extra_out[*]:
            dropped: Post-dropout activations.
            hiddens: All hidden states for a batch of sequences.
            raw: Pre-dropout activations.
    Returns:
        The penalty term associated with all of the enabled regularizations.
    See:
        Regularizing and Optimizing LSTM Language Models (Merity et al., 2017)
        Regularizing RNNs by Stabilizing Activations (Krueger & Memisevic, 2016)
    """
    penalty = 0
    # Activation regularization.
    if args.activation_regularization:
        penalty += (args.activation_regularization_amount *
                    extra_out['dropped'].pow(2).mean())
    # Temporal activation regularization (slowness)
    if args.temporal_activation_regularization:
        raw = extra_out['raw']
        penalty += (args.temporal_activation_regularization_amount *
                    (raw[1:] - raw[:-1]).pow(2).mean())
    # Norm stabilizer regularization
    if args.norm_stabilizer_regularization:
        penalty += (args.norm_stabilizer_regularization_amount *
                    (extra_out['hiddens'].norm(dim=-1) -
                     args.norm_stabilizer_fixed_point).pow(2).mean())
    return penalty
|
4260fb359592c4ea4efda73563e61975576ab9fe
| 411,292 |
def subdivide(x_1, x_2, n):
    """Performs the n-th cantor subdivision of the interval (x_1, x_2), a subset of [0, 1]"""
    if n == 0:
        return []
    new_x_1 = 2 * (x_1 / 3) + x_2 / 3
    new_x_2 = x_1 / 3 + 2 * (x_2 / 3)
    return (
        subdivide(x_1, new_x_1, n - 1)
        + [new_x_1, new_x_2]
        + subdivide(new_x_2, x_2, n - 1)
    )
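
# Example: each level yields the endpoints of the removed middle thirds,
# i.e. the boundary points of the Cantor set construction.
print(subdivide(0, 1, 1))       # [0.333..., 0.666...]
print(len(subdivide(0, 1, 3)))  # 14 points after three subdivisions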
|
ee2cc0ba214d363555224e4b70c10976c63d7dec
| 27,369 |
def normalize_qa(qa, max_qa=None):
    """ Normalize quantitative anisotropy.
    Used mostly with GQI rather than GQI2.
    Parameters
    ----------
    qa : array, shape (X, Y, Z, N)
        where N is the maximum number of peaks stored
    max_qa : float,
        maximum qa value. Usually found in the CSF (cerebrospinal fluid).
    Returns
    -------
    nqa : array, shape (X, Y, Z, N)
        normalized quantitative anisotropy
    Notes
    -----
    Normalized quantitative anisotropy has the very useful property
    to be very small near gray matter and background areas. Therefore,
    it can be used to mask out white matter areas.
    """
    if max_qa is None:
        return qa / qa.max()
    return qa / max_qa
|
1330434a0a630634c5284920fa35c7518bc34937
| 679,923 |
import re
def psfAscAdjust(s):
    """There are minor differences in how the psf util handles floats; -0.0000 is
    shown as 0.0000 in Python. This function corrects for that.
    >>> psfAscAdjust("-0.00000")
    '0.00000'
    >>> psfAscAdjust("bla bla bla -0.00 0.00 -0.001 1.00000e-05")
    'bla bla bla 0.00 0.00 -0.001 1.00000e-05'
    """
    s = re.sub(r"-(0\.0*$)", r"\1", s)
    return re.sub(r"-(0\.0*\D)", r"\1", s)
|
69c5547df64346a95c815ada758838c06f7aee4d
| 234,603 |
from typing import Counter
def extract_binary_tally(class_name, tally):
    """Extract single-class TP, FP, FN, TN from multi-class confusion tally.
    Reduces the multi-class expectation/prediction to binary - did they
    include the class of interest, or not?
    Returns a 4-tuple of values, True Positives (TP), False Positives (FP),
    False Negatives (FN), True Negatives (TN), which sum to the tally total.
    """
    bt = Counter()
    for (expt, pred), count in tally.items():
        bt[class_name in expt.split(";"), class_name in pred.split(";")] += count
    return bt[True, True], bt[False, True], bt[True, False], bt[False, False]
|
a06f21c6338e098e8b31450d4196eb964d721a3e
| 84,004 |
def wasserstein_loss(real_logit, fake_logit):
    """
    :param real_logit: logit(s) for real images (if None just return generator loss)
    :param fake_logit: logit(s) for fake images
    :return: loss for discriminator and generator (unless real_logit is None)
    """
    loss_generator = - fake_logit
    if real_logit is None:
        return loss_generator
    loss_discriminator_real = - real_logit
    loss_discriminator_fake = fake_logit
    # this actually negates the need for a bias in the FC layer, it's cancelled out
    loss_discriminator = loss_discriminator_real + loss_discriminator_fake
    return loss_discriminator, loss_generator
|
5e10c3da3a74033011474a24fc10f9042bf5d45a
| 363,111 |
def get_institution_url(base_url):
    """
    Clean up a given base URL.
    :param base_url: The base URL of the API.
    :type base_url: str
    :rtype: str
    """
    base_url = base_url.strip()
    return base_url.rstrip("/")
|
0a691c041b73d5f8ac74c1ad264c5e0d8ff2af92
| 90,891 |
def iff(p: bool, q: bool) -> bool:
    """
    If and only if operation.
    p <--> q <==> (p --> q) and (q --> p)
    The biconditional (iff) is true when p and q are both True or both
    False: if the hypothesis is True the conclusion is True, and if the
    hypothesis is False the conclusion is False.
    """
    return p is q
|
89052ab55fa61bc8f1407cf04bcd230d8dd5effc
| 392,270 |
from typing import List
from typing import Tuple
def chunk_list(items: List[str], chunkSize: int) -> List[Tuple[int, List[str]]]:
    """Converts a list into a list of (start index, chunk) tuples where each
    chunk has at most chunkSize items"""
    result: List[Tuple[int, List[str]]] = []
    for index in range(0, len(items), chunkSize):
        result.append((index, items[index:(index + chunkSize)]))
    return result
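
# Example: each tuple carries the chunk's start index in the original list.
print(chunk_list(["a", "b", "c", "d", "e"], 2))
# [(0, ['a', 'b']), (2, ['c', 'd']), (4, ['e'])]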
|
d76ac190bb4ee8e43f8b03d095043694f051c756
| 472,237 |
def filter_selected(queryset, name, value):
    """
    Filter a queryset with comma-separated lecture codes.
    """
    query = [x.strip() for x in value.split(',')]
    base = queryset
    result = None
    for code in query:
        # Keep only lectures whose code matches one of the queried codes.
        if result is None:
            result = base.filter(code=code)
        else:
            result = result.union(base.filter(code=code))
    return result
|
b4a3405cf7a5e9a6fce3cb740e9ff98ff7433d5a
| 237,593 |
def expect_lit(char, buf, pos):
    """Expect a literal character at the current buffer position."""
    if pos >= len(buf) or buf[pos] != char:
        return None, len(buf)
    return char, pos + 1
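
# Example: on success the position advances by one; on failure the parser
# conventionally jumps to the end of the buffer.
print(expect_lit("h", "hello", 0))  # ('h', 1)
print(expect_lit("x", "hello", 0))  # (None, 5)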
|
a90ab29f20bdc4733cfd331d2d58f7952c02ae62
| 657,441 |
def reverse(sequence):
    """
    Reverse a string
    Args:
        sequence (string): Sequence to reverse
    Returns:
        string: The reversed sequence
    """
    return sequence[::-1]
|
c31f46cdbdb324e88b7901906337aede21762633
| 200,103 |
def encode(value, encoding='utf-8'):
    """Encode given Unicode value to bytes with given encoding
    :param str value: the value to encode
    :param str encoding: selected encoding
    :return: bytes; value encoded to bytes if input is a string, original value otherwise
    >>> from pyams_utils.unicode import encode
    >>> encode('Chaîne accentuée')
    b'Cha\\xc3\\xaene accentu\\xc3\\xa9e'
    >>> encode('Chaîne accentuée', 'latin1')
    b'Cha\\xeene accentu\\xe9e'
    """
    return value.encode(encoding) if isinstance(value, str) else value
|
bd417a0ef50ef0f9a48594fd2b9c049e98932bc8
| 662,188 |
import requests
def get_common_player_info(player_id):
    """Get common player info."""
    url = 'http://stats.nba.com/stats/{}/'
    query = '?PlayerID={}'
    endpoint = 'commonplayerinfo'
    full_url = url.format(endpoint) + query.format(player_id)
    r = requests.get(full_url, headers={'User-agent': 'Ryan DiCommo'})
    player_info = r.json()
    return player_info
|
ebe5b383a3861ddfc13e8e71499eed15cef3914a
| 487,059 |
def get_categories(trans):
    """Get all categories from the database."""
    return trans.sa_session.query(trans.model.Category) \
        .filter(trans.model.Category.table.c.deleted == False) \
        .order_by(trans.model.Category.table.c.name) \
        .all()
|
bba145dc8f9b2ba0b8631dcb5a1abff2b0327262
| 353,222 |
import torch
def conv3x3(in_planes: int, out_planes: int, stride: int = 1, groups: int = 1, dilation: int = 1):
    """
    3x3x3 convolution with zero padding (a 3-D convolution layer)
    Args:
        in_planes (int): The number of input channels.
        out_planes (int): The number of output channels.
        stride (int): The convolution stride. Controls the stride for the cross-correlation
        groups (int): The number of groups in the convolution. Controls the connections between inputs and outputs.
        dilation (int): Controls the spacing between the kernel points
    Returns:
        :obj:`torch.nn.Conv3d`: A convolution layer.
    """
    return torch.nn.Conv3d(in_planes, out_planes, kernel_size=3, stride=stride,
                           padding=dilation, groups=groups, bias=False, dilation=dilation)
|
fc19fb56aa47e5ae64f371fc4b8b479a96b79051
| 232,260 |
def click(parent, selector):
    """Click on selector"""
    element = parent.find_element_by_css_selector(selector)
    element.click()
    return element
|
41cfd3e306cf7bf47307a85ae96b30602958ff83
| 203,314 |
def dot(x: dict, y: dict):
    """Returns the dot product of two vectors represented as dicts.
    Parameters
    ----------
    x
    y
    Examples
    --------
    >>> from river import utils
    >>> x = {'x0': 1, 'x1': 2}
    >>> y = {'x1': 21, 'x2': 3}
    >>> utils.math.dot(x, y)
    42
    """
    if len(x) < len(y):
        return sum(xi * y[i] for i, xi in x.items() if i in y)
    return sum(x[i] * yi for i, yi in y.items() if i in x)
|
5480250407b5c563e1aa058e774c2427458a47c5
| 570,315 |
import math
def poisson(n, expectedValue):
    """
    Calculates the probability-value of an integer in a poisson distribution.
    Args:
        n (int): quantity of which you want to know the probability
        expectedValue (float): the average value that defines the poisson distribution
    Returns:
        float: probability of "n" in distribution "expectedValue"
    """
    # Only consider whole numbers
    if n < 0 or n % 1 != 0:
        return 0
    else:
        return (expectedValue ** n) / math.factorial(n) * math.exp(-expectedValue)
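
# Example: probability of exactly 2 events when 3 are expected on average.
print(poisson(2, 3.0))   # ~0.224
print(poisson(-1, 3.0))  # 0 (only whole, non-negative numbers are considered)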
|
e1a81b47aad4128021981b3c5d1d83b2b4af2865
| 106,565 |
def parse_extra_alignments(extra_field):
    """
    Parse the additional alignments field of a SAM file
    :param extra_field: The 'XA' field of a SAM file (e.g. "XA:Z:NC_000913.3,+2729620,22M,0;NC_000913.3,+3427225,22M,0;")
    :return: A list of tuples representing the positions of the additional alignments
    """
    extra_field = extra_field[5:]
    alignments = extra_field.strip(";").split(";")
    alignment_positions = []
    for alignment in alignments:
        parts = alignment.split(",")
        seq = parts[0]
        strand = parts[1][0]
        pos = int(parts[1][1:])
        alignment_positions += [(seq, pos)]
        # print("extra alignment:", seq, strand, pos, file=sys.stderr)
    return alignment_positions
|
5799794760e65ff20aa2f3767d49d17b873fb31e
| 641,348 |
import requests
def check_is_student(s: requests.Session) -> bool:
    """Determine whether the user is a student.
    Args:
        s (requests.Session): session
    Returns:
        bool:
    """
    url = s.get("https://www.zhixue.com/container/container/index/").url
    return "student" in url
|
891451df60c0bf649a9b20b874882c0e1529c9c3
| 282,366 |
def add_stats_args(parser, with_prefix=False, with_required=True):  # pylint: disable=missing-docstring
    """
    Adds the args for collect_stats to a parser.
    :param parser: Parser instance.
    :param with_prefix: Add prefix (collect-stats).
    :param with_required: Add required statements where necessary.
    :return: The updated parser
    """
    def _add_arg(name, short_name, help_msg, **kwargs):
        if with_prefix:
            name_arg = "collect-stats-" + name
            short_name_arg = "cs" + short_name
        else:
            name_arg = name
            short_name_arg = short_name
        name_arg = "--" + name_arg
        if len(short_name_arg) > 1:
            short_name_arg = "--" + short_name_arg
        else:
            short_name_arg = "-" + short_name_arg
        required = False
        if "required" in kwargs:
            required = (required or kwargs["required"]) and with_required
            del kwargs["required"]
        parser.add_argument(name_arg, short_name_arg, help=help_msg, required=required, **kwargs)
    _add_arg("log-path", "l", "Path to the log output folder.", type=str, required=True)
    _add_arg("validation-set-path", "v", "Path to the validation set SMILES file.", type=str, required=True)
    _add_arg("sample-size", "n", "Number of SMILES to sample from the model. [DEFAULT: 10000]", type=int, default=10000)
    _add_arg("with-weights", "w", "Store the weight matrices each epoch [DEFAULT: False].",
             action="store_true", default=False)
    _add_arg("smiles-type", "st",
             "SMILES type to convert to TYPES=(smiles, deepsmiles.[branches|rings|both]) [DEFAULT: smiles]",
             type=str, default="smiles")
    return parser
|
02331903f6dfc4ba5f53f17f0fde21802fe90e80
| 532,357 |
def get_i18n_field_param(Model, i18n_field, param_name):
    """
    Return i18n_param from the Model.i18n_field_params dict if it exists,
    otherwise get the param from i18n_field
    """
    if hasattr(Model, "i18n_field_params") and param_name in Model.i18n_field_params:
        return Model.i18n_field_params[param_name]
    return getattr(i18n_field, param_name)
|
c0289446c7914ca2a1f990b7411bf8a07c7a89c0
| 300,319 |
from collections import deque
def problem_1_4(s: str) -> str:
    """
    replace all spaces in a string with %20
    The input must carry enough trailing spaces to hold the expansion.
    >>> problem_1_4("foo bar" + " " * 2)
    'foo%20bar'
    >>> problem_1_4("a s")
    Traceback (most recent call last):
    ...
    ValueError: Size of provided string is incorrect
    """
    response = deque([])
    for char in s:
        if len(response) == len(s):
            return "".join(response)
        if char == " ":
            response.append("%")
            response.append("2")
            response.append("0")
        else:
            response.append(char)
    raise ValueError("Size of provided string is incorrect")
89bae80b9a2270a1100f3f48b67813d736df0941
| 571,809 |
def indent_xml(xml_as_string):
    """
    Indents a string of XML-like objects.
    This works only for units with no text or tail members, and only for
    strings whose leaves are written as <tag /> and not <tag></tag>.
    :param xml_as_string: XML string to indent
    :return: indented XML string
    """
    tabs = 0
    lines = str(xml_as_string).replace('><', '>\n<').splitlines()
    s = ''
    for line in lines:
        if line.startswith('</'):
            tabs -= 1
        s += (" " * tabs) + line + '\n'
        if not (line.endswith('/>') or line.startswith('</')):
            tabs += 1
    return s
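
# Example: works on self-closing leaves written as <tag />.
print(indent_xml('<a><b /></a>'))
# <a>
#   <b />
# </a>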
|
0c330b5c277042bb86011bc7c7cbb5f71da63ea7
| 321,306 |
def parse_seqid(record):
    """Parses sequence and taxids from fasta
    UniRef records have the format 'UniRefxx_Q6GZX3' where xx is one of 100, 90 or 50. The fasta description for
    these entries contains the lowest common taxonomic id for all sequences belonging to each uniref id:
    Example:
        >UniRef50_Q6GZX3 Uncharacterized protein 002L n=51 Tax=Iridoviridae TaxID=10486 RepID=002L_FRG3G
    This function trims the 'UniRefxx' part and extracts the taxonomy id from the fasta description.
    If the format doesn't correspond to UniRef (e.g. when the records are from 'nr' instead) the original sequence id
    is returned instead.
    Parameters
    ----------
    record: Bio.SeqRecord.SeqRecord
        Sequence record as parsed using Bio.SeqIO.parse
    Returns
    -------
    newid: str
        Sequence identifier string
    taxid: str
        Taxonomy identifier string
    db_type: str
        Text string showing whether sequence is from a UniRef fasta file or not
    """
    taxid = None
    if "UniRef" in record.id:
        db_type = "uniref"
        newid = (record.id).split("_")[-1]  # Remove the 'UniRefxx_' string
        try:
            taxid = [x.split("=")[1] for x in (record.description).split(" ") if "TaxID" in x][0]
        except IndexError:
            taxid = None
    else:
        db_type = "non_uniref"
        newid = record.id
    return newid, taxid, db_type
|
03dff5a933d7881664c88d22863399a86702840c
| 509,403 |
def has_possible_duplicates(results):
    """Returns True if it detects that there are possible duplicate records
    in the results i.e. identical full name."""
    full_names = set()
    for result in results:
        if result.full_name in full_names:
            return True
        full_names.add(result.full_name)
    return False
|
6f96973fbb964ef4cfc8f2e5fe984640fd5d928d
| 627,434 |
def subset_data_for_overhang(dataframe, overhang, horizontal=True, filter=True):
    """Subset Tatapov dataframe for given overhang.
    **Parameters**
    **dataframe**
    > Tatapov dataset, for example `tatapov.annealing_data["25C"]["01h"]`
    **overhang**
    > Overhang class instance (`Overhang`)
    **horizontal**
    > Orientation of returned dataframe (`bool`).
    **filter**
    > If True, keep only columns (if horizontal=True) or rows (if horizontal=False)
    with nonzero values (`bool`).
    """
    overhangs = [overhang.overhang, overhang.overhang_rc]
    if horizontal:
        subset_data = dataframe.loc[overhangs]
        if filter:
            subset_data = subset_data.loc[:, subset_data.sum(axis=0) != 0]
        return subset_data
    else:  # vertical
        subset_data = dataframe[overhangs]
        if filter:
            subset_data = subset_data.loc[subset_data.sum(axis=1) != 0, :]
        return subset_data
|
760b809429c5bbc62a024759a1cb5aa57709559b
| 650,755 |
def chromosome(record):
    """
    Get the chromosome this record is from. If it is part of a scaffold then it
    will be returned.
    """
    basic = record.id
    if basic.startswith("chromosome:") or basic.startswith("scaffold:"):
        parts = basic.split(":")
        return parts[2]
    return basic.split(".")[0]
|
220695950ef7de4e2819ae84c69b77d7f95ac8cf
| 623,913 |
from typing import List
def simple_acronym_detection(s: int, i: int, words: List[str], *args) -> int:
    """Detect acronyms based on runs of upper-case letters.
    Arguments:
        s (int): Index of first letter in run
        i (int): Index of current word
        words (list of str): Segmented input string
        args: Placeholder to conform to signature of
            advanced_acronym_detection
    Returns:
        int: Index of last letter in run
    """
    # Combine each letter into a single string.
    acr_str = "".join(words[s:i])
    # Remove original letters in word list.
    for _ in range(s, i):
        del words[s]
    # Replace them with the new word grouping.
    words.insert(s, acr_str)
    return s
|
227ce6569715b3427ed104d633307caf724fbb8b
| 211,671 |
def set_break_params(tracer, filename, lineno, temporary=False, cond=None,
                     funcname=None, **kwargs):
    """
    Formats the parameters for set_break.
    """
    filename = filename or tracer.default_file
    return {
        'filename': filename,
        'lineno': lineno,
        'temporary': temporary,
        'cond': cond,
        'funcname': funcname
    }
|
b97b3e386d27207b0841430273537f5a19b79a45
| 291,334 |
def findCharacter(stringList, patternCharacter):
    """
    Find the specific character from the list and return their indices
    """
    return [ind for ind, x in enumerate(list(stringList)) if x == patternCharacter]
|
32cc8fb5970c6cd3cefd161b9e13e340f1645d13
| 13,949 |
from typing import Sequence
def get_hiou(A: Sequence, B: Sequence) -> float:
    """ Horizontal intersection over union
    Expect A and B to be xyxy bboxes
    """
    h_inter = max(0, min(A[2], B[2]) - max(A[0], B[0]))
    hiou = h_inter / float(A[2] - A[0] + B[2] - B[0] - h_inter)
    return hiou
|
603b2249c7ff04f0182b4c5d85c6186b1b100fc7
| 326,038 |
from typing import Any
def generate_filename(document_date: str, args: Any) -> str:
    """Generate a filename depending on CLI arguments."""
    if args.output_file:
        return str(args.output_file)
    return f"tdc-{document_date}"
|
63777086c1e416b114442bca0dc18cf891fc4fae
| 92,262 |
def getList(x):
    """
    Convert any input object to list.
    """
    if isinstance(x, list):
        return x
    elif isinstance(x, str):
        return [x]
    try:
        return list(x)
    except TypeError:
        return [x]
|
06f888a5e689e9e19e5b3530fb9e9f8d7c63d670
| 60,252 |
def _tag(element):
    """Return element.tag with xmlns stripped away."""
    tag = element.tag
    if tag[0] == "{":
        uri, tag = tag[1:].split("}")
    return tag
|
36500ea2ea308740ac0e219e775fc394ba87e059
| 339,037 |
import json
def isJson(tstr):
    """ if tstr is valid JSON, return object created from the string, else, return None """
    ret = None
    try:
        ret = json.loads(tstr)
    except ValueError:
        pass
    return ret
|
0dd369cdbdd1e2a5c6e83b18e8777ad686e6000d
| 605,655 |
import csv
def read_csv(fpath):
    """Read path to CSV and return data.
    @param fpath: path to input CSV file, as absolute path or relative to
        the app dir. The header row is required and will be used to create
        column names as keys.
    @return data: list of CSV data, where each element is a row in the CSV
        represented as a dictionary.
    """
    with open(fpath) as csv_file:
        reader = csv.DictReader(csv_file)
        data = list(reader)
    return data
|
2e3c10e64ed96c3db6a3ebfba1d2a980beb73ec3
| 363,128 |
import pathlib
def find_component(path: pathlib.PurePath):
    """
    Extracts the likely component name of a CSV file based on the path to it
    :param path: path to a CSV file
    :return: likely component to use
    """
    # pylint: disable=no-else-return
    if path.parent.name.isnumeric():
        # Probably a version directory
        return path.parents[1].name
    else:
        return path.parent.name
|
2cdf37ed07a1c535f59c6318f402c66fe4248fc2
| 21,149 |
def parseDeviceInfo(device_info):
    """
    Parses Vendor, Product, Revision and UID from a Setup API entry
    :param device_info: tuple of (device information string, first installation date)
    :return: dictionary of parsed information or original input if error
    """
    # Initialize variables
    vid = ''
    pid = ''
    rev = ''
    uid = ''
    # Split string into segments on \\
    segments = device_info[0].split('\\')
    if 'usb' not in segments[0].lower():
        return None  # Eliminate non-USB devices from output; may hide other storage devices
    for item in segments[1].split('&'):
        lower_item = item.lower()
        if 'ven' in lower_item or 'vid' in lower_item:
            vid = item.split('_', 1)[-1]
        elif 'dev' in lower_item or 'pid' in lower_item or 'prod' in lower_item:
            pid = item.split('_', 1)[-1]
        elif 'rev' in lower_item or 'mi' in lower_item:
            rev = item.split('_', 1)[-1]
    if len(segments) >= 3:
        uid = segments[2].strip(']')
    if vid != '' or pid != '':
        return {'Vendor ID': vid.lower(), 'Product ID': pid.lower(),
                'Revision': rev, 'UID': uid,
                'First Installation Date': device_info[1]}
    else:
        # Unable to parse data, returning whole input
        return device_info
|
cd8c0fea623c0c36ac733e239b84b7f1a9a90b09
| 187,471 |
from typing import Callable
from typing import List
import inspect
def positional_args_names(f: Callable) -> List[str]:
    """Returns the ordered names of the positional arguments of a function."""
    return list(p.name for p in inspect.signature(f).parameters.values()
                if p.kind in (inspect.Parameter.POSITIONAL_ONLY,
                              inspect.Parameter.POSITIONAL_OR_KEYWORD))
|
6b45d7c96a931ad28cb2ed38ab60fd44d1be008d
| 181,929 |
def is_anagram(a, b):
    """
    Checking if two words are anagrams
    >>> is_anagram("hello", "helol")
    True
    >>> is_anagram("hello", "helllo")
    False
    """
    return sorted(a) == sorted(b)
|
6b293fe1c12c279c95b3cd455edc70f2892bfaec
| 389,515 |