content (string, lengths 39–9.28k) | sha1 (string, length 40) | id (int64, 8–710k)
---|---|---|
def reverse_str(s):
"""Reverses the words in a sting and returns the new string to the
caller. A word in this context is defined as any space seperated
sequence of non-whitespace characters.
"""
s_list = s.strip().split(' ')
s_list.reverse()
rs = ''
for word in s_list:
rs = rs + ' ' + word
return rs.strip()
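# Illustrative usage sketch (an addition, not part of the original entry):
assert reverse_str('hello world foo') == 'foo world hello'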
|
723840fb757ec8cba22d9920e149f1ef8754f115
| 383,349 |
import random
def get_random_hashtags(input, number):
"""Gets a user-specified number of random hashtags from input
Parameters:
input (list): list of hashtags e.g. from fileIO.get_hashtags()
    number (int): the number of random hashtags to return (sampled with replacement)
    Returns:
    List of random hashtags
"""
output = []
count = 0
while (count < number):
hashtag = random.choice(input)
output.append(hashtag)
count = count + 1
return output
|
218ecbdc681c0eaf9ebae05c485de77e4c70c9d5
| 65,419 |
def bbox_size(bbox):
"""Calcs bounding box width and height.
:type bbox: list
:param bbox: bounding box
:rtype: tuple
:return: width and height
"""
x0, y0, x1, y1 = bbox
return abs(x1 - x0), abs(y1 - y0)
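# Illustrative usage sketch (an addition, not part of the original entry);
# abs() makes the result independent of corner ordering.
assert bbox_size([10, 20, 110, 70]) == (100, 50)
assert bbox_size([110, 70, 10, 20]) == (100, 50)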
|
1032ab37e5b05e38f67121e974354ea2fcdf9385
| 51,193 |
def find_init(cls):
"""Find proper init function for the given class."""
cls_attrs = dir(cls)
if "from_json" in cls_attrs:
return getattr(cls, "from_json")
return cls
|
ccd74a0d99a79fcd328c3e03c825020d7374501b
| 544,427 |
import time
def pretty_time(input_time):
"""Convert unix epoch time to human readable format"""
return time.strftime('%Y-%m-%d %H:%M:%S', time.gmtime(input_time))
|
d869bfa6428e7a840aad4c34b97179824375c403
| 459,930 |
def classify_helmet_belt_worn(x):
"""
    This function returns a string representation of the int value of the field which specifies whether the
    person was wearing a seatbelt or a helmet. This specification is from the Road Crash Statistics Victoria, 2013 Edition
document.
:param x: int value representing the classify helmet belt worn field
:return: string representation of the integer value
"""
if x == 1:
return 'Seatbelt Worn'
elif x == 2:
return 'Seatbelt Not Worn'
elif x == 3:
return 'Child Restraint Worn'
elif x == 4:
return 'Child Restraint Not Worn'
elif x == 5:
return 'Seatbelt/restraint Not fitted'
elif x == 6:
return 'Crash Helmet Worn'
elif x == 7:
return 'Crash Helmet Not Worn'
elif x == 8:
return 'Not Appropriate'
else:
return 'Not Known'
|
cba05be8d03c933e767a75400032d07e296e0ec3
| 708,049 |
def tags(*tags):
"""
Given a list of tags as positional arguments TAGS, return
a list of dictionaries in the format that the CKAN API
wants!
"""
return [{'name': t.replace("'", "") } for t in tags]
|
4532b81ebf75aa877082ad9d8da9d0fee70afac3
| 384,982 |
def array_current_index(array):
"""
Return the current index (elementIndex()) of a MArrayDataHandle, or -1 if the
current index isn't valid, probably because the array is empty.
"""
try:
return array.elementIndex()
except RuntimeError as e:
# If the array is empty, elementIndex raises an error.
return -1
|
654173b806c921b7a0d4bce987fba71a8bc26bb5
| 600,326 |
def _GenerateAlignedHtml(hyp, ref, err_type):
"""Generate a html element to highlight the difference between hyp and ref.
Args:
hyp: Hypothesis string.
ref: Reference string.
err_type: one of 'none', 'sub', 'del', 'ins'.
Returns:
a html string with
- error of hyp shown in "(hyp)"
    - error of ref shown in "<del>ref</del>"
- all errors highlighted with yellow background
"""
highlighted_html = ''
if err_type == 'none':
highlighted_html += '%s ' % hyp
elif err_type == 'sub':
highlighted_html += """<span style="background-color: yellow">
'<del>%s</del>(%s) </span>""" % (hyp, ref)
elif err_type == 'del':
highlighted_html += """<span style="background-color: yellow">
'<del>%s</del></span>""" % (
hyp)
elif err_type == 'ins':
highlighted_html += """<span style="background-color: yellow">
'(%s) </span>""" % (
ref)
else:
raise ValueError('unknown err_type ' + err_type)
return highlighted_html
|
bf397aeec5e382ecfb997cf4708a549e22263b39
| 300,954 |
def time_to_galex(atime):
"""
Given an astropy time object return the associated GALEX time (float)
"GALEX Time" = "UNIX Time" - 315964800
"""
return atime.unix - 315964800
|
41cb4cb0330d88bd2aceb8e0b3c675df5f5903cb
| 536,848 |
def gcd_step(a, b):
"""
Performs a single step of the gcd algorithm.
Example: gcd_step(1071, 462) == (2, 147) because 1071 == 2 * 462 + 147.
"""
if a < b:
return (0, b)
res = 0
while a >= b:
a -= b
res += 1
return (res, a)
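# Illustrative sketch (an addition, not part of the original entry): driving
# gcd_step to completion gives the Euclidean algorithm.
def gcd(a, b):
    while b != 0:
        _, r = gcd_step(a, b)
        a, b = b, r
    return a

assert gcd_step(1071, 462) == (2, 147)
assert gcd(1071, 462) == 21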
|
02eedde8652a285c1b7af999d439c9fe77349d3f
| 41,970 |
import typing
def empty(x: typing.Any) -> bool:
"""Returns true if x is None, or if x doesn't have a length, or if x's length is 0
"""
if x is None:
return True
# noinspection PyBroadException
try:
return len(x) == 0
except: # noinspection PyBroadException
return False
|
ecc576d7dc167b8a01753fd6a018a75515a611f5
| 113,138 |
def calculate_deg_fold_change(data1_df, data2_df, fc_cutoff=1,
alternative='two-sided'):
"""
This function calculates differentially expressed genes (DEGs)
between two DataFrames or Series based on fold-change.
Parameters
----------
data1_df : DataFrame or Series
gene expression data 1 (row: genes, col: samples)
data2_df : DataFrame or Series
gene expression data 2 (row: genes, col: samples)
fc_cutoff : float, optional
log2 fold-change cutoff. Default is 1.
alternative : {'greater', 'less', 'two-sided'}, optional
indicates the way to compare the two data. Default is 'two-sided'.
Returns
-------
gene_arr : ndarray
differentially expressed genes.
"""
if data1_df.ndim == 2:
diff_sr = data1_df.mean(axis=1) - data2_df.mean(axis=1)
else:
diff_sr = data1_df - data2_df
if alternative == 'two-sided':
gene_arr = diff_sr[diff_sr.abs() > fc_cutoff].index.values
elif alternative == 'greater':
gene_arr = diff_sr[diff_sr > fc_cutoff].index.values
elif alternative == 'less':
gene_arr = diff_sr[diff_sr < -fc_cutoff].index.values
else:
raise ValueError("<alternative> must be 'greater', 'less', or 'two-sided'.")
return gene_arr
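# Illustrative usage sketch (an addition, not part of the original entry),
# using two small pandas Series of log2 expression values.
import pandas as pd
s1 = pd.Series({'geneA': 5.0, 'geneB': 2.0})
s2 = pd.Series({'geneA': 2.5, 'geneB': 1.5})
assert list(calculate_deg_fold_change(s1, s2, fc_cutoff=1)) == ['geneA']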
|
1fd636b824131bfcf61d878ec2b30c220398694d
| 557,959 |
import uuid
def my_random_string(string_length=10):
"""Returns a random string of length string_length."""
random = str(uuid.uuid4()) # Convert UUID format to a Python string.
random = random.upper() # Make all characters uppercase.
random = random.replace("-", "") # Remove the UUID '-'.
return random[0:string_length]
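# Illustrative usage sketch (an addition, not part of the original entry):
s = my_random_string(8)
assert len(s) == 8 and s == s.upper()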
|
4526673ca5e5bcfa02a1a0c8c66812db1ddd7767
| 384,001 |
import itertools
def _chunk(iterable, size):
"""Split an iterable into chunks of a fixed size."""
# We're going to use some star magic to chunk the iterable. We create a
# copy of the iterator size times, then pull a value from each to form a
# chunk. The last chunk may have some trailing Nones if the length of the
# iterable isn't a multiple of size, so we filter them out.
args = (iter(iterable),) * size
return (
# pylint: disable=star-args
itertools.takewhile(lambda x: x is not None, group)
for group in itertools.zip_longest(*args)
)
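# Illustrative usage sketch (an addition, not part of the original entry).
# Each chunk is itself an iterator; note that None values inside the data
# would truncate a chunk, as the comment above warns.
assert [list(group) for group in _chunk(range(7), 3)] == [[0, 1, 2], [3, 4, 5], [6]]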
|
eb418877683bd67460a9363010284498adc935f2
| 566,516 |
def valiant_app_title() -> str:
"""The expected app title."""
return "Valiant"
|
c16193dab99a5756e0ec42e8042e9e0c331f56ff
| 599,215 |
def _find_user(oneandone_conn, user):
"""
Validates that the user exists by ID or a name.
Returns the user if one was found.
"""
for _user in oneandone_conn.list_users(per_page=1000):
if user in (_user['id'], _user['name']):
return _user
|
b16030b31868b5beac42801dff7d4ac9d617d317
| 128,258 |
def _complex_matrix_multiplication(x, y, mult_func):
"""
Perform a matrix multiplication, helper function for complex_bmm and complex_mm.
Parameters
----------
x : torch.Tensor
y : torch.Tensor
mult_func : Callable
Multiplication function e.g. torch.bmm or torch.mm
Returns
-------
torch.Tensor
"""
if not x.is_complex() or not y.is_complex():
raise ValueError("Both x and y have to be complex-valued torch tensors.")
output = (
mult_func(x.real, y.real)
- mult_func(x.imag, y.imag)
+ 1j * mult_func(x.real, y.imag)
+ 1j * mult_func(x.imag, y.real)
)
return output
|
140c9b5dee130fb7e583e1c515e063c05850f9ee
| 501,500 |
def printBond(bond, shift, molecule, alchemicalTransformation):
"""Generate bond line
Parameters
----------
bond : Bond Object
Bond Object
shift : int
Shift produced by structural dummy atoms
molecule : molecule object
Molecule object
alchemicalTransformation : bool
True if alchemical transformation
Returns
-------
bondLine : str
Bond line data
"""
k0 = bond.K0*836.80
R0 = bond.R0*0.1
ftype = 1
line = ''
atomAbond = molecule.atoms[bond.atomA.serialOriginal -1].serial-shift
atomBbond = molecule.atoms[bond.atomB.serialOriginal -1].serial-shift
if alchemicalTransformation:
k0_B = bond.K0_B*836.80
R0_B = bond.R0_B*0.1
line = '%5d%5d%5d %11.4f %13.3f%12.4f%11.3f\n' % (atomAbond, atomBbond, ftype, R0, k0, R0_B, k0_B)
# line = '%5d%5d%5d %11.4f %13.3f%12.4f%11.3f\n' % (bond.atomA.serial-shift, bond.atomB.serial-shift, ftype, R0, k0, R0_B, k0_B)
else: line = '%5d%5d%5d %11.4f %13.3f\n' % (atomAbond, atomBbond, ftype, R0, k0)
return line
|
4035d31952c4886ff5881d2ac502830468cfac77
| 245,855 |
def row_to_dict(row):
"""
Translate sql alchemy row to dict
Args:
row: SQL alchemy class
Returns:
data_dict(dict): data as dictionary
"""
if not row:
return {}
if hasattr(row, "__table__"):
return dict((col, getattr(row, col))
for col in row.__table__.columns.keys())
else:
ret = {}
for table in row:
if table:
ret[table.__tablename__] = dict(
(col, getattr(table, col)) for col
in table.__table__.columns.keys())
return ret
|
d5f13b7f582d97328f46960a02ce8007e8bfcaf6
| 62,024 |
def score_stash_counts(table, per=1):
""" Deduct points for cards in the stash. """
return -per * sum(len(stash.cards) for stash in table.stashes if stash.cards)
|
741e1fee5fe5a509125b3885268853333cc8753e
| 184,564 |
def pad_blocksize(value, block=64):
"""Pads value to blocksize
Args:
value (bytes): Value to pad
block (int): Block size (Default: 64)
"""
if len(value) % block != 0:
value += b"\x00" * (block - (len(value) % block))
return value
|
8306b343cacb36be462d2c9959aa3e4ab84889d4
| 273,629 |
def in_title(substr):
"""Make sure the substring is in the title"""
return lambda d: substr in d.title.lower()
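# Illustrative usage sketch (an addition, not part of the original entry),
# using a hypothetical object that exposes a `title` attribute.
from types import SimpleNamespace
pred = in_title('python')
assert pred(SimpleNamespace(title='Learning Python')) is True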
|
e83c045a75b33a41bb695be6226e3ec441b48424
| 498,255 |
def pdfembed_html(pdfembed_specs):
"""
Build the iframe code for the pdf file,
"""
html_base_code = """
<iframe
id="ID"
style="border:1px solid #666CCC"
title="PDF"
src="%s"
frameborder="1"
scrolling="auto"
height="%s"
width="%s"
align="%s">
</iframe>
"""
return ( html_base_code % (pdfembed_specs['src' ],
pdfembed_specs['height'],
pdfembed_specs['width' ],
pdfembed_specs['align' ]) )
|
95c0efbd8b9c7d0686919ad81bfa4397722331db
| 250,720 |
def maybe_first(a, b):
"""Returns first argument 'a' if it is not None else 'b' """
return b if a is None else a
|
145566786c896acf3c0cdcfdc026a8072d2ded68
| 433,824 |
def buildCDF(values):
"""
Given an array, accumulate it and normalize to the interval U[0,1]
"""
return (values.cumsum()-values.cumsum()[0])/values.cumsum()[-1]
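# Illustrative usage sketch (an addition, not part of the original entry),
# assuming a NumPy array input so that .cumsum() is available.
import numpy as np
assert np.allclose(buildCDF(np.array([0.0, 1.0, 1.0])), [0.0, 0.5, 1.0])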
|
0fa290a6e28fe99d3188ef209a55d92a6788fd6a
| 193,197 |
def check_perm(user, permission, website):
"""
Determine if the user has a global or website-specific permission
Args:
user (users.models.User): The user to check
permission (str): The permission to check
website (Website): The website to check
"""
return user.has_perm(permission, website) or user.has_perm(permission)
|
55d8c344f4c1028608aff2a57b485d4b16a78280
| 423,014 |
def validate_extension(extension):
"""
    Checks that the API extension starts with, and does not end with, a '/'. A
    ValueError is raised, at which point the application exits, if the extension
    does not meet these validation rules.
:param extension: The extension for the API
"""
extension = extension.strip()
if not extension.startswith("/"):
raise ValueError("must start with '/'")
if extension.endswith("/"):
raise ValueError("must not end with '/'")
return extension
|
e5d672d289ba94039afca4ba270bd056a40ecd3e
| 149,324 |
import re
def is_itiming_hosted(race_info):
"""
:param race_info: the race metadata to be inspected (RaceInfo)
:return: if the given race is hosted on itiming.com
"""
    it_re = re.compile(r".*itiming\.com.*")
match = it_re.search(race_info.url)
return match is not None
|
624b357bacdcad98a5de64cc80d2e539ab6bfe2c
| 578,822 |
def mils(value):
"""Returns number in millions of dollars"""
try:
value = float(value) / 1000000
except (ValueError, TypeError, UnicodeEncodeError):
return ''
return '${0:,}M'.format(value)
|
2dcbcb3b4a731c2b76ff348a398e469b0f493174
| 668,671 |
import re
def extract_digits_from_text(text):
"""
    This function extracts every run of digits in a text and returns them as integers
    :param text: the text to search
    :return: a list of the integers found (an empty list if text is empty or None)
"""
return list(map(int, re.findall(r'\d+', text))) if text else []
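# Illustrative usage sketch (an addition, not part of the original entry):
assert extract_digits_from_text('room 12, floor 3') == [12, 3]
assert extract_digits_from_text('') == []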
|
d502c1a09dd8d82f57a310b3be44d0a7ac31f597
| 466,898 |
def get_domain_id(ks_client, domain_name):
"""Return domain ID.
:param ks_client: Authenticated keystoneclient
:type ks_client: keystoneclient.v3.Client object
:param domain_name: Name of the domain
:type domain_name: string
:returns: Domain ID
:rtype: string or None
"""
all_domains = ks_client.domains.list(name=domain_name)
if all_domains:
return all_domains[0].id
return None
|
df07d3a957cc520d180131b1475fadec8e5fa6cf
| 677,767 |
import base64
def b64(content, encode=True):
"""Encode/Decode string to/from base64
    Encodes the content to base64 or decodes it from base64, depending on the `encode` flag.
Args:
content: Content to encode or decode
encode: Encode or decode content. Default True (encode string to base64)
Returns:
The encoded/decoded message
"""
if encode:
content_bytes = content.encode('ascii')
base64_bytes = base64.b64encode(content_bytes)
base64_content = base64_bytes.decode('ascii')
return base64_content
else:
base64_bytes = content.encode('ascii')
content_bytes = base64.b64decode(base64_bytes)
message = content_bytes.decode('ascii')
return message
|
e2bec9e0db375c7f0bdf5a17d5ba04948126a872
| 240,925 |
async def present(
hub,
ctx,
name,
address_prefix,
next_hop_type,
route_table,
resource_group,
next_hop_ip_address=None,
connection_auth=None,
**kwargs,
):
"""
.. versionadded:: 1.0.0
Ensure a route exists within a route table.
:param name:
Name of the route.
:param address_prefix:
The destination CIDR to which the route applies.
:param next_hop_type:
The type of Azure hop the packet should be sent to. Possible values are: 'VirtualNetworkGateway', 'VnetLocal',
'Internet', 'VirtualAppliance', and 'None'.
:param next_hop_ip_address:
The IP address packets should be forwarded to. Next hop values are only allowed in routes where the next hop
type is 'VirtualAppliance'.
:param route_table:
The name of the existing route table which will contain the route.
:param resource_group:
The resource group assigned to the route table.
:param connection_auth:
A dict with subscription and authentication parameters to be used in connecting to the
Azure Resource Manager API.
Example usage:
.. code-block:: yaml
Ensure route exists:
azurerm.network.route.present:
- name: rt1_route2
- route_table: rt1
- resource_group: group1
- address_prefix: '192.168.0.0/16'
- next_hop_type: vnetlocal
- connection_auth: {{ profile }}
"""
ret = {"name": name, "result": False, "comment": "", "changes": {}}
action = "create"
if not isinstance(connection_auth, dict):
if ctx["acct"]:
connection_auth = ctx["acct"]
else:
ret[
"comment"
] = "Connection information must be specified via acct or connection_auth dictionary!"
return ret
route = await hub.exec.azurerm.network.route.get(
ctx,
name,
route_table,
resource_group,
azurerm_log_level="info",
**connection_auth,
)
if "error" not in route:
action = "update"
if address_prefix != route.get("address_prefix"):
ret["changes"]["address_prefix"] = {
"old": route.get("address_prefix"),
"new": address_prefix,
}
if next_hop_type.lower() != route.get("next_hop_type", "").lower():
ret["changes"]["next_hop_type"] = {
"old": route.get("next_hop_type"),
"new": next_hop_type,
}
if next_hop_type.lower() == "virtualappliance" and next_hop_ip_address != route.get(
"next_hop_ip_address"
):
ret["changes"]["next_hop_ip_address"] = {
"old": route.get("next_hop_ip_address"),
"new": next_hop_ip_address,
}
if not ret["changes"]:
ret["result"] = True
ret["comment"] = "Route {0} is already present.".format(name)
return ret
if ctx["test"]:
ret["result"] = None
ret["comment"] = "Route {0} would be updated.".format(name)
return ret
else:
ret["changes"] = {
"old": {},
"new": {
"name": name,
"address_prefix": address_prefix,
"next_hop_type": next_hop_type,
"next_hop_ip_address": next_hop_ip_address,
},
}
if ctx["test"]:
ret["comment"] = "Route {0} would be created.".format(name)
ret["result"] = None
return ret
route_kwargs = kwargs.copy()
route_kwargs.update(connection_auth)
route = await hub.exec.azurerm.network.route.create_or_update(
ctx=ctx,
name=name,
route_table=route_table,
resource_group=resource_group,
address_prefix=address_prefix,
next_hop_type=next_hop_type,
next_hop_ip_address=next_hop_ip_address,
**route_kwargs,
)
if "error" not in route:
ret["result"] = True
ret["comment"] = f"Route {name} has been {action}d."
return ret
ret["comment"] = "Failed to {0} route {1}! ({2})".format(
action, name, route.get("error")
)
if not ret["result"]:
ret["changes"] = {}
return ret
|
5e4aff2737882d7efa0b1522739af6697d6197a9
| 574,631 |
def pos_obs_from_sig(read_sig):
"""
Returns a list of position, observation pairs described in the read
signature string.
"""
def pos_obs(var):
""" Splits var into a int position and a string base. """
pos, obs = var.split(':')
return int(pos), obs
return [pos_obs(var) for var in read_sig.split(',')]
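# Illustrative usage sketch (an addition, not part of the original entry),
# using a hypothetical read signature string:
assert pos_obs_from_sig('12:A,37:G') == [(12, 'A'), (37, 'G')]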
|
81153dc2ce59b9b7b5c17cef20b346f27ce0b6a2
| 416,199 |
def generate_color_series(color, variation, diff=10, reverse=False):
"""Generate light and dark color series.
Args:
color (tuple) : Color [0,255]
variation (int) : How many colors to create.
diff (int) : How much to change
reverse (bool) : If ``True``, sort in descending order.
Returns:
colors (list) : colors.
Examples:
        >>> from pycharmers.utils import generate_color_series
        >>> generate_color_series(color=(245,20,25), variation=3, diff=10)
        [(235, 10, 15), (245, 20, 25), (255, 30, 35)]
        >>> generate_color_series(color=(245, 20, 25), variation=3, diff=-10)
[(225, 0, 5), (235, 10, 15), (245, 20, 25)]
"""
val = max(color[:3]) if diff > 0 else min(color[:3])
u = 0
for _ in range(variation - 1):
val += diff
if not 255 >= val >= 0:
break
u += 1
return sorted(
[
tuple(
[
max(min(e + diff * (u - v), 255), 0) if i < 3 else e
for i, e in enumerate(color)
]
)
for v in range(variation)
],
reverse=reverse,
)
|
fe87e87139590e5ac4f45f2a34d20c913361da97
| 437,484 |
import glob
def _glob(filenames):
"""Expand a filename or sequence of filenames with possible
shell metacharacters to a list of valid filenames.
Ex: _glob(('*.py*',)) == ['able.py','baker.py','charlie.py']
"""
if type(filenames) is str:
return glob.glob(filenames)
flist = []
for filename in filenames:
globbed = glob.glob(filename)
if globbed:
for file in globbed:
flist.append(file)
else:
flist.append(filename)
return flist
|
5d374467cc0731f4a8468515e5b79a00898d1766
| 225,078 |
import re
def safe_filename(s: str, max_length: int = 255) -> str:
"""Sanitize a string making it safe to use as a filename.
This function was based off the limitations outlined here:
https://en.wikipedia.org/wiki/Filename.
:param str s:
A string to make safe for use as a file name.
:param int max_length:
The maximum filename character length.
:rtype: str
:returns:
A sanitized string.
"""
# Characters in range 0-31 (0x00-0x1F) are not allowed in ntfs filenames.
    ntfs_characters = [chr(i) for i in range(0, 32)]
characters = [
r'"',
r"\#",
r"\$",
r"\%",
r"'",
r"\*",
r"\,",
r"\.",
r"\/",
r"\:",
r'"',
r"\;",
r"\<",
r"\>",
r"\?",
r"\\",
r"\^",
r"\|",
r"\~",
r"\\\\",
]
pattern = "|".join(ntfs_characters + characters)
regex = re.compile(pattern, re.UNICODE)
filename = regex.sub("", s)
return filename[:max_length].rsplit(" ", 0)[0]
|
62eba00ba32dda2f0a5b7987d3e0158775b927d4
| 301,691 |
def add_currency_filter_to_query(query, currency):
"""
Adds the currency filter to the query
:param query: the query dictionary
:type query: dict
:param currency: currency type
:type currency: str
:return: the query dictionary
:rtype: dict
"""
if "filters" not in query["query"]:
query["query"]["filters"] = {}
if "trade_filters" not in query["query"]["filters"]:
query["query"]["filters"]["trade_filters"] = {}
if "filters" not in query["query"]["filters"]["trade_filters"]:
query["query"]["filters"]["trade_filters"]["filters"] = {}
if "price" not in query["query"]["filters"]["trade_filters"]["filters"]:
query["query"]["filters"]["trade_filters"]["filters"]["price"] = {}
if "option" not in query["query"]["filters"]["trade_filters"]["filters"]["price"] or \
query["query"]["filters"]["trade_filters"]["filters"]["price"]["option"] != currency:
query["query"]["filters"]["trade_filters"]["filters"]["price"]["option"] = currency
return query
|
a69579e37914064d8d6327f74db929b4827c41f6
| 651,098 |
from typing import Iterable
from typing import Tuple
def compute_iterable_delta(old: Iterable, new: Iterable) -> Tuple[set, set, set]:
"""Given two iterables, return the entries that's (added, removed, updated).
Usage:
>>> old = {"a", "b"}
>>> new = {"a", "d"}
>>> compute_iterable_delta(old, new)
({"d"}, {"b"}, {"a"})
"""
old_keys, new_keys = set(old), set(new)
added_keys = new_keys - old_keys
removed_keys = old_keys - new_keys
updated_keys = old_keys.intersection(new_keys)
return added_keys, removed_keys, updated_keys
|
a049f98f192b35745b99ec7c72adf6fabccfe078
| 120,420 |
def average_word_length(tweet):
"""
Return the average length of the tweet
:param tweet: raw text tweet
:return: the float number of character count divided by the word count
"""
    character_count = len(tweet)
    word_count = len(tweet.split())
    return float(character_count) / float(word_count)
|
4077e531c69b51f6df74546b0e84fc41720ffbe5
| 686,449 |
def split_columns(line, separator='\t'):
""" Split a line with a "separator" """
return line.split(separator)
|
dbf158ecbe6e534bd1977c31dcb96f2415e09f58
| 342,256 |
def connection_requires_http_tunnel(
proxy_url=None, proxy_config=None, destination_scheme=None
):
"""
Returns True if the connection requires an HTTP CONNECT through the proxy.
:param URL proxy_url:
URL of the proxy.
:param ProxyConfig proxy_config:
Proxy configuration from poolmanager.py
:param str destination_scheme:
The scheme of the destination. (i.e https, http, etc)
"""
# If we're not using a proxy, no way to use a tunnel.
if proxy_url is None:
return False
# HTTP destinations never require tunneling, we always forward.
if destination_scheme == "http":
return False
# Support for forwarding with HTTPS proxies and HTTPS destinations.
if (
proxy_url.scheme == "https"
and proxy_config
and proxy_config.use_forwarding_for_https
):
return False
# Otherwise always use a tunnel.
return True
|
8ae18bea3530d1497d34c8dc25e0a0d9a5def017
| 626,488 |
def is_schema_field(field):
""" Returns whether or not we should expect a schema to be found for the given field.
Currently this only applies to validation_errors and aggregated_items.
:param field: field name to check
:return: False if this field doesn't a schema, True otherwise
"""
# XXX: Consider doing this with regex? - Will 6/11/2020
if field.startswith('validation_errors') or field.startswith('aggregated_items'): # note that trailing '.' is gone
return False
return True
|
00dcd1c01f7ac8b31a5010912f3f4a28035bc045
| 179,096 |
import json
def load_json_file(json_path, logger=None, encoding=None):
"""Load a JSON file, possibly logging errors.
Parameters
----------
json_path : path_like
Relative path to the JSON file.
logger : logging.Logger (optional, default None)
Logger to use.
encoding : str (optional, default None)
Encoding to use. None means system preferred encoding.
Returns
-------
contents : dict
Contents of the JSON file.
"""
try:
with open(json_path, 'r', encoding=encoding) as json_file:
contents = json.load(json_file)
except OSError as e:
if logger is not None:
logger.error("Could not open {}: {}".format(json_path, e))
raise
except ValueError as e:
if logger is not None:
logger.error("Could not load {}: {}".format(json_path, e))
raise
return contents
|
8ff917cc88854b3d4a2c54b4a8406bddffdb041b
| 519,611 |
def end_of_day(dt):
"""Take datetime and move forward to last microsecond of date"""
return dt.replace(hour=23, minute=59, second=59, microsecond=999999)
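# Illustrative usage sketch (an addition, not part of the original entry):
from datetime import datetime
assert end_of_day(datetime(2021, 6, 1, 9, 30)) == datetime(2021, 6, 1, 23, 59, 59, 999999)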
|
b14d686f1d57c54afa96079a4893a09bb36353d7
| 328,393 |
from datetime import datetime
import shutil
def fix_md5_file(filename, append_path, savebu=False):
"""
Fixes the md5 file so that it will check the zip at the correct path
If you run md5sum in a different directory, then the md5 file needs to
specify the directory or it will be unable to find it. This adds the
full path in front.
Note: md5sum is insanely specific:
<md5sum_checksum><space><space><file_name>
Parameters
----------
filename : str
Valid filename with resolvable path from cwd
append_path : str
path to append in the file
    savebu : boolean, default False
        If True, a backup of the file is made prior to modification
    Returns
    -------
    str
        The fixed string saved within the md5 file
"""
with open(filename, 'r+', encoding='UTF-8') as fileobj:
if savebu:
bufilename = (f"{filename}-{datetime.now().strftime('%Y%m%d-%H%M%S')}.backup" )
shutil.copy(filename, bufilename)
old = fileobj.read().split()
fileobj.seek(0)
fpfn = f"{old[0]} {append_path}/{old[1]}"
fileobj.write(fpfn)
fileobj.truncate()
return fpfn
|
79f7f39803cbcf1b6bc2356a78fc25f2879f1b94
| 131,866 |
def read_pid_stat(pid="self"):
"""
Returns system process stat information.
:param pid: The process ID.
:returns: The system stat information.
:rtype: dict
"""
with open("/proc/%s/stat" % (pid,), "rb") as f:
stat = f.readline().split()
return {
"utime": int(stat[13]),
"stime": int(stat[14]),
"cutime": int(stat[15]),
"cstime": int(stat[16]),
}
|
5ec6b21b09372e71e6dcf8c60f418bcbc4beee64
| 706,151 |
import string
def remove_punctuation(myString):
"""Remove punction from input string
Arguments
- myString: a string of text
Returns:
- newString: a string of text without punctuation
"""
translator = str.maketrans('', '', string.punctuation)
newString = myString.translate(translator)
return newString
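# Illustrative usage sketch (an addition, not part of the original entry):
assert remove_punctuation("Hello, world!") == "Hello world"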
|
12f2d3ca6e231b341e6c106c59ac40d392c1cfcc
| 585,922 |
import statistics
def get_window_size(windows):
""" Returns median window size calculated from difference in
neighboring window positions """
diffs = [abs(e-s) for s, e in zip(windows[0:], windows[1:])]
median_window = statistics.median(diffs)
return median_window
|
ea57d4b5947cd14142de681711c26927ceffbe3a
| 360,191 |
def create_key_pair(client, name):
""" Create a new key pair and return the private key
"""
response = client.create_key_pair(KeyName=name)
return response['KeyMaterial']
|
e3c3525dcbda873a696c12f689ed8dc8c202dc05
| 294,925 |
def page_list(pages, publish_filter=None, limit=None, meta_sort='', reverse=False):
"""Basic sorting and limiting for flatpage objects"""
if publish_filter is True:
# Only published
pages = [p for p in pages if p.meta['published']]
elif publish_filter is False:
# Only unpublished
pages = [p for p in pages if not p.meta['published']]
else:
# All pages
pages = [p for p in pages]
if meta_sort:
pages = sorted(pages, reverse=reverse, key=lambda p: p.meta[meta_sort])
return pages[:limit]
|
9ba9771d90b1987423c961f0a9b066baaceb5b79
| 333,397 |
def black_invariant(text, chars=None):
"""Remove characters that may be changed when reformatting the text with black"""
if chars is None:
chars = [' ', '\t', '\n', ',', "'", '"', '(', ')', '\\']
for char in chars:
text = text.replace(char, '')
return text
|
1a56f7520519bb376c67f9bfea262ea42187e16d
| 284,011 |
def check_min_guide_pairs(df, min_pairs):
"""Check that each guide is paired with a minimum number of guides
Parameters
----------
df: DataFrame
Anchor df with column anchor_guide
min_pairs: int
minimum number of guides to be paired with
Returns
-------
List
        guides that are paired with fewer than min_pairs guides
"""
pair_count = (df[['anchor_guide', 'target_guide']]
.drop_duplicates()
.groupby('anchor_guide')
.apply(lambda d: d.shape[0])
.reset_index(name='n'))
guides_no_pairs = pair_count.anchor_guide[~(pair_count.n >= min_pairs)].to_list()
return guides_no_pairs
|
444c6508b008a04916ae8fcc679b4b630d73095f
| 357,816 |
def checksum(number, bits=4):
"""
Calculate the checksum of a number.
The checksum of length N is formed by splitting the number into
bitstrings of N bits and performing a bitwise exclusive or on them.
:param number: Number to generate the check
:type number: int
:return: Checksum of the number
:rtype: int
>>> checksum(0)
0
>>> checksum(0xFFF)
15
>>> checksum(0b01100111, 2)
1
"""
if bits == 0:
return 0
mask = (1 << bits) - 1
chk = 0
while number > 0:
chk ^= (number & mask)
number >>= bits
return chk & mask
|
7218b31bd537907e2201254cdfae2b2554d61b39
| 371,978 |
def value_to_string(val, precision=3):
"""
Convert a number to a human readable string.
"""
if (not isinstance(val, float)) or (val == 0):
text = str(val)
elif (abs(val) >= 10.0**(precision+1)) or \
(abs(val) <= 10.0**(-precision-1)):
text = "{val:.{prec}e}".format(val=val, prec=precision)
else:
text = "{}".format(val)
if len(text) > precision + 2 + (text[0] == '-'):
text = "{val:.{prec}f}".format(val=val, prec=precision)
return text
|
802c931b7b92f19b318011f560b164725c6a825d
| 375,486 |
def diagpq(p, q=0):
"""
Returns string equivalent metric tensor for signature (p, q).
"""
n = p + q
D = []
for i in range(p):
D.append((i*'0 ' +'1 '+ (n-i-1)*'0 ')[:-1])
for i in range(p,n):
D.append((i*'0 ' +'-1 '+ (n-i-1)*'0 ')[:-1])
return ','.join(D)
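# Illustrative usage sketch (an addition, not part of the original entry):
# signature (1, 3) yields a Minkowski-style metric string.
assert diagpq(1, 3) == '1 0 0 0,0 -1 0 0,0 0 -1 0,0 0 0 -1'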
|
29e86f72338d31e8791f68331618273b17eb2cd9
| 690,088 |
import torch
def get_device(device_num: int = 0):
"""Get a device (GPU or CPU) for the torch model
"""
# If there's a GPU available...
if torch.cuda.is_available():
# Tell PyTorch to use the GPU.
device = torch.device(f"cuda:{device_num}")
print("There are %d GPU(s) available." % torch.cuda.device_count())
print("We will use the GPU:", torch.cuda.get_device_name(device_num))
# If not...
else:
print("No GPU available, using the CPU instead.")
device = torch.device("cpu")
return device
|
c584de1be4a882eb39da46617170673ae8835489
| 240,700 |
def _unique_item_counts(iterable):
""" Build a dictionary giving the count of each unique item in a sequence.
:param iterable: sequence to obtain counts for
:type iterable: iterable object
:rtype: dict[obj: int]
"""
items = tuple(iterable)
return {item: items.count(item) for item in sorted(set(items))}
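# Illustrative usage sketch (an addition, not part of the original entry):
assert _unique_item_counts('abracadabra') == {'a': 5, 'b': 2, 'c': 1, 'd': 1, 'r': 2}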
|
c0619aef5dac3f2d1d35f94cc9d1777e9d051b3d
| 219,714 |
from typing import Tuple
from typing import List
def read_dataset(path: str) -> Tuple[List[List[str]], List[List[str]]]:
"""
Reads a dataset from a given path.
Args:
path: Path of the file stored in tsv format.
Returns:
A 2D list of tokens and another of associated labels.
"""
tokens_s = []
labels_s = []
tokens = []
labels = []
with open(path) as f:
for line in f:
line = line.strip()
if line.startswith('# '):
tokens = []
labels = []
elif line == '':
tokens_s.append(tokens)
labels_s.append(labels)
else:
_, token, label = line.split('\t')
tokens.append(token)
labels.append(label)
assert len(tokens_s) == len(labels_s)
return tokens_s, labels_s
|
daa0320ac23d71c25056d49810ad5036e2fdac13
| 61,610 |
import torch
def swish_func(x, beta=1.0, inplace=False):
"""
"Swish: a Self-Gated Activation Function"
Searching for Activation Functions (https://arxiv.org/abs/1710.05941)
If beta=1 applies the Sigmoid Linear Unit (SiLU) function element-wise
If beta=0, Swish becomes the scaled linear function (identity
activation) f(x) = x/2
As beta -> ∞, the sigmoid component converges to approach a 0-1 function
(unit step), and multiplying that by x gives us f(x)=2max(0,x), which
is the ReLU multiplied by a constant factor of 2, so Swish becomes like
the ReLU function.
Including beta, Swish can be loosely viewed as a smooth function that
nonlinearly interpolate between identity (linear) and ReLU function.
The degree of interpolation can be controlled by the model if beta is
set as a trainable parameter.
Alt: 1.78718727865 * (x * sigmoid(x) - 0.20662096414)
"""
    if inplace:
        # In-place implementation, may consume less GPU memory.
        # Save the original input, turn x into sigmoid(beta*x) in place,
        # then multiply by the saved copy.
        result = x.clone()
        x.mul_(beta)
        torch.sigmoid_(x)
        x *= result
        return x
# Normal out-of-place implementation:
return x * torch.sigmoid(beta * x)
|
152d669eea550f08ce998c994b307d31b2e58432
| 556,466 |
def K(u, kap, eps):
"""Compute diffusion function
.. math::
K(u) = \kappa \, (1 + \varepsilon u)^3 + 1
Parameters
----------
u : array_like
Temperature variable.
kap : float
Diffusion parameter.
eps : float
Inverse of activation energy.
Returns
-------
array_like
Evaluation of K function.
"""
return kap * (1 + eps * u) ** 3 + 1
|
9ca6c25acd3ab3079d8f84f97cb9252c4ee0adca
| 312,763 |
def get_control_count(cmd):
"""
Return the number of control qubits of the command object cmd
"""
return len(cmd.control_qubits)
|
405f9c525db409f4f3fb71eba9e95e0ee5a5fa1b
| 121,632 |
def dual_neighbours(p, displace=1):
"""All the dual neighbours of primal vertex p in the RHG lattice.
A helper function for RHG_graph. Given a primal vertex p, returns
the coordinates of all the dual neighbours. Assumes each neighbour
is 1 unit away by default.
Args:
p (tuple): the coordinates of the primal vertex.
displace (float): how much to displace the neighbour by. Useful
        to change when creating macronodes.
Returns:
List[Tuple]: the coordinates of the four neighbours.
"""
x, y, z = p[0], p[1], p[2]
top = (x, y + displace, z)
bottom = (x, y - displace, z)
left = (x - displace, y, z)
right = (x + displace, y, z)
if z % 2:
front = (x, y, z + displace)
back = (x, y, z - displace)
if x % 2:
return [back, left, front, right]
return [back, top, front, bottom]
return [bottom, left, top, right]
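# Illustrative usage sketch (an addition, not part of the original entry):
# an even-z vertex has all four neighbours in the same z-plane.
assert dual_neighbours((0, 0, 0)) == [(0, -1, 0), (-1, 0, 0), (0, 1, 0), (1, 0, 0)]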
|
8a1d0fe72451e825a11b78d2f52979289429e779
| 254,767 |
def _IncludedPaintEvents(events):
"""Get all events that are counted in the calculation of the speed index.
There's one category of paint event that's filtered out: paint events
that occur before the first 'ResourceReceiveResponse' and 'Layout' events.
Previously in the WPT speed index, paint events that contain children paint
events were also filtered out.
"""
def FirstLayoutTime(events):
"""Get the start time of the first layout after a resource received."""
has_received_response = False
for event in events:
if event.name == 'ResourceReceiveResponse':
has_received_response = True
elif has_received_response and event.name == 'Layout':
return event.start
assert False, 'There were no layout events after resource receive events.'
  first_layout_time = FirstLayoutTime(events)
  paint_events = [e for e in events
                  if e.start >= first_layout_time and e.name == 'Paint']
return paint_events
|
de3f344a3e74d98b6ddf727dc12bc8d4bf368fa4
| 339,278 |
def load_trec_labels_dict(in_name):
"""
input: trec format qrel
:param in_name: qrel
:return: h_qrel = {qid:{doc:score} }
"""
h_qrel = {}
l_lines = open(in_name).read().splitlines()
for line in l_lines:
cols = line.split()
qid = cols[0].strip()
docno = cols[2].strip()
label = int(cols[3])
if qid not in h_qrel:
h_qrel[qid] = {}
h_qrel[qid][docno] = label
return h_qrel
|
9faa57a0d0ec69199f8ddfe6817d564912608f81
| 493,053 |
def is_templated(module):
"""Returns an indication where a particular module is templated
"""
if "attr" not in module:
return False
elif module["attr"] in ["templated"]:
return True
else:
return False
|
c8873b5f88a02c603a32f68580b2c6eccedb9933
| 267,210 |
def powerlaw(wave, tau_v=1, alpha=1.0, **kwargs):
"""Simple power-law attenuation, normalized to 5500\AA.
:param wave:
The wavelengths at which optical depth estimates are desired.
:param tau_v: (default: 1)
The optical depth at 5500\AA, used to normalize the
        attenuation curve.
    :param alpha: (default: 1.0)
        The power-law index of the attenuation curve.
    :returns tau:
The optical depth at each wavelength.
"""
return tau_v * (wave / 5500)**(-alpha)
|
e9681c71b4fe3f096c9700469d3cb8325deeb118
| 436,939 |
import unittest
def run_student_tests(print_feedback=True, show_traces=True,
success_required=True):
"""Run a suite of student submitted tests.
Tests must be located in /autograder/source/student_tests/
Args:
print_feedback (bool): Print success or failure message
show_traces (bool): Show failure/error stack traces
success_required (bool): If True, this function will raise an
AssertionError if any student tests fail.
Returns:
bool: True if all tests pass, False otherwise
Raises:
AssertionError if success_required is true and any test fails.
"""
suite = unittest.defaultTestLoader.discover('student_tests',
top_level_dir="./")
result = unittest.TestResult()
suite.run(result)
succeeded = len(result.failures) == 0 and len(result.errors) == 0
if not succeeded:
if print_feedback:
print(
"It looks like your submission is not passing your own tests:")
if len(result.errors) > 0:
print("Errors:")
for error in result.errors:
print(error[0]._testMethodName)
if show_traces:
print(error[1])
if len(result.failures) > 0:
print("Failures:")
for failure in result.failures:
print(failure[0]._testMethodName)
if show_traces:
print(failure[1])
if success_required:
raise AssertionError("Student tests failed.")
else:
if print_feedback:
print("Submission passes student tests.")
return succeeded
|
e262b6d5e8c74ca9085aa943a5d58670314d781d
| 27,362 |
def ask_confirmation(message, yes = 'y', no = 'n', default = False):
"""
Ask user to confirm something. Ask again if answer other than expected.
Arguments:
- message (string): message to print (e.g. "Are you sure?")
- yes (string): expected value if user confirms
    - no (string): expected value if user denies
- default (bool): value to return if user hits Enter
Return (bool):
- True if user types string same as argument yes
- False if user types string same as argument no
- default value if user doesn't type anything and hit Enter
Example:
ask_confirmation("Are you sure?", "yes", "no", True)
displays:
Are you sure? [YES/no]:
Result:
- if user types no, No, nO, or NO: returns False
    - if user types yes, Yes, YES, etc.: returns True
- if user hits Enter (confirms default): returns True
- if user types anything else, ask again
"""
if default:
yes = yes.upper()
no = no.lower()
else:
yes = yes.lower()
no = no.upper()
long_message = message + " [" + yes + "/" + no + "]: "
answer = input(long_message).lower()
if answer == yes.lower():
return True
elif answer == no.lower():
return False
elif answer == '':
return default
else:
return ask_confirmation(message, yes, no, default)
|
bd66b526f1efe8f648b3d204e7c791a74b4324b7
| 666,066 |
from datetime import datetime
def line_to_time(line):
""" Extract timestamp from a line, created by log. """
time = line.split(' ')[0].split(',')[0]
time = datetime.strptime(time, '%Y-%m-%d %H:%M:%S')
return time
|
996ef811d649c1eafa9b123bb6c84fe442c6e432
| 343,765 |
def get_common_start_stop_times(neo_objects):
"""
Extracts the `t_start`and the `t_stop` from the input neo objects.
If a single neo object is given, its `t_start` and `t_stop` is returned.
Otherwise, the aligned times are returned: the maximal `t_start` and
minimal `t_stop` across `neo_objects`.
Parameters
----------
neo_objects : neo.SpikeTrain or neo.AnalogSignal or list
A neo object or a list of neo objects that have `t_start` and `t_stop`
attributes.
Returns
-------
t_start, t_stop : pq.Quantity
Shared start and stop times.
Raises
------
AttributeError
If the input neo objects do not have `t_start` and `t_stop` attributes.
ValueError
If there is no shared interval ``[t_start, t_stop]`` across the input
neo objects.
"""
if hasattr(neo_objects, 't_start') and hasattr(neo_objects, 't_stop'):
return neo_objects.t_start, neo_objects.t_stop
try:
t_start = max(elem.t_start for elem in neo_objects)
t_stop = min(elem.t_stop for elem in neo_objects)
except AttributeError:
raise AttributeError("Input neo objects must have 't_start' and "
"'t_stop' attributes")
if t_stop < t_start:
raise ValueError("t_stop ({t_stop}) is smaller than t_start "
"({t_start})".format(t_stop=t_stop, t_start=t_start))
return t_start, t_stop
|
d986a9b54b7f067f4f3c25860b22d4cb9443e961
| 582,038 |
def cast_position_and_time(cast):
"""Retrieve ONC cast average longitude, latitude and minimum time.
:arg cast: the ONC cast
:type cast: a single group of pandas grouby object
:returns: lat, lon, date
"""
lon = cast['Longitude Corrected (deg)'].mean()
lat = cast['Latitude Corrected (deg)'].mean()
date = cast['day'].min()
return lat, lon, date
|
6f6e305dbcf06bf633f8c2623017d2e464fd2f7e
| 212,417 |
import math
def poisson(k, lamb):
"""
Calculate the Poisson distribution given (k, lambda).
"""
return math.exp(-lamb) * math.pow(lamb, k) / math.factorial(k)
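# Illustrative usage sketch (an addition, not part of the original entry):
# P(X = 0) for lambda = 1 is e**-1.
assert abs(poisson(0, 1.0) - math.exp(-1)) < 1e-12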
|
7c5451823d6a760f0832e58ed0b121eff390a2bb
| 313,401 |
def _detect_adfs_authority(authority_url, tenant):
"""Prepare authority and tenant for Azure Identity with ADFS support.
If `authority_url` ends with '/adfs', `tenant` will be set to 'adfs'. For example:
'https://adfs.redmond.azurestack.corp.microsoft.com/adfs'
-> ('https://adfs.redmond.azurestack.corp.microsoft.com/', 'adfs')
"""
authority_url = authority_url.rstrip('/')
if authority_url.endswith('/adfs'):
authority_url = authority_url[:-len('/adfs')]
# The custom tenant is discarded in ADFS environment
tenant = 'adfs'
return authority_url, tenant
|
2f1987502c11fab33045b8b96cb934f1a2fe7d98
| 516,157 |
def make_unique(items):
"""Remove duplicate items from a list, while preserving list order."""
seen = set()
def first_occurrence(i):
if i not in seen:
seen.add(i)
return True
return False
return [i for i in items if first_occurrence(i)]
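# Illustrative usage sketch (an addition, not part of the original entry):
assert make_unique([3, 1, 3, 2, 1]) == [3, 1, 2]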
|
4c97795cb56f9073d0b28c93ed1be384d0c1f578
| 319,831 |
import six
def _get_selections_from_property_and_vertex_fields(property_fields_map, vertex_fields):
"""Combine property fields and vertex fields into a list of selections.
Args:
property_fields_map: OrderedDict[str, Field], mapping name of field to their
representation. It is not modified by this function
vertex_fields: List[Field]. It is not modified by this function
Returns:
List[Field], containing all property fields then all vertex fields, in order
"""
selections = list(six.itervalues(property_fields_map))
selections.extend(vertex_fields)
return selections
|
2560dae51abfd2dd3c40bfab853706c33419d73c
| 595,820 |
def get_string_before_delimiter(string, delimiter):
"""
Returns contents of a string before a given delimiter
Example: get_string_before_delimiter("banana-kiwi", "-") returns "banana"
"""
if delimiter in string:
return (string[:string.index(delimiter)]).strip()
else:
return string
|
97b5da492834a62c5648f76002e690b659d3ab38
| 679,132 |
def quote(word_list):
"""
Quote a list or tuple of strings for Unix Shell as words, using the
byte-literal single quote.
The resulting string is safe for use with ``shell=True`` in ``subprocess``,
    and in ``os.system``. ``assert shlex.split(quote(word_list)) == word_list``.
See POSIX.1:2013 Vol 3, Chap 2, Sec 2.2.2:
http://pubs.opengroup.org/onlinepubs/9699919799/utilities/V3_chap02.html#tag_18_02_02
"""
if not isinstance(word_list, (tuple, list)):
word_list = (word_list,)
return " ".join(list("'{0}'".format(s.replace("'", "'\\''")) for s in word_list))
|
c4e7c2a3ddca7f3670908d8aec5bda2d5550f819
| 228,095 |
def flatten_nested(nested_dicts):
"""
Flattens dicts and sequences into one dict with tuples of keys representing the nested keys.
Example
>>> dd = { \
'dict1': {'name': 'Jon', 'id': 42}, \
'dict2': {'name': 'Sam', 'id': 41}, \
'seq1': [{'one': 1, 'two': 2}] \
}
>>> flatten_nested(dd) == { \
('dict1', 'name'): 'Jon', ('dict1', 'id'): 42, \
('dict2', 'name'): 'Sam', ('dict2', 'id'): 41, \
('seq1', 0, 'one'): 1, ('seq1', 0, 'two'): 2, \
}
True
"""
assert isinstance(nested_dicts, (dict, list, tuple)), 'Only works with a collection parameter'
def items(c):
if isinstance(c, dict):
return c.items()
elif isinstance(c, (list, tuple)):
return enumerate(c)
else:
raise RuntimeError('c must be a collection')
def flatten(dd):
output = {}
for k, v in items(dd):
if isinstance(v, (dict, list, tuple)):
for child_key, child_value in flatten(v).items():
output[(k,) + child_key] = child_value
else:
output[(k,)] = v
return output
return flatten(nested_dicts)
|
68d1ddd6ebd053fb8291acb270e287b55ef3bc59
| 288,885 |
def remove_templates(text):
"""Remove all text contained between matched pairs of '{{' and '}}'.
Args:
text (str): Full text of a Wikipedia article as a single string.
Returns:
str: A copy of the full text with all templates removed.
"""
good_char_list = []
prev_char = None
next_char = None
depth = 0
open_pos = None
close_pos = None
for pos,char in enumerate(text):
try:
next_char = text[pos+1]
except IndexError:
next_char = None
if char == '{' and next_char == '{':
depth += 1
elif char == '}' and prev_char == '}':
depth -= 1
# Add characters if we are at depth 0
elif depth == 0:
good_char_list.append(char)
prev_char = char
return ''.join(good_char_list)
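# Illustrative usage sketch (an addition, not part of the original entry);
# note the doubled space left where the template was removed.
assert remove_templates('Text {{infobox|x=1}} more') == 'Text  more'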
|
97cd95160703aef8e74bbb4134c83eee2b195fa8
| 628,981 |
import secrets
def nonce(length=20):
"""
Generate a nonce string. This is just a random string that uniquely
identifies something. It needn't be globally unique, just unique enough
for a period of time (e.g. to identify a specific call in a rolling
log file).
"""
return secrets.token_urlsafe()
|
df842ea9fadc3d09d5e647a56559081d19b43c96
| 455,092 |
def after_request(response):
"""Grafana makes POST requests for queries, which aren't on the same domain/port
from which you're serving the Grafana instance. Therefore we must add the headers
so the browser will approve the cross domain request. Flask automatically deals
with OPTIONS requests, which uses the same headers here."""
response.headers.add('Access-Control-Allow-Origin', '*')
response.headers.add('Access-Control-Allow-Headers', 'Content-Type,Authorization')
response.headers.add('Access-Control-Allow-Methods', 'GET,POST,OPTIONS')
return response
|
ae3c022c770cb3877335bde0e02de672c09d5bff
| 398,505 |
def make_split(df, t):
"""
Splits a dataframe on attribute.
Parameter:
df -- the dataframe to split
t -- target attribute to split upon
Return:
new_df -- split dataframe
"""
new_df = {}
for df_key in df.groupby(t).groups.keys():
new_df[df_key] = df.groupby(t).get_group(df_key)
return new_df
|
7fa079521d8bf33c9881cd40c71093731ee4f4d0
| 435,925 |
def filter_reviews_rating(df, threshold):
"""
Filters reviews with rating below a threshold (normalized)
:param df: dataframe of reviews
:param threshold: float from 0 to 1
:return: filtered reviews dataframe
"""
df = df.copy()
return df[df['cleaned_ratings'] <= threshold]
|
7d1047c7f91db7424c050ce3d174fd6bddedab21
| 206,428 |
def quantize_float(f, q):
"""Converts a float to closest non-zero int divisible by q."""
return int(round(f / q) * q)
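# Illustrative usage sketch (an addition, not part of the original entry):
assert quantize_float(17.0, 8) == 16
assert quantize_float(21.0, 8) == 24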
|
28a7170b3c1d2d8e7c0d9dc36ae8bc1b31c01165
| 629,136 |
import socket
def open_port() -> int:
"""Find an open port."""
with socket.socket() as s:
s.bind(("", 0))
port = s.getsockname()[1]
return port
|
bc5b9b7b416a24fa09b19074b1f5c471e1e4dc05
| 526,780 |
def centerOfMass(positions, weights):
"""
Calculate the center of mass of a set of weighted positions.
Args:
positions: A list of (x,y,z) position tuples
weights: A list of position weights
Return:
A tuple of floats representing the coordinates of the center of mass.
"""
tot_weight = sum(weights)
assert tot_weight != 0
zipped = zip(positions, weights)
[x, y, z] = [0.0, 0.0, 0.0]
for (pos, weight) in zipped:
x += pos[0] * weight
y += pos[1] * weight
z += pos[2] * weight
return tuple(map(lambda k: k / tot_weight, [x,y,z]))
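# Illustrative usage sketch (an addition, not part of the original entry):
assert centerOfMass([(0, 0, 0), (2, 0, 0)], [1, 3]) == (1.5, 0.0, 0.0)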
|
f229da64fe645504cff16c3486e59e75ad69046f
| 411,757 |
def formatBuckets(buckets):
"""
Formats buckets into a list of strings
:param buckets: buckets to be formatted
"""
timeList = []
for i, bucket in enumerate(buckets):
formatStr = '{0:4,.3g}' if i < len(buckets) -1 else '>>{0:4,.3g}'
timeList.append(formatStr.format(bucket))
return timeList
|
005115b4092993a8cde8415fc014b264ed55d1a5
| 614,058 |
def pk_equals(first, second):
""" Helper function to check if the ``pk`` attributes of two models are
equal.
"""
return first.pk == second.pk
|
1596b87073e9dbb14fee1dba921f328704c8e6e2
| 107,537 |
def _max_thread_width(thread):
"""compute the widest breadth of the thread,
that is the max number of replies a comment in the thread has received"""
if not thread['children']:
return 0
return max(
max([_max_thread_width(reply) for reply in thread['children']]),
len(thread['children'])
)
|
2689347d71177bc39f0b572c7a91782f29be641e
| 51,004 |
from datetime import datetime
def mdtm_to_string(mtime: int) -> str:
"""Convert the last modification date of a file as an iso string"""
return datetime.utcfromtimestamp(mtime).isoformat() + "Z"
|
1c7db72df1d048bd948c6ca3629531090b22b52f
| 502,609 |
def pairs_from_array(a):
"""
    Given an array of strings, create a list of pairs of elements from the array.
    Creates all possible combinations without symmetry (given pair [a,b], it does not create [b,a])
    or repetition (e.g. [a,a]).
:param a: Array of strings
:return: list of pairs of strings
"""
pairs = list()
for i in range(len(a)):
for j in range(len(a[i + 1 :])):
pairs.append([a[i], a[i + 1 + j]])
return pairs
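# Illustrative usage sketch (an addition, not part of the original entry):
assert pairs_from_array(['a', 'b', 'c']) == [['a', 'b'], ['a', 'c'], ['b', 'c']]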
|
50c489d660a7e82c18baf4800e599b8a3cd083f0
| 6,507 |
def delete_index(es, index):
"""
Delete Elasticsearch index.
"""
return es.indices.delete(index)
|
178570c82a7a9cb5d2bc3dcc136454997b967b09
| 149,481 |
def buckle_thickness(D_o, P_p, sig_y):
"""Return the nominal buckle thickness [t] based on the propagation pressure.
Considers the worst case maximum external pressure and ignores internal
pressure - PD8010-2 Equation (G.21).
:param float D_o: Outside Diameter [m]
:param float P_p: Propagation pressure [Pa]
:param float sig_y: Yield strength [Pa]
"""
return D_o * (P_p / (10.7 * sig_y))**(4 / 9)
|
203b1f036c3e201d80a686ad427deacdf0a8ad4a
| 429,346 |
def get_image_name(name: str, tag: str, image_prefix: str = "") -> str:
"""Get a valid versioned image name.
Args:
name (str): Name of the docker image.
tag (str): Version to use for the tag.
image_prefix (str, optional): The prefix added to the name to indicate an organization on DockerHub or a completely different repository.
Returns:
str: a valid docker image name based on: prefix/name:tag
"""
versioned_tag = name.strip() + ":" + tag.strip()
if image_prefix:
versioned_tag = image_prefix.strip().rstrip("/") + "/" + versioned_tag
return versioned_tag
|
e8826186e4ecc1e8a912772b027adaf21a10c1ab
| 128,011 |
def get_name_from_link(link):
""" returns name from link. """
name = link.split("/")[-1]
return name
|
ee992e872a718a2ef057df7bb1fb68135b5d72f4
| 135,587 |
def partial_weight(problem, partial_tour):
"""Return weight of partial tour, excluding the edge from last to first city."""
weight = 0
for i in range(1, len(partial_tour)):
weight += problem.get_weight(partial_tour[i], partial_tour[i - 1])
return weight
|
bdfd61cd29970e64f6ad4d011ef0b7955e3dc500
| 315,792 |
import torch
def _reduce_list(val_list, red_func=torch.cat):
"""
Applies reduction function to given list. If each element in the list is
a Tensor, applies reduction function to all elements of the list, and returns
the output Tensor / value. If each element is a tuple, applies reduction
function to corresponding elements of each tuple in the list, and returns
tuple of reduction function outputs with length matching the length of tuple
val_list[0].
"""
if isinstance(val_list[0], torch.Tensor):
return red_func(val_list)
    assert isinstance(val_list[0], tuple), (
        "Elements to be reduced can only be either Tensors or tuples "
        "containing Tensors."
    )
final_out = []
for i in range(len(val_list[0])):
final_out.append(_reduce_list([val_elem[i] for val_elem in val_list], red_func))
return tuple(final_out)
|
5ebdace78bf4dbd7b89559529db1ac7aba7ca7b6
| 264,361 |
def format_url(category: str, year: int, month: str, day: str) -> str:
"""
It returns a URL from The Guardian website with links for the articles of the given date
and category
:param category: A String representing the category
:param year: An integer representing the year
:param month: A String representing the first three letters of the month in lower case
:param day: A String representing the day
:return: The URL
"""
return f"https://www.theguardian.com/{category}/{year}/{month}/{day}"
|
5e385659b70efea2117c8b3e5a2534448c446b63
| 653,203 |