content (string, 39 to 9.28k chars) | sha1 (string, 40 chars) | id (int64, 8 to 710k) |
---|---|---|
def _try_connect_always_fail(**kwargs): # pylint: disable=unused-argument
"""Always return False"""
return False
|
320f9d3f737309107c1b8047e894f5ccb1f9b36b
| 244,622 |
def byteswap(*arrays):
"""
Swap the dtype byte-order interpretation for the provided arrays; the bytes in memory are left untouched.
Notes
-----
arr.newbyteorder('S') swaps dtype interpretation, but not bytes in memory
arr.byteswap() swaps bytes in memory, but not dtype interpretation
arr.byteswap(True).newbyteorder('S') completely swaps both
References
----------
https://docs.scipy.org/doc/numpy/user/basics.byteswapping.html
"""
return [arr.newbyteorder('S') for arr in arrays]
|
1fa1c0662fe525c1575365c7bcde5cf9f9ab96b3
| 631,779 |
def max_weighted_independent_set_in_path_graph(weights):
""" Computes the independent set with maximum total weight for a path graph.
In a path graph, all vertices are connected in a single path, without cycles.
An independent set of vertices is a subset of the graph vertices such that
no two vertices are adjacent in the path graph.
Complexity: O(n); Space: O(n)
Args:
weights: list, of vertex weights in the order they are present in the
graph.
Returns:
list, format [max_weight: int, vertices: list]
"""
# 0. Initialization: A[i] - max total weight of the independent set for
# the first i vertices in the graph.
a = [0] * (len(weights)+1)
a[0] = 0 # Max weight for empty graph.
a[1] = weights[0] # Max weight for the graph with only the first weight.
# 1. Compute the max total weight possible for any independent set.
for i in range(2, len(weights)+1):
a[i] = max(a[i-1], a[i-2]+weights[i-1])
max_weight = a[len(weights)]
# 2. Trace back from the solution through the subproblems to compute the
# vertices in the independent set.
vertices = []
i = len(weights)
while i >= 2:
    if a[i-2] + weights[i-1] >= a[i-1]:
        vertices.insert(0, weights[i-1])
        i -= 2
    else:
        i -= 1
if i == 1:
    # Only the first vertex remains; it belongs to the optimal set.
    vertices.insert(0, weights[0])
return [max_weight, vertices]
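# Illustrative usage (added example, not part of the original snippet):
# for the path weights [1, 4, 5, 4], the heaviest independent set takes
# the two non-adjacent 4s, so the total weight is 8.
assert max_weighted_independent_set_in_path_graph([1, 4, 5, 4]) == [8, [4, 4]]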
|
3be9d7dd54b939260f37ff2c97c18f18df0644b5
| 675,141 |
def skin_temperature(upward_longwave_irradiance_W_m2, emissivity=1.0):
"""
Calculates the radiative skin temperature from the upward longwave irradiance.
Args:
upward_longwave_irradiance_W_m2: The upward longwave irradiance from the surface, in W/m^2
emissivity: How much energy is emitted in comparison to a blackbody. Ranges from 0 to 1.
Returns:
The skin temperature in Kelvin.
"""
sigma = 5.670374419e-8  # Stefan-Boltzmann constant, W m^-2 K^-4
return (upward_longwave_irradiance_W_m2 / (emissivity * sigma)) ** 0.25
|
3a391f19b4a8c4b2c78e96639f95e8e6720c8cd1
| 381,025 |
def extract_ranges(index_list, range_size_limit=32):
"""Extract consecutive ranges and singles from index_list.
Args:
index_list: List of monotone increasing non-negative integers.
range_size_limit: Largest size range to return. If a larger
consecutive range exists it will be returned as multiple
ranges.
Returns:
ranges, singles where ranges is a list of [first, last] pairs of
consecutive elements in index_list, and singles is all of the
other elements, in original order.
"""
if not index_list:
return [], []
first = index_list[0]
last = first
ranges = []
singles = []
for i in index_list[1:]:
if i == last + 1 and (last - first) <= range_size_limit:
last = i
else:
if last > first:
ranges.append([first, last])
else:
singles.append(first)
first = i
last = i
if last > first:
ranges.append([first, last])
else:
singles.append(first)
return ranges, singles
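# Illustrative usage (added example, not part of the original snippet):
# 0,1,2 and 7,8 are consecutive runs; 5 stands alone.
assert extract_ranges([0, 1, 2, 5, 7, 8]) == ([[0, 2], [7, 8]], [5])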
|
4341f5319fcc80be57a98536eb908960be2bb8bb
| 223,065 |
def inc(x):
""" Add one to the current value """
return x + 1
|
c8f9a68fee2e8c1a1d66502ae99e42d6034b6b5c
| 4,015 |
import math
def slice_int(x, k, d):
"""
Slices an integer in the domain [0, 2**d - 1] into k-bit parts, and returns the
respective parts (each in range 0 to 2**k - 1), MSB to LSB.
:param x: A positive integer
:param k: An integer representing the size of the slices, in bits
:param d: The bit-width of x. If x < 2**d - 1, it is treated as left-padded with zeros.
:return: A list of the k-bit slices of x, ordered MSB to LSB.
"""
return [(x >> i*k) % 2**k for i in range(int(math.ceil(float(d)/k)))][::-1]
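# Illustrative usage (added example, not part of the original snippet):
# 0b101101 sliced into 2-bit parts gives 0b10, 0b11, 0b01, MSB to LSB.
assert slice_int(0b101101, 2, 6) == [2, 3, 1]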
|
14ca3f70fdd9fbd4c3441402692b8dd6df1e5a11
| 70,014 |
def iso_date(year, week, day):
"""Return the ISO date data structure."""
return [year, week, day]
|
307bd606b772e696289640a96692fd9134ff3410
| 218,775 |
import numpy as np

π = np.pi  # assumed definition; the original snippet uses π without defining it

def radius_from_volume(volume: float, dim: int) -> float:
"""Return the radius of a sphere with a given volume
Args:
volume (float): Volume of the sphere
dim (int): Dimension of the space
Returns:
float: Radius of the sphere
"""
if dim == 1:
return volume / 2
elif dim == 2:
return np.sqrt(volume / π) # type: ignore
elif dim == 3:
return (3 * volume / (4 * π)) ** (1 / 3) # type: ignore
else:
raise NotImplementedError(f"Cannot calculate the radius in {dim} dimensions")
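# Illustrative usage (added example, not part of the original snippet):
assert radius_from_volume(2.0, 1) == 1.0              # a 1-d "sphere" is an interval
assert np.isclose(radius_from_volume(np.pi, 2), 1.0)  # a unit disk has area pi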
|
22d8c4e6b2567465f0aaf5fd5fac1d4429ec3754
| 576,473 |
def _find_paths(graph, root_node, n_transitions):
""" Returns a list of paths with n_transitions transitions from root node
graph: the nx.DiGraph object which contains the (V, E) graph (must be directed, since successors() is used)
root_node: it is the node from which we start
n_transitions: is the number of transitions
"""
# recursion base case
if n_transitions == 0:
return [[root_node]]
paths = []
for neighbor in graph.successors(root_node):
# recursive step
for path in _find_paths(graph, neighbor, n_transitions - 1):
paths.append([root_node] + path)
return paths
|
80ed27b16d8af0db3790e4163041fb6d4aaf32ad
| 516,783 |
def linear_search(numbers: list, item: int) -> int:
"""
Algorithm that implements linear search
Parameters
----------
numbers : list
The numbers list
item : int
The element to search
Returns
-------
index : int
The index of the element, or -1 if the element does not exist in the list
"""
for (index, number) in enumerate(numbers):
if number == item:
return index
return -1
|
9c79b0debc4a5355eac0b7122a438587bbf5f4f0
| 108,537 |
import struct
import socket
def ip2long(*args):
"""
.. function:: ip2long(ip) -> int
Converts a decimal dotted quad IP string to long integer IP format.
It can take either one column of IP strings or 4 columns each having one
part of the IP address.
Examples:
>>> sql("select ip2long('123.123.123.123')")
ip2long('123.123.123.123')
--------------------------
2071690107
>>> sql("select ip2long(123,123,123,123)")
ip2long(123,123,123,123)
------------------------
2071690107
"""
if len(args) == 1:
try:
return struct.unpack('!L', socket.inet_aton(args[0]))[0]
except (OSError, struct.error):
    return None
elif len(args) == 4:
return struct.unpack('!L', socket.inet_aton('.'.join([str(x) for x in args])))[0]
|
b2759e2c7e33b26311d9df3f94a6898bd0ffecc2
| 397,288 |
def get_plot_name(name_idx, data):
"""Returns the plot name taken from the data parameter
Args:
name_idx(str or list): the plot column name to look for, or an array of plot names
data(obj): index-able object containing the values that make up the plot name
Return:
Returns the found plot name or None if not found
Note:
If the plot name consists of more than one index, the values are concatenated
to make up the returned plot name. Indexes that are missing in the data are
ignored. If the data doesn't contain any of the indexes, None is returned
"""
plot_name = ""
if isinstance(name_idx, list):
for idx in name_idx:
if idx in data:
plot_name += str(data[idx]) + "_"
plot_name = plot_name.rstrip("_")
elif name_idx in data:
plot_name = str(data[name_idx])
if plot_name == "":
plot_name = None
return plot_name
|
41fd2f76374f8388ac2ed6527bfa03437e77aeaa
| 150,445 |
def powerlaw(r,L,extra=False):
"""
POWERLAW Powerlaw function and derivatives.
f = powerlaw(R,L) is the powerlaw function with length scale
parameter L evaluated at R.
The length scale L is defined here as L = sqrt(-1/f''(0)).
f,df,ddf = powerlaw(R,L,extra=True) also returns df = (1/r) df/dr and
ddf = r d/dr (1/r) df/dr = r d/dr df.
Based on Matlab script of 14Nov97 by Dick Dee.
"""
f = 1./(1. + 0.5*(r/L)**2)
if extra:
df = (-1/(L**2))*(f**2)
ddf = (-2*(r/L)**2)*(f*df)
return (f,df,ddf)
else:
return f
|
1fa3ecbc9483ac90ede3b1a2ee7c6280da31f005
| 182,310 |
def _format(string):
""" Formats a class name correctly for checking function and class names.
Strips all non-alphanumeric chars and makes lowercase.
"""
return ''.join(list(filter(str.isalnum, string))).lower()
|
0fbff1d0da8c3bd4b318613dfa039dcef664f11f
| 688,160 |
from typing import List
def lcs(a: List[str], b: List[str]) -> int:
"""Get LCS (Longest Common Subsequence).
Args:
a: List of strings.
b: List of strings.
Returns:
LCS
Landau notation: O(a_count * b_count).
"""
n = len(a)
m = len(b)
dp = [[0 for _ in range(m + 1)] for _ in range(n + 1)]
for i, ai in enumerate(a):
for j, bj in enumerate(b):
if ai == bj:
dp[i + 1][j + 1] = dp[i][j] + 1
else:
dp[i + 1][j + 1] = max(dp[i + 1][j], dp[i][j + 1])
return dp[n][m]
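# Illustrative usage (added example, not part of the original snippet):
# "ace" is the longest common subsequence of "abcde" and "ace".
assert lcs(list("abcde"), list("ace")) == 3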
|
90a902d4a03532760fa9b8f33ce9a839bef32c43
| 212,158 |
def GetValuesFromList(array, key):
"""[Gets values from dictionary and returns them as a list, to use at plot.]
Arguments:
array {[dict]} -- [Dict we get from ]
key {[string]} -- [Key for ExchangeRate currency, EUR to KEY]
Returns:
[array] -- [Array of values to show on plot.]
"""
vals = []
for i in array:
vals.append(i['rates'][key])
return vals
|
e5dc96966b6f7626079241819170ab20ee22a135
| 407,209 |
from typing import Union
def create_choice(value: Union[str, int], name: str):
"""
Creates choices used for creating command option.
:param value: Value of the choice.
:param name: Name of the choice.
:return: dict
"""
return {
"value": value,
"name": name
}
|
7fba604f4bf5a5bbbf15a602c767f4973befc3c3
| 661,907 |
def escape_characters(markdown):
"""Takes some markdown and replaces escaped characters with the substition
character. This is returned along with a list of escaped characters.
You cannot escape line breaks - the backslash will be removed but not the
line break.
:param str markdown: The string to break up.
:rtype: ``str``, ``list``"""
characters = []
while "\\" in markdown:
loc = markdown.find("\\")
if loc != len(markdown) - 1:
character = markdown[loc + 1]
if character != "\n":
characters.append(character)
markdown = markdown[:loc] + "\x1A" + markdown[loc + 2:]
else:
markdown = markdown[:loc] + markdown[loc + 1:]
else:
break
return markdown, characters
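# Illustrative usage (added example, not part of the original snippet):
# the escaped "*" is replaced by the \x1A substitution character.
assert escape_characters("a\\*b") == ("a\x1ab", ["*"])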
|
f90376ae46585ef531dff9d412602ebf5d99a3cd
| 321,897 |
def move_to_tile(location, tile):
""" Return the action that moves in the direction of the tile. """
# actions = ['', 'u', 'd', 'l', 'r', 'p']
print(f"my tile: {tile}")
# see where the tile is relative to our current location
diff = tuple(x - y for x, y in zip(location, tile))
if diff == (0, 1):
action = 'd'
elif diff == (1, 0):
action = 'l'
elif diff == (0, -1):
action = 'u'
elif diff == (-1, 0):
action = 'r'
else:
action = ''
return action
|
d2fd8ecb913f4fa09cf4536fa3729e8abff86634
| 603,927 |
def apply_format(data_frame, column_names, format_method):
"""Apply a formatting function to a DataFrame column and return.
Simplify applying format modifications to the data stored in columns
of `data_frame`. Check if the parameters are of the right type, apply
`format_method` to the columns of `data_frame` whose labels are passed
in `column names`. Return the DataFrame with the applied changes.
Parameters
----------
data_frame : pandas.DataFrame
DataFrame containing the data to be modified.
column_names : list
List of string labels of columns in `data_frame` to be modified.
format_method : function
Function to be applied to the columns of `data_frame`, whose labels
are listed in `column_names`.
Returns
-------
data_frame : pandas.DataFrame
The passed in DataFrame with the formatting changes applied to
its columns.
See Also
--------
pandas.apply
Examples
--------
>>> data = pd.read_csv("data.csv")
>>> print(data[['Wage']][0:3]) #print first few lines
Wage
0 €565K
1 €405K
2 €290K
>>> data = apply_format(data, ['Wage'], money_format)
>>> print(data[['Wage']][0:3])
Wage
0 565
1 405
2 290
"""
for column in column_names:
if isinstance(column, str) and (column in data_frame) and callable(format_method):
data_frame.loc[:, column] = data_frame[column].apply(format_method)
return data_frame
|
af8f09d57e1f48da79c576ae542bfc5cc6cd837b
| 12,143 |
def tokenize_mnemonic(instr):
"""
Returns a list of tokens
The first token is the operation and all subsequent tokens are operands
"""
tokens = instr.mnemonic.split()
base = tokens[0]
dst = src = None
if len(tokens) < 2:
# Only single token (the operation)
return (base,)
else:
op_tks = tokens[1]
if ',' in op_tks:
dst, src = op_tks.split(',')
return (base, dst, src)
else:
return (base, op_tks)
#end if-else on tokens
|
b0e237e9486475a44b54cdc3fa0dcccc2b42349e
| 366,758 |
def drop(i_list: list, n: int) -> list:
    """
    Drop every n-th element from the list
    :param n: drop every n-th element of i_list
    :param i_list: the source list
    :return: the list with every n-th element removed
    """
    assert n > 0
    _shallow_list = []
    k = 1
    for element in i_list:
        if k % n != 0:
            _shallow_list.append(element)
        k += 1
    return _shallow_list
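# Illustrative usage (added example, not part of the original snippet):
# every 3rd element (3 and 6) is dropped.
assert drop([1, 2, 3, 4, 5, 6], 3) == [1, 2, 4, 5]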
|
666329bfa9a6f9c55c6dc2d7bdcb53c44cb35a60
| 404,049 |
def glyph_contour_count(font, name):
"""Contour count for specified glyph.
This implementation will also return contour count for
composite glyphs.
"""
contour_count = 0
items = [font['glyf'][name]]
while items:
g = items.pop(0)
if g.isComposite():
for comp in g.components:
if comp.glyphName != ".ttfautohint":
items.append(font['glyf'][comp.glyphName])
if g.numberOfContours != -1:
contour_count += g.numberOfContours
return contour_count
|
65469809aa789ca48778f95fbb6e661f6c74259d
| 570,220 |
def ops_to_dict(session, ops):
"""Converts canonical dict of TF ops to an actual dict.
Runs ops first.
Args:
session: tf session
ops: dict mapping name to tf operation
Returns:
dict
"""
dict_ = dict(list(zip(list(ops.keys()), session.run(list(ops.values())))))
return dict_
|
cf34434a23ce62f9c628c4b76f09a19380e8fe8a
| 191,030 |
import pickle
def load_model(model_path="model/best_model.pkl"):
    """ Model loader
    Args:
        model_path (str, optional): Path to the model file, defaults to `model/best_model.pkl`
    """
    with open(model_path, "rb") as f:
        return pickle.load(f)
|
32be408c3d618e25aa30e673b7106905553bb1db
| 486,407 |
def hexify_string(text: str) -> str:
"""Replace each character in a string by the corresponding HTML character entity."""
return ''.join('&#x{:x};'.format(ord(c)) for c in text)
|
8cb233d4a16415234d3bc9257794d2152b7740d3
| 443,276 |
def get_geometries(data_frame):
"""
Returns a geopandas GeoSeries object with geometries from a DataFrame.
"""
return data_frame['geometry']
|
ed44e100732f181619973f0e247136643f78d1d1
| 377,410 |
def pass_empty(_, value, *args):
"""
Used for EMPTY production alternative in collect.
"""
return []
|
c22c1131d4dde36325aef287183c858a82e33ed7
| 539,908 |
def validate_file(rules: dict, fname: str):
"""
Validates files by BIDS-compatible identifiers
Parameters
----------
rules : dict
Dictionary with keys of BIDS-recognized key and their accepted values.
fname : str
File to validate.
"""
valid = []
for key, value in rules.items():
if f"{key}-{value}" in fname:
valid.append(True)
else:
valid.append(False)
return all(valid)
|
92b82ea9d65f6037a7aef45995399cb137996240
| 486,168 |
def test_user(user_num):
"""Pre-defined user variables for testing purposes"""
users = {
"1": (5, "", 3, True, True, True, True, True),
"2": (0, "", 3, False, False, False, False, False),
"3": (2, "", 4, True, True, False, False, True),
"4": (1, "", 5, False, False, True, True, False)
}
return users[str(user_num)]
|
b98d0fea48a1bb96a14c46528ee0754dd90f7d0e
| 648,113 |
def calculate_center(S):
"""
Calculate the center of a list
of points, S
"""
cx = 0.
cy = 0.
n = len(S)
for p in S:
cx += p[0] / n
cy += p[1] / n
return (cx, cy)
|
8206845e2fe941c19f3454214144993a03ff1c64
| 457,679 |
def parse_date(date):
"""Parse date int string into to date int (no-op)."""
if date is None:
return 0
return int(date)
|
46cba9bcc9a0ba90028579994df4e92b9165a1b8
| 48,984 |
def timePoint_hierarchy(nb_TP):
"""
Convert the number of time points into a list with all the time points. This list can be used by
many other functions.
The time point columns in the input dataset should be written like: "tp* s"
where * is the time point number (the first time point must be 1).
@type nb_TP: int
@param nb_TP: the number of time points
@rtype : list
@return : all the time points name (string)
"""
TPs = []
for i in range(nb_TP):
TP_name = "tp" + str(i+1) + " s"
TPs.append(TP_name)
return TPs
|
8941db47aac0d765b4fdb15ebabd6ebacdda0eb8
| 469,159 |
def parse_team_stats(stat_obj: dict) -> str:
"""
Currently, individual team's stats look like this from Dynamo:
"Hotshots": {
"GA": "25",
"GF": "27",
"GP": "7",
"L": "3",
"OTL": "0",
"PTS": "8",
"SOL": "0",
"T": "0",
"W": "4"
}
we turn these into a nice human readable line.
:param stat_obj: dict containing a team's stats
:return: stringified version of this data
"""
line = ""
for stat, val in stat_obj.items():
line += f"{stat}: {val}\t"
return line + "\n"
|
7ee324c0bcdda2903be2fd82679dae7c7d46ed75
| 685,084 |
def get_as_list(parameter):
"""Make sure 'parameter' is a list."""
if not isinstance(parameter, list):
return [parameter]
return parameter
|
59262dd33d9b95826532d00de015b1c1a68effd3
| 324,533 |
import json
import hashlib
def dict_hash(a_dict):
"""Return a hash for a dict.
See https://stackoverflow.com/a/22003440."""
dict_str = json.dumps(a_dict, sort_keys=True, default=str)
return hashlib.md5(dict_str.encode('utf8')).hexdigest()
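# Illustrative usage (added example, not part of the original snippet):
# sort_keys=True makes the hash independent of insertion order.
assert dict_hash({"a": 1, "b": 2}) == dict_hash({"b": 2, "a": 1})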
|
6c526fe4e2d62a59e2330e1c84ee4fa72e34f35b
| 90,441 |
import math
def angle(dx, dy):
"""
Returns the angle from x- and y-coordinates.
Parameters
----------
dx : float
x-coordinate or difference in x-coordinate.
dy : float
y-coordinate or difference in y-coordinate.
Returns
-------
ang : float
Corresponding angle in radians.
"""
return math.atan2(dy, dx)
|
23e16bd7a8550cdcba71ae45f3a1be9b22c77325
| 385,253 |
import json
def load_metrics(source_file):
"""Reads the json file of metric definitions and returns a map of metric names to
metric definitions"""
with open(source_file) as f:
    raw_metrics = json.loads(f.read())
metrics = { }
for m in raw_metrics:
if m['key'] in metrics:
assert False, "Metric key %s already used, check definition of %s" % (m['key'], m)
m['kind'] = "Metrics.TMetricKind.%s" % m['kind']
m['units'] = "Metrics.TUnit.%s" % m['units']
metrics[m['key']] = m
return metrics
|
02a3390d25f2f5b18a6a231d7cd94d83b9fa048e
| 602,084 |
def make_samplers(distances, sampler_factory):
"""Construct samplers for each informative subset of the summary statistic.
Parameters
----------
distances : dict
A dictionary with discrepancy nodes corresponding to each subset of the summary statistic.
sampler_factory
A function which takes a discrepancy node as an argument
and returns an ELFI ABC sampler (e.g. elfi.Rejection).
Returns
-------
samplers : dict
A mapping from the marginals of the parameter to the corresponding sampler.
"""
return {k: sampler_factory(dist) for (k, dist) in distances.items()}
|
1e3ce6763e591b8342ed81b427b6b60524402feb
| 159,102 |
def time_format(num, digits=1, align_unit=False):
# type: (float, int, bool) -> str
"""Format and scale output according to standard time
units.
Example
-------
>>> time_format(0)
'0.0 s'
>>> time_format(0, align_unit=True)
'0.0 s '
>>> time_format(0.002)
'2.0 ms'
>>> time_format(2001)
'33.4 m'
>>> time_format(2001, digits=3)
'33.350 m'
"""
if num is None:
return "<unknown>"
unit = "s"
if (num >= 1.0) or (num == 0.0):
if num >= 60.0:
num /= 60.0
unit = "m"
if num >= 60.0:
num /= 60.0
unit = "h"
if num >= 24.0:
num /= 24.0
unit = "d"
else:
num *= 1000.0
for p in ["ms", "us", "ns", "ps", "fs"]:
unit = p
if abs(round(num, digits + 3)) >= 1:
break
num *= 1000.0
if (len(unit) == 1) and align_unit:
return ("%." + str(digits) + "f %s ") % (num, unit)
else:
return ("%." + str(digits) + "f %s") % (num, unit)
|
806cefcd7c9ddeff8214031bbbdfee09ad7fa27f
| 669,104 |
def format_types(types):
"""Format types for SQL statements"""
return ["\"{}\"".format(type_) for type_ in types]
|
f4207ad13fbace48aa7f0fa93c8fe90ca82289b1
| 212,274 |
def _get_feed_index_definition(number_of_shards, number_of_replicas):
"""
Constructs the Feed index with a given number of shards and replicas. Feeds are stored in an atomic form and
are based as much as possible on http://activitystrea.ms/
:param number_of_shards: Number of shards for the feeds index.
:param number_of_replicas: Number of replicas for the feeds index.
The index has the following parts:
published: Date when the feed was published. Stored in ISO 8601 format.
See https://docs.python.org/3.6/library/datetime.html#datetime.date.isoformat for more info.
published_date: Date section of the published date. YYYY-MM-DD. E.g., 2002-12-04
published_time: Time section of the published date. HH:MM:SS. E.g., 13:00:23
type: Single word in infinitive. Also known as "verb", it describes some form of action that may happen,
is currently happening, or has already happened.
See https://www.w3.org/TR/activitystreams-vocabulary/#dfn-activity for more info.
actor: Describes the entity that is performing the activity.
See https://www.w3.org/TR/activitystreams-vocabulary/#dfn-actor for more info.
actor.id: The unique id of the actor. Only one ID is accepted.
actor.type: Single word. The type of actor performing the activity. E.g., Person, User, Member.
See https://www.w3.org/TR/activitystreams-vocabulary/#actor-types for more info.
actor.extra: Use this field to store extra information at actor level.
IMPORTANT NOTE: This field is "non-analyzable" which means that ES does not perform any
operations on it thus it cannot be used to order, aggregate, or filter query results.
object: Describes an object of any kind linked to the action itself.
See https://www.w3.org/TR/activitystreams-vocabulary/#dfn-object for more info.
object.id: Single ID. The unique id of the object
object.type: Single word. Provides some degree of specificity to the object. E.g., Document, Project, Form.
See https://www.w3.org/TR/activitystreams-vocabulary/#object-types for more info
object.extra: Use this field to store extra information at object level.
IMPORTANT NOTE: This field is "non-analyzable" which means that ES does not perform any
operations on it thus it cannot be used to order, aggregate, or filter query results.
origin [optional]: The origin is applicable to any type of activity for which the English preposition "from"
can be considered applicable in the sense of identifying the origin, source or provenance of
the activity's object. See https://www.w3.org/TR/activitystreams-vocabulary/#origin-target
for more information
origin.id: The unique ID of the origin. Only one ID is accepted.
origin.type: Single word. Provides some degree of specificity to the origin. E.g., Collection
origin.extra: Use this field to store extra information at origin level.
IMPORTANT NOTE: This field is "non-analyzable" which means that ES does not perform any
operations on it thus it cannot be used to order, aggregate, or filter query results.
target [optional]: The target is applicable to any type of activity for which the English preposition
"to" can be considered applicable in the sense of identifying the indirect object or
destination of the activity's object.
See https://www.w3.org/TR/activitystreams-vocabulary/#origin-target for more information
target.id: The unique ID of the target. Only one ID is accepted.
target.type: Single word. Provides some degree of specificity to the target. E.g., Collection
target.extra: Use this field to store extra information at target level.
IMPORTANT NOTE: This field is "non-analyzable" which means that ES does not perform any
operations on it thus it cannot be used to order, aggregate, or filter query results.
extra: Use this field to store extra information at activity level.
IMPORTANT NOTE: This field is "non-analyzable" which means that ES does not perform any
operations on it thus it cannot be used to order, aggregate, or filter query results.
:return: Dict.
"""
# noinspection SpellCheckingInspection
_json = {
"settings": {
"index": {
"number_of_shards": number_of_shards,
"number_of_replicas": number_of_replicas,
}
},
"mappings": {
"properties": {
"published": {"type": "date"},
"published_date": {"type": "date", "format": "yyyy-MM-dd"},
"published_time": {"type": "date", "format": "HH:mm:ss"},
"published_year": {"type": "integer"},
"published_month": {"type": "integer"},
"type": {"type": "keyword"},
"actor": {
"properties": {
"id": {"type": "keyword"},
"type": {"type": "keyword"},
"extra": {"type": "object", "enabled": "false"},
}
},
"object": {
"properties": {
"id": {"type": "keyword"},
"type": {"type": "keyword"},
"extra": {"type": "object", "enabled": "false"},
}
},
"origin": {
"properties": {
"id": {"type": "keyword"},
"type": {"type": "keyword"},
"extra": {"type": "object", "enabled": "false"},
}
},
"target": {
"properties": {
"id": {"type": "keyword"},
"type": {"type": "keyword"},
"extra": {"type": "object", "enabled": "false"},
}
},
"extra": {"type": "object", "enabled": "false"},
}
},
}
return _json
|
b5c5d13f3b25c36b8de5b64d08f0bd1946366441
| 330,065 |
def spawn_dates_times(df, spawn_dates=True, spawn_times=False):
"""
Build date/times column from a timeseries dataframe
:param df: (pd.DataFrame) - dataframe with datetime index
:param spawn_dates: (boolean) - whether to spawn year, month, day cols
:param spawn_times: (boolean) - whether to spawn hour, minute, second cols
:return df: (pd.DataFrame) - dataframe with datetime index
"""
if spawn_dates:
ind = df.index
df = df.assign(year=ind.year, month=ind.month, day=ind.day)
if spawn_times:
ind = df.index
df = df.assign(hour=ind.hour, minute=ind.minute, second=ind.second)
return df
|
ee58b5117d65fa3f217b16973dcdc06918c5474b
| 10,645 |
from collections import OrderedDict
def arr_to_dict(arr, ref_dict):
"""
Transform an array of data into a dictionary keyed by the same keys in
ref_dict, with data divided into chunks of the same length as in ref_dict.
Requires that the length of the array is the sum of the lengths of the
arrays in each entry of ref_dict. The other dimensions of the input
array and reference dict can differ.
Arguments
---------
arr : array
Input array to be transformed into dictionary.
ref_dict : dict
Reference dictionary containing the keys used to construct the output
dictionary.
Returns
-------
out : dict
Dictionary of values from arr keyed with keys from ref_dict.
"""
out = OrderedDict()
idx = 0
assert len(arr) == sum([len(v) for v in ref_dict.values()])
for k, bd in ref_dict.items():
out[k] = arr[idx : idx + len(bd)]
idx += len(bd)
return out
|
55339447226cdd2adafe714fa12e144c6b38faa2
| 706,830 |
def sol(arr, n, k):
"""
Store the position of the element in a hash and keep updating it
as we go forward. If the element already exists in the hash check
the distance between them and return True if it is <= k
"""
h = {}
for i in range(n):
if arr[i] in h:
pos = h[arr[i]]
if pos+k >= i:
return True
h[arr[i]] = i
return False
|
7022dbe564505cd8c02373d1620b78fe39a1f53c
| 412,970 |
def underscore_to_camelcase(s):
"""
Convert lowercase_with_underscore names to CamelCase.
"""
return ''.join(x.capitalize() or '_' for x in s.split('_'))
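# Illustrative usage (added example, not part of the original snippet):
assert underscore_to_camelcase("hello_world") == "HelloWorld"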
|
8de189007ac25b53037cb17e267ec4b81ee80f8f
| 664,628 |
def getXYCoords(geometry, coord_type):
"""
Returns either x or y coordinates from geometry coordinate sequence.
Used with LineString and Polygon geometries.
"""
if coord_type == 'x':
return geometry.coords.xy[0]
elif coord_type == 'y':
return geometry.coords.xy[1]
|
ae9b713632150c3b811a172ec0716db0486d0a87
| 385,420 |
def any_in_dict(key_tuple, dict_obj):
"""Return whether any of the given keys is in the given dict.
Parameters
----------
key_tuple : tuple
The keys for which to check inclusion.
dict_obj : dict
The dict to examine.
Returns
-------
bool
True if any of the given keys is in the given dict. False otherwise.
Example
-------
>>> dict_obj = {'a': 1, 'c': 2}
>>> any_in_dict(('a', 'b'), dict_obj)
True
>>> any_in_dict(('b', 'g'), dict_obj)
False
"""
return any([key in dict_obj for key in key_tuple])
|
03208035392d884ab7b41e4229bbf995a4b78f9e
| 226,359 |
from typing import List
def score(
source_confidence: List[float], score: List[float], mentioned_feeds_count: int
) -> int:
"""
Function calculates final score for the IoC
Parameters:
source_confidence (List[float], 0..1) — a list `source_confidence` of all CTI feeds where the IoC has been mentioned.
score (List[float], 0..1) — a list `score` of all CTI feeds where the IoC has been mentioned.
mentioned_feeds_count (int) — count of feeds that mentioned the IoC.
Returns:
IoC final score (int, 0..100)
"""
x: float = 0
y: float = 0
for i in range(0, mentioned_feeds_count):
x += (source_confidence[i] ** 2) * score[i]
y += source_confidence[i]
return round(x / y * 100)
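# Illustrative usage (added example, not part of the original snippet):
# x = 1.0**2 * 0.8 + 0.5**2 * 0.6 = 0.95, y = 1.5, so 0.95 / 1.5 * 100 ~= 63.
assert score([1.0, 0.5], [0.8, 0.6], 2) == 63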
|
0d268c5b23c8f82d1f507b2923acd41775d021aa
| 104,884 |
import torch
from typing import Optional
from typing import Union
from typing import Tuple
def wmean(
x: torch.Tensor,
weight: Optional[torch.Tensor] = None,
dim: Union[int, Tuple[int]] = -2,
keepdim: bool = True,
eps: float = 1e-9,
) -> torch.Tensor:
"""
Finds the mean of the input tensor across the specified dimension.
If the `weight` argument is provided, computes weighted mean.
Args:
x: tensor of shape `(*, D)`, where D is assumed to be spatial;
weight: if given, non-negative tensor of shape `(*,)`. It must be
broadcastable to `x.shape[:-1]`. Note that the weights for
the last (spatial) dimension are assumed same;
dim: dimension(s) in `x` to average over;
keepdim: tells whether to keep the resulting singleton dimension.
eps: minimum clamping value in the denominator.
Returns:
the mean tensor:
* if `weights` is None => `mean(x, dim)`,
* otherwise => `sum(x*w, dim) / max{sum(w, dim), eps}`.
"""
args = {"dim": dim, "keepdim": keepdim}
if weight is None:
return x.mean(**args)
if any(
xd != wd and xd != 1 and wd != 1
for xd, wd in zip(x.shape[-2::-1], weight.shape[::-1])
):
raise ValueError("wmean: weights are not compatible with the tensor")
return (x * weight[..., None]).sum(**args) / weight[..., None].sum(**args).clamp(
eps
)
|
db742eb5d899b190609a8e40cd9e4a65f52a45cd
| 701,186 |
import itertools
def run_return_final_tissue(simulation,N_step):
"""run given simulation for N_step iterations
returns final tissue object"""
return next(itertools.islice(simulation,N_step,None))
|
408b0a36e1526bd83087702001b089e2d8097ea2
| 335,989 |
import re
def isCmdOption(arg):
"""Returns True if the string is a command line option,
False otherwise (if it is an argument)"""
# An option must start with '-' and not consist of all numbers
return arg.startswith('-') and not re.match('^-[0-9.]+$', arg)
|
5bc27819415dedae2e45df16fd66616ee8820f1e
| 404,494 |
def remove_virtual_slot_cmd(lpar_id, slot_num):
"""
Generate HMC command to remove virtual slot.
:param lpar_id: LPAR id
:param slot_num: virtual adapter slot number
:returns: A HMC command to remove the virtual slot.
"""
return ("chhwres -r virtualio --rsubtype eth -o r -s %(slot)s "
"--id %(lparid)s" %
{'slot': slot_num, 'lparid': lpar_id})
|
2c6f14949910865f3a60c0016b4587088441572e
| 7,948 |
def analyzeCharCategory(char, char_cat_def):
"""Analyze a given character category by checking code points
Parameters
----------
char: string
A character to analyze
char_cat_def: dictionary
Dictionary which is obtained by the loadCharDef function.
Returns
-------
cat_name: string
Category name
"""
assert len(char) == 1
cp = ord(char)
for cat_name, params in char_cat_def.items():
if 'code_points' in params:
for cand_cp in params['code_points']:
cp_beg, cp_end = cand_cp
if cp >= int(cp_beg, 16) and cp <= int(cp_end, 16):
return cat_name
|
ba25843463710e974330015f180e77fa1f1eea84
| 198,642 |
def append_heatmap(tokens, scores, latex, gamma, caption, pad_token, formatting="colorbox", truncate_pad=True):
"""
Produce a heatmap for LaTeX
Format options: colorbox, text"""
if gamma != 1:
raise NotImplementedError
latex += "\n\\begin{figure}[!htb]"
for token, score in zip(tokens, scores):
if token == pad_token and truncate_pad:
continue
color = "blue"
if score >= 0:
color = "red"
latex += f"\\{formatting}" + "{" + f"{color}!{abs(score) * 100}" + "}" + "{" + token + "}"
latex += "\\caption{" + f"{caption}" + "}"
latex += "\\end{figure}\n"
return latex
|
a558988606fe7bd0514f2697fb6644fc47e6c9c4
| 684,176 |
def get_middle(self):
"""Return the point at the middle of the Segment
Parameters
----------
self : Segment
A Segment object
Returns
-------
Zmid: complex
Complex coordinates of the middle of the Segment
"""
Z1 = self.begin
Z2 = self.end
Zmid = (Z1 + Z2) / 2.0
# Return (0,0) if the point is too close to 0
if abs(Zmid) < 1e-6:
Zmid = 0
return Zmid
|
d23e8403026a9af5d39752f60631e95ceb60069d
| 217,048 |
def classify(true_otu, pred_otu):
"""
Classify a prediction as a true positive (tp), true negative (tn),
false positive (fp), or false negative (fn).
"""
if true_otu and pred_otu:
result = "tp"
elif true_otu and not pred_otu:
result = "fn"
elif not true_otu and pred_otu:
result = "fp"
elif not true_otu and not pred_otu:
result = "tn"
else:
raise ValueError("this should never ever happen")
return result
|
eba696284b7984685ebae932147ff23608e02fac
| 493,888 |
def filter_dict(dictionary, keep_fields):
"""
Filter a dictionary's entries.
:param dictionary: Dictionary that is going to be filtered.
:type dictionary: dict
:param keep_fields: Dictionary keys that aren't going to be filtered.
:type keep_fields: dict or list or set
:return: Filtered dictionary.
:rtype: dict
"""
return {
key: value
for key, value in dictionary.items()
if key in keep_fields
}
|
48f0fe632d0b6e056d4049ba5b79009ed63fac62
| 185,214 |
from typing import List
from typing import Tuple
def centroid(vectors: List[List[float]]) -> Tuple[float, float, float]:
"""
Calculate the centroid from a vectorset X.
https://en.wikipedia.org/wiki/Centroid
Centroid is the mean position of all the points in all of the coordinate
directions.
C = sum(X)/len(X)
Parameters
----------
vectors : List[List[float]]
(N,D) matrix, where N is points and D is dimension.
Returns
-------
C : tuple
centroid
"""
s = [0.0, 0.0, 0.0]
num = 0
for line in vectors:
num += 1
for n, v in enumerate(line):
s[n] += v
return (s[0] / num, s[1] / num, s[2] / num)
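# Illustrative usage (added example, not part of the original snippet):
assert centroid([[0, 0, 0], [2, 4, 6]]) == (1.0, 2.0, 3.0)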
|
72b8a793bec4e529ffbe34e7a9e5909b72420f7f
| 555,394 |
import json
def save_json(f, cfg):
""" Save JSON-formatted file """
try:
with open(f, 'w') as configfile:
json.dump(cfg, configfile)
except (OSError, TypeError, ValueError):
    # the file could not be written or cfg is not JSON-serializable
    return False
return True
|
75c232000962a4edbad5131b11f2701157f32d76
| 682,537 |
def tle_fmt_float(num,width=10):
""" Return a left-aligned signed float string, with no leading zero left of the decimal """
digits = (width-2)
ret = "{:<.{DIGITS}f}".format(num,DIGITS=digits)
if ret.startswith("0."):
return " " + ret[1:]
if ret.startswith("-0."):
return "-" + ret[2:]
|
686cb4061e5cf2ad620b85b0e66b96a8cd1c3abf
| 707,207 |
def clamp(n, smallest, largest):
""" Force n to be between smallest and largest, inclusive """
return max(smallest, min(n, largest))
|
75afadfc7c49d07f9a2c23074d666cb0b55b0e00
| 51,693 |
def get_onat_shifts(full_ids, shifts):
"""
combines explicit atom ids "ACD_id[MOL block_id]" and chemical shifts to produce a mixonat-formatted list of chemical shifts
"""
return '\n'.join(["%s \t%s \t" % x for x in zip(full_ids, shifts)])
|
291737a70bd91d0a9f1929d735d4980deff15a02
| 462,253 |
def notas(*nts, sit=False):
"""
-> Returns a dictionary with the number of grades given, the highest and lowest grade, the average, and (optionally) the situation.
:param nts: grades
:param sit: flag controlling whether the situation is added to the dictionary
:return: dictionary with the information about the grades
"""
info = dict()
info['quantidade de notas'] = len(nts)
info['maior nota'] = max(nts)
info['menor nota'] = min(nts)
info['média'] = sum(nts)/len(nts)
if sit:
if info['média'] > 7:
info['situação'] = 'BOA'
elif info['média'] > 5:
info['situação'] = 'RAZOÁVEL'
else:
info['situação'] = 'RUIM'
return info
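# Illustrative usage (added example, not part of the original snippet):
# the average of 8, 6 and 9 is about 7.67, which is above 7, so the situation is 'BOA'.
info = notas(8, 6, 9, sit=True)
assert info['maior nota'] == 9 and info['situação'] == 'BOA'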
|
cb51ed853227df4aadcd7f36f7e4ad4f4ff87f13
| 402,950 |
def split_choices(choices_string):
"""
Convert a comma separated choices string to a list.
"""
return [x.strip() for x in choices_string.split(",") if x.strip()]
|
f9efd7151d8a58f5130ab622589afeed5a470d9f
| 154,873 |
def parse_number_set(number_set):
"""
Parse a number set string to a set of integers.
The string is a comma-separated range of "atoms", which may be
* A single value (`1`)
* A range of values (`1-5`) separated with either a dash or double dots.
and these may also be negated with a leading exclamation mark.
>>> parse_number_set('1-5,8-9')
{1, 2, 3, 4, 5, 8, 9}
>>> parse_number_set('1-5,!4')
{1, 2, 3, 5}
>>> parse_number_set('1,2,3')
{1, 2, 3}
>>> sorted(parse_number_set('-1..-5'))
[-5, -4, -3, -2, -1]
"""
incl = set()
excl = set()
for atom in number_set.split(','):
atom = atom.strip()
if not atom:
continue
if atom.startswith('!'):
dest = excl
atom = atom[1:]
else:
dest = incl
if '-' in atom[1:] or '..' in atom:
start, end = [int(v) for v in atom.split(('..' if '..' in atom else '-'), 1)]
if start > end:
end, start = start, end
dest.update(set(range(start, end + 1)))
else:
dest.add(int(atom))
return incl - excl
|
f8967ffa9400268a75ae810e8209241138edbd10
| 435,721 |
def axial_n_moves(f, n, col, slant):
"""
Return coordinate moved n hexes by the action defined in f.
f is expected to be an axial move function.
"""
for _ in range(n):
col, slant = f(col, slant)
return col, slant
|
4707df4b678321098b78acae70012bd837946cd6
| 509,066 |
def factorial(n):
"""
Calculates factorial of a number
Arguments:
n {integer} -- input
Returns:
factorial {integer}
"""
assert n >= 0, 'invalid number'
if n == 0:
return 1
else:
return n * factorial(n - 1)
|
7d5e9b62e4bfbd93f8b35a90a25ebdc40c531fae
| 510,073 |
def is_day_of_week(date, day_of_week):
"""Check if you are on specific day of a week.
day_of_week: 0 for Monday, 6 is Sunday.
"""
return (date.weekday() == day_of_week)
|
8b6503b3c284bea3dc2d3e82ba1fe0fa7926b8cf
| 586,610 |
import ast
def LoadDependencyDictionary(filename):
"""Loads dependencies from the named file, returning a dictionary.
Args:
filename: name of the file to load the dictionary from.
Returns:
A dictionary in which each entry key is a library name and the value is a
list of all libraries on which that library depends on.
"""
with open(filename, 'r') as f:
    # literal_eval safely parses the dict literal; the with-block closes the file.
    return ast.literal_eval(f.read())
|
a984af27cd7d675ee29f678ea42b3f1d2f489208
| 407,814 |
def x_aver_top_mass(xp_mass, xpf_mass):
"""
Calculates the average mass concentration at the top of column.
Parameters
----------
xp_mass : float
    The mass concentration of distillate, [kg/kg]
xpf_mass : float
    The mass concentration at the feed point, [kg/kg]
Returns
-------
x_aver_top_mass : float
The average mass concentration at top of column, [kg/kg]
References
----------
Дытнерский, p. 230, formula 6.8
"""
return (xp_mass + xpf_mass) / 2
|
a6cf2405f71701f247895f1935a4aaa8e88c7865
| 86,963 |
def major_minor_change(old_version, new_version):
"""Check if a major or minor change occurred."""
major_mismatch = old_version.major != new_version.major
minor_mismatch = old_version.minor != new_version.minor
return major_mismatch or minor_mismatch
|
effa9f55c82a9edcacd79e07716527f314e41f39
| 708,818 |
def build_process_trees(processes):
"""
Build a nested dictionary, based on the relationships between processes.
Args:
processes: array of the currently-running processes
Returns:
trees: a multi-level dictionary.
The keys are the ids of processes that "may" have children
Values are arrays which hold the children (and children of children)
Empty arrays indicate that the process has no children
"""
trees, seen_ppids = {}, {}
for row in processes:
pid = row['pid']
ppid = row['ppid']
command = row['command']
seen_ppids[pid] = 1
if ppid == 0 or ppid not in seen_ppids:
trees[pid] = []
else:
# Describe the child and find its parent process (ppid)
new_process = {'pid': pid, 'command': command}
try:
trees[ppid].append([new_process])
except KeyError:
for root in trees:
for node in trees[root]:
# node should be an array holding a dict
for counter, process in enumerate(node, start=1):
if ppid == process['pid']:
# Success. We found the parent of the child process
# Append child to parent to indicate relationship
node[counter:0] = [new_process]
# End of for loop
return trees
|
46d2c2bcedce9f039e5024833bbe9a9b0a9fae84
| 231,788 |
import pickle
def load_obj(name):
"""
Method to load pickle objects.
input:
name: path with the name of the pickle without file extension.
"""
with open(name + '.pkl', 'rb') as file:
return pickle.load(file)
|
edf99286bb0d2f66cfa3b7d47a7929c7ea86bf30
| 680,024 |
def expand_aabb(left, right, top, bottom, delta_pixel):
""" Increases size of axis aligned bounding box (aabb).
"""
left = left - delta_pixel
right = right + delta_pixel
top = top - delta_pixel
bottom = bottom + delta_pixel
return left, right, top, bottom
|
0073df23538892a5ae0262b82f16eabbd1f41da2
| 43,848 |
def load_vocab(vocab_file):
"""Loads a vocabulary file into a dictionary."""
vocab = {}
with open(vocab_file, 'r') as f:
for line in f:
    parts = line.strip().split()
    if not parts:
        continue  # skip blank lines
    token = parts[0]
    if token not in vocab:
        vocab[token] = len(vocab)
return vocab
|
88d3f353738b7837c77ef2bda30482fc4d0ef454
| 157,898 |
def compare_extension(filename: str, expected_extension: str):
""" Compare the extension of the given file with the expected extension """
return filename.endswith(expected_extension)
|
c6021ec04fe287f70a3eadf7f977e8f29a6937fc
| 689,478 |
def separate_words_and_numbers(strings):
"""
Separates words and numbers into two lists.
:param strings: List of strings.
:return: One list of words and one list of numbers
"""
filtered_words = []
filtered_numbers = []
for string in strings:
if string.isdigit():
filtered_numbers.append(string)
else:
filtered_words.append(string)
return filtered_words, filtered_numbers
|
d0846effcd81524620ed7d6c3e132a3d44f0f2ef
| 670,616 |
from typing import List
from typing import Tuple
def _count_inversions(ints: List[int]) -> Tuple[List[int], int]:
"""
Count the number of inversions in the given sequence of integers (ignoring
zero), and return the sorted sequence along with the inversion count.
This function is only intended to assist |is_solvable|.
>>> _count_inversions([3, 7, 1, 4, 0, 2, 6, 8, 5])
([1, 2, 3, 4, 5, 6, 7, 8], 10)
"""
if len(ints) <= 1:
return ([], 0) if 0 in ints else (ints, 0)
midpoint = len(ints) // 2
l_side, l_inv = _count_inversions(ints[:midpoint])
r_side, r_inv = _count_inversions(ints[midpoint:])
inversions = l_inv + r_inv
i = j = 0
sorted_tiles = []
while i < len(l_side) and j < len(r_side):
if l_side[i] <= r_side[j]:
sorted_tiles.append(l_side[i])
i += 1
else:
sorted_tiles.append(r_side[j])
inversions += len(l_side[i:])
j += 1
sorted_tiles += l_side[i:] + r_side[j:]
return (sorted_tiles, inversions)
|
e6953d75d6b76ad686f2b476a8ab10ee3a458796
| 248,936 |
def assert_is_dict(var):
"""Assert variable is from the type dictionary."""
if var is None or not isinstance(var, dict):
return {}
return var
|
3ed3e970662213a920854e23b6e4a18042daddf0
| 77,622 |
import torch
def median_heuristic(dnorm2, device):
"""Compute median heuristic.
Inputs:
dnorm2: (n x n) tensor of \|X - Y\|_2^2
Return:
med(\|X_i - Y_j\|_2^2, 1 \leq i < j \leq n)
"""
ind_array = torch.triu(torch.ones_like(dnorm2, device=device), diagonal=1) == 1
med_heuristic = torch.median(dnorm2[ind_array])
return med_heuristic
|
d437aaee4b2a7c867cf6de57938383d35ff98c43
| 164,419 |
def cast_tensor_type(tens, dtype):
"""
tens: pytorch tens
dtype: string, eg 'float', 'int', 'byte'
"""
if dtype is not None:
assert hasattr(tens, dtype)
return getattr(tens, dtype)()
else:
return tens
|
378154acebad9ff080090b6dfad803c03c9ea11b
| 696,384 |
import math
def get_bearing(origin_point, destination_point):
"""
Calculate the bearing between two lat-lng points.
Each argument tuple should represent (lat, lng) as decimal degrees.
Bearing represents angle in degrees (clockwise) between north and the
direction from the origin point to the destination point.
Parameters
----------
origin_point : tuple
(lat, lng)
destination_point : tuple
(lat, lng)
Returns
-------
bearing : float
the compass bearing in decimal degrees from the origin point to the
destination point
"""
if not (isinstance(origin_point, tuple) and isinstance(destination_point, tuple)):
raise TypeError("origin_point and destination_point must be (lat, lng) tuples")
# get latitudes and the difference in longitude, as radians
lat1 = math.radians(origin_point[0])
lat2 = math.radians(destination_point[0])
diff_lng = math.radians(destination_point[1] - origin_point[1])
# calculate initial bearing from -180 degrees to +180 degrees
x = math.sin(diff_lng) * math.cos(lat2)
y = math.cos(lat1) * math.sin(lat2) - (math.sin(lat1) * math.cos(lat2) * math.cos(diff_lng))
initial_bearing = math.atan2(x, y)
# normalize initial bearing to 0-360 degrees to get compass bearing
initial_bearing = math.degrees(initial_bearing)
bearing = (initial_bearing + 360) % 360
return bearing
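# Illustrative usage (added example, not part of the original snippet):
# moving due east along the equator gives a bearing of 90 degrees.
assert abs(get_bearing((0.0, 0.0), (0.0, 1.0)) - 90.0) < 1e-9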
|
cfba6ccd27e0b2e2b8fa34f71611b061692f6dbf
| 693,852 |
def default_credit_scorer(score):
"""Report the underlying score without modification."""
return score
|
b2b522900666d74f9152ca2c8dea2d9f2ca73dbf
| 399,995 |
def _clean_query_string(q):
"""Clean up a query string for searching.
Removes unmatched parentheses and joining operators.
Arguments:
q (str): Query string to be cleaned
Returns:
str: The clean query string.
"""
q = q.replace("()", "").strip()
if q.endswith("("):
q = q[:-1].strip()
# Remove misplaced AND/OR/NOT at end
if q[-3:] == "AND" or q[-3:] == "NOT":
q = q[:-3]
elif q[-2:] == "OR":
q = q[:-2]
# Balance parentheses
while q.count("(") > q.count(")"):
q += ")"
while q.count(")") > q.count("("):
q = "(" + q
return q.strip()
|
655607a0ee33841f34578dd92041b3e7bcb7730c
| 492,420 |
def _create_ip_to_node_map(meta_map):
"""
Create IP to NodeId mapping from meta_map
"""
ip_to_node = {}
if not meta_map or not isinstance(meta_map, dict):
return ip_to_node
for node in meta_map:
if not meta_map[node] or 'ip' not in meta_map[node]:
continue
ip_to_node[meta_map[node]['ip']] = node
return ip_to_node
|
597d15da92bb0ea1efc0098f8b26d7900960a002
| 292,637 |
def exceptions_equal(exception1, exception2):
"""Returns True if the exceptions have the same type and message"""
# pylint: disable=unidiomatic-typecheck
return type(exception1) == type(exception2) and str(exception1) == str(exception2)
|
f677191d49e37cb11743743eb099d0594a66a781
| 308,455 |
def cli(ctx, data_table_id):
"""Display information on a single data_table
"""
return ctx.gi.tool_data.show_data_table(data_table_id)
|
35bc3a085631713675bc4a9eddc9f8f85b50712b
| 145,752 |
def founder_allocation() -> float:
"""How much tokens are allocated to founders, etc."""
return 0.2
|
90967e693d9c21c3f729bc9ffdea1472768f58b0
| 674,185 |
def _get_displayed_page_numbers(current, final):
"""
This utility function determines a list of page numbers to display.
This gives us a nice contextually relevant set of page numbers.
For example:
current=14, final=16 -> [1, None, 13, 14, 15, 16]
This implementation gives one page to each side of the cursor,
or two pages to the side when the cursor is at the edge, then
ensures that any breaks between non-continous page numbers never
remove only a single page.
For an alernativative implementation which gives two pages to each side of
the cursor, eg. as in GitHub issue list pagination, see:
https://gist.github.com/tomchristie/321140cebb1c4a558b15
"""
assert current >= 1
assert final >= current
if final <= 5:
return list(range(1, final + 1))
# We always include the first two pages, last two pages, and
# two pages either side of the current page.
included = {1, current - 1, current, current + 1, final}
# If the break would only exclude a single page number then we
# may as well include the page number instead of the break.
if current <= 4:
included.add(2)
included.add(3)
if current >= final - 3:
included.add(final - 1)
included.add(final - 2)
# Now sort the page numbers and drop anything outside the limits.
included = [
idx for idx in sorted(list(included))
if idx > 0 and idx <= final
]
# Finally insert any `...` breaks
if current > 4:
included.insert(1, None)
if current < final - 3:
included.insert(len(included) - 1, None)
return included
|
2c37f9a3de18b3aed22fe0e2cca84aeed4ce327b
| 633,017 |
import json
def _json_from_message(message):
""" Parses a PB2 message into JSON format. """
return json.loads(message.payload_utf8)
|
710a8dac7889ff7548497a70252f7b3fd5113dc9
| 480,077 |
import torch
def dt_dqn(s, a, r, ns, d, q_local, q_target, gamma):
"""Calculate temporal-difference delta_t using fixed Q-targets. This works for batches of tuples.
Parameters
----------
s : torch.tensor
Current state (size [n * m], where n is the batch size and m is the number of features in the state)
a : torch.tensor
Action taken (size [n])
r : torch.tensor
Reward obtained (size [n])
ns : torch.tensor
Next state (size [n * m])
d : torch.tensor
True if the episode ended after the action (size [n])
q_local : torch.nn.Module
Network used to determine the policy
q_target : torch.nn.Module
Copy of q_local that is updated less frequently
gamma : float
Weight of the estimation of future rewards, in range [0, 1]
Returns
-------
torch.tensor of size [n] where each row is the delta_t of a tuple
"""
with torch.no_grad():
QT = q_target(ns).max(1)[0]
QL = q_local(s).gather(1, a.unsqueeze(1)).squeeze(1)
return r + gamma * QT * (1 - d) - QL
|
00f56871ef0e6fd6680958b9ac456cf5545fad17
| 589,402 |
import torch
def load_checkpoint(path: str):
"""Load checkpoint from path.
Args:
path: checkpoint file to load
Returns:
checkpoint content
"""
checkpoint = torch.load(path, map_location=lambda storage, loc: storage)
return checkpoint
|
d4ed6f9c5d0e8dba08f22d360e353280fb5e24b9
| 283,282 |
def preproc_meta(metadata):
"""
For easier access, convert the metadata list into a dictionary where
the ids are the keys
"""
res = {}
for x in metadata:
try:
k = x["_id"]
except KeyError:
continue
res[k] = x
return res
|
e47aeffcf1b1896ee66ca7f70c8ab96d0dbc8d12
| 103,455 |
def partition(l, func):
"""Partition the list by the result of func applied to each element."""
ret = {}
for x in l:
key = func(x)
if key not in ret:
ret[key] = []
ret[key].append(x)
return ret
|
fe2ac43d21d2fccce736bf54362ca8bc0ed65065
| 482,135 |
def get_max_subarray_sum(nums_array: list) -> int:
"""
Algorithm for getting the maximum sum of a subarray (Kadane's Algorithm)
Complexity --> O(N)
:param nums_array: list
:return sum: int
"""
global_sum = local_sum = nums_array[0]
for i in range(1, len(nums_array)):
if local_sum + nums_array[i] > nums_array[i]:
local_sum += nums_array[i]
else:
local_sum = nums_array[i]
if local_sum > global_sum:
global_sum = local_sum
return global_sum
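# Illustrative usage (added example, not part of the original snippet):
# the maximum-sum subarray is [4, -1, 2, 1] with sum 6.
assert get_max_subarray_sum([-2, 1, -3, 4, -1, 2, 1, -5, 4]) == 6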
|
5ff5e26836c3014c8916057cb4890a70bafb986a
| 90,638 |
def get_versions(existings):
"""
Given gathered path, get all existing versions.
"""
versions = set()
for v in existings.values():
    versions = versions.union(v)
return versions
|
5f8a1b58c7812a0c2db33e5663d0e82a6dc22011
| 653,321 |
def binary_search(list_to_search_in, key_to_search_for, field = lambda item: item["title"]):
"""
Does a binary search through a list containing dictionaries.
Defaults to searching for "title" if no other filed is specified.
"""
if len(list_to_search_in) == 0:
return "Error"
#to make the search work with lowercase and uppercase - make everything to lowercase.
field_to_use = lambda l: field(l).lower()
key_to_search_for = key_to_search_for.lower()
#sort the list, because binary only works on sorted lists.
list_to_search_in = sorted(list_to_search_in, key = field_to_use)
#init low, high.
low = 0
high = len(list_to_search_in) - 1
#while we have not found the item yet
while low <= high:
#get middle of list
mid = (low + high) // 2
#if search key is at a lower index than mid
if field_to_use(list_to_search_in[mid]) > key_to_search_for:
high = mid - 1
#if search key is at a higher index than mid
elif field_to_use(list_to_search_in[mid]) < key_to_search_for:
low = mid + 1
#item is found
else:
return list_to_search_in[mid]
#if nothing was found in list.
return "No such item in list."
|
8570c4c4189a05b1a5dd64266ccaaed62d191146
| 205,122 |
def mse(y,f):
"""MSE."""
mse = ((y - f)**2).mean(axis=0)
return mse
|
fe2f64b7be8ec280e92481bffee82c09a04d69aa
| 230,936 |