content
stringlengths 39
9.28k
| sha1
stringlengths 40
40
| id
int64 8
710k
|
---|---|---|
from typing import List
def simple_clean_cells(cells: List) -> List:
    """Filter out degenerate spatial features.

    A cell is kept only when its bounding box has exactly four entries
    and its volume is strictly positive.

    Args:
        cells: List of spatial features.

    Returns:
        List of the spatial features that passed both checks.
    """
    kept = []
    for cell in cells:
        has_box = len(cell.get_bounding_box()) == 4
        # get_volume() is only consulted when the bounding box is valid,
        # mirroring short-circuit evaluation.
        if has_box and cell.get_volume() > 0:
            kept.append(cell)
    return kept
|
2904b961127d0250c13e8a2647c9745dfa396981
| 646,185 |
def get_keys_from_osm_json(osm_json):
    """Collect the distinct tag keys across all elements of an OSM result.

    Parameters
    ----------
    osm_json : dict
        Parsed OSM/Overpass JSON with an 'elements' list, where each
        element carries a 'tags' mapping.

    Returns
    -------
    list
        Unique tag keys (order not guaranteed, as before — the original
        also returned ``list(set(...))``).
    """
    # Accumulate into a single set instead of rebuilding list(set(list + keys))
    # on every element, which was quadratic in the number of keys.
    osm_keys = set()
    for element in osm_json['elements']:
        osm_keys.update(element["tags"].keys())
    return list(osm_keys)
|
c07f20798c732ba3cd7615eb5e849ff4e59bf069
| 660,455 |
def time_series_year(time_series, year):
    """Extract one calendar year's worth of values from a time-series.

    :param time_series: time-series to be extracted
    :param year: year to be extracted
    :type time_series: pandas.DataFrame
    :type year: int
    """
    # Boolean mask over the DatetimeIndex selects the rows of that year;
    # the first transposed row is the (single) value column.
    in_year = time_series.index.year == year
    return time_series[in_year].values.transpose()[0]
|
e6e44dcd000d5ae646ba574af59f4306b559f656
| 169,426 |
from typing import Any
from typing import TypeGuard
import numbers
def is_real_number(x: Any) -> TypeGuard[float]:
"""Return true if `x` is a real number."""
return isinstance(x, numbers.Real)
|
d789f2423a0ecf542121882976d848a815952752
| 535,924 |
import csv
def sniff_csv_sep(file_path: str) -> str:
    """Detect the delimiter used in a .csv file.

    Only the first line of the file is examined by the csv sniffer.

    :param file_path:
        The file path of the .csv file.
    :return:
        Delimiter used in the given .csv file.
    """
    with open(file_path, 'r') as handle:
        first_line = handle.readline()
    return csv.Sniffer().sniff(first_line).delimiter
|
95de08b7fbb2786b1bf49132b914ecb56e4798e7
| 440,284 |
def complement(seq):
    """Return the complement of a DNA sequence.

    Only upper-case A/T/G/C/N are supported; any other character raises
    ``KeyError`` (preserved from the original dict lookup).

    :param seq: sequence string, e.g. "ATATCTC"
    :return: complemented string, e.g. "TATAGAG"
    """
    # NOTE: the previous docstring example ("TATGAG") dropped a base —
    # the complement of the 7-mer "ATATCTC" is the 7-mer "TATAGAG".
    alpha = {
        "A": "T",
        "T": "A",
        "G": "C",
        "C": "G",
        "N": "N",
    }
    # Generator feeds join directly; no intermediate list needed.
    return ''.join(alpha[base] for base in seq)
|
d0e0975ee9de546dab4f46ac20cf2e637bd8dd9e
| 129,984 |
def array_sum(space, w_arr):
    """ Calculate the sum of values in an array """
    # NOTE(review): `space` looks like an interpreter object space
    # (HippyVM/RPython style) — wrapped values are unboxed via
    # float_w/int_w and re-wrapped on return; confirm against the VM API.
    res = 0
    is_float = False
    with space.iter(w_arr) as itr:
        while not itr.done():
            _, w_val = itr.next_item(space)
            # Coerce each element to a number before accumulating.
            w_val_n = space.as_number(w_val)
            if w_val_n.tp == space.tp_float:
                res += space.float_w(w_val_n)
                # A single float element makes the whole sum a float.
                is_float = True
            else:
                res += space.int_w(w_val_n)
    if is_float:
        return space.newfloat(res)
    return space.newint(int(res))
|
9a35b9a4e6fbc43c96e65fd5e33bd3853ff89070
| 307,339 |
import logging
def setup_logger(log_level):
    """Create (or reconfigure) the module's 'email_client' logger.

    Args:
        log_level: logging level (e.g. ``logging.INFO``) applied to the
            logger and its stream handler.

    Returns:
        logging.Logger: the configured logger.
    """
    logger = logging.getLogger('email_client')
    logger.setLevel(log_level)
    # Guard against attaching a new StreamHandler on every call — the
    # original added one unconditionally, duplicating each log record
    # once per setup_logger() invocation.
    if not logger.handlers:
        handler = logging.StreamHandler()
        formatter = logging.Formatter(
            '%(asctime)s %(levelname)-7s %(name)-10s %(message)s')
        handler.setFormatter(formatter)
        logger.addHandler(handler)
    # Keep existing handlers in sync with the requested level.
    for handler in logger.handlers:
        handler.setLevel(log_level)
    return logger
|
9904698af9aa485d554b7afc7dd54d378fc655b5
| 572,946 |
def PNewOTFTable (inOTF, access, tabType, tabVer, err,
                  numDet=1, numPoly=0, numParm=0):
    """Return the specified table associated with an OTF object.

    inOTF   = Python OTF object
    access  = access code 1=READONLY, 2=WRITEONLY, 3=READWRITE
    tabType = table type, e.g. "OTFSoln"
    tabVer  = table version; > 0 selects that version, 0 selects the
              highest existing version
    err     = Python Obit Error/message stack
    Optional parameters, only used if the table is created:
    numDet  = number of detectors (OTFCal, OTFSoln, OTFScanData)
    numPoly = number of polynomial terms (OTFCal, OTFSoln)
    numParm = number of model parameters (OTFModel)
    """
    ################################################################
    # Thin delegation: the OTF object owns table creation/lookup.
    return inOTF.NewTable(access, tabType, tabVer, err,
                          numDet=numDet, numPoly=numPoly, numParm=numParm)
# end PNewOTFTable
|
edea1d30b0b8c519f7245de16764b46ff419ee24
| 626,467 |
import re
def validate_student_id(student_id, is_year1=False):
    """Verify a student id against the pattern for its cohort.

    :param student_id: id string to check
    :param is_year1: whether the student is a year 1 student
    :return: a boolean
    """
    if is_year1:
        # Year-1 ids look like "Year 1-<group>-<two digits><suffix>".
        pattern = r'Year 1-.*-\d{2}.*'
    else:
        # Regular ids start with eight digits.
        pattern = r'\d{8}'
    return re.match(pattern, student_id) is not None
|
0ab87e41811ff6d1e8c54fe8c58c30c86aa9eeac
| 106,098 |
def permute_dimensions(x, pattern):
    """Reorder the axes of a tensor.

    `pattern` is a sequence of dimension indices, e.g. ``[0, 2, 1]``.
    """
    # Normalise to a tuple, which transpose() implementations accept.
    axes = tuple(pattern)
    return x.transpose(axes)
|
87c7215721c0a02777e40c850f6daaa66f4b0045
| 680,525 |
def _SqlString(s):
"""Produce a valid SQL string constant."""
return "'%s'" % s.replace("'", "''")
|
1d981f14a40b943b8664e2827c78426b1a2bb759
| 145,602 |
def ProximityOperator(kappa, a):
    """
    Soft-thresholding operator used to enforce L1 regularization.

    Produces the solution to  \\argmin_x 2 \\kappa | x | + (x - a)^2:
    shrink `a` toward zero by `kappa`, clamping to zero in between.
    """
    if a > kappa:
        return a - kappa
    if a < -kappa:
        return a + kappa
    return 0.
|
a9b4d7b258282d1e620faa0571fe32ae76f21c60
| 251,759 |
def strip_string(phrase: str):
    """
    Return `phrase` with leading and trailing whitespace removed.

    :param phrase: input string
    :return: stripped copy of the string
    """
    stripped = phrase.strip()
    return stripped
|
c64dbba80fd62f1ede764a396129687c7c13d692
| 90,318 |
from typing import OrderedDict
def tuples_as_bytes(cmds):
    """Format a list of (key, value) tuples as a CAM message.

    Parameters
    ----------
    cmds : list of tuples
        Commands as (key, value) pairs; a later pair overrides an
        earlier one with the same key.

    Returns
    -------
    bytes
        Space-separated sequence of ``/key:val`` tokens.

    Example
    -------
    ::
        >>> tuples_as_bytes([('cmd', 'val'), ('cmd2', 'val2')])
        b'/cmd:val /cmd2:val2'
    """
    deduped = OrderedDict(cmds)  # later duplicates win, order preserved
    tokens = ["/{}:{}".format(key, value) for key, value in deduped.items()]
    return " ".join(tokens).encode()
|
44a06247c9061d798a7cde23475b2a3dba587514
| 340,310 |
def pp_num(num):
    """
    Pretty-print a number with commas as thousands separators.

    The value is truncated toward zero to an integer first (matching the
    old '%d' formatting), so floats lose their fractional part.
    """
    # The format spec's ',' option performs the grouping the previous
    # manual three-digit slicing loop implemented by hand, and it also
    # handles the sign without special-casing.
    return '{:,}'.format(int(num))
|
d249e973b52114c72fa684471d441b870b1fc6c4
| 55,383 |
def isstringlike(item):
    """Return True when the given term is a string instance."""
    result = isinstance(item, str)
    return result
|
db0a88ebea37a5050e212db130da02bc6bdc07d5
| 681,018 |
from typing import Mapping
import functools
def replace_multiple_substrings(s_in: str, subs: Mapping[str, str]) -> str:
    """
    Apply several substring replacements to an input string.

    Replacements are applied one mapping entry at a time, so the order
    in which they take effect follows the mapping's iteration order and
    cannot be relied upon in general.

    Parameters
    ----------
    s_in: str
        Input string to make the substitutions in.
    subs: mapping of str to str
        Maps each substring to replace to its replacement text.

    Examples
    --------
    >>> replace_multiple_substrings("a test", {"a": "hey", "test": "there"})
    'hey there'
    >>> replace_multiple_substrings("12546", {"5": "3", "6": "321"})
    '1234321'
    """
    result = s_in
    for old, new in subs.items():
        result = result.replace(old, new)
    return result
|
c64808434b706d1cdee2826f8aff69b60671a01e
| 476,275 |
def strip_type_prefix(path, prefix):
    """Strip a source-type prefix from a path string.

    Turns strings like ``<prefix>://<path>`` into ``/<path>``::

        pyfile://home/me/config.py -> /home/me/config.py
        json://path/to/some.cfg -> /path/to/some.cfg

    Note the second '/' of '://' is deliberately kept, so the result
    reads as an absolute path.

    Args:
        path: Path string (leading whitespace is ignored).
        prefix: Prefix to strip.

    Returns:
        Path string.
    """
    path = path.lstrip()
    marker = prefix + '://'
    if path.startswith(marker):
        # Drop "<prefix>:/" but keep the trailing '/' of '://'.
        path = path[len(marker) - 1:]
    return path
|
19549fcaab844351ec2101d91321980a1b4312b7
| 525,707 |
def form_bowtie_build_cmd_list(bowtie_build_fp, input_contigs_fasta, output_index_fp):
    """Build the argument list for a bowtie2-build subprocess call.

    Args:
        bowtie_build_fp(str): path to the bowtie2-build executable
        input_contigs_fasta(str): fasta file input used for index construction
        output_index_fp(str): base name used as output path for index files

    Returns:
        call_args_list(list): the argument sequence for the bowtie2-build call

    Raises:
        ValueError: if any of the three arguments is empty or None.
    """
    # The original used `x is ''` — identity comparison with a literal,
    # which is implementation-dependent and a SyntaxWarning on modern
    # CPython. Truthiness checks also catch None uniformly.
    if not bowtie_build_fp:
        raise ValueError('bowtie2_build_path name is empty')
    if not output_index_fp:
        raise ValueError('output file name invalid. index_output_fp is None')
    if not input_contigs_fasta:
        raise ValueError('no fasta file found')
    # required arguments
    return [bowtie_build_fp, input_contigs_fasta, output_index_fp]
|
28af32e6a7626d8bd5647fa12194f0afddb31fbc
| 33,705 |
def second2time(second):
    """
    Convert a duration in seconds to a human-readable time string.

    (The previous summary — "Convert file size in bytes..." — was a
    copy-paste error; this function only formats durations.)

    Parameters
    ----------
    second : float
        Time in seconds.

    Returns
    -------
    t : str
        Time in the format of (days:)hours:minutes:seconds; plain
        seconds below one minute. Note '%2d' pads hours/minutes with a
        leading space.

    Examples
    --------
    >>> second2time(15481.8)
    ' 4:18:01.80'
    """
    if second < 60:
        return '%04.2fs' % second
    m, s = divmod(second, 60)
    if m < 60:
        return '%2d:%05.2f' % (m, s)
    h, m = divmod(m, 60)
    if h < 24:
        return '%2d:%02d:%05.2f' % (h, m, s)
    # More than 1 day
    d, h = divmod(h, 24)
    return '%d:%02d:%02d:%05.2f' % (d, h, m, s)
|
080b0b1d3af2db2d780112f56c18d5f7124b56af
| 391,399 |
def support(node):
    """Extract the support value encoded in a node's label.

    Parameters
    ----------
    node : skbio.TreeNode
        node to get support value of

    Returns
    -------
    float or None
        support value of the node, or None if not available

    Notes
    -----
    A "support value" is the numeric form of a whole node label without
    ":", or of the part preceding the first ":" in the label.
    - "(a,b)1.0", "(a,b)1.0:2.5" and "(a,b)'1.0:species_A'" all carry
      support 1.0.
    - "(a,b):1.0" and "(a,b)species_A" have no support value.

    Examples
    --------
    >>> from skbio import TreeNode
    >>> tree = TreeNode.read(['((a,b)99,(c,d):1.0);'])
    >>> support(tree.lca(['a', 'b']))
    99.0
    >>> support(tree.lca(['c', 'd'])) is None
    True
    """
    try:
        # Take everything before the first ':' (whole label if absent).
        head, _, _ = node.name.partition(':')
        return float(head)
    except (ValueError, AttributeError):
        # Missing name, non-string name, or a non-numeric prefix.
        return None
|
788814d06d02cdbfdf5803e231dd0bb56e4ea06e
| 676,131 |
def allowed_file(filename: str) -> bool:
    """
    Check whether an uploaded image file has an accepted extension.

    Args:
        filename: the filename of the uploaded image

    Returns:
        True when the name contains a dot and its final extension is
        png, jpg or jpeg (case-insensitive).
    """
    if '.' not in filename:
        return False
    extension = filename.rsplit('.', 1)[1].lower()
    return extension in {'png', 'jpg', 'jpeg'}
|
51f1c2f40b5479344645495ebf3fca44f7d3c9b4
| 396,966 |
def normalize_n_to_m(val, m, n):
    # type: (float, float, float) -> float
    """
    Rescale `val` from the range [0, m] onto the range [0, n].
    """
    fraction = val / m
    return fraction * n
|
3602a3de1df6c5caa74e166a29b7888fda69a507
| 298,519 |
from datetime import datetime
import time
def uptime(since):
    """Compute the number of seconds elapsed since a local timestamp.

    `since` must be a local date/time string in the format
    "2017-01-10 16:32:21". Timestamps in the future yield 0.
    """
    started = datetime(*time.strptime(since, "%Y-%m-%d %H:%M:%S")[:6])
    elapsed = (datetime.now() - started).total_seconds()
    seconds = int(elapsed)
    # Clamp future timestamps rather than returning a negative uptime.
    return seconds if seconds >= 0 else 0
|
b9dddfb3b99d9dafb32d9cdc3ee4e8710bde4f0e
| 689,306 |
def get_astronomical_twilight(times, events, value):
    """Find the time at which a twilight event first occurs.

    Args:
        times: sequence of times aligned with `events`.
        events: array-like (exposing ``.tolist()``) of event codes.
        value: 0 for the end of astronomical twilight, 1 for its
            beginning (first occurrence).

    Returns:
        The matching entry of `times`, or None when `value` does not
        occur in `events` (or the inputs are malformed).
    """
    try:
        zindex = events.tolist().index(value)
        at_time = times[zindex]
    except (ValueError, IndexError, AttributeError):
        # ValueError: value absent; IndexError: times shorter than
        # events; AttributeError: events lacks tolist(). The previous
        # bare `except:` also swallowed SystemExit/KeyboardInterrupt.
        at_time = None
    return at_time
|
ce8780a833e6356ad169430720f6b4bdb555e8ed
| 39,859 |
def role_object_factory(role_name='role.test_role'):
    """Build a fake role dict for tests.

    The name is truncated to 32 characters.
    """
    return {
        'nameI18n': role_name[:32],
        'active': 1,
    }
|
e79e7b22a83d9da721b074d32878eabeca4054cd
| 703,629 |
def sexagesimal_angle_to_decimal(degrees, minutes=0, seconds=0, thirds=0, fourths=0):
    """Convert a sexagesimal-parsed angle to decimal degrees.

    Args:
        degrees (int): Angle degrees count (may be negative).
        minutes (int): Angle minutes count.
        seconds (int): Angle seconds count.
        thirds (int): Angle thirds count.
        fourths (int): Angle fourths count.

    Returns:
        float: Angle in decimal degrees, or None when degrees is None.
    """
    if degrees is None:
        return None
    # Work on the magnitude only: subdivisions always add toward the
    # magnitude, and the sign is re-applied at the end.
    magnitude = abs(float(degrees))
    try:
        sign = abs(float(degrees)) / float(degrees)
    except ZeroDivisionError:
        sign = 1
    subdivisions = ((minutes, 60), (seconds, 3600),
                    (thirds, 216000), (fourths, 12960000))
    for count, divisor in subdivisions:
        if count:
            magnitude += float(count) / divisor
    return magnitude * sign
|
b312bfe0a9d15ee3c9853d8a515d00cebae29823
| 407,651 |
import re
def match_str(find_exp, where):
    """Return True when regexp `find_exp` matches any item in `where`."""
    # Items are stringified before searching, as in the original.
    return any(re.search(find_exp, str(entry)) for entry in where)
|
682c9394dba61b2ad84f635c21fb1263177c50e1
| 495,006 |
def get_lomb_frequency_ratio(lomb_model, i):
    """
    Ratio of the i-th fitted frequency to the first fitted frequency of
    a Lomb-Scargle model (`i` is 1-based).
    """
    fits = lomb_model['freq_fits']
    return fits[i - 1]['freq'] / fits[0]['freq']
|
9ea8027f0a58df5dfcd08ede271d8c6bd4ab8e58
| 120,801 |
def indent(
        text,      # Text to indent
        char=' ',  # Character to use in indenting
        indent=2   # Repeats of char
):
    """Prefix every line of single- or multi-lined text."""
    prefix = char * indent
    indented = (prefix + line for line in text.split("\n"))
    return "\n".join(indented)
|
f170745f99a2bb151e79c2f468cf23880d60b3e5
| 696,281 |
def indent(text: str, amount: int) -> str:
    """Indent every line by `amount` spaces, leaving the first line as-is."""
    head, *rest = text.split("\n")
    padding = " " * amount
    return "\n".join([head] + [padding + line for line in rest])
|
a78fedaada94bd9dd5129e7f85f2ba2fe506eb42
| 43,876 |
import pickle
def read_pickle(file):
    """Load and return the object stored in a pickle file."""
    with open(file, "rb") as stream:
        data = pickle.load(stream)
    return data
|
d074df20a4b035137efb8c1fc52f6e9fcdda7add
| 19,474 |
def check_zip(zip_code):
    """Check whether a zip code string has a valid format.

    Args:
        zip_code as a string

    Returns:
        True when the string is at least five characters long and its
        first five characters are all digits; False otherwise.
    """
    return len(zip_code) >= 5 and zip_code[:5].isdigit()
|
c3fd30789e1140bfd4297f98ff949ef1ad5600a4
| 565,300 |
from typing import Iterable
from typing import List
def clean_pubmed_identifiers(identifiers: Iterable[str]) -> List[str]:
    """Strip, deduplicate and sort a collection of PubMed identifiers."""
    cleaned = set()
    for identifier in identifiers:
        if not identifier:
            continue  # skip falsy entries (None, empty strings)
        text = str(identifier).strip()
        if text:
            cleaned.add(text)
    return sorted(cleaned)
|
895da74afc2eda4ab898f172fbd48633a53c697a
| 320,568 |
import time
import asyncio
async def checkTemp(tempObj,LEDObj,loop):
    """Sample a temperature reading and return it, toggling the green or red
    LED depending on whether the reading falls inside the accepted band.

    :param tempObj: Object from Temp class
    :type tempObj: temp
    :param LEDObj: Object from LED class
    :type LEDObj: LED
    :param loop: Main event loop to be used to run coroutines concurrently
    :type loop: asyncio application
    :return: list produced by asyncio.gather, with the temperature
        reading of the target object at index 0
    :rtype: list
    """
    tic = time.perf_counter()
    # gather() returns a list; the single reading ends up at index 0.
    tempValue = await asyncio.gather(tempObj.readTemp())
    toc = time.perf_counter()
    print(f"Time taken to get Temp readings: {toc - tic:0.4f} seconds")
    # NOTE(review): 29-35 degrees is treated as the "OK" band — confirm
    # against the hardware spec.
    if tempValue[0] < 35.00 and tempValue[0] > 29.00:
        tic = time.perf_counter()
        # Fire-and-forget: this gather() is not awaited, so the LED
        # toggle runs concurrently with the caller.
        asyncio.gather(LEDObj.swichOnTempGreen())
        toc = time.perf_counter()
        # NOTE(review): the log label says "red" in the green branch (and
        # vice versa below) — probably swapped; left as-is since it is a
        # runtime string.
        print(f"Time taken to toggle red LED: {toc - tic:0.4f} seconds")
        return tempValue
    else:
        tic = time.perf_counter()
        asyncio.gather(LEDObj.swichOnTempRed())
        toc = time.perf_counter()
        print(f"Time taken to toggle green LED :{toc - tic:0.4f} seconds")
        return tempValue
|
fe72b47f0d766b6e05afaafda3dcd0618ad12787
| 423,571 |
def meters_formatter(f):
    """
    Format a value as meters with 4 decimal digits, or None for None.
    """
    if f is None:
        return None
    return f'{f:0.4f} m'
|
ae80f27b16fba79b0c02fb25aa61e4324c4ece04
| 48,612 |
def enthalpy_wall_del(T_0, T_w, C_p):
    """
    Specific enthalpy difference between total conditions and those at
    the stagnation point of a sphere in supersonic flow.

    Input variables:
        T_0 : Gas total temperature
        T_w : Sphere wall temperature
        C_p : Specific heat capacity
    """
    return C_p * (T_0 - T_w)
|
a27f829d26cdc7f0e5895d77b66e6478ff6608c1
| 377,208 |
def oz_to_g(oz):
    """
    Convert a mass in ounces to grams.
    """
    grams_per_ounce = 28.34952
    return oz * grams_per_ounce
|
0c547b3b95964e25ace4d00d9c491f367282b89f
| 40,393 |
def isascii(text):
    """Check that every character of `text` is ASCII.

    Provided because Python 3.6 lacks str.isascii().
    """
    for ch in text:
        if ord(ch) >= 128:
            return False
    return True
|
9bfcd31fca8f1895b8233eb2f313d46b9ae542c2
| 337,796 |
from typing import List
def concordance_index(
    events: List[int],
    risks: List[float]
) -> float:
    """
    Compute the concordance index (C-index).

    For every permissible pair (a pair whose event labels differ), the
    pair is concordant when the event with label 1 received the higher
    risk score; pairs with tied scores count half:

    .. math::
        C\\mbox{-}Index = \\frac{n\\_concordant\\_pairs +
                0.5 * n\\_risk\\_ties}{n\\_permissible\\_pairs}

    The result lies in [0, 1]: 1.0 means every happened event out-scored
    every unhappened one, 0.0 the reverse, and 0.5 corresponds to random
    scores.

    Parameters
    ----------
    events : List[int]
        1 when the event happened, 0 when it did not.
    risks : List[float]
        Risk score for each event.

    Returns
    -------
    float
        Concordance index.

    Examples
    --------
    >>> events = [1, 0, 1, 1, 0]
    >>> risks = [0.8, 0.43, 0.62, 0.58, 0.62]
    >>> concordance_index(events, risks)
    0.75
    """
    permissible = 0
    concordant = 0
    ties = 0
    n = len(events)
    for a in range(n - 1):
        for b in range(a + 1, n):
            if events[a] == events[b]:
                continue  # not a permissible pair
            permissible += 1
            # Tied scores are checked first; a tie can never also be
            # concordant (the concordance tests use strict >).
            if risks[a] == risks[b]:
                ties += 1
            elif events[a] and risks[a] > risks[b]:
                concordant += 1
            elif events[b] and risks[b] > risks[a]:
                concordant += 1
    return (concordant + 0.5 * ties) / permissible
|
ef067eec6df6395065551df7619d0392027eb737
| 235,687 |
import re
def _fs_exists(module, filesystem):
    """
    Check if file system already exists on /etc/filesystems.

    Runs AIX ``lsfs -l <filesystem>`` and interprets its exit status.

    :param module: Ansible module (provides get_bin_path, run_command,
        fail_json).
    :param filesystem: filesystem name.
    :return: True or False.
    """
    lsfs_cmd = module.get_bin_path('lsfs', True)
    rc, lsfs_out, err = module.run_command("%s -l %s" % (lsfs_cmd, filesystem))
    if rc == 1:
        # rc == 1 can mean either "no such filesystem" or a real
        # failure; the stderr text disambiguates.
        if re.findall("No record matching", err):
            return False
        else:
            # fail_json terminates the module run; nothing is returned
            # on this path.
            module.fail_json(msg="Failed to run lsfs. Error message: %s" % err)
    else:
        return True
|
297c3573ffb73d205345578562828404d34e471b
| 343,645 |
def In(field, values):
    """
    Build a criterion matching records whose `field` takes one of `values`.

    Typical uses: searching observables by type (domain, fqdn, ip) or
    cases tagged as malspam or phishing.

    Arguments:
        field (str): field name
        values (Array): the set of values the field must be in

    Returns:
        dict: JSON representation of the criterion

    ```python
    # Search for observables of type domain, fqdn or ip
    query = In('dataType', ['domain', 'fqdn', 'ip'])
    ```
    produces
    ```json
    {"_in": {"_field": "dataType", "_values": ["domain", "fqdn", "ip"]}}
    ```
    """
    criterion = {'_field': field, '_values': values}
    return {'_in': criterion}
|
99f831290b7c199f263d3af45b15824b6631e165
| 208,186 |
def sort(items, key=None, start_with_none=False, reverse=False):
    """
    Sort a finite iterable, segregating items whose key is ``None``.

    Unlike built-in ``sorted``, items with a ``None`` key are never
    compared: they are gathered either at the end of the result
    (default) or at the beginning. A complete list is returned.

    Args:
        items: any finite iterable
        key: optional, function ``item -> comparable``; without it the
            items themselves are tested for ``None``
        start_with_none: if ``True``, items with ``None`` keys go first
        reverse: sort in the decrementing order

    Returns: a sorted list
    """
    # Materialize first: the two partition passes below would otherwise
    # see an exhausted iterator on the second pass (the old code
    # silently dropped all None items for generator inputs).
    items = list(items)
    if key is not None:
        not_nones = [item for item in items if key(item) is not None]
        nones = [item for item in items if key(item) is None]
    else:
        not_nones = [item for item in items if item is not None]
        nones = [item for item in items if item is None]
    not_nones = sorted(not_nones, key=key, reverse=reverse)
    return nones + not_nones if start_with_none else not_nones + nones
|
b4f1c357178846c9567649b765cc46b186faede2
| 596,652 |
def to_url_representation(path: str) -> str:
    """Encode a path for safe use in urls/queries.

    Underscores become ``-_-`` and slashes become ``__``.
    """
    encoded = path.replace("_", "-_-")
    return encoded.replace("/", "__")
|
d1a380eeac2410d6c3f0b3f269c5aa2f4143f91f
| 182,281 |
def split_time_range(start_time, duration):
    """
    Split a duration starting at `start_time` (hours) across days.

    Returns a 3-tuple: hours left in the first day, number of full
    in-between days, and hours falling on the last day.
    """
    if start_time + duration <= 24:
        # The whole span fits inside the first day.
        return duration, 0, 0
    first_day_hours = 24 - start_time
    full_days, last_day_hours = divmod(duration - first_day_hours, 24)
    return first_day_hours, int(full_days), last_day_hours
|
7274d4825b180d3cb814b9710e0da7a09ec6d397
| 479,006 |
from typing import Dict
from pathlib import Path
from typing import Optional
import csv
def write_results(
    result_dict: Dict[str, Dict[str, str]],
    trans_dict: Dict[str, str],
    input_filepath: Path,
    output_filepath: Path,
    write_csv: Optional[bool] = False,
) -> Dict[str, Dict[str, str]]:
    """
    Returns processed output by combining results_dict (predicted values) and trans_dict (compound IDs).
    Optionally writes results to a CSV file.
    """
    # obtaining all possible column names
    acd_columns = []
    counter = 0
    # NOTE(review): only the first 10^4 entries are scanned for column
    # names — this assumes every column appears early; confirm.
    for key, value in result_dict.items():
        for col, value1 in value.items():
            if col not in acd_columns:
                acd_columns.append(col)
            counter += 1
            if counter == 10 ** 4:
                break
    # filling in missing columns
    for key, value in result_dict.items():
        for col in acd_columns:
            if col not in value:
                # NOTE: mutates result_dict in place as well as the copy
                # referenced by trans_result_dict below.
                result_dict[key][col] = "NaN"
    # translating ID back to original IDs as provided in input file
    trans_result_dict = {}
    for cp_id, props in result_dict.items():
        trans_result_dict[trans_dict[cp_id]] = props
    # writting to csv
    if write_csv is True:
        acd_columns.append("compound_id")
        with open(output_filepath, "w") as f:
            w = csv.DictWriter(f, acd_columns)
            w.writeheader()
            for k in trans_result_dict:
                # NOTE(review): `get(col) or k` substitutes the compound
                # id for the "compound_id" column, but it also replaces
                # any falsy stored value ("" or 0) with k — confirm this
                # is intended.
                w.writerow(
                    {col: trans_result_dict[k].get(col) or k for col in acd_columns}
                )
    return trans_result_dict
|
a791f47c9d16b451db14cd599cf2f15f04f0637c
| 14,371 |
def halve_res_quick(array):
    """
    Halve the resolution of the input 2D array, e.g. from 0.5km to 1km,
    by averaging every 2x2 grid of pixels into 1 pixel.

    :param array: 2D numpy array of data.
    :return: 2D numpy array with half the resolution of the input array.
    """
    # Block shape (preserving the original's arithmetic exactly, which
    # also determines what happens for odd-sized inputs).
    half_rows = int(array.shape[0] / 2)
    half_cols = int(array.shape[1] / 2)
    blocked = array.reshape(half_rows,
                            array.shape[0] // half_rows,
                            half_cols,
                            array.shape[1] // half_cols)
    # Average away the two block axes.
    return blocked.mean(axis=-1).mean(axis=1)
|
23eadd26c2891852dfa776f2b894299fdaa188f3
| 640,142 |
def qrcode_size_table() -> dict[int, int]:
    """Map each QR-code version (1-40) to its size.

    Returns:
        dict[int, int]: A dictionary that contains data in the
        form (version: size).
    """
    # Sizes grow by 4 modules per version, starting at 21 for version 1,
    # so the explicit 40-entry table reduces to one formula.
    return {version: 17 + 4 * version for version in range(1, 41)}
|
f4de761348a2331f047786b09f58b45d039efb7f
| 577,659 |
async def get_random_bnum(session):
    """Use i'mfeelinglucky to get a random bnum.

    :param session: an async HTTP client session (aiohttp-style, used as
        an async context manager around GET).
    :return: the "id" of the first suggestion, or "" when the key is
        absent.
    """
    async with session.get("https://iiif.wellcomecollection.org/service/suggest-b-number?q=imfeelinglucky") as response:
        json = await response.json()
        return json[0].get("id", "")
|
faac8ce405eff04ca255c7a9aaeb9496f12ca85c
| 56,631 |
def available_color(G, vertex, color):
    """Determine whether `color` is unused among a vertex's neighbors.

    Parameters:
        G: a networkx graph whose nodes carry a 'node' object with a
           ``color`` attribute
        vertex: the vertex number (int)
        color: the color to test for availability

    Returns:
        available: True when no neighbor already has `color` (boolean)
    """
    return all(G.nodes[neighbor]['node'].color != color
               for neighbor in G.neighbors(vertex))
|
49f111f3989509ede9db25d5a02a0c8f38d70af5
| 166,905 |
import torch
def camera_matrix(pinholes, eps=1e-6):
    """Build the 4x4 intrinsic matrix from pinhole parameters.

    Args:
        pinholes (torch.Tensor): tensor holding fx, cx, fy, cy.
        eps (float, optional): small constant added for numerical
            stability. Defaults to 1e-6.

    Returns:
        torch.Tensor: intrinsic matrix as a [4, 4] tensor.
    """
    fx, cx, fy, cy = pinholes[0], pinholes[1], pinholes[2], pinholes[3]
    # Identity plus eps everywhere; the four pinhole entries below then
    # overwrite their slots with the exact parameter values.
    k = torch.eye(4, device=pinholes.device, dtype=pinholes.dtype) + eps
    k[..., 0, 0] = fx
    k[..., 0, 2] = cx
    k[..., 1, 1] = fy
    k[..., 1, 2] = cy
    return k
|
513b23943a019a764533323509f2687df81b08d5
| 51,034 |
def cartesian_product(*seqin):
    """Lazily generate the cartesian product of the given sequences.

    Yields one list per combination, first sequence varying slowest.
    """
    def build(prefix, remaining):
        # Base case: no sequences left, the prefix is a full combination.
        if not remaining:
            yield prefix
            return
        for element in remaining[0]:
            for combo in build(prefix + [element], remaining[1:]):
                yield combo
    return build([], seqin)
|
f7846771df55d958b52c7e81af6293f602d1ab25
| 224,566 |
def concat_filter_strings(filter_strings, operator='&'):
    """
    Combine AMI filter strings with a boolean operator.

    Parameters
    ----------
    filter_strings: ``list``
        The valid filter strings to combine.
    operator: ``str``
        Operator placed between the strings: ``&`` (and) or ``|`` (or).
    """
    if len(filter_strings) == 0:
        raise ValueError('filter_strings must have at least one element')
    if len(filter_strings) == 1:
        return filter_strings[0]
    # Each string is parenthesized; the joiner closes one group and
    # opens the next.
    joiner = ')' + operator + '('
    return '(' + joiner.join(filter_strings) + ')'
|
b37ccc528b3d758dd017a750d59020d6ad0d1583
| 454,168 |
def add_or_append(base_dict, key, value, conditional=True):
    """
    Return a copy of `base_dict` with `value` added under `key`.

    If `key` is absent it is created with ``[value]``. If it is present
    and `conditional` is True, `value` is appended to a copy of the
    existing list unless already there; with `conditional` False an
    existing key is left untouched.

    Args:
        base_dict (dict): The existing dictionary (never mutated)
        key (string): The key to add or append to
        value (string): The value to give that key
        [Optional] conditional (bool): exclusion parameter

    Returns (dict): A new dictionary with the provided key/value pair added
    """
    new_dict = dict(base_dict)
    if key in new_dict:
        if conditional and value not in new_dict[key]:
            # Build a new list instead of appending in place: the old
            # shallow copy shared the list objects, so append() mutated
            # the caller's base_dict despite the "new dictionary" promise.
            new_dict[key] = new_dict[key] + [value]
    else:
        new_dict[key] = [value]
    return new_dict
|
dff1cc22ac886b09a13a084425dc0d4719f24f6b
| 214,576 |
def _longest_line_length(text):
"""Return the longest line in the given text."""
max_line_length = 0
for line in text.splitlines():
if len(line) > max_line_length:
max_line_length = len(line)
return max_line_length
|
db8972f66798b10a27e79b6e291abb885a18f60f
| 196,314 |
def new_list(a):
    """
    Convert a list of numbers to floats with at most two decimal places.

    :param a: list of numbers
    :return: new list with floats
    """
    return [float(format(value, ".2f")) for value in a]
|
f1f80ea44f58f0780f02e8df698dbeb81d7ac934
| 17,967 |
def is_digit(key_name):
    """Check whether a key name is a single numeric character."""
    if len(key_name) != 1:
        return False
    return key_name.isnumeric()
|
1ff4d39839b4a45ccec78c3856557ff211344da9
| 311,714 |
def get_fields(obj):
    """
    Describe a Salesforce object and return a tuple:
        0: list of the fields in the given object
        1: the 'soapType' for each of those fields
    """
    names = []
    soap_types = []
    # Single describe() call; both lists are built in one pass.
    for field in obj.describe()['fields']:
        names.append(field['name'])
        soap_types.append(field['soapType'])
    return (names, soap_types)
|
6f12ef02b411e283f34bd075916a5794ffdafa93
| 389,663 |
def coord_to_index_(coord, map_obj):
    """
    Convert atomic residue coordinates into map indices.

    :param coord: (x, y, z) coordinate in angstrom
    :param map_obj: map object exposing voxel_size, origin and n_start
    :return: float indices in reversed-axis (z, y, x) order
    """
    indices = []
    for axis in (2, 1, 0):
        voxel = map_obj.voxel_size[axis]
        index = coord[axis] / voxel - map_obj.origin[axis] / voxel - map_obj.n_start[axis]
        indices.append(index)
    return tuple(indices)
|
d2c198bd574efbccbb97b4ff0dee79fa528b55dd
| 96,773 |
def LoadInput(input_file, input_version, base_data):
    """Load the input file generated by quality_regression_main.

    Comment lines (starting with '#') are kept verbatim; every other
    line keeps its first five tab-separated fields with the version from
    `base_data` (falling back to `input_version`) appended.
    """
    output = []
    with open(input_file, encoding='utf-8') as file:
        for line in file:
            line = line.strip('\n')
            if line.startswith('#'):
                output.append(line)
                continue
            fields = line.split('\t')
            result = '\t'.join(fields[:5])  # status, input, output, command, argument
            version = base_data.get(result, input_version)
            output.append(result + '\t' + version)
    return output
|
86dd28e157ccf8e2249b1933513388f271702a99
| 449,559 |
def prune_deg_one_nodes(sampled_graph):
    """Remove all degree-one nodes from the graph (in place) and return it."""
    # Collect first, then remove, so the node view is not mutated while
    # iterating over it.
    doomed = [v for v in sampled_graph.nodes() if sampled_graph.degree(v) == 1]
    for v in doomed:
        sampled_graph.remove_node(v)
    return sampled_graph
|
c4df72a66c6fb57d5d42a1b877a846338f32f42a
| 708,110 |
def _rescale_boxes(boxes, inds, scales):
"""Rescale boxes according to image rescaling."""
for i in range(boxes.shape[0]):
boxes[i, :] = boxes[i, :] / scales[int(inds[i])]
return boxes
|
c3ea86dd1dc030f517762960095ec79d3a975bd3
| 232,779 |
def get_index_csv_data(data, entry_dict):
    """Map each entry key to the index of its column in the .csv header row."""
    header = data[0]
    return {str(key): header.index(name) for key, name in entry_dict.items()}
|
47888e26da7236144dd3d1ae8c9eb3d2827725a2
| 608,433 |
def upsert_record(client, zoneid, record_name, value, record_type='A', ttl=300):
    """Update or create an AWS Route 53 domain resource record (UPSERT).

    Parameters
    ----------
    client: boto3.client
        The boto3 'route53' client to use for the update request.
    zoneid: str
        Route 53 Hosted Zone ID holding the record.
    record_name: str
        Fully qualified DNS name of the record (the trailing top-level
        '.' is optional in this context).
    value: str
        New record value, typically the IP address.
    record_type: str
        Record type, one of:
        'SOA'|'A'|'TXT'|'NS'|'CNAME'|'MX'|'NAPTR'|'PTR'|'SRV'|'SPF'|'AAAA'|'CAA'
    ttl: int, Optional
        Record TTL (Time To Live) in seconds. Defaults to 300s.

    See Also
    --------
    https://boto3.amazonaws.com/v1/documentation/api/latest/reference/services/route53.html#Route53.Client.change_resource_record_sets

    Returns
    -------
    dict:
        The 'ChangeInfo' mapping from the Route 53 response:
        {
            'Id': 'string',
            'Status': 'PENDING'|'INSYNC',
            'SubmittedAt': datetime(year, month, day),
            'Comment': 'string'
        }
    """
    record_set = {
        'Name': record_name,
        'Type': record_type,
        'TTL': ttl,
        'ResourceRecords': [{'Value': value}],
    }
    change_batch = {
        'Comment': 'Update record initiated by r53ddns.py',
        'Changes': [{'Action': 'UPSERT', 'ResourceRecordSet': record_set}],
    }
    response = client.change_resource_record_sets(
        HostedZoneId=zoneid, ChangeBatch=change_batch)
    return response['ChangeInfo']
|
e71cf391dfa2093f74929c6fecd9c36b5af622c6
| 651,732 |
def write_file(path: str, s: str = "") -> str:
    """Write string `s` to `path` and return the path."""
    with open(path, "w") as handle:
        handle.write(s)
        handle.flush()
    return path
|
e430196446745f0f5bb02228ed877a47793c79ce
| 259,270 |
def recursive(fn):
    """
    Lift `fn` to apply recursively through dicts, lists and tuples.

    Args:
        fn: processing function applied to every leaf value

    Returns:
        - a recursive version of the given function, with the same
          extra-argument signature
    """
    def walker(value, *args, **kwargs):
        if isinstance(value, dict):
            return {k: walker(v, *args, **kwargs) for k, v in value.items()}
        if isinstance(value, (list, tuple)):
            mapped = [walker(v, *args, **kwargs) for v in value]
            # Preserve the container type of the input.
            return mapped if isinstance(value, list) else tuple(mapped)
        return fn(value, *args, **kwargs)
    return walker
|
10d9da660d97e01a5f1bed06b3aab79a1480aa0d
| 328,785 |
def sphere_mixed(solution):
    """
    Sphere objective (sum of squares) for mixed optimization.
    """
    return sum(component * component for component in solution.get_x())
|
785e07fc5b230c45b47e19b1490c4b55dedace27
| 336,314 |
def intersect(list1, list2):
    """Return the elements of *list1* that also appear in *list2*.

    Order (and duplicates) of *list1* are preserved. Membership is tested
    with ``in``, so elements need not be hashable.
    """
    common = []
    for item in list1:
        if item in list2:
            common.append(item)
    return common
|
e6ce78cc03ae3ce8957f45b5a367cea49d99f0bf
| 347,082 |
import random
def death_with_chance(p_death: float) -> bool:
    """Roll for death with probability *p_death* (a float in [0, 1]).

    Returns:
        True if the player dies, False if they survive.
    """
    roll = random.random()
    return p_death > roll
|
ad2a88727369e703cee6c345882c873db2104827
| 11,757 |
def get_stride_sequence(stride_desc):
    """Extract the secondary-structure sequence from a Stride description.

    Parameters
    ----------
    stride_desc : list of (str, str, float, float, float)
        The Stride description; the second field of each entry holds the
        secondary-structure code.

    Returns
    -------
    list of str
        The secondary structure sequence.
    """
    return [entry[1] for entry in stride_desc]
|
6cae849c35eab3bd4443320f2d71ca7718798cde
| 319,817 |
def _create_indice(chip):
"""Account for missing indices (Duo and PrixPowr) and create them.
Even if these indices do not actually exist, we still need one for
relational data purposes.
"""
if not chip['game'] == 'bn4':
# Nothing to do here.
return chip['indice']
indice = chip['indice']
if indice == '??':
if chip['name'] == 'PrixPowr':
indice = '39'
if chip['name'] == 'Duo':
indice = '40'
return indice
|
8c07ec1022a47ab59a80cc359715fedb320eb170
| 632,952 |
def true_fov(M, fov_e=50):
    """Calculate the true field of view of a telescope/eyepiece pair.

    Args:
        M (float): magnification of the telescope.
        fov_e (float): apparent FOV of the eyepiece in degrees; default 50.

    Returns:
        float: true field of view in degrees.
    """
    return fov_e / M
|
7735135d326f3000ac60274972263a8a71648033
| 705,808 |
import torch
def load_model(model, path, device):
    """Load saved parameters from *path* into *model* and move it to *device*.

    Args:
        model: the model instance to populate.
        path: file path of the saved ``state_dict``.
        device: the torch device to move the model to.

    Return:
        The model with loaded weights, placed on *device*.
    """
    state_dict = torch.load(path)
    model.load_state_dict(state_dict)
    return model.to(device)
|
6edbc12269a147fe3399836dacdd509e674706a5
| 488,471 |
def trace_overall_index(batch_idx, test_time_indices):
    """
    Given
        batch_idx: the particular program this all corresponds to
        test_time_indices: list of (test, timestep) pairs to select
    Returns (trace_idxs, time_idxs)
        trace_idxs: the indices into the traces to be returned
        time_idxs: the indices into which timesteps should be returned
    """
    trace_idxs = []
    time_idxs = []
    for test, timestep in test_time_indices:
        # This assumes there are exactly 5 tests per program.
        assert test < 5
        trace_idxs.append(batch_idx * 5 + test)
        time_idxs.append(timestep)
    return trace_idxs, time_idxs
|
b063be90d53726d49937d6b6978d75f79617b2c1
| 119,561 |
def civis_api_formatting(soct_type_map):
    """Convert a {column_name: sql_type} mapping into the list-of-dicts
    shape readable by the Civis API.
    """
    columns = []
    for column_name, sql_type in soct_type_map.items():
        columns.append({"name": column_name, "sql_type": sql_type})
    return columns
|
275c50bccfdc0d647b8191c2187ab6a11bde98ef
| 85,211 |
def add_edges_GRNdb(dir_graph, w):
    """
    Given a directed graph and w matrix, adds a directed edge from gene j to
    gene i representing the interaction at w[i, j], if it exists. Edges are
    weighted based on confidence values in GRNdb; 0 marks no interaction,
    1 marks a low confidence interaction, 2 marks a high confidence
    interaction. Returns the graph.
    :param dir_graph: A directed graph of gene interactions
    :type dir_graph: DiGraph
    :param w: A matrix representing perturbation interactions with genes as
        columns and gene names as indices. NOTE: w.shape[1] is used for both
        loop bounds, so w is assumed square.
    :type w: DataFrame
    :output dir_graph: A directed graph of gene interactions with edges to
        represent interactions between genes
    :type dir_graph: DiGraph
    """
    w = w.to_numpy()
    n = w.shape[1]
    for i in range(n):
        for j in range(n):
            # Low (1) and high (2) confidence interactions carry identical
            # edge attributes, so a single branch replaces the duplicated
            # if-statements of the original.
            if w[i, j] in (1, 2):
                dir_graph.add_edge(j, i, color="green", weight=w[i, j])
    return dir_graph
|
c6aaef3f1920cf3f8fddc881304684601772c672
| 219,735 |
def decorator_with_argument(arg1, arg2):
    """
    Function decorator factory with arguments.
    :param arg1: (int) first argument
    :param arg2: (int) second argument
    :return: decorator that wraps the target function
    """
    # Local import keeps this snippet self-contained.
    import functools

    def inner_function(func_to_decorate):
        # functools.wraps preserves the wrapped function's __name__,
        # __doc__ and other metadata (the original wrapper lost them).
        @functools.wraps(func_to_decorate)
        def wrapper(*args, **kwargs):
            print(f"Enter decorator with arguments: {arg1} & {arg2}")
            # Something before
            response = func_to_decorate(*args, **kwargs)
            # Something after
            return response
        return wrapper
    return inner_function
|
3a9936b0ebc2b48b27471311059d79f43491bdb7
| 333,672 |
def __descriptor_dep(dep):
    """Convert a dependency for a source proto into a $(location ...) label."""
    return "$(location {})".format(dep)
|
e42a8c47c5cf582fe16fc3c5aea9f68c33977486
| 275,093 |
def notas(*provas, sit=False):
    """Analyse exam grades for a class.

    :param provas: one or more grades.
    :param sit: whether to also report the class situation.
    :return: dict with the grade count, highest, lowest, mean and
        (when requested) the situation of the class.
    """
    # Keys are kept in Portuguese: callers rely on these exact names.
    resultado = {
        'quantidade': len(provas),
        'maior': max(provas),
        'menor': min(provas),
    }
    resultado['média'] = sum(provas) / resultado['quantidade']
    if sit:
        media = resultado['média']
        if media >= 7:
            situacao = 'BOA'
        elif media >= 5:
            situacao = 'RAZOÁVEL'
        else:
            situacao = 'RUIM'
        resultado['situação'] = situacao
    return resultado
|
ce2e0051a55415f6864bb8665546f230de7e6312
| 151,863 |
def truncate_down_to_maxlen(split_sentence, maxlen):
    """Join at most the first *maxlen* words of *split_sentence* with spaces."""
    kept_words = split_sentence[:maxlen]
    return " ".join(kept_words)
|
a754fbc2b0d140ad4d9c7593954cfdb2e2689e30
| 480,022 |
def cache_filename(path):
    """Turn an absolute path into the filename used in the filecache.

    The leading '/' is dropped, the remaining separators become '.', and
    a '.yaml' suffix is appended.
    """
    dotted = path.replace("/", ".")
    return dotted[1:] + ".yaml"
|
d64dbc27f9f09a59d17c78d45bca4bd2ec1c7c33
| 417,543 |
def solve(s):
    """Return the minimum number of deletions so that no two consecutive
    characters of *s* are equal.

    Each run of k identical characters needs k - 1 deletions, which equals
    the number of adjacent equal pairs in the string.
    """
    return sum(1 for left, right in zip(s, s[1:]) if left == right)
|
00e165127002e60f55a002d1f20e64e3f5d48a66
| 233,591 |
def is_lvm(name):
    """
    Check if a device name is marked as an lvm.

    :param name: device name string
    :return: True if "lvm" occurs in *name*, else False
    """
    # The membership test already yields a bool; the original if/else
    # wrapper was redundant.
    return "lvm" in name
|
8b29fc4b49580cb776529d573c47fdc7b47e7a71
| 689,031 |
def ishexadecimal(value):
    """
    Return whether or not the given value is a hexadecimal number.
    If the value is a hexadecimal number, this function returns ``True``,
    otherwise ``False``.
    Examples::
        >>> ishexadecimal('deadBEEF')
        True
        >>> ishexadecimal('abcdefg')
        False
    :param value: string to validate as a hexadecimal number
    """
    try:
        parsed = int(value, 16)
    except ValueError:
        return False
    # Negative inputs such as '-ff' parse successfully but are rejected.
    return parsed >= 0
|
9bc5b83c87499b495dfd2c878694a22fdc24d574
| 186,848 |
def create_iterable(item_or_iterable):
    """Return the argument unchanged if it is iterable; otherwise wrap it
    in a single-element list.

    Note that strings are iterable and are therefore returned as-is.
    """
    # EAFP: iter() also accepts __getitem__-only sequences, which an
    # isinstance(Iterable) check would wrongly reject.
    try:
        iter(item_or_iterable)
    except TypeError:
        return [item_or_iterable]
    return item_or_iterable
|
ba9402de596065fb995225145d235d99f3369a98
| 383,125 |
def coordinate(latitude, longitude):
    """Build the coordinate data model.

    Parameters
    ----------
    latitude : float
        Decimal degree coordinate in EPSG:4326 projection.
    longitude : float
        Decimal degree coordinate in EPSG:4326 projection.

    Returns
    -------
    coordinate : dict
        Coordinate data model as a dictionary.
    """
    return dict(latitude=latitude, longitude=longitude)
|
8762c4f6b4784408e7bebc48587c1495a93d52c8
| 100,645 |
def loadrepr(reprstr):
    """Returns an instance of the object from the object's repr() string.
    It involves the dynamic specification of code.
    >>> obj = loadrepr('datetime/datetime.datetime.now()')
    >>> obj.__class__.__name__
    'datetime'
    """
    # SECURITY NOTE(review): eval() executes arbitrary code from the input
    # string -- never call this with untrusted data.
    module, evalstr = reprstr.split('/')
    # Inject the imported module into this function's locals so the eval()
    # below can resolve it by name.
    # NOTE(review): mutating the locals() dict is CPython-implementation-
    # specific behaviour -- confirm it still works on the target interpreter.
    mylocals = locals()
    localname = module
    if '.' in localname:
        # For dotted module paths, eval references the top-level package name.
        localname = module.split('.', 1)[0]
    mylocals[localname] = __import__(module)
    return eval(evalstr)
|
c0eaa039c3575992702a2a76cca2e9756561c833
| 660,968 |
def name(cls):
    """Get a nice, human-readable name for this object (its class name)."""
    klass = cls.__class__
    return klass.__name__
|
9db47520de07a1940320380e5171e4560a60ca71
| 643,395 |
def flat_test_name(_id):
    """Return the short form test name from a TestCase ID.

    The first dotted component is dropped; the rest are joined with '-'.
    """
    components = _id.split('.')[1:]
    return '-'.join(components)
|
75bba12d5dca97814ba4d937a329ad5ba0c77df4
| 91,066 |
def load_camera_params(hf, path):
    """Load h36m camera parameters
    Args
        hf: hdf5 open file with h36m cameras data
        path: path or key template inside hf to the camera we are interested in
    Returns
        R: 3x3 Camera rotation matrix
        T: 3x1 Camera translation parameters
        f: (scalar) Camera focal length
        c: 2x1 Camera center
        k: 3x1 Camera radial distortion coefficients
        p: 2x1 Camera tangential distortion coefficients
        name: String with camera id
    """
    def read(key):
        # All parameters live at the same template path, varying by key.
        return hf[path.format(key)][:]

    R = read('R').T  # stored transposed relative to the convention used here
    T = read('T')
    f = read('f')
    c = read('c')
    k = read('k')
    p = read('p')
    # The camera name is stored as an array of character codes.
    name = "".join(chr(code) for code in read('Name'))
    return R, T, f, c, k, p, name
|
765e6542a565d5e135b090795878b369f56f26e7
| 396,986 |
def remove_quotes(t, l, s):
    """Helper parse action that strips the surrounding quotation marks
    from a parsed quoted string.

    Example::
        # by default, quotation marks are included in parsed results
        quoted_string.parse_string("'Now is the Winter of our Discontent'") # -> ["'Now is the Winter of our Discontent'"]
        # attach remove_quotes to strip quotation marks from parsed results
        quoted_string.parse_string("'Now is the Winter of our Discontent'") # -> ["Now is the Winter of our Discontent"]
    """
    quoted = t[0]
    # Drop the first and last characters (the quote marks).
    return quoted[1:-1]
|
61d5eb35183f9ab681d4abbf31ac74494505cd89
| 517,620 |
import time
def formatDate(when):
    """Given a time in seconds since the epoch, returns a date value in the
    format used by server descriptors (YYYY-MM-DD) in GMT.

    (The original docstring claimed 'YYYY/MM/DD', but the code has always
    emitted dash-separated dates.)
    """
    gmt = time.gmtime(when + 1)  # Add 1 to make sure we round down.
    return "%04d-%02d-%02d" % (gmt[0], gmt[1], gmt[2])
|
6b6996a8dd67e1f723b0f2b7dde0b9a822cbd346
| 304,956 |
import inspect
def inspect_function_arguments(function):  # pragma: no cover
    """
    Returns the list of variable names of a function and whether it
    accepts keyword arguments.
    :type function: Callable
    :rtype: tuple[list[str], bool]
    """
    parameters = inspect.signature(function).parameters
    variadic_kinds = {inspect.Parameter.VAR_POSITIONAL,
                      inspect.Parameter.VAR_KEYWORD}
    bound_arguments = [name for name, param in parameters.items()
                       if param.kind not in variadic_kinds]
    has_kwargs = any(param.kind == inspect.Parameter.VAR_KEYWORD
                     for param in parameters.values())
    return bound_arguments, has_kwargs
|
68072892b021e8c24d62f22ee0bf04d4afce1193
| 168,165 |
def removelatex(string):
    """
    Remove latex math-mode markup from a unit string.

    Parameters
    ----------
    string : str
        String possibly containing latex math mode '$' delimiters and
        backslash commands.

    Returns
    -------
    string : str
        Input string with '$' and backslash characters removed.
    """
    # str.replace is a no-op when the character is absent, so the
    # membership pre-checks in the original were redundant.
    return string.replace('$', '').replace('\\', '')
|
06fd070840b0c55ff19bb41db8aadd8842b2abf1
| 252,926 |
def measure_f_score(precision, recall):
    """Compute the F-score F = 2PR / (P + R) from precision P and recall R.

    Returns 0 when both precision and recall are 0, avoiding the division
    by zero that the formula would otherwise produce.
    """
    denominator = precision + recall
    if denominator == 0:
        return 0
    return 2 * precision * recall / denominator
|
cd88040240d0f95a17691b840eea8a25992d41d5
| 135,518 |
import torch
def purge_unfeasible(x, res):
    """
    Zero out events whose locations fall outside the image space.
    :param x: location of motion compensated events; last dim holds the
        two coordinates compared against res[0] and res[1] respectively
    :param res: resolution of the image space
    :return masked event locations
    :return mask for interpolation weights
    """
    batch, n_events = x.shape[0], x.shape[1]
    mask = torch.ones((batch, n_events, 1)).to(x.device)
    # Bool tensors combined with '+' behave as element-wise OR here.
    out_of_bounds_y = (x[:, :, 0:1] < 0) + (x[:, :, 0:1] >= res[0])
    out_of_bounds_x = (x[:, :, 1:2] < 0) + (x[:, :, 1:2] >= res[1])
    mask[out_of_bounds_y + out_of_bounds_x] = 0
    return x * mask, mask
|
8e7cd559ced55c9cb20b80184c503cc615176673
| 585,001 |
def reformat_persian_date(date: str) -> str:
    """
    Replace full spaces between weekday words with half-spaces, per
    Persian orthography rules.
    """
    # NOTE(review): each replacement pair below renders identically; the
    # target strings presumably use U+200C (zero-width non-joiner) instead
    # of a regular space -- verify the raw bytes before editing these
    # literals, as the difference is invisible in most editors.
    return date\
        .replace('یکشنبه', 'یکشنبه')\
        .replace('سه شنبه', 'سهشنبه')\
        .replace('پنجشنبه', 'پنجشنبه')
|
6bb8100299ad6390be02dfe92df06dd060a6bcb7
| 429,767 |
import itertools
def powerset(iterable):
    """Yield all subsets of the given finite iterable.

    Based on code from the itertools docs.
    Arguments:
    iterable -- finite iterable
    We yield all subsets of the set of all items yielded by iterable.
    >>> sorted(list(powerset([1,2,3])))
    [(), (1,), (1, 2), (1, 2, 3), (1, 3), (2,), (2, 3), (3,)]
    """
    items = list(iterable)
    subsets_by_size = (itertools.combinations(items, size)
                       for size in range(len(items) + 1))
    return itertools.chain.from_iterable(subsets_by_size)
|
8bcc95585393e2790a8081337aeb6e9d54173b3d
| 26,767 |
def make_network_unweighted(n_df, wgt):
    """
    Make the network unweighted by setting the weight on every edge to the
    same value (1).
    Parameters:
        n_df (list): the data, one row per edge
        wgt (int): the weight column index
    Returns:
        list: a new list with every weight replaced by 1
    """
    rows = []
    for row in n_df:
        rows.append(row[:wgt] + [1] + row[wgt + 1:])
    return rows
|
d3d54c10535924b3c9f23f7c2bf5d578b63ad9d8
| 145,669 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.