content (string, 39 – 9.28k chars) | sha1 (string, 40 chars) | id (int64, 8 – 710k)
---|---|---|
def difference_of(lhs, rhs):
"""
Computes the difference of two posting lists, that is, the elements
in the first list which are not elements in the second list.
"""
i = 0
j = 0
answer = []
while (i < len(lhs)):
if (j == len(rhs) or lhs[i] < rhs[j]):
answer.append(lhs[i])
i += 1
elif (rhs[j] < lhs[i]):
j += 1
else:
i += 1
return answer
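# Usage sketch (illustrative addition, not from the original source); both
# posting lists are assumed to be sorted, as the merge-style walk requires:
postings_a = [1, 3, 5, 7, 9]
postings_b = [3, 4, 7, 10]
print(difference_of(postings_a, postings_b))  # [1, 5, 9]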
|
aa9970717968773d09cc6048880f570edb7f6f24
| 389,400 |
def sql_exec(node, sql, quiet=False):
"""
Wrapper around NoisePageServer.execute() when no results are expected.
Parameters
----------
node : NoisePageServer
The node to execute the SQL on.
sql : str
The SQL string to be executed.
quiet : bool
Passed through to NoisePageServer.execute().
"""
return node.execute(sql, expect_result=False, quiet=quiet)
|
83acf93d25521e58eb128773bfa80aeccab406b4
| 419,831 |
from pathlib import Path
def get_root_dir() -> Path:
"""Return the root directory of the project."""
return Path(__file__).absolute().parent.parent
|
0640b7e605e42b4ebc0a1c4bdb94fba9d619af4f
| 469,459 |
from typing import Callable
def createPoly(a: float, b: float, c: float, d: float) -> "Callable":
"""Returns a function representing a polynomial that can be evaluated for an x."""
return lambda x: a * x ** 3 + b * x ** 2 + c * x + d
|
f9163e44ff79776618e9cc4f0a409537e09d84b9
| 567,749 |
import re
def tsqueryWrapVal(val):
"""
Check if a value contains any special characters. If it does, wrap it
in parentheses.
:param val: value to possibly wrap in parentheses.
:return: value that has been wrapped as needed.
"""
if len(re.split('[&|!()]', val, 1)) > 1:
return '(' + val + ')'
return val
|
e433b13d28746c23e04e109a5406a3814672aa3a
| 602,775 |
def _is_vulnerability_file(source_repo, file_path):
"""Return whether or not the file is a Vulnerability entry."""
return file_path.endswith(source_repo.extension)
|
c11c14fbfc2471ddeac0615338c14e5a092648d9
| 561,929 |
def vapor_phase_sat_conc(MW, vapour_pressure, temperature, R=8.314):
"""
Return the vapor phase saturation concentration [kg/m³]
Parameters
----------
MW : Molecular weight [kg/mol]
vapour_pressure : Vapour pressure [Pa]
temperature : Temperature [K]
R : Perfect gas constant, the default is 8.314 [J/mol K]
source:ALOHA
"""
return MW * vapour_pressure /(temperature * R)
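# Worked example (illustrative values, not from the source): water vapour at
# 25 degC, MW = 0.018 kg/mol, vapour pressure ~3169 Pa -> MW*P/(R*T) ~= 0.023 kg/m^3
print(round(vapor_phase_sat_conc(0.018, 3169, 298.15), 4))  # ~0.023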
|
8136990d00fcef80edfeba9446fe80baadfeb195
| 263,303 |
def is_array(output):
"""Check if `output` behaves as np.array (simple)."""
return hasattr(output, 'shape')
|
02623f4bbd1d29bbc494e17f9b5ae22c51dcad98
| 507,486 |
def possible_sums(interval):
"""All sums of two numbers in list where numbers are not equal."""
return {i+j for i in interval for j in interval if i!=j}
|
2f33750286ebfe9a048c6369da550ca83e84ff8f
| 613,672 |
def argsort(seq):
""" Stable version of argsort """
# http://stackoverflow.com/questions/3382352/equivalent-of-numpy-argsort-in-basic-python/3382369#3382369
return sorted(range(len(seq)), key=seq.__getitem__)
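# Usage sketch (illustrative addition): the returned indices sort the sequence,
# and ties keep their original order because Python's sort is stable.
seq = [3, 1, 2, 1]
order = argsort(seq)
print(order)                    # [1, 3, 2, 0]
print([seq[i] for i in order])  # [1, 1, 2, 3]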
|
9ccd0f2c3267dcec10db7c9e7668d5f3574050ea
| 634,582 |
def _small_mie(m, x):
"""
Calculate the efficiencies for a small sphere.
Typically used for small spheres where x<0.1
Args:
m: the complex index of refraction of the sphere
x: the size parameter of the sphere
Returns:
qext: the total extinction efficiency
qsca: the scattering efficiency
qback: the backscatter efficiency
g: the average cosine of the scattering phase function
"""
m2 = m * m
x2 = x * x
D = m2 + 2 + (1 - 0.7 * m2) * x2 - (8 * m**4 - 385 * m2 + 350) * x**4 / 1400.0 + \
2j * (m2 - 1) * x**3 * (1 - 0.1 * x2) / 3
ahat1 = 2j * (m2 - 1) / 3 * (1 - 0.1 * x2 + (4 * m2 + 5) * x**4 / 1400) / D
bhat1 = 1j * x2 * (m2 - 1) / 45 * (1 + (2 * m2 - 5) /
70 * x2) / (1 - (2 * m2 - 5) / 30 * x2)
ahat2 = 1j * x2 * (m2 - 1) / 15 * (1 - x2 / 14) / \
(2 * m2 + 3 - (2 * m2 - 7) / 14 * x2)
T = abs(ahat1)**2 + abs(bhat1)**2 + 5 / 3 * abs(ahat2)**2
temp = ahat2 + bhat1
g = (ahat1 * temp.conjugate()).real / T
qsca = 6 * x**4 * T
if m.imag == 0:
qext = qsca
else:
qext = 6 * x * (ahat1 + bhat1 + 5 * ahat2 / 3).real
sback = 1.5 * x**3 * (ahat1 - bhat1 - 5 * ahat2 / 3)
qback = 4*abs(sback)**2/x2
return [qext, qsca, qback, g]
|
d3bc0ea553ee91b8319a7a7af270736180858df2
| 512,303 |
def _int_to_bin(x: int) -> str:
"""Converts an integer to a binary string representation.
"""
return bin(x)
|
ec310aab312ca2323b79cd90a03753faa649fbef
| 476,267 |
def impl_ret_new_ref(ctx, builder, retty, ret):
"""
The implementation returns a new reference.
"""
return ret
|
44bd7d083d817396f175f29d792d1267b8fcf914
| 173,743 |
import torch
def eigenvector_22(x:torch.Tensor):
"""return eigenvector of 2x2 symmetric matrix using closed form
https://math.stackexchange.com/questions/8672/eigenvalues-and-eigenvectors-of-2-times-2-matrix
The calculation is done by using double precision
Args:
x (torch.Tensor): (..., 2, 2), symmetric, semi-definite
Return:
v1 (torch.Tensor): (..., 2)
v2 (torch.Tensor): (..., 2)
"""
# NOTE: must use double precision here! with float the back-prop is very unstable
a = x[..., 0, 0].double()
c = x[..., 0, 1].double()
b = x[..., 1, 1].double() # (..., )
delta = torch.sqrt(a*a + 4*c*c - 2*a*b + b*b)
v1 = (a - b - delta) / 2. /c
v1 = torch.stack([v1, torch.ones_like(v1, dtype=torch.double, device=v1.device)], dim=-1) # (..., 2)
v2 = (a - b + delta) / 2. /c
v2 = torch.stack([v2, torch.ones_like(v2, dtype=torch.double, device=v2.device)], dim=-1) # (..., 2)
n1 = torch.sum(v1*v1, keepdim=True, dim=-1).sqrt()
n2 = torch.sum(v2*v2, keepdim=True, dim=-1).sqrt()
v1 = v1 / n1
v2 = v2 / n2
return v1.float(), v2.float()
|
d3b84725f3c4c29991ed75b78d246dc873dda96b
| 545,517 |
def apply_auto(line, target, value):
# type: (str, str, str) -> str
"""
Replace the target string with the value in line
"""
start_idx = line.find(target)
if start_idx == -1:
return line
end_idx = start_idx + len(target)
return line[:start_idx] + str(value) + line[end_idx:]
|
4a1685e585589ca588273ee9de08baf5bc2e0b93
| 178,893 |
import re
def _CanonicalizePath(secret_path):
"""Canonicalizes secret path to the form `/mount_path:/secret_file_path`.
Gcloud secret path is more restrictive than the backend (shortn/_bwgb3xdRxL).
Paths are reduced to their canonical forms before the request is made.
Args:
secret_path: Complete path to the secret.
Returns:
Canonicalized secret path.
"""
secret_path = re.sub(r'/+', '/', secret_path)
mount_path, _, secret_file_path = secret_path.rpartition('/')
if ':' in secret_path:
mount_path, _, secret_file_path = secret_path.partition(':')
mount_path = mount_path[:-1] if mount_path.endswith('/') else mount_path
secret_file_path = '/' + secret_file_path if not secret_file_path.startswith(
'/') else secret_file_path
return mount_path + ':' + secret_file_path
|
23172730253065d846e2124af191203e7849e59f
| 583,222 |
def truncate_path(full_path, chars=30, skip_first=True):
"""Truncate the path of a category to the number of character.
Only the path is truncated by removing nodes, but the nodes
themselves are never truncated.
If the last node is longer than the ``chars`` constraint, then it is
returned as is (with ``None`` for the first and inner nodes.)
This is the only case where the ``chars`` constraint might not be
respected.
If ``skip_first`` is ``True``, the first node will be skipped,
except if the path has 1 or 2 nodes only and the first node fits
under the ``chars`` constraint
:param full_path: list -- all the nodes of the path in order
:param chars: int -- the desired length in characters of the path
:param skip_first: bool -- whether to ignore the first node or not
If the path only has 1 or 2 nodes, then the first
node will not be skipped if it can fit within the
``chars`` constraint.
:returns: tuple -- the first node, the inner nodes, the last node
and whether the path was truncated. If any of the values
have been truncated, they will be ``None``.
"""
truncated = False
if skip_first:
skip, full_path = full_path[:1], full_path[1:]
skip = skip[0] if skip else None
else:
skip = None
if not full_path:
return None, None, skip, truncated
if len(full_path) == 1:
if skip is not None and len(skip) + len(full_path[0]) <= chars:
return skip, None, full_path[0], truncated
else:
return None, None, full_path[0], skip is not None # only truncated if we skip the first
first_node, inner, last_node = full_path[0], full_path[1:-1], full_path[-1]
char_length = len(last_node)
if char_length + len(first_node) > chars:
first_node = None
truncated = True
else:
char_length += len(first_node)
if not inner:
return first_node, None, last_node, truncated
path = []
prev = inner.pop()
while char_length + len(prev) <= chars:
char_length += len(prev)
path.append(prev)
if not inner:
break
prev = inner.pop()
else:
truncated = True
return first_node, path[::-1], last_node, truncated
|
b3ced4c6114b75fd6702b7f06f791f3d5008c399
| 621,054 |
import queue
def queue_to_list(q):
"""
Get all the values in a :class:`queue.Queue` object and return a list.
"""
l = []
while True:
try:
l.append(q.get_nowait())
except queue.Empty:
return l
|
c285485440b634a14f88a61c54a56ea6665fccc3
| 458,121 |
def get_width(img):
"""
Returns the number of columns in the image
"""
return len(img[0])
|
2e70d54c78c9bf4b53f92f5233aae3f554baf3e9
| 189,171 |
def asm_label(address):
"""
Return a local label name for asm at <address>.
"""
return '.asm_%x' % address
|
81a9f6a722b22bb0ccaaec9909e0e840baa7ab54
| 693,120 |
def load_txt_file(file_path):
"""Load data or string from txt file."""
with open(file_path, 'r') as cfile:
content = cfile.readlines()
content = [x.strip() for x in content]
num_lines = len(content)
return content, num_lines
|
30d02da5f445e47d30b465191f8b660452c3ce31
| 343,432 |
def n_lexemes_for_lemma(conn, language_code, lemma) -> int:
"""Get the number of dictionary entries with ``lemma`` as headword.
:param conn: The database connection for the dictionary.
:param str language_code: ISO 639-3 language code of the language of interest.
:param lemma: A dictionary that contains the keys ``graphic`` and
``phonetic``.
:return: The number of entries in the dictionary that have the specified
lemma as one of its headwords.
"""
return next(conn.cursor().execute(
'SELECT COUNT(DISTINCT entry_id) '
' FROM lemmas '
' WHERE language = ? AND graphic = ? AND phonetic = ?',
(language_code, lemma['graphic'], lemma['phonetic'])))[0]
|
9b66d78931921127afa5d0739d86f00d649f9b21
| 665,785 |
def user_enabled(inst, opt):
"""
Check whether the option is enabled.
:param inst: instance from content object init
:param opt: Option to be checked
:return: True if enabled, False if disabled or non present
"""
return opt in inst.settings and inst.settings[opt]
|
3b2a5a1534ff779178eb4bd6b839b66c0b07864f
| 709,538 |
def get_next_code(code):
"""
Generate the next code from the code that precedes it.
"""
return code * 252533 % 33554393
|
2dbcae9d86ff9bbae9cc6ebfa0c36279b22bd0c2
| 426,796 |
from typing import Optional
def get_result_state(code: int) -> Optional[str]:
"""Translate an result state code to a human-readable state.
Official mapping is available
`here <https://github.com/BOINC/boinc/blob/master/py/Boinc/boinc_db.py>`.
Args:
code:
The code of result state.
Returns:
The human-readable state related to the code.
"""
states = {
0: "NEW",
1: "FILES_DOWNLOADING",
2: "FILES_DOWNLOADED",
3: "COMPUTE_ERROR",
4: "FILES_UPLOADING",
5: "FILES_UPLOADED",
6: "ABORTED",
7: "UPLOAD_FAILED",
}
return states.get(int(code))
|
b62b0abded6967bc0ebf885e77c3ea322ed7d763
| 393,855 |
def reduce_to_nonempty(objs):
"""Remove from a list all objects that don't follow ``obj.empty==True``."""
objs_reduced = []
ids = []
for i, obj in enumerate(objs):
assert hasattr(obj, "empty"), (
"Expected object with property 'empty'. Got type %s." % (
type(obj),))
if not obj.empty:
objs_reduced.append(obj)
ids.append(i)
return objs_reduced, ids
|
4ccab5d5cbcb3ac4c3486d30ba6844454c4521f2
| 502,356 |
def patch_save_intermediate_df(mocker):
"""Patch the save_intermediate_df function."""
return mocker.patch("src.make_feedback_tool_data.make_data_for_feedback_tool.save_intermediate_df")
|
6c668407857c3ddbd1b3a29aa5e0e24dfae19d72
| 659,565 |
def extend_list_in_dict(dictionary, key, *args):
""" Add a value to a list inside a dictionary, automatically creates list. """
assert isinstance(dictionary, dict)
if key in dictionary:
assert isinstance(dictionary[key], list)
else:
dictionary[key] = []
dictionary[key].extend(args)
return dictionary
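# Usage sketch (illustrative addition): the list is created on first use.
d = {}
extend_list_in_dict(d, "evens", 2, 4)
extend_list_in_dict(d, "evens", 6)
print(d)  # {'evens': [2, 4, 6]}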
|
92ab2af3a14d1543fc2526c37d565723a637923d
| 564,739 |
def strip_leading_characters(string, num):
"""Returns the input string after removing `num` leading characters"""
return string[num:]
|
d50f37b6e445349b52427a70f3e031a572174afd
| 385,222 |
import time
def calc_seconds(t_data,
sep_date_clock=' ', sep_date='-', sep_clock=':',
is_float=False):
"""
Returns time in seconds after 1/1/1970.
Time format for time data string used:
2012-05-02 12:57:08.0
"""
if len(t_data.split(sep_date_clock)) != 2:
return -1
(date, clock) = t_data.split(sep_date_clock)
if (len(clock.split(sep_clock)) == 3) and (len(date.split(sep_date)) == 3):
(year_str, month_str, day_str) = date.split(sep_date)
# print ' year_str,month_str,day_str',year_str,month_str,day_str
(hours_str, minutes_str, seconds_str) = clock.split(sep_clock)
# print ' hours_str,minutes_str,seconds_str',hours_str,minutes_str,seconds_str
t = time.mktime((int(year_str), int(month_str), int(day_str),
int(hours_str), int(minutes_str), int(float(seconds_str)), -1, -1, -1))
# print 'calc_seconds',t
# print ' t_data'
# print ' tupel',int(year_str),int(month_str),int(day_str), int(hours_str),int(minutes_str),int(float(seconds_str)),0,0,0
if is_float:
return t
else:
return int(t)
else:
return -1
|
0666763b861b8b02cb3caf5e4b74a75d552d8dc4
| 253,512 |
def average(*args):
""" returns simple average of list of values """
val = 0.
for arg in args:
val += arg
return val / len(args)
|
934efd20d6b030bedd5d867040443d664c31e108
| 273,599 |
def label(self):
"""
Returns:
label (string): name for the hazard category
"""
try:
value = self._label
except AttributeError:
value = None
return value
|
0cc08e0b62420f9eb92da64b4636191c72306091
| 691,344 |
from io import BytesIO
from io import StringIO
from zipfile import ZipFile
import requests
def get_zip_file(url, filepath):
"""Gets zip file from url.
Args:
url: A string, the url of zip file.
filepath: A string, the file path inside the zip file.
Returns:
A StringIO object containing the decoded text of the requested file.
"""
zipfile = ZipFile(BytesIO(requests.get(url).content))
file = zipfile.open(filepath).read().decode('utf8')
return StringIO(file)
|
6630fadd083523a3575acebf9a3db49bb5aa8537
| 631,437 |
def format_icd9_code(code):
"""Nicely format an ICD9 code into the form used in the bayes_hcup_ccs table."""
return "'{:5}'".format(code.replace('.', ''))
|
bb7f5f6581b71e875015c96037968f3b1dd74ede
| 467,318 |
from typing import Tuple
from typing import Union
def count_things(collection: Tuple, thing: Union[int, str]) -> int:
"""
This function counts the number of occurrences of the thing value within the collection parameter.
:param collection: A tuple containing 0 or more things
:param thing: An item in the collection parameter
:return: An integer.
"""
result = collection.count(thing)
return result
|
0fca1c6fc87ba169a0c041820abd4274ab5f8141
| 472,774 |
import codecs
def _load_from_json(json_object: dict) -> object:
"""
Load from JSON using custom schema (see code).
Args:
`json_object`: serialized JSON object as a Python dict.
Returns:
`python_object`: object hook for `json.load` function.
"""
if "__class__" in json_object and json_object["__class__"] == "bytes":
return codecs.decode(json_object["__value__"].encode(), "base64")
return json_object
|
fd0500d57ae6604c32c0967c47df3537000e906e
| 464,473 |
def progress_rate1(v_i, x, k):
"""Returns the progress rate for a reaction (v_A)A+(v_B)B --> (v_C)C
INPUTS
======
v_i: a list of floats representing Stoichiometric coefficients of reactants
ordered according to the reaction form, left to right
x: a list of floats representing the concentration of each reactant and product
ordered according to the reaction form, left to right
k: float, reaction rate coefficient
RETURNS
=======
progress rate for the reaction: a float
EXAMPLES
========
>>> progress_rate1([2.0, 1.0, 0.0], [1.0, 2.0, 3.0], 10)
20.0
"""
r=1
if (len(v_i) != len(x)):
raise ValueError("Lengths do not match.")
for v, xi in zip(v_i, x):
r *= xi**v
w = k*r
return w
|
fe24a2aa9a5afe7a3a25d695eeca771e87441a45
| 155,513 |
import re
def squash_whitespace(text):
""" Combine multiple whitespaces into one, trim trailing/leading spaces
>>> squash_whitespace(' some\t text with a lot of spaces ')
'some text with a lot of spaces'
"""
return re.sub(r'\s+', ' ', text.strip())
|
a9b7dd6ef0e90b69e62d31966af305cae4b942f2
| 204,062 |
import io
import csv
def radio_id_csv() -> io.StringIO:
"""
Generates a placeholder RadioIDList.csv
"""
header = ["No.", "Radio ID", "Name"]
sio = io.StringIO()
writer = csv.writer(sio, dialect="d878uvii")
writer.writerow(header)
placeholder_radio_id = ("1", "268000", "CT0ZZZ")
writer.writerow(
[
f"{placeholder_radio_id[0]}",
f"{placeholder_radio_id[1]}",
f"{placeholder_radio_id[2]}",
]
)
return sio
|
05aceba61149238e0734618eaf3ae8f59adbf8ab
| 75,229 |
def convert(x_s):
"""
This function converts the raw hokuyo data into meters.
:param x_s: scaled data
:return: float64 -- distance in meters
"""
scaling = 0.005 # 5 mm
offset = -100.0
x = x_s * scaling + offset
return x
|
421c3344a2a0b48ed0c49fb0a5cf982cc380d7bc
| 303,345 |
def _format_tags(line, slice_):
"""
Create or format tags.
Parameters
----------
line: Line to add a tag.
slice_: Tag interval.
Returns
-------
str: formatted html tag
"""
map_formatted_tags = map(lambda tag: '{}: {}'.format(tag, line[tag]), slice_)
return '<br/>'.join(map_formatted_tags)
|
86d01eae1d81444183b08dc993db584ff795c6df
| 597,686 |
def merge(x, y):
"""Merge two ordered lists
:param x:
:param y: x,y are non decreasing ordered lists
:returns: union of x and y in order
:complexity: linear
"""
z = []
i = 0
j = 0
while i < len(x) or j < len(y):
if j == len(y) or i < len(x) and x[i] <= y[j]: # priority on x
z.append(x[i])
i += 1
else:
z.append(y[j])
j += 1
return z
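# Usage sketch (illustrative addition): duplicates from both inputs are kept.
print(merge([1, 3, 5], [2, 3, 6]))  # [1, 2, 3, 3, 5, 6]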
|
d7e4f77be7e1c46d244038d52ba618d985082a37
| 616,300 |
def get_countries_per_tile(cache, tile_id):
"""Given a tile id it returns the country list"""
return cache[tile_id]['countries']
|
a63350624f87561bc97eeb7d363ab7c3bb333e94
| 499,689 |
def max_number(number: int) -> int:
"""Returns number starting from the largest item.
Args:
number (int): input number
Examples:
>>> assert max_number(132) == 321
"""
return int("".join(sorted(tuple(str(number)), reverse=True)))
|
2da3c9fb139d971cc794733112a2340675631e9e
| 580,211 |
from typing import OrderedDict
def bs3_inverse_cols(sizes, *, offset=False, grid=12):
"""
Convert a BS3 column sizes dict into a css class string,
but calculate the inverse used for the labels on the left.
The `sizes` may be a list of tuples instead of a dict.
The `offset` argument is useful when a label is not present,
and space needs to be padded.
Set `grid` for a non-standard number of grid columns.
>>> bs3_inverse_cols({'md': '6', 'xs': '8'})
'col-md-6 col-xs-4'
>>> bs3_inverse_cols({'md': '6', 'xs': '8'}, offset=True)
'col-md-offset-6 col-xs-offset-4'
>>> bs3_inverse_cols({'md': '6', 'xs': '8'}, grid=16)
'col-md-10 col-xs-8'
"""
if not isinstance(sizes, dict):
sizes = OrderedDict(sizes)
fmt = 'col-{k}-{v}' if not offset else 'col-{k}-offset-{v}'
return ' '.join([
fmt.format(k=k, v=grid - int(v))
for k, v in sizes.items()
])
|
b3fab534e632011c7b67f4ca0b00e5cf96291c8c
| 221,806 |
def geojson_to_tuples(bounding_box):
"""
Given a GeoJSON polygon, returns in tuple format
(lat(y) min, lat(y) max, long(x) min, long(x) max)
"""
lat_max = bounding_box["coordinates"][0][0]
long_min = bounding_box["coordinates"][0][1]
long_max = bounding_box["coordinates"][1][1]
lat_min = bounding_box["coordinates"][2][0]
return (lat_min, lat_max, long_min, long_max)
|
de9267ab026c9b39fbb8b67fa12caf43dca42993
| 479,091 |
from typing import Callable
import requests
from typing import Any
def send_get_request(url: str,
filter_function: Callable[[requests.Response], Any] =
lambda result: result.json()) -> Any:
"""
The function sends a get request to the given url and returns the result based on the filter
function.
NOTE: This function assumes a raw JSON object will be returned from the get call.
url: The url of the address where the get request is to be made to.
filter_function: A function that takes a requests.Response object and returns a result based on
the function. This is an optional parameter, and if not supplied, a json response will be sent.
return: resulting data passed through the filter function
"""
res = requests.get(url)
return filter_function(res)
|
d52a9942c1d213af38e65c5f09ac11f1473d85a9
| 111,288 |
def rect_corners(rect):
""" Returns cornerpoints of given rectangle.
>>> rect_corners((1,2,1,3))
((1, 2), (2, 2), (2, 5), (1, 5))
"""
tl = (rect[0],rect[1])
tr = (rect[0]+rect[2],rect[1])
br = (rect[0]+rect[2],rect[1]+rect[3])
bl = (rect[0],rect[1]+rect[3])
return (tl,tr,br,bl)
|
bf003fd4366afd66c8f8a987df86a1f306fc6152
| 259,247 |
import struct
def inet_ntoa(packed_ip):
"""inet_ntoa(packed_ip) -> ip_address_string
Convert an IP address from 32-bit packed binary format to string format"""
return "{}.{}.{}.{}".format(*struct.unpack("BBBB", packed_ip))
|
96d092a7277732de393ddb9379fe9b6f22f0e7a3
| 303,064 |
def eeff(e_1, nu_1, e_2, nu_2):
"""
Calculate the effective (Young's) modulus of two contact bodies according
to Hertzian contact theory.
Parameters
----------
e_1: ndarray, scalar
The Young's modulus of contact body 1.
nu_1: ndarray, scalar
The Poisson ratio of contact body 1.
e_2: ndarray, scalar
The Young's modulus of contact body 2.
nu_2: ndarray, scalar
The Poisson ratio of contact body 2.
Returns
-------
e_eff: scalar
The effective modulus.
"""
e_eff = 1 / ((1 - nu_1 ** 2) / (2 * e_1) + (1 - nu_2 ** 2) / (2 * e_2))
return e_eff
|
29472661a213565a9d195466e7982c915aab2c1b
| 508,453 |
def ascii_distance(first, second):
"""
returns the positive distance between the ascii values of input characters
assume: inputs are valid single letters
e.g. inputs ("a", "b") should return 1,
inputs ("b", "a") should return 1 too (postive distance)
"""
return abs(ord(first)-ord(second))
|
6c1bd591e13fccf14dd17249f392e03ce59c7126
| 668,922 |
def find_circle_size(v):
"""
Determines the dimension of the square in which the number is
ex: 4 is a number on the outer layer of the 3x3 circle => 3
"""
for i in range(1, v, 2):
if i ** 2 < v and (i+2) ** 2 >= v:
return i+2
return 1
|
eea8f3ce35542fcad35107bf1083abf9ace06fc1
| 588,085 |
def build_permission_tuple(obj_type, unbound_permission, obj_parts):
"""Returns a tuple of (object_uri, unbound_permission)"""
PARTS_LENGTH = {
'bucket': 3,
'collection': 5,
'group': 5,
'record': 7
}
if obj_type not in PARTS_LENGTH:
raise ValueError('Invalid object type: %s' % obj_type)
if PARTS_LENGTH[obj_type] > len(obj_parts):
raise ValueError('You cannot build child keys from their parent key. '
'Trying to build type "%s" from object key "%s".' % (
obj_type, '/'.join(obj_parts)))
length = PARTS_LENGTH[obj_type]
return ('/'.join(obj_parts[:length]), unbound_permission)
|
b6f559ae7fdb980a0d3aec4efd4fe09f88810ac7
| 210,883 |
import math
def is_prime(number):
"""Determines if a single number is prime or not.
"""
if(number <= 1):
return False
if(number == 2):
return True
a = int(math.sqrt(number))+1
divisor = 3
if((number % 2) == 0):
return False
while(divisor < a):
if((number % divisor) == 0):
return False
divisor += 2
return True
|
9104b7379530ba2cc0436306ad38880f586c140a
| 318,446 |
def get_jobs_by_type(data_dict):
"""
Examines 'algo' and creates new dict where the key is
the value of 'algo' and value is a list of
jobs (each one a dict) run with that 'algo'
:param data_dict:
:return:
:rtype: dict
"""
jobtype_dict = dict()
for entry in data_dict:
if data_dict[entry]['algo'] not in jobtype_dict:
jobtype_dict[data_dict[entry]['algo']] = []
jobtype_dict[data_dict[entry]['algo']].append(data_dict[entry])
return jobtype_dict
|
1dbabc8327b96601a9f8ff8b980f86908b648f51
| 67,024 |
import json
def load_settings(filename):
"""Load settings from JSON and returns config object."""
with open(filename) as f:
config = json.load(f)
return config
|
b380a95a899a465319fa21b9363b83586e55de9e
| 489,166 |
import shutil
def command_exists(cmd):
""" Does this command even exists? """
path = shutil.which(cmd)
if path is None:
return False
return True
|
21361505be6b7d332b9c47217d1db647db033408
| 573,764 |
def _ra2ha(ra, lst):
"""
Converts a right ascension to an hour angle.
"""
return (lst - ra / 15.0) % 24
|
c1c059c673c83140da89b53303800c2611ef1c2c
| 401,725 |
def convertColor(col,toint=True):
"""
This function will convert a color array [r,g,b] from range 0-255
to range 0.-1. (or vice versa)
@type col: array
@param col: the color [r,g,b]
@type toint: boolean
@param toint: direction of the conversion; if true convert to 0-255, if false
convert to range 0-1
@rtype: array
@return: the converted color [0-1.,0-1.,0-1.] or [0-255,0-255,0-255]
"""
# list comprehensions keep the return type a list on Python 3 (map() would return an iterator)
if toint and max(col) <= 1.0: col = [x * 255 for x in col]
elif not toint and max(col) > 1.0: col = [x / 255. for x in col]
return col
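# Usage sketch (illustrative addition) of the round trip:
print(convertColor([1.0, 0.5, 0.0]))                 # [255.0, 127.5, 0.0]
print(convertColor([255, 127.5, 0.0], toint=False))  # [1.0, 0.5, 0.0]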
|
cb7fab4793379e147d133a3cc231c8c32c3b9d4d
| 139,385 |
def FindReviewee( jsondata ):
""" Find the person who is being reviewed's name. """
return jsondata['header']['properties']['value']['PlayerName']['value']['str_property']
|
2d0b1d93fef2f8c5f1aefcfa6c164289a8f76538
| 245,897 |
import torch
def coordinates(x, y, start=-1, end=1):
"""
Returns a map of coordinates with x rows and y columns.
Input:
- x: rows
- y: columns
Returns:
- xy_coords: 1 x 2 x 'x' x y
"""
x_row = torch.linspace(start, end, steps=y) # y
y_row = torch.linspace(start, end, steps=x) # x
x_coords = x_row.unsqueeze(0).expand(x, y).unsqueeze(0) # 1 x y
y_coords = y_row.unsqueeze(1).expand(x, y).unsqueeze(0) # 1 x y
# 1 2 x y
return torch.autograd.Variable(torch.cat([x_coords, y_coords], 0).unsqueeze(0))
|
e38356ca64127ad44de2bb3090c0c834dbd7d4ee
| 246,053 |
import torch
def generalized_distance_matrix_torch(X,Y):
"""
Returns the distances between all datapoints from X and Y,
considering each datapoint to be a row vector in each matrix individually.
Notes
-----
Let $X \in R^{n_x \times k}, Y \in R^{n_y \times k}$, the distance between all
points in X to all points in Y is:
$D = \mathrm{diag}(XX^T) \mathbf{1}^T_{n_y} + \mathbf{1}_{n_x} \mathrm{diag}(YY^T)^T -2 XY^T$
where $D_{(i,j)} = || x_i - x_j ||^2$ , i.e. the squared euclidean distance.
"""
n_x,k_x = X.shape
n_y,k_y = Y.shape
dev = X.device
assert k_x == k_y, 'Number of cols of data X is %d and of Y is %d'%(k_x, k_y) # dimensionality of vector spaces must be equal
diag_x = torch.zeros((n_x, 1)).to(dev)
diag_y = torch.zeros((1, n_y)).to(dev)
for i in range(n_x):
diag_x[i] = torch.dot(X[i], X[i])
for j in range(n_y):
diag_y[0, j] = torch.dot(Y[j], Y[j])
g1 = diag_x @ torch.ones((1, n_y), device=dev)
g2 = torch.ones((n_x, 1), device=dev) @ diag_y
D = g1 + g2 - 2*[email protected]
return torch.sqrt(D)
|
2fade3144d8482b76b1b5aaeac6c6f3df1507ae3
| 592,130 |
def int2RGB(i: int):
"""Break down int color i to its
byte valued r, g and b
components
Args:
i: int color value
Returns:
r: byte, g: byte, b: byte
"""
return i >> 16, (i >> 8) & 0xff, i & 0xff
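# Usage sketch (illustrative addition): unpacking a 24-bit colour value.
print(int2RGB(0xFF8040))  # (255, 128, 64)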
|
fb63897048f3d98bee2fb2990be6c83e0cf745a2
| 545,106 |
def get_child(node, tag_name):
"""Analog of JS's node.getElementsByTagName(tag_name)[0].
Args:
node: parent node
tag_name: tag name
Returns:
Returns first matching child element
Raises:
IndexError if unable to find the node
"""
return [
child
for child in node.childNodes
if child.nodeType != child.TEXT_NODE
and child.tagName == tag_name
][0]
|
1922013bc1a8079d6060404e6c5ce5b7ede5673a
| 167,345 |
def _compareRegionXS(region1, region2, tolerance, verbose):
"""Compare the macroscopic cross sections between two homogenized regions."""
return region1.macros.compare(region2.macros, None, tolerance, verbose)
|
64aa97307c722961110f18dc0703985632152743
| 167,145 |
def _is_equal_all_type(data, type_pattern):
"""
:type data: list or tuple
:type type_pattern: type
:rtype: bool
>>> _is_equal_all_type([1, 2, 3], int)
True
>>> _is_equal_all_type([1.0, 2.0, 3.0], float)
True
>>> _is_equal_all_type([1, 2.0, 3], int)
False
"""
return all(isinstance(d, type_pattern) for d in data)
|
4f9003cfd9b7e4b1b5072e2854bba480d094904a
| 580,514 |
from typing import Union
import time
def time_ms(as_float: bool = False) -> Union[int, float]:
"""Convert current time to milliseconds.
:param as_float: result should be float, default result is int
:return: current time in milliseconds
"""
_time_ms = time.time() * 1000
if not as_float:
return int(_time_ms)
return _time_ms
|
9e9dd47636182935d2a6f52156fc987996c75ec3
| 41,335 |
def offset_grid_pixels(pts, vision_tform_local_grid, cell_size):
"""Offset the local grid's pixels to be in the world frame instead of the local grid frame."""
x_base = vision_tform_local_grid.position.x + cell_size * 0.5
y_base = vision_tform_local_grid.position.y + cell_size * 0.5
pts[:, 0] += x_base
pts[:, 1] += y_base
return pts
|
fd73af7eccf4c79e83ca9655573b0150d4fef639
| 172,285 |
import hashlib
def git_blob_style_hash(data: bytes):
"""Return the hash of data as a hex string
This is the hash that git uses for hashing blobs. You can pass the hash
returned by this function to git to find a copy of the file in the history
of a repository, for example
git whatchanged --all --find-object=acab3218a50a
>>> git_blob_style_hash(b'hi\\n')
'45b983be36b73c0788dc9cbcb76cbb80fc7bb057'
"""
hash = hashlib.sha1()
hash.update(b"blob %d\0" % len(data))
hash.update(data)
return hash.hexdigest()
|
b58ba4d1deff3ebe9ba80d3b06783ebacde6f9bb
| 550,671 |
def pos_in_box(pos, lbox):
"""Positions in [-lbox/2, lbox/2)
Args:
pos (np.array): positions in open BC
lbox (float): cubic box side length
Return:
np.array: positions in box centered at 0
"""
return (pos+lbox/2.) % lbox - lbox/2.
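# Worked example (illustrative addition; requires numpy): a coordinate slightly
# past +lbox/2 wraps around to the negative side of the box.
import numpy as np
print(pos_in_box(np.array([1.6, -0.3]), 2.0))  # approximately [-0.4 -0.3]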
|
f4eb3b1a1fbe460783aa0f60c14e574e90431d0e
| 307,423 |
from typing import Union
from typing import List
import requests
def dafni_patch_request(
url: str, jwt: str, data: dict, allow_redirect: bool = False
) -> Union[List[dict], dict]:
"""Performs a PATCH request from the DAFNI API.
If an error status is returned, an exception will be raised (via ``raise_for_status``).
Args:
url (str): The url endpoint that is being queried
jwt (str): JWT
data (dict): The data to be sent as the JSON body of the PATCH request
allow_redirect (bool): Flag to allow redirects during API call. Defaults to False.
Returns:
List[dict]: For an endpoint returning several objects, a list is returned (e.g. /catalogue/)
dict: For an endpoint requesting upload urls (e.g. /models/upload/)
"""
response = requests.patch(
url,
headers={"Content-Type": "application/json", "authorization": jwt},
allow_redirects=allow_redirect,
json=data,
)
response.raise_for_status()
return response.json()
|
c16f41770af78049a68689636dadd781dd0851aa
| 354,604 |
import csv
def readCSV(path2File):
"""
returns a list of links to pdfs.
Assumes the first column of the csv file corresponds to the links
"""
pdfFileLinks = []
with open(path2File, newline='') as csvfile:
data = csv.reader(csvfile)
for row in data:
pdfFileLinks.append(row[0])
return pdfFileLinks
|
0918fbbaf580cdf4221551b7cac58e4add129c76
| 49,159 |
def parse_file(file_path, separator='\t'):
"""
Parse a file containing a table. This one is returned as a list of lists where each one is a row of the table.
:file_path: The file path.
:separator: The separator between columns.
:return: The table as a list of lists.
"""
table = []
with open(file_path) as file:
for line in file:
# Remove newline character.
line = line.rstrip('\n')
# Parse the line.
row = line.split(separator)
table.append(row)
return table
|
7deebe55ebd50927e7f687c6db75b85e0d8e763e
| 556,273 |
def get_time_signatures(root):
"""Returns all time signatures in the score as a list of tupples.
Assumes it is possible to have time change in makam pieces."""
beats = [t.text for t in root.findall('part/measure/attributes/time/beats')]
types = [t.text for t in root.findall('part/measure/attributes/time/beat-type')]
all_time_signatures=[(int(b),int(t)) for b,t in zip(beats,types)]
return all_time_signatures
|
68f173de653ded4a5ae40a54c6dcc3b531999b2b
| 251,100 |
from typing import Union
from datetime import datetime
def time_from_timestamp(timestamp_in: Union[int, float]) -> datetime:
"""
Convert timestamp to :py:class:`datetime <datetime.datetime>` object.
:param timestamp_in: Number representing the epoch.
:return: :py:class:`datetime <datetime.datetime>` object as a time representation.
"""
return datetime.fromtimestamp(timestamp_in)
|
6abeaced426b4b22e772d7bfa9ec9f5eef182d1d
| 669,622 |
def cross3(u, v):
"""
Returns a 3-tuple that is the 3-dimensional vector cross product of
3-vector u crossed with 3-vector v
"""
return ((u[1] * v[2] - v[1] * u[2],
u[2] * v[0] - v[2] * u[0],
u[0] * v[1] - v[0] * u[1]))
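# Quick check (illustrative addition): the standard basis vectors satisfy x cross y = z.
print(cross3((1, 0, 0), (0, 1, 0)))  # (0, 0, 1)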
|
564d3becf9e3fdcb709e2bf217aff28e73d0e66b
| 488,639 |
def within_tolerance(measured, theoretical, tolerance):
""" Returns True if normalized, absolute difference is with tolerance """
return abs(measured - theoretical)/theoretical <= tolerance
|
ed98b5efe0b75c6be01d2feac922c1f1fe0a7078
| 333,350 |
import re
def parse_container_id(container_id: str):
""" Parse container ID from cgroups directory """
pattern = ".-"
if any(c for c in pattern if c in container_id):
return re.split("[.-]", container_id)[1]
return container_id
|
e24b2f35f4b4cae51fe04426a0236e9e69702d2b
| 413,859 |
def GenerateEnv(context):
"""Generates environmental variables for a pod.
Args:
context: Template context, which can contain the following properties:
env - Environment variables to set.
Returns:
A list containing env variables in dict format {name: 'name', value: 'value'}
"""
env = []
tmp_env = context.properties.get('env', [])
for entry in tmp_env:
if isinstance(entry, dict):
env.append({'name': entry.get('name'), 'value': entry.get('value')})
return env
|
bcc59d49692ffe0a29e3a1393e70a21230f329b9
| 274,069 |
def calc_charge_trans(m_potential, x_potential, iv, elec_affinity_non_metal, d_mx_avg):
"""
Calculate the estimated charge transfer energy (eV) of a structure
:param m_potential: Float, the metal site Madelung potential in V
:param x_potential: Float, the non_metal site Madelung potential in V
:param iv: Float, the v-th ionization energy of the metal in eV
:param elec_affinity_non_metal: Float, the electron affinity of the non_metal in eV
:param d_mx_avg: Float, the average metal-non_metal distance in Angstrom
:return: Float, the estimated charge transfer energy in eV
"""
# specify the conversion factor from e^2/Angstrom to eV
conversion_factor = 14.39965
return x_potential - m_potential + elec_affinity_non_metal - iv - conversion_factor / d_mx_avg
|
563b677492ce13928262347e7ba8352d11aa44f7
| 648,329 |
def frames(signal, samplerate, conf):
"""
Compute frames from an audio signal.
Args:
signal: the audio signal from which to compute features. Should be an
N*1 array
samplerate: the samplerate of the signal we are working with.
conf: feature configuration
Returns:
A numpy array of size (NUMFRAMES by winlen) containing features. Each
row holds 1 feature vector
"""
raise NotImplementedError('Not yet implemented')
# NOTE: the code below assumes a signal-processing helper module is importable,
# e.g. `from python_speech_features import sigproc` (an assumption, not in the original imports)
signal = sigproc.preemphasis(signal, float(conf['preemph']))
winfunc = _get_winfunc(conf['winfunc'])
frames = sigproc.framesig(signal, float(conf['winlen'])*samplerate,
float(conf['winstep'])*samplerate,
winfunc)
return frames
|
82b8d9ef688bbcdad56cac8ece4c3bdc307d3c0f
| 384,144 |
def get_sense_rank_dict(path_to_index_sense):
"""
create mapping sense key -> sense rank
casuistical%3:01:01:: 03053657 1 0 -> casuistical%3:01:01:: -> 1
:param str path_to_index_sense: wordnet index sense file
:rtype: dict
:return: mapping wordnet key -> sense rank
"""
sense_ranks = {}
with open(path_to_index_sense) as infile:
for line in infile:
key, offset, sqr, freq = line.strip().split()
sense_ranks[key] = int(sqr)
return sense_ranks
|
564fd2fcf5e6d512432b358d9a050cf5468e9272
| 439,806 |
def asmodule(module):
"""
Return the :class:`module` instance named by `module`.
If `module` is already a module instance and not a string, return
it unchanged.
"""
if isinstance(module, str):
module = __import__(module, fromlist=[])
return module
|
143cc4cc7c7d72e32a3e89fd7775a1d2633f44bc
| 552,537 |
def calculate_avg_delay_wait(avg_delays):
"""
Calculate average delay or waiting value.
"""
avg_delays_dict = {}
for lambda_rate, avg_delay_list in avg_delays.items():
avg_value = sum(avg_delay_list) / len(avg_delay_list)
avg_delays_dict[lambda_rate] = avg_value
return avg_delays_dict
|
88542ff894c17d4bd9be17dee606ec77adc9ed8d
| 655,757 |
def strip_signature_from_subject(content):
"""Assume --\n is marker for email signature and return everything before.
"""
return content.split('--')[0]
|
fe7ecd0ac59f474be0531de85b8144e1248a4e22
| 628,137 |
def create_a_correlation_matrix(dfData):
"""
Parameters
----------
dfData : pandas.DataFrame containing data
Returns
-------
matCorrelation : pandas.DataFrame
Description
-----------
Uses pandas' built-in ``DataFrame.corr`` to compute the correlation matrix.
"""
matCorrelation = dfData.corr()
return matCorrelation
|
57473f16db20ef75a35a631d8e8123b019a15d5b
| 242,945 |
from typing import Union
from pathlib import Path
def _get_epoch_images(epoch_path: Union[str, Path],
image_type: str,
image_dir: str
) -> list:
"""
Get all available images in a given epoch.
Args:
epoch_path: Path to the epoch of interest.
image_type: `COMBINED` or `TILES`.
image_dir: The name of the folder containing the images to be updated.
E.g. `STOKESI_IMAGES`.
Returns:
The list of images.
Raises:
Exception: Path does not exist.
"""
P = Path(epoch_path) / image_type / image_dir
if not P.exists():
raise Exception("{} does not exist!".format(P))
raw_images = sorted(list(P.glob("*.fits")))
return raw_images
|
1ad1dd7e58f381ca48dcb531b48a839540f64183
| 579,553 |
def likelihoods(d_given_h, priors):
"""Calculate likelihoods through marginalization, given Pr(D|H) and priors.
Usage: scores = likelihoods(d_given_h, priors)
d_given_h and priors are equal-length lists of probabilities. Returns
a list of the same length of numbers (not probabilities).
"""
# check that the lists of Pr(D|H_i) and priors are equal
length = len(d_given_h)
if length != len(priors):
raise ValueError("Lists not equal lengths.")
# find weighted sum of Pr(H_i) * Pr(D|H_i)
wt_sum = 0
for d, p in zip(d_given_h, priors):
wt_sum += d * p
# divide each Pr(D|H_i) by the weighted sum to get its relative likelihood
return [d / wt_sum for d in d_given_h]
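# Worked example (illustrative values): with equal priors and Pr(D|H) of 0.8 and
# 0.4, the weighted sum is 0.6, so the results are 0.8/0.6 and 0.4/0.6.
print([round(v, 3) for v in likelihoods([0.8, 0.4], [0.5, 0.5])])  # [1.333, 0.667]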
|
9e98c42ead525fadc7967121add0b85af27e7a06
| 222,333 |
def decorate(string, *formats):
"""Decorates a string using ANSI escape codes given some format enums.
Calling len(s) on a string which has been decorated in this manner will not
return the printed width. Call len(ansi_undecorate(s)) to achieve this.
Args:
string: string to decorate.
formats: any number of format enums to apply to the string.
Returns:
Decorated representation of string.
"""
# If no formats have been given, do nothing
if not formats:
return string
# Otherwise construct the start code
start = '\033['
for fmt in formats:
start += str(fmt) + ';'
# Remove final ';', append an 'm'
start = start[:-1] + 'm'
# Hard coded reset code to finish
end = '\033[0m'
return start + string + end
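# Usage sketch (illustrative addition): SGR codes 1 (bold) and 31 (red).
print(repr(decorate("error", 1, 31)))  # '\x1b[1;31merror\x1b[0m'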
|
2fceb0e5a1cc54e2bf520c51ccf6cd6a98494178
| 203,619 |
def order_nodes(tree, increase=True):
"""Rotate internal nodes of a tree so that child nodes are ordered by the
number of descendants.
Parameters
----------
tree : skbio.TreeNode
tree to order
increase : bool, optional
order nodes in increasing (True) or decreasing (False) order
Returns
-------
skbio.TreeNode
resulting ordered tree
See Also
--------
is_ordered
Examples
--------
>>> from skbio import TreeNode
>>> tree = TreeNode.read(['(((a,b),(c,d,e)),((f,g),h));'])
>>> print(tree)
(((a,b),(c,d,e)),((f,g),h));
<BLANKLINE>
>>> tree_ordered = order_nodes(tree, False)
>>> print(tree_ordered)
((h,(f,g)),((a,b),(c,d,e)));
<BLANKLINE>
"""
res = tree.copy()
for node in res.postorder():
if node.is_tip():
node.n = 1
else:
node.n = sum(x.n for x in node.children)
for node in res.postorder():
if not node.is_tip():
children = node.children
node.children = []
for child in sorted(children, key=lambda x: x.n, reverse=increase):
node.append(child)
for node in res.postorder():
delattr(node, 'n')
return res
|
cdf634c2ac30e8540223b08a28d3db1e2612a350
| 470,493 |
def left_fill(s, n, x="0"):
"""Cross platform string left fill method, defaults to zero filling.
Parameters
----------
s : str, string to left pad
n : int, number of total places to pad to
x : str, optional, character to fill, defaults to "0"
"""
sl = len(s)
zn = n - sl
if zn > 0:
return zn*"0" + s
else:
return s
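# Usage sketch (illustrative addition):
print(left_fill("42", 5))      # 00042
print(left_fill("7", 3, "x"))  # xx7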
|
8ec29f993fd01122ea274a2a41e8224f7dcde1ca
| 597,306 |
def deltas(numbers):
"""
Returns the changes between consecutive numbers in a list of numbers.
Example: deltas([1,5,3]) -> [4, -2]
Parameters
----------
numbers : list
The list of numbers to process.
Returns
-------
A list containing the differences between a series of numbers.
"""
return [l - r for l, r in zip(numbers[1:], numbers[:-1])]
|
155073e249141643f169fc2684042914356171f1
| 614,657 |
def get_class_name(node):
"""
Simple wrapper to get a _class knob off a node so that you can use standard nuke nodes like group, NoOp etc, but
give them their own "class".
Args:
node (nuke.Node): A node object we want to get a class from.
Returns:
str: The node class.
"""
_class = node.knobs().get("_class")
if not _class:
return node.Class()
return _class.value()
|
09027c082a9f45fdbdd15b0cc9bbd7fa6694c7cb
| 623,302 |
def check_transaction_threw(client, transaction_hash):
"""Check if the transaction threw/reverted or if it executed properly
Returns None in case of success and the transaction receipt if the
transaction's status indicator is 0x0.
"""
receipt = client.web3.eth.getTransactionReceipt(transaction_hash)
if 'status' not in receipt:
raise ValueError(
'Transaction receipt does not contain a status field. Upgrade your client',
)
if receipt['status'] == 0:
return receipt
return None
|
0f6fa3ea236a58a948c634b1dc35f25402ab74e2
| 111,643 |
from typing import Callable
from typing import Sized
def minimum_length(length: int) -> Callable:
"""Generate a function that checks for a minimum string length."""
def _min_len(value: Sized) -> float:
if length > len(value) > 0:
return 0.
return 1.
return _min_len
|
de201dba8ea9c69bd281ca09ad2e7089380d35a8
| 486,188 |
from typing import Optional
import random
def random_hex_color(short: Optional[bool] = False) -> str:
"""
Generates a random hexadecimal color string.
Arguments:
short: If `True`, this function will generated
a shortened 3-digit hex color, otherwise a 6-digit
hex color will be generated instead.
Returns:
A three or six digit hex color of the format
`#ff00ff` or `#f0f`.
"""
if short:
return "#%03x" % random.randint(0, 0xFFF)
return "#%06x" % random.randint(0, 0xFFFFFF)
|
41696f614f58aa13330eb40d9bac2d4317ea2d58
| 422,907 |
def get_obj_value(obj_value, obj_type):
"""Extract object value depending on its type."""
if obj_type in ['Literal', 'literal']:
return 'Literal'
elif obj_type in ['BNode', 'bnode']:
return 'BNode'
return str(obj_value)
|
8da2ce46fc20ea668b2a98bec831b2fddb92d42a
| 388,047 |
def _parent(i):
"""
Returns the parent node of the given node.
"""
return (i - 1) // 2
|
045bce8778605b5083458f5ec99f81ffca18d623
| 398,956 |
def select_completer(question):
"""generate a completer for a select choices case
Parameters
----------
question: ConcreteQuestion
Returns
-------
function
completer function for readline
"""
def _completer(text, state):
options = [choice for choice in question.choices if choice.startswith(text)]
if state < len(options):
return options[state]
return None
return _completer
|
d4144040076b93a52682e1c37dc3a570df3cd201
| 440,966 |
def recursive_update_dict(original_dict, update_dict):
"""Update a dictionary recursively"""
new_dict = dict(original_dict)
for k, v in update_dict.items():
if type(v) is dict:
new_dict[k] = recursive_update_dict(original_dict[k], v)
else:
new_dict[k] = v
return new_dict
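# Usage sketch (illustrative addition): nested keys present in both dicts are
# merged rather than overwritten wholesale.
base = {"a": 1, "b": {"c": 2, "d": 3}}
print(recursive_update_dict(base, {"b": {"c": 9}}))  # {'a': 1, 'b': {'c': 9, 'd': 3}}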
|
af827b6e459af6190ed2ab29492137967095af2e
| 155,811 |