content (string) | id (int64)
---|---
def get_routes_by_tunnel(compute, project, region, tunnel, restore, debug):
""" Filters all routes to a specific project, region and tunnel."""
match = '%s/regions/%s/vpnTunnels/%s' % (project, region, tunnel)
routes_all = list_routes(compute, project, debug)
routes = []
for route in routes_all:
if 'nextHopVpnTunnel' in route:  # dict.has_key() no longer exists in Python 3
token = '/'.join(route['nextHopVpnTunnel'].split('/')[-5:])
if token == match and restore == is_route_we_created(route):
routes.append(route)
return routes
| 5,350,900 |
def fcard(card):
"""Create format string for card display"""
return f"{card[0]} {card[1]}"
| 5,350,901 |
def replaceUnresolvedLinks(root):
""" This transformation replaces all a.external-link
nodes with a proper footnote.
Used for PDF generation only (html_mode = 'split')
"""
for link in CSSSelector('a.external-link')(root):
href = link.attrib['href']
span1 = lxml.html.Element('span')
span1.attrib['class'] = 'generated-footnote-text'
span1.text = link.text_content()
span2 = lxml.html.Element('span')
span2.attrib['class'] = 'generated-footnote'
span2.text = href
span1.insert(1, span2)
link.getparent().replace(link, span1)
| 5,350,902 |
def resetChapterProgress(chapterProgressDict, chapter, initRepeatLevel):
"""This method resets chapter progress and sets initial level for repeat routine.
Args:
chapterProgressDict (dict): Chapter progress data.
chapter (int): Number of the chapter.
initRepeatLevel (int): Initial level for repeat routine.
Returns:
dict: The reset chapter progress dictionary with the initial repeat level set.
"""
chapterProgressDict[chapter]["status"] = "Not started"
chapterProgressDict[chapter]["progress"]["current"] = 0
chapterProgressDict[chapter]["correct"] = {"correct":0, "subtotal":0, "rate":''}
chapterProgressDict[chapter]["repeatLevel"] = initRepeatLevel
return chapterProgressDict
| 5,350,903 |
def aStarSearch(problem, heuristic=nullHeuristic):
"""Search the node that has the lowest combined cost and heuristic first."""
"*** YOUR CODE HERE ***"
priorityqueue = util.PriorityQueue()
priorityqueue.push( (problem.getStartState(), [], 0), heuristic(problem.getStartState(), problem) )
checkedstates = []
while not priorityqueue.isEmpty():
state, actions, curCost = priorityqueue.pop()
if problem.isGoalState(state):
return actions
if(not state in checkedstates):
checkedstates.append(state)
for child in problem.getSuccessors(state):
point = child[0]
direction = child[1]
cost = child[2]
g = curCost + cost
heuristic_cost = heuristic(point, problem)
sum_g_heuristic = g + heuristic_cost
priorityqueue.push((point, actions + [direction], g), sum_g_heuristic)
# util.raiseNotDefined()
| 5,350,904 |
def index_referenced_records(series):
"""Index referenced records."""
indexer = ReferencedRecordsIndexer()
indexed = dict(pid_type=SERIES_PID_TYPE, record=series)
indexer.index(indexed, get_related_records(series["pid"]))
| 5,350,905 |
def main(client, customer_id, merchant_center_account_id):
"""Demonstrates how to reject a Merchant Center link request.
Args:
client: An initialized Google Ads client.
customer_id: The Google Ads customer ID.
merchant_center_account_id: The Merchant Center account ID for the
account requesting to link.
"""
# Get the MerchantCenterLinkService client.
merchant_center_link_service = client.get_service(
"MerchantCenterLinkService", version="v6"
)
try:
# Get the extant customer account to Merchant Center account links.
list_merchant_center_links_response = merchant_center_link_service.list_merchant_center_links(
customer_id
)
number_of_links = len(
list_merchant_center_links_response.merchant_center_links
)
if number_of_links <= 0:
print(
"There are no current merchant center links to Google Ads "
f"account {customer_id}. This example will now exit."
)
return
print(
f"{number_of_links} Merchant Center link(s) found with the "
"following details:"
)
merchant_center_link_status_enum = client.get_type(
"MerchantCenterLinkStatusEnum", version="v6"
).MerchantCenterLinkStatus
for (
merchant_center_link
) in list_merchant_center_links_response.merchant_center_links:
print(
f"\tLink '{merchant_center_link.resource_name}' has status "
f"'{merchant_center_link_status_enum.Name(merchant_center_link.status)}'."
)
# Check if this is the link to the target Merchant Center account.
if merchant_center_link.id == merchant_center_account_id:
# A Merchant Center link can be pending or enabled; in both
# cases, we reject it by removing the link.
_remove_merchant_center_link(
client,
merchant_center_link_service,
customer_id,
merchant_center_link,
)
# We can terminate early since this example concerns only one
# Google Ads account to Merchant Center account link.
return
# Raise an exception if no matching Merchant Center link was found.
raise ValueError(
"No link could was found between Google Ads account "
f"{customer_id} and Merchant Center account "
f"{merchant_center_account_id}."
)
except GoogleAdsException as ex:
print(
f'Request with ID "{ex.request_id}" failed with status '
f'"{ex.error.code().name}" and includes the following errors:'
)
for error in ex.failure.errors:
print(f'\tError with message "{error.message}".')
if error.location:
for field_path_element in error.location.field_path_elements:
print(f"\t\tOn field: {field_path_element.field_name}")
sys.exit(1)
| 5,350,906 |
def draw_lines(img, lines, color=[255, 0, 0], thickness=2):
"""
NOTE: this is the function you might want to use as a starting point once you want to
average/extrapolate the line segments you detect to map out the full
extent of the lane (going from the result shown in raw-lines-example.mp4
to that shown in P1_example.mp4).
Think about things like separating line segments by their
slope ((y2-y1)/(x2-x1)) to decide which segments are part of the left
line vs. the right line. Then, you can average the position of each of
the lines and extrapolate to the top and bottom of the lane.
This function draws `lines` with `color` and `thickness`.
Lines are drawn on the image inplace (mutates the image).
If you want to make the lines semi-transparent, think about combining
this function with the weighted_img() function below
"""
# print(len(lines))
leftlines = []
rightlines = []
lc = []
lm = []
rc = []
rm = []
height, width, channels = img.shape
print(height/2)
for line in lines:
for x1, y1, x2, y2 in line:
if x2 == x1: continue  # skip vertical segments to avoid division by zero
slope = (y2 - y1) / (x2 - x1)
center = [(x1 + x2) / 2, (y1 + y2) / 2]
if slope < 0 and slope > -2:
lc.append(center)
lm.append(slope)
leftlines.append(line)
else:
if slope > 0 and slope < 2:
rightlines.append(line)
rc.append(center)
rm.append(slope)
r_slope = np.sum(rm) / len(rm)
l_slope = np.sum(lm) / len(lm)
l_center = np.divide(np.sum(lc, axis=0), len(lc))
r_center = np.divide(np.sum(rc, axis=0), len(rc))
leftX, leftY = l_center
rightX, rightY = r_center
lefty1 = - (l_slope*(leftX - 480) - leftY)
lefty2 = - (l_slope*(leftX - 100) - leftY)
int_lefty1 = int(round(lefty1))
int_lefty2 = int(round(lefty2))
righty1 = - (r_slope*(rightX - 480) - rightY)
righty2 = - (r_slope*(rightX - 900) - rightY)
int_righty1 = int(round(righty1))
int_righty2 = int(round(righty2))
cv2.line(img, (480, int_lefty1), (100, int_lefty2), [255, 0, 255], 15)
cv2.line(img, (480, int_righty1), (900, int_righty2), [255, 0, 255], 15)
| 5,350,907 |
def gen_flag(p1=0.5, **_args):
"""
Generates a flag.
:param p1: probability of flag = 1
:param _args:
:return: flag
"""
return 1 if np.random.random() < p1 else 0  # uniform draw so that P(flag == 1) equals p1
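
# A quick sanity check of the generator above (a sketch; assumes numpy is
# imported as `np` in this module): the empirical frequency of 1s should be
# close to p1.
flags = [gen_flag(p1=0.3) for _ in range(100000)]
print(sum(flags) / len(flags))  # ~0.3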
| 5,350,908 |
def load_migrations(path):
"""
Given a path, load all migrations in that path.
:param path: path to look for migrations in
:type path: pathlib.Path or str
"""
migrations = OrderedDict()
r = re.compile(r'^[0-9]+\_.+\.py$')
filtered = filter(
lambda x: r.search(x) is not None,
[x.name for x in path.iterdir()]
)
for migration in sorted(filtered):
migration_id = migration[:-3]
migrations[migration_id] = {
'id': migration_id,
'commit_time': None,
'status': 0,
'module': load_module(migration_id, path / migration),
'table': None
}
return migrations
| 5,350,909 |
def filterkey(e, key, ns=False):
"""
Returns a list built from the list C{e} using the attribute C{key}.
B{Example 1}: Reading the SRS out of a list containing C{dict}'s.
>>> e = [{'SRS':'12345', 'Name':'WGS-1'}, {'SRS':'54321', 'Name':'WGS-2'}]
>>> key = "SRS"
>>> filterkey(e, key)
['12345', '54321']
B{Example 2}: Reading the name out of a list containing C{dict}'s.
>>> e = [{'SRS':'12345', 'Name':'WGS-1'}, {'SRS':'54321', 'Name':'WGS-2'}]
>>> key = "Name"
>>> filterkey(e, key)
['WGS-1', 'WGS-2']
@param e: list
@type e: list
@param key: key
@type key: str
@param ns: whether L{_helpers.ns} should additionally be applied
@type ns: bool
@return: list of the attribute values found for C{key}
@rtype: list
"""
l = []
key_split = key.split("=")
if isinstance(e, list):
for i in e:
if len(key_split)>1:
if i[key_split[0]] == key_split[1]:
if ns:
l.append(_ns(i[key_split[0]]))
else:
l.append(i[key_split[0]])
else:
if ns:
l.append(_ns(i[key]))
else:
l.append(i[key])
return l
| 5,350,910 |
def validate_profile(profile_id, profile):
"""
Validates and normalises the given profile dictionary.
As side effects to the input dict:
* Fields that are set to None in rules are removed completely.
* Selectors are replaced with SelectorExpression objects. Parsing now
ensures that the selectors are valid and ensures that equal selectors
compare and hash equally. For example: "a == 'b'" and "a=='b'" are
different strings that parse to the same selector.
Once this routine has returned successfully, we know that all
required fields are present and have valid values.
:param str profile_id: The ID of the profile, which is also validated.
:param profile: The profile dict. For example,
{"inbound_rules": [...], "outbound_rules": [...]}
:raises ValidationFailed
"""
# The code below relies on the top-level object to be a dict. Check
# that first.
if not isinstance(profile, dict):
raise ValidationFailed("Expected profile %r to be a dict, not %r." %
(profile_id, profile))
issues = []
if not VALID_ID_RE.match(profile_id):
issues.append("Invalid profile ID '%r'." % profile_id)
_validate_rules(profile, issues)
if issues:
raise ValidationFailed(" ".join(issues))
| 5,350,911 |
def get_runtime_preinstalls(internal_storage, runtime):
"""
Download runtime information from storage at deserialize
"""
if runtime in default_preinstalls.modules:
logger.debug("Using serialize/default_preinstalls")
runtime_meta = default_preinstalls.modules[runtime]
preinstalls = runtime_meta['preinstalls']
else:
logger.debug("Downloading runtime pre-installed modules from COS")
runtime_meta = internal_storage.get_runtime_info(runtime)
preinstalls = runtime_meta['preinstalls']
if not runtime_valid(runtime_meta):
raise Exception(("The indicated runtime: {} "
"is not appropriate for this Python version.")
.format(runtime))
return preinstalls
| 5,350,912 |
def test_join_network(mock_cli, network_id, outcome):
""" Ensures that a ValueError is reaised on invalid id """
if isinstance(outcome, str):
mock_cli.return_value = outcome
assert zerotier.join_network(network_id) == outcome
else:
with pytest.raises(outcome):
zerotier.join_network(network_id)
| 5,350,913 |
def test_parse_feature_with_scenarios(parser):
"""
Test parsing a Feature with multiple Scenarios and Steps
"""
# when
feature = parser.parse()
# then
assert len(feature.scenarios) == 2
assert len(feature.scenarios[0].steps) == 3
assert len(feature.scenarios[1].steps) == 3
| 5,350,914 |
def set_warnings():
"""Configure warnings to show while running tests."""
warnings.simplefilter("default")
warnings.simplefilter("once", DeprecationWarning)
# Warnings to suppress:
# How come these warnings are successfully suppressed here, but not in setup.cfg??
# setuptools/py33compat.py:54: DeprecationWarning: The value of convert_charrefs will become
# True in 3.5. You are encouraged to set the value explicitly.
# unescape = getattr(html, 'unescape', html_parser.HTMLParser().unescape)
warnings.filterwarnings(
"ignore",
category=DeprecationWarning,
message=r"The value of convert_charrefs will become True in 3.5.",
)
warnings.filterwarnings(
"ignore",
category=DeprecationWarning,
message=r".* instead of inspect.getfullargspec",
)
# <frozen importlib._bootstrap>:681:
# ImportWarning: VendorImporter.exec_module() not found; falling back to load_module()
warnings.filterwarnings(
"ignore",
category=ImportWarning,
message=r".*exec_module\(\) not found; falling back to load_module\(\)",
)
# <frozen importlib._bootstrap>:908:
# ImportWarning: AssertionRewritingHook.find_spec() not found; falling back to find_module()
# <frozen importlib._bootstrap>:908:
# ImportWarning: _SixMetaPathImporter.find_spec() not found; falling back to find_module()
# <frozen importlib._bootstrap>:908:
# ImportWarning: VendorImporter.find_spec() not found; falling back to find_module()
warnings.filterwarnings(
"ignore",
category=ImportWarning,
message=r".*find_spec\(\) not found; falling back to find_module\(\)",
)
if env.PYPY:
# pypy3 warns about unclosed files a lot.
warnings.filterwarnings("ignore", r".*unclosed file", category=ResourceWarning)
| 5,350,915 |
def update_graph(slider_value):
"""
Update the graph depending on which value of the slider is selected
Parameters
----------
slider_value: the current slider value, None when starting the app
Returns
-------
dcc.Graph object holding the new Plotly Figure as given by the plot_slice function
"""
value = slider_value if slider_value is not None else 0
fname = os.path.join(solution_dir, f"{solution_prefix}_{value}.csv")
return plot_slice(fname)
| 5,350,916 |
def on_coordinator(f):
"""A decorator that, when applied to a function, makes a spawn of that function happen on the coordinator."""
f.on_coordinator = True
return f
| 5,350,917 |
def redirect_subfeed(match):
"""
URL migration: my site used to have per-category/subcategory RSS feeds
as /category/path/rss.php. Many of the categories have Path-Aliases
in their respective .cat files, but some of the old subcategories no
longer apply, so now I just bulk-redirect all unaccounted-for subcategories
to the top-level category's feed.
"""
return flask.url_for(
'category',
category=match.group(1),
template='feed'), True
| 5,350,918 |
def _train_test_split_dataframe_strafified(df:pd.DataFrame, split_cols:List[str], test_ratio:float=0.2, verbose:int=0, **kwargs) -> Tuple[pd.DataFrame, pd.DataFrame]:
"""
ref. the function `train_test_split_dataframe`
"""
df_inspection = df[split_cols].copy()  # copy so the caller's dataframe is not mutated
for item in split_cols:
all_entities = df_inspection[item].unique().tolist()
entities_dict = {e: str(i) for i, e in enumerate(all_entities)}
df_inspection[item] = df_inspection[item].apply(lambda e:entities_dict[e])
inspection_col_name = "Inspection" * (max([len(c) for c in split_cols])//10+1)
df_inspection[inspection_col_name] = df_inspection.apply(
func=lambda row: "-".join(row.values.tolist()),
axis=1
)
item_names = df_inspection[inspection_col_name].unique().tolist()
item_indices = {
n: df_inspection.index[df_inspection[inspection_col_name]==n].tolist() for n in item_names
}
if verbose >= 1:
print("item_names = {}".format(item_names))
for n in item_names:
random.shuffle(item_indices[n])
test_indices = []
for n in item_names:
item_test_indices = item_indices[n][:int(round(test_ratio*len(item_indices[n])))]
test_indices += item_test_indices
if verbose >= 2:
print("for the item `{}`, len(item_test_indices) = {}".format(n, len(item_test_indices)))
df_test = df.loc[df.index.isin(test_indices)].reset_index(drop=True)
df_train = df.loc[~df.index.isin(test_indices)].reset_index(drop=True)
return df_train, df_test
| 5,350,919 |
def index():
"""
This route will render a template.
If a query string comes into the URL, it will return a parsed
dictionary of the query string keys & values, using request.args
"""
args = request.args if request.args else None
return render_template("public/index.html", args=args)
| 5,350,920 |
def load_from_string(xml_string, header_line):
"""
Load a document string; the string might contain multiple xml documents.
params:
xml_string (string): input xml string
output:
xml string: an iterator that yields individual xml strings
"""
xml_lines = []
for line in xml_string.splitlines():
if line.startswith(header_line):
if xml_lines:
xml = "\n".join(xml_lines)
yield xml
xml_lines = [line]
elif line.startswith('<?xml'):
pass
else:
xml_lines.append(line)
xml = "\n".join(xml_lines)
yield xml
| 5,350,921 |
def multi_from_dict(cls: Type[T_FromDict], data: dict, key: str) -> List[T_FromDict]:
"""
Converts {"foo": [{"bar": ...}, {"bar": ...}]} into list of objects using the cls.from_dict method.
"""
return [cls.from_dict(raw) for raw in data.get(key, [])]
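
# A minimal usage sketch. `Point` is a hypothetical class used only for
# illustration; the only requirement on `cls` is a `from_dict` classmethod,
# as the T_FromDict type hint suggests.
from dataclasses import dataclass

@dataclass
class Point:
    bar: int

    @classmethod
    def from_dict(cls, raw: dict) -> "Point":
        return cls(bar=raw["bar"])

data = {"foo": [{"bar": 1}, {"bar": 2}]}
print(multi_from_dict(Point, data, "foo"))     # [Point(bar=1), Point(bar=2)]
print(multi_from_dict(Point, data, "missing"))  # [] when the key is absent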
| 5,350,922 |
def authenticate(user, passwd):
"""Authenticate via LDAP."""
caCertStr = zlib.decompress(base64.b64decode(encodedCompressedCaCert))
certFile = mktemp('.pem')
open(certFile, 'w').write(caCertStr)
try:
ldap.set_option(ldap.OPT_X_TLS_CACERTFILE, certFile)
l = ldap.open("ldap.jpl.nasa.gov")
l.protocol_version = ldap.VERSION3
l.start_tls_s()
l.simple_bind_s(
'uid=%s, ou=personnel, dc=dir, dc=jpl, dc=nasa, dc=gov' % user, passwd)
l.unbind_s()
except (ldap.NO_SUCH_OBJECT, ldap.INVALID_CREDENTIALS, ldap.INAPPROPRIATE_AUTH) as e:
info = e.args[0]['info']
desc = e.args[0]['desc']
print("%s: %s" % (info, desc))
os.unlink(certFile)
| 5,350,923 |
def infer_signature(func, class_name=''):
"""Decorator that infers the signature of a function."""
# infer_method_signature should be idempotent
if hasattr(func, '__is_inferring_sig'):
return func
assert func.__module__ != infer_method_signature.__module__
try:
funcfile = get_defining_file(func)
funcsource, sourceline = inspect.getsourcelines(func)
sourceline -= 1 # getsourcelines is apparently 1-indexed
except:
return func
funcid = (class_name, func.__name__, funcfile, sourceline)
func_source_db[funcid] = ''.join(funcsource)
try:
func_argid_db[funcid] = getfullargspec(func)
vargs_name, kwargs_name = func_argid_db[funcid][1], func_argid_db[funcid][2]
except TypeError:
# Not supported.
return func
def wrapper(*args, **kwargs):
global is_performing_inference
# If we're already doing inference, we should be in our own code, not code we're checking.
# Not doing this check sometimes results in infinite recursion.
if is_performing_inference:
return func(*args, **kwargs)
expecting_type_error, got_type_error, got_exception = False, False, False
is_performing_inference = True
try:
callargs = getcallargs(func, *args, **kwargs)
# we have to handle *args and **kwargs separately
if vargs_name:
va = callargs.pop(vargs_name)
if kwargs_name:
kw = callargs.pop(kwargs_name)
arg_db = {arg: infer_value_type(value) for arg, value in callargs.items()}
# *args and **kwargs need to merge the types of all their values
if vargs_name:
arg_db[vargs_name] = union_many_types(*[infer_value_type(v) for v in va])
if kwargs_name:
arg_db[kwargs_name] = union_many_types(*[infer_value_type(v) for v in kw.values()])
except TypeError:
got_exception = expecting_type_error = True
except:
got_exception = True
finally:
is_performing_inference = False
try:
ret = func(*args, **kwargs)
except TypeError:
got_type_error = got_exception = True
raise
except:
got_exception = True
raise
finally:
if not got_exception:
assert not expecting_type_error
# if we didn't get a TypeError, update the actual database
for arg, t in arg_db.items():
update_db(func_arg_db, (funcid, arg), t)
# if we got an exception, we don't have a ret
if not got_exception:
is_performing_inference = True
try:
type = infer_value_type(ret)
update_db(func_return_db, funcid, type)
except:
pass
finally:
is_performing_inference = False
return ret
if hasattr(func, '__name__'):
wrapper.__name__ = func.__name__
wrapper.__is_inferring_sig = True
return wrapper
| 5,350,924 |
def field_wrapper(col):
"""Helper function to dynamically create list display method
for :class:`ViewProfilerAdmin` to control value formating
and sort order.
:type col: :data:`settings.ReportColumnFormat`
:rtype: function
"""
def field_format(obj):
return col.format.format(getattr(obj, col.attr_name))
field_format.short_description = col.name
field_format.admin_order_field = col.attr_name
return field_format
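
# A minimal usage sketch. ReportColumnFormat is modelled here as a plain
# namedtuple (a stand-in for the real settings object, which is not shown);
# Row is a hypothetical object carrying the attribute being displayed.
from collections import namedtuple

ReportColumnFormat = namedtuple("ReportColumnFormat", ["name", "attr_name", "format"])

col = ReportColumnFormat(name="Avg time", attr_name="avg_time", format="{:.2f} ms")
render = field_wrapper(col)

class Row:
    avg_time = 12.3456

print(render(Row()))             # "12.35 ms"
print(render.short_description)  # "Avg time" (used as the admin column header)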
| 5,350,925 |
def __alarm_handler(*_args):
"""
Handle an alarm by calling any due heap entries and resetting the alarm.
Note that multiple heap entries might get called, especially if calling an
entry takes a lot of time.
"""
try:
nextt = __next_alarm()
while nextt is not None and nextt <= 0:
(_tm, func, args, keys) = heapq.heappop(alarmlist)
func(*args, **keys)
nextt = __next_alarm()
finally:
if alarmlist:
__set_alarm()
| 5,350,926 |
def is_isotropic(value):
""" Determine whether all elements of a value are equal """
if hasattr(value, '__iter__'):
return np.all(value[1:] == value[:-1])
else:
return True
| 5,350,927 |
def grouper(iterable, n, fillvalue=None):
"""Iterate over a given iterable in n-size groups."""
args = [iter(iterable)] * n
return zip_longest(*args, fillvalue=fillvalue)
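
# Example (assumes zip_longest is imported from itertools, as the function
# requires): groups of 3, padded with the fill value.
print(list(grouper(range(7), 3)))  # [(0, 1, 2), (3, 4, 5), (6, None, None)]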
| 5,350,928 |
def format_gro_coord(resid, resname, aname, seqno, xyz):
""" Print a line in accordance with .gro file format, with six decimal points of precision
Nine decimal points of precision are necessary to get forces below 1e-3 kJ/mol/nm.
@param[in] resid The number of the residue that the atom belongs to
@param[in] resname The name of the residue that the atom belongs to
@param[in] aname The name of the atom
@param[in] seqno The sequential number of the atom
@param[in] xyz A 3-element array containing x, y, z coordinates of that atom
"""
return "%5i%-5s%5s%5i % 13.9f % 13.9f % 13.9f" % (resid,resname,aname,seqno,xyz[0],xyz[1],xyz[2])
| 5,350,929 |
def return_as_list(ignore_nulls: bool = False):
"""
Enables you to write a list-returning functions using a decorator. Example:
>>> def make_a_list(lst):
>>> output = []
>>> for item in lst:
>>> output.append(item)
>>> return output
Is equivalent to:
>>> @return_as_list()
>>> def make_a_list(lst):
>>> for item in lst:
>>> yield item
Essentially a syntactic sugar for @for_argument(returns=list)
:param ignore_nulls: if True, then if your function yields None, it won't be appended.
"""
def outer(fun):
@wraps(fun)
def inner(*args, **kwargs):
output = []
for item in fun(*args, **kwargs):
if item is None and ignore_nulls:
continue
output.append(item)
return output
return inner
return outer
| 5,350,930 |
def get_pingback_url(target_url):
"""
Grabs a page and reads the pingback url for it.
"""
logger.debug("get_pingback_url called...")
logger.debug("grabbing " + str(target_url))
html = urlopen(target_url).read()
logger.info( "Got %d bytes" % len(html))
soup = bs(html, 'html.parser')
# check for link tags...
pbaddress = None
for l in soup.findAll('link'):
if 'pingback' in (l.get('rel') or []):  # BeautifulSoup returns rel as a list of values
pbaddress = str(l.get('href'))
logger.debug("Got: %s" % pbaddress)
logger.debug("get_pingback_url completed")
return pbaddress
| 5,350,931 |
def onedim_phasecurves(left, right, singpoints, directions,
orientation='vertical', shift=0,
delta=0.05, **kwargs):
"""
Draws phase curves of one-directional vector field;
left and right are borders
singpoints is a list of singular points (equilibria)
directions is a list of +1 and -1 that gives a direction
Example:
plt.ylim(-4, 4)
plt.xlim(-4, 4)
onedim_phasecurves(-4, 4, [-1, 1], [1, -1, 1], orientation='horizontal',
shift=1)
"""
assert len(directions) == len(singpoints) + 1
assert orientation in ['vertical', 'horizontal']
n = len(singpoints)
defaultcolor = 'Teal'
plot_params = get_default(kwargs, color=defaultcolor, marker='o',
fillstyle='none', mew=5, lw=0, markersize=2)
quiver_params = dict(angles='xy',
scale_units='xy', scale=1, units='inches')
quiver_params.update(get_default(kwargs, width=0.03,
color=defaultcolor))
baseline = np.zeros(n) + shift
if orientation == 'vertical':
plt.plot(baseline, singpoints, **plot_params)
else:
plt.plot(singpoints, baseline, **plot_params)
# We have to process special case when left or right border is singular
# move them to special list lonesingpoints to process later
if singpoints:
if singpoints[0] == left:
singpoints.pop(0)
directions.pop(0)
n -= 1
if singpoints:
if singpoints[-1] == right:
singpoints.pop()
directions.pop()
n -= 1
xs = np.zeros(n + 1) + shift
ys = []
us = np.zeros(n + 1)
vs = []
endpoints = [left] + list(singpoints) + [right]
for i, direction in enumerate(directions):
if direction > 0:
beginning = endpoints[i]
ending = endpoints[i+1]
elif direction < 0:
beginning = endpoints[i+1]
ending = endpoints[i]
else:
raise Exception("direction should be >0 or <0")
ys.append(beginning + np.sign(direction) * delta)
vs.append(ending - beginning - np.sign(direction)*2*delta)
if orientation == 'vertical':
plt.quiver(xs, ys, us, vs, **quiver_params, **kwargs)
else:
plt.quiver(ys, xs, vs, us, **quiver_params, **kwargs)
| 5,350,932 |
def assemble_insert_msg_content(row, column, digit):
"""Assemble a digit insertion message."""
return str(row) + CONTENT_SEPARATOR + str(column) + CONTENT_SEPARATOR + str(digit)
| 5,350,933 |
def get_difference_of_means(uni_ts: Union[pd.Series, np.ndarray]) -> np.float64:
"""
:return: The absolute difference between the means of the first and the second halves of a
given univariate time series.
"""
mid = int(len(uni_ts) / 2)
return np.abs(get_mean(uni_ts[:mid]) - get_mean(uni_ts[mid:]))
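
# Worked example of the idea (a sketch; assumes get_mean is the plain
# arithmetic mean, e.g. np.mean): for [1, 1, 3, 3] the two half-means are
# 1.0 and 3.0, so the result is |1.0 - 3.0| = 2.0.
import numpy as np
series = np.array([1.0, 1.0, 3.0, 3.0])
mid = len(series) // 2
print(np.abs(np.mean(series[:mid]) - np.mean(series[mid:])))  # 2.0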
| 5,350,934 |
async def test_binary_sensors(hass, mock_asyncsleepiq):
"""Test the SleepIQ binary sensors."""
await setup_platform(hass, DOMAIN)
entity_registry = er.async_get(hass)
state = hass.states.get(
f"binary_sensor.sleepnumber_{BED_NAME_LOWER}_{SLEEPER_L_NAME_LOWER}_is_in_bed"
)
assert state.state == STATE_ON
assert state.attributes.get(ATTR_ICON) == "mdi:bed"
assert state.attributes.get(ATTR_DEVICE_CLASS) == BinarySensorDeviceClass.OCCUPANCY
assert (
state.attributes.get(ATTR_FRIENDLY_NAME)
== f"SleepNumber {BED_NAME} {SLEEPER_L_NAME} Is In Bed"
)
entity = entity_registry.async_get(
f"binary_sensor.sleepnumber_{BED_NAME_LOWER}_{SLEEPER_L_NAME_LOWER}_is_in_bed"
)
assert entity
assert entity.unique_id == f"{SLEEPER_L_ID}_is_in_bed"
state = hass.states.get(
f"binary_sensor.sleepnumber_{BED_NAME_LOWER}_{SLEEPER_R_NAME_LOWER}_is_in_bed"
)
assert state.state == STATE_OFF
assert state.attributes.get(ATTR_ICON) == "mdi:bed-empty"
assert state.attributes.get(ATTR_DEVICE_CLASS) == BinarySensorDeviceClass.OCCUPANCY
assert (
state.attributes.get(ATTR_FRIENDLY_NAME)
== f"SleepNumber {BED_NAME} {SLEEPER_R_NAME} Is In Bed"
)
entity = entity_registry.async_get(
f"binary_sensor.sleepnumber_{BED_NAME_LOWER}_{SLEEPER_R_NAME_LOWER}_is_in_bed"
)
assert entity
assert entity.unique_id == f"{SLEEPER_R_ID}_is_in_bed"
| 5,350,935 |
def user_collection():
"""
User's collected-news page.
1. Get parameters:
- current page
2. Return data:
- current page
- total pages
- items on each page
:return:
"""
user = g.user
if not user:
return "请先登录"
# Get the current page
page = request.args.get('p', 1)
page_show = constants.USER_COLLECTION_MAX_NEWS
# Validate parameters
try:
page = int(page)
except Exception as e:
current_app.logger.error(e)
# return jsonify(errno=RET.PARAMERR, errmsg="invalid parameter")
abort(404)
try:
user_collection = user.collection_news.paginate(page, page_show)
currentPage = user_collection.page
totalPage = user_collection.pages
items = user_collection.items
except Exception as e:
current_app.logger.error(e)
# return jsonify(errno=RET.DBERR, errmsg="database query error")
abort(404)
user_collection_list = []
for item in items:
user_collection_list.append(item.to_review_dict())
data = {
'currentPage': currentPage,
'totalPage': totalPage,
'user_collection_list': user_collection_list
}
return render_template("news/user_collection.html", data=data)
| 5,350,936 |
def activity(*, domain, name, version):
"""Decorator that registers a function to `ACTIVITY_FUNCTIONS`
"""
def function_wrapper(func):
    identifier = '{}:{}:{}'.format(name, version, domain)
    ACTIVITY_FUNCTIONS[identifier] = func
    return func  # return the original function so the decorated callable still works
return function_wrapper
| 5,350,937 |
def OEDParser(soup, key):
""" The parser of Oxford Learner's Dictionary. """
rep = DicResult(key)
rep.defs = parseDefs(soup)
rep.examples = parseExample(soup)
return rep
| 5,350,938 |
def main():
"""CLI main entry."""
# We parse it from sys.argv instead of argparse parser because we want
# to run the app parser only once, after we update it with the loaded
# arguments from the CI models based on the loaded configuration file
config_file_path = get_config_file_path(sys.argv)
orchestrator = Orchestrator(config_file_path)
orchestrator.load_configuration()
orchestrator.create_ci_environments()
# Add arguments from CI & product models to the parser of the app
for env in orchestrator.environments:
orchestrator.extend_parser(attributes=env.API)
# We can parse user's arguments only after we have loaded the
# configuration and extended based on it the parser with arguments
# from the CI models
orchestrator.parser.app_args, orchestrator.parser.ci_args = \
orchestrator.parser.parse()
orchestrator.run_query()
orchestrator.publisher.publish(orchestrator.environments)
| 5,350,939 |
def X_SIDE_GAP_THREE_METHODS(data):
"""
Upside/Downside Gap Three Methods
:param pd.DataFrame data: pandas DataFrame with open, high, low, close data
:return pd.Series: with indicator data calculation results
"""
fn = Function('CDLXSIDEGAP3METHODS')
return fn(data)
| 5,350,940 |
def fix_legacy_database_uri(uri):
""" Fixes legacy Database uris, like postgres:// which is provided by Heroku but no longer supported by SqlAlchemy """
if uri.startswith('postgres://'):
uri = uri.replace('postgres://', 'postgresql://', 1)
return uri
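
# Example: a Heroku-style URI rewritten into the scheme SQLAlchemy 1.4+ accepts.
print(fix_legacy_database_uri("postgres://user:secret@host:5432/db"))
# postgresql://user:secret@host:5432/db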
| 5,350,941 |
def create_need():
"""
Creates the need in the database.
:return:
"""
student = Student.query.filter_by(id_user=session['uid']).first()
title = request.form.get('title')
description = request.form.get('description')
speaker_id = request.form.get('speaker_id')
estimated_tokens = int(request.form.get('estimated_tokens'))
if estimated_tokens < 0:
estimated_tokens = 0
need = Need(
title=title,
description=description,
estimated_tokens=estimated_tokens,
status='En cours',
id_assigned_team=student.team.id,
id_assigned_speaker=speaker_id
)
db.session.add(need)
try:
db.session.commit()
except:
abort(500)
return redirect(url_for('get_student_dashboard'))
| 5,350,942 |
def insert_rolling_mean_columns(data, column_list, window):
"""This function selects the columns of a dataframe
according to a provided list of strings, re-scales its
values and inserts a new column in the dataframe with the
rolling mean of each variable in the column list and the
provided window length.
Params:
data: original dataframe
column_list: list of columns to select
window: window length to calculate rolling mean
"""
scaler = MinMaxScaler()
data[column_list] = scaler.fit_transform(data[column_list])
for var in column_list:
data[var + "_RollingMean"] = data[var].rolling(window).mean()
return data
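
# A minimal usage sketch with synthetic data (assumes pandas is imported as
# `pd` and MinMaxScaler comes from sklearn.preprocessing, as the function
# requires).
import pandas as pd

df = pd.DataFrame({"price": [1.0, 2.0, 3.0, 4.0, 5.0],
                   "volume": [10.0, 20.0, 30.0, 20.0, 10.0]})
out = insert_rolling_mean_columns(df, ["price", "volume"], window=2)
print(out[["price", "price_RollingMean"]])
# "price" is rescaled to [0, 1]; "price_RollingMean" is its 2-period rolling
# mean (NaN for the first row).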
| 5,350,943 |
def get_url_names():
""" Получение ссылок на контент
Returns:
Здесь - список файлов формата *.str
"""
files = ['srts/Iron Man02x26.srt', 'srts/Iron1and8.srt']
return files
| 5,350,944 |
def test_delete_password_policy_false():
"""Will delete an password policy (Wrong password).
:return: Should return: False
"""
syn = syncope.Syncope(syncope_url="http://192.168.1.145:9080", username="admin", password="passwrd")
assert syn.delete_password_policy("json") == False
| 5,350,945 |
def unix_utc_now() -> int:
"""
Return the number of seconds passed from January 1, 1970 UTC.
"""
delta = datetime.utcnow() - datetime(1970, 1, 1)
return int(delta.total_seconds())
| 5,350,946 |
def clear_ulog_cache():
""" clear/invalidate the ulog cache """
load_ulog_file.cache_clear()
| 5,350,947 |
def _preload_specific_vars(env_keys: Set[str]) -> Store:
"""Preloads env vars from environ in the given set."""
specified = {}
for env_name, env_value in environ.items():
if env_name not in env_keys:
# Skip vars that have not been requested.
continue
specified[env_name] = env_value
return specified
| 5,350,948 |
def _format_workflow_id(id):
"""
Add workflow prefix to and quote a tool ID.
Args:
id (str): ...
"""
id = urlparse.unquote(id)
if not re.search('^#workflow', id):
return urlparse.quote_plus('#workflow/{}'.format(id))
else:
return urlparse.quote_plus(id)
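
# A quick check (a sketch; assumes `urlparse` refers to urllib.parse, as the
# quote_plus/unquote usage suggests): both forms end up with the workflow prefix.
print(_format_workflow_id("my-tool"))            # %23workflow%2Fmy-tool
print(_format_workflow_id("#workflow/my-tool"))  # %23workflow%2Fmy-tool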
| 5,350,949 |
def calc_log_sum(Vals, sigma):
"""
Returns the optimal value given the choice specific value functions Vals.
Parameters
----------
Vals : [numpy.array]
A numpy.array that holds choice specific values at common grid points.
sigma : float
A number that controls the variance of the taste shocks
Returns
-------
V : [numpy.array]
A numpy.array that holds the integrated value function.
"""
# Assumes that NaNs have been replaced by -numpy.inf or similar
if sigma == 0.0:
# We could construct a linear index here and use unravel_index.
V = np.amax(Vals, axis=0)
return V
# else we have a taste shock
maxV = np.max(Vals, axis=0)
# calculate maxV+sigma*log(sum_i=1^J exp((V[i]-maxV))/sigma)
sumexp = np.sum(np.exp((Vals - maxV) / sigma), axis=0)
LogSumV = np.log(sumexp)
LogSumV = maxV + sigma * LogSumV
return LogSumV
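
# Worked example: with sigma == 0 the function reduces to an elementwise max
# over choices; with sigma > 0 it returns the smoothed (log-sum-exp) value,
# which is always at least the elementwise max.
import numpy as np
Vals = np.array([[1.0, 2.0, 0.5],
                 [0.5, 2.5, 0.0]])
print(calc_log_sum(Vals, sigma=0.0))  # [1.  2.5 0.5]
print(calc_log_sum(Vals, sigma=0.1))  # slightly above the elementwise max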
| 5,350,950 |
def comment_create(request, post_pk):
"""記事へのコメント作成"""
post = get_object_or_404(Post, pk=post_pk)
form = CommentForm(request.POST or None)
if request.method == 'POST':
comment = form.save(commit=False)
comment.post = post
comment.save()
return redirect('blog:post_detail', pk=post.pk)
context = {
'form': form,
'post': post
}
return render(request, 'blog/comment_form.html', context)
| 5,350,951 |
def tensor_from_var_2d_list(target, padding=0.0, max_len=None, requires_grad=True):
"""Convert a variable 2 level nested list to a tensor.
e.g. target = [[1, 2, 3], [4, 5, 6, 7, 8]]
"""
max_len_calc = max([len(batch) for batch in target])
if max_len is None:
max_len = max_len_calc
if max_len_calc > max_len:
print("Maximum length exceeded: {}>{}".format(max_len_calc, max_len))
target = [batch[:max_len] for batch in target]
padded = [batch + (max_len - len(batch)) * [padding] for batch in target]
return torch.tensor(padded, requires_grad=requires_grad)
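
# A minimal usage sketch (assumes PyTorch is imported as `torch`). Note the
# float values: requires_grad=True needs a floating-point tensor.
nested = [[1.0, 2.0, 3.0], [4.0, 5.0]]
print(tensor_from_var_2d_list(nested, padding=0.0))
# tensor([[1., 2., 3.],
#         [4., 5., 0.]], requires_grad=True)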
| 5,350,952 |
def make_w3(gateway_config=None):
"""
Create a Web3 instance configured and ready-to-use gateway to the blockchain.
:param gateway_config: Blockchain gateway configuration.
:type gateway_config: dict
:return: Configured Web3 instance.
:rtype: :class:`web3.Web3`
"""
if gateway_config is None or gateway_config['type'] == 'auto':
w3 = web3.Web3()
elif gateway_config['type'] == 'user':
request_kwargs = gateway_config.get('http_options', {})
w3 = web3.Web3(web3.Web3.HTTPProvider(gateway_config['http'], request_kwargs=request_kwargs))
elif gateway_config['type'] == 'infura':
request_kwargs = gateway_config.get('http_options', {})
project_id = gateway_config['key']
# project_secret = gateway_config['secret']
http_url = 'https://{}.infura.io/v3/{}'.format(gateway_config['network'], project_id)
w3 = web3.Web3(web3.Web3.HTTPProvider(http_url, request_kwargs=request_kwargs))
# https://web3py.readthedocs.io/en/stable/middleware.html#geth-style-proof-of-authority
if gateway_config.get('network', None) == 'rinkeby':
# This middleware is required to connect to geth --dev or the Rinkeby public network.
from web3.middleware import geth_poa_middleware
# inject the poa compatibility middleware to the innermost layer
w3.middleware_onion.inject(geth_poa_middleware, layer=0)
else:
raise RuntimeError('invalid blockchain gateway type "{}"'.format(gateway_config['type']))
return w3
| 5,350,953 |
def _log2_ratio_to_absolute(log2_ratio, ref_copies, expect_copies, purity=None):
"""Transform a log2 ratio to absolute linear scale (for an impure sample).
Does not round to an integer absolute value here.
Math::
log2_ratio = log2(ncopies / ploidy)
2^log2_ratio = ncopies / ploidy
ncopies = ploidy * 2^log2_ratio
With rescaling for purity::
let v = log2 ratio value, p = tumor purity,
r = reference ploidy, x = expected ploidy,
n = tumor ploidy ("ncopies" above);
v = log_2(p*n/r + (1-p)*x/r)
2^v = p*n/r + (1-p)*x/r
n*p/r = 2^v - (1-p)*x/r
n = (r*2^v - x*(1-p)) / p
If purity adjustment is skipped (p=1; e.g. if germline or if scaling for
heterogeneity was done beforehand)::
n = r*2^v
"""
if purity and purity < 1.0:
ncopies = (ref_copies * 2**log2_ratio - expect_copies * (1 - purity)
) / purity
else:
ncopies = _log2_ratio_to_absolute_pure(log2_ratio, ref_copies)
return ncopies
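
# Worked example of the purity-rescaling branch (pure arithmetic, so the
# sketch needs no other helpers): log2 ratio 0, diploid reference and
# expected ploidy, 60% tumor purity:
#   n = (r*2^v - x*(1-p)) / p = (2*1 - 2*0.4) / 0.6 = 2.0
print(_log2_ratio_to_absolute(0.0, ref_copies=2, expect_copies=2, purity=0.6))  # 2.0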
| 5,350,954 |
def test_eap_canned_failure_before_method(dev, apdev):
"""EAP protocol tests for canned EAP-Failure before any method"""
params = int_eap_server_params()
hapd = hostapd.add_ap(apdev[0], params)
bssid = apdev[0]['bssid']
hapd.request("SET ext_eapol_frame_io 1")
dev[0].connect("test-wpa2-eap", key_mgmt="WPA-EAP", scan_freq="2412",
phase1="allow_canned_success=1",
eap="MD5", identity="user", password="password",
wait_connect=False)
ev = hapd.wait_event(["EAPOL-TX"], timeout=10)
if ev is None:
raise Exception("Timeout on EAPOL-TX from hostapd")
res = dev[0].request("EAPOL_RX " + bssid + " 0200000404020004")
if "OK" not in res:
raise Exception("EAPOL_RX to wpa_supplicant failed")
ev = dev[0].wait_event(["CTRL-EVENT-EAP-FAILURE"], timeout=5)
if ev is None:
raise Exception("Timeout on EAP failure")
dev[0].request("REMOVE_NETWORK all")
dev[0].wait_disconnected()
| 5,350,955 |
def fixt():
"""
Create an Exchange object that will be re-used during testing.
"""
mesh = df.UnitCubeMesh(10, 10, 10)
S3 = df.VectorFunctionSpace(mesh, "DG", 0)
Ms = 1
A = 1
m = df.Function(S3)
exch = ExchangeDG(A)
exch.setup(S3, m, Ms)
return {"exch": exch, "m": m, "A": A, "S3": S3, "Ms": Ms}
| 5,350,956 |
def touch_emulator(ev, x, y):
"""
This emulates a touch-screen device, like a tablet or smartphone.
"""
if ev.type == pygame.MOUSEBUTTONDOWN:
if ev.button != 1:
return None, x, y
elif ev.type == pygame.MOUSEBUTTONUP:
if ev.button != 1:
return None, x, y
move = pygame.event.Event(pygame.MOUSEMOTION, { "pos" : (0, 0), "rel" : (0, 0), "buttons" : (0, 0, 0) })
renpy.display.interface.pushed_event = move
elif ev.type == pygame.MOUSEMOTION:
if not ev.buttons[0]:
x = 0
y = 0
elif ev.type == pygame.KEYDOWN:
if not ev.key in TOUCH_KEYS:
return None, x, y
elif ev.type == pygame.KEYUP:
if not ev.key in TOUCH_KEYS:
return None, x, y
return ev, x, y
| 5,350,957 |
def check_python_version() -> bool:
"""Check minimum Python version is being used.
Returns:
True if version is OK.
"""
if sys.version_info.major == 3 and sys.version_info.minor >= 6:
return True
logger.error(
"Aborting... Python version 3.6 or greater is required.\n"
f"Current Python version is {sys.version_info.major}.{sys.version_info.minor}."
)
return False
| 5,350,958 |
def output_matrix(matrix: list, padding_space: int):
"""
Build the status table shown by the sequencer.
Parameters
----------
matrix: list
padding_space: int
"""
max_field_length = []
for row in matrix:
for j, col in enumerate(row):
if matrix.index(row) == 0:
max_field_length.append(len(str(col)))
elif len(str(col)) > max_field_length[j]:
# Insert or update the first length
max_field_length[j] = len(str(col))
for row in matrix:
stringrow = ""
for j, col in enumerate(row):
lpadding = (max_field_length[j] - len(str(col))) * " "
rpadding = padding_space * " "
if isinstance(col, int):
# we got an integer, right aligned
stringrow += f"{lpadding}{col}{rpadding}"
else:
# should be a string, left aligned
stringrow += f"{col}{lpadding}{rpadding}"
log.info(stringrow)
| 5,350,959 |
def invert_dict(d):
"""
Invert dictionary by switching keys and values.
Parameters
----------
d : dict
python dictionary
Returns
-------
dict
Inverted python dictionary
"""
return dict((v, k) for k, v in d.items())
| 5,350,960 |
def run_trials(p, args):
"""
Run trials.
"""
# Model
m = p['model']
# Number of trials
try:
ntrials = int(args[0])
except:
ntrials = 100
ntrials *= m.nconditions
# RNN
rng = np.random.RandomState(p['seed'])
rnn = RNN(p['savefile'], {'dt': p['dt']}, verbose=False)
# Trials
w = len(str(ntrials))
trials = []
backspaces = 0
try:
for i in range(ntrials):  # xrange is Python 2 only
b = i % m.nconditions
k1, k2 = tasktools.unravel_index(b, (len(m.modalities), len(m.freqs)))
modality = m.modalities[k1]
freq = m.freqs[k2]
# Trial
trial_func = m.generate_trial
trial_args = {
'name': 'test',
'catch': False,
'modality': modality,
'freq': freq
}
info = rnn.run(inputs=(trial_func, trial_args), rng=rng)
# Display trial type
if info['modality'] == 'v':
s = "Trial {:>{}}/{}: v |{:>2}".format(i+1, w, ntrials, info['freq'])
elif info['modality'] == 'a':
s = "Trial {:>{}}/{}: a|{:>2}".format(i+1, w, ntrials, info['freq'])
else:
s = "Trial {:>{}}/{}: va|{:>2}".format(i+1, w, ntrials, info['freq'])
sys.stdout.write(backspaces*'\b' + s)
sys.stdout.flush()
backspaces = len(s)
# Save
dt = rnn.t[1] - rnn.t[0]
step = int(p['dt_save']/dt)
trial = {
't': rnn.t[::step],
'u': rnn.u[:,::step],
'r': rnn.r[:,::step],
'z': rnn.z[:,::step],
'info': info
}
trials.append(trial)
except KeyboardInterrupt:
pass
print("")
# Save all
filename = get_trialsfile(p)
with open(filename, 'wb') as f:
pickle.dump(trials, f, pickle.HIGHEST_PROTOCOL)
size = os.path.getsize(filename)*1e-9
print("[ {}.run_trials ] Trials saved to {} ({:.1f} GB)".format(THIS, filename, size))
# Compute the psychometric function
psychometric_function(filename)
| 5,350,961 |
def init_output_dir(output_dir, force_overwrite):
"""Create output directory (and all intermediate dirs on the path) if it doesn't exist.
Args:
output_dir (str): output directory path.
force_overwrite (bool): If False and output dir is complete, raise RuntimeError.
Raises:
RuntimeError if overwrite option is not enabled and output dir contains "DONE" signal file.
"""
if not force_overwrite and is_done(output_dir):
raise RuntimeError(f"'{output_dir}' run is already done, and not forcing overwrite")
os.makedirs(output_dir, exist_ok=True)
| 5,350,962 |
def atom_present_in_geom(geom, b, tol=DEFAULT_SYM_TOL):
"""Function used by set_full_point_group() to scan a given geometry
and determine if an atom is present at a given location.
"""
for i in range(len(geom)):
a = [geom[i][0], geom[i][1], geom[i][2]]
if distance(b, a) < tol:
return True
return False
| 5,350,963 |
def atof(value):
"""
locale.atof() on unicode string fails in some environments, like Czech.
"""
if isinstance(value, unicode):
value = value.encode("utf-8")
return locale.atof(value)
| 5,350,964 |
async def async_setup_entry(hass, config_entry, async_add_entities):
"""Set up the Fibaro covers."""
async_add_entities(
[FibaroCover(device) for device in hass.data[FIBARO_DEVICES]["cover"]], True
)
| 5,350,965 |
def r_group_list(mol, core_mol):
"""
This takes a mol and the common core and finds all the R-groups by
replacing the atoms in the ligand (which make up the common core) with
nothing.
This fragments the ligand and from those fragments we are able to
determine what our R-groups are. for any common core atom which touched
the fragment a * will replace that atom in the fragments.
Inputs:
:param rdkit.Chem.rdchem.Mol mol: an rdkit molecule
:param rdkit.Chem.rdchem.Mol core_mol: an rdkit molecule for the shared
common core
Returns:
:returns: rdkit.Chem.rdchem.Mol replace_core_mol: an rdkit molecule with
the common core removed from a ligand fragments the mol which can be used
to make lists of R-groups
"""
# This returns all the mol frags for a particular compound against the
# core molecule
replace_core_mol = Chem.ReplaceCore(
mol, core_mol, labelByIndex=True, replaceDummies=True, requireDummyMatch=False
)
if len(replace_core_mol.GetAtoms()) == 0:
# This means that the mol either did not contain the core_mol or the
# core_mol is the same mol as the mol. ie) if mol_string
# ="[10000N-]=[10001N+]=[10002N][10003CH]1[10004O][10005CH]([10006CH2][10007OH])[10008CH]([10013OH])[10009CH]([10012OH])[10010CH]1[10011OH]"
# and core_string
# ="[10000NH]=[10001N+]=[10002N][10003CH]1[10004O][10005CH]([10006CH2][10007OH])[10008CH]([10013OH])[10009CH]([10012OH])[10010CH]1[10011OH]"
# the only difference is the H's which means it can be replaced within
# because its the same mol This is rare but does occur.
return None
return replace_core_mol
| 5,350,966 |
def set_execution_target(backend_id='simulator'):
"""
Used to run jobs on real hardware.
:param backend_id: device name. The list of available devices depends on the provider.
Example usage:
set_execution_target(backend_id='arn:aws:braket:::device/quantum-simulator/amazon/sv1')
"""
global device
if backend_id == None or backend_id == "":
device = None
elif backend_id == "simulator":
device = LocalSimulator()
else:
device = AwsDevice(backend_id)
if verbose:
print(f"... using Braket device = {device}")
# create an informative device name
device_name = device.name
device_str = str(device)
if device_str.find(":device/") > 0:
idx = device_str.rindex(":device/")
device_name = device_str[idx+8:-1]
metrics.set_plot_subtitle(f"Device = {device_name}")
return device
| 5,350,967 |
def add_submissions(contest_name, task_name, username, items):
"""
Add submissions from the given user to the given task
in the given contest. Each item corresponds to a submission,
and should contain a dictionary which maps formatted file names
to paths. For example, in batch tasks the format is "Task.%l",
so one submission would be {"Task.%l": "path/to/task.cpp"}.
"""
# We connect to evaluation service to try and notify it about
# the new submissions. Otherwise, it will pick it up only on
# the next sweep for missed operations.
rs = RemoteServiceClient(ServiceCoord("EvaluationService", 0))
rs.connect()
with SessionGen() as session:
user = get_user(session, username)
contest = get_contest(session, contest_name)
participation = get_participation(session, contest, user)
task = get_task(session, task_name, contest)
elements = set(format_element.filename for format_element in
task.submission_format)
file_cacher = FileCacher()
# We go over all submissions twice. First we validate the
# submission format.
for submission_dict in items:
for (format_file_name, path) in submission_dict.items():  # dict.iteritems() is Python 2 only
if format_file_name not in elements:
raise Exception("Unexpected submission file: %s. "
"Expected elements: %s" %
(format_file_name, elements))
if not os.path.isfile(path):
raise Exception("File not found: %s" % path)
# Now add to database.
for submission_dict in items:
if not submission_dict:
continue
timestamp = time.time()
file_digests = {}
language_name = None
for (format_file_name, path) in submission_dict.items():
digest = file_cacher.put_file_from_path(
path,
"Submission file %s sent by %s at %d."
% (path, username, timestamp))
file_digests[format_file_name] = digest
current_language = filename_to_language(path)
if current_language is not None:
language_name = current_language.name
submission = Submission(make_datetime(timestamp), language_name,
participation=participation, task=task)
for filename, digest in file_digests.items():
session.add(File(filename, digest, submission=submission))
session.add(submission)
session.commit()
rs.new_submission(submission_id=submission.id)
rs.disconnect()
| 5,350,968 |
def multiply_table_f(x):
""" Takes an input and creates a multiplication table - uses f''
Parameters
___________
:param x: int: a number
Returns
___________
:return: Returns a multiplication table
"""
for i in range(1,x+1):
print(f'{x} * {i} = {x*i}')
return
| 5,350,969 |
def segregate(str):
"""3.1 Basic code point segregation"""
base = bytearray()
extended = set()
for c in str:
if ord(c) < 128:
base.append(ord(c))
else:
extended.add(c)
extended = sorted(extended)
return bytes(base), extended
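
# Example (RFC 3492, section 3.1): ASCII code points form the base string,
# the remaining code points become the sorted extended set.
print(segregate("bücher"))  # (b'bcher', ['ü'])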
| 5,350,970 |
def count_tetrasomic_indivs(lis) -> dict:
""" Count number of times that a chromosome is tetrasomic (present in four copies)
:returns counts_of_tetrasomic_chromosomes"""
counts_of_tetrasomic_chromosomes = {k: 0 for k in chr_range}
for kary_group in lis:
for index, chr_type in enumerate(chr_range):
if kary_group[index // 2].count(chr_type) == 4:
counts_of_tetrasomic_chromosomes[chr_type] += 1
return counts_of_tetrasomic_chromosomes
| 5,350,971 |
def test_feature_multiStrategies_c_unknown(unleash_client):
"""
Feature.multiStrategies.C disabled for unknown users
"""
# Set up API
responses.add(responses.POST, URL + REGISTER_URL, json={}, status=202)
responses.add(responses.GET, URL + FEATURES_URL, json=json.loads(MOCK_JSON), status=200)
responses.add(responses.POST, URL + METRICS_URL, json={}, status=202)
# Tests
context = {}
unleash_client.initialize_client()
assert not unleash_client.is_enabled("Feature.multiStrategies.C", context)
| 5,350,972 |
def make_image_grid(images: np.ndarray, nrow: int = 1) -> np.ndarray:
"""Concatenate multiple images into a single image.
Args:
images (np.array):
Images can be:
- A 4D mini-batch image of shape [B, C, H, W].
- A 3D RGB image of shape [C, H, W].
- A 2D grayscale image of shape [H, W].
nrow (int):
Number of images in each row of the grid. Final grid size is
`[B / nrow, nrow]`. Default: `1`.
Returns:
cat_image (np.ndarray):
Concatenated image.
"""
# NOTE: Type checking
if images.ndim == 3:
return images
# NOTE: Conversion (just for sure)
if is_channel_first(images):
images = to_channel_last(images)
b, c, h, w = images.shape
ncols = nrow
nrows = (b // nrow) if (b // nrow) > 0 else 1
cat_image = np.zeros((c, int(h * nrows), w * ncols))
for idx, im in enumerate(images):
j = idx // ncols
i = idx % ncols
cat_image[:, j * h: j * h + h, i * w: i * w + w] = im
return cat_image
| 5,350,973 |
def drive_time_shapes(drive_time):
"""Simplify JSON response into a dictionary of point lists."""
isochrones = {}
try:
for shape in drive_time['response']['isoline']:
uid = str(int(shape['range'] / 60)) + ' minutes'
points = shape['component'][0]['shape']
point_list = array_to_points(points)
isochrones[uid] = point_list
except KeyError:
print(drive_time)
return isochrones
| 5,350,974 |
def compute_transformation_sequence_case_1(cumprod, local_shape, ind,
sharded_leg_pos, pgrid):
"""
Helper function for `pravel`, see `pravel` for more details.
"""
ops = []
ndev = np.prod(pgrid)
if ndev % cumprod[ind - 1] != 0:
raise ValueError("reshaping not possible")
remainder = ndev // cumprod[ind - 1]
# the local leg has to be divisible by the remainder,
# otherwise we can't place the sharded legs that need to be
# localized at their respective positions
if local_shape[sharded_leg_pos] % remainder != 0:
raise ValueError(
f"tensor.shape[{sharded_leg_pos}] = {local_shape[sharded_leg_pos]}"
f" is not divisible by a local remainder of {remainder}. "
f"Try using a different shape for the input tensor")
if np.prod(local_shape[sharded_leg_pos:]) % remainder != 0:
raise ValueError("reshaping not possible 2")
# the first index group contains all legs that are going to be sharded
# the second index group contain is swapped with the currently sharded legs
# the third group remains unchanged
orig_left_shape = tuple(local_shape[:sharded_leg_pos],) + (remainder,)
orig_right_shape = (local_shape[sharded_leg_pos] // remainder,) + tuple(
local_shape[sharded_leg_pos + 1:])
shape_1 = orig_left_shape + (ndev,) + (np.prod(orig_right_shape) // ndev,)
ops.append(('reshape', [local_shape, shape_1]))
ops.append(('pswapaxes', {
'axis_name': AXIS_NAME,
'axis': sharded_leg_pos + 1
}))
# the previously sharded legs are now localized at position
# sharded_leg_pos + 1 we now split off the legs that need
# to be distributed again and move them to the right of their
# corresponding local legs
shape_2 = orig_left_shape + tuple(pgrid) + (
np.prod(orig_right_shape) // ndev,)
l = list(range(len(shape_2)))
left = l[:len(orig_left_shape)]
right = l[len(orig_left_shape):]
perm_1 = misc.flatten([[r, l] for l, r in zip(left, right[:len(left)])
]) + right[len(left):]
shape_3 = (np.prod(shape_2[:2 * len(orig_left_shape)]),) + tuple(
shape_2[2 * len(orig_left_shape):])
ops.append(('reshape', [shape_1, shape_2]))
ops.append(('transpose', perm_1))
perm_shape_2 = [shape_2[p] for p in perm_1]
ops.append(('reshape', [perm_shape_2, shape_3]))
ops.append(('pswapaxes', {'axis_name': AXIS_NAME, 'axis': 0}))
# swap the first local axis with the sharded one
# now we have the harded legs in the right order
# next we need to fix the order of the localized legs
perm_2 = list(range(1, len(
pgrid[sharded_leg_pos:]))) + [0] + [len(pgrid[sharded_leg_pos:])]
shape_4 = tuple(pgrid[sharded_leg_pos + 1:]) + orig_right_shape
ops.append(('transpose', perm_2))
perm_shape_3 = [shape_3[p] for p in perm_2]
ops.append(('reshape', [perm_shape_3, shape_4]))
p = len(pgrid[sharded_leg_pos + 1:])
left = list(range(p))
right = list(range(p + 1, len(shape_4)))
perm_3 = [p] + misc.flatten([[l, r] for l, r in zip(left, right[:len(left)])
]) + right[len(left):]
ops.append(('transpose', perm_3))
perm_shape_4 = [shape_4[p] for p in perm_3]
shape_5 = misc.maybe_ravel_shape(perm_shape_4)
ops.append(('reshape', [perm_shape_4, shape_5]))
return ops
| 5,350,975 |
def fix_join_words(text: str) -> str:
"""
Replace all join ``urdu`` words with separate words
Args:
text (str): raw ``urdu`` text
Returns:
str: returns a ``str`` object containing normalized text.
"""
for key, value in WORDS_SPACE.items():
text = text.replace(key, value)
return text
| 5,350,976 |
def degree_of_appropriateness(attr_list,data_list,summarizers,summarizer_type,t3,letter_map_list,alpha_sizes,age,activity_level,flag=None,goals=None):
"""
Inputs:
- data: the database
- summarizer: the conclusive phrase of the summary
- summarizer_type: the type of summarizer
- t3: the degree of covering
- letter_map: a mapping from letters to integers
- alpha_size: the alphabet size
- age: the user's age
- activity_level: the user's activity level
Outputs: the degree of appropriateness
Purpose: to calculate the degree of appropriateness, which describes how characteristic the found summary is for the particular database; this helps avoid outputting trivial summaries
"""
# TODO: Factor this in once we have multiple attributes
#t4_list = []
# Day count
r_list = []
#print(data_list)
#input([summarizers,attr_list])
#print("degree")
#print(summarizers)
try:
n = len(data_list[0])
except TypeError:
data_list = [data_list]
n = len(data_list)
#if "Pattern Recognition" in summarizer_type:
#n = len(data_list)
#print(flag)
for j in range(len(summarizers)):
t_k = []
summarizer = summarizers[j]
for i in range(n):
#print(j,i)
if flag == "compare":
if (i == 0 and "Pattern Recognition" not in summarizer_type): #or ("Pattern Recognition" in summarizer_type and ((i+1)%2==0 or i == len(data_list[j])-1)):
continue
#print(i)
prev_letter = data_list[j][i-1]
curr_letter = data_list[j][i]
if "Pattern Recognition" in summarizer_type:
#print(data_list,j,i)
prev_letter = data_list[j][i][0]
curr_letter = data_list[j][i][1]
#print(prev_letter,curr_letter)
goal_list, conclusion_list = compare_SAX(attr_list[j],prev_letter,curr_letter,summarizer_type,letter_map_list[j])
conclusion = conclusion_list[0]
if "Pattern Recognition" in summarizer_type:
summarizer_map = { "better" : "rose",
"about the same" : "stayed the same",
"not do as well" : "dropped"}
conclusion = summarizer_map[conclusion_list[0]]
#print(prev_letter,curr_letter,conclusion,summarizer)
if conclusion == summarizer:
t_k.append(1)
else:
t_k.append(0)
elif flag == "compareHR":
#if i%7 != 0 or i < 7:
#continue
#curr_tw = data_list[j][i:i+7]
#last_tw = data_list[j][i-7:i]
if "Pattern Recognition" in summarizer_type:
last_tw = [data_list[j][i][0]]
curr_tw = [data_list[j][i][1]]
else:
curr_tw = [data_list[j][i]]
last_tw = [data_list[j][i-1]]
#print(curr_tw,last_tw,flag)
summary, conclusion, goal_list = comparison_TW_SAX_summary(summarizer_type,attr_list,last_tw,curr_tw,"weeks",letter_map_list,i-1,i,flag="eval")
#conclusion = compare_HR_TW(last_tw,curr_tw,age,activity_level)
conclusion_map = { 'lower' : 'dropped',
'higher' : 'rose',
'about the same' : 'stayed the same'}
#print(conclusion,summarizers)
#print(conclusion_map[conclusion[j]],summarizers[j])
if conclusion_map[conclusion[j]] == summarizer:
t_k.append(1)
else:
t_k.append(0)
elif flag == "compareACT":
if i==0:
continue
conclusion, pair_word = compareACT(data_list[j][i-1],data_list[j][i])
if summarizer == conclusion:
t_k.append(1)
else:
t_k.append(0)
elif flag == "HR" or (attr_list[j] == "Heart Rate" and "Trends" not in summarizer_type):
#print(hr_evaluation(data_list[j][i],age,activity_level),summarizer)
if hr_evaluation(data_list[j][i],age,activity_level) == summarizer:
t_k.append(1)
else:
t_k.append(0)
else:
curr_data = data_list[j][i]
if "If-then pattern" in summarizer_type:
start_index = 1
skip_index = 1
valid = True
extra = len(summarizer)
for k in range(start_index,len(summarizer)):
if i+k >= len(data_list[j]):
valid = False
break
#print(flag,summarizer,k,weekday_map)
if flag != None and summarizer[k] in weekday_map.keys():
#input(weekday_map.keys())
curr_data += str(weekday_map[flag[i+k]])
else:
curr_data += data_list[j][i+k]
if not valid or len(summarizer)==0:
continue
goal = None
if goals != None and None not in goals:
try:
goal = goals[j][i]
except IndexError:
#goal = goals[j]
try:
goal = goals[j]
except IndexError:
goals = goals[0]
try:
goal = goals[j][i]
except IndexError:
goal = goals[j]
if get_muS(attr_list[j],summarizer_type,summarizer,curr_data,letter_map_list[j],alpha_sizes[j],flag=flag,goal_=goal):
t_k.append(1)
else:
t_k.append(0)
if sum(t_k)==0:
r_k = 1
else:
r_k = sum(t_k)/float(len(t_k))
r_list.append(r_k)
r = 1
for i in range(len(r_list)):
r *= r_list[i]
return abs(r - t3)
| 5,350,977 |
def update_task_prices(task, discounts):
"""Update prices in task with given discounts and proper taxes."""
taxes = get_taxes_for_address(task.delivery_address)
for line in task:
if line.variant:
line.unit_price = line.variant.get_price(discounts, taxes)
line.tax_rate = get_tax_rate_by_name(
line.variant.skill.tax_rate, taxes)
line.save()
if task.delivery_method:
task.delivery_price = task.delivery_method.get_total(taxes)
task.save()
recalculate_order(task)
| 5,350,978 |
def constitutive_exp_normalization_raw(gene_db,constitutive_gene_db,array_raw_group_values,exon_db,y,avg_const_exp_db):
"""normalize expression for raw expression data (only for non-baseline data)"""
#avg_true_const_exp_db[affygene] = [avg_const_exp]
temp_avg_const_exp_db={}
for probeset in array_raw_group_values:
        conditions = len(array_raw_group_values[probeset][y]); break #number of raw expression values to normalize
for affygene in gene_db:
###This is blank when there are no constitutive or the above condition is implemented
if affygene in constitutive_gene_db:
probeset_list = constitutive_gene_db[affygene]
z = 1
else: ###so we can analyze splicing independent of gene expression even if no 'gene' feature is present
probeset_list = gene_db[affygene]
z = 0
x = 0
while x < conditions:
            ### average all exp values for constitutive probesets for each condition
exp_list=[]
for probeset in probeset_list:
try: exp_val = array_raw_group_values[probeset][y][x] ### try statement is used for constitutive probes that were deleted due to filtering in performExpressionAnalysis
except KeyError: continue
exp_list.append(exp_val)
try: avg_const_exp = statistics.avg(exp_list)
except Exception: avg_const_exp = 'null'
if only_include_constitutive_containing_genes == 'yes' and avg_const_exp != 'null':
if z == 1:
try: avg_const_exp_db[affygene].append(avg_const_exp)
except KeyError: avg_const_exp_db[affygene] = [avg_const_exp]
try: temp_avg_const_exp_db[affygene].append(avg_const_exp)
except KeyError: temp_avg_const_exp_db[affygene] = [avg_const_exp]
elif avg_const_exp != 'null': ###***
try: avg_const_exp_db[affygene].append(avg_const_exp)
except KeyError: avg_const_exp_db[affygene] = [avg_const_exp]
try: temp_avg_const_exp_db[affygene].append(avg_const_exp)
except KeyError: temp_avg_const_exp_db[affygene] = [avg_const_exp]
x += 1
if analysis_method == 'ANOVA':
global normalized_raw_exp_ratios; normalized_raw_exp_ratios = {}
for affygene in gene_db:
probeset_list = gene_db[affygene]
for probeset in probeset_list:
                x = 0
                while x < group_size:
new_ratios = [] ### Calculate expression ratios relative to constitutive expression
exp_val = array_raw_group_values[probeset][y][x]
const_exp_val = temp_avg_const_exp_db[affygene][x]
###Since the above dictionary is agglomerating all constitutive expression values for permutation,
###we need an unbiased way to grab just those relevant const. exp. vals. (hence the temp dictionary)
#non_log_exp_val = statistics.log_fold_conversion_fraction(exp_val)
#non_log_const_exp_val = statistics.log_fold_conversion_fraction(const_exp_val)
#non_log_exp_ratio = non_log_exp_val/non_log_const_exp_val
log_exp_ratio = exp_val - const_exp_val
                    try: normalized_raw_exp_ratios[probeset].append(log_exp_ratio)
                    except KeyError: normalized_raw_exp_ratios[probeset] = [log_exp_ratio]
                    x += 1
return avg_const_exp_db
| 5,350,979 |
def aws_env_variables(monkeypatch):
"""
When running in AWS Lambda, there are some environment variables that AWS creates and the tracer uses.
This fixture creates those environment variables.
"""
monkeypatch.setenv(
"_X_AMZN_TRACE_ID",
"RequestId: 4365921c-fc6d-4745-9f00-9fe9c516ede5 Root=1-000044d4-c3881e0c19c02c5e6ffa8f9e;Parent=37cf579525dfb3ba;Sampled=0",
)
| 5,350,980 |
def parse_rummager_topics(results):
"""
Parse topics from rummager results
"""
pages = []
for result in results:
pages.append(
Topic(
name=result['title'],
base_path=result['slug'],
document_type=DocumentType[result['format']]
)
)
return pages
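
# Illustrative input shape, inferred from the keys accessed above (a real
# rummager/search API response may carry additional fields):
#   results = [{'title': 'Tax', 'slug': '/browse/tax', 'format': 'topic'}]
#   pages = parse_rummager_topics(results)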
| 5,350,981 |
def test_get_all_links() -> None:
"""Test get_all_links func by adding
    a link and retrieving it"""
delete_all_collections_datas()
prep_db_if_not_exist()
node_name: str = "test_node"
neigh_name: str = "test_neigh"
iface_name: str = "e1/1"
neigh_iface_name: str = "e2/1"
add_link(node_name, neigh_name, iface_name, neigh_iface_name)
links: List[Dict[str, Any]] = list(get_all_links())
assert len(links) == 1
assert links[0] == {
"device_name": node_name,
"neighbor_name": neigh_name,
"iface_name": iface_name,
"neighbor_iface": neigh_iface_name,
"iface_descr": "",
"neighbor_iface_descr": "",
}
| 5,350,982 |
def format_last_online(last_online):
"""
Return the upper limit in seconds that a profile may have been
online. If last_online is an int, return that int. Otherwise if
last_online is a str, convert the string into an int.
Returns
----------
int
"""
if isinstance(last_online, str):
if last_online.lower() in ('day', 'today'):
last_online_int = 86400 # 3600 * 24
elif last_online.lower() == 'week':
last_online_int = 604800 # 3600 * 24 * 7
elif last_online.lower() == 'month':
last_online_int = 2678400 # 3600 * 24 * 31
elif last_online.lower() == 'year':
            last_online_int = 31536000 # 3600 * 24 * 365
elif last_online.lower() == 'decade':
            last_online_int = 315360000 # 3600 * 24 * 365 * 10
else: # Defaults any other strings to last hour
last_online_int = 3600
else:
last_online_int = last_online
return last_online_int
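
# Illustrative self-checks (not part of the original module): the string
# buckets map onto fixed second counts, and ints are returned unchanged.
assert format_last_online('week') == 604800    # 3600 * 24 * 7
assert format_last_online('year') == 31536000  # 3600 * 24 * 365
assert format_last_online(7200) == 7200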
| 5,350,983 |
def plot_inspection_data(table: pd.DataFrame, title: str, ylabel: str, decimals: int = 0) -> None:
"""
plots the inspection data for inspection tear sheets
:param table: the table to plot
:param title: the title for the plot
:param ylabel: y label for plot
    :param decimals: number of decimal places to display on the Y axis
:return: None
"""
fig, ax = plt.subplots(1, 1, figsize=MEDIUM_FIGSIZE)
ax.set(title=title, ylabel=ylabel)
table.plot(lw=2, ax=ax, cmap=RETURN_COLOR_MAP)
ax.legend(loc="center left", bbox_to_anchor=(1, .5))
# ax.xaxis.set_major_formatter(mpl.dates.DateFormatter('%m-%Y'))
ax.yaxis.set_major_formatter(mpl.ticker.FormatStrFormatter(f'%.{decimals}f'))
fig.autofmt_xdate()
if isinstance(table, pd.Series):
ax.get_legend().remove()
plt.show()
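
# Hypothetical call site (the DataFrame and labels are placeholders;
# MEDIUM_FIGSIZE and RETURN_COLOR_MAP are module-level constants defined
# elsewhere in the original project):
#   plot_inspection_data(returns_df, title='Cumulative returns',
#                        ylabel='Return', decimals=2)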
| 5,350,984 |
def get_currency_cross_historical_data(currency_cross, from_date, to_date, as_json=False, order='ascending', interval='Daily'):
"""
This function retrieves recent historical data from the introduced `currency_cross` from Investing
via Web Scraping. The resulting data can it either be stored in a :obj:`pandas.DataFrame` or in a
:obj:`json` file, with `ascending` or `descending` order.
Args:
currency_cross (:obj:`str`): name of the currency cross to retrieve recent historical data from.
from_date (:obj:`str`): date as `str` formatted as `dd/mm/yyyy`, from where data is going to be retrieved.
to_date (:obj:`str`): date as `str` formatted as `dd/mm/yyyy`, until where data is going to be retrieved.
as_json (:obj:`bool`, optional):
optional argument to determine the format of the output data (:obj:`pandas.DataFrame` or :obj:`json`).
order (:obj:`str`, optional):
optional argument to define the order of the retrieved data (`ascending`, `asc` or `descending`, `desc`).
interval (:obj:`str`, optional):
value to define the historical data interval to retrieve, by default `Daily`, but it can also be `Weekly` or `Monthly`.
Returns:
:obj:`pandas.DataFrame` or :obj:`json`:
            The function returns either a :obj:`pandas.DataFrame` or a :obj:`json` file containing the retrieved
            historical data for the specified currency_cross. The dataset contains the open, high, low and close
            values for the selected currency_cross on market days.
            The returned data, in case we use default arguments, will look like::
Date || Open | High | Low | Close | Currency
-----||------|------|-----|-------|---------
xxxx || xxxx | xxxx | xxx | xxxxx | xxxxxxxx
but if we define `as_json=True`, then the output will be::
{
name: name,
recent: [
dd/mm/yyyy: {
'open': x,
'high': x,
'low': x,
'close': x,
'currency' : x
},
...
]
}
Raises:
ValueError: argument error.
        IOError: currency_crosses object/file not found or unable to retrieve.
RuntimeError: introduced currency_cross does not match any of the indexed ones.
ConnectionError: if GET requests does not return 200 status code.
IndexError: if currency_cross information was unavailable or not found.
Examples:
>>> investpy.get_currency_cross_historical_data(currency_cross='EUR/USD', from_date='01/01/2018', to_date='01/01/2019')
Open High Low Close Currency
Date
2018-01-01 1.2003 1.2014 1.1995 1.2010 USD
2018-01-02 1.2013 1.2084 1.2003 1.2059 USD
2018-01-03 1.2058 1.2070 1.2001 1.2014 USD
2018-01-04 1.2015 1.2090 1.2004 1.2068 USD
2018-01-05 1.2068 1.2085 1.2021 1.2030 USD
"""
if not currency_cross:
raise ValueError("ERR#0052: currency_cross param is mandatory and should be a str.")
if not isinstance(currency_cross, str):
raise ValueError("ERR#0052: currency_cross param is mandatory and should be a str.")
try:
datetime.strptime(from_date, '%d/%m/%Y')
except ValueError:
raise ValueError("ERR#0011: incorrect data format, it should be 'dd/mm/yyyy'.")
try:
datetime.strptime(to_date, '%d/%m/%Y')
except ValueError:
raise ValueError("ERR#0011: incorrect data format, it should be 'dd/mm/yyyy'.")
start_date = datetime.strptime(from_date, '%d/%m/%Y')
end_date = datetime.strptime(to_date, '%d/%m/%Y')
if start_date >= end_date:
raise ValueError("ERR#0032: to_date should be greater than from_date, both formatted as 'dd/mm/yyyy'.")
if not isinstance(as_json, bool):
raise ValueError("ERR#0002: as_json argument can just be True or False, bool type.")
if order not in ['ascending', 'asc', 'descending', 'desc']:
raise ValueError("ERR#0003: order argument can just be ascending (asc) or descending (desc), str type.")
if not interval:
raise ValueError("ERR#0073: interval value should be a str type and it can just be either 'Daily', 'Weekly' or 'Monthly'.")
if not isinstance(interval, str):
raise ValueError("ERR#0073: interval value should be a str type and it can just be either 'Daily', 'Weekly' or 'Monthly'.")
if interval not in ['Daily', 'Weekly', 'Monthly']:
raise ValueError("ERR#0073: interval value should be a str type and it can just be either 'Daily', 'Weekly' or 'Monthly'.")
date_interval = {
'intervals': [],
}
flag = True
while flag is True:
diff = end_date.year - start_date.year
if diff > 20:
obj = {
'start': start_date.strftime('%m/%d/%Y'),
'end': start_date.replace(year=start_date.year + 20).strftime('%m/%d/%Y'),
}
date_interval['intervals'].append(obj)
start_date = start_date.replace(year=start_date.year + 20)
else:
obj = {
'start': start_date.strftime('%m/%d/%Y'),
'end': end_date.strftime('%m/%d/%Y'),
}
date_interval['intervals'].append(obj)
flag = False
interval_limit = len(date_interval['intervals'])
interval_counter = 0
data_flag = False
resource_package = 'investpy'
resource_path = '/'.join(('resources', 'currency_crosses', 'currency_crosses.csv'))
if pkg_resources.resource_exists(resource_package, resource_path):
currency_crosses = pd.read_csv(pkg_resources.resource_filename(resource_package, resource_path))
else:
raise FileNotFoundError("ERR#0060: currency_crosses file not found or errored.")
if currency_crosses is None:
raise IOError("ERR#0050: currency_crosses not found or unable to retrieve.")
currency_cross = currency_cross.strip()
currency_cross = currency_cross.lower()
if unidecode.unidecode(currency_cross) not in [unidecode.unidecode(value.lower()) for value in currency_crosses['name'].tolist()]:
raise RuntimeError("ERR#0054: the introduced currency_cross " + str(currency_cross) + " does not exists.")
id_ = currency_crosses.loc[(currency_crosses['name'].str.lower() == currency_cross).idxmax(), 'id']
name = currency_crosses.loc[(currency_crosses['name'].str.lower() == currency_cross).idxmax(), 'name']
currency = currency_crosses.loc[(currency_crosses['name'].str.lower() == currency_cross).idxmax(), 'second']
final = list()
header = name + ' Historical Data'
for index in range(len(date_interval['intervals'])):
interval_counter += 1
params = {
"curr_id": id_,
"smlID": str(randint(1000000, 99999999)),
"header": header,
"st_date": date_interval['intervals'][index]['start'],
"end_date": date_interval['intervals'][index]['end'],
"interval_sec": interval,
"sort_col": "date",
"sort_ord": "DESC",
"action": "historical_data"
}
head = {
"User-Agent": get_random(),
"X-Requested-With": "XMLHttpRequest",
"Accept": "text/html",
"Accept-Encoding": "gzip, deflate, br",
"Connection": "keep-alive",
}
url = "https://www.investing.com/instruments/HistoricalDataAjax"
req = requests.post(url, headers=head, data=params)
if req.status_code != 200:
raise ConnectionError("ERR#0015: error " + str(req.status_code) + ", try again later.")
root_ = fromstring(req.text)
path_ = root_.xpath(".//table[@id='curr_table']/tbody/tr")
result = list()
if path_:
for elements_ in path_:
info = []
for nested_ in elements_.xpath(".//td"):
info.append(nested_.get('data-real-value'))
if elements_.xpath(".//td")[0].text_content() == 'No results found':
if interval_counter < interval_limit:
data_flag = False
else:
raise IndexError("ERR#0055: currency_cross information unavailable or not found.")
else:
data_flag = True
if data_flag is True:
currency_cross_date = datetime.fromtimestamp(int(info[0]))
currency_cross_date = date(currency_cross_date.year, currency_cross_date.month, currency_cross_date.day)
currency_cross_close = float(info[1].replace(',', ''))
currency_cross_open = float(info[2].replace(',', ''))
currency_cross_high = float(info[3].replace(',', ''))
currency_cross_low = float(info[4].replace(',', ''))
result.insert(len(result),
Data(currency_cross_date, currency_cross_open, currency_cross_high, currency_cross_low,
currency_cross_close, None, currency))
if data_flag is True:
if order in ['ascending', 'asc']:
result = result[::-1]
elif order in ['descending', 'desc']:
result = result
if as_json is True:
json_ = {'name': name,
'historical':
[value.currency_cross_as_json() for value in result]
}
final.append(json_)
elif as_json is False:
df = pd.DataFrame.from_records([value.currency_cross_to_dict() for value in result])
df.set_index('Date', inplace=True)
final.append(df)
else:
raise RuntimeError("ERR#0004: data retrieval error while scraping.")
if as_json is True:
return json.dumps(final[0], sort_keys=False)
elif as_json is False:
return pd.concat(final)
| 5,350,985 |
def gen_project(project_name, project_revision, target, template, working_dir):
"""Generate and compiles the project defined in make_project.tcl.
Parameters
----------
working_dir : string
Working directory of the generation process
"""
gen_project_tcl(project_name, project_revision,
target, template, working_dir)
qsys_files = filter(lambda file: file.endswith(".qsys"), target.files_list)
for file in qsys_files:
gen_qsys_system_from_qsys_file(file, working_dir)
log_msg = "Generating project"
cmd = f"cd {working_dir} && {QUARTUS_BIN_DIR}quartus_sh -t make_project.tcl"
log_file_path = working_dir + "project_gen.log"
run_cmd_and_log(cmd, log_msg, log_file_path)
| 5,350,986 |
def load_target(target_name, base_dir, cloud=False):
"""load_target load target from local or cloud
Parameters
----------
target_name : str
target name
base_dir : str
project base directory
cloud : bool, optional
load from GCS, by default False
Returns
-------
y_train: pd.DataFrame
target data
"""
if cloud:
y_train = load_cloud_target(target_name, base_dir)
else:
y_train = load_local_target(target_name, base_dir)
return y_train
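
# Hypothetical call site (file name and directory are placeholders):
#   y_train = load_target('target.csv', base_dir='./project', cloud=False)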
| 5,350,987 |
def test_aspect_transfer_function():
"""
Assert aspect transfer function
"""
da = xr.DataArray(data_gaussian, dims=['y', 'x'], attrs={'res': 1})
da_aspect = aspect(da)
# default name
assert da_aspect.name == 'aspect'
assert da_aspect.dims == da.dims
assert da_aspect.attrs == da.attrs
assert da.shape == da_aspect.shape
for coord in da.coords:
assert np.all(da[coord] == da_aspect[coord])
assert pytest.approx(np.nanmax(da_aspect.data), .1) == 360.
assert pytest.approx(np.nanmin(da_aspect.data), .1) == 0.
| 5,350,988 |
def gradient(pixmap, ca, cb, eff, ncols):
""" Returns a gradient width the start and end colors.
eff should be Gradient.Vertical or Gradient.Horizontal
"""
x=0
y=0
rca = ca.red()
rDiff = cb.red() - rca
gca = ca.green()
gDiff = cb.green() - gca
bca = ca.blue()
bDiff = cb.blue() - bca
rl = rca << 16
gl = gca << 16
bl = bca << 16
    # Per-step colour increments in 16.16 fixed point: the full colour
    # difference is spread evenly across the height (or width) of the pixmap.
    if eff == Gradient.Vertical:
        rcdelta = ((1 << 16) / pixmap.height()) * rDiff
        gcdelta = ((1 << 16) / pixmap.height()) * gDiff
        bcdelta = ((1 << 16) / pixmap.height()) * bDiff
    else:
        rcdelta = ((1 << 16) / pixmap.width()) * rDiff
        gcdelta = ((1 << 16) / pixmap.width()) * gDiff
        bcdelta = ((1 << 16) / pixmap.width()) * bDiff
    p = QPainter(pixmap)
    # these for-loops could be merged, but the if's in the inner loop
    # would make it slow
    if eff == Gradient.Vertical:
        for y in range(pixmap.height()):
            rl += rcdelta
            gl += gcdelta
            bl += bcdelta
            p.setPen(QColor(int(rl) >> 16, int(gl) >> 16, int(bl) >> 16))
            p.drawLine(0, y, pixmap.width()-1, y)
    else:
        for x in range(pixmap.width()):
            rl += rcdelta
            gl += gcdelta
            bl += bcdelta
            p.setPen(QColor(int(rl) >> 16, int(gl) >> 16, int(bl) >> 16))
            p.drawLine(x, 0, x, pixmap.height()-1)
    return pixmap
return pixmap
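
# Hypothetical usage (a Qt binding such as PyQt is assumed, matching the
# QPainter/QColor calls above; Gradient is the module's own effect constant):
#   pm = QPixmap(200, 100)
#   pm = gradient(pm, QColor(255, 255, 255), QColor(0, 0, 0),
#                 Gradient.Vertical, ncols=0)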
| 5,350,989 |
def validate_filters(filters):
"""
>>> validate_filters('{"fileName": {"is": ["foo.txt"]}}')
>>> validate_filters('"')
Traceback (most recent call last):
...
chalice.app.BadRequestError: BadRequestError: The `filters` parameter is not valid JSON
>>> validate_filters('""')
Traceback (most recent call last):
...
chalice.app.BadRequestError: BadRequestError: The `filters` parameter must be a dictionary.
>>> validate_filters('{"sampleDisease": ["H syndrome"]}') # doctest: +NORMALIZE_WHITESPACE
Traceback (most recent call last):
...
chalice.app.BadRequestError: BadRequestError: \
The `filters` parameter entry for `sampleDisease` must be a single-item dictionary.
>>> validate_filters('{"sampleDisease": {"is": "H syndrome"}}') # doctest: +NORMALIZE_WHITESPACE
Traceback (most recent call last):
...
chalice.app.BadRequestError: BadRequestError: The value of the `is` relation in the `filters` parameter entry for \
`sampleDisease` is not a list.
>>> validate_filters('{"sampleDisease": {"was": "H syndrome"}}') # doctest: +NORMALIZE_WHITESPACE
Traceback (most recent call last):
...
chalice.app.BadRequestError: BadRequestError: The relation in the `filters` parameter entry for `sampleDisease` \
must be one of ('is', 'contains', 'within', 'intersects')
"""
try:
filters = json.loads(filters)
except Exception:
raise BadRequestError('The `filters` parameter is not valid JSON')
if type(filters) is not dict:
raise BadRequestError('The `filters` parameter must be a dictionary.')
for facet, filter_ in filters.items():
validate_facet(facet)
try:
relation, value = one(filter_.items())
except Exception:
raise BadRequestError(f'The `filters` parameter entry for `{facet}` must be a single-item dictionary.')
else:
valid_relations = ('is', 'contains', 'within', 'intersects')
if relation in valid_relations:
if not isinstance(value, list):
raise BadRequestError(
msg=f'The value of the `{relation}` relation in the `filters` parameter '
f'entry for `{facet}` is not a list.')
else:
raise BadRequestError(f'The relation in the `filters` parameter entry for `{facet}`'
f' must be one of {valid_relations}')
if facet == 'organismAge':
validate_organism_age_filter(value)
| 5,350,990 |
def option_to_text(option):
"""Converts, for example, 'no_override' to 'no override'."""
return option.replace('_', ' ')
| 5,350,991 |
def login(email, password):
"""
:desc: Logs a user in.
:param: email - Email of the user - required
password - Password of the user - required
:return: `dict`
"""
if email == '' or password == '':
return {'success': False, 'message': 'Email/Password field left blank.'}
resp = {'success': False}
data = {'email': email, 'password': password}
session = get_session()
session.cookies = LWPCookieJar(filename=COOKIES_FILE_PATH)
resp_obj = session.post(LOGIN_URL, data=data)
if resp_obj.status_code == 200:
if resp_obj.url == BASE_URL:
session.cookies.save(ignore_expires=True, ignore_discard=True)
resp['success'] = True
resp['message'] = 'Successfully Logged In!'
else:
resp['message'] = 'Incorrect credentials'
else:
resp['message'] = 'Stackoverflow is probably down. Please try again.'
return resp
| 5,350,992 |
def test_tox_environments_use_max_base_python():
"""Verify base Python version specified for Tox environments.
Every Tox environment with base Python version specified should
use max Python version.
    Max Python version is assumed from the .python-version file.
"""
pyenv_version = max(
sorted(
"python{}.{}".format(*v.split(".")[0:2])
for v in open(".python-version").read().splitlines()
)
)
for _env, basepython in helpers.tox_info("basepython"):
assert basepython == pyenv_version
| 5,350,993 |
def test_idempotence_after_controller_death(ray_start_stop, use_command: bool):
"""Check that CLI is idempotent even if controller dies."""
config_file_name = os.path.join(
os.path.dirname(__file__), "test_config_files", "basic_graph.yaml"
)
success_message_fragment = b"Sent deploy request successfully!"
deploy_response = subprocess.check_output(["serve", "deploy", config_file_name])
assert success_message_fragment in deploy_response
ray.init(address="auto", namespace=SERVE_NAMESPACE)
serve.start(detached=True)
wait_for_condition(
lambda: len(ray.util.list_named_actors(all_namespaces=True)) == 4, timeout=15
)
# Kill controller
if use_command:
subprocess.check_output(["serve", "shutdown", "-y"])
else:
serve.shutdown()
status_response = subprocess.check_output(["serve", "status"])
status_info = yaml.safe_load(status_response)
assert len(status_info["deployment_statuses"]) == 0
deploy_response = subprocess.check_output(["serve", "deploy", config_file_name])
assert success_message_fragment in deploy_response
# Restore testing controller
serve.start(detached=True)
wait_for_condition(
lambda: len(ray.util.list_named_actors(all_namespaces=True)) == 4, timeout=15
)
serve.shutdown()
ray.shutdown()
| 5,350,994 |
def register():
"""Register this generator.
"""
import agx.generator.uml
from agx.core.config import register_generator
register_generator(agx.generator.uml)
| 5,350,995 |
def _cons8_89(m8, L88, L89, d_gap, k, Cp, h_gap):
"""dz constrant for edge gap sc touching edge, corner gap sc"""
term1 = 2 * h_gap * L88 / m8 / Cp # conv to inner/outer ducts
term2 = k * d_gap / m8 / Cp / L88 # cond to adj bypass edge
term3 = k * d_gap / m8 / Cp / L89 # cond to adj bypass corner
return 1 / (term1 + term2 + term3)
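
# The returned step limit is dz = 1 / (term1 + term2 + term3), i.e. the
# harmonic combination of the convective and conductive terms above.
# Illustrative call with made-up (unvalidated) magnitudes:
#   dz_max = _cons8_89(m8=0.05, L88=0.02, L89=0.015, d_gap=0.001,
#                      k=20.0, Cp=1200.0, h_gap=5000.0)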
| 5,350,996 |
def categories_split(df):
""" Separate the categories in their own columns. """
ohe_categories = pd.DataFrame(df.categories.str.split(';').apply(
lambda x: {e.split('-')[0]: int(e.split('-')[1]) for e in x}).tolist())
return df.join(ohe_categories).drop('categories', axis=1)
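
# Worked example of the expected 'categories' encoding ('<name>-<0|1>' pairs
# joined by ';'); the column names here are illustrative only:
#   df = pd.DataFrame({'id': [1], 'categories': ['related-1;request-0;offer-0']})
#   categories_split(df)
#   =>    id  related  request  offer
#      0   1        1        0      0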
| 5,350,997 |
def build_received_request(qparams, variant_id=None, individual_id=None, biosample_id=None):
""""Fills the `receivedRequest` part with the request data"""
request = {
'meta': {
'requestedSchemas' : build_requested_schemas(qparams),
'apiVersion' : qparams.apiVersion,
},
'query': build_received_query(qparams, variant_id, individual_id, biosample_id),
}
return request
| 5,350,998 |
def is_router_bgp_configured_with_four_octet(
device, neighbor_address, vrf, max_time=35, check_interval=10
):
""" Verifies that router bgp has been enabled with four
octet capability and is in the established state
Args:
device('obj'): device to check
            vrf('str'): vrf to check under
neighbor_address('str'): neighbor address to match
max_time('int'): maximum time to wait
check_interval('int'): how often to check
Returns:
True
False
Raise:
None
"""
log.info(
"Verifying {} has bgp configured with four octet capability".format(
device.hostname
)
)
timeout = Timeout(max_time, check_interval)
while timeout.iterate():
out = device.parse("show ip bgp neighbors")
if out:
if vrf in out.get("vrf", {}):
for neighbor in out["vrf"][vrf].get("neighbor", {}):
if neighbor_address in neighbor:
neighbor_dict = out["vrf"][vrf]["neighbor"][neighbor]
if (
"established"
in neighbor_dict.get("session_state", "").lower()
):
if "bgp_negotiated_capabilities" in neighbor_dict and "advertised and received" in neighbor_dict[
"bgp_negotiated_capabilities"
].get(
"four_octets_asn", ""
):
return True
timeout.sleep()
return False
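
# Hypothetical pyATS/Genie usage (the device object comes from a loaded
# testbed; the address and vrf values are placeholders):
#   ok = is_router_bgp_configured_with_four_octet(device,
#                                                 neighbor_address='10.0.0.1',
#                                                 vrf='default')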
| 5,350,999 |