def pass_api_client(function):
"""Create API client form API key and pass it to subcommand.
:param function: Subcommand that returns a result from the API.
:type function: callable
:returns: Wrapped function that prints subcommand results
:rtype: callable
"""
@functools.wraps(function)
def wrapper(*args, **kwargs):
context = click.get_current_context()
api_key = context.params.get("api_key")
offering = context.params.get("offering")
config = load_config()
if api_key is None:
if not config["api_key"]:
prog_name = context.parent.info_name
click.echo(
"\nError: API key not found.\n\n"
"To fix this problem, please use any of the following methods "
"(in order of precedence):\n"
"- Pass it using the -k/--api-key option.\n"
"- Set it in the GREYNOISE_API_KEY environment variable.\n"
"- Run {!r} to save it to the configuration file.\n".format(
"{} setup".format(prog_name)
)
)
context.exit(-1)
api_key = config["api_key"]
if offering is None:
if not config["offering"]:
offering = "enterprise"
else:
offering = config["offering"]
api_client = GreyNoise(
api_key=api_key,
offering=offering,
timeout=config["timeout"],
integration_name="cli",
)
return function(api_client, *args, **kwargs)
return wrapper
| 5,347,300 |
async def radius(ctx, system: str, radius: float, minRadius = 0.0):
"""Returns systems within a radius around a system"""
await bot.send_typing(ctx.message.channel)
coords = elite.friendly_get_coords(system)
systems = elite.get_systems_in_radius(coords, radius, minRadius)
if systems:
msg = '{0} systems between {1} and {2} LY from {3}'.format(len(systems), minRadius, radius, system)
if len(systems) > 0:
msg += ':\n'
limit = 50
if len(systems) > limit:
msg += '(closest {0} shown)\n'.format(limit)
count = 0
for sys in sorted(systems, key=lambda x: x['distance']):
msg += '{0}: {1} LY\n'.format(sys['name'], sys['distance'])
count += 1
if count > limit: break
else:
msg += '\n'
else:
msg = 'No systems in range'
await bot.say(msg)
| 5,347,301 |
def add_blocks(sender, **kwargs):
"""add extra blocks on save (dummy rendering happens too since CMSBaseModel
extends _CMSAbstractBaseModel). """
if isinstance(kwargs['instance'], CMSBaseModel):
ctype = ContentType.objects.get_for_model(kwargs['instance'])
for label_tuple in kwargs['instance'].BLOCK_LABELS:
if isinstance(label_tuple, str):
label_tuple = (label_tuple, None,)
block, created = Block.objects.get_or_create(
label=label_tuple[0],
content_type=ctype,
object_id=kwargs['instance'].id
)
# only set the format if the block was just created, or it's blank,
# and if a format is defined
if (not block.format or created) and label_tuple[1]:
block.format = label_tuple[1]
block.save()
for label in kwargs['instance'].IMAGE_LABELS:
Image.objects.get_or_create(
label=label,
content_type=ctype,
object_id=kwargs['instance'].id
)
| 5,347,302 |
def _test(blocksize=256):
"""Tests one configuration of blocksize."""
print("Testing blocksize %d ... " % blocksize, end="")
padder = Padder(blocksize)
for size in (0, 1, blocksize - 2, blocksize - 1, blocksize):
data = b"a" * max(0, size - 1) + (b"b" if size else b"")
assert len(data) == size
padded = padder.pad(data)
assert len(padded) % blocksize == 0
unpadded = padder.unpad(padded)
assert unpadded == data
print("OK")
| 5,347,303 |
def reset_log_level_global_var():
"""Reset log level global var, in order to update the value from settings"""
# pylint: disable=global-statement
global __LOG_LEVEL__
__LOG_LEVEL__ = None
| 5,347,304 |
def vecs_Xg_ig(x):
""" Vi = vec(dg/dxi * inv(g)), where g = exp(x)
(== [Ad(exp(x))] * vecs_ig_Xg(x))
"""
t = x.view(-1, 3).norm(p=2, dim=1).view(-1, 1, 1)
X = mat(x)
S = X.bmm(X)
#B = x.view(-1,3,1).bmm(x.view(-1,1,3)) # B = x*x'
I = torch.eye(3).to(X)
#V = sinc1(t)*eye(3) + sinc2(t)*X + sinc3(t)*B
#V = eye(3) + sinc2(t)*X + sinc3(t)*S
V = I + sinc2(t)*X + sinc3(t)*S
return V.view(*(x.size()[0:-1]), 3, 3)
| 5,347,305 |
def wide_factorial(x):
"""factorial returns x! = x * x-1 * x-2 * ...,
Args:
x: bytes to evaluate as an integer
Returns:
bytes representing the integer that is the result of the factorial applied on the argument passed
"""
return If(
BitLen(x) == Int(1), x, BytesMul(x, wide_factorial(BytesMinus(x, Itob(Int(1)))))
)
| 5,347,306 |
def test_absent_rm_alias_failed():
"""
test alias.absent remove alias failure
"""
name = "saltdude"
ret = {
"comment": "Failed to remove alias {}".format(name),
"changes": {},
"name": name,
"result": False,
}
get_target = MagicMock(return_value=True)
rm_alias = MagicMock(return_value=False)
with patch.dict(alias.__salt__, {"aliases.get_target": get_target}):
with patch.dict(alias.__opts__, {"test": False}):
with patch.dict(alias.__salt__, {"aliases.rm_alias": rm_alias}):
assert alias.absent(name) == ret
| 5,347,307 |
def spring_outpath(filepath: pathlib.Path) -> pathlib.Path:
"""Build a spring path based on a fastq file path"""
LOG.info("Create spring path from %s", filepath)
file_name = filepath.name
file_parent = filepath.parent
splitted = file_name.split("_")
spring_base = pathlib.Path("_".join(splitted[:-2]))
spring_path = pathlib.Path(file_parent).joinpath(spring_base).with_suffix(".spring")
LOG.info("Creates spring path %s", spring_path)
return spring_path
| 5,347,308 |
def convert_gensim_to_word2vec_format(fileName):
"""Converts gensim exportation format to word2vec format"""
model = gensim.models.KeyedVectors.load(fileName)
word_vectors = model.wv
word_vectors.save_word2vec_format(fileName)
| 5,347,309 |
def attach_tfidf_weights(storage, vocab, tf_arr):
"""Appends tf-idf weights to each word """
wordlist = vocab
storage_weighted = []
for i in range(len(storage)):
sys.stdout.write(str(i)+",")
sys.stdout.flush()
docweights = []
stor_list = storage[i].split()
for word in stor_list:
words = [word,0]
for j in range(len(wordlist)):
if (wordlist[j] == word):
words[1] = tf_arr[i][j]
docweights.append(words)
storage_weighted.append(docweights)
return storage_weighted
| 5,347,310 |
def env(config, endpoint):
"""Print RENKU environment variables.
Run this command to configure your Renku client:
$ eval "$(renku env)"
"""
access_token = config['endpoints'][endpoint]['token']['access_token']
click.echo('export {0}={1}'.format('RENKU_ENDPOINT', endpoint))
click.echo('export {0}={1}'.format('RENKU_ACCESS_TOKEN', access_token))
click.echo('# Run this command to configure your Renku client:')
click.echo('# eval "$(renku env)"')
| 5,347,311 |
def expanded_bb(final_points):
"""computation of coordinates and distance"""
left, right = final_points
left_x, left_y = left
right_x, right_y = right
base_center_x = (left_x+right_x)/2
base_center_y = (left_y+right_y)/2
dist_base = abs(complex(left_x, left_y)-complex(right_x, right_y ) )
return (int(base_center_x), int(base_center_y) ), dist_base
| 5,347,312 |
def gen_file_get_url(token, filename):
"""
Generate httpserver file url.
Format: http://<domain:port>/files/<token>/<filename>
"""
return '%s/files/%s/%s' % (get_httpserver_root(), token, urlquote(filename))
| 5,347,313 |
def count_items():
"""
Get number of items in the DB
Per the AWS documentation:
DynamoDB updates this value approximately every six hours. Recent changes might not be reflected in this value.
https://boto3.amazonaws.com/v1/documentation/api/latest/reference/services/dynamodb.html#DynamoDB.Client.describe_table
"""
return dynamo_client.alerts_table.item_count
| 5,347,314 |
def extract_uris(data):
"""Convert a text/uri-list to a python list of (still escaped) URIs"""
lines = data.split('\r\n')
out = []
for l in lines:
if l == chr(0):
continue # (gmc adds a '\0' line)
if l and l[0] != '#':
out.append(l)
return out
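# Illustrative usage (added, not part of the original snippet): a text/uri-list
# payload with a comment line; the comment and the trailing empty entry are dropped.
assert extract_uris("# a comment\r\nhttp://example.com/a\r\nhttp://example.com/b\r\n") == [
    "http://example.com/a",
    "http://example.com/b",
]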
| 5,347,315 |
def _load_fonts():
"""Load the fonts.
This will add the OpenSans fonts to the QFontDatabase.
Returns:
list: The registered font ids.
"""
font_root = os.path.join(os.path.dirname(__file__), "fonts")
fonts = [
"opensans/OpenSans-Bold.ttf",
"opensans/OpenSans-BoldItalic.ttf",
"opensans/OpenSans-ExtraBold.ttf",
"opensans/OpenSans-ExtraBoldItalic.ttf",
"opensans/OpenSans-Italic.ttf",
"opensans/OpenSans-Light.ttf",
"opensans/OpenSans-LightItalic.ttf",
"opensans/OpenSans-Regular.ttf",
"opensans/OpenSans-Semibold.ttf",
"opensans/OpenSans-SemiboldItalic.ttf",
]
font_ids = [QtGui.QFontDatabase.addApplicationFont(os.path.join(font_root, font)) for font in fonts]
return font_ids
| 5,347,316 |
def handle_form_submission(ack, body, client, view):
"""
Handles what happens when the track submission form gets submitted
"""
# Perform validation on the times before ack'ing
form_state = view['state']['values']
time_regex = '^([01]?[0-9]|2[0-3]):([0-5][0-9])$'
arrival = re.match(time_regex, form_state['arrival_time_input']['arrival_time']['value'])
departure = re.match(time_regex, form_state['departure_time_input']['departure_time']['value'])
errors = {}
if arrival is None:
errors['arrival_time_input'] = 'You must enter arrival time in the 24-hour format (H:MM or HH:MM)'
if departure is None:
errors['departure_time_input'] = 'You must enter departure time in the 24-hour format (H:MM or HH:MM)'
if arrival is not None and departure is not None:
arrival_time = int(arrival.group(1)) + float(arrival.group(2)) / 60
departure_time = int(departure.group(1)) + float(departure.group(2)) / 60
if departure_time < arrival_time:
error_msg = 'Departure time must be later than arrival time! '
if departure_time < 4:
error_msg += 'To submit hours crossing midnight, please submit as two'
error_msg += ' separate time intervals for each day'
errors['departure_time_input'] = error_msg
if len(errors) > 0:
ack(response_actions='errors', errors=errors)
return
# Otherwise we are good; ack to close the form
ack()
# Otherwise, add row
day = form_state['submit_date_input']['submit_date']['selected_date']
arrival = form_state['arrival_time_input']['arrival_time']['value']
departure = form_state['departure_time_input']['departure_time']['value']
add_row('covid_hours.csv',
body['user']['username'],
day,
arrival,
departure)
duration = datetime.strptime(departure, '%H:%M') - datetime.strptime(arrival, '%H:%M')
client.chat_postMessage(
channel=body['user']['id'],
text='{:.1f} hours submitted successfully for {}, from {} to {}'.format(
duration.total_seconds() / 3600.0,
datetime.strptime(day, '%Y-%m-%d').strftime('%B %d'),
arrival, departure),
user=body['user']['id'])
| 5,347,317 |
def createConnection(ps, graph, e, q, maxIter):
"""
Try to build a path along a transition from a given configuration
"""
for i in range(maxIter):
q_rand = shootConfig(ps.robot, q, i)
res, q1, err = graph.generateTargetConfig(e, q, q_rand)
if not res:
continue
res, p, msg = ps.directPath(q, q1, True)
if not res:
continue
ps.addConfigToRoadmap(q1)
ps.addEdgeToRoadmap(q, q1, p, True)
print("Success (i={0})".format(i))
return p, q1
print("Failed (maxIter={0})".format(maxIter))
return None, None
| 5,347,318 |
def delete_division_item(ids, my_divname):
"""
Given division id, delete from db
Return deleted division entry.
"""
settings = utils.load_json_definition_file(SETTINGS_FILE)
success, dubconn = utils.open_monitoring_db(settings['dbhost'],
settings['dbuser'],
settings['dbpass'],
settings['db_db'])
if success:
division = format_division(div_id=int(ids.div), name=my_divname)
query = """
DELETE FROM divisions
WHERE divid=%s
"""
app.logger.debug("Got a delete query of: %s ", query)
cursor = dubconn.cursor()
try:
cursor.execute(query, (division['ID'],))
except Exception as err:
app.logger.error("mysql exception: [%d]: %s", err.args[0],
err.args[1])
app.logger.error("generated by: %s", query)
success = 0
dubconn.commit()
cursor.close()
dubconn.close()
return division
| 5,347,319 |
def extract_info(filepath,pdbid,info_id_list):
"""Returns a dictionary where the key is pocket ID (starting at zero) and the value is a dictionary of information points."""
pockets_info = {}
pocket_file = open(filepath+pdbid+'_out/'+pdbid+'_info.txt')
pocket_lines = pocket_file.readlines()
pocket_file.close()
# create inner dictionaries
counter = 0
for line in pocket_lines:
if line[:6] == 'Pocket':
pockets_info[counter] = {}
counter += 1
# populate inner dictionaries
for info_id in info_id_list:
counter = 0
for line in pocket_lines:
if line.lstrip()[:len(info_id)] == info_id:
split = re.split(r'\s+',line.rstrip())
pockets_info[counter][info_id] = float(split[-1])
counter += 1
return pockets_info
| 5,347,320 |
def eval_shape_fcn(w, x, N1, N2, yte):
"""
compute class and shape function
:param w:
:param x:
:param N1:
:param N2:
:param yte: trailing edge y coordinate
:return:
"""
C = x**N1 * (1-x)**N2
n = len(w) - 1 # degree of Bernstein polynomials
S = np.zeros_like(x)
for j in range(0, n+1):
K = factorial(n)/(factorial(j)*(factorial(n-j)))
S += w[j]*K*x**j * ((1-x)**(n-j))
return C * S + x * yte
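# Minimal sanity-check sketch (an addition, not from the original source): with
# unit Bernstein weights the shape function S(x) sums to 1 by the binomial
# theorem, so the output reduces to the bare class function x**N1 * (1 - x)**N2.
# Assumes numpy (np) and factorial are already in scope, as eval_shape_fcn itself requires.
_x = np.linspace(0.0, 1.0, 5)
_y = eval_shape_fcn(w=[1.0, 1.0, 1.0], x=_x, N1=0.5, N2=1.0, yte=0.0)
assert np.allclose(_y, _x**0.5 * (1.0 - _x))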
| 5,347,321 |
def _pickle_path(file_name):
"""Returns an absolute path to the specified pickle file."""
return project_root_path('pickles', file_name)
| 5,347,322 |
def streamentry(parser, token):
"""
streamentry <entry_var>
"""
bits = token.split_contents()
bits.reverse()
tag_name = bits.pop()
try:
entry_var = bits.pop()
except IndexError:
raise template.TemplateSyntaxError, "%r is missing entry argument" % tag_name
if bits:
raise template.TemplateSyntaxError, "%r has unexpected arguments" % tag_name
return StreamItemNode(entry_var)
| 5,347,323 |
def mult_int_list_int():
"""
>>> mult_int_list_int()
[1, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1, 2]
"""
return 3 * [1, 2] * 2
| 5,347,324 |
def test_pseudonymize__integer__2():
"""It pseudonymizes the value `0`."""
from gocept.pseudonymize import integer
assert 8 == pseudo(0, integer)
| 5,347,325 |
def _gcd_tf(a, b, dtype=tf.int64):
"""Calculates the greatest common denominator of 2 numbers.
Assumes that a and b are tf.Tensor of shape () and performs the extended
euclidean algorithm to find the gcd and the coefficients of Bézout's
identity (https://en.wikipedia.org/wiki/B%C3%A9zout%27s_identity)
Args:
a: A scalar `tf.Tensor`.
b: A scalar `tf.Tensor`.
dtype: Data type to perform operations in. `a` and `b` are casted to this
dtype.
Returns:
A tuple of `tf.Tensor`s `(g, x, y)` such that `a*x + b*y = g = gcd(a, b)`.
"""
a = tf.cast(a, dtype=dtype)
b = tf.cast(b, dtype=dtype)
x0, x1, y0, y1 = (tf.constant(0, dtype=dtype), tf.constant(1, dtype=dtype),
tf.constant(1, dtype=dtype), tf.constant(0, dtype=dtype))
def cond(a, b, x0, x1, y0, y1):
del b, x0, x1, y0, y1
return tf.math.not_equal(a, tf.constant(0, dtype=dtype))
def body(a, b, x0, x1, y0, y1):
(q, a), b = (tf.cast(b / a, dtype=dtype), b % a), a
y0, y1 = y1, y0 - q * y1
x0, x1 = x1, x0 - q * x1
return a, b, x0, x1, y0, y1
a, b, x0, x1, y0, y1 = tf.while_loop(
cond, body, loop_vars=(a, b, x0, x1, y0, y1))
return b, x0, y0
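# Hedged reference sketch (added for illustration, not from the original file):
# the tf.while_loop above mirrors the classic extended Euclidean algorithm; a
# plain-Python version of the same loop body serves as a sanity check.
def _gcd_reference(a, b):
    x0, x1, y0, y1 = 0, 1, 1, 0
    while a != 0:
        (q, a), b = divmod(b, a), a
        y0, y1 = y1, y0 - q * y1
        x0, x1 = x1, x0 - q * x1
    return b, x0, y0

_g, _x, _y = _gcd_reference(240, 46)
assert _g == 2 and 240 * _x + 46 * _y == _g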
| 5,347,326 |
def compute_running_mean(x, kernel_size):
""" Fast analogue of scipy.signal.convolve2d with gaussian filter. """
k = kernel_size // 2
padded_x = np.pad(x, (k, k), mode='symmetric')
cumsum = np.cumsum(padded_x, axis=1)
cumsum = np.cumsum(cumsum, axis=0)
return _compute_running_mean_jit(x, kernel_size, cumsum)
| 5,347,327 |
def update_daily_traffic(traffic_data: list[list[int]]):
"""
Expected format for each traffic_data list item should match "day" value of the /about/traffic endpoint:
[timestamp (start of day), unique_pageviews, total_pageviews, subscribers]
"""
oldest_date = datetime.date.fromtimestamp(traffic_data[-1][0])
today = datetime.datetime.now(datetime.timezone.utc).date()
current_traffic_rows = _traffic_data.get_daily_traffic_by_range(oldest_date, today)
current_traffic_by_date = {model.date: model for model in current_traffic_rows}
for row in traffic_data:
date = datetime.date.fromtimestamp(row[0])
unique_pageviews, total_pageviews, subscribers = row[1:4]
# New entry, date not currently in database.
if date not in current_traffic_by_date:
add_daily_traffic(date, unique_pageviews, total_pageviews, subscribers)
continue
# Otherwise see if the data needs updating for some reason.
current_traffic = current_traffic_by_date[date]
if unique_pageviews != 0:
current_traffic.unique_pageviews = unique_pageviews
if total_pageviews != 0:
current_traffic.total_pageviews = total_pageviews
current_traffic.net_subscribers = subscribers
_traffic_data.update(current_traffic)
| 5,347,328 |
def _historicDataUrll(symbol, sDate=(1990,1,1),eDate=date.today().timetuple()[0:3]):
"""
generate url
symbol: Yahoo finanance symbol
sDate: start date (y,m,d)
eDate: end date (y,m,d)
"""
urlStr = 'http://ichart.finance.yahoo.com/table.csv?s={0}&a={1}&b={2}&c={3}&d={4}&e={5}&f={6}'.\
format(symbol.upper(),sDate[1]-1,sDate[2],sDate[0],eDate[1]-1,eDate[2],eDate[0])
return urlStr
| 5,347,329 |
def _list_manager_owned_bidding_strategies(client, manager_customer_id):
"""List all cross-account bidding strategies in the manager account.
Args:
client: An initialized GoogleAdsClient instance.
manager_customer_id: A manager customer ID.
"""
googleads_service = client.get_service("GoogleAdsService")
query = """
SELECT
bidding_strategy.id,
bidding_strategy.name,
bidding_strategy.type,
bidding_strategy.currency_code
FROM bidding_strategy"""
# Creates and issues a search Google Ads stream request that will retrieve
# all bidding strategies.
stream = googleads_service.search_stream(
customer_id=manager_customer_id, query=query
)
# Iterates through and prints all of the results in the stream response.
print(
"Cross-account bid strategies in manager account: "
f"{manager_customer_id}"
)
for response in stream:
for row in response.results:
bs = row.bidding_strategy
print(
f"\tID: {bs.id}\n"
f"\tName: {bs.name}\n"
f"\tStrategy type: {bs.type_}\n"
f"\tCurrency: {bs.currency_code}\n\n"
)
# [END list_manager_strategies]
| 5,347,330 |
def _check_param_testcase_report(testcase_report, i):
"""
Check the testcase report generated for the ith parametrization of the
"parametrized" testcase from the "Suite" testsuite.
"""
assert testcase_report.passed
assert testcase_report.name == "parametrized__val_{}".format(i + 1)
assert testcase_report.category == report.ReportCategories.TESTCASE
assert len(testcase_report.entries) == 1 # One assertion
greater_assertion = testcase_report.entries[0]
assert greater_assertion["passed"]
assert greater_assertion["type"] == "Greater"
assert greater_assertion["first"] == i + 1
assert greater_assertion["second"] == 0
| 5,347,331 |
def to_dict(eds, properties=True, lnk=True):
"""
Encode the EDS as a dictionary suitable for JSON serialization.
"""
nodes = {}
for node in eds.nodes:
nd = {
'label': node.predicate,
'edges': node.edges
}
if lnk and node.lnk is not None:
nd['lnk'] = {'from': node.cfrom, 'to': node.cto}
if node.type is not None:
nd['type'] = node.type
if properties:
props = node.properties
if props:
nd['properties'] = props
if node.carg is not None:
nd['carg'] = node.carg
nodes[node.id] = nd
return {'top': eds.top, 'nodes': nodes}
| 5,347,332 |
def find_lineage(tax_id: int) -> Lineage:
"""Finds lineage for a single tax id"""
if tax_id % 50000 == 0:
_LOGGER.info("working on tax_id: %d", tax_id)
lineage = []
while True:
record = TAXONOMY_DICT[tax_id]
lineage.append((record["tax_id"], record["rank"], record["rank_name"]))
tax_id = record["parent_tax_id"]
# every tax can be traced back to tax_id == 1, the root
if tax_id == ROOT_TAX_ID:
break
# reverse results in lineage of Kingdom => species, this is helpful for
# to_dict when there are multiple "no rank"s
lineage.reverse()
return Lineage(lineage)
| 5,347,333 |
def modify(request):
"""
[Method summary]
Update processing for groups in the DB
"""
logger.logic_log('LOSI00001', 'None', request=request)
msg = ''
error_msg = {}
now = datetime.datetime.now(pytz.timezone('UTC'))
try:
with transaction.atomic():
json_str = json.loads(request.POST.get('json_str', '{}'))
if 'json_str' not in json_str:
msg = get_message('MOSJA23019', request.user.get_lang_mode(), showMsgId=False)
logger.user_log('LOSM04000', 'json_str', request=request)
raise Exception()
# Lock the records before updating
group_update_list = [
rq['group_id']
for rq in json_str['json_str']
if int(rq['ope']) in (defs.DABASE_OPECODE.OPE_UPDATE, defs.DABASE_OPECODE.OPE_DELETE)
]
Group.objects.select_for_update().filter(pk__in=group_update_list)
error_flag, error_msg = _validate(json_str['json_str'], request)
if error_flag:
raise Exception('validation error.')
# Build the update data
group_id_list_reg = []
group_id_list_mod = []
sorted_data = sorted(json_str['json_str'], key=lambda x: x['group_id'])
upd_data = list(filter(lambda x: int(x['ope']) == defs.DABASE_OPECODE.OPE_UPDATE, sorted_data))
del_data = list(filter(lambda x: int(x['ope']) == defs.DABASE_OPECODE.OPE_DELETE, sorted_data))
ins_data = list(filter(lambda x: int(x['ope']) == defs.DABASE_OPECODE.OPE_INSERT, sorted_data))
for rq in upd_data:
group_id_list_mod = Group.objects.filter(group_id=rq['group_id'])
if len(group_id_list_mod) <= 0:
logger.logic_log('LOSI04000', rq['group_name'], request=request)
continue
# System-managed groups cannot have their group name updated
if int(rq['group_id']) not in defs.GROUP_DEFINE.PROTECTED_GROUP_IDS:
group_id_list_mod[0].group_name = rq['group_name']
group_id_list_mod[0].summary = rq['summary']
group_id_list_mod[0].last_update_user = request.user.user_name
group_id_list_mod[0].last_update_timestamp = now
group_id_list_mod[0].save(force_update=True)
group_id_list_del = [rq['group_id'] for rq in del_data if int(rq['group_id']) not in defs.GROUP_DEFINE.PROTECTED_GROUP_IDS]
for rq in ins_data:
group_info = Group(
group_name=rq['group_name'],
summary=rq['summary'],
last_update_user=request.user.user_name,
last_update_timestamp=now
)
group_id_list_reg.append(group_info)
# Insert the new groups
Group.objects.bulk_create(group_id_list_reg)
# Add access permissions
_bulk_create_access_permission(
request.user.user_name,
[i.group_name for i in group_id_list_reg],
now,
)
# Delete the groups marked for deletion
Group.objects.filter(pk__in=group_id_list_del).delete()
# Get the user IDs belonging to the user groups being deleted
before_user_list = list(UserGroup.objects.filter(group_id__in=group_id_list_del).values_list('user_id', flat=True).distinct())
# Delete the user groups
UserGroup.objects.filter(group_id__in=group_id_list_del).delete()
# Find users that no longer belong to any group
after_user_list = list(UserGroup.objects.filter(user_id__in=before_user_list).values_list('user_id', flat=True).distinct())
delete_user_list = list(set(before_user_list) ^ set(after_user_list))
# Delete those users, their password history, and the groups' access permissions
User.objects.filter(pk__in=delete_user_list).delete()
PasswordHistory.objects.filter(user_id__in=delete_user_list).delete()
AccessPermission.objects.filter(group_id__in=group_id_list_del).delete()
except Exception as e:
logger.logic_log('LOSI00005', traceback.format_exc(), request=request)
msg = get_message('MOSJA23021', request.user.get_lang_mode()) + '\\n' + str(e.args)
response = {}
response['status'] = 'failure'
response['msg'] = msg
response['error_msg'] = error_msg
response_json = json.dumps(response)
return HttpResponse(response_json, content_type="application/json")
redirect_url = '/oase_web/system/group'
response_json = '{"status": "success", "redirect_url": "%s"}' % redirect_url
logger.logic_log('LOSI00002', 'None', request=request)
return HttpResponse(response_json, content_type="application/json")
| 5,347,334 |
def _function_args_doc(functions):
"""
Create documentation of a list of functions.
Return: usage dict (usage[funcname] = list of arguments, incl.
default values), doc dict (doc[funcname] = docstring (or None)).
Called by function_UI.
"""
import inspect
usage = {}
doc = {}
for f in functions:
args = inspect.getfullargspec(f)  # getargspec was removed in Python 3.11; getfullargspec is a drop-in here
if args.defaults is None:
# Only positional arguments
usage[f.__name__] = args.args
else:
# Keyword arguments too, build complete list
usage[f.__name__] = args.args[:-len(args.defaults)] + \
['%s=%s' % (a, d) for a, d in \
zip(args.args[-len(args.defaults):], args.defaults)]
doc[f.__name__] = inspect.getdoc(f)
return usage, doc
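# Illustrative usage (an assumption added for clarity, not in the original):
# build the usage/doc dictionaries for two small functions.
def _scale(x, factor=2):
    """Scale x by factor."""
    return x * factor

def _add(a, b):
    """Add two numbers."""
    return a + b

_usage, _doc = _function_args_doc([_scale, _add])
assert _usage == {"_scale": ["x", "factor=2"], "_add": ["a", "b"]}
assert _doc["_add"] == "Add two numbers."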
| 5,347,335 |
def get_loss(pred, label):
""" pred: B*NUM_CLASSES,
label: B, """
loss = tf.nn.sparse_softmax_cross_entropy_with_logits(logits=pred, labels=label)
classify_loss = tf.reduce_mean(loss)
tf.summary.scalar('classify loss', classify_loss)
tf.add_to_collection('losses', classify_loss)
| 5,347,336 |
def mock_modules_list():
"""Standard module list without any issues"""
return [
{"name": "foo", "module_type": "app", "supported_platforms": ["macos"]},
{"name": "bar", "module_type": "app"},
]
| 5,347,337 |
def https(remainder, params):
"""Ensure that the decorated method is always called with https."""
if request.scheme.lower() == 'https': return
if request.method.upper() == 'GET':
redirect('https' + request.url[len(request.scheme):])
raise HTTPMethodNotAllowed(headers=dict(Allow='GET'))
| 5,347,338 |
def cal_iou(box1, box1_area, boxes2, boxes2_area):
"""
box1 [x1,y1,x2,y2]
boxes2 [Msample,x1,y1,x2,y2]
"""
x1 = np.maximum(box1[0], boxes2[:, 0])
x2 = np.minimum(box1[2], boxes2[:, 2])
y1 = np.maximum(box1[1], boxes2[:, 1])
y2 = np.minimum(box1[3], boxes2[:, 3])
intersection = np.maximum(x2 - x1, 0) * np.maximum(y2 - y1, 0)
iou = intersection / (box1_area + boxes2_area[:] - intersection[:])
return iou
| 5,347,339 |
def hbonds_single_c(snap, id1, id2, cut1, cut2, angle, names=False):
"""
Binding of C++ routines in :mod:`.hbonds_c` for couting of hydrogen bonds in a single snapshot.
Args:
snap (:class:`.Snap`): single snapshot containing the atomic information
id1 (str): identifier for oxygen atoms (e.g. 'O\_')
id2 (str): identifier for hydrogen atoms (e.g. 'H\_')
cut1 (float): maximum distance between two oxygen atoms
cut2 (float): maximum distance between an oxygen and a hydrogen atom
angle (float): minimum O-H-O angle in degree
names (list[str], optional): names of oxygen atoms used as search centers
Returns:
float: number of hydrogen bonds found for this snapshot
"""
atoms1 = snap.atoms[snap.atoms['id'] == id1]['pos'].values
atoms1 = atoms1.reshape(len(atoms1) * 3)
atoms2 = snap.atoms[snap.atoms['id'] == id2]['pos'].values
atoms2 = atoms2.reshape(len(atoms2) * 3)
cell = snap.cell.reshape(9)
if names:
center = snap.atoms.loc[snap.atoms['name'].isin(names)]
center = center['pos'].values
center = center.reshape(len(center) * 3)
number = hbonds_c.hbonds(atoms1, atoms2, center, cut1, cut2, angle, cell)
else:
number = hbonds_c.hbonds(atoms1, atoms2, atoms1, cut1, cut2, angle, cell)
return number
| 5,347,340 |
def _find_event_times(raw, event_id, mask):
"""Given the event_id and mask, find the event times.
"""
stim_ch = find_stim_channel(raw)
sfreq = raw.info['sfreq']
events = find_events(raw, stim_ch, mask, event_id)
times = [(event[0] - raw.first_samp) / sfreq for event in events]
return times
| 5,347,341 |
def get_parameters():
"""Load parameter values from AWS Systems Manager (SSM) Parameter Store"""
parameters = {
"kafka_servers": ssm_client.get_parameter(
Name="/kafka_spark_demo/kafka_servers")["Parameter"]["Value"],
"kafka_demo_bucket": ssm_client.get_parameter(
Name="/kafka_spark_demo/kafka_demo_bucket")["Parameter"]["Value"],
"schema_registry_url": ssm_client.get_parameter(
Name="/kafka_spark_demo/schema_registry_url_int")["Parameter"]["Value"],
}
return parameters
| 5,347,342 |
async def get_accounts(context, names, observer=None):
"""Find and return lite accounts by `names`.
Observer: will include `followed` context.
"""
assert isinstance(names, list), 'names must be a list'
assert names, 'names cannot be blank'
assert len(names) < 100, 'too many accounts requested'
return await accounts_by_name(context['db'], names, observer, lite=True)
| 5,347,343 |
def _worker(vecs, k, modelpath, curid, retval):
"""Worker to search for nearest embeddings.
"""
cur = current_process()
print('\n{} loading knn'.format(cur.name))
knn = joblib.load(modelpath)
print('\n{} searching...'.format(cur.name))
res = knn.kneighbors(vecs, n_neighbors=k, return_distance=False)
retval[curid] = res
| 5,347,344 |
def softmax_crossentropy_logits(p, q):
"""see sparse cross entropy"""
return -(p * log_softmax(q)).sum(-1)
| 5,347,345 |
def SieveOfEratosthenes(limit=10**6):
"""Returns all primes not greater than limit."""
isPrime = [True]*(limit+1)
isPrime[0] = isPrime[1] = False
primes = []
for i in range(2, limit+1):
if not isPrime[i]:continue
primes += [i]
for j in range(i*i, limit+1, i):
isPrime[j] = False
return primes
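# Quick illustrative check (added): primes not greater than 30.
assert SieveOfEratosthenes(30) == [2, 3, 5, 7, 11, 13, 17, 19, 23, 29]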
| 5,347,346 |
def update_submission_template(default_template, qtemplate):
"""
helper function for writing a CommonAdapter template fireworks
submission file based on a provided default_template which
contains hpc resource allocation information and the qtemplate
which is a yaml of commonly modified user arguments
"""
pass
| 5,347,347 |
def GetHistory():
"""Obtain mapping from release version to docs/root/intro/deprecated.rst PRs.
Returns:
A dictionary mapping from release version to a set of git commit objects.
"""
repo = Repo(os.getcwd())
version = None
history = defaultdict(set)
for commit, lines in repo.blame('HEAD', 'docs/root/intro/deprecated.rst'):
for line in lines:
sr = re.match(r'## Version (.*) \(.*\)', line)
if sr:
version = sr.group(1)
continue
history[version].add(commit)
return history
| 5,347,348 |
def generate_chromosome(constraint = False, constraint_levers = [], constraint_values = [],
threshold = False, threshold_names = [], thresholds = []):
"""
Initialises a chromosome and returns its corresponding lever values, and temperature and cost.
**Args**:
- constraint (*boolean*): Flag to select whether any inputs have been fixed.
- constraint_levers (*list of strings*): Contains the name of levers to be fixed.
- constraint_values (*list of floats*): Contains the values to fix the selected levers to.
- threshold (*boolean*): Flag to select whether any inputs have to be bounded within a range.
- threshold_names (*list of strings*): Contains the name of the levers to be bounded within a range.
- thresholds (*list of lists of floats*): Contains the upper and lower bound for each specified lever.
**Returns**:
Lever values corresponding to generated chromosome and cost values corresponding to the current chromosome.
"""
lever_names = list(dfs_3.iloc[:, 0].to_numpy()) # Create list with all lever names
# Generate random lever combination
random_lever_values = new_lever_combination(threshold = threshold, threshold_names = threshold_names, thresholds = thresholds)
# Fix specified input levers
if constraint == True:
lever_names, random_lever_values = overwrite_lever_values(lever_names, random_lever_values, constraint_levers, constraint_values)
result = move_lever(lever_names, random_lever_values, costs = True, constraint = constraint, constraint_levers = constraint_levers, constraint_values = constraint_values) # Move lever accordingly and read temperature and cost valuesw
return random_lever_values, result
| 5,347,349 |
def plot_cov(state_pred, cov_pred):
"""
Plot the covariances
:param state_pred: list of all the previous positions
:param cov_pred: list of all the previous covariances
:return:
"""
state = []
cov = []
for i in range(len(state_pred)):
x = np.array([[state_pred[i][0]], [state_pred[i][1]], [state_pred[i][2]]])
state.append(x)
x2 = np.array(cov_pred[i])
cov.append(x2)
plt.ion()
fig, ax = plt.subplots()
px, py = plot_covariance_ellipse(np.array(state[0]), np.array(cov[0] / 1000))
line_v = ax.axvline(x=state[0][0], color="k")
line_h = ax.axhline(y=state[0][1], color="k")
ellips, = ax.plot(px, py, "--r", label="covariance matrix")
l_max = len(state)
list = [i for i in range(l_max)]
list.insert(0, -1)
for i in list:
px, py = plot_covariance_ellipse(np.asarray(state[i]), np.array(cov[i] / 1000))
line_v.set_xdata([state[i][0]])
line_h.set_ydata([state[i][1]])
ellips.set_xdata(px)
ellips.set_ydata(py)
ax.relim()
ax.autoscale_view()
fig.canvas.draw()
fig.canvas.flush_events()
plt.axis([0.8, 0, 0.725, 0])
plt.show()
if i == (l_max - 1):
time.sleep(10)
| 5,347,350 |
def save_data(df, database_filename):
"""save data to sql database"""
engine = create_engine('sqlite:///{}'.format(database_filename))
df.to_sql("MessagesCategories", engine, if_exists='replace', index=False)
| 5,347,351 |
def make_link_request(data: dict, user_token: str):
"""
https://yandex.ru/dev/disk/api/reference/response-objects-docpage/#link
- it will not raise in case of error HTTP code.
- see `api/request.py` documentation for more.
:param data: Data of link to handle.
:param user_token: User OAuth token to access the API.
:raises NotImplementedError: If link requires templating.
"""
if (data["templated"]):
raise NotImplementedError("Templating not implemented")
url = data["href"]
method = data["method"].upper()
timeout = current_app.config["YANDEX_DISK_API_TIMEOUT"]
return request(
raise_for_status=False,
content_type="json",
method=method,
url=url,
timeout=timeout,
auth=HTTPOAuthAuth(user_token),
allow_redirects=False,
verify=True
)
| 5,347,352 |
def load_sample_bathymetry(**kwargs):
"""
(Deprecated) Load a table of ship observations of bathymetry off Baja
California as a pandas.DataFrame.
.. warning:: Deprecated since v0.6.0. This function has been replaced with
``load_sample_data(name="bathymetry")`` and will be removed in
v0.9.0.
This is the ``@tut_ship.xyz`` dataset used in the GMT tutorials.
The data are downloaded to a cache directory (usually ``~/.gmt/cache``) the
first time you invoke this function. Afterwards, it will load the data from
the cache. So you'll need an internet connection the first time around.
Returns
-------
data : pandas.DataFrame
The data table. Columns are longitude, latitude, and bathymetry.
"""
if "suppress_warning" not in kwargs:
warnings.warn(
"This function has been deprecated since v0.6.0 and will be "
"removed in v0.9.0. Please use "
"load_sample_data(name='bathymetry') instead.",
category=FutureWarning,
stacklevel=2,
)
fname = which("@tut_ship.xyz", download="c")
data = pd.read_csv(
fname, sep="\t", header=None, names=["longitude", "latitude", "bathymetry"]
)
return data
| 5,347,353 |
def add_bar_labels(ax, vertical=True, bar_label_fontsize=8):
"""Adds a value at the top of the bar showing the size of it"""
handles, labels = ax.get_legend_handles_labels()
# In order to support stacked bar charts by only add a value for the
# highest point (as stacked rectangles are counted individually),
# store each bars identifier along with the size of the bar, and
# keep the max size.
max_values = {}
neginf = -float('inf') # Placeholder for the first occurrence of each key
# that is always replaced
for i in ax.patches:
if vertical:
x = i.xy[0]
y = i.xy[1] + i.get_height()
max_values[x] = max(max_values.get(x, neginf), y)
else: # Do the same except for horizontal bar charts.
x = i.xy[0] + i.get_width()
y = i.xy[1]
max_values[y] = max(max_values.get(y, neginf), x)
# Perform a second pass, this time fetching the sizes
# Only add a label once per set of (stacked) bars
used = set()
for i in ax.patches:
if vertical:
x = i.xy[0]
if x in used:
continue
used.add(x)
y = max_values[x]
# Avoid floating point checks on equality (e.g as keys), even if
# it might be fine. Hence, do the offset to center the label here
# after it has been used as a key.
x += i.get_width()*0.5
ha,va = 'center','bottom'
size=y
else: # Do the same except for horizontal bar charts.
y = i.xy[1]
if y in used:
continue
used.add(y)
x = max_values[y]
y += i.get_height()*0.5
ha,va = 'left','center'
size=x
ax.text(x=x,
y=y,
s="{}".format(int(size)),
fontsize=bar_label_fontsize,
ha=ha,
va=va)
| 5,347,354 |
def main():
""" main """
parser = argparse.ArgumentParser(
description='This is a wrapper to test the resolver code')
parser.add_argument("-s",
"--servers",
default="8.8.8.8,1.1.1.1",
help="Resolvers to query")
parser.add_argument("-n",
"--name",
default="jrcs.net",
help="Name to query for")
parser.add_argument("-t",
"--rdtype",
default="txt",
help="RR Type to query for")
args = parser.parse_args()
if not validation.is_valid_host(args.name):
print(f"ERROR: '{args.name}' is an invalid host name")
else:
qry = Query(args.name, args.rdtype)
qry.servers = args.servers.split(",")
qry.do = True
print(json.dumps(qry.resolv(), indent=2))
| 5,347,355 |
def shift(obj, offset, excluded=(), op=operator.sub, verbose=False):
"""Shift soda.ir.Ref with the given offset.
All soda.ir.Ref, excluding the given names, will be shifted with the
given offset using the given operator. The operator will be applied pointwise
on the original index and the given offset.
Args:
obj: A haoda.ir.Node or a tensor.Tensor object.
offset: Second operand given to the operator.
excluded: Sequence of names to be excluded from the mutation. Default to ().
op: Shifting operator. Should be either add or sub. Default to sub.
verbose: Whether to log shiftings. Default to False.
Returns:
Mutated obj. If obj is an IR node, it will be a different object than the
input. If obj is a tensor, it will be the same object but with fields
mutated.
"""
if op not in (operator.add, operator.sub):
_logger.warning('shifting with neither + nor -, which most likely is an error')
def visitor(obj, args):
if isinstance(obj, ir.Ref):
if obj.name not in excluded:
new_idx = tuple(op(a, b) for a, b in zip(obj.idx, offset))
if verbose:
_logger.debug('reference %s(%s) shifted to %s(%s)', obj.name,
', '.join(map(str, obj.idx)), obj.name,
', '.join(map(str, new_idx)))
obj.idx = new_idx
if isinstance(obj, ir.Node):
return obj.visit(visitor)
if isinstance(obj, tensor.Tensor):
obj.mutate(visitor)
else:
raise TypeError('argument is not an IR node or a tensor')
return obj
| 5,347,356 |
async def ticket_channel_embed(
_: hikari.InteractionCreateEvent, bot: hikari.GatewayBot
) -> hikari.Embed:
"""Provides an embed for individual ticket channels."""
description = (
"Thanks for submitting a ticket! We take all tickets "
"very seriously. Please provide a full explanation in this "
"channel. You can include text, images, files, video, or "
"documents. \n\nPlease do not ping the Mods or Staff unless "
"there is a life or death situation. Someone will address it "
"available."
)
embed = hikari.Embed(title="", description=description, color=8454399)
embed.set_thumbnail(
"https://cdn.discordapp.com/attachments/733789542884048906/900079323279663175/85d744c5310511ecb705f23c91500735.png"
)
embed.set_author(name=bot.get_me().username, icon=bot.get_me().avatar_url)
return embed
| 5,347,357 |
def count_branching_factor(strips_ops: List[STRIPSOperator],
segments: List[Segment]) -> int:
"""Returns the total branching factor for all states in the segments."""
total_branching_factor = 0
for segment in segments:
atoms = segment.init_atoms
objects = set(segment.states[0])
ground_ops = {
ground_op
for op in strips_ops
for ground_op in all_ground_operators(op, objects)
}
for _ in get_applicable_operators(ground_ops, atoms):
total_branching_factor += 1
return total_branching_factor
| 5,347,358 |
def iou_overlaps(b1, b2):
"""
Arguments:
b1: dts, [n, >=4] (x1, y1, x2, y2, ...)
b2: gts, [n, >=4] (x1, y1, x2, y2, ...)
Returns:
intersection-over-union pair-wise, generalized iou.
"""
area1 = (b1[:, 2] - b1[:, 0] + 1) * (b1[:, 3] - b1[:, 1] + 1)
area2 = (b2[:, 2] - b2[:, 0] + 1) * (b2[:, 3] - b2[:, 1] + 1)
# only for giou loss
lt1 = torch.max(b1[:, :2], b2[:, :2])
rb1 = torch.max(b1[:, 2:4], b2[:, 2:4])
lt2 = torch.min(b1[:, :2], b2[:, :2])
rb2 = torch.min(b1[:, 2:4], b2[:, 2:4])
wh1 = (rb2 - lt1 + 1).clamp(min=0)
wh2 = (rb1 - lt2 + 1).clamp(min=0)
inter_area = wh1[:, 0] * wh1[:, 1]
union_area = area1 + area2 - inter_area
iou = inter_area / torch.clamp(union_area, min=1)
ac_union = wh2[:, 0] * wh2[:, 1] + 1e-7
giou = iou - (ac_union - union_area) / ac_union
return iou, giou
| 5,347,359 |
def aggregate(data):
"""Aggregate the data."""
return NotImplemented
| 5,347,360 |
def get_valid_columns(solution):
"""Get a list of column indices for which the column has more than one class.
This is necessary when computing BAC or AUC which involves true positive and
true negative in the denominator. When some class is missing, these scores
don't make sense (or you have to add an epsilon to remedy the situation).
Args:
solution: array, a matrix of binary entries, of shape
(num_examples, num_features)
Returns:
valid_columns: a list of indices for which the column has more than one
class.
"""
num_examples = solution.shape[0]
col_sum = np.sum(solution, axis=0)
valid_columns = np.where(1 - np.isclose(col_sum, 0) -
np.isclose(col_sum, num_examples))[0]
return valid_columns
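# Worked example (added for illustration; assumes numpy is imported as np, as
# the function above already requires): column 0 is all ones and column 2 is
# all zeros, so only column 1 contains both classes and survives the filter.
_solution = np.array([[1, 0, 0],
                      [1, 1, 0],
                      [1, 0, 0]])
assert list(get_valid_columns(_solution)) == [1]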
| 5,347,361 |
def fourier_transform(data, proc_parameters):
"""Perform Fourier Transform down dim dimension given in proc_parameters
.. Note::
Assumes dt = t[1] - t[0]
Args:
data (nddata): Data container
proc_parameters (dict, procParam): Processing parameters
Returns:
nddata: Fourier Transformed data
Example:
.. code-block:: python
proc_parameters['dim'] = 't'
proc_parameters['zero_fill_factor'] = 2
proc_parameters['shift'] = True
proc_parameters['convert_to_ppm'] = True
all_data = dnplab.dnpNMR.fourier_transform(all_data, proc_parameters)
"""
required_parameters = defaults._fourier_transform
# Add required parameters to proc_parameters
print(required_parameters)
for key in required_parameters:
if key not in proc_parameters:
proc_parameters[key] = required_parameters[key]
#
dim = proc_parameters["dim"]
zero_fill_factor = proc_parameters["zero_fill_factor"]
shift = proc_parameters["shift"]
convert_to_ppm = proc_parameters["convert_to_ppm"]
index = data.dims.index(dim)
dt = data.coords[index][1] - data.coords[index][0]
n_pts = zero_fill_factor * len(data.coords[index])
f = (1.0 / (n_pts * dt)) * np.r_[0:n_pts]
if shift == True:
f -= 1.0 / (2 * dt)
data.values = np.fft.fft(data.values, n=n_pts, axis=index)
if shift:
data.values = np.fft.fftshift(data.values, axes=index)
data.coords[index] = f
return data
| 5,347,362 |
def iupac_fasta_converter(header, sequence):
"""
Given a sequence (header and sequence itself) containing iupac characters,
return a dictionary with all possible sequences converted to ATCG.
"""
iupac_dict = {"R": "AG", "Y": "CT", "S": "GC", "W": "AT", "K": "GT",
"M": "AC", "B": "CGT", "D": "AGT", "H": "ACT", "V": "ACG",
"N": "ACGT"}
iupac_dict = {k: list(iupac_dict[k])
for k in list(iupac_dict.keys())}
if sequence.upper().count("N") >= 10:
return {header: sequence}
sequence = list(sequence.upper())
result_list = []
def iupac_recurse(seq):
for i in range(len(seq)):
if seq[i] in list(iupac_dict.keys()):
iup = iupac_dict[seq[i]]
for i_seq in iup:
new_seq = copy.deepcopy(seq)
new_seq[i] = i_seq
iupac_recurse(new_seq)
break
else:
result_list.append("".join(seq))
iupac_recurse(sequence)
if len(result_list) == 1:
return {header: result_list[0]}
else:
return {header + "-" + str(i): result_list[i]
for i in range(len(result_list))}
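# Quick usage sketch (illustrative, not from the original module): the single
# IUPAC code "R" (A or G) expands into two concrete sequences, keyed by the
# header plus an index.
assert iupac_fasta_converter("seq1", "ATR") == {"seq1-0": "ATA", "seq1-1": "ATG"}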
| 5,347,363 |
def test_deserialzing_an_invalid_bootcamp(bootcamp_valid_data):
"""
Verifies that parse_bootcamp_json_data does not create a new Course if the serializer is invalid
"""
bootcamp_valid_data.pop("course_id")
parse_bootcamp_json_data(bootcamp_valid_data)
assert Course.objects.count() == 0
assert LearningResourceRun.objects.count() == 0
| 5,347,364 |
def reverse(arr, start, end):
"""Reverse an array arr between indices start and end (exclusive)."""
while start < end:
arr[start], arr[end - 1] = arr[end - 1], arr[start]
start += 1
end -= 1
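# Illustrative call (added): reverse only the middle slice, in place, with the
# end index exclusive.
_arr = [1, 2, 3, 4, 5]
reverse(_arr, 1, 4)
assert _arr == [1, 4, 3, 2, 5]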
| 5,347,365 |
def integer_list_to_named_tuple(list_of_integers):
"""
Converts a list of integers read from the ultrak498 into a named tuple
based upon the type. The type is determiend by the first integer in the
list. Since all tuples contain five fields, the list of integers must
have a length of five.
Returns a named tuple based on the type,
"""
# Dictionary mapping type id to record named tuples.
valid_types = {
0: namedtuple("RaceHeader", "type year month day id"),
1: namedtuple("RaceHeader", "type year month day id"),
2: namedtuple("RaceHeader", "type year month day id"),
3: namedtuple("RaceHeader", "type year month day id"),
4: namedtuple("RaceHeader", "type year month day id"),
5: namedtuple("RaceHeader", "type year month day id"),
6: namedtuple("RaceHeader", "type year month day id"),
7: namedtuple("RaceHeader", "type year month day id"),
8: namedtuple("RaceHeader", "type year month day id"),
9: namedtuple("RaceHeader", "type year month day id"),
10: namedtuple("LapTime", "type minutes seconds hundreths lap"),
20: namedtuple("AbsTime", "type minutes seconds hundreths lap"),
30: namedtuple("Type30", "type a b c laps"),
40: namedtuple("Type40", "type a b c laps"),
50: namedtuple("RaceEnd", "type minutes seconds hundreths laps"),
}
# List of integers must be length of five.
if len(list_of_integers) != 5:
raise ValueError("Unable to convert list of integers to tuple; incorrect number of integers.")
# First byte is the type; type must be known.
tuple_type = list_of_integers[0]
if tuple_type not in valid_types:
raise ValueError("Unable to convert list of integers to tuple; unknown record type [%d]." % tuple_type)
# Create a namedtuple based upon the tuple_type.
named_tuple = valid_types[tuple_type]._make(list_of_integers)
return named_tuple
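# Example usage (an illustrative addition): record type 10 decodes to a lap
# time, so five integers become a LapTime named tuple.
_record = integer_list_to_named_tuple([10, 1, 23, 45, 3])
assert type(_record).__name__ == "LapTime" and _record.minutes == 1 and _record.lap == 3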
| 5,347,366 |
def get_albums(): # noqa: E501
"""get_albums
Shows all available albums # noqa: E501
:rtype: List[Album]
"""
albums = DBAlbum.query.all()
results = [
Album(album.id, album.title, album.description) for album in albums]
return results
| 5,347,367 |
def get_final_shape(data_array, out_dims, direction_to_names):
"""
Determine the final shape that data_array must be reshaped to in order to
have one axis for each of the out_dims (for instance, combining all
axes collected by the '*' direction).
"""
final_shape = []
for direction in out_dims:
if len(direction_to_names[direction]) == 0:
final_shape.append(1)
else:
# determine shape once dimensions for direction (usually '*') are combined
final_shape.append(
np.prod([len(data_array.coords[name])
for name in direction_to_names[direction]]))
return final_shape
| 5,347,368 |
def create_assignment_payload(subsection_block):
"""
Create a Canvas assignment dict matching a subsection block on edX
Args:
subsection_block (openedx.core.djangoapps.content.block_structure.block_structure.BlockData):
The block data for the graded assignment/exam (in the structure of a course, this unit is a subsection)
Returns:
dict:
Assignment payload to be sent to Canvas to create or update the assignment
"""
return {
"assignment": {
"name": subsection_block.display_name,
"integration_id": str(subsection_block.location),
"grading_type": "percent",
"points_possible": DEFAULT_ASSIGNMENT_POINTS,
"due_at": (
None if not subsection_block.fields.get("due")
# The internal API gives us a TZ-naive datetime for the due date, but Studio indicates that
# the user should enter a UTC datetime for the due date. Coerce this to UTC before creating the
# string representation.
else subsection_block.fields["due"].astimezone(pytz.UTC).isoformat()
),
"submission_types": ["none"],
"published": False,
}
}
| 5,347,369 |
def return_random_initial_muscle_lengths_and_activations(InitialTension,X_o,**kwargs):
"""
This function returns initial muscle lengths and muscle activations for a given pretensioning level, as derived from (***insert file_name here for scratchwork***) for the system that starts from rest. (Ex. pendulum_eqns.reference_trajectories._01).
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
**kwargs
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
1) Seed - Can see the random tension generated. When FixedInitialTension is provided, this seed will apply only to the initial conditions for activation and muscle length.
2) PlotBool - Must be either True or False. Default is False. Will plot all possible initial muscle lengths and activations for a given pretensioning level.
3) InitialTensionAcceleration - must be a numpy array of shape (2,). Default is set to the value generated from zero IC's. If using different reference trajectory, set InitialAngularAcceleration to d2r(0) (See below).
4) InitialAngularAcceleration - must be either a numpy.float64, float, or int. Default is set to 0 to simulate starting from rest. Choice of reference trajectory *should* not matter as it is either 0 or d2r(0) (either by convention or by choice).
5) InitialAngularSnap - must be either a numpy.float64, float, or int. Default is set to 0 to simulate starting from rest. Choice of reference trajectory *should* not matter as it is either 0 or d4r(0) (either by convention or by choice).
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
"""
PlotBool = kwargs.get("PlotBool",False)
assert type(PlotBool)==bool,"PlotBool must be a boolean. Default is False."
InitialAngularAcceleration = kwargs.get(
"InitialAngularAcceleration",
0
) # 0 or d2r(0)
assert str(type(InitialAngularAcceleration)) in ["<class 'float'>","<class 'int'>","<class 'numpy.float64'>"], "InitialAngularAcceleration must be either a float or an int."
InitialAngularSnap = kwargs.get(
"InitialAngularSnap",
0
) # 0 or d4r(0)
assert str(type(InitialAngularSnap)) in ["<class 'float'>","<class 'int'>","<class 'numpy.float64'>"], "InitialAngularSnap must be either a float or an int."
InitialTensionAcceleration = kwargs.get(
"InitialTensionAcceleration",
return_initial_tension_acceleration(
InitialTension,
X_o,
InitialAngularAcceleration=InitialAngularAcceleration,
InitialAngularSnap=InitialAngularSnap
)
)
assert np.shape(InitialTensionAcceleration)==(2,) \
and str(type(InitialTensionAcceleration))=="<class 'numpy.ndarray'>", \
"InitialTensionAcceleration must be a numpy array of shape (2,)"
a_MTU1_o = np.sign(-r1(X_o[0]))*(
InitialAngularAcceleration
* np.sqrt(dr1_dθ(X_o[0])**2 + r1(X_o[0])**2)
+
X_o[1]**2
* dr1_dθ(X_o[0])
* (d2r1_dθ2(X_o[0]) + r1(X_o[0]))
/ np.sqrt(dr1_dθ(X_o[0])**2 + r1(X_o[0])**2)
)
a_MTU2_o = np.sign(-r2(X_o[0]))*(
InitialAngularAcceleration
* np.sqrt(dr2_dθ(X_o[0])**2 + r2(X_o[0])**2)
+
X_o[1]**2
* dr2_dθ(X_o[0])
* (d2r2_dθ2(X_o[0]) + r2(X_o[0]))
/ np.sqrt(dr2_dθ(X_o[0])**2 + r2(X_o[0])**2)
)
L1_UB = lo1*L_CE_max_1*(
k_1*np.log(
np.exp(
(m1*InitialTensionAcceleration[0]
+ (F_MAX1*cT/lTo1)
* (1-np.exp(-InitialTension[0]/(F_MAX1*cT*kT)))
* (c3*InitialTension[0]
- m1*a_MTU1_o
)
)
/ (F_MAX1*c3**2
*c_1*k_1
*(F_MAX1*cT/lTo1)
*(1-np.exp(-InitialTension[0]/(F_MAX1*cT*kT)))
)
)
- 1
)
+ Lr1
)
L2_UB = lo2*L_CE_max_2*(
k_1*np.log(
np.exp(
(m2*InitialTensionAcceleration[1]
+ (F_MAX2*cT/lTo2)
* (1-np.exp(-InitialTension[1]/(F_MAX2*cT*kT)))
* (c4*InitialTension[1]
- m2*a_MTU2_o
)
)
/ (F_MAX2*c4**2
*c_1*k_1
*(F_MAX2*cT/lTo2)
*(1-np.exp(-InitialTension[1]/(F_MAX2*cT*kT)))
)
)
- 1
)
+ Lr1
)
L1_LB = 0.5*lo1
if L1_UB > 1.5*lo1:
L1_UB = 1.5*lo1
L1 = np.linspace(L1_LB, L1_UB, 1001)
# mu1, sigma1 = lo1, 0.1*lo1
# L1 = np.array(list(sorted(np.random.normal(mu1, sigma1, 1001))))
U1 = (m1*InitialTensionAcceleration[0]
+ (F_MAX1*cT/lTo1)
* (1-np.exp(-InitialTension[0]/(F_MAX1*cT*kT)))
* (c3*InitialTension[0]
- m1*a_MTU1_o
- F_MAX1*c3**3
*c_1*k_1
*np.log(np.exp((L1/(lo1*L_CE_max_1) - Lr1)/k_1)+1)
)
) \
/ (
F_MAX1*c3**2
*(F_MAX1*cT/lTo1)
*(1-np.exp(-InitialTension[0]/(F_MAX1*cT*kT)))
*np.exp(-(abs((L1-lo1)/(lo1*ω))**ρ))
)
# U1 = (
# InitialTension[0][0]/(F_MAX1*np.cos(α1))
# - c_1*k_1*np.log(np.exp((L1/(lo1*L_CE_max_1) - Lr1)/k_1)+1)
# ) / (np.exp(-(abs((L1-lo1)/(lo1*ω))**ρ)))
L2_LB = 0.5*lo2
if L2_UB > 1.5*lo2:
L2_UB = 1.5*lo2
L2 = np.linspace(L2_LB, L2_UB, 1001)
# mu2, sigma2 = lo2, 0.1*lo2
# L2 = np.array(list(sorted(np.random.normal(mu2, sigma2, 1001))))
U2 = (m2*InitialTensionAcceleration[1]
+ (F_MAX2*cT/lTo2)
* (1-np.exp(-InitialTension[1]/(F_MAX2*cT*kT)))
* (c4*InitialTension[1]
- m2*a_MTU2_o
- F_MAX2*c4**3
*c_1*k_1
*np.log(np.exp((L2/(lo2*L_CE_max_2) - Lr1)/k_1)+1)
)
) \
/ (
F_MAX2*c4**2
*(F_MAX2*cT/lTo2)
*(1-np.exp(-InitialTension[1]/(F_MAX2*cT*kT)))
*np.exp(-(abs((L2-lo2)/(lo2*ω))**ρ))
)
# U2 = (
# InitialTension[1][0]/(F_MAX2*np.cos(α2))
# - c_1*k_1*np.log(np.exp((L2/(lo2*L_CE_max_2) - Lr1)/k_1)+1)
# ) / (np.exp(-(abs((L2-lo2)/(lo2*ω))**ρ)))
if PlotBool == True:
plt.figure(figsize=(10,8))
plt.title(r"Viable Initial $l_{m,1}$ and $u_{1}$ Values")
plt.xlabel(r"$l_{m,1}$ (m)",fontsize=14)
plt.ylabel(r"$u_{1}$",fontsize=14)
plt.scatter(L1,U1)
plt.plot([lo1,lo1],[0,1],'0.70',linestyle='--')
plt.gca().set_ylim((0,1))
plt.gca().set_xticks(
[0.25*lo1,
0.5*lo1,
0.75*lo1,
lo1,
1.25*lo1,
1.5*lo1,
1.75*lo1]
)
plt.gca().set_xticklabels(
["",
r"$\frac{1}{2}$ $l_{o,2}$",
"",
r"$l_{o,2}$",
"",
r"$\frac{3}{2}$ $l_{o,2}$",
""],
fontsize=12)
plt.figure(figsize=(10,8))
plt.title(r"Viable Initial $l_{m,2}$ and $u_{2}$ Values")
plt.xlabel(r"$l_{m,2}$ (m)",fontsize=14)
plt.ylabel(r"$u_{2}$",fontsize=14)
plt.scatter(L2,U2)
plt.plot([lo2,lo2],[0,1],'0.70',linestyle='--')
plt.gca().set_ylim((0,1))
plt.gca().set_xticks(
[0.25*lo2,
0.5*lo2,
0.75*lo2,
lo2,
1.25*lo2,
1.5*lo2,
1.75*lo2]
)
plt.gca().set_xticklabels(
["",
r"$\frac{1}{2}$ $l_{o,2}$",
"",
r"$l_{o,2}$",
"",
r"$\frac{3}{2}$ $l_{o,2}$",
""],
fontsize=12)
plt.show()
return(L1,U1,L2,U2)
| 5,347,370 |
def update_box(form_data):
"""
Updates a box in the database.
Takes in form_data as the data being updated.
"""
update_query = "UPDATE boxes SET shelf_number = ? WHERE box_number = ?"
c.execute(update_query, form_data)
conn.commit()
| 5,347,371 |
def test_input(test_page):
"""When an input is filled with the text 'Winamp'
Then the text in the input should be 'Winamp'
"""
test_page.navigate()
test_page.input_area.input.fill('Winamp')
assert 'Winamp' == test_page.input_area.input.element.value
| 5,347,372 |
def add_top_features(df, vocab, n=10):
"""
INPUT: PySpark DataFrame, List, Int
RETURN: PySpark DataFrame
Take in DataFrame with TFIDF vectors, list of vocabulary words,
and number of features to extract. Map top features from TFIDF
vectors to vocabulary terms. Return new DataFrame with terms
"""
# Create udf function to extract top n features
extract_features_udf = udf(lambda x: extract_top_features(x, vocab, n))
# Apply udf, create new df with features column
df_features = df.withColumn("top_features",
extract_features_udf(df["tfidf_vectors_sum"]))
return df_features
| 5,347,373 |
def java_solvability(level: MarioLevel, time_per_episode=20, verbose=False, return_trajectories=False) -> Union[bool, Tuple[bool, List[Tuple[float, float]]]]:
"""Returns a boolean indicating if this level is solvable.
Args:
level (MarioLevel): The level
time_per_episode (int, optional): How many seconds per episode. Defaults to 20.
verbose (bool, optional): Should this print many info. Defaults to False.
return_trajectories (bool, optional). If this is true, then we are by default verbose and we return trajectories
Returns:
bool: is this level solvable (when return_trajectories is False), or
Tuple[bool, List[Tuple[float, float]]]: (solvable, trajectory) when return_trajectories is True
"""
filename = write_level_to_file(level)
verbose = verbose or return_trajectories
args = ["Astar_Solvability", filename, str(time_per_episode), str(1), str(verbose).lower()]
s = timer()
string = run_java_task(args)
e = timer()
lines = string.split("\n")
result_line = [l for l in lines if 'Result' in l]
if len(result_line) == 0:
raise Exception("Java didn't print out result properly: " + string + "args = " + ' '.join(args))
if return_trajectories:
traj_line = [l for l in lines if 'Trajectories' in l]
if len(traj_line) == 0:
raise Exception("Java didn't print out trajectory properly: " + string + "args = " + ' '.join(args))
vals = [s.strip() for s in traj_line[0].split(":")[1].split(" ")]
vals = [s for s in vals if s != '']
vals = [ tuple(map(float, s.split(','))) for s in vals]
return 'WIN' in result_line[0], vals
return 'WIN' in result_line[0]
| 5,347,374 |
def set_values(imask, k, val):
""" Set all k values to val"""
Mp, Lp = imask.shape
for j in range(1,Mp-1):
for i in range(1,Lp-1):
if (imask[j,i] == k): imask[j,i] = val
| 5,347,375 |
def get_trait_value(traitspec, value_name, default=None):
""" Return the attribute `value_name` from traitspec if it is defined.
If not will return the value of `default`.
Parameters
----------
traitspec: TraitedSpec
value_name: str
Name of the `traitspec` attribute.
default: any
A default value in case the attribute does not exist or is not defined.
Returns
-------
trait_value: any
"""
val = getattr(traitspec, value_name, default)
return default if not isdefined(val) else val
| 5,347,376 |
def remove_dict_keys(obj, remove_keys=[], empty_keys=False, recursive=False):
""" Modify a dictionary by removing items with matching and/or empty keys
Parameters
----------
obj: dict
Dictionary to modify by removing keys
remove_keys: list of str
Keys to remove
empty_keys: bool
If true, keys will be removed if bool(obj[key]) is false
recursive: bool
If true, will search dict recursively
Returns
-------
None
"""
if isinstance(obj, dict):
for key in list(obj.keys()):
if key in list(remove_keys):
del obj[key]
elif empty_keys and not obj[key]:
del obj[key]
else:
if recursive:
remove_dict_keys(obj[key], remove_keys, empty_keys, recursive)
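# Minimal demonstration (added for illustration): drop the "secret" key and any
# empty values, recursing into the nested dictionary.
_cfg = {"secret": "x", "empty": "", "nested": {"secret": "y", "keep": 1}}
remove_dict_keys(_cfg, remove_keys=["secret"], empty_keys=True, recursive=True)
assert _cfg == {"nested": {"keep": 1}}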
| 5,347,377 |
def check_if_blank(cell_image: Image) -> bool:
"""Check if image is blank
Sample the color of the black and white content - if it is white enough
assume no text and skip. Function takes a small more centered section to
OCR to avoid edge lines.
:param cell_image: Image to OCR
:return: True if the cell appears blank, otherwise False
"""
w, h = cell_image.size
crop = cell_image.crop((w * 0.1, h * 0.1, w * 0.8, h * 0.8))
data = crop.getdata()
counts = collections.Counter(data)
    # The threshold of 50 distinct colours is a heuristic and may need tuning;
    # one idea is to derive it from a predictably white slice of the page, so
    # empty sections are dropped reliably without discarding real text.
    if len(counts) < 50:
return True
return False
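
# Usage sketch (added for illustration), assuming Pillow provides the Image
# type used above: a uniformly white cell yields a single sampled colour.
from PIL import Image as PILImage

blank_cell = PILImage.new("L", (200, 100), color=255)
print(check_if_blank(blank_cell))  # True: far fewer than 50 distinct colours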
| 5,347,378 |
def get_tpr_from_threshold(scores,labels, threshold_list):
"""Calculate the recall score list from the threshold score list.
Args:
        scores: list of prediction scores
        labels: list of ground-truth labels (1 marks a positive/"hack" sample)
threshold_list: list, the threshold list
Returns:
recall_list: list, the element is recall score calculated by the
correspond threshold
"""
tpr_list = []
hack_scores = []
for score, label in zip(scores,labels):
if label == 1:
hack_scores.append(float(score))
hack_scores.sort(reverse=True)
hack_nums = len(hack_scores)
for threshold in threshold_list:
hack_index = 0
while hack_index < hack_nums:
if hack_scores[hack_index] <= threshold:
break
else:
hack_index += 1
if hack_nums != 0:
tpr = hack_index * 1.0 / hack_nums
else:
tpr = 0
tpr_list.append(tpr)
return tpr_list
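
# Usage sketch (added for illustration): three positive samples (label 1).
scores = [0.9, 0.8, 0.3, 0.1]
labels = [1, 1, 0, 1]
print(get_tpr_from_threshold(scores, labels, [0.5, 0.85]))
# -> [0.666..., 0.333...]: 2 of 3, then 1 of 3, positives score above each threshold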
| 5,347,379 |
def combined_w2v_labeling(apply_sub_clustering=False):
"""Combine words with w2v for labeling"""
reviews = get_reviews()
count_vectorizer = CountVectorizer(stop_words=STOPWORDS, tokenizer=valid_words_tokenizer)
count_vectorizer.fit(reviews)
features = count_vectorizer.get_feature_names()
# Get cluster of words
words_vectors = w2v.terms_vectors(features)
model = AgglomerativeClustering(0.5, "complete", "cosine")
result = model.fit_predict(words_vectors)
word_clusters = group_result(result, 2)
# word_clusters = [[features[word_index] for word_index in cl] for cl in word_clusters]
df_pipeline = make_pipeline(count_vectorizer, labeling_utils.SimilarDFTransformer(tokens_clusters=word_clusters))
tf_pipeline = make_pipeline(count_vectorizer, labeling_utils.SimilarTFTransformer(tokens_clusters=word_clusters))
tfidf_vectorizer = TfidfTransformer()
tfidf_vectorizer.fit(df_pipeline.transform(reviews))
clusters = list(get_clusters())
del clusters[-1]
if apply_sub_clustering:
clusters_labels = []
for cluster in clusters:
sub_clusters = sub_clustering(cluster)
sub_clusters_size_ratio = [(len(cl) / len(cluster)) for cl in sub_clusters]
text_sub_clusters = [" ".join(cl) for cl in sub_clusters]
vectorized_sub_clusters = tfidf_vectorizer.transform(tf_pipeline.transform(text_sub_clusters))
sub_clusters_top_terms = []
for vectorized_sub_cluster in vectorized_sub_clusters.todense():
weights = sorted(enumerate(vectorized_sub_cluster.tolist()[0]), key=itemgetter(1), reverse=True)
sub_clusters_top_terms.append([(features[token[0]], token[1]) for token in weights[:10]])
terms = __combine_terms(sub_clusters_top_terms, sub_clusters_size_ratio)
clusters_labels.append(get_labels(cluster, terms))
else:
text_clusters = [" ".join(cluster) for cluster in clusters]
vectorized_clusters = tfidf_vectorizer.transform(tf_pipeline.transform(text_clusters))
clusters_top_terms = []
for vectorized_cluster in vectorized_clusters.todense():
weights = sorted(enumerate(vectorized_cluster.tolist()[0]), key=itemgetter(1), reverse=True)
clusters_top_terms.append([(features[token[0]], token[1]) for token in weights[:10]])
clusters_labels = [get_labels(cluster, clusters_top_terms[cluster_index], tokenizer=utils.word_tokenizer) for
cluster_index, cluster in
enumerate(clusters)]
print_result(clusters_labels)
# print(clusters_labels)
print(evaluate_clusters([[label[0] for label in labels] for labels in clusters_labels]))
| 5,347,380 |
def updating_node_validation_error(address=False, port=False, id=False,
weight=False):
"""
Verified 2015-06-16:
- when trying to update a CLB node's address/port/id, which are
immutable.
- when trying to update a CLB node's weight to be < 1 or > 100
At least one of address, port, id, and weight should be `True` for this
error to apply.
:param bool address: Whether the address was passed to update
:param bool port: Whether the port was passed to update
:param bool id: Whether the ID was passed to update
:param bool weight: Whether the weight was passed to update and wrong
:return: a `tuple` of (dict body message, 400 http status code)
"""
messages = []
if address:
messages.append("Node ip field cannot be modified.")
if port:
messages.append("Port field cannot be modified.")
if weight:
messages.append("Node weight is invalid. Range is 1-100. "
"Please specify a valid weight.")
if id:
messages.append("Node id field cannot be modified.")
return(
{
"validationErrors": {
"messages": messages
},
"message": "Validation Failure",
"code": 400,
"details": "The object is not valid"
},
400
)
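
# Usage sketch (added for illustration): simulate rejecting an update that
# touches both the port and an out-of-range weight.
body, status = updating_node_validation_error(port=True, weight=True)
print(status)                                # 400
print(body["validationErrors"]["messages"])  # port and weight validation messages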
| 5,347,381 |
def gaussianDerivative(x):
"""This function returns the gaussian derivative of x
(Note: Not Real Derivative)
"""
return -2.0*x*(np.sqrt(-np.log(x)))
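
# Worked check (added for illustration): at x = exp(-1), -log(x) = 1, so the
# result is -2 * exp(-1), roughly -0.7358.
import numpy as np

print(gaussianDerivative(np.exp(-1.0)))  # approximately -0.73576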
| 5,347,382 |
def show_table(file: IO, spec: Dict):
"""Display the contents of a CSV file as a pandas DataFrame.
Parameters
----------
file: io.BytesIO
IO buffer containing the file content.
spec: dict
File output format specification.
"""
format = spec.get('format', {})
if not format.get('header', True) and format.get('columns'):
df = pd.read_csv(file, header=None, names=format.get('columns'))
else:
df = pd.read_csv(file)
if 'caption' in spec:
st.write(spec['caption'])
st.table(df)
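
# Usage sketch (added for illustration) with a hypothetical CSV payload and spec;
# st.table only renders inside a running Streamlit app, so treat this as an
# example of the expected inputs rather than a standalone script.
import io

csv_bytes = io.BytesIO(b"1,Alice\n2,Bob\n")
spec = {"caption": "Example users",
        "format": {"header": False, "columns": ["id", "name"]}}
show_table(csv_bytes, spec)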
| 5,347,383 |
def parse_numbers(numbers):
"""Return list of numbers."""
return [int(number) for number in numbers]
| 5,347,384 |
def add_server():
"""
    Adds a server to the database if it does not already exist
"""
data = json.loads(request.data)
ip_addr = IPModel.get_or_create(address=data["ip_addr"])[0]
ServerModel.create(ip=ip_addr, port=data["port"])
return 'OK'
| 5,347,385 |
def _fetch_files(data_dir, files, resume=True, verbose=1):
"""Load requested dataset, downloading it if needed or requested.
    This function retrieves files from the hard drive or downloads them from
    the given urls. Note to developers: All the files will first be
    downloaded in a sandbox and, if everything goes well, they will be moved
into the folder of the dataset. This prevents corrupting previously
downloaded data. In case of a big dataset, do not hesitate to make several
calls if needed.
Parameters
----------
data_dir : string
Path of the data directory. Used for data storage in a specified
location.
files : list of (string, string, dict)
List of files and their corresponding url with dictionary that contains
options regarding the files. Eg. (file_path, url, opt). If a file_path
is not found in data_dir, as in data_dir/file_path the download will
be immediately cancelled and any downloaded files will be deleted.
Options supported are:
* 'move' if renaming the file or moving it to a subfolder is needed
* 'uncompress' to indicate that the file is an archive
* 'md5sum' to check the md5 sum of the file
* 'overwrite' if the file should be re-downloaded even if it exists
resume : bool, optional
If true, try resuming download if possible. Default=True.
verbose : int, optional
Verbosity level (0 means no message). Default=1.
Returns
-------
files : list of string
Absolute paths of downloaded files on disk.
"""
# There are two working directories here:
# - data_dir is the destination directory of the dataset
# - temp_dir is a temporary directory dedicated to this fetching call. All
# files that must be downloaded will be in this directory. If a corrupted
# file is found, or a file is missing, this working directory will be
# deleted.
files = list(files)
files_pickle = pickle.dumps([(file_, url) for file_, url, _ in files])
files_md5 = hashlib.md5(files_pickle).hexdigest()
temp_dir = os.path.join(data_dir, files_md5)
# Create destination dir
if not os.path.exists(data_dir):
os.makedirs(data_dir)
# Abortion flag, in case of error
abort = None
files_ = []
for file_, url, opts in files:
# 3 possibilities:
# - the file exists in data_dir, nothing to do.
# - the file does not exists: we download it in temp_dir
# - the file exists in temp_dir: this can happen if an archive has been
# downloaded. There is nothing to do
# Target file in the data_dir
target_file = os.path.join(data_dir, file_)
# Target file in temp dir
temp_target_file = os.path.join(temp_dir, file_)
# Whether to keep existing files
overwrite = opts.get('overwrite', False)
if (abort is None and (overwrite or (not os.path.exists(target_file) and not
os.path.exists(temp_target_file)))):
# We may be in a global read-only repository. If so, we cannot
# download files.
if not os.access(data_dir, os.W_OK):
raise ValueError('Dataset files are missing but dataset'
' repository is read-only. Contact your data'
' administrator to solve the problem')
if not os.path.exists(temp_dir):
os.mkdir(temp_dir)
dl_file = _fetch_file(url, temp_dir, resume=resume,
verbose=verbose,
overwrite=overwrite)
if (abort is None and not os.path.exists(target_file) and not
os.path.exists(temp_target_file)):
            warnings.warn('An error occurred while fetching %s' % file_)
abort = ("Dataset has been downloaded but requested file was "
"not provided:\nURL: %s\n"
"Target file: %s\nDownloaded: %s" %
(url, target_file, dl_file))
if abort is not None:
if os.path.exists(temp_dir):
shutil.rmtree(temp_dir)
raise IOError('Fetching aborted: ' + abort)
files_.append(target_file)
# If needed, move files from temps directory to final directory.
if os.path.exists(temp_dir):
        # XXX We could move only the files requested
        # XXX movetree can go wrong
movetree(temp_dir, data_dir)
shutil.rmtree(temp_dir)
return files_
| 5,347,386 |
def parse_cmdline(argv):
"""
Returns the parsed argument list and return code.
`argv` is a list of arguments, or `None` for ``sys.argv[1:]``.
"""
if argv is None:
argv = sys.argv[1:]
# initialize the parser object:
parser = argparse.ArgumentParser(description='Checks for normal termination of Gaussian output files in a '
'specified directory, and moves them to a new location.')
parser.add_argument("-a", "--all", help="Check convergence of all steps and print to standard out.",
action="store_true", default=False)
parser.add_argument("-b", "--best", help="Check convergence of each step and list the convergence of the best 10 "
"steps, sorted by convergence.", action="store_true", default=False)
parser.add_argument("-d", "--directory", help="The directory where to look for Gaussian output files to check for "
"normal termination, without checking in subdirectories.",
metavar="path", default=None)
parser.add_argument("-ds", "--dir_subdirs", help="The directory where to look for Gaussian output files to check "
"for normal termination, including checking in subdirectories.",
metavar="path", default=None)
parser.add_argument("-e", "--extension", help="The extension of the Gaussian output file(s) to look for when "
"searching a directory for output files. The default is '{}'."
"".format(DEF_EXT), metavar="ext", default=DEF_EXT)
parser.add_argument("-f", "--file_name", help="A file name (with path, if not the current directory) to check for "
"either normal termination or convergence. If used, this option "
"overrides the '-d' option, and no searching for files is "
"performed.", metavar="path", default=None)
parser.add_argument("-l", "--file_list", help="A file name (with path, if not the current directory) with a "
"list of files (also with path, if not the current directory) "
"overrides the '-d' option, and no searching for files is to check "
"for either normal termination or convergence. If used, this "
"option overrides the '-d' option, and no searching for files is "
"performed.", metavar="path", default=None)
parser.add_argument("-o", "--output_directory", help="The directory where to put Gaussian output files that have "
"terminated normally. The default is '{}'."
"".format(DEF_COMPLETE_DIR), metavar="path",
default=DEF_COMPLETE_DIR)
parser.add_argument("-s", "--step_converg", help="Report the convergence for each step value for the files in the "
"directory or those specified with the '-f' or '-l' options. When "
"this option is chosen, the check for normal termination is "
"skipped. The default is False.",
action="store_true", default=False)
parser.add_argument("-t", "--to_step", help="Check convergence of each step only to provided step number, and "
"before printing to standard out, sort by convergence.",
default=False)
parser.add_argument("-z", "--final_converg", help="Report the final convergence value for the files in the "
"directory or those specified with the '-f' or '-l' options. "
"When this option is chosen, the check for normal termination "
"is skipped. The default is False.", action="store_true",
default=False)
parser.add_argument("--scan", help="Read output file(s) from a scan and writes the converged energies from each "
"point of the scan to a csv file and creates a plot saved as the given file "
"name.", metavar="path", default=None)
args = None
try:
args = parser.parse_args(argv)
if args.to_step or args.best or args.all:
args.step_converg = True
if args.to_step:
try:
args.to_step = int(args.to_step)
except ValueError:
raise InvalidDataError("When the '-t' option is used, an integer must be provided.")
if args.step_converg and args.final_converg:
raise InvalidDataError("Choose either the '-a', '-b', '-s', '-t', or '-z' option.")
# make the default output directory a subdirectory of the directory to search
if args.output_directory == DEF_COMPLETE_DIR:
if args.dir_subdirs:
args.output_directory = os.path.relpath(os.path.join(args.dir_subdirs, DEF_COMPLETE_DIR))
if args.directory:
args.output_directory = os.path.relpath(os.path.join(args.directory, DEF_COMPLETE_DIR))
except (KeyError, InvalidDataError, MissingSectionHeaderError, SystemExit) as e:
if hasattr(e, 'code') and e.code == 0:
return args, GOOD_RET
warning(e)
parser.print_help()
return args, INPUT_ERROR
return args, GOOD_RET
| 5,347,387 |
def get_continuum_extrapolation( # pylint: disable=C0103
df: pd.DataFrame,
n_poly_max: int = 4,
delta_x: float = 1.25e-13,
include_statistics: bool = True,
odd_poly: bool = False,
) -> pd.DataFrame:
"""Takes a data frame read in by read tables and runs a continuum extrapolation
for the spectrum.
The continuum extrapolation is executed by a even polynomial fit up to order
`n_poly_max`
**Arguments**
df: pd.DataFrame
        DataFrame returned by `read_table`.
    n_poly_max: int = 4
        Maximal order of the polynomial used for the spectrum extrapolation.
        The fitter fits even polynomials of order 1 up to `n_poly_max` and
        picks the best one, defined by the maximum of the logGBF.
    delta_x: float = 1.25e-13
Approximate error for the x-values.
include_statistics: bool = True
Includes fit statistics like chi2/dof or logGBF.
odd_poly: bool = False
Allow fits of odd polynomials.
"""
if lsqfit is None or gv is None:
raise ImportError(
"Cannort load `lsqfit` and `gvar`." " Thus fitting is not possible."
)
group = df.groupby(["L", "nstep", "nlevel"])[["epsilon", "x"]]
fit_df = group.apply(
_group_wise_fit,
n_poly_max=n_poly_max,
delta_x=delta_x,
include_statistics=include_statistics,
odd_poly=odd_poly,
).reset_index()
fit_df["epsilon"] = 0
if "level_3" in fit_df.columns:
fit_df = fit_df.drop(columns=["level_3"])
return fit_df
| 5,347,388 |
def _setup_animation(
animation, time_codes, joints, joints_transforms, topology, joint_count
):
"""Write joint animation to USD and bind the animation to a skeleton.
Reference:
https://graphics.pixar.com/usd/docs/api/_usd_skel__a_p_i__intro.html#UsdSkel_API_WritingSkels
Args:
animation (`pxr.UsdSkel.Animation`):
The object that will get all of the animation data applied
to it in this function.
time_codes (iter[tuple[float or int]):
The time-codes that will be used to calculate local joint transforms.
joints (list[str]):
The ordered-list of parent<->child joints that will be used
to compute the local-space joint transformations. This
parameter is the base of `topology`.
joints_transforms (dict[float or int, list[`pxr.Gf.Matrix4d`]]):
The world-space transforms of every joint in `joints`
for-each time-code in `time_codes`. The length of each
value in this parameter must match `joint_count`.
topology (`pxr.UsdSkel.Topology`):
A description of the joint hierarchy that was created using
`joints` as its input.
joint_count (int):
The required transforms-per-time-code. This parameter is used
to check that each world-space joint transform is correct
before it is used to compute a local-space joint transform.
"""
animation.GetPrim().SetMetadata(
"comment",
"local-space joint transformations that match the `joints` attribute.",
)
joints_attribute = animation.CreateJointsAttr(joints)
joints_attribute.SetMetadata(
"comment",
"This list of joints contains every joint plus the original Maya root joint.",
)
for time_code in time_codes:
joint_world_space_transforms = Vt.Matrix4dArray(joints_transforms[time_code])
if len(joint_world_space_transforms) != joint_count:
LOGGER.warning(
'Found transforms "%s" don\'t match the number of joints',
joint_world_space_transforms,
)
continue
joint_local_space_transforms = UsdSkel.ComputeJointLocalTransforms(
topology, joint_world_space_transforms
)
if joint_local_space_transforms:
animation.SetTransforms(joint_local_space_transforms, time_code)
else:
LOGGER.warning(
'World space transforms "%s" could not be computed as local transforms. Skipping.',
joint_world_space_transforms,
)
| 5,347,389 |
def test_writing_core_attributes_of_faces_mesh():
"""Tests writing to the vectors, vertices and faces attributes of a vectors
mesh.
- `vectors` should be writeable, resizable, allow changes of dtypes but
always silently enforce C contiguous arrays.
- `vertices` should not be writeable.
- `faces` should not be writeable.
After any modifications, all lazy attributes should be reset.
"""
self = vectors_mesh(5, 4)
assert self.vectors.flags.writeable
assert not self.vertices.flags.writeable
old_vertices = self.vertices
self.vectors += [1, 2, 3]
assert np.all(self.vertices != old_vertices)
with pytest.raises(ValueError, match="vectors mesh's vertices .*"):
self.vertices = 8
with pytest.raises(ValueError, match="vectors mesh's faces .*"):
self.faces += 1
self.vectors = np.asarray(self.vectors[:-2], order="f", dtype=np.float32)
assert self.dtype == np.float32
assert self.vectors.flags.c_contiguous
| 5,347,390 |
def setup():
"""
    This will be called at the beginning; set your stuff up here
"""
ofSetBackgroundAuto(False)
global color, circles
color = ofColor(content['color1'].r,content['color1'].g,content['color1'].b)
for i in range(0, 20):
circle = Circle()
circle.setSpeed(content['speed'])
circles.append(circle)
#print "New Circle"
| 5,347,391 |
def listvalues(d):
"""Return `d` value list"""
return list(itervalues(d))
| 5,347,392 |
def build_image(repo_url: str, tag: str, path: str) -> None:
"""
    build_image builds the image with the given tag from the Dockerfile at
    `path`. Note: `repo_url` is currently unused.
"""
client = docker.from_env()
print(f"Building image: {tag}")
client.images.build(tag=tag, path=path)
print("Successfully built image!")
| 5,347,393 |
def _non_blank_line_count(string):
"""
Parameters
----------
string : str or unicode
String (potentially multi-line) to search in.
Returns
-------
int
Number of non-blank lines in string.
"""
non_blank_counter = 0
for line in string.splitlines():
if line.strip():
non_blank_counter += 1
return non_blank_counter
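
# Usage sketch (added for illustration): two of the four lines carry text.
print(_non_blank_line_count("first\n\n   \nlast\n"))  # 2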
| 5,347,394 |
def load(map_name, batch_size):
"""Load CaraEnvironment
Args:
map_name (str): name of the map. Currently available maps are:
'Town01, Town02', 'Town03', 'Town04', 'Town05', 'Town06', 'Town07',
and 'Town10HD'
batch_size (int): the number of vehicles in the simulation.
"""
return CarlaEnvironment(batch_size, map_name)
| 5,347,395 |
def _save_seasons_emotions_to_file(seasons_episodes_subtitles_emotions: Dict[int, Dict[
int, List[TimeEmotion]]], output_file_path: str) -> None:
"""
Save dictionary seasons_episodes_subtitles_emotions to file
"""
with open(output_file_path, 'w') as output_file:
json.dump(_season_episode_emotions_to_dict(seasons_episodes_subtitles_emotions),
output_file)
| 5,347,396 |
def decrypt_password(encrypted_password: str) -> str:
""" b64 decoding
:param encrypted_password: encrypted password with b64
:return: password in plain text
"""
return b64decode(encrypted_password).decode("UTF-8")
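
# Usage sketch (added for illustration): the "encryption" is plain base64, so
# encoding and decoding round-trip any UTF-8 string.
from base64 import b64encode

encoded = b64encode("s3cret".encode("UTF-8")).decode("ascii")
print(decrypt_password(encoded))  # 's3cret'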
| 5,347,397 |
def AppendRecentJetFile(jetFile):
""" Appends to a list of recent jet files """
addedFiles = []
fileList = GetRecentJetFiles()
config = ConfigParser.ConfigParser()
config.read(JetDefs.JETCREATOR_INI)
if config.has_section(JetDefs.RECENT_SECTION):
config.remove_section(JetDefs.RECENT_SECTION)
config.add_section(JetDefs.RECENT_SECTION)
config.set(JetDefs.RECENT_SECTION, "File0", jetFile)
addedFiles.append(jetFile)
count = 1
for file in fileList:
if file not in addedFiles:
sFile = "File" + str(count)
config.set(JetDefs.RECENT_SECTION, sFile, file)
addedFiles.append(file)
count += 1
FileKillClean(JetDefs.JETCREATOR_INI)
cfgfile = open(JetDefs.JETCREATOR_INI,'w')
config.write(cfgfile)
cfgfile.close()
| 5,347,398 |
def model_keplerian(positions, velocities, v_lsr=None, fit_method=None,
flag_singularity=True, flag_radius=None, flag_intervals=None,
return_stddevs=True, plot=False, debug=False):
"""Fit a Keplerian velocity profile to position-velocity-data.
Args:
        positions (np.ndarray or Quantity):
            Position offsets of the position-velocity data.
        velocities (np.ndarray or Quantity):
            Corresponding velocities, e.g. extracted at multiples of PVdata.noise (for instance 3 sigma).
v_lsr (float):
Systemic velocity in units of km/ s.
fit_method (any, optional):
Method to fit the model to the data.
flag_singularity (bool, optional):
            Flag the zero position data points, to avoid running into trouble there during fitting.
flag_radius (astropy.units.Quantity, optional):
If given, then all data points within this given radius from the position_reference are flagged.
        flag_intervals (list of tuples of astropy.units.Quantity, optional):
Similar to flag_radius, but arbitrary intervals may be flagged. Each interval is
given as a tuple of two radial distances from the position_reference.
return_stddevs (boolean, optional):
The fit method LevMarLSQFitter is able to return the standard deviation of the fit parameters. Default is
True.
plot (boolean, optional):
If True, the fit will be displayed as a matplotlib pyplot.
debug (bool, optional):
Stream debugging information to the terminal.
Returns:
        best_fit (astropy.modeling.models.custom_model):
Best fitting model.
stddevs (numpy.array):
Only if return_stddevs is True. The array entries correspond to the best_fit instance parameters in the
same order.
chi2 (float):
chi-squared residual of the fit to the unflagged data.
"""
# Transform Quantities to correct units
if isinstance(positions, Quantity):
positions = positions.to('AU').value
if isinstance(velocities, Quantity):
velocities = velocities.to('km/ s').value
# Apply fall back values
if fit_method is None:
fit_method = LevMarLSQFitter()
if v_lsr is None:
v_lsr = 0
# Create masked arrays
xdata = np.ma.masked_array(positions, np.zeros(positions.shape, dtype=bool))
ydata = np.ma.masked_array(velocities, np.zeros(velocities.shape, dtype=bool))
# Mask the desired flags and intervals
if flag_singularity:
print('Flagging the singularity')
singularity_mask = np.ma.masked_less(np.abs(xdata), 1e-3).mask
xdata.mask = np.logical_or(xdata.mask, singularity_mask)
ydata.mask = np.logical_or(ydata.mask, singularity_mask)
print(f">> Done")
else:
print("Not masking the singularity")
if flag_radius is not None:
print(f"Flagging towards a radial distance of {flag_radius}")
if isinstance(flag_radius, Quantity):
flag_radius = flag_radius.to('au').value
xdata = np.ma.masked_inside(xdata, -flag_radius, flag_radius)
ydata.mask = np.logical_or(ydata.mask, xdata.mask)
print(f">> Done")
print(f"The mask is {xdata.mask}")
else:
print("No flag radius provided")
if flag_intervals is not None:
print('Flagging intervals...')
for interval in flag_intervals:
xdata = np.ma.masked_inside(xdata, interval[0], interval[1])
ydata.mask = np.logical_or(ydata.mask, xdata.mask)
print(f">> Flagged {np.sum(xdata.mask)} elements")
else:
print("No flag intervals provided")
if debug:
print('x data:', xdata)
print('y data:', ydata)
# Initialize the fit model
print("Initializing the model...")
model = Keplerian1D(mass=10., v0=v_lsr, r0=0, bounds={'mass': (0.0, None)})
if debug:
print(f"Initialize the model: {model}")
# Fit the chosen model to the data
print("Fitting the model to the data...")
best_fit = fit_method(model, xdata.compressed(), ydata.compressed())
if debug:
print(fit_method.fit_info['message'])
# Estimate chi2
print("Computing the chi-squared value...")
chi2 = np.sum(np.square(best_fit(xdata.compressed()) - ydata.compressed()))
# Plot
if plot:
plt.plot(positions, velocities, 'o', label='data')
plt.xlabel('Position offset (AU)')
plt.ylabel('Velocity (km/ s)')
plt.axhline(v_lsr, c='k', ls='--', label=r'$v_\mathrm{LSR}$')
plt.plot(xdata, best_fit(xdata), label='model')
plt.fill_between(xdata, best_fit(xdata), best_fit.v0, facecolor='tab:orange', alpha=.5)
if debug:
plt.plot(xdata, model(xdata), label='init')
plt.grid()
plt.legend()
plt.show()
plt.close()
# Prepare the return
stddevs = None
if not isinstance(fit_method, LevMarLSQFitter):
return_stddevs = False
if return_stddevs:
covariance = fit_method.fit_info['param_cov']
if covariance is None:
print(f"[ERROR] Unable to compute the covariance matrix and fit parameter uncertainties!")
else:
stddevs = np.sqrt(np.diag(covariance))
return best_fit, stddevs, chi2
| 5,347,399 |