| content (string, lengths 22–815k) | id (int64, 0–4.91M) |
| --- | --- |
def build_goods_query(
good_ids: List[str], currency_id: str, is_searching_for_sellers: bool
) -> Query:
"""
Build buyer or seller search query.
Specifically, build the search query
- to look for sellers if the agent is a buyer, or
- to look for buyers if the agent is a seller.
In particular, if the agent is a buyer and the demanded good ids are {'tac_good_0', 'tac_good_2', 'tac_good_3'}, the resulting constraint expression is:
tac_good_0 >= 1 OR tac_good_2 >= 1 OR tac_good_3 >= 1
That is, the OEF will return all the sellers that have at least one of the goods in the query
(assuming that the sellers are registered with the data model specified).
:param good_ids: the list of good ids to put in the query
:param currency_id: the currency used for pricing and transacting.
:param is_searching_for_sellers: Boolean indicating whether the query is for sellers (supply) or buyers (demand).
:return: the query
"""
data_model = _build_goods_datamodel(
good_ids=good_ids, is_supply=is_searching_for_sellers
)
constraints = [Constraint(good_id, ConstraintType(">=", 1)) for good_id in good_ids]
constraints.append(Constraint("currency_id", ConstraintType("==", currency_id)))
constraint_expr = cast(List[ConstraintExpr], constraints)
if len(good_ids) > 1:
constraint_expr = [Or(constraint_expr)]
query = Query(constraint_expr, model=data_model)
return query
| 5,349,100 |
def make_piecewise_const(num_segments):
"""Makes a piecewise constant semi-sinusoid curve with num_segments segments."""
true_values = np.sin(np.arange(0, np.pi, step=0.001))
seg_idx = np.arange(true_values.shape[0]) // (true_values.shape[0] / num_segments)
return pd.Series(true_values).groupby(seg_idx).mean().tolist()
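
# Usage sketch (not part of the original snippet): approximate the half-sine with
# four constant segments. Assumes numpy and pandas imported as np and pd, as the
# function above requires.
import numpy as np
import pandas as pd

segments = make_piecewise_const(4)
print(segments)  # roughly [0.37, 0.90, 0.90, 0.37], the per-segment means of sin(x)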
| 5,349,101 |
def save_default_model(model, L):
"""Saving information associated with the exact diagonalization via
symmetry for the model model with the given model_params.
"""
H, model_params, symmetries = base.gen_model(model, L=L)
assert os.path.isfile(projfile(L, S=1/2, **symmetries)),\
"Could not find projection operators. Try running "\
+ "```save_projectors(L)```"
# Diagonalize with symmetries, save results
cu.eigh_symms(H, L, S=1/2,
save_systemfile=sysfile(model, **model_params),
save_eigenfile=eigenfile(model, **model_params),
# Optionally, load saved projectors:
load_projfile=projfile(**model_params, **symmetries),
)
return
| 5,349,102 |
def convert_rational_from_float(number):
"""
Convert a float to a rational number in the form of a (numerator, denominator) tuple.
"""
f = Fraction(str(number))  # going through str() effectively rounds the float
return f.numerator, f.denominator
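
# Usage sketch (not part of the original snippet); assumes the Fraction import
# that the function above relies on.
from fractions import Fraction

print(convert_rational_from_float(0.25))  # (1, 4)
print(convert_rational_from_float(0.1))   # (1, 10), because str(0.1) == '0.1'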
| 5,349,103 |
def classname(obj):
"""Returns the name of an objects class"""
return obj.__class__.__name__
| 5,349,104 |
def train(epoch, model, dataloader, optimizer, criterion, device, writer, cfg):
"""
Train the model for one epoch.
Args:
epoch (int): index of the current training epoch.
model (class): model of training.
dataloader (dict): dict of dataset iterator. Keys are tasknames, values are corresponding dataloaders.
optimizer (Callable): optimizer of training.
criterion (Callable): loss criterion of training.
device (torch.device): device of training.
writer (class): output to tensorboard.
cfg: configuration of training.
Return:
losses[-1]: the loss of the last training batch in this epoch.
"""
model.train()
metric = PRMetric()
losses = []
for batch_idx, (x, y) in enumerate(dataloader, 1):
for key, value in x.items():
x[key] = value.to(device)
y = y.to(device)
optimizer.zero_grad()
y_pred = model(x)
if cfg.model_name == 'capsule':
loss = model.loss(y_pred, y)
else:
loss = criterion(y_pred, y)
loss.backward()
optimizer.step()
metric.update(y_true=y, y_pred=y_pred)
losses.append(loss.item())
data_total = len(dataloader.dataset)
data_cal = data_total if batch_idx == len(dataloader) else batch_idx * len(y)
if (cfg.train_log and batch_idx % cfg.log_interval == 0) or batch_idx == len(dataloader):
# p, r, f1 are macro-averaged; with micro-averaging all three are identical, so that value is reported as acc
acc, p, r, f1 = metric.compute()
logger.info(f'Train Epoch {epoch}: [{data_cal}/{data_total} ({100. * data_cal / data_total:.0f}%)]\t'
f'Loss: {loss.item():.6f}')
logger.info(f'Train Epoch {epoch}: Acc: {100. * acc:.2f}%\t'
f'macro metrics: [p: {p:.4f}, r:{r:.4f}, f1:{f1:.4f}]')
if cfg.show_plot and not cfg.only_comparison_plot:
if cfg.plot_utils == 'matplot':
plt.plot(losses)
plt.title(f'epoch {epoch} train loss')
plt.show()
if cfg.plot_utils == 'tensorboard':
for i in range(len(losses)):
writer.add_scalar(f'epoch_{epoch}_training_loss', losses[i], i)
return losses[-1]
| 5,349,105 |
async def test_trigger_with_pending_and_delay(opp, mqtt_mock):
"""Test trigger method and switch from pending to triggered."""
assert await async_setup_component(
opp,
alarm_control_panel.DOMAIN,
{
"alarm_control_panel": {
"platform": "manual_mqtt",
"name": "test",
"code": CODE,
"delay_time": 1,
"pending_time": 0,
"triggered": {"pending_time": 1},
"disarm_after_trigger": False,
"command_topic": "alarm/command",
"state_topic": "alarm/state",
}
},
)
await opp.async_block_till_done()
entity_id = "alarm_control_panel.test"
assert opp.states.get(entity_id).state == STATE_ALARM_DISARMED
await common.async_alarm_arm_away(opp, CODE)
await opp.async_block_till_done()
assert opp.states.get(entity_id).state == STATE_ALARM_ARMED_AWAY
await common.async_alarm_trigger(opp, entity_id=entity_id)
await opp.async_block_till_done()
state = opp.states.get(entity_id)
assert state.state == STATE_ALARM_PENDING
assert state.attributes["post_pending_state"] == STATE_ALARM_TRIGGERED
future = dt_util.utcnow() + timedelta(seconds=1)
with patch(
("openpeerpower.components.manual_mqtt.alarm_control_panel." "dt_util.utcnow"),
return_value=future,
):
async_fire_time_changed(opp, future)
await opp.async_block_till_done()
state = opp.states.get(entity_id)
assert state.state == STATE_ALARM_PENDING
assert state.attributes["post_pending_state"] == STATE_ALARM_TRIGGERED
future += timedelta(seconds=1)
with patch(
("openpeerpower.components.manual_mqtt.alarm_control_panel." "dt_util.utcnow"),
return_value=future,
):
async_fire_time_changed(opp, future)
await opp.async_block_till_done()
state = opp.states.get(entity_id)
assert state.state == STATE_ALARM_TRIGGERED
| 5,349,106 |
def custom_timeseries_widget_for_behavior(node, **kwargs):
"""Use a custom TimeSeries widget for behavior data"""
if node.name == 'Velocity':
return SeparateTracesPlotlyWidget(node)
else:
return show_timeseries(node)
| 5,349,107 |
def db_tween_factory(handler, registry):
"""A database tween, doing automatic session management."""
def db_tween(request):
response = None
try:
response = handler(request)
finally:
session = getattr(request, "_db_session", None)
if session is not None:
# always rollback/close the read-only session
try:
session.rollback()
except DatabaseError:
registry.raven_client.captureException()
finally:
registry.db.release_session(session)
return response
return db_tween
| 5,349,108 |
def calibrate_profiler(n, timer=time.time):
"""
Calibration routine that returns the fudge factor. The fudge factor
is the amount of time it takes to call and return from the
profiler handler. The profiler can't measure this time, so it
will be attributed to the user code unless it's subtracted off.
"""
starttime = timer()
p = Profiler(fudge=0.0)
for i in range(n):
a_very_long_function_name()
p.stop()
stoptime = timer()
simpletime = p.get_time('a_very_long_function_name')
realtime = stoptime - starttime
profiletime = simpletime + p.overhead
losttime = realtime - profiletime
return losttime/(2*n) # 2 profile events per function call
| 5,349,109 |
def getbias(x, bias):
"""Bias in Ken Perlin’s bias and gain functions."""
return x / ((1.0 / bias - 2.0) * (1.0 - x) + 1.0 + 1e-6)
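
# Quick check (not part of the original snippet): Perlin's bias curve maps
# x = 0.5 to approximately the bias value itself, which is its defining property;
# the 1e-6 term only guards against division by zero.
for b in (0.25, 0.5, 0.75):
    print(b, round(getbias(0.5, b), 4))  # each printed value is very close to b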
| 5,349,110 |
def modify_vm(hostname: str, vm_id: str, memory: int, cpu: int):
""" set memory and core count (cpu) of the given vm to the given values """
# TODO implement
pass
| 5,349,111 |
def get_exif_flash_fired(exif_data: Dict) -> Optional[bool]:
"""
Parses the "flash" value from exif do determine if it was fired.
Possible values:
+-------------------------------------------------------+------+----------+-------+
| Status | Hex | Binary | Fired |
+-------------------------------------------------------+------+----------+-------+
| No Flash | 0x0 | 00000000 | No |
| Fired | 0x1 | 00000001 | Yes |
| "Fired, Return not detected" | 0x5 | 00000101 | Yes |
| "Fired, Return detected" | 0x7 | 00000111 | Yes |
| "On, Did not fire" | 0x8 | 00001000 | No |
| "On, Fired" | 0x9 | 00001001 | Yes |
| "On, Return not detected" | 0xd | 00001011 | Yes |
| "On, Return detected" | 0xf | 00001111 | Yes |
| "Off, Did not fire" | 0x10 | 00010000 | No |
| "Off, Did not fire, Return not detected" | 0x14 | 00010100 | No |
| "Auto, Did not fire" | 0x18 | 00011000 | No |
| "Auto, Fired" | 0x19 | 00011001 | Yes |
| "Auto, Fired, Return not detected" | 0x1d | 00011101 | Yes |
| "Auto, Fired, Return detected" | 0x1f | 00011111 | Yes |
| No flash function | 0x20 | 00100000 | No |
| "Off, No flash function" | 0x30 | 00110000 | No |
| "Fired, Red-eye reduction" | 0x41 | 01000001 | Yes |
| "Fired, Red-eye reduction, Return not detected" | 0x45 | 01000101 | Yes |
| "Fired, Red-eye reduction, Return detected" | 0x47 | 01000111 | Yes |
| "On, Red-eye reduction" | 0x49 | 01001001 | Yes |
| "On, Red-eye reduction, Return not detected" | 0x4d | 01001101 | Yes |
| "On, Red-eye reduction, Return detected" | 0x4f | 01001111 | Yes |
| "Off, Red-eye reduction" | 0x50 | 01010000 | No |
| "Auto, Did not fire, Red-eye reduction" | 0x58 | 01011000 | No |
| "Auto, Fired, Red-eye reduction" | 0x59 | 01011001 | Yes |
| "Auto, Fired, Red-eye reduction, Return not detected" | 0x5d | 01011101 | Yes |
| "Auto, Fired, Red-eye reduction, Return detected" | 0x5f | 01011111 | Yes |
+-------------------------------------------------------+------+----------+-------+
:param exif_data:
:return: If the flash was fired, or None if the exif information is not present
"""
if 'Flash' not in exif_data:
return None
return bool((int(exif_data['Flash']) & 1) > 0)
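
# Usage sketch (not part of the original snippet): only bit 0 of the Flash tag
# decides whether the flash fired.
print(get_exif_flash_fired({'Flash': 0x19}))  # True  ("Auto, Fired")
print(get_exif_flash_fired({'Flash': 0x10}))  # False ("Off, Did not fire")
print(get_exif_flash_fired({}))               # None  (no Flash tag present)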
| 5,349,112 |
def gyp_generator_flags():
"""Parses and returns GYP_GENERATOR_FLAGS env var as a dictionary."""
return dict(arg.split('=', 1)
for arg in shlex.split(os.environ.get('GYP_GENERATOR_FLAGS', '')))
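
# Usage sketch (not part of the original snippet); the flag names below are
# made up for illustration. Assumes `import os, shlex` as the function requires.
import os
import shlex

os.environ['GYP_GENERATOR_FLAGS'] = 'output_dir=out config=Release'
print(gyp_generator_flags())  # {'output_dir': 'out', 'config': 'Release'}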
| 5,349,113 |
def get_geoJson(addr):
"""
Queries the Google Maps API for specified address, returns
a dict of the formatted address, the state/territory name, and
a float-ified version of the latitude and longitude.
"""
res = requests.get(queryurl.format(addr=addr, gmapkey=gmapkey))
dictr = {}
if res.json()["status"] == "ZERO_RESULTS" or not res.ok:
dictr["res"] = res
else:
print(json.dumps(res.json(), indent=4))
rresj = res.json()["results"][0]
dictr["formatted_address"] = rresj["formatted_address"]
dictr["latlong"] = rresj["geometry"]["location"]
for el in rresj["address_components"]:
if el["types"][0] == "administrative_area_level_1":
dictr["state"] = el["short_name"]
return dictr
| 5,349,114 |
def genomic_del6_abs_cnv(params, genomic_del6_seq_loc):
"""Create genomic del6 absolute cnv"""
_id = "ga4gh:VAC.6RkHgDOiRMZKMKgI6rmG9C3T6WuMhcex"
params["variation"] = {
"type": "AbsoluteCopyNumber",
"_id": _id,
"subject": genomic_del6_seq_loc,
"copies": {"type": "Number", "value": 1}
}
params["variation_id"] = _id
| 5,349,115 |
def check_bcc(msg: Match[bytes]):
"""Check that BCC from the response message is correct."""
calc_bcc = calculate_bcc(msg[0][1:-1])
if calc_bcc != msg["bcc"]:
raise WrongBCC(f"BCC must be {calc_bcc}, but received {msg['bcc']}")
| 5,349,116 |
def display_data_in_new_tab(message, args, pipeline_data):
""" Displays the current message data in a new tab """
window = sublime.active_window()
tab = window.new_file()
tab.set_scratch(True)
edit_token = message['edit_token']
tab.insert(edit_token, 0, message['data'])
return tab
| 5,349,117 |
def update_user_pool(UserPoolId=None, Policies=None, LambdaConfig=None, AutoVerifiedAttributes=None, SmsVerificationMessage=None, EmailVerificationMessage=None, EmailVerificationSubject=None, VerificationMessageTemplate=None, SmsAuthenticationMessage=None, MfaConfiguration=None, DeviceConfiguration=None, EmailConfiguration=None, SmsConfiguration=None, UserPoolTags=None, AdminCreateUserConfig=None, UserPoolAddOns=None, AccountRecoverySetting=None):
"""
Updates the specified user pool with the specified attributes. You can get a list of the current user pool settings with .
See also: AWS API Documentation
Exceptions
:example: response = client.update_user_pool(
UserPoolId='string',
Policies={
'PasswordPolicy': {
'MinimumLength': 123,
'RequireUppercase': True|False,
'RequireLowercase': True|False,
'RequireNumbers': True|False,
'RequireSymbols': True|False,
'TemporaryPasswordValidityDays': 123
}
},
LambdaConfig={
'PreSignUp': 'string',
'CustomMessage': 'string',
'PostConfirmation': 'string',
'PreAuthentication': 'string',
'PostAuthentication': 'string',
'DefineAuthChallenge': 'string',
'CreateAuthChallenge': 'string',
'VerifyAuthChallengeResponse': 'string',
'PreTokenGeneration': 'string',
'UserMigration': 'string'
},
AutoVerifiedAttributes=[
'phone_number'|'email',
],
SmsVerificationMessage='string',
EmailVerificationMessage='string',
EmailVerificationSubject='string',
VerificationMessageTemplate={
'SmsMessage': 'string',
'EmailMessage': 'string',
'EmailSubject': 'string',
'EmailMessageByLink': 'string',
'EmailSubjectByLink': 'string',
'DefaultEmailOption': 'CONFIRM_WITH_LINK'|'CONFIRM_WITH_CODE'
},
SmsAuthenticationMessage='string',
MfaConfiguration='OFF'|'ON'|'OPTIONAL',
DeviceConfiguration={
'ChallengeRequiredOnNewDevice': True|False,
'DeviceOnlyRememberedOnUserPrompt': True|False
},
EmailConfiguration={
'SourceArn': 'string',
'ReplyToEmailAddress': 'string',
'EmailSendingAccount': 'COGNITO_DEFAULT'|'DEVELOPER',
'From': 'string',
'ConfigurationSet': 'string'
},
SmsConfiguration={
'SnsCallerArn': 'string',
'ExternalId': 'string'
},
UserPoolTags={
'string': 'string'
},
AdminCreateUserConfig={
'AllowAdminCreateUserOnly': True|False,
'UnusedAccountValidityDays': 123,
'InviteMessageTemplate': {
'SMSMessage': 'string',
'EmailMessage': 'string',
'EmailSubject': 'string'
}
},
UserPoolAddOns={
'AdvancedSecurityMode': 'OFF'|'AUDIT'|'ENFORCED'
},
AccountRecoverySetting={
'RecoveryMechanisms': [
{
'Priority': 123,
'Name': 'verified_email'|'verified_phone_number'|'admin_only'
},
]
}
)
:type UserPoolId: string
:param UserPoolId: [REQUIRED]\nThe user pool ID for the user pool you want to update.\n
:type Policies: dict
:param Policies: A container with the policies you wish to update in a user pool.\n\nPasswordPolicy (dict) --The password policy.\n\nMinimumLength (integer) --The minimum length of the password policy that you have set. Cannot be less than 6.\n\nRequireUppercase (boolean) --In the password policy that you have set, refers to whether you have required users to use at least one uppercase letter in their password.\n\nRequireLowercase (boolean) --In the password policy that you have set, refers to whether you have required users to use at least one lowercase letter in their password.\n\nRequireNumbers (boolean) --In the password policy that you have set, refers to whether you have required users to use at least one number in their password.\n\nRequireSymbols (boolean) --In the password policy that you have set, refers to whether you have required users to use at least one symbol in their password.\n\nTemporaryPasswordValidityDays (integer) --In the password policy you have set, refers to the number of days a temporary password is valid. If the user does not sign-in during this time, their password will need to be reset by an administrator.\n\nNote\nWhen you set TemporaryPasswordValidityDays for a user pool, you will no longer be able to set the deprecated UnusedAccountValidityDays value for that user pool.\n\n\n\n\n\n
:type LambdaConfig: dict
:param LambdaConfig: The AWS Lambda configuration information from the request to update the user pool.\n\nPreSignUp (string) --A pre-registration AWS Lambda trigger.\n\nCustomMessage (string) --A custom Message AWS Lambda trigger.\n\nPostConfirmation (string) --A post-confirmation AWS Lambda trigger.\n\nPreAuthentication (string) --A pre-authentication AWS Lambda trigger.\n\nPostAuthentication (string) --A post-authentication AWS Lambda trigger.\n\nDefineAuthChallenge (string) --Defines the authentication challenge.\n\nCreateAuthChallenge (string) --Creates an authentication challenge.\n\nVerifyAuthChallengeResponse (string) --Verifies the authentication challenge response.\n\nPreTokenGeneration (string) --A Lambda trigger that is invoked before token generation.\n\nUserMigration (string) --The user migration Lambda config type.\n\n\n
:type AutoVerifiedAttributes: list
:param AutoVerifiedAttributes: The attributes that are automatically verified when the Amazon Cognito service makes a request to update user pools.\n\n(string) --\n\n
:type SmsVerificationMessage: string
:param SmsVerificationMessage: A container with information about the SMS verification message.
:type EmailVerificationMessage: string
:param EmailVerificationMessage: The contents of the email verification message.
:type EmailVerificationSubject: string
:param EmailVerificationSubject: The subject of the email verification message.
:type VerificationMessageTemplate: dict
:param VerificationMessageTemplate: The template for verification messages.\n\nSmsMessage (string) --The SMS message template.\n\nEmailMessage (string) --The email message template.\n\nEmailSubject (string) --The subject line for the email message template.\n\nEmailMessageByLink (string) --The email message template for sending a confirmation link to the user.\n\nEmailSubjectByLink (string) --The subject line for the email message template for sending a confirmation link to the user.\n\nDefaultEmailOption (string) --The default email option.\n\n\n
:type SmsAuthenticationMessage: string
:param SmsAuthenticationMessage: The contents of the SMS authentication message.
:type MfaConfiguration: string
:param MfaConfiguration: Can be one of the following values:\n\nOFF - MFA tokens are not required and cannot be specified during user registration.\nON - MFA tokens are required for all user registrations. You can only specify required when you are initially creating a user pool.\nOPTIONAL - Users have the option when registering to create an MFA token.\n\n
:type DeviceConfiguration: dict
:param DeviceConfiguration: Device configuration.\n\nChallengeRequiredOnNewDevice (boolean) --Indicates whether a challenge is required on a new device. Only applicable to a new device.\n\nDeviceOnlyRememberedOnUserPrompt (boolean) --If true, a device is only remembered on user prompt.\n\n\n
:type EmailConfiguration: dict
:param EmailConfiguration: Email configuration.\n\nSourceArn (string) --The Amazon Resource Name (ARN) of a verified email address in Amazon SES. This email address is used in one of the following ways, depending on the value that you specify for the EmailSendingAccount parameter:\n\nIf you specify COGNITO_DEFAULT , Amazon Cognito uses this address as the custom FROM address when it emails your users by using its built-in email account.\nIf you specify DEVELOPER , Amazon Cognito emails your users with this address by calling Amazon SES on your behalf.\n\n\nReplyToEmailAddress (string) --The destination to which the receiver of the email should reply to.\n\nEmailSendingAccount (string) --Specifies whether Amazon Cognito emails your users by using its built-in email functionality or your Amazon SES email configuration. Specify one of the following values:\n\nCOGNITO_DEFAULT\nWhen Amazon Cognito emails your users, it uses its built-in email functionality. When you use the default option, Amazon Cognito allows only a limited number of emails each day for your user pool. For typical production environments, the default email limit is below the required delivery volume. To achieve a higher delivery volume, specify DEVELOPER to use your Amazon SES email configuration.\nTo look up the email delivery limit for the default option, see Limits in Amazon Cognito in the Amazon Cognito Developer Guide .\nThe default FROM address is [email protected]. To customize the FROM address, provide the ARN of an Amazon SES verified email address for the SourceArn parameter.\n\nDEVELOPER\nWhen Amazon Cognito emails your users, it uses your Amazon SES configuration. Amazon Cognito calls Amazon SES on your behalf to send email from your verified email address. When you use this option, the email delivery limits are the same limits that apply to your Amazon SES verified email address in your AWS account.\nIf you use this option, you must provide the ARN of an Amazon SES verified email address for the SourceArn parameter.\nBefore Amazon Cognito can email your users, it requires additional permissions to call Amazon SES on your behalf. When you update your user pool with this option, Amazon Cognito creates a service-linked role , which is a type of IAM role, in your AWS account. This role contains the permissions that allow Amazon Cognito to access Amazon SES and send email messages with your address. For more information about the service-linked role that Amazon Cognito creates, see Using Service-Linked Roles for Amazon Cognito in the Amazon Cognito Developer Guide .\n\nFrom (string) --Identifies either the sender\xe2\x80\x99s email address or the sender\xe2\x80\x99s name with their email address. For example, [email protected] or Test User <[email protected]> . This address will appear before the body of the email.\n\nConfigurationSet (string) --The set of configuration rules that can be applied to emails sent using Amazon SES. A configuration set is applied to an email by including a reference to the configuration set in the headers of the email. Once applied, all of the rules in that configuration set are applied to the email. Configuration sets can be used to apply the following types of rules to emails:\n\nEvent publishing \xe2\x80\x93 Amazon SES can track the number of send, delivery, open, click, bounce, and complaint events for each email sent. 
Use event publishing to send information about these events to other AWS services such as SNS and CloudWatch.\nIP pool management \xe2\x80\x93 When leasing dedicated IP addresses with Amazon SES, you can create groups of IP addresses, called dedicated IP pools. You can then associate the dedicated IP pools with configuration sets.\n\n\n\n
:type SmsConfiguration: dict
:param SmsConfiguration: SMS configuration.\n\nSnsCallerArn (string) -- [REQUIRED]The Amazon Resource Name (ARN) of the Amazon Simple Notification Service (SNS) caller. This is the ARN of the IAM role in your AWS account which Cognito will use to send SMS messages.\n\nExternalId (string) --The external ID is a value that we recommend you use to add security to your IAM role which is used to call Amazon SNS to send SMS messages for your user pool. If you provide an ExternalId , the Cognito User Pool will include it when attempting to assume your IAM role, so that you can set your roles trust policy to require the ExternalID . If you use the Cognito Management Console to create a role for SMS MFA, Cognito will create a role with the required permissions and a trust policy that demonstrates use of the ExternalId .\n\n\n
:type UserPoolTags: dict
:param UserPoolTags: The tag keys and values to assign to the user pool. A tag is a label that you can use to categorize and manage user pools in different ways, such as by purpose, owner, environment, or other criteria.\n\n(string) --\n(string) --\n\n\n\n
:type AdminCreateUserConfig: dict
:param AdminCreateUserConfig: The configuration for AdminCreateUser requests.\n\nAllowAdminCreateUserOnly (boolean) --Set to True if only the administrator is allowed to create user profiles. Set to False if users can sign themselves up via an app.\n\nUnusedAccountValidityDays (integer) --The user account expiration limit, in days, after which the account is no longer usable. To reset the account after that time limit, you must call AdminCreateUser again, specifying 'RESEND' for the MessageAction parameter. The default value for this parameter is 7.\n\nNote\nIf you set a value for TemporaryPasswordValidityDays in PasswordPolicy , that value will be used and UnusedAccountValidityDays will be deprecated for that user pool.\n\n\nInviteMessageTemplate (dict) --The message template to be used for the welcome message to new users.\nSee also Customizing User Invitation Messages .\n\nSMSMessage (string) --The message template for SMS messages.\n\nEmailMessage (string) --The message template for email messages.\n\nEmailSubject (string) --The subject line for email messages.\n\n\n\n\n
:type UserPoolAddOns: dict
:param UserPoolAddOns: Used to enable advanced security risk detection. Set the key AdvancedSecurityMode to the value 'AUDIT'.\n\nAdvancedSecurityMode (string) -- [REQUIRED]The advanced security mode.\n\n\n
:type AccountRecoverySetting: dict
:param AccountRecoverySetting: Use this setting to define which verified available method a user can use to recover their password when they call ForgotPassword . It allows you to define a preferred method when a user has more than one method available. With this setting, SMS does not qualify for a valid password recovery mechanism if the user also has SMS MFA enabled. In the absence of this setting, Cognito uses the legacy behavior to determine the recovery method where SMS is preferred over email.\n\nRecoveryMechanisms (list) --The list of RecoveryOptionTypes .\n\n(dict) --A map containing a priority as a key, and recovery method name as a value.\n\nPriority (integer) -- [REQUIRED]A positive integer specifying priority of a method with 1 being the highest priority.\n\nName (string) -- [REQUIRED]Specifies the recovery method for a user.\n\n\n\n\n\n\n
:rtype: dict
ReturnsResponse Syntax
{}
Response Structure
(dict) --
Represents the response from the server when you make a request to update the user pool.
Exceptions
CognitoIdentityProvider.Client.exceptions.ResourceNotFoundException
CognitoIdentityProvider.Client.exceptions.InvalidParameterException
CognitoIdentityProvider.Client.exceptions.ConcurrentModificationException
CognitoIdentityProvider.Client.exceptions.TooManyRequestsException
CognitoIdentityProvider.Client.exceptions.NotAuthorizedException
CognitoIdentityProvider.Client.exceptions.UserImportInProgressException
CognitoIdentityProvider.Client.exceptions.InternalErrorException
CognitoIdentityProvider.Client.exceptions.InvalidSmsRoleAccessPolicyException
CognitoIdentityProvider.Client.exceptions.InvalidSmsRoleTrustRelationshipException
CognitoIdentityProvider.Client.exceptions.UserPoolTaggingException
CognitoIdentityProvider.Client.exceptions.InvalidEmailRoleAccessPolicyException
:return: {}
:returns:
CognitoIdentityProvider.Client.exceptions.ResourceNotFoundException
CognitoIdentityProvider.Client.exceptions.InvalidParameterException
CognitoIdentityProvider.Client.exceptions.ConcurrentModificationException
CognitoIdentityProvider.Client.exceptions.TooManyRequestsException
CognitoIdentityProvider.Client.exceptions.NotAuthorizedException
CognitoIdentityProvider.Client.exceptions.UserImportInProgressException
CognitoIdentityProvider.Client.exceptions.InternalErrorException
CognitoIdentityProvider.Client.exceptions.InvalidSmsRoleAccessPolicyException
CognitoIdentityProvider.Client.exceptions.InvalidSmsRoleTrustRelationshipException
CognitoIdentityProvider.Client.exceptions.UserPoolTaggingException
CognitoIdentityProvider.Client.exceptions.InvalidEmailRoleAccessPolicyException
"""
pass
| 5,349,118 |
def _cost( q,p, xt_measure, connec, params ) :
"""
Returns a total cost, sum of a small regularization term and the data attachment.
.. math ::
C(q_0, p_0) = .01 * H(q0,p0) + 1 * A(q_1, x_t)
Needless to say, the weights can be tuned according to the signal-to-noise ratio.
"""
s,r = params # Deformation scale, Attachment scale
q1 = _HamiltonianShooting(q,p,s)[0] # Geodesic shooting from q0 to q1
# To compute a data attachment cost, we need the set of vertices 'q1' into a measure.
q1_measure = Curve._vertices_to_measure( q1, connec )
attach_info = _data_attachment( q1_measure, xt_measure, r )
return [ .01* _Hqp(q, p, s) + 1* attach_info[0] , attach_info[1] ]
| 5,349,119 |
def get_full_lang_code(lang=None):
""" Get the full language code
Args:
lang (str, optional): A BCP-47 language code, or None for default
Returns:
str: A full language code, such as "en-us" or "de-de"
"""
if not lang:
lang = __active_lang
return lang or "en-us"
| 5,349,120 |
def load_keras_model():
"""Load in the pre-trained model"""
global model
model = load_model('../models/train-embeddings-rnn-2-layers.h5')
# Required for model to work
#global graph
#graph = tf.compat.v1.get_default_graph()
#graph = tf.get_default_graph()
| 5,349,121 |
async def handle_api_exception(request) -> User:
"""
API Description: Handle APIException. This will show in the swagger page (localhost:8000/api/v1/).
"""
raise APIException("Something bad happened", code=404)
| 5,349,122 |
def acquire_images(cam, nodemap, nodemap_tldevice):
"""
This function acquires and saves 10 images from a device.
:param cam: Camera to acquire images from.
:param nodemap: Device nodemap.
:param nodemap_tldevice: Transport layer device nodemap.
:type cam: CameraPtr
:type nodemap: INodeMap
:type nodemap_tldevice: INodeMap
:return: True if successful, False otherwise.
:rtype: bool
"""
print '*** IMAGE ACQUISITION ***\n'
try:
result = True
# Set acquisition mode to continuous
#
# *** NOTES ***
# Because the example acquires and saves 10 images, setting acquisition
# mode to continuous lets the example finish. If set to single frame
# or multiframe (at a lower number of images), the example would just
# hang. This would happen because the example has been written to
# acquire 10 images while the camera would have been programmed to
# retrieve less than that.
#
# Setting the value of an enumeration node is slightly more complicated
# than other node types. Two nodes must be retrieved: first, the
# enumeration node is retrieved from the nodemap; and second, the entry
# node is retrieved from the enumeration node. The integer value of the
# entry node is then set as the new value of the enumeration node.
#
# Notice that both the enumeration and the entry nodes are checked for
# availability and readability/writability. Enumeration nodes are
# generally readable and writable whereas their entry nodes are only
# ever readable.
#
# Retrieve enumeration node from nodemap
# In order to access the node entries, they have to be casted to a pointer type (CEnumerationPtr here)
node_acquisition_mode = PySpin.CEnumerationPtr(nodemap.GetNode('AcquisitionMode'))
if not PySpin.IsAvailable(node_acquisition_mode) or not PySpin.IsWritable(node_acquisition_mode):
print 'Unable to set acquisition mode to continuous (enum retrieval). Aborting...'
return False
# Retrieve entry node from enumeration node
node_acquisition_mode_continuous = node_acquisition_mode.GetEntryByName('Continuous')
if not PySpin.IsAvailable(node_acquisition_mode_continuous) or not PySpin.IsReadable(node_acquisition_mode_continuous):
print 'Unable to set acquisition mode to continuous (entry retrieval). Aborting...'
return False
# Retrieve integer value from entry node
acquisition_mode_continuous = node_acquisition_mode_continuous.GetValue()
# Set integer value from entry node as new value of enumeration node
node_acquisition_mode.SetIntValue(acquisition_mode_continuous)
print 'Acquisition mode set to continuous...'
# Begin acquiring images
#
# *** NOTES ***
# What happens when the camera begins acquiring images depends on the
# acquisition mode. Single frame captures only a single image, multi
# frame captures a set number of images, and continuous captures a
# continuous stream of images. Because the example calls for the
# retrieval of 10 images, continuous mode has been set.
#
# *** LATER ***
# Image acquisition must be ended when no more images are needed.
cam.BeginAcquisition()
print 'Acquiring images...'
# Retrieve device serial number for filename
#
# *** NOTES ***
# The device serial number is retrieved in order to keep cameras from
# overwriting one another. Grabbing image IDs could also accomplish
# this.
device_serial_number = ''
node_device_serial_number = PySpin.CStringPtr(nodemap_tldevice.GetNode('DeviceSerialNumber'))
if PySpin.IsAvailable(node_device_serial_number) and PySpin.IsReadable(node_device_serial_number):
device_serial_number = node_device_serial_number.GetValue()
print 'Device serial number retrieved as %s...' % device_serial_number
# Retrieve, convert, and save images
for i in range(NUM_IMAGES):
try:
# Retrieve next received image
#
# *** NOTES ***
# Capturing an image houses images on the camera buffer. Trying
# to capture an image that does not exist will hang the camera.
#
# *** LATER ***
# Once an image from the buffer is saved and/or no longer
# needed, the image must be released in order to keep the
# buffer from filling up.
image_result = cam.GetNextImage(1000)
# Ensure image completion
#
# *** NOTES ***
# Images can easily be checked for completion. This should be
# done whenever a complete image is expected or required.
# Further, check image status for a little more insight into
# why an image is incomplete.
if image_result.IsIncomplete():
print 'Image incomplete with image status %d ...' % image_result.GetImageStatus()
else:
# Print image information; height and width recorded in pixels
#
# *** NOTES ***
# Images have quite a bit of available metadata including
# things such as CRC, image status, and offset values, to
# name a few.
width = image_result.GetWidth()
height = image_result.GetHeight()
print 'Grabbed Image %d, width = %d, height = %d' % (i, width, height)
# Convert image to mono 8
#
# *** NOTES ***
# Images can be converted between pixel formats by using
# the appropriate enumeration value. Unlike the original
# image, the converted one does not need to be released as
# it does not affect the camera buffer.
#
# When converting images, color processing algorithm is an
# optional parameter.
image_converted = image_result.Convert(PySpin.PixelFormat_Mono8, PySpin.HQ_LINEAR)
# Create a unique filename
if device_serial_number:
filename = 'Acquisition-%s-%d.jpg' % (device_serial_number, i)
else: # if serial number is empty
filename = 'Acquisition-%d.jpg' % i
# Save image
#
# *** NOTES ***
# The standard practice of the examples is to use device
# serial numbers to keep images of one device from
# overwriting those of another.
image_converted.Save(filename)
print 'Image saved at %s' % filename
# Release image
#
# *** NOTES ***
# Images retrieved directly from the camera (i.e. non-converted
# images) need to be released in order to keep from filling the
# buffer.
image_result.Release()
print ''
except PySpin.SpinnakerException as ex:
print 'Error: %s' % ex
return False
# End acquisition
#
# *** NOTES ***
# Ending acquisition appropriately helps ensure that devices clean up
# properly and do not need to be power-cycled to maintain integrity.
cam.EndAcquisition()
except PySpin.SpinnakerException as ex:
print 'Error: %s' % ex
return False
return result
| 5,349,123 |
def apply_operations(source: dict, graph: BaseGraph) -> BaseGraph:
"""
Apply operations as defined in the YAML.
Parameters
----------
source: dict
The source from the YAML
graph: kgx.graph.base_graph.BaseGraph
The graph corresponding to the source
Returns
-------
kgx.graph.base_graph.BaseGraph
The graph corresponding to the source
"""
operations = source['operations']
for operation in operations:
op_name = operation['name']
op_args = operation['args']
module_name = '.'.join(op_name.split('.')[0:-1])
function_name = op_name.split('.')[-1]
f = getattr(importlib.import_module(module_name), function_name)
log.info(f"Applying operation {op_name} with args: {op_args}")
f(graph, **op_args)
return graph
| 5,349,124 |
def plotter(model, X, Y, ax, npts=5000):
"""
Simple way to get a visualization of the decision boundary
by applying the model to randomly-chosen points
could alternately use sklearn's "decision_function"
at some point it made sense to bring pandas into this
"""
xs = []
ys = []
cs = []
for _ in range(npts):
x0spr = max(X[:,0])-min(X[:,0])
x1spr = max(X[:,1])-min(X[:,1])
x = np.random.rand()*x0spr + min(X[:,0])
y = np.random.rand()*x1spr + min(X[:,1])
xs.append(x)
ys.append(y)
cs.append(model.predict([x,y]))
ax.scatter(xs,ys,c=list(map(lambda x:'lightgrey' if x==0 else 'black', cs)), alpha=.35)
ax.hold(True)
ax.scatter(X[:,0],X[:,1],
c=list(map(lambda x:'r' if x else 'lime',Y)),
linewidth=0,s=25,alpha=1)
ax.set_xlim([min(X[:,0]), max(X[:,0])])
ax.set_ylim
| 5,349,125 |
def skillLvl(ign, key):
"""Get the skill lvl of the player"""
data = requests.get(f'https://hypixel-api.senither.com/v1/profiles/{uuid(ign)}/weight/?key={key}').json()
skill_types = ['mining', 'foraging', 'enchanting', 'farming', 'combat', 'fishing', 'alchemy', 'taming']
skills_lvl = []
for i in skill_types:
skills_lvl.append(data['data']['skills'][i]['level'])
for i in range(len(skills_lvl)):
skills_lvl[i] = round(skills_lvl[i], 1)
'#Note: there are 2 ways to return these values, you pick which one you prefer and uncomment them'
'#First:'
# for i in range(8):
# if i == 8:
# break
# print(f'{skill_types[i]}: {skills_lvl[i]}')
'#Second:'
# return f'Mining: {skills_lvl[0]}\nForaging: {skills_lvl[1]}\nEnchanting: {skills_lvl[2]}\nFarming: ' \
# f'{skills_lvl[3]}\nCombat: {skills_lvl[4]}\nFishing: {skills_lvl[5]}\nAlchemy: {skills_lvl[6]}' \
# f'\nTaming: {skills_lvl[7]}'
| 5,349,126 |
def tt_logdotexp(A, b):
"""Construct a Theano graph for a numerically stable log-scale dot product.
The result is more or less equivalent to `tt.log(tt.exp(A).dot(tt.exp(b)))`
"""
A_bcast = A.dimshuffle(list(range(A.ndim)) + ["x"])
sqz = False
shape_b = ["x"] + list(range(b.ndim))
if len(shape_b) < 3:
shape_b += ["x"]
sqz = True
b_bcast = b.dimshuffle(shape_b)
res = tt_logsumexp(A_bcast + b_bcast, axis=1)
return res.squeeze() if sqz else res
| 5,349,127 |
def test_get_fallback_executable(mock_os_path_exists):
"""Find vmrun in PATH."""
mock_os_path_exists.return_value = True
with patch.dict('os.environ', {'PATH': '/tmp:/tmp2'}):
got = mech.utils.get_fallback_executable()
expected = '/tmp/vmrun'
assert got == expected
mock_os_path_exists.assert_called()
| 5,349,128 |
def _check_like(val, _np_types, _native_types, check_str=None): # pylint: disable=too-many-return-statements
"""
Checks the following:
- if val is instance of _np_types or _native_types
- if val is a list or ndarray of _np_types or _native_types
- if val is a string or list of strings that can be parsed by check_str
Does not check:
- if val is an ndarray of strings that can be parsed by check_str
"""
_all_types = _np_types + _native_types
if isinstance(val, _all_types):
return True
elif isinstance(val, string_types):
return check_str and check_str(val)
elif isinstance(val, (list, tuple)):
for v in val:
if isinstance(v, string_types):
if check_str and check_str(v):
continue
if not isinstance(v, _all_types):
return False
return True
elif hasattr(val, 'dtype'):
if val.dtype == np.object:
return all(isinstance(v, _native_types) for v in val)
else:
return val.dtype.type in _np_types
else:
return False
| 5,349,129 |
def rotation_matrix(x, y, theta):
""" Calculate the rotation matrix. Origin is assumed to be (0, 0)
theta must be in radians
"""
return [np.cos(theta) * x - np.sin(theta) * y, np.sin(theta) * x + np.cos(theta) * y]
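
# Usage sketch (not part of the original snippet): rotate the point (1, 0) by
# 90 degrees about the origin. Assumes numpy imported as np.
import numpy as np

x_new, y_new = rotation_matrix(1.0, 0.0, np.pi / 2)
print(round(x_new, 6), round(y_new, 6))  # 0.0 1.0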
| 5,349,130 |
def create_players(num_human: int, num_random: int, smart_players: List[int]) \
-> List[Player]:
"""Return a new list of Player objects.
<num_human> is the number of human players, <num_random> is the number of
random players, and <smart_players> is a list of difficulty levels for each
SmartPlayer that is to be created.
The list should contain <num_human> HumanPlayer objects first, then
<num_random> RandomPlayer objects, then the same number of SmartPlayer
objects as the length of <smart_players>. The difficulty levels in
<smart_players> should be applied to each SmartPlayer object, in order.
"""
goal = generate_goals(num_random + num_human + len(smart_players))
final = []
for x in range(num_human):
final.append(HumanPlayer(x, goal[x]))
for y in range(num_random):
final.append(RandomPlayer(num_human + y, goal[num_human + y]))
for z in range(len(smart_players)):
final.append(SmartPlayer(num_human + num_random + z,
goal[num_human + num_random + z],
smart_players[z]))
return final
| 5,349,131 |
def file_info(path):
"""
Return file information on `path`. Example output:
{
'filename': 'passwd',
'dir': '/etc/',
'path': '/etc/passwd',
'type': 'file',
'size': 2790,
'mode': 33188,
'uid': 0,
'gid': 0,
'device': 64769
}
"""
fname = os.path.basename(path)
fdir = os.path.dirname(path)
fstat = os.lstat(path)
ftype = file_types.get(stat.S_IFMT(fstat.st_mode), "unknown")
return {
"filename": fname,
"dir": fdir,
"path": path,
"type": ftype,
"size": fstat.st_size,
"mode": fstat.st_mode,
"uid": fstat.st_uid,
"gid": fstat.st_gid,
"device": fstat.st_dev,
}
| 5,349,132 |
def test_beam_focusing( show=False ):
"""
Runs the simulation of a focusing charged beam, in a boosted-frame,
with and without the injection through a plane.
The value of the RMS radius at focus is automatically checked.
"""
# Simulate beam focusing with injection through plane or not
simulate_beam_focusing( None, 'direct' )
simulate_beam_focusing( z_focus, 'through_plane' )
# Analyze the results and show that the beam reaches
# the right RMS radius at focus
ts1 = OpenPMDTimeSeries('./direct/hdf5/')
r1 = get_rms_radius( ts1 )
ts2 = OpenPMDTimeSeries('./through_plane/hdf5/')
r2 = get_rms_radius( ts2 )
if show:
import matplotlib.pyplot as plt
plt.plot( 1.e3*c*ts1.t, 1.e6*r1 )
plt.plot( 1.e3*c*ts2.t, 1.e6*r2 )
plt.xlabel( 'z (mm)' )
plt.ylabel( 'RMS radius (microns)' )
plt.show()
# Find the index of the output at z_focus
i = np.argmin( abs( c*ts2.t - z_focus ) )
# With injection through plane, we get the right RMS value at focus
assert abs( r2[i] - sigma_r ) < 0.05e-6
# Without injection through plane, the RMS value is significantly different
assert abs( r1[i] - sigma_r ) > 0.5e-6
# Clean up the data folders
shutil.rmtree( 'direct' )
shutil.rmtree( 'through_plane' )
| 5,349,133 |
def extract_test_params(root):
"""VFT parameters, e.g. TEST_PATTERN, TEST_STRATEGY, ..."""
res = {}
'''
xpath = STATIC_TEST + '*'
elems = root.findall(xpath) + root.findall(xpath+'/FIXATION_CHECK*')
#return {e.tag:int(e.text) for e in elems if e.text.isdigit()}
print(xpath)
for e in elems:
print(e.tag)
if e.text.isdigit():
res[e.tag] = int(e.text)
elif len(e.text) > 1:
#print(e.tag, e.text,type(e.text),'$'*100)
res[e.tag] =e.text
else:
for ee in e:
if ee.tag not in ['QUESTIONS_ASKED','SF']:
if ee.text.isdigit():
res[ee.tag] = int(ee.text)
elif len(ee.text) > 1:
res[ee.tag] = ee.text
'''
for p in params:
xpath = STATIC_TEST + p
el = root.findall(xpath)
if not el:
res[p.split('/')[-1]] =''
elif el[0].text.isdigit():
res[el[0].tag] = int(el[0].text)
else:
res[el[0].tag] = el[0].text
for pth in [DISPLAY_NAME,VISIT_DATE,SERIES_DATE_TIME,TEST_NODE+'PUPIL_DIAMETER',TEST_NODE+'PUPIL_DIAMETER_AUTO',TEST_NODE+'EXAM_TIME']:
e=root.find(pth)
if e.text is None:
res[e.tag] = e.text
else:
if e.text.isdigit():
res[e.tag] = int(e.text)
else:
res[e.tag] = e.text
'''
vkind = ['THRESHOLD', 'TOTAL', 'PATTERN']
for vk in vkind:
vs = extract_vft_values(root, vk)
mat = vf2matrix(vs)
res[vk+'_MATRIX'] = [mat]
'''
return res
| 5,349,134 |
def csc_list(
city: str,
state: Optional[str] = None,
country: Optional[str] = None,
) -> List[db.Geoname]:
"""
>>> [g.country_code for g in csc_list('sydney')]
['AU', 'CA', 'US', 'US', 'ZA', 'VU', 'US', 'US', 'CA']
>>> [g.name for g in csc_list('sydney', country='australia')]
['Sydney']
>>> [g.timezone for g in csc_list('sydney', state='victoria')][:3]
['Australia/Sydney', 'America/Glace_Bay', 'America/Phoenix']
"""
if state and country:
cinfo = db.country_info(country)
states = [
g for g in db.select_geonames_name(state)
if g.feature_class == 'A' and g.country_code == cinfo.iso
]
cities = [
g for g in db.select_geonames_name(city)
if g.feature_class == 'P' and g.country_code == cinfo.iso
]
city_matches = list(_match(cities, states))
if city_matches:
return [c for (c, _) in city_matches]
#
# Try omitting state. If the country is specified, that alone may be sufficient.
#
if country:
cinfo = db.country_info(country)
cities = [
g for g in db.select_geonames_name(city)
if g.feature_class == 'P' and g.country_code == cinfo.iso
]
if cities:
return cities
#
# Perhaps state is really a city?
#
if state and country:
cinfo = db.country_info(country)
cities = [
g for g in db.select_geonames_name(state)
if g.country_code == cinfo.iso
]
if cities:
return cities
#
# Perhaps the specified country is wrong?
#
if state:
states = [g for g in db.select_geonames_name(state) if g.feature_class == 'A']
cities = [g for g in db.select_geonames_name(city) if g.feature_class == 'P']
city_matches = list(_match(cities, states))
if city_matches:
return [c for (c, _) in city_matches]
#
# Perhaps city itself is unique?
#
cities = [g for g in db.select_geonames_name(city) if g.feature_class == 'P']
if cities:
return cities
return list(db.select_geonames_name(city))
| 5,349,135 |
def calculate_frame_score(current_frame_hsv: Iterable[cupy.ndarray],
last_frame_hsv: Iterable[cupy.ndarray]) -> Tuple[float]:
"""Calculates score between two adjacent frames in the HSV colourspace. Frames should be
split, e.g. cv2.split(cv2.cvtColor(frame_data, cv2.COLOR_BGR2HSV)).
Arguments:
current_frame_hsv: Current frame.
last_frame_hsv: Previous frame.
Returns:
Tuple containing the average pixel change for each component as well as the average
across all components, e.g. (avg_h, avg_s, avg_v, avg_all).
"""
current_frame_hsv = [x.astype(cupy.int32) for x in current_frame_hsv]
last_frame_hsv = [x.astype(cupy.int32) for x in last_frame_hsv]
delta_hsv = [0, 0, 0, 0]
for i in range(3):
num_pixels = current_frame_hsv[i].shape[0] * current_frame_hsv[i].shape[1]
delta_hsv[i] = cupy.sum(
cupy.abs(current_frame_hsv[i] - last_frame_hsv[i])) / float(num_pixels)
delta_hsv[3] = sum(delta_hsv[0:3]) / 3.0
return tuple(delta_hsv)
| 5,349,136 |
def test_md024_good_different_heading_content_setext():
"""
Test to make sure this rule does not trigger with a document that
contains SetExt headings with no duplicate content.
"""
# Arrange
scanner = MarkdownScanner()
supplied_arguments = [
"scan",
"test/resources/rules/md024/different_heading_content_setext.md",
]
expected_return_code = 0
expected_output = ""
expected_error = ""
# Act
execute_results = scanner.invoke_main(arguments=supplied_arguments)
# Assert
execute_results.assert_results(
expected_output, expected_error, expected_return_code
)
| 5,349,137 |
def huber_loss_function(sq_resi, k=1.345):
"""Robust loss function which penalises outliers, as detailed in Jankowski et al (2018).
Parameters
----------
sq_resi : `float` or `list`
A single or list of the squared residuals.
k : `float`, optional
A constant that defines at which distance the loss function starts to penalize outliers. |br| Default: 1.345.
Returns
-------
rho : `float` or `list`
The modified squared residuals.
"""
single_value = False
if isinstance(sq_resi, float) or isinstance(sq_resi, int):
sq_resi = np.array([sq_resi])
single_value = True
elif isinstance(sq_resi, list):
sq_resi = np.array(sq_resi)
rho = []
residual = np.sqrt(abs(sq_resi))
for j in range(len(residual)):
if residual[j] < k:
rho.append( sq_resi[j]/2 )
else:
rho.append( k * residual[j] - 1./2. * k**2 )
if single_value:
return rho[0]
else:
return rho
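
# Worked example (not part of the original snippet): with the default k = 1.345,
# a residual of 1 keeps its quadratic penalty while a residual of 3 is only
# penalised linearly. Assumes numpy imported as np.
print(huber_loss_function(1.0))         # 0.5    (1**2 / 2)
print(huber_loss_function(9.0))         # ~3.13  (1.345*3 - 0.5*1.345**2)
print(huber_loss_function([1.0, 9.0]))  # [0.5, ~3.13]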
| 5,349,138 |
def clean_sentence(sentence: str) -> str:
"""
Preprocessing applied to the text before it is fed to BERT.
Args:
sentence (str): the raw input sentence
Returns:
str: the cleaned sentence
"""
sentence = re.sub(r"<[^>]*?>", "", sentence) # タグ除外
sentence = mojimoji.zen_to_han(sentence, kana=False)
sentence = neologdn.normalize(sentence)
sentence = re.sub(
r'[!"#$%&\'\\\\()*+,\-./:;<=>?@\[\]\^\_\`{|}~「」〔〕“”〈〉『』【】&*・()$#@?!`+¥%︰-@]。、♪',
" ",
sentence,
) # 記号
sentence = re.sub(r"https?://[\w/:%#\$&\?\(\)~\.=\+\-]+", "", sentence)
sentence = re.sub(r"[0-90-9a-zA-Za-zA-Z]+", " ", sentence)
sentence = "".join(
[
emoji_dict[c].get("short_name", "") if c in emoji.UNICODE_EMOJI["en"] else c
for c in sentence
]
)
return sentence
| 5,349,139 |
def test_survive_after_linting():
"""Test that it handles vital.vim, without crashing."""
cmd = [sys.executable, "-m", "vint", vital_dir]
try:
output = subprocess.check_output(
cmd, stderr=subprocess.STDOUT, universal_newlines=True
)
except subprocess.CalledProcessError as err:
output = err.stdout
assert "Traceback" not in output
| 5,349,140 |
def assert_user(user_id: int, permission: Union[str, Enum] = None) -> bool:
"""
Assert that a user_id belongs to the requesting user, or that
the requesting user has a given permission.
"""
permission = (
permission.value if isinstance(permission, Enum) else permission
)
return flask.g.user.id == user_id or flask.g.user.has_permission(
permission
)
| 5,349,141 |
def single_prob(n, n0, psi, c=2):
"""
Eq. 1.3 in Conlisk et al. (2007); note that this implementation is
only correct when the variable c = 2
Note: if psi = .5 this is the special HEAP case in which the
function no longer depends on n.
c = number of cells
"""
a = (1 - psi) / psi
F = (get_F(a, n) * get_F((c - 1) * a, n0 - n)) / get_F(c * a, n0)
return float(F)
| 5,349,142 |
def getbyid(ctx,
# Mandatory main parameter
accountid):
"""GetAccountByID enables you to return details about a specific account, given its accountID."""
cli_utils.establish_connection(ctx)
ctx.logger.info(""": """"""accountid = """ + str(accountid)+""";"""+"")
try:
_GetAccountResult = ctx.element.get_account_by_id(account_id=accountid)
except common.ApiServerError as e:
ctx.logger.error(e.message)
exit()
except BaseException as e:
ctx.logger.error(e.__str__())
exit()
if ctx.json:
print(simplejson.dumps(simplejson.loads(_GetAccountResult), indent=4))
return
else:
cli_utils.print_result(_GetAccountResult, ctx.logger, as_json=ctx.json, as_pickle=ctx.pickle, depth=ctx.depth, filter_tree=ctx.filter_tree)
| 5,349,143 |
def array_pair_sum_iterative(arr, k):
"""
returns the array of pairs using an iterative method.
complexity: O(n^2)
"""
result = []
for i in range(len(arr)):
for j in range(i + 1, len(arr)):
if arr[i] + arr[j] == k:
result.append([arr[i], arr[j]])
return result
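
# Usage sketch (not part of the original snippet): find all pairs summing to k.
print(array_pair_sum_iterative([1, 2, 3, 4], 5))  # [[1, 4], [2, 3]]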
| 5,349,144 |
def save_checkpoint(state, is_best, exp_name):
"""
save the checkpoint during training stage
:param state: content to be saved
:param is_best: if DPGN model's performance is the best at current step
:param exp_name: experiment name
:return: None
"""
torch.save(state, os.path.join('{}'.format(exp_name), 'checkpoint.pth.tar'))
if is_best:
shutil.copyfile(os.path.join('{}'.format(exp_name), 'checkpoint.pth.tar'),
os.path.join('{}'.format(exp_name), 'model_best.pth.tar'))
| 5,349,145 |
def ComparativePlotting(t_df, p_df_dic):
"""
Plotting result comparisons.
"""
dims = {'s': 'start', 'd': 'duration',
'wt': 'waitTime', 'it': 'initTime', 'l': 'latency'}
t_df['execution'] = t_df['duration'] - t_df['initTime']
t_df['start'] = t_df['start']/1000.0
t_df['latency'] = t_df['latency']/1000.0
p_df = p_df_dic['perf_records']
# Add new dimensions if perf data is available
# p_df['IPC'] = p_df['instructions']/p_df['cycles']
# p_df['Page Faults per Million Instruction'] = 1000000.0*p_df['page-faults']/p_df['instructions']
# Add your plotting code here
plt.show()
plt.close()
| 5,349,146 |
def cli(summary_sheet, output, village_id_map):
"""Reformat and combine collection spreadsheets into a single standardized file."""
village_id_map = get_village_id_map(village_id_map)
df_all = []
# load all xls files into a list of dataframes
for f in summary_sheet:
df_all.extend(load_xl_sheets(f).values())
# run our recoder's on each dataframe
for df in df_all:
recode_sex(df)
recode_species(df)
recode_villages(df, village_id_map=village_id_map)
recode_positives(df)
recode_dead(df)
recode_teneral(df)
recode_date(df)
add_infection_state_col(df)
# combine all dataframes into a single big dataframe
df_big = pd.concat(df_all)
# write the new dataframe to xls file
df_big.to_excel(output, index=False)
| 5,349,147 |
def merge_named_payload(name_to_merge_op):
"""Merging dictionary payload by key.
name_to_merge_op is a dict mapping from field names to merge_ops.
Example:
If name_to_merge_op is
{
'f1': mergeop1,
'f2': mergeop2,
'f3': mergeop3
},
Then two payloads { 'f1': a1, 'f2': b1, 'f3': c1 } and
{ 'f1': a2, 'f2': b2, 'f3': c2 } will be merged into
{
'f1': mergeop1(a1, a2),
'f2': mergeop2(b1, b2),
'f3': mergeop3(c1, c2)
}.
"""
def merge(p1,p2):
p = {}
for name, op in name_to_merge_op.items():
p[name] = op(p1[name], p2[name])
return p
return merge
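
# Usage sketch (not part of the original snippet): the field names and merge ops
# below are made up for illustration (sum the counts, keep the larger maximum).
merge = merge_named_payload({'count': lambda a, b: a + b, 'max': max})
print(merge({'count': 1, 'max': 5}, {'count': 2, 'max': 7}))
# {'count': 3, 'max': 7}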
| 5,349,148 |
def PrintResultsDuplicationsDistances(outfile,
categories,
histogram_data,
options):
"""write histograms of duplication distances."""
###################################
# construct and write histograms
num_bins = 100
bins = map(lambda x: float(x) / 20.0, range(0, num_bins))
histograms1 = {}
histograms2 = {}
vals0 = []
vals1 = []
for key, vals in histogram_data.items():
if key not in categories:
continue
h = scipy.stats.histogram2(vals[0], bins)
histograms1[key] = h
h = scipy.stats.histogram2(vals[1], bins)
histograms2[key] = h
vals0 += vals[0]
vals1 += vals[1]
h0 = scipy.stats.histogram2(vals0, bins)
h1 = scipy.stats.histogram2(vals1, bins)
outfile.write("# duplications - all histograms for %s and %s\n" %
(options.schema1, options.schema2))
outfile.write("bin\t('sum','sum')\t%s\n" %
"\t\t".join(map(str, categories)))
for b in range(0, num_bins):
outfile.write("%5.2f" % bins[b])
outfile.write("\t%i\t%i" % (h0[b], h1[b]))
for x in categories:
if x in histograms1 and x in histograms2:
outfile.write("\t%i\t%i" %
(histograms1[x][b], histograms2[x][b]))
else:
outfile.write("\t0\t0")
outfile.write("\n")
outfile.write("total")
outfile.write(
"\t%i\t%i" %
(reduce(lambda x, y: x + y, h0), reduce(lambda x, y: x + y, h0)))
for x in categories:
if x in histograms1 and x in histograms2:
outfile.write("\t%i\t%i" %
(reduce(lambda x, y: x + y, histograms1[x]),
reduce(lambda x, y: x + y, histograms2[x])))
else:
outfile.write("\t0\t0")
outfile.write("\n")
| 5,349,149 |
def euclidean_distance(this_set, other_set, bsf_dist):
"""Calculate the Euclidean distance between 2 1-D arrays.
If the distance is larger than bsf_dist, then we end the calculation and return the bsf_dist.
Args:
this_set: ndarray
The array
other_set: ndarray
The comparative array.
bsf_dist:
The best so far distance.
Returns:
output: float
The accumulated squared Euclidean distance, or bsf_dist if it is exceeded.
"""
sum_dist = 0
for index in range(0, len(this_set)):
sum_dist += (this_set[index] - other_set[index]) ** 2
if sum_dist > bsf_dist:
return bsf_dist
return sum_dist
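
# Worked example (not part of the original snippet): the squared distance between
# [1, 2, 3] and [2, 4, 6] is 1 + 4 + 9 = 14; with a best-so-far of 10 the
# accumulation is abandoned early and 10 is returned instead.
print(euclidean_distance([1, 2, 3], [2, 4, 6], bsf_dist=100))  # 14
print(euclidean_distance([1, 2, 3], [2, 4, 6], bsf_dist=10))   # 10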
| 5,349,150 |
def run_cmd_simple(cmd: str,
variables: dict,
env=None,
args: List[str] = None,
libraries=None) -> Union[dict, str]:
"""
Run cmd with variables written in environment.
:param args: cmd arguments
:param cmd: to run
:param variables: variables
:param env: custom environment
:param libraries: additional libraries used for source compilation
:return: output in json (if can be parsed) or plaintext
"""
env = _prepare_env(variables, env=env)
cmd, cwd = _prepare_cmd(cmd, args, variables, libraries=libraries)
p = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.STDOUT, env=env, cwd=cwd)
if p.wait() == 0:
out = p.stdout.read().decode()
debug(out)
return _parse_output(out)
else:
out = p.stdout.read().decode()
warning(out)
raise Exception('Execution failed.')
| 5,349,151 |
def host(provider: Provider) -> Host:
"""Create host"""
return provider.host_create(utils.random_string())
| 5,349,152 |
def loop_invariant_branching_while():
"""Ensure node is walked up to find a loop-invariant branch"""
x = [1, 2, 3, 4]
i = 6
j = 0
while j < 10_000:
j += 1
# Marks entire branch
if len(x) > 2:
print(x * i)
# Marks comparator, but not print
j = 0
while j < 10_000:
j += 1
if len(x) > 2:
print(x * j)
| 5,349,153 |
def deal_with_direct(header: HeaderModel, body: BodyModel, name, packet_record: PacketRecord,
auth: WorkspaceAuth):
"""
Replay the request using manually entered authentication information.
:param header:
:param body:
:param name:
:param packet_record:
:param auth:
:return:
"""
logger.info("{} deal with direct: {}".format(name, header.url))
if auth != "":
for auth_info in auth.auth_info:
assert isinstance(auth_info, AuthInfo)
if auth_info.url_pattern and not re.match(auth_info.url_pattern, header.url): # url_pattern
continue
_header = copy.copy(header)
_body = copy.copy(body)
try:
if auth_info.auth_args:
_header.update_args(auth_info.auth_args)
if auth_info.auth_header:
_header.update_headers(auth_info.auth_header)
if auth_info.auth_param:
_body.update_param(auth_info.auth_param)
if _body.type == BodyModel.TYPE_JSON:
raw_rest = requests_request(_header.method, _header.url, json=_body.body(), headers=_header.header)
elif _body.type in [BodyModel.TYPE_FORM, BodyModel.TYPE_BYTE]:
raw_rest = requests_request(_header.method, _header.url, data=_body.body(), headers=_header.header)
else:
raise ParserException("illegal body type {}".format(_body.type))
except Exception as e:
logger.error("{} processing error!".format(auth_info.describe), exc_info=True)
packet_data = PacketData(banner=gen_banner(auth_info.describe, _header.method, _header.url, str(e)),
role_describe=auth_info.describe)
packet_data.save()
packet_record.per_packets.append(packet_data)
else:
resp_body = BodyModel(raw_rest.content, charset=raw_rest.encoding)
packet_data = PacketData(banner=gen_banner(auth_info.describe, _header.method, _header.url,
raw_rest.text), role_describe=auth_info.describe,
request=Request(url=_header.url, method=_header.method, header=_header.header,
body_content=_body.content, body_type=_body.type),
response=Response(status_code=raw_rest.status_code, header=raw_rest.headers,
body_content=resp_body.content, body_type=resp_body.type))
packet_data.save()
packet_record.per_packets.append(packet_data)
else:
_header = copy.copy(header)
_body = copy.copy(body)
try:
if _body.type == BodyModel.TYPE_JSON:
raw_rest = requests_request(_header.method, _header.url, json=_body.body(), headers=_header.header)
elif _body.type in [BodyModel.TYPE_FORM, BodyModel.TYPE_BYTE]:
raw_rest = requests_request(_header.method, _header.url, data=_body.body(), headers=_header.header)
else:
raise ParserException("illegal body type {}".format(_body.type))
except Exception as e:
logger.error("{} processing error!".format("原始包"), exc_info=True)
packet_data = PacketData(banner=gen_banner("原始包", _header.method, _header.url, str(e)),
role_describe="原始包")
packet_data.save()
packet_record.per_packets.append(packet_data)
        else:
            # only record the response when the request actually succeeded
            resp_body = BodyModel(raw_rest.content, charset=raw_rest.encoding)
            packet_data = PacketData(banner=gen_banner("原始包", _header.method, _header.url,
                                                       raw_rest.text), role_describe="原始包",
                                     request=Request(url=_header.url, method=_header.method, header=_header.header,
                                                     body_content=_body.content, body_type=_body.type),
                                     response=Response(status_code=raw_rest.status_code, header=raw_rest.headers,
                                                       body_content=resp_body.content, body_type=resp_body.type))
            packet_data.save()
            packet_record.per_packets.append(packet_data)
| 5,349,154 |
def launch(reactor,
progress_updates=None,
control_port=None,
data_directory=None,
socks_port=None,
stdout=None,
stderr=None,
timeout=None,
tor_binary=None,
user=None, # XXX like the config['User'] special-casing from before
# 'users' probably never need these:
connection_creator=None,
kill_on_stderr=True,
_tor_config=None, # a TorConfig instance, mostly for tests
):
"""
launches a new Tor process, and returns a Deferred that fires with
a new :class:`txtorcon.Tor` instance. From this instance, you can
create or get any "interesting" instances you need: the
:class:`txtorcon.TorConfig` instance, create endpoints, create
:class:`txtorcon.TorState` instance(s), etc.
    Note that there is NO way to pass in a config; we only expose a
couple of basic Tor options. If you need anything beyond these,
you can access the ``TorConfig`` instance (via ``.config``)
and make any changes there, reflecting them in tor with
``.config.save()``.
    You can ignore all the options and safe defaults will be
provided. However, **it is recommended to pass data_directory**
especially if you will be starting up Tor frequently, as it saves
a bunch of time (and bandwidth for the directory
authorities). "Safe defaults" means:
- a tempdir for a ``DataDirectory`` is used (respecting ``TMP``)
and is deleted when this tor is shut down (you therefore
*probably* want to supply the ``data_directory=`` kwarg);
- a random, currently-unused local TCP port is used as the
``SocksPort`` (specify ``socks_port=`` if you want your
own). If you want no SOCKS listener at all, pass
``socks_port=0``
- we set ``__OwningControllerProcess`` and call
``TAKEOWNERSHIP`` so that if our control connection goes away,
tor shuts down (see `control-spec
<https://gitweb.torproject.org/torspec.git/blob/HEAD:/control-spec.txt>`_
3.23).
- the launched Tor will use ``COOKIE`` authentication.
:param reactor: a Twisted IReactorCore implementation (usually
twisted.internet.reactor)
:param progress_updates: a callback which gets progress updates; gets 3
args: percent, tag, summary (FIXME make an interface for this).
:param data_directory: set as the ``DataDirectory`` option to Tor,
this is where tor keeps its state information (cached relays,
etc); starting with an already-populated state directory is a lot
faster. If ``None`` (the default), we create a tempdir for this
**and delete it on exit**. It is recommended you pass something here.
:param stdout: a file-like object to which we write anything that
Tor prints on stdout (just needs to support write()).
:param stderr: a file-like object to which we write anything that
Tor prints on stderr (just needs .write()). Note that we kill
Tor off by default if anything appears on stderr; pass
"kill_on_stderr=False" if you don't want this behavior.
:param tor_binary: path to the Tor binary to run. If None (the
default), we try to find the tor binary.
:param kill_on_stderr:
When True (the default), if Tor prints anything on stderr we
kill off the process, close the TorControlProtocol and raise
an exception.
:param connection_creator: is mostly available to ease testing, so
you probably don't want to supply this. If supplied, it is a
callable that should return a Deferred that delivers an
:api:`twisted.internet.interfaces.IProtocol <IProtocol>` or
ConnectError.
See :api:`twisted.internet.interfaces.IStreamClientEndpoint`.connect
Note that this parameter is ignored if config.ControlPort == 0
:return: a Deferred which callbacks with :class:`txtorcon.Tor`
instance, from which you can retrieve the TorControlProtocol
instance via the ``.protocol`` property.
HACKS:
1. It's hard to know when Tor has both (completely!) written its
authentication cookie file AND is listening on the control
port. It seems that waiting for the first 'bootstrap' message on
stdout is sufficient. Seems fragile...and doesn't work 100% of
the time, so FIXME look at Tor source.
XXX this "User" thing was, IIRC, a feature for root-using scripts
(!!) that were going to launch tor, but where tor would drop to a
different user. Do we still want to support this? Probably
relevant to Docker (where everything is root! yay!)
``User``: if this exists, we attempt to set ownership of the tempdir
to this user (but only if our effective UID is 0).
"""
# We have a slight problem with the approach: we need to pass a
# few minimum values to a torrc file so that Tor will start up
# enough that we may connect to it. Ideally, we'd be able to
# start a Tor up which doesn't really do anything except provide
# "AUTHENTICATE" and "GETINFO config/names" so we can do our
# config validation.
if not IReactorCore.providedBy(reactor):
raise ValueError(
"'reactor' argument must provide IReactorCore"
" (got '{}': {})".format(
                type(reactor).__name__,
repr(reactor)
)
)
if tor_binary is None:
tor_binary = find_tor_binary()
if tor_binary is None:
# We fail right here instead of waiting for the reactor to start
raise TorNotFound('Tor binary could not be found')
# make sure we got things that have write() for stderr, stdout
# kwargs (XXX is there a "better" way to check for file-like
# object? do we use anything besides 'write()'?)
for arg in [stderr, stdout]:
if arg and not getattr(arg, "write", None):
raise RuntimeError(
'File-like object needed for stdout or stderr args.'
)
config = _tor_config or TorConfig()
if data_directory is not None:
user_set_data_directory = True
config.DataDirectory = data_directory
try:
os.mkdir(data_directory, 0o0700)
except OSError:
pass
else:
user_set_data_directory = False
data_directory = tempfile.mkdtemp(prefix='tortmp')
config.DataDirectory = data_directory
# note: we also set up the ProcessProtocol to delete this when
# Tor exits, this is "just in case" fallback:
reactor.addSystemEventTrigger(
'before', 'shutdown',
functools.partial(delete_file_or_tree, data_directory)
)
# things that used launch_tor() had to set ControlPort and/or
# SocksPort on the config to pass them, so we honour that here.
if control_port is None and _tor_config is not None:
try:
control_port = config.ControlPort
except KeyError:
control_port = None
if socks_port is None and _tor_config is not None:
try:
socks_port = config.SocksPort
except KeyError:
socks_port = None
if socks_port is None:
socks_port = yield available_tcp_port(reactor)
config.SOCKSPort = socks_port
try:
our_user = user or config.User
except KeyError:
pass
else:
# if we're root, make sure the directory is owned by the User
# that Tor is configured to drop to
if sys.platform in ('linux', 'linux2', 'darwin') and os.geteuid() == 0:
os.chown(data_directory, pwd.getpwnam(our_user).pw_uid, -1)
# user can pass in a control port, or we set one up here
if control_port is None:
# on posix-y systems, we can use a unix-socket
if sys.platform in ('linux', 'linux2', 'darwin'):
# note: tor will not accept a relative path for ControlPort
control_port = 'unix:{}'.format(
os.path.join(os.path.realpath(data_directory), 'control.socket')
)
else:
control_port = yield available_tcp_port(reactor)
else:
if str(control_port).startswith('unix:'):
            control_path = control_port[len('unix:'):]
containing_dir = dirname(control_path)
if not exists(containing_dir):
raise ValueError(
"The directory containing '{}' must exist".format(
containing_dir
)
)
# Tor will be sad if the directory isn't 0700
mode = (0o0777 & os.stat(containing_dir).st_mode)
if mode & ~(0o0700):
raise ValueError(
"The directory containing a unix control-socket ('{}') "
"must only be readable by the user".format(containing_dir)
)
config.ControlPort = control_port
config.CookieAuthentication = 1
config.__OwningControllerProcess = os.getpid()
if connection_creator is None:
if str(control_port).startswith('unix:'):
connection_creator = functools.partial(
UNIXClientEndpoint(reactor, control_port[5:]).connect,
TorProtocolFactory()
)
else:
connection_creator = functools.partial(
TCP4ClientEndpoint(reactor, 'localhost', control_port).connect,
TorProtocolFactory()
)
# not an "else" on purpose; if we passed in "control_port=0" *and*
# a custom connection creator, we should still set this to None so
# it's never called (since we can't connect with ControlPort=0)
if control_port == 0:
connection_creator = None
# NOTE well, that if we don't pass "-f" then Tor will merrily load
# its default torrc, and apply our options over top... :/ should
# file a bug probably? --no-defaults or something maybe? (does
# --defaults-torrc - or something work?)
config_args = ['-f', '/dev/null/non-existant-on-purpose', '--ignore-missing-torrc']
# ...now add all our config options on the command-line. This
# avoids writing a temporary torrc.
for (k, v) in config.config_args():
config_args.append(k)
config_args.append(v)
process_protocol = TorProcessProtocol(
connection_creator,
progress_updates,
config, reactor,
timeout,
kill_on_stderr,
stdout,
stderr,
)
if control_port == 0:
connected_cb = succeed(None)
else:
connected_cb = process_protocol.when_connected()
# we set both to_delete and the shutdown events because this
# process might be shut down way before the reactor, but if the
# reactor bombs out without the subprocess getting closed cleanly,
# we'll want the system shutdown events triggered so the temporary
# files get cleaned up either way
# we don't want to delete the user's directories, just temporary
# ones this method created.
if not user_set_data_directory:
process_protocol.to_delete = [data_directory]
reactor.addSystemEventTrigger(
'before', 'shutdown',
functools.partial(delete_file_or_tree, data_directory)
)
log.msg('Spawning tor process with DataDirectory', data_directory)
args = [tor_binary] + config_args
transport = reactor.spawnProcess(
process_protocol,
tor_binary,
args=args,
env={'HOME': data_directory},
path=data_directory if os.path.exists(data_directory) else None, # XXX error if it doesn't exist?
)
transport.closeStdin()
proto = yield connected_cb
# note "proto" here is a TorProcessProtocol
# we might need to attach this protocol to the TorConfig
if config.protocol is None and proto is not None and proto.tor_protocol is not None:
# proto is None in the ControlPort=0 case
yield config.attach_protocol(proto.tor_protocol)
# note that attach_protocol waits for the protocol to be
    # bootstrapped if necessary
returnValue(
Tor(
reactor,
config.protocol,
_tor_config=config,
_process_proto=process_protocol,
)
)
| 5,349,155 |
def bj_struktur_p89(x, n: int = 5, **s): # brute force
"""_summary_
:param x: _description_
:type x: _type_
:param n: _description_, defaults to 5
:type n: int, optional
:return: _description_
:rtype: _type_
"""
gamma, K = gamma_K_function(**s)
b_j = np.empty((x.size, n + 1))
for i, xi in enumerate(x):
for j in range(n + 1):
b_j[i, j] = bj_p89(K, xi, j)
return b_j
| 5,349,156 |
def test_get_left():
"""Test left method."""
from heap import Heap
high_low = Heap()
high_low.push(data[0])
high_low.push(data[1])
high_low.push(data[2])
assert high_low.high_low[high_low.get_left(0)] == data[1]
| 5,349,157 |
def _get_pulse_width_and_area(tr, ipick, icross, max_pulse_duration=.08):
"""
Measure the width & area of the arrival pulse on the displacement trace
Start from the displacement peak index (=icross - location of first zero
crossing of velocity)
:param tr: displacement trace
:type tr: obspy.core.trace.Trace or microquake.core.Trace
:param ipick: index of pick in trace
:type ipick: int
:param icross: index of first zero crossing in corresponding velocity trace
:type icross: int
:param max_pulse_duration: max allowed duration (sec) beyond pick to search
for zero crossing of disp pulse
:type max_pulse_duration: float
return pulse_width, pulse_area
:returns: pulse_width, pulse_area: Returns the width and area of the
displacement pulse
:rtype: float, float
"""
fname = '_get_pulse_width_and_area'
data = tr.data
sign = np.sign(data)
nmax = int(max_pulse_duration * tr.stats.sampling_rate)
iend = ipick + nmax
epsilon = 1e-10
if icross >= iend:
i = iend - 1
for i in range(icross, iend):
diff = np.abs(data[i] - data[ipick])
if diff < epsilon or sign[i] != sign[icross]:
break
if i == iend - 1:
logger.info("%s: Unable to locate termination of displacement "
"pulse for tr:%s!" % (fname, tr.get_id()))
return 0, 0
istop = i
pulse_width = float(istop - ipick) * tr.stats.delta
pulse_area = np.trapz(data[ipick:istop], dx=tr.stats.delta)
return pulse_width, pulse_area
| 5,349,158 |
def testQuestionMarkURI():
"""An URI with a question mark"""
assert ["http://www.bdog.fi/cgi-bin/netstore/tuotehaku.pl?tuoteryhma=16"] == grab('http://www.bdog.fi/cgi-bin/netstore/tuotehaku.pl?tuoteryhma=16', needScheme)
| 5,349,159 |
def extract_video(video_path, out_dir, name_length, ext='.jpg'):
"""
retrieve all frames of an video
:param video_path: path of video
:param out_dir: directory of output images
:param name_length: name length of video
:param ext: extension of image
:return: None
"""
if not os.path.exists(out_dir):
os.makedirs(out_dir)
video_ext = os.path.splitext(video_path)[-1]
assert video_ext in ['.mp4', '.avi']
cap = cv2.VideoCapture(video_path)
counter = 0
length = int(cap.get(cv2.CAP_PROP_FRAME_COUNT))
while cap.isOpened():
ret, frame = cap.read()
if ret:
file_name = str(counter).zfill(name_length) + ext
file_path = os.path.join(out_dir, file_name)
cv2.imwrite(file_path, frame)
counter = counter + 1
l1 = counter * 50 // length
print('Extracting {0}, Progress Bar: {1:><{len1}}{2:=<{len2}}. '
'{3} of {4}'.format(video_path, '>', '=', counter, length, len1=l1, len2=50 - l1, ))
if cv2.waitKey(1) & 0xFF == ord('q'):
break
else:
break
return
| 5,349,160 |
def explain(variable, name=""):
"""
Show a brief overview of a variable, including type, size and a sample.
:param variable: any variable
:param name: optional name of the variable used in the title
"""
print()
print(f"Explanation of variable {name}")
print("===============================")
print(f"The type of this variable is {type(variable)}")
try:
print(f"The dimensions of this variable are {variable.shape}")
except AttributeError:
try:
print(f"The length of this variable is '{len(variable)}'")
except TypeError:
print(f"The dimensions of this variable are unknown, meaning it is either 0 dimensional or complex")
sample = str(variable)
if len(sample) > 77:
sample = sample[:77] + "..."
print("Sample of the data:")
print(sample)
print()
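# Illustrative usage (not part of the original source): explain() handles objects
# with a shape, with a length, or with neither.
explain([1, 2, 3], name="demo list")      # reports the length and a sample
explain(3.14159, name="demo scalar")      # falls back to the "unknown dimensions" branch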
| 5,349,161 |
def load_backend(name, options=None):
"""Load the named backend.
Returns the backend class registered for the name.
If you pass None as the name, this will load the default backend.
    See the documentation for get_default() for more information.
Raises:
UnknownBackend: The name is not recognized.
LoadingError: There was an error loading the backend.
"""
if name is None:
assert options is None
return get_default()
if options is None:
options = {}
if name not in _backends:
raise UnknownBackend(name)
options = _backends[name][1](**options)
key = (name, tuple(sorted(list(options.items()))))
res = _active_backends.get(key, None)
if res is None:
try:
res = _backends[name][0](options)
_active_backends[key] = res
except Exception as e:
raise LoadingError(name) from e
return res
| 5,349,162 |
def recursive_reload(module, paths=None, mdict=None):
"""Recursively reload modules."""
if paths is None:
paths = ['']
if mdict is None:
mdict = {}
if module not in mdict:
# modules reloaded from this module
mdict[module] = []
reload(module)
for attribute_name in dir(module):
attribute = getattr(module, attribute_name)
if type(attribute) is ModuleType:
if attribute not in mdict[module]:
if attribute.__name__ not in sys.builtin_module_names:
if os.path.dirname(attribute.__file__) in paths:
mdict[module].append(attribute)
recursive_reload(attribute, paths, mdict)
reload(module)
| 5,349,163 |
def test() -> ScadObject:
"""
Create something.
"""
result = IDUObject()
result += box(10, 10, 5, center=True).translated((0, 0, -1)).named("Translated big box")
result -= box(4, 4, 4, center=True)
result += box(10, 10, 5)
result *= sphere(7).translated((0, 0, 1))
return (
result.rotated((-45, 0, 0))
.rendered(10)
.commented("Render it now!")
.colored("green", alpha=0.5)
.commented(
"""
This file is autogenerated by r7scad.
It is not supposed to be edited manually.
"""
)
)
| 5,349,164 |
def save_exp_log(file_name, d_log):
"""
Utility to save the experiment log as json file
details: Save the experiment log (a dict d_log) in file_name
args: file_name (str) the file in which to save the log
d_log (dict) python dict holding experiment log
"""
with open(file_name, 'w') as fp:
json.dump(d_log, fp, indent=4, sort_keys=True)
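# Illustrative usage (not part of the original source); the path below is a
# hypothetical location and the module-level `import json` used above is assumed.
save_exp_log("/tmp/experiment_log.json",
             {"epochs": 10, "learning_rate": 1e-3, "val_accuracy": 0.91})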
| 5,349,165 |
def rollout(policy, env_class, step_fn=default_rollout_step, max_steps=None):
"""Perform rollout using provided policy and env.
:param policy: policy to use when simulating these episodes.
:param env_class: class to instantiate an env object from.
:param step_fn: a function to be called at each step of rollout.
The function can have 2 or 3 parameters, and must return an action:
* 2 parameter definition: policy, observation.
* 3 parameter definition: policy, observation, step_num.
Default value is ``agentos.core.default_rollout_step``.
:param max_steps: cap on number of steps per episode.
:return: the trajectory that was followed during this rollout.
A trajectory is a named tuple that contains the initial observation (a
scalar) as well as the following arrays: actions, observations,
rewards, dones, contexts. The ith entry of each array corresponds to
the action taken at the ith step of the rollout, and the respective
results returned by the environment after taking that action. To learn
more about the semantics of these, see the documentation and code of
gym.Env.
"""
actions = []
observations = []
rewards = []
dones = []
contexts = []
env = env_class()
obs = env.reset()
init_obs = obs
done = False
step_num = 0
while True:
if done or (max_steps and step_num >= max_steps):
break
if step_fn.__code__.co_argcount == 2:
action = step_fn(policy, obs)
elif step_fn.__code__.co_argcount == 3:
action = step_fn(policy, obs, step_num)
else:
raise TypeError("step_fn must accept 2 or 3 parameters.")
obs, reward, done, ctx = env.step(action)
actions.append(action)
observations.append(obs)
rewards.append(reward)
dones.append(done)
contexts.append(ctx)
step_num += 1
Trajectory = namedtuple(
"Trajectory",
[
"init_obs",
"actions",
"observations",
"rewards",
"dones",
"contexts",
],
)
return Trajectory(
init_obs, actions, observations, rewards, dones, contexts
)
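# Minimal sketch (not part of the original source): a toy environment and an
# explicit 2-parameter step_fn show the interface rollout() expects. It assumes
# the module context of rollout() above (namedtuple import, default_rollout_step).
class _ToyEnv:
    """Counts steps; done after three of them. Mimics the reset()/step() API."""

    def reset(self):
        self.t = 0
        return 0

    def step(self, action):
        self.t += 1
        return self.t, 1.0, self.t >= 3, {}


_traj = rollout(policy=None, env_class=_ToyEnv,
                step_fn=lambda policy, obs: 0,  # always take action 0
                max_steps=10)
# _traj.rewards == [1.0, 1.0, 1.0] and _traj.dones[-1] is True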
| 5,349,166 |
def test_exact_cover_trivial_single_set_single_element_problem(solver_factory):
"""
Consider the following example:
There is a single time labelled 0. So T = {0}.
There is a single event type labelled 0. So U = {0}.
We observe 1 event count for t=0 and u=0, so rhs vector b is:
b = [b_{t=0, u_0}] = [1]
We have the following possible generators to explain the event count:
        index    probability                 counts supplied at t=0, u=0     upper bound
a 1/e 1 1
cost = - log probability = - log(1/e) = 1
Let x = [x_a] represent primal decision variable
Clearly x_a=1 is the only feasible value, hence it is optimal.
    The corresponding optimal objective value of the min problem is 1.
If we formulate relaxed exact cover as a max problem, by flipping the sign
of the costs, the optimal solution is x_a=1 with optimal objective value
of -1.
The dual problem for the max problem is:
b = [1]
y = [y_{t=0,u=0}]
w = [w_a]
u = [1]
A = [1]
c = [1]
min b^T y + u^T w
where
y in R^L unconstrained ; w in R_{>=0}^n
subject to
A^T y + w >= -c
i.e.
min y_{t=0, u=0} + w_a
where
y_{t=0, u=0} in R unconstrained
w_a >= 0
subject to
y_{t=0, u=0} + w_a >= -1
This has optimal value of -1
As expected it agrees with primal optimal value.
The solution is nonunique:
any solution s.t.
            w_a >= 0
            y_{t=0, u=0} + w_a = -1
is optimal.
"""
times = [0]
event_types = [0]
z_by_i = {
'a':base.CandidateSet(cost=numpy.float64(1.0), e=numpy.ones(shape=(1, 1), dtype=numpy.float64)),
}
ub_by_i = {
'a': 1.0,
}
u_with_support_t_u = collections.defaultdict(set)
u_with_support_t_u[(0, 0)].add('a')
problem = base.ExactCoverResourcePricingProblem(
times=times,
event_types=event_types,
e_hat = numpy.ones(shape=(1, 1), dtype=numpy.float64),
z_by_i = z_by_i,
ub_by_i = ub_by_i,
i_with_support_t_u=u_with_support_t_u,
)
expected_objective = -1.0
s = solver_factory()
result = s.solve(problem)
assert result is not None
assert numpy.allclose(expected_objective, result.objective)
| 5,349,167 |
def get_source(location, **kwargs):
"""Factory for StubSource Instance.
Args:
location (str): PathLike object or valid URL
Returns:
obj: Either Local or Remote StubSource Instance
"""
try:
utils.ensure_existing_dir(location)
except NotADirectoryError:
return RemoteStubSource(location, **kwargs)
else:
return LocalStubSource(location, **kwargs)
| 5,349,168 |
def about(request):
"""
Prepare and displays the about view of the web application.
Args:
request: django HttpRequest class
Returns:
A django HttpResponse class
"""
template = loader.get_template('about.html')
return HttpResponse(template.render())
| 5,349,169 |
def url(parser, token):
"""Overwrites built in url tag to use . It works identicaly, except that where possible
it will use subdomains to refer to a project instead of a full url path.
For example, if the subdomain is vessel12.domain.com it will refer to a page
'details' as /details/ instead of /site/vessel12/details/
REQUIREMENTS:
* MIDDLEWARE_CLASSES in settings should contain
'core.middleware.subdomain.SubdomainMiddleware'
* These keys should be in the django settings file:
SUBDOMAIN_IS_PROJECTNAME = True
MAIN_HOST_NAME = <your site's hostname>
* APACHE url rewriting should be in effect to rewrite subdomain to
site/project/. To get you started: the following apache config does this
for the domain 'devcomicframework.org'
(put this in your apache config file)
RewriteEngine on
RewriteCond $1 .*/$
RewriteCond $1 !^/site/.*
RewriteCond %{HTTP_HOST} !^devcomicframework\.org$
RewriteCond %{HTTP_HOST} !^www.devcomicframework\.org$
RewriteCond %{HTTP_HOST} ^([^.]+)\.devcomicframework\.org$
RewriteRule (.*) /site/%1$1 [PT]
TODO: turn on and off this behaviour in settings, maybe explicitly define
base domain to also make it possible to use dots in the base domain.
"""
orgnode = defaulttags.url(parser, token)
return comic_URLNode(
orgnode.view_name, orgnode.args, orgnode.kwargs, orgnode.asvar
)
| 5,349,170 |
def _update_environ(dest, src):
"""Overwrite ``environ`` with any additions from the prepared environ.
Does not remove any variables from ``environ``.
"""
# updating os.environ can be a memory leak, so we only update
# those values that actually changed.
for key, value in src.items():
if key not in dest or dest[key] != value:
dest[key] = value
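# Illustrative usage (not part of the original source): only changed or missing
# keys are written into the destination mapping.
_dest = {"PATH": "/usr/bin", "LANG": "C"}
_update_environ(_dest, {"LANG": "en_US.UTF-8", "EDITOR": "vim"})
assert _dest == {"PATH": "/usr/bin", "LANG": "en_US.UTF-8", "EDITOR": "vim"}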
| 5,349,171 |
def getAp(ground_truth, predict, fullEval=False):
"""
Calculate AP at IOU=.50:.05:.95, AP at IOU=.50, AP at IOU=.75
:param ground_truth: {img_id1:{{'position': 4x2 array, 'is_matched': 0 or 1}, {...}, ...}, img_id2:{...}, ...}
:param predict: [{'position':4x2 array, 'img_id': image Id, 'confident': confident}, {...}, ...]
:return: AP, AP at IOU=.50, AP at IOU=.75
"""
is_match = {'is_matched': 0}
ap_050_095 = 0.
ap_050 = 0.
ap_075 = 0.
prec_050_095 = 0.
prec_050 = 0.
prec_075 = 0.
recall_050_095 = 0.
recall_050 = 0.
recall_075 = 0.
if fullEval:
for i in np.arange(0.50, 1.0, 0.05):
for key in ground_truth:
for win_idx in range(len(ground_truth[key])):
ground_truth[key][win_idx].update(is_match) # reset 'is_matched' for all windows
ap, recall, precision = evaluateAP(ground_truth, predict, threshold=i)
if math.isclose(round(i, 2), 0.5):
ap_050 = ap
prec_050 = precision
recall_050 = recall
if math.isclose(round(i, 2), 0.75):
ap_075 = ap
prec_075 = precision
recall_075 = recall
ap_050_095 += ap
prec_050_095 += precision
recall_050_095 += recall
logging.info("threshold:%.2f"%i + " precsion:%.2f"%(precision*100) + " recall:%.2f"%(recall*100))
else:
ap_050, recall_050, prec_050 = evaluateAP(ground_truth, predict, threshold=0.5)
ap_050_095 = ap_050_095 / 10
prec_050_095 = prec_050_095 / 10
recall_050_095 = recall_050_095 / 10
return [ap_050_095, ap_050, ap_075], \
[prec_050_095, prec_050, prec_075], \
[recall_050_095, recall_050, recall_075]
| 5,349,172 |
def aumenta_fome(ani):
""" aumenta_fome: animal --> animal
    Receives an animal and returns it with its hunger value incremented by 1.
"""
if obter_freq_alimentacao(ani) == 0:
return ani
else:
ani['a'][0] += 1
return ani
| 5,349,173 |
def match_inputs(
bp_tree,
table,
sample_metadata,
feature_metadata=None,
ignore_missing_samples=False,
filter_missing_features=False
):
"""Matches various input sources.
Also "splits up" the feature metadata, first by calling
taxonomy_utils.split_taxonomy() on it and then by splitting the resulting
DataFrame into two separate DataFrames (one for tips and one for internal
nodes).
Parameters
----------
bp_tree: bp.BP
The tree to be visualized.
table: pd.DataFrame
Representation of the feature table. The index should describe feature
IDs; the columns should describe sample IDs. (It's expected that
feature IDs in the table only describe tips in the tree, not internal
nodes.)
sample_metadata: pd.DataFrame
Sample metadata. The index should describe sample IDs; the columns
should describe different sample metadata fields' names.
feature_metadata: pd.DataFrame or None
Feature metadata. If this is passed, the index should describe feature
IDs and the columns should describe different feature metadata fields'
names. (Feature IDs here can describe tips or internal nodes in the
tree.)
ignore_missing_samples: bool
If True, pads missing samples (i.e. samples in the table but not the
metadata) with placeholder metadata. If False, raises a
DataMatchingError if any such samples exist. (Note that in either case,
samples in the metadata but not in the table are filtered out; and if
no samples are shared between the table and metadata, a
DataMatchingError is raised regardless.) This is analogous to the
ignore_missing_samples flag in Emperor.
filter_missing_features: bool
If True, filters features from the table that aren't present as tips in
the tree. If False, raises a DataMatchingError if any such features
exist. (Note that in either case, features in the tree but not in the
table are preserved.)
Returns
-------
(table, sample_metadata, tip_metadata, int_metadata):
(pd.DataFrame, pd.DataFrame, pd.DataFrame / None, pd.DataFrame / None)
Versions of the input table, sample metadata, and feature metadata
filtered such that:
-The table only contains features also present as tips in the tree.
-The sample metadata only contains samples also present in the
table.
-Samples present in the table but not in the sample metadata will
have all of their sample metadata values set to "This sample has
no metadata". (This will only be done if ignore_missing_samples is
True; otherwise, this situation will trigger an error. See below.)
-If feature metadata was not passed, tip_metadata and int_metadata
will both be None. Otherwise, tip_metadata will contain the
entries of the feature metadata where the feature name was present
as a tip in the tree, and int_metadata will contain the entries
of the feature metadata where the feature name was present as
internal node(s) in the tree.
-Also, for sanity's sake, this will call
taxonomy_utils.split_taxonomy() on the feature metadata before
splitting it up into tip and internal node metadata.
Raises
------
DataMatchingError
If any of the following conditions are met:
1. No features are shared between the tree's tips and table.
2. There are features present in the table but not as tips in the
tree, AND filter_missing_features is False.
3. No samples are shared between the sample metadata and table.
4. There are samples present in the table but not in the sample
metadata, AND ignore_missing_samples is False.
5. The feature metadata was passed, but no features present in it
are also present as tips or internal nodes in the tree.
References
----------
This function was based on match_table_and_data() in Qurro's code:
https://github.com/biocore/qurro/blob/b9613534b2125c2e7ee22e79fdff311812f4fefe/qurro/_df_utils.py#L255
"""
# Match table and tree.
# (Ignore None-named tips in the tree, which will be replaced later on
# with "default" names like "EmpressNode0".)
tip_names = set(bp_tree.bp_tree_tips())
tree_and_table_features = table.index.intersection(tip_names)
if len(tree_and_table_features) == 0:
# Error condition 1
raise DataMatchingError(
"No features in the feature table are present as tips in the tree."
)
ff_table = table.copy()
if len(tree_and_table_features) < len(table.index):
if filter_missing_features:
# Filter table to just features that are also present in the tree.
#
# Note that we *don't* filter the tree analogously, because it's ok
# for the tree's nodes to be a superset of the table's features
# (and this is going to be the case in most datasets where the
# features correspond to tips, since internal nodes aren't
# explicitly described in the feature table).
ff_table = table.loc[tree_and_table_features]
# Report to user about any dropped features from table.
dropped_feature_ct = table.shape[0] - ff_table.shape[0]
warnings.warn(
(
"{} feature(s) in the table were not present as tips in "
"the tree. These feature(s) have been removed from the "
"visualization."
).format(
dropped_feature_ct
),
DataMatchingWarning
)
else:
# Error condition 2
raise DataMatchingError(
"The feature table contains features that aren't present as "
"tips in the tree. You can override this error by using the "
"--p-filter-missing-features flag."
)
# Match table (post-feature-filtering, if done) and sample metadata.
table_samples = set(ff_table.columns)
sm_samples = set(sample_metadata.index)
sm_and_table_samples = sm_samples & table_samples
if len(sm_and_table_samples) == 0:
# Error condition 3
raise DataMatchingError(
"No samples in the feature table are present in the sample "
"metadata."
)
padded_metadata = sample_metadata.copy()
if len(sm_and_table_samples) < len(ff_table.columns):
if ignore_missing_samples:
# Works similarly to how Emperor does this: see
# https://github.com/biocore/emperor/blob/659b62a9f02a6423b6258c814d0e83dbfd05220e/emperor/core.py#L350
samples_without_metadata = table_samples - sm_samples
padded_metadata = pd.DataFrame(
index=samples_without_metadata,
columns=sample_metadata.columns,
dtype=str
)
padded_metadata.fillna("This sample has no metadata", inplace=True)
sample_metadata = pd.concat([sample_metadata, padded_metadata])
# Report to user about samples we needed to "pad."
warnings.warn(
(
"{} sample(s) in the table were not present in the "
"sample metadata. These sample(s) have been assigned "
"placeholder metadata."
).format(
len(samples_without_metadata)
),
DataMatchingWarning
)
else:
# Error condition 4
raise DataMatchingError(
"The feature table contains samples that aren't present in "
"the sample metadata. You can override this error by using "
"the --p-ignore-missing-samples flag."
)
# If we've made it this far, then there must be at least *one* sample
# present in both the sample metadata and the table: and by this point the
# metadata's samples should be a superset of the table's samples (since we
# padded the metadata above if there were any samples that *weren't* in the
# table).
#
# All that's left to do is to filter the sample metadata to just the
# samples that are also present in the table.
sf_sample_metadata = sample_metadata.loc[ff_table.columns]
# If desired, we could report here to the user about any dropped samples
# from the metadata by looking at the difference between
# sample_metadata.shape[0] and sf_sample_metadata.shape[0]. However, the
# presence of such "dropped samples" is a common occurrence in 16S studies,
# so we currently don't do that for the sake of avoiding alarm fatigue.
# If the feature metadata was passed, filter it so that it only contains
# features present as tips / internal nodes in the tree
tip_metadata = None
int_metadata = None
if feature_metadata is not None:
# Split up taxonomy column, if present in the feature metadata
ts_feature_metadata = taxonomy_utils.split_taxonomy(feature_metadata)
fm_ids = ts_feature_metadata.index
# Subset tip metadata
fm_and_tip_features = fm_ids.intersection(tip_names)
tip_metadata = ts_feature_metadata.loc[fm_and_tip_features]
# Subset internal node metadata
internal_node_names = set(bp_tree.bp_tree_non_tips())
fm_and_int_features = fm_ids.intersection(internal_node_names)
int_metadata = ts_feature_metadata.loc[fm_and_int_features]
if len(tip_metadata.index) == 0 and len(int_metadata.index) == 0:
# Error condition 5
raise DataMatchingError(
"No features in the feature metadata are present in the tree, "
"either as tips or as internal nodes."
)
return ff_table, sf_sample_metadata, tip_metadata, int_metadata
| 5,349,174 |
def test_writing_csv_files():
"""Tests writing a csv file.
:raises:
:rtype:
"""
x = datetime.datetime.now()
headers = ["Header1", "Header2", "Header3"]
content = [["Column1", "Column2", "Column3"],
["Row2C1", "Row2C2", "Row2C3"],
["Row3C1", "Row3C2", "Row3C3"]
]
file_name = f"/test_writing_csv_file_{x}.csv"
full_path = f"{path}{file_name}"
writing_files.write_csv_file(full_path, headers, content)
response = reading_files.reading_csv_file(full_path)
combine = list()
combine.append(headers)
for row in content:
combine.append(row)
assert response
assert response == combine
| 5,349,175 |
def im_adjust(img, tol=1, bit=8):
"""
Adjust contrast of the image
"""
limit = np.percentile(img, [tol, 100 - tol])
im_adjusted = im_bit_convert(img, bit=bit, norm=True, limit=limit.tolist())
return im_adjusted
| 5,349,176 |
def FibanocciSphere(samples=1):
""" Return a Fibanocci sphere with N number of points on the surface.
This will act as the template for the nanoparticle core.
Args:
Placeholder
Returns:
Placeholder
Raises:
Placeholder
"""
points = []
phi = math.pi * (3. - math.sqrt(5.)) # golden angle in radians
for i in range(samples):
y = 1 - (i / float(samples - 1)) * 2 # y goes from 1 to -1
radius = math.sqrt(1 - y * y) # radius at y
theta = phi * i # golden angle increment
x = math.cos(theta) * radius
z = math.sin(theta) * radius
points.append((x, y, z))
return points
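# Illustrative usage (not part of the original source); assumes the module-level
# `import math` used by the function above. Every point lies on the unit sphere.
_pts = FibanocciSphere(samples=100)
assert len(_pts) == 100
assert all(abs(x * x + y * y + z * z - 1.0) < 1e-9 for x, y, z in _pts)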
| 5,349,177 |
def get_code():
"""
returns the code for the activity_selection function
"""
return inspect.getsource(activity_selection)
| 5,349,178 |
def calc_luminosity(flux, fluxerr, mu):
""" Normalise flux light curves with distance modulus.
Parameters
----------
flux : array
List of floating point flux values.
fluxerr : array
List of floating point flux errors.
mu : float
Distance modulus from luminosity distance.
Returns
-------
fluxout : array
Same shape as input flux.
fluxerrout : array
Same shape as input fluxerr.
"""
d = 10 ** (mu/5 + 1)
dsquared = d**2
norm = 1e18
fluxout = flux * (4 * np.pi * dsquared/norm)
fluxerrout = fluxerr * (4 * np.pi * dsquared/norm)
return fluxout, fluxerrout
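# Illustrative usage (not part of the original source); assumes the module-level
# `import numpy as np` used by the function above. mu=40 corresponds to a
# luminosity distance of 10**9 pc under the convention d = 10**(mu/5 + 1).
_flux = np.array([1.0, 2.0])
_fluxerr = np.array([0.1, 0.2])
_lum, _lumerr = calc_luminosity(_flux, _fluxerr, mu=40.0)
# Outputs keep the input shape and share one scale factor of 4*pi*d**2/norm.
assert _lum.shape == _flux.shape and np.allclose(_lum / _flux, _lumerr / _fluxerr)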
| 5,349,179 |
def app(testdir):
"""Provide instance for basic Flask app."""
app = flask.Flask(__name__)
app.config['TESTING'] = True
# This config value is required and must be supplied.
app.config['HASHFS_ROOT_FOLDER'] = str(testdir)
with app.app_context():
yield app
| 5,349,180 |
def download_file(url: str, destination: str, timeout: Optional[int] = None,
silent: Optional[bool] = False) -> str:
"""
Downloads file by given URL to destination dir.
"""
file_name = get_file_name_from_url(url)
file_path = join(destination, file_name)
parsed_url: ParseResult = urlparse(url)
with urlopen(url, timeout=timeout) as resp:
code: int = resp.getcode()
if parsed_url.scheme != 'file' and code != 200:
raise IOError(f'Bad HTTP response code: {code}')
total = int(resp.getheader('Content-Length')) if parsed_url.scheme != 'file' \
else os.path.getsize(parsed_url.path)
if not isfile(file_path) or getsize(file_path) != total:
if not silent:
echo(f'Downloading {file_name}')
with open(file_path, 'wb') as file, \
progressbar(length=total,
width=PROGRESS_BAR_WIDTH,
bar_template=PROGRESS_BAR_TEMPLATE) as progress_bar:
while True:
chunk = resp.read(CHUNK_SIZE)
if not chunk:
break
file.write(chunk)
if not silent:
progress_bar.update(len(chunk))
return file_path
| 5,349,181 |
def inference(images):
"""Build the CIFAR-10 model.
Args:
images: Images returned from distorted_inputs() or inputs().
Returns:
Logits.
"""
###
# We instantiate all variables using tf.get_variable() instead of
# tf.Variable() in order to share variables across multiple GPU training runs.
# If we only ran this model on a single GPU, we could simplify this function
# by replacing all instances of tf.get_variable() with tf.Variable().
#
# conv1
#xavier = tf.contrib.layers.xavier_initializer_conv2d()
with tf.variable_scope('conv1') as scope:
kernel1 = _variable_with_weight_decay('weights',
shape=[3, 3, 3, 128],
stddev=5e-2,
wd=None)
conv = tf.nn.conv2d(images, kernel1, [1, 2, 2, 1], padding='SAME')
#conv = tf.nn.dropout(conv, 0.9)
biases1 = cifar10._variable_on_cpu('biases', [128], tf.constant_initializer(0.0))
pre_activation = tf.nn.bias_add(conv, biases1)
conv1 = tf.nn.relu(pre_activation, name = scope.name)
cifar10._activation_summary(conv1)
norm1 = tf.contrib.layers.batch_norm(conv1, scale=True, is_training=True, updates_collections=None)
# conv2
with tf.variable_scope('conv2') as scope:
kernel2 = _variable_with_weight_decay('weights',
shape=[5, 5, 128, 128],
stddev=5e-2,
wd=None)
conv = tf.nn.conv2d(norm1, kernel2, [1, 1, 1, 1], padding='SAME')
biases2 = cifar10._variable_on_cpu('biases', [128], tf.constant_initializer(0.1))
pre_activation = tf.nn.bias_add(conv, biases2)
conv2 = tf.nn.relu(pre_activation, name = scope.name)
#conv2 = tf.nn.dropout(conv2, 0.9)
cifar10._activation_summary(conv2)
# concat conv2 with norm1 to increase the number of features, this step does not affect the Differential_Privacy preserving guarantee
current = tf.concat((conv2, norm1), axis=3)
# norm2
norm2 = tf.contrib.layers.batch_norm(current, scale=True, is_training=True, updates_collections=None)
# conv3
with tf.variable_scope('conv3') as scope:
kernel3 = _variable_with_weight_decay('weights',
shape=[5, 5, 256, 256],
stddev=5e-2,
wd=None)
conv = tf.nn.conv2d(norm2, kernel3, [1, 1, 1, 1], padding='SAME')
biases3 = cifar10._variable_on_cpu('biases', [256], tf.constant_initializer(0.1))
pre_activation = tf.nn.bias_add(conv, biases3)
conv3 = tf.nn.relu(pre_activation, name = scope.name)
#conv3 = tf.nn.dropout(conv3, 0.9)
cifar10._activation_summary(conv3)
# norm3
norm3 = tf.contrib.layers.batch_norm(conv3, scale=True, is_training=True, updates_collections=None)
#pool3, row_pooling_sequence, col_pooling_sequence = tf.nn.fractional_max_pool(norm3, pooling_ratio=[1.0, 2.0, 2.0, 1.0])
pool3 = avg_pool(norm3, 2)
# local4
with tf.variable_scope('local4') as scope:
weights1 = cifar10._variable_with_weight_decay('weights', shape=[5 * 5 * 256, hk],
stddev=0.04, wd=None)
biases4 = cifar10._variable_on_cpu('biases', [hk], tf.constant_initializer(0.1))
h_pool2_flat = tf.reshape(pool3, [-1, 5*5*256]);
z2 = tf.add(tf.matmul(h_pool2_flat, weights1), biases4, name=scope.name)
#Applying normalization for the flat connected layer h_fc1#
batch_mean2, batch_var2 = tf.nn.moments(z2,[0])
scale2 = tf.Variable(tf.ones([hk]))
beta2 = tf.Variable(tf.zeros([hk]))
BN_norm = tf.nn.batch_normalization(z2,batch_mean2,batch_var2,beta2,scale2,1e-3)
###
local4 = max_out(BN_norm, hk)
cifar10._activation_summary(local4)
"""print(images.get_shape());
print(norm1.get_shape());
print(norm2.get_shape());
print(pool3.get_shape());
print(local4.get_shape());"""
# linear layer(WX + b),
# We don't apply softmax here because
# tf.nn.sparse_softmax_cross_entropy_with_logits accepts the unscaled logits
# and performs the softmax internally for efficiency.
weights2 = cifar10._variable_with_weight_decay('weights', [hk, 10],
stddev=1/(hk*1.0), wd=0.0)
biases5 = cifar10._variable_on_cpu('biases', [10],
tf.constant_initializer(0.0))
softmax_linear = tf.add(tf.matmul(local4, weights2), biases5, name=scope.name)
cifar10._activation_summary(softmax_linear)
return softmax_linear
| 5,349,182 |
def euler237_():
"""Solution for problem 237."""
pass
| 5,349,183 |
def cluster_molecules(mols, cutoff=0.6):
"""
Cluster molecules by fingerprint distance using the Butina algorithm.
Parameters
----------
mols : list of rdkit.Chem.rdchem.Mol
List of molecules.
cutoff : float
Distance cutoff Butina clustering.
Returns
-------
pandas.DataFrame
Table with cluster ID - molecule ID pairs.
"""
# Generate fingerprints
fingerprints = _generate_fingerprints(mols)
# Calculate Tanimoto distance matrix
distance_matrix = _get_tanimoto_distance_matrix(fingerprints)
# Now cluster the data with the implemented Butina algorithm
clusters = Butina.ClusterData(distance_matrix, len(fingerprints), cutoff, isDistData=True)
# Sort clusters by size
clusters = sorted(clusters, key=len, reverse=True)
# Get cluster ID - molecule ID pairs
clustered_molecules = []
for cluster_id, molecule_ids in enumerate(clusters, start=1):
for cluster_member_id, molecule_id in enumerate(molecule_ids, start=1):
clustered_molecules.append([cluster_id, cluster_member_id, molecule_id])
clustered_molecules = pd.DataFrame(
clustered_molecules, columns=["cluster_id", "cluster_member_id", "molecule_id"]
)
# Print details on clustering
print("Number of molecules:", len(fingerprints))
print("Threshold: ", cutoff)
print("Number of clusters: ", len(clusters))
print(
"# Clusters with only 1 molecule: ",
len([cluster for cluster in clusters if len(cluster) == 1]),
)
print(
"# Clusters with more than 5 molecules: ",
len([cluster for cluster in clusters if len(cluster) > 5]),
)
print(
"# Clusters with more than 25 molecules: ",
len([cluster for cluster in clusters if len(cluster) > 25]),
)
print(
"# Clusters with more than 100 molecules: ",
len([cluster for cluster in clusters if len(cluster) > 100]),
)
return clustered_molecules
| 5,349,184 |
def exec_benchmarks_empty_inspection(code_to_benchmark, repeats):
"""
Benchmark some code without mlinspect and with mlinspect with varying numbers of inspections
"""
benchmark_results = {
"no mlinspect": timeit.repeat(stmt=code_to_benchmark.benchmark_exec, setup=code_to_benchmark.benchmark_setup,
repeat=repeats, number=1),
"no inspection": benchmark_code_str_with_inspections(code_to_benchmark.benchmark_exec_func_str,
code_to_benchmark.benchmark_setup_func_str, "[]",
repeats),
"one inspection": benchmark_code_str_with_inspections(code_to_benchmark.benchmark_exec_func_str,
code_to_benchmark.benchmark_setup_func_str,
"[EmptyInspection(0)]", repeats),
"two inspections": benchmark_code_str_with_inspections(code_to_benchmark.benchmark_exec_func_str,
code_to_benchmark.benchmark_setup_func_str,
"[EmptyInspection(0), EmptyInspection(1)]", repeats),
"three inspections": benchmark_code_str_with_inspections(code_to_benchmark.benchmark_exec_func_str,
code_to_benchmark.benchmark_setup_func_str,
"[EmptyInspection(0), " +
"EmptyInspection(1), EmptyInspection(2)]", repeats)}
return benchmark_results
| 5,349,185 |
def detect_version():
"""
Try to detect the main package/module version by looking at:
module.__version__
otherwise, return 'dev'
"""
try:
m = __import__(package_name, fromlist=['__version__'])
return getattr(m, '__version__', 'dev')
except ImportError:
pass
return 'dev'
| 5,349,186 |
def model(p, x):
""" Evaluate the model given an X array """
return p[0] + p[1]*x + p[2]*x**2. + p[3]*x**3.
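# Illustrative usage (not part of the original source): evaluate the cubic
# y = 1 + 2x + 3x^2 + 4x^3 at x = 2 (a scalar works just as well as an array).
assert model([1.0, 2.0, 3.0, 4.0], 2.0) == 1.0 + 4.0 + 12.0 + 32.0   # 49.0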
| 5,349,187 |
def normalize(x:"tensor|np.ndarray") -> "tensor|np.ndarray":
"""Min-max normalization (0-1):
:param x:"tensor|np.ndarray":
:returns: Union[Tensor,np.ndarray] - Return same type as input but scaled between 0 - 1
"""
return (x - x.min())/(x.max()-x.min())
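# Illustrative usage (not part of the original source); numpy is assumed to be
# imported as np, matching the type hints above.
_x = np.array([2.0, 4.0, 6.0])
assert np.allclose(normalize(_x), [0.0, 0.5, 1.0])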
| 5,349,188 |
def test_valid_amendment_adddebtors():
"""Assert that the schema is performing as expected for a amendment to add debtors."""
statement = copy.deepcopy(AMENDMENT_STATEMENT)
del statement['baseDebtor']
del statement['removeTrustIndenture']
del statement['addTrustIndenture']
del statement['addSecuredParties']
del statement['deleteSecuredParties']
del statement['deleteDebtors']
del statement['deleteVehicleCollateral']
del statement['addVehicleCollateral']
del statement['deleteGeneralCollateral']
del statement['addGeneralCollateral']
is_valid, errors = validate(statement, 'amendmentStatement', 'ppr')
if errors:
for err in errors:
print(err.message)
print(errors)
assert is_valid
| 5,349,189 |
def apply_on_multi_fasta(file, function, *args):
"""Apply a function on each sequence in a multiple FASTA file (DEPRECATED).
file - filename of a FASTA format file
function - the function you wish to invoke on each record
*args - any extra arguments you want passed to the function
This function will iterate over each record in a FASTA file as SeqRecord
objects, calling your function with the record (and supplied args) as
arguments.
This function returns a list. For those records where your function
returns a value, this is taken as a sequence and used to construct a
FASTA format string. If your function never has a return value, this
means apply_on_multi_fasta will return an empty list.
"""
import warnings
import Bio
warnings.warn("apply_on_multi_fasta is deprecated", Bio.BiopythonDeprecationWarning)
try:
f = globals()[function]
except:
raise NotImplementedError("%s not implemented" % function)
handle = open(file, 'r')
records = SeqIO.parse(handle, "fasta")
results = []
for record in records:
arguments = [record.sequence]
for arg in args: arguments.append(arg)
result = f(*arguments)
if result:
results.append('>%s\n%s' % (record.name, result))
handle.close()
return results
| 5,349,190 |
def update_contracts_esi(force_sync=False, user_pk=None) -> None:
"""start syncing contracts"""
_get_contract_handler().update_contracts_esi(force_sync, user=_get_user(user_pk))
| 5,349,191 |
def resize_bbox(box, image_size, resize_size):
"""
Args:
box: iterable (ints) of length 4 (x0, y0, x1, y1)
image_size: iterable (ints) of length 2 (width, height)
resize_size: iterable (ints) of length 2 (width, height)
Returns:
new_box: iterable (ints) of length 4 (x0, y0, x1, y1)
"""
check_box_convention(np.array(box), 'x0y0x1y1')
box_x0, box_y0, box_x1, box_y1 = map(float, box)
image_w, image_h = map(float, image_size)
new_image_w, new_image_h = map(float, resize_size)
newbox_x0 = box_x0 * new_image_w / image_w
newbox_y0 = box_y0 * new_image_h / image_h
newbox_x1 = box_x1 * new_image_w / image_w
newbox_y1 = box_y1 * new_image_h / image_h
return int(newbox_x0), int(newbox_y0), int(newbox_x1), int(newbox_y1)
| 5,349,192 |
def plot_emoji_heatmap(df, size=(20, 5), agg='from', axs=None):
"""
Plot an emoji heatmap according to the specified column passed as agg parameter
Eg. if agg='From' this plots a heatmap according to the smileys/emojis used by a person
if agg= df.time.dt.hour will give a heatmap of emojis used at some time of the hour
:param axs:
:type axs:
:param df:
:type df:
:param size:
:type size:
:param agg:
:type agg:
:return:
:rtype:
"""
df_smiley = df.groupby(agg)['emojis'].agg(['count', __custom_smiley_aggregator])
ls_smiley = []
for x in df_smiley.itertuples():
for smiley, count in x._2:
ls_smiley.append((x.Index, smiley, count))
df_smiley_reduced = pd.DataFrame(ls_smiley, columns=["agg", "smiley", "count"])
df_smiley_reduced = df_smiley_reduced.pivot_table('count', ['agg'], 'smiley').fillna(0)
sns.set(rc={'figure.figsize': size})
sns.heatmap(df_smiley_reduced.transpose(), cmap="Blues", ax=axs)
| 5,349,193 |
def spin_polarize(inp, mpol=1):
"""
Add a collinear spin polarization to the system.
Arguments:
mpol (int): spin polarization in Bohr magneton units.
"""
__set__(inp, 'dft', 'nspin', 2)
__set__(inp, 'dft', 'mpol', mpol)
| 5,349,194 |
def perf_counter_ms():
"""Returns a millisecond performance counter"""
return time.perf_counter() * 1_000
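# Illustrative usage (not part of the original source); assumes the module-level
# `import time` used by the function above.
_start = perf_counter_ms()
time.sleep(0.05)
print(f"slept for ~{perf_counter_ms() - _start:.1f} ms")   # roughly 50 ms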
| 5,349,195 |
def print_feed(items_objects):
"""Printing the results from all urls"""
print("---------------------------")
print("Number of RSS posts: ", len(items_objects))
print("---------------------------")
for item_object in items_objects:
# Used html.unescape for the conversion of named and numeric character references in the rss feed response to
# the corresponding Unicode characters
print("Title: " + html.unescape(item_object.title))
print("Description: " + html.unescape(item_object.description))
print("Link: " + html.unescape(item_object.link))
print("Publish Date: " + item_object.pub_date)
print("********************* \n")
| 5,349,196 |
def make_shutdown_packet():
    """Create a shutdown packet."""
    packet = struct.pack("<B", OP_SHUTDOWN)
    return packet
| 5,349,197 |
def unique():
"""Return unique identification number."""
global uniqueLock
global counter
with uniqueLock:
counter = counter + 1
return counter
| 5,349,198 |
def shortcut_download(dataset, compression_type='tar.gz'):
"""Download and unpack pre-processed dataset"""
if compression_type not in ['tar.gz', 'zip']:
print('Warning! Wrong compression format. Changing to tar.gz')
compression_type = 'tar.gz'
if dataset == 'reddit_casual' and compression_type == 'zip':
print('Warning! Zip format is not supported for reddit casual dataset due to file size. Changing to tar.gz')
compression_type = 'tar.gz'
if not os.path.exists(datasets_dir):
os.makedirs(datasets_dir)
compressed_url = f'https://affect.media.mit.edu/neural_chat/datasets/{dataset}_preprocessed.{compression_type}'
compressed_file_dir = datasets_dir.joinpath(dataset)
compressed_file_path = datasets_dir.joinpath(f'{dataset}_preprocessed.{compression_type}')
# Prepare Dialog data
if not os.path.exists(compressed_file_dir):
print(f'Downloading {compressed_url} to {compressed_file_path}')
urlretrieve(compressed_url, compressed_file_path)
print(f'Successfully downloaded {compressed_file_path}')
if compression_type == 'tar.gz':
tar_ref = tarfile.open(compressed_file_path, 'r:gz')
for member in tar_ref.getmembers():
try:
tar_ref.extract(member, path=datasets_dir)
print(f'Extracting {member.name}: OK')
except Exception as e:
print(f'Extracting {member.name}: ERROR - {e}')
tar_ref.close()
elif compression_type == 'zip':
zip_ref = ZipFile(compressed_file_path, mode='r')
for member in zip_ref.infolist():
try:
zip_ref.extract(member, path=datasets_dir)
print(f'Extracting {member}: OK')
except Exception as e:
print(f'Extracting {member}: ERROR - {e}')
zip_ref.close()
print(f'Successfully extracted {compressed_file_path}')
else:
print('Directory already exists. Aborting download.')
| 5,349,199 |