content (string, 0–894k) | type (2 classes)
---|---|
ACTION_CLEAN = 'clean'
ACTION_CREATE_USERDEF = 'createuserdef'
ACTION_PREPARE = 'prepare'
ACTION_BUILD = 'build'
ACTION_BACKUP = 'backup'
ACTION_CREATE_NUGET = 'createnuget'
ACTION_PUBLISH_NUGET = 'publishnuget'
ACTION_UPDATE_SAMPLE = 'updatesample'
ACTION_RELEASE_NOTES = 'releasenotes'
ACTION_UPLOAD_BACKUP = 'uploadbackup'
ACTION_RUN_UNITTESTS = 'rununittests'
MAX_SDK_ROOT_PATH_LENGTH = 64
| python |
# AI_Javaher
# This is the first session of the GDAL/OGR tutorial.
# GDAL installation video: https://www.youtube.com/watch?v=YsdHWT-hA4k&list=PLFhf3UaNX_xc8ivjt773rAjGNoAfz_ELm&index=2
# The video walking through this code is on YouTube: https://www.youtube.com/watch?v=F1jaX9vmhIk
# The full GDAL tutorial playlist: https://www.youtube.com/playlist?list=PLFhf3UaNX_xc8ivjt773rAjGNoAfz_ELm
# More videos about artificial intelligence: https://www.youtube.com/channel/UCxKMssgH5eai60XeIuvg-dg
########################## GDAL_Read vector ##########################
from osgeo import ogr
mnh_shp = ogr.Open('D:\\youtube\\GDAL\\GDAL_introduction\\data\\manhattan\\manhattan_zone.shp',0)
mnh_lyr = mnh_shp.GetLayer(0)
mnh_feature_num = mnh_lyr.GetFeatureCount()
# print(mnh_feature_num)
mnh_feature = mnh_lyr.GetFeature(0)
# print(mnh_feature.zone)
# print(mnh_feature.LocationID)
mnh_feature_last = mnh_lyr.GetFeature(mnh_feature_num-1)
# print(mnh_feature_last.LocationID,'last')
for f in mnh_lyr:
# print(f.zone)
geo = f.geometry()
print(geo)
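
# Hedged addition (not part of the original tutorial code): attribute values can
# also be read by field name; 'zone' and 'LocationID' are taken from the
# commented-out prints above.
mnh_lyr.ResetReading()
for f in mnh_lyr:
    print(f.GetField('zone'), f.GetField('LocationID'))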
| python |
# Builtin
import os
import unittest
# Internal
from nxt import stage, nxt_layer
class TestReferences(unittest.TestCase):
def test_reference_by_path(self):
test_dir = os.path.dirname(__file__)
empty_path = os.path.join(test_dir, 'empty.nxt')
pre_test = stage.Stage.load_from_filepath(empty_path).build_stage()
# assert that empty is empty
self.assertEqual(0, len(pre_test.descendants()))
# Test adding reference
empty_spec_layer = nxt_layer.SpecLayer.load_from_filepath(empty_path)
empty_spec_layer.add_reference('ref_test.nxt')
temporary_graph_path = os.path.join(test_dir, 'IWILLBEDELTED.nxt')
empty_spec_layer.save(temporary_graph_path)
# Rebuild stage and verify
stage_with_ref = stage.Stage.load_from_filepath(temporary_graph_path)
comp_layer_with_ref = stage_with_ref.build_stage()
# Remove before asserting, to clean up even on failure.
os.remove(temporary_graph_path)
self.assertIsNotNone(comp_layer_with_ref.lookup('/i_am_here'))
def test_reference_by_obj(self):
test_dir = os.path.dirname(__file__)
empty_path = os.path.join(test_dir, 'empty.nxt')
pre_test = stage.Stage.load_from_filepath(empty_path).build_stage()
# assert that empty is empty
self.assertEqual(0, len(pre_test.descendants()))
# Test adding reference
empty_spec_layer = nxt_layer.SpecLayer.load_from_filepath(empty_path)
ref_path = os.path.join(test_dir, 'ref_test.nxt')
ref_test_spec_layer = nxt_layer.SpecLayer.load_from_filepath(ref_path)
empty_spec_layer.add_reference(layer=ref_test_spec_layer)
temporary_graph_path = os.path.join(test_dir, 'IWILLBEDELTED.nxt')
empty_spec_layer.save(temporary_graph_path)
# Rebuild stage and verify
stage_with_ref = stage.Stage.load_from_filepath(temporary_graph_path)
comp_layer_with_ref = stage_with_ref.build_stage()
# Remove before asserting, to clean up even on failure.
os.remove(temporary_graph_path)
self.assertIsNotNone(comp_layer_with_ref.lookup('/i_am_here'))
| python |
from Utilities import *
def convert_type(in_type: str) -> str:
if in_type == 'bit':
return 'boolean'
if in_type == 'datetime':
return 'Date'
if in_type == 'mediumtext':
return 'String'
if in_type == 'nonnegativeinteger':
return 'int'
if in_type == 'phone':
return 'String'
if in_type == 'shorttext':
return 'String'
if in_type == 'spatialpoint':
return 'String'
if in_type == 'varchar':
return 'String'
if in_type == 'year':
return 'int'
return in_type
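# Illustrative examples (added): convert_type('datetime') returns 'Date',
# convert_type('bit') returns 'boolean', and any type without a mapping, such
# as 'String', is passed through unchanged.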
class WriteGosu:
def write(self):
print('Writing Gosu Classes')
for structure in self.plant_structures:
self.package_path = maybe_create_package(self.json_config['target_directory'], structure.package)
self.create_class(structure)
def create_class(self, in_structure: PlantContent):
class_file_name = self.package_path
class_file_name = class_file_name + '/' + in_structure.name + '.gs'
file = open(class_file_name, 'w')
file.write(f'package {in_structure.package}\n')
file.write('\n')
self.create_uses(file, in_structure)
create_wording(file, '/**', '/*', ' * ')
class_type = in_structure.type
if class_type == 'abstract':
file.write('abstract class ' + in_structure.name)
elif class_type == 'interface':
file.write('interface ' + in_structure.name)
else:
file.write('class ' + in_structure.name)
if len(in_structure.extensions) > 0:
file.write(' extends ')
for idx, extends_name in enumerate(in_structure.extensions):
file.write(extends_name)
if idx + 1 < len(in_structure.extensions):
file.write(', ')
if len(in_structure.implements) > 0:
file.write(' implements ')
for idx, implements_name in enumerate(in_structure.implements):
file.write(implements_name)
if idx + 1 < len(in_structure.implements):
file.write(', ')
file.write(' { \n\n')
if len(in_structure.variables) > 0:
self.create_variables(file, in_structure)
file.write('\n')
if not class_type == "interface":
file.write(' construct() {\n')
file.write(' }\n')
file.write('\n')
if len(in_structure.methods) > 0:
self.create_methods(file, in_structure)
file.write('\n')
file.write('}')
file.close()
def create_uses(self, file, in_structure: PlantContent):
"""
Create the uses statements. Some are created while processing the puml file,
but a few data types need additional ones; those are added here.
"""
for var in in_structure.variables:
if var.type == 'BigDecimal':
in_structure.add_implement('java.math.BigDecimal')
if var.type == 'Date':
in_structure.add_implement('java.util.Date')
if len(in_structure.imports) == 0:
return
for uses in in_structure.imports:
file.write(f'uses {uses}\n')
file.write('\n')
return self
def create_variables(self, file, in_structure: PlantContent):
for variable in in_structure.variables:
var_name = '_' + variable.name[0].lower() + variable.name[1:]
var_as = variable.name[0].upper() + variable.name[1:]
var_type = convert_type(variable.type)
if variable.scope == 'protected':
file.write(f' protected var {var_name} : {var_type} as {var_as}\n')
if variable.scope == 'private':
file.write(f' var {var_name} : {var_type}\n')
if variable.scope == 'public':
file.write(f' var {var_name} : {var_type} as {var_as}\n')
return self
def create_methods(self, file, in_structure: PlantContent):
for method in in_structure.methods:
method_name = method.name
method_return_type = convert_type(method.return_type)
method_scope = method.scope
file.write(f' {method_scope} function ' + method_name + ' (')
for idx, param in enumerate(method.parameters):
param_type = method.parameters[param]
file.write(f'{param} : {param_type}')
if idx + 1 < len(method.parameters):
file.write(', ')
file.write(') ')
if not method_return_type == '':
file.write(': ' + method_return_type + ' ')
file.write('{\n')
if not method_return_type == '':
file.write(' return null\n')
file.write(' }\n\n')
for composition in in_structure.compositions:
method_name = 'addTo' + composition.alternate[0].upper() + composition.alternate[1:]
file.write(' public function ' + method_name + ' (')
file.write('inItem : ' + composition.type + ') {\n')
file.write(' }\n')
file.write('\n')
method_name = 'removeFrom' + composition.alternate[0].upper() + composition.alternate[1:]
file.write(' public function ' + method_name + ' (')
file.write('inItem : ' + composition.type + ') {\n')
file.write(' }\n')
return self
def create_composition(self, file, in_structure: PlantContent):
pass
def __init__(self, in_json_config, in_plant_structures: list[PlantContent]):
self.json_config = in_json_config
self.plant_structures = in_plant_structures
self.package_path = ''
| python |
from django.conf.urls import url
from . import views
urlpatterns = [
url(r'^(?P<pk>[0-9]+)$', views.DocumentDetailView.as_view(), name='document_detail'),
url(r'^create/$', views.DocumentCreate.as_view(), name='document_create'),
url(r'^update/(?P<pk>[0-9]+)$', views.DocumentUpdate.as_view(), name='document_update'),
url(r'^delete/(?P<pk>[0-9]+)$', views.DocumentDelete.as_view(), name='document_delete'),
]
| python |
from django.db import models
class StatisticsMemory(models.Model):
value = models.FloatField()
| python |
""" AUTHTAB.DIR file parser. """
from pybycus.file import File
class AuthTab(File):
""" The Author List (with the filename AUTHTAB.DIR) contains
descriptive information for each text file on the disc. The
purpose of the Author Table is to allow the user to ask for
the author Plato, for example, without having to know that
the actual file name is TLG0059. Each entry contains the
author name, the corresponding file name, synonyms, remarks,
and language. The entries are arranged by category. """
def __init__(self, path):
super().__init__(path)
while True:
# An (optional) synonym for the author name is introduced by a
# byte of hex 80 and is terminated by the first byte value above
# hex 7f. Up to five synonyms are allowed for each author name.
# pylint: disable=E0601
if self.peek_ubyte() == 0x80:
_ = self.read_ubyte()
synonym = self.read_string()
entry["aliases"].append(synonym)
assert len(entry["aliases"]) <= 5
# The (optional) remarks field is introduced by a byte of hex 81
# and is terminated by the first byte value above hex 7f.
elif self.peek_ubyte() == 0x81:
assert False
# The optional file size field is introduced by a byte of hex 82
# and is terminated by the first byte value above hex 7f.
elif self.peek_ubyte() == 0x82:
assert False
# The optional language code field is introduced by a byte of hex 83
# and is terminated by the first byte value above hex 7f.
elif self.peek_ubyte() == 0x83:
_ = self.read_ubyte()
language_code = self.read_string()
entry["language_code"] = language_code
# The entry is terminated by at least one hex ff (decimal 255). A
# second ff is used when needed to pad the entry to an even byte
# boundary.
elif self.peek_ubyte() == 0xff:
_ = self.read_ubyte()
# Each entry begins with a file name (without any file name
# extension) on an even byte boundary. The name is padded with
# blanks if necessary to reach the fixed length of 8 bytes.
else:
# If the file name starts with an asterisk, it is a library
# name (four characters including the asterisk). In this case
# the second four bytes are the binary length of the library
# (including the 8 bytes for the asterisk, name and length).
if chr(self.peek_ubyte()) == '*':
name = self.read_nstring(4)
# If the file name starts *END it marks the end of the
# list. The second four bytes are binary zeroes.
if name == "*END":
padding = self.read_uint()
assert len(name) == 4 and padding == 0x0000
break
listlen = self.read_uint()
title = self.read_string()
library = {"name": name, "title": title, "entries": []}
self._content.append(library)
# The full author name (of any reasonable length) starts after
# the filename and is terminated by the first byte value above
# 7f (decimal 127).
else:
filename = self.read_string()
entry = {"id": filename[:7],
"name": filename[8:],
"aliases": []}
library["entries"].append(entry)
def content(path):
""" Return the content of an AUTHTAB.DIR file. """
return AuthTab(path).content()
if __name__ == "__main__":
import sys
import pprint
pprint.pprint(content(sys.argv[1]))
| python |
"""
Get an admin token for KeyCloak.
"""
import logging
from functools import partial
import requests
from rest_tools.server import from_environment
from rest_tools.client import RestClient
def get_token(url, client_id, client_secret, client_realm='master'):
url = f'{url}/auth/realms/{client_realm}/protocol/openid-connect/token'
args = {
'grant_type': 'client_credentials',
'client_id': client_id,
'client_secret': client_secret,
}
logging.debug(f'get_token() url: {url} client_id: {client_id}')
r = requests.post(url, data=args)
r.raise_for_status()
req = r.json()
return req['access_token']
def get_rest_client(retries=None, timeout=10):
config = from_environment({
'KEYCLOAK_REALM': None,
'KEYCLOAK_URL': None,
'KEYCLOAK_CLIENT_ID': 'rest-access',
'KEYCLOAK_CLIENT_SECRET': None,
'KEYCLOAK_CLIENT_REALM': 'master',
})
token_func = partial(get_token, config["KEYCLOAK_URL"],
client_id=config['KEYCLOAK_CLIENT_ID'],
client_secret=config['KEYCLOAK_CLIENT_SECRET'],
client_realm=config['KEYCLOAK_CLIENT_REALM'],
)
kwargs = {'timeout': timeout}
if retries:
kwargs['retries'] = retries
return RestClient(
f'{config["KEYCLOAK_URL"]}/auth/admin/realms/{config["KEYCLOAK_REALM"]}',
token=token_func,
**kwargs
)
def main():
import argparse
from pprint import pprint
parser = argparse.ArgumentParser(description='Keycloak tokens')
subparsers = parser.add_subparsers()
parser_get = subparsers.add_parser('get', help='get token')
parser_get.add_argument('url', help='keycloak base url')
parser_get.add_argument('client_id', help='keycloak client id')
parser_get.add_argument('client_secret', help='keycloak client secret')
parser_get.set_defaults(func=get_token)
args = vars(parser.parse_args())
logging.basicConfig(format='%(levelname)s: %(message)s', level=logging.INFO)
func = args.pop('func')
ret = func(**args)
if ret is not None:
pprint(ret)
if __name__ == '__main__':
main()
| python |
import scrapy
import codecs
import re
import json
from ..items import WebcrawlerItem
def unmangle_utf8(match):
escaped = match.group(0) # '\\u00e2\\u0082\\u00ac'
hexstr = escaped.replace(r'\u00', '') # 'e282ac'
buffer = codecs.decode(hexstr, "hex") # b'\xe2\x82\xac'
try:
return buffer.decode('utf8') # '€'
except UnicodeDecodeError:
print("Could not decode buffer: %s" % buffer)
class Spider(scrapy.Spider):
name = "alzaSpidey"
start_urls = [
'https://www.alza.cz/akcni-zbozi'
]
def parse(self, response):
items = WebcrawlerItem()
all_boxes = response.css('div.browsingitem')
for box in all_boxes:
item = box.css('a.name::text').extract()
price = box.css('span.c2::text').extract()
priceBefore = box.css('span.np2::text').extract()
discount = box.css('span.np::text').extract()
items['item'] = item
items['price'] = price
items['priceBefore'] = priceBefore
items['discount'] = discount
yield items
next_page = response.css('a.next::attr(href)').get()
if next_page is not None:
next_page = response.urljoin(next_page)
yield scrapy.Request(next_page, callback=self.parse)
| python |
import datetime
import traceback

# Examples of raising exceptions with descriptive messages:
#   raise ValueError('character must be a single string')
#   raise ValueError('width must be greater than 2')

try:
    ...
except ValueError as err:
    print(str(err))

# If we want to log errors that are not crashers:
try:
    ...
except:
    now = datetime.datetime.now()
    now = now.strftime('%Y-%m-%d %H:%M:%S')
    errorFile = open('errorInfo.txt', 'a')
    errorFile.write(now)
    errorFile.write(traceback.format_exc())
    errorFile.close()
    print("The traceback info was written to errorInfo.txt")
| python |
## predict iris dataset
## imports
import numpy as np
import pandas as pd
from sklearn import datasets
from sklearn.model_selection import train_test_split
from sklearn.neighbors import KNeighborsClassifier
import neptune
import os
from dotenv import load_dotenv
load_dotenv()
## setup neptune account
NEPTUNE_API_KEY=os.getenv('NEPTUNE_API_TOKEN')
neptune.init(project_qualified_name='rishushrivastava/sandbox', api_token=NEPTUNE_API_KEY)
## create an neptune experiment
neptune.create_experiment()
## load the data set
iris = datasets.load_iris()
## pre-processing and train/test split
X = iris.data[:]
y = iris.target[:]
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.3)
## train and fit the model - KNN
knn_clf = KNeighborsClassifier(n_neighbors=8)
knn_clf.fit(X_train,y_train)
neptune.log_metric('Training Score :',knn_clf.score(X_test,y_test)*100)
## stop the execution
neptune.stop()
| python |
# Generated by Django 3.2.12 on 2022-04-13 19:51
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('crypto', '0004_alert_user'),
]
operations = [
migrations.RenameField(
model_name='asset',
old_name='daily_diff',
new_name='convertEUR',
),
migrations.RemoveField(
model_name='alert',
name='asset_name',
),
migrations.RemoveField(
model_name='alert',
name='percent',
),
migrations.RemoveField(
model_name='alert',
name='user',
),
migrations.AddField(
model_name='alert',
name='alert_value',
field=models.FloatField(default=0),
preserve_default=False,
),
migrations.AddField(
model_name='alert',
name='currency',
field=models.CharField(choices=[('EUR', 'EUR'), ('USD', 'USD'), ('PLN', 'PLN')], default='', max_length=30),
preserve_default=False,
),
migrations.AddField(
model_name='alert',
name='email',
field=models.EmailField(default='', max_length=254),
preserve_default=False,
),
migrations.AddField(
model_name='alert',
name='idA',
field=models.ForeignKey(default=1, on_delete=django.db.models.deletion.CASCADE, to='crypto.asset'),
preserve_default=False,
),
migrations.AddField(
model_name='asset',
name='convertPLN',
field=models.FloatField(default=0),
),
migrations.AddField(
model_name='asset',
name='convertUSD',
field=models.FloatField(default=0),
),
migrations.AddField(
model_name='asset',
name='guidA',
field=models.CharField(default='', max_length=40),
preserve_default=False,
),
migrations.AlterUniqueTogether(
name='asset',
unique_together=set(),
),
migrations.DeleteModel(
name='User',
),
migrations.RemoveField(
model_name='asset',
name='currency',
),
]
| python |
# coding: utf-8
"""
ThingsBoard REST API
For instructions how to authorize requests please visit <a href='http://thingsboard.io/docs/reference/rest-api/'>REST API documentation page</a>. # noqa: E501
OpenAPI spec version: 2.0
Contact: [email protected]
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
import pprint
import re # noqa: F401
import six
class DeviceProfileAlarm(object):
"""NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
"""
Attributes:
swagger_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
swagger_types = {
'alarm_type': 'str',
'clear_rule': 'AlarmRule',
'create_rules': 'dict(str, AlarmRule)',
'id': 'str',
'propagate': 'bool',
'propagate_relation_types': 'list[str]'
}
attribute_map = {
'alarm_type': 'alarmType',
'clear_rule': 'clearRule',
'create_rules': 'createRules',
'id': 'id',
'propagate': 'propagate',
'propagate_relation_types': 'propagateRelationTypes'
}
def __init__(self, alarm_type=None, clear_rule=None, create_rules=None, id=None, propagate=None, propagate_relation_types=None): # noqa: E501
"""DeviceProfileAlarm - a model defined in Swagger""" # noqa: E501
self._alarm_type = None
self._clear_rule = None
self._create_rules = None
self._id = None
self._propagate = None
self._propagate_relation_types = None
self.discriminator = None
if alarm_type is not None:
self.alarm_type = alarm_type
if clear_rule is not None:
self.clear_rule = clear_rule
if create_rules is not None:
self.create_rules = create_rules
if id is not None:
self.id = id
if propagate is not None:
self.propagate = propagate
if propagate_relation_types is not None:
self.propagate_relation_types = propagate_relation_types
@property
def alarm_type(self):
"""Gets the alarm_type of this DeviceProfileAlarm. # noqa: E501
:return: The alarm_type of this DeviceProfileAlarm. # noqa: E501
:rtype: str
"""
return self._alarm_type
@alarm_type.setter
def alarm_type(self, alarm_type):
"""Sets the alarm_type of this DeviceProfileAlarm.
:param alarm_type: The alarm_type of this DeviceProfileAlarm. # noqa: E501
:type: str
"""
self._alarm_type = alarm_type
@property
def clear_rule(self):
"""Gets the clear_rule of this DeviceProfileAlarm. # noqa: E501
:return: The clear_rule of this DeviceProfileAlarm. # noqa: E501
:rtype: AlarmRule
"""
return self._clear_rule
@clear_rule.setter
def clear_rule(self, clear_rule):
"""Sets the clear_rule of this DeviceProfileAlarm.
:param clear_rule: The clear_rule of this DeviceProfileAlarm. # noqa: E501
:type: AlarmRule
"""
self._clear_rule = clear_rule
@property
def create_rules(self):
"""Gets the create_rules of this DeviceProfileAlarm. # noqa: E501
:return: The create_rules of this DeviceProfileAlarm. # noqa: E501
:rtype: dict(str, AlarmRule)
"""
return self._create_rules
@create_rules.setter
def create_rules(self, create_rules):
"""Sets the create_rules of this DeviceProfileAlarm.
:param create_rules: The create_rules of this DeviceProfileAlarm. # noqa: E501
:type: dict(str, AlarmRule)
"""
self._create_rules = create_rules
@property
def id(self):
"""Gets the id of this DeviceProfileAlarm. # noqa: E501
:return: The id of this DeviceProfileAlarm. # noqa: E501
:rtype: str
"""
return self._id
@id.setter
def id(self, id):
"""Sets the id of this DeviceProfileAlarm.
:param id: The id of this DeviceProfileAlarm. # noqa: E501
:type: str
"""
self._id = id
@property
def propagate(self):
"""Gets the propagate of this DeviceProfileAlarm. # noqa: E501
:return: The propagate of this DeviceProfileAlarm. # noqa: E501
:rtype: bool
"""
return self._propagate
@propagate.setter
def propagate(self, propagate):
"""Sets the propagate of this DeviceProfileAlarm.
:param propagate: The propagate of this DeviceProfileAlarm. # noqa: E501
:type: bool
"""
self._propagate = propagate
@property
def propagate_relation_types(self):
"""Gets the propagate_relation_types of this DeviceProfileAlarm. # noqa: E501
:return: The propagate_relation_types of this DeviceProfileAlarm. # noqa: E501
:rtype: list[str]
"""
return self._propagate_relation_types
@propagate_relation_types.setter
def propagate_relation_types(self, propagate_relation_types):
"""Sets the propagate_relation_types of this DeviceProfileAlarm.
:param propagate_relation_types: The propagate_relation_types of this DeviceProfileAlarm. # noqa: E501
:type: list[str]
"""
self._propagate_relation_types = propagate_relation_types
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
if issubclass(DeviceProfileAlarm, dict):
for key, value in self.items():
result[key] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, DeviceProfileAlarm):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Returns true if both objects are not equal"""
return not self == other
| python |
import graphene
import pytest
from ....tests.utils import get_graphql_content
QUERY_GIFT_CARDS = """
query giftCards($filter: GiftCardFilterInput){
giftCards(first: 10, filter: $filter) {
edges {
node {
id
displayCode
}
}
totalCount
}
}
"""
def test_query_gift_cards(
staff_api_client, gift_card, gift_card_created_by_staff, permission_manage_gift_card
):
# given
query = QUERY_GIFT_CARDS
gift_card_id = graphene.Node.to_global_id("GiftCard", gift_card.pk)
gift_card_created_by_staff_id = graphene.Node.to_global_id(
"GiftCard", gift_card_created_by_staff.pk
)
# when
response = staff_api_client.post_graphql(
query, permissions=[permission_manage_gift_card]
)
# then
content = get_graphql_content(response)
data = content["data"]["giftCards"]["edges"]
assert len(data) == 2
assert data[0]["node"]["id"] == gift_card_created_by_staff_id
assert data[0]["node"]["displayCode"] == gift_card_created_by_staff.display_code
assert data[1]["node"]["id"] == gift_card_id
assert data[1]["node"]["displayCode"] == gift_card.display_code
@pytest.mark.parametrize(
"filter_value, expected_gift_card_indexes",
[
("test-tag", [0]),
("another-tag", [1, 2]),
("tag", [0, 1, 2, 3]),
("not existing", []),
],
)
def test_query_filter_gift_cards(
filter_value,
expected_gift_card_indexes,
staff_api_client,
gift_card,
gift_card_expiry_period,
gift_card_expiry_date,
gift_card_used,
permission_manage_gift_card,
):
# given
query = QUERY_GIFT_CARDS
gift_cards = [
gift_card,
gift_card_expiry_period,
gift_card_expiry_date,
gift_card_used,
]
variables = {"filter": {"tag": filter_value}}
# when
response = staff_api_client.post_graphql(
query, variables, permissions=[permission_manage_gift_card]
)
# then
content = get_graphql_content(response)
data = content["data"]["giftCards"]["edges"]
assert len(data) == len(expected_gift_card_indexes)
assert {card["node"]["id"] for card in data} == {
graphene.Node.to_global_id("GiftCard", gift_cards[i].pk)
for i in expected_gift_card_indexes
}
def test_query_own_gift_cards(
user_api_client, gift_card_used, gift_card_created_by_staff
):
query = """
query giftCards{
me {
giftCards(first: 10) {
edges {
node {
id
displayCode
code
}
}
totalCount
}
}
}
"""
gift_card_id = graphene.Node.to_global_id("GiftCard", gift_card_used.pk)
response = user_api_client.post_graphql(query)
content = get_graphql_content(response)
data = content["data"]["me"]["giftCards"]
assert data["edges"][0]["node"]["id"] == gift_card_id
assert data["edges"][0]["node"]["displayCode"] == gift_card_used.display_code
assert data["edges"][0]["node"]["code"] == gift_card_used.code
assert data["totalCount"] == 1
| python |
import sqlalchemy as sa
import aiopg.sa
meta = sa.MetaData()
question = sa.Table(
'question', meta,
sa.Column('id', sa.Integer, nullable=False),
sa.Column('question_text', sa.String(200), nullable=False),
sa.Column('pub_date', sa.Date, nullable=False),
# Indexes
sa.PrimaryKeyConstraint('id', name='question_id_pkey')
)
choice = sa.Table(
'choice', meta,
sa.Column('id', sa.Integer, nullable=False),
sa.Column('question_id', sa.Integer, nullable=False),
sa.Column('choice_text', sa.Integer, nullable=False),
sa.Column('votes', sa.Integer, server_default="0", nullable=False),
# Indexes
sa.PrimaryKeyConstraint('id', name='choice_id_pkey'),
sa.ForeignKeyConstraint(['question_id'], [question.c.id], name='choice_question_id_fkey', ondelete='CASCADE')
)
async def init_pg(app):
conf = app['config']['postgres']
engine = await aiopg.sa.create_engine(
database = conf['database'],
user=conf['user'],
password=conf['password'],
host=conf['host'],
port=conf['port'],
minsize=conf['minsize'],
maxsize=conf['maxsize']
)
app['db'] = engine
async def close_pg(app):
app['db'].close()
await app['db'].wait_closed()
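
# Hedged usage sketch (assumes an aiohttp web.Application whose app['config']
# holds the postgres settings; not part of the original module):
#   from aiohttp import web
#   app = web.Application()
#   app['config'] = load_config()          # hypothetical config loader
#   app.on_startup.append(init_pg)
#   app.on_cleanup.append(close_pg)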
| python |
import json
import argparse
def main():
parser = argparse.ArgumentParser(description='Conversion IO')
parser.add_argument("--input_file", dest="input_file", type=argparse.FileType('r', encoding='UTF-8'), required=True)
parser.add_argument("--output_file", dest="output_file", type=argparse.FileType('w', encoding='UTF-8'))
parser.add_argument("--output_type", dest="output_type", type=str, choices=["txt", "json"], default="json")
args = parser.parse_args()
dictionary = json.load(args.input_file)
word_list = []
for entry in dictionary:
word_list.append(entry["word"])
word_list = list(set(word_list))
word_list.sort()
output_string = ""
if args.output_type == "txt":
output_string = "\n".join(word_list)
else:
output_string = json.dumps(word_list, indent = 4, ensure_ascii=False)
output_to_file_successful = False
if args.output_file:
args.output_file.write(output_string)
output_to_file_successful = True
if output_to_file_successful:
print("Operation done! Successfully retrieved {} words.".format(len(word_list)))
else:
print(output_string)
if __name__ == "__main__":
    main()
| python |
import xml.etree.ElementTree as ET
import sys
tree = ET.parse(sys.argv[1])
# the xml tree is of the form
# <expr><list> {all options, each an attrs} </list></expr>
options = list(tree.getroot().find('list'))
def sortKey(opt):
def order(s):
if s.startswith("enable"):
return 0
if s.startswith("package"):
return 1
return 2
return [
(order(p.attrib['value']), p.attrib['value'])
for p in opt.findall('attr[@name="loc"]/list/string')
]
# always ensure that the sort order matches the order used in the nix expression!
options.sort(key=sortKey)
doc = ET.Element("expr")
newOptions = ET.SubElement(doc, "list")
newOptions.extend(options)
ET.ElementTree(doc).write(sys.argv[2], encoding='utf-8')
| python |
from typing import Tuple, Union
import torch
def make_dense_volume(
ind: torch.Tensor,
voxel_res: Union[int, Tuple[int, int, int]]
) -> torch.Tensor:
if isinstance(voxel_res, int):
voxel_res = (voxel_res, voxel_res, voxel_res)
grid = torch.zeros(voxel_res, dtype=torch.bool)
grid[ind[:, 0], ind[:, 1], ind[:, 2]] = True
return grid.unsqueeze(0)
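
# Hedged usage sketch (added, illustrative values): turn three sparse voxel
# indices into a dense 4x4x4 boolean occupancy grid.
if __name__ == "__main__":
    sparse_ind = torch.tensor([[0, 0, 0], [1, 2, 3], [3, 3, 3]])
    volume = make_dense_volume(sparse_ind, 4)
    print(volume.shape)         # torch.Size([1, 4, 4, 4])
    print(volume.sum().item())  # 3 occupied voxels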
| python |
import os
from django.http import FileResponse
from wsgiref.util import FileWrapper
from settings.static import MEDIA_URL
# from django.core.servers.basehttp import FileWrapper
from django.views.generic import TemplateView
from django.shortcuts import render_to_response, render, redirect, get_object_or_404
from django.core.mail import send_mail
from django.http import HttpResponse
from django.template import RequestContext
from django.http import HttpResponseRedirect
from pangolinfog.forms import *
# from pangolinfog.recaptcha.forms import *
from django.template.loader import get_template
from django.core.mail import EmailMessage
from django.template import Context
from product.models import Category
from product.models import Product, Accessory
from content.models import Slide
from django.core.urlresolvers import reverse_lazy
from django.views.generic import FormView
from nocaptcha_recaptcha.fields import NoReCaptchaField
def contact(request):
form_class = ContactForm
success_url = reverse_lazy('success')
args = {}
background_image = get_object_or_404(Slide, header_about=1)
args['menu'] = "contact"
categories_main_menu = Category.objects.filter(published_in_menu=1).order_by('ordering')
args['categories_main_menu'] = categories_main_menu
args['form'] = form_class
args['background_image'] = background_image
def form_valid(self, form):
return super(form_class, self).form_valid(form)
if request.method == 'POST':
form = ContactForm(request.POST)
if form.is_valid():
contact_name = request.POST.get(
'contact_name'
, '')
contact_email = request.POST.get(
'contact_email'
, '')
contact_phone = request.POST.get(
'contact_phone'
, '')
form_content = request.POST.get('content', '')
# Email the profile with the
# contact information
template = get_template('contact_template.txt')
context = Context({
'contact_name': contact_name,
'contact_email': contact_email,
'contact_phone': contact_phone,
'form_content': form_content,
})
content = template.render(context)
email = EmailMessage(
"Pangolin Fog",
content,
"Pangolin Fog" +'',
['[email protected]'],
headers = {'Reply-To': contact_email }
)
email.send()
return redirect(request.META.get('HTTP_REFERER', '/'))
return render(request, 'contact.html', args)
def jq_subsc(request):
return render(request, 'jq_subsc.html')
def download_file(request):
_file = 'manualtourhazer2.pdf.zip'
filename = os.path.basename(_file)
# python 3
# response = FileResponse(FileWrapper(open(filename, 'rb')), content_type='application/x-zip-compressed')
# python 2
response = FileResponse(FileWrapper(file(filename, 'rb')), content_type='application/x-zip-compressed')
response['Content-Disposition'] = "attachment; filename=%s" % _file
return response
def download_mp3(request):
_file = 'Last_Summer_in_Yalta.mp3.zip'
filename = os.path.basename(_file)
# python 3
# response = FileResponse(FileWrapper(open(filename, 'rb')), content_type='application/x-zip-compressed')
# python 2
response = FileResponse(FileWrapper(file(filename, 'rb')), content_type='application/x-zip-compressed')
response['Content-Disposition'] = "attachment; filename=%s" % _file
return response
def main(request):
args = {}
slides = Slide.objects.filter(published_main=1).order_by('ordering')
categories_main_menu = Category.objects.filter(published_in_menu=1).order_by('ordering')
products_main = Product.objects.filter(published_main=1)
args['products_main'] = products_main
args['categories_main_menu'] = categories_main_menu
args['slides'] = slides
return render_to_response("home.html", args)
def news(request):
args = {}
slides = Slide.objects.filter(published_portfolio=1).order_by('ordering')
news = Slide.objects.filter(published_news=1).order_by('ordering')
background_image = get_object_or_404(Slide, header_about=1)
args['news'] = news
args['menu'] = "news"
args['slides'] = slides
args['background_image'] = background_image
return render_to_response("news.html", args)
def about(request):
args = {}
slides = Slide.objects.filter(published_portfolio=1).order_by('ordering')
news = Slide.objects.filter(published_news=1).order_by('ordering')
background_image = get_object_or_404(Slide, header_about=1)
args['news'] = news
args['menu'] = "about"
args['slides'] = slides
args['background_image'] = background_image
return render_to_response("about.html", args)
| python |
from functools import cached_property
from ..typing import TYPE_CHECKING, Any, Callable, Catchable
if TYPE_CHECKING:
from .fn import fn
def as_method(method, name):
method.__name__ = name
method.__qualname__ = f"fn.{name}"
method.__doc__ = f"Auto generated, see :func:`sidekick.functions.{name}`"
return method
def curry_n(n, name, options=()):
"""
Curry if only one argument is given and execute if any additional arguments
are passed.
"""
def method(self: "FnMixin", *args, **kwargs):
api_func = getattr(self._mod, name)
api_kwargs = {k: kwargs.pop(k) for k in kwargs if k in options}
if len(args) == n and not kwargs:
return api_func(*args, self, **api_kwargs)
transformed_fn = api_func(*args[:n], self, **api_kwargs)
return transformed_fn(*args[n:], **kwargs)
return as_method(method, name)
class FnMixin:
"""
Basic mixin class that exposes a _mod lazy attribute to access the
sidekick.functions module and declare the _func attribute that is
manipulated by methods.
"""
_func: Callable
__call__: Callable
__slots__ = ()
if TYPE_CHECKING:
from .. import functions as _mod
_mod = _mod
else:
@cached_property
def _mod(self):
from .. import functions
return functions
#
# Expose functions in sidekick.functions.lib_arguments as methods.
#
def flip(self, x, y, /, *args, **kwargs):
"""
Executes flipping the first two arguments.
Access as attribute to obtain a flipped version of function.
"""
return self._func(y, x, *args, **kwargs)
def reverse_args(self, /, *args, **kwargs):
"""
Executes reversing the order of positional arguments.
Access as attribute to obtain a reversed version of function.
"""
return self._func(*args[::-1], **kwargs)
select_args = curry_n(1, "select_args")
skip_args = curry_n(1, "skip_args")
keep_args = curry_n(1, "keep_args")
def variadic_args(self, /, *args, **kwargs):
"""
Pass variadic arguments as single tuple to function.
"""
return self._func(args, **kwargs)
def splice_args(self, xs, /, *args, **kwargs):
"""
Splice first argument.
"""
return self._func(*xs, *args, **kwargs)
def set_null(self, /, *defaults: Any, **kwargs: Any) -> "fn":
"""
Return a new function that replace all null arguments in the given positions
by the provided default value.
"""
return self._mod.set_null(self._func, *defaults, **kwargs)
#
# Expose functions in sidekick.functions.lib_combinators as methods.
#
def do(self, /, *args, **kwargs):
"""
Execute function, but return the first argument.
Function result is ignored, hence do is executed only for the function
side-effects.
"""
if not args:
raise TypeError("requires at least a single argument.")
self(*args, **kwargs)
return args[0]
#
# Expose functions in sidekick.functions.lib_composition as methods.
#
def compose(self, *funcs) -> "fn":
"""
Compose with other functions.
Argument flow from right to left. Function is thus the last to execute.
"""
return self._mod.compose(self, *funcs)
def pipeline(self, *funcs) -> "fn":
"""
Compose with other functions.
Argument flow from left to right, starting in self.
"""
return self._mod.pipeline(self, *funcs)
def juxt(self, *funcs, **kwargs) -> "fn":
"""
Return function that juxtaposes fn with all functions in the arguments.
"""
return self._mod.juxt(self, *funcs, **kwargs)
#
# Expose functions in sidekick.functions.lib_runtime as methods.
#
def once(self) -> "fn":
"""
Version of function that perform a single invocation.
Repeated calls to the function return the value of the first invocation.
"""
return self._mod.once(self._func)
def thunk(self, /, *args, **kwargs) -> Callable[[], Any]:
"""
Return as a thunk.
"""
return self._mod.thunk(self, *args, **kwargs)
call_after = curry_n(1, "call_after", {"default"})
call_at_most = curry_n(1, "call_at_most")
def throttle(self, dt: float, **kwargs) -> "fn":
"""
Limit the rate of execution of func to once at each ``dt`` seconds.
Return a new function.
"""
return self._mod.throttle(dt, self, **kwargs)
def background(self, /, *args, **kwargs) -> Any:
"""
Execute function in the background.
Current implementation uses threads, but in the future it may use hooks
to other runtimes such as asyncio, curio, etc.
"""
return self._mod.background(self, *args, **kwargs)
def catch(self, error, /, *args, **kwargs):
"""
Handle exception in function.
If the exception occurs, return None or the value mapped from the error
mapping.
"""
return self._mod.catch(error, self, *args, **kwargs)
def catching(self, error) -> "fn":
"""
Handle exception in function.
If the exception occurs, it executes the given handler.
Return a new function with the new error handling behavior.
"""
return self._mod.catching(error, self)
def retry(
self, n: int, /, *args, error: Catchable = Exception, sleep=None, **kwargs
) -> "fn":
"""
Try to call function n types before raising an error.
This is useful for functions that may fail due to interaction with
external resources (e.g., fetch data from the network).
Args:
n:
Maximum number of times to execute function
error:
Exception or tuple with suppressed exceptions.
sleep:
Interval between attempts. This is a blocking function, hence
use with care.
Other positional and keyword arguments are forwarded to function.
"""
func = self._mod.retry(n, self, error=error, sleep=sleep)
return func(*args, **kwargs)
| python |
import requests
from typing import Dict, NamedTuple, NoReturn
from bs4 import BeautifulSoup
class WorkshopError(Exception):
def __init__(self, error: str):
self.error = error
def __str__(self) -> str:
return self.error
class Script(NamedTuple):
"""Encapsulate a numworks workshop python script."""
name: str
description: str
content: str
public: bool
class Workshop:
def __init__(self, email: str, password: str):
self.session = requests.Session()
self.base_url = "workshop.numworks.com"
user = {
"email": email,
"password": password
}
self.login(user)
def login(self, user: Dict[str, str]) -> NoReturn:
login = self.session.get(self.get_url("/users/sign_in"))
soup = BeautifulSoup(login.text, "html.parser")
authenticity_token = soup.find("input").get("value")
payload = {
"authenticity_token": authenticity_token,
"commit": "Se connecter",
"user[email]": user["email"],
"user[password]": user["password"],
}
r = self.session.post(self.get_url("/users/sign_in"), data=payload)
soup = BeautifulSoup(r.text, "html.parser").find(
["ul", "li", "a"],
class_="dropdown-menu animated-dropdown-sm")
self.python = soup.find_all("a")[1].get("href")
def create_script(self, script: Script) -> NoReturn:
# @todo : error if script allready exist...
r = self.session.get(self.get_url(f"{self.python}/new"))
soup = BeautifulSoup(r.text, "html.parser")
authenticity_token = soup.find("input").get("value")
payload = {
"authenticity_token": authenticity_token,
"commit": "Sauvegarder",
"script[description]": script.description,
"script[name]": f"{script.name.lower()}.py",
"script[public]": int(script.public),
"script[text_area_content]": script.content,
}
r = self.session.post(self.get_url(f"{self.python}"), data=payload)
soup = BeautifulSoup(r.text, "html.parser")
self.raise_errors(soup.find(id="error_explanation"))
def edit_script(self, script: Script, name=None) -> NoReturn:
r = self.session.get(self.get_url(f"{self.python}/{script.name}/edit"))
soup = BeautifulSoup(r.text, "html.parser")
authenticity_token = soup.find_all("input")[1].get("value")
payload = {
"_method": "patch",
"authenticity_token": authenticity_token,
"commit": "Sauvegarder",
"script[description]": script.description,
"script[name]": (f"{name.lower()}.py"
or f"{script.name.lower()}.py"),
"script[public]": int(script.public),
"script[text_area_content]": script.content,
}
r = self.session.post(self.get_url(f"{self.python}/{script.name}"),
data=payload)
soup = BeautifulSoup(r.text, "html.parser")
self.raise_errors(soup.find(id="error_explanation"))
script.name = name or script.name
def delete_script(self, script: Script) -> NoReturn:
r = self.session.get(self.get_url(f"{self.python}/{script.name}"))
soup = BeautifulSoup(r.text, "html.parser")
authenticity_token = soup.find("meta",
attrs={"name": "csrf-token"}).get(
"content")
payload = {
"_method": "delete",
"authenticity_token": authenticity_token,
}
r = self.session.post(self.get_url(f"{self.python}/{script.name}"),
data=payload)
soup = BeautifulSoup(r.text, "html.parser")
self.raise_errors(soup.find(id="error_explanation"))
def get_script(self, url: str) -> Script:
r = self.session.get(f"{url}")
soup = BeautifulSoup(r.text, "html.parser")
send_to_calculator = soup.find("send-to-calculator")
script_name = send_to_calculator.get("script-name").split(".")[0]
script_content = send_to_calculator.get("script-content")
script_description = soup.find(class_="text-justify").text.strip("\n")
if url[37:].split("/")[0] != self.python.split("/")[2]:
script_public = True
else:
script_public = bool(soup.find(class_="text-success"))
return Script(script_name,
script_description,
script_content,
script_public)
def get_url(self, url: str) -> str:
return f"https://{self.base_url}{url}"
def raise_errors(self, errors: Exception) -> NoReturn:
if errors:
errors = (error.text for error in errors.find_all("li"))
for error in errors:
raise WorkshopError(error)
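
# Hedged usage sketch (credentials, script name and content are placeholders,
# not taken from the original module):
#   ws = Workshop("user@example.com", "secret")
#   ws.create_script(Script(name="demo", description="hello world",
#                           content="print('hi')", public=False))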
| python |
import json
import os
import random
import bottle
from api import ping_response, start_response, move_response, end_response
@bottle.route('/')
def index():
return '''
Battlesnake documentation can be found at
<a href="https://docs.battlesnake.io">https://docs.battlesnake.io</a>.
'''
@bottle.route('/static/<path:path>')
def static(path):
"""
Given a path, return the static file located relative
to the static folder.
This can be used to return the snake head URL in an API response.
"""
return bottle.static_file(path, root='static/')
@bottle.post('/ping')
def ping():
"""
A keep-alive endpoint used to prevent cloud application platforms,
such as Heroku, from sleeping the application instance.
"""
return ping_response()
@bottle.post('/start')
def start():
data = bottle.request.json
"""
TODO: If you intend to have a stateful snake AI,
initialize your snake state here using the
request's data if necessary.
"""
print(json.dumps(data))
color = "#FF0000"
return start_response(color)
@bottle.post('/move')
def move():
data = bottle.request.json
"""
TODO: Using the data from the endpoint request object, your
snake AI must choose a direction to move in.
"""
print(json.dumps(data))
myId = data['you']['id']
turn = data['turn']
body = data['you']['body']
snakes = data['board']['snakes']
foods = data['board']['food']
moveOption = []
riskyMove = []
x = body[0]['x']
y = body[0]['y']
xLimit = data['board']['width'] - 1
yLimit = data['board']['height'] - 1
longuestSnake = 1
food = 1
left = 1
right = 1
up = 1
down = 1
hungry = 0
if data['you']['health']<50:
hungry = 1
for snake in snakes:
if snake['id'] != myId:
if len(snake['body']) >= len(body):
longuestSnake = 0
head = snake['body'][0]
if head['x'] == x:
if head['y']+2 == y:
up = 0
riskyMove += ['up']
elif head['y']-2 == y:
down = 0
riskyMove += ['down']
if head['y'] == y:
if head['x']+2 == x:
left = 0
riskyMove += ['left']
if head['x']-2 == x:
right = 0
riskyMove += ['right']
if head['x']+1 == x:
if head['y']+1 == y:
up = 0
riskyMove += ['up']
left = 0
riskyMove += ['left']
if head['y']-1 == y:
down = 0
riskyMove += ['down']
left = 0
riskyMove += ['left']
if head['x']-1 == x:
if head['y']+1 == y:
up = 0
riskyMove += ['up']
right = 0
riskyMove += ['right']
if head['y']-1 == y:
down = 0
riskyMove += ['down']
right = 0
riskyMove += ['right']
i=1
for b in snake['body']:
if i != len(snake['body']):
if x == b['x']:
if b['y'] == y-1:
up = 0
if 'up' in riskyMove:
riskyMove.remove('up')
elif b['y'] == y+1:
down = 0
if 'down' in riskyMove:
riskyMove.remove('down')
elif y == b['y']:
if b['x'] == x-1:
left = 0
if 'left' in riskyMove:
riskyMove.remove('left')
elif b['x'] == x+1:
right = 0
if 'right' in riskyMove:
riskyMove.remove('right')
i+=1
i=1
for b in body:
if i != len(body):
if x == b['x']:
if b['y'] == y-1:
up = 0
if 'up' in riskyMove:
riskyMove.remove('up')
elif b['y'] == y+1:
down = 0
if 'down' in riskyMove:
riskyMove.remove('down')
elif y == b['y']:
if b['x'] == x-1:
left = 0
if 'left' in riskyMove:
riskyMove.remove('left')
elif b['x'] == x+1:
right = 0
if 'right' in riskyMove:
riskyMove.remove('right')
i+=1
minDistance = 1000000000
if len(foods) != 0:
for f in foods:
xDistance = f['x']-x
yDistance = f['y']-y
distance = abs(xDistance)+abs(yDistance)
if distance <= minDistance:
minDistance = distance
xFoodDistance = xDistance
yFoodDistance = yDistance
else:
food = 0
if x and left:
moveOption += ['left']
if x != xLimit and right:
moveOption += ['right']
if y and up:
moveOption += ['up']
if y != yLimit and down:
moveOption += ['down']
if food and (not longuestSnake or hungry):
if xFoodDistance == 0:
if yFoodDistance < 0:
if 'up' in moveOption:
direction = 'up'
else:
if len(moveOption):
direction = random.choice(moveOption)
else:
direction = random.choice(riskyMove)
else:
if 'down' in moveOption:
direction = 'down'
else:
if len(moveOption):
direction = random.choice(moveOption)
else:
direction = random.choice(riskyMove)
elif yFoodDistance == 0:
if xFoodDistance < 0:
if 'left' in moveOption:
direction = 'left'
else:
if len(moveOption):
direction = random.choice(moveOption)
else:
direction = random.choice(riskyMove)
else:
if 'right' in moveOption:
direction = 'right'
else:
if len(moveOption):
direction = random.choice(moveOption)
else:
direction = random.choice(riskyMove)
elif abs(xFoodDistance) < abs(yFoodDistance):
if xFoodDistance < 0:
if 'left' in moveOption:
direction = 'left'
else:
if yFoodDistance < 0:
if 'up' in moveOption:
direction = 'up'
else:
if len(moveOption):
direction = random.choice(moveOption)
else:
direction = random.choice(riskyMove)
else:
if 'down' in moveOption:
direction = 'down'
else:
if len(moveOption):
direction = random.choice(moveOption)
else:
direction = random.choice(riskyMove)
else:
if 'right' in moveOption:
direction = 'right'
else:
if yFoodDistance < 0:
if 'up' in moveOption:
direction = 'up'
else:
if len(moveOption):
direction = random.choice(moveOption)
else:
direction = random.choice(riskyMove)
else:
if 'down' in moveOption:
direction = 'down'
else:
if len(moveOption):
direction = random.choice(moveOption)
else:
direction = random.choice(riskyMove)
elif abs(yFoodDistance) < abs(xFoodDistance):
if yFoodDistance < 0:
if 'up' in moveOption:
direction = 'up'
else:
if xFoodDistance < 0:
if 'left' in moveOption:
direction = 'left'
else:
if len(moveOption):
direction = random.choice(moveOption)
else:
direction = random.choice(riskyMove)
else:
if 'right' in moveOption:
direction = 'right'
else:
if len(moveOption):
direction = random.choice(moveOption)
else:
direction = random.choice(riskyMove)
else:
if 'down' in moveOption:
direction = 'down'
else:
if xFoodDistance < 0:
if 'left' in moveOption:
direction = 'left'
else:
if len(moveOption):
direction = random.choice(moveOption)
else:
direction = random.choice(riskyMove)
else:
if 'right' in moveOption:
direction = 'right'
else:
if len(moveOption):
direction = random.choice(moveOption)
else:
direction = random.choice(riskyMove)
else:
if len(moveOption):
direction = random.choice(moveOption)
else:
direction = random.choice(riskyMove)
else:
if len(moveOption):
direction = random.choice(moveOption)
else:
direction = random.choice(riskyMove)
return move_response(direction)
@bottle.post('/end')
def end():
data = bottle.request.json
"""
TODO: If your snake AI was stateful,
clean up any stateful objects here.
"""
print(json.dumps(data))
return end_response()
# Expose WSGI app (so gunicorn can find it)
application = bottle.default_app()
if __name__ == '__main__':
bottle.run(
application,
host=os.getenv('IP', '0.0.0.0'),
port=os.getenv('PORT', '8080'),
debug=os.getenv('DEBUG', True)
)
| python |
import json
import itertools as it
from collections import defaultdict
import textwrap
from itertools import chain
COURSE_LIST_FILENAME = 'course_list.json'
REVERSE_KDAM_FILENAME = 'reverse_kdam.json'
REVERSE_ADJACENT_FILENAME = 'reverse_adjacent.json'
def read_json_to_dict(filename=COURSE_LIST_FILENAME):
with open(filename, encoding='utf8') as f:
return json.load(f)
def flatten(v, field):
return sum(v.get(field, []), [])
def to_jsonable(d):
return {k:list(sorted(set(v))) for k, v in d.items()}
def multidict(pairs):
res = defaultdict(list)
for k, v in it.chain.from_iterable(pairs):
res[k].append(v)
return to_jsonable(res)
def merge_mutildicts(d1, d2):
res = defaultdict(list, d1)
for (k, v) in d2.items():
res[k] += v
return to_jsonable(res)
def multidict_to_pairs(d):
return it.chain.from_iterable(it.product([k], v) for k, v in d.items())
def get_reverse_kdam_from_course_list(field='kdam', filename=COURSE_LIST_FILENAME):
d = read_json_to_dict(filename)
return multidict(it.product(flatten(v, field), [k])
for k, v in d.items())
def read_kdam_and_adjacent():
kdams = read_json_to_dict(REVERSE_KDAM_FILENAME)
adjacents = read_json_to_dict(REVERSE_ADJACENT_FILENAME)
return merge_mutildicts(kdams, adjacents)
def dump_json_kdam(d):
s = ',\n'.join('{}: {}'.format(repr(k), repr(v)) for k, v in sorted(d.items()))
return ('{\n%s\n}' % s.replace("'", '"'))
def print_to_file(filename, field):
with open(filename, 'w') as f:
f.write(dump_json_kdam(get_reverse_kdam_from_course_list(field)))
def is_cs(cid):
return 234000 <= int(cid) <= 236999
def nodes_to_visDataSet(fp):
from functools import partial
pr = partial(print, file=fp)
pr('var nodes = new vis.DataSet([')
edges = defaultdict(set)
d = read_json_to_dict(filename=COURSE_LIST_FILENAME)
for cid, details in sorted(d.items()):
cid = int(cid)
if not is_cs(cid):
continue
for k in details.get('kdam', []):
if len(k) > 1:
dummy = 1000000 + sum(map(int, k))
if dummy not in edges:
pr('{', 'id:"{}", group: 9, hidden: true'.format(dummy), '},')
edges[dummy].add(cid)
for p in k:
edges[p].add(dummy)
else:
edges[k[0]].add(cid)
for cid in {int(x) for x in (set(chain.from_iterable(edges.values())) | set(edges))}:
cid = int(cid)
if cid < 1000000:
details = d.get(str(cid).zfill(6))
if details is None:
pr('{', 'id:"{0}", group: 10, label: {0}, title: "{0}", mass:1'.format(cid), '},')
else:
name = repr(textwrap.fill(details['name'], 25))
pr('{', 'id:"{}", group: {g}, label: {name}, title: "{number}"'.format(
cid, g=str(cid)[-4], name=name, number=cid), '},')
pr(']);')
pr('var edges = new vis.DataSet([')
for cid, v in multidict_to_pairs(edges):
pr('{', 'from: {}, to: {}'.format(cid, v), '},')
pr(']);')
if __name__ == '__main__':
with open(r'..\ug-data-vis\data.js', 'w', encoding='utf8') as fp:
nodes_to_visDataSet(fp)
| python |
# -*- coding: utf-8 -*-
# created: 2021-06-22
# creator: [email protected]
import asyncio
from gcommon.aio.gasync import maybe_async
def sync_call():
print("sync")
return "1"
async def async_call():
await asyncio.sleep(1)
print("async")
return "2"
async def test():
r = await maybe_async(sync_call)
print(r)
r = await maybe_async(async_call)
print(r)
if __name__ == '__main__':
asyncio.run(test())
| python |
# -*- coding: utf-8 -*-
from peewee import *
from telegram import User as TelegramUser
import util
from model.user import User
from model.basemodel import BaseModel
class APIAccess(BaseModel):
user = ForeignKeyField(User)
token = CharField(32)
webhook_url = CharField(null=True)
| python |
"""Module to test reset password"""
from django.urls import reverse
from rest_framework import status
from rest_framework.test import APITestCase
from django.contrib.auth import get_user_model
from django.contrib.auth.tokens import default_token_generator
class ResetPassword(APITestCase):
def setUp(self):
""" Set the data for test """
self.email = {"email": "[email protected]"}
self.valid_user = {
"username": "michael",
"email": "[email protected]",
"password": "Bit22150"}
self.client.defaults['HTTP_REFERER'] = '127.0.0.1'
self.client.post(reverse('authentication:register'),
self.valid_user, format='json')
self.forget_password_url = reverse('authentication:forgot')
def test_sending_successful_email(self):
""" Test email is sent """
response = self.client.post(
self.forget_password_url, self.email, format='json')
self.assertIn(
'Please check your email for further instruction', str(response.data))
self.assertEqual(response.status_code, status.HTTP_200_OK)
def test_invalid_email(self):
""" Test for invalid email """
email = {"email": "[email protected]"}
response = self.client.post(
self.forget_password_url, email, format='json')
self.assertIn('The email you entered does not exist',
str(response.data))
self.assertEqual(response.status_code, status.HTTP_404_NOT_FOUND)
def test_reset_password(self):
""" Test user successfully reset password """
user = get_user_model().objects.create_user(username='leon', email='[email protected]',
password='123456789')
token = default_token_generator.make_token(user)
reset_password_url = reverse(
'authentication:reset_password', kwargs={'token': token})
new_password = {"password": "abcdef",
"confirm_password": "abcdef",
"email": "[email protected]",
"token": token}
response = self.client.put(
reset_password_url, data=new_password, format='json')
self.assertIn('Your password has been successfully changed',
str(response.data))
self.assertEqual(response.status_code, status.HTTP_200_OK)
| python |
#!/usr/bin/env python3
import argparse
import logging
from pathlib import Path
import sys
from typing import Iterable
from typing import Union
import numpy as np
from espnet.utils.cli_utils import get_commandline_args
def aggregate_stats_dirs(
input_dir: Iterable[Union[str, Path]],
output_dir: Union[str, Path],
log_level: str,
skip_sum_stats: bool,
):
logging.basicConfig(
level=log_level,
format="%(asctime)s (%(module)s:%(lineno)d) (levelname)s: %(message)s",
)
input_dirs = [Path(p) for p in input_dir]
output_dir = Path(output_dir)
for mode in ["train", "valid"]:
with (input_dirs[0] / mode / "batch_keys").open("r", encoding="utf-8") as f:
batch_keys = [line.strip() for line in f if line.strip() != ""]
with (input_dirs[0] / mode / "stats_keys").open("r", encoding="utf-8") as f:
stats_keys = [line.strip() for line in f if line.strip() != ""]
(output_dir / mode).mkdir(parents=True, exist_ok=True)
for key in batch_keys:
with (output_dir / mode / f"{key}_shape").open(
"w", encoding="utf-8"
) as fout:
for idir in input_dirs:
with (idir / mode / f"{key}_shape").open(
"r", encoding="utf-8"
) as fin:
# Read to the last in order to sort keys
# because the order can be changed if num_workers>=1
lines = fin.readlines()
lines = sorted(lines, key=lambda x: x.split()[0])
for line in lines:
fout.write(line)
for key in stats_keys:
if not skip_sum_stats:
sum_stats = None
for idir in input_dirs:
stats = np.load(idir / mode / f"{key}_stats.npz")
if sum_stats is None:
sum_stats = dict(**stats)
else:
for k in stats:
sum_stats[k] += stats[k]
np.savez(output_dir / mode / f"{key}_stats.npz", **sum_stats)
# if --write_collected_feats=true
p = Path(mode) / "collect_feats" / f"{key}.scp"
scp = input_dirs[0] / p
if scp.exists():
(output_dir / p).parent.mkdir(parents=True, exist_ok=True)
with (output_dir / p).open("w", encoding="utf-8") as fout:
for idir in input_dirs:
with (idir / p).open("r", encoding="utf-8") as fin:
for line in fin:
fout.write(line)
def get_parser() -> argparse.ArgumentParser:
parser = argparse.ArgumentParser(
description="Aggregate statistics directories into one directory",
formatter_class=argparse.ArgumentDefaultsHelpFormatter,
)
parser.add_argument(
"--log_level",
type=lambda x: x.upper(),
default="INFO",
choices=("CRITICAL", "ERROR", "WARNING", "INFO", "DEBUG", "NOTSET"),
help="The verbose level of logging",
)
parser.add_argument(
"--skip_sum_stats",
default=False,
action="store_true",
help="Skip computing the sum of statistics.",
)
parser.add_argument("--input_dir", action="append", help="Input directories")
parser.add_argument("--output_dir", required=True, help="Output directory")
return parser
def main(cmd=None):
print(get_commandline_args(), file=sys.stderr)
parser = get_parser()
args = parser.parse_args(cmd)
kwargs = vars(args)
aggregate_stats_dirs(**kwargs)
if __name__ == "__main__":
main()
| python |
import argparse
import parmed as pmd
def merge_gro_files(prot_gro, lig_gro, cmplx_gro):
prot = pmd.load_file(prot_gro)
lig = pmd.load_file(lig_gro)
cmplx = prot + lig
cmplx.save(cmplx_gro)
def merge_top_files(prot_top, lig_top, cmplx_top):
with open(lig_top, 'r') as f:
lig_top_sections = f.read().split('\n[')
    # pull the [ atomtypes ] section out of the ligand topology so it can be merged separately
for n in range(len(lig_top_sections)):
if 'atomtypes' in lig_top_sections[n][:10]:
lig_atomtypes = lig_top_sections[n]
del lig_top_sections[n]
break
else:
lig_atomtypes = None
lig_top_updated = '\n['.join(lig_top_sections)
# open protein topology
with open(prot_top, 'r') as f:
prot_top_combined = f.read()
if lig_atomtypes:
prot_top_sections = prot_top_combined.split('[ moleculetype ]\n')
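        # GROMACS expects atomtype definitions before any [ moleculetype ] that uses
        # them, so splice the ligand atomtypes in ahead of the protein's first one.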
prot_top_combined = (prot_top_sections[0] +
'; Include ligand atomtypes\n[' +
lig_atomtypes +
'\n[ moleculetype ]\n' +
prot_top_sections[1])
prot_top_sections = prot_top_combined.split('; Include water topology')
prot_top_combined = (prot_top_sections[0] +
'; Include ligand topology\n' +
lig_top_updated +
'\n; Include water topology' +
prot_top_sections[1])
prot_top_combined += 'base 1\n'
# save complex topology
with open(cmplx_top, 'w') as f:
f.write(prot_top_combined)
def main():
parser = argparse.ArgumentParser(
description='Perform SMD runs for dynamic undocking')
parser.add_argument('--lig-top', help='Ligand TOP file.')
parser.add_argument('--prot-top', help='Protein TOP file.')
parser.add_argument('--lig-gro', help='Ligand GRO file.')
parser.add_argument('--prot-gro', help='Protein GRO file.')
parser.add_argument('--complex-top', help='Complex TOP file.')
parser.add_argument('--complex-gro', help='Complex GRO file.')
args = parser.parse_args()
merge_gro_files(args.prot_gro, args.lig_gro, args.complex_gro)
merge_top_files(args.prot_top, args.lig_top, args.complex_top)
if __name__ == "__main__":
main()
| python |
dictionary = {"name": "Shahjalal", "ref": "Python", "sys": "Mac"}
for key, value in dictionary.items():
    print(key, " = ", value)
| python |
def get_sum_by_route(route_val, nums):
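    # The bits of route_val encode the path through the triangle: at each row,
    # bit 1 steps down-right (advance the column index), bit 0 steps straight down.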
sum_val = nums[0][0]
j = 0
route=[sum_val]
for i in range(1, len(nums)):
if route_val % 2 > 0:
j+=1
sum_val += nums[i][j]
route.append(nums[i][j])
route_val >>= 1
return route, sum_val
s = """75
95 64
17 47 82
18 35 87 10
20 04 82 47 65
19 01 23 75 03 34
88 02 77 73 07 63 67
99 65 04 28 06 16 70 92
41 41 26 56 83 40 80 70 33
41 48 72 33 47 32 37 16 94 29
53 71 44 65 25 43 91 52 97 51 14
70 11 33 28 77 73 17 78 39 68 17 57
91 71 52 38 17 14 91 43 58 50 27 29 48
63 66 04 68 89 53 67 30 73 16 69 87 40 31
04 62 98 27 23 09 70 98 73 93 38 53 60 04 23"""
lines = s.splitlines()
nums = []
for line in lines:
line_list = [int(i) for i in line.split(' ')]
nums.append(line_list)
possible_route = 2 ** (len(nums) - 1)
print("Possible routs: ", possible_route)
max_sum = 0
for i in range(possible_route):
route, sum_val = get_sum_by_route(i, nums)
if sum_val > max_sum:
print("Max route updated", i)
print("Route: ", route)
max_sum = sum_val
print(max_sum) | python |
import numpy as np
from sort import algs
def test_bubblesort():
# 1) Test odd-sized vector + duplicate values
assert algs.bubblesort([1,2,4,0,1]) == [0,1,1,2,4]
# 2) Test even+duplicate values
assert algs.bubblesort([1,2,4,6,0,1]) == [0,1,1,2,4,6]
# 3) Test empty vector
assert algs.bubblesort([]) == []
# 4) Test single-element vectors
assert algs.bubblesort([1]) == [1]
# 5) Test single-value vectors
assert algs.bubblesort([1,1,1,1,1,1,1,1]) == [1,1,1,1,1,1,1,1]
# 6) Test vectors with negative values
assert algs.bubblesort([-2,-6,8,9,-4]) == [-6,-4,-2,8,9]
# 7) Test ordered and reverse-order lists of large size
    assert algs.bubblesort(list(range(1000))) == list(range(1000))
assert algs.bubblesort(list(reversed(range(1000)))) == list(range(1000))
# 8) Test vector of strings
assert algs.bubblesort(["will", "this", "work"]) == ["this", "will", "work"]
def test_quicksort():
# 1) Test odd-sized vector + duplicate values
assert algs.quicksort([1,2,4,0,1]) == [0,1,1,2,4]
# 2) Test even+duplicate values
assert algs.quicksort([1,2,4,6,0,1]) == [0,1,1,2,4,6]
# 3) Test empty vector
assert algs.quicksort([]) == []
# 4) Test single-element vectors
assert algs.quicksort([1]) == [1]
# 5) Test single-value vectors
assert algs.quicksort([1,1,1,1,1,1,1,1]) == [1,1,1,1,1,1,1,1]
# 6) Test vectors with negative values
assert algs.quicksort([-2,-6,8,9,-4]) == [-6,-4,-2,8,9]
# 7) Test ordered and reverse-order lists of large size
    assert algs.quicksort(list(range(1000))) == list(range(1000))
assert algs.quicksort(list(reversed(range(1000)))) == list(range(1000))
# 8) Test vector of strings
assert algs.quicksort(["will", "this", "work"]) == ["this", "will", "work"]
| python |
from service.resolver_base import ResolverBase
from service.rule_item_mutex import RuleItemMutex
# 6x6 anti-knight sudoku (six 2x3 boxes, no equal digits a knight's move apart)
# DB: the mutex rules have already been written
class Resolver1623(ResolverBase):
ANSWER_RANGE = ['1', '2', '3', '4', '5', '6']
def get_answer_range(self) -> []:
return Resolver1623.ANSWER_RANGE
def calculate_rules(self):
super().calculate_rules()
self.question_data.rules_list = [
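            # rows, columns and the six 2x3 boxes; the numbered groups further down
            # are the knight-move exclusion pairs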
            RuleItemMutex(self.question_data, '0,0;0,1;0,2;0,3;0,4;0,5'),
RuleItemMutex(self.question_data, '1,0;1,1;1,2;1,3;1,4;1,5'),
RuleItemMutex(self.question_data, '2,0;2,1;2,2;2,3;2,4;2,5'),
RuleItemMutex(self.question_data, '3,0;3,1;3,2;3,3;3,4;3,5'),
RuleItemMutex(self.question_data, '4,0;4,1;4,2;4,3;4,4;4,5'),
RuleItemMutex(self.question_data, '5,0;5,1;5,2;5,3;5,4;5,5'),
RuleItemMutex(self.question_data, '0,0;1,0;2,0;3,0;4,0;5,0'),
RuleItemMutex(self.question_data, '0,1;1,1;2,1;3,1;4,1;5,1'),
RuleItemMutex(self.question_data, '0,2;1,2;2,2;3,2;4,2;5,2'),
RuleItemMutex(self.question_data, '0,3;1,3;2,3;3,3;4,3;5,3'),
RuleItemMutex(self.question_data, '0,4;1,4;2,4;3,4;4,4;5,4'),
RuleItemMutex(self.question_data, '0,5;1,5;2,5;3,5;4,5;5,5'),
RuleItemMutex(self.question_data, '0,0;1,0;2,0;0,1;1,1;2,1'),
RuleItemMutex(self.question_data, '3,0;4,0;5,0;3,1;4,1;5,1'),
RuleItemMutex(self.question_data, '0,2;1,2;2,2;0,3;1,3;2,3'),
RuleItemMutex(self.question_data, '3,2;4,2;5,2;3,3;4,3;5,3'),
RuleItemMutex(self.question_data, '0,4;1,4;2,4;0,5;1,5;2,5'),
RuleItemMutex(self.question_data, '3,4;4,4;5,4;3,5;4,5;5,5'),
# 1
RuleItemMutex(self.question_data, '0,0;1,2'),
RuleItemMutex(self.question_data, '0,1;1,3'),
RuleItemMutex(self.question_data, '0,2;1,4'),
RuleItemMutex(self.question_data, '0,3;1,5'),
RuleItemMutex(self.question_data, '1,0;2,2'),
RuleItemMutex(self.question_data, '1,1;2,3'),
RuleItemMutex(self.question_data, '1,2;2,4'),
RuleItemMutex(self.question_data, '1,3;2,5'),
RuleItemMutex(self.question_data, '2,0;3,2'),
RuleItemMutex(self.question_data, '2,1;3,3'),
RuleItemMutex(self.question_data, '2,2;3,4'),
RuleItemMutex(self.question_data, '2,3;3,5'),
RuleItemMutex(self.question_data, '3,0;4,2'),
RuleItemMutex(self.question_data, '3,1;4,3'),
RuleItemMutex(self.question_data, '3,2;4,4'),
RuleItemMutex(self.question_data, '3,3;4,5'),
RuleItemMutex(self.question_data, '4,0;5,2'),
RuleItemMutex(self.question_data, '4,1;5,3'),
RuleItemMutex(self.question_data, '4,2;5,4'),
RuleItemMutex(self.question_data, '4,3;5,5'),
RuleItemMutex(self.question_data, '0,0;2,1'),
RuleItemMutex(self.question_data, '1,0;3,1'),
RuleItemMutex(self.question_data, '2,0;4,1'),
RuleItemMutex(self.question_data, '3,0;5,1'),
# 2
RuleItemMutex(self.question_data, '0,1;2,2'),
RuleItemMutex(self.question_data, '1,1;3,2'),
RuleItemMutex(self.question_data, '2,1;4,2'),
RuleItemMutex(self.question_data, '3,1;5,2'),
RuleItemMutex(self.question_data, '0,2;2,3'),
RuleItemMutex(self.question_data, '1,2;3,3'),
RuleItemMutex(self.question_data, '2,2;4,3'),
RuleItemMutex(self.question_data, '3,2;5,3'),
RuleItemMutex(self.question_data, '0,3;2,4'),
RuleItemMutex(self.question_data, '1,3;3,4'),
RuleItemMutex(self.question_data, '2,3;4,4'),
RuleItemMutex(self.question_data, '3,3;5,4'),
RuleItemMutex(self.question_data, '0,4;2,5'),
RuleItemMutex(self.question_data, '1,4;3,5'),
RuleItemMutex(self.question_data, '2,4;4,5'),
RuleItemMutex(self.question_data, '3,4;5,5'),
# 3
RuleItemMutex(self.question_data, '0,1;2,0'),
RuleItemMutex(self.question_data, '1,1;3,0'),
RuleItemMutex(self.question_data, '2,1;4,0'),
RuleItemMutex(self.question_data, '3,1;5,0'),
RuleItemMutex(self.question_data, '0,2;2,1'),
RuleItemMutex(self.question_data, '1,2;3,1'),
RuleItemMutex(self.question_data, '2,2;4,1'),
RuleItemMutex(self.question_data, '3,2;5,1'),
RuleItemMutex(self.question_data, '0,3;2,2'),
RuleItemMutex(self.question_data, '1,3;3,2'),
RuleItemMutex(self.question_data, '2,3;4,2'),
RuleItemMutex(self.question_data, '3,3;5,2'),
RuleItemMutex(self.question_data, '0,4;2,3'),
RuleItemMutex(self.question_data, '1,4;3,3'),
RuleItemMutex(self.question_data, '2,4;4,3'),
RuleItemMutex(self.question_data, '3,4;5,3'),
RuleItemMutex(self.question_data, '0,5;2,4'),
RuleItemMutex(self.question_data, '1,5;3,4'),
RuleItemMutex(self.question_data, '2,5;4,4'),
RuleItemMutex(self.question_data, '3,5;5,4'),
# 4
RuleItemMutex(self.question_data, '0,2;1,0'),
RuleItemMutex(self.question_data, '1,2;2,0'),
RuleItemMutex(self.question_data, '2,2;3,0'),
RuleItemMutex(self.question_data, '3,2;4,0'),
RuleItemMutex(self.question_data, '4,2;5,0'),
RuleItemMutex(self.question_data, '0,3;1,1'),
RuleItemMutex(self.question_data, '1,3;2,1'),
RuleItemMutex(self.question_data, '2,3;3,1'),
RuleItemMutex(self.question_data, '3,3;4,1'),
RuleItemMutex(self.question_data, '4,3;5,1'),
RuleItemMutex(self.question_data, '0,4;1,2'),
RuleItemMutex(self.question_data, '1,4;2,2'),
RuleItemMutex(self.question_data, '2,4;3,2'),
RuleItemMutex(self.question_data, '3,4;4,2'),
RuleItemMutex(self.question_data, '4,4;5,2'),
RuleItemMutex(self.question_data, '0,5;1,3'),
RuleItemMutex(self.question_data, '1,5;2,3'),
RuleItemMutex(self.question_data, '2,5;3,3'),
RuleItemMutex(self.question_data, '3,5;4,3'),
RuleItemMutex(self.question_data, '4,5;5,3'),
]
def calculate_editable_original_data(self):
super().calculate_editable_original_data()
for y_index in range(len(self.question_data.editable_original_data)):
for x_index in range(len(self.question_data.editable_original_data[y_index])):
if self.question_data.editable_original_data[y_index][x_index] == '':
self.question_data.editable_original_data[y_index][x_index] = '#'
| python |
from specusticc.data_preprocessing.preprocessed_data import PreprocessedData
from specusticc.model_testing.prediction_results import PredictionResults
class Tester:
def __init__(self, model, model_name: str, data: PreprocessedData):
self._model = model
self._data: PreprocessedData = data
self._model_name = model_name
self.prediction_results: PredictionResults = PredictionResults()
def test(self):
train_set = self._data.train_set
input_data = train_set.get_input(self._model_name)
output_data = train_set.get_output()
self.prediction_results.train_output = self._model.predict(input_data)
print("Evaluate on train data")
self._model.evaluate(input_data, output_data, batch_size=128)
test_sets = self._data.test_sets
self.prediction_results.test_output = []
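        # evaluate each test window separately and store its predictions in order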
for test_set in test_sets:
input_data = test_set.get_input(self._model_name)
output_data = test_set.get_output()
prediction = self._model.predict(input_data)
self.prediction_results.test_output.append(prediction)
print("Evaluate on test data")
self._model.evaluate(input_data, output_data, batch_size=128)
def get_test_results(self) -> PredictionResults:
return self.prediction_results
| python |
from .encodeClass import encoderClass
from .decodeClass import decoderClass
| python |
import os
import uuid
from typing import Generator
from flask import current_app
from unittest import TestCase
from contextlib import contextmanager
from alembic import command
from sqlalchemy import create_engine
from {{ cookiecutter.app_name }} import app
from {{ cookiecutter.app_name }}.extensions import db
DATABASE_URI_FORMATTER = 'postgresql://{username}:{password}@postgres:5432/{database}'
@contextmanager
def provision_database(config :dict) -> Generator[None, None, None]:
database_name = str(uuid.uuid4()).replace('-', '_')
postgres_database_uri = DATABASE_URI_FORMATTER.format(username='postgres', password=os.environ['PGPASSWORD'], database='postgres')
app_database_uri = DATABASE_URI_FORMATTER.format(username='{{ cookiecutter.app_name }}', password=os.environ['APP_PASSWORD'], database=database_name)
migrate_database_uri = DATABASE_URI_FORMATTER.format(username='migrator', password=os.environ['MIGRATOR_PASSWORD'], database=database_name)
engine = create_engine(postgres_database_uri, isolation_level='AUTOCOMMIT')
connection = engine.connect()
connection.execute(f'create database "{database_name}" with owner migrator template template0 encoding "UTF-8"')
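    # point both the app and the migration runner at the freshly provisioned database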
config.update({
'SQLALCHEMY_DATABASE_URI': app_database_uri,
'SQLALCHEMY_DATABASE_MIGRATION_URI': migrate_database_uri,
})
context = app.create_app().test_request_context()
context.push()
    migrate_config = current_app.extensions['migrate'].migrate.get_config(directory=None, x_arg=None)
    command.upgrade(migrate_config, revision='head', sql=False, tag=None)
yield
# make sure all held connections are destroyed before dropping the database
db.session.remove()
db.engine.dispose()
context.pop()
connection.execute(f'drop database "{database_name}"')
connection.close()
class IntegrationTestCase(TestCase):
# override in test cases for custom test configuration
custom_test_config = None
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.client = None
def run(self, result=None):
# initialize global test configuration here...
global_test_config = {}
global_test_config.update(self.custom_test_config or {})
with provision_database(global_test_config):
self.client = current_app.test_client()
super().run(result)
| python |
# #https://docs.pytest.org/en/reorganize-docs/new-docs/user/assert_statements.html
# # Assertions are the condition or boolean expression which are always supposed to be true
# import pytest
# def vowels():
# return set('aeiou')
# @pytest.mark.skip
# def test_vowels():
# result = vowels()
# expected = set('aeiou')
# print ("this test has run")
# assert result == expected | python |
#
# author: Jungtaek Kim ([email protected])
# last updated: December 29, 2020
#
"""It is utilities for Gaussian process regression and
Student-:math:`t` process regression."""
import numpy as np
from bayeso.utils import utils_common
from bayeso import constants
@utils_common.validate_types
def get_prior_mu(prior_mu: constants.TYPING_UNION_CALLABLE_NONE, X: np.ndarray) -> np.ndarray:
"""
It computes the prior mean function values over inputs X.
:param prior_mu: prior mean function or None.
:type prior_mu: function or NoneType
:param X: inputs for prior mean function. Shape: (n, d) or (n, m, d).
:type X: numpy.ndarray
:returns: zero array, or array of prior mean function values. Shape: (n, 1).
:rtype: numpy.ndarray
:raises: AssertionError
"""
assert isinstance(X, np.ndarray)
assert callable(prior_mu) or prior_mu is None
assert len(X.shape) == 2 or len(X.shape) == 3
if prior_mu is None:
prior_mu_X = np.zeros((X.shape[0], 1))
else:
prior_mu_X = prior_mu(X)
assert len(prior_mu_X.shape) == 2
assert X.shape[0] == prior_mu_X.shape[0]
return prior_mu_X
@utils_common.validate_types
def validate_common_args(X_train: np.ndarray, Y_train: np.ndarray,
str_cov: str, prior_mu: constants.TYPING_UNION_CALLABLE_NONE,
debug: bool,
X_test: constants.TYPING_UNION_ARRAY_NONE=None,
) -> constants.TYPE_NONE:
"""
It validates the common arguments for various functions.
:param X_train: inputs. Shape: (n, d) or (n, m, d).
:type X_train: numpy.ndarray
:param Y_train: outputs. Shape: (n, 1).
:type Y_train: numpy.ndarray
:param str_cov: the name of covariance function.
:type str_cov: str.
:param prior_mu: None, or prior mean function.
:type prior_mu: NoneType, or function
:param debug: flag for printing log messages.
:type debug: bool.
:param X_test: inputs or None. Shape: (l, d) or (l, m, d).
:type X_test: numpy.ndarray, or NoneType, optional
:returns: None.
:rtype: NoneType
:raises: AssertionError
"""
assert isinstance(X_train, np.ndarray)
assert isinstance(Y_train, np.ndarray)
assert isinstance(str_cov, str)
assert callable(prior_mu) or prior_mu is None
assert isinstance(debug, bool)
assert len(X_train.shape) == 2 or len(X_train.shape) == 3
assert len(Y_train.shape) == 2
assert X_train.shape[0] == Y_train.shape[0]
assert isinstance(X_test, (np.ndarray, type(None)))
if X_test is not None:
if len(X_train.shape) == 2:
assert X_train.shape[1] == X_test.shape[1]
else:
assert X_train.shape[2] == X_test.shape[2]
| python |
import rclpy
import psutil
from rclpy.node import Node
from std_msgs.msg import Float32
class RpiMon(Node):
def __init__(self):
super().__init__('rpi_mon')
self.ramPublisher = self.create_publisher(Float32, 'freeram', 1)
timer_period = 2.0 # seconds
self.timer = self.create_timer(timer_period, self.timer_callback)
def timer_callback(self):
msg = Float32()
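        # psutil.virtual_memory()[2] is the used-memory percentage, so publish the free percentage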
msg.data = 100.0-psutil.virtual_memory()[2]
self.ramPublisher.publish(msg)
def main(args=None):
print('Hi from rpi_mon.')
rclpy.init(args=args)
rpi_mon = RpiMon()
rclpy.spin(rpi_mon)
rpi_mon.destroy_node()
rclpy.shutdown()
if __name__ == '__main__':
main()
| python |
from libspn.inference.type import InferenceType
from libspn.graph.op.base_sum import BaseSum
import libspn.utils as utils
@utils.register_serializable
class Sum(BaseSum):
"""A node representing a single sum in an SPN.
Args:
*values (input_like): Inputs providing input values to this node.
See :meth:`~libspn.Input.as_input` for possible values.
weights (input_like): Input providing weights node to this sum node.
See :meth:`~libspn.Input.as_input` for possible values. If set
to ``None``, the input is disconnected.
latent_indicators (input_like): Input providing IndicatorLeaf of an explicit latent variable
associated with this sum node. See :meth:`~libspn.Input.as_input`
for possible values. If set to ``None``, the input is disconnected.
name (str): Name of the node.
Attributes:
inference_type(InferenceType): Flag indicating the preferred inference
type for this node that will be used
during value calculation and learning.
Can be changed at any time and will be
used during the next inference/learning
op generation.
"""
def __init__(self, *values, weights=None, latent_indicators=None,
inference_type=InferenceType.MARGINAL,
sample_prob=None, name="Sum"):
super().__init__(
*values, num_sums=1, weights=weights, latent_indicators=latent_indicators,
inference_type=inference_type, sample_prob=sample_prob, name=name)
| python |
##############################################
##############################################
###### Predict the Bear ######################
# Flask app that uses a model trained with the Fast.ai v2 library
# following an example in the upcoming book "Deep Learning for Coders
# with fastai and PyTorch: AI Applications Without a PhD" by
# Jeremy Howard and Sylvain Gugger.
##############################################
# Project put together by Javier Ideami
# Email: [email protected]
# Web: ideami.com
##############################################
import numpy as np
from flask import Flask, request, render_template
import pickle
from resources.utils import *
from fastai2.vision.widgets import *
from fastai2.imports import *
import os
cwd = os.getcwd()
path = Path()
Path().ls(file_exts='.pkl')
application = Flask(__name__)
model = load_learner(path/'model/export.pkl')
#Defining the home page for the web service
@application.route('/')
def home():
return render_template('index.html')
#Writing api for inference using the loaded model
@application.route('/predict',methods=['POST'])
#Predict method that uses the trained model to predict the kind of bear in the picture we uploaded
def predict():
#labels = ['grizzly','black','teddy']
file = request.files['file']
#Store the uploaded images in a temporary folder
if file:
filename = file.filename
file.save(os.path.join("resources/tmp", filename))
to_predict = "resources/tmp/"+filename
#Getting the prediction from the model
prediction=model.predict(to_predict)
#Render the result in the html template
return render_template('index.html', prediction_text='Your Prediction : {} '.format(prediction[0]))
if __name__ == "__main__":
#run the application
application.run(host='0.0.0.0')
| python |
import numpy as np
from nexpy.gui.datadialogs import NXDialog, GridParameters
from nexpy.gui.utils import report_error
from nexusformat.nexus import NXfield, NXdata, NeXusError
from nexusformat.nexus.tree import centers
def show_dialog():
try:
dialog = ConvertDialog()
dialog.show()
except NeXusError as error:
report_error("Converting to (Q,E)", error)
class ConvertDialog(NXDialog):
def __init__(self, parent=None):
super(ConvertDialog, self).__init__(parent)
self.select_entry()
self.parameters = GridParameters()
self.parameters.add('Ei', self.entry['instrument/monochromator/energy'],
'Incident Energy')
self.parameters.add('dQ', self.round(np.sqrt(self.Ei/2)/50), 'Q Step')
self.parameters.add('dE', self.round(self.Ei/50), 'Energy Step')
self.set_layout(self.entry_layout,
self.parameters.grid(),
self.action_buttons(('Plot', self.plot_data),
('Save', self.save_data)),
self.close_buttons())
self.setWindowTitle('Converting to (Q,E)')
@property
def Ei(self):
return self.parameters['Ei'].value
@property
def dQ(self):
return self.parameters['dQ'].value
@property
def dE(self):
return self.parameters['dE'].value
def read_parameters(self):
self.L1 = - self.entry['sample/distance']
self.L2 = np.mean(self.entry['instrument/detector/distance'])
self.m1 = self.entry['monitor1']
self.t_m1 = self.m1.moment()
self.d_m1 = self.entry['monitor1/distance']
def convert_tof(self, tof):
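        # Assumed neutron conversion constants: E[meV] ~ 2.0721 * k^2 [1/Angstrom^2],
        # and 1588.254 us is the flight time over 1 m for k = 1 1/Angstrom.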
ki = np.sqrt(self.Ei / 2.0721)
ts = self.t_m1 + 1588.254 * (self.L1 - self.d_m1) / ki
kf = 1588.254 * self.L2 / (tof - ts)
eps = self.Ei - 2.0721*kf**2
return eps
def convert_QE(self):
"""Convert S(phi,eps) to S(Q,eps)"""
self.read_parameters()
Ei = self.Ei
dQ = self.dQ
dE = self.dE
signal = self.entry['data'].nxsignal
pol = centers(self.entry['data/polar_angle'], signal.shape[0])
tof = centers(self.entry['data/time_of_flight'], signal.shape[1])
en = self.convert_tof(tof)
idx_max = min(np.where(np.abs(en-0.75*Ei)<0.1)[0])
en = en[:idx_max]
data = signal.nxdata[:,:idx_max]
if self.entry['data'].nxerrors:
errors = self.entry['data'].nxerrors.nxdata[:]
Q = np.zeros((len(pol), len(en)))
E = np.zeros((len(pol), len(en)))
for i in range(0,len(pol)):
p = pol[i]
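            # Momentum transfer from the scattering triangle:
            # Q^2 = k_i^2 + k_f^2 - 2*k_i*k_f*cos(phi), expressed in energy units via E = 2.0721*k^2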
Q[i,:] = np.array(np.sqrt((2*Ei - en - 2*np.sqrt(Ei*(Ei-en))
* np.cos(p*np.pi/180.0))/2.0721))
E[i,:] = np.array(en)
s = Q.shape
Qin = Q.reshape(s[0]*s[1])
Ein = E.reshape(s[0]*s[1])
datain = data.reshape(s[0]*s[1])
if self.entry['data'].nxerrors:
errorsin = errors.reshape(s[0]*s[1])
qmin = Q.min()
qmax = Q.max()
emin = E.min()
emax = E.max()
NQ = int((qmax-qmin)/dQ) + 1
NE = int((emax-emin)/dE) + 1
Qb = np.linspace(qmin, qmax, NQ)
Eb = np.linspace(emin, emax, NE)
#histogram and normalize
norm, nbin = np.histogramdd((Ein,Qin), bins=(Eb,Qb))
hist, hbin = np.histogramdd((Ein,Qin), bins=(Eb,Qb), weights=datain)
if self.entry['data'].nxerrors:
histe, hbin = np.histogramdd((Ein,Qin), bins=(Eb,Qb), weights=errorsin*errorsin)
histe = histe**0.5
err = histe/norm
I = NXfield(hist/norm, name='S(Q,E)')
Qb = NXfield(Qb[:-1]+dQ/2., name='Q')
Eb = NXfield(Eb[:-1]+dE/2., name='E')
result = NXdata(I, (Eb, Qb))
if self.entry.data.nxerrors:
result.errors = NXfield(err)
return result
def round(self, x, prec=2, base=.05):
return round(base * round(float(x)/base), prec)
def plot_data(self):
self.convert_QE().plot()
def save_data(self):
self.entry['sqe'] = self.convert_QE()
| python |
from riemann.tx import tx_builder
from riemann import simple, script
from riemann import utils as rutils
from riemann.encoding import addresses
from workshop import crypto
from workshop.transactions import spend_utxo
from riemann import tx
'''
This is a hash timelock contract. It locks BTC until a timeout, or until a
specific secret is revealed.
HTLCs are used in cross-chain swaps, and are the core primitive for updating
lightning channels. Because of this, they can also be used to build cool things
like submarine (lightning-to-mainnet) atomic swaps.
Basically, an HTLC has 2 paths: execute and refund. The execute path checks a
secret against a pre-committed digest, and validates the executor's signature.
The refund path checks a timeout, and validates the funder's signature.
This script must be parameterized with a 32 byte hash, a timeout, and both
parties' pubkeyhashes.
# WARNING: This is an example. Do not use it in production.
'''
htlc_script = \
'OP_IF ' \
'OP_SHA256 {secret_hash} OP_EQUALVERIFY ' \
'OP_DUP OP_HASH160 {pkh0} ' \
'OP_ELSE ' \
'{timeout} OP_CHECKLOCKTIMEVERIFY OP_DROP ' \
'OP_DUP OP_HASH160 {pkh1} ' \
'OP_ENDIF ' \
'OP_EQUALVERIFY ' \
'OP_CHECKSIG'
def build_htlc_script(
secret_hash: bytes,
redeemer_pkh: bytes,
timeout: int,
funder_pkh: bytes
) -> str:
'''
Parameterizes the HTLC script with the arguments.
'''
if len(secret_hash) != 32:
raise ValueError('Expected a 32-byte digest. '
f'Got {len(secret_hash)} bytes')
if len(redeemer_pkh) != 20:
raise ValueError('Expected a 20-byte redeemer pubkeyhash. '
f'Got {len(redeemer_pkh)} bytes')
if len(funder_pkh) != 20:
raise ValueError('Expected a 20-byte funder pubkeyhash. '
                         f'Got {len(funder_pkh)} bytes')
return htlc_script.format(
secret_hash=secret_hash.hex(),
pkh0=rutils.sha256(redeemer_pkh).hex(),
        timeout=rutils.i2le(timeout).hex(),
pkh1=rutils.sha256(funder_pkh).hex())
def htlc_address(
secret_hash: bytes,
redeemer_pkh: bytes,
timeout: int,
funder_pkh: bytes
) -> str:
'''Parameterizes the script, and returns the corresponding address'''
s = build_htlc_script(secret_hash, redeemer_pkh, timeout, funder_pkh)
return addresses.make_p2wsh_address(s)
def p2htlc_output(
value: int,
secret_hash: bytes,
redeemer_pkh: bytes,
timeout: int,
funder_pkh: bytes
) -> tx.TxOut:
'''Parameterizes the script, and creates an output paying that address'''
address = htlc_address(secret_hash, redeemer_pkh, timeout, funder_pkh)
return simple.output(value, address)
def htlc_refund_witness(
htlc_script: str,
signature: bytes,
pubkey: bytes
) -> tx.InputWitness:
'''
Given a signature, creates a witness for the refund path of the HTLC
The b'\x00' corresponds to OP_FALSE
'''
serialized = script.serialize(htlc_script)
return tx_builder.make_witness([signature, pubkey, b'\x00', serialized])
def htlc_execute_witness(
htlc_script: str,
signature: bytes,
pubkey: bytes,
secret: bytes
) -> tx.InputWitness:
'''
Given a signature and the secret, makes a witness for the execute path of
the HTLC.
The b'\x01' corresponds to OP_TRUE
'''
serialized = script.serialize(htlc_script)
return tx_builder.make_witness(
[signature, pubkey, secret, b'\x01', serialized]
)
def spend_htlc_transaction(
tx_id: str,
index: int,
value: int,
address: str,
timeout: int = 0
) -> tx.Tx:
'''
Creates an unsigned txn that sends funds from an HTLC to a specified
address.
    Note that this step requires knowledge only of the timeout. An exercise tx
can safely leave this at 0.
'''
tx_in = spend_utxo(tx_id, index)
tx_out = simple.output(value, address)
return simple.unsigned_witness_tx( # type: ignore
tx_ins=[tx_in],
tx_outs=[tx_out],
locktime=timeout)
def signed_refund_htlc_transaction(
secret_hash: bytes,
redeemer_pkh: bytes,
timeout: int,
funder_pkh: bytes,
tx_id: str,
index: int,
prevout_value: int,
address: str,
privkey: bytes,
fee: int = 0
) -> tx.Tx:
'''
Builds an entire Refund HTLC spend from scratch.
'''
# build the unsigned version of the transaction
t = spend_htlc_transaction(
tx_id,
index,
prevout_value - fee,
address,
timeout)
# Prep the witness program
s = build_htlc_script(secret_hash, redeemer_pkh, timeout, funder_pkh)
serialized_script = script.serialize(s)
script_len = len(serialized_script)
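    # For P2WSH the BIP143 sighash hashes the witness script serialized with its
    # varint length prefix, so prepend the length before computing the digest.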
prepended_script = tx.VarInt(script_len).to_bytes() + serialized_script
# calculate sighash using the witness program
sighash = t.sighash_all(
index=index,
script=prepended_script,
prevout_value=rutils.i2le_padded(prevout_value, 8))
# sign it and make the witness
signature = crypto.sign_digest(sighash, privkey)
witness = htlc_refund_witness(s, signature, crypto.priv_to_pub(privkey))
# insert the witness into the tx
return t.copy(tx_witnesses=[witness])
| python |
#!/usr/bin/env python
# Part of sniffMyPackets framework.
# GeoIP Lookup modules to cut down on code changes.
import pygeoip
from canari.config import config
def lookup_geo(ip):
try:
# homelat = config['geoip/homelat'].strip('\'')
# homelng = config['geoip/homelng'].strip('\'')
db = config['geoip/db'].strip('\'')
try:
gi = pygeoip.GeoIP(db)
except Exception as e:
return str(e)
rec = gi.record_by_addr(ip)
if rec is not None:
return rec
except Exception as e:
return str(e)
| python |
#python3 code
def count(i,s):
ans=0
for j in range(i,len(s)):
if(s[j]=="<"):
ans+=1
return ans
def higher(s):
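    # For every '>', count() tallies the '<' characters to its right; each such
    # pair contributes 2 to the final total.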
res=0
for i in range(len(s)):
if(s[i]==">"):
b=count(i,s)
res=res+(b*2)
return res
def solution(s):
# Your code here
result=higher(s)
return result
| python |
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from .. import _utilities
from . import outputs
from ._inputs import *
__all__ = ['FleetArgs', 'Fleet']
@pulumi.input_type
class FleetArgs:
def __init__(__self__, *,
compute_capacity: pulumi.Input['FleetComputeCapacityArgs'],
instance_type: pulumi.Input[str],
description: Optional[pulumi.Input[str]] = None,
disconnect_timeout_in_seconds: Optional[pulumi.Input[int]] = None,
display_name: Optional[pulumi.Input[str]] = None,
domain_join_info: Optional[pulumi.Input['FleetDomainJoinInfoArgs']] = None,
enable_default_internet_access: Optional[pulumi.Input[bool]] = None,
fleet_type: Optional[pulumi.Input[str]] = None,
iam_role_arn: Optional[pulumi.Input[str]] = None,
idle_disconnect_timeout_in_seconds: Optional[pulumi.Input[int]] = None,
image_arn: Optional[pulumi.Input[str]] = None,
image_name: Optional[pulumi.Input[str]] = None,
max_user_duration_in_seconds: Optional[pulumi.Input[int]] = None,
name: Optional[pulumi.Input[str]] = None,
stream_view: Optional[pulumi.Input[str]] = None,
tags: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
vpc_config: Optional[pulumi.Input['FleetVpcConfigArgs']] = None):
"""
The set of arguments for constructing a Fleet resource.
:param pulumi.Input['FleetComputeCapacityArgs'] compute_capacity: Configuration block for the desired capacity of the fleet. See below.
:param pulumi.Input[str] instance_type: Instance type to use when launching fleet instances.
:param pulumi.Input[str] description: Description to display.
:param pulumi.Input[int] disconnect_timeout_in_seconds: Amount of time that a streaming session remains active after users disconnect.
:param pulumi.Input[str] display_name: Human-readable friendly name for the AppStream fleet.
:param pulumi.Input['FleetDomainJoinInfoArgs'] domain_join_info: Configuration block for the name of the directory and organizational unit (OU) to use to join the fleet to a Microsoft Active Directory domain. See below.
:param pulumi.Input[bool] enable_default_internet_access: Enables or disables default internet access for the fleet.
:param pulumi.Input[str] fleet_type: Fleet type. Valid values are: `ON_DEMAND`, `ALWAYS_ON`
:param pulumi.Input[str] iam_role_arn: ARN of the IAM role to apply to the fleet.
:param pulumi.Input[int] idle_disconnect_timeout_in_seconds: Amount of time that users can be idle (inactive) before they are disconnected from their streaming session and the `disconnect_timeout_in_seconds` time interval begins.
:param pulumi.Input[str] image_arn: ARN of the public, private, or shared image to use.
:param pulumi.Input[str] image_name: Name of the image used to create the fleet.
:param pulumi.Input[int] max_user_duration_in_seconds: Maximum amount of time that a streaming session can remain active, in seconds.
:param pulumi.Input[str] name: Unique name for the fleet.
:param pulumi.Input[str] stream_view: AppStream 2.0 view that is displayed to your users when they stream from the fleet. When `APP` is specified, only the windows of applications opened by users display. When `DESKTOP` is specified, the standard desktop that is provided by the operating system displays.
:param pulumi.Input[Mapping[str, pulumi.Input[str]]] tags: Map of tags to attach to AppStream instances.
:param pulumi.Input['FleetVpcConfigArgs'] vpc_config: Configuration block for the VPC configuration for the image builder. See below.
"""
pulumi.set(__self__, "compute_capacity", compute_capacity)
pulumi.set(__self__, "instance_type", instance_type)
if description is not None:
pulumi.set(__self__, "description", description)
if disconnect_timeout_in_seconds is not None:
pulumi.set(__self__, "disconnect_timeout_in_seconds", disconnect_timeout_in_seconds)
if display_name is not None:
pulumi.set(__self__, "display_name", display_name)
if domain_join_info is not None:
pulumi.set(__self__, "domain_join_info", domain_join_info)
if enable_default_internet_access is not None:
pulumi.set(__self__, "enable_default_internet_access", enable_default_internet_access)
if fleet_type is not None:
pulumi.set(__self__, "fleet_type", fleet_type)
if iam_role_arn is not None:
pulumi.set(__self__, "iam_role_arn", iam_role_arn)
if idle_disconnect_timeout_in_seconds is not None:
pulumi.set(__self__, "idle_disconnect_timeout_in_seconds", idle_disconnect_timeout_in_seconds)
if image_arn is not None:
pulumi.set(__self__, "image_arn", image_arn)
if image_name is not None:
pulumi.set(__self__, "image_name", image_name)
if max_user_duration_in_seconds is not None:
pulumi.set(__self__, "max_user_duration_in_seconds", max_user_duration_in_seconds)
if name is not None:
pulumi.set(__self__, "name", name)
if stream_view is not None:
pulumi.set(__self__, "stream_view", stream_view)
if tags is not None:
pulumi.set(__self__, "tags", tags)
if vpc_config is not None:
pulumi.set(__self__, "vpc_config", vpc_config)
@property
@pulumi.getter(name="computeCapacity")
def compute_capacity(self) -> pulumi.Input['FleetComputeCapacityArgs']:
"""
Configuration block for the desired capacity of the fleet. See below.
"""
return pulumi.get(self, "compute_capacity")
@compute_capacity.setter
def compute_capacity(self, value: pulumi.Input['FleetComputeCapacityArgs']):
pulumi.set(self, "compute_capacity", value)
@property
@pulumi.getter(name="instanceType")
def instance_type(self) -> pulumi.Input[str]:
"""
Instance type to use when launching fleet instances.
"""
return pulumi.get(self, "instance_type")
@instance_type.setter
def instance_type(self, value: pulumi.Input[str]):
pulumi.set(self, "instance_type", value)
@property
@pulumi.getter
def description(self) -> Optional[pulumi.Input[str]]:
"""
Description to display.
"""
return pulumi.get(self, "description")
@description.setter
def description(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "description", value)
@property
@pulumi.getter(name="disconnectTimeoutInSeconds")
def disconnect_timeout_in_seconds(self) -> Optional[pulumi.Input[int]]:
"""
Amount of time that a streaming session remains active after users disconnect.
"""
return pulumi.get(self, "disconnect_timeout_in_seconds")
@disconnect_timeout_in_seconds.setter
def disconnect_timeout_in_seconds(self, value: Optional[pulumi.Input[int]]):
pulumi.set(self, "disconnect_timeout_in_seconds", value)
@property
@pulumi.getter(name="displayName")
def display_name(self) -> Optional[pulumi.Input[str]]:
"""
Human-readable friendly name for the AppStream fleet.
"""
return pulumi.get(self, "display_name")
@display_name.setter
def display_name(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "display_name", value)
@property
@pulumi.getter(name="domainJoinInfo")
def domain_join_info(self) -> Optional[pulumi.Input['FleetDomainJoinInfoArgs']]:
"""
Configuration block for the name of the directory and organizational unit (OU) to use to join the fleet to a Microsoft Active Directory domain. See below.
"""
return pulumi.get(self, "domain_join_info")
@domain_join_info.setter
def domain_join_info(self, value: Optional[pulumi.Input['FleetDomainJoinInfoArgs']]):
pulumi.set(self, "domain_join_info", value)
@property
@pulumi.getter(name="enableDefaultInternetAccess")
def enable_default_internet_access(self) -> Optional[pulumi.Input[bool]]:
"""
Enables or disables default internet access for the fleet.
"""
return pulumi.get(self, "enable_default_internet_access")
@enable_default_internet_access.setter
def enable_default_internet_access(self, value: Optional[pulumi.Input[bool]]):
pulumi.set(self, "enable_default_internet_access", value)
@property
@pulumi.getter(name="fleetType")
def fleet_type(self) -> Optional[pulumi.Input[str]]:
"""
Fleet type. Valid values are: `ON_DEMAND`, `ALWAYS_ON`
"""
return pulumi.get(self, "fleet_type")
@fleet_type.setter
def fleet_type(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "fleet_type", value)
@property
@pulumi.getter(name="iamRoleArn")
def iam_role_arn(self) -> Optional[pulumi.Input[str]]:
"""
ARN of the IAM role to apply to the fleet.
"""
return pulumi.get(self, "iam_role_arn")
@iam_role_arn.setter
def iam_role_arn(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "iam_role_arn", value)
@property
@pulumi.getter(name="idleDisconnectTimeoutInSeconds")
def idle_disconnect_timeout_in_seconds(self) -> Optional[pulumi.Input[int]]:
"""
Amount of time that users can be idle (inactive) before they are disconnected from their streaming session and the `disconnect_timeout_in_seconds` time interval begins.
"""
return pulumi.get(self, "idle_disconnect_timeout_in_seconds")
@idle_disconnect_timeout_in_seconds.setter
def idle_disconnect_timeout_in_seconds(self, value: Optional[pulumi.Input[int]]):
pulumi.set(self, "idle_disconnect_timeout_in_seconds", value)
@property
@pulumi.getter(name="imageArn")
def image_arn(self) -> Optional[pulumi.Input[str]]:
"""
ARN of the public, private, or shared image to use.
"""
return pulumi.get(self, "image_arn")
@image_arn.setter
def image_arn(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "image_arn", value)
@property
@pulumi.getter(name="imageName")
def image_name(self) -> Optional[pulumi.Input[str]]:
"""
Name of the image used to create the fleet.
"""
return pulumi.get(self, "image_name")
@image_name.setter
def image_name(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "image_name", value)
@property
@pulumi.getter(name="maxUserDurationInSeconds")
def max_user_duration_in_seconds(self) -> Optional[pulumi.Input[int]]:
"""
Maximum amount of time that a streaming session can remain active, in seconds.
"""
return pulumi.get(self, "max_user_duration_in_seconds")
@max_user_duration_in_seconds.setter
def max_user_duration_in_seconds(self, value: Optional[pulumi.Input[int]]):
pulumi.set(self, "max_user_duration_in_seconds", value)
@property
@pulumi.getter
def name(self) -> Optional[pulumi.Input[str]]:
"""
Unique name for the fleet.
"""
return pulumi.get(self, "name")
@name.setter
def name(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "name", value)
@property
@pulumi.getter(name="streamView")
def stream_view(self) -> Optional[pulumi.Input[str]]:
"""
AppStream 2.0 view that is displayed to your users when they stream from the fleet. When `APP` is specified, only the windows of applications opened by users display. When `DESKTOP` is specified, the standard desktop that is provided by the operating system displays.
"""
return pulumi.get(self, "stream_view")
@stream_view.setter
def stream_view(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "stream_view", value)
@property
@pulumi.getter
def tags(self) -> Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]:
"""
Map of tags to attach to AppStream instances.
"""
return pulumi.get(self, "tags")
@tags.setter
def tags(self, value: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]):
pulumi.set(self, "tags", value)
@property
@pulumi.getter(name="vpcConfig")
def vpc_config(self) -> Optional[pulumi.Input['FleetVpcConfigArgs']]:
"""
Configuration block for the VPC configuration for the image builder. See below.
"""
return pulumi.get(self, "vpc_config")
@vpc_config.setter
def vpc_config(self, value: Optional[pulumi.Input['FleetVpcConfigArgs']]):
pulumi.set(self, "vpc_config", value)
@pulumi.input_type
class _FleetState:
def __init__(__self__, *,
arn: Optional[pulumi.Input[str]] = None,
compute_capacity: Optional[pulumi.Input['FleetComputeCapacityArgs']] = None,
created_time: Optional[pulumi.Input[str]] = None,
description: Optional[pulumi.Input[str]] = None,
disconnect_timeout_in_seconds: Optional[pulumi.Input[int]] = None,
display_name: Optional[pulumi.Input[str]] = None,
domain_join_info: Optional[pulumi.Input['FleetDomainJoinInfoArgs']] = None,
enable_default_internet_access: Optional[pulumi.Input[bool]] = None,
fleet_type: Optional[pulumi.Input[str]] = None,
iam_role_arn: Optional[pulumi.Input[str]] = None,
idle_disconnect_timeout_in_seconds: Optional[pulumi.Input[int]] = None,
image_arn: Optional[pulumi.Input[str]] = None,
image_name: Optional[pulumi.Input[str]] = None,
instance_type: Optional[pulumi.Input[str]] = None,
max_user_duration_in_seconds: Optional[pulumi.Input[int]] = None,
name: Optional[pulumi.Input[str]] = None,
state: Optional[pulumi.Input[str]] = None,
stream_view: Optional[pulumi.Input[str]] = None,
tags: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
tags_all: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
vpc_config: Optional[pulumi.Input['FleetVpcConfigArgs']] = None):
"""
Input properties used for looking up and filtering Fleet resources.
:param pulumi.Input[str] arn: ARN of the appstream fleet.
:param pulumi.Input['FleetComputeCapacityArgs'] compute_capacity: Configuration block for the desired capacity of the fleet. See below.
:param pulumi.Input[str] created_time: Date and time, in UTC and extended RFC 3339 format, when the fleet was created.
:param pulumi.Input[str] description: Description to display.
:param pulumi.Input[int] disconnect_timeout_in_seconds: Amount of time that a streaming session remains active after users disconnect.
:param pulumi.Input[str] display_name: Human-readable friendly name for the AppStream fleet.
:param pulumi.Input['FleetDomainJoinInfoArgs'] domain_join_info: Configuration block for the name of the directory and organizational unit (OU) to use to join the fleet to a Microsoft Active Directory domain. See below.
:param pulumi.Input[bool] enable_default_internet_access: Enables or disables default internet access for the fleet.
:param pulumi.Input[str] fleet_type: Fleet type. Valid values are: `ON_DEMAND`, `ALWAYS_ON`
:param pulumi.Input[str] iam_role_arn: ARN of the IAM role to apply to the fleet.
:param pulumi.Input[int] idle_disconnect_timeout_in_seconds: Amount of time that users can be idle (inactive) before they are disconnected from their streaming session and the `disconnect_timeout_in_seconds` time interval begins.
:param pulumi.Input[str] image_arn: ARN of the public, private, or shared image to use.
:param pulumi.Input[str] image_name: Name of the image used to create the fleet.
:param pulumi.Input[str] instance_type: Instance type to use when launching fleet instances.
:param pulumi.Input[int] max_user_duration_in_seconds: Maximum amount of time that a streaming session can remain active, in seconds.
:param pulumi.Input[str] name: Unique name for the fleet.
:param pulumi.Input[str] state: State of the fleet. Can be `STARTING`, `RUNNING`, `STOPPING` or `STOPPED`
:param pulumi.Input[str] stream_view: AppStream 2.0 view that is displayed to your users when they stream from the fleet. When `APP` is specified, only the windows of applications opened by users display. When `DESKTOP` is specified, the standard desktop that is provided by the operating system displays.
:param pulumi.Input[Mapping[str, pulumi.Input[str]]] tags: Map of tags to attach to AppStream instances.
:param pulumi.Input['FleetVpcConfigArgs'] vpc_config: Configuration block for the VPC configuration for the image builder. See below.
"""
if arn is not None:
pulumi.set(__self__, "arn", arn)
if compute_capacity is not None:
pulumi.set(__self__, "compute_capacity", compute_capacity)
if created_time is not None:
pulumi.set(__self__, "created_time", created_time)
if description is not None:
pulumi.set(__self__, "description", description)
if disconnect_timeout_in_seconds is not None:
pulumi.set(__self__, "disconnect_timeout_in_seconds", disconnect_timeout_in_seconds)
if display_name is not None:
pulumi.set(__self__, "display_name", display_name)
if domain_join_info is not None:
pulumi.set(__self__, "domain_join_info", domain_join_info)
if enable_default_internet_access is not None:
pulumi.set(__self__, "enable_default_internet_access", enable_default_internet_access)
if fleet_type is not None:
pulumi.set(__self__, "fleet_type", fleet_type)
if iam_role_arn is not None:
pulumi.set(__self__, "iam_role_arn", iam_role_arn)
if idle_disconnect_timeout_in_seconds is not None:
pulumi.set(__self__, "idle_disconnect_timeout_in_seconds", idle_disconnect_timeout_in_seconds)
if image_arn is not None:
pulumi.set(__self__, "image_arn", image_arn)
if image_name is not None:
pulumi.set(__self__, "image_name", image_name)
if instance_type is not None:
pulumi.set(__self__, "instance_type", instance_type)
if max_user_duration_in_seconds is not None:
pulumi.set(__self__, "max_user_duration_in_seconds", max_user_duration_in_seconds)
if name is not None:
pulumi.set(__self__, "name", name)
if state is not None:
pulumi.set(__self__, "state", state)
if stream_view is not None:
pulumi.set(__self__, "stream_view", stream_view)
if tags is not None:
pulumi.set(__self__, "tags", tags)
if tags_all is not None:
pulumi.set(__self__, "tags_all", tags_all)
if vpc_config is not None:
pulumi.set(__self__, "vpc_config", vpc_config)
@property
@pulumi.getter
def arn(self) -> Optional[pulumi.Input[str]]:
"""
ARN of the appstream fleet.
"""
return pulumi.get(self, "arn")
@arn.setter
def arn(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "arn", value)
@property
@pulumi.getter(name="computeCapacity")
def compute_capacity(self) -> Optional[pulumi.Input['FleetComputeCapacityArgs']]:
"""
Configuration block for the desired capacity of the fleet. See below.
"""
return pulumi.get(self, "compute_capacity")
@compute_capacity.setter
def compute_capacity(self, value: Optional[pulumi.Input['FleetComputeCapacityArgs']]):
pulumi.set(self, "compute_capacity", value)
@property
@pulumi.getter(name="createdTime")
def created_time(self) -> Optional[pulumi.Input[str]]:
"""
Date and time, in UTC and extended RFC 3339 format, when the fleet was created.
"""
return pulumi.get(self, "created_time")
@created_time.setter
def created_time(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "created_time", value)
@property
@pulumi.getter
def description(self) -> Optional[pulumi.Input[str]]:
"""
Description to display.
"""
return pulumi.get(self, "description")
@description.setter
def description(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "description", value)
@property
@pulumi.getter(name="disconnectTimeoutInSeconds")
def disconnect_timeout_in_seconds(self) -> Optional[pulumi.Input[int]]:
"""
Amount of time that a streaming session remains active after users disconnect.
"""
return pulumi.get(self, "disconnect_timeout_in_seconds")
@disconnect_timeout_in_seconds.setter
def disconnect_timeout_in_seconds(self, value: Optional[pulumi.Input[int]]):
pulumi.set(self, "disconnect_timeout_in_seconds", value)
@property
@pulumi.getter(name="displayName")
def display_name(self) -> Optional[pulumi.Input[str]]:
"""
Human-readable friendly name for the AppStream fleet.
"""
return pulumi.get(self, "display_name")
@display_name.setter
def display_name(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "display_name", value)
@property
@pulumi.getter(name="domainJoinInfo")
def domain_join_info(self) -> Optional[pulumi.Input['FleetDomainJoinInfoArgs']]:
"""
Configuration block for the name of the directory and organizational unit (OU) to use to join the fleet to a Microsoft Active Directory domain. See below.
"""
return pulumi.get(self, "domain_join_info")
@domain_join_info.setter
def domain_join_info(self, value: Optional[pulumi.Input['FleetDomainJoinInfoArgs']]):
pulumi.set(self, "domain_join_info", value)
@property
@pulumi.getter(name="enableDefaultInternetAccess")
def enable_default_internet_access(self) -> Optional[pulumi.Input[bool]]:
"""
Enables or disables default internet access for the fleet.
"""
return pulumi.get(self, "enable_default_internet_access")
@enable_default_internet_access.setter
def enable_default_internet_access(self, value: Optional[pulumi.Input[bool]]):
pulumi.set(self, "enable_default_internet_access", value)
@property
@pulumi.getter(name="fleetType")
def fleet_type(self) -> Optional[pulumi.Input[str]]:
"""
Fleet type. Valid values are: `ON_DEMAND`, `ALWAYS_ON`
"""
return pulumi.get(self, "fleet_type")
@fleet_type.setter
def fleet_type(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "fleet_type", value)
@property
@pulumi.getter(name="iamRoleArn")
def iam_role_arn(self) -> Optional[pulumi.Input[str]]:
"""
ARN of the IAM role to apply to the fleet.
"""
return pulumi.get(self, "iam_role_arn")
@iam_role_arn.setter
def iam_role_arn(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "iam_role_arn", value)
@property
@pulumi.getter(name="idleDisconnectTimeoutInSeconds")
def idle_disconnect_timeout_in_seconds(self) -> Optional[pulumi.Input[int]]:
"""
Amount of time that users can be idle (inactive) before they are disconnected from their streaming session and the `disconnect_timeout_in_seconds` time interval begins.
"""
return pulumi.get(self, "idle_disconnect_timeout_in_seconds")
@idle_disconnect_timeout_in_seconds.setter
def idle_disconnect_timeout_in_seconds(self, value: Optional[pulumi.Input[int]]):
pulumi.set(self, "idle_disconnect_timeout_in_seconds", value)
@property
@pulumi.getter(name="imageArn")
def image_arn(self) -> Optional[pulumi.Input[str]]:
"""
ARN of the public, private, or shared image to use.
"""
return pulumi.get(self, "image_arn")
@image_arn.setter
def image_arn(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "image_arn", value)
@property
@pulumi.getter(name="imageName")
def image_name(self) -> Optional[pulumi.Input[str]]:
"""
Name of the image used to create the fleet.
"""
return pulumi.get(self, "image_name")
@image_name.setter
def image_name(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "image_name", value)
@property
@pulumi.getter(name="instanceType")
def instance_type(self) -> Optional[pulumi.Input[str]]:
"""
Instance type to use when launching fleet instances.
"""
return pulumi.get(self, "instance_type")
@instance_type.setter
def instance_type(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "instance_type", value)
@property
@pulumi.getter(name="maxUserDurationInSeconds")
def max_user_duration_in_seconds(self) -> Optional[pulumi.Input[int]]:
"""
Maximum amount of time that a streaming session can remain active, in seconds.
"""
return pulumi.get(self, "max_user_duration_in_seconds")
@max_user_duration_in_seconds.setter
def max_user_duration_in_seconds(self, value: Optional[pulumi.Input[int]]):
pulumi.set(self, "max_user_duration_in_seconds", value)
@property
@pulumi.getter
def name(self) -> Optional[pulumi.Input[str]]:
"""
Unique name for the fleet.
"""
return pulumi.get(self, "name")
@name.setter
def name(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "name", value)
@property
@pulumi.getter
def state(self) -> Optional[pulumi.Input[str]]:
"""
State of the fleet. Can be `STARTING`, `RUNNING`, `STOPPING` or `STOPPED`
"""
return pulumi.get(self, "state")
@state.setter
def state(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "state", value)
@property
@pulumi.getter(name="streamView")
def stream_view(self) -> Optional[pulumi.Input[str]]:
"""
AppStream 2.0 view that is displayed to your users when they stream from the fleet. When `APP` is specified, only the windows of applications opened by users display. When `DESKTOP` is specified, the standard desktop that is provided by the operating system displays.
"""
return pulumi.get(self, "stream_view")
@stream_view.setter
def stream_view(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "stream_view", value)
@property
@pulumi.getter
def tags(self) -> Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]:
"""
Map of tags to attach to AppStream instances.
"""
return pulumi.get(self, "tags")
@tags.setter
def tags(self, value: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]):
pulumi.set(self, "tags", value)
@property
@pulumi.getter(name="tagsAll")
def tags_all(self) -> Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]:
return pulumi.get(self, "tags_all")
@tags_all.setter
def tags_all(self, value: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]):
pulumi.set(self, "tags_all", value)
@property
@pulumi.getter(name="vpcConfig")
def vpc_config(self) -> Optional[pulumi.Input['FleetVpcConfigArgs']]:
"""
Configuration block for the VPC configuration for the image builder. See below.
"""
return pulumi.get(self, "vpc_config")
@vpc_config.setter
def vpc_config(self, value: Optional[pulumi.Input['FleetVpcConfigArgs']]):
pulumi.set(self, "vpc_config", value)
class Fleet(pulumi.CustomResource):
@overload
def __init__(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
compute_capacity: Optional[pulumi.Input[pulumi.InputType['FleetComputeCapacityArgs']]] = None,
description: Optional[pulumi.Input[str]] = None,
disconnect_timeout_in_seconds: Optional[pulumi.Input[int]] = None,
display_name: Optional[pulumi.Input[str]] = None,
domain_join_info: Optional[pulumi.Input[pulumi.InputType['FleetDomainJoinInfoArgs']]] = None,
enable_default_internet_access: Optional[pulumi.Input[bool]] = None,
fleet_type: Optional[pulumi.Input[str]] = None,
iam_role_arn: Optional[pulumi.Input[str]] = None,
idle_disconnect_timeout_in_seconds: Optional[pulumi.Input[int]] = None,
image_arn: Optional[pulumi.Input[str]] = None,
image_name: Optional[pulumi.Input[str]] = None,
instance_type: Optional[pulumi.Input[str]] = None,
max_user_duration_in_seconds: Optional[pulumi.Input[int]] = None,
name: Optional[pulumi.Input[str]] = None,
stream_view: Optional[pulumi.Input[str]] = None,
tags: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
vpc_config: Optional[pulumi.Input[pulumi.InputType['FleetVpcConfigArgs']]] = None,
__props__=None):
"""
Provides an AppStream fleet.
## Example Usage
```python
import pulumi
import pulumi_aws as aws
test_fleet = aws.appstream.Fleet("testFleet",
compute_capacity=aws.appstream.FleetComputeCapacityArgs(
desired_instances=1,
),
description="test fleet",
display_name="test-fleet",
enable_default_internet_access=False,
fleet_type="ON_DEMAND",
idle_disconnect_timeout_in_seconds=60,
image_name="Amazon-AppStream2-Sample-Image-02-04-2019",
instance_type="stream.standard.large",
max_user_duration_in_seconds=600,
tags={
"TagName": "tag-value",
},
vpc_config=aws.appstream.FleetVpcConfigArgs(
subnet_ids=["subnet-06e9b13400c225127"],
))
```
## Import
`aws_appstream_fleet` can be imported using the id, e.g.,
```sh
$ pulumi import aws:appstream/fleet:Fleet example fleetNameExample
```
:param str resource_name: The name of the resource.
:param pulumi.ResourceOptions opts: Options for the resource.
:param pulumi.Input[pulumi.InputType['FleetComputeCapacityArgs']] compute_capacity: Configuration block for the desired capacity of the fleet. See below.
:param pulumi.Input[str] description: Description to display.
:param pulumi.Input[int] disconnect_timeout_in_seconds: Amount of time that a streaming session remains active after users disconnect.
:param pulumi.Input[str] display_name: Human-readable friendly name for the AppStream fleet.
:param pulumi.Input[pulumi.InputType['FleetDomainJoinInfoArgs']] domain_join_info: Configuration block for the name of the directory and organizational unit (OU) to use to join the fleet to a Microsoft Active Directory domain. See below.
:param pulumi.Input[bool] enable_default_internet_access: Enables or disables default internet access for the fleet.
:param pulumi.Input[str] fleet_type: Fleet type. Valid values are: `ON_DEMAND`, `ALWAYS_ON`
:param pulumi.Input[str] iam_role_arn: ARN of the IAM role to apply to the fleet.
:param pulumi.Input[int] idle_disconnect_timeout_in_seconds: Amount of time that users can be idle (inactive) before they are disconnected from their streaming session and the `disconnect_timeout_in_seconds` time interval begins.
:param pulumi.Input[str] image_arn: ARN of the public, private, or shared image to use.
:param pulumi.Input[str] image_name: Name of the image used to create the fleet.
:param pulumi.Input[str] instance_type: Instance type to use when launching fleet instances.
:param pulumi.Input[int] max_user_duration_in_seconds: Maximum amount of time that a streaming session can remain active, in seconds.
:param pulumi.Input[str] name: Unique name for the fleet.
:param pulumi.Input[str] stream_view: AppStream 2.0 view that is displayed to your users when they stream from the fleet. When `APP` is specified, only the windows of applications opened by users display. When `DESKTOP` is specified, the standard desktop that is provided by the operating system displays.
:param pulumi.Input[Mapping[str, pulumi.Input[str]]] tags: Map of tags to attach to AppStream instances.
:param pulumi.Input[pulumi.InputType['FleetVpcConfigArgs']] vpc_config: Configuration block for the VPC configuration for the image builder. See below.
"""
...
@overload
def __init__(__self__,
resource_name: str,
args: FleetArgs,
opts: Optional[pulumi.ResourceOptions] = None):
"""
Provides an AppStream fleet.
## Example Usage
```python
import pulumi
import pulumi_aws as aws
test_fleet = aws.appstream.Fleet("testFleet",
compute_capacity=aws.appstream.FleetComputeCapacityArgs(
desired_instances=1,
),
description="test fleet",
display_name="test-fleet",
enable_default_internet_access=False,
fleet_type="ON_DEMAND",
idle_disconnect_timeout_in_seconds=60,
image_name="Amazon-AppStream2-Sample-Image-02-04-2019",
instance_type="stream.standard.large",
max_user_duration_in_seconds=600,
tags={
"TagName": "tag-value",
},
vpc_config=aws.appstream.FleetVpcConfigArgs(
subnet_ids=["subnet-06e9b13400c225127"],
))
```
## Import
`aws_appstream_fleet` can be imported using the id, e.g.,
```sh
$ pulumi import aws:appstream/fleet:Fleet example fleetNameExample
```
:param str resource_name: The name of the resource.
:param FleetArgs args: The arguments to use to populate this resource's properties.
:param pulumi.ResourceOptions opts: Options for the resource.
"""
...
def __init__(__self__, resource_name: str, *args, **kwargs):
resource_args, opts = _utilities.get_resource_args_opts(FleetArgs, pulumi.ResourceOptions, *args, **kwargs)
if resource_args is not None:
__self__._internal_init(resource_name, opts, **resource_args.__dict__)
else:
__self__._internal_init(resource_name, *args, **kwargs)
def _internal_init(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
compute_capacity: Optional[pulumi.Input[pulumi.InputType['FleetComputeCapacityArgs']]] = None,
description: Optional[pulumi.Input[str]] = None,
disconnect_timeout_in_seconds: Optional[pulumi.Input[int]] = None,
display_name: Optional[pulumi.Input[str]] = None,
domain_join_info: Optional[pulumi.Input[pulumi.InputType['FleetDomainJoinInfoArgs']]] = None,
enable_default_internet_access: Optional[pulumi.Input[bool]] = None,
fleet_type: Optional[pulumi.Input[str]] = None,
iam_role_arn: Optional[pulumi.Input[str]] = None,
idle_disconnect_timeout_in_seconds: Optional[pulumi.Input[int]] = None,
image_arn: Optional[pulumi.Input[str]] = None,
image_name: Optional[pulumi.Input[str]] = None,
instance_type: Optional[pulumi.Input[str]] = None,
max_user_duration_in_seconds: Optional[pulumi.Input[int]] = None,
name: Optional[pulumi.Input[str]] = None,
stream_view: Optional[pulumi.Input[str]] = None,
tags: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
vpc_config: Optional[pulumi.Input[pulumi.InputType['FleetVpcConfigArgs']]] = None,
__props__=None):
if opts is None:
opts = pulumi.ResourceOptions()
if not isinstance(opts, pulumi.ResourceOptions):
raise TypeError('Expected resource options to be a ResourceOptions instance')
if opts.version is None:
opts.version = _utilities.get_version()
if opts.id is None:
if __props__ is not None:
raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource')
__props__ = FleetArgs.__new__(FleetArgs)
if compute_capacity is None and not opts.urn:
raise TypeError("Missing required property 'compute_capacity'")
__props__.__dict__["compute_capacity"] = compute_capacity
__props__.__dict__["description"] = description
__props__.__dict__["disconnect_timeout_in_seconds"] = disconnect_timeout_in_seconds
__props__.__dict__["display_name"] = display_name
__props__.__dict__["domain_join_info"] = domain_join_info
__props__.__dict__["enable_default_internet_access"] = enable_default_internet_access
__props__.__dict__["fleet_type"] = fleet_type
__props__.__dict__["iam_role_arn"] = iam_role_arn
__props__.__dict__["idle_disconnect_timeout_in_seconds"] = idle_disconnect_timeout_in_seconds
__props__.__dict__["image_arn"] = image_arn
__props__.__dict__["image_name"] = image_name
if instance_type is None and not opts.urn:
raise TypeError("Missing required property 'instance_type'")
__props__.__dict__["instance_type"] = instance_type
__props__.__dict__["max_user_duration_in_seconds"] = max_user_duration_in_seconds
__props__.__dict__["name"] = name
__props__.__dict__["stream_view"] = stream_view
__props__.__dict__["tags"] = tags
__props__.__dict__["vpc_config"] = vpc_config
__props__.__dict__["arn"] = None
__props__.__dict__["created_time"] = None
__props__.__dict__["state"] = None
__props__.__dict__["tags_all"] = None
super(Fleet, __self__).__init__(
'aws:appstream/fleet:Fleet',
resource_name,
__props__,
opts)
@staticmethod
def get(resource_name: str,
id: pulumi.Input[str],
opts: Optional[pulumi.ResourceOptions] = None,
arn: Optional[pulumi.Input[str]] = None,
compute_capacity: Optional[pulumi.Input[pulumi.InputType['FleetComputeCapacityArgs']]] = None,
created_time: Optional[pulumi.Input[str]] = None,
description: Optional[pulumi.Input[str]] = None,
disconnect_timeout_in_seconds: Optional[pulumi.Input[int]] = None,
display_name: Optional[pulumi.Input[str]] = None,
domain_join_info: Optional[pulumi.Input[pulumi.InputType['FleetDomainJoinInfoArgs']]] = None,
enable_default_internet_access: Optional[pulumi.Input[bool]] = None,
fleet_type: Optional[pulumi.Input[str]] = None,
iam_role_arn: Optional[pulumi.Input[str]] = None,
idle_disconnect_timeout_in_seconds: Optional[pulumi.Input[int]] = None,
image_arn: Optional[pulumi.Input[str]] = None,
image_name: Optional[pulumi.Input[str]] = None,
instance_type: Optional[pulumi.Input[str]] = None,
max_user_duration_in_seconds: Optional[pulumi.Input[int]] = None,
name: Optional[pulumi.Input[str]] = None,
state: Optional[pulumi.Input[str]] = None,
stream_view: Optional[pulumi.Input[str]] = None,
tags: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
tags_all: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
vpc_config: Optional[pulumi.Input[pulumi.InputType['FleetVpcConfigArgs']]] = None) -> 'Fleet':
"""
Get an existing Fleet resource's state with the given name, id, and optional extra
properties used to qualify the lookup.
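        ## Example Usage
        A minimal illustrative sketch (the resource name and fleet ID below are assumptions,
        not values taken from this provider):
        ```python
        import pulumi
        import pulumi_aws as aws
        existing = aws.appstream.Fleet.get("existingFleet", id="fleetNameExample")
        pulumi.export("fleetArn", existing.arn)
        ```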
:param str resource_name: The unique name of the resulting resource.
:param pulumi.Input[str] id: The unique provider ID of the resource to lookup.
:param pulumi.ResourceOptions opts: Options for the resource.
:param pulumi.Input[str] arn: ARN of the appstream fleet.
:param pulumi.Input[pulumi.InputType['FleetComputeCapacityArgs']] compute_capacity: Configuration block for the desired capacity of the fleet. See below.
:param pulumi.Input[str] created_time: Date and time, in UTC and extended RFC 3339 format, when the fleet was created.
:param pulumi.Input[str] description: Description to display.
:param pulumi.Input[int] disconnect_timeout_in_seconds: Amount of time that a streaming session remains active after users disconnect.
:param pulumi.Input[str] display_name: Human-readable friendly name for the AppStream fleet.
:param pulumi.Input[pulumi.InputType['FleetDomainJoinInfoArgs']] domain_join_info: Configuration block for the name of the directory and organizational unit (OU) to use to join the fleet to a Microsoft Active Directory domain. See below.
:param pulumi.Input[bool] enable_default_internet_access: Enables or disables default internet access for the fleet.
:param pulumi.Input[str] fleet_type: Fleet type. Valid values are: `ON_DEMAND`, `ALWAYS_ON`
:param pulumi.Input[str] iam_role_arn: ARN of the IAM role to apply to the fleet.
:param pulumi.Input[int] idle_disconnect_timeout_in_seconds: Amount of time that users can be idle (inactive) before they are disconnected from their streaming session and the `disconnect_timeout_in_seconds` time interval begins.
:param pulumi.Input[str] image_arn: ARN of the public, private, or shared image to use.
:param pulumi.Input[str] image_name: Name of the image used to create the fleet.
:param pulumi.Input[str] instance_type: Instance type to use when launching fleet instances.
:param pulumi.Input[int] max_user_duration_in_seconds: Maximum amount of time that a streaming session can remain active, in seconds.
:param pulumi.Input[str] name: Unique name for the fleet.
:param pulumi.Input[str] state: State of the fleet. Can be `STARTING`, `RUNNING`, `STOPPING` or `STOPPED`
:param pulumi.Input[str] stream_view: AppStream 2.0 view that is displayed to your users when they stream from the fleet. When `APP` is specified, only the windows of applications opened by users display. When `DESKTOP` is specified, the standard desktop that is provided by the operating system displays.
:param pulumi.Input[Mapping[str, pulumi.Input[str]]] tags: Map of tags to attach to AppStream instances.
:param pulumi.Input[pulumi.InputType['FleetVpcConfigArgs']] vpc_config: Configuration block for the VPC configuration for the image builder. See below.
"""
opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))
__props__ = _FleetState.__new__(_FleetState)
__props__.__dict__["arn"] = arn
__props__.__dict__["compute_capacity"] = compute_capacity
__props__.__dict__["created_time"] = created_time
__props__.__dict__["description"] = description
__props__.__dict__["disconnect_timeout_in_seconds"] = disconnect_timeout_in_seconds
__props__.__dict__["display_name"] = display_name
__props__.__dict__["domain_join_info"] = domain_join_info
__props__.__dict__["enable_default_internet_access"] = enable_default_internet_access
__props__.__dict__["fleet_type"] = fleet_type
__props__.__dict__["iam_role_arn"] = iam_role_arn
__props__.__dict__["idle_disconnect_timeout_in_seconds"] = idle_disconnect_timeout_in_seconds
__props__.__dict__["image_arn"] = image_arn
__props__.__dict__["image_name"] = image_name
__props__.__dict__["instance_type"] = instance_type
__props__.__dict__["max_user_duration_in_seconds"] = max_user_duration_in_seconds
__props__.__dict__["name"] = name
__props__.__dict__["state"] = state
__props__.__dict__["stream_view"] = stream_view
__props__.__dict__["tags"] = tags
__props__.__dict__["tags_all"] = tags_all
__props__.__dict__["vpc_config"] = vpc_config
return Fleet(resource_name, opts=opts, __props__=__props__)
@property
@pulumi.getter
def arn(self) -> pulumi.Output[str]:
"""
ARN of the appstream fleet.
"""
return pulumi.get(self, "arn")
@property
@pulumi.getter(name="computeCapacity")
def compute_capacity(self) -> pulumi.Output['outputs.FleetComputeCapacity']:
"""
Configuration block for the desired capacity of the fleet. See below.
"""
return pulumi.get(self, "compute_capacity")
@property
@pulumi.getter(name="createdTime")
def created_time(self) -> pulumi.Output[str]:
"""
Date and time, in UTC and extended RFC 3339 format, when the fleet was created.
"""
return pulumi.get(self, "created_time")
@property
@pulumi.getter
def description(self) -> pulumi.Output[str]:
"""
Description to display.
"""
return pulumi.get(self, "description")
@property
@pulumi.getter(name="disconnectTimeoutInSeconds")
def disconnect_timeout_in_seconds(self) -> pulumi.Output[int]:
"""
Amount of time that a streaming session remains active after users disconnect.
"""
return pulumi.get(self, "disconnect_timeout_in_seconds")
@property
@pulumi.getter(name="displayName")
def display_name(self) -> pulumi.Output[str]:
"""
Human-readable friendly name for the AppStream fleet.
"""
return pulumi.get(self, "display_name")
@property
@pulumi.getter(name="domainJoinInfo")
def domain_join_info(self) -> pulumi.Output['outputs.FleetDomainJoinInfo']:
"""
Configuration block for the name of the directory and organizational unit (OU) to use to join the fleet to a Microsoft Active Directory domain. See below.
"""
return pulumi.get(self, "domain_join_info")
@property
@pulumi.getter(name="enableDefaultInternetAccess")
def enable_default_internet_access(self) -> pulumi.Output[bool]:
"""
Enables or disables default internet access for the fleet.
"""
return pulumi.get(self, "enable_default_internet_access")
@property
@pulumi.getter(name="fleetType")
def fleet_type(self) -> pulumi.Output[str]:
"""
Fleet type. Valid values are: `ON_DEMAND`, `ALWAYS_ON`
"""
return pulumi.get(self, "fleet_type")
@property
@pulumi.getter(name="iamRoleArn")
def iam_role_arn(self) -> pulumi.Output[str]:
"""
ARN of the IAM role to apply to the fleet.
"""
return pulumi.get(self, "iam_role_arn")
@property
@pulumi.getter(name="idleDisconnectTimeoutInSeconds")
def idle_disconnect_timeout_in_seconds(self) -> pulumi.Output[Optional[int]]:
"""
Amount of time that users can be idle (inactive) before they are disconnected from their streaming session and the `disconnect_timeout_in_seconds` time interval begins.
"""
return pulumi.get(self, "idle_disconnect_timeout_in_seconds")
@property
@pulumi.getter(name="imageArn")
def image_arn(self) -> pulumi.Output[str]:
"""
ARN of the public, private, or shared image to use.
"""
return pulumi.get(self, "image_arn")
@property
@pulumi.getter(name="imageName")
def image_name(self) -> pulumi.Output[str]:
"""
Name of the image used to create the fleet.
"""
return pulumi.get(self, "image_name")
@property
@pulumi.getter(name="instanceType")
def instance_type(self) -> pulumi.Output[str]:
"""
Instance type to use when launching fleet instances.
"""
return pulumi.get(self, "instance_type")
@property
@pulumi.getter(name="maxUserDurationInSeconds")
def max_user_duration_in_seconds(self) -> pulumi.Output[int]:
"""
Maximum amount of time that a streaming session can remain active, in seconds.
"""
return pulumi.get(self, "max_user_duration_in_seconds")
@property
@pulumi.getter
def name(self) -> pulumi.Output[str]:
"""
Unique name for the fleet.
"""
return pulumi.get(self, "name")
@property
@pulumi.getter
def state(self) -> pulumi.Output[str]:
"""
State of the fleet. Can be `STARTING`, `RUNNING`, `STOPPING` or `STOPPED`
"""
return pulumi.get(self, "state")
@property
@pulumi.getter(name="streamView")
def stream_view(self) -> pulumi.Output[str]:
"""
AppStream 2.0 view that is displayed to your users when they stream from the fleet. When `APP` is specified, only the windows of applications opened by users display. When `DESKTOP` is specified, the standard desktop that is provided by the operating system displays.
"""
return pulumi.get(self, "stream_view")
@property
@pulumi.getter
def tags(self) -> pulumi.Output[Optional[Mapping[str, str]]]:
"""
Map of tags to attach to AppStream instances.
"""
return pulumi.get(self, "tags")
@property
@pulumi.getter(name="tagsAll")
def tags_all(self) -> pulumi.Output[Mapping[str, str]]:
return pulumi.get(self, "tags_all")
@property
@pulumi.getter(name="vpcConfig")
def vpc_config(self) -> pulumi.Output['outputs.FleetVpcConfig']:
"""
Configuration block for the VPC configuration for the image builder. See below.
"""
return pulumi.get(self, "vpc_config")
| python |
if x == 'none':
if False:
print('None')
elif x == None:
print('oh')
elif x == 12:
print('oh')
else:
print(123)
if foo:
foo()
elif bar:
bar()
else:
if baz:
baz()
elif garply:
garply()
else:
qux()
| python |
#!/usr/bin/env python
# -*- coding: UTF-8 -*-
# Source: http://www.runoob.com/python/python-exercise-example70.html
if __name__ == "__main__":
# s = input("please input a string:\n")
s = "Hello World"
print("the string has %d characters." % len(s))
| python |
"""
Orchestrator module
"""
import logging
import os
import re
import shutil
import traceback
from functools import wraps
from glob import glob
from io import open
import six
from halo import Halo
from tabulate import tabulate
from toscaparser.common.exception import ValidationError
from yaml.scanner import ScannerError
from termcolor import colored
from . import docker_interface, helper, protocol_helper
from .graph.nodes import Container, Software, Volume
from .graph.protocol import (CONTAINER_STATE_CREATED, CONTAINER_STATE_DELETED,
CONTAINER_STATE_RUNNING, SOFTWARE_STATE_ZOTTED,
STATE_RUNNING, VOLUME_STATE_CREATED,
VOLUME_STATE_DELETED)
from .helper import Logger
from .managers.container_manager import ContainerManager
from .managers.software_manager import SoftwareManager
from .managers.volume_manager import VolumeManager
from .storage import Memory
from .tosca_parser import get_tosca_template
try:
from os import scandir
except ImportError:
from scandir import scandir
class Orchestrator:
def update_memory(f):
"""decorator that update memory before execute function"""
@wraps(f)
def decorated_function(*args, **kwargs):
status, faulty = args[0]._update_state()
Logger.println('(update memory: {})'.format(
'ok' if status else 'fixed {}'.format(', '.join(faulty))))
return f(*args, **kwargs)
return decorated_function
def __init__(self,
log_handler=logging.NullHandler(),
quiet=True,
tmp_dir='/tmp/tosker',
                 data_dir='/tmp/tosker'):  # TODO: use /usr/lib/tosker instead
Logger.set(log_handler, quiet)
self._log = Logger.get(__name__)
self._tmp_dir = tmp_dir
# Setup Storage system (folder and class)
self._data_dir = data_dir
try:
os.makedirs(data_dir)
except os.error:
pass
Memory.set_db(data_dir)
@update_memory
def orchestrate(self, file_path, plan, inputs=None):
"""
Start the orchestration using the management protocols.
plan must be a list of tuple (component, full_operation)
"""
# Parse TOSCA file
tpl = self._parse_tosca(file_path, inputs)
if tpl is None:
return False
# Check plan format
if not self._check_plan_format(tpl, plan):
self._log.debug(plan)
Logger.print_error('Plan format not correct')
return False
# Create tmp directory for the template
self._create_tmp_dir(tpl)
# Load components state
if not self._load_component_state(tpl):
            Logger.print_error('Cannot load components state, '
'try to use "tosker prune" to hard reset.')
return False
self._log.debug('State: %s', ' '.join(
(c['name'] + '.' + c['state'] for c in Memory.get_comps(tpl.name))))
try:
# Check plan
self._print_loading_start('Check deployment plan... ')
for component, full_operation in plan:
try:
protocol_helper.can_execute(full_operation, component)
component.protocol.execute_operation(full_operation)
except ValueError as e:
self._print_cross('Error on {}.{}: {}'
''.format(component.name, full_operation, e))
return False
self._load_component_state(tpl)
self._print_tick()
# Create Network
# TODO: do not create network if already there
self._print_loading_start('Create network... ')
docker_interface.create_network(tpl.name)
self._print_tick()
# Execute plan
for component, full_operation in plan:
protocol = component.protocol
self._log.debug('Component %s is in state %s',
component.name, component.protocol.current_state)
self._print_loading_start('Execute op "{}" on "{}"... '
''.format(full_operation, component.name))
transition = protocol.next_transition(full_operation)
self._log.debug('transition: i={} o={}'.format(
transition.interface, transition.operation))
if isinstance(component, Container):
ContainerManager.exec_operation(
component, transition.operation)
elif isinstance(component, Volume):
VolumeManager.exec_operation(
component, transition.operation)
elif isinstance(component, Software):
SoftwareManager.exec_operation(component, transition.interface,
transition.operation)
state = protocol.execute_operation(full_operation)
# remove the component if it is in the initial state
if state == protocol.initial_state:
Memory.remove(component)
else:
Memory.update_state(component, state.name)
self._print_tick()
self._print_outputs(tpl)
except Exception as e:
self._log.debug('Exception type: %s', type(e))
self._log.debug(traceback.format_exc())
self._print_cross(e)
return False
return True
@update_memory
def ls_components(self, app=None, filters={}):
comps = Memory.get_comps(app, filters)
def get_state(state):
return colored(state, ('green' if state == STATE_RUNNING else None))
def format_row(comp):
return [comp['app_name'],
comp['name'],
comp['type'],
get_state(comp['state']),
'{}.{}'.format(comp['app_name'], comp['name'])]
table = [format_row(c) for c in comps]
table_str = tabulate(table, headers=['Application', 'Component',
'Type', 'State', 'Full name'])
Logger.println(table_str)
def log(self, component, operation):
# TODO: add logs also for Docker container
app, name = helper.split(component, '.')
if app is None:
Logger.print_error('First argument must be a component full name '
'(i.e my_app.my_component)')
return
if '.' not in operation:
operation = 'Standard.{}'.format(operation)
self._log.debug('app: %s, name: %s, operation: %s',
app, name, operation)
log_file_name = '{}/{}/*/{}/{}.log'.format(self._tmp_dir,
app, name, operation)
log_file = glob(log_file_name)
if len(log_file) != 1:
Logger.print_error('Component or operation log not found')
return
with open(log_file[0], 'r', encoding='utf-8', errors='ignore') as f:
for line in f.readlines():
line = colored(line, 'green') if line.startswith(
'+ ') else line
Logger.print_(line)
def prune(self):
self._print_loading_start('Remove containers.. ')
con = docker_interface.get_containers(all=True)
for c in (c for c in con if c['Names'][0].startswith('/tosker')):
self._log.debug(c['Names'][0])
docker_interface.delete_container(c['Id'], force=True)
self._print_tick()
self._print_loading_start('Remove volumes.. ')
vol = docker_interface.get_volumes()
for v in (v for v in vol if v['Name'].startswith('tosker')):
self._log.debug(v['Name'])
docker_interface.delete_volume(v['Name'])
self._print_tick()
# TODO: remove also networks
self._print_loading_start('Remove tosker data.. ')
shutil.rmtree(self._tmp_dir)
self._print_tick()
def parse_operations(self, operations):
"""
Transform a ["component:interface.operation"..] in
[("component","interface.operation")..]
"""
return [helper.split(op.strip(), ':') for op in operations]
def read_plan_file(self, file):
"""Parse the operation from a general plan file (.csv, .plan, other)"""
with open(file, 'r') as fstream:
_, ext = os.path.splitext(file)
if '.csv' == ext:
return self._read_csv(fstream)
elif '.plan' == ext:
return self._read_plan(fstream)
else:
Logger.print_error('Plan file format not supported.')
pass
def _read_csv(self, stream):
"""
Get a file stream of a .csv file and return a list
        of tuple (component, interface.operation).
"""
return [(l[0], '{}.{}'.format(l[1], l[2]))
for l in (l.strip().split(',')
for l in stream.readlines())]
def _read_plan(self, stream):
"""
        Get a file stream of a .plan file and return a list
        of tuple (component, interface.operation).
"""
return self.parse_operations(
[l for l in (l.strip() for l in stream.readlines())
if l and not l.startswith('#')])
def _parse_tosca(self, file_path, inputs):
'''
Parse TOSCA file
'''
try:
return get_tosca_template(file_path, inputs)
except ScannerError as e:
Logger.print_error('YAML parse error\n {}'.format(e))
return None
except ValidationError as e:
Logger.print_error('TOSCA validation error\n {}'.format(e))
return None
except ValueError as e:
Logger.print_error('TosKer validation error\n {}'.format(e))
self._log.debug(colored(traceback.format_exc(), 'red'))
return None
except Exception as e:
Logger.print_error('Internal error\n {}'.format(e))
self._log.debug('Exception type: %s', type(e))
self._log.debug(colored(traceback.format_exc(), 'red'))
return None
def _create_tmp_dir(self, tpl):
'''
        Create temporary directory
'''
tpl.tmp_dir = os.path.join(self._tmp_dir, tpl.name)
try:
os.makedirs(tpl.tmp_dir)
except os.error as e:
self._log.info(e)
def _check_plan_format(self, tpl, operations):
"""
operation: [("component", "interface.operation")..]
"""
for i, op in enumerate(operations):
if not (isinstance(op, tuple) and len(op) == 2):
Logger.print_error('Plan is not in the right format')
return False
comp_name, full_operation = op
            # Check that the component exists in the template
comp = tpl[comp_name]
if comp is None:
Logger.print_error(
'Component "{}" not found in template.'.format(comp_name))
return False
# check that the component has interface.operation
interface, operation = helper.split(full_operation, '.')
            if interface not in comp.interfaces or \
                    operation not in comp.interfaces[interface]:
                Logger.print_error('Component "{}" does not have the "{}" '
                                   'operation in the "{}" interface.'
                                   ''.format(comp_name, operation, interface))
return False
operations[i] = comp, full_operation
return True
def _load_component_state(self, tpl):
for comp in tpl.nodes:
state = Memory.get_comp_state(comp)
if state is not None:
state = comp.protocol.find_state(state)
if state is not None:
comp.protocol.current_state = state
else:
return False
else:
comp.protocol.reset()
return True
def _print_outputs(self, tpl):
if len(tpl.outputs) != 0:
Logger.println('\nOUTPUTS:')
for out in tpl.outputs:
self._log.debug('value: %s', out.value)
value = out.value if isinstance(out.value, six.string_types) \
else helper.get_attributes(out.value.args, tpl)
Logger.println(' - ' + out.name + ":", value)
def _update_state(self):
errors = set()
def manage_error(comp, state):
errors.add(comp['full_name'])
Memory.update_state(comp, state)
def manage_error_container(comp, state):
manage_error(comp, state)
path = os.path.join(self._tmp_dir, comp['app_name'], comp['name'])
try:
software = [(f.name, f.path) for f in scandir(path)
if f.is_dir()]
except FileNotFoundError as e:
software = []
self._log.debug('path %s found %s', path, software)
for s, s_path in software:
full_name = '{}.{}'.format(comp['app_name'], s)
Memory.update_state('{}.{}'.format(
comp['app_name'], s), SOFTWARE_STATE_ZOTTED)
errors.add(full_name)
for container in Memory.get_comps(filters={'type': 'Container'}):
status = docker_interface.inspect_container(container['full_name'])
deleted, created, running = status is None,\
status is not None and not status['State']['Running'],\
status is not None and status['State']['Running']
if deleted and container['state'] != CONTAINER_STATE_DELETED:
manage_error_container(container, CONTAINER_STATE_DELETED)
elif created and container['state'] != CONTAINER_STATE_CREATED:
manage_error_container(container, CONTAINER_STATE_CREATED)
elif running and container['state'] != CONTAINER_STATE_RUNNING:
manage_error_container(container, CONTAINER_STATE_RUNNING)
for volume in Memory.get_comps(filters={'type': 'Volume'}):
status = docker_interface.inspect_volume(volume['full_name'])
if status is None:
manage_error(volume, VOLUME_STATE_DELETED)
return len(errors) == 0, errors
def _print_tick(self):
self._loading_thread.succeed(self._loading_thread.text + 'Done')
def _print_skip(self):
self._loading_thread.info(self._loading_thread.text + 'Skipped')
def _print_cross(self, error):
self._loading_thread.fail(self._loading_thread.text + '\n' +
colored(error, 'red'))
def _print_loading_start(self, msg):
self._loading_thread = Halo(text=msg, spinner='dots')
self._loading_thread.start()
| python |
#############################################################################
#
# VFRAME
# MIT License
# Copyright (c) 2020 Adam Harvey and VFRAME
# https://vframe.io
#
#############################################################################
import click
from vframe.settings.app_cfg import VALID_PIPE_MEDIA_EXTS
from vframe.utils.click_utils import generator
@click.command('')
@click.option('-i', '--input', 'opt_input', required=True,
help='Path to image or directory')
@click.option('-e', '--exts', 'opt_exts', default=VALID_PIPE_MEDIA_EXTS,
multiple=True, help='Extensions to glob for')
@click.option('-r', '--recursive', 'opt_recursive', is_flag=True,
help='Recursive glob')
@click.option('--slice', 'opt_slice', type=(int, int), default=(-1, -1),
help="Slice list of inputs")
@click.option('--skip-frames', 'opt_skip_frames', is_flag=True,
help='Skip all frames, only iterate files')
@click.option('--check-exist', 'opt_check_exist',
is_flag=True, default=False,
help='Check files existence before processing')
@click.option('--randomize', 'opt_randomize', is_flag=True,
help='Randomize file list before slicing')
@click.option('--media-path', 'opt_new_filepath', type=str,
default='',
help='Override JSON filepath')
@generator
@click.pass_context
def cli(ctx, sink, opt_input, opt_recursive, opt_exts, opt_slice,
opt_skip_frames, opt_check_exist, opt_randomize, opt_new_filepath):
"""Open media for processing"""
from tqdm import tqdm
import dacite
from vframe.settings.app_cfg import LOG, SKIP_FRAME, READER, SKIP_FILE
from vframe.settings.app_cfg import USE_PREHASH, USE_DRAW_FRAME
from vframe.settings.app_cfg import MEDIA_FILTERS, SKIP_MEDIA_FILTERS
from vframe.models.media import MediaFileReader
from vframe.utils.sys_utils import SignalInterrupt
from vframe.utils.file_utils import get_ext
# ---------------------------------------------------------------------------
# init
sigint = SignalInterrupt()
init_obj = {
'filepath': opt_input,
'exts': tuple(opt_exts),
'slice_idxs': opt_slice,
'recursive': opt_recursive,
'use_prehash': ctx.obj.get(USE_PREHASH, False),
'use_draw_frame': ctx.obj.get(USE_DRAW_FRAME, False),
'media_filters': ctx.obj.get(MEDIA_FILTERS, []),
'skip_all_frames': opt_skip_frames,
'opt_check_exist': opt_check_exist,
'opt_randomize': opt_randomize,
'opt_new_filepath': opt_new_filepath,
}
# init media file reader
r = dacite.from_dict(data_class=MediaFileReader, data=init_obj)
ctx.obj[READER] = r
ctx.obj[SKIP_MEDIA_FILTERS] = get_ext(opt_input) == 'json'
# error checks
if not r.n_files:
LOG.info('No files to process.')
return
# process media
for m in tqdm(r.iter_files(), total=r.n_files, desc='Files', leave=False):
ctx.obj[SKIP_FILE] = False # reset
m.skip_all_frames = opt_skip_frames
if sigint.interrupted:
m.unload()
return
for ok in tqdm(m.iter_frames(), total=m.n_frames, desc=m.fn, disable=m.n_frames <= 1, leave=False):
ctx.obj[SKIP_FRAME] = (opt_skip_frames or m.skip_all_frames)
# TODO: cleanup
if ctx.obj.get(SKIP_FILE, False) or m._skip_file:
ctx.obj[SKIP_FILE] = True
m.set_skip_file()
# check for ctl-c, exit gracefully
if sigint.interrupted:
m.unload()
return
sink.send(m)
# print stats
LOG.info(r.stats) | python |
import os
import torch
import numpy as np
import warnings
try:
from typing import Protocol
except ImportError: # noqa
# Python < 3.8
class Protocol:
pass
from .dsp.overlap_add import LambdaOverlapAdd
from .utils import get_device
class Separatable(Protocol):
"""Things that are separatable."""
def forward_wav(self, wav, **kwargs):
"""
Args:
wav (torch.Tensor): waveform tensor.
Shape: 1D, 2D or 3D tensor, time last.
**kwargs: Keyword arguments from `separate`.
Returns:
torch.Tensor: the estimated sources.
Shape: [batch, n_src, time] or [n_src, time] if the input `wav`
did not have a batch dim.
"""
...
@property
def sample_rate(self):
"""Operating sample rate of the model (float)."""
...
def separate(
model: Separatable, wav, output_dir=None, force_overwrite=False, resample=False, **kwargs
):
"""Infer separated sources from input waveforms.
Also supports filenames.
Args:
model (Separatable, for example asteroid.models.BaseModel): Model to use.
wav (Union[torch.Tensor, numpy.ndarray, str]): waveform array/tensor.
Shape: 1D, 2D or 3D tensor, time last.
output_dir (str): path to save all the wav files. If None,
estimated sources will be saved next to the original ones.
force_overwrite (bool): whether to overwrite existing files
(when separating from file).
resample (bool): Whether to resample input files with wrong sample rate
(when separating from file).
**kwargs: keyword arguments to be passed to `forward_wav`.
Returns:
Union[torch.Tensor, numpy.ndarray, None], the estimated sources.
(batch, n_src, time) or (n_src, time) w/o batch dim.
.. note::
`separate` calls `model.forward_wav` which calls `forward` by default.
        For models whose `forward` doesn't have waveform tensors as input/output,
overwrite their `forward_wav` method to separate from waveform to waveform.
"""
if isinstance(wav, str):
file_separate(
model,
wav,
output_dir=output_dir,
force_overwrite=force_overwrite,
resample=resample,
**kwargs,
)
elif isinstance(wav, np.ndarray):
return numpy_separate(model, wav, **kwargs)
elif isinstance(wav, torch.Tensor):
return torch_separate(model, wav, **kwargs)
else:
raise ValueError(
f"Only support filenames, numpy arrays and torch tensors, received {type(wav)}"
)
@torch.no_grad()
def torch_separate(model: Separatable, wav: torch.Tensor, **kwargs) -> torch.Tensor:
"""Core logic of `separate`."""
# Handle device placement
input_device = get_device(wav, default="cpu")
model_device = get_device(model, default="cpu")
wav = wav.to(model_device)
# Forward
separate_func = getattr(model, "forward_wav", model)
out_wavs = separate_func(wav, **kwargs)
# FIXME: for now this is the best we can do.
out_wavs *= wav.abs().sum() / (out_wavs.abs().sum())
# Back to input device (and numpy if necessary)
out_wavs = out_wavs.to(input_device)
return out_wavs
def numpy_separate(model: Separatable, wav: np.ndarray, **kwargs) -> np.ndarray:
"""Numpy interface to `separate`."""
wav = torch.from_numpy(wav)
out_wavs = torch_separate(model, wav, **kwargs)
out_wavs = out_wavs.data.numpy()
return out_wavs
def file_separate(
model: Separatable,
filename: str,
output_dir=None,
force_overwrite=False,
resample=False,
**kwargs,
) -> None:
"""Filename interface to `separate`."""
import soundfile as sf
if not hasattr(model, "sample_rate"):
if isinstance(model, LambdaOverlapAdd):
model = model.nnet
raise TypeError(
f"This function requires your model ({type(model).__name__}) to have a "
"'sample_rate' attribute. See `BaseModel.sample_rate` for details."
)
# SoundFile wav shape: [time, n_chan]
wav, fs = sf.read(filename, dtype="float32", always_2d=True)
if wav.shape[-1] > 1:
warnings.warn(
f"Received multichannel signal with {wav.shape[-1]} signals, "
f"using the first channel only."
)
# FIXME: support only single-channel files for now.
if fs != model.sample_rate:
if resample:
from librosa import resample
wav = resample(wav[:, 0], orig_sr=fs, target_sr=model.sample_rate)[:, None]
else:
raise RuntimeError(
f"Received a signal with a sampling rate of {fs}Hz for a model "
f"of {model.sample_rate}Hz. You can pass `resample=True` to resample automatically."
)
# Pass wav as [batch, n_chan, time]; here: [1, 1, time]
wav = wav[:, 0][None, None]
(to_save,) = numpy_separate(model, wav, **kwargs)
# Save wav files to filename_est1.wav etc...
for src_idx, est_src in enumerate(to_save):
base = ".".join(filename.split(".")[:-1])
save_name = base + "_est{}.".format(src_idx + 1) + filename.split(".")[-1]
if output_dir is not None:
save_name = os.path.join(output_dir, save_name.split("/")[-1])
if os.path.isfile(save_name) and not force_overwrite:
warnings.warn(
f"File {save_name} already exists, pass `force_overwrite=True` to overwrite it",
UserWarning,
)
return
if fs != model.sample_rate:
from librosa import resample
est_src = resample(est_src, orig_sr=model.sample_rate, target_sr=fs)
sf.write(save_name, est_src, fs)
| python |
import pickle
import brewer2mpl
import matplotlib
matplotlib.use("agg")
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import seaborn as sns
from absl import app, flags
from utils import *
FLAGS = flags.FLAGS
flags.DEFINE_string('base_dir', '', 'Path to the base dir where the logs are')
flags.DEFINE_bool('small_paper_mode', False, 'Adjusts the size of the plots.')
flags.DEFINE_bool('stretched', False, 'Adjusts the size of the plots.')
flags.DEFINE_bool('paper_mode', False, 'Adjusts the size of the plots.')
flags.DEFINE_bool('slide_mode', False, 'Adjusts the size of the plots.')
flags.DEFINE_bool('poster_mode', False, 'Adjusts the size of the plots.')
flags.DEFINE_string('file_format', 'png', 'File type of the output plot.')
flags.DEFINE_string('file_name', 'prediction-runtime-horizon',
'Name of the file to output to.')
flags.DEFINE_list('horizons', '10,20,30,40,50',
'Comma separated list of horizons.')
flags.DEFINE_integer('hz', '10', 'Frequency of predictions.')
def main(argv):
bmap = brewer2mpl.get_map('Set2', 'qualitative', 7)
colors = bmap.mpl_colors[3:]
hatches = ["////", "****"]
if FLAGS.paper_mode:
plt.figure(figsize=(3.33, 2.22))
set_paper_rcs()
elif FLAGS.small_paper_mode:
plt.figure(figsize=(2.4, 1.66))
set_paper_rcs()
elif FLAGS.stretched:
plt.figure(figsize=(3, 1.4))
set_paper_rcs()
elif FLAGS.slide_mode:
plt.figure(figsize=(8, 6))
set_slide_rcs()
elif FLAGS.poster_mode:
plt.figure(figsize=(12, 9))
set_poster_rcs()
else:
plt.figure()
set_rcs()
ax = plt.gca()
models = ["mfp", "r2p2"]
legend_elements = []
dfs = []
for i, model in enumerate(models):
for h in FLAGS.horizons:
file_name = '{}/{}_timely_horizon_{}.pkl'.format(
FLAGS.base_dir, model, h)
f = open(file_name, 'rb')
num_secs = int(int(h) * 1.0 / FLAGS.hz)
data = pickle.load(f)
df = pd.DataFrame({
'model': [model] * len(data),
'horizon': [num_secs] * len(data),
'runtime': data
})
dfs.append(df)
if model == 'mfp':
label = 'MFP'
elif model == 'r2p2':
label = 'R2P2-MA'
else:
label = model
legend_elements.append(
Patch(facecolor=colors[i],
alpha=0.6,
hatch=hatches[i],
label=label))
data = pd.concat(dfs)
ax = sns.boxplot(x='horizon',
y='runtime',
hue='model',
data=data,
palette=colors,
width=0.7,
saturation=1,
whis=(5, 95),
showfliers=False)
for i, box in enumerate(ax.artists):
box.set_hatch(hatches[i % len(models)])
adjust_box_widths(plt.gcf(), 0.8)
plt.legend(handles=legend_elements,
framealpha=0,
handlelength=1.5,
handletextpad=0.1)
plt.xlabel('Prediction horizon [s]')
plt.ylabel('Runtime [ms]')
plt.savefig("{}.{}".format(FLAGS.file_name, FLAGS.file_format),
format=FLAGS.file_format,
bbox_inches='tight')
if __name__ == '__main__':
app.run(main)
| python |
# coding: utf-8
# # Table of Contents
# <p><div class="lev1 toc-item"><a href="#Blurring-a-part-of-an-image-in-Python" data-toc-modified-id="Blurring-a-part-of-an-image-in-Python-1"><span class="toc-item-num">1 </span>Blurring a part of an image in Python</a></div><div class="lev2 toc-item"><a href="#Blur-all-the-image" data-toc-modified-id="Blur-all-the-image-11"><span class="toc-item-num">1.1 </span>Blur all the image</a></div><div class="lev2 toc-item"><a href="#Blur-only-an-area-of-the-image" data-toc-modified-id="Blur-only-an-area-of-the-image-12"><span class="toc-item-num">1.2 </span>Blur only an area of the image</a></div><div class="lev2 toc-item"><a href="#Conclusion" data-toc-modified-id="Conclusion-13"><span class="toc-item-num">1.3 </span>Conclusion</a></div>
# # Blurring a part of an image in Python
#
# This very short notebook shows how to open an image (eg a PNG image), and nicely blur a part of it.
# In[1]:
import numpy as np
import skimage
# In[2]:
get_ipython().run_line_magic('load_ext', 'watermark')
get_ipython().run_line_magic('watermark', '-v -m -a "Lilian Besson (Naereen)" -p numpy,skimage -g')
# ## Blur all the image
# Let's import one of the example image, and blur all of it using [`skimage.filters.gaussian`](http://scikit-image.org/docs/stable/api/skimage.filters.html#skimage.filters.gaussian).
# In[9]:
from skimage import data, io, filters
image = data.astronaut()
# In[10]:
def imshow(image):
io.imshow(image)
io.show()
# In[11]:
imshow(image)
# In[5]:
from skimage.filters import gaussian
# In[12]:
filtered_img = gaussian(image, sigma=1, multichannel=True)
imshow(filtered_img)
# In[13]:
filtered_img = gaussian(image, sigma=2, multichannel=True)
imshow(filtered_img)
# ## Blur only an area of the image
# In[17]:
image.shape
# In[71]:
def blur(image, x0, x1, y0, y1, sigma=1, imshowall=False):
x0, x1 = min(x0, x1), max(x0, x1)
y0, y1 = min(y0, y1), max(y0, y1)
im = image.copy()
sub_im = im[x0:x1,y0:y1].copy()
if imshowall: imshow(sub_im)
blur_sub_im = gaussian(sub_im, sigma=sigma)
if imshowall: imshow(blur_sub_im)
blur_sub_im = np.round(255 * blur_sub_im)
im[x0:x1,y0:y1] = blur_sub_im
return im
# In[72]:
filtered_img = blur(image, 80, 180, 170, 270, sigma=1)
imshow(filtered_img)
# In[76]:
filtered_img = blur(image, 80, 180, 170, 270, sigma=5)
imshow(filtered_img)
# In[73]:
filtered_img = blur(image, 80, 180, 170, 270, sigma=10)
imshow(filtered_img)
# In[74]:
filtered_img = blur(image, 80, 180, 170, 270, sigma=20)
imshow(filtered_img)
# ## Conclusion
#
# That's it.
| python |
import json
import logging
import requests
from django.conf import settings
from django.contrib.auth.models import User
from rest_framework import status
class ExternalUmbrellaServiceAuthenticationBackend:
logger = logging.getLogger(__name__)
def get_user(self, user_id):
"""
Retrieve the user's entry in the User model if it exists
:param user_id:
:return:
"""
try:
return User.objects.get(pk=user_id)
except User.DoesNotExist:
return None
def authenticate(self, request, uid=None, eaa_hash=None):
self.logger.debug('Attempting to authenticate via umbrella')
try:
if None in (uid, eaa_hash):
self.logger.warning('Tried to authenticate user with missing fields, rejecting')
return None
post_data = {'eaa_hash': eaa_hash}
headers = {'Content-type': 'application/json'}
response = requests.post(settings.BACKEND_UO_HASH, data=json.dumps(post_data), headers=headers)
if response.status_code == status.HTTP_200_OK:
self.logger.info('Authenticated %s', uid)
try:
user = User.objects.get(username=uid)
return user
except User.DoesNotExist as udne:
self.logger.info('Creating %s user in django database, as it is not yet present', uid)
# User will have unusable password, it is authenticated externally
user = User.objects.create_user(uid, '')
user.save()
return user
return None
except Exception as e:
self.logger.debug(e) | python |
# Copyright 2017,2018,2019,2020,2021 Sony Corporation.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pytest
import numpy as np
import nnabla as nn
import nnabla.functions as F
from nbla_test_utils import list_context
from nnabla.testing import assert_allclose
ctxs = list_context('TopNError')
def ref_top_n_error(x, l, axis, n):
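    # Reference implementation: a sample counts as an error (1) when more than n class
    # scores are >= the score of the true label, i.e. the label falls outside the top-n.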
orig_x = x.copy()
x = np.rollaxis(x, axis, x.ndim).reshape(-1, x.shape[axis])
ll = np.rollaxis(l, axis, x.ndim).flatten()
y = []
for x_, ll_ in zip(x, ll):
threshold = x_[ll_]
count = 0
for x__ in x_:
if x__ >= threshold:
count += 1
y.append(1 if count > n else 0)
return np.array(y).reshape(l.shape)
@pytest.mark.parametrize("ctx, func_name", ctxs)
@pytest.mark.parametrize("seed", [313])
@pytest.mark.parametrize("axis", [0, 1, 2, -1, -2, -3])
@pytest.mark.parametrize("n", [3, 5])
def test_top_n_error_forward(seed, axis, n, ctx, func_name):
ishape = [5, 6, 7]
rng = np.random.RandomState(seed)
l_shape = list(ishape)
l_shape[axis] = 1
n_class = ishape[axis]
inputs = [
rng.rand(5, 6, 7).astype(np.float32) * 0.9 + 0.05,
rng.randint(0, n_class, size=l_shape).astype(np.int)]
ref = ref_top_n_error(inputs[0], inputs[1], axis, n)
x = nn.Variable(ishape)
l = nn.Variable(l_shape)
y = F.top_n_error(x, l, axis, n)
x.d = inputs[0]
l.d = inputs[1]
y.forward()
res = y.d
atol_f = 1e-6
assert_allclose(ref, res, atol=atol_f)
| python |
from tclCommands.TclCommand import TclCommandSignaled
import collections
class TclCommandMirror(TclCommandSignaled):
"""
Tcl shell command to mirror an object.
"""
# array of all command aliases, to be able use
# old names for backward compatibility (add_poly, add_polygon)
aliases = ['mirror']
description = '%s %s' % ("--", "Will mirror the geometry of a named object. Does not create a new object.")
# Dictionary of types from Tcl command, needs to be ordered.
# For positional arguments
arg_names = collections.OrderedDict([
('name', str)
])
# Dictionary of types from Tcl command, needs to be ordered.
# For options like -optionname value
option_types = collections.OrderedDict([
('axis', str),
('box', str),
('origin', str)
])
# array of mandatory options for current Tcl command: required = {'name','outname'}
required = ['name']
# structured help for current command, args needs to be ordered
help = {
'main': "Will mirror the geometry of a named object. Does not create a new object.",
'args': collections.OrderedDict([
('name', 'Name of the object (Gerber, Geometry or Excellon) to be mirrored. Required.'),
('axis', 'Mirror axis parallel to the X or Y axis.'),
('box', 'Name of object which act as box (cutout for example.)'),
        ('origin', 'Reference point. It is used only if the box is not used. Format (x,y).\n'
'Comma will separate the X and Y coordinates.\n'
'WARNING: no spaces are allowed. If uncertain enclose the two values inside parenthesis.\n'
'See the example.')
]),
'examples': ['mirror obj_name -box box_geo -axis X -origin 3.2,4.7']
}
def execute(self, args, unnamed_args):
"""
Execute this TCL shell command
:param args: array of known named arguments and options
:param unnamed_args: array of other values which were passed into command
without -somename and we do not have them in known arg_names
:return: None or exception
"""
name = args['name']
# Get source object.
try:
obj = self.app.collection.get_by_name(str(name))
except Exception:
return "Could not retrieve object: %s" % name
if obj is None:
return "Object not found: %s" % name
if obj.kind != 'gerber' and obj.kind != 'geometry' and obj.kind != 'excellon':
return "ERROR: Only Gerber, Excellon and Geometry objects can be mirrored."
# Axis
if 'axis' in args:
try:
axis = args['axis'].upper()
except KeyError:
axis = 'Y'
else:
axis = 'Y'
# Box
if 'box' in args:
try:
box = self.app.collection.get_by_name(args['box'])
except Exception:
return "Could not retrieve object: %s" % args['box']
if box is None:
return "Object box not found: %s" % args['box']
try:
xmin, ymin, xmax, ymax = box.bounds()
px = 0.5 * (xmin + xmax)
py = 0.5 * (ymin + ymax)
obj.mirror(axis, [px, py])
obj.plot()
return
except Exception as e:
return "Operation failed: %s" % str(e)
# Origin
if 'origin' in args:
try:
origin_val = eval(args['origin'])
x = float(origin_val[0])
y = float(origin_val[1])
except KeyError:
x, y = (0, 0)
except ValueError:
return "Invalid distance: %s" % str(args['origin'])
try:
obj.mirror(axis, [x, y])
except Exception as e:
return "Operation failed: %s" % str(e)
| python |
import torch.nn as nn
from qanet.encoder_block import EncoderBlock
class ModelEncoder(nn.Module):
def __init__(self, n_blocks=7, n_conv=2, kernel_size=7, padding=3,
hidden_size=128, conv_type='depthwise_separable', n_heads=8, context_length=400):
super(ModelEncoder, self).__init__()
self.n_conv = n_conv
self.n_blocks = n_blocks
self.total_layers = (n_conv + 2) * n_blocks
self.stacked_encoderBlocks = nn.ModuleList([EncoderBlock(n_conv=n_conv,
kernel_size=kernel_size,
padding=padding,
n_filters=hidden_size,
conv_type=conv_type,
n_heads=n_heads) for i in range(n_blocks)])
def forward(self, x, mask):
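        # The same stack of encoder blocks is applied three consecutive times;
        # the intermediate outputs M0, M1 and M2 are combined downstream to
        # predict answer-span start/end positions (as in QANet).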
for i in range(self.n_blocks):
x = self.stacked_encoderBlocks[i](x, mask, i*(self.n_conv+2)+1, self.total_layers)
M0 = x
for i in range(self.n_blocks):
x = self.stacked_encoderBlocks[i](x, mask, i*(self.n_conv+2)+1, self.total_layers)
M1 = x
for i in range(self.n_blocks):
x = self.stacked_encoderBlocks[i](x, mask, i*(self.n_conv+2)+1, self.total_layers)
M2 = x
return M0, M1, M2
| python |
# Definition for a binary tree node.
# class TreeNode(object):
# def __init__(self, x):
# self.val = x
# self.left = None
# self.right = None
class Solution(object):
def helper(self,root):
if not root:
return (0,0)
        # recurse into the left and right subtrees; each call returns a pair:
        # (best sum if that node is NOT robbed, best sum if it IS robbed)
        left=self.helper(root.left)
        right=self.helper(root.right)
        # case 1: root not robbed -> each child may or may not be robbed; take the best of each
        max_norob=max(left[0],left[1])+max(right[0],right[1])
        # case 2: root robbed -> neither child can be robbed
        max_rob=root.val+left[0]+right[0]
return (max_norob,max_rob)
def rob(self, root):
"""
:type root: TreeNode
:rtype: int
"""
"""
1. root stolen
2. root not stolen
"""
res=self.helper(root)
return max(res[0],res[1])
| python |
#%load_ext autoreload
#%autoreload 2
from pathlib import Path
from pprint import pformat
from hloc import extract_features, match_features, localize_inloc, visualization
dataset = Path('datasets/inloc/') # change this if your dataset is somewhere else
pairs = Path('pairs/inloc/')
loc_pairs = pairs / 'pairs-query-netvlad40.txt' # top 40 retrieved by NetVLAD
outputs = Path('outputs/inloc/') # where everything will be saved
results = outputs / 'InLoc_hloc_superpoint+superglue_netvlad40.txt' # the result file
# list the standard configurations available
print(f'Configs for feature extractors:\n{pformat(extract_features.confs)}')
print(f'Configs for feature matchers:\n{pformat(match_features.confs)}')
# pick one of the configurations for extraction and matching
# you can also simply write your own here!
feature_conf = extract_features.confs['superpoint_inloc']
matcher_conf = match_features.confs['superglue']
feature_path = extract_features.main(feature_conf, dataset, outputs)
match_path = match_features.main(matcher_conf, loc_pairs, feature_conf['output'], outputs)
localize_inloc.main(
dataset, loc_pairs, feature_path, match_path, results,
skip_matches=20) # skip database images with too few matches
save_path = "outputs/inloc/visualize.png"
visualization.save_visualize_loc_images(save_path, results, dataset, n=1, top_k_db=1, seed=2)
print("done") | python |
from aws_cdk import (
aws_batch as _batch,
aws_ec2 as _ec2,
aws_iam as _iam,
core,
)
class BatchENV(core.Construct):
def getComputeQueue(self,queue_name):
return self.job_queue[queue_name]
def __init__(self, scope: core.Construct, id: str,CurrentVPC="default",TargetS3="default",UserName="default",**kwargs):
super().__init__(scope, id, **kwargs)
self.job_queue = {}
# batch service role
self.batch_service_role = _iam.Role(self,'BatchServiceRole',
assumed_by=_iam.ServicePrincipal('batch.amazonaws.com'),
managed_policies=[
_iam.ManagedPolicy.from_aws_managed_policy_name('service-role/AWSBatchServiceRole')
]
)
# ec2 role with policy that allow to get object from s3 bucket for batch computing
self.batch_compute_role = _iam.Role(self, 'BatchComputeRole',
assumed_by=_iam.CompositePrincipal(
_iam.ServicePrincipal('ec2.amazonaws.com'),
_iam.ServicePrincipal('ecs.amazonaws.com')
),
managed_policies=[
_iam.ManagedPolicy.from_aws_managed_policy_name('service-role/AmazonEC2RoleforSSM'),
_iam.ManagedPolicy.from_aws_managed_policy_name("service-role/AmazonEC2ContainerServiceforEC2Role"),
_iam.ManagedPolicy.from_aws_managed_policy_name("CloudWatchLogsFullAccess")
]
)
TargetS3.grant_read_write(self.batch_compute_role)
self.batch_compute_instance_profile = _iam.CfnInstanceProfile(
self,
'BatchInstanceProfile' + UserName,
instance_profile_name='BatchInstanceProfile-' + UserName,
roles=[self.batch_compute_role.role_name]
)
self.ComputeENV = _batch.ComputeEnvironment(self, "ComputeENV",
service_role=self.batch_service_role,
compute_resources={
"vpc": CurrentVPC,
"instance_types":[
_ec2.InstanceType("c5"),
_ec2.InstanceType("m5")
],
"maxv_cpus":128,
"minv_cpus":0,
"type":_batch.ComputeResourceType.SPOT,
"allocation_strategy":_batch.AllocationStrategy.BEST_FIT_PROGRESSIVE,
"instance_role":self.batch_compute_instance_profile.instance_profile_name
}
)
self.ComputeQueue = _batch.JobQueue(self,"ComputeQueue",
priority=1,
compute_environments=[
_batch.JobQueueComputeEnvironment(
compute_environment=self.ComputeENV,
order=1
)
]
)
self.job_queue["ComputeQueue"] = self.ComputeQueue | python |
from vyper import basebot
from vyper.web import interface
import os
class PluginBot(basebot.BaseBot):
def __init__(self, token, debug=False, start_loop=False, loop_time=.05, ping=True, list_plugins=False, web_app=None, name=None):
if not os.path.exists('plugins'):
os.mkdir('plugins')
with open('plugins/__init__.py', 'w') as ini:
ini.write("""import pkgutil
__path__ = pkgutil.extend_path(__path__, __name__)
for importer, modname, ispkg in pkgutil.walk_packages(path=__path__, prefix=__name__+'.'):
    __import__(modname)""")
import plugins
Ping.enabled = ping
self.functions = {
'message': self.message,
'edited_message': self.edited_message,
'channel_post': self.channel_post,
'edited_channel_post': self.edited_channel_post,
'inline_query': self.inline_query,
'chosen_inline_result': self.chosen_inline_result,
'callback_query': self.callback_query,
'shipping_query': self.shipping_query,
'pre_checkout_query': self.pre_checkout_query
}
self.configure(token, functions=self.functions, debug=debug)
self.plugins = list(self._get_plugins())
if list_plugins:
for plugin in self.plugins:
print(plugin)
self.web_app = web_app
if start_loop:
self.start_loop(loop_time)
def _get_plugins(self):
for plugin in Plugin.__subclasses__():
if plugin.enabled:
plugin.bot = self
yield plugin()
def test_plugins(self, msg):
if 'text' in msg:
for plugin in list(self.plugins):
plugin.message(msg)
class Plugin:
bot = None
enabled = True
def __repr__(self):
return "Plugin: {0}".format(self.__class__.__name__)
def message(self, msg):
pass
class Ping(Plugin):
def message(self, msg):
if msg['text'] == '/ping':
self.bot.sendMessage(msg['chat']['id'], 'PONG!')
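# A custom plugin only needs to subclass Plugin and override message();
# a minimal hypothetical example:
#
# class Echo(Plugin):
#     def message(self, msg):
#         self.bot.sendMessage(msg['chat']['id'], msg['text'])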
| python |
import timeit
from copy import deepcopy
import time
import cProfile
import pstats
import numpy as np
from sympy import sin, symbols, Matrix, Symbol, exp, solve, Eq, pi, Piecewise, Function, ones
from CompartmentalSystems.smooth_model_run import SmoothModelRun
from CompartmentalSystems.smooth_reservoir_model import SmoothReservoirModel
def smr_1d(nc):
#one-dimensional
C = Symbol('C')
state_vector = [C]
time_symbol = Symbol('t')
input_fluxes = {}
output_fluxes = {0: C}
internal_fluxes = {}
srm = SmoothReservoirModel(state_vector, time_symbol, input_fluxes, output_fluxes, internal_fluxes)
start_values = np.array([5])
times = np.linspace(0,1,6)
smr = SmoothModelRun(srm, {}, start_values, times)
smr.build_state_transition_operator_cache(nc)
return deepcopy(smr)
def smr_2d(nc):
# two-dimensional
C_0, C_1 = symbols('C_0 C_1')
state_vector = [C_0, C_1]
time_symbol = Symbol('t')
input_fluxes = {}
output_fluxes = {0: C_0, 1: C_1}
internal_fluxes = {}
srm = SmoothReservoirModel(state_vector, time_symbol, input_fluxes, output_fluxes, internal_fluxes)
start_values = np.array([5, 3])
times = np.linspace(0,1,100)
smr = SmoothModelRun(srm, {}, start_values, times)
smr.build_state_transition_operator_cache(nc)
return deepcopy(smr)
def age_densities(smr):#_1D(smr):
start_age_densities = lambda a: np.exp(-a)*smr.start_values
p=smr.pool_age_densities_func(start_age_densities)
p1_sv = smr._age_densities_1_single_value(start_age_densities)
# negative ages will be cut off automatically
ages = np.linspace(-1,1,3)
res=p(ages)
# main
reps=10
def funcmaker(f,*args):
    def f_without_args():
        return f(*args)
    return f_without_args
for smr_func in [smr_1d,smr_2d]:
print('#####################################')
for nc in [10,100,1000]:#,10000]:
smr=smr_func(nc)
res=timeit.timeit(
#funcmaker(age_densities_1_single_value_2D,smr)
funcmaker(age_densities,smr)
,number=10
)
print('res',res)
#with cProfile.Profile() as pr:
# test_age_densities_1_single_value()
#
#st=pstats.Stats(pr)
#st.sort_stats('time')
#st.print_stats()
| python |
from django.contrib import admin
from .models import User, Agent
class UserAdmin(admin.ModelAdmin):
list_display = ['username', 'is_agent', 'is_superuser']
admin.site.register(User, UserAdmin)
admin.site.register(Agent)
| python |
'''
'''
def main():
info('Pump Microbone After Jan diode analysis')
close(description="Jan Inlet")
close(description= 'Microbone to Minibone')
open(description= 'Microbone to Turbo')
open(description= 'Microbone to Getter NP-10H')
open(description= 'Microbone to Getter NP-10C')
open(description= 'Microbone to CO2 Laser')
#open(description= 'CO2 Laser to Jan')
open(description= 'Microbone to Inlet Pipette')
sleep(1)
| python |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# script by Ruchir Chawdhry
# released under MIT License
# github.com/RuchirChawdhry/Python
# ruchirchawdhry.com
# linkedin.com/in/RuchirChawdhry
from subprocess import run
from prettytable import PrettyTable
# PS: This only works on macOS & Linux. It will not work on Windows
# unless you install GNU coreutils:
# http://gnuwin32.sourceforge.net/packages/coreutils.htm
def folder_size(path):
size = run(["du", "-sk", path], capture_output=True, encoding="utf-8")
return size
def megabytes(size):
mb = int(size) / 1024
return round(mb, 2)
def gigabytes(size):
gb = (int(size) / 1024) / 1024
return round(gb, 2)
def table_print(data):
t = PrettyTable()
mb = megabytes(data[0])
gb = gigabytes(data[0])
t.field_names = ["Folder/Directory", "KB", "MB", "GB"]
t.add_row([data[1], data[0], mb, gb])
print(t)
if __name__ == "__main__":
try:
s = folder_size(input("PATH TO FOLDER/DIR: "))
s = str(s.stdout).split("\t")
table_print(s)
except ValueError:
print("Please enter a valid PATH without quotes or any other characters")
| python |
import os
import logging
import logging.handlers
from counterblock.lib import config
def set_up(verbose):
global MAX_LOG_SIZE
    MAX_LOG_SIZE = config.LOG_SIZE_KB * 1024  # max log size in bytes before rotation, taken from config
global MAX_LOG_COUNT
MAX_LOG_COUNT = config.LOG_NUM_FILES
# Initialize logging (to file and console)
logger = logging.getLogger() #get root logger
logger.setLevel(logging.DEBUG if verbose else logging.INFO)
#Color logging on console for warnings and errors
logging.addLevelName(logging.WARNING, "\033[1;31m%s\033[1;0m" % logging.getLevelName(logging.WARNING))
logging.addLevelName(logging.ERROR, "\033[1;41m%s\033[1;0m" % logging.getLevelName(logging.ERROR))
#Console logging
console = logging.StreamHandler()
console.setLevel(logging.DEBUG if verbose else logging.INFO)
formatter = logging.Formatter('%(levelname)s:%(module)s: %(message)s')
console.setFormatter(formatter)
logger.addHandler(console)
#File logging (rotated)
fileh = logging.handlers.RotatingFileHandler(config.LOG, maxBytes=MAX_LOG_SIZE, backupCount=MAX_LOG_COUNT)
fileh.setLevel(logging.DEBUG if verbose else logging.INFO)
formatter = logging.Formatter('%(asctime)s:%(levelname)s:%(module)s:%(message)s', '%Y-%m-%d-T%H:%M:%S%z')
fileh.setFormatter(formatter)
logger.addHandler(fileh)
#socketio logging (don't show on console in normal operation)
socketio_log = logging.getLogger('socketio')
socketio_log.setLevel(logging.DEBUG if verbose else logging.WARNING)
socketio_log.propagate = False
#Transaction log
tx_logger = logging.getLogger("transaction_log") #get transaction logger
tx_logger.setLevel(logging.DEBUG if verbose else logging.INFO)
tx_fileh = logging.handlers.RotatingFileHandler(config.TX_LOG, maxBytes=MAX_LOG_SIZE, backupCount=MAX_LOG_COUNT)
tx_fileh.setLevel(logging.DEBUG if verbose else logging.INFO)
tx_formatter = logging.Formatter('%(asctime)s:%(levelname)s:%(module)s:%(message)s', '%Y-%m-%d-T%H:%M:%S%z')
tx_fileh.setFormatter(tx_formatter)
tx_logger.addHandler(tx_fileh)
tx_logger.propagate = False
| python |
from getpass import getpass
from pprint import pprint
from datetime import datetime
from sqlalchemy import create_engine
from pydango import state
from pydango.switchlang import switch
from pydango import (
primary_func,
secondary_func
)
from pydango.primary_func import chunks
from pydango.primary_func import (
create_sqlite_session,
random_number_generator,
)
from pydango.tables import (
Account,
Category,
Movie,
Payment,
Ticket,
Theater,
theater_schedule,
)
from sqlalchemy.sql import (
update,
and_,
)
# Unfortunately I could not find a way to get around creating a
# second connection to the sqlite DB here
engine = create_engine('sqlite:///sqlite3.db')
engine, session = create_sqlite_session(engine=engine)
def run():
print('****************** Hello Cinephile ******************')
print()
show_commands()
while True:
action = primary_func.get_action()
with switch(action) as s:
s.case('c', create_account)
s.case('l', log_into_account)
s.case('o', logout)
s.case('s', list_movies)
s.case('n', browse_by_location)
s.case('t', browse_by_category)
s.case('r', purchase_ticket)
s.case('v', view_ticket)
s.case('m', lambda: 'change_mode')
s.case(['x', 'bye', 'exit', 'exit()'], secondary_func.exit_app)
s.default(secondary_func.unknown_command)
if action:
print()
if s.result == 'change_mode':
return
def show_commands():
print('What action would you like to take: ')
print('[C]reate an account')
print('[L]ogin to your account')
print('Log[O]ut of your account')
print('[R]eserve a movie ticket')
print('[V]iew your movie ticket')
print('[S]ee list of available movies')
print('Search for [N]earby theaters')
print('Search by ca[T]egory')
print('[M]ain menu')
print('e[X]it app')
print('[?] Help (this info)')
print()
def create_account():
print("****************** REGISTER ******************")
print()
print("Please provide the following information\n")
email = input("Email (required): ").strip().lower()
credit_card = input("Credit-card number (required, i.e. 4444333399993333): ").strip()
credit_card = int(credit_card)
password = getpass().strip()
zip_code = input("Zip-code (required): ").strip()
zip_code = int(zip_code)
first_name = input("What is your first name? ").strip()
last_name = input("What is your last name? ").strip()
old_account = session.query(Account).filter_by(email=email).first()
if old_account:
secondary_func.error_msg(f"ERROR: Account with email {email} already exists.")
return
account = Account(
email=email,
credit_card=credit_card,
password=password,
zip_code=zip_code,
first_name=first_name,
last_name=last_name
# exclude theater_owner attribute
)
session.add(account)
# Flush
my_account = session.query(Account).filter_by(email=email).first()
session.commit()
state.active_account = account
secondary_func.success_msg(f"\nCreated new account with id {state.active_account.id}")
def log_into_account():
print("****************** LOGIN ******************")
email = input("Email: ").strip()
password = getpass().strip()
account = session.query(Account).filter_by(email=email).first()
if not account:
secondary_func.error_msg(f"Could not find account with email ({email})")
return
elif account.password != password:
secondary_func.error_msg(f"Password does not match")
return
state.active_account = account
secondary_func.success_msg(f"\nYou are now logged in.")
# To help with testing in the Python shell
return state.active_account
def logout():
if state.active_account is None:
print("You are already logged-out.")
return
state.active_account = None
print("You are logged-out.")
def list_movies():
print("****************** BROWSE FOR MOVIES ******************")
print()
# Grab all Movie objects
movies = session.query(Movie).filter_by(active=True).all()
movies_list = [
i.__dict__.copy()
for i in movies
]
# movie __dict__ attribute contains _sa_instance_state which isn't useful
# popped = [i.pop('_sa_instance_state') for i in movies_list]
# create a movie_chunks generator out of movie_list
    # to generate 5 items at a time
movie_chunks = chunks(movies_list, 5)
while True:
chunked = next(movie_chunks, None)
        if chunked is None:
print("The End")
break
for i in chunked:
print(f"""\nTitle: {i['title']} | Rating: {i['rating']}
Description: {i['description']}""")
more = input("\n--More--<ENTER>\n")
if not more == "":
break
def browse_by_location():
print("****************** BROWSE FOR MOVIES BY LOCATION ******************")
print()
zip_code = input("Enter your zipcode: ").strip()
zip_code = int(zip_code)
theaters = session.query(Theater).filter_by(zip_code=zip_code).all()
if not theaters:
print("There are no theaters in that zip_code.")
by_city = input("Would you like to search by city (Yes or <ENTER to quit>)? ").strip()
if by_city == "":
return
city = input("Enter your city of residence: ").strip()
theaters = session.query(Theater).filter_by(city=city).all()
if not theaters:
print("Sorry, but there are no open theaters in your city.")
return
for i, theater in enumerate(theaters, 1):
movies = theater.movies
print(f"""\n{i}. {theater.name} at {theater.address} {theater.zip_code}
Open: {theater.open_time.strftime('%H:%M:%S')} | Close: {theater.close_time.strftime('%H:%M:%S')}
Prices: {theater.ticket_price}
""")
print(f"\n{theater.name}'s Movies:\n")
if movies:
for movie in movies:
movie = session.query(Movie).filter_by(id=movie.movie_id).first()
print(f"Title: {movie.title} | Rating: {movie.rating}\n")
else:
print("No movies playing currently due to COVID.")
print("Please check back when we get a government that cares about its people.")
def browse_by_category():
print("****************** BROWSE FOR MOVIES BY CATEGORY ******************")
print()
categories = session.query(Category).all()
categories_dict = {
'1': 'Drama',
'2': 'Action',
'3': 'Horror',
'4': 'Scifi',
'5': 'Romance',
'6': 'Comedy'
}
print("Movie categories: \n")
for i, category in enumerate(categories, 1):
print(f"{i}. {category.category_name}")
print()
category = input("Which category are you interested in (Enter a number): ").strip()
category = session.query(Category).filter_by(category_name=categories_dict[category]).first()
movies = category.movies
print(f"Movies for category: {category.category_name}\n")
for i, movie in enumerate(movies, 1):
print(i, movie.title)
def purchase_ticket():
print("****************** PURCHASE TICKETS ******************")
print()
if not state.active_account:
print("You must be logged in to purchase a ticket.")
return
# Get account credentials that were created on registration
account = state.active_account
# Grab the theater_schedule objects
schedules = session.query(theater_schedule).all()
print("\nMOVIE THEATER SCHEDULES\n")
# List all available movies and theaters and times
# with index loop so they can input a number representing an object
# that will later get mapped to elements of tuples appended to a list
index = 0
for i in schedules:
theater = session.query(Theater).filter_by(id=i.theater_id).first()
movie = session.query(Movie).filter_by(id=i.movie_id).first()
index += 1
print(f"""{index}: {theater.name} {theater.address}, Prices: {theater.ticket_price}
{movie.title}, Schedules: {i.time}, Seats: {i.seats_available}\n""")
ticket_number = input("\nEnter ticket number: ").strip()
ticket_number = int(ticket_number) - 1
quantity = input("How many tickets would you like to purchase: ").strip()
quantity = int(quantity)
category = input("Which category of tickets (i.e. Adult/Child): ").strip()
theaters_list = []
    # Create a tuple of the required information to purchase a ticket
# along with an index so the user can select a tuple
for i, x in enumerate(schedules, 1):
theater = session.query(Theater).filter_by(id=x.theater_id).first()
movie = session.query(Movie).filter_by(id=x.movie_id).first()
payment_id = random_number_generator()
payment_id = int(payment_id)
tup = (i, theater.id, movie.id, x.time, payment_id, account.id)
theaters_list.append(tup)
my_ticket = theaters_list[ticket_number]
# I need to figure out the price for the category chosen for
# this particular theater outside of the loop because we don't want to do this for every theater
my_theater = session.query(Theater).filter_by(id=my_ticket[1]).first()
my_movie = session.query(Movie).filter_by(id=my_ticket[2]).first()
ticket_price = float(my_theater.ticket_price[category])
total = ticket_price * quantity
ticket = Ticket(
theater_id=my_ticket[1],
movie_id=my_ticket[2],
time=my_ticket[3],
payment_id=my_ticket[4],
account_id=my_ticket[5],
quantity=quantity,
total=total
)
payment = Payment(
id=my_ticket[4],
credit_card=account.credit_card,
paid=True
)
session.add(ticket)
session.add(payment)
session.commit()
# I think there's gotta be a better way to do this, but what it's supposed to do
# is update the value of seats_available in theater_schedule
    # every time someone purchases a ticket
my_theater_schedule = session.query(theater_schedule).filter_by(
theater_id=my_ticket[1],
movie_id=my_ticket[2],
time=my_ticket[3]
).first()
new_seats_available = my_theater_schedule.seats_available - quantity
engine.execute(update(theater_schedule).where(and_(theater_schedule.c.theater_id==my_ticket[1],
theater_schedule.c.movie_id==my_ticket[2],
theater_schedule.c.time==my_ticket[3])).values(seats_available=new_seats_available))
ticket_receipt = session.query(Ticket).filter_by(id=ticket.id).first()
print("\nYour receipt: \n")
print(f"""Movie: {my_movie.title} | Location: {my_theater.name} at {my_theater.address}
Time: {ticket_receipt.time} | Quantity: {ticket_receipt.quantity} tickets
Total Price: ${total} \n
Payment Id: {payment.id} | Date of Purchase: {ticket_receipt.created.date()}""")
print("\nEnjoy your movie!\n")
def view_ticket():
print("****************** VIEW MY CURRENT TICKETS ******************")
print()
if not state.active_account:
print("You must be logged in to view a purchased ticket.")
return
# Grab account
account = state.active_account
# Get account-related tickets
tickets = session.query(Ticket).filter_by(account_id=account.id).all()
# If account has no tickets return
if not tickets:
return
# Return only valid tickets - tickets that were purchased today
today = datetime.today().date()
print("\nMy Tickets: \n")
for ticket in tickets:
if ticket.created.date() == today:
theater = session.query(Theater).filter_by(id=ticket.theater_id).first()
movie = session.query(Movie).filter_by(id=ticket.movie_id).first()
payment = session.query(Payment).filter_by(id=ticket.payment_id).first()
            if not payment.paid:
                status = 'Unpaid'
            else:
                status = 'Paid'
print(f"""
Movie: {movie.title} | Location: {theater.name} at {theater.address}
Time: {ticket.time} | Quantity: {ticket.quantity} tickets
Total Price: ${ticket.total} | Status: {status}\n
Payment Id: {ticket.payment_id} | Date of Purchase: {ticket.created.date()}\n
""")
| python |
def _foo():
return "private" | python |
from collections import defaultdict
from itertools import islice
from typing import Dict, List, Optional, Sequence
import torch
from tango.common.dataset_dict import DatasetDictBase
from tango.common.exceptions import ConfigurationError
from tango.common.lazy import Lazy
from tango.common.tqdm import Tqdm
from tango.format import Format, JsonFormat
from tango.step import Step
from .data import DataLoader
from .eval_callback import EvalCallback
from .model import Model
from .util import check_dataset, move_to_device, resolve_device, set_seed_all
@Step.register("torch::eval")
class TorchEvalStep(Step):
"""
A PyTorch evaluation loop that pairs well with :class:`TorchTrainStep`.
.. tip::
Registered as a :class:`~tango.step.Step` under the name "torch::eval".
.. important::
The evaluation loop will use a GPU automatically if one is available.
You can control which GPU it uses with the environment variable ``CUDA_VISIBLE_DEVICES``.
For example, set ``CUDA_VISIBLE_DEVICES=1`` to force ``TorchEvalStep`` to only use
the GPU with ID 1.
.. warning::
By default the metrics specified by the ``metric_names`` parameter
are aggregated by simply averaging across batches.
This behavior is usually correct for metrics like "loss" or "accuracy",
for example, but may not be correct for other metrics like "F1".
If this is not correct for your metric you will need to handle the aggregation
internally in your model or with an :class:`EvalCallback`
using the :meth:`EvalCallback.post_batch()` method.
Then set the parameter ``auto_aggregate_metrics`` to ``False``.
"""
DETERMINISTIC = True
CACHEABLE = True
FORMAT: Format = JsonFormat()
SKIP_ID_ARGUMENTS = {"log_every"}
def run( # type: ignore[override]
self,
model: Model,
dataset_dict: DatasetDictBase,
dataloader: Lazy[DataLoader],
test_split: str = "test",
seed: int = 42,
eval_steps: Optional[int] = None,
log_every: int = 1,
metric_names: Sequence[str] = ("loss",),
auto_aggregate_metrics: bool = True,
callbacks: Optional[List[Lazy[EvalCallback]]] = None,
) -> Dict[str, float]:
"""
Evaluate the ``model``.
:param model:
The model to evaluate. It should return a ``dict`` from its ``forward()`` method
that includes all of the metrics in ``metric_names`` .
:param dataset_dict:
Should contain the test data.
:param dataloader:
The data loader that generates test batches. The batches should be :class:`dict`
objects.
:param test_split:
The name of the data split used for evaluation in the ``dataset_dict``.
Default is "test".
:param seed:
Used to set the RNG states at the beginning of the evaluation loop.
:param eval_steps:
The number of steps to evaluate for. If not specified evaluation will
stop after a complete iteration through the ``dataloader``.
:param log_every:
Log every this many steps. Default is ``1``.
:param metric_names:
The names of the metrics to track and aggregate. Default is ``("loss",)``.
:param auto_aggregate_metrics:
If ``True`` (the default), the metrics will be averaged across batches.
This may not be the correct behavior for some metrics (such as F1),
in which you should set this to ``False`` and handle the aggregation
internally in your model or with an :class:`EvalCallback`
(using :meth:`EvalCallback.post_batch()`).
:param callbacks:
A list of :class:`EvalCallback`.
"""
set_seed_all(seed)
check_dataset(dataset_dict, test_split)
# Resolve device.
device = resolve_device()
# Prep model.
model = model.eval().to(device)
# Construct dataloader.
dataloader: DataLoader = dataloader.construct(dataset=dataset_dict[test_split])
steps: int
try:
dataloader_len = len(dataloader)
steps = dataloader_len if eval_steps is None else min(dataloader_len, eval_steps)
except TypeError:
if eval_steps is None:
raise ConfigurationError(
"You must set 'eval_steps' for streaming/iterable datasets"
)
else:
steps = eval_steps
# Initialize callbacks.
callbacks: List[EvalCallback] = [
callback.construct(
step_id=self.unique_id,
work_dir=self.work_dir,
model=model,
dataset_dict=dataset_dict,
dataloader=dataloader,
)
for callback in (callbacks or [])
]
for callback in callbacks:
callback.pre_eval_loop()
eval_batches = enumerate(islice(dataloader, steps))
running_metrics: Dict[str, float] = defaultdict(float)
aggregated_metrics: Dict[str, float] = {}
with Tqdm.tqdm(eval_batches, desc="Evaluating", total=steps) as batch_iter:
for step, batch in batch_iter:
should_log_this_step = step % log_every == 0 or step == steps - 1
for callback in callbacks:
callback.pre_batch(step, batch)
batch = move_to_device(batch, device)
with torch.inference_mode():
outputs = model(**batch)
for callback in callbacks:
callback.post_batch(step, outputs)
# Gather metrics we want to track.
batch_metrics = {
k: outputs[k].item() if isinstance(outputs[k], torch.Tensor) else outputs[k]
for k in metric_names
}
# Aggregate metrics.
if auto_aggregate_metrics:
for k in batch_metrics:
running_metrics[k] += batch_metrics[k]
aggregated_metrics[k] = running_metrics[k] / (step + 1)
else:
aggregated_metrics.update(batch_metrics)
# Update progress bar.
if should_log_this_step:
batch_iter.set_postfix(**aggregated_metrics)
# Clean up to help garbage collector. Hopefully this saves memory.
del batch
del outputs
del batch_metrics
for callback in callbacks:
callback.post_eval_loop(aggregated_metrics)
return aggregated_metrics
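# Toy illustration (separate from the step itself) of the auto_aggregate_metrics
# behaviour documented above: tracked metrics are averaged over the batches seen
# so far, exactly like running_metrics / (step + 1) in the loop.
def _running_average_demo():
    batch_losses = [0.9, 0.7, 0.5]  # stand-in per-batch "loss" values
    running = 0.0
    aggregated = []
    for step, loss in enumerate(batch_losses):
        running += loss
        aggregated.append(running / (step + 1))
    return aggregated  # approximately [0.9, 0.8, 0.7]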
| python |
import sproxel
from zipfile import ZipFile, ZIP_DEFLATED
import json
import os, sys
import imp
CUR_VERSION=1
def save_project(filename, proj):
# gather layers
layers=[]
for spr in proj.sprites:
for l in spr.layers:
if l not in layers: layers.append(l)
# prepare metadata
meta={}
meta['version']=CUR_VERSION
meta['layers']=[
dict(name=l.name, offset=l.offset, visible=l.visible,
palette = proj.palettes.index(l.palette) if l.palette!=None else -1)
for l in layers]
meta['sprites']=[
dict(name=s.name, layers=[layers.index(l) for l in s.layers], curLayer=s.curLayerIndex)
for s in proj.sprites]
meta['palettes']=[
dict(name=p.name, colors=p.colors)
for p in proj.palettes]
meta['mainPalette']=proj.palettes.index(proj.mainPalette)
# write zip file
with ZipFile(filename, 'w', ZIP_DEFLATED) as zf:
zf.writestr('metadata.json', json.dumps(meta, sort_keys=True, indent=2))
for i, l in enumerate(layers): zf.writestr('%04d.png' % i, l.toPNG())
return True
def load_project(filename):
prj=sproxel.Project()
with ZipFile(filename, 'r') as zf:
meta=json.loads(zf.read('metadata.json'))
# load palettes
palettes=[]
for mp in meta['palettes']:
p=sproxel.Palette()
p.name=mp['name']
p.colors=[tuple(c) for c in mp['colors']]
palettes.append(p)
prj.palettes=palettes
try:
prj.mainPalette=palettes[meta['mainPalette']]
except IndexError:
try:
prj.mainPalette=palettes[0]
except IndexError:
prj.mainPalette=sproxel.Palette()
# load layers
layers=[]
for i, ml in enumerate(meta['layers']):
l=sproxel.layer_from_png(zf.read('%04d.png' % i),
prj.palettes[ml['palette']] if ml['palette']>=0 else None)
l.name =ml['name' ]
l.offset =tuple(ml['offset'])
l.visible=ml['visible']
print 'layer', i, 'type', l.dataType
layers.append(l)
# load sprites
sprites=[]
for ms in meta['sprites']:
s=sproxel.Sprite()
s.name=ms['name']
for i, li in enumerate(ms['layers']):
l=layers[li]
s.insertLayerAbove(i, l)
s.curLayerIndex=ms['curLayer']
sprites.append(s)
prj.sprites=sprites
#print prj.sprites
return prj
def init_plugin_pathes():
sproxel.plugin_pathes=[os.path.abspath(p) for p in sproxel.plugin_pathes]
sys.path=sproxel.plugin_pathes+sys.path
def scan_plugin_module(name, fn):
mod=imp.load_source(name, fn)
    try:
        info=mod.plugin_info
    except AttributeError:
        return
print ' plugin', name, fn
info['module']=name
info['path']=fn
sproxel.plugins_info[name]=info
sproxel.plugins[name]=mod
def scan_plugins():
sproxel.plugins_info=dict()
sproxel.plugins=dict()
for path in sproxel.plugin_pathes:
#print 'scanning', path
for name in os.listdir(path):
fn=os.path.join(path, name)
if os.path.isdir(fn):
fn=os.path.join(fn, '__init__.py')
if os.path.isfile(fn):
scan_plugin_module(name, fn)
else:
modname, ext = os.path.splitext(name)
if ext.lower()=='.py':
scan_plugin_module(modname, fn)
def register_plugins():
for mod in sproxel.plugins.itervalues():
if hasattr(mod, 'register'):
print 'registering plugin', mod.plugin_info['module']
try:
mod.register()
except:
sys.excepthook(*sys.exc_info())
print 'error registering plugin', mod.plugin_info['name']
def unregister_plugins():
for mod in sproxel.plugins.itervalues():
if hasattr(mod, 'unregister'):
print 'unregistering plugin', mod.plugin_info['module']
try:
mod.unregister()
except:
sys.excepthook(*sys.exc_info())
print 'error unregistering plugin', mod.plugin_info['name']
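# Sketch of what a plugin module is expected to look like (hypothetical file on
# one of sproxel.plugin_pathes; the names below are illustrative, not shipped code):
#
#   plugin_info = dict(name='Hello Plugin', author='example', version='1.0')
#
#   def register():
#     print 'hello from plugin'
#
#   def unregister():
#     print 'goodbye from plugin'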
| python |
import uuid
from django.db import models
class Dice(models.Model):
sides = models.PositiveIntegerField()
class Roll(models.Model):
roll = models.PositiveIntegerField()
class DiceSequence(models.Model):
uuid = models.UUIDField(primary_key=False, default=uuid.uuid4, editable=True, unique=True)
seq_name = models.CharField(max_length=256)
created = models.DateTimeField(auto_now_add=True)
owner = models.ForeignKey('auth.User', related_name='dice_sequence', on_delete=models.CASCADE)
sequence = models.ManyToManyField(Dice)
class RollSequence(models.Model):
created = models.DateTimeField(auto_now_add=True)
owner = models.ForeignKey('auth.User', related_name='roll_sequence', on_delete=models.CASCADE)
roll_sequence = models.ManyToManyField(Roll)
dice_sequence = models.ForeignKey(DiceSequence, related_name='+', on_delete=models.PROTECT)
class Meta:
ordering = ('created',)
| python |
class Solution(object):
def XXX(self, n):
"""
:type n: int
:rtype: str
"""
if not isinstance(n, int):
return ""
if n == 1:
return "1"
        pre_value = self.XXX(n-1)  # recurse to get the previous term
        # two-pointer scan over the previous term
i = 0
res = ""
for j in range(len(pre_value)):
if pre_value[j] != pre_value[i]:
res += str(j-i) + pre_value[i]
i = j
res += str(len(pre_value)-i) + pre_value[i]
return res
| python |
# -*- coding: utf-8 -*-
from django.conf import settings
import requests
from sendsms.backends.base import BaseSmsBackend
TINIYO_API_URL = "https://api.tiniyo.com/v1/Account/SENDSMS_TINIYO_TOKEN_ID/Message"
TINIYO_TOKEN_ID = getattr(settings, "SENDSMS_TINIYO_TOKEN_ID", "")
TINIYO_TOKEN_SECRET = getattr(settings, "SENDSMS_TINIYO_TOKEN_SECRET", "")
class SmsBackend(BaseSmsBackend):
"""
Tiniyo gateway backend. (https://tiniyo.com)
Docs in https://tiniyo.com/docs/#/quickstart
Settings::
SENDSMS_BACKEND = 'sendsms.backends.tiniyo.SmsBackend'
SENDSMS_TINIYO_TOKEN_ID = 'xxx'
SENDSMS_TINIYO_TOKEN_SECRET = 'xxx'
Usage::
from sendsms import api
api.send_sms(
body='This is first sms to tiniyo', from_phone='TINIYO', to=['+13525051111']
)
"""
def send_messages(self, messages):
payload = []
for m in messages:
entry = {"src": m.from_phone, "dst": m.to, "text": m.body}
payload.append(entry)
api_url = TINIYO_API_URL.replace("SENDSMS_TINIYO_TOKEN_ID", TINIYO_TOKEN_ID)
response = requests.post(
api_url, json=payload, auth=(TINIYO_TOKEN_ID, TINIYO_TOKEN_SECRET)
)
if response.status_code != 200:
if self.fail_silently:
return False
raise Exception(
"Error: %d: %s"
% (response.status_code, response.content.decode("utf-8"))
)
return True
| python |
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import math
import torch.nn as nn
from fairseq.models.transformer import TransformerEncoder
from .linformer_sentence_encoder_layer import LinformerTransformerEncoderLayer
class LinformerTransformerEncoder(TransformerEncoder):
"""
Implementation for a Bi-directional Linformer based Sentence Encoder used
in BERT/XLM style pre-trained models.
This first computes the token embedding using the token embedding matrix,
position embeddings (if specified) and segment embeddings
(if specified). After applying the specified number of
LinformerEncoderLayers, it outputs all the internal states of the
encoder as well as the final representation associated with the first
token (usually CLS token).
Input:
- tokens: B x T matrix representing sentences
- segment_labels: B x T matrix representing segment label for tokens
Output:
- a tuple of the following:
- a list of internal model states used to compute the
predictions where each tensor has shape T x B x C
- sentence representation associated with first input token
in format B x C.
"""
def __init__(self, args, dictionary, embed_tokens):
self.compress_layer = None
super().__init__(args, dictionary, embed_tokens)
def build_encoder_layer(self, args):
if self.args.shared_layer_kv_compressed == 1 and self.compress_layer is None:
compress_layer = nn.Linear(
self.args.max_positions,
self.args.max_positions // self.args.compressed,
)
            # initialize parameters for compressed layer
nn.init.xavier_uniform_(compress_layer.weight, gain=1 / math.sqrt(2))
if self.args.freeze_compress == 1:
compress_layer.weight.requires_grad = False
self.compress_layer = compress_layer
return LinformerTransformerEncoderLayer(args, self.compress_layer)
| python |
# file arrange, remove, rename
import os
import astropy.io.fits as fits
def oswalkfunc():
f=open('oswalk.list','w')
#workDIr = os.path.abspath(b'.')
for root, dirs, files in os.walk('.'): # os.walk(".", topdown = False):
# all files with path names
for name in files:
#print(os.path.join(root, name))
f.write(os.path.join(root, name)+'\n')
f.close()
with open('oswalk.list','r') as file_handle: lines = file_handle.read().splitlines()
print(len(lines),'files')
return lines
# lines = [line.strip() for line in file_handle]
def fnamechange(ii):
#for CCA250
i=ii.split('/')[-1]
head=fits.getheader(ii)
objname=head['OBJECT']
dateobs=head['DATE-OBS']
datestr=dateobs[:4]+dateobs[5:7]+dateobs[8:10]+'-'+dateobs[11:13]+dateobs[14:16]+dateobs[17:20]
filterstr=head['FILTER']
exptimestr=str(int(head['EXPTIME']))
newname='Calib-CCA250-'+objname+'-'+datestr+'-'+filterstr+'-'+exptimestr+'.fits'
print('cp '+ii+' '+'/'.join(ii.split('/')[:-1])+'/'+newname)
os.system('cp '+ii+' '+'/'.join(ii.split('/')[:-1])+'/'+newname)
def LSGTfilechange(ii):
# From Calib-LSGT-NGC3367-20180519-220208-g-BIN1-W-180-003.fits
# To Calib-LSGT-NGC3367-20180519-220208-g-180.fits
i=ii.split('/')[-1]
frag=i.split('-')
frag[0]=='Calib'
# if frag[1]=='T52' : obs='LSGT'
# else : obs=frag[1]
finalname='Calib-LSGT'+'-'+frag[2]+'-'+frag[3]+'-'+frag[4]+'-'+frag[5]+'-'+frag[8]+'.fits'
os.system('mv '+ii+' '+'/'.join(ii.split('/')[:-1])+'/'+finalname)
def iTelfilechange(ii):
# From Calib-T21-ceouobs.changsu-NGC3367-20161130-042831-R-BIN1-E-180-003.fits
# To Calib-T21-NGC3367-20161130-042831-R-180.fits
i=ii.split('/')[-1]
frag=i.split('-')
frag[0]=='Calib'
# if frag[1]=='T52' : obs='LSGT'
# else : obs=frag[1]
#finalname='Calib-'+ frag[1] +'-'+frag[2]+'-'+frag[3]+'-'+frag[4]+'-'+frag[5]+'-'+frag[8]+'.fits'
finalname='Calib-'+ frag[1] +'-'+frag[3]+'-'+frag[4]+'-'+frag[5]+'-'+frag[6]+'-'+frag[9]+'.fits'
os.system('mv '+ii+' '+'/'.join(ii.split('/')[:-1])+'/'+finalname)
def simplerename(ii,a,b):
'''
simplerename(filename, from, to)
'''
import os
#i=ii.split('/')[-1]
os.system('rename '+a+' '+b+' '+ii)
def oswalknamesep(i):
filename=i.split('/')[-1]
head='/'.join(i.split('/')[:-1])+'/'
return filename, head
###########################################################################
lines= oswalkfunc()
lines.sort()
fitslist= [s for s in lines if s.split('/')[-1][-5:]=='.fits']
files=os.listdir('.')
dirs=[i for i in files if os.path.isdir(i)]
lines= oswalkfunc()
lines.sort()
fitslist= [s for s in lines if s.split('/')[-1][-5:]=='.fits']
for i in lines :
if ('Cal' in i and 'psf' in i) or ('merge.cat' in i) or ('Cal' in i and '.xml' in i) or ('Cal' in i and '.png' in i) or ('Cal' in i and '.cat' in i) or ('Cal' in i and 'seg' in i) or ('hdre' in i ) or ('reCal' in i ) or ('recCal' in i) or ('wr' in i and '.fit' in i) or ('gregister' in i) :
# if 'com.cat' in i :
print('remove', i)
os.remove(i)
## LSGT
lines= oswalkfunc()
lines.sort()
fitslist= [s for s in lines if s.split('/')[-1][-5:]=='.fits']
for i in fitslist :
if 'cCalib' in i :
print('rename', i)
os.system('rename cCalib Calib '+i)
lines= oswalkfunc()
lines.sort()
fitslist= [s for s in lines if s.split('/')[-1][-5:]=='.fits']
for i in fitslist :
if 'Calibrated' in i :
print('rename', i)
os.system('rename Calibrated Calib '+i)
lines= oswalkfunc()
lines.sort()
fitslist= [s for s in lines if s.split('/')[-1][-5:]=='.fits']
for i in fitslist :
if 'T52-ceouobs.changsu' in i :
print('rename', i)
os.system('rename T52-ceouobs.changsu LSGT '+i)
if 'T52-ceouobs.joonho' in i :
print('rename', i)
os.system('rename T52-ceouobs.joonho LSGT '+i)
lines= oswalkfunc()
lines.sort()
fitslist= [s for s in lines if s.split('/')[-1][-5:]=='.fits']
for i in fitslist :
if ('LSGT' in i) and ('BIN' in i) :
print('rename', i)
LSGTfilechange(i)
## CCA250
lines= oswalkfunc()
lines.sort()
for i in lines:
if 'CCA250' in i and '.new' in i :
print('rename & remove', i)
fnamechange(i)
os.remove(i)
lines= oswalkfunc()
lines.sort()
for i in lines :
if 'CCA250' in i:
os.system('rename NGC3367-18 NGC3367-2018 '+i)
os.system('rename NGC3367-17 NGC3367-2017 '+i)
os.system('rename Calibrated Calib '+i)
os.system('rename 0.0.fits 0.fits '+i)
os.system('rename 00.fits .fits '+i)
os.system('rename ..fits .fits '+i)
## CCA250 directory and files
os.chdir('CCA250')
os.system('rename 100-c 100c Calib*.fits')
os.system('mv *-m575-* m575/')
os.system('mv *-m625-* m625/')
os.system('mv *-m675-* m675/')
os.system('mv *-m725-* m725/')
os.system('mv *-m775-* m775/')
os.system('mv *-V-* V/')
os.system('mv *-R-* R/')
os.chdir('c')
os.system('rename 100-c 100c Calib*.fits')
os.system('mv *-100c-* ../100c')
os.chdir('..')
os.rmdir('c')
os.system('rename NGC3367-18 NGC3367-2018 Calib*.fits')
os.system('rename NGC3367-17 NGC3367-2017 Calib*.fits')
os.system('rename 0.0.fits 0.fits Calib*.fits')
os.system('rename 00.fits .fits Calib*.fits')
os.system('rename ..fits .fits Calib*.fits')
## itelescope T21
lines= oswalkfunc()
lines.sort()
for i in lines :
if 'Calib-T21-ceou' in i:
print('file name :',i)
iTelfilechange(i)
## MAO SNUCAM
lines= oswalkfunc()
lines.sort()
for i in lines :
if 'SNUCAM' in i :
if ('reaCal' in i) or ('reCal' in i) or ('aCalib' in i) or('Calib-MAIDANAK' in i):
print('remove',i)
os.remove(i)
## MCD30INCH
lines= oswalkfunc()
lines.sort()
for i in lines:
if 'MCD30INCH' in i :
print(i)
if not 'Calib-MCD30INCH' in i:
print( 'rename ',i)
simplerename(i,'Cal-30inch','Calib-MCD30INCH')
'''
!rename Cal-30inch Calib-MCD30INCH Cal*.fits
!rename Calib-30inch Calib-MCD30INCH Cal*.fits
!rename Calib-MCD30inch Calib-MCD30INCH Cal*.fits
'''
## SOAO
lines= oswalkfunc()
lines.sort()
for i in lines:
if 'SOAO' in i and 'SOAO_FLI' in i:
print ('rename',i)
simplerename(i,'SOAO_FLI','SOAO')
if 'SOAO' in i and 'SOAO_FLI4k' in i:
print ('rename',i)
simplerename(i,'SOAO_FLI4k','SOAO')
if 'SOAO' in i and 'SOAO4k' in i:
print ('rename',i)
simplerename(i,'SOAO4k','SOAO')
## DOAO
lines= oswalkfunc()
lines.sort()
fitslist= [s for s in lines if s.split('/')[-1][-5:]=='.fits']
print(len(fitslist))
for i in fitslist:
if 'gregister' in i: os.remove(i)
lines= oswalkfunc()
lines.sort()
| python |
from django.db import models
class TrackedModel(models.Model):
"""
a model which keeps track of creation and last updated time
"""
created = models.DateTimeField(auto_now_add=True)
updated = models.DateTimeField(auto_now=True)
class Meta:
abstract = True
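# Illustrative concrete model (hypothetical, not part of this app) showing how
# subclassing the abstract base adds the created/updated columns to a real table.
class Note(TrackedModel):
    text = models.TextField()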
| python |
# -*- coding: utf-8 -*-
'''
Created by 15 cm on 11/22/15 3:20 PM
Copyright © 2015 15cm. All rights reserved.
'''
__author__ = '15cm'
import json
import urllib2
import multiprocessing
import numpy as np
from PIL import Image
import io
import os
CURPATH = os.path.split(os.path.realpath(__file__))[0]
DATAPATH = os.path.join(os.path.dirname(CURPATH),'dataset')
def download_stuff(stuff):
image_bytes = urllib2.urlopen(stuff.link).read()
data_stream = io.BytesIO(image_bytes)
pil_image = Image.open(data_stream)
try:
pil_image.load()
except IOError:
pass
w,h = pil_image.size
pil_image.thumbnail((w/3,h/3))
pil_image.save(os.path.join(DATAPATH,str(stuff.id)+'.jpg'),'jpeg')
class DataHandler:
class ImageData:
def __init__(self,id,link,label):
self.id = id
self.link = link
self.label = label
def __init__(self):
        self.data = []  # list of ImageData(id, link, label) entries
self.label_dict = {}
self.label_list = []
self.data_file = os.path.join(DATAPATH,'data.txt')
self.label_list_file = os.path.join(DATAPATH,'label_list.json')
def label_filter(self,s):
# valid_word_list = ['衣','裙','裤','长','大','短','单','套','衫','毛']
valid_word_list = ['裙','衣','裤']
valid_word_set = set((map(lambda x: x.decode('utf-8'),valid_word_list)))
res_str = ''
if not isinstance(s,unicode):
s = s.decode('utf-8')
for word in s:
if word in valid_word_set:
res_str += word
break
if not res_str:
res_str = '其他'.decode('utf-8')
return res_str.encode('utf-8')
def parse_data(self,json_file):
file = os.path.join(DATAPATH,json_file)
with open(file) as f:
json_content = json.load(f)
for item in json_content:
id=int(item['id'])
label = self.label_filter(item['sub_category'])
link = item['picture']
if not self.label_dict.has_key(label):
self.label_list.append(label)
self.label_dict[label] = len(self.label_list) - 1
self.data.append(self.ImageData(id, link, self.label_dict[label]))
def download(self,num = -1,id_geq = 0):
if num > 0:
data = [x for x in self.data if x.id < num and x.id > id_geq]
else:
data = [x for x in self.data if x.id > id_geq]
pool = multiprocessing.Pool(processes=5)
pool.map(download_stuff,data)
def save(self):
# data_matrix:
# id label
# ... ...
data_matrix = np.empty((len(self.data),2))
for i in range(len(self.data)):
data_matrix[i][0] = self.data[i].id
data_matrix[i][1] = self.data[i].label
np.savetxt(self.data_file,data_matrix)
with open(self.label_list_file,'w') as f:
json.dump(self.label_list,f)
def load(self):
self.data_matrix = np.loadtxt(self.data_file)
with open(self.label_list_file) as f:
self.label_list = json.load(f)
def get_lables(self,id = -1):
if id >= 0:
return self.data_matrix[id][1]
else:
return self.data_matrix[:,1]
def tell_label(self,label):
return self.label_list[label]
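def _usage_sketch():
    # Rough usage outline; 'items.json' is a placeholder for the crawler's JSON
    # dump inside the dataset directory, not a file shipped with this module.
    handler = DataHandler()
    handler.parse_data('items.json')  # build label mapping and image list
    handler.save()                    # persist data.txt and label_list.json
    handler.download(num=100)         # fetch and thumbnail the first 100 images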
| python |
#!/usr/bin/env python2
# -*- coding: utf-8 -*-
from __future__ import absolute_import
from .pygame_component import Pygame
from .pygame_surface import PygameSurface
from .blit_surface import BlitSurface
from .blocking_pygame_event_pump import BlockingPygameEventPump
from .color_fill import ColorFill
from .draw_on_resized import DrawOnResized
from .resize_event_on_videoresize import ResizeEventOnVideoresize
from .surface_draw_event import SurfaceDrawEvent
| python |
#!/usr/bin/python
'''
(C) Copyright 2018-2019 Intel Corporation.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
GOVERNMENT LICENSE RIGHTS-OPEN SOURCE SOFTWARE
The Government's rights to use, modify, reproduce, release, perform, display,
or disclose this software are subject to the terms of the Apache License as
provided in Contract No. B609815.
Any reproduction of computer software, computer software documentation, or
portions thereof marked with this legend must also reproduce the markings.
'''
from __future__ import print_function
import os
import traceback
from apricot import TestWithServers
from daos_api import DaosPool, DaosServer, DaosApiError
class PoolSvc(TestWithServers):
"""
Tests svc argument while pool create.
:avocado: recursive
"""
def tearDown(self):
try:
if self.pool is not None and self.pool.attached:
self.pool.destroy(1)
finally:
super(PoolSvc, self).tearDown()
def test_poolsvc(self):
"""
Test svc arg during pool create.
:avocado: tags=pool,svc
"""
# parameters used in pool create
createmode = self.params.get("mode", '/run/createtests/createmode/*/')
createuid = os.geteuid()
creategid = os.getegid()
createsetid = self.params.get("setname", '/run/createtests/createset/')
createsize = self.params.get("size", '/run/createtests/createsize/')
createsvc = self.params.get("svc", '/run/createtests/createsvc/*/')
expected_result = createsvc[1]
try:
# initialize a python pool object then create the underlying
# daos storage
self.pool = DaosPool(self.context)
self.pool.create(createmode, createuid, creategid,
createsize, createsetid, None, None, createsvc[0])
self.pool.connect(1 << 1)
# checking returned rank list for server more than 1
iterator = 0
while (
int(self.pool.svc.rl_ranks[iterator]) > 0 and
int(self.pool.svc.rl_ranks[iterator]) <= createsvc[0] and
int(self.pool.svc.rl_ranks[iterator]) != 999999
):
iterator += 1
if iterator != createsvc[0]:
self.fail("Length of Returned Rank list is not equal to "
"the number of Pool Service members.\n")
rank_list = []
for iterator in range(createsvc[0]):
rank_list.append(int(self.pool.svc.rl_ranks[iterator]))
if len(rank_list) != len(set(rank_list)):
self.fail("Duplicate values in returned rank list")
self.pool.pool_query()
leader = self.pool.pool_info.pi_leader
if createsvc[0] == 3:
# kill pool leader and exclude it
self.pool.pool_svc_stop()
self.pool.exclude([leader])
# perform pool disconnect, try connect again and disconnect
self.pool.disconnect()
self.pool.connect(1 << 1)
self.pool.disconnect()
# kill another server which is not a leader and exclude it
server = DaosServer(self.context, self.server_group, leader - 1)
server.kill(1)
self.pool.exclude([leader - 1])
# perform pool connect
self.pool.connect(1 << 1)
if expected_result in ['FAIL']:
self.fail("Test was expected to fail but it passed.\n")
except DaosApiError as excep:
print(excep)
print(traceback.format_exc())
if expected_result == 'PASS':
self.fail("Test was expected to pass but it failed.\n")
| python |
from UE4Parse.BinaryReader import BinaryStream
class FPathHashIndexEntry:
FileName: str
Location: int
def __init__(self, reader: BinaryStream):
self.FileName = reader.readFString()
self.Location = reader.readInt32()
| python |
from typing import List
import cv2
from vision.domain.iCamera import ICamera
from vision.domain.iCameraFactory import ICameraFactory
from vision.infrastructure.cvCamera import CvCamera
from vision.infrastructure.cvVisionException import CameraDoesNotExistError
from vision.infrastructure.fileCamera import FileCamera
class CvCameraFactory(ICameraFactory):
def __init__(self, max_camera_count: int = 10) -> None:
self._max_camera_count = max_camera_count
        self._cameras: List[int] = [1337]  # 1337 is reserved for the file-backed test camera (see create_camera)
self._find_all_camera()
def get_cameras(self) -> List[int]:
return self._cameras
def create_camera(self, index: int) -> ICamera:
if index not in self._cameras:
raise CameraDoesNotExistError(index)
if index == 1337:
return FileCamera('./vision/infrastructure/2.jpg')
return CvCamera(index)
def _find_all_camera(self) -> None:
index = 0
while index < self._max_camera_count:
cap = cv2.VideoCapture(index)
if cap.isOpened():
cap.release()
self._cameras.append(index)
index += 1
| python |
import contextlib
import logging
import six
import py.test
_LOGGING_CONFIGURED_STREAM = None
@py.test.fixture(scope="session")
def streamconfig():
global _LOGGING_CONFIGURED_STREAM
if not _LOGGING_CONFIGURED_STREAM:
_LOGGING_CONFIGURED_STREAM = six.StringIO()
logging.basicConfig(
stream=_LOGGING_CONFIGURED_STREAM, level=logging.INFO
)
@contextlib.contextmanager
def manager():
_LOGGING_CONFIGURED_STREAM.truncate(0) # reset stream
_LOGGING_CONFIGURED_STREAM.seek(0) # rewind stream
yield _LOGGING_CONFIGURED_STREAM
_LOGGING_CONFIGURED_STREAM.seek(0) # rewind stream
return manager
_MESSAGES = (
"Hello world",
"My hovercraft is full of eels",
"49.3",
)
@py.test.fixture(scope="function", params=_MESSAGES)
def message(request):
return request.param
_MODULE_NAMES = (
"tests.mountains",
"tests.music.instruments.cymbal",
"tests.music",
"tests.discombobulate",
"tests.music.instruments",
"tests.mountains.ventoux",
)
@py.test.fixture(scope="function", params=_MODULE_NAMES)
def module_name(request):
return request.param
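# Hypothetical test sketch showing how the fixtures above are meant to be used
# together: the stream yielded by streamconfig() captures root-logger output.
def test_logging_is_captured(streamconfig, message):
    with streamconfig() as stream:
        logging.getLogger("tests.demo").info(message)
        assert message in stream.getvalue()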
| python |
import re
# Solution
def part1(data, multiplier = 1):
pattern = r'\d+'
(player_count, marble_count) = re.findall(pattern, data)
(player_count, marble_count) = (int(player_count), int(marble_count) * multiplier)
players = [0] * player_count
marbles = DoubleLinkedList(0)
k = 0
for i in range(1, marble_count + 1):
if i % 23 == 0:
players[k] += (i + marbles.remove_node())
else:
marbles.add_node(i)
k = (k + 1) % player_count
return max(x for x in players)
def part2(data, multiplier):
    return part1(data, multiplier)
class DoubleLinkedList:
def __init__(self, initial_value):
initial_node = DoubleLinkedListNode(initial_value)
initial_node.prev = initial_node
initial_node.next = initial_node
self.current = initial_node
def add_node(self, node_value):
left = self.current.next
right = self.current.next.next
new_node = DoubleLinkedListNode(node_value, left, right)
left.next = new_node
right.prev = new_node
self.current = new_node
def remove_node(self):
for _ in range(7):
self.current = self.current.prev
val = self.current.value
self.current.prev.next = self.current.next
self.current.next.prev = self.current.prev
self.current = self.current.next
return val
class DoubleLinkedListNode:
def __init__(self, value, prev = None, next = None):
self.value = value
self.prev = prev
self.next = next
# Tests
def test(expected, actual):
assert expected == actual, 'Expected: %r, Actual: %r' % (expected, actual)
test(32, part1('9 players; last marble is worth 25 points'))
test(8317, part1('10 players; last marble is worth 1618 points'))
test(146373, part1('13 players; last marble is worth 7999 points'))
test(2764, part1('17 players; last marble is worth 1104 points'))
test(54718, part1('21 players; last marble is worth 6111 points'))
test(37305, part1('30 players; last marble is worth 5807 points'))
test(8317, part1('10 players; last marble is worth 1618 points'))
# Solve real puzzle
filename = 'data/day09.txt'
data = [line.rstrip('\n') for line in open(filename, 'r')][0]
print('Day 09, part 1: %r' % (part1(data)))
print('Day 09, part 2: %r' % (part2(data, 100)))
| python |
# -*- coding: utf-8 -*-
import hexchat
import re
__module_name__ = "DeadKeyFix"
__module_version__ = "2.2"
__module_description__ = "Fixes the US-International deadkey issue"
prev = ''
def keypress_cb(word, word_eol, userdata):
global prev
specialChars = {
'65104': {
'a': u'à',
'o': u'ò',
'e': u'è',
'i': u'ì',
'u': u'ù',
'A': u'À',
'O': u'Ò',
'E': u'È',
'I': u'Ì',
'U': u'Ù'
},
'65105': {
'a': u'á',
'o': u'ó',
'e': u'é',
'i': u'í',
'u': u'ú',
'y': u'ý',
'c': u'ç',
'A': u'Á',
'O': u'Ó',
'E': u'É',
'I': u'Í',
'U': u'Ú',
'Y': u'Ý',
'C': u'Ç'
},
'65106': {
'a': u'â',
'o': u'ô',
'e': u'ê',
'i': u'î',
'u': u'û',
'A': u'Â',
'O': u'Ô',
'E': u'Ê',
'I': u'Î',
'U': u'Û'
},
'65107': {
'a': u'ã',
'o': u'õ',
'n': u'ñ',
'A': u'Ã',
'O': u'Õ',
'N': u'Ñ'
},
'65111': {
'a': u'ä',
'o': u'ö',
'e': u'ë',
'i': u'ï',
'u': u'ü',
'y': u'ÿ',
'A': u'Ä',
'O': u'Ö',
'E': u'Ë',
'I': u'Ï',
'U': u'Ü',
'Y': u'Ÿ'
}
}
accents = {
'65104': '`',
'65105': "'",
'65106': '^',
'65107': '~',
'65111': '"'
}
charset = hexchat.get_info('charset')
#When there is no current charset derived from server or channel it is set to IRC
#IRC is not a recognized encoding type so default to utf-8 in that case.
if(charset == "IRC"):
charset = "utf-8"
text = hexchat.get_info('inputbox')
loc = hexchat.get_prefs("state_cursor")
if prev in accents and word[2] in specialChars[prev]:
#insert an accented character
text = insert(specialChars[prev][word[2]],text,loc)
elif prev in accents and word[2] == ' ':
#insert a clean accent ( input was accent followed by space )
text = insert(accents[prev],text,loc)
elif prev in accents and word[0] in accents:
#Insert two accents ( input was accent followed by accent )
text = insert(accents[prev] + accents[word[0]],text,loc)
loc+=1
elif prev in accents and int(word[3]) != 0:
#insert an accent and a character (character and accent do not combine)
text = insert(accents[prev] + word[2],text,loc)
loc+=1
elif word[0] in accents:
#store an accent input
prev = word[0]
return
else:
#regular character input
if int(word[3]) != 0:
prev = ''
return
prev = ''
settex = u"settext " + text
hexchat.command( settex )
hexchat.command('setcursor {}'.format(loc+1))
return hexchat.EAT_HEXCHAT
def unload_cb(userdata):
print(__module_name__, 'version', __module_version__, 'unloaded.')
def insert(char,text,loc):
return u"{}{}{}".format(text[:loc] , char , text[loc:])
hexchat.hook_print('Key Press', keypress_cb)
hexchat.hook_unload(unload_cb)
print(__module_name__, 'version', __module_version__, 'loaded.')
| python |
import FWCore.ParameterSet.Config as cms
process = cms.Process("TREESPLITTER")
process.source = cms.Source("EmptySource")
process.maxEvents = cms.untracked.PSet(
input = cms.untracked.int32(0)
)
process.TreeSplitterModule = cms.EDAnalyzer(
"TreeSplitter",
InputFileName = cms.string("/afs/cern.ch/user/d/demattia/scratch0/MuScleFit/CMSSW_3_11_0/src/MuonAnalysis/MomentumScaleCalibration/test/StatisticalErrors/Tree_MCFall2010_INNtk_CRAFTRealistic_wGEN.root"),
OutputFileName = cms.string("SubSample.root"),
MaxEvents = cms.int32(MAXEVENTS),
SubSampleFirstEvent = cms.uint32(SUBSAMPLEFIRSTEVENT),
SubSampleMaxEvents = cms.uint32(SUBSAMPLEMAXEVENTS)
)
process.p1 = cms.Path(process.TreeSplitterModule)
| python |
# This is an automatically generated file.
# DO NOT EDIT or your changes may be overwritten
import base64
from xdrlib import Packer, Unpacker
from .ledger_close_value_signature import LedgerCloseValueSignature
from .stellar_value_type import StellarValueType
from ..exceptions import ValueError
__all__ = ["StellarValueExt"]
class StellarValueExt:
"""
XDR Source Code
----------------------------------------------------------------
union switch (StellarValueType v)
{
case STELLAR_VALUE_BASIC:
void;
case STELLAR_VALUE_SIGNED:
LedgerCloseValueSignature lcValueSignature;
}
----------------------------------------------------------------
"""
def __init__(
self, v: StellarValueType, lc_value_signature: LedgerCloseValueSignature = None,
) -> None:
self.v = v
self.lc_value_signature = lc_value_signature
def pack(self, packer: Packer) -> None:
self.v.pack(packer)
if self.v == StellarValueType.STELLAR_VALUE_BASIC:
return
if self.v == StellarValueType.STELLAR_VALUE_SIGNED:
if self.lc_value_signature is None:
raise ValueError("lc_value_signature should not be None.")
self.lc_value_signature.pack(packer)
return
raise ValueError("Invalid v.")
@classmethod
def unpack(cls, unpacker: Unpacker) -> "StellarValueExt":
v = StellarValueType.unpack(unpacker)
if v == StellarValueType.STELLAR_VALUE_BASIC:
return cls(v)
if v == StellarValueType.STELLAR_VALUE_SIGNED:
lc_value_signature = LedgerCloseValueSignature.unpack(unpacker)
if lc_value_signature is None:
raise ValueError("lc_value_signature should not be None.")
return cls(v, lc_value_signature=lc_value_signature)
raise ValueError("Invalid v.")
def to_xdr_bytes(self) -> bytes:
packer = Packer()
self.pack(packer)
return packer.get_buffer()
@classmethod
def from_xdr_bytes(cls, xdr: bytes) -> "StellarValueExt":
unpacker = Unpacker(xdr)
return cls.unpack(unpacker)
def to_xdr(self) -> str:
xdr_bytes = self.to_xdr_bytes()
return base64.b64encode(xdr_bytes).decode()
@classmethod
def from_xdr(cls, xdr: str) -> "StellarValueExt":
xdr_bytes = base64.b64decode(xdr.encode())
return cls.from_xdr_bytes(xdr_bytes)
def __eq__(self, other: object):
if not isinstance(other, self.__class__):
return NotImplemented
return self.v == other.v and self.lc_value_signature == other.lc_value_signature
def __str__(self):
out = []
out.append(f"v={self.v}")
out.append(
f"lc_value_signature={self.lc_value_signature}"
) if self.lc_value_signature is not None else None
return f"<StellarValueExt {[', '.join(out)]}>"
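# Illustrative round trip (not part of the generated code): a BASIC value ext
# survives XDR serialisation; the SIGNED arm would additionally need an
# LedgerCloseValueSignature instance.
def _roundtrip_demo():
    ext = StellarValueExt(StellarValueType.STELLAR_VALUE_BASIC)
    return StellarValueExt.from_xdr(ext.to_xdr()) == ext  # True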
| python |
import setuptools
def get_requires(filename):
requirements = []
with open(filename) as req_file:
for line in req_file.read().splitlines():
if not line.strip().startswith("#"):
requirements.append(line)
return requirements
with open("Readme.md", "r", encoding="utf8") as fh:
long_description = fh.read()
setuptools.setup(
name="recap",
version="1.0.0",
author="Croydon",
author_email="[email protected]",
description="An example how a testing environment can look like in Python",
long_description=long_description,
long_description_content_type="text/markdown",
url="https://github.com/Croydon/pt-recap",
packages=setuptools.find_packages(exclude=["tests"]),
classifiers=[
"Programming Language :: Python :: 3",
"License :: OSI Approved :: MIT License",
"Operating System :: OS Independent",
],
keywords=["testing", "requests", "calculations", "templates"],
install_requires=get_requires("requirements.txt"),
extras_require={
"test": get_requires("requirements_test.txt")
},
    package_data={
        '': ['*.md', 'data/*.tmpl']
    },
entry_points={
'console_scripts': [
'recap=recap.main:run',
],
},
)
| python |
REGISTRY = {}
from .sc_agent import SCAgent
from .rnn_agent import RNNAgent
from .latent_ce_dis_rnn_agent import LatentCEDisRNNAgent
REGISTRY["rnn"] = RNNAgent
REGISTRY["latent_ce_dis_rnn"] = LatentCEDisRNNAgent
REGISTRY["sc"] = SCAgent
| python |
# Copyright (C) 2020. Huawei Technologies Co., Ltd. All rights reserved.
# This program is free software; you can redistribute it and/or modify
# it under the terms of the Apache License Version 2.0.
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# Apache License Version 2.0 for more details.
# ============================================================================
import tensorflow as tf
from tensorflow.python.keras import layers as keras_layers
class FeedForwardNetwork(keras_layers.Layer):
def __init__(self, hidden_size, filter_size, relu_dropout):
super(FeedForwardNetwork, self).__init__()
self.hidden_size = hidden_size
self.filter_size = filter_size
self.relu_dropout = relu_dropout
self.filter_dense_layer = keras_layers.Dense(
filter_size,
use_bias=True,
activation=tf.nn.relu,
name="filter_layer")
self.output_dense_layer = keras_layers.Dense(
hidden_size, use_bias=True, name="output_layer")
def call(self, x, training):
"""
Args:
x: A tensor with shape [batch_size, length, hidden_size]
training (boolean): whether in training mode or not.
Returns:
Output of the feedforward network.
tensor with shape [batch_size, length, hidden_size]
"""
# input_shape = tf.shape(x)
# batch_size, length = input_shape[0], input_shape[1]
output = self.filter_dense_layer(x)
if training:
output = tf.nn.dropout(output, rate=self.relu_dropout)
output = self.output_dense_layer(output)
return output
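# Quick shape sanity check (illustrative only): input and output keep the same
# [batch_size, length, hidden_size] shape; only the inner filter layer is wider.
def _ffn_shape_demo():
    ffn = FeedForwardNetwork(hidden_size=8, filter_size=32, relu_dropout=0.1)
    x = tf.zeros([2, 5, 8])
    return ffn(x, training=False).shape  # TensorShape([2, 5, 8])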
| python |
# -*- coding: utf-8 -*-
#
# Copyright 2017 CPqD. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
@author: Akira Miasato
Audio generation examples.
Generators should always yield bytestrings. Our ASR interface only supports
linear PCM with little-endian signed 16bit samples. Their length may be
variable, as long as they are smaller than the predefined maximum payload size
from the configured websocket connection, and the length of each bytestring
is modulo 0 with the size of the sample (i.e. is even in length).
"""
import soundfile as sf
import pyaudio
import time
class MicAudioSource:
"""
Simple microphone reader.
chunk_size is in samples, so the size in bytes of the sent packet is
sizeof(<sample_type>) * chunk_size. This value should be smaller than the
predefined maximum payload from the configured websocket connection.
:sample_rate: Sample rate for the captured audio
:sample_type: Sample type provided by pyaudio
:chunk_size: Size of the blocks of audio which will be sent (in samples)
:yields: bytestrings of size <chunk_size> * sizeof(<sample_type>)
Does not terminate. When used inside a SpeechRecognition instance, it
is halted when the recognition instance is cancelled or closed.
"""
def __init__(self, sample_rate=8000, sample_type=pyaudio.paInt16, chunk_size=4096):
self._audio = pyaudio.PyAudio()
self._sample_rate = sample_rate
self._sample_type = sample_type
self._chunk_size = chunk_size
def __enter__(self):
self._stream = self._audio.open(
format=self._sample_type,
channels=1,
rate=self._sample_rate,
input=True,
frames_per_buffer=self._chunk_size,
)
return self
def __exit__(self, etype, value, traceback):
self._stream.stop_stream()
self._stream.close()
def __iter__(self):
return self
def __next__(self):
if not self._stream.is_active:
raise StopIteration
return self._stream.read(self._chunk_size)
def FileAudioSource(path, chunk_size=4096):
"""
Simple audio file reader. Should be compatible with all files supported
by 'soundfile' package.
chunk_size is in samples, so the size in bytes of the sent packet is
2*chunk_size, since we are sending 16-bit signed PCM samples. chunk_size*2
should be smaller than the predefined maximum payload from the configured
websocket connection.
:path: Path to the audio input (any format supported by soundfile package)
:chunk_size: Size of the blocks of audio which will be sent (in samples)
:yields: bytestrings of size <chunk_size> * 2
Terminates when the audio file provided has no more content
"""
f = open(path, "rb")
bytestr = f.read(chunk_size)
while bytestr:
yield bytestr
bytestr = f.read(chunk_size)
class BufferAudioSource:
"""
Very simple buffer source.
This generator has a "write" method which updates its internal buffer,
which is periodically consumed by the ASR instance in which it is inserted.
    :chunk_size: Size of each chunk yielded from the internal buffer (in bytes)
    :yields: bytestrings of at most <chunk_size> bytes
Terminates only if the "finish" method is called, in which case the
remaining buffer is sent regardless of its size.
"""
def __init__(self, chunk_size=4096):
self._buffer = b""
self._chunk_size = chunk_size
self._finished = False
def __iter__(self):
return self
def __next__(self):
while True:
if len(self._buffer) >= self._chunk_size:
r = self._buffer[: self._chunk_size]
self._buffer = self._buffer[self._chunk_size :]
return r
elif self._finished:
if self._buffer:
r = self._buffer
self._buffer = b""
return r
else:
raise StopIteration
time.sleep(0.05)
def write(self, byte_str):
"""
Writes to the buffer.
:byte_str: A byte string (char array). Currently only 16-bit signed
little-endian linear PCM is accepted.
"""
self._finished = False
self._buffer += byte_str
def finish(self):
"""
        Signals the ASR instance that writing has finished and that the caller
        is now waiting for the recognition result.
"""
self._finished = True
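# Usage sketch for BufferAudioSource (the recognizer object is hypothetical;
# it stands in for whatever ASR client consumes these generators):
#
#   source = BufferAudioSource(chunk_size=4096)
#   recognizer.listen(source)   # consumes the generator in the background
#   source.write(pcm_bytes)     # feed 16-bit little-endian PCM as it arrives
#   source.finish()             # flush the remaining buffer and stop iteration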
| python |
#
# @lc app=leetcode id=160 lang=python3
#
# [160] Intersection of Two Linked Lists
#
# https://leetcode.com/problems/intersection-of-two-linked-lists/description/
#
# algorithms
# Easy (39.05%)
# Likes: 3257
# Dislikes: 372
# Total Accepted: 438K
# Total Submissions: 1.1M
# Testcase Example: '8\n[4,1,8,4,5]\n[5,0,1,8,4,5]\n2\n3'
#
# Write a program to find the node at which the intersection of two singly
# linked lists begins.
#
# For example, the following two linked lists:
#
#
# begin to intersect at node c1.
#
#
#
# Example 1:
#
#
#
# Input: intersectVal = 8, listA = [4,1,8,4,5], listB = [5,0,1,8,4,5], skipA =
# 2, skipB = 3
# Output: Reference of the node with value = 8
# Input Explanation: The intersected node's value is 8 (note that this must not
# be 0 if the two lists intersect). From the head of A, it reads as
# [4,1,8,4,5]. From the head of B, it reads as [5,0,1,8,4,5]. There are 2 nodes
# before the intersected node in A; There are 3 nodes before the intersected
# node in B.
#
#
#
# Example 2:
#
#
#
# Input: intersectVal = 2, listA = [0,9,1,2,4], listB = [3,2,4], skipA = 3,
# skipB = 1
# Output: Reference of the node with value = 2
# Input Explanation: The intersected node's value is 2 (note that this must not
# be 0 if the two lists intersect). From the head of A, it reads as
# [0,9,1,2,4]. From the head of B, it reads as [3,2,4]. There are 3 nodes
# before the intersected node in A; There are 1 node before the intersected
# node in B.
#
#
#
#
# Example 3:
#
#
#
# Input: intersectVal = 0, listA = [2,6,4], listB = [1,5], skipA = 3, skipB = 2
# Output: null
# Input Explanation: From the head of A, it reads as [2,6,4]. From the head of
# B, it reads as [1,5]. Since the two lists do not intersect, intersectVal must
# be 0, while skipA and skipB can be arbitrary values.
# Explanation: The two lists do not intersect, so return null.
#
#
#
#
# Notes:
#
#
# If the two linked lists have no intersection at all, return null.
# The linked lists must retain their original structure after the function
# returns.
# You may assume there are no cycles anywhere in the entire linked
# structure.
# Your code should preferably run in O(n) time and use only O(1) memory.
#
#
#
# @lc code=start
# Definition for singly-linked list.
# class ListNode:
# def __init__(self, x):
# self.val = x
# self.next = None
class Solution:
    def getIntersectionNode(self, headA: ListNode, headB: ListNode) -> ListNode:
        if headA is None or headB is None:
            return None
        # Two-pointer trick: each pointer walks its own list, then restarts at
        # the head of the other list. Both traverse lenA + lenB nodes in total,
        # so they meet at the first shared node, or both reach None together
        # when the lists do not intersect. O(n) time, O(1) extra memory.
        a = headA
        b = headB
        while a != b:
            a = a.next if a else headB
            b = b.next if b else headA
        return a
# @lc code=end
| python |
def dividend(ticker_info):
    """Group each ticker's dividend history by calendar year.

    For every ticker, the per-payment dividend series is summed per year and
    stored back on the ticker's entry under the 'dividends_grouped' key.
    """
    for ticker, value in ticker_info.items():
        value_dividends = value["dividends"].to_frame().reset_index()
        dividend_grouped = value_dividends.groupby(value_dividends["Date"].dt.year)['Dividends'].agg(['sum'])
        dividend_grouped = dividend_grouped.rename(columns={'sum': ticker})
        value["dividends_grouped"] = dividend_grouped
| python |
import markdown
from flask import abort, flash, redirect, render_template, request
from flask_babel import gettext as _
from flask_login import current_user, login_required
from ..ext import db
from ..forms.base import DeleteForm
from ..models import Brew, TastingNote
from ..utils.pagination import get_page
from ..utils.views import next_redirect
from . import tasting_bp
from .forms import TastingNoteForm
from .permissions import AccessManager
from .utils import TastingUtils
@tasting_bp.route('/all', endpoint='all')
def all_tasting_notes():
page_size = 20
page = get_page(request)
kw = {}
if current_user.is_authenticated:
kw['extra_user'] = current_user
query = TastingUtils.notes(public_only=True, **kw)
query = query.order_by(db.desc(TastingNote.date))
pagination = query.paginate(page, page_size)
context = {
'public_only': True,
'pagination': pagination,
}
return render_template('tasting/list.html', **context)
@tasting_bp.route('/<int:brew_id>/add', methods=['GET', 'POST'], endpoint='add')
@login_required
def brew_add_tasting_note(brew_id):
brew = Brew.query.get_or_404(brew_id)
AccessManager.check_create(brew)
form = TastingNoteForm()
if form.validate_on_submit():
form.save(brew)
flash(_('tasting note for %(brew)s saved', brew=brew.name), category='success')
next_ = next_redirect('brew.details', brew_id=brew.id)
return redirect(next_)
ctx = {
'brew': brew,
'form': form,
}
return render_template('tasting/tasting_note.html', **ctx)
@tasting_bp.route('/<int:note_id>/delete', methods=['GET', 'POST'], endpoint='delete')
@login_required
def brew_delete_tasting_note(note_id):
note = TastingNote.query.get_or_404(note_id)
brew = note.brew
AccessManager(note, None).check()
form = DeleteForm()
if form.validate_on_submit() and form.delete_it.data:
db.session.delete(note)
db.session.commit()
flash(
_('tasting note for brew %(brew)s has been deleted', brew=brew.name),
category='success'
)
next_ = next_redirect('brew.details', brew_id=brew.id)
return redirect(next_)
ctx = {
'brew': brew,
'note': note,
'delete_form': form,
}
return render_template('tasting/tasting_note_delete.html', **ctx)
@tasting_bp.route('/ajaxtext', endpoint='loadtext')
def brew_load_tasting_note_text():
provided_id = request.args.get('id')
if not provided_id:
abort(400)
note_id = provided_id.rsplit('_', 1)[-1]
note = TastingNote.query.get_or_404(note_id)
return note.text
@tasting_bp.route('/ajaxupdate', methods=['POST'], endpoint='update')
@login_required
def brew_update_tasting_note():
note_id = request.form.get('pk')
if not note_id:
abort(400)
note = TastingNote.query.get_or_404(note_id)
AccessManager(note, None).check()
value = request.form.get('value', '').strip()
if value:
note.text = value
db.session.add(note)
db.session.commit()
return markdown.markdown(value)
return note.text_html
| python |
# Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
# SPDX-License-Identifier: MIT-0
from collections import OrderedDict
from teach.dataset.actions import (
Action_Audio,
Action_Basic,
Action_Keyboard,
Action_MapGoal,
Action_Motion,
Action_ObjectInteraction,
Action_ProgressCheck,
)
class Interaction:
def __init__(self, agent_id, action, is_object=False, status=None, time_start=None):
self.agent_id = agent_id
self.action = action
self.is_object = is_object
self.status = status
self.time_start = time_start
def to_dict(self):
_dict = OrderedDict()
if self.is_object:
_dict["object_id"] = self.agent_id
else:
_dict["agent_id"] = self.agent_id
_dict.update(self.action.to_dict())
if self.status is not None:
_dict["status"] = self.status
return _dict
@classmethod
def from_dict(cls, interaction_dict, action_type) -> "Interaction":
if "object_id" in interaction_dict:
is_object = True
agent_id = interaction_dict["object_id"]
else:
is_object = False
agent_id = interaction_dict["agent_id"]
if action_type == "Motion":
action = Action_Motion.from_dict(interaction_dict)
elif action_type == "MapGoal":
action = Action_MapGoal.from_dict(interaction_dict)
elif action_type == "ObjectInteraction":
action = Action_ObjectInteraction.from_dict(interaction_dict)
elif action_type == "ProgressCheck":
action = Action_ProgressCheck.from_dict(interaction_dict)
elif action_type == "Keyboard":
action = Action_Keyboard.from_dict(interaction_dict)
elif action_type == "Audio":
action = Action_Audio.from_dict(interaction_dict)
else:
action = Action_Basic.from_dict(interaction_dict)
status = interaction_dict.get("status")
time_start = interaction_dict.get("time_start")
return cls(agent_id=agent_id, action=action, is_object=is_object, status=status, time_start=time_start)
| python |
import collections
import pathlib
import sys
import os
import json
def as_dict(par):
if not par:
return None
if isinstance(par, dict):
return par
else:
return dict(par._asdict())
def from_dict(par_dict):
if not par_dict:
return None
# par = collections.namedtuple('Params', par_dict.keys())(**par_dict)
par = collections.namedtuple('Params', par_dict.keys())
class IBLParams(par):
def set(self, field, value):
d = as_dict(self)
d[field] = value
return from_dict(d)
        def as_dict(self):
            # Name lookup resolves to the module-level as_dict (a class body is
            # not an enclosing scope), so this converts the tuple into a dict.
            return as_dict(self)
return IBLParams(**par_dict)
def getfile(str_params):
"""
Returns full path of the param file per system convention:
linux/mac: ~/.str_params, Windows: APPDATA folder
    :param str_params: string that identifies the param file
:return: string of full path
"""
if sys.platform == 'win32' or sys.platform == 'cygwin':
pfile = str(pathlib.PurePath(os.environ['APPDATA'], '.' + str_params))
else:
pfile = str(pathlib.PurePath(pathlib.Path.home(), '.' + str_params))
return pfile
def read(str_params, default=None):
"""
    Reads in and parses a JSON parameter file into a dictionary
:param str_params: path to text json file
:param default: default values for missing parameters
:return: named tuple containing parameters
"""
pfile = getfile(str_params)
if os.path.isfile(pfile):
with open(pfile) as fil:
par_dict = json.loads(fil.read())
else:
par_dict = as_dict(default)
# without default parameters
default = as_dict(default)
# TODO : behaviour for non existing file
# tat = params.read('rijafa', default={'toto': 'titi', 'tata': 1})
if not default or default.keys() == par_dict.keys():
return from_dict(par_dict)
# if default parameters bring in a new parameter
new_keys = set(default.keys()).difference(set(par_dict.keys()))
for nk in new_keys:
par_dict[nk] = default[nk]
# write the new parameter file with the extra param
write(str_params, par_dict)
return from_dict(par_dict)
def write(str_params, par):
"""
Write a parameter file in Json format
:param str_params: path to text json file
:param par: dictionary containing parameters values
:return: None
"""
pfile = getfile(str_params)
with open(pfile, 'w') as fil:
json.dump(as_dict(par), fil, sort_keys=False, indent=4)
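# Round-trip sketch (kept as a comment because read/write create a dotfile in
# the user's home directory or %APPDATA%; 'my_app' and the keys are made up):
#
#   defaults = from_dict({'url': 'http://localhost', 'retries': 3})
#   write('my_app', defaults)
#   par = read('my_app', default=defaults)   # named tuple of parameters
#   par = par.set('retries', 5)              # returns a new tuple; persist with write()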
| python |
import unittest
import ServiceGame
from model.Platform import platform
from model.Publishers import publisher
class TestServiceGame(unittest.TestCase):
def test_games_Wii(self):
wiigames = ServiceGame.platz(platform('Wii'))
self.assertEqual(15, len(wiigames))
def test_games_PC(self):
pc = ServiceGame.platz(platform('PC'))
self.assertEqual(1, len(pc))
def test_games_SquareSoft(self):
squaresoft = ServiceGame.plubz(publisher('SquareSoft'))
self.assertNotEqual(0, len(squaresoft))
def test_games_ElectronicArts(self):
electronicarts = ServiceGame.plubz(publisher('Electronic Arts'))
self.assertEqual(5, len(electronicarts))
def test_csv_is_create_platform(self):
ServiceGame.escolher('P1', platform('Wii'))
with open('output.csv') as arquivo:
conteudo = arquivo.readlines()
self.assertEqual(15, len(conteudo))
def test_csv_is_create_publisher(self):
ServiceGame.escolher('P2', publisher('Electronic Arts'))
with open('output.csv') as arquivo:
conteudo = arquivo.readlines()
self.assertEqual(5, len(conteudo))
if __name__ == '__main__':
unittest.main()
| python |
from robot_server.service.errors import RobotServerError, \
CommonErrorDef, ErrorDef
class SystemException(RobotServerError):
"""Base of all system exceptions"""
pass
class SystemTimeAlreadySynchronized(SystemException):
"""
Cannot update system time because it is already being synchronized
via NTP or local RTC.
"""
def __init__(self, msg: str):
super().__init__(definition=CommonErrorDef.ACTION_FORBIDDEN,
reason=msg)
class SystemSetTimeException(SystemException):
    """Raised when the server process fails to set the system time."""
def __init__(self, msg: str, definition: ErrorDef = None):
if definition is None:
definition = CommonErrorDef.INTERNAL_SERVER_ERROR
super().__init__(definition=definition,
error=msg)
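# Usage sketch (the surrounding conditions are assumptions, not code from this
# module; it only shows how these exceptions are intended to be raised):
#
#   if ntp_synchronized:
#       raise SystemTimeAlreadySynchronized(
#           "Cannot set time while NTP synchronization is active")
#   if not set_time_succeeded:
#       raise SystemSetTimeException("Failed to update the system clock")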
| python |
from flask import Flask, redirect, render_template, url_for
from flask_pymongo import PyMongo
import scrape_mars
app = Flask(__name__)
mongo=PyMongo(app, uri="mongodb://localhost:27017/mars_app")
@app.route("/")
def index():
mars_info = mongo.db.mars_info.find_one()
return render_template("index.html", mars_info=mars_info)
#trigger scrape
@app.route("/scrape")
def scrape():
    mars_info = mongo.db.mars_info
    # Merge the output of each scraper into a single document so every section
    # is saved. Each helper is assumed to return a dict, since the combined
    # result is upserted below.
    mars_data = scrape_mars.mars_scrape_news()
    mars_data.update(scrape_mars.mars_scrape_image())
    mars_data.update(scrape_mars.mars_scrape_faq())
    mars_data.update(scrape_mars.mars_scrape_hemi())
    mars_info.update({}, mars_data, upsert=True)
return "Scrape Complete!"
if __name__ == "__main__":
    app.run()
| python |
import sys
with open(sys.argv[1]) as f:
data = f.read()
# Remove every case-insensitive "<script>" marker from the input: characters
# are pushed onto a stack, and whenever the last 8 spell the marker they are
# popped again, so the printed output is the text with the markers stripped.
stack = []
for i in range(len(data)):
    if i % 1000000 == 0:
        print("%.2f %%" % (i / len(data) * 100))
    stack.append(data[i])
    if (len(stack) >= 8 and
            stack[-8] == "<" and
            stack[-7] in "Ss" and
            stack[-6] in "Cc" and
            stack[-5] in "Rr" and
            stack[-4] in "Ii" and
            stack[-3] in "Pp" and
            stack[-2] in "Tt" and
            stack[-1] == ">"):
        for _ in range(8):
            stack.pop()
# The final character is dropped, presumably a trailing newline from the file.
print("".join(stack)[:-1])
| python |
#!/usr/bin/env python
import numpy as np
import sys
from readFiles import *
thisfh = sys.argv[1]
linkerfh = "part_Im.xyz"
#Read the linker file
lAtomList, lAtomCord = readxyz(linkerfh)
sAtomList, sAtomCord = readxyz(thisfh)
a,b,c,alpha,beta,gamma = readcifFile(thisfh[:-4] + ".cif")
cell_params = [a, b, c, alpha, beta, gamma]
#sAtomCord, sAtomList = reduceToUnitCell(sAtomCord,sAtomList,cell_params,-1,2)
sAtomList = replaceSiwZn(sAtomList)
#writexyzFile(sAtomCord,sAtomList,"testZn.xyz")
minDist = calcMinZnZnDist(sAtomCord,sAtomList)
sf = 6/minDist
a = a*sf; b = b*sf; c = c*sf;
sAtomCord = expandStructure(sAtomCord,sf)
#writexyzFile(sAtomCord,sAtomList,"testZnExpanded.xyz")
sAtomCord, sAtomList = putLinkerIn(sAtomList,lAtomList,sAtomCord,lAtomCord)
cell_params = [a, b, c, alpha, beta, gamma]
#writexyzFile(sAtomCord,sAtomList, thisfh[:-4] + "_ZIF.xyz",cell_params)
reducedCord,reducedList = reduceToUnitCell(sAtomCord,sAtomList,cell_params,.5,1.5)
writexyzFile(reducedCord,reducedList, thisfh[:-4] + "_ZIF_unitcell.xyz",cell_params)
| python |
import copy
import os
import random
import kerastuner
import kerastuner.engine.hypermodel as hm_module
import tensorflow as tf
from autokeras.hypermodel import base
class AutoTuner(kerastuner.engine.multi_execution_tuner.MultiExecutionTuner):
"""A Tuner class based on KerasTuner for AutoKeras.
    Different from KerasTuner's Tuner class, AutoTuner not only tunes the
    Hypermodel, which can be directly built into a Keras model, but also the
    preprocessors. Therefore, a HyperGraph stores the overall search space containing
    both the Preprocessors and the Hypermodel. For every trial, the HyperGraph builds
    the PreprocessGraph and KerasGraph with the provided HyperParameters.
# Arguments
hyper_graph: HyperGraph. The HyperGraph to be tuned.
fit_on_val_data: Boolean. Use the training set and validation set for the
final fit of the best model.
**kwargs: The other args supported by KerasTuner.
"""
def __init__(self, hyper_graph, hypermodel, fit_on_val_data=False, **kwargs):
self.hyper_graph = hyper_graph
super().__init__(
hypermodel=hm_module.KerasHyperModel(hypermodel),
# TODO: Support resume of a previous run.
overwrite=True,
**kwargs)
self.preprocess_graph = None
self.best_hp = None
self.fit_on_val_data = fit_on_val_data
def run_trial(self, trial, **fit_kwargs):
"""Preprocess the x and y before calling the base run_trial."""
# Initialize new fit kwargs for the current trial.
new_fit_kwargs = copy.copy(fit_kwargs)
# Preprocess the dataset and set the shapes of the HyperNodes.
self.preprocess_graph, keras_graph = self.hyper_graph.build_graphs(
trial.hyperparameters)
self.hypermodel = hm_module.KerasHyperModel(keras_graph)
self._prepare_run(self.preprocess_graph, new_fit_kwargs, True)
super().run_trial(trial, **new_fit_kwargs)
def _prepare_run(self, preprocess_graph, fit_kwargs, fit=False):
dataset, validation_data = preprocess_graph.preprocess(
dataset=fit_kwargs.get('x', None),
validation_data=fit_kwargs.get('validation_data', None),
fit=fit)
# Batching
batch_size = fit_kwargs.pop('batch_size', 32)
dataset = dataset.batch(batch_size)
validation_data = validation_data.batch(batch_size)
# Update the new fit kwargs values
fit_kwargs['x'] = dataset
fit_kwargs['validation_data'] = validation_data
fit_kwargs['y'] = None
def _get_save_path(self, trial, name):
filename = '{trial_id}-{name}'.format(trial_id=trial.trial_id, name=name)
return os.path.join(self.get_trial_dir(trial.trial_id), filename)
def on_trial_end(self, trial):
"""Save and clear the hypermodel and preprocess_graph."""
super().on_trial_end(trial)
self.preprocess_graph.save(self._get_save_path(trial, 'preprocess_graph'))
self.hypermodel.hypermodel.save(self._get_save_path(trial, 'keras_graph'))
self.preprocess_graph = None
self.hypermodel = None
def load_model(self, trial):
"""Load the model in a history trial.
# Arguments
trial: Trial. The trial to be loaded.
# Returns
Tuple of (PreprocessGraph, KerasGraph, tf.keras.Model).
"""
preprocess_graph, keras_graph = self.hyper_graph.build_graphs(
trial.hyperparameters)
preprocess_graph.reload(self._get_save_path(trial, 'preprocess_graph'))
keras_graph.reload(self._get_save_path(trial, 'keras_graph'))
self.hypermodel = hm_module.KerasHyperModel(keras_graph)
models = (preprocess_graph, keras_graph, super().load_model(trial))
self.hypermodel = None
return models
def get_best_model(self):
"""Load the best PreprocessGraph and Keras model.
It is mainly used by the predict and evaluate function of AutoModel.
# Returns
Tuple of (PreprocessGraph, tf.keras.Model).
"""
preprocess_graph, keras_graph = self.hyper_graph.build_graphs(
self.best_hp)
preprocess_graph.reload(self.best_preprocess_graph_path)
keras_graph.reload(self.best_keras_graph_path)
model = keras_graph.build(self.best_hp)
model.load_weights(self.best_model_path)
return preprocess_graph, model
def search(self, callbacks=None, **fit_kwargs):
"""Search for the best HyperParameters.
        If there is no early-stopping callback among the callbacks, one is injected
        to accelerate the search process. At the end of the search, the best model
        is fully trained with the specified number of epochs.
"""
# Insert early-stopping for acceleration.
if not callbacks:
callbacks = []
new_callbacks = self._deepcopy_callbacks(callbacks)
if not any([isinstance(callback, tf.keras.callbacks.EarlyStopping)
for callback in callbacks]):
new_callbacks.append(tf.keras.callbacks.EarlyStopping(patience=10))
super().search(callbacks=new_callbacks, **fit_kwargs)
best_trial = self.oracle.get_best_trials(1)[0]
self.best_hp = best_trial.hyperparameters
preprocess_graph, keras_graph, model = self.get_best_models()[0]
preprocess_graph.save(self.best_preprocess_graph_path)
keras_graph.save(self.best_keras_graph_path)
# Fully train the best model with original callbacks.
if not any([isinstance(callback, tf.keras.callbacks.EarlyStopping)
for callback in callbacks]) or self.fit_on_val_data:
fit_kwargs['callbacks'] = self._deepcopy_callbacks(callbacks)
self._prepare_run(preprocess_graph, fit_kwargs)
if self.fit_on_val_data:
fit_kwargs['x'] = fit_kwargs['x'].concatenate(
fit_kwargs['validation_data'])
model = keras_graph.build(self.best_hp)
model.fit(**fit_kwargs)
model.save_weights(self.best_model_path)
@property
def best_preprocess_graph_path(self):
return os.path.join(self.project_dir, 'best_preprocess_graph')
@property
def best_keras_graph_path(self):
return os.path.join(self.project_dir, 'best_keras_graph')
@property
def best_model_path(self):
return os.path.join(self.project_dir, 'best_model')
class RandomSearch(AutoTuner, kerastuner.RandomSearch):
"""KerasTuner RandomSearch with preprocessing layer tuning."""
pass
class Hyperband(AutoTuner, kerastuner.Hyperband):
"""KerasTuner Hyperband with preprocessing layer tuning."""
pass
class BayesianOptimization(AutoTuner, kerastuner.BayesianOptimization):
"""KerasTuner BayesianOptimization with preprocessing layer tuning."""
pass
class GreedyOracle(kerastuner.Oracle):
"""An oracle combining random search and greedy algorithm.
It groups the HyperParameters into several categories, namely, HyperGraph,
Preprocessor, Architecture, and Optimization. The oracle tunes each group
separately using random search. In each trial, it use a greedy strategy to
generate new values for one of the categories of HyperParameters and use the best
trial so far for the rest of the HyperParameters values.
# Arguments
hyper_graph: HyperGraph. The hyper_graph model to be tuned.
seed: Int. Random seed.
"""
HYPER = 'HYPER'
PREPROCESS = 'PREPROCESS'
OPT = 'OPT'
ARCH = 'ARCH'
STAGES = [HYPER, PREPROCESS, OPT, ARCH]
@staticmethod
def next_stage(stage):
stages = GreedyOracle.STAGES
return stages[(stages.index(stage) + 1) % len(stages)]
def __init__(self, hyper_graph, seed=None, **kwargs):
super().__init__(**kwargs)
self.hyper_graph = hyper_graph
# Start from tuning the hyper block hps.
self._stage = GreedyOracle.HYPER
# Sets of HyperParameter names.
self._hp_names = {
GreedyOracle.HYPER: set(),
GreedyOracle.PREPROCESS: set(),
GreedyOracle.OPT: set(),
GreedyOracle.ARCH: set(),
}
# The quota used to tune each category of hps.
self._capacity = {
GreedyOracle.HYPER: 1,
GreedyOracle.PREPROCESS: 1,
GreedyOracle.OPT: 1,
GreedyOracle.ARCH: 4,
}
self._stage_trial_count = 0
        # randint requires integer bounds (1e4 is a float).
        self.seed = seed or random.randint(1, 10000)
# Incremented at every call to `populate_space`.
self._seed_state = self.seed
self._tried_so_far = set()
self._max_collisions = 5
def set_state(self, state):
super().set_state(state)
# TODO: self.hyper_graph.set_state(state['hyper_graph'])
# currently the state is not json serializable.
self._stage = state['stage']
self._capacity = state['capacity']
def get_state(self):
state = super().get_state()
state.update({
# TODO: 'hyper_graph': self.hyper_graph.get_state(),
# currently the state is not json serializable.
'stage': self._stage,
'capacity': self._capacity,
})
return state
def update_space(self, hyperparameters):
# Get the block names.
preprocess_graph, keras_graph = self.hyper_graph.build_graphs(
hyperparameters)
# Add the new Hyperparameters to different categories.
ref_names = {hp.name for hp in self.hyperparameters.space}
for hp in hyperparameters.space:
if hp.name not in ref_names:
hp_type = None
if any([hp.name.startswith(block.name)
for block in self.hyper_graph.blocks
if isinstance(block, base.HyperBlock)]):
hp_type = GreedyOracle.HYPER
elif any([hp.name.startswith(block.name)
for block in preprocess_graph.blocks]):
hp_type = GreedyOracle.PREPROCESS
elif any([hp.name.startswith(block.name)
for block in keras_graph.blocks]):
hp_type = GreedyOracle.ARCH
else:
hp_type = GreedyOracle.OPT
self._hp_names[hp_type].add(hp.name)
super().update_space(hyperparameters)
def _populate_space(self, trial_id):
for _ in range(len(GreedyOracle.STAGES)):
values = self._generate_stage_values()
# Reached max collisions.
if values is None:
# Try next stage.
self._stage = GreedyOracle.next_stage(self._stage)
self._stage_trial_count = 0
continue
# Values found.
self._stage_trial_count += 1
if self._stage_trial_count == self._capacity[self._stage]:
self._stage = GreedyOracle.next_stage(self._stage)
self._stage_trial_count = 0
return {'status': kerastuner.engine.trial.TrialStatus.RUNNING,
'values': values}
# All stages reached max collisions.
return {'status': kerastuner.engine.trial.TrialStatus.STOPPED,
'values': None}
def _generate_stage_values(self):
best_trials = self.get_best_trials()
if best_trials:
best_values = best_trials[0].hyperparameters.values
else:
best_values = self.hyperparameters.values
collisions = 0
while 1:
# Generate new values for the current stage.
values = {}
for p in self.hyperparameters.space:
if p.name in self._hp_names[self._stage]:
values[p.name] = p.random_sample(self._seed_state)
self._seed_state += 1
values = {**best_values, **values}
# Keep trying until the set of values is unique,
# or until we exit due to too many collisions.
values_hash = self._compute_values_hash(values)
if values_hash not in self._tried_so_far:
self._tried_so_far.add(values_hash)
break
collisions += 1
if collisions > self._max_collisions:
# Reached max collisions. No value to return.
return None
return values
class Greedy(AutoTuner):
def __init__(self,
hyper_graph,
hypermodel,
objective,
max_trials,
fit_on_val_data=False,
seed=None,
hyperparameters=None,
tune_new_entries=True,
allow_new_entries=True,
**kwargs):
self.seed = seed
oracle = GreedyOracle(
hyper_graph=hyper_graph,
objective=objective,
max_trials=max_trials,
seed=seed,
hyperparameters=hyperparameters,
tune_new_entries=tune_new_entries,
allow_new_entries=allow_new_entries)
hp = oracle.get_space()
preprocess_graph, keras_graph = hyper_graph.build_graphs(hp)
oracle.update_space(hp)
super().__init__(
hyper_graph=hyper_graph,
fit_on_val_data=fit_on_val_data,
oracle=oracle,
hypermodel=hypermodel,
**kwargs)
TUNER_CLASSES = {
'bayesian': BayesianOptimization,
'random': RandomSearch,
'hyperband': Hyperband,
'greedy': Greedy,
'image_classifier': Greedy,
'image_regressor': Greedy,
'text_classifier': Greedy,
'text_regressor': Greedy,
'structured_data_classifier': Greedy,
'structured_data_regressor': Greedy,
}
def get_tuner_class(tuner):
if isinstance(tuner, str) and tuner in TUNER_CLASSES:
return TUNER_CLASSES.get(tuner)
else:
raise ValueError('The value {tuner} passed for argument tuner is invalid, '
'expected one of "greedy", "random", "hyperband", '
'"bayesian".'.format(tuner=tuner))
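# Usage sketch (illustrative only: the keyword arguments follow the Greedy
# constructor above plus standard KerasTuner arguments, and hyper_graph /
# keras_graph are assumed to come from the surrounding AutoModel code):
#
#   tuner_cls = get_tuner_class('greedy')
#   tuner = tuner_cls(hyper_graph=hyper_graph,
#                     hypermodel=keras_graph,
#                     objective='val_loss',
#                     max_trials=10,
#                     directory='auto_model')
#   tuner.search(x=train_dataset, validation_data=val_dataset, epochs=10)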
| python |