content
stringlengths 0
894k
| type
stringclasses 2
values |
---|---|
import datetime
from random import randint
from telegram.chat import Chat
from telegram.message import Message
from telegram.user import User
class MockBot:
    """Stand-in for telegram.Bot that records the last payload sent per chat."""

    # Kept for backward compatibility with code that reads MockBot.last_message.
    last_message = {}

    def __init__(self):
        # Bug fix: the class-level dict was shared by every MockBot instance,
        # leaking messages between tests. Shadow it with a per-instance store.
        self.last_message = {}

    def send_message(self, chat_id, text, **kwargs):
        self.last_message[chat_id] = text

    def sendMessage(self, *args, **kwargs):
        # camelCase alias mirroring the Telegram Bot API naming.
        self.send_message(*args, **kwargs)

    def send_photo(self, chat_id, photo, *args, **kwargs):
        self.last_message[chat_id] = photo

    def sendPhoto(self, *args, **kwargs):
        self.send_photo(*args, **kwargs)
class MockChat(Chat):
    """Chat double that picks a random id when none is supplied."""

    def __init__(self, id=None, type=Chat.PRIVATE):
        super().__init__(id=id if id else randint(1, 1000), type=type)
class MockUser(User):
    """User double with a random id and placeholder name by default."""

    def __init__(self, id=None, first_name=None, is_bot=False):
        user_id = id if id else randint(1, 1000)
        name = first_name if first_name else "MockUser"
        super().__init__(id=user_id, first_name=name, is_bot=is_bot or False)
class MockMessage(Message):
    """Message double; fills in a random id and default chat/user when omitted."""

    def __init__(
        self,
        text="",
        reply_to_message=None,
        from_user=None,
        chat=None,
        date=None,
    ):
        # Bug fix: `date=datetime.datetime.now()` as a default argument was
        # evaluated once at import time, so every message shared one stale
        # timestamp. Passing date=None still yields "now", so callers are
        # unaffected.
        super().__init__(
            message_id=randint(1, 1000),
            from_user=from_user or MockUser(),
            date=date or datetime.datetime.now(),
            chat=chat or MockChat(),
            text=text,
            reply_to_message=reply_to_message,
        )
class MockUpdate:
    """Update double wrapping a single message."""

    message = None

    def __init__(self, message=None):
        # Bug fix: `message=MockMessage()` as a default argument was built once
        # at class-definition time and then shared by every MockUpdate; build a
        # fresh message per instance instead.
        self.message = message if message is not None else MockMessage()
"""
def PaDondeHoy(bot, update):
day = datetime.date.today().weekday()
@lru_cache()
def cached_response(day_of_week, chat):
with open('res/texts/days.pickle', 'rb') as f:
days = pickle.load(f)
return random.choice(days[day_of_week])
response = cached_response(day, update.message.chat_id)
bot.sendMessage(chat_id=update.message.chat_id, text=response)
"""
|
python
|
def pairs(k, arr):
    """Count pairs (a, b) drawn from arr such that b - a == k.

    Sorts a copy of the input and sweeps it with two indices; the right
    index never moves backwards, so the scan is linear after sorting.
    """
    values = sorted(arr)
    count = 0
    hi = 1
    for lo in range(len(values) - 1):
        while hi < len(values):
            diff = values[hi] - values[lo]
            if diff > k:
                break
            if diff == k:
                count += 1
            hi += 1
    return count
|
python
|
# MINLP written by GAMS Convert at 04/21/18 13:54:23
#
# Equation counts
# Total E G L N X C B
# 19 18 1 0 0 0 0 0
#
# Variable counts
# x b i s1s s2s sc si
# Total cont binary integer sos1 sos2 scont sint
# 36 17 0 19 0 0 0 0
# FX 1 0 0 1 0 0 0 0
#
# Nonzero counts
# Total const NL DLL
# 169 106 63 0
#
# Reformulation has removed 1 variable and 1 equation
from pyomo.environ import *

# MINLP generated by GAMS Convert (see header above); variable/constraint
# names are the mechanical x#/i#/c# labels from the converter.
model = m = ConcreteModel()

# Integer selection variables. Note m.i14 is fixed at 0 via its bounds
# (the "FX 1" entry in the converter header).
m.i1 = Var(within=Integers,bounds=(0,14),initialize=0)
m.i2 = Var(within=Integers,bounds=(0,14),initialize=0)
m.i3 = Var(within=Integers,bounds=(0,14),initialize=0)
m.i4 = Var(within=Integers,bounds=(0,14),initialize=0)
m.i5 = Var(within=Integers,bounds=(0,14),initialize=0)
m.i6 = Var(within=Integers,bounds=(0,14),initialize=0)
m.i7 = Var(within=Integers,bounds=(0,14),initialize=0)
m.i8 = Var(within=Integers,bounds=(0,14),initialize=0)
m.i9 = Var(within=Integers,bounds=(0,14),initialize=0)
m.i10 = Var(within=Integers,bounds=(0,14),initialize=0)
m.i11 = Var(within=Integers,bounds=(0,14),initialize=0)
m.i12 = Var(within=Integers,bounds=(0,14),initialize=0)
m.i13 = Var(within=Integers,bounds=(0,14),initialize=0)
m.i14 = Var(within=Integers,bounds=(0,0),initialize=0)
m.i15 = Var(within=Integers,bounds=(0,14),initialize=0)
m.i16 = Var(within=Integers,bounds=(0,14),initialize=0)
m.i17 = Var(within=Integers,bounds=(0,14),initialize=0)
m.i18 = Var(within=Integers,bounds=(0,14),initialize=0)
m.i19 = Var(within=Integers,bounds=(0,14),initialize=0)

# Continuous process variables, initialized at their lower bounds.
m.x20 = Var(within=Reals,bounds=(1,1000),initialize=1)
m.x21 = Var(within=Reals,bounds=(1,1000),initialize=1)
m.x22 = Var(within=Reals,bounds=(1,100),initialize=1)
m.x23 = Var(within=Reals,bounds=(1,32.2),initialize=1)
m.x24 = Var(within=Reals,bounds=(1,100),initialize=1)
m.x25 = Var(within=Reals,bounds=(18.4,100),initialize=18.4)
m.x26 = Var(within=Reals,bounds=(1.4,14),initialize=1.4)
m.x27 = Var(within=Reals,bounds=(1.4,14),initialize=1.4)
m.x28 = Var(within=Reals,bounds=(0.001,1),initialize=0.001)
m.x29 = Var(within=Reals,bounds=(0.001,1),initialize=0.001)
m.x30 = Var(within=Reals,bounds=(0.001,1),initialize=0.001)
m.x31 = Var(within=Reals,bounds=(0.001,1),initialize=0.001)
m.x32 = Var(within=Reals,bounds=(0.001,1),initialize=0.001)
m.x33 = Var(within=Reals,bounds=(0.001,10),initialize=0.001)
m.x34 = Var(within=Reals,bounds=(0.001,10),initialize=0.001)
m.x35 = Var(within=Reals,bounds=(-10,10),initialize=0)

# Objective: maximize the ratio x25/x23 (expressed as minimizing its negative).
m.obj = Objective(expr=-m.x25/m.x23, sense=minimize)

# Linear defining equations for x20, x23, x24 and the nonlinear property
# correlations for x21, x22 (quadratic group-contribution style terms).
m.c1 = Constraint(expr= 23.58*m.i1 + 22.88*m.i2 + 21.74*m.i3 + 18.25*m.i4 - 0.03*m.i5 + 38.13*m.i6 + 66.86*m.i7
                        + 93.84*m.i8 + 92.88*m.i9 + 76.34*m.i10 + 22.42*m.i11 + 31.22*m.i12 + 73.23*m.i13 + 50.17*m.i14
                        + 52.82*m.i15 + 11.74*m.i16 + 63.56*m.i17 + 68.78*m.i18 + 52.1*m.i19 - m.x20 == -198.2)
m.c2 = Constraint(expr=-m.x20/(0.584 + 0.0136065*m.i1 - (0.0141*m.i1 + 0.0189*m.i2 + 0.0164*m.i3 + 0.0067*m.i4 + 0.0111*
                        m.i5 + 0.0105*m.i6 + 0.0133*m.i7 + 0.0068*m.i8 + 0.0741*m.i9 + 0.0204*m.i10 + 0.0168*m.i11 +
                        0.0098*m.i12 + 0.0243*m.i13 + 0.0295*m.i14 + 0.013*m.i15 + 0.0169*m.i16 + 0.0031*m.i17 + 0.0119*
                        m.i18 + 0.0019*m.i19)**2 + 0.0182385*m.i2 + 0.015826*m.i3 + 0.0064655*m.i4 + 0.0107115*m.i5 +
                        0.0101325*m.i6 + 0.0128345*m.i7 + 0.006562*m.i8 + 0.0715065*m.i9 + 0.02316*m.i10 + 0.016212*m.i11
                        + 0.009457*m.i12 + 0.0234495*m.i13 + 0.0284675*m.i14 + 0.012545*m.i15 + 0.0163085*m.i16 +
                        0.0029915*m.i17 + 0.0114835*m.i18 + 0.0018335*m.i19) + m.x21 == 0)
m.c3 = Constraint(expr=-1/(0.113 + 0.014*m.i1 + 0.0096*m.i2 + 0.0044*m.i3 - 0.0011*m.i4 + 0.0089*m.i5 + 0.0081*m.i6 -
                        0.0025*m.i7 + 0.0066*m.i8 - 0.0048*m.i9 - 0.012*m.i10 + 0.0017*m.i11 - 0.0016*m.i12 - 0.0013*
                        m.i13 - 0.0013*m.i14 - 0.005*m.i15 - 0.0042*m.i16 - 0.002*m.i17 - 0.0017*m.i18 - 0.0019*m.i19)**2
                        + m.x22 == 0)
m.c4 = Constraint(expr= 8.8*m.i1 + 7.26*m.i2 + 5*m.i3 + 1.76*m.i4 + 4*m.i5 + 8.6*m.i6 + 9*m.i7 + 8.6*m.i8 + 10.7*m.i9
                        + 10.7*m.i10 + 8.4*m.i11 + 8.4*m.i12 + 14*m.i13 + 10.5*m.i14 + 10.5*m.i15 + 7.5*m.i16
                        + 10.7*m.i17 + 8*m.i18 + 8*m.i19 - m.x23 == 0)
m.c5 = Constraint(expr= 2.373*m.i1 + 2.226*m.i2 + 1.691*m.i3 + 0.636*m.i4 - 0.67*m.i5 + 4.532*m.i6 + 6.582*m.i7
                        + 9.52*m.i8 + 16.826*m.i9 + 12.499*m.i10 + 2.41*m.i11 + 4.682*m.i12 + 10.788*m.i13 + 6.436*m.i14
                        + 6.93*m.i15 + 1.896*m.i16 + 6.884*m.i17 + 6.817*m.i18 + 5.984*m.i19 - m.x24 == -15.3)

# Ratio/intermediate definitions (x28..x35) and the coupling equations.
m.c6 = Constraint(expr=-m.x20/m.x21 + m.x32 == 0)
m.c7 = Constraint(expr=-272.04/m.x21 + m.x30 == 0)
m.c8 = Constraint(expr=-316.48/m.x21 + m.x31 == 0)
m.c9 = Constraint(expr=-log(m.x22)*m.x32/(1 - m.x32) + m.x33 == 0)
m.c10 = Constraint(expr= - 0.4605*m.x33 + m.x34 == 0.4835)
m.c11 = Constraint(expr=-(m.x33 - (1 + m.x32)*m.x34)/(3 + m.x32)/(1 - m.x32)**2/m.x34 + m.x35 == 0)
m.c12 = Constraint(expr=-exp(-(1 + m.x35*(3 + m.x30)*(1 - m.x30)**3 - m.x30**2)*m.x34/m.x30) + m.x28 == 0)
m.c13 = Constraint(expr=-exp(-(1 + m.x35*(3 + m.x31)*(1 - m.x31)**3 - m.x31**2)*m.x34/m.x31) + m.x29 == 0)
m.c14 = Constraint(expr=-m.x28*m.x22 + m.x26 == 0)
m.c15 = Constraint(expr=-m.x29*m.x22 + m.x27 == 0)
m.c16 = Constraint(expr=-((1 - m.x30)/(1 - m.x32))**0.38*m.x24 + m.x25 == 0)

# Structural (composition) constraints on the integer selections.
m.c17 = Constraint(expr= m.i1 - m.i3 - 2*m.i4 + m.i5 + m.i6 + m.i7 + m.i8 + m.i9 + m.i10 + m.i13 - m.i16 + m.i17 == 2)
m.c18 = Constraint(expr= m.i1 + m.i2 + m.i3 + m.i4 + m.i5 + m.i6 + m.i7 + m.i8 + m.i9 + m.i10 + m.i11 + m.i12 + m.i13
                         + m.i14 + m.i15 + m.i16 + m.i17 + m.i18 + m.i19 >= 2)
|
python
|
from org.mowl.Parsers import TaxonomyWithRelsParser as Parser
from org.semanticweb.owlapi.model import OWLOntology
from mowl.graph.edge import Edge
import sys

from mowl.graph.graph import GraphGenModel


class TaxonomyWithRelsParser(GraphGenModel):
    r'''
    Projects the ontology into edges using the following axiom patterns:

    * :math:`A \sqsubseteq B` will generate the triple :math:`\langle A, subClassOf, B \rangle`
    * :math:`A \sqsubseteq \exists R. B` will generate the triple :math:`\left\langle A, R, B \right\rangle`

    :param ontology: The ontology to be processed.
    :param bidirectional_taxonomy: If true then per each SubClass edge one SuperClass edge will be generated.
    '''

    def __init__(self, ontology: OWLOntology, bidirectional_taxonomy: bool = False):
        super().__init__(ontology)
        # Delegate the actual projection to the JVM-side parser.
        self.parser = Parser(ontology, bidirectional_taxonomy)

    def parse(self):
        """Run the projection and convert JVM edge objects to mowl Edges."""
        raw_edges = self.parser.parse()
        return [
            Edge(str(edge.src()), str(edge.rel()), str(edge.dst()))
            for edge in raw_edges
        ]
|
python
|
import numpy as np
import matplotlib.pyplot as plt
from de_expl import de_expl
from es_expl import es_expl
from gwo_expl import gwo_expl
from hho_expl import hho_expl
from mfo_expl import mfo_expl
from pso_expl import pso_expl
from p5_base import plot_progress, plot_objs
fevals = 10000

# Optimizers to compare; `places` selects which of them to include.
fs = [de_expl, es_expl, gwo_expl, hho_expl, mfo_expl, pso_expl]
all_names = ["DE", "ES", "GWO", "HHO", "MFO", "PSO"]
places = [0, 1, 2, 3, 4, 5]  # which methods to include

es = [fs[i](fevals) for i in places]
names = [all_names[i] for i in places]

fig, ax = plt.subplots(3, 2, sharey=True, figsize=(6, 7))
ax = ax.flatten()
# Bug fix: the loop previously hardcoded range(6) and titled panels from
# all_names[i], so shrinking/reordering `places` would crash or mislabel the
# plots; iterate over the selected methods and use `names` instead.
for i in range(len(places)):
    handles = plot_progress(es[i][3]["fitness"], es[i][4], ax=ax[i],
                            legend=False, m=1.3)
    # Only the bottom row of the grid keeps its x-label.
    if i < len(places) - 2:
        ax[i].set_xlabel("")
    # Only the left column keeps its y-label.
    if i % 2 == 1:
        ax[i].set_ylabel("")
    ax[i].set_title(names[i])

lbls = ["gen. ave.", r"gen. 1-$\sigma$", "gen. max", "gen. min"]
fig.legend(handles, lbls, loc="upper center", ncol=4)
plt.subplots_adjust(top=0.9, bottom=.09, left=0.1, right=.97,
                    wspace=0.07, hspace=0.35)
plt.show()
|
python
|
# Create a mapping of state to abbreviation
states = {
    'Oregon': 'OR',
    'Florida': 'FL',
    'California': 'CA',
    'New York': 'NY',
    'Michigan': 'MI',
}

# Create a basic set of states and some cities in them
cities = {
    'CA': 'San Francisco',  # fixed typo: was 'San Farncisco'
    'MI': 'Detroit',
    'FL': 'Jacksonville',   # fixed typo/casing: was 'JAcksonsville'
}

# Add some more cities
cities['NY'] = 'New York'
cities['OR'] = 'Portland'

# Print out some cities
print('-' * 10)
print("NY State has: ", cities['NY'])
print("OR State has: ", cities['OR'])

# Print some states
print('-' * 10)
print("Michigan abbreviation is: ", states['Michigan'])  # fixed "Michigane"
print("Florida abbreviation is: ", states['Florida'])

# Do it by using the state then cities dict
print('-' * 10)
print("Michigan has: ", cities[states['Michigan']])
print("Florida has: ", cities[states['Florida']])

# Print every state abbreviation
print('-' * 10)
for state, abbrev in states.items():
    print(f"{state} is abbreviated {abbrev}")

# Print every city in state
print('-' * 10)
for abbrev, city in cities.items():
    print(f"{abbrev} has the city {city}")

# Now do both at the same time
print('-' * 10)
for state, abbrev in states.items():
    print(f"{state} is abbreviated {abbrev}")
    print(f"and has city {cities[abbrev]}")

print('-' * 10)
# Safely get an abbreviation by state that might not be there
state = states.get('Texas')
if not state:
    print("Sorry no Texas.")

# Get a city with a default value
city = cities.get('TX', 'Does Not Exist')
print(f"The city for the state 'TX' is: {city}")
## END
|
python
|
from django.db import models
from django.utils import timezone
from django.contrib.auth.models import User
from modules.execution.models import *
class Project(models.Model):
    """A test project; scripts, test suites and test rounds belong to one."""
    PROJECT_TYPES = (
        ("internal", "internal"),
        ("public", "public")
    )
    name = models.CharField(max_length=100, unique=True)
    # Defaults to the first choice, "internal".
    project_type = models.CharField(choices=PROJECT_TYPES, default=PROJECT_TYPES[0][0], max_length=100)
    # NOTE: default=timezone.now records creation time, but update_time is not
    # auto-updated on save (auto_now is not used anywhere in these models).
    create_time = models.DateTimeField(default=timezone.now)
    update_time = models.DateTimeField(default=timezone.now)
    # owner = models.ManyToManyField(User, blank=True)

    def __str__(self):
        return self.name
class Script(models.Model):
    """A test script registered under a project."""
    name = models.CharField(max_length=100)
    project = models.ForeignKey(Project, on_delete=models.CASCADE)
    version = models.CharField(max_length=100, blank=True)
    status = models.CharField(max_length=100, blank=True)
    # Free-form names for now; the commented FKs show the intended User links.
    author = models.CharField(max_length=100, blank=True)
    # author = models.ForeignKey(User)
    maintainer = models.CharField(max_length=100, blank=True)
    # maintainer = models.ForeignKey(User)
    # Presumably timestamps carried by the script file itself — TODO confirm.
    file_created = models.DateField(blank=True, null=True)
    file_updated = models.DateField(blank=True, null=True)
    tag = models.CharField(max_length=100, blank=True)
    create_time = models.DateTimeField(default=timezone.now)
    update_time = models.DateTimeField(default=timezone.now)
class ScriptFunction(models.Model):
    """A single function inside a Script."""
    name = models.CharField(max_length=100)
    script = models.ForeignKey(Script, on_delete=models.CASCADE)
    create_time = models.DateTimeField(default=timezone.now)
    update_time = models.DateTimeField(default=timezone.now)

    class Meta:
        # Explicit table name; the auto-generated one would lack underscores.
        db_table = "projects_script_function"
class AutomatedCase(models.Model):
    """An automated test case implemented by a ScriptFunction."""
    name = models.CharField(max_length=100)
    script_function = models.ForeignKey(ScriptFunction, on_delete=models.CASCADE)
    create_time = models.DateTimeField(default=timezone.now)
    update_time = models.DateTimeField(default=timezone.now)

    class Meta:
        db_table = "projects_automated_case"
class TestSuite(models.Model):
    """A named collection of Scripts within a Project."""
    SUITE_TYPES = (
        ("Debug", "Debug"),
        ("BVT", "BVT"),
        ("Regression", "Regression")
    )
    name = models.CharField(max_length=100)
    project = models.ForeignKey(Project, on_delete=models.CASCADE)
    script = models.ManyToManyField(Script)
    # Defaults to the first choice, "Debug".
    suite_type = models.CharField(choices=SUITE_TYPES, default=SUITE_TYPES[0][0], max_length=100)
    # cannot use limit_choices_to for referring to a model foreign-key field
    # like limit_choices_to={'project': project}
    # https://stackoverflow.com/questions/31578559/django-foreignkey-limit-choices-to-a-different-foreignkey-id
    # script = models.ManyToManyField(Script, limit_choices_to={'project': 2})
    create_time = models.DateTimeField(default=timezone.now)
    update_time = models.DateTimeField(default=timezone.now)

    def __str__(self):
        return self.name

    class Meta:
        db_table = "projects_test_suite"
        # Suite names only need to be unique within a project.
        unique_together = ("name", "project")
class TestRound(models.Model):
    """One execution of a TestSuite against a concrete environment.

    TestEnvironment/Browser/Device/MobileOS/PlatformOS come from the
    star-import of modules.execution.models at the top of this file.
    """
    STATUS_TYPES = (
        ("Waiting", "Waiting"),
        ("Running", "Running"),
        ("Completed", "Completed")
    )
    RESULT_TYPES = (
        ("NotRun", "NotRun"),
        ("Pass", "Pass"),
        ("Fail", "Fail"),
        ("Warning", "Warning")
    )
    name = models.CharField(max_length=100)
    project = models.ForeignKey(Project, on_delete=models.CASCADE)
    status_type = models.CharField(choices=STATUS_TYPES, default=STATUS_TYPES[0][0], max_length=100)
    result_type = models.CharField(choices=RESULT_TYPES, default=RESULT_TYPES[0][0], max_length=100)
    # Aggregated per-case counters for this round.
    pass_count = models.IntegerField(default=0)
    fail_count = models.IntegerField(default=0)
    warning_count = models.IntegerField(default=0)
    not_run_count = models.IntegerField(default=0)
    # SET_NULL keeps the round's history even if its suite/environment is deleted.
    test_suite = models.ForeignKey(TestSuite, on_delete=models.SET_NULL, null=True)
    test_environment = models.ForeignKey(TestEnvironment, on_delete=models.SET_NULL, null=True)
    browser = models.ForeignKey(Browser, on_delete=models.SET_NULL, blank=True, null=True)
    device = models.ForeignKey(Device, on_delete=models.SET_NULL, blank=True, null=True)
    mobile_os = models.ForeignKey(MobileOS, on_delete=models.SET_NULL, blank=True, null=True)
    platform_os = models.ForeignKey(PlatformOS, on_delete=models.SET_NULL, blank=True, null=True)
    creator = models.ForeignKey(User, on_delete=models.SET_NULL, null=True)
    start_time = models.DateTimeField(default=timezone.now)
    end_time = models.DateTimeField(blank=True, null=True)
    create_time = models.DateTimeField(default=timezone.now)
    update_time = models.DateTimeField(default=timezone.now)
    extra_info = models.CharField(max_length=1000, blank=True, null=True)

    class Meta:
        db_table = "projects_test_round"
|
python
|
from datasets.parties.all import df_regions_votes_and_parties
from datasets.regions.province import df_community_and_provice
|
python
|
# Link - https://www.hackerrank.com/challenges/designer-door-mat/problem
height, width = (int(tok) for tok in input().split())

# Top half: a growing ".|." pattern centred with dashes.
for size in range(1, height, 2):
    print((size * ".|.").center(width, "-"))

print("WELCOME".center(width, "-"))

# Bottom half mirrors the top.
for size in range(height - 2, -1, -2):
    print((size * ".|.").center(width, "-"))
|
python
|
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
import inspect
from typing import Dict
import pandas as pd
from apache_beam.dataframe import expressions
class DeferredFrame(object):
  """Base wrapper for a deferred pandas object backed by an expression node."""

  # Registry from concrete pandas type to its deferred wrapper class.
  _pandas_type_map = {}  # type: Dict[type, type]

  def __init__(self, expr):
    self._expr = expr

  @classmethod
  def _register_for(cls, pandas_type):
    """Class decorator registering the decorated class for `pandas_type`."""
    def decorator(deferred_type):
      cls._pandas_type_map[pandas_type] = deferred_type
      return deferred_type
    return decorator

  @classmethod
  def wrap(cls, expr):
    """Wrap `expr` in the deferred class registered for its proxy's type."""
    proxy_type = type(expr.proxy())
    return cls._pandas_type_map[proxy_type](expr)

  def _elementwise(self, func, name=None, other_args=(), inplace=False):
    return _elementwise_function(func, name, inplace=inplace)(self, *other_args)
def name_and_func(method):
  """Normalize `method` (a string or a callable) into a (name, callable) pair.

  A string becomes a callable that looks the method up on its first argument.
  """
  if not isinstance(method, str):
    return method.__name__, method

  def invoke(df, *args, **kwargs):
    return getattr(df, method)(*args, **kwargs)

  return method, invoke
def _elementwise_method(func, name=None, restrictions=None, inplace=False):
  """Build an elementwise deferred method from `func` (a callable or a
  method name string).

  :param restrictions: mapping of argument name to the supported value(s).
  :param inplace: whether the underlying operation mutates its receiver.
  """
  if name is None:
    name, func = name_and_func(func)
  if restrictions is None:
    restrictions = {}
  # Bug fix: `inplace` was previously dropped here, so in-place methods were
  # wrapped as if they returned a new frame.
  return _elementwise_function(func, name, restrictions, inplace=inplace)
def _elementwise_function(func, name=None, restrictions=None, inplace=False):
  """Wrap `func` so it applies elementwise over deferred frames.

  Arguments that are DeferredFrame or pandas NDFrame instances are deferred;
  everything else is captured as a constant. `restrictions` maps argument
  names to the value (or list of values) supported — any other value raises
  NotImplementedError. With `inplace=True` the first argument's expression is
  replaced and the first argument returned.
  """
  if name is None:
    name = func.__name__
  if restrictions is None:
    restrictions = {}

  def wrapper(*args, **kwargs):
    # Validate restricted arguments, whether passed by keyword or position.
    for key, values in restrictions.items():
      if key in kwargs:
        value = kwargs[key]
      else:
        try:
          # Fix: inspect.getargspec was deprecated and removed in Python 3.11;
          # getfullargspec exposes the same .args list.
          ix = inspect.getfullargspec(func).args.index(key)
        except ValueError:
          # TODO: fix for delegation?
          continue
        if len(args) <= ix:
          continue
        value = args[ix]
      if not isinstance(values, list):
        values = [values]
      if value not in values:
        raise NotImplementedError(
            '%s=%s not supported for %s' % (key, value, name))
    # Partition positional args into deferred expressions and constants.
    deferred_arg_indices = []
    deferred_arg_exprs = []
    constant_args = [None] * len(args)
    for ix, arg in enumerate(args):
      if isinstance(arg, DeferredFrame):
        deferred_arg_indices.append(ix)
        deferred_arg_exprs.append(arg._expr)
      elif isinstance(arg, pd.core.generic.NDFrame):
        # A concrete frame becomes a constant expression; its empty slice
        # serves as the proxy.
        deferred_arg_indices.append(ix)
        deferred_arg_exprs.append(expressions.ConstantExpression(arg, arg[0:0]))
      else:
        constant_args[ix] = arg

    if inplace:
      actual_func = copy_and_mutate(func)
    else:
      actual_func = func

    def apply(*actual_args):
      # Re-interleave the computed deferred args with the captured constants.
      full_args = list(constant_args)
      for ix, arg in zip(deferred_arg_indices, actual_args):
        full_args[ix] = arg
      return actual_func(*full_args, **kwargs)

    result_expr = expressions.elementwise_expression(
        name, apply, deferred_arg_exprs)
    if inplace:
      args[0]._expr = result_expr
      return args[0]
    else:
      return DeferredFrame.wrap(result_expr)

  return wrapper
def copy_and_mutate(func):
  """Turn an in-place mutator into a function returning a mutated copy."""
  def wrapper(self, *args, **kwargs):
    duplicate = self.copy()
    func(duplicate, *args, **kwargs)
    return duplicate
  return wrapper
class WontImplementError(NotImplementedError):
  """A subclass of NotImplementedError raised to indicate that implementing
  the given method is infeasible.

  Raising this error also prevents the corresponding doctests from being
  validated when run with the beam dataframe validation doctest runner.
  """
  pass
|
python
|
import time
import uuid
import six
from mazepa.queue import Queue
from mazepa.job import Job, AllJobsIndicator
class Scheduler:
    """Registers jobs, pulls completed work off a queue, and submits task
    batches until every registered job is drained."""

    def __init__(self, queue_name=None, completion_queue_name=None,
                 queue_region=None, threads=1):
        self.queue = Queue(queue_name=queue_name,
                           completion_queue_name=completion_queue_name,
                           queue_region=queue_region,
                           threads=threads)
        # job_name -> job; entries migrate to finished_jobs as they drain.
        self.unfinished_jobs = {}
        self.finished_jobs = {}

    def all_jobs_finished(self):
        """Return True when no unfinished jobs remain.

        Bug fix: the original returned ``bool(self.unfinished_jobs)`` — the
        exact inverse of what the method name promises.
        """
        return not self.unfinished_jobs

    def submit_ready_jobs(self):
        """Submit the next batch for jobs whose previous batch has completed."""
        jobs_ready = self.queue.get_completed()
        if jobs_ready is not None:
            self.submit_jobs(jobs_ready)

    def execute_until_completion(self, sleep_gap_sec=15):
        """Kick off all jobs, then poll for completions until none remain."""
        jobs_spec = AllJobsIndicator()
        self.submit_jobs(jobs_spec)
        while True:
            self.submit_ready_jobs()
            if not self.unfinished_jobs:
                break
            time.sleep(sleep_gap_sec)

    def submit_jobs(self, jobs_spec):
        """Collect the next task batch from each flagged job and enqueue it.

        Jobs that yield no more tasks (or raise StopIteration) are moved to
        ``finished_jobs``.
        """
        tasks = []
        jobs_just_finished = []
        for job_name, job in six.iteritems(self.unfinished_jobs):
            this_job_tasks = []
            # if this job is flagged for execution
            if isinstance(jobs_spec, AllJobsIndicator) or \
                    job_name in jobs_spec:
                if isinstance(job, Job):
                    this_job_tasks = job.get_next_task_batch()
                    print("Got {} tasks from job '{}'".format(
                        len(this_job_tasks), job_name))
                    if this_job_tasks == []:
                        jobs_just_finished.append(job_name)
                else:
                    # Plain generator job: one batch per call.
                    try:
                        this_job_tasks = job()
                    except StopIteration:
                        # Bug fix: the original printed the literal "{}"
                        # placeholder because .format(job_name) was missing.
                        print("Job '{}' is done!".format(job_name))
                        jobs_just_finished.append(job_name)
                for t in this_job_tasks:
                    t.job_name = job_name
                tasks.extend(this_job_tasks)
        # Move finished jobs to finished dict (after iteration, so the dict is
        # not mutated while being traversed).
        for job_name in jobs_just_finished:
            print("Flagging job as FINISHED: '{}'".format(job_name))
            self.finished_jobs[job_name] = self.unfinished_jobs[job_name]
            del self.unfinished_jobs[job_name]
        if len(tasks) > 0:
            print("Scheduling {} tasks..".format(len(tasks)))
            self.queue.submit_mazepa_tasks(tasks)
        else:
            print("No tasks to submit!")

    def register_job(self, job, job_name=None):
        '''
        job must be either an instance of type mazepa.Job
        or a simple generator
        '''
        # Bug fix: `inspect` was used here but never imported at module level,
        # which raised NameError for non-Job jobs; import locally.
        import inspect
        if not isinstance(job, Job):
            if not inspect.isgeneratorfunction(job) and \
                    not inspect.isgenerator(job):
                raise Exception(
                    "Registering job that is neither Mazepa job nor a "
                    "generator is not supported. Submitted job type: {}".format(
                        type(job)))
        if job_name is None:
            # Draw random UUIDs until we find an unused name.
            job_name = uuid.uuid1()
            while job_name in self.unfinished_jobs:
                job_name = uuid.uuid1()
        else:
            if job_name in self.unfinished_jobs:
                raise Exception(
                    'Unable to register task "{}": task with name "{}" is '
                    'already registered'.format(job_name, job_name))
        self.unfinished_jobs[job_name] = job
        return job_name

    def unregister_job(self, job_name):
        del self.unfinished_jobs[job_name]
|
python
|
"""Main module which implements the components of graph-based autoconstraint model.
"""
import torch
from sketchgraphs.pipeline.graph_model import target, scopes_from_offsets
from sketchgraphs_models.graph.model import EdgePartnerNetwork, numerical_features, message_passing
from sketchgraphs_models import nn as sg_nn
class GlobalEmbeddingModelCore(torch.nn.Module):
    """Embedding core that combines per-node message passing with a global
    embedding read from a GRU's output at the last node of each graph.
    """

    def __init__(self, embedding_dim, feature_dims, depth=3):
        super(GlobalEmbeddingModelCore, self).__init__()
        self.embedding_dim = embedding_dim
        # Per-node-type numerical feature encoders feeding a shared embedding.
        self.node_embedding = message_passing.DenseSparsePreEmbedding(
            target.TargetType, {
                k.name: torch.nn.Sequential(
                    numerical_features.NumericalFeatureEncoding(fd.values(), embedding_dim),
                    numerical_features.NumericalFeaturesEmbedding(embedding_dim)
                )
                for k, fd in feature_dims.items()
            },
            len(target.NODE_TYPES), embedding_dim)
        # 3-layer GRU over the node sequence; its output at each graph's last
        # node is used as that graph's global embedding.
        self.global_entity_embedding = torch.nn.GRU(
            input_size=embedding_dim,
            hidden_size=embedding_dim,
            num_layers=3)
        self.edge_embedding = torch.nn.Embedding(len(target.EDGE_TYPES), embedding_dim)
        self.message_passing = sg_nn.MessagePassingNetwork(
            depth, torch.nn.GRUCell(embedding_dim, embedding_dim),
            sg_nn.ConcatenateLinear(embedding_dim, embedding_dim, embedding_dim))
        self.graph_post_embedding = message_passing.GraphPostEmbedding(embedding_dim)
        self.merge_global_embedding = sg_nn.ConcatenateLinear(embedding_dim, 2 * embedding_dim, embedding_dim)

    def forward(self, data):
        """Return (node_post_embedding, merged_global_embedding).

        `data` is assumed to be the batch dict from this model's pipeline with
        keys 'graph', 'node_features', 'sparse_node_features',
        'last_graph_node_index', 'node_features_graph_index' — TODO confirm.
        """
        graph = data['graph']
        with torch.autograd.profiler.record_function('entity_embeddings'):
            node_pre_embedding = self.node_embedding(data['node_features'].data, data['sparse_node_features'])
        with torch.autograd.profiler.record_function('global_embedding'):
            # node_features is packed-sequence-shaped; reuse its batch sizes.
            node_pre_embedding_packed = torch.nn.utils.rnn.PackedSequence(
                node_pre_embedding, data['node_features'].batch_sizes)
            output, _ = self.global_entity_embedding(node_pre_embedding_packed)
            # GRU output at each graph's final node -> per-graph embedding.
            global_embedding = output.data.index_select(0, data['last_graph_node_index'])
        with torch.autograd.profiler.record_function('message_passing'):
            node_pre_embedding_graph = node_pre_embedding.index_select(0, data['node_features_graph_index'])
            edge_pre_embedding = self.edge_embedding(graph.edge_features)
            node_post_embedding = self.message_passing(node_pre_embedding_graph, graph.incidence, (edge_pre_embedding,))
        graph_post_embedding = self.graph_post_embedding(node_post_embedding, graph)
        merged_global_embedding = self.merge_global_embedding(global_embedding, graph_post_embedding)
        return node_post_embedding, merged_global_embedding
class RecurrentEmbeddingModelCore(torch.nn.Module):
    """Embedding core like GlobalEmbeddingModelCore, but the GRU's final
    hidden state serves as the global embedding, and the GRU-transformed node
    sequence (not the raw pre-embedding) feeds message passing.
    """

    def __init__(self, embedding_dim, feature_dims, depth=3):
        super(RecurrentEmbeddingModelCore, self).__init__()
        self.embedding_dim = embedding_dim
        # Per-node-type numerical feature encoders feeding a shared embedding.
        self.node_embedding = message_passing.DenseSparsePreEmbedding(
            target.TargetType, {
                k.name: torch.nn.Sequential(
                    numerical_features.NumericalFeatureEncoding(fd.values(), embedding_dim),
                    numerical_features.NumericalFeaturesEmbedding(embedding_dim)
                )
                for k, fd in feature_dims.items()
            },
            len(target.NODE_TYPES), embedding_dim)
        # 3-layer GRU over the node sequence; its last-layer hidden state is
        # the global embedding.
        self.global_entity_embedding = torch.nn.GRU(
            input_size=embedding_dim,
            hidden_size=embedding_dim,
            num_layers=3)
        self.edge_embedding = torch.nn.Embedding(len(target.EDGE_TYPES), embedding_dim)
        self.message_passing = sg_nn.MessagePassingNetwork(
            depth, torch.nn.GRUCell(embedding_dim, embedding_dim),
            sg_nn.ConcatenateLinear(embedding_dim, embedding_dim, embedding_dim))
        self.graph_post_embedding = message_passing.GraphPostEmbedding(embedding_dim)
        self.merge_global_embedding = sg_nn.ConcatenateLinear(embedding_dim, 2 * embedding_dim, embedding_dim)

    def forward(self, data):
        """Return (node_post_embedding, merged_global_embedding)."""
        graph = data['graph']
        with torch.autograd.profiler.record_function('entity_embeddings'):
            node_pre_embedding = self.node_embedding(data['node_features'].data, data['sparse_node_features'])
        with torch.autograd.profiler.record_function('global_embedding'):
            node_pre_embedding_packed = torch.nn.utils.rnn.PackedSequence(
                node_pre_embedding, data['node_features'].batch_sizes)
            node_pre_embedding_transformed, state = self.global_entity_embedding(node_pre_embedding_packed)
            # Final hidden state of the top GRU layer as the global embedding.
            global_embedding = state[-1]
        with torch.autograd.profiler.record_function('message_passing'):
            # Unlike GlobalEmbeddingModelCore, message passing consumes the
            # GRU-transformed node embeddings.
            node_pre_embedding_graph = node_pre_embedding_transformed.data.index_select(0, data['node_features_graph_index'])
            edge_pre_embedding = self.edge_embedding(graph.edge_features)
            node_post_embedding = self.message_passing(node_pre_embedding_graph, graph.incidence, (edge_pre_embedding,))
        graph_post_embedding = self.graph_post_embedding(node_post_embedding, graph)
        merged_global_embedding = self.merge_global_embedding(global_embedding, graph_post_embedding)
        return node_post_embedding, merged_global_embedding
class BidirectionalRecurrentModelCore(torch.nn.Module):
    """Embedding core using a bidirectional GRU; the two directions' final
    top-layer hidden states are concatenated into the global embedding, and a
    linear layer merges the bidirectional node outputs back to embedding_dim.
    """

    def __init__(self, embedding_dim, feature_dims, depth=3):
        super(BidirectionalRecurrentModelCore, self).__init__()
        self.embedding_dim = embedding_dim
        # Per-node-type numerical feature encoders feeding a shared embedding.
        self.node_embedding = message_passing.DenseSparsePreEmbedding(
            target.TargetType, {
                k.name: torch.nn.Sequential(
                    numerical_features.NumericalFeatureEncoding(fd.values(), embedding_dim),
                    numerical_features.NumericalFeaturesEmbedding(embedding_dim)
                )
                for k, fd in feature_dims.items()
            },
            len(target.NODE_TYPES), embedding_dim)
        # 3-layer bidirectional GRU over the node sequence.
        self.node_pre_embedding_transform = torch.nn.GRU(
            input_size=embedding_dim,
            hidden_size=embedding_dim,
            num_layers=3,
            bidirectional=True)
        # Projects the concatenated forward/backward node outputs back down.
        self.node_pre_embedding_merge_direction = torch.nn.Linear(2 * embedding_dim, embedding_dim)
        self.edge_embedding = torch.nn.Embedding(len(target.EDGE_TYPES), embedding_dim)
        self.message_passing = sg_nn.MessagePassingNetwork(
            depth, torch.nn.GRUCell(embedding_dim, embedding_dim),
            sg_nn.ConcatenateLinear(embedding_dim, embedding_dim, embedding_dim))
        self.graph_post_embedding = message_passing.GraphPostEmbedding(embedding_dim)
        # Global embedding is 2*embedding_dim wide here (both directions).
        self.merge_global_embedding = sg_nn.ConcatenateLinear(2 * embedding_dim, 2 * embedding_dim, embedding_dim)

    def forward(self, data):
        """Return (node_post_embedding, merged_global_embedding)."""
        graph = data['graph']
        with torch.autograd.profiler.record_function('entity_embeddings'):
            node_pre_embedding = self.node_embedding(data['node_features'].data, data['sparse_node_features'])
        with torch.autograd.profiler.record_function('global_embedding'):
            node_pre_embedding_packed = torch.nn.utils.rnn.PackedSequence(
                node_pre_embedding, data['node_features'].batch_sizes)
            node_pre_embedding_transformed, state = self.node_pre_embedding_transform(node_pre_embedding_packed)
            # state is (num_layers * num_directions, batch, dim); view as
            # (3 layers, 2 directions, batch, dim), take the top layer, and
            # flatten the two directions into one 2*dim vector per graph.
            global_embedding = torch.flatten(torch.transpose(
                state.view(3, 2, -1, self.embedding_dim)[-1], 0, 1),
                start_dim=1)
        with torch.autograd.profiler.record_function('message_passing'):
            node_pre_embedding_graph_bidir = node_pre_embedding_transformed.data.index_select(0, data['node_features_graph_index'])
            node_pre_embedding_graph = self.node_pre_embedding_merge_direction(node_pre_embedding_graph_bidir)
            edge_pre_embedding = self.edge_embedding(graph.edge_features)
            node_post_embedding = self.message_passing(node_pre_embedding_graph, graph.incidence, (edge_pre_embedding,))
        graph_post_embedding = self.graph_post_embedding(node_post_embedding, graph)
        merged_global_embedding = self.merge_global_embedding(global_embedding, graph_post_embedding)
        return node_post_embedding, merged_global_embedding
# Registry mapping configuration names to the available embedding-core classes.
MODEL_CORES = {
    'global_embedding': GlobalEmbeddingModelCore,
    'recurrent_embedding': RecurrentEmbeddingModelCore,
    'bidirectional_recurrent': BidirectionalRecurrentModelCore
}
class AutoconstraintModel(torch.nn.Module):
    """Autoconstraint prediction head on top of one of the MODEL_CORES.

    Predicts, for the current node, which existing node (if any) it forms a
    constraint edge with, and the label of that edge.
    """

    def __init__(self, model_core):
        super(AutoconstraintModel, self).__init__()
        self.model_core = model_core
        embedding_dim = model_core.embedding_dim
        # Scores each candidate partner node (one logit per node).
        self.edge_partner_network = EdgePartnerNetwork(
            torch.nn.Sequential(
                torch.nn.Linear(3 * embedding_dim, embedding_dim),
                torch.nn.ReLU(),
                torch.nn.Linear(embedding_dim, 1)))
        # Classifies the edge label from (current, partner, global) embeddings.
        self.edge_label = torch.nn.Sequential(
            torch.nn.Linear(3 * embedding_dim, embedding_dim),
            torch.nn.ReLU(),
            torch.nn.Linear(embedding_dim, embedding_dim),
            torch.nn.ReLU(),
            torch.nn.Linear(embedding_dim, len(target.EDGE_TYPES_PREDICTED)))

    def _compute_label_logits_partner(self, graph, node_post_embedding, global_embedding, partner_index):
        # Label logits only for the ground-truth partner of each graph.
        # node_offsets[1:][i] - 1 indexes each graph's last ("current") node.
        node_current_post_embedding_label = node_post_embedding.index_select(
            0, graph.node_offsets[1:][partner_index.index] - 1)
        node_partner_post_embedding_label = node_post_embedding.index_select(0, partner_index.values)
        merged_global_embedding_label = global_embedding.index_select(0, partner_index.index)
        edge_label_input = torch.cat(
            (node_current_post_embedding_label, node_partner_post_embedding_label, merged_global_embedding_label), dim=-1)
        return self.edge_label(edge_label_input)

    def _compute_all_label_logits(self, graph, node_post_embedding, global_embedding):
        # Label logits for every candidate partner node in every graph.
        node_current_post_embedding = (node_post_embedding
                                       .index_select(0, graph.node_offsets[1:] - 1)
                                       .repeat_interleave(graph.node_counts, 0))
        global_embedding = global_embedding.repeat_interleave(graph.node_counts, 0)
        edge_label_input = torch.cat(
            (node_current_post_embedding, node_post_embedding, global_embedding), dim=-1)
        return self.edge_label(edge_label_input)

    def forward(self, data, compute_all_label_logits=False):
        """Return dict with 'edge_partner_logits' and 'edge_label_logits'.

        With compute_all_label_logits=True (inference), labels are scored for
        all candidate partners; otherwise only for the provided ground-truth
        'partner_index' (training).
        """
        graph = data['graph']
        node_post_embedding, merged_global_embedding = self.model_core(data)
        with torch.autograd.profiler.record_function('edge_partner'):
            edge_partner_logits = self.edge_partner_network(
                node_post_embedding, merged_global_embedding, graph)
        with torch.autograd.profiler.record_function('edge_label'):
            if compute_all_label_logits:
                edge_label_logits = self._compute_all_label_logits(
                    graph, node_post_embedding, merged_global_embedding)
            else:
                edge_label_logits = self._compute_label_logits_partner(
                    graph, node_post_embedding, merged_global_embedding, data['partner_index'])
        return {
            'edge_partner_logits': edge_partner_logits,
            'edge_label_logits': edge_label_logits
        }
def segment_stop_loss(partner_logits, segment_offsets, partner_index, stop_partner_index_index, reduction='sum'):
    """Compute the stop loss and partner-selection loss over logit segments.

    :param partner_logits: flat tensor of per-node partner logits.
    :param segment_offsets: offsets delimiting each graph's logit segment.
    :param partner_index: index structure with ``.index`` (segment id) and
        ``.values`` (flat node index) for the true partners.
    :param stop_partner_index_index: segment ids whose correct action is stop.
    :param reduction: 'sum', 'mean' or 'none'.
    :returns: (stop_loss, partner_loss) reduced as requested.
    """
    scopes = scopes_from_offsets(segment_offsets)
    log_weight_other = sg_nn.functional.segment_logsumexp(partner_logits, scopes)
    # softplus(logsumexp) is the log-partition including the implicit stop action
    total_weight = torch.nn.functional.softplus(log_weight_other)
    stop_loss = total_weight.index_select(0, stop_partner_index_index)
    # Fix: use `.values` — the attribute used for this index structure
    # everywhere else in the file; `.value` was a typo.
    partner_loss = (total_weight.index_select(0, partner_index.index)
                    - partner_logits.index_select(0, partner_index.values))
    if reduction == 'sum':
        return stop_loss.sum(), partner_loss.sum()
    elif reduction == 'mean':
        return stop_loss.mean(), partner_loss.mean()
    elif reduction == 'none':
        return stop_loss, partner_loss
    else:
        raise ValueError('Reduction must be one of sum, mean or none.')
def segment_stop_accuracy(partner_logits, segment_offsets, target_idx, stop_partner_index_index):
    """Computes the accuracy for stop prediction for partner logits.

    :param target_idx: same index structure as ``partner_index`` elsewhere
        (``.index`` = segment id, ``.values`` = flat target node index) —
        the call site passes ``data['partner_index']``.
    :returns: (stop_accuracy, partner_accuracy) as float scalars.
    """
    scopes = scopes_from_offsets(segment_offsets)
    max_logit_in_segment, max_logit_indices = sg_nn.functional.segment_argmax(
        partner_logits.detach(), scopes)
    # A negative segment maximum means the implicit stop action wins.
    prediction_stop_correct = (max_logit_in_segment < 0).index_select(0, stop_partner_index_index)
    # Fix: this structure exposes `.index`/`.values` (see segment_stop_loss);
    # `.indices` was a typo.
    prediction_partner_correct = (
        (max_logit_in_segment > 0).index_select(0, target_idx.index) &
        ((max_logit_indices + segment_offsets[:-1]).index_select(0, target_idx.index) == target_idx.values))
    return prediction_stop_correct.float().mean(), prediction_partner_correct.float().mean()
def compute_losses(data, readout, reduction='sum'):
    """Compute stop/partner/label losses and accuracies for one batch.

    :param data: batch dict providing 'graph', 'partner_index',
        'stop_partner_index_index' and 'edge_label'.
    :param readout: model outputs with 'edge_partner_logits' and
        'edge_label_logits'.
    :param reduction: forwarded to the loss terms ('sum', 'mean' or 'none').
    :returns: (losses, accuracies) tuple of dicts keyed by 'edge_partner',
        'edge_stop' and 'edge_label'.
    """
    edge_stop_loss, partner_loss = segment_stop_loss(
        readout['edge_partner_logits'], data['graph'].node_offsets,
        data['partner_index'], data['stop_partner_index_index'],
        reduction=reduction)
    edge_stop_accuracy, edge_partner_accuracy = segment_stop_accuracy(
        readout['edge_partner_logits'], data['graph'].node_offsets,
        data['partner_index'], data['stop_partner_index_index'])
    edge_label_loss = torch.nn.functional.cross_entropy(
        readout['edge_label_logits'], data['edge_label'],
        reduction=reduction)
    if len(readout['edge_label_logits']) > 0:
        edge_label_accuracy = (torch.argmax(readout['edge_label_logits'], dim=-1) == data['edge_label']).float().mean()
    else:
        # NOTE(review): with no label predictions an *empty* tensor is
        # returned (not 0 or NaN) — confirm downstream consumers handle it.
        edge_label_accuracy = readout['edge_label_logits'].new_empty([0])
    return {
        'edge_partner': partner_loss,
        'edge_stop': edge_stop_loss,
        'edge_label': edge_label_loss,
    }, {
        'edge_partner': edge_partner_accuracy,
        'edge_stop': edge_stop_accuracy,
        'edge_label': edge_label_accuracy,
    }
def compute_average_losses(data, losses):
    """Normalize summed losses by the number of contributing targets.

    Partner and label losses are averaged over the number of labeled edges;
    the stop loss over the number of stop decisions.
    """
    num_edges = data['edge_label'].shape[0]
    num_stops = data['stop_partner_index_index'].shape[0]
    return {
        'edge_partner': losses['edge_partner'] / num_edges,
        'edge_label': losses['edge_label'] / num_edges,
        'edge_stop': losses['edge_stop'] / num_stops,
    }
|
python
|
# Ensures the validity of blocks inside a blockchain
# Copyright (c) 2022 gparap
from Blockchain import Blockchain
class Validator:
    """Ensures the validity of blocks inside a blockchain."""

    def __init__(self, blockchain: "Blockchain"):
        self.blockchain = blockchain

    # the previous hash of the current block must match the hash of the
    # previous block
    def validate_blockchain(self):
        """Walk the chain and verify each block's prev_hash link.

        Prints whether the blockchain is valid; returns None either way.
        """
        # Fixes: manual index counter replaced with enumerate; redundant
        # trailing `pass` removed. Behavior is otherwise unchanged.
        for i, block in enumerate(self.blockchain.get_all_blocks()):
            # skip genesis block — it has no predecessor to check against
            if i == 0:
                continue
            prev_block_hash = self.blockchain.get_block_at(i - 1).get_hash()
            if block.prev_hash != prev_block_hash:
                print("blockchain is invalid...")
                return
        print("blockchain is valid...")
|
python
|
from pdb import set_trace as T
import numpy as np
from itertools import chain
from neural_mmo.forge.blade import core
from neural_mmo.forge.blade.lib import material
from random import randint
import os
numsent = 0
class Map:
    '''Map object representing a list of tiles

    Also tracks a sparse list of tile updates
    '''
    def __init__(self, config, realm):
        self.config = config
        sz = config.TERRAIN_SIZE
        # object-dtype grid so each cell can hold a Tile instance
        self.tiles = np.zeros((sz, sz), dtype=object)
        for r in range(sz):
            for c in range(sz):
                self.tiles[r, c] = core.Tile(config, realm, r, c)

    @property
    def packet(self):
        '''Packet of degenerate resource states'''
        # NOTE(review): self.updateList is only created in reset(); this
        # property assumes reset() has been called at least once.
        missingResources = []
        for e in self.updateList:
            missingResources.append((e.r.val, e.c.val, e.index.val))
        return missingResources

    @property
    def repr(self):
        '''Flat matrix of tile material indices'''
        return [[t.mat.index for t in row] for row in self.tiles]

    @property
    def items(self):
        '''Item counts on dirty grass tiles, keyed by item type.'''
        global numsent
        numsent += 1  # module-level counter of how many times items was served
        item_types = self.tiles[0, 0].items_dict.keys()
        ret = {itm: [] for itm in item_types}
        for itm in item_types:
            temp = []
            for row in self.tiles:
                for t in row:
                    # NOTE(review): compares the material object itself to
                    # material.Grass; an earlier (commented-out) variant
                    # compared t.mat.index — confirm which is intended.
                    if t.mat == material.Grass and t.dirty:
                        temp.append((t.r.val, t.c.val, t.items_dict[itm].val))
            ret[itm] = [i for i in temp]
        return ret

    def reset(self, realm, idx):
        '''Reuse the current tile objects to load a new map

        :param realm: unused here; kept for interface compatibility
        :param idx: index of the map file to load
        '''
        self.updateList = set()
        materials = {mat.index: mat for mat in material.All}
        fPath = os.path.join(self.config.PATH_MAPS,
                             self.config.PATH_MAP_SUFFIX.format(idx))
        for r, row in enumerate(np.load(fPath)):
            # fix: the inner loop variable previously shadowed the `idx`
            # parameter; renamed to `mat_idx` (behavior unchanged because
            # the parameter was only read before the loop).
            for c, mat_idx in enumerate(row):
                mat = materials[mat_idx]
                tile = self.tiles[r, c]
                tile.reset(mat, self.config)

    def step(self):
        '''Evaluate updatable tiles'''
        # iterate a copy because static tiles are removed mid-iteration
        for e in self.updateList.copy():
            if e.static:
                self.updateList.remove(e)
            e.step()

    def harvest(self, r, c):
        '''Called by actions that harvest a resource tile'''
        self.updateList.add(self.tiles[r, c])
        return self.tiles[r, c].harvest()
|
python
|
# encoding: cinje
: # Browse page template: renders a letters/counts bar and an optional
: # name list inside the shared page layout.
: from .template import page
: from .letterscountsbar import letterscountsbar
: from .namelist import namelist
: def browsetemplate title, ctx, letterscountslist, names=None
	: using page title, ctx, lang="en"
		<div class="row">
			: if letterscountslist is not None
				: use letterscountsbar ctx, letterscountslist
			: end
			<div class="browselist">
				: if names is not None
					: flush
					: yield from namelist(ctx, names)
				: end
			</div>
		</div>
	: end
: end
|
python
|
# [Greedy] Change-making
# ========== input ===========
# n : amount of change to hand back
# ========== output ==========
# minimum number of coins adding up to the amount
# Approach: starting from the largest coin, hand out as many as possible.
n = int(input())
count = 0
for coin in (500, 100, 50, 10):
    handed, n = divmod(n, coin)
    count += handed
print(count)
|
python
|
from collections import namedtuple
from copy import copy
from datetime import *
from geojson import Feature, Point, FeatureCollection, LineString
from geojson.mapping import to_mapping
from typing import List, Dict
import functools
import os
import struct
import pyall
from hyo2.mate.lib.scan import Scan, A_NONE, A_PARTIAL, A_FULL, A_FAIL, A_PASS
from hyo2.mate.lib.scan import ScanState, ScanResult
class ScanALL(Scan):
'''
A Scan object that contains check information on the contents of a Kongsberg .all file
:param file_path: The file path to the .all file
:type file_path: str
'''
    def __init__(self, file_path):
        # Initialize base Scan state, then open the raw file twice: once as a
        # plain binary handle and once through the pyall datagram reader.
        # NOTE(review): self.reader is never closed and is not used by any
        # method in this class — confirm it is still needed.
        Scan.__init__(self, file_path)
        self.reader = open(self.file_path, 'rb')
        self.all_reader = pyall.ALLReader(self.file_path)
def get_size_n_pings(self, pings):
'''
return bytes in the file which contain specified
number of pings
'''
c_bytes = 0
result = {}
self.all_reader.rewind()
while self.all_reader.readDatagramHeader():
# read datagram header
header = self.all_reader.readDatagramHeader()
num_bytes, stx, dg_type, em_model, record_date, record_time, \
counter, serial_number = header
dg_type, datagram = self.all_reader.readDatagram()
c_bytes += num_bytes
if dg_type in ['D', 'X', 'F', 'f', 'N', 'S', 'Y']:
if dg_type not in result.keys():
result[dg_type] = {
'seqNo': counter,
'count': 0,
}
if counter > result[dg_type]['seqNo']:
result[dg_type]['count'] += 1
if result[dg_type]['count'] > pings:
break
result[dg_type]['seqNo'] = counter
else:
return None
return c_bytes
def scan_datagram(self, progress_callback=None):
'''scan data to extract basic information for each type of datagram'''
# summary type information stored in plain dict
self.scan_result = {}
# datagram objects
self.datagrams = {}
while self.all_reader.moreData():
# update progress
self.progress = 1.0 - (self.all_reader.moreData() / self.file_size)
if progress_callback is not None:
progress_callback(self.progress)
# read datagram header
header = self.all_reader.readDatagramHeader()
num_bytes, stx, dg_type, em_model, record_date, record_time, \
counter, serial_number = header
time_stamp = pyall.to_DateTime(record_date, record_time)
dg_type, datagram = self.all_reader.readDatagram()
if dg_type not in self.scan_result.keys():
self.scan_result[dg_type] = copy(self.default_info)
self.scan_result[dg_type]['_seqNo'] = None
# save datagram info
self.scan_result[dg_type]['byteCount'] += num_bytes
self.scan_result[dg_type]['recordCount'] += 1
if self.scan_result[dg_type]['startTime'] is None:
self.scan_result[dg_type]['startTime'] = time_stamp
self.scan_result[dg_type]['stopTime'] = time_stamp
if dg_type == 'I':
# Instrument parameters
# we care about this datagram so read its contents
datagram.read()
self._push_datagram(dg_type, datagram)
elif dg_type == 'h':
# height
if 'h' not in self.datagrams:
# only read the first h datagram, this is all we need
# to check the height type (assuming all h datagrams)
# share the same type
datagram.read()
self._push_datagram(dg_type, datagram)
elif dg_type == 'R':
datagram.read()
self._push_datagram(dg_type, datagram)
elif dg_type == 'A':
datagram.read()
self._push_datagram(dg_type, datagram)
elif dg_type == 'n':
datagram.read()
self._push_datagram(dg_type, datagram)
elif dg_type == 'P':
datagram.read()
self._push_datagram(dg_type, datagram)
elif dg_type == 'G':
# problem
datagram.read()
self._push_datagram(dg_type, datagram)
elif dg_type == 'U':
datagram.read()
self._push_datagram(dg_type, datagram)
elif dg_type == 'D':
datagram.read()
self._push_datagram(dg_type, datagram)
elif dg_type == 'X':
datagram.read()
self._push_datagram(dg_type, datagram)
elif dg_type == 'F':
datagram.read()
self._push_datagram(dg_type, datagram)
elif dg_type == 'f':
datagram.read()
self._push_datagram(dg_type, datagram)
elif dg_type == 'N':
datagram.read()
self._push_datagram(dg_type, datagram)
elif dg_type == 'S':
datagram.read()
self._push_datagram(dg_type, datagram)
elif dg_type == 'Y':
datagram.read()
self._push_datagram(dg_type, datagram)
if dg_type in ['D', 'X', 'F', 'f', 'N', 'S', 'Y']:
this_count = counter
last_count = self.scan_result[dg_type]['_seqNo']
if last_count is None:
last_count = this_count
if this_count - last_count >= 1:
self.scan_result[dg_type]['missedPings'] += \
this_count - last_count - 1
self.scan_result[dg_type]['pingCount'] += 1
self.scan_result[dg_type]['_seqNo'] = this_count
return
def get_installation_parameters(self):
'''
Gets the decoded contents of the I datagram (installation parameters)
as a Python dict. If multiple I datagrams are included only decoded
parameters from the first will be included.
Will return None if datagram not present in *.all file
'''
if 'I' not in self.datagrams:
return None
installationParametersDatagrams = self.datagrams['I']
for installationParametersDatagram in installationParametersDatagrams:
# skip datagrams with no params
if len(installationParametersDatagram.installationParameters) == 0:
continue
return installationParametersDatagram.installationParameters
return None
def get_datagram_format_version(self):
'''
gets the version of the datagram format used by this file
'''
rec = self.get_installation_parameters()
dsv = None
if rec is not None and 'DSV' in rec.keys():
dsv = rec['DSV']
return dsv
def get_active_sensors(self):
installationParameters = self.get_installation_parameters()
activeSensors = {'Position': installationParameters['APS'],
'RollPitch': installationParameters['ARO'],
'Heave': installationParameters['AHE'],
'Heading': installationParameters['AHS'],
'Attvel': installationParameters['VSN']}
return activeSensors
    def filename_changed(self) -> ScanResult:
        '''
        Check if the filename is different from what recorded
        in the datagram. Requires I - Installation Parameters datagram

        :return: :class:`hyo2.mate.lib.scan.ScanResult`
        '''
        if 'I' not in self.datagrams:
            # then there's no way to check, so fail test
            return ScanResult(
                state=ScanState.FAIL,
                messages=[
                    "'I' datagram not found, cannot extract original filename"]
            )
        base_fn = os.path.basename(self.file_path)
        found_filenames = []
        installationParametersDatagrams = self.datagrams['I']
        for installationParametersDatagram in installationParametersDatagrams:
            if 'RFN' not in installationParametersDatagram.installationParameters:
                # then filename hasn't been included
                continue
            rfn = installationParametersDatagram.installationParameters['RFN']
            found_filenames.append(rfn)
            # PASS on the first recorded filename that matches ours
            matched = base_fn == rfn
            if matched:
                return ScanResult(state=ScanState.PASS)
        msg = (
            "Filename {} did not match any filenames specified within the "
            "installation parameters (I) datagram. The following were found; "
            "{}".format(base_fn, ', '.join(found_filenames))
        )
        # then none of the installation parameter datagrams included the
        # filename that this data has been loaded from.
        return ScanResult(
            state=ScanState.FAIL,
            messages=[msg],
            data={'found_filenames': found_filenames}
        )
def date_match(self) -> ScanResult:
'''
Compare the date as in the I datagram and the date as written
in the filename
:return: :class:`hyo2.mate.lib.scan.ScanResult`
'''
dt = 'unknown'
if 'I' not in self.datagrams:
# then there's no way to check, so fail test
return ScanResult(
state=ScanState.FAIL,
messages=[
"'I' datagram not found, cannot extract startTime"]
)
base_fn = os.path.basename(self.file_path)
installationParametersDatagrams = self.datagrams['I']
# assume we just use the first one we find
rec = installationParametersDatagrams[0]
found = str(rec.RecordDate) in base_fn
if found:
return ScanResult(state=ScanState.PASS)
else:
msg = (
"Could not find record date {} in filename"
.format(rec.RecordDate)
)
return ScanResult(
state=ScanState.FAIL,
messages=[msg]
)
    def bathymetry_availability(self) -> ScanResult:
        '''
        Checks the contents of a Kongsberg .all file for all required datagrams when bathymetry processing

        :return: :class:`hyo2.mate.lib.scan.ScanResult`
        '''
        # define a named tuple for bathy datagrams. Provides a means to define
        # then reference this info.
        Datagram = namedtuple(
            'Datagram',
            'id critical error_message alternatives'
        )
        # Each entry: datagram id, whether missing it is critical, message
        # reported when missing, and acceptable alternative datagram ids.
        bathy_datagrams = [
            Datagram(
                'I',
                False,
                "Warning: installation parameters are missing please ensure that you have your lever arms and vessel frame parameters collected elsewhere.",
                []
            ),
            Datagram(
                'R',
                False,
                "Warning: runtime parameters are missing these are critical for backscatter processing streams. If just collecting bathymetry, please consider other users of this data.",
                []
            ),
            # NOTE(review): 'A' is the attitude datagram, but its message
            # talks about runtime parameters — looks copy-pasted from the
            # 'R' entry above; confirm and reword.
            Datagram(
                'A',
                True,
                "Critical: runtime parameters are missing these are critical for backscatter processing streams. If just collecting bathymetry, please consider other users of this data.",
                []
            ),
            Datagram(
                'n',
                False,
                "Warning: your network attitude and velocity is not being logged. If you intend working in deeper water with a frequency modulated chirp you will need to interface this data.",
                []
            ),
            Datagram(
                'P',
                True,
                "Critical: position data missing, you will not be able to process this data without ensuring this data is being collected.",
                []
            ),
            Datagram(
                'G',
                False,
                "Warning: surface sound velocity data is missing, ensure your sensor is working or collect as many profiles as possible to attempt to compensate.",
                []
            ),
            Datagram(
                'U',
                False,
                "Warning: no sound velocity profile data has been collected in the raw file, please ensure you are collecting this data elsewhere.",
                []
            ),
            Datagram(
                'D',
                False,
                "Warning: neither datagram 'D' or 'X' were found, processed depth information is missing.",
                ['X']
            ),
            Datagram(
                'F',
                True,
                "Critical: neither datagram 'F', 'f' or 'N' were found. Critical range and angle data missing, you are not collecting the data required for post processing. If you are collecting processed depths it is possible to back process however it is not desirable and is a complex process.",
                ['f', 'N']
            )
        ]
        return self.__result_from_datagram_presence(bathy_datagrams)
def backscatter_availability(self) -> ScanResult:
'''
Checks the contents of a Kongsberg .all file for all required datagrams when backscatter processing
:return: :class:`hyo2.mate.lib.scan.ScanResult`
'''
# define a named tuple for datagrams. Provides a means to define
# then reference this info.
Datagram = namedtuple(
'Datagram',
'id critical error_message alternatives'
)
bs_datagrams = [
Datagram(
'S',
True,
"Critical: backscatter information is missing ('S' or 'Y' datagram). You will not be able to process backscatter without seabed image data. If you intend processing backscatter check your setup.",
['Y']
)
]
return self.__result_from_datagram_presence(bs_datagrams)
    def ray_tracing_availability(self) -> ScanResult:
        '''
        Checks the contents of a Kongsberg .all file for all required datagrams when recalculating ray tracing

        :return: :class:`hyo2.mate.lib.scan.ScanResult`
        '''
        # define a named tuple for datagrams. Provides a means to define
        # then reference this info.
        Datagram = namedtuple(
            'Datagram',
            'id critical error_message alternatives'
        )
        # Same structure as bathymetry_availability, minus the processed
        # depth ('D'/'X') requirement, which ray tracing does not need.
        rt_datagrams = [
            Datagram(
                'I',
                False,
                "Warning: installation parameters are missing please ensure that you have your lever arms and vessel frame parameters collected elsewhere.",
                []
            ),
            Datagram(
                'R',
                False,
                "Warning: runtime parameters are missing these are critical for backscatter processing streams. If just collecting bathymetry, please consider other users of this data.",
                []
            ),
            # NOTE(review): 'A' is the attitude datagram but its message
            # mentions runtime parameters — likely copy-pasted; confirm.
            Datagram(
                'A',
                True,
                "Critical: runtime parameters are missing these are critical for backscatter processing streams. If just collecting bathymetry, please consider other users of this data.",
                []
            ),
            Datagram(
                'n',
                False,
                "Warning: your network attitude and velocity is not being logged. If you intend working in deeper water with a frequency modulated chirp you will need to interface this data.",
                []
            ),
            Datagram(
                'P',
                True,
                "Critical: position data missing, you will not be able to process this data without ensuring this data is being collected.",
                []
            ),
            Datagram(
                'G',
                False,
                "Warning: surface sound velocity data is missing, ensure your sensor is working or collect as many profiles as possible to attempt to compensate.",
                []
            ),
            Datagram(
                'U',
                False,
                "Warning: no sound velocity profile data has been collected in the raw file, please ensure you are collecting this data elsewhere.",
                []
            ),
            Datagram(
                'F',
                True,
                "Critical: neither datagram 'F', 'f' or 'N' were found. Critical range and angle data missing, you are not collecting the data required for post processing. If you are collecting processed depths it is possible to back process however it is not desirable and is a complex process.",
                ['f', 'N']
            )
        ]
        return self.__result_from_datagram_presence(rt_datagrams)
    def __result_from_datagram_presence(self, required_datagrams):
        '''
        Util function, checks the datagrams that have been read during the
        scan against the `required_datagrams` list. Critical vs Warning, and
        associated messages are extracted from the attributes of the datagram
        tuples in `required_datagrams`

        :param required_datagrams: list of Datagram namedtuples
            (id, critical, error_message, alternatives)
        :return: :class:`hyo2.mate.lib.scan.ScanResult`
        '''
        # tl;dr this cuts down on amount of code that was duplicated across
        # a number of check functions
        present_datagrams = self.datagrams.keys()
        # "all critical present" / "all non-critical present" flags; each is
        # cleared as soon as one datagram of that class is missing
        all_critical = True
        all_noncritical = True
        missing_critical = []
        missing_noncritical = []
        present = []
        messages = []
        for required_datagram in required_datagrams:
            # the bathy datagram was not read from file
            not_found = required_datagram.id not in present_datagrams
            if not_found:
                # it may be that one of the alternative datagrams exist, so
                # loop through these to see if they exist
                found_in_alts = functools.reduce(
                    lambda a,b : (b in present_datagrams) or a, required_datagram.alternatives,
                    False)
                not_found = not found_in_alts
            if not_found and required_datagram.critical:
                all_critical = False
                missing_critical.append(required_datagram.id)
                messages.append(required_datagram.error_message)
            elif not_found and not required_datagram.critical:
                all_noncritical = False
                missing_noncritical.append(required_datagram.id)
                messages.append(required_datagram.error_message)
            else:
                present.append(required_datagram.id)
        # include a lists of missing datagrams in result object
        data = {
            'missing_critical': missing_critical,
            'missing_noncritical': missing_noncritical,
            'present': present
        }
        # any missing critical datagram fails outright; missing non-critical
        # ones only downgrade the result to a warning
        if not all_critical:
            return ScanResult(
                state=ScanState.FAIL,
                messages=messages,
                data=data
            )
        elif not all_noncritical:
            return ScanResult(
                state=ScanState.WARNING,
                messages=messages,
                data=data
            )
        else:
            return ScanResult(
                state=ScanState.PASS,
                messages=messages,
                data=data
            )
def is_missing_pings_tolerable(self, thresh=1.0):
'''
check for the number of missing pings in all multibeam
data datagrams (D or X, F or f or N, S or Y)
(allow the difference <= 1%)
return: True/False
'''
for d_type in ['D', 'X', 'F', 'f', 'N', 'S', 'Y']:
if d_type in self.scan_result.keys():
rec = self.scan_result[d_type]
if rec['pingCount'] == 0:
continue
if rec['missedPings'] * 100.0 / rec['pingCount'] > thresh:
return False
return True
def has_minimum_pings(self, thresh=10):
'''
check if we have minimum number of requied pings in all multibeam
data datagrams (D or X, F or f or N, S or Y)
(minimum number is 10)
return: True/False
'''
for d_type in ['D', 'X', 'F', 'f', 'N', 'S', 'Y']:
if d_type in self.scan_result.keys():
rec = self.scan_result[d_type]
if rec['pingCount'] < thresh:
return False
return True
def ellipsoid_height_availability(self) -> ScanResult:
'''
check the presence of the datagrams h and the height type is 0 (The
height is derived from the GGK or GGA datagram and is the height of
the water level at the vertical datum (possibly motion corrected).)
All parsed 'h' datagrams must have the height type of 0 to pass. BUT
currently only the first 'h' datagram is read (for performance reasons)
:return: :class:`hyo2.mate.lib.scan.ScanResult`
'''
if 'h' not in self.datagrams:
# then there's no way to check, so fail test
return ScanResult(
state=ScanState.FAIL,
messages=["'h' datagram not found"]
)
height_datagrams = self.datagrams['h']
first_height_datagram = height_datagrams[0]
if first_height_datagram.HeightType == 0:
return ScanResult(
state=ScanState.PASS
)
else:
return ScanResult(
state=ScanState.FAIL,
messages=[(
"Height 'h' datagram was included but HeightType did not "
"match expected value of 0 (found {})"
.format(first_height_datagram.HeightType)
)]
)
def ellipsoid_height_setup(self) -> ScanResult:
'''
Decode the raw n - network attitude and velocity input data to try determine positioning system
in use. This is done for the active system only
If positioning system is able to be determined decode raw P - position data input to check
the correct positioning string is interfaced that contains ellipsoid heights. This is done
for the active system only
Example is that if a PosMV or F180 is interfaced the raw position string
needs to be a GGK message
if a seapath binary message is interfaced then there is no way to determine
which positioning system is interfaced and the user will need to check documentation
to ensure the string interfaced for positioning contains ellipsoid heights
:return: :class:`hyo2.mate.lib.scan.ScanResult`
'''
if 'n' not in self.datagrams:
return ScanResult(
state=ScanState.WARNING,
messages="Unable to conduct height setup check. No network attitude and velocity datagrams",
data={})
activeSensors = self.get_active_sensors()
for datagram in self.datagrams['n']:
if int(format(datagram.SystemDescriptor, '08b')[2:4],2) == int(activeSensors['Attvel'])+1:
inputtelegramsize = datagram.Attitude[0][6]
break
for datagram in self.datagrams['P']:
if int(format(datagram.Descriptor, '08b')[6:],2) == int(activeSensors['Position'])+1:
rawposinput = datagram.data[2:5].decode("utf-8")
break
if inputtelegramsize == 137:
inertialSystem = 'PosMV'
elif inputtelegramsize == 45 or inputtelegramsize == 43:
inertialSystem = 'Other'
else:
intertialSystem = 'F180'
data = {
'inertial_pos_system': inertialSystem,
'pos_string': rawposinput
}
if inertialSystem == 'PosMV' and rawposinput == 'GGK' or \
inertialSystem == 'F180' and rawposinput == 'GGK':
return ScanResult(
state=ScanState.PASS,
messages='',
data=data
)
elif inertialSystem == 'Other':
return ScanResult(
state=ScanState.WARNING,
messages="Unable to determine positioning system. Check position input contains ellipsoid heights",
data=data
)
else:
return ScanResult(
state=ScanState.FAIL,
messages="Ellipsoid heights not being logged change your position input to a GGK string",
data=data
)
def __dg_to_dict(self, datagram) -> Dict:
'''
Utils function to convert a datagram into a python
dict that can be serialised. The assumption is that all attributes
not starting with "__" are json serialisable and this may not be
the case.
'''
# ignore the following attributes. They are either unecessary or
# are know to not be json serialisable.
ignore_attrs = {
'offset',
'fileptr',
'data',
'typeOfDatagram',
'numberOfBytes',
'header',
'parameters',
'read',
}
dg_dict = {}
# get a list of all the attributes
attrs = [
a
for a in dir(datagram)
if not (a.startswith('__') or a in ignore_attrs)
]
# loop through each attribute and add it to the dict
for attr in attrs:
value = getattr(datagram, attr)
dg_dict[attr] = value
return dg_dict
    def _merge_position(self, positions: List, to_merge_list: List):
        '''
        Adds position (Latitude and Longitude) to the `to_merge` list based
        on the timestamps included in both `positions` and `to_merge`

        :param positions: List of position datagrams from where the Latitude
            and Longitude will be extracted
        :param to_merge_list: List of dictionaries that will have the Latitude
            and Longitude added.
        '''
        # to keep this code efficient we assume the position datagrams
        # are ordered according to time. We match a position datagram
        # with a runtime params datagram by using the position before the
        # runtime params datagram.
        # NOTE(review): also assumes to_merge_list is time-ordered and
        # non-empty relative to positions — confirm at call sites.
        position_iterator = iter(positions)
        position = next(position_iterator)
        position_prev = position
        for to_merge_item in to_merge_list:
            tmi_time = to_merge_item["Time"]
            # advance until `position` is the first datagram *after* the item;
            # `position_prev` is then the last one at-or-before it
            while position is not None and position.Time <= tmi_time:
                position_prev = position
                try:
                    position = next(position_iterator)
                except StopIteration:
                    break
            to_merge_item["Latitude"] = position_prev.Latitude
            to_merge_item["Longitude"] = position_prev.Longitude
def runtime_parameters(self) -> ScanResult:
'''
Extracts runtime parameters for inclusion in the `data` dict.
:return: :class:`hyo2.mate.lib.scan.ScanResult`
'''
if 'R' not in self.datagrams:
return ScanResult(
state=ScanState.WARNING,
messages="Runtime parameters datagram (R) not found in file",
data={})
r_datagrams = self.datagrams['R']
runtime_parameters = []
string = ""
for r_datagram in r_datagrams:
if r_datagram.parameters() != string:
dg_dict = self.__dg_to_dict(r_datagram)
runtime_parameters.append(dg_dict)
string = r_datagram.parameters()
data = {}
if 'P' in self.datagrams:
# then we can build a point based dataset of where the parameters
# changed
position_datagrams = self.datagrams['P']
self._merge_position(position_datagrams, runtime_parameters)
map = self._to_points_geojson(runtime_parameters)
data['map'] = map
return ScanResult(
state=ScanState.PASS,
messages=(
"{} Runtime parameter (R) datagrams found in file"
.format(len(runtime_parameters))
),
data=data
)
def positions(self) -> ScanResult:
'''
Extracts positions from position datagram. Scan result includes
geojson line definition comprised of these unique positions.
:return: :class:`hyo2.mate.lib.scan.ScanResult`
'''
if 'P' not in self.datagrams:
return ScanResult(
state=ScanState.WARNING,
messages="Posistion datagram (P) not found in file",
data={})
p_datagrams = self.datagrams['P']
position_points = []
last_point = None
for p_datagram in p_datagrams:
pt = Point([p_datagram.Longitude, p_datagram.Latitude])
if (last_point is not None and
pt.coordinates[0] == last_point.coordinates[0] and
pt.coordinates[1] == last_point.coordinates[1]):
# skip any points that have the same location
continue
position_points.append(pt)
last_point = pt
line = LineString(position_points)
feature = Feature(geometry=line)
feature_collection = FeatureCollection([feature])
data = {'map': to_mapping(feature_collection)}
return ScanResult(
state=ScanState.PASS,
messages=[],
data=data
)
def installation_parameters(self) -> ScanResult:
'''
Extracts installation parameters from datagram (of same name).
'''
if 'I' not in self.datagrams:
return ScanResult(
state=ScanState.FAIL,
messages=(
"Installation Parameters datagram (I) not found in file"),
data={})
ips = self.get_installation_parameters()
if ips is None:
return ScanResult(
state=ScanState.FAIL,
messages=(
"Failed to find Installation Parameters datagram (I) that "
"contained data"),
data={})
data = {}
for ip_param_name, ip_param_value in ips.items():
# todo: in future we'll need to perform some translations between
# the raw field names and a standardised version across all
# file formats
data[ip_param_name] = ip_param_value
return ScanResult(
state=ScanState.PASS,
messages=[],
data=data
)
|
python
|
import argparse
import sys
class ContactsCLI():
"""Parse command line arguments for contacts program"""
def __init__(self):
self.parser = argparse.ArgumentParser()
self.parser.add_argument(
'-f',
'--filter',
default=None,
type=str,
help='Filter contacts list by checking for matches to this string.'
)
self.subparsers = self.parser.add_subparsers(dest='subcommand')
self.gen_show_parser()
self.gen_export_parser()
self.gen_modify_parser()
self.gen_add_parser()
self.gen_import_parser()
self.gen_edit_parser()
self.gen_get_field_parser()
self.gen_delete_parser()
try:
self.args = self.parser.parse_args()
except argparse.ArgumentError:
sys.stderr.write('Invalid arguments passed')
self.parser.print_help()
sys.exit(1)
def gen_show_parser(self):
show_parser = self.subparsers.add_parser('show', help='Show help')
show_parser.add_argument(
'-c',
'--color',
action='store_true',
default=False,
help='Show contacts with colored output'
)
def gen_export_parser(self):
export_parser = self.subparsers.add_parser(
'export',
help='Export help',
)
export_parser.add_argument(
'-o',
'--output_file',
type=str,
default=None,
help='Optional output file path. Default is writing to stdout.'
)
def gen_modify_parser(self):
modify_parser = self.subparsers.add_parser(
'modify',
help='Modify help',
)
modify_parser.add_argument(
'-n',
'--name',
type=str,
help='Modify contact name and overwrite with new value.'
)
modify_parser.add_argument(
'-e',
'--email',
nargs='+',
type=str,
help='Modify contact emails and overwrite with new values.'
)
modify_parser.add_argument(
'-p',
'--phone',
nargs='+',
type=str,
help='Modify contact phone numbers and overwrite with new values.'
)
modify_parser.add_argument(
'-t',
'--tags',
nargs='+',
type=str,
help='Modify contact tags and overwrite with new values.'
)
def gen_add_parser(self):
add_parser = self.subparsers.add_parser('add', help='Add help')
add_parser.add_argument(
'-n',
'--name',
type=str,
help='Add contact name.'
)
add_parser.add_argument(
'-e',
'--email',
nargs='+',
type=str,
help='Add contact emails.'
)
add_parser.add_argument(
'-p',
'--phone',
nargs='+',
type=str,
help='Add contact phone numbers.'
)
add_parser.add_argument(
'-t',
'--tags',
nargs='+',
type=str,
help='Add contact tags.'
)
def gen_import_parser(self):
import_parser = self.subparsers.add_parser(
'import',
help='Import help'
)
import_parser.add_argument(
'input_file',
type=str,
help='Path to JSON file'
)
def gen_edit_parser(self):
edit_parser = self.subparsers.add_parser('edit', help='Edit help')
edit_parser.add_argument(
'--editor',
default='/usr/bin/vim',
help='Manually select a text editor.'
)
def gen_get_field_parser(self):
get_field_parser = self.subparsers.add_parser(
'get_field',
help='get field help'
)
get_field_parser.add_argument(
'fieldname',
help='fieldname to get value for'
)
def gen_delete_parser(self):
    """Register the 'delete' subcommand with a backup-file option."""
    parser = self.subparsers.add_parser('delete', help='delete')
    parser.add_argument(
        '--backup',
        default='/home/sam/documents/contacts_deleted.json',
        help='path to backup file where deleted contacts will be exiled')
# Entry point: constructing the CLI object parses argv and dispatches.
if __name__ == '__main__':
    ContactsCLI()
|
python
|
from client import Client
import pprint
"""
MAIN INSTANCE (petition)
"""
# Azure AD / Dynamics 365 credentials and tokens — fill in before running.
tenant_id = ""
client_id = ""
client_secret = ""
dynamics_resource = ""
CRM_resource = ""
refresh_token = ""
token = ""
# Authenticated client used by all the example calls documented below.
petition = Client(client_id=client_id, client_secret=client_secret, token=token)
"""
API ENDPOINTS EXAMPLES
"contacts", "accounts", "opportunities", "leads", "campaigns", "EntityDefinitions(LogicalName='contact')/Attributes"
"""
"""
REFRESH TOKEN
to refresh the token you have to send the client_id, the client_secret, the refresh_token, the redirect_uri, and the resource
Example:
refresh = petition.refresh_token(refresh_token, redirect_uri, resource)
pprint.pprint(refresh)
"""
"""
GET DATA METHOD
for get data just have to indicate the endpoint and the other filter options if you want
Example:
get_data = petition.get_data('contacts')
pprint.pprint(get_data)
"""
"""
CREATE DATA METHOD
to create data you have to specify the endpoint where you will create data and send the data in **kwarg
Example:
data = {"firstname": "TEST", "lastname": "ITS A TEST", "middlename": "TESTING", "emailaddress1": "[email protected]"}
create_data = petition.create_data('contacts', **data)
pprint.pprint(create_data)
"""
"""
UPDATE DATA METHOD
to update data you have to specify the endpoint where you will update data, also you have to specify the id of the thing to update and send te update data in **kwargs
Example:
data = {"firstname": "TESTCHANGE", "lastname": "UPDATE THE TEST", "middlename": "UPDATE TESTING", "emailaddress1": "[email protected]"}
update_data = petition.update_data(type='contacts', id='ID', **data)
pprint.pprint(update_data)
"""
"""
DELETE DATA METHOD
is simple, just send the endpoint where you'll delete data and the id of the data to delete
Example:
delete_data = petition.delete_data(type='contacts', id='ID')
pprint.pprint(delete_data)
"""
|
python
|
# */
# * Licensed to the Apache Software Foundation (ASF) under one
# * or more contributor license agreements. See the NOTICE file
# * distributed with this work for additional information
# * regarding copyright ownership. The ASF licenses this file
# * to you under the Apache License, Version 2.0 (the
# * "License"); you may not use this file except in compliance
# * with the License. You may obtain a copy of the License at
# *
# * http://www.apache.org/licenses/LICENSE-2.0
# *
# * Unless required by applicable law or agreed to in writing,
# * software distributed under the License is distributed on an
# * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# * KIND, either express or implied. See the License for the
# * specific language governing permissions and limitations
# * under the License.
# */
import json
from multiprocessing import JoinableQueue, Process
import random
import re
import traceback
import uuid
import time
import sys
import argparse
import loremipsum
import requests
from elasticsearch import Elasticsearch
__author__ = '[email protected]'
es_hosts = [
{'host': 'elasticsearch000west', 'port': 9200},
{'host': 'elasticsearch001west', 'port': 9200},
{'host': 'elasticsearch002west', 'port': 9200},
{'host': 'elasticsearch003west', 'port': 9200}
]
def parse_args():
    """Parse CLI options for the index load test; returns them as a dict.

    NOTE: uses the Python 2 ``long`` builtin for the large numeric options,
    so this script runs under Python 2 only.
    """
    parser = argparse.ArgumentParser(description='ElasticSearch Index Test 1')
    parser.add_argument('-t', '--type_count',
                        help='The number of types to produce',
                        type=int,
                        default=100)
    parser.add_argument('-ic', '--index_count',
                        help='The number of indices to create',
                        type=int,
                        default=10)
    # NOTE(review): the help strings below duplicate the -ic text; they
    # presumably should mention shards/replicas — confirm before changing.
    parser.add_argument('-sc', '--shard_count',
                        help='The number of indices to create',
                        type=int,
                        default=18)
    parser.add_argument('-rc', '--replica_count',
                        help='The number of indices to create',
                        type=int,
                        default=1)
    parser.add_argument('-w', '--workers',
                        help='The number of worker threads',
                        type=int,
                        default=8)
    parser.add_argument('-dc', '--document_count',
                        help='The number of documents per index',
                        type=long,
                        default=100000000)
    parser.add_argument('-bs', '--batch_size',
                        help='The size of batches to send to ES',
                        type=long,
                        default=25)
    parser.add_argument('-ip', '--index_prefix',
                        help='The Prefix to use for index names',
                        type=str,
                        default='apigee_ftw')
    parser.add_argument('-tp', '--type_prefix',
                        help='The Prefix to use for type names',
                        type=str,
                        default='type_this')
    # NOTE(review): help text looks copy-pasted; -s actually toggles setup mode.
    parser.add_argument('-s', '--setup',
                        help='The Prefix to use for type names',
                        action='store_true')
    my_args = parser.parse_args(sys.argv[1:])
    return vars(my_args)
# Parsed CLI options, read module-wide by APIClient, Worker and main().
args = parse_args()
class APIClient():
    """Thin HTTP client for the Elasticsearch REST API.

    Python 2 only (print statements, ``except Exception, e`` syntax).
    """
    def __init__(self, base_url):
        # base_url, e.g. 'http://host:9200' (no trailing slash)
        self.base_url = base_url
    def put(self, path='/', data=None):
        """PUT *data* (JSON-encoded) to base_url+path; return parsed JSON.

        Raises Exception on any non-200 response.
        """
        if not data:
            data = {}
        url = '%s%s' % (self.base_url, path)
        r = requests.put(url, json.dumps(data))
        if r.status_code == 200:
            print 'PUT (%s) in %sms' % (r.status_code, total_milliseconds(r.elapsed))
            return r.json()
        raise Exception('HTTP %s calling PUT on URL=[%s]: %s' % (r.status_code, url, r.text))
    def index_batch(self, batch):
        """Index a batch of documents via the ES _bulk API.

        *batch* is a list of ((index_name, type_name), document) tuples;
        each document must carry an 'entityId' used as the ES _id.
        """
        data = ''
        for element in batch:
            index_tuple = element[0]
            doc = element[1]
            # Bulk format: one action line, then the document source line.
            data += '{ "index" : { "_index" : "%s", "_type" : "%s", "_id" : "%s" } }\n' % (
                index_tuple[0], index_tuple[1], doc['entityId'])
            data += json.dumps(doc)
            data += '\n'
        url = '%s/_bulk' % self.base_url
        # print data
        r = requests.post(url, data)
        # print json.dumps(r.json(), indent=2)
        if r.status_code == 200:
            print 'PUT (%s) in %sms' % (r.status_code, total_milliseconds(r.elapsed))
            return r.json()
        raise Exception('HTTP %s calling POST URL=[%s]: %s' % (r.status_code, url, r.text))
    def delete(self, index):
        """DELETE base_url+index; return parsed JSON or raise on non-200."""
        url = '%s%s' % (self.base_url, index)
        r = requests.delete(url)
        if r.status_code == 200:
            print 'DELETE (%s) in %sms' % (r.status_code, total_milliseconds(r.elapsed))
            return r.json()
        raise Exception('HTTP %s calling DELETE URL=[%s]: %s' % (r.status_code, url, r.text))
    def create_index(self, name=None, shards=18 * 3, replicas=1):
        """Create an index (name is lower-cased); errors are printed, not raised."""
        data = {
            "settings": {
                "index": {
                    "action": {
                        "write_consistency": "one"
                    },
                    "number_of_shards": shards,
                    "number_of_replicas": replicas
                }
            }
        }
        try:
            print 'Creating index %s' % name
            response = self.put('/%s/' % name.lower(), data)
            print response
        except Exception, e:
            print traceback.format_exc()
    def delete_index(self, name):
        """Delete an index (name is lower-cased); errors are printed, not raised."""
        try:
            response = self.delete('/%s/' % name.lower())
            print response
        except Exception, e:
            print traceback.format_exc()
    def define_type_mapping(self, index_name, type_name):
        """PUT the static mapping from get_type_mapping() for one type."""
        try:
            url = '/%s/_mapping/%s' % (index_name, type_name)
            print url
            response = self.put(url, get_type_mapping(type_name))
            print response
        except Exception, e:
            print traceback.format_exc()
class Worker(Process):
    """Worker process that generates random documents and bulk-indexes them.

    Pulls tasks from a shared JoinableQueue; each task names the target
    index/type, a document uuid and how many fields to generate.
    Python 2 only (print statements, xrange, basestring).
    """
    def __init__(self, work_queue):
        super(Worker, self).__init__()
        # Each worker talks to one randomly chosen ES node.
        self.api_client = APIClient('http://%s:9200' % es_hosts[random.randint(0, len(es_hosts) - 1)].get('host'))
        self.work_queue = work_queue
        self.es = Elasticsearch(es_hosts)
        # Pool of lorem-ipsum sentences used for field values and key names.
        self.sentence_list = loremipsum.get_sentences(1000)
        # NOTE(review): the character class [A-z] also matches [\]^_` —
        # presumably [A-Za-z] was intended.
        self.re_first_word = re.compile('([A-z]+)')
    def run(self):
        """Consume tasks forever, flushing a bulk batch every batch_size docs."""
        print 'Starting %s ' % self.name
        counter = 0
        batch = []
        while True:
            index_batch_size = args.get('batch_size')
            task = self.work_queue.get(timeout=600)
            counter += 1
            document = self.generate_document(task['field_count'])
            flattened_doc = self.process_document(document,
                                                  task['type'],
                                                  task['uuid'],
                                                  task['uuid'])
            index_type_tuple = (task['index'], task['type'])
            # self.handle_document(task['index'], task['type'], task['uuid'], flattened_doc)
            batch.append((index_type_tuple, flattened_doc))
            if len(batch) >= index_batch_size:
                self.handle_batch(batch)
                batch = []
            self.work_queue.task_done()
    def generate_document(self, fields):
        """Build a random document with *fields* keys of mixed value types."""
        doc = {}
        my_bool = True
        for i in xrange(fields):
            sentence_index = random.randint(0, max((fields / 2) - 1, 1))
            sentence = self.sentence_list[sentence_index]
            # Half the time reuse a word as the key (causing overwrites),
            # otherwise make it unique by appending the loop index.
            if random.random() >= .5:
                key = self.re_first_word.findall(sentence)[1]
            else:
                key = self.re_first_word.findall(sentence)[1] + str(i)
            field_type = random.random()
            if field_type <= 0.3:
                doc[key] = sentence
            elif field_type <= 0.5:
                doc[key] = random.randint(1, 1000000)
            elif field_type <= 0.6:
                doc[key] = random.random() * 1000000000
            # NOTE(review): equality comparisons against a continuous random
            # value are almost never true, so the two '==' branches below are
            # effectively dead — '<=' was presumably intended.
            elif field_type == 0.7:
                doc[key] = my_bool
                my_bool = not my_bool
            elif field_type == 0.8:
                doc[key] = self.generate_document(max(fields / 5, 1))
            elif field_type <= 1.0:
                doc['mylocation'] = self.generate_location()
        return doc
    @staticmethod
    def get_fields(document, base_name=None):
        """Flatten *document* into a list of {'name', <type>: value} entries.

        Nested dicts are flattened with dotted names; unknown types are
        stringified.
        """
        fields = []
        for name, value in document.iteritems():
            if base_name:
                field_name = '%s.%s' % (base_name, name)
            else:
                field_name = name
            if isinstance(value, dict):
                fields += Worker.get_fields(value, field_name)
            else:
                value_name = None
                # Order matters: bool is a subclass of int, but the
                # basestring/bool checks run before the int check here.
                if isinstance(value, basestring):
                    value_name = 'string'
                elif isinstance(value, bool):
                    value_name = 'boolean'
                elif isinstance(value, (int, long)):
                    value_name = 'long'
                elif isinstance(value, float):
                    value_name = 'double'
                if value_name:
                    field = {
                        'name': field_name,
                        value_name: value
                    }
                else:
                    field = {
                        'name': field_name,
                        'string': str(value)
                    }
                fields.append(field)
        return fields
    @staticmethod
    def process_document(document, doc_type, application_id, uuid):
        """Wrap a flattened document in the entity envelope expected by the mapping."""
        response = {
            'entityId': uuid,
            'entityVersion': '1',
            'entityType': doc_type,
            'applicationId': application_id,
            'fields': Worker.get_fields(document)
        }
        return response
    def handle_document(self, index, doc_type, uuid, document):
        """Index a single document via the elasticsearch client (unused by run())."""
        res = self.es.create(index=index,
                             doc_type=doc_type,
                             id=uuid,
                             body=document)
        print res
    def generate_location(self):
        """Return a random geo point {'location': {'lat', 'lon'}}.

        NOTE(review): the sign of lat is decided by lon's magnitude and vice
        versa — looks accidental, though the result is still a random point.
        """
        response = {}
        lat = random.random() * 90.0
        lon = random.random() * 180.0
        lat_neg_true = True if lon > .5 else False
        lon_neg_true = True if lat > .5 else False
        lat = lat * -1.0 if lat_neg_true else lat
        lon = lon * -1.0 if lon_neg_true else lon
        response['location'] = {
            'lat': lat,
            'lon': lon
        }
        return response
    def handle_batch(self, batch):
        """Send an accumulated batch to the _bulk endpoint."""
        print 'HANDLE BATCH size=%s' % len(batch)
        # self.api_client.define_type_mapping(index, doc_type)
        self.api_client.index_batch(batch)
def total_milliseconds(td):
    """Return the total duration of *td* (a datetime.timedelta) in milliseconds.

    Bug fix: the original formula (microseconds + seconds * 1e6) / 1000
    ignored ``td.days`` and relied on Python 2 integer division;
    ``total_seconds()`` accounts for the full duration on both versions.
    """
    return int(td.total_seconds() * 1000)
def get_type_mapping(type_name):
    """Return the static ES mapping body for *type_name*.

    Documents are routed by entityId; dynamic fields live under the nested
    'fields' array with one typed sub-key per value kind; _all is disabled.
    """
    return {
        type_name: {
            # Route documents by their entityId so all versions co-locate.
            "_routing": {
                "path": "entityId",
                "required": True
            },
            "properties": {
                "entityId": {
                    "type": "string",
                    "index": "not_analyzed",
                    "doc_values": True
                },
                "entityVersion": {
                    "type": "string",
                    "index": "not_analyzed",
                    "doc_values": True
                },
                "entityType": {
                    "type": "string",
                    "index": "not_analyzed",
                    "doc_values": True
                },
                "applicationId": {
                    "type": "string",
                    "index": "not_analyzed",
                    "doc_values": True
                },
                "nodeId": {
                    "type": "string",
                    "index": "not_analyzed",
                    "doc_values": True
                },
                "edgeName": {
                    "type": "string",
                    "index": "not_analyzed",
                    "doc_values": True
                },
                "entityNodeType": {
                    "type": "string",
                    "index": "not_analyzed",
                    "doc_values": True
                },
                "edgeTimestamp": {
                    "type": "long",
                    "doc_values": True
                },
                "edgeSearch": {
                    "type": "string",
                    "index": "not_analyzed",
                    "doc_values": True
                },
                # Flattened document fields: one entry per original field,
                # typed by which sub-key is present (see Worker.get_fields).
                "fields": {
                    "type": "nested",
                    "properties": {
                        "name": {
                            "type": "string",
                            "index": "not_analyzed",
                            "doc_values": True
                        },
                        "boolean": {
                            "type": "boolean",
                            "doc_values": True
                        },
                        "long": {
                            "type": "long",
                            "doc_values": True
                        },
                        "double": {
                            "type": "double",
                            "doc_values": True
                        },
                        "location": {
                            "type": "geo_point",
                            "lat_lon": True,
                            "geohash": True,
                            "doc_values": True
                        },
                        "string": {
                            "type": "string",
                            "norms": {
                                "enabled": False
                            },
                            # Keep an unanalyzed copy for exact matching.
                            "fields": {
                                "exact": {
                                    "type": "string",
                                    "index": "not_analyzed",
                                    "doc_values": True
                                }
                            }
                        },
                        "uuid": {
                            "type": "string",
                            "index": "not_analyzed",
                            "doc_values": True
                        }
                    }
                }
            },
            # The catch-all _all field is unused; disable it to save space.
            "_all": {
                "enabled": False
            }
        }
    }
def main():
    """Drive the load test: optionally (re)create indices, then enqueue
    document-generation tasks for the worker pool until document_count
    is reached.  Python 2 only (print statements, xrange)."""
    INDEX_COUNT = args.get('index_count')
    TYPE_COUNT = args.get('type_count')
    SETUP = args.get('setup')
    indices = []
    types = []
    work_queue = JoinableQueue()
    # Control-plane client pointed at a random ES node.
    apiclient = APIClient('http://%s:9200' % es_hosts[random.randint(0, len(es_hosts) - 1)].get('host'))
    workers = [Worker(work_queue) for x in xrange(args.get('workers'))]
    [worker.start() for worker in workers]
    try:
        #
        for x in xrange(TYPE_COUNT):
            type_name = '%s_%s' % (args.get('type_prefix'), x)
            types.append(type_name)
        for x in xrange(INDEX_COUNT):
            index_name = '%s_%s' % (args.get('index_prefix'), x)
            indices.append(index_name)
        if SETUP:
            # Drop and recreate every target index before the run.
            print 'Running setup...'
            for index_name in indices:
                apiclient.delete_index(index_name)
            time.sleep(1)
            for index_name in indices:
                apiclient.create_index(
                    index_name,
                    shards=args['shard_count'],
                    replicas=args['replica_count'])
        # time.sleep(5)
        # for index_name in indices:
        #     for type_name in types:
        #         apiclient.define_type_mapping(index_name, type_name)
        # time.sleep(5)
        total_messages = args.get('document_count')
        batch_size = 100000
        message_counter = 0
        # One field count is drawn per run and shared by all documents.
        fields = random.randint(50, 100)
        while message_counter < total_messages:
            # Enqueue batch_size tasks per index, then wait for the workers
            # to drain the queue before counting the batch as done.
            for count in xrange(batch_size):
                for index_name in indices:
                    doc_id = str(uuid.uuid1())
                    task = {
                        'field_count': fields,
                        'uuid': doc_id,
                        'index': index_name,
                        'type': types[random.randint(0, len(types) - 1)]
                    }
                    work_queue.put(task)
            print 'Joining queue counter=[%s]...' % message_counter
            work_queue.join()
            print 'Done queue counter=[%s]...' % message_counter
            message_counter += batch_size
    except KeyboardInterrupt:
        [worker.terminate() for worker in workers]
main()
|
python
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from itertools import combinations, permutations
import logging
import networkx as nx
import numpy as np
import pandas as pd
# +
# generate a random adjacency matrix
# traces: Number or Domino Traces
# If traces>1 the output will be a data frame of list
# nodes: Number of nodes
# parent_max: number max of possible parents per node
def adjacency_generator(traces, nodes, parents_max):
    """Generate random weakly-connected DAG adjacency matrices.

    traces: number of matrices to generate (returned as a list).
    nodes: number of nodes per graph.
    parents_max: maximum number of parents drawn per node.

    Each candidate is built in topological order (parents only among
    earlier nodes, guaranteeing acyclicity), then randomly relabelled,
    and rejected until its undirected version is connected.
    """
    def connected_graph(adjacency_matrix):
        # Accept only graphs whose undirected skeleton is connected.
        g = nx.DiGraph(adjacency_matrix)
        return nx.is_weakly_connected(g)

    data = []
    for _ in range(traces):
        finished = False
        while not finished:
            # Random node relabelling so the topological order is hidden.
            permutation = np.random.permutation(range(0, nodes))
            idx = np.empty_like(permutation)
            idx[permutation] = np.arange(len(permutation))
            adjacency_matrix = np.zeros((nodes, nodes), dtype=int)
            for j in range(1, nodes):
                nb_parents = np.random.randint(0, min([parents_max, j]) + 1)
                # NOTE(review): replace=True can draw the same parent twice,
                # so a node may end up with fewer distinct parents than
                # nb_parents — confirm this is intended.
                for i in np.random.choice(range(0, j), nb_parents, replace=True):
                    adjacency_matrix[i, j] = 1
            # Apply the relabelling to columns, then rows.  (A no-op
            # statement `adjacency_matrix[:, idx]` that preceded these two
            # assignments in the original was dead code and was removed.)
            adjacency_matrix[:] = adjacency_matrix[:, idx]
            adjacency_matrix[:] = adjacency_matrix[idx, :]
            finished = connected_graph(adjacency_matrix)
        data.append(adjacency_matrix)
    return data
# +
# Add causal relation between parents and childs nodes
# adjacency_matrix : a adjacency matrix it must be a numpy array
# nodes: Number of nodes
# parent_max: number max of possible parents per node
# N : Number of experimental cases (sample) per domino trace
# p : probability that the initial root fall or not in the Domino trace
# eps: Quantity of noise level in the data
def theta_generator_multi(adjacency_matrix,nodes,parents_max, N, p, eps):
    """Simulate N binary samples per domino trace and return a shuffled DataFrame.

    adjacency_matrix: list of numpy adjacency matrices (one per trace).
    nodes: nominal node count — NOTE(review): this parameter is immediately
        shadowed by len(matrix) inside the loop, and parents_max is unused.
    N: number of samples per trace.
    p: probability a root node is active.
    eps: per-parent noise (bit-flip) probability.

    Returns a DataFrame with columns Trace (matrix), Theta (per-node
    activation frequency) and Matrix_theta (raw nodes x N samples).
    """
    data = [] # creating empty data sets
    num_rows = len(adjacency_matrix)
    for b in range(num_rows):
        matrix = adjacency_matrix[b]
        nodes = len(matrix)
        X = np.zeros(shape=(nodes, N), dtype=int)
        # Two passes — presumably to let values propagate through the
        # graph regardless of node ordering; TODO confirm two suffice for
        # the depths generated here.
        for t in range (2):
            for i in range(0, nodes):
                # Root nodes (no parents) are sampled directly.
                if not sum(matrix[:, i]):
                    X[i,:] = np.random.binomial(1, p, size=N)
            for k in range(0, nodes):
                if sum(matrix[:, k]):
                    parents = np.where(matrix[:,k] == 1)[0]
                    X[k, :] = np.ones_like(X[k, :])
                    # Child is the noisy AND of its parents.
                    for a in parents:
                        noise = np.random.binomial(1, eps, size=N)
                        X[k, :] = X[k,:]*[(1-noise[j])*X[a,j]+noise[j]*(1-X[a,j]) for j in range(N)]
        theta = X.sum(axis=1)/N
        data.append({"Trace": matrix, "Theta": theta, "Matrix_theta": X}) # generating nested list of adjacency matrix
    df = pd.DataFrame(data=data).sample(frac=1).reset_index(drop=True)
    return df
# +
# Metrics of recall and precision of the skeleton
# between ground truth and predicted graph
def diff_DAG(dag, skel):
    """Skeleton precision/recall between a ground-truth DAG and a predicted graph.

    Edge orientation is ignored: a predicted edge counts as correct if it
    appears in the truth in either direction.

    Returns (precision, recall, number_of_true_edges); both metrics are 0
    when a denominator would be zero.
    """
    truth = set(dag.edges())
    pred = set(skel.edges())
    truth_rev = {(v, u) for u, v in truth}
    pred_rev = {(v, u) for u, v in pred}
    # Predicted edges absent from the truth in both orientations.
    extra = pred - truth - truth_rev
    # True edges missing from the prediction in both orientations.
    missing = truth - pred - pred_rev
    tp = len(truth) - len(missing)
    fp = len(extra)
    fn = len(missing)
    if tp + fp == 0 or tp + fn == 0:
        return 0, 0, len(truth)
    return tp / (tp + fp), tp / (tp + fn), len(truth)
# +
# Metrics of recall and precision of the direction of edges
# between ground truth and predicted graph
def rec_directions(dag, pred_dag):
    """Precision/recall of edge directions between truth and prediction.

    A predicted edge is a false positive only when neither it nor its
    reverse-only counterpart matches the truth; a true edge is a false
    negative when it is absent from the prediction in both orientations.

    Returns (precision, recall); both are 0 when a denominator would be zero.
    """
    truth = set(dag.edges())
    pred = set(pred_dag.edges())
    truth_rev = {(v, u) for u, v in truth}
    pred_rev = {(v, u) for u, v in pred}
    # Reversed predictions that were not also predicted forward.
    rev_only = pred_rev - pred
    extra = pred - truth - rev_only
    missing = truth - pred - truth_rev
    tp = len(pred) - len(extra)
    fp = len(extra)
    fn = len(missing)
    if tp + fp == 0 or tp + fn == 0:
        return 0, 0
    return tp / (tp + fp), tp / (tp + fn)
# -
def estimate_skeleton_mulalpha(indep_test_func, data_matrix, alpha, **kwargs):
    """PC-algorithm skeleton estimation that also records every p-value.

    indep_test_func(data_matrix, i, j, K, **kwargs) -> p-value of the
    conditional-independence test of i and j given K.
    alpha: significance threshold; edges with p_val > alpha are removed.

    Returns (graph, sep_set, df_pval) where df_pval is a shuffled DataFrame
    of every test performed, so the skeleton can later be re-derived for
    other alphas via estimate_skeleton_list without re-testing.

    NOTE(review): relies on module-level helpers `_create_complete_graph`
    and `_logger` defined elsewhere in this module (not visible here).
    """
    l_pval = []
    def method_stable(kwargs):
        # 'stable' PC variant: defer edge removals to the end of each level.
        return ('method' in kwargs) and kwargs['method'] == "stable"
    node_ids = range(data_matrix.shape[1])
    node_size = data_matrix.shape[1]
    # sep_set[i][j] accumulates the separating sets found for (i, j).
    sep_set = [[set() for i in range(node_size)] for j in range(node_size)]
    if 'init_graph' in kwargs:
        g = kwargs['init_graph']
        if not isinstance(g, nx.Graph):
            raise ValueError
        elif not g.number_of_nodes() == len(node_ids):
            raise ValueError('init_graph not matching data_matrix shape')
        for (i, j) in combinations(node_ids, 2):
            if not g.has_edge(i, j):
                sep_set[i][j] = None
                sep_set[j][i] = None
    else:
        g = _create_complete_graph(node_ids)
    l = 0
    print("multi")
    while True:
        cont = False
        remove_edges = []
        for (i, j) in permutations(node_ids, 2):
            adj_i = list(g.neighbors(i))
            if j not in adj_i:
                continue
            else:
                adj_i.remove(j)
            if len(adj_i) >= l:
                _logger.debug('testing %s and %s' % (i,j))
                _logger.debug('neighbors of %s are %s' % (i, str(adj_i)))
                # NOTE(review): unreachable inside the '>= l' branch;
                # harmless dead guard kept from the original implementation.
                if len(adj_i) < l:
                    continue
                # Test independence given every size-l conditioning set.
                for k in combinations(adj_i, l):
                    p_val = indep_test_func(data_matrix, i, j, set(k),
                                            **kwargs)
                    l_pval.append({"i":i,"j":j,"set_k":set(k),"p_val":p_val})
                    if p_val > alpha:
                        if g.has_edge(i, j):
                            if method_stable(kwargs):
                                remove_edges.append((i, j))
                            else:
                                g.remove_edge(i, j)
                        sep_set[i][j] |= set(k)
                        sep_set[j][i] |= set(k)
                        break
                cont = True
        l += 1
        if method_stable(kwargs):
            g.remove_edges_from(remove_edges)
        if cont is False:
            break
        if ('max_reach' in kwargs) and (l > kwargs['max_reach']):
            break
    df_pval = pd.DataFrame(data=l_pval).sample(frac=1).reset_index(drop=True)
    return (g, sep_set,df_pval )
def estimate_skeleton_list(data_matrix, alpha, l_pval, **kwargs):
    """Re-run PC skeleton estimation using pre-computed p-values.

    Identical control flow to estimate_skeleton_mulalpha, but instead of
    calling an independence-test function it looks each p-value up in
    *l_pval* (the df_pval DataFrame produced by that function), allowing
    the skeleton to be re-derived cheaply for a different *alpha*.

    Returns (graph, sep_set).

    NOTE(review): relies on module-level helpers `_create_complete_graph`
    and `_logger` defined elsewhere in this module (not visible here).
    """
    def method_stable(kwargs):
        # 'stable' PC variant: defer edge removals to the end of each level.
        return ('method' in kwargs) and kwargs['method'] == "stable"
    node_ids = range(data_matrix.shape[1])
    node_size = data_matrix.shape[1]
    sep_set = [[set() for i in range(node_size)] for j in range(node_size)]
    if 'init_graph' in kwargs:
        g = kwargs['init_graph']
        if not isinstance(g, nx.Graph):
            raise ValueError
        elif not g.number_of_nodes() == len(node_ids):
            raise ValueError('init_graph not matching data_matrix shape')
        for (i, j) in combinations(node_ids, 2):
            if not g.has_edge(i, j):
                sep_set[i][j] = None
                sep_set[j][i] = None
    else:
        g = _create_complete_graph(node_ids)
    l = 0
    while True:
        cont = False
        remove_edges = []
        for (i, j) in permutations(node_ids, 2):
            adj_i = list(g.neighbors(i))
            if j not in adj_i:
                continue
            else:
                adj_i.remove(j)
            if len(adj_i) >= l:
                _logger.debug('testing %s and %s' % (i,j))
                _logger.debug('neighbors of %s are %s' % (i, str(adj_i)))
                # NOTE(review): unreachable inside the '>= l' branch;
                # harmless dead guard kept from the original implementation.
                if len(adj_i) < l:
                    continue
                for k in combinations(adj_i, l):
                    #p_val = indep_test_func(data_matrix, i, j, set(k),
                    # Look the p-value up from the recorded tests instead
                    # of re-running the independence test.
                    p_val = l_pval.p_val[(l_pval.i == i) & (l_pval.j == j) & (l_pval.set_k == set(k))].values.item(0)
                    if p_val > alpha:
                        if g.has_edge(i, j):
                            if method_stable(kwargs):
                                remove_edges.append((i, j))
                            else:
                                g.remove_edge(i, j)
                        sep_set[i][j] |= set(k)
                        sep_set[j][i] |= set(k)
                        break
                cont = True
        l += 1
        if method_stable(kwargs):
            g.remove_edges_from(remove_edges)
        if cont is False:
            break
        if ('max_reach' in kwargs) and (l > kwargs['max_reach']):
            break
    return (g, sep_set )
|
python
|
# AUTOGENERATED FILE - DO NOT MODIFY!
# This file generated by Djinni from foo_client_interface.djinni
from djinni.support import MultiSet # default imported in all files
from djinni.exception import CPyException # default imported in all files
from djinni.pycffi_marshal import CPyPrimitive, CPyRecord
from PyCFFIlib_cffi import ffi, lib
from djinni import exception # this forces run of __init__.py which gives cpp option to call back into py to create exception
class FooSomeOtherRecord:
    """Djinni-generated Python proxy for the `foo_some_other_record` record.

    Value semantics: equality, ordering and hashing are derived from the
    two fields ``number1`` and ``number2``, compared in that order
    (lexicographic).  Autogenerated — do not hand-edit behavior.
    """
    # Tracks live C-side handles so tests can assert nothing leaked.
    c_data_set = MultiSet()
    @staticmethod
    def check_c_data_set_empty():
        # Test hook: fails if any C-side record objects are still alive.
        assert len(FooSomeOtherRecord.c_data_set) == 0
    # Record deriving types
    def __eq__(self, other):
        return self.number1==other.number1 and \
            self.number2==other.number2
    def __lt__(self, other):
        # Lexicographic ordering: number1 first, then number2.
        if self.number1<other.number1:
            return True
        if self.number1>other.number1:
            return False
        if self.number2<other.number2:
            return True
        if self.number2>other.number2:
            return False
        return False
    def __le__(self, other):
        # self <= other  <=>  not (other < self)
        return not other.__lt__(self)
    def __ge__(self, other):
        return not self.__lt__(other)
    def __gt__(self, other):
        return other.__lt__(self)
    def __hash__(self):
        # Pick an arbitrary non-zero starting value
        hash_code = 17
        hash_code = hash_code * 31 + self.number1.__hash__()
        hash_code = hash_code * 31 + self.number2.__hash__()
        return hash_code
    def __init__(self, number1, number2):
        self.number1 = number1
        self.number2 = number2
|
python
|
import numpy as np

# 3x4 demo matrix for numpy.cumsum.
a = np.array([[0, 1, 2, 3],
              [4, 5, 6, 7],
              [8, 9, 10, 11]])

# Cumulative sum over the flattened (1-D) array.
# Bug fix: in the original, this comment spilled onto its own line
# without a '#', which made the script a SyntaxError.
print(np.cumsum(a))
'''
[ 0  1  3  6 10 15 21 28 36 45 55 66]
'''
print(np.cumsum(a, axis=0))  # cumulative sum down each column
'''
[[ 0  1  2  3]
 [ 4  6  8 10]
 [12 15 18 21]]
'''
print(np.cumsum(a, axis=1))  # cumulative sum along each row
'''
[[ 0  1  3  6]
 [ 4  9 15 22]
 [ 8 17 27 38]]
'''
|
python
|
import pytest
try:
import simplejson as json
except ImportError:
import json
from click.testing import CliRunner
from diderypy.cli import main
from diderypy.lib import generating as gen
def parsOutput(data):
    """Split CLI output on newlines, dropping empty lines."""
    return [line for line in data.split('\n') if line]
def testValidConfigFile():
    """A well-formed config with a 'servers' list parses cleanly; with no
    action flags the CLI prints its no-options message and exits 0."""
    runner = CliRunner()
    with runner.isolated_filesystem():
        # Test valid config file
        with open('config.json', 'w') as f:
            f.write('{"servers": ["http://localhost:8080", "http://localhost:8000"]}')
        result = runner.invoke(main, ['config.json'])
        assert result.exit_code == 0
        assert result.output == "No options given. For help use --help. Exiting Didery.py\n"
def testInvalidConfigJson():
    """Malformed JSON in the config file yields a parse-error message
    (the CLI still exits 0)."""
    runner = CliRunner()
    with runner.isolated_filesystem():
        # Test invalid json
        with open('config.json', 'w') as f:
            f.write('{"servers": ["http://localhost:8080", "http://localhost:8000"]')
        result = runner.invoke(main, ['config.json', '--upload'])
        assert result.exit_code == 0
        assert result.output == "Error parsing the config file: Invalid JSON.\n"
def testInvalidServerList():
    """A 'servers' value that is not a JSON list is rejected with a
    field-type error message (the CLI still exits 0)."""
    runner = CliRunner()
    with runner.isolated_filesystem():
        # Test non list "servers" value
        with open('config.json', 'w') as f:
            f.write('{"servers": "http://localhost:8080"}')
        result = runner.invoke(main, ['config.json', '--upload'])
        assert result.exit_code == 0
        assert result.output == "Error parsing the config file: \"servers\" field must be a list.\n"
def testMissingConfigFields():
    """A config missing the required 'servers' key is rejected with a
    missing-field error message (the CLI still exits 0)."""
    runner = CliRunner()
    with runner.isolated_filesystem():
        # Test missing required fields
        with open('config.json', 'w') as f:
            f.write('{"urls": ["http://localhost:8080", "http://localhost:8000"]}')
        result = runner.invoke(main, ['config.json', '--upload'])
        assert result.exit_code == 0
        assert result.output == "Error parsing the config file: Missing required field servers.\n"
# TODO figure out why these fail when run with other tests
# def testValidInceptionDataFile():
# runner = CliRunner()
# history, vk, sk, pvk, psk = gen.historyGen()
# data = {
# "history": history
# }
#
# with runner.isolated_filesystem():
# # Test valid config file
# with open('config.json', 'w') as f:
# f.write('{"servers": ["http://localhost:8080", "http://localhost:8000"]}')
#
# with open('data.json', 'w') as f:
# f.write(json.dumps(data))
#
# result = runner.invoke(main, ['config.json', '--incept', '--data=data.json', '-v'], input="{}\n".format(sk))
#
# output = parsOutput(result.output)
# expected_output = [
# "Please enter you signing/private key: {}".format(sk),
# "2/2 requests succeeded."
# ]
#
# assert result.exit_code == 0
# assert output == expected_output
# def testValidInception():
# runner = CliRunner()
#
# with runner.isolated_filesystem():
# # Test valid config file
# with open('config.json', 'w') as f:
# f.write('{"servers": ["http://localhost:8080", "http://localhost:8000"]}')
#
# result = runner.invoke(main, ['config.json', '--incept', '-v'], input="y\n")
#
# output = parsOutput(result.output)
# expected_output = [
# "Keys have been generated and stored in the current directory under didery.keys.json. ",
# "Make a copy and store them securely. ",
# "The file will be deleted after you enter a key: y",
# "didery.keys.json deleted.",
# "2/2 requests succeeded."
# ]
#
# assert result.exit_code == 0
# assert output == expected_output
|
python
|
import time
import heapq


def curr_time():
    """Current wall-clock time in whole milliseconds."""
    return round(time.time() * 1000)


class GameEventPQueue:
    '''Singleton-style event queue for the game.

    Listeners are registered per event name.  Events are scheduled on two
    min-heaps: ``timepqueue`` keyed by wall-clock milliseconds and
    ``tickpqueue`` keyed by game ticks, so earlier-scheduled events fire
    first.  On each tick() both heaps are drained up to the current
    moment; hitting an event that is not yet due stops the drain.

    Bug fixes versus the original:
      * the tick counter attribute was named ``tick`` and shadowed the
        tick() method, making the method uncallable; the counter is now
        ``tick_count``.
      * ``_proceed`` was missing its ``self`` parameter, so every call to
        it raised TypeError.
    '''

    def __init__(self):
        self.listeners = {}    # event_name -> set of listener callables
        self.tick_count = 0    # current game tick
        self.timepqueue = []   # (due_ms, event_name, event) min-heap
        self.tickpqueue = []   # (due_tick, event_name, event) min-heap

    def add_listener(self, event_name, listener):
        """Register *listener* to receive events named *event_name*."""
        self.listeners.setdefault(event_name, set()).add(listener)

    def remove_listener(self, event_name, listener):
        """Unregister *listener*; drop the event entry once it is empty."""
        self.listeners[event_name].remove(listener)
        if not self.listeners[event_name]:
            del self.listeners[event_name]

    def tick_enq(self, event_name, event, priority):
        """Schedule *event* to fire *priority* game ticks from now.

        priority has no default: time_enq is preferable for immediate
        events since it respects enqueuing order.
        """
        heapq.heappush(self.tickpqueue,
                       (self.tick_count + priority, event_name, event))

    def time_enq(self, event_name, event, priority=0):
        """Schedule *event* to fire *priority* milliseconds from now."""
        heapq.heappush(self.timepqueue,
                       (curr_time() + priority, event_name, event))

    def _proceed(self, pqueue, moment):
        """Pop and dispatch every event on *pqueue* due at or before *moment*."""
        while pqueue:
            due, event_name, event = pqueue[0]
            if due > moment:
                break
            heapq.heappop(pqueue)
            for listener in self.listeners.get(event_name, []):
                listener(event)

    def tick(self):
        """Advance the game by one tick, firing all due events."""
        # Wall-clock events first: immediate tasks are scheduled there.
        self._proceed(self.timepqueue, curr_time())
        self._proceed(self.tickpqueue, self.tick_count)
        self.tick_count += 1  # increment game ticks
#def clear(maxt=0, event_name):
# if maxt == 0:
# # clear all events in event_name
# else:
# # clear events before or equal to maxt relating to event
# Module-level singleton queue shared by game code.
eventQ = GameEventPQueue()
# test code
#def test(event):
# print("hello", event)
#
#def test2(event):
# print("test2", event)
#
#gameQueue = GameEventPQueue()
#gameQueue.add_listener("test", test)
#gameQueue.add_listener("test2", test)
#gameQueue.enqueue("test", "world")
#gameQueue.enqueue("test2", "here")
#gameQueue.enqueue("test", "test")
#gameQueue.enqueue("test2", "again", 1000)
#gameQueue.dequeue()
#gameQueue.dequeue(3)
#time.sleep(1)
#gameQueue.dequeue()
|
python
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
# This contains a 128x128 px thumbnail in PNG format
# Taken from http://www.zwahlendesign.ch/en/node/20
# openoffice_icons/openoffice_icons_linux/openoffice11.png
# License: Freeware
import base64
iconstr = """\
iVBORw0KGgoAAAANSUhEUgAAAIAAAACACAYAAAG0OVFdAAAABGdBTUEAANbY1E9YMgAAABl0RVh0
U29mdHdhcmUAQWRvYmUgSW1hZ2VSZWFkeXHJZTwAAFoHSURBVHjaYvz//z8DJQAggFhu3LiBU1JI
SOiPmJgYM7IYUD0jMh8ggFhAhKamJuOHDx/+8fPz4zQsMTGRYf78+RjiAAHEBCJOnTr1HZvmN2/e
MDAyQiycOXMmw5MnTxhmzZoViqwGIIAYrl+/DqKM/6OBNWvWgOmvX7/+37Rp0/8jR478//fv3/+f
P3/+h+phPHHixH+AAIK75D8WMGnSpP8vXrz4//v37/9///6Fi4MMALruf3Bw8H+AAAJp5rQrOoeh
edmyZWAbgd77f/bsWTAbBoB6JOpbmkF0OkAAgcLgO8gUYCCCnSIlJQWmw8LCGA4cOAAOAyMjI3hY
gMDvP7+f3791+weQuQAggGBi7FPmrvnf3NwMtgnkt/Xr1//fuXMn2EaQ5TB89+nX/wUlJSDbPUFe
AQgguKleiY2/QIpBTv727TuKJhB+//nf/xtP/4ANrK6tBRnAATIAIICQEwUjUCHIoyjOBYGbz/8y
8HMwMXCzfmcoLC1kMDH3YNDU1mGQ4PvLCBBALEjq/t958Zfh0dt/DL/+MDD8BdkBNIeXnYFBhIeR
4efffwybNqxgEOEXZLjw25Xh2QMWhmi9BwwAAYRsAMO5268ZZMREGGSEGBmYgcEL1MMAcgwo3D9/
+sIwf84cBhHLGoYAVVYGxi/3wDYABBCKU6dPn37s1vM//3/+/v//20+gn5/9+b/7yq//iw++/6+o
qAhy0zUg1gH5HYYBAgg99Srsvvzz//6Tt//beSf+V/doBGkqheaFL0CKF1kzCAMEECOWfAMSY3Yq
PvF7X68FKCcCPcLAA8QqQHwB3VaAAGKktDwACCCc5QETE5ODjIzMfi4uLoRtjIwiQBe8RVYHEEDg
WODh4dkBTMLuQE1YDdPR0WG4cuUKw6tXr968ffsWxdsAAQTWAbQJq+aenh5wogJpBpUNzMzMGGoA
AggckshZFRmA8sXz58/BeQKY2WA5kRmkp7Oz8z8vL+8WgAACG3Lv3j0Mze/fvwcpBuaLb/9//foF
FweG2U9dXV2RixcvguTNAAKIAVQWaPt2oGgGlT4gzSBDNm/e/P/jx48o8n/+/PlraWkJil5OgAAC
OUDEKvsgWOLdu3f/k5KSwOxPnz79nzt3LrgIQwY/fvz4X1FbDbIgAOQVgAACxcIbFnZesFcEBQXB
AbdhwwYGNjY2BmdnZzANSypffvxn4OFgY/j5+TvI9i0gMYAAgkUJI7Dc+/flyxeGly9fMaipqWEE
9m1gTv329RvDjAmVDE52dgx6enpgvQABBIu7//fvPwCmB14Mze+//geXBwKcTAwn9q9kEOIXYNC2
8IfLAwQQcqIIOHPv9/o3X/4z/PkLzABAR7KyQMoCPi5Ghm9fvjJM7i5lUDbwYXjI4sIwK41LHBgG
rwACCLk82Pvq038GaQEmBi52iAEwK/4BDbx7cTeDEB8/w42/TgwhRt8ZzNeeeAHyAUAAoSTL15/+
/f/++z+DrBATw/P3/xgeAkunt5//MSzYcpOhJYyNQUNDowGorA9o82eYHoAAQjFgw6kv/yV4/zLc
v3WRoaRxBoOEtj/D2cXhPECNAcAExAbUiFE5AgQQenkAis/PrkWH/u/us3MGsvdBxYOAeD3QAIy8
DxBAjNiKJXIAqIZ//PjxYT4+PmtgHmEAJjiGhw8fMhLSBxBALIQUcHBw1AINbAIZCkqUuABywQZM
kwzAnMBw//79TcCy2A+f+QABBA4BoOuZHj169FdWVpYs3wPzKoOAgACKI0BsYCnDwMrKyg204xsu
vQABxAQtkv6FhISUEmuho6Mjw9OnT+F8UNsIWHQxAMsChtOnT4PaSwzAVglYDBgNX9H129raci8C
AhAbIICQkTCoACEWgAoVDw8PcKl17Nix/ydPnvx//vz5/9jMAKqRh9Vi9fX1YLHe3l6QuD1AAMEs
ZwUVi6s37CTK8t27d4MtBrW7QPj169f/79y58x+YCDFKP1jJCIruurq6VyC+t4/Pf2DUgAozSYAA
Atvu4Wm5D+QA47hVoLIWwwBQsVpaWgq2FIRVVVX/gxp427dv/79kyZL/Fy5cAIcIPrBh/QZwtZOS
mvoXmLDngDIOKEQAAgg5CmLsis7+v3XrFlgDyAJIWoIAkM+A8Q5ufYEqidmzZ4Md8PnzZxzVGQSD
wN79+8F0ekb6X2C92AyqRmFRAhBA6PnUVtuv99CVjUXwlAysicEKQZUuKJcAm/7AlM0GrmyBwYi9
ogWa+hYY6m+AxeDPt9cY9PV0GSoqKxjef/jGMGvGZGmgec9gSgECCFtBofvu3ftLoJQNjFuwI0RF
RRlwNRkQbQ4Ghmfv/jF8BlZaoKDjAzYnb1w4wHDx+lWG98A66s27zwwVZUUM8vJyakAH3IbpAwgg
rCXVxo2bnvr5+Ur9+w+pFX78+s/w8w+kvQnyMCsQs7GAeIwM91//A6r5z8DLAQwRFmDVwwnUA1R6
4uhBhl0H9jG8efacgZldgCE4Pp+BiUuc4fTNLwyVwUJMsGIZIIBwFZUam89+u84GrND+QZMeKQ04
acYbDGs3bWR4B/T5kbtcDLouWQycvKLgqp0FGJBGghdu2mgLaoDUAgQQrqL4BjOw/augogGuXNnZ
GBn4OUG+Y2RgY4W2l7//Bwb3P2BpB2oGMjKwMDMy3ARW+5nRbgwB7hYMTk5ODIVdWxmiQp0Yvj5b
9qy1uHIn0NyroH4dyHxYDgAIIHyVhdvzd392vvj4nwGYdhi+AKOBGdpY//vvDwPr348MX94+BVed
fTPXMry4tm02qMbLzs7eBmynrwOWgsuA/G1Ai77jCy2AAMLnAM75S1a/SIwJ3QTqpoAEzFO3N7Nx
CTEwMrMycN8qvLB9y8FAoPADmFna2tp/rl69mglyCKh9QExNCxBAjCTWOxKg+h6Iv2KRAzXDxYD4
ORD/ROoG4wUAAURx/4BSABBAeMcbSAHA4jUF2M2YDWo3sLOzM0ybNi0SmBBXENIHEEAkt4hALR9g
FTsX2PJJBFrIwMKCPSMB2xcMwI4BwSgGCCC8LSJgBSMtLi5+AGiRCsgyUPFLTJRt3bqVwdXVFRQS
oK7MX3xqAQII7gCgTyKBrZplIIuAwUlyFADbAwwWFhZgB3p7e8OEZYD4IT59AAEEGzKyBuVb9CEC
YsHy5csZysvLUUIH1Bq6du3aLdBACD69AAEEC4GXwHYAuHYjFqxevZph3bp1DCtWrACH2Pfv38EO
AHWQgFU0OLqEhYXZQM00fAAggGBV3DPYeA8hAEq0SkpKDKGhoWCfgywFWQ7shTLcvXuXAdjzBLeI
QVEpIiICCl1hdDMWLFiwCtirBdsNEEDwEQdgcBFsih08eBCFD2qOgTqloEYMaIwJmPjATTPkLvG2
bds2IY9sAHt/6rDhNFAAAAQQ3FWtra1biW2Qgjrvly5dAteTwP422HJQo/TBgwcYTTpgg+Y/zHIX
FxdWYGj9P3fu3H9g6LwHNYQBAgil8kEel8NneXp6OthyUF8e1H8HNddAoYGtPQlSD+3LM2ZmZoLF
Nm7c+B86XMcLEEBgmw10JazMUrYSbFiC23VQy0EhABreACa6/8BCBxz0oEEFbJ4ANmiDgXoEQOyG
1tb/VlZWIDNAvWxGgABiSSqseXiHMUju359fDEADGCQkJHAmwJUrV4LbiKDEBeyxgjodDLdv3wY3
19TV1Rm4ubkZsGXlnJycNdpa2vfAQwXAtAbsP2wEMu+AWkUAAQQSkwU1yUH4ypUrGK4HKQImJHiT
HIRBiezy5cvgJjko4b18+fI/vugDhdK/P//+VTfU/09ISACNliaCogWULgACCJQVHp+aYtQEToiz
9qK4fP/+/aBsBC5WQdkNVLiAshtoCBqU3Tg5ORmMjY3BjVZ8hdiZM2eBbQhGxhdPnv4DOrofZDSs
oQIQQOC8+OMXQw+IvvaSB16axcTEMJiYmID5oKY3KG/fvHmTAZjwwMUuyCGgQTRcloOMAeFPX34A
+4I2DKWVlUA9P38DE+oRoDS8YwkQQLCS8POhPiNfi/Rdm0H9ehUVFXjnE2QRsMvFAExkDF+/fgWX
lqAmu4KCArifAIp/XPXTm8//GW5dPs9gbW3JwAxUtGL5ik7ooOVvmBqAAEKuDXfwcLIwvH37Fm45
MHuBfQ2MY3DilJSUZIDUikxgi5EHsVC668DAffcF2Ef4/BVseU5hAYMwjyBo3ABUN7xEVgsQQMi9
jT97JjgZvHkDGc8E9e1BdfqPHz8Z9PUNGLS1QcEtBox3LnDZj2uw4hWwEfvyw1+G38B+BOsviEcE
efkYXgNzGLC/0Qn0/R9k9QABhN7duTRn/pyPIF/9/PkLWJ9zAC3WBscz1i4YUsPy0zfIAPuHb//A
vSRulh8MZ8+dY4iMjWX49/cfg6OjHYORiYU0ul6AAMKWdAP+/v23HpT4YAmQEHj05h/Dj9//wRYL
8zCBHXTs4DaG81cuM7x98YLh229mhqjEPAZpaRkGNSkWPuRhMoAAwtbhOwmKe2ZmYDwDLf8G7A98
+g7qG/wHxi2w5gPy//6HWPYOmMhuPvsL7raJAC2WFmQGdlCAXTfGbwzPgenm0YMHQHNYGGxsHRg+
M4kz3H71jyGlbGoOsmUAAYStSfbm3M3XDAIiUkAL/zF8+8nI8PM3pMMJshSMQcPGTJA+IiewCcEJ
7Dm9AAYzGzNktuHZrdMMt+7eYeAA9qKffGBmEPinx3DkNNDRTH8Yfoh4tAHzVjvMMoAAwhYCv6/f
f/Xv6XtgKgam5j/AugTUMQZZyMSImKwAWfQdmJnefQM1Jv6D50zuAH14/fFnBhU1VYY3r18y8PHx
M3zms2F4/EUEaDmk06ogKw4q3OAeBwggrI3SnprEqgnLz3aAesCgXi8fEIPLGuiEDIyJngVBFZ+l
jgLDbWCZIcgrwLDj4l8GbSdDBi52JgZ3/f8M74FZ/O2rZ7C2IrhHBRBAWB1w89rlAwrC0PAGdXlY
GRmE+BjBQQ0S+v7zP8MvoO+/AtPDDyAN6jPyczEyHLryHjyC9ub1awZhUQkGHVZRBnOJ2wzt5Zbb
Jj55AuqYngXlNOSSECCAcBXgou8/fnn16RcneGxAQpAJHBKgIASNmoMGgD8AE+QXYBR9A6aPP7//
MGw69prh8e1zDOZCFxiAjRSGkJCQbaD5JKilr9HzPwwABBAuBzBdu3n/LwuvLDCOgTng639wnP+D
TFcC8Q+Gv19fMnx5/5yhu386w9kDK0CWzAE269k3bdo0wc7ODlTkggai7mIbH0YGAAGEq2Py7/jl
J98klKW5+Dj+MvAxfWJ4+/opw707VxnaJq1g4BRUYOCT1GWQF3z9G2i5JdSXjOvXr/8HtXwZMZaD
AEAA4esIRLu7e+bu3Ln9JJB9xSh2+SwOPikG2AQHsPIKh3bDwRULsGiWB9aeB48dOxYH5B4FZRRi
un0AAYTPAWxQ+Z9Qvg2w0XIYaDGo6gb58g2aen0gVgXiXaCSmdjuOUAAkdIVAqlVBjWlcMhLgio0
qMP+E+sAgACi2nwBLQGoRw7se7gCO7uJwHZnBLBNyobcpqAEAAQQy0B6DNjkUAR6KAnYvIgFpWFQ
EwM0tgEackBu5SH3eUHNlNOnT98GBgpovPMXpW4ACCAWWsQWsPUYB/RIPNBjjjBPgVqShAZ7iQGg
1omysrK8lpaWJpB7kVLzAAKI6CwA9IAlECcBPRMDxBwgj4EwrgEiagDQnHdRURHD4sWLGbq7uxlK
Skrgcvfv3weNEaA0rcgBAAEEDwBQzC1cuNDO39//AB8fHwO5QzUUZgmG3t5ehoqKCnCyB3UPQHMT
2ABoQGTt2rU9sbGxZcTUN7gAQACxII26/AcGwndQgIACgB4A5MEHwDbrt2/fGC5cuMCQl5cHbkb8
g89aI8oAkBhoCAuEQWxQdrK1tQUlCVA38xm5bgAIIPRMeX/Xrl0HQQ6iNgD1Ljdu3Ahf2hQVFQVO
xvr6+iCPMOTm5oI9eunSJUgHDehR0Fjb8+fPwaMP165dA9MgPkgclFrExMRAXeRjwIhjJdddAAGE
UgYADQL1f1yBsbJdTk6OKtkAlH+zs7PBMY0rOYNiFIRBngIFFMiDoNQBKgNAM+CgIRfQcAxIP6hX
DCp7YAUqaDjHxsbGAJgdLuIrmC0tLa+tXLlSA2Tew4cP/8bFxXE9efLkH0AAYRSCQMWKBw8ePG9h
YcGPb5qeGIBtZRhsNh00/gByfG1tLcPSpUvBMd7f389gaGgIlgOpA2VF0HAAqFMMWo6Eq3967949
UM2AtUD08vLiAeK7QHvEQOtjgCmcAeh50Ey/FjDQHwIEEDbzuCQlJVNB403UBKCRPNDYZEZGxn9g
coePc7W0tPwHDc6C1iEBYwS8aAlkN2jgFbT+CNuQIzoAqQOmtG5YioZGKouTk9NP0FgodNnR/zlz
5vzfsWPHf2Dq6QOldCAWAQggbM1NXv9Q/9OggTpcq6tIBaAx1Pz8/P8bNmyAexxkPmjFJmzBJciB
oOFR0BQ4aMUWSA/IYyB5YsZtQdPpoKk0qOfZHBwcnoNGob/+/P5/2owZ/1tbW/8fPXoUZn8CA2Rp
HStAADFCPS0UXTbt3uM/FuDi/8+PTwzavNcYeqqiKa4ROjo6wENtoDF9cHe7p4ehsLAQnMRBox+g
/A5aeAIa+wMlfVAyB+VzUHIF2Q0agCSmrQHKVsCa5AGwR6QBbKeI37x585S8vLz49bt3GKrLKxiE
geYBszaoIAWtGQCtKboIDKz3AAEEMhlUglrCPA9OOxy8DCfvsYCn7EFTb8QWhiALlixZAsqP4NId
BCorK1GW9IAKO1DeB40zg0p0EBvkeJA9oPwuLi4OXoUDaj0SMyaF3EJUVFRUAJZhFgcOHlwtBiw4
rty6yVBXVc1gaW7+e+bMmX/v3r3bC+0qgpZ1fgTpAwggRqT2gI1D0en9/xgglv78/JIhy/kPQ5i/
C96JM1DVBmrmIk2OMVhbWzP4+vqCqylQTIPqeGDeZ5CWlmZ49uwZeGAdFLigwACV7KAaB7QaGDTo
CjKLnNoHZA9oDJWNg51BSECQ4cLVqwz1wALWztr61+zZs/8CU0QtdLIe5Pn3oNVKIH0AAcSI1iYw
DClZfOLVP22Wf39/Mby7e4hh98xo+FJlGAAtS9q5cydDQkICQ1JSEsPcuXMxqjVQqQ6q0kDJHJS0
QUkd5GlQAIDm0UClOmh0GTTKDKriQDFOnsch9j14cB8YgIJAs4QYTl04z9Bc38BgbWnxa+HCRb9u
3LhRCvU8qCv9GbnlCBBAjFgKQZXo9MwDj7lTpb69vccwr1gNPEkAyoegUAbFKmhcHjR5gJ4HQR4F
5WVQsgZNEILYoCYrKOmD5EGBAqveQLEOzKPgFIArqROaFgbJv//yl+E2MKmrK0sByw0BhqOnTjK0
tbQymJub/dm6ecvXUydPlgGVnoZ6/gt6sxkggHAFuZStrfb0f/oz/ER/n2GY1x4PLpSAfQWG+Ph4
lGQHimVQIQZqtIBiGDSHAAKgGAU1YEAxDcpCIE+CYhjUgIHI8eCt23EtDQItGP/4DTRI9h/o+X8M
j+9fY7AxVgWaxcmw/8gRhq72dgYfbx+GbVu3MWzbtiULmudB81NfsfUZAAIIX5oDNdviDCLm969s
tGJQVVVFSaIgj4Nmd0GFGSjGQYEBKshAMcrLCym9YV1gSlqUIK0/gb3+Lz//M4DWp3798R+ezR7e
vshgZ64N9vzOffsYJgA7UmGh4cDGzg4GNQ19hlUrFmfcuH51KS7PgwBAABFyGTdotqp76vIZWQl+
DLDF4aA5E5CnQRjkEJDHQSU3SJ4a3WOQp0EDvp+BMf3l5z8wm4kRkez//vvL8PzueQZBXlaGA0eP
APM+L8OqlasZEmPjGLZs3sygq2/IYGRmy8DPx8NgYaIjBKrucNkFEEDERA1oPX7Z06fPakEzVKCY
BuVpUOEGHY2k2mDHT6BHQTMhn779g+yLgI3GM0JWwoGG6n//Bub5GxeAofCDYdf+feAIuHDmLIOn
pwfDWSCtpaPHYGRqzSAjr8bwl4GN4cal4/uC/ZxdYaU+OgAIIGKiC7SbYQ0wf9eCCkBQnoUNhmAL
TZiDiVmKBFL3DZi8P4Cm84Aeh818gD3MCfEwaECcA9hS4WJnZPj2/Q/DjZvnGVgY/zFs2buH4dfv
XwwXz55jcHJwZLh46QaDpJIeg4qOLYOEHNDzzFwMX4Fm/+RRd4LORTzC5gaAACI2c/L7+fnX9U+Z
W8TOLcjw4w+ou/of4mFGREiCVheCkuq//4jQ+AddrffnH2Q66ecfyLJDYIUAXob4H+pvUALi4WAE
eg6Y74CeZwZng/8MXGyM4MV77z7/YTh/9igDO8tfhv2HD4Gr1XvA7rGRgSHDk2cvGIRkdBgUtKwY
FJWUGV594WC49OgfUA/QTqb/DNy/b3+fmGcgCEwFP9E9BhBApJROVnM3Xz0qLq0CXiXIiJQn/xNZ
bRGq0hiQZp1AAQlis4Irib8MX5+cAmaDfwyHjh4GN3hePX/BoKWpxXDl9nMGRkEDBhZxCwZeEQUG
VnYuFHMFuBgZXr37yHB6frD1mmVzjqHbCxBApJRYd989vnRDSFRagxVY2KE7GBTTyEkfFOuw/QxM
0Lk1ZmhJxsQEmnAEBiJUzz/QfA+QAZoLBPFBMQ5SCp4P+veHYd/ayQyeThYMf5mYwY2mA3v3MTjY
2TOsP/yYQVDWhEFc0pyBT0SRgZmVA6wXZIacMCODtjwzMACAfY5ffAy2SvOOamqqg1IBysIkgAAi
JQWAAstj54n7m+VlpRkYgWkWFDl//6PGPCz1w1begqZsQetLQROczEAT/v+DrEVlBq3EBQYEKIBA
+kFiIDlQVmAH5oPfQPY3YPE/d/kOhsnNmQx1dXVg80FtClC12j5nP4OIgjmDsLwZg7aaNIOGFNCQ
318Yvnz9zPDtyycG1l8vv+/duvzaxg3rQas+QbUAaHpwKzAAUMoCgAAiJQWAZl1u/Pr8+g8zgySL
IBczMK8ygh0LCvHfwJAANVJAc9pfvjFA5rGheZyXG6IOtCKNm50RkufBSZwRHDi//oECCVHPP3r3
n+HOiz8MB84+Z1hbG85QX18H7iGCWqCgwY/mrml/OX99eHp40XzwXOcGYOucAbJi9DFopQJ0dgg0
PQVqlf3CN2gKEECkVtrPw/0ds05cuDdLUkiUgYMFMl0MSqqgCdsvPyHzp8CaiuEbkA2a0P/5F5Sk
/4HV/IUmedA8J0iM4T+wifz7BzDGvjB8+/aJ4dzNTwzs/94zPDm/+sW61YtBnvsI7G+EgjwOnnzT
198DigRo6w7UvH1M6eQIQACRGgBfv337eu78rZcMz77xAz3EBE7qoOQNbqBAS3w2pn/A0vwPA/P/
XwzMwGT5++snhi/AfsGfH58ZDhw6yrB+x3Fgl5uPgZVTgIGVS5iB+/3Gu5cu3JwH9RhoNQson7J2
dnbeBdXzISEhi4HtjtfQadhz0Jj+RslwOAwABBA5zbYH146t26wYmuoL9D445j59+szw68fn/6eO
7Hy6ceMG4R9cepwsQM+BVlqz80owsHELAz0rCB5nYGb1YNDx8WDY32cFmmXdDp2yfwdJF4iZ2ebm
5sfAzpNQbW1tFTAQQJ6/AO3QgJL2P2rNaQIEECOZekBp0hwaU8+hee49NM8JGBrId/E6rY9Cnbo9
9mtlX04xkAlarXWbAfsKcGjjkq0F2NfwgY75g8y/Ah37/4U0j0GVAAAIIFpN/4A6UmG+gS6pm9fv
6YT2xZ/CJvMJuIcX2nKTgMb2I2gKQVljQa0AAAgg+s9/EZctWaBu+w1uBWHZQkKtAAAIoAHfPzDQ
ACCAqLZ/gZYA2PsUAbYDkoGNoOi/f/+elJWVTaNGDQACAAHEMtg8C+xtGgMLQdCiiRggzQcaPUKe
hn/06JEukCqE9lIpBgABNGABAJqvu3v3biDQcwlAT/rCutggjG+YDDTSxABZ8U6VAAAIILoEwMeP
H/nExMSSgZ6LA8asAciToAFUEE3qcBmoSbx48eIcYACWUSMbAAQQ1QOAiYlJFxijoJUksUAsDPIk
aGaIWitJQJ0hU1PTPCAT1Dv6Tql5AAHEQkkSvnPnjjfQk6AkHAwaFoPlV1JGeokFt27dAk+ogNbw
8/Lygoaj+KkRAAABxEJkEmYXFRVNAXouHpgHTSlJwqQA0CYl0KoRkB2ghROwgxdAg7DA7rEDkLmC
UjsAAgjbIil1oCfBSRjoQUlYrJKyu4oSAJo8aWxsZGhpaQHzQVsI0GemQJMs58+ffwjMCqqUrhcE
CCAWpCTNAUxi30GW0XLlFz6wY8cOhm3btjFMnjwZfMzSvHnzcFWVoBkqeSATNKH7lhI7AQII2aeg
c2zuiYuLKw1EAGzZsoVh1apV4CU1oO0xoAlW7GOHkAVToDkIYKTJUhoAAAGE7NPfu3fvPmphYaEE
Svb0SOqgPX+g6TRQeQKaaAWtCwJNnoLmH5ABbP0QSA+IBgFQluzs7GwGBkIgrsXoxACAAEJeJwjy
tT/QASvRHUBNcOLECXCBBtqLtGfPHlBehssdP34cvBcVOa8jzzCDAgB2lgGoOgTNUqurq/MD/fCJ
XPcABBByCgD1tV+AJjexLW6iFID6HKBtp9XV1eApcdBiSBCGrRwDeRQ22QKKcdDsE8gtoKl0EA3b
5gqbbAWxoatXBKHdZrIAQAAxIXUvQa2qt6CQhiUzagHQMlfQFDtkp9l/8PrA9PR0lGVzMM+D7AZN
qYNmm0H79EAYNgcJagaDWoKgQACpBwUAtFVIMLaAhWrUokWLDgL9aY4sDhBA6BN6H65evXoLFPrU
AqB1AKB1AiCPg2IcW6EGS+ogj4PUgiY+QB4H0aDkD/KwsLAwGIM8DZtuBwUItFWId+93dHS0eEBA
wJKmpiY7QUHBE8AI2QvN9kwAAYRe3H8BFoR73N3d1aCdDoqAoqIieAceNgAr1JDzOCibwM4mAk19
gWIZ5GnQuiHQ8hlQ4Yw8CQvKpoRahaBNUubm5mdiY2MZQakIlJ02bNjgAKxiQWXeL4AAQk8B3+fO
nbsXtsiBEgDyFCipowOQI0ByoJjetGkTfAuqm5sbOGa/gIfIv4E9C5qMBRXIoKwCksM2Aw0KsNLS
UmtczfWqqqrslJQUGVD75syZM+B5BWCqew1tQzABBBC2ITJr0M5wYtbmkQpAawFBB/2ATjg0NDSE
rxlcunTpf2ANAN4MDtoUfvr0afDiSdCCSdAebXxuAZkJVP8AVDOiHebH6Ojo+AHoafiud9CJksAs
AFooCVrmAtrfww4QQOhZAFwQghYoApMoIzUbRKDkfvToUXBDB9S8BVV/oGwG2g8Mil3kTRWwKXhY
aY+vRsLWKgTFPDD7cTo5OfGDVp/DaiGQWba2tn+AKQ+U/ME9NoAAwubDj2fPnr2sqqqqR60AAOX3
9vZ2sOdA+5FB63VBHZu9e/eCCzjYNhnYchpYNUxsVQzKBlpaWjJIrUIWYEx/Bq1MBwUkyP5jx46B
p9aAnmeBDuGDlsT/BQggJiztAuH1R1/pggoiaoH6+npwPgYGLNjzoCoRdAoYKO+DCjdQSgB5HpRK
QDUBqAyANXyIGbQFLcbq6ekBtQqZQbEvIiIiYGVlxQQKmMvA7NDX1wcuT0CbMYABAjpXCzR/CFpY
/RcggFiQ8j5/aknt6Rt//FTu3z8CdgRoZRilDSLQSnBQSQ7y2Pr168GrQUHJH5a6QJ4EeRbmaVBs
geRAKQPkMWJSISiWgS1CXwbI0To/gO0GXmDT+j+wgGW8AWxunwQWfpcvXwY3xIDgALTh9BPU9gEI
IJDp7EH+phFvlGcuuAk6ahQ0t8clzECNmgA2fg/q2Hh5eYH5oPodFKiw2AbVBqDVZqDSH9bYAaUK
UN6GLa0jNhtAW4VvJ06cuFNbWxs01cYMOnOEA5j3QRsugI2obUB73kHHE8GNHYAAAmUBaZDnUfIB
Bx94Cz65LULQ5ghQzIMKPNCZlTDPg8RBHgJVg6DWHSgwQNUSqK0AW2oHinVQYweUAkhZfAVqIQJb
ernAVMsbHh7OCyxAmUELpa8A+x3A5P8PtBwf6HnQ/CJoiu0jdMKFASCAQKb/lf4w4S5KALDzMGw/
fBW+EozYer+hoQG8Ehy0aBpU74I6PKBDtkAAtBECtDQW1swFDW+B2gmgDg2oMQTyAKjBA8qroBQA
a+2RMlYITPJ5EydOOAEMBNAJUwwrli1nEOTn//fu3bs/wOy1kgFywM8TBshyWfBkLEAAgQLg+fJ5
i3JR2sesHAz7zr5kILYgBK0g3b59O7hkB7YkwV1bkMeBTU+4GlAfH2QeqMsLqglAGLScFhTDoGUv
MjIy4PIBFPsUDLWxOTu78gMDjvEysNq7dOECg6aGxl9gO+EfMOBBjQNQRIMaQfCYBQggFmgv8Pyz
DS6zpAL2pIEDgJmd4c03SKsMVHrjcgwoX4GWvIPorq4usBjIE5mZmaATVcHJGORh0EgPKDZh/X1Q
OwBkNii/g2Ic5HlQjQDikzugCuq/fPzwkUFdQ1UAHPvAvC8iJPQPWPiBlsnPYoCsOwC1yz8hrxkE
CCBYEfv6zr13C1UY36d9+w8s+ZmYGVi5BOEnJWELAFCMgo75ADYzwckX1MgBVj1YOzqgAAGtJwZ1
bkBJHtTFBeVvUMyDltCDaEo8D1sqr6yizMDMxMpw7vIlcOzb29r8mT17zl9g9gR5/h567IMAQADB
ShhQgXBtR69zFkyClYOf4cWrd+DSGh2A8jiwcwH2PGgrG8hDyJ6HrRwHeRoU6yBPg2IelFpAYqAA
BVWxoHY+yPOkLppG9Tyk/Ll29TqDsAjkAKmVK1YyiAMLvqtXroEKvwnQvA9aZ4CxZhgggJCLWFDJ
eEjm/x5w/cfMxg0sCG+ACyhkADpHy8HBgcHPzw8c8qBJD2SPg1INqFoDeRZ24hBoTB9U2oOGv0Dm
gTwMSvIgz4NKfWI9Dzsi5z/SyrT///8xXL/1kMHU3BQY+8wMJ4CNravAOl9ZSfnP8RPHvgMjELSc
5inUfxixCRBALGj9gPvL+ssS7YrOrQKtudt//jVDMbCBAkriIABqURUXF4M9BurnIydB2AgOyPOg
wg1UrYH4oNhB3vEJ8jyoXAF5HlR342vo4GsEgpfZAhPzu4/ArvOHVwyCupA5g1WrVgIbRSpANz74
//DhoxkMkPVEoKSPdU0RQACh2w5qH58SfFh56p1cq9kPBl5wCw12YhrI8+hNU1CsgzwJ2zMASvKg
LAEbVIHtG4DV56DqChT7sH1BsPKFlGUKoMWQn779B680u3HrDoO5oQHYnIPA9v6N6zcYXBwdGRYt
XPgVmA3vI8U+1kYNQABhC/5nG9furLIrat8DWtgEC4CAgACsngfJg2IctG8AFPuwsT1QXQ7yOKhw
Q97pCRvXA22YAJ2LRIrHQWEFOlEEVCyBzlB59/E7gxDXX1grkGHdmjUMOsB+xof3HxiuXr26HFrn
g2L/O66JVIAAwnqUDRBfvrnYsg/UInzw+BU4NkE9N3TPg5I4bPcXyPOwOh20FQa0cww0IgTZECUN
LPCkwBh0JhPoiBtS6nrwThGgq95+hhwm9OYr6Iyfvwyvnt6GT5ftPngAWN7cAtqpwHDrzi2GiKhY
FwbIAspPuGIfBAACCFc78+3L1z9XiLC//rfj6C2MghCUl0GFHSjmQUkeVOKDyglQlQYa8QUdmwTy
KD+/ADimQakAlPRh/X70EyLwl3yQWActp38P9DhoPwFI56+fPxhkRDmA9nKDlW1Yt57B2MgE2MZ4
wSAkLMVgaGyuDh0m+4FvGh0ggHAFACjEbm6bmph18OI7BuTd5LB9QqB8Dsr3oMAQEBAEexoUw4KC
QuCkT+kmCvDxLX8g55KB8vubL5DVprDU9+zBTQZFYGCDwLbdu8C1jaqyCrBtcoNBQU6KQUddEWYM
3g4NQADhcyGov3xY7P+5j7ByAGL5P/BpX9+//wB6kBnsYVC7H1SwgVIBufv+0D3/Hhjm7z//Y3gH
Su7Qw7pALgDvRfj7k4HpzzuGNevXMazZuJFhy6bNDJampuC+B6htISElx8AvJscwceLkIEJ2AQQQ
E/7Ex3Bvz+bV9aA9Qoj6F3LSGMijoJIc1JSFleiUbpuB7QoDndX0DohBB4jBAgTkcdDaZEHO/wwf
XtwB2ivOcA/Yuzx//hywN/kQmPUUge2N2wzSMrIMmmoKDGqKUgzWdk7VDJDzgHACgAAiNNoA6g2d
BvbZ/wGTHRMs78ImJUB8ap0jAj4DEJTXf/4H7xBjZEDsPwB5nIcTkqq+ASv/r5/fMVy78YiBA1i2
3Lt7l0FPW5fhDpCWkBRnkJFVYmDlFmf4+IMDWKfzgsb2QX2Dl7jsBQggYqLs/smTJ27Beoaw4/xA
+RxUqFHqefAOMWAZ+/LTf4ZXwFj/CvU8eA8B0OMivEwM3KCTvEBVKPM/hif3L4PPtr4PbIx9/gRs
agNbl5Cjyd8x8AnLMnAKyDL8ZeEHNvmYGPh4eBk8fYK08dkPEEDEBMCn/Pz86aBSHzYZARutwZXk
GdFoXB4HD0EDC7kXH4DJHZjX/0G334D2FIE8DtpDBB7t4WIEn5cGKndAVfI9YJJnBAbC08dPGJSA
SR80qvSHkZNBXFoR2BsVB3qcA1hjABsBn9kZotPK5iGfIIcOAAKImOgD+fwmrCBkgm57+Qvd3QGq
n3/9hZQN4H4T1GNMDBB5kEdBu0ZA2yQgZ7RDxL8CExSogANtkGCEbuIHJSbQ9jiYGlDMgzZn/QTW
/Rxs/xju3boMNucBMPa/f//G8ArYyVIwNWN4+OQlg6qaJugweYb3f/gYPr1hAqaW/wxvvzAzHLsn
CBoyB7XlsZ7hCxBAxAQAKPW9ffv+CwMb31+G33+ZgB7+D94OA9shBk4I/xHbZUD7BpihGynAaqDi
0L0S4MBhYERsjgKZwcsJ2jEG2VnCycYAPikRZAc7KyPDb2DA3n/2DVzt3n94H7xd7svnTwyyMjIM
n4DVMTsnL8NvLjmGrwzAzhUjO8P5+8DAevGPQYyfkeHHf3A/RhRXAAAEELEZ+M2TVx++cQn/5mJi
YQNvZwPV0d9/gUptSEkN2hYHa9b+Q/IYOCCQdoGBNleAT2UESjAz/gemCkbwJguQmSAZkOf/ADWA
jg/8/x+y2eoHMIk9un0B3Oh5/voVw3dganz98jV4y9yFG48ZZJR1GfiE5BhOPeIBFn7/wZuyQJEC
akCxsnMzVE7YMQWYDTyxbZ4ECCBiA+Dz0SOHD//h1XTn5mFBKaFh9SUjUqZnxkHDCjdwfQ4MOFCb
G+Tx/9CtNKAN0UyMsJ0nEFNBSfnPr+8MrAy/GK5cv8vw9dtX8F0+wP4+OEX8ZRFkePhNjuHtMyEG
Lj52BuSeNShSmJhZGR59l3GDDpljrC4FCCBiK+5v3S2VS379/A452hLN88g0uTUBAyx1MEK23cBM
/fkb2Od4fpHByMiYQUpWhuHLx88Mn4D9DkkJKYaDl94xvPkry8DGK8PAzsUL7lyhjx0oSTAzaCmA
F1IIYLMbIICITQGgOvDZD2Do//uHKAj/w/sGiC2zIDbIIyAOEzCJs4L2D4JikhGRD0AeBJcTwFj+
9x+0fxBC/2eA7CeEtDoh+wh/AmP/9+dnDEdPfAGP+LwGdr6kxCWA3fPPDP/YgP0NUXkGDl4RBmYW
drCHQQWyALD2V5dhYlARhxROHz9yMJRV1oFOjcG4wAgggIgNAJCL3v0COgYUAP8ZIHe2gU5VZQWm
bw5WUOEF3SzJAKEhSZ0RvgEStDHyP9QkRkbYRktGaEqCjQkwQrMSRBzY+mIwt5QDjzBPmTKFYc++
fcBW3wMGG0trhgVbrjAIyZkw8AjJMDCy8ALtYGJQFmNk0JAB1h5sv4F9lW8Mz55+Ae8h/A10t6GJ
RRKwHFiKvssMIIBIacW85f7/7r2UwB9BLtDmRQZEgYdcXcCqSPDuT1BMQ/cQM0PTOQsrKFVAPAra
LAkrI0ClP6g8ZWOGlAkgvadvfQMPxIDmC0BrB0FrCDrbOhhWrFjJwM4rycAhoMDAySvAYKr8l0GY
8w2wlfiF4cndjwys/78xPL5x7OmCOdMuALvqL6HjgfuxNU0AAoiUAPhyeM/6rRra+jHAJiB0Hz8D
tIUGSQnswBTBww6p70H7i1mQCiRQbQHaHg9K6qCzekF7C0E0qBAE6f3C8B+8hZaXiwl8aPCdFz8Z
7jz4AG5xgqa4QR0t0PgC6Hqlg1d/MEgr8TIYyf1iEBG4B+z1v/uxevuq6+vWrgbt/ngHHQR5Bh0Q
gW2kfMuAtOkKBgACiJRuG3gZ3ZHzj1cqyUuBW2mgqgrkSVAqAHViQB4CbZz8Br0h5e8/SMBwcUCy
BUg9JysjvO0AKi/AbYr/kOYw6BDblx//MTz98B9YwH0BtvNvMTzfUwjOAqBRaNC9evWds4F5/O+X
OzcvX3v58uVd6JDXS6hnn0E9C5v/+0FoDSFAAJGSAsDL6MR5fgA9D7l35jWw7Q5L6qCmKg/QgxIC
TPCNlKBTtsG7SH9B1IPO/PgBPRIYVEWBYh+8dfYvpCT9++83w5X7Xxku3vkMLPheMDy/vpPBztaW
wcDAADys3tPT8/Lq6ROg1SCgbXTHGSC7SGFzfQS3yWIDAAFESgCAC8L7z78x8IkAW1kCLPDqCpS0
wUNVn0FJ+h+4kQQ+9/gXxHOgghMUUKB2DbhQBLXy/oMkfjH8+Qls4X36zPD+/QeGs7c+Mwj/Pv/r
4vq512/evHFfVFTU0ts4Txw09L5ixYrXJ06cOMgA2T0K2gYP2gz9gdJNEwABRGpX7u339w8f//uj
Ivv8HSPDp++QZP/tNyQ2QdkA3A+AlvL/oS3Bf/+BFSEwdln+/2D49xtYlX7/zPDzG7Aa+/WJYee+
wwxb95xm4AU2V+7dvAHaGwyawQUdjfddR0fHH3Sjwfnz5790dnZuZoDsMgXtGb4JneKieMcIQACR
GgDfZsyas/KfiEUJOw8rOPkyQmMV3BZghJzYwMr0D9xyY/jzneHXj08MX4F1NqiEfvboHsPaLfsZ
7j5+ywDeWsspyMDHy/b/9c1ds4Gl1mGg+WegJfYPAQEB5cbGRsZp06b9XwEq9iGevgid4PxI6C4Z
YgFAAJE6dgXaNOC9aNeD9YIiUuDSn53lHwMHy18Gpr+/QPuHgc1U6F5iYCwfPX6cYe3WowxMrDzg
uUZWLiFgo0WCgZVbBDz19mZ/4pb7D94vgObnl0jjd+zd3d1P//79K1xVVdX379+/N9CkfxVawv+i
1sZJgAAiNQWAmu+v+FiAeZX1HTAZfwG3yF4Ae2QfXj/4smTBrKe33/Gpg3eFg2KYR4xBTCcQHNMs
QA8zs3ExMLNyMsh8nf96+ezp5VCP30ebsGRUUFAIAhZ8wgsWLHgD9Pwz6MzuTWgJ/4uBigAggMgZ
znnH9Ovjr80r1jyfMqnv3OfPn99CHQaKwe9WNiZ5v1VTtUCHgIM8zMTMBll3AxrYYHr+f2uPPWi9
LCi5X4OO2aPnYzbQjpXU1NR/Dx48aIZmidvQ+pyqngcBgAAiZ/gWtA7HEUrD5t3eQj0DSiGWDkWn
d8NOpYOB55vc1t6+82YBNCm/wjFcDXKPILD0f/r69esMaODcgKaAj8iBRa0sABBA5G6f54Y65gcW
j4iJi7KXq8ceB19kKP15Nr7kjg5AoQZa3qLHANmBDkpZ16EzPDTZPQ4QQORkgf8M+Hdtvnn5+udy
B6Yz2St70uoIJHdsgcsEbdR8g3r8HaHJDUoAQADRas8baPBBCuqRVyR4ABQhoJlOPqgeWAsPYySH
WikAIIBotTsKFHt3yND3F+rhn1D2b2yepyYACKAhcb/AUAdycnKijx49ej0Y3QYQQCyj0UM6AK3H
vnbtmjILC4sRKyurAZDWB2I9ZmZmGdicEQyDJtJfvXr1DKgHdO7MF2pdjEEtABBAoyUAUqSeO3dO
gIeHxwgYmYbAyDMAYm1QBKNHKjImtBAAlABAa+MuXLiw2MfHB3SI0FtcRzsOBAAIIJaRELG3bt0C
5VBDJiYmUMTqAyNVF0gLIkck7Exv0GwvNc4GhfedWVnBK7iA7ohNTExcNH/+/OMMuE+QojsACKAh
WQKAIvXSpUuyXFxcoAgF51ZQEQzEirANV6A1C8gRTOnRvpQA0AJR0EIy0GHv9vb2oEtAX1Cy2Zua
ACCABlUCePHiBbesrKwhMKJAOdYYGHF60IhlJbcIHiwAtJwSVBVs2rSpMD8/fwF0RG/AT/ACCCAW
euRWYMrXBOZAI2h9qg+NVDH03ApaV4pv0c1gBaDVkqB7aEFHzYNORgAtJAJdLADaJQUazIdt8QZV
BQ4ODqA1a6C77b8TGBWhCwAIILJKAFCkXr16VYyNjQ25waQHjGB1WKSiF8MDWQRTC4Dm60ERDTrk
ZNasWeBiHQRAuz7KysoYsrKy8N5FAlpODyoFzp8/v9TPzw90ENqbgS4FAAKIBUcEMwEdZgX0TBe0
e8MNikBYhMIaTEOpCCYXgJbdgnbAga4HAR3tA9rjCA0j8C455CtFCAHYwlJtbe1oYINwwWBoEAIE
ELYLpkDlL9CdfPLHjh07JSEhwYZvx9xwBKAtD6CDbUA5HHTQzbRp08DioP0dkyZNYkhOTh42DUKA
AMJW2YKPcv/06RPfoUOHzoACA3mR/HAFoJ0HoLr88OHD4MifPXs2+EIgUOSD1qWAdiGAGnLERD7s
jCvk01FgJ6SAxEElgbS0tFlvb28YKLMRc/YNrQBAAGFUAdDbFkFj2R/WrFmzzc7OzgpU5JO6g3mw
AVAEgLamwzYtgiIHtp0FtMIatOcXtBES+bQX0EKkAwcOoOwLxBXZyPcEwjCIj5xxYGMMoHAEhSew
BADdvQVa7AAK7x8DES4AAYS1EQhdUg46zVl33bp1U42MjBRBy9PIvQFqIABoExeo/gZFOqjeBh2b
AzqlDrSkFgZg13rBLnSD7YkE7X4D7XoH3XyJL8Jhm0BBy/ZBCQzGhsnDIh3WIIadxQhiw+5ZAzYq
lwUEBBQMVIMQIICwNgJBdRIwEYDmPF/vBQIgO4Uep2ZSCkCbVUFHkoAiG3RoBbCnAt6Z6+PjwxAT
EwNeYAra0kRO6x+2+R1WpIPCA4ZBRTss4kEAFOGgiIZFOPJBnKDIh+02Apmpo6MTBWwQzqd2g3Dt
2rWcoqKi7MCq5hOwe41z6BkggHB2A6EnCkoDsdHRo0cXKCgocIP2gVJ68ygtAOhIBlCdDToXBrRV
s6CgAHwIJ7nXQ8KKbVgRDsvZoMgG9fFBbQFQuwjEhm0DRz7uCdTnh20PhR0JBetBwbrDsKNjqN0g
BCb2TBMTkwZgqSICtBvUXf8PLOm+ysvL7wOGU0p8fPwbpJ7eP4AAwjcQBFqAA1rF8BLYGzgOTE0u
oAClxk5IagFQfzw8PJwhKCgIfOwsMNWTnKtx1eEwNqx4hx1uBDvgCLYdHnacJag9AcPIEY9vDASU
KEDVDzCCzCZOnBgCGiEERgzJI4TQRiSzoaHhNFNT04S0tDRW0FlFoARpbm7OePnyZV5glegPbNR7
Aruuuc+fP58DLf1/AQQQzgSA1Bh8O2/evA3u7u4uIM9TY28gpQCU86ZPnw66bIXo3glyRMOKcljO
BuVoUOMPVH2ADlkDHfEFakMA++vgfj6ozw/SB0oIsLOUQIEL6haCIhA0wgeikTeDEzvwRekIIbTb
zqaurj4TiKOAPRcW0PEloMQHWk8Nqg5B7gFWMaBRSmZgKQ6KPNCarp+ghAMQQIRcCFukpbts2bJ+
S0tLLVBjEHa501AYuUPvjoGOKwIN6IAwMEdgRAao8RcREQGOQFgVgNyaB+Vo2CEYoEiHnQkBuxSX
nBFP2AghMLIWBAYGgq5DfUtMKVBYWOgKtHtzZGQkE7AtwSIpKckIa+OAGr6ghAyK/BkzZoCOdfoH
jLsZwEQB2iUEWqYFag1/AwggQlkZtkTpNbD+2KOvr68FKv4Ge28AuZUOammDLgIGHfQDO10I1LVD
BqCtF6BhXNg5+MjTwrD6GnY7Kqw1D8LI9TslQ92wEUI9Pb0EYNtlKTC3HoN2DXEW90B1icASaFZU
VBT4wCLkk59BpRdoAAvU4wF1e4Hs/8CcvwMY+aCl5YLQOAUfJQEQQMRuFH6/aNGi/cAGRAzQoUKw
Ix8Ha44H5XSge8FHRoIOMwDlVFBggA43gwFQ9w90tT3oiEnk1jm2xho6Rk4c1JjjAOmHHZYILIFa
gQkgGBjRv9AbhNDinhWY42uA3dUaUMIFlsrgHWQwAGpUgo7wApUooAEs2CAUMOH/grbrQBjUgAGZ
/Q8ggIhJAH+hgxTf9+7ddUdGRsYMdizOYCoFQJEPyu2gIzJBI3pKSkoMLi4u4HvPQUd2woCWlha4
iwg63AU54tFzPKzIhyUqbA1HWDVBjURAqEEIOhoUSLED3T8Z2F5JAs2cnjt/Dlx9gA6rAw1agfwD
mo0ERT5oTxVo/gJ0zzuwRAB5AHTkD2j1PmiZ8WdonP4FCCAWPEPEoP4eb3SUY9J3ibjGx+/52bdc
uckQ/AXSBQLVewPdGEQexgWd3Adq9IC2E4HugwYV+6BzHGEAJgY6zgi5yIZFNnp7AUbDqhPIMQmI
kTzkrh1swIfSRICrQQiLfNCwMTBidUE7Bt3c3cEnBcDAnIXzGXZs28HwH+hufz8/8FZK0IWcoHMj
GSB3z7yGRv5HaPUCKmH+AQQQC1qDD3QcOV96VlTPE664yM//xBgfg5M+sJ/L/h6YJIQYdh+7wRDq
xQ926GCYDQSdXgg6px6UIzw9PcFFPbDHApcHRdTmzZvBp5gin0qMbSQPNl4P4sOuhocV0bC6H1T1
wc7GofbQOMhMUCkgJCQksnr16srQ0NBKYOSDimvQriye6OjoBGAjUR7ov19AdfABmWWrVzKcPH6S
4Sewd8QDrEZAp7aCzssFRj6oNzEJ2p0HRfwXaKICVQPgYg0ggFighvNlFOVvvc0YZv77Pyd4LwbK
anRGoGdZOMA7nvaeeczg4/gd3BWDtXwHCoBOZATduGJrawsu0svLyxnOnTsHlwctygDleuSiHHly
BuQHGIaN6sFyP6zBB/Ij7MZ45HO/kBt/1FzrAGsQAtsmKcB23kpgewB0KSMjsHuXAaz704ENcR7U
yF/NsH/vfnDjVglYDYAOcAMd0yssLPwNKDaXAbK95hU093+CFf2wqgUggECuBu3ikOHnZfA0Sz00
4ScDD/Y69t9fhl9f3zB8fX2LoTpWjcHKRAvc+KB1WwCUw0ErbUBHboP666ChXdAxvLBxfFBktbW1
YbTsZ86cyZCamgofwkUfyYNhUHUGy/WwuhgUwbBGGQzDunrIgzu0WrkEGyEERtIpR0fH6IKCgqT4
uPhYdQ01EU5OLvilEUtWrmQ4CAybN69egaq+v3///Pm7b98+UAJ4B9S/mgGycRx0bCjo0FjQLjvY
2el/YAkAIIBge5FAFzEoA7FVQMma/nf/lLD67M/Pzwzf3z9k0JP4xFCR6gzueoBSK7USACiiQBfK
gCIblENBkQ27OQPWvQEdQwqKbNDp+/hGCIODg+E3dIAiHnYvC2jmDzThA+LDjiyHHYIFq4NhGDni
kXM7PQDIraA2zbp168+7u7tJamioCXBxcXPAjnpYtGI5OPLfv3nLoK+n9xcEDh8+/P/OnTvPnjx5
soEBcnrAIyh+AY3878iRDwIAAcQCLexBRwmBtiD+2tATkhhd2Dv9MaMjF0YdxcIO3uh66tZzhvcf
P4NP6oKd9E0uAKV0UGSCIgY0IwdrdIFGrkCRTSoA3UYGGhqG3U4CimyQ2aC2AYgGBSzsOFdQZMKG
cUERDvIPKMFRY3CH0owAsvPmzVsMAUGBhlISEr+Bkc8Ki/yFy5YwHNh/kOHDu7cMBnr6f4DhBY58
YO/gLjDyQXUeaFMx7MzcV9D4/Y5c9MMAQAAxorFBZ8yBLunSi80unfaQPVIErSJg+P39A8O3t/cZ
fI2ZGeKDbMClACgRkBJAoFwJGnYF5UJQZIOOogZFHHK/GHTlA2gWz8PDA9yyB/XniRnbB0UsKIJB
RT0o0kELWkAYFPmg4h5U1IPMh43mgSIcNHoGinzki4oGIuKR7xXaC8zdoKFoSdDIKycHA/QQHYZ5
wAbeoQP7GT4Au7z6unp/gHr+AqtIUORffvr0KSzynzIgbr3+jF7vIwOAAMLmO1B5CxpW0guNDmx9
KV6Lcubevz/AxtLHpwxsP+4xzKzxArcDQDmXmC4hqH8KGmsHRcSaNWsY5syZAxYH5T7QGeOgG6ZA
/XdiBnuQu23I3TdY5INyPigBgDAo14NKA5A62A0VsIgHYViuJ3UcnxaR/+3bd4YdO7YzGBgaAds4
kgzcnJDMBTqAYu7ChQwHgdXfZ2CC1tXR+QN0Jyjn/wOG6Vlg428zNOKfMiC2FYNa/T/xHSgBEEDY
Yu0H1JDfq5euL7K1vZXNaLrYD55imIG5g52X4dNnbobj5+8wuNnxEuwSglqooBwPKupB9yyAgLq6
OngaF7nLhmtYFxY4sEiGdduQaeSLqWAte9iULayBB8rZoFwOyu2g9gUo8kFuh1VjtO7W4pq3giRm
0FHM34Fd1k0MVpaWDBKgkpWDE9xCA3Xk5wJ7O4cPHQAdl/bf0FD/79cv3/+ePHXiH7CRfOzli5c7
oXH2BGmw5wu0r493azZAALHgmQoGpaLfhw9f7VN56vJcLnBb+p//bOAz2ZhZucAHX+w5+YjBxkQd
5xWroGIX1E0DXWUCatiBWu2gC7RA3TNiFl8gT8fC6nQYBkUsSAw5ASAP4sDEQGbBJnBgK39gkQ/i
w7p4lDbuSF0yCTtg8Nfv/wxff/xl+PDpK8Pxg9sYbGxswYcBgop9kPyP/38ZFsybD2z4HmT4A/Sz
vr7e35/ff4HOz/h/7+69Y69fvd6JVN/DGnuwyCe4BxEggPCV23+gdciVO/feLbjTa/HAu2Rr2+d/
kozM4MYgH8OVR88ZHj97Db/2BXmxyNatW8F1OAjExcWBZ97wzR8gT+Ag52RYLobNw8O6bLAEAksw
yO0H5BE62KgdyH2g3I5c38OmtglFPrXWwzJCT46DHbf5+88/8JF4X4F+O3pgK4M1MOeLiokwwHp6
34D+nD8fmPMPHgKfKWhhYQlqzzCev3D+77lz53a+efPmKDDMnqDl/K/ERj4IAAQQMbOBoBQFGhv6
sbXH+1VYybwZLxj0WZnZeRjYuIUYth+5xaAoJwmOGNhiEdDRdqALkUGrdEDXbBAzXQuLdNiiC1gf
Hbb4AjYsCzu2F3ZgM6ENKOgDOrArOZHre1oteIY5A2T+j98Q/A96NvDPv/+A/vvD8PHLV4ZH148y
uLm6Aksnfngm+gRM9AsXLgBHPsggcxMTcHg9e/6c6ezZsweA7alj0JwPi3xYzv9Nyu5jgAAi9tRc
0AgS6MSLX6t6khKj8xqmPWRx42PhEGA4eOEWQ0IApM6FzQ+A7kjEt1ADFvGw4h15cAaW40HisEkY
WA6GDcXCpmOREwGs/sbMzYjZO8yJH9pEPiP0pEDQ6X/gIxL/wU4U/A8+Dfj3b9BheX8YfoCOPr53
msHezoaBm4cbHvnvgI3WRYsWMhw5dJiBCajRzNKMgYWZheHalcsM33/8YRQRFfsKTABvGBAnBcJy
/m9St54DBBAjiWpB3UTQ2S96EQkx3Q/Y45S+vr3LEO8kwBDkYQ7uEYASAa6GFPJwLKyIB0U67BJN
kBhs/B2Wa2Fr7GBj8LBIhyysZMKYxcM35YpMUz3SGSCRDiraYQcBgxMBMMJB9Tz85GRw++YPwzeg
v5/dPgFs8JmDqyYODkj1+PrjB4bFixYzHDl8mIGTg43B3sYe3Ja6desOw2+gefwCggw83FzfWptq
QRsULkO7e2SdlAgCAAFE6kD+b6hln65cuHRGT/qpxncea6lXL54xOJrKwxdCYgtkWB8dlLNBkQ0b
lQPRoIQAu0QQ1B0D1dNCQqA7dEWQWut8SMuuQPdssqHM3eObv0fG1MzlsMOTwcdhgs4J/QmJfNDx
9yD8/Tfk7G+YeuTI//DkAoOdnS3QT6C1FZCc//ztG4ZFCxcxHAW2lziACd3F0QnYTvjDcOPmbWAY
/WQQl5BkkJGRZZCRlmI1MTFh3rdv7y5oO+0XuUvKAQKI3BABDf2BBol0NdQls1g0SvwbknQZzI00
wddooDf2YPU8YnQOMQYPm09HXlGLXEcPto2lIGeAzjr9CT0zHcT+BT75Fpg7oPU7NqeCMwAw8n98
/cLw7M4J0A13DLygYXQWZgZxYWGG+0+egIewjx45wsAJTNzOzi4Mf//8Zrhw8SIw4/xlkJaWZJCT
lWOQkQVmNB5+0DUmf50dLE1AVTMw8r+Q6x+AAKIkVEHtB9DyImNWFobQjPLJSbnx7sBqQAKcU5Ej
DP2CeORVtbCiHrmIH5SRDj/iGMKGHIH8H3zOMy5Xwq5QAB2qyQwsPH///MZw/tR+BiUlZXBiOHnm
NMNnYPX3BZgoPn74wPD502fwnLwtsCsICi/QzCbo2HQxcSnw5JeCvAKDvLw0g7CQAMOf3+CdTmfs
7OxAYzSvyD09FCCAKA1hkP9A65GsgUV34PHjJ6JAq1NAfW1YNwx5QwX6PDusBQ9q5MH64oPhbABY
d+3PP8hBwKCGHOj6hp/QRhz0VHeMiIcc9s3IwAY6RRVYqrMxQY6C//3rN7DE+8pw8exhYOQJgi8I
uH7zOsMXYCkIvnkNiEG3IbABw0JPVw9s+K1bN8CXFcnISDPIy8kDc74cg5gE6IYmEQbQOZzfgI4C
jazu2Lwqpam+cjUwAXwix68AAURpaP+DTjZcBjbmDt27dw982xLyTciwljlshwzyxAtsxo3Y/jit
Ix3cev8LOt6ageEtsFB99ekf+BToN5//MXz4/g/coEPv3oEii42ZEXz3hygvE4MwDyP4PHEWYOSD
unygYvzPr68Ml88fAUaeADjSL127DOwh/GJ49+EdeE3DV2A7iBNY8oEi/y+wSnzy5DG4lBERl2YQ
EpNn4BKWZ2DmkWL4xSLM8PEPN8OPf6wMP/8D21qsPAy2Tt49oOoYupGHZAAQQNQI8b/QRHBtxYoV
20GtetgdpMh9ceTtUrB5dXoMvxIzGgfK6V++Q+4rewmMdNDB7W8+/wXfXQYq6hlhuR0pwvk4IXcb
ifBAIh90ODxIEYgGXfYkxM0ATAh/GP79/sJw6vgh8GAZqLdz4/ZNYCn4h+Hp86cMTx48YvgFDCte
8FpAefC8BehiiJfvvjFwC0oyCIorALE8A+hsZm5+YciFdMCeAeSKIdDN19wMn/7xCkyau6YWNKVC
zi5jgACiVsiDmrGgBWpmwGpgAbC+YgdtI8M1TYwS3/8xXfEfS0MKXQybGhSz/iNu80DhQ5WBDrMH
Fe+gbhuoLod11ZBzN0w9GyvkFi9WFkYUp4IOx2cHi0PYoIQEOigfVMT/Bub8Xz++MFw8f5aBm4sL
XNdfv3mTgRGoEDQ3Arr27BuwMcwPLAVBRTzILlDv4MXbb8CiXhKcIBTk5RkkJaUZePgEGX4zcTF8
/8PK8OUXpDriZmcEJs5/DN+/fmT48uHt31gvTV3Q4g9gVfCNlIgDCCBqreoEzR2A1pw9OXXq5Glh
YSEbXl4eBhZW0C5YRshFGv8gl+OAijbQlSiw2wL+/oXcRAKuLf8zgtWAAuM/9H4qBuigCuwseiZo
1wt2NRMT9Daif9A7msB3VcEqcQZIfQyrWVjAF/NArmT59RvSgkdOM7Dj30GRycHGBG28QYo4UAYH
JQA26A0p4ET0B2QeyC+QhAG+RAAo/wvYL/z44QvDpfPHwLkbVMzffvCAgR2Ye9++e8vwCHT7GTC3
CwG7t7LAbh24ewy6DhpY9ygBG3kikgoM3MKyDF9YJBmefONnYP/PCUyELODBpE/A7ubrj0D8CZT4
/jPcf8HO8P0LO3P5xP2LOvMd/YClwE9SGoQAAVg7gx6CYSiO/4sEkxEjNjvgS/gwvpOvIOLuzNXd
wVEiMTcO2AxLNMO0fR2uEocemiZNmn/z3vu/NP39M/bKByQdMbrT2WJYqdrMrFgiCuTeQLBUlu/u
mxQ6TiEk4oRSR+WmnsTXUUIn9B8/vsRn+kIkTC8yEovIbNLzE7tDYqrImol5hr2RFkponfdTpld6
UdT+6v//D/JK4nFojaGQT1DSeKv4Qc6AikUiTF6jC/beXNU3fhBguV7BKBrwjz48b43zKUTTbijw
Ib9xYSljbLYhypaLmtOG7bZg1R1kClWE3MDulBUpSdOg2AeyZIpaQ/Yf7jwCjw7gy0FvPOpPfikI
XwKImuu6f0CHJJ8+fXz/wR8mXsXfTDwMzMBQ/AfNdX//IW6bQekv/0esQWVEaoVjqMNeY0DrA9S6
AFFF/IcnOlAigJUW/xkQF7/Brv+B3W32FzpCxsyMsBtUt7Mywdo0kMTBBG0cgC6KAJU8v4Fds2/A
Bt2rB8DI5+ECXwVy/fYt8DD250+fQBc8M7wHJgJxUVFonf8JPMDz8M0vBmExaQZOIXmGL8wyDC9f
iTP8fsMPbO1zgOt6ZmgpxobWYgNFvpQgE8PTd5wMzL+B7QiTyBkMC3sMgaXAD/Q7knABgACidusL
tB9bxcTcxj+pdEqdsKgEsKUvAGzhM2EclI5SCqDX4wyUXUdG+8UbiITxF5q6/v35BayPPzNwfzkP
vujuNbBrd+XGVWDk84AHve7fuwe+8lBESBi8FgI0FvL563eG07eB+YZTioFdQI6BV1iOgUdAnIGD
W5CBhY0TGOks2EeVkNzBDE0UX779YPj77Q2DEtOJvtbyuDZQZiRmdBAggKi9swO8m/jMySNnU/+8
//zzBz8vByc3+GozYiP/PzHTskh9cNj9NOCSA6k9ALu2EXZzIUwvuJ6H5nKG/6hdQORr4KCFA+SW
M6g5DFA1sO4TI/RGwN+gCa2v7xj4v19lUFJWAq9aBk3u+P8PYti0eRP4mufHjx8ySAH78ZqaWuD7
L998/MFw5NoPBl4haQZeEXkGHiFZBi4+cWDDHphhQHdtMDJjRD6sBAUJg25aFONnAnY9GRjkREGT
WtwM/37+ZvjxxSQfqHQWNC6+E4owgACiRf8LdNWDenZhdam+fWSgELAU4OLmBde/yJEIrsuRingm
eEQwQu/Y/A9udIEaVsyMSA0+RsiVdCxMsMiFVAqM0DodcicXpEEJa9QxIl1kBBcDLXj4zwC92Q0S
o///wa61Q5q4+Q9xNxO0OgG1ARABB7rf7yfDlJ46hucPb4BXItvZ2QEjn5fh+YvnDAcOHWI4euwo
w7PHTxjkgInCxNgEvD7x8cuPDOcf/AdffccL7ONzC8owcPGLM7BzCTAwA3M+sKUCbNZArtAD9UD4
gd1KcX5GBlEg5mUHNUb/gy9j+gdeDgdZ/AIab/j1E9ie+PGNgf3Hw21x4V5JoLklQrODAAFEi71d
4FJgan/r6lk2gX6/fnxn5uHmAqZYFgZ2ZkhLmgXauobVoUxMiEiBRdR/2DJIRuyLMuANSgZGlJQM
0/sfaaQKXM//Q5QU/5mQruWGDdn+Q1IPbRWC3McCvdIT3EYA8llZGaFtDFDk/2J4/uwlw7wZ/eB6
HtSaBxX3sDGOH8Cu3/tXrxnMTM3AmzZA3b+bD14z3H7NgYh8YGufi1eMgY2THzwgJsjDxCAh8B+Y
s/8BI/sv+D61nz9+gRuKvz78ZHjx+yewxIFgJoY/DOwM3/68f/Xg04M7116fP3v6PrAXBrqUBrQ7
hgfaM8N7+BRAANEiAYC2I4GWIT///ebqVTEpPj0R3j9Az7GCh0n//8cs6sGlwX9E5CNyHFoxzAQd
V4eWAuAWMTP0qkKkRASyB3SRLKjUgVxmCb30khF+gxHYnj/QIogZGMvg3shfyAW2sCvPwJdc/4a0
vkFXG4ISL0ju3ee/DE/e/GR49e4Tw5YV88Dmgep10A4l0CAYqNsH2phqb2/P4O/vD96rD9qxu33/
GYY/PGoMXAKSwFwvxSAiIswgLcHBICX2i0GQ6y3QDX/AufjHO6DZb/8wfGT88ffTm8ef79+58vrC
udMvjh879hwaod+hGe0bdHb2CzSyQYtC3kIH5t4yoF20gw0ABBCthuBAqU9JRlbOccbCLRNExMQZ
eICNQSZQbP1H3C0NijjYXZOgVjUogEFKQGxYVwdl3Pk/JIJAd9SBIu8ftA/+9z9kfAHUJYPdZghK
JGB5aGMAfMcltBvIAG0rgIZrQXdeguz/D78Z9T+YD6vz/0LNff3pP8ODN//ALe+Pn38y3Hn6heHW
/dcMW3pcGfh52MF7EUBL5EEJABT5IDbotLGHwD4/GxsHQ3nbPAYuYNUgJirCICslBox0gX/MwM7b
8yf33ty4dvn5yRMnnv38+fM7lsj9zIDY2/eZAXGhEEztD+g0PQz/IWVqGCAAbWewgjAMg+EyC3pS
BnrydXw194bzsOMUEaZDRTdmm9YkbezAq556Lfz52yYh/f413ksbaw/7ulL9qcln89V6ucBcOHX4
nA+FFOJnykrERRFRHluTLKRgGu89zc5Wn2CZor11pr4iRWoF3oWikyCuYYS0JGGfjKxMxNcBAgzd
R6Kzxfv+ZRwH3QVdXx0tOt+oDo922595PsKZDp2/4TY4tbZpQJXKvvT5FnX0tkVx25XlFQAe8WQk
ZzYqQU3b6Nz7SHQReIiiykw//Bo28RZAtEoAf6HF0ps1q1esj0stSvv69yuwMQhausUEabAxQe4Q
B12UDrqLmA+YOFiYGbBGJCRRMIJzPijyQJH47wcoIv+Bb7IHzdD9/c8AH0kEJag/0NwPHkz6C6GR
hh3ACewvtG5hBDakgOUFuEH1B6j5/79fwAbWL4bHr38x3H7yneH1+x/gbh4z4y8GEa4f/7m/P/j5
8uGFj7dP7Gc2MTERAZ0IDlrHACryQQ0yUOSvWrXqxYoVKx5CI/0FdPkWiA+76BW2hu87tKj+MxA3
iQAEEC1nYWDHzBmePHlqmaysDBtsfgA0/PsbWrT+/ou4ffcndFEFePj4P0IOrB46nAzr4kE7Coiu
IAN6V/A/9Cpx8NgiOJKB2RW0swUoB4xgYGPq31/QTN1Pho9ffjB8+fETfOPnr69vwcX2nVs3GW5c
2PPx/r1H95Dq1jfQnAqKtP/Aer/Cx8eHE5QATE1NYUezvC8pKTkC1QO72Re2OfMlA2J//u/BcHUM
QADR8oQH2PzAqz37Dx0xsfF24nnPycDGzg0dj4dE7p9//+GLJv/C+/T/4d035L4+A3w+ADpDB6KB
EczE+JeB6T+o7Aa12H6B8e8/wJbyT9A28J/gSGYElqCf3r9luHv/IcOVG/cYrt1+zPDl22/wfkcQ
Bu114Gd+8efx9f1XP378Cbql9DE04p5Bi+wP0OIZ5BJuYMRnAxt4nKCt2KAEA8z5PwsKCtYDq4Ef
0ITyHJrrnzEgVu1+hYbL38FwWQQIAAQQredhwfMDHJyc5v2Lj8zjFRBn4BUQAq9wRYlc2OwadPiV
BdSCByUCYN+MmfEfPOf+h0bsH3A36DfDDyANajUzAiP/88d3DPfuP2C4fus+MHKfMLz/9B18cSkT
KwcwgjmBEcwBub2VjZuBhY0HTLMBO9kiP3a/37VxLWg3Leh2x3vQiH/JgNhQ+QdpvIqFiYmJz8/P
b0JKSko0sMvHdOzYsX9AfP4UsP8FVf8GKeJhW7Q+Q+vzP4Ml4mEAIIBofcYLeH7gx/fvj3+9v3vn
Hw+vCjsTDwMX9KZ5pv9/wZH3Dxi5oCVOf0B93O8/wf1r0PIxEP7/H7SO7hP4HL9rN+8zXL35iOHt
x2+QLWosHOAIBl3JC8agyGWXYmCVVmWQVOCCyEHVMDEDczow4fGyvv3PcHvipZ3rD69ggNxlirF3
HsdINDM3N7coMNfPiYmJ8fj48SPTyZMn/+7cuXMjMFIvI0U+LOJfQyMeloj+MQxCABBAtE4A/6DF
3tvVi6YsLavrq//74TvDuz+/oQtCgXXw98+/nzy6/f761cuvTp449uzmzZvvYMUkdJ2BhIZdotNf
NklgnIsxsEgqMkjIQSMcnLs5IMU4Myuw/geWLEwgmhmImeAFnCTDsV/nNjVsvn3nzQEg9wEDYjMF
/MQMAqUkyDBOaWnpCmAR73rgwAGmT58+/QFG/hxgwrwJa/BCzYSVHt8YEDd/DtqpDYAAYqRTIgOd
PK4FxAbQhiEb2iAGbCADdo4NrBsECnhRINYMzp3Y85LRnA09crFaCGyti39aCLq1eCG0aH8IzZmw
7dKkXMkKPqBJSkoqRFhYuB/Y0BN8+vTpdGBVcBOYGH5CzXsNjfg3aIkKI+IH2y1tAAFEr7VY7NCZ
QtAqYg5o4MPOyP8JzfF/YH1dpICDHWEDOrBfPyavevojlmCsx32DLif/d6Pr4vYtB5dDc/ljIot2
fIAJ6nbQhcZmQJwKxCsZIMviGdByPnJdjzNxDbYEABBAQ+Hwf0ZoogHvSIpMiut+KlCgDCnaTwCL
9jpyi3ZiSy/QbijQETrC0ATMA3XTD6SBnY9ILXy8JctgSwAAATSULgIC5UTQZhR5aJXCBA3852QW
7cSEDTM0AYBKIW4oZmVAHJ4Ji/gfxJYwgy0BAATQULo69ic0sl9DI+Y/NND/0qiR9R+amGAN0p/Q
CGeAjdwhVVv/GQb3GhacACCAWIbY1bGwCKEL0NTU/A+1jxGaABiRIvs/NEcP6du0AAIMANtMxR3x
N38FAAAAAElFTkSuQmCC\
"""
def thumbnail(data=None):
    """Return decoded PNG bytes.

    Args:
        data: optional base64 text to decode; defaults to the embedded
            `iconstr` thumbnail payload, so thumbnail() with no argument
            behaves exactly as before.

    Returns:
        bytes: the decoded image data.
    """
    if data is None:
        data = iconstr
    # base64.decodestring was Python-2-only (deprecated since 3.1, removed
    # in 3.9); b64decode accepts both str and bytes input.
    return base64.b64decode(data)
if __name__ == "__main__":
    # Decode the embedded icon and write it out as a PNG file.
    icon = thumbnail()
    # The Python-2-only file() builtin is gone in Python 3; open() inside a
    # context manager also guarantees the handle is closed if write() raises.
    with open("thumbnail.png", "wb") as f:
        f.write(icon)
|
python
|
import os
from my_lib import Object, Object2, Object3
def is_unique(s):
    """Return 1 if every character of `s` is distinct, else 0.

    Keeps the original int (0/1) return values for caller compatibility.
    """
    # A set collapses duplicates, so equal sizes means no repeats. This is
    # O(n) instead of the original sort-and-scan, and removes the fragile
    # loop/else structure (which, read as an if/else inside the loop, would
    # have returned after comparing only the first pair).
    return 1 if len(set(s)) == len(s) else 0
if __name__ == "__main__":
    # Read one line from stdin and print 1 (all chars unique) or 0.
    print(is_unique(input()))
|
python
|
def get_names():
    """Collect player names from stdin until the sentinel 'done' is typed."""
    roster = []
    while True:
        entry = input("Enter players name: ")
        if entry == 'done':
            break
        # Confirm to the user before recording, matching the original order.
        print(f'{entry} added to the list of players')
        roster.append(entry)
    return roster
def get_player_scores(players):
    """Interactively collect final card values for each player.

    For each name in *players*, reads values until 'end' is entered.

    Returns:
        dict: player name -> list of the entered values (as strings).

    Bug fix: the original reset ``scores`` on every player and returned
    only the LAST player's list, discarding everyone else's input; all
    players' scores are now kept, keyed by player name.
    """
    all_scores = {}
    for player in players:
        scores = []
        while True:
            score = input(f"What are {player}'s final cards? ")
            if score == 'end':
                break
            scores.append(score)
        all_scores[player] = scores
    return all_scores
if __name__ == '__main__':
    # Gather player names, then each player's final card values, echoing
    # both collections to stdout.
    players = get_names()
    print(players)
    scores = get_player_scores(players)
    print(scores)
|
python
|
#! /usr/bin/env python
# Demo of the multiplicative homomorphism of ElGamal encryption (eclib)
# applied to scalars, vectors and matrices.
from eclib.elgamal import *
# Key size in bits -- deliberately tiny; this is a demo, not secure use.
key_length = 20
params, pk, sk = keygen(key_length)
# Scaling factor for fixed-point encoding of real numbers (presumably
# consumed by enc()/dec(); note dec() of a product uses delta ** 2).
delta = 1e-2
print('========== ElGamal parameter ==========')
print(f'key length = {key_length} bit')
print(f'p = {params.p}')
print(f'q = {params.q}')
print(f'g = {params.g}')
print(f'h = {pk}')
print(f's = {sk}')
# scalar * scalar: decrypting mult(c1, c2) should recover x1 * x2; the
# ciphertext product carries scaling delta ** 2.
x1 = 1.23
x2 = -4.56
x3 = x1 * x2
c1 = enc(params, pk, x1, delta)
c2 = enc(params, pk, x2, delta)
c3 = mult(params, c1, c2)
y1 = dec(params, sk, c1, delta)
y2 = dec(params, sk, c2, delta)
y3 = dec(params, sk, c3, delta ** 2)
print('========== scalar & scalar ==========')
print(f'x1 = {x1:.2f}')
print(f'x2 = {x2:.2f}')
print(f'x3 = {x3:.2f}')
print(f'c1 = {c1}')
print(f'c2 = {c2}')
print(f'c3 = {c3}')
print(f'y1 = {y1:.2f}')
print(f'y2 = {y2:.2f}')
print(f'y3 = {y3:.2f}')
# scalar * vector: the encrypted scalar multiplies each encrypted
# vector element; x3 holds the expected plaintext result.
x1 = 1.23
x2 = [-0.12, -3.45, -6.78]
x3 = list(map(lambda a: x1 * a, x2))
c1 = enc(params, pk, x1, delta)
c2 = enc(params, pk, x2, delta)
c3 = mult(params, c1, c2)
y1 = dec(params, sk, c1, delta)
y2 = dec(params, sk, c2, delta)
y3 = dec(params, sk, c3, delta ** 2)
print('========== scalar & vector ==========')
print(f'x1 = {x1:.2f}')
print(f'x2 = [{x2[0]:.2f}, {x2[1]:.2f}, {x2[2]:.2f}]')
print(f'x3 = [{x3[0]:.2f}, {x3[1]:.2f}, {x3[2]:.2f}]')
print(f'c1 = {c1}')
print(f'c2 = {c2}')
print(f'c3 = {c3}')
print(f'y1 = {y1:.2f}')
print(f'y2 = [{y2[0]:.2f}, {y2[1]:.2f}, {y2[2]:.2f}]')
print(f'y3 = [{y3[0]:.2f}, {y3[1]:.2f}, {y3[2]:.2f}]')
# scalar * matrix: the encrypted scalar multiplies every entry of the
# encrypted 2x3 matrix.
x1 = 1.23
x2 = [[0.12, 3.45, 6.78], [-0.12, -3.45, -6.78]]
x3 = [list(map(lambda a: x1 * a, x2[0])), list(map(lambda a: x1 * a, x2[1]))]
c1 = enc(params, pk, x1, delta)
c2 = enc(params, pk, x2, delta)
c3 = mult(params, c1, c2)
y1 = dec(params, sk, c1, delta)
y2 = dec(params, sk, c2, delta)
y3 = dec(params, sk, c3, delta ** 2)
print('========== scalar & matrix ==========')
print(f'x1 = {x1:.2f}')
print(f'x2 = [[{x2[0][0]:.2f}, {x2[0][1]:.2f}, {x2[0][2]:.2f}], [{x2[1][0]:.2f}, {x2[1][1]:.2f}, {x2[1][2]:.2f}]]')
print(f'x3 = [[{x3[0][0]:.2f}, {x3[0][1]:.2f}, {x3[0][2]:.2f}], [{x3[1][0]:.2f}, {x3[1][1]:.2f}, {x3[1][2]:.2f}]]')
print(f'c1 = {c1}')
print(f'c2 = {c2}')
print(f'c3 = {c3}')
print(f'y1 = {y1:.2f}')
print(f'y2 = [[{y2[0][0]:.2f}, {y2[0][1]:.2f}, {y2[0][2]:.2f}], [{y2[1][0]:.2f}, {y2[1][1]:.2f}, {y2[1][2]:.2f}]]')
print(f'y3 = [[{y3[0][0]:.2f}, {y3[0][1]:.2f}, {y3[0][2]:.2f}], [{y3[1][0]:.2f}, {y3[1][1]:.2f}, {y3[1][2]:.2f}]]')
# vector * vector: elementwise (Hadamard) product of two encrypted
# 3-vectors.
x1 = [1.23, 4.56, 7.89]
x2 = [-0.12, -3.45, -6.78]
x3 = list(map(lambda a, b: a * b, x1, x2))
c1 = enc(params, pk, x1, delta)
c2 = enc(params, pk, x2, delta)
c3 = mult(params, c1, c2)
y1 = dec(params, sk, c1, delta)
y2 = dec(params, sk, c2, delta)
y3 = dec(params, sk, c3, delta ** 2)
print('========== vector & vector ==========')
print(f'x1 = [{x1[0]:.2f}, {x1[1]:.2f}, {x1[2]:.2f}]')
print(f'x2 = [{x2[0]:.2f}, {x2[1]:.2f}, {x2[2]:.2f}]')
print(f'x3 = [{x3[0]:.2f}, {x3[1]:.2f}, {x3[2]:.2f}]')
print(f'c1 = {c1}')
print(f'c2 = {c2}')
print(f'c3 = {c3}')
print(f'y1 = [{y1[0]:.2f}, {y1[1]:.2f}, {y1[2]:.2f}]')
print(f'y2 = [{y2[0]:.2f}, {y2[1]:.2f}, {y2[2]:.2f}]')
print(f'y3 = [{y3[0]:.2f}, {y3[1]:.2f}, {y3[2]:.2f}]')
# matrix * vector: the encrypted vector multiplies each row of the
# encrypted matrix elementwise.
x1 = [[1.23, 4.56, 7.89], [-1.23, -4.56, -7.89]]
x2 = [0.12, 3.45, 6.78]
x3 = [list(map(lambda a, b: a * b, x1[0], x2)), list(map(lambda a, b: a * b, x1[1], x2))]
c1 = enc(params, pk, x1, delta)
c2 = enc(params, pk, x2, delta)
c3 = mult(params, c1, c2)
y1 = dec(params, sk, c1, delta)
y2 = dec(params, sk, c2, delta)
y3 = dec(params, sk, c3, delta ** 2)
print('========== matrix & vector ==========')
print(f'x1 = [[{x1[0][0]:.2f}, {x1[0][1]:.2f}, {x1[0][2]:.2f}], [{x1[1][0]:.2f}, {x1[1][1]:.2f}, {x1[1][2]:.2f}]]')
print(f'x2 = [{x2[0]:.2f}, {x2[1]:.2f}, {x2[2]:.2f}]')
print(f'x3 = [[{x3[0][0]:.2f}, {x3[0][1]:.2f}, {x3[0][2]:.2f}], [{x3[1][0]:.2f}, {x3[1][1]:.2f}, {x3[1][2]:.2f}]]')
print(f'c1 = {c1}')
print(f'c2 = {c2}')
print(f'c3 = {c3}')
print(f'y1 = [[{y1[0][0]:.2f}, {y1[0][1]:.2f}, {y1[0][2]:.2f}], [{y1[1][0]:.2f}, {y1[1][1]:.2f}, {y1[1][2]:.2f}]]')
print(f'y2 = [{y2[0]:.2f}, {y2[1]:.2f}, {y2[2]:.2f}]')
print(f'y3 = [[{y3[0][0]:.2f}, {y3[0][1]:.2f}, {y3[0][2]:.2f}], [{y3[1][0]:.2f}, {y3[1][1]:.2f}, {y3[1][2]:.2f}]]')
# matrix * matrix: elementwise product of two encrypted 2x3 matrices
# (NOT a matrix product).
x1 = [[1.23, 4.56, 7.89], [-1.23, -4.56, -7.89]]
x2 = [[0.12, 3.45, 6.78], [-0.12, -3.45, -6.78]]
x3 = [list(map(lambda a, b: a * b, x1[0], x2[0])), list(map(lambda a, b: a * b, x1[1], x2[1]))]
c1 = enc(params, pk, x1, delta)
c2 = enc(params, pk, x2, delta)
c3 = mult(params, c1, c2)
y1 = dec(params, sk, c1, delta)
y2 = dec(params, sk, c2, delta)
y3 = dec(params, sk, c3, delta ** 2)
print('========== matrix & matrix ==========')
print(f'x1 = [[{x1[0][0]:.2f}, {x1[0][1]:.2f}, {x1[0][2]:.2f}], [{x1[1][0]:.2f}, {x1[1][1]:.2f}, {x1[1][2]:.2f}]]')
print(f'x2 = [[{x2[0][0]:.2f}, {x2[0][1]:.2f}, {x2[0][2]:.2f}], [{x2[1][0]:.2f}, {x2[1][1]:.2f}, {x2[1][2]:.2f}]]')
print(f'x3 = [[{x3[0][0]:.2f}, {x3[0][1]:.2f}, {x3[0][2]:.2f}], [{x3[1][0]:.2f}, {x3[1][1]:.2f}, {x3[1][2]:.2f}]]')
print(f'c1 = {c1}')
print(f'c2 = {c2}')
print(f'c3 = {c3}')
print(f'y1 = [[{y1[0][0]:.2f}, {y1[0][1]:.2f}, {y1[0][2]:.2f}], [{y1[1][0]:.2f}, {y1[1][1]:.2f}, {y1[1][2]:.2f}]]')
print(f'y2 = [[{y2[0][0]:.2f}, {y2[0][1]:.2f}, {y2[0][2]:.2f}], [{y2[1][0]:.2f}, {y2[1][1]:.2f}, {y2[1][2]:.2f}]]')
print(f'y3 = [[{y3[0][0]:.2f}, {y3[0][1]:.2f}, {y3[0][2]:.2f}], [{y3[1][0]:.2f}, {y3[1][1]:.2f}, {y3[1][2]:.2f}]]')
|
python
|
import numpy as np
import cv2
import os
from src.types.parent_slide import SlideImage
class FluorescenceSlide(SlideImage):
    """A 3-channel fluorescence microscopy slide image."""

    def __init__(self, img, **kwargs):
        """
        Arguments:
            img (ndarray): uint16 array of shape (h,w,3)
            name (str, optional keyword): identifier used as the output
                file-name prefix by :meth:`write_to`; defaults to ''.
        """
        self.img = img
        self.height, self.width, self.channels = self.img.shape
        assert self.channels == 3, f'FluorescenceSlide must have 3 channels but found {self.channels}'
        self.name = kwargs.get('name', '')

    @classmethod
    def fromFluorescenceSlides(cls, slides: list, weights: list = None, **kwargs):
        """Build a slide by (optionally weighted) averaging other slides.

        Bug fix: *weights* was previously accepted but silently ignored;
        it is now forwarded to np.average (None keeps the plain mean, so
        existing callers see identical results).
        """
        if len(slides) == 1:
            return cls(slides[0].img, **kwargs)
        stack = np.stack([slide.img for slide in slides])
        avg = np.average(stack, axis=0, weights=weights)
        return cls(avg, **kwargs)

    def write_to(self, out_dir):
        """
        Write the three channels of the FluorescenceSlide as .tif files to
        a specified directory (created, including parents, if missing).
        """
        assert self.name is not None, 'name is missing from FluorescenceSlide'
        # makedirs with exist_ok also handles nested paths, which the
        # original single-level os.mkdir could not.
        os.makedirs(out_dir, exist_ok=True)
        for chan in range(3):
            chan_img = self.img[:, :, chan]
            # Clamp to the uint16 range before the cast to avoid wrap-around.
            chan_img = np.clip(chan_img, 0, 65535)
            chan_img = chan_img.astype(np.uint16)
            file_suffix = f'A0{chan+1}Z01C0{chan+1}.tif'
            fn = os.path.join(out_dir, self.name + file_suffix)
            cv2.imwrite(fn, chan_img)
|
python
|
from typing import List, Optional, Type
from ape.types import AddressType
from .base import abstractdataclass, abstractmethod
from .providers import ProviderAPI, ReceiptAPI, TransactionAPI
@abstractdataclass
class AddressAPI:
    """Abstract base for anything that lives at an on-chain address."""

    # Injected after construction by the framework; accessing ``provider``
    # raises until it has been set.
    _provider: Optional[ProviderAPI] = None

    @property
    def provider(self) -> ProviderAPI:
        """Return the wired-in provider, or raise if none was injected."""
        if not self._provider:
            raise Exception("Wired incorrectly")
        return self._provider

    @property
    def _receipt_class(self) -> Type[ReceiptAPI]:
        """Receipt class of the active ecosystem."""
        return self.provider.network.ecosystem.receipt_class

    @property
    def _transaction_class(self) -> Type[TransactionAPI]:
        """Transaction class of the active ecosystem."""
        return self.provider.network.ecosystem.transaction_class

    @property
    @abstractmethod
    def address(self) -> AddressType:
        """The address this object represents (supplied by subclasses)."""
        ...

    def __dir__(self) -> List[str]:
        # This displays methods to IPython on `a.[TAB]` tab completion
        return [
            "address",
            "balance",
            "code",
            "codesize",
            "nonce",
            "is_contract",
            "provider",
        ]

    def __repr__(self) -> str:
        return f"<{self.__class__.__name__} {self.address}>"

    def __str__(self) -> str:
        return self.address

    @property
    def nonce(self) -> int:
        """Provider-reported nonce for this address."""
        return self.provider.get_nonce(self.address)

    @property
    def balance(self) -> int:
        """Provider-reported balance for this address."""
        return self.provider.get_balance(self.address)

    @property
    def code(self) -> bytes:
        # TODO: Explore caching this (based on `self.provider.network` and examining code)
        return self.provider.get_code(self.address)

    @property
    def codesize(self) -> int:
        """Byte length of the code deployed at this address."""
        return len(self.code)

    @property
    def is_contract(self) -> bool:
        """True when any code is deployed at this address."""
        return len(self.code) > 0
class Address(AddressAPI):
    """Concrete AddressAPI backed by an explicitly supplied address value."""

    _address: AddressType

    @property
    def address(self) -> AddressType:
        """Return the wrapped address value."""
        return self._address
|
python
|
""" capture_faces.py
Author: Gonรงalo S. Martins
This script implements the basic face detection and extraction features
needed for this work. The main section of the script illustrates its usage
in extracting faces from pre-recorded videos.
This script was tested using Python 3.5 and OpenCV 3.1.
Video capture code inspired by:
http://docs.opencv.org/3.0-beta/doc/py_tutorials/py_gui/py_video_display/py_video_display.html
Face extraction code inspired by:
http://docs.opencv.org/master/d7/d8b/tutorial_py_face_detection.html#gsc.tab=0
"Haar inversion method" inspired by work of the CASIR project:
http://mrl.isr.uc.pt/projects/casir/
"""
# Standard Library imports
import time
import os
# Non-standard imports
import cv2
import numpy
# Data loading functions
def capture_video(duration):
    """ This function captures video from the webcam for the given duration, in
    seconds. It returns the individual frames that it has captured.

    :param duration: capture time in seconds
    :returns: list of frames, or None if the camera could not be opened
    """
    # Open capture device (device index 0 = default webcam)
    cap = cv2.VideoCapture(0)
    print("Camera is operating at {} fps.".format(cap.get(cv2.CAP_PROP_FPS)))
    # Test capture device with a single read before committing to the loop
    ret, frame = cap.read()
    if not ret:
        print("Could not capture image!")
        # NOTE(review): implicitly returns None here; callers iterating the
        # result would fail -- confirm this is the intended contract.
        return
    # Capture as fast as possible for the requested wall-clock duration
    start = time.time()
    frames = []
    while(time.time() - start < duration):
        # Capture frame
        ret, frame = cap.read()
        # Append to our frame list
        frames.append(frame)
    # When everything done, release the capture
    cap.release()
    # Report the effective capture rate actually achieved
    print("True average frequency of captured frames: {}fps.".format(len(frames)/duration))
    # Return the captured frames
    return frames
def load_frames_from_video(filename):
    """Read every frame of the video file *filename* into a list."""
    capture = cv2.VideoCapture(filename)
    collected = []
    while True:
        ok, image = capture.read()
        if not ok:
            # End of stream (or unreadable file): stop collecting.
            break
        collected.append(image)
    return collected
# Batch operations
def show_frames(frames, freq = 12):
    """ This function receives a list of frames and plays them back at the given
    frequency.

    :param frames: iterable of images as produced by cv2 capture
    :param freq: playback rate in frames per second
    """
    for frame in frames:
        cv2.imshow('frame',frame)
        # waitKey both refreshes the window and paces the playback
        cv2.waitKey(round(1000/freq))
def find_faces(frames):
    """Detect one face bounding box per frame.

    Runs frontal, profile, and mirrored-profile Haar cascades on a
    downscaled grayscale copy of each frame, rescales the hits back to
    frame coordinates, and keeps one (randomly chosen) box per frame.

    :param frames: list of BGR images
    :returns: list with one entry per frame -- an [x, y, w, h] box, or an
        empty list when that frame had no detections
    """
    # Load the haar cascades
    front_cascade = cv2.CascadeClassifier('/home/vsantos/opencv_install/share/OpenCV/haarcascades/haarcascade_frontalface_alt.xml')
    side_cascade = cv2.CascadeClassifier('/home/vsantos/opencv_install/share/OpenCV/haarcascades/haarcascade_profileface.xml')
    # Images are reduced by this factor to speed up detection.
    scale_factor = 0.5
    i = 1
    face_boxes = []
    for frame in frames:
        print("Processing frame {} of {}.".format(i, len(frames)))
        i += 1
        # Convert image to grayscale and reduce it
        gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
        small = cv2.resize(gray, (0,0), fx=scale_factor, fy=scale_factor)
        # Detect faces with all three cascade passes
        face_list = []
        face_list.append(front_cascade.detectMultiScale(small))
        face_list.append(side_cascade.detectMultiScale(small))
        flipped_faces = side_cascade.detectMultiScale(cv2.flip(small, 1))
        # Since the coordinates are flipped as well, we need to fix them:
        face_list.append([(small.shape[1] - x - w, y, w, h) for (x, y, w, h) in flipped_faces])
        # Transform detections back to original frame coordinates
        faces = []
        for l in face_list:
            l = [[int(x * (1 / scale_factor)) for x in v] for v in l]
            faces += l
        # TODO: merge overlapping rectangles (keeping the largest) instead
        # of picking one at random.
        # Bug fix: guard against frames with no detections -- the original
        # called numpy.random.randint(0) and raised ValueError; this now
        # matches the guard already present in find_face().
        if type(faces) != type(tuple()) and len(faces) > 0:
            faces = faces[numpy.random.randint(len(faces))]
        face_boxes.append(faces)
    return face_boxes
def crop_faces(frames, boxes):
    """Crop each frame to its face bounding box.

    *frames* and *boxes* are parallel sequences; each box is (x, y, w, h).
    Returns the list of cropped sub-images.
    """
    cropped = []
    for idx, image in enumerate(frames):
        left, top, width, height = boxes[idx]
        cropped.append(image[top: top + height, left: left + width])
    return cropped
# Real Time
def find_faces_rt(duration=30):
    """ This function reads frames from the camera and marks faces, doing so
    continuously for a set duration. Its goal is to help set all of the
    relevant parameters.

    :param duration: how long to run, in seconds
    """
    # Open capture device (default webcam)
    cap = cv2.VideoCapture(0)
    # Load frontal and profile cascades
    front_cascade = cv2.CascadeClassifier('/home/vsantos/opencv_install/share/OpenCV/haarcascades/haarcascade_frontalface_alt.xml')
    side_cascade = cv2.CascadeClassifier('/home/vsantos/opencv_install/share/OpenCV/haarcascades/haarcascade_profileface.xml')
    # Define scale factor.
    # We will reduce images by this factor to speed up processing.
    scale_factor = 0.5
    # Test capture device before entering the loop
    ret, frame = cap.read()
    if not ret:
        print("Could not capture image!")
        return
    # Start capture
    start = time.time()
    # NOTE(review): this list is never appended to in this function.
    frames = []
    while(time.time() - start < duration):
        # Capture frame
        ret, frame = cap.read()
        # Convert image to grayscale and reduce it
        gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
        small = cv2.resize(gray, (0,0), fx=scale_factor, fy=scale_factor)
        # Detect faces with frontal, profile, and mirrored-profile cascades
        face_list = []
        face_list.append(front_cascade.detectMultiScale(small))
        face_list.append(side_cascade.detectMultiScale(small))
        flipped_faces = side_cascade.detectMultiScale(cv2.flip(small,1))
        # Since the coordinates are flipped as well, we need to fix them:
        face_list.append([(small.shape[1] - x - w, y, w, h) for (x, y, w, h) in flipped_faces])
        # Transform faces to the original coordinate frame and append to the
        # general face list
        faces = []
        for l in face_list:
            l = [[int(x*(1/scale_factor)) for x in v] for v in l]
            faces += l
        # Paint a rectangle around every detection and display the frame
        for (x,y,w,h) in faces:
            cv2.rectangle(frame,(x,y),(x+w,y+h),(255,0,0),2)
        cv2.imshow('frame',frame)
        cv2.waitKey(round(10))
    # When everything done, release the capture
    cap.release()
# Pipeline
def find_face(frame):
    """ This function finds a face in a given image.

    :param frame: a single BGR image
    :returns: one [x, y, w, h] box (chosen at random among detections),
        or an empty list when nothing was detected
    """
    # Load the haar cascades
    front_cascade = cv2.CascadeClassifier('/home/vsantos/opencv_install/share/OpenCV/haarcascades/haarcascade_frontalface_alt.xml')
    side_cascade = cv2.CascadeClassifier('/home/vsantos/opencv_install/share/OpenCV/haarcascades/haarcascade_profileface.xml')
    # Define scale factor.
    # We will reduce images by this factor to speed up processing.
    # (0.25 here vs 0.5 in the batch functions: faster, less accurate.)
    scale_factor = 0.25
    # Convert image to grayscale and reduce it
    gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
    small = cv2.resize(gray, (0,0), fx=scale_factor, fy=scale_factor)
    # Detect faces with frontal, profile, and mirrored-profile cascades
    face_list = []
    face_list.append(front_cascade.detectMultiScale(small))
    face_list.append(side_cascade.detectMultiScale(small))
    flipped_faces = side_cascade.detectMultiScale(cv2.flip(small,1))
    # Since the coordinates are flipped as well, we need to fix them:
    face_list.append([(small.shape[1] - x - w, y, w, h) for (x, y, w, h) in flipped_faces])
    # Transform faces to the original coordinate frame and append to the
    # general face list
    faces = []
    for l in face_list:
        l = [[int(x*(1/scale_factor)) for x in v] for v in l]
        faces += l
    # Detect and remove overlapping rectangles (keeping the largest)
    # TODO: Actually implement
    if type(faces) != type(tuple()) and len(faces) > 0:
        faces = faces[numpy.random.randint(len(faces))]
    # Return the face we found
    return faces
def crop_face(frame, box):
    """Return the sub-image of *frame* covered by *box* = (x, y, w, h)."""
    left, top, width, height = box
    return frame[top: top + height, left: left + width]
def extract_faces_from_video(filename, save_location):
    """ This function receives a video file and extracts all of the faces in it,
    saving the images into the provided folder.

    :param filename: path of the video to process
    :param save_location: directory for the cropped face images (created
        if missing); files are named <frame_number>.jpg
    """
    # Ensure that the save location exists
    if not os.path.exists(save_location):
        os.makedirs(save_location)
    # Open capture device
    cap = cv2.VideoCapture(filename)
    num_frames = cap.get(cv2.CAP_PROP_FRAME_COUNT)
    # Process frame-by-frame
    i = 1
    while True:
        # Time each frame to report throughput and ETA below
        start = time.time()
        # Get frame
        ret, frame = cap.read()
        if ret == False:
            print("Could not read frame, stopping!")
            break
        # Find face (empty list when no detection -- frame is skipped)
        box = find_face(frame)
        if len(box) > 0:
            # Crop face
            cropped = crop_face(frame, box)
            # Save to file
            cv2.imwrite(os.path.join(save_location, "{}.jpg".format(i)), cropped)
        # Inform on time
        fps = 1/(time.time()-start)
        eta = (num_frames-i)/fps
        print("Processed frame {:4d} at {:1.2f} fps. ETA: {:1.2f}s".format(i, fps, eta))
        # Increment counter
        i += 1
if __name__ == "__main__":
    # Bug fix: the script previously called show_cenas(), which is not
    # defined anywhere in this module and raised NameError at runtime.
    # Run the real-time face-marking demo instead; the batch pipeline is
    # available via extract_faces_from_video(), or via
    # load_frames_from_video() + find_faces() + crop_faces().
    find_faces_rt()
|
python
|
#!/usr/bin/python3
"""
Made by Facundo Diaz - Tomas De Castro - Tadeo Grach for Holberton School 2021 """
""" IMPORTS EXTERN MODULES """
import time
from datetime import datetime
import os
""" IMPORTS FILES """
import persistence
import entities
import mail
import info
import percent
""" In case url 1 is not working, changing for these
url = "https://api2.binance.com/"
url = "https://api3.binance.com/"
"""
url = "https://api.binance.com/"
# Minute counter: starts at 2 and only ever increases, so the loop below
# is an intentional infinite polling loop (one iteration per minute).
i = 2
# Alert states carried between iterations by the peak / constant-movement
# detectors in the percent module.
a = b = 0
# Iteration marks at which the alert states are reset (180 min = 3 h).
cortarpicos = cortarconstantes = 0
while(i >= 0):
    # Poll and persist the current price of each tracked coin.
    btc_price = info.consultar_precio_BTC(url)
    persistence.save_price_bitcoin(btc_price)
    eth_price = info.consultar_precio_ETH(url)
    persistence.save_price_ethereum(eth_price)
    doge_price = info.consultar_precio_DOGE(url)
    persistence.save_price_doge(doge_price)
    # When a peak is detected, schedule its state reset 3 hours ahead.
    a = percent.detectar_picos(a)
    if a != 0:
        cortarpicos = i + 180
    b = percent.chequear_movimientos(b)
    if b != 0:
        cortarconstantes = i + 180
    """ Save trends every hour """
    if i % 60 == 0:
        percent.insert_in_tendencias()
    """ Every 3 hours clear history of constants and peaks """
    if i == cortarpicos:
        a = 0
    if i == cortarconstantes:
        b = 0
    """Send daily summary """
    if i % 1440 == 0:
        mail.daily_resume()
    i += 1
    # One polling cycle per minute.
    time.sleep(60)
|
python
|
#Based on https://github.com/alexandrebarachant/muse-lsl/blob/master/notebooks/N170%20with%20Muse.ipynb
import sys
from collections import OrderedDict
from mne import create_info, concatenate_raws
from mne.io import RawArray
from mne.channels import read_montage
import pandas as pd
import numpy as np
from glob import glob
import seaborn as sns
from matplotlib import pyplot as plt
sys.path.append('../muse')
import utils
#matplotlib inline
# N170 ERP analysis: load a Muse recording, band-pass filter, epoch on
# Face/House events, and plot the per-condition average waveforms.
subject = 1
session = 1
dataPath = '../data/ia/N170Simon'
raw = utils.load_data(dataPath, sfreq=256., subject_nb=subject, session_nb=session, ch_ind=[0, 1, 2, 3])
raw.plot_psd(tmax=np.inf)
# Band-pass 1-30 Hz (IIR) to isolate the ERP frequency band.
raw.filter(1, 30, method='iir')
from mne import Epochs, find_events
events = find_events(raw)
event_id = {'Face': 1, 'House': 2}
epochs = Epochs(raw, events=events, event_id=event_id,
                tmin=-0.1, tmax=0.8, baseline=None,
                reject={'eeg': 75e-6}, preload=True,
                verbose=False, picks=[0, 1, 2, 3])
# Bug fix: "print epochs" is Python 2 statement syntax and a SyntaxError
# on Python 3; the call form below works on both interpreters.
print(epochs)
conditions = OrderedDict()
conditions['Face'] = [1]
conditions['House'] = [2]
fig, ax = utils.plot_conditions(epochs, conditions=conditions,
                                ci=97.5, n_boot=1000, title='',
                                diff_waveform=(1, 2))
python
|
#https://www.hackerrank.com/challenges/mark-and-toys/problem?isFullScreen=true
def maximumToys(prices, k):
    """Return the maximum number of toys affordable with budget *k*.

    Greedy: sort by price and buy cheapest first. Sorting is done
    in place, matching the original's side effect on *prices*.

    Bug fixes vs the original:
    - no longer raises IndexError when every toy is affordable (the old
      loop indexed one past the end of the list);
    - handles an empty price list (returns 0);
    - no longer shadows the ``sum`` builtin.
    """
    prices.sort()
    count = 0
    total = 0
    for price in prices:
        if total + price > k:
            break
        total += price
        count += 1
    return count
|
python
|
import hassapi as hass
import datetime
import re
"""
Class Phone_Manager handles sending call to voice notfyng service
"""
# Service-domain prefix for Home Assistant notify service calls.
__NOTIFY__ = "notify/"
# (pattern, replacement) pairs applied to TTS text: collapse runs of
# markdown-ish punctuation and whitespace into single spaces.
SUB_TTS = [("[\*\-\[\]_\(\)\{\~\|\}\s]+", " ")]
class Phone_Manager(hass.Hass):
    """AppDaemon app that routes a TTS message to a voice-call backend:
    either the local DSS VoIP add-on (SIP) or the CallMeBot web API."""

    def initialize(self):
        # Map locale codes to the concrete TTS voice name to request.
        self.dict_lingua = {
            "it-IT": "it-IT-Standard-A",
            "en-GB": "en-GB-Standard-A",
            "en-US": "en-US-Standard-A",
            "fr-FR": "fr-FR-Standard-A",
            "de-DE": "de-DE-Standard-A",
            "es-ES": "es-ES-Standard-A",
        }

    def replace_regular(self, text: str, substitutions: list):
        """Apply each (pattern, replacement) regex pair to *text* and return it."""
        for old, new in substitutions:
            text = re.sub(old, new, text.strip())
        return text

    def send_voice_call(self, data, phone_name: str, sip_server_name: str, language: str):
        """Place a voice call that reads data['message'] to data['called_number'].

        A *phone_name* containing 'voip_call' selects the DSS VoIP add-on
        (SIP URI built from *sip_server_name*); anything else goes through
        the CallMeBot HTTP API with the voice chosen via *language*.
        """
        # Clean the message for TTS, plus a %20-escaped copy for the URL.
        message = self.replace_regular(data["message"], SUB_TTS)
        message_tts = message.replace(" ", "%20")
        called_number = data["called_number"]
        lang = self.dict_lingua.get(language)
        phone_name = phone_name.lower().replace(" ", "_")
        if phone_name.find("voip_call") != -1:
            # An empty called_number is silently ignored.
            if called_number != "":
                called_number = "sip:{}@{}".format(called_number, sip_server_name)
                self.call_service(
                    "hassio/addon_stdin",
                    addon="89275b70_dss_voip",
                    input={"call_sip_uri": called_number, "message_tts": message},
                )
        else:
            if called_number != "":
                url_tts = "http://api.callmebot.com/start.php?source=HA&user={}&text={}&lang={}".format(
                    called_number, message_tts, lang
                )
                self.call_service("shell_command/telegram_call", url=url_tts)
|
python
|
"""
Export biomedical data from a relational database to an Avro file.
An Avro file stores the data schema as a JSON blob and the data in a
binary format
See https://avro.apache.org for details
In this case, the Avro file is called a PFB
(Portable Format for Bioinformatics) file because the data in the Avro file
conforms to the PFB schema, which is a graph structure suitable for capturing
relational data.
PFB file creations involves the following steps:
1. Create a PFB schema to represent the relational database
2. Transform the data from the relational database into the graph form
3. Add the PFB schema to the Avro file as an Avro schema
4. Add the transformed graph data to the Avro file as data records
Supported Databases:
- Any of the databases supported by SQLAlchemy since the SQLAlchemy ORM
is used to inspect the database and autogenerate the SQLAlchemy models
which are in turn used to create the PFB Schema.
"""
import os
import logging
from pfb_exporter.config import (
DEFAULT_OUTPUT_DIR,
DEFAULT_PFB_FILE,
DEFAULT_MODELS_PATH,
DEFAULT_TRANFORM_MOD
)
from pfb_exporter.utils import (
import_module_from_file,
import_subclass_from_module,
setup_logger
)
from pfb_exporter.transform.base import Transformer
class PfbExporter(object):
    """Export data from a relational database to a PFB (Avro) file.

    Wires up a Transformer subclass, loaded dynamically from the given
    transform module, which turns the SQLAlchemy models into a PFB schema.
    """

    def __init__(
        self,
        data_dir,
        db_conn_url=None,
        models_filepath=DEFAULT_MODELS_PATH,
        transform_module_filepath=DEFAULT_TRANFORM_MOD,
        output_dir=DEFAULT_OUTPUT_DIR
    ):
        """
        :param data_dir: directory containing the data to export
        :param db_conn_url: SQLAlchemy connection URL of the source database
        :param models_filepath: path to the SQLAlchemy models file
        :param transform_module_filepath: path to a module that defines a
            subclass of Transformer
        :param output_dir: where logs and the PFB file are written
        :raises NotImplementedError: if the transform module defines no
            Transformer subclass
        """
        setup_logger(os.path.join(output_dir, 'logs'))
        self.logger = logging.getLogger(type(self).__name__)
        self.models_filepath = os.path.abspath(
            os.path.expanduser(models_filepath)
        )
        self.data_dir = os.path.abspath(os.path.expanduser(data_dir))
        self.output_dir = os.path.abspath(os.path.expanduser(output_dir))
        self.pfb_file = os.path.join(output_dir, DEFAULT_PFB_FILE)
        # Relational model to PFB Schema transformer
        self.transformer = None
        # Import the Transformer subclass from the transform module
        mod = import_module_from_file(transform_module_filepath)
        child_classes = import_subclass_from_module(Transformer, mod)
        if not child_classes:
            raise NotImplementedError(
                f'Transform module {transform_module_filepath} must implement '
                f'a class which extends the abstract base class '
                f'{os.path.abspath(mod.__file__)}. + {Transformer.__name__}'
            )
        # Use the first Transformer subclass found in the module
        self.transformer = child_classes[0](
            self.models_filepath, self.output_dir, db_conn_url=db_conn_url
        )

    def export(self, output_to_pfb=True):
        """
        Create a PFB file containing JSON payloads which conform to a
        relational model
        - Transform the relational model into a PFB Schema
        - Transform the data into PFB Entities
        - Create an Avro file with the PFB schema and Entities
        :param output_to_pfb: whether to complete the export after transforming
        the relational model to the PFB schema
        :type output_to_pfb: bool
        """
        try:
            # Transform relational model to PFB Schema
            self.transformer.transform()
            # Create the PFB file from the PFB Schema and data
            if output_to_pfb:
                self._create_pfb()
        except Exception as e:
            self.logger.exception(str(e))
            self.logger.info(f'Export to PFB file {self.pfb_file} failed!')
            # Bug fix: raise SystemExit directly instead of the
            # site-injected exit() helper, which is not guaranteed to exist.
            raise SystemExit(1)
        else:
            # Bug fix: this message was a single-quoted f-string broken
            # across two physical lines (a SyntaxError); keep it on one.
            self.logger.info(f'Export to PFB file {self.pfb_file} succeeded!')

    def _create_pfb(self):
        """
        Create a PFB file from a Gen3 PFB Schema and JSON payloads
        """
        # Add schema to temporary avro file
        pass
|
python
|
from urlparse import urljoin
from kivy.properties import StringProperty
from kivy.uix.screenmanager import Screen
from kivy.utils import platform
from pytt.tools import Tool, RectangleTool
from pytt.pasteimage import PasteImageScreen, get_png_from_clipboard
class DMScreen(Screen):
mapfile = StringProperty('')
server = StringProperty('127.0.0.1')
port = StringProperty('5000')
@property
def fog(self):
return self.ids.fog
def url(self, path):
base = 'http://%s:%s' % (self.server, self.port)
return urljoin(base, path)
def on_key_press(self, app, k):
if k == ' ':
app.tool = 'move'
elif k == 'R':
app.tool = 'rect'
elif k == 'ctrl+S':
# sync
self.cmd_sync()
elif k == 'ctrl+V':
png_data = get_png_from_clipboard()
if png_data is None:
print 'No image in the clipboard'
else:
screen = PasteImageScreen(name="paste",
dm=self,
png_data=png_data)
app.root.open(screen)
def select_tool(self, tool):
self.fog.locked = (tool != 'move')
if tool == 'move':
self.fog.tool = Tool()
elif tool == 'rect':
self.fog.tool = RectangleTool()
else:
print 'Unknown tool: %s' % tool
def cmd_load_map(self):
from kivy.utils import platform
if platform == 'android':
from pytt.select_image import user_select_image
user_select_image(self.on_image_selected)
else:
print 'TODO: implement load_map on desktops'
def on_image_selected(self, filename):
self.mapfile = filename
def cmd_send_map(self, app):
with open(self.mapfile, 'rb') as f:
url = self.url('/load_map/')
resp = app.requests.post(url, files={'image': f})
print resp
print resp.text
def cmd_sync(self, app):
areas = self.fog.get_json_areas()
resp = app.requests.post(self.url('/reveal/'), json=areas)
print resp
print resp.text
def cmd_adjust_rotation(self):
rot = self.fog.rotation
if rot % 90 != 0:
self.fog.rotation = int(rot % 90) * 90
self.fog.rotation += 90
def send_image(self, app, stream):
url = self.url('/show_image/')
resp = app.requests.post(url, files={'image': stream})
print resp
print resp.text
|
python
|
import adv.adv_test
from core.advbase import *
import marth
def module():
    """Entry point for the adv test harness: the adventurer class to run."""
    return Marth
class Marth(marth.Marth):
    """Marth variant; the ``comment`` attribute describes the play pattern
    being simulated."""

    comment = 'dodge*2 to miss s2p3 then dodge*2 back to attack'

    def s2_proc(self, e):
        # Three-stage skill: each cast advances the stance and grants a
        # progressively stronger buff (self -> team -> team + att/speed).
        if self.stance == 0:
            self.stance = 1
            Selfbuff('s21',0.1,10).on()
            self.dmg_make('s2',6.85)
        elif self.stance == 1:
            self.stance = 2
            Teambuff('s22',0.1,10).on()
            self.dmg_make('s2',6.85)
            # Two dodges of 42 frames each (60 fps) added to skip the
            # third hit, per the comment above.
            self.conf.s2.startup += 42.0*2/60
            self.conf.s2.recovery += 42.0*2/60
        elif self.stance == 2:
            Teambuff('s23',0.1,10).on()
            Teambuff('s23s',0.3,10,'att','speed').on()

    def l_s(self, e):
        """Skill-cast listener: log charge state, deal the configured
        damage, apply any configured buff, then run the skill-specific
        *_proc hook."""
        prev, index, stat = self.getprev()
        # Log which action preceded this cast (force strike, combo, other).
        if prev == 'fs':
            log('cast', e.name, 0,'<cast> %d/%d, %d/%d, %d/%d (%s after fs)'%(
                self.s1.charged, self.s1.sp, self.s2.charged, self.s2.sp, self.s3.charged, self.s3.sp, e.name) )
        elif prev[0] == 'x':
            log('cast', e.name, 0,'<cast> %d/%d, %d/%d, %d/%d (%s after c%s)'%(
                self.s1.charged, self.s1.sp, self.s2.charged, self.s2.sp, self.s3.charged, self.s3.sp, e.name, index ) )
        else:
            log('cast', e.name, 0,'<cast> %d/%d, %d/%d, %d/%d (%s after %s)'%(
                self.s1.charged, self.s1.sp, self.s2.charged, self.s2.sp, self.s3.charged, self.s3.sp, e.name, prev ) )
        dmg_coef = self.conf[e.name+'.dmg']
        if dmg_coef :
            # s2's damage is dealt inside s2_proc (per stance) instead.
            if e.name != 's2':
                self.dmg_make(e.name , dmg_coef)
        if 'buff' in self.conf[e.name]:
            buffarg = self.conf[e.name+'.buff']
            # First element selects the buff scope; the rest are its args.
            wide = buffarg[0]
            buffarg = buffarg[1:]
            if wide == 'team':
                Teambuff(e.name, *buffarg).on()
            elif wide == 'self':
                Selfbuff(e.name, *buffarg).on()
            elif wide == 'debuff':
                Debuff(e.name, *buffarg).on()
            else:
                Buff(e.name, *buffarg).on()
        # NOTE(review): assumes every skill has a *_proc hook (the base
        # class presumably provides s1_proc/s3_proc) -- confirm.
        func = e.name + '_proc'
        getattr(self, func)(e)
if __name__ == '__main__':
    # Run the simulator on this module's adventurer with default config.
    conf = {}
    adv.adv_test.test(module(), conf)
|
python
|
# uncompyle6 version 3.7.4
# Python bytecode 3.7 (3394)
# Decompiled from: Python 3.7.9 (tags/v3.7.9:13c94747c7, Aug 17 2020, 18:58:18) [MSC v.1900 64 bit (AMD64)]
# Embedded file name: T:\InGame\Gameplay\Scripts\Server\fame\fame_commands.py
# Compiled at: 2020-06-01 23:01:55
# Size of source mod 2**32: 8751 bytes
from bucks.bucks_utils import BucksUtils
from careers.career_interactions import set_force_fame_moment, set_debug_force_fame_moment
from fame.fame_tuning import FameTunables
from server_commands.argument_helpers import OptionalTargetParam, get_optional_target
import services, sims4.commands
logger = sims4.log.Logger('Fame', default_owner='rfleig')
@sims4.commands.Command('fame.set_start_all_sims_opted_out_of_fame', command_type=(sims4.commands.CommandType.Live))
def set_start_all_sims_opted_out_of_fame(start_opted_out: bool, _connection=None):
    """Live command: set whether Sims start opted out of the fame system."""
    services.sim_info_manager().set_start_all_sims_opted_out_of_fame(start_opted_out)
    return True
@sims4.commands.Command('fame.set_freeze_fame', command_type=(sims4.commands.CommandType.Cheat))
def set_freeze_fame(freeze_fame: bool, opt_sim: OptionalTargetParam=None, _connection=None):
    """Cheat: freeze/unfreeze fame progression on the target (or active) Sim.

    Rejected when the Sim's fame is disabled entirely (allow_fame False).
    Returns True on success, False otherwise.
    """
    sim = get_optional_target(opt_sim, _connection)
    if sim is None:
        sims4.commands.output('No target Sim to freeze the fame of.', _connection)
        return False
    if not sim.allow_fame:
        if freeze_fame:
            sims4.commands.output('Cannot freeze fame on a sim with disabled fame.', _connection)
        else:
            sims4.commands.output('Fame is already unfrozen for sims with disabled fame.', _connection)
        return False
    sim.set_freeze_fame(freeze_fame)
    sims4.commands.output("{}'s fame frozen setting is now set to {}.".format(sim, freeze_fame), _connection)
    return True
@sims4.commands.Command('fame.set_allow_fame', command_type=(sims4.commands.CommandType.Live))
def set_allow_fame(allow_fame: bool, opt_sim: OptionalTargetParam=None, _connection=None):
    """Live command: force-enable or disable fame for the target (or active) Sim."""
    sim = get_optional_target(opt_sim, _connection)
    if sim is None:
        sims4.commands.output('No target Sim to manipulate the fame of.', _connection)
        return False
    sim.force_allow_fame(allow_fame)
    sims4.commands.output("{}'s allow_fame setting is set to {}".format(sim, sim.allow_fame), _connection)
    return True
@sims4.commands.Command('fame.show_allow_fame', command_type=(sims4.commands.CommandType.Automation))
def show_allow_fame(opt_sim: OptionalTargetParam=None, _connection=None):
    """Automation command: report the allow_fame flag of the target (or active) Sim."""
    sim = get_optional_target(opt_sim, _connection)
    if sim is None:
        sims4.commands.output('No target Sim to get the value of allow_fame from.', _connection)
        return False
    sims4.commands.output("{}'s allow_fame setting is set to {}".format(sim, sim.allow_fame), _connection)
    return True
@sims4.commands.Command('famepoints', command_type=(sims4.commands.CommandType.Cheat))
def add_fame_points(points: int=0, opt_sim: OptionalTargetParam=None, _connection=None):
    """Cheat: add *points* fame perk points to the target Sim's bucks tracker.

    Does nothing (with an explanatory message) when the fame DLC is not
    loaded or when no target Sim can be resolved.
    """
    if FameTunables.FAME_PERKS_BUCKS_TYPE is None:
        sims4.commands.output('The DLC that is necessary for this cheat is not loaded.', _connection)
        return
    sim = get_optional_target(opt_sim, _connection)
    if sim is None:
        sims4.commands.output('No Target Sim to add the fame points too.', _connection)
        # Bug fix: previously fell through and dereferenced sim.id below,
        # raising AttributeError after printing the error message.
        return
    bucks_tracker = BucksUtils.get_tracker_for_bucks_type(
        FameTunables.FAME_PERKS_BUCKS_TYPE, sim.id, add_if_none=True)
    bucks_tracker.try_modify_bucks(FameTunables.FAME_PERKS_BUCKS_TYPE, points)
    sims4.commands.output('{} Fame Points have been added to {}'.format(points, sim), _connection)
@sims4.commands.Command('fame.add_sim_to_squad', command_type=(sims4.commands.CommandType.Automation))
def add_sim_to_squad(sim_with_squad: int=None, sim_to_add: int=None, _connection=None):
    """Add one instantiated Sim to another Sim's squad, both given by object id."""
    object_manager = services.object_manager()
    squad_owner = object_manager.get(sim_with_squad)
    if squad_owner is None:
        sims4.commands.output('Sim with the squad does not exist, please specify an existing sim id.', _connection)
        return
    new_member = object_manager.get(sim_to_add)
    if new_member is None:
        sims4.commands.output('Sim to add to the squad does not exist, please specify an existing sim id.', _connection)
        return
    # Squad membership is stored by sim_info id on the owner's sim_info.
    squad_owner.sim_info.add_sim_info_id_to_squad(new_member.sim_info.id)
@sims4.commands.Command('fame.remove_sim_from_squad', command_type=(sims4.commands.CommandType.Automation))
def remove_sim_from_squad(sim_with_squad: int=None, sim_to_add: int=None, _connection=None):
    """Remove a Sim from another Sim's squad, both given by object id.

    Note: the second parameter is named sim_to_add for historical/CLI
    compatibility even though it identifies the Sim being removed.
    """
    object_manager = services.object_manager()
    squad_owner = object_manager.get(sim_with_squad)
    if squad_owner is None:
        sims4.commands.output('Sim with the squad does not exist, please specify an existing sim id.', _connection)
        return
    member = object_manager.get(sim_to_add)
    if member is None:
        sims4.commands.output('Sim to remove from the squad does not exist, please specify an existing sim id.', _connection)
        return
    squad_owner.sim_info.remove_sim_info_id_from_squad(member.sim_info.id)
@sims4.commands.Command('fame.turn_off_lifestyle_brand', command_type=(sims4.commands.CommandType.Live))
def turn_off_lifestyle_brand(opt_sim: OptionalTargetParam=None, _connection=None):
    """Clear the targeted Sim's lifestyle brand, if the tracker exists."""
    sim = get_optional_target(opt_sim, _connection)
    if sim is None:
        sims4.commands.output('No target Sim to turn the lifestyle brand off for.', _connection)
        return False
    tracker = sim.sim_info.lifestyle_brand_tracker
    if tracker is None:
        # Bug fix: this output call previously omitted the _connection
        # argument that every other output call in this module passes.
        sims4.commands.output("{} doesn't have a lifestyle brand tracker, something is seriously wrong. Get GPE help please.".format(sim), _connection)
        return False
    tracker.clear_brand()
    return True
@sims4.commands.Command('fame.award_parent_fame_bonus', command_type=(sims4.commands.CommandType.Live))
def award_parent_fame_bonus(child_sim_id: int, _connection=None):
    """Grant a child Sim bonus fame scaled by how far the most famous parent
    outranks the child.

    Returns True on success; False when the fame pack is unavailable or the
    Sim id does not resolve.
    """
    if FameTunables.FAME_RANKED_STATISTIC is None:
        # Fame pack not installed; nothing to award.
        return False
    sim_info_manager = services.sim_info_manager()
    child_sim = sim_info_manager.get(child_sim_id)
    if child_sim is None:
        logger.error('Calling award_parent_fame_bonus passing in an invalid sim id {}. Sim not found', child_sim_id)
        # Bug fix: previously fell through and dereferenced child_sim below,
        # crashing after logging the error.
        return False
    child_fame = child_sim.commodity_tracker.get_statistic(FameTunables.FAME_RANKED_STATISTIC, add=True)
    child_fame_rank = 0 if child_fame is None else child_fame.rank_level
    # Find the highest fame rank among the child's parents.
    max_parent_rank = 0
    genealogy = child_sim.sim_info.genealogy
    for parent_id in genealogy.get_parent_sim_ids_gen():
        parent = sim_info_manager.get(parent_id)
        if parent is None:
            continue
        fame = parent.commodity_tracker.get_statistic(FameTunables.FAME_RANKED_STATISTIC)
        if fame is None:
            continue
        max_parent_rank = max(max_parent_rank, fame.rank_level)
    # The bigger the rank gap in the parents' favor, the bigger the bonus.
    difference = max(0, max_parent_rank - child_fame_rank)
    bonus = FameTunables.PARENT_FAME_AGE_UP_BONUS.get(difference, 0)
    child_fame.add_value(bonus)
    return True
@sims4.commands.Command('fame.force_fame_moments', command_type=(sims4.commands.CommandType.Cheat))
def force_fame_moments(enable: bool=True, _connection=None):
    # Cheat toggle: force fame moments to fire (flag lives in career_interactions).
    set_force_fame_moment(enable)
    sims4.commands.output('Force Fame Moment Cheat: {}.'.format(enable), _connection)
@sims4.commands.Command('fame.debug_force_fame_moments', command_type=(sims4.commands.CommandType.DebugOnly))
def debug_force_fame_moments(enable: bool=True, _connection=None):
    # Debug-only variant of the force-fame-moment toggle.
    set_debug_force_fame_moment(enable)
    sims4.commands.output('Debug Force Fame Moment Cheat: {}.'.format(enable), _connection)
|
python
|
from django.apps import AppConfig
class ReportConfig(AppConfig):
    """App configuration for the reports app; discovers report classes once
    the Django app registry is ready."""

    verbose_name = "Django Model Reports"
    name = "reports"

    def ready(self):
        # Imported here (not at module top) so the app registry is populated
        # before report discovery runs.
        from .base import reports

        reports.discover()
|
python
|
#!/usr/bin/env python
import rospy
import numpy as np
import cv2
from subprocess import call
import sys
from std_msgs.msg import String
import cv2.cv as cv
import os
class martin_aimas_interface:
    """Bridge between speech recognition and an on-screen image.

    Recognized phrases (from /recognizer/output) are filtered for stop-words,
    handed to a helper download script, and the first downloaded image is
    stored in current_screen_image for the main loop to display.
    """

    def __init__(self):
        # When False, talkback() ignores anything the recognizer publishes.
        self.speechRecognitionFlag = True
        self.subSpeech = rospy.Subscriber('/recognizer/output', String, self.talkback)
        self.r=rospy.Rate(10)
        self.r.sleep()
        self.windowHeight=1000
        self.windowWidth=1024
        # Black canvas shown until the first image is downloaded.
        self.current_screen_image = np.zeros((self.windowHeight, self.windowWidth, 3), dtype=np.uint8)

    def getImage(self, data):
        # Persist the keyword, then shell out to a helper that downloads
        # matching images into the images/<keyword>/ directory.
        self.r.sleep()
        print "look up keyword" #write the key word to a file and system call a helper program to download images
        outputFileName= '/home/turtlebot/ros_ws/src/hpc/my_keyword.txt' #CHANGE this depending on where you put your files
        keywordFile=open(outputFileName, 'w')
        lineToWrite= data
        keywordFile.write(lineToWrite)
        keywordFile.close()
        call("python /home/turtlebot/ros_ws/src/hpc/src/downloadImage.py", shell=True) #CHANGE this depending on where you put your files

    def talkback(self, data):
        # Subscriber callback: strip stop-words from the phrase, trigger the
        # image download, and update the displayed image on success.
        print data.data
        self.r.sleep()
        if(self.speechRecognitionFlag == False):
            print "I heard something but am ignoring it, since the flag is set to False"
        else:
            print "I heard:", data.data
            #remove small words and recombine search string
            myWords= data.data.split()
            myWordsRefined=[]
            for wordIndex in range(len(myWords)):
                if (myWords[wordIndex] != "is") and (myWords[wordIndex] != "the") and (myWords[wordIndex] != "my") and (myWords[wordIndex] != "this") and (myWords[wordIndex] != "to") and (myWords[wordIndex] != "for"):
                    myWordsRefined.append(myWords[wordIndex])
            newSearchString = ' '.join(myWordsRefined)
            if(len(newSearchString)>1):
                self.getImage(newSearchString) #important, this calls the actual search
                #check that we actually received something
                myDirectory = '/home/turtlebot/ros_ws/src/hpc/images/%s' % newSearchString #CHANGE this depending on where you put your files
                myFiles = os.listdir(myDirectory)
                if(len(myFiles)==0):
                    print "Failed. It looks like no files were downloaded"
                else:
                    #if we got some files, try read the first
                    inputFileName= '/home/turtlebot/ros_ws/src/hpc/images/%s/%s' % (newSearchString, myFiles[0]) #CHANGE this depending on where you put your files
                    img = cv2.imread(inputFileName)
                    if img is not None:
                        # Overlay the raw recognized phrase, then fit the image
                        # to the display window size.
                        cv2.putText(img, data.data, (30, 70), cv2.FONT_HERSHEY_SIMPLEX, 4, (0, 0, 0), 4)
                        self.current_screen_image = cv2.resize(img, (1000, 1024))
                    else:
                        print "Failed to read image."
            else:
                print "I didn't find a nice word to look up."
def main():
    # Start the ROS node, open the display window, then keep showing the most
    # recently fetched image until the user presses 'q'.
    rospy.init_node('hpcaimas', anonymous=True)
    my_aimas = martin_aimas_interface()
    cv2.namedWindow("aimas_screen", cv2.WINDOW_NORMAL)
    cv2.imshow("aimas_screen", my_aimas.current_screen_image)
    cv2.waitKey(100)
    while True:
        my_aimas.r.sleep()
        cv2.imshow("aimas_screen", my_aimas.current_screen_image)
        key= cv2.waitKey(10) & 0xFF
        if(key== ord("q")):
            break
    cv2.destroyAllWindows()
if __name__== '__main__':
    # Print a banner, run the node, and exit quietly on ROS shutdown (Ctrl-C).
    print '-------------------------------------'
    print '- AIMAS -'
    print '- DEC 2017, HH, Martin Cooney -'
    print '-------------------------------------'
    try:
        main()
    except rospy.ROSInterruptException:
        pass
    finally:
        pass
|
python
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Mon Oct 22 00:29:48 2018
@author: Hiba Lubbad
"""
# Demo data: single-letter key -> list of animal names; 'd' gains three entries.
animals = { 'a': ['horse'], 'b': ['baboon'], 'c': ['giraffe']}
animals['d'] = ['donkey']
animals['d'].append('dog')
animals['d'].append('dingo')
def howmany(di):
    """Return the total number of items across all value lists in *di*."""
    return sum(len(values) for values in di.values())
def biggest(di):
    """Return the longest value list in *di* (first one on ties; [] if empty)."""
    # max() keeps the first maximal element, matching the strict-> loop it replaces.
    return max(di.values(), key=len, default=[])
def dstats(di):
    """Return (total item count, length of the longest value list) for *di*."""
    # Inlines the howmany/biggest helpers: only the lengths are needed here.
    lengths = [len(v) for v in di.values()]
    return (sum(lengths), max(lengths, default=0))
# Expect (6, 3): six animals total; the longest list ('d') has three entries.
print(dstats(animals))
|
python
|
# Get the data from:
# https://archive.ics.uci.edu/ml/datasets/Airfoil+Self-Noise
from __future__ import print_function, division
from future.utils import iteritems
from builtins import range, input
import os
# Note: you may need to update your version of future
# sudo pip install -U future

# just in case we need it
import numpy as np
import pandas as pd

# load the data (tab-separated, no header row)
# important note: this is where we will usually put data files
os.system('wget https://archive.ics.uci.edu/ml/machine-learning-databases/00291/airfoil_self_noise.dat')
df = pd.read_csv('./airfoil_self_noise.dat', sep='\t', header=None)
os.system('clear')

# check the data
df.head()
df.info()

# get the inputs (columns 0-4)
data = df[[0,1,2,3,4]].values
# get the output (column 5)
target = df[5].values
# tiny update: pandas is moving from .as_matrix() to the equivalent .values

# normally we would put all of our imports at the top
# but this lets us tell a story
from sklearn.model_selection import train_test_split

# split the data into train and test sets
# this lets us simulate how our model will perform in the future
X_train, X_test, y_train, y_test = train_test_split(data, target, test_size=0.33)

# instantiate a regressor and train it
from sklearn.linear_model import LinearRegression
model = LinearRegression()
model.fit(X_train, y_train)

# evaluate the model's performance (R^2 on train and held-out test data)
print(model.score(X_train, y_train))
print(model.score(X_test, y_test))

# how you can make predictions
predictions = model.predict(X_test)

# what did we get?
predictions

# we can even use random forest to solve the same problem!
from sklearn.ensemble import RandomForestRegressor
model2 = RandomForestRegressor()
model2.fit(X_train, y_train)

# evaluate the model's performance
print(model2.score(X_train, y_train))
print(model2.score(X_test, y_test))

# we can even use deep learning to solve the same problem!
from sklearn.neural_network import MLPRegressor

# you'll learn why scaling is needed in a later course
from sklearn.preprocessing import StandardScaler
scaler = StandardScaler()
X_train2 = scaler.fit_transform(X_train)
X_test2 = scaler.transform(X_test)
scaler2 = StandardScaler()
y_train2 = scaler2.fit_transform(np.expand_dims(y_train, -1)).ravel()
y_test2 = scaler2.fit_transform(np.expand_dims(y_test, -1)).ravel()

model = MLPRegressor(max_iter=500)
model.fit(X_train2, y_train2)

# evaluate the model's performance
print(model.score(X_train2, y_train2))
print(model.score(X_test2, y_test2))
# not as good as a random forest!
# but not as bad as linear regression

# clean up the downloaded dataset
# Bug fix: was os.systyem(...), which raised AttributeError so the file
# was never removed.
os.system('rm airfoil_self_noise.dat')
|
python
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
#
import argparse
import inspect
from abc import ABC, abstractmethod
from typing import Dict, Generator, List, Type, TypeVar
from libcst import Module
from libcst.codemod._codemod import Codemod
from libcst.codemod._context import CodemodContext
from libcst.codemod._visitor import ContextAwareTransformer
from libcst.codemod.visitors._add_imports import AddImportsVisitor
from libcst.codemod.visitors._remove_imports import RemoveImportsVisitor
_Codemod = TypeVar("_Codemod", bound=Codemod)
class CodemodCommand(Codemod, ABC):
    """
    A :class:`~libcst.codemod.Codemod` which can be invoked on the command-line
    using the ``libcst.tool codemod`` utility. It behaves like any other codemod
    in that it can be instantiated and run identically to a
    :class:`~libcst.codemod.Codemod`. However, it provides support for providing
    help text and command-line arguments to ``libcst.tool codemod`` as well as
    facilities for automatically running certain common transforms after executing
    your :meth:`~libcst.codemod.Codemod.transform_module_impl`.

    The following list of transforms are automatically run at this time:

    - :class:`~libcst.codemod.visitors.AddImportsVisitor` (adds needed imports to a module).
    - :class:`~libcst.codemod.visitors.RemoveImportsVisitor` (removes unreferenced imports from a module).
    """

    #: An overrideable description attribute so that codemods can provide
    #: a short summary of what they do. This description will show up in
    #: command-line help as well as when listing available codemods.
    DESCRIPTION: str = "No description."

    @staticmethod
    def add_args(arg_parser: argparse.ArgumentParser) -> None:
        """
        Override this to add arguments to the CLI argument parser. These args
        will show up when the user invokes ``libcst.tool codemod`` with
        ``--help``. They will also be presented to your class's ``__init__``
        method. So, if you define a command with an argument 'foo', you should also
        have a corresponding 'foo' positional or keyword argument in your
        class's ``__init__`` method.
        """
        pass

    def _instantiate_and_run(self, transform: Type[_Codemod], tree: Module) -> Module:
        # Construct the transform with our shared context and apply it.
        inst = transform(self.context)
        return inst.transform_module(tree)

    @abstractmethod
    def transform_module_impl(self, tree: Module) -> Module:
        """
        Override this with your transform. You should take in the tree, optionally
        mutate it and then return the mutated version. The module reference and all
        calculated metadata are available for the lifetime of this function.
        """
        ...

    def transform_module(self, tree: Module) -> Module:
        # Overrides (but then calls) Codemod's transform_module to provide
        # a spot where additional supported transforms can be attached and run.
        tree = super().transform_module(tree)

        # List of transforms we should run, with their context key they use
        # for storing in context.scratch. Typically, the transform will also
        # have a static method that other transforms can use which takes
        # a context and other optional args and modifies its own context key
        # accordingly. We import them here so that we don't have circular imports.
        supported_transforms: Dict[str, Type[Codemod]] = {
            AddImportsVisitor.CONTEXT_KEY: AddImportsVisitor,
            RemoveImportsVisitor.CONTEXT_KEY: RemoveImportsVisitor,
        }

        # For any visitors that we support auto-running, run them here if needed.
        # A populated scratch key means the corresponding visitor has work queued.
        for key, transform in supported_transforms.items():
            if key in self.context.scratch:
                # We have work to do, so lets run this.
                tree = self._instantiate_and_run(transform, tree)

        # We're finally done!
        return tree
class VisitorBasedCodemodCommand(ContextAwareTransformer, CodemodCommand, ABC):
    """
    A command that acts identically to a visitor-based transform, but also has
    the support of :meth:`~libcst.codemod.CodemodCommand.add_args` and running
    supported helper transforms after execution. See
    :class:`~libcst.codemod.CodemodCommand` and
    :class:`~libcst.codemod.ContextAwareTransformer` for additional documentation.
    """

    # No body needed: all behavior comes from the two parent classes.
    pass
class MagicArgsCodemodCommand(CodemodCommand, ABC):
    """
    A "magic" args command, which auto-magically looks up the transforms that
    are yielded from :meth:`~libcst.codemod.MagicArgsCodemodCommand.get_transforms`
    and instantiates them using values out of the context. Visitors yielded in
    :meth:`~libcst.codemod.MagicArgsCodemodCommand.get_transforms` must have
    constructor arguments that match a key in the context
    :attr:`~libcst.codemod.CodemodContext.scratch`. The easiest way to
    guarantee that is to use :meth:`~libcst.codemod.CodemodCommand.add_args`
    to add a command arg that will be parsed for each of the args. However, if
    you wish to chain transforms, adding to the scratch in one transform will make
    the value available to the constructor in subsequent transforms as well as the
    scratch for subsequent transforms.
    """

    def __init__(self, context: CodemodContext, **kwargs: Dict[str, object]) -> None:
        super().__init__(context)
        # Seed scratch so yielded transforms can be constructed from these
        # values by parameter name.
        self.context.scratch.update(kwargs)

    @abstractmethod
    def get_transforms(self) -> Generator[Type[Codemod], None, None]:
        """
        A generator which yields one or more subclasses of
        :class:`~libcst.codemod.Codemod`. In the general case, you will usually
        yield a series of classes, but it is possible to programmatically decide
        which classes to yield depending on the contents of the context
        :attr:`~libcst.codemod.CodemodContext.scratch`.

        Note that you should yield classes, not instances of classes, as the
        point of :class:`~libcst.codemod.MagicArgsCodemodCommand` is to
        instantiate them for you with the contents of
        :attr:`~libcst.codemod.CodemodContext.scratch`.
        """
        ...

    def _instantiate(self, transform: Type[Codemod]) -> Codemod:
        """Construct *transform*, pulling its constructor args from context.scratch."""
        # Grab the expected arguments
        argspec = inspect.getfullargspec(transform.__init__)
        args: List[object] = []
        kwargs: Dict[str, object] = {}

        # pyre-fixme[6]: Expected `Sized` for 1st param but got `Union[Tuple[],
        #  Tuple[Any, ...]]`.
        last_default_arg = len(argspec.args) - len(argspec.defaults or ())
        for i, arg in enumerate(argspec.args):
            if arg in ["self", "context"]:
                # Self is bound, and context we explicitly include below.
                continue
            if arg not in self.context.scratch:
                if i >= last_default_arg:
                    # This arg has a default, so the fact that its missing is fine.
                    continue
                raise KeyError(
                    f"Visitor {transform.__name__} requires positional arg {arg} but "
                    + "it is not in our context nor does it have a default! It should "
                    + "be provided by an argument returned from the 'add_args' method "
                    + "or populated into context.scratch by a previous transform!"
                )
            # We found something in scratch. So, forward it.
            args.append(self.context.scratch[arg])

        # Bug fix: getfullargspec returns None (not {}) for kwonlydefaults when
        # no keyword-only argument has a default, so both the membership test
        # and the eagerly-evaluated subscript below used to raise TypeError /
        # KeyError instead of the intended behavior.
        kwonlydefaults = argspec.kwonlydefaults or {}
        for kwarg in argspec.kwonlyargs:
            if kwarg not in self.context.scratch and kwarg not in kwonlydefaults:
                raise KeyError(
                    f"Visitor {transform.__name__} requires keyword arg {kwarg} but "
                    + "it is not in our context nor does it have a default! It should "
                    + "be provided by an argument returned from the 'add_args' method "
                    + "or populated into context.scratch by a previous transform!"
                )
            kwargs[kwarg] = self.context.scratch.get(kwarg, kwonlydefaults.get(kwarg))

        # Return an instance of the transform with those arguments
        return transform(self.context, *args, **kwargs)

    def transform_module_impl(self, tree: Module) -> Module:
        # Run each yielded transform in order, feeding the result forward.
        for transform in self.get_transforms():
            inst = self._instantiate(transform)
            tree = inst.transform_module(tree)
        return tree
|
python
|
#! /usr/bin/python
# -*- coding: utf-8 -*-
import tensorflow as tf
from itl.layers.core import Layer
from itl import logging
# from itl.decorators import deprecated_alias
__all__ = [
'UpSampling2dLayer',
'DownSampling2dLayer',
]
class UpSampling2dLayer(Layer):
    """The :class:`UpSampling2dLayer` class is a up-sampling 2D layer, see `tf.image.resize_images <https://www.tensorflow.org/api_docs/python/tf/image/resize_images>`__.

    Parameters
    ----------
    prev_layer : :class:`Layer`
        Previous layer with 4-D Tensor of the shape (batch, height, width, channels) or 3-D Tensor of the shape (height, width, channels).
    size : tuple of int/float
        (height, width) scale factor or new size of height and width.
    is_scale : boolean
        If True (default), the `size` is a scale factor; otherwise, the `size` is the numbers of pixels of height and width.
    method : int
        The resize method selected through the index. Defaults index is 0 which is ResizeMethod.BILINEAR.
            - Index 0 is ResizeMethod.BILINEAR, Bilinear interpolation.
            - Index 1 is ResizeMethod.NEAREST_NEIGHBOR, Nearest neighbor interpolation.
            - Index 2 is ResizeMethod.BICUBIC, Bicubic interpolation.
            - Index 3 ResizeMethod.AREA, Area interpolation.
    align_corners : boolean
        If True, align the corners of the input and output. Default is False.
    name : str
        A unique layer name.

    """

    # @deprecated_alias(layer='prev_layer', end_support_version=1.9)  # TODO remove this line for the 1.9 release
    def __init__(
            self,
            prev_layer,
            size,
            is_scale=True,
            method=0,
            align_corners=False,
            name='upsample2d_layer',
    ):
        super(UpSampling2dLayer, self).__init__(prev_layer=prev_layer, name=name)

        logging.info(
            "UpSampling2dLayer %s: is_scale: %s size: %s method: %d align_corners: %s" %
            (self.name, is_scale, size, method, align_corners)
        )

        # Bug fix: the original condition (`not isinstance(...) and len(size) == 2`)
        # accepted most malformed sizes and raised TypeError on unsized inputs.
        # `size` must be a 2-element list/tuple of (height, width).
        if not isinstance(size, (list, tuple)) or len(size) != 2:
            raise AssertionError("size must be a (height, width) list/tuple")

        # Convert a scale factor into absolute output pixels, based on rank.
        if len(self.inputs.get_shape()) == 3:
            if is_scale:
                size_h = size[0] * int(self.inputs.get_shape()[0])
                size_w = size[1] * int(self.inputs.get_shape()[1])
                size = [int(size_h), int(size_w)]
        elif len(self.inputs.get_shape()) == 4:
            if is_scale:
                size_h = size[0] * int(self.inputs.get_shape()[1])
                size_w = size[1] * int(self.inputs.get_shape()[2])
                size = [int(size_h), int(size_w)]
        else:
            raise Exception("Donot support shape %s" % self.inputs.get_shape())

        # Remembered for to_caffe_prototxt().
        self.size = size

        with tf.variable_scope(name):
            try:
                self.outputs = tf.image.resize_images(
                    self.inputs, size=size, method=method, align_corners=align_corners
                )
            except Exception:  # for TF 0.10, which used new_height/new_width kwargs
                self.outputs = tf.image.resize_images(
                    self.inputs, new_height=size[0], new_width=size[1], method=method, align_corners=align_corners
                )

        self._add_layers(self.outputs)

    def to_caffe_prototxt(self):
        '''
        layer{
          bottom:"input"
          top:"output"
          name:"interp_layer"
          type:"Interp"
          interp_param{
            shrink_factor:4
            zoom_factor:3
            pad_beg:0
            pad_end:0
          }
        }

        message InterpParameter {
          optional int32 height = 1 [default = 0]; // Height of output
          optional int32 width = 2 [default = 0]; // Width of output
          optional int32 zoom_factor = 3 [default = 1]; // zoom factor
          optional int32 shrink_factor = 4 [default = 1]; // shrink factor
          optional int32 pad_beg = 5 [default = 0]; // padding at begin of input
          optional int32 pad_end = 6 [default = 0]; // padding at end of input
        }
        '''
        interp_layer = self.create_caffe_layer()
        interp_layer.name = self.name
        interp_layer.type = 'Interp'
        self.append_bottom_from_inputs(interp_layer)
        self.append_top_from_outputs(interp_layer)
        # Emit the resolved absolute output size so Caffe's Interp layer matches.
        interp_layer.interp_param.height = self.size[0]
        interp_layer.interp_param.width = self.size[1]
        self.add_activation_layer()
class DownSampling2dLayer(Layer):
    """The :class:`DownSampling2dLayer` class is down-sampling 2D layer, see `tf.image.resize_images <https://www.tensorflow.org/versions/master/api_docs/python/image/resizing#resize_images>`__.

    Parameters
    ----------
    prev_layer : :class:`Layer`
        Previous layer with 4-D Tensor in the shape of (batch, height, width, channels) or 3-D Tensor in the shape of (height, width, channels).
    size : tuple of int/float
        (height, width) scale factor or new size of height and width.
    is_scale : boolean
        If True (default), the `size` is the scale factor; otherwise, the `size` are numbers of pixels of height and width.
    method : int
        The resize method selected through the index. Defaults index is 0 which is ResizeMethod.BILINEAR.
            - Index 0 is ResizeMethod.BILINEAR, Bilinear interpolation.
            - Index 1 is ResizeMethod.NEAREST_NEIGHBOR, Nearest neighbor interpolation.
            - Index 2 is ResizeMethod.BICUBIC, Bicubic interpolation.
            - Index 3 ResizeMethod.AREA, Area interpolation.
    align_corners : boolean
        If True, exactly align all 4 corners of the input and output. Default is False.
    name : str
        A unique layer name.

    """

    # @deprecated_alias(layer='prev_layer', end_support_version=1.9)  # TODO remove this line for the 1.9 release
    def __init__(
            self,
            prev_layer,
            size,
            is_scale=True,
            method=0,
            align_corners=False,
            name='downsample2d_layer',
    ):
        super(DownSampling2dLayer, self).__init__(prev_layer=prev_layer, name=name)

        logging.info(
            "DownSampling2dLayer %s: is_scale: %s size: %s method: %d, align_corners: %s" %
            (self.name, is_scale, size, method, align_corners)
        )

        # Bug fix: the original condition (`not isinstance(...) and len(size) == 2`)
        # accepted most malformed sizes and raised TypeError on unsized inputs.
        # `size` must be a 2-element list/tuple of (height, width).
        if not isinstance(size, (list, tuple)) or len(size) != 2:
            raise AssertionError("size must be a (height, width) list/tuple")

        # Convert a scale factor into absolute output pixels, based on rank.
        if len(self.inputs.get_shape()) == 3:
            if is_scale:
                size_h = size[0] * int(self.inputs.get_shape()[0])
                size_w = size[1] * int(self.inputs.get_shape()[1])
                size = [int(size_h), int(size_w)]
        elif len(self.inputs.get_shape()) == 4:
            if is_scale:
                size_h = size[0] * int(self.inputs.get_shape()[1])
                size_w = size[1] * int(self.inputs.get_shape()[2])
                size = [int(size_h), int(size_w)]
        else:
            raise Exception("Do not support shape %s" % self.inputs.get_shape())

        with tf.variable_scope(name):
            try:
                self.outputs = tf.image.resize_images(
                    self.inputs, size=size, method=method, align_corners=align_corners
                )
            except Exception:  # for TF 0.10, which used new_height/new_width kwargs
                self.outputs = tf.image.resize_images(
                    self.inputs, new_height=size[0], new_width=size[1], method=method, align_corners=align_corners
                )

        self._add_layers(self.outputs)
|
python
|
#!/usr/bin/env python
# -*- encoding: utf-8 -*-
'''
@Author : xuqiang
@License : (C) Copyright 2020-, H3C
@Contact : [email protected]
@Software: f8s
@File : config.py
@Time : 20200211
@Desc :
'''
#k8s cfg
K8S_CFG = "/root/f8s/kubeconfig.yaml"  # kubeconfig used to reach the cluster
FISCO_HOME = '/root/fisco_network'  # root directory of the generated FISCO network
NFS = "10.114.134.46"  # NFS server address backing the persistent volumes
NET="fisco"  # network name prefix
# Per-organisation manifest templates; {home} and {orgname} are filled in later.
PEER_YAML = "{home}/deploy/{orgname}/peer.yaml"
SVC_YAML = "{home}/deploy/{orgname}/svc.yaml"
PV_YAML = "{home}/deploy/{orgname}/pv.yaml"
PVC_YAML = "{home}/deploy/{orgname}/pvc.yaml"
# NOTE(review): {orgname} is appended after the .yaml suffix here — possibly
# intended to be part of the filename before the extension; confirm against callers.
NAMESPACE_YAML = "{home}/deploy/namespace.yaml{orgname}"
# Base port numbers for the RPC, channel, and front-end services.
RPC=20000
CHANNEL=20200
FRONT=30000
|
python
|
import cfm_api_utils

# Bug fix: get_token was called as a bare name but is never imported into this
# module's namespace, so the call raised NameError — qualify it on the module.
# print is also invoked as a function so the snippet runs on Python 2 and 3.
token = cfm_api_utils.get_token()
print(token)
|
python
|
# -*- coding: utf-8 -*-
# -----------------------------------------------------------------------------
# Copyright (c) 2009- Spyder Project Contributors
#
# Distributed under the terms of the MIT License
# (see spyder/__init__.py for details)
# -----------------------------------------------------------------------------
"""
spyder.plugins.editor.widgets
=============================
Editor related widgets (Editor, EditorStack, CodeEditor) based on only Qt.
"""
|
python
|
class Person:
    """A person identified by first name, last name, and id number."""

    def __init__(self, firstName, lastName, idNumber):
        # Store the supplied fields verbatim.
        self.firstName = firstName
        self.lastName = lastName
        self.idNumber = idNumber

    def printPerson(self):
        """Print the name and id in the expected "Name: Last, First" format."""
        print(f"Name: {self.lastName}, {self.firstName}")
        print(f"ID: {self.idNumber}")
import statistics #for using predefined method statistics to calculate mean
class Student(Person):
    """A Person with exam scores and a letter-grade computation."""

    def __init__(self, firstName, lastName, idNumber, scores):
        # Let the base class handle the identity fields, then keep the scores.
        super().__init__(firstName, lastName, idNumber)
        self.scores = scores

    def calculate(self):
        """Return the grade letter for the mean of self.scores.

        Bands: <40 T, <55 D, <70 P, <80 A, <90 E, <=100 O; "" above 100.
        """
        average = statistics.mean(self.scores)
        # First upper bound the average falls under decides the grade.
        for upper_bound, letter in ((40, "T"), (55, "D"), (70, "P"), (80, "A"), (90, "E")):
            if average < upper_bound:
                return letter
        return "O" if average <= 100 else ""
# Function Name: calculate
# Return: A character denoting the grade.
#
# Write your funct
# Driver: reads "first last id", then a score count (unused), then the scores.
line = input().split() #entering the value
firstName = line[0]
lastName = line[1]
idNum = line[2]
numScores = int(input()) #not needed for python; the list length suffices
scores = list( map(int, input().split()) )
s = Student(firstName, lastName, idNum, scores)#sending the value to class
s.printPerson()
print("Grade:", s.calculate())#receiving the value and printing the output
|
python
|
'''
Copyright (c) 2021, salesforce.com, inc.
All rights reserved.
SPDX-License-Identifier: BSD-3-Clause
For full license text, see the LICENSE file in the repo root or https://opensource.org/licenses/BSD-3-Clause
'''
import torch
# from transformers import MT5ForConditionalGeneration, MT5Config, MT5EncoderModel, MT5Tokenizer, Trainer, TrainingArguments
# from progeny_tokenizer import TAPETokenizer
import numpy as np
import math
import random
import scipy
import time
import pandas as pd
from torch.utils.data import DataLoader, RandomSampler, Dataset, BatchSampler
import typing
from pathlib import Path
import argparse
from collections import OrderedDict
import os
import shutil
import pickle
from tqdm import tqdm
# from modeling_progeny import ProgenyForSequenceToSequenceClassification, ProgenyForValuePrediction, ProgenyForSequenceClassification, ProgenyForContactPrediction, ProgenyConfig
# from transformers_custom import MT5ForConditionalGenerationWithLatentSpace
from transformers_custom import T5ForConditionalGenerationWithLatentSpace, T5Discriminator, T5Tokenizer, T5Config, BertTokenizer, BertForSequenceClassification, GPT2LMHeadModel, GPT2TokenizerFast
# argparse
# CLI for controlled sequence generation plus downstream evaluation
# (latent head, discriminator, ground-truth classifier, GPT-2 perplexity).
parser = argparse.ArgumentParser()
parser.add_argument('--seed', action='store', type=int, default=30, help='random seed')
parser.add_argument('--num_generations', action='store', type=int, default=None, help='(min) number of generation')
parser.add_argument('--generation_output_dir', action='store', type=str, default="controlled_generated_seqs_debug/" )
parser.add_argument('--prepend_output_name', action='store', type=str, default="" )
parser.add_argument('--unique_gen', action='store_true')
parser.add_argument('--no_repeat_input_seq', action='store_true')
# generator args
parser.add_argument('--gen_pretrained_dir', action='store', type=str, default="congen/v1/meanpool_sephead256dim_domi/" )
parser.add_argument('--tokenizer_pretrained_dir', action='store', type=str, default="t5-small" )
# parser.add_argument('--input_seq', action='store', type=str, default="" )
parser.add_argument('--temperature_init', action='store', type=float, default=1.0)
parser.add_argument('--temperature_multiple', action='store', type=float, default=1.2)
parser.add_argument('--patience', action='store', type=int, default=50, help='number of repeats before increasing temperature values for gen decoding')
parser.add_argument('--batch_repeat_threshold', action='store', type=int, default=4)
parser.add_argument('--gen_batch_size', action='store', type=int, default=200)
parser.add_argument('--gen_save_interval', action='store', type=int, default=100, help='interval to save generations')
parser.add_argument('--skip_gen', action='store_true')
parser.add_argument('--gen_token_len', action='store', type=int, default=86, help='len to check for generated tokens')
# new controlled gen args
parser.add_argument('--input_data_dir', action='store', type=str, default="data/sst", help='data for generator input seqs' )
parser.add_argument('--input_data_subset', action='store', type=str, default="train", help='data subset for generator input seqs', choices=["train", "dev", "test"] )
parser.add_argument('--src_config_json', action='store', type=str, default=None )
parser.add_argument('--num_gen_inputs', action='store', type=int, default=None, help='top K most stable sequences to use input for generation')
parser.add_argument('--num_gen_samples_per_input', action='store', type=int, default=10, help='number of generation per input sequence')
# latent space args
parser.add_argument('--latent_pooler', action='store', type=str, default="mean", choices=['mean', 'max', 'cls'], help='op to pool encoder hidden states' )
parser.add_argument('--pool_enc_hidden_states_for_dec', action='store_true')
parser.add_argument('--mask_non_target_z_vector', action='store_true')
parser.add_argument('--separate_targetattr_head', action='store_true')
parser.add_argument('--z_tar_vector_dim', action='store', type=int, default=1)
parser.add_argument('--do_mi', action='store_true')
parser.add_argument('--z_tar_edit_before_dec', action='store', type=float, default=None, help='perturbation to latent vector z_tar')
# vae/wae args
parser.add_argument('--latent_space_type', action='store', type=str, default="plain", choices=['plain', 'vae', 'wae', 'adversarial'], help='type of latent space' )
parser.add_argument('--latent_size', action='store', type=int, default=None, help='use None to use pooled enc hidden state as latent vector')
parser.add_argument('--no_separate_latent_enc', action='store_false', dest='separate_latent_enc', default=True)
parser.add_argument('--no_separate_latent_dec', action='store_false', dest='separate_latent_dec', default=True)
# wae only args
parser.add_argument('--wae_z_enc_type', action='store', type=str, default=None, choices=['deterministic', 'stochastic'], help='type of wae encoder' )
# discriminator args
parser.add_argument('--disc_batch_size', action='store', type=int, default=1000)
parser.add_argument('--disc_save_interval', action='store', type=int, default=30)
parser.add_argument('--disc_pretrained_dir', action='store', type=str, default="/export/share/alvinchan/models/SST5/disc/SST5_discT5base_lre-04_25ep" )
parser.add_argument('--disc_latent_pooler', action='store', type=str, default="mean", choices=['mean', 'max', 'cls'], help='op to pool encoder hidden states' )
# GT model args
parser.add_argument('--gt_batch_size', action='store', type=int, default=1000)
parser.add_argument('--gt_tokenizer_pretrained_dir', action='store', type=str, default="bert-large-uncased" )
parser.add_argument('--gt_pretrained_dir', action='store', type=str, default="/export/share/alvinchan/models/SST5/disc/SST5_clsBERTlarge_lre-05_30ep_bs32" )
parser.add_argument('--gt_save_interval', action='store', type=int, default=30, help='interval to save generations')
# PPL model args
parser.add_argument('--ppl_model_id', action='store', type=str, default="gpt2-large" )
# SST5 args
parser.add_argument('--gen_input_labels', nargs='+', help='Labels of samples to use for generation input seqs, labels are 0: strongly neg, 1: neg, 2: neutral, 3: pos, 4: strongly pos')
parser.add_argument('--prepended_cls_token', action='store', type=str, default="<extra_id_0>" )
args = parser.parse_args()
print("args: ", args)
# Unpack parsed args into module-level names used throughout the script.
seed = args.seed
num_generations = args.num_generations
gen_save_interval = args.gen_save_interval
generation_output_dir = args.generation_output_dir
prepend_output_name = args.prepend_output_name
gen_pretrained_dir = args.gen_pretrained_dir
tokenizer_pretrained_dir = args.tokenizer_pretrained_dir
gen_input_labels = args.gen_input_labels
prepended_cls_token = args.prepended_cls_token
temperature_init = args.temperature_init
temperature_multiple = args.temperature_multiple
# NOTE(review): patience and batch_repeat_threshold are unpacked here but do
# not appear to be read later in this script — confirm whether they are dead.
patience = args.patience
batch_repeat_threshold = args.batch_repeat_threshold
gen_batch_size = args.gen_batch_size
unique_gen = args.unique_gen
no_repeat_input_seq =args.no_repeat_input_seq
disc_batch_size = args.disc_batch_size
disc_save_interval = args.disc_save_interval
disc_pretrained_dir = args.disc_pretrained_dir
disc_latent_pooler = args.disc_latent_pooler
gt_batch_size = args.gt_batch_size
gt_tokenizer_pretrained_dir = args.gt_tokenizer_pretrained_dir
gt_pretrained_dir = args.gt_pretrained_dir
gt_save_interval = args.gt_save_interval
ppl_model_id = args.ppl_model_id
# new controlled gen args
src_config_json = args.src_config_json
input_data_dir = args.input_data_dir
input_data_subset = args.input_data_subset
num_gen_inputs = args.num_gen_inputs
num_gen_samples_per_input = args.num_gen_samples_per_input
z_tar_edit_before_dec = args.z_tar_edit_before_dec
# Keyword arguments forwarded to the generator's from_pretrained() to
# configure its latent space (pooling, target-attribute head, VAE/WAE type).
latent_space_args = {
    'latent_pooler': args.latent_pooler,
    'pool_enc_hidden_states_for_dec': args.pool_enc_hidden_states_for_dec,
    'mask_non_target_z_vector': args.mask_non_target_z_vector,
    'separate_targetattr_head': args.separate_targetattr_head,
    'z_tar_vector_dim': args.z_tar_vector_dim,
    'do_mi': args.do_mi,
    'latent_space_type': args.latent_space_type,
    'latent_size': args.latent_size,
    'separate_latent_enc': args.separate_latent_enc,
    'separate_latent_dec': args.separate_latent_dec,
    'wae_z_enc_type': args.wae_z_enc_type,
}
# If the generator checkpoint folder lacks a config.json, copy in the
# user-supplied source config so from_pretrained() can load the model.
if not os.path.isfile(os.path.join(gen_pretrained_dir, 'config.json')):
    shutil.copy(src_config_json, gen_pretrained_dir)
output_dir = Path(generation_output_dir)
output_dir.mkdir(parents=True, exist_ok=True)
# os.makedirs(generation_output_dir, exist_ok = True)
# wt_seq = 'STIEEQAKTFLDKFNHEAEDLFYQSSLASWNYNTNITEENVQNMNNAGDKWSAFLKEQSTLAQMYPLQEIQNLTVKLQLQALQ'
# constant_region = 'NTNITEEN'
# Seed every RNG in play for reproducible sampling.
np.random.seed(seed)
random.seed(seed)
torch.manual_seed(seed)
# Set up generator model
tokenizer = T5Tokenizer.from_pretrained(tokenizer_pretrained_dir)
# tokenizer = TAPETokenizer(vocab="progeny")
device = torch.device('cuda:0')  # NOTE(review): assigned but model placement is handled by parallelize()
gen_model = T5ForConditionalGenerationWithLatentSpace.from_pretrained(gen_pretrained_dir, **latent_space_args)
gen_model.parallelize()  # shard the T5 blocks across available GPUs
# Set up input data - start -
TEXT_COL, LABEL_COL = 'text', 'truth'

def read_sst5(data_dir, colnames=(LABEL_COL, TEXT_COL)):
    """Load the SST-5 train/dev/test splits from tab-separated files.

    Each ``sst_<split>.txt`` file has lines of the form
    ``__label__<1-5>\\t<sentence>``.

    Args:
        data_dir: Directory containing ``sst_train.txt``, ``sst_dev.txt``
            and ``sst_test.txt``.
        colnames: Column names for (label, text). Default is an immutable
            tuple (the original used a mutable list default, a Python smell).

    Returns:
        dict mapping split name ("train"/"dev"/"test") to a DataFrame with
        zero-indexed integer labels and backticks normalized to apostrophes.
    """
    datasets = {}
    for split in ["train", "dev", "test"]:
        df = pd.read_csv(os.path.join(data_dir, f"sst_{split}.txt"), sep='\t', header=None, names=list(colnames))
        df[LABEL_COL] = df[LABEL_COL].str.replace('__label__', '')
        df[LABEL_COL] = df[LABEL_COL].astype(int)  # Categorical data type for truth labels
        df[LABEL_COL] = df[LABEL_COL] - 1  # Zero-index labels for PyTorch
        # T5Tokenizer cannot tokenize "`" (it becomes <unk>), so normalize it.
        df[TEXT_COL] = df[TEXT_COL].str.replace("`", "'")
        datasets[split] = df
    return datasets
def pad_sequences(sequences: typing.Sequence, constant_value=0, dtype=None) -> np.ndarray:
    """Right-pad a batch of arrays/tensors to a common shape.

    Args:
        sequences: Non-empty sequence of np.ndarray or torch.Tensor entries
            of the same kind; entries may differ in size along any axis.
        constant_value: Fill value for padded positions.
        dtype: Output dtype; defaults to the dtype of the first entry.

    Returns:
        One np.ndarray or torch.Tensor (matching the input kind) of shape
        ``(batch, *per_axis_max)`` holding the padded sequences.

    Raises:
        TypeError: If the first element is neither np.ndarray nor
            torch.Tensor (the original fell through to a NameError here).
    """
    batch_size = len(sequences)
    # Per-axis maximum across the batch determines the padded shape.
    shape = [batch_size] + np.max([seq.shape for seq in sequences], 0).tolist()
    if dtype is None:
        dtype = sequences[0].dtype
    if isinstance(sequences[0], np.ndarray):
        array = np.full(shape, constant_value, dtype=dtype)
    elif isinstance(sequences[0], torch.Tensor):
        array = torch.full(shape, constant_value, dtype=dtype)
    else:
        raise TypeError(f"Unsupported sequence element type: {type(sequences[0])}")
    # Copy each sequence into the top-left corner of its padded slot.
    for arr, seq in zip(array, sequences):
        arrslice = tuple(slice(dim) for dim in seq.shape)
        arr[arrslice] = seq
    return array
# Load SST-5 splits and pick the subset that seeds generation.
datasets = read_sst5(input_data_dir)
input_data_df = datasets[input_data_subset]
# input_data_df = datasets['train']
print("gen_input_labels: ", gen_input_labels)
if gen_input_labels != None:
    # Keep only rows whose sentiment label is in the requested label set.
    gen_input_labels = [int(gen_input_label) for gen_input_label in gen_input_labels]
    gen_input_df = input_data_df.loc[input_data_df['truth'].isin(gen_input_labels)]
else:
    gen_input_df = input_data_df
print("gen_input_df len: ", len(gen_input_df))
if num_gen_inputs is None:
    num_gen_inputs = len(gen_input_df)
gen_input_df = gen_input_df.iloc[:num_gen_inputs]
# All training sentences, used later to flag generations that copy train data.
train_seq_list = input_data_df['text'].tolist()
# Set up input data - end -
# gen code - start
if num_generations is None:
    num_generations = num_gen_inputs * num_gen_samples_per_input
print("num_generations: ", num_generations)
# Each unique input sequence is repeated num_gen_samples_per_input times
# within a generation batch.
num_unique_seqs_per_batch = gen_batch_size // num_gen_samples_per_input
num_batch = len(gen_input_df) // num_unique_seqs_per_batch
if len(gen_input_df) % num_unique_seqs_per_batch != 0:
    num_batch += 1
# Accumulators for generated sequences and per-sequence provenance flags.
output_seq_list = []
input_seq_list = []
input_seq_class_list = []
output_tensor_list = []
repeat_list = []
in_train_data_list = []
unique_n_notrain_list = []
start_time = time.time()
prev_save_path = None
repeat_seq_count = 0
in_train_count = 0
temperature = temperature_init
generation_rounds_done = 0
# loading code for batching input for generations
# Main sampling loop: repeatedly sweep over the input batches, sampling
# num_gen_samples_per_input continuations per input, raising temperature
# each round until num_generations outputs are collected (or load a prior run).
if not args.skip_gen:
    gen_model.eval()
    # get prepended_cls_token_id
    if prepended_cls_token is not None:
        prepended_cls_token_id = tokenizer.encode(prepended_cls_token)[0]
    else:
        prepended_cls_token_id = None
    while len(output_seq_list) < num_generations:
    # while unique_gen and np.sum(unique_n_notrain_list) < num_generations:
        if generation_rounds_done > 0:
            # Later rounds sample hotter to escape repeated outputs.
            temperature = temperature * temperature_multiple
            print("New generation round, temperature: ", temperature)
        # print("num_batch: ", num_batch)
        for batch_ind in tqdm(range(num_batch)):
            # print("batch_ind: ", batch_ind)
            batch_seqs = gen_input_df[batch_ind*num_unique_seqs_per_batch : (batch_ind+1)*num_unique_seqs_per_batch]['text']
            batch_classes = gen_input_df[batch_ind*num_unique_seqs_per_batch : (batch_ind+1)*num_unique_seqs_per_batch]['truth'].tolist()
            # print("batch_seqs: ", batch_seqs)
            batch_input_ids = []
            batch_input_seqs = []
            batch_input_seq_classes = []
            for seq_ind, seq in enumerate(batch_seqs):
                # Replicate each input num_gen_samples_per_input times.
                batch_input_seqs = batch_input_seqs + [seq]* num_gen_samples_per_input
                # print("batch_classes: ", batch_classes)
                seq_class = batch_classes[seq_ind]
                # print("seq_class: ", seq_class)
                batch_input_seq_classes = batch_input_seq_classes + [seq_class]* num_gen_samples_per_input
                # Encode-decode-encode round trip removes spaces before
                # punctuation such as " ," and " .".
                input_ids = tokenizer.encode(tokenizer.decode(tokenizer.encode(seq)))
                # input_ids = tokenizer.encode(seq)
                # prepend cls token to input seqs
                if prepended_cls_token_id is not None:
                    input_ids = [prepended_cls_token_id] + input_ids
                input_ids = np.array(input_ids, np.int64)
                batch_input_ids.extend([input_ids] * num_gen_samples_per_input)
                # batch_input_ids.append(input_ids)
            batch_input_ids = torch.from_numpy(pad_sequences(batch_input_ids, 0)).to(gen_model.device)
            # print("batch_input_ids.shape: ", batch_input_ids.shape)
            # print("A batch_input_ids: ", batch_input_ids)
            gen_output = gen_model.generate(batch_input_ids, max_length=85+1, do_sample=True, temperature=temperature, z_tar_edit_before_dec=z_tar_edit_before_dec)
            # print("gen_output.shape: ", gen_output.shape)
            # print("B gen_output: ", gen_output)
            for seq_ind, gen_seq in enumerate(gen_output.cpu().numpy()):
                # Flag repeats and train-set copies; optionally drop them.
                unique_n_notrain = True
                repeat = False
                in_train_data = False
                str_token_seq = tokenizer.decode(gen_seq.tolist(), skip_special_tokens=True )
                if str_token_seq in output_seq_list:
                    repeat_seq_count += 1
                    repeat = True
                    unique_n_notrain = False
                if str_token_seq in train_seq_list:
                    in_train_count += 1
                    in_train_data = True
                    unique_n_notrain = False
                if unique_gen and not unique_n_notrain:
                    continue
                unique_n_notrain_list.append(unique_n_notrain)
                repeat_list.append(repeat)
                in_train_data_list.append(in_train_data)
                input_seq_str = batch_input_seqs[seq_ind]
                input_seq_list.append(input_seq_str)
                input_seq_class = batch_input_seq_classes[seq_ind]
                input_seq_class_list.append(input_seq_class)
                output_seq_list.append(str_token_seq)
                seq_tensor = gen_output[seq_ind].detach().cpu()
                output_tensor_list.append(seq_tensor)
            if batch_ind % gen_save_interval == 0 and batch_ind != 0:
                # Periodic checkpoint; the previous partial pickle is removed.
                save_path = os.path.join(generation_output_dir, "{}gen_seqs{}-{}.pkl".format(prepend_output_name, len(output_seq_list), num_generations))
                saved_dict = {'output_seq_list': output_seq_list, "input_seq_list": input_seq_list, "input_seq_class_list": input_seq_class_list, "output_tensor_list": output_tensor_list, 'repeat_list': repeat_list, 'in_train_data_list': in_train_data_list, 'temperature': temperature}
                with open(save_path, 'wb') as f:
                    pickle.dump(saved_dict, f)
                print("generated #", len(output_seq_list))
                cur_time = time.time()
                print("Time taken so far:", cur_time - start_time)
                if prev_save_path is not None:
                    os.remove(prev_save_path)
                prev_save_path = save_path
            if unique_gen and np.sum(unique_n_notrain_list) > num_generations:
                break
        generation_rounds_done += 1
        if no_repeat_input_seq:
            break
    # Final full dump of this generation run.
    save_path = os.path.join(generation_output_dir, "{}gen_seqs_full{}.pkl".format(prepend_output_name, num_generations))
    saved_dict = {'output_seq_list': output_seq_list, "input_seq_list": input_seq_list, "input_seq_class_list": input_seq_class_list, "output_tensor_list": output_tensor_list, 'repeat_list': repeat_list, 'in_train_data_list': in_train_data_list, 'temperature': temperature}
    with open(save_path, 'wb') as f:
        pickle.dump(saved_dict, f)
else:
    # Resume path: restore all accumulators from the previous full dump.
    print("Skipping generation step and loading from saved pkl")
    save_path = os.path.join(generation_output_dir, "{}gen_seqs_full{}.pkl".format(prepend_output_name, num_generations))
    with open(save_path, 'rb') as f:
        saved_dict = pickle.load(f)
    output_seq_list = saved_dict['output_seq_list']
    input_seq_list = saved_dict['input_seq_list']
    input_seq_class_list = saved_dict['input_seq_class_list']
    output_tensor_list = saved_dict['output_tensor_list']
    repeat_list = saved_dict['repeat_list']
    in_train_data_list = saved_dict['in_train_data_list']
    temperature = saved_dict['temperature']
if prev_save_path is not None:
    os.remove(prev_save_path)
# print("output_tensor_list: ", output_tensor_list)
print("output_tensor_list len: ", len(output_tensor_list))
# print("output_tensor_list shape: ", output_tensor_list.shape)
gen_tensors = output_tensor_list
# gen_tensors = torch.stack(output_tensor_list, dim=0)
# new gen
# Latent Head inference - start
# Score every generated sequence with the generator's own latent head.
latent_head_pred_list = []
prev_save_path = None
num_disc_batch = len(gen_tensors) // gen_batch_size
if len(gen_tensors) % gen_batch_size != 0:
    num_disc_batch += 1
start_time = time.time()
gen_model.eval()
with torch.no_grad():
    for batch_ind in tqdm(range(num_disc_batch)):
        gen_tensor_batch = gen_tensors[batch_ind*gen_batch_size : (batch_ind+1)*gen_batch_size]
        # gen_tensor_batch = gen_tensors[batch_ind*gen_batch_size : (batch_ind+1)*gen_batch_size, 1:]
        # Pad the variable-length generations, then drop the first token.
        gen_tensor_batch = torch.nn.utils.rnn.pad_sequence(gen_tensor_batch, batch_first=True, padding_value=0)
        gen_tensor_batch = gen_tensor_batch[:, 1:]
        gen_tensor_batch = gen_tensor_batch.to(gen_model.device)
        # print("C gen_tensor_batch: ", gen_tensor_batch)
        # gen_tensor_batch already has <cls> token (32099) at the front due to reconstruction objective
        model_outputs = gen_model(gen_tensor_batch, labels=gen_tensor_batch)
        contrastive_value = model_outputs[1]  # latent-head score is output index 1
        latent_head_pred_list.append(contrastive_value.squeeze().cpu().numpy())
        if batch_ind % disc_save_interval == 0:
            print("latent head inferred #", (batch_ind+1)*gen_batch_size)
            cur_time = time.time()
            save_path = os.path.join(generation_output_dir, "{}latent_head_{}-{}.pkl".format(prepend_output_name, (batch_ind+1)*gen_batch_size, num_generations))
            with open(save_path, 'wb') as f:
                pickle.dump(latent_head_pred_list, f)
            cur_time = time.time()
            print("Time taken so far:", cur_time - start_time)
            if prev_save_path is not None:
                os.remove(prev_save_path)
            prev_save_path = save_path
latent_head_pred_list = np.concatenate(latent_head_pred_list, axis=None).tolist()
save_path = os.path.join(generation_output_dir, "{}latent_head_full{}.pkl".format(prepend_output_name, num_generations))
with open(save_path, 'wb') as f:
    pickle.dump(latent_head_pred_list, f)
if prev_save_path is not None:
    os.remove(prev_save_path)
# Latent Head inference - end
# Discriminator inference
# Disc model set up - Start -
t5config = T5Config.from_pretrained(disc_pretrained_dir)  # NOTE(review): loaded but unused
disc_args = {
    'latent_pooler': args.disc_latent_pooler,
}
disc_model = T5Discriminator.from_pretrained(disc_pretrained_dir, **disc_args)
disc_model.eval()
disc_model = disc_model.to(gen_model.device)
# Disc model set up - End -
# new disc
# Score every generated sequence with the external T5 discriminator.
disc_pred_list = []
prev_save_path = None
num_disc_batch = len(gen_tensors) // disc_batch_size
if len(gen_tensors) % disc_batch_size != 0:
    num_disc_batch += 1
start_time = time.time()
with torch.no_grad():
    for batch_ind in tqdm(range(num_disc_batch)):
        gen_tensor_batch = gen_tensors[batch_ind*disc_batch_size : (batch_ind+1)*disc_batch_size]
        # Pad variable-length generations, then drop the leading token.
        gen_tensor_batch = torch.nn.utils.rnn.pad_sequence(gen_tensor_batch, batch_first=True, padding_value=0)
        gen_tensor_batch = gen_tensor_batch[:, 1:]
        gen_tensor_batch = gen_tensor_batch.to(gen_model.device)
        # print("D gen_tensor_batch: ", gen_tensor_batch)
        disc_output = disc_model(gen_tensor_batch)
        disc_pred_list.append(disc_output[0].cpu().numpy())
        if batch_ind % disc_save_interval == 0:
            print("inferred #", (batch_ind+1)*disc_batch_size)
            cur_time = time.time()
            save_path = os.path.join(generation_output_dir, "{}disc_{}-{}.pkl".format(prepend_output_name, (batch_ind+1)*disc_batch_size, num_generations))
            with open(save_path, 'wb') as f:
                pickle.dump(disc_pred_list, f)
            cur_time = time.time()
            print("Time taken so far:", cur_time - start_time)
            if prev_save_path is not None:
                os.remove(prev_save_path)
            prev_save_path = save_path
disc_pred_list = np.concatenate(disc_pred_list, axis=None).tolist()
save_path = os.path.join(generation_output_dir, "{}disc_full{}.pkl".format(prepend_output_name, num_generations))
with open(save_path, 'wb') as f:
    pickle.dump(disc_pred_list, f)
if prev_save_path is not None:
    os.remove(prev_save_path)
# TODO: Ground-Truth classifier inference - start -
# Ground-Truth model set up - Start -
gt_tokenizer = BertTokenizer.from_pretrained(gt_tokenizer_pretrained_dir)
gt_model = BertForSequenceClassification.from_pretrained(gt_pretrained_dir, num_labels=5)
gt_model.eval()
gt_model = gt_model.to(gen_model.device)
# free up GPU memory
del gen_model
del disc_model
# Ground-Truth model set up - End -
# Ground-Truth model inference
# Classify every generated sentence with the 5-way SST BERT classifier and
# derive binary sentiment from the class probabilities.
gt_pred_list = []
gt_class_pred_list = []
gt_highest_prob_list = []
gt_neg_prob_list = []
gt_pos_prob_list = []
gt_2class_pred_list = []
prev_save_path = None
num_gt_batch = len(output_seq_list) // gt_batch_size
if len(output_seq_list) % gt_batch_size != 0:
    num_gt_batch += 1
start_time = time.time()
with torch.no_grad():
    for batch_ind in tqdm(range(num_gt_batch)):
        # TODO: Process input batch - start -
        gen_seq_batch = output_seq_list[batch_ind*gt_batch_size : (batch_ind+1)*gt_batch_size]
        batch_input_ids = []
        # tokenize
        for seq in gen_seq_batch:
            # print("seq: ", seq)
            input_ids = gt_tokenizer.encode(seq)
            input_ids = np.array(input_ids, np.int64)
            batch_input_ids.append(input_ids)
        # collate
        batch_input_ids = torch.from_numpy(pad_sequences(batch_input_ids, 0)).to(gt_model.device)
        # TODO: Process input batch - end -
        # print("batch_input_ids: ", batch_input_ids)
        gt_output = gt_model(input_ids=batch_input_ids)
        gt_pred_list.append(gt_output.logits.cpu().numpy())
        # gt_class_pred = torch.argmax(gt_output.logits, dim=1)
        gt_class_probs = torch.nn.functional.softmax(gt_output.logits, dim=1)
        gt_highest_prob, gt_class_pred = torch.max(gt_class_probs, dim=1)
        # Collapse 5-way probabilities to binary: classes {0,1} negative,
        # {3,4} positive; neutral class 2 is excluded from both sums.
        gt_neg_prob = torch.sum(gt_class_probs[:, [0,1]], dim=1)
        gt_pos_prob = torch.sum(gt_class_probs[:, [3,4]], dim=1)
        gt_2class_pred = (gt_pos_prob > gt_neg_prob).int()
        gt_class_pred_list.append(gt_class_pred.cpu().numpy())
        gt_highest_prob_list.append(gt_highest_prob.cpu().numpy())
        gt_neg_prob_list.append(gt_neg_prob.cpu().numpy())
        gt_pos_prob_list.append(gt_pos_prob.cpu().numpy())
        gt_2class_pred_list.append(gt_2class_pred.cpu().numpy())
        if batch_ind % gt_save_interval == 0:
            print("inferred #", (batch_ind+1)*gt_batch_size)
            cur_time = time.time()
            save_path = os.path.join(generation_output_dir, "{}gt_{}-{}.pkl".format(prepend_output_name, (batch_ind+1)*gt_batch_size, num_generations))
            with open(save_path, 'wb') as f:
                pickle.dump(gt_pred_list, f)
            cur_time = time.time()
            print("Time taken so far:", cur_time - start_time)
            if prev_save_path is not None:
                os.remove(prev_save_path)
            prev_save_path = save_path
gt_pred_list = np.concatenate(gt_pred_list, axis=0)
gt_class_pred_list = np.concatenate(gt_class_pred_list, axis=None).tolist()
gt_highest_prob_list = np.concatenate(gt_highest_prob_list, axis=None).tolist()
gt_neg_prob_list = np.concatenate(gt_neg_prob_list, axis=None).tolist()
gt_pos_prob_list = np.concatenate(gt_pos_prob_list, axis=None).tolist()
gt_2class_pred_list = np.concatenate(gt_2class_pred_list, axis=None).tolist()
save_path = os.path.join(generation_output_dir, "{}gt_pred_full{}.pkl".format(prepend_output_name, num_generations))
with open(save_path, 'wb') as f:
    pickle.dump(gt_pred_list, f)
if prev_save_path is not None:
    os.remove(prev_save_path)
# TODO:Ground-Truth classifier inference - end -
# TODO: PPL computation with GPT-2 - start -
# Compute GPT-2 perplexity for each generated sequence and each input
# sequence, one sequence at a time (loss would be averaged across a batch).
ppl_batch_size = 1 # only works with batch size 1 now
ppl_model = GPT2LMHeadModel.from_pretrained(ppl_model_id).to(gt_model.device)
ppl_tokenizer = GPT2TokenizerFast.from_pretrained(ppl_model_id)
gen_seq_ppl_list = []
input_seq_ppl_list = []
del gt_model  # free up GPU memory
num_ppl_batch = len(output_seq_list) // ppl_batch_size
if len(output_seq_list) % ppl_batch_size != 0:
    num_ppl_batch += 1
start_time = time.time()
print("PPL compute for generated sequences")
with torch.no_grad():
    for batch_ind in tqdm(range(num_ppl_batch)):
        # TODO: Process input batch - start -
        # decoded string sequences
        gen_seq_batch = output_seq_list[batch_ind*ppl_batch_size : (batch_ind+1)*ppl_batch_size]
        batch_input_ids = []
        # tokenize
        for seq in gen_seq_batch:
            # print("seq: ", seq)
            input_ids = ppl_tokenizer.encode(seq)
            # print("input_ids: ", input_ids)
            input_ids = np.array(input_ids, np.int64)
            batch_input_ids.append(input_ids)
        # collate
        batch_input_ids = torch.from_numpy(pad_sequences(batch_input_ids, 0)).to(ppl_model.device)
        # print("batch_input_ids: ", batch_input_ids)
        # print("batch_input_ids.shape: ", batch_input_ids.shape)
        if batch_input_ids.shape[1] == 0:
            # Empty generation: no tokens to score.
            gen_seq_ppl_list.append(None)
        else:
            ppl_output = ppl_model(input_ids=batch_input_ids, labels=batch_input_ids)
            log_likelihood = ppl_output[0]
            # print("B log_likelihood: ", log_likelihood)
            seq_ppl = torch.exp(log_likelihood)  # PPL = exp(mean NLL)
            # print("B seq_ppl: ", seq_ppl)
            # print("B seq_ppl.shape: ", seq_ppl.shape)
            gen_seq_ppl_list.append(seq_ppl.cpu().numpy())
gen_seq_ppl_list = np.concatenate(gen_seq_ppl_list, axis=None).tolist()
print("PPL compute for input sequences")
# infer input_seq ppl
with torch.no_grad():
    for batch_ind in tqdm(range(num_ppl_batch)):
        # TODO: Process input batch - start -
        input_seq_batch = input_seq_list[batch_ind*ppl_batch_size : (batch_ind+1)*ppl_batch_size]
        batch_input_ids = []
        # tokenize
        for seq in input_seq_batch:
            fixed_seq = ppl_tokenizer.decode(ppl_tokenizer.encode(seq)) # hack to remove space before punctuations (e.g. ' .' , ' ,') which inflates ppl value
            input_ids = ppl_tokenizer.encode(fixed_seq)
            input_ids = np.array(input_ids, np.int64)
            batch_input_ids.append(input_ids)
        # collate
        batch_input_ids = torch.from_numpy(pad_sequences(batch_input_ids, 0)).to(ppl_model.device)
        # TODO: Process input batch - end -
        ppl_output = ppl_model(input_ids=batch_input_ids, labels=batch_input_ids)
        log_likelihood = ppl_output[0]
        seq_ppl = torch.exp(log_likelihood)
        input_seq_ppl_list.append(seq_ppl.cpu().numpy())
input_seq_ppl_list = np.concatenate(input_seq_ppl_list, axis=None).tolist()
# TODO: PPL computation with GPT-2 - end -
# Assemble all per-sequence metrics and metadata into one table and save
# it as a TSV for downstream analysis.
df = pd.DataFrame()
df['disc_pred'] = disc_pred_list
df['latent_head_pred'] = latent_head_pred_list
df['gen_input_seq_class'] = input_seq_class_list
df['gt_class_pred'] = gt_class_pred_list
df['gt_highest_prob'] = gt_highest_prob_list
df['gt_2class_pred'] = gt_2class_pred_list
df['gt_neg_prob'] = gt_neg_prob_list
df['gt_pos_prob'] = gt_pos_prob_list
df['generated_seq_ppl'] = gen_seq_ppl_list
df['input_seq_ppl'] = input_seq_ppl_list
df['generated_seq'] = output_seq_list
df['gen_input_seq'] = input_seq_list
df['repeated_gen'] = repeat_list
df['in_train_data_gen'] = in_train_data_list
# Latent head-predicted most positive ones first
df = df.sort_values(by='latent_head_pred', ascending=False)
tsv_name = os.path.join(generation_output_dir, "{}congen_seqs{}.tsv".format(prepend_output_name, num_generations))
df.to_csv(tsv_name, sep="\t", index=False)
|
python
|
# 167. Two Sum II - Input array is sorted - LeetCode
# https://leetcode.com/problems/two-sum-ii-input-array-is-sorted/description/
class Solution(object):
    def twoSum(self, numbers, target):
        """Two-pointer search over a sorted, 1-indexed array.

        :type numbers: List[int]
        :type target: int
        :rtype: List[int] -- 1-based indices of the pair, or [] if none exists
        """
        left = 1
        right = len(numbers)
        # BUG FIX: the original `while True` never terminated (eventually
        # raising IndexError or spinning) when no pair sums to target.
        while left < right:
            sums = numbers[left-1] + numbers[right-1]
            if sums == target:
                return [ left, right ]
            elif sums > target:
                right -= 1  # sum too large: move the high pointer down
            else:
                left += 1  # sum too small: move the low pointer up
        return []  # no pair found
# Ad-hoc test harness: (numbers, target, expected 1-based indices).
# NOTE: uses the Python 2 print statement -- run under Python 2.
test_case = [
    ( [1,2,3,4,5,6,7,8], 5, [1,4] ),
    ( [2,7,11,15], 9, [1,2] ),
]
s = Solution()
for i in test_case:
    r = s.twoSum(i[0],i[1])
    print r, r == i[2]
|
python
|
import logging
def column_headers(header_row):
    """Validate that a CSV header row starts with the four expected columns.

    Args:
        header_row: Raw header line, e.g. "code,english_label,level,welsh_label".

    Returns:
        bool: True when the first four comma-separated fields exactly match
        the expected names (extra trailing columns are tolerated), else False.
    """
    logging.info(f"Validating header row, headers: {header_row}")
    header_list = header_row.split(",")
    # Expected names, in positional order; replaces four copy-pasted checks.
    expected = ("code", "english_label", "level", "welsh_label")
    valid = True
    try:
        for index, name in enumerate(expected):
            if header_list[index] != name:
                logging.info(f"got in {name}: {header_list[index]}")
                valid = False
    except IndexError:
        # Fewer than four columns were supplied.
        logging.exception(f"index out of range\nheader_row:{header_row}")
        valid = False
    return valid
|
python
|
import json
from config import TEXT_LANGUAGE
from game.roles.villager import Villager
from game.roles.werewolf import Werewolf
from game.roles.guard import Guard
from game.roles.seer import Seer
from game.roles.lycan import Lycan
from game.roles.betrayer import Betrayer
from game.roles.superwolf import Superwolf
from game.roles.fox import Fox
from game.roles.witch import Witch
from game.roles.zombie import Zombie
from game.roles.cupid import Cupid
import utils
# Role metadata keyed by capitalized role class name; each entry carries
# localized names, descriptions and nighttime commands (see helpers below).
role_info = utils.common.read_json_file("json/role_info.json")
def get_all_roles():
    """Return a tuple of every playable role class in the game."""
    roles = (
        Villager,
        Werewolf,
        Seer,
        Guard,
        Lycan,
        Betrayer,
        Superwolf,
        Fox,
        Witch,
        Zombie,
        Cupid,
    )
    return roles
def get_role_type(name):
    """Return the role class whose ``__name__`` equals `name`, or None."""
    matches = (role for role in get_all_roles() if role.__name__ == name)
    return next(matches, None)
def get_role_name_in_language(name, language):
    """Return the localized display name for a role, or None when missing."""
    entry = role_info.get(name.capitalize())
    if entry is None:
        return None
    return entry.get(f"name_{language}")
def get_role_description(name):
    """Return the role description in the configured text language, or None."""
    key = name.capitalize()
    if key not in role_info:
        return None
    return role_info[key]["description"][TEXT_LANGUAGE]
def get_role_nighttime_commands(name):
    """Return the role's nighttime commands, or an empty list if unknown."""
    key = name.capitalize()
    if key not in role_info:
        return []
    return role_info[key]["nighttime_commands"]
|
python
|
# Shopping tally: t = total spent, c = items costing over R$1000,
# i = items read so far, m = cheapest price seen, menor = its product name.
t = c = i = 0
menor = ''
m = 0.0
while True:
    n = str(input('Nome: ')).capitalize()
    p = float(input('Preço: R$ '))  # fixed mojibake in the prompt ('Preรงo')
    t += p
    i += 1
    if p > 1000:
        c += 1
    # BUG FIX: the original never updated m after the first product, so
    # "menor" recorded any item cheaper than the FIRST one instead of the
    # true running minimum.
    if i == 1 or p < m:
        m = p
        menor = n
    op = ' '
    while op not in 'sn':
        op = str(input('Quer continuar? [s/n]')).lower()[0]
    if op == 'n':
        break
print(f'Total: {t:.2f}')
print(f'Mais de 1000: {c}')
print(f'Mais barato: {menor}')
|
python
|
# -*- coding: utf-8 -*-
"""
Some generic laguage level utilities for internal use.
"""
import collections
import functools
import sys
import warnings
from typing import (
AbstractSet,
Any,
Callable,
Hashable,
Iterable,
Iterator,
List,
Mapping,
MutableMapping,
Optional,
Sequence,
Set,
Tuple,
Type,
TypeVar,
Union,
ValuesView,
cast,
)
# Generic type variables and aliases shared by the helpers in this module.
T = TypeVar("T")
G = TypeVar("G")
H = TypeVar("H", bound=Hashable)  # hashable element type (deduplication)
C = TypeVar("C", bound=Callable[..., T])
FuncType = Callable[..., Any]
Fn = TypeVar("Fn", bound=FuncType)  # decorator-friendly callable type
TType = TypeVar("TType", bound=Type[Any])
# A value of type T, or a zero-argument callable producing one (see lazy()).
Lazy = Union[T, Callable[[], T]]
def lazy(maybe_callable: Union[T, Callable[[], T]]) -> T:
    """
    Call and return a value if callable else return it.

    >>> lazy(42)
    42

    >>> lazy(lambda: 42)
    42
    """
    return maybe_callable() if callable(maybe_callable) else maybe_callable
def map_and_filter(
    func: Callable[[T], Optional[T]], iterable: Iterable[T]
) -> List[T]:
    """
    Map an iterable filtering out None.

    >>> map_and_filter(lambda x: None if x % 2 else x, range(10))
    [0, 2, 4, 6, 8]
    """
    results = []
    for element in iterable:
        mapped = func(element)
        if mapped is not None:
            results.append(mapped)
    return results
def deduplicate(
    iterable: Iterable[H], key: Optional[Callable[[H], Hashable]] = None
) -> Iterator[H]:
    """
    Deduplicate an iterable, preserving first-seen order.

    Args:
        iterable: source iterator
        key: optional identity function; defaults to the entry itself

    Yields:
        Next deduplicated entry in order of original appearance.

    >>> list(deduplicate([1, 2, 1, 3, 3]))
    [1, 2, 3]

    >>> list(deduplicate([1, 2, 1, 3, 3], key=lambda x: x % 2))
    [1, 2]
    """
    seen = set()  # type: Set[Hashable]
    for entry in iterable:
        marker = entry if key is None else key(entry)
        if marker not in seen:
            seen.add(marker)
            yield entry
def maybe_first(
    iterable: Iterable[T], default: Optional[T] = None
) -> Optional[T]:
    """
    Return the first item in an iterable, or `default` when it is empty.

    >>> maybe_first([1, 2, 3])
    1

    >>> maybe_first(()) is None
    True

    >>> maybe_first((), 1)
    1
    """
    # next() with a default replaces the try/except StopIteration dance.
    return next(iter(iterable), default)
def find_one(
    iterable: Iterable[T],
    predicate: Callable[[T], Any],
    default: Optional[T] = None,
) -> Optional[T]:
    """
    Extract the first item matching a predicate function in an iterable.

    Returns ``default`` (``None`` by default) when no entry matches.

    >>> find_one([1, 2, 3, 4], lambda x: x == 2)
    2

    >>> find_one([1, 2, 3, 4], lambda x: x == 5) is None
    True
    """
    return next((entry for entry in iterable if predicate(entry)), default)
# Not sure how to type this correctly without recursive types.
def flatten(lst):
    """
    Recursively flatten iterators, ignoring strings.

    `itertools.chain` could be used instead but would flatten all iterables
    including strings which is not ideal.

    >>> list(flatten([1, 2, (1, 2, [3])]))
    [1, 2, 1, 2, 3]

    >>> list(flatten([1, 2, ('foo', [3])]))
    [1, 2, 'foo', 3]

    >>> list(flatten([]))
    []

    >>> list(flatten([[], []]))
    []
    """
    for entry in lst:
        # Inline iterability check (strings/bytes are treated as leaves).
        if isinstance(entry, (str, bytes)):
            nested = False
        else:
            try:
                iter(entry)
            except TypeError:
                nested = False
            else:
                nested = True
        if nested:
            yield from flatten(entry)
        else:
            yield entry
def is_iterable(value: Any, strings: bool = True) -> bool:
    """
    Check if a value is iterable.

    This does no type comparisons.
    Note that by default strings are iterables too!

    Args:
        value (Any): Maybe iterable
        strings (bool): Include strings as iterable, defaults to ``True``

    Returns:
        bool: Whether ``value`` is iterable

    >>> is_iterable([])
    True

    >>> is_iterable((x for x in range(10)))
    True

    >>> is_iterable("Hello World!")
    True

    >>> is_iterable("Hello World!", False)
    False

    >>> is_iterable(False)
    False
    """
    # Duck-typed probe: anything iter() accepts counts as iterable.
    try:
        iter(value)
    except TypeError:
        return False
    if isinstance(value, (str, bytes)):
        # Strings/bytes only count when the caller opted in.
        return strings
    return True
if sys.version_info < (3, 6):  # noqa: C901
    # BUG FIX: the original compared the *string* ``sys.version`` against
    # "3.6", which is a lexicographic comparison and wrongly classifies
    # Python 3.10+ as old ("3.10..." < "3.6"). ``sys.version_info`` compares
    # numerically. Pre-3.6 dicts do not preserve insertion order, so fall
    # back to OrderedDict and a hand-rolled ordered default dict.
    OrderedDict = collections.OrderedDict

    K = TypeVar("K")
    V = TypeVar("V")

    # TODO: There is most certainly a more terse implementation but inheriting
    # from OrderedDict doesn't seem to play nice with mypy.
    class DefaultOrderedDict(MutableMapping[K, V]):
        """Insertion-ordered mapping that creates missing values on access.

        Mirrors ``collections.defaultdict`` semantics on top of an
        ``OrderedDict``: looking up a missing key calls ``default_factory``
        and stores the result.
        """

        __slots__ = ("_inner", "default_factory")

        def __init__(
            self, default_factory: Callable[[], V], *args: Any, **kwargs: Any
        ):
            if default_factory is not None and not callable(default_factory):
                raise TypeError("default_factory must be callable")
            self.default_factory = default_factory
            self._inner = OrderedDict()  # type: MutableMapping[K, V]

        def __getitem__(self, key: K) -> V:
            try:
                return self._inner[key]
            except KeyError:
                return self.__missing__(key)

        def __missing__(self, key: K) -> V:
            # Same contract as defaultdict: no factory means a plain KeyError.
            if self.default_factory is None:
                raise KeyError(key)
            self._inner[key] = value = self.default_factory()
            return value

        def __len__(self):
            return len(self._inner)

        def __setitem__(self, key: K, value: V) -> None:
            self._inner[key] = value

        def __delitem__(self, key: K) -> None:
            del self._inner[key]

        def __iter__(self) -> Iterator[K]:
            return iter(self._inner)

        def clear(self) -> None:
            self._inner.clear()

        def keys(self) -> AbstractSet[K]:
            return self._inner.keys()

        def values(self) -> ValuesView[V]:
            return self._inner.values()

        def items(self) -> AbstractSet[Tuple[K, V]]:
            return self._inner.items()

        def pop(self, key: K, **kwargs: Any) -> V:  # type: ignore
            return self._inner.pop(key, **kwargs)

        def __contains__(self, key: Any) -> bool:
            return key in self._inner

        def __bool__(self) -> bool:
            return bool(self._inner)

else:
    # Python >= 3.6: plain dict preserves insertion order, so the stdlib
    # types are sufficient.
    OrderedDict = dict  # type: ignore
    DefaultOrderedDict = collections.defaultdict  # type: ignore
def classdispatch(
    value: Any, registry: Mapping[TType, C], *args: Any, **kwargs: Any
) -> T:
    """
    Poor man's singledispatch to be used inline.

    >>> class A:
    ...     pass

    >>> class B(A):
    ...     pass

    >>> class C(A):
    ...     pass

    >>> registry = {A: lambda _: 1, B: lambda _: 2}

    >>> classdispatch(A(), registry)
    1

    >>> classdispatch(B(), registry)
    2

    >>> classdispatch(C(), registry)
    Traceback (most recent call last):
        ...
    TypeError: <class 'py_gql._utils.C'>

    >>> classdispatch(object(), registry)
    Traceback (most recent call last):
        ...
    TypeError: <class 'object'>

    >>> classdispatch(A(), {
    ...     A: lambda _, *a, **kw: (a, sorted(kw.items()))
    ... }, 0, 1, foo=2, bar=3)
    ((0, 1), [('bar', 3), ('foo', 2)])
    """
    # Dispatch on the exact class only -- no MRO walk, unlike singledispatch.
    cls = value.__class__
    try:
        handler = registry[cls]
    except KeyError:
        raise TypeError(cls)
    return handler(value, *args, **kwargs)
def apply_middlewares(
    func: Callable[..., Any], middlewares: Sequence[Callable[..., Any]]
) -> Callable[..., Any]:
    """
    Apply a list of middlewares to a source function.

    - Middlewares must be structured as: ``middleware(next, *args, **kwargs)``
      and call the next middleware inline.

    >>> def square(x): return x ** 2
    >>> def double(next, x): return next(x * 2)
    >>> def substract_one(next, x): return next(x - 1)

    >>> final = apply_middlewares(square, [double, substract_one])
    >>> final(2)  # ((2 - 1) * 2) ^ 2
    4
    >>> final(10)  # ((10 - 1) * 2) ^ 2
    324
    """
    # Validate up front, then fold: later middlewares end up outermost,
    # exactly as with iterative partial-wrapping.
    for middleware in middlewares:
        if not callable(middleware):
            raise TypeError("Middleware should be a callable")
    return functools.reduce(
        lambda inner, middleware: functools.partial(middleware, inner),
        middlewares,
        func,
    )
def deprecated(reason: str) -> Callable[[Fn], Fn]:
    """Decorator factory marking a callable as deprecated.

    Calling the decorated function emits a ``DeprecationWarning`` carrying
    *reason* on every call, then delegates to the original implementation.
    """

    def decorator(fn: Fn) -> Fn:
        # BUG FIX: ``@functools.wraps`` was previously applied *without*
        # arguments, which replaced the wrapper with a ``functools.partial``
        # of ``update_wrapper`` -- every call to the decorated function then
        # blew up instead of running ``fn``. It must be ``wraps(fn)``.
        @functools.wraps(fn)
        def deprecated_fn(*args, **kwargs):
            with warnings.catch_warnings():
                # "always" ensures the warning fires on every call rather
                # than being deduplicated by the default filter.
                warnings.simplefilter("always", DeprecationWarning)
                warnings.warn(reason, category=DeprecationWarning, stacklevel=2)
            return fn(*args, **kwargs)

        return cast(Fn, deprecated_fn)

    return decorator
|
python
|
""" --- The Angles of a Triangle --- Elementary
You are given the lengths for each side on a triangle.
You need to find all three angles for this triangle. If the given side
lengths cannot form a triangle (or form a degenerated triangle),
then you must return all angles as 0 (zero). The angles should be
represented as a list of integers in ascending order.
Each angle is measured in degrees and rounded to the nearest
integer number (Standard mathematical rounding).
Input: The lengths of the sides of a triangle as integers.
Output: Angles of a triangle in degrees as sorted list of integers.
How it is used: This is a classical geometric task. The ideas can be useful
in topography and architecture. With this concept you can
measure an angle without the need for a protractor.
Precondition: 0 < a,b,c <= 1000
"""
def my_solution(a, b, c):
    """Return the sorted interior angles (integer degrees) of triangle a,b,c.

    Impossible or degenerate side lengths yield [0, 0, 0].
    """
    import math

    def angle_opposite(adj1, adj2, opp):
        # Law of cosines solved for the angle opposite side ``opp``;
        # acos raises ValueError when the cosine falls outside [-1, 1].
        cosine = float(adj1 ** 2 + adj2 ** 2 - opp ** 2) / (2 * adj1 * adj2)
        return int(round(math.degrees(math.acos(cosine))))

    try:
        angles = sorted(
            [angle_opposite(b, c, a), angle_opposite(c, a, b), angle_opposite(a, b, c)]
        )
    except ValueError:
        # Sides cannot form a triangle at all.
        return [0, 0, 0]
    if 0 in angles or 180 in angles:
        # Degenerate (zero-area) triangle.
        return [0, 0, 0]
    return angles
def bryukh_solution(a, b, c):
    """Return the sorted interior angles (integer degrees) of triangle a,b,c.

    Returns [0, 0, 0] when the sides violate the (strict) triangle
    inequality, which also covers degenerate zero-area triangles.
    """
    from math import acos, degrees

    # Every side must be strictly shorter than the sum of the other two.
    if a + b <= c or a + c <= b or b + c <= a:
        return [0, 0, 0]

    # PEP 8 (E731): a def statement instead of a lambda assigned to a name.
    def find_angle(s1, s2, so):
        # Law of cosines, solved for the angle opposite side ``so``.
        return int(round(degrees(acos((s1 ** 2 + s2 ** 2 - so ** 2) / (2 * s1 * s2))), 0))

    return sorted([find_angle(a, b, c), find_angle(a, c, b), find_angle(b, c, a)])
|
python
|
# -*- coding: utf-8 -*-
""" doScan.py
The core interface that takes minimal text description of scan and does IPC with hardware system
This is the common entry into the hardware interface and uses a minimal text desc file containing three
blocks: [Major], [Minor], [Interface]. This routine can be called independently of the RasterGUI program
enabling you to operate the scanning system remotely or via a text editor. The RasterGUI front end just
generates the minimal text desc file for you instead of you having to edit the parameters manually. It also
enables timing functions that allow scanning to be synchronized with ePhys data acquisition.
last revised 19 Dec 2017 BWS
"""
import sys, os, math
import socket
import zipfile
import numpy as np
import os.path as path
import datetime
import time
import importlib
import configparser as ConfigParser
sys.path.append(os.path.abspath(os.path.join(os.path.dirname(__file__), '..')))
from .Helper.EasyDict import EasyDict
import Imaging.Helper.processImageData as PI
def doCommonEntry(rasterDescFileIn):
    """Main entry point: receives an INI-style text file with a command and parameters.

    One entry in the [Interface] section must be "doScanFunction = xx" that
    reflects the command (xx = runScanner, genericCommand, armPhotometry,
    or testPhotometry). Returns a dict whose "retOkay" key is False on any
    failure.
    """
    localInputFolder, allParmsStr = _prepareToMakeZipFile(rasterDescFileIn)  # reads INI file
    errorRetDict = {}
    errorRetDict["retOkay"] = False
    if not allParmsStr:
        print("Error: problem with absence of parm Dict inside doScan.py")
        return errorRetDict
    # Normalize the requested command for case-insensitive dispatch below.
    requestedFunction = allParmsStr["Interface"]["doScanFunction"].lower().strip()
    if requestedFunction in ["runscanner"]:
        retDict = _doRunScanner(localInputFolder, allParmsStr)
    elif requestedFunction in ["genericcommand"]:
        retDict = _doGenericCommand(localInputFolder, allParmsStr)
    elif requestedFunction in ["armphotometry"]:
        retDict = _doArmPhotometry(localInputFolder, allParmsStr)
    elif requestedFunction in ["testphotometry"]:
        retDict = _doTestPhotometry(localInputFolder, allParmsStr)
    else:
        print("Error: requested function not available in DoScan.py: " + requestedFunction)
        return errorRetDict
    return retDict
def _doGenericCommand(localInputFolder, allParmsStr):
    """Forward a one-off command (Interface/specificCommand) to the hardware PC.

    Fire-and-forget: no response data is awaited. Returns {"retOkay": bool}.
    """
    falseStrings = ["false", "no", "0", "off"]
    retDict = {}
    diagMode = allParmsStr["Interface"]["diagmode"].lower() not in falseStrings
    # The hardware side reads "currentcommand" out of the shipped Cmd.txt.
    allParmsStr["Interface"]["currentcommand"] = allParmsStr["Interface"]["specificCommand"]
    hardwareAddress = (allParmsStr["System"]["hardwareADCip"], int(allParmsStr["System"]["hardwareADCport"]))
    retOkay = _sendZipFile(localInputFolder, allParmsStr["Interface"], allParmsStr["System"]["tempFolder"],
                           hardwareAddress, diagMode)
    if not retOkay:
        print("Problem on return code from sendZipFile inside doScan.py (doGenericCommand)")
        retDict["retOkay"] = False
        return retDict
    if diagMode:
        print("Finished sending generic command file and other files to hardware computer")
    retDict["retOkay"] = True
    return retDict
def _doTestPhotometry(localInputFolder, allParmsStr):
    """Run a short (7 ms) photometry acquisition to check the light path.

    Generates fresh photometry scan waveforms, ships them to the hardware PC,
    waits for the returned data file, and reports the mean response on
    channels A and B. On success the returned dict contains retOkay=True plus
    meanChanA/meanChanB; on any failure only retOkay=False.
    """
    falseStrings = ["false", "no", "0", "off"]
    retDict = {}
    diagMode = allParmsStr["Interface"]["diagmode"].lower() not in falseStrings
    allParmsStr["Interface"]["currentcommand"] = "DoTestPhotometry"
    # NOTE(review): "tempfolder" here vs "tempFolder" elsewhere — presumably
    # the parameter dict tolerates case differences; confirm against EasyDict.
    allParmsStr["Interface"]["destFileName"] = allParmsStr["System"]["tempfolder"] + "/testPhotometry.gsi"
    imageDescFN = "PhotometryDescription.txt"
    allParmsStr["Interface"]["ImageDesc"] = imageDescFN
    # make new scan waveforms
    # NOTE(review): lowercase "minor" vs "Minor" used elsewhere — verify.
    allParmsStr["minor"]["photometrydurms"] = "7"  # use 7 ms
    retOkay = _createPhotometryScan(allParmsStr, imageDescFN)
    if not retOkay:
        print("Problem creating photometry scan files.")
        retDict["retOkay"] = False
        return retDict
    if diagMode:
        print("Created new photometry scan waveforms as part of test.")
    # send photometry scan waveforms to hardware computer
    hardwareAddress = (allParmsStr["System"]["hardwareADCip"], int(allParmsStr["System"]["hardwareADCport"]))
    retOkay = _sendZipFile(localInputFolder, allParmsStr["Interface"], allParmsStr["System"]["tempFolder"],
                           hardwareAddress, diagMode)
    if not retOkay:
        print("Problem on return code from sendZipFile inside doScan.py (doTestPhotometry)")
        retDict["retOkay"] = False
        return retDict
    if diagMode:
        print("Finished sending photometry scan waveforms and other files to hardware computer")
    # wait for data to be returned; listen on different port to keep things simple for hardwarePC
    if diagMode:
        print("Request sent now waiting for hardware computer to finish ...")
    servAddr = (allParmsStr["System"]["returnIP"], int(allParmsStr["System"]["returnPort"]))
    newFileName = allParmsStr["Interface"]["destFileName"].strip()
    timeOutSec = 2
    retOkay = _waitForSocketResponse(servAddr, newFileName, timeOutSec, diagMode)
    if not retOkay:
        print("Problem with return code on waitForSocketResponse in DoScan.py (doTestPhotometry).")
        retDict["retOkay"] = False
        return retDict
    print("Finished getting test photometry data " + newFileName + " status: " + str(retOkay))
    retDict = PI.loadPhotometryZipFile(newFileName)
    if len(retDict["data"]["A"]) >= 6000:
        # Samples 1000:6000 — per the original notes: skip 1 ms for the
        # shutter to open, then average 5 ms (implies ~1 MHz sampling — TODO confirm).
        meanA = np.mean(retDict["data"]["A"][1000:6000])  # wait 1 ms for shutter to open then average for 5 ms
        meanB = np.mean(retDict["data"]["B"][1000:6000])  # wait 1 ms for shutter to open then average for 5 ms
        #print("Mean photoresponse: " + str(meanA) + " " + str(meanB))
        retDict["meanChanA"] = meanA
        retDict["meanChanB"] = meanB
        retDict["retOkay"] = True
        return retDict
    else:
        print("Not enough photometry data returned")
        retDict["retOkay"] = False
        return retDict
def _doArmPhotometry(localInputFolder, allParmsStr):
    """Generate photometry waveforms and load them onto the hardware PC.

    Arms the hardware for a later photometry episode; no acquisition data is
    awaited here. Returns {"retOkay": bool}.
    """
    falseStrings = ["false", "no", "0", "off"]
    retDict = {}
    diagMode = allParmsStr["Interface"]["diagmode"].lower() not in falseStrings
    allParmsStr["Interface"]["currentcommand"] = "DoLoadPhotometry"
    imageDescFN = "PhotometryDescription.txt"
    allParmsStr["Interface"]["ImageDesc"] = imageDescFN
    # NOTE(review): presumably selects which episode supplies position data —
    # confirm against the hardware-side consumer of "PositionData".
    allParmsStr["Interface"]["PositionData"] = str(allParmsStr["Minor"]["positionEpisode"])
    # make new scan waveforms
    retOkay = _createPhotometryScan(allParmsStr, imageDescFN)
    if not retOkay:
        print("Problem creating photometry scan files.")
        retDict["retOkay"] = False
        return retDict
    if diagMode:
        print("Created new photometry scan waveforms.")
    # send photometry scan waveforms to hardware computer
    hardwareAddress = (allParmsStr["System"]["hardwareADCip"], int(allParmsStr["System"]["hardwareADCport"]))
    retOkay = _sendZipFile(localInputFolder, allParmsStr["Interface"], allParmsStr["System"]["tempFolder"],
                           hardwareAddress, diagMode)
    if not retOkay:
        print("Problem on return code from sendZipFile inside doScan.py (doArmPhotometry)")
        retDict["retOkay"] = False
        return retDict
    if diagMode:
        print("Finished sending photometry scan waveforms and other files to hardware computer")
    retDict["retOkay"] = True
    return retDict
def _doRunScanner(localInputFolder, allParmsStr):
    """Run one image-acquisition cycle on the hardware PC.

    Optionally regenerates scan waveforms, ships the request zip to the
    hardware computer, then blocks until the acquired data file is pushed
    back. On success returns a dict with retOkay=True, the acquired data
    file name ("newFileName") and the ImageDescription file name
    ("imageDescFN", empty when cached waveforms were reused).
    """
    startTime = datetime.datetime.now()
    falseStrings = ["false", "no", "0", "off"]
    retDict = {}
    diagMode = allParmsStr["Interface"]["diagmode"].lower() not in falseStrings
    allParmsStr["Interface"]["currentcommand"] = "DoScan"
    # make new scan waveforms and ImageDesc text file if requested
    imageDescFN = "ImageDescription.txt"
    allParmsStr["Interface"]["ImageDesc"] = imageDescFN
    allParmsStr["Interface"]["ReturnPositionData"] = str(int(allParmsStr["Interface"]["positionData"]))
    if allParmsStr["Interface"]["updateScanWaveforms"].lower() not in falseStrings:
        retOkay = _createScan(allParmsStr, imageDescFN)
        if retOkay:
            retImageDescFN = imageDescFN  # to let calling program know about updated parms
        else:
            print("Problem creating scan files.")
            retDict["retOkay"] = False
            return retDict
        if diagMode:
            print("Created new scan waveforms.")
    else:
        # hardwarePC is going to recycle cached ImageDescription.txt file since no changed parameters
        retImageDescFN = ""  # this routine did not create a new ImageDesc file, so nothing to return
    hardwareAddress = (allParmsStr["System"]["hardwareADCip"], int(allParmsStr["System"]["hardwareADCport"]))
    retOkay = _sendZipFile(localInputFolder, allParmsStr["Interface"], allParmsStr["System"]["tempFolder"],
                           hardwareAddress, diagMode)
    if not retOkay:
        # NOTE(review): message says doGenericCommand but this is _doRunScanner;
        # looks like copy/pasted error text — confirm before changing.
        print("Problem on return code from sendZipFile inside doScan.py (doGenericCommand)")
        retDict["retOkay"] = False
        return retDict
    if diagMode:
        print("Finished sending doRunScanner command file and other files to hardware computer")
    # wait for data to be returned; listen on different port to keep things simple for hardwarePC
    if diagMode:
        print("Request sent now waiting for hardware computer to finish ...")
    servAddr = (allParmsStr["System"]["returnIP"], int(allParmsStr["System"]["returnPort"]))
    newFileName = allParmsStr["Interface"]["destFileName"].strip()
    if "estSecPerFrame" in allParmsStr["Interface"]:
        # NOTE(review): this lookup uses lowercase "interface" while every
        # other access uses "Interface"; also configparser lowercases option
        # names, so a mixed-case "estSecPerFrame" key may never match — verify.
        timeOutSec = 2 + (int(allParmsStr["Interface"]["numframes"])) * float(allParmsStr["interface"]["estSecPerFrame"])
    else:
        timeOutSec = 2 * int(allParmsStr["Interface"]["numframes"])  # estimate of 2 sec per frame max
    retOkay = _waitForSocketResponse(servAddr, newFileName, timeOutSec, diagMode)
    if not retOkay:
        print("Problem with return code on waitForSocketResponse in DoScan.py (runScanner).")
        retDict["retOkay"] = False
        return retDict
    if diagMode:
        print(" new file is: " + newFileName)
    # report total round-trip time for the acquisition
    diffTime = datetime.datetime.now() - startTime
    elaspedMs = (diffTime.seconds * 1000) + (diffTime.microseconds / 1000)
    print("Total milliseconds required: " + str(int(10. * elaspedMs) / 10.))
    retDict["newFileName"] = newFileName
    retDict["imageDescFN"] = retImageDescFN
    retDict["retOkay"] = True
    return retDict
# Private functions below here
def _prepareToMakeZipFile(rasterDescFileIn):
rasterDescFile = path.expanduser(rasterDescFileIn)
if not path.exists(rasterDescFile):
print("Cannot find requested DoScan command file: " + str(rasterDescFile))
return "", ""
allParmsStr = _preProcessParms(rasterDescFile) # actual reading of INI file
# copy key parameters to [Interface] section since the RasterNoGUI only gets this section
allParmsStr["Interface"]["localInputFolder"] = allParmsStr["System"]["tempFolder"] + "/Input"
allParmsStr["Interface"]["commandTimeStamp"] = str(datetime.datetime.now())
allParmsStr["Interface"]["returnIPaddress"] = allParmsStr["System"]["returnIP"]
allParmsStr["Interface"]["returnport"] = allParmsStr["System"]["returnPort"]
# create empty folder to put final parms and waveform files
localInputFolder = allParmsStr["Interface"]["localInputFolder"]
if not path.exists(localInputFolder):
os.makedirs(localInputFolder)
else:
# input folder exists so clear any existing files or subfolders in it
for oneFile in os.listdir(localInputFolder):
filePlusPath = path.join(localInputFolder, oneFile)
if path.isfile(filePlusPath):
os.unlink(filePlusPath)
elif path.isdir(filePlusPath):
shutil.rmtree(filePlusPath)
return localInputFolder, allParmsStr
def _sendZipFile(localInputFolder, parmDict, tempFolder, hardwareAddress, diagMode):
    """Bundle the input folder into a zip file and push it to the hardware PC.

    Writes a Cmd.txt from parmDict (typically just the [Interface] section),
    zips everything inside localInputFolder, and streams the archive to
    hardwareAddress over TCP.

    Returns:
        True on success, False when the hardware computer cannot be reached.
    """
    finalCmdFile = localInputFolder + "/Cmd.txt"
    with open(finalCmdFile, "w") as fOut:  # write interface parameters to Cmd.txt file
        print("[Commands]\r", file=fOut)  # the extra return char is required for Windows PCs
        for key in sorted(parmDict):
            print(str(key).lower() + " = " + parmDict[key] + "\r", file=fOut)
    # write final Zip file containing cmd, imageDesc, and scan waveforms (if requested)
    os.chdir(localInputFolder)  # temp folder (typically on a RamDrive)
    zipFileName = tempFolder + "/rasterInput.zip"
    with zipfile.ZipFile(zipFileName, "w") as fZip:
        for root, dirs, files in os.walk(localInputFolder):
            for file in files:
                fZip.write(file)
    # send Zip file to hardwareAddress
    # BUG FIX: the zip file handle was previously opened without ever being
    # closed; a context manager releases it promptly.
    with open(zipFileName, "rb") as fZipIn:
        zipBytes = fZipIn.read()
    client = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    client.settimeout(1)  # allow 1 sec before triggering a time-out exception
    try:
        client.connect(hardwareAddress)
        if diagMode:
            print("hardware client connected ...")
    except OSError:
        # BUG FIX: narrowed from a bare ``except:`` (which would also swallow
        # KeyboardInterrupt/SystemExit); socket errors all derive from OSError.
        print("** ERROR: Could not connect to hardware computer at " + str(hardwareAddress))
        client.close()
        return False
    client.settimeout(None)  # best to turn off to send Zip file properly
    client.sendall(zipBytes)  # sendall() guarantees the whole payload is sent
    client.close()
    return True
def _waitForSocketResponse(servAddr, returnDataFileName, timeOutSec, diagMode):
bufSize = 4096 * 2
serv = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
serv.bind(servAddr)
serv.listen(5)
if diagMode:
print("listening for return of acquired data from hardware computer...")
serv.settimeout(timeOutSec)
while True:
try:
listenConn, listenAddr = serv.accept()
except socket.timeout:
print("ERROR - hardware computer did not respond with acquired data within max time allowed.")
return None
if diagMode:
print('hardware computer connected ... ', listenAddr)
serv.settimeout(None)
myHandle = open(returnDataFileName, "wb")
while True:
data = listenConn.recv(bufSize)
if not data: break
myHandle.write(data)
myHandle.close()
listenConn.close()
if diagMode:
print("Closed socket connection with hardware computer.")
break
return True
def _preProcessParms(parmFileName):
    """Read the command INI file plus the system INI file it references.

    Returns an EasyDict of *string* parameters with one sub-dict per INI
    section (Major, Minor, Interface, ...) and a "System" sub-dict loaded
    from the referenced system INI; returns None when either file is
    missing or no system INI was declared.
    """
    if path.exists(parmFileName):
        systemINI = None
        config = ConfigParser.ConfigParser()
        config.read(parmFileName)
        coreDict = EasyDict()
        for oneSection in config.sections():
            if config[oneSection]:
                coreDict[oneSection] = EasyDict()
                for key in config[oneSection]:
                    # Strip inline ";" comments from every value.
                    coreDict[oneSection][key] = config[oneSection][key].split(";")[0].strip()
                    # A "systemini" entry points at the shared hardware INI file.
                    if key == "systemini":
                        systemINI = path.expanduser(config[oneSection][key].split(";")[0].strip())
                        if not path.exists(systemINI):
                            print("Bad system INI file found in doScan.py")
                            systemINI = None
        if systemINI:
            config = ConfigParser.ConfigParser()
            config.read(systemINI)
            # Flatten all system-INI sections into a single "System" sub-dict.
            coreDict["System"] = EasyDict()
            for oneSection in config.sections():
                if config[oneSection]:
                    for key in config[oneSection]:
                        # NOTE(review): configparser lowercases option names by
                        # default, yet callers look up mixed-case keys such as
                        # "tempFolder" — presumably EasyDict tolerates this;
                        # confirm against EasyDict's implementation.
                        coreDict["System"][key] = config[oneSection][key].split(";")[0].strip()
            return coreDict
        else:
            print("no systemINI file processed")
            return None
    else:
        return None
def _createPhotometryScan(allParms, passedImageDescFN):
scanModuleStr = "Imaging.Helper.Scans.createPhotometryScans"
try:
scanModule = importlib.import_module(scanModuleStr)
except:
print("ERROR - problem importing module: " + scanModuleStr)
return False
scanFunctionStr = allParms["Minor"]["photometryShape"].lower().strip()
try:
updatedNewParms = getattr(scanModule, scanFunctionStr)(allParms)
except:
print("ERROR - could not match requested photometryMode with a generation subroutine: " + scanFunctionStr)
return False
localInputFolder = allParms["Interface"]["localInputFolder"]
updatedNewParms["scanWaveformsTimeStamp"] = str(datetime.datetime.now())
with open(localInputFolder + "/" + passedImageDescFN, "w") as fOut:
print("[Derived]\r", file=fOut)
for key, value in sorted(updatedNewParms.items()):
print(str(key).lower() + " = " + str(value) + "\r", file=fOut) # str to fix any number entries
print(" ", file=fOut)
print("[System]\r", file=fOut)
for key, value in sorted(allParms["System"].items()):
print(str(key).lower() + " = " + value + "\r", file=fOut)
return True
def _createScan(allParms, passedImageDescFN):
    """Build scan waveforms plus the full ImageDescription.txt parameter file.

    Called once Major and Minor parameters are set. Computes the [Derived]
    settings (zoom voltage, lag, enabled ADC channels), dispatches to the
    scan-generation function named by Minor/scanfunction (which also writes
    the scan waveform binaries to the temp folder), then writes the combined
    [Major]/[Minor]/[Derived]/[System] sections to passedImageDescFN.
    Returns True on success, False otherwise.
    """
    # create common initial variables needed for most scan generation subroutines
    newParms = {}  # will become [Derived] section in final ImageDescription.txt file
    newParms["doScanVersion"] = str(1.2)
    newParms["zoomAsVolts"] = str(_zoomToVoltage(float(allParms["Major"]["zoom"])))
    # Total system lag = fixed hardware lag plus a per-run tweak (both in us).
    lagUs = float(allParms["System"]["systemlagus"]) + float(allParms["Minor"]["lagtweakus"])
    newParms["lagUs"] = str(lagUs)
    newParms["lagPixels"] = str(int(lagUs / float(allParms["Major"]["pixelUs"])))
    # A channel counts as enabled when its full-scale range is positive.
    numADCs = 0
    chanList = ""
    if float(allParms["Major"]["chanAfullScale"]) > 0:
        numADCs += 1
        chanList += "A"
    if float(allParms["Major"]["chanBfullScale"]) > 0:
        numADCs += 1
        chanList += "B"
    if float(allParms["Major"]["chanCfullScale"]) > 0:
        numADCs += 1
        chanList += "C"
    if float(allParms["Major"]["chanDfullScale"]) > 0:
        numADCs += 1
        chanList += "D"
    if numADCs == 0:
        print("Warning: No ADC channels are enabled.")
    newParms["numADCs"] = str(numADCs)
    newParms["ADCchanLetters"] = chanList
    # now load module and run function selected by Minor parameters to create scan
    # allows program to be extended with new types of scans without changing core code
    updatedNewParms = None  # an empty variable in case scan generation does not work
    scanModuleStr = "Imaging.Helper.Scans.createStandardScans"
    try:
        scanModule = importlib.import_module(scanModuleStr)
    except:
        # TODO(review): bare except, and execution continues after a failed
        # import — scanModule would then be undefined in the getattr below
        # (the resulting NameError is swallowed by the next bare except).
        print("ERROR - problem importing module: " + scanModuleStr)
    scanFunctionStr = allParms["Minor"]["scanfunction"].lower().strip()
    try:
        updatedNewParms = getattr(scanModule, scanFunctionStr)(allParms, newParms)
    except:
        # TODO(review): bare except also hides errors raised *inside* the
        # generation subroutine, not just a missing attribute.
        print("ERROR - could not match requested scanType with a generation subroutine: " + scanFunctionStr)
    # write final ImageDescription file to temp folder
    if updatedNewParms:
        localInputFolder = allParms["Interface"]["localInputFolder"]
        updatedNewParms["scanWaveformsTimeStamp"] = str(datetime.datetime.now())
        with open(localInputFolder + "/" + passedImageDescFN, "w") as fOut:
            print("[Major]\r", file=fOut)
            for key, value in sorted(allParms["Major"].items()):
                print(str(key).lower() + " = " + value + "\r", file=fOut)
            print(" ", file=fOut)
            print("[Minor]\r", file=fOut)
            for key, value in sorted(allParms["Minor"].items()):
                print(str(key).lower() + " = " + value + "\r", file=fOut)
            print(" ", file=fOut)
            print("[Derived]\r", file=fOut)
            for key, value in sorted(updatedNewParms.items()):
                print(str(key).lower() + " = " + str(value) + "\r", file=fOut)  # str to fix any number entries
            print(" ", file=fOut)
            print("[System]\r", file=fOut)
            for key, value in sorted(allParms["System"].items()):
                print(str(key).lower() + " = " + value + "\r", file=fOut)
        return True
    else:
        return False
def _zoomToVoltage(zoomLevel):
# converts zoom level to voltage span; zoom=1 gives ~ 20V span (-10 to 10 FS)
if zoomLevel < 1:
print("Warning: minimum zoom is 1.")
zoomLevel = 1.
elif zoomLevel > 9:
print("Warning: maximum zoom is 9.")
zoomLevel = 9.
voltSpan = 39.9998 * math.exp((-1. * zoomLevel) / 1.4427)
return voltSpan
if __name__ == "__main__":
    # Command-line usage: python doScan.py <scanParameterFile.txt>
    if len(sys.argv) == 2:
        tempFN = path.expanduser(sys.argv[1])
        if path.exists(tempFN):
            # BUG FIX: previously called the undefined name ``doRunScanner``
            # (NameError at runtime); the module's public entry point is
            # ``doCommonEntry``.
            doCommonEntry(tempFN)
        else:
            print("Bad scan parameter file: " + tempFN)
    else:
        print("You need to provide a scan parameter text file name with doScan.py")
|
python
|
import logging
from django.core.management.base import BaseCommand, CommandError
from plyara import Plyara
from rules.models import YaraRule
# Configure Logging
logging.basicConfig(level=logging.INFO)
class Command(BaseCommand):
    """Management command that recomputes the logic hash of every stored rule."""

    help = 'Recalculate the logic hashes of the entire rule corpus'

    def handle(self, *args, **options):
        corpus = YaraRule.objects.all()
        rule_count = corpus.count()
        logging.info('Updating logic hashes for {} rules'.format(rule_count))
        # iterator() streams rows instead of caching the whole queryset;
        # enumerate replaces the manual counter of the original loop.
        for rule_index, rule in enumerate(corpus.iterator(), start=1):
            logic_data = {'strings': rule.strings, 'condition_terms': rule.condition}
            rule.logic_hash = Plyara.generate_logic_hash(logic_data)
            rule.save()
            logging.info('Rule Logic Update: {} of {}'.format(rule_index, rule_count))
|
python
|
import pandas as pd
import numpy as np
class ExternalData():
    """Merge an external CSV of per-molecule data into the feature pipeline.

    Rows of the CSV are aligned with the molecule titles from the dataset:
    CSV rows whose Title is absent from the dataset are dropped, and blank
    rows are inserted for dataset molecules missing from the CSV. Columns
    matching any field in ``exclude`` are removed before returning.
    """

    def __init__(self, csv, mol_names, exclude=None):
        self.csv = csv
        self.mol_names = mol_names
        # BUG FIX: the default was a mutable list literal ([]), which is
        # shared across every instance created without an explicit argument;
        # normalize None to a fresh list instead.
        self.exclude = exclude if exclude is not None else []

    def fit(self, molecules):
        # Nothing to learn; present for scikit-learn style compatibility.
        return molecules

    def fit_transform(self, molecules, labels):
        return self.transform(self.fit(molecules))

    def transform(self, molecules):
        """Return the aligned, filtered external-data DataFrame."""
        print("\tIncorporating external data")
        df = pd.read_csv(self.csv)
        n_drop = 0
        #If not present on your dataset discard
        self.mol_names = [m[0].GetProp("_Name") for m in molecules.values]
        for i, title in enumerate(df["Title"].values):
            if title not in self.mol_names:
                # Offset index by rows already dropped from the live frame.
                df.drop(df.index[i - n_drop], inplace=True)
                n_drop += 1
        headers = list(df)
        values = (None,) * len(headers)
        #If not present on your glide discard
        for i, title in enumerate(self.mol_names):
            if title not in df["Title"].values:
                line = pd.DataFrame.from_records([values], columns=headers, index=[i])
                # BUG FIX: DataFrame.ix was deprecated and removed from
                # pandas; iloc[:i] / iloc[i:] are the positional equivalents
                # of the inclusive label slices ix[:i-1] / ix[i:].
                df = pd.concat([df.iloc[:i], line, df.iloc[i:]]).reset_index(drop=True)
        #Drop features
        df = df.replace("--", np.nan)
        features_to_drop = [feature for field in self.exclude for feature in headers if field in feature]
        df.drop(features_to_drop, axis=1, inplace=True)
        df.to_csv("model_features.txt")
        return df

    def retrieve_molecule_names(self):
        """Return titles of CSV rows with at least 80% of columns populated."""
        df = pd.read_csv(self.csv)
        thresh = int(df.shape[1] * 0.8)
        df_dropna = df.dropna(thresh=thresh)
        return df_dropna["Title"].values
|
python
|
import mailroom_stuff.Front_End
if __name__ == '__main__':
    # Launch the mailroom command-line front end when run as a script.
    mailroom_stuff.Front_End.main()
|
python
|
from flask_script import Manager,Shell,Server
from app import create_app ,db
from app.models import User
from flask_migrate import Migrate, MigrateCommand
# Build the Flask app with the production configuration profile.
app=create_app('production')
manager = Manager(app)
migrate = Migrate(app,db)
# Expose `python manage.py db ...` migration commands and a runnable server.
manager.add_command('db',MigrateCommand)
manager.add_command('Server',Server)
@manager.shell
def make_shell_context():
    # Objects pre-imported into the `python manage.py shell` session.
    return dict(app = app,db = db,User = User )
if __name__ == '__main__':
    manager.run()
|
python
|
#!/usr/bin/env python
import socket
import sys
import os
sys.path.append('./src')
from multi_detector import MultiDetector
# Shared-library and model paths (relative to this script's location).
libpath = "../lib/linux/libnyumaya.so"
# Hotword ("marvin") detection model and its label file.
hotword_graph="../models/Hotword/marvin_big_0.3.tflite"
hotword_labels="../models/Hotword/marvin_labels.txt"
# On/off command model and its label file.
action_graph="../models/Command/on_off_big_0.3.tflite"
action_labels="../models/Command/on_off_labels.txt"
# IP address of the Tasmota/Sonoff smart switch to control.
sonoff_ip="10.0.0.54"
def light_on():
    """Switch the Sonoff relay on through its Tasmota HTTP command API."""
    print("Turning light on")
    # Trailing "&" backgrounds curl so detection is not blocked.
    cmd = "curl http://" + sonoff_ip + "/cm?cmnd=Power%20On &"
    os.system(cmd)
def light_off():
    """Switch the Sonoff relay off through its Tasmota HTTP command API."""
    print("Turning light off")
    # Trailing "&" backgrounds curl so detection is not blocked.
    cmd = "curl http://" + sonoff_ip + "/cm?cmnd=Power%20Off &"
    os.system(cmd)
def detected_something_callback():
    """Invoked whenever any detector fires, before command matching."""
    #os.system(play_command + " ./resources/tone-beep.wav")
    print("Detected Something")
def reset_history_callback():
    """Invoked when the detector's hotword/command history is cleared."""
    print("Reset History")
# Accept raw audio frames pushed over TCP by a remote capture client.
serversocket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
serversocket.bind(('', 9999))
serversocket.listen(5) # become a server socket, maximum 5 connections
# Chain the hotword ("marvin") detector with the on/off command detector.
mDetector = MultiDetector(libpath,timeout=20)
mDetector.add_detector(action_graph,action_labels,0.8)
mDetector.add_detector(hotword_graph,hotword_labels,0.5)
# Hotword followed by a command word triggers the light actions.
mDetector.add_command("marvin,on",light_on)
mDetector.add_command("marvin,off",light_off)
mDetector.add_reset_history_callback(reset_history_callback)
mDetector.add_detected_callback(detected_something_callback)
# NOTE(review): accept() sits outside the loop, so only the first client is
# ever served; a dropped connection ends frame processing — confirm intended.
connection, address = serversocket.accept()
while True:
    buf = connection.recv(800)
    if len(buf) > 0:
        mDetector.run_frame(buf)
|
python
|
#!/usr/bin/env python
""" This code implements a ceiling-marker based localization system.
The core of the code is filling out the marker_locators
which allow for the specification of the position and orientation
of the markers on the ceiling of the room """
import rospy
from ar_pose.msg import ARMarkers
from tf.transformations import euler_from_quaternion, rotation_matrix, quaternion_from_matrix, quaternion_from_euler
import numpy as np
from nav_msgs.msg import Odometry
from geometry_msgs.msg import PoseStamped, Pose, Point, Quaternion
from std_msgs.msg import Header
from tf import TransformListener, TransformBroadcaster
from copy import deepcopy
from math import sin, cos, pi, atan2, fabs
class TransformHelpers:
    """ Some convenience functions for translating between various representions of a robot pose.
        TODO: nothing... you should not have to modify these """

    @staticmethod
    def convert_translation_rotation_to_pose(translation, rotation):
        """ Convert from representation of a pose as translation and rotation (Quaternion) tuples to a geometry_msgs/Pose message """
        return Pose(position=Point(x=translation[0],y=translation[1],z=translation[2]), orientation=Quaternion(x=rotation[0],y=rotation[1],z=rotation[2],w=rotation[3]))

    @staticmethod
    def convert_pose_inverse_transform(pose):
        """ Helper method to invert a transform (this is built into the tf C++ classes, but ommitted from Python) """
        # Homogeneous column vector holding the negated translation.
        translation = np.zeros((4,1))
        translation[0] = -pose.position.x
        translation[1] = -pose.position.y
        translation[2] = -pose.position.z
        translation[3] = 1.0

        rotation = (pose.orientation.x, pose.orientation.y, pose.orientation.z, pose.orientation.w)
        euler_angle = euler_from_quaternion(rotation)
        # Transpose == inverse for a pure rotation matrix. Only the yaw
        # component is used here — presumably roll/pitch are negligible for
        # this ceiling-marker setup (confirm).
        rotation = np.transpose(rotation_matrix(euler_angle[2], [0,0,1]))       # the angle is a yaw
        transformed_translation = rotation.dot(translation)

        translation = (transformed_translation[0], transformed_translation[1], transformed_translation[2])
        rotation = quaternion_from_matrix(rotation)
        return (translation, rotation)

    @staticmethod
    def angle_normalize(z):
        """ convenience function to map an angle to the range [-pi,pi] """
        return atan2(sin(z), cos(z))

    @staticmethod
    def angle_diff(a, b):
        """ Calculates the difference between angle a and angle b (both should be in radians)
            the difference is always based on the closest rotation from angle a to angle b
            examples:
                angle_diff(.1,.2) -> -.1
                angle_diff(.1, 2*math.pi - .1) -> .2
                angle_diff(.1, .2+2*math.pi) -> -.1
        """
        a = TransformHelpers.angle_normalize(a)
        b = TransformHelpers.angle_normalize(b)
        d1 = a-b
        # d2 is the complementary rotation the other way around the circle,
        # signed opposite to d1.
        d2 = 2*pi - fabs(d1)
        if d1 > 0:
            d2 *= -1.0
        # Return whichever rotation has the smaller magnitude.
        if fabs(d1) < fabs(d2):
            return d1
        else:
            return d2
class MarkerLocator(object):
    """Known pose of one ceiling-mounted AR marker in the global frame."""

    def __init__(self, id, position, yaw):
        """ Create a MarkerLocator object
            id: the id of the marker (this is an index based on the file
                specified in ar_pose_multi.launch)
            position: this is a tuple of the x,y position of the marker
            yaw: this is the angle about a normal vector pointed towards
                 the STAR center ceiling
        """
        self.id = id
        self.position = position
        self.yaw = yaw

    def get_camera_position(self, marker):
        """ Outputs the position of the camera in the global coordinates """
        euler_angles = euler_from_quaternion((marker.pose.pose.orientation.x,
                                              marker.pose.pose.orientation.y,
                                              marker.pose.pose.orientation.z,
                                              marker.pose.pose.orientation.w))
        # NOTE(review): camera-frame axes remapped (x, y) -> (y, -x), with z
        # dropped — presumably aligns the camera optical frame with the
        # marker/ceiling frame; confirm against the ar_pose conventions.
        translation = np.array([marker.pose.pose.position.y,
                                -marker.pose.pose.position.x,
                                0,
                                1.0])
        # Rotate the offset by the marker's yaw relative to the camera, then
        # translate by the marker's known global position.
        translation_rotated = rotation_matrix(self.yaw-euler_angles[2], [0,0,1]).dot(translation)
        xy_yaw = (translation_rotated[0]+self.position[0],translation_rotated[1]+self.position[1],self.yaw-euler_angles[2])
        return xy_yaw
class MarkerProcessor(object):
    """ ROS node that converts AR marker detections into robot poses in the
        STAR (ceiling marker) coordinate frame and keeps the STAR->odom
        transform up to date. """

    def __init__(self, use_dummy_transform=False):
        rospy.init_node('star_center_positioning_node')
        # "odom_dummy" lets the node run without a real odometry source
        if use_dummy_transform:
            self.odom_frame_name = "odom_dummy"
        else:
            self.odom_frame_name = "odom"
        # known ceiling markers, keyed by marker id
        self.marker_locators = {}
        self.add_marker_locator(MarkerLocator(0,(0.0,0.0),0))
        self.add_marker_locator(MarkerLocator(1,(1.4/1.1,2.0/1.1),0))
        self.marker_sub = rospy.Subscriber("ar_pose_marker",
                                           ARMarkers,
                                           self.process_markers)
        self.odom_sub = rospy.Subscriber("odom", Odometry, self.process_odom, queue_size=10)
        self.star_pose_pub = rospy.Publisher("STAR_pose",PoseStamped,queue_size=10)
        self.continuous_pose = rospy.Publisher("STAR_pose_continuous",PoseStamped,queue_size=10)
        self.tf_listener = TransformListener()
        self.tf_broadcaster = TransformBroadcaster()

    def add_marker_locator(self, marker_locator):
        # index locators by marker id for quick lookup in process_markers
        self.marker_locators[marker_locator.id] = marker_locator

    def process_odom(self, msg):
        """ Republish incoming odometry as a pose in the STAR frame. """
        p = PoseStamped(header=Header(stamp=rospy.Time(0), frame_id=self.odom_frame_name),
                        pose=msg.pose.pose)
        try:
            STAR_pose = self.tf_listener.transformPose("STAR", p)
            STAR_pose.header.stamp = msg.header.stamp
            self.continuous_pose.publish(STAR_pose)
        except Exception as inst:
            print "error is", inst

    def process_markers(self, msg):
        """ For each plausible marker detection, publish the robot pose in the
            STAR frame (corrected for the laser mount offset) and refresh the
            STAR->odom transform. """
        for marker in msg.markers:
            # do some filtering based on prior knowledge
            # we know the approximate z coordinate and that all angles but yaw should be close to zero
            euler_angles = euler_from_quaternion((marker.pose.pose.orientation.x,
                                                  marker.pose.pose.orientation.y,
                                                  marker.pose.pose.orientation.z,
                                                  marker.pose.pose.orientation.w))
            # roll compared against pi, pitch against 0 (NOTE(review):
            # presumably because the marker faces down from the ceiling)
            angle_diffs = TransformHelpers.angle_diff(euler_angles[0],pi), TransformHelpers.angle_diff(euler_angles[1],0)
            if (marker.id in self.marker_locators and
                    2.4 <= marker.pose.pose.position.z <= 2.6 and
                    fabs(angle_diffs[0]) <= .2 and
                    fabs(angle_diffs[1]) <= .2):
                locator = self.marker_locators[marker.id]
                xy_yaw = locator.get_camera_position(marker)
                orientation_tuple = quaternion_from_euler(0,0,xy_yaw[2])
                pose = Pose(position=Point(x=xy_yaw[0],y=xy_yaw[1],z=0),
                            orientation=Quaternion(x=orientation_tuple[0], y=orientation_tuple[1], z=orientation_tuple[2], w=orientation_tuple[3]))
                # TODO: use markers timestamp instead of now() (unfortunately, not populated currently by ar_pose)
                pose_stamped = PoseStamped(header=Header(stamp=rospy.Time.now(),frame_id="STAR"),pose=pose)
                try:
                    offset, quaternion = self.tf_listener.lookupTransform("/base_link", "/base_laser_link", rospy.Time(0))
                except Exception as inst:
                    print "Error", inst
                    return
                # TODO: use frame timestamp instead of now()
                # shift the pose back to base_link along the current heading
                pose_stamped_corrected = deepcopy(pose_stamped)
                pose_stamped_corrected.pose.position.x -= offset[0]*cos(xy_yaw[2])
                pose_stamped_corrected.pose.position.y -= offset[0]*sin(xy_yaw[2])
                self.star_pose_pub.publish(pose_stamped_corrected)
                self.fix_STAR_to_odom_transform(pose_stamped_corrected)

    def fix_STAR_to_odom_transform(self, msg):
        """ Super tricky code to properly update map to odom transform... do not modify this... Difficulty level infinity. """
        (translation, rotation) = TransformHelpers.convert_pose_inverse_transform(msg.pose)
        p = PoseStamped(pose=TransformHelpers.convert_translation_rotation_to_pose(translation,rotation),header=Header(stamp=rospy.Time(),frame_id="base_link"))
        try:
            self.tf_listener.waitForTransform("odom","base_link",rospy.Time(),rospy.Duration(1.0))
        except Exception as inst:
            print "whoops", inst
            return
        print "got transform"
        self.odom_to_STAR = self.tf_listener.transformPose("odom", p)
        # cache the inverse for continuous rebroadcast in broadcast_last_transform
        (self.translation, self.rotation) = TransformHelpers.convert_pose_inverse_transform(self.odom_to_STAR.pose)

    def broadcast_last_transform(self):
        """ Make sure that we are always broadcasting the last map to odom transformation.
            This is necessary so things like move_base can work properly. """
        # nothing to broadcast until fix_STAR_to_odom_transform has run once
        if not(hasattr(self,'translation') and hasattr(self,'rotation')):
            return
        self.tf_broadcaster.sendTransform(self.translation, self.rotation, rospy.get_rostime(), self.odom_frame_name, "STAR")

    def run(self):
        """ Spin at 10 Hz, rebroadcasting the last known transform. """
        r = rospy.Rate(10)
        while not rospy.is_shutdown():
            self.broadcast_last_transform()
            r.sleep()
if __name__ == '__main__':
    # Run with the dummy odom frame so the node works without real odometry.
    node = MarkerProcessor(use_dummy_transform=True)
    node.run()
|
python
|
import os
from typing import Any, Dict, Optional
import httpx
import pytest
from fastapi import status
from tortoise import Tortoise
from chapter6.tortoise_relationship.app import app
from chapter6.tortoise_relationship.models import (
CommentDB,
CommentTortoise,
PostDB,
PostTortoise,
)
# Throwaway SQLite file used only by this test module (deleted by the fixture).
DATABASE_FILE_PATH = "chapter6_tortoise_relationship.test.db"
# Tortoise connection string pointing at the file above.
DATABASE_URL = f"sqlite://{DATABASE_FILE_PATH}"
@pytest.fixture(autouse=True, scope="module")
async def initialize_database():
    """Create the schema, seed posts and comments, and clean up afterwards."""
    await Tortoise.init(
        db_url=DATABASE_URL,
        modules={"models": ["chapter6.tortoise_relationship.models"]},
    )
    await Tortoise.generate_schemas()

    # Three posts; post 1 additionally gets three comments.
    seed_posts = [
        PostDB(id=1, title="Post 1", content="Content 1"),
        PostDB(id=2, title="Post 2", content="Content 2"),
        PostDB(id=3, title="Post 3", content="Content 3"),
    ]
    await PostTortoise.bulk_create(
        PostTortoise(**post.dict()) for post in seed_posts
    )

    seed_comments = [
        CommentDB(id=1, post_id=1, content="Post 1 Comment 1"),
        CommentDB(id=2, post_id=1, content="Post 1 Comment 2"),
        CommentDB(id=3, post_id=1, content="Post 1 Comment 3"),
    ]
    await CommentTortoise.bulk_create(
        CommentTortoise(**comment.dict()) for comment in seed_comments
    )

    yield

    # Teardown: close connections and remove the on-disk test database.
    await Tortoise.close_connections()
    os.remove(DATABASE_FILE_PATH)
@pytest.mark.fastapi(app=app, run_lifespan_events=False)
@pytest.mark.asyncio
class TestChapter6TortoiseRelationship:
    """End-to-end tests of the posts/comments API backed by Tortoise ORM.

    Relies on the module-scoped ``initialize_database`` fixture for schema
    and seed data (3 posts; post 1 has 3 comments).
    """

    @pytest.mark.parametrize(
        "skip,limit,nb_results", [(None, None, 3), (0, 1, 1), (10, 1, 0)]
    )
    async def test_list_posts(
        self,
        client: httpx.AsyncClient,
        skip: Optional[int],
        limit: Optional[int],
        nb_results: int,
    ):
        """GET /posts honours skip/limit pagination."""
        params = {}
        # NOTE(review): truthiness test drops falsy values, so skip=0 is never
        # sent as a query parameter -- fine for the cases above.
        if skip:
            params["skip"] = skip
        if limit:
            params["limit"] = limit
        response = await client.get("/posts", params=params)

        assert response.status_code == status.HTTP_200_OK

        json = response.json()
        assert len(json) == nb_results

    @pytest.mark.parametrize(
        "id,status_code,nb_comments",
        [(1, status.HTTP_200_OK, 3), (10, status.HTTP_404_NOT_FOUND, 0)],
    )
    async def test_get_post(
        self, client: httpx.AsyncClient, id: int, status_code: int, nb_comments: int
    ):
        """GET /posts/{id} returns the post with its comments, or 404."""
        response = await client.get(f"/posts/{id}")

        assert response.status_code == status_code
        if status_code == status.HTTP_200_OK:
            json = response.json()
            assert json["id"] == id
            assert len(json["comments"]) == nb_comments

    @pytest.mark.parametrize(
        "payload,status_code",
        [
            ({"title": "New post", "content": "New content"}, status.HTTP_201_CREATED),
            ({}, status.HTTP_422_UNPROCESSABLE_ENTITY),
        ],
    )
    async def test_create_post(
        self, client: httpx.AsyncClient, payload: Dict[str, Any], status_code: int
    ):
        """POST /posts creates a post (empty comment list) or rejects bad input."""
        response = await client.post("/posts", json=payload)

        assert response.status_code == status_code
        if status_code == status.HTTP_201_CREATED:
            json = response.json()
            assert "id" in json
            assert json["comments"] == []

    @pytest.mark.parametrize(
        "id,payload,status_code,nb_comments",
        [
            (1, {"title": "Post 1 Updated"}, status.HTTP_200_OK, 3),
            (10, {"title": "Post 10 Updated"}, status.HTTP_404_NOT_FOUND, 0),
        ],
    )
    async def test_update_post(
        self,
        client: httpx.AsyncClient,
        id: int,
        payload: Dict[str, Any],
        status_code: int,
        nb_comments: int,
    ):
        """PATCH /posts/{id} applies partial updates and keeps comments intact."""
        response = await client.patch(f"/posts/{id}", json=payload)

        assert response.status_code == status_code
        if status_code == status.HTTP_200_OK:
            json = response.json()
            # every updated field must be reflected in the response
            for key in payload:
                assert json[key] == payload[key]
            assert len(json["comments"]) == nb_comments

    @pytest.mark.parametrize(
        "id,status_code",
        [(1, status.HTTP_204_NO_CONTENT), (10, status.HTTP_404_NOT_FOUND)],
    )
    async def test_delete_post(
        self, client: httpx.AsyncClient, id: int, status_code: int
    ):
        """DELETE /posts/{id} returns 204 on success, 404 when absent."""
        response = await client.delete(f"/posts/{id}")

        assert response.status_code == status_code

    @pytest.mark.parametrize(
        "payload,status_code",
        [
            ({"post_id": 2, "content": "New comment"}, status.HTTP_201_CREATED),
            ({"post_id": 10, "content": "New comment"}, status.HTTP_400_BAD_REQUEST),
            ({}, status.HTTP_422_UNPROCESSABLE_ENTITY),
        ],
    )
    async def test_create_comment(
        self, client: httpx.AsyncClient, payload: Dict[str, Any], status_code: int
    ):
        """POST /comments creates a comment; unknown post_id yields 400."""
        response = await client.post("/comments", json=payload)

        assert response.status_code == status_code
        if status_code == status.HTTP_201_CREATED:
            json = response.json()
            assert "id" in json
|
python
|
from setuptools import find_packages, setup
# Minimal setuptools packaging script; `find_packages()` discovers the
# project's packages (here, `src`) automatically.
setup(
    name='src',
    packages=find_packages(),
    version='0.1.0',
    # typo fixed: "fro" -> "for"
    description='Test Project for machine learning',
    author='Vipin',
    license='MIT',
)
|
python
|
# Generated by Django 2.1 on 2020-01-10 00:07
from django.db import migrations
import pyuploadcare.dj.models
class Migration(migrations.Migration):
    # Auto-generated by Django; adds optional Uploadcare-backed image fields
    # to the existing `profile` and `project` models of the `rate` app.

    dependencies = [
        ('rate', '0002_auto_20200109_1306'),
    ]

    operations = [
        migrations.AddField(
            model_name='profile',
            name='picture',
            field=pyuploadcare.dj.models.ImageField(blank=True),
        ),
        migrations.AddField(
            model_name='project',
            name='image',
            field=pyuploadcare.dj.models.ImageField(blank=True),
        ),
    ]
|
python
|
from .music import *
|
python
|
# -*- coding: UTF-8 -*-
import torbjorn as tbn
from .an2cn import An2Cn
from .cn2an import Cn2An
ac = An2Cn()  # Arabic-numeral -> Chinese-numeral converter
ca = Cn2An()  # Chinese-numeral -> Arabic-numeral converter

an = 9876543298765432
# Chinese-numeral spelling of `an`.  Restored from a mojibake-damaged literal:
# the original UTF-8 bytes had been misdecoded through a single-byte (Thai)
# codepage.  The restored string also satisfies the benchmarks' round-trip
# checks (cn2an(cn) == an, an2cn(an) == cn).
cn = "九千八百七十六万五千四百三十二亿九千八百七十六万五千四百三十二"
@tbn.run_time
def run_cn2an_ten_thousand_times():
    """Timed benchmark: parse the Chinese numeral string 10,000 times."""
    for _ in range(10 ** 4):
        assert ca.cn2an(cn) == an
@tbn.run_time
def run_an2cn_ten_thousand_times():
    """Timed benchmark: render the integer as Chinese numerals 10,000 times."""
    for _ in range(10 ** 4):
        assert ac.an2cn(an) == cn
if __name__ == '__main__':
    # Run both benchmark directions when executed as a script.
    run_cn2an_ten_thousand_times()
    run_an2cn_ten_thousand_times()
|
python
|
import warnings
from ..utility.strcmpi import *
from ..utility.debug_message import *
from .fileparts import *
def _open_file(filename, defaultExt, permission, fileDataFcn, verbose=False):
    """
    Opens a given file and executes the specified function over its data

    If opening with the (lower-case) default extension fails, a second attempt
    is made with the upper-case extension; if that also fails a RuntimeWarning
    is issued and None is returned.

    Parameters
    ----------
    filename : str
        the path of a file
    defaultExt : str
        the default extension of the file
    permission : str
        a string representing how to open the file (Ex.: 'r','w',...)
    fileDataFcn : callable
        the function to be processed over file data. Takes the file as its only parameter.
    verbose : bool (optional)
        if True prints messages on console (default is False)

    Returns
    -------
    unknown
        the output of the fileDataFcn, or None if the file could not be opened
    """
    path, filename, ext = fileparts(filename)
    # NOTE(review): ext[1:-1] strips the leading dot AND the final character of
    # the extension; ext[1:] looks like the intent -- verify against the format
    # returned by fileparts before changing.
    if (not ext) or (not strcmpi(ext[1:-1], defaultExt)):
        ext = '.' + defaultExt
    debug_message('Opening file "{}/{}{}"...'.format(path, filename, ext), verbose, no_newline=True)
    try:
        fileID = open(path + '/' + filename + ext, mode=permission)
    except OSError:  # was a bare except; OSError covers all open() I/O failures
        debug_message('FAILED.', verbose)
        # retry with the upper-case variant of the default extension
        ext = '.' + defaultExt.upper()
        debug_message('Opening file "{}/{}{}"...'.format(path, filename, ext), verbose, no_newline=True)
        try:
            fileID = open(path + '/' + filename + ext, mode=permission)
        except OSError:
            debug_message('FAILED.', verbose)
            warnings.warn('File "{}/{}{}" does not exist.'.format(path, filename, ext), RuntimeWarning)
            return
    debug_message('DONE.', verbose)
    debug_message('Starting routine...', verbose)
    try:
        out = fileDataFcn(fileID)
        debug_message('Routine COMPLETED.', verbose)
    finally:
        # close the handle even if fileDataFcn raises (the original leaked it)
        debug_message('Closing file...', verbose, no_newline=True)
        fileID.close()
        debug_message('COMPLETED.', verbose)
    return out
def export_to_text_file(filename, defaultExt, writeDataFcn, verbose=False):
    """
    Open a text file for writing (truncating) and run ``writeDataFcn`` on it.

    Parameters
    ----------
    filename : str
        the path of the file to write
    defaultExt : str
        extension used when ``filename`` lacks a matching one
    writeDataFcn : callable
        called with the open file object as its only argument
    verbose : bool (optional)
        if True prints progress messages on console (default is False)

    Returns
    -------
    unknown
        whatever ``writeDataFcn`` returned
    """
    return _open_file(filename, defaultExt, 'w+', writeDataFcn, verbose=verbose)
def export_to_binary_file(filename, defaultExt, writeDataFcn, verbose=False):
    """
    Open a binary file for writing (truncating) and run ``writeDataFcn`` on it.

    Parameters
    ----------
    filename : str
        the path of the file to write
    defaultExt : str
        extension used when ``filename`` lacks a matching one
    writeDataFcn : callable
        called with the open file object as its only argument
    verbose : bool (optional)
        if True prints progress messages on console (default is False)

    Returns
    -------
    unknown
        whatever ``writeDataFcn`` returned
    """
    return _open_file(filename, defaultExt, 'wb', writeDataFcn, verbose=verbose)
def import_from_text_file(filename, defaultExt, readDataFcn, verbose=False):
    """
    Open a text file for reading and run ``readDataFcn`` on it.

    Parameters
    ----------
    filename : str
        the path of the file to read
    defaultExt : str
        extension used when ``filename`` lacks a matching one
    readDataFcn : callable
        called with the open file object as its only argument
    verbose : bool (optional)
        if True prints progress messages on console (default is False)

    Returns
    -------
    unknown
        whatever ``readDataFcn`` returned
    """
    return _open_file(filename, defaultExt, 'r', readDataFcn, verbose=verbose)
def import_from_binary_file(filename, defaultExt, readDataFcn, verbose=False):
    """
    Open a binary file for reading and run ``readDataFcn`` on it.

    Parameters
    ----------
    filename : str
        the path of the file to read
    defaultExt : str
        extension used when ``filename`` lacks a matching one
    readDataFcn : callable
        called with the open file object as its only argument
    verbose : bool (optional)
        if True prints progress messages on console (default is False)

    Returns
    -------
    unknown
        whatever ``readDataFcn`` returned
    """
    return _open_file(filename, defaultExt, 'rb', readDataFcn, verbose=verbose)
def append_to_text_file(filename, defaultExt, writeDataFcn, verbose=False):
    """
    Open a text file for appending and run ``writeDataFcn`` on it.

    Parameters
    ----------
    filename : str
        the path of the file to append to
    defaultExt : str
        extension used when ``filename`` lacks a matching one
    writeDataFcn : callable
        called with the open file object as its only argument
    verbose : bool (optional)
        if True prints progress messages on console (default is False)

    Returns
    -------
    unknown
        whatever ``writeDataFcn`` returned
    """
    return _open_file(filename, defaultExt, 'a+', writeDataFcn, verbose=verbose)
def append_to_binary_file(filename, defaultExt, writeDataFcn, verbose=False):
    """
    Open a binary file for appending and run ``writeDataFcn`` on it.

    Parameters
    ----------
    filename : str
        the path of the file to append to
    defaultExt : str
        extension used when ``filename`` lacks a matching one
    writeDataFcn : callable
        called with the open file object as its only argument
    verbose : bool (optional)
        if True prints progress messages on console (default is False)

    Returns
    -------
    unknown
        whatever ``writeDataFcn`` returned
    """
    return _open_file(filename, defaultExt, 'ab', writeDataFcn, verbose=verbose)
|
python
|
# Country codes (without '+') that are rewritten to 00-prefixed international
# form; every other non-+86 number is rejected.
numbers = ["886"]


def parse_phone_number(phone_number):
    """Normalize an international phone number for the SMS gateway.

    Mainland-China numbers ("+86...") are returned as the bare mobile number
    rather than converted to international form (original comment, translated:
    the downstream SMS provider can only deliver them in that shape).
    Numbers whose country code appears in ``numbers`` are rewritten to the
    00-prefixed international form, dropping a single trunk '0' directly after
    the country code if present.  Anything else yields None.
    """
    if phone_number.startswith("+86"):
        return phone_number[3:]
    real_phone_number = None
    for number in numbers:
        p = "+" + number
        if phone_number.startswith(p + "0"):
            # drop the trunk zero between country code and subscriber number
            real_phone_number = phone_number[len(p) + 1:]
        elif phone_number.startswith(p):
            real_phone_number = phone_number[len(p):]
        if real_phone_number:
            return "00" + number + real_phone_number
    return None
|
python
|
# common conversion point stacking
# import modules
import sys
from geographiclib.geodesic import Geodesic as geo
import numpy as np
# from matplotlib.mlab import griddata
import scipy
import scipy.ndimage
from scipy import interpolate
import matplotlib.pyplot as plt
from mpl_toolkits.basemap import Basemap
import numpy as np
import os
import os.path
import math
import msgpack
import msgpack_numpy as m
m.patch()
import shutil
from matplotlib.colors import LogNorm
import matplotlib.cm as cm
from obspy import read
from scipy import stats
import mpl_toolkits
root = '/raid3/sdat2/Parra/'  # base directory for the CCP stack data products


# definition of the half width of the fresnel zone
def knotspacing(r, vs):
    """Half width (m) of the Fresnel zone for a 10 s wave.

    r: radius/depth in m; vs: shear-wave speed (units as used by callers).
    Converted from a lambda assignment to a def (PEP 8 E731); the formula is
    unchanged.
    """
    return 1./2.*np.sqrt(((10./3.*vs)+r)**2.-r**2.)  # in m for a 10s wave
def haversine(lat1, long1, lats2, longs2):
    """
    Calculate the great-circle distance between two points on earth in m.

    lats2/longs2 are sequences; only their FIRST element is used.  (The
    original looped over every element and then returned d[0], so all work
    past the first element was wasted -- behavior is unchanged, including the
    IndexError on empty input.)
    """
    earth_radius = 6371.e3  # mean Earth radius, m
    lat2 = lats2[0]
    long2 = longs2[0]
    dLat = math.radians(lat2 - lat1)
    dLong = math.radians(long2 - long1)
    # haversine formula
    a = (math.sin(dLat / 2) ** 2 +
         math.cos(math.radians(lat1)) * math.cos(math.radians(lat2)) * math.sin(dLong / 2) ** 2)
    c = 2 * math.atan2(math.sqrt(a), math.sqrt(1 - a))
    return float(earth_radius * c)
def weight(distance, depth, vs, factor):
    '''
    Weight a contribution by its distance relative to the scaled Fresnel-zone
    half width at the given depth: a piecewise-cubic taper that is 1 at zero
    distance and reaches 0 at twice the scaled width.
    '''
    # distance in m ~~~~~~ fresnel zone times factor~~~
    delta = distance/(factor*knotspacing(depth*1.e3, vs))
    if delta > 2:
        return 0
    if delta > 1:
        return .25*(2.-delta)**3.
    return .75*delta**3.-1.5*delta**2.+1.
class VOL(dict):
    """A dict whose keys are also readable/writable as attributes.

    ``self.__dict__ = self`` aliases the attribute namespace to the dict
    itself, so ``v.x`` and ``v["x"]`` address the same storage.
    """

    def __init__(self, *arg, **kw):
        super(VOL, self).__init__(*arg, **kw)
        self.__dict__ = self

    def __getattr__(self, name):
        # Only reached for keys missing from the dict (the __dict__ aliasing
        # serves existing keys).  Raise AttributeError -- not KeyError as the
        # original did -- so hasattr(), getattr(..., default) and the
        # copy/pickle protocols behave correctly.
        try:
            return self[name]
        except KeyError:
            raise AttributeError(name)
class ccp_volume(object):
"""
Handling large stacked volumes
"""
    def __init__(self, *arg, **kw):
        # All stacked-volume state (grids, weights, sigma, bounds) lives in a
        # single attribute-accessible VOL mapping.
        self.VOL = VOL(*arg, **kw)
#
# Load latest volume to dictionary
#
def load_latest(self, name='Megavolume', filter='rff2', conversion='EU60', factor=1.):
line = open(root+'CCP_Stack/Volumes/'+name+'_'+filter+'_'+conversion+'_'+str(factor)+'/filenames.dat', 'r').readlines()[-1]
runnum = int(float(line.split()[0]))
volumefile = line.split()[1]
print('loading', name, runnum, volumefile)
# get last stackfile name
# Read in volumes
self.VOL.update(msgpack.unpack(open(volumefile, 'rb'), use_list=False, object_hook=m.decode))
# del self.VOL['trackRFs']
return self
#
# Plot crossections
#
    def plot_crosssection(self, direction, lonorlat, amplify=1., name='Megavolume', filter='rff2', conversion='EU60', factor=2., zoom=False, mincoverage=10):
        """ Plot a N-S or E-W crossection through the stacked volume.

            direction: 'NS' (slice at longitude lonorlat) or 'EW' (slice at
                latitude lonorlat)
            amplify: gain applied to the wiggle traces in the lower panel
            zoom: restrict the depth axis to 300-800 km
            mincoverage: minimum summed weight for a cell to be displayed
            (name/filter/conversion/factor are unused here; kept for API
            symmetry with the other plotting routines)
        """
        # set volume lats and lons
        if direction == 'NS':
            lon = lonorlat
            n = np.argmin(np.abs(self.VOL.grid_lon-lon))
            crossec = self.VOL.volume[n,:,:].T.copy()
            vol_sig = self.VOL.volumesigma[n,:,:].T.copy()
            w = self.VOL.volumeweight[n,:,:].T
            xaxis = self.VOL.grid_lat
            xlabel = 'latitude (dg)'
            xends = [lon, lon]
            yends = [self.VOL.latmin, self.VOL.latmax]
        if direction == 'EW':
            lat = lonorlat
            n = np.argmin(np.abs(self.VOL.grid_lat-lat))
            crossec = self.VOL.volume[:, n,:].T.copy()
            vol_sig = self.VOL.volumesigma[:, n,:].T.copy()
            w = self.VOL.volumeweight[:, n,:].T
            xaxis = self.VOL.grid_lon
            xlabel = 'longitude (dg)'
            xends = [self.VOL.lonmin, self.VOL.lonmax]
            yends = [lat, lat]
        depths = self.VOL.grid_depth
        # normalize: convert weighted sums to means; vol_sig becomes the
        # amplitude clipped at the 95% significance level (zeroed where not
        # significant); low-coverage cells are set to 1000. so the colormap's
        # over-range colour masks them
        for i in range(np.shape(w)[0]):
            for j in range(np.shape(w)[1]):
                if w[i, j] > mincoverage:
                    crossec[i, j] = crossec[i, j]/w[i, j]
                    if crossec[i, j] > 0:
                        vol_sig[i, j] = crossec[i, j]-1.96*np.sqrt(vol_sig[i, j]/(w[i, j]*w[i, j]))
                        if vol_sig[i, j] < 0:
                            vol_sig[i, j] = 0.
                    if crossec[i, j] < 0:
                        vol_sig[i, j] = crossec[i, j]+1.96*np.sqrt(vol_sig[i, j]/(w[i, j]*w[i, j]))
                        if vol_sig[i, j] > 0:
                            vol_sig[i, j] = 0.
                else:
                    crossec[i, j] = 1000.
        plt.figure(figsize=(14, 8))
        # location map (upper right) with the section line drawn in red
        plt.subplot(2, 2, 2)
        m = Basemap(projection='merc', llcrnrlat=self.VOL.latmin, urcrnrlat=self.VOL.latmax, llcrnrlon=self.VOL.lonmin, urcrnrlon=self.VOL.lonmax, lat_ts=20, resolution='i')
        m.drawparallels(np.arange(self.VOL.latmin, self.VOL.latmax, 5.), labels=[1, 0, 0, 1], labelstyle='+/-', fontsize=10)
        m.drawmeridians(np.arange(self.VOL.lonmin, self.VOL.lonmax, 5.), labels=[1, 0, 0, 1], labelstyle='+/-', fontsize=10)
        m.drawcoastlines()
        m.drawcountries()
        m.drawstates()
        m.drawmapboundary(fill_color=[1.0, 1.0, 1.0])
        x1, y1 = m(xends[0], yends[0])
        x2, y2 = m(xends[1], yends[1])
        m.plot([x1, x2], [y1, y2], color='r', linewidth=1, zorder=1)
        # colour-coded tick marks every 10 degrees along the section
        if direction == 'NS':
            x3, y3 = m(lon*np.ones(len(xaxis),), np.round(xaxis/10.)*10.)
            m.scatter(x3, y3, 80, xaxis, zorder=2)
        if direction == 'EW':
            x3, y3 = m(np.round(xaxis/10.)*10, lat*np.ones(len(xaxis),) )
            m.scatter(x3, y3, 80, xaxis, zorder=2)
        # wiggle gain: 0.2 amplitude units per axis unit at amplify=1
        norm = 0.2/amplify
        # plot colour-coded section (upper left)
        plt.subplot(2, 2, 1)
        xx, yy = np.meshgrid(xaxis, depths)
        cs = plt.pcolor(xx, yy, crossec, vmin=-0.15, vmax=0.15, rasterized=True,cmap=cm.coolwarm)
        plt.colorbar()
        cs.cmap.set_over([0.8, 0.8, 0.8])
        if zoom:
            plt.ylim([300, 800])
        else:
            plt.ylim([min(depths), max(depths)])
        plt.gca().invert_yaxis()
        plt.xlim(xends)
        # wiggle panel (bottom): black = full amplitude, red/blue = the
        # significance-clipped positive/negative amplitudes
        plt.subplot(2, 1, 2)
        for t in np.arange(0, len(xaxis), 1):
            # NOTE(review): range starts at 100, skipping the shallowest 100
            # depth samples -- presumably to omit shallow structure; confirm
            lx = [x for x in range(100, len(depths)) if (np.abs(w[x, t]) > mincoverage)]# and np.abs(vol_sig[x,t])>std[x,t]/1.96)]
            RF = vol_sig[lx, t]/norm+xaxis[t]
            RFfull = crossec[lx, t]/norm+xaxis[t]
            plt.fill_betweenx(depths[lx], RFfull, xaxis[t], where=RFfull >= xaxis[t], facecolor='k', rasterized=True)
            plt.fill_betweenx(depths[lx], RFfull, xaxis[t], where=xaxis[t] >= RFfull, facecolor='k', rasterized=True)
            plt.fill_betweenx(depths[lx], RF, xaxis[t], where=RF >= xaxis[t], facecolor=[1.0, 0., 0.], rasterized=True)
            plt.fill_betweenx(depths[lx], RF, xaxis[t], where=xaxis[t] >= RF, facecolor=[0.0, 0.0, 1.], rasterized=True)
        # tick markers at the top of the panel, matching the map colours
        plt.scatter(np.round(xaxis/10.)*10., 80.*np.ones(len(xaxis),), 80, xaxis, rasterized=True)
        # reference lines at the 410 km and 660 km discontinuities
        plt.plot([-15, 140], [410, 410], '--k', linewidth=2)
        plt.plot([-15, 140], [660, 660], '--k', linewidth=2)
        plt.ylabel('Depth (km)')
        plt.xlabel(xlabel, fontsize=12)
        plt.xlim([min(xaxis), max(xaxis)])
        if zoom:
            plt.ylim([300, 800])
        else:
            plt.ylim([min(depths), max(depths)])
        plt.gca().invert_yaxis()
#
# Plot crossections
#
def plot_crosssection_any(self, lon1,lon2,lat1,lat2,numpoints=200,amplify=1.,name='Megavolume',filter='rff2', conversion='EU60', factor=2.,zoom=False,mincoverage=10., color_scheme = 'spectral', reverse = False,degree_limit=10):
# set volume lats and lons
inv = geo.WGS84.Inverse(lat1, lon1, lat2, lon2)
points = np.linspace(0, inv['s12'], numpoints)
line = geo.WGS84.Line(lat1, lon1, inv['azi1'])
lats = []
lons = []
for i in range(len(points)):
lats.append(line.Position(points[i])['lat2'])
lons.append(line.Position(points[i])['lon2'])
lats = np.array(lats)
lons = np.array(lons)
crossec = []
vol_sig = []
w = []
dist = []
for i in range(len(lats)):
dist.append(haversine(lats[0], lons[0], [lats[i]], [lons[i]])/111194.)
# pixelize lon and lat
row = (lons-np.min(self.VOL.grid_lon))/(self.VOL.grid_lon[1]-self.VOL.grid_lon[0])
for i in range(len(row)):
if row[i] < 0:
row[i] = row[i]+len(self.lon)
col = (lats-np.min(self.VOL.grid_lat))/(self.VOL.grid_lat[1]-self.VOL.grid_lat[0])
for dp in range(len(self.VOL.grid_depth)):
crossec.append(scipy.ndimage.map_coordinates(self.VOL.volume[:,:, dp], np.vstack((row, col))))
vol_sig.append(scipy.ndimage.map_coordinates(self.VOL.volumesigma[:,:, dp], np.vstack((row, col))))
w.append(scipy.ndimage.map_coordinates(self.VOL.volumeweight[:,:, dp], np.vstack((row, col))))
crossec = np.array(crossec)
vol_sig = np.array(vol_sig)
w = np.array(w)
xaxis = self.VOL.grid_lat
xlabel = 'latitude (dg)'
xends = [lon1, lon2]
yends = [lat1, lat2]
depths = self.VOL.grid_depth
# normalize
for i in range(np.shape(w)[0]):
for j in range(np.shape(w)[1]):
if w[i, j] > mincoverage:
crossec[i, j] = crossec[i, j]/w[i, j]
if crossec[i, j] > 0:
vol_sig[i, j] = crossec[i, j]-1.96*np.sqrt(vol_sig[i, j]/(w[i, j]*w[i, j]))
if vol_sig[i, j] < 0:
vol_sig[i, j] = 0.
if crossec[i, j] < 0:
vol_sig[i, j] = crossec[i, j]+1.96*np.sqrt(vol_sig[i, j]/(w[i, j]*w[i, j]))
if vol_sig[i, j] > 0:
vol_sig[i, j] = 0.
else:
crossec[i, j] = 100.
plt.subplot(2, 2, 2)
m = Basemap(projection='merc', llcrnrlat=self.VOL.latmin, urcrnrlat=self.VOL.latmax, llcrnrlon=self.VOL.lonmin, urcrnrlon=self.VOL.lonmax, lat_ts=20, resolution='i')
m.drawparallels(np.arange(0, 70, 5.), labels=[1, 0, 0, 1], labelstyle='+/-', fontsize=10)
#lon1,lon2,lat1,lat2
m.drawparallels([lat1,lat2],labels=[0, 1, 1, 0], labelstyle='+/-', fontsize=10)
m.drawmeridians([lon1,lon2],labels=[0, 1, 1, 0], labelstyle='+/-', fontsize=10)
m.drawmeridians(np.arange(-130,100, 5.), labels=[1, 0, 0, 1], labelstyle='+/-', fontsize=10)
m.drawstates()
m.drawcoastlines()
m.drawcountries()
m.drawmapboundary(fill_color=[1.0, 1.0, 1.0])
x1, y1 = m(xends[0], yends[0])
x2, y2 = m(xends[1], yends[1])
m.plot([x1, x2], [y1, y2], color='r', linewidth=1, zorder=1)
######################################################
plt.subplot(2, 2, 1)
xx, yy = np.meshgrid(dist, depths)
#----------------------------------------------------
if color_scheme == 'rainbow' and reverse == True: cmap = cm.rainbow_r
if color_scheme == 'rainbow' and reverse == False: cmap = cm.rainbow
if color_scheme == 'spectral' and reverse == True: cmap = cm.Spectral_r
if color_scheme == 'spectral' and reverse == False: cmap = cm.Spectral
cs = plt.pcolor(xx, yy, crossec, vmin=-0.1, vmax=0.1, rasterized=True,cmap=cmap)
#----------------------------------------------------
plt.colorbar()
cs.cmap.set_over([0.8, 0.8, 0.8])
if zoom:
plt.ylim([300, 800])
plt.xlim([0, degree_limit])#((lat2-lat1)**2+(lon2-lon1)**2)**0.5])
else:
plt.ylim([min(depths), max(depths)])
plt.xlim([0, degree_limit])
plt.gca().invert_yaxis()
# corrected by 3D model
# normalize
norm = 0.2/amplify#np.max(np.max(np.abs(crossec_3D)))/amplify
# plot
plt.subplot(2,1,2)
for t in np.arange(0, len(dist), 1):
lx = [x for x in range(len(depths)) if (np.abs(w[x, t]) > mincoverage)]# and np.abs(vol_sig[x,t])>std[x,t]/1.96)]
RF = vol_sig[lx, t]/norm+dist[t]
RFfull = crossec[lx, t]/norm+dist[t]
plt.fill_betweenx(depths[lx], RFfull, dist[t], where=RFfull >= dist[t], facecolor='k', rasterized=True)
plt.fill_betweenx(depths[lx], RFfull, dist[t], where=dist[t] >= RFfull, facecolor='k', rasterized=True)
plt.fill_betweenx(depths[lx], RF, dist[t], where=RF >= dist[t], facecolor=[1.0, 0., 0.], rasterized=True)
plt.fill_betweenx(depths[lx], RF, dist[t], where=dist[t] >= RF, facecolor=[0.0, 0.0, 1.], rasterized=True)
RF2 = crossec[lx, t]/norm
l410 = [x for x in range(len(depths[lx])) if depths[lx[x]] > 366 and depths[lx[x]] < 454]
l660 = [x for x in range(len(depths[lx])) if depths[lx[x]] > 616 and depths[lx[x]] < 704]
if len(l410) > 20:
max410 = np.argmax(RF2[l410])
ind = lx[l410[max410]]
plt.plot([dist[t]+0.1, 0.5*RF2[l410[max410]]+dist[t]], [depths[ind], depths[ind]], 'y', linewidth=2)
if len(l660) > 20:
max660 = np.argmax(RF2[l660])
ind = lx[l660[max660]]
plt.plot([dist[t]+0.1, 0.5*RF2[l660[max660]]+dist[t]], [depths[ind], depths[ind]], 'y', linewidth=2)
plt.ylabel('Depth (km)')
plt.xlabel('Distance (dg)', fontsize=12)
plt.xlim([min(dist), max(dist)])
plt.plot([-5, 40], [410, 410], '--k', linewidth=2)
plt.plot([-5, 40], [660, 660], '--k', linewidth=2)
if zoom:
plt.ylim([300, 800])
plt.xlim([0, degree_limit])#((lat2-lat1)**2+(lon2-lon1)**2)**0.5])
else:
plt.ylim([min(depths), max(depths)])
plt.xlim([0, degree_limit])
plt.gca().invert_yaxis()
#
# Plot data coverage map at predefined depth
#
def plot_datacoverage(self,depth,name='Megavolume',filter='rff2', conversion='EU60', factor=2.):
fig = plt.figure(figsize=(6,6))
d = np.argmin(np.abs(self.VOL.grid_depth-depth))
slice = self.VOL.volumeweight[:,:, d].copy()
xx, yy = np.meshgrid(self.VOL.grid_lon, self.VOL.grid_lat)
m = Basemap(projection='merc', llcrnrlat=np.min(self.VOL.grid_lat), urcrnrlat=np.max(self.VOL.grid_lat), llcrnrlon=np.min(self.VOL.grid_lon), urcrnrlon=np.max(self.VOL.grid_lon), lat_ts=20, resolution='i')
m.drawparallels(np.arange(self.VOL.latmin, self.VOL.latmax, 2.), labels=[1, 0, 0, 1], linewidth=0.5, dashes=[4, 2], labelstyle='+/-', fontsize=10)
m.drawmeridians(np.arange(self.VOL.lonmin, self.VOL.lonmax, 2.), labels=[1, 0, 0, 1], linewidth=0.5, dashes=[4, 2], labelstyle='+/-', fontsize=10)
m.drawcountries()
coasts = m.drawcoastlines(zorder=2, color='k', linewidth=1)
m.drawmapboundary(fill_color=[1.0, 1.0, 1.0])
x, y = m(xx, yy)
contours = [1., 15, 1.e2, 1.e3, 1.e4]#[1.e0,1.e1,1.e2,1.e3,1.e4]
im =plt.contourf(x, y, slice.T, contours, norm=LogNorm(),zorder=1)
fig.subplots_adjust(bottom=.2)
cbar_ax = fig.add_axes([0.2, 0.1, 0.6, 0.05])
cb = fig.colorbar(im, cax=cbar_ax, orientation='horizontal')
cb.set_label('Sum of weights at ' + str(depth) + ' km')
#
# Plot topography maps
#
def plot_topography(self,mindepth,maxdepth,name='Megavolume',filter='rff2',conversion='prem',factor=2.,mincoverage=15., amplitude = False, color_scheme = 'spectral', reverse = False):
# Plots topography of maximum between mindepth and maxdepth, masking if sum of weights is beneath mincoverage.
# If amplitude =True, it will plot the amplitude and not the depth
plt.figure(figsize=(10, 8))
depths = self.VOL.grid_depth
val_list = [x for x in range(len(depths)) if depths[x] > mindepth and depths[x] < maxdepth]
thickness = np.empty((len(self.VOL.grid_lon), len(self.VOL.grid_lat)))
dmap = np.empty((len(self.VOL.grid_lon), len(self.VOL.grid_lat)))
coverage = np.empty((len(self.VOL.grid_lon), len(self.VOL.grid_lat)))
for i in range(len(self.VOL.grid_lon)):
for j in range(len(self.VOL.grid_lat)):
RF = self.VOL.volume[i, j,:]/self.VOL.volumeweight[i, j,:]
std = 1.96*np.sqrt(self.VOL.volumesigma[i, j,:]/(self.VOL.volumeweight[i, j,:]*self.VOL.volumeweight[i, j,:]))
maxmap = np.argmax(RF[val_list])
if amplitude == False:
dmap[i, j] = depths[val_list[maxmap]]
else:
dmap[i,j] = RF[val_list[maxmap]]
if self.VOL.volumeweight[i, j, val_list[maxmap]] < mincoverage:
dmap[i, j] = 1000.
# Prepare map
m = Basemap(projection='merc', llcrnrlat=np.min(self.VOL.grid_lat)-0, urcrnrlat=np.max(self.VOL.grid_lat)+0, llcrnrlon=np.min(self.VOL.grid_lon)-0, urcrnrlon=np.max(self.VOL.grid_lon)+0, lat_ts=10, resolution='i')
m.drawparallels(np.arange(0, 70, 10.), labels=[1, 0, 0, 1], linewidth=0.5, dashes=[4, 2], labelstyle='+/-', fontsize=8)#[1,0,0,1])
m.drawmeridians(np.arange(-20, 60, 10.), labels=[1, 0, 0, 1], linewidth=0.5, dashes=[4, 2], labelstyle='+/-', fontsize=8)#[1,0,0,1])
m.drawcountries()
m.drawstates()
m.drawcoastlines()
coasts = m.drawcoastlines(zorder=1, color='k', linewidth=0.1)
xx, yy = np.meshgrid(self.VOL.grid_lon, self.VOL.grid_lat)
x, y = m(xx, yy)
#-----------------------------------------------------------------------
if color_scheme == 'rainbow' and reverse == True: cmap = cm.rainbow_r
if color_scheme == 'rainbow' and reverse == False: cmap = cm.rainbow
if color_scheme == 'spectral' and reverse == True: cmap = cm.Spectral_r
if color_scheme == 'spectral' and reverse == False: cmap = cm.Spectral
#------------------------------------------------------------------------
if amplitude is False:
cs = plt.pcolor(x, y, dmap.T, vmin=mindepth, vmax=maxdepth, cmap=cmap, linewidth=0, rasterized=False)
else:
cs = plt.pcolor(x, y, dmap.T, vmin=0.01, vmax=0.12, cmap=cm.cmap, linewidth=0, rasterized=False)
cs.cmap.set_under([0.8, 0.8, 0.8])
cs.cmap.set_over([0.8, 0.8, 0.8])
cb = plt.colorbar()
cb.set_label('Maximum map between ' + str(mindepth)+' and ' + str(maxdepth)+' (km)')
# cb.set_ticks([380,400,420,440])
cb.solids.set_rasterized(True)
xt, yt = m(-13.2, 70.6)
m.drawcoastlines(zorder=1, color='k', linewidth=1)
dmapall = np.ravel(dmap)
if amplitude == False:
l = [l for l in range(len(dmapall)) if dmapall[l] > mindepth+1. and dmapall[l] < maxdepth - 1.]
print ('median', np.median((dmapall[l])))
print ('variance', np.var((dmapall[l])))
#
# Plot map of MTZ width
#
def plot_mtzwidth(self,name='Megavolume',filter='rff2',conversion='prem',factor=2., Max_Thickness = 290, Min_Thickness=230):
plt.figure(figsize=(18, 8))
depths = self.VOL.grid_depth
l410 = [x for x in range(len(depths)) if depths[x] > 370 and depths[x] < 460] # limit between 370 and 460
l660 = [x for x in range(len(depths)) if depths[x] > 630 and depths[x] < 710] # limit between 630 and 710
thickness1D = np.empty((len(self.VOL.grid_lon), len(self.VOL.grid_lat))) # create grids the size of the box to plot the data into
d4101D = np.empty((len(self.VOL.grid_lon), len(self.VOL.grid_lat)))
d6601D = np.empty((len(self.VOL.grid_lon), len(self.VOL.grid_lat)))
with open(root+'CCP_Stack/MTZ_'+conversion+'_'+filter+'_'+str(int(factor))+'.txt', 'w') as output:
for i in range(len(self.VOL.grid_lon)):
for j in range(len(self.VOL.grid_lat)):
RF = self.VOL.volume[i, j,:]/self.VOL.volumeweight[i, j,:]
std = 1.96*np.sqrt(self.VOL.volumesigma[i, j,:]/(self.VOL.volumeweight[i, j,:]*self.VOL.volumeweight[i, j,:]))
max410 = np.argmax(RF[l410])
max660 = np.argmax(RF[l660])
# If both picks are significant, store thickness
if RF[l410[max410]] > std[l410[max410]] and RF[l660[max660]] > std[l660[max660]]:
d4101D[i, j] = depths[l410[max410]]
d6601D[i, j] = depths[l660[max660]]
thickness1D[i, j] = (depths[l660[max660]]-depths[l410[max410]])
output.write(str(depths[l410[max410]])+'\t'+str(depths[l660[max660]])+'\n')
output.close()
# Prepare map
m = Basemap(projection='merc', llcrnrlat=np.min(self.VOL.grid_lat), urcrnrlat=np.max(self.VOL.grid_lat), llcrnrlon=np.min(self.VOL.grid_lon), urcrnrlon=np.max(self.VOL.grid_lon), lat_ts=20, resolution='i')
m.drawparallels(np.arange(np.min(self.VOL.grid_lat), np.max(self.VOL.grid_lat), 5.), labels=[1,0,0,1])
m.drawmeridians(np.arange(np.min(self.VOL.grid_lon), np.max(self.VOL.grid_lon), 5.), labels=[1,0,0,1])
m.drawcoastlines(color='k')
m.drawcountries(color='k')
m.drawstates()
m.drawmapboundary(fill_color=[1.0, 1.0, 1.0])
xx, yy = np.meshgrid(self.VOL.grid_lon, self.VOL.grid_lat)
x, y = m(xx, yy)
cs = plt.contourf(x, y, thickness1D.T, levels=np.linspace(Min_Thickness, Max_Thickness, 81.), cmap=cm.RdYlBu)
cs.cmap.set_under('w')#Max_Thickness = 290, Min_Thickness=230
cs.cmap.set_over('w')
plt.colorbar()
plt.title('MTZ width')
#
# Plot moveout aligned on 410 or 660
#
def plot_moveout(self,d660=True,name='Megavolume',filter='rff2',conversion='EU60',factor=2.):
# ah so the bug is caused by the fact my vertical resolution is 2km not 1km?
# Picks all profiles in grid and organizes them by 660 depth
plt.figure(figsize=(8, 8)) # make a square figure
depths = self.VOL.grid_depth # depths created
if d660:
ldisc = [x for x in range(len(depths)) if depths[x] > 630 and depths[x] < 710] ##My 660 does not go above 710
disc = np.arange(630., 710., 2.) #630 710
ldisc2 = [x for x in range(len(depths)) if depths[x] > 380 and depths[x] < 430] #380 430
else: #will plot 410 instead
ldisc = [x for x in range(len(depths)) if depths[x] > 380 and depths[x] < 430]#380 430
disc = np.arange(380, 430, 2.) #380 430
weights = np.empty((len(disc),))
moveout = np.empty((len(disc), len(depths)))
d660l = [] # an empty list created to store depths of points
d410l = []
for i in range(len(self.VOL.grid_lon)):
for j in range(len(self.VOL.grid_lat)): # iterate through each grid point
RF = self.VOL.volume[i, j,:] # The vertical depth stack at the grid point
for k in range(len(depths)): # Look down the vertical
if self.VOL.volumeweight[i, j, k] > 0:
RF[k] = RF[k]/self.VOL.volumeweight[i, j, k] # weight out by volumeweight
# I think this assumes a depth resolution of 1km
std = 1.96*np.sqrt(self.VOL.volumesigma[i, j,:]/(self.VOL.volumeweight[i, j,:]*self.VOL.volumeweight[i, j,:])) ### not sure about this line?
maxdisc = np.argmax(RF[ldisc]) # search for the maximum in the ldisc part - either around the 410 or 660
if RF[ldisc[maxdisc]] > std[ldisc[maxdisc]]: #
d660l.append(depths[ldisc[maxdisc]]) # search for the maximum
n = np.argmin(np.abs(depths[ldisc[maxdisc]]-disc))
weights[n] = weights[n]+1.
moveout[n,:] = moveout[n,:]+RF
maxdisc = np.argmax(RF[ldisc2])
d410l.append(depths[ldisc2[maxdisc]])
d660l = np.array(d660l) # convert the lists to np arrays
d410l = np.array(d410l)
d660c = np.arange(630., 710., 2.) #630 710
d410c = []
err = []
for i in range(len(d660c)):
filt = [d410l[x] for x in range(len(d410l)) if (d660l[x] == d660c[i] and d410l[x] > 360 and d410l[x] < 460)]
d410c.extend([np.mean(filt)])
err.extend([np.std(filt)])
d410c = np.array(d410c)
midp = []; discp = []; discp2 = [] #
for i in range(len(disc)):
if weights[i] > 0:
moveout[i,:] = moveout[i,:]/weights[i]
if d660:
max660 = np.argmax(moveout[i, ldisc])
discp.append(depths[ldisc[max660]])
midp.append(disc[i])
l410 = [x for x in range(len(depths)) if depths[x] > 380 and depths[x] < 430]#380 430
max410 = np.argmax(moveout[i, l410])
discp2.append(depths[l410[max410]])
else:
max410 = np.argmax(moveout[i, ldisc])
discp.append(depths[ldisc[max410]])
midp.append(disc[i])
l660 = [x for x in range(len(depths)) if depths[x] > 630 and depths[x] < 710]#630 710
max660 = np.argmax(moveout[i, l660])
discp2.append(depths[l660[max660]])
ax = plt.subplot2grid((3, 6), (0, 0), colspan=5)
if d660:
n, bins, patches = plt.hist(d660l, np.arange(639., 713., 2.), histtype='bar')
data = zip(bins+1., n) # +1 as the stack resolution is only at even values
cmbl = plt.cm.get_cmap('bone_r')
for d, p in zip(n, patches):
plt.setp(p, 'facecolor', cmbl(d/max(n)*0.6+0.2))
# f=open('histogram_660.txt','wb')
# np.savetxt(f,data,delimiter='\t')
else:
plt.hist(d660l, np.arange(389, 431, 2.), histtype='bar') # this is for the 440 moveout
plt.ylabel('# of grid points')
plt.text(645, 600, 'a.', fontsize=16)
if d660:
# plt.xlabel('660 depth (km)')
plt.xlim([644.9, 691.07])
else:
plt.xlabel('410 depth (km)')
plt.xlim([390, 430])#380 430
plt.gca().set_yticks([0, 100, 300, 500, 700])
plt.gca().set_xticks([650, 660, 670, 680, 690])
ax.spines['top'].set_visible(False)
ax.spines['left'].set_visible(False)
ax.spines['right'].set_visible(False)
ax.get_xaxis().tick_bottom()
ax.get_yaxis().tick_left()
ax = plt.subplot2grid((3, 6), (1, 0), rowspan=2, colspan=5)
cs = plt.pcolor(disc-1., depths, moveout.T, vmin=-0.1, vmax=0.1, rasterized=True, cmap=cm.RdYlBu_r)
plt.plot(midp, discp, '--k', linewidth=2)
plt.plot(midp, discp2, '--k', linewidth=2)
plt.ylabel('Depth (km)')
if d660:
plt.xlim([644.9, 691.07])#630 710
plt.xlabel('660 depth (km)')
else:
plt.xlim([380, 430])#380 430
plt.xlabel('410 depth (km)')
plt.ylim([350, 750])
plt.gca().invert_yaxis()
plt.text(646, 380, 'b.', fontsize=16)
box = plt.gca().get_position()
axcb = plt.axes([box.x0*1.05+box.width*1.05, box.y0+0.1, 0.01, box.height-0.2])
cb = plt.colorbar(cs, cax=axcb, orientation='vertical')
cb.set_ticks([-0.1, 0., .1])
cb.set_label('relative amplitude')
plt.show()
def MTZ_stats():
'''
A nice script that should allow you to compare the 440 to 660 correlations when using the 1-d and 3-d velocity models - has to be run off each volume that you want to compare, it will place it in a new directory 'CCP_Stack/MTZ_files/'+name+'_'+conversion+'_'+model+'_'+str(int(factor)).txt
'''
import sys
import numpy as np
import scipy
from scipy import interpolate
import matplotlib
import matplotlib.pyplot as plt
import numpy as np
import os
import os.path
import math
import msgpack
import shutil
from matplotlib.colors import LogNorm
import matplotlib.cm as cm
from scipy import stats
import pandas
from pandas.tools import plotting
import pickle
import seaborn as sns
import mpl_toolkits
from statsmodels.formula.api import ols
files =['mtz_1D.txt', 'mtz_3D.txt']
for i in [0,1]:#range(len(files)):
file=files[i]
data=np.loadtxt(file)
print(data)
## pandas!
mtz=pandas.DataFrame({'d410':data[:,0],'d660':data[:,1]})
print(mtz.mean())
#plotting.scatter_matrix(mtz,alpha=0.4,figsize=(6,6),diagonal='kde')
#sns.pairplot(mtz, vars=['d410','d660'],kind='reg')
#g = sns.JointGrid('d410','d660',data=mtz)
#g.plot(sns.regplot, sns.distplot, stats.pearsonr);
if i==0:
g = sns.JointGrid(x=mtz.d410,y=mtz.d660,xlim=[370,450],ylim=[630,710], space=0, ratio=2)
if i==1:
g.x=mtz.d410
g.y=mtz.d660
if i==0:
g.plot_joint(sns.kdeplot, shade=True, cmap="PuBu", n_levels=10,alpha=1.0);
if i==1:
g.plot_joint(sns.kdeplot, shade=True, cmap="Greens", n_levels=10,alpha=0.8);
#_ = g.ax_marg_x.hist(mtz["d410"], color="b", alpha=.6,bins=np.arange(370, 450, 10))
#_ = g.ax_marg_y.hist(mtz["d660"], color="b", alpha=.6,orientation="horizontal",bins=np.arange(630, 710, 10))
if i==0:
g.plot_marginals(sns.kdeplot, shade=True,alpha=0.5)
if i==1:
g.plot_marginals(sns.kdeplot, shade=True,alpha=0.5)
if i==1:
g.annotate(stats.pearsonr, template="{stat} = {val:.2f} (p = {p:.2g})",fontsize=14);
print(mtz.mean())
print(mtz.mad())
print(mtz.median())
plt.savefig('dist_aftercorrection.pdf')
plt.show()
|
python
|
# uncompyle6 version 3.7.4
# Python bytecode 3.7 (3394)
# Decompiled from: Python 3.7.9 (tags/v3.7.9:13c94747c7, Aug 17 2020, 18:58:18) [MSC v.1900 64 bit (AMD64)]
# Embedded file name: T:\InGame\Gameplay\Scripts\Server\sims\aging\aging_data.py
# Compiled at: 2019-04-30 00:16:35
# Size of source mod 2**32: 7516 bytes
from sims4.localization import TunableLocalizedStringFactory
from sims4.tuning.tunable import HasTunableSingletonFactory, AutoFactoryInit, TunableMapping, TunableEnumEntry, TunableTuple, TunableRange, TunableReference, Tunable
from sims4.tuning.tunable_base import ExportModes, EnumBinaryExportType
import sims4.resources
from interactions.utils.death import DeathType
from interactions.utils.tunable_icon import TunableIcon
from sims.aging.aging_transition import TunableAgingTransitionReference
from sims.sim_info_types import Age
import services
logger = sims4.log.Logger('AgingData')
class AgingData(HasTunableSingletonFactory, AutoFactoryInit):
    """Tuning container describing the ages available to a Sim type.

    Decompiled tuning: maps each available Age to its transition/CAS data,
    and holds the interactions and trait modifiers used for aging up, old
    age, and elder bonus days.
    """
    # Decompiled tunable declarations -- structure mirrors the original
    # game tuning; do not reorder keys.
    FACTORY_TUNABLES = {'ages':TunableMapping(description='\n All available ages for this Sim, and any data associated with that\n specific age.\n ',
      key_type=TunableEnumEntry(description='\n The available age for the Sim.\n ',
      tunable_type=Age,
      default=(Age.ADULT),
      binary_type=(EnumBinaryExportType.EnumUint32)),
      value_type=TunableTuple(description='\n Any further data associated with this age.\n ',
      transition=TunableAgingTransitionReference(description='\n The transition data associated with this age, such as\n dialogs, notifications, durations, etc...\n ',
      pack_safe=True),
      personality_trait_count=TunableRange(description='\n The number of traits available to a Sim of this age.\n ',
      tunable_type=int,
      default=3,
      minimum=0,
      export_modes=(ExportModes.All)),
      cas_icon=TunableIcon(description='\n Icon to be displayed in the ui for the age.\n ',
      export_modes=(ExportModes.ClientBinary)),
      cas_icon_selected=TunableIcon(description='\n Icon to be displayed in the UI for the age when buttons are\n selected.\n ',
      export_modes=(ExportModes.ClientBinary)),
      cas_name=TunableLocalizedStringFactory(description='\n The name to be displayed in the UI for the age.\n ',
      export_modes=(ExportModes.ClientBinary)),
      export_class_name='AvailableAgeDataTuple'),
      minlength=1,
      tuple_name='AvailableAgeDataMapping'),
     'age_up_interaction':TunableReference(description='\n The default interaction that ages Sims up. This is called when Sims\n auto-age or when the "Age Up" cheat is invoked.\n ',
      manager=services.get_instance_manager(sims4.resources.Types.INTERACTION),
      pack_safe=True),
     'old_age_interaction':TunableReference(description='\n The default interaction that transitions a Sim from old age to\n death.\n ',
      manager=services.get_instance_manager(sims4.resources.Types.INTERACTION),
      pack_safe=True),
     'old_age_npc_death_type_fallback':TunableEnumEntry(description="\n Used if the Old Age Interaction is not a death interaction. In that\n case, the non-instanced NPCs are not running the interaction but also\n can't get their death type from the interaction's tuning. This value\n is used as a fallback. The NPC's death type set to this value, and \n it will effectively become a ghost.\n ",
      tunable_type=DeathType,
      default=DeathType.NONE,
      pack_safe=True),
     'bonus_days':TunableMapping(description='\n Specify how much bonus time is added to elder Sims\n possessing these traits.\n ',
      key_type=TunableReference(description='\n The trait associated with this modifier.\n ',
      manager=(services.get_instance_manager(sims4.resources.Types.TRAIT)),
      pack_safe=True),
      value_type=Tunable(description='\n The modifier associated with this trait.\n ',
      tunable_type=float,
      default=0))}
def get_age_transition_data(self, age):
return self.ages[age].transition
def get_birth_age(self):
return min(self.ages)
def get_lifetime_duration(self, sim_info):
total_lifetime = sum((age_data.transition.get_age_duration(sim_info) for age_data in self.ages.values()))
aging_service = services.get_aging_service()
total_lifetime /= aging_service.get_speed_multiple(aging_service.aging_speed)
return total_lifetime
def get_lifetime_bonus(self, sim_info):
lifetime_duration = self.get_lifetime_duration(sim_info)
bonus_multiplier = sum((modifier for trait, modifier in self.bonus_days.items() if sim_info.has_trait(trait)))
return lifetime_duration * bonus_multiplier
def get_personality_trait_count(self, age):
age_data = self.ages.get(age, None)
if age_data is None:
raise ValueError('{} is not in {}'.format(age, self.ages))
return age_data.personality_trait_count
def get_next_age(self, age):
ages = tuple(sorted(self.ages))
for current_age, next_age in zip(ages, ages[1:]):
if current_age <= age < next_age:
return next_age
raise ValueError('There is no age after {}'.format(age))
    def get_previous_age(self, age):
        """Return the age stage immediately before *age*.

        NOTE(review): unlike get_next_age, this falls through and implicitly
        returns None when no earlier stage brackets *age* -- possibly a
        missing ValueError (decompiled source may be truncated); confirm
        against callers.
        """
        ages = tuple(sorted(self.ages))
        for previous_age, current_age in zip(ages, ages[1:]):
            if previous_age < age <= current_age:
                return previous_age
|
python
|
"""Tests for web app requests"""
from unittest import TestCase
import sodar_taskflow
from apis.irods_utils import init_irods, cleanup_irods_data
class AppTestBase(TestCase):
    """Base test class for web app.

    Opens a test-mode iRODS connection and a Flask test client before each
    test, and wipes leftover iRODS data afterwards.
    """

    def setUp(self):
        # Init iRODS connection
        self.irods = init_irods(test_mode=True)
        self.app = sodar_taskflow.app.test_client()

    def tearDown(self):
        # Remove leftover data from iRODS.  (The redundant trailing `pass`
        # statements after real code were removed.)
        cleanup_irods_data(self.irods, verbose=False)
class TestHello(AppTestBase):
    """Tests for the hello world page"""

    def test_hello_render(self):
        """Test rendering of hello page (to ensure we can connect to app)"""
        # url = settings.SERVER_NAME + '/hello'
        response = self.app.get('/hello')
        # The app must answer 200 with the exact greeting body.
        self.assertEqual(response.status_code, 200)
        self.assertEqual(response.data, b'Hello world from sodar_taskflow!')
# TODO: Tests for submit & cleanup
|
python
|
from django.db import models
from django.urls import reverse
class Question(models.Model):
    """A poll question."""
    # Text of the question shown to voters.
    question_text = models.CharField(max_length=200)
    # When the question was made public.
    pub_date = models.DateTimeField('Date published')
class Choice(models.Model):
    """One selectable answer belonging to a Question."""
    # Deleting the parent Question cascades to its choices.
    question = models.ForeignKey(Question, on_delete=models.CASCADE)
    choice_text = models.CharField(max_length=200)
    # Running tally of votes cast for this choice.
    votes = models.IntegerField(default=0)
class Song(models.Model):
    """A song with basic catalogue metadata."""
    song_name = models.CharField(max_length=200)
    song_artist = models.CharField(max_length = 200)
    release_year = models.IntegerField(default=2000)

    def __str__(self):
        """Display songs by name in the admin and in listings."""
        return self.song_name

    def get_absolute_url(self):
        """Redirect target after create/update: the song's edit page."""
        return reverse('song_edit', kwargs={'pk': self.pk})
class Track(models.Model):
    """An audio track together with its audio-analysis features."""
    track_id = models.CharField(max_length=30)
    track_name = models.CharField(max_length=500)
    track_artist = models.CharField(max_length=500)
    track_duration = models.CharField(max_length=10)
    track_popularity = models.IntegerField(default=100)
    # max_length is only meaningful on CharField; Django's system checks
    # flag it on IntegerField (fields.W122) and it is ignored on FloatField,
    # so the invalid kwargs were removed from the numeric fields below.
    track_danceability = models.FloatField()
    track_energy = models.FloatField()
    track_key = models.IntegerField()
    track_loudness = models.FloatField()
    track_speechiness = models.FloatField()
    track_acousticness = models.FloatField()
    track_instrumentalness = models.FloatField()
    track_liveness = models.FloatField()
    track_valence = models.FloatField()
    track_tempo = models.FloatField()

    def __str__(self):
        """Display tracks by name."""
        return self.track_name
|
python
|
#!/usr/bin/env python2
"""
os_path_test.py: Tests for os_path.py
"""
from __future__ import print_function
import unittest
from pylib import os_path # module under test
class OsPathTest(unittest.TestCase):
    """Unit tests for the os_path module."""

    def testBasename(self):
        result = os_path.basename('foo/bar')
        self.assertEqual('bar', result)
# Allow running this test module directly from the command line.
if __name__ == '__main__':
  unittest.main()
|
python
|
from abc import ABCMeta, abstractmethod
class OgreMeshSerializerListener(metaclass=ABCMeta):
    """
    This class allows users to hook into the mesh loading process and
    modify references within the mesh as they are loading. Material and
    skeletal references can be processed using this interface which allows
    finer control over resources.
    """
    # BUGFIX: `abstractmethod` was used without being imported, so merely
    # defining this class raised NameError; the import is now in place.
    # The stray C-style semicolons were also removed.

    @abstractmethod
    def processMaterialName(self, mesh, name):
        """Hook invoked for each material name referenced by *mesh*."""
        raise NotImplementedError

    @abstractmethod
    def processSkeletonName(self, mesh, name):
        """Hook invoked for the skeleton name referenced by *mesh*."""
        raise NotImplementedError

    @abstractmethod
    def processMeshCompleted(self, mesh):
        """Hook invoked once *mesh* has finished loading."""
        raise NotImplementedError
|
python
|
import os
import dill
import numpy as np
from math import pi, exp, atan, sqrt, acos
def angle(x, y):
    """calculates the angle of (x,y) with respect to (0,0)

    :param x: x coordinate
    :param y: y coordinate
    :returns: the angle, in radians, in the range [0, 2*pi)"""
    if x == 0:
        # atan(y/x) is undefined on the y-axis; handle it explicitly
        # instead of raising ZeroDivisionError.
        if y > 0:
            return pi/2
        if y < 0:
            return 3*pi/2
        return 0.0
    at = atan(y/x)
    if(x < 0): return at+pi      # second/third quadrant
    elif(y < 0): return at+2*pi  # fourth quadrant
    return at
def angle_between_vectors(v, w):
    """calculates the angle between two vectors (any dimensional)

    :param v: vector 1
    :param w: vector 2
    :returns: the angle"""
    dot = sum(v[i]*w[i] for i in range(len(v)))
    cos_theta = dot/(L2_norm(v)*L2_norm(w))
    # Guard against floating-point overshoot outside acos' [-1, 1] domain.
    if cos_theta > 1:
        return 0
    if cos_theta < -1:
        return pi
    return acos(cos_theta)
def L1_norm(seq):
    """calculates the L1 norm (sum of absolute values) of a sequence or vector

    :param seq: the sequence or vector
    :returns: the L1 norm"""
    return sum(abs(component) for component in seq)
def L2_norm(seq):
    """calculates the L2 (Euclidean) norm of a sequence or vector

    :param seq: the sequence or vector
    :returns: the L2 norm"""
    return sqrt(sum(component**2 for component in seq))
def Linf_norm(seq):
    """calculates the Linf norm (largest absolute value) of a sequence or vector

    :param seq: the sequence or vector
    :returns: the Linf norm (0 for an empty sequence)"""
    return max(map(abs, seq), default=0)
def linear_interpolation(listx, listy, argument):
    """calculates the linear interpolation of [listx,listy] at argument

    :param listx: x coordinates (should be ordered in ascending order)
    :param listy: y coordinates
    :param argument: where to evaluate the linear interpolation
    :returns: value of the linear interpolation at argument"""
    # Exact grid point: return the stored value directly.
    if argument in listx:
        return listy[listx.index(argument)]
    # Left of the grid: extrapolate from the first segment.
    if argument < listx[0]:
        return listy[0] + (listy[0]-listy[1])/(listx[1]-listx[0])*(listx[0]-argument)
    # Right of the grid: extrapolate from the last segment.
    if argument > listx[-1]:
        return listy[-1] + (listy[-1]-listy[-2])/(listx[-1]-listx[-2])*(argument-listx[-1])
    # Inside the grid: walk to the bracketing segment and interpolate.
    index = 0
    while not (listx[index] < argument < listx[index+1]):
        index += 1
    return listy[index] + (listy[index+1]-listy[index])/(listx[index+1]-listx[index])*(argument-listx[index])
def gaussian(x, std=1):
    """returns a Gaussian distribution

    :param x: variable
    :param std: standard deviation (default 1)
    :returns: Gaussian PDF"""
    normalization = 1/(std*sqrt(2*pi))
    return normalization*exp(-x**2/(2*std**2))
def gaussian_der(x, std=1):
    """returns a Gaussian derivative distribution (x*PDF_normal)

    :param x: variable
    :param std: the standard deviation
    :returns: positive half of the Gaussian derivative PDF (0 for x < 0)"""
    if x < 0:
        return 0
    coefficient = (2-pi/2)/std**2
    return coefficient*x*exp(-(2-pi/2)*x**2/(2*std**2))
def kernel_convolve(seq, kernel, kernel_width):
    """convolves a sequence with a kernel

    :param seq: sequence to be convolved
    :param kernel: the kernel, a probability distribution function (does not have to be normalized)
    :param kernel_width: the width of the kernel (std for Gaussian)
    :returns: convolved sequence"""
    # Sample the kernel on +/- 3 widths and normalize it to unit area.
    half_span = round(3*kernel_width)
    taps = np.array([kernel(offset/kernel_width) for offset in range(-half_span, half_span)])
    taps = taps/sum(taps)
    return np.convolve(seq, taps, mode='same')
def save_object_dill(obj, filename, save_path="objects_save"):
    """saves an object with dill (also makes the 'objects_save/' directory if it wasn't there)

    :param obj: object to save
    :param filename: filename without any extension
    :param save_path: path where to save object, starting from the directory of the simulation (default "objects_save")"""
    # exist_ok avoids the check-then-create race of exists()+makedirs().
    os.makedirs(save_path, exist_ok=True)
    # A context manager guarantees the handle is closed even if dill.dump
    # raises (the original leaked the file object in that case).
    with open(save_path+"/"+filename+'.pickle', 'wb') as f:
        dill.dump(obj, f)
def load_object_dill(filename, load_path="objects_save"):
    """loads an object with dill (from 'objects_save/' directory)

    :param filename: filename without any extension
    :param load_path: path where to load object from, starting from the directory of the simulation (default "objects_save")
    :returns: saved object"""
    # A context manager guarantees the handle is closed even if dill.load
    # raises (the original leaked the file object in that case).
    with open(load_path+"/"+filename+'.pickle', 'rb') as f:
        return dill.load(f)
|
python
|
# -*- coding: utf-8 -*-
# Copyright (C) 2016 Adrien Vergรฉ
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import os.path
import pathspec
import yaml
from . import rules
class YamlLintConfigError(Exception):
    """Raised when a yamllint configuration is malformed or invalid."""
    pass
class YamlLintConfig(object):
    """In-memory representation of a yamllint configuration.

    Holds the per-rule configuration mapping (``self.rules``) and an
    optional pathspec of files to ignore (``self.ignore``); supports
    extending a base configuration via the ``extends`` key.
    """

    def __init__(self, content=None, file=None):
        # Exactly one of `content` (raw YAML string) or `file` (path) must
        # be given.
        assert (content is None) ^ (file is None)
        self.ignore = None
        if file is not None:
            with open(file) as f:
                content = f.read()
        self.parse(content)
        self.validate()

    def is_file_ignored(self, filepath):
        # True when a file-level ignore pattern matches `filepath`.
        return self.ignore and self.ignore.match_file(filepath)

    def enabled_rules(self, filepath):
        # Rules that are not disabled and whose per-rule ignore patterns
        # (if any) do not match `filepath`.
        return [rules.get(id) for id, val in self.rules.items()
                if val is not False and (
                    filepath is None or 'ignore' not in val or
                    not val['ignore'].match_file(filepath))]

    def extend(self, base_config):
        """Merge this config on top of `base_config` (this config wins)."""
        assert isinstance(base_config, YamlLintConfig)
        for rule in self.rules:
            # Dict-valued rule confs are merged into the base's dict;
            # scalar values (False/'enable') simply override it.
            if (type(self.rules[rule]) == dict and
                    rule in base_config.rules and
                    base_config.rules[rule] is not False):
                base_config.rules[rule].update(self.rules[rule])
            else:
                base_config.rules[rule] = self.rules[rule]
        self.rules = base_config.rules
        if base_config.ignore is not None:
            self.ignore = base_config.ignore

    def parse(self, raw_content):
        """Parse the raw YAML config, resolving any `extends` base."""
        try:
            conf = yaml.safe_load(raw_content)
        except Exception as e:
            raise YamlLintConfigError('invalid config: %s' % e)
        if type(conf) != dict:
            raise YamlLintConfigError('invalid config: not a dict')
        self.rules = conf.get('rules', {})
        # Does this conf override another conf that we need to load?
        if 'extends' in conf:
            path = get_extended_config_file(conf['extends'])
            base = YamlLintConfig(file=path)
            try:
                self.extend(base)
            except Exception as e:
                raise YamlLintConfigError('invalid config: %s' % e)
        if 'ignore' in conf:
            if type(conf['ignore']) != str:
                raise YamlLintConfigError(
                    'invalid config: ignore should contain file patterns')
            self.ignore = pathspec.PathSpec.from_lines(
                'gitwildmatch', conf['ignore'].splitlines())

    def validate(self):
        # Normalize/validate every rule's configuration in place.
        for id in self.rules:
            try:
                rule = rules.get(id)
            except Exception as e:
                raise YamlLintConfigError('invalid config: %s' % e)
            self.rules[id] = validate_rule_conf(rule, self.rules[id])
def validate_rule_conf(rule, conf):
    """Normalize and validate a single rule's configuration.

    Returns False for disabled rules, otherwise the validated conf dict
    (with 'level' defaulted to 'error' and 'ignore' compiled to a
    PathSpec). Raises YamlLintConfigError on any invalid option.
    """
    if conf is False or conf == 'disable':
        return False
    elif conf == 'enable':
        conf = {}
    if type(conf) == dict:
        # Compile a per-rule ignore list (file patterns), if present.
        if ('ignore' in conf and
                type(conf['ignore']) != pathspec.pathspec.PathSpec):
            if type(conf['ignore']) != str:
                raise YamlLintConfigError(
                    'invalid config: ignore should contain file patterns')
            conf['ignore'] = pathspec.PathSpec.from_lines(
                'gitwildmatch', conf['ignore'].splitlines())
        if 'level' not in conf:
            conf['level'] = 'error'
        elif conf['level'] not in ('error', 'warning'):
            raise YamlLintConfigError(
                'invalid config: level should be "error" or "warning"')
        options = getattr(rule, 'CONF', {})
        for optkey in conf:
            if optkey in ('ignore', 'level'):
                continue
            if optkey not in options:
                raise YamlLintConfigError(
                    'invalid config: unknown option "%s" for rule "%s"' %
                    (optkey, rule.ID))
            # A rule option declared as a tuple lists allowed values and/or
            # allowed types; otherwise it is a single required type.
            if type(options[optkey]) == tuple:
                if (conf[optkey] not in options[optkey] and
                        type(conf[optkey]) not in options[optkey]):
                    raise YamlLintConfigError(
                        'invalid config: option "%s" of "%s" should be in %s'
                        % (optkey, rule.ID, options[optkey]))
            else:
                if type(conf[optkey]) != options[optkey]:
                    raise YamlLintConfigError(
                        'invalid config: option "%s" of "%s" should be %s'
                        % (optkey, rule.ID, options[optkey].__name__))
        # Every declared option must be present in the conf.
        for optkey in options:
            if optkey not in conf:
                raise YamlLintConfigError(
                    'invalid config: missing option "%s" for rule "%s"' %
                    (optkey, rule.ID))
    else:
        raise YamlLintConfigError(('invalid config: rule "%s": should be '
                                   'either "enable", "disable" or a dict')
                                  % rule.ID)
    return conf
def get_extended_config_file(name):
    """Resolve an 'extends' value to a configuration file path.

    A bare name (no '/') is first looked up among the standard
    configurations shipped with yamllint; anything else, or a bare name
    with no matching bundled file, is returned unchanged as a filesystem
    path.
    """
    if '/' not in name:
        bundled = os.path.join(os.path.dirname(os.path.realpath(__file__)),
                               'conf', name + '.yaml')
        if os.path.isfile(bundled):
            return bundled
    return name
|
python
|
# Copyright (C) 2014 Universidad Politecnica de Madrid
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from __future__ import absolute_import
import abc
import datetime
import six
import uuid
from keystone import exception
from keystone import notifications
from keystone.common import dependency
from keystone.common import extension
from keystone.common import manager
from keystone.openstack.common import log
from oslo.utils import timeutils
LOG = log.getLogger(__name__)
# Metadata registered with Keystone's extension discovery endpoints below.
EXTENSION_DATA = {
    'name': 'Keystone User Registration API',
    'namespace': 'http://docs.openstack.org/identity/api/ext/'
                 'OS-REGISTRATION/v1.0',
    'alias': 'OS-REGISTRATION',
    'description': 'Handles creating users with activation through a key',
    'links': [
        {
            'rel': 'describedby',
            # TODO(garcianavalon): needs a description
            'type': 'text/html',
            'href': 'https://github.com/ging/keystone',
        }
    ]}
extension.register_admin_extension(EXTENSION_DATA['alias'], EXTENSION_DATA)
extension.register_public_extension(EXTENSION_DATA['alias'], EXTENSION_DATA)

# TODO(garcianavalon) extract as configuration options in keystone.conf
# Durations are in seconds (28800 s = 8 h); see _calculate_expiry_date.
ACTIVATION_KEY_DURATION = 28800
RESET_TOKEN_DURATION = 28800
DEFAULT_ROLE_ID = ''
DEFAULT_ROLE_NAME = 'owner'
@dependency.requires('assignment_api', 'identity_api')
@dependency.provider('registration_api')
class Manager(manager.Manager):
    """Manager.

    See :mod:`keystone.common.manager.Manager` for more details on
    how this dynamically calls the backend.
    """

    def __init__(self):
        # Subscribe to user-deletion notifications so registration
        # profiles are cleaned up with the user.
        self.event_callbacks = {
            notifications.ACTIONS.deleted: {
                'user': [self.delete_user_projects],
            },
        }
        super(Manager, self).__init__(
            'keystone.contrib.user_registration.backends.sql.Registration')

    # TODO(garcianavalon) set as configuration option in keystone.conf
    def delete_user_projects(self, service, resource_type, operation,
                             payload):
        """Notification callback: remove the deleted user's profiles."""
        user_id = payload['resource_info']
        # cloud_project_id = ''
        # default_project_id = ''
        # projects_to_delete = []
        # for project_id in projects_to_delete:
        #     self.assignment_api.delete_project(project_id)
        #     LOG.info('Deleted project %s because user %s was deleted',
        #              project_id, user_id)
        # delete profiles
        self.driver.delete_user_profiles(user_id)

    def request_password_reset(self, user_id):
        """ Prepares a reset profile for the user"""
        profile_ref = {
            'user_id': user_id,
            'expires_at': self._calculate_expiry_date(RESET_TOKEN_DURATION),
            'id': uuid.uuid4().hex,
            'reset_token': uuid.uuid4().hex,
        }
        return self.driver.create_reset_profile(profile_ref)

    def register_user(self, user_ref, cloud_project_id):
        """ Translates the user_ref to an activation profile."""
        profile_ref = {
            'user_id': user_ref['id'],
            'project_id': user_ref['default_project_id'],
            'cloud_project_id': cloud_project_id,
            'expires_at': self._calculate_expiry_date(ACTIVATION_KEY_DURATION),
            'id': uuid.uuid4().hex,
            'activation_key': uuid.uuid4().hex,
        }
        return self.driver.create_activation_profile(profile_ref)

    def _calculate_expiry_date(self, duration_in_seconds):
        # Expiry timestamps are computed in UTC via oslo timeutils.
        expire_delta = datetime.timedelta(seconds=duration_in_seconds)
        return timeutils.utcnow() + expire_delta

    def get_default_role(self):
        """ Obtains the default role to give the user in his default organization. If
        the role doesn't exists creates a new one.
        """
        # NOTE(garcianavalon) mimick v2 Identity API behaviour where both
        # name and id are defined in keystone.conf. But it doesn't look like the
        # perfect solution, are there other better options to handle this?
        if not DEFAULT_ROLE_ID:
            # No configured id: look the role up by name instead.
            default_role = next(role
                        for role in self.assignment_api.list_roles()
                        if role['name'] == DEFAULT_ROLE_NAME)
        else:
            try:
                default_role = self.assignment_api.get_role(DEFAULT_ROLE_ID)
            except exception.RoleNotFound:
                LOG.info(("Creating the default role {0} because it does not \
                    exist.").format(DEFAULT_ROLE_ID))
                role = {'id': DEFAULT_ROLE_ID,
                        'name': DEFAULT_ROLE_NAME}
                default_role = self.assignment_api.create_role(DEFAULT_ROLE_ID, role)
        return default_role

    def new_activation_key(self, user_id):
        """Regenerate the activation key and push the expiry forward."""
        profile_ref = self.driver.get_activation_profile(user_id)
        profile_ref['expires_at'] = self._calculate_expiry_date(ACTIVATION_KEY_DURATION)
        profile_ref['activation_key'] = uuid.uuid4().hex
        return self.driver.store_new_activation_key(
            profile_ref['id'], profile_ref)

    def _assert_expired(self, ref):
        # Raise Forbidden when the profile's expiry time has passed;
        # otherwise return the profile unchanged.
        current_time = timeutils.normalize_time(timeutils.utcnow())
        expires = ref['expires_at']
        if current_time > timeutils.normalize_time(expires):
            raise exception.Forbidden()
        return ref

    def get_activation_profile(self, user_id, activation_key=None,
                               check_expired=True):
        """Fetch an activation profile, optionally enforcing expiry."""
        profile_ref = self.driver.get_activation_profile(user_id, activation_key)
        if check_expired:
            profile_ref = self._assert_expired(profile_ref)
        return profile_ref

    def get_reset_profile(self, user_id, reset_token,
                          check_expired=True):
        """Fetch a password-reset profile, optionally enforcing expiry."""
        profile_ref = self.driver.get_reset_profile(user_id, reset_token)
        if check_expired:
            profile_ref = self._assert_expired(profile_ref)
        return profile_ref
@six.add_metaclass(abc.ABCMeta)
class Driver(object):
    """Interface description for drivers

    Backends (e.g. the SQL backend referenced by Manager.__init__) must
    implement every abstract method below.
    """

    @abc.abstractmethod
    def create_activation_profile(self, activation_profile):
        """Create an activation_profile for a newly registered user

        :param activation_profile: activation_profile data
        :type activation_profile: dict
        :returns: activation_profile

        """
        raise exception.NotImplemented()

    @abc.abstractmethod
    def store_new_activation_key(self, profile_id, profile_ref):
        """Update key and expires_at of an activation profile

        :param profile_id: profile_id
        :type profile_id: string
        :param profile_ref: the new data to store
        :type profile_ref: dict
        :returns: activation_profile

        """
        raise exception.NotImplemented()

    @abc.abstractmethod
    def get_activation_profile(self, user_id, activation_key=None):
        """Get activation_profile details for a user, if the key is valid

        :param user_id: id of user that wants to activate
        :type user_id: string
        :param activation_key: provided in the registration process
        :type activation_key: string
        :returns: activation_profile

        """
        raise exception.NotImplemented()

    @abc.abstractmethod
    def create_reset_profile(self, reset_profile):
        """Register a user reset password request

        :param reset_profile: reset_profile data
        :type reset_profile: dict
        :returns: reset_profile

        """
        raise exception.NotImplemented()

    @abc.abstractmethod
    def get_reset_profile(self, user_id, reset_token):
        """Get reset_profile details, if the token is valid

        :param user_id: id of user that wants to activate
        :type user_id: string
        :param reset_token: provided in the registration process
        :type reset_token: string
        :returns: reset_profile

        """
        raise exception.NotImplemented()

    @abc.abstractmethod
    def delete_user_profiles(self, user_id):
        """Delete all user profiles in the database

        :param user_id: id of user that wants to activate
        :type user_id: string
        :returns: None

        """
        raise exception.NotImplemented()
|
python
|
# -*- coding: utf-8 -*-
# Form implementation generated from reading ui file '/home/david/projects/luxcorerender/LuxCore/src/pyluxcoretools/pyluxcoretools/pyluxcoremenu/menuwindow.ui',
# licensing of '/home/david/projects/luxcorerender/LuxCore/src/pyluxcoretools/pyluxcoretools/pyluxcoremenu/menuwindow.ui' applies.
#
# Created: Sat Jan 18 11:52:29 2020
# by: pyside2-uic running on PySide2 5.13.2
#
# WARNING! All changes made in this file will be lost!
from PySide2 import QtCore, QtGui, QtWidgets
class Ui_MenuWindow(object):
    """Auto-generated (pyside2-uic) UI definition for the tools menu window.

    NOTE(review): this class is generated from ``menuwindow.ui`` -- edit the
    .ui file and regenerate rather than changing this code by hand (see the
    "All changes will be lost" warning in the file header).
    """

    def setupUi(self, MenuWindow):
        """Build the widget tree, menu bar, and signal wiring on *MenuWindow*."""
        MenuWindow.setObjectName("MenuWindow")
        MenuWindow.resize(260, 240)
        sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Preferred, QtWidgets.QSizePolicy.Preferred)
        sizePolicy.setHorizontalStretch(0)
        sizePolicy.setVerticalStretch(0)
        sizePolicy.setHeightForWidth(MenuWindow.sizePolicy().hasHeightForWidth())
        MenuWindow.setSizePolicy(sizePolicy)
        # Central widget: vertical stack of two tool buttons, a stretch
        # spacer, and a Quit button.
        self.centralwidget = QtWidgets.QWidget(MenuWindow)
        self.centralwidget.setObjectName("centralwidget")
        self.verticalLayout = QtWidgets.QVBoxLayout(self.centralwidget)
        self.verticalLayout.setObjectName("verticalLayout")
        self.pushButtonNetNode = QtWidgets.QPushButton(self.centralwidget)
        self.pushButtonNetNode.setObjectName("pushButtonNetNode")
        self.verticalLayout.addWidget(self.pushButtonNetNode)
        self.pushButtonNetConsole = QtWidgets.QPushButton(self.centralwidget)
        self.pushButtonNetConsole.setObjectName("pushButtonNetConsole")
        self.verticalLayout.addWidget(self.pushButtonNetConsole)
        spacerItem = QtWidgets.QSpacerItem(20, 40, QtWidgets.QSizePolicy.Minimum, QtWidgets.QSizePolicy.Expanding)
        self.verticalLayout.addItem(spacerItem)
        self.pushButtonQuit = QtWidgets.QPushButton(self.centralwidget)
        self.pushButtonQuit.setObjectName("pushButtonQuit")
        self.verticalLayout.addWidget(self.pushButtonQuit)
        MenuWindow.setCentralWidget(self.centralwidget)
        # Menu bar with a single "Tools" menu holding the Quit action.
        self.menubar = QtWidgets.QMenuBar(MenuWindow)
        self.menubar.setGeometry(QtCore.QRect(0, 0, 260, 25))
        self.menubar.setObjectName("menubar")
        self.menuTools = QtWidgets.QMenu(self.menubar)
        self.menuTools.setObjectName("menuTools")
        MenuWindow.setMenuBar(self.menubar)
        self.actionQuit = QtWidgets.QAction(MenuWindow)
        self.actionQuit.setObjectName("actionQuit")
        self.menuTools.addAction(self.actionQuit)
        self.menubar.addAction(self.menuTools.menuAction())
        self.retranslateUi(MenuWindow)
        # NOTE(review): old-style string-based SIGNAL connections below are
        # what pyside2-uic emitted; the buttons delegate to MenuWindow's
        # clickedNetNode / clickedNetConsole slots.
        QtCore.QObject.connect(self.actionQuit, QtCore.SIGNAL("activated()"), MenuWindow.close)
        QtCore.QObject.connect(self.pushButtonQuit, QtCore.SIGNAL("clicked()"), MenuWindow.close)
        QtCore.QObject.connect(self.pushButtonNetNode, QtCore.SIGNAL("clicked()"), MenuWindow.clickedNetNode)
        QtCore.QObject.connect(self.pushButtonNetConsole, QtCore.SIGNAL("clicked()"), MenuWindow.clickedNetConsole)
        QtCore.QMetaObject.connectSlotsByName(MenuWindow)
        MenuWindow.setTabOrder(self.pushButtonNetNode, self.pushButtonNetConsole)
        MenuWindow.setTabOrder(self.pushButtonNetConsole, self.pushButtonQuit)

    def retranslateUi(self, MenuWindow):
        """Apply translated window title, button labels, and shortcuts."""
        MenuWindow.setWindowTitle(QtWidgets.QApplication.translate("MenuWindow", "PyLuxCore Tools Menu", None, -1))
        self.pushButtonNetNode.setText(QtWidgets.QApplication.translate("MenuWindow", "NetNode", None, -1))
        self.pushButtonNetConsole.setText(QtWidgets.QApplication.translate("MenuWindow", "NetConsole", None, -1))
        self.pushButtonQuit.setText(QtWidgets.QApplication.translate("MenuWindow", "Quit", None, -1))
        self.menuTools.setTitle(QtWidgets.QApplication.translate("MenuWindow", "Tools", None, -1))
        self.actionQuit.setText(QtWidgets.QApplication.translate("MenuWindow", "&Quit", None, -1))
        self.actionQuit.setShortcut(QtWidgets.QApplication.translate("MenuWindow", "Ctrl+Q", None, -1))
|
python
|
from bflib.items.base import Item
from core import flags
from bfgame.factories.recipes import listing
from core.gameobject import GameObject
class ItemFactory(object):
    """Builds item GameObjects from base item definitions via recipes."""

    name = "item"
    type_map = Item

    def __init__(self, game):
        self.game = game

    def create_new(self, base_item):
        """Create a fully-assembled GameObject for *base_item*.

        :raises Exception: when no recipe is registered for the item.
        """
        recipe = listing.get_recipe(base_item)
        if recipe is None:
            raise Exception("Found no recipes for item {}".format(base_item))
        components = self.get_recursive_components(base_item, recipe)
        item = GameObject(
            game=self.game,
            base=base_item,
            blocking=False,
            name=base_item.name,
        )
        # NOTE(review): tagging an *item* with the Character flag looks like
        # a copy-paste from a character factory -- confirm before changing.
        item.flags.add(flags.GameObjectFlags.Character)
        for component in components:
            item.register_component(component)
        return item

    def get_recursive_components(self, base_item, recipe, result_components=None):
        """Collect components from *recipe* and, recursively, every recipe
        it depends on, appending into *result_components*."""
        if result_components is None:
            result_components = []
        result_components.extend(recipe.build_components(base_item, self.game))
        for dependency in recipe.depends_on:
            self.get_recursive_components(base_item, dependency, result_components)
        return result_components
|
python
|
num = [1, 2, 3, 4]
# Keep only the even values (comprehension instead of filter/lambda).
evens = [value for value in num if value % 2 == 0]
print(evens)
|
python
|
from aclarknet.aclarknet.models import Client
from aclarknet.aclarknet.models import Service
from aclarknet.aclarknet.models import TeamMember
from aclarknet.aclarknet.models import Testimonial
from django.contrib import admin
# Register each model with the default admin site using the decorator form;
# equivalent to explicit admin.site.register(...) calls, in the same order.
@admin.register(Client)
class ClientAdmin(admin.ModelAdmin):
    """Default admin configuration for Client."""


@admin.register(Service)
class ServiceAdmin(admin.ModelAdmin):
    """Default admin configuration for Service."""


@admin.register(TeamMember)
class TeamMemberAdmin(admin.ModelAdmin):
    """Default admin configuration for TeamMember."""


@admin.register(Testimonial)
class TestimonialAdmin(admin.ModelAdmin):
    """Default admin configuration for Testimonial."""
|
python
|
import unittest
from pydantic import ValidationError
from mobile_handset_price_model.prediction.model import MobileHandsetPriceModel
from mobile_handset_price_model.prediction.schemas import MobileHandsetPriceModelInput, \
MobileHandsetPriceModelOutput, PriceEnum
class ModelTests(unittest.TestCase):
    """Unit tests for MobileHandsetPriceModel prediction behaviour."""

    def test_model(self):
        """A fully populated input yields a typed prediction."""
        # arrange
        model = MobileHandsetPriceModel()
        payload = {
            "battery_power": 842,
            "has_bluetooth": True,
            "clock_speed": 2.2,
            "has_dual_sim": False,
            "front_camera_megapixels": 1,
            "has_four_g": False,
            "internal_memory": 7,
            "depth": 0.6,
            "weight": 188,
            "number_of_cores": 2,
            "primary_camera_megapixels": 2,
            "pixel_resolution_height": 20,
            "pixel_resolution_width": 756,
            "ram": 2549,
            "screen_height": 9,
            "screen_width": 7,
            "talk_time": 19,
            "has_three_g": False,
            "has_touch_screen": False,
            "has_wifi": True,
        }
        # act
        model_input = MobileHandsetPriceModelInput(**payload)
        prediction = model.predict(model_input)
        # assert
        self.assertIs(type(prediction), MobileHandsetPriceModelOutput)
        self.assertIs(type(prediction.price_range), PriceEnum)

    def test_model_with_missing_optional_fields(self):
        """Supplying only the boolean fields still produces a prediction."""
        # arrange
        model = MobileHandsetPriceModel()
        payload = {
            "has_bluetooth": False,
            "has_dual_sim": False,
            "has_four_g": False,
            "has_three_g": False,
            "has_touch_screen": False,
            "has_wifi": True,
        }
        # act
        model_input = MobileHandsetPriceModelInput(**payload)
        prediction = model.predict(model_input)
        # assert
        self.assertIs(type(prediction), MobileHandsetPriceModelOutput)
        self.assertIs(type(prediction.price_range), PriceEnum)

    def test_model_with_wrong_input_type(self):
        """An incomplete payload is rejected with a ValidationError
        (presumably due to the omitted fields -- confirm against the schema)."""
        # arrange
        model = MobileHandsetPriceModel()
        payload = {
            "battery_power": 842,
            "clock_speed": 2.2,
            "has_dual_sim": False,
            "front_camera_megapixels": 1,
            "has_four_g": False,
            "internal_memory": 7,
            "weight": 188,
            "number_of_cores": 2,
            "primary_camera_megapixels": 2,
            "pixel_resolution_height": 20,
            "pixel_resolution_width": 756,
            "ram": 2549,
            "screen_height": 9,
            "screen_width": 7,
            "talk_time": 19,
            "has_three_g": False,
            "has_touch_screen": False,
            "has_wifi": True,
        }
        # act, assert
        with self.assertRaises(ValidationError):
            model_input = MobileHandsetPriceModelInput(**payload)
            model.predict(model_input)
# Allow running this test module directly as a script.
if __name__ == '__main__':
    unittest.main()
|
python
|
# stdlib
import itertools
from urllib import quote
from unittest import TestCase
import logging
import mock
import json
from utils.splunk.splunk import SplunkSavedSearch, SplunkInstanceConfig, SavedSearches
from utils.splunk.splunk_helper import SplunkHelper
class FakeInstanceConfig(object):
    """Minimal stand-in for a Splunk instance config used by these tests."""

    def __init__(self):
        # Fixed test endpoint and connection settings.
        self.base_url = 'http://testhost:8089'
        self.default_request_timeout_seconds = 10
        self.verify_ssl_certificate = False

    def get_auth_tuple(self):
        """Return the (username, password) pair used for basic auth."""
        return ('username', 'password')
class FakeResponse(object):
    """Minimal stand-in for a requests.Response object.

    :param text: raw response payload (typically a JSON string)
    :param status_code: HTTP status code, defaults to 200
    :param headers: response headers dict; a fresh dict per instance
    """

    def __init__(self, text, status_code=200, headers=None):
        self.status_code = status_code
        self.payload = text
        # Bug fix: the original used a mutable default (headers={}), so all
        # instances created without headers shared one dict and mutations
        # leaked between them. Use a None sentinel instead.
        self.headers = {} if headers is None else headers

    def json(self):
        """Parse and return the payload as JSON."""
        return json.loads(self.payload)

    def raise_for_status(self):
        """No-op: fake responses never represent HTTP errors."""
        return
class TestUtilsSplunk(TestCase):
    """Tests for SplunkHelper: auth fallback, paged results, saved-search
    listing, and search dispatch. All network calls are mocked/patched."""

    @mock.patch('utils.splunk.splunk_helper.SplunkHelper._do_post',
                return_value=FakeResponse("""{ "sessionKey": "MySessionKeyForThisSession" }""", headers={}))
    def test_auth_session_fallback(self, mocked_do_post):
        """
        Test request authentication on fallback Authentication header
        retrieve auth session key,
        set it to the requests session,
        and see whether the outgoing request contains the expected HTTP header
        The expected HTTP header is Authentication when Set-Cookie is not present
        """
        helper = SplunkHelper(FakeInstanceConfig())
        helper.auth_session()
        mocked_do_post.assert_called_with("/services/auth/login?output_mode=json",
                                          "username=username&password=password&cookie=1", 10)
        mocked_do_post.assert_called_once()
        expected_header = helper.requests_session.headers.get("Authentication")
        self.assertEqual(expected_header, "Splunk MySessionKeyForThisSession")

    def test_splunk_helper(self):
        """Paging: saved_search_results keeps fetching 1000-row chunks until
        a short (empty) page arrives, concatenating all results."""
        instance_config = SplunkInstanceConfig({
            'url': 'dummy',
            'authentication': {
                'basic_auth': {
                    'username': "admin",
                    'password': "admin"
                }
            }
        }, {}, {
            'default_request_timeout_seconds': 5,
            'default_search_max_retry_count': 3,
            'default_search_seconds_between_retries': 1,
            'default_verify_ssl_certificate': False,
            'default_batch_size': 1000,
            'default_saved_searches_parallel': 3,
            'default_unique_key_fields': ["_bkt", "_cd"],
            'default_app': 'default',
            'default_parameters': {}
        })
        splunk_helper = SplunkHelper(instance_config)
        saved_search = SplunkSavedSearch(instance_config, {"name": "search", "parameters": {}})

        search_offsets = []

        def _mocked_search_chunk(*args, **kwargs):
            # Record the requested offset; return full 1000-row pages until
            # offset 4000, then an empty page to terminate the paging loop.
            search_offsets.append(args[2])
            if args[2] == 4000:
                return {"messages": [], "results": []}
            else:
                return {"messages": [], "results": list(itertools.repeat(None, 1000))}

        # Patch the chunk fetcher so no real Splunk instance is needed.
        setattr(splunk_helper, "_search_chunk", _mocked_search_chunk)

        res = splunk_helper.saved_search_results("id", saved_search)
        self.assertEquals(len(res), 5)
        self.assertEquals(search_offsets, [0, 1000, 2000, 3000, 4000])

    def test_splunk_saved_searches(self):
        """saved_searches() returns the entry names from the REST response."""
        instance_config = SplunkInstanceConfig({
            'url': 'dummy',
            'authentication': {
                'basic_auth': {
                    'username': "admin",
                    'password': "admin"
                }
            }
        }, {}, {
            'default_request_timeout_seconds': 5,
            'default_search_max_retry_count': 3,
            'default_search_seconds_between_retries': 1,
            'default_verify_ssl_certificate': False,
            'default_batch_size': 1000,
            'default_saved_searches_parallel': 3,
            'default_unique_key_fields': ["_bkt", "_cd"],
            'default_app': 'default',
            'default_parameters': {}
        })
        splunk_helper = SplunkHelper(instance_config)

        def _mocked_do_get(*args, **kwargs):
            # Fake REST payload listing two saved searches.
            class MockedResponse():
                def json(self):
                    return {
                        "entry": [
                            {
                                "name": "components"
                            },
                            {
                                "name": "relations"
                            }
                        ]
                    }
            return MockedResponse()

        setattr(splunk_helper, "_do_get", _mocked_do_get)

        res = splunk_helper.saved_searches()
        self.assertEquals(res, ["components", "relations"])

    def test_splunk_dispatch(self):
        """dispatch() posts to the expected servicesNS URL and returns the sid."""
        username = "admin"
        appname = "myapp"
        instance_config = SplunkInstanceConfig({
            'url': 'dummy',
            'authentication': {
                'basic_auth': {
                    'username': "admin",
                    'password': "admin"
                }
            }
        }, {}, {
            'default_request_timeout_seconds': 5,
            'default_search_max_retry_count': 3,
            'default_search_seconds_between_retries': 1,
            'default_verify_ssl_certificate': False,
            'default_batch_size': 1000,
            'default_saved_searches_parallel': 3,
            'default_unique_key_fields': ["_bkt", "_cd"],
            'default_app': 'default',
            'default_parameters': {}
        })
        splunk_helper = SplunkHelper(instance_config)
        saved_search = SplunkSavedSearch(instance_config, {"name": "search", "parameters": {}})
        params = {"key1": "val1", "key2": "val2"}

        def _mocked_do_post(*args, **kwargs):
            # Verify the dispatch URL, parameters, timeout and flag.
            self.assertEquals(args,
                              ('/servicesNS/%s/%s/saved/searches/%s/dispatch' % (username, appname, quote(saved_search.name)),
                               params,
                               5,
                               'true'
                               ))

            class MockedResponse():
                def json(self):
                    return {"sid": "zesid"}
            return MockedResponse()

        setattr(splunk_helper, "_do_post", _mocked_do_post)

        res = splunk_helper.dispatch(saved_search, username, appname, 'true', params)
        self.assertEquals(res, "zesid")
class TestSavedSearches(TestCase):
    """Tests for SavedSearches.update_searches: exact-name searches persist,
    match-pattern searches track whatever names the server reports."""

    def test_saved_searches(self):
        log = logging.getLogger('%s.%s' % (__name__, "SavedSearches"))
        instance_config = SplunkInstanceConfig({
            'url': 'dummy',
            'authentication': {
                'basic_auth': {
                    'username': "admin",
                    'password': "admin"
                }
            }
        }, {}, {
            'default_request_timeout_seconds': 5,
            'default_search_max_retry_count': 3,
            'default_search_seconds_between_retries': 1,
            'default_verify_ssl_certificate': False,
            'default_batch_size': 1000,
            'default_saved_searches_parallel': 3,
            'default_unique_key_fields': ["_bkt", "_cd"],
            'default_app': 'default',
            'default_parameters': {}
        })
        # One search addressed by exact name, one by regex match.
        saved_search_components = SplunkSavedSearch(instance_config, {"name": "components", "parameters": {}})
        saved_search_match = SplunkSavedSearch(instance_config, {"match": "comp.*", "parameters": {}})
        saved_searches = SavedSearches([saved_search_components, saved_search_match])

        # Base configuration includes the exactly specified search
        saved_searches.update_searches(log, [])
        self.assertEquals([s.name for s in saved_searches.searches], ["components"])

        # This should not change anything
        saved_searches.update_searches(log, ["components"])
        self.assertEquals([s.name for s in saved_searches.searches], ["components"])

        # Adding two component-like searches
        saved_searches.update_searches(log, ["comps1", "comps2", "blaat", "nocomp"])
        self.assertEquals(set([s.name for s in saved_searches.searches]), set(["components", "comps1", "comps2"]))

        # And remove again
        saved_searches.update_searches(log, [])
        self.assertEquals([s.name for s in saved_searches.searches], ["components"])
|
python
|
# -*- coding: utf-8 -*-
import logging
import os
from logging.handlers import RotatingFileHandler
from platform import uname
from plexapi.config import PlexConfig, reset_base_headers
from plexapi.utils import SecretsFilter
from uuid import getnode
# Load User Defined Config
# Config path can be overridden via the PLEXAPI_CONFIG_PATH env variable.
DEFAULT_CONFIG_PATH = os.path.expanduser('~/.config/plexapi/config.ini')
CONFIG_PATH = os.environ.get('PLEXAPI_CONFIG_PATH', DEFAULT_CONFIG_PATH)
CONFIG = PlexConfig(CONFIG_PATH)

# PlexAPI Settings
PROJECT = 'PlexAPI'
VERSION = '3.4.0'
TIMEOUT = CONFIG.get('plexapi.timeout', 30, int)
X_PLEX_CONTAINER_SIZE = CONFIG.get('plexapi.container_size', 100, int)
X_PLEX_ENABLE_FAST_CONNECT = CONFIG.get('plexapi.enable_fast_connect', False, bool)

# Plex Header Configuration
X_PLEX_PROVIDES = CONFIG.get('header.provides', 'controller')
# NOTE(review): the inner lookup of the misspelled 'header.platorm' key is
# presumably backwards-compatibility for configs written with that typo --
# confirm before removing.
X_PLEX_PLATFORM = CONFIG.get('header.platform', CONFIG.get('header.platorm', uname()[0]))
X_PLEX_PLATFORM_VERSION = CONFIG.get('header.platform_version', uname()[2])
X_PLEX_PRODUCT = CONFIG.get('header.product', PROJECT)
X_PLEX_VERSION = CONFIG.get('header.version', VERSION)
X_PLEX_DEVICE = CONFIG.get('header.device', X_PLEX_PLATFORM)
X_PLEX_DEVICE_NAME = CONFIG.get('header.device_name', uname()[1])
# MAC-address-derived default client identifier.
X_PLEX_IDENTIFIER = CONFIG.get('header.identifier', str(hex(getnode())))
BASE_HEADERS = reset_base_headers()

# Logging Configuration
# Defaults to a NullHandler; a rotating file handler is attached only when
# log.path is configured.
log = logging.getLogger('plexapi')
logfile = CONFIG.get('log.path')
logformat = CONFIG.get('log.format', '%(asctime)s %(module)12s:%(lineno)-4s %(levelname)-9s %(message)s')
loglevel = CONFIG.get('log.level', 'INFO').upper()
loghandler = logging.NullHandler()
if logfile:  # pragma: no cover
    logbackups = CONFIG.get('log.backup_count', 3, int)
    logbytes = CONFIG.get('log.rotate_bytes', 512000, int)
    loghandler = RotatingFileHandler(os.path.expanduser(logfile), 'a', logbytes, logbackups)
    loghandler.setFormatter(logging.Formatter(logformat))
log.addHandler(loghandler)
log.setLevel(loglevel)
# Scrub tokens/passwords from log output unless explicitly disabled.
logfilter = SecretsFilter()
if CONFIG.get('log.show_secrets', '').lower() != 'true':
    log.addFilter(logfilter)
|
python
|
import sys
from cx_Freeze import setup, Executable
# cx_Freeze build script: produces a Windows GUI executable and an MSI
# installer for the Triangulation application.
product_name = "Triangulation"

executable = Executable(
    script="main.py",
    base="Win32GUI",
    targetName="Triangulation.exe",
    icon="icon.ico",
    shortcutName="Triangulation",
    shortcutDir="ProgramMenuFolder",
)

# Freeze options: bundle os, drop tkinter, ship the icon alongside the exe.
build_options = {
    "packages": ["os"],
    "excludes": ["tkinter"],
    "include_files": ["icon.ico"],
}

# MSI options: stable upgrade code so reinstalls replace older versions.
msi_options = {
    "upgrade_code": "{66620F3A-DC3A-11E2-B341-002219E9B01E}",
    "add_to_path": False,
    "initial_target_dir": r"[ProgramFilesFolder]\%s" % (product_name),
}

setup(
    version="0.3",
    description="ear-clipping triangulation of flat polygons",
    author="Yannick Augenstein",
    author_email="[email protected]",
    name="Triangulation",
    executables=[executable],
    options={
        "build_exe": build_options,
        "bdist_msi": msi_options,
    },
)
|
python
|
import sys
sys.path.append('/home/george2/Raise/ProgramRepair/CodeSeer/projects/src/main/python')
from CodeJam.Y12R5P1.kelvinlau.A import *
# NOTE(review): everything below is machine-generated (CodeSeer) Python 2
# code sliced out of a Code Jam Y12R5P1 solution. The functions are
# near-duplicate prefixes of the same parsing routine, differing only in
# which local they return. Several variants call the result of list.sort()
# -- which returns None -- and would raise TypeError if executed; they are
# kept byte-identical on purpose since this corpus is generated, not
# hand-maintained. `map(...)` returning a list and cmp-style sort are
# Python 2 semantics.
def func_87caa8d11eaf4a20affa21f893f4cbfc(infile):
    n = int(infile.readline())
    l = map(int, infile.readline().split())
    return l


def func_38404980cd9d4ebb8816600eb75132ef(infile):
    n = int(infile.readline())
    l = map(int, infile.readline().split())
    return n


def func_d3dc1fc499a041b391d2a2cc0ecf55e0(infile):
    l = map(int, infile.readline().split())
    p = map(int, infile.readline().split())
    return p


def func_39c4fa9e21a84e8fa4eca4bb2cbc7225(infile):
    l = map(int, infile.readline().split())
    p = map(int, infile.readline().split())
    return l


def func_0055740904e64ebb93df874b64561c18(infile, n, l):
    p = map(int, infile.readline().split())
    a = zip(l, p, range(n))
    return p


def func_44588f283db54e21808ace48d3c6fdcf(infile, n, l):
    p = map(int, infile.readline().split())
    a = zip(l, p, range(n))
    return a


def func_b4578cbec34e4bd89610ca2a0828e518(p, n, l):
    a = zip(l, p, range(n))
    a.sort(lambda x, y: x[0] * y[1] - x[1] * y[0])
    return a


def func_c7ccf9ef618a45aaa76254dfc1a4236d(infile):
    n = int(infile.readline())
    l = map(int, infile.readline().split())
    p = map(int, infile.readline().split())
    return l


def func_56dd741514d243218012eb1f399fa905(infile):
    n = int(infile.readline())
    l = map(int, infile.readline().split())
    p = map(int, infile.readline().split())
    return n


def func_bd414dc83fc34c47a79d4495495cb9bc(infile):
    n = int(infile.readline())
    l = map(int, infile.readline().split())
    p = map(int, infile.readline().split())
    return p


def func_a716090739ee414cb65c9a8106bad135(infile, n):
    l = map(int, infile.readline().split())
    p = map(int, infile.readline().split())
    a = zip(l, p, range(n))
    return l


def func_39cb2abe0f5943bea3d2188aa6c15123(infile, n):
    l = map(int, infile.readline().split())
    p = map(int, infile.readline().split())
    a = zip(l, p, range(n))
    return a


def func_82b03b65077c4e8c95f7f18001ee870f(infile, n):
    l = map(int, infile.readline().split())
    p = map(int, infile.readline().split())
    a = zip(l, p, range(n))
    return p


def func_758d479d2b3a48aab8cb63753761f251(infile, n, l):
    p = map(int, infile.readline().split())
    a = zip(l, p, range(n))
    a.sort(lambda x, y: x[0] * y[1] - x[1] * y[0])
    return a


def func_9e10b2268e564edca0fde13e06f16502(infile, n, l):
    p = map(int, infile.readline().split())
    a = zip(l, p, range(n))
    a.sort(lambda x, y: x[0] * y[1] - x[1] * y[0])
    return p


# NOTE(review): list.sort() returns None, so the call below would raise
# TypeError at runtime (generated-mutation artifact).
def func_f88948e2669342e48023ad734c48e399(t, p, n, l):
    a = zip(l, p, range(n))
    a.sort(lambda x, y: x[0] * y[1] - x[1] * y[0])('Case #%d:' % t)
    return a


def func_290e798ac153494eaa51021d47671d47(infile):
    n = int(infile.readline())
    l = map(int, infile.readline().split())
    p = map(int, infile.readline().split())
    a = zip(l, p, range(n))
    return p


def func_fb4a8190496141dfb083575b22c08248(infile):
    n = int(infile.readline())
    l = map(int, infile.readline().split())
    p = map(int, infile.readline().split())
    a = zip(l, p, range(n))
    return l


def func_33dce28bb84a4a539b5a552605416c29(infile):
    n = int(infile.readline())
    l = map(int, infile.readline().split())
    p = map(int, infile.readline().split())
    a = zip(l, p, range(n))
    return a


def func_569e73dd57434c99b47d206e753f31b2(infile):
    n = int(infile.readline())
    l = map(int, infile.readline().split())
    p = map(int, infile.readline().split())
    a = zip(l, p, range(n))
    return n


def func_be975da12a944ad7bdc27c11281086f4(infile, n):
    l = map(int, infile.readline().split())
    p = map(int, infile.readline().split())
    a = zip(l, p, range(n))
    a.sort(lambda x, y: x[0] * y[1] - x[1] * y[0])
    return p


def func_ef0c7c81ca7842a9b643aa1ea69c58e4(infile, n):
    l = map(int, infile.readline().split())
    p = map(int, infile.readline().split())
    a = zip(l, p, range(n))
    a.sort(lambda x, y: x[0] * y[1] - x[1] * y[0])
    return l


def func_7c5ae7337de646708a42eca115ddf5a8(infile, n):
    l = map(int, infile.readline().split())
    p = map(int, infile.readline().split())
    a = zip(l, p, range(n))
    a.sort(lambda x, y: x[0] * y[1] - x[1] * y[0])
    return a


def func_0e9cdfaef47d4d07a46e498035e9c8ef(t, infile, n, l):
    p = map(int, infile.readline().split())
    a = zip(l, p, range(n))
    a.sort(lambda x, y: x[0] * y[1] - x[1] * y[0])('Case #%d:' % t)
    return a


def func_3c9ceee25e5146f0b78ee81200ffb56d(t, infile, n, l):
    p = map(int, infile.readline().split())
    a = zip(l, p, range(n))
    a.sort(lambda x, y: x[0] * y[1] - x[1] * y[0])('Case #%d:' % t)
    return p


def func_a992aae3335a488faed41e16e3e93ab7(infile):
    n = int(infile.readline())
    l = map(int, infile.readline().split())
    p = map(int, infile.readline().split())
    a = zip(l, p, range(n))
    a.sort(lambda x, y: x[0] * y[1] - x[1] * y[0])
    return p


def func_055876873de447cea03c195e86751e0a(infile):
    n = int(infile.readline())
    l = map(int, infile.readline().split())
    p = map(int, infile.readline().split())
    a = zip(l, p, range(n))
    a.sort(lambda x, y: x[0] * y[1] - x[1] * y[0])
    return n


def func_cfc24351e6ba48bcb4ecadbe78405af7(infile):
    n = int(infile.readline())
    l = map(int, infile.readline().split())
    p = map(int, infile.readline().split())
    a = zip(l, p, range(n))
    a.sort(lambda x, y: x[0] * y[1] - x[1] * y[0])
    return l


def func_99d4d561cb014cb08073fc6592d69426(infile):
    n = int(infile.readline())
    l = map(int, infile.readline().split())
    p = map(int, infile.readline().split())
    a = zip(l, p, range(n))
    a.sort(lambda x, y: x[0] * y[1] - x[1] * y[0])
    return a


def func_4de152c6e12f4beeae295148f5044599(t, infile, n):
    l = map(int, infile.readline().split())
    p = map(int, infile.readline().split())
    a = zip(l, p, range(n))
    a.sort(lambda x, y: x[0] * y[1] - x[1] * y[0])('Case #%d:' % t)
    return l


def func_9927fd04156445129845c7869ff07cc0(t, infile, n):
    l = map(int, infile.readline().split())
    p = map(int, infile.readline().split())
    a = zip(l, p, range(n))
    a.sort(lambda x, y: x[0] * y[1] - x[1] * y[0])('Case #%d:' % t)
    return p


def func_7e490d6d30094ba4a33de6d4076c39c0(t, infile, n):
    l = map(int, infile.readline().split())
    p = map(int, infile.readline().split())
    a = zip(l, p, range(n))
    a.sort(lambda x, y: x[0] * y[1] - x[1] * y[0])('Case #%d:' % t)
    return a


def func_458cf0175eb34db5ba4749c6cea76f11(t, infile):
    n = int(infile.readline())
    l = map(int, infile.readline().split())
    p = map(int, infile.readline().split())
    a = zip(l, p, range(n))
    a.sort(lambda x, y: x[0] * y[1] - x[1] * y[0])('Case #%d:' % t)
    return p


def func_4ca607214add474fb869c2191368410a(t, infile):
    n = int(infile.readline())
    l = map(int, infile.readline().split())
    p = map(int, infile.readline().split())
    a = zip(l, p, range(n))
    a.sort(lambda x, y: x[0] * y[1] - x[1] * y[0])('Case #%d:' % t)
    return n


def func_17e1dd7305144bc28dfa26439d330a7a(t, infile):
    n = int(infile.readline())
    l = map(int, infile.readline().split())
    p = map(int, infile.readline().split())
    a = zip(l, p, range(n))
    a.sort(lambda x, y: x[0] * y[1] - x[1] * y[0])('Case #%d:' % t)
    return l


def func_ec89833a814a487db2a053d16f0675dd(t, infile):
    n = int(infile.readline())
    l = map(int, infile.readline().split())
    p = map(int, infile.readline().split())
    a = zip(l, p, range(n))
    a.sort(lambda x, y: x[0] * y[1] - x[1] * y[0])('Case #%d:' % t)
    return a


def func_ab9d841a56444f9c90abeabf68d8d87d():
    infile = open('codejam/test_files/Y12R5P1/A.in')
    T = int(infile.readline())
    return infile


def func_42d1c60928ca4720ba0d100c4bfa5f2b():
    infile = open('codejam/test_files/Y12R5P1/A.in')
    T = int(infile.readline())
    return T
|
python
|
# -*- coding: utf-8 -*-
from app.api import HTTPStatus, Resource, api
from app.auth import auth
# Every endpoint inheriting from BaseController requires a logged-in user
# and documents the 401 response in the generated API docs.
@api.response(code=HTTPStatus.UNAUTHORIZED.value, description="Unauthorized access")
class BaseController(Resource):
    """Base API resource: applies login_required to all HTTP method handlers."""
    method_decorators = [auth.login_required]
|
python
|
# https://www.youtube.com/watch?v=XNKeayZW4dY
from __future__ import absolute_import, division, print_function
import os
import numpy as np
import pandas as pd
import tensorflow as tf
from sklearn.preprocessing import LabelEncoder
from tensorflow import keras
from tensorflow.keras import layers
print('You have TensorFlow version {}'.format(tf.__version__))

wine_data_path = os.path.abspath('{}/../dataset/wine_data.csv'.format(os.path.dirname(os.path.abspath(__file__))))
print(wine_data_path)

# Load the dataset
data = pd.read_csv(wine_data_path)

# Print 5 rows
# NOTE(review): the return value is discarded; head() only displays in a
# REPL/notebook -- wrap in print(...) if output is wanted here.
data.head()

# remove country not provided
data = data[pd.notnull(data['country'])]
# remove price not provided
data = data[pd.notnull(data['price'])]
# remove first column
data = data.drop(data.columns[0], axis=1)

# Any data that occurs less than 500 times will not be considered
variety_threshold = 500
value_counts = data['variety'].value_counts()
to_remove = value_counts[value_counts <= variety_threshold].index
data.replace(to_remove, np.nan, inplace=True)
data = data[pd.notnull(data['variety'])]

# Get train data and test data (simple 80/20 positional split)
train_size = int(len(data) * 0.8)
print('Train size:{}'.format(train_size))
print('Test size: {}'.format(len(data) - train_size))

# Train features
description_train = data['description'][:train_size]
variety_train = data['variety'][:train_size]

# Train labels
labels_train = data['price'][:train_size]

# Test features
description_test = data['description'][train_size:]
variety_test = data['variety'][train_size:]

# Test labels
labels_test = data['price'][train_size:]

# Create a tokenizer to preprocess our text descriptions
vocab_size = 3000  # This is a hyperparameter, experiment with different values for your dataset
tokenize = tf.keras.preprocessing.text.Tokenizer(num_words=vocab_size, char_level=False)
tokenize.fit_on_texts(description_train)  # only fit on train

# Wide feature 1: sparse bag of words (bow) vocab_size vector
description_bow_train = tokenize.texts_to_matrix(description_train)
description_bow_test = tokenize.texts_to_matrix(description_test)

# Wide feature 2: one-hot vector of variety categories
# Use sklearn utility to convert label string to numbered index
encoder = LabelEncoder()
encoder.fit(variety_train)
variety_train = encoder.transform(variety_train)
variety_test = encoder.transform(variety_test)
num_classes = np.max(variety_train) + 1

# Convert labels to one hot
variety_train = tf.keras.utils.to_categorical(variety_train, num_classes)
variety_test = tf.keras.utils.to_categorical(variety_test, num_classes)

# Define the model with Functional API
bow_inputs = layers.Input(shape=(vocab_size,))
variety_inputs = layers.Input(shape=(num_classes,))
merged_layers = layers.concatenate([bow_inputs, variety_inputs])
merged_layers = layers.Dense(256, activation='relu')(merged_layers)
predictions = layers.Dense(1)(merged_layers)
wide_model = tf.keras.Model(inputs=[bow_inputs, variety_inputs], outputs=predictions)
# NOTE(review): 'accuracy' is not a meaningful metric for MSE regression on
# continuous prices -- presumably a leftover from a classification example.
wide_model.compile(loss='mse', optimizer='adam', metrics=['accuracy'])
print(wide_model.summary())

# Deep model feature: word embeddings of wine descriptions
train_embed = tokenize.texts_to_sequences(description_train)
test_embed = tokenize.texts_to_sequences(description_test)
max_seq_length = 170
train_embed = tf.keras.preprocessing.sequence.pad_sequences(
    train_embed, maxlen=max_seq_length, padding='post'
)
test_embed = tf.keras.preprocessing.sequence.pad_sequences(
    test_embed, maxlen=max_seq_length, padding='post'
)

# Define our deep model with the Functional API
deep_inputs = layers.Input(shape=(max_seq_length,))
embedding = layers.Embedding(vocab_size, 8, input_length=max_seq_length)(deep_inputs)
embedding = layers.Flatten()(embedding)
embed_out = layers.Dense(1)(embedding)
deep_model = tf.keras.Model(inputs=deep_inputs, outputs=embed_out)
print(deep_model.summary())
deep_model.compile(loss='mse', optimizer='adam', metrics=['accuracy'])

# Combine wide and deep model into one model
merged_out = layers.concatenate([wide_model.output, deep_model.output])
merged_out = layers.Dense(1)(merged_out)
combined_model = tf.keras.Model(wide_model.input + [deep_model.input], merged_out)
print(combined_model.summary())
combined_model.compile(loss='mse', optimizer='adam', metrics=['accuracy'])

# Run training
combined_model.fit([description_bow_train, variety_train] + [train_embed], np.asarray(labels_train), epochs=10, batch_size=128)
combined_model.evaluate([description_bow_test, variety_test] + [test_embed], np.asarray(labels_test), batch_size=128)

# Generate predictions
predictions = combined_model.predict([description_bow_test, variety_test] + [test_embed])

# Compare predictions with actual values for the first few items in our test dataset
num_predictions = 40
diff = 0
for i in range(num_predictions):
    val = predictions[i]
    print(description_test.iloc[i])
    print('Predicted: {} - Actual: {}'.format(val[0], labels_test.iloc[i]))
    diff += abs(val[0] - labels_test.iloc[i])

# Mean absolute error over the sampled predictions.
print('Average prediction difference: {}'.format(diff / num_predictions))
|
python
|
"""
Kaggle Challenge:
Modded version from
"http://www.kaggle.com/c/acquire-valued-shoppers-challenge/"
'Reduce the data and generate features' by Triskelion
"""
from datetime import datetime, date
from collections import defaultdict
# Input file locations (relative to this script).
data_dir = "../data/"
loc_offers = data_dir + "offers.csv"
loc_transactions = data_dir + "transactions.csv"
loc_train = data_dir + "trainHistory.csv"
loc_test = data_dir + "testHistory.csv"

# will be created
loc_reduced = data_dir + "reduced.csv"
loc_out_train = data_dir + "train.vw"
loc_out_test = data_dir + "test.vw"
loc_stats = data_dir + "eda_stats.csv"
###
def eda_stats(loc_transactions, loc_stats):
    """Single pass over the transactions file collecting cardinalities and
    value-frequency counters; writes a summary to *loc_stats* and dumps the
    counters to ad-hoc .json files in the working directory.

    Python 2 code: print statements, str written to 'wb' handles.
    """
    start = datetime.now()
    from collections import Counter
    import math
    #get all categories and comps on offer in a dict
    # Plain dicts used as sets (key -> 1) to count distinct values.
    idd = {}
    chain = {}
    dept = {}
    category = {}
    company = {}
    brand = {}
    date = {}
    # Frequency counters; float columns are truncated to ints first.
    productsize = Counter()
    productmeasure = Counter()
    purchasequantity = Counter()
    purchaseamount = Counter()
    cat_co_brand = Counter()
    #open output file
    o = open(loc_stats, "wb")
    #go through transactions file
    for e, line in enumerate( open(loc_transactions) ):
        if e != 0:
            l = line.split(",")
            idd[l[0]] = 1
            chain[l[1]] = 1
            dept[l[2]] = 1
            category[l[3]] = 1
            company[l[4]] = 1
            brand[l[5]] = 1
            date[l[6]] = 1
            productsize [math.trunc( float(l[7]))] += 1;
            productmeasure[l[8]] += 1;
            purchasequantity[ int(l[9])] += 1
            purchaseamount[math.trunc( float(l[10]))] += 1
            # A "product" is the (category, company, brand) triple.
            cat_co_brand_val = str(l[3]) +'.'+ str(l[4]) +'.'+ str(l[5])
            cat_co_brand[cat_co_brand_val] += 1
        #progress
        if e % 5000000 == 0:
            print e, datetime.now() - start
    o.write("unque users: "+ str(len(idd))+'\n')
    o.write("chains: " + str(len(chain))+'\n')
    o.write("dept's: " + str(len(dept))+'\n')
    o.write("categories: "+ str(len(category))+'\n')
    o.write("companies: "+ str(len(company))+'\n')
    o.write("brands: "+ str(len(brand))+'\n')
    o.write("dates: "+ str(len(date))+'\n')
    o.write("product sizes: "+str(len(productsize))+'\n')
    o.write("product measures: "+ str(len(productmeasure))+'\n')
    o.write("purchase quantities: "+ str(len(purchasequantity))+'\n')
    o.write("puchase amounts: "+ str(len(purchaseamount))+'\n')
    o.write("unique products: "+ str(len(cat_co_brand))+'\n')
    o.write("# of transactions: "+ str(e) +'\n')
    o.close()
    # Dump raw counter dicts via str() -- Python literals, not strict JSON,
    # despite the .json extension.
    o = open('productsize.json', "wb")
    o.write(str(dict(productsize )))
    o.close()
    o = open('productmesaure.json', "wb")
    o.write(str(dict(productmeasure)))
    o.close()
    o = open('purchasequantity.json', "wb")
    o.write(str(dict(purchasequantity)))
    o.close()
    o = open('purchaseamount.json', "wb")
    o.write(str(dict(purchaseamount)))
    o.close()
    o = open('cat_co_brand.json', "wb")
    o.write(str(dict(cat_co_brand)))
    o.close()
    print e, datetime.now() - start, "done!"
def reduce_data(loc_offers, loc_transactions, loc_reduced):
    """Filter transactions down to rows whose category or company appears
    in the offers file, writing the kept rows (plus header) to loc_reduced.

    Python 2 code (print statements).
    """
    start = datetime.now()
    # Collect the categories / companies / brands present in the offers
    # file.  NOTE(review): row 0 of the offers file (the header) is also
    # inserted; harmless because no transaction matches the header strings.
    offers_cat = {}
    offers_co = {}
    offers_brand = {}  # collected but not used in the filter below
    for e, line in enumerate( open(loc_offers) ):
        offers_cat[ line.split(",")[1] ] = 1
        offers_co[ line.split(",")[3] ] = 1
        offers_brand[ line.split(",")[6] ] = 1
    # open output file
    with open(loc_reduced, "wb") as outfile:
        # stream the transactions file and keep matching rows
        reduced = 0
        for e, line in enumerate( open(loc_transactions) ):
            if e != 0:
                # keep the row if its category or company is on offer
                if line.split(",")[3] in offers_cat or line.split(",")[4] in offers_co:
                    outfile.write( line )
                    reduced += 1
            else:
                outfile.write( line )  # row 0: copy the header through
            # progress report every 5M rows
            if e % 5000000 == 0:
                print e, reduced, datetime.now() - start
    print e, reduced, datetime.now() - start
def diff_days(s1,s2):
    """Return the signed number of days from date string s1 to s2.

    Both arguments are ISO-style "YYYY-MM-DD" strings; the result is
    negative when s2 is earlier than s1.
    """
    fmt = "%Y-%m-%d"
    earlier = datetime.strptime(s1, fmt)
    later = datetime.strptime(s2, fmt)
    return (later - earlier).days
def generate_features(loc_train, loc_test, loc_transactions, loc_out_train, loc_out_test):
    """Stream the (reduced) transactions file, grouped by shopper id, and
    emit one Vowpal Wabbit line per shopper with offer-history features.

    Train rows get their true label, test rows a dummy label of 1 (flagged
    internally with label 0.5) and are routed to separate output files.
    Assumes the transactions file is sorted by shopper id.
    NOTE(review): reads the module-level `loc_offers` instead of taking it
    as a parameter.  Python 2 code (print statement).
    """
    # keep a dictionary with the offer data, keyed by offer id
    offers = {}
    for e, line in enumerate( open(loc_offers) ):
        row = line.strip().split(",")
        offers[ row[0] ] = row
    # keep two dictionaries with the shopper ids from train and test
    train_ids = {}
    test_ids = {}
    for e, line in enumerate( open(loc_train) ):
        if e > 0:
            row = line.strip().split(",")
            train_ids[row[0]] = row
    for e, line in enumerate( open(loc_test) ):
        if e > 0:
            row = line.strip().split(",")
            test_ids[row[0]] = row
    # open the two output files
    with open(loc_out_train, "wb") as out_train, open(loc_out_test, "wb") as out_test:
        # iterate through the reduced dataset
        last_id = 0
        features = defaultdict(float)
        for e, line in enumerate( open(loc_transactions) ):
            if e > 0:  # skip header
                # poor man's csv reader
                row = line.strip().split(",")
                # flush the accumulated features when a new shopper id starts
                if last_id != row[0] and e != 1:
                    # negative indicator features
                    if "has_bought_company" not in features:
                        features['never_bought_company'] = 1
                    if "has_bought_category" not in features:
                        features['never_bought_category'] = 1
                    if "has_bought_brand" not in features:
                        features['never_bought_brand'] = 1
                    # conjunction indicator features
                    if "has_bought_brand" in features and "has_bought_category" in features and "has_bought_company" in features:
                        features['has_bought_brand_company_category'] = 1
                    if "has_bought_brand" in features and "has_bought_category" in features:
                        features['has_bought_brand_category'] = 1
                    if "has_bought_brand" in features and "has_bought_company" in features:
                        features['has_bought_brand_company'] = 1
                    # build the VW line: "<label> '<id> |f k:v k:v ..."
                    outline = ""
                    test = False
                    for k, v in features.items():
                        if k == "label" and v == 0.5:
                            # test sample: dummy label 1
                            outline = "1 '" + last_id + " |f" + outline
                            test = True
                        elif k == "label":
                            outline = str(v) + " '" + last_id + " |f" + outline
                        else:
                            outline += " " + k+":"+str(v)
                    outline += "\n"
                    if test:
                        out_test.write( outline )
                    else:
                        out_train.write( outline )
                    # reset features for the next shopper
                    features = defaultdict(float)
                # accumulate features from this transaction record,
                # but only for shoppers present in train or test history
                if row[0] in train_ids or row[0] in test_ids:
                    # label and history row for this shopper
                    if row[0] in train_ids:
                        history = train_ids[row[0]]
                        if train_ids[row[0]][5] == "t":
                            features['label'] = 1
                        else:
                            features['label'] = 0
                    else:
                        history = test_ids[row[0]]
                        features['label'] = 0.5  # sentinel: test sample
                    # offer attributes (stored as strings; VW parses them)
                    features['offer_value'] = offers[ history[2] ][4]
                    features['offer_quantity'] = offers[ history[2] ][2]
                    offervalue = offers[ history[2] ][4]  # NOTE(review): unused
                    features['total_spend'] += float( row[10] )
                    # --- company match: counts, quantity, amount, windowed ---
                    if offers[ history[2] ][3] == row[4]:
                        features['has_bought_company'] += 1.0
                        features['has_bought_company_q'] += float( row[9] )
                        features['has_bought_company_a'] += float( row[10] )
                        date_diff_days = diff_days(row[6],history[-1])
                        # windowed versions are cumulative: a <30-day row
                        # also counts toward the 60/90/180-day features
                        if date_diff_days < 30:
                            features['has_bought_company_30'] += 1.0
                            features['has_bought_company_q_30'] += float( row[9] )
                            features['has_bought_company_a_30'] += float( row[10] )
                        if date_diff_days < 60:
                            features['has_bought_company_60'] += 1.0
                            features['has_bought_company_q_60'] += float( row[9] )
                            features['has_bought_company_a_60'] += float( row[10] )
                        if date_diff_days < 90:
                            features['has_bought_company_90'] += 1.0
                            features['has_bought_company_q_90'] += float( row[9] )
                            features['has_bought_company_a_90'] += float( row[10] )
                        if date_diff_days < 180:
                            features['has_bought_company_180'] += 1.0
                            features['has_bought_company_q_180'] += float( row[9] )
                            features['has_bought_company_a_180'] += float( row[10] )
                    # --- category match ---
                    if offers[ history[2] ][1] == row[3]:
                        features['has_bought_category'] += 1.0
                        features['has_bought_category_q'] += float( row[9] )
                        features['has_bought_category_a'] += float( row[10] )
                        date_diff_days = diff_days(row[6],history[-1])
                        if date_diff_days < 30:
                            features['has_bought_category_30'] += 1.0
                            features['has_bought_category_q_30'] += float( row[9] )
                            features['has_bought_category_a_30'] += float( row[10] )
                        if date_diff_days < 60:
                            features['has_bought_category_60'] += 1.0
                            features['has_bought_category_q_60'] += float( row[9] )
                            features['has_bought_category_a_60'] += float( row[10] )
                        if date_diff_days < 90:
                            features['has_bought_category_90'] += 1.0
                            features['has_bought_category_q_90'] += float( row[9] )
                            features['has_bought_category_a_90'] += float( row[10] )
                        if date_diff_days < 180:
                            features['has_bought_category_180'] += 1.0
                            features['has_bought_category_q_180'] += float( row[9] )
                            features['has_bought_category_a_180'] += float( row[10] )
                    # --- brand match ---
                    if offers[ history[2] ][5] == row[5]:
                        features['has_bought_brand'] += 1.0
                        features['has_bought_brand_q'] += float( row[9] )
                        features['has_bought_brand_a'] += float( row[10] )
                        date_diff_days = diff_days(row[6],history[-1])
                        if date_diff_days < 30:
                            features['has_bought_brand_30'] += 1.0
                            features['has_bought_brand_q_30'] += float( row[9] )
                            features['has_bought_brand_a_30'] += float( row[10] )
                        if date_diff_days < 60:
                            features['has_bought_brand_60'] += 1.0
                            features['has_bought_brand_q_60'] += float( row[9] )
                            features['has_bought_brand_a_60'] += float( row[10] )
                        if date_diff_days < 90:
                            features['has_bought_brand_90'] += 1.0
                            features['has_bought_brand_q_90'] += float( row[9] )
                            features['has_bought_brand_a_90'] += float( row[10] )
                        if date_diff_days < 180:
                            features['has_bought_brand_180'] += 1.0
                            features['has_bought_brand_q_180'] += float( row[9] )
                            features['has_bought_brand_a_180'] += float( row[10] )
                last_id = row[0]
                if e % 100000 == 0:
                    print e
#generate_features(loc_train, loc_test, loc_transactions, loc_out_train, loc_out_test)

if __name__ == '__main__':
    # Pipeline steps; the first two were run once to produce reduced.csv
    # and the VW files, leaving only the EDA step enabled.
    # reduce_data(loc_offers, loc_transactions, loc_reduced)
    # generate_features(loc_train, loc_test, loc_reduced, loc_out_train, loc_out_test)
    eda_stats(loc_transactions, loc_stats)
|
python
|
from rest_framework.views import APIView
from rest_framework.response import Response
from rest_framework import generics
from rest_framework import status, mixins, viewsets
from rest_framework.decorators import api_view
from rest_framework.decorators import action
#Serializers
from cride.maps.serializers import StopModelSerializer
#models
from cride.maps.models import Stop, Busroute
class StopsViewSet(mixins.CreateModelMixin,
                   mixins.ListModelMixin,
                   mixins.DestroyModelMixin,
                   mixins.UpdateModelMixin,
                   mixins.RetrieveModelMixin,
                   viewsets.GenericViewSet,
                   APIView):
    """CRUD view set for bus-route stops.

    List/retrieve results are restricted to the bus route identified by
    the `busroute` query parameter.
    """
    serializer_class = StopModelSerializer

    def get_queryset(self):
        # Filter stops by the `busroute` query parameter (a Busroute id).
        # NOTE(review): the parameter is not validated -- when absent this
        # filters on busroute__id=None; confirm clients always send it.
        busroute = self.request.query_params.get("busroute")
        queryset = Stop.objects.filter(
            busroute__id = busroute,
        )
        return queryset
@api_view(["POST"])
def post_stop(request):
value = request.data["busroute"]
busroute = Busroute.objects.get(id = value)
response = Stop.objects.create(
lng = request.data["lng"],
phone =request.data["phone"],
email = request.data["email"],
contact = request.data["contact"],
busroute = busroute,
lat = request.data["lat"],
client = request.data["client"],
comments = request.data["comments"],
arrived_at = request.data["arrived_at"],
finished_at = request.data["finished_at"]
)
data = {"stop": StopModelSerializer(response).data}
return Response(data)
|
python
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django import VERSION as DJANGO_VERSION
from django.db import migrations
def add_image_permissions_to_admin_groups(apps, schema_editor):
    """Grant add/change/delete image permissions to the Editors and
    Moderators groups (forward migration step).

    Uses the historical models via ``apps.get_model`` as required inside
    migrations, and get_or_create so the step is idempotent.
    """
    ContentType = apps.get_model('contenttypes.ContentType')
    Permission = apps.get_model('auth.Permission')
    Group = apps.get_model('auth.Group')
    # Get (or create) the image content type and its three permissions.
    # Django < 1.8 required a `name` on ContentType; newer versions removed it.
    image_content_type, _created = ContentType.objects.get_or_create(
        model='image',
        app_label='tuiuiuimages',
        defaults={'name': 'image'} if DJANGO_VERSION < (1, 8) else {}
    )
    add_image_permission, _created = Permission.objects.get_or_create(
        content_type=image_content_type,
        codename='add_image',
        defaults={'name': 'Can add image'}
    )
    change_image_permission, _created = Permission.objects.get_or_create(
        content_type=image_content_type,
        codename='change_image',
        defaults={'name': 'Can change image'}
    )
    delete_image_permission, _created = Permission.objects.get_or_create(
        content_type=image_content_type,
        codename='delete_image',
        defaults={'name': 'Can delete image'}
    )
    # Assign the permissions to the Editors and Moderators groups
    # (created by tuiuiucore's initial data migration).
    for group in Group.objects.filter(name__in=['Editors', 'Moderators']):
        group.permissions.add(add_image_permission, change_image_permission, delete_image_permission)
def remove_image_permissions(apps, schema_editor):
    """Reverse the above additions of permissions.

    Deleting the Permission rows also removes the group assignments via
    the m2m cascade, so no explicit Group cleanup is needed.
    """
    ContentType = apps.get_model('contenttypes.ContentType')
    Permission = apps.get_model('auth.Permission')
    image_content_type = ContentType.objects.get(
        model='image',
        app_label='tuiuiuimages',
    )
    # This cascades to Group
    Permission.objects.filter(
        content_type=image_content_type,
        codename__in=('add_image', 'change_image', 'delete_image')
    ).delete()
class Migration(migrations.Migration):
    """Data migration: grant image permissions to the admin groups."""

    dependencies = [
        ('tuiuiuimages', '0001_initial'),
        # Need to run tuiuiucore's initial data migration first so the
        # Editors/Moderators groups exist before permissions are assigned.
        ('tuiuiucore', '0002_initial_data'),
    ]

    operations = [
        # Forward adds the permissions; the second callable reverses it.
        migrations.RunPython(add_image_permissions_to_admin_groups, remove_image_permissions),
    ]
|
python
|
#!/usr/bin/python -u
# -*- coding: latin-1 -*-
#
# Curious numbers in Z3
#
# From Martin Henz' collection of puzzles
# http://www.comp.nus.edu.sg/~henz/projects/puzzles/arith/#curious
# """
# Curious Numbers from "Amusements in Mathematics, Dudeney", number 114.
#
# The number 48 has this peculiarity, that if you add 1 to it the result
# is a square number, and if you add 1 to its half, you also get a
# square number. Now, there is no limit to the numbers that have this
# peculiarity, and it is an interesting puzzle to find three more of
# them---the smallest possible numbers. What are they?
# """
#
# This Z3 model was written by Hakan Kjellerstrand ([email protected])
# See also my Z3 page: http://hakank.org/z3/
#
from z3_utils_hakank import *

sol = Solver()

# Six integer unknowns: X is the curious number, A..E are auxiliaries.
n = 6

# variables
up = 4000000; # upper range limit
arr = makeIntVector(sol,"arr",n,1,up)
[X,A,B,C,D,E] = arr

# constraints
sol.add(X + 1 == A)  # adding 1 to X ...
sol.add(A == B * B)  # ... gives a square number (A = B^2)
sol.add(X == 2 * C)  # C is half of X
sol.add(C + 1 == D)  # adding 1 to its half ...
sol.add(D == E * E)  # ... also gives a square number (D = E^2)

# Enumerate all solutions in range by blocking each model found.
num_solutions = 0
cur_nums = []
while sol.check() == sat:
    num_solutions += 1
    mod = sol.model()
    # print("arr:", [mod.eval(arr[i]) for i in range(n)])
    cur_nums.append(mod.eval(arr[0]).as_long())
    getDifferentSolution(sol,mod,arr)

print("num_solutions:", num_solutions)
cur_nums.sort()
print("cur_nums:", cur_nums)
|
python
|
# coding=utf-8
r"""
This code was generated by
\ / _ _ _| _ _
| (_)\/(_)(_|\/| |(/_ v1.0.0
/ /
"""
from tests import IntegrationTestCase
from tests.holodeck import Request
from twilio.base.exceptions import TwilioException
from twilio.http.response import Response
class UserTestCase(IntegrationTestCase):
    """Generated integration tests for the Conversations v1 Users resource.

    Each *_request test mocks a 500 so the client raises, then asserts the
    HTTP request shape; each *_response test mocks a canned payload and
    asserts the client parses it.
    """

    def test_create_request(self):
        # Verify method, URL, custom header and form data of user creation.
        self.holodeck.mock(Response(500, ''))

        with self.assertRaises(TwilioException):
            self.client.conversations.v1.services("ISXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX") \
                                        .users.create(identity="identity", x_twilio_webhook_enabled="true")

        values = {'Identity': "identity", }
        headers = {'X-Twilio-Webhook-Enabled': "true", }

        self.holodeck.assert_has_request(Request(
            'post',
            'https://conversations.twilio.com/v1/Services/ISXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX/Users',
            headers=headers,
        ))
        self.holodeck.assert_has_request(Request(
            'post',
            'https://conversations.twilio.com/v1/Services/ISXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX/Users',
            data=values,
        ))

    def test_create_response(self):
        # 201 payload must deserialize into a non-None user instance.
        self.holodeck.mock(Response(
            201,
            '''
            {
                "sid": "USaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa",
                "account_sid": "ACaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa",
                "chat_service_sid": "ISaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa",
                "role_sid": "RLaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa",
                "identity": "admin",
                "friendly_name": "name",
                "attributes": "{ \\"duty\\": \\"tech\\" }",
                "is_online": true,
                "is_notifiable": null,
                "date_created": "2019-12-16T22:18:37Z",
                "date_updated": "2019-12-16T22:18:38Z",
                "url": "https://conversations.twilio.com/v1/Services/ISaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa/Users/USaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa"
            }
            '''
        ))

        actual = self.client.conversations.v1.services("ISXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX") \
                                             .users.create(identity="identity")

        self.assertIsNotNone(actual)

    def test_update_request(self):
        # Verify method, URL and webhook header of a user update.
        self.holodeck.mock(Response(500, ''))

        with self.assertRaises(TwilioException):
            self.client.conversations.v1.services("ISXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX") \
                                        .users("USXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX").update(x_twilio_webhook_enabled="true")

        headers = {'X-Twilio-Webhook-Enabled': "true", }

        self.holodeck.assert_has_request(Request(
            'post',
            'https://conversations.twilio.com/v1/Services/ISXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX/Users/USXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX',
            headers=headers,
        ))

    def test_update_response(self):
        # 200 payload must deserialize into a non-None user instance.
        self.holodeck.mock(Response(
            200,
            '''
            {
                "sid": "USaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa",
                "account_sid": "ACaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa",
                "chat_service_sid": "ISaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa",
                "role_sid": "RLaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa",
                "identity": "admin",
                "friendly_name": "new name",
                "attributes": "{ \\"duty\\": \\"tech\\", \\"team\\": \\"internals\\" }",
                "is_online": true,
                "is_notifiable": null,
                "date_created": "2019-12-16T22:18:37Z",
                "date_updated": "2019-12-16T22:18:38Z",
                "url": "https://conversations.twilio.com/v1/Services/ISaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa/Users/USaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa"
            }
            '''
        ))

        actual = self.client.conversations.v1.services("ISXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX") \
                                             .users("USXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX").update()

        self.assertIsNotNone(actual)

    def test_delete_request(self):
        # Verify method, URL and webhook header of a user deletion.
        self.holodeck.mock(Response(500, ''))

        with self.assertRaises(TwilioException):
            self.client.conversations.v1.services("ISXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX") \
                                        .users("USXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX").delete(x_twilio_webhook_enabled="true")

        headers = {'X-Twilio-Webhook-Enabled': "true", }

        self.holodeck.assert_has_request(Request(
            'delete',
            'https://conversations.twilio.com/v1/Services/ISXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX/Users/USXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX',
            headers=headers,
        ))

    def test_delete_response(self):
        # A 204 with no body means the delete call returns True.
        self.holodeck.mock(Response(
            204,
            None,
        ))

        actual = self.client.conversations.v1.services("ISXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX") \
                                             .users("USXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX").delete()

        self.assertTrue(actual)

    def test_fetch_request(self):
        # Verify method and URL of a single-user fetch.
        self.holodeck.mock(Response(500, ''))

        with self.assertRaises(TwilioException):
            self.client.conversations.v1.services("ISXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX") \
                                        .users("USXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX").fetch()

        self.holodeck.assert_has_request(Request(
            'get',
            'https://conversations.twilio.com/v1/Services/ISXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX/Users/USXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX',
        ))

    def test_fetch_response(self):
        # 200 payload must deserialize into a non-None user instance.
        self.holodeck.mock(Response(
            200,
            '''
            {
                "sid": "USaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa",
                "account_sid": "ACaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa",
                "chat_service_sid": "ISaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa",
                "role_sid": "RLaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa",
                "identity": "admin",
                "friendly_name": "name",
                "attributes": "{ \\"duty\\": \\"tech\\" }",
                "is_online": true,
                "is_notifiable": null,
                "date_created": "2019-12-16T22:18:37Z",
                "date_updated": "2019-12-16T22:18:38Z",
                "url": "https://conversations.twilio.com/v1/Services/ISaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa/Users/USaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa"
            }
            '''
        ))

        actual = self.client.conversations.v1.services("ISXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX") \
                                             .users("USXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX").fetch()

        self.assertIsNotNone(actual)

    def test_list_request(self):
        # Verify method and URL of a user list call.
        self.holodeck.mock(Response(500, ''))

        with self.assertRaises(TwilioException):
            self.client.conversations.v1.services("ISXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX") \
                                        .users.list()

        self.holodeck.assert_has_request(Request(
            'get',
            'https://conversations.twilio.com/v1/Services/ISXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX/Users',
        ))

    def test_read_full_response(self):
        # A paged 200 payload with two users must deserialize successfully.
        self.holodeck.mock(Response(
            200,
            '''
            {
                "meta": {
                    "page": 0,
                    "page_size": 50,
                    "first_page_url": "https://conversations.twilio.com/v1/Services/ISaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa/Users?PageSize=50&Page=0",
                    "previous_page_url": null,
                    "url": "https://conversations.twilio.com/v1/Services/ISaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa/Users?PageSize=50&Page=0",
                    "next_page_url": null,
                    "key": "users"
                },
                "users": [
                    {
                        "sid": "USaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa",
                        "account_sid": "ACaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa",
                        "chat_service_sid": "ISaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa",
                        "role_sid": "RLaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa",
                        "identity": "admin",
                        "friendly_name": "name",
                        "attributes": "{ \\"duty\\": \\"tech\\" }",
                        "is_online": true,
                        "is_notifiable": null,
                        "date_created": "2019-12-16T22:18:37Z",
                        "date_updated": "2019-12-16T22:18:38Z",
                        "url": "https://conversations.twilio.com/v1/Services/ISaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa/Users/USaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa"
                    },
                    {
                        "sid": "USaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa",
                        "account_sid": "ACaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa",
                        "chat_service_sid": "ISaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa",
                        "role_sid": "RLaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa",
                        "identity": "agent0034",
                        "friendly_name": "John from customs",
                        "attributes": "{ \\"duty\\": \\"agent\\" }",
                        "is_online": false,
                        "is_notifiable": null,
                        "date_created": "2020-03-24T20:38:21Z",
                        "date_updated": "2020-03-24T20:38:21Z",
                        "url": "https://conversations.twilio.com/v1/Services/ISaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa/Users/USaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa"
                    }
                ]
            }
            '''
        ))

        actual = self.client.conversations.v1.services("ISXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX") \
                                             .users.list()

        self.assertIsNotNone(actual)
|
python
|
# Course lesson #7: template tags.
from flask import Flask
from flask import render_template

# Pass template_folder='...' to Flask(...) to use a custom template dir.
app = Flask(__name__)


@app.route('/')
def index():
    """Render the landing page, passing a name to the template."""
    return render_template('index.html', nombre='Sergio')


@app.route('/client')
def client():
    """Render the client page with a fixed list of sample names."""
    sample_names = ['test1', 'test2', 'test3', 'test4']
    return render_template('client.html', list=sample_names)


if __name__ == '__main__':
    # Development server only; not for production use.
    app.run(debug=True, host="127.0.0.1", port=9566)
|
python
|
from output.models.ms_data.wildcards.wild_h003_xsd.wild_h003 import (
Bar,
Foo,
)
__all__ = [
"Bar",
"Foo",
]
|
python
|
import torch.nn as nn
import functools
import torch.nn.functional as F
import torch
from core.dataset.architecture.mobilespace import *
class MBSpaceController(nn.Module):
    """LSTM controller that samples architectures from a MobileNet-style
    search space, REINFORCE-style.

    Per unit it samples a depth, then for every possible layer
    (max(depths) per unit) an expand ratio and a kernel size.  Each
    decision is embedded and fed back as the next LSTM input.
    forward() returns the architecture string plus the summed sampling
    log-probabilities and entropies for policy-gradient training.
    """

    def __init__(self, n_conditions=1, n_unit=N_UNITS,
                 depths=DEPTHS, kernel_sizes=KERNEL_SIZES, expand_ratios=EXPAND_RATIOS,
                 hidden_size=64, batch_size=1, device="cpu"):
        super(MBSpaceController, self).__init__()
        self.n_unit = n_unit
        self.depths = depths
        self.expand_ratios = expand_ratios
        self.kernel_sizes = kernel_sizes
        self.hidden_size = hidden_size
        # Embeddings for the optional condition id and for each previous
        # sampling decision (used as the next step's LSTM input).
        self.condition_embedding = nn.Embedding(n_conditions, self.hidden_size)
        self.depth_embedding = nn.Embedding(len(self.depths), self.hidden_size)
        self.ratio_embedding = nn.Embedding(len(self.expand_ratios), self.hidden_size)
        self.ks_embedding = nn.Embedding(len(self.kernel_sizes), self.hidden_size)
        self.lstm = nn.LSTMCell(self.hidden_size, self.hidden_size)
        # One classification head per decision type.
        self.depth_linear = nn.Linear(self.hidden_size, len(self.depths))
        self.width_linear = nn.Linear(self.hidden_size, len(self.expand_ratios))
        self.ks_linear = nn.Linear(self.hidden_size, len(self.kernel_sizes))
        self.batch_size = batch_size
        self.device = device
        self.reset_parameters()

    def reset_parameters(self, init_range=0.1):
        # Uniform init in [-init_range, init_range] for all parameters.
        for param in self.parameters():
            param.data.uniform_(-init_range, init_range)

    @functools.lru_cache(maxsize=128)
    def _zeros(self, batch_size):
        # Cached zero tensor used both as initial input and hidden state.
        # NOTE(review): lru_cache on an instance method keeps `self` alive
        # for the cache's lifetime and returns the same tensor object on
        # every call -- safe only while it is never mutated in place.
        return torch.zeros((batch_size, self.hidden_size), device=self.device, requires_grad=False)

    def _impl(self, probs):
        # Sample one action from Categorical(probs); return the action,
        # its log-probability and the distribution entropy.
        m = torch.distributions.Categorical(probs=probs)
        action = m.sample().view(-1)
        select_log_p = m.log_prob(action)
        entropy = m.entropy()
        return action, select_log_p, entropy

    def forward(self, condition=None, force_uniform=False):
        """Sample one architecture.

        condition: optional LongTensor of condition ids; when None a zero
            vector is used as the first LSTM input.
        force_uniform: when True each decision is drawn from uniform
            (zero) logits and the LSTM state is not advanced.
        Returns (arch_string, sum_of_log_probs, sum_of_entropies).
        """
        log_ps = []
        entrpys = []
        if condition is None:
            inputs = self._zeros(self.batch_size)
        else:
            inputs = self.condition_embedding(condition)
        hidden = self._zeros(self.batch_size), self._zeros(self.batch_size)
        embed = inputs
        depths = []
        ks = []
        ratios = []
        for unit in range(self.n_unit):
            # depth decision for this unit
            if force_uniform:
                logits = torch.zeros(len(self.depths))
            else:
                hx, cx = self.lstm(embed, hidden)
                hidden = (hx, cx)
                logits = self.depth_linear(hx)
            probs = F.softmax(logits, dim=-1)
            depth, log_p, entropy = self._impl(probs)
            log_ps.append(log_p)
            entrpys.append(entropy)
            depths.append(self.depths[depth.item()])
            embed = self.depth_embedding(depth)
            # Sample per-layer choices for the maximum possible depth so the
            # encoding has a fixed length regardless of the sampled depth.
            for _ in range(max(self.depths)):
                # expand ratio decision
                if force_uniform:
                    logits = torch.zeros(len(self.expand_ratios))
                else:
                    hx, cx = self.lstm(embed, hidden)
                    hidden = (hx, cx)
                    logits = self.width_linear(hx)
                probs = F.softmax(logits, dim=-1)
                ratio, log_p, entropy = self._impl(probs)
                log_ps.append(log_p)
                entrpys.append(entropy)
                ratios.append(self.expand_ratios[ratio.item()])
                embed = self.ratio_embedding(ratio)
                # kernel size decision
                if force_uniform:
                    logits = torch.zeros(len(self.kernel_sizes))
                else:
                    hx, cx = self.lstm(embed, hidden)
                    hidden = (hx, cx)
                    logits = self.ks_linear(hx)
                probs = F.softmax(logits, dim=-1)
                k, log_p, entropy = self._impl(probs)
                log_ps.append(log_p)
                entrpys.append(entropy)
                ks.append(self.kernel_sizes[k.item()])
                embed = self.ks_embedding(k)
        return arch2str(MBArchitecture(depths, ks, ratios)), sum(log_ps), sum(entrpys)
def str2arch(string):
    """Parse a "d,d,...:k,k,...:r,r,..." string into an MBArchitecture."""
    depth_part, ks_part, ratio_part = string.split(":")
    depths, ks, ratios = (
        [int(token) for token in part.split(",")]
        for part in (depth_part, ks_part, ratio_part)
    )
    return MBArchitecture(depths, ks, ratios)
def arch2str(arch: MBArchitecture):
    """Serialize an MBArchitecture as "depths:ks:ratios", each a
    comma-separated list of integers."""
    sections = (arch.depths, arch.ks, arch.ratios)
    return ":".join(",".join(str(value) for value in items) for items in sections)
|
python
|
# Copyright (c) 2019 GalaxyLearning Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
class Model(object):
    """Container pairing a trainable model with its train strategy and job id.

    Fix: attributes are now initialized in __init__, so each getter can be
    called before its setter without raising AttributeError (returns None).
    """

    def __init__(self):
        # Initialize all attributes so the getters are safe before the
        # corresponding setter has been called.
        self.model = None
        self.train_strategy = None
        self.job_id = None

    def set_model(self, model):
        """Store the underlying model object."""
        self.model = model

    def get_model(self):
        """Return the stored model, or None if unset."""
        return self.model

    def set_train_strategy(self, train_strategy):
        """Store the training strategy."""
        self.train_strategy = train_strategy

    def get_train_strategy(self):
        """Return the stored training strategy, or None if unset."""
        return self.train_strategy

    def set_job_id(self, job_id):
        """Store the federated-learning job identifier."""
        self.job_id = job_id

    def get_job_id(self):
        """Return the stored job identifier, or None if unset."""
        return self.job_id
|
python
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.