| content (stringlengths 0–894k) | type (stringclasses, 2 values) |
|---|---|
from rest_framework import serializers
from backend.models import Shortener
# Shortener Serializer
class ShortenerSerializer(serializers.ModelSerializer):
class Meta:
model = Shortener
fields = '__all__'
read_only_fields = ['id', 'responseURL', 'date']
|
python
|
"""For entities that have specs."""
from gemd.entity.has_dependencies import HasDependencies
from gemd.entity.object.has_template import HasTemplate
from gemd.entity.template.base_template import BaseTemplate
from gemd.entity.link_by_uid import LinkByUID
from abc import abstractmethod
from typing import Optional, Union, Set, Type
class HasSpec(HasDependencies):
"""Mix-in trait for objects that can be assigned specs.
Parameters
----------
spec: :class:`HasTemplate <gemd.entity.object.has_template.HasTemplate>`
A spec, which expresses the anticipated or aspirational behavior of this object.
"""
def __init__(self, spec: Union[HasTemplate, LinkByUID] = None):
self._spec = None
self.spec = spec
@property
def spec(self) -> Union[HasTemplate, LinkByUID]:
"""Get the spec."""
return self._spec
@spec.setter
def spec(self, spec: Union[HasTemplate, LinkByUID]):
"""Set the spec."""
if spec is None:
self._spec = None
elif isinstance(spec, (self._spec_type(), LinkByUID)):
self._spec = spec
else:
raise TypeError(f"Template must be a {self._spec_type()} or LinkByUID, "
f"not {type(spec)}")
@staticmethod
@abstractmethod
def _spec_type() -> Type:
"""Child must report implementation details."""
@property
def template(self) -> Optional[Union[BaseTemplate, LinkByUID]]:
"""Get the template associated with the spec."""
if isinstance(self.spec, HasTemplate):
return self.spec.template
else:
return None
def _local_dependencies(self) -> Set[Union["BaseEntity", "LinkByUID"]]:
"""Return a set of all immediate dependencies (no recursion)."""
return {self.spec} if self.spec is not None else set()
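# A hypothetical, minimal sketch of how a concrete object could use this mix-in
# (illustrative only; MyRun and MySpec are not part of gemd):
#
# class MyRun(HasSpec):
#     @staticmethod
#     def _spec_type() -> Type:
#         return MySpec  # the concrete HasTemplate subclass this run accepts as its spec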
|
python
|
from otree.api import Currency as c, currency_range
from . import pages
from ._builtin import Bot
from .models import Constants
import csv
class PlayerBot(Bot):
def play_round(self):
with open('trial_set_FR.csv', newline='', encoding='cp1252') as csvfile:
csv_reader = csv.DictReader(csvfile, delimiter=',')
for row in csv_reader:
if row['round'] == str(self.round_number) \
and ( float(row['total_points']) < 0 \
or row['bad_rec'] == "TRUE"):
choice_value = row['order']
break
#choice_value = 1
yield pages.WaitPage_FR
yield pages.ChoiceRound_FR, dict(choice = choice_value)
|
python
|
###########DO NOT DELETE#########################
#cat /proc/sys/vm/overcommit_memory
#echo 1 | sudo tee /proc/sys/vm/overcommit_memory
#################################################
from tifffile import imsave
import numpy as np
import cv2
import os
import datetime
from PIL import Image
norm = np.zeros((480,848,1))
print(norm.shape)
path, dirs, files = next(os.walk("/home/kathir/Desktop/tiff_images"))
file_count = len(files)
print(file_count)
stack = np.empty((file_count, 120, 212))
for i in range(0,file_count):
im = Image.open(f'/home/kathir/Desktop/tiff_images/test-{i+1}.tiff')
#print(im.shape)
stack[i,:,:] = im
imsave('multipage.tif', stack)
|
python
|
from . import executor
from . import phone
from . import snoopphone
|
python
|
"""Added ml_models table
Revision ID: 7e60a33f42bb
Revises: 11aa784195ef
Create Date: 2021-04-16 02:25:10.719706
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = "7e60a33f42bb"
down_revision = "11aa784195ef"
branch_labels = None
depends_on = None
def upgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.create_table(
"ml_models",
sa.Column("id", sa.Integer(), nullable=False),
sa.Column("name", sa.String(), nullable=False),
sa.Column("description", sa.Text(), nullable=True),
sa.Column("is_principal", sa.Boolean(), nullable=False),
sa.Column("used_in_competitions", sa.Boolean(), nullable=False),
sa.Column("prediction_type", sa.String(), nullable=False),
sa.PrimaryKeyConstraint("id"),
sa.UniqueConstraint("name"),
)
op.create_index(
op.f("ix_ml_models_is_principal"), "ml_models", ["is_principal"], unique=False
)
op.create_index(
op.f("ix_ml_models_prediction_type"),
"ml_models",
["prediction_type"],
unique=False,
)
op.create_index(
op.f("ix_ml_models_used_in_competitions"),
"ml_models",
["used_in_competitions"],
unique=False,
)
# ### end Alembic commands ###
def downgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.drop_index(op.f("ix_ml_models_used_in_competitions"), table_name="ml_models")
op.drop_index(op.f("ix_ml_models_prediction_type"), table_name="ml_models")
op.drop_index(op.f("ix_ml_models_is_principal"), table_name="ml_models")
op.drop_table("ml_models")
# ### end Alembic commands ###
|
python
|
import os, sys
os.environ['DJANGO_SETTINGS_MODULE'] = 'settings'
test_dir = os.path.dirname(__file__)
sys.path.insert(0, test_dir)
from django.test.simple import run_tests as django_test_runner
from django.conf import settings
def runtests():
failures = django_test_runner(['localedb'], verbosity=1, interactive=True)
sys.exit(failures)
if __name__ == '__main__':
runtests()
|
python
|
# Working with Date And Time.
import datetime
from datetime import date
# Get today's date from the date class.
today = date.today()
print ("Today's date is ", today)
myDate = datetime.date(2018,6,9)
print(myDate)
print(myDate.year)
print(myDate.month)
print(myDate.day)
# Day-name, Month-name, Day-number, Year
print(myDate.strftime("%A,%B,%d,%Y"))
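# For 2018-06-09 the strftime line above prints: Saturday,June,09,2018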
|
python
|
#!/usr/bin/env python
"""
Merges global.yml and config.yml to create various local.yml for each room assistant peer node. It then
sends the configuration file to each peer and restarts the room assistant service.
See README.md file for details.
"""
import math
import os
import time
import yaml
def mergeDicts(dict1, dict2):
""" Just a wrapper around _mergeDicts to hide the generator. """
return dict(_mergeDicts(dict1, dict2))
def _mergeDicts(dict1, dict2):
""" Recursively merge dictionaries. Taken from here: https://stackoverflow.com/questions/7204805/how-to-merge-dictionaries-of-dictionaries """
for k in set(dict1.keys()).union(dict2.keys()):
if k in dict1 and k in dict2:
if isinstance(dict1[k], dict) and isinstance(dict2[k], dict):
yield (k, dict(_mergeDicts(dict1[k], dict2[k])))
else:
# If one of the values is not a dict, you can't continue merging it.
# Value from second dict overrides one in first and we move on.
yield (k, dict2[k])
# Alternatively, replace this with exception raiser to alert you of value conflicts
elif k in dict1:
yield (k, dict1[k])
else:
yield (k, dict2[k])
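# A small illustration of the merge behaviour described above (hypothetical dicts):
# mergeDicts({'a': {'x': 1}, 'b': 2}, {'a': {'y': 3}, 'b': 4})
# returns {'a': {'x': 1, 'y': 3}, 'b': 4} -- nested dicts are merged, scalar conflicts take dict2's value.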
# Read the configuration files. Who needs error handling?
with open('config.yml', 'r') as file:
config = yaml.safe_load(file)
with open('global.yml', 'r') as file:
globalData = yaml.safe_load(file)
def getRaCfg(name, default):
""" Gets a config attribute, if not set, return the default. """
if 'raCfg' in config:
if name in config['raCfg'] and isinstance(config['raCfg'][name], type(default)):
return config['raCfg'][name]
return default
shutdownPeersOnSync = getRaCfg('shutdownPeersOnSync', False)
restartServices = getRaCfg('restartServices', True)
restartSleepTime = getRaCfg('restartSleepTime', 10)
if 'peers' not in config:
raise KeyError("Error: No 'peers' in config file. Must have peers!")
# Load default values for each peer. It's the same as putting the values in global.yml
peerDefaults = config.pop('_defaults', None)
if peerDefaults is None:
peerDefaults = {}
peersRaw = {} # Store raw peer data here.
peersData = {} # Properly parsed data. Ready to be merged with global.yml data.
# peerAddressesRaw = {}
globalKeys = globalData.keys()
# Find system wide port number for clustering.
clusterPort = 6425
hasClustering = False
if 'cluster' in globalData:
hasClustering = True
if 'port' in globalData['cluster']:
clusterPort = globalData['cluster']['port']
tagOverrides = None
if 'bluetoothLowEnergy' in config:
tagOverrides = config['bluetoothLowEnergy'].pop('tags', None)
if 'bluetoothLowEnergy' not in config:
config['bluetoothLowEnergy'] = {'allowlist': []}
if 'allowlist' not in config['bluetoothLowEnergy']:
config['bluetoothLowEnergy']['allowlist'] = []
if tagOverrides is not None:
# allowlist = []
config['bluetoothLowEnergy']['tagOverrides'] = tagOverrides
for tag, data in tagOverrides.items():
config['bluetoothLowEnergy']['allowlist'].append(tag)
if 'bluetoothLowEnergy' not in globalData:
globalData['bluetoothLowEnergy'] = {}
globalData['bluetoothLowEnergy'] = mergeDicts(globalData['bluetoothLowEnergy'], config['bluetoothLowEnergy'])
# print(globalData)
peersAddressList = [] # A list of all peers and their address:port
for peerName, raw in config['peers'].items():
"""
Build peerData from peerRaw data. This allows overriding global.yml settings for all peers
or individual peers. You choose if you want it in global.yml or in _defaults inside config.yml
"""
if peerName == '_defaults':
continue
peersData[peerName] = {}
peersRaw[peerName] = mergeDicts(peerDefaults, raw)
# For each top level global item, lets add some additional data for this peer.
for gKey in globalKeys:
peersData[peerName][gKey] = {}
res = {key: val for key, val in peersRaw[peerName].items()
if key.startswith(f'{gKey}_')}
if len(res) == 0:
continue
for resKey, resData in res.items():
peersData[peerName][gKey][resKey.split('_')[1]] = resData
peerClusterPort = clusterPort
if 'cluster' in peersData[peerName]:
if 'port' in peersData[peerName]['cluster']:
peerClusterPort = peersData[peerName]['cluster']['port']
peersData[peerName]['cluster']['port'] = peerClusterPort
else:
peersData[peerName]['cluster'] = {'port': peerClusterPort}
peersAddressList.append(f"{peersRaw[peerName]['address']}:{peersData[peerName]['cluster']['port']}")
#
# Update / Force various globalData settings.
if 'quorum' not in globalData['cluster']: # Should have a quorum value.
globalData['cluster']['quorum'] = int(math.ceil((len(peersAddressList)+1)/2))
else:
if globalData['cluster']['quorum'] > len(peersAddressList):
raise Exception("quorum value manually set, but it's higher than peer count. Please math better.")
if 'autoDiscovery' not in globalData['cluster']: # Should have an autoDiscovery value.
globalData['cluster']['autoDiscovery'] = False
#############################################
# Time to publish the files.
#
# Files are saved in the ./dist folder for review and are used by rsync to copy each file over.
#############################################
# Shutting down all the peers has caused issues, not recommended, but it's here.
if shutdownPeersOnSync:
restartServices = True
print("\nStopping all Room Assistant peers.")
for peerName, data in peersData.items():
print(
f"Stopping peer: {peerName} - {peersRaw[peerName]['address']} - '{peersRaw[peerName]['global_instanceName']}'")
stream = os.popen(f"ssh pi@{peersRaw[peerName]['address']} sudo systemctl stop room-assistant.service")
output = stream.read()
if len(output):
print(output)
timestamp = time.ctime() # Ex: 'Mon Oct 18 13:35:29 2020'
for peerName, data in peersData.items():
try:
os.mkdir('dist')
except FileExistsError:
pass
data = mergeDicts(globalData, data) # Using global data as the base, merge in peer data for the final data.
data['cluster']['peerAddresses'] = peersAddressList
textOut = f"#\n# This file was auto generated at {timestamp}\n" \
f"# *** Modifications to this file may be lost. ***\n#\n# {peerName} - {peersRaw[peerName]['address']} - '{peersRaw[peerName]['global_instanceName']}'\n" \
f"#\n"
if "comment" in peersRaw[peerName]:
textOut += f"# {peersRaw[peerName]['comment']}\n#\n"
textOut += "\n\n"
textOut += yaml.dump(data)
with open(f'dist/{peerName}.yml', 'w') as file:
file.write(textOut)
print(f"Sending config file to: {peerName} -{ peersRaw[peerName]['address']} - '{peersRaw[peerName]['global_instanceName']}")
stream = os.popen(f"./sync_config_file.sh {peerName} {peersRaw[peerName]['address']}")
output = stream.read()
if len(output):
print(output)
# print(f"restarting service: ssh pi@{peersRaw[peerName]['address']} sudo systemctl restart room-assistant.service")
stream = os.popen(f"ssh pi@{peersRaw[peerName]['address']} sudo systemctl restart room-assistant.service")
output = stream.read()
if len(output):
print(output)
time.sleep(restartSleepTime)
|
python
|
n,t=map(int,input().split())
x=[]
mp=10**6
for i in range(n):
x.append([*map(int,input().split())])
mp=min(mp,x[i][1])
left,right=-mp,10**7
while abs(left-right)>=0.00000001:
mid=(left+right)/2
hour=0
for i in x:
hour+=i[0]/(i[1]+mid)
if hour>=t: left=mid
else: right=mid
print(left)
|
python
|
from django.apps import AppConfig
class StudentportalConfig(AppConfig):
name = 'studentportal'
|
python
|
import os
import qrcode
from ckeditor.fields import RichTextField
from django.conf import settings
from django.contrib.auth import get_user_model
from django.core.exceptions import ValidationError
from django.db import models
from mptt.models import MPTTModel, TreeForeignKey
from . import constants
User = get_user_model()
class Territory(MPTTModel):
"""
Used to designate a territory within the
site where the instrumentation (КиА) is located."""
name = models.CharField(max_length=50, unique=True,
verbose_name='Размещение')
parent = TreeForeignKey('self', on_delete=models.CASCADE,
null=True, blank=True,
related_name='children',
verbose_name='Родительский класс')
text = RichTextField(blank=True, null=True,
verbose_name='Описание')
class Meta:
ordering = ('name',)
verbose_name = 'Размещение'
verbose_name_plural = 'Размещения'
def clean(self):
"""Не позволяет создавать более 4-х
уровней вложенности для территорий размещения
КиА.
"""
if self.parent is not None:
parent_level = self.parent.get_level() # noqa
if parent_level + 1 > constants.MAX_TREE_DEPTH:
raise ValidationError({
'parent': f'Допустимо '
f'создание {constants.MAX_TREE_DEPTH} '
f'уровней вложенности'
})
else:
pass
def save(self, *args, **kwargs):
self.clean()
super().save(*args, **kwargs)
def __str__(self):
return self.name
class Piezometer(models.Model):
name = models.CharField(max_length=30,
verbose_name='Наименование')
depth = models.DecimalField(max_digits=5, decimal_places=2,
blank=True, null=True,
verbose_name='Глубина пьезометра (до дна)')
edge_height = models.DecimalField(max_digits=5, decimal_places=2,
blank=True, null=True,
verbose_name='Высота уреза оголовка')
filter_depth = models.DecimalField(max_digits=5, decimal_places=2,
blank=True, null=True,
verbose_name='Глубина установки '
'фильтра')
text = RichTextField(blank=True, null=True,
verbose_name='Описание')
pub_date = models.DateTimeField(auto_now_add=True,
verbose_name='Дата создания')
author = models.ForeignKey(User, on_delete=models.SET_NULL,
related_name='piezometers',
blank=True, null=True,
verbose_name='Внёс в систему')
territory = TreeForeignKey(Territory, on_delete=models.CASCADE,
blank=True, null=True,
related_name='piezometers',
verbose_name='Место размещения')
image = models.ImageField(upload_to='photos/',
blank=True, null=True,
verbose_name='Фотография места расположения '
'(изображение)')
passport = models.FileField(upload_to='passports/',
blank=True, null=True,
verbose_name='Паспорт (любой формат)')
status = models.CharField(
max_length=20,
choices=constants.STATUS_CHOICES,
default=constants.WORKING,
verbose_name='Статус КиА'
)
location_x = models.FloatField(max_length=30,
verbose_name='Координата Х')
location_y = models.FloatField(max_length=30,
verbose_name='Координата У')
qr_image = models.ImageField(upload_to='qr/',
blank=True, null=True,
verbose_name='QR-код')
def save(self, *args, **kwargs):
"""
Generates a QR code when the piezometer model is saved
and stores the resulting image in the database."""
qr = qrcode.QRCode(
version=1,
error_correction=qrcode.constants.ERROR_CORRECT_L,
box_size=50,
border=4,
)
data = (
f'Пьезометр: {self.name}, '
f'Местоположение: {self.territory}, '
f'Координата Х: {self.location_x}, '
f'Координата У: {self.location_y}'
)
qr.add_data(data)
qr.make(fit=True)
# If the user enters "/" in the КиА name, avoid the error of
# saving the file to the wrong directory.
name_safe = self.name.replace('/', '-').replace('.', '-')
path = os.path.join(settings.MEDIA_ROOT, 'qr', f'{name_safe}.jpeg')
img = qr.make_image(fill_color="black", back_color="white")
img.save(path, 'JPEG')
self.qr_image = os.path.join('qr', f'{name_safe}.jpeg')
super().save(*args, **kwargs)
class Meta:
ordering = ('-pub_date',)
verbose_name = 'Пьезометр'
verbose_name_plural = 'Пьезометры'
constraints = (
models.UniqueConstraint(
name='Имя пьезометра должно быть уникальным',
fields=('name', )
),
)
def __str__(self):
return self.name
class Measurement(models.Model):
kia = models.ForeignKey('Piezometer', on_delete=models.CASCADE,
verbose_name='Пьезометр')
date = models.DateTimeField(verbose_name='Дата измерения',
blank=True, null=True)
value = models.DecimalField(max_digits=5, decimal_places=2,
verbose_name='Глубина')
status = models.CharField(max_length=30,
verbose_name='Статус измерения')
author = models.CharField(max_length=30, blank=True, null=True,
verbose_name='Обходчик')
class Mark(models.Model):
name = models.CharField(max_length=30,
verbose_name='Наименование')
text = RichTextField(blank=True, null=True,
verbose_name='Описание')
pub_date = models.DateTimeField(auto_now_add=True,
verbose_name='Дата создания')
author = models.ForeignKey(User, on_delete=models.SET_NULL,
related_name='marks',
blank=True, null=True,
verbose_name='Внёс в систему')
territory = TreeForeignKey(Territory, on_delete=models.CASCADE,
blank=True, null=True, related_name='marks',
verbose_name='Место размещения')
image = models.ImageField(upload_to='photos/',
blank=True, null=True,
verbose_name='Фотография места расположения '
'(изображение)')
status = models.CharField(
max_length=20,
choices=constants.STATUS_CHOICES,
default=constants.WORKING,
verbose_name='Статус КиА'
)
type = models.CharField(
max_length=20,
choices=constants.MARK_TYPE_CHOICES,
default=constants.VERTICAL,
verbose_name='Тип марки'
)
location_x = models.FloatField(max_length=30,
verbose_name='Координата Х')
location_y = models.FloatField(max_length=30,
verbose_name='Координата У')
class Meta:
ordering = ('-pub_date',)
verbose_name = 'Марка'
verbose_name_plural = 'Марки'
def __str__(self):
return self.name
|
python
|
import numpy as np
from tools.metadata import get_hero_dict
import operator
import pandas as pd
import plotly.graph_objs as go
import plotly.plotly as py
def winrate_statistics(dataset_df, mmr_info):
x_data, y_data = dataset_df
wins = np.zeros(114)
games = np.zeros(114)
winrate = np.zeros(114)
for idx, game in enumerate(x_data):
for i in range(228):
if game[i] == 1:
games[i % 114] += 1
if y_data[idx] == 1:
if i < 114:
wins[i] += 1
else:
if i >= 114:
wins[i - 114] += 1
winrate = wins / games
winrate_dict = dict()
hero_dict = get_hero_dict()
for i in range(114):
if i != 23:
winrate_dict[hero_dict[i + 1]] = winrate[i]
sorted_winrates = sorted(winrate_dict.items(), key=operator.itemgetter(1))
x_plot_data = [x[0] for x in sorted_winrates]
y_plot_data = [x[1] for x in sorted_winrates]
title = 'Hero winrates at ' + mmr_info + ' MMR'
data = [go.Bar(
y=x_plot_data,
x=y_plot_data,
orientation='h'
)]
layout = go.Layout(
title=title,
width=1000,
height=1400,
yaxis=dict(title='hero',
ticks='',
nticks=114,
tickfont=dict(
size=8,
color='black')
),
xaxis=dict(title='win rate',
nticks=30,
tickfont=dict(
size=10,
color='black')
)
)
fig = go.Figure(data=data, layout=layout)
py.iplot(fig, filename='hero_winrates_' + mmr_info)
def pick_statistics(dataset_df, mmr_info):
x_data, y_data = dataset_df
wins = np.zeros(114)
games = np.zeros(114)
pick_rate = np.zeros(114)
for idx, game in enumerate(x_data):
for i in range(228):
if game[i] == 1:
games[i % 114] += 1
if y_data[idx] == 1:
if i < 114:
wins[i] += 1
else:
if i >= 114:
wins[i - 114] += 1
pick_rate = games / np.sum(games)
pick_rate_dict = dict()
hero_dict = get_hero_dict()
for i in range(114):
if i != 23:
pick_rate_dict[hero_dict[i + 1]] = pick_rate[i]
sorted_pickrates = sorted(pick_rate_dict.items(), key=operator.itemgetter(1))
x_plot_data = [x[0] for x in sorted_pickrates]
y_plot_data = [x[1] for x in sorted_pickrates]
title = 'Hero pick rates at ' + mmr_info + ' MMR'
data = [go.Bar(
y=x_plot_data,
x=[rate * 100 for rate in y_plot_data],
orientation='h'
)]
layout = go.Layout(
title=title,
width=1000,
height=1400,
yaxis=dict(title='hero',
ticks='',
nticks=114,
tickfont=dict(
size=8,
color='black')
),
xaxis=dict(title='pick rate',
nticks=30,
tickfont=dict(
size=10,
color='black')
)
)
fig = go.Figure(data=data, layout=layout)
py.iplot(fig, filename='hero_pickrates_' + mmr_info)
def mmr_distribution(csv_file):
dataset = pd.read_csv(csv_file)
data = [go.Histogram(x=dataset[:30000]['avg_mmr'])]
layout = go.Layout(
title='MMR distribution (sample of 30k games)'
)
fig = go.Figure(data=data, layout=layout)
py.iplot(fig, filename='MMR_distribution')
|
python
|
# First, lets get a list of all files in the data directory
import time
import sys
from tensorflow import keras
import tensorflow as tf
import pathlib
import json
from tensorflow.keras.callbacks import TensorBoard, ModelCheckpoint
from tensorflow.keras.layers import Conv3D, MaxPooling3D, BatchNormalization, GlobalAveragePooling3D
from tensorflow.keras.layers import Dense, Dropout, Activation, Flatten, MaxPooling2D, Conv2D, InputLayer
from tensorflow.keras.models import Sequential
from tensorflow.keras.preprocessing.image import ImageDataGenerator, array_to_img, img_to_array, load_img
import os
import pickle # used for caching
import random
import pandas as pd
from PIL import Image
import numpy as np
import multiprocessing
path = 'data/'
images_list = {}
for i in range(1, 11):
images_list[i] = []
abs_dir = pathlib.Path(__file__).parent.absolute()
broken = []
count = 0
# This is to rename everything
for root, directories, files in os.walk(path, topdown=False):
for name in files:
count = count + 1
try:
old_name = str(os.path.join(abs_dir, root, name))
new_name = str(
os.path.join(abs_dir, root, name))
if ".jpeg" not in str(name):
print("Need to update:", name)
os.rename(old_name, new_name)
img = Image.open(new_name)
img.verify()
classification = int(root.split("/")[-1])
images_list[classification].append(
{"f": name, "p": str(os.path.join(abs_dir, root, name)), "t": str(classification)})
except NameError as error:
print(error)
except FileNotFoundError as error:
print(error)
except:
print("Unexpected error:", sys.exc_info()[0])
print("Unable to handle file: ", os.path.join(
abs_dir, root, name))
broken.append(name)
print("Found", len(broken), "broken items out of a total",
count, "leaving", count - len(broken), "valid")
# Now, lets print some debug information
for key in images_list:
print(key, len(images_list[key]))
# if len(broken) > 1:
# quit()
# for root, directories, files in os.walk(path, topdown=False):
# for name in files:
# try:
# classification = int(root.split("/")[-1])
# images_list[classification].append(
# {"f": name, "p": str(os.path.join(abs_dir, root, name)), "t": str(classification)})
# except:
# print("Unable to handle file: ", name)
# Now, lets separate everything into a training set and validation set
training_split = 0.8
training_set = []
validation_set = []
target_size = (300, 300)
# loop through each key, shuffle, then create training and validation sets
random.seed(14354)
for key in images_list:
random.shuffle(images_list[key])
training_length = int(len(images_list[key]) * training_split)
training_set.extend(images_list[key][:training_length])
validation_set.extend(images_list[key][training_length:])
# now, we need to prepare this as a data frame
training_df = pd.DataFrame(training_set)
validation_df = pd.DataFrame(validation_set)
print(training_df.at[0, 'p'])
print(training_df)
training_df.to_csv("training-df.csv")
validation_df.to_csv("validation-df.csv")
# Now, keras stuff
# this is the augmentation configuration we will use for training
train_datagen = ImageDataGenerator(
shear_range=0.2,
zoom_range=0.2,
# horizontal_flip=True,
rescale=1./255)
test_datagen = ImageDataGenerator(rescale=1./255)
train_generator = train_datagen.flow_from_dataframe(
dataframe=training_df,
directory=None,
x_col="p",
y_col="t",
class_mode='categorical',
target_size=target_size,
validate_filenames=False
# save_to_dir="build/"
)
validation_generator = test_datagen.flow_from_dataframe(
dataframe=validation_df,
directory=None,
x_col="p",
y_col="t",
class_mode='categorical',
target_size=target_size,
validate_filenames=False
)
# # now, build the model
# if os.name == 'nt':
# print("--------- USING GPU ACCELERATION ----------")
# physical_devices = tf.config.experimental.list_physical_devices('GPU')
# tf.config.experimental.set_memory_growth(physical_devices[0], True)
def test_model():
class_names = [str(idx) for idx in range(1, 11)]
print("Class names:", class_names)
# Combine training df and testing df
combined_df = pd.concat([training_df, validation_df])
combined_datagen = ImageDataGenerator(rescale=1.0/255)
combined_generator = combined_datagen.flow_from_dataframe(
dataframe=combined_df,
directory=None,
x_col="p",
y_col="t",
class_mode="categorical",
classes=class_names,
target_size=target_size,
validate_filenames=False,
shuffle=False,
)
class_indices = combined_generator.class_indices
print(class_indices)
class_from_index = [0] * 10
for key in class_indices:
class_from_index[class_indices[key]] = key
# predict results
model = keras.models.load_model(
'trained_models/best_model-image-classifier.hdf5')
model.evaluate(combined_generator)
predictions = model.predict(combined_generator)
count = 0
correct = 0
for idx in range(0, len(predictions)):
expected = combined_df.iloc[idx, :]['t'] # YES
actual = class_from_index[np.argmax(predictions[idx])] # YES
# print(type(expected), expected, type(actual), actual)
count += 1
if(int(expected) == int(actual)):
correct += 1
print("Found", correct, "correct out of", count)
# We need to keep track of the results in each category
# so we know how 1s were interpreted (i.e. histogram)
# the lookup key will
# key will be the known category
# value will be a map of predicted values (hashmap), where the key
# is the str predicted value
category_store = {}
for idx in range(1, 11):
category_store[str(idx)] = np.zeros(10)
# Now, we loop through the predictions and update the category store
for idx in range(0, len(predictions)):
expected = combined_df.iloc[idx, :]['t'] # String
pred_index = np.argmax(predictions[idx]) # array index
pred_value = class_from_index[pred_index] # String
new_index = class_names.index(pred_value)
# this returns the class_names INDEX!!!
category_store[expected][new_index] += 1
# Now, lets reformat category store so it is more usable
for key in category_store:
item = category_store[key]
result = {
'data': item.astype(int).tolist(),
'count': int(item.astype(int).sum())
}
result['accuracy'] = item[class_names.index(key)] / result['count']
category_store[key] = result
# Now build summary
summary = {}
for key in category_store:
summary[key] = {
'accuracy': category_store[key]['accuracy'],
'count': category_store[key]['count']
}
category_store['summary'] = summary
print(json.dumps(category_store, indent=4))
np.set_printoptions(suppress=True)
np.set_printoptions(linewidth=np.inf)
# yep
with open('output.json', 'w') as f:
f.write(json.dumps(category_store, indent=4))
def train_model():
# Lets save out the value mapping
np.save('class_indices', train_generator.class_indices)
# Lets build class_weights
# We need to be sure to remap the weights to the correct values (i.e. following class_indices)
val_counts = training_df['t'].value_counts()
class_weights = {}
for name, val in val_counts.items():
class_weights[name] = val
max_val = val_counts.max()
for key in class_weights:
class_weights[key] = max_val / class_weights[key]
reordered_class_weights = {}
for key in train_generator.class_indices:
reordered_class_weights[train_generator.class_indices[key]
] = class_weights[key]
print("class weights:", reordered_class_weights)
# for dense_layer in dense_layers:
# for filter_size in filter_sizes:
# for conv_layer in conv_layers:
conv_layer = 3
filter_size = 64
dense_layer = 1
NAME = "{}-conv-{}-nodes-{}-dense-{}".format(
conv_layer, filter_size, dense_layer, int(time.time()))
print(NAME)
tensorboard = TensorBoard(
log_dir="logs/{}".format(NAME), update_freq="epoch", profile_batch=0)
# Set up the model checkpoint callback
filepath = 'trained_models/best_model-image-classifier.hdf5'
checkpoint = ModelCheckpoint(filepath=filepath,
monitor="val_accuracy",
verbose=1,
save_best_only=True,
mode='max')
model = keras.Sequential(
[
InputLayer(input_shape=(target_size[0], target_size[1], 3)),
Conv2D(32, kernel_size=(3, 3), activation="relu"),
MaxPooling2D(pool_size=(2, 2)),
Conv2D(64, kernel_size=(3, 3), activation="relu"),
MaxPooling2D(pool_size=(2, 2)),
Flatten(),
Dropout(0.8),
Dense(10, activation="softmax"),
]
)
METRICS = [
keras.metrics.CategoricalAccuracy(name='accuracy'),
keras.metrics.Precision(name='precision')
]
model.compile(optimizer='sgd',
loss='categorical_crossentropy',
metrics=METRICS)
model.summary()
model.fit(train_generator,
validation_data=validation_generator,
epochs=50,
workers=multiprocessing.cpu_count(),
callbacks=[tensorboard, checkpoint],
shuffle=True,
class_weight=reordered_class_weights
)
if __name__ == '__main__':
train_model()
test_model()
# At this point, we should have our
|
python
|
from Gaussiandistribution import Gaussian
gaussian_one = Gaussian(22, 2)
print(gaussian_one.mean)
|
python
|
from math import floor
def find_position(value):
position = floor(value / 100)
if value % 100 == 0:
position = position - 1
return position
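# e.g. find_position(250) -> 2, while find_position(200) -> 1 (exact multiples of 100
# are treated as the end of the previous 100-unit band).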
def center_of_square(coordinate, dimension):
return coordinate + dimension / 2
|
python
|
#
# Script to do an Oracle restore given an optional date range of the copy
# An existing restore job is required, copy version of that job will be updated if applicable
# If no date range is provided the latest copy will be used
# Backup job name for the copy to use can also be provided
# Set the cancel parameter = true if using this script to cancel/clean up an existing restore job
#
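# Example invocation (hypothetical script name and values, matching the options parsed below):
#   python oracle_restore.py --user admin --pass secret --host https://172.20.58.10:8443 \
#       --restore "Oracle Restore Job" --start "04/01/2021 00:00" --end "04/02/2021 00:00" --backup "Oracle Daily"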
import json
import sys
import time
import datetime
from optparse import OptionParser
import logging
import ecxclient.sdk.client as client
logger = logging.getLogger('logger')
logging.basicConfig()
logger.setLevel(logging.INFO)
parser = OptionParser()
parser.add_option("--user", dest="username", help="ECX Username")
parser.add_option("--pass", dest="password", help="ECX Password")
parser.add_option("--host", dest="host", help="ECX Host, (ex. https://172.20.58.10:8443)")
parser.add_option("--restore", dest="restore", help="Restore Job Name")
parser.add_option("--start", dest="start", help="Start Date filter for backup version (optional)")
parser.add_option("--end", dest="end", help="End Date filter for backup version (optional)")
parser.add_option("--backup", dest="backup", help="Backup job name for copy to use (optional)")
parser.add_option("--cancel", dest="cancel", help="Enter 'true' for Cancel/Cleanup restore (optional)")
(options, args) = parser.parse_args()
if (options.cancel is None):
options.cancel = "false"
def prettyprint(indata):
print(json.dumps(indata, sort_keys=True, indent=4, separators=(',', ': ')))
def get_restore_job():
jobs = client.EcxAPI(session, 'job').list()
for job in jobs:
if(job['name'].upper() == options.restore.upper()):
return job
logger.info("No job found with name %s" % options.restore)
session.delete('endeavour/session/')
sys.exit(2)
def get_policy_for_job(job):
policy = client.EcxAPI(session, 'policy').get(resid=job['policyId'])
return policy
def get_version_for_policy(policy):
version = {}
metadata = {}
sourceurl = policy['spec']['source'][0]['href']
source = client.EcxAPI(session, 'oracle').get(url=sourceurl)
# no backup filters supplied use latest
if (options.end is None and options.start is None and options.backup is None):
version['href'] = source['links']['latestversion']['href']
metadata['id'] = "latest"
metadata['name'] = "Use Latest"
version['metadata'] = metadata
logger.info("Using latest backup copy version.")
return version
# match on backup copy name no dates supplied
elif (options.end is None and options.start is None and options.backup is not None):
versionsurl = source['links']['versions']['href']
versions = client.EcxAPI(session, 'oracle').get(url=versionsurl)['versions']
for vers in versions:
prottime = int(vers['protectionInfo']['protectionTime'])
if (options.backup.upper() == vers['protectionInfo']['policyName'].upper()):
version['href'] = vers['links']['self']['href']
metadata['id'] = vers['id']
metadata['name'] = time.ctime(prottime/1000)[4:]
version['metadata'] = metadata
logger.info("Using backup copy version from: %s" % metadata['name'])
return version
# match on dates and backup copy name
elif (options.end is not None and options.start is not None and options.backup is not None):
start = int(datetime.datetime.strptime(options.start, '%m/%d/%Y %H:%M').strftime("%s"))*1000
end = int(datetime.datetime.strptime(options.end, '%m/%d/%Y %H:%M').strftime("%s"))*1000
versionsurl = source['links']['versions']['href']
versions = client.EcxAPI(session, 'oracle').get(url=versionsurl)['versions']
for vers in versions:
prottime = int(vers['protectionInfo']['protectionTime'])
if (prottime > start and prottime < end and options.backup.upper() == vers['protectionInfo']['policyName'].upper()):
version['href'] = vers['links']['self']['href']
metadata['id'] = vers['id']
metadata['name'] = time.ctime(prottime/1000)[4:]
version['metadata'] = metadata
logger.info("Using backup copy version from: %s" % metadata['name'])
return version
# match on dates no copy named supplied
else:
start = int(datetime.datetime.strptime(options.start, '%m/%d/%Y %H:%M').strftime("%s"))*1000
end = int(datetime.datetime.strptime(options.end, '%m/%d/%Y %H:%M').strftime("%s"))*1000
versionsurl = source['links']['versions']['href']
versions = client.EcxAPI(session, 'oracle').get(url=versionsurl)['versions']
for vers in versions:
prottime = int(vers['protectionInfo']['protectionTime'])
if (prottime > start and prottime < end):
version['href'] = vers['links']['self']['href']
metadata['id'] = vers['id']
metadata['name'] = time.ctime(prottime/1000)[4:]
version['metadata'] = metadata
logger.info("Using backup copy version from: %s" % metadata['name'])
return version
logger.info("No backup copy found with provided dates or backup copy name")
session.delete('endeavour/session/')
sys.exit(2)
def update_restore_policy(policy):
polid = policy['id']
del policy['id']
del policy['links']
del policy['lastUpdated']
del policy['creationTime']
del policy['logicalDelete']
del policy['rbacPath']
del policy['tenantId']
policy = client.EcxAPI(session, 'policy').put(resid=polid, data=policy)
return policy
def get_pending_job_session(job):
sessionurl = job['links']['pendingjobsessions']['href']
jobsession = client.EcxAPI(session, 'jobsession').get(url=sessionurl)
if (len(jobsession['sessions']) < 1):
logger.info("No pending job sessions found.")
session.delete('endeavour/session/')
sys.exit(2)
return jobsession['sessions'][0]
def cancel_restore_job(jobsession):
sessioninfo = jobsession['id'] + "?action=resume&actionname=end_ia"
logger.info("Cancelling restore session: %s" % jobsession['id'])
cancel = client.EcxAPI(session, 'jobsession').post(path=sessioninfo)
return cancel
def run_restore_job(job):
logger.info("Running restore job: %s" % job['name'])
job = client.JobAPI(session).run(job['id'])
return job
def run_restore():
job = get_restore_job()
if (options.cancel.upper() == "TRUE"):
jobsession = get_pending_job_session(job)
job = cancel_restore_job(jobsession)
else:
policy = get_policy_for_job(job)
version = get_version_for_policy(policy)
policy['spec']['source'][0]['version'] = version
policy = update_restore_policy(policy)
job = run_restore_job(job)
session = client.EcxSession(options.host, options.username, options.password)
session.login()
run_restore()
session.delete('endeavour/session/')
|
python
|
# Copyright 2019 The Bazel Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Helper function to setup @bazel/typescript dev dependencies.
"""
load("@bazel_gazelle//:deps.bzl", "go_repository")
def ts_setup_dev_workspace():
"""
Setup the toolchain needed for local development, but not needed by users.
These need to be in a separate file from ts_setup_workspace() so as not
to leak load statements.
"""
ts_setup_workspace()
go_repository(
name = "com_github_kylelemons_godebug",
commit = "9ff306d4fbead574800b66369df5b6144732d58e", # v1.1.0
importpath = "github.com/kylelemons/godebug",
)
go_repository(
name = "com_github_mattn_go_isatty",
commit = "7b513a986450394f7bbf1476909911b3aa3a55ce", # v0.0.12
importpath = "github.com/mattn/go-isatty",
)
|
python
|
def create_app():
from flask import Flask
from localtalk.views import init_views
app = Flask(__name__)
# init views
init_views(app)
return app
def create_server():
from localtalk.server import Server
server = Server()
return server
|
python
|
import asyncio
from datetime import datetime
import logging
import aiohttp
import requests
import certifi
TIMEOUT_FOR_PAR_REQS = 60*5
TIMEOUT_FOR_SEQ_REQS = 60
Logger = logging.getLogger(__name__)
class InvalidClientResponse:
def __init__(self):
self.status = None
async def fetch(uri, session, body=False):
"""
asynchronous `get` or `head`
"""
start = datetime.utcnow()
do_request = session.head if body is False else session.get
try:
async with do_request(uri) as response:
text = await response.text()
except (
aiohttp.ClientResponseError,
aiohttp.ClientError,
AttributeError) as e:
response = InvalidClientResponse()
Logger.exception(e)
text = None
finally:
# add new attributes to the ClientResponse object
response.uri = uri
response.end_time = datetime.utcnow()
response.start_time = start
response.status_code = response.status
response.text = text
Logger.info("Requested %s: %s", uri, response.status_code)
return response
async def fetch_many(uri_items, body=False, timeout=TIMEOUT_FOR_PAR_REQS):
# https://docs.aiohttp.org/en/stable/client_quickstart.html#timeouts
client_timeout = aiohttp.ClientTimeout(total=timeout or TIMEOUT_FOR_PAR_REQS)
async with aiohttp.ClientSession(timeout=client_timeout) as session:
responses = await asyncio.gather(*[
fetch(url, session, body)
for url in uri_items
])
return responses
def parallel_requests(uri_items, body=False, timeout=TIMEOUT_FOR_PAR_REQS):
"""
performs parallel requests
"""
return asyncio.run(fetch_many(uri_items, body, timeout=timeout))
def seq_requests(uri_items, body=False, timeout=TIMEOUT_FOR_SEQ_REQS):
"""
performs sequential requests
"""
do_request = requests.head if body is False else requests.get
resps = []
for u in uri_items:
resp = do_request(u, timeout=timeout)
resps.append(resp)
return resps
def compare(lista, body=True):
print("")
print("Body: {}".format(body))
print("Requests: ")
print("\n".join(lista))
resps = {}
for name, func in (("Sequential", seq_requests), ("Parallel", parallel_requests)):
print(name)
t1 = datetime.utcnow()
items = func(lista, body)
t2 = datetime.utcnow()
resps[name] = {
"duration (ms)": (t2 - t1).microseconds,
"status": [r.status_code for r in items],
"len": [len(r.text) for r in items],
}
print(resps)
print(resps["Parallel"]["duration (ms)"] / resps["Sequential"]["duration (ms)"])
def main():
lista3 = [
'https://www.scielo.br/scielo.php?script=sci_arttext&pid=S0102-67202020000200304&lng=en&nrm=iso&tlng=en',
'https://www.scielo.br/scielo.php?script=sci_arttext&pid=S0102-67202020000200305&lng=en&nrm=iso',
'https://www.scielo.br/scielo.php?pid=S0100-39842020000200001&script=sci_arttext&tlng=pt',
]
compare(lista3, True)
compare(lista3, False)
compare([lista3[0]], True)
compare([lista3[0]], False)
if __name__ == "__main__":
main()
|
python
|
from django.db import models
from covid_data.models.base import ModeloBase
class Sector(ModeloBase):
"""Identifica el tipo de institución del Sistema Nacional de Salud
que brindó la atención.
"""
clave = models.IntegerField(unique=True)
descripcion = models.CharField(max_length=63)
def __repr__(self):
return self.descripcion
def __str__(self):
return self.descripcion
|
python
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import simplejson as json
from alipay.aop.api.response.AlipayResponse import AlipayResponse
class AlipayUserStepcounterQueryResponse(AlipayResponse):
def __init__(self):
super(AlipayUserStepcounterQueryResponse, self).__init__()
self._count = None
self._count_date = None
self._time_zone = None
@property
def count(self):
return self._count
@count.setter
def count(self, value):
self._count = value
@property
def count_date(self):
return self._count_date
@count_date.setter
def count_date(self, value):
self._count_date = value
@property
def time_zone(self):
return self._time_zone
@time_zone.setter
def time_zone(self, value):
self._time_zone = value
def parse_response_content(self, response_content):
response = super(AlipayUserStepcounterQueryResponse, self).parse_response_content(response_content)
if 'count' in response:
self.count = response['count']
if 'count_date' in response:
self.count_date = response['count_date']
if 'time_zone' in response:
self.time_zone = response['time_zone']
|
python
|
#!/usr/bin/env python
import sys, os
sys.path.append(os.path.join(os.path.dirname(__file__), '..'))
import messagebird
# ACCESS_KEY = ''
# PHONE_NUMBER = ''
try:
ACCESS_KEY
except NameError:
print('You need to set an ACCESS_KEY constant in this file')
sys.exit(1)
try:
PHONE_NUMBER
except NameError:
print('You need to set a PHONE_NUMBER constant in this file')
sys.exit(1)
try:
# Create a MessageBird client with the specified ACCESS_KEY.
client = messagebird.Client(ACCESS_KEY)
# Create a new Lookup HLR object.
lookup_hlr = client.lookup_hlr(PHONE_NUMBER)
# Print the object information.
print('\nThe following information was returned as a Lookup HLR object:\n')
print(' id : %s' % lookup_hlr.id)
print(' href : %s' % lookup_hlr.href)
print(' msisdn : %d' % lookup_hlr.msisdn)
print(' network : %d' % lookup_hlr.network)
print(' reference : %s' % lookup_hlr.reference)
print(' status : %s' % lookup_hlr.status)
print(' details : %s' % lookup_hlr.details)
print(' createdDatetime : %s' % lookup_hlr.createdDatetime)
print(' statusDatetime : %s\n' % lookup_hlr.statusDatetime)
except messagebird.client.ErrorException as e:
print('\nAn error occurred while requesting a Lookup HLR object:\n')
for error in e.errors:
print(' code : %d' % error.code)
print(' description : %s' % error.description)
print(' parameter : %s\n' % error.parameter)
|
python
|
while 0 < 1:
mec = input("Valor da Medardoria: ")
icns = float(mec) * 17/100
irpf = float(mec) * 15/100
csll = float(mec) * 7.6/100
cofins = float(mec) * 3/100
pis = float(mec) * 1.65/100
me = float(mec) - (icns + irpf + cofins + pis + csll)
print("<<<<<<<<<<<RESLTADO>>>>>>>>>>")
print("Anicota Imposto Valor")
print("17% ICMS " + str(icns))
print("15% IRPF " + str(irpf))
print("7.6% CSLL " + str(csll))
print("3% COFINS " + str(cofins))
print("1.65% PIS " + str(pis))
print("Valor real da Mercadoria: " + str(me))
print("<<<<<<<<<<<RESULTADO>>>>>>>>>>")
|
python
|
from ._fastfood import Fastfood
__all__ = ['Fastfood']
|
python
|
from django.shortcuts import render,redirect
from django.http import HttpResponse
from django.conf import *
from .models import User
import json
# Create your views here.
def login(request):
return render(request,'account/login.html')
def dologin(request):
username=request.POST['username']
pwd=request.POST['pwd']
try:
user=User.users.get(username=username,passwd=pwd)
except Exception as e:
return redirect('account:login')
user1 = json.dumps(user, default=lambda obj: obj.__dict__)
print(user1)
request.session['userinfo']=user1
# a = request.session['userinfo']=user1
# print(a)
return redirect( 'index')
def outlogin(request):
'''
Log out
'''
if request.session['userinfo']:
request.session.flush()
return redirect('index')
else:
return HttpResponse('error')
def register(request):
return render(request,'account/register.html')
def do_register(request):
# username
username=request.POST['user_name']
# password
password1=request.POST['pwd']
# password confirmation
password2=request.POST['cpwd']
# email
email=request.POST['email']
# check whether the user already exists
user=User.users.filter(username=username)
if not user:
# user does not exist yet, create it
user=User()
user.username=username
user.passwd=password2
user.email=email
user.save()
user1=User.users.get(username=username)
u=json.dumps(user1,default=lambda obj:obj.__dict__)
request.session['userinfo']=u
return redirect('index')
return HttpResponse('用户已存在')# ??
def user_center(request):
'''
User details
'''
return render(request,'account/user_center_info.html')
def user_center_order(request):
'''
User orders
'''
return render(request,'account/user_center_order.html')
def user_center_site(request):
'''
User details
'''
return render(request,'account/user_center_site.html')
|
python
|
#! /usr/bin/env python
#-******************************************************************************
#
# Copyright (c) 2012-2013,
# Sony Pictures Imageworks Inc. and
# Industrial Light & Magic, a division of Lucasfilm Entertainment Company Ltd.
#
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Sony Pictures Imageworks, nor
# Industrial Light & Magic, nor the names of their contributors may be used
# to endorse or promote products derived from this software without specific
# prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
#-******************************************************************************
import os
import logging
from abcview import config
from abcview import style
__doc__ = """
AbcView is a graphical PyQt-based Alembic inspection
and visualization tool. It offers a number of widgets
to inspect and visualize Alembic data, as well as
assemble hierarchical scenes.
What's new:
- fixes performance issue on GeForce cards
- fixes recursion bug when iterating properties
More information:
http://docs.alembic.io/python/abcview.html
"""
__todo__ = """
TODO:
- better object-level selection/framing
- better session cycle checking on load
- contextual session editing
- more stats (poly count, mem usage)
- support for lights and materials
- edit/remove cameras from viewer
- draggable, pop-out widgets
- support object paths in args
- save split window layouts
- better per-time data caching
- socket connections
- unit tests
"""
# logging handler, imported by most other modules
FORMAT = '%(asctime)-15s %(message)s'
logging.basicConfig(format=FORMAT)
log = logging.getLogger(config.__prog__)
log.setLevel(int(os.environ.get('ABCVIEW_LOG_LEVEL', logging.WARN)))
# abcview version info
version_string = config.__version__
version_tuple = tuple(int(v) for v in version_string.split('.'))
|
python
|
# Copyright 2022 The Balsa Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
from torch import nn
_ACTIVATIONS = {
'tanh': nn.Tanh,
'relu': nn.ReLU,
'sigmoid': nn.Sigmoid,
}
def ReportModel(model, blacklist=None):
ps = []
for name, p in model.named_parameters():
if blacklist is None or blacklist not in name:
ps.append(np.prod(p.size()))
num_params = sum(ps)
mb = num_params * 4 / 1024 / 1024
print('Number of model parameters: {} (~= {:.1f}MB)'.format(num_params, mb))
print(model)
return mb
def MakeMlp(input_size, num_outputs, hiddens, activation):
layers = []
prev_layer_size = input_size
for size in hiddens:
layers.append(nn.Linear(prev_layer_size, size))
layers.append(_ACTIVATIONS[activation]())
prev_layer_size = size
# Output layer.
layers.append(nn.Linear(prev_layer_size, num_outputs))
return nn.Sequential(*layers)
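# For reference, MakeMlp(input_size=16, num_outputs=1, hiddens=[32, 32], activation='relu')
# builds Linear(16->32), ReLU, Linear(32->32), ReLU, Linear(32->1).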
|
python
|
# -*- coding: utf-8 -*-
"""Main module."""
import sys
import urllib.request
import re
def main():
'''Checks that an argument has been given, otherwise returns an error. Then checks that the file contains something.
Then it converts the file to a string, takes the first line as the size of the array for our lightTester and then
splits the rest of the string into lines of commands and coordinates. Then it runs the lightTester method apply on
each line, switching the lights in the grid on/off/toggling them. Finally it runs a count on the lights left on after
all these operations and returns the number of lights left on.'''
if len(sys.argv)<3:
return "Error - must have one and only one file as an argument e.g. solve_led_project --input text.txt"
elif len(sys.argv)>3:
return "Error - must have one and only one file as an argument e.g. solve_led_project --input text.txt"
else:
file = str(sys.argv[2])
instructions = readFile(file)
if instructions == '':
return "Error - file is empty"
else:
firstLine = instructions.split('\n')[0]
lights=lightTester(firstLine)
instructions = getCommand(instructions)
for line in instructions:
lights.apply(line)
return "The number occupied : ", lights.count()
if __name__ == '__main__':
main()
def readFile(file):
'''Checks if the file name provided contains 'http' at the start. If so, opens and reads it as a url, otherwise, opens and reads
it like any other file. Returns a string containing the text of the file just read'''
commandList=""
if file.startswith("http://"):
url=urllib.request.urlopen(file)
commandList=url.read().decode('utf-8')
else:
commandList = open(file, 'r')
commandList = commandList.read()
return commandList
def getCommand(cmd):
'''Sets a RegEx pattern and then reads a string given to it and returns all the lines that match that pattern as a set of lists
which can be operated on'''
pattern = re.compile(".*(turn on|turn off|switch)\s*([+-]?\d+)\s*,\s*([+-]?\d+)\s*through\s*([+-]?\d+)\s*,\s*([+-]?\d+).*")
command = re.findall(pattern, str(cmd))
return command
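# e.g. getCommand("turn on 0,0 through 4,4") returns [('turn on', '0', '0', '4', '4')]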
class lightTester():
'''Class to create a lights grid'''
lights=[]
def __init__(self,N):
'''Takes input N and takes as the size of the array of arrays for the grid. Every light in the grid is set to false (off).'''
N = int(N)
self.lights = [[False]*N for _ in range(N)]
self.size = N
def apply(self,line):
'''Splits the RegEx lines given to it up into commands and coordinates. Checks coordinates aren't outside the range, if so
it amends them to fit inside. Depending on the command, turns all lights on or off or switches them based on the coordinates
given to it'''
start1, start2, end1, end2 = int(line[1]), int(line[2]), int(line[3]), int(line[4])
if start1 < 0: #make sure start points are 0 or greater
start1 = 0
if start2 < 0:
start2 = 0
if end1 >=self.size: #make sure end points aren't greater than array size
end1 = self.size-1
if end2 >=self.size:
end2 = self.size-1
if (line[0] =="turn on") or (line[0] =="turn off" ) or (line[0] =="switch") :
if line[0] == "turn on":
if start1<=end1 and start2<=end2:
for i in range(start1, end1+1):
for j in range(start2, end2+1):
self.lights[i][j] = True
elif line[0] == "turn off":
if start1<=end1 and start2<=end2:
for i in range(start1, end1+1):
for j in range(start2, end2+1):
self.lights[i][j] = False
elif line[0] =="switch":
if start1<=end1 and start2<=end2:
for i in range(start1, end1+1):
for j in range(start2, end2+1):
if self.lights[i][j] == True:
self.lights[i][j]=False
elif self.lights[i][j] == False:
self.lights[i][j]=True
def count(self):
'''Checks each point in the grid to see if the light is on. For each light on it counts it and then returns the total count of lights
that are on in the grid.'''
count=0
for i in range(len(self.lights)):
for j in range(len(self.lights)):
if self.lights[i][j] == True:
count+=1
return count
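# A minimal sketch of the flow described in the docstrings above (hypothetical 10x10 grid):
# grid = lightTester(10)
# grid.apply(('turn on', '0', '0', '4', '4'))  # lights the 5x5 corner
# grid.count()                                 # -> 25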
|
python
|
import os
import logging
import sys
import tempfile
from setuptools import setup, find_packages
from setuptools.command.install import install
VERSION='1.1.4'
def readme():
"""Use `pandoc` to convert `README.md` into a `README.rst` file."""
if os.path.isfile('README.md') and any('dist' in x for x in sys.argv[1:]):
if os.system('pandoc -s README.md -o %s/README.rst' %
tempfile.mkdtemp()) != 0:
logging.warning('Unable to generate README.rst')
if os.path.isfile('README.rst'):
with open('README.rst') as fd:
return fd.read()
return ''
class VerifyVersionCommand(install):
"""Custom command to verify that the git tag matches our version"""
description = 'verify that the git tag matches our version'
def run(self):
tag = os.getenv('CIRCLE_TAG')
if tag[1:] != VERSION:
info = "Git tag: {0} does not match the version of this app: {1}".format(
tag, VERSION
)
sys.exit(info)
setup(
name='grpcio-opentracing',
version=VERSION,
description='Python OpenTracing Extensions for gRPC',
long_description=readme(),
author='LightStep',
license='Apache',
install_requires=['opentracing>=1.2.2', 'grpcio>=1.1.3,<2.0', 'six>=1.10'],
setup_requires=['pytest-runner'],
tests_require=['pytest', 'future'],
keywords=['opentracing'],
classifiers=[
'Operating System :: OS Independent',
'License :: OSI Approved :: Apache Software License',
'Programming Language :: Python :: 2.7',
"Programming Language :: Python :: 3",
"Programming Language :: Python :: 3.6"
],
packages=find_packages(exclude=['docs*', 'tests*', 'examples*']),
cmdclass={
'verify': VerifyVersionCommand,
}
)
|
python
|
import argparse
class Arguments():
def __init__(self):
self.parser = argparse.ArgumentParser(
description='''
Quick & easy to use nutritional supplement tracking software
https://github.com/ncdulo/suppylement
'''
)
# Should this be here, or in parse_args?
self.subparsers = self.parser.add_subparsers(
dest='mode',
help='modes')
def parse_args(self, args):
# List mode
self.list_parser = self.subparsers.add_parser(
'list', help='list entries')
self.list_parser.add_argument(
'--most-recent',
dest='most_recent',
type=int,
default=5,
help='display MOST_RECENT entries, default 5')
self.list_parser.add_argument(
'--less',
dest='search_less',
type=int,
default=-1,
help='list entries where amount is less than SEARCH_LESS')
self.list_parser.add_argument(
'--more',
dest='search_more',
type=int,
default=-1,
help='list entries where amount is greater than SEARCH_MORE')
self.list_parser.add_argument(
'--name',
dest='search_name',
type=str,
default='',
help='list entries where name equals SEARCH_NAME')
# Edit mode
self.edit_parser = self.subparsers.add_parser(
'edit', help='not yet implemented')
self.edit_parser.add_argument(
'id',
type=int,
help='id of entry to edit')
# Log new entry mode
self.log_parser = self.subparsers.add_parser(
'log', help='log an entry')
self.log_parser.add_argument(
'amount',
type=int,
help='amount in milligrams')
self.log_parser.add_argument(
'name',
type=str,
help='name of supplement')
# Remove entry mode
self.rm_parser = self.subparsers.add_parser(
'rm',
help='remove specific entries')
self.rm_parser.add_argument(
'--id',
dest='id_to_remove',
type=int,
default=-1,
help='remove entry number ID_TO_REMOVE')
self.rm_parser.add_argument(
'-i',
dest='rm_interactive',
action='store_true',
default=False,
help='confirm before deleting row')
self.rm_parser.add_argument(
'--most-recent',
dest='most_recent',
type=int,
default=1,
help='remove MOST_RECENT entries')
# Statistics mode
self.stats_parser = self.subparsers.add_parser(
'stats',
help='display various statistics')
self.stats_parser.add_argument(
'--full',
default=False,
action='store_true',
help='full output mode')
self.args = self.parser.parse_args(args)
#if self.args.mode == 'log':
# print(f"self.args.amount='{self.args.amount}', self.args.name='{self.args.name}'")
#elif self.args.mode == 'rm':
# print(f'self.args.most_recent = {self.args.most_recent}')
#print(f'self.args.mode = {self.args.mode}')
return self.args
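# Illustrative usage sketch, not part of the original module: parse a
# hypothetical "log" invocation and inspect the resulting namespace.
if __name__ == '__main__':
    _demo_args = Arguments().parse_args(['log', '500', 'magnesium'])
    print(_demo_args.mode, _demo_args.amount, _demo_args.name)  # -> log 500 magnesium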
|
python
|
from timeslot import TimeSlot, sort_by_begin_time, sort_by_end_time
from random import shuffle
import arrow, timeslot
#############################################################################
#
# Testing Merge
#
#############################################################################
def test_merge_unable():
# A before B
A = TimeSlot('2013-05-12T09:00:00+00:00', '2013-05-12T10:00:00+00:00')
B = TimeSlot('2013-05-12T10:01:00+00:00', '2013-05-12T11:00:00+00:00')
assert A.merge(B) == None and B.merge(A) == None
# A after B
A = TimeSlot('2013-05-12T09:00:00+00:00', '2013-05-12T10:00:00+00:00')
B = TimeSlot('2013-05-12T08:00:00+00:00', '2013-05-12T08:59:00+00:00')
assert A.merge(B) == None and B.merge(A) == None
def test_merge_able():
# A fits just before B
A = TimeSlot('2013-05-12T08:00:00+00:00', '2013-05-12T09:00:00+00:00')
B = TimeSlot('2013-05-12T09:00:00+00:00', '2013-05-12T10:00:00+00:00')
ansAB = TimeSlot('2013-05-12T08:00:00+00:00', '2013-05-12T10:00:00+00:00')
ansBA = TimeSlot('2013-05-12T08:00:00+00:00', '2013-05-12T10:00:00+00:00')
assert ansAB.equals(A.merge(B)) and ansBA.equals(B.merge(A))
# A fits just after B
A = TimeSlot('2013-05-12T10:00:00+00:00', '2013-05-12T11:00:00+00:00')
B = TimeSlot('2013-05-12T09:00:00+00:00', '2013-05-12T10:00:00+00:00')
ansAB = TimeSlot('2013-05-12T09:00:00+00:00', '2013-05-12T11:00:00+00:00')
ansBA = TimeSlot('2013-05-12T09:00:00+00:00', '2013-05-12T11:00:00+00:00')
assert ansAB.equals(A.merge(B)) and ansBA.equals(B.merge(A))
# A front overlaps B
A = TimeSlot('2013-05-12T08:00:00+00:00', '2013-05-12T09:30:00+00:00')
B = TimeSlot('2013-05-12T09:00:00+00:00', '2013-05-12T10:00:00+00:00')
ansAB = TimeSlot('2013-05-12T08:00:00+00:00', '2013-05-12T10:00:00+00:00')
ansBA = TimeSlot('2013-05-12T08:00:00+00:00', '2013-05-12T10:00:00+00:00')
assert ansAB.equals(A.merge(B)) and ansBA.equals(B.merge(A))
# A back overlaps B
A = TimeSlot('2013-05-12T09:30:00+00:00', '2013-05-12T12:00:00+00:00')
B = TimeSlot('2013-05-12T09:00:00+00:00', '2013-05-12T10:00:00+00:00')
ansAB = TimeSlot('2013-05-12T09:00:00+00:00', '2013-05-12T12:00:00+00:00')
ansBA = TimeSlot('2013-05-12T09:00:00+00:00', '2013-05-12T12:00:00+00:00')
assert ansAB.equals(A.merge(B)) and ansBA.equals(B.merge(A))
# A same as B
A = TimeSlot('2013-05-12T09:00:00+00:00', '2013-05-12T10:00:00+00:00')
B = TimeSlot('2013-05-12T09:00:00+00:00', '2013-05-12T10:00:00+00:00')
ansAB = TimeSlot('2013-05-12T09:00:00+00:00', '2013-05-12T10:00:00+00:00')
ansBA = TimeSlot('2013-05-12T09:00:00+00:00', '2013-05-12T10:00:00+00:00')
assert ansAB.equals(A.merge(B)) and ansBA.equals(B.merge(A))
# A completely overlaps B
A = TimeSlot('2013-05-12T08:00:00+00:00', '2013-05-12T12:00:00+00:00')
B = TimeSlot('2013-05-12T08:00:00+00:00', '2013-05-12T10:00:00+00:00')
ansAB = TimeSlot('2013-05-12T08:00:00+00:00', '2013-05-12T12:00:00+00:00')
ansBA = TimeSlot('2013-05-12T08:00:00+00:00', '2013-05-12T12:00:00+00:00')
assert ansAB.equals(A.merge(B)) and ansBA.equals(B.merge(A))
#############################################################################
#
# Testing Intersect
#
#############################################################################
def test_intersect_unable():
# A fits just before B
A = TimeSlot('2013-05-12T08:00:00+00:00', '2013-05-12T09:00:00+00:00')
B = TimeSlot('2013-05-12T09:00:00+00:00', '2013-05-12T10:00:00+00:00')
assert A.intersect(B) == None and B.intersect(A) == None
# A fits just after B
A = TimeSlot('2013-05-12T10:00:00+00:00', '2013-05-12T11:00:00+00:00')
B = TimeSlot('2013-05-12T09:00:00+00:00', '2013-05-12T10:00:00+00:00')
assert A.intersect(B) == None and B.intersect(A) == None
def test_intersect_able():
# A front overlaps B
A = TimeSlot('2013-05-12T08:00:00+00:00', '2013-05-12T09:30:00+00:00')
B = TimeSlot('2013-05-12T09:00:00+00:00', '2013-05-12T10:00:00+00:00')
ansAB = TimeSlot('2013-05-12T09:00:00+00:00', '2013-05-12T09:30:00+00:00')
ansBA = TimeSlot('2013-05-12T09:00:00+00:00', '2013-05-12T09:30:00+00:00')
assert ansAB.equals(A.intersect(B)) and ansBA.equals(B.intersect(A))
# A back overlaps B
A = TimeSlot('2013-05-12T09:30:00+00:00', '2013-05-12T12:00:00+00:00')
B = TimeSlot('2013-05-12T09:00:00+00:00', '2013-05-12T10:00:00+00:00')
ansAB = TimeSlot('2013-05-12T09:30:00+00:00', '2013-05-12T10:00:00+00:00')
ansBA = TimeSlot('2013-05-12T09:30:00+00:00', '2013-05-12T10:00:00+00:00')
assert ansAB.equals(A.intersect(B)) and ansBA.equals(B.intersect(A))
# A same as B
A = TimeSlot('2013-05-12T09:00:00+00:00', '2013-05-12T10:00:00+00:00')
B = TimeSlot('2013-05-12T09:00:00+00:00', '2013-05-12T10:00:00+00:00')
ansAB = TimeSlot('2013-05-12T09:00:00+00:00', '2013-05-12T10:00:00+00:00')
ansBA = TimeSlot('2013-05-12T09:00:00+00:00', '2013-05-12T10:00:00+00:00')
assert ansAB.equals(A.intersect(B)) and ansBA.equals(B.intersect(A))
# A completely overlaps B
A = TimeSlot('2013-05-12T08:00:00+00:00', '2013-05-12T12:00:00+00:00')
B = TimeSlot('2013-05-12T08:00:00+00:00', '2013-05-12T10:00:00+00:00')
ansAB = TimeSlot('2013-05-12T08:00:00+00:00', '2013-05-12T10:00:00+00:00')
ansBA = TimeSlot('2013-05-12T08:00:00+00:00', '2013-05-12T10:00:00+00:00')
assert ansAB.equals(A.intersect(B)) and ansBA.equals(B.intersect(A))
#############################################################################
#
# Testing Sort
#
#############################################################################
def test_sort():
A = TimeSlot('2017-11-12T09:00:00+00:00', '2017-11-12T17:00:00+00:00')
B = TimeSlot('2017-11-13T09:00:00+00:00', '2017-11-13T17:00:00+00:00')
C = TimeSlot('2017-11-14T00:00:00-08:00', '2017-11-15T00:00:00-08:00')
D = TimeSlot('2017-11-15T09:00:00+00:00', '2017-11-15T17:00:00+00:00')
E = TimeSlot('2017-11-15T21:00:00-08:00', '2017-11-16T10:00:00-08:00')
F = TimeSlot('2017-11-17T00:00:00-08:00', '2017-11-19T00:00:00-08:00')
unsorted = [A,B,C,D,E,F]
shuffle(unsorted)
# Begin time, ascending
ans = [A,B,C,D,E,F]
sort = sort_by_begin_time(unsorted, timeslot.ASCENDING)
assert len(sort) == len(ans)
for i in range(0,len(ans)):
assert ans[i].equals(sort[i])
# End time, descending
ans = [F,E,D,C,B,A]
sort = sort_by_end_time(unsorted, timeslot.DESCENDING)
assert len(sort) == len(ans)
for i in range(0,len(ans)):
assert ans[i].equals(sort[i])
#############################################################################
#
# Testing Finding Free and Busy Times
#
#############################################################################
def test_find_freebusy():
A = TimeSlot('2013-05-12T08:00:00+00:00', '2013-05-12T09:00:00+00:00')
B = TimeSlot('2013-05-12T10:00:00+00:00', '2013-05-12T11:00:00+00:00')
C = TimeSlot('2013-05-12T12:00:00+00:00', '2013-05-12T13:00:00+00:00')
D = TimeSlot('2013-05-12T14:00:00+00:00', '2013-05-12T15:00:00+00:00')
E = TimeSlot('2013-05-12T16:00:00+00:00', '2013-05-12T17:00:00+00:00')
F = TimeSlot('2013-05-14T16:00:00+00:00', '2013-05-14T17:00:00+00:00')
"""
All busy times after free period:
time free busy res
05:00
06:00 [] []
07:00 [] []
08:00 []
09:00
10:00 []
"""
free = TimeSlot('2013-05-12T06:00:00+00:00', '2013-05-12T08:00:00+00:00')
ansF = [ TimeSlot('2013-05-12T06:00:00+00:00', '2013-05-12T08:00:00+00:00') ]
ansB = [ ]
res = free.find_freebusy_from([E,C,B,A,D])
assert len(ansF) == len(res[0]) and len(ansB) == len(res[1]) and free.equals(ansF[0])
"""
All busy times before the free period:
time free busy res
16:00 []last
17:00 [] []
18:00 [] []
19:00
"""
free = TimeSlot('2013-05-12T17:00:00+00:00', '2013-05-12T19:00:00+00:00')
ansF = [ TimeSlot('2013-05-12T17:00:00+00:00', '2013-05-12T19:00:00+00:00') ]
ansB = [ ]
res = free.find_freebusy_from([E,C,B,A,D])
assert len(ansF) == len(res[0]) and len(ansB) == len(res[1]) and free.equals(ansF[0])
"""
No busy times in free period:
time free busy res
12:00 []
13:00 [] []
14:00 []
15:00
"""
free = TimeSlot('2013-05-12T13:00:00+00:00', '2013-05-12T14:00:00+00:00')
ansF = [ TimeSlot('2013-05-12T13:00:00+00:00', '2013-05-12T14:00:00+00:00') ]
ansB = [ ]
res = free.find_freebusy_from([E,C,B,A,D])
assert len(ansF) == len(res[0]) and len(ansB) == len(res[1]) and free.equals(ansF[0])
"""
All busy times in free period
time free busy res
06:00
07:00 [] []
08:00 [] []
09:00 [] []
10:00 [] []
11:00 [] []
12:00 [] []
13:00 [] []
14:00 [] []
15:00 [] []
16:00 [] []
17:00 [] []
18:00 [] []
19:00
"""
print("All busy times within free period:")
free = TimeSlot('2013-05-12T07:00:00+00:00', '2013-05-12T19:00:00+00:00')
ansF = [ TimeSlot('2013-05-12T07:00:00+00:00', '2013-05-12T08:00:00+00:00'),
TimeSlot('2013-05-12T09:00:00+00:00', '2013-05-12T10:00:00+00:00'),
TimeSlot('2013-05-12T11:00:00+00:00', '2013-05-12T12:00:00+00:00'),
TimeSlot('2013-05-12T13:00:00+00:00', '2013-05-12T14:00:00+00:00'),
TimeSlot('2013-05-12T15:00:00+00:00', '2013-05-12T16:00:00+00:00'),
TimeSlot('2013-05-12T17:00:00+00:00', '2013-05-12T19:00:00+00:00') ]
ansB = [ A, B, C, D, E ]
res = free.find_freebusy_from([E,C,B,A,D])
assert len(ansF) == len(res[0]) and len(ansB) == len(res[1])
for i in range(0,len(ansF)):
assert ansF[i].equals(res[0][i])
for i in range(0,len(ansB)):
assert ansB[i].equals(res[1][i])
"""
One busy time takes up whole free period
time free busy res
08:00 []
08:15 [] []
08:30 [] []
08:45 []
09:00
09:15
"""
free = TimeSlot('2013-05-12T08:15:00+00:00', '2013-05-12T08:45:00+00:00')
ansF = [ ]
ansB = [ A ]
res = free.find_freebusy_from([E,C,B,A,D])
assert len(ansF) == len(res[0]) and len(ansB) == len(res[1])
assert ansB[0].equals(res[1][0])
# Other cases
"""
time free busy res
09:30
10:00 [] []
10:30 [] []
11:00 [] []
11:30 [] []
12:00 [] []
12:30 [] []
"""
free = TimeSlot('2013-05-12T10:00:00+00:00', '2013-05-12T19:00:00+00:00')
ansF = [ TimeSlot('2013-05-12T11:00:00+00:00', '2013-05-12T12:00:00+00:00'),
TimeSlot('2013-05-12T13:00:00+00:00', '2013-05-12T14:00:00+00:00'),
TimeSlot('2013-05-12T15:00:00+00:00', '2013-05-12T16:00:00+00:00'),
TimeSlot('2013-05-12T17:00:00+00:00', '2013-05-12T19:00:00+00:00') ]
ansB = [ B, C, D, E]
res = free.find_freebusy_from([E,C,B,A,D])
assert len(ansF) == len(res[0]) and len(ansB) == len(res[1])
for i in range(0,len(ansF)):
assert ansF[i].equals(res[0][i])
for i in range(0,len(ansB)):
assert ansB[i].equals(res[1][i])
"""
time free busy res
09:30
10:00 []
10:30 [] []
11:00 [] []
11:30 [] []
12:00 [] []
12:30 [] []
"""
free = TimeSlot('2013-05-12T10:30:00+00:00', '2013-05-12T19:00:00+00:00')
ansF = [ TimeSlot('2013-05-12T11:00:00+00:00', '2013-05-12T12:00:00+00:00'),
TimeSlot('2013-05-12T13:00:00+00:00', '2013-05-12T14:00:00+00:00'),
TimeSlot('2013-05-12T15:00:00+00:00', '2013-05-12T16:00:00+00:00'),
TimeSlot('2013-05-12T17:00:00+00:00', '2013-05-12T19:00:00+00:00') ]
ansB = [ B, C, D, E]
res = free.find_freebusy_from([E,C,B,A,D])
assert len(ansF) == len(res[0]) and len(ansB) == len(res[1])
for i in range(0,len(ansF)):
assert ansF[i].equals(res[0][i])
for i in range(0,len(ansB)):
assert ansB[i].equals(res[1][i])
"""
time free busy res
09:30
10:00 []
10:30 []
11:00 [] []
11:30 [] []
12:00 [] []
12:30 [] []
"""
free = TimeSlot('2013-05-12T11:00:00+00:00', '2013-05-12T19:00:00+00:00')
ansF = [ TimeSlot('2013-05-12T11:00:00+00:00', '2013-05-12T12:00:00+00:00'),
TimeSlot('2013-05-12T13:00:00+00:00', '2013-05-12T14:00:00+00:00'),
TimeSlot('2013-05-12T15:00:00+00:00', '2013-05-12T16:00:00+00:00'),
TimeSlot('2013-05-12T17:00:00+00:00', '2013-05-12T19:00:00+00:00') ]
ansB = [ C, D, E ]
res = free.find_freebusy_from([E,C,B,A,D])
assert len(ansF) == len(res[0]) and len(ansB) == len(res[1])
for i in range(0,len(ansF)):
assert ansF[i].equals(res[0][i])
for i in range(0,len(ansB)):
assert ansB[i].equals(res[1][i])
"""
time free busy res
13:30 [] []
14:00 [] []
14:30 []
15:00
15:30
16:00 []
16:30 []last
"""
free = TimeSlot('2013-05-12T07:00:00+00:00', '2013-05-12T14:30:00+00:00')
ansF = [ TimeSlot('2013-05-12T07:00:00+00:00', '2013-05-12T08:00:00+00:00'),
TimeSlot('2013-05-12T09:00:00+00:00', '2013-05-12T10:00:00+00:00'),
TimeSlot('2013-05-12T11:00:00+00:00', '2013-05-12T12:00:00+00:00'),
TimeSlot('2013-05-12T13:00:00+00:00', '2013-05-12T14:00:00+00:00') ]
ansB = [ A, B, C, D ]
res = free.find_freebusy_from([E,C,B,A,D])
assert len(ansF) == len(res[0]) and len(ansB) == len(res[1])
for i in range(0,len(ansF)):
assert ansF[i].equals(res[0][i])
for i in range(0,len(ansB)):
assert ansB[i].equals(res[1][i])
"""
time free busy res
13:30 [] []
14:00 [] []
14:30 [] []
15:00
15:30
16:00 []
16:30 []last
"""
print("Some busy ends at same time as the end of the free period")
free = TimeSlot('2013-05-12T07:00:00+00:00', '2013-05-12T15:00:00+00:00')
ansF = [ TimeSlot('2013-05-12T07:00:00+00:00', '2013-05-12T08:00:00+00:00'),
TimeSlot('2013-05-12T09:00:00+00:00', '2013-05-12T10:00:00+00:00'),
TimeSlot('2013-05-12T11:00:00+00:00', '2013-05-12T12:00:00+00:00'),
TimeSlot('2013-05-12T13:00:00+00:00', '2013-05-12T14:00:00+00:00') ]
ansB = [ A, B, C, D ]
res = free.find_freebusy_from([E,C,B,A,D])
assert len(ansF) == len(res[0]) and len(ansB) == len(res[1])
for i in range(0,len(ansF)):
assert ansF[i].equals(res[0][i])
for i in range(0,len(ansB)):
assert ansB[i].equals(res[1][i])
"""
time free busy res
13:30 [] []
14:00 [] []
14:30 [] []
15:00 [] []
15:30
16:00 []
16:30 []last
"""
free = TimeSlot('2013-05-12T07:00:00+00:00', '2013-05-12T15:30:00+00:00')
ansF = [ TimeSlot('2013-05-12T07:00:00+00:00', '2013-05-12T08:00:00+00:00'),
TimeSlot('2013-05-12T09:00:00+00:00', '2013-05-12T10:00:00+00:00'),
TimeSlot('2013-05-12T11:00:00+00:00', '2013-05-12T12:00:00+00:00'),
TimeSlot('2013-05-12T13:00:00+00:00', '2013-05-12T14:00:00+00:00'),
TimeSlot('2013-05-12T15:00:00+00:00', '2013-05-12T15:30:00+00:00') ]
ansB = [ A, B, C, D ]
res = free.find_freebusy_from([E,C,B,A,D])
assert len(ansF) == len(res[0]) and len(ansB) == len(res[1])
for i in range(0,len(ansF)):
assert ansF[i].equals(res[0][i])
for i in range(0,len(ansB)):
assert ansB[i].equals(res[1][i])
def test_merge_freebusy():
# Merge Case
A = TimeSlot('2013-05-12T00:00:00+00:00', '2013-05-13T00:00:00+00:00')
B = TimeSlot('2013-05-12T12:00:00+00:00', '2013-05-12T13:00:00+00:00')
C = TimeSlot('2013-05-12T18:00:00+00:00', '2013-05-12T19:00:00+00:00')
D = TimeSlot('2013-05-12T14:00:00+00:00', '2013-05-12T15:00:00+00:00')
E = TimeSlot('2013-05-12T08:00:00+00:00', '2013-05-12T17:00:00+00:00')
F = TimeSlot('2013-05-12T20:00:00+00:00', '2013-05-12T21:00:00+00:00')
free = TimeSlot('2013-05-12T12:00:00+00:00', '2013-05-12T22:00:00+00:00')
ansF = [TimeSlot('2013-05-12T17:00:00+00:00', '2013-05-12T18:00:00+00:00'),
TimeSlot('2013-05-12T19:00:00+00:00', '2013-05-12T20:00:00+00:00'),
TimeSlot('2013-05-12T21:00:00+00:00', '2013-05-12T22:00:00+00:00')]
ansB = [
TimeSlot('2013-05-12T08:00:00+00:00', '2013-05-12T17:00:00+00:00'),
TimeSlot('2013-05-12T18:00:00+00:00', '2013-05-12T19:00:00+00:00'),
TimeSlot('2013-05-12T20:00:00+00:00', '2013-05-12T21:00:00+00:00')
]
res = free.find_freebusy_from([E,C,B,D,F])
assert len(ansF) == len(res[0]) and len(ansB) == len(res[1])
for i in range(0,len(ansF)):
assert ansF[i].equals(res[0][i])
for i in range(0,len(ansB)):
assert ansB[i].equals(res[1][i])
test_sort()
test_merge_freebusy()
test_find_freebusy()
test_merge_able()
test_merge_unable()
test_intersect_unable()
test_intersect_able()
|
python
|
from zoloto.cameras.camera import Camera # noqa
from zoloto.cameras.file import ImageFileCamera, VideoFileCamera # noqa
|
python
|
from dagster_graphql.client.query import START_PIPELINE_EXECUTION_MUTATION, SUBSCRIPTION_QUERY
from dagster_graphql.implementation.context import DagsterGraphQLContext
from dagster_graphql.implementation.pipeline_execution_manager import SubprocessExecutionManager
from dagster_graphql.schema import create_schema
from dagster_graphql.test.utils import execute_dagster_graphql
from graphql import graphql
from graphql.execution.executors.sync import SyncExecutor
from dagster import ExecutionTargetHandle
from dagster.core.instance import DagsterInstance
from dagster.utils import file_relative_path
def test_execute_hammer_through_dagit():
handle = ExecutionTargetHandle.for_pipeline_python_file(
file_relative_path(__file__, '../../../../examples/dagster_examples/toys/hammer.py'),
'hammer_pipeline',
)
instance = DagsterInstance.local_temp()
execution_manager = SubprocessExecutionManager(instance)
context = DagsterGraphQLContext(
handle=handle, execution_manager=execution_manager, instance=instance
)
executor = SyncExecutor()
variables = {
'executionParams': {
'environmentConfigData': {'storage': {'filesystem': {}}, 'execution': {'dask': {}}},
'selector': {'name': handle.build_pipeline_definition().name},
'mode': 'default',
}
}
start_pipeline_result = graphql(
request_string=START_PIPELINE_EXECUTION_MUTATION,
schema=create_schema(),
context=context,
variables=variables,
executor=executor,
)
run_id = start_pipeline_result.data['startPipelineExecution']['run']['runId']
context.execution_manager.join()
subscription = execute_dagster_graphql(context, SUBSCRIPTION_QUERY, variables={'runId': run_id})
subscribe_results = []
subscription.subscribe(subscribe_results.append)
messages = [x['__typename'] for x in subscribe_results[0].data['pipelineRunLogs']['messages']]
assert 'PipelineStartEvent' in messages
assert 'PipelineSuccessEvent' in messages
|
python
|
from datetime import timedelta
import pendulum
import pytest
import logging
from uuid import uuid4
from unittest.mock import MagicMock
import prefect
from prefect.client.client import FlowRunInfoResult, ProjectInfo
from prefect.engine import signals, state
from prefect.run_configs import UniversalRun
from prefect.backend.flow_run import FlowRunLog
from prefect.engine.results.local_result import LocalResult
from prefect.tasks.prefect import (
create_flow_run,
wait_for_flow_run,
get_task_run_result,
)
@pytest.fixture
def MockClient(monkeypatch):
Client = MagicMock()
monkeypatch.setattr("prefect.tasks.prefect.flow_run.Client", Client)
return Client
@pytest.fixture
def MockFlowView(monkeypatch):
FlowView = MagicMock()
monkeypatch.setattr("prefect.tasks.prefect.flow_run.FlowView", FlowView)
return FlowView
@pytest.fixture
def MockFlowRunView(monkeypatch):
FlowRunView = MagicMock()
monkeypatch.setattr("prefect.backend.flow_run.FlowRunView", FlowRunView)
monkeypatch.setattr("prefect.tasks.prefect.flow_run.FlowRunView", FlowRunView)
return FlowRunView
class TestCreateFlowRun:
def test_does_not_accept_both_id_and_name(self):
with pytest.raises(ValueError, match="Received both `flow_id` and `flow_name`"):
create_flow_run.run(flow_id=uuid4(), flow_name="foo")
def test_requires_id_or_name(self):
with pytest.raises(ValueError, match="`flow_id` and `flow_name` are null"):
create_flow_run.run(flow_id=None, flow_name=None)
@pytest.mark.parametrize(
"kwargs",
[
{"flow_id": "flow-id"},
{"flow_name": "flow-name"},
{"flow_name": "flow-name", "project_name": "project-name"},
],
)
def test_lookup_uses_given_identifiers(self, kwargs, MockFlowView, MockClient):
create_flow_run.run(**kwargs)
if "flow_id" in kwargs:
MockFlowView.from_id.assert_called_once_with("flow-id")
elif "flow_name" in kwargs:
MockFlowView.from_flow_name.assert_called_once_with(
"flow-name", project_name=kwargs.get("project_name", "")
)
def test_creates_flow_run_with_defaults(self, MockFlowView, MockClient):
MockFlowView.from_id.return_value.flow_id = "flow-id"
create_flow_run.run(flow_id="flow-id")
MockClient().create_flow_run.assert_called_once_with(
flow_id="flow-id",
parameters=None,
run_name=None,
labels=None,
context=None,
run_config=None,
scheduled_start_time=None,
)
@pytest.mark.parametrize(
"kwargs",
[
{"parameters": dict(x=1, y="foo")},
{"run_name": "run-name"},
{"labels": ["a", "b"]},
{"context": {"var": "val"}},
{"run_config": UniversalRun(env={"x"})},
{"scheduled_start_time": pendulum.now().add(days=1)},
],
)
def test_creates_flow_with_given_settings(self, MockFlowView, MockClient, kwargs):
MockFlowView.from_id.return_value.flow_id = "flow-id"
create_flow_run.run(flow_id="flow-id", **kwargs)
MockClient().create_flow_run.assert_called_once_with(
flow_id="flow-id",
parameters=kwargs.get("parameters"),
run_name=kwargs.get("run_name"),
labels=kwargs.get("labels"),
context=kwargs.get("context"),
run_config=kwargs.get("run_config"),
scheduled_start_time=kwargs.get("scheduled_start_time"),
)
def test_generates_run_name_from_parent_and_child(self, MockFlowView, MockClient):
MockFlowView.from_id.return_value.flow_id = "flow-id"
MockFlowView.from_id.return_value.name = "child-name"
with prefect.context(flow_run_name="parent-run"):
create_flow_run.run(flow_id="flow-id")
MockClient().create_flow_run.assert_called_once_with(
flow_id="flow-id",
parameters=None,
run_name="parent-run-child-name",
labels=None,
context=None,
run_config=None,
scheduled_start_time=None,
)
    def test_returns_flow_run_id(self, MockFlowView, MockClient):
MockClient().create_flow_run.return_value = "flow-run-id"
result = create_flow_run.run(flow_id="flow-id")
assert result == "flow-run-id"
def test_displays_flow_run_url(self, MockFlowView, MockClient, caplog):
MockClient().create_flow_run.return_value = "flow-run-id"
MockClient().get_cloud_url.return_value = "fake-url"
create_flow_run.run(flow_id="flow-id")
MockClient().get_cloud_url.assert_called_once_with("flow-run", "flow-run-id")
assert "Created flow run '<generated-name>': fake-url" in caplog.text
class TestWaitForFlowRun:
@pytest.fixture
def mock_watch_flow_run(self, monkeypatch):
watch_flow_run = MagicMock()
monkeypatch.setattr(
"prefect.tasks.prefect.flow_run.watch_flow_run", watch_flow_run
)
return watch_flow_run
def test_logs_include_flow_run_name_and_level(
self, MockFlowRunView, mock_watch_flow_run, caplog
):
MockFlowRunView.from_flow_run_id.return_value.name = "fake-run-name"
run_logs = [
FlowRunLog(
timestamp=pendulum.now(), level=logging.INFO, message="Log message"
),
FlowRunLog(
timestamp=pendulum.now(), level=logging.ERROR, message="Another log"
),
]
mock_watch_flow_run.return_value = run_logs
wait_for_flow_run.run("flow-run-id")
for record, run_log in zip(caplog.records, run_logs):
assert record.levelno == run_log.level
assert record.msg == f"Flow 'fake-run-name': {run_log.message}"
@pytest.mark.parametrize("stream_logs", [True, False])
@pytest.mark.parametrize("stream_states", [True, False])
def test_passes_args_to_watch_flow_run(
self, mock_watch_flow_run, stream_logs, stream_states, MockFlowRunView
):
wait_for_flow_run.run(
"flow-run-id", stream_states=stream_states, stream_logs=stream_logs
)
mock_watch_flow_run.assert_called_once_with(
"flow-run-id", stream_states=stream_states, stream_logs=stream_logs
)
def test_returns_latest_flow_run_view(self, mock_watch_flow_run, MockFlowRunView):
MockFlowRunView.from_flow_run_id().get_latest.return_value = "fake-return-value"
result = wait_for_flow_run.run("flow-run-id")
assert result == "fake-return-value"
class TestGetTaskRunResult:
def test_requires_task_slug(self):
with pytest.raises(ValueError, match="`task_slug` is empty"):
get_task_run_result.run(flow_run_id="id", task_slug="")
def test_does_not_allow_current_flow_run(self):
with prefect.context(flow_run_id="id"):
with pytest.raises(
ValueError,
match="`flow_run_id` is the same as the currently running flow",
):
get_task_run_result.run(flow_run_id="id", task_slug="foo")
def test_waits_for_flow_run_to_finish(self, MockFlowRunView, monkeypatch):
# Create a fake flow run that is 'Running' then 'Finished'
flow_run = MagicMock()
flow_run.state = prefect.engine.state.Running()
def mark_flow_run_as_finished():
flow_run.state = prefect.engine.state.Finished()
return flow_run
flow_run.get_latest.side_effect = mark_flow_run_as_finished
# Return the fake flow run during retrieval
MockFlowRunView.from_flow_run_id.return_value = flow_run
# Mock sleep so the test is not slow
mock_sleep = MagicMock()
monkeypatch.setattr("prefect.tasks.prefect.flow_run.time.sleep", mock_sleep)
get_task_run_result.run(flow_run_id="id", task_slug="slug", poll_time=1)
# Should have slept once for the given poll time
mock_sleep.assert_called_once_with(1)
@pytest.mark.parametrize("kwargs", [{}, {"map_index": 5}])
def test_gets_task_run_result(self, MockFlowRunView, monkeypatch, kwargs, caplog):
task_run = MagicMock()
task_run.get_result.return_value = "fake-result"
# Provide a Result class so we can assert it is logged
task_run.state._result = LocalResult()
flow_run = MagicMock()
flow_run.state = prefect.engine.state.Finished()
flow_run.get_task_run.return_value = task_run
MockFlowRunView.from_flow_run_id.return_value = flow_run
# Ensure we aren't sleeping on already finished runs
mock_sleep = MagicMock(
side_effect=RuntimeError(
"Sleep should not be called for a fnished flow run."
)
)
monkeypatch.setattr("prefect.tasks.prefect.flow_run.time.sleep", mock_sleep)
result = get_task_run_result.run(flow_run_id="id", task_slug="slug", **kwargs)
mock_sleep.assert_not_called()
# Task pulled from the flow run, map_index passed through if given
flow_run.get_task_run.assert_called_once_with(
task_slug="slug", map_index=kwargs.get("map_index", -1)
)
# Result loaded from storage
task_run.get_result.assert_called_once()
assert result == "fake-result"
# Result type logged
assert "Loading task run result from LocalResult..." in caplog.text
# Legacy tests -------------------------------------------------------------------------
from prefect.tasks.prefect.flow_run import StartFlowRun
@pytest.fixture()
def client(monkeypatch):
cloud_client = MagicMock(
graphql=MagicMock(
return_value=MagicMock(
data=MagicMock(flow=[MagicMock(id="abc123"), MagicMock(id="def456")])
)
),
create_flow_run=MagicMock(return_value="xyz890"),
get_cloud_url=MagicMock(return_value="https://api.prefect.io/flow/run/url"),
create_task_run_artifact=MagicMock(return_value="id"),
get_flow_run_info=MagicMock(
return_value=FlowRunInfoResult(
id="my-flow-run-id",
name="test-run",
flow_id="xyz890",
version=1,
task_runs=[],
state=state.Success(),
scheduled_start_time=None,
project=ProjectInfo(id="my-project-id", name="Test Project"),
parameters={"test": "ing"},
context={},
)
),
)
monkeypatch.setattr(
"prefect.tasks.prefect.flow_run.Client", MagicMock(return_value=cloud_client)
)
monkeypatch.setattr(
"prefect.artifacts.Client", MagicMock(return_value=cloud_client)
)
yield cloud_client
class TestStartFlowRunCloud:
def test_initialization(self, cloud_api):
now = pendulum.now()
run_config = UniversalRun()
# verify that the task is initialized as expected
task = StartFlowRun(
name="My Flow Run Task",
checkpoint=False,
project_name="Test Project",
flow_name="Test Flow",
new_flow_context={"foo": "bar"},
parameters={"test": "ing"},
run_config=run_config,
run_name="test-run",
scheduled_start_time=now,
)
assert task.name == "My Flow Run Task"
assert task.checkpoint is False
assert task.project_name == "Test Project"
assert task.flow_name == "Test Flow"
assert task.new_flow_context == {"foo": "bar"}
assert task.parameters == {"test": "ing"}
assert task.run_config == run_config
assert task.run_name == "test-run"
assert task.scheduled_start_time == now
def test_init_errors_if_tasks_passed_to_parameters(self, cloud_api):
with pytest.raises(TypeError, match="An instance of `Task` was passed"):
StartFlowRun(
name="testing", parameters={"a": 1, "b": prefect.Parameter("b")}
)
@pytest.mark.parametrize("idempotency_key", [None, "my-key"])
@pytest.mark.parametrize("task_run_id", [None, "test-id"])
def test_flow_run_task_submit_args(
self, client, cloud_api, idempotency_key, task_run_id
):
run_config = UniversalRun()
# verify that create_flow_run was called
task = StartFlowRun(
project_name="Test Project",
flow_name="Test Flow",
parameters={"test": "ing"},
run_config=run_config,
run_name="test-run",
)
# verify that run returns the new flow run ID
with prefect.context(task_run_id=task_run_id):
assert task.run(idempotency_key=idempotency_key) == "xyz890"
# verify the GraphQL query was called with the correct arguments
query_args = list(client.graphql.call_args_list[0][0][0]["query"].keys())[0]
assert "Test Project" in query_args
assert "Test Flow" in query_args
# verify create_flow_run was called with the correct arguments
assert client.create_flow_run.call_args[1] == dict(
flow_id="abc123",
parameters={"test": "ing"},
run_config=run_config,
idempotency_key=idempotency_key or task_run_id,
context=None,
run_name="test-run",
scheduled_start_time=None,
)
def test_flow_run_task_uses_scheduled_start_time(self, client, cloud_api):
in_one_hour = pendulum.now().add(hours=1)
# verify that create_flow_run was called
task = StartFlowRun(
project_name="Test Project",
flow_name="Test Flow",
scheduled_start_time=in_one_hour,
)
# verify that run returns the new flow run ID
assert task.run() == "xyz890"
# verify create_flow_run was called with the correct arguments
client.create_flow_run.assert_called_once_with(
flow_id="abc123",
parameters=None,
idempotency_key=None,
context=None,
run_name=None,
scheduled_start_time=in_one_hour,
run_config=None,
)
def test_flow_run_task_without_flow_name(self, cloud_api):
# verify that a ValueError is raised without a flow name
task = StartFlowRun(project_name="Test Project")
with pytest.raises(ValueError, match="Must provide a flow name."):
task.run()
def test_flow_run_task_without_project_name(self, cloud_api):
# verify that a ValueError is raised without a project name
task = StartFlowRun(flow_name="Test Flow")
with pytest.raises(ValueError, match="Must provide a project name."):
task.run()
def test_flow_run_task_with_no_matching_flow(self, client, cloud_api):
# verify a ValueError is raised if the client returns no flows
task = StartFlowRun(flow_name="Test Flow", project_name="Test Project")
client.graphql = MagicMock(return_value=MagicMock(data=MagicMock(flow=[])))
with pytest.raises(ValueError, match="Flow 'Test Flow' not found."):
task.run()
def test_flow_run_link_artifact(self, client, cloud_api):
task = StartFlowRun(
project_name="Test Project",
flow_name="Test Flow",
parameters={"test": "ing"},
run_name="test-run",
)
with prefect.context(running_with_backend=True, task_run_id="trid"):
task.run()
client.create_task_run_artifact.assert_called_once_with(
data={"link": "/flow/run/url"}, kind="link", task_run_id="trid"
)
class TestStartFlowRunServer:
def test_initialization(self, server_api):
now = pendulum.now()
# verify that the task is initialized as expected
task = StartFlowRun(
name="My Flow Run Task",
project_name="Demo",
checkpoint=False,
flow_name="Test Flow",
new_flow_context={"foo": "bar"},
parameters={"test": "ing"},
run_name="test-run",
scheduled_start_time=now,
)
assert task.name == "My Flow Run Task"
assert task.checkpoint is False
assert task.flow_name == "Test Flow"
assert task.new_flow_context == {"foo": "bar"}
assert task.parameters == {"test": "ing"}
assert task.run_name == "test-run"
assert task.scheduled_start_time == now
def test_flow_run_task(self, client, server_api):
# verify that create_flow_run was called
task = StartFlowRun(
flow_name="Test Flow",
project_name="Demo",
parameters={"test": "ing"},
run_name="test-run",
)
# verify that run returns the new flow run ID
assert task.run() == "xyz890"
# verify the GraphQL query was called with the correct arguments
query_args = list(client.graphql.call_args_list[0][0][0]["query"].keys())[0]
assert "Test Flow" in query_args
# verify create_flow_run was called with the correct arguments
client.create_flow_run.assert_called_once_with(
flow_id="abc123",
parameters={"test": "ing"},
idempotency_key=None,
context=None,
run_name="test-run",
scheduled_start_time=None,
run_config=None,
)
def test_flow_run_task_with_wait(self, client, server_api):
# verify that create_flow_run was called
task = StartFlowRun(
flow_name="Test Flow",
project_name="Demo",
parameters={"test": "ing"},
run_name="test-run",
wait=True,
poll_interval=timedelta(seconds=3),
)
assert task.poll_interval == timedelta(seconds=3)
# Run flow, and assert that signals a success
with pytest.raises(signals.SUCCESS) as exc_info:
task.run()
flow_state_signal = exc_info.value
assert isinstance(flow_state_signal.state, state.Success)
# Check flow ID
assert str(flow_state_signal).split(" ")[0] == "xyz890"
# verify the GraphQL query was called with the correct arguments
query_args = list(client.graphql.call_args_list[0][0][0]["query"].keys())[0]
assert "Test Flow" in query_args
# verify create_flow_run was called with the correct arguments
client.create_flow_run.assert_called_once_with(
flow_id="abc123",
parameters={"test": "ing"},
idempotency_key=None,
context=None,
run_name="test-run",
scheduled_start_time=None,
run_config=None,
)
def test_flow_run_task_poll_interval_too_short(self):
with pytest.raises(ValueError):
task = StartFlowRun(
flow_name="Test Flow",
project_name="Demo",
parameters={"test": "ing"},
run_name="test-run",
wait=True,
poll_interval=timedelta(seconds=2),
)
def test_flow_run_task_without_flow_name(self, server_api):
# verify that a ValueError is raised without a flow name
task = StartFlowRun()
with pytest.raises(ValueError, match="Must provide a flow name."):
task.run()
def test_flow_run_task_with_no_matching_flow(self, client, server_api):
# verify a ValueError is raised if the client returns no flows
task = StartFlowRun(flow_name="Test Flow", project_name="Demo")
client.graphql = MagicMock(return_value=MagicMock(data=MagicMock(flow=[])))
with pytest.raises(ValueError, match="Flow 'Test Flow' not found."):
task.run()
|
python
|
import time
import random
import logging
from datetime import datetime
from osgate.connectors.connector import AbstractConnector, ConnectorBase
log = logging.getLogger(__name__)
class DefaultConnector(AbstractConnector, ConnectorBase):
"""Used for testing of design pattern; allows non-existing connector and devices to be source of data.
Emulates polling of some devices at given interval (based of osgate.json config).
"""
protocol = "default"
    last_polled: dict[str, datetime] = {}
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
for device in self.devices:
for channel in device.channels:
identifer = ".".join((device.uuid, channel.unit))
self.last_polled[identifer] = datetime.now()
def poll(self):
"""Polls devices via their channels if last poll was outside of the provided interval"""
for device in self.devices:
for channel in device.channels:
identifer = ".".join((device.uuid, channel.unit))
# only poll if enough time has elapsed
time_passed = datetime.now() - self.last_polled[identifer]
if time_passed.seconds > channel.interval_timedelta.seconds:
event = {
"ts": datetime.now().isoformat(),
"device": device.uuid,
"channel": channel.unit,
"value": random.randint(0, 255),
}
log.debug(f"ValueChanged event: {event=}")
for sink in self.sinks:
sink.flush(event)
                    self.last_polled[identifier] = datetime.now()
def run(self):
log.debug(f"{self} starting polling")
while True:
time.sleep(0.2)
self.poll()
def stop(self):
log.debug(f"shutting down {self}...")
exit()
def ping(self) -> str:
log.debug(f"send ping to connector: {self.name}")
return "pong"
|
python
|
from os import environ, path
from ._settings_default import *
from ..exceptions import ImproperlyConfigured
PROFILE = environ.get("GOLD_DIGGER_PROFILE", "local")
if PROFILE == "master":
from ._settings_master import *
elif PROFILE == "local":
try:
from ._settings_local import *
except ImportError:
raise ImproperlyConfigured(
f"Local configuration not found. Create file _settings_local.py in {path.abspath(path.join(__file__, path.pardir))} directory according to README.",
)
else:
raise ValueError(f"Unsupported settings profile. Got: {PROFILE}. Use one of: master, local.")
|
python
|
from django.urls import path
from . import views
urlpatterns = [
path('', views.data_view, name='data_view'),
]
|
python
|
from random import randint
class Die():
    """A class that represents a single die."""
    def __init__(self, number_size=6):
        self.number_size = number_size
    # Roll the die
    def roll(self):
        """Return a random integer between 1 and the number of faces of the die."""
        return randint(1, self.number_size)
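# Illustrative usage, not part of the original module: roll a default
# six-sided die and a twenty-sided die.
if __name__ == '__main__':
    d6 = Die()
    d20 = Die(20)
    print(d6.roll(), d20.roll())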
|
python
|
"""Voorbeeld met 2 armen."""
from asyncgcodecli import UArm
async def move_script(uarms: UArm):
"""Script that moves the robot arm."""
# set de robot arm mode to 0 (pomp)
for uarm in uarms:
uarm.set_mode(0)
for _ in range(1, 5):
for uarm in uarms:
await uarm.sleep(0)
uarms[0].move(150, -200, 150, 200)
uarms[1].move(150, 200, 150, 200)
for uarm in uarms:
await uarm.sleep(0)
uarms[0].move(150, 0, 150, 200)
uarms[1].move(150, 0, 150, 200)
for uarm in uarms:
await uarm.sleep(0)
for uarm in uarms:
# make a nice landing
uarm.move(150, 0, 20, 200)
for uarm in uarms:
await uarm.sleep(0)
for uarm in uarms:
# make a nice landing
uarm.move(150, 0, 0, 10)
# Execute move_script on the UArm that is
# connected to /dev/cu.usbmodem14101
UArm.execute_on_robotarms(
['/dev/cu.usbmodem14101', '/dev/cu.usbmodem14201'],
move_script)
|
python
|
from django.db import models
# Create your models here.
class List(models.Model):
item = models.CharField(max_length=200)
completed = models.BooleanField(default=False)
def __str__(self):
return self.item + ' | ' + str(self.completed)
|
python
|
import unittest
import logging
from varfilter import filter
class Testfilter(unittest.TestCase):
def setUp(self):
        print('Preparing the test context')
def test_fint_int_ok(self):
        print('fint with a valid integer')
t = filter.fint(10)
self.assertEqual(t, 10)
def test_fint_str_ok(self):
        print('fint with a valid integer string')
t = filter.fint('10')
self.assertEqual(t, 10)
def test_fint_strneg_ok(self):
        print('fint with a valid negative integer string')
t = filter.fint('-10')
self.assertEqual(t, -10)
def test_fint_strhex_ok(self):
        print('fint with a valid hex integer string')
t = filter.fint('0xA')
self.assertEqual(t, 10)
def test_fint_strbinary_ok(self):
        print('fint with a valid binary integer string')
t = filter.fint('0b1010')
self.assertEqual(t, 10)
def test_ffloat_str_ok(self):
        print('ffloat with a valid float string')
t = filter.ffloat(' -10.2')
self.assertEqual(t, -10.2)
def test_fbool_int_ok(self):
        print('fbool with a nonzero integer is True')
t = filter.fbool(10)
self.assertEqual(t, True)
def test_fbool_int_ko(self):
        print('fbool with integer 0 is False')
t = filter.fbool(0)
self.assertEqual(t, False)
def test_fbool_int_ko_neg(self):
        print('fbool with integer -0 is False')
t = filter.fbool(-0)
self.assertEqual(t, False)
def test_fbool_int_ok_neg2(self):
        print('fbool with a negative integer is True')
t = filter.fbool(-5)
self.assertEqual(t, True)
def test_fbool_str_true(self):
        print('fbool with truthy strings')
elements = [' T', 'T ', ' T ', 'T', 't',
'True', 'Y', 'Yes', 'SÍ', 'Si']
for element in elements:
t = filter.fbool(element)
self.assertEqual(t, True)
def test_fbool_str_false(self):
        print('fbool with falsy strings')
elements = [' F', 'F ', ' F ', 'F', 'f',
'False', 'N', 'No']
for element in elements:
t = filter.fbool(element)
self.assertEqual(t, False)
def test_fstr_bool_false(self):
        print('fstr with bool False')
t = filter.fstr(False)
self.assertEqual(t, 'False')
def test_fnone_none(self):
        print('fnone with None')
t = filter.fnone(None)
self.assertEqual(t, None)
if __name__ == '__main__':
logging.basicConfig(format='%(asctime)s - %(message)s',
level=logging.DEBUG)
unittest.main()
|
python
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
from django.conf import settings
class Migration(migrations.Migration):
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
('spam', '0001_initial'),
]
operations = [
migrations.AddField(
model_name='spammyposting',
name='comment',
field=models.TextField(blank=True, null=True),
),
migrations.AddField(
model_name='spammyposting',
name='reviewer',
field=models.ForeignKey(related_name='reviewer', blank=True, null=True, to=settings.AUTH_USER_MODEL),
),
migrations.AddField(
model_name='spammyposting',
name='status',
field=models.IntegerField(default=10, choices=[(10, 'Flagged'), (20, 'Under review'), (30, 'Rejected'), (40, 'Approved')]),
),
migrations.AlterField(
model_name='spammyposting',
name='reporter',
field=models.ForeignKey(related_name='reporter', blank=True, null=True, to=settings.AUTH_USER_MODEL),
),
]
|
python
|
from messagebird.base import Base
from messagebird.formats import Formats
from messagebird.hlr import HLR
class Lookup(Base):
def __init__(self):
self.href = None
self.countryCode = None
self.countryPrefix = None
self.phoneNumber = None
self.type = None
self._formats = None
self._hlr = None
def __str__(self):
return str(self.__class__) + ": " + str(self.__dict__)
@property
def formats(self):
return self._formats
@formats.setter
def formats(self, value):
self._formats = Formats().load(value)
@property
def hlr(self):
return self._hlr
@hlr.setter
def hlr(self, value):
self._hlr = HLR().load(value)
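# Illustrative usage sketch, not part of the original module: assigning a plain
# dict to `formats` or `hlr` runs it through the corresponding loader. The keys
# shown here are assumptions for illustration only.
#   lookup = Lookup()
#   lookup.formats = {'e164': '+31612345678'}  # becomes a Formats instance
#   lookup.hlr = {'status': 'active'}          # becomes an HLR instance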
|
python
|
# import commands
import commands
# password helper
import json
from getpass import getpass
# define 'submitnonce' command
def parse(cmd, arguments, connection):
if len(arguments) != 2:
print("error: '"+cmd.name+"' requires one argument.")
else:
nonce = arguments[1]
response, result = connection.send_request(cmd.name, {'nonce':nonce})
print("alert: server responded with '"+response.response+"'.")
if response.response == 'failed':
print("reason: " + response.reason)
else:
print("---------------------------------")
print(result)
print("---------------------------------")
|
python
|
"""
*C♯ - Level 4*
"""
from ..._pitch import Pitch
__all__ = ["Cs4"]
class Cs4(
Pitch,
):
pass
|
python
|
for i in range(1,5): #More than 2 lines will result in 0 score. Do not leave a blank line also
print(int(pow(10,i)/9) * i)
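# Output of the loop above: 1, 22, 333, 4444. int(pow(10, i) / 9) yields the
# repunits 1, 11, 111, 1111, which are then multiplied by i.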
|
python
|
import datetime
import getpass
import os
import platform
import pwd
import socket
import sys
import time
def collect_python_facts():
return {
'version': {
'major': sys.version_info[0],
'minor': sys.version_info[1],
'micro': sys.version_info[2],
'releaselevel': sys.version_info[3],
'serial': sys.version_info[4]
},
'version_info': list(sys.version_info),
'executable': sys.executable,
}
def collect_env_facts():
# Collect environment facts
env_facts = {}
for k, v in os.environ.items():
env_facts[k] = v
return env_facts
def collect_user_facts():
user_facts = dict()
user_facts['id'] = getpass.getuser()
pwent = pwd.getpwnam(getpass.getuser())
user_facts['uid'] = pwent.pw_uid
user_facts['gid'] = pwent.pw_gid
user_facts['gecos'] = pwent.pw_gecos
user_facts['dir'] = pwent.pw_dir
user_facts['shell'] = pwent.pw_shell
user_facts['real_user_id'] = os.getuid()
user_facts['effective_user_id'] = os.geteuid()
user_facts['real_group_id'] = os.getgid()
    user_facts['effective_group_id'] = os.getegid()
return user_facts
def collect_platform_facts():
# Platform
platform_facts = {}
# platform.system() can be Linux, Darwin, Java, or Windows
platform_facts['system'] = platform.system()
platform_facts['kernel'] = platform.release()
platform_facts['machine'] = platform.machine()
platform_facts['python_version'] = platform.python_version()
platform_facts['fqdn'] = socket.getfqdn()
platform_facts['hostname'] = platform.node().split('.')[0]
platform_facts['nodename'] = platform.node()
platform_facts['domain'] = '.'.join(platform_facts['fqdn'].split('.')[1:])
platform_facts['arch'] = platform.architecture()[0]
return platform_facts
def collect_network_facts():
from boot import run
hostname = platform.node().split('.')[0]
ipv4 = [line for line in run('cat /etc/hosts').out.strip().split('\n') if hostname in line].pop()
ipv4 = ipv4.rstrip('app').strip()
return {
'hostname': hostname,
'ipv4': ipv4
}
def collect_datetime_facts():
date_time_facts = {}
now = datetime.datetime.now()
date_time_facts['year'] = now.strftime('%Y')
date_time_facts['month'] = now.strftime('%m')
date_time_facts['weekday'] = now.strftime('%A')
date_time_facts['weekday_number'] = now.strftime('%w')
date_time_facts['weeknumber'] = now.strftime('%W')
date_time_facts['day'] = now.strftime('%d')
date_time_facts['hour'] = now.strftime('%H')
date_time_facts['minute'] = now.strftime('%M')
date_time_facts['second'] = now.strftime('%S')
date_time_facts['epoch'] = now.strftime('%s')
date_time_facts['date'] = now.strftime('%Y-%m-%d')
date_time_facts['time'] = now.strftime('%H:%M:%S')
date_time_facts['iso8601_micro'] = now.utcnow().strftime("%Y-%m-%dT%H:%M:%S.%fZ")
date_time_facts['iso8601'] = now.utcnow().strftime("%Y-%m-%dT%H:%M:%SZ")
date_time_facts['iso8601_basic'] = now.strftime("%Y%m%dT%H%M%S%f")
date_time_facts['iso8601_basic_short'] = now.strftime("%Y%m%dT%H%M%S")
date_time_facts['tz'] = time.strftime("%Z")
date_time_facts['tz_offset'] = time.strftime("%z")
return date_time_facts
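# Illustrative usage, not part of the original module: print a few of the
# collected fact dictionaries. collect_network_facts() is skipped here because
# it depends on a local `boot` module that may not be importable.
if __name__ == '__main__':
    print(collect_python_facts()['version'])
    print(collect_platform_facts()['system'])
    print(collect_datetime_facts()['date'])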
|
python
|
"""Token module to indicate that a feed plugin for LOFAR can be generated from
here.
"""
# Since the LOFAR project provides an AntPat compatible file of beam-model data,
# no initialization steps are necessary.
|
python
|
"""
A collection of modules for collecting, analyzing and plotting
financial data. User contributions welcome!
"""
#from __future__ import division
import os, time, warnings, md5
from urllib import urlopen
try: import datetime
except ImportError:
raise SystemExit('The finance module requires datetime support (python2.3)')
from matplotlib import verbose, get_configdir
from artist import Artist
from dates import date2num, num2date
from matplotlib.cbook import Bunch
from matplotlib.collections import LineCollection, PolyCollection
from matplotlib.colors import colorConverter
from lines import Line2D, TICKLEFT, TICKRIGHT
from patches import Rectangle
import matplotlib.numerix as nx
from matplotlib.transforms import scale_transform, Value, zero, one, \
scale_sep_transform, blend_xy_sep_transform
from pylab import gca
configdir = get_configdir()
cachedir = os.path.join(configdir, 'finance.cache')
def parse_yahoo_historical(fh, asobject=False, adjusted=True):
"""
Parse the historical data in file handle fh from yahoo finance and return
results as a list of
d, open, close, high, low, volume
    where d is a floating point representation of date, as returned by date2num
    if adjusted=True, use adjusted prices
"""
results = []
lines = fh.readlines()
for line in lines[1:]:
vals = line.split(',')
if len(vals)!=7: continue
datestr = vals[0]
dt = datetime.date(*time.strptime(datestr, '%d-%b-%y')[:3])
d = date2num(dt)
open, high, low, close = [float(val) for val in vals[1:5]]
volume = int(vals[5])
if adjusted:
aclose = float(vals[6])
m = aclose/close
open *= m
high *= m
low *= m
close = aclose
results.append((d, open, close, high, low, volume))
results.reverse()
if asobject:
date, open, close, high, low, volume = map(nx.asarray, zip(*results))
return Bunch(date=date, open=open, close=close, high=high, low=low, volume=volume)
else:
return results
def quotes_historical_yahoo(ticker, date1, date2, asobject=False, adjusted=True):
"""
Get historical data for ticker between date1 and date2. date1 and
date2 are datetime instances
results are a list of tuples
(d, open, close, high, low, volume)
    where d is a floating point representation of date, as returned by date2num
    if asobject is True, the return val is an object with attrs date,
    open, close, high, low, volume, which are equal length arrays
    if adjusted=True, use adjusted prices
Ex:
sp = f.quotes_historical_yahoo('^GSPC', d1, d2, asobject=True, adjusted=True)
returns = (sp.open[1:] - sp.open[:-1])/sp.open[1:]
[n,bins,patches] = hist(returns, 100)
mu = mean(returns)
sigma = std(returns)
x = normpdf(bins, mu, sigma)
plot(bins, x, color='red', lw=2)
"""
d1 = (date1.month-1, date1.day, date1.year)
d2 = (date2.month-1, date2.day, date2.year)
## urlFmt = 'http://table.finance.yahoo.com/table.csv?a=%d&b=%d&c=%d&d=%d&e=%d&f=%d&s=%s&y=0&g=d&ignore=.csv'
##
##
## url = urlFmt % (d1[0], d1[1], d1[2],
## d2[0], d2[1], d2[2], ticker)
urlFmt = 'http://ichart.finance.yahoo.com/table.csv?s=%s&d=%d&e=%d&f=%d&g=d&a=%d&b=%d&c=%d&ignore=.csv'
url = urlFmt % (ticker, d2[0], d2[1], d2[2], d1[0], d1[1], d1[2] )
print url
cachename = os.path.join(cachedir, md5.md5(url).hexdigest())
if os.path.exists(cachename):
fh = file(cachename)
verbose.report('Using cachefile %s for %s'%(cachename, ticker))
else:
if not os.path.isdir(cachedir): os.mkdir(cachedir)
fh = file(cachename, 'w')
fh.write(urlopen(url).read())
fh.close()
verbose.report('Saved %s data to cache file %s'%(ticker, cachename))
fh = file(cachename, 'r')
ticker = ticker.upper()
try: ret = parse_yahoo_historical(fh, asobject, adjusted)
except IOError, exc:
warnings.warn('urlopen() failure\n' + url + '\n' + exc.strerror[1])
return None
return ret
def plot_day_summary(ax, quotes, ticksize=3,
colorup='k', colordown='r',
):
"""
quotes is a list of (time, open, close, high, low, ...) tuples
Represent the time, open, close, high, low as a vertical line
ranging from low to high. The left tick is the open and the right
tick is the close.
time must be in float date format - see date2num
ax : an Axes instance to plot to
ticksize : open/close tick marker in points
colorup : the color of the lines where close >= open
colordown : the color of the lines where close < open
return value is a list of lines added
"""
lines = []
for q in quotes:
t, open, close, high, low = q[:5]
if close>=open : color = colorup
else : color = colordown
vline = Line2D(
xdata=(t, t), ydata=(low, high),
color=color,
antialiased=False, # no need to antialias vert lines
)
oline = Line2D(
xdata=(t, t), ydata=(open, open),
color=color,
antialiased=False,
marker=TICKLEFT,
markersize=ticksize,
)
cline = Line2D(
xdata=(t, t), ydata=(close, close),
color=color,
antialiased=False,
markersize=ticksize,
marker=TICKRIGHT)
lines.extend((vline, oline, cline))
ax.add_line(vline)
ax.add_line(oline)
ax.add_line(cline)
ax.autoscale_view()
return lines
def candlestick(ax, quotes, width=0.2, colorup='k', colordown='r',
alpha=1.0):
"""
quotes is a list of (time, open, close, high, low, ...) tuples.
As long as the first 5 elements of the tuples are these values,
the tuple can be as long as you want (eg it may store volume).
time must be in float days format - see date2num
Plot the time, open, close, high, low as a vertical line ranging
from low to high. Use a rectangular bar to represent the
open-close span. If close >= open, use colorup to color the bar,
otherwise use colordown
ax : an Axes instance to plot to
width : fraction of a day for the rectangle width
colorup : the color of the rectangle where close >= open
colordown : the color of the rectangle where close < open
alpha : the rectangle alpha level
return value is lines, patches where lines is a list of lines
added and patches is a list of the rectangle patches added
"""
OFFSET = width/2.0
lines = []
patches = []
for q in quotes:
t, open, close, high, low = q[:5]
if close>=open :
color = colorup
lower = open
height = close-open
else :
color = colordown
lower = close
height = open-close
vline = Line2D(
xdata=(t, t), ydata=(low, high),
color='k',
linewidth=0.5,
antialiased=True,
)
rect = Rectangle(
xy = (t-OFFSET, lower),
width = width,
height = height,
facecolor = color,
edgecolor = color,
)
rect.set_alpha(alpha)
lines.append(vline)
patches.append(rect)
ax.add_line(vline)
ax.add_patch(rect)
ax.autoscale_view()
return lines, patches
def plot_day_summary2(ax, opens, closes, highs, lows, ticksize=4,
colorup='k', colordown='r',
):
"""
Represent the time, open, close, high, low as a vertical line
ranging from low to high. The left tick is the open and the right
tick is the close.
ax : an Axes instance to plot to
ticksize : size of open and close ticks in points
colorup : the color of the lines where close >= open
colordown : the color of the lines where close < open
return value is a list of lines added
"""
# note this code assumes if any value open, close, low, high is
# missing they all are missing
rangeSegments = [ ((i, low), (i, high)) for i, low, high in zip(xrange(len(lows)), lows, highs) if low != -1 ]
# the ticks will be from ticksize to 0 in points at the origin and
# we'll translate these to the i, close location
openSegments = [ ((-ticksize, 0), (0, 0)) ]
# the ticks will be from 0 to ticksize in points at the origin and
# we'll translate these to the i, close location
closeSegments = [ ((0, 0), (ticksize, 0)) ]
offsetsOpen = [ (i, open) for i, open in zip(xrange(len(opens)), opens) if open != -1 ]
offsetsClose = [ (i, close) for i, close in zip(xrange(len(closes)), closes) if close != -1 ]
scale = ax.figure.dpi * Value(1/72.0)
tickTransform = scale_transform( scale, zero())
r,g,b = colorConverter.to_rgb(colorup)
colorup = r,g,b,1
r,g,b = colorConverter.to_rgb(colordown)
colordown = r,g,b,1
colord = { True : colorup,
False : colordown,
}
colors = [colord[open<close] for open, close in zip(opens, closes) if open!=-1 and close !=-1]
assert(len(rangeSegments)==len(offsetsOpen))
assert(len(offsetsOpen)==len(offsetsClose))
assert(len(offsetsClose)==len(colors))
useAA = 0, # use tuple here
lw = 1, # and here
rangeCollection = LineCollection(rangeSegments,
colors = colors,
linewidths = lw,
antialiaseds = useAA,
)
openCollection = LineCollection(openSegments,
colors = colors,
antialiaseds = useAA,
linewidths = lw,
offsets = offsetsOpen,
transOffset = ax.transData,
)
openCollection.set_transform(tickTransform)
closeCollection = LineCollection(closeSegments,
colors = colors,
antialiaseds = useAA,
linewidths = lw,
offsets = offsetsClose,
transOffset = ax.transData,
)
closeCollection.set_transform(tickTransform)
minx, maxx = (0, len(rangeSegments))
miny = min([low for low in lows if low !=-1])
maxy = max([high for high in highs if high != -1])
corners = (minx, miny), (maxx, maxy)
ax.update_datalim(corners)
ax.autoscale_view()
# add these last
ax.add_collection(rangeCollection)
ax.add_collection(openCollection)
ax.add_collection(closeCollection)
return rangeCollection, openCollection, closeCollection
def candlestick2(ax, opens, closes, highs, lows, width=4,
colorup='k', colordown='r',
alpha=0.75,
):
"""
Represent the open, close as a bar line and high low range as a
vertical line.
ax : an Axes instance to plot to
width : the bar width in points
colorup : the color of the lines where close >= open
colordown : the color of the lines where close < open
alpha : bar transparency
return value is lineCollection, barCollection
"""
# note this code assumes if any value open, close, low, high is
# missing they all are missing
right = width/2.0
left = -width/2.0
barVerts = [ ( (left, 0), (left, close-open), (right, close-open), (right, 0) ) for open, close in zip(opens, closes) if open != -1 and close!=-1 ]
rangeSegments = [ ((i, low), (i, high)) for i, low, high in zip(xrange(len(lows)), lows, highs) if low != -1 ]
offsetsBars = [ (i, open) for i,open in zip(xrange(len(opens)), opens) if open != -1 ]
sx = ax.figure.dpi * Value(1/72.0) # scale for points
sy = (ax.bbox.ur().y() - ax.bbox.ll().y()) / (ax.viewLim.ur().y() - ax.viewLim.ll().y())
barTransform = scale_sep_transform(sx,sy)
r,g,b = colorConverter.to_rgb(colorup)
colorup = r,g,b,alpha
r,g,b = colorConverter.to_rgb(colordown)
colordown = r,g,b,alpha
colord = { True : colorup,
False : colordown,
}
colors = [colord[open<close] for open, close in zip(opens, closes) if open!=-1 and close !=-1]
assert(len(barVerts)==len(rangeSegments))
assert(len(rangeSegments)==len(offsetsBars))
assert(len(offsetsBars)==len(colors))
useAA = 0, # use tuple here
lw = 0.5, # and here
rangeCollection = LineCollection(rangeSegments,
colors = ( (0,0,0,1), ),
linewidths = lw,
antialiaseds = useAA,
)
barCollection = PolyCollection(barVerts,
facecolors = colors,
edgecolors = ( (0,0,0,1), ),
antialiaseds = useAA,
linewidths = lw,
offsets = offsetsBars,
transOffset = ax.transData,
)
barCollection.set_transform(barTransform)
minx, maxx = (0, len(rangeSegments))
miny = min([low for low in lows if low !=-1])
maxy = max([high for high in highs if high != -1])
corners = (minx, miny), (maxx, maxy)
ax.update_datalim(corners)
ax.autoscale_view()
# add these last
ax.add_collection(barCollection)
ax.add_collection(rangeCollection)
return rangeCollection, barCollection
def volume_overlay(ax, opens, closes, volumes,
colorup='k', colordown='r',
width=4, alpha=1.0):
"""
Add a volume overlay to the current axes. The opens and closes
are used to determine the color of the bar. -1 is missing. If a
value is missing on one it must be missing on all
ax : an Axes instance to plot to
width : the bar width in points
colorup : the color of the lines where close >= open
colordown : the color of the lines where close < open
alpha : bar transparency
"""
r,g,b = colorConverter.to_rgb(colorup)
colorup = r,g,b,alpha
r,g,b = colorConverter.to_rgb(colordown)
colordown = r,g,b,alpha
colord = { True : colorup,
False : colordown,
}
colors = [colord[open<close] for open, close in zip(opens, closes) if open!=-1 and close !=-1]
right = width/2.0
left = -width/2.0
bars = [ ( (left, 0), (left, v), (right, v), (right, 0)) for v in volumes if v != -1 ]
sx = ax.figure.dpi * Value(1/72.0) # scale for points
sy = (ax.bbox.ur().y() - ax.bbox.ll().y()) / (ax.viewLim.ur().y() - ax.viewLim.ll().y())
barTransform = scale_sep_transform(sx,sy)
offsetsBars = [ (i, 0) for i,v in enumerate(volumes) if v != -1 ]
barCollection = PolyCollection(bars,
facecolors = colors,
edgecolors = ( (0,0,0,1), ),
antialiaseds = (0,),
linewidths = (0.5,),
offsets = offsetsBars,
transOffset = ax.transData,
)
barCollection.set_transform(barTransform)
minx, maxx = (0, len(offsetsBars))
miny = 0
maxy = max([v for v in volumes if v!=-1])
corners = (minx, miny), (maxx, maxy)
ax.update_datalim(corners)
ax.autoscale_view()
    # add these last
    ax.add_collection(barCollection)
    return barCollection
def index_bar(ax, vals,
              facecolor='b', edgecolor='k',
width=4, alpha=1.0, ):
"""
Add a bar collection graph with height vals (-1 is missing).
ax : an Axes instance to plot to
width : the bar width in points
alpha : bar transparency
"""
facecolors = (colorConverter.to_rgba(facecolor, alpha),)
edgecolors = (colorConverter.to_rgba(edgecolor, alpha),)
right = width/2.0
left = -width/2.0
bars = [ ( (left, 0), (left, v), (right, v), (right, 0)) for v in vals if v != -1 ]
sx = ax.figure.dpi * Value(1/72.0) # scale for points
sy = (ax.bbox.ur().y() - ax.bbox.ll().y()) / (ax.viewLim.ur().y() - ax.viewLim.ll().y())
barTransform = scale_sep_transform(sx,sy)
offsetsBars = [ (i, 0) for i,v in enumerate(vals) if v != -1 ]
barCollection = PolyCollection(bars,
facecolors = facecolors,
edgecolors = edgecolors,
antialiaseds = (0,),
linewidths = (0.5,),
offsets = offsetsBars,
transOffset = ax.transData,
)
barCollection.set_transform(barTransform)
minx, maxx = (0, len(offsetsBars))
miny = 0
maxy = max([v for v in vals if v!=-1])
corners = (minx, miny), (maxx, maxy)
ax.update_datalim(corners)
ax.autoscale_view()
# add these last
ax.add_collection(barCollection)
return barCollection
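# Hedged usage sketch (not part of the original module): these builders expect
# parallel sequences of opens/closes/highs/lows/volumes with -1 marking missing
# samples, and they rely on the legacy matplotlib API imported above (Value,
# scale_sep_transform, colorConverter), so the sketch is kept as a comment.
#
#   import pylab
#   opens, closes = [10, 11, -1, 12], [11, 10, -1, 13]
#   highs, lows   = [12, 12, -1, 14], [ 9,  9, -1, 11]
#   volumes       = [100, 150, -1, 120]
#   ax = pylab.gca()
#   candlestick2(ax, opens, closes, highs, lows, width=4)
#   index_bar(pylab.figure().gca(), volumes, facecolor='g', edgecolor='k')
#   pylab.show()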
|
python
|
"""This script uses web.py to control and retreive the state of thermostats in a home.
"""
import json
import random
import web
# web.py definition that maps /anything to the Index class defined below
urls = ('/(.*)', 'Index')
# In memory database for existing sensors
sensor_devices_in_home = {}
def setup_devices():
"""Creating sensors for in memory database."""
sensor_devices_in_home['100'] = Thermostat(100)
sensor_devices_in_home['101'] = Thermostat(101)
class Thermostat:
"""Thermostat object."""
# Static members
operating_states = ('cool', 'heat', 'off')
fan_operating_states = ('off', 'auto')
lowest_allowed_set_point = 30.0
highest_allowed_set_point = 100.0
def __init__(self, thermostat_id):
self.identifier = thermostat_id
self.name = ''
self.read_temperature()
self.operating_mode = Thermostat.operating_states[2]
self.cooling_set_point = Thermostat.lowest_allowed_set_point
self.heating_set_point = Thermostat.highest_allowed_set_point
self.fan_mode = Thermostat.fan_operating_states[0]
def __str__(self):
return json.dumps(self.__dict__)
def set_thermostat_name(self, thermostat_name):
"""Personalizing the name of the thermostat."""
self.name = thermostat_name
def read_temperature(self):
"""Reads the temperate for a random double generator bound by 30.0-100.0."""
self.current_temp = random.uniform(
Thermostat.lowest_allowed_set_point,
Thermostat.highest_allowed_set_point)
def set_operating_mode(self, mode):
"""Sets the Thermostat operating mode (heat, cool, off)."""
if mode.lower() in Thermostat.operating_states:
self.operating_mode = mode.lower()
else:
raise web.notacceptable(
'{' +
f'"Error": "The Mode entered: {mode} was not found in Thermostat operating states"' +
'}')
def set_fan_mode(self, mode):
"""Sets the Thermostat fan mode (auto, off)."""
if mode.lower() in Thermostat.fan_operating_states:
self.fan_mode = mode.lower()
else:
raise web.notacceptable(
'{' +
f'"Error": "Fan mode entered: {mode} was not found in Thermostat fan operating states"' +
'}')
def set_cooling_set_point(self, temperature):
"""Sets the cooling set point bound by 30.0-100.0."""
if temperature < Thermostat.lowest_allowed_set_point or temperature > Thermostat.highest_allowed_set_point:
raise web.notacceptable(
'{' +
f'"Error": " The temperature {temperature} exceeds allowed temperature range of {Thermostat.lowest_allowed_set_point} to {Thermostat.highest_allowed_set_point}"' +
'}')
self.cooling_set_point = temperature
def set_heating_set_point(self, temperature):
"""Sets the heating set point bound by 30.0-100.0."""
if temperature < Thermostat.lowest_allowed_set_point or temperature > Thermostat.highest_allowed_set_point:
raise web.notacceptable(
'{' +
f'"Error": "The temperature {temperature} exceeds allowed temperature range of {Thermostat.lowest_allowed_set_point} to {Thermostat.highest_allowed_set_point}"' +
'}')
self.heating_set_point = temperature
class Index:
"""Index class called by web.py."""
def GET(self, thermostat_id):
"""GET functionality for web.py; interaction with web browser. Returns Thermostat object."""
if thermostat_id not in sensor_devices_in_home:
raise web.notfound(
'{' + f'"Error": "Could not find device by this thermostat ID: {thermostat_id}"' + '}')
return sensor_devices_in_home[thermostat_id]
def POST(self, thermostat_id):
"""POST functionality for web.py; interaction with web browser. Returns Thermostat object."""
if thermostat_id not in sensor_devices_in_home:
raise web.notfound(
'{' + f'"Error": "Could not find device by this ID: {thermostat_id}"' + '}')
sensor = sensor_devices_in_home[thermostat_id]
data = json.loads(web.data())
if 'name' in data:
sensor.set_thermostat_name(data['name'])
if 'current_temp' in data:
sensor.read_temperature()
if 'operating_mode' in data:
sensor.set_operating_mode(data['operating_mode'])
if 'cooling_set_point' in data:
sensor.set_cooling_set_point(data['cooling_set_point'])
if 'heating_set_point' in data:
sensor.set_heating_set_point(data['heating_set_point'])
if 'fan_mode' in data:
sensor.set_fan_mode(data['fan_mode'])
return sensor
setup_devices()
if __name__ == "__main__":
app = web.application(urls, globals())
app.run()
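# Hedged client sketch (not part of the original script): with the app running
# on web.py's default port 8080, the thermostats created in setup_devices() can
# be read and updated roughly like this, e.g. with the 'requests' library:
#
#   import requests
#   print(requests.get('http://localhost:8080/100').text)
#   requests.post('http://localhost:8080/100',
#                 json={'operating_mode': 'cool', 'cooling_set_point': 72.0})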
|
python
|
#
# Copyright (c) 2015-2016 Wind River Systems, Inc.
#
# SPDX-License-Identifier: Apache-2.0
#
import errno
import functools
import select
import socket
def syscall_retry_on_interrupt(func, *args):
"""Attempt system call again if interrupted by EINTR """
for _ in range(0, 5):
try:
return func(*args)
except (OSError, socket.error, select.error) as e:
if errno.EINTR != e.args[0]:
raise
def local_uptime_in_secs():
try:
with open('/proc/uptime', 'r') as f:
uptime_secs = int(float(f.readline().split()[0]))
except IOError:
uptime_secs = 0
return uptime_secs
_process_start_time = local_uptime_in_secs()
def process_uptime_in_secs():
return local_uptime_in_secs() - _process_start_time
class Object(object):
"""
Class Object Type Definition
"""
def __init__(self, **kwargs):
self.__dict__.update(kwargs)
def as_dict(self):
return self.__dict__
class Result(object):
"""
Generic Result Object Type Definition
"""
def __init__(self, result_data, ancillary_data=None):
self.result_data = result_data
self.ancillary_data = ancillary_data
def __str__(self):
return("Result: result-data: %s ancillary-data: %s"
% (self.result_data, self.ancillary_data))
class Constants(object):
def __iter__(self):
for attr in dir(self):
            if not callable(getattr(self, attr)) and not attr.startswith("__"):
value = getattr(self, attr)
yield value
class Constant(object):
"""
Constant Type Definition
"""
def __init__(self, value):
self.value = value
def __get__(self, obj, obj_type):
return self.value
def __set__(self, obj, value):
raise AttributeError("ERROR: attempting to set a constant.")
def __delete__(self, obj):
raise AttributeError("ERROR: attempting to delete a constant.")
class Singleton(type):
"""
Singleton Type Definition
"""
_instances = {}
def __call__(cls, *args, **kwargs):
if cls not in cls._instances:
cls._instances[cls] = \
super(Singleton, cls).__call__(*args, **kwargs)
return cls._instances[cls]
def coroutine(func):
"""
Co-Routine decorator that wraps a function and starts the co-routine
"""
    def start(*args, **kwargs):
        target = func(*args, **kwargs)
        target.send(None)
        return target
    functools.update_wrapper(start, func)
    return start
def get_local_host_name():
"""
Returns the name of the local host
"""
return socket.gethostname()
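if __name__ == '__main__':
    # Hedged demo (not part of the original module) exercising the helpers above.
    class _Defaults(Constants):
        # Constant is a data descriptor: reads return the value, writes raise.
        RETRY_LIMIT = Constant(5)
        TIMEOUT_SECS = Constant(30)
    print(list(_Defaults()))  # iterates the constant values
    @coroutine
    def _printer():
        # coroutine() primes the generator so send() can be called immediately
        while True:
            item = (yield)
            print("received: %s" % (item,))
    pipe = _printer()
    pipe.send('hello')
    print("host: %s, uptime: %s secs" % (get_local_host_name(), process_uptime_in_secs()))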
|
python
|
# -*- coding: utf-8 -*-
#
# Copyright 2015 Ternaris, Munich, Germany
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
from __future__ import absolute_import, division
import os
from marv import bb, db
from marv.bb import job_logger as logger
from bagbunker import bb_bag
__version__ = '0.0.3'
@bb.job_model()
class Metadata(object):
robot_name = db.Column(db.String(42), nullable=False)
use_case = db.Column(db.String(126), nullable=False)
# XXX: This will receive all messages. What we really want is to
# receive only /robot_name/name messages, but be called also if there
# are no messages.
@bb.job()
@bb_bag.messages(topics='*')
def job(fileset, messages):
if not fileset.bag:
return
name_topic = '/robot_name/name'
messages = messages \
if any(True for x in fileset.bag.topics if x.topic.name == name_topic) \
else ()
for topic, msg, timestamp in messages:
if topic == name_topic:
try:
robot_name = msg.data
except AttributeError:
robot_name = msg.robot_name
logger.debug('found robot_name via topic: %s' % msg)
use_case = ''
break
else:
path = fileset.dirpath.split(os.sep)
robot_name = path[3] if len(path) > 3 else 'unknown'
use_case = path[6] if len(path) > 6 else 'unknown'
logger.info('robot_name=%s, use_case=%s', robot_name, use_case)
yield Metadata(robot_name=robot_name, use_case=use_case)
@bb.filter()
@bb.filter_input('robot', operators=['substring'])
def filter_robot(query, ListingEntry, robot):
return query.filter(ListingEntry.robot.contains(robot.val))
@bb.filter()
@bb.filter_input('use_case', operators=['substring'])
def filter_use_case(query, ListingEntry, use_case):
return query.filter(ListingEntry.use_case.contains(use_case.val))
@bb.listing()
@bb.listing_column('robot')
@bb.listing_column('use_case')
def listing(fileset):
jobrun = fileset.get_latest_jobrun('deepfield::metadata')
if jobrun is None:
return {}
meta = Metadata.query.filter(Metadata.jobrun == jobrun).first()
if meta is None:
return {}
return {
'robot': meta.robot_name,
'use_case': meta.use_case,
}
|
python
|
import numpy as np
def calculate_predictions(events, cluster_info):
# This function takes existing events, and backtracks this to predictions.
predictions = np.empty((len(cluster_info), 2), dtype=np.float64)
# The predictions matrices are stored (for historical reasons) in microns. So multiply with pixel size
# TODO: Make this configurable
predictions[:, 0] = (events['y'] - cluster_info['y']) * 55000
predictions[:, 1] = (events['x'] - cluster_info['x']) * 55000
return predictions
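if __name__ == '__main__':
    # Hedged example (not part of the original module): both arguments are
    # structured arrays with 'x'/'y' fields, matching the field access above.
    dtype = [('x', np.float64), ('y', np.float64)]
    events = np.array([(10.2, 5.7), (3.1, 8.4)], dtype=dtype)
    clusters = np.array([(10.0, 5.5), (3.0, 8.0)], dtype=dtype)
    print(calculate_predictions(events, clusters))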
|
python
|
import os
import unittest
from __main__ import vtk, qt, ctk, slicer
#
# TortuosityLogicTests
#
class TortuosityLogicTests:
def __init__(self, parent):
parent.title = "TortuosityLogicTests" # TODO make this more human readable by adding spaces
parent.categories = ["Testing.TestCases"]
parent.dependencies = []
parent.contributors = ["Johan Andruejol (Kitware)"] # replace with "Firstname Lastname (Org)"
parent.helpText = """
"""
parent.acknowledgementText = """TODO""" # replace with organization, grant and thanks.
self.parent = parent
# Add this test to the SelfTest module's list for discovery when the module
# is created. Since this module may be discovered before SelfTests itself,
# create the list if it doesn't already exist.
try:
slicer.selfTests
except AttributeError:
slicer.selfTests = {}
slicer.selfTests['TortuosityLogicTests'] = self.runTest
def runTest(self):
tester = TortuosityLogicTestsTest()
tester.runTests()
#
# qTortuosityLogicTestsTest
#
class TortuosityLogicTestsTest(unittest.TestCase):
def delayDisplay(self,message,msec=1000):
"""This utility method displays a small dialog and waits.
This does two things: 1) it lets the event loop catch up
to the state of the test so that rendering and widget updates
have all taken place before the test continues and 2) it
shows the user/developer/tester the state of the test
so that we'll know when it breaks.
"""
print(message)
self.info = qt.QDialog()
self.infoLayout = qt.QVBoxLayout()
self.info.setLayout(self.infoLayout)
self.label = qt.QLabel(message,self.info)
self.infoLayout.addWidget(self.label)
qt.QTimer.singleShot(msec, self.info.close)
self.info.exec_()
def getTestMethodNames(self):
methods = []
for method in dir(self):
if (callable(getattr(self, method)) and method.find('test_') != -1):
methods.append(method)
return methods
def setUp(self):
""" Do whatever is needed to reset the state - typically a scene clear will be enough.
"""
def tearDown(self):
pass
def findWidget(self, widget, objectName):
if widget.objectName == objectName:
return widget
else:
children = []
for w in widget.children():
resulting_widget = self.findWidget(w, objectName)
if resulting_widget:
return resulting_widget
return None
def runTests(self):
"""Run as few or as many tests as needed here.
"""
for methodName in self.getTestMethodNames():
self.runTest(methodName)
def runTest(self, method):
self.setUp()
getattr(self, method)()
self.tearDown()
def runAndCheckMetrics(self, nameTemplate, expectedValues):
for i in range(len(expectedValues)):
self.delayDisplay('testing %s ' %(nameTemplate %i))
node = slicer.util.getFirstNodeByClassByName('vtkMRMLSpatialObjectsNode', nameTemplate %i)
self.assertTrue(node, 'loading node failed')
logic = slicer.modules.tortuosity.logic()
self.assertTrue(logic.RunMetrics(node, logic.All), 'RunMetrics failed')
dm = logic.GetDistanceMetricArray(node)
self.assertTrue(dm, 'No distance metric array')
icm = logic.GetInflectionCountMetricArray(node)
self.assertTrue(icm, 'No inflection count array')
soam = logic.GetSumOfAnglesMetricArray(node)
self.assertTrue(soam, 'No sum of angles array')
for index in range(dm.GetNumberOfTuples()):
dmValue = dm.GetValue(index)
icmValue = icm.GetValue(index)
soamValue = soam.GetValue(index)
msg = '%s value look up failed. Expected: %s Got: %s (Case #%s)'
self.assertAlmostEqual(dmValue, expectedValues[i]['DM'], 4, msg %('DM', expectedValues[i]['DM'], dmValue, i))
self.assertAlmostEqual(icmValue, expectedValues[i]['ICM'], 4, msg %('ICM', expectedValues[i]['ICM'], icmValue, i) )
self.assertAlmostEqual(soamValue, expectedValues[i]['SOAM'], 4, msg %('SOAM', expectedValues[i]['SOAM'], soamValue, i) )
def test_TestStraightVessels(self):
self.delayDisplay('test_TestStraightVessels')
nameTemplate = 'StraightTube_test%s'
expectedValues = [
{
'DM': 1.0,
'ICM': 1.0,
                'SOAM': 0.0,
},
{
'DM': 1.0,
'ICM': 1.0,
                'SOAM': 0.0,
},
{
'DM': 1.0,
'ICM': 1.0,
                'SOAM': 0.0,
},
]
self.runAndCheckMetrics(nameTemplate, expectedValues)
self.delayDisplay('Test passed!')
def test_TestSinusVessels(self):
self.delayDisplay('test_TestSinusVessels')
nameTemplate = 'SinusTube_test%s'
expectedValues = [
{
'DM': 1.21581,
'ICM': 1.21581 * 2.0,
                'SOAM': 0.411187,
},
{
'DM': 1.21581,
'ICM': 1.21581 * 4.0,
                'SOAM': 0.411187,
},
{
'DM': 5.87042,
'ICM': 5.87042 * 2.0,
                'SOAM': 0.158497,
},
{
'DM': 3.40308,
'ICM': 3.40308 * 2.0,
                'SOAM': 1.28584,
},
]
self.runAndCheckMetrics(nameTemplate, expectedValues)
self.delayDisplay('Test passed!')
#
# qWelcomeModuleTestWidget
#
class TortuosityLogicTestsWidget():
def __init__(self, parent = None):
if not parent:
self.parent = slicer.qMRMLWidget()
self.parent.setLayout(qt.QVBoxLayout())
self.parent.setMRMLScene(slicer.mrmlScene)
else:
self.parent = parent
self.layout = self.parent.layout()
if not parent:
self.setup()
self.parent.show()
self.moduleName = 'TortuosityLogicTests'
self.tester = TortuosityLogicTestsTest()
def setup(self):
# Instantiate and connect widgets ...
# reload button
# (use this during development, but remove it when delivering
# your module to users)
self.reloadButton = qt.QPushButton("Reload")
self.reloadButton.toolTip = "Reload this module."
self.reloadButton.name = "Tests Reload"
self.layout.addWidget(self.reloadButton)
self.reloadButton.connect('clicked()', self.onReload)
# reload and test button
# (use this during development, but remove it when delivering
# your module to users)
self.reloadAndTestButton = qt.QPushButton("Reload and Test")
self.reloadAndTestButton.toolTip = "Reload this module and then run the self tests."
self.layout.addWidget(self.reloadAndTestButton)
self.reloadAndTestButton.connect('clicked()', self.onReloadAndTest)
self.testButton = qt.QPushButton('Run Tests')
self.layout.addWidget(self.testButton)
self.testButton.connect('clicked(bool)', self.tester.runTests)
# Add vertical spacer
self.layout.addStretch(1)
def onReload(self):
"""Generic reload method for any scripted module.
        ModuleWizard will substitute correct default.
"""
globals()[self.moduleName] = slicer.util.reloadScriptedModule(self.moduleName)
def onReloadAndTest(self):
self.onReload()
self.tester.runTests()
|
python
|
#!/usr/bin/env python3
# author: greyshell
# description: demo how to use heapq library
import heapq
class MaxHeapNode(object):
def __init__(self, key):
self.key = key
def __lt__(self, other):
# compare based on val
# tweak the comparison logic to build max heap: change less_than_sign to greater_than_sign
return self.key > other.key
def __gt__(self, other):
# compare based on val
# tweak the comparison logic to build max heap
return self.key < other.key
def __eq__(self, other):
return self.key == other.key
def __ne__(self, other):
return self.key != other.key
def __str__(self):
return str(self.key)
def demo_max_heap():
print(f"========================================")
print(f"demo max heap ")
print(f"========================================")
    # create a max heap of MaxHeapNode objects, ordered by key
max_heap = list()
heapq.heappush(max_heap, MaxHeapNode(17))
heapq.heappush(max_heap, MaxHeapNode(1000))
heapq.heappush(max_heap, MaxHeapNode(250))
heapq.heappush(max_heap, MaxHeapNode(500))
print(f"max value {max_heap[0]}")
node = heapq.heappop(max_heap)
print(f"popped item: {node.key}")
print(f"max value {max_heap[0]}")
class HeapSatelliteNode(object):
def __init__(self, name, age):
# self.val = val
self.name = name
self.age = age
def __lt__(self, other):
# compare based on age
# tweak the comparison logic to build max heap: change less_than_sign to greater_than_sign
# key = age, so compare based on the key
return self.age > other.age
def __eq__(self, other):
return self.age == other.age
def __str__(self):
return f"name:{self.name}, age:{self.age}"
def demo_max_satellite_heap():
print(f"========================================")
print(f"demo max satellite heap ")
print(f"========================================")
    # create a max heap of HeapSatelliteNode objects (name, age), ordered by age
max_heap = list()
# compare based on the age
heapq.heappush(max_heap, HeapSatelliteNode('asinha', 39))
heapq.heappush(max_heap, HeapSatelliteNode('dhaval', 22))
heapq.heappush(max_heap, HeapSatelliteNode('ravi', 23))
print(f"max value {max_heap[0]}")
node = heapq.heappop(max_heap)
print(f"popped item: {node.name}")
print(f"max value {max_heap[0]}")
print(heapq.heappop(max_heap))
print(heapq.heappop(max_heap))
if max_heap: # check if the list is empty or not
print(heapq.heappop(max_heap))
def heap_sort(nums):
heapq.heapify(nums)
return [heapq.heappop(nums) for _ in range(0, len(nums))]
def main():
min_heap = [12, 7, 11, 15, 35, 17]
print(f"========================================")
print(f"demo heap sort")
print(f"========================================")
print(f"before heap sort: {min_heap}")
r = heap_sort(min_heap)
print(f"after heap sort: {r}")
print(f"========================================")
print(f"demo min heap ")
print(f"========================================")
# build a min heap
heapq.heapify(min_heap) # in-place, in linear time, O(n), Heap elements can be tuples.
# push an item
heapq.heappush(min_heap, 25) # O(log(n))
heapq.heappush(min_heap, 5) # O(log(n))
heapq.heappush(min_heap, 10) # O(log(n))
# peek the min item
data = min_heap[0] # O(1)
# pop an item
data = heapq.heappop(min_heap) # O(log(n))
print(f"popped item: {data}")
print(f"current heap : {min_heap}")
# when we need to make the heap size constant, we can use heappushpop() and heapreplace()
# time complexity: O(log(n)), improving the performance
dummy_nums = min_heap.copy() # copy all elements to another list
data = -1
popped_value = heapq.heappushpop(min_heap, data) # 1st push then pop
print(f"popped value: {popped_value}")
popped_value = heapq.heapreplace(dummy_nums, data) # 1st pop from existing min then push
print(f"popped value: {popped_value}")
print(f"========================================")
print(f"demo nlargest / smallest element or element ")
print(f"========================================")
# k largest / smallest elements
# best for smaller values of k
min_heap.append(100)
min_heap.append(200)
min_heap.append(50)
print(f"nums = {min_heap}")
large_items = heapq.nlargest(3, min_heap)
small_items = heapq.nsmallest(3, min_heap)
print(f"3 largest values: {large_items}")
print(f"3 smallest values: {small_items}")
# when k==1, it is more efficient to use the built-in min() and max() functions.
# for larger k values it is more efficient to use the sorted() function.
# kth largest element
k = 3
kth_large = heapq.nlargest(k, min_heap)[-1]
print(f"{k}th/rd/nd largest value: {kth_large}") # last element of the kth_large list
# demo max heap
demo_max_heap()
# demo satellite data in the max heap
demo_max_satellite_heap()
if __name__ == '__main__':
main()
|
python
|
import logging
from gehomesdk.erd.converters.abstract import ErdReadOnlyConverter
from gehomesdk.erd.converters.primitives import *
from gehomesdk.erd.values.laundry import ErdTankSelected, TankSelected, TANK_SELECTED_MAP
_LOGGER = logging.getLogger(__name__)
class TankSelectedConverter(ErdReadOnlyConverter[TankSelected]):
def erd_decode(self, value: str) -> TankSelected:
try:
om = ErdTankSelected(erd_decode_int(value))
return TANK_SELECTED_MAP[om].value
except (KeyError, ValueError):
return ErdTankSelected.NA
|
python
|
#!/usr/bin/env python
"""This script generates a two-column reStructuredText table in which:
- each row corresponds to a module in a Python package
- the first column links to the module documentation
- the second column links to the module source code generated by
the 'viewcode' extension
"""
__date__ = "2015-07-4"
__author__ = "Joshua Griffin Dunn"
# coding": utf-8
import sys
import importlib
import argparse
from modulefinder import ModuleFinder
from sphinxcontrib.argdoc.ext import make_rest_table
def get_submodules(package):
"""Find names of all modules in `package`
Parameters
----------
package : imported Python package
Returns
-------
list
Sorted list of fully-qualified module names
"""
mf = ModuleFinder()
modules = sorted(["%s.%s" % (package.__name__,X) for X in mf.find_all_submodules(package) if X != "__init__"])
return modules
def get_link_pair(modname):
"""Return a link to the Sphinx documentation and source code for module specified by `modname`
Parameters
----------
modname : str
Python module name, fully-qualified
Returns
-------
str
Link to module documentation
str
Link to module source code
"""
docsummary = importlib.import_module(modname).__doc__.split("\n")[0]
slashname = modname.replace(".","/")
p1 = ":mod:`%s <%s>`" % (docsummary,modname)
p2 = "`%s <_modules/%s.html>`_" % (modname,slashname)
return p1, p2
def main(argv=sys.argv[1:]):
parser = argparse.ArgumentParser(description=__doc__,
formatter_class=argparse.RawDescriptionHelpFormatter)
parser.add_argument("package",type=str,help="Python package to document")
parser.add_argument("outfile",type=str,help="Output file")
parser.add_argument("--title",default=[],nargs=2,
help="Column titles (optional)")
args = parser.parse_args(argv)
print("Importing package '%s'..." % args.package)
package = importlib.import_module(args.package)
modules = get_submodules(package)
print("Found %s submodules..." % len(modules))
pairs = [get_link_pair(X) for X in modules]
title = False
if len(args.title) > 0:
print("Using column titles '%s' and '%s'" % (args.title[0],args.title[1]))
title = True
pairs = [tuple(args.title)] + pairs
table = u"\n".join(make_rest_table(pairs,title=title))
print("Writing to '%s'..." % args.outfile)
with open(args.outfile,"w") as fout:
fout.write(table)
fout.write("\n")
fout.close()
print("Done.")
if __name__ == "__main__":
main()
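# Hedged usage example (the script and output file names here are hypothetical):
#
#   python make_module_table.py sphinxcontrib.argdoc docs/source/modules.rst \
#       --title Module Description
#
# writes a two-column reST table linking each submodule's documentation page
# and its 'viewcode'-generated source page.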
|
python
|
# ------------------------------------
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT License.
# ------------------------------------
import os
import uuid
from azure.keyvault.certificates import CertificateClient, CertificatePolicy
from key_vault_base import KeyVaultBase
class KeyVaultCertificates(KeyVaultBase):
def __init__(self):
credential = self.get_default_credential()
self.certificate_client = CertificateClient(
vault_url=os.environ["AZURE_PROJECT_URL"], credential=credential
)
self.certificate_name = "cert-name-" + uuid.uuid1().hex
def create_certificate(self):
print("Creating certificate (name: {})".format(self.certificate_name))
create_poller = self.certificate_client.begin_create_certificate(
certificate_name=self.certificate_name,
policy=CertificatePolicy.get_default())
print("\twaiting...")
create_poller.result()
print("\tdone")
def get_certificate(self):
print("Getting a certificate...")
certificate = self.certificate_client.get_certificate(certificate_name=self.certificate_name)
print("\tdone, certificate: {}.".format(certificate.name))
def delete_certificate(self):
print("Deleting a certificate...")
poller = self.certificate_client.begin_delete_certificate(certificate_name=self.certificate_name)
deleted_certificate = poller.result()
print("\tdone: " + deleted_certificate.name)
def run(self):
print("")
print("------------------------")
print("Key Vault - Certificates\nIdentity - Credential")
print("------------------------")
print("1) Create a certificate")
print("2) Get that certificate")
print("3) Delete that certificate (Clean up the resource)")
print("")
try:
self.create_certificate()
self.get_certificate()
finally:
self.delete_certificate()
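# Hedged entry point (modelled on similar Azure SDK samples): assumes
# AZURE_PROJECT_URL points at an existing Key Vault and that
# get_default_credential() can resolve a working credential.
if __name__ == "__main__":
    KeyVaultCertificates().run()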
|
python
|
import os
import dropbox
from .cloud_provider import CloudProvider
class DBox(CloudProvider):
def __init__(self, access_token):
"""
Initializes class
Parameters
----------
access_token : str
Dropbox access token
"""
if not isinstance(access_token, str):
raise TypeError('Access token must be a string')
access_token = access_token.strip()
if len(access_token) < 1:
raise ValueError("Access token must not be empty")
self.__service__ = dropbox.Dropbox(access_token)
def upload_public_file(self, local_path, dest_path=None):
"""
Upload the provided file to the specified Dropbox location
        and share a publicly visible file
Parameters
----------
local_path : str
Path to local file that needs to be uploaded
dest_path : str, Optional
Path in Dropbox to upload to. Note that this is likely within
the context (directory) where permissions are valid
Returns
-------
str :
Publicly visible address where the file can be accessed
"""
if not dest_path:
_, file_name = os.path.split(local_path)
dest_path = '/' + file_name
with open(local_path, "rb") as file_handle:
_ = self.__service__.files_upload(file_handle.read(), dest_path)
share_metadata = self.__service__.sharing_create_shared_link_with_settings(dest_path,
settings=dropbox.sharing.SharedLinkSettings())
# The shareable link takes one to dropbox instead of the file itself:
embed_link = share_metadata.url.replace("www.dropbox",
"dl.dropboxusercontent")
return embed_link
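# Hedged usage sketch (the token and paths are placeholders):
#
#   box = DBox("<dropbox-access-token>")
#   url = box.upload_public_file("results/report.pdf")  # uploaded as /report.pdf
#   print(url)  # dl.dropboxusercontent.com link suitable for direct download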
|
python
|
#####################################
# CELERY
#####################################
from datetime import timedelta
from kombu import Exchange
from kombu import Queue
def celery_queue(key):
return Queue(key, Exchange(key), routing_key=key)
CELERY_CREATE_MISSING_QUEUES = True
CELERY_DEFAULT_QUEUE = 'default'
CELERY_QUEUES = (
celery_queue('default'),
)
QUEUE_ROUTES = {
'default': {'queue': 'default', 'routing_key': 'default'},
}
CELERY_RESULT_BACKEND = None
CELERY_TASK_SERIALIZER = 'json'
CELERY_ACCEPT_CONTENT = ['json']
CELERYD_MAX_TASKS_PER_CHILD = 1
|
python
|
from django.db import models
from django.core.validators import MaxValueValidator, MinValueValidator
from authentication.models import Account
from authentication.models import AccountManager
class Entity(models.Model):
created_at = models.DateTimeField(auto_now_add=True)
created_by = Account
updated_at = models.DateTimeField(auto_now=True)
updated_by = Account
class Meta:
abstract = True
class Brewery(Entity):
name = models.CharField(max_length=128)
description = models.CharField(max_length=1024, null=True, blank=True)
def __str__(self):
return self.name
class Brewer(models.Model):
brewery = models.ForeignKey(Brewery, related_name='brewers', null=True, blank=True)
user = models.OneToOneField(Account, primary_key=False)
def __str__(self):
return ' @ '.join((self.user.username, self.brewery.name))
class Brew(Entity):
name = models.CharField(max_length=128)
description = models.CharField(max_length=1024)
style = models.CharField(max_length=128)
type = models.CharField(max_length=128)
abv = models.IntegerField(default=0)
brewer = models.ForeignKey(Brewer)
def __str__(self):
return self.name
class Image(Entity):
title = models.CharField(max_length=128)
image = models.ImageField(upload_to='/static/images/%Y/%m')
brew = models.ForeignKey(Brew, related_name='images', null=True, blank=True)
brewery = models.ForeignKey(Brewery, related_name='images', null=True, blank=True)
def __str__(self):
return self.title
class BrewDate(models.Model):
brew = models.ForeignKey(Brew, related_name='dates', null=True, blank=True)
date = models.DateTimeField()
activity = models.CharField(max_length=128)
TASTING_CATEGORIES = (
(1, 'Appearance'),
(2, 'Smell'),
(3, 'Taste'),
(4, 'Mouthfeel'),
(5, 'Overall'),
)
class Tasting(Entity):
brew = models.ForeignKey(Brew, related_name='tastings')
user = models.ForeignKey(Account, related_name='tastings')
appearance = models.FloatField(validators = [MinValueValidator(0.0), MaxValueValidator(5.0)])
smell = models.FloatField(validators = [MinValueValidator(0.0), MaxValueValidator(5.0)])
taste = models.FloatField(validators = [MinValueValidator(0.0), MaxValueValidator(5.0)])
mouthfeel = models.FloatField(validators = [MinValueValidator(0.0), MaxValueValidator(5.0)])
overall = models.FloatField(validators = [MinValueValidator(0.0), MaxValueValidator(5.0)])
class Keyword(models.Model):
tasting = models.ForeignKey(Tasting, related_name='keywords')
category = models.IntegerField(choices=TASTING_CATEGORIES, default=5)
key = models.CharField(max_length=128)
|
python
|
# Lint as: python3
# Copyright 2021 Jose Carlos Provencio
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from os import name
import cv2
#export PYTHONPATH=$PYTHONPATH:/usr/local/lib/python3.6/pyrealsense2
import pyrealsense2 as rs
import numpy as np
import time
from datetime import datetime
import json
#for create a new init config file
#init_config = {}
#init_config['ir1']={'enable':1, 'width':640, 'height':480, 'fps':90}
#init_config['ir2']={'enable':1, 'width':640, 'height':480, 'fps':90}
#init_config['color']={'enable':1, 'width':640, 'height':480, 'fps':30}
#init_config['debug']=1
#init_config['preconfig']=1
#with open('realsense_2ir/init_config.json', 'w') as outfile:
# json.dump(init_config, outfile)
class Modulo:
def __init__(self):
None
def start(self, nombre, local_data, out_data):
out_data[nombre]['error'] = {}
nfile='modulos/' + local_data['modName'] +'/'
if (len(local_data['args'])==0):
nfile = nfile+ 'init_config.json'
else:
nfile = nfile + local_data['args']
with open(nfile) as json_file:
self.init_data = json.load(json_file)
local_data['debug']=self.init_data['debug']
local_data['preconfig']=self.init_data['preconfig']
local_data['count']=1
local_data['time']=0
out_data[nombre]['frames'] = {}
out_data[nombre]['error'] = {}
self.config_cam(nombre,local_data, out_data)
if local_data['debug']:
if (self.init_data['ir1']['enable']):
cv2.namedWindow("IR1")
cv2.setMouseCallback("IR1", self.save_images_event)
if (self.init_data['ir2']['enable']):
cv2.namedWindow("IR2")
cv2.setMouseCallback("IR2", self.save_images_event)
if (self.init_data['color']['enable']):
cv2.namedWindow("COLOR")
cv2.setMouseCallback("COLOR", self.save_images_event)
self.save_image=0
def work(self, nombre, local_data, out_data):
time_now= time.time()
try:
out_data[nombre]['error'] = {}
frames = local_data['pipeline'].wait_for_frames()
#depth_frame = frames.get_depth_frame()
if (self.init_data['ir1']['enable']):
ir1 = frames.get_infrared_frame(1)
if (self.init_data['ir2']['enable']):
ir2 = frames.get_infrared_frame(2)
if (self.init_data['color']['enable']):
color = frames.get_color_frame()
except:
out_data[nombre]['error'][1] = 'Fallo de camara'
return
#if (not(1 in out_data['error'])):
# out_data['error']={1:'Fallo de camara'}
#out_data['error'].append(1)
#out_data['error_text'].append ('Fallo de camara')
#color_frame = frames.get_color_frame()
#if not depth_frame or not color_frame:
# continue
#rgb_image = np.asanyarray(color_frame.get_data())
#rgb_image1 = np.asanyarray(ir1.get_data())
#rgb_image2 = np.asanyarray(ir2.get_data())
#out_data['frames'] = []
#out_data['frames'].append(np.asanyarray(ir1.get_data()))
#out_data['frames'].append(np.asanyarray(ir1.get_data()))
out_data[nombre]['frames']={}
if (self.init_data['ir1']['enable']):
out_data[nombre]['frames']['ir1'] = np.asanyarray(ir1.get_data())
if (self.init_data['ir2']['enable']):
out_data[nombre]['frames']['ir2'] = np.asanyarray(ir2.get_data())
if (self.init_data['color']['enable']):
out_data[nombre]['frames']['color'] = np.asanyarray(color.get_data())
#out_data['frames'].append(np.asanyarray(ir1.get_data()))
local_data['count']= local_data['count'] + 1
local_data['time']=(time.time()-time_now)*1000
#cv2.rectangle(rgb_image, (det.left(), det.top()), (det.right(), det.bottom()), color_green, line_width)
if local_data['debug']:
print (local_data['time'])
if (self.init_data['ir1']['enable']):
cv2.imshow('IR1', out_data[nombre]['frames']['ir1'])
if (self.init_data['ir2']['enable']):
cv2.imshow('IR2', out_data[nombre]['frames']['ir2'])
if (self.init_data['color']['enable']):
                cv2.imshow('COLOR', out_data[nombre]['frames']['color'])
key = cv2.waitKey(1)
if self.save_image:
self.save_image=0
                now_str = datetime.now().strftime("%d%m%Y_%H%M%S")
                prefix = 'modulos/' + nombre + '/images/' + now_str
                if self.init_data['ir1']['enable']==1:
                    cv2.imwrite(prefix + '_ir1.jpg', out_data[nombre]['frames']['ir1'])
                if self.init_data['ir2']['enable']==1:
                    cv2.imwrite(prefix + '_ir2.jpg', out_data[nombre]['frames']['ir2'])
                if self.init_data['color']['enable']==1:
                    cv2.imwrite(prefix + '_color.jpg', out_data[nombre]['frames']['color'])
def onError(self,nombre,local_data, out_data):
self.config_cam(nombre,local_data, out_data) #local_data['piperline'].stop()
def event (self, nombre, local, out, event, event_sync):
None
def end (self, nombre, local_data, out_data):
        local_data['pipeline'].stop()
def config_cam(self,nombre,local_data, out_data):
# Configure depth and color streams
local_data['pipeline'] = rs.pipeline()
local_data['config'] = rs.config()
local_data['config'].enable_device('849412062160')
#config.enable_stream(rs.stream.depth, 640, 480, rs.format.z16, 30)
if self.init_data['color']['enable']==1:
local_data['config'].enable_stream(rs.stream.color, self.init_data['color']['width'],
self.init_data['color']['height'],
rs.format.bgr8,
self.init_data['color']['fps'])
if self.init_data['ir1']['enable']==1:
local_data['config'].enable_stream(rs.stream.infrared, 1, self.init_data['ir1']['width'],
self.init_data['ir1']['height'],
rs.format.y8,
self.init_data['ir1']['fps'])
if self.init_data['ir2']['enable']==1:
local_data['config'].enable_stream(rs.stream.infrared, 2, self.init_data['ir2']['width'],
self.init_data['ir2']['height'],
rs.format.y8,
self.init_data['ir2']['fps'])
#config.enable_stream(rs.stream.infrared, 2, 1280,720, rs.format.y8, 6)
# Start streaming
local_data['cfg'] = local_data['pipeline'].start(local_data['config'])
if (local_data['preconfig']==1):
jsonObj = json.load(open('modulos/' + nombre + '/config_cam.json'))
json_string= str(jsonObj).replace("'", '\"')
local_data['dev'] = local_data['cfg'].get_device()
local_data['advnc_mode'] = rs.rs400_advanced_mode(local_data['dev'])
local_data['advnc_mode'].load_json(json_string)
def save_images_event(self, event, x, y, flags, param):
if event == cv2.EVENT_LBUTTONDOWN:
self.save_image=1
|
python
|
from flask import url_for, request
from flask_restplus import Namespace, Resource
import strongr.restdomain.model.gateways
from strongr.restdomain.model.oauth2 import Client
ns = Namespace('oauth', description='Operations related to oauth2 login')
@ns.route('/revoke', methods=['POST'])
class Revoke(Resource):
def post(self):
auth_server = strongr.restdomain.model.gateways.Gateways.auth_server()
return auth_server.create_revocation_response()
@ns.route('/token', methods=['POST'])
class Token(Resource):
def post(self):
auth_server = strongr.restdomain.model.gateways.Gateways.auth_server()
return auth_server.create_token_response()
# @ns.route('/authorize')
# class Authorize(Resource):
# def get(self):
# auth_server = strongr.restdomain.model.gateways.Gateways.auth_server()
# # Login is required since we need to know the current resource owner.
# # It can be done with a redirection to the login page, or a login
# # form on this authorization page.
# if request.method == 'GET':
# grant = auth_server.validate_authorization_request()
# #return render_template(
# # 'authorize.html',
# # grant=grant,
# # user=current_user,
# #)
# confirmed = request.form['confirm']
# if confirmed:
# # granted by resource owner
# return auth_server.create_authorization_response()
# # denied by resource owner
# return auth_server.create_authorization_response(None)
#
# def post(self):
# auth_server = strongr.restdomain.model.gateways.Gateways.auth_server()
# # Login is required since we need to know the current resource owner.
# # It can be done with a redirection to the login page, or a login
# # form on this authorization page.
# #if request.method == 'GET':
# #grant = auth_server.validate_authorization_request()
# # return render_template(
# # 'authorize.html',
# # grant=grant,
# # user=current_user,
# # )
# #confirmed = request.form['confirm']
# #if confirmed:
# # granted by resource owner
# return auth_server.create_authorization_response('1')
# # denied by resource owner
# #return auth_server.create_authorization_response(None)
|
python
|
from hs_students import HighSchool
highSchoolStudent = HighSchool('Manasvi', 10)
print(highSchoolStudent.get_name_captalize())
print(highSchoolStudent.get_School_name())
|
python
|
from django.conf.urls import url
from . import views
from django.conf import settings
from django.conf.urls.static import static
from django.urls import path
from photos import views
from .views import BlogCreateView
urlpatterns =[
path('post/new/', BlogCreateView.as_view(), name='post_new'),
url(r'^one/', views.postdetail, name='postdetail'),
url(r'^todays/', views.news_today, name = 'newsToday'),
url(r'^archives/(\d{4}-\d{2}-\d{2})/$',views.past_days_news,name = 'pastNews'),
url(r'^search/', views.search_results, name='search_results'),
url(r'^article/(\d+)',views.article,name ='article'),
url(r'^$', views.index, name='index'),
]
if settings.DEBUG:
urlpatterns+= static(settings.MEDIA_URL, document_root = settings.MEDIA_ROOT)
|
python
|
from tests.util import BaseTest
class Test_C2025(BaseTest):
def error_code(self) -> str:
return "C2025"
def test_fail_1(self):
code = """
foo = (1 if True
else 0)
"""
result = self.run_flake8(code, True)
self.assert_error_at(result, "C2025", 1, 8)
def test_fail_2(self):
code = """
foo = (1
if True
else 0)
"""
result = self.run_flake8(code, True)
self.assert_error_at(result, "C2025", 1, 8)
def test_fail_3(self):
code = """
foo = (1 if True else 0)
"""
result = self.run_flake8(code, True)
self.assert_error_at(result, "C2025", 1, 8)
|
python
|
"""
This following demonstrates a Noise_NNpsk0+psk2_25519_ChaChaPoly_BLAKE2s handshake and initial transport messages.
"""
from dissononce.processing.impl.handshakestate import HandshakeState
from dissononce.processing.impl.symmetricstate import SymmetricState
from dissononce.processing.impl.cipherstate import CipherState
from dissononce.processing.handshakepatterns.interactive.NN import NNHandshakePattern
from dissononce.processing.modifiers.psk import PSKPatternModifier
from dissononce.cipher.chachapoly import ChaChaPolyCipher
from dissononce.dh.x25519.x25519 import X25519DH
from dissononce.hash.blake2s import Blake2sHash
import dissononce, logging
import os
if __name__ == "__main__":
dissononce.logger.setLevel(logging.DEBUG)
# setup initiator and responder variables
alice_s = X25519DH().generate_keypair()
bob_s = X25519DH().generate_keypair()
psks = (
os.urandom(32),
os.urandom(32)
)
# prepare handshakestate objects for initiator and responder
alice_handshakestate = HandshakeState(
SymmetricState(
CipherState(
ChaChaPolyCipher()
),
Blake2sHash()
),
X25519DH()
)
bob_handshakestate = HandshakeState(
SymmetricState(
CipherState(
ChaChaPolyCipher()
),
Blake2sHash()
),
X25519DH()
)
# modify NNHandshakePattern
nn_psk0_pattern = PSKPatternModifier(0).modify(NNHandshakePattern())
nn_psk0_psk2_pattern = PSKPatternModifier(2).modify(nn_psk0_pattern)
# initialize handshakestate objects
alice_handshakestate.initialize(nn_psk0_psk2_pattern, True, b'prologue', s=alice_s, psks=psks)
bob_handshakestate.initialize(nn_psk0_psk2_pattern, False, b'prologue', s=bob_s, psks=psks)
# -> psk, e
message_buffer = bytearray()
alice_handshakestate.write_message(b'', message_buffer)
bob_handshakestate.read_message(bytes(message_buffer), bytearray())
# <- e, ee, psk
message_buffer = bytearray()
    bob_cipherstates = bob_handshakestate.write_message(b'', message_buffer)
    alice_cipherstates = alice_handshakestate.read_message(bytes(message_buffer), bytearray())
# transport phase
# alice to bob
ciphertext = alice_cipherstates[0].encrypt_with_ad(b'', b'Hello')
plaintext = bob_cipherstates[0].decrypt_with_ad(b'', ciphertext)
assert plaintext == b'Hello'
# bob to alice
ciphertext = bob_cipherstates[1].encrypt_with_ad(b'', b'World')
plaintext = alice_cipherstates[1].decrypt_with_ad(b'', ciphertext)
assert plaintext == b'World'
|
python
|
from invoke import (
run,
task,
)
from . import common
LANGUAGE = 'js'
@task
def clean():
print 'cleaning %s...' % (LANGUAGE,)
with common.base_directory():
run("rm -rf %(language)s && mkdir %(language)s" % {'language': LANGUAGE})
@task(default=True)
def compile():
print 'compiling %s...' % (LANGUAGE,)
with common.base_directory():
run(
'pbjs src/protobufs/services/registry/all.proto'
' --path=src --target=commonjs --min > js/index.js'
)
|
python
|
import math
from typing import List
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch import Tensor
def _initialize_embeddings(weight: Tensor, d: int) -> None:
d_sqrt_inv = 1 / math.sqrt(d)
# This initialization is taken from torch.nn.Linear and is equivalent to:
# nn.init.kaiming_uniform_(..., a=math.sqrt(5))
# Also, this initialization was used in the paper "Revisiting Deep Learning Models
# for Tabular Data".
nn.init.uniform_(weight, a=-d_sqrt_inv, b=d_sqrt_inv)
class CLSEmbedding(nn.Module):
"""Embedding of the [CLS]-token for BERT-like inference.
To learn about the [CLS]-based inference, see [devlin2018bert].
When used as a module, the [CLS]-embedding is appended **to the beginning** of each
item in the batch.
Examples:
.. testcode::
batch_size = 2
n_tokens = 3
d = 4
            cls_embedding = CLSEmbedding(d)
x = torch.randn(batch_size, n_tokens, d)
x = cls_embedding(x)
assert x.shape == (batch_size, n_tokens + 1, d)
assert (x[:, 0, :] == cls_embedding.expand(len(x))).all()
References:
* [devlin2018bert] Jacob Devlin, Ming-Wei Chang, Kenton Lee, Kristina Toutanova "BERT: Pre-training of Deep Bidirectional Transformers for Language Understanding" 2018
"""
def __init__(self, d_embedding: int) -> None:
"""
Args:
d_embedding: the size of the embedding
"""
super().__init__()
self.weight = nn.Parameter(Tensor(d_embedding))
self.reset_parameters()
def reset_parameters(self) -> None:
_initialize_embeddings(self.weight, self.weight.shape[-1])
def expand(self, *d_leading: int) -> Tensor:
"""Repeat the [CLS]-embedding (e.g. to make a batch).
Namely, this::
cls_batch = cls_embedding.expand(d1, d2, ..., dN)
is equivalent to this::
new_dimensions = (1,) * N
cls_batch = cls_embedding.weight.view(*new_dimensions, -1).expand(
d1, d2, ..., dN, len(cls_embedding.weight)
)
Examples:
.. testcode::
batch_size = 2
n_tokens = 3
d = 4
x = torch.randn(batch_size, n_tokens, d)
                cls_embedding = CLSEmbedding(d)
assert cls_embedding.expand(len(x)).shape == (len(x), d)
assert cls_embedding.expand(len(x), 1).shape == (len(x), 1, d)
Note:
Under the hood, the `torch.Tensor.expand` method is applied to the
underlying :code:`weight` parameter, so gradients will be propagated as
expected.
Args:
d_leading: the additional new dimensions
Returns:
tensor of the shape :code:`(*d_leading, len(self.weight))`
"""
if not d_leading:
return self.weight
new_dims = (1,) * (len(d_leading) - 1)
return self.weight.view(*new_dims, -1).expand(*d_leading, -1)
def forward(self, x: Tensor) -> Tensor:
if x.ndim != 3:
raise ValueError('The input must have three dimensions')
return torch.cat([self.expand(len(x), 1), x], dim=1)
class OneHotEncoder(nn.Module):
cardinalities: Tensor
    def __init__(self, cardinalities: List[int]) -> None:
        super().__init__()
        self.register_buffer('cardinalities', torch.tensor(cardinalities))
def forward(self, x: Tensor) -> Tensor:
if x.ndim != 2:
raise ValueError('The input must have two dimensions')
encoded_columns = [
F.one_hot(column, cardinality)
for column, cardinality in zip(x.T, self.cardinalities)
]
return torch.cat(encoded_columns, 1)
class CatEmbeddings(nn.Module):
"""Embeddings for categorical features."""
category_offsets: Tensor
def __init__(self, cardinalities: List[int], d_embedding: int, bias: bool) -> None:
super().__init__()
if not cardinalities:
raise ValueError('cardinalities must be non-empty')
if d_embedding < 1:
raise ValueError('d_embedding must be positive')
category_offsets = torch.tensor([0] + cardinalities[:-1]).cumsum(0)
self.register_buffer('category_offsets', category_offsets)
self.embeddings = nn.Embedding(sum(cardinalities), d_embedding)
self.bias = (
nn.Parameter(Tensor(len(cardinalities), d_embedding)) if bias else None
)
self.reset_parameters()
def reset_parameters(self) -> None:
for parameter in [self.embeddings.weight, self.bias]:
if parameter is not None:
_initialize_embeddings(parameter, parameter.shape[-1])
def forward(self, x: Tensor) -> Tensor:
if x.ndim != 2:
raise ValueError('The input must have two dimensions')
x = self.embeddings(x + self.category_offsets[None])
if self.bias is not None:
x = x + self.bias[None]
return x
class LinearEmbeddings(nn.Module):
"""Linear embeddings for numerical features."""
    def __init__(self, n_features: int, d_embedding: int, bias: bool = True):
        super().__init__()
        self.weight = nn.Parameter(Tensor(n_features, d_embedding))
self.bias = nn.Parameter(Tensor(n_features, d_embedding)) if bias else None
self.reset_parameters()
def reset_parameters(self) -> None:
for parameter in [self.weight, self.bias]:
if parameter is not None:
_initialize_embeddings(parameter, parameter.shape[-1])
def forward(self, x: Tensor) -> Tensor:
if x.ndim != 2:
raise ValueError('The input must have two dimensions')
x = self.weight[None] * x[..., None]
if self.bias is not None:
x = x + self.bias[None]
return x
class PeriodicEmbeddings(nn.Module):
# Source: https://github.com/Yura52/tabular-dl-num-embeddings/blob/e49e95c52f829ad0ab7d653e0776c2a84c03e261/lib/deep.py#L28
    def __init__(self, n_features: int, d_embedding: int, sigma: float) -> None:
        super().__init__()
        if d_embedding % 2:
            raise ValueError('d_embedding must be even')
        self.sigma = sigma
self.coefficients = nn.Parameter(Tensor(n_features, d_embedding // 2))
self.reset_parameters()
def reset_parameters(self):
nn.init.normal_(self.coefficients, 0.0, self.sigma)
def forward(self, x: Tensor) -> Tensor:
if x.ndim != 2:
raise ValueError('The input must have two dimensions')
x = 2 * math.pi * self.coefficients[None] * x[..., None]
return torch.cat([torch.cos(x), torch.sin(x)], -1)
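if __name__ == '__main__':
    # Hedged smoke test (not part of the original module): embed a toy batch of
    # 2 rows with 3 numerical and 2 categorical features into d_embedding=8.
    x_num = torch.randn(2, 3)
    x_cat = torch.stack(
        [torch.randint(0, 4, (2,)), torch.randint(0, 7, (2,))], dim=1
    )
    num_embeddings = LinearEmbeddings(3, 8)
    cat_embeddings = CatEmbeddings([4, 7], 8, bias=True)
    cls_embedding = CLSEmbedding(8)
    tokens = torch.cat([num_embeddings(x_num), cat_embeddings(x_cat)], dim=1)  # (2, 5, 8)
    tokens = cls_embedding(tokens)  # (2, 6, 8)
    print(tokens.shape)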
|
python
|
#!/usr/bin/python3
# Extract Ansible tasks from tripleo repos
import os
import yaml
TASK_ATTRS = [
"action",
"any_errors_fatal",
"args",
"async",
"become",
"become_exe",
"become_flags",
"become_method",
"become_user",
"changed_when",
"check_mode",
"collections",
"connection",
"debugger",
"delay",
"delegate_facts",
"delegate_to",
"diff",
"environment",
"failed_when",
"ignore_errors",
"ignore_unreachable",
"local_action",
"loop",
"loop_control",
"module_defaults",
"name",
"no_log",
"notify",
"poll",
"port",
"register",
"remote_user",
"retries",
"run_once",
"tags",
"throttle",
"timeout",
"until",
"vars",
"when",
"with_",
]
def get_task_action(task):
"""Return the action of the task."""
if "action" in task or "local_action" in task:
action = "action" if "action" in task else "local_action"
if "module" in task[action]:
# - action:
# module: copy
# args:
# src: a
# dest: b
return task[action]["module"]
# - action: copy src=a dest=b
return task[action].split()[0]
with_items = []
for t in task:
if t.startswith("with_"):
with_items.append(t)
action = set(list(task.keys())).difference(set(TASK_ATTRS + with_items))
if len(action) > 1:
raise Exception(f"Task has more than one action: {task}")
if len(action) == 0:
raise Exception(f"Can't get action from task: {task}")
action = list(action)[0]
if action.startswith("ansible.builtin"):
return action.split(".")[-1]
return action
def get_task_options(task):
"""Return task options."""
if "action" in task or "local_action" in task:
action = "action" if "action" in task else "local_action"
if "module" in task[action]:
# - action:
# module: copy
# args:
# src: a
# dest: b
            keys = list(task[action].keys())
keys.remove("module")
return set(keys)
# - action: copy src=a dest=b
return set(task[action].split()[1:])
action = get_task_action(task)
keys = list(task[action].keys())
return set(keys)
def task_stats_print(task, result, output):
"""Print task stats."""
str_result = "\n".join(result)
if output == "-":
print(str_result)
else:
with open(output, "w") as f:
f.write(str_result)
def directory_parse(directory, task=None):
"""Parse directory."""
opts = set()
for root, dirs, files in os.walk(directory):
if files and "molecule" not in root:
for f in files:
                if (
                    (f.endswith(".yml") or f.endswith(".yaml"))
                    and "puppet" not in f
                ):
opts = opts.union(file_parse(os.path.join(root, f), task))
return opts
def file_parse(file, p_task=None):
"""Parse file."""
options_d = set()
with open(file, "r") as f:
try:
y = yaml.load(f, Loader=yaml.FullLoader)
except yaml.scanner.ScannerError as ye:
print(f"Error parsing YAML in {file}: {ye}")
return options_d
if y:
for task in y:
try:
if isinstance(task, str):
continue
if "block" in task:
for i in task["block"]:
action = get_task_action(i)
if action == p_task:
options = get_task_options(i)
options_d = options_d.union(options)
elif "hosts" in task:
for i in (
task.get("tasks", [])
+ task.get("pre_tasks", [])
+ task.get("post_tasks", [])
):
action = get_task_action(i)
if action == p_task:
options = get_task_options(i)
options_d = options_d.union(options)
# in case of heat templates
elif "outputs" in task:
heat_tasks = []
if "role_data" in task["outputs"]:
new_tasks = task["outputs"]["role_data"]
for prep in (
"upgrade_tasks",
"pre_upgrade_rolling_tasks",
"post_upgrade_tasks",
"update_tasks",
"post_update_tasks",
"host_prep_tasks",
"external_deploy_tasks",
"external_post_deploy_tasks",
):
heat_tasks += new_tasks.get(prep, [])
for i in heat_tasks:
action = get_task_action(i)
if action == p_task:
options = get_task_options(i)
options_d = options_d.union(options)
else:
action = get_task_action(task)
if action == p_task:
options = get_task_options(task)
options_d = options_d.union(options)
except Exception as e:
print(f"Error in file: {file}: {e}")
continue
return options_d
def main():
"""Main function."""
import argparse
parser = argparse.ArgumentParser(
description="Extract tasks from a playbook."
)
parser.add_argument(
"--output",
"-o",
help="Output file. Default: stdout.",
default="-",
)
parser.add_argument(
"--task",
"-t",
help="Get options of a specific Ansible module.",
required=True,
)
parser.add_argument(
"files",
nargs="*",
default="tmp.yml",
help="Files to extract tasks from.",
)
args = parser.parse_args()
opts = set()
for f in args.files:
if os.path.isdir(f):
for r in directory_parse(f, args.task):
opts.add(r)
elif os.path.isfile(f):
opts = opts.union(file_parse(f, args.task))
task_stats_print(args.task, opts, args.output)
if __name__ == "__main__":
main()
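# Hedged usage example (the script name and paths are placeholders):
#
#   python extract_tasks.py --task package tripleo-ansible/ roles/ -o package_options.txt
#
# collects the option names used with the 'package' module across all playbooks
# found under the given paths and writes them to package_options.txt.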
|
python
|
from searx import settings, autocomplete
from searx.languages import language_codes as languages
from searx.url_utils import urlencode
COOKIE_MAX_AGE = 60 * 60 * 24 * 365 * 5 # 5 years
LANGUAGE_CODES = [l[0] for l in languages]
LANGUAGE_CODES.append('all')
DISABLED = 0
ENABLED = 1
class MissingArgumentException(Exception):
pass
class ValidationException(Exception):
pass
class Setting(object):
"""Base class of user settings"""
def __init__(self, default_value, **kwargs):
super(Setting, self).__init__()
self.value = default_value
for key, value in kwargs.items():
setattr(self, key, value)
self._post_init()
def _post_init(self):
pass
def parse(self, data):
self.value = data
def get_value(self):
return self.value
def save(self, name, resp):
resp.set_cookie(name, self.value, max_age=COOKIE_MAX_AGE)
class StringSetting(Setting):
"""Setting of plain string values"""
pass
class EnumStringSetting(Setting):
"""Setting of a value which can only come from the given choices"""
def _validate_selection(self, selection):
if selection not in self.choices:
raise ValidationException('Invalid value: "{0}"'.format(selection))
def _post_init(self):
if not hasattr(self, 'choices'):
raise MissingArgumentException('Missing argument: choices')
self._validate_selection(self.value)
def parse(self, data):
self._validate_selection(data)
self.value = data
class MultipleChoiceSetting(EnumStringSetting):
"""Setting of values which can only come from the given choices"""
def _validate_selections(self, selections):
for item in selections:
if item not in self.choices:
raise ValidationException('Invalid value: "{0}"'.format(selections))
def _post_init(self):
if not hasattr(self, 'choices'):
raise MissingArgumentException('Missing argument: choices')
self._validate_selections(self.value)
def parse(self, data):
if data == '':
self.value = []
return
elements = data.split(',')
self._validate_selections(elements)
self.value = elements
def parse_form(self, data):
self.value = []
for choice in data:
if choice in self.choices and choice not in self.value:
self.value.append(choice)
def save(self, name, resp):
resp.set_cookie(name, ','.join(self.value), max_age=COOKIE_MAX_AGE)
class SearchLanguageSetting(EnumStringSetting):
"""Available choices may change, so user's value may not be in choices anymore"""
def parse(self, data):
if data not in self.choices and data != self.value:
# hack to give some backwards compatibility with old language cookies
data = str(data).replace('_', '-')
lang = data.split('-')[0]
if data in self.choices:
pass
elif lang in self.choices:
data = lang
elif data == 'nb-NO':
data = 'no-NO'
elif data == 'ar-XA':
data = 'ar-SA'
else:
data = self.value
self.value = data
class MapSetting(Setting):
"""Setting of a value that has to be translated in order to be storable"""
def _post_init(self):
if not hasattr(self, 'map'):
raise MissingArgumentException('missing argument: map')
if self.value not in self.map.values():
raise ValidationException('Invalid default value')
def parse(self, data):
if data not in self.map:
raise ValidationException('Invalid choice: {0}'.format(data))
self.value = self.map[data]
self.key = data
def save(self, name, resp):
if hasattr(self, 'key'):
resp.set_cookie(name, self.key, max_age=COOKIE_MAX_AGE)
class SwitchableSetting(Setting):
""" Base class for settings that can be turned on && off"""
def _post_init(self):
self.disabled = set()
self.enabled = set()
if not hasattr(self, 'choices'):
raise MissingArgumentException('missing argument: choices')
def transform_form_items(self, items):
return items
def transform_values(self, values):
return values
def parse_cookie(self, data):
if data[DISABLED] != '':
self.disabled = set(data[DISABLED].split(','))
if data[ENABLED] != '':
self.enabled = set(data[ENABLED].split(','))
def parse_form(self, items):
items = self.transform_form_items(items)
self.disabled = set()
self.enabled = set()
for choice in self.choices:
if choice['default_on']:
if choice['id'] in items:
self.disabled.add(choice['id'])
else:
if choice['id'] not in items:
self.enabled.add(choice['id'])
def save(self, resp):
resp.set_cookie('disabled_{0}'.format(self.value), ','.join(self.disabled), max_age=COOKIE_MAX_AGE)
resp.set_cookie('enabled_{0}'.format(self.value), ','.join(self.enabled), max_age=COOKIE_MAX_AGE)
def get_disabled(self):
disabled = self.disabled
for choice in self.choices:
if not choice['default_on'] and choice['id'] not in self.enabled:
disabled.add(choice['id'])
return self.transform_values(disabled)
def get_enabled(self):
enabled = self.enabled
for choice in self.choices:
if choice['default_on'] and choice['id'] not in self.disabled:
enabled.add(choice['id'])
return self.transform_values(enabled)
class EnginesSetting(SwitchableSetting):
def _post_init(self):
super(EnginesSetting, self)._post_init()
transformed_choices = []
for engine_name, engine in self.choices.items():
for category in engine.categories:
transformed_choice = dict()
transformed_choice['default_on'] = not engine.disabled
transformed_choice['id'] = '{}__{}'.format(engine_name, category)
transformed_choices.append(transformed_choice)
self.choices = transformed_choices
def transform_form_items(self, items):
return [item[len('engine_'):].replace('_', ' ').replace(' ', '__') for item in items]
def transform_values(self, values):
if len(values) == 1 and next(iter(values)) == '':
return list()
transformed_values = []
for value in values:
engine, category = value.split('__')
transformed_values.append((engine, category))
return transformed_values
class PluginsSetting(SwitchableSetting):
def _post_init(self):
super(PluginsSetting, self)._post_init()
transformed_choices = []
for plugin in self.choices:
transformed_choice = dict()
transformed_choice['default_on'] = plugin.default_on
transformed_choice['id'] = plugin.id
transformed_choices.append(transformed_choice)
self.choices = transformed_choices
def transform_form_items(self, items):
return [item[len('plugin_'):] for item in items]
class Preferences(object):
"""Validates and saves preferences to cookies"""
def __init__(self, themes, categories, engines, plugins):
super(Preferences, self).__init__()
self.key_value_settings = {'categories': MultipleChoiceSetting(['general'], choices=categories),
'language': SearchLanguageSetting(settings['search']['language'],
choices=LANGUAGE_CODES),
'locale': EnumStringSetting(settings['ui']['default_locale'],
choices=list(settings['locales'].keys()) + ['']),
'autocomplete': EnumStringSetting(settings['search']['autocomplete'],
choices=list(autocomplete.backends.keys()) + ['']),
'image_proxy': MapSetting(settings['server']['image_proxy'],
map={'': settings['server']['image_proxy'],
'0': False,
'1': True,
'True': True,
'False': False}),
'method': EnumStringSetting('POST', choices=('GET', 'POST')),
'safesearch': MapSetting(settings['search']['safe_search'], map={'0': 0,
'1': 1,
'2': 2}),
'theme': EnumStringSetting(settings['ui']['default_theme'], choices=themes),
'results_on_new_tab': MapSetting(False, map={'0': False,
'1': True,
'False': False,
'True': True})}
self.engines = EnginesSetting('engines', choices=engines)
self.plugins = PluginsSetting('plugins', choices=plugins)
self.unknown_params = {}
def get_as_url_params(self):
settings_kv = {}
for k, v in self.key_value_settings.items():
if isinstance(v, MultipleChoiceSetting):
settings_kv[k] = ','.join(v.get_value())
else:
settings_kv[k] = v.get_value()
settings_kv['disabled_engines'] = ','.join(self.engines.disabled)
settings_kv['enabled_engines'] = ','.join(self.engines.enabled)
settings_kv['disabled_plugins'] = ','.join(self.plugins.disabled)
settings_kv['enabled_plugins'] = ','.join(self.plugins.enabled)
return urlencode(settings_kv)
def parse_dict(self, input_data):
for user_setting_name, user_setting in input_data.items():
if user_setting_name in self.key_value_settings:
self.key_value_settings[user_setting_name].parse(user_setting)
elif user_setting_name == 'disabled_engines':
self.engines.parse_cookie((input_data.get('disabled_engines', ''),
input_data.get('enabled_engines', '')))
elif user_setting_name == 'disabled_plugins':
self.plugins.parse_cookie((input_data.get('disabled_plugins', ''),
input_data.get('enabled_plugins', '')))
def parse_form(self, input_data):
disabled_engines = []
enabled_categories = []
disabled_plugins = []
for user_setting_name, user_setting in input_data.items():
if user_setting_name in self.key_value_settings:
self.key_value_settings[user_setting_name].parse(user_setting)
elif user_setting_name.startswith('engine_'):
disabled_engines.append(user_setting_name)
elif user_setting_name.startswith('category_'):
enabled_categories.append(user_setting_name[len('category_'):])
elif user_setting_name.startswith('plugin_'):
disabled_plugins.append(user_setting_name)
else:
self.unknown_params[user_setting_name] = user_setting
self.key_value_settings['categories'].parse_form(enabled_categories)
self.engines.parse_form(disabled_engines)
self.plugins.parse_form(disabled_plugins)
# cannot be used in case of engines or plugins
def get_value(self, user_setting_name):
if user_setting_name in self.key_value_settings:
return self.key_value_settings[user_setting_name].get_value()
def save(self, resp):
for user_setting_name, user_setting in self.key_value_settings.items():
user_setting.save(user_setting_name, resp)
self.engines.save(resp)
self.plugins.save(resp)
for k, v in self.unknown_params.items():
resp.set_cookie(k, v, max_age=COOKIE_MAX_AGE)
return resp
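# A minimal usage sketch of the Setting classes above. The choice values are made-up
# stand-ins rather than real searx configuration; it only illustrates how parse() and
# get_value() behave for single- and multiple-choice settings.
def _example_settings_usage():
    method = EnumStringSetting('POST', choices=('GET', 'POST'))
    method.parse('GET')                      # validated against choices
    assert method.get_value() == 'GET'
    cats = MultipleChoiceSetting(['general'], choices=['general', 'images', 'news'])
    cats.parse('general,images')             # cookie-style comma-separated string
    assert cats.get_value() == ['general', 'images']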
|
python
|
from scapy.all import *
from ccsds_base import CCSDSPacket
class PL_IF_HK_TLM_PKT_TlmPkt(Packet):
"""Pl_if App
app = PL_IF
command = HK_TLM_PKT
msg_id = PL_IF_HK_TLM_MID = 0x09de = 0x0800 + 0x1de
"""
name = "PL_IF_HK_TLM_PKT_TlmPkt"
fields_desc = [
# APPEND_ITEM CMD_VALID_COUNT 16 UINT "Count of valid commands received since startup or the last reset counter command"
ShortField("CMD_VALID_COUNT", 0),
# APPEND_ITEM CMD_ERROR_COUNT 16 UINT "Count of invalid commands received since startup or the last reset counter command"
ShortField("CMD_ERROR_COUNT", 0),
# APPEND_ITEM LAST_TBL_ACTION 8 UINT "Last table action: 1=Register, 2=Load, 3=Dump"
ByteField("LAST_TBL_ACTION", 0),
# APPEND_ITEM LAST_TBL_STATUS 8 UINT "Last table action status: 0=Undefined, 1=Invalid, 2=Valid"
ByteField("LAST_TBL_STATUS", 0),
# APPEND_ITEM PLIF_OBJ_EXEC_CNT 16 UINT "Count of Pl_if object executions"
ShortField("PLIF_OBJ_EXEC_CNT", 0),
]
bind_layers(CCSDSPacket, PL_IF_HK_TLM_PKT_TlmPkt, pkttype=0, apid=478)
class PL_IF_PL_STATUS_PKT_TlmPkt(Packet):
"""Pl_if PL status packet
app = PL_IF
command = PL_STATUS_PKT
msg_id = PL_IF_PL_STATUS_TLM_MID = 0x09e4 = 0x0800 + 0x1e4
"""
name = "PL_IF_PL_STATUS_PKT_TlmPkt"
fields_desc = [
# APPEND_ITEM BUSY_STATUS 8 UINT "Latest busy status"
ByteField("BUSY_STATUS", 0),
# APPEND_ITEM IMG_READY_STATUS 8 UINT "Latest image ready status"
ByteField("IMG_READY_STATUS", 0),
# APPEND_ITEM BAD_ADDR_STATUS 8 UINT "Latest bad address status"
ByteField("BAD_ADDR_STATUS", 0),
# APPEND_ITEM ALIVE_STATUS 8 UINT "Aliveness status of the payload"
ByteField("ALIVE_STATUS", 0),
# APPEND_ITEM DOWNLINK_STATUS 8 UINT "Downlink status of the payload"
ByteField("DOWNLINK_STATUS", 0),
# STATE NO_IMAGE 0
# STATE IN_PROGRESS 1
# STATE COMPLETE 2
# APPEND_ITEM PAD 24 UINT "Pad"
X3BytesField("PAD", 0),
# APPEND_ITEM IMAGE_SIZE 32 UINT "Image size as reported by the payload"
IntField("IMAGE_SIZE", 0),
# APPEND_ITEM CURRENT_DOWNLINK_ADDRESS 32 UINT "Address currently being downlinked from the payload"
IntField("CURRENT_DOWNLINK_ADDRESS", 0),
]
bind_layers(CCSDSPacket, PL_IF_PL_STATUS_PKT_TlmPkt, pkttype=0, apid=484)
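# A small usage sketch (hedged: it assumes CCSDSPacket from ccsds_base exposes the
# apid/pkttype fields referenced by bind_layers above; the values are arbitrary):
#
#   pkt = CCSDSPacket(apid=478, pkttype=0) / PL_IF_HK_TLM_PKT_TlmPkt(CMD_VALID_COUNT=3)
#   raw = bytes(pkt)
#   decoded = CCSDSPacket(raw)   # bind_layers lets scapy pick the telemetry layer by apid
#   decoded[PL_IF_HK_TLM_PKT_TlmPkt].CMD_VALID_COUNT   # -> 3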
|
python
|
from autorop.bof.Corefile import Corefile
|
python
|
import netaddr
from django.urls import reverse
from sidekick.models import (
LogicalSystem, RoutingType,
NetworkServiceType, NetworkService,
NetworkServiceGroup,
)
from .utils import BaseTest
class NetworkServiceTest(BaseTest):
# Logical System
def test_logicalsystem_basic(self):
v = LogicalSystem.objects.get(name="Peering")
self.assertEqual(v.slug, "peering")
def test_view_logicalsystem_index(self):
resp = self.client.get(
reverse('plugins:sidekick:logicalsystem_index'))
self.assertContains(resp, 'Peering')
def test_view_logicalsystem_detail(self):
v = LogicalSystem.objects.get(id=1)
resp = self.client.get(v.get_absolute_url())
self.assertContains(resp, 'Peering')
self.assertContains(resp, "East University's peering service")
# Network Service Type
def test_networkservicectype_basic(self):
v = NetworkServiceType.objects.get(name="Peering")
self.assertEqual(v.slug, "peering")
def test_view_networkservicetype_index(self):
resp = self.client.get(
reverse('plugins:sidekick:networkservicetype_index'))
self.assertContains(resp, 'Peering')
def test_view_networkservicetype_detail(self):
v = NetworkServiceType.objects.get(id=1)
resp = self.client.get(v.get_absolute_url())
self.assertContains(resp, 'Peering')
self.assertContains(resp, "East University's peering service")
# Network Service
def test_networkservice_basic(self):
v = NetworkService.objects.get(
member__name='East University',
description="Peering service for East University")
self.assertEqual(v.name, "East University's peering service")
def test_view_networkservice_index(self):
resp = self.client.get(
reverse('plugins:sidekick:networkservice_index'))
self.assertContains(resp, "East University's peering service")
# Network Service Group
def test_networkservicegroup_basic(self):
ns = NetworkService.objects.get(
member__name='East University',
description="Peering service for East University")
v = NetworkServiceGroup.objects.get(network_services__in=[ns])
self.assertEqual(v.name, 'A Group')
self.assertEqual(v.description, 'Just some group')
def test_view_networkservicegroup_index(self):
resp = self.client.get(
reverse('plugins:sidekick:networkservicegroup_index'))
self.assertContains(resp, 'A Group')
self.assertContains(resp, 'Just some group')
def test_view_networkservicegroup_detail(self):
v = NetworkServiceGroup.objects.get(id=1)
resp = self.client.get(v.get_absolute_url())
self.assertContains(resp, 'A Group')
self.assertContains(resp, 'Just some group')
# Routing Type
def test_routingtype_basic(self):
v = RoutingType.objects.get(name="BGP")
self.assertEqual(v.slug, "bgp")
def test_view_routingtype_index(self):
resp = self.client.get(
reverse('plugins:sidekick:routingtype_index'))
self.assertContains(resp, 'BGP')
def test_view_routingtype_detail(self):
v = RoutingType.objects.get(id=1)
resp = self.client.get(v.get_absolute_url())
self.assertContains(resp, 'BGP')
self.assertContains(resp, "East University's peering service")
# IP Prefixes
def test_ip_prefixes(self):
expected_prefixes = [
netaddr.IPNetwork('192.168.1.0/24'),
netaddr.IPNetwork('192.168.2.0/24'),
]
v = NetworkService.objects.get(
member__name='East University',
description="Peering service for East University")
prefixes = v.get_ipv4_prefixes()
self.assertEqual(prefixes, expected_prefixes)
# Backup service
def test_backup_service(self):
expected_service_name = "East University's backup peering service"
v = NetworkService.objects.get(
member__name='East University',
description="Peering service for East University")
backup = v.get_backup_service()[0]
self.assertEqual(backup.name, expected_service_name)
|
python
|
"""
Http related errors
"""
class HTTPError(Exception):
""" Base class for all http related errors """
status_code = 500
class HTTPForbidden(HTTPError):
""" Http forbidden error (status code 403). """
status_code = 403
class HTTPBadRequest(HTTPError):
""" Client sent a bad request. """
status_code = 400
class HTTPNotFound(HTTPError):
status_code = 404
class HTTPSuccess(HTTPError):
status_code = 200
class HTTPErrorMixin(object):
""" Adds HTTP error handling to dispatching """
def dispatch(self):
try:
return super(HTTPErrorMixin, self).dispatch()
except HTTPError, e:
self.error(e.status_code)
self.response.write(str(e))
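# Usage sketch (hedged): a hypothetical webapp2-style handler mixing in HTTPErrorMixin.
# Any HTTPError raised inside dispatch() is converted to the matching status code.
#
#   class ItemHandler(HTTPErrorMixin, webapp2.RequestHandler):
#       def get(self, item_id):
#           item = lookup(item_id)            # hypothetical lookup helper
#           if item is None:
#               raise HTTPNotFound("no such item: %s" % item_id)
#           self.response.write(item)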
|
python
|
# coding=utf-8
"""
Port of the ganglia gearman collector
Collects stats from gearman job server
#### Dependencies
* gearman
"""
from diamond.collector import str_to_bool
import diamond.collector
import os
import subprocess
import time
try:
import gearman
except ImportError:
gearman = None
class GearmanCollector(diamond.collector.Collector):
    def get_default_config_help(self):
config_help = super(GearmanCollector, self).get_default_config_help()
config_help.update({
'gearman_pid_path': 'Gearman PID file path',
'url': 'Gearman endpoint to talk to',
'bin': 'Path to ls command',
'use_sudo': 'Use sudo?',
'sudo_cmd': 'Path to sudo',
})
return config_help
def get_default_config(self):
"""
Returns the default collector settings
"""
config = super(GearmanCollector, self).get_default_config()
config.update({
'path': 'gearman_stats',
'gearman_pid_path': '/var/run/gearman/gearman-job-server.pid',
'url': 'localhost',
'bin': '/bin/ls',
'use_sudo': False,
'sudo_cmd': '/usr/bin/sudo',
})
return config
def collect(self):
"""
Collector gearman stats
"""
def get_fds(gearman_pid_path):
with open(gearman_pid_path) as fp:
gearman_pid = fp.read().strip()
proc_path = os.path.join('/proc', gearman_pid, 'fd')
command = [self.config['bin'], proc_path]
if str_to_bool(self.config['use_sudo']):
command.insert(0, self.config['sudo_cmd'])
process = subprocess.Popen(command,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
output, errors = process.communicate()
if errors:
raise Exception(errors)
return len(output.splitlines())
def publish_server_stats(gm_admin_client):
# Publish idle/running worker counts
# and no. of tasks queued per task
for entry in gm_admin_client.get_status():
total = entry.get('workers', 0)
running = entry.get('running', 0)
idle = total-running
self.dimensions = {'task': entry['task']} # Internally, this dict is cleared on self.publish
self.publish('gearman.queued', entry['queued'])
self.dimensions = {'type': 'running'}
self.publish('gearman.workers', running)
self.dimensions = {'type': 'idle'}
self.publish('gearman.workers', idle)
try:
if gearman is None:
self.log.error("Unable to import python gearman client")
return
# Collect and Publish Metrics
self.log.debug("Using pid file: %s & gearman endpoint : %s",
self.config['gearman_pid_path'], self.config['url'])
gm_admin_client = gearman.GearmanAdminClient([self.config['url']])
self.publish('gearman.ping', gm_admin_client.ping_server())
self.publish('gearman.fds', get_fds(self.config['gearman_pid_path']))
publish_server_stats(gm_admin_client)
except Exception, e:
self.log.error("GearmanCollector Error: %s", e)
|
python
|
import requests
print(requests.delete("http://localhost:7215/player/", json={"uid": "K/1H5bdxo7GwMBUp8qVQz42h7ZE=", }).content)
print(requests.put("http://localhost:7215/player/", json={"uid": "K/1H5bdxo7GwMBUp8qVQz42h7ZE=", "team": "emc", "rank": "grunt",}).content)
#print(requests.put("http://localhost:7215/battlemode/", json={"team": "emc", "state": "off"}).content)
#print(requests.get("http://localhost:7215/battlemode/", json={"team": "emc", }).content)
|
python
|
from django.conf.urls import url
from django.contrib import admin
from django.contrib.auth.views import login, logout
from chat.views import index,register_page
app_name= "multichat"
urlpatterns = [url(r'^home/$', index),
url(r'^$', index),
url(r'^register/$', register_page),
url(r'^accounts/login/$', login),
url(r'^accounts/logout/$', logout,{'next_page': '/home/'}),
url(r'^admin/', admin.site.urls),
]
|
python
|
from .game_move import GameMove
from .game_node import GameNode
from .game_state import GameState
from .game_tree import GameTree
# from .game import Game
from .game_multithread import GameMultithread
|
python
|
from coders import NoopCoder
from sources import CsvFileSource
|
python
|
# built-in
import ctypes;
import os;
# import matey shared library
current_dir = os.path.dirname(__file__);
matey_path = os.path.join(current_dir, "matey.so");
libmatey = ctypes.CDLL(matey_path);
#### SUM #######################################################################
libmatey.sum.restype = ctypes.c_int;
libmatey.sum.argtypes = (
ctypes.c_int,
ctypes.POINTER(ctypes.c_int)
);
def sum(numbers):
# prepare arguments
size = len(numbers);
array_type = ctypes.c_int * size;
# compute sum
result = libmatey.sum(
ctypes.c_int(size),
array_type(*numbers)
);
return int(result);
#### DOT PRODUCT ###############################################################
libmatey.dot.restype = ctypes.c_double;
libmatey.dot.argtypes = (
ctypes.c_int, # size
ctypes.POINTER(ctypes.c_double), # a
ctypes.POINTER(ctypes.c_double) # b
);
def dot(a,b):
# validate input
assert len(a) == len(b);
# prepare arguments
size = len(a);
a_type = ctypes.c_double * size;
b_type = ctypes.c_double * size;
# compute dot product
result = libmatey.dot(
ctypes.c_int(size),
a_type(*a),
b_type(*b)
);
return float(result);
#### SAXPY #####################################################################
libmatey.saxpy.restype = None;
libmatey.saxpy.argtypes = (
ctypes.c_int, # size
ctypes.c_double, # a
ctypes.POINTER(ctypes.c_double), # x
ctypes.POINTER(ctypes.c_double), # y
)
def saxpy(a,x,y):
# validate input
assert len(x) == len(y);
# prepare arguments
size = len(x);
vector_type = ctypes.c_double * size;
# perform saxpy
libmatey.saxpy(
ctypes.c_int(size),
ctypes.c_double(a),
        ctypes.byref(vector_type(*x)),
        ctypes.byref(vector_type(*y))
);
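#### USAGE SKETCH ##############################################################
# Quick usage sketch (assumes matey.so has been built and sits next to this file):
if __name__ == "__main__":
    print(sum([1, 2, 3, 4]));                 # -> 10
    print(dot([1.0, 2.0], [3.0, 4.0]));       # -> 11.0
    saxpy(2.0, [1.0, 2.0], [10.0, 20.0]);     # runs a*x + y in the C routine (the Python lists are not modified)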
|
python
|
# -*- coding: utf-8 -*-
from setuptools import setup, find_packages
with open('README.md', 'r') as rm:
long_desc = rm.read()
setup(
name='soloman',
version='3.0.2',
description='For the love of Artificial Intelligence, Python and QML',
long_description=long_desc,
long_description_content_type='text/markdown',
install_requires=['numpy', 'pyaudio', 'pyffmpeg'],
keywords="audio, pyaudio, ffmpeg, qml, pyffmpeg",
url='https://github.com/deuteronomy-works/soloman',
author='Amoh - Gyebi Godwin Ampofo Michael',
author_email='[email protected]',
project_urls={
"Bug Tracker": "https://github.com/deuteronomy-works/soloman/issues/",
"Documentation": "https://github.com/deuteronomy-works/soloman/wiki/",
"Source Code": "https://github.com/deuteronomy-works/soloman/",
},
classifiers = [
"Programming Language :: Python",
"Programming Language :: Python :: 3",
"Development Status :: 5 - Production/Stable",
"Environment :: Other Environment",
"Intended Audience :: Developers",
"License :: OSI Approved :: MIT License",
"Operating System :: OS Independent",
"Topic :: Software Development :: Libraries :: Python Modules"
],
packages=['soloman', 'soloman.audio', 'soloman.video', 'PyQt5'],
package_data={'PyQt5': ['Qt/qml/soloman/qmldir', 'Qt/qml/soloman/*.qml']},
)
|
python
|
from email.charset import Charset, QP
from email.mime.text import MIMEText
from .base import AnymailBaseBackend, BasePayload
from .._version import __version__
from ..exceptions import AnymailAPIError, AnymailImproperlyInstalled
from ..message import AnymailRecipientStatus
from ..utils import get_anymail_setting, UNSET
try:
import boto3
from botocore.client import Config
from botocore.exceptions import BotoCoreError, ClientError, ConnectionError
except ImportError as err:
raise AnymailImproperlyInstalled(missing_package='boto3', backend='amazon_ses') from err
# boto3 has several root exception classes; this is meant to cover all of them
BOTO_BASE_ERRORS = (BotoCoreError, ClientError, ConnectionError)
class EmailBackend(AnymailBaseBackend):
"""
Amazon SES Email Backend (using boto3)
"""
esp_name = "Amazon SES"
def __init__(self, **kwargs):
"""Init options from Django settings"""
super().__init__(**kwargs)
# AMAZON_SES_CLIENT_PARAMS is optional - boto3 can find credentials several other ways
self.session_params, self.client_params = _get_anymail_boto3_params(kwargs=kwargs)
self.configuration_set_name = get_anymail_setting("configuration_set_name", esp_name=self.esp_name,
kwargs=kwargs, allow_bare=False, default=None)
self.message_tag_name = get_anymail_setting("message_tag_name", esp_name=self.esp_name,
kwargs=kwargs, allow_bare=False, default=None)
self.client = None
def open(self):
if self.client:
return False # already exists
try:
self.client = boto3.session.Session(**self.session_params).client("ses", **self.client_params)
except BOTO_BASE_ERRORS:
if not self.fail_silently:
raise
else:
return True # created client
def close(self):
if self.client is None:
return
# self.client.close() # boto3 doesn't currently seem to support (or require) this
self.client = None
def build_message_payload(self, message, defaults):
# The SES SendRawEmail and SendBulkTemplatedEmail calls have
# very different signatures, so use a custom payload for each
if getattr(message, "template_id", UNSET) is not UNSET:
return AmazonSESSendBulkTemplatedEmailPayload(message, defaults, self)
else:
return AmazonSESSendRawEmailPayload(message, defaults, self)
def post_to_esp(self, payload, message):
try:
response = payload.call_send_api(self.client)
except BOTO_BASE_ERRORS as err:
# ClientError has a response attr with parsed json error response (other errors don't)
raise AnymailAPIError(str(err), backend=self, email_message=message, payload=payload,
response=getattr(err, 'response', None)) from err
return response
def parse_recipient_status(self, response, payload, message):
return payload.parse_recipient_status(response)
class AmazonSESBasePayload(BasePayload):
def init_payload(self):
self.params = {}
if self.backend.configuration_set_name is not None:
self.params["ConfigurationSetName"] = self.backend.configuration_set_name
def call_send_api(self, ses_client):
raise NotImplementedError()
def parse_recipient_status(self, response):
# response is the parsed (dict) JSON returned from the API call
raise NotImplementedError()
def set_esp_extra(self, extra):
# e.g., ConfigurationSetName, FromArn, SourceArn, ReturnPathArn
self.params.update(extra)
class AmazonSESSendRawEmailPayload(AmazonSESBasePayload):
def init_payload(self):
super().init_payload()
self.all_recipients = []
self.mime_message = self.message.message()
# Work around an Amazon SES bug where, if all of:
# - the message body (text or html) contains non-ASCII characters
# - the body is sent with `Content-Transfer-Encoding: 8bit`
# (which is Django email's default for most non-ASCII bodies)
# - you are using an SES ConfigurationSet with open or click tracking enabled
# then SES replaces the non-ASCII characters with question marks as it rewrites
# the message to add tracking. Forcing `CTE: quoted-printable` avoids the problem.
# (https://forums.aws.amazon.com/thread.jspa?threadID=287048)
for part in self.mime_message.walk():
if part.get_content_maintype() == "text" and part["Content-Transfer-Encoding"] == "8bit":
content = part.get_payload()
del part["Content-Transfer-Encoding"]
qp_charset = Charset(part.get_content_charset("us-ascii"))
qp_charset.body_encoding = QP
# (can't use part.set_payload, because SafeMIMEText can undo this workaround)
MIMEText.set_payload(part, content, charset=qp_charset)
def call_send_api(self, ses_client):
# Set Destinations to make sure we pick up all recipients (including bcc).
# Any non-ASCII characters in recipient domains must be encoded with Punycode.
# (Amazon SES doesn't support non-ASCII recipient usernames.)
self.params["Destinations"] = [email.address for email in self.all_recipients]
self.params["RawMessage"] = {
"Data": self.mime_message.as_bytes()
}
return ses_client.send_raw_email(**self.params)
def parse_recipient_status(self, response):
try:
message_id = response["MessageId"]
except (KeyError, TypeError) as err:
raise AnymailAPIError(
"%s parsing Amazon SES send result %r" % (str(err), response),
backend=self.backend, email_message=self.message, payload=self) from None
recipient_status = AnymailRecipientStatus(message_id=message_id, status="queued")
return {recipient.addr_spec: recipient_status for recipient in self.all_recipients}
# Standard EmailMessage attrs...
# These all get rolled into the RFC-5322 raw mime directly via EmailMessage.message()
def _no_send_defaults(self, attr):
# Anymail global send defaults don't work for standard attrs, because the
# merged/computed value isn't forced back into the EmailMessage.
if attr in self.defaults:
self.unsupported_feature("Anymail send defaults for '%s' with Amazon SES" % attr)
def set_from_email_list(self, emails):
# Although Amazon SES will send messages with any From header, it can only parse Source
# if the From header is a single email. Explicit Source avoids an "Illegal address" error:
if len(emails) > 1:
self.params["Source"] = emails[0].addr_spec
# (else SES will look at the (single) address in the From header)
def set_recipients(self, recipient_type, emails):
self.all_recipients += emails
# included in mime_message
assert recipient_type in ("to", "cc", "bcc")
self._no_send_defaults(recipient_type)
def set_subject(self, subject):
# included in mime_message
self._no_send_defaults("subject")
def set_reply_to(self, emails):
# included in mime_message
self._no_send_defaults("reply_to")
def set_extra_headers(self, headers):
# included in mime_message
self._no_send_defaults("extra_headers")
def set_text_body(self, body):
# included in mime_message
self._no_send_defaults("body")
def set_html_body(self, body):
# included in mime_message
self._no_send_defaults("body")
def set_alternatives(self, alternatives):
# included in mime_message
self._no_send_defaults("alternatives")
def set_attachments(self, attachments):
# included in mime_message
self._no_send_defaults("attachments")
# Anymail-specific payload construction
def set_envelope_sender(self, email):
self.params["Source"] = email.addr_spec
def set_spoofed_to_header(self, header_to):
# django.core.mail.EmailMessage.message() has already set
# self.mime_message["To"] = header_to
# and performed any necessary header sanitization.
#
# The actual "to" is already in self.all_recipients,
# which is used as the SendRawEmail Destinations later.
#
# So, nothing to do here, except prevent the default
# "unsupported feature" error.
pass
def set_metadata(self, metadata):
# Amazon SES has two mechanisms for adding custom data to a message:
# * Custom message headers are available to webhooks (SNS notifications),
# but not in CloudWatch metrics/dashboards or Kinesis Firehose streams.
# Custom headers can be sent only with SendRawEmail.
# * "Message Tags" are available to CloudWatch and Firehose, and to SNS
# notifications for SES *events* but not SES *notifications*. (Got that?)
# Message Tags also allow *very* limited characters in both name and value.
# Message Tags can be sent with any SES send call.
# (See "How do message tags work?" in https://aws.amazon.com/blogs/ses/introducing-sending-metrics/
# and https://forums.aws.amazon.com/thread.jspa?messageID=782922.)
# To support reliable retrieval in webhooks, just use custom headers for metadata.
self.mime_message["X-Metadata"] = self.serialize_json(metadata)
def set_tags(self, tags):
# See note about Amazon SES Message Tags and custom headers in set_metadata above.
# To support reliable retrieval in webhooks, use custom headers for tags.
# (There are no restrictions on number or content for custom header tags.)
for tag in tags:
self.mime_message.add_header("X-Tag", tag) # creates multiple X-Tag headers, one per tag
# Also *optionally* pass a single Message Tag if the AMAZON_SES_MESSAGE_TAG_NAME
# Anymail setting is set (default no). The AWS API restricts tag content in this case.
# (This is useful for dashboard segmentation; use esp_extra["Tags"] for anything more complex.)
if tags and self.backend.message_tag_name is not None:
if len(tags) > 1:
self.unsupported_feature("multiple tags with the AMAZON_SES_MESSAGE_TAG_NAME setting")
self.params.setdefault("Tags", []).append(
{"Name": self.backend.message_tag_name, "Value": tags[0]})
def set_template_id(self, template_id):
raise NotImplementedError("AmazonSESSendRawEmailPayload should not have been used with template_id")
def set_merge_data(self, merge_data):
self.unsupported_feature("merge_data without template_id")
def set_merge_global_data(self, merge_global_data):
self.unsupported_feature("global_merge_data without template_id")
class AmazonSESSendBulkTemplatedEmailPayload(AmazonSESBasePayload):
def init_payload(self):
super().init_payload()
# late-bind recipients and merge_data in call_send_api
self.recipients = {"to": [], "cc": [], "bcc": []}
self.merge_data = {}
def call_send_api(self, ses_client):
# include any 'cc' or 'bcc' in every destination
cc_and_bcc_addresses = {}
if self.recipients["cc"]:
cc_and_bcc_addresses["CcAddresses"] = [cc.address for cc in self.recipients["cc"]]
if self.recipients["bcc"]:
cc_and_bcc_addresses["BccAddresses"] = [bcc.address for bcc in self.recipients["bcc"]]
# set up destination and data for each 'to'
self.params["Destinations"] = [{
"Destination": dict(ToAddresses=[to.address], **cc_and_bcc_addresses),
"ReplacementTemplateData": self.serialize_json(self.merge_data.get(to.addr_spec, {}))
} for to in self.recipients["to"]]
return ses_client.send_bulk_templated_email(**self.params)
def parse_recipient_status(self, response):
try:
# response["Status"] should be a list in Destinations (to) order
anymail_statuses = [
AnymailRecipientStatus(
message_id=status.get("MessageId", None),
status='queued' if status.get("Status") == "Success" else 'failed')
for status in response["Status"]
]
except (KeyError, TypeError) as err:
raise AnymailAPIError(
"%s parsing Amazon SES send result %r" % (str(err), response),
backend=self.backend, email_message=self.message, payload=self) from None
to_addrs = [to.addr_spec for to in self.recipients["to"]]
if len(anymail_statuses) != len(to_addrs):
raise AnymailAPIError(
"Sent to %d destinations, but only %d statuses in Amazon SES send result %r"
% (len(to_addrs), len(anymail_statuses), response),
backend=self.backend, email_message=self.message, payload=self)
return dict(zip(to_addrs, anymail_statuses))
def set_from_email(self, email):
self.params["Source"] = email.address # this will RFC2047-encode display_name if needed
def set_recipients(self, recipient_type, emails):
# late-bound in call_send_api
assert recipient_type in ("to", "cc", "bcc")
self.recipients[recipient_type] = emails
def set_subject(self, subject):
# (subject can only come from template; you can use substitution vars in that)
if subject:
self.unsupported_feature("overriding template subject")
def set_reply_to(self, emails):
if emails:
self.params["ReplyToAddresses"] = [email.address for email in emails]
def set_extra_headers(self, headers):
self.unsupported_feature("extra_headers with template")
def set_text_body(self, body):
if body:
self.unsupported_feature("overriding template body content")
def set_html_body(self, body):
if body:
self.unsupported_feature("overriding template body content")
def set_attachments(self, attachments):
if attachments:
self.unsupported_feature("attachments with template")
# Anymail-specific payload construction
def set_envelope_sender(self, email):
self.params["ReturnPath"] = email.addr_spec
def set_metadata(self, metadata):
# no custom headers with SendBulkTemplatedEmail
self.unsupported_feature("metadata with template")
def set_tags(self, tags):
# no custom headers with SendBulkTemplatedEmail, but support AMAZON_SES_MESSAGE_TAG_NAME if used
# (see tags/metadata in AmazonSESSendRawEmailPayload for more info)
if tags:
if self.backend.message_tag_name is not None:
if len(tags) > 1:
self.unsupported_feature("multiple tags with the AMAZON_SES_MESSAGE_TAG_NAME setting")
self.params["DefaultTags"] = [{"Name": self.backend.message_tag_name, "Value": tags[0]}]
else:
self.unsupported_feature(
"tags with template (unless using the AMAZON_SES_MESSAGE_TAG_NAME setting)")
def set_template_id(self, template_id):
self.params["Template"] = template_id
def set_merge_data(self, merge_data):
# late-bound in call_send_api
self.merge_data = merge_data
def set_merge_global_data(self, merge_global_data):
self.params["DefaultTemplateData"] = self.serialize_json(merge_global_data)
def _get_anymail_boto3_params(esp_name=EmailBackend.esp_name, kwargs=None):
"""Returns 2 dicts of params for boto3.session.Session() and .client()
Incorporates ANYMAIL["AMAZON_SES_SESSION_PARAMS"] and
ANYMAIL["AMAZON_SES_CLIENT_PARAMS"] settings.
Converts config dict to botocore.client.Config if needed
May remove keys from kwargs, but won't modify original settings
"""
# (shared with ..webhooks.amazon_ses)
session_params = get_anymail_setting("session_params", esp_name=esp_name, kwargs=kwargs, default={})
client_params = get_anymail_setting("client_params", esp_name=esp_name, kwargs=kwargs, default={})
# Add Anymail user-agent, and convert config dict to botocore.client.Config
client_params = client_params.copy() # don't modify source
config = Config(user_agent_extra="django-anymail/{version}-{esp}".format(
esp=esp_name.lower().replace(" ", "-"), version=__version__))
if "config" in client_params:
# convert config dict to botocore.client.Config if needed
client_params_config = client_params["config"]
if not isinstance(client_params_config, Config):
client_params_config = Config(**client_params_config)
config = config.merge(client_params_config)
client_params["config"] = config
return session_params, client_params
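# Usage sketch (hedged): wiring this backend into a Django project. The setting names
# follow the get_anymail_setting() calls above; the AWS credentials themselves come from
# boto3's normal resolution chain (env vars, ~/.aws/credentials, instance roles, ...).
#
#   # settings.py
#   EMAIL_BACKEND = "anymail.backends.amazon_ses.EmailBackend"
#   ANYMAIL = {
#       "AMAZON_SES_CONFIGURATION_SET_NAME": "tracked",   # optional
#       "AMAZON_SES_MESSAGE_TAG_NAME": "campaign",        # optional, enables a single Message Tag
#   }
#
#   # application code
#   from django.core.mail import send_mail
#   send_mail("Subject", "Body", "from@example.com", ["to@example.com"])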
|
python
|
import pandas as pd
import numpy as np
class omdf:
def __init__(self,dff):
self.df = dff
self.arr = self.df.to_numpy()
def df_add_col_instr(self):
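        # NOTE: TS() is not defined in this module; it is assumed to be an external
        # helper that categorizes each row's Summary text.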
self.df['cat'] = self.df.apply(lambda row: TS(row.Summary), axis = 1)
return self.df.to_dict()
def df_add_col_dic(self,colname,newcol,dic):
self.df[newcol] = self.df['scode'].map(dic)
return self.df.to_dict()
def df_add_col_slice_str(self,newcolname):
self.df[newcolname] = self.df.apply(lambda x : x.CustomAttr15[0:5], axis = 1)
return self.df.to_dict()
def df_rmv_column(self,lis):
ndf = self.df[lis]
return ndf.to_dict()
def df_countif(self,column_name,newcolumn_name):
code = pd.Series(self.df[column_name])
lst = code.values.tolist()
dic = {}
for i in lst:
dic[i] = lst.count(i)
df_occ = pd.DataFrame(dic.items(),columns=[column_name, newcolumn_name])
mdf = self.df.merge(df_occ, on=column_name)
return mdf
def df_instr(self,colname,srcstr):
self.df[srcstr] = list(map(lambda x: x.count(srcstr), self.df[colname]))
return self.df
def df_vlookup(self,df2,common_colname):
mdf = self.df.merge(df2, on=common_colname)
return mdf
class pyvb:
def __init__(self, dic, li=[]):
self.df = pd.DataFrame(dic)
self.arr = self.df.to_numpy()
self.lst = self.df[li]
def PrintDf(self):
print(self.df)
def PrintDf_ByList(self):
print(self.lst)
def MatchParse(self,zn,zncol,parsecol_1,parsecol_2,parsecol_3):
hp = ""
ndf = self.df[self.df[zncol].str.contains(zn, na=False)]
for ind in ndf.index:
code = str(ndf[parsecol_1][ind])
lo = str(ndf[parsecol_2][ind])
resource = str(ndf[parsecol_3][ind])
hp = hp + " \n" + code + " || " + lo + " || " + resource
z = zn + ': \n' + hp
return z
def VbMatch_Col(self,search_val,colnum):
lrw = (self.arr).shape[0]
i = 0
while i < lrw:
if search_val == self.arr[i][colnum]:
break
i = i + 1
return i
def VbMatch_Row(self,search_val,rwnum):
lcol = (self.arr).shape[1]
i = 0
while i < lcol:
if search_val == self.arr[rwnum][i]:
break
i = i + 1
return i
def Row_Item_From_List(self,rwnum,lis):
ndf = self.df[lis]
ar = ndf.to_numpy()
lcol = (ar).shape[1]
j = 0
heap = ""
while j < lcol:
hd = str(lis[j]) + ":" + str(ar[rwnum][j])
if j == 0:
heap = hd
else:
heap = heap + '\n' + hd
j = j + 1
return heap
def VbFilter(self,colname,strval):
df2 = self.df[self.df[colname].str.contains(strval, na=False)]
return df2.to_dict()
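# Usage sketch (the column names and values below are invented for illustration):
if __name__ == "__main__":
    data = {"scode": ["A1", "A2", "A1"], "Summary": ["x", "y", "z"]}
    helper = omdf(pd.DataFrame(data))
    counted = helper.df_countif("scode", "occurrences")   # adds a COUNTIF-style column
    print(counted)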
|
python
|
# -*- coding: utf-8 -*-
from __future__ import print_function, unicode_literals, absolute_import, division
""" http://projecteuler.net/index.php?section=problems&id=18 triangle
it has been finished in 10 minutes
using recursive method is prefered, brute force not feasible to solve this problem
"""
from projecteulerhelper import *
#########################################
def ProjectEuler18():
tri = ReadMat('ProjectEuler18.txt', ' ')
N = len(tri)
ms=FindMaxSum(tri, 0, 0, N)
print(ms)
def FindMaxSum(tri, row, col,n):
""" row, col is the start index of trianle"""
#list=[]
if n==1: return tri[row][col]
return tri[row][col] + max( FindMaxSum(tri, row+1, col,n-1), FindMaxSum(tri, row+1, col+1,n-1) )
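# For larger triangles (e.g. problem 67) the plain recursion above revisits the same
# cells exponentially many times. A bottom-up variant (a sketch, reusing the same
# triangle-of-lists representation) collapses the triangle row by row in O(n^2):
def FindMaxSumBottomUp(tri):
    rows = [list(r) for r in tri]
    for i in range(len(rows) - 2, -1, -1):
        for j in range(len(rows[i])):
            rows[i][j] += max(rows[i + 1][j], rows[i + 1][j + 1])
    return rows[0][0]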
if __name__ == "__main__":
ProjectEuler18()
|
python
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import plugins.server
import plugins.aaa
import plugins.session
import aiohttp.web
import typing
import fnmatch
""" Generic preferences endpoint for Pony Mail codename Foal"""
""" This is incomplete, but will work for anonymous tests. """
async def process(
server: plugins.server.BaseServer, session: plugins.session.SessionObject, indata: dict
) -> typing.Union[dict, aiohttp.web.Response]:
prefs: dict = {"login": {}}
lists: dict = {}
for ml, entry in server.data.lists.items():
if "@" in ml:
lname, ldomain = ml.split("@", 1)
can_access = True
if entry.get("private", False):
can_access = plugins.aaa.can_access_list(session, ml)
if server.config.ui.focus_domain != "*":
if '*' in server.config.ui.focus_domain:
if not fnmatch.fnmatch(ldomain, server.config.ui.focus_domain):
continue
elif ldomain != (server.config.ui.focus_domain or session.host):
continue
if can_access:
if ldomain not in lists:
lists[ldomain] = {}
lists[ldomain][lname] = entry["count"]
prefs["lists"] = lists
if session and session.credentials:
prefs["login"] = {
"credentials": {
"uid": session.credentials.uid,
"email": session.credentials.email,
"fullname": session.credentials.name,
}
}
if session.credentials.admin is True:
prefs["login"]["credentials"]["admin"] = True
# Logging out??
if indata.get("logout"):
# Remove session from ElasticSearch
await plugins.session.remove_session(session)
# If stored in memory, remove from there.
if session.cookie in server.data.sessions:
del server.data.sessions[session.cookie]
session.credentials = None
return aiohttp.web.Response(
headers={
"set-cookie": "ponymail=deleted; path=/api; expires=Thu, 01 Jan 1970 00:00:00 GMT",
"content-type": "application/json",
},
status=200,
text='{"okay": true}',
)
return prefs
def register(server: plugins.server.BaseServer):
return plugins.server.Endpoint(process)
|
python
|
# -*- coding: utf-8 -*-
'''
This module contains all the functions needed to obtain and/or compute the model parameters, either from data or from the parameters reported in the Arenas et al. article.
Parameter-vector convention, following Supplementary Table 1 of [1]:
# Usual parameters
β = params[0]
k = params[1]
η = params[2]
α = params[3]
ν = params[4]
μ = params[5]
γ = params[6]
ω = params[7]
ψ = params[8]
χᴵ = params[9]
χᴴ = params[10]
# Population
N = params[11]
# Containment parameters
σ = params[12]
κ0 = params[13]
ϕ = params[14]
tc = params[15]
tf = params[16]
κf = params[17]
[1]: Arenas, Alex, et al. "Derivation of the effective reproduction number R for COVID-19 in relation to mobility restrictions and confinement." medRxiv (2020).
'''
# Standard libraries
import pandas as pd
import numpy as np
import datetime as dt
import math
# Libraries for fitting
from sklearn.linear_model import LinearRegression
from numpy.linalg import eigvals
# Data-handling module
from data_handling.data_processing import *
# Model-specific modules
import arenas_params as ap
def get_params_arenas():
    '''
    Returns the parameter vector from the Arenas et al. article.
    The population (params[11]) still has to be added to this vector, and the parameters computed from data have to replace the defaults.
    Outputs:
        - params: Model parameter vector according to Arenas et al.
    Note: The parameters are in the same order as Supplementary Table 1 in [1].
    [1]: Arenas, Alex, et al. "Derivation of the effective reproduction number R for COVID-19 in relation to mobility restrictions and confinement." medRxiv (2020).
    '''
    # Assumes import arenas_params as ap
params = [
ap.β, # 0 : β
ap.kg, # 1 : k
ap.η, # 2 : η
ap.αg, # 3 : α
ap.ν, # 4 : ν
ap.μg, # 5 : μ
ap.γg, # 6 : γ
ap.ωg, # 7 : ω
ap.ψg, # 8 : ψ
ap.χg, # 9 : χᴵ
ap.χg, # 10: χᴴ
0, # 11: N
ap.σ, # 12: σ
ap.κ0, # 13: κ0
ap.ϕ, # 14: ϕ
ap.tc, # 15: días desde t0 para confinamiento
ap.tf, # 16: días desde confinamiento para reactivación
ap.κf, # 17: parametro que refleja la nueva normalidad
]
return params
# Parameters derived from data
def get_parametros(series, estado='Nacional'):
    '''
    Computes the relevant model parameters for `estado` that do not come from the Arenas et al. parameters.
    In particular, it computes
        - χ: Recovery rate
        - γ: Fraction of confirmed cases that are hospitalized
        - ω: Fraction of hospitalized cases that die
        - σ: Average number of occupants in inhabited private dwellings
    Inputs:
        - series: Dataframe with the time series for all states
        - estado=Nacional: Federal entity to consider
    Outputs:
        - γ, ω, χᴵ, σ
    Note: If `estado` = 'Nacional', the parameters are computed at the national level. This is recommended for states with less data.
    '''
if estado == 'Nacional':
series = get_serie_nacional( series )
else:
series = get_serie_estatal( series, estado )
# fracción de casos que van a hospital
γ = series['hospitalizados_acumulados'][-1] / series['confirmados_acumulados'][-1]
# fracción de hospitalizados que mueren (fallecidos_por_hospitalizacion_acumulados)
ω = series['fallecidos_acumulados'][-1] / series['hospitalizados_acumulados'][-1]
# tamaño de habitantes por casa promedio [1]
# [1]: https://www.inegi.org.mx/temas/vivienda/
σ = 3.7
# Tasa de recuperación usada por el gobierno: 14 días [2]
# [2]: https://twitter.com/HLGatell/status/1258189863326830592/photo/1
χᴵ = 1/7 # 1/14
return γ, ω, χᴵ, σ
def get_poblacion(estado):
    '''
    Population (according to Wikipedia) for `estado`.
    '''
path = './data/poblaciones_y_superficies_por_estado.csv'
return pd.read_csv( path, index_col='ENTIDAD' )['POBLACIONES'][estado]
def get_t0(series, estado, umbral=30):
    '''
    Returns the day on which the hospitalization threshold `umbral` is crossed.
    By default, a threshold of 30 hospitalizations is used. We recommend not going lower.
    Inputs:
        - series: Dataframe with the time series for all states
        - estado: Federal entity to consider
        - umbral=30: Cutoff for hospitalized cases
    Output:
        - t0: date on which the state crosses the hospitalization threshold `umbral`.
    '''
series = get_serie_estatal(series, estado)
try:
return series[series['hospitalizados_acumulados'] >= umbral]['confirmados_acumulados'].idxmin()
except:
raise ValueError( 'No se ha cruzado el umbral de {} hospitalizados para {}.'.format( umbral, estado ) )
### FIT FUNCTIONS ###
## TO-DO: Recognize if t0_fit < t_JNSD (tc) or not. If it is, k_optim = <k>, else k_optim = <k_c>
def get_fit_param(series, estado, umbral=25, t0_fit=None):
    '''
    Computes the growth rate of hospitalizations by fitting one week of data starting from the cases determined by `umbral`.
    Input:
        - series: Dataframe with the time series for all states
        - estado: Federal entity to consider
        - umbral=25: Cutoff for hospitalized cases
        - t0_fit=None: Initial date for determining the growth rate. By default, the date determined by `umbral` is used
    Output:
        - growth rate + 1
    Note: For discrete-time exponential growth: y_t = (λ + 1)^t * y_0
'''
if t0_fit == None:
t0_fit = get_t0(series, estado, umbral=umbral)
#t0_fit = dias_desde_t0(t0_fit, 8)
tf_fit = dias_desde_t0(t0_fit, n_dias=8)
series = get_serie_estatal(series, estado)
series = series.loc[t0_fit:tf_fit,'hospitalizados_acumulados']
#series = series.loc[t0_fit:tf_fit,'fallecidos_acumulados']
#series = series.loc[t0_fit:tf_fit,'fallecidos_diarios']
#series = series.loc[t0_fit:tf_fit,'hospitalizados_diarios']
ydata = np.log(series)
xdata = np.array( range(len(ydata)) ).reshape(-1,1)
regressor = LinearRegression()
regressor.fit(xdata, ydata) #training the algorithm
# in the discrete case, the parameter is the exponent plus one.
return regressor.coef_[0] + 1
def get_matriz_transicion_linealizada(k, params):
    '''
    Builds the transition matrix of the linearized model equations as a function of `k`.
    Inputs:
        - k: average number of contacts.
        - params: model parameters
    Outputs:
        - M(k): model transition matrix as a function of `k`.
    '''
# Parámetros del modelo necesarios para construir la matriz de transición
β = params[0]
η = params[2]
α = params[3]
ν = params[4]
μ = params[5]
γ = params[6]
ω = params[7]
ψ = params[8]
χᴵ = params[9]
χᴴ = params[10]
# tasa de crecimiento
b_k = -k * np.log( 1 - β )
# Matriz de transición en funcion de k (componentes en la matriz: S,E,A,I,H)
M = np.array([[1, 0, -b_k, -ν*b_k, 0],
[0, 1-η, b_k, ν*b_k, 0],
[0, η, 1-α, 0, 0],
[0, 0, α, γ*(1-μ)+(1-γ)*(1-χᴵ), 0],
[0, 0, 0, μ*γ, (1-ω)*(1-ψ)-(1-ω)*(1-χᴴ)]
])
return M
def get_k_optimo(tasa, params, k_min=5, k_max=15):
    '''
    Obtains the optimal average number of contacts (<k>) with respect to the hospitalization growth rate `λ`.
    Inputs:
        - tasa: Hospitalization growth rate given by the fit.
        - params: Model parameters
        - k_min=5: Minimum value of the search range
        - k_max=15: Maximum value of the search range
    Outputs:
        - k_optim: Average number of contacts fitted to the growth rate.
    '''
# Optimización por barrido de valores en un rango de confianza
k_range = np.linspace(k_min, k_max, num=100)
# Sweep the eigvals of M for k in k_range. We are interested in the 3rd eigenvalue
barrido_eigvals = np.zeros( len(k_range) )
for (i,k) in enumerate(k_range):
# Obtiene el 3er eigenvalor de la matriz de transición
λ3 = eigvals( get_matriz_transicion_linealizada(k, params) )[2]
# Obtiene la diferencia absoluta entre dicho eigenvalor y el exponente λ del fit.
barrido_eigvals[i] = np.abs( λ3 - tasa )
# print (barrido_eigvals)
# Calcula el óptimo tomando el mínimo del barrido
ix_min = np.argmin( barrido_eigvals )
return k_range[ix_min]
## Extra functions
def get_tiempo_duplicacion(tasa):
    '''
    Computes the case doubling time (in days) given a growth rate `tasa`.
    '''
print('Tiempo de duplicación: {} días'.format( np.round(math.log(2, tasa) , 1) ) )
return math.log(2, tasa)
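# Usage sketch (hedged: `series` stands for the dataframe produced by the data_handling
# helpers imported above, and 'Jalisco' is only an example state name):
#
#   params = get_params_arenas()
#   params[11] = get_poblacion('Jalisco')
#   γ, ω, χᴵ, σ = get_parametros(series, estado='Jalisco')
#   params[6], params[7], params[9], params[12] = γ, ω, χᴵ, σ
#   tasa = get_fit_param(series, 'Jalisco')
#   k_opt = get_k_optimo(tasa, params)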
|
python
|
"""Support for TPLink lights."""
from __future__ import annotations
import logging
from typing import Any
from kasa import SmartDevice
from homeassistant.components.light import (
ATTR_BRIGHTNESS,
ATTR_COLOR_TEMP,
ATTR_HS_COLOR,
ATTR_TRANSITION,
COLOR_MODE_BRIGHTNESS,
COLOR_MODE_COLOR_TEMP,
COLOR_MODE_HS,
COLOR_MODE_ONOFF,
SUPPORT_TRANSITION,
LightEntity,
)
from homeassistant.config_entries import ConfigEntry
from homeassistant.core import HomeAssistant
from homeassistant.helpers.entity_platform import AddEntitiesCallback
from homeassistant.util.color import (
color_temperature_kelvin_to_mired as kelvin_to_mired,
color_temperature_mired_to_kelvin as mired_to_kelvin,
)
from .const import DOMAIN
from .coordinator import TPLinkDataUpdateCoordinator
from .entity import CoordinatedTPLinkEntity, async_refresh_after
_LOGGER = logging.getLogger(__name__)
async def async_setup_entry(
hass: HomeAssistant,
config_entry: ConfigEntry,
async_add_entities: AddEntitiesCallback,
) -> None:
"""Set up switches."""
coordinator: TPLinkDataUpdateCoordinator = hass.data[DOMAIN][config_entry.entry_id]
device = coordinator.device
if device.is_bulb or device.is_light_strip or device.is_dimmer:
async_add_entities([TPLinkSmartBulb(device, coordinator)])
class TPLinkSmartBulb(CoordinatedTPLinkEntity, LightEntity):
"""Representation of a TPLink Smart Bulb."""
coordinator: TPLinkDataUpdateCoordinator
def __init__(
self,
device: SmartDevice,
coordinator: TPLinkDataUpdateCoordinator,
) -> None:
"""Initialize the switch."""
super().__init__(device, coordinator)
# For backwards compat with pyHS100
self._attr_unique_id = self.device.mac.replace(":", "").upper()
@async_refresh_after
async def async_turn_on(self, **kwargs: Any) -> None:
"""Turn the light on."""
if (transition := kwargs.get(ATTR_TRANSITION)) is not None:
transition = int(transition * 1_000)
if (brightness := kwargs.get(ATTR_BRIGHTNESS)) is not None:
brightness = round((brightness * 100.0) / 255.0)
# Handle turning to temp mode
if ATTR_COLOR_TEMP in kwargs:
color_tmp = mired_to_kelvin(int(kwargs[ATTR_COLOR_TEMP]))
_LOGGER.debug("Changing color temp to %s", color_tmp)
await self.device.set_color_temp(
color_tmp, brightness=brightness, transition=transition
)
return
# Handling turning to hs color mode
if ATTR_HS_COLOR in kwargs:
# TP-Link requires integers.
hue, sat = tuple(int(val) for val in kwargs[ATTR_HS_COLOR])
await self.device.set_hsv(hue, sat, brightness, transition=transition)
return
# Fallback to adjusting brightness or turning the bulb on
if brightness is not None:
await self.device.set_brightness(brightness, transition=transition)
else:
await self.device.turn_on(transition=transition)
@async_refresh_after
async def async_turn_off(self, **kwargs: Any) -> None:
"""Turn the light off."""
if (transition := kwargs.get(ATTR_TRANSITION)) is not None:
transition = int(transition * 1_000)
await self.device.turn_off(transition=transition)
@property
def min_mireds(self) -> int:
"""Return minimum supported color temperature."""
return kelvin_to_mired(self.device.valid_temperature_range.max)
@property
def max_mireds(self) -> int:
"""Return maximum supported color temperature."""
return kelvin_to_mired(self.device.valid_temperature_range.min)
@property
def color_temp(self) -> int | None:
"""Return the color temperature of this light in mireds for HA."""
return kelvin_to_mired(self.device.color_temp)
@property
def brightness(self) -> int | None:
"""Return the brightness of this light between 0..255."""
return round((self.device.brightness * 255.0) / 100.0)
@property
def hs_color(self) -> tuple[int, int] | None:
"""Return the color."""
hue, saturation, _ = self.device.hsv
return hue, saturation
@property
def supported_features(self) -> int:
"""Flag supported features."""
return SUPPORT_TRANSITION
@property
def supported_color_modes(self) -> set[str] | None:
"""Return list of available color modes."""
modes = set()
if self.device.is_variable_color_temp:
modes.add(COLOR_MODE_COLOR_TEMP)
if self.device.is_color:
modes.add(COLOR_MODE_HS)
if self.device.is_dimmable:
modes.add(COLOR_MODE_BRIGHTNESS)
if not modes:
modes.add(COLOR_MODE_ONOFF)
return modes
@property
def color_mode(self) -> str | None:
"""Return the active color mode."""
if self.device.is_color:
if self.device.is_variable_color_temp and self.device.color_temp:
return COLOR_MODE_COLOR_TEMP
return COLOR_MODE_HS
if self.device.is_variable_color_temp:
return COLOR_MODE_COLOR_TEMP
return COLOR_MODE_BRIGHTNESS
|
python
|
# -*- coding: utf-8 -*-
import logging
import allel
import h5py
import os
import os.path
import haplotype_plot.constants as constants
import haplotype_plot.reader as reader
import haplotype_plot.filter as strainer
import haplotype_plot.haplotyper as haplotyper
import numpy as np
logger = logging.getLogger(__name__)
logger.setLevel(logging.DEBUG)
def debug_hdf5(hdf5_path: str):
callset = h5py.File(hdf5_path, mode='r')
logger.debug("Callset keys: {keys}".format(keys=callset.keys()))
logger.debug("Callset keys of {key}: '{values}'".format(key=constants.HDF5_CALLDATA_KEY,
values=callset[constants.HDF5_CALLDATA_KEY].keys()))
logger.debug("Callset keys of {key}: '{values}'".format(key=constants.HDF5_VARIANTS_KEY,
values=callset[constants.HDF5_VARIANTS_KEY].keys()))
def get_genotypes_n_variants(callset: h5py.File) -> (allel.GenotypeChunkedArray, allel.VariantChunkedTable):
""" Returns a the genotypes and variants tables from an hdf5.
Parameters:
hdf5_path (str): Input path to the HDF5 file.
Returns:
Tuple(allel.GenotypeChunkedArray, allel.VariantChunkedTable)
- allel.GenotypeChunkedArray as the genotypes table
- allel.VariantChunkedTable as the variants table with columns 'CHROM' and 'POS'
"""
logger.debug("Retrieving genotypes via '{key}' key".format(key=constants.HDF5_CALLDATA_GENOTYPE_KEY))
genotypes = allel.GenotypeChunkedArray(callset['calldata/GT'])
logger.debug("Retrieving variants via '{key}' key'".format(key=constants.HDF5_VARIANTS_KEY))
variants = allel.VariantChunkedTable(callset["variants"], names=['CHROM', 'POS'])
return genotypes, variants
def _has_chromosome(variants: allel.VariantChunkedTable, chrom: str):
""" Returns whether a chromosome is listed in a VariantChunkedTable.
Parameters:
variants (VariantChunkedTable): Variants presents in the HDF5 file.
chrom (str): Chromosome to look for in the variants table.
Returns:
bool: True if chromosome 'chrom' is present in the table 'variants'
"""
np_array = strainer.variants_filter_by_chrom(variants, chrom)
return np.count_nonzero(np_array)
def get_sample_index(sample_list: list, sample: str) -> int:
""" Returns the index of a sample in a sample list.
Parameters:
sample_list (list: str): List of samples (strings) present in the VCF file.
sample (str): Sample name in the 'sample_list' to be selected as parental genotype.
Returns:
integer: The index of input sample in the sample list.
"""
try:
parental_sample_index = sample_list.index(sample)
except ValueError:
msg = "Sample '{sample}' is not found in VCF sample list '{sample_list}'".format(
sample=sample,
sample_list=sample_list
)
logger.error(msg)
raise ValueError(msg)
return parental_sample_index
def _sort_genotypes(genotypes: allel.GenotypeChunkedArray,
parental_sample_index: int) -> allel.GenotypeChunkedArray:
""" Sorts an allel.GenotypeChunkedArray object, placing the sample selected as parental
'parental_sample_index' as the first genotype listed in 'genotypes'.
Parameters:
genotypes (allel.GenotypeChunkedArray): GenotypesChunkedArray object.
parental_sample_index (int): Index in the 'sample_list' of the parental sample
Returns:
The allel.GenotypeChunkedArray object but sorted.
"""
selected_progeny = np.repeat(True, genotypes.n_samples)
selected_progeny[parental_sample_index] = False
selected_parental = np.logical_not(selected_progeny)
genotype_progeny = genotypes.subset(None, selected_progeny)[:]
genotype_parental = genotypes.subset(None, selected_parental)[:]
genotype = genotype_parental.concatenate(genotype_progeny, axis=1)
return genotype
def process(vcf_file_path: str, chrom: str,
parental_sample: str, phase: bool,
zygosis: haplotyper.Zygosity) -> haplotyper.HaplotypeWrapper:
""" Returns a 'haplotyper.HaplotypeWrapper' object.
Parameters:
vcf_file_path (str): Input path to the VCF file.
chrom (str): What chromosome should be considered for the haplotype process.
parental_sample (str): Sample name that is considered as the parental one.
        phase (bool): Whether to apply the phasing filter before building the haplotypes.
        zygosis (haplotyper.Zygosity): Whether the VCF contains only homozygous variants or heterozygous ones as well.
Returns:
haplotyper.HaplotypeWrapper: Wrapper containing genotypes and variants.
"""
vcf_file_abspath = os.path.abspath(vcf_file_path)
vcf_path = os.path.dirname(vcf_file_abspath)
hdf5_filename = os.path.splitext(vcf_file_abspath)[0] + constants.HDF5_EXT
hdf5_file_path = os.path.join(vcf_path, hdf5_filename)
reader.vcf_to_hdf5(vcf_file_path, hdf5_file_path)
logger.debug("Loading HDF5 file '{hdf5_path}'".format(hdf5_path=hdf5_file_path))
callset = h5py.File(hdf5_file_path, mode='r')
sample_list = list(callset["samples"])
parental_sample_index = get_sample_index(sample_list, parental_sample)
genotypes, variants = get_genotypes_n_variants(callset)
if not _has_chromosome(variants, chrom):
msg = "Chromosome '{chrom}' not found in the VCF '{vcf_file_path}'".format(
chrom=chrom,
vcf_file_path=vcf_file_path
)
logger.error(msg)
raise ValueError(msg)
genotypes = _sort_genotypes(genotypes, parental_sample_index)
genotypes_uc, variants_uc = strainer.filters_for_haplotyping(genotypes, variants, chrom)
if phase:
genotypes_uc, variants_uc = strainer.filter_phasing(genotypes_uc, variants_uc)
haplotype_wrapper = haplotyper.HaplotypeWrapper(
vcf_path, genotypes_uc, variants_uc, chrom, sample_list, parental_sample
)
if zygosis == haplotyper.Zygosity.HOM:
haplotype_wrapper.calc_homozygous_haplotypes()
elif zygosis == haplotyper.Zygosity.HET:
haplotype_wrapper.calc_heterozygous_haplotypes()
else:
msg = "Unknown zygosis '{zygosis}' parameter".format(zygosis=zygosis)
logger.error(msg)
raise ValueError(msg)
return haplotype_wrapper
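# A minimal usage sketch of the module above (not part of the original source); the VCF path,
# chromosome and sample name below are hypothetical placeholders.
if __name__ == '__main__':
    wrapper = process(
        vcf_file_path='cross_population.vcf',   # assumed input VCF
        chrom='chr01',                          # assumed chromosome present in the VCF
        parental_sample='parent_A',             # assumed sample name from the VCF header
        phase=False,
        zygosis=haplotyper.Zygosity.HOM,
    )
    logger.info("Haplotype wrapper ready: %s", wrapper)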
|
python
|
import torch
import transformers
from packaging import version
from torch.utils.data import SequentialSampler
from transformers import BertConfig, BertForSequenceClassification
from .registry import non_distributed_component_funcs
def get_bert_data_loader(
batch_size,
total_samples,
sequence_length,
device=torch.device('cpu:0'),
    is_distributed=False,
):
train_data = torch.randint(
low=0,
high=1000,
size=(total_samples, sequence_length),
device=device,
dtype=torch.long,
)
train_label = torch.randint(low=0, high=2, size=(total_samples,), device=device, dtype=torch.long)
train_dataset = torch.utils.data.TensorDataset(train_data, train_label)
    if is_distributed:
sampler = torch.utils.data.distributed.DistributedSampler(train_dataset)
else:
sampler = SequentialSampler(train_dataset)
train_loader = torch.utils.data.DataLoader(train_dataset, batch_size=batch_size, sampler=sampler)
return train_loader
@non_distributed_component_funcs.register(name='bert')
def get_training_components():
hidden_dim = 8
num_head = 4
sequence_length = 12
num_layer = 2
def bert_model_builder(checkpoint):
config = BertConfig(gradient_checkpointing=checkpoint,
hidden_size=hidden_dim,
intermediate_size=hidden_dim * 4,
num_attention_heads=num_head,
max_position_embeddings=sequence_length,
num_hidden_layers=num_layer,
hidden_dropout_prob=0.,
attention_probs_dropout_prob=0.)
print('building BertForSequenceClassification model')
        # adapting huggingface BertForSequenceClassification to the single unittest calling interface
        class ModelAdaptor(BertForSequenceClassification):
def forward(self, input_ids, labels):
"""
inputs: data, label
outputs: loss
"""
return super().forward(input_ids=input_ids, labels=labels)[0]
        model = ModelAdaptor(config)
if checkpoint and version.parse(transformers.__version__) >= version.parse("4.11.0"):
model.gradient_checkpointing_enable()
return model
    trainloader = get_bert_data_loader(batch_size=2,
                                       total_samples=10000,
                                       sequence_length=sequence_length,
                                       is_distributed=True)
    testloader = get_bert_data_loader(batch_size=2,
                                      total_samples=10000,
                                      sequence_length=sequence_length,
                                      is_distributed=True)
criterion = None
return bert_model_builder, trainloader, testloader, torch.optim.Adam, criterion
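# A minimal single-step smoke test for the components above (not part of the original helper).
# It bypasses the registry and uses a non-distributed loader so it can run in a plain CPU process.
if __name__ == '__main__':
    model_builder, _, _, optim_class, _ = get_training_components()
    model = model_builder(checkpoint=False)
    optimizer = optim_class(model.parameters(), lr=1e-3)
    loader = get_bert_data_loader(batch_size=2, total_samples=8,
                                  sequence_length=12, is_distributed=False)
    input_ids, labels = next(iter(loader))
    loss = model(input_ids, labels)   # the adaptor returns the loss directly
    loss.backward()
    optimizer.step()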
|
python
|
# SPDX-FileCopyrightText: 2021 European Spallation Source <[email protected]>
# SPDX-License-Identifier: BSD-3-Clause
__author__ = 'github.com/wardsimon'
__version__ = '0.0.1'
import numpy as np
import pytest
from easyCore.Objects.Base import Descriptor, Parameter, BaseObj
from easyCore.Objects.Groups import BaseCollection
import math
def createSingleObjs(idx):
alphabet = 'abcdefghijklmnopqrstuvwxyz'
reps = math.floor(idx/len(alphabet)) + 1
name = alphabet[idx % len(alphabet)] * reps
if idx % 2:
return Parameter(name, idx)
else:
return Descriptor(name, idx)
def createParam(option):
return pytest.param(option, id=option[0])
def doUndoRedo(obj, attr, future, additional=''):
from easyCore import borg
borg.stack.enabled = True
e = False
def getter(_obj, _attr):
value = getattr(_obj, _attr)
if additional:
value = getattr(value, additional)
return value
try:
previous = getter(obj, attr)
setattr(obj, attr, future)
assert getter(obj, attr) == future
assert borg.stack.canUndo()
borg.stack.undo()
assert getter(obj, attr) == previous
assert borg.stack.canRedo()
borg.stack.redo()
assert getter(obj, attr) == future
except Exception as err:
e = err
finally:
borg.stack.enabled = False
return e
@pytest.mark.parametrize('test', [createParam(option) for option in [('value', 500), ('error', 5), ('enabled', False),
('unit', 'meter / second'), ('display_name', 'boom'),
('fixed', False), ('max', 505), ('min', -1)]])
@pytest.mark.parametrize('idx', [pytest.param(0, id='Descriptor'), pytest.param(1, id='Parameter')])
def test_SinglesUndoRedo(idx, test):
obj = createSingleObjs(idx)
attr = test[0]
value = test[1]
if not hasattr(obj, attr):
pytest.skip(f'Not applicable: {obj} does not have field {attr}')
e = doUndoRedo(obj, attr, value)
if e:
raise e
def test_BaseObjUndoRedo():
objs = {obj.name: obj for obj in [createSingleObjs(idx) for idx in range(5)]}
name = 'test'
obj = BaseObj(name, **objs)
name2 = 'best'
# Test name
# assert not doUndoRedo(obj, 'name', name2)
# Test setting value
for b_obj in objs.values():
e = doUndoRedo(obj, b_obj.name, b_obj.raw_value + 1, 'raw_value')
if e:
raise e
def test_BaseCollectionUndoRedo():
objs = [createSingleObjs(idx) for idx in range(5)]
name = 'test'
obj = BaseCollection(name, *objs)
name2 = 'best'
# assert not doUndoRedo(obj, 'name', name2)
from easyCore import borg
borg.stack.enabled = True
original_length = len(obj)
p = Parameter('slip_in', 50)
idx = 2
obj.insert(idx, p)
assert len(obj) == original_length + 1
objs.insert(idx, p)
for item, obj_r in zip(obj, objs):
assert item == obj_r
# Test inserting items
borg.stack.undo()
assert len(obj) == original_length
_ = objs.pop(idx)
for item, obj_r in zip(obj, objs):
assert item == obj_r
borg.stack.redo()
assert len(obj) == original_length + 1
objs.insert(idx, p)
for item, obj_r in zip(obj, objs):
assert item == obj_r
# Test Del Items
del obj[idx]
del objs[idx]
assert len(obj) == original_length
for item, obj_r in zip(obj, objs):
assert item == obj_r
borg.stack.undo()
assert len(obj) == original_length + 1
objs.insert(idx, p)
for item, obj_r in zip(obj, objs):
assert item == obj_r
del objs[idx]
borg.stack.redo()
assert len(obj) == original_length
for item, obj_r in zip(obj, objs):
assert item == obj_r
# Test Place Item
old_item = objs[idx]
objs[idx] = p
obj[idx] = p
assert len(obj) == original_length
for item, obj_r in zip(obj, objs):
assert item == obj_r
borg.stack.undo()
for i in range(len(obj)):
if i == idx:
item = old_item
else:
item = objs[i]
assert obj[i] == item
borg.stack.redo()
for item, obj_r in zip(obj, objs):
assert item == obj_r
borg.stack.enabled = False
def test_UndoRedoMacros():
items = [createSingleObjs(idx) for idx in range(5)]
offset = 5
undo_text = 'test_macro'
from easyCore import borg
borg.stack.enabled = True
borg.stack.beginMacro(undo_text)
values = [item.raw_value for item in items]
for item, value in zip(items, values):
item.value = value + offset
borg.stack.endMacro()
for item, old_value in zip(items, values):
assert item.raw_value == old_value + offset
assert borg.stack.undoText() == undo_text
borg.stack.undo()
for item, old_value in zip(items, values):
assert item.raw_value == old_value
assert borg.stack.redoText() == undo_text
borg.stack.redo()
for item, old_value in zip(items, values):
assert item.raw_value == old_value + offset
def test_fittingUndoRedo():
m_value = 6
c_value = 2
x = np.linspace(-5, 5, 100)
dy = np.random.rand(*x.shape)
class Line(BaseObj):
def __init__(self, m: Parameter, c: Parameter):
super(Line, self).__init__('basic_line', m=m, c=c)
@classmethod
def default(cls):
m = Parameter('m', m_value)
c = Parameter('c', c_value)
return cls(m=m, c=c)
@classmethod
def from_pars(cls, m_value: float, c_value: float):
m = Parameter('m', m_value)
c = Parameter('c', c_value)
return cls(m=m, c=c)
def __call__(self, x: np.ndarray) -> np.ndarray:
return self.m.raw_value * x + self.c.raw_value
l1 = Line.default()
m_sp = 4
c_sp = -3
l2 = Line.from_pars(m_sp, c_sp)
l2.m.fixed = False
l2.c.fixed = False
y = l1(x) + 0.125*(dy - 0.5)
from easyCore.Fitting.Fitting import Fitter
f = Fitter(l2, l2)
from easyCore import borg
borg.stack.enabled = True
res = f.fit(x, y)
assert l1.c.raw_value == pytest.approx(l2.c.raw_value, rel=l2.c.error)
assert l1.m.raw_value == pytest.approx(l2.m.raw_value, rel=l2.m.error)
assert borg.stack.undoText() == 'Fitting routine'
borg.stack.undo()
assert l2.m.raw_value == m_sp
assert l2.c.raw_value == c_sp
assert borg.stack.redoText() == 'Fitting routine'
borg.stack.redo()
assert l2.m.raw_value == res.p[f'p{borg.map.convert_id_to_key(l2.m)}']
assert l2.c.raw_value == res.p[f'p{borg.map.convert_id_to_key(l2.c)}']
|
python
|
#!/usr/bin/env python3
import csv
import itertools as it
import os
import sys
def write_csv(f, cols, machines, variants):
w = csv.writer(f, delimiter=',')
headers = ["{}-{}".format(m, v) for (m, v) in it.product(machines, variants)]
w.writerow(headers)
for e in zip(*cols):
w.writerow(list(e))
def main(root, machines):
variants = ["one-pool", "many-pools", "many-pools-soa"]
skip_lines = 10
cols = []
for m in machines:
filename = os.path.join(root, "forex_{}.csv".format(m))
with open(filename, 'r') as f:
new_cols = [[], [], []]
r = csv.reader(f, delimiter=',')
for _ in range(skip_lines):
next(r)
for e in r:
test_name = e[0]
cpu_time = e[3]
if "OnePool/" in test_name:
idx = 0
elif "ManyPools/" in test_name:
idx = 1
elif "ManyPoolsSoa/" in test_name:
idx = 2
else:
continue
new_cols[idx].append(cpu_time)
cols.extend(new_cols)
filename = os.path.join(root, "forex.csv")
with open(filename, 'w') as f:
write_csv(f, cols, machines, variants)
if __name__ == "__main__":
root = sys.argv[1] if len(sys.argv) >= 2 else os.getcwd()
machines = sys.argv[2].split(",") if len(sys.argv) >= 3 else ["desktop", "laptop", "graphic", "ray", "voxel"]
main(root, machines)
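# Example invocation (paths and machine names below are assumed):
#   ./merge_forex.py /path/to/benchmark/results desktop,laptop
# This reads forex_desktop.csv and forex_laptop.csv from that directory and writes forex.csv,
# whose header follows the machine-major order produced above, e.g.:
#   desktop-one-pool,desktop-many-pools,desktop-many-pools-soa,laptop-one-pool,...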
|
python
|
import torch
from torch.autograd import Variable
from deepqmc.wavefunction.wf_orbital import Orbital
from deepqmc.wavefunction.molecule import Molecule
from pyscf import gto
import matplotlib.pyplot as plt
import numpy as np
import unittest
class TestAOvalues(unittest.TestCase):
def setUp(self):
# define the molecule
at = 'H 0 0 0; H 0 0 1'
self.mol = Molecule(atom=at,
calculator='pyscf',
basis='sto-3g',
unit='bohr')
self.m = gto.M(atom=at, basis='sto-3g', unit='bohr')
# define the wave function
self.wf = Orbital(self.mol)
self.pos = torch.zeros(100, self.mol.nelec * 3)
self.pos[:, 2] = torch.linspace(-5, 5, 100)
self.pos = Variable(self.pos)
self.pos.requires_grad = True
self.iorb = 0
self.x = self.pos[:, 2].detach().numpy()
def test_ao(self):
aovals = self.wf.ao(self.pos).detach().numpy()
aovals_ref = self.m.eval_gto(
'GTOval_cart', self.pos.detach().numpy()[:, :3])
assert np.allclose(
aovals[:, 0, self.iorb], aovals_ref[:, self.iorb])
def test_ao_deriv(self):
ip_aovals = self.wf.ao(
self.pos, derivative=1).detach().numpy()
ip_aovals_ref = self.m.eval_gto(
'GTOval_ip_cart', self.pos.detach().numpy()[:, :3])
ip_aovals_ref = ip_aovals_ref.sum(0)
assert np.allclose(ip_aovals[:, 0, self.iorb],
ip_aovals_ref[:, self.iorb])
def test_ao_hess(self):
i2p_aovals = self.wf.ao(
self.pos, derivative=2).detach().numpy()
ip_aovals_ref = self.m.eval_gto(
'GTOval_ip_cart', self.pos.detach().numpy()[:, :3])
ip_aovals_ref = ip_aovals_ref.sum(0)
i2p_aovals_ref = np.gradient(
ip_aovals_ref[:, self.iorb], self.x)
# assert np.allclose(i2p_aovals[:,0,self.iorb],i2p_aovals_ref)
if __name__ == "__main__":
# unittest.main()
t = TestAOvalues()
t.setUp()
t.test_ao()
t.test_ao_deriv()
|
python
|
from math import ceil
from struct import pack, unpack
import numpy as np
import ftd2xx as ftd
from timeflux.core.node import Node
from timeflux.helpers.clock import now
from timeflux.core.exceptions import WorkerInterrupt, TimefluxException
VID = 0x24f4 # Vendor ID
PID = 0x1000 # Product ID
HEADER = 0xAA # Message header
ACK = 0x0000 # Acknowledgement
INFO = 0x0003 # Device info
START = 0x000B # Start acquisition
STOP = 0x000C # Stop acquisition
BUFFER_SIZE = 65536 # Input and output buffer size in bytes
PACKET_SIZE = 37 # Packet size in bytes
class InvalidChecksumException(TimefluxException):
"""Exception thrown when a PL4 packet cannot be parsed"""
pass
class PhysioLOGX(Node):
"""
Driver for the Mind Media PhysioLOG-4 (PL4) device.
This node provides two streams. The first one (channels 1 and 2 at 1024 Hz) is
expressed in uV. The second one (channels 3 and 4 at 256 Hz) is expressed in mV.
In theory, we should be able to access the device via a serial interface using the
FTDI VCP driver. The device is recognized, but does not appear in the /dev
directory because the product and vendor IDs are unknown (at least on MacOS).
Manually adding the IDs to the known devices table would require re-signing the
driver with a kext-signing certificate. Instead, we install the D2XX driver, which
allows synchronous access through a library and the Python ft2xx wrapper.
Attributes:
o_1024hz (Port): Channels 1 and 2, provides DataFrame.
        o_256hz (Port): Channels 3 and 4, provides DataFrame.
Example:
.. literalinclude:: /../../timeflux_pl4/test/graphs/pl4.yaml
:language: yaml
See
- https://www.ftdichip.com/Products/ICs/FT232R.htm
- https://www.ftdichip.com/Drivers/VCP.htm
- https://www.ftdichip.com/Drivers/D2XX.htm
- https://pypi.org/project/ftd2xx
"""
def __init__(self):
self.device = None
# Setup
try:
# On Unix systems, we need to manually set the product and vendor IDs
ftd.setVIDPID(VID, PID)
except AttributeError:
# The method is not available on Windows
pass
# Connect
try:
# Open the first FTDI device
self.device = ftd.open(0)
# Get info
self.logger.info(self.device.getDeviceInfo())
except ftd.ftd2xx.DeviceError:
# Could not open device
raise WorkerInterrupt('Could not open device')
# Initialize connection
if self.device:
self.device.setBaudRate(921600)
self.device.setFlowControl(ftd.defines.FLOW_NONE, 0, 0)
self.device.setDataCharacteristics(ftd.defines.BITS_8, ftd.defines.STOP_BITS_1, ftd.defines.PARITY_NONE)
self.device.setTimeouts(2000, 2000)
self.device.setLatencyTimer(2)
self.device.setUSBParameters(BUFFER_SIZE, BUFFER_SIZE)
# Start acquisition
self.packet_count = 0
self.time_delta = {
'1024Hz': np.timedelta64(int(1e9 / 1024), 'ns'),
'256Hz': np.timedelta64(int(1e9 / 256), 'ns'),
}
self.start()
self.time_start = now()
def update(self):
# How many bytes are available?
queue = self.device.getQueueStatus()
if queue == BUFFER_SIZE:
self.logger.warn('The buffer is full. Please increase the graph rate.')
# Prepare data containers
# The device outputs channels 1-2 at 1024 Hz and channels 3-4 at 256 Hz.
# One packet contains 4 samples for channels 1-2 and 1 sample for channels 3-4.
data = {
'1024Hz': {
'data': { 'counter': [], '1': [], '2': [] },
'index': []
},
'256Hz': {
'data': { 'counter': [], '3': [], '4': [] },
'index': []
}
}
# Parse full packets
for i in range(ceil(queue / PACKET_SIZE)):
# Read one packet
packet = self.read()
if packet:
try:
# Parse the packet
counter, samples = self.parse(packet)
# Append to the data container
data['1024Hz']['data']['counter'] += [counter] * 4
data['1024Hz']['data']['1'] += samples['1']
data['1024Hz']['data']['2'] += samples['2']
data['256Hz']['data']['counter'].append(counter)
data['256Hz']['data']['3'] += samples['3']
data['256Hz']['data']['4'] += samples['4']
# Infer timestamps from packet count and sample rate
                    # This will fail dramatically if too many packets are lost
self.packet_count += 1
start = self.time_start + self.time_delta['256Hz'] * self.packet_count
stop = start + self.time_delta['256Hz']
data['256Hz']['index'].append(start)
start = self.time_start + self.time_delta['1024Hz'] * self.packet_count * 4
stop = start + self.time_delta['1024Hz'] * 4
timestamps = list(np.arange(start, stop, self.time_delta['1024Hz']))
data['1024Hz']['index'] += timestamps
except InvalidChecksumException:
pass
# Output
if len(data['1024Hz']['index']) > 0:
self.o_1024hz.set(data['1024Hz']['data'], data['1024Hz']['index'])
self.o_256hz.set(data['256Hz']['data'], data['256Hz']['index'])
def terminate(self):
self.stop()
def version(self):
self.command(INFO)
# Header (2B) + Response ID (2B) + Response Size (2B) + Payload Size (10B) + Checksum (2B)
data = self.device.read(18)
#self.logger.debug(f'< {data}')
version = unpack('>BBHHHHHLH', data)
return {
'device_id': version[4],
'software_version': version[5],
'hardware_version': version[6],
'serial_number': version[7]
}
def start(self):
self.device.purge()
self.command(START)
self.ack()
def stop(self):
self.command(STOP)
self.device.purge()
self.device.close()
def command(self, command_id, payload=None):
# Header (2B) + Command ID (2B) + Command Size (2B) + Payload Size + Checksum (2B)
size = 8
data = pack('>BBHH', HEADER, HEADER, command_id, size)
checksum = 65536
for byte in data:
checksum -= byte
data += pack('>H', checksum)
#self.logger.debug(f'> {data}')
self.device.write(data)
def ack(self):
# Header (2B) + Response ID (2B) + Response Size (2B) + Payload Size (41B) + Checksum (2B)
size = 49
data = self.device.read(size)
#self.logger.debug(f'< {data}')
if len(data) == size and int.from_bytes(data[2:4], byteorder='big') == ACK and data[6] == 0x00:
return True
return False
def read(self):
# A full packet is 37 bytes
data = self.device.read(PACKET_SIZE)
# Check if the packet starts with a header byte
if data[0] == HEADER:
return data
# Oh snap! The packet is corrupted...
# Look for the next header byte
self.logger.warn('Invalid header')
for index, byte in enumerate(data):
if byte == HEADER:
data = data[index:] + self.device.read(index)
return data
# Ahem... No luck
return False
def parse(self, data):
# self.logger.debug(f'< {data}')
# Validate checksum
checksum = 0
for byte in data:
checksum += byte
if (checksum % 256) != 0:
self.logger.warn('Invalid checksum')
raise InvalidChecksumException
# Counter
counter = data[1]
# Samples
samples = { '1': [], '2': [], '3': [], '4': [] }
channels = ['1', '2', '3', '1', '2', '1', '2', '4', '1', '2']
# Channels 1 and 2 are expressed in uV
# LSB value ADC for channels 1 and 2: (((Vref * 2) / (resolution ADS1254)) / (gain of ina)) = (((2.048 * 2) / (2^24)) / 20.61161164) = 0.01184481006
# Channels 3 and 4 are expressed in mV
# LSB value ADC for channels 3 and 4: ((Vref * 2) / (resolution ADS1254)) / 1000 = ((2.048 * 2) / (2^24)) / 1000 = 0.000244140625
adc = { '1': -0.01184481006, '2': -0.01184481006, '3': -0.000244140625, '4': -0.000244140625 }
for index, channel in enumerate(channels):
start = 2 * index + index + 2
stop = start + 3
# Each sample is 3 bytes (2's complement)
# We multiply the signed integer by the corresponding ADC to obtain the final value
samples[channel].append(
int.from_bytes(data[start:stop], byteorder='big', signed=True) * adc[channel]
)
return counter, samples
|
python
|
first = "0 0 0 0 0 0 0 0 0 0 0 0 0 1 1 1 1 1 1 1 1 1 1 1 1 1 0 0 0 0 0 0 0 1 1 1 1 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 1 0 0 0 0 0 0 0 0 0 0 0 1 1 1 1 1 1 1 1 1 1 1 1 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 1 1 1 1 1 1 1 1 1 1 1 1 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 1 1 1 1 0 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 10 10 10 10 9 10 9 9 9 9 9 9 8 9 9 9 9 9 9 9 9 10 10 10 9 8 7 6 5 5 6 5 5 6 7 8 9 10 13 15 17 17 17 17 17 15 13 13 12 10 9 9 7 8 11 12 11 6 3 3 3 4 8 16 49 49 47 46 46 46 45 45 45 45 44 43 42 42 41 42 43 42 41 41 42 43 46 44 42 38 34 32 31 32 33 35 37 37 39 41 45 50 59 64 68 68 68 68 66 60 54 54 52 46 43 43 40 41 47 51 46 33 21 20 22 24 38 65 49 48 47 45 45 45 44 44 43 43 43 43 42 42 41 42 43 41 40 38 39 40 41 39 36 32 32 33 35 38 38 40 41 40 39 42 45 53 63 69 72 68 63 61 59 57 54 55 50 45 44 46 46 44 45 45 41 29 20 20 22 22 37 66 46 46 45 44 43 43 43 43 43 42 42 42 42 41 41 40 39 37 35 34 34 35 36 35 33 32 34 38 40 42 40 43 41 38 37 38 42 54 66 71 70 66 60 57 56 56 55 56 53 51 51 52 53 49 45 39 35 27 20 20 22 21 36 65 45 43 43 42 41 41 41 41 41 40 40 40 39 38 38 37 35 31 29 28 29 29 33 36 38 38 38 42 44 45 43 42 40 40 32 30 37 53 68 72 68 63 61 59 59 59 59 59 59 64 69 72 67 58 49 41 35 27 20 20 22 22 35 64 44 40 39 39 38 38 38 38 38 39 39 37 35 34 32 29 28 25 23 25 28 31 36 38 39 40 43 46 52 50 50 54 51 47 41 36 37 52 70 72 70 67 65 63 63 62 62 63 64 70 77 85 79 66 52 45 38 27 20 20 22 22 35 64 39 37 39 38 37 36 36 37 37 36 36 33 31 27 26 23 22 22 23 25 30 35 39 44 43 45 52 53 55 59 65 66 79 80 72 62 59 60 71 76 73 69 69 67 65 63 64 64 64 69 78 85 85 76 55 49 42 30 20 20 23 23 34 64 41 42 44 42 41 39 38 38 37 36 33 28 25 21 21 22 23 26 28 29 32 36 43 48 48 51 54 66 69 69 77 93 99 99 86 80 82 79 69 71 74 71 72 72 69 66 65 65 64 62 64 73 78 73 60 54 49 35 21 21 24 24 34 64 46 46 47 45 43 40 38 36 37 42 40 30 25 22 23 26 27 31 35 35 34 37 43 49 51 56 67 81 83 84 81 85 101 106 97 80 73 76 95 86 68 73 74 73 73 70 68 66 64 56 51 58 69 68 65 64 57 40 23 20 24 24 34 64 52 46 43 41 38 34 31 29 37 62 76 60 32 24 25 27 29 36 42 42 37 36 44 51 55 64 80 92 99 86 65 73 89 85 89 97 89 78 68 88 88 68 66 71 74 73 72 69 65 55 44 48 62 71 79 82 72 45 26 20 24 24 33 64 46 36 32 30 28 25 21 21 39 84 105 90 53 33 34 31 29 35 39 39 37 40 52 59 64 70 90 91 88 74 61 66 71 63 71 79 95 102 83 73 83 87 76 70 73 75 75 73 68 55 42 44 57 76 93 98 86 53 27 20 24 24 34 64 35 24 22 22 20 20 17 19 39 84 105 91 57 38 38 37 26 27 32 32 36 47 61 67 69 81 100 82 74 72 71 68 67 64 62 57 60 81 88 75 68 84 89 82 72 74 75 77 70 58 41 38 52 74 93 101 91 63 30 21 23 25 35 63 20 19 19 19 18 18 17 19 35 68 88 74 43 34 35 33 23 17 18 24 43 60 68 69 69 77 87 89 91 94 94 87 77 80 75 58 49 44 60 68 67 67 78 83 78 68 75 76 73 62 43 35 46 64 84 97 91 72 36 21 24 26 34 64 18 18 18 18 19 18 18 19 30 55 68 57 32 25 27 23 18 16 19 34 61 69 71 71 74 85 93 103 120 127 127 127 127 109 112 104 80 54 46 51 63 56 62 76 80 71 66 74 75 64 44 33 43 59 73 89 91 82 43 21 24 26 35 65 20 21 21 19 20 20 18 18 27 50 63 56 37 23 21 19 19 18 29 54 74 70 69 68 75 87 103 127 127 127 127 127 127 127 127 127 127 115 74 49 56 64 65 67 77 74 68 67 73 63 46 37 45 58 68 84 90 83 45 22 25 26 36 66 22 23 22 20 20 20 19 19 35 59 78 68 45 24 21 19 18 23 42 74 80 69 69 74 77 95 121 127 127 127 127 127 127 127 127 127 127 127 127 95 65 64 66 59 74 77 70 61 66 63 46 40 43 56 67 78 83 75 44 22 25 29 41 70 23 24 22 20 19 19 19 23 52 83 104 95 67 33 22 22 23 31 70 86 73 65 64 74 78 98 127 127 
127 127 127 127 127 127 127 127 127 127 127 127 94 67 62 62 65 75 73 59 59 61 50 44 47 56 65 69 68 61 38 21 27 30 43 72 24 24 22 20 20 20 21 29 67 104 123 114 86 39 20 21 27 54 85 80 65 54 64 71 69 102 127 127 127 127 127 127 127 127 127 127 127 127 127 127 125 79 60 57 69 70 73 61 55 61 52 43 56 68 72 69 60 45 29 22 26 30 42 70 24 24 22 21 22 22 23 32 71 108 125 113 81 38 20 25 37 69 82 69 55 57 63 61 83 127 127 127 127 127 127 127 127 127 127 127 127 127 127 127 127 97 60 46 58 76 70 70 54 56 53 60 75 84 82 72 61 45 24 22 26 29 39 66 23 24 24 24 24 24 23 30 63 93 108 94 63 30 25 31 51 80 76 59 52 60 58 70 127 127 127 127 127 127 127 127 127 127 127 127 127 127 127 127 127 109 64 32 41 75 72 72 60 55 56 66 83 89 85 77 74 61 32 22 26 27 36 64 24 26 26 26 24 24 23 25 46 75 86 75 44 24 30 40 75 81 72 46 55 55 54 108 127 127 127 127 127 127 127 127 127 127 127 127 127 127 127 127 127 127 83 40 36 66 76 66 67 55 54 61 72 78 75 80 80 72 44 23 24 26 34 63 27 27 28 26 24 24 23 25 43 74 89 82 50 32 35 63 80 78 54 48 45 47 65 127 127 127 127 127 127 127 127 127 127 127 127 127 127 127 127 127 127 127 109 46 45 53 81 63 66 59 54 56 53 59 66 72 75 69 42 20 25 26 32 61 28 31 29 25 24 25 25 30 56 90 109 102 73 47 47 71 78 72 43 35 33 42 123 127 127 127 127 127 123 127 127 127 127 127 127 127 127 127 127 127 127 127 111 45 53 55 77 67 63 64 57 50 49 50 57 59 58 49 34 23 26 27 33 61 52 53 35 27 27 27 27 35 64 103 124 115 99 94 83 83 77 63 33 18 22 71 127 127 127 127 127 127 127 126 127 127 127 127 127 127 127 127 119 125 127 127 109 48 61 61 67 66 60 62 56 48 51 50 56 55 50 51 50 39 27 29 34 61 86 84 58 27 25 29 29 37 77 123 127 124 116 118 101 81 73 53 35 18 30 117 127 127 127 127 127 127 127 127 127 127 127 127 127 127 121 118 127 127 127 106 96 55 64 63 62 67 62 60 50 52 49 58 67 69 71 76 71 62 34 29 34 62 94 91 65 32 27 28 28 53 113 127 127 110 101 107 95 80 72 46 46 23 48 127 127 127 127 127 117 107 127 127 127 127 127 127 127 127 127 127 127 127 127 127 94 61 67 65 59 69 59 61 50 51 49 60 73 94 104 99 75 60 34 28 33 61 84 79 50 29 27 27 28 68 127 127 127 79 70 85 91 79 66 52 56 37 64 127 127 127 127 127 127 108 127 127 127 127 127 127 127 127 127 127 127 120 118 119 104 68 66 62 63 68 59 58 52 49 52 56 72 114 127 122 72 39 29 26 32 61 64 58 40 28 27 26 27 73 127 127 127 57 57 74 85 78 68 60 59 49 80 127 127 127 127 127 127 127 127 127 127 127 127 127 127 127 127 127 92 114 89 121 115 68 64 64 62 66 62 57 56 48 47 54 70 122 127 127 73 26 27 29 33 62 64 62 45 30 28 28 28 73 127 127 127 63 72 85 82 77 72 71 63 54 89 127 127 127 127 127 127 127 127 127 127 127 127 127 127 127 127 127 125 117 116 127 127 68 62 62 63 64 65 54 59 48 46 53 65 111 127 127 76 23 28 29 33 62 68 67 48 32 30 29 30 72 127 127 127 70 82 89 78 77 76 78 63 56 92 127 127 127 127 127 127 127 127 127 127 127 127 127 127 127 127 127 127 127 127 127 127 68 59 55 64 59 67 53 55 52 46 53 62 99 127 127 80 25 29 29 33 61 70 69 51 36 33 29 27 68 127 127 124 71 89 93 82 83 81 78 54 61 97 127 127 127 127 127 127 127 127 127 127 127 127 127 127 127 127 127 127 127 127 127 127 69 49 49 60 61 66 53 55 58 50 48 60 87 127 127 80 25 28 29 33 61 72 67 52 47 42 33 27 65 127 127 116 76 92 92 83 84 82 80 59 62 101 127 127 127 127 127 127 127 127 127 127 127 127 127 127 127 127 127 127 127 127 127 127 61 35 45 58 60 61 50 53 56 52 45 57 87 127 127 78 25 29 30 33 62 82 72 57 56 52 42 28 60 127 127 115 84 94 89 83 85 85 80 63 75 115 127 127 127 127 127 127 127 127 127 127 127 127 127 127 127 127 127 127 127 127 127 116 45 32 42 58 60 56 47 52 55 49 43 57 
92 121 127 78 26 27 32 36 63 84 71 65 68 62 53 39 65 127 127 116 90 92 85 82 85 86 81 68 76 79 127 127 127 127 127 127 127 127 127 127 127 127 109 120 127 127 127 127 127 127 127 95 34 35 45 55 66 54 50 52 51 52 44 54 91 107 125 75 38 42 54 59 78 99 84 86 96 94 89 76 87 126 127 114 94 92 84 88 88 86 82 71 86 41 127 127 127 127 127 127 127 127 127 127 127 127 127 127 127 127 127 127 127 127 125 73 39 40 49 62 65 54 53 47 52 50 43 51 75 89 98 68 53 66 82 89 98 108 93 102 119 125 121 107 99 117 119 111 99 90 84 87 87 84 81 76 127 88 117 127 127 127 127 127 127 127 127 127 127 127 127 127 127 127 127 127 127 127 109 55 44 54 54 61 68 57 51 52 53 48 41 48 63 73 71 62 63 79 94 103 109 111 96 112 127 127 127 120 97 97 100 101 99 89 83 82 86 87 80 71 78 47 86 127 127 127 127 127 127 127 127 127 127 127 127 127 107 101 127 127 127 127 97 39 54 68 62 58 68 64 55 55 53 46 42 43 54 60 61 60 65 78 90 99 107 117 105 113 127 127 127 117 88 81 89 94 99 89 83 78 85 87 79 75 57 38 49 127 127 127 127 127 127 127 127 127 127 127 127 127 127 127 127 127 127 126 82 31 60 80 67 65 66 73 60 56 56 48 42 41 47 56 59 62 64 73 81 86 98 114 106 115 120 125 127 109 90 92 103 103 99 89 80 76 84 87 81 77 58 43 39 97 127 127 127 127 127 127 127 127 127 127 127 127 127 127 127 127 127 117 50 36 63 81 70 67 69 78 68 57 55 52 47 43 46 59 62 59 56 60 63 66 80 127 122 112 112 115 118 112 108 126 127 127 105 90 78 76 84 87 79 75 63 41 41 44 127 127 127 127 127 127 127 127 127 127 127 127 127 127 127 127 124 77 28 45 60 76 72 73 65 77 78 61 56 55 54 48 48 59 65 63 49 39 40 40 57 127 127 124 109 109 126 127 127 127 127 127 116 91 74 72 84 85 78 72 65 46 41 36 62 127 127 127 127 127 127 127 127 127 127 127 127 127 127 126 89 36 39 53 60 66 70 75 64 72 79 65 60 55 52 52 49 59 66 62 44 27 25 25 41 127 127 127 120 112 127 127 127 127 127 127 118 89 72 70 76 79 73 68 61 49 45 37 32 92 127 127 127 127 127 127 127 127 127 127 127 127 123 90 43 36 57 62 57 55 65 67 60 70 78 69 61 59 54 57 51 59 59 53 40 27 25 27 46 127 127 127 127 127 127 127 127 127 127 127 104 81 72 66 64 76 69 60 56 48 47 39 33 75 109 127 127 127 127 127 127 127 127 127 127 108 80 48 31 61 74 66 58 54 59 62 58 71 74 71 65 64 58 56 56 54 50 38 28 25 28 46 66 127 127 127 127 127 127 127 127 127 127 121 92 75 69 62 55 69 63 54 54 44 49 39 33 67 107 113 127 127 127 127 127 127 127 127 98 66 52 42 47 77 79 64 69 65 60 66 52 71 71 74 68 63 64 60 60 52 43 28 24 29 47 74 90 127 127 127 127 127 127 127 127 127 127 112 81 69 64 57 52 60 56 48 46 37 44 39 30 57 113 119 113 118 119 116 117 118 104 81 68 68 65 46 69 82 75 66 84 73 65 64 44 71 72 75 70 61 68 70 62 56 44 31 74 110 117 127 127 127 127 127 127 127 127 127 127 127 127 97 73 65 59 53 51 58 51 41 42 35 30 37 32 45 111 126 121 117 112 103 91 82 81 84 90 89 67 61 82 74 66 76 92 74 70 66 38 69 69 72 70 53 67 76 73 64 52 52 127 127 127 127 127 117 127 127 127 127 127 127 127 127 127 84 65 64 55 50 50 56 47 37 37 32 24 27 29 36 108 127 127 127 125 116 106 97 97 99 101 87 63 79 76 68 67 79 86 70 74 62 39 70 70 70 69 54 67 78 73 76 68 82 127 127 127 127 127 122 127 127 127 127 127 127 127 127 118 83 66 59 48 46 46 51 42 36 37 28 22 19 25 31 103 127 127 127 127 127 119 111 109 112 109 84 78 86 74 63 75 82 78 66 72 51 41 68 68 63 62 48 65 75 85 93 94 113 127 127 127 127 127 127 127 127 127 127 127 127 127 127 115 81 63 52 43 42 47 42 38 34 35 28 19 19 20 26 94 127 127 127 127 127 127 126 122 124 115 92 86 81 67 65 77 79 69 69 66 36 47 64 63 56 44 40 60 87 115 125 110 115 127 127 127 127 127 127 127 127 127 127 127 127 127 127 107 
74 55 45 40 39 42 36 37 33 32 24 16 18 17 36 98 127 127 127 127 127 127 127 127 127 116 97 88 75 67 70 73 73 70 63 48 30 51 56 58 41 30 42 64 110 127 127 127 127 127 127 127 127 127 122 116 112 127 127 127 127 127 115 94 64 50 43 39 38 35 37 33 28 27 21 18 17 21 65 126 127 127 127 127 127 127 127 127 127 121 101 89 68 71 72 68 65 63 51 29 32 49 53 50 31 29 43 79 108 127 127 127 127 127 127 127 127 127 119 119 123 127 127 127 127 105 88 71 55 39 39 40 29 34 32 31 30 26 18 15 21 32 104 127 127 127 127 127 127 127 127 127 127 127 104 87 68 69 68 63 58 44 37 25 32 49 45 31 27 32 48 90 121 127 127 127 127 127 127 127 127 127 127 127 127 127 127 127 127 96 73 62 51 36 36 34 26 30 32 34 28 21 21 23 32 42 121 127 127 127 127 127 127 127 127 127 127 111 85 84 71 71 65 57 41 29 27 23 27 42 31 31 30 42 59 92 127 127 127 127 127 127 127 127 127 127 127 127 127 127 127 127 127 113 94 66 46 39 36 36 37 40 47 53 63 78 56 39 43 46 84 127 127 127 127 127 127 127 127 94 57 60 81 78 71 71 59 44 28 26 30 28 31 35 27 38 44 55 77 92 127 127 127 127 127 127 127 127 127 127 127 127 127 127 127 127 127 127 91 67 51 74 100 114 111 106 102 110 119 105 52 49 49 51 53 57 68 87 96 89 76 59 50 49 53 61 81 73 65 59 42 43 30 32 32 35 35 33 35 46 57 60 79 100 127 127 127 127 127 127 127 127 127 127 127 127 127 127 127 127 127 127 81 99 125 127 127 127 127 127 127 127 127 112 50 49 52 51 54 54 52 52 51 50 50 49 50 54 57 65 79 67 65 51 49 60 42 45 42 40 35 38 41 54 67 71 81 116 123 127 127 127 127 127 127 127 127 127 127 127 127 127 127 127 127 125 127 127 127 127 127 127 127 127 127 127 127 111 52 54 56 54 54 55 54 55 54 53 53 53 55 59 56 67 77 69 60 56 61 71 57 57 48 44 47 47 53 62 77 78 79 124 127 127 127 127 127 127 127 127 127 127 127 127 127 127 127 127 127 127 127 127 127 127 127 127 127 127 127 127 127 70 50 53 56 57 57 56 59 59 57 55 55 55 56 59 51 67 76 65 58 64 69 69 71 61 56 52 47 55 68 67 79 83 81 116 127 127 127 127 127 127 127 127 127 127 127 127 127 127 127 127 127 127 127 127 127 127 127 127 127 127 127 127 127 55 52 53 57 54 52 52 56 58 57 52 54 54 56 56 57 64 69 61 56 67 65 67 67 65 60 56 58 67 74 74 77 83 82 102 127 127 127 127 127 127 127 127 127 127 127 127 127 127 127 127 127 127 127 127 127 127 127 127 127 127 127 127 107 56 53 54 57 57 53 52 53 56 59 55 56 56 56 54 62 64 64 56 58 68 66 63 71 64 57 54 64 67 76 82 76 84 88 90 127 127 127 127 127 127 127 127 127 127 127 127 127 127 127 127 127 127 127 127 127 127 127 127 127 127 127 127 69 53 54 56 59 59 51 53 54 56 57 58 54 55 54 54 66 64 62 59 62 67 57 67 75 65 55 59 66 71 72 80 81 82 91 85 125 127 127 127 127 127 127 127 127 127"
second = "0 0 0 0 0 0 0 0 0 0 0 0 0 1 1 1 1 1 1 1 1 1 1 1 1 1 0 0 0 0 0 0 0 1 1 1 1 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 1 0 0 0 0 0 0 0 0 0 0 0 1 1 1 1 1 1 1 1 1 1 1 1 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 1 1 1 1 1 1 1 1 1 1 1 1 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 1 1 1 1 0 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 10 10 10 10 9 10 9 9 9 9 9 9 8 9 9 9 9 9 9 9 9 10 10 10 9 8 7 6 5 5 6 5 5 6 7 8 9 10 13 15 17 17 17 17 17 15 13 13 12 10 9 9 7 8 11 12 11 6 3 3 3 4 8 16 49 49 47 46 46 46 45 45 45 45 44 43 42 42 41 42 43 42 41 41 42 43 46 44 42 38 34 32 31 32 33 35 37 37 39 41 45 50 59 64 68 68 68 68 66 60 54 54 52 46 43 43 40 41 47 51 46 33 21 20 22 24 38 65 49 48 47 45 45 45 44 44 43 43 43 43 42 42 41 42 43 41 40 38 39 40 41 39 36 32 32 33 35 38 38 40 41 40 39 42 45 53 63 69 72 68 63 61 59 57 54 55 50 45 44 46 46 44 45 45 41 29 20 20 22 22 37 66 46 46 45 44 43 43 43 43 43 42 42 42 42 41 41 40 39 37 35 34 34 35 36 35 33 32 34 38 40 42 40 43 41 38 37 38 42 54 66 71 70 66 60 57 56 56 55 56 53 51 51 52 53 49 45 39 35 27 20 20 22 21 36 65 45 43 43 42 41 41 41 41 41 40 40 40 39 38 38 37 35 31 29 28 29 29 33 36 38 38 38 42 44 45 43 42 40 40 32 30 37 53 68 72 68 63 61 59 59 59 59 59 59 64 69 72 67 58 49 41 35 27 20 20 22 22 35 64 44 40 39 39 38 38 38 38 38 39 39 37 35 34 32 29 28 25 23 25 28 31 36 38 39 40 43 46 52 50 50 54 51 47 41 36 37 52 70 72 70 67 65 63 63 62 62 63 64 70 77 85 79 66 52 45 38 27 20 20 22 22 35 64 39 37 39 38 37 36 36 37 37 36 36 33 31 27 26 23 22 22 23 25 30 35 39 44 43 45 52 53 55 59 65 66 79 80 72 62 59 60 71 76 73 69 69 67 65 63 64 64 64 69 78 85 85 76 55 49 42 30 20 20 23 23 34 64 41 42 44 42 41 39 38 38 37 36 33 28 25 21 21 22 23 26 28 29 32 36 43 48 48 51 54 66 69 69 77 93 99 99 86 80 82 79 69 71 74 71 72 72 69 66 65 65 64 62 64 73 78 73 60 54 49 35 21 21 24 24 34 64 46 46 47 45 43 40 38 36 37 42 40 30 25 22 23 26 27 31 35 35 34 37 43 49 51 56 67 81 83 84 81 85 101 106 97 80 73 76 95 86 68 73 74 73 73 70 68 66 64 56 51 58 69 68 65 64 57 40 23 20 24 24 34 64 52 46 43 41 38 34 31 29 37 62 76 60 32 24 25 27 29 36 42 42 37 36 44 51 55 64 80 92 99 86 65 73 89 85 89 97 89 78 68 88 88 68 66 71 74 73 72 69 65 55 44 48 62 71 79 82 72 45 26 20 24 24 33 64 46 36 32 30 28 25 21 21 39 84 105 90 53 33 34 31 29 35 39 39 37 40 52 59 64 70 90 91 88 74 61 66 71 63 71 79 95 102 83 73 83 87 76 70 73 75 75 73 68 55 42 44 57 76 93 98 86 53 27 20 24 24 34 64 35 24 22 22 20 20 17 19 39 84 105 91 57 38 38 37 26 27 32 32 36 47 61 67 69 81 100 82 74 72 71 68 67 64 62 57 60 81 88 75 68 84 89 82 72 74 75 77 70 58 41 38 52 74 93 101 91 63 30 21 23 25 35 63 20 19 19 19 18 18 17 19 35 68 88 74 43 34 35 33 23 17 18 24 43 60 68 69 69 77 87 89 91 94 94 87 77 80 75 58 49 44 60 68 67 67 78 83 78 68 75 76 73 62 43 35 46 64 84 97 91 72 36 21 24 26 34 64 18 18 18 18 19 18 18 19 30 55 68 57 32 25 27 23 18 16 19 34 61 69 71 71 74 85 93 103 120 136 149 145 127 109 112 104 80 54 46 51 63 56 62 76 80 71 66 74 75 64 44 33 43 59 73 89 91 82 43 21 24 26 35 65 20 21 21 19 20 20 18 18 27 50 63 56 37 23 21 19 19 18 29 54 74 70 69 68 75 87 103 127 165 198 210 208 196 173 164 161 146 115 74 49 56 64 65 67 77 74 68 67 73 63 46 37 45 58 68 84 90 83 45 22 25 26 36 66 22 23 22 20 20 20 19 19 35 59 78 68 45 24 21 19 18 23 42 74 80 69 69 74 77 95 121 162 195 215 223 225 226 219 210 208 199 181 147 95 65 64 66 59 74 77 70 61 66 63 46 40 43 56 67 78 83 75 44 22 25 29 41 70 23 24 22 20 19 19 19 23 52 83 104 95 67 33 22 22 23 31 70 86 73 65 64 74 78 98 141 187 
209 220 228 231 228 226 223 218 214 203 187 150 94 67 62 62 65 75 73 59 59 61 50 44 47 56 65 69 68 61 38 21 27 30 43 72 24 24 22 20 20 20 21 29 67 104 123 114 86 39 20 21 27 54 85 80 65 54 64 71 69 102 173 205 211 218 224 228 227 224 222 220 216 208 198 177 125 79 60 57 69 70 73 61 55 61 52 43 56 68 72 69 60 45 29 22 26 30 42 70 24 24 22 21 22 22 23 32 71 108 125 113 81 38 20 25 37 69 82 69 55 57 63 61 83 155 199 208 209 217 221 223 224 223 220 217 212 207 199 186 151 97 60 46 58 76 70 70 54 56 53 60 75 84 82 72 61 45 24 22 26 29 39 66 23 24 24 24 24 24 23 30 63 93 108 94 63 30 25 31 51 80 76 59 52 60 58 70 147 198 205 207 214 220 222 224 225 222 217 214 209 203 195 184 160 109 64 32 41 75 72 72 60 55 56 66 83 89 85 77 74 61 32 22 26 27 36 64 24 26 26 26 24 24 23 25 46 75 86 75 44 24 30 40 75 81 72 46 55 55 54 108 184 202 209 214 219 223 223 225 225 223 220 216 211 205 193 177 161 130 83 40 36 66 76 66 67 55 54 61 72 78 75 80 80 72 44 23 24 26 34 63 27 27 28 26 24 24 23 25 43 74 89 82 50 32 35 63 80 78 54 48 45 47 65 153 188 175 170 183 198 214 217 220 221 221 222 217 213 209 197 178 165 152 109 46 45 53 81 63 66 59 54 56 53 59 66 72 75 69 42 20 25 26 32 61 28 31 29 25 24 25 25 30 56 90 109 102 73 47 47 71 78 72 43 35 33 42 123 178 169 177 163 137 123 139 176 199 209 211 212 210 206 202 198 192 179 160 111 45 53 55 77 67 63 64 57 50 49 50 57 59 58 49 34 23 26 27 33 61 52 53 35 27 27 27 27 35 64 103 124 115 99 94 83 83 77 63 33 18 22 71 167 189 193 184 172 165 143 126 138 171 197 199 190 174 148 128 119 125 134 143 109 48 61 61 67 66 60 62 56 48 51 50 56 55 50 51 50 39 27 29 34 61 86 84 58 27 25 29 29 37 77 123 129 124 116 118 101 81 73 53 35 18 30 117 188 197 178 168 179 188 172 157 146 167 191 187 167 139 121 118 128 141 142 106 96 55 64 63 62 67 62 60 50 52 49 58 67 69 71 76 71 62 34 29 34 62 94 91 65 32 27 28 28 53 113 146 133 110 101 107 95 80 72 46 46 23 48 149 198 202 169 132 117 107 139 161 156 173 191 182 142 129 135 152 156 135 137 133 94 61 67 65 59 69 59 61 50 51 49 60 73 94 104 99 75 60 34 28 33 61 84 79 50 29 27 27 28 68 141 161 138 79 70 85 91 79 66 52 56 37 64 173 207 208 189 153 127 108 147 147 176 192 211 188 140 148 146 133 130 120 118 119 104 68 66 62 63 68 59 58 52 49 52 56 72 114 135 122 72 39 29 26 32 61 64 58 40 28 27 26 27 73 153 167 133 57 57 74 85 78 68 60 59 49 80 187 205 210 204 181 158 151 154 179 198 203 218 191 160 161 152 131 92 114 89 121 115 68 64 64 62 66 62 57 56 48 47 54 70 122 156 141 73 26 27 29 33 62 64 62 45 30 28 28 28 73 152 166 131 63 72 85 82 77 72 71 63 54 89 188 206 211 215 218 210 203 206 209 204 206 221 184 171 182 161 147 125 117 116 154 135 68 62 62 63 64 65 54 59 48 46 53 65 111 161 151 76 23 28 29 33 62 68 67 48 32 30 29 30 72 149 166 128 70 82 89 78 77 76 78 63 56 92 184 205 211 218 229 230 230 226 211 201 209 224 181 171 199 203 180 166 165 166 172 142 68 59 55 64 59 67 53 55 52 46 53 62 99 157 154 80 25 29 29 33 61 70 69 51 36 33 29 27 68 143 160 124 71 89 93 82 83 81 78 54 61 97 176 203 212 215 223 225 220 210 202 200 218 228 191 167 198 218 220 213 206 193 179 143 69 49 49 60 61 66 53 55 58 50 48 60 87 148 151 80 25 28 29 33 61 72 67 52 47 42 33 27 65 139 151 116 76 92 92 83 84 82 80 59 62 101 170 197 208 206 206 204 190 187 200 204 219 221 198 168 173 202 218 220 207 192 170 132 61 35 45 58 60 61 50 53 56 52 45 57 87 131 148 78 25 29 30 33 62 82 72 57 56 52 42 28 60 132 146 115 84 94 89 83 85 85 80 63 75 115 163 185 192 190 186 179 174 190 178 167 188 193 169 153 163 164 192 205 200 185 162 116 45 32 42 58 60 56 47 52 55 49 43 57 
92 121 141 78 26 27 32 36 63 84 71 65 68 62 53 39 65 130 143 116 90 92 85 82 85 86 81 68 76 79 146 176 169 172 170 166 189 202 172 138 141 139 109 120 162 153 156 181 180 168 145 95 34 35 45 55 66 54 50 52 51 52 44 54 91 107 125 75 38 42 54 59 78 99 84 86 96 94 89 76 87 126 129 114 94 92 84 88 88 86 82 71 86 41 134 176 162 168 163 170 200 208 194 175 143 134 131 142 168 168 138 156 162 148 125 73 39 40 49 62 65 54 53 47 52 50 43 51 75 89 98 68 53 66 82 89 98 108 93 102 119 125 121 107 99 117 119 111 99 90 84 87 87 84 81 76 141 88 117 180 182 180 149 130 162 167 164 161 157 144 137 142 155 160 149 136 141 130 109 55 44 54 54 61 68 57 51 52 53 48 41 48 63 73 71 62 63 79 94 103 109 111 96 112 129 140 136 120 97 97 100 101 99 89 83 82 86 87 80 71 78 47 86 175 188 186 174 141 137 186 195 187 181 174 167 141 107 101 137 150 148 128 97 39 54 68 62 58 68 64 55 55 53 46 42 43 54 60 61 60 65 78 90 99 107 117 105 113 128 137 136 117 88 81 89 94 99 89 83 78 85 87 79 75 57 38 49 148 179 188 192 199 182 198 222 233 230 227 210 182 140 147 181 180 154 126 82 31 60 80 67 65 66 73 60 56 56 48 42 41 47 56 59 62 64 73 81 86 98 114 106 115 120 125 128 109 90 92 103 103 99 89 80 76 84 87 81 77 58 43 39 97 168 186 201 206 200 184 184 194 193 193 175 142 146 185 187 167 138 117 50 36 63 81 70 67 69 78 68 57 55 52 47 43 46 59 62 59 56 60 63 66 80 133 122 112 112 115 118 112 108 126 146 132 105 90 78 76 84 87 79 75 63 41 41 44 130 182 201 204 203 183 160 155 160 155 141 141 169 188 175 143 124 77 28 45 60 76 72 73 65 77 78 61 56 55 54 48 48 59 65 63 49 39 40 40 57 162 149 124 109 109 126 143 156 169 179 161 116 91 74 72 84 85 78 72 65 46 41 36 62 152 195 210 212 197 180 172 164 158 157 170 182 178 150 126 89 36 39 53 60 66 70 75 64 72 79 65 60 55 52 52 49 59 66 62 44 27 25 25 41 174 166 146 120 112 147 176 189 181 187 163 118 89 72 70 76 79 73 68 61 49 45 37 32 92 159 199 210 214 213 212 204 197 191 192 183 155 123 90 43 36 57 62 57 55 65 67 60 70 78 69 61 59 54 57 51 59 59 53 40 27 25 27 46 160 168 174 164 161 174 179 189 179 173 140 104 81 72 66 64 76 69 60 56 48 47 39 33 75 109 157 189 196 202 207 209 205 200 186 154 108 80 48 31 61 74 66 58 54 59 62 58 71 74 71 65 64 58 56 56 54 50 38 28 25 28 46 66 144 174 201 203 199 191 172 165 160 151 121 92 75 69 62 55 69 63 54 54 44 49 39 33 67 107 113 139 162 168 170 177 180 170 142 98 66 52 42 47 77 79 64 69 65 60 66 52 71 71 74 68 63 64 60 60 52 43 28 24 29 47 74 90 144 183 214 213 203 190 161 153 150 151 112 81 69 64 57 52 60 56 48 46 37 44 39 30 57 113 119 113 118 119 116 117 118 104 81 68 68 65 46 69 82 75 66 84 73 65 64 44 71 72 75 70 61 68 70 62 56 44 31 74 110 117 143 156 136 176 204 199 192 186 169 151 150 150 97 73 65 59 53 51 58 51 41 42 35 30 37 32 45 111 126 121 117 112 103 91 82 81 84 90 89 67 61 82 74 66 76 92 74 70 66 38 69 69 72 70 53 67 76 73 64 52 52 150 189 162 194 201 117 151 174 174 181 184 184 161 153 131 84 65 64 55 50 50 56 47 37 37 32 24 27 29 36 108 134 132 128 125 116 106 97 97 99 101 87 63 79 76 68 67 79 86 70 74 62 39 70 70 70 69 54 67 78 73 76 68 82 186 218 183 203 211 122 141 158 155 159 166 184 170 153 118 83 66 59 48 46 46 51 42 36 37 28 22 19 25 31 103 136 144 142 136 129 119 111 109 112 109 84 78 86 74 63 75 82 78 66 72 51 41 68 68 63 62 48 65 75 85 93 94 113 193 227 213 208 220 152 154 161 159 152 174 184 168 148 115 81 63 52 43 42 47 42 38 34 35 28 19 19 20 26 94 135 151 152 147 142 134 126 122 124 115 92 86 81 67 65 77 79 69 69 66 36 47 64 63 56 44 40 60 87 115 125 110 115 176 219 218 204 215 156 146 149 165 186 198 187 163 135 107 
74 55 45 40 39 42 36 37 33 32 24 16 18 17 36 98 132 153 158 154 149 140 134 131 132 116 97 88 75 67 70 73 73 70 63 48 30 51 56 58 41 30 42 64 110 139 170 170 147 131 182 202 204 208 122 116 112 144 194 194 166 141 115 94 64 50 43 39 38 35 37 33 28 27 21 18 17 21 65 126 133 151 157 154 149 143 138 138 135 121 101 89 68 71 72 68 65 63 51 29 32 49 53 50 31 29 43 79 108 149 165 182 173 152 136 178 211 214 119 119 123 138 179 171 131 105 88 71 55 39 39 40 29 34 32 31 30 26 18 15 21 32 104 153 157 149 162 157 153 149 146 149 148 139 104 87 68 69 68 63 58 44 37 25 32 49 45 31 27 32 48 90 121 137 177 173 181 175 155 139 196 205 182 184 184 182 183 170 141 96 73 62 51 36 36 34 26 30 32 34 28 21 21 23 32 42 121 174 178 162 170 162 164 168 168 164 144 111 85 84 71 71 65 57 41 29 27 23 27 42 31 31 30 42 59 92 132 150 173 172 174 181 171 143 149 173 196 197 193 187 182 171 152 113 94 66 46 39 36 36 37 40 47 53 63 78 56 39 43 46 84 147 169 175 185 173 174 163 136 94 57 60 81 78 71 71 59 44 28 26 30 28 31 35 27 38 44 55 77 92 131 156 174 167 169 181 176 160 129 159 192 191 187 182 179 171 154 127 91 67 51 74 100 114 111 106 102 110 119 105 52 49 49 51 53 57 68 87 96 89 76 59 50 49 53 61 81 73 65 59 42 43 30 32 32 35 35 33 35 46 57 60 79 100 128 165 172 175 166 175 180 165 144 158 184 185 183 183 182 179 155 130 81 99 125 139 150 149 141 149 149 169 172 112 50 49 52 51 54 54 52 52 51 50 50 49 50 54 57 65 79 67 65 51 49 60 42 45 42 40 35 38 41 54 67 71 81 116 123 165 182 179 164 172 182 167 143 149 184 186 186 185 184 176 146 125 150 180 187 191 185 179 177 181 184 198 195 111 52 54 56 54 54 55 54 55 54 53 53 53 55 59 56 67 77 69 60 56 61 71 57 57 48 44 47 47 53 62 77 78 79 124 135 162 184 184 170 166 183 173 148 130 183 184 184 184 181 164 160 182 197 201 205 208 210 208 201 197 202 206 192 70 50 53 56 57 57 56 59 59 57 55 55 55 56 59 51 67 76 65 58 64 69 69 71 61 56 52 47 55 68 67 79 83 81 116 142 155 180 184 176 161 183 177 157 140 182 183 184 184 179 180 198 203 208 211 213 214 217 219 215 211 214 211 158 55 52 53 57 54 52 52 56 58 57 52 54 54 56 56 57 64 69 61 56 67 65 67 67 65 60 56 58 67 74 74 77 83 82 102 144 151 183 184 181 158 182 181 164 150 182 184 186 185 195 206 209 212 215 216 217 218 223 225 224 222 220 208 107 56 53 54 57 57 53 52 53 56 59 55 56 56 56 54 62 64 64 56 58 68 66 63 71 64 57 54 64 67 76 82 76 84 88 90 139 144 184 186 181 159 181 184 173 160 182 185 186 198 212 214 218 218 219 220 222 223 223 225 224 226 224 198 69 53 54 56 59 59 51 53 54 56 57 58 54 55 54 54 66 64 62 59 62 67 57 67 75 65 55 59 66 71 72 80 81 82 91 85 125 141 186 188 175 164 179 186 177 162"
print(len(first.split()), len(second.split()))
i = 0
hasdone = False
for f, s in zip(first.split(), second.split()):
i+=1
if f != s:
if not hasdone:
print("------------------------------------", i)
hasdone = True
print("- '" + f + "'", "'" + s + "'", end=" | ")
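# Optional summary (assumed addition, not in the original script): report how many positions differ in total.
mismatches = sum(1 for f, s in zip(first.split(), second.split()) if f != s)
print("\ntotal mismatches:", mismatches, "of", len(first.split()))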
|
python
|
# -*- coding: utf-8 -*-
# Copyright (c) 2018-2021, earthobservations developers.
# Distributed under the MIT License. See LICENSE for more info.
"""
=====
About
=====
Example for DWD RADOLAN Composite RW/SF using wetterdienst and wradlib.
Hourly and gliding 24h sum of radar- and station-based measurements (German).
See also:
- https://docs.wradlib.org/en/stable/notebooks/radolan/radolan_showcase.html.
This program will request daily (RADOLAN SF) data for 2020-09-04T12:00:00
and plot the outcome with matplotlib.
=====
Setup
=====
::
brew install gdal
pip install wradlib
=======
Details
=======
RADOLAN: Radar Online Adjustment
Radar based quantitative precipitation estimation
RADOLAN Composite RW/SF
Hourly and gliding 24h sum of radar- and station-based measurements (German)
The routine procedure RADOLAN (Radar-Online-Calibration) provides area-wide,
spatially and temporally high-resolution quantitative precipitation data in
real-time for Germany.
- https://www.dwd.de/EN/Home/_functions/aktuelles/2019/20190820_radolan.html
- https://www.dwd.de/DE/leistungen/radolan/radolan_info/radolan_poster_201711_en_pdf.pdf?__blob=publicationFile&v=2 # noqa
- https://opendata.dwd.de/climate_environment/CDC/grids_germany/daily/radolan/
- https://docs.wradlib.org/en/stable/notebooks/radolan/radolan_showcase.html#RADOLAN-Composite # noqa
- Hourly: https://docs.wradlib.org/en/stable/notebooks/radolan/radolan_showcase.html#RADOLAN-RW-Product # noqa
- Daily: https://docs.wradlib.org/en/stable/notebooks/radolan/radolan_showcase.html#RADOLAN-SF-Product # noqa
"""
import logging
import os
import matplotlib.pyplot as plt
import numpy as np
import wradlib as wrl
from wetterdienst.provider.dwd.radar import (
DwdRadarParameter,
DwdRadarPeriod,
DwdRadarResolution,
DwdRadarValues,
)
logging.basicConfig(level=logging.INFO)
log = logging.getLogger()
def plot(data: np.ndarray, attributes: dict, label: str):
"""
Convenience function for plotting RADOLAN data.
"""
# Get coordinates.
radolan_grid_xy = wrl.georef.get_radolan_grid(900, 900)
# Mask data.
data = np.ma.masked_equal(data, -9999)
# Plot with matplotlib.
plot_radolan(data, attributes, radolan_grid_xy, clabel=label)
def plot_radolan(data: np.ndarray, attrs: dict, grid: np.ndarray, clabel: str = None):
"""
Plotting function for RADOLAN data.
Shamelessly stolen from the wradlib RADOLAN Product Showcase documentation.
https://docs.wradlib.org/en/stable/notebooks/radolan/radolan_showcase.html
Thanks!
"""
fig = plt.figure(figsize=(10, 8))
ax = fig.add_subplot(111, aspect="equal")
x = grid[:, :, 0]
y = grid[:, :, 1]
pm = ax.pcolormesh(x, y, data, cmap="viridis", shading="auto")
cb = fig.colorbar(pm, shrink=0.75)
cb.set_label(clabel)
plt.xlabel("x [km]")
plt.ylabel("y [km]")
plt.title(
"{0} Product\n{1}".format(attrs["producttype"], attrs["datetime"].isoformat())
)
plt.xlim((x[0, 0], x[-1, -1]))
plt.ylim((y[0, 0], y[-1, -1]))
plt.grid(color="r")
def radolan_info(data: np.ndarray, attributes: dict):
"""
Display metadata from RADOLAN request.
"""
log.info("Data shape: %s", data.shape)
log.info("Attributes:")
for key, value in attributes.items():
print(f"- {key}: {value}")
def label_by_producttype(producttype: str) -> str:
"""
Compute label for RW/SF product.
:param producttype: Either RW or SF.
:return: Label for plot.
"""
if producttype == "RW":
label = "mm * h-1"
elif producttype == "SF":
label = "mm * 24h-1"
else:
label = None
return label
def radolan_grid_example():
log.info("Acquiring RADOLAN_CDC data")
radolan = DwdRadarValues(
parameter=DwdRadarParameter.RADOLAN_CDC,
resolution=DwdRadarResolution.DAILY,
period=DwdRadarPeriod.HISTORICAL,
start_date="2020-09-04T12:00:00",
end_date="2020-09-04T12:00:00",
)
for item in radolan.query():
# Decode data using wradlib.
log.info("Parsing RADOLAN_CDC composite data for %s", item.timestamp)
data, attributes = wrl.io.read_radolan_composite(item.data)
# Compute label matching RW/SF product.
label = label_by_producttype(attributes["producttype"])
# Plot and display data.
plot(data, attributes, label)
if "PYTEST_CURRENT_TEST" not in os.environ:
plt.show()
def main():
radolan_grid_example()
if __name__ == "__main__":
main()
|
python
|
# -*- encoding: utf-8 -*-
import pandas as pd
import pytest
from deeptables.datasets import dsutils
from deeptables.models import deeptable
from deeptables.utils import consts
class TestModelInput:
def setup_class(cls):
cls.df_bank = dsutils.load_bank().sample(frac=0.01)
cls.df_movielens = dsutils.load_movielens()
    def _train_and_assert(self, X, y, conf: deeptable.ModelConfig):
dt = deeptable.DeepTable(config=conf)
model, history = dt.fit(X, y, validation_split=0.2, epochs=2, batch_size=32)
assert len(model.model.input_names) == 1
def test_only_categorical_feature(self):
df = self.df_bank.copy()
X = df[['loan']]
y = df['y']
conf = deeptable.ModelConfig(nets=['dnn_nets'],
task=consts.TASK_BINARY,
metrics=['accuracy'],
fixed_embedding_dim=True,
embeddings_output_dim=4,
apply_gbm_features=False,
apply_class_weight=True,
earlystopping_patience=3,)
        self._train_and_assert(X, y, conf)
def test_only_continuous_feature(self):
df = self.df_bank.copy()
X = df[['duration']].astype('float32')
y = df['y']
conf = deeptable.ModelConfig(nets=['dnn_nets'],
task=consts.TASK_BINARY,
metrics=['accuracy'],
fixed_embedding_dim=True,
embeddings_output_dim=4,
apply_gbm_features=False,
apply_class_weight=True,
earlystopping_patience=3,)
        self._train_and_assert(X, y, conf)
def test_only_var_len_categorical_feature(self):
df:pd.DataFrame = self.df_movielens.copy()
X = df[['genres']]
y = df['rating']
conf = deeptable.ModelConfig(nets=['dnn_nets'],
task=consts.TASK_REGRESSION,
metrics=['mse'],
fixed_embedding_dim=True,
embeddings_output_dim=4,
apply_gbm_features=False,
apply_class_weight=True,
earlystopping_patience=3,)
        self._train_and_assert(X, y, conf)
def test_no_input(self):
df:pd.DataFrame = self.df_movielens.copy()
X = pd.DataFrame()
y = df['rating']
conf = deeptable.ModelConfig(nets=['dnn_nets'],
task=consts.TASK_REGRESSION,
metrics=['mse'],
fixed_embedding_dim=True,
embeddings_output_dim=4,
apply_gbm_features=False,
apply_class_weight=True,
earlystopping_patience=3,)
dt = deeptable.DeepTable(config=conf)
with pytest.raises(ValueError) as err_info:
dt.fit(X, y, validation_split=0.2, epochs=2, batch_size=32)
print(err_info)
|
python
|
# This file is part of Moksha.
# Copyright (C) 2008-2010 Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Authors: Luke Macken <[email protected]>
import logging
try:
import amqplib.client_0_8 as amqp
except ImportError:
pass
from moksha.lib.helpers import trace
from moksha.hub.amqp.base import BaseAMQPHub
log = logging.getLogger(__name__)
NONPERSISTENT_DELIVERY, PERSISTENT_DELIVERY = range(1, 3)
class AMQPLibHub(BaseAMQPHub):
    """ An AMQPHub implementation using the amqplib module """
def __init__(self, broker, username=None, password=None, ssl=False, threaded=False):
self.conn = amqp.Connection(host=broker, ssl=ssl, use_threading=threaded,
userid=username, password=password)
self.channel = self.conn.channel()
self.channel.access_request('/data', active=True, write=True, read=True)
@trace
def create_queue(self, queue, exchange='amq.fanout', durable=True,
exclusive=False, auto_delete=False):
""" Declare a `queue` and bind it to an `exchange` """
        if queue not in self.queues:
log.info("Creating %s queue" % queue)
self.channel.queue_declare(queue,
durable=durable,
exclusive=exclusive,
auto_delete=auto_delete)
@trace
def exchange_declare(self, exchange, type='fanout', durable=True,
auto_delete=False):
self.channel.exchange_declare(exchange=exchange, type=type,
durable=durable, auto_delete=auto_delete)
@trace
def queue_bind(self, queue, exchange, routing_key=''):
self.channel.queue_bind(queue, exchange, routing_key=routing_key)
# Since queue_name == routing_key, should we just make this method
# def send_message(self, queue, message) ?
def send_message(self, message, exchange='amq.fanout', routing_key='',
delivery_mode=PERSISTENT_DELIVERY, **kw):
"""
Send an AMQP message to a given exchange with the specified routing key
"""
msg = amqp.Message(message, **kw)
msg.properties["delivery_mode"] = delivery_mode
self.channel.basic_publish(msg, exchange, routing_key=routing_key)
@trace
def get_message(self, queue):
""" Immediately grab a message from the queue.
This call will not block, and will return None if there are no new
messages in the queue.
"""
msg = self.channel.basic_get(queue, no_ack=True)
return msg
def queue_subscribe(self, queue, callback, no_ack=True):
"""
Consume messages from a given `queue`, passing each to `callback`
"""
self.channel.basic_consume(queue, callback=callback, no_ack=no_ack)
def wait(self):
self.channel.wait()
@trace
def close(self):
try:
if hasattr(self, 'channel') and self.channel:
self.channel.close()
        except Exception as e:
log.exception(e)
try:
if hasattr(self, 'conn') and self.conn:
self.conn.close()
        except Exception as e:
log.exception(e)
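# A minimal usage sketch for the hub above (not part of the original module); the broker address,
# credentials and names are placeholders, and create_queue() assumes BaseAMQPHub tracks self.queues.
if __name__ == '__main__':
    hub = AMQPLibHub('localhost:5672', username='guest', password='guest')
    hub.exchange_declare('moksha_example')
    hub.create_queue('moksha_example_queue', exchange='moksha_example')
    hub.queue_bind('moksha_example_queue', 'moksha_example')
    hub.send_message('hello moksha', exchange='moksha_example')
    print(hub.get_message('moksha_example_queue'))
    hub.close()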
|
python
|
from ctypes import *
SHM_SIZE = 4096
SHM_KEY = 67483
try:
rt = CDLL('librt.so')
except:
rt = CDLL('librt.so.1')
shmget = rt.shmget
shmget.argtypes = [c_int, c_size_t, c_int]
shmget.restype = c_int
shmat = rt.shmat
shmat.argtypes = [c_int, c_void_p, c_int]
shmat.restype = c_void_p
shmctl = rt.shmctl
shmctl.argtypes = [c_int, c_int, POINTER(c_void_p)]
shmctl.restype = c_int
IPC_CREAT = 0o1000
IPC_EXCL = 0o2000
IPC_NOWAIT = 0o4000
IPC_RMID = 0
IPC_SET = 1
IPC_STAT = 2
IPC_INFO = 3
shmid = shmget(SHM_KEY, SHM_SIZE, 0o666 | IPC_CREAT)
if shmid > 0:
print(f"Create a shared memory segment {shmid}")
else:
    # shmctl() expects the segment id, not the key: look up the existing segment first
    old_shmid = shmget(SHM_KEY, 0, 0)
    ret = shmctl(old_shmid, IPC_RMID, None)
    print(f"Delete exist {SHM_KEY} (shmid {old_shmid}): {ret}")
# 5177360
addr = shmat(shmid, None, 0)
if addr < 0:
print(f"addr:{addr}")
import numpy as np
# c_buffer
bs = (c_byte * SHM_SIZE).from_address(addr)
s = np.ndarray((4, 4), buffer=bs)
s.fill(5)
print(type(s))
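# Optional cleanup sketch (assumed addition, not in the original script): detach the mapping and
# remove the segment so the key can be reused; left disabled so the default behaviour is unchanged.
CLEANUP = False
if CLEANUP:
    shmdt = rt.shmdt
    shmdt.argtypes = [c_void_p]
    shmdt.restype = c_int
    print(f"shmdt: {shmdt(addr)}")
    print(f"shmctl(IPC_RMID): {shmctl(shmid, IPC_RMID, None)}")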
|
python
|